You are currently looking at version 1.1 of this notebook. To download notebooks and datafiles, as well as get help on Jupyter notebooks in the Coursera platform, visit the Jupyter Notebook FAQ course resource.
In [2]:
import pandas as pd
import numpy as np
from scipy.stats import ttest_ind
This assignment requires more individual learning than previous assignments - you are encouraged to check out the pandas documentation to find functions or methods you might not have used yet, or ask questions on Stack Overflow and tag them as pandas and python related. And of course, the discussion forums are open for interaction with your peers and the course staff.
Definitions:
- A quarter is a specific three-month period: Q1 is January through March, Q2 is April through June, Q3 is July through September, Q4 is October through December.
- A recession is defined as starting with two consecutive quarters of GDP decline, and ending with two consecutive quarters of GDP growth.
- A recession bottom is the quarter within a recession which had the lowest GDP.
- A university town is a city which has a high percentage of university students compared to the total population of the city.
Hypothesis: University towns have their mean housing prices less affected by recessions. Run a t-test to compare the ratio of the mean price of houses in university towns the quarter before the recession starts compared to the recession bottom. (price_ratio = quarter_before_recession / recession_bottom)
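To make that ratio concrete, here is a tiny, self-contained illustration with made-up numbers; 2008q2 and 2009q2 are used only as stand-ins for the quarter before the recession start and the recession bottom, which the functions below actually compute.
import pandas as pd

# Hypothetical prices for two towns; not the assignment data.
housing = pd.DataFrame({"2008q2": [200000.0, 150000.0],
                        "2009q2": [180000.0, 120000.0]},
                       index=["Town A", "Town B"])
# price_ratio = quarter_before_recession / recession_bottom
price_ratio = housing["2008q2"] / housing["2009q2"]
print(price_ratio)   # values above 1 mean prices fell from the pre-recession quarter to the bottom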
The following data files are available for this assignment:
- City_Zhvi_AllHomes.csv, which has median home sale prices at a fine-grained (city) level.
- university_towns.txt, a list of university towns in the United States.
- gdplev.xls, quarterly GDP of the United States. For this assignment, only look at GDP data from the first quarter of 2000 onward (a short loading sketch appears just below).
Each function in this assignment below is worth 10%, with the exception of run_ttest(), which is worth 50%.
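As a reference for that last point, here is a minimal sketch of reading the quarterly series from gdplev.xls starting at 2000q1; the skiprows value and column letters are assumptions about this particular spreadsheet's layout, matching what the GDP functions below rely on.
import pandas as pd

# Column E holds the quarter label, column G the GDP in chained 2009 dollars;
# skiprows=220 skips the file header and every quarter before 2000q1 in this layout.
gdp = pd.read_excel("gdplev.xls", header=None, skiprows=220,
                    names=["Quarter", "GDP"], usecols="E,G")
print(gdp.head())   # should start at 2000q1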
In [3]:
# Use this dictionary to map two-letter state acronyms to full state names
states = {'OH': 'Ohio', 'KY': 'Kentucky', 'AS': 'American Samoa', 'NV': 'Nevada', 'WY': 'Wyoming', 'NA': 'National', 'AL': 'Alabama', 'MD': 'Maryland', 'AK': 'Alaska', 'UT': 'Utah', 'OR': 'Oregon', 'MT': 'Montana', 'IL': 'Illinois', 'TN': 'Tennessee', 'DC': 'District of Columbia', 'VT': 'Vermont', 'ID': 'Idaho', 'AR': 'Arkansas', 'ME': 'Maine', 'WA': 'Washington', 'HI': 'Hawaii', 'WI': 'Wisconsin', 'MI': 'Michigan', 'IN': 'Indiana', 'NJ': 'New Jersey', 'AZ': 'Arizona', 'GU': 'Guam', 'MS': 'Mississippi', 'PR': 'Puerto Rico', 'NC': 'North Carolina', 'TX': 'Texas', 'SD': 'South Dakota', 'MP': 'Northern Mariana Islands', 'IA': 'Iowa', 'MO': 'Missouri', 'CT': 'Connecticut', 'WV': 'West Virginia', 'SC': 'South Carolina', 'LA': 'Louisiana', 'KS': 'Kansas', 'NY': 'New York', 'NE': 'Nebraska', 'OK': 'Oklahoma', 'FL': 'Florida', 'CA': 'California', 'CO': 'Colorado', 'PA': 'Pennsylvania', 'DE': 'Delaware', 'NM': 'New Mexico', 'RI': 'Rhode Island', 'MN': 'Minnesota', 'VI': 'Virgin Islands', 'NH': 'New Hampshire', 'MA': 'Massachusetts', 'GA': 'Georgia', 'ND': 'North Dakota', 'VA': 'Virginia'}
In [4]:
def get_list_of_university_towns():
    '''Returns a DataFrame of towns and the states they are in from the
    university_towns.txt list. The format of the DataFrame should be:
    DataFrame( [ ["Michigan", "Ann Arbor"], ["Michigan", "Ypsilanti"] ],
    columns=["State", "RegionName"] )

    The following cleaning needs to be done:
    1. For "State", removing characters from "[" to the end.
    2. For "RegionName", when applicable, removing every character from " (" to the end.
    3. Depending on how you read the data, you may need to remove newline character '\n'. '''
    region_state_list = []
    with open("university_towns.txt") as fh:
        region_name = ""
        for line in fh:
            region_state = []
            if "[edit]" in line:
                # Lines such as "Alabama[edit]" mark the start of a new state block.
                index_val = line.index("[")
                region_name = line[:index_val]
            else:
                # Town lines may carry a parenthesized university list; keep only the town name.
                if line.count("(") > 0:
                    region_state = [region_name, line[:line.index("(")].strip()]
                else:
                    region_state = [region_name, line.strip()]
                region_state_list.append(region_state)
    labels = ["State", "RegionName"]
    df = pd.DataFrame.from_records(region_state_list, columns=labels)
    return df

get_list_of_university_towns()
Out[4]:
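A quick, optional sanity check on the cleaned towns table, assuming the states dictionary defined above (illustrative only, not part of the graded assignment):
towns = get_list_of_university_towns()
# Every cleaned State value should be one of the full state names in the states dictionary.
print(set(towns["State"]) - set(states.values()))   # ideally an empty set
print(towns.shape)                                  # (rows, 2) with columns State and RegionName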
In [5]:
def get_recession_start():
    '''Returns the year and quarter of the recession start time as a
    string value in a format such as 2005q3'''
    # Columns E and G hold the quarter label and GDP in chained 2009 dollars;
    # skiprows=220 starts the series at 2000q1. (usecols replaces the deprecated parse_cols.)
    df = pd.read_excel("gdplev.xls", header=None, skiprows=220,
                       names=["Quarter", "GDP"], usecols="E,G")
    # A recession starts with two consecutive quarters of GDP decline;
    # return the first quarter of the first such run.
    for i in range(0, len(df) - 2):
        if df.loc[i]["GDP"] > df.loc[i + 1]["GDP"] and df.loc[i + 1]["GDP"] > df.loc[i + 2]["GDP"]:
            return df.loc[i + 1]["Quarter"]

get_recession_start()
Out[5]:
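The same two-consecutive-decline rule can also be written without an explicit loop; here is a sketch assuming the gdplev.xls layout used above (an alternative, not the graded solution):
import pandas as pd

gdp = pd.read_excel("gdplev.xls", header=None, skiprows=220,
                    names=["Quarter", "GDP"], usecols="E,G")
declining = gdp["GDP"].diff() < 0                                 # True where GDP fell versus the previous quarter
two_declines = declining & declining.shift(-1, fill_value=False)  # this fall is followed by another fall
print(gdp.loc[two_declines.idxmax(), "Quarter"])                  # first quarter of the first such run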
In [6]:
def get_recession_end():
    '''Returns the year and quarter of the recession end time as a
    string value in a format such as 2005q3'''
    df = pd.read_excel("gdplev.xls", header=None, skiprows=220,
                       names=["Quarter", "GDP"], usecols="E,G")
    # Walk the series for runs of two consecutive declining quarters; with no break,
    # recession_start ends up at the start of the last such run rather than the first.
    recession_start = None
    for i in range(0, len(df) - 2):
        if df.loc[i]["GDP"] > df.loc[i + 1]["GDP"] and df.loc[i + 1]["GDP"] > df.loc[i + 2]["GDP"]:
            recession_start = i + 1
    # From there, report the recession end once two consecutive quarters of growth appear.
    for i in range(recession_start + 2, len(df) - 2):
        if df.loc[i]["GDP"] < df.loc[i + 1]["GDP"] and df.loc[i + 1]["GDP"] < df.loc[i + 2]["GDP"]:
            return df.loc[i + 1]["Quarter"]

get_recession_end()
Out[6]:
In [7]:
def get_recession_bottom():
    '''Returns the year and quarter of the recession bottom time as a
    string value in a format such as 2005q3'''
    df = pd.read_excel("gdplev.xls", header=None, skiprows=220,
                       names=["Quarter", "GDP"], usecols="E,G")
    # Recession start: the first quarter of the first run of two consecutive GDP declines.
    recession_start = None
    for i in range(0, len(df) - 2):
        if df.loc[i]["GDP"] > df.loc[i + 1]["GDP"] and df.loc[i + 1]["GDP"] > df.loc[i + 2]["GDP"]:
            recession_start = i + 1
            break
    # Recession end marker: the first quarter of two consecutive GDP growth quarters after the start.
    recession_end = None
    for i in range(recession_start + 2, len(df) - 2):
        if df.loc[i]["GDP"] < df.loc[i + 1]["GDP"] and df.loc[i + 1]["GDP"] < df.loc[i + 2]["GDP"]:
            recession_end = i + 1
            break
    # The bottom is the quarter with the lowest GDP within the recession window.
    df = df.loc[recession_start:recession_end + 1]
    return df.loc[df["GDP"].idxmin()]["Quarter"]

get_recession_bottom()
Out[7]:
In [8]:
def convert_housing_data_to_quarters():
    '''Converts the housing data to quarters and returns it as mean
    values in a dataframe. This dataframe should be a dataframe with
    columns for 2000q1 through 2016q3, and should have a multi-index
    in the shape of ["State","RegionName"].

    Note: Quarters are defined in the assignment description, they are
    not arbitrary three month periods.

    The resulting dataframe should have 67 columns, and 10,730 rows.
    '''
    global states
    df = pd.read_csv("City_Zhvi_AllHomes.csv", header=0)
    new_df = pd.DataFrame()
    mean_df = pd.DataFrame()
    column_name_list = df.columns.values.tolist()
    # Monthly price columns start at position 6; average them in groups of three
    # (the last quarter in the file only has two months).
    for col_index in range(6, len(df.columns) + 1, 3):
        new_df[col_index] = df[column_name_list[col_index]]
        new_df[col_index + 1] = df[column_name_list[col_index + 1]]
        if col_index + 2 < len(column_name_list):
            new_df[col_index + 2] = df[column_name_list[col_index + 2]]
            # .loc label slicing is inclusive, so this averages three monthly columns.
            mean_df["new_col" + str(col_index)] = new_df.loc[:, col_index:col_index + 2].mean(axis=1)
        else:
            mean_df["new_col" + str(col_index)] = new_df.loc[:, col_index:col_index + 1].mean(axis=1)
    # Build quarter labels such as "2000q1" from the "YYYY-MM" column names.
    column_names = [col.split("-")[0] for col in df.columns if len(col.split("-")) > 1]
    final_names = []
    for name_index in range(0, len(column_names), 3):
        if name_index % 4 == 0:
            final_names.append(column_names[name_index] + "q2")
        elif name_index % 4 == 1:
            final_names.append(column_names[name_index] + "q1")
        elif name_index % 4 == 2:
            final_names.append(column_names[name_index] + "q4")
        else:
            final_names.append(column_names[name_index] + "q3")
    mean_df.columns = final_names
    # Keep only 2000q1 onward (drops the 15 quarters before it).
    mean_df = mean_df.iloc[:, 15:]
    # Attach the identifying columns and build the ["State", "RegionName"] multi-index.
    for new_col in column_name_list[1:3]:
        mean_df[new_col] = df[new_col]
    mean_df["State"] = mean_df["State"].apply(lambda x: states[x])
    mean_df = mean_df.set_index(["State", "RegionName"])
    return mean_df

convert_housing_data_to_quarters()
Out[8]:
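For comparison, the month-to-quarter averaging above can also be written with a PeriodIndex and a groupby. This sketch assumes the Zillow file's monthly columns are labeled like "2000-01" through "2016-08" (as the column handling above implies) and that State holds two-letter codes:
import pandas as pd

df = pd.read_csv("City_Zhvi_AllHomes.csv")
monthly = df.loc[:, "2000-01":"2016-08"]                          # monthly price columns from 2000 onward
quarters = pd.to_datetime(monthly.columns).to_period("Q")         # "2000-01".."2000-03" -> 2000Q1, etc.
quarterly = monthly.T.groupby(quarters).mean().T                  # mean of the months in each quarter
quarterly.columns = [str(q).lower() for q in quarterly.columns]   # "2000Q1" -> "2000q1"
quarterly["State"] = df["State"].map(states)                      # two-letter code -> full state name
quarterly["RegionName"] = df["RegionName"]
quarterly = quarterly.set_index(["State", "RegionName"])
print(quarterly.shape)                                            # (10730, 67) per the docstring above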
In [13]:
def run_ttest():
    '''First creates new data showing the decline or growth of housing prices
    between the recession start and the recession bottom. Then runs a ttest
    comparing the university town values to the non-university town values,
    returning whether the null hypothesis (that the two groups are the same)
    can be rejected, as well as the p-value of the test.

    Return the tuple (different, p, better) where different=True if the t-test is
    True at a p<0.01 (we reject the null hypothesis), or different=False if
    otherwise (we cannot reject the null hypothesis). The variable p should
    be equal to the exact p value returned from scipy.stats.ttest_ind(). The
    value for better should be either "university town" or "non-university town"
    depending on which has a lower mean price ratio (which is equivalent to a
    reduced market loss).'''
    df = convert_housing_data_to_quarters()
    recession_bottom = get_recession_bottom()
    recession_start = get_recession_start()
    # Restrict the housing data to the quarters from the recession start through the bottom.
    col = df.columns.values.tolist()
    start_index = col.index(recession_start)
    finish_index = col.index(recession_bottom)
    df = df.iloc[:, start_index:finish_index + 1]
    col = col[start_index:finish_index + 1]
    col.extend(["State", "RegionName"])
    # Split the towns into university and non-university groups.
    university_df = get_list_of_university_towns()
    uni_df = pd.DataFrame(columns=col)
    for row in university_df.iterrows():
        try:
            list_row = df.loc[row[1].State].loc[row[1].RegionName].tolist()
            list_row.extend([row[1].State, row[1].RegionName])
            uni_df.loc[int(row[0])] = list_row
        except Exception:
            # University towns with no matching housing record are skipped.
            pass
    uni_df = uni_df.set_index(["State", "RegionName"])
    no_uni_df = df[df.index.map(lambda x: x not in uni_df.index)]
    # Compare the two groups' quarterly mean prices with an independent-samples t-test.
    ttest_tuple = ttest_ind(uni_df.mean(), no_uni_df.mean(), nan_policy='omit')
    different = float(ttest_tuple[1]) < 0.01
    # "better" is the group whose mean-price series reaches the lower minimum over the window.
    uni_mean = uni_df.mean()
    no_uni_mean = no_uni_df.mean()
    if uni_mean.min() <= no_uni_mean.min():
        better = "university town"
    else:
        better = "non-university town"
    return (different, ttest_tuple[1], better)

run_ttest()
Out[13]:
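The docstring above frames the comparison as a per-town price ratio (quarter before the recession start divided by the recession bottom), while the graded cell compares the two groups' quarterly means. For completeness, here is a sketch of the ratio-based variant, reusing the helper functions defined earlier (an alternative reading, not the implementation above):
housing = convert_housing_data_to_quarters()
towns = get_list_of_university_towns().set_index(["State", "RegionName"])
start = get_recession_start()
bottom = get_recession_bottom()
quarter_before = housing.columns[housing.columns.get_loc(start) - 1]

price_ratio = housing[quarter_before] / housing[bottom]        # > 1 means prices fell into the bottom
is_uni = price_ratio.index.isin(towns.index)
uni_ratios = price_ratio[is_uni].dropna()
non_uni_ratios = price_ratio[~is_uni].dropna()

t_stat, p_value = ttest_ind(uni_ratios, non_uni_ratios)        # ttest_ind imported at the top of the notebook
different = p_value < 0.01
better = "university town" if uni_ratios.mean() < non_uni_ratios.mean() else "non-university town"
print(different, p_value, better)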
In [ ]: