You are currently looking at version 1.1 of this notebook. To download notebooks and datafiles, as well as get help on Jupyter notebooks in the Coursera platform, visit the Jupyter Notebook FAQ course resource.
In [3]:
import pandas as pd
import numpy as np
from scipy.stats import ttest_ind
import re
This assignment requires more individual learning than previous assignments - you are encouraged to check out the pandas documentation to find functions or methods you might not have used yet, or ask questions on Stack Overflow and tag them as pandas and python related. And of course, the discussion forums are open for interaction with your peers and the course staff.
Definitions:
Hypothesis: University towns have their mean housing prices less affected by recessions. Run a t-test to compare the ratio of the mean price of houses in university towns the quarter before the recession starts compared to the recession bottom. (price_ratio = quarter_before_recession / recession_bottom)
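As a concrete illustration of that ratio (the towns and prices below are made up; the real quarters come from the recession helpers defined later in this notebook):
In [ ]:
import pandas as pd

# hypothetical quarterly mean prices for two towns
housing = pd.DataFrame({'quarter_before_recession': [200000, 150000],
                        'recession_bottom':         [180000, 120000]},
                       index=['Town A', 'Town B'])
price_ratio = housing['quarter_before_recession'] / housing['recession_bottom']
price_ratio   # a lower ratio means a smaller relative price drop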
The following data files are available for this assignment:
- City_Zhvi_AllHomes.csv, which has median home sale prices at a fine-grained level.
- university_towns.txt
- gdplev.xls. For this assignment, only look at GDP data from the first quarter of 2000 onward.
Each function in this assignment below is worth 10%, with the exception of run_ttest(), which is worth 50%.
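A quick, ungraded peek at the raw files can help before writing the functions below; this sketch assumes the three files sit in the notebook's working directory:
In [ ]:
import pandas as pd

print(open('university_towns.txt').read(200))           # raw text; state lines carry an "[edit]" tag
print(pd.read_csv('City_Zhvi_AllHomes.csv', nrows=3))    # median home sale prices, one row per city
print(pd.read_excel('gdplev.xls').head(10))              # GDP workbook; quarterly figures sit in the right-hand columns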
In [212]:
# Use this dictionary to map two-letter state acronyms to state names
states = {'OH': 'Ohio', 'KY': 'Kentucky', 'AS': 'American Samoa', 'NV': 'Nevada', 'WY': 'Wyoming', 'NA': 'National', 'AL': 'Alabama', 'MD': 'Maryland', 'AK': 'Alaska', 'UT': 'Utah', 'OR': 'Oregon', 'MT': 'Montana', 'IL': 'Illinois', 'TN': 'Tennessee', 'DC': 'District of Columbia', 'VT': 'Vermont', 'ID': 'Idaho', 'AR': 'Arkansas', 'ME': 'Maine', 'WA': 'Washington', 'HI': 'Hawaii', 'WI': 'Wisconsin', 'MI': 'Michigan', 'IN': 'Indiana', 'NJ': 'New Jersey', 'AZ': 'Arizona', 'GU': 'Guam', 'MS': 'Mississippi', 'PR': 'Puerto Rico', 'NC': 'North Carolina', 'TX': 'Texas', 'SD': 'South Dakota', 'MP': 'Northern Mariana Islands', 'IA': 'Iowa', 'MO': 'Missouri', 'CT': 'Connecticut', 'WV': 'West Virginia', 'SC': 'South Carolina', 'LA': 'Louisiana', 'KS': 'Kansas', 'NY': 'New York', 'NE': 'Nebraska', 'OK': 'Oklahoma', 'FL': 'Florida', 'CA': 'California', 'CO': 'Colorado', 'PA': 'Pennsylvania', 'DE': 'Delaware', 'NM': 'New Mexico', 'RI': 'Rhode Island', 'MN': 'Minnesota', 'VI': 'Virgin Islands', 'NH': 'New Hampshire', 'MA': 'Massachusetts', 'GA': 'Georgia', 'ND': 'North Dakota', 'VA': 'Virginia'}
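Since the dictionary is keyed by the two-letter code, converting a column of codes to full state names is a single `map` call (the housing function below does the same thing with `replace`):
In [ ]:
import pandas as pd

# e.g. turn Zillow's state codes into full names
pd.Series(['OH', 'TX', 'MI']).map(states)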
In [229]:
def get_list_of_university_towns():
    '''Returns a DataFrame of towns and the states they are in from the
    university_towns.txt list. The format of the DataFrame should be:
    DataFrame( [ ["Michigan", "Ann Arbor"], ["Michigan", "Yipsilanti"] ],
    columns=["State", "RegionName"] )

    The following cleaning needs to be done:
    1. For "State", removing characters from "[" to the end.
    2. For "RegionName", when applicable, removing every character from " (" to the end.
    3. Depending on how you read the data, you may need to remove newline character '\n'. '''
    # one raw line per row; state lines are tagged with "[edit]"
    df = pd.read_csv("university_towns.txt", delimiter="\n", header=None)
    # lines containing "[edit]" are state headers; everything else is a town
    df['State'] = df.loc[df[0].str.contains('[edit]', regex=False), 0]
    df['State'] = df['State'].str.replace(r'\[edit\]', '')
    df['RegionName'] = df.loc[df['State'].isnull(), 0]
    # strip " (...)" suffixes, stray digits and leftover bracket pairs
    df['RegionName'] = df['RegionName'].str.replace(r'\([^)]*\)', '')
    df['RegionName'] = df['RegionName'].str.replace(r'[0-9]+', '')
    df['RegionName'] = df['RegionName'].str.replace(r'\[\]', '')
    # forward-fill each state down to its towns, then drop the state header rows
    df['State'] = df['State'].ffill()
    df = df.dropna()
    df['RegionName'] = df['RegionName'].str.rstrip()
    cols = ['State', 'RegionName']
    df = df[cols].reset_index(drop=True)
    return df
Out[229]:
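A quick structural sanity check on the cleaned list (the exact row count is not specified here, so only the column layout and the "[edit]" cleanup are asserted):
In [ ]:
ut = get_list_of_university_towns()
print(ut.shape)
print(ut.head())
assert list(ut.columns) == ['State', 'RegionName']   # required column order
assert not ut['State'].str.contains('edit').any()    # "[edit]" tags stripped from state names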
In [232]:
def get_recession_start():
    '''Returns the year and quarter of the recession start time as a
    string value in a format such as 2005q3'''
    df = pd.read_excel('gdplev.xls')
    df.drop(df.columns[[3, 7]], axis=1, inplace=True)
    df = df[df.columns[-3:]]                  # keep only the quarterly columns
    col_names = ['Quarter', df.iloc[4, 1], df.iloc[4, 2]]
    df.columns = col_names                    # column names
    df = df[7:].reset_index(drop=True)        # remove the header/NaN rows
    # only look at GDP data from 2000q1 onward
    loc = df[df['Quarter'] == '2000q1'].index.tolist()[0]
    df = df[loc:].reset_index(drop=True)
    quarters = []
    for i in range(len(df) - 2):
        # two consecutive quarters of GDP decline mark a recession
        if (df.iloc[i, 1] > df.iloc[i + 1, 1]) & (df.iloc[i + 1, 1] > df.iloc[i + 2, 1]):
            # note: this records the quarter *preceding* the first decline;
            # run_ttest() below uses it directly as the pre-recession quarter
            quarters.append(df.iloc[i, 0])
    return quarters[0]
Out[232]:
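The grader expects a plain 'YYYYqN' string, so a cheap format check is:
In [ ]:
import re

start = get_recession_start()
print(start)
assert re.match(r'^\d{4}q[1-4]$', start)   # e.g. a '2005q3'-style label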
In [233]:
def get_recession_end():
    '''Returns the year and quarter of the recession end time as a
    string value in a format such as 2005q3'''
    df = pd.read_excel('gdplev.xls')
    df.drop(df.columns[[3, 7]], axis=1, inplace=True)
    df = df[df.columns[-3:]]
    col_names = ['Quarter', df.iloc[4, 1], df.iloc[4, 2]]
    df.columns = col_names
    df = df[7:].reset_index(drop=True)
    # search forward from the recession start
    start = get_recession_start()
    start_idx = df[df['Quarter'] == start].index.tolist()[0]
    df = df.loc[start_idx:, :]
    for row in range(len(df) - 2):
        # two consecutive quarters of GDP growth end the recession;
        # row+2 is the second growth quarter, i.e. the recession end
        if (df.iloc[row, 1] < df.iloc[row + 1, 1]) & (df.iloc[row + 1, 1] < df.iloc[row + 2, 1]):
            return df.iloc[row + 2, 0]
    return None
Out[233]:
In [234]:
def get_recession_bottom():
    '''Returns the year and quarter of the recession bottom time as a
    string value in a format such as 2005q3'''
    df = pd.read_excel('gdplev.xls')
    df.drop(df.columns[[3, 7]], axis=1, inplace=True)
    df = df[df.columns[-3:]]
    col_names = ['Quarter', df.iloc[4, 1], df.iloc[4, 2]]
    df.columns = col_names
    df = df[7:].reset_index(drop=True)
    # restrict to the recession window, then take the quarter with the lowest GDP
    start = get_recession_start()
    start_idx = df[df['Quarter'] == start].index.tolist()[0]
    end = get_recession_end()
    end_idx = df[df['Quarter'] == end].index.tolist()[0]
    df = df[start_idx:end_idx]
    bottom = df[df.columns[1]].idxmin()
    return df.loc[bottom, df.columns[0]]
Out[234]:
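Because 'YYYYqN' strings sort lexicographically in chronological order, the three recession helpers can be cross-checked with a simple ordering assertion (assuming they return what their docstrings promise):
In [ ]:
start, bottom, end = get_recession_start(), get_recession_bottom(), get_recession_end()
print(start, bottom, end)
assert start < bottom <= end   # the bottom must fall inside the recession window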
In [236]:
def convert_housing_data_to_quarters():
    '''Converts the housing data to quarters and returns it as mean
    values in a dataframe. This dataframe should be a dataframe with
    columns for 2000q1 through 2016q3, and should have a multi-index
    in the shape of ["State","RegionName"].

    Note: Quarters are defined in the assignment description, they are
    not arbitrary three month periods.

    The resulting dataframe should have 67 columns, and 10,730 rows.
    '''
    df = pd.read_csv('City_Zhvi_AllHomes.csv')

    def new_col_names():
        # build the 67 quarter labels 2000q1 .. 2016q3
        quarters = ['q1', 'q2', 'q3', 'q4']
        yr_qr = []
        for year in range(2000, 2017):
            for q in quarters:
                yr_qr.append(str(year) + q)
        return yr_qr[:67]

    # clean up data, removing unnecessary columns
    df.drop(df.columns[[0, 3, 4, 5]], axis=1, inplace=True)
    df['State'].replace(states, inplace=True)      # two-letter codes -> full state names
    df.set_index(['State', 'RegionName'], inplace=True)
    # drop the 1996-1999 monthly columns; only 2000q1 onward is needed
    drop_cols = df.columns[df.columns.str.startswith('199')]
    df.drop(drop_cols, axis=1, inplace=True)
    # group the remaining monthly columns into runs of three (one quarter each)
    monthly_groups = []
    for i in range(0, len(df.columns), 3):
        monthly_groups.append(list(df.columns[i:i + 3]))
    # each quarter column is the mean of its (up to three) monthly columns
    col_names = new_col_names()
    for col, months in zip(col_names, monthly_groups):
        df[col] = df[months].mean(axis=1)
    return df[col_names]

convert_housing_data_to_quarters()
Out[236]:
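The docstring pins the expected shape and index, so it is worth checking directly (assuming the course's snapshot of City_Zhvi_AllHomes.csv):
In [ ]:
hdf = convert_housing_data_to_quarters()
print(hdf.shape)                                   # expected: (10730, 67)
assert hdf.shape == (10730, 67)
assert hdf.columns[0] == '2000q1' and hdf.columns[-1] == '2016q3'
assert list(hdf.index.names) == ['State', 'RegionName']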
In [219]:
def run_ttest():
    '''First creates new data showing the decline or growth of housing prices
    between the recession start and the recession bottom. Then runs a ttest
    comparing the university town values to the non-university town values,
    returning whether the two groups are significantly different (i.e. whether
    we can reject the null hypothesis that they are the same) as well as the
    p-value from the test.

    Return the tuple (different, p, better) where different=True if the t-test is
    True at a p<0.01 (we reject the null hypothesis), or different=False if
    otherwise (we cannot reject the null hypothesis). The variable p should
    be equal to the exact p value returned from scipy.stats.ttest_ind(). The
    value for better should be either "university town" or "non-university town"
    depending on which has a lower mean price ratio (which is equivalent to a
    reduced market loss).'''
    df = convert_housing_data_to_quarters()
    rec_start = get_recession_start()
    rec_bottom = get_recession_bottom()
    # keep only the quarters between the recession start and the bottom
    df = df.loc[:, rec_start:rec_bottom]
    df.reset_index(inplace=True)

    # price ratio: relative loss between the pre-recession quarter and the bottom
    def price_ratio(row):
        return (row[rec_start] - row[rec_bottom]) / row[rec_start]

    df['ratio'] = df.apply(price_ratio, axis=1)
    # flag university towns
    df_uni_town = get_list_of_university_towns()['RegionName']
    df['uni_town'] = df['RegionName'].isin(df_uni_town.values)
    df_uni = df[df['uni_town']]
    df_not_uni = df[~df['uni_town']]
    # t-test on the two groups of price ratios
    uni = df_uni['ratio'].dropna()
    not_uni = df_not_uni['ratio'].dropna()

    def better():
        if uni.mean() < not_uni.mean():
            return 'university town'
        return 'non-university town'

    pval = ttest_ind(uni, not_uni).pvalue
    different = pval < 0.01
    return (different, pval, better())

run_ttest()
Out[219]:
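The returned tuple reads as (reject H0 at p < 0.01, exact p-value, group with the lower mean price ratio); unpacking it makes the interpretation explicit:
In [ ]:
different, p, better = run_ttest()
print('Statistically different at p < 0.01:', different)
print('p-value:', p)
print('Group with the lower mean price ratio (smaller loss):', better)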
In [ ]: