You are currently looking at version 1.1 of this notebook. To download notebooks and datafiles, as well as get help on Jupyter notebooks in the Coursera platform, visit the Jupyter Notebook FAQ course resource.
In [1]:
import pandas as pd
import numpy as np
from scipy.stats import ttest_ind
This assignment requires more individual learning than previous assignments - you are encouraged to check out the pandas documentation to find functions or methods you might not have used yet, or ask questions on Stack Overflow and tag them as pandas and python related. And of course, the discussion forums are open for interaction with your peers and the course staff.
Definitions:
Hypothesis: University towns have their mean housing prices less affected by recessions. Run a t-test to compare the ratio of the mean price of houses in university towns the quarter before the recession starts compared to the recession bottom. (price_ratio = quarter_before_recession / recession_bottom)
The following data files are available for this assignment:
City_Zhvi_AllHomes.csv: median home sale prices at a fine grained (city) level, from Zillow.
university_towns.txt: a list of university towns in the United States.
gdplev.xls: quarterly US GDP data.
For this assignment, only look at GDP data from the first quarter of 2000 onward.
Each function in this assignment below is worth 10%, with the exception of run_ttest(), which is worth 50%.
In [2]:
# Use this dictionary to map two letter acronyms to state names
states = {'OH': 'Ohio', 'KY': 'Kentucky', 'AS': 'American Samoa', 'NV': 'Nevada', 'WY': 'Wyoming', 'NA': 'National', 'AL': 'Alabama', 'MD': 'Maryland', 'AK': 'Alaska', 'UT': 'Utah', 'OR': 'Oregon', 'MT': 'Montana', 'IL': 'Illinois', 'TN': 'Tennessee', 'DC': 'District of Columbia', 'VT': 'Vermont', 'ID': 'Idaho', 'AR': 'Arkansas', 'ME': 'Maine', 'WA': 'Washington', 'HI': 'Hawaii', 'WI': 'Wisconsin', 'MI': 'Michigan', 'IN': 'Indiana', 'NJ': 'New Jersey', 'AZ': 'Arizona', 'GU': 'Guam', 'MS': 'Mississippi', 'PR': 'Puerto Rico', 'NC': 'North Carolina', 'TX': 'Texas', 'SD': 'South Dakota', 'MP': 'Northern Mariana Islands', 'IA': 'Iowa', 'MO': 'Missouri', 'CT': 'Connecticut', 'WV': 'West Virginia', 'SC': 'South Carolina', 'LA': 'Louisiana', 'KS': 'Kansas', 'NY': 'New York', 'NE': 'Nebraska', 'OK': 'Oklahoma', 'FL': 'Florida', 'CA': 'California', 'CO': 'Colorado', 'PA': 'Pennsylvania', 'DE': 'Delaware', 'NM': 'New Mexico', 'RI': 'Rhode Island', 'MN': 'Minnesota', 'VI': 'Virgin Islands', 'NH': 'New Hampshire', 'MA': 'Massachusetts', 'GA': 'Georgia', 'ND': 'North Dakota', 'VA': 'Virginia'}
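The dictionary maps two-letter codes to full names, which is the direction needed later when re-labelling the housing data's State column. A minimal sketch of that lookup (the sample codes below are illustrative, not from the dataset):

# Illustrative lookup of two-letter codes (sample values only)
sample = pd.Series(["OH", "TX", "MI"])
print(sample.map(states).tolist())  # ['Ohio', 'Texas', 'Michigan']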
In [3]:
df_all_homes = pd.read_csv("City_Zhvi_AllHomes.csv")
In [4]:
df_all_homes.head()
Out[4]:
In [5]:
def get_list_of_university_towns():
    '''Returns a DataFrame of towns and the states they are in from the
    university_towns.txt list. The format of the DataFrame should be:
    DataFrame( [ ["Michigan", "Ann Arbor"], ["Michigan", "Ypsilanti"] ],
    columns=["State", "RegionName"] )

    The following cleaning needs to be done:
    1. For "State", removing characters from "[" to the end.
    2. For "RegionName", when applicable, removing every character from " (" to the end.
    3. Depending on how you read the data, you may need to remove newline character '\n'. '''
    with open("university_towns.txt", "r") as towns:
        lines = towns.read().split("\n")
    # Elements containing the string "[edit]" are states.
    state_indicator = "[edit]"
    state = ""
    states_regions = []
    for element in lines:
        if state_indicator in element:
            # State: strip the "[edit]" suffix
            state = element.split("[")[0].rstrip()
        else:
            # Region: strip everything from " (" onward
            states_regions.append({"State": state, "RegionName": element.split("(")[0].rstrip()})
    df = pd.DataFrame.from_dict(states_regions)
    # Drop the last row: the trailing newline yields an empty region
    df = df.drop(df.index[len(df) - 1])
    # Move State to the first column. This is done only for the automatic
    # grader, which expects this column order.
    cols = df.columns.tolist()
    cols = cols[-1:] + cols[:-1]
    df = df[cols]
    return df

get_list_of_university_towns()
Out[5]:
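The parser above assumes the Wikipedia-style layout of university_towns.txt: state lines end in "[edit]" and town lines usually carry the university name in parentheses. A minimal sketch of the two cleaning rules on made-up lines (hypothetical, not read from the actual file):

# Hypothetical sample lines illustrating the cleaning rules
sample_lines = ["Alabama[edit]",
                "Auburn (Auburn University)",
                "Florence (University of North Alabama)"]
for line in sample_lines:
    if "[edit]" in line:
        print("State:", line.split("[")[0].rstrip())   # State: Alabama
    else:
        print("Town:", line.split("(")[0].rstrip())    # Town: Auburn / Florence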
In [6]:
# Get GDP info: drop unnecessary header rows
df_gdp = pd.read_excel("gdplev.xls", header=6)
# Take only the quarterly columns: period, GDP in current dollars,
# and GDP in chained 2009 dollars
df_gdp = df_gdp.iloc[:, 4:7]
df_gdp.columns = ["Quarter", "GDP current", "GDP chained"]
df_gdp.head()
Out[6]:
In [7]:
# Find GDP changes between quarters
df_gdp["Diff"] = df_gdp["GDP chained"].diff()
df_gdp.head()
Out[7]:
In [8]:
# Mark increase (1) or decrease (0) in GDP
df_gdp["Change"] = np.where(df_gdp["Diff"] < 0, "0", "1")
# Now drop years before 2000 since "For this assignment, only look at GDP data from the first quarter of 2000 onward"
df_gdp = df_gdp[df_gdp.Quarter >= "2000q1"]
df_gdp.head()
Out[8]:
In [9]:
# Encode the increase/decline flags as one string so that consecutive
# declines/increases can be found with a simple substring search
change_str = "".join(df_gdp["Change"].values)
change_str
Out[9]:
In [10]:
# Find the recession: two consecutive quarters of decline ("00")
# followed by two consecutive quarters of growth ("11")
recession_index = change_str.index("0011")
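To see why index() and rindex() recover the recession boundaries, consider a toy change string (hypothetical values, not the real GDP data):

# Toy change string: indices 3-5 decline ("0"), the rest grow ("1")
toy = "1110001111"
i = toy.index("0011")               # 4: first decline-decline-growth-growth window
start = toy.rindex("1", 0, i) + 1   # 3: the quarter after the last pre-recession "1"
end = i + len("0011") - 1           # 7: the second consecutive growth quarter
print(start, end)                   # 3 7

Note that rindex scans back past extra leading declines, so a recession of more than two falling quarters still starts at its first declining quarter.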
In [11]:
def get_recession_start():
    '''Returns the year and quarter of the recession start time as a
    string value in a format such as 2005q3'''
    # The recession starts at the first declining quarter, i.e. right
    # after the last "1" before the "0011" pattern
    start_recession_index = change_str.rindex("1", 0, recession_index) + 1
    return df_gdp.iloc[start_recession_index]["Quarter"]

get_recession_start()
Out[11]:
In [12]:
def get_recession_end():
    '''Returns the year and quarter of the recession end time as a
    string value in a format such as 2005q3'''
    # The recession ends at the second consecutive growth quarter,
    # i.e. the last position of the "0011" pattern
    end_recession_index = recession_index + len("0011") - 1
    return df_gdp.iloc[end_recession_index]["Quarter"]

get_recession_end()
Out[12]:
In [13]:
def get_recession_bottom():
    '''Returns the year and quarter of the recession bottom time as a
    string value in a format such as 2005q3'''
    recession_start = get_recession_start()
    recession_end = get_recession_end()
    df_recession = df_gdp[(df_gdp.Quarter >= recession_start) & (df_gdp.Quarter <= recession_end)]
    # idxmin returns the index label of the lowest chained GDP, which .loc can look up
    return df_recession.loc[df_recession["GDP chained"].idxmin()]["Quarter"]

get_recession_bottom()
Out[13]:
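get_recession_bottom relies on idxmin, which returns the index label (not the position) of the minimum, so .loc can fetch the matching row even though df_gdp keeps its original integer labels after filtering. A tiny sketch with made-up numbers:

# idxmin returns the index label of the minimum, suitable for .loc
s = pd.Series([3.0, 1.0, 2.0], index=[212, 213, 214])
print(s.idxmin())   # 213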
In [14]:
def convert_housing_data_to_quarters():
    '''Converts the housing data to quarters and returns it as mean
    values in a dataframe. This dataframe should be a dataframe with
    columns for 2000q1 through 2016q3, and should have a multi-index
    in the shape of ["State","RegionName"].

    Note: Quarters are defined in the assignment description, they are
    not arbitrary three month periods.

    The resulting dataframe should have 67 columns, and 10,730 rows.
    '''
    # A quarter is a specific three month period:
    # - Q1 is January through March (1-3)
    # - Q2 is April through June (4-6)
    # - Q3 is July through September (7-9)
    # - Q4 is October through December (10-12)
    # Drop columns for years not under inspection (year < 2000)
    cols = [c for c in df_all_homes.columns if c.lower()[:3] != '199']
    # Copy to avoid mutating a view of df_all_homes
    df_homes = df_all_homes[cols].copy()
    # Calculate the mean for each quarter between 2000 and 2015
    quarters = [["01", "02", "03"], ["04", "05", "06"], ["07", "08", "09"], ["10", "11", "12"]]
    for year in range(2000, 2016):
        for q in range(0, 4):
            quarter_columns = ["{}-{}".format(year, month) for month in quarters[q]]
            df_homes["{}q{}".format(year, q + 1)] = df_homes[quarter_columns].mean(axis=1)
    # 2016 is not a full year (the data ends in August), so handle it separately
    df_homes["2016q1"] = df_homes[["2016-01", "2016-02", "2016-03"]].mean(axis=1)
    df_homes["2016q2"] = df_homes[["2016-04", "2016-05", "2016-06"]].mean(axis=1)
    df_homes["2016q3"] = df_homes[["2016-07", "2016-08"]].mean(axis=1)
    # Drop the remaining year-month columns (their fifth character is "-")
    cols = [c for c in df_homes.columns if c.lower()[4] != '-']
    df_homes = df_homes[cols]
    # Drop unnecessary columns
    df_homes = df_homes.drop(["RegionID", "Metro", "CountyName", "SizeRank"], axis=1)
    # Convert acronym state names to full names
    df_homes["State"] = df_homes["State"].map(states)
    # Create the ["State", "RegionName"] multi-index
    df_homes_indexed = df_homes.set_index(["State", "RegionName"])
    return df_homes_indexed

df_houses_quarters = convert_housing_data_to_quarters()
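As a quick sanity check against the shape promised in the docstring (67 quarter columns and 10,730 rows):

# Sanity check against the docstring's promised shape
print(df_houses_quarters.shape)   # expected: (10730, 67)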
In [15]:
# Calculate recession impact on house prices as
# price_ratio = recession_start / recession_bottom
recession_start = get_recession_start()
recession_bottom = get_recession_bottom()
print("Recession start {} and bottom {}".format(recession_start, recession_bottom))
# The vectorized line below caused the notebook to run out of memory,
# so apply with a lambda is used instead.
#df_houses_quarters["Ratio"] = df_houses_quarters[recession_start] / df_houses_quarters[recession_bottom]
df_houses_quarters["Ratio"] = df_houses_quarters.apply(lambda row: row[recession_start] / row[recession_bottom], axis=1)
df_houses_quarters.head()
Out[15]:
In [16]:
university_towns = get_list_of_university_towns()
university_towns = university_towns.set_index(["State", "RegionName"])
university_towns.head()
Out[16]:
In [17]:
# Get all university towns. Towns missing from the housing data come back
# as all-NaN rows (in this version of pandas), which dropna then removes.
df_univ_towns = df_houses_quarters.loc[list(university_towns.index)]
df_univ_towns = df_univ_towns.dropna()
df_univ_towns.head()
Out[17]:
In [18]:
# Get all non-university towns
non_univ_index = set(df_houses_quarters.index) - set(df_univ_towns.index)
df_non_univ_towns = df_houses_quarters.loc[list(non_univ_index)]
df_non_univ_towns = df_non_univ_towns.dropna()
df_non_univ_towns.head()
Out[18]:
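Since the non-university index is built as a set difference, the two groups should be disjoint; a quick check (the group sizes depend on the data, so none are asserted here):

# The two groups should not overlap
overlap = set(df_univ_towns.index) & set(df_non_univ_towns.index)
print(len(overlap))                                # expected: 0
print(len(df_univ_towns), len(df_non_univ_towns))  # group sizes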
In [19]:
def run_ttest():
    '''First creates new data showing the decline or growth of housing prices
    between the recession start and the recession bottom. Then runs a ttest
    comparing the university town values to the non-university town values,
    and returns whether the null hypothesis (that the two groups are the same)
    can be rejected, as well as the p-value of the test.

    Return the tuple (different, p, better) where different=True if the t-test is
    True at a p<0.01 (we reject the null hypothesis), or different=False if
    otherwise (we cannot reject the null hypothesis). The variable p should
    be equal to the exact p value returned from scipy.stats.ttest_ind(). The
    value for better should be either "university town" or "non-university town"
    depending on which has a lower mean price ratio (which is equivalent to a
    reduced market loss).'''
    statistic, pvalue = ttest_ind(df_univ_towns["Ratio"], df_non_univ_towns["Ratio"])
    different = pvalue < 0.01
    if df_univ_towns["Ratio"].mean() < df_non_univ_towns["Ratio"].mean():
        better = "university town"
    else:
        better = "non-university town"
    return different, pvalue, better

run_ttest()
Out[19]:
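By default ttest_ind assumes equal population variances. As an optional robustness check (not required by the grader), SciPy's Welch variant drops that assumption via equal_var=False:

# Optional: Welch's t-test, which does not assume equal variances
stat_w, p_w = ttest_ind(df_univ_towns["Ratio"], df_non_univ_towns["Ratio"], equal_var=False)
print(p_w)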