Did you see my talk at PyCon 2014? You can use this notebook to try it out for yourself!
If you didn't, and you'd like to, the video is here: Diving into Open Data with IPython Notebook and Pandas. The pandas cookbook I mentioned is at http://github.com/jvns/pandas-cookbook.
IMPORTANT: To make this work, you'll need to install the dependencies listed below and then start the notebook server by running:
ipython notebook
This work is licensed under a Creative Commons Attribution 4.0 International License.
In [1]:
# Some imports we'll need
import numpy as np
import pandas as pd
# Contact details for the speaker, printed at the start and end of the talk.
julia = {
    'email': 'julia@jvns.ca',
    'twitter': 'http://twitter.com/b0rk',
    'slides': 'http://bit.ly/pycon-pandas',
    'website': 'http://jvns.ca',
}
I do data work at Stripe, and I help run Montréal All-Girl Hack Night and PyLadies MTL.
You can follow along with this talk at: http://bit.ly/pycon-pandas
In [2]:
# Print the speaker's contact info.
# (Converted from Python 2 print statements, which are a SyntaxError on
# Python 3, to print() calls.)
print('Email:', julia['email'])
print('Twitter:', julia['twitter'])
print('Blog:', julia['website'])
In [3]:
# Build a plain Python list and a NumPy array holding the same 20 million
# integers, so the next two cells can time the same computation on each.
# list(...) is needed on Python 3, where range() returns a lazy range
# object rather than a list.
N_ELEMENTS = 20000000
py_list = list(range(N_ELEMENTS))
numpy_array = np.arange(N_ELEMENTS)
In [4]:
%%timeit
total = 0
for x in py_list:
x += total * total
In [5]:
%%timeit
np.sum(numpy_array * numpy_array)
sudo apt-get install ipython-notebook
pip install ipython tornado pyzmq
pip install numpy pandas matplotlib
In [6]:
import pandas as pd
import numpy as np
In [7]:
import matplotlib
# display graphs inline
%matplotlib inline
# Make graphs prettier
pd.set_option('display.max_columns', 15)
pd.set_option('display.line_width', 400)
pd.set_option('display.mpl_style', 'default')
# Make the fonts bigger
matplotlib.rc('figure', figsize=(14, 7))
matplotlib.rc('font', family='normal', weight='bold', size=22)
In [8]:
# First try: read the CSV with all the default options.
# The result looks garbled -- the next cell re-reads it with an explicit
# separator, encoding, and date parsing.
bike_data = pd.read_csv("./2012.csv")
bike_data.head()
Out[8]:
In [9]:
# Re-read the file, this time telling pandas how it is actually laid out.
bike_data = pd.read_csv(
    "./2012.csv",
    encoding='latin1',    # accented column names like Côte-Sainte-Catherine
    sep=';',              # fields are semicolon-separated, not comma-separated
    index_col='Date',     # index the frame by the date column...
    parse_dates=True,     # ...parsed into real datetimes...
    dayfirst=True,        # ...with day/month/year ordering
)
In [10]:
# Get rid of missing columns
# (dropna(axis=1) removes every column that contains at least one NaN)
bike_data = bike_data.dropna(axis=1)
# Only use 3 of the columns so it all fits on the screen
In [11]:
# Keep just three of the bike-path counter columns so the frame
# fits on the screen, then show the first five rows.
columns_to_keep = ['Berri 1', u'Côte-Sainte-Catherine', 'Maisonneuve 1']
bike_data = bike_data[columns_to_keep]
bike_data[:5]
Out[11]:
In [12]:
# First three rows (positional slice, same as bike_data[:3])
bike_data.head(3)
Out[12]:
In [13]:
# Plot the daily counts for all three paths across the year
bike_data.plot()
Out[13]:
In [13]:
# Median daily cyclist count for each path
bike_data.median()
Out[13]:
In [14]:
# The same medians, as a bar chart
bike_data.median().plot(kind='bar')
Out[14]:
In [15]:
# Slice out two of the columns...
two_paths = bike_data[['Berri 1', 'Maisonneuve 1']]
# ...and then show just the first three rows
two_paths[:3]
Out[15]:
In [16]:
# Boolean Series: True on days with fewer than 75 cyclists on the Berri path
bike_data['Berri 1'] < 75
Out[16]:
In [17]:
# Use that boolean Series as a mask to keep only those low-traffic days
bike_data[bike_data['Berri 1'] < 75]
Out[17]:
In [18]:
# Add a weekday column derived from the date index (Monday=0 .. Sunday=6)
bike_data['weekday'] = bike_data.index.weekday
bike_data.head()
Out[18]:
In [19]:
# Total up the cyclist counts for each day of the week.
# (groupby(...).sum() is the idiomatic spelling; passing np.sum to
# .aggregate() is deprecated in modern pandas and does the same thing.)
counts_by_day = bike_data.groupby('weekday').sum()
counts_by_day
Out[19]:
In [20]:
# The weekday index runs 0-6 with Monday=0, so relabel it with day names
# before plotting the Berri totals.
day_names = ['Monday', 'Tuesday', 'Wednesday', 'Thursday',
             'Friday', 'Saturday', 'Sunday']
counts_by_day.index = day_names
counts_by_day['Berri 1'].plot(kind='bar')
Out[20]:
In [19]:
# Daily Berri counts across the whole year
bike_data['Berri 1'].plot()
Out[19]:
In [20]:
def get_weather_data(year):
    """Download hourly weather data for the given year from climate.weather.gc.ca
    and return it as a single DataFrame indexed by 'Date/Time'.

    Fetches one CSV per month for station 5415 (the airport station),
    strips degree signs out of the column names, drops the redundant
    Year/Day/Month/Time/Data Quality columns, and concatenates the months,
    dropping all-empty columns and rows with missing values.
    """
    # mctavish station: 10761, airport station: 5415
    url_template = ("http://climate.weather.gc.ca/climateData/bulkdata_e.html"
                    "?format=csv&stationID=5415&Year={year}&Month={month}"
                    "&timeframe=1&submit=Download+Data")
    data_by_month = []
    for month in range(1, 13):
        url = url_template.format(year=year, month=month)
        # skiprows=16 skips the station-metadata header above the real table
        weather_data = pd.read_csv(url, skiprows=16, index_col='Date/Time',
                                   parse_dates=True).dropna(axis=1)
        # Strip the degree sign ('\xb0') out of names like 'Temp (°C)'
        # (a list comprehension replaces the original Python-2-style map/lambda)
        weather_data.columns = [col.replace('\xb0', '')
                                for col in weather_data.columns]
        # These columns duplicate information already in the datetime index
        weather_data = weather_data.drop(
            ['Year', 'Day', 'Month', 'Time', 'Data Quality'], axis=1)
        data_by_month.append(weather_data.dropna())
    # Concatenate and drop any empty columns
    return pd.concat(data_by_month).dropna(axis=1, how='all').dropna()
In [21]:
# Fetch all 12 months of 2012 weather (12 HTTP requests, so it takes a while)
weather_data = get_weather_data(2012)
In [22]:
# Peek at the first few hourly weather observations
weather_data[:5]
Out[22]:
In [23]:
# Add each day's average temperature to the bike frame.
# (The `how=` argument to resample was removed in modern pandas;
# .resample('D').mean() is the current spelling of the same operation.)
bike_data['mean temp'] = weather_data['Temp (C)'].resample('D').mean()
In [24]:
# The bike frame now has the 'mean temp' column
bike_data.head()
Out[24]:
In [25]:
# Plot cyclist counts and mean temperature one above the other to compare
bike_data[['Berri 1', 'mean temp']].plot(subplots=True)
Out[25]:
In [26]:
# For each day, the fraction of hourly observations whose weather description
# contains 'Rain' (0.0 = never, 1.0 = every hour).
# (.resample('D').mean() replaces the removed resample('D', how='mean') form.)
bike_data['Rain'] = weather_data['Weather'].str.contains('Rain').resample('D').mean()
In [27]:
# Compare cyclist counts with the daily rain fraction
bike_data[['Berri 1', 'Rain']].plot(subplots=True)
Out[27]:
In [78]:
# Repeat the contact info at the end of the talk, plus the slides link.
# (Converted from Python 2 print statements, which are a SyntaxError on
# Python 3, to print() calls.)
print('Email:', julia['email'])
print('Twitter:', julia['twitter'])
print('Blog:', julia['website'])
print('Slides:', julia['slides'])