In [1]:
from __future__ import absolute_import, division, print_function, unicode_literals
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
%matplotlib inline
In [2]:
# The following code is copied verbatim from @pjsier Rolling Rain N-Year Threshold.pynb
# Loading in hourly rain data from CSV, parsing the timestamp, and adding it as an index so it's more useful
rain_df = pd.read_csv('data/ohare_hourly_observations.csv')
# Parse the string timestamps into datetime64 values, then reuse them as the
# index so time-based selection and resampling work in the cells below.
rain_df['datetime'] = pd.to_datetime(rain_df['datetime'])
rain_df = rain_df.set_index(pd.DatetimeIndex(rain_df['datetime']))
# Show the dtypes to confirm the datetime parse succeeded, then preview rows.
print(rain_df.dtypes)
rain_df.head()
Out[2]:
In [3]:
chi_rain_series = rain_df['hourly_precip'].resample('1H').max()
In [4]:
# This is where I break with @pjsier
In [5]:
# I am assuming here that a single hour cannot be part of more than one storm in the event_endtimes list.
# Therefore, I am looping through the list and throwing out any storms that include hours from heavier storms in the
# same block of time.
def get_storms_without_overlap(event_endtimes, hours):
    """Filter overlapping storm windows, keeping only the heaviest ones.

    Assumes a single hour cannot belong to more than one storm, and that
    ``event_endtimes`` is sorted heaviest-first, so heavier storms claim
    their hours before lighter overlapping ones are considered.

    Parameters
    ----------
    event_endtimes : pd.DataFrame
        Rolling-sum rows indexed by window end timestamp, with an
        'hourly_precip' column holding the window total, sorted by
        'hourly_precip' descending.
    hours : int
        Width of the rolling window in hours.

    Returns
    -------
    list of dict
        One dict per retained storm with 'start', 'end', and 'inches' keys.
    """
    # Use a set so each overlap check is O(1) per hour; the original list
    # rebuilt set(times_taken) on every iteration, making the loop quadratic.
    times_taken = set()
    ret_val = []
    for i in range(len(event_endtimes)):
        timestamp = event_endtimes.iloc[i].name  # window end time (index label)
        # Every hourly timestamp covered by this window: end, end-1h, ..., end-(hours-1)h.
        times_here = [timestamp - pd.DateOffset(hours=h) for h in range(hours)]
        # Keep this storm only if none of its hours were claimed by a heavier one.
        if times_taken.isdisjoint(times_here):
            times_taken.update(times_here)
            # NOTE(review): the observations span end-(hours-1) .. end, but 'start'
            # is end-hours — presumably the start of the first hour's accumulation;
            # confirm against how the hourly readings are stamped.
            ret_val.append({'start': timestamp - pd.DateOffset(hours=hours),
                            'end': timestamp,
                            'inches': event_endtimes.iloc[i]['hourly_precip']})
    return ret_val
In [6]:
# Find the 100 year event. First, define the storm as based in Illinois Bulletin 70 as the number of inches
# of precipitation that falls over a given span of straight hours.
_100_year_storm_milestones = [
    {'hours': 240, 'inches': 11.14},
    {'hours': 120, 'inches': 9.96},
    {'hours': 72, 'inches': 8.78},
    {'hours': 48, 'inches': 8.16},
    {'hours': 24, 'inches': 7.58},
    {'hours': 18, 'inches': 6.97},
    {'hours': 12, 'inches': 6.59},
    {'hours': 6, 'inches': 5.68},
    {'hours': 3, 'inches': 4.9},
    {'hours': 2, 'inches': 4.47},
    {'hours': 1, 'inches': 3.51},
]
all_storms = []
print("\tSTART\t\t\tEND\t\t\tINCHES")
for milestone in _100_year_storm_milestones:
    span = milestone['hours']
    threshold = milestone['inches']
    # Rolling total over `span` hours, then keep only windows at/above the
    # 100-year threshold, heaviest first so overlap filtering favors them.
    totals = pd.DataFrame(chi_rain_series.rolling(window=span).sum())
    candidates = (totals[totals['hourly_precip'] >= threshold]
                  .sort_values(by='hourly_precip', ascending=False))
    storms = get_storms_without_overlap(candidates, span)
    if storms:
        print("Across %s hours" % span)
        for storm in storms:
            print('\t%s\t%s\t%s inches' % (storm['start'], storm['end'], storm['inches']))
        all_storms.extend(storms)
In [7]:
# Analysis Questions
# 1/25/2015 - 2/4/2015 - Worst storm by far in quantity, but Jan-Feb -- is it snow?
# 9/4/2008 - 9/14/2008 - This only appeared on the 10-day event, so it must've been well distributed across the days?
# 7/21/2011 - 7/23/2011 - Very heavy summer storm!
In [8]:
# Examining the storm from 7/21/2011 - 7/23/2011 (window padded by a day on each side)
import datetime

window_start = datetime.datetime(2011, 7, 20)
window_end = datetime.datetime(2011, 7, 24)
in_window = (chi_rain_series.index >= window_start) & (chi_rain_series.index <= window_end)
july_2011_storm = chi_rain_series.loc[in_window]
july_2011_storm.head()
Out[8]:
In [9]:
july_2011_storm.plot()
Out[9]:
In [10]:
# Let's take a look at the cumulative buildup of the storm over time.
# july_2011_storm is already a Series, so cumsum() applies directly — the
# original round-trip through pd.DataFrame(...).hourly_precip was redundant
# and produced the same Series (same values, same 'hourly_precip' name).
cumulative_rainj11 = july_2011_storm.cumsum()
cumulative_rainj11.head()
Out[10]:
In [11]:
cumulative_rainj11.plot()
Out[11]:
In [12]:
cumulative_rainj11.loc[(cumulative_rainj11.index >= datetime.datetime(2011,7,22,21,0,0)) & (cumulative_rainj11.index <= datetime.datetime(2011,7,23,5,0,0))]
Out[12]:
In [13]:
# We got a crazy, crazy downpour from about 11:00PM until 2:00AM. That alone was a 100-year storm, where we got 6.79 inches
# in 3 hours. That would've been a 100-year storm if we'd have gotten that in 12 hours!