In [1]:
import numpy as np
import pandas as pd
from pandas import Series, DataFrame
In [2]:
# Two columns of standard-normal draws; bucket data1 into 4 equal-width bins.
frame = DataFrame(
    {'data1': np.random.randn(1000), 'data2': np.random.randn(1000)}
)
factor = pd.cut(frame['data1'], 4)
factor[:10]
Out[2]:
In [3]:
def get_stats(group):
    """Return min, max, count, and mean of `group` as a dict."""
    return {
        'min': group.min(),
        'max': group.max(),
        'count': group.count(),
        'mean': group.mean(),
    }
# Group data2 by the equal-width data1 bins and tabulate stats per bin
grouped = frame.data2.groupby(factor)
grouped.apply(get_stats).unstack()
Out[3]:
In [4]:
# labels=False makes qcut return integer decile ids (0-9) instead of intervals
grouping = pd.qcut(frame.data1, 10, labels=False)
grouped = frame['data2'].groupby(grouping)
grouped.apply(get_stats).unstack()
Out[4]:
In [5]:
# Six normal draws with every other position knocked out to NaN
s = Series(np.random.randn(6))
s.iloc[::2] = np.nan
s
Out[5]:
In [6]:
s.fillna(s.mean())
Out[6]:
In [7]:
east = ['Ohio', 'New York', 'Vermont', 'Florida']
west = ['Oregon', 'Nevada', 'California', 'Idaho']
states = east + west
group_key = ['East'] * len(east) + ['West'] * len(west)
data = Series(np.random.randn(8), index=states)
# Knock out a few states so group-wise filling has something to do
data.loc[['Vermont', 'Nevada', 'Idaho']] = np.nan
data
Out[7]:
In [8]:
data.groupby(group_key).mean()
Out[8]:
In [9]:
def fill_mean(g):
    """Fill NaNs in one group with that group's own mean."""
    return g.fillna(g.mean())

data.groupby(group_key).apply(fill_mean)
Out[9]:
In [10]:
fill_values = {'East': 0.5, 'West': -1}

def fill_func(g):
    """Fill NaNs with a fixed per-group value keyed by the group's name."""
    return g.fillna(fill_values[g.name])

data.groupby(group_key).apply(fill_func)
Out[10]:
In [13]:
suits = ['H', 'S', 'C', 'D']
# Card values: 1 for the ace, face value for 2-10, 10 each for J/K/Q,
# repeated once per suit.
card_val = (list(range(1, 11)) + [10] * 3) * 4
base_names = ['A'] + list(range(2, 11)) + ['J', 'K', 'Q']
cards = []
for suit in suits:  # reuse `suits` instead of repeating the literal
    cards.extend(str(num) + suit for num in base_names)
deck = Series(card_val, index=cards)
deck[:13]
Out[13]:
In [14]:
def draw(deck, n=5):
    """Draw n cards at random, without replacement, from `deck`."""
    shuffled = np.random.permutation(len(deck))
    return deck.take(shuffled[:n])
draw(deck)
Out[14]:
In [15]:
def get_suit(card):
    """Last character of the card label is its suit."""
    return card[-1]

# Two random cards per suit
deck.groupby(get_suit).apply(draw, n=2)
Out[15]:
In [16]:
deck.groupby(get_suit, group_keys=False).apply(draw, n=2)
Out[16]:
In [17]:
df = DataFrame({
    'category': list('aaaabbbb'),
    'data': np.random.randn(8),
    'weights': np.random.rand(8),
})
df
Out[17]:
In [18]:
grouped = df.groupby('category')

def get_wavg(g):
    """Weighted average of 'data' using 'weights' within one group."""
    return np.average(g['data'], weights=g['weights'])

grouped.apply(get_wavg)
Out[18]:
In [19]:
# Daily close prices; parse the first column as dates and use it as the index
close_px = pd.read_csv('stock_px.csv', parse_dates=True, index_col=0)
close_px.info()
In [20]:
close_px[-4:]
Out[20]:
In [21]:
# Daily percent-change returns; drop the first (all-NaN) row
rets = close_px.pct_change().dropna()

def spx_corr(x):
    """Correlation of every column with the SPX column."""
    return x.corrwith(x['SPX'])

by_year = rets.groupby(lambda x: x.year)
by_year.apply(spx_corr)
Out[21]:
In [22]:
by_year.apply(lambda g: g['AAPL'].corr(g['MSFT']))
Out[22]:
In [23]:
import statsmodels.api as sm

def regress(data, yvar, xvars):
    """OLS regression of data[yvar] on data[xvars] plus an intercept.

    Returns the fitted coefficients (a Series indexed by regressor name).
    """
    Y = data[yvar]
    # Copy before adding the intercept column so we don't mutate (a view
    # of) the caller's frame — the original triggered SettingWithCopyWarning.
    X = data[xvars].copy()
    X['intercept'] = 1.
    result = sm.OLS(Y, X).fit()
    return result.params

# Yearly beta of AAPL against SPX
by_year.apply(regress, 'AAPL', ['SPX'])
Out[23]:
In [25]:
tips = pd.read_csv('tips.csv')
# The raw file has no tip_pct column, but the pivot tables below use one;
# compute tip as a fraction of the total bill.
tips['tip_pct'] = tips['tip'] / tips['total_bill']
# Group means of all numeric columns by sex and smoker
tips.pivot_table(index=['sex', 'smoker'])
Out[25]:
In [26]:
# Mean tip_pct and party size, split by sex/day (rows) and smoker (columns).
# NOTE(review): requires a tip_pct column (tip / total_bill); confirm it is
# added after tips.csv is loaded.
tips.pivot_table(['tip_pct', 'size'], index=['sex', 'day'],
columns='smoker')
Out[26]:
In [27]:
# Same table plus 'All' row/column partial totals via margins=True.
# NOTE(review): requires a tip_pct column (tip / total_bill); confirm it is
# added after tips.csv is loaded.
tips.pivot_table(['tip_pct', 'size'], index=['sex', 'day'],
columns='smoker', margins=True)
In [28]:
# Cell counts (aggfunc=len) per sex/smoker and day, with 'All' margins
tips.pivot_table('tip_pct', index=['sex', 'smoker'], columns='day',
aggfunc=len, margins=True)
In [29]:
# Total party size by time/sex/smoker and day; empty combinations show 0
tips.pivot_table('size', index=['time', 'sex', 'smoker'],
columns='day', aggfunc='sum', fill_value=0)
Out[29]:
In [30]:
# Python 3: StringIO lives in io (the py2 StringIO module is gone)
from io import StringIO

raw = """\
Sample Gender Handedness
1 Female Right-handed
2 Male Left-handed
3 Female Right-handed
4 Male Right-handed
5 Male Left-handed
6 Male Right-handed
7 Female Right-handed
8 Female Left-handed
9 Male Right-handed
10 Female Right-handed"""
# Raw string for the regex separator avoids invalid-escape warnings
data = pd.read_table(StringIO(raw), sep=r'\s+')
In [31]:
# FEC 2012 campaign-contribution file; info() summarizes columns and dtypes
fec = pd.read_csv('P00000001-ALL.csv')
fec.info()
In [32]:
fec.ix[123456]
Out[32]:
In [33]:
# Distinct candidate names appearing in the file
unique_cands = fec.cand_nm.unique()
unique_cands
Out[33]:
In [34]:
# Candidate -> party lookup; every 2012 candidate here is Republican
# except Obama.
_republicans = [
    'Bachmann, Michelle',
    'Cain, Herman',
    'Gingrich, Newt',
    'Huntsman, Jon',
    'Johnson, Gary Earl',
    'McCotter, Thaddeus G',
    'Paul, Ron',
    'Pawlenty, Timothy',
    'Perry, Rick',
    "Roemer, Charles E. 'Buddy' III",
    'Romney, Mitt',
    'Santorum, Rick',
]
parties = {name: 'Republican' for name in _republicans}
parties['Obama, Barack'] = 'Democrat'
In [35]:
fec.cand_nm[123456:123461]
Out[35]:
In [36]:
fec.cand_nm[123456:123461].map(parties)
Out[36]:
In [37]:
fec['party'] = fec.cand_nm.map(parties)
In [38]:
fec['party'].value_counts()
Out[38]:
In [39]:
(fec.contb_receipt_amt > 0).value_counts()
Out[39]:
In [40]:
fec = fec[fec.contb_receipt_amt > 0]
In [41]:
fec_mrbo = fec[fec.cand_nm.isin(['Obama, Barack', 'Romney, Mitt'])]
In [42]:
fec.contbr_occupation.value_counts()[:10]
Out[42]:
In [43]:
# Collapse several spellings into canonical occupation labels; anything
# not in the mapping passes through unchanged via dict.get.
occ_mapping = {
    'INFORMATION REQUESTED PER BEST EFFORTS' : 'NOT PROVIDED',
    'INFORMATION REQUESTED' : 'NOT PROVIDED',
    'INFORMATION REQUESTED (BEST EFFORTS)' : 'NOT PROVIDED',
    'C.E.O.': 'CEO'
}

def f(x):
    return occ_mapping.get(x, x)

fec.contbr_occupation = fec.contbr_occupation.map(f)
In [44]:
# Normalize employer labels the same way; unmapped values pass through.
emp_mapping = {
    'INFORMATION REQUESTED PER BEST EFFORTS' : 'NOT PROVIDED',
    'INFORMATION REQUESTED' : 'NOT PROVIDED',
    'SELF' : 'SELF-EMPLOYED',
    'SELF EMPLOYED' : 'SELF-EMPLOYED',
}

def f(x):
    return emp_mapping.get(x, x)

fec.contbr_employer = fec.contbr_employer.map(f)
In [45]:
# Total dollars by occupation (rows) and party (columns)
by_occupation = fec.pivot_table('contb_receipt_amt',
index='contbr_occupation',
columns='party', aggfunc='sum')
In [46]:
# Keep only occupations that gave more than $2M combined across parties
occupation_totals = by_occupation.sum(axis=1)
over_2mm = by_occupation[occupation_totals > 2000000]
over_2mm
Out[46]:
In [48]:
# Render plots inline in the notebook
%matplotlib inline
# Horizontal bars: donation totals per party for the big occupations
over_2mm.plot(kind='barh')
Out[48]:
In [55]:
def get_top_amounts(group, key, n=5):
    """Top n contb_receipt_amt totals within `group`, summed by `key`.

    Series.order was removed from pandas; sort_values is the replacement.
    Sorting descending and taking the first n rows yields the n *largest*
    totals, which is what the name promises (the original's [-n:] slice
    after a descending sort picked the n smallest instead).
    """
    totals = group.groupby(key)['contb_receipt_amt'].sum()
    return totals.sort_values(ascending=False)[:n]
In [56]:
# Top occupations by donation total for each candidate
grouped = fec_mrbo.groupby('cand_nm')
grouped.apply(get_top_amounts, 'contbr_occupation', n=7)
Out[56]:
In [57]:
grouped.apply(get_top_amounts, 'contbr_employer', n=10)
Out[57]:
In [58]:
# Bucket contribution sizes on a power-of-ten scale from $0 up to $10M
bins = np.array(
    [0, 1, 10, 100, 1_000, 10_000, 100_000, 1_000_000, 10_000_000]
)
labels = pd.cut(fec_mrbo.contb_receipt_amt, bins)
labels
Out[58]:
In [59]:
# Count contributions per amount bucket, candidates as columns
grouped = fec_mrbo.groupby(['cand_nm', labels])
grouped.size().unstack(0)
Out[59]:
In [60]:
# Dollar totals per amount bucket, candidates as columns
bucket_sums = grouped.contb_receipt_amt.sum().unstack(0)
bucket_sums
Out[60]:
In [61]:
# Normalize each bucket row to sum to 1: each candidate's share per bucket
normed_sums = bucket_sums.div(bucket_sums.sum(axis=1), axis=0)
normed_sums
Out[61]:
In [62]:
normed_sums[:-2].plot(kind='barh', stacked=True)
Out[62]:
In [63]:
# State-level dollar totals per candidate; keep states whose combined
# total exceeds $100k.
grouped = fec_mrbo.groupby(['cand_nm', 'contbr_st'])
totals = grouped.contb_receipt_amt.sum().unstack(0).fillna(0)
is_big_state = totals.sum(axis=1) > 100000
totals = totals[is_big_state]
totals[:10]
Out[63]:
In [64]:
# Each candidate's share of every state's total donations (rows sum to 1)
percent = totals.div(totals.sum(1), axis=0)
percent[:10]
Out[64]:
In [ ]: