In [ ]:
import numpy as np 
import pandas as pd 
import matplotlib.pyplot as plt
%matplotlib inline

In [ ]:


In [ ]:
filename= "../data/kobe/kobe_bryant_shot_data.csv.gz"
df = pd.read_csv(filename, na_values={'shot_made_flag': ''})
df = df.dropna()
df = df.drop([u'game_event_id', u'game_id', 'combined_shot_type',
       u'lat', u'lon', u'team_id', u'team_name', u'game_date',
        u'shot_id'], axis=1)

df = df.drop(['loc_x', 'loc_y', 'shot_type','shot_zone_basic', 'shot_zone_range'], axis=1)

In [ ]:
# derive a home/away flag: '@' in the matchup string marks a road game
df['home'] = df.matchup.apply(lambda matchup: 0 if '@' in matchup else 1)
df = df.drop(['matchup'], axis=1)

# collapse minutes and seconds into a single time_remaining feature (in seconds)
df['time_remaining'] = 60 * df['minutes_remaining'] + df['seconds_remaining']
df = df.drop(['minutes_remaining', 'seconds_remaining'], axis=1)

# move the target column, shot_made_flag, to the end
cols = df.columns.tolist()
cols.remove('shot_made_flag')
cols.append('shot_made_flag')

df = df[cols]

In [ ]:
filename= "../data/kobe/kobe_bryant_shot_data_refined.csv"
df.to_csv(filename, index=False)

In [ ]:


In [ ]:
filename= "../data/kobe/kobe_bryant_shot_data_refined.csv"
df = pd.read_csv(filename)

In [ ]:


In [ ]:
df

In [ ]:


In [ ]:
# keep an unmodified copy for inspecting predictions later
original_df = df.copy()

In [ ]:


In [ ]:


In [ ]:


In [ ]:


In [ ]:
# inspect the 75th percentile of shot distance before capping outliers
q75 = np.percentile(df.shot_distance, 75)

In [ ]:
q75

In [ ]:
# cap extreme long-range shot distances at 45 feet
df['shot_distance'] = df.shot_distance.clip(upper=45)
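
In [ ]:
# optional sanity check: after capping, no shot_distance should exceed 45 feet
df.shot_distance.describe()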

In [ ]:


In [ ]:
from sklearn.preprocessing import StandardScaler

In [ ]:
scaler = StandardScaler()

In [ ]:
# standardize time_remaining to zero mean and unit variance;
# fit_transform expects a 2-D array, so select the column as a DataFrame
df['time_remaining'] = scaler.fit_transform(df[['time_remaining']]).ravel()

In [ ]:


In [ ]:
dist_scaler = StandardScaler()
df['shot_distance'] = dist_scaler.fit_transform(df[['shot_distance']]).ravel()

In [ ]:


In [ ]:
# keep only the abbreviation in parentheses, e.g. 'Left Side(L)' -> 'L'
df['shot_zone_area'] = df.shot_zone_area.str.extract(r'\((\w).*\)', expand=False)
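
In [ ]:
# quick look at the extracted zone codes and their frequencies
df.shot_zone_area.value_counts()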

In [ ]:


In [ ]:
# fold all overtime periods into a single category, period 5
df['period'] = df.period.clip(upper=5)

In [ ]:


In [ ]:
df.head()

In [ ]:


In [ ]:


In [ ]:
# turn categorical variables into dummy variables
categorical_vars = ['season', 'period', 'shot_zone_area', 'opponent', 'action_type']
for var in categorical_vars:
    df = pd.concat([df, pd.get_dummies(df[var], prefix=var)], axis=1)
    df = df.drop(var, axis=1)

In [ ]:
df.head()

In [ ]:


In [ ]:
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import confusion_matrix, log_loss
from sklearn.model_selection import train_test_split

In [ ]:
X_train, X_test, y_train, y_test = train_test_split(df.drop('shot_made_flag', axis=1), 
                                                    df['shot_made_flag'], 
                                                    test_size=0.33, 
                                                    random_state=42)

In [ ]:
X_train.head()

In [ ]:
y_train.head()

In [ ]:


In [ ]:
model = RandomForestClassifier(n_estimators=45, max_depth=14, criterion='entropy', random_state=42, n_jobs=-1)

In [ ]:
model.fit(X_train, y_train)

In [ ]:
y_pred = model.predict(X_test)

In [ ]:
y_pred_proba = model.predict_proba(X_test)

In [ ]:
confusion_matrix(y_test, y_pred), log_loss(y_test, y_pred_proba[:, 1])
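
In [ ]:
# for reference, overall accuracy on the held-out set (score() reports
# mean accuracy for sklearn classifiers)
model.score(X_test, y_test)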

In [ ]:


In [ ]:
pd.DataFrame({'feature': X_train.columns, 
              'importance': model.feature_importances_}).sort_values('importance', ascending=False).head()
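
In [ ]:
# visualize the ten most important features (uses the matplotlib import above)
importances = pd.Series(model.feature_importances_, index=X_train.columns)
importances.nlargest(10).sort_values().plot(kind='barh')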

In [ ]:


In [ ]:


In [ ]:


In [ ]:
# attach the test-set predictions back onto the original rows via the index
pred_df = original_df.join(pd.DataFrame(y_pred, columns=['shot_made_pred'], index=X_test.index))

In [ ]:
pred_df = pred_df[pred_df.shot_made_pred.notnull()]

In [ ]:
pred_df.head()

In [ ]:


In [ ]:
# shots the model misclassified
pred_df[pred_df.shot_made_flag != pred_df.shot_made_pred]

In [ ]:


In [ ]:


In [ ]:
from sklearn.model_selection import GridSearchCV

param_grid = {
    'n_estimators': range(10, 50, 5),
    'max_depth': range(9, 15),
    'criterion': ['entropy', 'gini'],
}

grid = GridSearchCV(RandomForestClassifier(random_state=42, n_jobs=-1),
                    param_grid=param_grid, scoring='neg_log_loss')

In [ ]:
grid.fit(X_train, y_train)

In [ ]:
grid.best_estimator_

In [ ]:
grid.best_score_
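
In [ ]:
# the winning parameter combination; note that with scoring='neg_log_loss'
# the best score is the negated log loss (higher is better)
grid.best_params_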

In [ ]:


In [ ]:


In [ ]:
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
lda = LinearDiscriminantAnalysis()

In [ ]:
lda.fit(X_train, y_train)

In [ ]:
confusion_matrix(y_test, lda.predict(X_test))

In [ ]:
log_loss(y_test, lda.predict_proba(X_test))

In [ ]:


In [ ]:
from sklearn.svm import LinearSVC, SVC
# probability=True enables predict_proba (via Platt scaling), which
# log_loss needs below; it makes training noticeably slower
svc = SVC(probability=True)

In [ ]:
svc.fit(X_train, y_train)

In [ ]:
confusion_matrix(y_test, svc.predict(X_test))

In [ ]:
log_loss(y_test, svc.predict_proba(X_test))

In [ ]:


In [ ]:
from sklearn.model_selection import GridSearchCV
from sklearn.svm import LinearSVC, SVC
# an empty param_grid simply cross-validates the default SVC
grid = GridSearchCV(estimator=SVC(), param_grid={})

In [ ]:
grid.fit(X_train, y_train)

In [ ]:
grid.best_estimator_

In [ ]:
grid.best_score_

In [ ]:


In [ ]:
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis

# the SVMs are left out of the comparison because they are slow to fit on
# this many rows; names and classifiers must stay aligned for zip() below
names = ["Nearest Neighbors", "Decision Tree", "Random Forest", "AdaBoost",
         "Naive Bayes", "Linear Discriminant Analysis",
         "Quadratic Discriminant Analysis"]
classifiers = [
    KNeighborsClassifier(),
    DecisionTreeClassifier(),
    RandomForestClassifier(),
    AdaBoostClassifier(),
    GaussianNB(),
    LinearDiscriminantAnalysis(),
    QuadraticDiscriminantAnalysis()]

In [ ]:


In [ ]:
for name, clf in zip(names, classifiers):
    clf.fit(X_train, y_train)
    loss = log_loss(y_test, clf.predict_proba(X_test))

    print(name, loss)

In [ ]:
from sklearn.ensemble import GradientBoostingClassifier

In [ ]:
# with loss='exponential', gradient boosting recovers the AdaBoost algorithm
gbc = GradientBoostingClassifier(loss='exponential')

In [ ]:
gbc.fit(X_train, y_train)

In [ ]:
log_loss(y_test, gbc.predict_proba(X_test))

In [ ]:
from sklearn.model_selection import GridSearchCV

param_grid = {
    'n_estimators': range(100, 200, 20),
    'max_depth': range(3, 8),
}

grid = GridSearchCV(GradientBoostingClassifier(random_state=42),
                    param_grid=param_grid, scoring='neg_log_loss')

In [ ]:
grid.fit(X_train, y_train)

In [ ]:
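# inspect the boosted-tree search results, mirroring the earlier random-forest
# grid (best_score_ is again the negated log loss)
grid.best_params_, grid.best_score_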