In [22]:
from sklearn.svm import LinearSVC
from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import MultinomialNB
from sklearn.neighbors import KNeighborsClassifier
from sklearn.tree import DecisionTreeClassifier
import pandas as pd
In [23]:
# Candidate classifiers to compare, keyed by a short name.
# NOTE(review): fixed the misspelled key 'naive_baives' -> 'naive_bayes'.
models = {
    'svm': LinearSVC(),
    'log_reg': LogisticRegression(),
    'naive_bayes': MultinomialNB(),       # requires non-negative feature values
    'knn': KNeighborsClassifier(),
    'dec_tree': DecisionTreeClassifier(),
}
Read in the [Kobe Bryant shot selection data](https://www.kaggle.com/c/kobe-bryant-shot-selection) from Kaggle.
In [3]:
# Load the Kobe Bryant shot-selection dataset (Kaggle competition data);
# the path is relative to the notebook's location — assumes ../data exists.
kobe = pd.read_csv('../data/kobe.csv')
For now, use just the numerical datatypes. Their column names are collected below as `num_columns`.
In [20]:
# Names of the non-object (i.e. numeric) columns.
# NOTE(review): the original cell first built a list of (col, dtype) pairs and
# discarded it (dead code); replaced both comprehensions with the idiomatic
# select_dtypes call, which yields the same column names.
num_columns = kobe.select_dtypes(exclude='object').columns.tolist()
num_columns
Out[20]:
In [21]:
# NOTE(review): this cell is garbled in the export ('kobe = kobe' and
# 'num_columns' on separate lines); the intended statement — subsetting the
# frame to its numeric columns, as the surrounding prose describes — is below.
kobe = kobe[num_columns]
Use the `kobe` dataframe to `fit()` the models. Choose one or more of the entries in `num_columns` as features. These models are used to predict whether Kobe will make or miss a shot given the input parameters provided.
In [27]:
# Plotting setup: seaborn styling on top of matplotlib.
import seaborn as sns
import matplotlib.pyplot as plt
# Render figures inline in the notebook (default on modern Jupyter, harmless).
%matplotlib inline
# Enlarge all seaborn plot text by 50% for readability.
sns.set(font_scale=1.5)
In [37]:
# Fit a linear regression to a toy binary target and inspect how well its
# continuous predictions separate the two classes.
from sklearn.linear_model import LinearRegression
from sklearn.metrics import accuracy_score

example = pd.DataFrame({'a': [1, 2, 3, 4, 5, 6], 'b': [1, 1, 0, 0, 0, 1]})
feature_cols = ['a']
X = example[feature_cols]
y = example.b

linreg = LinearRegression()
linreg.fit(X, y)
example['pred'] = linreg.predict(X)

# Scatter plot of the data with the fitted regression line overlaid.
fig, ax = plt.subplots()
ax.scatter(example.a, example.b)
ax.plot(example.a, example.pred, color='red')
ax.set_xlabel('a')
ax.set_ylabel('b')

# NOTE(review): the original scored with example.pred.astype(int), which
# truncates toward zero — every prediction in (0, 1) becomes class 0, so the
# reported "accuracy" did not reflect a sensible decision rule.  Threshold
# the continuous predictions at 0.5 to convert them into class labels.
accuracy_score(example.b, (example.pred >= 0.5).astype(int))
Out[37]:
In [ ]: