In [ ]:
from catboost import CatBoostRegressor
from catboost import Pool
import numpy as np
import pandas as pd
from sklearn.metrics import explained_variance_score
from sklearn.metrics import r2_score
from sklearn.model_selection import train_test_split
from lxml import html
import requests
import os
import catboost
import gc
import datetime
import shap
from catboost_prediction import read_data

In [ ]:
print('hello world')

In [ ]:
# columns excluded from the feature matrix (the target plus box-score and team-level stats)
dropping_columns = ['Unnamed: 0', 'fantasy_points', 'ast', 'blk', 'drb', 'fg', 'fg3', 'fg3_pct', 'fg3a', 'fg_pct',
                    'fga', 'ft', 'ft_pct', 'fta', 'minutes', 'orb', 'pf', 'plus', 'pts', 'result', 'score', 'stl',
                    'tov', 'trb', 'win', 'diff', 'seconds', 'team-defrtg', 'team-offrtg', 'team-optsg', 'team-pace',
                    'team-ptsg', 'team-srs', 'opp-team-offrtg', 'opp-team-optsg', 'opp-team-pace', 'opp-team-ptsg',
                    'opp-team-srs', 'opp-team-defrtg', 'team-coach', 'team-executive', 'opp-team-coach',
                    'opp-team-executive']

model_name = "gpu-model"

# column indices of the categorical features in the reduced feature matrix
cat_features = [0, 2, 4, 10, 81, 82, 83, 84, 85, 86, 91, 92, 93]
# load JS visualization code to notebook
shap.initjs()

cores = 2    # thread count for the (commented-out) CPU configuration below
iters = 500  # number of boosting iterations
depth = 15   # tree depth

# read in the data
print('reading data')

all_data = read_data(low=2000)

# CatBoost does not accept NaN in categorical features, so fill missing values up front
all_data = all_data.fillna(0)

# hold out 1% of the rows as a test set, then split the remainder 50/50 into train and validation sets
X_train, X_test, y_train, y_test = train_test_split(all_data, all_data['fantasy_points'], test_size=0.01,
                                                    random_state=1)
X_train, X_val, y_train, y_val = train_test_split(X_train, y_train, test_size=0.5, random_state=1)

X_train = X_train.drop(dropping_columns, axis=1)
X_val = X_val.drop(dropping_columns, axis=1)
X_test = X_test.drop(dropping_columns, axis=1)

print(X_train.iloc[0])
print(list(X_train.columns.values))

train_pool = Pool(X_train, y_train, cat_features)
test_pool = Pool(X_test, y_test, cat_features)
validation_pool = Pool(X_val, y_val, cat_features)

# Initialize CPU CatBoostRegressor

#model = CatBoostRegressor(iterations=iters, learning_rate=0.03, depth=depth, verbose=True, od_pval=1,
#                           od_type='IncToDec', od_wait=20, thread_count=cores)

# Initialize GPU CatBoostRegressor

model = CatBoostRegressor(iterations=iters, depth=depth, verbose=True, od_pval=1,
                          od_type='IncToDec', od_wait=100, task_type='GPU', l2_leaf_reg=100, max_ctr_complexity=8)

# Fit the model, using the validation pool as the eval set for overfitting detection
model.fit(train_pool, eval_set=validation_pool)

# Predictions and evaluation metrics, kept commented out here; a runnable version is in the next cell
# train_preds = model.predict(train_pool)
# preds = model.predict(test_pool, verbose=True)
#
# print(y_test.head())
# print(preds)
#
# print("train variance score = {}".format(explained_variance_score(y_train, train_preds)))
# print("test variance score = {}".format(explained_variance_score(y_test, preds)))
#
# print("train r2 score = {}".format(r2_score(y_train, train_preds)))
# print("test r2 score = {}".format(r2_score(y_test, preds)))
#
# model.save_model(model_name)
# print(X_train.dtypes.index)
# print(model.feature_importances_)

print('model fit finished')
# explain the model's predictions using SHAP values
# (same syntax works for LightGBM, CatBoost, and scikit-learn models)
explainer = shap.TreeExplainer(model)
shap_values = explainer.shap_values(Pool(X_val, y_val, cat_features=cat_features))

#test_objects = [X_val.iloc[0:1], X_val.iloc[91:92]]

#for obj in test_objects:
#    print('Probability of class 1 = {:.4f}'.format(model.predict_proba(obj)[0][1]))
#    print('Formula raw prediction = {:.4f}'.format(model.predict(obj, prediction_type='RawFormulaVal')[0]))
#    print('\n')

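In [ ]:
# Runnable version of the evaluation block commented out in the training cell above
# (a sketch: same metrics and save step as the original commented-out code).
train_preds = model.predict(train_pool)
preds = model.predict(test_pool, verbose=True)

print(y_test.head())
print(preds)

print("train variance score = {}".format(explained_variance_score(y_train, train_preds)))
print("test variance score = {}".format(explained_variance_score(y_test, preds)))

print("train r2 score = {}".format(r2_score(y_train, train_preds)))
print("test r2 score = {}".format(r2_score(y_test, preds)))

# persist the trained model under model_name for later reuse
model.save_model(model_name)
print(X_train.dtypes.index)
print(model.feature_importances_)
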
In [ ]:
#explainer = shap.TreeExplainer(model)
#shap_values = explainer.shap_values(Pool(X_val, y_val, cat_features=cat_features))

shap.force_plot(explainer.expected_value, shap_values[0,:], X_val.iloc[0,:])

In [ ]:
shap.force_plot(explainer.expected_value, shap_values[91,:], X_val.iloc[91,:])

In [ ]:
shap.force_plot(explainer.expected_value, shap_values[123,:], X_val.iloc[123,:])

In [ ]:
shap.summary_plot(shap_values, X_val)

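In [ ]:
# Numeric complement to the summary plot above: rank features by mean absolute SHAP value
# (a sketch; the column names depend on whatever read_data returns).
mean_abs_shap = pd.Series(np.abs(shap_values).mean(axis=0), index=X_val.columns)
print(mean_abs_shap.sort_values(ascending=False).head(15))
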
In [ ]:
#shap.force_plot(explainer.expected_value, shap_values, X_val)

In [ ]:
shap.dependence_plot("RM", shap_values, X_val)

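In [ ]:
# Reload the saved model as a quick check of the artifact written by save_model
# (a sketch; assumes the evaluation cell earlier in the notebook has been run).
loaded_model = CatBoostRegressor()
loaded_model.load_model(model_name)
print(loaded_model.get_params())
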