In [674]:
%load_ext autoreload
%autoreload 2


import xgboost as xgb
import lightgbm as lgb
import catboost as cat
import pandas as pd
from hyperopt import hp
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import ExtraTreesClassifier, ExtraTreesRegressor
from sklearn.metrics import log_loss, roc_auc_score, mean_squared_error
from sklearn.datasets import make_classification, make_regression


The autoreload extension is already loaded. To reload it, use:
  %reload_ext autoreload

Prepare datasets

Binary classification


In [658]:
x, y = make_classification(n_samples=20000, n_features=100, random_state=1, n_informative=10)
x = pd.DataFrame(x, columns=['f' + str(i+1) for i in range(0, x.shape[1])])
x = x.reset_index()
x.columns = ['f_date'] + list(x.columns[1:])
y = pd.DataFrame(y, columns=['target'])

x.to_csv('tests/data/classification_binary/x_train.csv', index=False)
y.to_csv('tests/data/classification_binary/y_train.csv', index=False)

Multiclass classification


In [338]:
x, y = make_classification(n_samples=20000, n_features=100, random_state=0, n_informative=10, n_classes=4)
x = pd.DataFrame(x, columns=['f' + str(i+1) for i in range(0, x.shape[1])])
x = x.reset_index()
x.columns = ['f_date'] + list(x.columns[1:])
y = pd.DataFrame(y, columns=['target'])

x.to_csv('tests/data/classification_multiclass/x_train.csv', index=False)
y.to_csv('tests/data/classification_multiclass/y_train.csv', index=False)

Regression


In [339]:
x, y = make_regression(n_samples=20000, n_features=100, random_state=0, n_informative=10)
x = pd.DataFrame(x, columns=['f' + str(i+1) for i in range(0, x.shape[1])])
x = x.reset_index()
x.columns = ['f_date'] + list(x.columns[1:])
y = pd.DataFrame(y, columns=['target'])

x.to_csv('tests/data/regression/x_train.csv', index=False)
y.to_csv('tests/data/regression/y_train.csv', index=False)

Test Experiments

Binary classification


In [659]:
from experiment import BaseExperiment, ScikitExperiment

dataset_path = 'tests/data/classification_binary'

model_ET = ExtraTreesClassifier(n_estimators=100, random_state=0, n_jobs=-1)

space_ET = {
    'max_depth' : hp.quniform('max_depth', 2, 50, 1),
    'n_estimators': hp.choice('n_estimators', [100]),
    'n_jobs': hp.choice('n_jobs', [-1]),
    'criterion': hp.choice('criterion', ['gini', 'entropy'])
}

experiment_ET = ScikitExperiment(model_ET, dataset_path, 'classification', log_loss, 'cv', greater_is_better=False,
                                 n_folds=5, datetime_feature='f_date')

In [660]:
result = experiment_ET.run()


[SING]		eval_time=3.7 sec	log_loss=0.377940

In [661]:
bags = experiment_ET.run_bagging(seeds=[0, 1, 2, 3, 4], use_best_params=False)


[1/5]	seed=0	eval_time=4.21 sec	log_loss=0.377940
[2/5]	seed=1	eval_time=3.78 sec	log_loss=0.375443
[3/5]	seed=2	eval_time=3.79 sec	log_loss=0.376654
[4/5]	seed=3	eval_time=3.98 sec	log_loss=0.376919
[5/5]	seed=4	eval_time=3.79 sec	log_loss=0.377773
--------------------------------------------------------------------------------
[TOTAL]		eval_time=19.56 sec	log_loss=0.374929	std=0.002

In [662]:
optis = experiment_ET.run_optimize(space_ET, max_evals=5)


[1/5]		eval_time=3.69 sec	log_loss=0.391168	best=0.391168
[2/5]		eval_time=3.80 sec	log_loss=0.374688	best=0.374688
[3/5]		eval_time=2.75 sec	log_loss=0.475953	best=0.374688
[4/5]		eval_time=3.86 sec	log_loss=0.374688	best=0.374688
[5/5]		eval_time=2.16 sec	log_loss=0.546475	best=0.374688
--------------------------------------------------------------------------------
[TOTAL]		eval_time=16.31 sec	best=0.374688

{'bootstrap': False, 'class_weight': None, 'criterion': 'entropy', 'max_depth': 27.0, 'max_features': 'auto', 'max_leaf_nodes': None, 'min_impurity_split': 1e-07, 'min_samples_leaf': 1, 'min_samples_split': 2, 'min_weight_fraction_leaf': 0.0, 'n_estimators': 100, 'n_jobs': -1, 'oob_score': False, 'random_state': 0, 'verbose': 0, 'warm_start': False}

In [665]:
x_train, x_test, y_train, y_test = train_test_split(x, y, random_state=1, test_size=0.2)
experiment_ET.fit_predict(x_train, y_train, x_test, use_best_params=True, return_fitted_model=True)


Out[665]:
(array([ 0.42980592,  0.24505042,  0.3748742 , ...,  0.79348175,
         0.33682492,  0.69654534]),
 ExtraTreesClassifier(bootstrap=False, class_weight=None, criterion='entropy',
            max_depth=27.0, max_features='auto', max_leaf_nodes=None,
            min_impurity_split=1e-07, min_samples_leaf=1,
            min_samples_split=2, min_weight_fraction_leaf=0.0,
            n_estimators=100, n_jobs=-1, oob_score=False, random_state=0,
            verbose=0, warm_start=False))

In [801]:
from experiment import BoostingExperiment
dataset_path = 'tests/data/classification_binary'

params_lgb = {
    'task': 'train',
    'boosting_type': 'gbdt',
    'objective': 'binary',
    'metric': ['binary_logloss'],
    'num_leaves': 32,
#     'bagging_freq': 1,
#     'bagging_fraction': 0.85,
#     'min_data_in_leaf': 200,
#     'feature_fraction': 0.75,
    'learning_rate': 0.1,
#     'verbose': 0,
    'num_thread': 8}

space_lgb = {
            'learning_rate': hp.loguniform('learning_rate', -7, 0),
#             'num_leaves' : hp.qloguniform('num_leaves', 0, 7, 1),
#             'feature_fraction': hp.uniform('feature_fraction', 0.5, 1),
#             'bagging_fraction': hp.uniform('bagging_fraction', 0.5, 1),
#             'min_data_in_leaf': hp.qloguniform('min_data_in_leaf', 0, 6, 1),
#             'min_sum_hessian_in_leaf': hp.loguniform('min_sum_hessian_in_leaf', -16, 5),
#             'lambda_l1': hp.choice('lambda_l1', [0, hp.loguniform('lambda_l1_positive', -16, 2)]),
#             'lambda_l2': hp.choice('lambda_l2', [0, hp.loguniform('lambda_l2_positive', -16, 2)]),
}


experiment_lgb = BoostingExperiment('lightgbm', params_lgb, 100, 
                                    dataset_path, 'classification', 
                                    log_loss, 'cv', greater_is_better=False, n_folds=5, datetime_feature='f_date')

In [802]:
result = experiment_lgb.run()


[SING]		eval_time=3.4 sec	log_loss=0.152482

In [804]:
optis = experiment_lgb.run_optimize(space_lgb, max_evals=50)


[1/50]		eval_time=3.77 sec	log_loss=0.240320	best=0.240320
[2/50]		eval_time=3.06 sec	log_loss=0.201727	best=0.201727
[3/50]		eval_time=3.06 sec	log_loss=0.484938	best=0.201727
[4/50]		eval_time=3.08 sec	log_loss=0.203338	best=0.201727
[5/50]		eval_time=3.07 sec	log_loss=0.577341	best=0.201727
[6/50]		eval_time=3.11 sec	log_loss=0.509730	best=0.201727
[7/50]		eval_time=2.94 sec	log_loss=0.155682	best=0.155682
[8/50]		eval_time=2.74 sec	log_loss=1.453861	best=0.155682
[9/50]		eval_time=3.07 sec	log_loss=0.218021	best=0.155682
[10/50]		eval_time=3.15 sec	log_loss=0.609665	best=0.155682
[11/50]		eval_time=3.27 sec	log_loss=0.365198	best=0.155682
[12/50]		eval_time=3.09 sec	log_loss=0.191895	best=0.155682
[13/50]		eval_time=3.12 sec	log_loss=0.184755	best=0.155682
[14/50]		eval_time=3.09 sec	log_loss=0.343820	best=0.155682
[15/50]		eval_time=2.99 sec	log_loss=0.157132	best=0.155682
[16/50]		eval_time=3.06 sec	log_loss=0.604336	best=0.155682
[17/50]		eval_time=2.54 sec	log_loss=2.340509	best=0.155682
[18/50]		eval_time=2.94 sec	log_loss=0.148120	best=0.148120
[19/50]		eval_time=3.06 sec	log_loss=0.436840	best=0.148120
[20/50]		eval_time=3.07 sec	log_loss=0.229485	best=0.148120
[21/50]		eval_time=2.95 sec	log_loss=0.145500	best=0.145500
[22/50]		eval_time=2.92 sec	log_loss=0.145355	best=0.145355
[23/50]		eval_time=3.26 sec	log_loss=0.159902	best=0.145355
[24/50]		eval_time=2.92 sec	log_loss=0.145282	best=0.145282
[25/50]		eval_time=2.93 sec	log_loss=0.149651	best=0.145282
[26/50]		eval_time=2.95 sec	log_loss=0.197640	best=0.145282
[27/50]		eval_time=2.93 sec	log_loss=0.147784	best=0.145282
[28/50]		eval_time=2.99 sec	log_loss=0.159966	best=0.145282
[29/50]		eval_time=2.93 sec	log_loss=0.145478	best=0.145282
[30/50]		eval_time=3.08 sec	log_loss=0.188845	best=0.145282
[31/50]		eval_time=3.28 sec	log_loss=0.159988	best=0.145282
[32/50]		eval_time=2.93 sec	log_loss=0.145450	best=0.145282
[33/50]		eval_time=3.50 sec	log_loss=0.304863	best=0.145282
[34/50]		eval_time=3.02 sec	log_loss=0.525668	best=0.145282
[35/50]		eval_time=3.40 sec	log_loss=0.151776	best=0.145282
[36/50]		eval_time=2.98 sec	log_loss=0.164512	best=0.145282
[37/50]		eval_time=3.12 sec	log_loss=0.645056	best=0.145282
[38/50]		eval_time=3.09 sec	log_loss=0.172528	best=0.145282
[39/50]		eval_time=3.12 sec	log_loss=0.273898	best=0.145282
[40/50]		eval_time=2.58 sec	log_loss=2.192376	best=0.145282
[41/50]		eval_time=2.96 sec	log_loss=0.145886	best=0.145282
[42/50]		eval_time=3.08 sec	log_loss=0.164950	best=0.145282
[43/50]		eval_time=2.95 sec	log_loss=0.152210	best=0.145282
[44/50]		eval_time=3.13 sec	log_loss=0.435618	best=0.145282
[45/50]		eval_time=3.13 sec	log_loss=0.540858	best=0.145282
[46/50]		eval_time=3.00 sec	log_loss=0.153626	best=0.145282
[47/50]		eval_time=3.00 sec	log_loss=0.146588	best=0.145282
[48/50]		eval_time=3.12 sec	log_loss=0.251282	best=0.145282
[49/50]		eval_time=2.90 sec	log_loss=0.853841	best=0.145282
[50/50]		eval_time=3.16 sec	log_loss=0.194218	best=0.145282
--------------------------------------------------------------------------------
[TOTAL]		eval_time=152.73 sec	best=0.145282

{'task': 'train', 'boosting_type': 'gbdt', 'objective': 'binary', 'metric': ['binary_logloss'], 'num_leaves': 32, 'learning_rate': 0.04015980483047835, 'num_thread': 8, 'verbose': 1}

In [808]:



Out[808]:
<generator object _BaseKFold.split at 0x7f384a286a40>

In [820]:
dtrain = lgb.Dataset(x, label=y.values.flatten())
params['learning_rate'] = 0.04015980483047835
_ =  lgb.cv(params, dtrain, folds=experiment_lgb.cv.split(x, y.values.flatten()), num_boost_round=1000, verbose_eval=10, early_stopping_rounds=10)


[10]	cv_agg's binary_logloss: 0.526753 + 0.00184829
[20]	cv_agg's binary_logloss: 0.429078 + 0.00221706
[30]	cv_agg's binary_logloss: 0.365114 + 0.00184727
[40]	cv_agg's binary_logloss: 0.319005 + 0.00181915
[50]	cv_agg's binary_logloss: 0.284293 + 0.00176407
[60]	cv_agg's binary_logloss: 0.257631 + 0.00183897
[70]	cv_agg's binary_logloss: 0.236494 + 0.00234233
[80]	cv_agg's binary_logloss: 0.219228 + 0.00239062
[90]	cv_agg's binary_logloss: 0.20537 + 0.00262796
[100]	cv_agg's binary_logloss: 0.193952 + 0.00281812
[110]	cv_agg's binary_logloss: 0.184712 + 0.00266676
[120]	cv_agg's binary_logloss: 0.17779 + 0.00272423
[130]	cv_agg's binary_logloss: 0.172056 + 0.0021948
[140]	cv_agg's binary_logloss: 0.167974 + 0.00171842
[150]	cv_agg's binary_logloss: 0.164967 + 0.00195689
[160]	cv_agg's binary_logloss: 0.16299 + 0.00188363
[170]	cv_agg's binary_logloss: 0.161186 + 0.00184993
[180]	cv_agg's binary_logloss: 0.159797 + 0.00194712
[190]	cv_agg's binary_logloss: 0.158586 + 0.001844
[200]	cv_agg's binary_logloss: 0.157601 + 0.00214904
[210]	cv_agg's binary_logloss: 0.156453 + 0.00210937
[220]	cv_agg's binary_logloss: 0.155328 + 0.00217481
[230]	cv_agg's binary_logloss: 0.154467 + 0.00199327
[240]	cv_agg's binary_logloss: 0.153727 + 0.00194381
[250]	cv_agg's binary_logloss: 0.152991 + 0.00199948
[260]	cv_agg's binary_logloss: 0.152249 + 0.00179556
[270]	cv_agg's binary_logloss: 0.151572 + 0.00186087
[280]	cv_agg's binary_logloss: 0.150722 + 0.0019689
[290]	cv_agg's binary_logloss: 0.150039 + 0.00181638
[300]	cv_agg's binary_logloss: 0.149368 + 0.00180173
[310]	cv_agg's binary_logloss: 0.1486 + 0.00181224
[320]	cv_agg's binary_logloss: 0.148139 + 0.00195306
[330]	cv_agg's binary_logloss: 0.147409 + 0.00216465
[340]	cv_agg's binary_logloss: 0.146871 + 0.00224813
[350]	cv_agg's binary_logloss: 0.146454 + 0.0022364
[360]	cv_agg's binary_logloss: 0.146046 + 0.00241917
[370]	cv_agg's binary_logloss: 0.145516 + 0.00248246
[380]	cv_agg's binary_logloss: 0.144955 + 0.00235768
[390]	cv_agg's binary_logloss: 0.144519 + 0.00231175
[400]	cv_agg's binary_logloss: 0.144121 + 0.00234345
[410]	cv_agg's binary_logloss: 0.143582 + 0.00235063
[420]	cv_agg's binary_logloss: 0.143359 + 0.00241736
[430]	cv_agg's binary_logloss: 0.143016 + 0.00238169
[440]	cv_agg's binary_logloss: 0.142586 + 0.00230783
[450]	cv_agg's binary_logloss: 0.14213 + 0.00235865
[460]	cv_agg's binary_logloss: 0.141899 + 0.00239863
[470]	cv_agg's binary_logloss: 0.1417 + 0.00250027
[480]	cv_agg's binary_logloss: 0.141338 + 0.00254734
[490]	cv_agg's binary_logloss: 0.141022 + 0.0025097
[500]	cv_agg's binary_logloss: 0.140574 + 0.00256256
[510]	cv_agg's binary_logloss: 0.140258 + 0.00258546
[520]	cv_agg's binary_logloss: 0.140035 + 0.00249368
[530]	cv_agg's binary_logloss: 0.139618 + 0.00259996
[540]	cv_agg's binary_logloss: 0.139306 + 0.00262772
[550]	cv_agg's binary_logloss: 0.13898 + 0.00269817
[560]	cv_agg's binary_logloss: 0.138904 + 0.0027126
[570]	cv_agg's binary_logloss: 0.138601 + 0.00275988
[580]	cv_agg's binary_logloss: 0.138383 + 0.00280151
[590]	cv_agg's binary_logloss: 0.138233 + 0.00286742
[600]	cv_agg's binary_logloss: 0.138025 + 0.00291158
[610]	cv_agg's binary_logloss: 0.137804 + 0.00298264
[620]	cv_agg's binary_logloss: 0.137768 + 0.00307304

In [ ]:


In [ ]:


In [781]:
x_train, x_test, y_train, y_test = train_test_split(x, y, random_state=1, test_size=0.2)
dtrain = lgb.Dataset(x_train, label=y_train.values.flatten())
dtest = lgb.Dataset(x_test, y_test.values.flatten())

In [782]:
experiment_lgb.eval_metric(y_test, y_test)


Out[782]:
9.9920072216264128e-16

In [812]:
# model, y_pred = experiment_lgb._fit_predict(dtrain, dtest, params_lgb)

In [ ]:


In [671]:
x_train, x_test, y_train, y_test = train_test_split(x, y, random_state=1, test_size=0.2)
dtrain = lgb.Dataset(x_train, label=y_train.values.flatten())
dtest = lgb.Dataset(x_test, y_test.values.flatten())
evals = {}

lgb.train(params, dtrain, num_boost_round=100, valid_sets=(dtrain, dtest), valid_names=('train', 'test'),
          early_stopping_rounds=100, verbose_eval=10, evals_result=evals)


Training until validation scores don't improve for 100 rounds.
[10]	train's binary_logloss: 0.378673	test's binary_logloss: 0.395243
[20]	train's binary_logloss: 0.261869	test's binary_logloss: 0.286724
[30]	train's binary_logloss: 0.200092	test's binary_logloss: 0.227135
[40]	train's binary_logloss: 0.163065	test's binary_logloss: 0.192738
[50]	train's binary_logloss: 0.140423	test's binary_logloss: 0.174916
[60]	train's binary_logloss: 0.123444	test's binary_logloss: 0.163795
[70]	train's binary_logloss: 0.11095	test's binary_logloss: 0.158015
[80]	train's binary_logloss: 0.100949	test's binary_logloss: 0.154432
[90]	train's binary_logloss: 0.0919094	test's binary_logloss: 0.151452
[100]	train's binary_logloss: 0.0837612	test's binary_logloss: 0.148148
Out[671]:
<lightgbm.basic.Booster at 0x7f3854ee9fd0>

In [554]:
bags = experiment_lgb.run_bagging(seeds=[0, 1, 2, 3, 4], use_best_params=False)


[1/5]	seed=0	eval_time=0.59 sec	roc_auc_score=-0.979890
[2/5]	seed=1	eval_time=0.59 sec	roc_auc_score=-0.979828
[3/5]	seed=2	eval_time=0.61 sec	roc_auc_score=-0.979572
[4/5]	seed=3	eval_time=0.59 sec	roc_auc_score=-0.979589
[5/5]	seed=4	eval_time=0.59 sec	roc_auc_score=-0.979384
--------------------------------------------------------------------------------
[TOTAL]		eval_time=2.97 sec	roc_auc_score=0.980212	std=0.002

In [556]:
a, b = experiment_lgb.fit_predict(x_train, y_train, x_test, return_fitted_model=True)

In [561]:
b.best_score_


Out[561]:
defaultdict(dict, {})

In [ ]:


In [ ]:


In [515]:
import numpy
import catboost
from catboost import CatBoostRegressor
dataset = numpy.array([[1,4,5,6],[4,5,6,7],[30,40,50,60],[20,15,85,60]])
train_labels = [1.2,3.4,9.5,24.5]
model = CatBoostRegressor(learning_rate=1, depth=2, loss_function='RMSE', iterations=50, train_dir='ex2')
fit_model = model.fit(dataset, train_labels, logging_level='Silent')
fit_model.get_params()


Out[515]:
{'depth': 2,
 'iterations': 50,
 'learning_rate': 1,
 'loss_function': 'RMSE',
 'train_dir': 'ex2'}

In [239]:
params = {
    'eta': 0.1
}
model = xgb.train(params, xgb.DMatrix(dataset, train_labels))

In [243]:
a = xgb.XGBClassifier()

In [244]:
a.get_params()


Out[244]:
{'base_score': 0.5,
 'booster': 'gbtree',
 'colsample_bylevel': 1,
 'colsample_bytree': 1,
 'gamma': 0,
 'learning_rate': 0.1,
 'max_delta_step': 0,
 'max_depth': 3,
 'min_child_weight': 1,
 'missing': None,
 'n_estimators': 100,
 'n_jobs': 1,
 'nthread': None,
 'objective': 'binary:logistic',
 'random_state': 0,
 'reg_alpha': 0,
 'reg_lambda': 1,
 'scale_pos_weight': 1,
 'seed': None,
 'silent': True,
 'subsample': 1}

In [24]:
w = catboost.CatboostIpythonWidget('../sandbox')
a = w.update_widget(subdirs=True)



In [35]:
import numpy as np
from collections import defaultdict


class CatCounter(object):

    def __init__(self, learning_task, sort_values=None, seed=0):
        self.learning_task = learning_task
        self.sort_values = sort_values
        self.seed = seed
        self.sum_dicts = defaultdict(lambda : defaultdict(float))
        self.count_dicts = defaultdict(lambda : defaultdict(float))


    def update(self, value, col, key):
        self.sum_dicts[col][key] += value
        self.count_dicts[col][key] += 1


    def counter(self, key, col):
        num, den = self.sum_dicts[col][key], self.count_dicts[col][key]
        if self.learning_task == 'classification':
            return (num + 1.) / (den + 2.)
        elif self.learning_task == 'regression':
            return num / den if den > 0 else 0
        else:
            raise ValueError('Task type must be "classification" or "regression"')
    

    def fit(self, X, y):
        self.sum_dicts = defaultdict(lambda : defaultdict(float))
        self.count_dicts = defaultdict(lambda : defaultdict(float))

        if self.sort_values is None:
            indices = np.arange(X.shape[0])
            np.random.seed(self.seed)
            np.random.shuffle(indices)
        else:
            indices = np.argsort(self.sort_values)

        results = [np.zeros((X.shape[0], 0))]
        for col in range(X.shape[1]):
            result = np.zeros(X.shape[0])
            for index in indices:
                key = X[index, col]
                result[index] = self.counter(key, col)
                self.update(y[index], col, key)
            results.append(result.reshape(-1, 1))

        return np.concatenate(results, axis=1)
        
        
    def transform(self, X):
        results = [np.zeros((X.shape[0], 0))]
        for col in range(X.shape[1]):
            result = np.zeros(X.shape[0])
            for index in range(X.shape[0]):
                key = X[index, col]
                result[index] = self.counter(key, col)
            results.append(result.reshape(-1, 1))
        return np.concatenate(results, axis=1)

In [55]:
import pandas as pd

cc = CatCounter('regression', None)

In [59]:


In [60]:
pd.DataFrame(a['data'])[8].value_counts()


Out[60]:
24.0    132
5.0     115
4.0     110
3.0      38
6.0      26
8.0      24
2.0      24
1.0      20
7.0      17
Name: 8, dtype: int64

In [62]:
cc.fit(data[:, [8]], a['target'])


Out[62]:
array([[ 23.49285714],
       [ 36.        ],
       [ 26.49      ],
       [ 27.72      ],
       [ 23.55714286],
       [ 25.1375    ],
       [ 22.92962963],
       [ 23.56206897],
       [ 23.68      ],
       [ 25.76036036],
       [ 23.18947368],
       [ 25.39146341],
       [ 22.88823529],
       [ 20.82753623],
       [ 20.7483871 ],
       [ 22.6       ],
       [ 20.85070423],
       [ 20.68604651],
       [ 20.93589744],
       [ 20.82567568],
       [ 20.98666667],
       [ 21.925     ],
       [ 21.32142857],
       [ 21.03975904],
       [ 20.8421875 ],
       [ 21.12178218],
       [ 21.87916667],
       [ 20.28333333],
       [ 21.47473684],
       [ 20.26981132],
       [ 20.61363636],
       [ 21.44270833],
       [ 21.37113402],
       [ 20.34901961],
       [ 20.52857143],
       [ 24.7852459 ],
       [ 25.37738095],
       [ 27.12857143],
       [ 25.99090909],
       [ 28.33428571],
       [ 27.71111111],
       [ 28.10833333],
       [ 28.03548387],
       [ 27.78076923],
       [ 28.57142857],
       [  0.        ],
       [ 19.3       ],
       [ 28.23513514],
       [ 28.25      ],
       [ 18.85      ],
       [ 21.0925    ],
       [ 20.76666667],
       [ 21.74347826],
       [ 21.41521739],
       [ 18.83333333],
       [ 23.47878788],
       [ 27.52      ],
       [ 25.77789474],
       [ 31.2       ],
       [ 34.9       ],
       [ 39.78      ],
       [ 30.93571429],
       [ 30.67272727],
       [ 31.96      ],
       [ 25.42      ],
       [ 20.83076923],
       [ 20.9175    ],
       [ 20.82380952],
       [ 21.11034483],
       [ 20.95764706],
       [ 20.93428571],
       [ 21.40588235],
       [ 21.105     ],
       [ 20.20961538],
       [ 23.53142857],
       [ 22.64761905],
       [ 22.78      ],
       [ 25.31625   ],
       [ 23.        ],
       [ 25.        ],
       [ 21.00506329],
       [ 20.66875   ],
       [ 21.20111111],
       [ 20.82142857],
       [ 28.17333333],
       [ 28.1       ],
       [ 28.35217391],
       [ 28.40277778],
       [ 26.97391304],
       [ 25.2       ],
       [ 31.2       ],
       [ 25.92631579],
       [ 20.88421053],
       [ 20.73611111],
       [ 20.96190476],
       [ 26.11176471],
       [ 29.05      ],
       [ 33.3       ],
       [ 25.00625   ],
       [ 26.51904762],
       [ 23.06666667],
       [ 24.68867925],
       [ 20.96666667],
       [ 24.55636364],
       [ 24.48615385],
       [ 26.05714286],
       [ 23.66046512],
       [ 25.38      ],
       [ 23.05      ],
       [ 24.95526316],
       [ 25.28194444],
       [ 20.9875    ],
       [ 20.87142857],
       [  0.        ],
       [ 20.8       ],
       [ 21.1173913 ],
       [ 20.84615385],
       [ 21.048     ],
       [ 20.91666667],
       [ 21.02      ],
       [ 27.23636364],
       [ 26.23888889],
       [ 25.87142857],
       [ 25.62142857],
       [ 27.05      ],
       [ 25.94615385],
       [ 26.8       ],
       [ 21.2877551 ],
       [ 21.43655914],
       [ 20.76153846],
       [ 21.06219512],
       [ 20.97471264],
       [ 21.57894737],
       [ 20.388     ],
       [ 21.4125    ],
       [ 21.44074074],
       [ 20.51914894],
       [ 21.24666667],
       [ 20.33392857],
       [ 20.88194444],
       [ 20.8452381 ],
       [ 21.71153846],
       [ 23.82941176],
       [ 25.43111111],
       [ 23.87142857],
       [ 25.00204082],
       [ 24.72222222],
       [ 26.15098039],
       [ 25.51685393],
       [ 25.23287671],
       [ 24.76862745],
       [ 26.21584158],
       [ 25.1       ],
       [ 24.92727273],
       [ 24.4       ],
       [ 23.34375   ],
       [ 24.75396825],
       [ 22.92857143],
       [ 24.778     ],
       [ 22.96666667],
       [ 23.705     ],
       [ 24.93380282],
       [ 20.375     ],
       [ 25.978     ],
       [ 23.68333333],
       [ 25.86388889],
       [ 24.63768116],
       [ 24.70576923],
       [ 24.44035088],
       [ 25.44367816],
       [ 23.66875   ],
       [ 26.07777778],
       [ 25.72982456],
       [ 24.46666667],
       [ 25.78909091],
       [ 26.75      ],
       [ 24.4625    ],
       [ 26.03883495],
       [ 25.39186047],
       [ 18.96      ],
       [ 27.77241379],
       [ 27.98461538],
       [ 27.91818182],
       [ 28.16969697],
       [ 27.99047619],
       [ 28.29705882],
       [ 26.10909091],
       [ 25.31411765],
       [ 22.82222222],
       [ 24.41969697],
       [ 24.63389831],
       [ 23.4483871 ],
       [ 25.63539823],
       [ 24.01052632],
       [ 23.        ],
       [ 21.12477064],
       [  0.        ],
       [ 26.82272727],
       [ 25.58888889],
       [ 28.08      ],
       [ 22.        ],
       [ 25.06666667],
       [ 25.73      ],
       [ 20.95909091],
       [ 20.35322581],
       [ 20.68333333],
       [ 21.02142857],
       [ 20.95697674],
       [ 21.42222222],
       [ 21.07530864],
       [ 20.76666667],
       [ 21.14166667],
       [ 20.21052632],
       [ 20.9875    ],
       [ 20.29836066],
       [ 20.22      ],
       [ 24.65735294],
       [ 24.69032258],
       [ 24.84      ],
       [  0.        ],
       [ 36.26666667],
       [ 30.80833333],
       [ 30.82380952],
       [ 29.88125   ],
       [ 34.7       ],
       [ 37.225     ],
       [ 30.86315789],
       [ 30.30434783],
       [ 29.89411765],
       [ 24.3       ],
       [  0.        ],
       [ 31.98888889],
       [ 30.10769231],
       [ 27.9       ],
       [ 29.94      ],
       [ 32.9875    ],
       [ 31.32727273],
       [ 30.82777778],
       [ 20.60833333],
       [ 20.05      ],
       [ 20.7       ],
       [ 18.7       ],
       [ 21.        ],
       [ 21.        ],
       [ 27.89333333],
       [ 30.08333333],
       [ 32.13333333],
       [  0.        ],
       [ 27.38181818],
       [ 28.42857143],
       [ 27.46666667],
       [ 27.25      ],
       [ 27.16      ],
       [ 20.5       ],
       [ 16.8       ],
       [ 23.87142857],
       [ 27.66666667],
       [ 25.81030928],
       [ 25.26049383],
       [ 24.5761194 ],
       [ 24.88311688],
       [ 24.625     ],
       [ 25.27282609],
       [ 23.58888889],
       [ 24.42931034],
       [ 25.92735849],
       [ 25.31325301],
       [ 25.52021277],
       [ 23.75      ],
       [ 28.08947368],
       [ 28.048     ],
       [ 28.50625   ],
       [ 25.53333333],
       [ 27.95      ],
       [ 20.66363636],
       [ 20.85324675],
       [ 21.58571429],
       [ 21.02990654],
       [ 19.53333333],
       [ 25.40795455],
       [ 25.06202532],
       [ 23.51891892],
       [ 24.17021277],
       [ 17.6       ],
       [ 23.5       ],
       [ 24.12222222],
       [ 23.75384615],
       [ 21.09411765],
       [ 20.93809524],
       [ 20.73333333],
       [ 21.4       ],
       [ 21.24065934],
       [ 20.96509434],
       [ 21.46      ],
       [ 20.99615385],
       [ 20.18363636],
       [ 20.77105263],
       [ 21.57142857],
       [ 23.54722222],
       [ 26.3       ],
       [ 22.98      ],
       [ 28.15      ],
       [ 30.175     ],
       [ 31.65      ],
       [ 27.22307692],
       [ 27.85714286],
       [ 29.42      ],
       [ 27.14166667],
       [ 21.668     ],
       [ 21.65      ],
       [ 21.25      ],
       [ 20.85135135],
       [ 20.8804878 ],
       [ 20.43777778],
       [ 20.90673077],
       [ 20.86571429],
       [ 20.83880597],
       [ 20.83972603],
       [ 20.79411765],
       [ 20.65454545],
       [ 26.3625    ],
       [ 25.83854167],
       [ 23.78974359],
       [ 25.69910714],
       [ 25.52580645],
       [ 23.56590909],
       [ 22.96521739],
       [ 23.83157895],
       [ 20.79066667],
       [  0.        ],
       [ 21.86666667],
       [ 16.5       ],
       [ 24.11666667],
       [ 25.89813084],
       [ 25.32307692],
       [ 25.97333333],
       [ 23.78536585],
       [ 25.8559633 ],
       [ 26.025     ],
       [ 24.571875  ],
       [ 22.1       ],
       [ 23.71875   ],
       [  0.        ],
       [ 24.96933333],
       [ 22.59090909],
       [ 28.31176471],
       [ 19.65      ],
       [ 20.46304348],
       [ 20.75882353],
       [ 23.52666667],
       [ 24.46666667],
       [ 20.45416667],
       [ 20.86      ],
       [ 24.9974359 ],
       [ 20.24827586],
       [ 20.21355932],
       [ 16.46407767],
       [ 16.04666667],
       [ 16.35963303],
       [ 16.35648855],
       [ 16.10697674],
       [ 16.18701299],
       [ 16.1308642 ],
       [ 19.18181818],
       [ 16.1       ],
       [ 16.21571429],
       [ 18.23      ],
       [ 19.1625    ],
       [ 15.81979167],
       [ 16.17142857],
       [ 16.22941176],
       [  0.        ],
       [ 17.95384615],
       [ 16.56930693],
       [ 18.83703704],
       [ 16.04044944],
       [ 16.34414414],
       [ 16.27727273],
       [ 16.73333333],
       [ 16.2625    ],
       [ 15.99058824],
       [ 15.90638298],
       [ 16.37260274],
       [ 17.76764706],
       [ 16.42962963],
       [ 16.50487805],
       [ 19.96666667],
       [ 19.38636364],
       [ 16.32035398],
       [ 16.27327586],
       [ 16.05301205],
       [ 18.88      ],
       [ 18.01212121],
       [ 16.08103448],
       [ 16.30909091],
       [ 17.61142857],
       [ 16.33333333],
       [ 16.54215686],
       [ 16.45853659],
       [ 16.31710526],
       [ 18.98333333],
       [ 16.01956522],
       [ 16.06263736],
       [ 50.        ],
       [ 16.39453125],
       [ 16.18780488],
       [ 16.18375   ],
       [ 18.86521739],
       [ 18.3483871 ],
       [ 18.765     ],
       [ 19.11764706],
       [ 21.925     ],
       [ 16.25230769],
       [ 16.04237288],
       [ 16.23461538],
       [ 16.98421053],
       [ 17.48611111],
       [ 18.5       ],
       [ 26.3       ],
       [ 17.21621622],
       [ 16.23247863],
       [ 15.92473118],
       [ 16.25733333],
       [ 16.48360656],
       [ 16.23283582],
       [ 16.41727273],
       [ 19.24166667],
       [ 16.38235294],
       [ 16.6425    ],
       [ 16.19649123],
       [ 20.24285714],
       [ 16.37301587],
       [ 16.17216495],
       [ 16.27692308],
       [ 16.04166667],
       [ 19.04615385],
       [ 16.31428571],
       [ 16.28333333],
       [ 18.3125    ],
       [ 16.30405405],
       [ 16.46190476],
       [ 16.08387097],
       [ 15.99310345],
       [ 16.23644068],
       [ 16.2942029 ],
       [ 16.23962264],
       [ 20.98      ],
       [ 15.85368421],
       [ 16.3912    ],
       [ 16.51239669],
       [ 16.29826087],
       [ 18.95263158],
       [ 16.32232143],
       [ 16.02045455],
       [ 16.47692308],
       [ 18.65714286],
       [ 16.13934426],
       [ 16.37464789],
       [ 16.412     ],
       [ 19.1       ],
       [ 16.42990654],
       [ 16.13695652],
       [ 16.36612903],
       [ 16.190625  ],
       [ 16.35511811],
       [ 16.15740741],
       [ 16.40566038],
       [ 16.33472222],
       [ 16.544     ],
       [ 18.88888889],
       [ 16.26666667],
       [ 16.51313131],
       [ 16.30384615],
       [ 16.13877551],
       [ 19.45      ],
       [ 19.86      ],
       [ 16.51083333],
       [ 16.22888889],
       [ 19.6       ],
       [ 16.11772152],
       [ 18.61428571],
       [ 19.18095238],
       [ 16.16617647],
       [ 15.9255814 ],
       [ 16.17021277],
       [ 16.22545455],
       [ 16.02888889],
       [ 29.15      ],
       [ 21.26853933],
       [ 21.05098039],
       [ 21.23636364],
       [ 22.11363636],
       [ 20.91456311],
       [ 20.97894737],
       [ 20.8875    ],
       [ 19.4       ],
       [ 20.91      ],
       [ 20.63333333],
       [ 20.84285714],
       [ 21.28888889],
       [ 21.21111111],
       [ 24.27272727],
       [ 16.85      ],
       [ 24.31      ],
       [ 24.24705882],
       [ 18.5       ]])

In [ ]: