pystack


In [1]:
from pystacknet.pystacknet import StackNetClassifier
from sklearn.ensemble import RandomForestClassifier, RandomForestRegressor, ExtraTreesClassifier, ExtraTreesRegressor, GradientBoostingClassifier,GradientBoostingRegressor
from sklearn.linear_model import LogisticRegression, Ridge
from sklearn.decomposition import PCA
from lightgbm import LGBMClassifier
from xgboost import XGBClassifier

In [2]:
models=[
        [
         LGBMClassifier(boosting_type='gbdt', num_leaves=80, max_depth=-1, learning_rate=0.01, n_estimators=1000, subsample_for_bin=1000, objective="xentropy", min_split_gain=0.0,\
                        min_child_weight=0.01, min_child_samples=10, subsample=0.9, subsample_freq=1, colsample_bytree=0.7, reg_alpha=0.1, reg_lambda=0.1, random_state=1, n_jobs=3),
         LogisticRegression(C=1,  random_state=1),
         RandomForestClassifier(n_estimators=300, criterion="entropy", max_depth=7, max_features=0.7, random_state=1),
         RandomForestClassifier(n_estimators=200, criterion="entropy", max_depth=8, max_features=0.7, random_state=1),
         
         LGBMClassifier(boosting_type='gbdt', num_leaves=40, max_depth=10, learning_rate=0.1, n_estimators=1000, subsample_for_bin=1000, objective="xentropy", min_split_gain=0.0,\
                        min_child_weight=0.01, min_child_samples=10, subsample=0.9, subsample_freq=1, colsample_bytree=0.9, reg_alpha=0.2, reg_lambda=0.2, random_state=1, n_jobs=3),
         LogisticRegression(penalty="l1", C=1, random_state=1),
            
         XGBClassifier(max_depth=8,learning_rate=0.1, n_estimators=300, objective="binary:logistic", n_jobs=3, booster="gbtree", random_state=1, colsample_bytree=0.5),
         XGBClassifier(max_depth=10,learning_rate=0.1, n_estimators=300, objective="rank:pairwise", n_jobs=3, booster="gbtree", random_state=1, colsample_bytree=0.7),
         
         LGBMClassifier(boosting_type='gbdt', num_leaves=40, max_depth=-1, learning_rate=0.01, n_estimators=1000, subsample_for_bin=1000, objective="xentropy", min_split_gain=0.0,\
                        min_child_weight=0.01, min_child_samples=10, subsample=0.9, subsample_freq=1, colsample_bytree=0.5, reg_alpha=0.0, reg_lambda=0.0, random_state=1, n_jobs=3)             
         ],
    
        [
        XGBClassifier(max_depth=10,learning_rate=0.1, n_estimators=300, objective="rank:pairwise", n_jobs=3, booster="gbtree", random_state=1, colsample_bytree=0.7),
        LGBMClassifier(boosting_type='gbdt', num_leaves=40, max_depth=-1, learning_rate=0.01, n_estimators=1000, subsample_for_bin=1000, objective="xentropy", min_split_gain=0.0,\
                        min_child_weight=0.01, min_child_samples=10, subsample=0.9, subsample_freq=1, colsample_bytree=0.5, reg_alpha=0.0, reg_lambda=0.0, random_state=1, n_jobs=3),
        RandomForestClassifier(n_estimators=300, criterion="entropy", max_depth=8, max_features=0.7, random_state=1)
        ]
        ]

In [3]:
model=StackNetClassifier(models, metric="f1", folds=3, restacking=True,
                         use_retraining=True, use_proba=False, random_state=12345,
                         n_jobs=4, verbose=2)

In [9]:
model.fit(train_all, target)


====================== Start of Level 0 ======================
Input dinesionality: 102 at Level 0 
9 models included in Level 0 
Level 0, fold 1/3 , model 0 , f1===0.493827 
Level 0, fold 1/3 , model 1 , f1===0.376984 
Level 0, fold 1/3 , model 2 , f1===0.373967 
Level 0, fold 1/3 , model 3 , f1===0.412091 
Level 0, fold 1/3 , model 4 , f1===0.492551 
Level 0, fold 1/3 , model 5 , f1===0.420200 
Level 0, fold 1/3 , model 6 , f1===0.487144 
Level 0, fold 1/3 , model 7 , f1===0.487788 
Level 0, fold 1/3 , model 8 , f1===0.490961 
=========== end of fold  1n level 0 ===========
Level 0, fold 2/3 , model 0 , f1===0.508708 
Level 0, fold 2/3 , model 1 , f1===0.411153 
Level 0, fold 2/3 , model 2 , f1===0.407719 
Level 0, fold 2/3 , model 3 , f1===0.422081 
Level 0, fold 2/3 , model 4 , f1===0.505396 
Level 0, fold 2/3 , model 5 , f1===0.443431 
Level 0, fold 2/3 , model 6 , f1===0.506145 
Level 0, fold 2/3 , model 7 , f1===0.495443 
Level 0, fold 2/3 , model 8 , f1===0.511244 
=========== end of fold  2n level 0 ===========
Level 0, fold 3/3 , model 0 , f1===0.507260 
Level 0, fold 3/3 , model 1 , f1===0.335744 
Level 0, fold 3/3 , model 2 , f1===0.364026 
Level 0, fold 3/3 , model 3 , f1===0.403133 
Level 0, fold 3/3 , model 4 , f1===0.506977 
Level 0, fold 3/3 , model 5 , f1===0.445606 
Level 0, fold 3/3 , model 6 , f1===0.504265 
Level 0, fold 3/3 , model 7 , f1===0.486945 
Level 0, fold 3/3 , model 8 , f1===0.506770 
=========== end of fold  3n level 0 ===========
Level 0, model 0 , f1===0.503265 
Level 0, model 1 , f1===0.374627 
Level 0, model 2 , f1===0.381904 
Level 0, model 3 , f1===0.412435 
Level 0, model 4 , f1===0.501641 
Level 0, model 5 , f1===0.436412 
Level 0, model 6 , f1===0.499185 
Level 0, model 7 , f1===0.490059 
Level 0, model 8 , f1===0.502992 
Output dimensionality of level 0 is 9 
====================== End of Level 0 ======================
 level 0 lasted 957.418949 seconds 
====================== Start of Level 1 ======================
Input dinesionality: 111 at Level 1 
3 models included in Level 1 
Level 1, fold 1/3 , model 0 , f1===0.496434 
Level 1, fold 1/3 , model 1 , f1===0.497164 
Level 1, fold 1/3 , model 2 , f1===0.495012 
=========== end of fold  1n level 1 ===========
Level 1, fold 2/3 , model 0 , f1===0.497034 
Level 1, fold 2/3 , model 1 , f1===0.508924 
Level 1, fold 2/3 , model 2 , f1===0.508990 
=========== end of fold  2n level 1 ===========
Level 1, fold 3/3 , model 0 , f1===0.491158 
Level 1, fold 3/3 , model 1 , f1===0.510272 
Level 1, fold 3/3 , model 2 , f1===0.507737 
=========== end of fold  3n level 1 ===========
Level 1, model 0 , f1===0.494875 
Level 1, model 1 , f1===0.505454 
Level 1, model 2 , f1===0.503913 
Output dimensionality of level 1 is 3 
====================== End of Level 1 ======================
 level 1 lasted 889.157820 seconds 
====================== End of fit ======================
 fit() lasted 1846.609222 seconds 

In [10]:
model


Out[10]:
StackNetClassifier(folds=3, metric=<function f1 at 0x00000243EC089B70>,
          models=[[LGBMClassifier(boosting_type='gbdt', class_weight=None, colsample_bytree=0.7,
        learning_rate=0.01, max_depth=-1, min_child_samples=10,
        min_child_weight=0.01, min_split_gain=0.0, n_estimators=1000,
        n_jobs=3, num_leaves=80, objective='xentropy', random_state=1,
        ...stimators=300, n_jobs=1,
            oob_score=False, random_state=1, verbose=0, warm_start=False)]],
          n_jobs=4, random_state=12345, restacking=True, use_proba=False,
          use_retraining=True, verbose=2)

In [12]:
preds=model.predict_proba(test_all)


====================== Start of Level 0 ======================
1 estimators included in Level 0 
====================== Start of Level 1 ======================
1 estimators included in Level 1 

In [18]:
sub = np.where(preds[:,2]>=0.61,1,0)
make_submission(sub).to_csv('py_stacknet.csv', index=False)

In [15]:
np.save('preds_pystacknet.py', preds)

The Boring Stuff


In [1]:
%load_ext autoreload
%autoreload 2

%matplotlib inline

In [2]:
import time
import xgboost as xgb
import lightgbm as lgb
# import category_encoders as cat_ed
import gc, mlcrate, glob

# from gplearn.genetic import SymbolicTransformer, SymbolicClassifier
from fastai.imports import *
from fastai.structured import *
from pandas_summary import DataFrameSummary
from sklearn.ensemble import RandomForestClassifier, ExtraTreesClassifier, ExtraTreesRegressor
from IPython.display import display

from catboost import CatBoostClassifier
from scipy.cluster import hierarchy as hc
from collections import Counter

from sklearn import metrics
from sklearn.linear_model import LogisticRegression, Ridge
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error
from sklearn.metrics import  roc_auc_score, log_loss
from sklearn.model_selection import KFold, StratifiedKFold
from sklearn.model_selection import GridSearchCV
from sklearn.decomposition import PCA, TruncatedSVD, FastICA, FactorAnalysis
from sklearn.random_projection import GaussianRandomProjection, SparseRandomProjection
from sklearn.cluster import KMeans

from sklearn.metrics import accuracy_score, log_loss
from sklearn.neighbors import KNeighborsClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import AdaBoostClassifier, GradientBoostingClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis
from sklearn.neural_network import MLPClassifier
from sklearn.gaussian_process import GaussianProcessClassifier
from sklearn.gaussian_process.kernels import RBF

# will ignore all warning from sklearn, seaborn etc..
def ignore_warn(*args, **kwargs):
    pass
warnings.warn = ignore_warn

pd.option_context("display.max_rows", 1000);
pd.option_context("display.max_columns", 1000);

In [3]:
PATH = os.getcwd()
PATH


Out[3]:
'D:\\Github\\fastai\\courses\\ml1\\AV_WNS'

In [4]:
df_raw = pd.read_csv(f'{PATH}\\train.csv', low_memory=False)
df_test = pd.read_csv(f'{PATH}\\test.csv', low_memory=False)

In [5]:
df_raw.shape, df_test.shape


Out[5]:
((54808, 14), (23490, 13))

In [6]:
new_cols = ['employee_id', 'department', 'region', 'education', 'gender',
       'recruitment_channel', 'no_of_trainings', 'age', 'previous_year_rating',
       'length_of_service', 'KPIs_met_more_than_80_percent', 'awards_won_bool',
       'avg_training_score', 'is_promoted']
#re-naming them
df_raw.columns = new_cols
df_test.columns = new_cols[:-1]

cat_cols = list(df_raw.select_dtypes(include=['object']).columns)
num_cols = list(df_raw.select_dtypes(exclude=['object']).columns)

In [7]:
## Since we can't use id cols, it better dropping them straight front!
drop_col = ['employee_id']
df_raw.drop(drop_col, axis=1, inplace=True)
df_test.drop(drop_col, axis=1, inplace=True)

2.


In [8]:
# %%time
#creating interactions all Run At Last and add tf-idf, count vec
# for f in range (0,len(cat_cols)):
#     for s in range (f+1,len(cat_cols)):
#     # Basically this is creating interactions..( 2 - way) 
#         df_raw[cat_cols[f] + "_" + cat_cols[s]] = df_raw[cat_cols[f]] + "_" + df_raw[cat_cols[s]]
#         df_test[cat_cols[f] + "_" + cat_cols[s]] = df_test[cat_cols[f]] + "_" + df_test[cat_cols[s]]            
#         cat_cols.append(cat_cols[f] + "_" + cat_cols[s])
#     print(len(cat_cols), end=' ')

In [9]:
###########################kind of binning age at trivial level #####################################

df_raw['is_age_39'] = np.zeros(df_raw.shape[0])
my_query = df_raw.query('age<=39.').index
df_raw.iloc[my_query, -1] = 1
df_raw['is_age_39_45'] = np.zeros(df_raw.shape[0])
my_query = df_raw.query('age>=39. & age<=45.').index
df_raw.iloc[my_query, -1] = 1
df_raw['is_age_45'] = np.zeros(df_raw.shape[0])
my_query = df_raw.query('age>=45.').index
df_raw.iloc[my_query, -1] = 1

#######################################################################################################
###################young age (13–30), middle age (31–50) and senior age (51–70)########################
#######################################################################################################

df_raw['age_group'] = np.zeros(df_raw.shape[0])
my_query = df_raw.query('age>=20. & age<=30.').index
df_raw.iloc[my_query, -1] = 'young'
my_query = df_raw.query('age>=31. & age<=50.').index
df_raw.iloc[my_query, -1] = 'middle_aged'
my_query = df_raw.query('age>=51. & age<=60.').index
df_raw.iloc[my_query, -1] = 'senior_aged'
###################################################################################################################
###################################################################################################################
###################################################################################################################

###########################kind of binning age at trivial level #####################################

df_test['is_age_39'] = np.zeros(df_test.shape[0])
my_query = df_test.query('age<=39.').index
df_test.iloc[my_query, -1] = 1
df_test['is_age_39_45'] = np.zeros(df_test.shape[0])
my_query = df_test.query('age>=39. & age<=45.').index
df_test.iloc[my_query, -1] = 1
df_test['is_age_45'] = np.zeros(df_test.shape[0])
my_query = df_test.query('age>=45.').index
df_test.iloc[my_query, -1] = 1

#######################################################################################################
###################young age (13–30), middle age (31–50) and senior age (51–70)########################
#######################################################################################################

df_test['age_group'] = np.zeros(df_test.shape[0])
my_query = df_test.query('age>=20. & age<=30.').index
df_test.iloc[my_query, -1] = 'young'
my_query = df_test.query('age>=31. & age<=50.').index
df_test.iloc[my_query, -1] = 'middle_aged'
my_query = df_test.query('age>=51. & age<=60.').index
df_test.iloc[my_query, -1] = 'senior_aged';
###############################################################################

In [10]:
df_raw['promotion_chance'] = 'low'
my_query = df_raw.query('avg_training_score>=90').index
df_raw.iloc[my_query, -1] = 'very_high'
my_query = df_raw.query('avg_training_score>=75 and avg_training_score<90').index
df_raw.iloc[my_query, -1] = 'high'
my_query = df_raw.query('avg_training_score>=65 and avg_training_score<75').index
df_raw.iloc[my_query, -1] = 'medium'
my_query = df_raw.query('avg_training_score>=53 and avg_training_score<65').index
df_raw.iloc[my_query, -1] = 'low_medium'

df_test['promotion_chance'] = 'low'
my_query = df_test.query('avg_training_score>=90').index
df_test.iloc[my_query, -1] = 'very_high'
my_query = df_test.query('avg_training_score>=75 and avg_training_score<90').index
df_test.iloc[my_query, -1] = 'high'
my_query = df_test.query('avg_training_score>=65 and avg_training_score<75').index
df_test.iloc[my_query, -1] = 'medium'
my_query = df_test.query('avg_training_score>=53 and avg_training_score<65').index
df_test.iloc[my_query, -1] = 'low_medium'

In [10]:
feats_added = []

df_raw['joining_age'] = df_raw['age'] - df_raw['length_of_service']
df_test['joining_age'] = df_test['age'] - df_test['length_of_service']
feats_added.append('joining_age')

df_raw['region'].replace('region_', '', True, None, True)
df_test['region'].replace('region_', '', True, None, True)
################################################################################3
bins = [20., 25., 30., 35., 40., 45., 50., 55., 60., 70]
labels = [i+1 for i in range(len(bins) - 1)]
bin_cols = ['age']
for col in bin_cols:    
    df_raw[f'bin_{col}'.format(col)]  = pd.cut(df_raw[col] ,bins,labels = labels)
    df_test[f'bin_{col}'.format(col)] = pd.cut(df_test[col],bins,labels = labels)
feats_added.append('bin_age')

bins = [39., 44., 54., 66., 75., 80., 85., 90., 95.]
labels = [i+1 for i in range(len(bins) - 1)]
bin_cols = ['avg_training_score']
for col in bin_cols:    
    df_raw[f'bin_{col}'.format(col)]  = pd.cut(df_raw[col] ,bins,labels = labels)
    df_test[f'bin_{col}'.format(col)] = pd.cut(df_test[col],bins,labels = labels)

feats_added.append('bin_avg_training_score')
feats_added.append(['age_group', 'is_age_39', 'is_age_39_45', 'is_age_45', 'promotion_chance',\
                   'reg_count','mean_age_per_region','mean_joining_age_per_region','mean_previous_year_rating_per_region',\
                    'mean_avg_training_score_per_region','mean_length_of_service_per_region'])
################################################################################################
df_raw['promotion_chance'] = 'low'
my_query = df_raw.query('avg_training_score>=90').index
df_raw.iloc[my_query, -1] = 'very_high'
my_query = df_raw.query('avg_training_score>=75 and avg_training_score<90').index
df_raw.iloc[my_query, -1] = 'high'
my_query = df_raw.query('avg_training_score>=65 and avg_training_score<75').index
df_raw.iloc[my_query, -1] = 'medium'
my_query = df_raw.query('avg_training_score>=53 and avg_training_score<65').index
df_raw.iloc[my_query, -1] = 'low_medium'

df_test['promotion_chance'] = 'low'
my_query = df_test.query('avg_training_score>=90').index
df_test.iloc[my_query, -1] = 'very_high'
my_query = df_test.query('avg_training_score>=75 and avg_training_score<90').index
df_test.iloc[my_query, -1] = 'high'
my_query = df_test.query('avg_training_score>=65 and avg_training_score<75').index
df_test.iloc[my_query, -1] = 'medium'
my_query = df_test.query('avg_training_score>=53 and avg_training_score<65').index
df_test.iloc[my_query, -1] = 'low_medium'
###############################################################################################
def map_(regs, age):
    d = {}
    for i,j in zip(regs, age):
        d[i] = j
    return d

xyz = df_raw.groupby('region').mean().sort_values(by='region')[['age', 'joining_age', 'previous_year_rating', 'length_of_service', 'avg_training_score']]
count = Counter(df_raw['region'])

regs                   = xyz.reset_index()['region'].values
age                    = xyz.reset_index()['age'].values
joining_age            = xyz.reset_index()['joining_age'].values
previous_year_rating   = xyz.reset_index()['previous_year_rating'].values
length_of_service      = xyz.reset_index()['length_of_service'].values 
avg_training_score     = xyz.reset_index()['avg_training_score'].values

df_raw['reg_count'] = df_raw['region'].map(count)
d = map_(regs, age)
df_raw['mean_age_per_region']   = df_raw['region'].map(d)
d = map_(regs, joining_age)
df_raw['mean_joining_age_per_region']   = df_raw['region'].map(d)
d = map_(regs, previous_year_rating)
df_raw['mean_previous_year_rating_per_region']   = df_raw['region'].map(d)
d = map_(regs, avg_training_score)
df_raw['mean_avg_training_score_per_region']   = df_raw['region'].map(d)
d = map_(regs, length_of_service)
df_raw['mean_length_of_service_per_region']   = df_raw['region'].map(d)

xyz = df_test.groupby('region').mean().sort_values(by='region')[['age', 'joining_age', 'previous_year_rating', 'length_of_service', 'avg_training_score']]
count = Counter(df_test['region'])

regs                   = xyz.reset_index()['region'].values
age                    = xyz.reset_index()['age'].values
joining_age            = xyz.reset_index()['joining_age'].values
previous_year_rating   = xyz.reset_index()['previous_year_rating'].values
length_of_service      = xyz.reset_index()['length_of_service'].values 
avg_training_score     = xyz.reset_index()['avg_training_score'].values

df_test['reg_count'] = df_test['region'].map(count)
d = map_(regs, age)
df_test['mean_age_per_region']   = df_test['region'].map(d)
d = map_(regs, joining_age)
df_test['mean_joining_age_per_region']   = df_test['region'].map(d)
d = map_(regs, previous_year_rating)
df_test['mean_previous_year_rating_per_region']   = df_test['region'].map(d)
d = map_(regs, avg_training_score)
df_test['mean_avg_training_score_per_region']   = df_test['region'].map(d)
d = map_(regs, length_of_service)
df_test['mean_length_of_service_per_region']   = df_test['region'].map(d)
####################################################################################


del d, count, regs, joining_age, previous_year_rating, length_of_service, avg_training_score
gc.collect()


Out[10]:
504

In [11]:
df_raw['promotion_chance'].head()


Out[11]:
0           low
1    low_medium
2           low
3           low
4        medium
Name: promotion_chance, dtype: object

In [12]:
#https://www.kaggle.com/ogrellier/python-target-encoding-for-categorical-features
def add_noise(series, noise_level):
    return series * (1 + noise_level * np.random.randn(len(series)))

def target_encode(trn_series=None, 
                  tst_series=None, 
                  target=None, 
                  min_samples_leaf=1, 
                  smoothing=1,
                  noise_level=0):
    """
    Smoothing is computed like in the following paper by Daniele Micci-Barreca
    https://kaggle2.blob.core.windows.net/forum-message-attachments/225952/7441/high%20cardinality%20categoricals.pdf
    trn_series : training categorical feature as a pd.Series
    tst_series : test categorical feature as a pd.Series
    target : target data as a pd.Series
    min_samples_leaf (int) : minimum samples to take category average into account
    smoothing (int) : smoothing effect to balance categorical average vs prior  
    """ 
    assert len(trn_series) == len(target)
    assert trn_series.name == tst_series.name
    temp = pd.concat([trn_series, target], axis=1)
    # Compute target mean 
    averages = temp.groupby(by=trn_series.name)[target.name].agg(["mean", "count"])
    # Compute smoothing
    smoothing = 1 / (1 + np.exp(-(averages["count"] - min_samples_leaf) / smoothing))
    # Apply average function to all target data
    prior = target.mean()
    # The bigger the count the less full_avg is taken into account
    averages[target.name] = prior * (1 - smoothing) + averages["mean"] * smoothing
    averages.drop(["mean", "count"], axis=1, inplace=True)
    # Apply averages to trn and tst series
    ft_trn_series = pd.merge(
        trn_series.to_frame(trn_series.name),
        averages.reset_index().rename(columns={'index': target.name, target.name: 'average'}),
        on=trn_series.name,
        how='left')['average'].rename(trn_series.name + '_mean').fillna(prior)
    # pd.merge does not keep the index so restore it
    ft_trn_series.index = trn_series.index 
    ft_tst_series = pd.merge(
        tst_series.to_frame(tst_series.name),
        averages.reset_index().rename(columns={'index': target.name, target.name: 'average'}),
        on=tst_series.name,
        how='left')['average'].rename(trn_series.name + '_mean').fillna(prior)
    # pd.merge does not keep the index so restore it
    ft_tst_series.index = tst_series.index
    return add_noise(ft_trn_series, noise_level), add_noise(ft_tst_series, noise_level)

In [13]:
train_cats(df_raw);
apply_cats(df_test,df_raw)

In [14]:
cat_cols = list(df_raw.select_dtypes(include=['object','category']).columns)

In [15]:
train_encoded, test_encoded = [], []
for i in range(len(cat_cols)):
    trn, sub = target_encode(df_raw[cat_cols[i]], 
                             df_test[cat_cols[i]], 
                             target=df_raw.is_promoted, 
                             min_samples_leaf=100,
                             smoothing=10,
                             noise_level=0.01)
    train_encoded.append(trn)
    test_encoded.append(sub)
    print(i, end=',')


0,1,2,3,4,5,6,7,8,

In [16]:
df_raw_cat = df_raw[cat_cols]
df_test_cat = df_test[cat_cols]
df_raw.drop(cat_cols, axis=1, inplace=True)
df_test.drop(cat_cols, axis=1, inplace=True)
df_raw.shape, df_test.shape


Out[16]:
((54808, 18), (23490, 17))

In [17]:
df_raw_cat.get_ftype_counts()


Out[17]:
category:dense    9
dtype: int64

In [18]:
df_raw.head()


Out[18]:
no_of_trainings age previous_year_rating length_of_service KPIs_met_more_than_80_percent awards_won_bool avg_training_score is_promoted is_age_39 is_age_39_45 is_age_45 joining_age reg_count mean_age_per_region mean_joining_age_per_region mean_previous_year_rating_per_region mean_avg_training_score_per_region mean_length_of_service_per_region
0 1 35 5.0 8 1 0 49 0 1.0 0.0 0.0 27 4843 35.644229 29.417716 3.405671 63.940946 6.226512
1 1 30 5.0 4 0 0 60 0 1.0 0.0 0.0 26 6428 32.321251 27.444462 3.387227 65.699440 4.876789
2 1 34 3.0 7 0 0 50 0 1.0 0.0 0.0 27 874 36.113272 29.662471 3.357311 61.270023 6.450801
3 2 39 1.0 10 0 0 50 0 1.0 1.0 0.0 29 1175 34.531064 28.654468 3.367257 64.330213 5.876596
4 1 45 3.0 2 0 0 73 0 0.0 1.0 1.0 43 2260 33.417257 28.184513 3.292135 65.084071 5.232743

In [16]:
target = df_raw.is_promoted
df_raw.drop('is_promoted', axis=1, inplace=True)

Catboost


In [23]:
categorical_features_indices1 = np.where(df_raw.dtypes == 'category')[0];
categorical_features_indices1


Out[23]:
array([ 0,  1,  2,  3,  4, 15, 16, 18, 19], dtype=int64)

In [32]:
df_raw['previous_year_rating'].fillna(0,inplace=True)
df_test['previous_year_rating'].fillna(0,inplace=True)

In [36]:
df_raw.fillna(method='bfill',inplace=True)
df_test.fillna(method='bfill',inplace=True)

In [37]:
X_train, X_validation, y_train, y_validation = train_test_split(df_raw, target, test_size=0.8, random_state=1234, stratify=target)

In [47]:
#importing library and building model
from catboost import CatBoostClassifier
model=CatBoostClassifier(logging_level='Verbose',class_weights=[0.3,0.7],iterations=500, depth=10, learning_rate=0.01, loss_function='Logloss',\
                         )
model.fit(X_train, y_train,cat_features=categorical_features_indices1,eval_set=(X_validation, y_validation))


0:	learn: 0.6782690	test: 0.6782943	best: 0.6782943 (0)	total: 130ms	remaining: 1m 4s
1:	learn: 0.6644973	test: 0.6645033	best: 0.6645033 (1)	total: 330ms	remaining: 1m 22s
2:	learn: 0.6504474	test: 0.6502320	best: 0.6502320 (2)	total: 589ms	remaining: 1m 37s
3:	learn: 0.6375652	test: 0.6372470	best: 0.6372470 (3)	total: 661ms	remaining: 1m 21s
4:	learn: 0.6271148	test: 0.6268062	best: 0.6268062 (4)	total: 761ms	remaining: 1m 15s
5:	learn: 0.6162614	test: 0.6159389	best: 0.6159389 (5)	total: 981ms	remaining: 1m 20s
6:	learn: 0.6055217	test: 0.6053170	best: 0.6053170 (6)	total: 1.03s	remaining: 1m 12s
7:	learn: 0.5957500	test: 0.5956208	best: 0.5956208 (7)	total: 1.23s	remaining: 1m 15s
8:	learn: 0.5871382	test: 0.5869913	best: 0.5869913 (8)	total: 1.33s	remaining: 1m 12s
9:	learn: 0.5820280	test: 0.5819327	best: 0.5819327 (9)	total: 1.36s	remaining: 1m 6s
10:	learn: 0.5712530	test: 0.5711174	best: 0.5711174 (10)	total: 1.45s	remaining: 1m 4s
11:	learn: 0.5632898	test: 0.5631117	best: 0.5631117 (11)	total: 1.55s	remaining: 1m 2s
12:	learn: 0.5555005	test: 0.5553312	best: 0.5553312 (12)	total: 1.75s	remaining: 1m 5s
13:	learn: 0.5456879	test: 0.5454575	best: 0.5454575 (13)	total: 1.93s	remaining: 1m 6s
14:	learn: 0.5378372	test: 0.5375579	best: 0.5375579 (14)	total: 2s	remaining: 1m 4s
15:	learn: 0.5297641	test: 0.5295544	best: 0.5295544 (15)	total: 2.19s	remaining: 1m 6s
16:	learn: 0.5209316	test: 0.5205180	best: 0.5205180 (16)	total: 2.41s	remaining: 1m 8s
17:	learn: 0.5146582	test: 0.5142492	best: 0.5142492 (17)	total: 2.46s	remaining: 1m 5s
18:	learn: 0.5066985	test: 0.5063053	best: 0.5063053 (18)	total: 2.53s	remaining: 1m 4s
19:	learn: 0.5007567	test: 0.5003766	best: 0.5003766 (19)	total: 2.67s	remaining: 1m 4s
20:	learn: 0.4938478	test: 0.4935883	best: 0.4935883 (20)	total: 2.89s	remaining: 1m 6s
21:	learn: 0.4865334	test: 0.4862325	best: 0.4862325 (21)	total: 3.1s	remaining: 1m 7s
22:	learn: 0.4811869	test: 0.4808985	best: 0.4808985 (22)	total: 3.17s	remaining: 1m 5s
23:	learn: 0.4781434	test: 0.4779419	best: 0.4779419 (23)	total: 3.21s	remaining: 1m 3s
24:	learn: 0.4751817	test: 0.4750406	best: 0.4750406 (24)	total: 3.23s	remaining: 1m 1s
25:	learn: 0.4701026	test: 0.4699910	best: 0.4699910 (25)	total: 3.48s	remaining: 1m 3s
26:	learn: 0.4635657	test: 0.4634825	best: 0.4634825 (26)	total: 3.66s	remaining: 1m 4s
27:	learn: 0.4584423	test: 0.4584070	best: 0.4584070 (27)	total: 3.87s	remaining: 1m 5s
28:	learn: 0.4526817	test: 0.4526467	best: 0.4526467 (28)	total: 4.09s	remaining: 1m 6s
29:	learn: 0.4506919	test: 0.4507018	best: 0.4507018 (29)	total: 4.11s	remaining: 1m 4s
30:	learn: 0.4472045	test: 0.4472905	best: 0.4472905 (30)	total: 4.19s	remaining: 1m 3s
31:	learn: 0.4447484	test: 0.4448938	best: 0.4448938 (31)	total: 4.24s	remaining: 1m 1s
32:	learn: 0.4396574	test: 0.4398074	best: 0.4398074 (32)	total: 4.43s	remaining: 1m 2s
33:	learn: 0.4354128	test: 0.4354885	best: 0.4354885 (33)	total: 4.55s	remaining: 1m 2s
34:	learn: 0.4316283	test: 0.4317254	best: 0.4317254 (34)	total: 4.63s	remaining: 1m 1s
35:	learn: 0.4290241	test: 0.4291715	best: 0.4291715 (35)	total: 4.7s	remaining: 1m
36:	learn: 0.4252273	test: 0.4254401	best: 0.4254401 (36)	total: 4.91s	remaining: 1m 1s
37:	learn: 0.4202775	test: 0.4205925	best: 0.4205925 (37)	total: 5.04s	remaining: 1m 1s
38:	learn: 0.4157073	test: 0.4161265	best: 0.4161265 (38)	total: 5.25s	remaining: 1m 2s
39:	learn: 0.4112213	test: 0.4117847	best: 0.4117847 (39)	total: 5.48s	remaining: 1m 2s
40:	learn: 0.4086102	test: 0.4092448	best: 0.4092448 (40)	total: 5.71s	remaining: 1m 3s
41:	learn: 0.4065959	test: 0.4072955	best: 0.4072955 (41)	total: 5.96s	remaining: 1m 4s
42:	learn: 0.4024479	test: 0.4031710	best: 0.4031710 (42)	total: 6.05s	remaining: 1m 4s
43:	learn: 0.4003905	test: 0.4011062	best: 0.4011062 (43)	total: 6.32s	remaining: 1m 5s
44:	learn: 0.3981762	test: 0.3989659	best: 0.3989659 (44)	total: 6.59s	remaining: 1m 6s
45:	learn: 0.3961466	test: 0.3969705	best: 0.3969705 (45)	total: 6.71s	remaining: 1m 6s
46:	learn: 0.3923411	test: 0.3932320	best: 0.3932320 (46)	total: 6.99s	remaining: 1m 7s
47:	learn: 0.3904394	test: 0.3913751	best: 0.3913751 (47)	total: 7.25s	remaining: 1m 8s
48:	learn: 0.3869279	test: 0.3878123	best: 0.3878123 (48)	total: 7.4s	remaining: 1m 8s
49:	learn: 0.3833209	test: 0.3842119	best: 0.3842119 (49)	total: 7.66s	remaining: 1m 8s
50:	learn: 0.3822835	test: 0.3832121	best: 0.3832121 (50)	total: 7.7s	remaining: 1m 7s
51:	learn: 0.3806727	test: 0.3816494	best: 0.3816494 (51)	total: 8s	remaining: 1m 8s
52:	learn: 0.3780353	test: 0.3790914	best: 0.3790914 (52)	total: 8.32s	remaining: 1m 10s
53:	learn: 0.3764432	test: 0.3775464	best: 0.3775464 (53)	total: 8.4s	remaining: 1m 9s
54:	learn: 0.3750015	test: 0.3761369	best: 0.3761369 (54)	total: 8.45s	remaining: 1m 8s
55:	learn: 0.3718494	test: 0.3729884	best: 0.3729884 (55)	total: 8.7s	remaining: 1m 8s
56:	learn: 0.3688136	test: 0.3700673	best: 0.3700673 (56)	total: 8.9s	remaining: 1m 9s
57:	learn: 0.3673955	test: 0.3687077	best: 0.3687077 (57)	total: 9.13s	remaining: 1m 9s
58:	learn: 0.3647198	test: 0.3660383	best: 0.3660383 (58)	total: 9.38s	remaining: 1m 10s
59:	learn: 0.3624543	test: 0.3638429	best: 0.3638429 (59)	total: 9.62s	remaining: 1m 10s
60:	learn: 0.3595921	test: 0.3610658	best: 0.3610658 (60)	total: 9.73s	remaining: 1m 10s
61:	learn: 0.3583736	test: 0.3598589	best: 0.3598589 (61)	total: 9.92s	remaining: 1m 10s
62:	learn: 0.3573617	test: 0.3588360	best: 0.3588360 (62)	total: 9.96s	remaining: 1m 9s
63:	learn: 0.3560820	test: 0.3575673	best: 0.3575673 (63)	total: 10.2s	remaining: 1m 9s
64:	learn: 0.3552563	test: 0.3567703	best: 0.3567703 (64)	total: 10.3s	remaining: 1m 8s
65:	learn: 0.3540008	test: 0.3555671	best: 0.3555671 (65)	total: 10.5s	remaining: 1m 8s
66:	learn: 0.3520967	test: 0.3537258	best: 0.3537258 (66)	total: 10.8s	remaining: 1m 9s
67:	learn: 0.3497403	test: 0.3514061	best: 0.3514061 (67)	total: 10.8s	remaining: 1m 8s
68:	learn: 0.3473160	test: 0.3490652	best: 0.3490652 (68)	total: 11.1s	remaining: 1m 9s
69:	learn: 0.3448106	test: 0.3465548	best: 0.3465548 (69)	total: 11.3s	remaining: 1m 9s
70:	learn: 0.3439656	test: 0.3457563	best: 0.3457563 (70)	total: 11.3s	remaining: 1m 8s
71:	learn: 0.3429540	test: 0.3448396	best: 0.3448396 (71)	total: 11.6s	remaining: 1m 8s
72:	learn: 0.3423621	test: 0.3442849	best: 0.3442849 (72)	total: 11.6s	remaining: 1m 7s
73:	learn: 0.3413956	test: 0.3433845	best: 0.3433845 (73)	total: 11.8s	remaining: 1m 7s
74:	learn: 0.3391985	test: 0.3412604	best: 0.3412604 (74)	total: 12s	remaining: 1m 8s
75:	learn: 0.3373724	test: 0.3394769	best: 0.3394769 (75)	total: 12.3s	remaining: 1m 8s
76:	learn: 0.3366117	test: 0.3387030	best: 0.3387030 (76)	total: 12.4s	remaining: 1m 8s
77:	learn: 0.3348050	test: 0.3367925	best: 0.3367925 (77)	total: 12.7s	remaining: 1m 8s
78:	learn: 0.3340067	test: 0.3360200	best: 0.3360200 (78)	total: 12.8s	remaining: 1m 8s
79:	learn: 0.3324117	test: 0.3346240	best: 0.3346240 (79)	total: 13s	remaining: 1m 8s
80:	learn: 0.3318770	test: 0.3341243	best: 0.3341243 (80)	total: 13s	remaining: 1m 7s
81:	learn: 0.3299482	test: 0.3323004	best: 0.3323004 (81)	total: 13.3s	remaining: 1m 7s
82:	learn: 0.3280995	test: 0.3305127	best: 0.3305127 (82)	total: 13.5s	remaining: 1m 8s
83:	learn: 0.3275309	test: 0.3299455	best: 0.3299455 (83)	total: 13.6s	remaining: 1m 7s
84:	learn: 0.3268128	test: 0.3292123	best: 0.3292123 (84)	total: 13.7s	remaining: 1m 6s
85:	learn: 0.3260303	test: 0.3284730	best: 0.3284730 (85)	total: 13.8s	remaining: 1m 6s
86:	learn: 0.3249120	test: 0.3274614	best: 0.3274614 (86)	total: 13.9s	remaining: 1m 5s
87:	learn: 0.3238399	test: 0.3264150	best: 0.3264150 (87)	total: 14s	remaining: 1m 5s
88:	learn: 0.3229691	test: 0.3256074	best: 0.3256074 (88)	total: 14.2s	remaining: 1m 5s
89:	learn: 0.3217568	test: 0.3245293	best: 0.3245293 (89)	total: 14.5s	remaining: 1m 5s
90:	learn: 0.3213546	test: 0.3241506	best: 0.3241506 (90)	total: 14.5s	remaining: 1m 5s
91:	learn: 0.3197487	test: 0.3226441	best: 0.3226441 (91)	total: 14.8s	remaining: 1m 5s
92:	learn: 0.3191463	test: 0.3220848	best: 0.3220848 (92)	total: 14.9s	remaining: 1m 5s
93:	learn: 0.3183792	test: 0.3213480	best: 0.3213480 (93)	total: 15s	remaining: 1m 4s
94:	learn: 0.3170406	test: 0.3200545	best: 0.3200545 (94)	total: 15.2s	remaining: 1m 4s
95:	learn: 0.3161440	test: 0.3192445	best: 0.3192445 (95)	total: 15.5s	remaining: 1m 5s
96:	learn: 0.3156738	test: 0.3187952	best: 0.3187952 (96)	total: 15.6s	remaining: 1m 4s
97:	learn: 0.3150372	test: 0.3181797	best: 0.3181797 (97)	total: 15.7s	remaining: 1m 4s
98:	learn: 0.3137451	test: 0.3169695	best: 0.3169695 (98)	total: 16s	remaining: 1m 4s
99:	learn: 0.3127035	test: 0.3160290	best: 0.3160290 (99)	total: 16.1s	remaining: 1m 4s
100:	learn: 0.3121719	test: 0.3155270	best: 0.3155270 (100)	total: 16.4s	remaining: 1m 4s
101:	learn: 0.3116192	test: 0.3149907	best: 0.3149907 (101)	total: 16.5s	remaining: 1m 4s
102:	learn: 0.3103718	test: 0.3137401	best: 0.3137401 (102)	total: 16.7s	remaining: 1m 4s
103:	learn: 0.3089855	test: 0.3123467	best: 0.3123467 (103)	total: 17s	remaining: 1m 4s
104:	learn: 0.3084007	test: 0.3118331	best: 0.3118331 (104)	total: 17.2s	remaining: 1m 4s
105:	learn: 0.3073362	test: 0.3108699	best: 0.3108699 (105)	total: 17.4s	remaining: 1m 4s
106:	learn: 0.3061050	test: 0.3097382	best: 0.3097382 (106)	total: 17.7s	remaining: 1m 4s
107:	learn: 0.3057971	test: 0.3094593	best: 0.3094593 (107)	total: 17.7s	remaining: 1m 4s
108:	learn: 0.3049754	test: 0.3086612	best: 0.3086612 (108)	total: 18s	remaining: 1m 4s
109:	learn: 0.3044728	test: 0.3081898	best: 0.3081898 (109)	total: 18.2s	remaining: 1m 4s
110:	learn: 0.3035208	test: 0.3072797	best: 0.3072797 (110)	total: 18.5s	remaining: 1m 4s
111:	learn: 0.3024174	test: 0.3062549	best: 0.3062549 (111)	total: 18.7s	remaining: 1m 4s
112:	learn: 0.3014549	test: 0.3053967	best: 0.3053967 (112)	total: 18.9s	remaining: 1m 4s
113:	learn: 0.3009856	test: 0.3049859	best: 0.3049859 (113)	total: 19.2s	remaining: 1m 4s
114:	learn: 0.3006466	test: 0.3047149	best: 0.3047149 (114)	total: 19.3s	remaining: 1m 4s
115:	learn: 0.3003765	test: 0.3044637	best: 0.3044637 (115)	total: 19.4s	remaining: 1m 4s
116:	learn: 0.2999084	test: 0.3040319	best: 0.3040319 (116)	total: 19.5s	remaining: 1m 3s
117:	learn: 0.2995311	test: 0.3036845	best: 0.3036845 (117)	total: 19.8s	remaining: 1m 4s
118:	learn: 0.2992722	test: 0.3034263	best: 0.3034263 (118)	total: 19.9s	remaining: 1m 3s
119:	learn: 0.2981791	test: 0.3024214	best: 0.3024214 (119)	total: 20.2s	remaining: 1m 3s
120:	learn: 0.2975075	test: 0.3019061	best: 0.3019061 (120)	total: 20.4s	remaining: 1m 3s
121:	learn: 0.2966856	test: 0.3011307	best: 0.3011307 (121)	total: 20.6s	remaining: 1m 3s
122:	learn: 0.2958488	test: 0.3003683	best: 0.3003683 (122)	total: 20.9s	remaining: 1m 3s
123:	learn: 0.2954856	test: 0.3001167	best: 0.3001167 (123)	total: 21.1s	remaining: 1m 3s
124:	learn: 0.2950475	test: 0.2998025	best: 0.2998025 (124)	total: 21.3s	remaining: 1m 4s
125:	learn: 0.2942658	test: 0.2990337	best: 0.2990337 (125)	total: 21.6s	remaining: 1m 4s
126:	learn: 0.2939930	test: 0.2988234	best: 0.2988234 (126)	total: 21.7s	remaining: 1m 3s
127:	learn: 0.2935036	test: 0.2984365	best: 0.2984365 (127)	total: 21.9s	remaining: 1m 3s
128:	learn: 0.2930226	test: 0.2980475	best: 0.2980475 (128)	total: 22.2s	remaining: 1m 3s
129:	learn: 0.2922581	test: 0.2974082	best: 0.2974082 (129)	total: 22.4s	remaining: 1m 3s
130:	learn: 0.2915306	test: 0.2969728	best: 0.2969728 (130)	total: 22.7s	remaining: 1m 3s
131:	learn: 0.2913365	test: 0.2967842	best: 0.2967842 (131)	total: 22.7s	remaining: 1m 3s
132:	learn: 0.2909376	test: 0.2964248	best: 0.2964248 (132)	total: 22.8s	remaining: 1m 2s
133:	learn: 0.2902212	test: 0.2959070	best: 0.2959070 (133)	total: 23.1s	remaining: 1m 2s
134:	learn: 0.2896093	test: 0.2954219	best: 0.2954219 (134)	total: 23.3s	remaining: 1m 3s
135:	learn: 0.2890654	test: 0.2949973	best: 0.2949973 (135)	total: 23.6s	remaining: 1m 3s
136:	learn: 0.2883360	test: 0.2943362	best: 0.2943362 (136)	total: 23.8s	remaining: 1m 3s
137:	learn: 0.2876834	test: 0.2937296	best: 0.2937296 (137)	total: 24.1s	remaining: 1m 3s
138:	learn: 0.2873753	test: 0.2934409	best: 0.2934409 (138)	total: 24.1s	remaining: 1m 2s
139:	learn: 0.2870670	test: 0.2931903	best: 0.2931903 (139)	total: 24.4s	remaining: 1m 2s
140:	learn: 0.2867735	test: 0.2929515	best: 0.2929515 (140)	total: 24.7s	remaining: 1m 2s
141:	learn: 0.2864145	test: 0.2926289	best: 0.2926289 (141)	total: 24.8s	remaining: 1m 2s
142:	learn: 0.2860321	test: 0.2923601	best: 0.2923601 (142)	total: 25s	remaining: 1m 2s
143:	learn: 0.2855776	test: 0.2919532	best: 0.2919532 (143)	total: 25.3s	remaining: 1m 2s
144:	learn: 0.2850772	test: 0.2917086	best: 0.2917086 (144)	total: 25.6s	remaining: 1m 2s
145:	learn: 0.2847815	test: 0.2915100	best: 0.2915100 (145)	total: 25.8s	remaining: 1m 2s
146:	learn: 0.2840684	test: 0.2908387	best: 0.2908387 (146)	total: 26.1s	remaining: 1m 2s
147:	learn: 0.2835912	test: 0.2904646	best: 0.2904646 (147)	total: 26.4s	remaining: 1m 2s
148:	learn: 0.2833580	test: 0.2902518	best: 0.2902518 (148)	total: 26.4s	remaining: 1m 2s
149:	learn: 0.2828894	test: 0.2898904	best: 0.2898904 (149)	total: 26.7s	remaining: 1m 2s
150:	learn: 0.2826412	test: 0.2896687	best: 0.2896687 (150)	total: 26.8s	remaining: 1m 2s
151:	learn: 0.2822633	test: 0.2894227	best: 0.2894227 (151)	total: 27.1s	remaining: 1m 1s
152:	learn: 0.2818751	test: 0.2892169	best: 0.2892169 (152)	total: 27.3s	remaining: 1m 1s
153:	learn: 0.2814719	test: 0.2888768	best: 0.2888768 (153)	total: 27.5s	remaining: 1m 1s
154:	learn: 0.2809321	test: 0.2884754	best: 0.2884754 (154)	total: 27.7s	remaining: 1m 1s
155:	learn: 0.2804631	test: 0.2881106	best: 0.2881106 (155)	total: 27.9s	remaining: 1m 1s
156:	learn: 0.2798228	test: 0.2875379	best: 0.2875379 (156)	total: 28.2s	remaining: 1m 1s
157:	learn: 0.2796151	test: 0.2873621	best: 0.2873621 (157)	total: 28.4s	remaining: 1m 1s
158:	learn: 0.2793640	test: 0.2871283	best: 0.2871283 (158)	total: 28.5s	remaining: 1m 1s
159:	learn: 0.2790942	test: 0.2869239	best: 0.2869239 (159)	total: 28.7s	remaining: 1m 1s
160:	learn: 0.2786412	test: 0.2865703	best: 0.2865703 (160)	total: 29s	remaining: 1m 1s
161:	learn: 0.2781209	test: 0.2861092	best: 0.2861092 (161)	total: 29.3s	remaining: 1m 1s
162:	learn: 0.2779718	test: 0.2860047	best: 0.2860047 (162)	total: 29.5s	remaining: 1m 1s
163:	learn: 0.2776299	test: 0.2857070	best: 0.2857070 (163)	total: 29.6s	remaining: 1m
164:	learn: 0.2773579	test: 0.2855956	best: 0.2855956 (164)	total: 29.9s	remaining: 1m
165:	learn: 0.2770519	test: 0.2853765	best: 0.2853765 (165)	total: 30.2s	remaining: 1m
166:	learn: 0.2768036	test: 0.2852101	best: 0.2852101 (166)	total: 30.5s	remaining: 1m
167:	learn: 0.2766168	test: 0.2851094	best: 0.2851094 (167)	total: 30.8s	remaining: 1m
168:	learn: 0.2762739	test: 0.2849041	best: 0.2849041 (168)	total: 31s	remaining: 1m
169:	learn: 0.2759567	test: 0.2846953	best: 0.2846953 (169)	total: 31.3s	remaining: 1m
170:	learn: 0.2758515	test: 0.2846001	best: 0.2846001 (170)	total: 31.3s	remaining: 1m
171:	learn: 0.2753825	test: 0.2842253	best: 0.2842253 (171)	total: 31.6s	remaining: 1m
172:	learn: 0.2751976	test: 0.2841190	best: 0.2841190 (172)	total: 31.8s	remaining: 1m
173:	learn: 0.2749701	test: 0.2839406	best: 0.2839406 (173)	total: 32.1s	remaining: 1m
174:	learn: 0.2746731	test: 0.2837126	best: 0.2837126 (174)	total: 32.4s	remaining: 1m
175:	learn: 0.2743865	test: 0.2835342	best: 0.2835342 (175)	total: 32.6s	remaining: 1m
176:	learn: 0.2739118	test: 0.2832004	best: 0.2832004 (176)	total: 32.9s	remaining: 1m
177:	learn: 0.2735483	test: 0.2830160	best: 0.2830160 (177)	total: 33.2s	remaining: 60s
178:	learn: 0.2731294	test: 0.2826306	best: 0.2826306 (178)	total: 33.4s	remaining: 60s
179:	learn: 0.2729406	test: 0.2825338	best: 0.2825338 (179)	total: 33.7s	remaining: 59.9s
180:	learn: 0.2725748	test: 0.2822846	best: 0.2822846 (180)	total: 33.9s	remaining: 59.8s
181:	learn: 0.2723596	test: 0.2821037	best: 0.2821037 (181)	total: 34s	remaining: 59.5s
182:	learn: 0.2721358	test: 0.2819539	best: 0.2819539 (182)	total: 34.3s	remaining: 59.4s
183:	learn: 0.2717545	test: 0.2816666	best: 0.2816666 (183)	total: 34.4s	remaining: 59.1s
184:	learn: 0.2716326	test: 0.2815385	best: 0.2815385 (184)	total: 34.4s	remaining: 58.6s
185:	learn: 0.2714154	test: 0.2814068	best: 0.2814068 (185)	total: 34.7s	remaining: 58.5s
186:	learn: 0.2711732	test: 0.2812271	best: 0.2812271 (186)	total: 34.8s	remaining: 58.2s
187:	learn: 0.2708098	test: 0.2809988	best: 0.2809988 (187)	total: 35s	remaining: 58s
188:	learn: 0.2703706	test: 0.2806192	best: 0.2806192 (188)	total: 35.2s	remaining: 57.9s
189:	learn: 0.2703106	test: 0.2805603	best: 0.2805603 (189)	total: 35.3s	remaining: 57.5s
190:	learn: 0.2701618	test: 0.2804451	best: 0.2804451 (190)	total: 35.3s	remaining: 57.1s
191:	learn: 0.2698119	test: 0.2801589	best: 0.2801589 (191)	total: 35.5s	remaining: 57s
192:	learn: 0.2695941	test: 0.2800623	best: 0.2800623 (192)	total: 35.8s	remaining: 56.9s
193:	learn: 0.2695261	test: 0.2800076	best: 0.2800076 (193)	total: 35.8s	remaining: 56.5s
194:	learn: 0.2693427	test: 0.2798824	best: 0.2798824 (194)	total: 35.9s	remaining: 56.2s
195:	learn: 0.2691514	test: 0.2797682	best: 0.2797682 (195)	total: 36.1s	remaining: 55.9s
196:	learn: 0.2688871	test: 0.2795863	best: 0.2795863 (196)	total: 36.3s	remaining: 55.8s
197:	learn: 0.2687149	test: 0.2794886	best: 0.2794886 (197)	total: 36.5s	remaining: 55.7s
198:	learn: 0.2683892	test: 0.2792350	best: 0.2792350 (198)	total: 36.7s	remaining: 55.6s
199:	learn: 0.2679915	test: 0.2788696	best: 0.2788696 (199)	total: 36.9s	remaining: 55.3s
200:	learn: 0.2678128	test: 0.2787880	best: 0.2787880 (200)	total: 37.1s	remaining: 55.2s
201:	learn: 0.2676705	test: 0.2787139	best: 0.2787139 (201)	total: 37.2s	remaining: 54.9s
202:	learn: 0.2673844	test: 0.2784969	best: 0.2784969 (202)	total: 37.5s	remaining: 54.8s
203:	learn: 0.2671347	test: 0.2783451	best: 0.2783451 (203)	total: 37.7s	remaining: 54.7s
204:	learn: 0.2669106	test: 0.2782217	best: 0.2782217 (204)	total: 38s	remaining: 54.7s
205:	learn: 0.2666143	test: 0.2779924	best: 0.2779924 (205)	total: 38.2s	remaining: 54.6s
206:	learn: 0.2663591	test: 0.2778844	best: 0.2778844 (206)	total: 38.5s	remaining: 54.4s
207:	learn: 0.2662829	test: 0.2778308	best: 0.2778308 (207)	total: 38.5s	remaining: 54.1s
208:	learn: 0.2660593	test: 0.2776688	best: 0.2776688 (208)	total: 38.8s	remaining: 54s
209:	learn: 0.2659014	test: 0.2775645	best: 0.2775645 (209)	total: 38.9s	remaining: 53.8s
210:	learn: 0.2656960	test: 0.2774549	best: 0.2774549 (210)	total: 39.2s	remaining: 53.7s
211:	learn: 0.2655184	test: 0.2773607	best: 0.2773607 (211)	total: 39.5s	remaining: 53.6s
212:	learn: 0.2652921	test: 0.2772309	best: 0.2772309 (212)	total: 39.7s	remaining: 53.5s
213:	learn: 0.2650660	test: 0.2770898	best: 0.2770898 (213)	total: 40s	remaining: 53.4s
214:	learn: 0.2649240	test: 0.2769778	best: 0.2769778 (214)	total: 40.3s	remaining: 53.4s
215:	learn: 0.2647669	test: 0.2768956	best: 0.2768956 (215)	total: 40.5s	remaining: 53.3s
216:	learn: 0.2644993	test: 0.2766696	best: 0.2766696 (216)	total: 40.7s	remaining: 53.1s
217:	learn: 0.2643713	test: 0.2766219	best: 0.2766219 (217)	total: 40.8s	remaining: 52.8s
218:	learn: 0.2641893	test: 0.2765210	best: 0.2765210 (218)	total: 41.1s	remaining: 52.7s
219:	learn: 0.2640796	test: 0.2764232	best: 0.2764232 (219)	total: 41.1s	remaining: 52.4s
220:	learn: 0.2637977	test: 0.2762704	best: 0.2762704 (220)	total: 41.4s	remaining: 52.3s
221:	learn: 0.2636729	test: 0.2761730	best: 0.2761730 (221)	total: 41.5s	remaining: 51.9s
222:	learn: 0.2634984	test: 0.2760467	best: 0.2760467 (222)	total: 41.7s	remaining: 51.8s
223:	learn: 0.2634744	test: 0.2760364	best: 0.2760364 (223)	total: 41.8s	remaining: 51.5s
224:	learn: 0.2633352	test: 0.2759403	best: 0.2759403 (224)	total: 42.1s	remaining: 51.4s
225:	learn: 0.2631122	test: 0.2757529	best: 0.2757529 (225)	total: 42.3s	remaining: 51.3s
226:	learn: 0.2628669	test: 0.2756053	best: 0.2756053 (226)	total: 42.6s	remaining: 51.2s
227:	learn: 0.2627452	test: 0.2755281	best: 0.2755281 (227)	total: 42.8s	remaining: 51.1s
228:	learn: 0.2624766	test: 0.2753156	best: 0.2753156 (228)	total: 43.1s	remaining: 51s
229:	learn: 0.2623731	test: 0.2752708	best: 0.2752708 (229)	total: 43.3s	remaining: 50.8s
230:	learn: 0.2622359	test: 0.2752226	best: 0.2752226 (230)	total: 43.5s	remaining: 50.7s
231:	learn: 0.2620457	test: 0.2751081	best: 0.2751081 (231)	total: 43.8s	remaining: 50.5s
232:	learn: 0.2617995	test: 0.2749046	best: 0.2749046 (232)	total: 43.9s	remaining: 50.3s
233:	learn: 0.2614932	test: 0.2746838	best: 0.2746838 (233)	total: 44s	remaining: 50s
234:	learn: 0.2614216	test: 0.2746388	best: 0.2746388 (234)	total: 44.3s	remaining: 50s
235:	learn: 0.2611291	test: 0.2743755	best: 0.2743755 (235)	total: 44.6s	remaining: 49.8s
236:	learn: 0.2610116	test: 0.2743155	best: 0.2743155 (236)	total: 44.8s	remaining: 49.7s
237:	learn: 0.2608787	test: 0.2742808	best: 0.2742808 (237)	total: 45.1s	remaining: 49.6s
238:	learn: 0.2607071	test: 0.2742059	best: 0.2742059 (238)	total: 45.4s	remaining: 49.5s
239:	learn: 0.2605236	test: 0.2741272	best: 0.2741272 (239)	total: 45.6s	remaining: 49.4s
240:	learn: 0.2602706	test: 0.2739185	best: 0.2739185 (240)	total: 45.8s	remaining: 49.3s
241:	learn: 0.2602349	test: 0.2738913	best: 0.2738913 (241)	total: 45.9s	remaining: 49s
242:	learn: 0.2601436	test: 0.2738275	best: 0.2738275 (242)	total: 46.2s	remaining: 48.9s
243:	learn: 0.2599723	test: 0.2737149	best: 0.2737149 (243)	total: 46.4s	remaining: 48.7s
244:	learn: 0.2598175	test: 0.2736880	best: 0.2736880 (244)	total: 46.7s	remaining: 48.6s
245:	learn: 0.2597669	test: 0.2736530	best: 0.2736530 (245)	total: 46.8s	remaining: 48.3s
246:	learn: 0.2596640	test: 0.2735918	best: 0.2735918 (246)	total: 46.9s	remaining: 48s
247:	learn: 0.2595437	test: 0.2735548	best: 0.2735548 (247)	total: 47.1s	remaining: 47.9s
248:	learn: 0.2594856	test: 0.2735244	best: 0.2735244 (248)	total: 47.2s	remaining: 47.6s
249:	learn: 0.2593804	test: 0.2735069	best: 0.2735069 (249)	total: 47.5s	remaining: 47.5s
250:	learn: 0.2592281	test: 0.2734633	best: 0.2734633 (250)	total: 47.7s	remaining: 47.4s
251:	learn: 0.2590305	test: 0.2733131	best: 0.2733131 (251)	total: 48s	remaining: 47.2s
252:	learn: 0.2588429	test: 0.2731905	best: 0.2731905 (252)	total: 48.3s	remaining: 47.2s
253:	learn: 0.2586883	test: 0.2731442	best: 0.2731442 (253)	total: 48.6s	remaining: 47s
254:	learn: 0.2585693	test: 0.2730705	best: 0.2730705 (254)	total: 48.7s	remaining: 46.8s
255:	learn: 0.2584463	test: 0.2729904	best: 0.2729904 (255)	total: 49s	remaining: 46.7s
256:	learn: 0.2583196	test: 0.2729326	best: 0.2729326 (256)	total: 49.3s	remaining: 46.6s
257:	learn: 0.2582639	test: 0.2728972	best: 0.2728972 (257)	total: 49.4s	remaining: 46.4s
258:	learn: 0.2581005	test: 0.2727688	best: 0.2727688 (258)	total: 49.6s	remaining: 46.1s
259:	learn: 0.2580115	test: 0.2727193	best: 0.2727193 (259)	total: 49.7s	remaining: 45.9s
260:	learn: 0.2578855	test: 0.2726699	best: 0.2726699 (260)	total: 49.9s	remaining: 45.7s
261:	learn: 0.2577297	test: 0.2725904	best: 0.2725904 (261)	total: 50.2s	remaining: 45.6s
262:	learn: 0.2576396	test: 0.2725360	best: 0.2725360 (262)	total: 50.5s	remaining: 45.5s
263:	learn: 0.2574989	test: 0.2725090	best: 0.2725090 (263)	total: 50.8s	remaining: 45.4s
264:	learn: 0.2574503	test: 0.2724711	best: 0.2724711 (264)	total: 50.9s	remaining: 45.2s
265:	learn: 0.2573532	test: 0.2724104	best: 0.2724104 (265)	total: 51.2s	remaining: 45.1s
266:	learn: 0.2572256	test: 0.2723495	best: 0.2723495 (266)	total: 51.5s	remaining: 45s
267:	learn: 0.2571457	test: 0.2723096	best: 0.2723096 (267)	total: 51.8s	remaining: 44.8s
268:	learn: 0.2570659	test: 0.2722625	best: 0.2722625 (268)	total: 52s	remaining: 44.7s
269:	learn: 0.2569564	test: 0.2722391	best: 0.2722391 (269)	total: 52.3s	remaining: 44.5s
270:	learn: 0.2569121	test: 0.2722214	best: 0.2722214 (270)	total: 52.4s	remaining: 44.3s
271:	learn: 0.2567455	test: 0.2721463	best: 0.2721463 (271)	total: 52.7s	remaining: 44.2s
272:	learn: 0.2566977	test: 0.2721213	best: 0.2721213 (272)	total: 52.8s	remaining: 43.9s
273:	learn: 0.2564954	test: 0.2720494	best: 0.2720494 (273)	total: 53s	remaining: 43.7s
274:	learn: 0.2563976	test: 0.2720002	best: 0.2720002 (274)	total: 53.2s	remaining: 43.5s
275:	learn: 0.2562842	test: 0.2719510	best: 0.2719510 (275)	total: 53.5s	remaining: 43.4s
276:	learn: 0.2562080	test: 0.2719215	best: 0.2719215 (276)	total: 53.8s	remaining: 43.3s
277:	learn: 0.2561633	test: 0.2719021	best: 0.2719021 (277)	total: 53.9s	remaining: 43s
278:	learn: 0.2561130	test: 0.2718698	best: 0.2718698 (278)	total: 54.1s	remaining: 42.8s
279:	learn: 0.2559764	test: 0.2718405	best: 0.2718405 (279)	total: 54.3s	remaining: 42.7s
280:	learn: 0.2558884	test: 0.2717923	best: 0.2717923 (280)	total: 54.6s	remaining: 42.5s
281:	learn: 0.2557700	test: 0.2716865	best: 0.2716865 (281)	total: 54.7s	remaining: 42.3s
282:	learn: 0.2556615	test: 0.2715942	best: 0.2715942 (282)	total: 54.8s	remaining: 42s
283:	learn: 0.2556263	test: 0.2715792	best: 0.2715792 (283)	total: 54.9s	remaining: 41.7s
284:	learn: 0.2555089	test: 0.2715271	best: 0.2715271 (284)	total: 55.2s	remaining: 41.6s
285:	learn: 0.2554592	test: 0.2715123	best: 0.2715123 (285)	total: 55.3s	remaining: 41.3s
286:	learn: 0.2553529	test: 0.2714606	best: 0.2714606 (286)	total: 55.5s	remaining: 41.2s
287:	learn: 0.2552948	test: 0.2714409	best: 0.2714409 (287)	total: 55.6s	remaining: 40.9s
288:	learn: 0.2551043	test: 0.2713170	best: 0.2713170 (288)	total: 55.9s	remaining: 40.8s
289:	learn: 0.2549711	test: 0.2712842	best: 0.2712842 (289)	total: 56.2s	remaining: 40.7s
290:	learn: 0.2548032	test: 0.2711477	best: 0.2711477 (290)	total: 56.3s	remaining: 40.4s
291:	learn: 0.2547668	test: 0.2711385	best: 0.2711385 (291)	total: 56.5s	remaining: 40.2s
292:	learn: 0.2547147	test: 0.2711187	best: 0.2711187 (292)	total: 56.6s	remaining: 40s
293:	learn: 0.2546518	test: 0.2710947	best: 0.2710947 (293)	total: 56.7s	remaining: 39.7s
294:	learn: 0.2545556	test: 0.2710715	best: 0.2710715 (294)	total: 57s	remaining: 39.6s
295:	learn: 0.2544811	test: 0.2710407	best: 0.2710407 (295)	total: 57.1s	remaining: 39.3s
296:	learn: 0.2544398	test: 0.2710179	best: 0.2710179 (296)	total: 57.1s	remaining: 39.1s
297:	learn: 0.2541810	test: 0.2708644	best: 0.2708644 (297)	total: 57.4s	remaining: 38.9s
298:	learn: 0.2541051	test: 0.2708244	best: 0.2708244 (298)	total: 57.5s	remaining: 38.7s
299:	learn: 0.2540272	test: 0.2708083	best: 0.2708083 (299)	total: 57.6s	remaining: 38.4s
300:	learn: 0.2538328	test: 0.2707201	best: 0.2707201 (300)	total: 57.9s	remaining: 38.3s
301:	learn: 0.2537203	test: 0.2707046	best: 0.2707046 (301)	total: 58.1s	remaining: 38.1s
302:	learn: 0.2536495	test: 0.2706658	best: 0.2706658 (302)	total: 58.4s	remaining: 38s
303:	learn: 0.2535586	test: 0.2706376	best: 0.2706376 (303)	total: 58.7s	remaining: 37.8s
304:	learn: 0.2534915	test: 0.2706229	best: 0.2706229 (304)	total: 58.9s	remaining: 37.7s
305:	learn: 0.2534010	test: 0.2706041	best: 0.2706041 (305)	total: 59.1s	remaining: 37.4s
306:	learn: 0.2533210	test: 0.2705813	best: 0.2705813 (306)	total: 59.2s	remaining: 37.2s
307:	learn: 0.2532712	test: 0.2705612	best: 0.2705612 (307)	total: 59.3s	remaining: 37s
308:	learn: 0.2531826	test: 0.2705411	best: 0.2705411 (308)	total: 59.6s	remaining: 36.8s
309:	learn: 0.2531406	test: 0.2705308	best: 0.2705308 (309)	total: 59.7s	remaining: 36.6s
310:	learn: 0.2530511	test: 0.2705255	best: 0.2705255 (310)	total: 60s	remaining: 36.4s
311:	learn: 0.2529049	test: 0.2704534	best: 0.2704534 (311)	total: 1m	remaining: 36.3s
312:	learn: 0.2527882	test: 0.2704296	best: 0.2704296 (312)	total: 1m	remaining: 36.2s
313:	learn: 0.2526867	test: 0.2703858	best: 0.2703858 (313)	total: 1m	remaining: 36s
314:	learn: 0.2526244	test: 0.2703794	best: 0.2703794 (314)	total: 1m	remaining: 35.7s
315:	learn: 0.2525121	test: 0.2703323	best: 0.2703323 (315)	total: 1m 1s	remaining: 35.6s
316:	learn: 0.2524920	test: 0.2703182	best: 0.2703182 (316)	total: 1m 1s	remaining: 35.3s
317:	learn: 0.2524378	test: 0.2702894	best: 0.2702894 (317)	total: 1m 1s	remaining: 35.2s
318:	learn: 0.2523190	test: 0.2702396	best: 0.2702396 (318)	total: 1m 1s	remaining: 35s
319:	learn: 0.2522709	test: 0.2702313	best: 0.2702313 (319)	total: 1m 1s	remaining: 34.8s
320:	learn: 0.2521872	test: 0.2702089	best: 0.2702089 (320)	total: 1m 2s	remaining: 34.6s
321:	learn: 0.2520313	test: 0.2700791	best: 0.2700791 (321)	total: 1m 2s	remaining: 34.4s
322:	learn: 0.2519295	test: 0.2700596	best: 0.2700596 (322)	total: 1m 2s	remaining: 34.3s
323:	learn: 0.2518725	test: 0.2700452	best: 0.2700452 (323)	total: 1m 2s	remaining: 34.1s
324:	learn: 0.2517783	test: 0.2700131	best: 0.2700131 (324)	total: 1m 2s	remaining: 33.9s
325:	learn: 0.2515790	test: 0.2699072	best: 0.2699072 (325)	total: 1m 3s	remaining: 33.7s
326:	learn: 0.2515132	test: 0.2698638	best: 0.2698638 (326)	total: 1m 3s	remaining: 33.5s
327:	learn: 0.2514737	test: 0.2698584	best: 0.2698584 (327)	total: 1m 3s	remaining: 33.2s
328:	learn: 0.2514304	test: 0.2698387	best: 0.2698387 (328)	total: 1m 3s	remaining: 33.1s
329:	learn: 0.2514110	test: 0.2698279	best: 0.2698279 (329)	total: 1m 3s	remaining: 32.8s
330:	learn: 0.2513692	test: 0.2698050	best: 0.2698050 (330)	total: 1m 3s	remaining: 32.6s
331:	learn: 0.2512639	test: 0.2697536	best: 0.2697536 (331)	total: 1m 4s	remaining: 32.4s
332:	learn: 0.2512229	test: 0.2697415	best: 0.2697415 (332)	total: 1m 4s	remaining: 32.2s
333:	learn: 0.2511909	test: 0.2697195	best: 0.2697195 (333)	total: 1m 4s	remaining: 31.9s
334:	learn: 0.2510345	test: 0.2696256	best: 0.2696256 (334)	total: 1m 4s	remaining: 31.7s
335:	learn: 0.2509829	test: 0.2696100	best: 0.2696100 (335)	total: 1m 4s	remaining: 31.5s
336:	learn: 0.2509752	test: 0.2696098	best: 0.2696098 (336)	total: 1m 4s	remaining: 31.2s
337:	learn: 0.2509237	test: 0.2696006	best: 0.2696006 (337)	total: 1m 4s	remaining: 31s
338:	learn: 0.2508163	test: 0.2695425	best: 0.2695425 (338)	total: 1m 5s	remaining: 30.9s
339:	learn: 0.2507285	test: 0.2694808	best: 0.2694808 (339)	total: 1m 5s	remaining: 30.6s
340:	learn: 0.2507018	test: 0.2694693	best: 0.2694693 (340)	total: 1m 5s	remaining: 30.4s
341:	learn: 0.2506504	test: 0.2694391	best: 0.2694391 (341)	total: 1m 5s	remaining: 30.2s
342:	learn: 0.2506145	test: 0.2694385	best: 0.2694385 (342)	total: 1m 5s	remaining: 30s
343:	learn: 0.2505489	test: 0.2694382	best: 0.2694382 (343)	total: 1m 5s	remaining: 29.9s
344:	learn: 0.2505189	test: 0.2694199	best: 0.2694199 (344)	total: 1m 5s	remaining: 29.6s
345:	learn: 0.2504750	test: 0.2694148	best: 0.2694148 (345)	total: 1m 6s	remaining: 29.4s
346:	learn: 0.2504069	test: 0.2693643	best: 0.2693643 (346)	total: 1m 6s	remaining: 29.2s
347:	learn: 0.2501878	test: 0.2692013	best: 0.2692013 (347)	total: 1m 6s	remaining: 29.1s
348:	learn: 0.2501802	test: 0.2691991	best: 0.2691991 (348)	total: 1m 6s	remaining: 28.8s
349:	learn: 0.2501496	test: 0.2691993	best: 0.2691991 (348)	total: 1m 6s	remaining: 28.7s
350:	learn: 0.2500712	test: 0.2691735	best: 0.2691735 (350)	total: 1m 7s	remaining: 28.5s
351:	learn: 0.2500113	test: 0.2691726	best: 0.2691726 (351)	total: 1m 7s	remaining: 28.4s
352:	learn: 0.2499601	test: 0.2691673	best: 0.2691673 (352)	total: 1m 7s	remaining: 28.1s
353:	learn: 0.2499383	test: 0.2691538	best: 0.2691538 (353)	total: 1m 7s	remaining: 27.9s
354:	learn: 0.2498996	test: 0.2691380	best: 0.2691380 (354)	total: 1m 7s	remaining: 27.7s
355:	learn: 0.2498102	test: 0.2691190	best: 0.2691190 (355)	total: 1m 7s	remaining: 27.5s
356:	learn: 0.2497396	test: 0.2690913	best: 0.2690913 (356)	total: 1m 8s	remaining: 27.4s
357:	learn: 0.2496778	test: 0.2690807	best: 0.2690807 (357)	total: 1m 8s	remaining: 27.2s
358:	learn: 0.2495708	test: 0.2690527	best: 0.2690527 (358)	total: 1m 8s	remaining: 27s
359:	learn: 0.2494921	test: 0.2690382	best: 0.2690382 (359)	total: 1m 9s	remaining: 26.9s
360:	learn: 0.2494542	test: 0.2690234	best: 0.2690234 (360)	total: 1m 9s	remaining: 26.7s
361:	learn: 0.2494192	test: 0.2690102	best: 0.2690102 (361)	total: 1m 9s	remaining: 26.5s
362:	learn: 0.2493477	test: 0.2689754	best: 0.2689754 (362)	total: 1m 9s	remaining: 26.3s
363:	learn: 0.2493219	test: 0.2689629	best: 0.2689629 (363)	total: 1m 9s	remaining: 26.1s
364:	learn: 0.2492999	test: 0.2689533	best: 0.2689533 (364)	total: 1m 9s	remaining: 25.9s
365:	learn: 0.2492384	test: 0.2689409	best: 0.2689409 (365)	total: 1m 10s	remaining: 25.7s
366:	learn: 0.2492136	test: 0.2689497	best: 0.2689409 (365)	total: 1m 10s	remaining: 25.5s
367:	learn: 0.2491826	test: 0.2689416	best: 0.2689409 (365)	total: 1m 10s	remaining: 25.2s
368:	learn: 0.2491210	test: 0.2688803	best: 0.2688803 (368)	total: 1m 10s	remaining: 25s
369:	learn: 0.2490576	test: 0.2688720	best: 0.2688720 (369)	total: 1m 10s	remaining: 24.9s
370:	learn: 0.2489973	test: 0.2688570	best: 0.2688570 (370)	total: 1m 11s	remaining: 24.7s
371:	learn: 0.2488961	test: 0.2688056	best: 0.2688056 (371)	total: 1m 11s	remaining: 24.6s
372:	learn: 0.2488551	test: 0.2687829	best: 0.2687829 (372)	total: 1m 11s	remaining: 24.3s
373:	learn: 0.2487541	test: 0.2687196	best: 0.2687196 (373)	total: 1m 11s	remaining: 24.2s
374:	learn: 0.2487386	test: 0.2687159	best: 0.2687159 (374)	total: 1m 11s	remaining: 23.9s
375:	learn: 0.2486257	test: 0.2686818	best: 0.2686818 (375)	total: 1m 12s	remaining: 23.8s
376:	learn: 0.2484617	test: 0.2686481	best: 0.2686481 (376)	total: 1m 12s	remaining: 23.6s
377:	learn: 0.2484026	test: 0.2686078	best: 0.2686078 (377)	total: 1m 12s	remaining: 23.4s
378:	learn: 0.2483480	test: 0.2685902	best: 0.2685902 (378)	total: 1m 12s	remaining: 23.2s
379:	learn: 0.2483092	test: 0.2685708	best: 0.2685708 (379)	total: 1m 12s	remaining: 23s
380:	learn: 0.2482213	test: 0.2685176	best: 0.2685176 (380)	total: 1m 13s	remaining: 22.8s
381:	learn: 0.2481990	test: 0.2685029	best: 0.2685029 (381)	total: 1m 13s	remaining: 22.6s
382:	learn: 0.2481595	test: 0.2684846	best: 0.2684846 (382)	total: 1m 13s	remaining: 22.4s
383:	learn: 0.2480560	test: 0.2684552	best: 0.2684552 (383)	total: 1m 13s	remaining: 22.3s
384:	learn: 0.2480336	test: 0.2684467	best: 0.2684467 (384)	total: 1m 13s	remaining: 22.1s
385:	learn: 0.2480216	test: 0.2684372	best: 0.2684372 (385)	total: 1m 13s	remaining: 21.8s
386:	learn: 0.2480010	test: 0.2684324	best: 0.2684324 (386)	total: 1m 14s	remaining: 21.6s
387:	learn: 0.2479769	test: 0.2684248	best: 0.2684248 (387)	total: 1m 14s	remaining: 21.4s
388:	learn: 0.2479661	test: 0.2684245	best: 0.2684245 (388)	total: 1m 14s	remaining: 21.2s
389:	learn: 0.2479427	test: 0.2684161	best: 0.2684161 (389)	total: 1m 14s	remaining: 21s
390:	learn: 0.2479307	test: 0.2684134	best: 0.2684134 (390)	total: 1m 14s	remaining: 20.8s
391:	learn: 0.2477774	test: 0.2683422	best: 0.2683422 (391)	total: 1m 14s	remaining: 20.6s
392:	learn: 0.2477174	test: 0.2683187	best: 0.2683187 (392)	total: 1m 15s	remaining: 20.4s
393:	learn: 0.2476524	test: 0.2682954	best: 0.2682954 (393)	total: 1m 15s	remaining: 20.2s
394:	learn: 0.2476085	test: 0.2682754	best: 0.2682754 (394)	total: 1m 15s	remaining: 20s
395:	learn: 0.2475135	test: 0.2682562	best: 0.2682562 (395)	total: 1m 15s	remaining: 19.9s
396:	learn: 0.2475020	test: 0.2682473	best: 0.2682473 (396)	total: 1m 15s	remaining: 19.6s
397:	learn: 0.2474498	test: 0.2682401	best: 0.2682401 (397)	total: 1m 15s	remaining: 19.5s
398:	learn: 0.2474039	test: 0.2682267	best: 0.2682267 (398)	total: 1m 16s	remaining: 19.3s
399:	learn: 0.2473059	test: 0.2681728	best: 0.2681728 (399)	total: 1m 16s	remaining: 19.1s
400:	learn: 0.2472677	test: 0.2681598	best: 0.2681598 (400)	total: 1m 16s	remaining: 18.9s
401:	learn: 0.2472382	test: 0.2681500	best: 0.2681500 (401)	total: 1m 16s	remaining: 18.7s
402:	learn: 0.2472171	test: 0.2681466	best: 0.2681466 (402)	total: 1m 16s	remaining: 18.5s
403:	learn: 0.2472017	test: 0.2681263	best: 0.2681263 (403)	total: 1m 17s	remaining: 18.3s
404:	learn: 0.2471895	test: 0.2681224	best: 0.2681224 (404)	total: 1m 17s	remaining: 18.1s
405:	learn: 0.2471656	test: 0.2681141	best: 0.2681141 (405)	total: 1m 17s	remaining: 17.9s
406:	learn: 0.2470517	test: 0.2680836	best: 0.2680836 (406)	total: 1m 17s	remaining: 17.7s
407:	learn: 0.2470464	test: 0.2680835	best: 0.2680835 (407)	total: 1m 17s	remaining: 17.5s
408:	learn: 0.2470407	test: 0.2680834	best: 0.2680834 (408)	total: 1m 17s	remaining: 17.3s
409:	learn: 0.2470149	test: 0.2680765	best: 0.2680765 (409)	total: 1m 17s	remaining: 17.1s
410:	learn: 0.2469951	test: 0.2680669	best: 0.2680669 (410)	total: 1m 17s	remaining: 16.9s
411:	learn: 0.2469817	test: 0.2680595	best: 0.2680595 (411)	total: 1m 18s	remaining: 16.7s
412:	learn: 0.2468831	test: 0.2680010	best: 0.2680010 (412)	total: 1m 18s	remaining: 16.5s
413:	learn: 0.2468296	test: 0.2679662	best: 0.2679662 (413)	total: 1m 18s	remaining: 16.3s
414:	learn: 0.2468143	test: 0.2679522	best: 0.2679522 (414)	total: 1m 18s	remaining: 16.1s
415:	learn: 0.2468087	test: 0.2679474	best: 0.2679474 (415)	total: 1m 18s	remaining: 15.9s
416:	learn: 0.2467781	test: 0.2679371	best: 0.2679371 (416)	total: 1m 18s	remaining: 15.7s
417:	learn: 0.2466914	test: 0.2679182	best: 0.2679182 (417)	total: 1m 19s	remaining: 15.5s
418:	learn: 0.2466766	test: 0.2679172	best: 0.2679172 (418)	total: 1m 19s	remaining: 15.3s
419:	learn: 0.2465860	test: 0.2678728	best: 0.2678728 (419)	total: 1m 19s	remaining: 15.1s
420:	learn: 0.2465633	test: 0.2678659	best: 0.2678659 (420)	total: 1m 19s	remaining: 14.9s
421:	learn: 0.2465232	test: 0.2678519	best: 0.2678519 (421)	total: 1m 19s	remaining: 14.8s
422:	learn: 0.2465031	test: 0.2678360	best: 0.2678360 (422)	total: 1m 19s	remaining: 14.5s
423:	learn: 0.2464921	test: 0.2678361	best: 0.2678360 (422)	total: 1m 20s	remaining: 14.3s
424:	learn: 0.2464859	test: 0.2678360	best: 0.2678360 (422)	total: 1m 20s	remaining: 14.1s
425:	learn: 0.2464665	test: 0.2678206	best: 0.2678206 (425)	total: 1m 20s	remaining: 13.9s
426:	learn: 0.2464077	test: 0.2677917	best: 0.2677917 (426)	total: 1m 20s	remaining: 13.8s
427:	learn: 0.2463790	test: 0.2677775	best: 0.2677775 (427)	total: 1m 20s	remaining: 13.6s
428:	learn: 0.2463736	test: 0.2677770	best: 0.2677770 (428)	total: 1m 20s	remaining: 13.4s
429:	learn: 0.2463468	test: 0.2677604	best: 0.2677604 (429)	total: 1m 20s	remaining: 13.2s
430:	learn: 0.2463425	test: 0.2677607	best: 0.2677604 (429)	total: 1m 20s	remaining: 12.9s
431:	learn: 0.2463057	test: 0.2677445	best: 0.2677445 (431)	total: 1m 20s	remaining: 12.7s
432:	learn: 0.2462856	test: 0.2677480	best: 0.2677445 (431)	total: 1m 21s	remaining: 12.6s
433:	learn: 0.2462685	test: 0.2677376	best: 0.2677376 (433)	total: 1m 21s	remaining: 12.4s
434:	learn: 0.2462544	test: 0.2677361	best: 0.2677361 (434)	total: 1m 21s	remaining: 12.2s
435:	learn: 0.2462421	test: 0.2677270	best: 0.2677270 (435)	total: 1m 21s	remaining: 12s
436:	learn: 0.2461251	test: 0.2677011	best: 0.2677011 (436)	total: 1m 21s	remaining: 11.8s
437:	learn: 0.2460628	test: 0.2676833	best: 0.2676833 (437)	total: 1m 21s	remaining: 11.6s
438:	learn: 0.2460088	test: 0.2676806	best: 0.2676806 (438)	total: 1m 22s	remaining: 11.4s
439:	learn: 0.2459916	test: 0.2676714	best: 0.2676714 (439)	total: 1m 22s	remaining: 11.2s
440:	learn: 0.2459905	test: 0.2676713	best: 0.2676713 (440)	total: 1m 22s	remaining: 11s
441:	learn: 0.2459098	test: 0.2676608	best: 0.2676608 (441)	total: 1m 22s	remaining: 10.9s
442:	learn: 0.2458848	test: 0.2676491	best: 0.2676491 (442)	total: 1m 22s	remaining: 10.7s
443:	learn: 0.2458754	test: 0.2676501	best: 0.2676491 (442)	total: 1m 22s	remaining: 10.5s
444:	learn: 0.2458651	test: 0.2676517	best: 0.2676491 (442)	total: 1m 23s	remaining: 10.3s
445:	learn: 0.2458101	test: 0.2676334	best: 0.2676334 (445)	total: 1m 23s	remaining: 10.1s
446:	learn: 0.2457981	test: 0.2676275	best: 0.2676275 (446)	total: 1m 23s	remaining: 9.88s
447:	learn: 0.2457901	test: 0.2676256	best: 0.2676256 (447)	total: 1m 23s	remaining: 9.68s
448:	learn: 0.2457785	test: 0.2676253	best: 0.2676253 (448)	total: 1m 23s	remaining: 9.48s
449:	learn: 0.2457759	test: 0.2676257	best: 0.2676253 (448)	total: 1m 23s	remaining: 9.29s
450:	learn: 0.2457529	test: 0.2676242	best: 0.2676242 (450)	total: 1m 23s	remaining: 9.09s
451:	learn: 0.2456787	test: 0.2675587	best: 0.2675587 (451)	total: 1m 23s	remaining: 8.89s
452:	learn: 0.2456311	test: 0.2675490	best: 0.2675490 (452)	total: 1m 23s	remaining: 8.7s
453:	learn: 0.2455747	test: 0.2675402	best: 0.2675402 (453)	total: 1m 24s	remaining: 8.52s
454:	learn: 0.2455567	test: 0.2675330	best: 0.2675330 (454)	total: 1m 24s	remaining: 8.35s
455:	learn: 0.2455330	test: 0.2675257	best: 0.2675257 (455)	total: 1m 24s	remaining: 8.15s
456:	learn: 0.2454600	test: 0.2675194	best: 0.2675194 (456)	total: 1m 24s	remaining: 7.97s
457:	learn: 0.2454495	test: 0.2675197	best: 0.2675194 (456)	total: 1m 24s	remaining: 7.78s
458:	learn: 0.2454314	test: 0.2675153	best: 0.2675153 (458)	total: 1m 25s	remaining: 7.61s
459:	learn: 0.2454189	test: 0.2675088	best: 0.2675088 (459)	total: 1m 25s	remaining: 7.41s
460:	learn: 0.2452996	test: 0.2674788	best: 0.2674788 (460)	total: 1m 25s	remaining: 7.23s
461:	learn: 0.2452950	test: 0.2674787	best: 0.2674787 (461)	total: 1m 25s	remaining: 7.04s
462:	learn: 0.2451842	test: 0.2674353	best: 0.2674353 (462)	total: 1m 25s	remaining: 6.86s
463:	learn: 0.2451807	test: 0.2674356	best: 0.2674353 (462)	total: 1m 25s	remaining: 6.66s
464:	learn: 0.2451332	test: 0.2674233	best: 0.2674233 (464)	total: 1m 26s	remaining: 6.48s
465:	learn: 0.2450552	test: 0.2674102	best: 0.2674102 (465)	total: 1m 26s	remaining: 6.3s
466:	learn: 0.2450256	test: 0.2673914	best: 0.2673914 (466)	total: 1m 26s	remaining: 6.13s
467:	learn: 0.2449863	test: 0.2673778	best: 0.2673778 (467)	total: 1m 26s	remaining: 5.95s
468:	learn: 0.2449661	test: 0.2673743	best: 0.2673743 (468)	total: 1m 27s	remaining: 5.75s
469:	learn: 0.2449481	test: 0.2673684	best: 0.2673684 (469)	total: 1m 27s	remaining: 5.56s
470:	learn: 0.2449228	test: 0.2673611	best: 0.2673611 (470)	total: 1m 27s	remaining: 5.37s
471:	learn: 0.2449151	test: 0.2673579	best: 0.2673579 (471)	total: 1m 27s	remaining: 5.18s
472:	learn: 0.2448990	test: 0.2673582	best: 0.2673579 (471)	total: 1m 27s	remaining: 4.99s
473:	learn: 0.2448649	test: 0.2673505	best: 0.2673505 (473)	total: 1m 27s	remaining: 4.81s
474:	learn: 0.2448609	test: 0.2673505	best: 0.2673505 (474)	total: 1m 27s	remaining: 4.62s
475:	learn: 0.2447716	test: 0.2673361	best: 0.2673361 (475)	total: 1m 28s	remaining: 4.44s
476:	learn: 0.2447148	test: 0.2673191	best: 0.2673191 (476)	total: 1m 28s	remaining: 4.26s
477:	learn: 0.2446619	test: 0.2672854	best: 0.2672854 (477)	total: 1m 28s	remaining: 4.07s
478:	learn: 0.2446587	test: 0.2672856	best: 0.2672854 (477)	total: 1m 28s	remaining: 3.88s
479:	learn: 0.2446430	test: 0.2672808	best: 0.2672808 (479)	total: 1m 28s	remaining: 3.7s
480:	learn: 0.2446310	test: 0.2672751	best: 0.2672751 (480)	total: 1m 28s	remaining: 3.51s
481:	learn: 0.2446272	test: 0.2672753	best: 0.2672751 (480)	total: 1m 28s	remaining: 3.32s
482:	learn: 0.2446027	test: 0.2672710	best: 0.2672710 (482)	total: 1m 28s	remaining: 3.13s
483:	learn: 0.2445991	test: 0.2672709	best: 0.2672709 (483)	total: 1m 28s	remaining: 2.94s
484:	learn: 0.2445835	test: 0.2672713	best: 0.2672709 (483)	total: 1m 29s	remaining: 2.75s
485:	learn: 0.2445483	test: 0.2672619	best: 0.2672619 (485)	total: 1m 29s	remaining: 2.57s
486:	learn: 0.2445151	test: 0.2672496	best: 0.2672496 (486)	total: 1m 29s	remaining: 2.38s
487:	learn: 0.2444971	test: 0.2672375	best: 0.2672375 (487)	total: 1m 29s	remaining: 2.2s
488:	learn: 0.2444790	test: 0.2672264	best: 0.2672264 (488)	total: 1m 29s	remaining: 2.01s
489:	learn: 0.2444730	test: 0.2672266	best: 0.2672264 (488)	total: 1m 29s	remaining: 1.83s
490:	learn: 0.2443985	test: 0.2672092	best: 0.2672092 (490)	total: 1m 29s	remaining: 1.65s
491:	learn: 0.2443926	test: 0.2672092	best: 0.2672092 (491)	total: 1m 29s	remaining: 1.46s
492:	learn: 0.2443282	test: 0.2672029	best: 0.2672029 (492)	total: 1m 30s	remaining: 1.28s
493:	learn: 0.2442625	test: 0.2671660	best: 0.2671660 (493)	total: 1m 30s	remaining: 1.1s
494:	learn: 0.2442569	test: 0.2671652	best: 0.2671652 (494)	total: 1m 30s	remaining: 913ms
495:	learn: 0.2442137	test: 0.2671451	best: 0.2671451 (495)	total: 1m 30s	remaining: 729ms
496:	learn: 0.2442029	test: 0.2671429	best: 0.2671429 (496)	total: 1m 30s	remaining: 546ms
497:	learn: 0.2441346	test: 0.2671031	best: 0.2671031 (497)	total: 1m 30s	remaining: 365ms
498:	learn: 0.2441208	test: 0.2670957	best: 0.2670957 (498)	total: 1m 30s	remaining: 182ms
499:	learn: 0.2441134	test: 0.2670958	best: 0.2670957 (498)	total: 1m 30s	remaining: 0us

bestTest = 0.2670956881
bestIteration = 498

Out[47]:
<catboost.core.CatBoostClassifier at 0x1addceff240>

In [50]:
model.predict_proba(df_test)


Out[50]:
array([[ 0.78023,  0.21977],
       [ 0.99108,  0.00892],
       [ 0.99825,  0.00175],
       ..., 
       [ 0.99566,  0.00434],
       [ 0.98803,  0.01197],
       [ 0.24806,  0.75194]])

In [60]:
temp = pd.DataFrame()
l =[]
for i,j in zip(model.feature_importances_, df_raw.columns):
    l.append([j,i])

In [73]:
temp = pd.DataFrame(l,columns=['col', 'imp'])
plt.figure(figsize=(10,10))
temp.plot('col','imp',kind='barh')


Out[73]:
<matplotlib.axes._subplots.AxesSubplot at 0x1ad809d3c18>
<Figure size 720x720 with 0 Axes>

xgb


In [19]:
target = df_raw.is_promoted
df_raw.drop('is_promoted', axis=1, inplace=True)
df_raw['previous_year_rating'].fillna(0,inplace=True)
df_test['previous_year_rating'].fillna(0,inplace=True)

In [20]:
#df_raw[pd.isnull(df_raw['previous_year_rating'])]

In [20]:
train_encoded_T, test_encoded_T = np.asarray(train_encoded).T, np.asarray(test_encoded).T

In [21]:
# ###need to check on this
# df_raw_cat['promotion_chance'] = df_raw['promotion_chance']
# df_test_cat['promotion_chance'] = df_test['promotion_chance']
# df_raw.drop('promotion_chance', axis=1,inplace=True)
# df_test.drop('promotion_chance', axis=1,inplace=True)

In [21]:
df_raw_cat.columns


Out[21]:
Index(['department', 'region', 'education', 'gender', 'recruitment_channel',
       'age_group', 'bin_age', 'bin_avg_training_score', 'promotion_chance'],
      dtype='object')

In [22]:
train_all, test_all = np.hstack((df_raw, train_encoded_T, pd.get_dummies(df_raw_cat))), np.hstack((df_test, test_encoded_T, pd.get_dummies(df_test_cat)))

In [23]:
train_all.shape, test_all.shape


Out[23]:
((54808, 102), (23490, 102))

Model


In [7]:
def make_submission(probs):
    sample = pd.read_csv(f'{PATH}\\sample_submission.csv')
    submit = sample.copy()
    submit['is_promoted'] = probs
    return submit

In [25]:
np.count_nonzero(target), target.shape[0]- 4668


Out[25]:
(4668, 50140)

In [8]:
train_all, test_all, target = np.load('train_all.npy'), np.load('test_all.npy'), np.load('target.npy')

In [26]:
X_train, X_valid, y_train, y_valid = train_test_split(train_all, target, test_size = .2, stratify = target)

In [27]:
def runXGB(train_X, train_y, test_X, test_y=None):
        params = {}
        params['booster'] = 'gbtree'
        params['tree_method'] = 'gpu_hist'
        params["objective"] = "binary:logistic"
        params['eval_metric'] = 'auc'
        params["eta"] = 0.05 #0.03
        params["subsample"] = .8
        params["silent"] = 0
        params['verbose'] = 1
        params["max_depth"] = 10
        params["seed"] = 1
        params["max_delta_step"] = 4
        params['scale_pos_weight'] =  50140/4668
        params["gamma"] = 0.6 #.5 #.1 #.2
        params['colsample_bytree'] = 0.75
        params['nrounds'] = 1000 #3600 #2000 #4000
        plst = list(params.items())

        xgtrain = xgb.DMatrix(train_X, label=train_y)
        xgtest = xgb.DMatrix(test_X)
        model = xgb.train(plst, xgtrain)
        pred_test_y = model.predict(xgtest)
        return pred_test_y

In [29]:
val_preds = runXGB(X_train, y_train, X_valid,)

In [30]:
val_preds, max(val_preds)


Out[30]:
(array([ 0.34756,  0.35743,  0.49354, ...,  0.36264,  0.58743,  0.46702], dtype=float32),
 0.70014787)

In [10]:
# test_preds = model_srk.predict(test_all)

hit try error optimisation


In [31]:
params = {}
params['booster'] = 'gbtree'
params['tree_method'] = 'gpu_hist'
params["objective"] = "binary:logistic"
params['eval_metric'] = 'auc'
params["eta"] = 0.05 #0.03
params["subsample"] = .85
params["silent"] = 0
params['verbose'] = 1
params["max_depth"] = 10
params["seed"] = 1
params["max_delta_step"] = 4
params['scale_pos_weight'] =  50140/4668
params["gamma"] = 0.6 #.5 #.1 #.2
params['colsample_bytree'] = 0.75
params['nrounds'] = 500 #3600 #2000 #4000

In [23]:
# X_train, X_valid, y_train, y_valid = train_test_split(train_all, target, test_size = .2, stratify = target)

In [33]:
model, p_train, p_test = mlcrate.xgb.train_kfold(params, train_all, target, test_all, folds = 7, stratify=target)


[mlcrate] Training 7 stratified XGBoost models on training set (54808, 102) with test set (23490, 102)
[mlcrate] Running fold 0, 46978 train samples, 7830 validation samples
[0]	train-auc:0.895494	valid-auc:0.864586
Multiple eval metrics have been passed: 'valid-auc' will be used for early stopping.

Will train until valid-auc hasn't improved in 50 rounds.
[1]	train-auc:0.919072	valid-auc:0.885218
[2]	train-auc:0.92753	valid-auc:0.891484
[3]	train-auc:0.933167	valid-auc:0.893178
[4]	train-auc:0.939409	valid-auc:0.896358
[5]	train-auc:0.943433	valid-auc:0.896474
[6]	train-auc:0.945777	valid-auc:0.898182
[7]	train-auc:0.948576	valid-auc:0.89888
[8]	train-auc:0.949797	valid-auc:0.89965
[9]	train-auc:0.95137	valid-auc:0.899572
[10]	train-auc:0.95256	valid-auc:0.899886
[11]	train-auc:0.953416	valid-auc:0.899446
[12]	train-auc:0.954083	valid-auc:0.9001
[13]	train-auc:0.955036	valid-auc:0.900557
[14]	train-auc:0.955806	valid-auc:0.901206
[15]	train-auc:0.957452	valid-auc:0.900595
[16]	train-auc:0.958177	valid-auc:0.900851
[17]	train-auc:0.959132	valid-auc:0.900813
[18]	train-auc:0.960678	valid-auc:0.901782
[19]	train-auc:0.96055	valid-auc:0.901825
[20]	train-auc:0.961481	valid-auc:0.901748
[21]	train-auc:0.962668	valid-auc:0.901529
[22]	train-auc:0.963877	valid-auc:0.901601
[23]	train-auc:0.96496	valid-auc:0.902375
[24]	train-auc:0.965723	valid-auc:0.902037
[25]	train-auc:0.966174	valid-auc:0.902013
[26]	train-auc:0.966815	valid-auc:0.902898
[27]	train-auc:0.967191	valid-auc:0.903203
[28]	train-auc:0.967894	valid-auc:0.903662
[29]	train-auc:0.968163	valid-auc:0.903555
[30]	train-auc:0.968646	valid-auc:0.903428
[31]	train-auc:0.969027	valid-auc:0.903383
[32]	train-auc:0.969721	valid-auc:0.903491
[33]	train-auc:0.96994	valid-auc:0.903919
[34]	train-auc:0.970896	valid-auc:0.90389
[35]	train-auc:0.971663	valid-auc:0.904062
[36]	train-auc:0.971984	valid-auc:0.90436
[37]	train-auc:0.972158	valid-auc:0.904448
[38]	train-auc:0.972753	valid-auc:0.904765
[39]	train-auc:0.973414	valid-auc:0.904981
[40]	train-auc:0.973999	valid-auc:0.904941
[41]	train-auc:0.974391	valid-auc:0.90512
[42]	train-auc:0.974805	valid-auc:0.905099
[43]	train-auc:0.975228	valid-auc:0.905219
[44]	train-auc:0.975732	valid-auc:0.905509
[45]	train-auc:0.976144	valid-auc:0.905276
[46]	train-auc:0.976401	valid-auc:0.905467
[47]	train-auc:0.976758	valid-auc:0.905594
[48]	train-auc:0.977495	valid-auc:0.905444
[49]	train-auc:0.978083	valid-auc:0.905368
[50]	train-auc:0.978619	valid-auc:0.905476
[51]	train-auc:0.978774	valid-auc:0.905383
[52]	train-auc:0.979013	valid-auc:0.905446
[53]	train-auc:0.979478	valid-auc:0.90549
[54]	train-auc:0.979743	valid-auc:0.905518
[55]	train-auc:0.980107	valid-auc:0.905632
[56]	train-auc:0.980209	valid-auc:0.905722
[57]	train-auc:0.98072	valid-auc:0.90555
[58]	train-auc:0.98121	valid-auc:0.905571
[59]	train-auc:0.981565	valid-auc:0.90538
[60]	train-auc:0.981885	valid-auc:0.905428
[61]	train-auc:0.982155	valid-auc:0.90542
[62]	train-auc:0.982523	valid-auc:0.905483
[63]	train-auc:0.982689	valid-auc:0.905332
[64]	train-auc:0.982963	valid-auc:0.905407
[65]	train-auc:0.983388	valid-auc:0.905239
[66]	train-auc:0.983626	valid-auc:0.904891
[67]	train-auc:0.983916	valid-auc:0.904881
[68]	train-auc:0.984141	valid-auc:0.904819
[69]	train-auc:0.984267	valid-auc:0.904952
[70]	train-auc:0.984534	valid-auc:0.904956
[71]	train-auc:0.98474	valid-auc:0.904861
[72]	train-auc:0.984863	valid-auc:0.904917
[73]	train-auc:0.985285	valid-auc:0.904946
[74]	train-auc:0.985529	valid-auc:0.904838
[75]	train-auc:0.985664	valid-auc:0.904855
[76]	train-auc:0.985988	valid-auc:0.904874
[77]	train-auc:0.986234	valid-auc:0.904788
[78]	train-auc:0.98648	valid-auc:0.90489
[79]	train-auc:0.98672	valid-auc:0.904957
[80]	train-auc:0.986882	valid-auc:0.904992
[81]	train-auc:0.987166	valid-auc:0.905134
[82]	train-auc:0.987354	valid-auc:0.905264
[83]	train-auc:0.987606	valid-auc:0.905209
[84]	train-auc:0.987841	valid-auc:0.904981
[85]	train-auc:0.988066	valid-auc:0.904893
[86]	train-auc:0.988325	valid-auc:0.904935
[87]	train-auc:0.988548	valid-auc:0.904978
[88]	train-auc:0.988687	valid-auc:0.905011
[89]	train-auc:0.988958	valid-auc:0.904879
[90]	train-auc:0.989188	valid-auc:0.904713
[91]	train-auc:0.989489	valid-auc:0.904796
[92]	train-auc:0.989705	valid-auc:0.904843
[93]	train-auc:0.989925	valid-auc:0.904969
[94]	train-auc:0.990183	valid-auc:0.905056
[95]	train-auc:0.990448	valid-auc:0.904671
[96]	train-auc:0.990585	valid-auc:0.90467
[97]	train-auc:0.990815	valid-auc:0.904746
[98]	train-auc:0.991045	valid-auc:0.904731
[99]	train-auc:0.991174	valid-auc:0.904726
[100]	train-auc:0.991301	valid-auc:0.904776
[101]	train-auc:0.991535	valid-auc:0.904926
[102]	train-auc:0.991626	valid-auc:0.904956
[103]	train-auc:0.991878	valid-auc:0.905033
[104]	train-auc:0.992045	valid-auc:0.904852
[105]	train-auc:0.992151	valid-auc:0.904795
[106]	train-auc:0.992275	valid-auc:0.904601
Stopping. Best iteration:
[56]	train-auc:0.980209	valid-auc:0.905722

C:\ProgramData\Anaconda3\lib\site-packages\mlcrate\backend.py:7: UserWarning: Timer.format_elapsed() has been deprecated in favour of Timer.fsince() and will be removed soon
  warn(message)
[mlcrate] Finished training fold 0 - took 19s - running score 0.905722
[mlcrate] Running fold 1, 46978 train samples, 7830 validation samples
[0]	train-auc:0.901836	valid-auc:0.856622
Multiple eval metrics have been passed: 'valid-auc' will be used for early stopping.

Will train until valid-auc hasn't improved in 50 rounds.
[1]	train-auc:0.92376	valid-auc:0.879527
[2]	train-auc:0.932131	valid-auc:0.887195
[3]	train-auc:0.936522	valid-auc:0.888585
[4]	train-auc:0.943282	valid-auc:0.888041
[5]	train-auc:0.947342	valid-auc:0.888624
[6]	train-auc:0.948614	valid-auc:0.891658
[7]	train-auc:0.951232	valid-auc:0.89332
[8]	train-auc:0.952888	valid-auc:0.892759
[9]	train-auc:0.954148	valid-auc:0.893651
[10]	train-auc:0.955218	valid-auc:0.893219
[11]	train-auc:0.956239	valid-auc:0.894971
[12]	train-auc:0.957062	valid-auc:0.895332
[13]	train-auc:0.957206	valid-auc:0.895961
[14]	train-auc:0.958271	valid-auc:0.896596
[15]	train-auc:0.959368	valid-auc:0.896157
[16]	train-auc:0.960386	valid-auc:0.896635
[17]	train-auc:0.961421	valid-auc:0.895675
[18]	train-auc:0.961841	valid-auc:0.895686
[19]	train-auc:0.962417	valid-auc:0.894442
[20]	train-auc:0.96316	valid-auc:0.895154
[21]	train-auc:0.96442	valid-auc:0.89516
[22]	train-auc:0.964939	valid-auc:0.895977
[23]	train-auc:0.965594	valid-auc:0.896294
[24]	train-auc:0.966129	valid-auc:0.896126
[25]	train-auc:0.966368	valid-auc:0.89606
[26]	train-auc:0.967213	valid-auc:0.896464
[27]	train-auc:0.96783	valid-auc:0.897043
[28]	train-auc:0.968447	valid-auc:0.89775
[29]	train-auc:0.968867	valid-auc:0.898283
[30]	train-auc:0.969487	valid-auc:0.898397
[31]	train-auc:0.969894	valid-auc:0.898745
[32]	train-auc:0.970175	valid-auc:0.898766
[33]	train-auc:0.970404	valid-auc:0.898822
[34]	train-auc:0.970831	valid-auc:0.899181
[35]	train-auc:0.971168	valid-auc:0.899387
[36]	train-auc:0.971703	valid-auc:0.899394
[37]	train-auc:0.971985	valid-auc:0.89937
[38]	train-auc:0.972778	valid-auc:0.899586
[39]	train-auc:0.97306	valid-auc:0.899748
[40]	train-auc:0.973371	valid-auc:0.899926
[41]	train-auc:0.974034	valid-auc:0.900005
[42]	train-auc:0.974441	valid-auc:0.90013
[43]	train-auc:0.975114	valid-auc:0.899937
[44]	train-auc:0.97582	valid-auc:0.900492
[45]	train-auc:0.976192	valid-auc:0.900094
[46]	train-auc:0.976469	valid-auc:0.90011
[47]	train-auc:0.976875	valid-auc:0.900142
[48]	train-auc:0.976897	valid-auc:0.900125
[49]	train-auc:0.977472	valid-auc:0.900142
[50]	train-auc:0.977889	valid-auc:0.899962
[51]	train-auc:0.978286	valid-auc:0.900008
[52]	train-auc:0.978476	valid-auc:0.900391
[53]	train-auc:0.978982	valid-auc:0.900163
[54]	train-auc:0.979483	valid-auc:0.900312
[55]	train-auc:0.979822	valid-auc:0.900266
[56]	train-auc:0.980383	valid-auc:0.900314
[57]	train-auc:0.980761	valid-auc:0.900507
[58]	train-auc:0.981376	valid-auc:0.900214
[59]	train-auc:0.981646	valid-auc:0.900485
[60]	train-auc:0.982242	valid-auc:0.900037
[61]	train-auc:0.982566	valid-auc:0.900135
[62]	train-auc:0.982889	valid-auc:0.900078
[63]	train-auc:0.983234	valid-auc:0.900304
[64]	train-auc:0.983604	valid-auc:0.900227
[65]	train-auc:0.984145	valid-auc:0.900261
[66]	train-auc:0.984451	valid-auc:0.900391
[67]	train-auc:0.984682	valid-auc:0.900339
[68]	train-auc:0.984898	valid-auc:0.900411
[69]	train-auc:0.985255	valid-auc:0.900406
[70]	train-auc:0.985591	valid-auc:0.900297
[71]	train-auc:0.985883	valid-auc:0.900246
[72]	train-auc:0.986169	valid-auc:0.900384
[73]	train-auc:0.986465	valid-auc:0.900593
[74]	train-auc:0.986699	valid-auc:0.900559
[75]	train-auc:0.986772	valid-auc:0.900519
[76]	train-auc:0.98698	valid-auc:0.900521
[77]	train-auc:0.987296	valid-auc:0.900244
[78]	train-auc:0.987567	valid-auc:0.900513
[79]	train-auc:0.987748	valid-auc:0.900617
[80]	train-auc:0.988158	valid-auc:0.90081
[81]	train-auc:0.988372	valid-auc:0.900954
[82]	train-auc:0.988575	valid-auc:0.900905
[83]	train-auc:0.988774	valid-auc:0.901155
[84]	train-auc:0.988945	valid-auc:0.900937
[85]	train-auc:0.989165	valid-auc:0.900987
[86]	train-auc:0.989453	valid-auc:0.9011
[87]	train-auc:0.989784	valid-auc:0.901312
[88]	train-auc:0.989995	valid-auc:0.901464
[89]	train-auc:0.990171	valid-auc:0.901054
[90]	train-auc:0.990452	valid-auc:0.901056
[91]	train-auc:0.990582	valid-auc:0.901136
[92]	train-auc:0.990769	valid-auc:0.901053
[93]	train-auc:0.990987	valid-auc:0.901019
[94]	train-auc:0.991178	valid-auc:0.90084
[95]	train-auc:0.991381	valid-auc:0.900635
[96]	train-auc:0.991517	valid-auc:0.900599
[97]	train-auc:0.991723	valid-auc:0.900436
[98]	train-auc:0.991835	valid-auc:0.900379
[99]	train-auc:0.992021	valid-auc:0.900553
[100]	train-auc:0.99214	valid-auc:0.900454
[101]	train-auc:0.99242	valid-auc:0.900525
[102]	train-auc:0.992646	valid-auc:0.900471
[103]	train-auc:0.992883	valid-auc:0.90059
[104]	train-auc:0.992952	valid-auc:0.900479
[105]	train-auc:0.993053	valid-auc:0.900506
[106]	train-auc:0.993173	valid-auc:0.900421
[107]	train-auc:0.993396	valid-auc:0.900404
[108]	train-auc:0.993482	valid-auc:0.900227
[109]	train-auc:0.993681	valid-auc:0.899878
[110]	train-auc:0.993841	valid-auc:0.899778
[111]	train-auc:0.994007	valid-auc:0.899968
[112]	train-auc:0.994109	valid-auc:0.899951
[113]	train-auc:0.994294	valid-auc:0.900198
[114]	train-auc:0.994407	valid-auc:0.900163
[115]	train-auc:0.994592	valid-auc:0.900216
[116]	train-auc:0.9947	valid-auc:0.900238
[117]	train-auc:0.994824	valid-auc:0.900309
[118]	train-auc:0.995017	valid-auc:0.900176
[119]	train-auc:0.99519	valid-auc:0.899929
[120]	train-auc:0.995397	valid-auc:0.899973
[121]	train-auc:0.995487	valid-auc:0.899933
[122]	train-auc:0.995565	valid-auc:0.899847
[123]	train-auc:0.995658	valid-auc:0.899813
[124]	train-auc:0.99577	valid-auc:0.899907
[125]	train-auc:0.995898	valid-auc:0.899983
[126]	train-auc:0.996033	valid-auc:0.900056
[127]	train-auc:0.996135	valid-auc:0.8999
[128]	train-auc:0.996272	valid-auc:0.899827
[129]	train-auc:0.996324	valid-auc:0.89973
[130]	train-auc:0.99643	valid-auc:0.899729
[131]	train-auc:0.996576	valid-auc:0.899918
[132]	train-auc:0.996605	valid-auc:0.899904
[133]	train-auc:0.996627	valid-auc:0.899846
[134]	train-auc:0.996724	valid-auc:0.899754
[135]	train-auc:0.996826	valid-auc:0.899675
[136]	train-auc:0.996876	valid-auc:0.899778
[137]	train-auc:0.99692	valid-auc:0.899874
[138]	train-auc:0.996969	valid-auc:0.899913
Stopping. Best iteration:
[88]	train-auc:0.989995	valid-auc:0.901464

C:\ProgramData\Anaconda3\lib\site-packages\mlcrate\backend.py:7: UserWarning: Timer.format_elapsed() has been deprecated in favour of Timer.fsince() and will be removed soon
  warn(message)
[mlcrate] Finished training fold 1 - took 25s - running score 0.9035930000000001
[mlcrate] Running fold 2, 46978 train samples, 7830 validation samples
[0]	train-auc:0.895298	valid-auc:0.851797
Multiple eval metrics have been passed: 'valid-auc' will be used for early stopping.

Will train until valid-auc hasn't improved in 50 rounds.
[1]	train-auc:0.918093	valid-auc:0.879972
[2]	train-auc:0.926517	valid-auc:0.884985
[3]	train-auc:0.932614	valid-auc:0.891628
[4]	train-auc:0.940131	valid-auc:0.892256
[5]	train-auc:0.946429	valid-auc:0.894899
[6]	train-auc:0.947828	valid-auc:0.895586
[7]	train-auc:0.950501	valid-auc:0.898394
[8]	train-auc:0.952153	valid-auc:0.898026
[9]	train-auc:0.953489	valid-auc:0.89777
[10]	train-auc:0.954296	valid-auc:0.897396
[11]	train-auc:0.954983	valid-auc:0.898383
[12]	train-auc:0.95605	valid-auc:0.898259
[13]	train-auc:0.956382	valid-auc:0.898237
[14]	train-auc:0.957058	valid-auc:0.898339
[15]	train-auc:0.958126	valid-auc:0.898006
[16]	train-auc:0.959032	valid-auc:0.89883
[17]	train-auc:0.959909	valid-auc:0.898148
[18]	train-auc:0.960828	valid-auc:0.898758
[19]	train-auc:0.961114	valid-auc:0.898923
[20]	train-auc:0.961614	valid-auc:0.899355
[21]	train-auc:0.962989	valid-auc:0.899118
[22]	train-auc:0.963657	valid-auc:0.89941
[23]	train-auc:0.964133	valid-auc:0.899677
[24]	train-auc:0.964919	valid-auc:0.899564
[25]	train-auc:0.965482	valid-auc:0.900242
[26]	train-auc:0.966383	valid-auc:0.899924
[27]	train-auc:0.967102	valid-auc:0.900477
[28]	train-auc:0.967434	valid-auc:0.900434
[29]	train-auc:0.968266	valid-auc:0.900221
[30]	train-auc:0.968987	valid-auc:0.899821
[31]	train-auc:0.969281	valid-auc:0.899899
[32]	train-auc:0.969611	valid-auc:0.899668
[33]	train-auc:0.96993	valid-auc:0.899534
[34]	train-auc:0.970429	valid-auc:0.899266
[35]	train-auc:0.971059	valid-auc:0.899577
[36]	train-auc:0.971376	valid-auc:0.899321
[37]	train-auc:0.971816	valid-auc:0.899411
[38]	train-auc:0.972471	valid-auc:0.899406
[39]	train-auc:0.973194	valid-auc:0.89964
[40]	train-auc:0.973577	valid-auc:0.899719
[41]	train-auc:0.974043	valid-auc:0.899553
[42]	train-auc:0.974432	valid-auc:0.899557
[43]	train-auc:0.974973	valid-auc:0.899426
[44]	train-auc:0.975714	valid-auc:0.899355
[45]	train-auc:0.976184	valid-auc:0.899258
[46]	train-auc:0.97665	valid-auc:0.899158
[47]	train-auc:0.97688	valid-auc:0.8994
[48]	train-auc:0.977296	valid-auc:0.899628
[49]	train-auc:0.977847	valid-auc:0.899576
[50]	train-auc:0.978109	valid-auc:0.89937
[51]	train-auc:0.978358	valid-auc:0.899542
[52]	train-auc:0.978574	valid-auc:0.899797
[53]	train-auc:0.978892	valid-auc:0.899675
[54]	train-auc:0.979227	valid-auc:0.899694
[55]	train-auc:0.979389	valid-auc:0.899663
[56]	train-auc:0.979533	valid-auc:0.899631
[57]	train-auc:0.980081	valid-auc:0.90004
[58]	train-auc:0.980539	valid-auc:0.89989
[59]	train-auc:0.980929	valid-auc:0.899929
[60]	train-auc:0.981562	valid-auc:0.899741
[61]	train-auc:0.981865	valid-auc:0.899551
[62]	train-auc:0.982298	valid-auc:0.899439
[63]	train-auc:0.982508	valid-auc:0.899304
[64]	train-auc:0.982759	valid-auc:0.899239
[65]	train-auc:0.983117	valid-auc:0.898933
[66]	train-auc:0.983325	valid-auc:0.899157
[67]	train-auc:0.983578	valid-auc:0.899106
[68]	train-auc:0.98378	valid-auc:0.899078
[69]	train-auc:0.984215	valid-auc:0.899193
[70]	train-auc:0.984375	valid-auc:0.898916
[71]	train-auc:0.984712	valid-auc:0.898902
[72]	train-auc:0.98488	valid-auc:0.898807
[73]	train-auc:0.985165	valid-auc:0.899172
[74]	train-auc:0.985429	valid-auc:0.89916
[75]	train-auc:0.985666	valid-auc:0.898929
[76]	train-auc:0.985898	valid-auc:0.898804
[77]	train-auc:0.986176	valid-auc:0.89863
Stopping. Best iteration:
[27]	train-auc:0.967102	valid-auc:0.900477

C:\ProgramData\Anaconda3\lib\site-packages\mlcrate\backend.py:7: UserWarning: Timer.format_elapsed() has been deprecated in favour of Timer.fsince() and will be removed soon
  warn(message)
[mlcrate] Finished training fold 2 - took 15s - running score 0.9025543333333333
[mlcrate] Running fold 3, 46978 train samples, 7830 validation samples
[0]	train-auc:0.897559	valid-auc:0.86076
Multiple eval metrics have been passed: 'valid-auc' will be used for early stopping.

Will train until valid-auc hasn't improved in 50 rounds.
[1]	train-auc:0.919177	valid-auc:0.876111
[2]	train-auc:0.930044	valid-auc:0.887389
[3]	train-auc:0.935121	valid-auc:0.889749
[4]	train-auc:0.94166	valid-auc:0.893218
[5]	train-auc:0.94529	valid-auc:0.894372
[6]	train-auc:0.947583	valid-auc:0.896269
[7]	train-auc:0.949606	valid-auc:0.897998
[8]	train-auc:0.950981	valid-auc:0.898652
[9]	train-auc:0.952004	valid-auc:0.898125
[10]	train-auc:0.952285	valid-auc:0.898944
[11]	train-auc:0.95327	valid-auc:0.899776
[12]	train-auc:0.954695	valid-auc:0.900314
[13]	train-auc:0.955556	valid-auc:0.901095
[14]	train-auc:0.956761	valid-auc:0.901453
[15]	train-auc:0.958039	valid-auc:0.901464
[16]	train-auc:0.959196	valid-auc:0.902363
[17]	train-auc:0.960033	valid-auc:0.902517
[18]	train-auc:0.961172	valid-auc:0.903644
[19]	train-auc:0.961874	valid-auc:0.903715
[20]	train-auc:0.962306	valid-auc:0.904481
[21]	train-auc:0.964099	valid-auc:0.904442
[22]	train-auc:0.964631	valid-auc:0.904586
[23]	train-auc:0.965334	valid-auc:0.90516
[24]	train-auc:0.966265	valid-auc:0.905354
[25]	train-auc:0.967037	valid-auc:0.906238
[26]	train-auc:0.96753	valid-auc:0.906542
[27]	train-auc:0.968048	valid-auc:0.907204
[28]	train-auc:0.968302	valid-auc:0.907633
[29]	train-auc:0.968765	valid-auc:0.907816
[30]	train-auc:0.969035	valid-auc:0.908002
[31]	train-auc:0.969445	valid-auc:0.908151
[32]	train-auc:0.969999	valid-auc:0.907926
[33]	train-auc:0.970605	valid-auc:0.907742
[34]	train-auc:0.970973	valid-auc:0.908022
[35]	train-auc:0.971456	valid-auc:0.907867
[36]	train-auc:0.97198	valid-auc:0.907926
[37]	train-auc:0.97224	valid-auc:0.908064
[38]	train-auc:0.972603	valid-auc:0.908128
[39]	train-auc:0.973345	valid-auc:0.908283
[40]	train-auc:0.973559	valid-auc:0.908104
[41]	train-auc:0.974192	valid-auc:0.90839
[42]	train-auc:0.974672	valid-auc:0.908159
[43]	train-auc:0.975073	valid-auc:0.908188
[44]	train-auc:0.976106	valid-auc:0.908348
[45]	train-auc:0.97638	valid-auc:0.908535
[46]	train-auc:0.976697	valid-auc:0.908302
[47]	train-auc:0.977121	valid-auc:0.9084
[48]	train-auc:0.977426	valid-auc:0.908645
[49]	train-auc:0.978035	valid-auc:0.908607
[50]	train-auc:0.97829	valid-auc:0.908575
[51]	train-auc:0.978406	valid-auc:0.908536
[52]	train-auc:0.978608	valid-auc:0.908615
[53]	train-auc:0.978877	valid-auc:0.90867
[54]	train-auc:0.979135	valid-auc:0.908749
[55]	train-auc:0.979435	valid-auc:0.908881
[56]	train-auc:0.979745	valid-auc:0.908879
[57]	train-auc:0.979956	valid-auc:0.908718
[58]	train-auc:0.980429	valid-auc:0.908805
[59]	train-auc:0.980964	valid-auc:0.908603
[60]	train-auc:0.981312	valid-auc:0.908264
[61]	train-auc:0.981675	valid-auc:0.908628
[62]	train-auc:0.982159	valid-auc:0.908458
[63]	train-auc:0.982445	valid-auc:0.908528
[64]	train-auc:0.982903	valid-auc:0.908575
[65]	train-auc:0.98314	valid-auc:0.908508
[66]	train-auc:0.983247	valid-auc:0.908458
[67]	train-auc:0.983638	valid-auc:0.908429
[68]	train-auc:0.983827	valid-auc:0.908482
[69]	train-auc:0.984032	valid-auc:0.908522
[70]	train-auc:0.984316	valid-auc:0.908454
[71]	train-auc:0.984643	valid-auc:0.908722
[72]	train-auc:0.985053	valid-auc:0.908638
[73]	train-auc:0.985172	valid-auc:0.908717
[74]	train-auc:0.985412	valid-auc:0.908928
[75]	train-auc:0.985683	valid-auc:0.908873
[76]	train-auc:0.986022	valid-auc:0.908719
[77]	train-auc:0.986185	valid-auc:0.908704
[78]	train-auc:0.986491	valid-auc:0.90863
[79]	train-auc:0.986724	valid-auc:0.908649
[80]	train-auc:0.987108	valid-auc:0.908412
[81]	train-auc:0.987524	valid-auc:0.908333
[82]	train-auc:0.987738	valid-auc:0.908434
[83]	train-auc:0.987876	valid-auc:0.908344
[84]	train-auc:0.988042	valid-auc:0.90845
[85]	train-auc:0.988205	valid-auc:0.9085
[86]	train-auc:0.98842	valid-auc:0.908541
[87]	train-auc:0.988638	valid-auc:0.908327
[88]	train-auc:0.988782	valid-auc:0.908228
[89]	train-auc:0.989084	valid-auc:0.908474
[90]	train-auc:0.989284	valid-auc:0.908529
[91]	train-auc:0.989479	valid-auc:0.908488
[92]	train-auc:0.989612	valid-auc:0.908376
[93]	train-auc:0.989879	valid-auc:0.90836
[94]	train-auc:0.990176	valid-auc:0.908257
[95]	train-auc:0.99038	valid-auc:0.908172
[96]	train-auc:0.99068	valid-auc:0.908178
[97]	train-auc:0.990761	valid-auc:0.90813
[98]	train-auc:0.990862	valid-auc:0.908184
[99]	train-auc:0.991048	valid-auc:0.908307
[100]	train-auc:0.99119	valid-auc:0.908244
[101]	train-auc:0.991369	valid-auc:0.908161
[102]	train-auc:0.99161	valid-auc:0.908124
[103]	train-auc:0.991792	valid-auc:0.908218
[104]	train-auc:0.992108	valid-auc:0.908102
[105]	train-auc:0.992325	valid-auc:0.908222
[106]	train-auc:0.992503	valid-auc:0.908225
[107]	train-auc:0.992666	valid-auc:0.908256
[108]	train-auc:0.992809	valid-auc:0.908214
[109]	train-auc:0.992871	valid-auc:0.908302
[110]	train-auc:0.993116	valid-auc:0.908249
[111]	train-auc:0.993172	valid-auc:0.908149
[112]	train-auc:0.993233	valid-auc:0.908019
[113]	train-auc:0.993512	valid-auc:0.908027
[114]	train-auc:0.993606	valid-auc:0.908243
[115]	train-auc:0.99376	valid-auc:0.908194
[116]	train-auc:0.993934	valid-auc:0.908158
[117]	train-auc:0.993996	valid-auc:0.908071
[118]	train-auc:0.994198	valid-auc:0.908037
[119]	train-auc:0.994349	valid-auc:0.908089
[120]	train-auc:0.994436	valid-auc:0.908207
[121]	train-auc:0.9946	valid-auc:0.907967
[122]	train-auc:0.994683	valid-auc:0.907955
[123]	train-auc:0.994793	valid-auc:0.907944
[124]	train-auc:0.994905	valid-auc:0.908079
Stopping. Best iteration:
[74]	train-auc:0.985412	valid-auc:0.908928

C:\ProgramData\Anaconda3\lib\site-packages\mlcrate\backend.py:7: UserWarning: Timer.format_elapsed() has been deprecated in favour of Timer.fsince() and will be removed soon
  warn(message)
[mlcrate] Finished training fold 3 - took 23s - running score 0.90414775
[mlcrate] Running fold 4, 46978 train samples, 7830 validation samples
[0]	train-auc:0.901459	valid-auc:0.864566
Multiple eval metrics have been passed: 'valid-auc' will be used for early stopping.

Will train until valid-auc hasn't improved in 50 rounds.
[1]	train-auc:0.921762	valid-auc:0.888534
[2]	train-auc:0.931054	valid-auc:0.896112
[3]	train-auc:0.935614	valid-auc:0.900308
[4]	train-auc:0.941545	valid-auc:0.901916
[5]	train-auc:0.945525	valid-auc:0.900486
[6]	train-auc:0.947456	valid-auc:0.903586
[7]	train-auc:0.94926	valid-auc:0.90422
[8]	train-auc:0.950707	valid-auc:0.904181
[9]	train-auc:0.952439	valid-auc:0.904309
[10]	train-auc:0.953597	valid-auc:0.904615
[11]	train-auc:0.954862	valid-auc:0.904052
[12]	train-auc:0.955793	valid-auc:0.903614
[13]	train-auc:0.957049	valid-auc:0.903466
[14]	train-auc:0.95733	valid-auc:0.903685
[15]	train-auc:0.958691	valid-auc:0.903685
[16]	train-auc:0.959286	valid-auc:0.904354
[17]	train-auc:0.960352	valid-auc:0.904562
[18]	train-auc:0.961069	valid-auc:0.905131
[19]	train-auc:0.961189	valid-auc:0.904742
[20]	train-auc:0.962349	valid-auc:0.905299
[21]	train-auc:0.963504	valid-auc:0.905778
[22]	train-auc:0.963965	valid-auc:0.906539
[23]	train-auc:0.96469	valid-auc:0.906787
[24]	train-auc:0.965571	valid-auc:0.90738
[25]	train-auc:0.966257	valid-auc:0.907461
[26]	train-auc:0.966755	valid-auc:0.907484
[27]	train-auc:0.967096	valid-auc:0.907378
[28]	train-auc:0.967534	valid-auc:0.907898
[29]	train-auc:0.968154	valid-auc:0.908022
[30]	train-auc:0.968816	valid-auc:0.908167
[31]	train-auc:0.969039	valid-auc:0.908182
[32]	train-auc:0.969668	valid-auc:0.908553
[33]	train-auc:0.97033	valid-auc:0.908396
[34]	train-auc:0.971021	valid-auc:0.907997
[35]	train-auc:0.971605	valid-auc:0.908208
[36]	train-auc:0.971979	valid-auc:0.908295
[37]	train-auc:0.972272	valid-auc:0.908536
[38]	train-auc:0.972938	valid-auc:0.9085
[39]	train-auc:0.973463	valid-auc:0.908616
[40]	train-auc:0.973675	valid-auc:0.908629
[41]	train-auc:0.97436	valid-auc:0.908615
[42]	train-auc:0.974943	valid-auc:0.908485
[43]	train-auc:0.975499	valid-auc:0.908198
[44]	train-auc:0.976173	valid-auc:0.908287
[45]	train-auc:0.976596	valid-auc:0.907995
[46]	train-auc:0.976593	valid-auc:0.907862
[47]	train-auc:0.976853	valid-auc:0.908011
[48]	train-auc:0.977336	valid-auc:0.908221
[49]	train-auc:0.977757	valid-auc:0.907998
[50]	train-auc:0.978016	valid-auc:0.908218
[51]	train-auc:0.978408	valid-auc:0.908281
[52]	train-auc:0.978386	valid-auc:0.908365
[53]	train-auc:0.978905	valid-auc:0.908582
[54]	train-auc:0.979558	valid-auc:0.908661
[55]	train-auc:0.979952	valid-auc:0.908678
[56]	train-auc:0.980188	valid-auc:0.908484
[57]	train-auc:0.98055	valid-auc:0.908493
[58]	train-auc:0.981053	valid-auc:0.90856
[59]	train-auc:0.981351	valid-auc:0.908662
[60]	train-auc:0.981644	valid-auc:0.908506
[61]	train-auc:0.982008	valid-auc:0.908459
[62]	train-auc:0.982227	valid-auc:0.908452
[63]	train-auc:0.982579	valid-auc:0.90825
[64]	train-auc:0.98303	valid-auc:0.908084
[65]	train-auc:0.983294	valid-auc:0.908104
[66]	train-auc:0.983732	valid-auc:0.907935
[67]	train-auc:0.983856	valid-auc:0.907977
[68]	train-auc:0.984085	valid-auc:0.908053
[69]	train-auc:0.984243	valid-auc:0.90801
[70]	train-auc:0.984563	valid-auc:0.907857
[71]	train-auc:0.984804	valid-auc:0.907679
[72]	train-auc:0.985121	valid-auc:0.907624
[73]	train-auc:0.985252	valid-auc:0.907513
[74]	train-auc:0.985684	valid-auc:0.907437
[75]	train-auc:0.985753	valid-auc:0.907583
[76]	train-auc:0.986046	valid-auc:0.907513
[77]	train-auc:0.986176	valid-auc:0.907546
[78]	train-auc:0.986436	valid-auc:0.907686
[79]	train-auc:0.98695	valid-auc:0.907401
[80]	train-auc:0.98717	valid-auc:0.90745
[81]	train-auc:0.987445	valid-auc:0.907359
[82]	train-auc:0.987675	valid-auc:0.907387
[83]	train-auc:0.987965	valid-auc:0.907435
[84]	train-auc:0.988237	valid-auc:0.907596
[85]	train-auc:0.988601	valid-auc:0.907462
[86]	train-auc:0.988988	valid-auc:0.907629
[87]	train-auc:0.989216	valid-auc:0.907597
[88]	train-auc:0.989433	valid-auc:0.907544
[89]	train-auc:0.989698	valid-auc:0.907642
[90]	train-auc:0.990051	valid-auc:0.907514
[91]	train-auc:0.990191	valid-auc:0.90748
[92]	train-auc:0.990236	valid-auc:0.907579
[93]	train-auc:0.990449	valid-auc:0.907667
[94]	train-auc:0.990656	valid-auc:0.907657
[95]	train-auc:0.990831	valid-auc:0.907509
[96]	train-auc:0.991014	valid-auc:0.907412
[97]	train-auc:0.991158	valid-auc:0.907286
[98]	train-auc:0.991332	valid-auc:0.907334
[99]	train-auc:0.991553	valid-auc:0.907457
[100]	train-auc:0.991631	valid-auc:0.907459
[101]	train-auc:0.991838	valid-auc:0.907415
[102]	train-auc:0.992017	valid-auc:0.907528
[103]	train-auc:0.992197	valid-auc:0.907414
[104]	train-auc:0.992291	valid-auc:0.907371
[105]	train-auc:0.992402	valid-auc:0.907301
Stopping. Best iteration:
[55]	train-auc:0.979952	valid-auc:0.908678

C:\ProgramData\Anaconda3\lib\site-packages\mlcrate\backend.py:7: UserWarning: Timer.format_elapsed() has been deprecated in favour of Timer.fsince() and will be removed soon
  warn(message)
[mlcrate] Finished training fold 4 - took 20s - running score 0.9050537999999999
[mlcrate] Running fold 5, 46978 train samples, 7830 validation samples
[0]	train-auc:0.902815	valid-auc:0.869003
Multiple eval metrics have been passed: 'valid-auc' will be used for early stopping.

Will train until valid-auc hasn't improved in 50 rounds.
[1]	train-auc:0.922754	valid-auc:0.880714
[2]	train-auc:0.930071	valid-auc:0.886999
[3]	train-auc:0.935068	valid-auc:0.890989
[4]	train-auc:0.94083	valid-auc:0.89515
[5]	train-auc:0.945894	valid-auc:0.896986
[6]	train-auc:0.947604	valid-auc:0.897201
[7]	train-auc:0.94918	valid-auc:0.897859
[8]	train-auc:0.95101	valid-auc:0.897821
[9]	train-auc:0.952351	valid-auc:0.897315
[10]	train-auc:0.953212	valid-auc:0.897397
[11]	train-auc:0.954371	valid-auc:0.89793
[12]	train-auc:0.955215	valid-auc:0.899486
[13]	train-auc:0.955992	valid-auc:0.899453
[14]	train-auc:0.956698	valid-auc:0.89951
[15]	train-auc:0.958036	valid-auc:0.899714
[16]	train-auc:0.958649	valid-auc:0.900071
[17]	train-auc:0.959238	valid-auc:0.899443
[18]	train-auc:0.960117	valid-auc:0.899199
[19]	train-auc:0.960802	valid-auc:0.899458
[20]	train-auc:0.961409	valid-auc:0.899685
[21]	train-auc:0.962747	valid-auc:0.89949
[22]	train-auc:0.963732	valid-auc:0.89995
[23]	train-auc:0.964039	valid-auc:0.900158
[24]	train-auc:0.964474	valid-auc:0.900191
[25]	train-auc:0.965307	valid-auc:0.900285
[26]	train-auc:0.965873	valid-auc:0.899975
[27]	train-auc:0.96655	valid-auc:0.899608
[28]	train-auc:0.966754	valid-auc:0.899964
[29]	train-auc:0.967387	valid-auc:0.900061
[30]	train-auc:0.967748	valid-auc:0.900213
[31]	train-auc:0.968119	valid-auc:0.900011
[32]	train-auc:0.968894	valid-auc:0.900375
[33]	train-auc:0.969409	valid-auc:0.900643
[34]	train-auc:0.970098	valid-auc:0.900771
[35]	train-auc:0.970713	valid-auc:0.900831
[36]	train-auc:0.971313	valid-auc:0.901006
[37]	train-auc:0.971655	valid-auc:0.901349
[38]	train-auc:0.972148	valid-auc:0.901386
[39]	train-auc:0.972605	valid-auc:0.90127
[40]	train-auc:0.972967	valid-auc:0.901232
[41]	train-auc:0.973372	valid-auc:0.901183
[42]	train-auc:0.973678	valid-auc:0.901056
[43]	train-auc:0.974291	valid-auc:0.900918
[44]	train-auc:0.97506	valid-auc:0.900674
[45]	train-auc:0.975448	valid-auc:0.900707
[46]	train-auc:0.975543	valid-auc:0.900861
[47]	train-auc:0.975737	valid-auc:0.900761
[48]	train-auc:0.976061	valid-auc:0.900341
[49]	train-auc:0.976574	valid-auc:0.899856
[50]	train-auc:0.976704	valid-auc:0.899907
[51]	train-auc:0.977115	valid-auc:0.900034
[52]	train-auc:0.977328	valid-auc:0.899998
[53]	train-auc:0.97768	valid-auc:0.900165
[54]	train-auc:0.978056	valid-auc:0.90016
[55]	train-auc:0.978415	valid-auc:0.900264
[56]	train-auc:0.978961	valid-auc:0.900194
[57]	train-auc:0.979293	valid-auc:0.900065
[58]	train-auc:0.979831	valid-auc:0.900047
[59]	train-auc:0.9801	valid-auc:0.90005
[60]	train-auc:0.980518	valid-auc:0.900127
[61]	train-auc:0.980783	valid-auc:0.899963
[62]	train-auc:0.981055	valid-auc:0.900013
[63]	train-auc:0.9812	valid-auc:0.899898
[64]	train-auc:0.981633	valid-auc:0.899804
[65]	train-auc:0.981859	valid-auc:0.899759
[66]	train-auc:0.982291	valid-auc:0.899575
[67]	train-auc:0.982715	valid-auc:0.899601
[68]	train-auc:0.983047	valid-auc:0.899605
[69]	train-auc:0.983263	valid-auc:0.899571
[70]	train-auc:0.983793	valid-auc:0.899608
[71]	train-auc:0.984031	valid-auc:0.899518
[72]	train-auc:0.984286	valid-auc:0.899399
[73]	train-auc:0.984541	valid-auc:0.899536
[74]	train-auc:0.984714	valid-auc:0.899594
[75]	train-auc:0.985017	valid-auc:0.899348
[76]	train-auc:0.985272	valid-auc:0.899187
[77]	train-auc:0.985599	valid-auc:0.899097
[78]	train-auc:0.985913	valid-auc:0.89921
[79]	train-auc:0.986162	valid-auc:0.899237
[80]	train-auc:0.986438	valid-auc:0.899172
[81]	train-auc:0.986833	valid-auc:0.899147
[82]	train-auc:0.986997	valid-auc:0.899308
[83]	train-auc:0.987217	valid-auc:0.899442
[84]	train-auc:0.987534	valid-auc:0.899598
[85]	train-auc:0.987709	valid-auc:0.899649
[86]	train-auc:0.987905	valid-auc:0.899643
[87]	train-auc:0.988244	valid-auc:0.899762
[88]	train-auc:0.98855	valid-auc:0.899776
Stopping. Best iteration:
[38]	train-auc:0.972148	valid-auc:0.901386

C:\ProgramData\Anaconda3\lib\site-packages\mlcrate\backend.py:7: UserWarning: Timer.format_elapsed() has been deprecated in favour of Timer.fsince() and will be removed soon
  warn(message)
[mlcrate] Finished training fold 5 - took 17s - running score 0.9044425
[mlcrate] Running fold 6, 46980 train samples, 7828 validation samples
[0]	train-auc:0.895268	valid-auc:0.871597
Multiple eval metrics have been passed: 'valid-auc' will be used for early stopping.

Will train until valid-auc hasn't improved in 50 rounds.
[1]	train-auc:0.919045	valid-auc:0.884449
[2]	train-auc:0.927116	valid-auc:0.891478
[3]	train-auc:0.930787	valid-auc:0.892921
[4]	train-auc:0.938608	valid-auc:0.89702
[5]	train-auc:0.94363	valid-auc:0.899123
[6]	train-auc:0.946251	valid-auc:0.899803
[7]	train-auc:0.948188	valid-auc:0.900711
[8]	train-auc:0.949663	valid-auc:0.901896
[9]	train-auc:0.950772	valid-auc:0.902318
[10]	train-auc:0.952315	valid-auc:0.902608
[11]	train-auc:0.953338	valid-auc:0.903006
[12]	train-auc:0.953595	valid-auc:0.904083
[13]	train-auc:0.954849	valid-auc:0.90444
[14]	train-auc:0.955655	valid-auc:0.904791
[15]	train-auc:0.956714	valid-auc:0.90501
[16]	train-auc:0.957824	valid-auc:0.905546
[17]	train-auc:0.959311	valid-auc:0.9057
[18]	train-auc:0.960434	valid-auc:0.905674
[19]	train-auc:0.960502	valid-auc:0.905962
[20]	train-auc:0.961088	valid-auc:0.906461
[21]	train-auc:0.962205	valid-auc:0.906595
[22]	train-auc:0.963045	valid-auc:0.907103
[23]	train-auc:0.963811	valid-auc:0.90727
[24]	train-auc:0.964482	valid-auc:0.907004
[25]	train-auc:0.965094	valid-auc:0.907554
[26]	train-auc:0.965833	valid-auc:0.907626
[27]	train-auc:0.966209	valid-auc:0.90801
[28]	train-auc:0.96659	valid-auc:0.907568
[29]	train-auc:0.967111	valid-auc:0.907396
[30]	train-auc:0.967425	valid-auc:0.907542
[31]	train-auc:0.968087	valid-auc:0.90715
[32]	train-auc:0.968734	valid-auc:0.907327
[33]	train-auc:0.969258	valid-auc:0.907302
[34]	train-auc:0.969645	valid-auc:0.907351
[35]	train-auc:0.970403	valid-auc:0.907603
[36]	train-auc:0.970672	valid-auc:0.907438
[37]	train-auc:0.97086	valid-auc:0.907625
[38]	train-auc:0.971258	valid-auc:0.907696
[39]	train-auc:0.971889	valid-auc:0.907306
[40]	train-auc:0.972372	valid-auc:0.907192
[41]	train-auc:0.972899	valid-auc:0.907139
[42]	train-auc:0.973361	valid-auc:0.90639
[43]	train-auc:0.974107	valid-auc:0.906606
[44]	train-auc:0.974945	valid-auc:0.90665
[45]	train-auc:0.975523	valid-auc:0.906812
[46]	train-auc:0.975792	valid-auc:0.906771
[47]	train-auc:0.976222	valid-auc:0.907005
[48]	train-auc:0.976621	valid-auc:0.907062
[49]	train-auc:0.977217	valid-auc:0.907024
[50]	train-auc:0.977716	valid-auc:0.90703
[51]	train-auc:0.977856	valid-auc:0.90691
[52]	train-auc:0.978046	valid-auc:0.906989
[53]	train-auc:0.978405	valid-auc:0.906823
[54]	train-auc:0.978695	valid-auc:0.906696
[55]	train-auc:0.979204	valid-auc:0.906637
[56]	train-auc:0.979608	valid-auc:0.906856
[57]	train-auc:0.979989	valid-auc:0.90688
[58]	train-auc:0.980536	valid-auc:0.906838
[59]	train-auc:0.980927	valid-auc:0.906665
[60]	train-auc:0.981252	valid-auc:0.906761
[61]	train-auc:0.981676	valid-auc:0.906882
[62]	train-auc:0.981863	valid-auc:0.906897
[63]	train-auc:0.982059	valid-auc:0.907105
[64]	train-auc:0.982322	valid-auc:0.907308
[65]	train-auc:0.982701	valid-auc:0.907286
[66]	train-auc:0.983035	valid-auc:0.907082
[67]	train-auc:0.983259	valid-auc:0.90723
[68]	train-auc:0.983616	valid-auc:0.907152
[69]	train-auc:0.983926	valid-auc:0.907155
[70]	train-auc:0.984353	valid-auc:0.907332
[71]	train-auc:0.984581	valid-auc:0.907329
[72]	train-auc:0.98484	valid-auc:0.907434
[73]	train-auc:0.985125	valid-auc:0.907511
[74]	train-auc:0.985368	valid-auc:0.9074
[75]	train-auc:0.985533	valid-auc:0.90758
[76]	train-auc:0.985712	valid-auc:0.907485
[77]	train-auc:0.986053	valid-auc:0.907671
Stopping. Best iteration:
[27]	train-auc:0.966209	valid-auc:0.90801

C:\ProgramData\Anaconda3\lib\site-packages\mlcrate\backend.py:7: UserWarning: Timer.format_elapsed() has been deprecated in favour of Timer.fsince() and will be removed soon
  warn(message)
[mlcrate] Finished training fold 6 - took 15s - running score 0.9049521428571429
C:\ProgramData\Anaconda3\lib\site-packages\mlcrate\backend.py:7: UserWarning: Timer.format_elapsed() has been deprecated in favour of Timer.fsince() and will be removed soon
  warn(message)
[mlcrate] Finished training 7 XGBoost models, took 2m21s

In [34]:
import numpy as np
from sklearn.metrics import matthews_corrcoef

def find_matthews_threshold(p_valid, y_valid, try_all=False, verbose=False):
    p_valid, y_valid = np.array(p_valid), np.array(y_valid)

    best = 0
    best_score = -2
    totry = np.arange(0,1,0.01) if try_all is False else np.unique(p_valid)
    for t in totry:
        score = matthews_corrcoef(y_valid, p_valid > t)
        if score > best_score:
            best_score = score
            best = t
    if verbose is True: 
        print('Best score: ', round(best_score, 5), ' @ threshold ', best)

    return best

def best_threshold_submission(p_valid, y_valid, p_test, try_all=False, verbose=False):
    p_test = np.array(p_test)
    thresh = find_matthews_threshold(p_valid, y_valid, try_all, verbose)
    return p_test > thresh

submission_values = best_threshold_submission(val_preds, y_valid, p_test, True, True)


C:\ProgramData\Anaconda3\lib\site-packages\sklearn\metrics\classification.py:538: RuntimeWarning: invalid value encountered in double_scalars
  mcc = cov_ytyp / np.sqrt(cov_ytyt * cov_ypyp)
Best score:  0.50661  @ threshold  0.617861

In [35]:
submission_values*1


Out[35]:
array([0, 0, 0, ..., 0, 0, 1])

In [36]:
sample = pd.read_csv('sample_submission.csv')

In [48]:
def get_xgb_imp(xgb, feat_names):
    imp_vals = xgb.get_fscore()
    feats_imp = pd.DataFrame(imp_vals,index=np.arange(2)).T
    feats_imp.iloc[:,0]= feats_imp.index    
    feats_imp.columns=['feature','importance']
    feats_imp.sort_values('importance',inplace=True,ascending=False)
    feats_imp.reset_index(drop=True,inplace=True)
    return feats_imp

feature_importance_df = get_xgb_imp(model[3], feat_names=[])

In [ ]:
# model, p_train, p_test = mlcrate.xgb.train_kfold(params, train_all, target, test_all, folds = 7, stratify=target)

In [51]:
predictions_test = submission_values#np.where(p_test>=0.75, 1,0)
sample['is_promoted'] = predictions_test
sample.to_csv('preds_2_xgb_with_dummies_optimised_version.csv', index=False)

In [52]:
import joblib
#save model
joblib.dump(model[0], 'xgb_model_1')


Out[52]:
['xgb_model_1']

In [250]:
train_all, target, test_all = np.save('train_all.npy',np.hstack((train_all, pd.get_dummies(df_raw_cat)))), np.save('target.npy',target), np.save('test_all.npy',np.hstack((test_all,pd.get_dummies(df_test_cat))))