In [1]:
import time
import warnings
import xgboost as xgb
import lightgbm as lgb
# import category_encoders as cat_ed
import gc, mlcrate, glob

# from gplearn.genetic import SymbolicTransformer, SymbolicRegressor
from fastai.imports import *
from fastai.structured import *
from pandas_summary import DataFrameSummary
from sklearn.ensemble import RandomForestRegressor, ExtraTreesRegressor
from IPython.display import display

from catboost import CatBoostRegressor
from scipy.cluster import hierarchy as hc
from collections import Counter

from sklearn import metrics
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error
from sklearn.metrics import roc_auc_score, log_loss
from sklearn.model_selection import KFold, StratifiedKFold
from sklearn.model_selection import GridSearchCV
from sklearn.decomposition import PCA, TruncatedSVD, FastICA, FactorAnalysis
from sklearn.random_projection import GaussianRandomProjection, SparseRandomProjection
from sklearn.cluster import KMeans

from sklearn.metrics import accuracy_score
from sklearn.neighbors import KNeighborsRegressor
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import AdaBoostRegressor, GradientBoostingRegressor
from sklearn.naive_bayes import GaussianNB
from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis
from sklearn.neural_network import MLPRegressor
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import RBF

# silence all warnings from sklearn, seaborn, etc.
def ignore_warn(*args, **kwargs):
    pass
warnings.warn = ignore_warn

# option_context is a context manager and is a no-op when called bare;
# set_option applies the display limits globally
pd.set_option("display.max_rows", 1000)
pd.set_option("display.max_columns", 1000)

PATH = os.getcwd()

df_raw = pd.read_csv(f'{PATH}\\train.csv', low_memory=False)
df_test = pd.read_csv(f'{PATH}\\test.csv', low_memory=False)

def display_all(df):
    with pd.option_context("display.max_rows", 100): 
        with pd.option_context("display.max_columns", 100): 
            display(df)
            
def make_submission(probs):
    sample = pd.read_csv(f'{PATH}\\sample_submission.csv')
    submit = sample.copy()
    submit['Upvotes'] = probs
    return submit
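display_all temporarily lifts the row/column display caps; a typical use (a sketch, not part of the original run) is to eyeball every column's summary at once:

    display_all(df_raw.describe(include='all').T)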

In [2]:
df_raw.head()


Out[2]:
ID Tag Reputation Answers Username Views Upvotes
0 52664 a 3942.0 2.0 155623 7855.0 42.0
1 327662 a 26046.0 12.0 21781 55801.0 1175.0
2 468453 c 1358.0 4.0 56177 8067.0 60.0
3 96996 a 264.0 3.0 168793 27064.0 9.0
4 131465 c 4271.0 4.0 112223 13986.0 83.0

Outliers + EDA


In [3]:
man_train_list = df_raw.Username.unique()
man_test_list = df_test.Username.unique()
print("Train: {0}".format(len(man_train_list)))
print("Test: {0}".format(len(man_test_list)))


Train: 141802
Test: 79351

In [4]:
man_not_in_test = set(man_train_list) - set(man_test_list)
man_not_in_train = set(man_test_list) - set(man_train_list)

print("{} man are featured in train but not in test".format(len(man_not_in_test)))
print("{} man are featured in test but not in train".format(len(man_not_in_train)))


96388 users are featured in train but not in test
33937 users are featured in test but not in train

In [5]:
#df_raw.drop(index = df_raw.loc[list(man_not_in_test)].index, inplace=True)
df_raw.drop(index = df_raw[(df_raw['Reputation'] == 0) & (df_raw['Upvotes'] != 0)].index, inplace=True)
df_raw.drop(index = df_raw[(df_raw['Upvotes'] == 0) & (df_raw['Views']>1000)].index, inplace=True)

In [6]:
df_raw.sort_values(by=['Username', 'Reputation', 'Views'], inplace=True)

In [7]:
temp1 = df_raw.groupby('Username').count().iloc[:,-1]
temp2 = df_test.groupby('Username').count().iloc[:,-1]
df_man = pd.concat([temp1,temp2], axis = 1, join = 'outer')
df_man.columns = ['train_count','test_count']

In [8]:
df_man.sort_values(by = 'train_count', ascending = False).plot.scatter(x = 'train_count', y = 'test_count')


Out[8]:
<matplotlib.axes._subplots.AxesSubplot at 0x2824da390b8>

In [9]:
xyz = pd.concat([df_raw.groupby('Username').mean(),df_raw.groupby('Username').count()], axis = 1).iloc[:,:-5]
xyz.columns = ['ID', 'Reputation', 'Answers', 'Views', 'Upvotes', 'count']
############################################################################################# Mean Aggs

xyz_sorted = xyz.sort_values(by='count', ascending=False).reset_index()
unames  = xyz_sorted['Username'].values
count   = xyz_sorted['count'].values
answers = xyz_sorted['Answers'].values
views   = xyz_sorted['Views'].values
repo    = xyz_sorted['Reputation'].values

df_raw['agg_count'] = df_raw['Username'].map(dict(zip(unames, count)))
df_raw['agg_repo']  = df_raw['Username'].map(dict(zip(unames, repo)))

In [10]:
xyz = pd.concat([df_test.groupby('Username').mean(),df_test.groupby('Username').count()], axis = 1).iloc[:,:-4]
xyz.columns = ['ID', 'Reputation', 'Answers', 'Views', 'count']
############################################################################################# Mean Aggs

xyz_sorted = xyz.sort_values(by='count', ascending=False).reset_index()
unames  = xyz_sorted['Username'].values
count   = xyz_sorted['count'].values
answers = xyz_sorted['Answers'].values
views   = xyz_sorted['Views'].values
repo    = xyz_sorted['Reputation'].values

df_test['agg_count'] = df_test['Username'].map(dict(zip(unames, count)))
df_test['agg_repo']  = df_test['Username'].map(dict(zip(unames, repo)))
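The map-based aggregation above can also be written with groupby().transform, which aligns the per-user statistics back onto the original rows directly; a minimal sketch producing the same agg_count / agg_repo columns:

    for df in (df_raw, df_test):
        df['agg_count'] = df.groupby('Username')['Username'].transform('count')
        df['agg_repo']  = df.groupby('Username')['Reputation'].transform('mean')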

In [11]:
df_raw.shape, df_test.shape


Out[11]:
((320674, 9), (141448, 8))

In [12]:
df_raw[df_raw['Username'] == 98].head(10)  # interesting: all rows for this user share the same Reputation. Why?


Out[12]:
ID Tag Reputation Answers Username Views Upvotes agg_count agg_repo
14992 24315 c 19251.0 4.0 98 2704.0 74.0 36 19251.0
259228 295193 x 19251.0 3.0 98 2795.0 45.0 36 19251.0
143092 219180 c 19251.0 2.0 98 4395.0 179.0 36 19251.0
263281 119851 c 19251.0 5.0 98 4428.0 98.0 36 19251.0
185191 221751 c 19251.0 2.0 98 5214.0 131.0 36 19251.0
223160 423665 i 19251.0 4.0 98 5777.0 112.0 36 19251.0
278323 254827 i 19251.0 3.0 98 6032.0 151.0 36 19251.0
36241 215050 i 19251.0 1.0 98 8579.0 156.0 36 19251.0
236475 138498 j 19251.0 2.0 98 9932.0 293.0 36 19251.0
253990 437801 c 19251.0 1.0 98 12914.0 294.0 36 19251.0

In [13]:
nunique_repo_trn  = df_raw.groupby('Username')['Reputation'].nunique().reset_index()
nunique_repo_test = df_test.groupby('Username')['Reputation'].nunique().reset_index()

unames_trn,  repo_trn  = nunique_repo_trn['Username'].values,  nunique_repo_trn['Reputation'].values
unames_test, repo_test = nunique_repo_test['Username'].values, nunique_repo_test['Reputation'].values

In [14]:
df_raw['unique_repo']  = df_raw['Username'].map(dict(zip(unames_trn, repo_trn)))
df_test['unique_repo'] = df_test['Username'].map(dict(zip(unames_test, repo_test)))

In [15]:
df_raw['one_time_user'] = False
df_test['one_time_user'] = False

unames_trn  = df_raw[(df_raw['unique_repo'] == 1) & (df_raw['agg_count'] == 1)]['Username'].values
unames_test = df_test[(df_test['unique_repo'] == 1) & (df_test['agg_count'] == 1)]['Username'].values

df_raw['one_time_user']  = df_raw['Username'].map({k: True for k in unames_trn})
df_test['one_time_user'] = df_test['Username'].map({k: True for k in unames_test})

# map() leaves users outside the dict as NaN; this frame-wide fillna turns
# those (and any other NaNs in the frame) into False
df_raw.fillna(False, inplace=True)
df_test.fillna(False, inplace=True)

In [16]:
df_raw[(df_raw['Views'] > 10000) & (df_raw['Answers'] == 1)];  # remember: bin Views later

In [17]:
df_raw[(df_raw['Views'] > 75000) & (df_raw['Answers'] == 1) & (df_raw['one_time_user'] == True)];

In [18]:
min(df_raw['Views']), max(df_raw['Views']), min(df_test['Views']), max(df_test['Views'])


Out[18]:
(9.0, 5231058.0, 9.0, 5004669.0)

In [19]:
df_raw['avg_repo'] = df_raw['Reputation']/ df_raw['unique_repo']
df_test['avg_repo'] = df_test['Reputation']/ df_test['unique_repo']

In [20]:
add_trans = ['Reputation', 'Answers', 'Views']

for col in add_trans:
    df_raw[f'log_trans_{col}']  = np.log(df_raw[col] + 1)   # +1 avoids log(0)
    df_test[f'log_trans_{col}'] = np.log(df_test[col] + 1)
    df_raw[f'sqrt_trans_{col}']  = np.sqrt(df_raw[col])
    df_test[f'sqrt_trans_{col}'] = np.sqrt(df_test[col])
    
df_raw['repo_per_Answers'] = df_raw['Reputation'] / (df_raw['Answers']+1)
df_raw['repo_per_Views']   = df_raw['Reputation'] / df_raw['Views']
df_raw['log_trans_repo_per_Answers'] = np.log(df_raw['repo_per_Answers'] + 1)
df_raw['log_trans_repo_per_Views']   = np.log(df_raw['repo_per_Views'] + 1)

df_test['repo_per_Answers'] = df_test['Reputation'] / (df_test['Answers'] +1)
df_test['repo_per_Views']   = df_test['Reputation'] / df_test['Views']
df_test['log_trans_repo_per_Answers'] = np.log(df_test['repo_per_Answers'] + 1)
df_test['log_trans_repo_per_Views']   = np.log(df_test['repo_per_Views'] + 1)

df_raw.shape, df_test.shape


Out[20]:
((320674, 22), (141448, 21))
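np.log1p(x) computes log(1 + x) with better precision for values near zero; it is a drop-in replacement for the log transforms above (a sketch, not part of the original run):

    for col in add_trans:
        df_raw[f'log_trans_{col}']  = np.log1p(df_raw[col])
        df_test[f'log_trans_{col}'] = np.log1p(df_test[col])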

In [21]:
tag_nunique_trn  = df_raw.groupby('Username')['Tag'].nunique().reset_index()
tag_nunique_test = df_test.groupby('Username')['Tag'].nunique().reset_index()

df_raw['unique_tag']  = df_raw['Username'].map(dict(zip(tag_nunique_trn['Username'],  tag_nunique_trn['Tag'])))
df_test['unique_tag'] = df_test['Username'].map(dict(zip(tag_nunique_test['Username'], tag_nunique_test['Tag'])))

In [22]:
TAG_SCORES = {'c': 100, 'j': 90, 'p': 80, 'i': 70, 'a': 60,
              's': 50, 'h': 40, 'o': 30, 'r': 20}

def get_score(tags):
    # 10 base points plus a fixed weight per tag; unknown tags contribute nothing
    return 10 + sum(TAG_SCORES.get(t, 0) for t in tags)

unames_trn  = df_raw.groupby('Username')['Tag'].nunique().reset_index()['Username'].values
unames_test = df_test.groupby('Username')['Tag'].nunique().reset_index()['Username'].values
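A quick sanity check of the scoring (a sketch): a user who has posted under tags 'c' and 'j' scores 10 + 100 + 90.

    assert get_score({'c', 'j'}) == 200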

In [23]:
%%time
d = {}

# one full-frame filter per user: this is the slow part
for i in unames_trn[::-1]:
    d[i] = set(df_raw[df_raw['Username'] == i]['Tag'].values)

for k, v in d.items():
    d[k] = get_score(v)   # get_score iterates the set directly
    
df_raw['skill_score'] = df_raw['Username'].map(d)
del d
gc.collect()


Wall time: 2min 9s

In [24]:
%%time

d = {}

for i in unames_test[::-1]:
    d[i] = set(df_test[df_test['Username'] == i]['Tag'].values)

for k, v in d.items():
    d[k] = get_score(v)
    
df_test['skill_score'] = df_test['Username'].map(d)
del d
gc.collect()


Wall time: 1min 2s
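Both cells above filter the full frame once per user, which is why they take minutes. A vectorized equivalent via a single groupby pass (a sketch, assuming the same get_score) should run far faster:

    for df in (df_raw, df_test):
        skill = df.groupby('Username')['Tag'].agg(lambda s: get_score(set(s)))
        df['skill_score'] = df['Username'].map(skill)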

In [25]:
df_raw['rep_per_skill'] = df_raw['Reputation']/ df_raw['skill_score']
df_raw['skill_per_tag'] = df_raw['skill_score']/ df_raw['unique_tag']
df_raw['views_per_ans'] = df_raw['Views'] / (df_raw['Answers']+ 1)

df_test['rep_per_skill'] = df_test['Reputation']/ df_test['skill_score']
df_test['skill_per_tag'] = df_test['skill_score']/ df_test['unique_tag']
df_test['views_per_ans'] = df_test['Views'] / (df_test['Answers']+ 1)

In [26]:
plt.scatter(range(df_raw.shape[0]), np.sort(np.log(df_raw.Upvotes+2)))


Out[26]:
<matplotlib.collections.PathCollection at 0x2825171e8d0>

In [223]:
df_raw.to_csv(f'{PATH}\\new__train.csv', index=None)
df_test.to_csv(f'{PATH}\\new__test.csv', index=None)

In [27]:
min(set(df_raw['Reputation'])), max(set(df_raw['Reputation']))


Out[27]:
(0.0, 1042428.0)

In [28]:
max(df_test['Answers'])


Out[28]:
73.0

In [29]:
min(df_raw['Answers'])


Out[29]:
0.0

In [30]:
bins = [-1., 5., 10., 15., 20., 25., 30., 35., 40., 45., 50., 55., 60., 70., 80.]
labels = [i+1 for i in range(len(bins) - 1)]
bin_cols = ['Answers']

for col in bin_cols:
    df_raw[f'bin_{col}']  = pd.cut(df_raw[col],  bins, labels=labels)
    df_test[f'bin_{col}'] = pd.cut(df_test[col], bins, labels=labels)

bins = [0, 5000, 10000, 25000, 50000, 75000, 100000, 150000, 200000, 250000, 300000, 350000, 400000, 10000**2]
labels = [i+1 for i in range(len(bins) - 1)]
bin_cols = ['Views']

for col in bin_cols:
    df_raw[f'bin_{col}']  = pd.cut(df_raw[col],  bins, labels=labels)
    df_test[f'bin_{col}'] = pd.cut(df_test[col], bins, labels=labels)
    
bins = [-1, 5000, 10000, 25000, 50000, 75000, 100000, 150000, 200000, 250000, 500000, 750000, 1000000, 2000000, 10000**2]
labels = [i+1 for i in range(len(bins) - 1)]
bin_cols = ['Reputation']

for col in bin_cols:
    df_raw[f'bin_{col}']  = pd.cut(df_raw[col],  bins, labels=labels)
    df_test[f'bin_{col}'] = pd.cut(df_test[col], bins, labels=labels)
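A quick look at how the rows distribute across the new bins (a sketch; pd.cut returns ordered categoricals, so sort_index keeps the bins in order):

    df_raw['bin_Views'].value_counts().sort_index()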

In [32]:
df_raw['Tag'] = df_raw['Tag'].astype('category')
df_test['Tag'] = df_test['Tag'].astype('category')

In [33]:
target = df_raw.Upvotes.values

Modelling


In [34]:
model = CatBoostRegressor(iterations=300, learning_rate=0.06, depth=8, loss_function='RMSE')

In [35]:
df_raw.drop(['ID','Upvotes'], axis=1,inplace=True)
df_test.drop(['ID'], axis=1,inplace=True)

In [38]:
X_train, X_valid, y_train, y_valid = train_test_split(df_raw, target, test_size=0.2, random_state=42)

In [39]:
len(df_raw.columns)


Out[39]:
28

In [ ]:
model.fit(X_train, y_train, cat_features=[0, 8, 20, 25, 26, 27], eval_set=(X_valid, y_valid))


0:	learn: 3480.2215870	test: 3721.5029306	best: 3721.5029306 (0)	total: 688ms	remaining: 3m 25s
1:	learn: 3353.6080345	test: 3609.7501958	best: 3609.7501958 (1)	total: 1.38s	remaining: 3m 25s
2:	learn: 3241.6972829	test: 3494.8633977	best: 3494.8633977 (2)	total: 2.06s	remaining: 3m 24s
...
50:	learn: 1389.0570462	test: 1843.4505770	best: 1843.4505770 (50)	total: 38.2s	remaining: 3m 6s
...
100:	learn: 1071.7214064	test: 1542.0816811	best: 1542.0816811 (100)	total: 1m 22s	remaining: 2m 43s
...
150:	learn: 927.4433131	test: 1468.6734439	best: 1468.6734439 (150)	total: 2m 5s	remaining: 2m 4s
...
200:	learn: 860.2607465	test: 1449.2134974	best: 1448.8775572 (199)	total: 2m 47s	remaining: 1m 22s
...
243:	learn: 824.8837176	test: 1432.1703464	best: 1431.0884774 (241)	total: 3m 25s	remaining: 47.2s
244:	learn: 824.2335379	test: 1431.8934236	best: 1431.0884774 (241)	total: 3m 26s	remaining: 46.4s
245:	learn: 824.2253533	test: 1431.9073648	best: 1431.0884774 (241)	total: 3m 26s	remaining: 45.4s

In [ ]:
model.save_model(f'{PATH}\\catboost_new_feats_model_depth_8', export_parameters=dict())

In [ ]:
preds = model.predict(df_test)
preds[:10]

In [ ]:
submit = make_submission(preds)

In [ ]:
submit.to_csv(f'{PATH}\\Adi_catboost_with_new_feats_10092018_depth_8.csv', index=None)

XGBoost


In [ ]:
from sklearn.metrics import mean_squared_error as mse

def runXGB(train_X, train_y, test_X, test_y=None):
    params = {}
    params['booster'] = 'gbtree'
    params['objective'] = 'gpu:reg:linear'  # GPU objective from 0.7x-era XGBoost; current releases use 'reg:squarederror' with tree_method='gpu_hist'
    params['eta'] = 0.02
    params['min_child_weight'] = 2
    params['subsample'] = 0.9
    params['colsample_bytree'] = 0.8
    params['silent'] = 0
    params['max_depth'] = 8
    params['seed'] = 1
    params['alpha'] = .05
    params['tree_method'] = 'gpu_hist'
    params['gamma'] = 3

    plst = list(params.items())
    num_rounds = 900

    xgtrain = xgb.DMatrix(train_X, label=train_y)
    xgtest = xgb.DMatrix(test_X)
    model = xgb.train(plst, xgtrain, num_rounds)
    pred_test_y = model.predict(xgtest)
    return model, pred_test_y

def rmse(act_y, pred_y):
    return np.sqrt(mse(act_y, pred_y))
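
Before fitting on the full frame, the helper can be sanity-checked on the same 80/20 split used for CatBoost above (a hedged sketch; one-hot encode before splitting so train and validation share identical dummy columns):

    dummies = pd.get_dummies(df_raw, prefix='dummy')
    Xt, Xv, yt, yv = train_test_split(dummies, target, test_size=0.2, random_state=42)
    _, val_preds = runXGB(Xt, yt, Xv)
    print('validation RMSE:', rmse(yv, val_preds))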

In [ ]:
model_xgb, preds = runXGB(pd.get_dummies(df_raw,prefix='dummy'), target, pd.get_dummies(df_test,prefix='dummy'))
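
As with CatBoost earlier, the XGB test predictions would feed the same submission helper (a sketch; the output filename is hypothetical):

    submit = make_submission(preds)
    submit.to_csv(f'{PATH}\\Adi_xgb_with_new_feats.csv', index=None)  # hypothetical filename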