In [50]:
import pandas as pd
import numpy as np
import os
import math
import graphlab
import graphlab as gl
import graphlab.aggregate as agg
from graphlab import SArray
In [2]:
'''Data path on the "steel cannon" machine'''
path = '/home/zongyi/bimbo_data/'
In [4]:
train = gl.SFrame.read_csv(path + 'train_lag5_w9_mean.csv', verbose=False)
In [4]:
town = gl.SFrame.read_csv(path + 'towns.csv', verbose=False)
train = train.join(town, on=['Agencia_ID','Producto_ID'], how='left')
train = train.fillna('t_c',1)
train = train.fillna('tcc',0)
train = train.fillna('tp_sum',0)
del train['Town']
del train['t_c']
In [5]:
del train['n_t']
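The next cell drops the row id and the raw sales, return, and demand columns. The model further down is trained on a log-scale target (the Demada_log column named in the training call), which is assumed to come precomputed in train_lag5_w9_mean.csv; the expm1 applied to the predictions at the end of the notebook implies a log1p transform. A minimal sketch of how such a column could be derived, which would have to run before Demanda_uni_equil is dropped:
In [ ]:
# Sketch only, assuming the target is log1p of the adjusted demand;
# the column name (spelling included) follows the training call below.
train['Demada_log'] = train['Demanda_uni_equil'].apply(lambda x: math.log1p(x))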
In [6]:
del train['id']
del train['Venta_uni_hoy']
del train['Venta_hoy']
del train['Dev_uni_proxima']
del train['Dev_proxima']
del train['Demanda_uni_equil']
In [6]:
# relag_train = gl.SFrame.read_csv(path + 're_lag_train.csv', verbose=False)
# train = train.join(relag_train, on=['Cliente_ID','Producto_ID','Semana'], how='left')
# train = train.fillna('re_lag1',0)
# train = train.fillna('re_lag2',0)
# train = train.fillna('re_lag3',0)
# train = train.fillna('re_lag4',0)
# train = train.fillna('re_lag5',0)
# del relag_train
In [7]:
# pd = gl.SFrame.read_csv(path + 'products.csv', verbose=False)
# train = train.join(pd, on=['Producto_ID'], how='left')
# train = train.fillna('prom',0)
# train = train.fillna('weight',0)
# train = train.fillna('pieces',1)
# train = train.fillna('w_per_piece',0)
# train = train.fillna('healthy',0)
# train = train.fillna('drink',0)
# del train['brand']
# del train['NombreProducto']
# del pd
In [8]:
# client = gl.SFrame.read_csv(path + 'clients.csv', verbose=False)
# train = train.join(client, on=['Cliente_ID'], how='left')
# del client
In [9]:
# cluster = gl.SFrame.read_csv(path + 'prod_cluster.csv', verbose=False)
# cluster = cluster[['Producto_ID','cluster']]
# train = train.join(cluster, on=['Producto_ID'], how='left')
In [51]:
# del train['Canal_ID']
# del train['week_times']
# del train['Semana']
# del train['lag_sum']
# del train['prior_sum']
# train = train.fillna('lag1',0)
# train = train.fillna('lag2',0)
# train = train.fillna('lag3',0)
# train = train.fillna('lag4',0)
# train = train.fillna('lag5',0)
# # train = train.fillna('lag_sum',0)
# # train = train.fillna('prior_sum',0)
# train = train.fillna('n_a',0)
# train = train.fillna('n_r',0)
# train = train.fillna('n_c',0)
# train = train.fillna('n_p',0)
print train.column_names()
print len(train.column_names())
In [52]:
# Make a train-test split
# train_data, test_data = train.random_split(0.9,seed=543)
# Create a model.
model = gl.boosted_trees_regression.create(train, target='Demada_log',  # target column as named in the precomputed training CSV
                                           step_size=0.1,
                                           max_iterations=1200,
                                           max_depth=10,
                                           metric='rmse',
                                           random_seed=461,
                                           column_subsample=0.7,
                                           row_subsample=0.85,
                                           validation_set=None,
                                           model_checkpoint_path=path,
                                           model_checkpoint_interval=1200)
# Progress from an earlier run, apparently iteration | elapsed time (s) | training rmse | validation rmse:
# 500 | 14060.836344 | 0.435407 | 0.447519
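With model_checkpoint_interval=1200 effectively only the final state is checkpointed to path. Independently of that, the finished model can be persisted and reloaded with the standard GraphLab Create save / load_model calls, so the 1200-iteration run does not have to be repeated; the file name below is hypothetical:
In [ ]:
# Hypothetical file name; model.save and gl.load_model are standard GraphLab Create calls.
model.save(path + 'bst_w9_lag5_model')
# model = gl.load_model(path + 'bst_w9_lag5_model')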
In [21]:
from IPython.core.pylabtools import figsize
import numpy as np
from matplotlib import pyplot as plt
import seaborn as sns
sns.set_style('darkgrid', {'grid.color': '.8','grid.linestyle': u'--'})
%matplotlib inline
figsize(16, 6)
plt.scatter(model.progress['Iteration'], model.progress['Training-rmse'],alpha=.5)
plt.ylim(.4,.5)
# plt.xticks(rotation=45)
Out[21]:
In [48]:
w = model.get_feature_importance()
w = w.add_row_number()
w
Out[48]:
In [49]:
from IPython.core.pylabtools import figsize
import numpy as np
from matplotlib import pyplot as plt
import seaborn as sns
sns.set_style('darkgrid', {'grid.color': '.8','grid.linestyle': u'--'})
%matplotlib inline
figsize(16, 9)
plt.bar(w['id'], w['count'], tick_label=w['name'])
plt.xticks(rotation=45)
Out[49]:
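When the tick labels in the bar chart get crowded, the same importance counts can be read straight from the SFrame with its standard sort and print_rows methods:
In [ ]:
# Text view of the feature-importance table, highest count first.
w.sort('count', ascending=False).print_rows(num_rows=len(w))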
In [ ]:
In [20]:
# Save predictions to an SArray
predictions = model.predict(train)
# Evaluate the model and save the results into a dictionary
results = model.evaluate(train)
print results
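results holds the RMSE of the log-scale predictions on the training data; assuming the target is log1p-transformed demand (consistent with the expm1 inversion applied later), this is effectively the competition's RMSLE measured in-sample. A quick manual cross-check using the predictions just computed:
In [ ]:
# Manual RMSE on the log target; should agree with results['rmse'].
err = predictions - train['Demada_log']
print math.sqrt((err * err).mean())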
In [53]:
'''Add the week-10 predictions as a lag feature for week 11'''
def feature_w11(test, lag_sum=0, prior_sum=0):
    # Keep a full copy (with id and Semana) for the joins; predict on the stripped frame.
    test_full = test.copy()
    ids = test['id']
    del test['id']
    del test['Semana']
    demand_log = model.predict(test)
    sub1 = gl.SFrame({'id': ids, 'Demanda_uni_equil': demand_log})
    test_full = test_full.join(sub1, on=['id'], how='left')
    # Mean predicted demand per (Semana, Cliente_ID, Producto_ID), shifted one week
    # forward so the week-10 predictions become the lag for the week-11 rows.
    lag11 = test_full.groupby(key_columns=['Semana', 'Cliente_ID', 'Producto_ID'],
                              operations={'lag11': agg.MEAN('Demanda_uni_equil')})
    lag11['Semana'] = lag11['Semana'].apply(lambda x: x + 1)
    test_full = test_full.join(lag11, on=['Semana', 'Cliente_ID', 'Producto_ID'], how='left')
    test_full = test_full.fillna('lag11', 0)
    test_full['lag1'] = test_full['lag1'] + test_full['lag11']
    if lag_sum == 1:
        test_full['lag_sum'] = test_full['lag_sum'] + test_full['lag11']
    if prior_sum == 1:
        # Same idea for the cumulative prior_sum feature, using summed predictions.
        lag_sum11 = test_full.groupby(key_columns=['Semana', 'Cliente_ID', 'Producto_ID'],
                                      operations={'lag_sum11': agg.SUM('Demanda_uni_equil')})
        lag_sum11['Semana'] = lag_sum11['Semana'].apply(lambda x: x + 1)
        test_full = test_full.join(lag_sum11, on=['Semana', 'Cliente_ID', 'Producto_ID'], how='left')
        test_full = test_full.fillna('lag_sum11', 0)
        test_full['prior_sum'] = test_full['prior_sum'] + test_full['lag_sum11']
        del test_full['lag_sum11']
    del test_full['lag11']
    del test_full['Demanda_uni_equil']
    del test_full['Semana']
    return test_full
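The lag_sum and prior_sum flags only matter when those aggregate columns are kept as features; in this run both are dropped from the test set below, so the function is called with the defaults and only lag1 is updated with the week-10 predictions.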
In [54]:
test = gl.SFrame.read_csv(path + 'test_lag5_w9_mean.csv', verbose=False)
# test = test.join(town, on=['Agencia_ID','Producto_ID'], how='left')
# test = test.fillna('t_c',1)
# test = test.fillna('tcc',0)
# test = test.fillna('tp_sum',0)
# del test['Town']
# del test['t_c']
# del test['n_t']
In [55]:
print test.column_names()
print len(test.column_names())
In [56]:
del test['prior_sum']
del test['lag_sum']
del test['week_times']
# del test['Semana']
del test['Canal_ID']
test = test.fillna('lag1',0)
test = test.fillna('lag2',0)
test = test.fillna('lag3',0)
test = test.fillna('lag4',0)
test = test.fillna('lag5',0)
# test = test.fillna('lag_sum',0)
# test = test.fillna('prior_sum',0)
test = test.fillna('n_a',0)
test = test.fillna('n_r',0)
test = test.fillna('n_c',0)
test = test.fillna('n_p',0)
In [57]:
test_full = feature_w11(test, lag_sum=0, prior_sum=0)
In [58]:
ids = test_full['id']
del test_full['id']
# del test_full['Semana']
demand_log = model.predict(test_full)
sub = gl.SFrame({'id':ids,'Demanda_uni_equil':demand_log})
In [59]:
import math
sub['Demanda_uni_equil'] = sub['Demanda_uni_equil'].apply(lambda x: math.expm1(max(0, x)))
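Because the model predicts on the log scale, max(0, x) clamps negative predictions before expm1 maps them back to demand units, so the submission cannot contain negative demand. A quick range check before saving (min and max are standard SArray methods):
In [ ]:
# Sanity check: demand should be >= 0 after the clamp and expm1.
print sub['Demanda_uni_equil'].min(), sub['Demanda_uni_equil'].max()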
In [60]:
file_name = 'w9'+'_f'+str(model.num_features)+'_n'+str(model.max_iterations)+'_c'+str(model.column_subsample)
sub.save(path+file_name,format='csv')
In [37]:
sub
Out[37]:
In [ ]: