In [2]:
import pandas as pd
import numpy as np
import os
import math
import graphlab
import graphlab as gl
import graphlab.aggregate as agg
from graphlab import SArray
In [3]:
'''钢炮'''
path = '/home/zongyi/bimbo_data/'
In [4]:
train = gl.SFrame.read_csv(path + 'train_lag5_w9.csv', verbose=False)
In [4]:
town = gl.SFrame.read_csv(path + 'towns.csv', verbose=False)
train = train.join(town, on=['Agencia_ID','Producto_ID'], how='left')
train = train.fillna('t_c',1)
train = train.fillna('tcc',0)
train = train.fillna('tp_sum',0)
del train['Town']
del train['t_c']
In [5]:
del train['n_t']
In [6]:
del train['id']
del train['Venta_uni_hoy']
del train['Venta_hoy']
del train['Dev_uni_proxima']
del train['Dev_proxima']
del train['Demanda_uni_equil']
In [6]:
# relag_train = gl.SFrame.read_csv(path + 're_lag_train.csv', verbose=False)
# train = train.join(relag_train, on=['Cliente_ID','Producto_ID','Semana'], how='left')
# train = train.fillna('re_lag1',0)
# train = train.fillna('re_lag2',0)
# train = train.fillna('re_lag3',0)
# train = train.fillna('re_lag4',0)
# train = train.fillna('re_lag5',0)
# del relag_train
In [7]:
# pd = gl.SFrame.read_csv(path + 'products.csv', verbose=False)
# train = train.join(pd, on=['Producto_ID'], how='left')
# train = train.fillna('prom',0)
# train = train.fillna('weight',0)
# train = train.fillna('pieces',1)
# train = train.fillna('w_per_piece',0)
# train = train.fillna('healthy',0)
# train = train.fillna('drink',0)
# del train['brand']
# del train['NombreProducto']
# del pd
In [8]:
# client = gl.SFrame.read_csv(path + 'clients.csv', verbose=False)
# train = train.join(client, on=['Cliente_ID'], how='left')
# del client
In [9]:
# cluster = gl.SFrame.read_csv(path + 'prod_cluster.csv', verbose=False)
# cluster = cluster[['Producto_ID','cluster']]
# train = train.join(cluster, on=['Producto_ID'], how='left')
In [36]:
# del train['week_times']
# del train['Semana']
# del train['Canal_ID']
train = train.fillna('lag1',0)
train = train.fillna('lag2',0)
train = train.fillna('lag3',0)
train = train.fillna('lag4',0)
train = train.fillna('lag5',0)
train = train.fillna('lag_sum',0)
train = train.fillna('prior_sum',0)
train = train.fillna('n_a',0)
train = train.fillna('n_r',0)
train = train.fillna('n_c',0)
train = train.fillna('n_p',0)
train
Out[36]:
In [5]:
# Make a train-test split
train_data, test_data = train.random_split(0.99,seed=788)
# Create a model.
model = gl.boosted_trees_regression.create(train, target='Demada_log',
step_size=0.1,
max_iterations=500,
max_depth = 10,
metric='rmse',
random_seed=78,
column_subsample=0.6,
row_subsample=0.85,
validation_set=test_data,
model_checkpoint_path=path,
model_checkpoint_interval=500,
resume_from_checkpoint=path+'model_checkpoint_1000_w9')
#500 | 14060.836344 | 0.435407 | 0.447519
In [7]:
model.features
Out[7]:
In [43]:
w = model.get_feature_importance()
w = w.add_row_number()
w
Out[43]:
In [44]:
from IPython.core.pylabtools import figsize
import numpy as np
from matplotlib import pyplot as plt
import seaborn as sns
sns.set_style('darkgrid', {'grid.color': '.8','grid.linestyle': u'--'})
%matplotlib inline
figsize(16, 9)
plt.bar(w['id'], w['count'], tick_label=w['name'])
plt.xticks(rotation=45)
Out[44]:
In [20]:
# Save predictions to an SArray
predictions = model.predict(train)
# Evaluate the model and save the results into a dictionary
results = model.evaluate(train)
print results
In [88]:
'''Add feature to week 11'''
def feature_w11(test, lag_sum=0, prior_sum=0):
test_full = test.copy()
ids = test['id']
del test['id']
del test['Semana']
demand_log = model.predict(test)
sub1 = gl.SFrame({'id':ids,'Demanda_uni_equil':demand_log})
test_full = test_full.join(sub1,on=['id'],how='left')
lag11 = test_full.groupby(key_columns=['Semana','Cliente_ID','Producto_ID'], operations={'lag11':agg.MEAN('Demanda_uni_equil')})
lag11['Semana'] = lag11['Semana'].apply(lambda x: x+1)
test_full = test_full.join(lag11,on=['Semana','Cliente_ID','Producto_ID'],how='left')
test_full = test_full.fillna('lag11',0)
test_full['lag1'] = test_full['lag1'] + test_full['lag11']
if lag_sum == 1:
test_full['lag_sum'] = test_full['lag_sum'] + test_full['lag11']
if prior_sum == 1:
lag_sum11 = test_full.groupby(key_columns=['Semana','Cliente_ID','Producto_ID'], operations={'lag_sum11':agg.SUM('Demanda_uni_equil')})
lag_sum11['Semana'] = lag_sum11['Semana'].apply(lambda x: x+1)
test_full = test_full.join(lag_sum11,on=['Semana','Cliente_ID','Producto_ID'],how='left')
test_full = test_full.fillna('lag_sum11',0)
test_full['prior_sum'] = test_full['prior_sum'] + test_full['lag_sum11']
del test_full['lag_sum11']
del test_full['lag11']
del test_full['Demanda_uni_equil']
del test_full['Semana']
return test_full
In [87]:
test = gl.SFrame.read_csv(path + 'test_lag5_w9.csv', verbose=False)
test = test.join(town, on=['Agencia_ID','Producto_ID'], how='left')
test = test.fillna('t_c',1)
test = test.fillna('tcc',0)
test = test.fillna('tp_sum',0)
del test['Town']
del test['t_c']
del test['n_t']
In [89]:
# del test['prior_sum']
# del test['lag_sum']
del test['week_times']
# del test['Semana']
del test['Canal_ID']
test = test.fillna('lag1',0)
test = test.fillna('lag2',0)
test = test.fillna('lag3',0)
test = test.fillna('lag4',0)
test = test.fillna('lag5',0)
test = test.fillna('lag_sum',0)
test = test.fillna('prior_sum',0)
test = test.fillna('n_a',0)
test = test.fillna('n_r',0)
test = test.fillna('n_c',0)
test = test.fillna('n_p',0)
print test.head()
In [ ]:
test_full = feature_w11(test, lag_sum=1, prior_sum=1)
In [64]:
test_full = test.copy()
ids = test['id']
del test['id']
demand_log = model.predict(test)
sub1 = gl.SFrame({'id':ids,'Demanda_uni_equil':demand_log})
test_full = test_full.join(sub1,on=['id'],how='left')
lag = test_full.groupby(key_columns=['Semana','Cliente_ID','Producto_ID'], operations={'lag':agg.MEAN('Demanda_uni_equil')})
lag['Semana'] = lag['Semana'].apply(lambda x: x+1)
test_full = test_full.join(lag,on=['Semana','Cliente_ID','Producto_ID'],how='left')
test_full = test_full.fillna('lag',0)
test_full['lag1'] = test_full['lag1'] + test_full['lag']
del test_full['lag']
del test_full['Demanda_uni_equil']
In [65]:
In [66]:
ids = test_full['id']
del test_full['id']
del test_full['Semana']
demand_log = model.predict(test_full)
sub = gl.SFrame({'id':ids,'Demanda_uni_equil':demand_log})
In [69]:
import math
sub['Demanda_uni_equil'] = sub['Demanda_uni_equil'].apply(lambda x: math.expm1(max(0, x)))
In [80]:
file_name = 'w9'+'_f'+str(model.num_features)+'_n'+str(model.max_iterations)+'_c'+str(model.column_subsample)
sub.save(path+file_name,format='csv')
In [70]:
sub
Out[70]: