In [1]:
import os
import pandas as pd
import numpy as np
from sklearn import preprocessing, ensemble, cross_validation, grid_search
from sklearn.ensemble import RandomForestRegressor
from sklearn.grid_search import GridSearchCV
In [2]:
print os.listdir('.')
directory = os.path.join(os.getcwd(),'competition_data')
filename = 'train_set.csv'
path = os.path.join(directory,filename)
print path
In [3]:
train = pd.read_csv(os.path.join(directory,'train_set.csv'), parse_dates=[2,])
test = pd.read_csv(os.path.join(directory,'test_set.csv'), parse_dates=[3,])
In [4]:
tubes = pd.read_csv(os.path.join(directory,'tube.csv'))
train = pd.merge(train,tubes,on='tube_assembly_id',how='inner')
test = pd.merge(test,tubes,on='tube_assembly_id',how='inner')
train['material_id'].fillna('SP-9999',inplace=True)
test['material_id'].fillna('SP-9999',inplace=True)
In [5]:
# drop useless columns and create labels
idx = test.id.values.astype(int)
test = test.drop(['id', 'tube_assembly_id', 'quote_date'], axis = 1)
labels = train.cost.values
train = train.drop(['quote_date', 'cost', 'tube_assembly_id'], axis = 1)
In [6]:
# convert data to numpy array
train = np.array(train)
test = np.array(test)
In [7]:
# label encode the categorical variables
n_cols = train.shape[1]
for i in range(n_cols):
    if i in [0, 3, 5, 11, 12, 13, 14, 15, 16]:
        lbl = preprocessing.LabelEncoder()
        lbl.fit(list(train[:, i]) + list(test[:, i]))
        train[:, i] = lbl.transform(train[:, i])
        test[:, i] = lbl.transform(test[:, i])
In [8]:
print(train[0:5,:])
print(test[0:5,:])
In [9]:
# object array to float
train = train.astype(float)
test = test.astype(float)
# i like to train on log(1+x) for RMSLE ;)
# The choice is yours :)
label_log = np.log1p(labels)
In [10]:
X_train, X_test, y_train, y_test = cross_validation.train_test_split(train, label_log, test_size = 0.2, random_state = 0)
In [11]:
#RMSLE error function
import math
#A function to calculate Root Mean Squared Logarithmic Error (RMSLE)
def rmsle(y, y_pred):
    assert len(y) == len(y_pred)
    terms_to_sum = [(math.log(y_pred[i] + 1) - math.log(y[i] + 1)) ** 2.0 for i in range(len(y))]
    return (sum(terms_to_sum) * (1.0 / len(y))) ** 0.5
from sklearn.metrics import make_scorer
custom_score = make_scorer(rmsle, greater_is_better=False)
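A vectorized form of the same metric can be convenient for larger arrays; a minimal sketch (rmsle_vec is an illustrative name, not part of the original code). It also makes the point behind the log1p trick explicit: RMSLE on the original target is just RMSE on log1p-transformed values.
# Equivalent RMSLE using vectorized numpy ops (illustrative sketch only).
def rmsle_vec(y, y_pred):
    y = np.asarray(y, dtype=float)
    y_pred = np.asarray(y_pred, dtype=float)
    # RMSLE equals RMSE computed on the log1p-transformed values
    return np.sqrt(np.mean((np.log1p(y_pred) - np.log1p(y)) ** 2))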
In [18]:
# try cross validation
parameters = {'n_estimators': [1000] }
rfr = RandomForestRegressor()
model = GridSearchCV(estimator= rfr, param_grid=parameters, scoring= custom_score)
model.fit(X_train, y_train)
Out[18]:
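The fitted grid search can be inspected through its standard attributes; a minimal sketch (output not shown here). Because the scorer was built with greater_is_better=False, the reported score is the negated RMSLE.
# Inspect the fitted grid search (standard GridSearchCV attributes).
print model.best_params_
print model.best_score_  # negated RMSLE, since greater_is_better=False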
In [19]:
val_preds = np.expm1(model.predict(X_test))
norm_labels = np.expm1(y_test)
score_val = rmsle(norm_labels,val_preds)
print score_val
In [14]:
preds = np.expm1(model.predict(test))
In [15]:
preds = pd.DataFrame({"id": idx, "cost": preds})
In [16]:
preds.to_csv('benchmark.csv', index=False)
An analysis of the relationships between the different data files (a linking sketch follows this list):
test_set is the root table.
Sheets keyed by tube_assembly_id:
bill_of_materials: components for every tube assembly
specs: specifications for every tube assembly
tube: description of every tube, including spec id, size and bend information, and end-of-tube info
Sheets without tube_assembly_id, keyed by component_id:
comp_adaptor: detailed component specs, adaptor type
comp_boss: detailed component specs, boss type
comp_elbow: detailed component specs, elbow type
comp_float: detailed component specs, float type
comp_hfl: detailed component specs, hfl type
comp_nut: detailed component specs, nut type
comp_other: detailed component specs, other type
comp_sleeve: detailed component specs, sleeve type
comp_straight: detailed component specs, straight type
comp_tee: detailed component specs, tee type
comp_threaded: detailed component specs, threaded type
Sheets without tube_assembly_id, keyed by other ids:
tube_end_form: end-of-tube types, yes/no on forming
type_component: component type id with name
type_connection: connection type id (used in some components) with name
type_end_form: end form id (used in some components) with name
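Given these relationships, one way the component sheets could be linked is to stack the per-type comp_* files into a single lookup table and merge it onto the bill of materials. A minimal sketch, assuming the column names implied above (component_id_1 on bill_of_materials, component_id on the comp_* sheets); it is not run as part of this notebook:
# Hedged sketch: link the first component slot of each assembly to its
# detailed specs. Column names are assumptions based on the notes above.
comp_files = ['comp_adaptor.csv', 'comp_boss.csv', 'comp_elbow.csv',
              'comp_float.csv', 'comp_hfl.csv', 'comp_nut.csv',
              'comp_other.csv', 'comp_sleeve.csv', 'comp_straight.csv',
              'comp_tee.csv', 'comp_threaded.csv']
comps = pd.concat([pd.read_csv(os.path.join(directory, f)) for f in comp_files])
bom = pd.read_csv(os.path.join(directory, 'bill_of_materials.csv'))
linked = pd.merge(bom, comps, left_on='component_id_1',
                  right_on='component_id', how='left')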
In [20]:
#try support vector machines on existing data to see what happens
from sklearn import svm
model2 = svm.SVR()
model2.fit(X_train,y_train)
Out[20]:
In [21]:
second_preds = np.expm1(model2.predict(X_test))
norm_labels = np.expm1(y_test)
score_val = rmsle(norm_labels,second_preds)
print score_val
Wow! Quite a bit worse: roughly 0.57 for the SVR versus 0.27 for the random forest.
There are 2048 components, so pivoting them into columns will create an enormous number of features... but I think I'll try. I can't do a pivot of this size in Excel, but I am going to try it in pandas.
In [22]:
materials = pd.read_csv(os.path.join(directory,'bill_of_materials_categorized.csv'))
In [23]:
materials.head()
Out[23]:
In [65]:
materialsTable = pd.pivot_table(materials,values='quantity_1',index='tube_assembly_id',columns='component_id_1',aggfunc=np.sum, fill_value=0)
In [83]:
materialsTable.index
Out[83]:
I need to be smart about this: rebuild the training data from scratch, relink it to the pivot table above, and work out the best way to do that. "tube_assembly_id" had already been dropped from the training array, so it is unclear how best to approach this.
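A minimal sketch of one relinking approach, assuming it is acceptable to re-read the raw training file (which still carries tube_assembly_id) and merge the pivoted component counts onto it before any columns are dropped; variable names are illustrative:
# Hedged sketch: re-read the raw training data, which still has
# tube_assembly_id, and merge the pivoted component counts onto it
# before the id column is dropped.
train_raw = pd.read_csv(os.path.join(directory, 'train_set.csv'), parse_dates=[2,])
train_linked = pd.merge(train_raw, materialsTable.reset_index(),
                        on='tube_assembly_id', how='left')
train_linked.fillna(0, inplace=True)  # assemblies with no pivoted components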