The Kaggle competition: https://www.kaggle.com/c/bike-sharing-demand
Using historical data on bike rentals and weather conditions, we need to forecast bike rental demand.
In the original problem statement, 11 features are available: https://www.kaggle.com/c/bike-sharing-demand/data
The feature set contains real-valued, categorical, and binary data.
The demonstration uses the training portion of the original data, train.csv; the working files are attached.
In [1]:
from sklearn import cross_validation, grid_search, linear_model, metrics, pipeline, preprocessing
import numpy as np
import pandas as pd
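Note: cross_validation and grid_search are pre-0.18 scikit-learn module names that were later removed. On a current installation (a sketch, assuming scikit-learn >= 0.20), the equivalent imports would be:

from sklearn import linear_model, metrics, pipeline, preprocessing
from sklearn.model_selection import GridSearchCV, cross_val_score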
In [2]:
%pylab inline
In [3]:
raw_data = pd.read_csv('bike_sharing_demand.csv', header = 0, sep = ',')
In [4]:
raw_data.head()
Out[4]:
In [5]:
raw_data.datetime = raw_data.datetime.apply(pd.to_datetime)
In [6]:
raw_data['month'] = raw_data.datetime.apply(lambda x : x.month)
raw_data['hour'] = raw_data.datetime.apply(lambda x : x.hour)
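The same two features can be extracted without a per-row apply via pandas' vectorized .dt accessor, which is noticeably faster on large frames (an equivalent sketch):

raw_data['month'] = raw_data.datetime.dt.month
raw_data['hour'] = raw_data.datetime.dt.hour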
In [7]:
train_data = raw_data.iloc[:-1000, :]
hold_out_test_data = raw_data.iloc[-1000:, :]
In [8]:
print raw_data.shape, train_data.shape, hold_out_test_data.shape
In [9]:
# training set
train_labels = train_data['count'].values
train_data = train_data.drop(['datetime', 'count', 'casual', 'registered'], axis = 1)
In [10]:
# hold-out test set
test_labels = hold_out_test_data['count'].values
test_data = hold_out_test_data.drop(['datetime', 'count', 'casual', 'registered'], axis = 1)
In [11]:
binary_data_columns = ['holiday', 'workingday']
binary_data_indices = np.array([(column in binary_data_columns) for column in train_data.columns], dtype = bool)
In [12]:
print binary_data_columns
print binary_data_indices
In [13]:
categorical_data_columns = ['season', 'weather', 'month']
categorical_data_indices = np.array([(column in categorical_data_columns) for column in train_data.columns], dtype = bool)
In [14]:
print categorical_data_columns
print categorical_data_indices
In [15]:
numeric_data_columns = ['temp', 'atemp', 'humidity', 'windspeed', 'hour']
numeric_data_indices = np.array([(column in numeric_data_columns) for column in train_data.columns], dtype = bool)
In [16]:
print numeric_data_columns
print numeric_data_indices
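Since the three boolean masks are built independently, it is worth verifying that every remaining column falls into exactly one group (a quick sanity check, not part of the original pipeline):

# every column should belong to exactly one of the three feature groups
coverage = (binary_data_indices.astype(int)
            + categorical_data_indices.astype(int)
            + numeric_data_indices.astype(int))
assert np.all(coverage == 1)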
In [17]:
regressor = linear_model.SGDRegressor(random_state = 0, n_iter = 3, loss = 'squared_loss', penalty = 'l2')
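With loss = 'squared_loss' and penalty = 'l2', SGDRegressor fits a ridge-style linear model by stochastic gradient descent; up to the intercept term, the objective it minimizes is

$$\min_{w} \; \frac{1}{N} \sum_{i=1}^{N} \left( y_i - \langle w, x_i \rangle \right)^2 + \frac{\alpha}{2} \lVert w \rVert_2^2,$$

where alpha is the regularization strength tuned in the grid search below.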
In [18]:
estimator = pipeline.Pipeline(steps = [
    ('feature_processing', pipeline.FeatureUnion(transformer_list = [
        #binary
        ('binary_variables_processing', preprocessing.FunctionTransformer(lambda data: data[:, binary_data_indices])),

        #numeric
        ('numeric_variables_processing', pipeline.Pipeline(steps = [
            ('selecting', preprocessing.FunctionTransformer(lambda data: data[:, numeric_data_indices])),
            ('scaling', preprocessing.StandardScaler(with_mean = False))
        ])),

        #categorical
        ('categorical_variables_processing', pipeline.Pipeline(steps = [
            ('selecting', preprocessing.FunctionTransformer(lambda data: data[:, categorical_data_indices])),
            ('hot_encoding', preprocessing.OneHotEncoder(handle_unknown = 'ignore'))
        ])),
    ])),
    ('model_fitting', regressor)
])
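FeatureUnion concatenates the outputs of the three branches column-wise, so the regressor sees the binary columns as-is, the scaled numeric columns, and a one-hot expansion of the categorical ones. The width of the resulting matrix can be inspected directly (a quick sketch; feature_processing is the step name defined above):

processed = estimator.named_steps['feature_processing'].fit_transform(train_data)
print processed.shape  # rows x (binary + numeric + one-hot encoded columns)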
In [19]:
estimator.fit(train_data, train_labels)
Out[19]:
In [20]:
metrics.mean_absolute_error(test_labels, estimator.predict(test_data))
Out[20]:
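For reference, the competition itself scores submissions with RMSLE (root mean squared logarithmic error) rather than MAE. A sketch of that metric, clipping negative predictions first since log1p is undefined below -1:

def rmsle(y_true, y_pred):
    # root mean squared logarithmic error, the competition's official metric
    y_pred = np.maximum(y_pred, 0)  # rental counts cannot be negative
    return np.sqrt(np.mean((np.log1p(y_pred) - np.log1p(y_true)) ** 2))

print rmsle(test_labels, estimator.predict(test_data))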
In [21]:
estimator.get_params().keys()
Out[21]:
In [22]:
parameters_grid = {
    'model_fitting__alpha' : [0.0001, 0.001, 0.1],
    'model_fitting__eta0' : [0.001, 0.05],
}
In [23]:
grid_cv = grid_search.GridSearchCV(estimator, parameters_grid, scoring = 'mean_absolute_error', cv = 4)
In [24]:
%%time
grid_cv.fit(train_data, train_labels)
Out[24]:
In [25]:
print grid_cv.best_score_
print grid_cv.best_params_
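Note that best_score_ is reported with a flipped sign: scikit-learn scorers are maximized, so error metrics such as 'mean_absolute_error' are negated internally (the exact behavior may vary across versions). The actual cross-validated error is:

print -grid_cv.best_score_  # mean absolute error averaged over the CV folds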
In [26]:
test_predictions = grid_cv.best_estimator_.predict(test_data)
In [27]:
metrics.mean_absolute_error(test_labels, test_predictions)
Out[27]:
In [28]:
print test_labels[:20]
In [29]:
print test_predictions[:20]
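An unconstrained linear model can output negative rental counts. Since demand is non-negative, clipping predictions at zero before scoring is a common post-processing step (a small sketch):

# demand counts cannot be negative, so clip the linear model's output at zero
clipped_predictions = np.maximum(test_predictions, 0)
print metrics.mean_absolute_error(test_labels, clipped_predictions)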
In [30]:
pylab.figure(figsize=(8, 6))
pylab.grid(True)
pylab.xlim(-100,1100)
pylab.ylim(-100,1100)
pylab.scatter(train_labels, grid_cv.best_estimator_.predict(train_data), alpha=0.5, color = 'red')
pylab.scatter(test_labels, grid_cv.best_estimator_.predict(test_data), alpha=0.5, color = 'blue')
Out[30]:
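A y = x reference line makes such predicted-vs-actual scatter plots easier to read, since points on the diagonal correspond to perfect predictions (a sketch using the same pylab interface):

pylab.plot([-100, 1100], [-100, 1100], '--', color = 'grey')  # perfect-prediction diagonal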
In [31]:
from sklearn.ensemble import RandomForestRegressor
In [32]:
regressor = RandomForestRegressor(random_state = 0, max_depth = 20, n_estimators = 50)
In [33]:
estimator = pipeline.Pipeline(steps = [
    ('feature_processing', pipeline.FeatureUnion(transformer_list = [
        #binary
        ('binary_variables_processing', preprocessing.FunctionTransformer(lambda data: data[:, binary_data_indices])),

        #numeric
        ('numeric_variables_processing', pipeline.Pipeline(steps = [
            ('selecting', preprocessing.FunctionTransformer(lambda data: data[:, numeric_data_indices])),
            ('scaling', preprocessing.StandardScaler(with_mean = False, with_std = True))
        ])),

        #categorical
        ('categorical_variables_processing', pipeline.Pipeline(steps = [
            ('selecting', preprocessing.FunctionTransformer(lambda data: data[:, categorical_data_indices])),
            ('hot_encoding', preprocessing.OneHotEncoder(handle_unknown = 'ignore'))
        ])),
    ])),
    ('model_fitting', regressor)
])
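Strictly speaking, a random forest is invariant to monotone scaling of features and can split directly on raw integer category codes, so the scaling and one-hot steps are not required here; the pipeline is reused unchanged mainly to keep the comparison with the linear model uniform. A minimal tree-only alternative might look like this (a sketch; whether one-hot encoding helps tree models is data-dependent):

# trees need neither scaling nor one-hot encoding, so the raw matrix suffices
simple_estimator = RandomForestRegressor(random_state = 0, max_depth = 20, n_estimators = 50)
simple_estimator.fit(train_data, train_labels)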
In [34]:
estimator.fit(train_data, train_labels)
Out[34]:
In [35]:
metrics.mean_absolute_error(test_labels, estimator.predict(test_data))
Out[35]:
In [36]:
test_labels[:10]
Out[36]:
In [37]:
estimator.predict(test_data)[:10]
Out[37]:
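One advantage of the forest is its built-in feature importances, which can be pulled out of the fitted pipeline (a sketch; note they are indexed by the transformed, post-FeatureUnion feature order, so each one-hot column counts separately):

importances = estimator.named_steps['model_fitting'].feature_importances_
print importances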
In [38]:
pylab.figure(figsize=(16, 6))
pylab.subplot(1,2,1)
pylab.grid(True)
pylab.xlim(-100,1100)
pylab.ylim(-100,1100)
pylab.scatter(train_labels, grid_cv.best_estimator_.predict(train_data), alpha=0.5, color = 'red')
pylab.scatter(test_labels, grid_cv.best_estimator_.predict(test_data), alpha=0.5, color = 'blue')
pylab.title('linear model')
pylab.subplot(1,2,2)
pylab.grid(True)
pylab.xlim(-100,1100)
pylab.ylim(-100,1100)
pylab.scatter(train_labels, estimator.predict(train_data), alpha=0.5, color = 'red')
pylab.scatter(test_labels, estimator.predict(test_data), alpha=0.5, color = 'blue')
pylab.title('random forest model')
Out[38]: