Shelter Animal Outcomes 3

Random Forests


In [1]:
from time import time
from operator import itemgetter
from sklearn.ensemble import RandomForestClassifier
from sklearn import cross_validation
from sklearn.feature_selection import RFECV
from sklearn.grid_search import GridSearchCV
from sklearn.pipeline import Pipeline
import pandas as pd
import numpy as np

In [2]:
# Load the pre-split train/test CSVs (paths relative to this notebook's
# directory; presumably produced by an upstream preprocessing step — TODO
# confirm the files' provenance and column layout).
df_train = pd.read_csv('../Shelter_train.csv')
df_test = pd.read_csv('../Shelter_test.csv')

In [3]:
# Split the training frame positionally: every column except the last is a
# feature, the last column is the outcome label.
# Fix: `.ix` is deprecated (and removed in modern pandas); `.iloc` is the
# correct positional indexer and gives the same slices here.
X = df_train.iloc[:, :-1]
y = df_train.iloc[:, -1]
# Drop the ID column from the test set. Use the explicit `axis=1` keyword
# instead of the deprecated positional axis argument.
df_test = df_test.drop('ID', axis=1)

In [4]:
# Baseline model: an un-tuned random forest, scored with cross-validation.
# NOTE(review): `sklearn.cross_validation` is the pre-0.18 API (replaced by
# `sklearn.model_selection` in later releases) — this notebook targets an
# old scikit-learn version.
forest = RandomForestClassifier(n_estimators = 400, max_features='auto')
# "log_loss" scores are reported negated (greater is better), hence the
# negative values in the output below.
cross_validation.cross_val_score(forest, X, y, scoring="log_loss")


Out[4]:
array([-1.46005766, -1.42965826, -1.38190881])

In [5]:
# Hyperparameter grid for the 'clf' step of the pipeline below (the
# "clf__" prefix routes each key to that pipeline step).
# NOTE(review): min_samples_split=1 is only accepted by old scikit-learn;
# modern releases require an int >= 2 (or a float fraction).
params = {"clf__max_depth": [5, 3, None],
        "clf__max_features": [0.1, 0.25, 0.5, 1.0],
        "clf__min_samples_split": [1, 3, 10],
        "clf__min_samples_leaf": [1, 3, 10],
        "clf__bootstrap": [True, False],
        "clf__criterion": ["gini", "entropy"]}

In [6]:
def report(grid_scores, n_top=3):
    """Print the n_top best grid-search results, ranked by mean score.

    Each entry of `grid_scores` is the old-sklearn `_CVScoreTuple`
    (parameters, mean_validation_score, cv_validation_scores); position 1
    is the mean score, so sorting on it descending puts the best first.
    """
    ranked = sorted(grid_scores, key=lambda s: s[1], reverse=True)
    for rank, result in enumerate(ranked[:n_top], start=1):
        print("Model with rank: {0}".format(rank))
        print("Mean validation score: {0:.3f} (std: {1:.3f})".format(
              result.mean_validation_score,
              np.std(result.cv_validation_scores)))
        print("Parameters: {0}".format(result.parameters))
        print("")

In [7]:
# Two-stage pipeline: recursive feature elimination (with a small, fast
# forest as the ranking estimator) feeding a larger random forest
# classifier whose hyperparameters are grid-searched via `params`.
pipeline = Pipeline([
        ('featureSelection', RFECV(estimator=RandomForestClassifier(n_estimators=20), scoring='log_loss')),
        ('clf', RandomForestClassifier(n_estimators=1000))
        ])
# n_jobs=-1 uses all available cores; 'log_loss' matches the CV metric
# used for the baseline above.
grid_search = GridSearchCV(pipeline, params, n_jobs=-1, scoring='log_loss')
start = time()
# WARNING: expensive — the recorded run below took ~82 minutes.
grid_search.fit(X, y)
print("GridSearchCV took %.2f seconds for %d candidate parameter settings."
      % (time() - start, len(grid_search.grid_scores_)))
# `grid_scores_` is the pre-0.18 sklearn results attribute (later replaced
# by `cv_results_`).
report(grid_search.grid_scores_)
# Class-probability predictions for the test set; column order must match
# the classifier's class ordering (alphabetical outcome labels here).
predictions = grid_search.predict_proba(df_test)
output = pd.DataFrame(predictions, columns=['Adoption', 'Died', 'Euthanasia', 'Return_to_owner', 'Transfer'])
# Shift the index to start at 1 so it can serve as the submission ID column.
output.index.names = ['ID']
output.index += 1
output.head()


GridSearchCV took 4943.46 seconds for 432 candidate parameter settings.
Model with rank: 1
Mean validation score: -1.008 (std: 0.017)
Parameters: {'clf__bootstrap': True, 'clf__criterion': 'gini', 'clf__max_depth': 5, 'clf__min_samples_leaf': 1, 'clf__max_features': 0.25, 'clf__min_samples_split': 3}

Model with rank: 2
Mean validation score: -1.017 (std: 0.014)
Parameters: {'clf__bootstrap': False, 'clf__criterion': 'entropy', 'clf__max_depth': 5, 'clf__min_samples_leaf': 1, 'clf__max_features': 1.0, 'clf__min_samples_split': 3}

Model with rank: 3
Mean validation score: -1.021 (std: 0.018)
Parameters: {'clf__bootstrap': True, 'clf__criterion': 'gini', 'clf__max_depth': 3, 'clf__min_samples_leaf': 3, 'clf__max_features': 1.0, 'clf__min_samples_split': 10}

Out[7]:
Adoption Died Euthanasia Return_to_owner Transfer
1 0.096385 0.004360 0.098321 0.292357 0.508577
2 0.515902 0.001343 0.030227 0.284061 0.168467
3 0.652827 0.001950 0.023525 0.120233 0.201466
4 0.077946 0.005235 0.119987 0.384071 0.412761
5 0.472442 0.001421 0.034818 0.308180 0.183139

In [8]:
output.to_csv('../submissionRF.3.0.csv', index_label = 'ID')