In [1]:
import pandas as pd
import numpy as np
from sklearn.ensemble import RandomForestClassifier
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import OneHotEncoder
from sklearn.grid_search import GridSearchCV   # pre-0.18 scikit-learn location (later moved to sklearn.model_selection)
from datetime import datetime

In [2]:
startTime = datetime.now()

In [3]:
print "Reading csv ..."
#donations = pd.read_csv('../data/donations.csv').sort('projectid')
projects = pd.read_csv('../data/projects.csv').sort('projectid')
outcomes = pd.read_csv('../data/outcomes.csv').sort('projectid')
#resources = pd.read_csv('../data/resources.csv').sort('projectid')
sample = pd.read_csv('../data/sampleSubmission.csv').sort('projectid')
#essays = pd.read_csv('../data/essays.csv').sort('projectid')


Reading csv ...

In [4]:
print "dividing samples ..."
dates = np.array(projects.date_posted)
train_idx = np.where(dates < '2014-01-01')[0]    # train on projects posted before 2014
test_idx = np.where(dates >= '2014-01-01')[0]    # predict for projects posted in 2014


dividing samples ...
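
The fit in cell 12 passes the full outcomes array as the labels for train, which quietly assumes that outcomes.csv lists exactly the pre-2014 projects in the same projectid-sorted order as projects.csv. A minimal sanity check of that assumption, as a sketch (not part of the original run):

# Hypothetical check: outcomes.csv rows should line up one-to-one with the
# pre-2014 (training) rows of projects.csv, since both are sorted by projectid.
assert len(train_idx) == len(outcomes)
assert (projects.projectid.values[train_idx] == outcomes.projectid.values).all()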

In [5]:
print "fill null vals ..."
projects = projects.fillna(method='pad')   # forward-fill each missing value from the previous row


fill null vals ...

In [6]:
outcomes = np.array(outcomes.is_exciting)   # keep only the target column ('t'/'f' strings)

In [7]:
# columns to label-encode (students_reached is numeric but is encoded along with the rest)
projectCategoricalColumns = ['school_city', 'school_state', 'school_zip', 'school_metro', 'school_district',
    'school_county', 'school_charter', 'school_magnet', 'school_year_round', 'school_nlns', 'school_kipp',
    'school_charter_ready_promise', 'teacher_prefix', 'teacher_teach_for_america', 'teacher_ny_teaching_fellow',
    'primary_focus_subject', 'primary_focus_area', 'secondary_focus_subject', 'secondary_focus_area',
    'resource_type', 'poverty_level', 'grade_level', 'students_reached',
    'eligible_double_your_impact_match', 'eligible_almost_home_match']

In [8]:
projects = np.array(projects[projectCategoricalColumns])

In [9]:
print "encoding ..."
for i in range(0, projects.shape[1]):
    le = LabelEncoder()
    projects[:,i] = le.fit_transform(projects[:,i])
projects = projects.astype(float)


encoding ...
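
LabelEncoder assigns each category an arbitrary integer code, so the columns now look ordinal to the model. OneHotEncoder is imported in cell 1 but never used; a sketch of how it could be layered on top of the integer codes (an alternative, not what this notebook does, and whether the rest of the pipeline can consume the resulting sparse matrix depends on the scikit-learn version):

# Optional sketch: expand each integer-coded column into one indicator column
# per category level; fit_transform returns a scipy sparse matrix.
ohe = OneHotEncoder()
projects_ohe = ohe.fit_transform(projects.astype(int))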

In [10]:
train = projects[train_idx]
test = projects[test_idx]

In [11]:
print "grid search started ..."
rf = RandomForestClassifier()
parameters = {'n_estimators': [100, 10], 'criterion': ['entropy']}
clf = GridSearchCV(rf, parameters, scoring='roc_auc', n_jobs=6, verbose=3,
                   refit=False)   # refit=False: record CV scores only, no final refit on the full data


grid search started ...
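
cv is left at its default of None, which for a classifier in this scikit-learn version means 3-fold stratified cross-validation; the "Fitting 3 folds ..." message in the next cell confirms it. A sketch of spelling that out explicitly, if preferred:

clf = GridSearchCV(rf, parameters, scoring='roc_auc', cv=3,
                   n_jobs=6, verbose=3, refit=False)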

In [12]:
print "fitting ..."
clf.fit(train, outcomes=='t')


fitting ...
Fitting 3 folds for each of 2 candidates, totalling 6 fits
[Parallel(n_jobs=6)]: Done   5 out of   6 | elapsed:  1.4min remaining:   17.2s
[Parallel(n_jobs=6)]: Done   1 out of   6 | elapsed: 10.0min remaining: 50.2min
[Parallel(n_jobs=6)]: Done   2 out of   6 | elapsed: 10.9min remaining: 21.9min
[Parallel(n_jobs=6)]: Done   6 out of   6 | elapsed: 10.9min finished
Out[12]:
GridSearchCV(cv=None,
       estimator=RandomForestClassifier(bootstrap=True, compute_importances=None,
            criterion='gini', max_depth=None, max_features='auto',
            min_density=None, min_samples_leaf=1, min_samples_split=2,
            n_estimators=10, n_jobs=1, oob_score=False, random_state=None,
            verbose=0),
       fit_params={}, iid=True, loss_func=None, n_jobs=6,
       param_grid={'n_estimators': [100, 10], 'criterion': ['entropy']},
       pre_dispatch='2*n_jobs', refit=False, score_func=None,
       scoring='roc_auc', verbose=3)

In [13]:
endTime = datetime.now()

In [14]:
clf.grid_scores_


Out[14]:
[mean: 0.64671, std: 0.00275, params: {'n_estimators': 100, 'criterion': 'entropy'},
 mean: 0.59273, std: 0.00126, params: {'n_estimators': 10, 'criterion': 'entropy'}]

In [15]:
clf.best_score_


Out[15]:
0.64670680175477802

In [16]:
clf.best_params_


Out[16]:
{'criterion': 'entropy', 'n_estimators': 100}

In [17]:
print endTime - startTime


0:11:23.690624

In [18]:
clf.best_estimator_


---------------------------------------------------------------------------
AttributeError                            Traceback (most recent call last)
<ipython-input-18-2d39bdb4c6dd> in <module>()
----> 1 clf.best_estimator_

AttributeError: 'GridSearchCV' object has no attribute 'best_estimator_'
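
The AttributeError is expected: the search was built with refit=False, so GridSearchCV only records the cross-validation scores and never retrains a final model on the full training split. To get a usable model (and predictions for the 2014 projects), the winning parameters have to be refit by hand. A sketch of that follow-up, assuming the rows of sampleSubmission.csv carry an is_exciting column and line up with test_idx once both are sorted by projectid (hypothetical, not part of this run):

# Hypothetical follow-up: refit the best configuration and score the test rows.
best = RandomForestClassifier(n_estimators=100, criterion='entropy', n_jobs=6)
best.fit(train, outcomes == 't')
preds = best.predict_proba(test)[:, 1]     # column 1 = probability of the True class
sample['is_exciting'] = preds              # assumes sample rows align with test_idx
sample.to_csv('predictions.csv', index=False)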