In [1]:
import ensembles as en
import pandas as pd
import numpy as np
import xgboost as xgb
import category_encoders as ce
from sklearn import datasets, linear_model, preprocessing, grid_search
from sklearn.preprocessing import Imputer, PolynomialFeatures, StandardScaler, OneHotEncoder
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.cross_validation import StratifiedKFold, KFold, train_test_split
from sklearn.externals import joblib
from keras.layers import Dense, Activation, Dropout
from keras.models import Sequential
from keras.regularizers import l2, activity_l2
from sklearn.metrics import roc_auc_score, average_precision_score, f1_score, log_loss, accuracy_score, \
mean_absolute_error, mean_squared_error, r2_score
from joblib import Parallel, delayed
from sklearn.pipeline import Pipeline
from hyperopt import hp, fmin, tpe, STATUS_OK, Trials 
from hyperas import optim
from hyperas.distributions import choice, uniform, conditional
from functools import partial
np.random.seed(1338)


/home/prajwal/anaconda3/lib/python3.5/site-packages/sklearn/cross_validation.py:44: DeprecationWarning: This module was deprecated in version 0.18 in favor of the model_selection module into which all the refactored classes and functions are moved. Also note that the interface of the new CV iterators are different from that of this module. This module will be removed in 0.20.
  "This module will be removed in 0.20.", DeprecationWarning)
/home/prajwal/anaconda3/lib/python3.5/site-packages/sklearn/grid_search.py:43: DeprecationWarning: This module was deprecated in version 0.18 in favor of the model_selection module into which all the refactored classes and functions are moved. This module will be removed in 0.20.
  DeprecationWarning)
Using Theano backend.
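
The two DeprecationWarnings above flag that cross_validation and grid_search were merged into model_selection in scikit-learn 0.18 and are removed in 0.20. On a current scikit-learn the equivalent imports would be the following minimal sketch; the notebook itself still runs on the deprecated modules, as the warnings show:

# model_selection equivalents of the deprecated imports used above
from sklearn.model_selection import KFold, StratifiedKFold, train_test_split, GridSearchCV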

In [2]:
#Training the base models

Example 1


In [3]:
%%time
Data = pd.read_csv('/home/prajwal/Desktop/bank-additional/bank-additional-full.csv',delimiter=';',header=0)
data_test = en.data_import(Data, label_output='y')
print('Training Data',Data.shape)
print('Test Data',data_test.shape)

en.metric_set('roc_auc_score')

#Hyper Parameter Optimisation (max_depth and eta)
param_gb = en.parameter_set_gradient_boosting(hyper_parameter_optimisation = True, \
                                                eval_metric = ['auc'], objective = ['binary:logistic'], \
                                              max_depth = [5, 10, 15], eta = [0.1, 0.3, 0.5])

#Setting max_depth, rest are default values
param_dt = en.parameter_set_decision_tree(max_depth = [6])

en.train_base_models(['gradient_boosting','decision_tree'],[param_gb, param_dt], save_models = True)

#Models saved as .pkl files
[gb, dt] = en.get_base_models()

print('Gradient Boosting Model\n', gb)
print('\nDecision Tree Model\n', dt)


/home/prajwal/anaconda3/lib/python3.5/site-packages/category_encoders/ordinal.py:178: FutureWarning: reshape is deprecated and will raise in a subsequent release. Please use .values.reshape(...) instead
  X[col] = X[col].astype(int).reshape(-1, )
/home/prajwal/anaconda3/lib/python3.5/site-packages/category_encoders/ordinal.py:167: FutureWarning: reshape is deprecated and will raise in a subsequent release. Please use .values.reshape(...) instead
  X[switch.get('col')] = X[switch.get('col')].astype(int).reshape(-1, )
Training Data (41188, 21)
Test Data (12357, 21)

TRAINING BASE MODELS


TESTING/CROSS VALIDATION BASE MODELS

gradient_boosting 
 0.946058545288
decision_tree 
 0.909509450372
Gradient Boosting Model
 <xgboost.core.Booster object at 0x7f0264451780>

Decision Tree Model
 GridSearchCV(cv=None, error_score='raise',
       estimator=DecisionTreeClassifier(class_weight=None, criterion='gini', max_depth=None,
            max_features=None, max_leaf_nodes=None,
            min_impurity_split=1e-07, min_samples_leaf=1,
            min_samples_split=2, min_weight_fraction_leaf=0.0,
            presort=False, random_state=None, splitter='best'),
       fit_params={}, iid=True, n_jobs=1,
       param_grid={'max_depth': [6], 'max_leaf_nodes': [None], 'max_features': [None], 'class_weight': [None], 'random_state': [None], 'min_samples_leaf': [1], 'min_samples_split': [2], 'criterion': ['gini'], 'presort': [False], 'min_weight_fraction_leaf': [0.0], 'splitter': ['best']},
       pre_dispatch='2*n_jobs', refit=True, scoring='roc_auc', verbose=0)
CPU times: user 1.4 s, sys: 88 ms, total: 1.49 s
Wall time: 29.2 s
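
Since save_models = True pickles the fitted base models, they can be reloaded in a later session with joblib instead of being retrained. A minimal sketch, assuming a hypothetical file name decision_tree.pkl (the actual names are chosen by the ensembles library; check the working directory):

from sklearn.externals import joblib

# Reload a pickled base model; 'decision_tree.pkl' is a hypothetical
# file name for the decision tree saved by save_models = True.
dt_reloaded = joblib.load('decision_tree.pkl')
print(dt_reloaded.best_params_)  # a fitted GridSearchCV, as printed above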

Example 2


In [4]:
%%time
Data = pd.read_csv('/home/prajwal/Desktop/bank-additional/bank-additional-full.csv',delimiter=';',header=0)
data_test = en.data_import(Data, label_output='y',encode = 'binary')
print('Training Data',Data.shape)
print('Test Data',data_test.shape)

en.metric_set('roc_auc_score')

en.set_no_of_layers(4)

#Setting penalty, rest are default values
param_lor = en.parameter_set_logistic_regression(penalty = ['l1'])

#Setting fit_intercept, rest are default values
param_lr = en.parameter_set_linear_regression(fit_intercept = [False])

#Setting dim_layer, activation, rest are default values
param_mlp = en.parameter_set_multi_layer_perceptron(hyper_parameter_optimisation = False, \
                                                    dim_layer = [[32], [64], [32], [1]], \
                                                   activation = [['sigmoid'], ['relu'], ['sigmoid'], ['relu']])

#MLP does not work well with binary encoding yet (changes to be made)
en.train_base_models(['linear_regression','logistic_regression', 'multi_layer_perceptron'], \
                     [param_lr, param_lor, param_mlp])


Training Data (41188, 21)
Test Data (12357, 36)

TRAINING BASE MODELS

Epoch 1/10
14415/14415 [==============================] - 0s - loss: 1.8159 - acc: 0.8873     
... (epochs 2-9 identical: loss 1.8159 - acc: 0.8873)
Epoch 10/10
14415/14415 [==============================] - 0s - loss: 1.8159 - acc: 0.8873     

TESTING/CROSS VALIDATION BASE MODELS

12672/14416 [=========================>....] - ETA: 0s
linear_regression 
 0.929172991818
logistic_regression 
 0.929891719578
multi_layer_perceptron 
 0.5
CPU times: user 6.25 s, sys: 512 ms, total: 6.76 s
Wall time: 13.1 s
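
For reference, the four-layer topology that set_no_of_layers(4) plus the dim_layer and activation lists describe would look roughly like the following in plain Keras. This is a sketch of the assumed architecture, not the wrapper's actual builder code; n_features is a placeholder for the number of binary-encoded input columns, and the optimizer is an assumption:

from keras.models import Sequential
from keras.layers import Dense, Activation

n_features = 35  # placeholder: use X_train.shape[1] in practice
model = Sequential()
model.add(Dense(32, input_dim=n_features))  # dim_layer [32]
model.add(Activation('sigmoid'))
model.add(Dense(64))                        # dim_layer [64]
model.add(Activation('relu'))
model.add(Dense(32))                        # dim_layer [32]
model.add(Activation('sigmoid'))
model.add(Dense(1))                         # dim_layer [1]
model.add(Activation('relu'))
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])

Note that a relu output layer is a poor fit for a binary target: its output is not constrained to [0, 1], which is consistent with the flat training loss and the 0.5 AUC above. A sigmoid output layer would be the usual choice.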

Example 3


In [5]:
%%time
Data = pd.read_csv('/home/prajwal/Desktop/bank-additional/bank-additional-full.csv',delimiter=';',header=0)
data_test = en.data_import(Data, label_output='y', encode ='binary', split = True, stratify = False, split_size = 0.1)
print('Training Data',Data.shape)
print('Test Data',data_test.shape)

en.metric_set('roc_auc_score')

#Hyper Parameter Optimisation (gamma and eta)
param_gb = en.parameter_set_gradient_boosting(hyper_parameter_optimisation = True, \
                                                eval_metric = ['auc'], objective = ['binary:logistic'], \
                                                gamma = [0, 1, 3, 5, 7], eta = [0.1, 0.3], \
                                                max_depth = [5, 10, 15], colsample_bylevel = [0.1])

#Setting max_depth, splitter, presort; rest are default values
#Hyper parameter optimisation - max_depth
#Hyper parameter optimisation - splitter
param_dt_1 = en.parameter_set_decision_tree(max_depth = [6, 10, 12, 15], splitter = ['best', 'random'], \
                                          presort = [True])
#Default Values
param_dt_2 = en.parameter_set_decision_tree()

en.train_base_models(['decision_tree','decision_tree', 'gradient_boosting'], \
                     [param_dt_1, param_dt_2, param_gb])


Training Data (41188, 21)
Test Data (4119, 36)

TRAINING BASE MODELS


TESTING/CROSS VALIDATION BASE MODELS

decision_tree 
 0.916712400212
decision_tree 
 0.736683372899
gradient_boosting 
 0.933923749864
CPU times: user 5.64 s, sys: 120 ms, total: 5.76 s
Wall time: 34.5 s
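
Under the hood, encode = 'binary' and split = True with split_size = 0.1 presumably correspond to a category_encoders binary encoding followed by an unstratified 10% hold-out, which matches the (4119, 36) test shape printed above. A minimal sketch using the libraries imported at the top (X and y are placeholders for the feature columns and the 'y' label; the wrapper's internals may differ):

import category_encoders as ce
from sklearn.cross_validation import train_test_split

# Binary-encode the categorical columns (21 raw columns -> 36 encoded,
# as in the shapes printed above), then hold out 10% of the rows
# without stratification.
encoder = ce.BinaryEncoder()
X_encoded = encoder.fit_transform(X)  # X: feature columns, label excluded
X_train, X_test, y_train, y_test = train_test_split(
    X_encoded, y, test_size=0.1, random_state=1338)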

Example 4


In [6]:
%%time
Data = pd.read_csv('/home/prajwal/Desktop/bank-additional/bank-additional-full.csv',delimiter=';',header=0)
data_test = en.data_import(Data, label_output='y')
print('Training Data',Data.shape)
print('Test Data',data_test.shape)

en.metric_set('roc_auc_score')

en.set_no_of_layers(3)

#Hyper Parameter Optimisation (max_depth and eta)
param_gb = en.parameter_set_gradient_boosting(hyper_parameter_optimisation = True, \
                                                eval_metric = ['auc'], objective = ['binary:logistic'], \
                                              max_depth = [5, 10, 15], eta = [0.1, 0.3, 0.5])

#Setting n_estimators, criterion, rest are default values
#Hyper parameter optimisation - n_estimators
param_rf = en.parameter_set_random_forest(n_estimators = [6, 10, 12], criterion = ['entropy'])

#Setting dim_layer, activation, rest are default values
#Hyper parameter optimisation : dim_layer - Layer 1 and Layer 2
#Hyper parameter optimisation : activation - Layer 1 and Layer 3
param_mlp = en.parameter_set_multi_layer_perceptron(hyper_parameter_optimisation = True, \
                                                    dim_layer = [[32,64,128], [32,64], [1]], \
                                                   activation = [['sigmoid','relu'], \
                                                                 ['sigmoid'], ['sigmoid','relu']], \
                                                   optimizer = 'sgd')

en.train_base_models(['random_forest','multi_layer_perceptron', 'gradient_boosting'], \
                     [param_rf, param_mlp, param_gb])


Training Data (41188, 21)
Test Data (12357, 21)

TRAINING BASE MODELS

Epoch 1/10
9609/9609 [==============================] - 0s - loss: 0.5579 - acc: 0.7256     
Epoch 2/10
9609/9609 [==============================] - 0s - loss: 0.3437 - acc: 0.8874     
Epoch 3/10
9609/9609 [==============================] - 0s - loss: 0.3360 - acc: 0.8874     
Epoch 4/10
9609/9609 [==============================] - 0s - loss: 0.3310 - acc: 0.8874     
Epoch 5/10
9609/9609 [==============================] - 0s - loss: 0.3262 - acc: 0.8874     
Epoch 6/10
9609/9609 [==============================] - 0s - loss: 0.3214 - acc: 0.8874     
Epoch 7/10
9609/9609 [==============================] - 0s - loss: 0.3168 - acc: 0.8874     
Epoch 8/10
9609/9609 [==============================] - 0s - loss: 0.3123 - acc: 0.8874     
Epoch 9/10
9609/9609 [==============================] - 0s - loss: 0.3079 - acc: 0.8874     
Epoch 10/10
9609/9609 [==============================] - 0s - loss: 0.3035 - acc: 0.8874     
Epoch 1/10
9610/9610 [==============================] - 0s - loss: 0.4567 - acc: 0.8172     
Epoch 2/10
9610/9610 [==============================] - 0s - loss: 0.3525 - acc: 0.8873     
Epoch 3/10
9610/9610 [==============================] - 0s - loss: 0.3471 - acc: 0.8873     
Epoch 4/10
9610/9610 [==============================] - 0s - loss: 0.3431 - acc: 0.8873     
Epoch 5/10
9610/9610 [==============================] - 0s - loss: 0.3393 - acc: 0.8873     
Epoch 6/10
9610/9610 [==============================] - 0s - loss: 0.3354 - acc: 0.8873     
Epoch 7/10
9610/9610 [==============================] - 0s - loss: 0.3315 - acc: 0.8873     
Epoch 8/10
9610/9610 [==============================] - 0s - loss: 0.3276 - acc: 0.8873     
Epoch 9/10
9610/9610 [==============================] - 0s - loss: 0.3235 - acc: 0.8873     
Epoch 10/10
9610/9610 [==============================] - 0s - loss: 0.3194 - acc: 0.8873     
Epoch 1/10
9611/9611 [==============================] - 0s - loss: 0.4163 - acc: 0.8868     
Epoch 2/10
9611/9611 [==============================] - 0s - loss: 0.3587 - acc: 0.8873     
Epoch 3/10
9611/9611 [==============================] - 0s - loss: 0.3536 - acc: 0.8873     
Epoch 4/10
9611/9611 [==============================] - 0s - loss: 0.3500 - acc: 0.8873     
Epoch 5/10
9611/9611 [==============================] - 0s - loss: 0.3465 - acc: 0.8873     
Epoch 6/10
9611/9611 [==============================] - 0s - loss: 0.3431 - acc: 0.8873     
Epoch 7/10
9611/9611 [==============================] - 0s - loss: 0.3397 - acc: 0.8873     
Epoch 8/10
9611/9611 [==============================] - 0s - loss: 0.3364 - acc: 0.8873     
Epoch 9/10
9611/9611 [==============================] - 0s - loss: 0.3330 - acc: 0.8873     
Epoch 10/10
9611/9611 [==============================] - 0s - loss: 0.3295 - acc: 0.8873     
Epoch 1/10
14415/14415 [==============================] - 0s - loss: 0.4110 - acc: 0.8776     
Epoch 2/10
14415/14415 [==============================] - 0s - loss: 0.3578 - acc: 0.8873     
Epoch 3/10
14415/14415 [==============================] - 0s - loss: 0.3509 - acc: 0.8873     
Epoch 4/10
14415/14415 [==============================] - 0s - loss: 0.3446 - acc: 0.8873     
Epoch 5/10
14415/14415 [==============================] - 0s - loss: 0.3385 - acc: 0.8873     
Epoch 6/10
14415/14415 [==============================] - 0s - loss: 0.3324 - acc: 0.8873     
Epoch 7/10
14415/14415 [==============================] - 0s - loss: 0.3262 - acc: 0.8873     
Epoch 8/10
14415/14415 [==============================] - 0s - loss: 0.3200 - acc: 0.8873     
Epoch 9/10
14415/14415 [==============================] - 0s - loss: 0.3136 - acc: 0.8873     
Epoch 10/10
14415/14415 [==============================] - 0s - loss: 0.3071 - acc: 0.8873     
13856/14415 [===========================>..] - ETA: 0s
TESTING/CROSS VALIDATION BASE MODELS

13024/14416 [==========================>...] - ETA: 0s
random_forest 
 0.916069580126
multi_layer_perceptron 
 0.833374803025
gradient_boosting 
 0.946983249614
CPU times: user 2.07 s, sys: 156 ms, total: 2.22 s
Wall time: 49.7 s
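
The MLP search above (hyper_parameter_optimisation = True with several dim_layer and activation candidates) is driven by hyperopt/hyperas, both imported at the top. As a rough sketch of the mechanism, a per-layer candidate list such as [32, 64, 128] maps to an hp.choice in a search space that TPE minimises; the objective below is a hypothetical stand-in for training and scoring one MLP configuration:

from hyperopt import hp, fmin, tpe, Trials, STATUS_OK

def objective(params):
    # Hypothetical evaluation: train an MLP with the sampled layer size
    # and activation, then return a loss to minimise (e.g. -AUC).
    score = 0.9  # placeholder for the real cross-validated metric
    return {'loss': -score, 'status': STATUS_OK}

space = {'layer1_dim': hp.choice('layer1_dim', [32, 64, 128]),
         'layer1_activation': hp.choice('layer1_activation', ['sigmoid', 'relu'])}
best = fmin(objective, space, algo=tpe.suggest, max_evals=10, trials=Trials())
print(best)  # indices of the chosen candidates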

Example 5


In [7]:
%%time
Data = pd.read_csv('/home/prajwal/Desktop/bank-additional/bank-additional-full.csv',delimiter=';',header=0)
data_test = en.data_import(Data, label_output='y')
print('Training Data',Data.shape)
print('Test Data',data_test.shape)

en.metric_set('roc_auc_score')

en.set_no_of_layers(3)

#Hyper Parameter Optimisation (max_depth and eta)
param_gb_1 = en.parameter_set_gradient_boosting(hyper_parameter_optimisation = True, \
                                                eval_metric = ['auc'], objective = ['binary:logistic'], \
                                              max_depth = [5, 10, 15], eta = [0.1, 0.3, 0.5])

#Hyper Parameter Optimisation (gamma and eta)
param_gb_2 = en.parameter_set_gradient_boosting(hyper_parameter_optimisation = True, \
                                                eval_metric = ['auc'], objective = ['binary:logistic'], \
                                                gamma = [0, 1, 3, 5, 7], eta = [0.1, 0.3], \
                                                max_depth = [5, 10, 15], colsample_bylevel = [0.1])


#Setting max_depth, rest are default values
param_dt = en.parameter_set_decision_tree(max_depth = [6])

#Setting max_depth, n_estimators, max_features, rest are default values
#Hyper parameter optimisation - max_depth
#Hyper parameter optimisation - n_estimators
param_rf = en.parameter_set_random_forest(max_depth = [6, 10, 12, 15], n_estimators = [10, 20, 30], \
                                          max_features = ['log2'])

#Setting penalty, C, rest are default values
#Hyper parameter optimisation - penalty
#Hyper parameter optimisation - C
param_lor = en.parameter_set_logistic_regression(penalty = ['l1','l2'], C = [1.0, 2.0, 3.0, 5.0, 10.0])

#Setting fit_intercept, rest are default values
param_lr = en.parameter_set_linear_regression(fit_intercept = [False])

#Setting dim_layer, activation, rest are default values
#Hyper parameter optimisation : dim_layer - Layer 1 and Layer 2
#Hyper parameter optimisation : activation - Layer 1 and Layer 3
param_mlp = en.parameter_set_multi_layer_perceptron(hyper_parameter_optimisation = True, \
                                                    dim_layer = [[32,64,128], [32,64], [1]], \
                                                   activation = [['sigmoid','relu'], \
                                                                 ['sigmoid'], ['sigmoid','relu']], \
                                                   optimizer = 'rmsprop')



en.train_base_models(['random_forest','multi_layer_perceptron', 'gradient_boosting', \
                      'logistic_regression','linear_regression', 'decision_tree'], \
                     [param_rf, param_mlp, param_gb_1, param_lor, param_lr, param_dt])


Training Data (41188, 21)
Test Data (12357, 21)

TRAINING BASE MODELS

Epoch 1/10
9609/9609 [==============================] - 0s - loss: 1.8149 - acc: 0.8874     
... (epochs 2-9 identical: loss 1.8149 - acc: 0.8874)
Epoch 10/10
9609/9609 [==============================] - 0s - loss: 1.8149 - acc: 0.8874     
Epoch 1/10
9610/9610 [==============================] - 0s - loss: 0.4594 - acc: 0.8292     
Epoch 2/10
9610/9610 [==============================] - 0s - loss: 0.2607 - acc: 0.9048     
Epoch 3/10
9610/9610 [==============================] - 0s - loss: 0.2545 - acc: 0.9055     
Epoch 4/10
9610/9610 [==============================] - 1s - loss: 0.2627 - acc: 0.9062     
Epoch 5/10
9610/9610 [==============================] - 0s - loss: 0.2462 - acc: 0.9046     
Epoch 6/10
9610/9610 [==============================] - 1s - loss: 0.2495 - acc: 0.9067     
Epoch 7/10
9610/9610 [==============================] - 0s - loss: 0.2429 - acc: 0.9088     
Epoch 8/10
9610/9610 [==============================] - 0s - loss: 0.2408 - acc: 0.9086     
Epoch 9/10
9610/9610 [==============================] - 0s - loss: 0.2443 - acc: 0.9084     
Epoch 10/10
9610/9610 [==============================] - 0s - loss: 0.2404 - acc: 0.9101     
Epoch 1/10
9611/9611 [==============================] - 0s - loss: 1.8162 - acc: 0.8873     
... (epochs 2-9 identical: loss 1.8162 - acc: 0.8873)
Epoch 10/10
9611/9611 [==============================] - 0s - loss: 1.8162 - acc: 0.8873     
Epoch 1/10
14415/14415 [==============================] - 0s - loss: 0.2591 - acc: 0.9014     
Epoch 2/10
14415/14415 [==============================] - 0s - loss: 0.2504 - acc: 0.9075     
Epoch 3/10
14415/14415 [==============================] - 0s - loss: 0.2448 - acc: 0.9095     
Epoch 4/10
14415/14415 [==============================] - 0s - loss: 0.2367 - acc: 0.9118     
Epoch 5/10
14415/14415 [==============================] - 0s - loss: 0.2347 - acc: 0.9073     
Epoch 6/10
14415/14415 [==============================] - 0s - loss: 0.2398 - acc: 0.9109     
Epoch 7/10
14415/14415 [==============================] - 0s - loss: 0.2352 - acc: 0.9083     
Epoch 8/10
14415/14415 [==============================] - 1s - loss: 0.2438 - acc: 0.9100     
Epoch 9/10
14415/14415 [==============================] - 1s - loss: 0.2311 - acc: 0.9072     
Epoch 10/10
14415/14415 [==============================] - 0s - loss: 0.2394 - acc: 0.9077     
13344/14415 [==========================>...] - ETA: 0s
/home/prajwal/anaconda3/lib/python3.5/site-packages/keras/models.py:815: UserWarning: Network returning invalid probability values. The last layer might not normalize predictions into probabilities (like softmax or sigmoid would).
  warnings.warn('Network returning invalid probability values. '
TESTING/CROSS VALIDATION BASE MODELS

12512/14416 [=========================>....] - ETA: 0s
random_forest 
 0.940931586899
multi_layer_perceptron 
 0.920753152178
gradient_boosting 
 0.946383804379
logistic_regression 
 0.924926235455
linear_regression 
 0.923427646435
decision_tree 
 0.921747654592
CPU times: user 2.56 s, sys: 196 ms, total: 2.76 s
Wall time: 1min 13s
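
As the decision-tree printout in Example 1 shows, the scikit-learn base models are tuned with a GridSearchCV whose scoring comes from en.metric_set. A sketch of the equivalent direct call for param_rf above (X_train and y_train are placeholder training arrays; the wrapper may configure the search differently):

from sklearn.ensemble import RandomForestClassifier
from sklearn.grid_search import GridSearchCV

# Mirror of param_rf: search max_depth and n_estimators with
# max_features fixed at 'log2', scored by ROC AUC.
param_grid = {'max_depth': [6, 10, 12, 15],
              'n_estimators': [10, 20, 30],
              'max_features': ['log2']}
rf_search = GridSearchCV(RandomForestClassifier(), param_grid, scoring='roc_auc')
rf_search.fit(X_train, y_train)
print(rf_search.best_params_, rf_search.best_score_)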
