In [1]:
import ensembles as en
import pandas as pd
import numpy as np
import xgboost as xgb
import category_encoders as ce
from sklearn import datasets, linear_model, preprocessing, grid_search
from sklearn.preprocessing import Imputer, PolynomialFeatures, StandardScaler
from sklearn.tree import DecisionTreeClassifier
from sklearn.cross_validation import KFold
from sklearn.ensemble import RandomForestClassifier
from sklearn.cross_validation import StratifiedKFold, KFold
from sklearn.preprocessing import OneHotEncoder
from sklearn.externals import joblib
from keras.layers import Dense, Activation, Dropout
from keras.models import Sequential
from keras.regularizers import l2, activity_l2
from sklearn.metrics import roc_auc_score, average_precision_score, f1_score, log_loss, accuracy_score, \
mean_absolute_error, mean_squared_error, r2_score
from sklearn.cross_validation import train_test_split
from joblib import Parallel, delayed
from sklearn.pipeline import Pipeline
from hyperopt import hp, fmin, tpe, STATUS_OK, Trials 
from hyperas import optim
from hyperas.distributions import choice, uniform, conditional
from functools import partial


/home/prajwal/anaconda3/lib/python3.5/site-packages/sklearn/cross_validation.py:44: DeprecationWarning: This module was deprecated in version 0.18 in favor of the model_selection module into which all the refactored classes and functions are moved. Also note that the interface of the new CV iterators are different from that of this module. This module will be removed in 0.20.
  "This module will be removed in 0.20.", DeprecationWarning)
/home/prajwal/anaconda3/lib/python3.5/site-packages/sklearn/grid_search.py:43: DeprecationWarning: This module was deprecated in version 0.18 in favor of the model_selection module into which all the refactored classes and functions are moved. This module will be removed in 0.20.
  DeprecationWarning)
Using Theano backend.

In [2]:
#Training the base models

Example 1


In [3]:
%%time
Data = pd.read_csv('/home/prajwal/Desktop/bank-additional/bank-additional-full.csv',delimiter=';',header=0)
data_test = en.data_import(Data, label_output='y')
print('Training Data',Data.shape)
print('Test Data',data_test.shape)

en.metric_set('roc_auc_score')

#Hyper Parameter Optimisation (max_depth and eta)
param_gb = en.parameter_set_gradient_boosting(hyper_parameter_optimisation = True, \
                                                eval_metric = ['auc'], objective = ['binary:logistic'], \
                                              max_depth = [5, 10, 15], eta = [0.1, 0.3, 0.5])

#Setting max_depth, rest are default values
param_dt = en.parameter_set_decision_tree(max_depth = [6])

en.train_base_models(['gradient_boosting','decision_tree'],[param_gb, param_dt], save_models = True)

weights = en.assign_weights(weights = 'default', hyper_parameter_optimisation = True)

#Stacking
en.train_ensemble_models(stack_model_list = ['gradient_boosting'], stack_parameters_list = [param_gb], 
                      perform_weighted_average = True, weights_list = weights)

en.test_models(data_test)


/home/prajwal/anaconda3/lib/python3.5/site-packages/category_encoders/ordinal.py:178: FutureWarning: reshape is deprecated and will raise in a subsequent release. Please use .values.reshape(...) instead
  X[col] = X[col].astype(int).reshape(-1, )
/home/prajwal/anaconda3/lib/python3.5/site-packages/category_encoders/ordinal.py:167: FutureWarning: reshape is deprecated and will raise in a subsequent release. Please use .values.reshape(...) instead
  X[switch.get('col')] = X[switch.get('col')].astype(int).reshape(-1, )
Training Data (41188, 21)
Test Data (12357, 21)

TRAINING BASE MODELS


TESTING/CROSS VALIDATION BASE MODELS

gradient_boosting 
 0.945293510106
decision_tree 
 0.906922348135

TRAINING ENSEMBLE MODELS

Weighted Average
Weight [5, 0]
Metric Score 0.945293510106

TESTING PHASE


TESTING/CROSS VALIDATION BASE MODELS

gradient_boosting 
 0.947718871697
decision_tree 
 0.910751522609

TESTING ENSEMBLE MODELS

Stacking gradient_boosting 
 0.946814184107
Weighted Average [5, 0] 
 0.947718871697
CPU times: user 2.49 s, sys: 220 ms, total: 2.71 s
Wall time: 1min

Example 2


In [4]:
%%time
# Example 2: linear/logistic regression + MLP base models on binary-encoded
# categoricals, blended with a logistic-regression meta-model.
Data = pd.read_csv('/home/prajwal/Desktop/bank-additional/bank-additional-full.csv',delimiter=';',header=0)
# encode='binary' expands the categorical columns (test set prints as 36
# columns vs the 21 raw columns above).
data_test = en.data_import(Data, label_output='y',encode = 'binary')
print('Training Data',Data.shape)
print('Test Data',data_test.shape)

# Score every model with ROC AUC.
en.metric_set('roc_auc_score')

# 4 layers to match the 4-entry dim_layer/activation lists below.
en.set_no_of_layers(4)

#Setting penalty, rest are default values
param_lor = en.parameter_set_logistic_regression(penalty = ['l1'])

#Setting fit_intercept, rest are default values
param_lr = en.parameter_set_linear_regression(fit_intercept = [False])

#Setting dim_layer, activation, rest are default values (no HPO: one choice
#per layer)
param_mlp = en.parameter_set_multi_layer_perceptron(hyper_parameter_optimisation = False, \
                                                    dim_layer = [[32], [64], [32], [1]], \
                                                   activation = [['sigmoid'], ['relu'], ['sigmoid'], ['relu']])

#MLP does not work well with binary encode (changes to be made) — its AUC
#comes out at 0.5 (chance level) in the output below.
en.train_base_models(['linear_regression','logistic_regression', 'multi_layer_perceptron'], \
                     [param_lr, param_lor, param_mlp])

#Setting penalty, rest are default values
param_lor_ens = en.parameter_set_logistic_regression(penalty = ['l2'])

# Blending only; no stacking and no weighted average in this example.
en.train_ensemble_models(blend_model_list = ['logistic_regression'], blend_parameters_list = [param_lor_ens], 
                      perform_weighted_average = False)

en.test_models(data_test)


/home/prajwal/anaconda3/lib/python3.5/site-packages/category_encoders/ordinal.py:178: FutureWarning: reshape is deprecated and will raise in a subsequent release. Please use .values.reshape(...) instead
  X[col] = X[col].astype(int).reshape(-1, )
/home/prajwal/anaconda3/lib/python3.5/site-packages/category_encoders/ordinal.py:167: FutureWarning: reshape is deprecated and will raise in a subsequent release. Please use .values.reshape(...) instead
  X[switch.get('col')] = X[switch.get('col')].astype(int).reshape(-1, )
Training Data (41188, 21)
Test Data (12357, 36)

TRAINING BASE MODELS

Epoch 1/10
14415/14415 [==============================] - 0s - loss: 14.3022 - acc: 0.1127     
Epoch 2/10
14415/14415 [==============================] - 0s - loss: 14.3022 - acc: 0.1127     
Epoch 3/10
14415/14415 [==============================] - 0s - loss: 14.3022 - acc: 0.1127     
Epoch 4/10
14415/14415 [==============================] - 0s - loss: 14.3022 - acc: 0.1127     
Epoch 5/10
14415/14415 [==============================] - 0s - loss: 14.3022 - acc: 0.1127     
Epoch 6/10
14415/14415 [==============================] - 0s - loss: 14.3022 - acc: 0.1127     
Epoch 7/10
14415/14415 [==============================] - 0s - loss: 14.3022 - acc: 0.1127     
Epoch 8/10
14415/14415 [==============================] - 0s - loss: 14.3022 - acc: 0.1127     
Epoch 9/10
14415/14415 [==============================] - 0s - loss: 14.3022 - acc: 0.1127     
Epoch 10/10
14415/14415 [==============================] - 0s - loss: 14.3022 - acc: 0.1127     

TESTING/CROSS VALIDATION BASE MODELS

10656/14416 [=====================>........] - ETA: 0slinear_regression 
 0.932549630773
logistic_regression 
 0.935544113162
multi_layer_perceptron 
 0.5

TRAINING ENSEMBLE MODELS


TESTING PHASE


TESTING/CROSS VALIDATION BASE MODELS

10720/12357 [=========================>....] - ETA: 0slinear_regression 
 0.934629254
logistic_regression 
 0.93571276947
multi_layer_perceptron 
 0.5

TESTING ENSEMBLE MODELS

Blending logistic_regression 
 0.935907223087
CPU times: user 8.67 s, sys: 644 ms, total: 9.32 s
Wall time: 17.9 s

Example 3


In [5]:
%%time
Data = pd.read_csv('/home/prajwal/Desktop/bank-additional/bank-additional-full.csv',delimiter=';',header=0)
data_test = en.data_import(Data, label_output='y', encode ='binary', split = True, stratify = False, split_size = 0.1)
print('Training Data',Data.shape)
print('Test Data',data_test.shape)

en.metric_set('roc_auc_score')

#Hyper Parameter Optimisation (gamma and eta)
param_gb = en.parameter_set_gradient_boosting(hyper_parameter_optimisation = True, \
                                                eval_metric = ['auc'], objective = ['binary:logistic'], \
                                                gamma = [0, 1, 3, 5, 7], eta = [0.1, 0.3], \
                                                max_depth = [5, 10, 15])

#Setting max_depth, splitter, presort rest are default values
#Hyper parameter optimisation - max_depth
#Hyper parameter optimisation - splitter
param_dt_1 = en.parameter_set_decision_tree(max_depth = [6, 10, 12, 15], splitter = ['best', 'random'], \
                                          presort = [True])
#Default Values
param_dt_2 = en.parameter_set_decision_tree()

en.train_base_models(['decision_tree','decision_tree', 'gradient_boosting'], \
                     [param_dt_1, param_dt_2, param_gb])

weights = en.assign_weights(weights = [2, 1, 3], hyper_parameter_optimisation = False)


en.train_ensemble_models(['gradient_boosting'], [param_gb],
                      ['gradient_boosting'],[param_gb], 
                      perform_weighted_average = True, weights_list = weights)

en.test_models(data_test)


/home/prajwal/anaconda3/lib/python3.5/site-packages/category_encoders/ordinal.py:178: FutureWarning: reshape is deprecated and will raise in a subsequent release. Please use .values.reshape(...) instead
  X[col] = X[col].astype(int).reshape(-1, )
/home/prajwal/anaconda3/lib/python3.5/site-packages/category_encoders/ordinal.py:167: FutureWarning: reshape is deprecated and will raise in a subsequent release. Please use .values.reshape(...) instead
  X[switch.get('col')] = X[switch.get('col')].astype(int).reshape(-1, )
Training Data (41188, 21)
Test Data (4119, 36)

TRAINING BASE MODELS


TESTING/CROSS VALIDATION BASE MODELS

decision_tree 
 0.922828282341
decision_tree 
 0.716768227779
gradient_boosting 
 0.946306227052

TRAINING ENSEMBLE MODELS

Weighted Average
Weight [2, 1, 3]
Metric Score 0.942348069366

TESTING PHASE


TESTING/CROSS VALIDATION BASE MODELS

decision_tree 
 0.930264257081
decision_tree 
 0.727851866988
gradient_boosting 
 0.950314019388

TESTING ENSEMBLE MODELS

Stacking gradient_boosting 
 0.949605146913
Blending gradient_boosting 
 0.950797819594
Weighted Average [2, 1, 3] 
 0.948052779297
CPU times: user 7.36 s, sys: 344 ms, total: 7.7 s
Wall time: 10min 25s

Example 4


In [6]:
%%time
# Example 4: random forest + MLP + gradient boosting base models; ensemble via
# stacking (gradient boosting and decision tree) and blending (decision tree).
Data = pd.read_csv('/home/prajwal/Desktop/bank-additional/bank-additional-full.csv',delimiter=';',header=0)
data_test = en.data_import(Data, label_output='y')
print('Training Data',Data.shape)
print('Test Data',data_test.shape)

# Score every model with ROC AUC.
en.metric_set('roc_auc_score')

# 3 layers to match the 3-entry dim_layer/activation lists below.
en.set_no_of_layers(3)

#Hyper Parameter Optimisation (max_depth and eta)
param_gb = en.parameter_set_gradient_boosting(hyper_parameter_optimisation = True, \
                                                eval_metric = ['auc'], objective = ['binary:logistic'], \
                                              max_depth = [5, 10, 15], eta = [0.1, 0.3, 0.5])

#Setting n_estimators, criterion, rest are default values
#Hyper parameter optimisation - n_estimators
param_rf = en.parameter_set_random_forest(n_estimators = [6, 10, 12], criterion = ['entropy'])

#Setting dim_layer, activation, rest are default values
#Hyper parameter optimisation : dim_layer - Layer1 and Layer 2
#Hyper parameter optimisation : activation - Layer1 and Layer 3 (both list two choices)
param_mlp = en.parameter_set_multi_layer_perceptron(hyper_parameter_optimisation = True, \
                                                    dim_layer = [[32,64,128], [32,64], [1]], \
                                                   activation = [['sigmoid','relu'], \
                                                                 ['sigmoid'], ['sigmoid','relu']], \
                                                   optimizer = 'sgd')

en.train_base_models(['random_forest','multi_layer_perceptron', 'gradient_boosting'], \
                     [param_rf, param_mlp, param_gb])

#Setting max_depth, splitter, presort rest are default values
#Hyper parameter optimisation - max_depth
#Hyper parameter optimisation - splitter
param_dt = en.parameter_set_decision_tree(max_depth = [6, 10, 12, 15], splitter = ['best', 'random'], \
                                          presort = [True])

# Positional args: stack_model_list, stack_parameters_list, blend_model_list,
# blend_parameters_list (matches the Stacking/Blending lines in the output).
en.train_ensemble_models(['gradient_boosting','decision_tree'], [param_gb, param_dt],
                      ['decision_tree'],[param_dt], 
                      perform_weighted_average = False)

en.test_models(data_test)


/home/prajwal/anaconda3/lib/python3.5/site-packages/category_encoders/ordinal.py:178: FutureWarning: reshape is deprecated and will raise in a subsequent release. Please use .values.reshape(...) instead
  X[col] = X[col].astype(int).reshape(-1, )
/home/prajwal/anaconda3/lib/python3.5/site-packages/category_encoders/ordinal.py:167: FutureWarning: reshape is deprecated and will raise in a subsequent release. Please use .values.reshape(...) instead
  X[switch.get('col')] = X[switch.get('col')].astype(int).reshape(-1, )
Training Data (41188, 21)
Test Data (12357, 21)

TRAINING BASE MODELS

Epoch 1/10
9609/9609 [==============================] - 0s - loss: 0.6683 - acc: 0.8476     
Epoch 2/10
9609/9609 [==============================] - 0s - loss: 0.6694 - acc: 0.8687     
Epoch 3/10
9609/9609 [==============================] - 0s - loss: 14.1472 - acc: 0.0000e+00     
Epoch 4/10
9609/9609 [==============================] - 0s - loss: 14.1472 - acc: 0.0000e+00     
Epoch 5/10
9609/9609 [==============================] - 0s - loss: 14.1472 - acc: 0.0000e+00     
Epoch 6/10
9609/9609 [==============================] - 0s - loss: 14.1472 - acc: 0.0000e+00     
Epoch 7/10
9609/9609 [==============================] - 0s - loss: 14.1472 - acc: 0.0000e+00     
Epoch 8/10
9609/9609 [==============================] - 0s - loss: 14.1472 - acc: 0.0000e+00     
Epoch 9/10
9609/9609 [==============================] - 0s - loss: 14.1472 - acc: 0.0000e+00     
Epoch 10/10
9609/9609 [==============================] - 0s - loss: 14.1472 - acc: 0.0000e+00     
Epoch 1/10
9610/9610 [==============================] - 1s - loss: 1.9163 - acc: 0.8759     
Epoch 2/10
9610/9610 [==============================] - 0s - loss: 1.8164 - acc: 0.8873     
Epoch 3/10
9610/9610 [==============================] - 0s - loss: 1.8164 - acc: 0.8873     
Epoch 4/10
9610/9610 [==============================] - 0s - loss: 1.8164 - acc: 0.8873     
Epoch 5/10
9610/9610 [==============================] - 0s - loss: 1.8164 - acc: 0.8873     
Epoch 6/10
9610/9610 [==============================] - 0s - loss: 1.8164 - acc: 0.8873     
Epoch 7/10
9610/9610 [==============================] - 0s - loss: 1.8164 - acc: 0.8873     
Epoch 8/10
9610/9610 [==============================] - 0s - loss: 1.8164 - acc: 0.8873     
Epoch 9/10
9610/9610 [==============================] - 0s - loss: 1.8164 - acc: 0.8873     
Epoch 10/10
9610/9610 [==============================] - 0s - loss: 1.8164 - acc: 0.8873     
Epoch 1/10
9611/9611 [==============================] - 1s - loss: 1.8162 - acc: 0.8873     
Epoch 2/10
9611/9611 [==============================] - 1s - loss: 1.8162 - acc: 0.8873     
Epoch 3/10
9611/9611 [==============================] - 0s - loss: 1.8162 - acc: 0.8873     
Epoch 4/10
9611/9611 [==============================] - 0s - loss: 1.8162 - acc: 0.8873     
Epoch 5/10
9611/9611 [==============================] - 0s - loss: 1.8162 - acc: 0.8873     
Epoch 6/10
9611/9611 [==============================] - 0s - loss: 1.8162 - acc: 0.8873     
Epoch 7/10
9611/9611 [==============================] - 0s - loss: 1.8162 - acc: 0.8873     
Epoch 8/10
9611/9611 [==============================] - 1s - loss: 1.8162 - acc: 0.8873     
Epoch 9/10
9611/9611 [==============================] - 1s - loss: 1.8162 - acc: 0.8873     
Epoch 10/10
9611/9611 [==============================] - 0s - loss: 1.8162 - acc: 0.8873     
Epoch 1/10
14415/14415 [==============================] - 1s - loss: 1.8185 - acc: 0.8811     
Epoch 2/10
14415/14415 [==============================] - 1s - loss: 1.8159 - acc: 0.8873     
Epoch 3/10
14415/14415 [==============================] - 1s - loss: 1.8159 - acc: 0.8873     
Epoch 4/10
14415/14415 [==============================] - 0s - loss: 1.8159 - acc: 0.8873     
Epoch 5/10
14415/14415 [==============================] - 1s - loss: 1.8159 - acc: 0.8873     
Epoch 6/10
14415/14415 [==============================] - 1s - loss: 1.8159 - acc: 0.8873     
Epoch 7/10
14415/14415 [==============================] - 1s - loss: 1.8159 - acc: 0.8873     
Epoch 8/10
14415/14415 [==============================] - 1s - loss: 1.8159 - acc: 0.8873     
Epoch 9/10
14415/14415 [==============================] - 1s - loss: 1.8159 - acc: 0.8873     
Epoch 10/10
14415/14415 [==============================] - 1s - loss: 1.8159 - acc: 0.8873     
14048/14415 [============================>.] - ETA: 0s
/home/prajwal/anaconda3/lib/python3.5/site-packages/keras/models.py:815: UserWarning: Network returning invalid probability values. The last layer might not normalize predictions into probabilities (like softmax or sigmoid would).
  warnings.warn('Network returning invalid probability values. '
TESTING/CROSS VALIDATION BASE MODELS

12032/14416 [========================>.....] - ETA: 0s
/home/prajwal/anaconda3/lib/python3.5/site-packages/keras/models.py:815: UserWarning: Network returning invalid probability values. The last layer might not normalize predictions into probabilities (like softmax or sigmoid would).
  warnings.warn('Network returning invalid probability values. '
random_forest 
 0.915610236501
multi_layer_perceptron 
 0.523591368682
gradient_boosting 
 0.94703275812

TRAINING ENSEMBLE MODELS


TESTING PHASE


TESTING/CROSS VALIDATION BASE MODELS

10368/12357 [========================>.....] - ETA: 0s
/home/prajwal/anaconda3/lib/python3.5/site-packages/keras/models.py:815: UserWarning: Network returning invalid probability values. The last layer might not normalize predictions into probabilities (like softmax or sigmoid would).
  warnings.warn('Network returning invalid probability values. '
random_forest 
 0.920414026343
multi_layer_perceptron 
 0.527481118082
gradient_boosting 
 0.945912772353

TESTING ENSEMBLE MODELS

Stacking gradient_boosting 
 0.945852595248
Stacking decision_tree 
 0.940171804488
Blending decision_tree 
 0.939711877133
CPU times: user 3.39 s, sys: 420 ms, total: 3.81 s
Wall time: 1min 36s

Example 5


In [7]:
%%time
Data = pd.read_csv('/home/prajwal/Desktop/bank-additional/bank-additional-full.csv',delimiter=';',header=0)
data_test = en.data_import(Data, label_output='y')
print('Training Data',Data.shape)
print('Test Data',data_test.shape)

en.metric_set('roc_auc_score')

en.set_no_of_layers(3)

#Hyper Parameter Optimisation (max_depth and eta)
param_gb_1 = en.parameter_set_gradient_boosting(hyper_parameter_optimisation = True, \
                                                eval_metric = ['auc'], objective = ['binary:logistic'], \
                                              max_depth = [5, 10, 15], eta = [0.1, 0.3, 0.5])

#Hyper Parameter Optimisation (gamma and eta)
param_gb_2 = en.parameter_set_gradient_boosting(hyper_parameter_optimisation = True, \
                                                eval_metric = ['auc'], objective = ['binary:logistic'], \
                                                gamma = [0, 1, 3, 5, 7], eta = [0.1, 0.3], \
                                                max_depth = [5, 10, 15], colsample_bylevel = [0.1])


#Setting max_depth, rest are default values
param_dt = en.parameter_set_decision_tree(max_depth = [6])

#Setting max_depth, n_estimators, max_features, rest are default values
#Hyper parameter optimisation - max_depth
#Hyper parameter optimisation - n_estimators
param_rf = en.parameter_set_random_forest(max_depth = [6, 10, 12, 15], n_estimators = [10, 20, 30], \
                                          max_features = ['log2'])

#Setting penalty, C, rest are default values
#Hyper parameter optimisation - penalty
#Hyper parameter optimisation - C
param_lor = en.parameter_set_logistic_regression(penalty = ['l1','l2'], C = [1.0, 2.0, 3.0, 5.0, 10.0])

#Setting fit_intercept, rest are default values
param_lr = en.parameter_set_linear_regression(fit_intercept = [False])

#Setting dim_layer, activation, rest are default values
#Hyper parameter optimisation : dim_layer - Layer1 and Layer 2
#Hyper parameter optimisation : activation - Layer1 and Layer 2
param_mlp = en.parameter_set_multi_layer_perceptron(hyper_parameter_optimisation = True, \
                                                    dim_layer = [[32,64,128], [32,64], [1]], \
                                                   activation = [['sigmoid','relu'], \
                                                                 ['sigmoid'], ['sigmoid','relu']], \
                                                   optimizer = 'rmsprop')



en.train_base_models(['random_forest','multi_layer_perceptron', 'gradient_boosting', \
                      'logistic_regression','linear_regression', 'decision_tree'], \
                     [param_rf, param_mlp, param_gb_1, param_lor, param_lr, param_dt])

weights = en.assign_weights(weights = [[1,2,3,4,5,6],[1,2,3,4,5,6],[1,2,3,4,5,6],[1,2,3,4,5,6],[1,2,3,4,5,6],\
                                    [1,2,3,4,5,6]], hyper_parameter_optimisation = True)

#Setting penalty, rest are default values
param_lor_ens = en.parameter_set_logistic_regression(penalty = ['l2'])

#Setting max_depth, splitter, presort rest are default values
#Hyper parameter optimisation - max_depth
#Hyper parameter optimisation - splitter
param_dt = en.parameter_set_decision_tree(max_depth = [6, 10, 12, 15], splitter = ['best', 'random'], \
                                          presort = [True])


en.train_ensemble_models(['gradient_boosting','logistic_regression'], [param_gb,param_lor_ens],
                      ['gradient_boosting','decision_tree','logistic_regression'],[param_gb,param_dt,\
                                                                                   param_lor_ens], 
                      perform_weighted_average = True, weights_list = weights)

en.test_models(data_test)


/home/prajwal/anaconda3/lib/python3.5/site-packages/category_encoders/ordinal.py:178: FutureWarning: reshape is deprecated and will raise in a subsequent release. Please use .values.reshape(...) instead
  X[col] = X[col].astype(int).reshape(-1, )
/home/prajwal/anaconda3/lib/python3.5/site-packages/category_encoders/ordinal.py:167: FutureWarning: reshape is deprecated and will raise in a subsequent release. Please use .values.reshape(...) instead
  X[switch.get('col')] = X[switch.get('col')].astype(int).reshape(-1, )
Training Data (41188, 21)
Test Data (12357, 21)

TRAINING BASE MODELS

Epoch 1/10
9609/9609 [==============================] - 0s - loss: 0.3407 - acc: 0.8952     
Epoch 2/10
9609/9609 [==============================] - 1s - loss: 0.2440 - acc: 0.9034     
Epoch 3/10
9609/9609 [==============================] - 1s - loss: 0.2497 - acc: 0.9082     
Epoch 4/10
9609/9609 [==============================] - 0s - loss: 0.2515 - acc: 0.9064     
Epoch 5/10
9609/9609 [==============================] - 1s - loss: 0.2440 - acc: 0.9084     
Epoch 6/10
9609/9609 [==============================] - 1s - loss: 0.2670 - acc: 0.9093     
Epoch 7/10
9609/9609 [==============================] - 0s - loss: 0.2622 - acc: 0.9086     
Epoch 8/10
9609/9609 [==============================] - 0s - loss: 0.2418 - acc: 0.9076     
Epoch 9/10
9609/9609 [==============================] - 0s - loss: 0.2472 - acc: 0.9108     
Epoch 10/10
9609/9609 [==============================] - 0s - loss: 0.2579 - acc: 0.9069     
Epoch 1/10
9610/9610 [==============================] - 0s - loss: 0.3361 - acc: 0.8874     
Epoch 2/10
9610/9610 [==============================] - 0s - loss: 0.2491 - acc: 0.8997     
Epoch 3/10
9610/9610 [==============================] - 0s - loss: 0.2414 - acc: 0.9026     
Epoch 4/10
9610/9610 [==============================] - 0s - loss: 0.2353 - acc: 0.9032     
Epoch 5/10
9610/9610 [==============================] - 0s - loss: 0.2474 - acc: 0.9053     
Epoch 6/10
9610/9610 [==============================] - 0s - loss: 0.2289 - acc: 0.9043     
Epoch 7/10
9610/9610 [==============================] - 0s - loss: 0.2271 - acc: 0.9056     
Epoch 8/10
9610/9610 [==============================] - 0s - loss: 0.2435 - acc: 0.9070     
Epoch 9/10
9610/9610 [==============================] - 0s - loss: 0.2419 - acc: 0.9062     
Epoch 10/10
9610/9610 [==============================] - 1s - loss: 0.2319 - acc: 0.9074     
Epoch 1/10
9611/9611 [==============================] - 1s - loss: 0.3014 - acc: 0.8901     
Epoch 2/10
9611/9611 [==============================] - 0s - loss: 0.2435 - acc: 0.9017     
Epoch 3/10
9611/9611 [==============================] - 1s - loss: 0.2571 - acc: 0.9027     
Epoch 4/10
9611/9611 [==============================] - 0s - loss: 0.2448 - acc: 0.9053     
Epoch 5/10
9611/9611 [==============================] - 0s - loss: 0.2539 - acc: 0.9076     
Epoch 6/10
9611/9611 [==============================] - 0s - loss: 0.2638 - acc: 0.9072     
Epoch 7/10
9611/9611 [==============================] - 0s - loss: 0.2491 - acc: 0.9075     
Epoch 8/10
9611/9611 [==============================] - 0s - loss: 0.2560 - acc: 0.9071     
Epoch 9/10
9611/9611 [==============================] - 0s - loss: 0.2490 - acc: 0.9058     
Epoch 10/10
9611/9611 [==============================] - 0s - loss: 0.2713 - acc: 0.9053     
Epoch 1/10
14415/14415 [==============================] - 1s - loss: 2.5823 - acc: 0.7328     
Epoch 2/10
14415/14415 [==============================] - 0s - loss: 0.2403 - acc: 0.9031     
Epoch 3/10
14415/14415 [==============================] - 1s - loss: 0.2601 - acc: 0.9049     
Epoch 4/10
14415/14415 [==============================] - 0s - loss: 0.2591 - acc: 0.9052     
Epoch 5/10
14415/14415 [==============================] - 0s - loss: 0.2534 - acc: 0.9068     
Epoch 6/10
14415/14415 [==============================] - 0s - loss: 0.2553 - acc: 0.9095     
Epoch 7/10
14415/14415 [==============================] - 0s - loss: 0.2601 - acc: 0.9047     
Epoch 8/10
14415/14415 [==============================] - 0s - loss: 0.2329 - acc: 0.9069     
Epoch 9/10
14415/14415 [==============================] - 0s - loss: 0.2594 - acc: 0.9041     
Epoch 10/10
14415/14415 [==============================] - 0s - loss: 0.2301 - acc: 0.9054     
14368/14415 [============================>.] - ETA: 0s
/home/prajwal/anaconda3/lib/python3.5/site-packages/keras/models.py:815: UserWarning: Network returning invalid probability values. The last layer might not normalize predictions into probabilities (like softmax or sigmoid would).
  warnings.warn('Network returning invalid probability values. '
TESTING/CROSS VALIDATION BASE MODELS

12640/14416 [=========================>....] - ETA: 0s
/home/prajwal/anaconda3/lib/python3.5/site-packages/keras/models.py:815: UserWarning: Network returning invalid probability values. The last layer might not normalize predictions into probabilities (like softmax or sigmoid would).
  warnings.warn('Network returning invalid probability values. '
random_forest 
 0.94123934833
multi_layer_perceptron 
 0.93083182762
gradient_boosting 
 0.947415299779
logistic_regression 
 0.926925830337
linear_regression 
 0.927305291253
decision_tree 
 0.918983144869

TRAINING ENSEMBLE MODELS

Weighted Average
Weight [5, 2, 5, 0, 0, 0]
Metric Score 0.946883847509

TESTING PHASE


TESTING/CROSS VALIDATION BASE MODELS

11392/12357 [==========================>...] - ETA: 0s
/home/prajwal/anaconda3/lib/python3.5/site-packages/keras/models.py:815: UserWarning: Network returning invalid probability values. The last layer might not normalize predictions into probabilities (like softmax or sigmoid would).
  warnings.warn('Network returning invalid probability values. '
random_forest 
 0.940707239859
multi_layer_perceptron 
 0.929103901652
gradient_boosting 
 0.946040497193
logistic_regression 
 0.925873796458
linear_regression 
 0.923094904896
decision_tree 
 0.922129483309

TESTING ENSEMBLE MODELS

Stacking gradient_boosting 
 0.945079203159
Stacking logistic_regression 
 0.944500592271
Blending gradient_boosting 
 0.946022152512
Blending decision_tree 
 0.93713955978
Blending logistic_regression 
 0.943660274856
Weighted Average [5, 2, 5, 0, 0, 0] 
 0.945867598576
CPU times: user 5.22 s, sys: 528 ms, total: 5.75 s
Wall time: 11min 39s

In [ ]: