In [1]:
#Loading useful packages
import numpy as np
import pandas as pd
import random
import matplotlib.pyplot as plt
import os.path
import sys
import argparse
import warnings
warnings.filterwarnings('ignore')


#General purpose AI packages
from sklearn.cross_validation import train_test_split,KFold
from sklearn.preprocessing import MinMaxScaler, StandardScaler
from sklearn.metrics import confusion_matrix, mean_squared_error
from sklearn.model_selection import ParameterGrid
from sklearn.gaussian_process import GaussianProcess

#Keras packages
from keras.models import Sequential
from keras.layers import Dense, Activation, Dropout, ActivityRegularization
from keras.callbacks import Callback, ModelCheckpoint, EarlyStopping
from keras.optimizers import RMSprop
from keras import regularizers 

#Hyperparameter optimization
import hyperopt
from hyperopt import hp, STATUS_OK


C:\ProgramData\Anaconda3\envs\PY36\lib\site-packages\sklearn\cross_validation.py:44: DeprecationWarning: This module was deprecated in version 0.18 in favor of the model_selection module into which all the refactored classes and functions are moved. Also note that the interface of the new CV iterators are different from that of this module. This module will be removed in 0.20.
  "This module will be removed in 0.20.", DeprecationWarning)
Using TensorFlow backend.

In [2]:
############## LOSSHISTORY CALLBACK CLASS ######################################
class LossHistory(Callback):
    def on_train_begin(self, logs={}):
        self.losses = []
        self.val_losses = []

    def on_epoch_end(self, batch, logs={}):
        self.losses.append(logs.get('loss'))
        self.val_losses.append(logs.get('val_loss'))

In [3]:
DATAFILE = os.path.join('data','data.csv')
TARGETFILE = os.path.join('data','target.csv')
OUTDIR = os.path.join('results')

In [4]:
############## PREPARING DATA ##################################################
dataset_trans = pd.read_table(os.path.join('data','dataset_trans.csv'),sep=',')
target = np.asarray(dataset_trans['Y'])
del dataset_trans['Y']
del dataset_trans['min_risk']
train = np.asarray(dataset_trans)
train_val_size = 0.8 #80% training+validation set and 20% test set
train_size = 0.7 #70% training set and 30% validation set
X_tr_val, X_te, Y_tr_val, Y_te = train_test_split(train, target, train_size=train_val_size, random_state=1)
X_tr, X_val, Y_tr, Y_val = train_test_split(X_tr_val, Y_tr_val, train_size=train_size, random_state=1)

In [5]:
def uniform_int(name, lower, upper):
    # `quniform` returns:
    # round(uniform(low, high) / q) * q
    return hp.quniform(name, lower, upper, q=1)

def loguniform_int(name, lower, upper):
    # Do not forget to make a logarithm for the
    # lower and upper bounds.
    return hp.qloguniform(name, np.log(lower), np.log(upper), q=1)

In [6]:
#def train_nn(X_tr,Y_tr,X_val,Y_val,params,verbose,save):
def train_nn(params):
    
    print('Testing: ', params)
    verbose = 0
    
    #Model callbacks
    filepath = os.path.join('results','weights.best.hdf5')
    mdlcheck = ModelCheckpoint(filepath, verbose=0, save_best_only=True)
    mdllosses = LossHistory()
    mdlstop = EarlyStopping(monitor='val_loss', min_delta=0, patience=10, verbose=0, mode='auto')

    #Model fit
    n_epochs = 5000
    n_batch = int(params['fit_n_batch'])
    kf = KFold(n = np.shape(X_tr_val)[0], n_folds = 5)
    performance_cv = []
    #mdllosses_cv = []
    models = []
    
    i = 1
    for tr_idx, val_idx in kf:
        
        #Build NN
        model = Sequential()
        model.add(Dense(units=int(params['n_nodes_1']), input_dim=np.shape(X_tr)[1], activity_regularizer=regularizers.l2(params['regularization_1'])))
        model.add(Activation(params['activation_1']))
        model.add(Dropout(params['dropout_1']))
        model.add(Dense(units=int(params['n_nodes_2']),activity_regularizer=regularizers.l2(params['regularization_2'])))
        model.add(Activation(params['activation_2']))
        model.add(Dropout(params['dropout_2']))
        model.add(Dense(units=int(params['n_nodes_3']),activity_regularizer=regularizers.l2(params['regularization_3'])))
        model.add(Activation(params['activation_3']))
        model.add(Dense(units=1))
        opt = RMSprop(lr=params['opt_lr'], rho=params['opt_rho'], epsilon=params['opt_epsilon'], decay=params['opt_decay'])
        model.compile(loss=params['comp_loss'],optimizer=opt)
    
        print("Fold: ",i," of 5")
        i = i+1
        X_train, X_valid = X_tr_val[tr_idx], X_tr_val[val_idx]
        Y_train, Y_valid = Y_tr_val[tr_idx], Y_tr_val[val_idx]
        scaler = StandardScaler().fit(X_train)
        X_train = scaler.transform(X_train)
        X_valid = scaler.transform(X_valid)

        history = model.fit(X_train, Y_train, validation_data = (X_valid, Y_valid),  epochs = n_epochs, batch_size = n_batch, callbacks = [mdlstop,mdlcheck,mdllosses],verbose = verbose)
        
        #Recalling best weights and appending loss value and loss history
        model.load_weights(filepath)
        models.append(model)
        performance_cv.append(min(mdllosses.val_losses))
        #mdllosses_cv.append(mdllosses)
        
    #Calculating in-cv std 
    loss_std = np.std(performance_cv)
    
    print('Obtained loss: ', np.mean(performance_cv), ' (', loss_std, ')')
    #Return model and best performances
    return {'loss' : np.mean(performance_cv), 'status': STATUS_OK, 'model': models[np.argmin(performance_cv)], 'loss_std': loss_std}

In [7]:
#Defining the trial memory space
trials = hyperopt.Trials()

#Defining the hyperparameter space
parameter_space = {
    'n_nodes_1': uniform_int('n_nodes_1', 1, 1000),
    'regularization_1': 0,
    'dropout_1': hp.uniform('dropout_1', 0, 0.5),
    'activation_1': hp.choice('activation_1', ['relu','sigmoid','tanh']),
    'n_nodes_2': uniform_int('n_nodes_2', 1, 1000),
    'regularization_2': 0,
    'dropout_2': hp.uniform('dropout_2', 0, 0.5),
    'activation_2': hp.choice('activation_2', ['relu','sigmoid','tanh']),
    'n_nodes_3': uniform_int('n_nodes_3', 1, 1000),
    'regularization_3': 0,
    'activation_3': hp.choice('activation_3', ['relu','sigmoid','tanh']),
    'fit_n_batch' : uniform_int('fit_n_batch', 16, 128),
    'comp_loss' : 'mean_squared_error',
    'opt_lr' : 0.002,
    'opt_rho' : 0.9,
    'opt_epsilon' : 1e-08,
    'opt_decay' : 0.0}

#Defining the  tree
tpe = hyperopt.partial(
    hyperopt.tpe.suggest,

    # Sample 1000 candidate and select candidate that
    # has highest Expected Improvement (EI)
    n_EI_candidates=1000,
    
    # Use 20% of best observations to estimate next
    # set of parameters
    gamma=0.2,
    
    # First 20 trials are going to be random
    n_startup_jobs=20,
)

print('===== Training the NN... =====')
best = hyperopt.fmin(
    train_nn,
    trials=trials,
    space=parameter_space,

    # Set up TPE for hyperparameter optimization
    algo=tpe,

    # Maximum number of iterations. Basically it trains at
    # most 200 networks before choose the best one.
    max_evals=100,
)
print('==============================\n')


#And the winner is...
#trials.results <--- mi da la storia
print('======== Best NN... ========')
print('Validation loss: ', trials.best_trial['result']['loss'])
print('Best model hyperparameters: ', best)
model = trials.best_trial['result']['model']
#loss_history = trials.best_trial['result']['loss_history']
print('==============================\n')


===== Training the NN... =====
Testing:  {'activation_1': 'sigmoid', 'activation_2': 'sigmoid', 'activation_3': 'relu', 'comp_loss': 'mean_squared_error', 'dropout_1': 0.1453706586924513, 'dropout_2': 0.23631224965565745, 'fit_n_batch': 84.0, 'n_nodes_1': 402.0, 'n_nodes_2': 399.0, 'n_nodes_3': 166.0, 'opt_decay': 0.0, 'opt_epsilon': 1e-08, 'opt_lr': 0.002, 'opt_rho': 0.9, 'regularization_1': 0, 'regularization_2': 0, 'regularization_3': 0}
Fold:  1  of 5
Fold:  2  of 5
Fold:  3  of 5
Fold:  4  of 5
Fold:  5  of 5
Obtained loss:  639.715305909  ( 177.682297298 )
Testing:  {'activation_1': 'sigmoid', 'activation_2': 'tanh', 'activation_3': 'tanh', 'comp_loss': 'mean_squared_error', 'dropout_1': 0.29566059696561064, 'dropout_2': 0.2118677985956796, 'fit_n_batch': 50.0, 'n_nodes_1': 270.0, 'n_nodes_2': 976.0, 'n_nodes_3': 492.0, 'opt_decay': 0.0, 'opt_epsilon': 1e-08, 'opt_lr': 0.002, 'opt_rho': 0.9, 'regularization_1': 0, 'regularization_2': 0, 'regularization_3': 0}
Fold:  1  of 5
Fold:  2  of 5
Fold:  3  of 5
Fold:  4  of 5
Fold:  5  of 5
Obtained loss:  753.1744332  ( 132.568038098 )
Testing:  {'activation_1': 'relu', 'activation_2': 'tanh', 'activation_3': 'sigmoid', 'comp_loss': 'mean_squared_error', 'dropout_1': 0.0642881253328434, 'dropout_2': 0.26998373985371227, 'fit_n_batch': 113.0, 'n_nodes_1': 719.0, 'n_nodes_2': 328.0, 'n_nodes_3': 388.0, 'opt_decay': 0.0, 'opt_epsilon': 1e-08, 'opt_lr': 0.002, 'opt_rho': 0.9, 'regularization_1': 0, 'regularization_2': 0, 'regularization_3': 0}
Fold:  1  of 5
Fold:  2  of 5
Fold:  3  of 5
Fold:  4  of 5
Fold:  5  of 5
Obtained loss:  496.483915132  ( 153.941197083 )
Testing:  {'activation_1': 'relu', 'activation_2': 'relu', 'activation_3': 'tanh', 'comp_loss': 'mean_squared_error', 'dropout_1': 0.19201769433529975, 'dropout_2': 0.38384789707678874, 'fit_n_batch': 92.0, 'n_nodes_1': 110.0, 'n_nodes_2': 43.0, 'n_nodes_3': 61.0, 'opt_decay': 0.0, 'opt_epsilon': 1e-08, 'opt_lr': 0.002, 'opt_rho': 0.9, 'regularization_1': 0, 'regularization_2': 0, 'regularization_3': 0}
Fold:  1  of 5
Fold:  2  of 5
Fold:  3  of 5
Fold:  4  of 5
Fold:  5  of 5
Obtained loss:  1029.33230853  ( 113.968286431 )
Testing:  {'activation_1': 'tanh', 'activation_2': 'relu', 'activation_3': 'sigmoid', 'comp_loss': 'mean_squared_error', 'dropout_1': 0.20628780509075428, 'dropout_2': 0.15250162505413084, 'fit_n_batch': 73.0, 'n_nodes_1': 288.0, 'n_nodes_2': 648.0, 'n_nodes_3': 903.0, 'opt_decay': 0.0, 'opt_epsilon': 1e-08, 'opt_lr': 0.002, 'opt_rho': 0.9, 'regularization_1': 0, 'regularization_2': 0, 'regularization_3': 0}
Fold:  1  of 5
Fold:  2  of 5
Fold:  3  of 5
Fold:  4  of 5
Fold:  5  of 5
Obtained loss:  463.490887105  ( 111.555615394 )
Testing:  {'activation_1': 'relu', 'activation_2': 'relu', 'activation_3': 'relu', 'comp_loss': 'mean_squared_error', 'dropout_1': 0.0980872012204746, 'dropout_2': 0.28594702409425155, 'fit_n_batch': 112.0, 'n_nodes_1': 604.0, 'n_nodes_2': 75.0, 'n_nodes_3': 503.0, 'opt_decay': 0.0, 'opt_epsilon': 1e-08, 'opt_lr': 0.002, 'opt_rho': 0.9, 'regularization_1': 0, 'regularization_2': 0, 'regularization_3': 0}
Fold:  1  of 5
Fold:  2  of 5
Fold:  3  of 5
Fold:  4  of 5
Fold:  5  of 5
Obtained loss:  621.965558941  ( 125.584652913 )
Testing:  {'activation_1': 'tanh', 'activation_2': 'relu', 'activation_3': 'sigmoid', 'comp_loss': 'mean_squared_error', 'dropout_1': 0.4878578527471442, 'dropout_2': 0.4063697561346136, 'fit_n_batch': 58.0, 'n_nodes_1': 469.0, 'n_nodes_2': 323.0, 'n_nodes_3': 44.0, 'opt_decay': 0.0, 'opt_epsilon': 1e-08, 'opt_lr': 0.002, 'opt_rho': 0.9, 'regularization_1': 0, 'regularization_2': 0, 'regularization_3': 0}
Fold:  1  of 5
Fold:  2  of 5
Fold:  3  of 5
Fold:  4  of 5
Fold:  5  of 5
Obtained loss:  907.240237262  ( 162.693876736 )
Testing:  {'activation_1': 'relu', 'activation_2': 'relu', 'activation_3': 'tanh', 'comp_loss': 'mean_squared_error', 'dropout_1': 0.3670766289946713, 'dropout_2': 0.4806834911501146, 'fit_n_batch': 39.0, 'n_nodes_1': 873.0, 'n_nodes_2': 783.0, 'n_nodes_3': 333.0, 'opt_decay': 0.0, 'opt_epsilon': 1e-08, 'opt_lr': 0.002, 'opt_rho': 0.9, 'regularization_1': 0, 'regularization_2': 0, 'regularization_3': 0}
Fold:  1  of 5
Fold:  2  of 5
Fold:  3  of 5
Fold:  4  of 5
Fold:  5  of 5
Obtained loss:  624.169078326  ( 137.12321511 )
Testing:  {'activation_1': 'relu', 'activation_2': 'relu', 'activation_3': 'sigmoid', 'comp_loss': 'mean_squared_error', 'dropout_1': 0.4822938154289541, 'dropout_2': 0.4329371254789181, 'fit_n_batch': 92.0, 'n_nodes_1': 275.0, 'n_nodes_2': 322.0, 'n_nodes_3': 802.0, 'opt_decay': 0.0, 'opt_epsilon': 1e-08, 'opt_lr': 0.002, 'opt_rho': 0.9, 'regularization_1': 0, 'regularization_2': 0, 'regularization_3': 0}
Fold:  1  of 5
Fold:  2  of 5
Fold:  3  of 5
Fold:  4  of 5
Fold:  5  of 5
Obtained loss:  753.590732244  ( 108.110332476 )
Testing:  {'activation_1': 'tanh', 'activation_2': 'tanh', 'activation_3': 'relu', 'comp_loss': 'mean_squared_error', 'dropout_1': 0.38735837348232416, 'dropout_2': 0.16211428503032393, 'fit_n_batch': 27.0, 'n_nodes_1': 25.0, 'n_nodes_2': 501.0, 'n_nodes_3': 73.0, 'opt_decay': 0.0, 'opt_epsilon': 1e-08, 'opt_lr': 0.002, 'opt_rho': 0.9, 'regularization_1': 0, 'regularization_2': 0, 'regularization_3': 0}
Fold:  1  of 5
Fold:  2  of 5
Fold:  3  of 5
Fold:  4  of 5
Fold:  5  of 5
Obtained loss:  1736.11966425  ( 220.765960477 )
Testing:  {'activation_1': 'sigmoid', 'activation_2': 'tanh', 'activation_3': 'sigmoid', 'comp_loss': 'mean_squared_error', 'dropout_1': 0.10624537142093371, 'dropout_2': 0.08045252346316578, 'fit_n_batch': 53.0, 'n_nodes_1': 402.0, 'n_nodes_2': 737.0, 'n_nodes_3': 391.0, 'opt_decay': 0.0, 'opt_epsilon': 1e-08, 'opt_lr': 0.002, 'opt_rho': 0.9, 'regularization_1': 0, 'regularization_2': 0, 'regularization_3': 0}
Fold:  1  of 5
Fold:  2  of 5
Fold:  3  of 5
Fold:  4  of 5
Fold:  5  of 5
Obtained loss:  517.255933574  ( 114.169417905 )
Testing:  {'activation_1': 'tanh', 'activation_2': 'tanh', 'activation_3': 'sigmoid', 'comp_loss': 'mean_squared_error', 'dropout_1': 0.17945186656828982, 'dropout_2': 0.19941103913008662, 'fit_n_batch': 90.0, 'n_nodes_1': 278.0, 'n_nodes_2': 428.0, 'n_nodes_3': 96.0, 'opt_decay': 0.0, 'opt_epsilon': 1e-08, 'opt_lr': 0.002, 'opt_rho': 0.9, 'regularization_1': 0, 'regularization_2': 0, 'regularization_3': 0}
Fold:  1  of 5
Fold:  2  of 5
Fold:  3  of 5
Fold:  4  of 5
Fold:  5  of 5
Obtained loss:  525.588388318  ( 92.3546665647 )
Testing:  {'activation_1': 'tanh', 'activation_2': 'relu', 'activation_3': 'relu', 'comp_loss': 'mean_squared_error', 'dropout_1': 0.3756447374259719, 'dropout_2': 0.016207765373092653, 'fit_n_batch': 119.0, 'n_nodes_1': 866.0, 'n_nodes_2': 604.0, 'n_nodes_3': 905.0, 'opt_decay': 0.0, 'opt_epsilon': 1e-08, 'opt_lr': 0.002, 'opt_rho': 0.9, 'regularization_1': 0, 'regularization_2': 0, 'regularization_3': 0}
Fold:  1  of 5
Fold:  2  of 5
Fold:  3  of 5
Fold:  4  of 5
Fold:  5  of 5
Obtained loss:  2945.46092319  ( 416.707412954 )
Testing:  {'activation_1': 'relu', 'activation_2': 'sigmoid', 'activation_3': 'relu', 'comp_loss': 'mean_squared_error', 'dropout_1': 0.08130416472133656, 'dropout_2': 0.0843654408189285, 'fit_n_batch': 58.0, 'n_nodes_1': 786.0, 'n_nodes_2': 14.0, 'n_nodes_3': 412.0, 'opt_decay': 0.0, 'opt_epsilon': 1e-08, 'opt_lr': 0.002, 'opt_rho': 0.9, 'regularization_1': 0, 'regularization_2': 0, 'regularization_3': 0}
Fold:  1  of 5
Fold:  2  of 5
Fold:  3  of 5
Fold:  4  of 5
Fold:  5  of 5
Obtained loss:  839.198840968  ( 163.937586379 )
Testing:  {'activation_1': 'sigmoid', 'activation_2': 'relu', 'activation_3': 'tanh', 'comp_loss': 'mean_squared_error', 'dropout_1': 0.44392631576726005, 'dropout_2': 0.16199561739670176, 'fit_n_batch': 77.0, 'n_nodes_1': 271.0, 'n_nodes_2': 402.0, 'n_nodes_3': 239.0, 'opt_decay': 0.0, 'opt_epsilon': 1e-08, 'opt_lr': 0.002, 'opt_rho': 0.9, 'regularization_1': 0, 'regularization_2': 0, 'regularization_3': 0}
Fold:  1  of 5
Fold:  2  of 5
Fold:  3  of 5
Fold:  4  of 5
Fold:  5  of 5
Obtained loss:  923.893222816  ( 136.420949574 )
Testing:  {'activation_1': 'sigmoid', 'activation_2': 'sigmoid', 'activation_3': 'relu', 'comp_loss': 'mean_squared_error', 'dropout_1': 0.33602451379115533, 'dropout_2': 0.22340553230172566, 'fit_n_batch': 65.0, 'n_nodes_1': 661.0, 'n_nodes_2': 180.0, 'n_nodes_3': 542.0, 'opt_decay': 0.0, 'opt_epsilon': 1e-08, 'opt_lr': 0.002, 'opt_rho': 0.9, 'regularization_1': 0, 'regularization_2': 0, 'regularization_3': 0}
Fold:  1  of 5
Fold:  2  of 5
Fold:  3  of 5
Fold:  4  of 5
Fold:  5  of 5
Obtained loss:  2326.64854585  ( 686.007073013 )
Testing:  {'activation_1': 'relu', 'activation_2': 'tanh', 'activation_3': 'relu', 'comp_loss': 'mean_squared_error', 'dropout_1': 0.34986657790124565, 'dropout_2': 0.1428125630398706, 'fit_n_batch': 119.0, 'n_nodes_1': 335.0, 'n_nodes_2': 357.0, 'n_nodes_3': 156.0, 'opt_decay': 0.0, 'opt_epsilon': 1e-08, 'opt_lr': 0.002, 'opt_rho': 0.9, 'regularization_1': 0, 'regularization_2': 0, 'regularization_3': 0}
Fold:  1  of 5
Fold:  2  of 5
Fold:  3  of 5
Fold:  4  of 5
Fold:  5  of 5
Obtained loss:  564.784821267  ( 133.146588964 )
Testing:  {'activation_1': 'sigmoid', 'activation_2': 'tanh', 'activation_3': 'relu', 'comp_loss': 'mean_squared_error', 'dropout_1': 0.44886673203909194, 'dropout_2': 0.4192447698111931, 'fit_n_batch': 70.0, 'n_nodes_1': 681.0, 'n_nodes_2': 216.0, 'n_nodes_3': 841.0, 'opt_decay': 0.0, 'opt_epsilon': 1e-08, 'opt_lr': 0.002, 'opt_rho': 0.9, 'regularization_1': 0, 'regularization_2': 0, 'regularization_3': 0}
Fold:  1  of 5
Fold:  2  of 5
Fold:  3  of 5
Fold:  4  of 5
Fold:  5  of 5
Obtained loss:  5418.05806056  ( 299.730885127 )
Testing:  {'activation_1': 'sigmoid', 'activation_2': 'sigmoid', 'activation_3': 'sigmoid', 'comp_loss': 'mean_squared_error', 'dropout_1': 0.13056275588099614, 'dropout_2': 0.21160159915011795, 'fit_n_batch': 95.0, 'n_nodes_1': 295.0, 'n_nodes_2': 125.0, 'n_nodes_3': 457.0, 'opt_decay': 0.0, 'opt_epsilon': 1e-08, 'opt_lr': 0.002, 'opt_rho': 0.9, 'regularization_1': 0, 'regularization_2': 0, 'regularization_3': 0}
Fold:  1  of 5
Fold:  2  of 5
Fold:  3  of 5
Fold:  4  of 5
Fold:  5  of 5
Obtained loss:  750.601753848  ( 173.547456467 )
Testing:  {'activation_1': 'relu', 'activation_2': 'sigmoid', 'activation_3': 'relu', 'comp_loss': 'mean_squared_error', 'dropout_1': 0.03872891172805071, 'dropout_2': 0.4578479813797694, 'fit_n_batch': 120.0, 'n_nodes_1': 978.0, 'n_nodes_2': 994.0, 'n_nodes_3': 34.0, 'opt_decay': 0.0, 'opt_epsilon': 1e-08, 'opt_lr': 0.002, 'opt_rho': 0.9, 'regularization_1': 0, 'regularization_2': 0, 'regularization_3': 0}
Fold:  1  of 5
Fold:  2  of 5
Fold:  3  of 5
Fold:  4  of 5
Fold:  5  of 5
Obtained loss:  1031.39784207  ( 584.362417015 )
Testing:  {'activation_1': 'tanh', 'activation_2': 'relu', 'activation_3': 'sigmoid', 'comp_loss': 'mean_squared_error', 'dropout_1': 0.0003732208268227011, 'dropout_2': 0.33557834470574466, 'fit_n_batch': 16.0, 'n_nodes_1': 135.0, 'n_nodes_2': 869.0, 'n_nodes_3': 661.0, 'opt_decay': 0.0, 'opt_epsilon': 1e-08, 'opt_lr': 0.002, 'opt_rho': 0.9, 'regularization_1': 0, 'regularization_2': 0, 'regularization_3': 0}
Fold:  1  of 5
Fold:  2  of 5
Fold:  3  of 5
Fold:  4  of 5
Fold:  5  of 5
Obtained loss:  456.408208266  ( 121.636985283 )
Testing:  {'activation_1': 'tanh', 'activation_2': 'relu', 'activation_3': 'sigmoid', 'comp_loss': 'mean_squared_error', 'dropout_1': 0.0003282619207469195, 'dropout_2': 0.33755361772927045, 'fit_n_batch': 16.0, 'n_nodes_1': 136.0, 'n_nodes_2': 885.0, 'n_nodes_3': 653.0, 'opt_decay': 0.0, 'opt_epsilon': 1e-08, 'opt_lr': 0.002, 'opt_rho': 0.9, 'regularization_1': 0, 'regularization_2': 0, 'regularization_3': 0}
Fold:  1  of 5
Fold:  2  of 5
Fold:  3  of 5
Fold:  4  of 5
Fold:  5  of 5
Obtained loss:  497.689365024  ( 129.842450057 )
Testing:  {'activation_1': 'tanh', 'activation_2': 'relu', 'activation_3': 'sigmoid', 'comp_loss': 'mean_squared_error', 'dropout_1': 0.0006062138780398292, 'dropout_2': 0.33049417791029995, 'fit_n_batch': 16.0, 'n_nodes_1': 3.0, 'n_nodes_2': 882.0, 'n_nodes_3': 999.0, 'opt_decay': 0.0, 'opt_epsilon': 1e-08, 'opt_lr': 0.002, 'opt_rho': 0.9, 'regularization_1': 0, 'regularization_2': 0, 'regularization_3': 0}
Fold:  1  of 5
Fold:  2  of 5
Fold:  3  of 5
Fold:  4  of 5
Fold:  5  of 5
Obtained loss:  3747.18699989  ( 239.977804371 )
Testing:  {'activation_1': 'tanh', 'activation_2': 'relu', 'activation_3': 'sigmoid', 'comp_loss': 'mean_squared_error', 'dropout_1': 0.2545994394949498, 'dropout_2': 0.4998907168086429, 'fit_n_batch': 31.0, 'n_nodes_1': 555.0, 'n_nodes_2': 554.0, 'n_nodes_3': 667.0, 'opt_decay': 0.0, 'opt_epsilon': 1e-08, 'opt_lr': 0.002, 'opt_rho': 0.9, 'regularization_1': 0, 'regularization_2': 0, 'regularization_3': 0}
Fold:  1  of 5
Fold:  2  of 5
Fold:  3  of 5
Fold:  4  of 5
Fold:  5  of 5
Obtained loss:  701.234096034  ( 179.203989901 )
Testing:  {'activation_1': 'tanh', 'activation_2': 'relu', 'activation_3': 'tanh', 'comp_loss': 'mean_squared_error', 'dropout_1': 0.2473556710978781, 'dropout_2': 0.00019042676509303802, 'fit_n_batch': 41.0, 'n_nodes_1': 157.0, 'n_nodes_2': 714.0, 'n_nodes_3': 632.0, 'opt_decay': 0.0, 'opt_epsilon': 1e-08, 'opt_lr': 0.002, 'opt_rho': 0.9, 'regularization_1': 0, 'regularization_2': 0, 'regularization_3': 0}
Fold:  1  of 5
Fold:  2  of 5
Fold:  3  of 5
Fold:  4  of 5
Fold:  5  of 5
Obtained loss:  663.486081241  ( 112.26695006 )
Testing:  {'activation_1': 'tanh', 'activation_2': 'sigmoid', 'activation_3': 'sigmoid', 'comp_loss': 'mean_squared_error', 'dropout_1': 0.03405436004728575, 'dropout_2': 0.3536290256487696, 'fit_n_batch': 24.0, 'n_nodes_1': 1000.0, 'n_nodes_2': 869.0, 'n_nodes_3': 742.0, 'opt_decay': 0.0, 'opt_epsilon': 1e-08, 'opt_lr': 0.002, 'opt_rho': 0.9, 'regularization_1': 0, 'regularization_2': 0, 'regularization_3': 0}
Fold:  1  of 5
Fold:  2  of 5
Fold:  3  of 5
Fold:  4  of 5
Fold:  5  of 5
Obtained loss:  468.029797645  ( 134.923455411 )
Testing:  {'activation_1': 'tanh', 'activation_2': 'relu', 'activation_3': 'sigmoid', 'comp_loss': 'mean_squared_error', 'dropout_1': 0.21829643382490568, 'dropout_2': 0.2987503374867352, 'fit_n_batch': 103.0, 'n_nodes_1': 188.0, 'n_nodes_2': 643.0, 'n_nodes_3': 998.0, 'opt_decay': 0.0, 'opt_epsilon': 1e-08, 'opt_lr': 0.002, 'opt_rho': 0.9, 'regularization_1': 0, 'regularization_2': 0, 'regularization_3': 0}
Fold:  1  of 5
Fold:  2  of 5
Fold:  3  of 5
Fold:  4  of 5
Fold:  5  of 5
Obtained loss:  495.758379283  ( 135.711861916 )
Testing:  {'activation_1': 'tanh', 'activation_2': 'relu', 'activation_3': 'sigmoid', 'comp_loss': 'mean_squared_error', 'dropout_1': 0.2984882771571544, 'dropout_2': 0.09779742050946125, 'fit_n_batch': 78.0, 'n_nodes_1': 64.0, 'n_nodes_2': 807.0, 'n_nodes_3': 919.0, 'opt_decay': 0.0, 'opt_epsilon': 1e-08, 'opt_lr': 0.002, 'opt_rho': 0.9, 'regularization_1': 0, 'regularization_2': 0, 'regularization_3': 0}
Fold:  1  of 5
Fold:  2  of 5
Fold:  3  of 5
Fold:  4  of 5
Fold:  5  of 5
Obtained loss:  657.489262867  ( 85.2978937725 )
Testing:  {'activation_1': 'tanh', 'activation_2': 'relu', 'activation_3': 'sigmoid', 'comp_loss': 'mean_squared_error', 'dropout_1': 0.15403632771119025, 'dropout_2': 0.040781322613054505, 'fit_n_batch': 82.0, 'n_nodes_1': 416.0, 'n_nodes_2': 666.0, 'n_nodes_3': 739.0, 'opt_decay': 0.0, 'opt_epsilon': 1e-08, 'opt_lr': 0.002, 'opt_rho': 0.9, 'regularization_1': 0, 'regularization_2': 0, 'regularization_3': 0}
Fold:  1  of 5
Fold:  2  of 5
Fold:  3  of 5
Fold:  4  of 5
Fold:  5  of 5
Obtained loss:  451.429654945  ( 111.357210217 )
Testing:  {'activation_1': 'tanh', 'activation_2': 'relu', 'activation_3': 'sigmoid', 'comp_loss': 'mean_squared_error', 'dropout_1': 0.15298863575070737, 'dropout_2': 0.03933170161043467, 'fit_n_batch': 83.0, 'n_nodes_1': 456.0, 'n_nodes_2': 944.0, 'n_nodes_3': 581.0, 'opt_decay': 0.0, 'opt_epsilon': 1e-08, 'opt_lr': 0.002, 'opt_rho': 0.9, 'regularization_1': 0, 'regularization_2': 0, 'regularization_3': 0}
Fold:  1  of 5
Fold:  2  of 5
Fold:  3  of 5
Fold:  4  of 5
Fold:  5  of 5
Obtained loss:  492.255703826  ( 135.813041398 )
Testing:  {'activation_1': 'tanh', 'activation_2': 'relu', 'activation_3': 'sigmoid', 'comp_loss': 'mean_squared_error', 'dropout_1': 0.14576504685701555, 'dropout_2': 0.25149439442041294, 'fit_n_batch': 128.0, 'n_nodes_1': 381.0, 'n_nodes_2': 704.0, 'n_nodes_3': 733.0, 'opt_decay': 0.0, 'opt_epsilon': 1e-08, 'opt_lr': 0.002, 'opt_rho': 0.9, 'regularization_1': 0, 'regularization_2': 0, 'regularization_3': 0}
Fold:  1  of 5
Fold:  2  of 5
Fold:  3  of 5
Fold:  4  of 5
Fold:  5  of 5
Obtained loss:  470.604081667  ( 145.517655947 )
Testing:  {'activation_1': 'tanh', 'activation_2': 'relu', 'activation_3': 'sigmoid', 'comp_loss': 'mean_squared_error', 'dropout_1': 0.04264997927991909, 'dropout_2': 0.3723992431250842, 'fit_n_batch': 102.0, 'n_nodes_1': 206.0, 'n_nodes_2': 812.0, 'n_nodes_3': 733.0, 'opt_decay': 0.0, 'opt_epsilon': 1e-08, 'opt_lr': 0.002, 'opt_rho': 0.9, 'regularization_1': 0, 'regularization_2': 0, 'regularization_3': 0}
Fold:  1  of 5
Fold:  2  of 5
Fold:  3  of 5
Fold:  4  of 5
Fold:  5  of 5
Obtained loss:  459.459073175  ( 139.03938643 )
Testing:  {'activation_1': 'tanh', 'activation_2': 'relu', 'activation_3': 'tanh', 'comp_loss': 'mean_squared_error', 'dropout_1': 0.00032796983405405933, 'dropout_2': 0.3098913262710165, 'fit_n_batch': 44.0, 'n_nodes_1': 533.0, 'n_nodes_2': 942.0, 'n_nodes_3': 813.0, 'opt_decay': 0.0, 'opt_epsilon': 1e-08, 'opt_lr': 0.002, 'opt_rho': 0.9, 'regularization_1': 0, 'regularization_2': 0, 'regularization_3': 0}
Fold:  1  of 5
Fold:  2  of 5
Fold:  3  of 5
Fold:  4  of 5
Fold:  5  of 5
Obtained loss:  639.459865464  ( 150.756957734 )
Testing:  {'activation_1': 'tanh', 'activation_2': 'sigmoid', 'activation_3': 'sigmoid', 'comp_loss': 'mean_squared_error', 'dropout_1': 0.2830301659956269, 'dropout_2': 0.25669824429032195, 'fit_n_batch': 65.0, 'n_nodes_1': 80.0, 'n_nodes_2': 488.0, 'n_nodes_3': 301.0, 'opt_decay': 0.0, 'opt_epsilon': 1e-08, 'opt_lr': 0.002, 'opt_rho': 0.9, 'regularization_1': 0, 'regularization_2': 0, 'regularization_3': 0}
Fold:  1  of 5
Fold:  2  of 5
Fold:  3  of 5
Fold:  4  of 5
Fold:  5  of 5
Obtained loss:  745.768174945  ( 121.216759461 )
Testing:  {'activation_1': 'tanh', 'activation_2': 'relu', 'activation_3': 'sigmoid', 'comp_loss': 'mean_squared_error', 'dropout_1': 0.17233631517906114, 'dropout_2': 0.12062026332734013, 'fit_n_batch': 84.0, 'n_nodes_1': 213.0, 'n_nodes_2': 579.0, 'n_nodes_3': 589.0, 'opt_decay': 0.0, 'opt_epsilon': 1e-08, 'opt_lr': 0.002, 'opt_rho': 0.9, 'regularization_1': 0, 'regularization_2': 0, 'regularization_3': 0}
Fold:  1  of 5
Fold:  2  of 5
Fold:  3  of 5
Fold:  4  of 5
Fold:  5  of 5
Obtained loss:  535.93971965  ( 152.429200209 )
Testing:  {'activation_1': 'tanh', 'activation_2': 'relu', 'activation_3': 'tanh', 'comp_loss': 'mean_squared_error', 'dropout_1': 0.11699176243209936, 'dropout_2': 0.046833012736665236, 'fit_n_batch': 102.0, 'n_nodes_1': 353.0, 'n_nodes_2': 663.0, 'n_nodes_3': 694.0, 'opt_decay': 0.0, 'opt_epsilon': 1e-08, 'opt_lr': 0.002, 'opt_rho': 0.9, 'regularization_1': 0, 'regularization_2': 0, 'regularization_3': 0}
Fold:  1  of 5
Fold:  2  of 5
Fold:  3  of 5
Fold:  4  of 5
Fold:  5  of 5
Obtained loss:  526.022926532  ( 106.664693125 )
Testing:  {'activation_1': 'sigmoid', 'activation_2': 'relu', 'activation_3': 'sigmoid', 'comp_loss': 'mean_squared_error', 'dropout_1': 0.06884139764864253, 'dropout_2': 0.1881984222582551, 'fit_n_batch': 48.0, 'n_nodes_1': 450.0, 'n_nodes_2': 762.0, 'n_nodes_3': 489.0, 'opt_decay': 0.0, 'opt_epsilon': 1e-08, 'opt_lr': 0.002, 'opt_rho': 0.9, 'regularization_1': 0, 'regularization_2': 0, 'regularization_3': 0}
Fold:  1  of 5
Fold:  2  of 5
Fold:  3  of 5
Fold:  4  of 5
Fold:  5  of 5
Obtained loss:  578.796552011  ( 166.64421277 )
Testing:  {'activation_1': 'tanh', 'activation_2': 'tanh', 'activation_3': 'sigmoid', 'comp_loss': 'mean_squared_error', 'dropout_1': 0.22175323161876864, 'dropout_2': 0.38972181239264564, 'fit_n_batch': 35.0, 'n_nodes_1': 599.0, 'n_nodes_2': 843.0, 'n_nodes_3': 952.0, 'opt_decay': 0.0, 'opt_epsilon': 1e-08, 'opt_lr': 0.002, 'opt_rho': 0.9, 'regularization_1': 0, 'regularization_2': 0, 'regularization_3': 0}
Fold:  1  of 5
Fold:  2  of 5
Fold:  3  of 5
Fold:  4  of 5
Fold:  5  of 5
Obtained loss:  505.689689953  ( 128.352815697 )
Testing:  {'activation_1': 'relu', 'activation_2': 'relu', 'activation_3': 'tanh', 'comp_loss': 'mean_squared_error', 'dropout_1': 0.023051360016890177, 'dropout_2': 0.2774442953671984, 'fit_n_batch': 109.0, 'n_nodes_1': 762.0, 'n_nodes_2': 517.0, 'n_nodes_3': 853.0, 'opt_decay': 0.0, 'opt_epsilon': 1e-08, 'opt_lr': 0.002, 'opt_rho': 0.9, 'regularization_1': 0, 'regularization_2': 0, 'regularization_3': 0}
Fold:  1  of 5
Fold:  2  of 5
Fold:  3  of 5
Fold:  4  of 5
Fold:  5  of 5
Obtained loss:  449.963940817  ( 115.332372235 )
Testing:  {'activation_1': 'relu', 'activation_2': 'relu', 'activation_3': 'tanh', 'comp_loss': 'mean_squared_error', 'dropout_1': 0.08862848127362505, 'dropout_2': 0.2773727881940423, 'fit_n_batch': 110.0, 'n_nodes_1': 771.0, 'n_nodes_2': 263.0, 'n_nodes_3': 854.0, 'opt_decay': 0.0, 'opt_epsilon': 1e-08, 'opt_lr': 0.002, 'opt_rho': 0.9, 'regularization_1': 0, 'regularization_2': 0, 'regularization_3': 0}
Fold:  1  of 5
Fold:  2  of 5
Fold:  3  of 5
Fold:  4  of 5
Fold:  5  of 5
Obtained loss:  469.437603913  ( 130.195381034 )
Testing:  {'activation_1': 'relu', 'activation_2': 'tanh', 'activation_3': 'tanh', 'comp_loss': 'mean_squared_error', 'dropout_1': 0.054431474942697795, 'dropout_2': 0.23629774173038764, 'fit_n_batch': 128.0, 'n_nodes_1': 928.0, 'n_nodes_2': 474.0, 'n_nodes_3': 778.0, 'opt_decay': 0.0, 'opt_epsilon': 1e-08, 'opt_lr': 0.002, 'opt_rho': 0.9, 'regularization_1': 0, 'regularization_2': 0, 'regularization_3': 0}
Fold:  1  of 5
Fold:  2  of 5
Fold:  3  of 5
Fold:  4  of 5
Fold:  5  of 5
Obtained loss:  478.738986554  ( 159.927988701 )
Testing:  {'activation_1': 'relu', 'activation_2': 'relu', 'activation_3': 'tanh', 'comp_loss': 'mean_squared_error', 'dropout_1': 0.022834225446598477, 'dropout_2': 0.18293607640217197, 'fit_n_batch': 108.0, 'n_nodes_1': 763.0, 'n_nodes_2': 536.0, 'n_nodes_3': 866.0, 'opt_decay': 0.0, 'opt_epsilon': 1e-08, 'opt_lr': 0.002, 'opt_rho': 0.9, 'regularization_1': 0, 'regularization_2': 0, 'regularization_3': 0}
Fold:  1  of 5
Fold:  2  of 5
Fold:  3  of 5
Fold:  4  of 5
Fold:  5  of 5
Obtained loss:  517.139740879  ( 127.631507017 )
Testing:  {'activation_1': 'relu', 'activation_2': 'sigmoid', 'activation_3': 'tanh', 'comp_loss': 'mean_squared_error', 'dropout_1': 0.19526314207814863, 'dropout_2': 0.1258342509410969, 'fit_n_batch': 96.0, 'n_nodes_1': 839.0, 'n_nodes_2': 444.0, 'n_nodes_3': 957.0, 'opt_decay': 0.0, 'opt_epsilon': 1e-08, 'opt_lr': 0.002, 'opt_rho': 0.9, 'regularization_1': 0, 'regularization_2': 0, 'regularization_3': 0}
Fold:  1  of 5
Fold:  2  of 5
Fold:  3  of 5
Fold:  4  of 5
Fold:  5  of 5
Obtained loss:  454.851240721  ( 125.636468159 )
Testing:  {'activation_1': 'relu', 'activation_2': 'relu', 'activation_3': 'tanh', 'comp_loss': 'mean_squared_error', 'dropout_1': 0.16506021926760056, 'dropout_2': 0.05554523274192445, 'fit_n_batch': 88.0, 'n_nodes_1': 626.0, 'n_nodes_2': 619.0, 'n_nodes_3': 778.0, 'opt_decay': 0.0, 'opt_epsilon': 1e-08, 'opt_lr': 0.002, 'opt_rho': 0.9, 'regularization_1': 0, 'regularization_2': 0, 'regularization_3': 0}
Fold:  1  of 5
Fold:  2  of 5
Fold:  3  of 5
Fold:  4  of 5
Fold:  5  of 5
Obtained loss:  458.927531682  ( 153.844127199 )
Testing:  {'activation_1': 'relu', 'activation_2': 'tanh', 'activation_3': 'tanh', 'comp_loss': 'mean_squared_error', 'dropout_1': 0.12349045395440018, 'dropout_2': 0.4485965161295732, 'fit_n_batch': 124.0, 'n_nodes_1': 717.0, 'n_nodes_2': 364.0, 'n_nodes_3': 600.0, 'opt_decay': 0.0, 'opt_epsilon': 1e-08, 'opt_lr': 0.002, 'opt_rho': 0.9, 'regularization_1': 0, 'regularization_2': 0, 'regularization_3': 0}
Fold:  1  of 5
Fold:  2  of 5
Fold:  3  of 5
Fold:  4  of 5
Fold:  5  of 5
Obtained loss:  578.285299173  ( 166.286047526 )
Testing:  {'activation_1': 'relu', 'activation_2': 'relu', 'activation_3': 'tanh', 'comp_loss': 'mean_squared_error', 'dropout_1': 0.09851051699581398, 'dropout_2': 0.30763177107485756, 'fit_n_batch': 115.0, 'n_nodes_1': 504.0, 'n_nodes_2': 281.0, 'n_nodes_3': 536.0, 'opt_decay': 0.0, 'opt_epsilon': 1e-08, 'opt_lr': 0.002, 'opt_rho': 0.9, 'regularization_1': 0, 'regularization_2': 0, 'regularization_3': 0}
Fold:  1  of 5
Fold:  2  of 5
Fold:  3  of 5
Fold:  4  of 5
Fold:  5  of 5
Obtained loss:  524.644047154  ( 124.006865827 )
Testing:  {'activation_1': 'relu', 'activation_2': 'relu', 'activation_3': 'tanh', 'comp_loss': 'mean_squared_error', 'dropout_1': 0.31276155666122185, 'dropout_2': 0.0002281652072172735, 'fit_n_batch': 58.0, 'n_nodes_1': 929.0, 'n_nodes_2': 680.0, 'n_nodes_3': 885.0, 'opt_decay': 0.0, 'opt_epsilon': 1e-08, 'opt_lr': 0.002, 'opt_rho': 0.9, 'regularization_1': 0, 'regularization_2': 0, 'regularization_3': 0}
Fold:  1  of 5
Fold:  2  of 5
Fold:  3  of 5
Fold:  4  of 5
Fold:  5  of 5
Obtained loss:  430.716272275  ( 113.802888945 )
Testing:  {'activation_1': 'relu', 'activation_2': 'sigmoid', 'activation_3': 'tanh', 'comp_loss': 'mean_squared_error', 'dropout_1': 0.4072062312172584, 'dropout_2': 0.2690793281516164, 'fit_n_batch': 56.0, 'n_nodes_1': 934.0, 'n_nodes_2': 526.0, 'n_nodes_3': 955.0, 'opt_decay': 0.0, 'opt_epsilon': 1e-08, 'opt_lr': 0.002, 'opt_rho': 0.9, 'regularization_1': 0, 'regularization_2': 0, 'regularization_3': 0}
Fold:  1  of 5
Fold:  2  of 5
Fold:  3  of 5
Fold:  4  of 5
Fold:  5  of 5
Obtained loss:  507.461898555  ( 103.945834979 )
Testing:  {'activation_1': 'relu', 'activation_2': 'tanh', 'activation_3': 'tanh', 'comp_loss': 'mean_squared_error', 'dropout_1': 0.3192211448353836, 'dropout_2': 0.229328675795878, 'fit_n_batch': 64.0, 'n_nodes_1': 823.0, 'n_nodes_2': 585.0, 'n_nodes_3': 886.0, 'opt_decay': 0.0, 'opt_epsilon': 1e-08, 'opt_lr': 0.002, 'opt_rho': 0.9, 'regularization_1': 0, 'regularization_2': 0, 'regularization_3': 0}
Fold:  1  of 5
Fold:  2  of 5
Fold:  3  of 5
Fold:  4  of 5
Fold:  5  of 5
Obtained loss:  464.931956637  ( 136.583988863 )
Testing:  {'activation_1': 'relu', 'activation_2': 'relu', 'activation_3': 'tanh', 'comp_loss': 'mean_squared_error', 'dropout_1': 0.40798598392580754, 'dropout_2': 0.1746834755089621, 'fit_n_batch': 71.0, 'n_nodes_1': 911.0, 'n_nodes_2': 438.0, 'n_nodes_3': 336.0, 'opt_decay': 0.0, 'opt_epsilon': 1e-08, 'opt_lr': 0.002, 'opt_rho': 0.9, 'regularization_1': 0, 'regularization_2': 0, 'regularization_3': 0}
Fold:  1  of 5
Fold:  2  of 5
Fold:  3  of 5
Fold:  4  of 5
Fold:  5  of 5
Obtained loss:  524.07515543  ( 113.390876823 )
Testing:  {'activation_1': 'relu', 'activation_2': 'relu', 'activation_3': 'tanh', 'comp_loss': 'mean_squared_error', 'dropout_1': 0.4996708233709892, 'dropout_2': 0.20690475460486019, 'fit_n_batch': 50.0, 'n_nodes_1': 969.0, 'n_nodes_2': 88.0, 'n_nodes_3': 821.0, 'opt_decay': 0.0, 'opt_epsilon': 1e-08, 'opt_lr': 0.002, 'opt_rho': 0.9, 'regularization_1': 0, 'regularization_2': 0, 'regularization_3': 0}
Fold:  1  of 5
Fold:  2  of 5
Fold:  3  of 5
Fold:  4  of 5
Fold:  5  of 5
Obtained loss:  715.150151638  ( 88.5191539125 )
Testing:  {'activation_1': 'relu', 'activation_2': 'sigmoid', 'activation_3': 'tanh', 'comp_loss': 'mean_squared_error', 'dropout_1': 0.46043299464856746, 'dropout_2': 0.40284215141394986, 'fit_n_batch': 61.0, 'n_nodes_1': 723.0, 'n_nodes_2': 386.0, 'n_nodes_3': 435.0, 'opt_decay': 0.0, 'opt_epsilon': 1e-08, 'opt_lr': 0.002, 'opt_rho': 0.9, 'regularization_1': 0, 'regularization_2': 0, 'regularization_3': 0}
Fold:  1  of 5
Fold:  2  of 5
Fold:  3  of 5
Fold:  4  of 5
Fold:  5  of 5
Obtained loss:  643.750083565  ( 191.082575761 )
Testing:  {'activation_1': 'sigmoid', 'activation_2': 'tanh', 'activation_3': 'relu', 'comp_loss': 'mean_squared_error', 'dropout_1': 0.266439934592288, 'dropout_2': 0.36429836634694074, 'fit_n_batch': 76.0, 'n_nodes_1': 884.0, 'n_nodes_2': 697.0, 'n_nodes_3': 1000.0, 'opt_decay': 0.0, 'opt_epsilon': 1e-08, 'opt_lr': 0.002, 'opt_rho': 0.9, 'regularization_1': 0, 'regularization_2': 0, 'regularization_3': 0}
Fold:  1  of 5
Fold:  2  of 5
Fold:  3  of 5
Fold:  4  of 5
Fold:  5  of 5
Obtained loss:  6218.63456492  ( 143.580577872 )
Testing:  {'activation_1': 'relu', 'activation_2': 'relu', 'activation_3': 'tanh', 'comp_loss': 'mean_squared_error', 'dropout_1': 0.35271111052181414, 'dropout_2': 2.5895699613378442e-05, 'fit_n_batch': 22.0, 'n_nodes_1': 812.0, 'n_nodes_2': 753.0, 'n_nodes_3': 2.0, 'opt_decay': 0.0, 'opt_epsilon': 1e-08, 'opt_lr': 0.002, 'opt_rho': 0.9, 'regularization_1': 0, 'regularization_2': 0, 'regularization_3': 0}
Fold:  1  of 5
Fold:  2  of 5
Fold:  3  of 5
Fold:  4  of 5
Fold:  5  of 5
Obtained loss:  2676.74062653  ( 270.148346338 )
Testing:  {'activation_1': 'relu', 'activation_2': 'relu', 'activation_3': 'tanh', 'comp_loss': 'mean_squared_error', 'dropout_1': 0.31704491451989675, 'dropout_2': 0.14245902796673635, 'fit_n_batch': 68.0, 'n_nodes_1': 669.0, 'n_nodes_2': 176.0, 'n_nodes_3': 204.0, 'opt_decay': 0.0, 'opt_epsilon': 1e-08, 'opt_lr': 0.002, 'opt_rho': 0.9, 'regularization_1': 0, 'regularization_2': 0, 'regularization_3': 0}
Fold:  1  of 5
Fold:  2  of 5
Fold:  3  of 5
Fold:  4  of 5
Fold:  5  of 5
Obtained loss:  544.42020503  ( 124.933355578 )
Testing:  {'activation_1': 'sigmoid', 'activation_2': 'sigmoid', 'activation_3': 'relu', 'comp_loss': 'mean_squared_error', 'dropout_1': 0.3946316753727347, 'dropout_2': 0.48046873230842135, 'fit_n_batch': 97.0, 'n_nodes_1': 731.0, 'n_nodes_2': 301.0, 'n_nodes_3': 921.0, 'opt_decay': 0.0, 'opt_epsilon': 1e-08, 'opt_lr': 0.002, 'opt_rho': 0.9, 'regularization_1': 0, 'regularization_2': 0, 'regularization_3': 0}
Fold:  1  of 5
Fold:  2  of 5
Fold:  3  of 5
Fold:  4  of 5
Fold:  5  of 5
Obtained loss:  5085.84459791  ( 198.106640642 )
Testing:  {'activation_1': 'relu', 'activation_2': 'relu', 'activation_3': 'tanh', 'comp_loss': 'mean_squared_error', 'dropout_1': 0.4282140486129483, 'dropout_2': 0.06992569618356875, 'fit_n_batch': 54.0, 'n_nodes_1': 1000.0, 'n_nodes_2': 236.0, 'n_nodes_3': 697.0, 'opt_decay': 0.0, 'opt_epsilon': 1e-08, 'opt_lr': 0.002, 'opt_rho': 0.9, 'regularization_1': 0, 'regularization_2': 0, 'regularization_3': 0}
Fold:  1  of 5
Fold:  2  of 5
Fold:  3  of 5
Fold:  4  of 5
Fold:  5  of 5
Obtained loss:  515.653012893  ( 140.970921576 )
Testing:  {'activation_1': 'relu', 'activation_2': 'tanh', 'activation_3': 'tanh', 'comp_loss': 'mean_squared_error', 'dropout_1': 0.23520740507104126, 'dropout_2': 0.01985719429753996, 'fit_n_batch': 36.0, 'n_nodes_1': 581.0, 'n_nodes_2': 924.0, 'n_nodes_3': 121.0, 'opt_decay': 0.0, 'opt_epsilon': 1e-08, 'opt_lr': 0.002, 'opt_rho': 0.9, 'regularization_1': 0, 'regularization_2': 0, 'regularization_3': 0}
Fold:  1  of 5
Fold:  2  of 5
Fold:  3  of 5
Fold:  4  of 5
Fold:  5  of 5
Obtained loss:  465.86962053  ( 111.345744956 )
Testing:  {'activation_1': 'relu', 'activation_2': 'relu', 'activation_3': 'relu', 'comp_loss': 'mean_squared_error', 'dropout_1': 0.36840728519459315, 'dropout_2': 0.10325497890698232, 'fit_n_batch': 46.0, 'n_nodes_1': 636.0, 'n_nodes_2': 338.0, 'n_nodes_3': 780.0, 'opt_decay': 0.0, 'opt_epsilon': 1e-08, 'opt_lr': 0.002, 'opt_rho': 0.9, 'regularization_1': 0, 'regularization_2': 0, 'regularization_3': 0}
Fold:  1  of 5
Fold:  2  of 5
Fold:  3  of 5
Fold:  4  of 5
Fold:  5  of 5
Obtained loss:  1479.80743319  ( 154.66480791 )
Testing:  {'activation_1': 'sigmoid', 'activation_2': 'relu', 'activation_3': 'tanh', 'comp_loss': 'mean_squared_error', 'dropout_1': 0.2802638700158154, 'dropout_2': 0.29068646398128783, 'fit_n_batch': 88.0, 'n_nodes_1': 860.0, 'n_nodes_2': 624.0, 'n_nodes_3': 542.0, 'opt_decay': 0.0, 'opt_epsilon': 1e-08, 'opt_lr': 0.002, 'opt_rho': 0.9, 'regularization_1': 0, 'regularization_2': 0, 'regularization_3': 0}
Fold:  1  of 5
Fold:  2  of 5
Fold:  3  of 5
Fold:  4  of 5
Fold:  5  of 5
Obtained loss:  852.685414972  ( 145.81730595 )
Testing:  {'activation_1': 'relu', 'activation_2': 'sigmoid', 'activation_3': 'tanh', 'comp_loss': 'mean_squared_error', 'dropout_1': 0.47258886657914734, 'dropout_2': 0.3220563629967971, 'fit_n_batch': 74.0, 'n_nodes_1': 960.0, 'n_nodes_2': 4.0, 'n_nodes_3': 362.0, 'opt_decay': 0.0, 'opt_epsilon': 1e-08, 'opt_lr': 0.002, 'opt_rho': 0.9, 'regularization_1': 0, 'regularization_2': 0, 'regularization_3': 0}
Fold:  1  of 5
Fold:  2  of 5
Fold:  3  of 5
Fold:  4  of 5
Fold:  5  of 5
Obtained loss:  1840.48871525  ( 106.9656587 )
Testing:  {'activation_1': 'relu', 'activation_2': 'relu', 'activation_3': 'relu', 'comp_loss': 'mean_squared_error', 'dropout_1': 0.19107590139408387, 'dropout_2': 0.3493930807885635, 'fit_n_batch': 60.0, 'n_nodes_1': 895.0, 'n_nodes_2': 999.0, 'n_nodes_3': 275.0, 'opt_decay': 0.0, 'opt_epsilon': 1e-08, 'opt_lr': 0.002, 'opt_rho': 0.9, 'regularization_1': 0, 'regularization_2': 0, 'regularization_3': 0}
Fold:  1  of 5
Fold:  2  of 5
Fold:  3  of 5
Fold:  4  of 5
Fold:  5  of 5
Obtained loss:  1158.93956752  ( 169.922563351 )
Testing:  {'activation_1': 'sigmoid', 'activation_2': 'tanh', 'activation_3': 'tanh', 'comp_loss': 'mean_squared_error', 'dropout_1': 0.3305961170948415, 'dropout_2': 0.42657443188607624, 'fit_n_batch': 80.0, 'n_nodes_1': 797.0, 'n_nodes_2': 407.0, 'n_nodes_3': 624.0, 'opt_decay': 0.0, 'opt_epsilon': 1e-08, 'opt_lr': 0.002, 'opt_rho': 0.9, 'regularization_1': 0, 'regularization_2': 0, 'regularization_3': 0}
Fold:  1  of 5
Fold:  2  of 5
Fold:  3  of 5
Fold:  4  of 5
Fold:  5  of 5
Obtained loss:  865.979990149  ( 217.638261304 )
Testing:  {'activation_1': 'relu', 'activation_2': 'relu', 'activation_3': 'tanh', 'comp_loss': 'mean_squared_error', 'dropout_1': 0.301354071475426, 'dropout_2': 0.2187009672633622, 'fit_n_batch': 106.0, 'n_nodes_1': 694.0, 'n_nodes_2': 791.0, 'n_nodes_3': 976.0, 'opt_decay': 0.0, 'opt_epsilon': 1e-08, 'opt_lr': 0.002, 'opt_rho': 0.9, 'regularization_1': 0, 'regularization_2': 0, 'regularization_3': 0}
Fold:  1  of 5
Fold:  2  of 5
Fold:  3  of 5
Fold:  4  of 5
Fold:  5  of 5
Obtained loss:  512.577293522  ( 130.108063414 )
Testing:  {'activation_1': 'relu', 'activation_2': 'relu', 'activation_3': 'relu', 'comp_loss': 'mean_squared_error', 'dropout_1': 0.3512713385330469, 'dropout_2': 0.15995734510258094, 'fit_n_batch': 115.0, 'n_nodes_1': 752.0, 'n_nodes_2': 466.0, 'n_nodes_3': 886.0, 'opt_decay': 0.0, 'opt_epsilon': 1e-08, 'opt_lr': 0.002, 'opt_rho': 0.9, 'regularization_1': 0, 'regularization_2': 0, 'regularization_3': 0}
Fold:  1  of 5
Fold:  2  of 5
Fold:  3  of 5
Fold:  4  of 5
Fold:  5  of 5
Obtained loss:  886.710982484  ( 113.094344872 )
Testing:  {'activation_1': 'relu', 'activation_2': 'relu', 'activation_3': 'tanh', 'comp_loss': 'mean_squared_error', 'dropout_1': 0.430405007062448, 'dropout_2': 0.26292392897946965, 'fit_n_batch': 29.0, 'n_nodes_1': 849.0, 'n_nodes_2': 512.0, 'n_nodes_3': 698.0, 'opt_decay': 0.0, 'opt_epsilon': 1e-08, 'opt_lr': 0.002, 'opt_rho': 0.9, 'regularization_1': 0, 'regularization_2': 0, 'regularization_3': 0}
Fold:  1  of 5
Fold:  2  of 5
Fold:  3  of 5
Fold:  4  of 5
Fold:  5  of 5
Obtained loss:  628.290493795  ( 153.547264035 )
Testing:  {'activation_1': 'sigmoid', 'activation_2': 'sigmoid', 'activation_3': 'tanh', 'comp_loss': 'mean_squared_error', 'dropout_1': 0.21236679317948307, 'dropout_2': 0.24340873071273292, 'fit_n_batch': 41.0, 'n_nodes_1': 551.0, 'n_nodes_2': 562.0, 'n_nodes_3': 485.0, 'opt_decay': 0.0, 'opt_epsilon': 1e-08, 'opt_lr': 0.002, 'opt_rho': 0.9, 'regularization_1': 0, 'regularization_2': 0, 'regularization_3': 0}
Fold:  1  of 5
Fold:  2  of 5
Fold:  3  of 5
Fold:  4  of 5
Fold:  5  of 5
Obtained loss:  586.725328933  ( 111.76014084 )
Testing:  {'activation_1': 'relu', 'activation_2': 'relu', 'activation_3': 'tanh', 'comp_loss': 'mean_squared_error', 'dropout_1': 0.23693014098031767, 'dropout_2': 0.1966166771008954, 'fit_n_batch': 92.0, 'n_nodes_1': 496.0, 'n_nodes_2': 728.0, 'n_nodes_3': 834.0, 'opt_decay': 0.0, 'opt_epsilon': 1e-08, 'opt_lr': 0.002, 'opt_rho': 0.9, 'regularization_1': 0, 'regularization_2': 0, 'regularization_3': 0}
Fold:  1  of 5
Fold:  2  of 5
Fold:  3  of 5
Fold:  4  of 5
Fold:  5  of 5
Obtained loss:  450.73066072  ( 124.009293216 )
Testing:  {'activation_1': 'relu', 'activation_2': 'tanh', 'activation_3': 'relu', 'comp_loss': 'mean_squared_error', 'dropout_1': 0.017972132799591034, 'dropout_2': 0.02047786819108935, 'fit_n_batch': 51.0, 'n_nodes_1': 643.0, 'n_nodes_2': 683.0, 'n_nodes_3': 927.0, 'opt_decay': 0.0, 'opt_epsilon': 1e-08, 'opt_lr': 0.002, 'opt_rho': 0.9, 'regularization_1': 0, 'regularization_2': 0, 'regularization_3': 0}
Fold:  1  of 5
Fold:  2  of 5
Fold:  3  of 5
Fold:  4  of 5
Fold:  5  of 5
Obtained loss:  3772.09602804  ( 990.951372565 )
Testing:  {'activation_1': 'sigmoid', 'activation_2': 'relu', 'activation_3': 'tanh', 'comp_loss': 'mean_squared_error', 'dropout_1': 0.2643077952168089, 'dropout_2': 0.4993524691445358, 'fit_n_batch': 123.0, 'n_nodes_1': 945.0, 'n_nodes_2': 835.0, 'n_nodes_3': 402.0, 'opt_decay': 0.0, 'opt_epsilon': 1e-08, 'opt_lr': 0.002, 'opt_rho': 0.9, 'regularization_1': 0, 'regularization_2': 0, 'regularization_3': 0}
Fold:  1  of 5
Fold:  2  of 5
Fold:  3  of 5
Fold:  4  of 5
Fold:  5  of 5
Obtained loss:  1093.75451182  ( 154.556677434 )
Testing:  {'activation_1': 'relu', 'activation_2': 'relu', 'activation_3': 'tanh', 'comp_loss': 'mean_squared_error', 'dropout_1': 0.13578373353644296, 'dropout_2': 0.38510134228668835, 'fit_n_batch': 20.0, 'n_nodes_1': 1000.0, 'n_nodes_2': 910.0, 'n_nodes_3': 559.0, 'opt_decay': 0.0, 'opt_epsilon': 1e-08, 'opt_lr': 0.002, 'opt_rho': 0.9, 'regularization_1': 0, 'regularization_2': 0, 'regularization_3': 0}
Fold:  1  of 5
Fold:  2  of 5
Fold:  3  of 5
Fold:  4  of 5
Fold:  5  of 5
Obtained loss:  561.769840132  ( 132.040566038 )
Testing:  {'activation_1': 'relu', 'activation_2': 'sigmoid', 'activation_3': 'tanh', 'comp_loss': 'mean_squared_error', 'dropout_1': 0.3817504260528018, 'dropout_2': 0.28218538282713934, 'fit_n_batch': 99.0, 'n_nodes_1': 696.0, 'n_nodes_2': 972.0, 'n_nodes_3': 797.0, 'opt_decay': 0.0, 'opt_epsilon': 1e-08, 'opt_lr': 0.002, 'opt_rho': 0.9, 'regularization_1': 0, 'regularization_2': 0, 'regularization_3': 0}
Fold:  1  of 5
Fold:  2  of 5
Fold:  3  of 5
Fold:  4  of 5
Fold:  5  of 5
Obtained loss:  489.320066774  ( 114.934654743 )
Testing:  {'activation_1': 'relu', 'activation_2': 'relu', 'activation_3': 'relu', 'comp_loss': 'mean_squared_error', 'dropout_1': 0.07347961139071496, 'dropout_2': 0.14126375047843928, 'fit_n_batch': 86.0, 'n_nodes_1': 314.0, 'n_nodes_2': 167.0, 'n_nodes_3': 755.0, 'opt_decay': 0.0, 'opt_epsilon': 1e-08, 'opt_lr': 0.002, 'opt_rho': 0.9, 'regularization_1': 0, 'regularization_2': 0, 'regularization_3': 0}
Fold:  1  of 5
Fold:  2  of 5
Fold:  3  of 5
Fold:  4  of 5
Fold:  5  of 5
Obtained loss:  664.386050909  ( 123.797186589 )
Testing:  {'activation_1': 'sigmoid', 'activation_2': 'relu', 'activation_3': 'tanh', 'comp_loss': 'mean_squared_error', 'dropout_1': 0.28516011428927984, 'dropout_2': 0.4086861365171176, 'fit_n_batch': 68.0, 'n_nodes_1': 245.0, 'n_nodes_2': 643.0, 'n_nodes_3': 514.0, 'opt_decay': 0.0, 'opt_epsilon': 1e-08, 'opt_lr': 0.002, 'opt_rho': 0.9, 'regularization_1': 0, 'regularization_2': 0, 'regularization_3': 0}
Fold:  1  of 5
Fold:  2  of 5
Fold:  3  of 5
Fold:  4  of 5
Fold:  5  of 5
Obtained loss:  888.599489326  ( 206.282191421 )
Testing:  {'activation_1': 'relu', 'activation_2': 'tanh', 'activation_3': 'tanh', 'comp_loss': 'mean_squared_error', 'dropout_1': 0.11047970744557048, 'dropout_2': 0.4436547030760135, 'fit_n_batch': 33.0, 'n_nodes_1': 425.0, 'n_nodes_2': 597.0, 'n_nodes_3': 633.0, 'opt_decay': 0.0, 'opt_epsilon': 1e-08, 'opt_lr': 0.002, 'opt_rho': 0.9, 'regularization_1': 0, 'regularization_2': 0, 'regularization_3': 0}
Fold:  1  of 5
Fold:  2  of 5
Fold:  3  of 5
Fold:  4  of 5
Fold:  5  of 5
Obtained loss:  550.064920867  ( 144.443076972 )
Testing:  {'activation_1': 'relu', 'activation_2': 'relu', 'activation_3': 'tanh', 'comp_loss': 'mean_squared_error', 'dropout_1': 0.1985483374456093, 'dropout_2': 0.47029319244237955, 'fit_n_batch': 26.0, 'n_nodes_1': 788.0, 'n_nodes_2': 768.0, 'n_nodes_3': 869.0, 'opt_decay': 0.0, 'opt_epsilon': 1e-08, 'opt_lr': 0.002, 'opt_rho': 0.9, 'regularization_1': 0, 'regularization_2': 0, 'regularization_3': 0}
Fold:  1  of 5
Fold:  2  of 5
Fold:  3  of 5
Fold:  4  of 5
Fold:  5  of 5
Obtained loss:  607.529500263  ( 178.338032717 )
Testing:  {'activation_1': 'relu', 'activation_2': 'sigmoid', 'activation_3': 'relu', 'comp_loss': 'mean_squared_error', 'dropout_1': 0.1807049075308913, 'dropout_2': 0.11088260722659733, 'fit_n_batch': 93.0, 'n_nodes_1': 908.0, 'n_nodes_2': 39.0, 'n_nodes_3': 675.0, 'opt_decay': 0.0, 'opt_epsilon': 1e-08, 'opt_lr': 0.002, 'opt_rho': 0.9, 'regularization_1': 0, 'regularization_2': 0, 'regularization_3': 0}
Fold:  1  of 5
Fold:  2  of 5
Fold:  3  of 5
Fold:  4  of 5
Fold:  5  of 5
Obtained loss:  697.503707001  ( 156.20862877 )
Testing:  {'activation_1': 'sigmoid', 'activation_2': 'relu', 'activation_3': 'tanh', 'comp_loss': 'mean_squared_error', 'dropout_1': 0.310008476249991, 'dropout_2': 0.08524466461703804, 'fit_n_batch': 38.0, 'n_nodes_1': 601.0, 'n_nodes_2': 550.0, 'n_nodes_3': 458.0, 'opt_decay': 0.0, 'opt_epsilon': 1e-08, 'opt_lr': 0.002, 'opt_rho': 0.9, 'regularization_1': 0, 'regularization_2': 0, 'regularization_3': 0}
Fold:  1  of 5
Fold:  2  of 5
Fold:  3  of 5
Fold:  4  of 5
Fold:  5  of 5
Obtained loss:  686.190444808  ( 95.6160586241 )
Testing:  {'activation_1': 'relu', 'activation_2': 'relu', 'activation_3': 'tanh', 'comp_loss': 'mean_squared_error', 'dropout_1': 0.3363784702136408, 'dropout_2': 0.32036822658582054, 'fit_n_batch': 44.0, 'n_nodes_1': 829.0, 'n_nodes_2': 498.0, 'n_nodes_3': 716.0, 'opt_decay': 0.0, 'opt_epsilon': 1e-08, 'opt_lr': 0.002, 'opt_rho': 0.9, 'regularization_1': 0, 'regularization_2': 0, 'regularization_3': 0}
Fold:  1  of 5
Fold:  2  of 5
Fold:  3  of 5
Fold:  4  of 5
Fold:  5  of 5
Obtained loss:  561.530271485  ( 154.082636904 )
Testing:  {'activation_1': 'relu', 'activation_2': 'tanh', 'activation_3': 'sigmoid', 'comp_loss': 'mean_squared_error', 'dropout_1': 0.05495657631047801, 'dropout_2': 0.1731435104872877, 'fit_n_batch': 62.0, 'n_nodes_1': 746.0, 'n_nodes_2': 137.0, 'n_nodes_3': 979.0, 'opt_decay': 0.0, 'opt_epsilon': 1e-08, 'opt_lr': 0.002, 'opt_rho': 0.9, 'regularization_1': 0, 'regularization_2': 0, 'regularization_3': 0}
Fold:  1  of 5
Fold:  2  of 5
Fold:  3  of 5
Fold:  4  of 5
Fold:  5  of 5
Obtained loss:  489.309241822  ( 110.36780879 )
Testing:  {'activation_1': 'relu', 'activation_2': 'relu', 'activation_3': 'tanh', 'comp_loss': 'mean_squared_error', 'dropout_1': 0.22886370237717057, 'dropout_2': 0.34642544094389777, 'fit_n_batch': 57.0, 'n_nodes_1': 375.0, 'n_nodes_2': 215.0, 'n_nodes_3': 899.0, 'opt_decay': 0.0, 'opt_epsilon': 1e-08, 'opt_lr': 0.002, 'opt_rho': 0.9, 'regularization_1': 0, 'regularization_2': 0, 'regularization_3': 0}
Fold:  1  of 5
Fold:  2  of 5
Fold:  3  of 5
Fold:  4  of 5
Fold:  5  of 5
Obtained loss:  614.856875427  ( 120.978942821 )
Testing:  {'activation_1': 'tanh', 'activation_2': 'sigmoid', 'activation_3': 'tanh', 'comp_loss': 'mean_squared_error', 'dropout_1': 0.2499412851586949, 'dropout_2': 0.3692599295162913, 'fit_n_batch': 112.0, 'n_nodes_1': 873.0, 'n_nodes_2': 415.0, 'n_nodes_3': 759.0, 'opt_decay': 0.0, 'opt_epsilon': 1e-08, 'opt_lr': 0.002, 'opt_rho': 0.9, 'regularization_1': 0, 'regularization_2': 0, 'regularization_3': 0}
Fold:  1  of 5
Fold:  2  of 5
Fold:  3  of 5
Fold:  4  of 5
Fold:  5  of 5
Obtained loss:  563.244880671  ( 135.581196111 )
Testing:  {'activation_1': 'relu', 'activation_2': 'relu', 'activation_3': 'relu', 'comp_loss': 'mean_squared_error', 'dropout_1': 0.49808725352490435, 'dropout_2': 0.12915392096288097, 'fit_n_batch': 80.0, 'n_nodes_1': 513.0, 'n_nodes_2': 376.0, 'n_nodes_3': 938.0, 'opt_decay': 0.0, 'opt_epsilon': 1e-08, 'opt_lr': 0.002, 'opt_rho': 0.9, 'regularization_1': 0, 'regularization_2': 0, 'regularization_3': 0}
Fold:  1  of 5
Fold:  2  of 5
Fold:  3  of 5
Fold:  4  of 5
Fold:  5  of 5
Obtained loss:  1341.60298394  ( 171.127532785 )
Testing:  {'activation_1': 'sigmoid', 'activation_2': 'relu', 'activation_3': 'sigmoid', 'comp_loss': 'mean_squared_error', 'dropout_1': 0.3966551110553861, 'dropout_2': 0.2982114384235914, 'fit_n_batch': 74.0, 'n_nodes_1': 573.0, 'n_nodes_2': 457.0, 'n_nodes_3': 845.0, 'opt_decay': 0.0, 'opt_epsilon': 1e-08, 'opt_lr': 0.002, 'opt_rho': 0.9, 'regularization_1': 0, 'regularization_2': 0, 'regularization_3': 0}
Fold:  1  of 5
Fold:  2  of 5
Fold:  3  of 5
Fold:  4  of 5
Fold:  5  of 5
Obtained loss:  804.752953722  ( 214.205504744 )
Testing:  {'activation_1': 'relu', 'activation_2': 'tanh', 'activation_3': 'tanh', 'comp_loss': 'mean_squared_error', 'dropout_1': 0.16198924391595745, 'dropout_2': 0.06512085055808275, 'fit_n_batch': 105.0, 'n_nodes_1': 982.0, 'n_nodes_2': 340.0, 'n_nodes_3': 609.0, 'opt_decay': 0.0, 'opt_epsilon': 1e-08, 'opt_lr': 0.002, 'opt_rho': 0.9, 'regularization_1': 0, 'regularization_2': 0, 'regularization_3': 0}
Fold:  1  of 5
Fold:  2  of 5
Fold:  3  of 5
Fold:  4  of 5
Fold:  5  of 5
Obtained loss:  458.878568667  ( 146.070066881 )
Testing:  {'activation_1': 'tanh', 'activation_2': 'relu', 'activation_3': 'tanh', 'comp_loss': 'mean_squared_error', 'dropout_1': 0.36247774893230034, 'dropout_2': 0.24580575343355318, 'fit_n_batch': 99.0, 'n_nodes_1': 477.0, 'n_nodes_2': 841.0, 'n_nodes_3': 572.0, 'opt_decay': 0.0, 'opt_epsilon': 1e-08, 'opt_lr': 0.002, 'opt_rho': 0.9, 'regularization_1': 0, 'regularization_2': 0, 'regularization_3': 0}
Fold:  1  of 5
Fold:  2  of 5
Fold:  3  of 5
Fold:  4  of 5
Fold:  5  of 5
Obtained loss:  622.164011579  ( 120.147745829 )
Testing:  {'activation_1': 'relu', 'activation_2': 'sigmoid', 'activation_3': 'sigmoid', 'comp_loss': 'mean_squared_error', 'dropout_1': 0.08841812977367229, 'dropout_2': 0.030601003371157107, 'fit_n_batch': 68.0, 'n_nodes_1': 655.0, 'n_nodes_2': 735.0, 'n_nodes_3': 650.0, 'opt_decay': 0.0, 'opt_epsilon': 1e-08, 'opt_lr': 0.002, 'opt_rho': 0.9, 'regularization_1': 0, 'regularization_2': 0, 'regularization_3': 0}
Fold:  1  of 5
Fold:  2  of 5
Fold:  3  of 5
Fold:  4  of 5
Fold:  5  of 5
Obtained loss:  414.413178423  ( 137.430810136 )
Testing:  {'activation_1': 'relu', 'activation_2': 'sigmoid', 'activation_3': 'sigmoid', 'comp_loss': 'mean_squared_error', 'dropout_1': 0.08813572098580352, 'dropout_2': 0.03155693545556867, 'fit_n_batch': 68.0, 'n_nodes_1': 436.0, 'n_nodes_2': 969.0, 'n_nodes_3': 373.0, 'opt_decay': 0.0, 'opt_epsilon': 1e-08, 'opt_lr': 0.002, 'opt_rho': 0.9, 'regularization_1': 0, 'regularization_2': 0, 'regularization_3': 0}
Fold:  1  of 5
Fold:  2  of 5
Fold:  3  of 5
Fold:  4  of 5
Fold:  5  of 5
Obtained loss:  402.172668005  ( 133.428983233 )
Testing:  {'activation_1': 'sigmoid', 'activation_2': 'sigmoid', 'activation_3': 'sigmoid', 'comp_loss': 'mean_squared_error', 'dropout_1': 0.0876453465804385, 'dropout_2': 0.0309743459310007, 'fit_n_batch': 68.0, 'n_nodes_1': 248.0, 'n_nodes_2': 971.0, 'n_nodes_3': 205.0, 'opt_decay': 0.0, 'opt_epsilon': 1e-08, 'opt_lr': 0.002, 'opt_rho': 0.9, 'regularization_1': 0, 'regularization_2': 0, 'regularization_3': 0}
Fold:  1  of 5
Fold:  2  of 5
Fold:  3  of 5
Fold:  4  of 5
Fold:  5  of 5
Obtained loss:  483.491597416  ( 146.110508883 )
Testing:  {'activation_1': 'tanh', 'activation_2': 'sigmoid', 'activation_3': 'sigmoid', 'comp_loss': 'mean_squared_error', 'dropout_1': 0.01154092692198827, 'dropout_2': 0.009488825946787419, 'fit_n_batch': 54.0, 'n_nodes_1': 435.0, 'n_nodes_2': 897.0, 'n_nodes_3': 372.0, 'opt_decay': 0.0, 'opt_epsilon': 1e-08, 'opt_lr': 0.002, 'opt_rho': 0.9, 'regularization_1': 0, 'regularization_2': 0, 'regularization_3': 0}
Fold:  1  of 5
Fold:  2  of 5
Fold:  3  of 5
Fold:  4  of 5
Fold:  5  of 5
Obtained loss:  473.396911657  ( 126.497723968 )
Testing:  {'activation_1': 'relu', 'activation_2': 'sigmoid', 'activation_3': 'sigmoid', 'comp_loss': 'mean_squared_error', 'dropout_1': 0.1370391324645217, 'dropout_2': 0.09026918808638579, 'fit_n_batch': 65.0, 'n_nodes_1': 396.0, 'n_nodes_2': 947.0, 'n_nodes_3': 286.0, 'opt_decay': 0.0, 'opt_epsilon': 1e-08, 'opt_lr': 0.002, 'opt_rho': 0.9, 'regularization_1': 0, 'regularization_2': 0, 'regularization_3': 0}
Fold:  1  of 5
Fold:  2  of 5
Fold:  3  of 5
Fold:  4  of 5
Fold:  5  of 5
Obtained loss:  438.804773996  ( 143.874916203 )
Testing:  {'activation_1': 'relu', 'activation_2': 'sigmoid', 'activation_3': 'sigmoid', 'comp_loss': 'mean_squared_error', 'dropout_1': 0.0348798741783664, 'dropout_2': 0.07278308548455462, 'fit_n_batch': 77.0, 'n_nodes_1': 342.0, 'n_nodes_2': 864.0, 'n_nodes_3': 427.0, 'opt_decay': 0.0, 'opt_epsilon': 1e-08, 'opt_lr': 0.002, 'opt_rho': 0.9, 'regularization_1': 0, 'regularization_2': 0, 'regularization_3': 0}
Fold:  1  of 5
Fold:  2  of 5
Fold:  3  of 5
Fold:  4  of 5
Fold:  5  of 5
Obtained loss:  431.059435447  ( 161.593504174 )
Testing:  {'activation_1': 'relu', 'activation_2': 'sigmoid', 'activation_3': 'sigmoid', 'comp_loss': 'mean_squared_error', 'dropout_1': 0.05275958511786433, 'dropout_2': 0.05351562776937071, 'fit_n_batch': 72.0, 'n_nodes_1': 35.0, 'n_nodes_2': 814.0, 'n_nodes_3': 141.0, 'opt_decay': 0.0, 'opt_epsilon': 1e-08, 'opt_lr': 0.002, 'opt_rho': 0.9, 'regularization_1': 0, 'regularization_2': 0, 'regularization_3': 0}
Fold:  1  of 5
Fold:  2  of 5
Fold:  3  of 5
Fold:  4  of 5
Fold:  5  of 5
Obtained loss:  644.59933943  ( 212.738111661 )
Testing:  {'activation_1': 'tanh', 'activation_2': 'sigmoid', 'activation_3': 'sigmoid', 'comp_loss': 'mean_squared_error', 'dropout_1': 0.10031888858538934, 'dropout_2': 0.032151346927284795, 'fit_n_batch': 85.0, 'n_nodes_1': 166.0, 'n_nodes_2': 738.0, 'n_nodes_3': 75.0, 'opt_decay': 0.0, 'opt_epsilon': 1e-08, 'opt_lr': 0.002, 'opt_rho': 0.9, 'regularization_1': 0, 'regularization_2': 0, 'regularization_3': 0}
Fold:  1  of 5
Fold:  2  of 5
Fold:  3  of 5
Fold:  4  of 5
Fold:  5  of 5
Obtained loss:  461.242943174  ( 91.1669281819 )
Testing:  {'activation_1': 'sigmoid', 'activation_2': 'sigmoid', 'activation_3': 'sigmoid', 'comp_loss': 'mean_squared_error', 'dropout_1': 0.12310638847782054, 'dropout_2': 0.11408177984909151, 'fit_n_batch': 89.0, 'n_nodes_1': 105.0, 'n_nodes_2': 787.0, 'n_nodes_3': 247.0, 'opt_decay': 0.0, 'opt_epsilon': 1e-08, 'opt_lr': 0.002, 'opt_rho': 0.9, 'regularization_1': 0, 'regularization_2': 0, 'regularization_3': 0}
Fold:  1  of 5
Fold:  2  of 5
Fold:  3  of 5
Fold:  4  of 5
Fold:  5  of 5
Obtained loss:  670.893111727  ( 101.440861812 )
Testing:  {'activation_1': 'relu', 'activation_2': 'sigmoid', 'activation_3': 'sigmoid', 'comp_loss': 'mean_squared_error', 'dropout_1': 0.07828017924214446, 'dropout_2': 0.15264994292877898, 'fit_n_batch': 48.0, 'n_nodes_1': 306.0, 'n_nodes_2': 999.0, 'n_nodes_3': 325.0, 'opt_decay': 0.0, 'opt_epsilon': 1e-08, 'opt_lr': 0.002, 'opt_rho': 0.9, 'regularization_1': 0, 'regularization_2': 0, 'regularization_3': 0}
Fold:  1  of 5
Fold:  2  of 5
Fold:  3  of 5
Fold:  4  of 5
Fold:  5  of 5
Obtained loss:  427.229793959  ( 137.157802038 )
Testing:  {'activation_1': 'relu', 'activation_2': 'sigmoid', 'activation_3': 'sigmoid', 'comp_loss': 'mean_squared_error', 'dropout_1': 0.0631899098136493, 'dropout_2': 0.09724644707528943, 'fit_n_batch': 81.0, 'n_nodes_1': 531.0, 'n_nodes_2': 927.0, 'n_nodes_3': 187.0, 'opt_decay': 0.0, 'opt_epsilon': 1e-08, 'opt_lr': 0.002, 'opt_rho': 0.9, 'regularization_1': 0, 'regularization_2': 0, 'regularization_3': 0}
Fold:  1  of 5
Fold:  2  of 5
Fold:  3  of 5
Fold:  4  of 5
Fold:  5  of 5
Obtained loss:  408.261525074  ( 121.700942803 )
Testing:  {'activation_1': 'relu', 'activation_2': 'sigmoid', 'activation_3': 'sigmoid', 'comp_loss': 'mean_squared_error', 'dropout_1': 0.06290598481894272, 'dropout_2': 0.09742114620661425, 'fit_n_batch': 81.0, 'n_nodes_1': 530.0, 'n_nodes_2': 928.0, 'n_nodes_3': 1.0, 'opt_decay': 0.0, 'opt_epsilon': 1e-08, 'opt_lr': 0.002, 'opt_rho': 0.9, 'regularization_1': 0, 'regularization_2': 0, 'regularization_3': 0}
Fold:  1  of 5
Fold:  2  of 5
Fold:  3  of 5
Fold:  4  of 5
Fold:  5  of 5
Obtained loss:  1412.8771662  ( 274.61665486 )
Testing:  {'activation_1': 'tanh', 'activation_2': 'sigmoid', 'activation_3': 'sigmoid', 'comp_loss': 'mean_squared_error', 'dropout_1': 0.00879641380642579, 'dropout_2': 0.1331700443065696, 'fit_n_batch': 94.0, 'n_nodes_1': 365.0, 'n_nodes_2': 958.0, 'n_nodes_3': 32.0, 'opt_decay': 0.0, 'opt_epsilon': 1e-08, 'opt_lr': 0.002, 'opt_rho': 0.9, 'regularization_1': 0, 'regularization_2': 0, 'regularization_3': 0}
Fold:  1  of 5
Fold:  2  of 5
Fold:  3  of 5
Fold:  4  of 5
Fold:  5  of 5
Obtained loss:  475.008455988  ( 139.347709382 )
Testing:  {'activation_1': 'relu', 'activation_2': 'sigmoid', 'activation_3': 'sigmoid', 'comp_loss': 'mean_squared_error', 'dropout_1': 0.14798725810535343, 'dropout_2': 0.00969583805785336, 'fit_n_batch': 91.0, 'n_nodes_1': 474.0, 'n_nodes_2': 862.0, 'n_nodes_3': 185.0, 'opt_decay': 0.0, 'opt_epsilon': 1e-08, 'opt_lr': 0.002, 'opt_rho': 0.9, 'regularization_1': 0, 'regularization_2': 0, 'regularization_3': 0}
Fold:  1  of 5
Fold:  2  of 5
Fold:  3  of 5
Fold:  4  of 5
Fold:  5  of 5
Obtained loss:  504.439471851  ( 131.883954315 )
==============================

======== Best NN... ========
Validation loss:  402.1726680052026
Best model hyperparameters:  {'activation_1': 0, 'activation_2': 1, 'activation_3': 1, 'dropout_1': 0.08813572098580352, 'dropout_2': 0.03155693545556867, 'fit_n_batch': 68.0, 'n_nodes_1': 436.0, 'n_nodes_2': 969.0, 'n_nodes_3': 373.0}
==============================


In [9]:
############## EVALUATING RESULTS  #############################################
Y_te = np.squeeze(Y_te)
Y_NN = np.squeeze(model.predict(X_te))

#MSE
print('\n Score NN: ',mean_squared_error(Y_NN,Y_te))


#Boxplot of the difference between actual values and estimates
data_to_plot = [Y_te-Y_NN]
plt.boxplot(data_to_plot)
plt.show()
"""
#Histogram of the difference between actual values and estimates
plt.hist(data_to_plot,bins=20)
plt.show()

#Plot of the actual values and estimates
plt.plot(Y_te, marker='^')
plt.plot(Y_NN, marker='o')
plt.show()
"""


 Score NN:  18852.7424806
Out[9]:
"\n#Histogram of the difference between actual values and estimates\nplt.hist(data_to_plot,bins=20)\nplt.show()\n\n#Plot of the actual values and estimates\nplt.plot(Y_te, marker='^')\nplt.plot(Y_NN, marker='o')\nplt.show()\n"

In [ ]: