Predicting Activity with a Multilayer Perceptron

... i.e., time-independent model, full dataset


In [1]:
import numpy
import pandas
from keras.models import Sequential
from keras.layers import Dense
from keras.utils.np_utils import to_categorical
from keras.wrappers.scikit_learn import KerasClassifier
from sklearn.cross_validation import cross_val_score
from sklearn.preprocessing import LabelEncoder
from sklearn.cross_validation import StratifiedKFold
from sklearn.preprocessing import StandardScaler
from sklearn.pipeline import Pipeline

# fix random seed for reproducibility
seed = 66
numpy.random.seed(seed)

data = pandas.read_csv("../data/processed/train.csv")
notnull_data = data[data.notnull().all(axis=1)]
train = notnull_data.values
data2 = pandas.read_csv("../data/processed/test.csv")
notnull_data2 = data2[data2.notnull().all(axis=1)]
test = notnull_data2.values


Using Theano backend.

In [2]:
X_train = train[:,3:7558].astype(float)
#X_train = train[:,3:13].astype(float)
Y_train = train[:,7558]
X_test = test[:,3:7558].astype(float)
#X_test = test[:,3:13].astype(float)
Y_test = test[:,7558]

# One hot encoding of the response variable (using dummy variables)
# encode class values as integers
encoder = LabelEncoder()
encoder.fit(Y_train)
encoded_Y_train = encoder.transform(Y_train)
# convert integers to dummy variables (i.e. one hot encoded)
dummy_y_train = to_categorical(encoded_Y_train)
encoder.fit(Y_test)
encoded_Y_test = encoder.transform(Y_test)
# convert integers to dummy variables (i.e. one hot encoded)
dummy_y_test = to_categorical(encoded_Y_test)

# Sanity check on matrix dimensions, after droppinig null/nans
#print X_train.shape #(4472, 7555)
#print Y_test.shape #(1044, )
#print dummy_y_test.shape # (1044, 5)

In [3]:
from keras.layers import Dropout
from keras.constraints import maxnorm
from keras.optimizers import SGD

# baseline model
def create_baseline():
    # create model
    model = Sequential()
    model.add(Dense(200, input_dim=7555, init='uniform', activation='tanh', W_constraint=maxnorm(4)))
    model.add(Dense(20, init='uniform', activation='tanh', W_constraint=maxnorm(4)))
    model.add(Dense(5, init='uniform', activation='sigmoid'))
    # Compile model
    model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
    return model

# Apply dropout regularization, it is overfitting!
def create_dropout():
    # create model
    model = Sequential()
    model.add(Dropout(0.2, input_shape=(7555,)))
    model.add(Dense(200, init='uniform', activation='tanh'))
    model.add(Dropout(0.2))
    model.add(Dense(20, init='uniform', activation='tanh'))
    model.add(Dropout(0.2))
    model.add(Dense(5, init='uniform', activation='sigmoid'))
    # Compile model, with larger learning rate and momentum, as recommended by the original paper
    sgd = SGD(lr=0.01, momentum=0.9, decay=0.0, nesterov=False)
    model.compile(loss='categorical_crossentropy', optimizer=sgd, metrics=['accuracy'])
    return model

# Apply dropout regularization, it is overfitting!
def create_dropout_decay():
    # create model
    model = Sequential()
    model.add(Dropout(0.2, input_shape=(7555,)))
    model.add(Dense(200, init='uniform', activation='tanh'))
    model.add(Dropout(0.2))
    model.add(Dense(20, init='uniform', activation='tanh'))
    model.add(Dropout(0.2))
    model.add(Dense(5, init='uniform', activation='sigmoid'))
    # Compile model, with larger learning rate and momentum, as recommended by the original paper
    sgd = SGD(lr=0.1, momentum=0.9, decay=0.0005, nesterov=False)
    model.compile(loss='categorical_crossentropy', optimizer=sgd, metrics=['accuracy'])
    return model

# evaluate baseline model with standardized dataset
numpy.random.seed(seed)
#estimators = []
#estimators.append(('standardize', StandardScaler()))
#estimators.append(('mlp', KerasClassifier(build_fn=create_baseline, nb_epoch=10, batch_size=10, verbose=1)))
# We define a pipeline of estimators, in which first the scaler is fitted to the data, then the MLP is applied
#pipeline = Pipeline(estimators)
#kfold = StratifiedKFold(y=Y_train, n_folds=3, shuffle=True, random_state=seed)

#model = create_baseline()
model = create_dropout_decay()
# We standardize on the basis of the training data
scaler = StandardScaler().fit(X_train)
X_train_st = scaler.transform(X_train)
X_test_st = scaler.transform(X_test)
scaler


Out[3]:
StandardScaler(copy=True, with_mean=True, with_std=True)

In [4]:
from keras.callbacks import ModelCheckpoint

# To save the best model
# serialize model to JSON
model_json = model.to_json()
with open("model.json", "w") as json_file:
    json_file.write(model_json)
filepath="weights.best.hdf5"
# Define that the accuracy in cv is monitored, and that weights are stored in a file when max accuracy is achieved
checkpoint = ModelCheckpoint(filepath, monitor='val_acc', verbose=1, save_best_only=True, mode='max')
callbacks_list = [checkpoint]

# Fit the model
history = model.fit(X_train_st, dummy_y_train, 
                    validation_data=(X_test_st,dummy_y_test), 
                    nb_epoch=200, batch_size=10, verbose=0, 
                    callbacks=callbacks_list)
#results = cross_val_score(pipeline, X_train, dummy_y_train, cv=kfold)
#print("Standardized data Acc (in CV training data): %.2f%% (%.2f%%)" % (results.mean()*100, results.std()*100))
# evaluate the model
#scores = pipeline.evaluate(X_test, dummy_y_test)
#print pipeline.metrics_names[1]
#print scores[1]*100
# For other metrics, see http://machinelearningmastery.com/metrics-evaluate-machine-learning-algorithms-python/

import matplotlib.pyplot as plt
from sklearn.metrics import confusion_matrix, roc_auc_score, accuracy_score

# Other performance/accuracy metrics
Y_pred = model.predict(X_test_st)
print Y_pred.shape

# Accuracy
print('Accuracy:')
print(accuracy_score(numpy.argmax(dummy_y_test, axis=1), numpy.argmax(Y_pred, axis=1)))


# Confusion matrix
cm = confusion_matrix(numpy.argmax(dummy_y_test, axis=1), numpy.argmax(Y_pred, axis=1))
numpy.set_printoptions(precision=2)
print('Confusion matrix:')
print(cm)

# AUC
roc = roc_auc_score(dummy_y_test, Y_pred, average='macro')
print('AUC score:')
print(roc)
    

# summarize history for accuracy
plt.plot(history.history['acc'])
plt.plot(history.history['val_acc'])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train','test'], loc='upper left')
plt.show()
# summarize history for loss
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train','test'], loc='upper left')
plt.show()


Epoch 00000: val_acc improved from -inf to 0.36782, saving model to weights.best.hdf5
Epoch 00001: val_acc did not improve
Epoch 00002: val_acc improved from 0.36782 to 0.43199, saving model to weights.best.hdf5
Epoch 00003: val_acc improved from 0.43199 to 0.45785, saving model to weights.best.hdf5
Epoch 00004: val_acc did not improve
Epoch 00005: val_acc improved from 0.45785 to 0.46073, saving model to weights.best.hdf5
Epoch 00006: val_acc did not improve
Epoch 00007: val_acc did not improve
Epoch 00008: val_acc did not improve
Epoch 00009: val_acc improved from 0.46073 to 0.46743, saving model to weights.best.hdf5
Epoch 00010: val_acc did not improve
Epoch 00011: val_acc did not improve
Epoch 00012: val_acc did not improve
Epoch 00013: val_acc did not improve
Epoch 00014: val_acc improved from 0.46743 to 0.47414, saving model to weights.best.hdf5
Epoch 00015: val_acc did not improve
Epoch 00016: val_acc did not improve
Epoch 00017: val_acc did not improve
Epoch 00018: val_acc did not improve
Epoch 00019: val_acc improved from 0.47414 to 0.47510, saving model to weights.best.hdf5
Epoch 00020: val_acc improved from 0.47510 to 0.48180, saving model to weights.best.hdf5
Epoch 00021: val_acc did not improve
Epoch 00022: val_acc did not improve
Epoch 00023: val_acc did not improve
Epoch 00024: val_acc did not improve
Epoch 00025: val_acc improved from 0.48180 to 0.48755, saving model to weights.best.hdf5
Epoch 00026: val_acc did not improve
Epoch 00027: val_acc did not improve
Epoch 00028: val_acc improved from 0.48755 to 0.49042, saving model to weights.best.hdf5
Epoch 00029: val_acc did not improve
Epoch 00030: val_acc did not improve
Epoch 00031: val_acc improved from 0.49042 to 0.49138, saving model to weights.best.hdf5
Epoch 00032: val_acc did not improve
Epoch 00033: val_acc did not improve
Epoch 00034: val_acc did not improve
Epoch 00035: val_acc did not improve
Epoch 00036: val_acc did not improve
Epoch 00037: val_acc did not improve
Epoch 00038: val_acc did not improve
Epoch 00039: val_acc did not improve
Epoch 00040: val_acc did not improve
Epoch 00041: val_acc did not improve
Epoch 00042: val_acc improved from 0.49138 to 0.49713, saving model to weights.best.hdf5
Epoch 00043: val_acc did not improve
Epoch 00044: val_acc improved from 0.49713 to 0.50766, saving model to weights.best.hdf5
Epoch 00045: val_acc did not improve
Epoch 00046: val_acc did not improve
Epoch 00047: val_acc did not improve
Epoch 00048: val_acc did not improve
Epoch 00049: val_acc did not improve
Epoch 00050: val_acc did not improve
Epoch 00051: val_acc did not improve
Epoch 00052: val_acc did not improve
Epoch 00053: val_acc did not improve
Epoch 00054: val_acc did not improve
Epoch 00055: val_acc did not improve
Epoch 00056: val_acc improved from 0.50766 to 0.50958, saving model to weights.best.hdf5
Epoch 00057: val_acc improved from 0.50958 to 0.51724, saving model to weights.best.hdf5
Epoch 00058: val_acc did not improve
Epoch 00059: val_acc improved from 0.51724 to 0.54693, saving model to weights.best.hdf5
Epoch 00060: val_acc did not improve
Epoch 00061: val_acc did not improve
Epoch 00062: val_acc did not improve
Epoch 00063: val_acc did not improve
Epoch 00064: val_acc did not improve
Epoch 00065: val_acc did not improve
Epoch 00066: val_acc did not improve
Epoch 00067: val_acc did not improve
Epoch 00068: val_acc did not improve
Epoch 00069: val_acc did not improve
Epoch 00070: val_acc did not improve
Epoch 00071: val_acc did not improve
Epoch 00072: val_acc did not improve
Epoch 00073: val_acc did not improve
Epoch 00074: val_acc did not improve
Epoch 00075: val_acc did not improve
Epoch 00076: val_acc did not improve
Epoch 00077: val_acc did not improve
Epoch 00078: val_acc did not improve
Epoch 00079: val_acc did not improve
Epoch 00080: val_acc did not improve
Epoch 00081: val_acc did not improve
Epoch 00082: val_acc improved from 0.54693 to 0.54693, saving model to weights.best.hdf5
Epoch 00083: val_acc did not improve
Epoch 00084: val_acc improved from 0.54693 to 0.55364, saving model to weights.best.hdf5
Epoch 00085: val_acc did not improve
Epoch 00086: val_acc did not improve
Epoch 00087: val_acc did not improve
Epoch 00088: val_acc did not improve
Epoch 00089: val_acc did not improve
Epoch 00090: val_acc did not improve
Epoch 00091: val_acc did not improve
Epoch 00092: val_acc did not improve
Epoch 00093: val_acc did not improve
Epoch 00094: val_acc did not improve
Epoch 00095: val_acc did not improve
Epoch 00096: val_acc did not improve
Epoch 00097: val_acc did not improve
Epoch 00098: val_acc did not improve
Epoch 00099: val_acc did not improve
Epoch 00100: val_acc did not improve
Epoch 00101: val_acc did not improve
Epoch 00102: val_acc did not improve
Epoch 00103: val_acc did not improve
Epoch 00104: val_acc did not improve
Epoch 00105: val_acc did not improve
Epoch 00106: val_acc did not improve
Epoch 00107: val_acc did not improve
Epoch 00108: val_acc did not improve
Epoch 00109: val_acc did not improve
Epoch 00110: val_acc did not improve
Epoch 00111: val_acc did not improve
Epoch 00112: val_acc did not improve
Epoch 00113: val_acc did not improve
Epoch 00114: val_acc did not improve
Epoch 00115: val_acc did not improve
Epoch 00116: val_acc did not improve
Epoch 00117: val_acc did not improve
Epoch 00118: val_acc did not improve
Epoch 00119: val_acc did not improve
Epoch 00120: val_acc did not improve
Epoch 00121: val_acc did not improve
Epoch 00122: val_acc improved from 0.55364 to 0.55651, saving model to weights.best.hdf5
Epoch 00123: val_acc did not improve
Epoch 00124: val_acc did not improve
Epoch 00125: val_acc did not improve
Epoch 00126: val_acc did not improve
Epoch 00127: val_acc did not improve
Epoch 00128: val_acc did not improve
Epoch 00129: val_acc did not improve
Epoch 00130: val_acc did not improve
Epoch 00131: val_acc did not improve
Epoch 00132: val_acc did not improve
Epoch 00133: val_acc did not improve
Epoch 00134: val_acc did not improve
Epoch 00135: val_acc did not improve
Epoch 00136: val_acc did not improve
Epoch 00137: val_acc did not improve
Epoch 00138: val_acc did not improve
Epoch 00139: val_acc did not improve
Epoch 00140: val_acc did not improve
Epoch 00141: val_acc did not improve
Epoch 00142: val_acc did not improve
Epoch 00143: val_acc did not improve
Epoch 00144: val_acc did not improve
Epoch 00145: val_acc did not improve
Epoch 00146: val_acc did not improve
Epoch 00147: val_acc did not improve
Epoch 00148: val_acc did not improve
Epoch 00149: val_acc did not improve
Epoch 00150: val_acc did not improve
Epoch 00151: val_acc did not improve
Epoch 00152: val_acc did not improve
Epoch 00153: val_acc did not improve
Epoch 00154: val_acc improved from 0.55651 to 0.55843, saving model to weights.best.hdf5
Epoch 00155: val_acc did not improve
Epoch 00156: val_acc did not improve
Epoch 00157: val_acc did not improve
Epoch 00158: val_acc did not improve
Epoch 00159: val_acc did not improve
Epoch 00160: val_acc did not improve
Epoch 00161: val_acc did not improve
Epoch 00162: val_acc did not improve
Epoch 00163: val_acc did not improve
Epoch 00164: val_acc did not improve
Epoch 00165: val_acc improved from 0.55843 to 0.55939, saving model to weights.best.hdf5
Epoch 00166: val_acc did not improve
Epoch 00167: val_acc did not improve
Epoch 00168: val_acc did not improve
Epoch 00169: val_acc did not improve
Epoch 00170: val_acc did not improve
Epoch 00171: val_acc improved from 0.55939 to 0.56322, saving model to weights.best.hdf5
Epoch 00172: val_acc did not improve
Epoch 00173: val_acc did not improve
Epoch 00174: val_acc did not improve
Epoch 00175: val_acc did not improve
Epoch 00176: val_acc did not improve
Epoch 00177: val_acc did not improve
Epoch 00178: val_acc did not improve
Epoch 00179: val_acc did not improve
Epoch 00180: val_acc did not improve
Epoch 00181: val_acc did not improve
Epoch 00182: val_acc did not improve
Epoch 00183: val_acc did not improve
Epoch 00184: val_acc did not improve
Epoch 00185: val_acc did not improve
Epoch 00186: val_acc did not improve
Epoch 00187: val_acc did not improve
Epoch 00188: val_acc did not improve
Epoch 00189: val_acc did not improve
Epoch 00190: val_acc did not improve
Epoch 00191: val_acc did not improve
Epoch 00192: val_acc did not improve
Epoch 00193: val_acc did not improve
Epoch 00194: val_acc did not improve
Epoch 00195: val_acc did not improve
Epoch 00196: val_acc did not improve
Epoch 00197: val_acc did not improve
Epoch 00198: val_acc did not improve
Epoch 00199: val_acc did not improve
(1044, 5)
Accuracy:
0.545977011494
Confusion matrix:
[[151  11  26  12   0]
 [ 13 189  75   7  41]
 [ 45  81 171  20  20]
 [ 35  11  19  21   9]
 [  4  24  16   5  38]]
AUC score:
0.771905612002

MLPs using PCA as inputs


In [5]:
from sklearn.decomposition import PCA

pca = PCA(n_components=100)
PCAtrain = pca.fit_transform(X_train_st)
PCAtest = pca.transform(X_test_st)

print 'Variance explained:'
print pca.explained_variance_ratio_
print 'Total variance explained by 20 components:'
print sum(pca.explained_variance_ratio_)
#print PCAtrain.shape
#print PCAtest.shape


Variance explained:
[ 0.2   0.08  0.04  0.02  0.02  0.02  0.01  0.01  0.01  0.01  0.01  0.01
  0.01  0.01  0.01  0.01  0.01  0.01  0.    0.    0.    0.    0.    0.    0.
  0.    0.    0.    0.    0.    0.    0.    0.    0.    0.    0.    0.    0.
  0.    0.    0.    0.    0.    0.    0.    0.    0.    0.    0.    0.    0.
  0.    0.    0.    0.    0.    0.    0.    0.    0.    0.    0.    0.    0.
  0.    0.    0.    0.    0.    0.    0.    0.    0.    0.    0.    0.    0.
  0.    0.    0.    0.    0.    0.    0.    0.    0.    0.    0.    0.    0.
  0.    0.    0.    0.    0.    0.    0.    0.    0.    0.  ]
Total variance explained by 20 components:
0.630815671837

In [6]:
# Apply dropout regularization, it is overfitting!
def create_dropout_decay_PCA():
    # create model
    model = Sequential()
    model.add(Dropout(0.2, input_shape=(100,)))
    model.add(Dense(200, init='uniform', activation='tanh'))
    model.add(Dropout(0.2))
    model.add(Dense(20, init='uniform', activation='tanh'))
    model.add(Dropout(0.2))
    model.add(Dense(5, init='uniform', activation='sigmoid'))
    # Compile model, with larger learning rate and momentum, as recommended by the original paper
    sgd = SGD(lr=0.1, momentum=0.9, decay=0.0005, nesterov=False)
    model.compile(loss='categorical_crossentropy', optimizer=sgd, metrics=['accuracy'])
    return model

model = create_dropout_decay_PCA()

# To save the best model
# serialize model to JSON
model_json = model.to_json()
with open("modelpca.json", "w") as json_file:
    json_file.write(model_json)
filepath="weightspca.best.hdf5"
# Define that the accuracy in cv is monitored, and that weights are stored in a file when max accuracy is achieved
checkpoint = ModelCheckpoint(filepath, monitor='val_acc', verbose=1, save_best_only=True, mode='max')
callbacks_list = [checkpoint]

In [7]:
# Fit the model
history = model.fit(PCAtrain, dummy_y_train, 
                    validation_data=(PCAtest,dummy_y_test), 
                    nb_epoch=200, batch_size=10, verbose=0, 
                    callbacks=callbacks_list)

# Other performance/accuracy metrics
Y_pred = model.predict(PCAtest)
print Y_pred.shape

# Accuracy
print('Accuracy:')
print(accuracy_score(numpy.argmax(dummy_y_test, axis=1), numpy.argmax(Y_pred, axis=1)))


# Confusion matrix
cm = confusion_matrix(numpy.argmax(dummy_y_test, axis=1), numpy.argmax(Y_pred, axis=1))
numpy.set_printoptions(precision=2)
print('Confusion matrix:')
print(cm)

# AUC
roc = roc_auc_score(dummy_y_test, Y_pred, average='macro')
print('AUC score:')
print(roc)
    

# summarize history for accuracy
plt.plot(history.history['acc'])
plt.plot(history.history['val_acc'])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train','test'], loc='upper left')
plt.show()
# summarize history for loss
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train','test'], loc='upper left')
plt.show()


Epoch 00000: val_acc improved from -inf to 0.40326, saving model to weightspca.best.hdf5
Epoch 00001: val_acc improved from 0.40326 to 0.44444, saving model to weightspca.best.hdf5
Epoch 00002: val_acc improved from 0.44444 to 0.44828, saving model to weightspca.best.hdf5
Epoch 00003: val_acc did not improve
Epoch 00004: val_acc improved from 0.44828 to 0.46552, saving model to weightspca.best.hdf5
Epoch 00005: val_acc improved from 0.46552 to 0.47510, saving model to weightspca.best.hdf5
Epoch 00006: val_acc did not improve
Epoch 00007: val_acc did not improve
Epoch 00008: val_acc did not improve
Epoch 00009: val_acc did not improve
Epoch 00010: val_acc improved from 0.47510 to 0.48467, saving model to weightspca.best.hdf5
Epoch 00011: val_acc did not improve
Epoch 00012: val_acc did not improve
Epoch 00013: val_acc did not improve
Epoch 00014: val_acc did not improve
Epoch 00015: val_acc did not improve
Epoch 00016: val_acc did not improve
Epoch 00017: val_acc did not improve
Epoch 00018: val_acc did not improve
Epoch 00019: val_acc did not improve
Epoch 00020: val_acc did not improve
Epoch 00021: val_acc did not improve
Epoch 00022: val_acc did not improve
Epoch 00023: val_acc did not improve
Epoch 00024: val_acc did not improve
Epoch 00025: val_acc did not improve
Epoch 00026: val_acc did not improve
Epoch 00027: val_acc did not improve
Epoch 00028: val_acc did not improve
Epoch 00029: val_acc did not improve
Epoch 00030: val_acc did not improve
Epoch 00031: val_acc did not improve
Epoch 00032: val_acc did not improve
Epoch 00033: val_acc did not improve
Epoch 00034: val_acc did not improve
Epoch 00035: val_acc did not improve
Epoch 00036: val_acc did not improve
Epoch 00037: val_acc did not improve
Epoch 00038: val_acc did not improve
Epoch 00039: val_acc did not improve
Epoch 00040: val_acc did not improve
Epoch 00041: val_acc did not improve
Epoch 00042: val_acc did not improve
Epoch 00043: val_acc did not improve
Epoch 00044: val_acc did not improve
Epoch 00045: val_acc did not improve
Epoch 00046: val_acc did not improve
Epoch 00047: val_acc did not improve
Epoch 00048: val_acc did not improve
Epoch 00049: val_acc improved from 0.48467 to 0.48659, saving model to weightspca.best.hdf5
Epoch 00050: val_acc improved from 0.48659 to 0.50766, saving model to weightspca.best.hdf5
Epoch 00051: val_acc did not improve
Epoch 00052: val_acc did not improve
Epoch 00053: val_acc did not improve
Epoch 00054: val_acc did not improve
Epoch 00055: val_acc did not improve
Epoch 00056: val_acc did not improve
Epoch 00057: val_acc did not improve
Epoch 00058: val_acc did not improve
Epoch 00059: val_acc did not improve
Epoch 00060: val_acc did not improve
Epoch 00061: val_acc did not improve
Epoch 00062: val_acc did not improve
Epoch 00063: val_acc did not improve
Epoch 00064: val_acc did not improve
Epoch 00065: val_acc did not improve
Epoch 00066: val_acc did not improve
Epoch 00067: val_acc did not improve
Epoch 00068: val_acc did not improve
Epoch 00069: val_acc did not improve
Epoch 00070: val_acc did not improve
Epoch 00071: val_acc did not improve
Epoch 00072: val_acc did not improve
Epoch 00073: val_acc did not improve
Epoch 00074: val_acc did not improve
Epoch 00075: val_acc did not improve
Epoch 00076: val_acc did not improve
Epoch 00077: val_acc did not improve
Epoch 00078: val_acc did not improve
Epoch 00079: val_acc did not improve
Epoch 00080: val_acc did not improve
Epoch 00081: val_acc did not improve
Epoch 00082: val_acc did not improve
Epoch 00083: val_acc did not improve
Epoch 00084: val_acc did not improve
Epoch 00085: val_acc did not improve
Epoch 00086: val_acc did not improve
Epoch 00087: val_acc did not improve
Epoch 00088: val_acc did not improve
Epoch 00089: val_acc did not improve
Epoch 00090: val_acc did not improve
Epoch 00091: val_acc did not improve
Epoch 00092: val_acc did not improve
Epoch 00093: val_acc did not improve
Epoch 00094: val_acc did not improve
Epoch 00095: val_acc did not improve
Epoch 00096: val_acc did not improve
Epoch 00097: val_acc did not improve
Epoch 00098: val_acc did not improve
Epoch 00099: val_acc did not improve
Epoch 00100: val_acc did not improve
Epoch 00101: val_acc did not improve
Epoch 00102: val_acc did not improve
Epoch 00103: val_acc improved from 0.50766 to 0.50766, saving model to weightspca.best.hdf5
Epoch 00104: val_acc did not improve
Epoch 00105: val_acc did not improve
Epoch 00106: val_acc did not improve
Epoch 00107: val_acc did not improve
Epoch 00108: val_acc did not improve
Epoch 00109: val_acc did not improve
Epoch 00110: val_acc did not improve
Epoch 00111: val_acc did not improve
Epoch 00112: val_acc did not improve
Epoch 00113: val_acc did not improve
Epoch 00114: val_acc did not improve
Epoch 00115: val_acc did not improve
Epoch 00116: val_acc did not improve
Epoch 00117: val_acc did not improve
Epoch 00118: val_acc did not improve
Epoch 00119: val_acc improved from 0.50766 to 0.51054, saving model to weightspca.best.hdf5
Epoch 00120: val_acc did not improve
Epoch 00121: val_acc did not improve
Epoch 00122: val_acc did not improve
Epoch 00123: val_acc did not improve
Epoch 00124: val_acc improved from 0.51054 to 0.51245, saving model to weightspca.best.hdf5
Epoch 00125: val_acc did not improve
Epoch 00126: val_acc did not improve
Epoch 00127: val_acc did not improve
Epoch 00128: val_acc did not improve
Epoch 00129: val_acc did not improve
Epoch 00130: val_acc did not improve
Epoch 00131: val_acc did not improve
Epoch 00132: val_acc did not improve
Epoch 00133: val_acc did not improve
Epoch 00134: val_acc improved from 0.51245 to 0.51437, saving model to weightspca.best.hdf5
Epoch 00135: val_acc did not improve
Epoch 00136: val_acc did not improve
Epoch 00137: val_acc did not improve
Epoch 00138: val_acc improved from 0.51437 to 0.51724, saving model to weightspca.best.hdf5
Epoch 00139: val_acc did not improve
Epoch 00140: val_acc did not improve
Epoch 00141: val_acc did not improve
Epoch 00142: val_acc did not improve
Epoch 00143: val_acc did not improve
Epoch 00144: val_acc did not improve
Epoch 00145: val_acc did not improve
Epoch 00146: val_acc did not improve
Epoch 00147: val_acc did not improve
Epoch 00148: val_acc did not improve
Epoch 00149: val_acc did not improve
Epoch 00150: val_acc did not improve
Epoch 00151: val_acc did not improve
Epoch 00152: val_acc did not improve
Epoch 00153: val_acc did not improve
Epoch 00154: val_acc did not improve
Epoch 00155: val_acc did not improve
Epoch 00156: val_acc did not improve
Epoch 00157: val_acc did not improve
Epoch 00158: val_acc did not improve
Epoch 00159: val_acc did not improve
Epoch 00160: val_acc did not improve
Epoch 00161: val_acc did not improve
Epoch 00162: val_acc did not improve
Epoch 00163: val_acc did not improve
Epoch 00164: val_acc did not improve
Epoch 00165: val_acc did not improve
Epoch 00166: val_acc did not improve
Epoch 00167: val_acc did not improve
Epoch 00168: val_acc did not improve
Epoch 00169: val_acc did not improve
Epoch 00170: val_acc did not improve
Epoch 00171: val_acc did not improve
Epoch 00172: val_acc did not improve
Epoch 00173: val_acc did not improve
Epoch 00174: val_acc did not improve
Epoch 00175: val_acc did not improve
Epoch 00176: val_acc did not improve
Epoch 00177: val_acc did not improve
Epoch 00178: val_acc did not improve
Epoch 00179: val_acc improved from 0.51724 to 0.52299, saving model to weightspca.best.hdf5
Epoch 00180: val_acc did not improve
Epoch 00181: val_acc did not improve
Epoch 00182: val_acc did not improve
Epoch 00183: val_acc did not improve
Epoch 00184: val_acc improved from 0.52299 to 0.52586, saving model to weightspca.best.hdf5
Epoch 00185: val_acc did not improve
Epoch 00186: val_acc did not improve
Epoch 00187: val_acc did not improve
Epoch 00188: val_acc did not improve
Epoch 00189: val_acc did not improve
Epoch 00190: val_acc did not improve
Epoch 00191: val_acc improved from 0.52586 to 0.52778, saving model to weightspca.best.hdf5
Epoch 00192: val_acc improved from 0.52778 to 0.52969, saving model to weightspca.best.hdf5
Epoch 00193: val_acc did not improve
Epoch 00194: val_acc did not improve
Epoch 00195: val_acc did not improve
Epoch 00196: val_acc did not improve
Epoch 00197: val_acc did not improve
Epoch 00198: val_acc did not improve
Epoch 00199: val_acc did not improve
(1044, 5)
Accuracy:
0.519157088123
Confusion matrix:
[[170   4  24   2   0]
 [ 17 170 112   0  26]
 [ 64  80 166   4  23]
 [ 47   7  32   5   4]
 [  7  24  25   0  31]]
AUC score:
0.754466508912