In [1]:
from __future__ import print_function

import numpy as np
from keras.datasets import imdb
from keras.models import Sequential, Model
from keras.layers import Dense, Input
from keras.layers import LSTM, Convolution1D, MaxPooling1D, GlobalAveragePooling1D, concatenate, Lambda, BatchNormalization
from keras.layers import Dropout
from keras.layers.convolutional import Conv1D
from keras.layers.convolutional import MaxPooling1D
from keras.layers.embeddings import Embedding
from keras.preprocessing import sequence
from six.moves import cPickle as pickle
from keras.utils.np_utils import to_categorical
from keras.callbacks import ModelCheckpoint, EarlyStopping, LambdaCallback

import matplotlib.pyplot as plt

# fix random seed for reproducibility
seed=7
np.random.seed(seed)
import tensorflow as tf
tf.set_random_seed(seed)

NAME="MODEL-11-conv-1.0"
split_valid_test=False


Using TensorFlow backend.
# Read data
with open("Atmosfera-Incidents-2017.pickle", 'rb') as f:
    incidents = pickle.load(f)
# Skonwertuj root_service do intów i zapisz 
Y=[int(i) for i in incidents[1:,3]]
with open("Y.pickle", 'wb') as f:
    pickle.dump(Y, f, pickle.HIGHEST_PROTOCOL)

In [2]:
# Dane wejściowe
with open("X-sequences.pickle", 'rb') as f:
    X = pickle.load(f)
with open("Y.pickle", 'rb') as f:
    Y = pickle.load(f)

In [3]:
# Zostaw tylko poniższe kategorie, pozostale zmień na -1
lista = [2183, 
         #325, 
         37, 859, 2655, 606, 412, 2729, 1683, 1305]
# Y=[y if y in lista else -1 for y in Y]
mask = [y in lista for y in Y]

import itertools
X = np.array(list(itertools.compress(X, mask)))
Y = np.array(list(itertools.compress(Y, mask)))

In [4]:
np.unique(Y)


Out[4]:
array([  37,  412,  606,  859, 1305, 1683, 2183, 2655, 2729])

W tej wersji eksperymentu, Y zawiera root_service - 44 unikalne kategorie główne. Zamieńmy je na liczby z przedziału 0-43


In [5]:
root_services=np.sort(np.unique(Y))
# skonstruuj odwrtotny indeks kategorii głównych
services_idx={root_services[i]: i for i in range(len(root_services))}

In [6]:
# Zamień 
Y=[services_idx[y] for y in Y]

In [7]:
Y=to_categorical(Y)
Y.shape


Out[7]:
(14830, 9)

In [8]:
top_words = 5000
classes=Y[0,].shape[0]
print(classes)


9

In [9]:
# max_length (98th percentile is 476), padd the rest
max_length=500
X=sequence.pad_sequences(X, maxlen=max_length)

slice in half even/odds to nulify time differencies

X_train=X[0:][::2] # even X_test=X[1:][::2] # odds

Y_train=np.array(Y[0:][::2]) # even Y_test=np.array(Y[1:][::2]) # odds

if split_valid_test:

# Split "test" in half for validation and final testing
X_valid=X_test[:len(X_test)/2]
Y_valid=Y_test[:len(Y_test)/2]
X_test=X_test[len(X_test)/2:]
Y_test=Y_test[len(Y_test)/2:]

else: X_valid=X_test Y_valid=Y_test


In [10]:
# create the model
embedding_vecor_length = 60

_input = Input(shape=(max_length,), name='input')
embedding=Embedding(top_words, embedding_vecor_length, input_length=max_length)(_input)

conv1 = Conv1D(filters=128, kernel_size=1, padding='same', activation='relu')
conv2 = Conv1D(filters=128, kernel_size=2, padding='same', activation='relu')
conv3 = Conv1D(filters=128, kernel_size=3, padding='same', activation='relu')
conv4 = Conv1D(filters=128, kernel_size=4, padding='same', activation='relu')
conv5 = Conv1D(filters=32, kernel_size=5, padding='same', activation='relu')
conv6 = Conv1D(filters=32, kernel_size=6, padding='same', activation='relu')

conv1 = conv1(embedding)
glob1 = GlobalAveragePooling1D()(conv1)
conv2 = conv2(embedding)
glob2 = GlobalAveragePooling1D()(conv2)
conv3 = conv3(embedding)
glob3 = GlobalAveragePooling1D()(conv3)
conv4 = conv4(embedding)
glob4 = GlobalAveragePooling1D()(conv4)
conv5 = conv5(embedding)
glob5 = GlobalAveragePooling1D()(conv5)
conv6 = conv6(embedding)
glob6 = GlobalAveragePooling1D()(conv6)

merge = concatenate([glob1, glob2, glob3, glob4, glob5, glob6])
x = Dropout(0.2)(merge)
x = BatchNormalization()(x)
x = Dense(300, activation='relu')(x)
x = Dropout(0.2)(x)
x = BatchNormalization()(x)
pred = Dense(classes, activation='softmax')(x)

model = Model(inputs=[_input], outputs=pred)

model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])#, decay=0.0000001)

print(model.summary())


# Callbacks
early_stop_cb = EarlyStopping(monitor='val_loss', patience=20, verbose=1)

checkpoit_cb = ModelCheckpoint(NAME+".h5", save_best_only=True)

# Print the batch number at the beginning of every batch.
batch_print_cb = LambdaCallback(on_batch_begin=lambda batch, logs: print (".",end=''), 
                                on_epoch_end=lambda batch, logs: print (batch))

# Plot the loss after every epoch.
plot_loss_cb = LambdaCallback(on_epoch_end=lambda epoch, logs: 
                              print (epoch, logs))
                              #plt.plot(np.arange(epoch), logs['loss']))

print("done")


____________________________________________________________________________________________________
Layer (type)                     Output Shape          Param #     Connected to                     
====================================================================================================
input (InputLayer)               (None, 500)           0                                            
____________________________________________________________________________________________________
embedding_1 (Embedding)          (None, 500, 60)       300000      input[0][0]                      
____________________________________________________________________________________________________
conv1d_1 (Conv1D)                (None, 500, 128)      7808        embedding_1[0][0]                
____________________________________________________________________________________________________
conv1d_2 (Conv1D)                (None, 500, 128)      15488       embedding_1[0][0]                
____________________________________________________________________________________________________
conv1d_3 (Conv1D)                (None, 500, 128)      23168       embedding_1[0][0]                
____________________________________________________________________________________________________
conv1d_4 (Conv1D)                (None, 500, 128)      30848       embedding_1[0][0]                
____________________________________________________________________________________________________
conv1d_5 (Conv1D)                (None, 500, 32)       9632        embedding_1[0][0]                
____________________________________________________________________________________________________
conv1d_6 (Conv1D)                (None, 500, 32)       11552       embedding_1[0][0]                
____________________________________________________________________________________________________
global_average_pooling1d_1 (Glob (None, 128)           0           conv1d_1[0][0]                   
____________________________________________________________________________________________________
global_average_pooling1d_2 (Glob (None, 128)           0           conv1d_2[0][0]                   
____________________________________________________________________________________________________
global_average_pooling1d_3 (Glob (None, 128)           0           conv1d_3[0][0]                   
____________________________________________________________________________________________________
global_average_pooling1d_4 (Glob (None, 128)           0           conv1d_4[0][0]                   
____________________________________________________________________________________________________
global_average_pooling1d_5 (Glob (None, 32)            0           conv1d_5[0][0]                   
____________________________________________________________________________________________________
global_average_pooling1d_6 (Glob (None, 32)            0           conv1d_6[0][0]                   
____________________________________________________________________________________________________
concatenate_1 (Concatenate)      (None, 576)           0           global_average_pooling1d_1[0][0] 
                                                                   global_average_pooling1d_2[0][0] 
                                                                   global_average_pooling1d_3[0][0] 
                                                                   global_average_pooling1d_4[0][0] 
                                                                   global_average_pooling1d_5[0][0] 
                                                                   global_average_pooling1d_6[0][0] 
____________________________________________________________________________________________________
dropout_1 (Dropout)              (None, 576)           0           concatenate_1[0][0]              
____________________________________________________________________________________________________
batch_normalization_1 (BatchNorm (None, 576)           2304        dropout_1[0][0]                  
____________________________________________________________________________________________________
dense_1 (Dense)                  (None, 300)           173100      batch_normalization_1[0][0]      
____________________________________________________________________________________________________
dropout_2 (Dropout)              (None, 300)           0           dense_1[0][0]                    
____________________________________________________________________________________________________
batch_normalization_2 (BatchNorm (None, 300)           1200        dropout_2[0][0]                  
____________________________________________________________________________________________________
dense_2 (Dense)                  (None, 9)             2709        batch_normalization_2[0][0]      
====================================================================================================
Total params: 577,809
Trainable params: 576,057
Non-trainable params: 1,752
____________________________________________________________________________________________________
None
done

In [11]:
history = model.fit(
        X,#_train,
        Y,#_train,
#        initial_epoch=1200,
        epochs=1500,
        batch_size=2048,
        #validation_data=(X_valid,Y_valid),
        validation_split=0.25,
        callbacks=[early_stop_cb, checkpoit_cb, batch_print_cb, plot_loss_cb],
        verbose=0
        )

#history=model.fit(X_train, Y_train, validation_data=(X_test, Y_test), nb_epoch=3, batch_size=512)


......0
0 {'val_acc': 0.29045307825941358, 'acc': 0.22235209696735758, 'val_loss': 2.1972081329444464, 'loss': 2.2907942968442958}
......1
1 {'val_acc': 0.38754044040764291, 'acc': 0.45135766988048404, 'val_loss': 1.8211152232066199, 'loss': 1.6764864422095549}
......2
2 {'val_acc': 0.5361380996956151, 'acc': 0.56374752620123547, 'val_loss': 1.5606886738712349, 'loss': 1.2684236450217607}
......3
3 {'val_acc': 0.47114348257243827, 'acc': 0.58793382772415836, 'val_loss': 5.2719509748193438, 'loss': 1.3116221091576219}
......4
4 {'val_acc': 0.580366770233537, 'acc': 0.59197986184896867, 'val_loss': 2.8518192837539229, 'loss': 1.2276255778755307}
......5
5 {'val_acc': 0.60409927387453588, 'acc': 0.64997303079645574, 'val_loss': 2.1651286851240981, 'loss': 1.0553123873786261}
......6
6 {'val_acc': 0.59924490693574839, 'acc': 0.6727207338632043, 'val_loss': 2.1094584989702048, 'loss': 0.96001683015056793}
......7
7 {'val_acc': 0.60544769657211139, 'acc': 0.69061319948293132, 'val_loss': 1.8495930316250173, 'loss': 0.91279203350264015}
......8
8 {'val_acc': 0.6267529674589698, 'acc': 0.69600791321064881, 'val_loss': 1.5374893590470349, 'loss': 0.88412005057589571}
......9
9 {'val_acc': 0.6442826259740605, 'acc': 0.705808310055557, 'val_loss': 1.3114557387116401, 'loss': 0.84982339656518058}
......10
10 {'val_acc': 0.41882418640994717, 'acc': 0.71219205313975642, 'val_loss': 2.6311618183551611, 'loss': 0.82816315963100651}
......11
11 {'val_acc': 0.61084138742671168, 'acc': 0.71920518391939658, 'val_loss': 1.2297421427606379, 'loss': 0.81535912826965451}
......12
12 {'val_acc': 0.66396983078768335, 'acc': 0.71866571236548504, 'val_loss': 0.98598118108710298, 'loss': 0.80329470641105816}
......13
13 {'val_acc': 0.68069039746524185, 'acc': 0.7303542503884447, 'val_loss': 0.96392727920538401, 'loss': 0.7874597999351185}
......14
14 {'val_acc': 0.69120821276400335, 'acc': 0.7278367175706062, 'val_loss': 0.92881643103421574, 'loss': 0.77066012991351462}
......15
15 {'val_acc': 0.70118660969255808, 'acc': 0.73520949248722234, 'val_loss': 0.86900980450703236, 'loss': 0.76282845489242657}
......16
16 {'val_acc': 0.71790726799537963, 'acc': 0.73988491746938134, 'val_loss': 0.84033114399596198, 'loss': 0.74681241423122058}
......17
17 {'val_acc': 0.71736790016373897, 'acc': 0.74447041647058232, 'val_loss': 0.82450463142981423, 'loss': 0.73372466586472596}
......18
18 {'val_acc': 0.7165588285159139, 'acc': 0.74330156868984532, 'val_loss': 0.81218666481225876, 'loss': 0.72905608661696542}
......19
19 {'val_acc': 0.73381879478853751, 'acc': 0.74581909639503952, 'val_loss': 0.7694707780336999, 'loss': 0.71886171329538073}
......20
20 {'val_acc': 0.73651563902631678, 'acc': 0.75139363520309621, 'val_loss': 0.7593699341967397, 'loss': 0.70450107716030619}
......21
21 {'val_acc': 0.74163973408609529, 'acc': 0.7538212590070964, 'val_loss': 0.74223658458571862, 'loss': 0.69435373269660594}
......22
22 {'val_acc': 0.7456850213397539, 'acc': 0.76335192058952861, 'val_loss': 0.72658181319087578, 'loss': 0.67393486607184061}
......23
23 {'val_acc': 0.74406688466684201, 'acc': 0.76308217950699853, 'val_loss': 0.7190839831497291, 'loss': 0.6684848498449204}
......24
24 {'val_acc': 0.74784250565459942, 'acc': 0.77090451602047461, 'val_loss': 0.71231908930672538, 'loss': 0.65141096064307413}
......25
25 {'val_acc': 0.75566343775073297, 'acc': 0.76730803723301821, 'val_loss': 0.6994797984116029, 'loss': 0.65086095735251182}
......26
26 {'val_acc': 0.7551240706263751, 'acc': 0.77764790377756654, 'val_loss': 0.69270061359950763, 'loss': 0.63328543652135072}
......27
27 {'val_acc': 0.75323626048613779, 'acc': 0.7772882496511051, 'val_loss': 0.70594515396681934, 'loss': 0.61847724833734552}
......28
28 {'val_acc': 0.75809062221675239, 'acc': 0.78232332148197803, 'val_loss': 0.67050066850717782, 'loss': 0.60715334654668962}
......29
29 {'val_acc': 0.76186622809437876, 'acc': 0.78951627138256486, 'val_loss': 0.66374586437383776, 'loss': 0.59130136456727078}
......30
30 {'val_acc': 0.7688781116901221, 'acc': 0.79113469061030905, 'val_loss': 0.6411825526493653, 'loss': 0.58741063924599757}
......31
31 {'val_acc': 0.77076592832450086, 'acc': 0.79590001593517579, 'val_loss': 0.62939092224341253, 'loss': 0.57405544111913454}
......32
32 {'val_acc': 0.77238406429013007, 'acc': 0.80066534475421891, 'val_loss': 0.62618418766071104, 'loss': 0.56226265688762245}
......33
33 {'val_acc': 0.78263213397872744, 'acc': 0.79751842880694901, 'val_loss': 0.61819955883417055, 'loss': 0.5546796339446961}
......34
34 {'val_acc': 0.78371088770986763, 'acc': 0.79913684276127384, 'val_loss': 0.60491779385518363, 'loss': 0.54901930773950613}
......35
35 {'val_acc': 0.78667746527681071, 'acc': 0.80947671441846647, 'val_loss': 0.59083299613693385, 'loss': 0.52431629969262961}
......36
36 {'val_acc': 0.78802592128097304, 'acc': 0.812084159433488, 'val_loss': 0.57851528697029286, 'loss': 0.51348755318276895}
......37
37 {'val_acc': 0.79234088849609852, 'acc': 0.82305340236563662, 'val_loss': 0.57279649463672078, 'loss': 0.4943956153477681}
......38
38 {'val_acc': 0.78829556806444989, 'acc': 0.82521128802392962, 'val_loss': 0.57628747200374331, 'loss': 0.48338930456999757}
......39
39 {'val_acc': 0.7993527324197105, 'acc': 0.82934723673611099, 'val_loss': 0.5524115675604202, 'loss': 0.47310240739563603}
......40
40 {'val_acc': 0.80852213421557195, 'acc': 0.83420248193248658, 'val_loss': 0.53576752186593113, 'loss': 0.45971888433984448}
......41
41 {'val_acc': 0.80070117685015529, 'acc': 0.83438230074260156, 'val_loss': 0.54893354073311518, 'loss': 0.45017901590843251}
......42
42 {'val_acc': 0.8015102765963944, 'acc': 0.83716957356041877, 'val_loss': 0.53846964574425993, 'loss': 0.45267162835227137}
......43
43 {'val_acc': 0.80825243477168074, 'acc': 0.8441827027859008, 'val_loss': 0.52727955193967113, 'loss': 0.4303149825869394}
......44
44 {'val_acc': 0.81984897416000613, 'acc': 0.84921776323390508, 'val_loss': 0.51892704080223651, 'loss': 0.41489192083507448}
......45
45 {'val_acc': 0.80393744736684825, 'acc': 0.83402265389388763, 'val_loss': 0.55556407516185258, 'loss': 0.45568862274447225}
......46
46 {'val_acc': 0.80312835270018512, 'acc': 0.84373313963488272, 'val_loss': 0.53423158803546822, 'loss': 0.43211117384138348}
......47
47 {'val_acc': 0.78991369824322044, 'acc': 0.84238446227210695, 'val_loss': 0.58198049577694499, 'loss': 0.43937599185215787}
......48
48 {'val_acc': 0.79638618873804001, 'acc': 0.85182520661974015, 'val_loss': 0.53873487313588464, 'loss': 0.4118951789296546}
......49
49 {'val_acc': 0.80204959543263132, 'acc': 0.85946772432310126, 'val_loss': 0.54166308177199052, 'loss': 0.3934172507006366}
......50
50 {'val_acc': 0.80771305903133317, 'acc': 0.8597374519219696, 'val_loss': 0.53348227129784986, 'loss': 0.38099677952646543}
......51
51 {'val_acc': 0.80987055774902705, 'acc': 0.87196547451089601, 'val_loss': 0.51670887156469025, 'loss': 0.36493872678702177}
......52
52 {'val_acc': 0.82092768892629764, 'acc': 0.8726847705342019, 'val_loss': 0.502562421907499, 'loss': 0.34770242304729915}
......53
53 {'val_acc': 0.82901834136331221, 'acc': 0.8796978938753196, 'val_loss': 0.48850013870251602, 'loss': 0.33832075142041534}
......54
54 {'val_acc': 0.827400218385966, 'acc': 0.88392375284466596, 'val_loss': 0.48627789837771379, 'loss': 0.33166902300758855}
......55
55 {'val_acc': 0.82928802781892053, 'acc': 0.88626146700244612, 'val_loss': 0.4769492436124026, 'loss': 0.31790780167938931}
......56
56 {'val_acc': 0.82874866931055224, 'acc': 0.88590181612363927, 'val_loss': 0.48265037364944285, 'loss': 0.31551378418206955}
......57
57 {'val_acc': 0.83306366970366774, 'acc': 0.8867110244191565, 'val_loss': 0.45566491041625845, 'loss': 0.30957865363465564}
......58
58 {'val_acc': 0.83683932174756692, 'acc': 0.89480309411575198, 'val_loss': 0.46510478751301892, 'loss': 0.29495946748654117}
......59
59 {'val_acc': 0.83683928927685969, 'acc': 0.8956122987670363, 'val_loss': 0.45536761771899598, 'loss': 0.2890417606061878}
......60
60 {'val_acc': 0.83845739643679207, 'acc': 0.89525264416896833, 'val_loss': 0.46851810890108253, 'loss': 0.28764614923740756}
......61
61 {'val_acc': 0.84412084055306968, 'acc': 0.90127675132643426, 'val_loss': 0.46091900474945446, 'loss': 0.27222128495683862}
......62
62 {'val_acc': 0.84142392995931059, 'acc': 0.90568242864500725, 'val_loss': 0.44824172062909845, 'loss': 0.264214736151408}
......63
63 {'val_acc': 0.83872706482454151, 'acc': 0.90891925647862848, 'val_loss': 0.45720918728952914, 'loss': 0.25413832791126839}
......64
64 {'val_acc': 0.84573895562170909, 'acc': 0.91098723125809322, 'val_loss': 0.44338655767574536, 'loss': 0.25009644442840651}
......65
65 {'val_acc': 0.84439054211880915, 'acc': 0.91548282147054083, 'val_loss': 0.45743334476616004, 'loss': 0.24079575418697119}
......66
66 {'val_acc': 0.84088458231737695, 'acc': 0.91053767345552283, 'val_loss': 0.45074453409821469, 'loss': 0.24339439583390635}
......67
67 {'val_acc': 0.83899675340199809, 'acc': 0.91710124053750997, 'val_loss': 0.45197918747492466, 'loss': 0.23328486259110945}
......68
68 {'val_acc': 0.84492992223144991, 'acc': 0.92087753604500655, 'val_loss': 0.45333293652585921, 'loss': 0.22741374746969847}
......69
69 {'val_acc': 0.84196334324994138, 'acc': 0.92411436659036572, 'val_loss': 0.45852577959578145, 'loss': 0.218147308743701}
......70
70 {'val_acc': 0.84924485485402001, 'acc': 0.92069771893910635, 'val_loss': 0.45042189161625751, 'loss': 0.22109745922512641}
......71
71 {'val_acc': 0.84600863699774143, 'acc': 0.92420427746919398, 'val_loss': 0.44646974706855536, 'loss': 0.21572749233357319}
......72
72 {'val_acc': 0.84519958271049245, 'acc': 0.92618234439240021, 'val_loss': 0.45793645908531633, 'loss': 0.20542135242774018}
......73
73 {'val_acc': 0.82524275130309976, 'acc': 0.92150691956029784, 'val_loss': 0.54317765836448217, 'loss': 0.2153709939681161}
......74
74 {'val_acc': 0.83522118578193771, 'acc': 0.92860995757453324, 'val_loss': 0.49842754009600726, 'loss': 0.20338715807158106}
......75
75 {'val_acc': 0.83603024148375216, 'acc': 0.92501348204544998, 'val_loss': 0.4902216209241359, 'loss': 0.21052370459307571}
......76
76 {'val_acc': 0.83980584086723697, 'acc': 0.92807049323405921, 'val_loss': 0.48402536040756694, 'loss': 0.20254013530482623}
......77
77 {'val_acc': 0.84385114548147178, 'acc': 0.93031828837779718, 'val_loss': 0.47090847045986911, 'loss': 0.18972403455758091}
......78
78 {'val_acc': 0.84492992364601538, 'acc': 0.9321165194380221, 'val_loss': 0.46247410298422575, 'loss': 0.19079632724045292}
......79
79 {'val_acc': 0.84061493045432412, 'acc': 0.93166696605140453, 'val_loss': 0.47045621102296031, 'loss': 0.18551987360287348}
......80
80 {'val_acc': 0.84546925547053509, 'acc': 0.93490379427088566, 'val_loss': 0.48002627938825077, 'loss': 0.18214985232540995}
......81
81 {'val_acc': 0.84466021629341725, 'acc': 0.93850027104329559, 'val_loss': 0.47390633548219097, 'loss': 0.17618309332513699}
......82
82 {'val_acc': 0.84223299215526648, 'acc': 0.93930947732376635, 'val_loss': 0.4720966952261158, 'loss': 0.17086528550322333}
......83
83 {'val_acc': 0.84115425141240918, 'acc': 0.94281604019477838, 'val_loss': 0.4694143904065623, 'loss': 0.16478477954339305}
......84
84 {'val_acc': 0.84061486409834418, 'acc': 0.94200683391430762, 'val_loss': 0.48459243311465366, 'loss': 0.16357148064220364}
......85
85 {'val_acc': 0.84358148924612564, 'acc': 0.94029851419379917, 'val_loss': 0.47290484914244701, 'loss': 0.1612867624146371}
Epoch 00085: early stopping

In [12]:
import matplotlib.pyplot as plt
# summarize history for accuracy
plt.plot(history.history['acc'])
plt.plot(history.history['val_acc'])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'val'], loc='lower right')
plt.show()


# summarize history for loss
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'val'], loc='upper right')
# plt.title('model loss (log scale)')
# plt.yscale('log')
plt.show()



In [60]:
history2 = model.fit(
        X,#_train,
        Y,#_train,
        initial_epoch=10000,    
        epochs=10010,
        batch_size=1024,
        #validation_data=(X_valid,Y_valid),
        validation_split=0.1,
        callbacks=[early_stop_cb, checkpoit_cb, batch_print_cb, plot_loss_cb],
        verbose=0
        )


..............10000
10000 {'acc': 0.92942234211274544, 'loss': 0.24218799137770838, 'val_acc': 0.82063384744981138, 'val_loss': 0.60090364376114414}
..............10001
10001 {'acc': 0.91623585825423015, 'loss': 0.2689954228043297, 'val_acc': 0.82130815604210539, 'val_loss': 0.57255609547537245}
..............10002
10002 {'acc': 0.91256462126767102, 'loss': 0.26002808122422494, 'val_acc': 0.82198247745562791, 'val_loss': 0.55839366890334308}
..............10003
10003 {'acc': 0.91676032069364077, 'loss': 0.2551032062205445, 'val_acc': 0.81726230352373441, 'val_loss': 0.5396233856557433}
..............10004
10004 {'acc': 0.90971753953538725, 'loss': 0.26793382654272591, 'val_acc': 0.82333110135226983, 'val_loss': 0.54280942650317177}
..............10005
10005 {'acc': 0.91331385330929871, 'loss': 0.25824233047893597, 'val_acc': 0.81928523625382199, 'val_loss': 0.55742760879802122}
..............10006
10006 {'acc': 0.90664568818302349, 'loss': 0.28339036189188821, 'val_acc': 0.80512474488343888, 'val_loss': 0.56352438925892934}
..............10007
10007 {'acc': 0.90484753129830064, 'loss': 0.28393372885518336, 'val_acc': 0.8118678209995468, 'val_loss': 0.54795667181445873}
..............10008
10008 {'acc': 0.91016707875321867, 'loss': 0.27542920117231934, 'val_acc': 0.83074846749199616, 'val_loss': 0.53539828758181873}
..............10009
10009 {'acc': 0.90387352964552437, 'loss': 0.29051856199788667, 'val_acc': 0.82467970069163732, 'val_loss': 0.53889474190334496}
Epoch 01365: early stopping

In [15]:
score=model.evaluate(X_test,Y_test, verbose=0)
print("OOS %s: %.2f%%" % (model.metrics_names[1], score[1]*100))
print("OOS %s: %.2f" % (model.metrics_names[0], score[0]))



NameErrorTraceback (most recent call last)
<ipython-input-15-e678b5269ea6> in <module>()
----> 1 score=model.evaluate(X_test,Y_test, verbose=0)
      2 print("OOS %s: %.2f%%" % (model.metrics_names[1], score[1]*100))
      3 print("OOS %s: %.2f" % (model.metrics_names[0], score[0]))

NameError: name 'X_test' is not defined

In [50]:
import matplotlib.pyplot as plt
# summarize history for accuracy
plt.plot(history2.history['acc'])
plt.plot(history2.history['val_acc'])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'val'], loc='lower right')
plt.show()


# summarize history for loss
plt.plot(history2.history['loss'])
plt.plot(history2.history['val_loss'])
plt.title('model loss (log scale)')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'val'], loc='upper right')
plt.yscale('log')
plt.show()



In [15]:
history3 = model.fit(
        X,#_train,
        Y,#_train,
        initial_epoch=60,    
        epochs=90,
        batch_size=1024,
        #validation_data=(X_valid,Y_valid),
        validation_split=0.3,
        callbacks=[early_stop_cb, checkpoit_cb, batch_print_cb, plot_loss_cb],
        verbose=0
        )


................60
60 {'acc': 0.69396716357036081, 'loss': 1.0682002606852541, 'val_acc': 0.61906176239181299, 'val_loss': 1.4453319599962575}
................61
61 {'acc': 0.7037037048871484, 'loss': 1.0332637657505632, 'val_acc': 0.61564727237275552, 'val_loss': 1.4414969362725554}
................62
62 {'acc': 0.70701285371711209, 'loss': 1.0041220267947522, 'val_acc': 0.62232779267281646, 'val_loss': 1.4268438312050282}
................63
63 {'acc': 0.71197658232548955, 'loss': 0.99611397675313584, 'val_acc': 0.62262470591946051, 'val_loss': 1.4255109716764256}
................64
64 {'acc': 0.71834033401439978, 'loss': 0.97293814648437305, 'val_acc': 0.62069477901889136, 'val_loss': 1.4260232516535671}
................65
65 {'acc': 0.72184039811467771, 'loss': 0.97190722480626812, 'val_acc': 0.61935867436424852, 'val_loss': 1.4186554088162131}
................66
66 {'acc': 0.72578592269766862, 'loss': 0.94906096593772571, 'val_acc': 0.62024940674208684, 'val_loss': 1.4094102914995932}
................67
67 {'acc': 0.69625811521767567, 'loss': 1.0645167365607617, 'val_acc': 0.60243468216649143, 'val_loss': 1.4889457426841355}
................68
68 {'acc': 0.69625811337423305, 'loss': 1.0706597206716062, 'val_acc': 0.60599762640203259, 'val_loss': 1.4689711890141357}
................69
69 {'acc': 0.69950362780902231, 'loss': 1.0387265352221471, 'val_acc': 0.60881829502463625, 'val_loss': 1.4538306377279504}
................70
70 {'acc': 0.71000381935123758, 'loss': 1.0077977372259679, 'val_acc': 0.61624109844130748, 'val_loss': 1.4287678232668697}
................71
71 {'acc': 0.70134911596949534, 'loss': 1.0318226642518822, 'val_acc': 0.60213777005247715, 'val_loss': 1.4894028610401653}
................72
72 {'acc': 0.68760341133524538, 'loss': 1.099111608152886, 'val_acc': 0.60733373091509679, 'val_loss': 1.479072934658114}
................73
73 {'acc': 0.69142166257617699, 'loss': 1.0872183346605733, 'val_acc': 0.60213776608827274, 'val_loss': 1.4834875916075536}
................74
74 {'acc': 0.69994909045379761, 'loss': 1.058706705785972, 'val_acc': 0.60881829021095946, 'val_loss': 1.4884105396950329}
................75
75 {'acc': 0.70306732959687834, 'loss': 1.037833027186035, 'val_acc': 0.61564726826697236, 'val_loss': 1.4449179574599458}
................76
76 {'acc': 0.70364006627405573, 'loss': 1.0262630202470731, 'val_acc': 0.61861639280500436, 'val_loss': 1.4351270128032658}
................77
77 {'acc': 0.70949471811612808, 'loss': 1.0033399172831814, 'val_acc': 0.61831947729310066, 'val_loss': 1.4346362315560746}
................78
78 {'acc': 0.71541300888232673, 'loss': 0.98309921032602254, 'val_acc': 0.62054632282030553, 'val_loss': 1.4234220100978208}
................79
79 {'acc': 0.71961308407149305, 'loss': 0.97791410750281926, 'val_acc': 0.62277315843699943, 'val_loss': 1.434774916981858}
................80
80 {'acc': 0.71840397145163393, 'loss': 0.98115001725258377, 'val_acc': 0.61460808280528012, 'val_loss': 1.4496738825727811}
................81
81 {'acc': 0.7203767358520885, 'loss': 0.95888457092541513, 'val_acc': 0.61980404253526988, 'val_loss': 1.4341611703614441}
................82
82 {'acc': 0.72572228624663826, 'loss': 0.95314105417007133, 'val_acc': 0.6193586749305634, 'val_loss': 1.4350798064909185}
................83
83 {'acc': 0.72871324873249754, 'loss': 0.93137366312430436, 'val_acc': 0.6227731577291058, 'val_loss': 1.4264792727744495}
................84
84 {'acc': 0.73036782408057999, 'loss': 0.93253052497890232, 'val_acc': 0.62470309694702442, 'val_loss': 1.43061292313057}
................85
85 {'acc': 0.7329133257271756, 'loss': 0.90921011269358076, 'val_acc': 0.62425771943180697, 'val_loss': 1.4309742467703559}
................86
86 {'acc': 0.73475881330351256, 'loss': 0.90705162541654194, 'val_acc': 0.62381236003866669, 'val_loss': 1.4259913146071084}
................87
87 {'acc': 0.73711340282806026, 'loss': 0.91535873602437079, 'val_acc': 0.62841448846169157, 'val_loss': 1.4194937236518588}
Epoch 00087: early stopping

In [16]:
import matplotlib.pyplot as plt
# summarize history for accuracy
plt.plot(history3.history['acc'])
plt.plot(history3.history['val_acc'])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'val'], loc='lower right')
plt.show()


# summarize history for loss
plt.plot(history3.history['loss'])
plt.plot(history3.history['val_loss'])
plt.title('model loss (log scale)')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'val'], loc='upper right')
plt.yscale('log')
plt.show()



In [ ]: