Created by Peter Nagy, February 2017. GitHub

As an improvement to my previous Kernel, here I am trying to achieve better results with a Recurrent Neural Network.


In [32]:
# This Python 3 environment comes with many helpful analytics libraries installed
# It is defined by the kaggle/python docker image: https://github.com/kaggle/docker-python
# For example, here are several helpful packages to load

import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)

import os
os.environ['KERAS_BACKEND']='theano'

from sklearn.feature_extraction.text import CountVectorizer
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from keras.models import Sequential
from keras.layers import Input, Dense, Embedding, LSTM, Conv2D, MaxPool2D
from keras.layers import Reshape, Flatten, Dropout, Concatenate, Convolution1D
from keras.optimizers import Adam, SGD
from keras.models import Model
from keras.callbacks import ModelCheckpoint
from sklearn.model_selection import train_test_split
from keras.utils.np_utils import to_categorical
import re
from keras.callbacks import EarlyStopping
from keras.callbacks import TensorBoard

seed = 42
np.random.seed(seed)

import matplotlib.pyplot as plt
%matplotlib inline

Loading the data, keeping only the rows with usable labels, mapping each label to an integer category, and flattening the keyword columns into a single text per row.


In [2]:
MAX_SEQUENCE_LENGTH = 1000 # maximum padded sequence length
MAX_NB_WORDS = 20000 # vocabulary cap, comfortably above the actual vocab size
EMBEDDING_DIM = 100
VALIDATION_SPLIT = 0.2


data_train = pd.read_csv('../result_all_windows_labels.csv')

# Cleanup - remove no labels
data_train = data_train[data_train['label'].notnull()]
data_train = data_train[data_train.label != 'environmental']
data_train = data_train[data_train.label != 'religious']
data_train = data_train[data_train.label != 'economical']


label_cat = {'violence/terrorism' : 1, 'misc': 2, 'political': 3, 
#              'religious': 4, 'economical': 5, 'environmental': 6
            }
print(label_cat) 


def to_category(x):
    return label_cat[x]

data_train['target'] = data_train.apply(lambda row: to_category(row['label']), axis=1)

data_train['target'].plot.hist(alpha=0.5)

texts = []
# Get corpus by joining all keywords
for index, row in data_train.iloc[:, 2:32].iterrows():
    texts.append(u' '.join(row.tolist()))
    
data_train['topicFlat'] = texts

labels = data_train['target']

# print(labels)
data_train['topicFlat'].head()


{'misc': 2, 'violence/terrorism': 1, 'political': 3}
Out[2]:
0    syrian assad say syria killed damascus people ...
1    use osc copyrighted_material dissemination usa...
2    will year can people one country party make sa...
3    quot apos say the we it reuters terrorists ass...
4    baghdad iraq sunni killed bomb iraqi attacks w...
Name: topicFlat, dtype: object

Next, I define the maximum number of features as 2000 and use the Tokenizer to vectorize the flattened keyword texts, converting them into padded sequences the network can take as input.


In [4]:
max_features = 2000
tokenizer = Tokenizer(num_words=max_features, split=' ')
tokenizer.fit_on_texts(data_train['topicFlat'].values)
X = tokenizer.texts_to_sequences(data_train['topicFlat'].values)
print(X[0])
X = pad_sequences(X)
print(X[0])

word_index = tokenizer.word_index
print('Found %s unique tokens.' % len(word_index)) # all the tokens in corpus

print(X.shape)


[4, 17, 1, 2, 16, 42, 7, 47, 178, 56, 30, 1014, 395, 8, 160, 22, 38, 12, 201, 142, 10, 3, 17, 1640, 202, 433, 396, 307, 82, 145, 649]
[   0    0    0    0    0    0    4   17    1    2   16   42    7   47
  178   56   30 1014  395    8  160   22   38   12  201  142   10    3
   17 1640  202  433  396  307   82  145  649]
Found 6077 unique tokens.
(1449, 37)
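
A quick sanity check (a minimal sketch): the padded sequences can be decoded back into words through the tokenizer's word_index, skipping the zero padding, to confirm the encoding looks sensible.


In [ ]:
# Invert word_index (word -> id) into id -> word and decode the first sequence
index_to_word = {i: w for w, i in tokenizer.word_index.items()}
print(' '.join(index_to_word.get(i, '?') for i in X[0] if i != 0))  # 0 is the padding value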

Next, I compose the LSTM network. Note that embed_dim, lstm_out, batch_size, and the dropout rates are hyperparameters; their values are somewhat intuitive, and they can and should be tuned to achieve good results. Please also note that I am using softmax as the activation of the output layer. The reason is that the network is trained with categorical crossentropy, and softmax is the matching activation for that loss.
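
To make the pairing concrete, here is a tiny NumPy illustration (with made-up logits): softmax turns raw scores into a probability distribution, and categorical crossentropy is just the negative log of the probability assigned to the true class.


In [ ]:
import numpy as np

def softmax(z):
    e = np.exp(z - z.max())  # shift by the max for numerical stability
    return e / e.sum()

logits = np.array([2.0, 0.5, -1.0])  # made-up scores for the 3 classes
probs = softmax(logits)
print(probs, probs.sum())            # a valid distribution: sums to 1

y_true = np.array([1, 0, 0])         # one-hot target
print(-np.sum(y_true * np.log(probs)))  # crossentropy = -log p(true class)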


In [5]:
# Y = data_train['target'].values

Y = pd.get_dummies(data_train['label']).values
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size = 0.2, random_state = 42)

print(Y_train[100])
print(X_train.shape,Y_train.shape)
print(X_test.shape,Y_test.shape)


[0 0 1]
(1159, 37) (1159, 3)
(290, 37) (290, 3)
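
One detail worth checking (a minimal sketch): pd.get_dummies orders its columns alphabetically by label, so the one-hot columns of Y follow that order rather than the label_cat numbering above. Keeping the column list around makes predictions easy to decode later.


In [ ]:
dummy_cols = pd.get_dummies(data_train['label']).columns.tolist()
print(dummy_cols)                         # ['misc', 'political', 'violence/terrorism']
print(dummy_cols[Y_train[100].argmax()])  # the label behind the one-hot row printed above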

In [6]:
def plot_history(history):
    # list all data in history
    print(history.history.keys())
    # summarize history for accuracy
    plt.plot(history.history['acc'])
    plt.plot(history.history['val_acc'])
    plt.title('model accuracy')
    plt.ylabel('accuracy')
    plt.xlabel('epoch')
    plt.legend(['train', 'test'], loc='upper left')
    plt.show()
    # summarize history for loss
    plt.plot(history.history['loss'])
    plt.plot(history.history['val_loss'])
    plt.title('model loss')
    plt.ylabel('loss')
    plt.xlabel('epoch')
    plt.legend(['train', 'test'], loc='upper left')
    plt.show()

In [19]:
embed_dim = 128
lstm_out = 196
epochs = 100

sequence_length = X.shape[1]  # length of the padded sequences (37)

model = Sequential()
model.add(Embedding(max_features, embed_dim, input_length=sequence_length))
model.add(LSTM(lstm_out, dropout=0.2, recurrent_dropout=0.2))
model.add(Dense(len(label_cat), activation='softmax'))

adam = Adam(lr=1e-4, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)

model.compile(loss = 'categorical_crossentropy',
              optimizer=adam, metrics = ['accuracy'])
print(model.summary())


batch_size = 32

model_name = 'topicConvNet-Reg.h5'
early_stop = EarlyStopping(monitor='val_loss', patience=5, verbose=1)
# checkpoint = ModelCheckpoint(model_name, verbose=0, save_best_only=True)

checkpoint = ModelCheckpoint('weights.{epoch:03d}-{val_acc:.4f}.hdf5', 
                             monitor='val_acc', verbose=0, save_best_only=True, mode='max')

# validation_data takes precedence over validation_split in Keras, so only the test set is passed
network_hist = model.fit(X_train, Y_train, epochs=epochs,
                         validation_data=(X_test, Y_test),
                         callbacks=[early_stop, checkpoint],
                         verbose=1, batch_size=batch_size)

score, acc = model.evaluate(X_test, Y_test)
print('Test score:', score)
print('Test accuracy:', acc)

# print(network_hist.history)
plot_history(network_hist)


_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
embedding_7 (Embedding)      (None, 37, 128)           256000    
_________________________________________________________________
lstm_6 (LSTM)                (None, 196)               254800    
_________________________________________________________________
dense_7 (Dense)              (None, 3)                 591       
=================================================================
Total params: 511,391
Trainable params: 511,391
Non-trainable params: 0
_________________________________________________________________
None
Train on 1159 samples, validate on 290 samples
Epoch 1/100
1159/1159 [==============================] - 18s 16ms/step - loss: 1.0961 - acc: 0.3814 - val_loss: 1.0911 - val_acc: 0.4586
Epoch 2/100
1159/1159 [==============================] - 20s 17ms/step - loss: 1.0819 - acc: 0.4487 - val_loss: 1.0652 - val_acc: 0.4103
Epoch 3/100
1159/1159 [==============================] - 19s 17ms/step - loss: 1.0266 - acc: 0.3943 - val_loss: 0.9441 - val_acc: 0.4517
Epoch 4/100
1159/1159 [==============================] - 19s 16ms/step - loss: 0.8216 - acc: 0.5910 - val_loss: 0.7517 - val_acc: 0.6310
Epoch 5/100
1159/1159 [==============================] - 18s 16ms/step - loss: 0.6576 - acc: 0.7291 - val_loss: 0.6532 - val_acc: 0.7517
Epoch 6/100
1159/1159 [==============================] - 19s 16ms/step - loss: 0.5854 - acc: 0.7610 - val_loss: 0.6291 - val_acc: 0.7138
Epoch 7/100
1159/1159 [==============================] - 19s 16ms/step - loss: 0.5305 - acc: 0.7739 - val_loss: 0.5890 - val_acc: 0.7552
Epoch 8/100
1159/1159 [==============================] - 19s 16ms/step - loss: 0.4701 - acc: 0.8162 - val_loss: 0.5635 - val_acc: 0.7931
Epoch 9/100
1159/1159 [==============================] - 20s 17ms/step - loss: 0.4092 - acc: 0.8525 - val_loss: 0.5374 - val_acc: 0.7931
Epoch 10/100
1159/1159 [==============================] - 21s 19ms/step - loss: 0.3621 - acc: 0.8783 - val_loss: 0.5298 - val_acc: 0.8000
Epoch 11/100
1159/1159 [==============================] - 19s 16ms/step - loss: 0.3294 - acc: 0.8878 - val_loss: 0.5235 - val_acc: 0.8103
Epoch 12/100
1159/1159 [==============================] - 19s 16ms/step - loss: 0.2919 - acc: 0.9120 - val_loss: 0.5394 - val_acc: 0.8138
Epoch 13/100
1159/1159 [==============================] - 19s 16ms/step - loss: 0.2722 - acc: 0.9120 - val_loss: 0.5483 - val_acc: 0.8069
Epoch 14/100
1159/1159 [==============================] - 19s 16ms/step - loss: 0.3180 - acc: 0.8835 - val_loss: 0.5325 - val_acc: 0.8345
Epoch 15/100
1159/1159 [==============================] - 19s 16ms/step - loss: 0.2283 - acc: 0.9223 - val_loss: 0.5652 - val_acc: 0.8241
Epoch 16/100
1159/1159 [==============================] - 19s 17ms/step - loss: 0.2085 - acc: 0.9362 - val_loss: 0.5680 - val_acc: 0.8241
Epoch 00016: early stopping
290/290 [==============================] - 2s 5ms/step
Test score: 0.568043890904
Test accuracy: 0.824137931034
dict_keys(['val_loss', 'loss', 'val_acc', 'acc'])
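
Since ModelCheckpoint saved weights whenever val_acc improved, the strongest epoch (val_acc 0.8345 above) can be restored afterwards. A minimal sketch, assuming the checkpoint files from the run above are still in the working directory:


In [ ]:
import glob
from keras.models import load_model

# Filenames follow the 'weights.{epoch:03d}-{val_acc:.4f}.hdf5' pattern used above,
# so the best checkpoint can be picked by the val_acc encoded at the end of the name.
best_path = max(glob.glob('weights.*.hdf5'),
                key=lambda p: float(p.rsplit('-', 1)[1][:-5]))  # strip '.hdf5'
best_model = load_model(best_path)
print(best_model.evaluate(X_test, Y_test))  # should roughly reproduce the best val scores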

CNN-text-classification


In [49]:
%%time 

# Source
# https://github.com/bhaveshoswal/CNN-text-classification-keras/blob/master/model.py

sequence_length = X.shape[1]  # length of the padded sequences (37)
print(sequence_length)
print(len(X))

vocabulary_size = len(word_index)  # number of unique tokens (6077)
embedding_dim = 256
filter_sizes = [3,4,5]
num_filters = 128 # 512 in the source model
drop = 0.2

epochs = 20
batch_size = 30

# this returns a tensor
print("Creating Model...")
inputs = Input(shape=(sequence_length,), dtype='int32')
embedding = Embedding(input_dim=vocabulary_size, output_dim=embedding_dim, 
                      input_length=sequence_length)(inputs)
reshape = Reshape((sequence_length,embedding_dim,1))(embedding)

conv_0 = Conv2D(num_filters, kernel_size=(filter_sizes[0], embedding_dim), 
                padding='valid', kernel_initializer='normal', activation='relu')(reshape)
conv_1 = Conv2D(num_filters, kernel_size=(filter_sizes[1], embedding_dim), 
                padding='valid', kernel_initializer='normal', activation='relu')(reshape)
conv_2 = Conv2D(num_filters, kernel_size=(filter_sizes[2], embedding_dim), 
                padding='valid', kernel_initializer='normal', activation='relu')(reshape)

maxpool_0 = MaxPool2D(pool_size=(sequence_length - filter_sizes[0] + 1, 1), 
                      strides=(1,1), padding='valid')(conv_0)
maxpool_1 = MaxPool2D(pool_size=(sequence_length - filter_sizes[1] + 1, 1), 
                      strides=(1,1), padding='valid')(conv_1)
maxpool_2 = MaxPool2D(pool_size=(sequence_length - filter_sizes[2] + 1, 1), 
                      strides=(1,1), padding='valid')(conv_2)

concatenated_tensor = Concatenate(axis=1)([maxpool_0, maxpool_1, maxpool_2])
flatten = Flatten()(concatenated_tensor)
dropout = Dropout(drop)(flatten)
output = Dense(units=len(label_cat), activation='softmax')(dropout)

# this creates a model that includes
model = Model(inputs=inputs, outputs=output)
print(model.summary())


# early_stop = EarlyStopping(monitor='val_loss', patience=4, verbose=1)
checkpoint = ModelCheckpoint('weights.{epoch:03d}-{val_acc:.4f}.hdf5', 
                             monitor='val_acc', verbose=0, 
                             save_best_only=True, mode='auto')

adam = Adam(lr=1e-4, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)  # defined but unused; Adagrad is compiled below

model.compile(optimizer='Adagrad', loss='categorical_crossentropy', 
              metrics=['accuracy'])
print("Traning Model...")
network_hist = model.fit(X_train, Y_train, batch_size=batch_size, 
                         epochs=epochs, verbose=1, 
                         callbacks=[checkpoint], 
                         validation_data=(X_test, Y_test))  # starts training

score, acc = model.evaluate(X_test, Y_test)
print('Test score:', score)
print('Test accuracy:', acc)

# print(network_hist.history)
plot_history(network_hist)


37
1449
Creating Model...
__________________________________________________________________________________________________
Layer (type)                    Output Shape         Param #     Connected to                     
==================================================================================================
input_2 (InputLayer)            (None, 37)           0                                            
__________________________________________________________________________________________________
embedding_37 (Embedding)        (None, 37, 256)      1555712     input_2[0][0]                    
__________________________________________________________________________________________________
reshape_2 (Reshape)             (None, 37, 256, 1)   0           embedding_37[0][0]               
__________________________________________________________________________________________________
conv2d_4 (Conv2D)               (None, 35, 1, 128)   98432       reshape_2[0][0]                  
__________________________________________________________________________________________________
conv2d_5 (Conv2D)               (None, 34, 1, 128)   131200      reshape_2[0][0]                  
__________________________________________________________________________________________________
conv2d_6 (Conv2D)               (None, 33, 1, 128)   163968      reshape_2[0][0]                  
__________________________________________________________________________________________________
max_pooling2d_4 (MaxPooling2D)  (None, 1, 1, 128)    0           conv2d_4[0][0]                   
__________________________________________________________________________________________________
max_pooling2d_5 (MaxPooling2D)  (None, 1, 1, 128)    0           conv2d_5[0][0]                   
__________________________________________________________________________________________________
max_pooling2d_6 (MaxPooling2D)  (None, 1, 1, 128)    0           conv2d_6[0][0]                   
__________________________________________________________________________________________________
concatenate_2 (Concatenate)     (None, 3, 1, 128)    0           max_pooling2d_4[0][0]            
                                                                 max_pooling2d_5[0][0]            
                                                                 max_pooling2d_6[0][0]            
__________________________________________________________________________________________________
flatten_36 (Flatten)            (None, 384)          0           concatenate_2[0][0]              
__________________________________________________________________________________________________
dropout_70 (Dropout)            (None, 384)          0           flatten_36[0][0]                 
__________________________________________________________________________________________________
dense_70 (Dense)                (None, 3)            1155        dropout_70[0][0]                 
==================================================================================================
Total params: 1,950,467
Trainable params: 1,950,467
Non-trainable params: 0
__________________________________________________________________________________________________
None
Training Model...
Train on 1159 samples, validate on 290 samples
Epoch 1/20
1159/1159 [==============================] - 43s 37ms/step - loss: 0.6355 - acc: 0.7446 - val_loss: 0.4383 - val_acc: 0.8310
Epoch 2/20
1159/1159 [==============================] - 43s 37ms/step - loss: 0.2295 - acc: 0.9206 - val_loss: 0.4346 - val_acc: 0.8552
Epoch 3/20
1159/1159 [==============================] - 50s 44ms/step - loss: 0.1055 - acc: 0.9810 - val_loss: 0.4339 - val_acc: 0.8517
Epoch 4/20
1159/1159 [==============================] - 43s 37ms/step - loss: 0.0539 - acc: 0.9965 - val_loss: 0.4458 - val_acc: 0.8483
Epoch 5/20
1159/1159 [==============================] - 40s 35ms/step - loss: 0.0331 - acc: 0.9991 - val_loss: 0.4593 - val_acc: 0.8552
Epoch 6/20
1159/1159 [==============================] - 41s 35ms/step - loss: 0.0223 - acc: 1.0000 - val_loss: 0.4701 - val_acc: 0.8517
Epoch 7/20
1159/1159 [==============================] - 41s 35ms/step - loss: 0.0158 - acc: 1.0000 - val_loss: 0.4845 - val_acc: 0.8483
Epoch 8/20
1159/1159 [==============================] - 43s 37ms/step - loss: 0.0128 - acc: 1.0000 - val_loss: 0.4963 - val_acc: 0.8483
Epoch 9/20
1159/1159 [==============================] - 44s 38ms/step - loss: 0.0100 - acc: 1.0000 - val_loss: 0.5061 - val_acc: 0.8517
Epoch 10/20
1159/1159 [==============================] - 45s 39ms/step - loss: 0.0088 - acc: 1.0000 - val_loss: 0.5139 - val_acc: 0.8517
Epoch 11/20
1159/1159 [==============================] - 41s 35ms/step - loss: 0.0072 - acc: 1.0000 - val_loss: 0.5216 - val_acc: 0.8517
Epoch 12/20
1159/1159 [==============================] - 41s 36ms/step - loss: 0.0064 - acc: 1.0000 - val_loss: 0.5301 - val_acc: 0.8517
Epoch 13/20
1159/1159 [==============================] - 41s 35ms/step - loss: 0.0060 - acc: 1.0000 - val_loss: 0.5375 - val_acc: 0.8552
Epoch 14/20
1159/1159 [==============================] - 41s 35ms/step - loss: 0.0046 - acc: 1.0000 - val_loss: 0.5460 - val_acc: 0.8552
Epoch 15/20
1159/1159 [==============================] - 41s 35ms/step - loss: 0.0044 - acc: 1.0000 - val_loss: 0.5510 - val_acc: 0.8552
Epoch 16/20
1159/1159 [==============================] - 41s 35ms/step - loss: 0.0041 - acc: 1.0000 - val_loss: 0.5555 - val_acc: 0.8552
Epoch 17/20
1159/1159 [==============================] - 41s 36ms/step - loss: 0.0035 - acc: 1.0000 - val_loss: 0.5620 - val_acc: 0.8517
Epoch 18/20
1159/1159 [==============================] - 41s 35ms/step - loss: 0.0031 - acc: 1.0000 - val_loss: 0.5681 - val_acc: 0.8552
Epoch 19/20
1159/1159 [==============================] - 41s 35ms/step - loss: 0.0031 - acc: 1.0000 - val_loss: 0.5718 - val_acc: 0.8586
Epoch 20/20
1159/1159 [==============================] - 42s 36ms/step - loss: 0.0028 - acc: 1.0000 - val_loss: 0.5778 - val_acc: 0.8621
290/290 [==============================] - 1s 3ms/step
Test score: 0.5778315833930312
Test accuracy: 0.8620689655172413
dict_keys(['acc', 'val_acc', 'val_loss', 'loss'])
CPU times: user 19min 34s, sys: 34min 16s, total: 53min 50s
Wall time: 14min 7s
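
To score unseen text with the trained CNN, the same tokenizer and padding length have to be applied first. A minimal sketch, where new_texts is a hypothetical list of flattened keyword strings:


In [ ]:
new_texts = ['baghdad iraq sunni killed bomb iraqi attacks']
seqs = pad_sequences(tokenizer.texts_to_sequences(new_texts),
                     maxlen=sequence_length)  # pad to the training length (37)
probs = model.predict(seqs)
print(probs)                 # one row of 3 class probabilities
print(probs.argmax(axis=1))  # index into the get_dummies column order shown earlier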

In [ ]:


In [47]:
%%time


embed_dim = 128
sequence_length = X.shape[1]  # length of the padded sequences (37)
print(sequence_length)
print(len(X))
epochs = 30
batch_size = 30

# https://github.com/Theo-/sentiment-analysis-keras-conv/blob/master/train_keras.py
# Using keras to load the dataset with the top_words
# top_words = 10000
# (X_train, y_train), (X_test, y_test) = imdb.load_data(num_words=top_words)

# Pad the sequence to the same length
# max_review_length = 1600
# X_train = sequence.pad_sequences(X_train, maxlen=max_review_length)
# X_test = sequence.pad_sequences(X_test, maxlen=max_review_length)

# Using embedding from Keras
# embedding_vector_length = 300
model = Sequential()
# model.add(Embedding(top_words, embedding_vector_length, input_length=max_review_length))
model.add(Embedding(max_features, embed_dim, input_length=sequence_length))
# Convolutional model (3x conv, flatten, 2x dense)
model.add(Convolution1D(64, 3, padding='same', kernel_initializer='normal', activation='relu'))
model.add(Convolution1D(32, 3, padding='same', kernel_initializer='normal', activation='relu'))
model.add(Convolution1D(16, 3, padding='same', kernel_initializer='normal', activation='relu'))
model.add(Flatten())
model.add(Dropout(0.1))
model.add(Dense(180, activation='sigmoid'))
model.add(Dropout(0.1))
model.add(Dense(units=len(label_cat), activation='sigmoid'))  # sigmoid output, not softmax; see the note after the results below

print(model.summary())


adam = Adam(lr=1e-4, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)  # kept for comparison; sgd is used below
sgd = SGD(lr=0.01, clipnorm=1.)
# Adagrad

# Log to tensorboard
# tensorBoardCallback = TensorBoard(log_dir='./logs', write_graph=True)
model.compile(loss='categorical_crossentropy', 
              optimizer=sgd, metrics=['accuracy'])

# early_stop = EarlyStopping(monitor='val_loss', patience=5, verbose=1)
checkpoint = ModelCheckpoint('weights.{epoch:03d}-{val_acc:.4f}.hdf5', 
                             monitor='val_acc', verbose=0, 
                             save_best_only=True, mode='auto')

print("Traning Model...")
network_hist = model.fit(X_train, Y_train, batch_size=batch_size, 
                         epochs=epochs, verbose=1, 
                         callbacks=[checkpoint], 
                         validation_data=(X_test, Y_test))  # starts training

# Evaluation on the test set
# scores = model.evaluate(X_test, Y_test, verbose=0)
# print("Accuracy: %.2f%%" % (scores[1]*100))

score, acc = model.evaluate(X_test, Y_test)
print('Test score:', score)
print('Test accuracy:', acc)

# print(network_hist.history)
plot_history(network_hist)


37
1449
_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
embedding_35 (Embedding)     (None, 37, 128)           256000    
_________________________________________________________________
conv1d_100 (Conv1D)          (None, 37, 64)            24640     
_________________________________________________________________
conv1d_101 (Conv1D)          (None, 37, 32)            6176      
_________________________________________________________________
conv1d_102 (Conv1D)          (None, 37, 16)            1552      
_________________________________________________________________
flatten_34 (Flatten)         (None, 592)               0         
_________________________________________________________________
dropout_67 (Dropout)         (None, 592)               0         
_________________________________________________________________
dense_67 (Dense)             (None, 180)               106740    
_________________________________________________________________
dropout_68 (Dropout)         (None, 180)               0         
_________________________________________________________________
dense_68 (Dense)             (None, 3)                 543       
=================================================================
Total params: 395,651
Trainable params: 395,651
Non-trainable params: 0
_________________________________________________________________
None
Training Model...
Train on 1159 samples, validate on 290 samples
Epoch 1/30
1159/1159 [==============================] - 4s 3ms/step - loss: 1.1144 - acc: 0.3322 - val_loss: 1.0831 - val_acc: 0.4103
Epoch 2/30
1159/1159 [==============================] - 5s 4ms/step - loss: 1.0817 - acc: 0.3710 - val_loss: 1.0811 - val_acc: 0.4103
Epoch 3/30
1159/1159 [==============================] - 4s 4ms/step - loss: 1.0892 - acc: 0.3770 - val_loss: 1.0812 - val_acc: 0.4103
Epoch 4/30
1159/1159 [==============================] - 5s 4ms/step - loss: 1.0832 - acc: 0.3891 - val_loss: 1.0816 - val_acc: 0.4103
Epoch 5/30
1159/1159 [==============================] - 5s 4ms/step - loss: 1.0852 - acc: 0.3770 - val_loss: 1.0822 - val_acc: 0.4103
Epoch 6/30
1159/1159 [==============================] - 6s 5ms/step - loss: 1.0831 - acc: 0.3710 - val_loss: 1.0816 - val_acc: 0.4103
Epoch 7/30
1159/1159 [==============================] - 5s 4ms/step - loss: 1.0886 - acc: 0.3900 - val_loss: 1.0817 - val_acc: 0.4103
Epoch 8/30
1159/1159 [==============================] - 4s 4ms/step - loss: 1.0842 - acc: 0.3831 - val_loss: 1.0818 - val_acc: 0.4103
Epoch 9/30
1159/1159 [==============================] - 4s 4ms/step - loss: 1.0861 - acc: 0.3900 - val_loss: 1.0823 - val_acc: 0.4103
Epoch 10/30
1159/1159 [==============================] - 7s 6ms/step - loss: 1.0832 - acc: 0.3943 - val_loss: 1.0811 - val_acc: 0.4103
Epoch 11/30
1159/1159 [==============================] - 5s 4ms/step - loss: 1.0831 - acc: 0.3727 - val_loss: 1.0818 - val_acc: 0.4103
Epoch 12/30
1159/1159 [==============================] - 4s 4ms/step - loss: 1.0852 - acc: 0.3788 - val_loss: 1.0824 - val_acc: 0.4103
Epoch 13/30
1159/1159 [==============================] - 4s 4ms/step - loss: 1.0816 - acc: 0.3874 - val_loss: 1.0828 - val_acc: 0.4103
Epoch 14/30
1159/1159 [==============================] - 4s 4ms/step - loss: 1.0823 - acc: 0.3934 - val_loss: 1.0824 - val_acc: 0.4103
Epoch 15/30
1159/1159 [==============================] - 5s 4ms/step - loss: 1.0833 - acc: 0.3796 - val_loss: 1.0827 - val_acc: 0.4103
Epoch 16/30
1159/1159 [==============================] - 5s 5ms/step - loss: 1.0801 - acc: 0.4047 - val_loss: 1.0814 - val_acc: 0.4103
Epoch 17/30
1159/1159 [==============================] - 4s 4ms/step - loss: 1.0828 - acc: 0.3978 - val_loss: 1.0819 - val_acc: 0.4103
Epoch 18/30
1159/1159 [==============================] - 4s 4ms/step - loss: 1.0827 - acc: 0.3883 - val_loss: 1.0824 - val_acc: 0.4103
Epoch 19/30
1159/1159 [==============================] - 4s 4ms/step - loss: 1.0848 - acc: 0.3753 - val_loss: 1.0827 - val_acc: 0.4103
Epoch 20/30
1159/1159 [==============================] - 4s 4ms/step - loss: 1.0813 - acc: 0.4047 - val_loss: 1.0827 - val_acc: 0.4103
Epoch 21/30
1159/1159 [==============================] - 4s 4ms/step - loss: 1.0832 - acc: 0.3667 - val_loss: 1.0822 - val_acc: 0.4103
Epoch 22/30
1159/1159 [==============================] - 4s 4ms/step - loss: 1.0799 - acc: 0.4012 - val_loss: 1.0822 - val_acc: 0.4103
Epoch 23/30
1159/1159 [==============================] - 4s 4ms/step - loss: 1.0850 - acc: 0.3840 - val_loss: 1.0819 - val_acc: 0.4103
Epoch 24/30
1159/1159 [==============================] - 4s 4ms/step - loss: 1.0821 - acc: 0.3917 - val_loss: 1.0827 - val_acc: 0.4103
Epoch 25/30
1159/1159 [==============================] - 5s 4ms/step - loss: 1.0828 - acc: 0.3883 - val_loss: 1.0835 - val_acc: 0.4103
Epoch 26/30
1159/1159 [==============================] - 7s 6ms/step - loss: 1.0810 - acc: 0.3779 - val_loss: 1.0814 - val_acc: 0.4103
Epoch 27/30
1159/1159 [==============================] - 7s 6ms/step - loss: 1.0808 - acc: 0.3891 - val_loss: 1.0820 - val_acc: 0.4103
Epoch 28/30
1159/1159 [==============================] - 5s 4ms/step - loss: 1.0819 - acc: 0.4021 - val_loss: 1.0826 - val_acc: 0.4103
Epoch 29/30
1159/1159 [==============================] - 4s 4ms/step - loss: 1.0794 - acc: 0.4185 - val_loss: 1.0824 - val_acc: 0.4103
Epoch 30/30
1159/1159 [==============================] - 4s 4ms/step - loss: 1.0803 - acc: 0.3762 - val_loss: 1.0820 - val_acc: 0.4103
290/290 [==============================] - 0s 390us/step
Test score: 1.0820397779859345
Test accuracy: 0.4103448275862069
dict_keys(['acc', 'val_acc', 'val_loss', 'loss'])
CPU times: user 3min 6s, sys: 5min 48s, total: 8min 55s
Wall time: 2min 27s
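
The flat 41% validation accuracy suggests this model never escaped the majority-class baseline. Two plausible culprits are the sigmoid output layer (softmax is the standard pairing with categorical_crossentropy) and plain SGD with a small learning rate. A hypothetical variant worth trying, not run in this notebook:


In [ ]:
# Same 1D-conv stack, but with a ReLU hidden layer, a softmax output, and the
# Adam optimizer defined above instead of plain SGD (assumptions, not the setup run above).
model2 = Sequential()
model2.add(Embedding(max_features, embed_dim, input_length=sequence_length))
model2.add(Convolution1D(64, 3, padding='same', activation='relu'))
model2.add(Convolution1D(32, 3, padding='same', activation='relu'))
model2.add(Convolution1D(16, 3, padding='same', activation='relu'))
model2.add(Flatten())
model2.add(Dropout(0.1))
model2.add(Dense(180, activation='relu'))
model2.add(Dropout(0.1))
model2.add(Dense(len(label_cat), activation='softmax'))
model2.compile(loss='categorical_crossentropy', optimizer=adam, metrics=['accuracy'])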

In [ ]: