In [31]:
import re
import tarfile
from functools import reduce
import numpy as np

from keras.utils.data_utils import get_file
from keras.layers.embeddings import Embedding
from keras import layers
from keras.layers import Input, Embedding, Dropout, RepeatVector, Dense, recurrent, Merge
from keras.models import Model, Sequential
from keras.preprocessing.sequence import pad_sequences

In [2]:
RNN = recurrent.LSTM
EMBED_HIDDEN_SIZE = 50
SENT_HIDDEN_SIZE = 100
QUERY_HIDDEN_SIZE = 100
BATCH_SIZE = 32
EPOCHS = 40
print('RNN / Embed / Sent / Query = {}, {}, {}, {}'.format(RNN,
                                                           EMBED_HIDDEN_SIZE,
                                                           SENT_HIDDEN_SIZE,
                                                           QUERY_HIDDEN_SIZE))


RNN / Embed / Sent / Query = <class 'keras.layers.recurrent.LSTM'>, 50, 100, 100

In [3]:
try:
    path = get_file('babi-tasks-v1-2.tar.gz', origin='https://s3.amazonaws.com/text-datasets/babi_tasks_1-20_v1-2.tar.gz')
except:
    print('Error downloading dataset.')

In [4]:
path


Out[4]:
'/Users/koichiro.mori/.keras/datasets/babi-tasks-v1-2.tar.gz'

In [5]:
tar = tarfile.open(path)

In [6]:
challenge = 'tasks_1-20_v1-2/en/qa2_two-supporting-facts_{}.txt'

In [7]:
def tokenize(sent):
    return [x.strip() for x in re.split('(\W+)?', sent) if x.strip()]

def parse_stories(lines, only_supporting=False):
    data = []
    story = []
    for line in lines:
        line = line.decode('utf-8').strip()
        # 最初の空白のみsplitする
        nid, line = line.split(' ', 1)
        nid = int(nid)
        if nid == 1:
            story = []
        if '\t' in line:  # 質問、回答、根拠が書いてある行
            q, a, supporting = line.split('\t')
            q = tokenize(q)  # 単語に分割
            substory = None
            if only_supporting:
                # 根拠となる文のみを取得
                supporting = map(int, supporting.split())
                substory = [story[i - 1] for i in supporting]
            else:
                # 全文を取得
                substory = [x for x in story if x]
            data.append((substory, q, a))
            story.append('')  # 質問は空の文章を追加しておく
        else:
            sent = tokenize(line)
            story.append(sent)
    return data

def get_stories(f, only_supporting=False, max_length=None):
    # max_lengthが指定されたらmax_lengthより長いストーリーは無視される
    # data[storyID] = (substory, quetion, answer)
    data = parse_stories(f.readlines(), only_supporting=only_supporting)
    # 単語を結合して1つのlistにする関数
    flatten = lambda data: reduce(lambda x, y: x + y, data)
    # substoryを単語リストにする
    data = [(flatten(story), q, answer) for story, q, answer in data if not max_length or len(flatten(story)) < max_length]
    return data

In [8]:
# train[storyID] = ([story word list], [question word list], answer word)
train = get_stories(tar.extractfile(challenge.format('train')))
test = get_stories(tar.extractfile(challenge.format('test')))
print(len(train), len(test))
print(train[0])


/Users/koichiro.mori/.pyenv/versions/anaconda3-4.2.0/lib/python3.5/re.py:203: FutureWarning: split() requires a non-empty pattern match.
  return _compile(pattern, flags).split(string, maxsplit)
1000 1000
(['Mary', 'moved', 'to', 'the', 'bathroom', '.', 'Sandra', 'journeyed', 'to', 'the', 'bedroom', '.', 'Mary', 'got', 'the', 'football', 'there', '.', 'John', 'went', 'to', 'the', 'kitchen', '.', 'Mary', 'went', 'back', 'to', 'the', 'kitchen', '.', 'Mary', 'went', 'back', 'to', 'the', 'garden', '.'], ['Where', 'is', 'the', 'football', '?'], 'garden')

In [9]:
# 訓練データとテストデータに出現する語彙集合
vocab = set()
for story, q, answer in train + test:
    vocab |= set(story + q + [answer])
vocab = sorted(vocab)
print(len(vocab), vocab[:100])


35 ['.', '?', 'Daniel', 'John', 'Mary', 'Sandra', 'Where', 'apple', 'back', 'bathroom', 'bedroom', 'discarded', 'down', 'dropped', 'football', 'garden', 'got', 'grabbed', 'hallway', 'is', 'journeyed', 'kitchen', 'left', 'milk', 'moved', 'office', 'picked', 'put', 'the', 'there', 'to', 'took', 'travelled', 'up', 'went']

In [10]:
# 0はpad用にあけておく
# 単語 => 単語IDへの写像
vocab_size = len(vocab) + 1
word_idx = dict((c, i + 1) for i, c in enumerate(vocab))
print(word_idx)

# ストーリーと質問の最大長を計算
story_maxlen = max(map(len, (x for x, _, _ in train + test)))
query_maxlen = max(map(len, (x for _, x, _ in train + test)))
print('story_maxlen', story_maxlen)
print('query_maxlen', query_maxlen)


{'to': 31, 'down': 13, 'bathroom': 10, 'garden': 16, 'John': 4, 'grabbed': 18, 'Where': 7, 'Daniel': 3, 'apple': 8, 'kitchen': 22, 'back': 9, 'took': 32, '.': 1, 'hallway': 19, 'discarded': 12, 'bedroom': 11, 'there': 30, 'put': 28, 'journeyed': 21, 'office': 26, 'dropped': 14, 'picked': 27, 'went': 35, 'is': 20, 'Sandra': 6, 'moved': 25, 'football': 15, '?': 2, 'milk': 24, 'left': 23, 'Mary': 5, 'travelled': 33, 'up': 34, 'the': 29, 'got': 17}
story_maxlen 552
query_maxlen 5

In [11]:
def vectorize_stories(data, word_idx, story_maxlen, query_maxlen):
    xs = []
    xqs = []
    ys = []
    for story, query, answer in data:
        # storyとqueryは単語ID系列で作成
        x = [word_idx[w] for w in story]
        xq = [word_idx[w] for w in query]
        # 回答はvocabの長さのベクトル(one-hot-encoding)
        y = np.zeros(len(word_idx) + 1)
        y[word_idx[answer]] = 1
        xs.append(x)
        xqs.append(xq)
        ys.append(y)
    xs = pad_sequences(xs, maxlen=story_maxlen)
    xqs = pad_sequences(xqs, maxlen=query_maxlen)
    ys = np.array(ys)
    return xs, xqs, ys

In [12]:
idx_word = {y:x for x,y in word_idx.items()}
print(idx_word)


{1: '.', 2: '?', 3: 'Daniel', 4: 'John', 5: 'Mary', 6: 'Sandra', 7: 'Where', 8: 'apple', 9: 'back', 10: 'bathroom', 11: 'bedroom', 12: 'discarded', 13: 'down', 14: 'dropped', 15: 'football', 16: 'garden', 17: 'got', 18: 'grabbed', 19: 'hallway', 20: 'is', 21: 'journeyed', 22: 'kitchen', 23: 'left', 24: 'milk', 25: 'moved', 26: 'office', 27: 'picked', 28: 'put', 29: 'the', 30: 'there', 31: 'to', 32: 'took', 33: 'travelled', 34: 'up', 35: 'went'}

In [15]:
# 文章をベクトル化
x, xq, y = vectorize_stories(train, word_idx, story_maxlen, query_maxlen)
tx, txq, ty = vectorize_stories(test, word_idx, story_maxlen, query_maxlen)
print(len(x))
print(len(xq))
print(len(y))
print(x[0])
print(xq[0], [idx_word[i] for i in xq[0]])
print(y[0], idx_word[np.argmax(y[0])])


1000
1000
1000
[ 0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0
  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0
  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0
  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0
  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0
  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0
  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0
  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0
  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0
  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0
  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0
  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0
  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0
  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0
  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0
  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0
  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0
  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0
  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0
  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0
  0  0  0  0  0  0  0  0  0  0  0  0  0  0  5 25 31 29 10  1  6 21 31 29 11
  1  5 17 29 15 30  1  4 35 31 29 22  1  5 35  9 31 29 22  1  5 35  9 31 29
 16  1]
[ 7 20 29 15  2] ['Where', 'is', 'the', 'football', '?']
[ 0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  1.  0.
  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.] garden

In [16]:
pad_sequences([[1,2,3,4,5], [1,2,3], [8]], 10)


Out[16]:
array([[0, 0, 0, 0, 0, 1, 2, 3, 4, 5],
       [0, 0, 0, 0, 0, 0, 0, 1, 2, 3],
       [0, 0, 0, 0, 0, 0, 0, 0, 0, 8]], dtype=int32)

In [17]:
print('vocab = {} {}'.format(len(vocab), vocab))
print('x.shape = {}'.format(x.shape))
print('xq.shape = {}'.format(xq.shape))
print('y.shape = {}'.format(y.shape))
print('story_maxlen, query_maxlen = {} {}'.format(story_maxlen, query_maxlen))


vocab = 35 ['.', '?', 'Daniel', 'John', 'Mary', 'Sandra', 'Where', 'apple', 'back', 'bathroom', 'bedroom', 'discarded', 'down', 'dropped', 'football', 'garden', 'got', 'grabbed', 'hallway', 'is', 'journeyed', 'kitchen', 'left', 'milk', 'moved', 'office', 'picked', 'put', 'the', 'there', 'to', 'took', 'travelled', 'up', 'went']
x.shape = (1000, 552)
xq.shape = (1000, 5)
y.shape = (1000, 36)
story_maxlen, query_maxlen = 552 5

In [18]:
print('Build model ...')

sentence = layers.Input(shape=(story_maxlen, ), dtype='int32')
encoded_sentence = Embedding(vocab_size, EMBED_HIDDEN_SIZE)(sentence)
encode_sentence = Dropout(0.3)(encoded_sentence)

question = layers.Input(shape=(query_maxlen, ), dtype='int32')
encoded_question = Embedding(vocab_size, EMBED_HIDDEN_SIZE)(question)
encoded_question = Dropout(0.3)(encoded_question)
encoded_question = RNN(EMBED_HIDDEN_SIZE)(encoded_question)
encoded_question = RepeatVector(story_maxlen)(encoded_question)

merged = layers.add([encoded_sentence, encoded_question])
merged = RNN(EMBED_HIDDEN_SIZE)(merged)
merged = Dropout(0.3)(merged)
preds = Dense(vocab_size, activation='softmax')(merged)

model = Model([sentence, question], preds)


Build model ...

In [19]:
model.summary()


____________________________________________________________________________________________________
Layer (type)                     Output Shape          Param #     Connected to                     
====================================================================================================
input_2 (InputLayer)             (None, 5)             0                                            
____________________________________________________________________________________________________
embedding_2 (Embedding)          (None, 5, 50)         1800        input_2[0][0]                    
____________________________________________________________________________________________________
dropout_2 (Dropout)              (None, 5, 50)         0           embedding_2[0][0]                
____________________________________________________________________________________________________
input_1 (InputLayer)             (None, 552)           0                                            
____________________________________________________________________________________________________
lstm_1 (LSTM)                    (None, 50)            20200       dropout_2[0][0]                  
____________________________________________________________________________________________________
embedding_1 (Embedding)          (None, 552, 50)       1800        input_1[0][0]                    
____________________________________________________________________________________________________
repeat_vector_1 (RepeatVector)   (None, 552, 50)       0           lstm_1[0][0]                     
____________________________________________________________________________________________________
add_1 (Add)                      (None, 552, 50)       0           embedding_1[0][0]                
                                                                   repeat_vector_1[0][0]            
____________________________________________________________________________________________________
lstm_2 (LSTM)                    (None, 50)            20200       add_1[0][0]                      
____________________________________________________________________________________________________
dropout_3 (Dropout)              (None, 50)            0           lstm_2[0][0]                     
____________________________________________________________________________________________________
dense_1 (Dense)                  (None, 36)            1836        dropout_3[0][0]                  
====================================================================================================
Total params: 45,836
Trainable params: 45,836
Non-trainable params: 0
____________________________________________________________________________________________________

In [22]:
model.compile(optimizer='adam',
              loss='categorical_crossentropy',
              metrics=['accuracy'])

In [23]:
y.shape


Out[23]:
(1000, 36)

In [24]:
print('Training')
model.fit([x, xq], y,
          batch_size=BATCH_SIZE,
          epochs=EPOCHS,
          validation_split=0.05)


Training
Train on 950 samples, validate on 50 samples
Epoch 1/40
950/950 [==============================] - 50s - loss: 1.8081 - acc: 0.2116 - val_loss: 1.7693 - val_acc: 0.1400
Epoch 2/40
950/950 [==============================] - 49s - loss: 1.7926 - acc: 0.2242 - val_loss: 1.7677 - val_acc: 0.1800
Epoch 3/40
950/950 [==============================] - 48s - loss: 1.7784 - acc: 0.2000 - val_loss: 1.7680 - val_acc: 0.2000
Epoch 4/40
950/950 [==============================] - 49s - loss: 1.7474 - acc: 0.2516 - val_loss: 1.7511 - val_acc: 0.2200
Epoch 5/40
950/950 [==============================] - 49s - loss: 1.7546 - acc: 0.2326 - val_loss: 1.7563 - val_acc: 0.3000
Epoch 6/40
950/950 [==============================] - 49s - loss: 1.7526 - acc: 0.2200 - val_loss: 1.7494 - val_acc: 0.3000
Epoch 7/40
950/950 [==============================] - 49s - loss: 1.7438 - acc: 0.2442 - val_loss: 1.7334 - val_acc: 0.3600
Epoch 8/40
950/950 [==============================] - 49s - loss: 1.7165 - acc: 0.2737 - val_loss: 1.7469 - val_acc: 0.3200
Epoch 9/40
950/950 [==============================] - 49s - loss: 1.7099 - acc: 0.2821 - val_loss: 1.7097 - val_acc: 0.3400
Epoch 10/40
950/950 [==============================] - 49s - loss: 1.6947 - acc: 0.3000 - val_loss: 1.7072 - val_acc: 0.3600
Epoch 11/40
950/950 [==============================] - 50s - loss: 1.6863 - acc: 0.2842 - val_loss: 1.6961 - val_acc: 0.3200
Epoch 12/40
950/950 [==============================] - 51s - loss: 1.6804 - acc: 0.3011 - val_loss: 1.7167 - val_acc: 0.3800
Epoch 13/40
950/950 [==============================] - 50s - loss: 1.6656 - acc: 0.3147 - val_loss: 1.7097 - val_acc: 0.3600
Epoch 14/40
950/950 [==============================] - 50s - loss: 1.6371 - acc: 0.3411 - val_loss: 1.7047 - val_acc: 0.3400
Epoch 15/40
950/950 [==============================] - 50s - loss: 1.6894 - acc: 0.2937 - val_loss: 1.6795 - val_acc: 0.4200
Epoch 16/40
950/950 [==============================] - 49s - loss: 1.6510 - acc: 0.3263 - val_loss: 1.7044 - val_acc: 0.3800
Epoch 17/40
950/950 [==============================] - 49s - loss: 1.6378 - acc: 0.3147 - val_loss: 1.7401 - val_acc: 0.3400
Epoch 18/40
950/950 [==============================] - 49s - loss: 1.6221 - acc: 0.3484 - val_loss: 1.6810 - val_acc: 0.3400
Epoch 19/40
950/950 [==============================] - 49s - loss: 1.6002 - acc: 0.3726 - val_loss: 1.7556 - val_acc: 0.2400
Epoch 20/40
950/950 [==============================] - 49s - loss: 1.5897 - acc: 0.3695 - val_loss: 1.6753 - val_acc: 0.3800
Epoch 21/40
950/950 [==============================] - 49s - loss: 1.6195 - acc: 0.3432 - val_loss: 1.6933 - val_acc: 0.3800
Epoch 22/40
950/950 [==============================] - 52s - loss: 1.5801 - acc: 0.3821 - val_loss: 1.6943 - val_acc: 0.3800
Epoch 23/40
950/950 [==============================] - 50s - loss: 1.5932 - acc: 0.3547 - val_loss: 1.7344 - val_acc: 0.3800
Epoch 24/40
950/950 [==============================] - 49s - loss: 1.5868 - acc: 0.3611 - val_loss: 1.7104 - val_acc: 0.3800
Epoch 25/40
950/950 [==============================] - 50s - loss: 1.5450 - acc: 0.3789 - val_loss: 1.7094 - val_acc: 0.3600
Epoch 26/40
950/950 [==============================] - 48s - loss: 1.5441 - acc: 0.3874 - val_loss: 1.6418 - val_acc: 0.4000
Epoch 27/40
950/950 [==============================] - 48s - loss: 1.5116 - acc: 0.3926 - val_loss: 1.6690 - val_acc: 0.4400
Epoch 28/40
950/950 [==============================] - 48s - loss: 1.5529 - acc: 0.3768 - val_loss: 1.7181 - val_acc: 0.4000
Epoch 29/40
950/950 [==============================] - 50s - loss: 1.5296 - acc: 0.3947 - val_loss: 1.7117 - val_acc: 0.4200
Epoch 30/40
950/950 [==============================] - 50s - loss: 1.5066 - acc: 0.4053 - val_loss: 1.6835 - val_acc: 0.3800
Epoch 31/40
950/950 [==============================] - 52s - loss: 1.4944 - acc: 0.4042 - val_loss: 1.6611 - val_acc: 0.3600
Epoch 32/40
950/950 [==============================] - 51s - loss: 1.4926 - acc: 0.4032 - val_loss: 1.7538 - val_acc: 0.3800
Epoch 33/40
950/950 [==============================] - 50s - loss: 1.4846 - acc: 0.3947 - val_loss: 1.6471 - val_acc: 0.3600
Epoch 34/40
950/950 [==============================] - 49s - loss: 1.4688 - acc: 0.4232 - val_loss: 1.6987 - val_acc: 0.3200
Epoch 35/40
950/950 [==============================] - 49s - loss: 1.4439 - acc: 0.4463 - val_loss: 1.6175 - val_acc: 0.4000
Epoch 36/40
950/950 [==============================] - 50s - loss: 1.4510 - acc: 0.4221 - val_loss: 1.7254 - val_acc: 0.4400
Epoch 37/40
950/950 [==============================] - 49s - loss: 1.4299 - acc: 0.4505 - val_loss: 1.5108 - val_acc: 0.4400
Epoch 38/40
950/950 [==============================] - 49s - loss: 1.4041 - acc: 0.4484 - val_loss: 1.6011 - val_acc: 0.4200
Epoch 39/40
950/950 [==============================] - 49s - loss: 1.4163 - acc: 0.4368 - val_loss: 1.7343 - val_acc: 0.3800
Epoch 40/40
950/950 [==============================] - 49s - loss: 1.4018 - acc: 0.4432 - val_loss: 1.6533 - val_acc: 0.4400
Out[24]:
<keras.callbacks.History at 0x12ef70438>

In [25]:
loss, acc = model.evaluate([tx, txq], ty,
                           batch_size=BATCH_SIZE)
print('Test loss / test accuracy = {:.4f} / {:.4f}'.format(loss, acc))


1000/1000 [==============================] - 10s    
Test loss / test accuracy = 1.5878 / 0.3660

architecture2


In [34]:
# sentence = layers.Input(shape=(story_maxlen, ), dtype='int32')
# encoded_sentence = Embedding(vocab_size, EMBED_HIDDEN_SIZE)(sentence)
# encode_sentence = Dropout(0.3)(encoded_sentence)

# question = layers.Input(shape=(query_maxlen, ), dtype='int32')
# encoded_question = Embedding(vocab_size, EMBED_HIDDEN_SIZE)(question)
# encoded_question = Dropout(0.3)(encoded_question)
# encoded_question = RNN(EMBED_HIDDEN_SIZE)(encoded_question)
# encoded_question = RepeatVector(story_maxlen)(encoded_question)

# merged = layers.add([encoded_sentence, encoded_question])
# merged = RNN(EMBED_HIDDEN_SIZE)(merged)
# merged = Dropout(0.3)(merged)
# preds = Dense(vocab_size, activation='softmax')(merged)

# model = Model([sentence, question], preds)

sentence = layers.Input(shape=(story_maxlen, ), dtype='int32')
encoded_sentence = Embedding(vocab_size, EMBED_HIDDEN_SIZE)(sentence)
encoded_sentence = RNN(EMBED_HIDDEN_SIZE)(encoded_sentence)

question = layers.Input(shape=(query_maxlen, ), dtype='int32')
encoded_question = Embedding(vocab_size, EMBED_HIDDEN_SIZE)(question)
encoded_question = RNN(EMBED_HIDDEN_SIZE)(encoded_question)

concated = layers.concatenate([encoded_sentence, encoded_question])
preds = Dense(vocab_size, activation='softmax')(concated)

model = Model([sentence, question], preds)

In [35]:
model.summary()


____________________________________________________________________________________________________
Layer (type)                     Output Shape          Param #     Connected to                     
====================================================================================================
input_5 (InputLayer)             (None, 552)           0                                            
____________________________________________________________________________________________________
input_6 (InputLayer)             (None, 5)             0                                            
____________________________________________________________________________________________________
embedding_7 (Embedding)          (None, 552, 50)       1800        input_5[0][0]                    
____________________________________________________________________________________________________
embedding_8 (Embedding)          (None, 5, 50)         1800        input_6[0][0]                    
____________________________________________________________________________________________________
lstm_5 (LSTM)                    (None, 50)            20200       embedding_7[0][0]                
____________________________________________________________________________________________________
lstm_6 (LSTM)                    (None, 50)            20200       embedding_8[0][0]                
____________________________________________________________________________________________________
concatenate_2 (Concatenate)      (None, 100)           0           lstm_5[0][0]                     
                                                                   lstm_6[0][0]                     
____________________________________________________________________________________________________
dense_3 (Dense)                  (None, 36)            3636        concatenate_2[0][0]              
====================================================================================================
Total params: 47,636
Trainable params: 47,636
Non-trainable params: 0
____________________________________________________________________________________________________

In [36]:
model.compile(optimizer='adam',
              loss='categorical_crossentropy',
              metrics=['accuracy'])

In [37]:
print('Training')
model.fit([x, xq], y,
          batch_size=BATCH_SIZE,
          epochs=EPOCHS,
          validation_split=0.05)


Training
Train on 950 samples, validate on 50 samples
Epoch 1/40
950/950 [==============================] - 48s - loss: 3.0932 - acc: 0.1611 - val_loss: 2.1091 - val_acc: 0.1400
Epoch 2/40
950/950 [==============================] - 45s - loss: 1.8625 - acc: 0.1895 - val_loss: 1.7594 - val_acc: 0.3000
Epoch 3/40
950/950 [==============================] - 46s - loss: 1.7928 - acc: 0.2011 - val_loss: 1.8598 - val_acc: 0.0600
Epoch 4/40
950/950 [==============================] - 45s - loss: 1.7923 - acc: 0.2074 - val_loss: 1.8603 - val_acc: 0.0600
Epoch 5/40
950/950 [==============================] - 45s - loss: 1.7997 - acc: 0.1937 - val_loss: 1.7662 - val_acc: 0.3000
Epoch 6/40
950/950 [==============================] - 45s - loss: 1.7893 - acc: 0.1947 - val_loss: 1.8175 - val_acc: 0.0600
Epoch 7/40
950/950 [==============================] - 45s - loss: 1.7947 - acc: 0.2042 - val_loss: 1.8240 - val_acc: 0.0600
Epoch 8/40
950/950 [==============================] - 45s - loss: 1.7877 - acc: 0.2126 - val_loss: 1.7788 - val_acc: 0.3000
Epoch 9/40
950/950 [==============================] - 45s - loss: 1.7900 - acc: 0.2021 - val_loss: 1.8258 - val_acc: 0.0600
Epoch 10/40
950/950 [==============================] - 45s - loss: 1.7849 - acc: 0.2063 - val_loss: 1.7788 - val_acc: 0.3000
Epoch 11/40
950/950 [==============================] - 46s - loss: 1.7791 - acc: 0.2232 - val_loss: 1.8174 - val_acc: 0.1600
Epoch 12/40
950/950 [==============================] - 45s - loss: 1.7824 - acc: 0.2189 - val_loss: 1.7835 - val_acc: 0.3400
Epoch 13/40
950/950 [==============================] - 45s - loss: 1.7802 - acc: 0.2211 - val_loss: 1.7970 - val_acc: 0.2600
Epoch 14/40
950/950 [==============================] - 45s - loss: 1.7682 - acc: 0.2495 - val_loss: 1.7629 - val_acc: 0.3000
Epoch 15/40
950/950 [==============================] - 45s - loss: 1.7841 - acc: 0.2126 - val_loss: 1.7491 - val_acc: 0.3000
Epoch 16/40
950/950 [==============================] - 45s - loss: 1.7723 - acc: 0.2158 - val_loss: 1.7927 - val_acc: 0.2200
Epoch 17/40
950/950 [==============================] - 45s - loss: 1.7691 - acc: 0.2379 - val_loss: 1.7761 - val_acc: 0.2600
Epoch 18/40
950/950 [==============================] - 45s - loss: 1.7497 - acc: 0.2832 - val_loss: 1.7602 - val_acc: 0.2000
Epoch 19/40
950/950 [==============================] - 45s - loss: 1.7364 - acc: 0.2779 - val_loss: 1.7449 - val_acc: 0.2800
Epoch 20/40
950/950 [==============================] - 45s - loss: 1.7229 - acc: 0.2916 - val_loss: 1.7663 - val_acc: 0.2800
Epoch 21/40
950/950 [==============================] - 45s - loss: 1.7296 - acc: 0.2832 - val_loss: 1.6920 - val_acc: 0.3400
Epoch 22/40
950/950 [==============================] - 45s - loss: 1.7251 - acc: 0.2874 - val_loss: 1.6716 - val_acc: 0.3600
Epoch 23/40
950/950 [==============================] - 45s - loss: 1.7165 - acc: 0.3042 - val_loss: 1.6772 - val_acc: 0.3600
Epoch 24/40
950/950 [==============================] - 45s - loss: 1.7086 - acc: 0.2968 - val_loss: 1.7153 - val_acc: 0.3400
Epoch 25/40
950/950 [==============================] - 45s - loss: 1.7050 - acc: 0.2989 - val_loss: 1.6757 - val_acc: 0.4200
Epoch 26/40
950/950 [==============================] - 45s - loss: 1.6819 - acc: 0.3200 - val_loss: 1.6888 - val_acc: 0.4800
Epoch 27/40
950/950 [==============================] - 45s - loss: 1.6698 - acc: 0.3274 - val_loss: 1.6814 - val_acc: 0.4000
Epoch 28/40
950/950 [==============================] - 45s - loss: 1.6592 - acc: 0.3242 - val_loss: 1.7477 - val_acc: 0.3200
Epoch 29/40
950/950 [==============================] - 45s - loss: 1.6718 - acc: 0.3179 - val_loss: 1.6300 - val_acc: 0.4600
Epoch 30/40
950/950 [==============================] - 45s - loss: 1.6456 - acc: 0.3432 - val_loss: 1.6763 - val_acc: 0.3800
Epoch 31/40
950/950 [==============================] - 45s - loss: 1.6462 - acc: 0.3337 - val_loss: 1.6731 - val_acc: 0.4000
Epoch 32/40
950/950 [==============================] - 45s - loss: 1.6403 - acc: 0.3200 - val_loss: 1.6339 - val_acc: 0.4200
Epoch 33/40
950/950 [==============================] - 45s - loss: 1.6249 - acc: 0.3526 - val_loss: 1.6485 - val_acc: 0.4000
Epoch 34/40
950/950 [==============================] - 45s - loss: 1.6107 - acc: 0.3484 - val_loss: 1.5845 - val_acc: 0.4800
Epoch 35/40
950/950 [==============================] - 45s - loss: 1.5842 - acc: 0.3600 - val_loss: 1.5889 - val_acc: 0.4600
Epoch 36/40
950/950 [==============================] - 45s - loss: 1.5843 - acc: 0.3811 - val_loss: 1.6102 - val_acc: 0.4200
Epoch 37/40
950/950 [==============================] - 45s - loss: 1.5659 - acc: 0.3884 - val_loss: 1.5904 - val_acc: 0.4600
Epoch 38/40
950/950 [==============================] - 45s - loss: 1.5465 - acc: 0.3905 - val_loss: 1.6496 - val_acc: 0.3800
Epoch 39/40
950/950 [==============================] - 45s - loss: 1.5543 - acc: 0.3811 - val_loss: 1.5452 - val_acc: 0.4600
Epoch 40/40
950/950 [==============================] - 45s - loss: 1.5194 - acc: 0.3979 - val_loss: 1.5366 - val_acc: 0.4600
Out[37]:
<keras.callbacks.History at 0x13402be48>

In [ ]: