In [31]:
import re
import tarfile
from functools import reduce
import numpy as np
from keras.utils.data_utils import get_file
from keras import layers
from keras.layers import Input, Embedding, Dropout, RepeatVector, Dense, recurrent
from keras.models import Model
from keras.preprocessing.sequence import pad_sequences
In [2]:
RNN = recurrent.LSTM
EMBED_HIDDEN_SIZE = 50
SENT_HIDDEN_SIZE = 100
QUERY_HIDDEN_SIZE = 100
BATCH_SIZE = 32
EPOCHS = 40
print('RNN / Embed / Sent / Query = {}, {}, {}, {}'.format(RNN,
                                                           EMBED_HIDDEN_SIZE,
                                                           SENT_HIDDEN_SIZE,
                                                           QUERY_HIDDEN_SIZE))
In [3]:
try:
    path = get_file('babi-tasks-v1-2.tar.gz',
                    origin='https://s3.amazonaws.com/text-datasets/babi_tasks_1-20_v1-2.tar.gz')
except Exception:
    print('Error downloading dataset.')
In [4]:
path
Out[4]:
In [5]:
tar = tarfile.open(path)
In [6]:
challenge = 'tasks_1-20_v1-2/en/qa2_two-supporting-facts_{}.txt'
In [7]:
def tokenize(sent):
    '''Split a sentence into tokens, keeping punctuation as separate tokens.'''
    return [x.strip() for x in re.split(r'(\W+)', sent) if x.strip()]

def parse_stories(lines, only_supporting=False):
    data = []
    story = []
    for line in lines:
        line = line.decode('utf-8').strip()
        # split only on the first space
        nid, line = line.split(' ', 1)
        nid = int(nid)
        if nid == 1:
            story = []
        if '\t' in line:  # this line holds a question, an answer and the supporting fact IDs
            q, a, supporting = line.split('\t')
            q = tokenize(q)  # split into words
            substory = None
            if only_supporting:
                # keep only the sentences that support the answer
                supporting = map(int, supporting.split())
                substory = [story[i - 1] for i in supporting]
            else:
                # keep the whole story so far
                substory = [x for x in story if x]
            data.append((substory, q, a))
            story.append('')  # append an empty entry in place of the question line
        else:
            sent = tokenize(line)
            story.append(sent)
    return data

def get_stories(f, only_supporting=False, max_length=None):
    # if max_length is given, stories longer than max_length are discarded
    # data[storyID] = (substory, question, answer)
    data = parse_stories(f.readlines(), only_supporting=only_supporting)
    # flatten a list of sentences into a single word list
    flatten = lambda data: reduce(lambda x, y: x + y, data)
    # turn each substory into a flat word list
    data = [(flatten(story), q, answer) for story, q, answer in data
            if not max_length or len(flatten(story)) < max_length]
    return data
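As a quick sanity check, tokenize can be tried on a made-up bAbI-style sentence (the sample sentence below is illustrative, not taken from the dataset):
In [ ]:
# Illustrative only: punctuation becomes its own token
tokenize('Mary moved to the bathroom.')
# => ['Mary', 'moved', 'to', 'the', 'bathroom', '.']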
In [8]:
# train[storyID] = ([story word list], [question word list], answer word)
train = get_stories(tar.extractfile(challenge.format('train')))
test = get_stories(tar.extractfile(challenge.format('test')))
print(len(train), len(test))
print(train[0])
In [9]:
# vocabulary: the set of all words that appear in the training and test data
vocab = set()
for story, q, answer in train + test:
    vocab |= set(story + q + [answer])
vocab = sorted(vocab)
print(len(vocab), vocab[:100])
In [10]:
# index 0 is reserved for padding
# mapping from word => word ID
vocab_size = len(vocab) + 1
word_idx = dict((c, i + 1) for i, c in enumerate(vocab))
print(word_idx)
# compute the maximum story and query lengths
story_maxlen = max(map(len, (x for x, _, _ in train + test)))
query_maxlen = max(map(len, (x for _, x, _ in train + test)))
print('story_maxlen', story_maxlen)
print('query_maxlen', query_maxlen)
In [11]:
def vectorize_stories(data, word_idx, story_maxlen, query_maxlen):
    xs = []
    xqs = []
    ys = []
    for story, query, answer in data:
        # encode the story and the query as sequences of word IDs
        x = [word_idx[w] for w in story]
        xq = [word_idx[w] for w in query]
        # the answer is a one-hot vector of length len(vocab) + 1
        y = np.zeros(len(word_idx) + 1)
        y[word_idx[answer]] = 1
        xs.append(x)
        xqs.append(xq)
        ys.append(y)
    xs = pad_sequences(xs, maxlen=story_maxlen)
    xqs = pad_sequences(xqs, maxlen=query_maxlen)
    ys = np.array(ys)
    return xs, xqs, ys
In [12]:
idx_word = {i: w for w, i in word_idx.items()}  # inverse mapping: word ID => word
print(idx_word)
In [15]:
# vectorize the stories, questions and answers
x, xq, y = vectorize_stories(train, word_idx, story_maxlen, query_maxlen)
tx, txq, ty = vectorize_stories(test, word_idx, story_maxlen, query_maxlen)
print(len(x))
print(len(xq))
print(len(y))
print(x[0])
print(xq[0], [idx_word[i] for i in xq[0]])
print(y[0], idx_word[np.argmax(y[0])])
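For readability, a padded story can be decoded back into words by skipping the padding index 0 (a minimal sketch using the idx_word mapping defined above):
In [ ]:
# Decode the first padded story back into words, skipping the padding index 0
print([idx_word[i] for i in x[0] if i != 0])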
In [16]:
pad_sequences([[1,2,3,4,5], [1,2,3], [8]], 10)
Out[16]:
In [17]:
print('vocab = {} {}'.format(len(vocab), vocab))
print('x.shape = {}'.format(x.shape))
print('xq.shape = {}'.format(xq.shape))
print('y.shape = {}'.format(y.shape))
print('story_maxlen, query_maxlen = {} {}'.format(story_maxlen, query_maxlen))
In [18]:
print('Build model ...')
sentence = layers.Input(shape=(story_maxlen,), dtype='int32')
encoded_sentence = Embedding(vocab_size, EMBED_HIDDEN_SIZE)(sentence)
encoded_sentence = Dropout(0.3)(encoded_sentence)

question = layers.Input(shape=(query_maxlen,), dtype='int32')
encoded_question = Embedding(vocab_size, EMBED_HIDDEN_SIZE)(question)
encoded_question = Dropout(0.3)(encoded_question)
encoded_question = RNN(EMBED_HIDDEN_SIZE)(encoded_question)
# repeat the question encoding so it can be added to every story timestep
encoded_question = RepeatVector(story_maxlen)(encoded_question)

# add the question encoding to the story embedding, then read the sum with an RNN
merged = layers.add([encoded_sentence, encoded_question])
merged = RNN(EMBED_HIDDEN_SIZE)(merged)
merged = Dropout(0.3)(merged)
preds = Dense(vocab_size, activation='softmax')(merged)

model = Model([sentence, question], preds)
In [19]:
model.summary()
In [22]:
model.compile(optimizer='adam',
              loss='categorical_crossentropy',
              metrics=['accuracy'])
In [23]:
y.shape
Out[23]:
In [24]:
print('Training')
model.fit([x, xq], y,
          batch_size=BATCH_SIZE,
          epochs=EPOCHS,
          validation_split=0.05)
Out[24]:
In [25]:
loss, acc = model.evaluate([tx, txq], ty,
                           batch_size=BATCH_SIZE)
print('Test loss / test accuracy = {:.4f} / {:.4f}'.format(loss, acc))
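To inspect individual predictions, model.predict can be decoded with idx_word (a small sketch; the choice of the first five test samples is arbitrary):
In [ ]:
# Predict answers for a few test samples and compare them with the gold answers
preds_test = model.predict([tx[:5], txq[:5]], batch_size=BATCH_SIZE)
for i, p in enumerate(preds_test):
    predicted = idx_word.get(int(np.argmax(p)), '<pad>')  # index 0 (padding) has no word
    gold = idx_word[int(np.argmax(ty[i]))]
    print('predicted:', predicted, '/ gold:', gold)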
In [34]:
# Alternative model: encode the story and the question each with an RNN,
# concatenate the two encodings, and predict the answer from the result
sentence = layers.Input(shape=(story_maxlen,), dtype='int32')
encoded_sentence = Embedding(vocab_size, EMBED_HIDDEN_SIZE)(sentence)
encoded_sentence = RNN(EMBED_HIDDEN_SIZE)(encoded_sentence)

question = layers.Input(shape=(query_maxlen,), dtype='int32')
encoded_question = Embedding(vocab_size, EMBED_HIDDEN_SIZE)(question)
encoded_question = RNN(EMBED_HIDDEN_SIZE)(encoded_question)

concated = layers.concatenate([encoded_sentence, encoded_question])
preds = Dense(vocab_size, activation='softmax')(concated)

model = Model([sentence, question], preds)
In [35]:
model.summary()
In [36]:
model.compile(optimizer='adam',
              loss='categorical_crossentropy',
              metrics=['accuracy'])
In [37]:
print('Training')
model.fit([x, xq], y,
          batch_size=BATCH_SIZE,
          epochs=EPOCHS,
          validation_split=0.05)
Out[37]:
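To compare the two architectures, the concatenation model can be evaluated on the same test set, mirroring the earlier evaluation cell (a sketch; no results are reported here):
In [ ]:
loss, acc = model.evaluate([tx, txq], ty, batch_size=BATCH_SIZE)
print('Test loss / test accuracy = {:.4f} / {:.4f}'.format(loss, acc))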
In [ ]: