In [11]:
'''Trains a memory network on the bAbI dataset.
References:
- Jason Weston, Antoine Bordes, Sumit Chopra, Tomas Mikolov, Alexander M. Rush,
  "Towards AI-Complete Question Answering: A Set of Prerequisite Toy Tasks",
  http://arxiv.org/abs/1502.05698
- Sainbayar Sukhbaatar, Arthur Szlam, Jason Weston, Rob Fergus,
  "End-To-End Memory Networks",
  http://arxiv.org/abs/1503.08895
Reaches 98.6% accuracy on task 'single_supporting_fact_10k' after 120 epochs.
Time per epoch: 3s on CPU (core i7).
'''
from __future__ import print_function

# from keras.models import Sequential, Model
# from keras.layers.embeddings import Embedding
# from keras.layers import Input, Activation, Dense, Permute, Dropout, add, dot, concatenate
# from keras.layers import LSTM
from keras.utils.data_utils import get_file
from keras.preprocessing.sequence import pad_sequences
from functools import reduce
import tarfile
import numpy as np
import pandas as pd
import re

In [7]:
# def pad_sequences(seq, *args, **kwargs):
#     """NOP dropin for Keras pad_sequences"""
#     return seq

In [8]:
def tokenize(sent):
    '''Return the tokens of a sentence including punctuation.
    >>> tokenize('Bob dropped the apple. Where is the apple?')
    ['Bob', 'dropped', 'the', 'apple', '.', 'Where', 'is', 'the', 'apple', '?']
    '''
    return [x.strip() for x in re.split(r'(\W+)', sent) if x.strip()]


def parse_stories(lines, only_supporting=False):
    '''Parse stories provided in the bAbi tasks format
    If only_supporting is true, only the sentences
    that support the answer are kept.
    '''
    data = []
    story = []
    for line in lines:
        line = line.decode('utf-8').strip()
        nid, line = line.split(' ', 1)
        nid = int(nid)
        if nid == 1:
            story = []
        if '\t' in line:
            q, a, supporting = line.split('\t')
            q = tokenize(q)
            substory = None
            if only_supporting:
                # Only select the related substory
                supporting = map(int, supporting.split())
                substory = [story[i - 1] for i in supporting]
            else:
                # Provide all the substories
                substory = [x for x in story if x]
            data.append((substory, q, a))
            story.append('')
        else:
            sent = tokenize(line)
            story.append(sent)
    return data
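
# For reference, the raw bAbI lines parsed above look like this: statement
# lines are just numbered sentences, while question lines carry the answer
# and the supporting-fact ids, tab-separated:
#
#   1 Mary moved to the bathroom.
#   2 John went to the hallway.
#   3 Where is Mary?\tbathroom\t1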


def get_stories(f, only_supporting=False, max_length=None):
    '''Given a file name, read the file,
    retrieve the stories,
    and then convert the sentences into a single story.
    If max_length is supplied,
    any stories longer than max_length tokens will be discarded.
    '''
    data = parse_stories(f.readlines(), only_supporting=only_supporting)
    # flatten each story from a list of sentences into one token list
    flatten = lambda data: reduce(lambda x, y: x + y, data)
    data = [(flatten(story), q, answer) for story, q, answer in data
            if not max_length or len(flatten(story)) < max_length]
    return data


def vectorize_stories(data, word_idx, story_maxlen, query_maxlen):
    '''Convert (story, query, answer) word tuples into padded index arrays
    for the stories and queries, plus a one-hot answer matrix.
    '''
    X = []
    Xq = []
    Y = []
    for story, query, answer in data:
        x = [word_idx[w] for w in story]
        xq = [word_idx[w] for w in query]
        # let's not forget that index 0 is reserved
        y = np.zeros(len(word_idx) + 1)
        y[word_idx[answer]] = 1
        X.append(x)
        Xq.append(xq)
        Y.append(y)
    return (pad_sequences(X, maxlen=story_maxlen),
            pad_sequences(Xq, maxlen=query_maxlen), np.array(Y))
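
# Illustrative sanity check with a hypothetical toy vocabulary (not from the
# dataset): stories are left-padded with the reserved index 0 and answers
# become one-hot rows over len(word_idx) + 1 entries.
_toy_idx = {'Mary': 1, 'bathroom': 2, 'Where': 3, 'is': 4, '?': 5}
_toy = [(['Mary', 'bathroom'], ['Where', 'is', 'Mary', '?'], 'bathroom')]
_X, _Xq, _Y = vectorize_stories(_toy, _toy_idx, 4, 4)
# _X == [[0, 0, 1, 2]], _Xq == [[3, 4, 1, 5]], _Y[0][2] == 1.0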

In [10]:
try:
    path = get_file('babi-tasks-v1-2.tar.gz',
                    origin='https://s3.amazonaws.com/text-datasets/babi_tasks_1-20_v1-2.tar.gz')
except Exception:
    print('Error downloading dataset, please download it manually:\n'
          '$ wget http://www.thespermwhale.com/jaseweston/babi/tasks_1-20_v1-2.tar.gz\n'
          '$ mv tasks_1-20_v1-2.tar.gz ~/.keras/datasets/babi-tasks-v1-2.tar.gz')
    raise
tar = tarfile.open(path)

challenges = {
    # QA1 with 10,000 samples
    'single_supporting_fact_10k': 'tasks_1-20_v1-2/en-10k/qa1_single-supporting-fact_{}.txt',
    # QA2 with 10,000 samples
    'two_supporting_facts_10k': 'tasks_1-20_v1-2/en-10k/qa2_two-supporting-facts_{}.txt',
}
challenge_type = 'single_supporting_fact_10k'
challenge = challenges[challenge_type]

print('Extracting stories for the challenge:', challenge_type)
train_stories = get_stories(tar.extractfile(challenge.format('train')))
test_stories = get_stories(tar.extractfile(challenge.format('test')))

vocab = set()
for story, q, answer in train_stories + test_stories:
    vocab |= set(story + q + [answer])
vocab = sorted(vocab)

# Reserve 0 for masking via pad_sequences
vocab_size = len(vocab) + 1
story_maxlen = max(map(len, (x for x, _, _ in train_stories + test_stories)))
query_maxlen = max(map(len, (x for _, x, _ in train_stories + test_stories)))

print('-')
print('Vocab size:', vocab_size, 'unique words')
print('Story max length:', story_maxlen, 'words')
print('Query max length:', query_maxlen, 'words')
print('Number of training stories:', len(train_stories))
print('Number of test stories:', len(test_stories))
print('-')
print('Here\'s what a "story" tuple looks like (input, query, answer):')
print(train_stories[0])
print('-')
print('Vectorizing the word sequences...')

word_idx = dict((c, i + 1) for i, c in enumerate(vocab))
inputs_train, queries_train, answers_train = vectorize_stories(train_stories,
                                                               word_idx,
                                                               story_maxlen,
                                                               query_maxlen)
inputs_test, queries_test, answers_test = vectorize_stories(test_stories,
                                                            word_idx,
                                                            story_maxlen,
                                                            query_maxlen)

print('-')
print('inputs: integer tensor of shape (samples, max_length)')
print('inputs_train shape:', inputs_train.shape)
print('inputs_test shape:', inputs_test.shape)
print('-')
print('queries: integer tensor of shape (samples, max_length)')
print('queries_train shape:', queries_train.shape)
print('queries_test shape:', queries_test.shape)
print('-')
print('answers: binary (1 or 0) tensor of shape (samples, vocab_size)')
print('answers_train shape:', answers_train.shape)
print('answers_test shape:', answers_test.shape)
print('-')


Extracting stories for the challenge: single_supporting_fact_10k
-
Vocab size: 22 unique words
Story max length: 68 words
Query max length: 4 words
Number of training stories: 10000
Number of test stories: 1000
-
Here's what a "story" tuple looks like (input, query, answer):
(['Mary', 'moved', 'to', 'the', 'bathroom', '.', 'John', 'went', 'to', 'the', 'hallway', '.'], ['Where', 'is', 'Mary', '?'], 'bathroom')
-
Vectorizing the word sequences...
-
inputs: integer tensor of shape (samples, max_length)
inputs_train shape: (10000, 68)
inputs_test shape: (1000, 68)
-
queries: integer tensor of shape (samples, max_length)
queries_train shape: (10000, 4)
queries_test shape: (1000, 4)
-
answers: binary (1 or 0) tensor of shape (samples, vocab_size)
answers_train shape: (10000, 22)
answers_test shape: (1000, 22)
-

In [12]:
ts = test_stories[:20]
ts


Out[12]:
[(['John',
   'travelled',
   'to',
   'the',
   'hallway',
   '.',
   'Mary',
   'journeyed',
   'to',
   'the',
   'bathroom',
   '.'],
  ['Where', 'is', 'John', '?'],
  'hallway'),
 (['John',
   'travelled',
   'to',
   'the',
   'hallway',
   '.',
   'Mary',
   'journeyed',
   'to',
   'the',
   'bathroom',
   '.',
   'Daniel',
   'went',
   'back',
   'to',
   'the',
   'bathroom',
   '.',
   'John',
   'moved',
   'to',
   'the',
   'bedroom',
   '.'],
  ['Where', 'is', 'Mary', '?'],
  'bathroom'),
 (['John',
   'travelled',
   'to',
   'the',
   'hallway',
   '.',
   'Mary',
   'journeyed',
   'to',
   'the',
   'bathroom',
   '.',
   'Daniel',
   'went',
   'back',
   'to',
   'the',
   'bathroom',
   '.',
   'John',
   'moved',
   'to',
   'the',
   'bedroom',
   '.',
   'John',
   'went',
   'to',
   'the',
   'hallway',
   '.',
   'Sandra',
   'journeyed',
   'to',
   'the',
   'kitchen',
   '.'],
  ['Where', 'is', 'Sandra', '?'],
  'kitchen'),
 (['John',
   'travelled',
   'to',
   'the',
   'hallway',
   '.',
   'Mary',
   'journeyed',
   'to',
   'the',
   'bathroom',
   '.',
   'Daniel',
   'went',
   'back',
   'to',
   'the',
   'bathroom',
   '.',
   'John',
   'moved',
   'to',
   'the',
   'bedroom',
   '.',
   'John',
   'went',
   'to',
   'the',
   'hallway',
   '.',
   'Sandra',
   'journeyed',
   'to',
   'the',
   'kitchen',
   '.',
   'Sandra',
   'travelled',
   'to',
   'the',
   'hallway',
   '.',
   'John',
   'went',
   'to',
   'the',
   'garden',
   '.'],
  ['Where', 'is', 'Sandra', '?'],
  'hallway'),
 (['John',
   'travelled',
   'to',
   'the',
   'hallway',
   '.',
   'Mary',
   'journeyed',
   'to',
   'the',
   'bathroom',
   '.',
   'Daniel',
   'went',
   'back',
   'to',
   'the',
   'bathroom',
   '.',
   'John',
   'moved',
   'to',
   'the',
   'bedroom',
   '.',
   'John',
   'went',
   'to',
   'the',
   'hallway',
   '.',
   'Sandra',
   'journeyed',
   'to',
   'the',
   'kitchen',
   '.',
   'Sandra',
   'travelled',
   'to',
   'the',
   'hallway',
   '.',
   'John',
   'went',
   'to',
   'the',
   'garden',
   '.',
   'Sandra',
   'went',
   'back',
   'to',
   'the',
   'bathroom',
   '.',
   'Sandra',
   'moved',
   'to',
   'the',
   'kitchen',
   '.'],
  ['Where', 'is', 'Sandra', '?'],
  'kitchen'),
 (['Sandra',
   'travelled',
   'to',
   'the',
   'kitchen',
   '.',
   'Sandra',
   'travelled',
   'to',
   'the',
   'hallway',
   '.'],
  ['Where', 'is', 'Sandra', '?'],
  'hallway'),
 (['Sandra',
   'travelled',
   'to',
   'the',
   'kitchen',
   '.',
   'Sandra',
   'travelled',
   'to',
   'the',
   'hallway',
   '.',
   'Mary',
   'went',
   'to',
   'the',
   'bathroom',
   '.',
   'Sandra',
   'moved',
   'to',
   'the',
   'garden',
   '.'],
  ['Where', 'is', 'Sandra', '?'],
  'garden'),
 (['Sandra',
   'travelled',
   'to',
   'the',
   'kitchen',
   '.',
   'Sandra',
   'travelled',
   'to',
   'the',
   'hallway',
   '.',
   'Mary',
   'went',
   'to',
   'the',
   'bathroom',
   '.',
   'Sandra',
   'moved',
   'to',
   'the',
   'garden',
   '.',
   'Sandra',
   'travelled',
   'to',
   'the',
   'office',
   '.',
   'Daniel',
   'journeyed',
   'to',
   'the',
   'hallway',
   '.'],
  ['Where', 'is', 'Daniel', '?'],
  'hallway'),
 (['Sandra',
   'travelled',
   'to',
   'the',
   'kitchen',
   '.',
   'Sandra',
   'travelled',
   'to',
   'the',
   'hallway',
   '.',
   'Mary',
   'went',
   'to',
   'the',
   'bathroom',
   '.',
   'Sandra',
   'moved',
   'to',
   'the',
   'garden',
   '.',
   'Sandra',
   'travelled',
   'to',
   'the',
   'office',
   '.',
   'Daniel',
   'journeyed',
   'to',
   'the',
   'hallway',
   '.',
   'Daniel',
   'journeyed',
   'to',
   'the',
   'office',
   '.',
   'John',
   'moved',
   'to',
   'the',
   'hallway',
   '.'],
  ['Where', 'is', 'Sandra', '?'],
  'office'),
 (['Sandra',
   'travelled',
   'to',
   'the',
   'kitchen',
   '.',
   'Sandra',
   'travelled',
   'to',
   'the',
   'hallway',
   '.',
   'Mary',
   'went',
   'to',
   'the',
   'bathroom',
   '.',
   'Sandra',
   'moved',
   'to',
   'the',
   'garden',
   '.',
   'Sandra',
   'travelled',
   'to',
   'the',
   'office',
   '.',
   'Daniel',
   'journeyed',
   'to',
   'the',
   'hallway',
   '.',
   'Daniel',
   'journeyed',
   'to',
   'the',
   'office',
   '.',
   'John',
   'moved',
   'to',
   'the',
   'hallway',
   '.',
   'John',
   'travelled',
   'to',
   'the',
   'bathroom',
   '.',
   'John',
   'journeyed',
   'to',
   'the',
   'office',
   '.'],
  ['Where', 'is', 'Daniel', '?'],
  'office'),
 (['John',
   'travelled',
   'to',
   'the',
   'office',
   '.',
   'Mary',
   'journeyed',
   'to',
   'the',
   'kitchen',
   '.'],
  ['Where', 'is', 'Mary', '?'],
  'kitchen'),
 (['John',
   'travelled',
   'to',
   'the',
   'office',
   '.',
   'Mary',
   'journeyed',
   'to',
   'the',
   'kitchen',
   '.',
   'Mary',
   'moved',
   'to',
   'the',
   'garden',
   '.',
   'Daniel',
   'went',
   'to',
   'the',
   'office',
   '.'],
  ['Where', 'is', 'Mary', '?'],
  'garden'),
 (['John',
   'travelled',
   'to',
   'the',
   'office',
   '.',
   'Mary',
   'journeyed',
   'to',
   'the',
   'kitchen',
   '.',
   'Mary',
   'moved',
   'to',
   'the',
   'garden',
   '.',
   'Daniel',
   'went',
   'to',
   'the',
   'office',
   '.',
   'John',
   'went',
   'to',
   'the',
   'hallway',
   '.',
   'Mary',
   'moved',
   'to',
   'the',
   'kitchen',
   '.'],
  ['Where', 'is', 'Daniel', '?'],
  'office'),
 (['John',
   'travelled',
   'to',
   'the',
   'office',
   '.',
   'Mary',
   'journeyed',
   'to',
   'the',
   'kitchen',
   '.',
   'Mary',
   'moved',
   'to',
   'the',
   'garden',
   '.',
   'Daniel',
   'went',
   'to',
   'the',
   'office',
   '.',
   'John',
   'went',
   'to',
   'the',
   'hallway',
   '.',
   'Mary',
   'moved',
   'to',
   'the',
   'kitchen',
   '.',
   'Mary',
   'travelled',
   'to',
   'the',
   'bedroom',
   '.',
   'Daniel',
   'journeyed',
   'to',
   'the',
   'garden',
   '.'],
  ['Where', 'is', 'Mary', '?'],
  'bedroom'),
 (['John',
   'travelled',
   'to',
   'the',
   'office',
   '.',
   'Mary',
   'journeyed',
   'to',
   'the',
   'kitchen',
   '.',
   'Mary',
   'moved',
   'to',
   'the',
   'garden',
   '.',
   'Daniel',
   'went',
   'to',
   'the',
   'office',
   '.',
   'John',
   'went',
   'to',
   'the',
   'hallway',
   '.',
   'Mary',
   'moved',
   'to',
   'the',
   'kitchen',
   '.',
   'Mary',
   'travelled',
   'to',
   'the',
   'bedroom',
   '.',
   'Daniel',
   'journeyed',
   'to',
   'the',
   'garden',
   '.',
   'John',
   'journeyed',
   'to',
   'the',
   'garden',
   '.',
   'Sandra',
   'journeyed',
   'to',
   'the',
   'bedroom',
   '.'],
  ['Where', 'is', 'Mary', '?'],
  'bedroom'),
 (['John',
   'moved',
   'to',
   'the',
   'hallway',
   '.',
   'John',
   'journeyed',
   'to',
   'the',
   'kitchen',
   '.'],
  ['Where', 'is', 'John', '?'],
  'kitchen'),
 (['John',
   'moved',
   'to',
   'the',
   'hallway',
   '.',
   'John',
   'journeyed',
   'to',
   'the',
   'kitchen',
   '.',
   'Sandra',
   'travelled',
   'to',
   'the',
   'garden',
   '.',
   'John',
   'journeyed',
   'to',
   'the',
   'garden',
   '.'],
  ['Where', 'is', 'John', '?'],
  'garden'),
 (['John',
   'moved',
   'to',
   'the',
   'hallway',
   '.',
   'John',
   'journeyed',
   'to',
   'the',
   'kitchen',
   '.',
   'Sandra',
   'travelled',
   'to',
   'the',
   'garden',
   '.',
   'John',
   'journeyed',
   'to',
   'the',
   'garden',
   '.',
   'Daniel',
   'journeyed',
   'to',
   'the',
   'office',
   '.',
   'John',
   'went',
   'to',
   'the',
   'kitchen',
   '.'],
  ['Where', 'is', 'John', '?'],
  'kitchen'),
 (['John',
   'moved',
   'to',
   'the',
   'hallway',
   '.',
   'John',
   'journeyed',
   'to',
   'the',
   'kitchen',
   '.',
   'Sandra',
   'travelled',
   'to',
   'the',
   'garden',
   '.',
   'John',
   'journeyed',
   'to',
   'the',
   'garden',
   '.',
   'Daniel',
   'journeyed',
   'to',
   'the',
   'office',
   '.',
   'John',
   'went',
   'to',
   'the',
   'kitchen',
   '.',
   'Sandra',
   'journeyed',
   'to',
   'the',
   'hallway',
   '.',
   'Mary',
   'went',
   'to',
   'the',
   'hallway',
   '.'],
  ['Where', 'is', 'Daniel', '?'],
  'office'),
 (['John',
   'moved',
   'to',
   'the',
   'hallway',
   '.',
   'John',
   'journeyed',
   'to',
   'the',
   'kitchen',
   '.',
   'Sandra',
   'travelled',
   'to',
   'the',
   'garden',
   '.',
   'John',
   'journeyed',
   'to',
   'the',
   'garden',
   '.',
   'Daniel',
   'journeyed',
   'to',
   'the',
   'office',
   '.',
   'John',
   'went',
   'to',
   'the',
   'kitchen',
   '.',
   'Sandra',
   'journeyed',
   'to',
   'the',
   'hallway',
   '.',
   'Mary',
   'went',
   'to',
   'the',
   'hallway',
   '.',
   'Mary',
   'went',
   'to',
   'the',
   'kitchen',
   '.',
   'Sandra',
   'travelled',
   'to',
   'the',
   'garden',
   '.'],
  ['Where', 'is', 'John', '?'],
  'kitchen')]

In [17]:
stories, queries, answers = zip(*test_stories)

In [18]:
df = pd.DataFrame(test_stories, columns=['story', 'query', 'answer'])

In [20]:
df['story'] = df['story'].str.join(' ')

In [22]:
df['story'].unique().shape


Out[22]:
(999,)

In [23]:
df.shape


Out[23]:
(1000, 3)
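
That is 999 unique flattened story texts across 1,000 test rows, so exactly one story string appears twice in the test set.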

In [ ]:
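# A minimal sketch of the end-to-end memory network the docstring describes,
# following the Keras babi_memnn example referenced above. The imports mirror
# the ones commented out at the top of the notebook; the hyperparameters
# (embedding size 64, dropout 0.3, LSTM width 32, 120 epochs) are that
# example's defaults, not tuned here.
from keras.models import Sequential, Model
from keras.layers.embeddings import Embedding
from keras.layers import Input, Activation, Dense, Permute, Dropout
from keras.layers import add, dot, concatenate, LSTM

# placeholders for the story and the question
input_sequence = Input((story_maxlen,))
question = Input((query_maxlen,))

# memory encoder m: embeds the story to (samples, story_maxlen, 64)
input_encoder_m = Sequential()
input_encoder_m.add(Embedding(input_dim=vocab_size, output_dim=64))
input_encoder_m.add(Dropout(0.3))

# memory encoder c: embeds the story to (samples, story_maxlen, query_maxlen)
input_encoder_c = Sequential()
input_encoder_c.add(Embedding(input_dim=vocab_size, output_dim=query_maxlen))
input_encoder_c.add(Dropout(0.3))

# question encoder: embeds the question to (samples, query_maxlen, 64)
question_encoder = Sequential()
question_encoder.add(Embedding(input_dim=vocab_size, output_dim=64,
                               input_length=query_maxlen))
question_encoder.add(Dropout(0.3))

input_encoded_m = input_encoder_m(input_sequence)
input_encoded_c = input_encoder_c(input_sequence)
question_encoded = question_encoder(question)

# attention: match each story position against the question,
# shape (samples, story_maxlen, query_maxlen)
match = dot([input_encoded_m, question_encoded], axes=(2, 2))
match = Activation('softmax')(match)

# combine the attention weights with the second story encoding
response = add([match, input_encoded_c])
response = Permute((2, 1))(response)  # (samples, query_maxlen, story_maxlen)

# concatenate with the question and reduce with an RNN
answer = concatenate([response, question_encoded])
answer = LSTM(32)(answer)
answer = Dropout(0.3)(answer)
answer = Dense(vocab_size)(answer)
answer = Activation('softmax')(answer)  # distribution over the vocabulary

model = Model([input_sequence, question], answer)
model.compile(optimizer='rmsprop', loss='categorical_crossentropy',
              metrics=['accuracy'])
model.fit([inputs_train, queries_train], answers_train,
          batch_size=32, epochs=120,
          validation_data=([inputs_test, queries_test], answers_test))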