Adapted from https://github.com/keras-team/keras/blob/master/examples/addition_rnn.py
In [1]:
    
!pip install -q tf-nightly-gpu-2.0-preview
    
    
In [2]:
    
import tensorflow as tf
print(tf.__version__)
    
    
In [0]:
    
class CharacterTable(object):
    """Given a set of characters:
    + Encode them to a one hot integer representation
    + Decode the one hot integer representation to their character output
    + Decode a vector of probabilities to their character output
    """
    def __init__(self, chars):
        """Initialize character table.
        # Arguments
            chars: Characters that can appear in the input.
        """
        self.chars = sorted(set(chars))
        self.char_indices = dict((c, i) for i, c in enumerate(self.chars))
        self.indices_char = dict((i, c) for i, c in enumerate(self.chars))
    def encode(self, C, num_rows):
        """One hot encode given string C.
        # Arguments
            num_rows: Number of rows in the returned one hot encoding. This is
                used to keep the number of rows the same for every example (padding).
        """
        x = np.zeros((num_rows, len(self.chars)))
        for i, c in enumerate(C):
            x[i, self.char_indices[c]] = 1
        return x
    def decode(self, x, calc_argmax=True):
        """Decode a one hot (or probability) matrix back to a string."""
        if calc_argmax:
            x = x.argmax(axis=-1)
        return ''.join(self.indices_char[i] for i in x)
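
A quick round trip makes the table concrete (a minimal sketch; demo_table is illustrative and mirrors the ctable built below):

In [0]:

import numpy as np
# Encode a padded query into a (7, 12) one hot matrix, then decode it back.
demo_table = CharacterTable('0123456789+ ')
onehot = demo_table.encode('12+3   ', num_rows=7)
print(onehot.shape)                     # (7, 12)
print(repr(demo_table.decode(onehot)))  # '12+3   ' (padding spaces survive)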
    
In [0]:
    
# ANSI escape codes for coloring correct/incorrect predictions in the console.
class colors:
    ok = '\033[92m'
    fail = '\033[91m'
    close = '\033[0m'
    
In [6]:
    
import numpy as np
# Parameters for the model and dataset.
TRAINING_SIZE = 50000
DIGITS = 3
# REVERSE = True
REVERSE = False
# Maximum length of input is 'int + int' (e.g., '345+678'). Maximum length of
# int is DIGITS.
MAXLEN = DIGITS + 1 + DIGITS
# All the numbers, plus sign and space for padding.
chars = '0123456789+ '
ctable = CharacterTable(chars)
questions = []
expected = []
seen = set()
print('Generating data...')
while len(questions) < TRAINING_SIZE:
    f = lambda: int(''.join(np.random.choice(list('0123456789'))
                    for i in range(np.random.randint(1, DIGITS + 1))))
    a, b = f(), f()
    # Skip any addition questions we've already seen
    # Also skip commutative duplicates, i.e. a+b when b+a has been seen (hence the sorting).
    key = tuple(sorted((a, b)))
    if key in seen:
        continue
    seen.add(key)
    # Pad the data with spaces such that it is always MAXLEN.
    q = '{}+{}'.format(a, b)
    query = q + ' ' * (MAXLEN - len(q))
    ans = str(a + b)
    # Answers can be of maximum size DIGITS + 1.
    ans += ' ' * (DIGITS + 1 - len(ans))
    if REVERSE:
        # Reverse the query, e.g., '12+345  ' becomes '  543+21'. (Note the
        # space used for padding.)
        query = query[::-1]
    questions.append(query)
    expected.append(ans)
print('Total addition questions:', len(questions))
    
    
In [13]:
    
questions[0]
    
    Out[13]:
In [14]:
    
expected[0]
    
    Out[14]:
In [8]:
    
print('Vectorization...')
x = np.zeros((len(questions), MAXLEN, len(chars)), dtype=bool)  # np.bool is removed in newer NumPy
y = np.zeros((len(questions), DIGITS + 1, len(chars)), dtype=bool)
for i, sentence in enumerate(questions):
    x[i] = ctable.encode(sentence, MAXLEN)
for i, sentence in enumerate(expected):
    y[i] = ctable.encode(sentence, DIGITS + 1)
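
As a sanity check (a small sketch), a vectorized row should decode back to exactly the padded string it came from:

In [0]:

# Round-trip the first example through the character table.
assert ctable.decode(x[0]) == questions[0]
assert ctable.decode(y[0]) == expected[0]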
    
    
In [9]:
    
len(x[0])
    
    Out[9]:
7
In [10]:
    
len(questions[0])
    
    Out[10]:
7
In [12]:
    
x[0]
    
    Out[12]:
In [15]:
    
y[0]
    
    Out[15]:
In [0]:
    
# Shuffle (x, y) in unison as the later parts of x will almost all be larger
# digits.
indices = np.arange(len(y))
np.random.shuffle(indices)
x = x[indices]
y = y[indices]
    
In [17]:
    
# Explicitly set apart 10% for validation data that we never train over.
split_at = len(x) - len(x) // 10
(x_train, x_val) = x[:split_at], x[split_at:]
(y_train, y_val) = y[:split_at], y[split_at:]
print('Training Data:')
print(x_train.shape)
print(y_train.shape)
print('Validation Data:')
print(x_val.shape)
print(y_val.shape)
    
    
In [18]:
    
# input shape: 7 characters, each one of 0-9, '+' or space (12 possibilities)
MAXLEN, len(chars)
    
    Out[18]:
(7, 12)
In [23]:
    
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import LSTM, GRU, SimpleRNN, Dense, RepeatVector
# Try replacing LSTM, GRU, or SimpleRNN.
# RNN = LSTM
RNN = SimpleRNN  # should suffice here: the sequences are short and the dependencies are local
# RNN = GRU
HIDDEN_SIZE = 128
BATCH_SIZE = 128
model = Sequential()
# encoder: read the input sequence and compress it into one fixed-size hidden state
model.add(RNN(units=HIDDEN_SIZE, input_shape=(MAXLEN, len(chars))))
# latent space
encoding_dim = 32
model.add(Dense(units=encoding_dim, activation='relu', name="encoder"))
# decoder: produce DIGITS + 1 temporal outputs, one for each character of the result
model.add(RepeatVector(DIGITS + 1))
# return_sequences=True keeps the output of every timestep, not only the final one
# (we need a prediction for each of the DIGITS + 1 output characters)
model.add(RNN(units=HIDDEN_SIZE, return_sequences=True))
model.add(Dense(name='classifier', units=len(chars), activation='softmax'))
model.compile(loss='categorical_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])
model.summary()
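
The parameter counts in the summary can be reproduced by hand for the SimpleRNN configuration above (a worked check, not part of the original example):

In [0]:

# SimpleRNN encoder: 128*(128 + 12 + 1) = 18,048  (recurrent kernel + input kernel + bias)
# Dense encoder:     128*32 + 32        =  4,128
# RepeatVector:      no parameters
# SimpleRNN decoder: 128*(128 + 32 + 1) = 20,608
# Dense classifier:  128*12 + 12        =  1,548
# Total:                                  44,332
print(model.count_params())  # should print 44332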
    
    
In [24]:
    
%%time
# Train the model each generation and show predictions against the validation
# dataset.
merged_losses = {
    "loss": [],
    "val_loss": [],
    "accuracy": [],
    "val_accuracy": [],
}
for iteration in range(1, 50):
    print()
    print('-' * 50)
    print('Iteration', iteration)
    iteration_history = model.fit(x_train, y_train,
              batch_size=BATCH_SIZE,
              epochs=1,
              validation_data=(x_val, y_val))
    
    # history values are one-element lists (epochs=1); extend keeps the merged lists flat
    merged_losses["loss"].extend(iteration_history.history["loss"])
    merged_losses["val_loss"].extend(iteration_history.history["val_loss"])
    merged_losses["accuracy"].extend(iteration_history.history["accuracy"])
    merged_losses["val_accuracy"].extend(iteration_history.history["val_accuracy"])
    # Select 10 samples from the validation set at random so we can visualize
    # errors.
    for i in range(10):
        ind = np.random.randint(0, len(x_val))
        rowx, rowy = x_val[np.array([ind])], y_val[np.array([ind])]
        preds = np.argmax(model.predict(rowx, verbose=0), axis=-1)  # predict_classes was removed in later TF versions
        q = ctable.decode(rowx[0])
        correct = ctable.decode(rowy[0])
        guess = ctable.decode(preds[0], calc_argmax=False)
        print('Q', q[::-1] if REVERSE else q, end=' ')
        print('T', correct, end=' ')
        if correct == guess:
            print(colors.ok + '☑' + colors.close, end=' ')
        else:
            print(colors.fail + '☒' + colors.close, end=' ')
        print(guess)
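
The same curves could also be collected with a single fit call instead of the manual loop (a sketch; it gives up the per-iteration sample printing):

In [0]:

# Alternative: one call with epochs=49; history.history holds per-epoch
# lists under the same keys as merged_losses above.
history = model.fit(x_train, y_train,
                    batch_size=BATCH_SIZE,
                    epochs=49,
                    validation_data=(x_val, y_val))
merged_losses = history.history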
    
    
In [25]:
    
import matplotlib.pyplot as plt
plt.ylabel('loss')
plt.xlabel('epoch')
plt.yscale('log')
plt.plot(merged_losses['loss'])
plt.plot(merged_losses['val_loss'])
plt.legend(['loss', 'validation loss'])
    
    Out[25]:
[plot: training and validation loss per epoch, log-scale y-axis]
    
In [27]:
    
plt.ylabel('accuracy')
plt.xlabel('epoch')
# plt.yscale('log')
plt.plot(merged_losses['accuracy'])
plt.plot(merged_losses['val_accuracy'])
plt.legend(['accuracy', 'validation accuracy'])
    
    Out[27]:
[plot: training and validation accuracy per epoch]
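
Per-character accuracy, which the metric above measures, overstates how often the whole sum is right; here is a quick exact-match check over the validation set (a sketch using only what is already defined):

In [0]:

# Fraction of validation answers where every output character is correct.
val_preds = np.argmax(model.predict(x_val, verbose=0), axis=-1)
val_truth = np.argmax(y_val, axis=-1)
exact_match = np.mean(np.all(val_preds == val_truth, axis=-1))
print('Exact-match validation accuracy: {:.3f}'.format(exact_match))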
    
In [0]: