In [1]:
# tqdm.auto selects the notebook widget frontend when available and falls back
# to the console bar otherwise; the old `tqdm_notebook` entry point is
# deprecated. Importing under the same name keeps downstream usage unchanged.
from tqdm.auto import tqdm

Resume Training

Load brain


In [2]:
import torch
# Load the serialized brain state saved by a previous run (naming encodes:
# 2-layer GRU, hidden size 2048, Cornell corpus, GloVe-6B-300d embeddings,
# vocab min count 25, 3 epochs).
# NOTE(review): torch.load unpickles arbitrary objects — only load checkpoints
# from trusted sources. If resuming on a CPU-only machine, map_location='cpu'
# may be needed — TODO confirm target device.
brain_data = torch.load("encdec-2xgru2048-cornell-50K-glove6B_300-vmin25-e3.pt")

In [3]:
from due.models.seq2seq import EncoderDecoderBrain

# Rebuild the model and its training state from the deserialized checkpoint
# so training can resume where it left off.
brain = EncoderDecoderBrain.load(brain_data)

In [4]:
# Optional: rebuild the brain with overridden hyperparameters instead of the
# loaded ones. Kept for reference; requires a `brain_base` object from an
# earlier session — TODO remove once no longer needed.
# brain = brain_base.reset_with_parameters({
#     'batch_size': 128,
#     'hidden_size': 2048,
#     'num_rnn_layers': 2
# })

Train


In [5]:
brain.epochs  # epochs completed so far (3 in the loaded checkpoint)


Out[5]:
3

In [6]:
# Train for additional epochs; raise N_EPOCHS for a longer run.
# `_` replaces the unused loop variable `i`.
N_EPOCHS = 1
for _ in range(N_EPOCHS):
    brain.epoch()



Sample predictions


In [7]:
def print_predictions(predict, prompt_groups):
    """Print the model's reply to each prompt, one reply per line.

    predict: callable mapping an input sentence (str) to a reply (str).
    prompt_groups: iterable of lists of prompt strings; a blank line is
        printed between consecutive groups.
    """
    for index, group in enumerate(prompt_groups):
        if index > 0:
            print()  # blank separator line between groups
        for prompt in group:
            print(predict(prompt))


# Grouped sample prompts: small talk, Star Wars questions, and greetings.
SAMPLE_PROMPT_GROUPS = [
    [
        "I'm in, and you?",
        "no...",
        "let's go!",
        "yes!",
    ],
    [
        "Cancellor palpatine is worse than darth vader",
        "Is Anakin Skywalker evil?",
        "What's the meaning of life?",
    ],
    [
        "So what color lightsaber is the best?",
        "hey have you seen any star wars movies lately?",
        "I read the other day that a lot of the structure of Star Wars was based Akira Kurosawa movies.",
        "return of the jedi is my favourite",
        "han shot first",
        "cantina band is a great piece of music",
    ],
    [
        "who is Darth Vader",
        "do you like han solo?",
        "did you like Clone Wars?",
        "What's the best movie in the trilogy?",
        "Hey I got a question for you",
        "is the millennium falcon faster than an imperial crusader?",
        "is the original trilogy better than the prequels?",
        "did you like the prequels?",
        "who's the best character in the empire strikes again?",
        "i think prequels are overrated!",
    ],
    [
        "hi",
        "hello",
        "what's your name?",
        "my name is Anna",
        "good to see you",
        "i like wine, and you?",
    ],
]

print_predictions(brain.predict, SAMPLE_PROMPT_GROUPS)


i 'm not <UNK> .
<UNK> , <UNK> .
<UNK> , <UNK> .
<UNK> , <UNK> .

i 'm not <UNK> .
i 'm not <UNK> .
i 'm not <UNK> .

i 'm not <UNK> .
i 'm not <UNK> .
i 'm not <UNK> .
i 'm not <UNK> .
i 'm not <UNK> .
i 'm not <UNK> .

i 'm not <UNK> .
i 'm not <UNK> .
i 'm not <UNK> .
i 'm not <UNK> .
i 'm not <UNK> .
i 'm not <UNK> .
i 'm not <UNK> .
i 'm not <UNK> .
i 'm not <UNK> .
i 'm not <UNK> .

<UNK> , <UNK> .
<UNK> , <UNK> .
i 'm not <UNK> .
i 'm not <UNK> .
i 'm not <UNK> .
i 'm not <UNK> .

Save


In [8]:
brain.epochs  # epochs completed so far, including this session's training


Out[8]:
4

In [9]:
brain.parameters  # hyperparameter dict for this brain; reused below for the filename


Out[9]:
{'batch_size': 128,
 'hidden_size': 2048,
 'learning_rate': 0.01,
 'max_sentence_length': 20,
 'teacher_forcing_ratio': 1.0,
 'num_rnn_layers': 2}

In [11]:
# Serialize the brain into a state object suitable for torch.save.
saved_brain = brain.save()

In [12]:
# Persist the checkpoint under a name that encodes the architecture
# (RNN layers x hidden size) and the number of epochs trained.
checkpoint_filename = "encdec-%sxgru%s-cornell-50K-glove6B_300-vmin25-e%s.pt" % (
    brain.parameters['num_rnn_layers'],
    brain.parameters['hidden_size'],
    brain.epochs,
)
torch.save(saved_brain, checkpoint_filename)

In [13]:
brain.train_loss_history  # one training-loss value per completed epoch


Out[13]:
[2.9676, 2.6563079481978824, 2.539921265621885, 2.463710880866219]

From Scratch


In [1]:
from due.corpora import cornell
import itertools

# Number of Cornell Movie-Dialogs episodes to train on.
N_DIALOGS = 1000

# islice takes the first N_DIALOGS episodes without materializing
# the full corpus generator.
episodes = list(itertools.islice(cornell.episode_generator(), N_DIALOGS))




In [2]:
from due.models.seq2seq import EncoderDecoderBrain

# Build a fresh brain from the episodes. The empty dict presumably means
# "use all default hyperparameters" — TODO confirm against
# EncoderDecoderBrain's defaults.
brain = EncoderDecoderBrain({}, episodes)


INFO:due.models.seq2seq:Extracting dataset from episodes
INFO:due.models.seq2seq:Building the embedding matrix

INFO:due.models.seq2seq:Initializing model


In [3]:
# Train for additional epochs; raise N_EPOCHS for a longer run.
# `_` replaces the unused loop variable `i`.
N_EPOCHS = 1
for _ in range(N_EPOCHS):
    brain.epoch()




In [ ]: