In [1]:
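# notebook-friendly progress bars (newer tqdm releases expose this as tqdm.notebook.tqdm)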
from tqdm import tqdm_notebook as tqdm
In [2]:
import torch
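# load a previously trained checkpoint; the filename encodes 2 GRU layers, hidden size 2048,
# GloVe 6B 300d embeddings and 3 completed epochs (see the naming scheme in the save cell below)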
brain_data = torch.load("encdec-2xgru2048-cornell-50K-glove6B_300-vmin25-e3.pt")
In [3]:
from due.models.seq2seq import EncoderDecoderBrain
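# rebuild the EncoderDecoderBrain from the serialized state loaded above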
brain = EncoderDecoderBrain.load(brain_data)
In [4]:
# brain = brain_base.reset_with_parameters({
#     'batch_size': 128,
#     'hidden_size': 2048,
#     'num_rnn_layers': 2
# })
In [5]:
brain.epochs
Out[5]:
In [6]:
# run one additional training epoch
for i in range(1):
    brain.epoch()
In [7]:
print(brain.predict("I'm in, and you?"))
print(brain.predict("no..."))
print(brain.predict("let's go!"))
print(brain.predict("yes!"))
print()
print(brain.predict("Cancellor palpatine is worse than darth vader"))
print(brain.predict("Is Anakin Skywalker evil?"))
print(brain.predict("What's the meaning of life?"))
print()
print(brain.predict("So what color lightsaber is the best?"))
print(brain.predict("hey have you seen any star wars movies lately?"))
print(brain.predict("I read the other day that a lot of the structure of Star Wars was based Akira Kurosawa movies."))
print(brain.predict("return of the jedi is my favourite"))
print(brain.predict("han shot first"))
print(brain.predict("cantina band is a great piece of music"))
print()
print(brain.predict("who is Darth Vader"))
print(brain.predict("do you like han solo?"))
print(brain.predict("did you like Clone Wars?"))
print(brain.predict("What's the best movie in the trilogy?"))
print(brain.predict("Hey I got a question for you"))
print(brain.predict("is the millennium falcon faster than an imperial crusader?"))
print(brain.predict("is the original trilogy better than the prequels?"))
print(brain.predict("did you like the prequels?"))
print(brain.predict("who's the best character in the empire strikes again?"))
print(brain.predict("i think prequels are overrated!"))
print()
print(brain.predict("hi"))
print(brain.predict("hello"))
print(brain.predict("what's your name?"))
print(brain.predict("my name is Anna"))
print(brain.predict("good to see you"))
print(brain.predict("i like wine, and you?"))
In [8]:
brain.epochs
Out[8]:
In [9]:
brain.parameters
Out[9]:
In [11]:
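# serialize the brain so it can be written to disk with torch.save below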
saved_brain = brain.save()
In [12]:
# the filename encodes layer count, hidden size and number of completed epochs
torch.save(saved_brain, "encdec-%sxgru%s-cornell-50K-glove6B_300-vmin25-e%s.pt" %
           (brain.parameters['num_rnn_layers'], brain.parameters['hidden_size'], brain.epochs))
In [13]:
brain.train_loss_history
Out[13]:
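A quick way to eyeball the recorded losses (a sketch, not part of the original run; it assumes train_loss_history is a flat list of numeric loss values, which the cell above does not confirm):

import matplotlib.pyplot as plt

losses = list(brain.train_loss_history)
plt.plot(range(1, len(losses) + 1), losses)
plt.xlabel("training step")
plt.ylabel("loss")
plt.title("EncoderDecoderBrain training loss")
plt.show()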
In [1]:
from due.corpora import cornell
import itertools
N_DIALOGS = 1000
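# take the first N_DIALOGS dialogues from the Cornell Movie-Dialogs corpus as training episodes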
episodes = list(itertools.islice(cornell.episode_generator(), N_DIALOGS))
In [2]:
from due.models.seq2seq import EncoderDecoderBrain
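# build a fresh, untrained brain on these episodes; the empty dict presumably leaves
# all hyperparameters at their defaults (assumption about the due constructor)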
brain = EncoderDecoderBrain({}, episodes)
In [3]:
# run a first training epoch on the small corpus
for i in range(1):
    brain.epoch()