In [1]:
%load_ext autoreload
%autoreload 2
[ ] add evaluate method
[ ] use torch.utils.data.DataLoader and stick with a standard API for data (see the sketch after this list).
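A minimal sketch of the DataLoader item above, assuming the language pairs are already lists of (input, target) index tensors; PairDataset is a hypothetical name and not part of this repo:

import torch
from torch.utils.data import Dataset, DataLoader

class PairDataset(Dataset):
    """Hypothetical wrapper: assumes `pairs` is a list of (input_tensor, target_tensor)."""
    def __init__(self, pairs):
        self.pairs = pairs

    def __len__(self):
        return len(self.pairs)

    def __getitem__(self, idx):
        return self.pairs[idx]

# loader = DataLoader(PairDataset(pairs), batch_size=10, shuffle=True)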
%%bash
source activate deep-learning
python -u train.py --batch-size=100 --n-epoch=10
In [2]:
from utils import ledger, Struct
import data
from language import get_language_pairs
from visdom_helper import visdom_helper
from model import VanillaSequenceToSequence
In [9]:
args = Struct(**{'BATCH_SIZE': 10,
                 'BI_DIRECTIONAL': False,
                 'DASH_ID': 'seq-to-seq-experiment',
                 'DEBUG': True,
                 'EVAL_INTERVAL': 10,
                 'TEACHER_FORCING_R': 0.5,
                 'INPUT_LANG': 'eng',
                 'LEARNING_RATE': 0.001,
                 'MAX_DATA_LEN': 10,
                 'MAX_OUTPUT_LEN': 100,
                 'N_EPOCH': 5,
                 'N_LAYERS': 1,
                 'OUTPUT_LANG': 'cmn',
                 'SAVE_INTERVAL': 100})
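Struct comes from the project's utils module, which isn't shown here; a minimal sketch, assuming it simply exposes keyword arguments as attributes so that args.N_EPOCH works below:

class Struct:
    """Assumed behavior: turn keyword arguments into attributes."""
    def __init__(self, **entries):
        self.__dict__.update(entries)

# Struct(BATCH_SIZE=10, N_EPOCH=5).N_EPOCH  ->  5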
In [4]:
from train import Session
In [5]:
sess = Session(args)
In [6]:
for i in range(args.N_EPOCH):
    sess.train()
    sess.ledger.green('epoch {} is complete'.format(i))
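Session.train lives in train.py and isn't shown here. Given the TEACHER_FORCING_R setting above, a hedged sketch of how that ratio is typically applied inside the decoder loop; all names are hypothetical and the actual implementation may differ:

import random
import torch
import torch.nn.functional as F

def decode_with_teacher_forcing(decoder, hidden, target, sos_token, ratio=0.5):
    """Assumes decoder(input, hidden) returns log-probabilities of shape (1, vocab_size)."""
    loss = 0.0
    decoder_input = torch.tensor([[sos_token]])
    for t in range(target.size(0)):
        output, hidden = decoder(decoder_input, hidden)
        loss = loss + F.nll_loss(output, target[t].view(1))
        if random.random() < ratio:
            # teacher forcing: feed the ground-truth token to the next step
            decoder_input = target[t].view(1, 1)
        else:
            # free-running: feed the model's own prediction
            decoder_input = output.argmax(dim=-1).detach().view(1, 1)
    return loss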
In [7]:
# TODO: figure out why evaluation always stops.
In [8]:
sentence = sess.evaluate('This is a job.')
print(sentence)
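For the "add evaluate method" item at the top, a hedged sketch of greedy decoding bounded by MAX_OUTPUT_LEN; the encoder/decoder/language objects and the token constants are assumptions for illustration, not the repo's actual API:

import torch

def evaluate(encoder, decoder, input_lang, output_lang, sentence,
             sos_token, eos_token, max_len=100):
    """Greedy decode: encode the input sentence, then emit tokens until EOS or max_len."""
    with torch.no_grad():
        indices = [input_lang.word2index[w] for w in sentence.split()]
        hidden = encoder.init_hidden()            # assumed helper on the encoder
        for idx in indices:                       # encode one token at a time
            _, hidden = encoder(torch.tensor([[idx]]), hidden)
        decoder_input = torch.tensor([[sos_token]])
        words = []
        for _ in range(max_len):                  # greedy decode up to max_len
            output, hidden = decoder(decoder_input, hidden)
            token = output.argmax(dim=-1).item()
            if token == eos_token:
                break
            words.append(output_lang.index2word[token])
            decoder_input = torch.tensor([[token]])
        return ' '.join(words)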