In [1]:
# Imports — stdlib, then third-party, then project-local (dpp_nets).
# Fixed: the torch / nn / Variable block was duplicated verbatim, and the
# dpp_nets.utils.language imports were split across two statements.
import argparse
import os
import shutil
import gzip

import torch
import torch.nn as nn
from torch.autograd import Variable  # legacy autograd wrapper (pre-0.4 PyTorch API)

from dpp_nets.utils.language import Vocabulary, BeerDataset, custom_collate, EvalSet
from dpp_nets.layers.layers import ChunkTrainerReinforce, ChunkTrainerRelReinforce

In [33]:
# Load a saved training checkpoint, mapped onto CPU so no GPU is required.
# Fixed: the name `model` was first bound to the filename string and then
# silently overwritten by the checkpoint dict — use a distinct name for the file.
model_name = 'shortwords1reg0.1reg_mean30.0lr0.0001reinforce_ckp.pth.tar'
model_dir = '/Users/Max/checkpoints/beer_reviews/reinforce/'
model_path = os.path.join(model_dir, model_name)
# `model` is the checkpoint dict (keys used below: 'model', 'embedding',
# 'aspect', 'mode', 'reg', 'reg_mean', and the loss-history tables).
model = torch.load(model_path, map_location=lambda storage, loc: storage)

In [ ]:
# Build the vocabulary: pretrained embeddings plus the training-corpus words,
# then overwrite the embedding weights with the checkpoint's fine-tuned ones.
# Fixed: removed the redundant re-import of Vocabulary (already imported in
# the top imports cell).
embd_path = '/Users/Max/data/beer_reviews/review+wiki.filtered.200.txt.gz'
word_path = '/Users/Max/data/beer_reviews/reviews.all.train.words.txt.gz'

vocab = Vocabulary()
vocab.loadPretrained(embd_path)   # load 200-dim pretrained word vectors
vocab.setStops()
vocab.loadCorpus(word_path)
vocab.updateEmbedding()
vocab.setCuda(False)              # evaluate on CPU
# Replace the pretrained embedding weights with the checkpoint's trained ones.
vocab.EmbeddingBag.load_state_dict(model['embedding'])

In [ ]:
# Model hyper-parameters — these must match the run stored in the checkpoint.
EMBD_DIM = 200
KERNEL_DIM = 200
HIDDEN_DIM = 500
ENC_DIM = 200
# Multi-aspect checkpoints ('all'/'short') predict 3 targets, otherwise 1.
TARGET_DIM = 3 if model['aspect'] in {'all', 'short'} else 1
ALPHA_ITER = 1

# Pick the trainer class by extraction mode, then build one instance.
trainer_cls = ChunkTrainerReinforce if model['mode'] == 'sents' else ChunkTrainerRelReinforce
trainer = trainer_cls(EMBD_DIM, HIDDEN_DIM, KERNEL_DIM, ENC_DIM, TARGET_DIM, ALPHA_ITER)

# Restore trained weights and evaluation-time settings from the checkpoint.
trainer.load_state_dict(model['model'])
trainer.activation = nn.Sigmoid()
trainer.reg = model['reg']
trainer.reg_mean = model['reg_mean']

# Evaluation set built from the human rationale annotations.
rat_path = '/Users/Max/data/beer_reviews/annotations.json'
evalset = EvalSet(rat_path, vocab)

In [ ]:
trainer.kernel_net.layer1.weight

In [ ]:
# Print a per-epoch table of training losses stored in the checkpoint.
print('__________________________Training Table__________________________')
for epoch, losses in model['train_loss'].items():
    total = losses[0]
    pred = model['train_pred_loss'][epoch][0]
    reg = model['train_reg_loss'][epoch][0]
    row = ['Epoch: %d' % epoch, 'Loss: %.5f' % total,
           'Pred Loss: %.5f' % pred, 'Reg Loss: %.5f' % reg]
    print(" | ".join(row))

In [ ]:
from dpp_nets.helper.plotting import plot_floats

# Training curves: total objective, prediction loss, and regulariser.
training_curves = [
    ('train_loss', 'MSE + Reg', 'Training MSE + Reg'),
    ('train_pred_loss', 'MSE', 'Training MSE'),
    ('train_reg_loss', 'Reg', 'Training Reg'),
]
for key, ylabel, title in training_curves:
    plot_floats(model[key], xlabel='Epochs', ylabel=ylabel, title=title)

In [ ]:
# Print a per-epoch table of validation losses stored in the checkpoint.
print('_________________________Validation Table_________________________')
for epoch, losses in model['val_loss'].items():
    total = losses[0]
    pred = model['val_pred_loss'][epoch][0]
    reg = model['val_reg_loss'][epoch][0]
    row = ['Epoch: %d' % epoch, 'Loss: %.5f' % total,
           'Pred Loss: %.5f' % pred, 'Reg Loss: %.5f' % reg]
    print(" | ".join(row))

In [ ]:
from dpp_nets.helper.plotting import plot_floats

# Validation Plots (comment corrected — the previous label said "Training")
plot_floats(model['val_loss'], xlabel='Epochs', ylabel='MSE + Reg', title='Validation MSE + Reg')
plot_floats(model['val_pred_loss'], xlabel='Epochs', ylabel='MSE', title='Validation MSE')
plot_floats(model['val_reg_loss'], xlabel='Epochs', ylabel='Reg', title='Validation Reg')

In [ ]:
# Evaluation on the held-out test set: loss breakdown, then
# rationale-extraction precision and the extraction percentage.
loss, pred_loss, reg_loss = evalset.computeLoss(trainer, model['mode'])
print(" | ".join(['Test Set:', 'Loss: %.5f' % loss,
                  'Pred Loss: %.5f' % pred_loss, 'Reg Loss: %.5f' % reg_loss]))

prec, extract = evalset.evaluatePrecision(trainer, model['mode'])
print(" | ".join(['Test Set:', 'Precision: %.5f' % prec, 'Extract: %.5f' % extract]))

In [28]:
# Random Samples: draw a random annotated review, print the extracted words
# against the gold rationales, then score the prediction.
# NOTE(review): the RuntimeError below (input [100 x 3] vs target [3]) is
# consistent with trainer.alpha_iter having been set to 100 by a later cell
# before this one was re-run — out-of-order execution. Restart the kernel and
# run top-to-bottom (alpha_iter = 1) to reproduce cleanly. TODO confirm.
evalset.sample(trainer, model['mode'])


index is: 468
('weak',) set() [('weak',)]
('beers',) set() [('beers',)]
('substance',) set() [('substance',)]
('forgettable',) set() [('forgettable',)]
('sour',) {'1'} [('sour',)]
('exciting',) set() [('exciting',)]
('stout',) {'0'} [('stout',)]
('saying',) set() [('saying',)]
('quickly',) {'0'} [('quickly',)]
('likewise',) {'0'} [('likewise',)]
Precision is: 0.4
Extraction Percentage is: 0.08064516129032258
[(Let me start by saying it's exciting to see beers from Mexico which have color and substance and flavor., set()), (In fact this is the first craft beer I've ever had from Mexico.		, set()), (Poured a meager 1" head which quickly dissipated to lace., {'0'}), (Dark in color almost like a stout and flavor likewise was similar to a mild stout but with a lighter body., {'0'}), (Smell and taste both had touch of sour almost like a drop of Flemish sour.		, {'1'}), (Anyway, a big step up from forgettable weak Mexican beers, but nowhere close to the craft beers from the States or from European classics.		, set()), (But keep up the good work Cucapa., set())]
---------------------------------------------------------------------------
RuntimeError                              Traceback (most recent call last)
<ipython-input-28-b8df56b68075> in <module>()
      1 # Random Samples
----> 2 evalset.sample(trainer, model['mode'])

~/git/dpp_nets/dpp_nets/utils/language.py in sample(self, trainer, mode, ix)
    626 
    627         # Prediction and target
--> 628         trainer(self.vocab.returnEmbds(review.clean.keys()).unsqueeze(0), Variable(target))
    629         pred = trainer.pred.data
    630         loss = trainer.loss.data[0]

~/Coding/anaconda2/envs/torch2/lib/python3.6/site-packages/torch/nn/modules/module.py in __call__(self, *input, **kwargs)
    222         for hook in self._forward_pre_hooks.values():
    223             hook(self, input)
--> 224         result = self.forward(*input, **kwargs)
    225         for hook in self._forward_hooks.values():
    226             hook_result = hook(self, input, result)

~/git/dpp_nets/dpp_nets/layers/layers.py in forward(self, words, target)
    764             self.pred = self.activation(self.pred)
    765 
--> 766         self.pred_loss = self.criterion(self.pred, target)
    767 
    768         if self.reg:

~/Coding/anaconda2/envs/torch2/lib/python3.6/site-packages/torch/nn/modules/module.py in __call__(self, *input, **kwargs)
    222         for hook in self._forward_pre_hooks.values():
    223             hook(self, input)
--> 224         result = self.forward(*input, **kwargs)
    225         for hook in self._forward_hooks.values():
    226             hook_result = hook(self, input, result)

~/Coding/anaconda2/envs/torch2/lib/python3.6/site-packages/torch/nn/modules/loss.py in forward(self, input, target)
    270     def forward(self, input, target):
    271         _assert_no_grad(target)
--> 272         return F.mse_loss(input, target, size_average=self.size_average)
    273 
    274 

~/Coding/anaconda2/envs/torch2/lib/python3.6/site-packages/torch/nn/functional.py in mse_loss(input, target, size_average)
    817 
    818 def mse_loss(input, target, size_average=True):
--> 819     return _functions.thnn.MSELoss.apply(input, target, size_average)
    820 
    821 

~/Coding/anaconda2/envs/torch2/lib/python3.6/site-packages/torch/nn/_functions/thnn/auto.py in forward(ctx, input, target, *args)
     45         output = input.new(1)
     46         getattr(ctx._backend, update_output.name)(ctx._backend.library_state, input, target,
---> 47                                                   output, *ctx.additional_args)
     48         return output
     49 

RuntimeError: input and target have different number of elements: input[100 x 3] has 300 elements, while target[3] has 3 elements at /Users/soumith/miniconda2/conda-bld/pytorch_1503975723910/work/torch/lib/THNN/generic/MSECriterion.c:12

In [11]:
# Random Marginals: for review index 397, print each candidate word with its
# marginal inclusion probability (the output below lists them in descending
# order of probability).
evalset.computeMarginals(trainer, model['mode'], 397)


index is: 397
0 0.549766924786 ('luscious',)
1 0.519408797876 ('balanced',)
2 0.50610014369 ('alcohol',)
3 0.49311295878 ('pear',)
4 0.461492428317 ('aroma',)
5 0.394499445425 ('bitter',)
6 0.342531732584 ('minimal',)
7 0.323079637132 ('initial',)
8 0.30356740096 ('rather',)
9 0.303334538764 ('also',)
10 0.303199056182 ('orangy',)
11 0.28635213081 ('modest',)
12 0.279423096371 ('maple',)
13 0.262360241968 ('back',)
14 0.239793818183 ('offset',)
15 0.231872278559 ('took',)
16 0.226868038352 ('--',)
17 0.217652021045 ('medium',)
18 0.212753540135 ("'s",)
19 0.205490446092 ('potent',)
20 0.204760849527 ('quickly',)
21 0.188842681776 ('think',)
22 0.178785820959 ('poured',)
23 0.177608366315 ('clear',)
24 0.173306350039 ('dark',)
25 0.162810314981 ('fades',)
26 0.161427160731 ('2002',)
27 0.161040237487 ('cherry',)
28 0.16007593745 ('tart',)
29 0.159982566604 ('2003',)
30 0.158804968384 ('well',)
31 0.149028697022 ('malts',)
32 0.146853994495 ('expected',)
33 0.14161987744 ('spices',)
34 0.139594424588 ('reasonably',)
35 0.137691781302 ('head',)
36 0.133268548624 ('store',)
37 0.131682234088 ('taste',)
38 0.131602793425 ('somewhere',)
39 0.1310659085 ('kick',)
40 0.126137507511 ('sweetness',)
41 0.125647371761 ('sharp',)
42 0.125638971784 ('section',)
43 0.122690341967 ('date',)
44 0.122085377148 ('case',)
45 0.11598082348 ('start',)
46 0.115281511761 ('touch',)
47 0.112379916899 ('gamble',)
48 0.108956961116 ('thickness',)
49 0.106414950003 ('gold',)
50 0.10460946615 ('carbonation',)
51 0.102683578972 ('reported',)
52 0.0966297155991 ('mouthfeel',)
53 0.0956070130292 ('discounted',)
54 0.0885062963862 ('evident',)
55 0.0830458013677 ('hops',)
56 0.080061961501 ('syrupy',)
57 0.0436853830314 ('finish',)

In [22]:
evalset.computeMUEPredLoss(trainer, model['mode'],100)


Out[22]:
(0.012876863448606085, 0.012892113315031472)

In [13]:
evalset.computeMAPPredLoss(trainer, model['mode'])


Out[13]:
(0.014311427982221337, 0.0831022014637619)

In [14]:
from torch.autograd import Variable

# Build a single-review batch and replicate its target 100 times, so one
# forward pass can score 100 sampled extractions of the same review.
first_review = evalset.words[0]
review = evalset.vocab.returnEmbds(first_review.clean.keys()).unsqueeze(0)
target = Variable(torch.stack([evalset.targets[0]] * 100))

In [15]:
# Raise the number of sampled extractions per forward pass to 100 and run
# the trainer on the replicated-target batch built above.
# NOTE(review): this mutates shared trainer state in place — earlier cells
# (e.g. evalset.sample, which triggered the [100 x 3] vs [3] RuntimeError)
# will break if re-run after this without restarting the kernel.
trainer.alpha_iter = 100
trainer.sampler.alpha_iter = 100
trainer(review, target)


Out[15]:
Variable containing:
1.00000e-02 *
  2.3543
[torch.FloatTensor of size 1]