Language Model


In [9]:
ls data


cifar-10-batches-py/    mnist/                  raw/
cifar-10-python.tar.gz  picasso.jpg             train.txt
dancing.jpg             processed/

In [13]:
!wget https://raw.githubusercontent.com/yunjey/pytorch-tutorial/master/tutorials/02-intermediate/language_model/data/train.txt -P data


--2018-01-20 16:25:02--  https://raw.githubusercontent.com/yunjey/pytorch-tutorial/master/tutorials/02-intermediate/language_model/data/train.txt
Resolving raw.githubusercontent.com... 151.101.72.133
Connecting to raw.githubusercontent.com|151.101.72.133|:443... connected.
HTTP request sent, awaiting response... 200 OK
Length: 5101618 (4.9M) [text/plain]
Saving to: ‘data/train.txt.1’

train.txt.1         100%[===================>]   4.87M  3.53MB/s    in 1.4s    

2018-01-20 16:25:04 (3.53 MB/s) - ‘data/train.txt.1’ saved [5101618/5101618]


In [14]:
less data/train.txt

In [30]:
import os

class Dictionary(object):
    def __init__(self):
        self.word2idx = {}
        self.idx2word = {}
        self.idx = 0
    
    def add_word(self, word):
        if word not in self.word2idx:
            self.word2idx[word] = self.idx
            self.idx2word[self.idx] = word
            self.idx += 1
    
    def __len__(self):
        return len(self.word2idx)

In [31]:
d = Dictionary()
d.add_word('Me')
d.add_word('Hello')
print(d.word2idx)
print(d.idx2word)
print(len(d))


{'Me': 0, 'Hello': 1}
{0: 'Me', 1: 'Hello'}
2

In [84]:
import torch

class Corpus(object):
    def __init__(self, path='./data'):
        self.dictionary = Dictionary()

    def get_data(self, path, batch_size=20):
        # add words to the dictionary
        with open(path, 'r') as f:
            tokens = 0
            for line in f:
                words = line.split() + ['<eos>']
                tokens += len(words)
                for word in words:
                    self.dictionary.add_word(word)
        
        # tokenize the file content
        ids = torch.LongTensor(tokens)
        token = 0
        with open(path, 'r') as f:
            for line in f:
                words = line.split() + ['<eos>']
                for word in words:
                    ids[token] = self.dictionary.word2idx[word]
                    token += 1

        # trim so the number of tokens is divisible by the batch size
        num_batches = ids.size(0) // batch_size
        ids = ids[:num_batches * batch_size]
        return ids.view(batch_size, -1)
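
A quick sanity check of the batching layout (a minimal sketch, not part of the original notebook): view(batch_size, -1) puts consecutive tokens in the same row, so each row is one contiguous slice of the corpus that the training loop later cuts into seq_length chunks.

In [ ]:
# Toy illustration of get_data's trimming and reshaping with batch_size=4:
# 10 tokens are trimmed to 8, then laid out row by row.
toy_ids = torch.arange(0, 10).long()
toy_batches = toy_ids.size(0) // 4
toy_ids = toy_ids[:toy_batches * 4]
print(toy_ids.view(4, -1))
# rows: [0, 1], [2, 3], [4, 5], [6, 7]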

In [89]:
import torch
import torch.nn as nn
import numpy as np
from torch.autograd import Variable
#from data_utils import Dictionary, Corpus

# hyperparameters
embed_size = 128
hidden_size = 1024
num_layers = 1
num_epochs = 5
num_samples = 1000
batch_size = 20
seq_length = 30
learning_rate = 0.002

# Load Penn Treebank Dataset
train_path = './data/train.txt'
sample_path = './sample.txt'

corpus = Corpus()
# the corpus converted to sequences of word indices
ids = corpus.get_data(train_path, batch_size)  # (20, 46479)
vocab_size = len(corpus.dictionary)  # 10000
num_batches = ids.size(1) // seq_length  # 1549
print('ids:', ids.size())
print('vocab_size:', vocab_size)
print('num_batches:', num_batches)


ids: torch.Size([20, 46479])
vocab_size: 10000
num_batches: 1549

In [124]:
# RNN based language model
class RNNLM(nn.Module):
    def __init__(self, vocab_size, embed_size, hidden_size, num_layers):
        super(RNNLM, self).__init__()
        self.embed = nn.Embedding(vocab_size, embed_size)
        self.lstm = nn.LSTM(embed_size, hidden_size, num_layers, batch_first=True)
        self.linear = nn.Linear(hidden_size, vocab_size)
        self.init_weight()
    
    def init_weight(self):
        self.embed.weight.data.uniform_(-0.1, 0.1)
        self.linear.bias.data.fill_(0)
        self.linear.weight.data.uniform_(-0.1, 0.1)
    
    def forward(self, x, h):  # [20, 30]
        print('x:', x.size())
        print('h:', h[0].size())
        print('c:', h[1].size())

        # embed word ids to vectors
        x = self.embed(x)  # [20, 30, 128]
        print('embed:', x.size())

        # forward propagate RNN
        out, h = self.lstm(x, h)  # [20, 30, 1024]
        out = out.contiguous().view(out.size(0) * out.size(1), out.size(2))
        out = self.linear(out)        

        return out, h
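
The forward pass flattens the LSTM output to (batch_size * seq_length, hidden_size) before the linear layer, so the logits come out as a 2-D (N, vocab_size) matrix that nn.CrossEntropyLoss can compare against a 1-D vector of flattened target word ids. A minimal shape check (illustrative only, using the hyperparameters defined above):

In [ ]:
# Hypothetical shape check: 20*30 = 600 flattened positions, 10000 classes.
dummy_logits = Variable(torch.randn(20 * 30, 10000))
dummy_targets = Variable(torch.zeros(20 * 30).long())  # all class 0, shapes are what matter
print(nn.CrossEntropyLoss()(dummy_logits, dummy_targets))  # a single scalar loss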

In [125]:
model = RNNLM(vocab_size, embed_size, hidden_size, num_layers)
#model.cuda()

criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)

In [126]:
inputs = Variable(ids[:, 0:30])
targets = Variable(ids[:, 1:31])
states = (Variable(torch.zeros(num_layers, batch_size, hidden_size)),
          Variable(torch.zeros(num_layers, batch_size, hidden_size)))
model(inputs, states)


x: torch.Size([20, 30])
h: torch.Size([1, 20, 1024])
c: torch.Size([1, 20, 1024])
embed: torch.Size([20, 30, 128])
Out[126]:
(Variable containing:
  1.2109e-02 -4.2164e-03 -1.9561e-02  ...  -2.2535e-03  1.3184e-02  1.7042e-02
  2.0521e-02 -1.2563e-02 -4.0352e-02  ...  -1.2311e-03  1.7413e-02  2.7035e-02
  2.2108e-02 -2.7091e-02 -5.1551e-02  ...  -9.8233e-03  1.4894e-02  2.3743e-02
                 ...                   ⋱                   ...                
  1.5702e-02 -1.9350e-02 -6.8386e-02  ...  -3.6951e-03  3.1976e-02  3.7417e-02
  1.3191e-02 -2.7237e-02 -7.3997e-02  ...  -5.2689e-03  4.0979e-02  3.3340e-02
  2.4224e-02 -2.9213e-02 -7.2764e-02  ...   9.0177e-03  2.9738e-02  3.8463e-02
 [torch.FloatTensor of size 600x10000], (Variable containing:
  ( 0  ,.,.) = 
  1.00000e-02 *
   -1.6489 -0.4631 -2.1975  ...   1.1436 -3.2848 -1.6932
   -0.4430  0.0342 -1.6100  ...   0.8339 -2.7952 -2.0236
   -0.5340 -0.5879 -1.9551  ...   1.1781 -2.7270 -1.9951
             ...             ⋱             ...          
   -0.6378 -1.6057 -1.5392  ...   0.4422 -3.1842 -1.6217
   -1.1324 -0.6035 -2.3166  ...   0.6852 -2.6370 -1.9747
   -0.4819 -0.5073 -1.1526  ...   0.2920 -3.2456 -2.1341
  [torch.FloatTensor of size 1x20x1024], Variable containing:
  ( 0  ,.,.) = 
  1.00000e-02 *
   -3.3623 -0.9232 -4.4940  ...   2.2906 -6.5448 -3.3243
   -0.9066  0.0683 -3.2917  ...   1.6573 -5.5193 -3.9614
   -1.0873 -1.1723 -4.0166  ...   2.3650 -5.4462 -3.9202
             ...             ⋱             ...          
   -1.2896 -3.1974 -3.1445  ...   0.8813 -6.3159 -3.1986
   -2.2821 -1.1934 -4.7339  ...   1.3766 -5.2770 -3.8497
   -0.9760 -1.0122 -2.3656  ...   0.5813 -6.4086 -4.1912
  [torch.FloatTensor of size 1x20x1024]))

In [ ]:
# training
for epoch in range(num_epochs):
    # initial hidden and memory states
    states = (Variable(torch.zeros(num_layers, batch_size, hidden_size)),
              Variable(torch.zeros(num_layers, batch_size, hidden_size)))
    
    for i in range(0, ids.size(1) - seq_length, seq_length):
        # get batch inputs and targets
        # the target sequence is the input sequence shifted by one word
        # in: [0:30], out: [1:31]
        # in: [1:31], out: [2:32]
        inputs = Variable(ids[:, i:i+seq_length])
        targets = Variable(ids[:, (i+1):(i+1)+seq_length])
        
        model.zero_grad()
        # truncated BPTT: detach the hidden states from the previous graph
        states = tuple(state.detach() for state in states)
        outputs, states = model(inputs, states)
        loss = criterion(outputs, targets.view(-1))
        loss.backward()
        torch.nn.utils.clip_grad_norm(model.parameters(), 0.5)
        optimizer.step()
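
The hyperparameters num_samples and sample_path set earlier belong to the text-generation step, which this notebook stops short of. A minimal sampling sketch along the lines of the upstream yunjey tutorial (an assumed sketch, not run here, using the same old Variable API): start from a random word id, repeatedly feed the model's own prediction back in, and write the generated words to sample_path. The debug prints left in RNNLM.forward would fire on every step; _input is named to avoid shadowing the built-in input.

In [ ]:
# sampling sketch (assumption: same Variable-era API as above)
with open(sample_path, 'w') as f:
    # fresh hidden and memory states for a batch of one
    state = (Variable(torch.zeros(num_layers, 1, hidden_size)),
             Variable(torch.zeros(num_layers, 1, hidden_size)))

    # pick a random first word uniformly over the vocabulary
    prob = torch.ones(vocab_size)
    _input = Variable(torch.multinomial(prob, num_samples=1).unsqueeze(1), volatile=True)

    for i in range(num_samples):
        output, state = model(_input, state)       # logits for the single position
        prob = output.squeeze().data.exp()         # unnormalized next-word weights
        word_id = torch.multinomial(prob, 1)[0]    # sample the next word id
        _input.data.fill_(word_id)                 # feed it back in
        word = corpus.dictionary.idx2word[word_id]
        f.write('\n' if word == '<eos>' else word + ' ')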