In [13]:
# Lab 12 RNN
import torch
import torch.nn as nn
from torch.autograd import Variable

torch.manual_seed(777)  # reproducibility

# hyperparameters
learning_rate = 0.1
num_epochs = 15

idx2char = ['h', 'i', 'e', 'l', 'o']

# Teach hello: hihell -> ihello
x_data = [[0, 1, 0, 2, 3, 3]]   # hihell
x_one_hot = [[[1, 0, 0, 0, 0],   # h 0
              [0, 1, 0, 0, 0],   # i 1
              [1, 0, 0, 0, 0],   # h 0
              [0, 0, 1, 0, 0],   # e 2
              [0, 0, 0, 1, 0],   # l 3
              [0, 0, 0, 1, 0]]]  # l 3

y_data = [1, 0, 2, 3, 3, 4]    # ihello
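
A hand-written one-hot table works for this toy example, but the same encoding can be derived from x_data with an identity-matrix lookup. A minimal sketch (torch.eye indexing is standard PyTorch; x_one_hot_auto is our name, not part of the original lab):

In [ ]:
# Sketch: build the one-hot encoding from x_data instead of by hand.
eye = torch.eye(5)                               # one identity row per character
x_one_hot_auto = eye[torch.LongTensor(x_data)]   # shape: (1, 6, 5)
# x_one_hot_auto reproduces the hand-written x_one_hot above.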

In [14]:
# Since we have a single batch of samples, we wrap them in Variables only once
inputs = torch.Tensor(x_one_hot)
labels = torch.LongTensor(y_data)

inputs = Variable(inputs)
labels = Variable(labels)

num_classes = 5
input_size = 5  # one-hot size
hidden_size = 5  # output size of the RNN; 5 so it can directly predict one-hot
batch_size = 1   # one sentence
sequence_length = 6  # |ihello| == 6
num_layers = 1  # one-layer rnn
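
These sizes fix the tensor shapes the model will see. A quick sanity-check sketch using standard .size() calls:

In [ ]:
# Sketch: confirm the shapes implied by the sizes above.
print(inputs.size())   # (batch_size, sequence_length, input_size) == (1, 6, 5)
print(labels.size())   # one label per time step == (6,)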

In [15]:
labels


Out[15]:
Variable containing:
 1
 0
 2
 3
 3
 4
[torch.LongTensor of size 6]

In [16]:
class RNN(nn.Module):

    def __init__(self, num_classes, input_size, hidden_size,
                 num_layers, sequence_length):
        super(RNN, self).__init__()
        self.num_classes = num_classes
        self.num_layers = num_layers
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.sequence_length = sequence_length
        # Set up the RNN layer
        # Note: batch_first=False by default.
        # When True, inputs are (batch_size, sequence_length, input_dimension)
        # instead of (sequence_length, batch_size, input_dimension)
        self.rnn = nn.RNN(input_size=input_size, hidden_size=hidden_size,
                          num_layers=num_layers, batch_first=True)
        # Fully connected layer to obtain outputs corresponding to the number
        # of classes
        self.fc = nn.Linear(hidden_size, num_classes)

    def forward(self, x):
        # Initialize the hidden state (a plain RNN has no cell state)
        # h_0 shape: (num_layers * num_directions, batch, hidden_size)
        h_0 = Variable(torch.zeros(
            self.num_layers, x.size(0), self.hidden_size))

        # Reshape input to (batch, seq_len, input_size); view returns a new
        # tensor, so the result must be assigned back
        x = x.view(x.size(0), self.sequence_length, self.input_size)

        # Propagate input through the RNN
        # Input: (batch, seq_len, input_size) since batch_first=True
        out, _ = self.rnn(x, h_0)

        # Reshape output from (batch, seq_len, hidden_size) to (batch *
        # seq_len, hidden_size)
        out = out.view(-1, self.hidden_size)
        # Return outputs applied to fully connected layer
        out = self.fc(out)
        return out

In [17]:
# Instantiate RNN model
rnn = RNN(num_classes, input_size, hidden_size, num_layers, sequence_length)
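
Before training, a single forward pass confirms the output shape CrossEntropyLoss expects: (batch_size * sequence_length, num_classes) logits to score against the sequence_length labels. A minimal sketch:

In [ ]:
# Sketch: one untrained forward pass as a shape check.
print(rnn(inputs).size())   # expect (6, 5)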

In [18]:
# Set loss and optimizer function
criterion = torch.nn.CrossEntropyLoss()    # Softmax is internally computed.
optimizer = torch.optim.Adam(rnn.parameters(), lr=learning_rate)
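
CrossEntropyLoss combines LogSoftmax and NLLLoss in a single module, which is why the fc layer emits raw logits with no softmax of its own. A sketch of that standard identity (not part of the original lab):

In [ ]:
# Sketch: CrossEntropyLoss(x, y) equals NLLLoss(LogSoftmax(x), y).
log_probs = nn.LogSoftmax(dim=1)(rnn(inputs))
print(nn.NLLLoss()(log_probs, labels))   # matches criterion(rnn(inputs), labels)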

In [19]:
# Train the model
for epoch in range(num_epochs):
    outputs = rnn(inputs)
    optimizer.zero_grad()
    loss = criterion(outputs, labels)
    loss.backward()
    optimizer.step()
    # Greedy decoding: pick the highest-scoring character at each step
    _, idx = outputs.max(1)
    idx = idx.data.numpy()
    result_str = [idx2char[c] for c in idx.squeeze()]
    print("epoch: %d, loss: %1.3f" % (epoch + 1, loss.data[0]))
    print("Predicted string: ", ''.join(result_str))

print("Learning finished!")


epoch: 1, loss: 1.565
Predicted string:  ililhi
epoch: 2, loss: 1.365
Predicted string:  llelel
epoch: 3, loss: 1.204
Predicted string:  elelll
epoch: 4, loss: 1.053
Predicted string:  ehelll
epoch: 5, loss: 0.910
Predicted string:  ehelll
epoch: 6, loss: 0.778
Predicted string:  ihelll
epoch: 7, loss: 0.665
Predicted string:  ihelll
epoch: 8, loss: 0.572
Predicted string:  ihelll
epoch: 9, loss: 0.502
Predicted string:  ihelll
epoch: 10, loss: 0.444
Predicted string:  ihelll
epoch: 11, loss: 0.389
Predicted string:  ihelll
epoch: 12, loss: 0.326
Predicted string:  ihello
epoch: 13, loss: 0.271
Predicted string:  ihello
epoch: 14, loss: 0.222
Predicted string:  ihello
epoch: 15, loss: 0.177
Predicted string:  ihello
Learning finished!
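
With training finished, the same greedy argmax used inside the loop decodes the whole prediction in one pass. A minimal sketch:

In [ ]:
# Sketch: greedy decode with the trained model; expect 'ihello'.
_, idx = rnn(inputs).max(1)
print(''.join(idx2char[c] for c in idx.data.numpy()))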
