In [1]:
import torch
import torch.autograd as autograd
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim

torch.manual_seed(1)


Out[1]:
<torch._C.Generator at 0x108e83d80>

In [2]:
word_to_ix = {"hello": 0, "world": 1}
embeds = nn.Embedding(2, 5)  # 2 words in vocab, 5-dimensional embeddings
lookup_tensor = torch.LongTensor([word_to_ix["hello"]])
hello_embed = embeds(autograd.Variable(lookup_tensor))
print(hello_embed)


Variable containing:
-2.9718  1.7070 -0.4305 -2.2820  0.5237
[torch.FloatTensor of size 1x5]
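
A quick sketch of two related lookups (not part of the original run, so no
output is shown): passing several indices at once returns one embedding row
per index, and the whole table is available as `embeds.weight`.

# Sketch: batched lookup and direct access to the embedding table.
indices = torch.LongTensor([word_to_ix["hello"], word_to_ix["world"]])
both = embeds(autograd.Variable(indices))
print(both)           # 2 x 5: one row per index
print(embeds.weight)  # the full 2 x 5 parameter matrix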


In [4]:
type(lookup_tensor)


Out[4]:
torch.LongTensor
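
The indices passed to nn.Embedding must be an integer (LongTensor) tensor,
and the lookup itself is just row selection from the weight matrix. A minimal
sketch of the equivalent manual lookup, assuming the `embeds` and `word_to_ix`
defined above:

# Sketch: an embedding lookup selects rows of the weight matrix.
manual = embeds.weight[word_to_ix["hello"]]
print(manual)  # the same five values as hello_embed above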

In [3]:
CONTEXT_SIZE = 2
EMBEDDING_DIM = 10
# We will use Shakespeare Sonnet 2
test_sentence = """When forty winters shall besiege thy brow,
And dig deep trenches in thy beauty's field,
Thy youth's proud livery so gazed on now,
Will be a totter'd weed of small worth held:
Then being asked, where all thy beauty lies,
Where all the treasure of thy lusty days;
To say, within thine own deep sunken eyes,
Were an all-eating shame, and thriftless praise.
How much more praise deserv'd thy beauty's use,
If thou couldst answer 'This fair child of mine
Shall sum my count, and make my old excuse,'
Proving his beauty by succession thine!
This were to be new made when thou art old,
And see thy blood warm when thou feel'st it cold.""".split()
# we should tokenize the input, but we will ignore that for now
# build a list of tuples.  Each tuple is ([ word_i-2, word_i-1 ], target word)
trigrams = [([test_sentence[i], test_sentence[i + 1]], test_sentence[i + 2])
            for i in range(len(test_sentence) - 2)]
# print the first 3, just so you can see what they look like
print(trigrams[:3])

vocab = set(test_sentence)
word_to_ix = {word: i for i, word in enumerate(vocab)}


class NGramLanguageModeler(nn.Module):

    def __init__(self, vocab_size, embedding_dim, context_size):
        super(NGramLanguageModeler, self).__init__()
        self.embeddings = nn.Embedding(vocab_size, embedding_dim)
        self.linear1 = nn.Linear(context_size * embedding_dim, 128)
        self.linear2 = nn.Linear(128, vocab_size)

    def forward(self, inputs):
        embeds = self.embeddings(inputs).view((1, -1))
        out = F.relu(self.linear1(embeds))
        out = self.linear2(out)
        log_probs = F.log_softmax(out, dim=1)
        return log_probs


losses = []
loss_function = nn.NLLLoss()
model = NGramLanguageModeler(len(vocab), EMBEDDING_DIM, CONTEXT_SIZE)
optimizer = optim.SGD(model.parameters(), lr=0.001)

for epoch in range(10):
    total_loss = torch.Tensor([0])
    for context, target in trigrams:

        # Step 1. Prepare the inputs to be passed to the model (i.e., turn the words
        # into integer indices and wrap them in Variables)
        context_idxs = [word_to_ix[w] for w in context]
        context_var = autograd.Variable(torch.LongTensor(context_idxs))

        # Step 2. Recall that torch *accumulates* gradients. Before passing in a
        # new instance, you need to zero out the gradients from the old
        # instance
        model.zero_grad()

        # Step 3. Run the forward pass, getting log probabilities over next
        # words
        log_probs = model(context_var)

        # Step 4. Compute your loss function. (Again, Torch wants the target
        # word wrapped in a variable)
        loss = loss_function(log_probs, autograd.Variable(
            torch.LongTensor([word_to_ix[target]])))

        # Step 5. Do the backward pass and update the parameters
        loss.backward()
        optimizer.step()

        total_loss += loss.data
    losses.append(total_loss)
print(losses)  # The loss decreased every iteration over the training data!


[(['When', 'forty'], 'winters'), (['forty', 'winters'], 'shall'), (['winters', 'shall'], 'besiege')]
[
 521.3076
[torch.FloatTensor of size 1]
, 
 518.9695
[torch.FloatTensor of size 1]
, 
 516.6475
[torch.FloatTensor of size 1]
, 
 514.3402
[torch.FloatTensor of size 1]
, 
 512.0472
[torch.FloatTensor of size 1]
, 
 509.7671
[torch.FloatTensor of size 1]
, 
 507.4986
[torch.FloatTensor of size 1]
, 
 505.2417
[torch.FloatTensor of size 1]
, 
 502.9950
[torch.FloatTensor of size 1]
, 
 500.7588
[torch.FloatTensor of size 1]
]
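
A minimal usage sketch, assuming the `model`, `word_to_ix` and `trigrams`
trained above (the argmax is done in plain Python to stay version-agnostic):
ask the model for its most likely next word after a two-word context.

# Sketch: greedy next-word prediction with the trained n-gram model.
ix_to_word = {i: w for w, i in word_to_ix.items()}
context = ["When", "forty"]  # the gold next word is "winters"
context_var = autograd.Variable(torch.LongTensor([word_to_ix[w] for w in context]))
log_probs = model(context_var).data.view(-1).tolist()
best = max(range(len(log_probs)), key=lambda i: log_probs[i])
print(context, "->", ix_to_word[best])

After only ten epochs at this learning rate the prediction will often still be
wrong; the point is only to show how the trained pieces fit together.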

In [12]:
em = nn.Embedding(6, 2)  # a toy table: 6 "words", 2-dimensional embeddings

In [17]:
vv = autograd.Variable(torch.LongTensor([2, 5, 3]))  # indices of three "words"
print(vv)
emv = em(vv)                 # one 2-dimensional embedding per index
print(emv)
print(emv.view(1, -1))       # flatten the three rows into a single 1 x 6 row


Variable containing:
 2
 5
 3
[torch.LongTensor of size 3]

Variable containing:
-0.3343  0.1837
 0.1878  0.1436
 0.4460  0.6344
[torch.FloatTensor of size 3x2]

Variable containing:
-0.3343  0.1837  0.1878  0.1436  0.4460  0.6344
[torch.FloatTensor of size 1x6]
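
The last print shows what `.view(1, -1)` does inside
NGramLanguageModeler.forward: the three 2-dimensional embeddings are laid end
to end into a single 1 x 6 row. A sketch of the same flattening written with
torch.cat, which may make that intent more explicit:

# Sketch: concatenating the rows gives the same 1 x 6 result as .view(1, -1).
flat = torch.cat([emv[i] for i in range(emv.size(0))]).view(1, -1)
print(flat)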

Continuous Bag of Words (CBOW) Embeddings

The CBOW model predicts a target word from the words around it: the embeddings
of the context words are summed and passed through a linear layer that produces
log-probabilities over the vocabulary.


In [39]:
CONTEXT_SIZE = 2  # 2 words to the left, 2 to the right
raw_text = """We are about to study the idea of a computational process.
Computational processes are abstract beings that inhabit computers.
As they evolve, processes manipulate other abstract things called data.
The evolution of a process is directed by a pattern of rules
called a program. People create programs to direct processes. In effect,
we conjure the spirits of the computer with our spells.""".split()

# By deriving a set from `raw_text`, we deduplicate the array
vocab = set(raw_text)
vocab_size = len(vocab)

word_to_ix = {word: i for i, word in enumerate(vocab)}
data = []
for i in range(2, len(raw_text) - 2):
    context = [raw_text[i - 2], raw_text[i - 1],
               raw_text[i + 1], raw_text[i + 2]]
    target = raw_text[i]
    data.append((context, target))
print(data[:5])


class CBOW(nn.Module):

    def __init__(self, vocab_size, emb_size, ctx_size):
        super(CBOW, self).__init__()
        self.embeddings = nn.Embedding(vocab_size, emb_size)
        self.linear = nn.Linear(emb_size, vocab_size)
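        # Note: ctx_size is accepted for symmetry with the n-gram model above,
        # but it is not used here because forward() sums the context embeddings.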

    def forward(self, inputs):
        embs = self.embeddings(inputs).sum(dim=0).view(1, -1)
        return F.log_softmax(self.linear(embs), dim=1)

# Create your model and train it.  Here is a helper function to get
# the data ready for use by your module.


def make_context_vector(context, word_to_ix):
    idxs = [word_to_ix[w] for w in context]
    tensor = torch.LongTensor(idxs)
    return autograd.Variable(tensor)

model = CBOW(len(vocab), EMBEDDING_DIM, CONTEXT_SIZE)
loss_func = nn.NLLLoss()
optimizer = optim.SGD(params=model.parameters(), lr=0.001)

losses = []
for e in range(10):
    print(f"Epoch {e+1}")
    total_loss = torch.Tensor([0])
    for ctx, target in data:
        x = make_context_vector(ctx, word_to_ix)
        y_gold = autograd.Variable(torch.LongTensor([word_to_ix[target]]))
        model.zero_grad()
        y_pred = model(x)
        loss = loss_func(y_pred, y_gold)
        loss.backward()
        optimizer.step()
        total_loss += loss.data
    losses.append(total_loss)
print(losses)


[(['We', 'are', 'to', 'study'], 'about'), (['are', 'about', 'study', 'the'], 'to'), (['about', 'to', 'the', 'idea'], 'study'), (['to', 'study', 'idea', 'of'], 'the'), (['study', 'the', 'of', 'a'], 'idea')]
Epoch 1
Epoch 2
Epoch 3
Epoch 4
Epoch 5
Epoch 6
Epoch 7
Epoch 8
Epoch 9
Epoch 10
[
 266.7707
[torch.FloatTensor of size 1]
, 
 263.9811
[torch.FloatTensor of size 1]
, 
 261.2458
[torch.FloatTensor of size 1]
, 
 258.5628
[torch.FloatTensor of size 1]
, 
 255.9304
[torch.FloatTensor of size 1]
, 
 253.3470
[torch.FloatTensor of size 1]
, 
 250.8111
[torch.FloatTensor of size 1]
, 
 248.3215
[torch.FloatTensor of size 1]
, 
 245.8769
[torch.FloatTensor of size 1]
, 
 243.4762
[torch.FloatTensor of size 1]
]
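
A minimal usage sketch, assuming the `model`, `data`, `word_to_ix` and
`make_context_vector` defined above: predict the center word of the first
context window and inspect one learned vector. As before, ten epochs are not
enough for accurate predictions; this only shows the mechanics.

# Sketch: CBOW prediction for one window, plus one learned word vector.
ix_to_word = {i: w for w, i in word_to_ix.items()}
ctx, target = data[0]  # (['We', 'are', 'to', 'study'], 'about')
scores = model(make_context_vector(ctx, word_to_ix)).data.view(-1).tolist()
best = max(range(len(scores)), key=lambda i: scores[i])
print(ctx, "-> predicted:", ix_to_word[best], "| gold:", target)
print(model.embeddings.weight[word_to_ix["computer"]])  # vector learned for "computer"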
