In [5]:
# Install dependencies first (run once), e.g.:
# !pip install torch torchtext spacy
# !python -m spacy download en


In [2]:
import torch
from torchtext import data

# Fix the random seed so the run is reproducible
SEED = 1234
torch.manual_seed(SEED)
torch.backends.cudnn.deterministic = True

# TEXT handles the review text (tokenised with spaCy);
# LABEL holds the sentiment as a float, as required by BCEWithLogitsLoss later on
TEXT = data.Field(tokenize = 'spacy')
LABEL = data.LabelField(dtype = torch.float)
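
As a quick sanity check (not part of the original run), the spaCy tokenizer that TEXT will use can be called directly on a string to see how reviews get split into tokens:

import spacy
nlp = spacy.load('en')
print([tok.text for tok in nlp.tokenizer("You won't get distracted for a second.")])
# contractions and punctuation are split off, e.g. 'wo', "n't", '.'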

In [3]:
from torchtext import datasets
train_data, test_data = datasets.IMDB.splits(TEXT, LABEL)


downloading aclImdb_v1.tar.gz
aclImdb_v1.tar.gz: 100%|██████████| 84.1M/84.1M [00:16<00:00, 5.03MB/s]

In [7]:
print(f'Number of training examples: {len(train_data)}')
print(f'Number of testing examples: {len(test_data)}')


Number of training examples: 25000
Number of testing examples: 25000

In [9]:
print(vars(train_data.examples[0]))


{'text': ['This', 'movie', 'is', 'simply', 'wonderful', '!', 'It', "'s", 'got', 'it', 'all', ':', 'laughter', ',', 'sorrow', ',', 'beauty', ',', 'poetry', ',', 'truth', '.', 'All', 'in', 'a', 'simple', 'yet', 'intense', 'story', '--', 'like', 'life', '!', 'You', 'wo', "n't", 'get', 'distracted', 'for', 'a', 'second.<br', '/><br', '/>10/10<br', '/><br', '/>P.S.', 'Somebody', 'tell', 'Hollywood', 'you', 'need', 'a', 'good', 'story', 'to', 'make', 'a', 'good', 'movie', ',', 'and', 'there', 'are', 'so', 'many', 'good', 'stories', 'out', 'there', '.'], 'label': 'pos'}

In [10]:
import random
train_data, valid_data = train_data.split(random_state = random.seed(SEED))
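
By default split() holds out 30% of the examples for validation, which is where the 17,500 / 7,500 counts below come from. The ratio can be set explicitly if needed (a sketch using the legacy torchtext split_ratio argument):

train_data, valid_data = train_data.split(split_ratio = 0.8, random_state = random.seed(SEED))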

In [11]:
print(f'Number of training examples: {len(train_data)}')
print(f'Number of validation examples: {len(valid_data)}')
print(f'Number of testing examples: {len(test_data)}')


Number of training examples: 17500
Number of validation examples: 7500
Number of testing examples: 25000

In [13]:
MAX_VOCAB_SIZE = 25_000
TEXT.build_vocab(train_data, max_size = MAX_VOCAB_SIZE)
LABEL.build_vocab(train_data)

In [14]:
print(f"Unique tokens in TEXT vocabulary: {len(TEXT.vocab)}")
print(f"Unique tokens in LABEL vocabulary: {len(LABEL.vocab)}")


Unique tokens in TEXT vocabulary: 25002
Unique tokens in LABEL vocabulary: 2
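
The two extra entries on top of MAX_VOCAB_SIZE are the special <unk> and <pad> tokens, which is why the TEXT vocabulary has 25,002 entries. As an illustrative check (not in the original run), any word outside the vocabulary should map to the <unk> index:

print(TEXT.vocab.itos[:2])        # ['<unk>', '<pad>']
print(TEXT.vocab.stoi['zxqvwj'])  # expected 0, i.e. the <unk> index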

In [19]:
vars(train_data.examples[0])


Out[19]:
{'text': ['Seriously',
  '.',
  'This',
  'is',
  'one',
  'of',
  'the',
  'most',
  'stupid',
  'family',
  'shows',
  'of',
  'all',
  'time.<br',
  '/><br',
  '/>Plot-',
  'A',
  'family',
  'without',
  'a',
  'mother',
  'and',
  '3',
  '"',
  'dads',
  '"',
  'raise',
  '3',
  'little',
  'girls',
  'in',
  'San',
  'Francisco',
  ',',
  'California.<br',
  '/><br',
  '/>Characters-',
  'Neat',
  'freak',
  'Danny',
  ',',
  'cartoon',
  'loving',
  'Joey',
  ',',
  'hair',
  'obsessed',
  'Jesse',
  ',',
  'spoiled',
  'brat',
  'Michelle',
  ',',
  'stupid',
  'DJ',
  ',',
  'and',
  'almost',
  'normal',
  'Stephanie',
  '.',
  'The',
  'creators',
  'of',
  'this',
  'show',
  'really',
  'want',
  'you',
  'to',
  'hate',
  'the',
  'characters',
  ',',
  'do',
  "n't",
  'they',
  '?',
  '<',
  'br',
  '/><br',
  '/>Therefore',
  ',',
  'I',
  'do',
  'not',
  'think',
  'anyone',
  'should',
  'watch',
  'this',
  'show',
  '.',
  'I',
  'only',
  'chuckled',
  'at',
  'a',
  'few',
  'moments',
  'in',
  'the',
  'show',
  "'s",
  'whole',
  'running',
  ',',
  'and',
  'I',
  'think',
  'that',
  'instead',
  'of',
  'lethal',
  'injection',
  ',',
  'all',
  'criminals',
  'should',
  'be',
  'forced',
  'to',
  'watch',
  'this',
  'show',
  ',',
  'a',
  'torture',
  'far',
  'worse',
  'than',
  'anything',
  'else.<br',
  '/><br',
  '/>1.5/10',
  'or',
  ':',
  'D'],
 'label': 'neg'}

In [20]:
print(TEXT.vocab.freqs.most_common(20))
print(TEXT.vocab.itos[:10])
print(LABEL.vocab.stoi)


[('the', 201894), (',', 192487), ('.', 164866), ('a', 109230), ('and', 109082), ('of', 100541), ('to', 93580), ('is', 76396), ('in', 61098), ('I', 54064), ('it', 53289), ('that', 49329), ('"', 44246), ("'s", 43180), ('this', 42347), ('-', 37017), ('/><br', 35622), ('was', 34767), ('as', 30056), ('with', 29951)]
['<unk>', '<pad>', 'the', ',', '.', 'a', 'and', 'of', 'to', 'is']
defaultdict(<function _default_unk_index at 0x7f2420c20840>, {'neg': 0, 'pos': 1})

In [22]:
BATCH_SIZE = 64

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

train_iterator, valid_iterator, test_iterator = data.BucketIterator.splits(
    (train_data, valid_data, test_data), 
    batch_size = BATCH_SIZE,
    device = device)
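
To see what the iterators yield, one illustrative check (not in the original run) is to pull a single batch and inspect its shape; batch.text is [sentence length, batch size] and batch.label is [batch size]:

batch = next(iter(train_iterator))
print(batch.text.shape)   # e.g. torch.Size([<longest sentence in batch>, 64])
print(batch.label.shape)  # torch.Size([64])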

In [23]:
import torch.nn as nn

class RNN(nn.Module):
    def __init__(self, input_dim, embedding_dim, hidden_dim, output_dim):        
        super().__init__()
        self.embedding = nn.Embedding(input_dim, embedding_dim)
        self.rnn = nn.RNN(embedding_dim, hidden_dim)
        self.fc = nn.Linear(hidden_dim, output_dim)
        
    def forward(self, text):
        #text = [sent len, batch size]
        embedded = self.embedding(text)
        #embedded = [sent len, batch size, emb dim]        
        output, hidden = self.rnn(embedded)        
        #output = [sent len, batch size, hid dim]
        #hidden = [1, batch size, hid dim]        
        assert torch.equal(output[-1,:,:], hidden.squeeze(0))        
        return self.fc(hidden.squeeze(0))

In [24]:
INPUT_DIM = len(TEXT.vocab)
EMBEDDING_DIM = 100
HIDDEN_DIM = 256
OUTPUT_DIM = 1

model = RNN(INPUT_DIM, EMBEDDING_DIM, HIDDEN_DIM, OUTPUT_DIM)
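
As a quick shape check (illustrative, using random token indices rather than real data), a fake batch pushed through the untrained model should come back as one logit per example:

with torch.no_grad():
    fake_batch = torch.randint(0, INPUT_DIM, (50, 4))  # [sent len = 50, batch size = 4]
    print(model(fake_batch).shape)                     # torch.Size([4, 1])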

In [25]:
def count_parameters(model):
    return sum(p.numel() for p in model.parameters() if p.requires_grad)

print(f'The model has {count_parameters(model):,} trainable parameters')


The model has 2,592,105 trainable parameters
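
As a back-of-the-envelope check, that total is the embedding layer (25,002 × 100 = 2,500,200 weights), plus the RNN (100 × 256 input-to-hidden weights, 256 × 256 hidden-to-hidden weights and two bias vectors of 256, i.e. 91,648), plus the linear layer (256 weights + 1 bias = 257), which sums to 2,592,105.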

In [33]:
import torch.optim as optim

optimizer = optim.SGD(model.parameters(), lr=1e-3)
criterion = nn.BCEWithLogitsLoss()
model = model.to(device)
criterion = criterion.to(device)
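
BCEWithLogitsLoss applies a sigmoid internally, so the model outputs raw logits rather than probabilities. A small worked example (illustrative): a logit of 0 corresponds to a predicted probability of 0.5, and the loss is then -ln 0.5 ≈ 0.693 whatever the label, which is exactly the chance-level loss that appears in the training run further down.

print(criterion(torch.tensor([0.0]), torch.tensor([1.0])))  # ≈ 0.6931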

In [34]:
def binary_accuracy(preds, y):
    """
    Returns accuracy per batch, i.e. if you get 8/10 right, this returns 0.8, NOT 8
    """

    #round predictions to the closest integer
    rounded_preds = torch.round(torch.sigmoid(preds))
    correct = (rounded_preds == y).float() #convert into float for division 
    acc = correct.sum() / len(correct)
    return acc
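
A quick illustrative check of this helper: with logits [0.9, -0.3, 2.0] the rounded sigmoid predictions are [1, 0, 1], so against labels [1, 0, 0] it should return 2/3:

print(binary_accuracy(torch.tensor([0.9, -0.3, 2.0]), torch.tensor([1., 0., 0.])))  # ≈ 0.6667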

In [35]:
def train(model, iterator, optimizer, criterion):    
    epoch_loss = 0
    epoch_acc = 0    
    model.train()    
    for batch in iterator:        
        optimizer.zero_grad()                
        predictions = model(batch.text).squeeze(1)        
        loss = criterion(predictions, batch.label)        
        acc = binary_accuracy(predictions, batch.label)        
        loss.backward()        
        optimizer.step()        
        epoch_loss += loss.item()
        epoch_acc += acc.item()
        
    return epoch_loss / len(iterator), epoch_acc / len(iterator)

In [36]:
def evaluate(model, iterator, criterion):    
    epoch_loss = 0
    epoch_acc = 0    
    model.eval()    
    with torch.no_grad():    
        for batch in iterator:
            predictions = model(batch.text).squeeze(1)
            loss = criterion(predictions, batch.label)            
            acc = binary_accuracy(predictions, batch.label)
            epoch_loss += loss.item()
            epoch_acc += acc.item()        
    return epoch_loss / len(iterator), epoch_acc / len(iterator)

In [37]:
import time

def epoch_time(start_time, end_time):
    elapsed_time = end_time - start_time
    elapsed_mins = int(elapsed_time / 60)
    elapsed_secs = int(elapsed_time - (elapsed_mins * 60))
    return elapsed_mins, elapsed_secs

In [38]:
N_EPOCHS = 5

best_valid_loss = float('inf')

for epoch in range(N_EPOCHS):

    start_time = time.time()
    
    train_loss, train_acc = train(model, train_iterator, optimizer, criterion)
    valid_loss, valid_acc = evaluate(model, valid_iterator, criterion)
    
    end_time = time.time()

    epoch_mins, epoch_secs = epoch_time(start_time, end_time)
    
    if valid_loss < best_valid_loss:
        best_valid_loss = valid_loss
        torch.save(model.state_dict(), 'tut1-model.pt')
    
    print(f'Epoch: {epoch+1:02} | Epoch Time: {epoch_mins}m {epoch_secs}s')
    print(f'\tTrain Loss: {train_loss:.3f} | Train Acc: {train_acc*100:.2f}%')
    print(f'\t Val. Loss: {valid_loss:.3f} |  Val. Acc: {valid_acc*100:.2f}%')


Epoch: 01 | Epoch Time: 1m 0s
	Train Loss: 0.694 | Train Acc: 50.20%
	 Val. Loss: 0.696 |  Val. Acc: 50.09%
Epoch: 02 | Epoch Time: 1m 2s
	Train Loss: 0.693 | Train Acc: 49.67%
	 Val. Loss: 0.696 |  Val. Acc: 50.06%
Epoch: 03 | Epoch Time: 1m 4s
	Train Loss: 0.693 | Train Acc: 49.96%
	 Val. Loss: 0.696 |  Val. Acc: 50.56%
Epoch: 04 | Epoch Time: 1m 4s
	Train Loss: 0.693 | Train Acc: 49.94%
	 Val. Loss: 0.696 |  Val. Acc: 49.89%
Epoch: 05 | Epoch Time: 1m 4s
	Train Loss: 0.693 | Train Acc: 50.23%
	 Val. Loss: 0.696 |  Val. Acc: 50.57%
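
These results are essentially chance level: a loss of ln 2 ≈ 0.693 is what a binary classifier pays for predicting probability 0.5 on every example, and accuracy hovers around 50%. A plain single-layer RNN with randomly initialised embeddings is not learning much from this data, so a weak baseline here is expected.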

In [39]:
model.load_state_dict(torch.load('tut1-model.pt'))

test_loss, test_acc = evaluate(model, test_iterator, criterion)

print(f'Test Loss: {test_loss:.3f} | Test Acc: {test_acc*100:.2f}%')


Test Loss: 0.710 | Test Acc: 47.33%
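
To try the model on new text, one possible helper (a sketch, not part of the original run, assuming the spaCy 'en' model is installed and reusing the TEXT vocabulary built above) tokenises a string, maps it to indices and squashes the output logit with a sigmoid; values near 1 mean 'pos' and values near 0 mean 'neg', matching LABEL.vocab.stoi:

import spacy
nlp = spacy.load('en')

def predict_sentiment(model, sentence):
    model.eval()
    # tokenise the same way TEXT did, then look up vocabulary indices
    tokens = [tok.text for tok in nlp.tokenizer(sentence)]
    indices = [TEXT.vocab.stoi[t] for t in tokens]
    # shape [sent len, 1]: a batch containing a single sentence
    tensor = torch.LongTensor(indices).unsqueeze(1).to(device)
    return torch.sigmoid(model(tensor)).item()

predict_sentiment(model, "This film is terrible")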

In [ ]: