Deep Learning

Assignment 5

The goal of this assignment is to train a skip-gram model over Text8 data.
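
Skip-gram trains word vectors by asking each word to predict the words that appear around it. As a quick illustration (not part of the assignment), the cell below lists the (center, context) pairs that a window of one word on each side would produce for a toy sentence.


In [ ]:
# Illustrative only: the (center, context) pairs a skip-gram model with a
# one-word window would be trained on for a toy sentence.
toy = 'the quick brown fox'.split()
pairs = [(toy[i], toy[j]) for i in range(len(toy))
         for j in (i - 1, i + 1) if 0 <= j < len(toy)]
print(pairs)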


In [ ]:
%matplotlib inline
# These are all the modules we'll be using later. Make sure you can import them
# before proceeding further.
from __future__ import print_function
import collections
import math
import numpy as np
import os
import random
import sys
import tensorflow as tf
import zipfile
from matplotlib import pylab
from six.moves import range
from six.moves.urllib.request import urlretrieve
from sklearn.manifold import TSNE

Download the data from the source website if necessary.


In [ ]:
url = 'http://mattmahoney.net/dc/'

def maybe_download(filename, expected_bytes):
    """Download a file if not present, and make sure it's the right size."""
    directory = os.path.dirname(filename)
    if directory and not os.path.exists(directory):
        os.makedirs(directory)
    if not os.path.exists(filename):
        # Build the URL from the base name so the file can be saved under a local directory like temp/.
        filename, _ = urlretrieve(url + os.path.basename(filename), filename)
    statinfo = os.stat(filename)
    if statinfo.st_size == expected_bytes:
        print('Found and verified %s' % filename)
    else:
        print(statinfo.st_size)
        raise Exception('Failed to verify ' + filename + '. Can you get to it with a browser?')
    return filename

filename = maybe_download('temp/text8.zip', 31344016)

Read the data into a string.


In [ ]:
def read_data(filename):
    """Extract the first file enclosed in a zip file as a list of words."""
    with zipfile.ZipFile(filename) as f:
        data = tf.compat.as_str(f.read(f.namelist()[0])).split()
    return data

words = read_data(filename)
print('Data size %d' % len(words))

print("Words[:5]=", words[:5])

Build the dictionary and replace rare words with UNK token.


In [ ]:
class Document(object):
    """Given a sequence of words, builds a two-way mapping between words and numeric IDs,
       and also tracks word frequencies."""
    def __init__(self, words, vocabulary_size):
        total_counts = collections.Counter(words)
        self.vocabulary_total_size = len(total_counts)
        self.word_counts = [['UNK', -1]] 
        self.word_counts.extend(total_counts.most_common(vocabulary_size - 1))
        self.word_IDs = dict()
        for word, _ in self.word_counts:
            self.word_IDs[word] = len(self.word_IDs)
        self.sequence = list()
        unknowns = 0
        for word in words:
            if word in self.word_IDs:
                index = self.word_IDs[word]
            else:
                index = 0  # self.word_IDs['UNK']
                unknowns = unknowns + 1
            self.sequence.append(index)
        self.word_counts[0][1] = unknowns
        self.ID_to_word = dict(zip(self.word_IDs.values(), self.word_IDs.keys()))
        
    def vocabulary_size(self):
        return len(self.word_IDs)
    
    def print_stats(self):
        print("Vocabulary", self.vocabulary_size(), "of", self.vocabulary_total_size)
        print('Most common words (+UNK)', self.word_counts[:5])
        print('Sample data', self.sequence[:10])

document = Document(words, 50000)
document.print_stats()

In [ ]:
del words  # Hint to reduce memory.
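
A quick sanity check (illustrative, not part of the assignment): the two mappings built by Document should round-trip, and the count entry at a word's ID should be that word's frequency. The word 'the' here is just an assumed example of a frequent token; any in-vocabulary word works.


In [ ]:
# Illustrative sanity check: words and IDs should round-trip through the two mappings.
# 'the' is an assumed example; substitute any word in document.word_IDs.
example_id = document.word_IDs.get('the', 0)
print('ID of "the":', example_id)
print('word for that ID:', document.ID_to_word[example_id])
print('count entry:', document.word_counts[example_id])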

Function to generate a training batch for the skip-gram model.


In [ ]:
def generate_batch(batch_size, skip_count, skip_window, sequence, index):
    assert batch_size % skip_count == 0
    assert skip_count <= 2 * skip_window
    batch = np.ndarray(shape=(batch_size), dtype=np.int32)
    labels = np.ndarray(shape=(batch_size, 1), dtype=np.int32)
    span = 2 * skip_window + 1 # [ skip_window target skip_window ]
    buffer = collections.deque(maxlen=span)
    for _ in range(span):
        buffer.append(sequence[index])
        index = (index + 1) % len(sequence)
    for i in range(batch_size // skip_count):
        target = skip_window  # target label at the center of the buffer
        targets_to_avoid = [ skip_window ]
        for j in range(skip_count):
            while target in targets_to_avoid:
                target = random.randint(0, span - 1)
            targets_to_avoid.append(target)
            batch[i * skip_count + j] = buffer[skip_window]
            labels[i * skip_count + j, 0] = buffer[target]
        buffer.append(sequence[index])
        index = (index + 1) % len(sequence)
    return batch, labels, index

In [ ]:
print('sequence:', [document.ID_to_word[di] for di in document.sequence[:8]])

for skip_window in [1, 2]:
    data_index = 0
    batch, labels, data_index = generate_batch(
        8, 2 * skip_window, skip_window, document.sequence, data_index
    )
    print('\nwith skip_window = %d:' % (skip_window))
    print('    batch:', [document.ID_to_word[bi] for bi in batch])
    print('    labels:', [document.ID_to_word[li] for li in labels.reshape(8)])
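
To make the training pairs explicit (a small illustrative addition), the cell below re-prints the last batch above as (center, context) pairs: the center word is the model input and the sampled context word is the label it must predict. Each center word appears skip_count times in the batch, once per sampled context word.


In [ ]:
# Illustrative: pair each center word with the context word it is asked to predict.
pairs = list(zip([document.ID_to_word[bi] for bi in batch],
                 [document.ID_to_word[li] for li in labels.reshape(8)]))
print('(center, context) pairs:', pairs)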

Train a skip-gram model.


In [ ]:
def setup_graph(vocab_size, batch_size, embedding_size, sample_count, valid_examples, cbow_skips=None):
    graph = tf.Graph()
    with graph.as_default():
        # Input data.
        input_shape = [batch_size, cbow_skips] if cbow_skips else [batch_size]
        train_dataset = tf.placeholder(tf.int32, shape=input_shape)
        train_labels = tf.placeholder(tf.int32, shape=[batch_size, 1])
        valid_dataset = tf.constant(valid_examples, dtype=tf.int32)

        # Variables.
        embeddings = tf.Variable(
            tf.random_uniform([vocab_size, embedding_size], -1.0, 1.0)
        )
        weights = tf.Variable(
            tf.truncated_normal([vocab_size, embedding_size],
                                stddev=1.0 / math.sqrt(embedding_size))
        )
        biases = tf.Variable(tf.zeros([vocab_size]))

        # Model.
        # Look up embeddings for inputs.
        embed = tf.nn.embedding_lookup(embeddings, train_dataset)
        if cbow_skips:
            embed = tf.reduce_sum(embed, 1)
        # Compute the softmax loss, using a sample of the negative labels each time.
        sampled_softmax = tf.nn.sampled_softmax_loss(
            weights=weights, biases=biases, labels=train_labels, inputs=embed,
            num_sampled=sample_count, num_classes=vocab_size
        )
        loss = tf.reduce_mean(sampled_softmax)

        # Optimizer.
        optimizer = tf.train.AdagradOptimizer(1.0).minimize(loss)

        # Normalize the embeddings:
        norm = tf.sqrt(tf.reduce_sum(tf.square(embeddings), 1, keep_dims=True))
        normalized_embeddings = embeddings / norm

        # Compute the similarity between validation examples and all embeddings using cosine distance:
        valid_embeddings = tf.nn.embedding_lookup(normalized_embeddings, valid_dataset)
        similarity = tf.matmul(valid_embeddings, tf.transpose(normalized_embeddings))
    return {
        "graph": graph,
        "batch_size": batch_size,
        "train": train_dataset,
        "labels": train_labels,
        "optimizer": optimizer,
        "loss": loss,
        "normalized_embeddings": normalized_embeddings,
        "similarity": similarity,
        "valid_examples": valid_examples
    }

In [ ]:
def run_graph(graph_data, document, batcher, skip_window, skip_count, step_count):
    with tf.Session(graph=graph_data["graph"]) as session:
        tf.global_variables_initializer().run()
        print('Initialized')
        average_loss = 0
        data_index = 0
        for step in range(step_count + 1):
            batch_data, batch_labels, data_index = batcher(
                graph_data["batch_size"],
                skip_count, skip_window,
                document.sequence, data_index
            )
            feed_dict = {
                graph_data["train"] : batch_data,
                graph_data["labels"] : batch_labels
            }
            inputs = [graph_data["optimizer"], graph_data["loss"]]
            _, l = session.run(inputs, feed_dict=feed_dict)
            average_loss += l
            if step % 2000 == 0:
                if step > 0:
                    average_loss = average_loss / 2000
                # The average loss is an estimate of the loss over the last 2000 batches.
                print('Average loss at step %d: %f' % (step, average_loss))
                average_loss = 0
            # note that this is expensive (~20% slowdown if computed every 500 steps)
            if step % 10000 == 0:
                sim = graph_data["similarity"].eval()
                for i in range(len(graph_data["valid_examples"])):
                    valid_word = document.ID_to_word[graph_data["valid_examples"][i]]
                    top_k = 8 # number of nearest neighbors
                    nearest = (-sim[i, :]).argsort()[1:top_k+1]
                    log = 'Nearest to %s:' % valid_word
                    for k in range(top_k):
                        close_word = document.ID_to_word[nearest[k]]
                        log = '%s %s,' % (log, close_word)
                    print(log)

        return graph_data["normalized_embeddings"].eval()

In [ ]:
# We pick a random validation set to sample nearest neighbors. Here we limit the
# validation samples to the words that have a low numeric ID, which by
# construction are also the most frequent.
valid_size = 16 # Random set of words to evaluate similarity on.
valid_window = 100 # Only pick dev samples in the head of the distribution.
valid_examples = np.array(random.sample(range(valid_window), valid_size))

graph_data = setup_graph(
    document.vocabulary_size(),
    batch_size=128,
    embedding_size=128,
    sample_count=64, # Number of negative examples to sample.
    valid_examples=valid_examples
)

final_embeddings = run_graph(
    graph_data,
    document,
    generate_batch,
    skip_window=1, # How many words to consider left and right.
    skip_count=2,
    step_count=100000
)
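
Because final_embeddings holds row-normalized vectors, cosine similarity between words reduces to a dot product. The cell below is a minimal nearest-neighbor query sketch; the query word 'france' is only an assumed example and can be replaced by any word in document.word_IDs.


In [ ]:
# Illustrative nearest-neighbor query on the learned embeddings.
# Rows of final_embeddings are unit-length, so cosine similarity is a plain dot product.
# 'france' is an assumed example query word.
query = 'france'
query_id = document.word_IDs.get(query, 0)
sims = np.dot(final_embeddings, final_embeddings[query_id])
nearest_ids = (-sims).argsort()[1:9]  # skip the word itself
print('Nearest to %s:' % query, [document.ID_to_word[i] for i in nearest_ids])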

In [ ]:
def apply_tsne(embeddings, points):
    tsne = TSNE(perplexity=30, n_components=2, init='pca', n_iter=5000)
    return tsne.fit_transform(embeddings[1:points+1, :])

In [ ]:
def plot(embeddings, labels):
    assert embeddings.shape[0] >= len(labels), 'More labels than embeddings'
    pylab.figure(figsize=(15,15))  # in inches
    for i, label in enumerate(labels):
        x, y = embeddings[i,:]
        pylab.scatter(x, y)
        pylab.annotate(label, xy=(x, y), xytext=(5, 2), textcoords='offset points', ha='right', va='bottom')
    pylab.show()

In [ ]:
example_words = [document.ID_to_word[i] for i in range(1, 401)]

In [ ]:
plot(apply_tsne(final_embeddings, len(example_words)), example_words)

Problem

An alternative to skip-gram is another Word2Vec model called CBOW (Continuous Bag of Words). In the CBOW model, instead of predicting a context word from the center word's vector, you predict the center word from the sum of all the word vectors in its context. Implement and evaluate a CBOW model trained on the text8 dataset.
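
Concretely, a CBOW batch of shape (batch_size, skip_count) is embedded to (batch_size, skip_count, embedding_size) and summed over the context axis, giving one (batch_size, embedding_size) input per predicted word; this is what the tf.reduce_sum(embed, 1) branch in setup_graph above does. The cell below is a tiny numpy sketch of that reduction, using toy sizes for illustration only.


In [ ]:
# Illustrative numpy sketch of the CBOW reduction used in setup_graph (toy sizes).
toy_embeddings = np.random.uniform(-1.0, 1.0, size=(10, 4))  # 10 words, 4-dim embeddings
toy_batch = np.array([[1, 2], [3, 4]])                       # 2 examples, 2 context words each
looked_up = toy_embeddings[toy_batch]                        # shape (2, 2, 4), like embedding_lookup
cbow_input = looked_up.sum(axis=1)                           # shape (2, 4), like tf.reduce_sum(embed, 1)
print(cbow_input.shape)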



In [ ]:
def generate_cbow_batch(batch_size, skip_count, skip_window, sequence, index):
    assert batch_size % skip_count == 0
    assert skip_count <= 2 * skip_window
    batch = np.ndarray(shape=(batch_size, skip_count), dtype=np.int32)
    labels = np.ndarray(shape=(batch_size, 1), dtype=np.int32)
    span = 2 * skip_window + 1 # [ skip_window target skip_window ]
    buffer = collections.deque(maxlen=span)
    for _ in range(span):
        buffer.append(sequence[index])
        index = (index + 1) % len(sequence)
    for i in range(batch_size):
        target = skip_window  # target label at the center of the buffer
        targets_to_avoid = [ skip_window ]
        labels[i] = buffer[skip_window]
        for j in range(skip_count):
            while target in targets_to_avoid:
                target = random.randint(0, span - 1)
            targets_to_avoid.append(target)
            batch[i][j] = buffer[target]
        buffer.append(sequence[index])
        index = (index + 1) % len(sequence)
    return batch, labels, index

print('data:', [document.ID_to_word[di] for di in document.sequence[:12]])

for skip_window in [1, 2]:
    cbow_index = 0
    cbow_batch, cbow_labels, cbow_index = generate_cbow_batch(
        8, 2 * skip_window, skip_window, document.sequence, cbow_index
    )
    print('\nwith skip_window = %d:' % (skip_window))
    print('    batch:', [[document.ID_to_word[bi] for bi in skips] for skips in cbow_batch])
    print('    labels:', [document.ID_to_word[li] for li in cbow_labels.reshape(8)])

In [ ]:
cbow_skip_count = 2
cbow_graph = setup_graph(
    document.vocabulary_size(),
    batch_size=128,
    embedding_size=128,
    sample_count=64, # Number of negative examples to sample.
    valid_examples=valid_examples,
    cbow_skips=cbow_skip_count
)

cbow_embeddings = run_graph(
    cbow_graph,
    document,
    generate_cbow_batch,
    skip_window=1, # How many words to consider left and right.
    skip_count=cbow_skip_count,
    step_count=100000
)

In [ ]:
plot(apply_tsne(cbow_embeddings, len(example_words)), example_words)