Deep Learning

Assignment 5

The goal of this assignment is to train a Word2Vec skip-gram model over Text8 data.


In [1]:
# These are all the modules we'll be using later. Make sure you can import them
# before proceeding further.
%matplotlib inline
from __future__ import print_function
import collections
import math
import numpy as np
import os
import random
import tensorflow as tf
import zipfile
from matplotlib import pylab
from six.moves import range
from six.moves.urllib.request import urlretrieve
from sklearn.manifold import TSNE
from itertools import compress

Download the data from the source website if necessary.


In [2]:
url = 'http://mattmahoney.net/dc/'

def maybe_download(filename, expected_bytes):
  """Download a file if not present, and make sure it's the right size."""
  if not os.path.exists(filename):
    filename, _ = urlretrieve(url + filename, filename)
  statinfo = os.stat(filename)
  if statinfo.st_size == expected_bytes:
    print('Found and verified %s' % filename)
  else:
    print(statinfo.st_size)
    raise Exception(
      'Failed to verify ' + filename + '. Can you get to it with a browser?')
  return filename

filename = maybe_download('text8.zip', 31344016)


Found and verified text8.zip

Read the data into a string.


In [3]:
def read_data(filename):
  """Extract the first file enclosed in a zip file as a list of words"""
  with zipfile.ZipFile(filename) as f:
    data = tf.compat.as_str(f.read(f.namelist()[0])).split()
  return data
  
words = read_data(filename)
print('Data size %d' % len(words))


Data size 17005207

Build the dictionary and replace rare words with UNK token.


In [4]:
vocabulary_size = 50000

def build_dataset(words):
  count = [['UNK', -1]]
  count.extend(collections.Counter(words).most_common(vocabulary_size - 1))
  dictionary = dict()
  for word, _ in count:
    dictionary[word] = len(dictionary)
  data = list()
  unk_count = 0
  for word in words:
    if word in dictionary:
      index = dictionary[word]
    else:
      index = 0  # dictionary['UNK']
      unk_count = unk_count + 1
    data.append(index)
  count[0][1] = unk_count
  reverse_dictionary = dict(zip(dictionary.values(), dictionary.keys())) 
  return data, count, dictionary, reverse_dictionary

data, count, dictionary, reverse_dictionary = build_dataset(words)
print('Most common words (+UNK)', count[:5])
print('Sample data', data[:10])
del words  # Hint to reduce memory.


Most common words (+UNK) [['UNK', 418391], ('the', 1061396), ('of', 593677), ('and', 416629), ('one', 411764)]
Sample data [5239, 3084, 12, 6, 195, 2, 3137, 46, 59, 156]
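
As a quick check that the mapping round-trips, the first few IDs in data can be decoded back to words with reverse_dictionary; a minimal sketch, assuming only the variables built above:


In [ ]:
# Decode the sample IDs printed above back into words; rare words outside
# the 50,000-word vocabulary come back as UNK.
print([reverse_dictionary[di] for di in data[:10]])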

Functions to generate a training batch for the skip-gram model and for the CBOW model used below.


In [5]:
data_index = 0

def generate_batch(batch_size, num_skips, skip_window):
  global data_index
  assert batch_size % num_skips == 0
  assert num_skips <= 2 * skip_window
  batch = np.ndarray(shape=(batch_size), dtype=np.int32)
  labels = np.ndarray(shape=(batch_size, 1), dtype=np.int32)
  span = 2 * skip_window + 1 # [ skip_window target skip_window ]
  buffer = collections.deque(maxlen=span)
  for _ in range(span):
    buffer.append(data[data_index])
    data_index = (data_index + 1) % len(data)
  for i in range(batch_size // num_skips):
    target = skip_window  # target label at the center of the buffer
    targets_to_avoid = [ skip_window ]
    for j in range(num_skips):
      while target in targets_to_avoid:
        target = random.randint(0, span - 1)
      targets_to_avoid.append(target)
      batch[i * num_skips + j] = buffer[skip_window]
      labels[i * num_skips + j, 0] = buffer[target]
    buffer.append(data[data_index])
    data_index = (data_index + 1) % len(data)
  return batch, labels

def generate_batch_cbow(batch_size, skip_window):
  """Generate a CBOW batch: each label is a center word and the batch holds
  its 2 * skip_window surrounding context words."""
  global data_index
  surrounding_words = 2 * skip_window # words surrounding the target
  assert batch_size % surrounding_words == 0
  total_labels = batch_size // surrounding_words # integer division: one label per context window
  batch = np.ndarray(shape=(batch_size), dtype=np.int32)
  labels = np.ndarray(shape=(total_labels, 1), dtype=np.int32)
  span = 2 * skip_window + 1 # [ skip_window target skip_window ]
  buffer = collections.deque(maxlen=span)
  for _ in range(span):
    buffer.append(data[data_index])
    data_index = (data_index + 1) % len(data)
  for i in range(total_labels):
    target = skip_window  # target label at the center of the buffer
    targets_to_avoid = [ skip_window ]
    labels[i, 0] = buffer[target] # label the target
    for j in range(surrounding_words):
      while target in targets_to_avoid:
        target = random.randint(0, span - 1)
      targets_to_avoid.append(target)
      batch[i * surrounding_words + j] = buffer[target]
    buffer.append(data[data_index])
    data_index = (data_index + 1) % len(data)
  return batch, labels
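
Before training, print a small example batch from each generator as a sanity check; a minimal sketch, assuming only the functions and data defined above:


In [ ]:
# Decode a small batch from each generator. generate_batch pairs each center
# word with one context word at a time; generate_batch_cbow packs all
# 2 * skip_window context words of a center word into the batch.
print('data:', [reverse_dictionary[di] for di in data[:8]])

data_index = 0
batch, labels = generate_batch(batch_size=8, num_skips=2, skip_window=1)
print('skip-gram batch :', [reverse_dictionary[bi] for bi in batch])
print('skip-gram labels:', [reverse_dictionary[li] for li in labels.reshape(8)])

data_index = 0
batch, labels = generate_batch_cbow(batch_size=8, skip_window=1)
print('CBOW batch :', [reverse_dictionary[bi] for bi in batch])
print('CBOW labels:', [reverse_dictionary[li] for li in labels.reshape(4)])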

Train a CBOW model: the summed embeddings of the context words are used to predict each center word.


In [6]:
batch_size = 128
embedding_size = 128 # Dimension of the embedding vector.
skip_window = 1 # How many words to consider left and right.
num_skips = 2 # How many times to reuse an input to generate a label.
# We pick a random validation set to sample nearest neighbors. Here we limit the
# validation samples to the words that have a low numeric ID, which by
# construction are also the most frequent. 
valid_size = 16 # Random set of words to evaluate similarity on.
valid_window = 100 # Only pick dev samples in the head of the distribution.
valid_examples = np.array(random.sample(range(valid_window), valid_size))
num_sampled = 64 # Number of negative examples to sample.
surrounding_words = 2 * skip_window
total_labels = batch_size // surrounding_words # number of CBOW labels (center words) per batch

graph = tf.Graph()

with graph.as_default(), tf.device('/cpu:0'):

  # Input data.
  train_dataset = tf.placeholder(tf.int32, shape=[batch_size])
  train_labels = tf.placeholder(tf.int32, shape=[total_labels, 1])
  valid_dataset = tf.constant(valid_examples, dtype=tf.int32)
  
  # Variables.
  embeddings = tf.Variable(
    tf.random_uniform([vocabulary_size, embedding_size], -1.0, 1.0))
  softmax_weights = tf.Variable(
    tf.truncated_normal([vocabulary_size, embedding_size],
                         stddev=1.0 / math.sqrt(embedding_size)))
  softmax_biases = tf.Variable(tf.zeros([vocabulary_size]))
  
  # Model.
  # Look up embeddings for inputs.
  embed = tf.nn.embedding_lookup(embeddings, train_dataset)

  # Sum the context embeddings that belong to each label. The mask assigns
  # the same segment ID to every context word of one window, e.g.
  # [0, 0, 1, 1, ...] for skip_window = 1, so tf.segment_sum collapses the
  # [batch_size, embedding_size] lookups into [total_labels, embedding_size].
  mask = np.zeros(batch_size, dtype=np.int32)
  mask_index = -1
  for i in range(batch_size):
    if i % surrounding_words == 0:
      mask_index = mask_index + 1
    mask[i] = mask_index

  embed_filtered = tf.segment_sum(embed, mask)


  # Compute the softmax loss, using a sample of the negative labels each time.
  loss = tf.reduce_mean(
    tf.nn.sampled_softmax_loss(weights=softmax_weights, biases=softmax_biases, inputs=embed_filtered,
                               labels=train_labels, num_sampled=num_sampled, num_classes=vocabulary_size))

  # Optimizer.
  # Note: The optimizer will optimize the softmax_weights AND the embeddings.
  # This is because the embeddings are defined as a variable quantity and the
  # optimizer's `minimize` method will by default modify all variable quantities 
  # that contribute to the tensor it is passed.
  # See docs on `tf.train.Optimizer.minimize()` for more details.
  optimizer = tf.train.AdagradOptimizer(1.0).minimize(loss)
  
  # Compute the similarity between minibatch examples and all embeddings.
  # We use the cosine distance:
  norm = tf.sqrt(tf.reduce_sum(tf.square(embeddings), 1, keep_dims=True))
  normalized_embeddings = embeddings / norm
  valid_embeddings = tf.nn.embedding_lookup(
    normalized_embeddings, valid_dataset)
  similarity = tf.matmul(valid_embeddings, tf.transpose(normalized_embeddings))
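
The tf.segment_sum call above is what turns the original skip-gram graph into a CBOW one: the mask assigns segment IDs [0, 0, 1, 1, ...], so each group of 2 * skip_window context embeddings is added into a single row before the sampled softmax. A toy illustration of that behaviour (the values below are made up purely for the demonstration):


In [ ]:
# Four fake context embeddings of size 2, grouped two-by-two by the mask.
demo_embed = tf.constant(np.arange(8, dtype=np.float32).reshape(4, 2))
demo_mask = np.array([0, 0, 1, 1], dtype=np.int32)  # segment IDs, as built above
with tf.Session() as demo_session:
  print(demo_session.run(tf.segment_sum(demo_embed, demo_mask)))
  # Rows 0+1 and 2+3 are summed: [[ 2.  4.], [10. 12.]]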

In [7]:
num_steps = 100001

with tf.Session(graph=graph) as session:
  tf.global_variables_initializer().run()
  print('Initialized')
  average_loss = 0
  for step in range(num_steps):
    batch_data, batch_labels = generate_batch_cbow(
      batch_size, skip_window)
    feed_dict = {train_dataset : batch_data, train_labels : batch_labels}
    _, l = session.run([optimizer, loss], feed_dict=feed_dict)
    average_loss += l
    if step % 2000 == 0:
      if step > 0:
        average_loss = average_loss / 2000
      # The average loss is an estimate of the loss over the last 2000 batches.
      print('Average loss at step %d: %f' % (step, average_loss))
      average_loss = 0
    # note that this is expensive (~20% slowdown if computed every 500 steps)
    if step % 10000 == 0:
      sim = similarity.eval()
      for i in range(valid_size):
        valid_word = reverse_dictionary[valid_examples[i]]
        top_k = 8 # number of nearest neighbors
        nearest = (-sim[i, :]).argsort()[1:top_k+1]
        log = 'Nearest to %s:' % valid_word
        for k in range(top_k):
          close_word = reverse_dictionary[nearest[k]]
          log = '%s %s,' % (log, close_word)
        print(log)
  final_embeddings = normalized_embeddings.eval()


Initialized
Average loss at step 0: 8.043357
Nearest to as: bright, impressions, antiochian, crushed, weishaupt, kazakh, uua, romances,
Nearest to between: rte, fed, atomists, idolized, championship, mariana, coward, noradrenaline,
Nearest to is: continual, potemkin, exponentially, hic, sleeve, pyrite, cheaper, exhorts,
Nearest to over: chevrolet, metal, hathor, militar, guardians, severance, refueling, road,
Nearest to b: slav, bernd, undertakes, morrissey, shrinking, brzezinski, atoll, libretto,
Nearest to from: bran, marathon, desktop, akin, inhabitants, emir, enduring, peruvian,
Nearest to but: lactose, swanson, sleek, addict, troubadors, imprison, reneged, harbin,
Nearest to only: generalship, darwinian, highlights, dagger, wallet, chiefdoms, headroom, cdu,
Nearest to new: jed, aryans, finlay, rte, jute, bubba, fi, arte,
Nearest to six: namek, parlour, mockery, honky, study, penetrate, matriculation, arguably,
Nearest to called: invariably, alva, expediency, qualify, angora, sheppard, loophole, alertness,
Nearest to eight: fatty, cowboys, merciless, torts, seeing, grin, interlocking, authentication,
Nearest to his: coining, admirable, naka, detour, arin, honky, theodor, biological,
Nearest to after: ast, unionism, bioterrorism, older, patristic, carradine, deterrent, lamb,
Nearest to are: amun, owls, kangaroos, maa, exclaves, syndrome, leuven, stallman,
Nearest to a: analyzing, ta, maribor, attenuated, bhangra, homosexual, cpa, bjp,
Average loss at step 2000: 4.221557
Average loss at step 4000: 3.642233
Average loss at step 6000: 3.538862
Average loss at step 8000: 3.377591
Average loss at step 10000: 3.288740
Nearest to as: gorgias, lynched, windy, laps, costing, andechs, renderings, kazakh,
Nearest to between: with, atomists, kool, critically, ferrara, rte, crusaders, into,
Nearest to is: was, has, are, be, phosphide, pwnage, alfredo, bde,
Nearest to over: organisms, dedicate, refueling, irish, refrigeration, orbiter, trackball, durability,
Nearest to b: d, agade, hurt, dresses, slav, sanitation, lib, ginsberg,
Nearest to from: into, fellows, grunt, with, admiring, in, heresiologists, hypatia,
Nearest to but: there, afrobeat, chameleon, cas, happened, cyprian, however, substantial,
Nearest to only: yitzhak, prussians, genealogical, mariner, woman, arise, first, immediately,
Nearest to new: rte, oppositional, prasad, protoscience, bicycles, annexation, cheats, druze,
Nearest to six: eight, seven, nine, five, four, zero, three, two,
Nearest to called: penelope, expediency, leadership, jalalabad, slobodan, southwark, granites, considered,
Nearest to eight: six, seven, nine, five, three, zero, four, two,
Nearest to his: their, her, its, our, the, dow, mays, lorre,
Nearest to after: before, when, during, recur, ajmer, aka, however, amplifies,
Nearest to are: were, is, have, artillery, was, briefing, lyell, ballots,
Nearest to a: the, this, sgml, chopstick, crustaceans, firmly, daewoo, bil,
Average loss at step 12000: 3.272518
Average loss at step 14000: 3.244198
Average loss at step 16000: 3.022263
Average loss at step 18000: 3.063832
Average loss at step 20000: 3.151080
Nearest to as: gorgias, laps, repulse, hashshashin, showered, melee, taunus, chung,
Nearest to between: with, within, from, admiring, markedly, through, britannia, kool,
Nearest to is: was, are, has, does, became, but, pwnage, wrestle,
Nearest to over: bills, organisms, refueling, about, schistosomiasis, nick, immunities, aims,
Nearest to b: d, verifies, hurt, agade, specialises, metals, galactose, dina,
Nearest to from: into, through, across, while, for, like, between, at,
Nearest to but: however, is, upholds, if, and, although, that, incalculable,
Nearest to only: prussians, desserts, showtime, arise, glyphs, berbers, madeleine, yitzhak,
Nearest to new: milder, bubba, western, buckley, liberation, eminence, jed, gunnery,
Nearest to six: eight, nine, four, seven, zero, five, three, two,
Nearest to called: known, named, used, catamaran, wool, slobodan, jethro, considered,
Nearest to eight: six, nine, seven, four, five, three, zero, two,
Nearest to his: their, her, its, our, the, my, your, waived,
Nearest to after: before, while, when, during, boethius, virtually, recur, for,
Nearest to are: were, is, have, skillful, while, cantata, be, artillery,
Nearest to a: another, treaties, no, the, any, this, sgml, gulden,
Average loss at step 22000: 3.123537
Average loss at step 24000: 3.076255
Average loss at step 26000: 3.056739
Average loss at step 28000: 3.069785
Average loss at step 30000: 3.087776
Nearest to as: laps, varuna, became, without, willfully, remakes, under, gcc,
Nearest to between: with, among, across, nitrites, within, in, cloisters, salient,
Nearest to is: was, became, are, has, does, be, were, pwnage,
Nearest to over: gloomy, around, schistosomiasis, through, brute, about, resorting, aims,
Nearest to b: d, sanitation, hurt, byline, director, pronounced, tintin, discern,
Nearest to from: into, through, in, during, against, under, across, before,
Nearest to but: however, and, nor, though, pwnage, although, while, vliw,
Nearest to only: libertarian, shaolin, passport, anticipate, evangelism, always, shawn, thai,
Nearest to new: protoscience, modern, western, surrounding, quelled, druze, premiered, mobs,
Nearest to six: four, seven, eight, nine, five, zero, three, two,
Nearest to called: named, used, considered, known, binomial, referred, described, ferraris,
Nearest to eight: nine, seven, five, six, zero, four, three, two,
Nearest to his: her, their, its, the, your, my, a, whose,
Nearest to after: before, when, during, within, lutenist, for, while, multiparty,
Nearest to are: were, is, have, those, bathtub, remain, do, neat,
Nearest to a: no, sgml, another, the, cert, his, any, emptying,
Average loss at step 32000: 3.077296
Average loss at step 34000: 3.053945
Average loss at step 36000: 3.003558
Average loss at step 38000: 2.796715
Average loss at step 40000: 2.975055
Nearest to as: varuna, combinatorial, melee, lysosomes, servicing, laps, retaliate, partido,
Nearest to between: with, within, among, across, tc, salient, pleadings, behind,
Nearest to is: was, has, are, becomes, remains, exists, in, newtonian,
Nearest to over: bills, immunities, about, tractate, comparative, fostering, cimeti, anaphase,
Nearest to b: d, c, UNK, preferring, e, l, ue, steep,
Nearest to from: into, through, under, after, playboys, within, of, incredibly,
Nearest to but: although, though, however, and, do, jannah, including, gfdl,
Nearest to only: anticipate, permitted, challenger, pitch, northward, first, lafur, most,
Nearest to new: special, affidavit, modern, prasad, protoscience, mcpherson, songhai, compatible,
Nearest to six: five, seven, eight, nine, four, three, zero, two,
Nearest to called: named, referred, mend, considered, made, termed, formed, there,
Nearest to eight: nine, seven, six, four, five, three, zero, two,
Nearest to his: their, her, its, your, my, the, whose, waived,
Nearest to after: before, from, postures, while, compose, when, alef, troglodytes,
Nearest to are: were, have, include, is, including, although, asymptotes, supplementary,
Nearest to a: sgml, any, another, intercal, immoral, the, crack, nafs,
Average loss at step 42000: 2.978351
Average loss at step 44000: 2.996443
Average loss at step 46000: 2.974561
Average loss at step 48000: 2.882537
Average loss at step 50000: 2.897345
Nearest to as: laps, nadezhda, antiochus, pessimistic, internationally, fleischer, onboard, xli,
Nearest to between: with, among, within, crono, damien, caledonia, salient, necessitated,
Nearest to is: was, are, has, remains, does, becomes, were, although,
Nearest to over: immunities, bills, about, cps, on, romanticism, comb, whitcomb,
Nearest to b: d, c, apache, rms, f, glues, xxi, four,
Nearest to from: in, across, through, under, into, towards, mephisto, during,
Nearest to but: however, although, though, since, and, while, they, someday,
Nearest to only: always, actually, permitted, also, zfc, above, challenger, usually,
Nearest to new: dilbert, graphic, special, brief, carbons, oppositional, particular, surrounding,
Nearest to six: eight, seven, nine, four, three, five, zero, two,
Nearest to called: named, initiated, informant, referred, used, known, described, see,
Nearest to eight: six, nine, seven, four, zero, three, five, two,
Nearest to his: her, their, my, its, your, our, whose, the,
Nearest to after: before, when, while, if, during, later, upon, following,
Nearest to are: were, is, was, have, remain, include, those, belated,
Nearest to a: another, the, sgml, any, namath, sanctuaries, candlemas, amounts,
Average loss at step 52000: 2.958091
Average loss at step 54000: 2.927489
Average loss at step 56000: 2.963291
Average loss at step 58000: 2.878538
Average loss at step 60000: 2.895695
Nearest to as: laps, aquariums, varuna, tezuka, erectile, thing, leda, antiochus,
Nearest to between: among, within, with, in, myoglobin, xxii, from, bayonet,
Nearest to is: was, remains, does, are, has, seems, becomes, became,
Nearest to over: within, around, away, among, immunities, about, reestablished, off,
Nearest to b: f, c, d, r, l, holberg, peculiarity, h,
Nearest to from: into, through, in, within, across, after, during, including,
Nearest to but: however, marsupials, and, cyclic, although, symbionts, cuff, materialised,
Nearest to only: always, actually, until, zfc, kvac, anticipate, kazaa, otomo,
Nearest to new: particular, special, druze, mcpherson, recent, different, fundamental, single,
Nearest to six: eight, nine, four, five, seven, three, zero, two,
Nearest to called: named, termed, considered, informant, koo, used, described, bioethics,
Nearest to eight: nine, six, four, seven, five, zero, three, one,
Nearest to his: their, her, its, my, our, your, whose, generalship,
Nearest to after: before, when, during, while, despite, without, from, thereafter,
Nearest to are: were, is, have, including, remain, include, belong, many,
Nearest to a: the, another, any, this, every, sgml, sanctuaries, patroclus,
Average loss at step 62000: 2.682616
Average loss at step 64000: 2.705254
Average loss at step 66000: 2.877897
Average loss at step 68000: 2.886687
Average loss at step 70000: 2.834612
Nearest to as: varuna, laps, melee, by, like, showered, islas, before,
Nearest to between: within, among, with, from, behind, around, lagrangian, g,
Nearest to is: was, has, are, remains, seems, becomes, be, does,
Nearest to over: within, around, off, about, reestablished, bills, away, cps,
Nearest to b: d, r, clique, peculiarity, l, wabash, withheld, j,
Nearest to from: through, across, within, into, during, by, between, in,
Nearest to but: however, while, and, than, deviant, which, until, incalculable,
Nearest to only: actually, always, still, easily, best, also, never, ever,
Nearest to new: particular, special, single, different, graphic, oppositional, recent, newspeak,
Nearest to six: eight, seven, nine, four, five, three, zero, two,
Nearest to called: named, described, considered, termed, informant, attended, referred, known,
Nearest to eight: nine, six, seven, four, five, zero, three, one,
Nearest to his: her, their, my, its, our, your, the, taipa,
Nearest to after: before, during, when, thereafter, without, despite, trenton, iaea,
Nearest to are: were, is, have, although, contain, including, skillful, remain,
Nearest to a: the, sgml, another, constance, every, this, no, patroclus,
Average loss at step 72000: 2.854799
Average loss at step 74000: 2.822488
Average loss at step 76000: 2.814865
Average loss at step 78000: 2.830229
Average loss at step 80000: 2.851274
Nearest to as: varuna, ddot, like, when, before, onboard, arsenide, gourmet,
Nearest to between: within, among, from, around, across, with, behind, over,
Nearest to is: was, has, remains, becomes, are, became, does, contains,
Nearest to over: around, within, off, bills, between, into, ornamentation, across,
Nearest to b: l, d, wabash, leszek, agade, jan, trafford, c,
Nearest to from: into, through, within, between, during, under, toward, across,
Nearest to but: however, while, although, though, and, or, see, when,
Nearest to only: last, always, either, actually, certainly, even, anticipate, heliogabalus,
Nearest to new: single, particular, brief, songhai, bursa, special, frigate, andaman,
Nearest to six: five, seven, eight, four, three, nine, zero, two,
Nearest to called: named, termed, considered, informant, known, described, referred, produced,
Nearest to eight: nine, five, six, seven, four, three, zero, two,
Nearest to his: her, their, my, your, its, our, personal, the,
Nearest to after: before, when, despite, during, without, thereafter, lutenist, until,
Nearest to are: were, have, include, remain, is, contain, including, belong,
Nearest to a: sgml, another, the, every, sanctuaries, tagore, patroclus, headache,
Average loss at step 82000: 2.884245
Average loss at step 84000: 2.896450
Average loss at step 86000: 2.857191
Average loss at step 88000: 2.807474
Average loss at step 90000: 2.817394
Nearest to as: varuna, like, shyness, laps, arukh, poking, onboard, asparagus,
Nearest to between: within, with, across, among, around, into, from, ostpolitik,
Nearest to is: was, are, has, remains, represents, becomes, refers, does,
Nearest to over: around, off, within, across, about, throughout, bills, out,
Nearest to b: algebra, l, preferring, d, sergei, stable, democrat, capricornus,
Nearest to from: across, through, while, under, into, during, towards, via,
Nearest to but: however, and, although, though, while, he, see, she,
Nearest to only: even, stroud, either, zuma, maybe, no, anticipate, liberalize,
Nearest to new: particular, brief, great, birding, oppositional, second, protoscience, refutations,
Nearest to six: seven, eight, four, five, nine, zero, two, three,
Nearest to called: termed, considered, named, known, informant, played, referred, presented,
Nearest to eight: seven, nine, six, five, four, three, zero, two,
Nearest to his: their, her, its, my, your, our, whose, the,
Nearest to after: before, despite, during, while, thereafter, when, lutenist, without,
Nearest to are: were, is, contain, have, remain, belong, newsted, include,
Nearest to a: the, another, sgml, candlemas, every, any, tagging, charon,
Average loss at step 92000: 2.868702
Average loss at step 94000: 2.689612
Average loss at step 96000: 2.821761
Average loss at step 98000: 2.698462
Average loss at step 100000: 2.826084
Nearest to as: varuna, laps, when, like, ddot, after, beattie, asparagus,
Nearest to between: within, among, with, across, predicate, around, throughout, during,
Nearest to is: was, has, seems, represents, becomes, became, contains, refers,
Nearest to over: around, off, across, within, about, into, nearly, out,
Nearest to b: d, l, nausica, levinson, superclusters, hurt, skelton, sheave,
Nearest to from: in, into, across, during, misconduct, throughout, within, of,
Nearest to but: although, however, and, though, probably, which, jannah, where,
Nearest to only: anticipate, no, always, eliza, koi, calvi, meanings, gemma,
Nearest to new: brief, particular, propeller, different, modern, oppositional, specific, special,
Nearest to six: eight, five, four, nine, seven, three, two, zero,
Nearest to called: named, termed, referred, played, there, built, see, known,
Nearest to eight: seven, six, nine, five, four, three, zero, two,
Nearest to his: their, her, your, our, its, my, the, whose,
Nearest to after: before, when, without, despite, during, thereafter, lutenist, for,
Nearest to are: were, including, have, remain, contain, include, belong, is,
Nearest to a: the, any, another, candlemas, enough, sgml, breezes, peggy,

In [8]:
num_points = 400

tsne = TSNE(perplexity=30, n_components=2, init='pca', n_iter=5000)
two_d_embeddings = tsne.fit_transform(final_embeddings[1:num_points+1, :])

In [9]:
def plot(embeddings, labels):
  assert embeddings.shape[0] >= len(labels), 'More labels than embeddings'
  pylab.figure(figsize=(15,15))  # in inches
  for i, label in enumerate(labels):
    x, y = embeddings[i,:]
    pylab.scatter(x, y)
    pylab.annotate(label, xy=(x, y), xytext=(5, 2), textcoords='offset points',
                   ha='right', va='bottom')
  pylab.show()

words = [reverse_dictionary[i] for i in range(1, num_points+1)]
plot(two_d_embeddings, words)



Problem

An alternative to skip-gram is another Word2Vec model called CBOW (Continuous Bag of Words). In the CBOW model, instead of predicting a context word from a word vector, you predict a word from the sum of all the word vectors in its context. Implement and evaluate a CBOW model trained on the text8 dataset.
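
The training graph above already follows this recipe: the context embeddings for each center word are summed with tf.segment_sum and fed to the sampled softmax. For reference, the core CBOW step can also be written in plain NumPy; the sketch below uses stand-in random parameters rather than the trained model:


In [ ]:
# Illustrative CBOW forward pass with stand-in (random) parameters.
# The hidden vector is the sum of the context embeddings; a full softmax over
# the vocabulary is used here, whereas the graph above samples negatives.
V, D = vocabulary_size, embedding_size
emb_demo = (0.1 * np.random.randn(V, D)).astype(np.float32)  # stand-in embeddings
w_demo = (0.1 * np.random.randn(V, D)).astype(np.float32)    # stand-in softmax weights
b_demo = np.zeros(V, dtype=np.float32)                       # stand-in softmax biases

context_ids = [6, 2]  # the words around data[4] with skip_window = 1
target_id = 195       # the center word, data[4]

hidden = emb_demo[context_ids].sum(axis=0)                   # CBOW: sum of context vectors
logits = w_demo.dot(hidden) + b_demo
log_probs = logits - np.log(np.sum(np.exp(logits)))
loss = -log_probs[target_id]
print('toy CBOW loss:', loss)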