Deep Learning

Assignment 5

The goal of this assignment is to train a skip-gram model over Text8 data.


In [1]:
# These are all the modules we'll be using later. Make sure you can import them
# before proceeding further.
from __future__ import print_function
import collections
import math
import numpy as np
import os
import random
import tensorflow as tf
import zipfile
from matplotlib import pylab
from six.moves import range
from six.moves.urllib.request import urlretrieve
from sklearn.manifold import TSNE

Download the data from the source website if necessary.


In [2]:
url = 'http://mattmahoney.net/dc/'

def maybe_download(filename, expected_bytes):
  """Download a file if not present, and make sure it's the right size."""
  if not os.path.exists(filename):
    filename, _ = urlretrieve(url + filename, filename)
  statinfo = os.stat(filename)
  if statinfo.st_size == expected_bytes:
    print('Found and verified %s' % filename)
  else:
    print(statinfo.st_size)
    raise Exception(
      'Failed to verify ' + filename + '. Can you get to it with a browser?')
  return filename

filename = maybe_download('text8.zip', 31344016)


Found and verified text8.zip

Read the data into a list of words.


In [3]:
def read_data(filename):
  """Extract the first file inside the zip archive as a list of words."""
  with zipfile.ZipFile(filename) as f:
    return tf.compat.as_str(f.read(f.namelist()[0])).split()
  
words = read_data(filename)
print('Data size %d' % len(words))
print('word 0: %s' % words[0])
print('word 1: %s' % words[1])
print('word 2: %s' % words[2])
print('word 17005206: %s' % words[17005206])


Data size 17005207
word 0: anarchism
word 1: originated
word 2: as
word 17005206: b

Build the dictionary and replace rare words with an UNK token.


In [4]:
vocabulary_size = 50000

def build_dataset(words):
  count = [['UNK', -1]]
  count.extend(collections.Counter(words).most_common(vocabulary_size - 1))
  dictionary = dict()
  for word, _ in count:
    dictionary[word] = len(dictionary)
  data = list()
  unk_count = 0
  for word in words:
    if word in dictionary:
      index = dictionary[word]
    else:
      index = 0  # dictionary['UNK']
      unk_count = unk_count + 1
    data.append(index)
  count[0][1] = unk_count
  reverse_dictionary = dict(zip(dictionary.values(), dictionary.keys())) 
  return data, count, dictionary, reverse_dictionary

data, count, dictionary, reverse_dictionary = build_dataset(words)
print('Most common words (+UNK)', count[:5])
print('Sample data', data[:10])
firstPairs = {i: reverse_dictionary[i] for i in range(10)}  # first 10 id -> word pairs
print('Rev dict: %s' % firstPairs)
print(dictionary['of'])
print(reverse_dictionary[5239])
print(reverse_dictionary[3084])
print(reverse_dictionary[12])
del words  # Hint to reduce memory.


Most common words (+UNK) [['UNK', 418391], ('the', 1061396), ('of', 593677), ('and', 416629), ('one', 411764)]
Sample data [5239, 3084, 12, 6, 195, 2, 3137, 46, 59, 156]
Rev dict: {0: 'UNK', 1: 'the', 2: 'of', 3: 'and', 4: 'one', 5: 'in', 6: 'a', 7: 'to', 8: 'zero', 9: 'nine'}
2
anarchism
originated
as

Function to generate a training batch for the skip-gram model.


In [9]:
data_index = 0

def generate_batch(batch_size, num_skips, skip_window):
  global data_index
  assert batch_size % num_skips == 0
  assert num_skips <= 2 * skip_window
  batch = np.ndarray(shape=(batch_size), dtype=np.int32)
  labels = np.ndarray(shape=(batch_size, 1), dtype=np.int32)
  span = 2 * skip_window + 1 # [ skip_window target skip_window ]
  buffer = collections.deque(maxlen=span)
  for _ in range(span):
    buffer.append(data[data_index])
    data_index = (data_index + 1) % len(data)
  for i in range(batch_size // num_skips):
    target = skip_window  # target label at the center of the buffer
    targets_to_avoid = [ skip_window ]
    for j in range(num_skips):
      while target in targets_to_avoid:
        target = random.randint(0, span - 1)
      targets_to_avoid.append(target)
      batch[i * num_skips + j] = buffer[skip_window]
      labels[i * num_skips + j, 0] = buffer[target]
    buffer.append(data[data_index])
    data_index = (data_index + 1) % len(data)
  return batch, labels

print('data:', [reverse_dictionary[di] for di in data[:8]])

for num_skips, skip_window in [(2, 1), (4, 2)]:
    data_index = 0
    batch, labels = generate_batch(batch_size=8, num_skips=num_skips, skip_window=skip_window)
    print('\nwith num_skips = %d and skip_window = %d:' % (num_skips, skip_window))
    print('    batch:', [reverse_dictionary[bi] for bi in batch])
    print('    labels:', [reverse_dictionary[li] for li in labels.reshape(8)])


data: ['anarchism', 'originated', 'as', 'a', 'term', 'of', 'abuse', 'first']

with num_skips = 2 and skip_window = 1:
    batch: ['originated', 'originated', 'as', 'as', 'a', 'a', 'term', 'term']
    labels: ['anarchism', 'as', 'a', 'originated', 'as', 'term', 'a', 'of']

with num_skips = 4 and skip_window = 2:
    batch: ['as', 'as', 'as', 'as', 'a', 'a', 'a', 'a']
    labels: ['originated', 'term', 'anarchism', 'a', 'of', 'as', 'originated', 'term']
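
For comparison, the full set of (center, context) pairs inside one window can be written out exhaustively; generate_batch samples num_skips of these per center word at random. A minimal pure-Python sketch (the helper name skipgram_pairs is ours, not part of the assignment):

def skipgram_pairs(words, skip_window):
  """List every (center, context) pair within +/- skip_window positions."""
  pairs = []
  for i, center in enumerate(words):
    lo, hi = max(0, i - skip_window), min(len(words), i + skip_window + 1)
    for j in range(lo, hi):
      if j != i:
        pairs.append((center, words[j]))
  return pairs

print(skipgram_pairs(['anarchism', 'originated', 'as', 'a', 'term'], skip_window=1))
# [('anarchism', 'originated'), ('originated', 'anarchism'), ('originated', 'as'), ...]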

Train a skip-gram model.


In [10]:
batch_size = 128
embedding_size = 128 # Dimension of the embedding vector.
skip_window = 1 # How many words to consider left and right.
num_skips = 2 # How many times to reuse an input to generate a label.
# We pick a random validation set to sample nearest neighbors. Here we limit
# the validation samples to the words that have a low numeric ID, which by
# construction are also the most frequent.
valid_size = 16 # Random set of words to evaluate similarity on.
valid_window = 100 # Only pick dev samples in the head of the distribution.
valid_examples = np.array(random.sample(range(valid_window), valid_size))
num_sampled = 64 # Number of negative examples to sample.

graph = tf.Graph()

with graph.as_default():

  # Input data.
  train_dataset = tf.placeholder(tf.int32, shape=[batch_size])
  train_labels = tf.placeholder(tf.int32, shape=[batch_size, 1])
  valid_dataset = tf.constant(valid_examples, dtype=tf.int32)
  
  # Variables.
  embeddings = tf.Variable(
    tf.random_uniform([vocabulary_size, embedding_size], -1.0, 1.0))
  softmax_weights = tf.Variable(
    tf.truncated_normal([vocabulary_size, embedding_size],
                         stddev=1.0 / math.sqrt(embedding_size)))
  softmax_biases = tf.Variable(tf.zeros([vocabulary_size]))
  
  # Model.
  # Look up embeddings for inputs.
  embed = tf.nn.embedding_lookup(embeddings, train_dataset)
  # Compute the softmax loss, using a sample of the negative labels each time.
  # Keyword arguments avoid relying on the positional order, which differs across TF releases.
  loss = tf.reduce_mean(
    tf.nn.sampled_softmax_loss(weights=softmax_weights, biases=softmax_biases,
                               inputs=embed, labels=train_labels,
                               num_sampled=num_sampled,
                               num_classes=vocabulary_size))

  # Optimizer.
  optimizer = tf.train.AdagradOptimizer(1.0).minimize(loss)
  
  # Compute the similarity between minibatch examples and all embeddings.
  # We use the cosine distance:
  norm = tf.sqrt(tf.reduce_sum(tf.square(embeddings), 1, keep_dims=True))
  normalized_embeddings = embeddings / norm
  valid_embeddings = tf.nn.embedding_lookup(
    normalized_embeddings, valid_dataset)
  similarity = tf.matmul(valid_embeddings, tf.transpose(normalized_embeddings))
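
For reference, sampled_softmax_loss is a cheap approximation to the full softmax cross-entropy over all 50,000 output words; the loss it approximates is (our notation: B the batch size, V the vocabulary size, e_i the embedding of center word i, y_i its context label, and w_v, b_v the softmax weights and biases)

$$ L = -\frac{1}{B} \sum_{i=1}^{B} \log \frac{\exp(w_{y_i}^\top e_i + b_{y_i})}{\sum_{v=1}^{V} \exp(w_v^\top e_i + b_v)} $$

but each training step only evaluates the true class plus num_sampled = 64 randomly drawn negative classes instead of all V terms in the denominator.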

In [11]:
num_steps = 100001

with tf.Session(graph=graph) as session:
  tf.initialize_all_variables().run()
  print('Initialized')
  average_loss = 0
  for step in range(num_steps):
    batch_data, batch_labels = generate_batch(
      batch_size, num_skips, skip_window)
    feed_dict = {train_dataset : batch_data, train_labels : batch_labels}
    _, l = session.run([optimizer, loss], feed_dict=feed_dict)
    average_loss += l
    if step % 2000 == 0:
      if step > 0:
        average_loss = average_loss / 2000
      # The average loss is an estimate of the loss over the last 2000 batches.
      print('Average loss at step %d: %f' % (step, average_loss))
      average_loss = 0
    # note that this is expensive (~20% slowdown if computed every 500 steps)
    if step % 10000 == 0:
      sim = similarity.eval()
      for i in range(valid_size):
        valid_word = reverse_dictionary[valid_examples[i]]
        top_k = 8 # number of nearest neighbors
        nearest = (-sim[i, :]).argsort()[1:top_k+1]
        log = 'Nearest to %s:' % valid_word
        for k in range(top_k):
          close_word = reverse_dictionary[nearest[k]]
          log = '%s %s,' % (log, close_word)
        print(log)
  final_embeddings = normalized_embeddings.eval()


Initialized
Average loss at step 0: 7.899043
Nearest to four: nominate, swarm, unipolar, denarius, statement, ivens, informs, flory,
Nearest to seven: acetate, cliches, metamorphic, flotilla, hotspots, lookout, invention, iceberg,
Nearest to nine: gage, criticises, wu, operettas, bronze, courtyard, auger, recordable,
Nearest to five: threatening, wigs, ip, propelling, overdoses, circling, gypsum, latency,
Nearest to have: funk, dollars, hold, pye, demoscene, betty, commodores, cars,
Nearest to by: annulment, greenstone, aleksander, grievances, bulletproof, hexer, diena, animists,
Nearest to so: vyasa, crisis, cumming, metal, spaces, maj, barred, dominate,
Nearest to but: longitudinal, richardson, guernsey, fijian, cernan, divergences, rothbard, represent,
Nearest to system: borer, deprogramming, attenborough, fs, diffusion, hereafter, joaqu, imdb,
Nearest to has: ductile, mammal, wattle, disbandment, gifs, rossi, saturdays, bistable,
Nearest to not: ade, lionhead, mahindra, thereafter, groundwater, analgesics, airborne, ordaining,
Nearest to american: getz, sanctioned, meiosis, mathematicians, flaminius, jon, amaranthus, historiography,
Nearest to up: linkin, ak, ath, ingram, infuriated, kirlian, monosaccharides, termini,
Nearest to first: constants, now, parc, swedes, remedial, olney, sterilized, deductive,
Nearest to two: buccaneer, beatboxing, shout, underpinning, musicology, poisonous, interrogative, mulholland,
Nearest to three: abruzzo, developments, tartaric, montreal, tatar, beyer, ccny, asatru,
Average loss at step 2000: 4.364572
Average loss at step 4000: 3.863530
Average loss at step 6000: 3.788061
Average loss at step 8000: 3.685228
Average loss at step 10000: 3.618498
Nearest to four: six, eight, three, five, seven, zero, nine, two,
Nearest to seven: eight, six, three, five, nine, four, zero, two,
Nearest to nine: eight, six, seven, zero, five, four, three, two,
Nearest to five: six, eight, three, four, seven, nine, zero, two,
Nearest to have: had, are, be, has, betty, were, secrete, hold,
Nearest to by: was, on, symmetrical, be, as, greenstone, from, been,
Nearest to so: if, hexameter, rebranded, crisis, ubangi, barred, maj, originally,
Nearest to but: longitudinal, although, divergences, magnitudes, bethlehem, stain, chefs, veritas,
Nearest to system: attenborough, borer, satisfiable, deprogramming, metz, silicates, abyssinia, joaqu,
Nearest to has: had, was, have, is, mammal, anabaptism, ductile, lamarckian,
Nearest to not: we, who, they, it, ade, to, also, groundwater,
Nearest to american: and, french, evils, barge, getz, aromas, unscathed, lifes,
Nearest to up: termini, ingram, commanding, mcvie, kirlian, omnivores, clarified, bithynia,
Nearest to first: crested, cpus, antelope, parc, under, accessed, motorbikes, icc,
Nearest to two: three, five, six, seven, four, zero, eight, one,
Nearest to three: four, six, eight, five, seven, two, zero, nine,
Average loss at step 12000: 3.604934
Average loss at step 14000: 3.575232
Average loss at step 16000: 3.402577
Average loss at step 18000: 3.456256
Average loss at step 20000: 3.534764
Nearest to four: three, five, seven, six, eight, two, nine, zero,
Nearest to seven: eight, six, three, four, nine, five, two, zero,
Nearest to nine: eight, seven, six, four, five, zero, three, two,
Nearest to five: four, seven, three, six, zero, eight, two, nine,
Nearest to have: had, has, were, be, are, betty, lemming, visited,
Nearest to by: be, aelian, symmetrical, chechen, for, stanza, against, commuted,
Nearest to so: if, originally, hexameter, ailing, rebranded, sf, schuschnigg, coco,
Nearest to but: however, although, when, which, before, and, that, is,
Nearest to system: attenborough, borer, sefirot, metz, deprogramming, ecclestone, joaqu, collier,
Nearest to has: had, have, is, was, lamarckian, benefited, nagy, mammal,
Nearest to not: they, also, we, it, to, who, yesterday, groundwater,
Nearest to american: british, german, french, evils, english, aromas, lifes, barge,
Nearest to up: disrepair, back, omnivores, burney, termini, routed, clarified, commanding,
Nearest to first: last, following, next, crested, silky, second, antelope, run,
Nearest to two: three, four, five, seven, six, eight, one, zero,
Nearest to three: four, seven, five, two, six, eight, nine, zero,
Average loss at step 22000: 3.506801
Average loss at step 24000: 3.485942
Average loss at step 26000: 3.478942
Average loss at step 28000: 3.481497
Average loss at step 30000: 3.503069
Nearest to four: six, eight, three, seven, five, two, nine, zero,
Nearest to seven: eight, six, four, nine, five, three, two, zero,
Nearest to nine: eight, seven, six, five, four, three, zero, two,
Nearest to five: four, eight, six, seven, three, zero, nine, two,
Nearest to have: had, has, were, are, be, visited, hold, secrete,
Nearest to by: was, chechen, against, commuted, molotov, steppe, were, in,
Nearest to so: if, originally, hexameter, schuschnigg, thessalonians, garnered, chiefdoms, crisis,
Nearest to but: however, although, when, that, and, malayalam, which, though,
Nearest to system: systems, attenborough, metz, supplant, bastille, joaqu, satisfiable, ahijah,
Nearest to has: had, have, is, was, reconstructions, boric, intuitive, mammal,
Nearest to not: they, rediscovered, also, to, generally, never, still, it,
Nearest to american: british, french, german, italian, australian, dutch, english, levied,
Nearest to up: him, back, off, breathy, omnivores, out, instinctively, disrepair,
Nearest to first: last, second, next, following, same, silky, run, marple,
Nearest to two: three, four, one, seven, five, six, eight, zero,
Nearest to three: four, seven, two, five, eight, six, zero, nine,
Average loss at step 32000: 3.504559
Average loss at step 34000: 3.490494
Average loss at step 36000: 3.452101
Average loss at step 38000: 3.298724
Average loss at step 40000: 3.429873
Nearest to four: six, three, seven, eight, five, two, nine, one,
Nearest to seven: eight, five, six, nine, four, three, zero, two,
Nearest to nine: eight, seven, six, five, four, zero, three, two,
Nearest to five: seven, six, three, four, eight, nine, zero, two,
Nearest to have: had, has, were, are, be, secrete, beit, hold,
Nearest to by: chechen, stanza, nonverbal, with, hiller, unu, aelian, charismatic,
Nearest to so: if, it, hexameter, liturgy, garnered, originally, dara, sai,
Nearest to but: however, although, and, while, it, which, when, choral,
Nearest to system: systems, attenborough, supplant, authority, briefcase, ahijah, saccharomyces, distributing,
Nearest to has: had, have, was, is, reconstructions, boric, restroom, punch,
Nearest to not: they, usually, generally, also, often, rediscovered, it, still,
Nearest to american: french, australian, german, british, italian, english, austrian, dutch,
Nearest to up: off, out, him, back, them, disrepair, encounter, demilitarized,
Nearest to first: last, second, next, run, same, pyramidal, crested, suriname,
Nearest to two: three, four, five, six, seven, one, eight, zero,
Nearest to three: four, two, seven, five, six, eight, nine, zero,
Average loss at step 42000: 3.436492
Average loss at step 44000: 3.451130
Average loss at step 46000: 3.455030
Average loss at step 48000: 3.353781
Average loss at step 50000: 3.379488
Nearest to four: six, seven, three, five, eight, nine, two, zero,
Nearest to seven: six, eight, four, nine, five, three, zero, two,
Nearest to nine: eight, seven, six, four, three, five, zero, two,
Nearest to five: six, four, seven, eight, three, zero, two, nine,
Nearest to have: had, has, were, be, are, secrete, having, floppies,
Nearest to by: against, chechen, commuted, moreau, during, under, through, hiller,
Nearest to so: if, sai, thessalonians, hymenoptera, dara, garnered, eliminated, minutemen,
Nearest to but: however, although, while, and, when, though, where, since,
Nearest to system: systems, coherent, attenborough, fathom, methuen, supplant, conformance, goodwin,
Nearest to has: had, have, was, is, reconstructions, boric, having, does,
Nearest to not: generally, usually, still, they, rediscovered, never, largely, almost,
Nearest to american: french, australian, english, german, italian, installs, dutch, indian,
Nearest to up: off, out, them, back, him, down, disrepair, rus,
Nearest to first: last, second, next, final, same, chickasaw, under, pyramidal,
Nearest to two: three, six, four, one, five, seven, eight, zero,
Nearest to three: four, six, seven, eight, two, five, nine, zero,
Average loss at step 52000: 3.439683
Average loss at step 54000: 3.423885
Average loss at step 56000: 3.438659
Average loss at step 58000: 3.397037
Average loss at step 60000: 3.389905
Nearest to four: five, six, eight, seven, three, nine, two, zero,
Nearest to seven: eight, six, five, four, nine, three, zero, two,
Nearest to nine: eight, seven, six, five, four, zero, three, one,
Nearest to five: four, six, seven, three, eight, zero, nine, two,
Nearest to have: had, has, were, are, be, having, floppies, secrete,
Nearest to by: chechen, with, crosses, commuted, while, reconstructions, charismatic, ningen,
Nearest to so: if, transformed, sai, garnered, mugwort, equiv, dara, too,
Nearest to but: although, however, and, though, or, which, breadth, than,
Nearest to system: systems, coherent, definition, impossibility, supplant, tabulating, program, bricks,
Nearest to has: had, have, is, was, having, reconstructions, ferromagnetic, wiener,
Nearest to not: still, rediscovered, never, nor, usually, they, it, we,
Nearest to american: australian, german, english, french, british, indian, italian, austrian,
Nearest to up: off, out, them, back, down, him, relieves, demilitarized,
Nearest to first: last, second, next, same, latter, yankovic, suriname, hispanic,
Nearest to two: three, four, five, six, one, eight, seven, zero,
Nearest to three: five, four, six, two, eight, seven, nine, zero,
Average loss at step 62000: 3.240696
Average loss at step 64000: 3.253818
Average loss at step 66000: 3.402351
Average loss at step 68000: 3.393067
Average loss at step 70000: 3.355323
Nearest to four: five, six, three, seven, eight, two, zero, nine,
Nearest to seven: six, eight, five, four, nine, three, zero, two,
Nearest to nine: eight, six, seven, five, four, zero, three, one,
Nearest to five: four, seven, six, three, eight, zero, nine, two,
Nearest to have: had, has, were, are, be, having, include, simplifies,
Nearest to by: using, through, chechen, be, ishi, imparting, from, is,
Nearest to so: if, dara, mugwort, accelerations, stouffer, too, kolingba, sai,
Nearest to but: however, although, which, though, while, really, and, that,
Nearest to system: systems, authority, coherent, program, bricks, dxf, attenborough, saccharomyces,
Nearest to has: had, have, was, is, having, since, hdtv, visited,
Nearest to not: still, never, usually, nor, rediscovered, now, always, garnering,
Nearest to american: australian, british, english, german, indian, postmodernism, italian, french,
Nearest to up: off, out, them, down, back, him, haj, demilitarized,
Nearest to first: last, second, next, same, best, latter, final, suriname,
Nearest to two: three, four, six, one, five, seven, eight, zero,
Nearest to three: four, five, two, six, seven, eight, zero, nine,
Average loss at step 72000: 3.371315
Average loss at step 74000: 3.348676
Average loss at step 76000: 3.322687
Average loss at step 78000: 3.351883
Average loss at step 80000: 3.375714
Nearest to four: five, six, seven, three, eight, nine, two, zero,
Nearest to seven: six, five, eight, four, nine, three, zero, two,
Nearest to nine: eight, seven, five, six, four, three, zero, births,
Nearest to five: six, four, seven, eight, three, nine, zero, two,
Nearest to have: had, has, were, are, be, having, include, secrete,
Nearest to by: chechen, through, under, fated, against, originally, aelian, using,
Nearest to so: if, dara, accelerations, mugwort, ignoring, pathfinder, eliminated, sai,
Nearest to but: however, although, while, and, though, biggs, see, than,
Nearest to system: systems, program, fathom, bricks, conformance, attenborough, judge, goodwin,
Nearest to has: had, have, is, was, having, since, morel, erratic,
Nearest to not: still, usually, nor, always, largely, actually, generally, almost,
Nearest to american: british, french, australian, german, english, indian, italian, installs,
Nearest to up: off, out, them, down, back, demilitarized, him, cyp,
Nearest to first: last, second, next, final, same, third, suriname, worst,
Nearest to two: three, six, four, seven, five, one, eight, zero,
Nearest to three: four, five, six, seven, two, eight, zero, nine,
Average loss at step 82000: 3.407432
Average loss at step 84000: 3.408134
Average loss at step 86000: 3.391137
Average loss at step 88000: 3.348031
Average loss at step 90000: 3.367105
Nearest to four: five, seven, six, eight, three, two, nine, zero,
Nearest to seven: eight, four, five, nine, six, three, zero, two,
Nearest to nine: seven, eight, six, five, four, zero, three, two,
Nearest to five: four, seven, six, eight, three, nine, two, zero,
Nearest to have: had, has, are, were, having, be, include, lanphier,
Nearest to by: through, when, under, chechen, shack, for, using, with,
Nearest to so: dara, too, if, sai, beyond, accelerations, how, rhetorician,
Nearest to but: however, although, and, though, while, they, it, she,
Nearest to system: systems, bricks, bastille, program, judge, design, revolutionaries, tabulating,
Nearest to has: had, have, is, was, since, having, novas, requires,
Nearest to not: still, nor, we, largely, generally, they, actually, never,
Nearest to american: french, british, australian, indian, german, italian, english, austrian,
Nearest to up: off, out, them, down, back, him, rus, demilitarized,
Nearest to first: last, second, next, same, best, yankovic, largest, leith,
Nearest to two: three, four, six, five, seven, one, eight, zero,
Nearest to three: five, two, four, seven, six, eight, zero, nine,
Average loss at step 92000: 3.398381
Average loss at step 94000: 3.250564
Average loss at step 96000: 3.355309
Average loss at step 98000: 3.239431
Average loss at step 100000: 3.353480
Nearest to four: six, seven, five, eight, three, two, zero, nine,
Nearest to seven: eight, six, four, five, nine, three, two, zero,
Nearest to nine: eight, seven, six, four, five, zero, three, squadron,
Nearest to five: six, four, seven, eight, two, three, zero, nine,
Nearest to have: had, has, be, are, were, include, secrete, having,
Nearest to by: through, when, gunman, ishi, mombasa, without, been, fated,
Nearest to so: then, thus, if, too, dara, when, sai, sometimes,
Nearest to but: however, although, though, and, while, probably, where, looked,
Nearest to system: systems, program, bricks, judge, coherent, dxf, jelly, problem,
Nearest to has: had, have, is, was, since, having, mismatch, erratic,
Nearest to not: always, still, never, nor, largely, almost, syngman, garnering,
Nearest to american: british, australian, italian, french, canadian, indian, quart, cupola,
Nearest to up: off, out, back, down, them, him, alembert, demilitarized,
Nearest to first: last, second, next, third, final, same, fourth, best,
Nearest to two: five, four, three, six, seven, one, eight, zero,
Nearest to three: six, five, four, seven, two, eight, zero, one,

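Since final_embeddings is row-normalized, nearest neighbors can also be recomputed offline with plain NumPy, which is handy for inspecting words outside the fixed validation set. A small sketch (the helper nearest is our own, not part of the assignment):

def nearest(word, k=8):
  """Return the k words closest to `word` by cosine similarity."""
  vec = final_embeddings[dictionary[word]]
  sims = final_embeddings.dot(vec)    # rows are unit-norm, so this is cosine similarity
  order = (-sims).argsort()[1:k + 1]  # drop index 0, the word itself
  return [reverse_dictionary[i] for i in order]

print(nearest('three'))
print(nearest('american'))
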
In [12]:
num_points = 400

tsne = TSNE(perplexity=30, n_components=2, init='pca', n_iter=5000)
two_d_embeddings = tsne.fit_transform(final_embeddings[1:num_points+1, :])

In [13]:
def plot(embeddings, labels):
  assert embeddings.shape[0] >= len(labels), 'More labels than embeddings'
  pylab.figure(figsize=(15,15))  # in inches
  for i, label in enumerate(labels):
    x, y = embeddings[i,:]
    pylab.scatter(x, y)
    pylab.annotate(label, xy=(x, y), xytext=(5, 2), textcoords='offset points',
                   ha='right', va='bottom')
  pylab.show()

words = [reverse_dictionary[i] for i in range(1, num_points+1)]
plot(two_d_embeddings, words)



Problem

An alternative to the skip-gram model is another Word2Vec model called CBOW (Continuous Bag of Words). In the CBOW model, instead of predicting a context word from the center word's vector, you predict the center word from the sum of all the word vectors in its context. Implement and evaluate a CBOW model trained on the text8 dataset.
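
One possible starting point (a rough sketch under our own naming, not a reference solution): reuse the windowing logic of generate_batch, but return the 2 * skip_window context word IDs as the input and the center word as the label, then average the context embeddings before the same sampled softmax loss.

def generate_cbow_batch(batch_size, skip_window):
  """Each example: the full context window as input, the center word as label."""
  global data_index
  span = 2 * skip_window + 1  # [ skip_window target skip_window ]
  batch = np.ndarray(shape=(batch_size, 2 * skip_window), dtype=np.int32)
  labels = np.ndarray(shape=(batch_size, 1), dtype=np.int32)
  buffer = collections.deque(maxlen=span)
  for _ in range(span):
    buffer.append(data[data_index])
    data_index = (data_index + 1) % len(data)
  for i in range(batch_size):
    # All words in the window except the center become the input context.
    batch[i, :] = [buffer[j] for j in range(span) if j != skip_window]
    labels[i, 0] = buffer[skip_window]
    buffer.append(data[data_index])
    data_index = (data_index + 1) % len(data)
  return batch, labels

# In the graph, the inputs become a [batch_size, 2 * skip_window] placeholder and the
# looked-up context embeddings are averaged before the sampled softmax, e.g.:
#   embed = tf.reduce_mean(tf.nn.embedding_lookup(embeddings, train_dataset), 1)
# Everything else (loss, optimizer, similarity, training loop) can stay the same.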