In [1]:
from __future__ import print_function
import collections
import math
import numpy as np
import os
import random
import tensorflow as tf
import zipfile
from matplotlib import pylab
from six.moves import range
from six.moves.urllib.request import urlretrieve
from sklearn.manifold import TSNE



url = 'http://mattmahoney.net/dc/'

def maybe_download(filename, expected_bytes):
  """Download a file if not present, and make sure it's the right size."""
  if not os.path.exists(filename):
    filename, _ = urlretrieve(url + filename, filename)
  statinfo = os.stat(filename)
  if statinfo.st_size == expected_bytes:
    print('Found and verified %s' % filename)
  else:
    print(statinfo.st_size)
    raise Exception(
      'Failed to verify ' + filename + '. Can you get to it with a browser?')
  return filename

filename = maybe_download('text8.zip', 31344016)


def read_data(filename):
  """Extract the first file enclosed in a zip file as a list of words"""
  with zipfile.ZipFile(filename) as f:
    data = tf.compat.as_str(f.read(f.namelist()[0])).split()
  return data

words = read_data(filename)
print('Data size %d' % len(words))


vocabulary_size = 50000

def build_dataset(words):
  count = [['UNK', -1]]
  count.extend(collections.Counter(words).most_common(vocabulary_size - 1))
  dictionary = dict()
  for word, _ in count:
    dictionary[word] = len(dictionary)
  data = list()
  unk_count = 0
  for word in words:
    if word in dictionary:
      index = dictionary[word]
    else:
      index = 0  # dictionary['UNK']
      unk_count += 1
    data.append(index)
  count[0][1] = unk_count
  reverse_dictionary = dict(zip(dictionary.values(), dictionary.keys())) 
  return data, count, dictionary, reverse_dictionary

data, count, dictionary, reverse_dictionary = build_dataset(words)
print('Most common words (+UNK)', count[:5])
print('Sample data', data[:10])
del words  # Hint to reduce memory.
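
# Quick illustration of the two lookup tables (a sketch, not part of the
# original pipeline): known words round-trip through their integer IDs, and
# out-of-vocabulary words fall back to ID 0, i.e. 'UNK'.
assert dictionary['the'] == 1                         # most frequent word after UNK
assert reverse_dictionary[dictionary['the']] == 'the'
assert dictionary.get('qwertyzxcv', 0) == 0           # made-up OOV token -> UNK's ID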


data_index = 0

def generate_batch(batch_size, num_skips, skip_window):
  global data_index
  assert batch_size % num_skips == 0
  assert num_skips <= 2 * skip_window
  batch = np.ndarray(shape=(batch_size,), dtype=np.int32)
  labels = np.ndarray(shape=(batch_size, 1), dtype=np.int32)
  span = 2 * skip_window + 1 # [ skip_window target skip_window ]
  buffer = collections.deque(maxlen=span)
  for _ in range(span):
    buffer.append(data[data_index])
    data_index = (data_index + 1) % len(data)
  for i in range(batch_size // num_skips):
    target = skip_window  # target label at the center of the buffer
    targets_to_avoid = [ skip_window ]
    for j in range(num_skips):
      while target in targets_to_avoid:
        target = random.randint(0, span - 1)
      targets_to_avoid.append(target)
      batch[i * num_skips + j] = buffer[skip_window]
      labels[i * num_skips + j, 0] = buffer[target]
    buffer.append(data[data_index])
    data_index = (data_index + 1) % len(data)
  return batch, labels

print('data:', [reverse_dictionary[di] for di in data[:8]])

for num_skips, skip_window in [(2, 1), (4, 2)]:
  data_index = 0
  batch, labels = generate_batch(batch_size=8, num_skips=num_skips, skip_window=skip_window)
  print('\nwith num_skips = %d and skip_window = %d:' % (num_skips, skip_window))
  print('    batch:', [reverse_dictionary[bi] for bi in batch])
  print('    labels:', [reverse_dictionary[li] for li in labels.reshape(8)])
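
# Sanity check of generate_batch (an illustrative sketch, not part of the
# original notebook): each batch entry should be its center word, and each
# label should lie within skip_window positions of that center in `data`.
data_index = 0
skips, window = 2, 1
batch, labels = generate_batch(batch_size=8, num_skips=skips, skip_window=window)
for pos in range(8):
  center = window + pos // skips  # position of this entry's center word in `data`
  assert batch[pos] == data[center]
  assert labels[pos, 0] in data[center - window : center + window + 1]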


batch_size = 128
embedding_size = 128 # Dimension of the embedding vector.
skip_window = 1 # How many words to consider left and right.
num_skips = 2 # How many times to reuse an input to generate a label.
# We pick a random validation set to sample nearest neighbors. Here we limit
# the validation samples to words that have a low numeric ID, which by
# construction are also the most frequent.
valid_size = 16 # Random set of words to evaluate similarity on.
valid_window = 100 # Only pick dev samples in the head of the distribution.
valid_examples = np.array(random.sample(range(valid_window), valid_size))
num_sampled = 64 # Number of negative examples to sample.

graph = tf.Graph()

with graph.as_default(), tf.device('/cpu:0'):

  # Input data.
  train_dataset = tf.placeholder(tf.int32, shape=[batch_size])
  train_labels = tf.placeholder(tf.int32, shape=[batch_size, 1])
  valid_dataset = tf.constant(valid_examples, dtype=tf.int32)

  # Variables.
  embeddings = tf.Variable(
    tf.random_uniform([vocabulary_size, embedding_size], -1.0, 1.0))
  softmax_weights = tf.Variable(
    tf.truncated_normal([vocabulary_size, embedding_size],
                         stddev=1.0 / math.sqrt(embedding_size)))
  softmax_biases = tf.Variable(tf.zeros([vocabulary_size]))

  # Model.
  # Look up embeddings for inputs.
  embed = tf.nn.embedding_lookup(embeddings, train_dataset)
  # Compute the softmax loss, using a sample of the negative labels each time.
  loss = tf.reduce_mean(
    tf.nn.sampled_softmax_loss(weights=softmax_weights, biases=softmax_biases, inputs=embed,
                               labels=train_labels, num_sampled=num_sampled, num_classes=vocabulary_size))

  # Optimizer.
  # Note: The optimizer will optimize the softmax_weights AND the embeddings.
  # This is because the embeddings are defined as a variable, and the
  # optimizer's `minimize` method will by default modify all trainable
  # variables that contribute to the tensor it is passed.
  # See docs on `tf.train.Optimizer.minimize()` for more details.
  optimizer = tf.train.AdagradOptimizer(1.0).minimize(loss)

  # Compute the similarity between the validation examples and all embeddings.
  # We use cosine similarity (dot products of L2-normalized rows):
  norm = tf.sqrt(tf.reduce_sum(tf.square(embeddings), 1, keep_dims=True))
  normalized_embeddings = embeddings / norm
  valid_embeddings = tf.nn.embedding_lookup(
    normalized_embeddings, valid_dataset)
  similarity = tf.matmul(valid_embeddings, tf.transpose(normalized_embeddings))
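
# For reference, what the `similarity` tensor computes, restated in plain
# numpy (an illustrative sketch; `cosine_similarity_np` is a helper defined
# here and is not used by the training loop): with rows L2-normalized, the
# matrix product gives cosine similarities.
def cosine_similarity_np(emb, valid_ids):
  norm = np.sqrt(np.sum(np.square(emb), axis=1, keepdims=True))
  normalized = emb / norm
  return np.dot(normalized[valid_ids], normalized.T)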


num_steps = 100001

with tf.Session(graph=graph) as session:
  tf.global_variables_initializer().run()
  print('Initialized')
  average_loss = 0
  for step in range(num_steps):
    batch_data, batch_labels = generate_batch(
      batch_size, num_skips, skip_window)
    feed_dict = {train_dataset: batch_data, train_labels: batch_labels}
    _, l = session.run([optimizer, loss], feed_dict=feed_dict)
    average_loss += l
    if step % 2000 == 0:
      if step > 0:
        average_loss = average_loss / 2000
      # The average loss is an estimate of the loss over the last 2000 batches.
      print('Average loss at step %d: %f' % (step, average_loss))
      average_loss = 0
    # Note that this is expensive (~20% slowdown if computed every 500 steps).
    if step % 10000 == 0:
      sim = similarity.eval()
      for i in range(valid_size):
        valid_word = reverse_dictionary[valid_examples[i]]
        top_k = 8 # number of nearest neighbors
        nearest = (-sim[i, :]).argsort()[1:top_k+1]  # index 0 is the word itself
        log = 'Nearest to %s:' % valid_word
        for k in range(top_k):
          close_word = reverse_dictionary[nearest[k]]
          log = '%s %s,' % (log, close_word)
        print(log)
  final_embeddings = normalized_embeddings.eval()
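
# Once training finishes, the (already row-normalized) final_embeddings can be
# queried directly in numpy. An illustrative sketch; `nearest_words` is a
# helper defined here, not part of the original notebook.
def nearest_words(word, k=8):
  """Return the k words whose embeddings are closest to `word` by cosine."""
  sims = np.dot(final_embeddings, final_embeddings[dictionary[word]])
  return [reverse_dictionary[i] for i in (-sims).argsort()[1:k+1]]  # rank 0 is the word itself

# e.g. nearest_words('france') -- the query word must be in the 50,000-word vocabulary.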




num_points = 400

# Reduce the first num_points embeddings (skipping index 0, the 'UNK' token)
# to two dimensions with t-SNE for visualization.
tsne = TSNE(perplexity=30, n_components=2, init='pca', n_iter=5000, method='exact')
two_d_embeddings = tsne.fit_transform(final_embeddings[1:num_points+1, :])




def plot(embeddings, labels):
  assert embeddings.shape[0] >= len(labels), 'More labels than embeddings'
  pylab.figure(figsize=(15,15))  # in inches
  for i, label in enumerate(labels):
    x, y = embeddings[i,:]
    pylab.scatter(x, y)
    pylab.annotate(label, xy=(x, y), xytext=(5, 2), textcoords='offset points',
                   ha='right', va='bottom')
  pylab.show()

words = [reverse_dictionary[i] for i in range(1, num_points+1)]
plot(two_d_embeddings, words)
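
# The trained embeddings also support simple analogy queries via vector
# arithmetic (a sketch of the classic king - man + woman ~ queen pattern;
# `analogy` is a hypothetical helper, and results on this small corpus and
# short training run can be noisy).
def analogy(a, b, c, k=4):
  """Return the k words closest to vector(b) - vector(a) + vector(c)."""
  v = (final_embeddings[dictionary[b]]
       - final_embeddings[dictionary[a]]
       + final_embeddings[dictionary[c]])
  v /= np.sqrt(np.sum(np.square(v)))  # re-normalize the query vector
  sims = np.dot(final_embeddings, v)
  return [reverse_dictionary[i] for i in (-sims).argsort()[:k]]

# e.g. analogy('man', 'king', 'woman') may rank 'queen' highly on a well-trained model.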


Found and verified text8.zip
Data size 17005207
Most common words (+UNK) [['UNK', 418391], ('the', 1061396), ('of', 593677), ('and', 416629), ('one', 411764)]
Sample data [5234, 3081, 12, 6, 195, 2, 3134, 46, 59, 156]
data: ['anarchism', 'originated', 'as', 'a', 'term', 'of', 'abuse', 'first']

with num_skips = 2 and skip_window = 1:
    batch: ['originated', 'originated', 'as', 'as', 'a', 'a', 'term', 'term']
    labels: ['as', 'anarchism', 'originated', 'a', 'term', 'as', 'of', 'a']

with num_skips = 4 and skip_window = 2:
    batch: ['as', 'as', 'as', 'as', 'a', 'a', 'a', 'a']
    labels: ['term', 'a', 'anarchism', 'originated', 'of', 'as', 'term', 'originated']
Initialized
Average loss at step 0: 7.562249
Nearest to not: warplanes, pains, pinus, milt, rotations, addressing, attack, barbecue,
Nearest to had: mouse, underground, zyklon, schoolgirl, poorest, propounded, contrastive, hier,
Nearest to a: flowered, ballistics, attack, anecdotes, clapping, testing, racket, spoon,
Nearest to other: councils, storming, intelligencer, dutton, spirit, cuboctahedron, cale, intellivision,
Nearest to first: cuttings, hulk, lycaon, habitations, hoffman, cassel, postman, guano,
Nearest to more: salm, sorcery, intensification, camelids, deranged, worrying, nsh, dobson,
Nearest to been: modulo, oily, heist, pathogen, sparsely, reservoir, subcategories, friz,
Nearest to it: founder, cha, tropes, cartilaginous, montag, kaos, kummer, pyrimidine,
Nearest to while: squarepants, effigy, anachronistic, heritability, demoness, kristallnacht, lech, mink,
Nearest to if: irrelevant, hornblower, revealed, afrikaans, hoaxes, discrediting, hard, chromosomes,
Nearest to they: peng, andorra, guarded, moray, catharine, ultrasonic, documents, imagine,
Nearest to american: rhyolite, uranus, looking, atomists, soma, drugs, polypropylene, chevalier,
Nearest to five: oghuz, trebizond, hasse, winfield, inspiration, mascarene, pinching, cassava,
Nearest to were: genesee, excitement, hsuan, footed, biqa, cahokia, tishri, west,
Nearest to there: relieve, objectively, blitzkrieg, needy, critique, probable, intelligences, akihabara,
Nearest to is: conspired, alton, amplifies, cambridge, fencing, crystals, personal, jealousy,
Average loss at step 2000: 4.371344
Average loss at step 4000: 3.863815
Average loss at step 6000: 3.793474
Average loss at step 8000: 3.685557
Average loss at step 10000: 3.616393
Nearest to not: never, it, often, they, to, there, entr, also,
Nearest to had: has, have, was, mouse, recently, creek, live, underground,
Nearest to a: the, this, no, any, entail, amplified, filing, shook,
Nearest to other: some, terceira, include, physics, suffered, qing, councils, storming,
Nearest to first: attained, walkways, guano, cassel, topped, contrapuntal, hulk, bryozoans,
Nearest to more: salm, dandyism, leaks, mdf, sorcery, biennial, articulate, austere,
Nearest to been: illegally, oily, is, was, utopia, profits, friz, coslet,
Nearest to it: he, this, there, not, which, they, also, that,
Nearest to while: resonated, unsafe, lech, squarepants, windshield, margarine, renaissance, heritability,
Nearest to if: astronauts, revealed, hard, does, prokaryotic, altimeter, irrelevant, dominant,
Nearest to they: he, it, we, not, there, she, alexandrine, putney,
Nearest to american: folded, bront, chemists, hypothesizing, cameos, khans, soma, uranus,
Nearest to five: six, three, seven, eight, four, nine, zero, two,
Nearest to were: are, was, by, goth, unite, fullerene, have, croquet,
Nearest to there: it, he, caliph, they, werewolf, not, fritz, methamphetamine,
Nearest to is: was, are, has, akbar, ziggy, been, unep, carmilla,
Average loss at step 12000: 3.605519
Average loss at step 14000: 3.572525
Average loss at step 16000: 3.412753
Average loss at step 18000: 3.458280
Average loss at step 20000: 3.543335
Nearest to not: never, it, often, mutt, to, also, usually, they,
Nearest to had: has, have, was, were, been, otherworldly, when, recently,
Nearest to a: any, the, thyssen, devices, flowered, no, kenner, dodger,
Nearest to other: many, some, terceira, garber, different, qing, these, include,
Nearest to first: last, attained, rung, second, following, volvo, kondratiev, territory,
Nearest to more: very, most, ira, articulate, dandyism, less, biennial, mdf,
Nearest to been: be, become, were, had, was, profits, illegally, utopia,
Nearest to it: he, this, there, which, they, not, she, peugeot,
Nearest to while: resonated, lech, unsafe, between, outpost, margarine, hemingway, are,
Nearest to if: when, does, revealed, but, hard, astronauts, boh, ditch,
Nearest to they: he, we, there, it, she, who, you, arrakis,
Nearest to american: british, english, chemists, austrian, bront, cameos, national, bogged,
Nearest to five: four, three, seven, six, eight, two, zero, nine,
Nearest to were: are, had, was, have, be, by, been, goth,
Nearest to there: it, they, he, which, probable, still, xt, methamphetamine,
Nearest to is: was, has, are, carmilla, be, arrears, pack, but,
Average loss at step 22000: 3.504781
Average loss at step 24000: 3.490597
Average loss at step 26000: 3.480833
Average loss at step 28000: 3.482919
Average loss at step 30000: 3.504253
Nearest to not: they, never, still, to, it, often, extremely, pains,
Nearest to had: has, have, was, were, when, claimed, having, otherworldly,
Nearest to a: bytecode, any, proconsul, no, kenner, the, thermodynamically, reported,
Nearest to other: different, many, some, these, garber, qing, expiry, various,
Nearest to first: second, last, clad, territory, sentencing, attained, rung, workprint,
Nearest to more: less, most, very, longer, dandyism, articulate, ira, larger,
Nearest to been: become, be, was, were, illegally, profits, utopia, had,
Nearest to it: he, there, this, she, they, which, not, also,
Nearest to while: however, although, are, resonated, before, epicurean, hemingway, when,
Nearest to if: when, must, ditch, contemplating, although, revealed, prokaryotic, does,
Nearest to they: there, we, he, who, not, it, she, you,
Nearest to american: british, english, french, national, bront, chemists, german, austrian,
Nearest to five: four, eight, six, three, seven, zero, nine, two,
Nearest to were: are, was, have, had, been, be, these, laude,
Nearest to there: they, it, he, probable, still, this, displace, she,
Nearest to is: was, has, are, became, be, were, takes, when,
Average loss at step 32000: 3.498526
Average loss at step 34000: 3.497841
Average loss at step 36000: 3.455681
Average loss at step 38000: 3.305538
Average loss at step 40000: 3.431059
Nearest to not: they, still, never, it, also, often, usually, quite,
Nearest to had: has, have, was, were, regenerated, claimed, would, otherworldly,
Nearest to a: the, any, another, inaccuracies, borel, no, kenner, geosynchronous,
Nearest to other: different, various, including, many, some, virginity, quench, rocker,
Nearest to first: second, last, next, topped, greatest, periodic, rung, clad,
Nearest to more: less, most, very, longer, mdf, articulate, dandyism, ira,
Nearest to been: become, be, was, were, illegally, friz, maser, utopia,
Nearest to it: he, there, she, this, they, which, not, but,
Nearest to while: however, when, although, but, before, after, were, was,
Nearest to if: when, where, ditch, oxidizes, whitehead, contemplating, must, is,
Nearest to they: we, there, you, he, it, not, these, she,
Nearest to american: british, german, english, french, chemists, affiliates, cameos, bront,
Nearest to five: seven, four, six, three, eight, zero, two, nine,
Nearest to were: are, have, was, had, been, these, while, be,
Nearest to there: they, it, still, probable, he, which, she, often,
Nearest to is: was, has, are, conspired, be, if, arrears, satisfiable,
Average loss at step 42000: 3.432580
Average loss at step 44000: 3.455266
Average loss at step 46000: 3.452281
Average loss at step 48000: 3.358367
Average loss at step 50000: 3.384904
Nearest to not: never, still, they, usually, it, always, quite, now,
Nearest to had: has, have, was, having, were, would, regenerated, been,
Nearest to a: any, the, bytecode, another, germaine, no, petrels, smuts,
Nearest to other: various, different, many, some, interchange, these, cheetah, hydrogen,
Nearest to first: second, last, next, greatest, same, aliases, corresponding, periodic,
Nearest to more: less, most, very, longer, articulate, rather, dandyism, smaller,
Nearest to been: become, was, be, were, maser, had, utopia, won,
Nearest to it: he, there, she, this, they, now, eea, still,
Nearest to while: when, although, however, but, after, before, though, where,
Nearest to if: when, where, ditch, although, before, could, while, contemplating,
Nearest to they: there, he, we, you, she, it, these, not,
Nearest to american: english, british, french, german, austrian, bront, affiliates, pareto,
Nearest to five: four, six, seven, eight, three, zero, nine, two,
Nearest to were: are, was, have, had, been, those, including, be,
Nearest to there: they, it, he, probable, still, she, now, this,
Nearest to is: was, are, has, became, takes, although, be, carmilla,
Average loss at step 52000: 3.436226
Average loss at step 54000: 3.428847
Average loss at step 56000: 3.438321
Average loss at step 58000: 3.398145
Average loss at step 60000: 3.390705
Nearest to not: still, never, to, they, always, now, it, i,
Nearest to had: has, have, was, having, were, been, claimed, did,
Nearest to a: the, another, mahjong, germaine, any, phonology, container, reused,
Nearest to other: different, various, many, including, these, bukem, garber, abundant,
Nearest to first: second, last, next, same, climax, greatest, only, danville,
Nearest to more: less, very, rather, most, longer, extremely, larger, articulate,
Nearest to been: become, be, were, was, had, previously, mecklenburg, illegally,
Nearest to it: he, this, there, she, which, what, they, still,
Nearest to while: although, when, before, after, though, were, however, during,
Nearest to if: when, where, because, before, since, ditch, although, must,
Nearest to they: we, there, you, he, she, it, these, i,
Nearest to american: english, british, european, french, bront, german, australian, bogged,
Nearest to five: four, six, three, eight, seven, zero, nine, two,
Nearest to were: are, was, have, had, been, including, while, those,
Nearest to there: they, it, still, he, this, now, probable, she,
Nearest to is: was, are, has, becomes, carmilla, but, became, misrepresented,
Average loss at step 62000: 3.247283
Average loss at step 64000: 3.249322
Average loss at step 66000: 3.402824
Average loss at step 68000: 3.398561
Average loss at step 70000: 3.360380
Nearest to not: still, never, nothing, quite, now, regnant, elkhart, always,
Nearest to had: has, have, was, having, were, otherworldly, claimed, been,
Nearest to a: the, bytecode, another, shook, behaved, phonology, filing, any,
Nearest to other: various, different, many, including, hydrogen, abundant, peripheral, some,
Nearest to first: second, last, next, same, rehearsing, bevin, greatest, only,
Nearest to more: less, most, very, extremely, rather, larger, longer, quite,
Nearest to been: become, be, was, were, begun, previously, had, recently,
Nearest to it: he, there, she, this, they, still, what, usually,
Nearest to while: although, when, before, however, if, though, where, after,
Nearest to if: when, while, though, although, where, ditch, since, before,
Nearest to they: we, he, there, you, she, it, these, wracked,
Nearest to american: british, english, european, african, bront, drugs, french, australian,
Nearest to five: four, six, three, seven, eight, nine, zero, two,
Nearest to were: are, have, was, had, be, been, those, including,
Nearest to there: they, it, still, probable, now, we, sometimes, he,
Nearest to is: was, has, are, be, although, seems, makes, contains,
Average loss at step 72000: 3.377010
Average loss at step 74000: 3.349422
Average loss at step 76000: 3.316612
Average loss at step 78000: 3.354689
Average loss at step 80000: 3.379203
Nearest to not: still, nothing, usually, they, never, now, generally, elkhart,
Nearest to had: has, have, were, having, was, began, since, been,
Nearest to a: another, the, bytecode, proconsul, every, mahjong, thyssen, bastiat,
Nearest to other: various, different, some, many, chun, rocker, individual, others,
Nearest to first: second, last, next, same, third, only, best, bevin,
Nearest to more: less, very, most, larger, smaller, longer, rather, extremely,
Nearest to been: become, be, were, was, previously, had, roussimoff, recently,
Nearest to it: he, she, there, this, they, peugeot, surprisingly, neither,
Nearest to while: although, when, though, before, after, however, if, or,
Nearest to if: when, though, before, where, ditch, although, since, while,
Nearest to they: we, he, there, you, she, it, these, who,
Nearest to american: british, german, english, bront, french, european, australian, russian,
Nearest to five: four, six, seven, eight, three, nine, zero, two,
Nearest to were: are, was, had, have, although, been, being, include,
Nearest to there: they, it, he, she, still, we, now, probable,
Nearest to is: was, has, are, although, carmilla, becomes, takes, does,
Average loss at step 82000: 3.408115
Average loss at step 84000: 3.411410
Average loss at step 86000: 3.387884
Average loss at step 88000: 3.351522
Average loss at step 90000: 3.368732
Nearest to not: still, elkhart, t, nothing, now, they, normally, huldrych,
Nearest to had: has, have, were, was, having, began, since, would,
Nearest to a: any, the, bytecode, another, every, lackluster, dodger, thyssen,
Nearest to other: various, individual, different, meltdown, memetics, others, cheetah, unremarkable,
Nearest to first: second, last, next, original, same, greatest, third, largest,
Nearest to more: less, very, most, rather, longer, quite, extremely, larger,
Nearest to been: become, be, was, already, were, previously, loew, abingdon,
Nearest to it: he, she, there, they, this, itself, still, generally,
Nearest to while: although, when, before, though, after, during, were, however,
Nearest to if: when, where, though, before, although, ditch, since, until,
Nearest to they: we, he, she, there, you, it, but, hitpa,
Nearest to american: british, german, french, european, english, african, bront, italian,
Nearest to five: four, seven, three, eight, six, nine, zero, two,
Nearest to were: are, had, was, have, while, been, although, being,
Nearest to there: it, they, he, still, she, now, probable, we,
Nearest to is: was, has, are, although, becomes, be, seems, circ,
Average loss at step 92000: 3.403268
Average loss at step 94000: 3.251248
Average loss at step 96000: 3.359675
Average loss at step 98000: 3.242441
Average loss at step 100000: 3.355081
Nearest to not: still, never, t, nothing, normally, you, now, they,
Nearest to had: has, have, was, were, having, would, began, is,
Nearest to a: another, any, the, proconsul, herakles, mahjong, lackluster, reused,
Nearest to other: various, others, individual, rocker, peripheral, meltdown, specific, abundant,
Nearest to first: last, second, next, third, entitled, lysander, elucidated, original,
Nearest to more: less, very, most, larger, longer, extremely, smaller, quite,
Nearest to been: become, be, already, was, previously, loew, seldom, recently,
Nearest to it: he, she, there, this, they, often, never, what,
Nearest to while: although, when, before, though, if, where, and, however,
Nearest to if: when, where, though, before, while, ditch, because, until,
Nearest to they: we, he, there, you, she, it, these, not,
Nearest to american: british, italian, bront, canadian, australian, french, bogged, russian,
Nearest to five: seven, four, six, eight, three, zero, nine, two,
Nearest to were: are, have, was, had, those, these, including, although,
Nearest to there: they, it, he, still, now, often, sometimes, generally,
Nearest to is: was, has, became, are, seems, be, becomes, makes,