Word2Vec

A Word2Vec skip-gram model (see the Mikolov, Sutskever, Chen, and Dean paper) trained on the Text8 dataset.
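For reference, the skip-gram model predicts, for each center word, the words within skip_window positions of it, maximizing the average log-probability

$$\frac{1}{T}\sum_{t=1}^{T}\;\sum_{-c \le j \le c,\; j \ne 0} \log p(w_{t+j}\mid w_t),$$

where c is the window size. The full softmax over the 50,000-word vocabulary is approximated below with sampled softmax (64 negative samples per batch).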


In [1]:
%matplotlib inline
from __future__ import print_function
import collections
import math
import numpy as np
import os
import random
import tensorflow as tf
import zipfile
from matplotlib import pylab
from six.moves import range
from six.moves.urllib.request import urlretrieve
from sklearn.manifold import TSNE

url = 'http://mattmahoney.net/dc/'

def maybe_download(filename, expected_bytes):
  """Download a file if not present, and make sure it's the right size."""
  if not os.path.exists(filename):
    filename, _ = urlretrieve(url + filename, filename)
  statinfo = os.stat(filename)
  if statinfo.st_size == expected_bytes:
    print('Found and verified %s' % filename)
  else:
    print(statinfo.st_size)
    raise Exception(
      'Failed to verify ' + filename + '. Can you get to it with a browser?')
  return filename

filename = maybe_download('text8.zip', 31344016)

def read_data(filename):
  """Extract the first file enclosed in a zip file as a list of words"""
  with zipfile.ZipFile(filename) as f:
    data = tf.compat.as_str(f.read(f.namelist()[0])).split()
  return data
  
words = read_data(filename)
print('Data size %d' % len(words))

vocabulary_size = 50000

def build_dataset(words):
  count = [['UNK', -1]]
  count.extend(collections.Counter(words).most_common(vocabulary_size - 1))
  dictionary = dict()
  for word, _ in count:
    dictionary[word] = len(dictionary)
  data = list()
  unk_count = 0
  for word in words:
    if word in dictionary:
      index = dictionary[word]
    else:
      index = 0  # dictionary['UNK']
      unk_count = unk_count + 1
    data.append(index)
  count[0][1] = unk_count
  reverse_dictionary = dict(zip(dictionary.values(), dictionary.keys())) 
  return data, count, dictionary, reverse_dictionary

data, count, dictionary, reverse_dictionary = build_dataset(words)
print('Most common words (+UNK)', count[:5])
print('Sample data', data[:10])
del words  # Hint to reduce memory.


Found and verified text8.zip
Data size 17005207
Most common words (+UNK) [['UNK', 418391], ('the', 1061396), ('of', 593677), ('and', 416629), ('one', 411764)]
Sample data [5237, 3082, 12, 6, 195, 2, 3136, 46, 59, 156]

In [2]:
data_index = 0

def generate_batch(batch_size, num_skips, skip_window):
  global data_index
  assert batch_size % num_skips == 0
  assert num_skips <= 2 * skip_window
  batch = np.ndarray(shape=(batch_size), dtype=np.int32)
  labels = np.ndarray(shape=(batch_size, 1), dtype=np.int32)
  span = 2 * skip_window + 1 # [ skip_window target skip_window ]
  buffer = collections.deque(maxlen=span)
  for _ in range(span):
    buffer.append(data[data_index])
    data_index = (data_index + 1) % len(data)
  for i in range(batch_size // num_skips):
    target = skip_window  # target label at the center of the buffer
    targets_to_avoid = [ skip_window ]
    for j in range(num_skips):
      while target in targets_to_avoid:
        target = random.randint(0, span - 1)
      targets_to_avoid.append(target)
      batch[i * num_skips + j] = buffer[skip_window]
      labels[i * num_skips + j, 0] = buffer[target]
    buffer.append(data[data_index])
    data_index = (data_index + 1) % len(data)
  return batch, labels

print('data:', [reverse_dictionary[di] for di in data[:8]])

for num_skips, skip_window in [(2, 1), (4, 2)]:
    data_index = 0
    batch, labels = generate_batch(batch_size=8, num_skips=num_skips, skip_window=skip_window)
    print('\nwith num_skips = %d and skip_window = %d:' % (num_skips, skip_window))
    print('    batch:', [reverse_dictionary[bi] for bi in batch])
    print('    labels:', [reverse_dictionary[li] for li in labels.reshape(8)])


data: ['anarchism', 'originated', 'as', 'a', 'term', 'of', 'abuse', 'first']

with num_skips = 2 and skip_window = 1:
    batch: ['originated', 'originated', 'as', 'as', 'a', 'a', 'term', 'term']
    labels: ['as', 'anarchism', 'a', 'originated', 'as', 'term', 'of', 'a']

with num_skips = 4 and skip_window = 2:
    batch: ['as', 'as', 'as', 'as', 'a', 'a', 'a', 'a']
    labels: ['term', 'originated', 'a', 'anarchism', 'originated', 'of', 'term', 'as']
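
As a quick sanity check (a sketch; it reuses data and generate_batch from the cell above), every label should come from the window around its batch word. For num_skips = 2 and skip_window = 1, the center word of the idx-th batch entry sits at position idx // 2 + skip_window in data:

data_index = 0
batch, labels = generate_batch(batch_size=8, num_skips=2, skip_window=1)
for idx in range(8):
  center_pos = idx // 2 + 1                      # position of the center word in data
  window = data[center_pos - 1:center_pos + 2]   # center plus one word on each side
  assert batch[idx] == data[center_pos]
  assert labels[idx, 0] in window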

In [3]:
def variable_summaries(var):
  """Attach a lot of summaries to a Tensor (for TensorBoard visualization)."""
  with tf.name_scope('summaries'):
    mean = tf.reduce_mean(var)
    tf.summary.scalar(var.name + '_mean', mean)
    stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))
    tf.summary.scalar(var.name + '_stddev', stddev)
    tf.summary.scalar(var.name + '_max', tf.reduce_max(var))
    tf.summary.scalar(var.name + '_min', tf.reduce_min(var))
    tf.summary.histogram(var.name, var)

In [4]:
def save_metadata(file, d):
  """Write one label per line (in index order) for the TensorBoard projector."""
  with open(file, 'w') as f:
    for c in d:
      f.write('{}\n'.format(c))
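
For reference, the metadata file is simply one label per line, aligned with the embedding's row indices; given the counts above, its first lines would be:

UNK
the
of
and
one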

In [5]:
from tensorflow.contrib.tensorboard.plugins import projector
config = projector.ProjectorConfig()

batch_size = 128
embedding_size = 128 # Dimension of the embedding vector.
skip_window = 1 # How many words to consider left and right.
num_skips = 2 # How many times to reuse an input to generate a label.
# We pick a random validation set to sample nearest neighbors. here we limit the
# validation samples to the words that have a low numeric ID, which by
# construction are also the most frequent. 
valid_size = 16 # Random set of words to evaluate similarity on.
valid_window = 100 # Only pick dev samples in the head of the distribution.
valid_examples = np.array(random.sample(range(valid_window), valid_size))
num_sampled = 64 # Number of negative examples to sample.

graph = tf.Graph()

with graph.as_default(), tf.device('/cpu:0'):

  # Input data.
  train_dataset = tf.placeholder(tf.int32, shape=[batch_size])
  train_labels = tf.placeholder(tf.int32, shape=[batch_size, 1])
  valid_dataset = tf.constant(valid_examples, dtype=tf.int32)
  
  # Variables.
  #with tf.name_scope('embed'):
  embeddings = tf.Variable(
      tf.random_uniform([vocabulary_size, embedding_size], -1.0, 1.0) , name="embeddings")
  #variable_summaries(embeddings)

  embedding = config.embeddings.add()
  embedding.tensor_name = embeddings.name
  # Link this tensor to its metadata file (e.g. labels).
  embedding.metadata_path = "./_logs3/metadata.tsv"

  if not os.path.exists("./_logs3/"):
    os.makedirs("./_logs3/")
  # Write labels in embedding-row order so the projector lines up indices and words.
  save_metadata(file="./_logs3/metadata.tsv",
                d=[reverse_dictionary[i] for i in range(vocabulary_size)])
    
  
  softmax_weights = tf.Variable(
    tf.truncated_normal([vocabulary_size, embedding_size],
                         stddev=1.0 / math.sqrt(embedding_size)))
  softmax_biases = tf.Variable(tf.zeros([vocabulary_size]))
  
  # Model.
  # Look up embeddings for inputs.
  embed = tf.nn.embedding_lookup(embeddings, train_dataset)
  # Compute the softmax loss, using a sample of the negative labels each time.
  
  with tf.name_scope('loss_function'):
    loss = tf.reduce_mean(
        tf.nn.sampled_softmax_loss(weights=softmax_weights,
                                   biases=softmax_biases,
                                   labels=train_labels,
                                   inputs=embed,
                                   num_sampled=num_sampled,
                                   num_classes=vocabulary_size))
    tf.summary.scalar('loss', loss)

  # Optimizer.
  # Note: The optimizer will optimize the softmax_weights AND the embeddings.
  # This is because the embeddings are defined as a variable quantity and the
  # optimizer's `minimize` method will by default modify all variable quantities 
  # that contribute to the tensor it is passed.
  # See docs on `tf.train.Optimizer.minimize()` for more details.
  optimizer = tf.train.AdagradOptimizer(1.0).minimize(loss)
  
  

  with tf.name_scope('similarity_function'):
    # Compute the similarity between minibatch examples and all embeddings.
    # We use the cosine distance:
    norm = tf.sqrt(tf.reduce_sum(tf.square(embeddings), 1, keep_dims=True))
    normalized_embeddings = embeddings / norm
    valid_embeddings = tf.nn.embedding_lookup(normalized_embeddings, valid_dataset)
    
    similarity = tf.matmul(valid_embeddings, tf.transpose(normalized_embeddings))
  variable_summaries(similarity)


INFO:tensorflow:Summary name similarity_function/MatMul:0_mean is illegal; using similarity_function/MatMul_0_mean instead.
INFO:tensorflow:Summary name similarity_function/MatMul:0_stddev is illegal; using similarity_function/MatMul_0_stddev instead.
INFO:tensorflow:Summary name similarity_function/MatMul:0_max is illegal; using similarity_function/MatMul_0_max instead.
INFO:tensorflow:Summary name similarity_function/MatMul:0_min is illegal; using similarity_function/MatMul_0_min instead.
INFO:tensorflow:Summary name similarity_function/MatMul:0 is illegal; using similarity_function/MatMul_0 instead.
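
The INFO messages above appear because var.name includes the tensor's output index (':0'), which is not a legal summary tag, so TensorFlow sanitizes it automatically. A variant that strips the suffix up front (a sketch, not what produced the run above) avoids the messages:

def variable_summaries_clean(var):
  """Like variable_summaries, but with ':' replaced so the summary tags are legal."""
  name = var.name.replace(':', '_')
  with tf.name_scope('summaries'):
    mean = tf.reduce_mean(var)
    tf.summary.scalar(name + '_mean', mean)
    tf.summary.scalar(name + '_stddev',
                      tf.sqrt(tf.reduce_mean(tf.square(var - mean))))
    tf.summary.scalar(name + '_max', tf.reduce_max(var))
    tf.summary.scalar(name + '_min', tf.reduce_min(var))
    tf.summary.histogram(name, var)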

In [6]:
num_steps = 100001

with tf.Session(graph=graph) as session:
    
  saver = tf.train.Saver()  
    
    
  tf.global_variables_initializer().run()
  print('Initialized')
    
  merged = tf.summary.merge_all()
  writer = tf.summary.FileWriter('./_logs3', session.graph)
    
  # Saves a configuration file that TensorBoard will read during startup.
  projector.visualize_embeddings(writer, config)
    
  average_loss = 0
  for step in range(num_steps):
    batch_data, batch_labels = generate_batch(
      batch_size, num_skips, skip_window)
    feed_dict = {train_dataset : batch_data, train_labels : batch_labels}
    _, l = session.run([optimizer, loss], feed_dict=feed_dict)
    average_loss += l
    if step % 2000 == 0:
      if step > 0:
        average_loss = average_loss / 2000
      # The average loss is an estimate of the loss over the last 2000 batches.
      print('Average loss at step %d: %f' % (step, average_loss))
      average_loss = 0
    # note that this is expensive (~20% slowdown if computed every 500 steps)
    if step % 10000 == 0:
      sim = similarity.eval()
      for i in range(valid_size):
        valid_word = reverse_dictionary[valid_examples[i]]
        top_k = 8 # number of nearest neighbors
        nearest = (-sim[i, :]).argsort()[1:top_k+1]
        log = 'Nearest to %s:' % valid_word
        for k in range(top_k):
          close_word = reverse_dictionary[nearest[k]]
          log = '%s %s,' % (log, close_word)
        print(log)
      
      summary, _ = session.run([merged, similarity], feed_dict=feed_dict)
      writer.add_summary(summary, step)
      writer.flush()
          
      checkpoint_file = os.path.join('./_logs3', 'checkpoint')
      saver.save(session, checkpoint_file, global_step=step)
    
  final_embeddings = normalized_embeddings.eval()


Initialized
Average loss at step 0: 7.954560
Nearest to also: pausanias, mercantilism, shortened, christological, bw, herding, nils, commons,
Nearest to so: greenwich, marten, landau, menace, bloody, explains, konar, potency,
Nearest to more: ebro, foy, translator, valuation, wakko, jacobsen, ammi, dopant,
Nearest to about: pellets, minor, speed, eero, regular, untranslated, overturn, monitoring,
Nearest to UNK: dispensation, vantage, receiver, cropped, entitled, converging, stagnation, abkhazian,
Nearest to five: scrupulous, stefan, warhol, ago, selangor, mutant, braid, specifically,
Nearest to state: enos, dong, adoptionism, ivanhoe, bridgehead, furthering, riga, cohorts,
Nearest to if: maximize, yahweh, barkley, cruisers, linseed, garland, abomination, fiddle,
Nearest to however: collimated, tethys, septuagint, doodle, pot, gi, explanation, lyell,
Nearest to on: dared, extraneous, crystallization, arguing, ignaz, extend, watches, ditches,
Nearest to these: fdr, knesset, kurosawa, rcaf, umm, tro, bundesrat, domicile,
Nearest to was: boii, hardwired, notary, precisely, jacobins, hoping, devonport, plantinga,
Nearest to can: keillor, robes, ours, twofold, futurians, unrestricted, whence, annales,
Nearest to i: vcs, effecting, idaho, intranet, dato, emeritus, kuwaiti, bernardo,
Nearest to would: commons, pkn, blenheim, dismal, generalitat, chokes, yoakum, lotharingia,
Nearest to first: inspector, discover, close, grimes, characterizing, umpires, sites, accents,
Average loss at step 2000: 4.361203
Average loss at step 4000: 3.865426
Average loss at step 6000: 3.791415
Average loss at step 8000: 3.680844
Average loss at step 10000: 3.617455
Nearest to also: not, probably, which, it, atsc, postgresql, often, prescriptive,
Nearest to so: marten, landau, bloody, menace, potency, posterior, ignited, explains,
Nearest to more: less, elba, donald, valuation, mie, hunters, makes, foy,
Nearest to about: minor, mcdonough, waterman, youngest, declaration, rohan, marten, country,
Nearest to UNK: recovering, rashid, ergative, arrangement, payroll, hesitated, hunyadi, moc,
Nearest to five: six, three, eight, four, seven, nine, two, zero,
Nearest to state: liu, enos, nascent, vs, assistance, riga, repertoire, symbolics,
Nearest to if: lateral, flaming, shapeshifting, displacements, luddites, eaters, cruisers, adept,
Nearest to however: collimated, tethys, septuagint, repeaters, pacification, personas, reserves, inaccurate,
Nearest to on: in, with, upon, arguing, associates, at, pc, sandstone,
Nearest to these: fdr, rcaf, marches, perjury, many, tro, bundesrat, semivowel,
Nearest to was: is, were, has, had, by, devonport, orbiting, became,
Nearest to can: could, will, would, may, unrestricted, ours, twofold, robes,
Nearest to i: vcs, bendix, emeritus, aardwolf, scenarios, herbalists, eca, dubbed,
Nearest to would: can, dismal, must, engl, could, will, ryaku, did,
Nearest to first: inspector, umpires, envoys, close, grandparents, discover, somatic, noree,
Average loss at step 12000: 3.609629
Average loss at step 14000: 3.573637
Average loss at step 16000: 3.409303
Average loss at step 18000: 3.459376
Average loss at step 20000: 3.540874
Nearest to also: which, often, atsc, now, probably, then, still, not,
Nearest to so: marten, posterior, potency, bloody, landau, menace, trusts, footing,
Nearest to more: less, most, elba, donald, unspecific, pitcairn, other, retina,
Nearest to about: autumnal, waterman, handwriting, minor, credible, untranslated, youngest, formants,
Nearest to UNK: melvin, r, footlights, shelved, donnelly, melanesian, fermenting, rashid,
Nearest to five: four, three, six, seven, eight, two, zero, nine,
Nearest to state: liu, repertoire, invasion, nascent, symbolics, elisabetta, furthering, dong,
Nearest to if: when, execute, lateral, adept, eaters, displacements, sternum, indonesians,
Nearest to however: but, septuagint, when, prosecution, capone, collimated, sharm, rowe,
Nearest to on: upon, in, at, against, feudal, speer, monocotyledons, ipo,
Nearest to these: many, some, all, semivowel, such, other, perjury, several,
Nearest to was: is, has, had, were, became, be, been, could,
Nearest to can: will, could, may, would, must, should, unrestricted, might,
Nearest to i: ii, they, we, vcs, scenarios, enlists, herbalists, immunosuppression,
Nearest to would: can, will, could, may, must, should, cannot, dismal,
Nearest to first: inspector, second, last, noree, legacy, umpires, befriends, leitrim,
Average loss at step 22000: 3.501311
Average loss at step 24000: 3.489143
Average loss at step 26000: 3.484368
Average loss at step 28000: 3.481408
Average loss at step 30000: 3.506894
Nearest to also: often, now, still, which, probably, who, generally, barbados,
Nearest to so: marten, trusts, potency, posterior, explains, bloody, too, landau,
Nearest to more: less, elba, very, most, disliked, transferable, unspecific, retina,
Nearest to about: autumnal, simplification, andree, sustainable, elementary, disorder, just, over,
Nearest to UNK: gruesome, svc, preliminary, et, homophobic, melvin, recovering, arabs,
Nearest to five: four, eight, seven, six, three, zero, two, nine,
Nearest to state: repertoire, liu, drag, elisabetta, knitters, nascent, invasion, prespa,
Nearest to if: when, is, adept, because, ascendant, sternum, where, that,
Nearest to however: but, when, capone, septuagint, that, ldots, factsheet, conjectures,
Nearest to on: upon, in, against, speer, vulva, from, rulings, kroto,
Nearest to these: some, many, such, several, all, they, their, other,
Nearest to was: is, has, had, were, became, been, vangelis, devonport,
Nearest to can: could, will, may, would, must, should, might, cannot,
Nearest to i: ii, we, you, vcs, they, asin, ef, subdivision,
Nearest to would: can, will, could, may, must, should, cannot, to,
Nearest to first: last, second, inspector, tipped, aggravating, legacy, designing, chk,
Average loss at step 32000: 3.500171
Average loss at step 34000: 3.493055
Average loss at step 36000: 3.456301
Average loss at step 38000: 3.300444
Average loss at step 40000: 3.429343
Nearest to also: often, which, still, now, who, there, not, probably,
Nearest to so: trusts, marten, too, herpes, bloody, afewerki, biscuit, posterior,
Nearest to more: less, most, very, greater, acidic, disliked, publish, elba,
Nearest to about: autumnal, disorder, behind, finkelstein, waterman, prat, andree, bach,
Nearest to UNK: bubblegum, fermenting, photographic, assessing, r, mathilde, complicates, palettes,
Nearest to five: six, seven, four, eight, three, two, nine, zero,
Nearest to state: government, repertoire, liu, rot, nin, fukuda, carburetor, ntsc,
Nearest to if: when, where, that, ascendant, indonesians, sternum, pillaged, because,
Nearest to however: but, that, though, although, reserves, turnpike, conjectures, monasticism,
Nearest to on: upon, elder, against, in, slater, pc, during, gly,
Nearest to these: many, some, several, both, such, they, all, were,
Nearest to was: is, had, became, has, were, be, been, when,
Nearest to can: could, will, may, would, must, should, might, cannot,
Nearest to i: ii, we, you, he, ef, t, they, moire,
Nearest to would: will, could, can, may, must, should, cannot, might,
Nearest to first: last, second, next, legacy, inspector, designing, tipped, rpr,
Average loss at step 42000: 3.439702
Average loss at step 44000: 3.449790
Average loss at step 46000: 3.449680
Average loss at step 48000: 3.350734
Average loss at step 50000: 3.384668
Nearest to also: now, which, often, still, who, gmc, postgresql, then,
Nearest to so: then, too, trusts, hermes, sioux, afewerki, biscuit, herpes,
Nearest to more: less, most, very, elba, greater, disliked, publish, jitter,
Nearest to about: autumnal, disorder, prat, behind, chic, finkelstein, handwriting, manx,
Nearest to UNK: de, buchan, arabs, von, contravariant, douglass, ostpolitik, knox,
Nearest to five: four, six, eight, seven, zero, nine, three, two,
Nearest to state: government, knitters, nin, national, carburetor, fukuda, symbolics, ntsc,
Nearest to if: when, where, though, indonesians, although, without, sternum, pillaged,
Nearest to however: but, although, that, while, when, turnpike, though, where,
Nearest to on: upon, in, at, against, through, associates, secant, monocotyledons,
Nearest to these: some, many, both, several, such, which, different, essenes,
Nearest to was: is, has, became, were, had, be, recordable, been,
Nearest to can: could, may, will, would, must, should, might, cannot,
Nearest to i: ii, we, you, t, they, ef, victors, stoppage,
Nearest to would: could, will, can, may, must, should, might, cannot,
Nearest to first: second, last, next, legacy, same, shab, tipped, only,
Average loss at step 52000: 3.435921
Average loss at step 54000: 3.424238
Average loss at step 56000: 3.437818
Average loss at step 58000: 3.400315
Average loss at step 60000: 3.391935
Nearest to also: now, still, often, there, which, sometimes, gmc, alcal,
Nearest to so: too, trusts, hermes, then, if, biscuit, marten, simply,
Nearest to more: less, very, most, greater, other, elba, larger, better,
Nearest to about: autumnal, over, prat, disorder, forty, chic, behind, kwa,
Nearest to UNK: rashid, universala, microsoft, wi, examines, excitement, finn, masada,
Nearest to five: four, six, eight, three, seven, zero, nine, two,
Nearest to state: government, knitters, fukuda, elisabetta, national, states, ntsc, symbolics,
Nearest to if: when, where, though, because, indonesians, without, then, although,
Nearest to however: but, although, that, though, when, turnpike, since, while,
Nearest to on: upon, rulings, through, against, sandstone, in, unreachable, abatis,
Nearest to these: many, some, several, both, such, different, which, they,
Nearest to was: is, had, became, has, were, be, although, feistel,
Nearest to can: may, could, will, would, must, should, might, cannot,
Nearest to i: ii, we, you, t, they, immunosuppression, childish, herbalists,
Nearest to would: could, will, can, may, must, might, should, cannot,
Nearest to first: second, last, next, only, tipped, third, soaemias, same,
Average loss at step 62000: 3.244823
Average loss at step 64000: 3.258482
Average loss at step 66000: 3.407995
Average loss at step 68000: 3.390361
Average loss at step 70000: 3.360604
Nearest to also: still, now, often, which, sometimes, there, currently, never,
Nearest to so: too, hermes, trusts, then, biscuit, afewerki, if, patented,
Nearest to more: less, most, very, better, greater, elba, extremely, rather,
Nearest to about: autumnal, over, behind, chic, kwa, age, around, prat,
Nearest to UNK: stafford, topics, quad, naturally, linebarger, hindustan, accuser, inserting,
Nearest to five: four, six, three, eight, seven, nine, zero, two,
Nearest to state: government, fukuda, knitters, states, prespa, symbolics, mom, mediocre,
Nearest to if: when, though, where, although, because, however, while, for,
Nearest to however: but, although, though, where, that, when, while, if,
Nearest to on: upon, through, within, in, slater, dealings, rulings, feudal,
Nearest to these: some, many, such, are, those, their, several, the,
Nearest to was: is, were, has, had, became, been, be, becomes,
Nearest to can: may, could, will, would, must, should, might, cannot,
Nearest to i: ii, we, you, g, alfv, t, herbalists, victors,
Nearest to would: could, will, may, can, must, might, should, cannot,
Nearest to first: second, last, next, third, tipped, same, soaemias, during,
Average loss at step 72000: 3.374328
Average loss at step 74000: 3.351312
Average loss at step 76000: 3.317458
Average loss at step 78000: 3.351869
Average loss at step 80000: 3.377374
Nearest to also: still, now, often, sometimes, which, gmc, never, currently,
Nearest to so: too, then, hermes, trusts, sputnik, horrific, biscuit, cultivars,
Nearest to more: less, very, most, rather, better, larger, extremely, elba,
Nearest to about: autumnal, over, chic, kwa, around, pianos, age, flagg,
Nearest to UNK: williams, slovakia, munch, refrigerators, ed, writer, paces, actor,
Nearest to five: four, six, seven, eight, three, nine, zero, two,
Nearest to state: fukuda, government, knitters, states, prespa, city, mediocre, symbolics,
Nearest to if: when, though, where, although, before, because, without, since,
Nearest to however: although, but, though, where, that, while, when, brasilia,
Nearest to on: upon, in, through, within, at, during, against, sandstone,
Nearest to these: many, several, such, those, both, some, various, are,
Nearest to was: is, became, were, has, had, feistel, been, kline,
Nearest to can: could, may, will, must, would, should, cannot, might,
Nearest to i: ii, you, we, t, g, kelvin, stoppage, buds,
Nearest to would: could, will, can, may, must, might, should, cannot,
Nearest to first: second, last, next, third, only, best, during, same,
Average loss at step 82000: 3.407552
Average loss at step 84000: 3.407734
Average loss at step 86000: 3.392605
Average loss at step 88000: 3.354439
Average loss at step 90000: 3.365882
Nearest to also: often, now, still, sometimes, which, never, mononobe, generally,
Nearest to so: too, hermes, trusts, surrounds, sputnik, depending, then, since,
Nearest to more: less, very, most, greater, better, rather, extremely, larger,
Nearest to about: autumnal, over, behind, concerning, chic, kwa, handwriting, occultist,
Nearest to UNK: sedans, backpack, answering, histoire, louder, subverted, fox, peacekeepers,
Nearest to five: four, eight, seven, three, six, nine, two, zero,
Nearest to state: knitters, fukuda, government, city, furthering, states, prespa, mediocre,
Nearest to if: when, though, since, where, although, cultivars, is, however,
Nearest to however: but, although, though, that, where, since, while, especially,
Nearest to on: upon, under, gly, sandstone, against, at, through, in,
Nearest to these: many, several, some, such, are, both, various, were,
Nearest to was: is, had, became, were, has, been, be, feistel,
Nearest to can: may, could, would, will, must, should, cannot, might,
Nearest to i: ii, we, you, frac, iii, t, g, chills,
Nearest to would: could, will, can, might, may, must, should, cannot,
Nearest to first: last, second, next, best, third, earliest, scytale, tipped,
Average loss at step 92000: 3.393868
Average loss at step 94000: 3.254519
Average loss at step 96000: 3.362256
Average loss at step 98000: 3.246801
Average loss at step 100000: 3.351594
Nearest to also: still, now, never, often, actually, sometimes, generally, which,
Nearest to so: too, then, thus, hermes, sometimes, sputnik, afewerki, sagan,
Nearest to more: less, most, very, greater, better, rather, larger, extremely,
Nearest to about: autumnal, chic, over, credible, behind, around, prat, pianos,
Nearest to UNK: accuser, lamanites, kirby, https, storehouses, nom, unavoidable, intersex,
Nearest to five: seven, four, eight, six, three, zero, two, nine,
Nearest to state: fukuda, knitters, government, city, furthering, regensburg, states, prespa,
Nearest to if: when, though, where, since, before, although, without, because,
Nearest to however: but, although, though, that, where, especially, and, while,
Nearest to on: upon, in, within, at, through, against, gly, during,
Nearest to these: several, many, some, such, various, were, all, different,
Nearest to was: is, became, has, were, had, appears, been, becomes,
Nearest to can: could, may, will, must, would, cannot, should, might,
Nearest to i: you, we, ii, they, iii, t, immunosuppression, frac,
Nearest to would: could, will, can, might, may, must, should, cannot,
Nearest to first: last, second, next, third, fourth, best, tipped, final,
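
With training finished, final_embeddings holds the row-normalized embedding matrix. A quick analogy check (a sketch, assuming the dictionary and reverse_dictionary built above; whether 'queen' actually surfaces depends on the run) uses simple vector arithmetic:

def analogy(a, b, c, k=5):
  # b is to a as ? is to c, e.g. 'king' - 'man' + 'woman' should land near 'queen'
  v = (final_embeddings[dictionary[b]]
       - final_embeddings[dictionary[a]]
       + final_embeddings[dictionary[c]])
  v /= np.linalg.norm(v)
  sims = np.dot(final_embeddings, v)            # rows are already unit-norm
  nearest = (-sims).argsort()[:k + 3]
  return [reverse_dictionary[i] for i in nearest
          if reverse_dictionary[i] not in (a, b, c)][:k]

print(analogy('man', 'king', 'woman'))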

Embedding Visualization

Embeddings are ubiquitous in machine learning, appearing in recommender systems, NLP, and many other applications. Indeed, in the context of TensorFlow, it's natural to view tensors (or slices of tensors) as points in space, so almost any TensorFlow system will naturally give rise to various embeddings.

To learn more about embeddings and how to train them, see the Vector Representations of Words tutorial. If you are interested in embeddings of images, check out this article for interesting visualizations of MNIST images. On the other hand, if you are interested in word embeddings, this article gives a good introduction.

PCA

To run TensorBoard, use the following command (or, equivalently, python -m tensorflow.tensorboard):

tensorboard --logdir=_logs3

In the projector's PCA view we can inspect the nearest neighbors of a selected word together with their distances, for example (see the sketch after this list for reproducing such lists outside TensorBoard):

  • lisp 0.903
  • interestingly 0.910
  • exceeds 0.935
  • bulky 0.946
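
Such neighbor lists can also be reproduced outside TensorBoard (a minimal sketch, assuming final_embeddings, dictionary, and reverse_dictionary from the cells above). Because the rows of final_embeddings are unit-norm, a dot product gives the cosine similarity (the projector's cosine distance is 1 minus this value):

def nearest_words(query, k=8):
  sims = np.dot(final_embeddings, final_embeddings[dictionary[query]])
  nearest = (-sims).argsort()[1:k + 1]      # skip the query word itself
  return [(reverse_dictionary[i], float(sims[i])) for i in nearest]

print(nearest_words('france'))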

t-SNE
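
A quick 2-D view of the embeddings (a sketch using the TSNE and pylab imports from the first cell; num_points, perplexity, and n_iter are illustrative choices): project a few hundred of the most frequent words and label the points.

num_points = 400

tsne = TSNE(perplexity=30, n_components=2, init='pca', n_iter=5000)
two_d_embeddings = tsne.fit_transform(final_embeddings[1:num_points + 1, :])

def plot(embeddings, labels):
  assert embeddings.shape[0] >= len(labels), 'More labels than embeddings'
  pylab.figure(figsize=(15, 15))
  for i, label in enumerate(labels):
    x, y = embeddings[i, :]
    pylab.scatter(x, y)
    pylab.annotate(label, xy=(x, y), xytext=(5, 2), textcoords='offset points',
                   ha='right', va='bottom')
  pylab.show()

words = [reverse_dictionary[i] for i in range(1, num_points + 1)]
plot(two_d_embeddings, words)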