Import TensorFlow and NumPy, and make sure both packages are installed


In [1]:
import numpy as np
import tensorflow as tf

Read the text into a list of strings, using TensorFlow


In [3]:
import zipfile as zf

#This function returns a list of words
def read_data(filename):
  with zf.ZipFile(filename) as f:
    raw = f.read(f.namelist()[0])
    data = tf.compat.as_str(raw).split()
        #tf.compat is not covered in the online documentation
  return data

word_list = read_data('data/text/text8.zip')
#data size is the total number of words in the file
print 'Data size: ', len(word_list)


Data size:  17005207
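
As a quick sanity check (an illustrative sketch, not part of the original run), print the first few tokens; they should match the sample decoded further below.


In [ ]:
#Sketch: peek at the first few tokens of the corpus
print word_list[:5]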

Build dataset


In [4]:
import collections

#This is our universe; we don't study any word outside this vocabulary
vocabulary_size = 50000  

#Replace every word by its index to form our dataset; rare words are replaced with the token "UNK"
def build_dataset(words):
  #initialize an array with word and its count, with the first word as 'UNK'
  word_Count = [['UNK', -1]]   
  #Find common words, and add them to our vocabulary array
  unique_words=collections.Counter(word_list)
  print "unique words: ", len(unique_words)
  # We use the most_common function from collections.Counter
  common_words=unique_words.most_common(vocabulary_size-1)  
          #make sure this is "vocabulary_size-1" as we already have 1 word ('UNK') in word_Count
  word_Count.extend(common_words)
  """After the above operation, word_Count is a 2-d array with 50,001 words and their count"""

  #Initialize an empty hashtable
  dictionary = dict()
  for word, _ in word_Count:
    dictionary[word] = len(dictionary)  #current size, thus it is an index
  """After the above operation, dictionary is a hashtable with 50,001 words and their index, ordered by Count"""

  #Create our dataset, where every word is replaced by its index
  data = list()
  unk_count = 0
  for word in word_list:
    if word in dictionary:
      index = dictionary[word]
    else:
      index = 0  # dictionary['UNK']
      unk_count += 1
    data.append(index)
    
  #Update the count for UNK; any word that is not in the common vocabulary is counted as UNK
  word_Count[0][1] = unk_count  #this is a by-product

  reverse_dictionary = dict(zip(dictionary.values(), dictionary.keys())) #zip pairs each index with its word, giving an index-to-word map
    
  return data, word_Count, dictionary, reverse_dictionary

#It takes about 4 seconds to finish
data, word_Count, dictionary, reverse_dictionary = build_dataset(word_list)
del word_list  # to reduce memory.

print('top 10 common words (+UNK)', word_Count[:10])   #the top 10 common words
print ('data= ', data[:10])
print('Sample data',  [reverse_dictionary[i] for i in data[:10]])


unique words:  253854
('top 10 common words (+UNK)', [['UNK', 418391], ('the', 1061396), ('of', 593677), ('and', 416629), ('one', 411764), ('in', 372201), ('a', 325873), ('to', 316376), ('zero', 264975), ('nine', 250430)])
('data= ', [5239, 3084, 12, 6, 195, 2, 3137, 46, 59, 156])
('Sample data', ['anarchism', 'originated', 'as', 'a', 'term', 'of', 'abuse', 'first', 'used', 'against'])
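
A couple of quick lookups (sketch only) confirm how the mappings are laid out: 'UNK' is pinned to index 0 and the remaining indices follow frequency order, consistent with the counts and sample printed above.


In [ ]:
#Sketch: spot-check the dictionary and reverse_dictionary built above
print dictionary['UNK']        # 0, reserved for rare words
print dictionary['the']        # 1, the most frequent word
print reverse_dictionary[2]    # 'of', matching data[:10] above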

Generate a training batch for the skip-gram model.

Reference paper: Mikolov et al., "Efficient Estimation of Word Representations in Vector Space" (2013)


In [5]:
import random

data_index = 0

batch_size = 128  #the number of words we process each time 
embedding_size = 128  # Dimension of the embedding vector.
num_skips = 2      # How many times to reuse an input to generate a label.
skip_window = 1      # Number of words to consider left and right.

#Given a batch size, generate labeled data from the original sequence:
#for every center word, pick labels from within the skip window
def generate_batch(batch_size, num_skips, skip_window):
  global data_index

  assert batch_size % num_skips == 0  #if this condition does not hold, an AssertionError is raised
  assert num_skips <= 2 * skip_window

  batch = np.ndarray(shape=(batch_size), dtype=np.int32)
  labels = np.ndarray(shape=(batch_size, 1), dtype=np.int32)

  """Read data if size span into buffer """ 
  ## Span is the total number of words, before + after+ current word
  span = 2 * skip_window + 1   #span=3
  buffer = collections.deque(maxlen=span)
  for _ in range(span):
    buffer.append(data[data_index])
    data_index = data_index + 1
  
  """ The actual number of operation is batch_size // num_skips, where "//" is floor division, removing decimal numbers """ 
  for i in range(batch_size // num_skips):
    # start with the center word
    target = skip_window  
    
    # this list records the positions already used as targets
    targets_to_avoid = [ skip_window ]   
    
    for j in range(num_skips):
      while target in targets_to_avoid:
        target = random.randint(0, span - 1)
      targets_to_avoid.append(target)
      batch[i * num_skips + j] = buffer[skip_window]
      labels[i * num_skips + j, 0] = buffer[target]
    buffer.append(data[data_index])
    data_index = data_index + 1
    
  return batch, labels

#Some sample output
batch, labels = generate_batch(batch_size=10, num_skips=2, skip_window=1)
print batch
print labels
for i in range(len(batch)):
  print(reverse_dictionary[batch[i]], '->', reverse_dictionary[labels[i, 0]])
print "data index=",data_index


[3084 3084   12   12    6    6  195  195    2    2]
[[5239]
 [  12]
 [3084]
 [   6]
 [ 195]
 [  12]
 [   6]
 [   2]
 [3137]
 [ 195]]
('originated', '->', 'anarchism')
('originated', '->', 'as')
('as', '->', 'originated')
('as', '->', 'a')
('a', '->', 'term')
('a', '->', 'as')
('term', '->', 'a')
('term', '->', 'of')
('of', '->', 'abuse')
('of', '->', 'term')
data index= 8
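
To make the windowing logic concrete, here is a small pure-Python sketch (the toy sentence is made up for illustration): it lists every (center, context) pair within a window of 1, which is exactly the set that generate_batch samples from at random.


In [ ]:
#Illustrative sketch: enumerate all (center, context) pairs for a toy sequence with skip_window = 1
toy_data = ['the', 'quick', 'brown', 'fox', 'jumps']
skip = 1
pairs = []
for i in range(skip, len(toy_data) - skip):
  for j in range(i - skip, i + skip + 1):
    if j != i:
      pairs.append((toy_data[i], toy_data[j]))
print pairs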

Build and train a skip-gram model.


In [6]:
#More sample output; note that data_index continues from the previous call
batch, labels = generate_batch(batch_size=10, num_skips=2, skip_window=1)
print batch
print labels
for i in range(len(batch)):
  print(reverse_dictionary[batch[i]], '->', reverse_dictionary[labels[i, 0]])
print "data index=",data_index


[  156   156   128   128   742   742   477   477 10572 10572]
[[  128]
 [   59]
 [  742]
 [  156]
 [  128]
 [  477]
 [10572]
 [  742]
 [  134]
 [  477]]
('against', '->', 'early')
('against', '->', 'used')
('early', '->', 'working')
('early', '->', 'against')
('working', '->', 'early')
('working', '->', 'class')
('class', '->', 'radicals')
('class', '->', 'working')
('radicals', '->', 'including')
('radicals', '->', 'class')
data index= 16
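
The printed indices are consistent with the bookkeeping in generate_batch: each call consumes span words to fill the buffer plus batch_size // num_skips more while sliding it, so data_index advances by 8 per call here (a small arithmetic sketch below).


In [ ]:
#Sketch: words consumed per call with skip_window=1, batch_size=10, num_skips=2
span = 2 * 1 + 1
print span + 10 // 2   # 8, matching data_index = 8 and then 16 above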

In [7]:
import math

# We pick a random validation set to sample nearest neighbors. Here we limit the validation samples to frequent words.
valid_size = 16     # Random set of words to evaluate similarity on.
valid_window = 100  # Only pick dev samples in the head of the distribution.
valid_examples = np.random.choice(valid_window, valid_size, replace=False)

num_sampled = 64    # Number of negative examples to sample.

graph = tf.Graph()
with graph.as_default():
  train_inputs = tf.placeholder(tf.int32, shape=[batch_size])
     #a list of integers that are indices of words
  train_labels = tf.placeholder(tf.int32, shape=[batch_size, 1])
     #a list of integers that are indices of words
  valid_dataset = tf.constant(valid_examples, dtype=tf.int32)

  # Ops and variables pinned to the CPU because of missing GPU implementation
  with tf.device('/cpu:0'):
    #word vectors are all initialized with values between -1 and 1
    wordsE = tf.Variable(tf.random_uniform([vocabulary_size, embedding_size], -1.0, 1.0))
    
    #inputE contains only the rows of wordsE that appear in the current batch
    inputE = tf.nn.embedding_lookup(wordsE, train_inputs)
    #The resulting shape is [batch_size, embedding_size]
    
    # Construct the variables
    #The weights are initialized from a truncated normal distribution with mean 0 and standard deviation 1/sqrt(embedding_size), about 0.09
    initial_weights=tf.truncated_normal([vocabulary_size, embedding_size], stddev=1.0 / math.sqrt(embedding_size))
    weights = tf.Variable(initial_weights)
    biases = tf.Variable(tf.zeros([vocabulary_size]))

  loss = tf.reduce_mean(
              tf.nn.sampled_softmax_loss(weights, biases, inputE, train_labels, num_sampled, vocabulary_size))
  # Construct the SGD optimizer using a learning rate of 1.0.
  optimizer = tf.train.GradientDescentOptimizer(1.0).minimize(loss)

  # Compute the cosine similarity between minibatch examples and all embeddings.
  norm = tf.sqrt(tf.reduce_sum(tf.square(wordsE), 1, keep_dims=True))
  normalized_wordsE = wordsE / norm
    
  #calculate the pairwise similarity between every word in the validation set and every word in the vocabulary
  validE = tf.nn.embedding_lookup(normalized_wordsE, valid_dataset)
  similarity = tf.matmul(validE, normalized_wordsE, transpose_b=True)

  # Add variable initializer.
  init = tf.initialize_all_variables()
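
The cosine-similarity block above just normalizes each row of wordsE and takes dot products; a minimal numpy sketch of the same computation (with made-up values, outside the graph) may make that clearer.


In [ ]:
#Sketch: cosine similarity via row normalization and a matrix product, in plain numpy
E = np.random.rand(5, 3)                                   # a pretend embedding matrix
E_norm = E / np.sqrt((E ** 2).sum(axis=1, keepdims=True))  # normalize each row to unit length
valid_rows = E_norm[[0, 2]]                                # a pretend validation set
print valid_rows.dot(E_norm.T)                             # cosine similarities, shape (2, 5)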

In [8]:
with tf.Session() as sess:
    allwords =tf.Variable(tf.random_uniform([5, 2], -1.0, 1.0)) 
    #Variables have to be initialized before running
    #sess.run(allwords.initializer)
    tf.initialize_all_variables().run() #this is more general than initializing one variable at a time
    print allwords.eval()
    batch=[1,3,4]
    bwords=tf.nn.embedding_lookup(allwords, batch)
    print bwords.eval()


[[ 0.72809243 -0.58059263]
 [-0.51278496  0.12801981]
 [ 0.63370943  0.52823448]
 [ 0.83592129  0.17996573]
 [ 0.06860638  0.12861061]]
[[-0.51278496  0.12801981]
 [ 0.83592129  0.17996573]
 [ 0.06860638  0.12861061]]

Begin training.


In [9]:
print normalized_wordsE  #prints the symbolic tensor, not its values


Tensor("div:0", shape=(50000, 128), dtype=float32)

In [10]:
from six.moves import xrange   

num_steps = 100001

with tf.Session(graph=graph) as session:
  init.run()  #initialize the graph
  print("Initialized")
  #print normalized_wordsE.eval()

  data_index=0 #Reinitialize, important to do this
  average_loss = 0
  for step in xrange(num_steps):
    binputs, blabels = generate_batch(batch_size, num_skips, skip_window)
    # We perform one update step by evaluating the optimizer op (including it
    # in the list of returned values for session.run())
    assignment={train_inputs : binputs, train_labels : blabels}
    _, loss_val = session.run([optimizer, loss], feed_dict = assignment )
    average_loss += loss_val

    if step % 2000 == 0:
      # The average loss is an estimate of the loss over the last 2000 batches.
      if step > 0:
        average_loss /= 2000
      print("Average loss at step ", step, ": ", average_loss)
      average_loss = 0
    
    """
    if step == 2000:
      print binputs  #a list of indexes
      #print blabels
      print wordsE.eval()  
      print normalized_wordsE.eval()
      print inputE
      #print inputE.eval()
    """
    
    # Note that this is expensive, so we only do it every 10,000 steps
    if step % 10000 == 0:  
      sim = similarity.eval()  #get pairwise similarity for every word
      for i in range(5):
        valid_word = reverse_dictionary[valid_examples[i]]
        top_k = 8 # number of nearest neighbors
        nearest = (-sim[i, :]).argsort()[1:top_k+1]
        log_str = "Nearest to %s:" % valid_word
        for k in xrange(top_k):
          close_word = reverse_dictionary[nearest[k]]
          log_str = "%s %s," % (log_str, close_word)
        print(log_str)
        
  final_wordsE = normalized_wordsE.eval()


Initialized
('Average loss at step ', 0, ': ', 7.840111255645752)
Nearest to one: bertolt, mergers, kristallnacht, observatories, silvanus, ambedkar, selznick, electrically,
Nearest to people: behavior, coronado, npd, pathos, soliciting, mauritian, applicant, decidedly,
Nearest to that: prefrontal, gross, werth, enquiries, over, patriarchates, tortoise, rk,
Nearest to new: chap, exarch, donegal, zealot, ptah, serine, updating, hillary,
Nearest to state: archaeoastronomy, cheered, rigid, marlowe, boyd, qaeda, kilometers, psionic,
('Average loss at step ', 2000, ': ', 4.8493102321624759)
('Average loss at step ', 4000, ': ', 4.1473633738979698)
('Average loss at step ', 6000, ': ', 4.04280930018425)
('Average loss at step ', 8000, ': ', 3.8951156964600084)
('Average loss at step ', 10000, ': ', 3.8154547293186187)
Nearest to one: two, arnstadt, ambedkar, UNK, sur, the, manic, adler,
Nearest to people: behavior, npd, mauritian, pathos, coronado, no, supplemental, soliciting,
Nearest to that: over, gross, rk, enquiries, patriarchates, immortalized, mural, adjudication,
Nearest to new: exarch, chap, serine, zealot, updating, globe, regicide, vci,
Nearest to state: archaeoastronomy, rigid, boyd, psionic, marlowe, qaeda, hecate, envy,
('Average loss at step ', 12000, ': ', 3.783337225973606)
('Average loss at step ', 14000, ': ', 3.7343917431235312)
('Average loss at step ', 16000, ': ', 3.5670206896141172)
('Average loss at step ', 18000, ': ', 3.613119417488575)
('Average loss at step ', 20000, ': ', 3.7087411341667176)
Nearest to one: two, arnstadt, hypothalamus, the, selznick, eight, coaxial, enjoyed,
Nearest to people: behavior, npd, mauritian, pathos, summer, coronado, allotropes, no,
Nearest to that: over, gross, rk, also, which, colonized, immortalized, enquiries,
Nearest to new: exarch, chap, regicide, zealot, bithynia, globe, serine, updating,
Nearest to state: archaeoastronomy, rigid, psionic, boyd, cowes, qaeda, marlowe, envy,
('Average loss at step ', 22000, ': ', 3.6612494437694552)
('Average loss at step ', 24000, ': ', 3.6500439755916596)
('Average loss at step ', 26000, ': ', 3.6331495666503906)
('Average loss at step ', 28000, ': ', 3.6177412205338477)
('Average loss at step ', 30000, ': ', 3.6551618610322474)
Nearest to one: two, five, six, yukon, four, eight, seven, arnstadt,
Nearest to people: behavior, mauritian, npd, coronado, summer, pathos, allotropes, trombonist,
Nearest to that: which, gross, rk, over, colonized, this, immortalized, also,
Nearest to new: exarch, few, globe, bithynia, regicide, ogg, chap, vci,
Nearest to state: archaeoastronomy, rigid, psionic, boyd, cowes, news, envy, qaeda,
('Average loss at step ', 32000, ': ', 3.6402791076302528)
('Average loss at step ', 34000, ': ', 3.6379068987965586)
('Average loss at step ', 36000, ': ', 3.6111606106758116)
('Average loss at step ', 38000, ': ', 3.4778757376670839)
('Average loss at step ', 40000, ': ', 3.574024521589279)
Nearest to one: two, seven, eight, six, five, four, three, yukon,
Nearest to people: behavior, npd, mauritian, summer, coronado, pathos, trombonist, allotropes,
Nearest to that: which, this, rk, gross, but, adjudication, it, also,
Nearest to new: exarch, bithynia, globe, few, wakshul, ogg, regicide, serine,
Nearest to state: archaeoastronomy, rigid, cowes, psionic, news, qaeda, boyd, envy,
('Average loss at step ', 42000, ': ', 3.5772215947508812)
('Average loss at step ', 44000, ': ', 3.5912785984277726)
('Average loss at step ', 46000, ': ', 3.5886272131204606)
('Average loss at step ', 48000, ': ', 3.4945902621746061)
('Average loss at step ', 50000, ': ', 3.5178597041368485)
Nearest to one: two, six, four, three, eight, five, yukon, seven,
Nearest to people: behavior, mauritian, npd, summer, coronado, alabaster, trombonist, otters,
Nearest to that: which, gross, this, rk, colonized, coq, readability, but,
Nearest to new: bithynia, ogg, few, wakshul, globe, vci, stakeholders, regicide,
Nearest to state: archaeoastronomy, rigid, cowes, psionic, news, health, qaeda, envy,
('Average loss at step ', 52000, ': ', 3.5720396546125412)
('Average loss at step ', 54000, ': ', 3.558437523007393)
('Average loss at step ', 56000, ': ', 3.5648739863634109)
('Average loss at step ', 58000, ': ', 3.534092089921236)
('Average loss at step ', 60000, ': ', 3.5366420957446096)
Nearest to one: two, six, four, seven, three, five, eight, yukon,
Nearest to people: behavior, npd, mauritian, summer, semicolon, together, allotropes, transfusions,
Nearest to that: which, this, gross, rk, but, readability, it, colonized,
Nearest to new: bithynia, ogg, few, wakshul, stakeholders, gottlob, globe, adjective,
Nearest to state: archaeoastronomy, rigid, cowes, health, news, psionic, envy, boyd,
('Average loss at step ', 62000, ': ', 3.4059490516185762)
('Average loss at step ', 64000, ': ', 3.4126559369564058)
('Average loss at step ', 66000, ': ', 3.5441884812116622)
('Average loss at step ', 68000, ': ', 3.5298747569322586)
('Average loss at step ', 70000, ': ', 3.4998598370552063)
Nearest to one: two, six, seven, four, five, eight, three, yukon,
Nearest to people: behavior, npd, mauritian, summer, otters, those, together, allotropes,
Nearest to that: which, this, gross, rk, readability, colonized, however, coq,
Nearest to new: bithynia, ogg, few, wakshul, stakeholders, grissom, glycolipids, adjective,
Nearest to state: health, archaeoastronomy, rigid, cowes, news, psionic, waging, ris,
('Average loss at step ', 72000, ': ', 3.5100320695638656)
('Average loss at step ', 74000, ': ', 3.4818023799210787)
('Average loss at step ', 76000, ': ', 3.4572956134378909)
('Average loss at step ', 78000, ': ', 3.4941616464853285)
('Average loss at step ', 80000, ': ', 3.5062460954785348)
Nearest to one: two, six, seven, yukon, five, three, four, eight,
Nearest to people: behavior, those, npd, mauritian, summer, together, allafrica, semicolon,
Nearest to that: which, gross, rk, this, however, coq, moth, colonized,
Nearest to new: bithynia, ogg, grissom, wakshul, gottlob, globe, chap, glycolipids,
Nearest to state: health, archaeoastronomy, cowes, rigid, ris, psionic, boyd, waging,
('Average loss at step ', 82000, ': ', 3.5285345591306685)
('Average loss at step ', 84000, ': ', 3.529517646551132)
('Average loss at step ', 86000, ': ', 3.5032348444163799)
('Average loss at step ', 88000, ': ', 3.4849587701559068)
('Average loss at step ', 90000, ': ', 3.4956770838499067)
Nearest to one: two, seven, eight, six, four, yukon, three, five,
Nearest to people: behavior, those, npd, mauritian, together, scientists, semicolon, summer,
Nearest to that: which, rk, however, this, gross, coq, but, colonized,
Nearest to new: bithynia, ogg, grissom, chap, glycolipids, gottlob, wakshul, globe,
Nearest to state: archaeoastronomy, health, cowes, rigid, ris, waging, psionic, envy,
('Average loss at step ', 92000, ': ', 3.5285493825674057)
('Average loss at step ', 94000, ': ', 3.3962276338934898)
('Average loss at step ', 96000, ': ', 3.4868866152167319)
('Average loss at step ', 98000, ': ', 3.3854876936078071)
('Average loss at step ', 100000, ': ', 3.482825307607651)
Nearest to one: two, four, seven, yukon, six, three, selznick, undefeated,
Nearest to people: behavior, those, mauritian, together, npd, scientists, slaves, semicolon,
Nearest to that: which, however, this, rk, coq, but, gross, moth,
Nearest to new: bithynia, ogg, grissom, glycolipids, globe, chap, snowmobile, bolger,
Nearest to state: archaeoastronomy, health, cowes, rigid, envy, waging, kilometers, news,

Visualize the results


In [11]:
print(final_wordsE)


[[ 0.15570146 -0.02764084 -0.12364119 ..., -0.00470359  0.03385558
  -0.07861681]
 [-0.09491638 -0.0026548   0.04263811 ...,  0.12608501 -0.1125745
   0.03814645]
 [ 0.07656676  0.0519236  -0.06673657 ...,  0.13449392  0.06271509
   0.0453325 ]
 ..., 
 [-0.11385152  0.14265771 -0.11250321 ..., -0.00592266  0.11185884
  -0.02264865]
 [ 0.07109889 -0.1429483  -0.04755107 ...,  0.01965012  0.07681676
   0.12406051]
 [ 0.04363951 -0.10965925  0.03900464 ..., -0.11496992 -0.14630555
   0.09524262]]
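
Beyond the periodic log printed during training, the final matrix can be queried directly. Because the rows of final_wordsE are unit-normalized, a dot product gives cosine similarity. A minimal sketch (the query word 'three' is just an example; any key in dictionary works):


In [ ]:
#Sketch: nearest neighbours of an arbitrary vocabulary word using final_wordsE
query = dictionary['three']                        # example query word
sims = np.dot(final_wordsE, final_wordsE[query])   # cosine similarity against every word
nearest = (-sims).argsort()[1:9]                   # top 8, skipping the word itself
print [reverse_dictionary[i] for i in nearest]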

In [12]:
import matplotlib.pyplot as plt
%matplotlib inline

import sklearn
from sklearn.manifold import TSNE



ImportErrorTraceback (most recent call last)
<ipython-input-12-dc90899588cd> in <module>()
      3 
      4 import sklearn
----> 5 from sklearn.manifold import TSNE

ImportError: cannot import name TSNE

In [ ]:
def plot_with_labels(low_dim_embs, labels, filename='tsne.png'):
  assert low_dim_embs.shape[0] >= len(labels), "More labels than embeddings"
  plt.figure(figsize=(18, 18))  #in inches
  for i, label in enumerate(labels):
    x, y = low_dim_embs[i,:]
    plt.scatter(x, y)
    plt.annotate(label,
                 xy=(x, y),
                 xytext=(5, 2),
                 textcoords='offset points',
                 ha='right',
                 va='bottom')
  plt.savefig(filename)

tsne = TSNE(perplexity=30, n_components=2, init='pca', n_iter=5000)
plot_only = 500
low_dim_embs = tsne.fit_transform(final_wordsE[:plot_only,:])
labels = [reverse_dictionary[i] for i in xrange(plot_only)]
plot_with_labels(low_dim_embs, labels)
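
If TSNE cannot be imported (as in the error above), upgrading scikit-learn is the direct fix; as a stop-gap, a plain PCA projection computed with numpy gives a rough 2-D view. A minimal sketch, assuming only numpy and the plot_with_labels helper defined above:


In [ ]:
#Fallback sketch: 2-D PCA projection of the first 500 embeddings via numpy SVD
plot_only = 500
X = final_wordsE[:plot_only, :]
X = X - X.mean(axis=0)                            # center the data
U, S, Vt = np.linalg.svd(X, full_matrices=False)  # principal directions in rows of Vt
low_dim_embs_pca = X.dot(Vt[:2].T)                # project onto the top two components
labels = [reverse_dictionary[i] for i in xrange(plot_only)]
plot_with_labels(low_dim_embs_pca, labels, filename='pca.png')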
