In [1]:
""" Imports """
import re
from nltk.tokenize import word_tokenize, sent_tokenize
import collections
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt

"""Global definitons"""
_start = 'S_START'
_end = 'S_END'

In [2]:
""" util definitions"""

def hyperbolic(net):
    return np.tanh(net)

def relu(net):
    return np.maximum(0,net)

def softmax(net):
    _exp = np.exp(net)
    return _exp/np.sum(_exp)

def predict(scores):
    return np.argmax(scores)

In [3]:
class WordItem:
    def __init__(self,word,count=0):
        self.word = word
        self.count = count

In [22]:
class RNNlayer:
    
    """ 
    RNN nodes for decoder
    
    hidden state at time step t of decoder is conditioned on hidden state at time step t-1,
    output at time step t-1 and input at time step t
    """
    
    def __init__(self, inputSize, outputSize, W_Embedding_french, idx, bptt_truncate = 5, hiddenDim = 10):
        """
        inputSize = dimensions of the input embedding 
        outputSize = vocabulary size
        hiddenDim = size of the hidden unit in RNN
        bptt_truncate = truncate the number of time steps we calculate the gradient during backpropagation
        """
        self.inputSize = inputSize
        self.outputSize = outputSize
        self.W_Embedding_french = W_Embedding_french
        self.hiddenDim = hiddenDim
        self. bptt_truncate = bptt_truncate
        self.idx = idx;
        
        self.w_in = np.random.uniform(-np.sqrt(1./inputSize), np.sqrt(1./inputSize),(hiddenDim, inputSize))
        self.w_hh = np.random.uniform(-np.sqrt(1./hiddenDim), np.sqrt(1./hiddenDim),(hiddenDim, hiddenDim))
        self.w_outH = np.random.uniform(-np.sqrt(1./hiddenDim), np.sqrt(1./hiddenDim),(inputSize, hiddenDim))
        self.w_out = np.random.uniform(-np.sqrt(1./hiddenDim), np.sqrt(1./hiddenDim),(outputSize, hiddenDim))
        
    def forwardProp(self, inSentence, expSent):
        """
        inSentence: word indices in input language vocabulary
        expSent: word indices in target language vocabulary
        """
        
        #Total number of time steps equal to number of words in the sentence
        T = len(inSentence)
        
        #Saving all hidden states and outputs during forward propagation
        _h = np.zeros((T,self.hiddenDim))
        _o = np.zeros((T,self.outputSize))
        
        #Initializing initial output as the start token
        _o[-1] = np.zeros(self.outputSize)
        _o[-1][idx] = 1;
        
        #For each time step calculating hidden state and output
        for t in np.arange(T):
            outIdx = predict(_o[t-1])
            _h[t] = hyperbolic(self.w_in.dot(inSentence[t]) + self.w_hh.dot(_h[t-1]) + self.w_outH.dot(self.W_Embedding_french[outIdx]))
            _o[t] = softmax(self.w_out.dot(_h[t]))
            
        return _o, _h
    
    def calculateLoss(self, inSentence, expSentence):
        
        #For each sentence
        o, h = self.forwardProp(inSentence, expSentence)
        #TODO recheck this part
        correctPred = o[np.arange(len(expSentence)), expSentence]
        #Loss for each sentence
        l = -1 * np.sum(np.log(correctPred))
        return l
    
    def calculateTotalLoss(self, inSentence, expSentences):
        
        L = 0.0
        for i in range(len(inSentence)):
            if len(inSentence[i]) == len(expSentences[i]) :
                L += self.calculateLoss(inSentence[i], expSentences[i])
            
        return L
    
    def backPropTT(self, inSentence, expSentence):
        
        # Total number of time steps equal to number of words in the sentence
        T = len(expSentence)
        
        # Performing forward propagation
        o, h = self.forwardProp(inSentence, expSentence)
        
        # Defining gradient variables
        dLdin = np.zeros(self.w_in.shape)
        dLdhh = np.zeros(self.w_hh.shape)
        dLdoutH = np.zeros(self.w_outH.shape)
        dLdout = np.zeros(self.w_out.shape)
        
        # Calculating the difference between output and actual output
        delta_o = o
        delta_o[np.arange(T), expSentence] -= 1
        #print 'delta_o', delta_o
        
        # Calculating gradients backwards through time
        for t in np.arange(T)[::-1]:
            #Output gradient is only dependent on time step t
            dLdout += np.outer(delta_o[t], h[t])
            
            # Initial delta calculation propagating gradients from output
            delta_t = self.w_out.T.dot(delta_o[t]) * (1 - (h[t] ** 2))
            
            # Backpropagation through time (for at most self.bptt_truncate steps)
            for bptt_step in np.arange(max(0, t-self.bptt_truncate), t+1)[::-1]:
                # print "Backpropagation step t=%d bptt step=%d " % (t, bptt_step)
                # Add to gradients at each previous step
                dLdhh += np.outer(delta_t, h[bptt_step-1])              
                dLdin += np.outer(delta_t, inSentence[bptt_step-1])
                dLdoutH += np.outer(delta_t, self.W_Embedding_french[predict(o[t-1])])
                # Update delta for next step dL/dz at t-1
                delta_t = self.w_hh.T.dot(delta_t) * (1 - h[bptt_step-1] ** 2)
            """TODO review backprop implementation"""
            
        return dLdin, dLdhh, dLdoutH, dLdout
        #return dLdin, dLdhh, dLdout
    
    def sgd_step(self, inSentence, expSentence, learningRate):
        
        """ Performs a single stochastic gradient step"""
        
        # Calculating gradients
        dLdin, dLdhh, dLdoutH, dLdout = self.backPropTT(inSentence, expSentence)
        #dLdin, dLdhh, dLdout = self.backPropTT(inSentence, expSentence)
        
        # Updating parameters
        self.w_in -= learningRate * dLdin
        self.w_hh -= learningRate * dLdhh
        self.w_outH -= learningRate * dLdoutH
        self.w_out -= learningRate * dLdout
        
    def train_Decoder_With_SGD(self, X_train, Y_train, learningRate = 0.05, nepochs = 200):
        """TODO evaluate losses and update learning rate if required"""
        loss = 100000000000
        for epoch in range(nepochs):
            for i in range(len(Y_train)):
                if len(X_train[i]) == len(Y_train[i]) :
                    self.sgd_step(X_train[i], Y_train[i], learningRate)
            newLoss = self.calculateTotalLoss(X_train, Y_train)
            print epoch, " ", newLoss
            #if newLoss > loss :
            #    break
            #loss = newLoss

In [73]:
""" Word preprocessing """
def dataset(_fi='/home/jazzycrazzy/PythonScripts/dataset.csv', _fo = 'testfile.txt'):
    file_in = open(_fi)
    #file_out = open(_fo,'wb')

    words = [] #stores unique words encountered in the document as WordItem objects
    _dict = {} #temporary dictionary to maintain count of each word
    
    _dict['UNK'] = 0

    for l in file_in:
        #file_out.write(l+'\n')
        l = _start+' '+l+' '+_end
        split = word_tokenize(l.decode('utf-8'))
        for w in split:
            if len(w)==0:
                continue
            elif len(w) > 15: #if word's length is greater than 15 counting it as unknown
                _dict['UNK'] += 1
                continue
            if w not in _dict:
                _dict[w] = 1
            _dict[w] += 1
            
    _vocab = {} #dictionary with words as keys and values as indices of them in 'word' list
    _vocab['UNK'] = len(words)
    words.append(WordItem('UNK',_dict['UNK']))
    for k,v in _dict.iteritems():
        if v > 9 and k != 'UNK':
        #if k != 'UNK':
            _vocab[k] = len(words)
            words.append(WordItem(k,v))
        else:
            words[0].count += 1
    
    #cleaning up unnecessary memory
    del _dict
    file_in.close()
    #file_out.close()
    
    return _vocab, words

def UnigramTable(_vocab, words):
    """ Calculates probabilities based on count of each word present"""
    pow = 0.75
    totalFreqPow = 0.0
    unigramTable = {}
    
    l = [words[i].count**pow for i in range(len(_vocab))]
    totalFreqPow = np.sum(l)
    
    for i in range(len(_vocab)):
        unigramTable[i] = (words[i].count**pow)/totalFreqPow
    
    del l
    return unigramTable

def hotVector(wordIndex,vocabSize):
    """ Returns hot vector representation of a word """
    hVector = np.zeros(vocabSize)
    hVector[wordIndex-1] = 1
    return hVector

def softmax(net):
    """ calculates softmax score - target score normalized with noise scores and calculated as probability"""
    _exp = np.exp(net)
    return _exp/np.sum(_exp)

def sigmoid(net):
    """ Applies sigmoid logistic function on net """
    return 1.0/(1+np.exp(-net))

def randomIdx(k, vocabSize, current):
    """ Returns k indices from with unigram table randomly with respect to each word's probablity """
    global _unigramTable1
    idxs = list(np.random.choice(vocabSize, k+1, False, p = _unigramTable1.values()))
    if current in idxs:
        idxs.remove(current)
    else:
        del idxs[-1]
    return idxs
    
def softmaxCostGradient(net, target):
    prob = softmax(net)
    print(prob)
    
    
def negSamplingCostGradient(out, context, emb, vocabSize, learningRate, W_Output, k = 10):
    
    errorHidden = np.zeros(shape=(emb.size,1))
    
    actOut = sigmoid(out[context])
    negSamples = randomIdx(k, vocabSize, context)
    _negSamples = [-out[sample] for sample in negSamples]
    
    # error for context word
    e = -np.log(actOut) - np.sum(np.log(sigmoid(np.array(_negSamples))))
    
    """ calculating gradients for output vectors for both target and negative samples
    calculating hidden layer error for each context word """
    # Updating output weight vector for context word
    delta = actOut - 1
    errorHidden += delta * W_Output[:,context:context+1]
    W_Output[:,context:context+1] -= learningRate * np.reshape(delta * emb,(emb.size,1))
    
    # Updating output weight vectors for negative sampling
    for sample in negSamples:
        delta = sigmoid(out[sample])
        errorHidden += delta * W_Output[:,sample:sample+1]
        W_Output[:,sample:sample+1] -= learningRate * np.reshape(delta * emb,(emb.size,1))
    
    return errorHidden,e    
    
def skipgram(target,contextWords, vocabSize, learningRate, W_Embedding, W_Output):
    
    """
    will be called on each window with
    target: Target word index
    contextWords: Arrray of integers representing context words
    """
    loss = 0
    k = 10 #Number of negative samples
    emb = W_Embedding[target]
    out = np.matmul(emb,W_Output) # [1 x EmbSize].[EmbSize x VocabSize]
    #print out.shape
    _predicted = []
    EH = np.zeros(shape=(emb.size,1))
    for context in contextWords:
        #predicted = hotVector(context, vocabSize)
        #softmaxCostGradient(out,context)
        _EH,_e = negSamplingCostGradient(out, context, emb, vocabSize, learningRate, W_Output, k)
        EH += _EH
        loss += _e
        #EH += sof
        
    #updating hidden layer input vector embedding
    W_Embedding[target] -= learningRate * EH.T[0]
    return loss

In [72]:
""" Creates word embeddings in vector space representation """

""" Feedforward Neural Net Language model """
#Input layer

#Projection layer

#Hidden layer

#Output layer

#Initialization
fin='/Users/preethikapachaiyappa/Documents/MachineLearning/Data/English-equalLength2.txt'#/home/jazzycrazzy/PythonScripts/dataset.csv'
fin1='/Users/preethikapachaiyappa/Documents/MachineLearning/Data/French-equalLength2.txt'
fout = 'testfile.txt'
fout1 = 'testfile1.txt'
_vocab, words = dataset(fin, fout)
_vocab_f, words_f = dataset(fin1, fout1)
_unigramTable = UnigramTable(_vocab, words)
_unigramTable1 = UnigramTable(_vocab_f, words_f)

learningRate = 0.1
vocabSize = len(words)
vocabSize_f = len(words_f)
emb_size = 10
win_size = 4
target = None
contextWords = []
epoch = 20

#print _vocab
#print _vocab_f


# No need of hidden layer since when the embedding matrix is multiplied with hot vector 
#it essentially gives that embedding row
W_Embedding = np.random.randn(vocabSize,emb_size) #Embedding matrix
W_Output = np.random.randn(emb_size,vocabSize) #Outputlayer weight matrix Emb_size x Vocab

W_Embedding_f = np.random.randn(vocabSize_f,emb_size) #Embedding matrix
W_Output_f = np.random.randn(emb_size,vocabSize_f) #Outputlayer weight matrix Emb_size x Vocab

oldLoss = 10000
for _ in np.arange(epoch):
    
    totalLoss = 0
    loss = 0
    
    fileIn = open(fin)
    for l in fileIn:
        l = _start+' '+l+' '+_end
        tokens = word_tokenize(l.decode('utf-8'))
        #print 'tokens',tokens
        for token in tokens:
            
            loss = 0
            contextWords = []
            cntxtIdxs = []
            
            if token in _vocab:
                target = _vocab[token]
                trgtIdx = tokens.index(token)
                
                count = 0
                _idx = trgtIdx-1
                #print _idx
                while count < win_size and _idx >= 0:
                    if tokens[_idx] in _vocab:
                        cntxtIdxs = np.insert(cntxtIdxs,0,_idx)
                        count += 1
                    _idx -= 1
                #count = 0
                #_idx = trgtIdx + 1
                #while count < win_size and _idx < len(tokens):
                #    if tokens[_idx] in _vocab:
                #        cntxtIdxs = np.append(cntxtIdxs,_idx)
                #       count += 1
                #    _idx += 1
                    
                for idx in cntxtIdxs:
                    #print idx
                    #check for first word and last word and use UNK for context words for window where words not available
                    if idx >-1 and idx < len(tokens) and tokens[int(idx)] in _vocab:
                        contextWords = np.append(contextWords, _vocab[tokens[int(idx)]])
                    else:
                        contextWords = np.append(contextWords, _vocab['UNK'])
                #print contextWords
                loss += skipgram(target, contextWords, vocabSize, learningRate, W_Embedding, W_Output)
        totalLoss += loss
    print 'Total Loss:',totalLoss
    if totalLoss > oldLoss : 
        break;
    oldLoss = totalLoss


/Users/preethikapachaiyappa/anaconda/lib/python2.7/site-packages/ipykernel/__main__.py:92: VisibleDeprecationWarning: using a non-integer number instead of an integer will result in an error in the future
/Users/preethikapachaiyappa/anaconda/lib/python2.7/site-packages/ipykernel/__main__.py:103: VisibleDeprecationWarning: using a non-integer number instead of an integer will result in an error in the future
/Users/preethikapachaiyappa/anaconda/lib/python2.7/site-packages/ipykernel/__main__.py:104: VisibleDeprecationWarning: using a non-integer number instead of an integer will result in an error in the future
Total Loss: 1885.04176453
Total Loss: 1130.19097158
Total Loss: 1113.26582274
Total Loss: 1103.69266895
Total Loss: 1114.53043293

In [74]:
contextWords = []
    
oldLoss = 10000
for _ in np.arange(epoch):
    
    totalLoss = 0
    loss = 0
    
    fileIn = open(fin1)
    for l in fileIn:
        l = _start+' '+l+' '+_end
        tokens = word_tokenize(l.decode('utf-8'))
        #print 'tokens',tokens
        for token in tokens:
            loss = 0
            contextWords = []
            cntxtIdxs = []
            
            if token in _vocab_f:
                target = _vocab_f[token]
                trgtIdx = tokens.index(token)
                
                count = 0
                _idx = trgtIdx-1
                #print _idx
                while count < win_size and _idx >= 0:
                    if tokens[_idx] in _vocab_f:
                        cntxtIdxs = np.insert(cntxtIdxs,0,_idx)
                        count += 1
                    _idx -= 1
                #count = 0
                #_idx = trgtIdx + 1
                #while count < win_size and _idx < len(tokens):
                #    if tokens[_idx] in _vocab_f:
                #        cntxtIdxs = np.append(cntxtIdxs,_idx)
                #        count += 1
                #    _idx += 1
                    
                for idx in cntxtIdxs:
                    #print idx
                    #check for first word and last word and use UNK for context words for window where words not available
                    if idx >-1 and idx < len(tokens) and tokens[int(idx)] in _vocab_f:
                        contextWords = np.append(contextWords, _vocab_f[tokens[int(idx)]])
                    else:
                        contextWords = np.append(contextWords, _vocab['UNK'])
                #print contextWords
                loss += skipgram(target, contextWords, vocabSize_f, learningRate, W_Embedding_f, W_Output_f)
        totalLoss += loss
    print 'Total Loss:',totalLoss
    if totalLoss > oldLoss : 
        break;
    oldLoss = totalLoss
                

print(W_Embedding_f)

idx = _vocab_f[_start]


/Users/preethikapachaiyappa/anaconda/lib/python2.7/site-packages/ipykernel/__main__.py:92: VisibleDeprecationWarning: using a non-integer number instead of an integer will result in an error in the future
/Users/preethikapachaiyappa/anaconda/lib/python2.7/site-packages/ipykernel/__main__.py:103: VisibleDeprecationWarning: using a non-integer number instead of an integer will result in an error in the future
/Users/preethikapachaiyappa/anaconda/lib/python2.7/site-packages/ipykernel/__main__.py:104: VisibleDeprecationWarning: using a non-integer number instead of an integer will result in an error in the future
Total Loss: 2142.39385688
Total Loss: 1157.78182625
Total Loss: 1114.58410584
Total Loss: 1129.81236869
[[-1.56450208  2.82866243 -0.28745095 ..., -0.64462012 -0.36028558
   0.36278287]
 [ 0.99605632  2.89832133 -0.21634306 ..., -1.82747077  0.41848256
  -1.90786567]
 [-0.23089094  0.73134246 -0.08784773 ..., -1.45206524 -0.71318284
  -0.3407588 ]
 ..., 
 [-0.91927069  1.18149855 -0.18588161 ..., -0.77579122  0.21466177
  -0.0281404 ]
 [-1.41850957  1.80761586 -2.3898169  ..., -0.52572361  0.32095744
  -0.7096624 ]
 [-0.55791996  0.82140119 -1.63690938 ..., -0.30150419 -0.29358689
   1.13489885]]

In [ ]:
#mean_list = W_Embedding.mean(0)
#print mean_list

#W_Embedding_new = W_Embedding - mean_list
#print W_Embedding_new

In [75]:
inSentence = []
expSentence = []

fileIn0 = open(fin)
for l in fileIn0 :
    #l = _start+' '+l+' '+_end
    tokens = word_tokenize(l.decode('utf-8'))
    inSent = []
    for token in tokens :
        target = ""
        if token not in _vocab : 
            target = _vocab['UNK']
        else : 
            target = _vocab[token]
        vec = W_Embedding[target]
        vec_list = vec.tolist()
        inSent.append(vec_list)
    inSentence.append(inSent)

fileIn1 = open(fin1)
for l in fileIn1 :
    #l = _start+' '+l+' '+_end
    tokens = word_tokenize(l.decode('utf-8'))
    expSent = []
    for token in tokens :
        target = ""
        if token not in _vocab_f : 
            target = _vocab_f['UNK']
        else : 
            target = _vocab_f[token]
        expSent.append(target)
    expSentence.append(expSent)

#print inSentence
#print expSentence
        
a = RNNlayer(10,vocabSize_f,W_Embedding_f,idx)
a.train_Decoder_With_SGD(inSentence, expSentence, 0.1, 25)


0   4211.83241621
1   4041.40150395
2   4024.99919122
3   3895.17035137
4   3849.28951216
5   3848.74189257
6   3843.09477941
7   3861.3252227
8   3814.03103458
9   3781.57461909
10   3785.93635126
11   3756.32625024
12   3786.64057914
13   3949.53674101
14   3794.04687334
15   3813.37644432
16   3641.3020529
17   3637.83255795
18   3677.91086653
19   3853.56982627
20   3668.86572163
21   3540.73737278
22   3521.07750759
23   3481.96691822
24   3426.68341248

In [ ]:


In [79]:
print _vocab

inSentence = []
input = "assistance"
#target = _vocab[input]
#vec = W_Embedding_new[target]
#inSentence.append(vec)
tokens = word_tokenize(input.decode('utf-8'))
inSent = []
for token in tokens :
    target = _vocab[token]
    vec = W_Embedding[target]
    vec_list = vec.tolist()
    inSent.append(vec_list)
inSentence.append(inSent)
print inSentence

o,h = a.forwardProp(inSentence[0],None)
#print o
words1 = o.argmax(axis=1)
#print words1
for i in range(len(words1)) :
    print words_f[words1[i]].word


{u'limited': 1, u'all': 2, u'coach': 3, u'global': 4, u'9/11': 5, u'month': 6, u'appetite': 7, u'adjustment': 8, u'religious': 9, u'whose': 10, u'catastrophe': 11, u'zone': 12, u'passage': 13, u'literary': 14, u'to': 15, u'finally': 16, u'program': 17, u'under': 18, u'Not': 19, u'dominated': 221, u'include': 21, u'belonging': 22, u'risk': 23, u'very': 24, u'Political': 347, u'fan': 26, u'reforms': 27, u'affect': 28, u'screaming': 29, u'drag': 699, u'1930\u2019s': 31, u'Protocol': 32, u'level': 33, u'try': 34, u'race': 35, u'quick': 36, u'Turkey': 37, u'force': 38, u'leaders': 39, u'direct': 40, u'value': 475, u'replicated': 42, u'investment': 43, u'even': 44, u'will': 476, u'deliberate': 46, u'decisions': 47, u'assistance': 48, u'contributed': 49, u'debate': 351, u'access': 51, u'toll': 52, u'resilient': 53, u'new': 54, u';': 55, u'contributes': 56, u'niche': 58, u'proliferation': 59, u'never': 60, u'We': 356, u'here': 62, u'identifications': 64, u'protection': 65, u'English': 203, u'alone': 67, u'safer': 68, u'change': 69, u'economics': 70, u'prior': 71, u'resulting': 72, u'social': 74, u'grandee': 75, u'makes': 76, u'signifies': 639, u'secure': 77, u'danger': 78, u'When': 79, u'Relations': 80, u'EU': 233, u'total': 82, u'armed': 83, u'crisis': 84, u'market': 85, u'Europe': 86, u'use': 87, u'from': 88, u'August': 89, u'would': 90, u'remains': 91, u'destination': 92, u'direction': 675, u'two': 93, u'Russia': 390, u'live': 95, u'NATO': 96, u'therefore': 97, u'6': 98, u'taken': 99, u'assessment': 100, u'entails': 101, u'today': 102, u'more': 103, u'``': 105, u'These': 107, u'becomes': 108, u'Crowley': 109, u'particular': 110, u'effort': 111, u'hints': 112, u'autumn': 113, u'account': 114, u'join': 115, u'sanctions': 116, u'this': 117, u'challenge': 118, u'work': 119, u'can': 120, u'containment': 121, u'abandon': 122, u'example': 123, u'address': 438, u'claim': 293, u'dedicated': 125, u'citizens': 611, u'give': 127, u'process': 128, u'non-dangerous': 129, u'organized': 130, u'geo-strategists': 131, u'states': 132, u'indicates': 133, u'carrot': 134, u'S_START': 513, u'!': 135, u'needs': 136, u'democracy': 137, u'conversation': 138, u'discussion': 139, u'breaking': 140, u'regions': 141, u'how': 176, u'vital': 143, u'widespread': 144, u'Why': 145, u'economy': 146, u'A': 147, u'tried': 217, u'critics': 219, u'after': 150, u'membership': 239, u'produce': 153, u'such': 258, u'law': 155, u'response': 270, u'man': 157, u'a': 158, u'One': 159, u'Guard': 160, u'Greece': 161, u'banks': 162, u'order': 163, u'talk': 164, u'Even': 248, u'What': 166, u'indeed': 167, u'move': 168, u'mainly': 169, u'years': 170, u'attitude': 171, u'still': 172, u'its': 173, u'police': 174, u'SAN': 175, u',': 177, u'better': 178, u'individuals': 569, u'co-existence': 180, u'policy': 181, u'main': 104, u'might': 183, u'then': 184, u'them': 185, u'within': 624, u'seeking': 187, u'safe': 188, u'earn': 375, u'anticipated': 190, u'framework': 191, u'compelled': 192, u'they': 193, u'not': 194, u'now': 195, u'bank': 284, u'Iraqi': 626, u'Indeed': 198, u'India': 199, u'realistic': 200, u'Council': 201, u'each': 202, u'bond': 204, u'everyone': 205, u'significantly': 206, u'generation': 207, u'house': 208, u'energy': 209, u'Next': 210, u'Barack': 211, u'extended': 505, u'year': 213, u'clauses': 214, u'really': 215, u'by': 603, u'Approach': 631, u'crucial': 218, u'may': 149, u'acting': 220, u'safety': 222, u'rational': 223, u'indexation': 224, u'receiving': 225, u'issue': 226, u'1989': 640, u'This': 228, u'differ': 229, u'possibility': 230, u'disrespect': 231, u'clique': 232, u'qui': 81, u'Security': 234, u'enormous': 235, u'Cambridge': 236, u'benefits': 237, u'agree': 671, u'Being': 238, u'wrong': 152, u'times': 240, u'motion': 241, u'thing': 242, u'best-known': 243, u'place': 244, u'threat': 245, u'Depression': 247, u'feed': 165, u'already': 249, u'Precedents': 250, u'Palestinian': 251, u'There': 252, u'scene': 253, u'one': 254, u'Indian': 255, u'submit': 256, u'another': 257, u'Syria': 154, u'Rajan': 227, u'quality': 260, u'abrogated': 262, u'has': 549, u'little': 263, u'basketball': 264, u'Iran': 265, u'guaranteeing': 266, u'Eastern': 267, u'system': 268, u'adopted': 269, u'mal': 156, u'their': 271, u'2': 272, u'too': 273, u'Such': 275, u'Iraq': 276, u'friend': 277, u'back': 652, u'ceiling': 278, u'that': 279, u'societies': 374, u'acquire': 280, u'serve': 281, u'draped': 282, u'part': 283, u'Climate': 694, u'diminish': 285, 'UNK': 0, u'rampant': 286, u'Chernobyl': 287, u'professionals': 288, u'farming': 289, u'16': 290, u'Partnership': 291, u'limiting': 292, u'matter': 294, u'future': 295, u'contrary': 296, u'historical': 297, u'were': 298, u'naturally': 645, u'powers': 300, u'declare': 301, u'and': 302, u'illness': 303, u'mind': 304, u'argument': 305, u'talking': 306, u'manner': 307, u'have': 308, u'further': 309, u'need': 310, u'seen': 530, u'\u201cPortfolio\u201d': 312, u'Venezuela': 313, u'Kremlin': 314, u'Q.': 315, u'Kyoto': 316, u'Arab': 317, u'also': 318, u'concerns': 319, u'take': 321, u'They': 322, u'destroy': 324, u'itwas': 325, u'Bank': 326, u'circles': 327, u'begin': 328, u'who': 329, u'exercised': 330, u'most': 331, u'Rural': 332, u'The': 333, u'prestige': 423, u'radioactive': 335, u'especially': 336, u'clear': 337, u'later': 338, u'treaty': 339, u'clean': 340, u'nuclear': 403, u'Copenhagen': 342, u'That': 402, u'reactor': 343, u'gold': 344, u'hunger': 345, u'agreed': 346, u'cheap': 25, u'bring': 348, u'BERLIN': 349, u'reception': 711, u'fine': 350, u'availability': 50, u'fifth': 352, u'European': 353, u'lessons': 354, u'current': 355, u'NAFTHE': 61, u'knowledge': 357, u'modernity': 610, u'economist': 358, u'should': 359, u'brighter': 360, u'only': 361, u'developed': 362, u'fuel': 363, u'local': 364, u'do': 365, u'his': 366, u'get': 367, u'dependent': 142, u'between': 406, u'next': 630, u'during': 370, u'nasty': 320, u'Palestinians': 372, u'July': 407, u'areas': 182, u'runs': 189, u'transitional': 376, u'countries': 377, u'approach': 408, u'medicines': 379, u'worried': 380, u'soit': 487, u'forces': 381, u'unwitting': 382, u'audited': 383, u'wrote': 384, u'Afghanistan': 385, u'redefine': 386, u'learn': 387, u'disaster': 388, u'For': 389, u'national': 391, u'around': 555, u'France': 392, u'\u2013': 393, u'decided': 394, u'are': 395, u'1929': 396, u'subject': 397, u'fails': 398, u'said': 399, u'capacity': 400, u'precedents': 401, u'farmers': 151, u'arguably': 341, u'fission': 404, u'weapons': 405, u'routine': 369, u'neither': 373, u'misguided': 378, u'we': 409, u'men': 410, u'Central': 411, u'Fukushima': 471, u'importance': 412, u'attention': 413, u'however': 414, u'kicking': 415, u'key': 416, u'Were': 417, u'carbon': 418, u'debt': 419, u'Reforming': 420, u'improve': 421, u'both': 422, u'epidemics': 424, u'country': 425, u'deficits': 426, u'against': 427, u'foreign': 428, u'objects': 429, u'expended': 430, u'contribution': 431, u'argue': 432, u'comes': 433, u'otherwise': 434, u'seminal': 435, u'appeared': 436, u'acted': 437, u'others': 66, u'community': 439, u'active': 440, u'mischief': 647, u'Borders': 441, u'expensive': 442, u'forms': 246, u'unusual': 444, u'raise': 445, u'capable': 446, u'create': 447, u'political': 448, u'due': 449, u'been': 450, u'.': 451, u'Most': 453, u'much': 454, u'don\u2019t': 455, u'certain': 673, u'Otherwise': 457, u'meeting': 323, u'remedies': 459, u'life': 460, u'families': 461, u'cop': 462, u'technological': 547, u'scrutiny': 464, u'wake': 465, u'YORK': 466, u'repetition': 468, u'And': 469, u'guarantees': 470, u'look': 30, u'raw': 472, u'these': 473, u'reliable': 474, u'evaluated': 41, u"n't": 45, u'ongoing': 477, u'policies': 478, u'many': 479, u'stopping': 480, u'situation': 481, u'at': 680, u'Khan': 483, u'is': 484, u'thus': 485, u'it': 486, u'boycotts\u2019': 452, u'in': 488, u'technology': 489, u'decisively': 490, u'if': 491, u'Israel': 492, u'develop': 493, u'media': 494, u'make': 495, u'civilian': 496, u'same': 497, u'any': 498, u'member': 499, u'adverse': 500, u'parts': 501, u'big': 556, u'President': 503, u'difficult': 504, u'literature': 212, u'I': 506, u'moving': 507, u'opportunity': 508, u'cycle': 509, u'Gates': 510, u'reflection': 511, u'largely': 512, u'nuclear-energy': 106, u'Is': 514, u'no': 687, u'It': 516, u'States': 517, u'academic': 518, u'hedge': 371, u'does': 588, u'In': 520, u'y': 521, u'position': 522, u'the': 523, u'If': 524, u'pense': 525, u'United': 526, u'traditions': 527, u'just': 528, u'less': 529, u'shape': 531, u'thanks': 532, u'human': 533, u'speed': 534, u'announcement': 535, u'cut': 536, u'monitored': 537, u'illusions': 538, u'candidate': 539, u'instance': 540, u'regarded': 541, u'had': 542, u'capabilities': 543, u'adoption': 544, u'association': 196, u'easy': 546, u'Despite': 463, u'complements': 548, u'save': 63, u'irresponsible': 550, u'Change': 551, u'real': 552, u'On': 553, u'romanticized': 554, u'which': 274, u'Many': 502, u'cultural': 558, u'know': 559, u'nor': 545, u'world': 560, u'unique': 561, u'advanced': 562, u'necessary': 564, u'like': 565, u'success': 566, u'admitted': 567, u'heart': 124, u'audience': 568, u'continue': 179, u'become': 570, u'security': 571, u'reduced': 572, u'Honni': 574, u'because': 575, u'often': 576, u'people': 577, u'Great': 578, u'growth': 579, u'export': 580, u'Twenty-five': 581, u'recognition': 582, u'misadventures': 583, u'for': 584, u'integration': 585, u'prop': 586, u'everything': 587, u'critical': 57, u'Massachusetts': 589, u'provides': 590, u'innovation': 591, u'Afterward': 592, u'?': 593, u'He': 594, u'S_END': 595, u'be': 596, u'shaker': 597, u'power': 598, u'late': 443, u'of': 609, u'refused': 599, u'India\u2019s': 601, u'nerves': 602, u'contracts': 368, u'two\u2011speed': 94, u'on': 604, u'about': 605, u'central': 606, u'Reactor': 607, u'epochal': 608, u'December': 557, u'Georgia\u2019s': 148, u'violence': 126, u'sectarian': 612, u'US': 613, u'Bomb': 614, u'must': 615, u'economic': 666, u'afternoon': 616, u'Japan': 617, u'efforts': 618, u'Precisely': 619, u'or': 620, u'purported': 621, u'own': 622, u'into': 623, u'cop\u2019s': 186, u'Two': 625, u'undermining': 197, u'right': 627, u'But': 628, u'NEW': 629, u'sports': 600, u'Meeting': 216, u'mere': 632, u'fast': 633, u'her': 634, u'area': 635, u'support': 636, u'there': 637, u'sovereign': 638, u'long': 261, u'fight': 259, u'Every': 641, u'way': 642, u':': 643, u'was': 644, u'Obama': 299, u'function': 646, u'amateur': 311, u'offer': 648, u'suits': 649, u'slump': 650, u'but': 651, u'hoped': 334, u'FRANCISCO': 703, u'promote': 654, u'with': 655, u'he': 656, u'GDP': 657, u'constitute': 658, u'made': 659, u'whether': 660, u'hardly': 458, u'up': 662, u'signed': 663, u'crystal': 664, u'Britain': 665, u'imperative': 20, u'limit': 667, u'emissions': 668, u'blessings': 669, u'Reducing': 670, u'professor': 73, u'panics': 672, u'dangerous': 661, u'inadequate': 456, u'am': 674, u'command': 519, u'an': 676, u"''": 677, u'as': 678, u'exist': 679, u'promised': 482, u'our': 681, u'politics': 682, u'mover': 683, u'identities': 684, u'chance': 685, u'%': 686, u'nonetheless': 515, u'peace': 688, u'A.': 689, u'condescension': 690, u'income': 691, u'you': 692, u'conclusion': 693, u'draw': 573, u"'s": 695, u'brothers': 696, u'problems': 697, u'enlargement': 698, u'meaning': 563, u'journey': 700, u'utterly': 701, u'centuries-old': 714, u'land': 653, u'implies': 704, u'age': 705, u'required': 706, u'flag': 707, u'together': 708, u'2002': 709, u'At': 710, u'hugely': 467, u'time': 712, u'push': 713, u'serious': 702, u'2009': 715, u'resolution': 716, u'once': 717}
[[[-2.119068636305938, 0.6538693924841961, -0.868824606342945, 0.9675981599304121, -1.6344888126025316, -0.16476169844914296, -1.2089864226314548, -0.26566936290176224, -0.2033888555394979, 0.5515765585831272]]]
Beaucoup