In [1]:
import tensorflow as tf
import numpy as np
import re
import pandas as pd
import spacy

Getting input data ready


In [ ]:
filePath = "data/quora_duplicate_questions.tsv"
df = pd.read_csv(filePath,delimiter="\t")
df.question1 = df.question1.fillna("")
df.question1 = df.question1.apply(str.lower)
df.question2 = df.question2.fillna("")
df.question2 = df.question2.apply(str.lower)

Finding unique words in dataset to create vocabulary


In [ ]:
def tokenize(s,nlp):
    doc = nlp(s)
    tokSen = []
    for word in doc:
        tokSen.append(word.text)
    return tokSen

nlp = spacy.load('en')
uniqueQuestions = df.question1.unique()
tokenizedQns = [tokenize(unicode(sentence,'utf8'),nlp) for sentence in uniqueQuestions]
words = [word for tokWords in tokenizedQns for word in tokWords]

words2 = df.question2.unique()
words2 = [tokenize(unicode(sentence,'utf8'),nlp) for sentence in words2]
words2 = [word for tokWords in words2 for word in tokWords]
words.extend(words2)

Adding PAD as filler for normalizing sentence length and UNK for unkown tokens


In [ ]:
words = set(words)
vocabulary = dict(zip(words,range(2,len(words)+2)))
vocabulary['PAD'] = 0
vocabulary['UNK'] = 1
print("Vocabulary Size including PAD and UNK: ",len(vocabulary))

Each question represented as list of index in the vocabulary


In [ ]:
def loadWordVectors(filePath,vocab):
    txt = open('data/wiki.en.vec')
    wordVecs = np.zeros((len(vocab),300),dtype=float)
    for line in txt:
        splitData = line.split(" ")
        word = splitData[0]
        word = unicode(word,'utf8')
        if(word not in vocab):
            continue
        vector = splitData[1:len(splitData)-1]
        wordVecs[vocab[word]] = np.array(vector,dtype=float)
    return wordVecs
wordVecSize = 300
wordVecs = loadWordVectors('wiki/wiki.en.vec',vocabulary)

In [ ]:
idx = 0 
for w in wordVecs:
    if(w is None):
        count += 1
        wordVecs[idx] = 2 * np.random.random_sample(wordVecSize) - 1

In [ ]:
def tokenizeAndIndex(sentence):
    words = tokenize(unicode(sentence,'utf8'),nlp)
    retVal = [vocabulary[word] if word in vocabulary else vocabulary['UNK'] for word in words]
    return retVal
df['Q1Indexed'] = df.question1.apply(tokenizeAndIndex)
df['Q2Indexed'] = df.question2.apply(tokenizeAndIndex)

Threshold questions with total words <= 50


In [ ]:
seqLength = 50
df = df[df.Q1Indexed.apply(len) <= seqLength]
df = df[df.Q2Indexed.apply(len) <= seqLength]

def normalizeSequenceLength(sequence):
    if(len(sequence) < seqLength):
        padding = [vocabulary['PAD'] for i in range(seqLength - len(sequence))]
        sequence.extend(padding)
    return sequence
df.Q1Indexed = df.Q1Indexed.apply(normalizeSequenceLength)
df.Q2Indexed = df.Q2Indexed.apply(normalizeSequenceLength)

In [ ]:
positiveSamples = df[df.is_duplicate==1]
negativeSamples = df[df.is_duplicate==0]

#Testing data
positiveTest = positiveSamples.sample(frac=0.3)
negativeTest = negativeSamples.sample(frac=0.3)
testData = positiveTest.append(negativeTest)
print("Number of test samples: {0}".format(len(testData)))
#Training data
trainData = df[df.id.isin(testData.id) == False]
print("Number of train samples: {0}".format(len(trainData)))

In [2]:
df = pd.read_pickle('data/ProcessedData.pkl')
trainData = pd.read_pickle('data/TrainData.pkl')
testData = pd.read_pickle('data/TestData.pkl')
valData = pd.read_pickle('data/ValData.pkl')
wordVecs = np.load('data/wordVecs.npy')
wordVecSize = 300
seqLength = 50

Building the network

Creating setence embedding


In [3]:
tf.reset_default_graph()

In [3]:
vocab_size = len(wordVecs)
embedding_size = wordVecSize

W = tf.Variable(wordVecs,name="W")

q1Input = tf.placeholder(tf.int32, [None, seqLength], name="q1Input")
q1Embeddings = tf.nn.embedding_lookup(W, q1Input)
q1Mask = tf.placeholder(tf.float64, [None, seqLength, 1], name="q1Mask")
q1Embeddings = tf.multiply(q1Embeddings, q1Mask, name='q1Masked')
q1Embeddings = tf.reduce_sum(q1Embeddings, 1)

q2Input = tf.placeholder(tf.int32, [None, seqLength], name="q1Input")
q2Embeddings = tf.nn.embedding_lookup(W, q2Input)
q2Mask = tf.placeholder(tf.float64, [None, seqLength, 1], name="q2Mask")
q2Embeddings = tf.multiply(q2Embeddings, q2Mask, name='q2Masked')
q2Embeddings = tf.reduce_sum(q2Embeddings, 1)

sentenceEmbedding = tf.concat([q1Embeddings,q2Embeddings],axis=1,name='sentenceEmbedding')

Dense layers and output


In [4]:
dense1 = tf.layers.dense(inputs=sentenceEmbedding, units=embedding_size*2, activation=tf.nn.tanh,name='dense1')
dense2 = tf.layers.dense(inputs=dense1, units=embedding_size*2, activation=tf.nn.tanh,name='dense2')
dense3 = tf.layers.dense(inputs=dense2, units=embedding_size*2, activation=tf.nn.tanh,name='dense3')
logits = tf.layers.dense(inputs=dense3, units=2,name='logits')
predictions = tf.argmax(input=tf.nn.softmax(logits=logits,dim=-1,name='softmax'),axis=1,name='output')

Loss and gradient updates


In [5]:
num_classes = 2
labels = tf.placeholder(tf.int32,[None,num_classes],name='labels')

loss = None
train_op = None

# Calculate loss for both TRAIN and EVAL modes
loss = tf.losses.softmax_cross_entropy(onehot_labels=labels, logits=logits)
train_op = tf.contrib.layers.optimize_loss(loss=loss,
                                           global_step=tf.contrib.framework.get_global_step(),
                                           learning_rate=0.001,
                                           optimizer="Adam")
correct_prediction = tf.equal(predictions, tf.argmax(labels,1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))

Prepare variables for training epoch


In [6]:
session = tf.InteractiveSession()

In [27]:
fetches = {'eval_op':train_op,'accuracy':accuracy}
    
print("Starting...")
session.run(tf.global_variables_initializer())

noEpisodes = 10
batchSize = 1000
noEpochs = len(trainData) / batchSize

valLabels = tf.one_hot(valData.is_duplicate.values,on_value=1,
                        off_value=0,depth=2,axis=-1,name='one_hot_labels')
valLabels = valLabels.eval(session=session)
valQ1Indices = np.array(list(valData.Q1Indexed.values),dtype=np.int32)
valQ1Len = valData.Q1Length.values.astype(np.int32)
valQ1Mask = [np.append(np.ones((revLen,1)),np.zeros((seqLength-revLen,1)),axis=0) 
                        for revLen in valData.Q1Length]
valQ2Indices = np.array(list(valData.Q2Indexed.values),dtype=np.int32)
valQ2Len = valData.Q2Length.values.astype(np.int32)
valQ2Mask = [np.append(np.ones((revLen,1)),np.zeros((seqLength-revLen,1)),axis=0) 
                        for revLen in valData.Q2Length]

testLabels = tf.one_hot(testData.is_duplicate.values,on_value=1,
                        off_value=0,depth=2,axis=-1,name='one_hot_labels')
testLabels = testLabels.eval(session=session)
testQ1Indices = np.array(list(testData.Q1Indexed.values),dtype=np.int32)
testQ1Len = testData.Q1Length.values.astype(np.int32)
testQ1Mask = [np.append(np.ones((revLen,1)),np.zeros((seqLength-revLen,1)),axis=0) 
                        for revLen in testData.Q1Length]
testQ2Indices = np.array(list(testData.Q2Indexed.values),dtype=np.int32)
testQ2Len = testData.Q2Length.values.astype(np.int32)
testQ2Mask = [np.append(np.ones((revLen,1)),np.zeros((seqLength-revLen,1)),axis=0) 
                        for revLen in testData.Q2Length]

noTestBatches = 100
testSzPerBatch = len(valQ1Indices) / noTestBatches

print("Episode\ttrain loss\tval loss\ttest loss\tval accuracy\ttest accuracy")
for episode in range(noEpisodes):
    episodeData = trainData.iloc[np.random.permutation(len(trainData))]

    startIdx = 0
    episodeLoss = 0
    for epoch in range(noEpochs):
        batch = episodeData.iloc[startIdx:startIdx+batchSize]
        startIdx += batchSize

        oneHotLabels = tf.one_hot(batch.is_duplicate.values,
                          on_value=1,off_value=0,depth=2,axis=-1,name='one_hot_labels')
        oneHotLabels = oneHotLabels.eval(session=session)
        q1Indices = np.array(list(batch.Q1Indexed.values),dtype=np.int32)
        q1Len = batch.Q1Length.values.astype(np.int32)
        q1MaskInp = [np.append(np.ones((revLen,1)),np.zeros((seqLength-revLen,1)),axis=0) 
                        for revLen in batch.Q1Length]
        q2Indices = np.array(list(batch.Q2Indexed.values),dtype=np.int32)
        q2Len = batch.Q2Length.values.astype(np.int32)
        q2MaskInp = [np.append(np.ones((revLen,1)),np.zeros((seqLength-revLen,1)),axis=0) 
                        for revLen in batch.Q2Length]
        feed_dict = {q1Input:q1Indices,q2Input:q2Indices,labels:oneHotLabels,
                     q1Mask:q1MaskInp,q2Mask:q2MaskInp}

        trainMetrics = session.run(fetches,feed_dict)

        episodeLoss += trainMetrics['eval_op']

    episodeLoss /= noEpochs

    valLoss = 0
    valAccuracy = 0
    fetches = {'loss':loss, 'accuracy':accuracy}
    for subTest in range(noTestBatches):
        startIdx = subTest*testSzPerBatch
        endIdx = startIdx + testSzPerBatch
        if(subTest == noTestBatches-1):
            endIdx = len(testQ1Indices)
        valFeed = {q1Input:valQ1Indices[startIdx:endIdx],
                    q2Input:valQ2Indices[startIdx:endIdx],
                    labels:valLabels[startIdx:endIdx],
                    q1Mask:valQ1Mask[startIdx:endIdx],
                    q2Mask:valQ2Mask[startIdx:endIdx]}
        
        valMetrics = session.run(fetches,valFeed)
        valLoss += valMetrics['loss']
        valAccuracy += valMetrics['accuracy']

    testLoss = 0
    testAccuracy = 0
    fetches = {'loss':loss, 'accuracy':accuracy, 'predictions':predictions}
    for subTest in range(noTestBatches):
        startIdx = subTest*testSzPerBatch
        endIdx = startIdx + testSzPerBatch
        if(subTest == noTestBatches-1):
            endIdx = len(testQ1Indices)
        testFeed = {q1Input:testQ1Indices[startIdx:endIdx],
                    q2Input:testQ2Indices[startIdx:endIdx],
                    labels:testLabels[startIdx:endIdx],
                    q1Mask:testQ1Mask[startIdx:endIdx],
                    q2Mask:testQ2Mask[startIdx:endIdx]}
        testMetrics = session.run(fetches,testFeed)
        testLoss += testMetrics['loss']
        testAccuracy += testMetrics['accuracy']
        testData.loc[testData.id[startIdx:endIdx] ,'predicted'] = testMetrics['predictions']
        
    valLoss = valLoss/float(noTestBatches)
    valAccuracy = (100.0 / noTestBatches) * valAccuracy
    
    testLoss = testLoss/float(noTestBatches)
    testAccuracy = (100.0 / noTestBatches) * testAccuracy
    
    print("{}\t{}\t{}\t{}\t{}\t{}".format(episode,episodeLoss,valLoss,testLoss,valAccuracy,testAccuracy))
    fetches = {'eval_op':train_op,'accuracy':accuracy}


Starting...
Episode	train loss	val loss	test loss	val accuracy	test accuracy
0	0.560667921428	0.463563925624	0.465944086015	77.3136104941	77.1531566978
1	0.422472538367	0.432729100287	0.434539958835	79.0627623796	78.9848266244
2	0.340721890595	0.413759787828	0.414958726764	81.06008178	81.1048646569
3	0.273291072971	0.43285446927	0.435280828327	81.5830951333	81.6469463706
4	0.214320151195	0.469868721962	0.470270087421	80.7381329536	80.7151327729
5	0.166870939244	0.512846863866	0.511339385211	81.9817208052	82.0049440861
6	0.132598583013	0.603563365638	0.604476641119	81.9566389918	82.1699501872
7	0.104802990829	0.643261657655	0.644042200744	82.0841268301	82.1790724397
8	0.0827376719737	0.748832809031	0.747504140139	82.1803193688	82.3020045757
9	0.0673831367063	0.789723165929	0.78766785562	82.0385229588	82.0186268687

In [31]:
testData.to_csv('testPredictions_summedEmbeddings.csv')

In [9]:
testLoss = 0
testAccuracy = 0

falsePositives = 0
falseNegatives = 0
truePositives = 0
trueNegatives = 0

fetches = {'loss':loss, 'accuracy':accuracy, 'predictions':correct_prediction}
for subTest in range(noTestBatches):
    startIdx = subTest*testSzPerBatch
    endIdx = startIdx + testSzPerBatch
    if(subTest == noTestBatches-1):
        endIdx = len(testQ1Indices)
    testFeed = {q1Input:testQ1Indices[startIdx:endIdx],
                q2Input:testQ2Indices[startIdx:endIdx],
                labels:testLabels[startIdx:endIdx],
                q1Mask:testQ1Mask[startIdx:endIdx],
                q2Mask:testQ2Mask[startIdx:endIdx]}
    testMetrics = session.run(fetches,testFeed)
    testLoss += testMetrics['loss']
    testAccuracy += testMetrics['accuracy']
    preds = testMetrics['predictions']
    
    tl = testLabels[startIdx:endIdx]
    falsePositives += np.sum(preds[tl[:,0] == 1] == True)
    falseNegatives += np.sum(preds[tl[:,1] == 1] == False)
    truePositives += np.sum(preds[tl[:,1] == 1] == True)
    trueNegatives += np.sum(preds[tl[:,0] == 1] == False)
    
testLoss = testLoss/float(noTestBatches)
testAccuracy = (100.0 / noTestBatches) * testAccuracy
print("{}\t{}".format(testLoss,testAccuracy))


0.887173340321	81.8932206035

In [10]:
precision = 100*float(truePositives) / (truePositives + falsePositives)
recall = 100*float(truePositives) / (truePositives + falseNegatives)
print precision,recall
print truePositives,trueNegatives,falsePositives,falseNegatives


31.3653733395 69.2977753953
15513 4054 33946 6873

Testing restore and predictions


In [ ]:
with tf.Session() as sess:
    saver = tf.train.import_meta_graph('/home/ubuntu/QuestionPairs/SumModel/-9.meta')
    saver.restore(sess, '/home/ubuntu/QuestionPairs/SumModel/-9')
    
    temp = predictions.eval(session=sess,feed_dict=testFeed1)

In [ ]:
#np.argmax(testLabels[:lTest],axis=1)
actual = np.argmax(testLabels[:lTest],axis=1)
predicted = temp

In [ ]:
y = actual - predicted
print "%age of non duplicates classified as duplicates: ", float(len(y[y==-1])) / float(len(y))
print "%age of duplicates classified as non duplicates: ", float(len(y[y==1])) / float(len(y))