Download Data


In [1]:
! wget https://raw.githubusercontent.com/udacity/deep-learning/master/sentiment_network/labels.txt
! wget https://raw.githubusercontent.com/udacity/deep-learning/master/sentiment_network/reviews.txt


--2017-03-17 13:24:57--  https://raw.githubusercontent.com/udacity/deep-learning/master/sentiment_network/labels.txt
Resolving raw.githubusercontent.com... 151.101.16.133
Connecting to raw.githubusercontent.com|151.101.16.133|:443... connected.
HTTP request sent, awaiting response... 200 OK
Length: 225000 (220K) [text/plain]
Saving to: ‘labels.txt.1’

labels.txt.1        100%[===================>] 219.73K   252KB/s    in 0.9s    

2017-03-17 13:24:59 (252 KB/s) - ‘labels.txt.1’ saved [225000/225000]

--2017-03-17 13:24:59--  https://raw.githubusercontent.com/udacity/deep-learning/master/sentiment_network/reviews.txt
Resolving raw.githubusercontent.com... 151.101.16.133
Connecting to raw.githubusercontent.com|151.101.16.133|:443... connected.
HTTP request sent, awaiting response... 200 OK
Length: 33678267 (32M) [text/plain]
Saving to: ‘reviews.txt.1’

reviews.txt.1       100%[===================>]  32.12M   152KB/s    in 2m 59s  

2017-03-17 13:28:02 (184 KB/s) - ‘reviews.txt.1’ saved [33678267/33678267]

Curate a Dataset


In [1]:
def pretty_print_review_and_label(i):
    print(labels[i] + "\t:\t" + reviews[i][:80] + "...")

g = open('reviews.txt','r') # What we know!
reviews = list(map(lambda x:x[:-1],g.readlines()))
g.close()

g = open('labels.txt','r') # What we WANT to know!
labels = list(map(lambda x:x[:-1].upper(),g.readlines()))
g.close()

In [2]:
from collections import Counter
import numpy as np

In [3]:
positive_counts = Counter()
negative_counts = Counter()
total_counts = Counter()

In [4]:
for i in range(len(reviews)):
    if(labels[i] == 'POSITIVE'):
        for word in reviews[i].split(" "):
            positive_counts[word] += 1
            total_counts[word] += 1
    else:
        for word in reviews[i].split(" "):
            negative_counts[word] += 1
            total_counts[word] += 1

In [5]:
pos_neg_ratios = Counter()

for term,cnt in list(total_counts.most_common()):
    if(cnt > 100):
        pos_neg_ratio = positive_counts[term] / float(negative_counts[term]+1)
        pos_neg_ratios[term] = pos_neg_ratio

for word,ratio in pos_neg_ratios.most_common():
    if(ratio > 1):
        pos_neg_ratios[word] = np.log(ratio)
    else:
        pos_neg_ratios[word] = -np.log((1 / (ratio+0.01)))

Creating the Input/Output Data


In [6]:
vocab = set(total_counts.keys())
vocab_size = len(vocab)
print(vocab_size)


74074

In [7]:
import numpy as np

layer_0 = np.zeros((1,vocab_size))
layer_0


Out[7]:
array([[ 0.,  0.,  0., ...,  0.,  0.,  0.]])

In [8]:
word2index = {}

for i,word in enumerate(vocab):
    word2index[word] = i

In [9]:
def get_target_for_label(label):
    if(label == 'POSITIVE'):
        return 1
    else:
        return 0

In [10]:
def update_input_layer(review):
    
    global layer_0
    
    # clear out previous state, reset the layer to be all 0s
    layer_0 *= 0
    for word in review.split(" "):
        layer_0[0][word2index[word]] += 1

update_input_layer(reviews[0])

In [11]:
review_counter = Counter()

In [12]:
for word in reviews[0].split(" "):
    review_counter[word] += 1

Encryption Logic From Previous Network


In [13]:
import numpy as np
import copy
import sys

In [14]:
def keySwitch(M,c,l):
    c_star = getBitVector(c,l)
    return M.dot(c_star)

def getRandomMatrix(row,col,bound):
    A = np.zeros((row,col))
    for i in range(row):
        for j in range(col):
            A[i][j] = np.random.randint(bound)
    return A

def getBitMatrix(S,l):
    S_star = list()
    for i in range(l):
        S_star.append(S*2**(l-i-1))
    S_star = np.array(S_star).transpose(1,2,0).reshape(len(S),len(S[0])*l)
    return S_star

def getSecretKey(T):
    assert(T.ndim == 2)
    I = np.eye(len(T)) # num rows
    return hCat(I,T)

def hCat(A,B):
    return np.concatenate((A,B),1)

def vCat(A,B):
    return np.concatenate((A,B),0)

def keySwitchMatrix(S, T,l):
    S_star = getBitMatrix(S,l)
    A = getRandomMatrix(T.shape[1],S_star.shape[1], aBound)
    E = getRandomMatrix(S_star.shape[0], S_star.shape[1], eBound)
    return vCat(S_star + E - T.dot(A), A)

def encrypt(T, x,w,l):
    return keySwitch(keySwitchMatrix(np.eye(len(x)),T,l), w * x,l)

def addVectors(c1, c2):
    return c1 + c2

def linearTransform(M, c, l):
    return M.dot(getBitVector(c, l)).astype('int64')

def linearTransformClient(G, S, T, l):
    return keySwitchMatrix(G.dot(S), T,l)

def vectorize(M):
    ans = np.zeros((len(M) * len(M[0]),1))
    for i in range(len(M)):
        for j in range(len(M[0])):
            ans[i * len(M[0]) + j][0] = M[i][j]
    return ans

def decrypt(S, c,w):
    Sc = S.dot(c)
    return (Sc / w).astype('float').round().astype('int')

def innerProdClient(T,l):
    S = getSecretKey(T)
    tvsts = vectorize(S.T.dot(S)).T
    mvsts = copyRows(tvsts, len(T))
    return keySwitchMatrix(mvsts,T,l)

def copyRows(row, numrows):
    ans = np.zeros((numrows, len(row[0])))
    for i in range(len(ans)):
        for j in range(len(ans[0])):
            ans[i][j] = row[0][j]
            
    return ans

def innerProd(c1, c2, M,l):
    
    cc1 = np.zeros((len(c1),1))
    for i in range(len(c1)):
        cc1[i][0] = c1[i]
    
    cc2 = np.zeros((1, len(c2)))
    for i in range(len(c2)):
        cc2[0][i] = c2[i]
        
    cc = vectorize(cc1.dot(cc2))
    
    bv = getBitVector((cc / w).round().astype('int64'),l)
    
    return M.dot(bv)

def one_way_encrypt_vector(vector,scaling_factor = 1000):
    padded_vector = np.random.rand(len(vector)+1)
    padded_vector[0:len(vector)] = vector
    
    vec_len = len(padded_vector)
    
    M_temp = (M_keys[vec_len-2].T*padded_vector*scaling_factor / (vec_len-1)).T
    e_vector = innerProd(c_ones[vec_len-2],c_ones[vec_len-2],M_temp,l)
    return e_vector.astype('int')

def load_linear_transformation(syn0_text,scaling_factor = 1000):
    syn0_text *= scaling_factor
    return linearTransformClient(syn0_text.T,getSecretKey(T),T,l)

def s_decrypt(vec):
    return decrypt(getSecretKey(T_keys[len(vec)-2]),vec,w)

def add_vectors(x,y,scaling_factor = 10000):
    return x + y

def transpose(syn1):

    rows = len(syn1)
    cols = len(syn1[0]) - 1
    
    max_rc = max(rows,cols)
    
    syn1_c = list()
    for i in range(len(syn1)):
        tmp = np.zeros(max_rc+1)
        tmp[:len(syn1[i])] = syn1[i]
        syn1_c.append(tmp)
    
    syn1_c_transposed = list()
    
    for row_i in range(cols):
        syn1t_column = innerProd(syn1_c[0],v_onehot[max_rc-1][row_i],M_onehot[max_rc-1][0],l) / scaling_factor
        for col_i in range(rows-1):
            syn1t_column += innerProd(syn1_c[col_i+1],v_onehot[max_rc-1][row_i],M_onehot[max_rc-1][col_i+1],l) / scaling_factor

        syn1_c_transposed.append(syn1t_column[0:rows+1])
    
    return syn1_c_transposed

def int2bin(x):
    s = list()
    mod = 2
    while(x > 0):
        s.append(int(x % 2))
        x = int(x / 2)
    return np.array(list(reversed(s))).astype('int64')


def getBitVector(c,l):
    m = len(c)
    c_star = np.zeros(l * m,dtype='int64')
    for i in range(m):
        local_c = int(c[i])
        if(local_c < 0):
            local_c = -local_c
        b = int2bin(local_c)
        if(c[i] < 0):
            b *= -1
        if(c[i] == 0):
            b *= 0
#         try:
        c_star[(i * l) + (l-len(b)): (i+1) * l] += b
#         except:
#             print(len(b))
#             print(i)
#             print(len(c_star[(i * l) + (l-len(b)): (i+1) * l]))
    return c_star

In [206]:
# HAPPENS ON SECURE SERVER

l = 100
w = 2 ** 25

aBound = 10
tBound = 10
eBound = 10

max_dim = 16

scaling_factor = 1000

# keys
T_keys = list()
for i in range(max_dim):
    T_keys.append(np.random.rand(i+1,1))

# one way encryption transformation
M_keys = list()
for i in range(max_dim):
    M_keys.append(innerProdClient(T_keys[i],l))

M_onehot = list()
for h in range(max_dim):
    i = h+1
    buffered_eyes = list()
    for row in np.eye(i+1):
        buffer = np.ones(i+1)
        buffer[0:i+1] = row
        buffered_eyes.append((M_keys[i-1].T * buffer).T)
    M_onehot.append(buffered_eyes)
    
c_ones = list()
for i in range(max_dim):
    c_ones.append(encrypt(T_keys[i],np.ones(i+1), w, l).astype('int'))
    
v_onehot = list()
onehot = list()
for i in range(max_dim):
    eyes = list()
    eyes_txt = list()
    for eye in np.eye(i+1):
        eyes_txt.append(eye)
        eyes.append(one_way_encrypt_vector(eye,scaling_factor))
    v_onehot.append(eyes)
    onehot.append(eyes_txt)

H_sigmoid_txt = np.zeros((5,5))

H_sigmoid_txt[0][0] = 0.5
H_sigmoid_txt[0][1] = 0.25
H_sigmoid_txt[0][2] = -1/48.0
H_sigmoid_txt[0][3] = 1/480.0
H_sigmoid_txt[0][4] = -17/80640.0

H_sigmoid = list()
for row in H_sigmoid_txt:
    H_sigmoid.append(one_way_encrypt_vector(row))

In [207]:
def sigmoid(layer_2_c):
    out_rows = list()
    for position in range(len(layer_2_c)-1):

        M_position = M_onehot[len(layer_2_c)-2][0]

        layer_2_index_c = innerProd(layer_2_c,v_onehot[len(layer_2_c)-2][position],M_position,l) / scaling_factor

        x = layer_2_index_c
        x2 = innerProd(x,x,M_position,l) / scaling_factor
        x3 = innerProd(x,x2,M_position,l) / scaling_factor
        x5 = innerProd(x3,x2,M_position,l) / scaling_factor
        x7 = innerProd(x5,x2,M_position,l) / scaling_factor

        xs = copy.deepcopy(v_onehot[5][0])
        xs[1] = x[0]
        xs[2] = x2[0]
        xs[3] = x3[0]
        xs[4] = x5[0]
        xs[5] = x7[0]

        out = mat_mul_forward(xs,H_sigmoid[0:1],scaling_factor)
        out_rows.append(out)
    return transpose(out_rows)[0]

def load_linear_transformation(syn0_text,scaling_factor = 1000):
    syn0_text *= scaling_factor
    return linearTransformClient(syn0_text.T,getSecretKey(T_keys[len(syn0_text)-1]),T_keys[len(syn0_text)-1],l)

def outer_product(x,y):
    flip = False
    if(len(x) < len(y)):
        flip = True
        tmp = x
        x = y
        y = tmp
        
    y_matrix = list()

    for i in range(len(x)-1):
        y_matrix.append(y)

    y_matrix_transpose = transpose(y_matrix)

    outer_result = list()
    for i in range(len(x)-1):
        outer_result.append(mat_mul_forward(x * onehot[len(x)-1][i],y_matrix_transpose,scaling_factor))
    
    if(flip):
        return transpose(outer_result)
    
    return outer_result

def mat_mul_forward(layer_1,syn1,scaling_factor):
    
    input_dim = len(layer_1)
    output_dim = len(syn1)

    buff = np.zeros(max(output_dim+1,input_dim+1))
    buff[0:len(layer_1)] = layer_1
    layer_1_c = buff
    
    syn1_c = list()
    for i in range(len(syn1)):
        buff = np.zeros(max(output_dim+1,input_dim+1))
        buff[0:len(syn1[i])] = syn1[i]
        syn1_c.append(buff)
    
    layer_2 = innerProd(syn1_c[0],layer_1_c,M_onehot[len(layer_1_c) - 2][0],l) / float(scaling_factor)
    for i in range(len(syn1)-1):
        layer_2 += innerProd(syn1_c[i+1],layer_1_c,M_onehot[len(layer_1_c) - 2][i+1],l) / float(scaling_factor)
    return layer_2[0:output_dim+1]

def elementwise_vector_mult(x,y,scaling_factor):
    
    y =[y]
    
    one_minus_layer_1 = transpose(y)

    outer_result = list()
    for i in range(len(x)-1):
        outer_result.append(mat_mul_forward(x * onehot[len(x)-1][i],y,scaling_factor))
        
    return transpose(outer_result)[0]

Encrypted Sentiment Classifier


In [208]:
import time
import sys
import numpy as np

# Let's tweak our network from before to model these phenomena
class SentimentNetwork:
    def __init__(self, reviews,labels,min_count = 10,polarity_cutoff = 0.1,hidden_nodes = 8, learning_rate = 0.1):
       
        np.random.seed(1234)
    
        self.pre_process_data(reviews, polarity_cutoff, min_count)
        
        self.init_network(len(self.review_vocab),hidden_nodes, 1, learning_rate)
        
        
    def pre_process_data(self,reviews, polarity_cutoff,min_count):
        
        print("Pre-processing data...")
        
        positive_counts = Counter()
        negative_counts = Counter()
        total_counts = Counter()

        for i in range(len(reviews)):
            if(labels[i] == 'POSITIVE'):
                for word in reviews[i].split(" "):
                    positive_counts[word] += 1
                    total_counts[word] += 1
            else:
                for word in reviews[i].split(" "):
                    negative_counts[word] += 1
                    total_counts[word] += 1

        pos_neg_ratios = Counter()

        for term,cnt in list(total_counts.most_common()):
            if(cnt >= 50):
                pos_neg_ratio = positive_counts[term] / float(negative_counts[term]+1)
                pos_neg_ratios[term] = pos_neg_ratio

        for word,ratio in pos_neg_ratios.most_common():
            if(ratio > 1):
                pos_neg_ratios[word] = np.log(ratio)
            else:
                pos_neg_ratios[word] = -np.log((1 / (ratio + 0.01)))
        
        review_vocab = set()
        for review in reviews:
            for word in review.split(" "):
                if(total_counts[word] > min_count):
                    if(word in pos_neg_ratios.keys()):
                        if((pos_neg_ratios[word] >= polarity_cutoff) or (pos_neg_ratios[word] <= -polarity_cutoff)):
                            review_vocab.add(word)
                    else:
                        review_vocab.add(word)
        self.review_vocab = list(review_vocab)
        
        label_vocab = set()
        for label in labels:
            label_vocab.add(label)
        
        self.label_vocab = list(label_vocab)
        
        self.review_vocab_size = len(self.review_vocab)
        self.label_vocab_size = len(self.label_vocab)
        
        self.word2index = {}
        for i, word in enumerate(self.review_vocab):
            self.word2index[word] = i
        
        self.label2index = {}
        for i, label in enumerate(self.label_vocab):
            self.label2index[label] = i
         
        
    def init_network(self, input_nodes, hidden_nodes, output_nodes, learning_rate):
        # Set number of nodes in input, hidden and output layers.
        self.input_nodes = input_nodes
        self.hidden_nodes = hidden_nodes
        self.output_nodes = output_nodes

        print("Initializing Weights...")
        self.weights_0_1_t = np.zeros((self.input_nodes,self.hidden_nodes))
    
        self.weights_1_2_t = np.random.normal(0.0, self.output_nodes**-0.5, 
                                                (self.hidden_nodes, self.output_nodes))
        
        print("Encrypting Weights...")
        self.weights_0_1 = list()
        for i,row in enumerate(self.weights_0_1_t):
            sys.stdout.write("\rEncrypting Weights from Layer 0 to Layer 1:" + str(float((i+1) * 100) / len(self.weights_0_1_t))[0:4] + "% done")
            self.weights_0_1.append(one_way_encrypt_vector(row,scaling_factor).astype('int64'))
        print("")
        
        self.weights_1_2 = list()
        for i,row in enumerate(self.weights_1_2_t):
            sys.stdout.write("\rEncrypting Weights from Layer 1 to Layer 2:" + str(float((i+1) * 100) / len(self.weights_1_2_t))[0:4] + "% done")
            self.weights_1_2.append(one_way_encrypt_vector(row,scaling_factor).astype('int64'))           
        self.weights_1_2 = transpose(self.weights_1_2)
        
        self.learning_rate = learning_rate
        
        self.layer_0 = np.zeros((1,input_nodes))
        self.layer_1 = np.zeros((1,hidden_nodes))
        
    def sigmoid(self,x):
        return 1 / (1 + np.exp(-x))
    
    
    def sigmoid_output_2_derivative(self,output):
        return output * (1 - output)
    
    def update_input_layer(self,review):

        # clear out previous state, reset the layer to be all 0s
        self.layer_0 *= 0
        for word in review.split(" "):
            self.layer_0[0][self.word2index[word]] = 1

    def get_target_for_label(self,label):
        if(label == 'POSITIVE'):
            return 1
        else:
            return 0
        
    def train(self, training_reviews_raw, training_labels):

        training_reviews = list()
        for review in training_reviews_raw:
            indices = set()
            for word in review.split(" "):
                if(word in self.word2index.keys()):
                    indices.add(self.word2index[word])
            training_reviews.append(list(indices))

        layer_1 = np.zeros_like(self.weights_0_1[0])

        start = time.time()
        correct_so_far = 0
        total_pred = 0.5
        for i in range(len(training_reviews_raw)):
            review_indices = training_reviews[i]
            label = training_labels[i]

            layer_1 *= 0
            for index in review_indices:
                layer_1 += self.weights_0_1[index]
            layer_1 = layer_1 / float(len(review_indices))
            layer_1 = layer_1.astype('int64') # round to nearest integer

            layer_2 = sigmoid(innerProd(layer_1,self.weights_1_2[0],M_onehot[len(layer_1) - 2][1],l) / float(scaling_factor))[0:2]

            if(label == 'POSITIVE'):
                layer_2_delta = layer_2 - (c_ones[len(layer_2) - 2] * scaling_factor)
            else:
                layer_2_delta = layer_2

            weights_1_2_trans = transpose(self.weights_1_2)
            layer_1_delta = mat_mul_forward(layer_2_delta,weights_1_2_trans,scaling_factor).astype('int64')

            self.weights_1_2 -= np.array(outer_product(layer_2_delta,layer_1))  * self.learning_rate

            for index in review_indices:
                self.weights_0_1[index] -= (layer_1_delta * self.learning_rate).astype('int64')

            # we're going to decrypt on the fly so we can watch what's happening
            total_pred += (s_decrypt(layer_2)[0] / scaling_factor)
            if((s_decrypt(layer_2)[0] / scaling_factor) >= (total_pred / float(i+2)) and label == 'POSITIVE'):
                correct_so_far += 1
            if((s_decrypt(layer_2)[0] / scaling_factor) < (total_pred / float(i+2)) and label == 'NEGATIVE'):
                correct_so_far += 1

            reviews_per_second = i / float(time.time() - start)

            sys.stdout.write("\rProgress:" + str(100 * i/float(len(training_reviews_raw)))[:4] + "% Speed(reviews/sec):" + str(reviews_per_second)[0:5] + " #Correct:" + str(correct_so_far) + " #Trained:" + str(i+1) + " Training Accuracy:" + str(correct_so_far * 100 / float(i+1))[:4] + "%")
            if(i % 100 == 0):
                print(i)

    
    def test(self, testing_reviews, testing_labels):
        
        correct = 0
        
        start = time.time()
        
        for i in range(len(testing_reviews)):
            pred = self.run(testing_reviews[i])
            if(pred == testing_labels[i]):
                correct += 1
            
            reviews_per_second = i / float(time.time() - start)
            
            sys.stdout.write("\rProgress:" + str(100 * i/float(len(testing_reviews)))[:4] \
                             + "% Speed(reviews/sec):" + str(reviews_per_second)[0:5] \
                            + "% #Correct:" + str(correct) + " #Tested:" + str(i+1) + " Testing Accuracy:" + str(correct * 100 / float(i+1))[:4] + "%")
    
    def run(self, review):
        
        # Input Layer


        # Hidden layer
        self.layer_1 *= 0
        unique_indices = set()
        for word in review.lower().split(" "):
            if word in self.word2index.keys():
                unique_indices.add(self.word2index[word])
        for index in unique_indices:
            self.layer_1 += self.weights_0_1[index]
        
        # Output layer
        layer_2 = self.sigmoid(self.layer_1.dot(self.weights_1_2))
        
        if(layer_2[0] >= 0.5):
            return "POSITIVE"
        else:
            return "NEGATIVE"

In [209]:
mlp = SentimentNetwork(reviews[:-1000],labels[:-1000],min_count=20,polarity_cutoff=0.15,learning_rate=0.01)


Pre-processing data...
Initializing Weights...
Encrypting Weights...
Encrypting Weights from Layer 0 to Layer 1:100.% done
Encrypting Weights from Layer 1 to Layer 2:100.% done

In [210]:
saved_weights = (copy.deepcopy(mlp.weights_0_1),copy.deepcopy(mlp.weights_1_2))

In [211]:
mlp.weights_0_1 = copy.deepcopy(saved_weights[0])
mlp.weights_1_2 = copy.deepcopy(saved_weights[1])
mlp.learning_rate = 0.0125
mlp.train(reviews[0:-1000],labels[:-1000])


Progress:0.0% Speed(reviews/sec):0.0 #Correct:1 #Trained:1 Training Accuracy:100.%0
Progress:0.41% Speed(reviews/sec):1.978 #Correct:66 #Trained:101 Training Accuracy:65.3%100
Progress:0.83% Speed(reviews/sec):2.014 #Correct:131 #Trained:201 Training Accuracy:65.1%200
Progress:1.25% Speed(reviews/sec):2.011 #Correct:203 #Trained:301 Training Accuracy:67.4%300
Progress:1.66% Speed(reviews/sec):2.003 #Correct:276 #Trained:401 Training Accuracy:68.8%400
Progress:2.08% Speed(reviews/sec):2.007 #Correct:348 #Trained:501 Training Accuracy:69.4%500
Progress:2.5% Speed(reviews/sec):2.015 #Correct:420 #Trained:601 Training Accuracy:69.8%600
Progress:2.91% Speed(reviews/sec):1.974 #Correct:497 #Trained:701 Training Accuracy:70.8%700
Progress:3.33% Speed(reviews/sec):1.973 #Correct:581 #Trained:801 Training Accuracy:72.5%800
Progress:3.75% Speed(reviews/sec):1.976 #Correct:666 #Trained:901 Training Accuracy:73.9%900
Progress:4.16% Speed(reviews/sec):1.983 #Correct:751 #Trained:1001 Training Accuracy:75.0%1000
Progress:4.58% Speed(reviews/sec):1.909 #Correct:835 #Trained:1101 Training Accuracy:75.8%1100
Progress:5.0% Speed(reviews/sec):1.905 #Correct:913 #Trained:1201 Training Accuracy:76.0%1200
Progress:5.41% Speed(reviews/sec):1.887 #Correct:987 #Trained:1301 Training Accuracy:75.8%1300
Progress:5.83% Speed(reviews/sec):1.891 #Correct:1069 #Trained:1401 Training Accuracy:76.3%1400
Progress:6.25% Speed(reviews/sec):1.888 #Correct:1146 #Trained:1501 Training Accuracy:76.3%1500
Progress:6.66% Speed(reviews/sec):1.881 #Correct:1224 #Trained:1601 Training Accuracy:76.4%1600
Progress:7.08% Speed(reviews/sec):1.829 #Correct:1287 #Trained:1701 Training Accuracy:75.6%1700
Progress:7.5% Speed(reviews/sec):1.831 #Correct:1361 #Trained:1801 Training Accuracy:75.5%1800
Progress:7.91% Speed(reviews/sec):1.839 #Correct:1437 #Trained:1901 Training Accuracy:75.5%1900
Progress:8.33% Speed(reviews/sec):1.820 #Correct:1508 #Trained:2001 Training Accuracy:75.3%2000
Progress:8.75% Speed(reviews/sec):1.827 #Correct:1584 #Trained:2101 Training Accuracy:75.3%2100
Progress:8.85% Speed(reviews/sec):1.829 #Correct:1602 #Trained:2127 Training Accuracy:75.3%
---------------------------------------------------------------------------
KeyboardInterrupt                         Traceback (most recent call last)
<ipython-input-211-f298df4f9c2f> in <module>()
      2 mlp.weights_1_2 = copy.deepcopy(saved_weights[1])
      3 mlp.learning_rate = 0.0125
----> 4 mlp.train(reviews[0:-1000],labels[:-1000])

<ipython-input-208-b5ea40aa61ae> in train(self, training_reviews_raw, training_labels)
    156                 layer_2_delta = layer_2
    157 
--> 158             weights_1_2_trans = transpose(self.weights_1_2)
    159             layer_1_delta = mat_mul_forward(layer_2_delta,weights_1_2_trans,scaling_factor).astype('int64')
    160 

<ipython-input-14-2886fea3356d> in transpose(syn1)
    123 
    124     for row_i in range(cols):
--> 125         syn1t_column = innerProd(syn1_c[0],v_onehot[max_rc-1][row_i],M_onehot[max_rc-1][0],l) / scaling_factor
    126         for col_i in range(rows-1):
    127             syn1t_column += innerProd(syn1_c[col_i+1],v_onehot[max_rc-1][row_i],M_onehot[max_rc-1][col_i+1],l) / scaling_factor

<ipython-input-14-2886fea3356d> in innerProd(c1, c2, M, l)
     85     bv = getBitVector((cc / w).round().astype('int64'),l)
     86 
---> 87     return M.dot(bv)
     88 
     89 def one_way_encrypt_vector(vector,scaling_factor = 1000):

KeyboardInterrupt: 

In [ ]: