In [1]:
#Imports and model parameters

import tensorflow as tf
import numpy as np
import copy
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)


#Set the learning threshold.  If .04, models will be trained until they have 4% training error.
thresh = .04

cost_thresh = 1.0

# Parameters
learning_rate = 0.001
training_epochs = 15
batch_size = 100
display_step = 1

# Network Parameters
n_hidden_1 = 256 # 1st layer number of features
n_hidden_2 = 256 # 2nd layer number of features
n_input = 784 # Guess quadratic function
n_classes = 10 # 
#synapses = []
models = []


Extracting /tmp/data/train-images-idx3-ubyte.gz
Extracting /tmp/data/train-labels-idx1-ubyte.gz
Extracting /tmp/data/t10k-images-idx3-ubyte.gz
Extracting /tmp/data/t10k-labels-idx1-ubyte.gz
---------------------------------------------------------------------------
NameError                                 Traceback (most recent call last)
<ipython-input-1-1a0ca35fd885> in <module>()
     34 #synapse1 = 2*np.random.random((hidden_dim,hidden_dim2)) - 1
     35 #synapse2 = 2*np.random.random((hidden_dim2,1)) - 1
---> 36 copy_model = multilayer_perceptron(ind=0)

NameError: name 'multilayer_perceptron' is not defined

In [2]:
#Function definitions

def func(x,a,b,c):
    return x*x*a + x*b + c

def generatecandidate4(a,b,c,tot):
    
    candidate = [[np.random.random() for x in xrange(1)] for y in xrange(tot)]
    candidatesolutions = [[func(x[0],a,b,c)] for x in candidate]
    
    return (candidate, candidatesolutions)

def synapse_interpolate(synapse1, synapse2, t):
    return (synapse2-synapse1)*t + synapse1

#Linearly interpolates between two models with weights w1 biases b1 and weights w2 biases b2
def model_interpolate(w1,b1,w2,b2,t):
    
    m1w = w1
    m1b = b1
    m2w = w2 
    m2b = b2
    
    mwi = [synapse_interpolate(m1we,m2we,t) for m1we, m2we in zip(m1w,m2w)]
    mbi = [synapse_interpolate(m1be,m2be,t) for m1be, m2be in zip(m1b,m2b)]
    
    return mwi, mbi

#Finds the maximum error on the linearly interpolated path between models (w1,b1) and (w2,b2).
#Currently only checks 20 intermediate points.  Should probably make this tunable.
#Returns the maximum error as well as the location of the maximum error (as an index, thus if the maximum error
#occurred halfway, this method returns 10 (because it checks 20 intermediate points))
def InterpBeadError(w1,b1, w2,b2, write = False, name = "00"):
    errors = []
    
    #xdat,ydat = generatecandidate4(.5, .25, .1, 1000)
    
    #xdat,ydat = mnist.train.next_batch(1000)
    
    xdat = mnist.test.images
    ydat = mnist.test.labels
    #xdat = np.array(xdat)
    #ydat = np.array(ydat)
    
    
    
    
    for tt in xrange(20):
        #print tt
        #accuracy = 0.
        t = tt/20.
        thiserror = 0

        #x0 = tf.placeholder("float", [None, n_input])
        #y0 = tf.placeholder("float", [None, n_classes])
        weights, biases = model_interpolate(w1,b1,w2,b2, t)
        interp_model = multilayer_perceptron(w=weights, b=biases)
        
        with interp_model.g.as_default():
            
            #interp_model.UpdateWeights(weights, biases)


            x = tf.placeholder("float", [None, n_input])
            y = tf.placeholder("float", [None, n_classes])
            pred = interp_model.predict(x)
            init = tf.initialize_all_variables()


            with tf.Session() as sess:
                sess.run(init)
                correct_prediction = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))
                accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
                print "Accuracy:", 1 - accuracy.eval({x: xdat, y: ydat}),"\t",tt,weights[0][1][0],weights[0][1][1]
                thiserror = 1 - accuracy.eval({x: xdat, y: ydat})


        errors.append(thiserror)

    if write == True:
        with open("f" + str(name) + ".out",'w+') as f:
            for e in errors:
                f.write(str(e) + "\n")
    
    return max(errors), np.argmax(errors)

In [3]:
#Class definitions

#This is the workhorse class for the multilayer_perceptron
class multilayer_perceptron():
    
    #weights = {}
    #biases = {}
    
    def __init__(self, w=0, b=0, ind='00'):
        
        self.index = ind #used for reading values from file
        #
        #I'm going to eschew writing to file for now because I'll be generating too many files
        #But in the future this would allow for easy read-writing.
        #Currently, the last value of the parameters is stored in self.params to be read
        
        learning_rate = 0.001
        training_epochs = 15
        batch_size = 100
        display_step = 1

        # Network Parameters
        n_hidden_1 = 256 # 1st layer number of features
        n_hidden_2 = 256 # 2nd layer number of features
        n_input = 784 # Guess quadratic function
        n_classes = 10 # 
        self.g = tf.Graph()
        
        
        self.params = []
        
        #This wrapper ensures that the graph being operated on is the correct graph.  Tensorflow isn't quite
        #set up to train multiple models simultaneously, so you have to be careful to make sure the correct graph is set as default.
        with self.g.as_default():
        
            #Note that by default, weights and biases will be initialized to random normal dists
            if w==0:
                
                self.weights = {
                    'h1': tf.Variable(tf.random_normal([n_input, n_hidden_1])),
                    'h2': tf.Variable(tf.random_normal([n_hidden_1, n_hidden_2])),
                    'out': tf.Variable(tf.random_normal([n_hidden_2, n_classes]))
                }
                self.weightslist = [self.weights['h1'],self.weights['h2'],self.weights['out']]
                self.biases = {
                    'b1': tf.Variable(tf.random_normal([n_hidden_1])),
                    'b2': tf.Variable(tf.random_normal([n_hidden_2])),
                    'out': tf.Variable(tf.random_normal([n_classes]))
                }
                self.biaseslist = [self.biases['b1'],self.biases['b2'],self.biases['out']]
                
            else:
                
                self.weights = {
                    'h1': tf.Variable(w[0]),
                    'h2': tf.Variable(w[1]),
                    'out': tf.Variable(w[2])
                }
                self.weightslist = [self.weights['h1'],self.weights['h2'],self.weights['out']]
                self.biases = {
                    'b1': tf.Variable(b[0]),
                    'b2': tf.Variable(b[1]),
                    'out': tf.Variable(b[2])
                }
                self.biaseslist = [self.biases['b1'],self.biases['b2'],self.biases['out']]
            self.saver = tf.train.Saver()
    
    #Convenience method for manually resetting the weights.
    def UpdateWeights(self, w, b):
        with self.g.as_default():
            self.weights = {
                    'h1': tf.Variable(w[0]),
                    'h2': tf.Variable(w[1]),
                    'out': tf.Variable(w[2])
                }
            self.weightslist = [self.weights['h1'],self.weights['h2'],self.weights['out']]
            self.biases = {
                'b1': tf.Variable(b[0]),
                'b2': tf.Variable(b[1]),
                'out': tf.Variable(b[2])
            }
            self.biaseslist = [self.biases['b1'],self.biases['b2'],self.biases['out']]
            

    #This method determines the optimization method used.  Currently using relus (fairly straightforward.  Can easily change)
    def predict(self, x):
        
        with self.g.as_default():
            layer_1 = tf.add(tf.matmul(x, self.weights['h1']), self.biases['b1'])
            layer_1 = tf.nn.relu(layer_1)
            # Hidden layer with RELU activation
            layer_2 = tf.add(tf.matmul(layer_1, self.weights['h2']), self.biases['b2'])
            layer_2 = tf.nn.relu(layer_2)
            # Output layer with linear activation
            out_layer = tf.matmul(layer_2, self.weights['out']) + self.biases['out']
            return out_layer
        
    def ReturnParamsAsList(self):
        
        with self.g.as_default():

            with tf.Session() as sess:
                # Restore variables from disk
                self.saver.restore(sess, "/home/dfreeman/PythonFun/tmp/model"+str(self.index)+".ckpt")                
                return sess.run(self.weightslist), sess.run(self.biaseslist)

        
#This class stores the graphs for all the models that have been successively trained by dynamic string sampling.
#The models are ordered in a list such that self.AllBeads[0] is the first model and self.allBeads[-1] is the last model.
class WeightString:
    
    def __init__(self, w1, b1, w2, b2, numbeads, threshold):
        self.w1 = w1
        self.w2 = w2
        self.b1 = b1
        self.b2 = b2
        #self.w2, self.b2 = m2.params
        self.AllBeads = []

        self.threshold = threshold
        
        self.AllBeads.append([w1,b1])
        
        
        for n in xrange(numbeads):
            ws,bs = model_interpolate(w1,b1,w2,b2, (n + 1.)/(numbeads+1.))
            self.AllBeads.append([ws,bs])
            
        self.AllBeads.append([w2,b2])
        
        
        self.ConvergedList = [False for f in xrange(len(self.AllBeads))]
        self.ConvergedList[0] = True
        self.ConvergedList[-1] = True
    
    '''
    #Old method used when I was playing around with the "energy" of the path.
    def SpringNorm(self, order):
        
        total = 0.
        
        #Energy between mobile beads
        for i,b in enumerate(self.AllBeads):
            if i < len(self.AllBeads)-1:
                #print "Tallying energy between bead " + str(i) + " and bead " + str(i+1)
                subtotal = 0.
                for j in xrange(len(b)):
                    subtotal += np.linalg.norm(np.subtract(self.AllBeads[i][0][j],self.AllBeads[i+1][0][j]),ord=order)#/len(self.beads[0][j])
                for j in xrange(len(b)):
                    subtotal += np.linalg.norm(np.subtract(self.AllBeads[i][1][j],self.AllBeads[i+1][1][j]),ord=order)#/len(self.beads[0][j])
                total+=subtotal
        
        return total#/len(self.beads)'''
        
    
    #Compartmentalized gradient descent method for new models added to the weightstring.
    #TODO: Make partitioning between training, validation, and test more clear.
    def SGDBead(self, bead, thresh, maxindex):
        
        finalerror = 0.
        
        #thresh = .05

        # Parameters
        learning_rate = 0.01
        training_epochs = 15
        batch_size = 1000
        display_step = 1
        
        curWeights, curBiases = self.AllBeads[bead]
        test_model = multilayer_perceptron(w=curWeights, b=curBiases)

        with test_model.g.as_default():

            x = tf.placeholder("float", [None, n_input])
            y = tf.placeholder("float", [None, n_classes])
            pred = test_model.predict(x)
            cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(pred, y))
            optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)
            init = tf.initialize_all_variables()
            stopcond = True

            with tf.Session() as sess:
                sess.run(init)
                xtest = mnist.test.images
                ytest = mnist.test.labels
                
                thiserror = 0.
                j = 0
                while stopcond:
                    for epoch in range(training_epochs):
                        avg_cost = 0.
                        total_batch = int(mnist.train.num_examples/batch_size)
                        if (avg_cost > thresh or avg_cost == 0.) and stopcond:
                        # Loop over all batches
                            for i in range(total_batch):
                                batch_x, batch_y = mnist.train.next_batch(batch_size)
                                # Run optimization op (backprop) and cost op (to get loss value)
                                _, c = sess.run([optimizer, cost], feed_dict={x: batch_x,
                                                                              y: batch_y})
                                # Compute average loss
                                avg_cost += c / total_batch
                            # Display logs per epoch step
                            #if epoch % display_step == 0:
                            #    print "Epoch:", '%04d' % (epoch+1), "cost=", \
                            #        "{:.9f}".format(avg_cost)
                            correct_prediction = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))
                            # Calculate accuracy
                            accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
                            #print "Accuracy:", accuracy.eval({x: xtest, y: ytest})
                            thiserror = 1 - accuracy.eval({x: xtest, y: ytest})
                            if thiserror < thresh:
                                stopcond = False
                    #print "Optimization Finished!"

                    # Test model
                    #correct_prediction = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))
                    #correct_prediction = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))
                    # Calculate accuracy
                    #accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
                    #print "Accuracy:", accuracy.eval({x: xtest, y: ytest})

                    #if (j%5000) == 0:
                    #    print "Error after "+str(j)+" iterations:" + str(accuracy.eval({x: xtest, y: ytest}))

                    finalerror = 1 - accuracy.eval({x: xtest, y: ytest})
                    
                    if finalerror < thresh or stopcond==False:# or j > maxindex:
                        #print "Changing stopcond!"
                        stopcond = False
                        #print "Final params:"
                        test_model.params = sess.run(test_model.weightslist), sess.run(test_model.biaseslist)
                        self.AllBeads[bead]=test_model.params
                        print "Final bead error: " + str(finalerror)
                        
                    j+=1

            return finalerror

In [4]:
#Model generation
#This codeblock trains some number of models to the desired training threshold.

NumModels = 3

copy_model = multilayer_perceptron(ind=0)

for ii in xrange(NumModels):

    '''weights = {
        'h1': tf.Variable(tf.random_normal([n_input, n_hidden_1])),
        'h2': tf.Variable(tf.random_normal([n_hidden_1, n_hidden_2])),
        'out': tf.Variable(tf.random_normal([n_hidden_2, n_classes]))
    }
    biases = {
        'b1': tf.Variable(tf.random_normal([n_hidden_1])),
        'b2': tf.Variable(tf.random_normal([n_hidden_2])),
        'out': tf.Variable(tf.random_normal([n_classes]))
    }'''

    # Construct model with different initial weights
    test_model = multilayer_perceptron(ind=ii)
    
    #Construct model with same initial weights
    #test_model = copy.copy(copy_model)
    #test_model.index = ii
    
    
    
    
    #print test_model.weights
    

    
    models.append(test_model)
    with test_model.g.as_default():

        x = tf.placeholder("float", [None, n_input])
        y = tf.placeholder("float", [None, n_classes])
        pred = test_model.predict(x)

        # Define loss and optimizer
        #cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(pred, y))
        cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(pred, y))
        optimizer = tf.train.RMSPropOptimizer(learning_rate=learning_rate).minimize(cost)

        # Initializing the variables
        init = tf.initialize_all_variables()


        #remove the comment to get random initialization
        stopcond = True




        with tf.Session() as sess:
            sess.run(init)
            xtest = mnist.test.images
            ytest = mnist.test.labels
            while stopcond:
                #print 'epoch:' + str(e)
                #X = []
                #y = []
                j = 0
                # Training cycle
                for epoch in range(training_epochs):
                    avg_cost = 0.
                    total_batch = int(10000/batch_size)

                    if (avg_cost > thresh or avg_cost == 0.) and stopcond:
                    # Loop over all batches
                        for i in range(total_batch):
                            batch_x, batch_y = mnist.train.next_batch(batch_size)
                            # Run optimization op (backprop) and cost op (to get loss value)
                            _, c = sess.run([optimizer, cost], feed_dict={x: batch_x,
                                                                          y: batch_y})
                            # Compute average loss
                            avg_cost += c / total_batch
                        # Display logs per epoch step
                        #if epoch % display_step == 0:
                        #    #print "Epoch:", '%04d' % (epoch+1), "cost=", \
                        #    #    "{:.9f}".format(avg_cost)
                        
                        correct_prediction = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))
                        # Calculate accuracy
                        accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
                        #print "Accuracy:", accuracy.eval({x: xtest, y: ytest})
                        thiserror = 1 - accuracy.eval({x: xtest, y: ytest})
                        if thiserror < thresh:
                            stopcond = False
                            
                print "Optimization Finished!"

                # Test model
                #correct_prediction = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))
                correct_prediction = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))
                # Calculate accuracy
                accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
                print "Accuracy:", accuracy.eval({x: xtest, y: ytest})

                if (j%5000) == 0:
                    print "Error after "+str(j)+" iterations:" + str(accuracy.eval({x: xtest, y: ytest}))

                if 1 - accuracy.eval({x: xtest, y: ytest}) < thresh or stopcond == False:
                    #print "Changing stopcond!"
                    stopcond = False
                    print "Final params:"
                    test_model.params = sess.run(test_model.weightslist), sess.run(test_model.biaseslist)
                    save_path = test_model.saver.save(sess,"/home/dfreeman/PythonFun/tmp/model" + str(ii) + ".ckpt")
                j+=1
    #remove the comment to get random initialization

    
    #synapses.append([synapse_0,synapse_1,synapse_2


Optimization Finished!
Accuracy: 0.9118
Error after 0 iterations:0.9118
Optimization Finished!
Accuracy: 0.9316
Error after 0 iterations:0.9316
Optimization Finished!
Accuracy: 0.94
Error after 0 iterations:0.94
Optimization Finished!
Accuracy: 0.9418
Error after 0 iterations:0.9418
Optimization Finished!
Accuracy: 0.9409
Error after 0 iterations:0.9409
Optimization Finished!
Accuracy: 0.9456
Error after 0 iterations:0.9456
Optimization Finished!
Accuracy: 0.9474
Error after 0 iterations:0.9474
Optimization Finished!
Accuracy: 0.9495
Error after 0 iterations:0.9495
Optimization Finished!
Accuracy: 0.9524
Error after 0 iterations:0.9524
Optimization Finished!
Accuracy: 0.9513
Error after 0 iterations:0.9513
Optimization Finished!
Accuracy: 0.9507
Error after 0 iterations:0.9507
Optimization Finished!
Accuracy: 0.9531
Error after 0 iterations:0.9531
Optimization Finished!
Accuracy: 0.9551
Error after 0 iterations:0.9551
Optimization Finished!
Accuracy: 0.9537
Error after 0 iterations:0.9537
Optimization Finished!
Accuracy: 0.9536
Error after 0 iterations:0.9536
Optimization Finished!
Accuracy: 0.9555
Error after 0 iterations:0.9555
Optimization Finished!
Accuracy: 0.9572
Error after 0 iterations:0.9572
Optimization Finished!
Accuracy: 0.9525
Error after 0 iterations:0.9525
Optimization Finished!
Accuracy: 0.9583
Error after 0 iterations:0.9583
Optimization Finished!
Accuracy: 0.9572
Error after 0 iterations:0.9572
Optimization Finished!
Accuracy: 0.9566
Error after 0 iterations:0.9566
Optimization Finished!
Accuracy: 0.9577
Error after 0 iterations:0.9577
Optimization Finished!
Accuracy: 0.9576
Error after 0 iterations:0.9576
Optimization Finished!
Accuracy: 0.9589
Error after 0 iterations:0.9589
Optimization Finished!
Accuracy: 0.9572
Error after 0 iterations:0.9572
Optimization Finished!
Accuracy: 0.957
Error after 0 iterations:0.957
Optimization Finished!
Accuracy: 0.9559
Error after 0 iterations:0.9559
Optimization Finished!
Accuracy: 0.9598
Error after 0 iterations:0.9598
Optimization Finished!
Accuracy: 0.9602
Error after 0 iterations:0.9602
Final params:
Optimization Finished!
Accuracy: 0.9152
Error after 0 iterations:0.9152
Optimization Finished!
Accuracy: 0.9331
Error after 0 iterations:0.9331
Optimization Finished!
Accuracy: 0.9381
Error after 0 iterations:0.9381
Optimization Finished!
Accuracy: 0.9423
Error after 0 iterations:0.9423
Optimization Finished!
Accuracy: 0.9439
Error after 0 iterations:0.9439
Optimization Finished!
Accuracy: 0.9478
Error after 0 iterations:0.9478
Optimization Finished!
Accuracy: 0.9481
Error after 0 iterations:0.9481
Optimization Finished!
Accuracy: 0.9488
Error after 0 iterations:0.9488
Optimization Finished!
Accuracy: 0.9518
Error after 0 iterations:0.9518
Optimization Finished!
Accuracy: 0.951
Error after 0 iterations:0.951
Optimization Finished!
Accuracy: 0.9521
Error after 0 iterations:0.9521
Optimization Finished!
Accuracy: 0.9545
Error after 0 iterations:0.9545
Optimization Finished!
Accuracy: 0.9532
Error after 0 iterations:0.9532
Optimization Finished!
Accuracy: 0.954
Error after 0 iterations:0.954
Optimization Finished!
Accuracy: 0.9579
Error after 0 iterations:0.9579
Optimization Finished!
Accuracy: 0.9555
Error after 0 iterations:0.9555
Optimization Finished!
Accuracy: 0.9558
Error after 0 iterations:0.9558
Optimization Finished!
Accuracy: 0.9534
Error after 0 iterations:0.9534
Optimization Finished!
Accuracy: 0.9553
Error after 0 iterations:0.9553
Optimization Finished!
Accuracy: 0.9545
Error after 0 iterations:0.9545
Optimization Finished!
Accuracy: 0.9562
Error after 0 iterations:0.9562
Optimization Finished!
Accuracy: 0.9582
Error after 0 iterations:0.9582
Optimization Finished!
Accuracy: 0.9568
Error after 0 iterations:0.9568
Optimization Finished!
Accuracy: 0.9562
Error after 0 iterations:0.9562
Optimization Finished!
Accuracy: 0.9589
Error after 0 iterations:0.9589
Optimization Finished!
Accuracy: 0.9578
Error after 0 iterations:0.9578
Optimization Finished!
Accuracy: 0.9584
Error after 0 iterations:0.9584
Optimization Finished!
Accuracy: 0.9593
Error after 0 iterations:0.9593
Optimization Finished!
Accuracy: 0.9579
Error after 0 iterations:0.9579
Optimization Finished!
Accuracy: 0.9608
Error after 0 iterations:0.9608
Final params:
Optimization Finished!
Accuracy: 0.9175
Error after 0 iterations:0.9175
Optimization Finished!
Accuracy: 0.9297
Error after 0 iterations:0.9297
Optimization Finished!
Accuracy: 0.9362
Error after 0 iterations:0.9362
Optimization Finished!
Accuracy: 0.942
Error after 0 iterations:0.942
Optimization Finished!
Accuracy: 0.9413
Error after 0 iterations:0.9413
Optimization Finished!
Accuracy: 0.9443
Error after 0 iterations:0.9443
Optimization Finished!
Accuracy: 0.9458
Error after 0 iterations:0.9458
Optimization Finished!
Accuracy: 0.9472
Error after 0 iterations:0.9472
Optimization Finished!
Accuracy: 0.9489
Error after 0 iterations:0.9489
Optimization Finished!
Accuracy: 0.9478
Error after 0 iterations:0.9478
Optimization Finished!
Accuracy: 0.9513
Error after 0 iterations:0.9513
Optimization Finished!
Accuracy: 0.9489
Error after 0 iterations:0.9489
Optimization Finished!
Accuracy: 0.9502
Error after 0 iterations:0.9502
Optimization Finished!
Accuracy: 0.9528
Error after 0 iterations:0.9528
Optimization Finished!
Accuracy: 0.9537
Error after 0 iterations:0.9537
Optimization Finished!
Accuracy: 0.9526
Error after 0 iterations:0.9526
Optimization Finished!
Accuracy: 0.9532
Error after 0 iterations:0.9532
Optimization Finished!
Accuracy: 0.9532
Error after 0 iterations:0.9532
Optimization Finished!
Accuracy: 0.953
Error after 0 iterations:0.953
Optimization Finished!
Accuracy: 0.9528
Error after 0 iterations:0.9528
Optimization Finished!
Accuracy: 0.9532
Error after 0 iterations:0.9532
Optimization Finished!
Accuracy: 0.9562
Error after 0 iterations:0.9562
Optimization Finished!
Accuracy: 0.9538
Error after 0 iterations:0.9538
Optimization Finished!
Accuracy: 0.9544
Error after 0 iterations:0.9544
Optimization Finished!
Accuracy: 0.9526
Error after 0 iterations:0.9526
Optimization Finished!
Accuracy: 0.9563
Error after 0 iterations:0.9563
Optimization Finished!
Accuracy: 0.9535
Error after 0 iterations:0.9535
Optimization Finished!
Accuracy: 0.9552
Error after 0 iterations:0.9552
Optimization Finished!
Accuracy: 0.9557
Error after 0 iterations:0.9557
Optimization Finished!
Accuracy: 0.9546
Error after 0 iterations:0.9546
Optimization Finished!
Accuracy: 0.9578
Error after 0 iterations:0.9578
Optimization Finished!
Accuracy: 0.954
Error after 0 iterations:0.954
Optimization Finished!
Accuracy: 0.9567
Error after 0 iterations:0.9567
Optimization Finished!
Accuracy: 0.9564
Error after 0 iterations:0.9564
Optimization Finished!
Accuracy: 0.9579
Error after 0 iterations:0.9579
Optimization Finished!
Accuracy: 0.9569
Error after 0 iterations:0.9569
Optimization Finished!
Accuracy: 0.9568
Error after 0 iterations:0.9568
Optimization Finished!
Accuracy: 0.958
Error after 0 iterations:0.958
Optimization Finished!
Accuracy: 0.9575
Error after 0 iterations:0.9575
Optimization Finished!
Accuracy: 0.9607
Error after 0 iterations:0.9607
Final params:

In [5]:
#Connected components search


#Used for softening the training criteria.  There's some fuzz required due to the difference in 
#training error between test and training
thresh_multiplier = 1.1


results = []

connecteddict = {}
for i1 in xrange(len(models)):
    connecteddict[i1] = 'not connected'


for i1 in xrange(len(models)):
    print i1
    for i2 in xrange(len(models)):
        
        if i2 > i1 and ((connecteddict[i1] != connecteddict[i2]) or (connecteddict[i1] == 'not connected' or connecteddict[i2] == 'not connected')) :
            #print "slow1?"
            #print i1,i2
            #print models[0]
            #print models[1]
            #print models[0].params
            #print models[1].params
            test = WeightString(models[i1].params[0],models[i1].params[1],models[i2].params[0],models[i2].params[1],1,1)

            training_threshold = thresh

            depth = 0
            d_max = 10

            #Check error between beads
            #Alg: for each bead at depth i, SGD until converged.
            #For beads with max error along path too large, add another bead between them, repeat

            
            #Keeps track of which indices to check the interpbeaderror between
            newindices = [0,1]
            
            while (depth < d_max):
                print newindices
                #print "slow2?"
                #X, y = GenTest(X,y)
                counter = 0

                for i,c in enumerate(test.ConvergedList):
                    if c == False:
                        #print "slow3?"
                        error = test.SGDBead(i, .8*training_threshold, 20)
                        #print "slow4?"
                            #if counter%5000==0:
                            #    print counter
                            #    print error
                        test.ConvergedList[i] = True

                print test.ConvergedList

                interperrors = []
                interp_bead_indices = []
                for b in xrange(len(test.AllBeads)-1):
                    if b in newindices:
                        e = InterpBeadError(test.AllBeads[b][0],test.AllBeads[b][1], test.AllBeads[b+1][0], test.AllBeads[b+1][1])

                        interperrors.append(e)
                        interp_bead_indices.append(b)
                print interperrors

                if max([ee[0] for ee in interperrors]) < thresh_multiplier*training_threshold:
                    depth = 2*d_max
                    #print test.ConvergedList
                    #print test.SpringNorm(2)
                    #print "Done!"

                else:
                    del newindices[:]
                    #Interperrors stores the maximum error on the path between beads
                    #shift index to account for added beads
                    shift = 0
                    for i, ie in enumerate(interperrors):
                        if ie[0] > thresh_multiplier*training_threshold:
                            k = interp_bead_indices[i]
                            
                            ws,bs = model_interpolate(test.AllBeads[k+shift][0],test.AllBeads[k+shift][1],\
                                                      test.AllBeads[k+shift+1][0],test.AllBeads[k+shift+1][1],\
                                                      ie[1]/20.)
                            
                            test.AllBeads.insert(k+shift+1,[ws,bs])
                            test.ConvergedList.insert(k+shift+1, False)
                            newindices.append(k+shift+1)
                            newindices.append(k+shift)
                            shift+=1
                            #print test.ConvergedList
                            #print test.SpringNorm(2)


                    #print d_max
                    depth += 1
            if depth == 2*d_max:
                results.append([i1,i2,test.SpringNorm(2),"Connected"])
                if connecteddict[i1] == 'not connected' and connecteddict[i2] == 'not connected':
                    connecteddict[i1] = i1
                    connecteddict[i2] = i1

                if connecteddict[i1] == 'not connected':
                    connecteddict[i1] = connecteddict[i2]
                else:
                    if connecteddict[i2] == 'not connected':
                        connecteddict[i2] = connecteddict[i1]
                    else:
                        if connecteddict[i1] != 'not connected' and connecteddict[i2] != 'not connected':
                            hold = connecteddict[i2]
                            connecteddict[i2] = connecteddict[i1]
                            for h in xrange(len(models)):
                                if connecteddict[h] == hold:
                                    connecteddict[h] = connecteddict[i1]
                    
            else:
                results.append([i1,i2,test.SpringNorm(2),"Disconnected"])
            #print results[-1]
	
	
	

uniquecomps = []
totalcomps = 0
for i in xrange(len(models)):
    if not (connecteddict[i] in uniquecomps):
        uniquecomps.append(connecteddict[i])
    
    if connecteddict[i] == 'not connected':
        totalcomps += 1
        
    #print i,connecteddict[i]

notconoffset = 0

if 'not connected' in uniquecomps:
    notconoffset = -1
    
print "Thresh: " + str(thresh)
print "Comps: " + str(len(uniquecomps) + notconoffset + totalcomps)



#for i in xrange(len(synapses)):
#    print connecteddict[i]

connsum = []
for r in results:
    if r[3] == "Connected":
        connsum.append(r[2])
        #print r[2]
        
print "***"
print np.average(connsum)
print np.std(connsum)


0
[0, 1]
Final bead error: 0.0315999984741
[True, True, True]
Accuracy: 0.0397999882698 	0 -0.235095 -1.66214
Accuracy: 0.0414000153542 	1 -0.196781 -1.6079
Accuracy: 0.0428000092506 	2 -0.158466 -1.55365
Accuracy: 0.0442000031471 	3 -0.120151 -1.4994
Accuracy: 0.0467000007629 	4 -0.0818361 -1.44516
Accuracy: 0.0505999922752 	5 -0.0435213 -1.39091
Accuracy: 0.0552999973297 	6 -0.00520651 -1.33666
Accuracy: 0.0587000250816 	7 0.0331083 -1.28242
Accuracy: 0.0593000054359 	8 0.0714231 -1.22817
Accuracy: 0.0631999969482 	9 0.109738 -1.17392
Accuracy: 0.065800011158 	10 0.148053 -1.11968
Accuracy: 0.0667999982834 	11 0.186368 -1.06543
Accuracy: 0.0649999976158 	12 0.224682 -1.01118
Accuracy: 0.0608999729156 	13 0.262997 -0.956938
Accuracy: 0.0551999807358 	14 0.301312 -0.902691
Accuracy: 0.0485000014305 	15 0.339627 -0.848445
Accuracy: 0.0425999760628 	16 0.377942 -0.794198
Accuracy: 0.0389999747276 	17 0.416256 -0.739951
Accuracy: 0.0349000096321 	18 0.454571 -0.685705
Accuracy: 0.0333999991417 	19 0.492886 -0.631458
Accuracy: 0.0315999984741 	0 0.531201 -0.577211
Accuracy: 0.0325999855995 	1 0.569516 -0.522965
Accuracy: 0.0347999930382 	2 0.60783 -0.468718
Accuracy: 0.0388000011444 	3 0.646145 -0.414472
Accuracy: 0.0443999767303 	4 0.68446 -0.360225
Accuracy: 0.050400018692 	5 0.722775 -0.305978
Accuracy: 0.056500017643 	6 0.76109 -0.251732
Accuracy: 0.0637000203133 	7 0.799404 -0.197485
Accuracy: 0.069100022316 	8 0.837719 -0.143238
Accuracy: 0.0738999843597 	9 0.876034 -0.0889918
Accuracy: 0.0756999850273 	10 0.914349 -0.0347452
Accuracy: 0.0760999917984 	11 0.952664 0.0195015
Accuracy: 0.0758000016212 	12 0.990978 0.0737481
Accuracy: 0.0705000162125 	13 1.02929 0.127995
Accuracy: 0.0638999938965 	14 1.06761 0.182241
Accuracy: 0.0583999752998 	15 1.10592 0.236488
Accuracy: 0.0530999898911 	16 1.14424 0.290735
Accuracy: 0.0479000210762 	17 1.18255 0.344981
Accuracy: 0.04390001297 	18 1.22087 0.399228
Accuracy: 0.0408999919891 	19 1.25918 0.453475
[(0.06679999828338623, 11), (0.076099991798400879, 11)]
[1, 0, 3, 2]
Final bead error: 0.0315999984741
Final bead error: 0.0318999886513
[True, True, True, True, True]
Accuracy: 0.0397999882698 	0 -0.235095 -1.66214
Accuracy: 0.0399000048637 	1 -0.214022 -1.63231
Accuracy: 0.0393000245094 	2 -0.192949 -1.60247
Accuracy: 0.0390999913216 	3 -0.171876 -1.57264
Accuracy: 0.0386999845505 	4 -0.150803 -1.5428
Accuracy: 0.0385000109673 	5 -0.12973 -1.51297
Accuracy: 0.0382999777794 	6 -0.108656 -1.48313
Accuracy: 0.0382999777794 	7 -0.0875834 -1.45329
Accuracy: 0.0379999876022 	8 -0.0665102 -1.42346
Accuracy: 0.037299990654 	9 -0.0454371 -1.39362
Accuracy: 0.0378999710083 	10 -0.0243639 -1.36379
Accuracy: 0.0367000102997 	11 -0.00329077 -1.33395
Accuracy: 0.0370000004768 	12 0.0177824 -1.30412
Accuracy: 0.0364000201225 	13 0.0388555 -1.27428
Accuracy: 0.0365999937057 	14 0.0599286 -1.24445
Accuracy: 0.0350999832153 	15 0.0810018 -1.21461
Accuracy: 0.0346999764442 	16 0.102075 -1.18477
Accuracy: 0.0336999893188 	17 0.123148 -1.15494
Accuracy: 0.0340999960899 	18 0.144221 -1.1251
Accuracy: 0.0324000120163 	19 0.165294 -1.09527
Accuracy: 0.0315999984741 	0 0.186368 -1.06543
Accuracy: 0.0310999751091 	1 0.203609 -1.04102
Accuracy: 0.0307999849319 	2 0.220851 -1.01661
Accuracy: 0.0310000181198 	3 0.238093 -0.992198
Accuracy: 0.0321999788284 	4 0.255334 -0.967787
Accuracy: 0.031300008297 	5 0.272576 -0.943376
Accuracy: 0.0310999751091 	6 0.289818 -0.918965
Accuracy: 0.0304999947548 	7 0.307059 -0.894554
Accuracy: 0.0303000211716 	8 0.324301 -0.870143
Accuracy: 0.0302000045776 	9 0.341542 -0.845732
Accuracy: 0.0302000045776 	10 0.358784 -0.821321
Accuracy: 0.0296000242233 	11 0.376026 -0.79691
Accuracy: 0.0300999879837 	12 0.393267 -0.772499
Accuracy: 0.031300008297 	13 0.410509 -0.748088
Accuracy: 0.0317000150681 	14 0.427751 -0.723677
Accuracy: 0.0315999984741 	15 0.444992 -0.699266
Accuracy: 0.0314000248909 	16 0.462234 -0.674855
Accuracy: 0.0317999720573 	17 0.479476 -0.650444
Accuracy: 0.0317000150681 	18 0.496717 -0.626033
Accuracy: 0.0315999984741 	19 0.513959 -0.601622
Accuracy: 0.0315999984741 	0 0.531201 -0.577211
Accuracy: 0.0314999818802 	1 0.552274 -0.547376
Accuracy: 0.0320000052452 	2 0.573347 -0.51754
Accuracy: 0.0325999855995 	3 0.59442 -0.487705
Accuracy: 0.0333999991417 	4 0.615493 -0.457869
Accuracy: 0.0340999960899 	5 0.636567 -0.428033
Accuracy: 0.0346999764442 	6 0.65764 -0.398198
Accuracy: 0.035000026226 	7 0.678713 -0.368362
Accuracy: 0.0357999801636 	8 0.699786 -0.338526
Accuracy: 0.034500002861 	9 0.720859 -0.308691
Accuracy: 0.0347999930382 	10 0.741932 -0.278855
Accuracy: 0.0350999832153 	11 0.763005 -0.249019
Accuracy: 0.0347999930382 	12 0.784078 -0.219184
Accuracy: 0.0338000059128 	13 0.805152 -0.189348
Accuracy: 0.0328999757767 	14 0.826225 -0.159512
Accuracy: 0.0328000187874 	15 0.847298 -0.129677
Accuracy: 0.0324000120163 	16 0.868371 -0.0998411
Accuracy: 0.0317000150681 	17 0.889444 -0.0700054
Accuracy: 0.0317999720573 	18 0.910517 -0.0401698
Accuracy: 0.0314000248909 	19 0.93159 -0.0103341
Accuracy: 0.0318999886513 	0 0.952664 0.0195015
Accuracy: 0.0310999751091 	1 0.969905 0.0439125
Accuracy: 0.0317000150681 	2 0.987147 0.0683235
Accuracy: 0.0317999720573 	3 1.00439 0.0927345
Accuracy: 0.0322999954224 	4 1.02163 0.117145
Accuracy: 0.0325999855995 	5 1.03887 0.141556
Accuracy: 0.0328999757767 	6 1.05611 0.165967
Accuracy: 0.0329999923706 	7 1.07336 0.190378
Accuracy: 0.0335999727249 	8 1.0906 0.214789
Accuracy: 0.0343000292778 	9 1.10784 0.2392
Accuracy: 0.0343999862671 	10 1.12508 0.263611
Accuracy: 0.034600019455 	11 1.14232 0.288022
Accuracy: 0.0347999930382 	12 1.15956 0.312433
Accuracy: 0.0353000164032 	13 1.17681 0.336844
Accuracy: 0.0357999801636 	14 1.19405 0.361255
Accuracy: 0.0368000268936 	15 1.21129 0.385666
Accuracy: 0.0371000170708 	16 1.22853 0.410077
Accuracy: 0.0378999710083 	17 1.24577 0.434488
Accuracy: 0.0386000275612 	18 1.26301 0.458899
Accuracy: 0.0383999943733 	19 1.28026 0.48331
[(0.039900004863739014, 1), (0.032199978828430176, 4), (0.035799980163574219, 8), (0.038600027561187744, 18)]
[0, 1]
Final bead error: 0.0318999886513
[True, True, True]
Accuracy: 0.0397999882698 	0 -0.235095 -1.66214
Accuracy: 0.041100025177 	1 -0.2126 -1.61378
Accuracy: 0.0429999828339 	2 -0.190104 -1.56542
Accuracy: 0.0475000143051 	3 -0.167609 -1.51706
Accuracy: 0.0508999824524 	4 -0.145114 -1.4687
Accuracy: 0.0547000169754 	5 -0.122618 -1.42034
Accuracy: 0.0608999729156 	6 -0.100123 -1.37197
Accuracy: 0.0665000081062 	7 -0.0776271 -1.32361
Accuracy: 0.0722000002861 	8 -0.0551317 -1.27525
Accuracy: 0.0785999894142 	9 -0.0326362 -1.22689
Accuracy: 0.0809999704361 	10 -0.0101407 -1.17853
Accuracy: 0.0795999765396 	11 0.0123547 -1.13017
Accuracy: 0.0763000249863 	12 0.0348502 -1.0818
Accuracy: 0.0702000260353 	13 0.0573456 -1.03344
Accuracy: 0.0612000226974 	14 0.0798411 -0.985082
Accuracy: 0.0547999739647 	15 0.102337 -0.93672
Accuracy: 0.0464000105858 	16 0.124832 -0.888359
Accuracy: 0.0401999950409 	17 0.147327 -0.839997
Accuracy: 0.0360999703407 	18 0.169823 -0.791635
Accuracy: 0.0336999893188 	19 0.192318 -0.743274
Accuracy: 0.0318999886513 	0 0.214814 -0.694912
Accuracy: 0.0321000218391 	1 0.237309 -0.646551
Accuracy: 0.0367000102997 	2 0.259805 -0.598189
Accuracy: 0.0404000282288 	3 0.2823 -0.549827
Accuracy: 0.0447000265121 	4 0.304796 -0.501466
Accuracy: 0.0505999922752 	5 0.327291 -0.453104
Accuracy: 0.0583000183105 	6 0.349787 -0.404742
Accuracy: 0.0643000006676 	7 0.372282 -0.356381
Accuracy: 0.0701000094414 	8 0.394778 -0.308019
Accuracy: 0.0730000138283 	9 0.417273 -0.259658
Accuracy: 0.0738999843597 	10 0.439768 -0.211296
Accuracy: 0.0738999843597 	11 0.462264 -0.162934
Accuracy: 0.0701000094414 	12 0.484759 -0.114573
Accuracy: 0.0651000142097 	13 0.507255 -0.0662112
Accuracy: 0.0600000023842 	14 0.52975 -0.0178496
Accuracy: 0.0530999898911 	15 0.552246 0.030512
Accuracy: 0.0480999946594 	16 0.574741 0.0788736
Accuracy: 0.0450000166893 	17 0.597237 0.127235
Accuracy: 0.0436000227928 	18 0.619732 0.175597
Accuracy: 0.0411999821663 	19 0.642228 0.223958
[(0.080999970436096191, 10), (0.073899984359741211, 10)]
[1, 0, 3, 2]
Final bead error: 0.031300008297
Final bead error: 0.0304999947548
[True, True, True, True, True]
Accuracy: 0.0397999882698 	0 -0.235095 -1.66214
Accuracy: 0.0396999716759 	1 -0.223848 -1.63796
Accuracy: 0.0394999980927 	2 -0.2126 -1.61378
Accuracy: 0.0383999943733 	3 -0.201352 -1.5896
Accuracy: 0.0383999943733 	4 -0.190104 -1.56542
Accuracy: 0.0376999974251 	5 -0.178857 -1.54124
Accuracy: 0.0364999771118 	6 -0.167609 -1.51706
Accuracy: 0.0360000133514 	7 -0.156361 -1.49288
Accuracy: 0.0360999703407 	8 -0.145114 -1.4687
Accuracy: 0.0356000065804 	9 -0.133866 -1.44452
Accuracy: 0.0349000096321 	10 -0.122618 -1.42034
Accuracy: 0.0349000096321 	11 -0.11137 -1.39616
Accuracy: 0.0339000225067 	12 -0.100123 -1.37197
Accuracy: 0.0327000021935 	13 -0.0888749 -1.34779
Accuracy: 0.0318999886513 	14 -0.0776271 -1.32361
Accuracy: 0.031300008297 	15 -0.0663794 -1.29943
Accuracy: 0.031300008297 	16 -0.0551317 -1.27525
Accuracy: 0.0321999788284 	17 -0.0438839 -1.25107
Accuracy: 0.0322999954224 	18 -0.0326362 -1.22689
Accuracy: 0.0309000015259 	19 -0.0213885 -1.20271
Accuracy: 0.031300008297 	0 -0.0101407 -1.17853
Accuracy: 0.031199991703 	1 0.00110698 -1.15435
Accuracy: 0.0321999788284 	2 0.0123547 -1.13017
Accuracy: 0.0320000052452 	3 0.0236024 -1.10599
Accuracy: 0.0325000286102 	4 0.0348502 -1.0818
Accuracy: 0.0327000021935 	5 0.0460979 -1.05762
Accuracy: 0.0339000225067 	6 0.0573456 -1.03344
Accuracy: 0.0338000059128 	7 0.0685934 -1.00926
Accuracy: 0.0333999991417 	8 0.0798411 -0.985082
Accuracy: 0.033999979496 	9 0.0910888 -0.960901
Accuracy: 0.0333999991417 	10 0.102337 -0.93672
Accuracy: 0.0336999893188 	11 0.113584 -0.912539
Accuracy: 0.0335000157356 	12 0.124832 -0.888359
Accuracy: 0.0332000255585 	13 0.13608 -0.864178
Accuracy: 0.0325999855995 	14 0.147327 -0.839997
Accuracy: 0.0328999757767 	15 0.158575 -0.815816
Accuracy: 0.0328000187874 	16 0.169823 -0.791635
Accuracy: 0.0333999991417 	17 0.181071 -0.767455
Accuracy: 0.0325999855995 	18 0.192318 -0.743274
Accuracy: 0.0321000218391 	19 0.203566 -0.719093
Accuracy: 0.0318999886513 	0 0.214814 -0.694912
Accuracy: 0.0310999751091 	1 0.226062 -0.670731
Accuracy: 0.0314000248909 	2 0.237309 -0.646551
Accuracy: 0.0314999818802 	3 0.248557 -0.62237
Accuracy: 0.0307999849319 	4 0.259805 -0.598189
Accuracy: 0.0296999812126 	5 0.271053 -0.574008
Accuracy: 0.0291000008583 	6 0.2823 -0.549827
Accuracy: 0.0296999812126 	7 0.293548 -0.525647
Accuracy: 0.0303999781609 	8 0.304796 -0.501466
Accuracy: 0.0304999947548 	9 0.316043 -0.477285
Accuracy: 0.0317999720573 	10 0.327291 -0.453104
Accuracy: 0.0318999886513 	11 0.338539 -0.428923
Accuracy: 0.0321999788284 	12 0.349787 -0.404742
Accuracy: 0.031199991703 	13 0.361034 -0.380562
Accuracy: 0.0320000052452 	14 0.372282 -0.356381
Accuracy: 0.0322999954224 	15 0.38353 -0.3322
Accuracy: 0.0317000150681 	16 0.394778 -0.308019
Accuracy: 0.0314999818802 	17 0.406025 -0.283838
Accuracy: 0.0310999751091 	18 0.417273 -0.259658
Accuracy: 0.0297999978065 	19 0.428521 -0.235477
Accuracy: 0.0304999947548 	0 0.439768 -0.211296
Accuracy: 0.0303000211716 	1 0.451016 -0.187115
Accuracy: 0.0307000279427 	2 0.462264 -0.162934
Accuracy: 0.0317999720573 	3 0.473512 -0.138754
Accuracy: 0.0314999818802 	4 0.484759 -0.114573
Accuracy: 0.0331000089645 	5 0.496007 -0.090392
Accuracy: 0.0332000255585 	6 0.507255 -0.0662112
Accuracy: 0.0343000292778 	7 0.518503 -0.0420304
Accuracy: 0.0339000225067 	8 0.52975 -0.0178496
Accuracy: 0.0346999764442 	9 0.540998 0.00633118
Accuracy: 0.035000026226 	10 0.552246 0.030512
Accuracy: 0.0353000164032 	11 0.563494 0.0546928
Accuracy: 0.0356000065804 	12 0.574741 0.0788736
Accuracy: 0.0358999967575 	13 0.585989 0.103054
Accuracy: 0.0365999937057 	14 0.597237 0.127235
Accuracy: 0.0370000004768 	15 0.608484 0.151416
Accuracy: 0.0370000004768 	16 0.619732 0.175597
Accuracy: 0.037299990654 	17 0.63098 0.199778
Accuracy: 0.0382999777794 	18 0.642228 0.223958
Accuracy: 0.0386999845505 	19 0.653475 0.248139
[(0.039799988269805908, 0), (0.033999979496002197, 9), (0.032299995422363281, 15), (0.038699984550476074, 19)]
1
2
Thresh: 0.04
Comps: 1
***
174.168085814
3.02714133263

In [25]:
models


Out[25]:
[<__main__.multilayer_perceptron instance at 0x7fc3f3ce0518>,
 <__main__.multilayer_perceptron instance at 0x7fc3f3ce00e0>,
 <__main__.multilayer_perceptron instance at 0x7fc3f366cb00>]

In [53]:
len(test.AllBeads)


Out[53]:
6

In [77]:
for b in xrange(len(test.AllBeads)-1):
    e = InterpBeadError(test.AllBeads[b][0],test.AllBeads[b][1], test.AllBeads[b+1][0], test.AllBeads[b+1][1])


Accuracy: 0.0494999885559 	0 0.15941 1.24111
Accuracy: 0.0486999750137 	1 0.16302 1.23706
Accuracy: 0.0487999916077 	2 0.16663 1.23302
Accuracy: 0.0494999885559 	3 0.17024 1.22898
Accuracy: 0.0493000149727 	4 0.173849 1.22494
Accuracy: 0.0496000051498 	5 0.177459 1.22089
Accuracy: 0.0490999817848 	6 0.181069 1.21685
Accuracy: 0.0490999817848 	7 0.184679 1.21281
Accuracy: 0.0486999750137 	8 0.188288 1.20877
Accuracy: 0.0483000278473 	9 0.191898 1.20473
Accuracy: 0.0482000112534 	10 0.195508 1.20068
Accuracy: 0.0483999848366 	11 0.199118 1.19664
Accuracy: 0.0476999878883 	12 0.202728 1.1926
Accuracy: 0.0475999712944 	13 0.206337 1.18856
Accuracy: 0.0476999878883 	14 0.209947 1.18451
Accuracy: 0.0475999712944 	15 0.213557 1.18047
Accuracy: 0.0475000143051 	16 0.217167 1.17643
Accuracy: 0.0472999811172 	17 0.220776 1.17239
Accuracy: 0.047100007534 	18 0.224386 1.16835
Accuracy: 0.047200024128 	19 0.227996 1.1643
Accuracy: 0.047200024128 	20 0.231606 1.16026
Accuracy: 0.0468999743462 	21 0.235215 1.15622
Accuracy: 0.047200024128 	22 0.238825 1.15218
Accuracy: 0.047200024128 	23 0.242435 1.14813
Accuracy: 0.0473999977112 	24 0.246045 1.14409
Accuracy: 0.047200024128 	25 0.249654 1.14005
Accuracy: 0.047200024128 	26 0.253264 1.13601
Accuracy: 0.047200024128 	27 0.256874 1.13196
Accuracy: 0.0468999743462 	28 0.260484 1.12792
Accuracy: 0.0468000173569 	29 0.264093 1.12388
Accuracy: 0.0460000038147 	30 0.267703 1.11984
Accuracy: 0.0457999706268 	31 0.271313 1.1158
Accuracy: 0.0458999872208 	32 0.274923 1.11175
Accuracy: 0.0454000234604 	33 0.278532 1.10771
Accuracy: 0.0453000068665 	34 0.282142 1.10367
Accuracy: 0.0450000166893 	35 0.285752 1.09963
Accuracy: 0.0450000166893 	36 0.289362 1.09558
Accuracy: 0.0450999736786 	37 0.292971 1.09154
Accuracy: 0.0450999736786 	38 0.296581 1.0875
Accuracy: 0.0449000000954 	39 0.300191 1.08346
Accuracy: 0.0451999902725 	40 0.303801 1.07942
Accuracy: 0.0450000166893 	41 0.30741 1.07537
Accuracy: 0.0447999835014 	42 0.31102 1.07133
Accuracy: 0.0450000166893 	43 0.31463 1.06729
Accuracy: 0.0447999835014 	44 0.31824 1.06325
Accuracy: 0.0447999835014 	45 0.321849 1.0592
Accuracy: 0.0447000265121 	46 0.325459 1.05516
Accuracy: 0.0447000265121 	47 0.329069 1.05112
Accuracy: 0.0450000166893 	48 0.332679 1.04708
Accuracy: 0.0447000265121 	49 0.336288 1.04303
Accuracy: 0.04390001297 	50 0.339898 1.03899
Accuracy: 0.0436999797821 	51 0.343508 1.03495
Accuracy: 0.0436999797821 	52 0.347118 1.03091
Accuracy: 0.0440000295639 	53 0.350727 1.02687
Accuracy: 0.043799996376 	54 0.354337 1.02282
Accuracy: 0.0430999994278 	55 0.357947 1.01878
Accuracy: 0.0426999926567 	56 0.361557 1.01474
Accuracy: 0.0419999957085 	57 0.365166 1.0107
Accuracy: 0.0421000123024 	58 0.368776 1.00665
Accuracy: 0.0418000221252 	59 0.372386 1.00261
Accuracy: 0.0419999957085 	60 0.375996 0.99857
Accuracy: 0.0422000288963 	61 0.379606 0.994528
Accuracy: 0.0419999957085 	62 0.383215 0.990485
Accuracy: 0.0422999858856 	63 0.386825 0.986443
Accuracy: 0.0418000221252 	64 0.390435 0.982401
Accuracy: 0.041100025177 	65 0.394045 0.978359
Accuracy: 0.0407999753952 	66 0.397654 0.974316
Accuracy: 0.0408999919891 	67 0.401264 0.970274
Accuracy: 0.040499985218 	68 0.404874 0.966232
Accuracy: 0.0401999950409 	69 0.408484 0.96219
Accuracy: 0.0396000146866 	70 0.412093 0.958147
Accuracy: 0.0390999913216 	71 0.415703 0.954105
Accuracy: 0.0393999814987 	72 0.419313 0.950063
Accuracy: 0.0390999913216 	73 0.422923 0.946021
Accuracy: 0.0390999913216 	74 0.426532 0.941978
Accuracy: 0.0389999747276 	75 0.430142 0.937936
Accuracy: 0.0389999747276 	76 0.433752 0.933894
Accuracy: 0.0388000011444 	77 0.437362 0.929852
Accuracy: 0.0389999747276 	78 0.440971 0.925809
Accuracy: 0.0392000079155 	79 0.444581 0.921767
Accuracy: 0.0396000146866 	80 0.448191 0.917725
Accuracy: 0.0399000048637 	81 0.451801 0.913682
Accuracy: 0.0396999716759 	82 0.45541 0.90964
Accuracy: 0.0392000079155 	83 0.45902 0.905598
Accuracy: 0.0392000079155 	84 0.46263 0.901556
Accuracy: 0.0389999747276 	85 0.46624 0.897513
Accuracy: 0.0389999747276 	86 0.469849 0.893471
Accuracy: 0.0389000177383 	87 0.473459 0.889429
Accuracy: 0.0389000177383 	88 0.477069 0.885387
Accuracy: 0.0396000146866 	89 0.480679 0.881344
Accuracy: 0.0396000146866 	90 0.484288 0.877302
Accuracy: 0.0389000177383 	91 0.487898 0.87326
Accuracy: 0.0389999747276 	92 0.491508 0.869218
Accuracy: 0.0390999913216 	93 0.495118 0.865175
Accuracy: 0.0390999913216 	94 0.498727 0.861133
Accuracy: 0.0392000079155 	95 0.502337 0.857091
Accuracy: 0.0389000177383 	96 0.505947 0.853049
Accuracy: 0.0389000177383 	97 0.509557 0.849006
Accuracy: 0.0386000275612 	98 0.513166 0.844964
Accuracy: 0.0385000109673 	99 0.516776 0.840922
Accuracy: 0.0386999845505 	0 0.520386 0.836879
Accuracy: 0.0389000177383 	1 0.523461 0.833436
Accuracy: 0.0390999913216 	2 0.526536 0.829993
Accuracy: 0.0389999747276 	3 0.529611 0.826549
Accuracy: 0.0389000177383 	4 0.532686 0.823106
Accuracy: 0.0388000011444 	5 0.535761 0.819662
Accuracy: 0.0389000177383 	6 0.538836 0.816219
Accuracy: 0.0390999913216 	7 0.541911 0.812776
Accuracy: 0.0389999747276 	8 0.544986 0.809332
Accuracy: 0.0392000079155 	9 0.548061 0.805889
Accuracy: 0.0392000079155 	10 0.551136 0.802445
Accuracy: 0.0392000079155 	11 0.554211 0.799002
Accuracy: 0.0396999716759 	12 0.557286 0.795559
Accuracy: 0.0396000146866 	13 0.560361 0.792115
Accuracy: 0.0396999716759 	14 0.563436 0.788672
Accuracy: 0.0392000079155 	15 0.566511 0.785228
Accuracy: 0.0393000245094 	16 0.569586 0.781785
Accuracy: 0.0393999814987 	17 0.572661 0.778341
Accuracy: 0.0396999716759 	18 0.575736 0.774898
Accuracy: 0.0397999882698 	19 0.578811 0.771455
Accuracy: 0.0399000048637 	20 0.581886 0.768011
Accuracy: 0.040099978447 	21 0.58496 0.764568
Accuracy: 0.040600001812 	22 0.588035 0.761124
Accuracy: 0.0410000085831 	23 0.59111 0.757681
Accuracy: 0.0407999753952 	24 0.594185 0.754238
Accuracy: 0.040499985218 	25 0.59726 0.750794
Accuracy: 0.0408999919891 	26 0.600335 0.747351
Accuracy: 0.040600001812 	27 0.60341 0.743907
Accuracy: 0.0403000116348 	28 0.606485 0.740464
Accuracy: 0.0401999950409 	29 0.60956 0.737021
Accuracy: 0.0401999950409 	30 0.612635 0.733577
Accuracy: 0.0407999753952 	31 0.61571 0.730134
Accuracy: 0.041100025177 	32 0.618785 0.72669
Accuracy: 0.0411999821663 	33 0.62186 0.723247
Accuracy: 0.0412999987602 	34 0.624935 0.719803
Accuracy: 0.041100025177 	35 0.62801 0.71636
Accuracy: 0.0414000153542 	36 0.631085 0.712917
Accuracy: 0.0415999889374 	37 0.63416 0.709473
Accuracy: 0.0417000055313 	38 0.637235 0.70603
Accuracy: 0.0418000221252 	39 0.64031 0.702586
Accuracy: 0.0417000055313 	40 0.643385 0.699143
Accuracy: 0.0419999957085 	41 0.64646 0.6957
Accuracy: 0.0419999957085 	42 0.649535 0.692256
Accuracy: 0.0421000123024 	43 0.65261 0.688813
Accuracy: 0.0422000288963 	44 0.655685 0.685369
Accuracy: 0.0429999828339 	45 0.65876 0.681926
Accuracy: 0.0432000160217 	46 0.661835 0.678483
Accuracy: 0.0435000061989 	47 0.66491 0.675039
Accuracy: 0.043299973011 	48 0.667985 0.671596
Accuracy: 0.0429000258446 	49 0.67106 0.668152
Accuracy: 0.0426999926567 	50 0.674135 0.664709
Accuracy: 0.0425000190735 	51 0.67721 0.661265
Accuracy: 0.0429999828339 	52 0.680285 0.657822
Accuracy: 0.0432000160217 	53 0.68336 0.654379
Accuracy: 0.0435000061989 	54 0.686435 0.650935
Accuracy: 0.0429999828339 	55 0.68951 0.647492
Accuracy: 0.0425999760628 	56 0.692585 0.644048
Accuracy: 0.0425000190735 	57 0.69566 0.640605
Accuracy: 0.0422999858856 	58 0.698735 0.637162
Accuracy: 0.0425000190735 	59 0.70181 0.633718
Accuracy: 0.0422999858856 	60 0.704885 0.630275
Accuracy: 0.0422999858856 	61 0.70796 0.626831
Accuracy: 0.0422000288963 	62 0.711035 0.623388
Accuracy: 0.0419999957085 	63 0.71411 0.619945
Accuracy: 0.0421000123024 	64 0.717184 0.616501
Accuracy: 0.0418000221252 	65 0.720259 0.613058
Accuracy: 0.0419999957085 	66 0.723334 0.609614
Accuracy: 0.0418999791145 	67 0.726409 0.606171
Accuracy: 0.0414000153542 	68 0.729484 0.602728
Accuracy: 0.0414000153542 	69 0.732559 0.599284
Accuracy: 0.0411999821663 	70 0.735634 0.595841
Accuracy: 0.0410000085831 	71 0.738709 0.592397
Accuracy: 0.0410000085831 	72 0.741784 0.588954
Accuracy: 0.0410000085831 	73 0.744859 0.58551
Accuracy: 0.0410000085831 	74 0.747934 0.582067
Accuracy: 0.0404000282288 	75 0.751009 0.578624
Accuracy: 0.0403000116348 	76 0.754084 0.57518
Accuracy: 0.0399000048637 	77 0.757159 0.571737
Accuracy: 0.0397999882698 	78 0.760234 0.568293
Accuracy: 0.0400000214577 	79 0.763309 0.56485
Accuracy: 0.0400000214577 	80 0.766384 0.561407
Accuracy: 0.0397999882698 	81 0.769459 0.557963
Accuracy: 0.0396000146866 	82 0.772534 0.55452
Accuracy: 0.0397999882698 	83 0.775609 0.551076
Accuracy: 0.0399000048637 	84 0.778684 0.547633
Accuracy: 0.040099978447 	85 0.781759 0.54419
Accuracy: 0.040099978447 	86 0.784834 0.540746
Accuracy: 0.0396000146866 	87 0.787909 0.537303
Accuracy: 0.0396000146866 	88 0.790984 0.533859
Accuracy: 0.0396000146866 	89 0.794059 0.530416
Accuracy: 0.0393000245094 	90 0.797134 0.526973
Accuracy: 0.0390999913216 	91 0.800209 0.523529
Accuracy: 0.0390999913216 	92 0.803284 0.520086
Accuracy: 0.0389000177383 	93 0.806359 0.516642
Accuracy: 0.0390999913216 	94 0.809434 0.513199
Accuracy: 0.0392000079155 	95 0.812509 0.509755
Accuracy: 0.0392000079155 	96 0.815584 0.506312
Accuracy: 0.0390999913216 	97 0.818659 0.502869
Accuracy: 0.0388000011444 	98 0.821734 0.499425
Accuracy: 0.0388000011444 	99 0.824809 0.495982
Accuracy: 0.0389999747276 	0 0.827884 0.492538
Accuracy: 0.0389999747276 	1 0.830691 0.489394
Accuracy: 0.0389000177383 	2 0.833499 0.48625
Accuracy: 0.0386000275612 	3 0.836306 0.483106
Accuracy: 0.0388000011444 	4 0.839114 0.479962
Accuracy: 0.0382999777794 	5 0.841922 0.476818
Accuracy: 0.0389000177383 	6 0.844729 0.473674
Accuracy: 0.0390999913216 	7 0.847537 0.470531
Accuracy: 0.0390999913216 	8 0.850344 0.467387
Accuracy: 0.0392000079155 	9 0.853152 0.464243
Accuracy: 0.0393999814987 	10 0.85596 0.461099
Accuracy: 0.0393999814987 	11 0.858767 0.457955
Accuracy: 0.0397999882698 	12 0.861575 0.454811
Accuracy: 0.0397999882698 	13 0.864382 0.451667
Accuracy: 0.0397999882698 	14 0.86719 0.448523
Accuracy: 0.0396000146866 	15 0.869997 0.445379
Accuracy: 0.0396000146866 	16 0.872805 0.442235
Accuracy: 0.0396000146866 	17 0.875613 0.439091
Accuracy: 0.0397999882698 	18 0.87842 0.435947
Accuracy: 0.0401999950409 	19 0.881228 0.432803
Accuracy: 0.040499985218 	20 0.884035 0.429659
Accuracy: 0.040499985218 	21 0.886843 0.426515
Accuracy: 0.0407999753952 	22 0.889651 0.423371
Accuracy: 0.0407000184059 	23 0.892458 0.420227
Accuracy: 0.0404000282288 	24 0.895266 0.417083
Accuracy: 0.0407000184059 	25 0.898073 0.413939
Accuracy: 0.0407999753952 	26 0.900881 0.410795
Accuracy: 0.0408999919891 	27 0.903688 0.407651
Accuracy: 0.0410000085831 	28 0.906496 0.404507
Accuracy: 0.0412999987602 	29 0.909304 0.401363
Accuracy: 0.041100025177 	30 0.912111 0.398219
Accuracy: 0.0407999753952 	31 0.914919 0.395075
Accuracy: 0.0407999753952 	32 0.917726 0.391931
Accuracy: 0.0408999919891 	33 0.920534 0.388787
Accuracy: 0.0404000282288 	34 0.923342 0.385643
Accuracy: 0.0401999950409 	35 0.926149 0.382499
Accuracy: 0.0403000116348 	36 0.928957 0.379355
Accuracy: 0.0400000214577 	37 0.931764 0.376211
Accuracy: 0.040099978447 	38 0.934572 0.373067
Accuracy: 0.040099978447 	39 0.93738 0.369923
Accuracy: 0.0396000146866 	40 0.940187 0.366779
Accuracy: 0.0396999716759 	41 0.942995 0.363635
Accuracy: 0.0396999716759 	42 0.945802 0.360491
Accuracy: 0.0397999882698 	43 0.94861 0.357347
Accuracy: 0.0399000048637 	44 0.951417 0.354203
Accuracy: 0.0401999950409 	45 0.954225 0.351059
Accuracy: 0.0401999950409 	46 0.957033 0.347915
Accuracy: 0.0400000214577 	47 0.95984 0.344771
Accuracy: 0.0394999980927 	48 0.962648 0.341627
Accuracy: 0.0394999980927 	49 0.965455 0.338483
Accuracy: 0.0399000048637 	50 0.968263 0.335339
Accuracy: 0.0400000214577 	51 0.971071 0.332195
Accuracy: 0.0401999950409 	52 0.973878 0.329051
Accuracy: 0.0400000214577 	53 0.976686 0.325907
Accuracy: 0.040099978447 	54 0.979493 0.322763
Accuracy: 0.0401999950409 	55 0.982301 0.319619
Accuracy: 0.0401999950409 	56 0.985108 0.316475
Accuracy: 0.040600001812 	57 0.987916 0.313331
Accuracy: 0.040499985218 	58 0.990724 0.310187
Accuracy: 0.040499985218 	59 0.993531 0.307043
Accuracy: 0.040499985218 	60 0.996339 0.303899
Accuracy: 0.0404000282288 	61 0.999146 0.300755
Accuracy: 0.0404000282288 	62 1.00195 0.297611
Accuracy: 0.040600001812 	63 1.00476 0.294467
Accuracy: 0.0407000184059 	64 1.00757 0.291323
Accuracy: 0.0407000184059 	65 1.01038 0.288179
Accuracy: 0.040600001812 	66 1.01318 0.285035
Accuracy: 0.0403000116348 	67 1.01599 0.281891
Accuracy: 0.0401999950409 	68 1.0188 0.278747
Accuracy: 0.0397999882698 	69 1.02161 0.275604
Accuracy: 0.0396000146866 	70 1.02441 0.27246
Accuracy: 0.0393999814987 	71 1.02722 0.269316
Accuracy: 0.0394999980927 	72 1.03003 0.266172
Accuracy: 0.0393999814987 	73 1.03284 0.263028
Accuracy: 0.0390999913216 	74 1.03565 0.259884
Accuracy: 0.0389999747276 	75 1.03845 0.25674
Accuracy: 0.0386999845505 	76 1.04126 0.253596
Accuracy: 0.0381000041962 	77 1.04407 0.250452
Accuracy: 0.0382999777794 	78 1.04688 0.247308
Accuracy: 0.0385000109673 	79 1.04968 0.244164
Accuracy: 0.0385000109673 	80 1.05249 0.24102
Accuracy: 0.0382999777794 	81 1.0553 0.237876
Accuracy: 0.0388000011444 	82 1.05811 0.234732
Accuracy: 0.0386000275612 	83 1.06091 0.231588
Accuracy: 0.0383999943733 	84 1.06372 0.228444
Accuracy: 0.0381000041962 	85 1.06653 0.2253
Accuracy: 0.037800014019 	86 1.06934 0.222156
Accuracy: 0.0376999974251 	87 1.07214 0.219012
Accuracy: 0.0376999974251 	88 1.07495 0.215868
Accuracy: 0.037800014019 	89 1.07776 0.212724
Accuracy: 0.037800014019 	90 1.08057 0.20958
Accuracy: 0.0379999876022 	91 1.08337 0.206436
Accuracy: 0.0381000041962 	92 1.08618 0.203292
Accuracy: 0.0382999777794 	93 1.08899 0.200148
Accuracy: 0.0385000109673 	94 1.0918 0.197004
Accuracy: 0.0386000275612 	95 1.0946 0.19386
Accuracy: 0.0386999845505 	96 1.09741 0.190716
Accuracy: 0.0388000011444 	97 1.10022 0.187572
Accuracy: 0.0392000079155 	98 1.10303 0.184428
Accuracy: 0.0389000177383 	99 1.10583 0.181284
Accuracy: 0.0393999814987 	0 1.10864 0.17814
Accuracy: 0.0394999980927 	1 1.11252 0.173798
Accuracy: 0.0396999716759 	2 1.1164 0.169457
Accuracy: 0.0396999716759 	3 1.12027 0.165115
Accuracy: 0.0404000282288 	4 1.12415 0.160773
Accuracy: 0.0408999919891 	5 1.12803 0.156432
Accuracy: 0.041100025177 	6 1.13191 0.15209
Accuracy: 0.0412999987602 	7 1.13578 0.147748
Accuracy: 0.0415999889374 	8 1.13966 0.143406
Accuracy: 0.0412999987602 	9 1.14354 0.139065
Accuracy: 0.0419999957085 	10 1.14741 0.134723
Accuracy: 0.0422999858856 	11 1.15129 0.130381
Accuracy: 0.0425999760628 	12 1.15517 0.12604
Accuracy: 0.0425999760628 	13 1.15905 0.121698
Accuracy: 0.0428000092506 	14 1.16292 0.117356
Accuracy: 0.0432000160217 	15 1.1668 0.113015
Accuracy: 0.043799996376 	16 1.17068 0.108673
Accuracy: 0.0442000031471 	17 1.17455 0.104331
Accuracy: 0.0443999767303 	18 1.17843 0.0999896
Accuracy: 0.0446000099182 	19 1.18231 0.0956479
Accuracy: 0.0450999736786 	20 1.18619 0.0913062
Accuracy: 0.0454999804497 	21 1.19006 0.0869645
Accuracy: 0.0455999970436 	22 1.19394 0.0826228
Accuracy: 0.046599984169 	23 1.19782 0.0782811
Accuracy: 0.0469999909401 	24 1.20169 0.0739394
Accuracy: 0.0469999909401 	25 1.20557 0.0695977
Accuracy: 0.0476999878883 	26 1.20945 0.065256
Accuracy: 0.0479999780655 	27 1.21333 0.0609143
Accuracy: 0.0483000278473 	28 1.2172 0.0565726
Accuracy: 0.0483000278473 	29 1.22108 0.052231
Accuracy: 0.0479999780655 	30 1.22496 0.0478893
Accuracy: 0.0480999946594 	31 1.22883 0.0435476
Accuracy: 0.0480999946594 	32 1.23271 0.0392059
Accuracy: 0.0489000082016 	33 1.23659 0.0348642
Accuracy: 0.0486999750137 	34 1.24047 0.0305225
Accuracy: 0.0490999817848 	35 1.24434 0.0261808
Accuracy: 0.049399971962 	36 1.24822 0.0218391
Accuracy: 0.0497000217438 	37 1.2521 0.0174974
Accuracy: 0.0497999787331 	38 1.25597 0.0131557
Accuracy: 0.0497999787331 	39 1.25985 0.00881404
Accuracy: 0.0501000285149 	40 1.26373 0.00447235
Accuracy: 0.050400018692 	41 1.26761 0.000130653
Accuracy: 0.0500000119209 	42 1.27148 -0.00421104
Accuracy: 0.049899995327 	43 1.27536 -0.00855273
Accuracy: 0.0501999855042 	44 1.27924 -0.0128944
Accuracy: 0.0503000020981 	45 1.28311 -0.0172361
Accuracy: 0.0496000051498 	46 1.28699 -0.0215778
Accuracy: 0.0496000051498 	47 1.29087 -0.0259195
Accuracy: 0.049899995327 	48 1.29475 -0.0302612
Accuracy: 0.0497999787331 	49 1.29862 -0.0346029
Accuracy: 0.0490000247955 	50 1.3025 -0.0389446
Accuracy: 0.0490000247955 	51 1.30638 -0.0432863
Accuracy: 0.0494999885559 	52 1.31025 -0.0476279
Accuracy: 0.049399971962 	53 1.31413 -0.0519696
Accuracy: 0.0496000051498 	54 1.31801 -0.0563114
Accuracy: 0.049899995327 	55 1.32189 -0.060653
Accuracy: 0.0497999787331 	56 1.32576 -0.0649947
Accuracy: 0.0490000247955 	57 1.32964 -0.0693364
Accuracy: 0.049399971962 	58 1.33352 -0.0736781
Accuracy: 0.0490999817848 	59 1.33739 -0.0780198
Accuracy: 0.0489000082016 	60 1.34127 -0.0823615
Accuracy: 0.0490000247955 	61 1.34515 -0.0867032
Accuracy: 0.0487999916077 	62 1.34903 -0.0910449
Accuracy: 0.0487999916077 	63 1.3529 -0.0953866
Accuracy: 0.0493000149727 	64 1.35678 -0.0997283
Accuracy: 0.0491999983788 	65 1.36066 -0.10407
Accuracy: 0.0493000149727 	66 1.36453 -0.108412
Accuracy: 0.0490000247955 	67 1.36841 -0.112753
Accuracy: 0.0487999916077 	68 1.37229 -0.117095
Accuracy: 0.0487999916077 	69 1.37617 -0.121437
Accuracy: 0.0491999983788 	70 1.38004 -0.125778
Accuracy: 0.049399971962 	71 1.38392 -0.13012
Accuracy: 0.0496000051498 	72 1.3878 -0.134462
Accuracy: 0.0491999983788 	73 1.39167 -0.138803
Accuracy: 0.0493000149727 	74 1.39555 -0.143145
Accuracy: 0.0494999885559 	75 1.39943 -0.147487
Accuracy: 0.0490999817848 	76 1.40331 -0.151829
Accuracy: 0.0487999916077 	77 1.40718 -0.15617
Accuracy: 0.0483000278473 	78 1.41106 -0.160512
Accuracy: 0.0479999780655 	79 1.41494 -0.164854
Accuracy: 0.0480999946594 	80 1.41881 -0.169195
Accuracy: 0.0482000112534 	81 1.42269 -0.173537
Accuracy: 0.0475999712944 	82 1.42657 -0.177879
Accuracy: 0.0479999780655 	83 1.43045 -0.18222
Accuracy: 0.0478000044823 	84 1.43432 -0.186562
Accuracy: 0.0478000044823 	85 1.4382 -0.190904
Accuracy: 0.0478000044823 	86 1.44208 -0.195245
Accuracy: 0.0476999878883 	87 1.44595 -0.199587
Accuracy: 0.0479999780655 	88 1.44983 -0.203929
Accuracy: 0.0476999878883 	89 1.45371 -0.208271
Accuracy: 0.0473999977112 	90 1.45759 -0.212612
Accuracy: 0.047200024128 	91 1.46146 -0.216954
Accuracy: 0.0475999712944 	92 1.46534 -0.221296
Accuracy: 0.0482000112534 	93 1.46922 -0.225637
Accuracy: 0.0478000044823 	94 1.47309 -0.229979
Accuracy: 0.0480999946594 	95 1.47697 -0.234321
Accuracy: 0.0480999946594 	96 1.48085 -0.238662
Accuracy: 0.0480999946594 	97 1.48473 -0.243004
Accuracy: 0.0482000112534 	98 1.4886 -0.247346
Accuracy: 0.0482000112534 	99 1.49248 -0.251687

In [72]:
w,b = test.AllBeads[-1]

In [74]:
b


Out[74]:
[array([-0.52026641,  0.24772249,  0.30443385, -0.74694669, -0.61910379,
        -0.96364206, -0.31234485, -0.04062728, -0.31264544,  0.79933226,
        -0.95725209,  0.30137059,  1.15755188, -1.62399101, -1.40891051,
         1.03353941,  0.50101578, -1.38635409, -0.16988727,  0.65464318,
        -0.49132031,  0.2587409 ,  0.4232485 , -1.12940395, -1.17089713,
         0.21410605, -1.40715575, -0.53397185,  0.69357997, -0.11505934,
        -1.20675778, -0.01668277, -0.49536428, -1.22702384, -1.13588881,
        -1.20673954, -0.59726006,  0.62010694,  0.69467616,  0.03794095,
        -0.16322906,  1.05579317, -0.88469303, -1.09426343,  1.05520177,
        -0.71906632,  0.59672976, -0.50000161, -0.5576089 ,  1.50208366,
        -2.50997949,  0.59063935,  0.43107703, -0.53128642, -0.04713962,
         0.19859725,  2.02716494,  1.4697119 , -1.04942441, -0.39021888,
         0.74863291, -1.50379992, -1.21944368,  0.42019922,  1.05488157,
        -1.2739656 , -1.17630577, -0.58982897,  1.74044049, -0.15614201,
        -1.95725965,  0.22762525, -0.43448061,  2.11176133, -0.36525011,
        -1.27180409, -0.45832905, -0.68754828,  0.71281052, -1.08011281,
        -0.01405062,  1.56414759, -1.12112153,  0.89823389, -1.02622235,
         1.5640043 ,  0.56441647, -0.60250258,  0.08498309,  0.07512405,
         0.8184731 , -0.11774384, -0.63389194,  0.7879253 ,  1.24543571,
        -0.55245817, -2.20619941,  0.15305792, -0.15095206,  0.61309052,
         2.08695745, -0.26535568, -1.41212142, -0.54862523, -0.11481999,
         0.19107763, -0.20386672,  0.25322413,  0.77498215,  1.02639604,
         0.46344551,  0.15323865, -0.57457662, -0.66503602, -1.63733602,
        -0.49078637, -1.09675825,  2.23333144,  1.56395853,  1.06442392,
        -0.32931131,  0.47142223, -0.84639472, -0.03360561,  0.7630983 ,
         1.09900773, -0.85410815, -0.2778208 ,  0.61274922, -0.15112965,
        -0.98521763,  1.76582778,  0.8848176 ,  0.24938644,  0.25634173,
         0.31735703,  0.72662902, -1.08217037, -0.97182274, -0.27280936,
         0.61966795,  0.11838301, -0.15098131, -1.67015183, -0.86999738,
        -1.80483186, -1.20380735,  0.33744729,  0.32784501,  0.55487835,
        -0.42065921, -2.26104259,  0.20326507, -1.05227637, -1.01744986,
         1.05173516, -1.8231616 , -0.80066895,  0.50194466, -1.95388067,
        -1.4391402 ,  0.13966687, -1.67697215, -1.24225962,  0.84286296,
        -0.54905736,  1.19326067, -1.17864764, -1.22458756, -0.29295793,
         0.30057362,  0.00705888,  0.37743023, -1.23920453, -0.15744478,
         0.2918469 , -0.29207739,  1.97760868,  0.15874423, -0.86647135,
        -0.14525419, -1.18887436,  0.11934876,  1.52983165, -1.42522275,
        -0.20683654,  1.95480835,  0.16375861, -0.3260259 ,  0.93608552,
         1.19607568, -0.66071457,  2.36982465, -1.79331374, -0.63636023,
        -0.1087935 ,  1.24920833, -1.49991786,  1.72524059, -0.25291798,
        -0.88024724,  0.22240987, -0.23660582, -0.58132148,  1.47205067,
         0.19754164, -0.21640907,  1.7684561 , -0.5570702 , -0.15071924,
        -0.26569909,  1.83550334,  1.12755537, -0.84360546, -0.68844855,
        -1.22405112, -0.40174514, -0.72491741, -0.3908442 , -0.48991638,
        -0.18473668, -1.04876864,  1.09889734, -1.52195394, -0.23336567,
         0.338213  , -1.02336526,  0.39991125,  0.20593315, -1.80190277,
         0.83319831, -1.80572712,  0.20746477,  0.10967674,  0.67174762,
         0.03207009,  0.58736247, -0.05316891, -1.01571727, -0.05488862,
         1.33087611, -1.42514133, -0.68828815, -0.15759154, -0.08729997,
        -0.01085017, -0.24643923,  1.10964143,  1.10586405, -1.11690211,
         0.6076405 , -2.49322486,  1.45090854,  0.4801316 , -0.74751318,
         0.88272679], dtype=float32),
 array([ -8.21595252e-01,  -2.22721230e-02,  -9.76333737e-01,
         -1.38896811e+00,  -6.87877655e-01,   1.18521643e+00,
          7.66462088e-01,  -9.56141353e-02,  -1.14207186e-01,
          1.63640112e-01,  -1.97290972e-01,  -2.10825011e-01,
          1.69831336e+00,  -7.60490894e-02,  -1.10037732e+00,
         -4.17452335e-01,   8.58393729e-01,   1.42138398e+00,
         -2.98899889e-01,   2.11727805e-03,  -1.57120034e-01,
          1.35947990e+00,  -6.48545384e-01,  -6.62959814e-01,
         -1.16728854e+00,   7.48800576e-01,  -7.39751875e-01,
          1.24571770e-01,  -9.35620427e-01,   1.49829119e-01,
          3.65435719e-01,   1.73232102e+00,  -1.90552771e+00,
          1.50847304e+00,   4.96079385e-01,  -8.76551211e-01,
          9.51387107e-01,   1.34580100e+00,   3.10356915e-01,
          9.31321457e-02,  -1.96810389e+00,   5.61275005e-01,
          9.65428948e-01,   9.76991653e-01,   1.45071554e+00,
          1.31641373e-01,   1.65384603e+00,   1.51370239e+00,
         -1.78499413e+00,  -3.76800179e-01,   1.89858645e-01,
         -1.75206363e+00,  -7.60813355e-01,   1.87430263e+00,
         -2.52064113e-02,  -1.51955998e+00,   1.20996507e-02,
          5.29639661e-01,  -1.74715376e+00,   2.87368149e-01,
         -9.84721109e-02,  -4.74223383e-02,  -3.06302810e+00,
          6.99477077e-01,  -3.02687913e-01,  -6.80822909e-01,
         -4.51687187e-01,  -1.64143562e+00,   7.61203647e-01,
         -5.73516190e-01,  -7.75239646e-01,  -7.29189754e-01,
         -4.67335820e-01,  -1.36266279e+00,  -2.04433584e+00,
         -2.52698958e-01,  -1.92006528e+00,   1.29426137e-01,
          1.10093367e+00,   1.51384485e+00,   1.56335020e+00,
         -6.16457403e-01,   2.72284687e-01,  -2.72939026e-01,
         -6.51860356e-01,  -5.80804169e-01,   3.70719540e-03,
         -2.12593839e-01,   3.45503300e-01,  -3.84427041e-01,
         -1.32019496e+00,  -1.04059768e+00,  -9.97523189e-01,
          1.25891697e+00,  -1.90580213e+00,   2.50689775e-01,
          3.26620549e-01,   5.58647454e-01,   2.45166212e-01,
         -6.33782864e-01,   5.44911981e-01,  -9.44834769e-01,
         -1.03981388e+00,   2.89378077e-01,   2.49647930e-01,
         -4.90579218e-01,  -7.60564208e-01,  -6.06193617e-02,
         -3.00590694e-01,  -1.56739521e+00,  -3.84743750e-01,
         -9.40849662e-01,   9.23318624e-01,  -9.03862000e-01,
          1.16799891e-01,   8.25044453e-01,   6.96205437e-01,
          2.42476654e+00,  -6.93747580e-01,  -6.52617931e-01,
         -1.08003902e+00,   5.44459283e-01,   1.13595128e+00,
         -5.77786088e-01,   6.04558825e-01,  -1.39842048e-01,
          8.85597050e-01,  -6.78144470e-02,  -3.65855992e-02,
          6.55721366e-01,  -1.25857353e+00,  -1.07726775e-01,
          1.00116169e+00,  -1.33260143e+00,  -2.26636982e+00,
         -1.23874700e+00,  -4.12589490e-01,   7.08425105e-01,
         -4.98440742e-01,  -1.15652454e+00,  -5.49404562e-01,
         -4.39788759e-01,   1.38037503e+00,  -4.79600102e-01,
         -4.39077348e-01,  -6.65886328e-02,  -2.67328191e+00,
         -3.29776973e-01,   3.95685554e-01,   2.89580584e-01,
          5.95919430e-01,   1.07386303e+00,  -6.67644620e-01,
         -4.90350842e-01,  -9.16802526e-01,   6.42959476e-01,
          1.13136685e+00,  -3.37020010e-01,   9.86904204e-01,
          7.88809478e-01,  -5.06890059e-01,   5.71731985e-01,
         -1.74489057e+00,  -7.56577194e-01,   2.61685640e-01,
          4.31760818e-01,  -5.51138222e-01,  -2.21222043e-01,
          1.16399157e+00,  -7.68629074e-01,   2.67541289e+00,
          8.63890871e-02,   5.57408094e-01,   2.97064692e-01,
          6.45241201e-01,  -5.42945325e-01,  -7.44524062e-01,
          1.02702224e+00,   3.85128111e-01,  -1.93029404e-01,
         -7.83132493e-01,  -6.11906052e-02,   6.48386478e-02,
         -6.98789001e-01,  -7.59570301e-01,  -2.89260596e-01,
          1.97721079e-01,   4.20850694e-01,  -8.08305144e-01,
         -1.93403438e-01,   9.12539065e-01,   3.39912206e-01,
         -3.67852934e-02,   1.26253653e+00,  -4.26331699e-01,
         -2.54645205e+00,  -8.58297944e-01,   9.94566232e-02,
         -2.86502331e-01,  -1.28695488e-01,  -1.37592399e+00,
          1.77185416e+00,  -8.68417084e-01,  -1.69211721e+00,
          3.19970489e-01,  -2.88471490e-01,   3.96305919e-01,
         -5.54621756e-01,   1.14738524e+00,   1.00884295e+00,
          7.53781557e-01,   1.30466938e+00,  -1.55493903e+00,
         -5.64799845e-01,   7.90246129e-01,   5.84462583e-01,
          1.28228307e-01,  -1.29814446e-01,  -6.23074472e-01,
          5.43519616e-01,   1.69531620e+00,   7.17151403e-01,
          1.47323155e+00,  -1.27093756e+00,   9.60818291e-01,
         -4.54179645e-01,   9.46830869e-01,  -1.23857118e-01,
          1.55570865e+00,  -1.38381690e-01,   2.00770587e-01,
         -7.95185566e-01,  -4.98316586e-01,   1.40864658e+00,
         -1.04276812e+00,   1.25466311e+00,  -4.57322039e-02,
          1.07208228e+00,   1.43595725e-01,   1.12169027e+00,
          5.45263231e-01,  -9.41491008e-01,  -1.79404306e+00,
          1.44434440e+00,   6.69220209e-01,  -1.30321312e+00,
         -1.86194289e+00,  -1.06763482e+00,  -1.24295461e+00,
          9.05483603e-01,   1.23212481e+00,  -1.57927001e+00,
          9.38302815e-01,   7.11243033e-01,   3.87621850e-01,
          6.12435713e-02], dtype=float32),
 array([-1.2435081 , -1.3895067 ,  0.53644782,  0.60423809, -0.19422041,
         0.79507917,  1.13472712, -1.90221465, -1.02565968, -1.01484549], dtype=float32)]

In [78]:
#Model generation

w,b = test.AllBeads[1]
thresh = .005

for ii in xrange(1):

    '''weights = {
        'h1': tf.Variable(tf.random_normal([n_input, n_hidden_1])),
        'h2': tf.Variable(tf.random_normal([n_hidden_1, n_hidden_2])),
        'out': tf.Variable(tf.random_normal([n_hidden_2, n_classes]))
    }
    biases = {
        'b1': tf.Variable(tf.random_normal([n_hidden_1])),
        'b2': tf.Variable(tf.random_normal([n_hidden_2])),
        'out': tf.Variable(tf.random_normal([n_classes]))
    }'''

    # Construct model with different initial weights
    test_model = multilayer_perceptron(w=w, b=b, ind=100)
    
    #Construct model with same initial weights
    #test_model = copy.copy(copy_model)
    #test_model.index = ii
    
    
    
    
    #print test_model.weights
    

    
    #models.append(test_model)
    with test_model.g.as_default():

        x = tf.placeholder("float", [None, n_input])
        y = tf.placeholder("float", [None, n_classes])
        pred = test_model.predict(x)

        # Define loss and optimizer
        #cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(pred, y))
        cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(pred, y))
        optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)

        # Initializing the variables
        init = tf.initialize_all_variables()


        #remove the comment to get random initialization
        stopcond = True




        with tf.Session() as sess:
            sess.run(init)
            xtest = mnist.test.images
            ytest = mnist.test.labels
            while stopcond:
                #print 'epoch:' + str(e)
                #X = []
                #y = []
                j = 0
                # Training cycle
                for epoch in range(training_epochs):
                    avg_cost = 0.
                    total_batch = int(10000/batch_size)

                    if (avg_cost > thresh or avg_cost == 0.) and stopcond:
                    # Loop over all batches
                        for i in range(total_batch):
                            batch_x, batch_y = mnist.train.next_batch(batch_size)
                            # Run optimization op (backprop) and cost op (to get loss value)
                            _, c = sess.run([optimizer, cost], feed_dict={x: batch_x,
                                                                          y: batch_y})
                            # Compute average loss
                            avg_cost += c / total_batch
                        # Display logs per epoch step
                        if epoch % display_step == 0:
                            print "Epoch:", '%04d' % (epoch+1), "cost=", \
                                "{:.9f}".format(avg_cost)
                        
                        correct_prediction = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))
                        # Calculate accuracy
                        accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
                        print "Accuracy:", accuracy.eval({x: xtest, y: ytest})
                        thiserror = 1 - accuracy.eval({x: xtest, y: ytest})
                        if thiserror < thresh:
                            stopcond = False
                            
                print "Optimization Finished!"

                # Test model
                #correct_prediction = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))
                correct_prediction = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))
                # Calculate accuracy
                accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
                print "Accuracy:", accuracy.eval({x: xtest, y: ytest})

                if (j%5000) == 0:
                    print "Error after "+str(j)+" iterations:" + str(accuracy.eval({x: xtest, y: ytest}))

                if 1 - accuracy.eval({x: xtest, y: ytest}) < thresh or stopcond == False:
                    #print "Changing stopcond!"
                    stopcond = False
                    print "Final params:"
                    test_model.params = sess.run(test_model.weightslist), sess.run(test_model.biaseslist)
                    save_path = test_model.saver.save(sess,"/home/dfreeman/PythonFun/tmp/model" + str(ii) + ".ckpt")
                j+=1
    #remove the comment to get random initialization

    
    #synapses.append([synapse_0,synapse_1,synapse_2


Epoch: 0001 cost= 0.427446254
Accuracy: 0.9642
Epoch: 0002 cost= 0.388155263
Accuracy: 0.9631
Epoch: 0003 cost= 0.215077950
Accuracy: 0.9659
Epoch: 0004 cost= 0.254136921
Accuracy: 0.9643
Epoch: 0005 cost= 0.266782776
Accuracy: 0.9622
Epoch: 0006 cost= 0.286865226
Accuracy: 0.9634
Epoch: 0007 cost= 0.349483893
Accuracy: 0.9652
Epoch: 0008 cost= 0.322126526
Accuracy: 0.9666
Epoch: 0009 cost= 0.102091477
Accuracy: 0.967
Epoch: 0010 cost= 0.144894568
Accuracy: 0.964
Epoch: 0011 cost= 0.156021222
Accuracy: 0.9675
Epoch: 0012 cost= 0.154399537
Accuracy: 0.9677
Epoch: 0013 cost= 0.157314702
Accuracy: 0.9666
Epoch: 0014 cost= 0.097399672
Accuracy: 0.9662
Epoch: 0015 cost= 0.070537900
Accuracy: 0.9664
Optimization Finished!
Accuracy: 0.9664
Error after 0 iterations:0.9664
Epoch: 0001 cost= 0.069893413
Accuracy: 0.9658
Epoch: 0002 cost= 0.111843362
Accuracy: 0.9656
Epoch: 0003 cost= 0.145028126
Accuracy: 0.9668
Epoch: 0004 cost= 0.110090393
Accuracy: 0.9677
Epoch: 0005 cost= 0.075510024
Accuracy: 0.9658
Epoch: 0006 cost= 0.091460152
Accuracy: 0.9668
Epoch: 0007 cost= 0.041279547
Accuracy: 0.9674
Epoch: 0008 cost= 0.092657605
Accuracy: 0.9662
Epoch: 0009 cost= 0.092687997
Accuracy: 0.9661
Epoch: 0010 cost= 0.055850054
Accuracy: 0.9675
Epoch: 0011 cost= 0.068601606
Accuracy: 0.9679
Epoch: 0012 cost= 0.093378161
Accuracy: 0.9685
Epoch: 0013 cost= 0.065880271
Accuracy: 0.967
Epoch: 0014 cost= 0.119259865
Accuracy: 0.9676
Epoch: 0015 cost= 0.123184984
Accuracy: 0.9659
Optimization Finished!
Accuracy: 0.9659
Error after 0 iterations:0.9659
Epoch: 0001 cost= 0.062646902
Accuracy: 0.9668
Epoch: 0002 cost= 0.118531805
Accuracy: 0.9658
Epoch: 0003 cost= 0.144570656
Accuracy: 0.9681
Epoch: 0004 cost= 0.117370040
Accuracy: 0.9684
Epoch: 0005 cost= 0.094370778
Accuracy: 0.9673
Epoch: 0006 cost= 0.106851742
Accuracy: 0.9686
Epoch: 0007 cost= 0.044365020
Accuracy: 0.967
Epoch: 0008 cost= 0.075937668
Accuracy: 0.9673
Epoch: 0009 cost= 0.039660773
Accuracy: 0.9667
Epoch: 0010 cost= 0.065723944
Accuracy: 0.9669
Epoch: 0011 cost= 0.107796821
Accuracy: 0.9658
Epoch: 0012 cost= 0.106408267
Accuracy: 0.9668
Epoch: 0013 cost= 0.056187395
Accuracy: 0.9683
Epoch: 0014 cost= 0.082605585
Accuracy: 0.9695
Epoch: 0015 cost= 0.066437753
Accuracy: 0.9682
Optimization Finished!
Accuracy: 0.9682
Error after 0 iterations:0.9682
Epoch: 0001 cost= 0.113998719
Accuracy: 0.9655
Epoch: 0002 cost= 0.097598098
Accuracy: 0.9688
Epoch: 0003 cost= 0.096100489
Accuracy: 0.9662
Epoch: 0004 cost= 0.090809174
Accuracy: 0.9684
Epoch: 0005 cost= 0.077002753
Accuracy: 0.9673
Epoch: 0006 cost= 0.055263686
Accuracy: 0.9682
Epoch: 0007 cost= 0.060709617
Accuracy: 0.9661
Epoch: 0008 cost= 0.089572028
Accuracy: 0.9693
Epoch: 0009 cost= 0.064838966
Accuracy: 0.9702
Epoch: 0010 cost= 0.063324587
Accuracy: 0.9695
Epoch: 0011 cost= 0.051863025
Accuracy: 0.9701
Epoch: 0012 cost= 0.045127958
Accuracy: 0.9703
Epoch: 0013 cost= 0.040996780
Accuracy: 0.9694
Epoch: 0014 cost= 0.041134293
Accuracy: 0.9703
Epoch: 0015 cost= 0.078443705
Accuracy: 0.9695
Optimization Finished!
Accuracy: 0.9695
Error after 0 iterations:0.9695
Epoch: 0001 cost= 0.045867218
Accuracy: 0.9698
Epoch: 0002 cost= 0.068232461
Accuracy: 0.9706
Epoch: 0003 cost= 0.100191265
Accuracy: 0.9681
Epoch: 0004 cost= 0.058207033
Accuracy: 0.9685
Epoch: 0005 cost= 0.073468120
Accuracy: 0.9672
Epoch: 0006 cost= 0.115724135
Accuracy: 0.9677
Epoch: 0007 cost= 0.069882303
Accuracy: 0.9686
Epoch: 0008 cost= 0.103297367
Accuracy: 0.9692
Epoch: 0009 cost= 0.054458868
Accuracy: 0.9696
Epoch: 0010 cost= 0.059468066
Accuracy: 0.9699
Epoch: 0011 cost= 0.081108575
Accuracy: 0.965
Epoch: 0012 cost= 0.115991086
Accuracy: 0.9672
Epoch: 0013 cost= 0.127581704
Accuracy: 0.9673
Epoch: 0014 cost= 0.050746322
Accuracy: 0.9677
Epoch: 0015 cost= 0.027768867
Accuracy: 0.9674
Optimization Finished!
Accuracy: 0.9674
Error after 0 iterations:0.9674
Epoch: 0001 cost= 0.035731059
Accuracy: 0.9698
Epoch: 0002 cost= 0.032648279
Accuracy: 0.9683
Epoch: 0003 cost= 0.072057581
Accuracy: 0.9692
Epoch: 0004 cost= 0.101116247
Accuracy: 0.9694
Epoch: 0005 cost= 0.084560442
Accuracy: 0.9672
Epoch: 0006 cost= 0.076841464
Accuracy: 0.9687
Epoch: 0007 cost= 0.075424822
Accuracy: 0.969
Epoch: 0008 cost= 0.040257943
Accuracy: 0.968
Epoch: 0009 cost= 0.059859093
Accuracy: 0.9685
Epoch: 0010 cost= 0.059823168
Accuracy: 0.9707
Epoch: 0011 cost= 0.044282347
Accuracy: 0.9716
Epoch: 0012 cost= 0.057426597
Accuracy: 0.9689
Epoch: 0013 cost= 0.040995671
Accuracy: 0.9718
Epoch: 0014 cost= 0.029555581
Accuracy: 0.9706
Epoch: 0015 cost= 0.033128446
Accuracy: 0.9701
Optimization Finished!
Accuracy: 0.9701
Error after 0 iterations:0.9701
Epoch: 0001 cost= 0.029655925
Accuracy: 0.97
Epoch: 0002 cost= 0.036713677
Accuracy: 0.9695
Epoch: 0003 cost= 0.009859686
Accuracy: 0.9697
---------------------------------------------------------------------------
KeyboardInterrupt                         Traceback (most recent call last)
<ipython-input-78-0d07f118cc75> in <module>()
     70                     # Loop over all batches
     71                         for i in range(total_batch):
---> 72                             batch_x, batch_y = mnist.train.next_batch(batch_size)
     73                             # Run optimization op (backprop) and cost op (to get loss value)
     74                             _, c = sess.run([optimizer, cost], feed_dict={x: batch_x,

/usr/local/lib/python2.7/dist-packages/tensorflow/contrib/learn/python/learn/datasets/mnist.pyc in next_batch(self, batch_size, fake_data)
    145           fake_label for _ in xrange(batch_size)]
    146     start = self._index_in_epoch
--> 147     self._index_in_epoch += batch_size
    148     if self._index_in_epoch > self._num_examples:
    149       # Finished epoch

KeyboardInterrupt: 

In [ ]: