In [1]:
%matplotlib inline
from fenparsev4 import *
from pybrain.datasets import ClassificationDataSet
from pybrain.supervised.trainers import BackpropTrainer
from pybrain.tools.shortcuts import buildNetwork
from pybrain.structure.modules import TanhLayer
import numpy as np
import random
from sklearn.metrics import confusion_matrix
import os
from __future__ import print_function
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation
from keras.optimizers import SGD, Adam, RMSprop
from keras.utils import np_utils
import pickle


Position(board='         \n         \n rnbqkbnr\n pppp.ppp\n ........\n ....p...\n ....P...\n .....N..\n PPPP.PPP\n RNBQKB.R\n         \n         ', score=0, wc=(True, True), bc=(True, True), ep=0, kp=0)
(94, 45)
(94, 84)
(92, 71), N->.
Using Theano backend.
Couldn't import dot_parser, loading of dot files will not be possible.

In [2]:
#most important part
def fries_ready():
    os.system('say your fries are done')
    
def write(str):
    sys.stdout.write('\r' + str)
    sys.stdout.flush()
    
def writeln(str):
    sys.stdout.write(str)
    sys.stdout.flush()

num_files = 75
filename_prefix='/media/drive/storage/csv_input/2015-12-08_112mil'
filename_suffix_range=range(1,num_files + 1)
debug=True
    
#read in csv
df = pd.DataFrame()
writeln("Inizializing read of %d files...\n" % (num_files))
for i in filename_suffix_range:
    if debug: write("Reading...%d/%d" % (i, num_files))
    df = df.append(pd.read_csv(filename_prefix + str(i)))
write("Reading...done\n")
#clean columns
df['y'] = df['y'].astype(int)
if debug: writeln("Converting to list...")
df['x'] = df['x'] = df.loc[:, 'x'].apply(lambda x: [1 if '1' == a else 0 for a in x.split(', ')])
length = df.shape[0]
df = df.set_index([range(0,length)])
writeln("done\nShuffling data...")
df = df.reindex(np.random.permutation(df.index))
writeln("done")
write("Splitting data...")
split = df.shape[0] * 4 / 5
all_train = df.iloc[:split, :]
all_test = df.iloc[split:, :]
writeln("done\n")


Inizializing read of 75 files...
Reading...done
Converting to list...done
Splitting data...done

In [3]:
#takes in full dataframe and converts to usable dataset
def build_dataset(all_train, all_test, nb_classes=2, debug=True):
    X_train = list(all_train['x'])
    X_test = list(all_test['x'])
    if debug: print("building y labels")
    y_train = [[1] if y == 1 else [0] for y in all_train['y']]
    Y_test_binary = [1 if y == 1 else 0 for y in all_test['y']]
    if debug: print("converting X_train and X_test to nparrays")
    X_train = np.array(X_train)
    X_test = np.array(X_test)
    if debug: print("converting y labels to categorical")
    # convert class vectors to binary class matrices
    Y_train = np_utils.to_categorical(y_train, nb_classes)
    Y_test = np_utils.to_categorical(Y_test_binary, nb_classes)
    return (X_train, Y_train, X_test, Y_test, Y_test_binary)

In [4]:
# y test binary is a binary vector
# Y_test is categorical numpy format
(X_train, Y_train, X_test, Y_test, Y_test_binary) = build_dataset(all_train, all_test)


building y labels
converting X_train and X_test to nparrays
converting y labels to categorical

In [5]:
def getActivation(activation):
    if (activation == 'tanh'):
        return Activation('tanh')
    elif (activation == 'sigmoid'):
        return Activation('sigmoid')
    elif (activation == 'hard_sigmoid'):
        return Activation('hard_sigmoid')
    else:
        print("invalid activation!")

def buildMLP(activation='tanh',depth=3, width=512):
    if depth < 2:
        depth = 2
    model = Sequential()
    model.add(Dense(width, input_shape=(1536,)))
    model.add(getActivation(activation))
    model.add(Dropout(0.2))
    for i in range(0, depth - 2):
        model.add(Dense(width))
        model.add(getActivation(activation))
        model.add(Dropout(0.2))
       
    model.add(Dense(2))
    model.add(Activation('softmax'))

    rms = RMSprop()
    model.compile(loss='categorical_crossentropy', optimizer=rms)
    #print(model.to_json())
    writeln("Model with depth %d built..." % depth)
    return model

In [ ]:
class KerasExperiment:
    def __init__(self, model, test_df, X_train, Y_train, X_test, Y_test, Y_test_binary, epochs=5, repeat_number = 0, verbose=True):
        self.model = model
        self.test_df = test_df;
        self.X_train = X_train
        self.Y_train = Y_train
        self.X_test = X_test
        self.Y_test = Y_test
        self.Y_test_binary = Y_test_binary
        self.nb_epoch = epochs
        self.repeat_number = repeat_number

    #adds specific piece confusion matrices to results dict d
    def add_piece_specifics(self, predicted_y):
        #append column for predicted
        test_df_result = self.test_df.copy()
        test_df_result.loc[:,'predicted'] = predicted_y
        
        #create new row for piece results
        d = {}
        #calculate each piece confusion matrix
        for p in "pPrRnNbBqQkK":
            specific_piece = test_df_result[test_df_result['piece_moved'] == p]
            cm = [confusion_matrix(specific_piece['y'], specific_piece['predicted'])]
            #append column
            d[p + '_cm'] = cm
        #get white and black
        color = test_df_result[test_df_result['piece_moved'].isin(list("PRNBQK"))]
        d["white_cm"] = [confusion_matrix(color['y'], color['predicted'])]
        color = test_df_result[test_df_result['piece_moved'].isin(list("prnbqk"))]
        d["black_cm"] = [confusion_matrix(color['y'], color['predicted'])]
        
        return pd.DataFrame(d, columns = [a + "_cm" for a in "pPrRnNbBqQkK"] + ["white_cm", "black_cm"], index=[1]) 
    
    def run_experiment(self):
        self.model.fit(self.X_train, self.Y_train, nb_epoch=self.nb_epoch,
                  show_accuracy=True, verbose=2,
                  validation_data=(self.X_test, self.Y_test))
        score = self.model.evaluate(self.X_test, self.Y_test,
                               show_accuracy=True, verbose=0)
        predicted_y = self.model.predict_classes(self.X_test, batch_size=32)
        cm_overall = [confusion_matrix(predicted_y, self.Y_test_binary)]
        #create results row 
        results_row = pd.DataFrame({"training_size" : len(self.X_train), \
                                    "test_size" : len(self.Y_test), \
                                      "pct_white" : sum(self.Y_test_binary) * 1.0 / len(self.Y_test_binary), \
                                      "cm_overall": cm_overall, \
                                   "epochs": self.nb_epoch, \
                                   "network" : self.model.to_json(),\
                                   "repeat_number" : self.repeat_number}, index=[1]);
        results_row = results_row.join(self.add_piece_specifics(predicted_y))
        return(results_row)
        
    #    print(confusion_matrix(y_train, out))
        #return pd.DataFrame({"train_size": self.train_df.shape[0], 
#                             "train_white_count" : sum([1 if a.isupper() else 0 for a in self.train_df['piece_moved']]),
#                             "confusion_matrix" : [cm],
#                             "accuracy": [(cm[0][0] + cm[1][1]) * 1.0 / (sum([sum(c) for c in cm]))]})

In [24]:
#testing different epoch counts
mlp_layer_set = [3, 4, 5, 6]
mlp_width_set = [5, 25, 50, 75]
test_size = 10000
train_size = 100000
e_list = []
for (mlp_layers, mlp_width) in [(a,b) for a in mlp_layer_set\
                        for b in mlp_width_set]:
    print((mlp_layers, mlp_width))
    e = KerasExperiment(buildMLP(activation='tanh', depth=mlp_layers, width=mlp_width), all_test.iloc[:test_size,:], X_train[:train_size], Y_train[:train_size],\
                         X_test[:test_size], Y_test[:test_size], Y_test_binary[:test_size])
    e_list.append(e)


(3, 5)
Model with depth 3 built...(3, 25)
Model with depth 3 built...(3, 50)
Model with depth 3 built...(3, 75)
Model with depth 3 built...(4, 5)
Model with depth 4 built...(4, 25)
Model with depth 4 built...(4, 50)
Model with depth 4 built...(4, 75)
Model with depth 4 built...(5, 5)
Model with depth 5 built...(5, 25)
Model with depth 5 built...(5, 50)
Model with depth 5 built...(5, 75)
Model with depth 5 built...(6, 5)
Model with depth 6 built...(6, 25)
Model with depth 6 built...(6, 50)
Model with depth 6 built...(6, 75)
Model with depth 6 built...

In [25]:
results_df = pd.DataFrame()
count = 0
for e in e_list:
    results_df = results_df.append(e.run_experiment())
    count += 1
    if (count % 5 == 0):
        pickle.dump(results_df, open("2015-12-12-mlpexperiments_results8.p", 'wb'))


Train on 100000 samples, validate on 10000 samples
Epoch 1/5
2s - loss: 0.6777 - acc: 0.5643 - val_loss: 0.6447 - val_acc: 0.6215
Epoch 2/5
2s - loss: 0.6242 - acc: 0.6427 - val_loss: 0.6182 - val_acc: 0.6243
Epoch 3/5
2s - loss: 0.6015 - acc: 0.6662 - val_loss: 0.5985 - val_acc: 0.6545
Epoch 4/5
2s - loss: 0.5896 - acc: 0.6797 - val_loss: 0.5863 - val_acc: 0.6675
Epoch 5/5
2s - loss: 0.5809 - acc: 0.6870 - val_loss: 0.6004 - val_acc: 0.6514
10000/10000 [==============================] - 0s     
Train on 100000 samples, validate on 10000 samples
Epoch 1/5
2s - loss: 0.6752 - acc: 0.5674 - val_loss: 0.6558 - val_acc: 0.6034
Epoch 2/5
2s - loss: 0.6173 - acc: 0.6461 - val_loss: 0.6077 - val_acc: 0.6537
Epoch 3/5
2s - loss: 0.5949 - acc: 0.6687 - val_loss: 0.6127 - val_acc: 0.6536
Epoch 4/5
2s - loss: 0.5832 - acc: 0.6815 - val_loss: 0.5782 - val_acc: 0.6873
Epoch 5/5
2s - loss: 0.5742 - acc: 0.6908 - val_loss: 0.6015 - val_acc: 0.6741
10000/10000 [==============================] - 0s     
Train on 100000 samples, validate on 10000 samples
Epoch 1/5
3s - loss: 0.6784 - acc: 0.5617 - val_loss: 0.6403 - val_acc: 0.6113
Epoch 2/5
4s - loss: 0.6187 - acc: 0.6450 - val_loss: 0.5951 - val_acc: 0.6677
Epoch 3/5
4s - loss: 0.5945 - acc: 0.6696 - val_loss: 0.5882 - val_acc: 0.6752
Epoch 4/5
4s - loss: 0.5808 - acc: 0.6840 - val_loss: 0.5830 - val_acc: 0.6787
Epoch 5/5
4s - loss: 0.5732 - acc: 0.6920 - val_loss: 0.5967 - val_acc: 0.6594
10000/10000 [==============================] - 0s     
Train on 100000 samples, validate on 10000 samples
Epoch 1/5
5s - loss: 0.6757 - acc: 0.5665 - val_loss: 0.6393 - val_acc: 0.6185
Epoch 2/5
5s - loss: 0.6162 - acc: 0.6478 - val_loss: 0.6015 - val_acc: 0.6547
Epoch 3/5
5s - loss: 0.5933 - acc: 0.6707 - val_loss: 0.6166 - val_acc: 0.6528
Epoch 4/5
5s - loss: 0.5809 - acc: 0.6851 - val_loss: 0.5775 - val_acc: 0.6846
Epoch 5/5
5s - loss: 0.5724 - acc: 0.6921 - val_loss: 0.5646 - val_acc: 0.6952
10000/10000 [==============================] - 0s     
Train on 100000 samples, validate on 10000 samples
Epoch 1/5
2s - loss: 0.6874 - acc: 0.5420 - val_loss: 0.6601 - val_acc: 0.6127
Epoch 2/5
2s - loss: 0.6401 - acc: 0.6279 - val_loss: 0.6233 - val_acc: 0.6183
Epoch 3/5
2s - loss: 0.6123 - acc: 0.6585 - val_loss: 0.5938 - val_acc: 0.6656
Epoch 4/5
2s - loss: 0.5983 - acc: 0.6747 - val_loss: 0.5848 - val_acc: 0.6767
Epoch 5/5
2s - loss: 0.5903 - acc: 0.6824 - val_loss: 0.6016 - val_acc: 0.6541
10000/10000 [==============================] - 0s     
Train on 100000 samples, validate on 10000 samples
Epoch 1/5
3s - loss: 0.6882 - acc: 0.5394 - val_loss: 0.6699 - val_acc: 0.5811
Epoch 2/5
3s - loss: 0.6299 - acc: 0.6340 - val_loss: 0.6080 - val_acc: 0.6514
Epoch 3/5
3s - loss: 0.6013 - acc: 0.6652 - val_loss: 0.5862 - val_acc: 0.6767
Epoch 4/5
3s - loss: 0.5872 - acc: 0.6813 - val_loss: 0.5766 - val_acc: 0.6955
Epoch 5/5
3s - loss: 0.5779 - acc: 0.6897 - val_loss: 0.5738 - val_acc: 0.6943
10000/10000 [==============================] - 0s     
Train on 100000 samples, validate on 10000 samples
Epoch 1/5
4s - loss: 0.6851 - acc: 0.5505 - val_loss: 0.6515 - val_acc: 0.6000
Epoch 2/5
4s - loss: 0.6255 - acc: 0.6384 - val_loss: 0.6016 - val_acc: 0.6542
Epoch 3/5
4s - loss: 0.5990 - acc: 0.6656 - val_loss: 0.5875 - val_acc: 0.6798
Epoch 4/5
4s - loss: 0.5834 - acc: 0.6812 - val_loss: 0.5806 - val_acc: 0.6808
Epoch 5/5
4s - loss: 0.5696 - acc: 0.6918 - val_loss: 0.5919 - val_acc: 0.6761
10000/10000 [==============================] - 0s     
Train on 100000 samples, validate on 10000 samples
Epoch 1/5
5s - loss: 0.6826 - acc: 0.5535 - val_loss: 0.6423 - val_acc: 0.6329
Epoch 2/5
5s - loss: 0.6231 - acc: 0.6402 - val_loss: 0.6046 - val_acc: 0.6593
Epoch 3/5
5s - loss: 0.5965 - acc: 0.6683 - val_loss: 0.5758 - val_acc: 0.6889
Epoch 4/5
5s - loss: 0.5799 - acc: 0.6810 - val_loss: 0.6008 - val_acc: 0.6627
Epoch 5/5
5s - loss: 0.5650 - acc: 0.6926 - val_loss: 0.5761 - val_acc: 0.6794
10000/10000 [==============================] - 0s     
Train on 100000 samples, validate on 10000 samples
Epoch 1/5
2s - loss: 0.6903 - acc: 0.5305 - val_loss: 0.6721 - val_acc: 0.5907
Epoch 2/5
2s - loss: 0.6511 - acc: 0.6174 - val_loss: 0.6457 - val_acc: 0.6119
Epoch 3/5
2s - loss: 0.6196 - acc: 0.6554 - val_loss: 0.6029 - val_acc: 0.6627
Epoch 4/5
2s - loss: 0.6041 - acc: 0.6723 - val_loss: 0.5823 - val_acc: 0.6917
Epoch 5/5
2s - loss: 0.5933 - acc: 0.6832 - val_loss: 0.5787 - val_acc: 0.6899
10000/10000 [==============================] - 0s     
Train on 100000 samples, validate on 10000 samples
Epoch 1/5
3s - loss: 0.6875 - acc: 0.5410 - val_loss: 0.6526 - val_acc: 0.6060
Epoch 2/5
3s - loss: 0.6336 - acc: 0.6342 - val_loss: 0.6164 - val_acc: 0.6457
Epoch 3/5
3s - loss: 0.6043 - acc: 0.6627 - val_loss: 0.5935 - val_acc: 0.6634
Epoch 4/5
3s - loss: 0.5873 - acc: 0.6804 - val_loss: 0.5897 - val_acc: 0.6788
Epoch 5/5
3s - loss: 0.5725 - acc: 0.6903 - val_loss: 0.5788 - val_acc: 0.6831
10000/10000 [==============================] - 0s     
Train on 100000 samples, validate on 10000 samples
Epoch 1/5
5s - loss: 0.6895 - acc: 0.5390 - val_loss: 0.6497 - val_acc: 0.6202
Epoch 2/5
5s - loss: 0.6312 - acc: 0.6351 - val_loss: 0.6194 - val_acc: 0.6438
Epoch 3/5
5s - loss: 0.6002 - acc: 0.6647 - val_loss: 0.6004 - val_acc: 0.6633
Epoch 4/5
5s - loss: 0.5804 - acc: 0.6807 - val_loss: 0.6027 - val_acc: 0.6629
Epoch 5/5
5s - loss: 0.5673 - acc: 0.6893 - val_loss: 0.5704 - val_acc: 0.6906
10000/10000 [==============================] - 0s     
Train on 100000 samples, validate on 10000 samples
Epoch 1/5
6s - loss: 0.6910 - acc: 0.5381 - val_loss: 0.6610 - val_acc: 0.5875
Epoch 2/5
7s - loss: 0.6336 - acc: 0.6326 - val_loss: 0.6184 - val_acc: 0.6410
Epoch 3/5
6s - loss: 0.6004 - acc: 0.6639 - val_loss: 0.6131 - val_acc: 0.6519
Epoch 4/5
6s - loss: 0.5801 - acc: 0.6794 - val_loss: 0.5658 - val_acc: 0.6903
Epoch 5/5
6s - loss: 0.5641 - acc: 0.6921 - val_loss: 0.6111 - val_acc: 0.6548
10000/10000 [==============================] - 0s     
Train on 100000 samples, validate on 10000 samples
Epoch 1/5
2s - loss: 0.6927 - acc: 0.5155 - val_loss: 0.6863 - val_acc: 0.5598
Epoch 2/5
2s - loss: 0.6673 - acc: 0.5941 - val_loss: 0.6326 - val_acc: 0.6352
Epoch 3/5
2s - loss: 0.6305 - acc: 0.6473 - val_loss: 0.6090 - val_acc: 0.6541
Epoch 4/5
2s - loss: 0.6104 - acc: 0.6689 - val_loss: 0.6016 - val_acc: 0.6584
Epoch 5/5
2s - loss: 0.5989 - acc: 0.6813 - val_loss: 0.5867 - val_acc: 0.6838
10000/10000 [==============================] - 0s     
Train on 100000 samples, validate on 10000 samples
Epoch 1/5
3s - loss: 0.6950 - acc: 0.5200 - val_loss: 0.6754 - val_acc: 0.5650
Epoch 2/5
3s - loss: 0.6490 - acc: 0.6171 - val_loss: 0.6159 - val_acc: 0.6406
Epoch 3/5
3s - loss: 0.6126 - acc: 0.6593 - val_loss: 0.5880 - val_acc: 0.6785
Epoch 4/5
3s - loss: 0.5942 - acc: 0.6779 - val_loss: 0.6087 - val_acc: 0.6481
Epoch 5/5
3s - loss: 0.5802 - acc: 0.6894 - val_loss: 0.5735 - val_acc: 0.6878
10000/10000 [==============================] - 0s     
Train on 100000 samples, validate on 10000 samples
Epoch 1/5
5s - loss: 0.6936 - acc: 0.5299 - val_loss: 0.6648 - val_acc: 0.5940
Epoch 2/5
5s - loss: 0.6388 - acc: 0.6296 - val_loss: 0.6254 - val_acc: 0.6366
Epoch 3/5
5s - loss: 0.6041 - acc: 0.6646 - val_loss: 0.5834 - val_acc: 0.6751
Epoch 4/5
5s - loss: 0.5836 - acc: 0.6794 - val_loss: 0.5959 - val_acc: 0.6651
Epoch 5/5
5s - loss: 0.5705 - acc: 0.6895 - val_loss: 0.5542 - val_acc: 0.7029
10000/10000 [==============================] - 0s     
Train on 100000 samples, validate on 10000 samples
Epoch 1/5
7s - loss: 0.6962 - acc: 0.5226 - val_loss: 0.6700 - val_acc: 0.5817
Epoch 2/5
7s - loss: 0.6407 - acc: 0.6264 - val_loss: 0.6418 - val_acc: 0.6049
Epoch 3/5
7s - loss: 0.6031 - acc: 0.6621 - val_loss: 0.6210 - val_acc: 0.6527
Epoch 4/5
7s - loss: 0.5843 - acc: 0.6770 - val_loss: 0.5665 - val_acc: 0.6941
Epoch 5/5
7s - loss: 0.5685 - acc: 0.6885 - val_loss: 0.5519 - val_acc: 0.7053
10000/10000 [==============================] - 0s     

In [30]:
#testing different epoch counts
mlp_layer_set = [3, 4, 5, 6]
mlp_width_set = [5, 25, 50, 75]
test_size = 10000
train_size = 100000
e = KerasExperiment(buildMLP(activation='tanh', depth=2), all_test.iloc[:test_size,:], X_train[:train_size], Y_train[:train_size],\
                         X_test[:test_size], Y_test[:test_size], Y_test_binary[:test_size], epochs=15)
e.run_experiment()


Model with depth 2 built...Train on 100000 samples, validate on 10000 samples
Epoch 1/15
18s - loss: 0.6715 - acc: 0.5821 - val_loss: 0.6337 - val_acc: 0.6223
Epoch 2/15
17s - loss: 0.6163 - acc: 0.6493 - val_loss: 0.6073 - val_acc: 0.6409
Epoch 3/15
17s - loss: 0.5930 - acc: 0.6714 - val_loss: 0.7060 - val_acc: 0.5564
Epoch 4/15
17s - loss: 0.5810 - acc: 0.6855 - val_loss: 0.6154 - val_acc: 0.6453
Epoch 5/15
17s - loss: 0.5712 - acc: 0.6939 - val_loss: 0.5616 - val_acc: 0.7033
Epoch 6/15
17s - loss: 0.5666 - acc: 0.6993 - val_loss: 0.5622 - val_acc: 0.6976
Epoch 7/15
17s - loss: 0.5614 - acc: 0.7053 - val_loss: 0.5940 - val_acc: 0.6496
Epoch 8/15
17s - loss: 0.5575 - acc: 0.7078 - val_loss: 0.5726 - val_acc: 0.7002
Epoch 9/15
18s - loss: 0.5543 - acc: 0.7127 - val_loss: 0.6274 - val_acc: 0.6776
Epoch 10/15
18s - loss: 0.5526 - acc: 0.7133 - val_loss: 0.5511 - val_acc: 0.7137
Epoch 11/15
17s - loss: 0.5488 - acc: 0.7198 - val_loss: 0.5671 - val_acc: 0.6952
Epoch 12/15
17s - loss: 0.5476 - acc: 0.7192 - val_loss: 0.5761 - val_acc: 0.6813
Epoch 13/15
17s - loss: 0.5450 - acc: 0.7228 - val_loss: 0.5686 - val_acc: 0.6905
Epoch 14/15
18s - loss: 0.5429 - acc: 0.7239 - val_loss: 0.5635 - val_acc: 0.7164
Epoch 15/15
17s - loss: 0.5427 - acc: 0.7245 - val_loss: 0.5791 - val_acc: 0.6930
10000/10000 [==============================] - 0s     
Out[30]:
cm_overall epochs network pct_white test_size training_size p_cm P_cm r_cm R_cm n_cm N_cm b_cm B_cm q_cm Q_cm k_cm K_cm white_cm black_cm
1 [[3191, 1289], [1781, 3739]] 15 {"layers": [{"b_constraint": null, "name": "De... 0.5028 10000 100000 [[505, 98], [113, 457]] [[328, 308], [34, 589]] [[291, 136], [228, 217]] [[268, 210], [104, 420]] [[248, 57], [188, 128]] [[205, 198], [30, 364]] [[244, 62], [170, 132]] [[205, 182], [33, 359]] [[302, 66], [136, 213]] [[210, 206], [49, 421]] [[196, 92], [127, 157]] [[189, 166], [77, 282]] [[1405, 1270], [327, 2435]] [[1786, 511], [962, 1304]]

In [ ]:
json.loads(json_str)['layers'][1]['activation']

In [23]:
#methods to decode serialized network json
import json

def get_num_layers(json_str):
    # number of actual layers - 5 for input and output / 3 for each hidden + 2 for input and output
    return (len(json.loads(json_str)['layers']) - 5) / 3 + 2

def get_first_activation(json_str):
    return json.loads(json_str)['layers'][1]['activation']

def get_first_width(json_str):
    return json.loads(json_str)['layers'][0]['output_dim']

In [24]:
get_first_width(json_str)


Out[24]:
512

In [ ]:
results_df.iloc[:, -14:].applymap(lambda x : (x[0][0] + x[1][1]) * 1.0 / sum([sum(a) for a in x]))

In [32]:
results_df.to_csv("2015-12-11-mlpexperiments_results1")

In [36]:
pickle.dump( results_df, open( "2015-12-11-mlpexperiments_results1.p", "wb" ) )

In [ ]:
df = pickle.load( open( "2015-12-11-mlpexperiments_results1.p", "rb" ) )
df.head()

In [38]:
df.columns


Out[38]:
Index([u'cm_overall', u'epochs', u'network', u'pct_white', u'test_size',
       u'training_size', u'p_cm', u'P_cm', u'r_cm', u'R_cm', u'n_cm', u'N_cm',
       u'b_cm', u'B_cm', u'q_cm', u'Q_cm', u'k_cm', u'K_cm', u'white_cm',
       u'black_cm'],
      dtype='object')

In [ ]: