In [1]:
from pin import pin
import matplotlib.pyplot as plt
from collections import Counter

import json
import os
import autograd.numpy as np
import pickle as pkl

%matplotlib inline
%load_ext autoreload
%autoreload 2



In [2]:
with open('../data/batch_summary.json') as f:
    model_data = json.load(f)

In [3]:
model_data['projects'][0]


Out[3]:
{'code': 'wJ9TDy',
 'created': '2016-01-02',
 'models': [{'GMQE': 0.9800000191,
   'id': '01',
   'oligo_state': 'homo-dimer',
   'qmean': 0.3202740713,
   'seq_coverage': 1.0,
   'seq_id': 91.9191894531,
   'seq_sim': 0.5824936032,
   'status': 'COMPLETED',
   'template': '4ll3.1.B',
   'tpl_seq': 'PQITLWQRPLVTIKIGGQLKEALLDTGADDTVLEEMNLPGRWKPKMIGGIGGFIKVRQYDQILIEICGHKAIGTVLVGPTPVNIIGRNLLTQIGCTLNF',
   'trg_seq': 'PQITLWQRPLVTIKIEGQLKEALLDTGADDTVLEEINLSGKWKPKMIGGIGGFIKVGQYDQITIEICGHKVIGTVLVGPTPVNIIGRNLLTQLGCTLNF'},
  {'GMQE': 0.9800000191,
   'id': '02',
   'oligo_state': 'homo-dimer',
   'qmean': -0.9170268789,
   'seq_coverage': 1.0,
   'seq_id': 91.9191894531,
   'seq_sim': 0.5824936032,
   'status': 'COMPLETED',
   'template': '1a8g.1.A',
   'tpl_seq': 'PQITLWQRPLVTIKIGGQLKEALLDTGADDTVLEEMNLPGRWKPKMIGGIGGFIKVRQYDQILIEICGHKAIGTVLVGPTPVNIIGRNLLTQIGCTLNF',
   'trg_seq': 'PQITLWQRPLVTIKIEGQLKEALLDTGADDTVLEEINLSGKWKPKMIGGIGGFIKVGQYDQITIEICGHKVIGTVLVGPTPVNIIGRNLLTQLGCTLNF'},
  {'GMQE': 0.9800000191,
   'id': '03',
   'oligo_state': 'homo-dimer',
   'qmean': -0.5994937920000001,
   'seq_coverage': 1.0,
   'seq_id': 91.9191894531,
   'seq_sim': 0.5824936032,
   'status': 'COMPLETED',
   'template': '1vik.1.A',
   'tpl_seq': 'PQITLWQRPLVTIKIGGQLKEALLDTGADDTVLEEMNLPGRWKPKMIGGIGGFIKVRQYDQILIEICGHKAIGTVLVGPTPVNIIGRNLLTQIGCTLNF',
   'trg_seq': 'PQITLWQRPLVTIKIEGQLKEALLDTGADDTVLEEINLSGKWKPKMIGGIGGFIKVGQYDQITIEICGHKVIGTVLVGPTPVNIIGRNLLTQLGCTLNF'}],
 'target': 'PQITLWQRPLVTIKIEGQLKEALLDTGADDTVLEEINLSGKWKPKMIGGIGGFIKVGQYDQITIEICGHKVIGTVLVGPTPVNIIGRNLLTQLGCTLNF',
 'title': '259265-1',
 'type': 'TARGET_SEQUENCE'}

In [4]:
# Grab some summary statistics of the modelling.
# I'm curious to know how many times a particular template was chosen.

template_counter = Counter()
gmqe_scores = []
sequence_identity = []
sequence_similarity = []
for p in model_data['projects']:
    for m in p['models']:
        template_counter[m['template']] += 1
        gmqe_scores.append(m['GMQE'])
        sequence_identity.append(m['seq_id'])
        sequence_similarity.append(m['seq_sim'])
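
A quick sanity check on these tallies — e.g. the handful of most frequently chosen templates — can be pulled straight out of the Counter (a small sketch, not part of the original run; output omitted):

template_counter.most_common(5)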

In [7]:
plt.hist(gmqe_scores)


Out[7]:
(array([  2.00000000e+00,   0.00000000e+00,   1.00000000e+01,
          1.60000000e+01,   7.40000000e+01,   2.77000000e+02,
          1.15600000e+03,   1.06300000e+03,   2.13600000e+03,
          5.46600000e+03]),
 array([ 0.92199999,  0.92879999,  0.93559999,  0.9424    ,  0.9492    ,
         0.956     ,  0.9628    ,  0.9696    ,  0.97640001,  0.98320001,
         0.99000001]),
 <a list of 10 Patch objects>)

In [8]:
plt.hist(sequence_identity)


Out[8]:
(array([   31.,    94.,   303.,  1017.,  1114.,  1457.,  2923.,  2059.,
          959.,   243.]),
 array([  76.76767731,   79.09090958,   81.41414185,   83.73737411,
          86.06060638,   88.38383865,   90.70707092,   93.03030319,
          95.35353546,   97.67676773,  100.        ]),
 <a list of 10 Patch objects>)

In [9]:
plt.hist(sequence_similarity)


Out[9]:
(array([    5.,    31.,   141.,   351.,   739.,  1263.,  1859.,  3049.,
         2326.,   436.]),
 array([ 0.5494988 ,  0.55562665,  0.5617545 ,  0.56788235,  0.57401021,
         0.58013806,  0.58626591,  0.59239376,  0.59852161,  0.60464947,
         0.61077732]),
 <a list of 10 Patch objects>)

In [10]:
model_data['projects'][0]


Out[10]:
{'code': 'wJ9TDy',
 'created': '2016-01-02',
 'models': [{'GMQE': 0.9800000191,
   'id': '01',
   'oligo_state': 'homo-dimer',
   'qmean': 0.3202740713,
   'seq_coverage': 1.0,
   'seq_id': 91.9191894531,
   'seq_sim': 0.5824936032,
   'status': 'COMPLETED',
   'template': '4ll3.1.B',
   'tpl_seq': 'PQITLWQRPLVTIKIGGQLKEALLDTGADDTVLEEMNLPGRWKPKMIGGIGGFIKVRQYDQILIEICGHKAIGTVLVGPTPVNIIGRNLLTQIGCTLNF',
   'trg_seq': 'PQITLWQRPLVTIKIEGQLKEALLDTGADDTVLEEINLSGKWKPKMIGGIGGFIKVGQYDQITIEICGHKVIGTVLVGPTPVNIIGRNLLTQLGCTLNF'},
  {'GMQE': 0.9800000191,
   'id': '02',
   'oligo_state': 'homo-dimer',
   'qmean': -0.9170268789,
   'seq_coverage': 1.0,
   'seq_id': 91.9191894531,
   'seq_sim': 0.5824936032,
   'status': 'COMPLETED',
   'template': '1a8g.1.A',
   'tpl_seq': 'PQITLWQRPLVTIKIGGQLKEALLDTGADDTVLEEMNLPGRWKPKMIGGIGGFIKVRQYDQILIEICGHKAIGTVLVGPTPVNIIGRNLLTQIGCTLNF',
   'trg_seq': 'PQITLWQRPLVTIKIEGQLKEALLDTGADDTVLEEINLSGKWKPKMIGGIGGFIKVGQYDQITIEICGHKVIGTVLVGPTPVNIIGRNLLTQLGCTLNF'},
  {'GMQE': 0.9800000191,
   'id': '03',
   'oligo_state': 'homo-dimer',
   'qmean': -0.5994937920000001,
   'seq_coverage': 1.0,
   'seq_id': 91.9191894531,
   'seq_sim': 0.5824936032,
   'status': 'COMPLETED',
   'template': '1vik.1.A',
   'tpl_seq': 'PQITLWQRPLVTIKIGGQLKEALLDTGADDTVLEEMNLPGRWKPKMIGGIGGFIKVRQYDQILIEICGHKAIGTVLVGPTPVNIIGRNLLTQIGCTLNF',
   'trg_seq': 'PQITLWQRPLVTIKIEGQLKEALLDTGADDTVLEEINLSGKWKPKMIGGIGGFIKVGQYDQITIEICGHKVIGTVLVGPTPVNIIGRNLLTQLGCTLNF'}],
 'target': 'PQITLWQRPLVTIKIEGQLKEALLDTGADDTVLEEINLSGKWKPKMIGGIGGFIKVGQYDQITIEICGHKVIGTVLVGPTPVNIIGRNLLTQLGCTLNF',
 'title': '259265-1',
 'type': 'TARGET_SEQUENCE'}

In [25]:
# Write the python script here
script = """
import networkx as nx
import pickle
from pin.pin import ProteinInteractionNetwork

p = ProteinInteractionNetwork("model_01.pdb")
with open("model_01.pkl", "wb") as f:
    pickle.dump(p, f)
"""
print(script)


import networkx as nx
import pickle
from pin.pin import ProteinInteractionNetwork

p = ProteinInteractionNetwork("model_01.pdb")
with open("model_01.pkl", "wb") as f:
    pickle.dump(p, f)


In [26]:
# Write the bash script here
bash = """
#!/bin/sh
#$ -S /bin/sh
#$ -cwd
#$ -V
#$ -m e
#$ -M ericmjl@mit.edu
#$ -pe whole_nodes 1
#############################################

python script.py
"""

print(bash)


#!/bin/sh 
#$ -S /bin/sh 
#$ -cwd 
#$ -V 
#$ -m e 
#$ -M ericmjl@mit.edu 
#$ -pe whole_nodes 1 
#############################################

python script.py


In [27]:
# Write a master script for q-subbing
qsub = """
#!/bin/sh
#$ -S /bin/sh
#$ -cwd
#$ -V
#$ -m e
#$ -M ericmjl@mit.edu
#$ -pe whole_nodes 1
#############################################

"""

print(qsub)


#!/bin/sh 
#$ -S /bin/sh 
#$ -cwd 
#$ -V 
#$ -m e 
#$ -M ericmjl@mit.edu 
#$ -pe whole_nodes 1 
#############################################



In [28]:
# Write script to disk
mdl_dir = '../data/batch_models/'
for project in model_data['projects']:
    code = project['code']
    with open('{0}/{1}/script.py'.format(mdl_dir, code), 'w+') as f:
        f.write(script)
    with open('{0}/{1}/{1}.sh'.format(mdl_dir, code), 'w+') as f:
        f.write(bash)
        

with open('{0}/master.sh'.format(mdl_dir), 'w+') as f:
    f.write(qsub)
    for project in model_data['projects']:
        
        f.write('cd {0}\n'.format(project['code']))
        f.write('qsub {0}.sh\n'.format(project['code']))
        f.write('cd ..\n')
        f.write('\n')
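
The generated script.py assumes that a model_01.pdb already sits in each project directory. Before qsub-ing everything, a quick existence check along these lines (a sketch, not part of the original run) can flag projects whose models did not download:

import os
missing = [proj['code'] for proj in model_data['projects']
           if not os.path.exists('{0}/{1}/model_01.pdb'.format(mdl_dir, proj['code']))]
print('{0} projects are missing model_01.pdb'.format(len(missing)))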

Prototype for parallelizing graph construction and running deep learning on the resulting graphs


In [14]:
import pandas as pd

protease_data = pd.read_csv('../data/hiv_data/hiv-protease-data-expanded.csv', index_col=0)
protease_data.describe()


Out[14]:
               ATV          DRV          FPV          IDV          LPV          NFV          SQV          SeqID          TPV       weight
count  5051.000000  2838.000000  6660.000000  6654.000000  6049.000000  6868.000000  6663.000000    6979.000000  4329.000000  6979.000000
mean     60.787349    22.294856    23.945511    29.115104    55.100380    49.808736    77.044349  110069.076659     9.378517     0.249033
std     138.450445    77.386476    58.589808    65.169955    99.343837   105.497697   214.590607   61028.133923    38.791570     0.316821
min       0.300000     0.200000     0.100000     0.100000     0.100000     0.100000     0.100000    2996.000000     0.200000     0.020833
25%       1.000000     0.700000     0.800000     0.900000     0.900000     1.300000     0.800000   64725.000000     0.800000     0.031250
50%       7.400000     1.000000     2.100000     4.400000     7.800000    10.000000     2.500000  112389.000000     1.300000     0.125000
75%      46.000000     7.000000    16.000000    28.000000    74.000000    53.000000    30.150000  143350.000000     3.500000     0.250000
max     700.000000   580.000000   400.000000   500.000000   500.000000   600.000000  1000.000000  259265.000000   800.000000     1.000000

Based on the number of samples available for each drug, let us start with NFV, which has the largest number of measured values. (Note that the cells below end up working with the FPV column.)


In [15]:
protease_data['NFV'].describe()


Out[15]:
count    6868.000000
mean       49.808736
std       105.497697
min         0.100000
25%         1.300000
50%        10.000000
75%        53.000000
max       600.000000
Name: NFV, dtype: float64

I will log-transform these values, and then Z-standardize them.


In [16]:
import sklearn.preprocessing as pp
# from sklearn.preprocessing import scale
import numpy as np

# Drop NaN values on only the FPV column
fpv_data = protease_data.dropna(subset=['FPV'])[['FPV', 'seqid']]
fpv_data['FPV'] = fpv_data['FPV'].apply(np.log10)
# fpv_data['FPV_scaled'] = pp.scale(fpv_data['FPV'])
fpv_data.hist()
fpv_data.head()


Out[16]:
        FPV   seqid
0  0.397940  2996-0
1  0.397940  2996-1
2 -0.154902  4387-0
3 -0.154902  4387-1
4 -0.154902  4387-2
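
The Z-standardization mentioned above is left commented out in the cell; if it were applied, a minimal sketch of that step (assuming sklearn's preprocessing.scale, and reshaping to a column to avoid the 1-d deprecation warning seen earlier) would be:

from sklearn.preprocessing import scale

# z-standardize the log10-transformed FPV values: subtract the mean, divide by the std. dev.
fpv_data['FPV_scaled'] = scale(fpv_data['FPV'].values.reshape(-1, 1)).ravel()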

In [17]:
# Grab out a sample of projects, based on the seqid.
import random as rnd
# seqids_interest = rnd.sample(list(fpv_data['seqid']), 25)

proj_titles = {c['title']:c['code'] for c in model_data['projects']}


projs_interest = dict()

n_graphs = 4

while len(projs_interest) < n_graphs:
    seqid = rnd.choice(list(fpv_data['seqid']))
    if seqid in proj_titles.keys():
        projs_interest[seqid] = proj_titles[seqid]

projs_interest


Out[17]:
{'102798-2': 'zB9kN6',
 '112387-1': 'a8TXLz',
 '61139-1': 'waZQPE',
 '66668-0': 'jB4TGE'}

In [18]:
fpv_data[fpv_data['seqid'].isin(projs_interest.keys())]


Out[18]:
           FPV     seqid
1639  1.397940   61139-1
1954  0.255273   66668-0
3093  0.973128  102798-2
3464  0.939519  112387-1

In [19]:
projects = list(projs_interest.values())
seqids = list(projs_interest.keys())

p = pin.ProteinInteractionNetwork('../data/batch_models/{0}/model_01.pdb'.format(projects[0]))
p.graph['project'] = projects[0]
p.graph['input_shape'] = p.nodes(data=True)[0][1]['features'].shape
p.graph['seqid'] = seqids[0]

In [20]:
# On a single core, this task takes about 2 hours to finish over 3401 graphs; joblib is used below to parallelize it across cores.
from tqdm import tqdm
from joblib import Parallel, delayed
graphs = []

def make_protein_graphs(project, seqid):
    """
    Custom function for this notebook to parallelize the making of protein graphs over individual cores.
    """
    from pin import pin
    p = pin.ProteinInteractionNetwork('../data/batch_models/{0}/model_01.pdb'.format(project))
    p.graph['project'] = project
    p.graph['input_shape'] = p.nodes(data=True)[0][1]['features'].shape
    p.graph['seqid'] = seqid
    return p

graphs = Parallel(n_jobs=-1)(delayed(make_protein_graphs)(proj, seqid) for seqid, proj in projs_interest.items())
len(graphs)


Out[20]:
4

In [21]:
# Check a sample of the graphs to make sure that the input shapes are correct.
graphs[0].graph['input_shape']


Out[21]:
(1, 36)

In [22]:
from graphfp.layers import FingerprintLayer, GraphInputLayer, LinearRegressionLayer, GraphConvLayer,\
    FullyConnectedLayer, DropoutLayer
from graphfp.utils import initialize_network, batch_sample

In [23]:
graphs[0].nodes(data=True)[0][1]['features'].shape


Out[23]:
(1, 36)

In [33]:
def train_loss(wb_vect, unflattener, batch=True, batch_size=2, debug=False):
    """
    Training loss is MSE.

    We pass in a flattened parameter vector and its unflattener.
    """
    wb_struct = unflattener(wb_vect)

    if not batch:
        batch_size = len(graphs)

    samp_graphs, samp_inputs = batch_sample(graphs, input_shape, batch_size)

    preds = predict(wb_struct, samp_inputs, samp_graphs)
    graph_ids = [g.graph['seqid'] for g in samp_graphs]
    # graph_scores = fpv_data[fpv_data['seqid'].isin(graph_ids)]['FPV'].values.reshape(preds.shape)
    graph_scores = fpv_data.set_index('seqid').ix[graph_ids]['FPV'].values.reshape(preds.shape)
    assert preds.shape == graph_scores.shape
    
    if debug:
        print(graph_ids)
        print('Predictions:')
        print(preds)
        print('Mean: {0}'.format(np.mean(preds)))
        print('')
        print('Actual')
        print(graph_scores)
        print('Mean: {0}'.format(np.mean(graph_scores)))
        print('')
        print('Difference')
        print(preds - graph_scores)
        print('Mean Squared Error: {0}'.format(np.mean(np.power(preds - graph_scores, 2))))
        print('')
    # print(preds.shape, graph_scores.shape)
    # assert preds.shape[1] == graph_scores.shape[1]
    # print(preds - graph_scores)
    mse = np.mean(np.power(preds - graph_scores, 2))
    return mse

In [34]:
# fpv_data[fpv_data['seqid'].isin(['2996-1', '2996-0'])]
graph_ids = ['4387-1', '2996-1', '2996-0', '4482-1']
fpv_data.set_index('seqid').ix[graph_ids]['FPV'].values.reshape((4,1))


Out[34]:
array([[-0.15490196],
       [ 0.39794001],
       [ 0.39794001],
       [ 0.59106461]])

In [35]:
def predict(wb_struct, inputs, graphs):
    """
    Makes predictions by running the forward pass over all of the layers.

    Parameters:
    ===========
    - wb_struct: a dictionary of weights and biases stored for each layer.
    - inputs: the input data matrix. should be one row per graph.
    - graphs: a list of all graphs.
    """
    curr_inputs = inputs

    for i, layer in enumerate(layers):
        # print(type(wb_struct))
        wb = wb_struct['layer{0}_{1}'.format(i, layer)]
        curr_inputs = layer.forward_pass(wb, curr_inputs, graphs)
    return curr_inputs

In [36]:
def callback(wb, i):
    """
    Any function you want to run at each iteration of the optimization.
    """
    # from time import time
    from graphfp.flatten import flatten

    # new_time = time()
    wb_vect, wb_unflattener = flatten(wb)
    print('Iteration: {0}'.format(i))
    # print('Computing gradient w.r.t. weights...')

    print('Training Loss: ')
    
    tl = train_loss(wb_vect, wb_unflattener, batch=False)
    print(tl)
    print('')

    training_losses.append(tl)

In [37]:
from autograd import grad
grad_tl = grad(train_loss)

In [38]:
len(graphs[0])


Out[38]:
198

In [39]:
import networkx as nx
from collections import Counter
Counter([len(graphs[0].neighbors(n)) for n in graphs[0].nodes()])


Out[39]:
Counter({0: 2,
         1: 9,
         2: 69,
         3: 42,
         4: 12,
         5: 14,
         6: 16,
         7: 12,
         8: 9,
         9: 5,
         10: 4,
         11: 4})

In [63]:
from graphfp.optimizers import sgd, adam

input_shape = graphs[0].graph['input_shape']
layers = [# GraphConvLayer((input_shape[1], input_shape[1] * 2)),
          # GraphConvLayer((input_shape[1], input_shape[1])),
          # GraphConvLayer((input_shape[1], input_shape[1])),
          GraphConvLayer((input_shape[1], input_shape[1])),
          FingerprintLayer(input_shape[1]),
          # FullyConnectedLayer((input_shape[1], input_shape[1])),
          # DropoutLayer(p=0),
          # FullyConnectedLayer((input_shape[1], input_shape[1])),
          # DropoutLayer(p=0.3),
          # FullyConnectedLayer((input_shape[1], input_shape[1])),
          LinearRegressionLayer((input_shape[1], 1)),
]

wb_all = initialize_network(input_shape, graphs, layers)

training_losses = list()

In [64]:
from graphfp.flatten import flatten
wb_vect, wb_unflattener = flatten(wb_all)
train_loss(wb_vect, wb_unflattener, debug=True)


['61139-1', '66668-0']
Predictions:
[[ 0.00202669]
 [ 0.00200087]]
Mean: 0.0020137820125656902

Actual
[[ 1.39794001]
 [ 0.25527251]]
Mean: 0.8266062568876719

Difference
[[-1.39591332]
 [-0.25327163]]
Mean Squared Error: 1.0063602570175394

Out[64]:
1.0063602570175394
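
Before handing the problem to adam, a single hand-rolled gradient-descent step gives a feel for what the optimizer does at each iteration (a sketch only, not part of the training run; the 0.001 step size is arbitrary):

# gradient of the training loss with respect to the flattened parameter vector
g = grad_tl(wb_vect, wb_unflattener)
# one plain gradient-descent update on the flattened parameters
wb_vect_stepped = wb_vect - 0.001 * g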

In [65]:
wb_vect, wb_unflattener = adam(grad_tl, wb_all, callback=callback, num_iters=1000)
wb_all = wb_unflattener(wb_vect)


Iteration: 0
Training Loss: 
0.958683847731

Iteration: 1
Training Loss: 
0.85574625148

Iteration: 2
Training Loss: 
0.692849730205

Iteration: 3
Training Loss: 
0.470798874929

Iteration: 4
Training Loss: 
0.25610279

Iteration: 5
Training Loss: 
0.142365458042

Iteration: 6
Training Loss: 
0.213195179494

Iteration: 7
Training Loss: 
0.247360795651

Iteration: 8
Training Loss: 
0.194211675821

Iteration: 9
Training Loss: 
0.141595420563

Iteration: 10
Training Loss: 
0.142453463102

Iteration: 11
Training Loss: 
0.182790006674

Iteration: 12
Training Loss: 
0.224292844583

Iteration: 13
Training Loss: 
0.252211207652

Iteration: 14
Training Loss: 
0.275144920839

Iteration: 15
Training Loss: 
0.271139273145

Iteration: 16
Training Loss: 
0.260295271967

Iteration: 17
Training Loss: 
0.232559707405

Iteration: 18
Training Loss: 
0.183562419011

Iteration: 19
Training Loss: 
0.136512923223

Iteration: 20
Training Loss: 
0.119878363623

[... training-loss log truncated for readability: the loss keeps decreasing with oscillations, dropping below 0.1 by roughly iteration 40 and below 0.01 around iteration 165, then fluctuating mostly in the 1e-5 to 1e-2 range; the log shown here ends mid-run at iteration 889 with a training loss of about 2.5e-4 ...]
Iteration: 890
Training Loss: 
0.000193672417015

Iteration: 891
Training Loss: 
0.00010810032115

Iteration: 892
Training Loss: 
6.90387442523e-05

Iteration: 893
Training Loss: 
8.67078802077e-05

Iteration: 894
Training Loss: 
0.000117908739116

Iteration: 895
Training Loss: 
0.000215601420108

Iteration: 896
Training Loss: 
0.000529735925738

Iteration: 897
Training Loss: 
0.000727485394465

Iteration: 898
Training Loss: 
0.000737749795819

Iteration: 899
Training Loss: 
0.000676749180248

Iteration: 900
Training Loss: 
0.000394919359115

Iteration: 901
Training Loss: 
0.000123718920975

Iteration: 902
Training Loss: 
2.54270231728e-05

Iteration: 903
Training Loss: 
0.000157357403979

Iteration: 904
Training Loss: 
0.000439689288171

Iteration: 905
Training Loss: 
0.000713109181025

Iteration: 906
Training Loss: 
0.000720753879076

Iteration: 907
Training Loss: 
0.000717649543264

Iteration: 908
Training Loss: 
0.00047725114823

Iteration: 909
Training Loss: 
6.05337182161e-05

Iteration: 910
Training Loss: 
0.000117028148235

Iteration: 911
Training Loss: 
0.000548179476341

Iteration: 912
Training Loss: 
0.00143962382137

Iteration: 913
Training Loss: 
0.00251730580662

Iteration: 914
Training Loss: 
0.00268131346487

Iteration: 915
Training Loss: 
0.00236273561905

Iteration: 916
Training Loss: 
0.00149329415214

Iteration: 917
Training Loss: 
0.000592127630066

Iteration: 918
Training Loss: 
0.00026871073166

Iteration: 919
Training Loss: 
0.000263929405677

Iteration: 920
Training Loss: 
0.000385022098651

Iteration: 921
Training Loss: 
0.00052129277865

Iteration: 922
Training Loss: 
0.000699540430957

Iteration: 923
Training Loss: 
0.000546677071294

Iteration: 924
Training Loss: 
0.00042509216253

Iteration: 925
Training Loss: 
0.000462341123041

Iteration: 926
Training Loss: 
0.000994171744999

Iteration: 927
Training Loss: 
0.00170837824558

Iteration: 928
Training Loss: 
0.00220865931653

Iteration: 929
Training Loss: 
0.00209692069481

Iteration: 930
Training Loss: 
0.0014200406214

Iteration: 931
Training Loss: 
0.000748311525493

Iteration: 932
Training Loss: 
0.000475071797282

Iteration: 933
Training Loss: 
0.000390311768131

Iteration: 934
Training Loss: 
0.000473845963851

Iteration: 935
Training Loss: 
0.000419937420579

Iteration: 936
Training Loss: 
0.000363534432409

Iteration: 937
Training Loss: 
0.000199204203594

Iteration: 938
Training Loss: 
0.000221687928836

Iteration: 939
Training Loss: 
0.000495614645658

Iteration: 940
Training Loss: 
0.000708371331389

Iteration: 941
Training Loss: 
0.000830087659655

Iteration: 942
Training Loss: 
0.000719200489275

Iteration: 943
Training Loss: 
0.000544569080232

Iteration: 944
Training Loss: 
0.000333284597731

Iteration: 945
Training Loss: 
0.000230798593033

Iteration: 946
Training Loss: 
0.000228245036813

Iteration: 947
Training Loss: 
0.000256095732448

Iteration: 948
Training Loss: 
0.000278716319947

Iteration: 949
Training Loss: 
0.000349231672967

Iteration: 950
Training Loss: 
0.000203509344355

Iteration: 951
Training Loss: 
0.00014608064106

Iteration: 952
Training Loss: 
0.000325291552769

Iteration: 953
Training Loss: 
0.000824563601028

Iteration: 954
Training Loss: 
0.00162817082193

Iteration: 955
Training Loss: 
0.00203104590256

Iteration: 956
Training Loss: 
0.00186265255766

Iteration: 957
Training Loss: 
0.00120022145494

Iteration: 958
Training Loss: 
0.000669957687949

Iteration: 959
Training Loss: 
0.000459303449371

Iteration: 960
Training Loss: 
0.000412496710751

Iteration: 961
Training Loss: 
0.000377110263789

Iteration: 962
Training Loss: 
0.000444036453775

Iteration: 963
Training Loss: 
0.000687690451372

Iteration: 964
Training Loss: 
0.000627053699108

Iteration: 965
Training Loss: 
0.000308086051891

Iteration: 966
Training Loss: 
0.000319328590319

Iteration: 967
Training Loss: 
0.00065256499894

Iteration: 968
Training Loss: 
0.001021958797

Iteration: 969
Training Loss: 
0.00114289998359

Iteration: 970
Training Loss: 
0.00106239051464

Iteration: 971
Training Loss: 
0.000731151579246

Iteration: 972
Training Loss: 
0.000304567029842

Iteration: 973
Training Loss: 
3.54935116938e-05

Iteration: 974
Training Loss: 
6.22990469956e-05

Iteration: 975
Training Loss: 
0.000367856541874

Iteration: 976
Training Loss: 
0.000626650500059

Iteration: 977
Training Loss: 
0.000470594298905

Iteration: 978
Training Loss: 
0.000290308212694

Iteration: 979
Training Loss: 
0.00015887958223

Iteration: 980
Training Loss: 
0.000116282644625

Iteration: 981
Training Loss: 
0.000348842641412

Iteration: 982
Training Loss: 
0.000508322963302

Iteration: 983
Training Loss: 
0.000437963948041

Iteration: 984
Training Loss: 
0.000320748960114

Iteration: 985
Training Loss: 
0.000204450040878

Iteration: 986
Training Loss: 
0.000129423926566

Iteration: 987
Training Loss: 
7.93785405712e-05

Iteration: 988
Training Loss: 
5.83113476423e-05

Iteration: 989
Training Loss: 
5.69681684601e-05

Iteration: 990
Training Loss: 
0.000115260957557

Iteration: 991
Training Loss: 
0.000243061448623

Iteration: 992
Training Loss: 
0.000605042463426

Iteration: 993
Training Loss: 
0.00077997101784

Iteration: 994
Training Loss: 
0.000734477056309

Iteration: 995
Training Loss: 
0.000749621116123

Iteration: 996
Training Loss: 
0.000573997758414

Iteration: 997
Training Loss: 
0.000316116921867

Iteration: 998
Training Loss: 
0.000237726465208

Iteration: 999
Training Loss: 
0.000167354587972


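The log above is the tail end of the optimization run. For orientation, here is a minimal sketch of the kind of loop that produces this output and fills the training_losses list plotted in the next cell. It is only a sketch: it assumes wb_vect and wb_unflattener come from flatten(wb_all) (as in the debug cell further down), that train_loss returns a scalar loss with debug off by default, and it uses plain gradient descent with an arbitrary step size in place of whatever optimizer was actually used.

# Sketch only -- not the original training cell.
from autograd import grad

dloss_dwb = grad(train_loss)   # gradient w.r.t. the flat parameter vector
step_size = 0.001              # assumed learning rate

training_losses = []
for i in range(1000):
    loss = train_loss(wb_vect, wb_unflattener)
    wb_vect = wb_vect - step_size * dloss_dwb(wb_vect, wb_unflattener)
    training_losses.append(loss)
    print('Iteration: {0}'.format(i))
    print('Training Loss: ')
    print(loss)
    print('')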
In [69]:
# Training loss per iteration, plotted on a log scale so the oscillations are visible.
plt.plot(training_losses)
plt.yscale('log')



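The raw loss curve oscillates a lot, which makes the log-scale plot above noisy. A rolling mean makes the overall trend easier to read; the sketch below assumes np (the autograd.numpy import) behaves like plain numpy here, and the 25-iteration window is an arbitrary choice.

# Smooth the oscillating loss with a simple moving average (sketch only).
window = 25
kernel = np.ones(window) / window
smoothed = np.convolve(training_losses, kernel, mode='valid')

plt.plot(training_losses, alpha=0.3, label='raw')
plt.plot(range(window - 1, len(training_losses)), smoothed, label='rolling mean')
plt.yscale('log')
plt.xlabel('iteration')
plt.ylabel('training loss')
plt.legend()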
In [70]:
from graphfp.flatten import flatten

# Flatten the nested weights/biases dict into a single parameter vector,
# then evaluate the training loss once with debug printing switched on.
wb_vect, wb_unflattener = flatten(wb_all)
train_loss(wb_vect, wb_unflattener, debug=True)


['66668-0', '112387-1']
Predictions:
[[ 0.26481597]
 [ 0.94829197]]
Mean: 0.6065539727816811

Actual
[[ 0.25527251]
 [ 0.93951925]]
Mean: 0.5973958788609622

Difference
[[ 0.00954347]
 [ 0.00877272]]
Mean Squared Error: 8.40191984243525e-05

Out[70]:
8.4019198424352507e-05

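As a quick sanity check (not part of the original run), the value returned above is just the mean of the squared differences printed in the debug output:

# Recompute the MSE from the two printed differences; it should match ~8.40e-05.
diffs = np.array([0.00954347, 0.00877272])
print(np.mean(diffs ** 2))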
In [72]:
wb_all['layer0_GraphConvLayer']['weights'].max()


Out[72]:
0.18730440864022327

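The maximum on its own doesn't say much about the layer's weights. A quick look at the whole distribution is more informative; this sketch assumes the 'weights' entry is an ordinary array that plt.hist can handle.

# Distribution of the layer-0 GraphConvLayer weights, not just their maximum.
w = wb_all['layer0_GraphConvLayer']['weights']
print(w.min(), w.mean(), w.max())
plt.hist(np.ravel(w), bins=30)
plt.xlabel('weight value')
plt.ylabel('count')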
In [ ]: