Install the packages needed for boosted decision trees (LightGBM) and SHAP explanations


In [2]:
!pip install lightgbm
!pip install shap


Requirement already satisfied: lightgbm in /usr/local/lib/python3.6/dist-packages (2.2.3)
Requirement already satisfied: scipy in /usr/local/lib/python3.6/dist-packages (from lightgbm) (1.4.1)
Requirement already satisfied: scikit-learn in /usr/local/lib/python3.6/dist-packages (from lightgbm) (0.22.2.post1)
Requirement already satisfied: numpy in /usr/local/lib/python3.6/dist-packages (from lightgbm) (1.18.4)
Requirement already satisfied: joblib>=0.11 in /usr/local/lib/python3.6/dist-packages (from scikit-learn->lightgbm) (0.14.1)
Collecting shap
  Downloading https://files.pythonhosted.org/packages/a8/77/b504e43e21a2ba543a1ac4696718beb500cfa708af2fb57cb54ce299045c/shap-0.35.0.tar.gz (273kB)
     |████████████████████████████████| 276kB 6.1MB/s 
Requirement already satisfied: numpy in /usr/local/lib/python3.6/dist-packages (from shap) (1.18.4)
Requirement already satisfied: scipy in /usr/local/lib/python3.6/dist-packages (from shap) (1.4.1)
Requirement already satisfied: scikit-learn in /usr/local/lib/python3.6/dist-packages (from shap) (0.22.2.post1)
Requirement already satisfied: pandas in /usr/local/lib/python3.6/dist-packages (from shap) (1.0.3)
Requirement already satisfied: tqdm>4.25.0 in /usr/local/lib/python3.6/dist-packages (from shap) (4.41.1)
Requirement already satisfied: joblib>=0.11 in /usr/local/lib/python3.6/dist-packages (from scikit-learn->shap) (0.14.1)
Requirement already satisfied: pytz>=2017.2 in /usr/local/lib/python3.6/dist-packages (from pandas->shap) (2018.9)
Requirement already satisfied: python-dateutil>=2.6.1 in /usr/local/lib/python3.6/dist-packages (from pandas->shap) (2.8.1)
Requirement already satisfied: six>=1.5 in /usr/local/lib/python3.6/dist-packages (from python-dateutil>=2.6.1->pandas->shap) (1.12.0)
Building wheels for collected packages: shap
  Building wheel for shap (setup.py) ... done
  Created wheel for shap: filename=shap-0.35.0-cp36-cp36m-linux_x86_64.whl size=394115 sha256=d457e47cb9b879ed3b83bd1f2b598f729413fab7b6e716fa13415eda482398a1
  Stored in directory: /root/.cache/pip/wheels/e7/f7/0f/b57055080cf8894906b3bd3616d2fc2bfd0b12d5161bcb24ac
Successfully built shap
Installing collected packages: shap
Successfully installed shap-0.35.0
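
The versions resolved above are lightgbm 2.2.3 and shap 0.35.0; to reproduce this exact environment you can pin them explicitly (optional, a minimal sketch):


In [ ]:
!pip install lightgbm==2.2.3 shap==0.35.0
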

Import packages and define helper functions and variables


In [3]:
%tensorflow_version 1.x
import lzma
from google.colab import drive
import numpy as np
import tensorflow as tf
import keras
from keras import backend as K
from keras.layers import Input, Dense
from keras.models import Model
import matplotlib.pyplot as plt
import lightgbm as lgb
import shap
import sklearn
from sklearn import svm
from sklearn import preprocessing
from sklearn import datasets
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import AdaBoostRegressor
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
from sklearn.metrics import precision_recall_curve
# from sklearn.metrics import plot_precision_recall_curve
from sklearn.metrics import average_precision_score
from sklearn.metrics import roc_curve, auc
from sklearn.preprocessing import label_binarize
from sklearn.multiclass import OneVsRestClassifier
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
from sklearn.decomposition import PCA
from sklearn.svm import SVC
#from scipy import interp
from sklearn.metrics import roc_auc_score

def READ_XZ(filename):
  # Decompress an .xz file and return its contents as a flat float32 array
  with lzma.LZMAFile(filename) as file:
    type_bytes = file.read()
  return np.frombuffer(type_bytes, dtype='float32')

def Count(array, val):
  # Fraction of entries in `array` strictly greater than `val`
  return float(np.count_nonzero(array > val)) / array.shape[0]

width = 40
batch_size = 200
ModelName = "Model_40_24_8_24_40_40"

# Run on the Colab GPU under TensorFlow 1.x
config = tf.ConfigProto(device_count={'GPU': 1, 'CPU': 2})
sess = tf.Session(config=config)
keras.backend.set_session(sess)
K.tensorflow_backend._get_available_gpus()


TensorFlow 1.x selected.
Using TensorFlow backend.
Out[3]:
['/job:localhost/replica:0/task:0/device:GPU:0']

Define the autoencoder model and the training and evaluation functions


In [4]:
# Input: a flattened 40x40 image
input_img = Input(shape=(width*width,))

# Encoder: 40*40 -> 24*24 -> 8*8 bottleneck
Layer1 = Dense(24*24, activation='relu')(input_img)
Layer2 = Dense(8*8, activation='relu')(Layer1)
# Decoder: 8*8 -> 24*24 -> 40*40
Layer3 = Dense(24*24, activation='relu')(Layer2)
Layer4 = Dense(40*40, activation='relu')(Layer3)
# Softmax output, so the reconstructed pixels sum to one
Out = Dense(40*40, activation='softmax')(Layer4)

# This model maps an input to its reconstruction
autoencoder = Model(input_img, Out)
autoencoder.compile(optimizer='adam', loss='mean_squared_error')

def NAME(eventtype, purpose, i, obs):
  # Path convention: ./<eventtype>/<purpose>/<obs>.<i>.bin.xz
  return "./" + eventtype + "/" + purpose + "/" + obs + "." + str(i) + ".bin.xz"
#

def EvalOnFile(InFileName, OutFileName):
  # Run the autoencoder over every event in InFileName and write the
  # per-event reconstruction error (L2 norm of input minus output)
  data = READ_XZ(InFileName)
  x_train = data.reshape(-1, width*width)
  x_out = autoencoder.predict(x_train, batch_size=batch_size, use_multiprocessing=True)
  diff = x_train - x_out
  # Cast to float64 so the on-disk format matches np.fromfile's default
  lrnorm = np.linalg.norm(diff, axis=1).astype(np.float64)
  lrnorm.tofile(OutFileName)
  print(lrnorm.shape)
BATCH_SIZE = 512

def TrainOnFile(filename, testfilename, totalepochs):
  # Train on one compressed file, validate on another, then checkpoint the model
  data = READ_XZ(filename)
  x_train = data.reshape(-1, width*width)
  datatest = READ_XZ(testfilename)
  x_test = datatest.reshape(-1, width*width)
  autoencoder.fit(
      x_train, x_train, epochs=totalepochs,
      batch_size=BATCH_SIZE, shuffle=True,
      validation_data=(x_test, x_test)
  )
  autoencoder.save(ModelName)


WARNING:tensorflow:From /tensorflow-1.15.2/python3.6/tensorflow_core/python/ops/resource_variable_ops.py:1630: calling BaseResourceVariable.__init__ (from tensorflow.python.ops.resource_variable_ops) with constraint is deprecated and will be removed in a future version.
Instructions for updating:
If using Keras pass *_constraint arguments to layers.
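
EvalOnFile and Count together act as an anomaly tagger: the per-event reconstruction error is the discriminant, and Count turns an error threshold into a pass fraction. A minimal sketch of how they combine once the data directory is mounted below (the file index, output paths, and threshold here are placeholder assumptions, not values from this notebook):


In [ ]:
# Write per-event reconstruction errors for one signal (TOP) and one
# background (QCD) test file; output paths are hypothetical.
EvalOnFile(NAME("TOP", "TEST", 0, "out"), "./toploss.0.bin")
EvalOnFile(NAME("QCD", "TEST", 0, "out"), "./qcdloss.0.bin")

top_loss = np.fromfile("./toploss.0.bin")  # float64, as written by ndarray.tofile
qcd_loss = np.fromfile("./qcdloss.0.bin")

cut = 0.002  # hypothetical threshold on the reconstruction error
print("signal efficiency :", Count(top_loss, cut))
print("background mistag :", Count(qcd_loss, cut))
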

Mount Google Drive to access the data

Get the data from the link [https://drive.google.com/drive/folders/1_voPoiETqfWmCmBeUCKiF5oXqzbr-ZWt?usp=sharing] (the link is public; anyone can download the image data and the trained models)


In [5]:
drive.mount('/gdrive')
%cd /gdrive


Go to this URL in a browser: https://accounts.google.com/o/oauth2/auth?client_id=947318989803-6bn6qk8qdgf4n4g3pfee6491hc0brc4i.apps.googleusercontent.com&redirect_uri=urn%3aietf%3awg%3aoauth%3a2.0%3aoob&response_type=code&scope=email%20https%3a%2f%2fwww.googleapis.com%2fauth%2fdocs.test%20https%3a%2f%2fwww.googleapis.com%2fauth%2fdrive%20https%3a%2f%2fwww.googleapis.com%2fauth%2fdrive.photos.readonly%20https%3a%2f%2fwww.googleapis.com%2fauth%2fpeopleapi.readonly

Enter your authorization code:
··········
Mounted at /gdrive
/gdrive

Check that the files exist and make a backup copy of the autoencoder model


In [6]:
%cd /gdrive/My Drive/S2
!ls
!cp ./Model_40_24_8_24_40_40 ../Model_40_24_8_24_40_40.bak
!ls ../
# !tar -xf S2.tar


/gdrive/My Drive/S2
Model_40_24_8_24_40_40	QCD  TOP  topeff_loss
 anime		    DATA_SCIENCE_PROBLEM	 myget
'Colab Notebooks'   Model_40_24_8_24_40_40.bak	 programs.squashfs-xz
 DABBA_FOLDER	    myencoder			 S2

Change into the main data directory and load the trained model


In [7]:
%cd /gdrive/My Drive/S2
autoencoder = keras.models.load_model(ModelName)


/gdrive/My Drive/S2
WARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/keras/backend/tensorflow_backend.py:422: The name tf.global_variables is deprecated. Please use tf.compat.v1.global_variables instead.
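
As a quick sanity check that the restored model matches the 40x40 → 24x24 → 8x8 → 24x24 → 40x40 → 40x40 architecture defined above, you can print its summary (optional):


In [ ]:
autoencoder.summary()
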

Train another round if required

(You are strongly advised not to do this and to just use the trained model instead...)
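
If you do retrain, note in the logs below that the validation loss plateaus almost immediately. A hedged alternative (not part of the original notebook) is to swap the fit call inside TrainOnFile for an early-stopping variant; EarlyStopping is standard Keras, and the patience value here is an assumption:


In [ ]:
from keras.callbacks import EarlyStopping

# Drop-in replacement for the autoencoder.fit(...) call in TrainOnFile:
# stop once val_loss has not improved for 3 consecutive epochs and roll
# back to the best weights seen so far.
stopper = EarlyStopping(monitor='val_loss', patience=3, restore_best_weights=True)
autoencoder.fit(
    x_train, x_train, epochs=totalepochs,
    batch_size=BATCH_SIZE, shuffle=True,
    validation_data=(x_test, x_test),
    callbacks=[stopper]
)
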


In [0]:
%cd /gdrive/My Drive/S2
#autoencoder = keras.models.load_model(ModelName)
#!ls ./TOP/TRAIN/*.*.bin.xz
#

# 20 rounds: the 7 QCD TRAIN files and then the 3 QCD VAL files,
# cycling through the 3 QCD TEST files for validation, 10 epochs per file
for e in range(20):
  for i in range(7):
    TrainOnFile(NAME("QCD", "TRAIN", i, "out"), NAME("QCD", "TEST", i % 3, "out"), 10)
  #
  for i in range(3):
    TrainOnFile(NAME("QCD", "VAL", i, "out"), NAME("QCD", "TEST", i % 3, "out"), 10)
  #
#


/gdrive/My Drive/S2
Train on 99999 samples, validate on 99996 samples
Epoch 1/10
99999/99999 [==============================] - 4s 36us/step - loss: 5.3439e-06 - val_loss: 6.0588e-06
Epoch 2/10
99999/99999 [==============================] - 3s 34us/step - loss: 5.2064e-06 - val_loss: 6.0297e-06
Epoch 3/10
99999/99999 [==============================] - 3s 34us/step - loss: 5.1515e-06 - val_loss: 6.0431e-06
Epoch 4/10
99999/99999 [==============================] - 3s 33us/step - loss: 5.1172e-06 - val_loss: 6.0204e-06
Epoch 5/10
99999/99999 [==============================] - 3s 33us/step - loss: 5.0928e-06 - val_loss: 6.0374e-06
Epoch 6/10
99999/99999 [==============================] - 3s 33us/step - loss: 5.0727e-06 - val_loss: 6.0436e-06
Epoch 7/10
99999/99999 [==============================] - 3s 34us/step - loss: 5.0526e-06 - val_loss: 6.0485e-06
Epoch 8/10
99999/99999 [==============================] - 3s 34us/step - loss: 5.0372e-06 - val_loss: 6.0457e-06
Epoch 9/10
99999/99999 [==============================] - 3s 34us/step - loss: 5.0232e-06 - val_loss: 6.0580e-06
Epoch 10/10
99999/99999 [==============================] - 3s 34us/step - loss: 5.0119e-06 - val_loss: 6.0527e-06
(remaining output abridged: each subsequent TrainOnFile call prints the same per-epoch log, with training sets of roughly 100000, 5523, or 1503 samples and validation sets of 99996 or 1914; over subsequent rounds the training loss drifts down to about 4.6e-06 while the validation loss stays between roughly 5.6e-06 and 6.2e-06)
100000/100000 [==============================] - 3s 33us/step - loss: 4.9443e-06 - val_loss: 5.9019e-06
Epoch 5/10
100000/100000 [==============================] - 3s 33us/step - loss: 4.9200e-06 - val_loss: 5.9102e-06
Epoch 6/10
100000/100000 [==============================] - 3s 33us/step - loss: 4.8953e-06 - val_loss: 5.9307e-06
Epoch 7/10
100000/100000 [==============================] - 3s 33us/step - loss: 4.8777e-06 - val_loss: 5.9100e-06
Epoch 8/10
100000/100000 [==============================] - 3s 33us/step - loss: 4.8649e-06 - val_loss: 5.9292e-06
Epoch 9/10
100000/100000 [==============================] - 3s 33us/step - loss: 4.8504e-06 - val_loss: 5.9199e-06
Epoch 10/10
100000/100000 [==============================] - 3s 33us/step - loss: 4.8361e-06 - val_loss: 5.9481e-06
Train on 99998 samples, validate on 1914 samples
Epoch 1/10
99998/99998 [==============================] - 2s 24us/step - loss: 5.1297e-06 - val_loss: 5.6435e-06
Epoch 2/10
99998/99998 [==============================] - 2s 23us/step - loss: 4.9975e-06 - val_loss: 5.6279e-06
Epoch 3/10
99998/99998 [==============================] - 2s 24us/step - loss: 4.9410e-06 - val_loss: 5.6243e-06
Epoch 4/10
99998/99998 [==============================] - 2s 24us/step - loss: 4.9026e-06 - val_loss: 5.6303e-06
Epoch 5/10
99998/99998 [==============================] - 2s 23us/step - loss: 4.8753e-06 - val_loss: 5.6438e-06
Epoch 6/10
99998/99998 [==============================] - 2s 23us/step - loss: 4.8573e-06 - val_loss: 5.6432e-06
Epoch 7/10
99998/99998 [==============================] - 2s 24us/step - loss: 4.8384e-06 - val_loss: 5.6298e-06
Epoch 8/10
99998/99998 [==============================] - 2s 23us/step - loss: 4.8224e-06 - val_loss: 5.6582e-06
Epoch 9/10
99998/99998 [==============================] - 2s 23us/step - loss: 4.8072e-06 - val_loss: 5.6450e-06
Epoch 10/10
99998/99998 [==============================] - 2s 23us/step - loss: 4.7957e-06 - val_loss: 5.6411e-06
Train on 5523 samples, validate on 99996 samples
Epoch 1/10
5523/5523 [==============================] - 1s 243us/step - loss: 5.1557e-06 - val_loss: 5.9437e-06
Epoch 2/10
5523/5523 [==============================] - 1s 223us/step - loss: 4.9713e-06 - val_loss: 5.9731e-06
Epoch 3/10
5523/5523 [==============================] - 1s 211us/step - loss: 4.8639e-06 - val_loss: 5.9780e-06
Epoch 4/10
5523/5523 [==============================] - 1s 205us/step - loss: 4.7866e-06 - val_loss: 6.0007e-06
Epoch 5/10
5523/5523 [==============================] - 1s 203us/step - loss: 4.7339e-06 - val_loss: 6.0249e-06
Epoch 6/10
5523/5523 [==============================] - 1s 222us/step - loss: 4.6892e-06 - val_loss: 6.0248e-06
Epoch 7/10
5523/5523 [==============================] - 1s 204us/step - loss: 4.6562e-06 - val_loss: 6.0275e-06
Epoch 8/10
5523/5523 [==============================] - 1s 204us/step - loss: 4.6276e-06 - val_loss: 6.0258e-06
Epoch 9/10
5523/5523 [==============================] - 1s 203us/step - loss: 4.6032e-06 - val_loss: 6.0449e-06
Epoch 10/10
5523/5523 [==============================] - 1s 205us/step - loss: 4.5825e-06 - val_loss: 6.0555e-06
Train on 99998 samples, validate on 99996 samples
Epoch 1/10
99998/99998 [==============================] - 3s 34us/step - loss: 5.2130e-06 - val_loss: 5.9565e-06
Epoch 2/10
99998/99998 [==============================] - 3s 33us/step - loss: 5.0554e-06 - val_loss: 5.9027e-06
Epoch 3/10
99998/99998 [==============================] - 3s 33us/step - loss: 4.9944e-06 - val_loss: 5.9220e-06
Epoch 4/10
99998/99998 [==============================] - 3s 33us/step - loss: 4.9553e-06 - val_loss: 5.8970e-06
Epoch 5/10
99998/99998 [==============================] - 3s 33us/step - loss: 4.9277e-06 - val_loss: 5.9165e-06
Epoch 6/10
99998/99998 [==============================] - 3s 33us/step - loss: 4.9052e-06 - val_loss: 5.9398e-06
Epoch 7/10
99998/99998 [==============================] - 3s 33us/step - loss: 4.8865e-06 - val_loss: 5.9293e-06
Epoch 8/10
99998/99998 [==============================] - 3s 33us/step - loss: 4.8697e-06 - val_loss: 5.9199e-06
Epoch 9/10
99998/99998 [==============================] - 3s 33us/step - loss: 4.8537e-06 - val_loss: 5.9416e-06
Epoch 10/10
99998/99998 [==============================] - 3s 33us/step - loss: 4.8414e-06 - val_loss: 5.9419e-06
Train on 99999 samples, validate on 99996 samples
Epoch 1/10
99999/99999 [==============================] - 3s 35us/step - loss: 5.1619e-06 - val_loss: 5.9093e-06
Epoch 2/10
99999/99999 [==============================] - 3s 33us/step - loss: 5.0246e-06 - val_loss: 5.9022e-06
Epoch 3/10
99999/99999 [==============================] - 3s 33us/step - loss: 4.9632e-06 - val_loss: 5.9091e-06
Epoch 4/10
99999/99999 [==============================] - 3s 33us/step - loss: 4.9276e-06 - val_loss: 5.9068e-06
Epoch 5/10
99999/99999 [==============================] - 3s 33us/step - loss: 4.8990e-06 - val_loss: 5.9324e-06
Epoch 6/10
99999/99999 [==============================] - 3s 33us/step - loss: 4.8796e-06 - val_loss: 5.9290e-06
Epoch 7/10
99999/99999 [==============================] - 3s 33us/step - loss: 4.8604e-06 - val_loss: 5.9145e-06
Epoch 8/10
99999/99999 [==============================] - 3s 33us/step - loss: 4.8428e-06 - val_loss: 5.9455e-06
Epoch 9/10
99999/99999 [==============================] - 3s 33us/step - loss: 4.8268e-06 - val_loss: 5.9383e-06
Epoch 10/10
99999/99999 [==============================] - 3s 33us/step - loss: 4.8140e-06 - val_loss: 5.9237e-06
Train on 1503 samples, validate on 1914 samples
Epoch 1/10
1503/1503 [==============================] - 0s 38us/step - loss: 5.1444e-06 - val_loss: 5.6615e-06
Epoch 2/10
1503/1503 [==============================] - 0s 43us/step - loss: 4.9893e-06 - val_loss: 5.7157e-06
Epoch 3/10
1503/1503 [==============================] - 0s 44us/step - loss: 4.8803e-06 - val_loss: 5.7349e-06
Epoch 4/10
1503/1503 [==============================] - 0s 48us/step - loss: 4.7952e-06 - val_loss: 5.7118e-06
Epoch 5/10
1503/1503 [==============================] - 0s 44us/step - loss: 4.7276e-06 - val_loss: 5.7276e-06
Epoch 6/10
1503/1503 [==============================] - 0s 41us/step - loss: 4.6714e-06 - val_loss: 5.7501e-06
Epoch 7/10
1503/1503 [==============================] - 0s 42us/step - loss: 4.6238e-06 - val_loss: 5.7559e-06
Epoch 8/10
1503/1503 [==============================] - 0s 43us/step - loss: 4.5832e-06 - val_loss: 5.7536e-06
Epoch 9/10
1503/1503 [==============================] - 0s 40us/step - loss: 4.5449e-06 - val_loss: 5.7714e-06
Epoch 10/10
1503/1503 [==============================] - 0s 43us/step - loss: 4.5140e-06 - val_loss: 5.7780e-06
Train on 99999 samples, validate on 99996 samples
Epoch 1/10
99999/99999 [==============================] - 4s 35us/step - loss: 5.1097e-06 - val_loss: 5.8818e-06
Epoch 2/10
99999/99999 [==============================] - 3s 34us/step - loss: 4.9540e-06 - val_loss: 5.8838e-06
Epoch 3/10
99999/99999 [==============================] - 3s 33us/step - loss: 4.9028e-06 - val_loss: 5.8668e-06
Epoch 4/10
99999/99999 [==============================] - 3s 33us/step - loss: 4.8666e-06 - val_loss: 5.8689e-06
Epoch 5/10
99999/99999 [==============================] - 3s 33us/step - loss: 4.8394e-06 - val_loss: 5.8846e-06
Epoch 6/10
99999/99999 [==============================] - 3s 33us/step - loss: 4.8161e-06 - val_loss: 5.8880e-06
Epoch 7/10
99999/99999 [==============================] - 3s 33us/step - loss: 4.7979e-06 - val_loss: 5.9076e-06
Epoch 8/10
99999/99999 [==============================] - 3s 33us/step - loss: 4.7834e-06 - val_loss: 5.9063e-06
Epoch 9/10
99999/99999 [==============================] - 3s 33us/step - loss: 4.7692e-06 - val_loss: 5.9003e-06
Epoch 10/10
99999/99999 [==============================] - 3s 33us/step - loss: 4.7564e-06 - val_loss: 5.9275e-06
Train on 99999 samples, validate on 99996 samples
Epoch 1/10
99999/99999 [==============================] - 3s 35us/step - loss: 5.1587e-06 - val_loss: 5.8744e-06
Epoch 2/10
99999/99999 [==============================] - 3s 33us/step - loss: 5.0132e-06 - val_loss: 5.8844e-06
Epoch 3/10
99999/99999 [==============================] - 3s 33us/step - loss: 4.9599e-06 - val_loss: 5.8932e-06
Epoch 4/10
99999/99999 [==============================] - 3s 33us/step - loss: 4.9222e-06 - val_loss: 5.8841e-06
Epoch 5/10
99999/99999 [==============================] - 3s 34us/step - loss: 4.8962e-06 - val_loss: 5.8987e-06
Epoch 6/10
99999/99999 [==============================] - 3s 33us/step - loss: 4.8711e-06 - val_loss: 5.9022e-06
Epoch 7/10
99999/99999 [==============================] - 3s 34us/step - loss: 4.8516e-06 - val_loss: 5.9000e-06
Epoch 8/10
99999/99999 [==============================] - 3s 34us/step - loss: 4.8398e-06 - val_loss: 5.9126e-06
Epoch 9/10
99999/99999 [==============================] - 3s 34us/step - loss: 4.8251e-06 - val_loss: 5.8957e-06
Epoch 10/10
99999/99999 [==============================] - 3s 33us/step - loss: 4.8104e-06 - val_loss: 5.9307e-06
Train on 99998 samples, validate on 1914 samples
Epoch 1/10
99998/99998 [==============================] - 2s 24us/step - loss: 5.1025e-06 - val_loss: 5.6548e-06
Epoch 2/10
99998/99998 [==============================] - 2s 23us/step - loss: 4.9595e-06 - val_loss: 5.6568e-06
Epoch 3/10
99998/99998 [==============================] - 2s 24us/step - loss: 4.9037e-06 - val_loss: 5.6178e-06
Epoch 4/10
99998/99998 [==============================] - 2s 23us/step - loss: 4.8701e-06 - val_loss: 5.6329e-06
Epoch 5/10
99998/99998 [==============================] - 2s 23us/step - loss: 4.8441e-06 - val_loss: 5.6183e-06
Epoch 6/10
99998/99998 [==============================] - 2s 23us/step - loss: 4.8206e-06 - val_loss: 5.6305e-06
Epoch 7/10
99998/99998 [==============================] - 2s 24us/step - loss: 4.8042e-06 - val_loss: 5.6315e-06
Epoch 8/10
99998/99998 [==============================] - 2s 24us/step - loss: 4.7861e-06 - val_loss: 5.6417e-06
Epoch 9/10
99998/99998 [==============================] - 2s 23us/step - loss: 4.7720e-06 - val_loss: 5.6316e-06
Epoch 10/10
99998/99998 [==============================] - 2s 23us/step - loss: 4.7599e-06 - val_loss: 5.6524e-06
Train on 99996 samples, validate on 99996 samples
Epoch 1/10
99996/99996 [==============================] - 3s 35us/step - loss: 5.1033e-06 - val_loss: 5.8759e-06
Epoch 2/10
99996/99996 [==============================] - 3s 34us/step - loss: 4.9595e-06 - val_loss: 5.8954e-06
Epoch 3/10
99996/99996 [==============================] - 3s 33us/step - loss: 4.9005e-06 - val_loss: 5.8805e-06
Epoch 4/10
99996/99996 [==============================] - 3s 33us/step - loss: 4.8658e-06 - val_loss: 5.8810e-06
Epoch 5/10
99996/99996 [==============================] - 3s 34us/step - loss: 4.8394e-06 - val_loss: 5.8763e-06
Epoch 6/10
99996/99996 [==============================] - 3s 33us/step - loss: 4.8184e-06 - val_loss: 5.8947e-06
Epoch 7/10
99996/99996 [==============================] - 3s 33us/step - loss: 4.8028e-06 - val_loss: 5.9037e-06
Epoch 8/10
99996/99996 [==============================] - 3s 33us/step - loss: 4.7835e-06 - val_loss: 5.8952e-06
Epoch 9/10
99996/99996 [==============================] - 3s 33us/step - loss: 4.7685e-06 - val_loss: 5.8989e-06
Epoch 10/10
99996/99996 [==============================] - 3s 33us/step - loss: 4.7583e-06 - val_loss: 5.9134e-06
Train on 100000 samples, validate on 99996 samples
Epoch 1/10
100000/100000 [==============================] - 3s 34us/step - loss: 5.1291e-06 - val_loss: 5.9018e-06
Epoch 2/10
100000/100000 [==============================] - 3s 33us/step - loss: 4.9823e-06 - val_loss: 5.8638e-06
Epoch 3/10
100000/100000 [==============================] - 3s 33us/step - loss: 4.9231e-06 - val_loss: 5.8670e-06
Epoch 4/10
100000/100000 [==============================] - 3s 33us/step - loss: 4.8889e-06 - val_loss: 5.8816e-06
Epoch 5/10
100000/100000 [==============================] - 3s 33us/step - loss: 4.8627e-06 - val_loss: 5.8752e-06
Epoch 6/10
100000/100000 [==============================] - 4s 36us/step - loss: 4.8396e-06 - val_loss: 5.8851e-06
Epoch 7/10
100000/100000 [==============================] - 3s 34us/step - loss: 4.8239e-06 - val_loss: 5.8834e-06
Epoch 8/10
100000/100000 [==============================] - 3s 34us/step - loss: 4.8073e-06 - val_loss: 5.8999e-06
Epoch 9/10
100000/100000 [==============================] - 3s 35us/step - loss: 4.7916e-06 - val_loss: 5.9234e-06
Epoch 10/10
100000/100000 [==============================] - 4s 37us/step - loss: 4.7797e-06 - val_loss: 5.9206e-06
Train on 99998 samples, validate on 1914 samples
Epoch 1/10
99998/99998 [==============================] - 3s 25us/step - loss: 5.0814e-06 - val_loss: 5.6120e-06
Epoch 2/10
99998/99998 [==============================] - 2s 24us/step - loss: 4.9499e-06 - val_loss: 5.6053e-06
Epoch 3/10
99998/99998 [==============================] - 2s 24us/step - loss: 4.8862e-06 - val_loss: 5.6093e-06
Epoch 4/10
99998/99998 [==============================] - 2s 23us/step - loss: 4.8554e-06 - val_loss: 5.6087e-06
Epoch 5/10
99998/99998 [==============================] - 2s 24us/step - loss: 4.8258e-06 - val_loss: 5.5924e-06
Epoch 6/10
99998/99998 [==============================] - 2s 24us/step - loss: 4.8009e-06 - val_loss: 5.6095e-06
Epoch 7/10
99998/99998 [==============================] - 2s 24us/step - loss: 4.7829e-06 - val_loss: 5.6289e-06
Epoch 8/10
99998/99998 [==============================] - 3s 25us/step - loss: 4.7701e-06 - val_loss: 5.5923e-06
Epoch 9/10
99998/99998 [==============================] - 2s 25us/step - loss: 4.7552e-06 - val_loss: 5.6086e-06
Epoch 10/10
99998/99998 [==============================] - 2s 25us/step - loss: 4.7417e-06 - val_loss: 5.6332e-06
Train on 5523 samples, validate on 99996 samples
Epoch 1/10
5523/5523 [==============================] - 1s 234us/step - loss: 5.1110e-06 - val_loss: 5.8856e-06
Epoch 2/10
5523/5523 [==============================] - 1s 225us/step - loss: 4.9169e-06 - val_loss: 5.9269e-06
Epoch 3/10
5523/5523 [==============================] - 1s 237us/step - loss: 4.8087e-06 - val_loss: 5.9436e-06
Epoch 4/10
5523/5523 [==============================] - 1s 205us/step - loss: 4.7345e-06 - val_loss: 5.9613e-06
Epoch 5/10
5523/5523 [==============================] - 1s 203us/step - loss: 4.6801e-06 - val_loss: 5.9735e-06
Epoch 6/10
5523/5523 [==============================] - 1s 204us/step - loss: 4.6394e-06 - val_loss: 5.9764e-06
Epoch 7/10
5523/5523 [==============================] - 1s 204us/step - loss: 4.6102e-06 - val_loss: 5.9900e-06
Epoch 8/10
5523/5523 [==============================] - 1s 209us/step - loss: 4.5844e-06 - val_loss: 6.0101e-06
Epoch 9/10
5523/5523 [==============================] - 1s 208us/step - loss: 4.5608e-06 - val_loss: 6.0148e-06
Epoch 10/10
5523/5523 [==============================] - 1s 211us/step - loss: 4.5374e-06 - val_loss: 6.0152e-06
Train on 99998 samples, validate on 99996 samples
Epoch 1/10
99998/99998 [==============================] - 4s 37us/step - loss: 5.1601e-06 - val_loss: 5.8991e-06
Epoch 2/10
99998/99998 [==============================] - 3s 35us/step - loss: 5.0012e-06 - val_loss: 5.8778e-06
Epoch 3/10
99998/99998 [==============================] - 3s 34us/step - loss: 4.9420e-06 - val_loss: 5.8800e-06
Epoch 4/10
99998/99998 [==============================] - 3s 34us/step - loss: 4.8992e-06 - val_loss: 5.8623e-06
Epoch 5/10
99998/99998 [==============================] - 3s 34us/step - loss: 4.8729e-06 - val_loss: 5.8881e-06
Epoch 6/10
99998/99998 [==============================] - 3s 34us/step - loss: 4.8509e-06 - val_loss: 5.9002e-06
Epoch 7/10
99998/99998 [==============================] - 3s 34us/step - loss: 4.8315e-06 - val_loss: 5.9038e-06
Epoch 8/10
99998/99998 [==============================] - 3s 34us/step - loss: 4.8156e-06 - val_loss: 5.9098e-06
Epoch 9/10
99998/99998 [==============================] - 3s 34us/step - loss: 4.8018e-06 - val_loss: 5.9308e-06
Epoch 10/10
99998/99998 [==============================] - 3s 34us/step - loss: 4.7878e-06 - val_loss: 5.9341e-06
Train on 99999 samples, validate on 99996 samples
Epoch 1/10
99999/99999 [==============================] - 4s 36us/step - loss: 5.1128e-06 - val_loss: 5.8884e-06
Epoch 2/10
99999/99999 [==============================] - 3s 34us/step - loss: 4.9691e-06 - val_loss: 5.8590e-06
Epoch 3/10
99999/99999 [==============================] - 3s 34us/step - loss: 4.9089e-06 - val_loss: 5.8690e-06
Epoch 4/10
99999/99999 [==============================] - 3s 34us/step - loss: 4.8736e-06 - val_loss: 5.8819e-06
Epoch 5/10
99999/99999 [==============================] - 3s 34us/step - loss: 4.8474e-06 - val_loss: 5.8654e-06
Epoch 6/10
99999/99999 [==============================] - 3s 34us/step - loss: 4.8238e-06 - val_loss: 5.8735e-06
Epoch 7/10
99999/99999 [==============================] - 3s 34us/step - loss: 4.8051e-06 - val_loss: 5.8923e-06
Epoch 8/10
99999/99999 [==============================] - 3s 34us/step - loss: 4.7893e-06 - val_loss: 5.9065e-06
Epoch 9/10
99999/99999 [==============================] - 3s 34us/step - loss: 4.7765e-06 - val_loss: 5.9075e-06
Epoch 10/10
99999/99999 [==============================] - 3s 34us/step - loss: 4.7617e-06 - val_loss: 5.9023e-06
Train on 1503 samples, validate on 1914 samples
Epoch 1/10
1503/1503 [==============================] - 0s 44us/step - loss: 5.1284e-06 - val_loss: 5.6929e-06
Epoch 2/10
1503/1503 [==============================] - 0s 46us/step - loss: 4.9391e-06 - val_loss: 5.7197e-06
Epoch 3/10
1503/1503 [==============================] - 0s 44us/step - loss: 4.8358e-06 - val_loss: 5.7340e-06
Epoch 4/10
1503/1503 [==============================] - 0s 43us/step - loss: 4.7567e-06 - val_loss: 5.7268e-06
Epoch 5/10
1503/1503 [==============================] - 0s 43us/step - loss: 4.6921e-06 - val_loss: 5.7448e-06
Epoch 6/10
1503/1503 [==============================] - 0s 40us/step - loss: 4.6308e-06 - val_loss: 5.7691e-06
Epoch 7/10
1503/1503 [==============================] - 0s 42us/step - loss: 4.5834e-06 - val_loss: 5.7767e-06
Epoch 8/10
1503/1503 [==============================] - 0s 49us/step - loss: 4.5403e-06 - val_loss: 5.7517e-06
Epoch 9/10
1503/1503 [==============================] - 0s 40us/step - loss: 4.4967e-06 - val_loss: 5.7771e-06
Epoch 10/10
1503/1503 [==============================] - 0s 43us/step - loss: 4.4654e-06 - val_loss: 5.7906e-06
Train on 99999 samples, validate on 99996 samples
Epoch 1/10
99999/99999 [==============================] - 4s 36us/step - loss: 5.0669e-06 - val_loss: 5.8674e-06
Epoch 2/10
99999/99999 [==============================] - 3s 34us/step - loss: 4.9087e-06 - val_loss: 5.8612e-06
Epoch 3/10
99999/99999 [==============================] - 3s 34us/step - loss: 4.8517e-06 - val_loss: 5.8618e-06
Epoch 4/10
99999/99999 [==============================] - 3s 34us/step - loss: 4.8173e-06 - val_loss: 5.8531e-06
Epoch 5/10
99999/99999 [==============================] - 3s 34us/step - loss: 4.7882e-06 - val_loss: 5.8606e-06
Epoch 6/10
99999/99999 [==============================] - 3s 34us/step - loss: 4.7666e-06 - val_loss: 5.8720e-06
Epoch 7/10
99999/99999 [==============================] - 3s 34us/step - loss: 4.7463e-06 - val_loss: 5.8758e-06
Epoch 8/10
99999/99999 [==============================] - 3s 34us/step - loss: 4.7321e-06 - val_loss: 5.8653e-06
Epoch 9/10
99999/99999 [==============================] - 3s 34us/step - loss: 4.7188e-06 - val_loss: 5.8861e-06
Epoch 10/10
99999/99999 [==============================] - 3s 34us/step - loss: 4.7071e-06 - val_loss: 5.9026e-06
Train on 99999 samples, validate on 99996 samples
Epoch 1/10
99999/99999 [==============================] - 4s 36us/step - loss: 5.1098e-06 - val_loss: 5.8705e-06
Epoch 2/10
99999/99999 [==============================] - 3s 34us/step - loss: 4.9669e-06 - val_loss: 5.8447e-06
Epoch 3/10
99999/99999 [==============================] - 3s 34us/step - loss: 4.9078e-06 - val_loss: 5.8694e-06
Epoch 4/10
99999/99999 [==============================] - 3s 34us/step - loss: 4.8717e-06 - val_loss: 5.8403e-06
Epoch 5/10
99999/99999 [==============================] - 3s 34us/step - loss: 4.8437e-06 - val_loss: 5.8545e-06
Epoch 6/10
99999/99999 [==============================] - 3s 33us/step - loss: 4.8236e-06 - val_loss: 5.8736e-06
Epoch 7/10
99999/99999 [==============================] - 3s 34us/step - loss: 4.8024e-06 - val_loss: 5.8466e-06
Epoch 8/10
99999/99999 [==============================] - 3s 34us/step - loss: 4.7888e-06 - val_loss: 5.8739e-06
Epoch 9/10
99999/99999 [==============================] - 3s 34us/step - loss: 4.7748e-06 - val_loss: 5.8806e-06
Epoch 10/10
99999/99999 [==============================] - 3s 34us/step - loss: 4.7624e-06 - val_loss: 5.8823e-06
Train on 99998 samples, validate on 1914 samples
Epoch 1/10
99998/99998 [==============================] - 2s 25us/step - loss: 5.0450e-06 - val_loss: 5.6411e-06
Epoch 2/10
99998/99998 [==============================] - 2s 24us/step - loss: 4.9144e-06 - val_loss: 5.6169e-06
Epoch 3/10
99998/99998 [==============================] - 2s 24us/step - loss: 4.8542e-06 - val_loss: 5.5766e-06
Epoch 4/10
99998/99998 [==============================] - 2s 23us/step - loss: 4.8203e-06 - val_loss: 5.6084e-06
Epoch 5/10
99998/99998 [==============================] - 2s 24us/step - loss: 4.7948e-06 - val_loss: 5.6206e-06
Epoch 6/10
99998/99998 [==============================] - 2s 24us/step - loss: 4.7733e-06 - val_loss: 5.5808e-06
Epoch 7/10
99998/99998 [==============================] - 2s 24us/step - loss: 4.7553e-06 - val_loss: 5.6113e-06
Epoch 8/10
99998/99998 [==============================] - 2s 24us/step - loss: 4.7395e-06 - val_loss: 5.6032e-06
Epoch 9/10
99998/99998 [==============================] - 2s 24us/step - loss: 4.7240e-06 - val_loss: 5.6228e-06
Epoch 10/10
99998/99998 [==============================] - 2s 24us/step - loss: 4.7120e-06 - val_loss: 5.6320e-06
Train on 99996 samples, validate on 99996 samples
Epoch 1/10
99996/99996 [==============================] - 4s 35us/step - loss: 5.0525e-06 - val_loss: 5.8483e-06
Epoch 2/10
99996/99996 [==============================] - 3s 34us/step - loss: 4.9154e-06 - val_loss: 5.8494e-06
Epoch 3/10
99996/99996 [==============================] - 3s 34us/step - loss: 4.8538e-06 - val_loss: 5.8602e-06
Epoch 4/10
99996/99996 [==============================] - 3s 34us/step - loss: 4.8169e-06 - val_loss: 5.8318e-06
Epoch 5/10
99996/99996 [==============================] - 3s 34us/step - loss: 4.7929e-06 - val_loss: 5.8496e-06
Epoch 6/10
99996/99996 [==============================] - 3s 34us/step - loss: 4.7714e-06 - val_loss: 5.8621e-06
Epoch 7/10
99996/99996 [==============================] - 3s 34us/step - loss: 4.7535e-06 - val_loss: 5.8748e-06
Epoch 8/10
99996/99996 [==============================] - 3s 34us/step - loss: 4.7381e-06 - val_loss: 5.8713e-06
Epoch 9/10
99996/99996 [==============================] - 3s 34us/step - loss: 4.7254e-06 - val_loss: 5.8966e-06
Epoch 10/10
99996/99996 [==============================] - 3s 34us/step - loss: 4.7129e-06 - val_loss: 5.8816e-06
Train on 100000 samples, validate on 99996 samples
Epoch 1/10
100000/100000 [==============================] - 4s 36us/step - loss: 5.0754e-06 - val_loss: 5.8494e-06
Epoch 2/10
100000/100000 [==============================] - 3s 34us/step - loss: 4.9368e-06 - val_loss: 5.8507e-06
Epoch 3/10
100000/100000 [==============================] - 3s 34us/step - loss: 4.8766e-06 - val_loss: 5.8478e-06
Epoch 4/10
100000/100000 [==============================] - 3s 34us/step - loss: 4.8423e-06 - val_loss: 5.8460e-06
Epoch 5/10
100000/100000 [==============================] - 3s 34us/step - loss: 4.8125e-06 - val_loss: 5.8645e-06
Epoch 6/10
100000/100000 [==============================] - 3s 33us/step - loss: 4.7904e-06 - val_loss: 5.8591e-06
Epoch 7/10
100000/100000 [==============================] - 3s 33us/step - loss: 4.7720e-06 - val_loss: 5.8673e-06
Epoch 8/10
100000/100000 [==============================] - 3s 34us/step - loss: 4.7555e-06 - val_loss: 5.8768e-06
Epoch 9/10
100000/100000 [==============================] - 3s 34us/step - loss: 4.7406e-06 - val_loss: 5.8711e-06
Epoch 10/10
100000/100000 [==============================] - 3s 34us/step - loss: 4.7283e-06 - val_loss: 5.8746e-06
Train on 99998 samples, validate on 1914 samples
Epoch 1/10
99998/99998 [==============================] - 2s 25us/step - loss: 5.0399e-06 - val_loss: 5.6273e-06
Epoch 2/10
99998/99998 [==============================] - 2s 24us/step - loss: 4.9001e-06 - val_loss: 5.6419e-06
Epoch 3/10
99998/99998 [==============================] - 3s 25us/step - loss: 4.8390e-06 - val_loss: 5.5959e-06
Epoch 4/10
99998/99998 [==============================] - 2s 24us/step - loss: 4.8008e-06 - val_loss: 5.5616e-06
Epoch 5/10
99998/99998 [==============================] - 2s 24us/step - loss: 4.7749e-06 - val_loss: 5.6069e-06
Epoch 6/10
99998/99998 [==============================] - 2s 24us/step - loss: 4.7505e-06 - val_loss: 5.5954e-06
Epoch 7/10
99998/99998 [==============================] - 2s 24us/step - loss: 4.7326e-06 - val_loss: 5.5919e-06
Epoch 8/10
99998/99998 [==============================] - 2s 24us/step - loss: 4.7186e-06 - val_loss: 5.5954e-06
Epoch 9/10
99998/99998 [==============================] - 2s 25us/step - loss: 4.7033e-06 - val_loss: 5.5952e-06
Epoch 10/10
99998/99998 [==============================] - 2s 23us/step - loss: 4.6903e-06 - val_loss: 5.6114e-06
Train on 5523 samples, validate on 99996 samples
Epoch 1/10
5523/5523 [==============================] - 1s 245us/step - loss: 5.0602e-06 - val_loss: 5.8815e-06
Epoch 2/10
5523/5523 [==============================] - 1s 220us/step - loss: 4.8787e-06 - val_loss: 5.9015e-06
Epoch 3/10
5523/5523 [==============================] - 1s 211us/step - loss: 4.7643e-06 - val_loss: 5.9101e-06
Epoch 4/10
5523/5523 [==============================] - 1s 210us/step - loss: 4.6865e-06 - val_loss: 5.9253e-06
Epoch 5/10
5523/5523 [==============================] - 1s 209us/step - loss: 4.6288e-06 - val_loss: 5.9387e-06
Epoch 6/10
5523/5523 [==============================] - 1s 224us/step - loss: 4.5831e-06 - val_loss: 5.9532e-06
Epoch 7/10
5523/5523 [==============================] - 1s 209us/step - loss: 4.5482e-06 - val_loss: 5.9573e-06
Epoch 8/10
5523/5523 [==============================] - 1s 206us/step - loss: 4.5212e-06 - val_loss: 5.9776e-06
Epoch 9/10
5523/5523 [==============================] - 1s 207us/step - loss: 4.4984e-06 - val_loss: 5.9863e-06
Epoch 10/10
5523/5523 [==============================] - 1s 204us/step - loss: 4.4765e-06 - val_loss: 5.9850e-06
Train on 99998 samples, validate on 99996 samples
Epoch 1/10
99998/99998 [==============================] - 4s 35us/step - loss: 5.1032e-06 - val_loss: 5.8685e-06
Epoch 2/10
99998/99998 [==============================] - 3s 34us/step - loss: 4.9459e-06 - val_loss: 5.8642e-06
Epoch 3/10
99998/99998 [==============================] - 3s 34us/step - loss: 4.8811e-06 - val_loss: 5.8504e-06
Epoch 4/10
99998/99998 [==============================] - 3s 34us/step - loss: 4.8452e-06 - val_loss: 5.8438e-06
Epoch 5/10
99998/99998 [==============================] - 3s 34us/step - loss: 4.8154e-06 - val_loss: 5.8689e-06
Epoch 6/10
99998/99998 [==============================] - 3s 34us/step - loss: 4.7954e-06 - val_loss: 5.8493e-06
Epoch 7/10
99998/99998 [==============================] - 3s 34us/step - loss: 4.7762e-06 - val_loss: 5.8608e-06
Epoch 8/10
99998/99998 [==============================] - 3s 34us/step - loss: 4.7617e-06 - val_loss: 5.8683e-06
Epoch 9/10
99998/99998 [==============================] - 3s 34us/step - loss: 4.7462e-06 - val_loss: 5.8858e-06
Epoch 10/10
99998/99998 [==============================] - 3s 34us/step - loss: 4.7345e-06 - val_loss: 5.9099e-06
Train on 99999 samples, validate on 99996 samples
Epoch 1/10
99999/99999 [==============================] - 3s 35us/step - loss: 5.0635e-06 - val_loss: 5.8594e-06
Epoch 2/10
99999/99999 [==============================] - 3s 34us/step - loss: 4.9185e-06 - val_loss: 5.8297e-06
Epoch 3/10
99999/99999 [==============================] - 3s 33us/step - loss: 4.8625e-06 - val_loss: 5.8438e-06
Epoch 4/10
99999/99999 [==============================] - 3s 33us/step - loss: 4.8245e-06 - val_loss: 5.8600e-06
Epoch 5/10
99999/99999 [==============================] - 3s 33us/step - loss: 4.7957e-06 - val_loss: 5.8645e-06
Epoch 6/10
99999/99999 [==============================] - 3s 33us/step - loss: 4.7726e-06 - val_loss: 5.8548e-06
Epoch 7/10
99999/99999 [==============================] - 3s 33us/step - loss: 4.7548e-06 - val_loss: 5.8687e-06
Epoch 8/10
99999/99999 [==============================] - 3s 34us/step - loss: 4.7370e-06 - val_loss: 5.8670e-06
Epoch 9/10
99999/99999 [==============================] - 3s 34us/step - loss: 4.7229e-06 - val_loss: 5.8762e-06
Epoch 10/10
99999/99999 [==============================] - 3s 33us/step - loss: 4.7125e-06 - val_loss: 5.8768e-06
Train on 1503 samples, validate on 1914 samples
Epoch 1/10
1503/1503 [==============================] - 0s 46us/step - loss: 5.0626e-06 - val_loss: 5.6826e-06
Epoch 2/10
1503/1503 [==============================] - 0s 43us/step - loss: 4.8916e-06 - val_loss: 5.7617e-06
Epoch 3/10
1503/1503 [==============================] - 0s 44us/step - loss: 4.8129e-06 - val_loss: 5.7587e-06
Epoch 4/10
1503/1503 [==============================] - 0s 45us/step - loss: 4.7129e-06 - val_loss: 5.7487e-06
Epoch 5/10
1503/1503 [==============================] - 0s 45us/step - loss: 4.6575e-06 - val_loss: 5.7348e-06
Epoch 6/10
1503/1503 [==============================] - 0s 43us/step - loss: 4.5808e-06 - val_loss: 5.7573e-06
Epoch 7/10
1503/1503 [==============================] - 0s 49us/step - loss: 4.5313e-06 - val_loss: 5.7527e-06
Epoch 8/10
1503/1503 [==============================] - 0s 42us/step - loss: 4.4841e-06 - val_loss: 5.7535e-06
Epoch 9/10
1503/1503 [==============================] - 0s 42us/step - loss: 4.4478e-06 - val_loss: 5.7784e-06
Epoch 10/10
1503/1503 [==============================] - 0s 43us/step - loss: 4.4144e-06 - val_loss: 5.7874e-06
Train on 99999 samples, validate on 99996 samples
Epoch 1/10
99999/99999 [==============================] - 3s 35us/step - loss: 5.0275e-06 - val_loss: 5.8278e-06
Epoch 2/10
99999/99999 [==============================] - 3s 33us/step - loss: 4.8670e-06 - val_loss: 5.8308e-06
Epoch 3/10
99999/99999 [==============================] - 3s 34us/step - loss: 4.8073e-06 - val_loss: 5.8199e-06
Epoch 4/10
99999/99999 [==============================] - 3s 34us/step - loss: 4.7694e-06 - val_loss: 5.8233e-06
Epoch 5/10
99999/99999 [==============================] - 3s 34us/step - loss: 4.7437e-06 - val_loss: 5.8301e-06
Epoch 6/10
99999/99999 [==============================] - 3s 33us/step - loss: 4.7216e-06 - val_loss: 5.8259e-06
Epoch 7/10
99999/99999 [==============================] - 3s 33us/step - loss: 4.7020e-06 - val_loss: 5.8437e-06
Epoch 8/10
99999/99999 [==============================] - 3s 33us/step - loss: 4.6849e-06 - val_loss: 5.8402e-06
Epoch 9/10
99999/99999 [==============================] - 3s 34us/step - loss: 4.6722e-06 - val_loss: 5.8534e-06
Epoch 10/10
99999/99999 [==============================] - 3s 34us/step - loss: 4.6600e-06 - val_loss: 5.8689e-06
Train on 99999 samples, validate on 99996 samples
Epoch 1/10
99999/99999 [==============================] - 4s 35us/step - loss: 5.0609e-06 - val_loss: 5.8287e-06
Epoch 2/10
99999/99999 [==============================] - 3s 33us/step - loss: 4.9201e-06 - val_loss: 5.8257e-06
Epoch 3/10
99999/99999 [==============================] - 3s 33us/step - loss: 4.8601e-06 - val_loss: 5.8342e-06
Epoch 4/10
99999/99999 [==============================] - 3s 33us/step - loss: 4.8206e-06 - val_loss: 5.8618e-06
Epoch 5/10
99999/99999 [==============================] - 3s 33us/step - loss: 4.7978e-06 - val_loss: 5.8539e-06
Epoch 6/10
99999/99999 [==============================] - 3s 33us/step - loss: 4.7736e-06 - val_loss: 5.8337e-06
Epoch 7/10
99999/99999 [==============================] - 3s 33us/step - loss: 4.7563e-06 - val_loss: 5.8446e-06
Epoch 8/10
99999/99999 [==============================] - 3s 34us/step - loss: 4.7429e-06 - val_loss: 5.8512e-06
Epoch 9/10
99999/99999 [==============================] - 3s 34us/step - loss: 4.7259e-06 - val_loss: 5.8553e-06
Epoch 10/10
99999/99999 [==============================] - 3s 34us/step - loss: 4.7136e-06 - val_loss: 5.8424e-06
Train on 99998 samples, validate on 1914 samples
Epoch 1/10
99998/99998 [==============================] - 2s 24us/step - loss: 5.0058e-06 - val_loss: 5.6319e-06
Epoch 2/10
99998/99998 [==============================] - 2s 24us/step - loss: 4.8683e-06 - val_loss: 5.5724e-06
Epoch 3/10
99998/99998 [==============================] - 2s 23us/step - loss: 4.8132e-06 - val_loss: 5.6035e-06
Epoch 4/10
99998/99998 [==============================] - 2s 23us/step - loss: 4.7754e-06 - val_loss: 5.6055e-06
Epoch 5/10
99998/99998 [==============================] - 2s 23us/step - loss: 4.7496e-06 - val_loss: 5.5872e-06
Epoch 6/10
99998/99998 [==============================] - 2s 23us/step - loss: 4.7281e-06 - val_loss: 5.5834e-06
Epoch 7/10
99998/99998 [==============================] - 2s 24us/step - loss: 4.7104e-06 - val_loss: 5.6028e-06
Epoch 8/10
99998/99998 [==============================] - 2s 23us/step - loss: 4.6944e-06 - val_loss: 5.6019e-06
Epoch 9/10
99998/99998 [==============================] - 2s 23us/step - loss: 4.6800e-06 - val_loss: 5.6365e-06
Epoch 10/10
99998/99998 [==============================] - 2s 23us/step - loss: 4.6714e-06 - val_loss: 5.6316e-06
Train on 99996 samples, validate on 99996 samples
Epoch 1/10
99996/99996 [==============================] - 3s 34us/step - loss: 5.0049e-06 - val_loss: 5.8309e-06
Epoch 2/10
99996/99996 [==============================] - 3s 33us/step - loss: 4.8668e-06 - val_loss: 5.8050e-06
Epoch 3/10
99996/99996 [==============================] - 3s 33us/step - loss: 4.8105e-06 - val_loss: 5.8229e-06
Epoch 4/10
99996/99996 [==============================] - 3s 33us/step - loss: 4.7738e-06 - val_loss: 5.8284e-06
Epoch 5/10
99996/99996 [==============================] - 3s 34us/step - loss: 4.7484e-06 - val_loss: 5.8278e-06
Epoch 6/10
99996/99996 [==============================] - 3s 33us/step - loss: 4.7247e-06 - val_loss: 5.8460e-06
Epoch 7/10
99996/99996 [==============================] - 3s 34us/step - loss: 4.7081e-06 - val_loss: 5.8407e-06
Epoch 8/10
99996/99996 [==============================] - 3s 33us/step - loss: 4.6950e-06 - val_loss: 5.8522e-06
Epoch 9/10
99996/99996 [==============================] - 3s 33us/step - loss: 4.6778e-06 - val_loss: 5.8329e-06
Epoch 10/10
99996/99996 [==============================] - 3s 33us/step - loss: 4.6684e-06 - val_loss: 5.8679e-06
Train on 100000 samples, validate on 99996 samples
Epoch 1/10
100000/100000 [==============================] - 3s 35us/step - loss: 5.0282e-06 - val_loss: 5.8261e-06
Epoch 2/10
100000/100000 [==============================] - 3s 34us/step - loss: 4.8887e-06 - val_loss: 5.8270e-06
Epoch 3/10
100000/100000 [==============================] - 3s 33us/step - loss: 4.8300e-06 - val_loss: 5.8187e-06
Epoch 4/10
100000/100000 [==============================] - 3s 33us/step - loss: 4.7926e-06 - val_loss: 5.8256e-06
Epoch 5/10
100000/100000 [==============================] - 3s 33us/step - loss: 4.7667e-06 - val_loss: 5.8354e-06
Epoch 6/10
100000/100000 [==============================] - 3s 33us/step - loss: 4.7427e-06 - val_loss: 5.8539e-06
Epoch 7/10
100000/100000 [==============================] - 3s 33us/step - loss: 4.7235e-06 - val_loss: 5.8376e-06
Epoch 8/10
100000/100000 [==============================] - 3s 33us/step - loss: 4.7075e-06 - val_loss: 5.8494e-06
Epoch 9/10
100000/100000 [==============================] - 3s 33us/step - loss: 4.6937e-06 - val_loss: 5.8495e-06
Epoch 10/10
100000/100000 [==============================] - 3s 33us/step - loss: 4.6819e-06 - val_loss: 5.8675e-06
Train on 99998 samples, validate on 1914 samples
Epoch 1/10
99998/99998 [==============================] - 2s 24us/step - loss: 4.9845e-06 - val_loss: 5.6168e-06
Epoch 2/10
99998/99998 [==============================] - 2s 24us/step - loss: 4.8474e-06 - val_loss: 5.5963e-06
Epoch 3/10
99998/99998 [==============================] - 2s 24us/step - loss: 4.7883e-06 - val_loss: 5.5703e-06
Epoch 4/10
99998/99998 [==============================] - 2s 24us/step - loss: 4.7524e-06 - val_loss: 5.6106e-06
Epoch 5/10
99998/99998 [==============================] - 2s 23us/step - loss: 4.7246e-06 - val_loss: 5.5855e-06
Epoch 6/10
99998/99998 [==============================] - 2s 23us/step - loss: 4.7042e-06 - val_loss: 5.5826e-06
Epoch 7/10
99998/99998 [==============================] - 2s 23us/step - loss: 4.6828e-06 - val_loss: 5.5647e-06
Epoch 8/10
99998/99998 [==============================] - 2s 23us/step - loss: 4.6697e-06 - val_loss: 5.5848e-06
Epoch 9/10
99998/99998 [==============================] - 2s 23us/step - loss: 4.6564e-06 - val_loss: 5.6172e-06
Epoch 10/10
99998/99998 [==============================] - 2s 23us/step - loss: 4.6439e-06 - val_loss: 5.5857e-06
Train on 5523 samples, validate on 99996 samples
Epoch 1/10
5523/5523 [==============================] - 1s 240us/step - loss: 5.0021e-06 - val_loss: 5.8609e-06
Epoch 2/10
5523/5523 [==============================] - 1s 222us/step - loss: 4.8223e-06 - val_loss: 5.8830e-06
Epoch 3/10
5523/5523 [==============================] - 1s 211us/step - loss: 4.7032e-06 - val_loss: 5.8979e-06
Epoch 4/10
5523/5523 [==============================] - 1s 209us/step - loss: 4.6309e-06 - val_loss: 5.9243e-06
Epoch 5/10
5523/5523 [==============================] - 1s 211us/step - loss: 4.5748e-06 - val_loss: 5.9190e-06
Epoch 6/10
5523/5523 [==============================] - 1s 227us/step - loss: 4.5311e-06 - val_loss: 5.9282e-06
Epoch 7/10
5523/5523 [==============================] - 1s 208us/step - loss: 4.4956e-06 - val_loss: 5.9413e-06
Epoch 8/10
5523/5523 [==============================] - 1s 210us/step - loss: 4.4692e-06 - val_loss: 5.9480e-06
Epoch 9/10
5523/5523 [==============================] - 1s 205us/step - loss: 4.4468e-06 - val_loss: 5.9607e-06
Epoch 10/10
5523/5523 [==============================] - 1s 205us/step - loss: 4.4262e-06 - val_loss: 5.9629e-06
Train on 99998 samples, validate on 99996 samples
Epoch 1/10
99998/99998 [==============================] - 3s 34us/step - loss: 5.0537e-06 - val_loss: 5.8370e-06
Epoch 2/10
99998/99998 [==============================] - 3s 34us/step - loss: 4.8921e-06 - val_loss: 5.8042e-06
Epoch 3/10
99998/99998 [==============================] - 3s 33us/step - loss: 4.8326e-06 - val_loss: 5.8234e-06
Epoch 4/10
99998/99998 [==============================] - 3s 33us/step - loss: 4.7960e-06 - val_loss: 5.8267e-06
Epoch 5/10
99998/99998 [==============================] - 3s 33us/step - loss: 4.7657e-06 - val_loss: 5.8344e-06
Epoch 6/10
99998/99998 [==============================] - 3s 34us/step - loss: 4.7431e-06 - val_loss: 5.8271e-06
Epoch 7/10
99998/99998 [==============================] - 3s 33us/step - loss: 4.7254e-06 - val_loss: 5.8493e-06
Epoch 8/10
99998/99998 [==============================] - 3s 33us/step - loss: 4.7100e-06 - val_loss: 5.8574e-06
Epoch 9/10
99998/99998 [==============================] - 3s 33us/step - loss: 4.6949e-06 - val_loss: 5.8490e-06
Epoch 10/10
99998/99998 [==============================] - 3s 33us/step - loss: 4.6818e-06 - val_loss: 5.8600e-06
Train on 99999 samples, validate on 99996 samples
Epoch 1/10
99999/99999 [==============================] - 3s 34us/step - loss: 5.0120e-06 - val_loss: 5.8569e-06
Epoch 2/10
99999/99999 [==============================] - 3s 34us/step - loss: 4.8739e-06 - val_loss: 5.8187e-06
Epoch 3/10
99999/99999 [==============================] - 3s 34us/step - loss: 4.8086e-06 - val_loss: 5.8101e-06
Epoch 4/10
99999/99999 [==============================] - 3s 33us/step - loss: 4.7735e-06 - val_loss: 5.8287e-06
Epoch 5/10
99999/99999 [==============================] - 3s 33us/step - loss: 4.7438e-06 - val_loss: 5.8288e-06
Epoch 6/10
99999/99999 [==============================] - 3s 33us/step - loss: 4.7223e-06 - val_loss: 5.8412e-06
Epoch 7/10
99999/99999 [==============================] - 3s 33us/step - loss: 4.7051e-06 - val_loss: 5.8318e-06
Epoch 8/10
99999/99999 [==============================] - 3s 33us/step - loss: 4.6893e-06 - val_loss: 5.8542e-06
Epoch 9/10
99999/99999 [==============================] - 3s 34us/step - loss: 4.6753e-06 - val_loss: 5.8396e-06
Epoch 10/10
99999/99999 [==============================] - 3s 33us/step - loss: 4.6599e-06 - val_loss: 5.8474e-06
Train on 1503 samples, validate on 1914 samples
Epoch 1/10
1503/1503 [==============================] - 0s 39us/step - loss: 4.9881e-06 - val_loss: 5.6623e-06
Epoch 2/10
1503/1503 [==============================] - 0s 43us/step - loss: 4.8102e-06 - val_loss: 5.7049e-06
Epoch 3/10
1503/1503 [==============================] - 0s 43us/step - loss: 4.6898e-06 - val_loss: 5.7333e-06
Epoch 4/10
1503/1503 [==============================] - 0s 46us/step - loss: 4.6149e-06 - val_loss: 5.7206e-06
Epoch 5/10
1503/1503 [==============================] - 0s 44us/step - loss: 4.5459e-06 - val_loss: 5.7385e-06
Epoch 6/10
1503/1503 [==============================] - 0s 44us/step - loss: 4.4855e-06 - val_loss: 5.7525e-06
Epoch 7/10
1503/1503 [==============================] - 0s 40us/step - loss: 4.4382e-06 - val_loss: 5.7652e-06
Epoch 8/10
1503/1503 [==============================] - 0s 39us/step - loss: 4.3940e-06 - val_loss: 5.7753e-06
Epoch 9/10
1503/1503 [==============================] - 0s 41us/step - loss: 4.3586e-06 - val_loss: 5.7931e-06
Epoch 10/10
1503/1503 [==============================] - 0s 39us/step - loss: 4.3237e-06 - val_loss: 5.8074e-06
Train on 99999 samples, validate on 99996 samples
Epoch 1/10
99999/99999 [==============================] - 3s 35us/step - loss: 4.9735e-06 - val_loss: 5.8251e-06
Epoch 2/10
99999/99999 [==============================] - 3s 33us/step - loss: 4.8185e-06 - val_loss: 5.7912e-06
Epoch 3/10
99999/99999 [==============================] - 3s 33us/step - loss: 4.7569e-06 - val_loss: 5.8015e-06
Epoch 4/10
99999/99999 [==============================] - 3s 33us/step - loss: 4.7189e-06 - val_loss: 5.8092e-06
Epoch 5/10
99999/99999 [==============================] - 3s 33us/step - loss: 4.6931e-06 - val_loss: 5.8114e-06
Epoch 6/10
99999/99999 [==============================] - 3s 33us/step - loss: 4.6712e-06 - val_loss: 5.8034e-06
Epoch 7/10
99999/99999 [==============================] - 3s 34us/step - loss: 4.6499e-06 - val_loss: 5.8056e-06
Epoch 8/10
99999/99999 [==============================] - 3s 34us/step - loss: 4.6359e-06 - val_loss: 5.8378e-06
Epoch 9/10
99999/99999 [==============================] - 3s 34us/step - loss: 4.6211e-06 - val_loss: 5.8176e-06
Epoch 10/10
99999/99999 [==============================] - 3s 34us/step - loss: 4.6083e-06 - val_loss: 5.8193e-06
Train on 99999 samples, validate on 99996 samples
Epoch 1/10
99999/99999 [==============================] - 3s 34us/step - loss: 5.0162e-06 - val_loss: 5.8229e-06
Epoch 2/10
99999/99999 [==============================] - 3s 34us/step - loss: 4.8687e-06 - val_loss: 5.7996e-06
Epoch 3/10
99999/99999 [==============================] - 3s 33us/step - loss: 4.8086e-06 - val_loss: 5.8010e-06
Epoch 4/10
99999/99999 [==============================] - 3s 33us/step - loss: 4.7748e-06 - val_loss: 5.8236e-06
Epoch 5/10
99999/99999 [==============================] - 3s 33us/step - loss: 4.7485e-06 - val_loss: 5.8078e-06
Epoch 6/10
99999/99999 [==============================] - 3s 33us/step - loss: 4.7233e-06 - val_loss: 5.8259e-06
Epoch 7/10
99999/99999 [==============================] - 3s 33us/step - loss: 4.7058e-06 - val_loss: 5.8128e-06
Epoch 8/10
99999/99999 [==============================] - 3s 33us/step - loss: 4.6896e-06 - val_loss: 5.8396e-06
Epoch 9/10
99999/99999 [==============================] - 3s 33us/step - loss: 4.6769e-06 - val_loss: 5.8264e-06
Epoch 10/10
99999/99999 [==============================] - 3s 33us/step - loss: 4.6630e-06 - val_loss: 5.8388e-06
Train on 99998 samples, validate on 1914 samples
Epoch 1/10
99998/99998 [==============================] - 2s 24us/step - loss: 4.9662e-06 - val_loss: 5.6066e-06
Epoch 2/10
99998/99998 [==============================] - 2s 23us/step - loss: 4.8278e-06 - val_loss: 5.6148e-06
Epoch 3/10
99998/99998 [==============================] - 2s 24us/step - loss: 4.7633e-06 - val_loss: 5.5586e-06
Epoch 4/10
99998/99998 [==============================] - 2s 23us/step - loss: 4.7282e-06 - val_loss: 5.5898e-06
Epoch 5/10
99998/99998 [==============================] - 2s 23us/step - loss: 4.7020e-06 - val_loss: 5.5810e-06
Epoch 6/10
99998/99998 [==============================] - 2s 23us/step - loss: 4.6795e-06 - val_loss: 5.6008e-06
Epoch 7/10
99998/99998 [==============================] - 2s 23us/step - loss: 4.6584e-06 - val_loss: 5.5894e-06
Epoch 8/10
99998/99998 [==============================] - 2s 23us/step - loss: 4.6443e-06 - val_loss: 5.5950e-06
Epoch 9/10
99998/99998 [==============================] - 2s 23us/step - loss: 4.6327e-06 - val_loss: 5.6150e-06
Epoch 10/10
99998/99998 [==============================] - 2s 23us/step - loss: 4.6193e-06 - val_loss: 5.6208e-06
Train on 99996 samples, validate on 99996 samples
Epoch 1/10
99996/99996 [==============================] - 4s 35us/step - loss: 4.9740e-06 - val_loss: 5.8072e-06
Epoch 2/10
99996/99996 [==============================] - 3s 34us/step - loss: 4.8243e-06 - val_loss: 5.7913e-06
Epoch 3/10
99996/99996 [==============================] - 3s 33us/step - loss: 4.7653e-06 - val_loss: 5.7861e-06
Epoch 4/10
99996/99996 [==============================] - 3s 33us/step - loss: 4.7285e-06 - val_loss: 5.7807e-06
Epoch 5/10
99996/99996 [==============================] - 3s 33us/step - loss: 4.7018e-06 - val_loss: 5.8269e-06
Epoch 6/10
99996/99996 [==============================] - 3s 33us/step - loss: 4.6835e-06 - val_loss: 5.8237e-06
Epoch 7/10
99996/99996 [==============================] - 3s 33us/step - loss: 4.6603e-06 - val_loss: 5.7972e-06
Epoch 8/10
99996/99996 [==============================] - 3s 33us/step - loss: 4.6441e-06 - val_loss: 5.8171e-06
Epoch 9/10
99996/99996 [==============================] - 3s 33us/step - loss: 4.6312e-06 - val_loss: 5.8281e-06
Epoch 10/10
99996/99996 [==============================] - 3s 33us/step - loss: 4.6165e-06 - val_loss: 5.8495e-06
Train on 100000 samples, validate on 99996 samples
Epoch 1/10
100000/100000 [==============================] - 3s 35us/step - loss: 4.9839e-06 - val_loss: 5.8491e-06
Epoch 2/10
100000/100000 [==============================] - 3s 33us/step - loss: 4.8377e-06 - val_loss: 5.7916e-06
Epoch 3/10
100000/100000 [==============================] - 3s 33us/step - loss: 4.7762e-06 - val_loss: 5.7950e-06
Epoch 4/10
100000/100000 [==============================] - 3s 34us/step - loss: 4.7391e-06 - val_loss: 5.8001e-06
Epoch 5/10
100000/100000 [==============================] - 3s 34us/step - loss: 4.7114e-06 - val_loss: 5.8027e-06
Epoch 6/10
100000/100000 [==============================] - 3s 34us/step - loss: 4.6889e-06 - val_loss: 5.8118e-06
Epoch 7/10
100000/100000 [==============================] - 3s 34us/step - loss: 4.6717e-06 - val_loss: 5.8038e-06
Epoch 8/10
100000/100000 [==============================] - 3s 33us/step - loss: 4.6570e-06 - val_loss: 5.8200e-06
Epoch 9/10
100000/100000 [==============================] - 3s 33us/step - loss: 4.6428e-06 - val_loss: 5.8345e-06
Epoch 10/10
100000/100000 [==============================] - 3s 34us/step - loss: 4.6300e-06 - val_loss: 5.8557e-06
Train on 99998 samples, validate on 1914 samples
Epoch 1/10
99998/99998 [==============================] - 2s 24us/step - loss: 4.9411e-06 - val_loss: 5.5750e-06
Epoch 2/10
99998/99998 [==============================] - 2s 23us/step - loss: 4.8012e-06 - val_loss: 5.5526e-06
Epoch 3/10
99998/99998 [==============================] - 2s 24us/step - loss: 4.7415e-06 - val_loss: 5.5467e-06
Epoch 4/10
99998/99998 [==============================] - 2s 23us/step - loss: 4.7071e-06 - val_loss: 5.5533e-06
Epoch 5/10
99998/99998 [==============================] - 2s 23us/step - loss: 4.6756e-06 - val_loss: 5.5671e-06
Epoch 6/10
99998/99998 [==============================] - 2s 23us/step - loss: 4.6562e-06 - val_loss: 5.5435e-06
Epoch 7/10
99998/99998 [==============================] - 2s 23us/step - loss: 4.6361e-06 - val_loss: 5.5616e-06
Epoch 8/10
99998/99998 [==============================] - 2s 24us/step - loss: 4.6190e-06 - val_loss: 5.5635e-06
Epoch 9/10
99998/99998 [==============================] - 2s 23us/step - loss: 4.6050e-06 - val_loss: 5.5861e-06
Epoch 10/10
99998/99998 [==============================] - 2s 23us/step - loss: 4.5932e-06 - val_loss: 5.5697e-06
Train on 5523 samples, validate on 99996 samples
Epoch 1/10
5523/5523 [==============================] - 1s 236us/step - loss: 4.9567e-06 - val_loss: 5.8091e-06
Epoch 2/10
5523/5523 [==============================] - 1s 215us/step - loss: 4.7793e-06 - val_loss: 5.8414e-06
Epoch 3/10
5523/5523 [==============================] - 1s 207us/step - loss: 4.6589e-06 - val_loss: 5.8709e-06
Epoch 4/10
5523/5523 [==============================] - 1s 205us/step - loss: 4.5785e-06 - val_loss: 5.8893e-06
Epoch 5/10
5523/5523 [==============================] - 1s 208us/step - loss: 4.5224e-06 - val_loss: 5.8976e-06
Epoch 6/10
[Per-epoch Keras training log, abridged. The cell fits the model for 10 epochs at a time on a rotating sequence of train/validation splits (training sizes of 1503, 5523, 99996, 99998, 99999, and 100000 samples; validation sizes of 1914 or 99996 samples), and the whole sequence is repeated several times with the weights carried over between fits. Across these passes the training loss falls steadily from roughly 5.0e-06 to roughly 4.0e-06, while the validation loss stays essentially flat: about 5.4e-06 to 5.6e-06 on the 1914-sample validation set and about 5.7e-06 to 5.9e-06 on the 99996-sample set. On the small training sets (1503 and 5523 samples) the training loss drops fastest while the validation loss drifts slightly upward within each 10-epoch block, indicating mild overfitting; on the ~100k-sample sets both curves are nearly flat after the first two or three epochs, at around 2-4 seconds (33-35 us/step) per epoch.]
Train on 99999 samples, validate on 99996 samples
Epoch 1/10
99999/99999 [==============================] - 3s 34us/step - loss: 4.7428e-06 - val_loss: 5.6948e-06
Epoch 2/10
99999/99999 [==============================] - 3s 33us/step - loss: 4.5856e-06 - val_loss: 5.6892e-06
Epoch 3/10
99999/99999 [==============================] - 3s 32us/step - loss: 4.5219e-06 - val_loss: 5.6898e-06
Epoch 4/10
99999/99999 [==============================] - 3s 32us/step - loss: 4.4824e-06 - val_loss: 5.6851e-06
Epoch 5/10
99999/99999 [==============================] - 3s 32us/step - loss: 4.4583e-06 - val_loss: 5.7022e-06
Epoch 6/10
99999/99999 [==============================] - 3s 33us/step - loss: 4.4341e-06 - val_loss: 5.7041e-06
Epoch 7/10
99999/99999 [==============================] - 3s 32us/step - loss: 4.4168e-06 - val_loss: 5.7107e-06
Epoch 8/10
99999/99999 [==============================] - 3s 33us/step - loss: 4.4016e-06 - val_loss: 5.7188e-06
Epoch 9/10
99999/99999 [==============================] - 3s 32us/step - loss: 4.3824e-06 - val_loss: 5.7167e-06
Epoch 10/10
99999/99999 [==============================] - 3s 32us/step - loss: 4.3700e-06 - val_loss: 5.7209e-06
Train on 99998 samples, validate on 1914 samples
Epoch 1/10
99998/99998 [==============================] - 2s 24us/step - loss: 4.6869e-06 - val_loss: 5.4594e-06
Epoch 2/10
99998/99998 [==============================] - 2s 23us/step - loss: 4.5386e-06 - val_loss: 5.4234e-06
Epoch 3/10
99998/99998 [==============================] - 2s 23us/step - loss: 4.4761e-06 - val_loss: 5.4431e-06
Epoch 4/10
99998/99998 [==============================] - 2s 23us/step - loss: 4.4388e-06 - val_loss: 5.4417e-06
Epoch 5/10
99998/99998 [==============================] - 2s 23us/step - loss: 4.4111e-06 - val_loss: 5.4311e-06
Epoch 6/10
99998/99998 [==============================] - 2s 23us/step - loss: 4.3899e-06 - val_loss: 5.4667e-06
Epoch 7/10
99998/99998 [==============================] - 2s 23us/step - loss: 4.3706e-06 - val_loss: 5.4666e-06
Epoch 8/10
99998/99998 [==============================] - 2s 23us/step - loss: 4.3535e-06 - val_loss: 5.4555e-06
Epoch 9/10
99998/99998 [==============================] - 2s 23us/step - loss: 4.3394e-06 - val_loss: 5.4599e-06
Epoch 10/10
99998/99998 [==============================] - 2s 23us/step - loss: 4.3257e-06 - val_loss: 5.4812e-06
Train on 99996 samples, validate on 99996 samples
Epoch 1/10
99996/99996 [==============================] - 3s 33us/step - loss: 4.6919e-06 - val_loss: 5.6777e-06
Epoch 2/10
99996/99996 [==============================] - 3s 33us/step - loss: 4.5387e-06 - val_loss: 5.6670e-06
Epoch 3/10
99996/99996 [==============================] - 3s 32us/step - loss: 4.4776e-06 - val_loss: 5.6641e-06
Epoch 4/10
99996/99996 [==============================] - 3s 33us/step - loss: 4.4382e-06 - val_loss: 5.6424e-06
Epoch 5/10
99996/99996 [==============================] - 3s 32us/step - loss: 4.4069e-06 - val_loss: 5.6703e-06
Epoch 6/10
99996/99996 [==============================] - 3s 32us/step - loss: 4.3849e-06 - val_loss: 5.6842e-06
Epoch 7/10
99996/99996 [==============================] - 3s 33us/step - loss: 4.3674e-06 - val_loss: 5.6967e-06
Epoch 8/10
99996/99996 [==============================] - 3s 33us/step - loss: 4.3518e-06 - val_loss: 5.6788e-06
Epoch 9/10
99996/99996 [==============================] - 3s 33us/step - loss: 4.3358e-06 - val_loss: 5.6858e-06
Epoch 10/10
99996/99996 [==============================] - 3s 33us/step - loss: 4.3237e-06 - val_loss: 5.7196e-06
Train on 100000 samples, validate on 99996 samples
Epoch 1/10
100000/100000 [==============================] - 3s 34us/step - loss: 4.7086e-06 - val_loss: 5.6564e-06
Epoch 2/10
100000/100000 [==============================] - 3s 33us/step - loss: 4.5516e-06 - val_loss: 5.6730e-06
Epoch 3/10
100000/100000 [==============================] - 3s 32us/step - loss: 4.4899e-06 - val_loss: 5.6626e-06
Epoch 4/10
100000/100000 [==============================] - 3s 32us/step - loss: 4.4521e-06 - val_loss: 5.6885e-06
Epoch 5/10
100000/100000 [==============================] - 3s 32us/step - loss: 4.4203e-06 - val_loss: 5.6753e-06
Epoch 6/10
100000/100000 [==============================] - 3s 32us/step - loss: 4.3977e-06 - val_loss: 5.7087e-06
Epoch 7/10
100000/100000 [==============================] - 3s 32us/step - loss: 4.3805e-06 - val_loss: 5.7078e-06
Epoch 8/10
100000/100000 [==============================] - 3s 33us/step - loss: 4.3627e-06 - val_loss: 5.7158e-06
Epoch 9/10
100000/100000 [==============================] - 3s 33us/step - loss: 4.3482e-06 - val_loss: 5.7172e-06
Epoch 10/10
100000/100000 [==============================] - 3s 33us/step - loss: 4.3348e-06 - val_loss: 5.7147e-06
Train on 99998 samples, validate on 1914 samples
Epoch 1/10
99998/99998 [==============================] - 2s 24us/step - loss: 4.6698e-06 - val_loss: 5.4887e-06
Epoch 2/10
99998/99998 [==============================] - 2s 23us/step - loss: 4.5162e-06 - val_loss: 5.4385e-06
Epoch 3/10
99998/99998 [==============================] - 2s 23us/step - loss: 4.4520e-06 - val_loss: 5.4190e-06
Epoch 4/10
99998/99998 [==============================] - 2s 23us/step - loss: 4.4119e-06 - val_loss: 5.4565e-06
Epoch 5/10
99998/99998 [==============================] - 2s 23us/step - loss: 4.3878e-06 - val_loss: 5.4719e-06
Epoch 6/10
99998/99998 [==============================] - 2s 23us/step - loss: 4.3631e-06 - val_loss: 5.4874e-06
Epoch 7/10
99998/99998 [==============================] - 2s 23us/step - loss: 4.3433e-06 - val_loss: 5.4914e-06
Epoch 8/10
99998/99998 [==============================] - 2s 23us/step - loss: 4.3288e-06 - val_loss: 5.5143e-06
Epoch 9/10
99998/99998 [==============================] - 2s 23us/step - loss: 4.3145e-06 - val_loss: 5.5429e-06
Epoch 10/10
99998/99998 [==============================] - 2s 23us/step - loss: 4.3026e-06 - val_loss: 5.5245e-06
Train on 5523 samples, validate on 99996 samples
Epoch 1/10
5523/5523 [==============================] - 1s 226us/step - loss: 4.7807e-06 - val_loss: 5.7076e-06
Epoch 2/10
5523/5523 [==============================] - 1s 210us/step - loss: 4.5241e-06 - val_loss: 5.7019e-06
Epoch 3/10
5523/5523 [==============================] - 1s 201us/step - loss: 4.3979e-06 - val_loss: 5.7459e-06
Epoch 4/10
5523/5523 [==============================] - 1s 202us/step - loss: 4.3072e-06 - val_loss: 5.7544e-06
Epoch 5/10
5523/5523 [==============================] - 1s 198us/step - loss: 4.2389e-06 - val_loss: 5.7737e-06
Epoch 6/10
5523/5523 [==============================] - 1s 209us/step - loss: 4.1933e-06 - val_loss: 5.7972e-06
Epoch 7/10
5523/5523 [==============================] - 1s 204us/step - loss: 4.1579e-06 - val_loss: 5.7902e-06
Epoch 8/10
5523/5523 [==============================] - 1s 200us/step - loss: 4.1266e-06 - val_loss: 5.7967e-06
Epoch 9/10
5523/5523 [==============================] - 1s 198us/step - loss: 4.1010e-06 - val_loss: 5.8180e-06
Epoch 10/10
5523/5523 [==============================] - 1s 199us/step - loss: 4.0780e-06 - val_loss: 5.8235e-06
Train on 99998 samples, validate on 99996 samples
Epoch 1/10
99998/99998 [==============================] - 3s 33us/step - loss: 4.7432e-06 - val_loss: 5.6635e-06
Epoch 2/10
99998/99998 [==============================] - 3s 33us/step - loss: 4.5659e-06 - val_loss: 5.6646e-06
Epoch 3/10
99998/99998 [==============================] - 3s 33us/step - loss: 4.4976e-06 - val_loss: 5.6478e-06
Epoch 4/10
99998/99998 [==============================] - 3s 32us/step - loss: 4.4586e-06 - val_loss: 5.6866e-06
Epoch 5/10
99998/99998 [==============================] - 3s 33us/step - loss: 4.4322e-06 - val_loss: 5.6698e-06
Epoch 6/10
99998/99998 [==============================] - 3s 33us/step - loss: 4.4049e-06 - val_loss: 5.6841e-06
Epoch 7/10
99998/99998 [==============================] - 3s 32us/step - loss: 4.3884e-06 - val_loss: 5.6684e-06
Epoch 8/10
99998/99998 [==============================] - 3s 32us/step - loss: 4.3685e-06 - val_loss: 5.7097e-06
Epoch 9/10
99998/99998 [==============================] - 3s 33us/step - loss: 4.3539e-06 - val_loss: 5.6988e-06
Epoch 10/10
99998/99998 [==============================] - 3s 32us/step - loss: 4.3415e-06 - val_loss: 5.7094e-06
Train on 99999 samples, validate on 99996 samples
Epoch 1/10
99999/99999 [==============================] - 3s 33us/step - loss: 4.6898e-06 - val_loss: 5.7060e-06
Epoch 2/10
99999/99999 [==============================] - 3s 33us/step - loss: 4.5337e-06 - val_loss: 5.6799e-06
Epoch 3/10
99999/99999 [==============================] - 3s 32us/step - loss: 4.4699e-06 - val_loss: 5.7029e-06
Epoch 4/10
99999/99999 [==============================] - 3s 32us/step - loss: 4.4314e-06 - val_loss: 5.6765e-06
Epoch 5/10
99999/99999 [==============================] - 3s 33us/step - loss: 4.4039e-06 - val_loss: 5.7053e-06
Epoch 6/10
99999/99999 [==============================] - 3s 33us/step - loss: 4.3813e-06 - val_loss: 5.6883e-06
Epoch 7/10
99999/99999 [==============================] - 3s 33us/step - loss: 4.3619e-06 - val_loss: 5.6963e-06
Epoch 8/10
99999/99999 [==============================] - 3s 33us/step - loss: 4.3480e-06 - val_loss: 5.7326e-06
Epoch 9/10
99999/99999 [==============================] - 3s 34us/step - loss: 4.3311e-06 - val_loss: 5.7352e-06
Epoch 10/10
99999/99999 [==============================] - 3s 33us/step - loss: 4.3186e-06 - val_loss: 5.7306e-06
Train on 1503 samples, validate on 1914 samples
Epoch 1/10
1503/1503 [==============================] - 0s 43us/step - loss: 4.7339e-06 - val_loss: 5.5611e-06
Epoch 2/10
1503/1503 [==============================] - 0s 43us/step - loss: 4.5286e-06 - val_loss: 5.5975e-06
Epoch 3/10
1503/1503 [==============================] - 0s 43us/step - loss: 4.3957e-06 - val_loss: 5.6327e-06
Epoch 4/10
1503/1503 [==============================] - 0s 42us/step - loss: 4.3033e-06 - val_loss: 5.6341e-06
Epoch 5/10
1503/1503 [==============================] - 0s 44us/step - loss: 4.2328e-06 - val_loss: 5.6511e-06
Epoch 6/10
1503/1503 [==============================] - 0s 41us/step - loss: 4.1797e-06 - val_loss: 5.6747e-06
Epoch 7/10
1503/1503 [==============================] - 0s 42us/step - loss: 4.1290e-06 - val_loss: 5.6746e-06
Epoch 8/10
1503/1503 [==============================] - 0s 40us/step - loss: 4.0839e-06 - val_loss: 5.6690e-06
Epoch 9/10
1503/1503 [==============================] - 0s 45us/step - loss: 4.0445e-06 - val_loss: 5.6929e-06
Epoch 10/10
1503/1503 [==============================] - 0s 41us/step - loss: 4.0081e-06 - val_loss: 5.6939e-06
Train on 99999 samples, validate on 99996 samples
Epoch 1/10
99999/99999 [==============================] - 3s 34us/step - loss: 4.6682e-06 - val_loss: 5.6584e-06
Epoch 2/10
99999/99999 [==============================] - 3s 32us/step - loss: 4.4965e-06 - val_loss: 5.6450e-06
Epoch 3/10
99999/99999 [==============================] - 3s 33us/step - loss: 4.4362e-06 - val_loss: 5.6502e-06
Epoch 4/10
99999/99999 [==============================] - 3s 32us/step - loss: 4.3939e-06 - val_loss: 5.6483e-06
Epoch 5/10
99999/99999 [==============================] - 3s 33us/step - loss: 4.3646e-06 - val_loss: 5.6591e-06
Epoch 6/10
99999/99999 [==============================] - 3s 33us/step - loss: 4.3442e-06 - val_loss: 5.6715e-06
Epoch 7/10
99999/99999 [==============================] - 3s 33us/step - loss: 4.3279e-06 - val_loss: 5.6654e-06
Epoch 8/10
99999/99999 [==============================] - 3s 33us/step - loss: 4.3119e-06 - val_loss: 5.6616e-06
Epoch 9/10
99999/99999 [==============================] - 3s 33us/step - loss: 4.2942e-06 - val_loss: 5.6824e-06
Epoch 10/10
99999/99999 [==============================] - 3s 33us/step - loss: 4.2798e-06 - val_loss: 5.6827e-06
Train on 99999 samples, validate on 99996 samples
Epoch 1/10
99999/99999 [==============================] - 3s 34us/step - loss: 4.6929e-06 - val_loss: 5.6654e-06
Epoch 2/10
99999/99999 [==============================] - 3s 34us/step - loss: 4.5423e-06 - val_loss: 5.6860e-06
Epoch 3/10
99999/99999 [==============================] - 3s 33us/step - loss: 4.4748e-06 - val_loss: 5.6850e-06
Epoch 4/10
99999/99999 [==============================] - 3s 33us/step - loss: 4.4389e-06 - val_loss: 5.6864e-06
Epoch 5/10
99999/99999 [==============================] - 3s 32us/step - loss: 4.4110e-06 - val_loss: 5.6671e-06
Epoch 6/10
99999/99999 [==============================] - 3s 33us/step - loss: 4.3872e-06 - val_loss: 5.6881e-06
Epoch 7/10
99999/99999 [==============================] - 3s 33us/step - loss: 4.3676e-06 - val_loss: 5.6755e-06
Epoch 8/10
99999/99999 [==============================] - 3s 33us/step - loss: 4.3527e-06 - val_loss: 5.7171e-06
Epoch 9/10
99999/99999 [==============================] - 3s 32us/step - loss: 4.3390e-06 - val_loss: 5.7132e-06
Epoch 10/10
99999/99999 [==============================] - 3s 33us/step - loss: 4.3234e-06 - val_loss: 5.6933e-06
Train on 99998 samples, validate on 1914 samples
Epoch 1/10
99998/99998 [==============================] - 2s 24us/step - loss: 4.6493e-06 - val_loss: 5.4525e-06
Epoch 2/10
99998/99998 [==============================] - 2s 23us/step - loss: 4.4974e-06 - val_loss: 5.4538e-06
Epoch 3/10
99998/99998 [==============================] - 2s 23us/step - loss: 4.4333e-06 - val_loss: 5.4163e-06
Epoch 4/10
99998/99998 [==============================] - 2s 23us/step - loss: 4.3964e-06 - val_loss: 5.4141e-06
Epoch 5/10
99998/99998 [==============================] - 2s 23us/step - loss: 4.3677e-06 - val_loss: 5.4144e-06
Epoch 6/10
99998/99998 [==============================] - 2s 23us/step - loss: 4.3471e-06 - val_loss: 5.4149e-06
Epoch 7/10
99998/99998 [==============================] - 2s 23us/step - loss: 4.3275e-06 - val_loss: 5.4670e-06
Epoch 8/10
99998/99998 [==============================] - 2s 23us/step - loss: 4.3124e-06 - val_loss: 5.4542e-06
Epoch 9/10
99998/99998 [==============================] - 2s 23us/step - loss: 4.2972e-06 - val_loss: 5.4342e-06
Epoch 10/10
99998/99998 [==============================] - 2s 23us/step - loss: 4.2863e-06 - val_loss: 5.4554e-06
Train on 99996 samples, validate on 99996 samples
Epoch 1/10
99996/99996 [==============================] - 3s 34us/step - loss: 4.6636e-06 - val_loss: 5.6407e-06
Epoch 2/10
99996/99996 [==============================] - 3s 33us/step - loss: 4.4935e-06 - val_loss: 5.6283e-06
Epoch 3/10
99996/99996 [==============================] - 3s 32us/step - loss: 4.4340e-06 - val_loss: 5.6388e-06
Epoch 4/10
99996/99996 [==============================] - 3s 32us/step - loss: 4.3939e-06 - val_loss: 5.6229e-06
Epoch 5/10
99996/99996 [==============================] - 3s 33us/step - loss: 4.3661e-06 - val_loss: 5.6518e-06
Epoch 6/10
99996/99996 [==============================] - 3s 33us/step - loss: 4.3463e-06 - val_loss: 5.6569e-06
Epoch 7/10
99996/99996 [==============================] - 3s 32us/step - loss: 4.3257e-06 - val_loss: 5.6750e-06
Epoch 8/10
99996/99996 [==============================] - 3s 32us/step - loss: 4.3110e-06 - val_loss: 5.6630e-06
Epoch 9/10
99996/99996 [==============================] - 3s 32us/step - loss: 4.2950e-06 - val_loss: 5.6885e-06
Epoch 10/10
99996/99996 [==============================] - 3s 32us/step - loss: 4.2812e-06 - val_loss: 5.6724e-06
Train on 100000 samples, validate on 99996 samples
Epoch 1/10
100000/100000 [==============================] - 3s 33us/step - loss: 4.6658e-06 - val_loss: 5.6696e-06
Epoch 2/10
100000/100000 [==============================] - 3s 33us/step - loss: 4.5120e-06 - val_loss: 5.6478e-06
Epoch 3/10
100000/100000 [==============================] - 3s 32us/step - loss: 4.4492e-06 - val_loss: 5.6535e-06
Epoch 4/10
100000/100000 [==============================] - 3s 32us/step - loss: 4.4073e-06 - val_loss: 5.6476e-06
Epoch 5/10
100000/100000 [==============================] - 3s 32us/step - loss: 4.3758e-06 - val_loss: 5.6957e-06
Epoch 6/10
100000/100000 [==============================] - 3s 33us/step - loss: 4.3563e-06 - val_loss: 5.6880e-06
Epoch 7/10
100000/100000 [==============================] - 3s 32us/step - loss: 4.3375e-06 - val_loss: 5.6766e-06
Epoch 8/10
100000/100000 [==============================] - 3s 33us/step - loss: 4.3190e-06 - val_loss: 5.6847e-06
Epoch 9/10
100000/100000 [==============================] - 3s 32us/step - loss: 4.3059e-06 - val_loss: 5.7015e-06
Epoch 10/10
100000/100000 [==============================] - 3s 33us/step - loss: 4.2935e-06 - val_loss: 5.6900e-06
Train on 99998 samples, validate on 1914 samples
Epoch 1/10
99998/99998 [==============================] - 2s 24us/step - loss: 4.6390e-06 - val_loss: 5.5136e-06
Epoch 2/10
99998/99998 [==============================] - 2s 23us/step - loss: 4.4753e-06 - val_loss: 5.4255e-06
Epoch 3/10
99998/99998 [==============================] - 2s 23us/step - loss: 4.4107e-06 - val_loss: 5.4241e-06
Epoch 4/10
99998/99998 [==============================] - 2s 23us/step - loss: 4.3746e-06 - val_loss: 5.4642e-06
Epoch 5/10
99998/99998 [==============================] - 2s 23us/step - loss: 4.3430e-06 - val_loss: 5.4453e-06
Epoch 6/10
99998/99998 [==============================] - 2s 23us/step - loss: 4.3211e-06 - val_loss: 5.4780e-06
Epoch 7/10
99998/99998 [==============================] - 2s 23us/step - loss: 4.3032e-06 - val_loss: 5.4921e-06
Epoch 8/10
99998/99998 [==============================] - 2s 23us/step - loss: 4.2865e-06 - val_loss: 5.4766e-06
Epoch 9/10
99998/99998 [==============================] - 2s 23us/step - loss: 4.2712e-06 - val_loss: 5.4971e-06
Epoch 10/10
99998/99998 [==============================] - 2s 23us/step - loss: 4.2601e-06 - val_loss: 5.5054e-06
Train on 5523 samples, validate on 99996 samples
Epoch 1/10
5523/5523 [==============================] - 1s 234us/step - loss: 4.7145e-06 - val_loss: 5.6769e-06
Epoch 2/10
5523/5523 [==============================] - 1s 215us/step - loss: 4.4796e-06 - val_loss: 5.7031e-06
Epoch 3/10
5523/5523 [==============================] - 1s 204us/step - loss: 4.3518e-06 - val_loss: 5.7367e-06
Epoch 4/10
5523/5523 [==============================] - 1s 198us/step - loss: 4.2628e-06 - val_loss: 5.7456e-06
Epoch 5/10
5523/5523 [==============================] - 1s 198us/step - loss: 4.1964e-06 - val_loss: 5.7427e-06
Epoch 6/10
5523/5523 [==============================] - 1s 203us/step - loss: 4.1526e-06 - val_loss: 5.7710e-06
Epoch 7/10
5523/5523 [==============================] - 1s 206us/step - loss: 4.1178e-06 - val_loss: 5.7767e-06
Epoch 8/10
5523/5523 [==============================] - 1s 198us/step - loss: 4.0880e-06 - val_loss: 5.7822e-06
Epoch 9/10
5523/5523 [==============================] - 1s 197us/step - loss: 4.0610e-06 - val_loss: 5.7925e-06
Epoch 10/10
5523/5523 [==============================] - 1s 199us/step - loss: 4.0393e-06 - val_loss: 5.7982e-06
Train on 99998 samples, validate on 99996 samples
Epoch 1/10
99998/99998 [==============================] - 3s 34us/step - loss: 4.6952e-06 - val_loss: 5.6532e-06
Epoch 2/10
99998/99998 [==============================] - 3s 33us/step - loss: 4.5158e-06 - val_loss: 5.6246e-06
Epoch 3/10
99998/99998 [==============================] - 3s 33us/step - loss: 4.4491e-06 - val_loss: 5.6391e-06
Epoch 4/10
99998/99998 [==============================] - 3s 32us/step - loss: 4.4109e-06 - val_loss: 5.6316e-06
Epoch 5/10
99998/99998 [==============================] - 3s 32us/step - loss: 4.3863e-06 - val_loss: 5.6723e-06
Epoch 6/10
99998/99998 [==============================] - 3s 33us/step - loss: 4.3614e-06 - val_loss: 5.6523e-06
Epoch 7/10
99998/99998 [==============================] - 3s 32us/step - loss: 4.3422e-06 - val_loss: 5.6698e-06
Epoch 8/10
99998/99998 [==============================] - 3s 32us/step - loss: 4.3266e-06 - val_loss: 5.6736e-06
Epoch 9/10
99998/99998 [==============================] - 3s 32us/step - loss: 4.3097e-06 - val_loss: 5.6749e-06
Epoch 10/10
99998/99998 [==============================] - 3s 33us/step - loss: 4.2971e-06 - val_loss: 5.7015e-06
Train on 99999 samples, validate on 99996 samples
Epoch 1/10
99999/99999 [==============================] - 3s 34us/step - loss: 4.6513e-06 - val_loss: 5.6416e-06
Epoch 2/10
99999/99999 [==============================] - 3s 32us/step - loss: 4.4918e-06 - val_loss: 5.6693e-06
Epoch 3/10
99999/99999 [==============================] - 3s 32us/step - loss: 4.4269e-06 - val_loss: 5.6517e-06
Epoch 4/10
99999/99999 [==============================] - 3s 32us/step - loss: 4.3877e-06 - val_loss: 5.6803e-06
Epoch 5/10
99999/99999 [==============================] - 3s 33us/step - loss: 4.3623e-06 - val_loss: 5.6861e-06
Epoch 6/10
99999/99999 [==============================] - 3s 34us/step - loss: 4.3386e-06 - val_loss: 5.6789e-06
Epoch 7/10
99999/99999 [==============================] - 3s 33us/step - loss: 4.3189e-06 - val_loss: 5.6868e-06
Epoch 8/10
99999/99999 [==============================] - 3s 33us/step - loss: 4.3034e-06 - val_loss: 5.6880e-06
Epoch 9/10
99999/99999 [==============================] - 3s 33us/step - loss: 4.2893e-06 - val_loss: 5.6952e-06
Epoch 10/10
99999/99999 [==============================] - 3s 32us/step - loss: 4.2792e-06 - val_loss: 5.7057e-06
Train on 1503 samples, validate on 1914 samples
Epoch 1/10
1503/1503 [==============================] - 0s 43us/step - loss: 4.7040e-06 - val_loss: 5.5197e-06
Epoch 2/10
1503/1503 [==============================] - 0s 42us/step - loss: 4.5009e-06 - val_loss: 5.5801e-06
Epoch 3/10
1503/1503 [==============================] - 0s 42us/step - loss: 4.3663e-06 - val_loss: 5.6393e-06
Epoch 4/10
1503/1503 [==============================] - 0s 44us/step - loss: 4.2843e-06 - val_loss: 5.6113e-06
Epoch 5/10
1503/1503 [==============================] - 0s 43us/step - loss: 4.2225e-06 - val_loss: 5.6204e-06
Epoch 6/10
1503/1503 [==============================] - 0s 41us/step - loss: 4.1557e-06 - val_loss: 5.6878e-06
Epoch 7/10
1503/1503 [==============================] - 0s 45us/step - loss: 4.1099e-06 - val_loss: 5.6539e-06
Epoch 8/10
1503/1503 [==============================] - 0s 38us/step - loss: 4.0615e-06 - val_loss: 5.6347e-06
Epoch 9/10
1503/1503 [==============================] - 0s 42us/step - loss: 4.0197e-06 - val_loss: 5.6900e-06
Epoch 10/10
1503/1503 [==============================] - 0s 41us/step - loss: 3.9827e-06 - val_loss: 5.6794e-06
Train on 99999 samples, validate on 99996 samples
Epoch 1/10
99999/99999 [==============================] - 3s 33us/step - loss: 4.6309e-06 - val_loss: 5.6343e-06
Epoch 2/10
99999/99999 [==============================] - 3s 32us/step - loss: 4.4549e-06 - val_loss: 5.6047e-06
Epoch 3/10
99999/99999 [==============================] - 3s 32us/step - loss: 4.3910e-06 - val_loss: 5.6115e-06
Epoch 4/10
99999/99999 [==============================] - 3s 33us/step - loss: 4.3503e-06 - val_loss: 5.6413e-06
Epoch 5/10
99999/99999 [==============================] - 3s 33us/step - loss: 4.3241e-06 - val_loss: 5.6390e-06
Epoch 6/10
99999/99999 [==============================] - 3s 32us/step - loss: 4.2994e-06 - val_loss: 5.6532e-06
Epoch 7/10
99999/99999 [==============================] - 3s 33us/step - loss: 4.2812e-06 - val_loss: 5.6523e-06
Epoch 8/10
99999/99999 [==============================] - 3s 32us/step - loss: 4.2665e-06 - val_loss: 5.6582e-06
Epoch 9/10
99999/99999 [==============================] - 3s 32us/step - loss: 4.2509e-06 - val_loss: 5.6711e-06
Epoch 10/10
99999/99999 [==============================] - 3s 32us/step - loss: 4.2365e-06 - val_loss: 5.6567e-06
Train on 99999 samples, validate on 99996 samples
Epoch 1/10
99999/99999 [==============================] - 3s 34us/step - loss: 4.6668e-06 - val_loss: 5.6661e-06
Epoch 2/10
99999/99999 [==============================] - 3s 33us/step - loss: 4.5043e-06 - val_loss: 5.6401e-06
Epoch 3/10
99999/99999 [==============================] - 3s 32us/step - loss: 4.4355e-06 - val_loss: 5.6453e-06
Epoch 4/10
99999/99999 [==============================] - 3s 32us/step - loss: 4.3997e-06 - val_loss: 5.6579e-06
Epoch 5/10
99999/99999 [==============================] - 3s 32us/step - loss: 4.3683e-06 - val_loss: 5.6558e-06
Epoch 6/10
99999/99999 [==============================] - 3s 32us/step - loss: 4.3453e-06 - val_loss: 5.6698e-06
Epoch 7/10
99999/99999 [==============================] - 3s 32us/step - loss: 4.3278e-06 - val_loss: 5.6837e-06
Epoch 8/10
99999/99999 [==============================] - 3s 32us/step - loss: 4.3106e-06 - val_loss: 5.6752e-06
Epoch 9/10
99999/99999 [==============================] - 3s 32us/step - loss: 4.2963e-06 - val_loss: 5.6936e-06
Epoch 10/10
99999/99999 [==============================] - 3s 32us/step - loss: 4.2824e-06 - val_loss: 5.6950e-06
Train on 99998 samples, validate on 1914 samples
Epoch 1/10
99998/99998 [==============================] - 2s 24us/step - loss: 4.6112e-06 - val_loss: 5.4091e-06
Epoch 2/10
99998/99998 [==============================] - 2s 23us/step - loss: 4.4562e-06 - val_loss: 5.4146e-06
Epoch 3/10
99998/99998 [==============================] - 2s 23us/step - loss: 4.3949e-06 - val_loss: 5.4065e-06
Epoch 4/10
99998/99998 [==============================] - 2s 23us/step - loss: 4.3565e-06 - val_loss: 5.3801e-06
Epoch 5/10
99998/99998 [==============================] - 2s 23us/step - loss: 4.3285e-06 - val_loss: 5.4258e-06
Epoch 6/10
99998/99998 [==============================] - 2s 23us/step - loss: 4.3087e-06 - val_loss: 5.4111e-06
Epoch 7/10
99998/99998 [==============================] - 2s 23us/step - loss: 4.2877e-06 - val_loss: 5.4020e-06
Epoch 8/10
99998/99998 [==============================] - 2s 23us/step - loss: 4.2716e-06 - val_loss: 5.3938e-06
Epoch 9/10
99998/99998 [==============================] - 2s 23us/step - loss: 4.2587e-06 - val_loss: 5.4304e-06
Epoch 10/10
99998/99998 [==============================] - 2s 23us/step - loss: 4.2448e-06 - val_loss: 5.4118e-06
Train on 99996 samples, validate on 99996 samples
Epoch 1/10
99996/99996 [==============================] - 3s 33us/step - loss: 4.6091e-06 - val_loss: 5.6467e-06
Epoch 2/10
99996/99996 [==============================] - 3s 33us/step - loss: 4.4584e-06 - val_loss: 5.6060e-06
Epoch 3/10
99996/99996 [==============================] - 3s 33us/step - loss: 4.3930e-06 - val_loss: 5.6263e-06
Epoch 4/10
99996/99996 [==============================] - 3s 33us/step - loss: 4.3543e-06 - val_loss: 5.6414e-06
Epoch 5/10
99996/99996 [==============================] - 3s 32us/step - loss: 4.3262e-06 - val_loss: 5.6010e-06
Epoch 6/10
99996/99996 [==============================] - 3s 32us/step - loss: 4.3044e-06 - val_loss: 5.6346e-06
Epoch 7/10
99996/99996 [==============================] - 3s 32us/step - loss: 4.2842e-06 - val_loss: 5.6449e-06
Epoch 8/10
99996/99996 [==============================] - 3s 33us/step - loss: 4.2684e-06 - val_loss: 5.6604e-06
Epoch 9/10
99996/99996 [==============================] - 3s 33us/step - loss: 4.2558e-06 - val_loss: 5.6710e-06
Epoch 10/10
99996/99996 [==============================] - 3s 33us/step - loss: 4.2438e-06 - val_loss: 5.6558e-06
Train on 100000 samples, validate on 99996 samples
Epoch 1/10
100000/100000 [==============================] - 3s 33us/step - loss: 4.6208e-06 - val_loss: 5.6712e-06
Epoch 2/10
100000/100000 [==============================] - 3s 32us/step - loss: 4.4668e-06 - val_loss: 5.6318e-06
Epoch 3/10
100000/100000 [==============================] - 3s 33us/step - loss: 4.4032e-06 - val_loss: 5.6295e-06
Epoch 4/10
100000/100000 [==============================] - 3s 32us/step - loss: 4.3650e-06 - val_loss: 5.6320e-06
Epoch 5/10
100000/100000 [==============================] - 3s 33us/step - loss: 4.3383e-06 - val_loss: 5.6600e-06
Epoch 6/10
100000/100000 [==============================] - 3s 32us/step - loss: 4.3140e-06 - val_loss: 5.6675e-06
Epoch 7/10
100000/100000 [==============================] - 3s 33us/step - loss: 4.2969e-06 - val_loss: 5.6634e-06
Epoch 8/10
100000/100000 [==============================] - 3s 33us/step - loss: 4.2789e-06 - val_loss: 5.6818e-06
Epoch 9/10
100000/100000 [==============================] - 3s 32us/step - loss: 4.2660e-06 - val_loss: 5.6941e-06
Epoch 10/10
100000/100000 [==============================] - 3s 32us/step - loss: 4.2511e-06 - val_loss: 5.6978e-06
Train on 99998 samples, validate on 1914 samples
Epoch 1/10
99998/99998 [==============================] - 2s 23us/step - loss: 4.5860e-06 - val_loss: 5.4198e-06
Epoch 2/10
99998/99998 [==============================] - 2s 23us/step - loss: 4.4336e-06 - val_loss: 5.3935e-06
Epoch 3/10
99998/99998 [==============================] - 2s 23us/step - loss: 4.3717e-06 - val_loss: 5.3900e-06
Epoch 4/10
99998/99998 [==============================] - 2s 23us/step - loss: 4.3295e-06 - val_loss: 5.3825e-06
Epoch 5/10
99998/99998 [==============================] - 2s 23us/step - loss: 4.3024e-06 - val_loss: 5.4424e-06
Epoch 6/10
99998/99998 [==============================] - 2s 23us/step - loss: 4.2789e-06 - val_loss: 5.4110e-06
Epoch 7/10
99998/99998 [==============================] - 2s 23us/step - loss: 4.2629e-06 - val_loss: 5.4357e-06
Epoch 8/10
99998/99998 [==============================] - 2s 23us/step - loss: 4.2448e-06 - val_loss: 5.4340e-06
Epoch 9/10
99998/99998 [==============================] - 2s 23us/step - loss: 4.2307e-06 - val_loss: 5.4674e-06
Epoch 10/10
99998/99998 [==============================] - 2s 23us/step - loss: 4.2183e-06 - val_loss: 5.4617e-06
Train on 5523 samples, validate on 99996 samples
Epoch 1/10
5523/5523 [==============================] - 1s 236us/step - loss: 4.6715e-06 - val_loss: 5.6646e-06
Epoch 2/10
5523/5523 [==============================] - 1s 217us/step - loss: 4.4486e-06 - val_loss: 5.6795e-06
Epoch 3/10
5523/5523 [==============================] - 1s 212us/step - loss: 4.3218e-06 - val_loss: 5.7133e-06
Epoch 4/10
5523/5523 [==============================] - 1s 220us/step - loss: 4.2283e-06 - val_loss: 5.7317e-06
Epoch 5/10
5523/5523 [==============================] - 1s 216us/step - loss: 4.1628e-06 - val_loss: 5.7255e-06
Epoch 6/10
5523/5523 [==============================] - 1s 222us/step - loss: 4.1125e-06 - val_loss: 5.7392e-06
Epoch 7/10
5523/5523 [==============================] - 1s 204us/step - loss: 4.0772e-06 - val_loss: 5.7556e-06
Epoch 8/10
5523/5523 [==============================] - 1s 197us/step - loss: 4.0459e-06 - val_loss: 5.7690e-06
Epoch 9/10
5523/5523 [==============================] - 1s 200us/step - loss: 4.0191e-06 - val_loss: 5.7961e-06
Epoch 10/10
5523/5523 [==============================] - 1s 207us/step - loss: 3.9942e-06 - val_loss: 5.7984e-06
Train on 99998 samples, validate on 99996 samples
Epoch 1/10
99998/99998 [==============================] - 3s 34us/step - loss: 4.6611e-06 - val_loss: 5.6401e-06
Epoch 2/10
99998/99998 [==============================] - 3s 32us/step - loss: 4.4807e-06 - val_loss: 5.5962e-06
Epoch 3/10
99998/99998 [==============================] - 3s 32us/step - loss: 4.4088e-06 - val_loss: 5.6209e-06
Epoch 4/10
99998/99998 [==============================] - 3s 32us/step - loss: 4.3672e-06 - val_loss: 5.6275e-06
Epoch 5/10
99998/99998 [==============================] - 3s 33us/step - loss: 4.3397e-06 - val_loss: 5.6394e-06
Epoch 6/10
99998/99998 [==============================] - 3s 32us/step - loss: 4.3186e-06 - val_loss: 5.6573e-06
Epoch 7/10
99998/99998 [==============================] - 3s 32us/step - loss: 4.2985e-06 - val_loss: 5.6795e-06
Epoch 8/10
99998/99998 [==============================] - 3s 32us/step - loss: 4.2817e-06 - val_loss: 5.6638e-06
Epoch 9/10
99998/99998 [==============================] - 3s 32us/step - loss: 4.2677e-06 - val_loss: 5.6806e-06
Epoch 10/10
99998/99998 [==============================] - 3s 32us/step - loss: 4.2558e-06 - val_loss: 5.6945e-06
Train on 99999 samples, validate on 99996 samples
Epoch 1/10
99999/99999 [==============================] - 3s 33us/step - loss: 4.6033e-06 - val_loss: 5.6590e-06
Epoch 2/10
99999/99999 [==============================] - 3s 34us/step - loss: 4.4527e-06 - val_loss: 5.6677e-06
Epoch 3/10
99999/99999 [==============================] - 3s 32us/step - loss: 4.3870e-06 - val_loss: 5.6705e-06
Epoch 4/10
99999/99999 [==============================] - 3s 33us/step - loss: 4.3471e-06 - val_loss: 5.6418e-06
Epoch 5/10
99999/99999 [==============================] - 3s 32us/step - loss: 4.3188e-06 - val_loss: 5.6561e-06
Epoch 6/10
99999/99999 [==============================] - 3s 33us/step - loss: 4.2964e-06 - val_loss: 5.6590e-06
Epoch 7/10
99999/99999 [==============================] - 3s 33us/step - loss: 4.2794e-06 - val_loss: 5.6733e-06
Epoch 8/10
99999/99999 [==============================] - 3s 33us/step - loss: 4.2621e-06 - val_loss: 5.6665e-06
Epoch 9/10
99999/99999 [==============================] - 3s 32us/step - loss: 4.2486e-06 - val_loss: 5.7008e-06
Epoch 10/10
99999/99999 [==============================] - 3s 32us/step - loss: 4.2387e-06 - val_loss: 5.6924e-06
Train on 1503 samples, validate on 1914 samples
Epoch 1/10
1503/1503 [==============================] - 0s 42us/step - loss: 4.6358e-06 - val_loss: 5.4568e-06
Epoch 2/10
1503/1503 [==============================] - 0s 38us/step - loss: 4.4184e-06 - val_loss: 5.5041e-06
Epoch 3/10
1503/1503 [==============================] - 0s 38us/step - loss: 4.2967e-06 - val_loss: 5.5564e-06
Epoch 4/10
1503/1503 [==============================] - 0s 36us/step - loss: 4.2100e-06 - val_loss: 5.5294e-06
Epoch 5/10
1503/1503 [==============================] - 0s 37us/step - loss: 4.1470e-06 - val_loss: 5.5470e-06
Epoch 6/10
1503/1503 [==============================] - 0s 39us/step - loss: 4.0872e-06 - val_loss: 5.5741e-06
Epoch 7/10
1503/1503 [==============================] - 0s 39us/step - loss: 4.0376e-06 - val_loss: 5.5563e-06
Epoch 8/10
1503/1503 [==============================] - 0s 40us/step - loss: 3.9994e-06 - val_loss: 5.5698e-06
Epoch 9/10
1503/1503 [==============================] - 0s 38us/step - loss: 3.9578e-06 - val_loss: 5.6076e-06
Epoch 10/10
1503/1503 [==============================] - 0s 39us/step - loss: 3.9248e-06 - val_loss: 5.5953e-06
Train on 99999 samples, validate on 99996 samples
Epoch 1/10
99999/99999 [==============================] - 3s 34us/step - loss: 4.5822e-06 - val_loss: 5.6065e-06
Epoch 2/10
99999/99999 [==============================] - 3s 32us/step - loss: 4.4141e-06 - val_loss: 5.5939e-06
Epoch 3/10
99999/99999 [==============================] - 3s 32us/step - loss: 4.3531e-06 - val_loss: 5.6033e-06
Epoch 4/10
99999/99999 [==============================] - 3s 32us/step - loss: 4.3093e-06 - val_loss: 5.6047e-06
Epoch 5/10
99999/99999 [==============================] - 3s 32us/step - loss: 4.2793e-06 - val_loss: 5.6168e-06
Epoch 6/10
99999/99999 [==============================] - 3s 32us/step - loss: 4.2596e-06 - val_loss: 5.6085e-06
Epoch 7/10
99999/99999 [==============================] - 3s 32us/step - loss: 4.2403e-06 - val_loss: 5.6366e-06
Epoch 8/10
99999/99999 [==============================] - 3s 33us/step - loss: 4.2265e-06 - val_loss: 5.6233e-06
Epoch 9/10
99999/99999 [==============================] - 3s 32us/step - loss: 4.2128e-06 - val_loss: 5.6482e-06
Epoch 10/10
99999/99999 [==============================] - 3s 32us/step - loss: 4.2000e-06 - val_loss: 5.6518e-06

Evaluation using the trained model


In [7]:
# Evaluate the trained autoencoder on every file: the 7 training files
# and the 3 test/validation files per event type, writing the per-jet
# reconstruction loss next to each input ("out" -> "loss").
for i in range(7):
  EvalOnFile(NAME("QCD","TRAIN",i,"out"),NAME("QCD","TRAIN",i,"loss"))
  EvalOnFile(NAME("TOP","TRAIN",i,"out"),NAME("TOP","TRAIN",i,"loss"))
#
for i in range(3):
  EvalOnFile(NAME("QCD","TEST",i,"out"),NAME("QCD","TEST",i,"loss"))
  EvalOnFile(NAME("TOP","TEST",i,"out"),NAME("TOP","TEST",i,"loss"))
  EvalOnFile(NAME("QCD","VAL",i,"out"),NAME("QCD","VAL",i,"loss"))
  EvalOnFile(NAME("TOP","VAL",i,"out"),NAME("TOP","VAL",i,"loss"))
#


(99999,)
(100000,)
(99999,)
(100000,)
(99998,)
(100000,)
(99996,)
(100000,)
(100000,)
(100000,)
(99998,)
(100000,)
(5523,)
(5477,)
(99996,)
(100000,)
(99998,)
(100000,)
(99996,)
(100000,)
(99999,)
(100000,)
(1914,)
(2086,)
(1503,)
(1497,)
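
As a quick check (a hypothetical snippet, not part of the original notebook), one of the loss files written above can be read back to confirm that it holds one float per jet, mirroring the np.fromfile call used in the next section:

loss0 = np.fromfile(NAME("QCD","TRAIN",0,"loss"), dtype=float)
print(loss0.shape, loss0.min(), loss0.max())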

Read the important data

Here we read the autoencoder loss, the jet mass and the N-subjettiness variables (mass and N-subjettiness were computed using the C++ programs main.cc, main.hh and all.hh found in [https://github.com/aravindhv10/CPP_Wrappers/tree/master/AntiQCD4/ML4JETS]).
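
For reference, N-subjettiness is the jet shape of Thaler and Van Tilburg: for a jet with constituents $k$ and $N$ candidate subjet axes,

$$\tau_N = \frac{1}{d_0} \sum_k p_{T,k}\,\min\left(\Delta R_{1,k},\,\Delta R_{2,k},\,\ldots,\,\Delta R_{N,k}\right), \qquad d_0 = \sum_k p_{T,k}\,R_0,$$

where $\Delta R_{J,k}$ is the distance between constituent $k$ and axis $J$ in the $(\eta,\phi)$ plane and $R_0$ is the jet radius. Small $\tau_N$ indicates a jet consistent with $N$ or fewer subjets; columns 2-6 of the arrays built below hold $\tau_1$ through $\tau_5$.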


In [0]:
def ReadLossMassNsub(eventtype,sampletype,i):
  loss = np.fromfile(NAME(eventtype,sampletype,i,"loss"), dtype=float)
  mass = READ_XZ(NAME(eventtype,sampletype,i,"mass"))
  nsub = READ_XZ(NAME(eventtype,sampletype,i,"nsub")).reshape(-1,5)
  # Assemble one row per jet: [loss, mass, tau_1, ..., tau_5].
  out = np.ones((mass.shape[0],7))
  for j in range(mass.shape[0]):  # 'j' avoids shadowing the file index 'i'
    out[j][0] = loss[j]
    out[j][1] = mass[j]
    out[j][2:7] = nsub[j]
  return out
#

In [0]:
vars_qcd_train = np.concatenate([ReadLossMassNsub("QCD","TRAIN",i) for i in range(7)], axis=0)

vars_qcd_test = np.concatenate([ReadLossMassNsub("QCD","TEST",i) for i in range(3)], axis=0)

vars_qcd_val = np.concatenate([ReadLossMassNsub("QCD","VAL",i) for i in range(3)], axis=0)

In [0]:
vars_top_train = np.concatenate([ReadLossMassNsub("TOP","TRAIN",i) for i in range(7)], axis=0)

vars_top_test = np.concatenate([ReadLossMassNsub("TOP","TEST",i) for i in range(3)], axis=0)

vars_top_val = np.concatenate([ReadLossMassNsub("TOP","VAL",i) for i in range(3)], axis=0)

Plotting and checking

Plot $\epsilon$ (autoencoder loss)


In [11]:
plt.hist(vars_qcd_test[:,0],100,(0.0,0.4),density=True,histtype='step')
plt.hist(vars_top_test[:,0],100,(0.0,0.4),density=True,histtype='step')
plt.show()


Plot $m_J$ (jet mass)


In [12]:
plt.hist(vars_qcd_test[:,1],100,(0.0,1000),density=True,histtype='step')
plt.hist(vars_top_test[:,1],100,(0.0,1000),density=True,histtype='step')
plt.show()


Plot jet $\tau_1$ (N-subjettiness)


In [13]:
plt.hist(vars_qcd_test[:,2],100,(0.0,100),density=True,histtype='step')
plt.hist(vars_top_test[:,2],100,(0.0,100),density=True,histtype='step')
plt.show()


Plot jet $\tau_2$ (N-subjettiness)


In [14]:
plt.hist(vars_qcd_test[:,3],100,(0.0,100),density=True,histtype='step')
plt.hist(vars_top_test[:,3],100,(0.0,100),density=True,histtype='step')
plt.show()


Plot jet $\tau_3$ (N-subjettiness)


In [15]:
plt.hist(vars_qcd_test[:,4],100,(0.0,100),density=True,histtype='step')
plt.hist(vars_top_test[:,4],100,(0.0,100),density=True,histtype='step')
plt.show()


Plot jet $\tau_4$ (N-subjettiness)


In [16]:
plt.hist(vars_qcd_test[:,5],100,(0.0,100),density=True,histtype='step')
plt.hist(vars_top_test[:,5],100,(0.0,100),density=True,histtype='step')
plt.show()


Plot the ROC curve using only $\epsilon$

X axis is the top-tagging efficiency $\epsilon_t$

Y axis is the QCD rejection $\frac{1}{\epsilon_{\text{QCD}}}$
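
The helper Count is defined earlier in the notebook; the sketch below shows the behaviour assumed in this section (a hypothetical stand-in, not the original definition): the fraction of jets whose value exceeds the threshold, so that scanning the threshold traces out the ROC curve.

def Count_sketch(values, threshold):
  # Hypothetical stand-in for the notebook's Count helper:
  # efficiency of the cut "value > threshold".
  return np.mean(values > threshold)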


In [18]:
dx = (0.4 - 0.0) / 100.0
qcdeff = np.ones((100))
topeff = np.ones((100))
for i in range(100):
  xval = i*dx  # scan the loss threshold over [0, 0.4)
  qcdeff[i]=1.0/(Count(vars_qcd_test[:,0],xval)+0.0000000001)  # tiny offset avoids division by zero
  topeff[i]=Count(vars_top_test[:,0],xval)
plt.yscale('log')
plt.plot(topeff,qcdeff)



Combining variables

We now repeat the ROC exercise using boosted decision trees to combine the variables.


In [0]:
import sklearn
def prepare(qcd_vars, top_vars):
  # Stack QCD (label 0) and top (label 1) jets and shuffle them together.
  out_x = np.append(qcd_vars, top_vars, 0)
  out_y = np.append(np.zeros((qcd_vars.shape[0]), dtype='float32'), np.ones((top_vars.shape[0]), dtype='float32'), 0)
  return sklearn.utils.shuffle(out_x, out_y, random_state=0)
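
As a quick sanity check (a hypothetical snippet, not in the original notebook), the shapes and class balance of the shuffled output can be inspected:

x_chk, y_chk = prepare(vars_qcd_train, vars_top_train)
print(x_chk.shape, y_chk.shape)        # (n_jets, 7) features and (n_jets,) labels
print("top fraction:", y_chk.mean())   # close to 0.5 for samples of similar size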

In [0]:
train_x, train_y = prepare(vars_qcd_train,vars_top_train)
test_x, test_y = prepare(vars_qcd_test,vars_top_test)
val_x, val_y = prepare(vars_qcd_val,vars_top_val)
# Binary classification; track AUC, log loss and error rate on the validation set.
param = { 'objective':'binary' , 'metric':'auc,binary_logloss,binary_error' }

In [21]:
plt.hist(train_x[:,0],100,(0.0,0.4),density=True,histtype='step')
plt.hist(test_x[:,0],100,(0.0,0.4),density=True,histtype='step')
plt.show()



In [22]:
plt.hist(train_x[:,1],100,(0.0,1000),density=True,histtype='step')
plt.hist(test_x[:,1],100,(0.0,1000),density=True,histtype='step')
plt.show()


Decision trees using only autoencoder loss


In [23]:
num_round = 100
# Use only column 0 (the autoencoder loss). LightGBM expects a 2D
# (n_samples, n_features) array, hence the reshape to a single column.
train_data = lgb.Dataset( train_x[:,0].reshape((-1,1)) , label=train_y )
val_data   = lgb.Dataset( val_x[:,0].reshape((-1,1))   , label=val_y   )
bst = lgb.train(param, train_data, num_round, valid_sets=val_data)
pred_qcd_test = bst.predict(vars_qcd_test[:,0].reshape((-1,1)))
pred_top_test = bst.predict(vars_top_test[:,0].reshape((-1,1)))


Usage of np.ndarray subset (sliced data) is not recommended due to it will double the peak memory cost in LightGBM.
[1]	valid_0's binary_logloss: 0.662144	valid_0's binary_error: 0.246362	valid_0's auc: 0.833258
[2]	valid_0's binary_logloss: 0.636785	valid_0's binary_error: 0.246362	valid_0's auc: 0.83344
[3]	valid_0's binary_logloss: 0.615826	valid_0's binary_error: 0.246362	valid_0's auc: 0.833473
[4]	valid_0's binary_logloss: 0.598368	valid_0's binary_error: 0.246362	valid_0's auc: 0.83349
[5]	valid_0's binary_logloss: 0.583739	valid_0's binary_error: 0.246362	valid_0's auc: 0.83351
[6]	valid_0's binary_logloss: 0.571426	valid_0's binary_error: 0.246362	valid_0's auc: 0.833528
[7]	valid_0's binary_logloss: 0.561019	valid_0's binary_error: 0.246362	valid_0's auc: 0.833539
[8]	valid_0's binary_logloss: 0.552206	valid_0's binary_error: 0.246362	valid_0's auc: 0.833541
[9]	valid_0's binary_logloss: 0.544718	valid_0's binary_error: 0.246362	valid_0's auc: 0.833547
[10]	valid_0's binary_logloss: 0.538346	valid_0's binary_error: 0.246362	valid_0's auc: 0.833555
[11]	valid_0's binary_logloss: 0.532917	valid_0's binary_error: 0.246362	valid_0's auc: 0.833557
[12]	valid_0's binary_logloss: 0.528285	valid_0's binary_error: 0.246362	valid_0's auc: 0.833558
[13]	valid_0's binary_logloss: 0.524331	valid_0's binary_error: 0.246362	valid_0's auc: 0.833562
[14]	valid_0's binary_logloss: 0.520951	valid_0's binary_error: 0.246362	valid_0's auc: 0.833563
[15]	valid_0's binary_logloss: 0.518061	valid_0's binary_error: 0.246362	valid_0's auc: 0.833564
[16]	valid_0's binary_logloss: 0.515588	valid_0's binary_error: 0.246362	valid_0's auc: 0.833565
[17]	valid_0's binary_logloss: 0.513472	valid_0's binary_error: 0.246362	valid_0's auc: 0.833565
[18]	valid_0's binary_logloss: 0.511662	valid_0's binary_error: 0.246362	valid_0's auc: 0.833559
[19]	valid_0's binary_logloss: 0.510111	valid_0's binary_error: 0.246362	valid_0's auc: 0.833559
[20]	valid_0's binary_logloss: 0.508783	valid_0's binary_error: 0.246362	valid_0's auc: 0.833558
[21]	valid_0's binary_logloss: 0.507644	valid_0's binary_error: 0.246362	valid_0's auc: 0.833558
[22]	valid_0's binary_logloss: 0.506669	valid_0's binary_error: 0.246362	valid_0's auc: 0.833558
[23]	valid_0's binary_logloss: 0.505831	valid_0's binary_error: 0.246362	valid_0's auc: 0.833558
[24]	valid_0's binary_logloss: 0.505112	valid_0's binary_error: 0.246362	valid_0's auc: 0.833556
[25]	valid_0's binary_logloss: 0.504496	valid_0's binary_error: 0.246362	valid_0's auc: 0.833556
[26]	valid_0's binary_logloss: 0.503967	valid_0's binary_error: 0.246362	valid_0's auc: 0.833555
[27]	valid_0's binary_logloss: 0.503512	valid_0's binary_error: 0.246362	valid_0's auc: 0.833556
[28]	valid_0's binary_logloss: 0.503122	valid_0's binary_error: 0.246362	valid_0's auc: 0.833554
[29]	valid_0's binary_logloss: 0.502787	valid_0's binary_error: 0.246362	valid_0's auc: 0.833554
[30]	valid_0's binary_logloss: 0.502498	valid_0's binary_error: 0.246362	valid_0's auc: 0.833555
[31]	valid_0's binary_logloss: 0.50225	valid_0's binary_error: 0.246362	valid_0's auc: 0.833555
[32]	valid_0's binary_logloss: 0.502036	valid_0's binary_error: 0.246362	valid_0's auc: 0.833555
[33]	valid_0's binary_logloss: 0.501851	valid_0's binary_error: 0.246362	valid_0's auc: 0.833558
[34]	valid_0's binary_logloss: 0.501691	valid_0's binary_error: 0.246362	valid_0's auc: 0.833559
[35]	valid_0's binary_logloss: 0.501555	valid_0's binary_error: 0.246362	valid_0's auc: 0.833559
[36]	valid_0's binary_logloss: 0.501437	valid_0's binary_error: 0.246362	valid_0's auc: 0.833559
[37]	valid_0's binary_logloss: 0.501335	valid_0's binary_error: 0.246362	valid_0's auc: 0.833564
[38]	valid_0's binary_logloss: 0.501246	valid_0's binary_error: 0.246362	valid_0's auc: 0.833564
[39]	valid_0's binary_logloss: 0.50117	valid_0's binary_error: 0.246362	valid_0's auc: 0.833562
[40]	valid_0's binary_logloss: 0.501104	valid_0's binary_error: 0.246362	valid_0's auc: 0.83356
[41]	valid_0's binary_logloss: 0.501046	valid_0's binary_error: 0.246362	valid_0's auc: 0.833559
[42]	valid_0's binary_logloss: 0.500997	valid_0's binary_error: 0.246362	valid_0's auc: 0.833557
[43]	valid_0's binary_logloss: 0.500954	valid_0's binary_error: 0.246362	valid_0's auc: 0.833555
[44]	valid_0's binary_logloss: 0.500915	valid_0's binary_error: 0.246362	valid_0's auc: 0.833555
[45]	valid_0's binary_logloss: 0.500882	valid_0's binary_error: 0.246362	valid_0's auc: 0.833554
[46]	valid_0's binary_logloss: 0.500852	valid_0's binary_error: 0.246362	valid_0's auc: 0.833553
[47]	valid_0's binary_logloss: 0.500827	valid_0's binary_error: 0.246362	valid_0's auc: 0.833553
[48]	valid_0's binary_logloss: 0.500806	valid_0's binary_error: 0.246362	valid_0's auc: 0.833552
[49]	valid_0's binary_logloss: 0.500787	valid_0's binary_error: 0.246362	valid_0's auc: 0.833552
[50]	valid_0's binary_logloss: 0.500771	valid_0's binary_error: 0.246362	valid_0's auc: 0.83355
[51]	valid_0's binary_logloss: 0.500756	valid_0's binary_error: 0.246362	valid_0's auc: 0.83355
[52]	valid_0's binary_logloss: 0.500744	valid_0's binary_error: 0.246362	valid_0's auc: 0.83355
[53]	valid_0's binary_logloss: 0.500733	valid_0's binary_error: 0.246362	valid_0's auc: 0.83355
[54]	valid_0's binary_logloss: 0.500722	valid_0's binary_error: 0.246362	valid_0's auc: 0.83355
[55]	valid_0's binary_logloss: 0.500714	valid_0's binary_error: 0.246362	valid_0's auc: 0.833548
[56]	valid_0's binary_logloss: 0.500707	valid_0's binary_error: 0.246362	valid_0's auc: 0.833543
[57]	valid_0's binary_logloss: 0.5007	valid_0's binary_error: 0.246362	valid_0's auc: 0.833542
[58]	valid_0's binary_logloss: 0.500694	valid_0's binary_error: 0.246362	valid_0's auc: 0.83354
[59]	valid_0's binary_logloss: 0.500689	valid_0's binary_error: 0.246362	valid_0's auc: 0.833539
[60]	valid_0's binary_logloss: 0.500685	valid_0's binary_error: 0.246362	valid_0's auc: 0.83354
[61]	valid_0's binary_logloss: 0.50068	valid_0's binary_error: 0.246362	valid_0's auc: 0.833542
[62]	valid_0's binary_logloss: 0.500677	valid_0's binary_error: 0.246362	valid_0's auc: 0.83354
[63]	valid_0's binary_logloss: 0.500674	valid_0's binary_error: 0.246362	valid_0's auc: 0.833539
[64]	valid_0's binary_logloss: 0.500672	valid_0's binary_error: 0.246362	valid_0's auc: 0.833539
[65]	valid_0's binary_logloss: 0.50067	valid_0's binary_error: 0.246362	valid_0's auc: 0.833539
[66]	valid_0's binary_logloss: 0.500668	valid_0's binary_error: 0.246362	valid_0's auc: 0.833539
[67]	valid_0's binary_logloss: 0.500667	valid_0's binary_error: 0.246362	valid_0's auc: 0.833538
[68]	valid_0's binary_logloss: 0.500665	valid_0's binary_error: 0.246362	valid_0's auc: 0.833539
[69]	valid_0's binary_logloss: 0.500664	valid_0's binary_error: 0.246362	valid_0's auc: 0.833538
[70]	valid_0's binary_logloss: 0.500663	valid_0's binary_error: 0.246362	valid_0's auc: 0.833537
[71]	valid_0's binary_logloss: 0.500662	valid_0's binary_error: 0.246362	valid_0's auc: 0.833537
[72]	valid_0's binary_logloss: 0.500661	valid_0's binary_error: 0.246362	valid_0's auc: 0.833537
[73]	valid_0's binary_logloss: 0.50066	valid_0's binary_error: 0.246362	valid_0's auc: 0.833537
[74]	valid_0's binary_logloss: 0.50066	valid_0's binary_error: 0.246362	valid_0's auc: 0.833536
[75]	valid_0's binary_logloss: 0.50066	valid_0's binary_error: 0.246362	valid_0's auc: 0.833534
[76]	valid_0's binary_logloss: 0.50066	valid_0's binary_error: 0.246362	valid_0's auc: 0.833532
[77]	valid_0's binary_logloss: 0.500659	valid_0's binary_error: 0.246362	valid_0's auc: 0.833531
[78]	valid_0's binary_logloss: 0.500659	valid_0's binary_error: 0.246362	valid_0's auc: 0.833531
[79]	valid_0's binary_logloss: 0.500659	valid_0's binary_error: 0.246362	valid_0's auc: 0.83353
[80]	valid_0's binary_logloss: 0.500659	valid_0's binary_error: 0.246362	valid_0's auc: 0.83353
[81]	valid_0's binary_logloss: 0.500659	valid_0's binary_error: 0.246362	valid_0's auc: 0.833528
[82]	valid_0's binary_logloss: 0.500659	valid_0's binary_error: 0.246362	valid_0's auc: 0.833528
[83]	valid_0's binary_logloss: 0.500659	valid_0's binary_error: 0.246362	valid_0's auc: 0.833526
[84]	valid_0's binary_logloss: 0.500659	valid_0's binary_error: 0.246362	valid_0's auc: 0.833526
[85]	valid_0's binary_logloss: 0.500659	valid_0's binary_error: 0.246362	valid_0's auc: 0.833526
[86]	valid_0's binary_logloss: 0.500659	valid_0's binary_error: 0.246362	valid_0's auc: 0.833525
[87]	valid_0's binary_logloss: 0.50066	valid_0's binary_error: 0.246362	valid_0's auc: 0.833525
[88]	valid_0's binary_logloss: 0.50066	valid_0's binary_error: 0.246362	valid_0's auc: 0.833525
[89]	valid_0's binary_logloss: 0.50066	valid_0's binary_error: 0.246362	valid_0's auc: 0.833524
[90]	valid_0's binary_logloss: 0.50066	valid_0's binary_error: 0.246362	valid_0's auc: 0.833524
[91]	valid_0's binary_logloss: 0.50066	valid_0's binary_error: 0.246362	valid_0's auc: 0.833525
[92]	valid_0's binary_logloss: 0.500661	valid_0's binary_error: 0.246362	valid_0's auc: 0.833525
[93]	valid_0's binary_logloss: 0.500661	valid_0's binary_error: 0.246362	valid_0's auc: 0.833525
[94]	valid_0's binary_logloss: 0.500661	valid_0's binary_error: 0.246362	valid_0's auc: 0.833525
[95]	valid_0's binary_logloss: 0.500661	valid_0's binary_error: 0.246362	valid_0's auc: 0.833525
[96]	valid_0's binary_logloss: 0.500661	valid_0's binary_error: 0.246362	valid_0's auc: 0.833524
[97]	valid_0's binary_logloss: 0.500662	valid_0's binary_error: 0.246362	valid_0's auc: 0.833524
[98]	valid_0's binary_logloss: 0.500662	valid_0's binary_error: 0.246362	valid_0's auc: 0.833524
[99]	valid_0's binary_logloss: 0.500662	valid_0's binary_error: 0.246362	valid_0's auc: 0.833521
[100]	valid_0's binary_logloss: 0.500662	valid_0's binary_error: 0.246362	valid_0's auc: 0.833521
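
As an optional check (a hypothetical snippet, not in the original notebook), the test-set AUC of this single-variable model can be computed directly from the predictions:

from sklearn.metrics import roc_auc_score
y_true = np.append(np.zeros(pred_qcd_test.shape[0]), np.ones(pred_top_test.shape[0]))
y_score = np.append(pred_qcd_test, pred_top_test)
print("test AUC:", roc_auc_score(y_true, y_score))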

Plot the ROC from the above model


In [24]:
epsilon = 0.0000001
num = 1000
dx = ( 1.0 + (epsilon*2) ) / num
qcdeff_loss = np.ones((num))
topeff_loss = np.ones((num))
for i in range(num):
  xval = (i*dx) - epsilon  # scan the BDT score threshold over [-epsilon, 1+epsilon]
  qcdeff_loss[i]=1.0/(Count(pred_qcd_test,xval)+epsilon)  # epsilon guards against division by zero
  topeff_loss[i]=Count(pred_top_test,xval)
plt.yscale('log')
plt.plot(topeff_loss,qcdeff_loss)



Train BDT using the combined variables


In [28]:
num_round = 100
# Combine columns 0-5: autoencoder loss, jet mass and tau_1 ... tau_4
# (the slice 0:6 leaves out tau_5 in column 6).
train_data = lgb.Dataset( train_x[:,0:6] , label=train_y )
val_data   = lgb.Dataset( val_x[:,0:6]   , label=val_y   )
bst = lgb.train(param, train_data, num_round, valid_sets=val_data)
pred_qcd_test = bst.predict(vars_qcd_test[:,0:6])
pred_top_test = bst.predict(vars_top_test[:,0:6])


[1]	valid_0's binary_logloss: 0.622961	valid_0's binary_error: 0.0853604	valid_0's auc: 0.966623
[2]	valid_0's binary_logloss: 0.565327	valid_0's binary_error: 0.0832066	valid_0's auc: 0.96846
[3]	valid_0's binary_logloss: 0.517087	valid_0's binary_error: 0.0829262	valid_0's auc: 0.970094
[4]	valid_0's binary_logloss: 0.476226	valid_0's binary_error: 0.0813306	valid_0's auc: 0.971207
[5]	valid_0's binary_logloss: 0.441305	valid_0's binary_error: 0.0809361	valid_0's auc: 0.972396
[6]	valid_0's binary_logloss: 0.411219	valid_0's binary_error: 0.0803058	valid_0's auc: 0.97309
[7]	valid_0's binary_logloss: 0.385232	valid_0's binary_error: 0.0796929	valid_0's auc: 0.973353
[8]	valid_0's binary_logloss: 0.362555	valid_0's binary_error: 0.0791569	valid_0's auc: 0.973636
[9]	valid_0's binary_logloss: 0.342904	valid_0's binary_error: 0.0791941	valid_0's auc: 0.973917
[10]	valid_0's binary_logloss: 0.325526	valid_0's binary_error: 0.078807	valid_0's auc: 0.974135
[11]	valid_0's binary_logloss: 0.310303	valid_0's binary_error: 0.0784621	valid_0's auc: 0.974332
[12]	valid_0's binary_logloss: 0.296732	valid_0's binary_error: 0.0782115	valid_0's auc: 0.974586
[13]	valid_0's binary_logloss: 0.28463	valid_0's binary_error: 0.077817	valid_0's auc: 0.974828
[14]	valid_0's binary_logloss: 0.273872	valid_0's binary_error: 0.0775539	valid_0's auc: 0.975217
[15]	valid_0's binary_logloss: 0.264532	valid_0's binary_error: 0.0774224	valid_0's auc: 0.975428
[16]	valid_0's binary_logloss: 0.256024	valid_0's binary_error: 0.0771991	valid_0's auc: 0.975675
[17]	valid_0's binary_logloss: 0.248632	valid_0's binary_error: 0.0769832	valid_0's auc: 0.975844
[18]	valid_0's binary_logloss: 0.241924	valid_0's binary_error: 0.0767772	valid_0's auc: 0.976018
[19]	valid_0's binary_logloss: 0.235924	valid_0's binary_error: 0.0766581	valid_0's auc: 0.976206
[20]	valid_0's binary_logloss: 0.230643	valid_0's binary_error: 0.0764522	valid_0's auc: 0.976311
[21]	valid_0's binary_logloss: 0.225873	valid_0's binary_error: 0.0763678	valid_0's auc: 0.976535
[22]	valid_0's binary_logloss: 0.221596	valid_0's binary_error: 0.0761619	valid_0's auc: 0.976701
[23]	valid_0's binary_logloss: 0.217756	valid_0's binary_error: 0.0760427	valid_0's auc: 0.976898
[24]	valid_0's binary_logloss: 0.214328	valid_0's binary_error: 0.0759286	valid_0's auc: 0.976975
[25]	valid_0's binary_logloss: 0.211155	valid_0's binary_error: 0.0757475	valid_0's auc: 0.977093
[26]	valid_0's binary_logloss: 0.208335	valid_0's binary_error: 0.0756209	valid_0's auc: 0.977201
[27]	valid_0's binary_logloss: 0.205829	valid_0's binary_error: 0.0755514	valid_0's auc: 0.977285
[28]	valid_0's binary_logloss: 0.203477	valid_0's binary_error: 0.0754919	valid_0's auc: 0.977429
[29]	valid_0's binary_logloss: 0.201434	valid_0's binary_error: 0.0753554	valid_0's auc: 0.977518
[30]	valid_0's binary_logloss: 0.199539	valid_0's binary_error: 0.0752189	valid_0's auc: 0.977622
[31]	valid_0's binary_logloss: 0.197822	valid_0's binary_error: 0.0751073	valid_0's auc: 0.977715
[32]	valid_0's binary_logloss: 0.196356	valid_0's binary_error: 0.0749633	valid_0's auc: 0.977761
[33]	valid_0's binary_logloss: 0.19492	valid_0's binary_error: 0.0749882	valid_0's auc: 0.977857
[34]	valid_0's binary_logloss: 0.193638	valid_0's binary_error: 0.0748591	valid_0's auc: 0.977914
[35]	valid_0's binary_logloss: 0.192526	valid_0's binary_error: 0.074745	valid_0's auc: 0.97795
[36]	valid_0's binary_logloss: 0.191441	valid_0's binary_error: 0.0746581	valid_0's auc: 0.978021
[37]	valid_0's binary_logloss: 0.190486	valid_0's binary_error: 0.0745762	valid_0's auc: 0.978103
[38]	valid_0's binary_logloss: 0.189625	valid_0's binary_error: 0.0745341	valid_0's auc: 0.978164
[39]	valid_0's binary_logloss: 0.188876	valid_0's binary_error: 0.0744695	valid_0's auc: 0.9782
[40]	valid_0's binary_logloss: 0.188205	valid_0's binary_error: 0.0744125	valid_0's auc: 0.978227
[41]	valid_0's binary_logloss: 0.18754	valid_0's binary_error: 0.0743628	valid_0's auc: 0.978278
[42]	valid_0's binary_logloss: 0.186977	valid_0's binary_error: 0.0743455	valid_0's auc: 0.978304
[43]	valid_0's binary_logloss: 0.186437	valid_0's binary_error: 0.0742561	valid_0's auc: 0.978348
[44]	valid_0's binary_logloss: 0.185954	valid_0's binary_error: 0.074271	valid_0's auc: 0.978383
[45]	valid_0's binary_logloss: 0.185535	valid_0's binary_error: 0.0742661	valid_0's auc: 0.978402
[46]	valid_0's binary_logloss: 0.185129	valid_0's binary_error: 0.0742586	valid_0's auc: 0.978432
[47]	valid_0's binary_logloss: 0.184756	valid_0's binary_error: 0.0742338	valid_0's auc: 0.978462
[48]	valid_0's binary_logloss: 0.184447	valid_0's binary_error: 0.074214	valid_0's auc: 0.978476
[49]	valid_0's binary_logloss: 0.184149	valid_0's binary_error: 0.074209	valid_0's auc: 0.978494
[50]	valid_0's binary_logloss: 0.183872	valid_0's binary_error: 0.0741941	valid_0's auc: 0.978518
[51]	valid_0's binary_logloss: 0.183611	valid_0's binary_error: 0.0741643	valid_0's auc: 0.978544
[52]	valid_0's binary_logloss: 0.183399	valid_0's binary_error: 0.0741643	valid_0's auc: 0.978558
[53]	valid_0's binary_logloss: 0.183186	valid_0's binary_error: 0.0741246	valid_0's auc: 0.978573
[54]	valid_0's binary_logloss: 0.183004	valid_0's binary_error: 0.0741122	valid_0's auc: 0.978589
[55]	valid_0's binary_logloss: 0.182805	valid_0's binary_error: 0.0740551	valid_0's auc: 0.978609
[56]	valid_0's binary_logloss: 0.182641	valid_0's binary_error: 0.0740452	valid_0's auc: 0.978626
[57]	valid_0's binary_logloss: 0.182478	valid_0's binary_error: 0.0739857	valid_0's auc: 0.978651
[58]	valid_0's binary_logloss: 0.182339	valid_0's binary_error: 0.0739584	valid_0's auc: 0.978666
[59]	valid_0's binary_logloss: 0.182201	valid_0's binary_error: 0.073941	valid_0's auc: 0.978682
[60]	valid_0's binary_logloss: 0.182081	valid_0's binary_error: 0.0739336	valid_0's auc: 0.978697
[61]	valid_0's binary_logloss: 0.181958	valid_0's binary_error: 0.0738914	valid_0's auc: 0.978712
[62]	valid_0's binary_logloss: 0.181858	valid_0's binary_error: 0.0738889	valid_0's auc: 0.978719
[63]	valid_0's binary_logloss: 0.181773	valid_0's binary_error: 0.073874	valid_0's auc: 0.978728
[64]	valid_0's binary_logloss: 0.181676	valid_0's binary_error: 0.0738591	valid_0's auc: 0.978738
[65]	valid_0's binary_logloss: 0.18159	valid_0's binary_error: 0.0738541	valid_0's auc: 0.978752
[66]	valid_0's binary_logloss: 0.181495	valid_0's binary_error: 0.0738393	valid_0's auc: 0.978764
[67]	valid_0's binary_logloss: 0.181426	valid_0's binary_error: 0.0737996	valid_0's auc: 0.978773
[68]	valid_0's binary_logloss: 0.18136	valid_0's binary_error: 0.0737797	valid_0's auc: 0.978779
[69]	valid_0's binary_logloss: 0.181303	valid_0's binary_error: 0.0737648	valid_0's auc: 0.978786
[70]	valid_0's binary_logloss: 0.181231	valid_0's binary_error: 0.0737425	valid_0's auc: 0.978799
[71]	valid_0's binary_logloss: 0.181174	valid_0's binary_error: 0.0737251	valid_0's auc: 0.978808
[72]	valid_0's binary_logloss: 0.181098	valid_0's binary_error: 0.0737276	valid_0's auc: 0.978821
[73]	valid_0's binary_logloss: 0.181046	valid_0's binary_error: 0.0737003	valid_0's auc: 0.97883
[74]	valid_0's binary_logloss: 0.180995	valid_0's binary_error: 0.0736953	valid_0's auc: 0.97884
[75]	valid_0's binary_logloss: 0.180965	valid_0's binary_error: 0.0736978	valid_0's auc: 0.978844
[76]	valid_0's binary_logloss: 0.180917	valid_0's binary_error: 0.0737028	valid_0's auc: 0.978854
[77]	valid_0's binary_logloss: 0.180876	valid_0's binary_error: 0.073673	valid_0's auc: 0.978859
[78]	valid_0's binary_logloss: 0.18098	valid_0's binary_error: 0.0737499	valid_0's auc: 0.978826
[79]	valid_0's binary_logloss: 0.180919	valid_0's binary_error: 0.0737276	valid_0's auc: 0.978843
[80]	valid_0's binary_logloss: 0.180902	valid_0's binary_error: 0.0737226	valid_0's auc: 0.978845
[81]	valid_0's binary_logloss: 0.180851	valid_0's binary_error: 0.0737003	valid_0's auc: 0.978854
[82]	valid_0's binary_logloss: 0.18085	valid_0's binary_error: 0.0737177	valid_0's auc: 0.978858
[83]	valid_0's binary_logloss: 0.181185	valid_0's binary_error: 0.073735	valid_0's auc: 0.978789
[84]	valid_0's binary_logloss: 0.180873	valid_0's binary_error: 0.0736929	valid_0's auc: 0.978848
[85]	valid_0's binary_logloss: 0.181682	valid_0's binary_error: 0.0737847	valid_0's auc: 0.97869
[86]	valid_0's binary_logloss: 0.180951	valid_0's binary_error: 0.0736953	valid_0's auc: 0.978843
[87]	valid_0's binary_logloss: 0.182026	valid_0's binary_error: 0.0738467	valid_0's auc: 0.978608
[88]	valid_0's binary_logloss: 0.181596	valid_0's binary_error: 0.0737921	valid_0's auc: 0.978714
[89]	valid_0's binary_logloss: 0.181593	valid_0's binary_error: 0.0737797	valid_0's auc: 0.978735
[90]	valid_0's binary_logloss: 0.181912	valid_0's binary_error: 0.0737648	valid_0's auc: 0.978733
[91]	valid_0's binary_logloss: 0.182002	valid_0's binary_error: 0.0737499	valid_0's auc: 0.978725
[92]	valid_0's binary_logloss: 0.183225	valid_0's binary_error: 0.0737921	valid_0's auc: 0.978622
[93]	valid_0's binary_logloss: 0.18129	valid_0's binary_error: 0.0737102	valid_0's auc: 0.978782
[94]	valid_0's binary_logloss: 0.1848	valid_0's binary_error: 0.0738442	valid_0's auc: 0.978481
[95]	valid_0's binary_logloss: 0.182605	valid_0's binary_error: 0.0737326	valid_0's auc: 0.978695
[96]	valid_0's binary_logloss: 0.181453	valid_0's binary_error: 0.0737028	valid_0's auc: 0.978767
[97]	valid_0's binary_logloss: 0.182119	valid_0's binary_error: 0.0737177	valid_0's auc: 0.978731
[98]	valid_0's binary_logloss: 0.181225	valid_0's binary_error: 0.0736804	valid_0's auc: 0.978788
[99]	valid_0's binary_logloss: 0.181689	valid_0's binary_error: 0.073678	valid_0's auc: 0.978767
[100]	valid_0's binary_logloss: 0.18192	valid_0's binary_error: 0.0736953	valid_0's auc: 0.978757
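
The validation log-loss bottoms out around iteration 80 and creeps upward afterwards while the AUC stalls, a mild sign of overfitting. A minimal sketch of the same call using the early_stopping_rounds keyword of the LightGBM 2.x lgb.train API (not in the original notebook) would stop training automatically:

# Sketch: halt once the tracked validation metrics stop improving for 10 rounds.
bst = lgb.train(param, train_data, num_round,
                valid_sets=val_data,
                early_stopping_rounds=10)
# Predict using only the trees up to the best iteration found during training.
pred_qcd_test = bst.predict(vars_qcd_test[:, 0:6], num_iteration=bst.best_iteration)
pred_top_test = bst.predict(vars_top_test[:, 0:6], num_iteration=bst.best_iteration)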

Plot the ROC curve for the above model


In [30]:
# Same threshold scan as In [24], now for the all-variables model.
epsilon = 0.0000001
num = 1000
dx = (1.0 + (epsilon * 2)) / num
qcdeff_all = np.ones(num)
topeff_all = np.ones(num)
for i in range(num):
  xval = (i * dx) - epsilon
  qcdeff_all[i] = 1.0 / (Count(pred_qcd_test, xval) + epsilon)
  topeff_all[i] = Count(pred_top_test, xval)
plt.yscale('log')
plt.plot(topeff_all, qcdeff_all)


Out[30]:
[Line plot: top-tagging efficiency (x) vs. inverse QCD efficiency (y), log y-axis]
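
Before dropping the autoencoder loss, it is worth checking how much the all-variables BDT actually relies on it. Below is a minimal sketch using shap's TreeExplainer on the trained booster; it assumes val_x holds the same six columns used for training, and the handling of the returned list is a guard for binary-model behavior that varies across shap versions:

# Sketch: SHAP values for the all-variables model on the validation inputs.
explainer = shap.TreeExplainer(bst)
shap_vals = explainer.shap_values(val_x[:, 0:6])
# Binary LightGBM models may yield a [class0, class1] pair; keep the signal class.
if isinstance(shap_vals, list):
  shap_vals = shap_vals[1]
# Feature 0 is the autoencoder loss; the summary plot ranks features by impact.
shap.summary_plot(shap_vals, val_x[:, 0:6])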

Train the BDT without the autoencoder loss


In [31]:
num_round = 100
# Columns 1:6 drop the autoencoder loss (column 0), keeping the other five inputs.
train_data = lgb.Dataset(train_x[:, 1:6], label=train_y)
val_data   = lgb.Dataset(val_x[:, 1:6],   label=val_y)
bst = lgb.train(param, train_data, num_round, valid_sets=val_data)
pred_qcd_test = bst.predict(vars_qcd_test[:, 1:6])
pred_top_test = bst.predict(vars_top_test[:, 1:6])


[1]	valid_0's binary_logloss: 0.623859	valid_0's binary_error: 0.0913977	valid_0's auc: 0.963412
[2]	valid_0's binary_logloss: 0.567135	valid_0's binary_error: 0.0903108	valid_0's auc: 0.965867
[3]	valid_0's binary_logloss: 0.519743	valid_0's binary_error: 0.0886483	valid_0's auc: 0.967105
[4]	valid_0's binary_logloss: 0.479773	valid_0's binary_error: 0.0884746	valid_0's auc: 0.967698
[5]	valid_0's binary_logloss: 0.445674	valid_0's binary_error: 0.0876632	valid_0's auc: 0.968226
[6]	valid_0's binary_logloss: 0.416379	valid_0's binary_error: 0.0875987	valid_0's auc: 0.968576
[7]	valid_0's binary_logloss: 0.391039	valid_0's binary_error: 0.0869039	valid_0's auc: 0.968837
[8]	valid_0's binary_logloss: 0.369023	valid_0's binary_error: 0.0863853	valid_0's auc: 0.969173
[9]	valid_0's binary_logloss: 0.349887	valid_0's binary_error: 0.0861867	valid_0's auc: 0.969489
[10]	valid_0's binary_logloss: 0.333154	valid_0's binary_error: 0.0858468	valid_0's auc: 0.969647
[11]	valid_0's binary_logloss: 0.318567	valid_0's binary_error: 0.0857649	valid_0's auc: 0.96985
[12]	valid_0's binary_logloss: 0.305561	valid_0's binary_error: 0.0855242	valid_0's auc: 0.970059
[13]	valid_0's binary_logloss: 0.294193	valid_0's binary_error: 0.0854473	valid_0's auc: 0.9702
[14]	valid_0's binary_logloss: 0.284185	valid_0's binary_error: 0.0855441	valid_0's auc: 0.970396
[15]	valid_0's binary_logloss: 0.275267	valid_0's binary_error: 0.0851743	valid_0's auc: 0.970586
[16]	valid_0's binary_logloss: 0.267416	valid_0's binary_error: 0.0850304	valid_0's auc: 0.970708
[17]	valid_0's binary_logloss: 0.260334	valid_0's binary_error: 0.0848766	valid_0's auc: 0.970927
[18]	valid_0's binary_logloss: 0.25409	valid_0's binary_error: 0.0845738	valid_0's auc: 0.971049
[19]	valid_0's binary_logloss: 0.248504	valid_0's binary_error: 0.0845192	valid_0's auc: 0.971221
[20]	valid_0's binary_logloss: 0.243633	valid_0's binary_error: 0.0845118	valid_0's auc: 0.971314
[21]	valid_0's binary_logloss: 0.239249	valid_0's binary_error: 0.0844671	valid_0's auc: 0.971511
[22]	valid_0's binary_logloss: 0.235288	valid_0's binary_error: 0.0843158	valid_0's auc: 0.971697
[23]	valid_0's binary_logloss: 0.23172	valid_0's binary_error: 0.0841669	valid_0's auc: 0.971782
[24]	valid_0's binary_logloss: 0.22857	valid_0's binary_error: 0.0841793	valid_0's auc: 0.971922
[25]	valid_0's binary_logloss: 0.225771	valid_0's binary_error: 0.0840875	valid_0's auc: 0.972036
[26]	valid_0's binary_logloss: 0.223256	valid_0's binary_error: 0.0840056	valid_0's auc: 0.972103
[27]	valid_0's binary_logloss: 0.220897	valid_0's binary_error: 0.0838369	valid_0's auc: 0.972248
[28]	valid_0's binary_logloss: 0.218888	valid_0's binary_error: 0.0837326	valid_0's auc: 0.972319
[29]	valid_0's binary_logloss: 0.217088	valid_0's binary_error: 0.0836681	valid_0's auc: 0.972397
[30]	valid_0's binary_logloss: 0.215382	valid_0's binary_error: 0.0836061	valid_0's auc: 0.972509
[31]	valid_0's binary_logloss: 0.213933	valid_0's binary_error: 0.0835465	valid_0's auc: 0.972553
[32]	valid_0's binary_logloss: 0.212598	valid_0's binary_error: 0.083487	valid_0's auc: 0.972606
[33]	valid_0's binary_logloss: 0.211456	valid_0's binary_error: 0.0834795	valid_0's auc: 0.972661
[34]	valid_0's binary_logloss: 0.210373	valid_0's binary_error: 0.0834101	valid_0's auc: 0.972715
[35]	valid_0's binary_logloss: 0.209421	valid_0's binary_error: 0.0834175	valid_0's auc: 0.972758
[36]	valid_0's binary_logloss: 0.208533	valid_0's binary_error: 0.0833182	valid_0's auc: 0.972811
[37]	valid_0's binary_logloss: 0.207762	valid_0's binary_error: 0.0832934	valid_0's auc: 0.972865
[38]	valid_0's binary_logloss: 0.207069	valid_0's binary_error: 0.0832512	valid_0's auc: 0.972908
[39]	valid_0's binary_logloss: 0.206412	valid_0's binary_error: 0.0832115	valid_0's auc: 0.972955
[40]	valid_0's binary_logloss: 0.205866	valid_0's binary_error: 0.0831818	valid_0's auc: 0.972981
[41]	valid_0's binary_logloss: 0.205357	valid_0's binary_error: 0.0831694	valid_0's auc: 0.973013
[42]	valid_0's binary_logloss: 0.204924	valid_0's binary_error: 0.0831321	valid_0's auc: 0.97303
[43]	valid_0's binary_logloss: 0.204507	valid_0's binary_error: 0.0830676	valid_0's auc: 0.973057
[44]	valid_0's binary_logloss: 0.204125	valid_0's binary_error: 0.0830453	valid_0's auc: 0.973085
[45]	valid_0's binary_logloss: 0.203783	valid_0's binary_error: 0.0830254	valid_0's auc: 0.973106
[46]	valid_0's binary_logloss: 0.203452	valid_0's binary_error: 0.0829783	valid_0's auc: 0.973146
[47]	valid_0's binary_logloss: 0.203186	valid_0's binary_error: 0.0829659	valid_0's auc: 0.973166
[48]	valid_0's binary_logloss: 0.202931	valid_0's binary_error: 0.0828865	valid_0's auc: 0.973185
[49]	valid_0's binary_logloss: 0.202684	valid_0's binary_error: 0.0829014	valid_0's auc: 0.973211
[50]	valid_0's binary_logloss: 0.202469	valid_0's binary_error: 0.0828666	valid_0's auc: 0.973228
[51]	valid_0's binary_logloss: 0.202267	valid_0's binary_error: 0.0828493	valid_0's auc: 0.973249
[52]	valid_0's binary_logloss: 0.202077	valid_0's binary_error: 0.0827897	valid_0's auc: 0.97327
[53]	valid_0's binary_logloss: 0.201912	valid_0's binary_error: 0.0827525	valid_0's auc: 0.973288
[54]	valid_0's binary_logloss: 0.201771	valid_0's binary_error: 0.0827748	valid_0's auc: 0.973297
[55]	valid_0's binary_logloss: 0.201626	valid_0's binary_error: 0.0827252	valid_0's auc: 0.973314
[56]	valid_0's binary_logloss: 0.2015	valid_0's binary_error: 0.0827301	valid_0's auc: 0.97333
[57]	valid_0's binary_logloss: 0.201387	valid_0's binary_error: 0.0827301	valid_0's auc: 0.973341
[58]	valid_0's binary_logloss: 0.201285	valid_0's binary_error: 0.0827103	valid_0's auc: 0.973355
[59]	valid_0's binary_logloss: 0.201198	valid_0's binary_error: 0.082688	valid_0's auc: 0.973362
[60]	valid_0's binary_logloss: 0.201094	valid_0's binary_error: 0.0826706	valid_0's auc: 0.973377
[61]	valid_0's binary_logloss: 0.201027	valid_0's binary_error: 0.0826805	valid_0's auc: 0.973385
[62]	valid_0's binary_logloss: 0.200951	valid_0's binary_error: 0.0826557	valid_0's auc: 0.973393
[63]	valid_0's binary_logloss: 0.200893	valid_0's binary_error: 0.0826731	valid_0's auc: 0.973399
[64]	valid_0's binary_logloss: 0.200835	valid_0's binary_error: 0.0826433	valid_0's auc: 0.97341
[65]	valid_0's binary_logloss: 0.200783	valid_0's binary_error: 0.0826408	valid_0's auc: 0.973418
[66]	valid_0's binary_logloss: 0.200732	valid_0's binary_error: 0.0826284	valid_0's auc: 0.973428
[67]	valid_0's binary_logloss: 0.200682	valid_0's binary_error: 0.0826582	valid_0's auc: 0.973435
[68]	valid_0's binary_logloss: 0.200644	valid_0's binary_error: 0.0826234	valid_0's auc: 0.973443
[69]	valid_0's binary_logloss: 0.200611	valid_0's binary_error: 0.0826259	valid_0's auc: 0.973446
[70]	valid_0's binary_logloss: 0.200561	valid_0's binary_error: 0.0826334	valid_0's auc: 0.973455
[71]	valid_0's binary_logloss: 0.200525	valid_0's binary_error: 0.0825937	valid_0's auc: 0.973464
[72]	valid_0's binary_logloss: 0.200498	valid_0's binary_error: 0.0825738	valid_0's auc: 0.97347
[73]	valid_0's binary_logloss: 0.200469	valid_0's binary_error: 0.0825416	valid_0's auc: 0.973475
[74]	valid_0's binary_logloss: 0.200426	valid_0's binary_error: 0.0824944	valid_0's auc: 0.973483
[75]	valid_0's binary_logloss: 0.200401	valid_0's binary_error: 0.0824894	valid_0's auc: 0.973486
[76]	valid_0's binary_logloss: 0.200362	valid_0's binary_error: 0.0824522	valid_0's auc: 0.973491
[77]	valid_0's binary_logloss: 0.200612	valid_0's binary_error: 0.082477	valid_0's auc: 0.973452
[78]	valid_0's binary_logloss: 0.200546	valid_0's binary_error: 0.082482	valid_0's auc: 0.973431
[79]	valid_0's binary_logloss: 0.200428	valid_0's binary_error: 0.0824597	valid_0's auc: 0.973474
[80]	valid_0's binary_logloss: 0.200414	valid_0's binary_error: 0.0824671	valid_0's auc: 0.973478
[81]	valid_0's binary_logloss: 0.200556	valid_0's binary_error: 0.0824696	valid_0's auc: 0.973425
[82]	valid_0's binary_logloss: 0.200911	valid_0's binary_error: 0.0824497	valid_0's auc: 0.973452
[83]	valid_0's binary_logloss: 0.200525	valid_0's binary_error: 0.0824746	valid_0's auc: 0.973437
[84]	valid_0's binary_logloss: 0.200749	valid_0's binary_error: 0.0824597	valid_0's auc: 0.973461
[85]	valid_0's binary_logloss: 0.200767	valid_0's binary_error: 0.0824299	valid_0's auc: 0.973459
[86]	valid_0's binary_logloss: 0.200927	valid_0's binary_error: 0.0824398	valid_0's auc: 0.973432
[87]	valid_0's binary_logloss: 0.20065	valid_0's binary_error: 0.082482	valid_0's auc: 0.973415
[88]	valid_0's binary_logloss: 0.201492	valid_0's binary_error: 0.0825316	valid_0's auc: 0.973312
[89]	valid_0's binary_logloss: 0.201028	valid_0's binary_error: 0.0824572	valid_0's auc: 0.973402
[90]	valid_0's binary_logloss: 0.200738	valid_0's binary_error: 0.0824423	valid_0's auc: 0.973425
[91]	valid_0's binary_logloss: 0.200592	valid_0's binary_error: 0.0824373	valid_0's auc: 0.973447
[92]	valid_0's binary_logloss: 0.201672	valid_0's binary_error: 0.0824547	valid_0's auc: 0.97339
[93]	valid_0's binary_logloss: 0.200542	valid_0's binary_error: 0.0824125	valid_0's auc: 0.973457
[94]	valid_0's binary_logloss: 0.200957	valid_0's binary_error: 0.0824373	valid_0's auc: 0.973434
[95]	valid_0's binary_logloss: 0.200524	valid_0's binary_error: 0.0824646	valid_0's auc: 0.97346
[96]	valid_0's binary_logloss: 0.20089	valid_0's binary_error: 0.082482	valid_0's auc: 0.973428
[97]	valid_0's binary_logloss: 0.200773	valid_0's binary_error: 0.082482	valid_0's auc: 0.973432
[98]	valid_0's binary_logloss: 0.200933	valid_0's binary_error: 0.0825316	valid_0's auc: 0.973345
[99]	valid_0's binary_logloss: 0.200915	valid_0's binary_error: 0.0825217	valid_0's auc: 0.973383
[100]	valid_0's binary_logloss: 0.20093	valid_0's binary_error: 0.0825316	valid_0's auc: 0.973403
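
Dropping the autoencoder loss lowers the final validation AUC from about 0.9788 to about 0.9734. To quantify the same gap on the held-out test jets, scikit-learn's roc_auc_score can be applied directly to the prediction arrays; a minimal sketch, assuming labels 0 = QCD and 1 = top as in the plots above:

from sklearn.metrics import roc_auc_score

# Sketch: test-set AUC for whichever model produced the current predictions.
y_true  = np.concatenate([np.zeros(len(pred_qcd_test)), np.ones(len(pred_top_test))])
y_score = np.concatenate([pred_qcd_test, pred_top_test])
print('test AUC:', roc_auc_score(y_true, y_score))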

Plot the ROC curve for the no-loss model


In [32]:
# Same threshold scan, now for the model trained without the autoencoder loss.
epsilon = 0.0000001
num = 1000
dx = (1.0 + (epsilon * 2)) / num
qcdeff_noloss = np.ones(num)
topeff_noloss = np.ones(num)
for i in range(num):
  xval = (i * dx) - epsilon
  qcdeff_noloss[i] = 1.0 / (Count(pred_qcd_test, xval) + epsilon)
  topeff_noloss[i] = Count(pred_top_test, xval)
plt.yscale('log')
plt.plot(topeff_noloss, qcdeff_noloss)


Out[32]:
[Line plot: top-tagging efficiency (x) vs. inverse QCD efficiency (y), log y-axis]

In [0]:
np.savetxt("topeff_loss",topeff_loss)
np.savetxt("qcdeff_loss",qcdeff_loss)
np.savetxt("topeff_all",topeff_all)
np.savetxt("qcdeff_all",qcdeff_all)
np.savetxt("topeff_noloss",topeff_noloss)
np.savetxt("qcdeff_noloss",qcdeff_noloss)

In [0]: