Todo:

  • Experiment with normalization while creating triplets
  • Do you need dropout in the regressor network?
  • Simultaneous feature learning?
  • See when results are bad / equal / better, and which classes those cases correspond to.

In [1]:
import tensorflow as tf
import numpy as np
import os, sys
from time import time
from matplotlib import pyplot as plt
from itertools import cycle
import random

from utils import optimistic_restore, save
import layers

PWD = os.getcwd()
sys.path.insert(0, os.path.abspath(os.path.join(PWD, '..')))
import pickle_utils
import cifar_utils

import pdb

In [2]:
'''
HYPERPARAMS
'''
BATCH_SIZE = 10
DATA_PATH = '/media/red/capstone/data/cifar-100/cifar-custom'
LEARNING_RATE = 1e-4
BETA1 = 0.9
BETA2 = 0.99
NUM_CLASSES = 40
NUM_EPOCH = 100
RANDOM_SEED = 1234
SUMMARY_EVERY = 10
VALIDATION_PERCENTAGE = 0.05
SNAPSHOT_MAX = 10 # Keeps the last best 10 snapshots (best determined by validation accuracy)
SNAPSHOT_DIR = '/media/red/capstone/snapshots/feature_extractor_vgg16'
PRETRAINED_WEIGHT_FILE = '/media/red/capstone/pretrained_weights/vgg16_weights.npz'

np.random.seed(seed=RANDOM_SEED)

In [3]:
'''
Load custom CIFAR data. 
'''
# cifar_raw = pickle_utils.load(DATA_PATH)
custom_dataset = pickle_utils.load(DATA_PATH)

data_x, data_y = [], []
for label in custom_dataset['training'].keys():
    for item in custom_dataset['training'][label]:
        data_x.append(item) # 32 x 32 x 3
        data_y.append(label) # 0-39
data_x = np.stack(data_x).astype(np.float32)
data_x = np.flip(data_x, axis=-1) # RGB -> BGR (flip channel axis)
data_y = np.stack(data_y).astype(np.int32)

# Normalize x to [-0.5, 0.5]
data_x = (data_x / 255.0) - 0.5

def round_to(n, precision):
    return int(n / precision + 0.5) * precision
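# e.g. round_to(617.25, 10) == 620, so n_validation below is always a whole
# multiple of BATCH_SIZE.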

n_total_data = data_x.shape[0]
n_validation = round_to(VALIDATION_PERCENTAGE * n_total_data, BATCH_SIZE)
batches_per_epoch = int(np.round((n_total_data - n_validation) / BATCH_SIZE))
# Shuffle data
random_indices = np.random.permutation(n_total_data)
train_indices = cycle(random_indices[n_validation:])
validation_indices = random_indices[:n_validation]

In [ ]:
'''
Declare model
'''
class vgg16:
    '''
    VGG16 Model with ImageNet pretrained weight loader method
    Weights can be downloaded from:
    https://www.cs.toronto.edu/~frossard/vgg16/vgg16_weights.npz
    '''

    def __init__(self, x, y, phase):
        '''
        Sets up the network enough to do a forward pass and
        initializes model hyper-parameters.
        '''

        # List used for loading weights from vgg16.npz (if necessary)
        self.parameters = []
        self.CONV_ACTIVATION = 'relu'
        self.FC_ACTIVATION   = 'relu'

        ########
        # Misc #
        ########
        self.global_step = tf.get_variable('global_step', dtype=tf.int32, trainable=False,
                        initializer=0)
        self.learning_rate = LEARNING_RATE
        self.IM_SHAPE = [224, 224, 3]

        ####################
        # I/O placeholders #
        ####################
        self.x = x
        self.x.set_shape([None]+self.IM_SHAPE)
        self.y = tf.to_int32(y)

        ###############
        # Main Layers #
        ###############
        with tf.variable_scope('conv_layers'):
            self._convlayers()
        with tf.variable_scope('fc_layers'):
            self._fc_layers()

        ######################
        # Define Collections #
        ######################
        self.conv_trainable = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,
                "conv_layers")
        self.fc_trainable = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,
                "fc_layers")

    def evaluate(self):
        '''
        Returns the top-1 classification accuracy (Tensor).
        '''
        # Bool Tensor: True where the top-1 prediction matches the label
        correct = tf.nn.in_top_k(self.fc3, self.y, 1)
        # Average to get accuracy; must cast to float32 first
        self.accuracy = tf.reduce_mean(tf.to_float(correct))
        return self.accuracy

    #####################
    # Private Functions #
    #####################
    def _convlayers(self):
        '''
        All conv and pooling layers of VGG16
        '''
        # zero-mean input; resizing has to be done beforehand for uniform tensor shape
        with tf.variable_scope('preprocess'):
            mean = tf.constant([123.68, 116.779, 103.939],
                    dtype=tf.float32,
                    shape=[1, 1, 1, 3],
                    name='img_mean')
            self.images = self.x*255.0 - mean
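            # Note: x arrives already scaled to (pixel/255 - 0.5), so self.images is
            # roughly (pixel - 127.5) - mean rather than the usual (pixel - mean).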

        # conv1_1
        self.conv1_1, weights, biases = layers.conv2d(name='conv1_1',
                input=self.images,
                shape=(3,3,3,64),
                padding='SAME',
                strides = [1,1,1,1],
                activation=self.CONV_ACTIVATION)
        self.parameters += [weights, biases]

        # conv1_2
        self.conv1_2, weights, biases = layers.conv2d(name='conv1_2',
                input=self.conv1_1,
                shape=(3,3,64,64),
                padding='SAME',
                strides = [1,1,1,1],
                activation=self.CONV_ACTIVATION)
        self.parameters += [weights, biases]

        # pool1
        self.pool1 = tf.nn.max_pool(self.conv1_2,
                ksize=[1, 2, 2, 1],
                strides=[1, 2, 2, 1],
                padding='SAME',
                name='pool1')

        # conv2_1
        self.conv2_1, weights, biases = layers.conv2d(name='conv2_1',
                input=self.pool1,
                shape=(3,3,64,128),
                padding='SAME',
                strides = [1,1,1,1],
                activation=self.CONV_ACTIVATION)
        self.parameters += [weights, biases]

        # conv2_2
        self.conv2_2, weights, biases = layers.conv2d(name='conv2_2',
                input=self.conv2_1,
                shape=(3,3,128,128),
                padding='SAME',
                strides = [1,1,1,1],
                activation=self.CONV_ACTIVATION)
        self.parameters += [weights, biases]

        # pool2
        self.pool2 = tf.nn.max_pool(self.conv2_2,
                ksize=[1, 2, 2, 1],
                strides=[1, 2, 2, 1],
                padding='SAME',
                name='pool2')

        # conv3_1
        self.conv3_1, weights, biases = layers.conv2d(name='conv3_1',
                input=self.pool2,
                shape=(3,3,128,256),
                padding='SAME',
                strides = [1,1,1,1],
                activation=self.CONV_ACTIVATION)
        self.parameters += [weights, biases]

        # conv3_2
        self.conv3_2, weights, biases = layers.conv2d(name='conv3_2',
                input=self.conv3_1,
                shape=(3,3,256,256),
                padding='SAME',
                strides = [1,1,1,1],
                activation=self.CONV_ACTIVATION)
        self.parameters += [weights, biases]

        # conv3_3
        self.conv3_3, weights, biases = layers.conv2d(name='conv3_3',
                input=self.conv3_2,
                shape=(3,3,256,256),
                padding='SAME',
                strides = [1,1,1,1],
                activation=self.CONV_ACTIVATION)
        self.parameters += [weights, biases]

        # pool3
        self.pool3 = tf.nn.max_pool(self.conv3_3,
                ksize=[1, 2, 2, 1],
                strides=[1, 2, 2, 1],
                padding='SAME',
                name='pool3')

        # conv4_1
        self.conv4_1, weights, biases = layers.conv2d(name='conv4_1',
                input=self.pool3,
                shape=(3,3,256,512),
                padding='SAME',
                strides = [1,1,1,1],
                activation=self.CONV_ACTIVATION)
        self.parameters += [weights, biases]

        # conv4_2
        self.conv4_2, weights, biases = layers.conv2d(name='conv4_2',
                input=self.conv4_1,
                shape=(3,3,512,512),
                padding='SAME',
                strides = [1,1,1,1],
                activation=self.CONV_ACTIVATION)
        self.parameters += [weights, biases]

        # conv4_3
        self.conv4_3, weights, biases = layers.conv2d(name='conv4_3',
                input=self.conv4_2,
                shape=(3,3,512,512),
                padding='SAME',
                strides = [1,1,1,1],
                activation=self.CONV_ACTIVATION)
        self.parameters += [weights, biases]

        # pool4
        self.pool4 = tf.nn.max_pool(self.conv4_3,
                ksize=[1, 2, 2, 1],
                strides=[1, 2, 2, 1],
                padding='SAME',
                name='pool4')

        # conv5_1
        self.conv5_1, weights, biases = layers.conv2d(name='conv5_1',
                input=self.pool4,
                shape=(3,3,512,512),
                padding='SAME',
                strides = [1,1,1,1],
                activation=self.CONV_ACTIVATION)
        self.parameters += [weights, biases]

        # conv5_2
        self.conv5_2, weights, biases = layers.conv2d(name='conv5_2',
                input=self.conv5_1,
                shape=(3,3,512,512),
                padding='SAME',
                strides = [1,1,1,1],
                activation=self.CONV_ACTIVATION)
        self.parameters += [weights, biases]

        # conv5_3
        self.conv5_3, weights, biases = layers.conv2d(name='conv5_3',
                input=self.conv5_2,
                shape=(3,3,512,512),
                padding='SAME',
                strides = [1,1,1,1],
                activation=self.CONV_ACTIVATION)
        self.parameters += [weights, biases]

        # pool5
        self.pool5 = tf.nn.max_pool(self.conv5_3,
                ksize=[1, 2, 2, 1],
                strides=[1, 2, 2, 1],
                padding='SAME',
                name='pool5')

    def _fc_layers(self):
        '''
        All FC layers of VGG16 (+custom layers)
        '''
        # fc1
        self.fc1, weights, biases = layers.fc(name='fc1',
                input=tf.contrib.layers.flatten(self.pool5),
                units=4096,
                activation=self.FC_ACTIVATION)
        self.parameters += [weights, biases]
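        # (Sketch, related to the dropout question in the TODO list; not active code.)
        # Dropout could be slotted in here, gated by the `phase` argument of __init__
        # if it were stored as self.phase (it currently is not), e.g.:
        #   self.fc1 = tf.layers.dropout(self.fc1, rate=0.5, training=self.phase)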

        # fc2
        self.fc2, weights, biases = layers.fc(name='fc2',
                input=self.fc1,
                units=4096,
                activation=self.FC_ACTIVATION)
        self.parameters += [weights, biases]

        # fc3
        self.fc3, weights, biases = layers.fc(name='fc3',
                input=self.fc2,
                units=NUM_CLASSES,
                activation='linear')

    def load_pretrained_weights(self, sess):
        '''
        Load pretrained VGG16 weights from a .npz file
        (weights converted from Caffe).
        Only to be used when no TensorFlow snapshot is available.
        Assumes layers were appended to self.parameters in order.
        '''
        print("Loading Imagenet Weights.")

        weights = np.load(PRETRAINED_WEIGHT_FILE)
        keys = sorted(weights.keys())
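        # The sorted npz keys come out as conv1_1_W, conv1_1_b, conv1_2_W, ..., fc8_b,
        # which lines up with the order weights/biases were appended to
        # self.parameters; the fc8_* entries have no counterpart (fc3 is not in the
        # list), so they fall through to the except branch below.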
        for i, k in enumerate(keys):
            print(i, k, np.shape(weights[k]))
            try:
                sess.run(self.parameters[i].assign(weights[k]))
            except (IndexError, ValueError):
                # No matching layer in self.parameters (e.g. fc8_*) or shape mismatch
                print("%s layer not found." % k)

In [ ]:
'''
Model Setup
'''
x = tf.placeholder(dtype=tf.float32, shape=(BATCH_SIZE, 32, 32, 3))
x_resized = tf.image.resize_images(x, (224, 224))
y = tf.placeholder(dtype=tf.int32, shape=(BATCH_SIZE))
is_training = tf.placeholder(dtype=tf.bool)
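# Note: is_training is fed on every run, but the current vgg16 graph ignores its
# `phase` argument (no dropout/batch-norm yet), so the flag has no effect.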

net = vgg16(x_resized, y, is_training)

'''
Loss, Metrics, and Optimization Setup
'''
loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
        labels=y, # ground-truth class indices (not a probability distribution)
        logits=net.fc3, # unscaled log-probabilities
        name='sparse_softmax_cross_entropy')

reduced_loss = tf.reduce_mean(loss)
train_loss_summary = tf.summary.scalar('training_loss', reduced_loss)

optimizer = tf.train.AdamOptimizer(
        learning_rate=LEARNING_RATE,
        beta1=BETA1,
        beta2=BETA2,
        name='AdamOptimizer')
train_op = optimizer.minimize(reduced_loss)
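# Note: net.global_step is not passed to minimize(), so it is never incremented;
# the Python-side `step` counter in the training loop is used for summaries instead.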

pred = tf.nn.softmax(
        logits=net.fc3,
        name='softmax')
pred_class = tf.cast(tf.argmax(pred, axis=1), tf.int32)
acc = tf.reduce_mean(tf.cast(
        tf.equal(y, pred_class),
        tf.float32))

train_acc_summary = tf.summary.scalar('training_accuracy', acc)


'''
TensorBoard Setup
'''
all_train_summary = tf.summary.merge_all()

summary_writer = tf.summary.FileWriter(SNAPSHOT_DIR,
        graph=tf.get_default_graph())

'''
Tensorflow Saver Setup
'''
saver = tf.train.Saver(var_list=tf.global_variables(),
                       max_to_keep=SNAPSHOT_MAX)

'''
Tensorflow Session Setup
'''
tf.set_random_seed(RANDOM_SEED)
config = tf.ConfigProto()
config.gpu_options.per_process_gpu_memory_fraction = 0.8
config.gpu_options.allow_growth = True
config.allow_soft_placement = True
sess = tf.Session(config=config)
init = tf.group(tf.global_variables_initializer(),
                tf.local_variables_initializer())
sess.run(init)

'''
Load Pretrained Weights (ImageNet)
'''
net.load_pretrained_weights(sess)

'''
Declare Validation Loop
'''
def run_validation():
    global best_acc
    
    start_t = time()
    overall_acc = 0
    overall_loss = 0
    for j in range(0, n_validation, BATCH_SIZE):
        # Assemble Batch
        idx = validation_indices[j:(j+BATCH_SIZE)]
        x_batch = data_x[idx,...]
        y_batch = data_y[idx,...]
        
        feed_dict = {x:x_batch,
                 y:y_batch,
                 is_training: False}
        loss_v, acc_v, pred_v = sess.run(
                [reduced_loss, acc, pred],
                feed_dict=feed_dict)
        overall_acc += acc_v
        overall_loss += loss_v
        
        
    duration = time() - start_t
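    # n_validation is rounded to a multiple of BATCH_SIZE (round_to above), so these
    # averages count every validation example exactly once.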
    overall_acc /= (n_validation / BATCH_SIZE)
    overall_loss /= (n_validation / BATCH_SIZE)
    
    overall_acc_summary = tf.Summary()
    overall_acc_summary.value.add(tag='validation_accuracy', simple_value=overall_acc)
    overall_loss_summary = tf.Summary()
    overall_loss_summary.value.add(tag='validation_loss', simple_value=overall_loss)

    summary_writer.add_summary(overall_acc_summary, step)
    summary_writer.add_summary(overall_loss_summary, step)
    
    print('VALIDATION \t acc = {:.3f} ({:.3f} sec)'.format(
                overall_acc, duration))
    if overall_acc > best_acc:
        print('New Best Accuracy {:.3f} > Old Best {:.3f}.  Saving...'.format(
                overall_acc, best_acc))
        best_acc = overall_acc
        save(saver, sess, SNAPSHOT_DIR, step)
        
'''
Main Training Loop
'''
step = 0
epoch = 0
best_acc = 0
while epoch < NUM_EPOCH:
    step += 1
    # Allocate Space For Batch
    x_batch = np.zeros((BATCH_SIZE,) + data_x.shape[1:], dtype=np.float32)
    y_batch = np.zeros((BATCH_SIZE,) + data_y.shape[1:], dtype=np.int32)
    
    # Run Validation
    if step % batches_per_epoch == 0:
        epoch += 1
        run_validation()
        
    # Form Training Batch
    start_t = time()
    for i in range(BATCH_SIZE):
        idx = next(train_indices)
        x_batch[i,...] = data_x[idx, ...]
        y_batch[i,...] = data_y[idx, ...]
    
    # Data Augmentation: random horizontal flip (same flip applied to the whole batch)
    if random.random() < 0.5:
        x_batch = np.flip(x_batch, axis=2) # flip the width axis of (N, H, W, C)
        
    # Prepare Feed Dictionary
    feed_dict = {x:x_batch,
                 y:y_batch,
                 is_training: True}
    # Run Training Summary
    if step % SUMMARY_EVERY == 0:
        loss_v, _, summary_v, acc_v, pred_v = sess.run(
                [reduced_loss, train_op, all_train_summary, acc, pred],
                feed_dict=feed_dict)
        summary_writer.add_summary(summary_v, step)
        duration = time() - start_t
        print('step {:d} \t loss = {:.3f}, train_acc = {:.3f} ({:.3f} sec/step)'.format(
                step, loss_v, acc_v, duration))
    else: # Run Simple Train
        loss_v, _ = sess.run([reduced_loss, train_op],
                feed_dict=feed_dict)


Loading Imagenet Weights.
0 conv1_1_W (3, 3, 3, 64)
1 conv1_1_b (64,)
2 conv1_2_W (3, 3, 64, 64)
3 conv1_2_b (64,)
4 conv2_1_W (3, 3, 64, 128)
5 conv2_1_b (128,)
6 conv2_2_W (3, 3, 128, 128)
7 conv2_2_b (128,)
8 conv3_1_W (3, 3, 128, 256)
9 conv3_1_b (256,)
10 conv3_2_W (3, 3, 256, 256)
11 conv3_2_b (256,)
12 conv3_3_W (3, 3, 256, 256)
13 conv3_3_b (256,)
14 conv4_1_W (3, 3, 256, 512)
15 conv4_1_b (512,)
16 conv4_2_W (3, 3, 512, 512)
17 conv4_2_b (512,)
18 conv4_3_W (3, 3, 512, 512)
19 conv4_3_b (512,)
20 conv5_1_W (3, 3, 512, 512)
21 conv5_1_b (512,)
22 conv5_2_W (3, 3, 512, 512)
23 conv5_2_b (512,)
24 conv5_3_W (3, 3, 512, 512)
25 conv5_3_b (512,)
26 fc6_W (25088, 4096)
27 fc6_b (4096,)
28 fc7_W (4096, 4096)
29 fc7_b (4096,)
30 fc8_W (4096, 1000)
fc8_W layer not found.
31 fc8_b (1000,)
fc8_b layer not found.
step 10 	 loss = 3.749, train_acc = 0.100 (3.224 sec/step)
step 20 	 loss = 3.924, train_acc = 0.000 (3.242 sec/step)
step 30 	 loss = 3.812, train_acc = 0.100 (3.191 sec/step)
step 40 	 loss = 3.663, train_acc = 0.000 (3.239 sec/step)
step 50 	 loss = 3.736, train_acc = 0.100 (3.197 sec/step)
step 60 	 loss = 3.770, train_acc = 0.100 (3.209 sec/step)
step 70 	 loss = 3.609, train_acc = 0.000 (3.224 sec/step)
step 80 	 loss = 3.862, train_acc = 0.000 (3.178 sec/step)
step 90 	 loss = 3.491, train_acc = 0.200 (3.193 sec/step)
step 100 	 loss = 3.687, train_acc = 0.000 (3.193 sec/step)
step 110 	 loss = 3.317, train_acc = 0.200 (3.315 sec/step)
step 120 	 loss = 3.695, train_acc = 0.000 (3.201 sec/step)
step 130 	 loss = 3.367, train_acc = 0.100 (3.185 sec/step)
step 140 	 loss = 3.552, train_acc = 0.000 (3.215 sec/step)
step 150 	 loss = 3.880, train_acc = 0.000 (3.166 sec/step)
step 160 	 loss = 3.772, train_acc = 0.000 (3.178 sec/step)
step 170 	 loss = 3.485, train_acc = 0.000 (3.192 sec/step)
step 180 	 loss = 3.660, train_acc = 0.000 (3.206 sec/step)
step 190 	 loss = 3.580, train_acc = 0.000 (3.193 sec/step)
step 200 	 loss = 3.870, train_acc = 0.000 (3.164 sec/step)
step 210 	 loss = 3.813, train_acc = 0.000 (3.203 sec/step)
step 220 	 loss = 3.541, train_acc = 0.100 (3.208 sec/step)
step 230 	 loss = 3.413, train_acc = 0.000 (3.191 sec/step)
step 240 	 loss = 3.331, train_acc = 0.200 (3.295 sec/step)
step 250 	 loss = 3.420, train_acc = 0.100 (3.186 sec/step)
step 260 	 loss = 3.743, train_acc = 0.000 (3.233 sec/step)
step 270 	 loss = 3.414, train_acc = 0.100 (3.189 sec/step)
step 280 	 loss = 3.919, train_acc = 0.000 (3.220 sec/step)
step 290 	 loss = 3.261, train_acc = 0.100 (3.187 sec/step)
step 300 	 loss = 3.381, train_acc = 0.100 (3.197 sec/step)
step 310 	 loss = 3.582, train_acc = 0.000 (3.243 sec/step)
step 320 	 loss = 3.779, train_acc = 0.000 (3.293 sec/step)
step 330 	 loss = 3.360, train_acc = 0.100 (3.192 sec/step)
step 340 	 loss = 3.624, train_acc = 0.000 (3.236 sec/step)
step 350 	 loss = 3.287, train_acc = 0.200 (3.245 sec/step)
step 360 	 loss = 3.478, train_acc = 0.100 (3.219 sec/step)
step 370 	 loss = 3.961, train_acc = 0.000 (3.160 sec/step)
step 380 	 loss = 3.953, train_acc = 0.100 (3.182 sec/step)
step 390 	 loss = 3.356, train_acc = 0.100 (3.207 sec/step)
step 400 	 loss = 3.489, train_acc = 0.000 (3.178 sec/step)
step 410 	 loss = 3.822, train_acc = 0.100 (3.287 sec/step)
step 420 	 loss = 3.065, train_acc = 0.200 (3.204 sec/step)
step 430 	 loss = 3.400, train_acc = 0.100 (3.171 sec/step)
step 440 	 loss = 3.333, train_acc = 0.200 (3.254 sec/step)
step 450 	 loss = 3.873, train_acc = 0.100 (3.190 sec/step)
step 460 	 loss = 3.055, train_acc = 0.200 (3.173 sec/step)
step 470 	 loss = 2.861, train_acc = 0.100 (3.196 sec/step)
step 480 	 loss = 3.089, train_acc = 0.100 (3.242 sec/step)
step 490 	 loss = 3.548, train_acc = 0.000 (3.206 sec/step)
step 500 	 loss = 3.587, train_acc = 0.000 (3.205 sec/step)
step 510 	 loss = 2.981, train_acc = 0.300 (3.209 sec/step)
step 520 	 loss = 3.169, train_acc = 0.100 (3.180 sec/step)
step 530 	 loss = 2.735, train_acc = 0.200 (3.161 sec/step)
step 540 	 loss = 2.973, train_acc = 0.100 (3.206 sec/step)
step 550 	 loss = 3.063, train_acc = 0.200 (3.230 sec/step)
step 560 	 loss = 3.698, train_acc = 0.000 (3.183 sec/step)
step 570 	 loss = 3.069, train_acc = 0.200 (3.201 sec/step)
step 580 	 loss = 3.473, train_acc = 0.000 (3.207 sec/step)
step 590 	 loss = 3.343, train_acc = 0.100 (3.177 sec/step)
step 600 	 loss = 3.223, train_acc = 0.200 (3.179 sec/step)
step 610 	 loss = 3.174, train_acc = 0.200 (3.196 sec/step)
step 620 	 loss = 3.466, train_acc = 0.300 (3.193 sec/step)
step 630 	 loss = 3.514, train_acc = 0.100 (3.200 sec/step)
step 640 	 loss = 2.835, train_acc = 0.300 (3.228 sec/step)
step 650 	 loss = 3.591, train_acc = 0.000 (3.202 sec/step)
step 660 	 loss = 3.251, train_acc = 0.000 (3.181 sec/step)
step 670 	 loss = 3.758, train_acc = 0.000 (3.193 sec/step)
step 680 	 loss = 3.138, train_acc = 0.100 (3.209 sec/step)
step 690 	 loss = 3.036, train_acc = 0.300 (3.175 sec/step)
step 700 	 loss = 3.376, train_acc = 0.000 (3.201 sec/step)
step 710 	 loss = 2.955, train_acc = 0.300 (3.180 sec/step)
step 720 	 loss = 3.178, train_acc = 0.200 (3.187 sec/step)
step 730 	 loss = 3.447, train_acc = 0.100 (3.190 sec/step)
step 740 	 loss = 3.035, train_acc = 0.100 (3.242 sec/step)
step 750 	 loss = 3.515, train_acc = 0.000 (3.202 sec/step)
step 760 	 loss = 3.681, train_acc = 0.100 (3.198 sec/step)
step 770 	 loss = 2.782, train_acc = 0.100 (3.218 sec/step)
step 780 	 loss = 2.646, train_acc = 0.300 (3.303 sec/step)
step 790 	 loss = 2.569, train_acc = 0.200 (3.286 sec/step)
step 800 	 loss = 2.806, train_acc = 0.400 (3.287 sec/step)
step 810 	 loss = 3.202, train_acc = 0.000 (3.192 sec/step)
step 820 	 loss = 2.516, train_acc = 0.100 (3.203 sec/step)
step 830 	 loss = 2.389, train_acc = 0.500 (3.207 sec/step)
step 840 	 loss = 3.013, train_acc = 0.100 (3.182 sec/step)
step 850 	 loss = 3.388, train_acc = 0.200 (3.200 sec/step)
step 860 	 loss = 2.790, train_acc = 0.300 (3.208 sec/step)
step 870 	 loss = 2.859, train_acc = 0.100 (3.201 sec/step)
step 880 	 loss = 3.392, train_acc = 0.100 (3.227 sec/step)
step 890 	 loss = 2.994, train_acc = 0.100 (3.195 sec/step)
step 900 	 loss = 3.403, train_acc = 0.000 (3.200 sec/step)
step 910 	 loss = 3.615, train_acc = 0.100 (3.197 sec/step)
step 920 	 loss = 2.902, train_acc = 0.400 (3.184 sec/step)
step 930 	 loss = 3.228, train_acc = 0.100 (3.192 sec/step)
step 940 	 loss = 3.192, train_acc = 0.100 (3.226 sec/step)
step 950 	 loss = 2.435, train_acc = 0.500 (3.223 sec/step)
step 960 	 loss = 3.112, train_acc = 0.100 (3.216 sec/step)
step 970 	 loss = 3.190, train_acc = 0.200 (3.210 sec/step)
step 980 	 loss = 3.091, train_acc = 0.200 (3.186 sec/step)
step 990 	 loss = 3.206, train_acc = 0.200 (3.233 sec/step)
step 1000 	 loss = 2.444, train_acc = 0.200 (3.160 sec/step)
step 1010 	 loss = 2.805, train_acc = 0.100 (3.172 sec/step)
step 1020 	 loss = 3.222, train_acc = 0.200 (3.217 sec/step)
step 1030 	 loss = 3.045, train_acc = 0.000 (3.225 sec/step)
step 1040 	 loss = 3.148, train_acc = 0.200 (3.256 sec/step)
step 1050 	 loss = 3.178, train_acc = 0.300 (3.236 sec/step)
step 1060 	 loss = 3.261, train_acc = 0.100 (3.179 sec/step)
step 1070 	 loss = 3.294, train_acc = 0.300 (3.180 sec/step)
step 1080 	 loss = 2.827, train_acc = 0.200 (3.198 sec/step)
step 1090 	 loss = 3.392, train_acc = 0.100 (3.193 sec/step)
step 1100 	 loss = 3.132, train_acc = 0.100 (3.170 sec/step)
step 1110 	 loss = 3.185, train_acc = 0.200 (3.211 sec/step)
step 1120 	 loss = 2.765, train_acc = 0.300 (3.215 sec/step)
step 1130 	 loss = 3.131, train_acc = 0.200 (3.209 sec/step)
step 1140 	 loss = 2.993, train_acc = 0.100 (3.204 sec/step)
step 1150 	 loss = 3.022, train_acc = 0.200 (3.199 sec/step)
step 1160 	 loss = 2.854, train_acc = 0.200 (3.180 sec/step)
step 1170 	 loss = 3.656, train_acc = 0.100 (3.201 sec/step)
step 1180 	 loss = 3.329, train_acc = 0.200 (3.177 sec/step)
step 1190 	 loss = 2.766, train_acc = 0.000 (3.185 sec/step)
step 1200 	 loss = 3.147, train_acc = 0.300 (3.209 sec/step)
step 1210 	 loss = 3.267, train_acc = 0.100 (3.255 sec/step)
step 1220 	 loss = 2.764, train_acc = 0.300 (3.178 sec/step)
step 1230 	 loss = 3.047, train_acc = 0.200 (3.344 sec/step)
step 1240 	 loss = 2.525, train_acc = 0.400 (3.197 sec/step)
step 1250 	 loss = 2.940, train_acc = 0.100 (3.203 sec/step)
step 1260 	 loss = 2.873, train_acc = 0.300 (3.195 sec/step)
step 1270 	 loss = 3.208, train_acc = 0.400 (3.190 sec/step)
step 1280 	 loss = 2.925, train_acc = 0.200 (3.194 sec/step)
step 1290 	 loss = 2.769, train_acc = 0.200 (3.161 sec/step)
step 1300 	 loss = 2.631, train_acc = 0.300 (3.186 sec/step)
step 1310 	 loss = 3.048, train_acc = 0.100 (3.290 sec/step)
step 1320 	 loss = 2.554, train_acc = 0.300 (3.225 sec/step)
step 1330 	 loss = 2.437, train_acc = 0.300 (3.242 sec/step)
step 1340 	 loss = 2.954, train_acc = 0.200 (3.270 sec/step)
step 1350 	 loss = 2.880, train_acc = 0.300 (3.183 sec/step)
step 1360 	 loss = 2.997, train_acc = 0.200 (3.257 sec/step)
step 1370 	 loss = 2.674, train_acc = 0.300 (3.181 sec/step)
step 1380 	 loss = 3.235, train_acc = 0.000 (3.190 sec/step)
step 1390 	 loss = 3.177, train_acc = 0.100 (3.224 sec/step)
step 1400 	 loss = 2.540, train_acc = 0.200 (3.181 sec/step)
step 1410 	 loss = 3.177, train_acc = 0.200 (3.174 sec/step)
step 1420 	 loss = 2.520, train_acc = 0.400 (3.206 sec/step)
step 1430 	 loss = 3.084, train_acc = 0.100 (3.212 sec/step)
step 1440 	 loss = 2.552, train_acc = 0.400 (3.227 sec/step)
step 1450 	 loss = 3.576, train_acc = 0.100 (3.201 sec/step)
step 1460 	 loss = 2.487, train_acc = 0.300 (3.188 sec/step)
step 1470 	 loss = 2.851, train_acc = 0.200 (3.189 sec/step)
step 1480 	 loss = 3.434, train_acc = 0.100 (3.210 sec/step)
step 1490 	 loss = 3.440, train_acc = 0.100 (3.192 sec/step)
step 1500 	 loss = 3.102, train_acc = 0.000 (3.184 sec/step)
step 1510 	 loss = 2.710, train_acc = 0.200 (3.204 sec/step)
step 1520 	 loss = 2.934, train_acc = 0.200 (3.183 sec/step)
step 1530 	 loss = 2.547, train_acc = 0.500 (3.178 sec/step)
step 1540 	 loss = 2.443, train_acc = 0.200 (3.337 sec/step)
step 1550 	 loss = 2.314, train_acc = 0.400 (3.197 sec/step)
step 1560 	 loss = 2.676, train_acc = 0.300 (3.212 sec/step)
step 1570 	 loss = 2.846, train_acc = 0.100 (3.201 sec/step)
step 1580 	 loss = 3.102, train_acc = 0.200 (3.208 sec/step)
step 1590 	 loss = 2.887, train_acc = 0.400 (3.171 sec/step)
step 1600 	 loss = 2.950, train_acc = 0.100 (3.208 sec/step)
step 1610 	 loss = 2.124, train_acc = 0.500 (3.170 sec/step)
step 1620 	 loss = 2.109, train_acc = 0.400 (3.204 sec/step)
step 1630 	 loss = 3.814, train_acc = 0.000 (3.269 sec/step)
step 1640 	 loss = 2.923, train_acc = 0.100 (3.261 sec/step)
step 1650 	 loss = 3.333, train_acc = 0.000 (3.258 sec/step)
step 1660 	 loss = 3.011, train_acc = 0.100 (3.228 sec/step)
step 1670 	 loss = 2.846, train_acc = 0.200 (3.182 sec/step)
step 1680 	 loss = 2.126, train_acc = 0.300 (3.186 sec/step)
step 1690 	 loss = 3.178, train_acc = 0.100 (3.152 sec/step)
step 1700 	 loss = 2.379, train_acc = 0.200 (3.182 sec/step)
step 1710 	 loss = 2.838, train_acc = 0.100 (3.237 sec/step)
step 1720 	 loss = 2.078, train_acc = 0.500 (3.249 sec/step)
step 1730 	 loss = 3.604, train_acc = 0.000 (3.168 sec/step)
step 1740 	 loss = 2.904, train_acc = 0.300 (3.183 sec/step)
step 1750 	 loss = 2.635, train_acc = 0.200 (3.180 sec/step)
step 1760 	 loss = 2.513, train_acc = 0.200 (3.191 sec/step)
step 1770 	 loss = 1.885, train_acc = 0.500 (3.216 sec/step)
step 1780 	 loss = 2.678, train_acc = 0.300 (3.232 sec/step)
step 1790 	 loss = 2.541, train_acc = 0.400 (3.152 sec/step)
step 1800 	 loss = 2.816, train_acc = 0.500 (3.228 sec/step)
step 1810 	 loss = 2.397, train_acc = 0.300 (3.177 sec/step)
step 1820 	 loss = 2.290, train_acc = 0.200 (3.166 sec/step)
step 1830 	 loss = 2.726, train_acc = 0.300 (3.225 sec/step)
step 1840 	 loss = 2.870, train_acc = 0.300 (3.203 sec/step)
step 1850 	 loss = 1.888, train_acc = 0.500 (3.261 sec/step)
step 1860 	 loss = 2.943, train_acc = 0.400 (3.260 sec/step)
step 1870 	 loss = 2.408, train_acc = 0.400 (3.201 sec/step)
step 1880 	 loss = 2.295, train_acc = 0.200 (3.234 sec/step)
step 1890 	 loss = 3.603, train_acc = 0.100 (3.184 sec/step)
VALIDATION 	 acc = 0.207 (3.633 sec)
New Best Accuracy 0.207 > Old Best 0.000.  Saving...
The checkpoint has been created.
step 1900 	 loss = 3.928, train_acc = 0.100 (3.277 sec/step)
step 1910 	 loss = 2.984, train_acc = 0.200 (3.206 sec/step)
step 1920 	 loss = 2.253, train_acc = 0.200 (3.192 sec/step)
step 1930 	 loss = 2.733, train_acc = 0.300 (3.183 sec/step)
step 1940 	 loss = 2.528, train_acc = 0.100 (3.208 sec/step)
step 1950 	 loss = 2.973, train_acc = 0.300 (3.219 sec/step)
step 1960 	 loss = 2.188, train_acc = 0.400 (3.230 sec/step)
step 1970 	 loss = 2.363, train_acc = 0.100 (3.158 sec/step)
step 1980 	 loss = 2.895, train_acc = 0.200 (3.223 sec/step)
step 1990 	 loss = 2.331, train_acc = 0.400 (3.213 sec/step)
step 2000 	 loss = 2.734, train_acc = 0.300 (3.232 sec/step)
step 2010 	 loss = 2.918, train_acc = 0.300 (3.215 sec/step)
step 2020 	 loss = 2.567, train_acc = 0.300 (3.196 sec/step)
step 2030 	 loss = 1.782, train_acc = 0.400 (3.248 sec/step)
step 2040 	 loss = 2.504, train_acc = 0.300 (3.220 sec/step)
step 2050 	 loss = 2.683, train_acc = 0.200 (3.186 sec/step)
step 2060 	 loss = 2.590, train_acc = 0.400 (3.172 sec/step)
step 2070 	 loss = 2.206, train_acc = 0.400 (3.182 sec/step)
step 2080 	 loss = 2.674, train_acc = 0.000 (3.209 sec/step)
step 2090 	 loss = 2.575, train_acc = 0.200 (3.253 sec/step)
step 2100 	 loss = 2.972, train_acc = 0.200 (3.206 sec/step)
step 2110 	 loss = 2.718, train_acc = 0.300 (3.257 sec/step)
step 2120 	 loss = 3.146, train_acc = 0.300 (3.176 sec/step)
step 2130 	 loss = 1.599, train_acc = 0.500 (3.187 sec/step)
step 2140 	 loss = 2.012, train_acc = 0.400 (3.240 sec/step)
step 2150 	 loss = 2.993, train_acc = 0.100 (3.203 sec/step)
step 2160 	 loss = 2.813, train_acc = 0.200 (3.219 sec/step)
step 2170 	 loss = 3.018, train_acc = 0.400 (3.190 sec/step)
step 2180 	 loss = 2.737, train_acc = 0.300 (3.227 sec/step)
step 2190 	 loss = 2.210, train_acc = 0.400 (3.202 sec/step)
step 2200 	 loss = 2.879, train_acc = 0.200 (3.201 sec/step)
step 2210 	 loss = 2.873, train_acc = 0.200 (3.221 sec/step)
step 2220 	 loss = 3.133, train_acc = 0.200 (3.238 sec/step)
step 2230 	 loss = 2.646, train_acc = 0.100 (3.229 sec/step)
step 2240 	 loss = 2.515, train_acc = 0.300 (3.169 sec/step)
step 2250 	 loss = 1.652, train_acc = 0.600 (3.182 sec/step)
step 2260 	 loss = 2.667, train_acc = 0.300 (3.199 sec/step)
step 2270 	 loss = 1.983, train_acc = 0.400 (3.212 sec/step)
step 2280 	 loss = 2.276, train_acc = 0.400 (3.245 sec/step)
step 2290 	 loss = 1.312, train_acc = 0.700 (3.220 sec/step)
step 2300 	 loss = 2.443, train_acc = 0.300 (3.198 sec/step)
step 2310 	 loss = 2.699, train_acc = 0.300 (3.210 sec/step)
step 2320 	 loss = 2.271, train_acc = 0.400 (3.236 sec/step)
step 2330 	 loss = 1.992, train_acc = 0.400 (3.235 sec/step)
step 2340 	 loss = 2.240, train_acc = 0.500 (3.210 sec/step)
step 2350 	 loss = 2.856, train_acc = 0.400 (3.172 sec/step)
step 2360 	 loss = 2.326, train_acc = 0.500 (3.179 sec/step)
step 2370 	 loss = 2.034, train_acc = 0.300 (3.229 sec/step)
step 2380 	 loss = 2.113, train_acc = 0.400 (3.242 sec/step)
step 2390 	 loss = 2.182, train_acc = 0.400 (3.211 sec/step)
step 2400 	 loss = 2.454, train_acc = 0.300 (3.201 sec/step)
step 2410 	 loss = 1.654, train_acc = 0.500 (3.186 sec/step)
step 2420 	 loss = 2.497, train_acc = 0.200 (3.200 sec/step)
step 2430 	 loss = 2.461, train_acc = 0.200 (3.202 sec/step)
step 2440 	 loss = 2.065, train_acc = 0.400 (3.210 sec/step)
step 2450 	 loss = 1.988, train_acc = 0.300 (3.231 sec/step)
step 2460 	 loss = 2.847, train_acc = 0.200 (3.252 sec/step)
step 2470 	 loss = 1.730, train_acc = 0.700 (3.205 sec/step)
step 2480 	 loss = 3.123, train_acc = 0.200 (3.216 sec/step)
step 2490 	 loss = 2.289, train_acc = 0.300 (3.198 sec/step)
step 2500 	 loss = 3.085, train_acc = 0.200 (3.208 sec/step)
step 2510 	 loss = 2.459, train_acc = 0.200 (3.225 sec/step)
step 2520 	 loss = 2.290, train_acc = 0.300 (3.178 sec/step)
step 2530 	 loss = 3.457, train_acc = 0.100 (3.247 sec/step)
step 2540 	 loss = 1.819, train_acc = 0.300 (3.206 sec/step)
step 2550 	 loss = 2.864, train_acc = 0.000 (3.242 sec/step)
step 2560 	 loss = 2.228, train_acc = 0.500 (3.234 sec/step)
step 2570 	 loss = 3.141, train_acc = 0.100 (3.206 sec/step)
step 2580 	 loss = 1.275, train_acc = 0.500 (3.187 sec/step)
step 2590 	 loss = 2.228, train_acc = 0.200 (3.216 sec/step)
step 2600 	 loss = 3.435, train_acc = 0.100 (3.178 sec/step)
step 2610 	 loss = 1.807, train_acc = 0.500 (3.188 sec/step)
step 2620 	 loss = 1.691, train_acc = 0.500 (3.209 sec/step)
step 2630 	 loss = 2.978, train_acc = 0.200 (3.210 sec/step)
step 2640 	 loss = 1.743, train_acc = 0.500 (3.161 sec/step)
step 2650 	 loss = 2.362, train_acc = 0.300 (3.236 sec/step)
step 2660 	 loss = 2.810, train_acc = 0.200 (3.202 sec/step)
step 2670 	 loss = 1.655, train_acc = 0.400 (3.208 sec/step)
step 2680 	 loss = 1.747, train_acc = 0.500 (3.178 sec/step)
step 2690 	 loss = 2.042, train_acc = 0.300 (3.180 sec/step)
step 2700 	 loss = 1.282, train_acc = 0.700 (3.199 sec/step)
step 2710 	 loss = 2.180, train_acc = 0.300 (3.216 sec/step)
step 2720 	 loss = 1.733, train_acc = 0.600 (3.184 sec/step)
step 2730 	 loss = 2.027, train_acc = 0.400 (3.196 sec/step)
step 2740 	 loss = 3.035, train_acc = 0.200 (3.219 sec/step)
step 2750 	 loss = 2.442, train_acc = 0.500 (3.180 sec/step)
step 2760 	 loss = 1.685, train_acc = 0.600 (3.166 sec/step)
step 2770 	 loss = 2.394, train_acc = 0.300 (3.205 sec/step)
step 2780 	 loss = 2.557, train_acc = 0.300 (3.204 sec/step)
step 2790 	 loss = 1.933, train_acc = 0.400 (3.343 sec/step)
step 2800 	 loss = 3.714, train_acc = 0.000 (3.209 sec/step)
step 2810 	 loss = 2.827, train_acc = 0.200 (3.257 sec/step)
step 2820 	 loss = 1.814, train_acc = 0.500 (3.223 sec/step)
step 2830 	 loss = 2.398, train_acc = 0.200 (3.194 sec/step)
step 2840 	 loss = 2.890, train_acc = 0.200 (3.200 sec/step)
step 2850 	 loss = 2.204, train_acc = 0.300 (3.321 sec/step)
step 2860 	 loss = 1.565, train_acc = 0.400 (3.215 sec/step)
step 2870 	 loss = 2.355, train_acc = 0.300 (3.268 sec/step)
step 2880 	 loss = 1.743, train_acc = 0.500 (3.201 sec/step)
step 2890 	 loss = 2.201, train_acc = 0.200 (3.209 sec/step)
step 2900 	 loss = 1.280, train_acc = 0.700 (3.202 sec/step)
step 2910 	 loss = 2.564, train_acc = 0.200 (3.196 sec/step)
step 2920 	 loss = 2.619, train_acc = 0.200 (3.247 sec/step)
step 2930 	 loss = 1.911, train_acc = 0.500 (3.199 sec/step)
step 2940 	 loss = 2.704, train_acc = 0.200 (3.200 sec/step)
step 2950 	 loss = 2.474, train_acc = 0.400 (3.263 sec/step)
step 2960 	 loss = 2.347, train_acc = 0.300 (3.164 sec/step)
step 2970 	 loss = 2.550, train_acc = 0.300 (3.192 sec/step)
step 2980 	 loss = 1.964, train_acc = 0.400 (3.197 sec/step)
step 2990 	 loss = 3.278, train_acc = 0.100 (3.200 sec/step)
step 3000 	 loss = 2.323, train_acc = 0.100 (3.201 sec/step)
step 3010 	 loss = 1.892, train_acc = 0.400 (3.180 sec/step)
step 3020 	 loss = 1.712, train_acc = 0.400 (3.206 sec/step)
step 3030 	 loss = 2.633, train_acc = 0.300 (3.172 sec/step)
step 3040 	 loss = 2.942, train_acc = 0.200 (3.197 sec/step)
step 3050 	 loss = 1.382, train_acc = 0.700 (3.205 sec/step)
step 3060 	 loss = 1.751, train_acc = 0.500 (3.221 sec/step)
step 3070 	 loss = 2.681, train_acc = 0.100 (3.195 sec/step)
step 3080 	 loss = 2.500, train_acc = 0.500 (3.193 sec/step)
step 3090 	 loss = 1.903, train_acc = 0.400 (3.236 sec/step)
step 3100 	 loss = 2.183, train_acc = 0.300 (3.310 sec/step)
step 3110 	 loss = 2.834, train_acc = 0.400 (3.218 sec/step)
step 3120 	 loss = 1.962, train_acc = 0.400 (3.221 sec/step)
step 3130 	 loss = 1.838, train_acc = 0.400 (3.229 sec/step)
step 3140 	 loss = 2.031, train_acc = 0.500 (3.195 sec/step)
step 3150 	 loss = 1.325, train_acc = 0.600 (3.246 sec/step)
step 3160 	 loss = 1.998, train_acc = 0.500 (3.217 sec/step)
step 3170 	 loss = 2.038, train_acc = 0.500 (3.222 sec/step)
step 3180 	 loss = 2.175, train_acc = 0.300 (3.248 sec/step)
step 3190 	 loss = 3.402, train_acc = 0.100 (3.196 sec/step)
step 3200 	 loss = 1.839, train_acc = 0.400 (3.171 sec/step)
step 3210 	 loss = 2.094, train_acc = 0.400 (3.215 sec/step)
step 3220 	 loss = 1.972, train_acc = 0.400 (3.225 sec/step)
step 3230 	 loss = 1.735, train_acc = 0.500 (3.217 sec/step)
step 3240 	 loss = 1.934, train_acc = 0.400 (3.221 sec/step)
step 3250 	 loss = 2.813, train_acc = 0.300 (3.233 sec/step)
step 3260 	 loss = 2.569, train_acc = 0.300 (3.197 sec/step)
step 3270 	 loss = 1.508, train_acc = 0.500 (3.176 sec/step)
step 3280 	 loss = 2.074, train_acc = 0.100 (3.210 sec/step)
step 3290 	 loss = 2.968, train_acc = 0.200 (3.208 sec/step)
step 3300 	 loss = 2.665, train_acc = 0.200 (3.199 sec/step)
step 3310 	 loss = 2.461, train_acc = 0.300 (3.176 sec/step)
step 3320 	 loss = 1.767, train_acc = 0.700 (3.213 sec/step)
step 3330 	 loss = 2.030, train_acc = 0.500 (3.195 sec/step)
step 3340 	 loss = 1.777, train_acc = 0.600 (3.204 sec/step)
step 3350 	 loss = 2.403, train_acc = 0.100 (3.214 sec/step)
step 3360 	 loss = 1.464, train_acc = 0.600 (3.170 sec/step)
step 3370 	 loss = 2.788, train_acc = 0.400 (3.191 sec/step)
step 3380 	 loss = 2.725, train_acc = 0.100 (3.246 sec/step)
step 3390 	 loss = 2.651, train_acc = 0.200 (3.179 sec/step)
step 3400 	 loss = 2.306, train_acc = 0.400 (3.221 sec/step)
step 3410 	 loss = 2.791, train_acc = 0.300 (3.208 sec/step)
step 3420 	 loss = 2.220, train_acc = 0.200 (3.178 sec/step)
step 3430 	 loss = 1.590, train_acc = 0.400 (3.203 sec/step)
step 3440 	 loss = 2.221, train_acc = 0.200 (3.188 sec/step)
step 3450 	 loss = 1.973, train_acc = 0.400 (3.218 sec/step)
step 3460 	 loss = 2.333, train_acc = 0.400 (3.215 sec/step)
step 3470 	 loss = 2.188, train_acc = 0.200 (3.211 sec/step)
step 3480 	 loss = 1.534, train_acc = 0.500 (3.240 sec/step)
step 3490 	 loss = 1.504, train_acc = 0.500 (3.241 sec/step)
step 3500 	 loss = 3.449, train_acc = 0.100 (3.203 sec/step)
step 3510 	 loss = 2.062, train_acc = 0.500 (3.188 sec/step)
step 3520 	 loss = 1.684, train_acc = 0.700 (3.212 sec/step)
step 3530 	 loss = 3.619, train_acc = 0.000 (3.236 sec/step)
step 3540 	 loss = 2.815, train_acc = 0.200 (3.239 sec/step)
step 3550 	 loss = 2.395, train_acc = 0.300 (3.213 sec/step)
step 3560 	 loss = 2.460, train_acc = 0.100 (3.222 sec/step)
step 3570 	 loss = 2.239, train_acc = 0.300 (3.199 sec/step)
step 3580 	 loss = 2.331, train_acc = 0.300 (3.214 sec/step)
step 3590 	 loss = 2.458, train_acc = 0.200 (3.219 sec/step)
step 3600 	 loss = 1.577, train_acc = 0.400 (3.204 sec/step)
step 3610 	 loss = 2.288, train_acc = 0.600 (3.208 sec/step)
step 3620 	 loss = 1.694, train_acc = 0.500 (3.192 sec/step)
step 3630 	 loss = 3.106, train_acc = 0.300 (3.198 sec/step)
step 3640 	 loss = 2.565, train_acc = 0.200 (3.223 sec/step)
step 3650 	 loss = 1.991, train_acc = 0.400 (3.204 sec/step)
step 3660 	 loss = 1.920, train_acc = 0.400 (3.265 sec/step)
step 3670 	 loss = 0.797, train_acc = 0.800 (3.191 sec/step)
step 3680 	 loss = 1.892, train_acc = 0.300 (3.195 sec/step)
step 3690 	 loss = 1.720, train_acc = 0.400 (3.217 sec/step)
step 3700 	 loss = 2.836, train_acc = 0.300 (3.257 sec/step)
step 3710 	 loss = 2.374, train_acc = 0.100 (3.218 sec/step)
step 3720 	 loss = 2.047, train_acc = 0.400 (3.202 sec/step)
step 3730 	 loss = 2.315, train_acc = 0.200 (3.214 sec/step)
step 3740 	 loss = 2.862, train_acc = 0.100 (3.168 sec/step)
step 3750 	 loss = 1.629, train_acc = 0.400 (3.211 sec/step)
step 3760 	 loss = 1.771, train_acc = 0.400 (3.224 sec/step)
step 3770 	 loss = 1.423, train_acc = 0.400 (3.204 sec/step)
step 3780 	 loss = 1.558, train_acc = 0.800 (3.186 sec/step)
step 3790 	 loss = 2.888, train_acc = 0.200 (3.204 sec/step)
VALIDATION 	 acc = 0.406 (3.618 sec)
New Best Accuracy 0.406 > Old Best 0.207.  Saving...
The checkpoint has been created.
step 3800 	 loss = 2.605, train_acc = 0.400 (3.240 sec/step)
step 3810 	 loss = 2.130, train_acc = 0.400 (3.166 sec/step)
step 3820 	 loss = 1.334, train_acc = 0.600 (3.218 sec/step)
step 3830 	 loss = 1.938, train_acc = 0.400 (3.199 sec/step)
step 3840 	 loss = 1.080, train_acc = 0.600 (3.217 sec/step)
step 3850 	 loss = 2.527, train_acc = 0.200 (3.225 sec/step)
step 3860 	 loss = 1.294, train_acc = 0.600 (3.252 sec/step)
step 3870 	 loss = 1.460, train_acc = 0.600 (3.214 sec/step)
step 3880 	 loss = 2.730, train_acc = 0.300 (3.242 sec/step)
step 3890 	 loss = 1.646, train_acc = 0.700 (3.223 sec/step)
step 3900 	 loss = 1.886, train_acc = 0.600 (3.227 sec/step)
step 3910 	 loss = 1.966, train_acc = 0.300 (3.199 sec/step)
step 3920 	 loss = 2.000, train_acc = 0.400 (3.252 sec/step)
step 3930 	 loss = 1.020, train_acc = 0.800 (3.213 sec/step)
step 3940 	 loss = 2.158, train_acc = 0.400 (3.218 sec/step)
step 3950 	 loss = 1.411, train_acc = 0.700 (3.192 sec/step)
step 3960 	 loss = 1.787, train_acc = 0.400 (3.247 sec/step)
step 3970 	 loss = 1.753, train_acc = 0.500 (3.182 sec/step)
step 3980 	 loss = 1.707, train_acc = 0.400 (3.192 sec/step)
step 3990 	 loss = 2.017, train_acc = 0.300 (3.193 sec/step)
step 4000 	 loss = 1.997, train_acc = 0.400 (3.263 sec/step)
step 4010 	 loss = 1.716, train_acc = 0.400 (3.212 sec/step)
step 4020 	 loss = 2.587, train_acc = 0.300 (3.209 sec/step)
step 4030 	 loss = 1.536, train_acc = 0.500 (3.201 sec/step)
step 4040 	 loss = 2.020, train_acc = 0.400 (3.218 sec/step)
step 4050 	 loss = 3.048, train_acc = 0.200 (3.247 sec/step)
step 4060 	 loss = 2.652, train_acc = 0.300 (3.211 sec/step)
step 4070 	 loss = 2.176, train_acc = 0.400 (3.227 sec/step)
step 4080 	 loss = 2.042, train_acc = 0.400 (3.241 sec/step)
step 4090 	 loss = 1.687, train_acc = 0.400 (3.224 sec/step)
step 4100 	 loss = 2.654, train_acc = 0.300 (3.242 sec/step)
step 4110 	 loss = 2.571, train_acc = 0.300 (3.267 sec/step)
step 4120 	 loss = 2.354, train_acc = 0.300 (3.205 sec/step)
step 4130 	 loss = 2.024, train_acc = 0.400 (3.239 sec/step)
step 4140 	 loss = 1.846, train_acc = 0.500 (3.190 sec/step)
step 4150 	 loss = 1.290, train_acc = 0.700 (3.201 sec/step)
step 4160 	 loss = 2.070, train_acc = 0.400 (3.263 sec/step)
step 4170 	 loss = 1.493, train_acc = 0.600 (3.236 sec/step)
step 4180 	 loss = 1.883, train_acc = 0.500 (3.228 sec/step)
step 4190 	 loss = 1.203, train_acc = 0.600 (3.235 sec/step)
step 4200 	 loss = 2.191, train_acc = 0.300 (3.253 sec/step)
step 4210 	 loss = 2.367, train_acc = 0.200 (3.253 sec/step)
step 4220 	 loss = 1.809, train_acc = 0.300 (3.292 sec/step)
step 4230 	 loss = 1.129, train_acc = 0.600 (3.239 sec/step)
step 4240 	 loss = 1.848, train_acc = 0.400 (3.237 sec/step)
step 4250 	 loss = 2.629, train_acc = 0.500 (3.196 sec/step)
step 4260 	 loss = 1.902, train_acc = 0.400 (3.219 sec/step)
step 4270 	 loss = 1.222, train_acc = 0.400 (3.215 sec/step)
step 4280 	 loss = 1.749, train_acc = 0.300 (3.219 sec/step)
step 4290 	 loss = 1.891, train_acc = 0.300 (3.237 sec/step)
step 4300 	 loss = 2.286, train_acc = 0.500 (3.208 sec/step)
step 4310 	 loss = 2.202, train_acc = 0.300 (3.245 sec/step)
step 4320 	 loss = 1.768, train_acc = 0.500 (3.213 sec/step)
step 4330 	 loss = 2.288, train_acc = 0.200 (3.207 sec/step)
step 4340 	 loss = 1.224, train_acc = 0.500 (3.205 sec/step)
step 4350 	 loss = 1.101, train_acc = 0.800 (3.241 sec/step)
step 4360 	 loss = 2.430, train_acc = 0.500 (3.211 sec/step)
step 4370 	 loss = 1.434, train_acc = 0.600 (3.245 sec/step)
step 4380 	 loss = 2.465, train_acc = 0.300 (3.199 sec/step)
step 4390 	 loss = 1.971, train_acc = 0.400 (3.258 sec/step)
step 4400 	 loss = 2.391, train_acc = 0.400 (3.227 sec/step)
step 4410 	 loss = 1.571, train_acc = 0.400 (3.210 sec/step)
step 4420 	 loss = 1.539, train_acc = 0.600 (3.270 sec/step)
step 4430 	 loss = 2.449, train_acc = 0.400 (3.215 sec/step)
step 4440 	 loss = 1.073, train_acc = 0.600 (3.268 sec/step)
step 4450 	 loss = 2.754, train_acc = 0.100 (3.204 sec/step)
step 4460 	 loss = 1.655, train_acc = 0.600 (3.211 sec/step)
step 4470 	 loss = 2.962, train_acc = 0.200 (3.280 sec/step)
step 4480 	 loss = 1.028, train_acc = 0.700 (3.238 sec/step)
step 4490 	 loss = 1.862, train_acc = 0.500 (3.243 sec/step)
step 4500 	 loss = 3.380, train_acc = 0.100 (3.208 sec/step)
step 4510 	 loss = 1.718, train_acc = 0.700 (3.203 sec/step)
step 4520 	 loss = 1.190, train_acc = 0.600 (3.250 sec/step)
step 4530 	 loss = 2.558, train_acc = 0.300 (3.223 sec/step)
step 4540 	 loss = 1.348, train_acc = 0.600 (3.295 sec/step)
step 4550 	 loss = 2.086, train_acc = 0.400 (3.250 sec/step)
step 4560 	 loss = 2.171, train_acc = 0.300 (3.251 sec/step)
step 4570 	 loss = 1.395, train_acc = 0.700 (3.260 sec/step)
step 4580 	 loss = 1.615, train_acc = 0.400 (3.337 sec/step)
step 4590 	 loss = 1.414, train_acc = 0.600 (3.224 sec/step)
step 4600 	 loss = 1.246, train_acc = 0.700 (3.217 sec/step)
step 4610 	 loss = 2.221, train_acc = 0.300 (3.201 sec/step)
step 4620 	 loss = 2.394, train_acc = 0.500 (3.267 sec/step)
step 4630 	 loss = 1.888, train_acc = 0.600 (3.299 sec/step)
step 4640 	 loss = 2.419, train_acc = 0.400 (3.222 sec/step)
step 4650 	 loss = 1.969, train_acc = 0.400 (3.223 sec/step)
step 4660 	 loss = 1.287, train_acc = 0.500 (3.194 sec/step)
step 4670 	 loss = 2.552, train_acc = 0.300 (3.203 sec/step)
step 4680 	 loss = 2.197, train_acc = 0.500 (3.241 sec/step)
step 4690 	 loss = 1.794, train_acc = 0.500 (3.273 sec/step)
step 4700 	 loss = 2.761, train_acc = 0.100 (3.248 sec/step)
step 4710 	 loss = 1.986, train_acc = 0.500 (3.230 sec/step)
step 4720 	 loss = 1.555, train_acc = 0.600 (3.212 sec/step)
step 4730 	 loss = 1.517, train_acc = 0.500 (3.210 sec/step)
step 4740 	 loss = 1.862, train_acc = 0.500 (3.250 sec/step)
step 4750 	 loss = 2.088, train_acc = 0.300 (3.286 sec/step)
step 4760 	 loss = 1.318, train_acc = 0.500 (3.270 sec/step)
step 4770 	 loss = 1.689, train_acc = 0.500 (3.359 sec/step)
step 4780 	 loss = 1.411, train_acc = 0.600 (3.176 sec/step)
step 4790 	 loss = 2.466, train_acc = 0.200 (3.238 sec/step)
step 4800 	 loss = 1.209, train_acc = 0.700 (3.199 sec/step)
step 4810 	 loss = 2.004, train_acc = 0.300 (3.272 sec/step)
step 4820 	 loss = 1.969, train_acc = 0.400 (3.255 sec/step)
step 4830 	 loss = 1.755, train_acc = 0.400 (3.227 sec/step)
step 4840 	 loss = 1.914, train_acc = 0.500 (3.265 sec/step)
step 4850 	 loss = 1.794, train_acc = 0.500 (3.230 sec/step)
step 4860 	 loss = 1.420, train_acc = 0.500 (3.215 sec/step)
step 4870 	 loss = 2.049, train_acc = 0.400 (3.205 sec/step)
step 4880 	 loss = 1.673, train_acc = 0.400 (3.261 sec/step)
step 4890 	 loss = 2.420, train_acc = 0.200 (3.221 sec/step)
step 4900 	 loss = 2.248, train_acc = 0.300 (3.212 sec/step)
step 4910 	 loss = 1.472, train_acc = 0.700 (3.189 sec/step)
step 4920 	 loss = 1.324, train_acc = 0.600 (3.237 sec/step)
step 4930 	 loss = 1.937, train_acc = 0.500 (3.231 sec/step)
step 4940 	 loss = 2.696, train_acc = 0.300 (3.254 sec/step)
step 4950 	 loss = 0.536, train_acc = 0.900 (3.220 sec/step)
step 4960 	 loss = 1.228, train_acc = 0.500 (3.218 sec/step)
step 4970 	 loss = 2.100, train_acc = 0.200 (3.265 sec/step)
step 4980 	 loss = 1.601, train_acc = 0.500 (3.213 sec/step)
step 4990 	 loss = 2.586, train_acc = 0.600 (3.221 sec/step)
step 5000 	 loss = 1.807, train_acc = 0.400 (3.234 sec/step)
step 5010 	 loss = 2.679, train_acc = 0.300 (3.181 sec/step)
step 5020 	 loss = 1.577, train_acc = 0.500 (3.241 sec/step)
step 5030 	 loss = 2.116, train_acc = 0.500 (3.218 sec/step)
step 5040 	 loss = 1.733, train_acc = 0.500 (3.222 sec/step)
step 5050 	 loss = 1.128, train_acc = 0.600 (3.279 sec/step)
step 5060 	 loss = 1.454, train_acc = 0.600 (3.205 sec/step)
step 5070 	 loss = 1.993, train_acc = 0.500 (3.262 sec/step)
step 5080 	 loss = 1.451, train_acc = 0.400 (3.225 sec/step)
step 5090 	 loss = 2.611, train_acc = 0.100 (3.280 sec/step)
step 5100 	 loss = 1.350, train_acc = 0.600 (3.225 sec/step)
step 5110 	 loss = 2.258, train_acc = 0.400 (3.235 sec/step)
step 5120 	 loss = 1.724, train_acc = 0.300 (3.208 sec/step)
step 5130 	 loss = 1.342, train_acc = 0.500 (3.189 sec/step)
step 5140 	 loss = 1.118, train_acc = 0.600 (3.220 sec/step)
step 5150 	 loss = 2.263, train_acc = 0.300 (3.218 sec/step)
step 5160 	 loss = 1.905, train_acc = 0.400 (3.214 sec/step)
step 5170 	 loss = 1.572, train_acc = 0.500 (3.200 sec/step)
step 5180 	 loss = 2.094, train_acc = 0.200 (3.209 sec/step)
step 5190 	 loss = 1.948, train_acc = 0.300 (3.213 sec/step)
step 5200 	 loss = 1.571, train_acc = 0.600 (3.224 sec/step)
step 5210 	 loss = 2.094, train_acc = 0.500 (3.248 sec/step)
step 5220 	 loss = 1.742, train_acc = 0.600 (3.244 sec/step)
step 5230 	 loss = 1.954, train_acc = 0.500 (3.214 sec/step)
step 5240 	 loss = 1.854, train_acc = 0.600 (3.211 sec/step)
step 5250 	 loss = 2.475, train_acc = 0.400 (3.228 sec/step)
step 5260 	 loss = 1.200, train_acc = 0.700 (3.216 sec/step)
step 5270 	 loss = 1.778, train_acc = 0.500 (3.257 sec/step)
step 5280 	 loss = 2.650, train_acc = 0.100 (3.245 sec/step)
step 5290 	 loss = 2.137, train_acc = 0.300 (3.260 sec/step)
step 5300 	 loss = 1.932, train_acc = 0.500 (3.208 sec/step)
step 5310 	 loss = 2.287, train_acc = 0.400 (3.259 sec/step)
step 5320 	 loss = 2.056, train_acc = 0.400 (3.242 sec/step)
step 5330 	 loss = 1.272, train_acc = 0.700 (3.195 sec/step)
step 5340 	 loss = 2.052, train_acc = 0.300 (3.248 sec/step)
step 5350 	 loss = 1.428, train_acc = 0.600 (3.217 sec/step)
step 5360 	 loss = 1.643, train_acc = 0.400 (3.253 sec/step)
step 5370 	 loss = 1.944, train_acc = 0.500 (3.200 sec/step)
step 5380 	 loss = 1.184, train_acc = 0.600 (3.209 sec/step)
step 5390 	 loss = 1.287, train_acc = 0.700 (3.198 sec/step)
step 5400 	 loss = 2.349, train_acc = 0.300 (3.237 sec/step)
step 5410 	 loss = 1.850, train_acc = 0.500 (3.212 sec/step)
step 5420 	 loss = 0.862, train_acc = 0.800 (3.246 sec/step)
step 5430 	 loss = 2.381, train_acc = 0.400 (3.226 sec/step)
step 5440 	 loss = 2.737, train_acc = 0.200 (3.188 sec/step)
step 5450 	 loss = 2.161, train_acc = 0.400 (3.242 sec/step)
step 5460 	 loss = 2.038, train_acc = 0.300 (3.195 sec/step)
step 5470 	 loss = 2.596, train_acc = 0.500 (3.242 sec/step)
step 5480 	 loss = 2.135, train_acc = 0.400 (3.207 sec/step)
step 5490 	 loss = 2.287, train_acc = 0.500 (3.282 sec/step)
step 5500 	 loss = 0.921, train_acc = 0.600 (3.204 sec/step)
step 5510 	 loss = 1.543, train_acc = 0.600 (3.258 sec/step)
step 5520 	 loss = 1.045, train_acc = 0.700 (3.292 sec/step)
step 5530 	 loss = 2.839, train_acc = 0.400 (3.239 sec/step)
step 5540 	 loss = 1.433, train_acc = 0.500 (3.220 sec/step)
step 5550 	 loss = 1.193, train_acc = 0.700 (3.220 sec/step)
step 5560 	 loss = 1.567, train_acc = 0.500 (3.223 sec/step)
step 5570 	 loss = 0.315, train_acc = 0.900 (3.212 sec/step)
step 5580 	 loss = 1.401, train_acc = 0.400 (3.237 sec/step)
step 5590 	 loss = 1.721, train_acc = 0.400 (3.214 sec/step)
step 5600 	 loss = 2.412, train_acc = 0.500 (3.206 sec/step)
step 5610 	 loss = 1.934, train_acc = 0.400 (3.238 sec/step)
step 5620 	 loss = 1.532, train_acc = 0.500 (3.230 sec/step)
step 5630 	 loss = 1.493, train_acc = 0.400 (3.243 sec/step)
step 5640 	 loss = 2.371, train_acc = 0.200 (3.219 sec/step)
step 5650 	 loss = 0.796, train_acc = 0.600 (3.229 sec/step)
step 5660 	 loss = 1.651, train_acc = 0.500 (3.251 sec/step)
step 5670 	 loss = 1.119, train_acc = 0.600 (3.266 sec/step)
step 5680 	 loss = 1.135, train_acc = 0.800 (3.256 sec/step)
step 5690 	 loss = 2.843, train_acc = 0.400 (3.226 sec/step)
VALIDATION 	 acc = 0.462 (3.620 sec)
New Best Accuracy 0.462 > Old Best 0.406.  Saving...
The checkpoint has been created.
step 5700 	 loss = 1.983, train_acc = 0.400 (3.313 sec/step)
step 5710 	 loss = 1.554, train_acc = 0.500 (3.216 sec/step)
step 5720 	 loss = 1.098, train_acc = 0.500 (3.247 sec/step)
step 5730 	 loss = 1.804, train_acc = 0.200 (3.186 sec/step)
step 5740 	 loss = 0.679, train_acc = 0.900 (3.214 sec/step)
step 5750 	 loss = 2.100, train_acc = 0.300 (3.272 sec/step)
step 5760 	 loss = 0.800, train_acc = 0.900 (3.213 sec/step)
step 5770 	 loss = 1.449, train_acc = 0.700 (3.247 sec/step)
step 5780 	 loss = 2.281, train_acc = 0.300 (3.247 sec/step)
step 5790 	 loss = 1.336, train_acc = 0.600 (3.205 sec/step)
step 5800 	 loss = 1.590, train_acc = 0.400 (3.285 sec/step)
step 5810 	 loss = 1.236, train_acc = 0.700 (3.220 sec/step)
step 5820 	 loss = 1.629, train_acc = 0.700 (3.189 sec/step)
step 5830 	 loss = 0.872, train_acc = 0.900 (3.221 sec/step)
step 5840 	 loss = 2.171, train_acc = 0.600 (3.239 sec/step)
step 5850 	 loss = 1.005, train_acc = 0.800 (3.260 sec/step)
step 5860 	 loss = 1.561, train_acc = 0.500 (3.269 sec/step)
step 5870 	 loss = 1.648, train_acc = 0.500 (3.268 sec/step)
step 5880 	 loss = 1.178, train_acc = 0.600 (3.249 sec/step)
step 5890 	 loss = 2.242, train_acc = 0.300 (3.204 sec/step)
step 5900 	 loss = 1.518, train_acc = 0.300 (3.257 sec/step)
step 5910 	 loss = 1.631, train_acc = 0.400 (3.247 sec/step)
step 5920 	 loss = 2.933, train_acc = 0.400 (3.305 sec/step)
step 5930 	 loss = 0.928, train_acc = 0.700 (3.223 sec/step)
step 5940 	 loss = 1.426, train_acc = 0.400 (3.275 sec/step)
step 5950 	 loss = 2.059, train_acc = 0.400 (3.204 sec/step)
step 5960 	 loss = 1.857, train_acc = 0.300 (3.215 sec/step)
step 5970 	 loss = 1.696, train_acc = 0.400 (3.202 sec/step)
step 5980 	 loss = 1.759, train_acc = 0.500 (3.240 sec/step)
step 5990 	 loss = 1.325, train_acc = 0.400 (3.243 sec/step)
step 6000 	 loss = 2.202, train_acc = 0.400 (3.223 sec/step)
step 6010 	 loss = 2.096, train_acc = 0.500 (3.205 sec/step)
step 6020 	 loss = 2.132, train_acc = 0.300 (3.239 sec/step)
step 6030 	 loss = 1.567, train_acc = 0.700 (3.242 sec/step)
step 6040 	 loss = 1.682, train_acc = 0.600 (3.212 sec/step)
step 6050 	 loss = 1.193, train_acc = 0.600 (3.189 sec/step)
step 6060 	 loss = 1.625, train_acc = 0.500 (3.235 sec/step)
step 6070 	 loss = 1.383, train_acc = 0.600 (3.280 sec/step)
step 6080 	 loss = 1.281, train_acc = 0.800 (3.222 sec/step)
step 6090 	 loss = 0.602, train_acc = 0.900 (3.223 sec/step)
step 6100 	 loss = 2.200, train_acc = 0.400 (3.242 sec/step)
step 6110 	 loss = 1.429, train_acc = 0.600 (3.248 sec/step)
step 6120 	 loss = 1.252, train_acc = 0.600 (3.283 sec/step)
step 6130 	 loss = 0.920, train_acc = 0.700 (3.234 sec/step)
step 6140 	 loss = 1.375, train_acc = 0.400 (3.213 sec/step)
step 6150 	 loss = 3.090, train_acc = 0.300 (3.267 sec/step)
step 6160 	 loss = 1.572, train_acc = 0.300 (3.234 sec/step)
step 6170 	 loss = 0.958, train_acc = 0.600 (3.416 sec/step)
step 6180 	 loss = 1.050, train_acc = 0.600 (3.227 sec/step)
step 6190 	 loss = 2.053, train_acc = 0.400 (3.229 sec/step)
step 6200 	 loss = 1.775, train_acc = 0.500 (3.241 sec/step)
step 6210 	 loss = 1.584, train_acc = 0.400 (3.230 sec/step)
step 6220 	 loss = 1.085, train_acc = 0.800 (3.245 sec/step)
step 6230 	 loss = 2.581, train_acc = 0.200 (3.259 sec/step)
step 6240 	 loss = 1.499, train_acc = 0.600 (3.197 sec/step)
step 6250 	 loss = 0.876, train_acc = 0.600 (3.220 sec/step)
step 6260 	 loss = 1.274, train_acc = 0.700 (3.251 sec/step)
step 6270 	 loss = 0.557, train_acc = 0.900 (3.210 sec/step)
step 6280 	 loss = 1.346, train_acc = 0.500 (3.198 sec/step)
step 6290 	 loss = 1.598, train_acc = 0.500 (3.221 sec/step)
step 6300 	 loss = 2.636, train_acc = 0.300 (3.308 sec/step)
step 6310 	 loss = 1.203, train_acc = 0.700 (3.262 sec/step)
step 6320 	 loss = 1.146, train_acc = 0.700 (3.245 sec/step)
step 6330 	 loss = 2.411, train_acc = 0.200 (3.241 sec/step)
step 6340 	 loss = 0.842, train_acc = 0.800 (3.225 sec/step)
step 6350 	 loss = 1.943, train_acc = 0.400 (3.247 sec/step)
step 6360 	 loss = 2.238, train_acc = 0.500 (3.188 sec/step)
step 6370 	 loss = 1.692, train_acc = 0.300 (3.210 sec/step)
step 6380 	 loss = 0.859, train_acc = 0.700 (3.243 sec/step)
step 6390 	 loss = 1.192, train_acc = 0.700 (3.220 sec/step)
step 6400 	 loss = 2.654, train_acc = 0.200 (3.222 sec/step)
step 6410 	 loss = 1.219, train_acc = 0.700 (3.239 sec/step)
step 6420 	 loss = 0.844, train_acc = 0.600 (3.231 sec/step)
step 6430 	 loss = 1.523, train_acc = 0.500 (3.245 sec/step)
step 6440 	 loss = 0.753, train_acc = 0.700 (3.243 sec/step)
step 6450 	 loss = 1.278, train_acc = 0.600 (3.228 sec/step)
step 6460 	 loss = 2.000, train_acc = 0.400 (3.254 sec/step)
step 6470 	 loss = 0.958, train_acc = 0.700 (3.214 sec/step)
step 6480 	 loss = 1.644, train_acc = 0.500 (3.190 sec/step)
step 6490 	 loss = 0.756, train_acc = 0.700 (3.284 sec/step)
step 6500 	 loss = 0.889, train_acc = 0.700 (3.245 sec/step)
step 6510 	 loss = 1.516, train_acc = 0.400 (3.297 sec/step)
step 6520 	 loss = 1.919, train_acc = 0.300 (3.213 sec/step)
step 6530 	 loss = 1.083, train_acc = 0.600 (3.229 sec/step)
step 6540 	 loss = 1.723, train_acc = 0.600 (3.357 sec/step)
step 6550 	 loss = 1.577, train_acc = 0.600 (3.197 sec/step)
step 6560 	 loss = 0.979, train_acc = 0.700 (3.228 sec/step)
step 6570 	 loss = 1.796, train_acc = 0.500 (3.236 sec/step)
step 6580 	 loss = 1.774, train_acc = 0.500 (3.399 sec/step)
step 6590 	 loss = 1.236, train_acc = 0.600 (3.239 sec/step)
step 6600 	 loss = 1.773, train_acc = 0.500 (3.254 sec/step)
step 6610 	 loss = 1.816, train_acc = 0.400 (3.220 sec/step)
step 6620 	 loss = 1.116, train_acc = 0.500 (3.267 sec/step)
step 6630 	 loss = 1.026, train_acc = 0.600 (3.243 sec/step)
step 6640 	 loss = 1.847, train_acc = 0.500 (3.235 sec/step)
step 6650 	 loss = 2.745, train_acc = 0.300 (3.235 sec/step)
step 6660 	 loss = 1.084, train_acc = 0.800 (3.252 sec/step)
step 6670 	 loss = 3.211, train_acc = 0.600 (3.207 sec/step)
step 6680 	 loss = 0.758, train_acc = 0.800 (3.249 sec/step)
step 6690 	 loss = 1.594, train_acc = 0.600 (3.220 sec/step)
step 6700 	 loss = 0.762, train_acc = 0.800 (3.237 sec/step)
step 6710 	 loss = 2.008, train_acc = 0.300 (3.244 sec/step)
step 6720 	 loss = 1.503, train_acc = 0.600 (3.234 sec/step)
step 6730 	 loss = 1.603, train_acc = 0.400 (3.205 sec/step)
step 6740 	 loss = 1.909, train_acc = 0.400 (3.289 sec/step)
step 6750 	 loss = 1.905, train_acc = 0.500 (3.209 sec/step)
step 6760 	 loss = 1.595, train_acc = 0.600 (3.228 sec/step)
step 6770 	 loss = 1.949, train_acc = 0.600 (3.272 sec/step)
step 6780 	 loss = 1.377, train_acc = 0.500 (3.267 sec/step)
step 6790 	 loss = 1.817, train_acc = 0.300 (3.245 sec/step)
step 6800 	 loss = 1.905, train_acc = 0.600 (3.229 sec/step)
step 6810 	 loss = 1.703, train_acc = 0.700 (3.294 sec/step)
step 6820 	 loss = 0.834, train_acc = 0.800 (3.235 sec/step)
step 6830 	 loss = 1.458, train_acc = 0.500 (3.231 sec/step)
step 6840 	 loss = 2.219, train_acc = 0.200 (3.316 sec/step)
step 6850 	 loss = 0.863, train_acc = 0.700 (3.262 sec/step)
step 6860 	 loss = 0.807, train_acc = 0.700 (3.283 sec/step)
step 6870 	 loss = 1.604, train_acc = 0.400 (3.246 sec/step)
step 6880 	 loss = 1.202, train_acc = 0.600 (3.249 sec/step)
step 6890 	 loss = 1.396, train_acc = 0.500 (3.249 sec/step)
step 6900 	 loss = 1.464, train_acc = 0.400 (3.239 sec/step)
step 6910 	 loss = 1.527, train_acc = 0.500 (3.242 sec/step)
step 6920 	 loss = 1.233, train_acc = 0.600 (3.261 sec/step)
step 6930 	 loss = 1.151, train_acc = 0.600 (3.225 sec/step)
step 6940 	 loss = 1.188, train_acc = 0.500 (3.254 sec/step)
step 6950 	 loss = 0.454, train_acc = 0.900 (3.246 sec/step)
step 6960 	 loss = 1.684, train_acc = 0.400 (3.217 sec/step)
step 6970 	 loss = 1.173, train_acc = 0.600 (3.250 sec/step)
step 6980 	 loss = 1.398, train_acc = 0.500 (3.234 sec/step)
step 6990 	 loss = 1.533, train_acc = 0.600 (3.246 sec/step)
step 7000 	 loss = 0.395, train_acc = 1.000 (3.196 sec/step)
step 7010 	 loss = 2.568, train_acc = 0.200 (3.252 sec/step)
step 7020 	 loss = 1.380, train_acc = 0.400 (3.236 sec/step)
step 7030 	 loss = 1.212, train_acc = 0.700 (3.273 sec/step)
step 7040 	 loss = 1.434, train_acc = 0.600 (3.242 sec/step)
step 7050 	 loss = 2.027, train_acc = 0.400 (3.257 sec/step)
step 7060 	 loss = 1.692, train_acc = 0.500 (3.236 sec/step)
step 7070 	 loss = 1.052, train_acc = 0.700 (3.267 sec/step)
step 7080 	 loss = 1.772, train_acc = 0.300 (3.259 sec/step)
step 7090 	 loss = 1.709, train_acc = 0.600 (3.265 sec/step)
step 7100 	 loss = 0.869, train_acc = 0.900 (3.209 sec/step)
step 7110 	 loss = 1.869, train_acc = 0.500 (3.227 sec/step)
step 7120 	 loss = 1.360, train_acc = 0.700 (3.285 sec/step)
step 7130 	 loss = 1.173, train_acc = 0.700 (3.241 sec/step)
step 7140 	 loss = 1.184, train_acc = 0.600 (3.314 sec/step)
step 7150 	 loss = 1.415, train_acc = 0.700 (3.216 sec/step)
step 7160 	 loss = 1.076, train_acc = 0.700 (3.263 sec/step)
step 7170 	 loss = 1.228, train_acc = 0.600 (3.259 sec/step)
step 7180 	 loss = 2.154, train_acc = 0.300 (3.274 sec/step)
step 7190 	 loss = 1.984, train_acc = 0.500 (3.245 sec/step)
step 7200 	 loss = 1.542, train_acc = 0.500 (3.240 sec/step)
step 7210 	 loss = 1.555, train_acc = 0.400 (3.240 sec/step)
step 7220 	 loss = 1.264, train_acc = 0.600 (3.229 sec/step)
step 7230 	 loss = 1.308, train_acc = 0.600 (3.248 sec/step)
step 7240 	 loss = 2.795, train_acc = 0.500 (3.261 sec/step)
step 7250 	 loss = 0.900, train_acc = 0.800 (3.227 sec/step)
step 7260 	 loss = 2.035, train_acc = 0.400 (3.232 sec/step)
step 7270 	 loss = 1.485, train_acc = 0.500 (3.210 sec/step)
step 7280 	 loss = 0.891, train_acc = 0.500 (3.305 sec/step)
step 7290 	 loss = 0.815, train_acc = 0.700 (3.208 sec/step)
step 7300 	 loss = 3.105, train_acc = 0.300 (3.238 sec/step)
step 7310 	 loss = 1.549, train_acc = 0.600 (3.229 sec/step)
step 7320 	 loss = 0.717, train_acc = 0.700 (3.265 sec/step)
step 7330 	 loss = 1.818, train_acc = 0.500 (3.264 sec/step)
step 7340 	 loss = 2.229, train_acc = 0.300 (3.226 sec/step)
step 7350 	 loss = 1.615, train_acc = 0.400 (3.248 sec/step)
step 7360 	 loss = 1.666, train_acc = 0.600 (3.239 sec/step)
step 7370 	 loss = 2.039, train_acc = 0.400 (3.215 sec/step)
step 7380 	 loss = 1.123, train_acc = 0.700 (3.239 sec/step)
step 7390 	 loss = 2.036, train_acc = 0.400 (3.246 sec/step)
step 7400 	 loss = 0.322, train_acc = 0.900 (3.232 sec/step)
step 7410 	 loss = 1.884, train_acc = 0.500 (3.338 sec/step)
step 7420 	 loss = 1.084, train_acc = 0.600 (3.251 sec/step)
step 7430 	 loss = 3.653, train_acc = 0.200 (3.249 sec/step)
step 7440 	 loss = 1.533, train_acc = 0.400 (3.247 sec/step)
step 7450 	 loss = 1.689, train_acc = 0.600 (3.229 sec/step)
step 7460 	 loss = 1.124, train_acc = 0.700 (3.253 sec/step)
step 7470 	 loss = 0.388, train_acc = 0.900 (3.249 sec/step)
step 7480 	 loss = 1.282, train_acc = 0.600 (3.214 sec/step)
step 7490 	 loss = 1.101, train_acc = 0.500 (3.243 sec/step)
step 7500 	 loss = 2.141, train_acc = 0.400 (3.244 sec/step)
step 7510 	 loss = 1.412, train_acc = 0.600 (3.230 sec/step)
step 7520 	 loss = 1.333, train_acc = 0.500 (3.217 sec/step)
step 7530 	 loss = 2.108, train_acc = 0.200 (3.195 sec/step)
step 7540 	 loss = 1.903, train_acc = 0.500 (3.251 sec/step)
step 7550 	 loss = 1.328, train_acc = 0.700 (3.219 sec/step)
step 7560 	 loss = 1.900, train_acc = 0.500 (3.233 sec/step)
step 7570 	 loss = 0.738, train_acc = 0.800 (3.242 sec/step)
step 7580 	 loss = 1.389, train_acc = 0.700 (3.221 sec/step)
step 7590 	 loss = 2.181, train_acc = 0.300 (3.261 sec/step)
VALIDATION 	 acc = 0.494 (3.630 sec)
New Best Accuracy 0.494 > Old Best 0.462.  Saving...
The checkpoint has been created.
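
The three messages above come from the periodic validation pass, which snapshots the model only when held-out accuracy improves. A minimal sketch of that step, assuming hypothetical names (validate_fn, best_acc, maybe_save_best) rather than the notebook's actual code, with the saver mirroring the SNAPSHOT_MAX / SNAPSHOT_DIR settings from the hyperparameter cell:

import tensorflow as tf

def maybe_save_best(sess, saver, validate_fn, step, best_acc, snapshot_dir):
    # Evaluate the held-out split and report accuracy in the same format as the log above.
    val_acc = validate_fn(sess)
    print('VALIDATION \t acc = %.3f' % val_acc)
    if val_acc > best_acc:
        # Keep this snapshot; a Saver built with max_to_keep=SNAPSHOT_MAX discards older ones.
        print('New Best Accuracy %.3f > Old Best %.3f.  Saving...' % (val_acc, best_acc))
        saver.save(sess, snapshot_dir + '/model', global_step=step)
        print('The checkpoint has been created.')
        best_acc = val_acc
    return best_acc

# Example wiring inside the training loop (names illustrative):
# saver = tf.train.Saver(max_to_keep=SNAPSHOT_MAX)
# best_acc = maybe_save_best(sess, saver, validate_fn, step, best_acc, SNAPSHOT_DIR)
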
step 7600 	 loss = 1.037, train_acc = 0.800 (3.261 sec/step)
step 7610 	 loss = 1.518, train_acc = 0.500 (3.323 sec/step)
step 7620 	 loss = 1.674, train_acc = 0.400 (3.359 sec/step)
step 7630 	 loss = 0.965, train_acc = 0.600 (3.253 sec/step)
step 7640 	 loss = 0.711, train_acc = 0.800 (3.210 sec/step)
step 7650 	 loss = 2.485, train_acc = 0.300 (3.217 sec/step)
step 7660 	 loss = 0.361, train_acc = 0.900 (3.235 sec/step)
step 7670 	 loss = 1.169, train_acc = 0.700 (3.218 sec/step)
step 7680 	 loss = 1.418, train_acc = 0.700 (3.292 sec/step)
step 7690 	 loss = 1.286, train_acc = 0.500 (3.263 sec/step)
step 7700 	 loss = 1.029, train_acc = 0.800 (3.242 sec/step)
step 7710 	 loss = 1.324, train_acc = 0.600 (3.244 sec/step)
step 7720 	 loss = 1.404, train_acc = 0.500 (3.211 sec/step)
step 7730 	 loss = 0.537, train_acc = 0.800 (3.320 sec/step)
step 7740 	 loss = 1.094, train_acc = 0.700 (3.260 sec/step)
step 7750 	 loss = 2.006, train_acc = 0.300 (3.235 sec/step)
step 7760 	 loss = 0.756, train_acc = 0.800 (3.311 sec/step)
step 7770 	 loss = 0.852, train_acc = 0.900 (3.298 sec/step)
step 7780 	 loss = 0.471, train_acc = 0.900 (3.239 sec/step)
step 7790 	 loss = 1.450, train_acc = 0.600 (3.212 sec/step)
step 7800 	 loss = 1.050, train_acc = 0.700 (3.223 sec/step)
step 7810 	 loss = 1.320, train_acc = 0.500 (3.247 sec/step)
step 7820 	 loss = 2.256, train_acc = 0.400 (3.255 sec/step)
step 7830 	 loss = 0.880, train_acc = 0.800 (3.321 sec/step)
step 7840 	 loss = 1.330, train_acc = 0.400 (3.244 sec/step)
step 7850 	 loss = 1.467, train_acc = 0.500 (3.244 sec/step)
step 7860 	 loss = 1.406, train_acc = 0.700 (3.260 sec/step)
step 7870 	 loss = 1.619, train_acc = 0.700 (3.218 sec/step)
step 7880 	 loss = 1.017, train_acc = 0.700 (3.238 sec/step)
step 7890 	 loss = 1.030, train_acc = 0.700 (3.230 sec/step)
step 7900 	 loss = 2.020, train_acc = 0.400 (3.229 sec/step)
step 7910 	 loss = 1.668, train_acc = 0.600 (3.220 sec/step)
step 7920 	 loss = 1.469, train_acc = 0.400 (3.262 sec/step)
step 7930 	 loss = 1.608, train_acc = 0.600 (3.230 sec/step)
step 7940 	 loss = 1.229, train_acc = 0.700 (3.275 sec/step)
step 7950 	 loss = 0.903, train_acc = 0.600 (3.240 sec/step)
step 7960 	 loss = 1.971, train_acc = 0.500 (3.255 sec/step)
step 7970 	 loss = 1.356, train_acc = 0.600 (3.284 sec/step)
step 7980 	 loss = 0.522, train_acc = 0.800 (3.244 sec/step)
step 7990 	 loss = 0.994, train_acc = 0.600 (3.305 sec/step)
step 8000 	 loss = 3.372, train_acc = 0.200 (3.215 sec/step)
step 8010 	 loss = 1.631, train_acc = 0.400 (3.250 sec/step)
step 8020 	 loss = 1.178, train_acc = 0.500 (3.275 sec/step)
step 8030 	 loss = 0.883, train_acc = 0.800 (3.261 sec/step)
step 8040 	 loss = 1.495, train_acc = 0.500 (3.235 sec/step)
step 8050 	 loss = 1.549, train_acc = 0.400 (3.231 sec/step)
step 8060 	 loss = 1.147, train_acc = 0.600 (3.202 sec/step)
step 8070 	 loss = 0.850, train_acc = 0.700 (3.215 sec/step)
step 8080 	 loss = 0.887, train_acc = 0.800 (3.247 sec/step)
step 8090 	 loss = 1.403, train_acc = 0.500 (3.256 sec/step)
step 8100 	 loss = 1.361, train_acc = 0.600 (3.273 sec/step)
step 8110 	 loss = 1.530, train_acc = 0.400 (3.228 sec/step)
step 8120 	 loss = 1.312, train_acc = 0.700 (3.289 sec/step)
step 8130 	 loss = 1.773, train_acc = 0.500 (3.234 sec/step)
step 8140 	 loss = 1.113, train_acc = 0.700 (3.252 sec/step)
step 8150 	 loss = 1.037, train_acc = 0.600 (3.225 sec/step)
step 8160 	 loss = 0.760, train_acc = 0.700 (3.225 sec/step)
step 8170 	 loss = 0.433, train_acc = 0.900 (3.204 sec/step)
step 8180 	 loss = 2.304, train_acc = 0.300 (3.228 sec/step)
step 8190 	 loss = 1.281, train_acc = 0.500 (3.226 sec/step)
step 8200 	 loss = 1.708, train_acc = 0.500 (3.239 sec/step)
step 8210 	 loss = 1.244, train_acc = 0.500 (3.206 sec/step)
step 8220 	 loss = 0.586, train_acc = 0.900 (3.246 sec/step)
step 8230 	 loss = 3.386, train_acc = 0.400 (3.224 sec/step)
step 8240 	 loss = 1.524, train_acc = 0.500 (3.224 sec/step)
step 8250 	 loss = 2.117, train_acc = 0.400 (3.219 sec/step)
step 8260 	 loss = 1.253, train_acc = 0.800 (3.226 sec/step)
step 8270 	 loss = 0.876, train_acc = 0.800 (3.250 sec/step)
step 8280 	 loss = 0.323, train_acc = 0.900 (3.238 sec/step)
step 8290 	 loss = 1.444, train_acc = 0.400 (3.213 sec/step)
step 8300 	 loss = 1.718, train_acc = 0.500 (3.250 sec/step)
step 8310 	 loss = 1.237, train_acc = 0.800 (3.245 sec/step)
step 8320 	 loss = 0.616, train_acc = 0.800 (3.226 sec/step)
step 8330 	 loss = 1.700, train_acc = 0.500 (3.233 sec/step)
step 8340 	 loss = 0.648, train_acc = 0.800 (3.242 sec/step)
step 8350 	 loss = 2.036, train_acc = 0.500 (3.206 sec/step)
step 8360 	 loss = 1.338, train_acc = 0.400 (3.275 sec/step)
step 8370 	 loss = 0.761, train_acc = 0.800 (3.285 sec/step)
step 8380 	 loss = 0.915, train_acc = 0.900 (3.258 sec/step)
step 8390 	 loss = 0.913, train_acc = 0.600 (3.261 sec/step)
step 8400 	 loss = 0.797, train_acc = 0.700 (3.242 sec/step)
step 8410 	 loss = 1.912, train_acc = 0.400 (3.275 sec/step)
step 8420 	 loss = 1.123, train_acc = 0.600 (3.234 sec/step)
step 8430 	 loss = 1.102, train_acc = 0.600 (3.266 sec/step)
step 8440 	 loss = 2.022, train_acc = 0.400 (3.228 sec/step)
step 8450 	 loss = 1.831, train_acc = 0.400 (3.271 sec/step)
step 8460 	 loss = 0.802, train_acc = 0.800 (3.259 sec/step)
step 8470 	 loss = 1.185, train_acc = 0.700 (3.239 sec/step)
step 8480 	 loss = 1.353, train_acc = 0.600 (3.246 sec/step)
step 8490 	 loss = 0.940, train_acc = 0.600 (3.243 sec/step)
step 8500 	 loss = 1.946, train_acc = 0.500 (3.211 sec/step)
step 8510 	 loss = 1.442, train_acc = 0.500 (3.238 sec/step)
step 8520 	 loss = 0.510, train_acc = 0.900 (3.248 sec/step)
step 8530 	 loss = 1.526, train_acc = 0.500 (3.221 sec/step)
step 8540 	 loss = 1.824, train_acc = 0.500 (3.250 sec/step)
step 8550 	 loss = 0.988, train_acc = 0.700 (3.222 sec/step)
step 8560 	 loss = 0.739, train_acc = 0.700 (3.211 sec/step)
step 8570 	 loss = 0.948, train_acc = 0.800 (3.262 sec/step)
step 8580 	 loss = 0.440, train_acc = 0.900 (3.250 sec/step)
step 8590 	 loss = 1.507, train_acc = 0.600 (3.265 sec/step)
step 8600 	 loss = 0.797, train_acc = 0.700 (3.212 sec/step)
step 8610 	 loss = 2.023, train_acc = 0.500 (3.250 sec/step)
step 8620 	 loss = 0.688, train_acc = 0.700 (3.263 sec/step)
step 8630 	 loss = 0.861, train_acc = 0.800 (3.220 sec/step)
step 8640 	 loss = 1.064, train_acc = 0.600 (3.263 sec/step)
step 8650 	 loss = 1.420, train_acc = 0.500 (3.372 sec/step)
step 8660 	 loss = 1.276, train_acc = 0.500 (3.273 sec/step)
step 8670 	 loss = 0.988, train_acc = 0.700 (3.255 sec/step)
step 8680 	 loss = 1.352, train_acc = 0.600 (3.248 sec/step)
step 8690 	 loss = 1.015, train_acc = 0.600 (3.292 sec/step)
step 8700 	 loss = 1.333, train_acc = 0.700 (3.382 sec/step)
step 8710 	 loss = 1.203, train_acc = 0.600 (3.250 sec/step)
step 8720 	 loss = 0.442, train_acc = 0.900 (3.269 sec/step)
step 8730 	 loss = 2.272, train_acc = 0.400 (3.240 sec/step)
step 8740 	 loss = 1.686, train_acc = 0.400 (3.217 sec/step)
step 8750 	 loss = 0.401, train_acc = 0.900 (3.245 sec/step)
step 8760 	 loss = 0.964, train_acc = 0.700 (3.223 sec/step)
step 8770 	 loss = 1.438, train_acc = 0.400 (3.240 sec/step)
step 8780 	 loss = 0.816, train_acc = 0.900 (3.309 sec/step)
step 8790 	 loss = 1.090, train_acc = 0.500 (3.229 sec/step)
step 8800 	 loss = 1.281, train_acc = 0.400 (3.260 sec/step)
step 8810 	 loss = 1.273, train_acc = 0.600 (3.252 sec/step)
step 8820 	 loss = 0.634, train_acc = 0.800 (3.266 sec/step)
step 8830 	 loss = 0.686, train_acc = 0.800 (3.286 sec/step)
step 8840 	 loss = 0.540, train_acc = 0.700 (3.252 sec/step)
step 8850 	 loss = 0.500, train_acc = 0.800 (3.221 sec/step)
step 8860 	 loss = 0.979, train_acc = 0.800 (3.265 sec/step)
step 8870 	 loss = 0.780, train_acc = 0.900 (3.267 sec/step)
step 8880 	 loss = 1.286, train_acc = 0.600 (3.229 sec/step)
step 8890 	 loss = 0.728, train_acc = 0.700 (3.245 sec/step)
step 8900 	 loss = 1.206, train_acc = 0.600 (3.280 sec/step)
step 8910 	 loss = 1.383, train_acc = 0.500 (3.273 sec/step)
step 8920 	 loss = 1.997, train_acc = 0.400 (3.228 sec/step)
step 8930 	 loss = 1.247, train_acc = 0.600 (3.210 sec/step)
step 8940 	 loss = 0.981, train_acc = 0.700 (3.252 sec/step)
step 8950 	 loss = 1.551, train_acc = 0.300 (3.217 sec/step)
step 8960 	 loss = 1.257, train_acc = 0.800 (3.227 sec/step)
step 8970 	 loss = 1.159, train_acc = 0.800 (3.241 sec/step)
step 8980 	 loss = 1.110, train_acc = 0.600 (3.264 sec/step)
step 8990 	 loss = 0.846, train_acc = 0.800 (3.241 sec/step)
step 9000 	 loss = 0.592, train_acc = 1.000 (3.214 sec/step)
step 9010 	 loss = 1.089, train_acc = 0.700 (3.271 sec/step)
step 9020 	 loss = 1.278, train_acc = 0.700 (3.258 sec/step)
step 9030 	 loss = 0.816, train_acc = 0.800 (3.233 sec/step)
step 9040 	 loss = 0.673, train_acc = 0.800 (3.206 sec/step)
step 9050 	 loss = 1.177, train_acc = 0.700 (3.297 sec/step)
step 9060 	 loss = 0.498, train_acc = 0.800 (3.240 sec/step)
step 9070 	 loss = 0.898, train_acc = 0.700 (3.242 sec/step)
step 9080 	 loss = 0.948, train_acc = 0.600 (3.242 sec/step)
step 9090 	 loss = 0.998, train_acc = 0.600 (3.263 sec/step)
step 9100 	 loss = 0.699, train_acc = 0.800 (3.271 sec/step)
step 9110 	 loss = 1.035, train_acc = 0.500 (3.238 sec/step)
step 9120 	 loss = 0.896, train_acc = 0.700 (3.219 sec/step)
step 9130 	 loss = 0.791, train_acc = 0.900 (3.229 sec/step)
step 9140 	 loss = 1.285, train_acc = 0.800 (3.217 sec/step)
step 9150 	 loss = 0.572, train_acc = 0.900 (3.329 sec/step)
step 9160 	 loss = 1.108, train_acc = 0.600 (3.207 sec/step)
step 9170 	 loss = 0.891, train_acc = 0.800 (3.226 sec/step)
step 9180 	 loss = 1.058, train_acc = 0.600 (3.240 sec/step)
step 9190 	 loss = 0.988, train_acc = 0.800 (3.294 sec/step)
step 9200 	 loss = 2.191, train_acc = 0.500 (3.264 sec/step)
step 9210 	 loss = 1.002, train_acc = 0.800 (3.215 sec/step)
step 9220 	 loss = 1.324, train_acc = 0.500 (3.224 sec/step)
step 9230 	 loss = 1.470, train_acc = 0.400 (3.287 sec/step)
step 9240 	 loss = 2.431, train_acc = 0.300 (3.264 sec/step)
step 9250 	 loss = 1.109, train_acc = 0.600 (3.285 sec/step)
step 9260 	 loss = 0.890, train_acc = 0.800 (3.280 sec/step)
step 9270 	 loss = 1.439, train_acc = 0.700 (3.286 sec/step)
step 9280 	 loss = 1.206, train_acc = 0.700 (3.242 sec/step)
step 9290 	 loss = 1.310, train_acc = 0.600 (3.313 sec/step)
step 9300 	 loss = 0.568, train_acc = 0.900 (3.263 sec/step)
step 9310 	 loss = 0.661, train_acc = 0.900 (3.266 sec/step)
step 9320 	 loss = 1.768, train_acc = 0.500 (3.219 sec/step)
step 9330 	 loss = 2.137, train_acc = 0.400 (3.242 sec/step)
step 9340 	 loss = 0.913, train_acc = 0.600 (3.269 sec/step)
step 9350 	 loss = 0.823, train_acc = 0.900 (3.256 sec/step)
step 9360 	 loss = 0.842, train_acc = 0.800 (3.265 sec/step)
step 9370 	 loss = 0.120, train_acc = 1.000 (3.256 sec/step)
step 9380 	 loss = 0.872, train_acc = 0.700 (3.251 sec/step)
step 9390 	 loss = 0.674, train_acc = 0.900 (3.220 sec/step)
step 9400 	 loss = 1.268, train_acc = 0.700 (3.257 sec/step)
step 9410 	 loss = 0.897, train_acc = 0.700 (3.244 sec/step)
step 9420 	 loss = 0.597, train_acc = 0.900 (3.288 sec/step)
step 9430 	 loss = 1.185, train_acc = 0.600 (3.233 sec/step)
step 9440 	 loss = 1.309, train_acc = 0.600 (3.264 sec/step)
step 9450 	 loss = 0.375, train_acc = 0.900 (3.262 sec/step)
step 9460 	 loss = 0.564, train_acc = 0.800 (3.246 sec/step)
step 9470 	 loss = 0.470, train_acc = 0.900 (3.236 sec/step)
step 9480 	 loss = 0.863, train_acc = 0.800 (3.254 sec/step)
step 9490 	 loss = 1.786, train_acc = 0.600 (3.241 sec/step)
VALIDATION 	 acc = 0.489 (3.620 sec)
step 9500 	 loss = 1.858, train_acc = 0.600 (3.199 sec/step)
step 9510 	 loss = 0.956, train_acc = 0.700 (3.266 sec/step)
step 9520 	 loss = 1.637, train_acc = 0.500 (3.284 sec/step)
step 9530 	 loss = 1.227, train_acc = 0.700 (3.256 sec/step)
step 9540 	 loss = 0.159, train_acc = 1.000 (3.214 sec/step)
step 9550 	 loss = 1.263, train_acc = 0.700 (3.276 sec/step)
step 9560 	 loss = 0.836, train_acc = 0.700 (3.275 sec/step)
step 9570 	 loss = 1.189, train_acc = 0.600 (3.278 sec/step)
step 9580 	 loss = 0.950, train_acc = 0.700 (3.339 sec/step)
step 9590 	 loss = 0.780, train_acc = 0.800 (3.231 sec/step)
step 9600 	 loss = 0.720, train_acc = 0.800 (3.222 sec/step)
step 9610 	 loss = 0.935, train_acc = 0.800 (3.293 sec/step)
step 9620 	 loss = 1.517, train_acc = 0.700 (3.219 sec/step)
step 9630 	 loss = 1.068, train_acc = 0.700 (3.335 sec/step)
step 9640 	 loss = 0.857, train_acc = 0.800 (3.214 sec/step)
step 9650 	 loss = 1.032, train_acc = 0.500 (3.272 sec/step)
step 9660 	 loss = 0.485, train_acc = 0.900 (3.247 sec/step)
step 9670 	 loss = 0.350, train_acc = 1.000 (3.237 sec/step)
step 9680 	 loss = 0.643, train_acc = 0.800 (3.251 sec/step)
step 9690 	 loss = 1.240, train_acc = 0.600 (3.264 sec/step)
step 9700 	 loss = 0.895, train_acc = 0.800 (3.280 sec/step)
step 9710 	 loss = 1.500, train_acc = 0.600 (3.231 sec/step)
step 9720 	 loss = 1.718, train_acc = 0.500 (3.265 sec/step)
step 9730 	 loss = 0.346, train_acc = 0.900 (3.266 sec/step)
step 9740 	 loss = 1.554, train_acc = 0.500 (3.216 sec/step)
step 9750 	 loss = 1.661, train_acc = 0.400 (3.235 sec/step)
step 9760 	 loss = 1.306, train_acc = 0.500 (3.267 sec/step)
step 9770 	 loss = 2.203, train_acc = 0.400 (3.289 sec/step)
step 9780 	 loss = 0.361, train_acc = 0.900 (3.254 sec/step)
step 9790 	 loss = 0.462, train_acc = 0.800 (3.268 sec/step)
step 9800 	 loss = 3.149, train_acc = 0.600 (3.244 sec/step)
step 9810 	 loss = 1.097, train_acc = 0.800 (3.279 sec/step)
step 9820 	 loss = 0.511, train_acc = 0.900 (3.382 sec/step)
step 9830 	 loss = 0.826, train_acc = 0.700 (3.281 sec/step)
step 9840 	 loss = 1.028, train_acc = 0.700 (3.282 sec/step)
step 9850 	 loss = 0.561, train_acc = 0.800 (3.273 sec/step)
step 9860 	 loss = 1.752, train_acc = 0.600 (3.270 sec/step)
step 9870 	 loss = 1.028, train_acc = 0.600 (3.237 sec/step)
step 9880 	 loss = 0.315, train_acc = 0.800 (3.216 sec/step)
step 9890 	 loss = 0.654, train_acc = 0.900 (3.265 sec/step)
step 9900 	 loss = 1.650, train_acc = 0.500 (3.283 sec/step)
step 9910 	 loss = 1.758, train_acc = 0.300 (3.245 sec/step)
step 9920 	 loss = 0.939, train_acc = 0.500 (3.241 sec/step)
step 9930 	 loss = 0.448, train_acc = 0.700 (3.225 sec/step)
step 9940 	 loss = 1.863, train_acc = 0.200 (3.262 sec/step)
step 9950 	 loss = 0.890, train_acc = 0.700 (3.221 sec/step)
step 9960 	 loss = 1.461, train_acc = 0.600 (3.271 sec/step)
step 9970 	 loss = 0.577, train_acc = 0.800 (3.217 sec/step)
step 9980 	 loss = 1.127, train_acc = 0.600 (3.215 sec/step)
step 9990 	 loss = 0.808, train_acc = 0.700 (3.303 sec/step)
step 10000 	 loss = 0.631, train_acc = 0.800 (3.254 sec/step)
step 10010 	 loss = 0.972, train_acc = 0.700 (3.234 sec/step)
step 10020 	 loss = 0.844, train_acc = 0.700 (3.260 sec/step)
step 10030 	 loss = 1.479, train_acc = 0.600 (3.282 sec/step)
step 10040 	 loss = 1.219, train_acc = 0.600 (3.266 sec/step)
step 10050 	 loss = 0.157, train_acc = 1.000 (3.271 sec/step)
step 10060 	 loss = 0.597, train_acc = 0.900 (3.238 sec/step)
step 10070 	 loss = 0.148, train_acc = 1.000 (3.277 sec/step)
step 10080 	 loss = 1.468, train_acc = 0.800 (3.259 sec/step)
step 10090 	 loss = 0.795, train_acc = 0.700 (3.283 sec/step)
step 10100 	 loss = 1.484, train_acc = 0.600 (3.228 sec/step)
step 10110 	 loss = 1.056, train_acc = 0.600 (3.269 sec/step)
step 10120 	 loss = 0.658, train_acc = 0.800 (3.308 sec/step)
step 10130 	 loss = 1.198, train_acc = 0.600 (3.263 sec/step)
step 10140 	 loss = 0.471, train_acc = 0.900 (3.269 sec/step)
step 10150 	 loss = 1.620, train_acc = 0.600 (3.217 sec/step)
step 10160 	 loss = 0.821, train_acc = 0.700 (3.240 sec/step)
step 10170 	 loss = 1.330, train_acc = 0.500 (3.283 sec/step)
step 10180 	 loss = 0.172, train_acc = 1.000 (3.221 sec/step)
step 10190 	 loss = 0.830, train_acc = 0.600 (3.288 sec/step)
step 10200 	 loss = 2.788, train_acc = 0.200 (3.230 sec/step)
step 10210 	 loss = 0.734, train_acc = 0.800 (3.280 sec/step)
step 10220 	 loss = 0.873, train_acc = 0.700 (3.290 sec/step)
step 10230 	 loss = 1.205, train_acc = 0.800 (3.339 sec/step)
step 10240 	 loss = 0.589, train_acc = 0.900 (3.226 sec/step)
step 10250 	 loss = 0.825, train_acc = 0.600 (3.279 sec/step)
step 10260 	 loss = 2.879, train_acc = 0.400 (3.233 sec/step)
step 10270 	 loss = 0.795, train_acc = 0.600 (3.226 sec/step)
step 10280 	 loss = 0.975, train_acc = 0.600 (3.279 sec/step)
step 10290 	 loss = 0.512, train_acc = 0.900 (3.229 sec/step)
step 10300 	 loss = 0.220, train_acc = 1.000 (3.258 sec/step)
step 10310 	 loss = 1.163, train_acc = 0.600 (3.230 sec/step)
step 10320 	 loss = 0.468, train_acc = 0.800 (3.255 sec/step)
step 10330 	 loss = 0.667, train_acc = 0.900 (3.255 sec/step)
step 10340 	 loss = 1.196, train_acc = 0.600 (3.232 sec/step)
step 10350 	 loss = 0.948, train_acc = 0.700 (3.301 sec/step)
step 10360 	 loss = 0.962, train_acc = 0.900 (3.241 sec/step)
step 10370 	 loss = 0.938, train_acc = 0.700 (3.230 sec/step)
step 10380 	 loss = 2.013, train_acc = 0.400 (3.215 sec/step)
step 10390 	 loss = 0.740, train_acc = 0.700 (3.268 sec/step)
step 10400 	 loss = 0.526, train_acc = 0.700 (3.238 sec/step)
step 10410 	 loss = 0.798, train_acc = 0.800 (3.209 sec/step)
step 10420 	 loss = 0.959, train_acc = 0.700 (3.242 sec/step)
step 10430 	 loss = 0.997, train_acc = 0.700 (3.246 sec/step)
step 10440 	 loss = 1.620, train_acc = 0.600 (3.277 sec/step)
step 10450 	 loss = 2.200, train_acc = 0.600 (3.286 sec/step)
step 10460 	 loss = 1.175, train_acc = 0.600 (3.239 sec/step)
step 10470 	 loss = 1.460, train_acc = 0.700 (3.254 sec/step)
step 10480 	 loss = 0.946, train_acc = 0.700 (3.218 sec/step)
step 10490 	 loss = 0.577, train_acc = 0.900 (3.235 sec/step)
step 10500 	 loss = 0.763, train_acc = 0.800 (3.253 sec/step)
step 10510 	 loss = 1.650, train_acc = 0.400 (3.288 sec/step)
step 10520 	 loss = 1.797, train_acc = 0.600 (3.238 sec/step)
step 10530 	 loss = 0.733, train_acc = 0.800 (3.265 sec/step)
step 10540 	 loss = 0.734, train_acc = 0.900 (3.235 sec/step)
step 10550 	 loss = 1.196, train_acc = 0.600 (3.222 sec/step)
step 10560 	 loss = 0.514, train_acc = 0.900 (3.282 sec/step)
step 10570 	 loss = 0.670, train_acc = 0.800 (3.224 sec/step)
step 10580 	 loss = 1.640, train_acc = 0.500 (3.265 sec/step)
step 10590 	 loss = 1.383, train_acc = 0.600 (3.283 sec/step)
step 10600 	 loss = 0.446, train_acc = 0.800 (3.254 sec/step)
step 10610 	 loss = 1.038, train_acc = 0.600 (3.256 sec/step)
step 10620 	 loss = 0.471, train_acc = 0.900 (3.274 sec/step)
step 10630 	 loss = 1.055, train_acc = 0.800 (3.297 sec/step)
step 10640 	 loss = 1.284, train_acc = 0.500 (3.236 sec/step)
step 10650 	 loss = 0.578, train_acc = 0.800 (3.284 sec/step)
step 10660 	 loss = 1.069, train_acc = 0.700 (3.262 sec/step)
step 10670 	 loss = 0.767, train_acc = 0.900 (3.267 sec/step)
step 10680 	 loss = 2.041, train_acc = 0.700 (3.270 sec/step)
step 10690 	 loss = 0.742, train_acc = 0.800 (3.256 sec/step)
step 10700 	 loss = 1.251, train_acc = 0.600 (3.242 sec/step)
step 10710 	 loss = 1.060, train_acc = 0.600 (3.265 sec/step)
step 10720 	 loss = 0.185, train_acc = 1.000 (3.243 sec/step)
step 10730 	 loss = 0.457, train_acc = 0.700 (3.341 sec/step)
step 10740 	 loss = 1.975, train_acc = 0.500 (3.260 sec/step)
step 10750 	 loss = 0.703, train_acc = 0.800 (3.241 sec/step)
step 10760 	 loss = 0.780, train_acc = 0.600 (3.271 sec/step)
step 10770 	 loss = 0.817, train_acc = 0.700 (3.219 sec/step)
step 10780 	 loss = 1.292, train_acc = 0.700 (3.229 sec/step)
step 10790 	 loss = 1.947, train_acc = 0.600 (3.254 sec/step)
step 10800 	 loss = 0.152, train_acc = 1.000 (3.251 sec/step)
step 10810 	 loss = 0.610, train_acc = 0.700 (3.247 sec/step)
step 10820 	 loss = 1.427, train_acc = 0.500 (3.253 sec/step)
step 10830 	 loss = 1.087, train_acc = 0.600 (3.244 sec/step)
step 10840 	 loss = 0.481, train_acc = 0.800 (3.253 sec/step)
step 10850 	 loss = 1.098, train_acc = 0.700 (3.220 sec/step)
step 10860 	 loss = 0.405, train_acc = 1.000 (3.255 sec/step)
step 10870 	 loss = 0.490, train_acc = 0.900 (3.210 sec/step)
step 10880 	 loss = 1.163, train_acc = 0.500 (3.241 sec/step)
step 10890 	 loss = 0.561, train_acc = 0.900 (3.262 sec/step)
step 10900 	 loss = 1.052, train_acc = 0.700 (3.267 sec/step)
step 10910 	 loss = 1.634, train_acc = 0.500 (3.265 sec/step)
step 10920 	 loss = 1.134, train_acc = 0.700 (3.261 sec/step)
step 10930 	 loss = 1.185, train_acc = 0.600 (3.206 sec/step)
step 10940 	 loss = 1.447, train_acc = 0.700 (3.277 sec/step)
step 10950 	 loss = 0.524, train_acc = 0.900 (3.233 sec/step)
step 10960 	 loss = 0.393, train_acc = 0.900 (3.214 sec/step)
step 10970 	 loss = 0.525, train_acc = 0.800 (3.214 sec/step)
step 10980 	 loss = 2.824, train_acc = 0.400 (3.244 sec/step)
step 10990 	 loss = 1.821, train_acc = 0.400 (3.261 sec/step)
step 11000 	 loss = 1.452, train_acc = 0.700 (3.214 sec/step)
step 11010 	 loss = 0.754, train_acc = 0.900 (3.274 sec/step)
step 11020 	 loss = 1.534, train_acc = 0.300 (3.242 sec/step)
step 11030 	 loss = 0.481, train_acc = 0.900 (3.248 sec/step)
step 11040 	 loss = 1.541, train_acc = 0.700 (3.265 sec/step)
step 11050 	 loss = 0.780, train_acc = 0.800 (3.274 sec/step)
step 11060 	 loss = 0.339, train_acc = 0.900 (3.237 sec/step)
step 11070 	 loss = 0.535, train_acc = 0.900 (3.221 sec/step)
step 11080 	 loss = 0.420, train_acc = 1.000 (3.228 sec/step)
step 11090 	 loss = 0.223, train_acc = 1.000 (3.278 sec/step)
step 11100 	 loss = 2.097, train_acc = 0.500 (3.296 sec/step)
step 11110 	 loss = 1.584, train_acc = 0.500 (3.357 sec/step)
step 11120 	 loss = 0.297, train_acc = 1.000 (3.239 sec/step)
step 11130 	 loss = 2.073, train_acc = 0.300 (3.266 sec/step)
step 11140 	 loss = 1.883, train_acc = 0.400 (3.222 sec/step)
step 11150 	 loss = 0.636, train_acc = 0.800 (3.251 sec/step)
step 11160 	 loss = 1.628, train_acc = 0.500 (3.245 sec/step)
step 11170 	 loss = 0.581, train_acc = 0.800 (3.237 sec/step)
step 11180 	 loss = 0.534, train_acc = 0.900 (3.317 sec/step)
step 11190 	 loss = 1.007, train_acc = 0.700 (3.242 sec/step)
step 11200 	 loss = 0.711, train_acc = 0.800 (3.233 sec/step)
step 11210 	 loss = 0.257, train_acc = 1.000 (3.303 sec/step)
step 11220 	 loss = 2.014, train_acc = 0.700 (3.251 sec/step)
step 11230 	 loss = 1.850, train_acc = 0.500 (3.271 sec/step)
step 11240 	 loss = 0.465, train_acc = 0.900 (3.261 sec/step)
step 11250 	 loss = 0.995, train_acc = 0.800 (3.209 sec/step)
step 11260 	 loss = 1.146, train_acc = 0.500 (3.277 sec/step)
step 11270 	 loss = 0.325, train_acc = 1.000 (3.288 sec/step)
step 11280 	 loss = 0.651, train_acc = 0.700 (3.253 sec/step)
step 11290 	 loss = 0.796, train_acc = 0.700 (3.243 sec/step)
step 11300 	 loss = 0.446, train_acc = 0.900 (3.271 sec/step)
step 11310 	 loss = 1.809, train_acc = 0.400 (3.235 sec/step)
step 11320 	 loss = 0.439, train_acc = 0.900 (3.264 sec/step)
step 11330 	 loss = 1.795, train_acc = 0.500 (3.236 sec/step)
step 11340 	 loss = 2.603, train_acc = 0.300 (3.218 sec/step)
step 11350 	 loss = 0.413, train_acc = 0.800 (3.238 sec/step)
step 11360 	 loss = 0.823, train_acc = 0.700 (3.266 sec/step)
step 11370 	 loss = 1.148, train_acc = 0.500 (3.260 sec/step)
step 11380 	 loss = 0.401, train_acc = 0.900 (3.259 sec/step)
step 11390 	 loss = 1.481, train_acc = 0.500 (3.218 sec/step)
VALIDATION 	 acc = 0.524 (3.640 sec)
New Best Accuracy 0.524 > Old Best 0.494.  Saving...
The checkpoint has been created.
step 11400 	 loss = 2.175, train_acc = 0.500 (3.288 sec/step)
step 11410 	 loss = 0.913, train_acc = 0.800 (3.256 sec/step)
step 11420 	 loss = 0.642, train_acc = 0.800 (3.262 sec/step)
step 11430 	 loss = 0.871, train_acc = 0.800 (3.248 sec/step)
step 11440 	 loss = 0.097, train_acc = 1.000 (3.230 sec/step)
step 11450 	 loss = 0.888, train_acc = 0.600 (3.264 sec/step)
step 11460 	 loss = 0.273, train_acc = 0.900 (3.246 sec/step)
step 11470 	 loss = 0.380, train_acc = 1.000 (3.268 sec/step)
step 11480 	 loss = 1.241, train_acc = 0.600 (3.285 sec/step)
step 11490 	 loss = 0.315, train_acc = 0.900 (3.222 sec/step)
step 11500 	 loss = 0.402, train_acc = 1.000 (3.252 sec/step)
step 11510 	 loss = 1.360, train_acc = 0.500 (3.240 sec/step)
step 11520 	 loss = 0.826, train_acc = 0.700 (3.252 sec/step)
step 11530 	 loss = 0.503, train_acc = 0.900 (3.212 sec/step)
step 11540 	 loss = 0.473, train_acc = 0.800 (3.247 sec/step)
step 11550 	 loss = 0.876, train_acc = 0.800 (3.263 sec/step)
step 11560 	 loss = 0.377, train_acc = 0.900 (3.323 sec/step)
step 11570 	 loss = 0.485, train_acc = 0.800 (3.259 sec/step)
step 11580 	 loss = 0.134, train_acc = 1.000 (3.247 sec/step)
step 11590 	 loss = 0.891, train_acc = 0.800 (3.289 sec/step)
step 11600 	 loss = 0.346, train_acc = 1.000 (3.241 sec/step)
step 11610 	 loss = 0.638, train_acc = 0.800 (3.246 sec/step)
step 11620 	 loss = 0.936, train_acc = 0.600 (3.251 sec/step)
step 11630 	 loss = 1.061, train_acc = 0.800 (3.221 sec/step)
step 11640 	 loss = 1.179, train_acc = 0.600 (3.271 sec/step)
step 11650 	 loss = 1.129, train_acc = 0.700 (3.268 sec/step)
step 11660 	 loss = 0.540, train_acc = 0.900 (3.268 sec/step)
step 11670 	 loss = 1.301, train_acc = 0.600 (3.332 sec/step)
step 11680 	 loss = 1.370, train_acc = 0.500 (3.220 sec/step)
step 11690 	 loss = 0.991, train_acc = 0.600 (3.232 sec/step)
step 11700 	 loss = 1.858, train_acc = 0.500 (3.227 sec/step)
step 11710 	 loss = 1.828, train_acc = 0.600 (3.276 sec/step)
step 11720 	 loss = 0.750, train_acc = 0.700 (3.245 sec/step)
step 11730 	 loss = 0.536, train_acc = 0.900 (3.249 sec/step)
step 11740 	 loss = 0.934, train_acc = 0.800 (3.371 sec/step)
step 11750 	 loss = 0.460, train_acc = 0.800 (3.240 sec/step)
step 11760 	 loss = 1.657, train_acc = 0.600 (3.241 sec/step)
step 11770 	 loss = 1.277, train_acc = 0.500 (3.236 sec/step)
step 11780 	 loss = 0.246, train_acc = 0.900 (3.336 sec/step)
step 11790 	 loss = 0.572, train_acc = 0.900 (3.311 sec/step)
step 11800 	 loss = 2.104, train_acc = 0.600 (3.251 sec/step)
step 11810 	 loss = 1.008, train_acc = 0.800 (3.316 sec/step)
step 11820 	 loss = 1.002, train_acc = 0.700 (3.264 sec/step)
step 11830 	 loss = 0.683, train_acc = 0.700 (3.295 sec/step)
step 11840 	 loss = 0.649, train_acc = 0.800 (3.243 sec/step)
step 11850 	 loss = 1.779, train_acc = 0.400 (3.292 sec/step)
step 11860 	 loss = 2.739, train_acc = 0.500 (3.283 sec/step)
step 11870 	 loss = 0.359, train_acc = 0.900 (3.275 sec/step)
step 11880 	 loss = 0.737, train_acc = 0.800 (3.222 sec/step)
step 11890 	 loss = 1.449, train_acc = 0.500 (3.262 sec/step)
step 11900 	 loss = 0.477, train_acc = 0.800 (3.255 sec/step)
step 11910 	 loss = 1.383, train_acc = 0.700 (3.373 sec/step)
step 11920 	 loss = 0.460, train_acc = 0.900 (3.254 sec/step)
step 11930 	 loss = 0.527, train_acc = 0.800 (3.253 sec/step)
step 11940 	 loss = 0.806, train_acc = 0.700 (3.274 sec/step)
step 11950 	 loss = 1.055, train_acc = 0.600 (3.283 sec/step)
step 11960 	 loss = 0.421, train_acc = 0.800 (3.227 sec/step)
step 11970 	 loss = 0.575, train_acc = 0.900 (3.231 sec/step)
step 11980 	 loss = 1.466, train_acc = 0.600 (3.236 sec/step)
step 11990 	 loss = 0.718, train_acc = 0.700 (3.213 sec/step)
step 12000 	 loss = 0.752, train_acc = 0.800 (3.225 sec/step)
step 12010 	 loss = 1.141, train_acc = 0.600 (3.291 sec/step)
step 12020 	 loss = 0.332, train_acc = 0.900 (3.248 sec/step)
step 12030 	 loss = 1.291, train_acc = 0.600 (3.266 sec/step)
step 12040 	 loss = 0.399, train_acc = 0.900 (3.228 sec/step)
step 12050 	 loss = 0.635, train_acc = 0.800 (3.228 sec/step)
step 12060 	 loss = 1.806, train_acc = 0.400 (3.277 sec/step)
step 12070 	 loss = 0.525, train_acc = 0.700 (3.268 sec/step)
step 12080 	 loss = 1.113, train_acc = 0.800 (3.237 sec/step)
step 12090 	 loss = 1.119, train_acc = 0.500 (3.245 sec/step)
step 12100 	 loss = 1.000, train_acc = 0.600 (3.220 sec/step)
step 12110 	 loss = 0.640, train_acc = 0.800 (3.278 sec/step)
step 12120 	 loss = 1.039, train_acc = 0.700 (3.273 sec/step)
step 12130 	 loss = 0.487, train_acc = 0.900 (3.272 sec/step)
step 12140 	 loss = 0.331, train_acc = 0.800 (3.289 sec/step)
step 12150 	 loss = 0.988, train_acc = 0.800 (3.295 sec/step)
step 12160 	 loss = 1.446, train_acc = 0.600 (3.254 sec/step)
step 12170 	 loss = 0.134, train_acc = 1.000 (3.296 sec/step)
step 12180 	 loss = 0.375, train_acc = 0.900 (3.244 sec/step)
step 12190 	 loss = 0.558, train_acc = 0.700 (3.245 sec/step)
step 12200 	 loss = 0.147, train_acc = 1.000 (3.335 sec/step)
step 12210 	 loss = 0.744, train_acc = 0.800 (3.306 sec/step)
step 12220 	 loss = 0.572, train_acc = 0.800 (3.253 sec/step)
step 12230 	 loss = 0.445, train_acc = 0.900 (3.288 sec/step)
step 12240 	 loss = 3.404, train_acc = 0.300 (3.250 sec/step)
step 12250 	 loss = 0.919, train_acc = 0.500 (3.268 sec/step)
step 12260 	 loss = 0.658, train_acc = 0.800 (3.272 sec/step)
step 12270 	 loss = 0.407, train_acc = 0.900 (3.288 sec/step)
step 12280 	 loss = 0.494, train_acc = 0.800 (3.265 sec/step)
step 12290 	 loss = 0.875, train_acc = 0.800 (3.226 sec/step)
step 12300 	 loss = 0.907, train_acc = 0.600 (3.294 sec/step)
step 12310 	 loss = 0.566, train_acc = 0.800 (3.252 sec/step)
step 12320 	 loss = 0.614, train_acc = 0.700 (3.301 sec/step)
step 12330 	 loss = 0.663, train_acc = 0.800 (3.297 sec/step)
step 12340 	 loss = 1.054, train_acc = 0.700 (3.301 sec/step)
step 12350 	 loss = 1.025, train_acc = 0.800 (3.269 sec/step)
step 12360 	 loss = 0.832, train_acc = 0.700 (3.280 sec/step)
step 12370 	 loss = 0.184, train_acc = 1.000 (3.257 sec/step)
step 12380 	 loss = 0.322, train_acc = 0.900 (3.235 sec/step)
step 12390 	 loss = 0.360, train_acc = 0.900 (3.239 sec/step)
step 12400 	 loss = 0.339, train_acc = 0.800 (3.283 sec/step)
step 12410 	 loss = 0.541, train_acc = 0.900 (3.225 sec/step)
step 12420 	 loss = 0.334, train_acc = 0.900 (3.263 sec/step)
step 12430 	 loss = 1.447, train_acc = 0.700 (3.368 sec/step)
step 12440 	 loss = 0.380, train_acc = 0.800 (3.238 sec/step)
step 12450 	 loss = 0.573, train_acc = 0.900 (3.281 sec/step)
step 12460 	 loss = 0.818, train_acc = 0.800 (3.244 sec/step)
step 12470 	 loss = 0.755, train_acc = 0.800 (3.261 sec/step)
step 12480 	 loss = 0.956, train_acc = 0.600 (3.269 sec/step)
step 12490 	 loss = 1.938, train_acc = 0.600 (3.224 sec/step)
step 12500 	 loss = 0.614, train_acc = 0.800 (3.275 sec/step)
step 12510 	 loss = 1.680, train_acc = 0.600 (3.223 sec/step)
step 12520 	 loss = 0.190, train_acc = 0.900 (3.257 sec/step)
step 12530 	 loss = 1.037, train_acc = 0.600 (3.233 sec/step)
step 12540 	 loss = 2.748, train_acc = 0.300 (3.260 sec/step)
step 12550 	 loss = 0.353, train_acc = 0.900 (3.240 sec/step)
step 12560 	 loss = 0.271, train_acc = 1.000 (3.286 sec/step)
step 12570 	 loss = 0.588, train_acc = 0.800 (3.242 sec/step)
step 12580 	 loss = 0.836, train_acc = 0.800 (3.219 sec/step)
step 12590 	 loss = 1.570, train_acc = 0.600 (3.241 sec/step)
step 12600 	 loss = 0.745, train_acc = 0.800 (3.217 sec/step)
step 12610 	 loss = 1.736, train_acc = 0.600 (3.351 sec/step)
step 12620 	 loss = 0.899, train_acc = 0.800 (3.246 sec/step)
step 12630 	 loss = 0.564, train_acc = 0.800 (3.257 sec/step)
step 12640 	 loss = 0.611, train_acc = 0.700 (3.272 sec/step)
step 12650 	 loss = 0.742, train_acc = 0.600 (3.280 sec/step)
step 12660 	 loss = 0.750, train_acc = 0.700 (3.294 sec/step)
step 12670 	 loss = 0.991, train_acc = 0.600 (3.250 sec/step)
step 12680 	 loss = 0.618, train_acc = 0.600 (3.349 sec/step)
step 12690 	 loss = 0.694, train_acc = 0.800 (3.287 sec/step)
step 12700 	 loss = 0.727, train_acc = 0.900 (3.443 sec/step)
step 12710 	 loss = 1.904, train_acc = 0.500 (3.249 sec/step)
step 12720 	 loss = 0.825, train_acc = 0.800 (3.266 sec/step)
step 12730 	 loss = 1.063, train_acc = 0.700 (3.294 sec/step)
step 12740 	 loss = 0.195, train_acc = 1.000 (3.297 sec/step)
step 12750 	 loss = 1.408, train_acc = 0.700 (3.295 sec/step)
step 12760 	 loss = 1.978, train_acc = 0.400 (3.360 sec/step)
step 12770 	 loss = 0.559, train_acc = 0.800 (3.319 sec/step)
step 12780 	 loss = 0.632, train_acc = 0.700 (3.250 sec/step)
step 12790 	 loss = 2.139, train_acc = 0.500 (3.235 sec/step)
step 12800 	 loss = 0.685, train_acc = 0.800 (3.233 sec/step)
step 12810 	 loss = 0.681, train_acc = 0.800 (3.259 sec/step)
step 12820 	 loss = 0.558, train_acc = 0.900 (3.243 sec/step)
step 12830 	 loss = 0.209, train_acc = 1.000 (3.302 sec/step)
step 12840 	 loss = 0.512, train_acc = 0.900 (3.305 sec/step)
step 12850 	 loss = 0.305, train_acc = 0.900 (3.265 sec/step)
step 12860 	 loss = 0.119, train_acc = 1.000 (3.371 sec/step)
step 12870 	 loss = 0.316, train_acc = 0.900 (3.234 sec/step)
step 12880 	 loss = 1.543, train_acc = 0.400 (3.231 sec/step)
step 12890 	 loss = 0.329, train_acc = 0.900 (3.279 sec/step)
step 12900 	 loss = 0.376, train_acc = 0.900 (3.232 sec/step)
step 12910 	 loss = 2.318, train_acc = 0.400 (3.244 sec/step)
step 12920 	 loss = 0.221, train_acc = 1.000 (3.245 sec/step)
step 12930 	 loss = 0.581, train_acc = 0.800 (3.280 sec/step)
step 12940 	 loss = 0.214, train_acc = 1.000 (3.229 sec/step)
step 12950 	 loss = 1.124, train_acc = 0.800 (3.277 sec/step)
step 12960 	 loss = 1.308, train_acc = 0.500 (3.260 sec/step)
step 12970 	 loss = 0.988, train_acc = 0.600 (3.231 sec/step)
step 12980 	 loss = 0.501, train_acc = 0.900 (3.247 sec/step)
step 12990 	 loss = 0.373, train_acc = 0.800 (3.259 sec/step)
step 13000 	 loss = 1.302, train_acc = 0.800 (3.252 sec/step)
step 13010 	 loss = 0.685, train_acc = 0.800 (3.239 sec/step)
step 13020 	 loss = 0.817, train_acc = 0.900 (3.322 sec/step)
step 13030 	 loss = 0.970, train_acc = 0.700 (3.251 sec/step)
step 13040 	 loss = 1.579, train_acc = 0.500 (3.265 sec/step)
step 13050 	 loss = 0.201, train_acc = 0.900 (3.366 sec/step)
step 13060 	 loss = 0.722, train_acc = 0.800 (3.257 sec/step)
step 13070 	 loss = 0.781, train_acc = 0.800 (3.259 sec/step)
step 13080 	 loss = 0.466, train_acc = 0.800 (3.256 sec/step)
step 13090 	 loss = 0.426, train_acc = 0.900 (3.286 sec/step)
step 13100 	 loss = 0.030, train_acc = 1.000 (3.293 sec/step)
step 13110 	 loss = 0.069, train_acc = 1.000 (3.249 sec/step)
step 13120 	 loss = 1.239, train_acc = 0.600 (3.324 sec/step)
step 13130 	 loss = 2.283, train_acc = 0.400 (3.294 sec/step)
step 13140 	 loss = 2.056, train_acc = 0.500 (3.245 sec/step)
step 13150 	 loss = 0.567, train_acc = 0.800 (3.209 sec/step)
step 13160 	 loss = 0.701, train_acc = 0.800 (3.287 sec/step)
step 13170 	 loss = 0.119, train_acc = 1.000 (3.314 sec/step)
step 13180 	 loss = 0.211, train_acc = 1.000 (3.271 sec/step)
step 13190 	 loss = 0.609, train_acc = 0.900 (3.333 sec/step)
step 13200 	 loss = 3.007, train_acc = 0.700 (3.253 sec/step)
step 13210 	 loss = 0.410, train_acc = 1.000 (3.297 sec/step)
step 13220 	 loss = 0.354, train_acc = 1.000 (3.243 sec/step)
step 13230 	 loss = 1.466, train_acc = 0.300 (3.292 sec/step)
step 13240 	 loss = 1.267, train_acc = 0.600 (3.251 sec/step)
step 13250 	 loss = 0.539, train_acc = 0.900 (3.279 sec/step)
step 13260 	 loss = 0.341, train_acc = 0.900 (3.257 sec/step)
step 13270 	 loss = 0.492, train_acc = 0.800 (3.246 sec/step)
step 13280 	 loss = 1.149, train_acc = 0.800 (3.264 sec/step)
step 13290 	 loss = 2.541, train_acc = 0.200 (3.261 sec/step)
VALIDATION 	 acc = 0.512 (3.636 sec)
step 13300 	 loss = 0.848, train_acc = 0.700 (3.282 sec/step)
step 13310 	 loss = 0.890, train_acc = 0.700 (3.291 sec/step)
step 13320 	 loss = 0.313, train_acc = 0.900 (3.247 sec/step)
step 13330 	 loss = 0.598, train_acc = 0.700 (3.245 sec/step)
step 13340 	 loss = 0.325, train_acc = 0.900 (3.219 sec/step)
step 13350 	 loss = 1.737, train_acc = 0.600 (3.232 sec/step)
step 13360 	 loss = 0.227, train_acc = 0.800 (3.295 sec/step)
step 13370 	 loss = 0.128, train_acc = 1.000 (3.233 sec/step)
step 13380 	 loss = 0.235, train_acc = 1.000 (3.281 sec/step)
step 13390 	 loss = 1.056, train_acc = 0.700 (3.247 sec/step)
step 13400 	 loss = 0.489, train_acc = 0.800 (3.270 sec/step)
step 13410 	 loss = 0.558, train_acc = 0.800 (3.249 sec/step)
step 13420 	 loss = 0.933, train_acc = 0.800 (3.216 sec/step)
step 13430 	 loss = 0.360, train_acc = 0.900 (3.243 sec/step)
step 13440 	 loss = 0.795, train_acc = 0.800 (3.254 sec/step)
step 13450 	 loss = 0.422, train_acc = 0.900 (3.266 sec/step)
step 13460 	 loss = 1.486, train_acc = 0.600 (3.240 sec/step)
step 13470 	 loss = 0.286, train_acc = 1.000 (3.262 sec/step)
step 13480 	 loss = 0.126, train_acc = 1.000 (3.309 sec/step)
step 13490 	 loss = 0.406, train_acc = 0.900 (3.254 sec/step)
step 13500 	 loss = 1.858, train_acc = 0.500 (3.259 sec/step)
step 13510 	 loss = 0.330, train_acc = 0.900 (3.283 sec/step)
step 13520 	 loss = 0.450, train_acc = 0.900 (3.252 sec/step)
step 13530 	 loss = 0.834, train_acc = 0.800 (3.237 sec/step)
step 13540 	 loss = 0.655, train_acc = 0.800 (3.254 sec/step)
step 13550 	 loss = 0.895, train_acc = 0.700 (3.239 sec/step)
step 13560 	 loss = 0.871, train_acc = 0.700 (3.260 sec/step)
step 13570 	 loss = 2.332, train_acc = 0.400 (3.247 sec/step)
step 13580 	 loss = 0.373, train_acc = 0.900 (3.274 sec/step)
step 13590 	 loss = 0.437, train_acc = 0.800 (3.254 sec/step)
step 13600 	 loss = 1.101, train_acc = 0.800 (3.260 sec/step)
step 13610 	 loss = 1.409, train_acc = 0.600 (3.229 sec/step)
step 13620 	 loss = 0.195, train_acc = 0.900 (3.262 sec/step)
step 13630 	 loss = 0.861, train_acc = 0.800 (3.228 sec/step)
step 13640 	 loss = 0.178, train_acc = 0.900 (3.266 sec/step)
step 13650 	 loss = 0.909, train_acc = 0.600 (3.226 sec/step)
step 13660 	 loss = 0.733, train_acc = 0.800 (3.278 sec/step)
step 13670 	 loss = 0.290, train_acc = 0.900 (3.248 sec/step)
step 13680 	 loss = 0.044, train_acc = 1.000 (3.221 sec/step)
step 13690 	 loss = 0.189, train_acc = 0.900 (3.285 sec/step)
step 13700 	 loss = 1.462, train_acc = 0.600 (3.301 sec/step)
step 13710 	 loss = 0.553, train_acc = 0.800 (3.343 sec/step)
step 13720 	 loss = 0.452, train_acc = 0.700 (3.229 sec/step)
step 13730 	 loss = 0.250, train_acc = 0.900 (3.237 sec/step)
step 13740 	 loss = 0.972, train_acc = 0.600 (3.286 sec/step)
step 13750 	 loss = 0.233, train_acc = 1.000 (3.252 sec/step)
step 13760 	 loss = 0.402, train_acc = 0.900 (3.247 sec/step)
step 13770 	 loss = 0.190, train_acc = 1.000 (3.257 sec/step)
step 13780 	 loss = 0.492, train_acc = 0.800 (3.257 sec/step)
step 13790 	 loss = 0.524, train_acc = 0.800 (3.304 sec/step)
step 13800 	 loss = 1.206, train_acc = 0.700 (3.248 sec/step)
step 13810 	 loss = 0.619, train_acc = 0.800 (3.248 sec/step)
step 13820 	 loss = 0.460, train_acc = 0.900 (3.251 sec/step)
step 13830 	 loss = 1.506, train_acc = 0.600 (3.286 sec/step)
step 13840 	 loss = 2.037, train_acc = 0.500 (3.302 sec/step)
step 13850 	 loss = 0.475, train_acc = 0.800 (3.311 sec/step)
step 13860 	 loss = 0.835, train_acc = 0.800 (3.287 sec/step)
step 13870 	 loss = 0.888, train_acc = 0.800 (3.234 sec/step)
step 13880 	 loss = 0.495, train_acc = 0.800 (3.240 sec/step)
step 13890 	 loss = 0.941, train_acc = 0.600 (3.236 sec/step)
step 13900 	 loss = 2.234, train_acc = 0.600 (3.275 sec/step)
step 13910 	 loss = 1.062, train_acc = 0.600 (3.283 sec/step)
step 13920 	 loss = 0.379, train_acc = 0.800 (3.305 sec/step)
step 13930 	 loss = 0.479, train_acc = 0.800 (3.253 sec/step)
step 13940 	 loss = 0.401, train_acc = 0.800 (3.280 sec/step)
step 13950 	 loss = 0.261, train_acc = 1.000 (3.249 sec/step)
step 13960 	 loss = 0.662, train_acc = 0.900 (3.256 sec/step)
step 13970 	 loss = 0.150, train_acc = 1.000 (3.251 sec/step)
step 13980 	 loss = 0.099, train_acc = 0.900 (3.295 sec/step)
step 13990 	 loss = 0.282, train_acc = 1.000 (3.263 sec/step)
step 14000 	 loss = 0.837, train_acc = 0.800 (3.282 sec/step)
step 14010 	 loss = 0.403, train_acc = 0.900 (3.300 sec/step)
step 14020 	 loss = 0.524, train_acc = 0.800 (3.270 sec/step)
step 14030 	 loss = 2.644, train_acc = 0.400 (3.268 sec/step)
step 14040 	 loss = 0.142, train_acc = 1.000 (3.257 sec/step)
step 14050 	 loss = 1.247, train_acc = 0.600 (3.254 sec/step)
step 14060 	 loss = 1.151, train_acc = 0.600 (3.362 sec/step)
step 14070 	 loss = 0.320, train_acc = 0.800 (3.293 sec/step)
step 14080 	 loss = 0.726, train_acc = 0.700 (3.283 sec/step)
step 14090 	 loss = 0.426, train_acc = 0.900 (3.273 sec/step)
step 14100 	 loss = 0.293, train_acc = 0.900 (3.252 sec/step)
step 14110 	 loss = 1.020, train_acc = 0.700 (3.264 sec/step)
step 14120 	 loss = 0.278, train_acc = 0.900 (3.265 sec/step)
step 14130 	 loss = 0.655, train_acc = 0.800 (3.257 sec/step)
step 14140 	 loss = 1.173, train_acc = 0.700 (3.272 sec/step)
step 14150 	 loss = 0.725, train_acc = 0.900 (3.297 sec/step)
step 14160 	 loss = 0.320, train_acc = 0.900 (3.225 sec/step)
step 14170 	 loss = 0.366, train_acc = 0.900 (3.312 sec/step)
step 14180 	 loss = 0.779, train_acc = 0.600 (3.335 sec/step)
step 14190 	 loss = 0.377, train_acc = 0.800 (3.297 sec/step)
step 14200 	 loss = 0.493, train_acc = 0.800 (3.277 sec/step)
step 14210 	 loss = 0.360, train_acc = 0.800 (3.348 sec/step)
step 14220 	 loss = 0.248, train_acc = 0.900 (3.254 sec/step)
step 14230 	 loss = 0.590, train_acc = 0.700 (3.263 sec/step)
step 14240 	 loss = 0.661, train_acc = 0.700 (3.229 sec/step)
step 14250 	 loss = 0.867, train_acc = 0.800 (3.294 sec/step)
step 14260 	 loss = 0.597, train_acc = 0.800 (3.306 sec/step)
step 14270 	 loss = 2.506, train_acc = 0.500 (3.241 sec/step)
step 14280 	 loss = 0.332, train_acc = 0.800 (3.228 sec/step)
step 14290 	 loss = 3.546, train_acc = 0.300 (3.223 sec/step)
step 14300 	 loss = 0.301, train_acc = 0.900 (3.258 sec/step)
step 14310 	 loss = 1.066, train_acc = 0.700 (3.251 sec/step)
step 14320 	 loss = 0.334, train_acc = 0.900 (3.245 sec/step)
step 14330 	 loss = 0.620, train_acc = 0.700 (3.275 sec/step)
step 14340 	 loss = 1.377, train_acc = 0.600 (3.295 sec/step)
step 14350 	 loss = 0.804, train_acc = 0.600 (3.249 sec/step)
step 14360 	 loss = 0.744, train_acc = 0.700 (3.223 sec/step)
step 14370 	 loss = 0.201, train_acc = 0.900 (3.236 sec/step)
step 14380 	 loss = 0.417, train_acc = 0.900 (3.306 sec/step)
step 14390 	 loss = 1.119, train_acc = 0.600 (3.261 sec/step)
step 14400 	 loss = 0.209, train_acc = 1.000 (3.270 sec/step)
step 14410 	 loss = 0.582, train_acc = 0.800 (3.301 sec/step)
step 14420 	 loss = 0.127, train_acc = 0.900 (3.254 sec/step)
step 14430 	 loss = 0.491, train_acc = 0.800 (3.248 sec/step)
step 14440 	 loss = 1.480, train_acc = 0.700 (3.317 sec/step)
step 14450 	 loss = 0.222, train_acc = 0.900 (3.267 sec/step)
step 14460 	 loss = 0.455, train_acc = 0.800 (3.273 sec/step)
step 14470 	 loss = 0.665, train_acc = 0.700 (3.296 sec/step)
step 14480 	 loss = 0.840, train_acc = 0.700 (3.223 sec/step)
step 14490 	 loss = 0.599, train_acc = 0.800 (3.272 sec/step)
step 14500 	 loss = 1.263, train_acc = 0.500 (3.291 sec/step)
step 14510 	 loss = 0.517, train_acc = 0.800 (3.314 sec/step)
step 14520 	 loss = 0.873, train_acc = 0.800 (3.261 sec/step)
step 14530 	 loss = 0.240, train_acc = 0.900 (3.256 sec/step)
step 14540 	 loss = 0.957, train_acc = 0.800 (3.308 sec/step)
step 14550 	 loss = 0.256, train_acc = 1.000 (3.258 sec/step)
step 14560 	 loss = 0.987, train_acc = 0.500 (3.269 sec/step)
step 14570 	 loss = 2.063, train_acc = 0.800 (3.286 sec/step)
step 14580 	 loss = 0.065, train_acc = 1.000 (3.279 sec/step)
step 14590 	 loss = 0.412, train_acc = 0.900 (3.255 sec/step)
step 14600 	 loss = 0.915, train_acc = 0.700 (3.249 sec/step)
step 14610 	 loss = 0.704, train_acc = 0.800 (3.252 sec/step)
step 14620 	 loss = 1.050, train_acc = 0.700 (3.240 sec/step)
step 14630 	 loss = 0.790, train_acc = 0.700 (3.333 sec/step)
step 14640 	 loss = 0.207, train_acc = 0.900 (3.302 sec/step)
step 14650 	 loss = 1.475, train_acc = 0.500 (3.281 sec/step)
step 14660 	 loss = 0.777, train_acc = 0.800 (3.390 sec/step)
step 14670 	 loss = 0.366, train_acc = 0.900 (3.258 sec/step)
step 14680 	 loss = 0.309, train_acc = 0.900 (3.345 sec/step)
step 14690 	 loss = 0.977, train_acc = 0.600 (3.250 sec/step)
step 14700 	 loss = 0.417, train_acc = 0.900 (3.307 sec/step)
step 14710 	 loss = 0.898, train_acc = 0.800 (3.271 sec/step)
step 14720 	 loss = 0.044, train_acc = 1.000 (3.262 sec/step)
step 14730 	 loss = 0.463, train_acc = 0.800 (3.259 sec/step)
step 14740 	 loss = 0.330, train_acc = 0.900 (3.273 sec/step)
step 14750 	 loss = 2.266, train_acc = 0.500 (3.217 sec/step)
step 14760 	 loss = 0.269, train_acc = 0.800 (3.264 sec/step)
step 14770 	 loss = 1.135, train_acc = 0.500 (3.422 sec/step)
step 14780 	 loss = 1.145, train_acc = 0.700 (3.327 sec/step)
step 14790 	 loss = 0.710, train_acc = 0.900 (3.225 sec/step)
step 14800 	 loss = 0.255, train_acc = 0.800 (3.220 sec/step)
step 14810 	 loss = 0.633, train_acc = 0.700 (3.265 sec/step)
step 14820 	 loss = 0.366, train_acc = 0.900 (3.373 sec/step)
step 14830 	 loss = 0.742, train_acc = 0.800 (3.315 sec/step)
step 14840 	 loss = 0.371, train_acc = 1.000 (3.266 sec/step)
step 14850 	 loss = 0.449, train_acc = 0.700 (3.250 sec/step)
step 14860 	 loss = 0.457, train_acc = 0.800 (3.293 sec/step)
step 14870 	 loss = 0.386, train_acc = 0.900 (3.232 sec/step)
step 14880 	 loss = 0.410, train_acc = 0.800 (3.237 sec/step)
step 14890 	 loss = 0.027, train_acc = 1.000 (3.242 sec/step)
step 14900 	 loss = 1.110, train_acc = 0.700 (3.301 sec/step)
step 14910 	 loss = 0.382, train_acc = 0.800 (3.269 sec/step)
step 14920 	 loss = 0.076, train_acc = 1.000 (3.295 sec/step)
step 14930 	 loss = 0.731, train_acc = 0.700 (3.256 sec/step)
step 14940 	 loss = 1.422, train_acc = 0.600 (3.283 sec/step)
step 14950 	 loss = 3.158, train_acc = 0.400 (3.236 sec/step)
step 14960 	 loss = 0.299, train_acc = 0.900 (3.272 sec/step)
step 14970 	 loss = 0.414, train_acc = 0.900 (3.277 sec/step)
step 14980 	 loss = 0.243, train_acc = 1.000 (3.278 sec/step)
step 14990 	 loss = 0.076, train_acc = 1.000 (3.270 sec/step)
step 15000 	 loss = 1.471, train_acc = 0.800 (3.250 sec/step)
step 15010 	 loss = 2.235, train_acc = 0.600 (3.349 sec/step)
step 15020 	 loss = 1.749, train_acc = 0.600 (3.291 sec/step)
step 15030 	 loss = 1.253, train_acc = 0.500 (3.265 sec/step)
step 15040 	 loss = 0.362, train_acc = 0.900 (3.298 sec/step)
step 15050 	 loss = 0.217, train_acc = 0.900 (3.256 sec/step)
step 15060 	 loss = 0.620, train_acc = 0.800 (3.288 sec/step)
step 15070 	 loss = 0.248, train_acc = 0.900 (3.260 sec/step)
step 15080 	 loss = 0.962, train_acc = 0.700 (3.247 sec/step)
step 15090 	 loss = 0.531, train_acc = 0.800 (3.297 sec/step)
step 15100 	 loss = 0.491, train_acc = 0.700 (3.245 sec/step)
step 15110 	 loss = 0.242, train_acc = 0.900 (3.267 sec/step)
step 15120 	 loss = 0.356, train_acc = 1.000 (3.227 sec/step)
step 15130 	 loss = 1.392, train_acc = 0.500 (3.338 sec/step)
step 15140 	 loss = 2.412, train_acc = 0.400 (3.294 sec/step)
step 15150 	 loss = 0.316, train_acc = 0.800 (3.261 sec/step)
step 15160 	 loss = 0.534, train_acc = 0.800 (3.303 sec/step)
step 15170 	 loss = 0.425, train_acc = 0.900 (3.279 sec/step)
step 15180 	 loss = 0.368, train_acc = 0.900 (3.254 sec/step)
step 15190 	 loss = 0.901, train_acc = 0.700 (3.307 sec/step)
VALIDATION 	 acc = 0.524 (3.637 sec)
New Best Accuracy 0.524 > Old Best 0.524.  Saving...
The checkpoint has been created.
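
Once training stops, the best snapshot saved above can be restored for evaluation or feature extraction. A sketch under the assumption that the vgg16 graph has already been rebuilt in the current session and that SNAPSHOT_DIR is the snapshot directory set in the hyperparameter cell (not the notebook's actual restore code):

import tensorflow as tf

# Assumes the vgg16 graph has been constructed so its variables exist in the default graph.
ckpt_path = tf.train.latest_checkpoint(SNAPSHOT_DIR)  # most recently written (i.e. best) snapshot
saver = tf.train.Saver()
with tf.Session() as sess:
    saver.restore(sess, ckpt_path)
    # ... run validation or extract fc-layer features with the restored weights
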
step 15200 	 loss = 0.633, train_acc = 0.700 (3.251 sec/step)
step 15210 	 loss = 1.485, train_acc = 0.900 (3.327 sec/step)
step 15220 	 loss = 1.407, train_acc = 0.500 (3.270 sec/step)
step 15230 	 loss = 0.224, train_acc = 1.000 (3.286 sec/step)
step 15240 	 loss = 0.055, train_acc = 1.000 (3.254 sec/step)
step 15250 	 loss = 0.582, train_acc = 0.800 (3.292 sec/step)
step 15260 	 loss = 0.327, train_acc = 0.800 (3.244 sec/step)
step 15270 	 loss = 0.191, train_acc = 1.000 (3.305 sec/step)
step 15280 	 loss = 0.716, train_acc = 0.700 (3.272 sec/step)
step 15290 	 loss = 0.443, train_acc = 1.000 (3.277 sec/step)
step 15300 	 loss = 1.310, train_acc = 0.700 (3.279 sec/step)
step 15310 	 loss = 0.332, train_acc = 0.800 (3.249 sec/step)
step 15320 	 loss = 0.565, train_acc = 0.800 (3.292 sec/step)
step 15330 	 loss = 0.282, train_acc = 0.900 (3.251 sec/step)
step 15340 	 loss = 0.291, train_acc = 0.900 (3.274 sec/step)
step 15350 	 loss = 0.216, train_acc = 1.000 (3.330 sec/step)
step 15360 	 loss = 0.584, train_acc = 0.900 (3.318 sec/step)
step 15370 	 loss = 0.386, train_acc = 0.900 (3.296 sec/step)
step 15380 	 loss = 0.155, train_acc = 0.900 (3.243 sec/step)
step 15390 	 loss = 1.329, train_acc = 0.600 (3.250 sec/step)
step 15400 	 loss = 0.903, train_acc = 0.700 (3.290 sec/step)
step 15410 	 loss = 0.327, train_acc = 0.900 (3.237 sec/step)
step 15420 	 loss = 2.093, train_acc = 0.500 (3.253 sec/step)
step 15430 	 loss = 0.353, train_acc = 0.900 (3.254 sec/step)
step 15440 	 loss = 0.535, train_acc = 0.800 (3.296 sec/step)
step 15450 	 loss = 0.648, train_acc = 0.800 (3.281 sec/step)
step 15460 	 loss = 1.886, train_acc = 0.600 (3.233 sec/step)
step 15470 	 loss = 1.256, train_acc = 0.700 (3.301 sec/step)
step 15480 	 loss = 0.159, train_acc = 1.000 (3.261 sec/step)
step 15490 	 loss = 0.027, train_acc = 1.000 (3.231 sec/step)
step 15500 	 loss = 0.646, train_acc = 0.900 (3.223 sec/step)
step 15510 	 loss = 1.491, train_acc = 0.500 (3.279 sec/step)
step 15520 	 loss = 0.692, train_acc = 0.800 (3.242 sec/step)
step 15530 	 loss = 1.520, train_acc = 0.600 (3.293 sec/step)
step 15540 	 loss = 0.138, train_acc = 1.000 (3.279 sec/step)
step 15550 	 loss = 0.136, train_acc = 1.000 (3.258 sec/step)
step 15560 	 loss = 0.296, train_acc = 0.900 (3.256 sec/step)
step 15570 	 loss = 0.415, train_acc = 0.800 (3.259 sec/step)
step 15580 	 loss = 0.026, train_acc = 1.000 (3.258 sec/step)
step 15590 	 loss = 0.219, train_acc = 0.900 (3.289 sec/step)
step 15600 	 loss = 0.933, train_acc = 0.600 (3.243 sec/step)
step 15610 	 loss = 0.515, train_acc = 0.900 (3.263 sec/step)
step 15620 	 loss = 0.455, train_acc = 0.900 (3.302 sec/step)
step 15630 	 loss = 0.031, train_acc = 1.000 (3.257 sec/step)
step 15640 	 loss = 0.185, train_acc = 0.900 (3.283 sec/step)
step 15650 	 loss = 0.308, train_acc = 0.900 (3.280 sec/step)
step 15660 	 loss = 0.185, train_acc = 1.000 (3.290 sec/step)
step 15670 	 loss = 0.029, train_acc = 1.000 (3.264 sec/step)
step 15680 	 loss = 0.658, train_acc = 0.900 (3.288 sec/step)
step 15690 	 loss = 0.720, train_acc = 0.800 (3.247 sec/step)
step 15700 	 loss = 0.622, train_acc = 0.800 (3.276 sec/step)
step 15710 	 loss = 0.154, train_acc = 1.000 (3.262 sec/step)
step 15720 	 loss = 0.651, train_acc = 0.800 (3.240 sec/step)
step 15730 	 loss = 0.555, train_acc = 0.700 (3.271 sec/step)
step 15740 	 loss = 1.628, train_acc = 0.600 (3.255 sec/step)
step 15750 	 loss = 0.017, train_acc = 1.000 (3.413 sec/step)
step 15760 	 loss = 0.912, train_acc = 0.700 (3.273 sec/step)
step 15770 	 loss = 0.235, train_acc = 0.900 (3.265 sec/step)
step 15780 	 loss = 0.396, train_acc = 0.800 (3.293 sec/step)
step 15790 	 loss = 0.434, train_acc = 0.900 (3.280 sec/step)
step 15800 	 loss = 1.056, train_acc = 0.600 (3.247 sec/step)
step 15810 	 loss = 0.735, train_acc = 0.700 (3.273 sec/step)
step 15820 	 loss = 1.111, train_acc = 0.600 (3.233 sec/step)
step 15830 	 loss = 1.526, train_acc = 0.700 (3.259 sec/step)
step 15840 	 loss = 0.311, train_acc = 0.900 (3.298 sec/step)
step 15850 	 loss = 1.755, train_acc = 0.300 (3.258 sec/step)
step 15860 	 loss = 0.645, train_acc = 0.700 (3.259 sec/step)
step 15870 	 loss = 0.225, train_acc = 0.900 (3.281 sec/step)
step 15880 	 loss = 0.050, train_acc = 1.000 (3.283 sec/step)
step 15890 	 loss = 0.932, train_acc = 0.700 (3.291 sec/step)
step 15900 	 loss = 2.275, train_acc = 0.400 (3.304 sec/step)
step 15910 	 loss = 0.809, train_acc = 0.900 (3.294 sec/step)
step 15920 	 loss = 0.313, train_acc = 0.900 (3.267 sec/step)
step 15930 	 loss = 1.544, train_acc = 0.600 (3.242 sec/step)
step 15940 	 loss = 0.056, train_acc = 1.000 (3.317 sec/step)
step 15950 	 loss = 0.213, train_acc = 1.000 (3.318 sec/step)
step 15960 	 loss = 0.274, train_acc = 0.900 (3.285 sec/step)
step 15970 	 loss = 0.106, train_acc = 1.000 (3.278 sec/step)
step 15980 	 loss = 1.050, train_acc = 0.600 (3.284 sec/step)
step 15990 	 loss = 0.315, train_acc = 1.000 (3.276 sec/step)
step 16000 	 loss = 1.098, train_acc = 0.700 (3.287 sec/step)
step 16010 	 loss = 0.461, train_acc = 0.900 (3.284 sec/step)
step 16020 	 loss = 0.528, train_acc = 0.900 (3.313 sec/step)
step 16030 	 loss = 0.056, train_acc = 1.000 (3.252 sec/step)
step 16040 	 loss = 0.501, train_acc = 0.800 (3.290 sec/step)
step 16050 	 loss = 0.454, train_acc = 0.800 (3.278 sec/step)
step 16060 	 loss = 0.897, train_acc = 0.800 (3.313 sec/step)
step 16070 	 loss = 0.618, train_acc = 0.800 (3.313 sec/step)
step 16080 	 loss = 0.374, train_acc = 0.900 (3.228 sec/step)
step 16090 	 loss = 0.116, train_acc = 1.000 (3.264 sec/step)
step 16100 	 loss = 0.574, train_acc = 0.800 (3.404 sec/step)
step 16110 	 loss = 0.764, train_acc = 0.800 (3.290 sec/step)
step 16120 	 loss = 0.229, train_acc = 0.900 (3.255 sec/step)
step 16130 	 loss = 0.373, train_acc = 0.800 (3.256 sec/step)
step 16140 	 loss = 0.251, train_acc = 0.900 (3.306 sec/step)
step 16150 	 loss = 1.208, train_acc = 0.800 (3.270 sec/step)
step 16160 	 loss = 0.626, train_acc = 0.800 (3.337 sec/step)
step 16170 	 loss = 0.061, train_acc = 1.000 (3.256 sec/step)
step 16180 	 loss = 0.912, train_acc = 0.800 (3.250 sec/step)
step 16190 	 loss = 0.987, train_acc = 0.700 (3.250 sec/step)
step 16200 	 loss = 0.373, train_acc = 0.800 (3.268 sec/step)
step 16210 	 loss = 0.849, train_acc = 0.700 (3.265 sec/step)
step 16220 	 loss = 0.389, train_acc = 0.900 (3.334 sec/step)
step 16230 	 loss = 0.405, train_acc = 0.900 (3.270 sec/step)
step 16240 	 loss = 0.110, train_acc = 1.000 (3.264 sec/step)
step 16250 	 loss = 0.188, train_acc = 1.000 (3.251 sec/step)
step 16260 	 loss = 0.256, train_acc = 0.900 (3.275 sec/step)
step 16270 	 loss = 0.673, train_acc = 0.600 (3.237 sec/step)
step 16280 	 loss = 0.859, train_acc = 0.700 (3.297 sec/step)
step 16290 	 loss = 0.232, train_acc = 0.900 (3.287 sec/step)
step 16300 	 loss = 1.237, train_acc = 0.700 (3.285 sec/step)
step 16310 	 loss = 0.506, train_acc = 0.800 (3.282 sec/step)
step 16320 	 loss = 0.460, train_acc = 0.900 (3.246 sec/step)
step 16330 	 loss = 1.996, train_acc = 0.600 (3.268 sec/step)
step 16340 	 loss = 0.336, train_acc = 0.900 (3.349 sec/step)
step 16350 	 loss = 0.109, train_acc = 1.000 (3.319 sec/step)
step 16360 	 loss = 0.061, train_acc = 1.000 (3.258 sec/step)
step 16370 	 loss = 0.703, train_acc = 0.700 (3.226 sec/step)
step 16380 	 loss = 0.299, train_acc = 0.900 (3.289 sec/step)
step 16390 	 loss = 0.961, train_acc = 0.800 (3.272 sec/step)
step 16400 	 loss = 0.769, train_acc = 0.900 (3.300 sec/step)
step 16410 	 loss = 0.396, train_acc = 0.800 (3.291 sec/step)
step 16420 	 loss = 0.350, train_acc = 0.800 (3.315 sec/step)
step 16430 	 loss = 0.104, train_acc = 1.000 (3.293 sec/step)
step 16440 	 loss = 1.303, train_acc = 0.700 (3.266 sec/step)
step 16450 	 loss = 0.486, train_acc = 0.800 (3.282 sec/step)
step 16460 	 loss = 0.043, train_acc = 1.000 (3.391 sec/step)
step 16470 	 loss = 0.483, train_acc = 0.800 (3.292 sec/step)
step 16480 	 loss = 0.553, train_acc = 0.800 (3.246 sec/step)
step 16490 	 loss = 0.128, train_acc = 1.000 (3.244 sec/step)
step 16500 	 loss = 0.210, train_acc = 0.900 (3.244 sec/step)
step 16510 	 loss = 0.499, train_acc = 0.800 (3.250 sec/step)
step 16520 	 loss = 0.321, train_acc = 0.900 (3.260 sec/step)
step 16530 	 loss = 0.912, train_acc = 0.700 (3.276 sec/step)
step 16540 	 loss = 1.156, train_acc = 0.600 (3.287 sec/step)
step 16550 	 loss = 1.247, train_acc = 0.700 (3.231 sec/step)
step 16560 	 loss = 1.309, train_acc = 0.500 (3.262 sec/step)
step 16570 	 loss = 0.728, train_acc = 0.800 (3.259 sec/step)
step 16580 	 loss = 0.669, train_acc = 0.800 (3.334 sec/step)
step 16590 	 loss = 0.153, train_acc = 1.000 (3.228 sec/step)
step 16600 	 loss = 0.530, train_acc = 0.900 (3.298 sec/step)
step 16610 	 loss = 0.196, train_acc = 1.000 (3.279 sec/step)
step 16620 	 loss = 0.461, train_acc = 0.800 (3.258 sec/step)
step 16630 	 loss = 0.091, train_acc = 1.000 (3.280 sec/step)
step 16640 	 loss = 0.825, train_acc = 0.700 (3.301 sec/step)
step 16650 	 loss = 1.151, train_acc = 0.600 (3.245 sec/step)
step 16660 	 loss = 0.175, train_acc = 0.900 (3.384 sec/step)
step 16670 	 loss = 0.136, train_acc = 1.000 (3.315 sec/step)
step 16680 	 loss = 0.766, train_acc = 0.600 (3.308 sec/step)
step 16690 	 loss = 0.413, train_acc = 0.900 (3.297 sec/step)
step 16700 	 loss = 0.841, train_acc = 0.700 (3.333 sec/step)
step 16710 	 loss = 0.843, train_acc = 0.700 (3.250 sec/step)
step 16720 	 loss = 0.036, train_acc = 1.000 (3.315 sec/step)
step 16730 	 loss = 0.720, train_acc = 0.800 (3.272 sec/step)
step 16740 	 loss = 0.194, train_acc = 0.900 (3.248 sec/step)
step 16750 	 loss = 0.169, train_acc = 1.000 (3.297 sec/step)
step 16760 	 loss = 0.246, train_acc = 0.900 (3.307 sec/step)
step 16770 	 loss = 0.257, train_acc = 0.900 (3.280 sec/step)
step 16780 	 loss = 0.056, train_acc = 1.000 (3.287 sec/step)
step 16790 	 loss = 0.140, train_acc = 1.000 (3.262 sec/step)
step 16800 	 loss = 0.886, train_acc = 0.800 (3.288 sec/step)
step 16810 	 loss = 0.506, train_acc = 0.900 (3.332 sec/step)
step 16820 	 loss = 0.149, train_acc = 0.900 (3.271 sec/step)
step 16830 	 loss = 1.281, train_acc = 0.600 (3.348 sec/step)
step 16840 	 loss = 0.721, train_acc = 0.800 (3.306 sec/step)
step 16850 	 loss = 0.880, train_acc = 0.900 (3.237 sec/step)
step 16860 	 loss = 0.390, train_acc = 0.900 (3.287 sec/step)
step 16870 	 loss = 1.786, train_acc = 0.600 (3.260 sec/step)
step 16880 	 loss = 0.104, train_acc = 1.000 (3.284 sec/step)
step 16890 	 loss = 1.794, train_acc = 0.500 (3.232 sec/step)
step 16900 	 loss = 0.143, train_acc = 0.900 (3.359 sec/step)
step 16910 	 loss = 0.883, train_acc = 0.700 (3.261 sec/step)
step 16920 	 loss = 0.475, train_acc = 0.800 (3.257 sec/step)
step 16930 	 loss = 0.554, train_acc = 0.800 (3.260 sec/step)
step 16940 	 loss = 1.419, train_acc = 0.800 (3.265 sec/step)
step 16950 	 loss = 0.181, train_acc = 1.000 (3.283 sec/step)
step 16960 	 loss = 0.510, train_acc = 0.900 (3.269 sec/step)
step 16970 	 loss = 0.493, train_acc = 0.900 (3.308 sec/step)
step 16980 	 loss = 0.244, train_acc = 0.900 (3.280 sec/step)
step 16990 	 loss = 0.641, train_acc = 0.800 (3.249 sec/step)
step 17000 	 loss = 0.168, train_acc = 0.900 (3.253 sec/step)
step 17010 	 loss = 0.867, train_acc = 0.500 (3.321 sec/step)
step 17020 	 loss = 0.386, train_acc = 0.900 (3.282 sec/step)
step 17030 	 loss = 0.487, train_acc = 0.800 (3.256 sec/step)
step 17040 	 loss = 1.495, train_acc = 0.600 (3.254 sec/step)
step 17050 	 loss = 0.123, train_acc = 0.900 (3.273 sec/step)
step 17060 	 loss = 0.039, train_acc = 1.000 (3.267 sec/step)
step 17070 	 loss = 0.227, train_acc = 0.900 (3.285 sec/step)
step 17080 	 loss = 1.052, train_acc = 0.700 (3.329 sec/step)
step 17090 	 loss = 0.502, train_acc = 0.900 (3.280 sec/step)
VALIDATION 	 acc = 0.512 (3.695 sec)
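The periodic VALIDATION lines report accuracy averaged over the held-out split. A rough sketch of how such a figure can be computed from the validation_indices, data_x, data_y, and BATCH_SIZE defined earlier is below; acc_op, x_ph, and y_ph are assumed op/placeholder names, and any resizing of the 28x28 images to the network's 224x224 input is omitted, so this illustrates the bookkeeping rather than the notebook's actual evaluation code.

import numpy as np
from time import time

def validation_accuracy(sess, acc_op, x_ph, y_ph):
    """Average per-batch accuracy over every batch of the validation split."""
    start = time()
    accs = []
    for i in range(0, len(validation_indices), BATCH_SIZE):
        idx = validation_indices[i:i + BATCH_SIZE]
        accs.append(sess.run(acc_op, feed_dict={x_ph: data_x[idx], y_ph: data_y[idx]}))
    acc = float(np.mean(accs))
    print('VALIDATION \t acc = %.3f (%.3f sec)' % (acc, time() - start))
    return acc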
step 17100 	 loss = 0.891, train_acc = 0.700 (3.251 sec/step)
step 17110 	 loss = 0.578, train_acc = 0.900 (3.260 sec/step)
step 17120 	 loss = 0.677, train_acc = 0.800 (3.315 sec/step)
step 17130 	 loss = 0.146, train_acc = 1.000 (3.251 sec/step)
step 17140 	 loss = 0.059, train_acc = 1.000 (3.372 sec/step)
step 17150 	 loss = 0.532, train_acc = 0.900 (3.335 sec/step)
step 17160 	 loss = 0.361, train_acc = 0.800 (3.295 sec/step)
step 17170 	 loss = 0.083, train_acc = 1.000 (3.279 sec/step)
step 17180 	 loss = 0.107, train_acc = 1.000 (3.283 sec/step)
step 17190 	 loss = 0.021, train_acc = 1.000 (3.266 sec/step)
step 17200 	 loss = 0.225, train_acc = 0.900 (3.283 sec/step)
step 17210 	 loss = 0.500, train_acc = 0.800 (3.286 sec/step)
step 17220 	 loss = 0.662, train_acc = 0.600 (3.335 sec/step)
step 17230 	 loss = 0.580, train_acc = 0.800 (3.269 sec/step)
step 17240 	 loss = 0.065, train_acc = 1.000 (3.259 sec/step)
step 17250 	 loss = 1.896, train_acc = 0.500 (3.321 sec/step)
step 17260 	 loss = 0.348, train_acc = 0.900 (3.267 sec/step)
step 17270 	 loss = 0.168, train_acc = 1.000 (3.259 sec/step)
step 17280 	 loss = 0.286, train_acc = 0.900 (3.288 sec/step)
step 17290 	 loss = 0.293, train_acc = 0.900 (3.309 sec/step)
step 17300 	 loss = 0.745, train_acc = 0.800 (3.245 sec/step)
step 17310 	 loss = 0.488, train_acc = 0.900 (3.303 sec/step)
step 17320 	 loss = 0.153, train_acc = 1.000 (3.422 sec/step)
step 17330 	 loss = 0.086, train_acc = 1.000 (3.398 sec/step)
step 17340 	 loss = 0.429, train_acc = 0.900 (3.266 sec/step)
step 17350 	 loss = 0.304, train_acc = 0.900 (3.382 sec/step)
step 17360 	 loss = 0.183, train_acc = 0.900 (3.279 sec/step)
step 17370 	 loss = 0.733, train_acc = 0.800 (3.286 sec/step)
step 17380 	 loss = 0.909, train_acc = 0.500 (3.245 sec/step)
step 17390 	 loss = 1.185, train_acc = 0.500 (3.334 sec/step)
step 17400 	 loss = 0.807, train_acc = 0.800 (3.313 sec/step)
step 17410 	 loss = 2.164, train_acc = 0.700 (3.253 sec/step)
step 17420 	 loss = 1.831, train_acc = 0.700 (3.324 sec/step)
step 17430 	 loss = 0.258, train_acc = 1.000 (3.283 sec/step)
step 17440 	 loss = 1.824, train_acc = 0.500 (3.273 sec/step)
step 17450 	 loss = 0.756, train_acc = 0.600 (3.279 sec/step)
step 17460 	 loss = 0.473, train_acc = 0.800 (3.262 sec/step)
step 17470 	 loss = 0.138, train_acc = 1.000 (3.251 sec/step)
step 17480 	 loss = 1.072, train_acc = 0.800 (3.317 sec/step)
step 17490 	 loss = 0.470, train_acc = 0.800 (3.280 sec/step)
step 17500 	 loss = 1.534, train_acc = 0.800 (3.290 sec/step)
step 17510 	 loss = 0.356, train_acc = 0.900 (3.317 sec/step)
step 17520 	 loss = 0.224, train_acc = 0.800 (3.299 sec/step)
step 17530 	 loss = 0.706, train_acc = 0.600 (3.235 sec/step)
step 17540 	 loss = 1.085, train_acc = 0.600 (3.321 sec/step)
step 17550 	 loss = 0.126, train_acc = 1.000 (3.267 sec/step)
step 17560 	 loss = 0.120, train_acc = 1.000 (3.276 sec/step)
step 17570 	 loss = 0.134, train_acc = 1.000 (3.295 sec/step)
step 17580 	 loss = 0.082, train_acc = 1.000 (3.270 sec/step)
step 17590 	 loss = 0.691, train_acc = 0.800 (3.318 sec/step)
step 17600 	 loss = 0.167, train_acc = 0.900 (3.285 sec/step)
step 17610 	 loss = 0.752, train_acc = 0.900 (3.242 sec/step)
step 17620 	 loss = 0.179, train_acc = 0.900 (3.252 sec/step)
step 17630 	 loss = 1.184, train_acc = 0.600 (3.264 sec/step)
step 17640 	 loss = 0.586, train_acc = 0.900 (3.307 sec/step)
step 17650 	 loss = 0.233, train_acc = 0.800 (3.266 sec/step)
step 17660 	 loss = 0.068, train_acc = 1.000 (3.280 sec/step)
step 17670 	 loss = 0.425, train_acc = 0.800 (3.231 sec/step)
step 17680 	 loss = 0.138, train_acc = 1.000 (3.258 sec/step)
step 17690 	 loss = 0.794, train_acc = 0.700 (3.308 sec/step)
step 17700 	 loss = 0.927, train_acc = 0.800 (3.316 sec/step)
step 17710 	 loss = 0.224, train_acc = 0.900 (3.322 sec/step)
step 17720 	 loss = 0.246, train_acc = 1.000 (3.274 sec/step)
step 17730 	 loss = 0.259, train_acc = 0.900 (3.289 sec/step)
step 17740 	 loss = 0.284, train_acc = 0.900 (3.328 sec/step)
step 17750 	 loss = 0.105, train_acc = 1.000 (3.312 sec/step)
step 17760 	 loss = 0.084, train_acc = 1.000 (3.268 sec/step)
step 17770 	 loss = 0.009, train_acc = 1.000 (3.270 sec/step)
step 17780 	 loss = 0.314, train_acc = 0.900 (3.303 sec/step)
step 17790 	 loss = 0.238, train_acc = 0.900 (3.323 sec/step)
step 17800 	 loss = 0.477, train_acc = 0.900 (3.333 sec/step)
step 17810 	 loss = 0.281, train_acc = 0.900 (3.274 sec/step)
step 17820 	 loss = 0.481, train_acc = 0.800 (3.336 sec/step)
step 17830 	 loss = 0.827, train_acc = 0.600 (3.281 sec/step)
step 17840 	 loss = 0.034, train_acc = 1.000 (3.266 sec/step)
step 17850 	 loss = 0.091, train_acc = 1.000 (3.244 sec/step)
step 17860 	 loss = 0.723, train_acc = 0.700 (3.296 sec/step)
step 17870 	 loss = 0.661, train_acc = 0.900 (3.303 sec/step)
step 17880 	 loss = 0.263, train_acc = 0.900 (3.346 sec/step)
step 17890 	 loss = 0.075, train_acc = 1.000 (3.303 sec/step)
step 17900 	 loss = 0.138, train_acc = 0.900 (3.286 sec/step)
step 17910 	 loss = 0.681, train_acc = 0.600 (3.328 sec/step)
step 17920 	 loss = 0.171, train_acc = 1.000 (3.294 sec/step)
step 17930 	 loss = 0.067, train_acc = 1.000 (3.330 sec/step)
step 17940 	 loss = 0.376, train_acc = 0.900 (3.240 sec/step)
step 17950 	 loss = 0.240, train_acc = 0.900 (3.280 sec/step)
step 17960 	 loss = 0.551, train_acc = 0.700 (3.307 sec/step)
step 17970 	 loss = 0.217, train_acc = 0.900 (3.269 sec/step)
step 17980 	 loss = 0.634, train_acc = 0.800 (3.307 sec/step)
step 17990 	 loss = 0.853, train_acc = 0.700 (3.260 sec/step)
step 18000 	 loss = 0.552, train_acc = 0.900 (3.322 sec/step)
step 18010 	 loss = 2.188, train_acc = 0.500 (3.418 sec/step)
step 18020 	 loss = 0.069, train_acc = 1.000 (3.274 sec/step)
step 18030 	 loss = 0.598, train_acc = 0.800 (3.267 sec/step)
step 18040 	 loss = 0.714, train_acc = 0.800 (3.286 sec/step)
step 18050 	 loss = 1.253, train_acc = 0.800 (3.232 sec/step)
step 18060 	 loss = 0.599, train_acc = 0.900 (3.228 sec/step)
step 18070 	 loss = 0.091, train_acc = 1.000 (3.268 sec/step)
step 18080 	 loss = 0.089, train_acc = 1.000 (3.232 sec/step)
step 18090 	 loss = 0.499, train_acc = 0.900 (3.302 sec/step)
step 18100 	 loss = 0.326, train_acc = 0.800 (3.323 sec/step)
step 18110 	 loss = 0.254, train_acc = 0.900 (3.297 sec/step)
step 18120 	 loss = 0.026, train_acc = 1.000 (3.289 sec/step)
step 18130 	 loss = 0.336, train_acc = 0.900 (3.276 sec/step)
step 18140 	 loss = 0.374, train_acc = 0.900 (3.393 sec/step)
step 18150 	 loss = 0.492, train_acc = 0.800 (3.264 sec/step)
step 18160 	 loss = 0.571, train_acc = 0.900 (3.234 sec/step)
step 18170 	 loss = 0.298, train_acc = 0.900 (3.239 sec/step)
step 18180 	 loss = 0.157, train_acc = 1.000 (3.254 sec/step)
step 18190 	 loss = 0.226, train_acc = 0.900 (3.329 sec/step)
step 18200 	 loss = 0.366, train_acc = 0.900 (3.259 sec/step)
step 18210 	 loss = 0.060, train_acc = 1.000 (3.296 sec/step)
step 18220 	 loss = 0.014, train_acc = 1.000 (3.294 sec/step)
step 18230 	 loss = 0.496, train_acc = 0.900 (3.305 sec/step)
step 18240 	 loss = 2.164, train_acc = 0.400 (3.290 sec/step)
step 18250 	 loss = 0.194, train_acc = 0.900 (3.273 sec/step)
step 18260 	 loss = 0.185, train_acc = 0.900 (3.279 sec/step)
step 18270 	 loss = 0.629, train_acc = 0.800 (3.294 sec/step)
step 18280 	 loss = 0.758, train_acc = 0.800 (3.325 sec/step)
step 18290 	 loss = 0.425, train_acc = 0.800 (3.317 sec/step)
step 18300 	 loss = 0.664, train_acc = 0.800 (3.301 sec/step)
step 18310 	 loss = 0.491, train_acc = 0.800 (3.319 sec/step)
step 18320 	 loss = 0.152, train_acc = 1.000 (3.224 sec/step)
step 18330 	 loss = 0.437, train_acc = 0.900 (3.261 sec/step)
step 18340 	 loss = 0.134, train_acc = 1.000 (3.284 sec/step)
step 18350 	 loss = 0.068, train_acc = 1.000 (3.322 sec/step)
step 18360 	 loss = 0.531, train_acc = 0.800 (3.289 sec/step)
step 18370 	 loss = 0.216, train_acc = 0.900 (3.378 sec/step)
step 18380 	 loss = 0.318, train_acc = 0.900 (3.420 sec/step)
step 18390 	 loss = 0.145, train_acc = 0.900 (3.259 sec/step)
step 18400 	 loss = 0.382, train_acc = 0.900 (3.312 sec/step)
step 18410 	 loss = 0.134, train_acc = 1.000 (3.269 sec/step)
step 18420 	 loss = 0.334, train_acc = 0.900 (3.242 sec/step)
step 18430 	 loss = 0.308, train_acc = 0.800 (3.284 sec/step)
step 18440 	 loss = 0.008, train_acc = 1.000 (3.296 sec/step)
step 18450 	 loss = 0.426, train_acc = 0.900 (3.280 sec/step)
step 18460 	 loss = 0.306, train_acc = 0.900 (3.289 sec/step)
step 18470 	 loss = 0.159, train_acc = 0.900 (3.328 sec/step)
step 18480 	 loss = 0.852, train_acc = 0.700 (3.284 sec/step)
step 18490 	 loss = 0.538, train_acc = 0.900 (3.308 sec/step)
step 18500 	 loss = 0.357, train_acc = 0.900 (3.266 sec/step)
step 18510 	 loss = 2.099, train_acc = 0.700 (3.271 sec/step)
step 18520 	 loss = 0.095, train_acc = 1.000 (3.257 sec/step)
step 18530 	 loss = 0.210, train_acc = 0.900 (3.324 sec/step)
step 18540 	 loss = 0.210, train_acc = 1.000 (3.335 sec/step)
step 18550 	 loss = 0.247, train_acc = 0.900 (3.281 sec/step)
step 18560 	 loss = 0.062, train_acc = 1.000 (3.266 sec/step)
step 18570 	 loss = 0.078, train_acc = 1.000 (3.356 sec/step)
step 18580 	 loss = 0.403, train_acc = 0.900 (3.368 sec/step)
step 18590 	 loss = 0.606, train_acc = 0.900 (3.391 sec/step)
step 18600 	 loss = 0.113, train_acc = 1.000 (3.230 sec/step)
step 18610 	 loss = 0.532, train_acc = 0.900 (3.313 sec/step)
step 18620 	 loss = 0.077, train_acc = 1.000 (3.246 sec/step)
step 18630 	 loss = 0.377, train_acc = 0.900 (3.262 sec/step)
step 18640 	 loss = 1.340, train_acc = 0.700 (3.290 sec/step)
step 18650 	 loss = 0.106, train_acc = 1.000 (3.331 sec/step)
step 18660 	 loss = 0.399, train_acc = 0.800 (3.255 sec/step)
step 18670 	 loss = 0.537, train_acc = 0.800 (3.278 sec/step)
step 18680 	 loss = 0.402, train_acc = 0.900 (3.261 sec/step)
step 18690 	 loss = 0.046, train_acc = 1.000 (3.267 sec/step)
step 18700 	 loss = 0.535, train_acc = 0.900 (3.303 sec/step)
step 18710 	 loss = 0.186, train_acc = 0.900 (3.277 sec/step)
step 18720 	 loss = 0.262, train_acc = 0.800 (3.268 sec/step)
step 18730 	 loss = 0.250, train_acc = 0.900 (3.290 sec/step)
step 18740 	 loss = 0.996, train_acc = 0.600 (3.263 sec/step)
step 18750 	 loss = 0.193, train_acc = 0.900 (3.295 sec/step)
step 18760 	 loss = 0.440, train_acc = 0.800 (3.258 sec/step)
step 18770 	 loss = 0.460, train_acc = 0.900 (3.281 sec/step)
step 18780 	 loss = 0.084, train_acc = 1.000 (3.417 sec/step)
step 18790 	 loss = 0.040, train_acc = 1.000 (3.276 sec/step)
step 18800 	 loss = 0.053, train_acc = 1.000 (3.376 sec/step)
step 18810 	 loss = 0.422, train_acc = 0.900 (3.265 sec/step)
step 18820 	 loss = 0.376, train_acc = 0.800 (3.268 sec/step)
step 18830 	 loss = 0.770, train_acc = 0.800 (3.258 sec/step)
step 18840 	 loss = 0.449, train_acc = 0.800 (3.298 sec/step)
step 18850 	 loss = 0.285, train_acc = 0.900 (3.242 sec/step)
step 18860 	 loss = 0.380, train_acc = 0.900 (3.259 sec/step)
step 18870 	 loss = 0.068, train_acc = 1.000 (3.269 sec/step)
step 18880 	 loss = 0.115, train_acc = 1.000 (3.286 sec/step)
step 18890 	 loss = 0.130, train_acc = 1.000 (3.292 sec/step)
step 18900 	 loss = 0.210, train_acc = 0.900 (3.261 sec/step)
step 18910 	 loss = 0.074, train_acc = 1.000 (3.293 sec/step)
step 18920 	 loss = 0.671, train_acc = 0.800 (3.254 sec/step)
step 18930 	 loss = 0.321, train_acc = 0.900 (3.295 sec/step)
step 18940 	 loss = 1.336, train_acc = 0.700 (3.302 sec/step)
step 18950 	 loss = 0.052, train_acc = 1.000 (3.338 sec/step)
step 18960 	 loss = 0.015, train_acc = 1.000 (3.332 sec/step)
step 18970 	 loss = 0.121, train_acc = 0.900 (3.242 sec/step)
step 18980 	 loss = 0.257, train_acc = 1.000 (3.279 sec/step)
step 18990 	 loss = 2.174, train_acc = 0.700 (3.248 sec/step)
VALIDATION 	 acc = 0.518 (3.659 sec)
step 19000 	 loss = 0.420, train_acc = 0.900 (3.319 sec/step)
step 19010 	 loss = 0.276, train_acc = 0.900 (3.249 sec/step)
step 19020 	 loss = 0.774, train_acc = 0.700 (3.242 sec/step)
step 19030 	 loss = 0.020, train_acc = 1.000 (3.311 sec/step)
step 19040 	 loss = 0.039, train_acc = 1.000 (3.226 sec/step)
step 19050 	 loss = 0.355, train_acc = 0.900 (3.258 sec/step)
step 19060 	 loss = 0.149, train_acc = 0.900 (3.274 sec/step)
step 19070 	 loss = 0.356, train_acc = 0.900 (3.342 sec/step)
step 19080 	 loss = 0.025, train_acc = 1.000 (3.296 sec/step)
step 19090 	 loss = 0.578, train_acc = 0.800 (3.290 sec/step)
step 19100 	 loss = 1.641, train_acc = 0.800 (3.233 sec/step)
step 19110 	 loss = 0.041, train_acc = 1.000 (3.239 sec/step)
step 19120 	 loss = 1.040, train_acc = 0.600 (3.296 sec/step)
step 19130 	 loss = 0.750, train_acc = 0.700 (3.312 sec/step)
step 19140 	 loss = 0.634, train_acc = 0.800 (3.273 sec/step)
step 19150 	 loss = 0.111, train_acc = 1.000 (3.241 sec/step)
step 19160 	 loss = 1.772, train_acc = 0.800 (3.297 sec/step)
step 19170 	 loss = 0.178, train_acc = 0.900 (3.324 sec/step)
step 19180 	 loss = 0.016, train_acc = 1.000 (3.244 sec/step)
step 19190 	 loss = 0.412, train_acc = 0.800 (3.291 sec/step)
step 19200 	 loss = 0.716, train_acc = 0.800 (3.288 sec/step)
step 19210 	 loss = 0.429, train_acc = 0.900 (3.280 sec/step)
step 19220 	 loss = 0.899, train_acc = 0.700 (3.235 sec/step)
step 19230 	 loss = 0.409, train_acc = 0.900 (3.245 sec/step)
step 19240 	 loss = 0.397, train_acc = 0.900 (3.266 sec/step)
step 19250 	 loss = 0.173, train_acc = 1.000 (3.292 sec/step)
step 19260 	 loss = 0.115, train_acc = 1.000 (3.261 sec/step)
step 19270 	 loss = 0.869, train_acc = 0.600 (3.244 sec/step)
step 19280 	 loss = 0.077, train_acc = 1.000 (3.272 sec/step)
step 19290 	 loss = 0.074, train_acc = 1.000 (3.295 sec/step)
step 19300 	 loss = 0.814, train_acc = 0.800 (3.270 sec/step)
step 19310 	 loss = 0.907, train_acc = 0.700 (3.299 sec/step)
step 19320 	 loss = 0.074, train_acc = 1.000 (3.313 sec/step)
step 19330 	 loss = 0.179, train_acc = 1.000 (3.299 sec/step)
step 19340 	 loss = 0.052, train_acc = 1.000 (3.306 sec/step)
step 19350 	 loss = 0.110, train_acc = 1.000 (3.366 sec/step)
step 19360 	 loss = 0.198, train_acc = 0.900 (3.297 sec/step)
step 19370 	 loss = 0.187, train_acc = 1.000 (3.234 sec/step)
step 19380 	 loss = 0.490, train_acc = 0.800 (3.278 sec/step)
step 19390 	 loss = 0.115, train_acc = 0.900 (3.294 sec/step)
step 19400 	 loss = 1.096, train_acc = 0.600 (3.304 sec/step)
step 19410 	 loss = 0.174, train_acc = 0.900 (3.287 sec/step)
step 19420 	 loss = 0.126, train_acc = 1.000 (3.289 sec/step)
step 19430 	 loss = 0.752, train_acc = 0.800 (3.265 sec/step)
step 19440 	 loss = 0.710, train_acc = 0.900 (3.280 sec/step)
step 19450 	 loss = 0.815, train_acc = 0.800 (3.312 sec/step)
step 19460 	 loss = 0.185, train_acc = 0.900 (3.302 sec/step)
step 19470 	 loss = 0.171, train_acc = 0.900 (3.262 sec/step)
step 19480 	 loss = 0.205, train_acc = 1.000 (3.332 sec/step)
step 19490 	 loss = 0.140, train_acc = 1.000 (3.318 sec/step)
step 19500 	 loss = 0.082, train_acc = 1.000 (3.264 sec/step)
step 19510 	 loss = 0.766, train_acc = 0.900 (3.252 sec/step)
step 19520 	 loss = 0.585, train_acc = 0.900 (3.325 sec/step)
step 19530 	 loss = 0.191, train_acc = 0.900 (3.287 sec/step)
step 19540 	 loss = 0.234, train_acc = 0.800 (3.312 sec/step)
step 19550 	 loss = 0.101, train_acc = 1.000 (3.313 sec/step)
step 19560 	 loss = 0.887, train_acc = 0.800 (3.261 sec/step)
step 19570 	 loss = 0.092, train_acc = 1.000 (3.351 sec/step)
step 19580 	 loss = 0.719, train_acc = 0.600 (3.282 sec/step)
step 19590 	 loss = 1.303, train_acc = 0.700 (3.309 sec/step)
step 19600 	 loss = 2.097, train_acc = 0.500 (3.264 sec/step)
step 19610 	 loss = 0.083, train_acc = 1.000 (3.273 sec/step)
step 19620 	 loss = 0.128, train_acc = 1.000 (3.258 sec/step)
step 19630 	 loss = 0.545, train_acc = 0.800 (3.254 sec/step)
step 19640 	 loss = 1.940, train_acc = 0.600 (3.286 sec/step)
step 19650 	 loss = 0.196, train_acc = 1.000 (3.292 sec/step)
step 19660 	 loss = 0.281, train_acc = 1.000 (3.340 sec/step)
step 19670 	 loss = 0.002, train_acc = 1.000 (3.363 sec/step)
step 19680 	 loss = 0.887, train_acc = 0.800 (3.307 sec/step)
step 19690 	 loss = 0.013, train_acc = 1.000 (3.295 sec/step)
step 19700 	 loss = 0.755, train_acc = 0.700 (3.274 sec/step)
step 19710 	 loss = 0.208, train_acc = 0.900 (3.255 sec/step)
step 19720 	 loss = 0.152, train_acc = 0.900 (3.280 sec/step)
step 19730 	 loss = 0.244, train_acc = 1.000 (3.289 sec/step)
step 19740 	 loss = 0.242, train_acc = 0.900 (3.275 sec/step)
step 19750 	 loss = 1.128, train_acc = 0.600 (3.322 sec/step)
step 19760 	 loss = 0.052, train_acc = 1.000 (3.316 sec/step)
step 19770 	 loss = 0.375, train_acc = 0.800 (3.269 sec/step)
step 19780 	 loss = 0.380, train_acc = 0.800 (3.323 sec/step)
step 19790 	 loss = 0.446, train_acc = 0.900 (3.321 sec/step)
step 19800 	 loss = 0.083, train_acc = 1.000 (3.292 sec/step)
step 19810 	 loss = 0.706, train_acc = 0.900 (3.288 sec/step)
step 19820 	 loss = 0.228, train_acc = 0.900 (3.253 sec/step)
step 19830 	 loss = 0.223, train_acc = 0.900 (3.299 sec/step)
step 19840 	 loss = 0.009, train_acc = 1.000 (3.299 sec/step)
step 19850 	 loss = 0.265, train_acc = 0.900 (3.297 sec/step)
step 19860 	 loss = 0.724, train_acc = 0.800 (3.326 sec/step)
step 19870 	 loss = 2.135, train_acc = 0.700 (3.295 sec/step)
step 19880 	 loss = 0.277, train_acc = 0.900 (3.228 sec/step)
step 19890 	 loss = 0.629, train_acc = 0.800 (3.313 sec/step)
step 19900 	 loss = 0.887, train_acc = 0.900 (3.317 sec/step)
step 19910 	 loss = 0.683, train_acc = 0.700 (3.237 sec/step)
step 19920 	 loss = 0.388, train_acc = 0.800 (3.322 sec/step)
step 19930 	 loss = 0.138, train_acc = 0.900 (3.286 sec/step)
step 19940 	 loss = 1.090, train_acc = 0.500 (3.314 sec/step)
step 19950 	 loss = 0.395, train_acc = 0.800 (3.311 sec/step)
step 19960 	 loss = 0.342, train_acc = 0.800 (3.286 sec/step)
step 19970 	 loss = 0.370, train_acc = 0.900 (3.321 sec/step)
step 19980 	 loss = 0.472, train_acc = 0.900 (3.285 sec/step)
step 19990 	 loss = 0.637, train_acc = 0.800 (3.391 sec/step)
step 20000 	 loss = 0.379, train_acc = 0.900 (3.296 sec/step)
step 20010 	 loss = 0.178, train_acc = 1.000 (3.273 sec/step)
step 20020 	 loss = 0.798, train_acc = 0.800 (3.296 sec/step)
step 20030 	 loss = 0.047, train_acc = 1.000 (3.420 sec/step)
step 20040 	 loss = 1.108, train_acc = 0.700 (3.307 sec/step)
step 20050 	 loss = 0.274, train_acc = 0.900 (3.236 sec/step)
step 20060 	 loss = 0.185, train_acc = 0.900 (3.279 sec/step)
step 20070 	 loss = 0.427, train_acc = 0.800 (3.268 sec/step)
step 20080 	 loss = 0.634, train_acc = 0.800 (3.305 sec/step)
step 20090 	 loss = 0.401, train_acc = 0.700 (3.241 sec/step)
step 20100 	 loss = 0.437, train_acc = 0.800 (3.272 sec/step)
step 20110 	 loss = 0.360, train_acc = 0.900 (3.241 sec/step)
step 20120 	 loss = 0.287, train_acc = 0.900 (3.252 sec/step)
step 20130 	 loss = 0.118, train_acc = 1.000 (3.297 sec/step)
step 20140 	 loss = 0.606, train_acc = 0.800 (3.268 sec/step)
step 20150 	 loss = 0.608, train_acc = 0.800 (3.452 sec/step)
step 20160 	 loss = 0.223, train_acc = 1.000 (3.231 sec/step)
step 20170 	 loss = 0.168, train_acc = 0.900 (3.286 sec/step)
step 20180 	 loss = 0.591, train_acc = 0.800 (3.326 sec/step)
step 20190 	 loss = 0.284, train_acc = 0.800 (3.296 sec/step)
step 20200 	 loss = 0.982, train_acc = 0.600 (3.427 sec/step)
step 20210 	 loss = 0.761, train_acc = 0.800 (3.285 sec/step)
step 20220 	 loss = 0.146, train_acc = 1.000 (3.278 sec/step)
step 20230 	 loss = 0.619, train_acc = 0.800 (3.267 sec/step)
step 20240 	 loss = 0.182, train_acc = 1.000 (3.242 sec/step)
step 20250 	 loss = 0.578, train_acc = 0.800 (3.283 sec/step)
step 20260 	 loss = 0.124, train_acc = 1.000 (3.259 sec/step)
step 20270 	 loss = 0.109, train_acc = 1.000 (3.309 sec/step)
step 20280 	 loss = 0.224, train_acc = 0.900 (3.325 sec/step)
step 20290 	 loss = 0.128, train_acc = 0.900 (3.249 sec/step)
step 20300 	 loss = 0.065, train_acc = 1.000 (3.239 sec/step)
step 20310 	 loss = 0.707, train_acc = 0.800 (3.319 sec/step)
step 20320 	 loss = 0.092, train_acc = 1.000 (3.347 sec/step)
step 20330 	 loss = 0.303, train_acc = 0.900 (3.319 sec/step)
step 20340 	 loss = 0.537, train_acc = 0.700 (3.324 sec/step)
step 20350 	 loss = 0.259, train_acc = 0.900 (3.240 sec/step)
step 20360 	 loss = 0.452, train_acc = 0.900 (3.339 sec/step)
step 20370 	 loss = 0.108, train_acc = 1.000 (3.260 sec/step)
step 20380 	 loss = 0.056, train_acc = 1.000 (3.331 sec/step)
step 20390 	 loss = 1.561, train_acc = 0.700 (3.265 sec/step)
step 20400 	 loss = 0.034, train_acc = 1.000 (3.305 sec/step)
step 20410 	 loss = 1.056, train_acc = 0.700 (3.312 sec/step)
step 20420 	 loss = 0.656, train_acc = 0.900 (3.316 sec/step)
step 20430 	 loss = 0.551, train_acc = 0.900 (3.278 sec/step)
step 20440 	 loss = 0.688, train_acc = 0.700 (3.251 sec/step)
step 20450 	 loss = 0.718, train_acc = 0.900 (3.304 sec/step)
step 20460 	 loss = 0.370, train_acc = 0.700 (3.252 sec/step)
step 20470 	 loss = 1.112, train_acc = 0.600 (3.302 sec/step)
step 20480 	 loss = 1.426, train_acc = 0.800 (3.301 sec/step)
step 20490 	 loss = 0.091, train_acc = 1.000 (3.293 sec/step)
step 20500 	 loss = 0.244, train_acc = 0.900 (3.262 sec/step)
step 20510 	 loss = 0.408, train_acc = 0.800 (3.305 sec/step)
step 20520 	 loss = 1.624, train_acc = 0.700 (3.329 sec/step)
step 20530 	 loss = 0.223, train_acc = 1.000 (3.261 sec/step)
step 20540 	 loss = 0.019, train_acc = 1.000 (3.323 sec/step)
step 20550 	 loss = 0.009, train_acc = 1.000 (3.264 sec/step)
step 20560 	 loss = 0.242, train_acc = 0.800 (3.269 sec/step)
step 20570 	 loss = 0.036, train_acc = 1.000 (3.237 sec/step)
step 20580 	 loss = 0.316, train_acc = 0.900 (3.278 sec/step)
step 20590 	 loss = 0.037, train_acc = 1.000 (3.255 sec/step)
step 20600 	 loss = 0.285, train_acc = 0.800 (3.271 sec/step)
step 20610 	 loss = 1.884, train_acc = 0.600 (3.289 sec/step)
step 20620 	 loss = 0.272, train_acc = 0.900 (3.256 sec/step)
step 20630 	 loss = 1.141, train_acc = 0.600 (3.249 sec/step)
step 20640 	 loss = 1.288, train_acc = 0.600 (3.272 sec/step)
step 20650 	 loss = 0.061, train_acc = 1.000 (3.256 sec/step)
step 20660 	 loss = 0.467, train_acc = 0.800 (3.254 sec/step)
step 20670 	 loss = 0.164, train_acc = 0.900 (3.286 sec/step)
step 20680 	 loss = 0.124, train_acc = 1.000 (3.320 sec/step)
step 20690 	 loss = 0.378, train_acc = 0.900 (3.295 sec/step)
step 20700 	 loss = 0.579, train_acc = 0.700 (3.309 sec/step)
step 20710 	 loss = 0.031, train_acc = 1.000 (3.306 sec/step)
step 20720 	 loss = 0.829, train_acc = 0.800 (3.252 sec/step)
step 20730 	 loss = 0.747, train_acc = 0.700 (3.242 sec/step)
step 20740 	 loss = 0.134, train_acc = 1.000 (3.283 sec/step)
step 20750 	 loss = 0.025, train_acc = 1.000 (3.279 sec/step)
step 20760 	 loss = 0.053, train_acc = 1.000 (3.302 sec/step)
step 20770 	 loss = 0.070, train_acc = 1.000 (3.276 sec/step)
step 20780 	 loss = 0.418, train_acc = 0.800 (3.287 sec/step)
step 20790 	 loss = 0.090, train_acc = 1.000 (3.328 sec/step)
step 20800 	 loss = 2.417, train_acc = 0.700 (3.352 sec/step)
step 20810 	 loss = 0.217, train_acc = 1.000 (3.259 sec/step)
step 20820 	 loss = 0.016, train_acc = 1.000 (3.284 sec/step)
step 20830 	 loss = 0.528, train_acc = 0.800 (3.308 sec/step)
step 20840 	 loss = 0.506, train_acc = 0.900 (3.280 sec/step)
step 20850 	 loss = 0.380, train_acc = 0.800 (3.321 sec/step)
step 20860 	 loss = 0.322, train_acc = 0.900 (3.258 sec/step)
step 20870 	 loss = 0.470, train_acc = 0.800 (3.272 sec/step)
step 20880 	 loss = 0.016, train_acc = 1.000 (3.278 sec/step)
step 20890 	 loss = 0.474, train_acc = 0.900 (3.240 sec/step)
VALIDATION 	 acc = 0.526 (3.617 sec)
New Best Accuracy 0.526 > Old Best 0.524.  Saving...
The checkpoint has been created.
step 20900 	 loss = 0.128, train_acc = 0.900 (3.441 sec/step)
step 20910 	 loss = 0.123, train_acc = 1.000 (3.303 sec/step)
step 20920 	 loss = 0.165, train_acc = 1.000 (3.323 sec/step)
step 20930 	 loss = 1.508, train_acc = 0.400 (3.304 sec/step)
step 20940 	 loss = 0.135, train_acc = 1.000 (3.261 sec/step)
step 20950 	 loss = 0.341, train_acc = 0.900 (3.329 sec/step)
step 20960 	 loss = 0.005, train_acc = 1.000 (3.299 sec/step)
step 20970 	 loss = 0.561, train_acc = 0.800 (3.296 sec/step)
step 20980 	 loss = 0.282, train_acc = 0.900 (3.300 sec/step)
step 20990 	 loss = 0.106, train_acc = 1.000 (3.329 sec/step)
step 21000 	 loss = 0.216, train_acc = 0.800 (3.250 sec/step)
step 21010 	 loss = 0.165, train_acc = 0.900 (3.279 sec/step)
step 21020 	 loss = 0.012, train_acc = 1.000 (3.280 sec/step)
step 21030 	 loss = 0.669, train_acc = 0.800 (3.277 sec/step)
step 21040 	 loss = 0.219, train_acc = 0.900 (3.317 sec/step)
step 21050 	 loss = 0.089, train_acc = 1.000 (3.300 sec/step)
step 21060 	 loss = 0.085, train_acc = 1.000 (3.301 sec/step)
step 21070 	 loss = 0.077, train_acc = 0.900 (3.316 sec/step)
step 21080 	 loss = 0.060, train_acc = 1.000 (3.276 sec/step)
step 21090 	 loss = 0.768, train_acc = 0.700 (3.274 sec/step)
step 21100 	 loss = 0.701, train_acc = 0.700 (3.268 sec/step)
step 21110 	 loss = 0.242, train_acc = 0.900 (3.258 sec/step)
step 21120 	 loss = 0.685, train_acc = 0.800 (3.335 sec/step)
step 21130 	 loss = 0.177, train_acc = 1.000 (3.268 sec/step)
step 21140 	 loss = 0.050, train_acc = 1.000 (3.247 sec/step)
step 21150 	 loss = 0.197, train_acc = 0.900 (3.269 sec/step)
step 21160 	 loss = 0.190, train_acc = 1.000 (3.316 sec/step)
step 21170 	 loss = 0.244, train_acc = 0.900 (3.264 sec/step)
step 21180 	 loss = 0.565, train_acc = 0.800 (3.336 sec/step)
step 21190 	 loss = 0.025, train_acc = 1.000 (3.282 sec/step)
step 21200 	 loss = 0.399, train_acc = 0.900 (3.245 sec/step)
step 21210 	 loss = 0.494, train_acc = 0.800 (3.275 sec/step)
step 21220 	 loss = 0.881, train_acc = 0.800 (3.280 sec/step)
step 21230 	 loss = 0.342, train_acc = 0.900 (3.319 sec/step)
step 21240 	 loss = 0.384, train_acc = 0.900 (3.248 sec/step)
step 21250 	 loss = 0.127, train_acc = 1.000 (3.321 sec/step)
step 21260 	 loss = 1.222, train_acc = 0.800 (3.279 sec/step)
step 21270 	 loss = 0.107, train_acc = 1.000 (3.317 sec/step)
step 21280 	 loss = 0.036, train_acc = 1.000 (3.299 sec/step)
step 21290 	 loss = 0.282, train_acc = 0.900 (3.301 sec/step)
step 21300 	 loss = 0.347, train_acc = 0.900 (3.308 sec/step)
step 21310 	 loss = 1.100, train_acc = 0.700 (3.285 sec/step)
step 21320 	 loss = 1.345, train_acc = 0.600 (3.269 sec/step)
step 21330 	 loss = 0.490, train_acc = 0.900 (3.281 sec/step)
step 21340 	 loss = 0.715, train_acc = 0.700 (3.279 sec/step)
step 21350 	 loss = 0.096, train_acc = 1.000 (3.260 sec/step)
step 21360 	 loss = 0.258, train_acc = 0.900 (3.330 sec/step)
step 21370 	 loss = 0.091, train_acc = 0.900 (3.242 sec/step)
step 21380 	 loss = 0.168, train_acc = 1.000 (3.244 sec/step)
step 21390 	 loss = 0.102, train_acc = 1.000 (3.303 sec/step)
step 21400 	 loss = 0.258, train_acc = 0.900 (3.288 sec/step)
step 21410 	 loss = 0.154, train_acc = 1.000 (3.277 sec/step)
step 21420 	 loss = 0.313, train_acc = 0.900 (3.312 sec/step)
step 21430 	 loss = 0.310, train_acc = 0.900 (3.302 sec/step)
step 21440 	 loss = 0.112, train_acc = 0.900 (3.313 sec/step)
step 21450 	 loss = 0.516, train_acc = 0.800 (3.267 sec/step)
step 21460 	 loss = 0.221, train_acc = 0.900 (3.349 sec/step)
step 21470 	 loss = 0.040, train_acc = 1.000 (3.267 sec/step)
step 21480 	 loss = 1.362, train_acc = 0.500 (3.335 sec/step)
step 21490 	 loss = 0.595, train_acc = 0.800 (3.280 sec/step)
step 21500 	 loss = 0.496, train_acc = 0.700 (3.269 sec/step)
step 21510 	 loss = 0.532, train_acc = 0.800 (3.362 sec/step)
step 21520 	 loss = 0.409, train_acc = 0.800 (3.276 sec/step)
step 21530 	 loss = 1.445, train_acc = 0.400 (3.294 sec/step)
step 21540 	 loss = 0.015, train_acc = 1.000 (3.332 sec/step)
step 21550 	 loss = 0.907, train_acc = 0.700 (3.327 sec/step)
step 21560 	 loss = 0.185, train_acc = 1.000 (3.282 sec/step)
step 21570 	 loss = 2.420, train_acc = 0.500 (3.306 sec/step)
step 21580 	 loss = 0.413, train_acc = 0.900 (3.290 sec/step)
step 21590 	 loss = 0.004, train_acc = 1.000 (3.279 sec/step)
step 21600 	 loss = 0.269, train_acc = 0.900 (3.295 sec/step)
step 21610 	 loss = 0.899, train_acc = 0.700 (3.303 sec/step)
step 21620 	 loss = 0.324, train_acc = 0.900 (3.295 sec/step)
step 21630 	 loss = 0.038, train_acc = 1.000 (3.306 sec/step)
step 21640 	 loss = 0.120, train_acc = 0.900 (3.296 sec/step)
step 21650 	 loss = 0.254, train_acc = 0.900 (3.329 sec/step)
step 21660 	 loss = 0.201, train_acc = 0.900 (3.296 sec/step)
step 21670 	 loss = 0.310, train_acc = 0.800 (3.249 sec/step)
step 21680 	 loss = 0.218, train_acc = 0.900 (3.283 sec/step)
step 21690 	 loss = 0.226, train_acc = 1.000 (3.274 sec/step)
step 21700 	 loss = 0.600, train_acc = 0.700 (3.301 sec/step)
step 21710 	 loss = 0.260, train_acc = 0.900 (3.436 sec/step)
step 21720 	 loss = 0.439, train_acc = 0.900 (3.267 sec/step)
step 21730 	 loss = 0.318, train_acc = 1.000 (3.319 sec/step)
step 21740 	 loss = 1.017, train_acc = 0.500 (3.245 sec/step)
step 21750 	 loss = 0.155, train_acc = 0.900 (3.268 sec/step)
step 21760 	 loss = 0.136, train_acc = 0.900 (3.273 sec/step)
step 21770 	 loss = 0.003, train_acc = 1.000 (3.298 sec/step)
step 21780 	 loss = 0.138, train_acc = 1.000 (3.235 sec/step)
step 21790 	 loss = 0.543, train_acc = 0.900 (3.352 sec/step)
step 21800 	 loss = 0.018, train_acc = 1.000 (3.250 sec/step)
step 21810 	 loss = 0.126, train_acc = 1.000 (3.293 sec/step)
step 21820 	 loss = 0.038, train_acc = 1.000 (3.343 sec/step)
step 21830 	 loss = 0.368, train_acc = 0.900 (3.293 sec/step)
step 21840 	 loss = 0.210, train_acc = 0.900 (3.327 sec/step)
step 21850 	 loss = 0.131, train_acc = 1.000 (3.328 sec/step)
step 21860 	 loss = 0.686, train_acc = 0.800 (3.267 sec/step)
step 21870 	 loss = 0.007, train_acc = 1.000 (3.291 sec/step)
step 21880 	 loss = 0.242, train_acc = 0.900 (3.385 sec/step)
step 21890 	 loss = 0.101, train_acc = 1.000 (3.381 sec/step)
step 21900 	 loss = 0.024, train_acc = 1.000 (3.307 sec/step)
step 21910 	 loss = 1.108, train_acc = 0.700 (3.253 sec/step)
step 21920 	 loss = 0.384, train_acc = 0.900 (3.285 sec/step)
step 21930 	 loss = 2.201, train_acc = 0.500 (3.277 sec/step)
step 21940 	 loss = 0.343, train_acc = 0.900 (3.274 sec/step)
step 21950 	 loss = 0.058, train_acc = 1.000 (3.296 sec/step)
step 21960 	 loss = 0.170, train_acc = 0.900 (3.284 sec/step)
step 21970 	 loss = 0.224, train_acc = 0.900 (3.295 sec/step)
step 21980 	 loss = 0.109, train_acc = 1.000 (3.311 sec/step)
step 21990 	 loss = 0.704, train_acc = 0.900 (3.246 sec/step)
step 22000 	 loss = 0.184, train_acc = 1.000 (3.336 sec/step)
step 22010 	 loss = 0.872, train_acc = 0.700 (3.295 sec/step)
step 22020 	 loss = 0.160, train_acc = 0.900 (3.330 sec/step)
step 22030 	 loss = 1.197, train_acc = 0.700 (3.243 sec/step)
step 22040 	 loss = 0.429, train_acc = 0.700 (3.262 sec/step)
step 22050 	 loss = 0.091, train_acc = 1.000 (3.251 sec/step)
step 22060 	 loss = 0.477, train_acc = 0.900 (3.270 sec/step)
step 22070 	 loss = 0.103, train_acc = 1.000 (3.269 sec/step)
step 22080 	 loss = 0.580, train_acc = 0.900 (3.251 sec/step)
step 22090 	 loss = 1.692, train_acc = 0.600 (3.294 sec/step)
step 22100 	 loss = 0.223, train_acc = 0.900 (3.303 sec/step)
step 22110 	 loss = 0.432, train_acc = 0.900 (3.268 sec/step)
step 22120 	 loss = 0.028, train_acc = 1.000 (3.276 sec/step)
step 22130 	 loss = 0.266, train_acc = 0.900 (3.305 sec/step)
step 22140 	 loss = 0.330, train_acc = 0.900 (3.246 sec/step)
step 22150 	 loss = 0.188, train_acc = 1.000 (3.321 sec/step)
step 22160 	 loss = 0.492, train_acc = 0.800 (3.278 sec/step)
step 22170 	 loss = 0.709, train_acc = 0.900 (3.300 sec/step)
step 22180 	 loss = 0.659, train_acc = 0.900 (3.309 sec/step)
step 22190 	 loss = 1.432, train_acc = 0.800 (3.302 sec/step)
step 22200 	 loss = 0.553, train_acc = 0.900 (3.247 sec/step)
step 22210 	 loss = 0.385, train_acc = 0.900 (3.229 sec/step)
step 22220 	 loss = 0.097, train_acc = 0.900 (3.319 sec/step)
step 22230 	 loss = 0.020, train_acc = 1.000 (3.394 sec/step)
step 22240 	 loss = 0.148, train_acc = 1.000 (3.282 sec/step)
step 22250 	 loss = 0.287, train_acc = 0.800 (3.340 sec/step)
step 22260 	 loss = 0.145, train_acc = 0.900 (3.271 sec/step)
step 22270 	 loss = 0.826, train_acc = 0.700 (3.326 sec/step)
step 22280 	 loss = 0.541, train_acc = 0.900 (3.281 sec/step)
step 22290 	 loss = 0.835, train_acc = 0.600 (3.262 sec/step)
step 22300 	 loss = 0.338, train_acc = 0.900 (3.347 sec/step)
step 22310 	 loss = 0.916, train_acc = 0.700 (3.315 sec/step)
step 22320 	 loss = 2.127, train_acc = 0.700 (3.335 sec/step)
step 22330 	 loss = 0.027, train_acc = 1.000 (3.305 sec/step)
step 22340 	 loss = 0.227, train_acc = 0.900 (3.327 sec/step)
step 22350 	 loss = 0.081, train_acc = 1.000 (3.258 sec/step)
step 22360 	 loss = 0.017, train_acc = 1.000 (3.254 sec/step)
step 22370 	 loss = 0.029, train_acc = 1.000 (3.368 sec/step)
step 22380 	 loss = 0.412, train_acc = 0.800 (3.336 sec/step)
step 22390 	 loss = 0.039, train_acc = 1.000 (3.358 sec/step)
step 22400 	 loss = 0.112, train_acc = 1.000 (3.306 sec/step)
step 22410 	 loss = 0.133, train_acc = 0.900 (3.248 sec/step)
step 22420 	 loss = 0.080, train_acc = 1.000 (3.285 sec/step)
step 22430 	 loss = 0.072, train_acc = 1.000 (3.289 sec/step)
step 22440 	 loss = 1.053, train_acc = 0.700 (3.255 sec/step)
step 22450 	 loss = 0.267, train_acc = 0.900 (3.230 sec/step)
step 22460 	 loss = 0.105, train_acc = 1.000 (3.267 sec/step)
step 22470 	 loss = 0.111, train_acc = 1.000 (3.284 sec/step)
step 22480 	 loss = 0.020, train_acc = 1.000 (3.302 sec/step)
step 22490 	 loss = 0.288, train_acc = 0.800 (3.326 sec/step)
step 22500 	 loss = 0.467, train_acc = 0.900 (3.268 sec/step)
step 22510 	 loss = 1.050, train_acc = 0.700 (3.354 sec/step)
step 22520 	 loss = 0.034, train_acc = 1.000 (3.300 sec/step)
step 22530 	 loss = 0.307, train_acc = 0.900 (3.267 sec/step)
step 22540 	 loss = 0.305, train_acc = 0.900 (3.262 sec/step)
step 22550 	 loss = 0.419, train_acc = 0.800 (3.272 sec/step)
step 22560 	 loss = 0.454, train_acc = 0.800 (3.244 sec/step)
step 22570 	 loss = 1.469, train_acc = 0.700 (3.284 sec/step)
step 22580 	 loss = 0.832, train_acc = 0.900 (3.290 sec/step)
step 22590 	 loss = 1.046, train_acc = 0.500 (3.297 sec/step)
step 22600 	 loss = 0.097, train_acc = 1.000 (3.290 sec/step)
step 22610 	 loss = 1.549, train_acc = 0.700 (3.269 sec/step)
step 22620 	 loss = 0.109, train_acc = 1.000 (3.269 sec/step)
step 22630 	 loss = 0.228, train_acc = 0.900 (3.251 sec/step)
step 22640 	 loss = 1.138, train_acc = 0.800 (3.289 sec/step)
step 22650 	 loss = 1.006, train_acc = 0.800 (3.360 sec/step)
step 22660 	 loss = 0.043, train_acc = 1.000 (3.383 sec/step)
step 22670 	 loss = 0.137, train_acc = 0.900 (3.269 sec/step)
step 22680 	 loss = 0.184, train_acc = 0.900 (3.334 sec/step)
step 22690 	 loss = 0.056, train_acc = 1.000 (3.257 sec/step)
step 22700 	 loss = 0.426, train_acc = 0.800 (3.258 sec/step)
step 22710 	 loss = 0.336, train_acc = 0.900 (3.270 sec/step)
step 22720 	 loss = 1.028, train_acc = 0.600 (3.338 sec/step)
step 22730 	 loss = 0.599, train_acc = 0.800 (3.293 sec/step)
step 22740 	 loss = 0.647, train_acc = 0.700 (3.316 sec/step)
step 22750 	 loss = 0.072, train_acc = 1.000 (3.298 sec/step)
step 22760 	 loss = 2.704, train_acc = 0.300 (3.348 sec/step)
step 22770 	 loss = 0.233, train_acc = 0.900 (3.286 sec/step)
step 22780 	 loss = 0.103, train_acc = 0.900 (3.252 sec/step)
step 22790 	 loss = 0.146, train_acc = 0.900 (3.295 sec/step)
VALIDATION 	 acc = 0.540 (3.647 sec)
New Best Accuracy 0.540 > Old Best 0.526.  Saving...
The checkpoint has been created.
step 22800 	 loss = 0.251, train_acc = 0.900 (3.321 sec/step)
step 22810 	 loss = 0.733, train_acc = 0.800 (3.229 sec/step)
step 22820 	 loss = 0.486, train_acc = 0.800 (3.336 sec/step)
step 22830 	 loss = 0.435, train_acc = 0.900 (3.301 sec/step)
step 22840 	 loss = 1.008, train_acc = 0.700 (3.318 sec/step)
step 22850 	 loss = 0.425, train_acc = 0.900 (3.277 sec/step)
step 22860 	 loss = 0.028, train_acc = 1.000 (3.303 sec/step)
step 22870 	 loss = 0.017, train_acc = 1.000 (3.251 sec/step)
step 22880 	 loss = 0.564, train_acc = 0.900 (3.347 sec/step)
step 22890 	 loss = 1.571, train_acc = 0.600 (3.304 sec/step)
step 22900 	 loss = 0.395, train_acc = 0.900 (3.315 sec/step)
step 22910 	 loss = 0.222, train_acc = 0.900 (3.320 sec/step)
step 22920 	 loss = 0.325, train_acc = 0.900 (3.279 sec/step)
step 22930 	 loss = 0.254, train_acc = 0.900 (3.283 sec/step)
step 22940 	 loss = 0.839, train_acc = 0.700 (3.287 sec/step)
step 22950 	 loss = 2.116, train_acc = 0.600 (3.328 sec/step)
step 22960 	 loss = 0.028, train_acc = 1.000 (3.372 sec/step)
step 22970 	 loss = 1.310, train_acc = 0.900 (3.285 sec/step)
step 22980 	 loss = 0.030, train_acc = 1.000 (3.275 sec/step)
step 22990 	 loss = 0.209, train_acc = 0.900 (3.322 sec/step)
step 23000 	 loss = 0.258, train_acc = 0.900 (3.312 sec/step)
step 23010 	 loss = 0.047, train_acc = 1.000 (3.311 sec/step)
step 23020 	 loss = 1.297, train_acc = 0.600 (3.322 sec/step)
step 23030 	 loss = 0.206, train_acc = 0.900 (3.300 sec/step)
step 23040 	 loss = 0.470, train_acc = 0.700 (3.289 sec/step)
step 23050 	 loss = 0.708, train_acc = 0.700 (3.282 sec/step)
step 23060 	 loss = 0.138, train_acc = 1.000 (3.277 sec/step)
step 23070 	 loss = 0.805, train_acc = 0.700 (3.307 sec/step)
step 23080 	 loss = 0.292, train_acc = 0.900 (3.377 sec/step)
step 23090 	 loss = 0.901, train_acc = 0.600 (3.346 sec/step)
step 23100 	 loss = 0.206, train_acc = 0.900 (3.315 sec/step)
step 23110 	 loss = 0.314, train_acc = 0.900 (3.290 sec/step)
step 23120 	 loss = 0.139, train_acc = 1.000 (3.272 sec/step)
step 23130 	 loss = 0.031, train_acc = 1.000 (3.317 sec/step)
step 23140 	 loss = 0.166, train_acc = 0.900 (3.326 sec/step)
step 23150 	 loss = 0.056, train_acc = 1.000 (3.279 sec/step)
step 23160 	 loss = 0.082, train_acc = 1.000 (3.269 sec/step)
step 23170 	 loss = 0.614, train_acc = 0.900 (3.274 sec/step)
step 23180 	 loss = 0.741, train_acc = 0.900 (3.285 sec/step)
step 23190 	 loss = 0.034, train_acc = 1.000 (3.290 sec/step)
step 23200 	 loss = 0.834, train_acc = 0.800 (3.325 sec/step)
step 23210 	 loss = 0.441, train_acc = 0.900 (3.302 sec/step)
step 23220 	 loss = 0.469, train_acc = 0.900 (3.299 sec/step)
step 23230 	 loss = 0.210, train_acc = 0.900 (3.275 sec/step)
step 23240 	 loss = 0.330, train_acc = 0.900 (3.325 sec/step)
step 23250 	 loss = 0.777, train_acc = 0.700 (3.315 sec/step)
step 23260 	 loss = 0.085, train_acc = 1.000 (3.329 sec/step)
step 23270 	 loss = 0.010, train_acc = 1.000 (3.285 sec/step)
step 23280 	 loss = 0.574, train_acc = 0.700 (3.289 sec/step)
step 23290 	 loss = 0.764, train_acc = 0.800 (3.256 sec/step)
step 23300 	 loss = 1.369, train_acc = 0.700 (3.311 sec/step)
step 23310 	 loss = 0.570, train_acc = 0.700 (3.278 sec/step)
step 23320 	 loss = 0.245, train_acc = 0.800 (3.272 sec/step)
step 23330 	 loss = 0.419, train_acc = 0.800 (3.320 sec/step)
step 23340 	 loss = 0.004, train_acc = 1.000 (3.322 sec/step)
step 23350 	 loss = 0.082, train_acc = 1.000 (3.313 sec/step)
step 23360 	 loss = 0.034, train_acc = 1.000 (3.329 sec/step)
step 23370 	 loss = 0.287, train_acc = 0.900 (3.330 sec/step)
step 23380 	 loss = 0.622, train_acc = 0.800 (3.320 sec/step)
step 23390 	 loss = 0.938, train_acc = 0.700 (3.295 sec/step)
step 23400 	 loss = 0.234, train_acc = 0.900 (3.311 sec/step)
step 23410 	 loss = 0.302, train_acc = 0.900 (3.266 sec/step)
step 23420 	 loss = 0.027, train_acc = 1.000 (3.272 sec/step)
step 23430 	 loss = 0.024, train_acc = 1.000 (3.309 sec/step)
step 23440 	 loss = 0.026, train_acc = 1.000 (3.301 sec/step)
step 23450 	 loss = 0.025, train_acc = 1.000 (3.285 sec/step)
step 23460 	 loss = 0.058, train_acc = 1.000 (3.320 sec/step)
step 23470 	 loss = 0.052, train_acc = 1.000 (3.264 sec/step)
step 23480 	 loss = 0.503, train_acc = 0.900 (3.339 sec/step)
step 23490 	 loss = 0.404, train_acc = 0.900 (3.237 sec/step)
step 23500 	 loss = 0.604, train_acc = 0.900 (3.389 sec/step)
step 23510 	 loss = 0.049, train_acc = 1.000 (3.284 sec/step)
step 23520 	 loss = 0.121, train_acc = 1.000 (3.331 sec/step)
step 23530 	 loss = 1.392, train_acc = 0.600 (3.295 sec/step)
step 23540 	 loss = 0.228, train_acc = 0.900 (3.313 sec/step)
step 23550 	 loss = 0.669, train_acc = 0.900 (3.295 sec/step)
step 23560 	 loss = 0.457, train_acc = 0.900 (3.287 sec/step)
step 23570 	 loss = 0.199, train_acc = 1.000 (3.295 sec/step)
step 23580 	 loss = 0.166, train_acc = 0.900 (3.296 sec/step)
step 23590 	 loss = 0.229, train_acc = 1.000 (3.266 sec/step)
step 23600 	 loss = 0.191, train_acc = 0.900 (3.282 sec/step)
step 23610 	 loss = 0.081, train_acc = 1.000 (3.294 sec/step)
step 23620 	 loss = 0.067, train_acc = 1.000 (3.320 sec/step)
step 23630 	 loss = 0.456, train_acc = 0.800 (3.288 sec/step)
step 23640 	 loss = 0.578, train_acc = 0.900 (3.286 sec/step)
step 23650 	 loss = 0.231, train_acc = 0.900 (3.310 sec/step)
step 23660 	 loss = 0.520, train_acc = 0.600 (3.267 sec/step)
step 23670 	 loss = 0.281, train_acc = 0.900 (3.343 sec/step)
step 23680 	 loss = 0.133, train_acc = 0.900 (3.293 sec/step)
step 23690 	 loss = 0.116, train_acc = 0.900 (3.349 sec/step)
step 23700 	 loss = 0.094, train_acc = 1.000 (3.263 sec/step)
step 23710 	 loss = 0.678, train_acc = 0.800 (3.289 sec/step)
step 23720 	 loss = 0.869, train_acc = 0.900 (3.301 sec/step)
step 23730 	 loss = 0.060, train_acc = 1.000 (3.256 sec/step)
step 23740 	 loss = 0.052, train_acc = 1.000 (3.290 sec/step)
step 23750 	 loss = 0.147, train_acc = 1.000 (3.304 sec/step)
step 23760 	 loss = 0.291, train_acc = 0.900 (3.277 sec/step)
step 23770 	 loss = 0.179, train_acc = 0.900 (3.297 sec/step)
step 23780 	 loss = 0.047, train_acc = 1.000 (3.297 sec/step)
step 23790 	 loss = 0.453, train_acc = 0.900 (3.271 sec/step)
step 23800 	 loss = 0.137, train_acc = 1.000 (3.289 sec/step)
step 23810 	 loss = 0.204, train_acc = 0.900 (3.312 sec/step)
step 23820 	 loss = 0.100, train_acc = 1.000 (3.297 sec/step)
step 23830 	 loss = 0.624, train_acc = 0.800 (3.286 sec/step)
step 23840 	 loss = 0.345, train_acc = 0.900 (3.290 sec/step)
step 23850 	 loss = 0.121, train_acc = 0.900 (3.291 sec/step)
step 23860 	 loss = 0.043, train_acc = 1.000 (3.318 sec/step)
step 23870 	 loss = 1.102, train_acc = 0.800 (3.332 sec/step)
step 23880 	 loss = 0.438, train_acc = 0.800 (3.300 sec/step)
step 23890 	 loss = 0.901, train_acc = 0.700 (3.275 sec/step)
step 23900 	 loss = 0.563, train_acc = 0.800 (3.272 sec/step)
step 23910 	 loss = 1.015, train_acc = 0.800 (3.295 sec/step)
step 23920 	 loss = 0.006, train_acc = 1.000 (3.318 sec/step)
step 23930 	 loss = 0.256, train_acc = 0.900 (3.306 sec/step)
step 23940 	 loss = 0.147, train_acc = 1.000 (3.306 sec/step)
step 23950 	 loss = 0.055, train_acc = 1.000 (3.350 sec/step)
step 23960 	 loss = 0.010, train_acc = 1.000 (3.280 sec/step)
step 23970 	 loss = 0.137, train_acc = 1.000 (3.401 sec/step)
step 23980 	 loss = 0.097, train_acc = 1.000 (3.252 sec/step)
step 23990 	 loss = 1.054, train_acc = 0.700 (3.328 sec/step)
step 24000 	 loss = 0.159, train_acc = 1.000 (3.297 sec/step)
step 24010 	 loss = 0.028, train_acc = 1.000 (3.310 sec/step)
step 24020 	 loss = 0.005, train_acc = 1.000 (3.289 sec/step)
step 24030 	 loss = 0.220, train_acc = 0.900 (3.281 sec/step)
step 24040 	 loss = 0.171, train_acc = 0.900 (3.310 sec/step)
step 24050 	 loss = 0.182, train_acc = 0.900 (3.350 sec/step)
step 24060 	 loss = 0.084, train_acc = 1.000 (3.273 sec/step)
step 24070 	 loss = 1.210, train_acc = 0.600 (3.339 sec/step)
step 24080 	 loss = 0.000, train_acc = 1.000 (3.325 sec/step)
step 24090 	 loss = 0.672, train_acc = 0.800 (3.298 sec/step)
step 24100 	 loss = 0.047, train_acc = 1.000 (3.291 sec/step)
step 24110 	 loss = 0.318, train_acc = 0.900 (3.344 sec/step)
step 24120 	 loss = 0.051, train_acc = 1.000 (3.263 sec/step)
step 24130 	 loss = 0.261, train_acc = 0.900 (3.284 sec/step)
step 24140 	 loss = 0.371, train_acc = 0.900 (3.304 sec/step)
step 24150 	 loss = 0.749, train_acc = 0.900 (3.276 sec/step)
step 24160 	 loss = 0.055, train_acc = 1.000 (3.279 sec/step)
step 24170 	 loss = 0.047, train_acc = 1.000 (3.285 sec/step)
step 24180 	 loss = 0.904, train_acc = 0.800 (3.270 sec/step)
step 24190 	 loss = 1.280, train_acc = 0.600 (3.289 sec/step)
step 24200 	 loss = 0.048, train_acc = 1.000 (3.273 sec/step)
step 24210 	 loss = 0.686, train_acc = 0.900 (3.296 sec/step)
step 24220 	 loss = 0.460, train_acc = 0.800 (3.280 sec/step)
step 24230 	 loss = 0.021, train_acc = 1.000 (3.291 sec/step)
step 24240 	 loss = 0.006, train_acc = 1.000 (3.393 sec/step)
step 24250 	 loss = 0.665, train_acc = 0.700 (3.316 sec/step)
step 24260 	 loss = 0.059, train_acc = 1.000 (3.310 sec/step)
step 24270 	 loss = 0.275, train_acc = 0.800 (3.252 sec/step)
step 24280 	 loss = 1.406, train_acc = 0.500 (3.319 sec/step)
step 24290 	 loss = 0.230, train_acc = 0.900 (3.310 sec/step)
step 24300 	 loss = 0.187, train_acc = 0.900 (3.296 sec/step)
step 24310 	 loss = 0.125, train_acc = 1.000 (3.295 sec/step)
step 24320 	 loss = 0.114, train_acc = 1.000 (3.307 sec/step)
step 24330 	 loss = 1.545, train_acc = 0.600 (3.344 sec/step)
step 24340 	 loss = 0.356, train_acc = 0.900 (3.305 sec/step)
step 24350 	 loss = 0.258, train_acc = 0.900 (3.332 sec/step)
step 24360 	 loss = 0.563, train_acc = 0.800 (3.310 sec/step)
step 24370 	 loss = 0.028, train_acc = 1.000 (3.298 sec/step)
step 24380 	 loss = 0.274, train_acc = 0.900 (3.307 sec/step)
step 24390 	 loss = 0.073, train_acc = 1.000 (3.316 sec/step)
step 24400 	 loss = 0.217, train_acc = 1.000 (3.310 sec/step)
step 24410 	 loss = 0.628, train_acc = 0.800 (3.303 sec/step)
step 24420 	 loss = 0.088, train_acc = 1.000 (3.351 sec/step)
step 24430 	 loss = 0.074, train_acc = 1.000 (3.299 sec/step)
step 24440 	 loss = 0.099, train_acc = 1.000 (3.304 sec/step)
step 24450 	 loss = 0.117, train_acc = 1.000 (3.297 sec/step)
step 24460 	 loss = 0.776, train_acc = 0.700 (3.326 sec/step)
step 24470 	 loss = 0.233, train_acc = 0.900 (3.277 sec/step)
step 24480 	 loss = 0.016, train_acc = 1.000 (3.298 sec/step)
step 24490 	 loss = 0.231, train_acc = 0.900 (3.269 sec/step)
step 24500 	 loss = 0.020, train_acc = 1.000 (3.266 sec/step)
step 24510 	 loss = 0.014, train_acc = 1.000 (3.284 sec/step)
step 24520 	 loss = 0.407, train_acc = 0.800 (3.276 sec/step)
step 24530 	 loss = 0.591, train_acc = 0.800 (3.297 sec/step)
step 24540 	 loss = 0.012, train_acc = 1.000 (3.315 sec/step)
step 24550 	 loss = 0.464, train_acc = 0.800 (3.312 sec/step)
step 24560 	 loss = 0.635, train_acc = 0.900 (3.288 sec/step)
step 24570 	 loss = 0.000, train_acc = 1.000 (3.332 sec/step)
step 24580 	 loss = 0.267, train_acc = 0.800 (3.284 sec/step)
step 24590 	 loss = 0.675, train_acc = 0.800 (3.251 sec/step)
step 24600 	 loss = 0.030, train_acc = 1.000 (3.270 sec/step)
step 24610 	 loss = 0.271, train_acc = 0.800 (3.318 sec/step)
step 24620 	 loss = 0.012, train_acc = 1.000 (3.289 sec/step)
step 24630 	 loss = 0.074, train_acc = 1.000 (3.269 sec/step)
step 24640 	 loss = 0.503, train_acc = 0.900 (3.308 sec/step)
step 24650 	 loss = 0.031, train_acc = 1.000 (3.318 sec/step)
step 24660 	 loss = 1.496, train_acc = 0.700 (3.293 sec/step)
step 24670 	 loss = 0.231, train_acc = 0.900 (3.309 sec/step)
step 24680 	 loss = 0.018, train_acc = 1.000 (3.320 sec/step)
step 24690 	 loss = 0.050, train_acc = 1.000 (3.279 sec/step)
VALIDATION 	 acc = 0.561 (3.652 sec)
New Best Accuracy 0.561 > Old Best 0.540.  Saving...
The checkpoint has been created.
step 24700 	 loss = 0.408, train_acc = 0.900 (3.290 sec/step)
step 24710 	 loss = 0.298, train_acc = 0.800 (3.269 sec/step)
step 24720 	 loss = 0.994, train_acc = 0.800 (3.296 sec/step)
step 24730 	 loss = 0.826, train_acc = 0.700 (3.357 sec/step)
step 24740 	 loss = 0.626, train_acc = 0.800 (3.331 sec/step)
step 24750 	 loss = 4.854, train_acc = 0.300 (3.319 sec/step)
step 24760 	 loss = 0.070, train_acc = 1.000 (3.268 sec/step)
step 24770 	 loss = 0.263, train_acc = 1.000 (3.262 sec/step)
step 24780 	 loss = 0.022, train_acc = 1.000 (3.313 sec/step)
step 24790 	 loss = 0.206, train_acc = 1.000 (3.319 sec/step)
step 24800 	 loss = 0.395, train_acc = 0.900 (3.249 sec/step)
step 24810 	 loss = 0.055, train_acc = 1.000 (3.284 sec/step)
step 24820 	 loss = 0.427, train_acc = 0.800 (3.262 sec/step)
step 24830 	 loss = 0.052, train_acc = 1.000 (3.329 sec/step)
step 24840 	 loss = 0.076, train_acc = 1.000 (3.393 sec/step)
step 24850 	 loss = 0.137, train_acc = 1.000 (3.349 sec/step)
step 24860 	 loss = 0.406, train_acc = 0.900 (3.338 sec/step)
step 24870 	 loss = 0.037, train_acc = 1.000 (3.290 sec/step)
step 24880 	 loss = 1.634, train_acc = 0.700 (3.332 sec/step)
step 24890 	 loss = 0.227, train_acc = 0.900 (3.381 sec/step)
step 24900 	 loss = 0.358, train_acc = 0.900 (3.310 sec/step)
step 24910 	 loss = 0.034, train_acc = 1.000 (3.273 sec/step)
step 24920 	 loss = 0.377, train_acc = 0.800 (3.317 sec/step)
step 24930 	 loss = 0.512, train_acc = 0.900 (3.278 sec/step)
step 24940 	 loss = 0.008, train_acc = 1.000 (3.309 sec/step)
step 24950 	 loss = 0.068, train_acc = 1.000 (3.327 sec/step)
step 24960 	 loss = 0.175, train_acc = 0.900 (3.249 sec/step)
step 24970 	 loss = 1.015, train_acc = 0.800 (3.273 sec/step)
step 24980 	 loss = 0.017, train_acc = 1.000 (3.347 sec/step)
step 24990 	 loss = 0.107, train_acc = 1.000 (3.252 sec/step)
step 25000 	 loss = 0.441, train_acc = 0.900 (3.323 sec/step)
step 25010 	 loss = 0.835, train_acc = 0.800 (3.322 sec/step)
step 25020 	 loss = 0.182, train_acc = 0.900 (3.288 sec/step)
step 25030 	 loss = 0.053, train_acc = 1.000 (3.275 sec/step)
step 25040 	 loss = 0.074, train_acc = 1.000 (3.350 sec/step)
step 25050 	 loss = 0.394, train_acc = 0.900 (3.283 sec/step)
step 25060 	 loss = 0.225, train_acc = 0.800 (3.310 sec/step)
step 25070 	 loss = 0.532, train_acc = 0.900 (3.265 sec/step)
step 25080 	 loss = 0.001, train_acc = 1.000 (3.310 sec/step)
step 25090 	 loss = 2.732, train_acc = 0.900 (3.280 sec/step)
step 25100 	 loss = 0.803, train_acc = 0.800 (3.315 sec/step)
step 25110 	 loss = 0.824, train_acc = 0.800 (3.285 sec/step)
step 25120 	 loss = 0.668, train_acc = 0.800 (3.325 sec/step)
step 25130 	 loss = 0.129, train_acc = 0.900 (3.252 sec/step)
step 25140 	 loss = 0.363, train_acc = 0.900 (3.285 sec/step)
step 25150 	 loss = 0.558, train_acc = 0.900 (3.278 sec/step)
step 25160 	 loss = 0.017, train_acc = 1.000 (3.265 sec/step)
step 25170 	 loss = 0.523, train_acc = 0.900 (3.380 sec/step)
step 25180 	 loss = 0.499, train_acc = 0.900 (3.304 sec/step)
step 25190 	 loss = 0.432, train_acc = 0.800 (3.289 sec/step)
step 25200 	 loss = 0.009, train_acc = 1.000 (3.272 sec/step)
step 25210 	 loss = 0.006, train_acc = 1.000 (3.267 sec/step)
step 25220 	 loss = 0.060, train_acc = 1.000 (3.348 sec/step)
step 25230 	 loss = 0.216, train_acc = 0.900 (3.283 sec/step)
step 25240 	 loss = 0.721, train_acc = 0.800 (3.283 sec/step)
step 25250 	 loss = 0.276, train_acc = 0.900 (3.309 sec/step)
step 25260 	 loss = 0.290, train_acc = 0.800 (3.265 sec/step)
step 25270 	 loss = 0.183, train_acc = 1.000 (3.256 sec/step)
step 25280 	 loss = 0.075, train_acc = 1.000 (3.310 sec/step)
step 25290 	 loss = 0.077, train_acc = 1.000 (3.297 sec/step)
step 25300 	 loss = 0.076, train_acc = 1.000 (3.290 sec/step)
step 25310 	 loss = 0.150, train_acc = 0.900 (3.300 sec/step)
step 25320 	 loss = 0.847, train_acc = 0.700 (3.294 sec/step)
step 25330 	 loss = 0.448, train_acc = 0.800 (3.356 sec/step)
step 25340 	 loss = 0.152, train_acc = 0.900 (3.294 sec/step)
step 25350 	 loss = 0.227, train_acc = 0.900 (3.268 sec/step)
step 25360 	 loss = 0.701, train_acc = 0.700 (3.306 sec/step)
step 25370 	 loss = 0.134, train_acc = 1.000 (3.275 sec/step)
step 25380 	 loss = 0.001, train_acc = 1.000 (3.328 sec/step)
step 25390 	 loss = 0.003, train_acc = 1.000 (3.246 sec/step)
step 25400 	 loss = 0.777, train_acc = 0.800 (3.274 sec/step)
step 25410 	 loss = 0.294, train_acc = 0.900 (3.295 sec/step)
step 25420 	 loss = 0.240, train_acc = 0.900 (3.258 sec/step)
step 25430 	 loss = 0.350, train_acc = 0.900 (3.311 sec/step)
step 25440 	 loss = 0.335, train_acc = 0.800 (3.257 sec/step)
step 25450 	 loss = 0.058, train_acc = 1.000 (3.327 sec/step)
step 25460 	 loss = 0.191, train_acc = 0.900 (3.243 sec/step)
step 25470 	 loss = 0.014, train_acc = 1.000 (3.305 sec/step)
step 25480 	 loss = 0.147, train_acc = 0.900 (3.310 sec/step)
step 25490 	 loss = 0.038, train_acc = 1.000 (3.363 sec/step)
step 25500 	 loss = 0.147, train_acc = 0.900 (3.319 sec/step)
step 25510 	 loss = 0.113, train_acc = 0.900 (3.273 sec/step)
step 25520 	 loss = 0.070, train_acc = 1.000 (3.303 sec/step)
step 25530 	 loss = 0.003, train_acc = 1.000 (3.257 sec/step)
step 25540 	 loss = 0.297, train_acc = 0.800 (3.258 sec/step)
step 25550 	 loss = 0.836, train_acc = 0.700 (3.301 sec/step)
step 25560 	 loss = 0.061, train_acc = 1.000 (3.258 sec/step)
step 25570 	 loss = 0.090, train_acc = 1.000 (3.319 sec/step)
step 25580 	 loss = 0.009, train_acc = 1.000 (3.286 sec/step)
step 25590 	 loss = 0.022, train_acc = 1.000 (3.334 sec/step)
step 25600 	 loss = 0.631, train_acc = 0.800 (3.332 sec/step)
step 25610 	 loss = 0.150, train_acc = 0.900 (3.315 sec/step)
step 25620 	 loss = 0.005, train_acc = 1.000 (3.290 sec/step)
step 25630 	 loss = 0.068, train_acc = 1.000 (3.305 sec/step)
step 25640 	 loss = 0.838, train_acc = 0.800 (3.322 sec/step)
step 25650 	 loss = 0.010, train_acc = 1.000 (3.289 sec/step)
step 25660 	 loss = 0.410, train_acc = 0.900 (3.270 sec/step)
step 25670 	 loss = 3.790, train_acc = 0.700 (3.372 sec/step)
step 25680 	 loss = 0.010, train_acc = 1.000 (3.286 sec/step)
step 25690 	 loss = 0.669, train_acc = 0.800 (3.358 sec/step)
step 25700 	 loss = 0.155, train_acc = 1.000 (3.266 sec/step)
step 25710 	 loss = 0.542, train_acc = 0.800 (3.366 sec/step)
step 25720 	 loss = 0.103, train_acc = 1.000 (3.314 sec/step)
step 25730 	 loss = 0.191, train_acc = 0.900 (3.337 sec/step)
step 25740 	 loss = 1.357, train_acc = 0.600 (3.286 sec/step)
step 25750 	 loss = 0.577, train_acc = 0.800 (3.302 sec/step)
step 25760 	 loss = 0.616, train_acc = 0.800 (3.286 sec/step)
step 25770 	 loss = 0.115, train_acc = 0.900 (3.262 sec/step)
step 25780 	 loss = 0.490, train_acc = 0.800 (3.280 sec/step)
step 25790 	 loss = 0.194, train_acc = 0.800 (3.291 sec/step)
step 25800 	 loss = 1.110, train_acc = 0.800 (3.267 sec/step)
step 25810 	 loss = 0.530, train_acc = 0.900 (3.325 sec/step)
step 25820 	 loss = 0.479, train_acc = 0.900 (3.266 sec/step)
step 25830 	 loss = 1.104, train_acc = 0.700 (3.289 sec/step)
step 25840 	 loss = 0.920, train_acc = 0.800 (3.280 sec/step)
step 25850 	 loss = 0.268, train_acc = 0.900 (3.301 sec/step)
step 25860 	 loss = 0.131, train_acc = 0.900 (3.338 sec/step)
step 25870 	 loss = 0.532, train_acc = 0.800 (3.318 sec/step)
step 25880 	 loss = 0.188, train_acc = 0.900 (3.317 sec/step)
step 25890 	 loss = 0.338, train_acc = 0.900 (3.273 sec/step)
step 25900 	 loss = 0.216, train_acc = 0.900 (3.315 sec/step)
step 25910 	 loss = 0.366, train_acc = 0.800 (3.284 sec/step)
step 25920 	 loss = 0.265, train_acc = 0.800 (3.303 sec/step)
step 25930 	 loss = 0.502, train_acc = 0.900 (3.291 sec/step)
step 25940 	 loss = 0.748, train_acc = 0.900 (3.349 sec/step)
step 25950 	 loss = 0.130, train_acc = 1.000 (3.321 sec/step)
step 25960 	 loss = 0.306, train_acc = 0.900 (3.277 sec/step)
step 25970 	 loss = 0.079, train_acc = 1.000 (3.302 sec/step)
step 25980 	 loss = 0.185, train_acc = 0.900 (3.269 sec/step)
step 25990 	 loss = 0.001, train_acc = 1.000 (3.304 sec/step)
step 26000 	 loss = 0.000, train_acc = 1.000 (3.294 sec/step)
step 26010 	 loss = 0.052, train_acc = 1.000 (3.314 sec/step)
step 26020 	 loss = 0.036, train_acc = 1.000 (3.271 sec/step)
step 26030 	 loss = 0.123, train_acc = 1.000 (3.334 sec/step)
step 26040 	 loss = 0.393, train_acc = 0.800 (3.397 sec/step)
step 26050 	 loss = 0.083, train_acc = 1.000 (3.346 sec/step)
step 26060 	 loss = 1.075, train_acc = 0.600 (3.310 sec/step)
step 26070 	 loss = 0.015, train_acc = 1.000 (3.325 sec/step)
step 26080 	 loss = 0.314, train_acc = 0.900 (3.337 sec/step)
step 26090 	 loss = 0.459, train_acc = 0.900 (3.264 sec/step)
step 26100 	 loss = 0.051, train_acc = 1.000 (3.302 sec/step)
step 26110 	 loss = 0.053, train_acc = 1.000 (3.392 sec/step)
step 26120 	 loss = 0.046, train_acc = 1.000 (3.317 sec/step)
step 26130 	 loss = 0.000, train_acc = 1.000 (3.372 sec/step)
step 26140 	 loss = 0.619, train_acc = 0.700 (3.339 sec/step)
step 26150 	 loss = 0.253, train_acc = 0.900 (3.318 sec/step)
step 26160 	 loss = 0.201, train_acc = 0.900 (3.256 sec/step)
step 26170 	 loss = 1.123, train_acc = 0.700 (3.298 sec/step)
step 26180 	 loss = 0.796, train_acc = 0.900 (3.322 sec/step)
step 26190 	 loss = 0.023, train_acc = 1.000 (3.286 sec/step)
step 26200 	 loss = 0.038, train_acc = 1.000 (3.267 sec/step)
step 26210 	 loss = 0.109, train_acc = 1.000 (3.281 sec/step)
step 26220 	 loss = 0.011, train_acc = 1.000 (3.369 sec/step)
step 26230 	 loss = 0.007, train_acc = 1.000 (3.333 sec/step)
step 26240 	 loss = 0.274, train_acc = 0.900 (3.278 sec/step)
step 26250 	 loss = 0.111, train_acc = 0.900 (3.357 sec/step)
step 26260 	 loss = 0.050, train_acc = 1.000 (3.302 sec/step)
step 26270 	 loss = 0.886, train_acc = 0.600 (3.306 sec/step)
step 26280 	 loss = 0.009, train_acc = 1.000 (3.287 sec/step)
step 26290 	 loss = 0.021, train_acc = 1.000 (3.314 sec/step)
step 26300 	 loss = 0.536, train_acc = 0.900 (3.272 sec/step)
step 26310 	 loss = 0.213, train_acc = 1.000 (3.312 sec/step)
step 26320 	 loss = 0.217, train_acc = 0.900 (3.272 sec/step)
step 26330 	 loss = 0.281, train_acc = 0.900 (3.288 sec/step)
step 26340 	 loss = 0.610, train_acc = 0.900 (3.315 sec/step)
step 26350 	 loss = 0.512, train_acc = 0.900 (3.277 sec/step)
step 26360 	 loss = 0.069, train_acc = 1.000 (3.369 sec/step)
step 26370 	 loss = 0.189, train_acc = 0.900 (3.316 sec/step)
step 26380 	 loss = 0.234, train_acc = 0.900 (3.252 sec/step)
step 26390 	 loss = 0.270, train_acc = 0.900 (3.297 sec/step)
step 26400 	 loss = 0.084, train_acc = 1.000 (3.361 sec/step)
step 26410 	 loss = 0.535, train_acc = 0.900 (3.313 sec/step)
step 26420 	 loss = 0.303, train_acc = 0.900 (3.307 sec/step)
step 26430 	 loss = 0.025, train_acc = 1.000 (3.279 sec/step)
step 26440 	 loss = 0.019, train_acc = 1.000 (3.333 sec/step)
step 26450 	 loss = 0.685, train_acc = 0.800 (3.356 sec/step)
step 26460 	 loss = 0.200, train_acc = 0.800 (3.300 sec/step)
step 26470 	 loss = 0.050, train_acc = 1.000 (3.277 sec/step)
step 26480 	 loss = 0.282, train_acc = 0.900 (3.296 sec/step)
step 26490 	 loss = 0.079, train_acc = 1.000 (3.278 sec/step)
step 26500 	 loss = 0.005, train_acc = 1.000 (3.319 sec/step)
step 26510 	 loss = 0.137, train_acc = 1.000 (3.283 sec/step)
step 26520 	 loss = 0.169, train_acc = 0.900 (3.354 sec/step)
step 26530 	 loss = 0.406, train_acc = 0.900 (3.261 sec/step)
step 26540 	 loss = 1.469, train_acc = 0.600 (3.319 sec/step)
step 26550 	 loss = 0.111, train_acc = 1.000 (3.349 sec/step)
step 26560 	 loss = 0.845, train_acc = 0.800 (3.296 sec/step)
step 26570 	 loss = 0.115, train_acc = 1.000 (3.317 sec/step)
step 26580 	 loss = 0.174, train_acc = 1.000 (3.312 sec/step)
step 26590 	 loss = 0.034, train_acc = 1.000 (3.292 sec/step)
VALIDATION 	 acc = 0.534 (3.638 sec)
step 26600 	 loss = 0.040, train_acc = 1.000 (3.245 sec/step)
step 26610 	 loss = 0.282, train_acc = 0.900 (3.273 sec/step)
step 26620 	 loss = 0.033, train_acc = 1.000 (3.291 sec/step)
step 26630 	 loss = 0.116, train_acc = 0.900 (3.278 sec/step)
step 26640 	 loss = 0.108, train_acc = 0.900 (3.314 sec/step)
step 26650 	 loss = 0.034, train_acc = 1.000 (3.387 sec/step)
step 26660 	 loss = 0.027, train_acc = 1.000 (3.311 sec/step)
step 26670 	 loss = 0.002, train_acc = 1.000 (3.276 sec/step)
step 26680 	 loss = 0.851, train_acc = 0.700 (3.343 sec/step)
step 26690 	 loss = 0.077, train_acc = 1.000 (3.271 sec/step)
step 26700 	 loss = 0.311, train_acc = 0.900 (3.302 sec/step)
step 26710 	 loss = 0.064, train_acc = 1.000 (3.317 sec/step)
step 26720 	 loss = 0.684, train_acc = 0.700 (3.282 sec/step)
step 26730 	 loss = 0.770, train_acc = 0.900 (3.421 sec/step)
step 26740 	 loss = 0.012, train_acc = 1.000 (3.274 sec/step)
step 26750 	 loss = 0.018, train_acc = 1.000 (3.277 sec/step)
step 26760 	 loss = 1.500, train_acc = 0.800 (3.266 sec/step)
step 26770 	 loss = 0.220, train_acc = 0.900 (3.270 sec/step)
step 26780 	 loss = 0.228, train_acc = 0.900 (3.282 sec/step)
step 26790 	 loss = 0.634, train_acc = 0.800 (3.341 sec/step)
step 26800 	 loss = 0.364, train_acc = 0.800 (3.305 sec/step)
step 26810 	 loss = 0.062, train_acc = 1.000 (3.312 sec/step)
step 26820 	 loss = 0.207, train_acc = 0.900 (3.357 sec/step)
step 26830 	 loss = 0.103, train_acc = 1.000 (3.263 sec/step)
step 26840 	 loss = 1.127, train_acc = 0.700 (3.313 sec/step)
step 26850 	 loss = 0.081, train_acc = 1.000 (3.267 sec/step)
step 26860 	 loss = 0.054, train_acc = 1.000 (3.321 sec/step)
step 26870 	 loss = 0.005, train_acc = 1.000 (3.343 sec/step)
step 26880 	 loss = 0.014, train_acc = 1.000 (3.251 sec/step)
step 26890 	 loss = 0.006, train_acc = 1.000 (3.293 sec/step)
step 26900 	 loss = 0.150, train_acc = 0.900 (3.264 sec/step)
step 26910 	 loss = 0.162, train_acc = 0.900 (3.294 sec/step)
step 26920 	 loss = 0.852, train_acc = 0.800 (3.283 sec/step)
step 26930 	 loss = 0.107, train_acc = 1.000 (3.298 sec/step)
step 26940 	 loss = 0.759, train_acc = 0.800 (3.294 sec/step)
step 26950 	 loss = 0.043, train_acc = 1.000 (3.307 sec/step)
step 26960 	 loss = 1.671, train_acc = 0.600 (3.336 sec/step)
step 26970 	 loss = 0.168, train_acc = 0.900 (3.343 sec/step)
step 26980 	 loss = 0.053, train_acc = 1.000 (3.403 sec/step)
step 26990 	 loss = 0.066, train_acc = 1.000 (3.298 sec/step)
step 27000 	 loss = 0.586, train_acc = 0.800 (3.290 sec/step)
step 27010 	 loss = 0.054, train_acc = 1.000 (3.245 sec/step)
step 27020 	 loss = 0.003, train_acc = 1.000 (3.275 sec/step)
step 27030 	 loss = 0.270, train_acc = 0.900 (3.285 sec/step)
step 27040 	 loss = 0.142, train_acc = 0.900 (3.268 sec/step)
step 27050 	 loss = 1.173, train_acc = 0.700 (3.299 sec/step)
step 27060 	 loss = 0.992, train_acc = 0.700 (3.304 sec/step)
step 27070 	 loss = 0.035, train_acc = 1.000 (3.275 sec/step)
step 27080 	 loss = 0.212, train_acc = 0.900 (3.319 sec/step)
step 27090 	 loss = 0.438, train_acc = 0.800 (3.343 sec/step)
step 27100 	 loss = 0.484, train_acc = 0.900 (3.318 sec/step)
step 27110 	 loss = 0.211, train_acc = 1.000 (3.321 sec/step)
step 27120 	 loss = 0.605, train_acc = 0.800 (3.299 sec/step)
step 27130 	 loss = 0.500, train_acc = 0.800 (3.354 sec/step)
step 27140 	 loss = 0.839, train_acc = 0.800 (3.280 sec/step)
step 27150 	 loss = 0.002, train_acc = 1.000 (3.287 sec/step)
step 27160 	 loss = 0.018, train_acc = 1.000 (3.253 sec/step)
step 27170 	 loss = 0.002, train_acc = 1.000 (3.305 sec/step)
step 27180 	 loss = 0.697, train_acc = 0.700 (3.283 sec/step)
step 27190 	 loss = 0.289, train_acc = 0.800 (3.268 sec/step)
step 27200 	 loss = 0.089, train_acc = 1.000 (3.330 sec/step)
step 27210 	 loss = 0.312, train_acc = 0.900 (3.341 sec/step)
step 27220 	 loss = 0.012, train_acc = 1.000 (3.241 sec/step)
step 27230 	 loss = 0.157, train_acc = 1.000 (3.256 sec/step)
step 27240 	 loss = 0.302, train_acc = 0.900 (3.307 sec/step)
step 27250 	 loss = 0.133, train_acc = 0.900 (3.298 sec/step)
step 27260 	 loss = 0.135, train_acc = 1.000 (3.291 sec/step)
step 27270 	 loss = 0.094, train_acc = 1.000 (3.313 sec/step)
step 27280 	 loss = 0.009, train_acc = 1.000 (3.288 sec/step)
step 27290 	 loss = 0.086, train_acc = 1.000 (3.308 sec/step)
step 27300 	 loss = 0.362, train_acc = 0.800 (3.275 sec/step)
step 27310 	 loss = 0.143, train_acc = 1.000 (3.285 sec/step)
step 27320 	 loss = 0.009, train_acc = 1.000 (3.281 sec/step)
step 27330 	 loss = 1.141, train_acc = 0.800 (3.294 sec/step)
step 27340 	 loss = 0.404, train_acc = 0.800 (3.286 sec/step)
step 27350 	 loss = 0.186, train_acc = 0.900 (3.314 sec/step)
step 27360 	 loss = 1.770, train_acc = 0.700 (3.310 sec/step)
step 27370 	 loss = 0.946, train_acc = 0.700 (3.268 sec/step)
step 27380 	 loss = 0.374, train_acc = 0.800 (3.319 sec/step)
step 27390 	 loss = 0.102, train_acc = 0.900 (3.346 sec/step)
step 27400 	 loss = 1.193, train_acc = 0.900 (3.252 sec/step)
step 27410 	 loss = 0.690, train_acc = 0.800 (3.355 sec/step)
step 27420 	 loss = 0.471, train_acc = 0.900 (3.328 sec/step)
step 27430 	 loss = 0.162, train_acc = 1.000 (3.319 sec/step)
step 27440 	 loss = 0.317, train_acc = 0.900 (3.290 sec/step)
step 27450 	 loss = 0.657, train_acc = 0.800 (3.283 sec/step)
step 27460 	 loss = 0.190, train_acc = 0.900 (3.324 sec/step)
step 27470 	 loss = 0.293, train_acc = 0.900 (3.319 sec/step)
step 27480 	 loss = 0.000, train_acc = 1.000 (3.266 sec/step)
step 27490 	 loss = 0.008, train_acc = 1.000 (3.455 sec/step)
step 27500 	 loss = 0.039, train_acc = 1.000 (3.280 sec/step)
step 27510 	 loss = 0.116, train_acc = 1.000 (3.319 sec/step)
step 27520 	 loss = 0.043, train_acc = 1.000 (3.294 sec/step)
step 27530 	 loss = 0.118, train_acc = 0.900 (3.298 sec/step)
step 27540 	 loss = 0.018, train_acc = 1.000 (3.311 sec/step)
step 27550 	 loss = 0.008, train_acc = 1.000 (3.269 sec/step)
step 27560 	 loss = 0.032, train_acc = 1.000 (3.282 sec/step)
step 27570 	 loss = 0.066, train_acc = 1.000 (3.290 sec/step)
step 27580 	 loss = 0.245, train_acc = 0.900 (3.301 sec/step)
step 27590 	 loss = 0.022, train_acc = 1.000 (3.344 sec/step)
step 27600 	 loss = 0.186, train_acc = 0.900 (3.295 sec/step)
step 27610 	 loss = 0.156, train_acc = 1.000 (3.285 sec/step)
step 27620 	 loss = 0.044, train_acc = 1.000 (3.295 sec/step)
step 27630 	 loss = 0.412, train_acc = 0.800 (3.307 sec/step)
step 27640 	 loss = 0.393, train_acc = 0.900 (3.273 sec/step)
step 27650 	 loss = 0.066, train_acc = 1.000 (3.284 sec/step)
step 27660 	 loss = 0.608, train_acc = 0.900 (3.289 sec/step)
step 27670 	 loss = 0.008, train_acc = 1.000 (3.296 sec/step)
step 27680 	 loss = 0.098, train_acc = 1.000 (3.326 sec/step)
step 27690 	 loss = 0.096, train_acc = 1.000 (3.292 sec/step)
step 27700 	 loss = 0.171, train_acc = 0.900 (3.295 sec/step)
step 27710 	 loss = 1.004, train_acc = 0.800 (3.266 sec/step)
step 27720 	 loss = 0.127, train_acc = 0.900 (3.280 sec/step)
step 27730 	 loss = 0.689, train_acc = 0.800 (3.317 sec/step)
step 27740 	 loss = 0.437, train_acc = 0.800 (3.272 sec/step)
step 27750 	 loss = 0.145, train_acc = 0.900 (3.302 sec/step)
step 27760 	 loss = 0.048, train_acc = 1.000 (3.354 sec/step)
step 27770 	 loss = 0.145, train_acc = 0.900 (3.291 sec/step)
step 27780 	 loss = 0.113, train_acc = 1.000 (3.298 sec/step)
step 27790 	 loss = 0.166, train_acc = 1.000 (3.255 sec/step)
step 27800 	 loss = 1.047, train_acc = 0.700 (3.294 sec/step)
step 27810 	 loss = 0.074, train_acc = 1.000 (3.341 sec/step)
step 27820 	 loss = 0.023, train_acc = 1.000 (3.260 sec/step)
step 27830 	 loss = 0.105, train_acc = 0.900 (3.283 sec/step)
step 27840 	 loss = 1.189, train_acc = 0.600 (3.277 sec/step)
step 27850 	 loss = 0.115, train_acc = 1.000 (3.345 sec/step)
step 27860 	 loss = 0.907, train_acc = 0.900 (3.279 sec/step)
step 27870 	 loss = 0.056, train_acc = 1.000 (3.290 sec/step)
step 27880 	 loss = 0.009, train_acc = 1.000 (3.280 sec/step)
step 27890 	 loss = 0.401, train_acc = 0.800 (3.291 sec/step)
step 27900 	 loss = 0.008, train_acc = 1.000 (3.305 sec/step)
step 27910 	 loss = 0.347, train_acc = 0.900 (3.311 sec/step)
step 27920 	 loss = 1.764, train_acc = 0.600 (3.300 sec/step)
step 27930 	 loss = 0.297, train_acc = 0.900 (3.263 sec/step)
step 27940 	 loss = 0.745, train_acc = 0.800 (3.246 sec/step)
step 27950 	 loss = 0.953, train_acc = 0.600 (3.312 sec/step)
step 27960 	 loss = 1.301, train_acc = 0.700 (3.285 sec/step)
step 27970 	 loss = 0.038, train_acc = 1.000 (3.305 sec/step)
step 27980 	 loss = 0.038, train_acc = 1.000 (3.329 sec/step)
step 27990 	 loss = 0.722, train_acc = 0.600 (3.275 sec/step)
step 28000 	 loss = 0.418, train_acc = 0.800 (3.281 sec/step)
step 28010 	 loss = 0.045, train_acc = 1.000 (3.324 sec/step)
step 28020 	 loss = 0.748, train_acc = 0.900 (3.327 sec/step)
step 28030 	 loss = 0.180, train_acc = 0.900 (3.398 sec/step)
step 28040 	 loss = 0.058, train_acc = 1.000 (3.286 sec/step)
step 28050 	 loss = 0.188, train_acc = 0.900 (3.291 sec/step)
step 28060 	 loss = 0.191, train_acc = 1.000 (3.294 sec/step)
step 28070 	 loss = 0.168, train_acc = 0.900 (3.327 sec/step)
step 28080 	 loss = 0.284, train_acc = 0.900 (3.259 sec/step)
step 28090 	 loss = 0.159, train_acc = 0.900 (3.349 sec/step)
step 28100 	 loss = 0.013, train_acc = 1.000 (3.293 sec/step)
step 28110 	 loss = 0.010, train_acc = 1.000 (3.288 sec/step)
step 28120 	 loss = 0.397, train_acc = 0.900 (3.291 sec/step)
step 28130 	 loss = 0.178, train_acc = 0.900 (3.287 sec/step)
step 28140 	 loss = 0.017, train_acc = 1.000 (3.308 sec/step)
step 28150 	 loss = 0.057, train_acc = 1.000 (3.291 sec/step)
step 28160 	 loss = 0.400, train_acc = 0.800 (3.322 sec/step)
step 28170 	 loss = 0.164, train_acc = 0.900 (3.293 sec/step)
step 28180 	 loss = 0.466, train_acc = 0.900 (3.337 sec/step)
step 28190 	 loss = 0.200, train_acc = 0.900 (3.296 sec/step)
step 28200 	 loss = 1.158, train_acc = 0.700 (3.266 sec/step)
step 28210 	 loss = 0.187, train_acc = 1.000 (3.308 sec/step)
step 28220 	 loss = 0.055, train_acc = 1.000 (3.294 sec/step)
step 28230 	 loss = 0.829, train_acc = 0.800 (3.294 sec/step)
step 28240 	 loss = 0.606, train_acc = 0.800 (3.363 sec/step)
step 28250 	 loss = 0.427, train_acc = 0.800 (3.312 sec/step)
step 28260 	 loss = 0.168, train_acc = 1.000 (3.299 sec/step)
step 28270 	 loss = 0.032, train_acc = 1.000 (3.318 sec/step)
step 28280 	 loss = 3.068, train_acc = 0.800 (3.284 sec/step)
step 28290 	 loss = 0.032, train_acc = 1.000 (3.307 sec/step)
step 28300 	 loss = 0.291, train_acc = 0.800 (3.295 sec/step)
step 28310 	 loss = 0.313, train_acc = 0.900 (3.293 sec/step)
step 28320 	 loss = 0.036, train_acc = 1.000 (3.284 sec/step)
step 28330 	 loss = 0.221, train_acc = 1.000 (3.328 sec/step)
step 28340 	 loss = 0.288, train_acc = 0.900 (3.327 sec/step)
step 28350 	 loss = 0.026, train_acc = 1.000 (3.286 sec/step)
step 28360 	 loss = 0.140, train_acc = 1.000 (3.285 sec/step)
step 28370 	 loss = 0.039, train_acc = 1.000 (3.317 sec/step)
step 28380 	 loss = 0.089, train_acc = 1.000 (3.289 sec/step)
step 28390 	 loss = 0.168, train_acc = 0.900 (3.300 sec/step)
step 28400 	 loss = 0.203, train_acc = 0.900 (3.334 sec/step)
step 28410 	 loss = 0.168, train_acc = 1.000 (3.263 sec/step)
step 28420 	 loss = 0.054, train_acc = 1.000 (3.303 sec/step)
step 28430 	 loss = 0.527, train_acc = 0.900 (3.264 sec/step)
step 28440 	 loss = 0.117, train_acc = 0.900 (3.340 sec/step)
step 28450 	 loss = 0.014, train_acc = 1.000 (3.334 sec/step)
step 28460 	 loss = 0.839, train_acc = 0.800 (3.255 sec/step)
step 28470 	 loss = 0.031, train_acc = 1.000 (3.325 sec/step)
step 28480 	 loss = 0.017, train_acc = 1.000 (3.299 sec/step)
step 28490 	 loss = 0.061, train_acc = 1.000 (3.371 sec/step)
VALIDATION 	 acc = 0.534 (3.643 sec)
step 28500 	 loss = 1.687, train_acc = 0.800 (3.299 sec/step)
step 28510 	 loss = 0.892, train_acc = 0.800 (3.288 sec/step)
step 28520 	 loss = 0.706, train_acc = 0.900 (3.280 sec/step)
step 28530 	 loss = 0.346, train_acc = 0.900 (3.335 sec/step)
step 28540 	 loss = 0.017, train_acc = 1.000 (3.355 sec/step)
step 28550 	 loss = 0.626, train_acc = 0.700 (3.284 sec/step)
step 28560 	 loss = 0.085, train_acc = 0.900 (3.271 sec/step)
step 28570 	 loss = 0.094, train_acc = 1.000 (3.307 sec/step)
step 28580 	 loss = 0.217, train_acc = 0.900 (3.324 sec/step)
step 28590 	 loss = 0.321, train_acc = 0.900 (3.370 sec/step)
step 28600 	 loss = 0.981, train_acc = 0.900 (3.286 sec/step)
step 28610 	 loss = 0.297, train_acc = 0.900 (3.279 sec/step)
step 28620 	 loss = 0.383, train_acc = 0.900 (3.305 sec/step)
step 28630 	 loss = 0.473, train_acc = 0.800 (3.344 sec/step)
step 28640 	 loss = 0.025, train_acc = 1.000 (3.317 sec/step)
step 28650 	 loss = 0.138, train_acc = 1.000 (3.343 sec/step)
step 28660 	 loss = 0.170, train_acc = 0.900 (3.271 sec/step)
step 28670 	 loss = 0.016, train_acc = 1.000 (3.290 sec/step)
step 28680 	 loss = 0.014, train_acc = 1.000 (3.289 sec/step)
step 28690 	 loss = 0.088, train_acc = 1.000 (3.361 sec/step)
step 28700 	 loss = 0.816, train_acc = 0.700 (3.282 sec/step)
step 28710 	 loss = 0.071, train_acc = 1.000 (3.265 sec/step)
step 28720 	 loss = 0.577, train_acc = 0.900 (3.356 sec/step)
step 28730 	 loss = 0.685, train_acc = 0.800 (3.289 sec/step)
step 28740 	 loss = 0.064, train_acc = 1.000 (3.339 sec/step)
step 28750 	 loss = 0.057, train_acc = 1.000 (3.283 sec/step)
step 28760 	 loss = 0.153, train_acc = 1.000 (3.287 sec/step)
step 28770 	 loss = 0.668, train_acc = 0.800 (3.305 sec/step)
step 28780 	 loss = 0.364, train_acc = 0.900 (3.284 sec/step)
step 28790 	 loss = 0.211, train_acc = 0.900 (3.309 sec/step)
step 28800 	 loss = 0.977, train_acc = 0.900 (3.313 sec/step)
step 28810 	 loss = 0.455, train_acc = 0.900 (3.290 sec/step)
step 28820 	 loss = 0.794, train_acc = 0.700 (3.257 sec/step)
step 28830 	 loss = 0.070, train_acc = 1.000 (3.281 sec/step)
step 28840 	 loss = 0.144, train_acc = 0.900 (3.266 sec/step)
step 28850 	 loss = 0.201, train_acc = 1.000 (3.339 sec/step)
step 28860 	 loss = 0.456, train_acc = 0.900 (3.322 sec/step)
step 28870 	 loss = 0.063, train_acc = 1.000 (3.352 sec/step)
step 28880 	 loss = 0.003, train_acc = 1.000 (3.328 sec/step)
step 28890 	 loss = 0.358, train_acc = 0.900 (3.337 sec/step)
step 28900 	 loss = 2.990, train_acc = 0.700 (3.342 sec/step)
step 28910 	 loss = 0.359, train_acc = 0.900 (3.268 sec/step)
step 28920 	 loss = 0.088, train_acc = 1.000 (3.339 sec/step)
step 28930 	 loss = 0.048, train_acc = 1.000 (3.317 sec/step)
step 28940 	 loss = 0.070, train_acc = 1.000 (3.271 sec/step)
step 28950 	 loss = 0.244, train_acc = 0.900 (3.295 sec/step)
step 28960 	 loss = 0.338, train_acc = 0.900 (3.349 sec/step)
step 28970 	 loss = 0.247, train_acc = 0.900 (3.284 sec/step)
step 28980 	 loss = 0.007, train_acc = 1.000 (3.300 sec/step)
step 28990 	 loss = 0.540, train_acc = 0.900 (3.282 sec/step)
step 29000 	 loss = 0.023, train_acc = 1.000 (3.349 sec/step)
step 29010 	 loss = 0.146, train_acc = 0.900 (3.305 sec/step)
step 29020 	 loss = 0.459, train_acc = 0.700 (3.299 sec/step)
step 29030 	 loss = 0.911, train_acc = 0.700 (3.305 sec/step)
step 29040 	 loss = 0.006, train_acc = 1.000 (3.307 sec/step)
step 29050 	 loss = 0.001, train_acc = 1.000 (3.285 sec/step)
step 29060 	 loss = 0.863, train_acc = 0.800 (3.259 sec/step)
step 29070 	 loss = 0.146, train_acc = 1.000 (3.335 sec/step)
step 29080 	 loss = 0.174, train_acc = 0.900 (3.279 sec/step)
step 29090 	 loss = 0.104, train_acc = 1.000 (3.302 sec/step)
step 29100 	 loss = 2.195, train_acc = 0.600 (3.356 sec/step)
step 29110 	 loss = 0.521, train_acc = 0.900 (3.323 sec/step)
step 29120 	 loss = 0.093, train_acc = 1.000 (3.275 sec/step)
step 29130 	 loss = 1.625, train_acc = 0.800 (3.277 sec/step)
step 29140 	 loss = 0.836, train_acc = 0.900 (3.313 sec/step)
step 29150 	 loss = 0.438, train_acc = 0.800 (3.311 sec/step)
step 29160 	 loss = 1.549, train_acc = 0.800 (3.409 sec/step)
step 29170 	 loss = 0.157, train_acc = 1.000 (3.281 sec/step)
step 29180 	 loss = 0.300, train_acc = 0.800 (3.301 sec/step)
step 29190 	 loss = 2.745, train_acc = 0.600 (3.258 sec/step)
step 29200 	 loss = 0.469, train_acc = 0.800 (3.272 sec/step)
step 29210 	 loss = 0.211, train_acc = 0.900 (3.291 sec/step)
step 29220 	 loss = 0.088, train_acc = 1.000 (3.286 sec/step)
step 29230 	 loss = 0.037, train_acc = 1.000 (3.290 sec/step)
step 29240 	 loss = 0.028, train_acc = 1.000 (3.288 sec/step)
step 29250 	 loss = 0.148, train_acc = 0.900 (3.282 sec/step)
step 29260 	 loss = 0.007, train_acc = 1.000 (3.327 sec/step)
step 29270 	 loss = 0.221, train_acc = 0.900 (3.250 sec/step)
step 29280 	 loss = 0.337, train_acc = 0.900 (3.311 sec/step)
step 29290 	 loss = 0.126, train_acc = 0.900 (3.333 sec/step)
step 29300 	 loss = 0.013, train_acc = 1.000 (3.276 sec/step)
step 29310 	 loss = 0.005, train_acc = 1.000 (3.365 sec/step)
step 29320 	 loss = 0.052, train_acc = 1.000 (3.303 sec/step)
step 29330 	 loss = 0.062, train_acc = 1.000 (3.289 sec/step)
step 29340 	 loss = 2.248, train_acc = 0.600 (3.319 sec/step)
step 29350 	 loss = 0.316, train_acc = 0.900 (3.350 sec/step)
step 29360 	 loss = 0.687, train_acc = 0.800 (3.344 sec/step)
step 29370 	 loss = 0.195, train_acc = 0.900 (3.322 sec/step)
step 29380 	 loss = 0.059, train_acc = 1.000 (3.332 sec/step)
step 29390 	 loss = 0.488, train_acc = 0.900 (3.288 sec/step)
step 29400 	 loss = 0.015, train_acc = 1.000 (3.333 sec/step)
step 29410 	 loss = 0.571, train_acc = 0.800 (3.277 sec/step)
step 29420 	 loss = 0.005, train_acc = 1.000 (3.262 sec/step)
step 29430 	 loss = 0.181, train_acc = 0.900 (3.338 sec/step)
step 29440 	 loss = 0.007, train_acc = 1.000 (3.325 sec/step)
step 29450 	 loss = 0.226, train_acc = 0.900 (3.257 sec/step)
step 29460 	 loss = 0.185, train_acc = 1.000 (3.350 sec/step)
step 29470 	 loss = 0.031, train_acc = 1.000 (3.355 sec/step)
step 29480 	 loss = 0.037, train_acc = 1.000 (3.281 sec/step)
step 29490 	 loss = 0.375, train_acc = 0.800 (3.350 sec/step)
step 29500 	 loss = 0.030, train_acc = 1.000 (3.396 sec/step)
step 29510 	 loss = 0.005, train_acc = 1.000 (3.307 sec/step)
step 29520 	 loss = 0.053, train_acc = 1.000 (3.306 sec/step)
step 29530 	 loss = 0.108, train_acc = 0.900 (3.244 sec/step)
step 29540 	 loss = 0.309, train_acc = 0.900 (3.307 sec/step)
step 29550 	 loss = 0.154, train_acc = 0.900 (3.315 sec/step)
step 29560 	 loss = 0.159, train_acc = 0.900 (3.298 sec/step)
step 29570 	 loss = 0.091, train_acc = 1.000 (3.343 sec/step)
step 29580 	 loss = 0.101, train_acc = 1.000 (3.274 sec/step)
step 29590 	 loss = 0.711, train_acc = 0.900 (3.358 sec/step)
step 29600 	 loss = 1.009, train_acc = 0.800 (3.318 sec/step)
step 29610 	 loss = 0.005, train_acc = 1.000 (3.316 sec/step)
step 29620 	 loss = 0.673, train_acc = 0.900 (3.255 sec/step)
step 29630 	 loss = 0.140, train_acc = 0.900 (3.331 sec/step)
step 29640 	 loss = 0.879, train_acc = 0.900 (3.269 sec/step)
step 29650 	 loss = 0.118, train_acc = 1.000 (3.325 sec/step)
step 29660 	 loss = 0.069, train_acc = 1.000 (3.333 sec/step)
step 29670 	 loss = 1.149, train_acc = 0.600 (3.308 sec/step)
step 29680 	 loss = 0.151, train_acc = 1.000 (3.357 sec/step)
step 29690 	 loss = 0.236, train_acc = 0.900 (3.298 sec/step)
step 29700 	 loss = 0.173, train_acc = 1.000 (3.272 sec/step)
step 29710 	 loss = 0.100, train_acc = 1.000 (3.282 sec/step)
step 29720 	 loss = 0.301, train_acc = 0.900 (3.280 sec/step)
step 29730 	 loss = 0.235, train_acc = 0.900 (3.269 sec/step)
step 29740 	 loss = 0.036, train_acc = 1.000 (3.310 sec/step)
step 29750 	 loss = 0.933, train_acc = 0.700 (3.276 sec/step)
step 29760 	 loss = 0.096, train_acc = 1.000 (3.338 sec/step)
step 29770 	 loss = 0.063, train_acc = 1.000 (3.311 sec/step)
step 29780 	 loss = 0.003, train_acc = 1.000 (3.265 sec/step)
step 29790 	 loss = 0.172, train_acc = 1.000 (3.311 sec/step)
step 29800 	 loss = 0.002, train_acc = 1.000 (3.299 sec/step)
step 29810 	 loss = 0.260, train_acc = 0.900 (3.322 sec/step)
step 29820 	 loss = 0.907, train_acc = 0.800 (3.344 sec/step)
step 29830 	 loss = 0.284, train_acc = 0.900 (3.265 sec/step)
step 29840 	 loss = 0.028, train_acc = 1.000 (3.250 sec/step)
step 29850 	 loss = 0.660, train_acc = 0.800 (3.296 sec/step)
step 29860 	 loss = 0.551, train_acc = 0.900 (3.319 sec/step)
step 29870 	 loss = 0.356, train_acc = 0.900 (3.294 sec/step)
step 29880 	 loss = 0.464, train_acc = 0.800 (3.306 sec/step)
step 29890 	 loss = 0.074, train_acc = 1.000 (3.404 sec/step)
step 29900 	 loss = 0.629, train_acc = 0.900 (3.351 sec/step)
step 29910 	 loss = 0.272, train_acc = 0.900 (3.310 sec/step)
step 29920 	 loss = 0.507, train_acc = 0.900 (3.311 sec/step)
step 29930 	 loss = 0.656, train_acc = 0.800 (3.286 sec/step)
step 29940 	 loss = 0.007, train_acc = 1.000 (3.320 sec/step)
step 29950 	 loss = 0.007, train_acc = 1.000 (3.312 sec/step)
step 29960 	 loss = 0.702, train_acc = 0.900 (3.286 sec/step)
step 29970 	 loss = 0.017, train_acc = 1.000 (3.308 sec/step)
step 29980 	 loss = 0.242, train_acc = 0.900 (3.292 sec/step)
step 29990 	 loss = 0.327, train_acc = 0.800 (3.269 sec/step)
step 30000 	 loss = 0.092, train_acc = 1.000 (3.294 sec/step)
step 30010 	 loss = 0.092, train_acc = 1.000 (3.284 sec/step)
step 30020 	 loss = 0.679, train_acc = 0.800 (3.342 sec/step)
step 30030 	 loss = 0.183, train_acc = 0.900 (3.352 sec/step)
step 30040 	 loss = 0.374, train_acc = 0.800 (3.292 sec/step)
step 30050 	 loss = 0.130, train_acc = 1.000 (3.296 sec/step)
step 30060 	 loss = 0.519, train_acc = 0.900 (3.328 sec/step)
step 30070 	 loss = 0.047, train_acc = 1.000 (3.328 sec/step)
step 30080 	 loss = 0.796, train_acc = 0.800 (3.312 sec/step)
step 30090 	 loss = 0.386, train_acc = 0.900 (3.327 sec/step)
step 30100 	 loss = 0.270, train_acc = 0.900 (3.335 sec/step)
step 30110 	 loss = 0.488, train_acc = 0.900 (3.336 sec/step)
step 30120 	 loss = 0.052, train_acc = 1.000 (3.283 sec/step)
step 30130 	 loss = 0.350, train_acc = 0.900 (3.293 sec/step)
step 30140 	 loss = 0.324, train_acc = 0.800 (3.420 sec/step)
step 30150 	 loss = 0.171, train_acc = 1.000 (3.307 sec/step)
step 30160 	 loss = 0.113, train_acc = 1.000 (3.302 sec/step)
step 30170 	 loss = 0.344, train_acc = 0.800 (3.351 sec/step)
step 30180 	 loss = 0.474, train_acc = 0.900 (3.299 sec/step)
step 30190 	 loss = 0.616, train_acc = 0.800 (3.288 sec/step)
step 30200 	 loss = 0.236, train_acc = 0.900 (3.252 sec/step)
step 30210 	 loss = 0.474, train_acc = 0.900 (3.316 sec/step)
step 30220 	 loss = 0.060, train_acc = 1.000 (3.262 sec/step)
step 30230 	 loss = 0.005, train_acc = 1.000 (3.339 sec/step)
step 30240 	 loss = 0.327, train_acc = 0.800 (3.286 sec/step)
step 30250 	 loss = 0.549, train_acc = 0.800 (3.349 sec/step)
step 30260 	 loss = 0.513, train_acc = 0.900 (3.354 sec/step)
step 30270 	 loss = 0.293, train_acc = 0.900 (3.290 sec/step)
step 30280 	 loss = 0.413, train_acc = 0.900 (3.310 sec/step)
step 30290 	 loss = 0.006, train_acc = 1.000 (3.297 sec/step)
step 30300 	 loss = 0.025, train_acc = 1.000 (3.319 sec/step)
step 30310 	 loss = 0.166, train_acc = 0.900 (3.286 sec/step)
step 30320 	 loss = 0.047, train_acc = 1.000 (3.312 sec/step)
step 30330 	 loss = 0.111, train_acc = 0.900 (3.272 sec/step)
step 30340 	 loss = 0.714, train_acc = 0.800 (3.262 sec/step)
step 30350 	 loss = 0.340, train_acc = 0.800 (3.299 sec/step)
step 30360 	 loss = 0.220, train_acc = 0.900 (3.285 sec/step)
step 30370 	 loss = 0.353, train_acc = 0.900 (3.240 sec/step)
step 30380 	 loss = 0.329, train_acc = 0.900 (3.316 sec/step)
step 30390 	 loss = 1.245, train_acc = 0.600 (3.311 sec/step)
VALIDATION 	 acc = 0.516 (3.609 sec)
step 30400 	 loss = 0.044, train_acc = 1.000 (3.278 sec/step)
step 30410 	 loss = 0.034, train_acc = 1.000 (3.287 sec/step)
step 30420 	 loss = 0.247, train_acc = 0.800 (3.321 sec/step)
step 30430 	 loss = 0.034, train_acc = 1.000 (3.367 sec/step)
step 30440 	 loss = 0.012, train_acc = 1.000 (3.314 sec/step)
step 30450 	 loss = 0.149, train_acc = 1.000 (3.326 sec/step)
step 30460 	 loss = 0.009, train_acc = 1.000 (3.261 sec/step)
step 30470 	 loss = 0.042, train_acc = 1.000 (3.372 sec/step)
step 30480 	 loss = 0.115, train_acc = 0.900 (3.254 sec/step)
step 30490 	 loss = 0.020, train_acc = 1.000 (3.324 sec/step)
step 30500 	 loss = 0.816, train_acc = 0.800 (3.287 sec/step)
step 30510 	 loss = 0.734, train_acc = 0.900 (3.355 sec/step)
step 30520 	 loss = 0.127, train_acc = 1.000 (3.268 sec/step)
step 30530 	 loss = 0.029, train_acc = 1.000 (3.312 sec/step)
step 30540 	 loss = 0.126, train_acc = 0.900 (3.261 sec/step)
step 30550 	 loss = 0.025, train_acc = 1.000 (3.309 sec/step)
step 30560 	 loss = 0.072, train_acc = 1.000 (3.336 sec/step)
step 30570 	 loss = 0.189, train_acc = 0.900 (3.277 sec/step)
step 30580 	 loss = 0.503, train_acc = 0.800 (3.350 sec/step)
step 30590 	 loss = 0.002, train_acc = 1.000 (3.252 sec/step)
step 30600 	 loss = 0.245, train_acc = 0.900 (3.319 sec/step)
step 30610 	 loss = 0.062, train_acc = 1.000 (3.307 sec/step)
step 30620 	 loss = 0.015, train_acc = 1.000 (3.359 sec/step)
step 30630 	 loss = 0.053, train_acc = 1.000 (3.285 sec/step)
step 30640 	 loss = 0.734, train_acc = 0.900 (3.297 sec/step)
step 30650 	 loss = 0.154, train_acc = 1.000 (3.352 sec/step)
step 30660 	 loss = 0.062, train_acc = 1.000 (3.339 sec/step)
step 30670 	 loss = 0.261, train_acc = 0.900 (3.295 sec/step)
step 30680 	 loss = 0.136, train_acc = 0.900 (3.274 sec/step)
step 30690 	 loss = 0.057, train_acc = 1.000 (3.266 sec/step)
step 30700 	 loss = 0.064, train_acc = 1.000 (3.282 sec/step)
step 30710 	 loss = 0.293, train_acc = 0.900 (3.252 sec/step)
step 30720 	 loss = 0.073, train_acc = 1.000 (3.255 sec/step)
step 30730 	 loss = 0.603, train_acc = 0.900 (3.254 sec/step)
step 30740 	 loss = 0.388, train_acc = 0.800 (3.288 sec/step)
step 30750 	 loss = 0.261, train_acc = 0.900 (3.256 sec/step)
step 30760 	 loss = 1.135, train_acc = 0.800 (3.261 sec/step)
step 30770 	 loss = 0.010, train_acc = 1.000 (3.275 sec/step)
step 30780 	 loss = 0.007, train_acc = 1.000 (3.301 sec/step)
step 30790 	 loss = 0.001, train_acc = 1.000 (3.303 sec/step)
step 30800 	 loss = 0.651, train_acc = 0.900 (3.277 sec/step)
step 30810 	 loss = 0.267, train_acc = 0.900 (3.274 sec/step)
step 30820 	 loss = 0.029, train_acc = 1.000 (3.311 sec/step)
step 30830 	 loss = 0.009, train_acc = 1.000 (3.378 sec/step)
step 30840 	 loss = 0.610, train_acc = 0.900 (3.273 sec/step)
step 30850 	 loss = 0.105, train_acc = 0.900 (3.263 sec/step)
step 30860 	 loss = 1.185, train_acc = 0.800 (3.302 sec/step)
step 30870 	 loss = 0.061, train_acc = 1.000 (3.275 sec/step)
step 30880 	 loss = 0.453, train_acc = 0.800 (3.292 sec/step)
step 30890 	 loss = 0.129, train_acc = 1.000 (3.318 sec/step)
step 30900 	 loss = 0.042, train_acc = 1.000 (3.334 sec/step)
step 30910 	 loss = 0.117, train_acc = 1.000 (3.284 sec/step)
step 30920 	 loss = 0.083, train_acc = 1.000 (3.297 sec/step)
step 30930 	 loss = 0.230, train_acc = 0.900 (3.368 sec/step)
step 30940 	 loss = 0.500, train_acc = 0.800 (3.264 sec/step)
step 30950 	 loss = 0.007, train_acc = 1.000 (3.290 sec/step)
step 30960 	 loss = 0.335, train_acc = 0.900 (3.299 sec/step)
step 30970 	 loss = 0.129, train_acc = 1.000 (3.331 sec/step)
step 30980 	 loss = 0.499, train_acc = 0.900 (3.274 sec/step)
step 30990 	 loss = 0.119, train_acc = 1.000 (3.317 sec/step)
step 31000 	 loss = 0.298, train_acc = 0.900 (3.272 sec/step)
step 31010 	 loss = 0.342, train_acc = 0.900 (3.309 sec/step)
step 31020 	 loss = 0.069, train_acc = 1.000 (3.350 sec/step)
step 31030 	 loss = 1.016, train_acc = 0.900 (3.288 sec/step)
step 31040 	 loss = 0.276, train_acc = 0.800 (3.305 sec/step)
step 31050 	 loss = 0.053, train_acc = 1.000 (3.360 sec/step)
step 31060 	 loss = 0.328, train_acc = 0.900 (3.290 sec/step)
step 31070 	 loss = 0.158, train_acc = 0.900 (3.333 sec/step)
step 31080 	 loss = 0.320, train_acc = 0.900 (3.273 sec/step)
step 31090 	 loss = 0.253, train_acc = 0.900 (3.283 sec/step)
step 31100 	 loss = 0.869, train_acc = 0.700 (3.297 sec/step)
step 31110 	 loss = 0.135, train_acc = 0.900 (3.313 sec/step)
step 31120 	 loss = 0.063, train_acc = 1.000 (3.330 sec/step)
step 31130 	 loss = 0.815, train_acc = 0.900 (3.332 sec/step)
step 31140 	 loss = 0.194, train_acc = 0.900 (3.253 sec/step)
step 31150 	 loss = 0.260, train_acc = 0.900 (3.326 sec/step)
step 31160 	 loss = 0.034, train_acc = 1.000 (3.292 sec/step)
step 31170 	 loss = 0.335, train_acc = 0.900 (3.312 sec/step)
step 31180 	 loss = 0.881, train_acc = 0.800 (3.330 sec/step)
step 31190 	 loss = 0.032, train_acc = 1.000 (3.264 sec/step)
step 31200 	 loss = 0.081, train_acc = 1.000 (3.343 sec/step)
step 31210 	 loss = 0.043, train_acc = 1.000 (3.351 sec/step)
step 31220 	 loss = 0.007, train_acc = 1.000 (3.325 sec/step)
step 31230 	 loss = 0.033, train_acc = 1.000 (3.281 sec/step)
step 31240 	 loss = 0.496, train_acc = 0.800 (3.337 sec/step)
step 31250 	 loss = 0.039, train_acc = 1.000 (3.342 sec/step)
step 31260 	 loss = 0.089, train_acc = 1.000 (3.253 sec/step)
step 31270 	 loss = 0.398, train_acc = 0.900 (3.321 sec/step)
step 31280 	 loss = 1.055, train_acc = 0.800 (3.302 sec/step)
step 31290 	 loss = 0.629, train_acc = 0.800 (3.282 sec/step)
step 31300 	 loss = 0.022, train_acc = 1.000 (3.309 sec/step)
step 31310 	 loss = 0.217, train_acc = 0.900 (3.347 sec/step)
step 31320 	 loss = 0.046, train_acc = 1.000 (3.316 sec/step)
step 31330 	 loss = 0.002, train_acc = 1.000 (3.315 sec/step)
step 31340 	 loss = 0.059, train_acc = 1.000 (3.340 sec/step)
step 31350 	 loss = 1.238, train_acc = 0.600 (3.281 sec/step)
step 31360 	 loss = 0.325, train_acc = 0.900 (3.343 sec/step)
step 31370 	 loss = 0.304, train_acc = 0.800 (3.296 sec/step)
step 31380 	 loss = 0.358, train_acc = 0.900 (3.285 sec/step)
step 31390 	 loss = 0.871, train_acc = 0.800 (3.258 sec/step)
step 31400 	 loss = 0.731, train_acc = 0.800 (3.338 sec/step)
step 31410 	 loss = 0.290, train_acc = 0.900 (3.357 sec/step)
step 31420 	 loss = 0.161, train_acc = 0.900 (3.381 sec/step)
step 31430 	 loss = 0.001, train_acc = 1.000 (3.296 sec/step)
step 31440 	 loss = 0.224, train_acc = 0.900 (3.298 sec/step)
step 31450 	 loss = 0.109, train_acc = 0.900 (3.325 sec/step)
step 31460 	 loss = 0.343, train_acc = 0.900 (3.307 sec/step)
step 31470 	 loss = 0.002, train_acc = 1.000 (3.300 sec/step)
step 31480 	 loss = 0.278, train_acc = 0.900 (3.280 sec/step)
step 31490 	 loss = 0.705, train_acc = 0.700 (3.252 sec/step)
step 31500 	 loss = 0.041, train_acc = 1.000 (3.335 sec/step)
step 31510 	 loss = 0.315, train_acc = 0.800 (3.300 sec/step)
step 31520 	 loss = 1.521, train_acc = 0.900 (3.260 sec/step)
step 31530 	 loss = 0.152, train_acc = 0.900 (3.303 sec/step)
step 31540 	 loss = 1.096, train_acc = 0.900 (3.338 sec/step)
step 31550 	 loss = 0.003, train_acc = 1.000 (3.313 sec/step)
step 31560 	 loss = 1.408, train_acc = 0.700 (3.347 sec/step)
step 31570 	 loss = 0.149, train_acc = 1.000 (3.296 sec/step)
step 31580 	 loss = 0.134, train_acc = 1.000 (3.252 sec/step)
step 31590 	 loss = 0.111, train_acc = 1.000 (3.351 sec/step)
step 31600 	 loss = 0.896, train_acc = 0.700 (3.352 sec/step)
step 31610 	 loss = 0.195, train_acc = 0.900 (3.300 sec/step)
step 31620 	 loss = 0.034, train_acc = 1.000 (3.277 sec/step)
step 31630 	 loss = 0.394, train_acc = 0.900 (3.316 sec/step)
step 31640 	 loss = 0.042, train_acc = 1.000 (3.285 sec/step)
step 31650 	 loss = 0.076, train_acc = 1.000 (3.331 sec/step)
step 31660 	 loss = 0.307, train_acc = 0.900 (3.311 sec/step)
step 31670 	 loss = 0.015, train_acc = 1.000 (3.348 sec/step)
step 31680 	 loss = 0.002, train_acc = 1.000 (3.393 sec/step)
step 31690 	 loss = 0.393, train_acc = 0.900 (3.260 sec/step)
step 31700 	 loss = 0.007, train_acc = 1.000 (3.306 sec/step)
step 31710 	 loss = 0.560, train_acc = 0.900 (3.331 sec/step)
step 31720 	 loss = 0.166, train_acc = 0.900 (3.267 sec/step)
step 31730 	 loss = 0.328, train_acc = 0.900 (3.286 sec/step)
step 31740 	 loss = 0.666, train_acc = 0.800 (3.295 sec/step)
step 31750 	 loss = 0.005, train_acc = 1.000 (3.268 sec/step)
step 31760 	 loss = 0.641, train_acc = 0.800 (3.337 sec/step)
step 31770 	 loss = 0.741, train_acc = 0.900 (3.326 sec/step)
step 31780 	 loss = 0.041, train_acc = 1.000 (3.316 sec/step)
step 31790 	 loss = 0.157, train_acc = 1.000 (3.318 sec/step)
step 31800 	 loss = 0.042, train_acc = 1.000 (3.319 sec/step)
step 31810 	 loss = 0.144, train_acc = 0.900 (3.256 sec/step)
step 31820 	 loss = 0.665, train_acc = 0.700 (3.332 sec/step)
step 31830 	 loss = 0.147, train_acc = 0.900 (3.264 sec/step)
step 31840 	 loss = 0.137, train_acc = 0.900 (3.286 sec/step)
step 31850 	 loss = 0.031, train_acc = 1.000 (3.297 sec/step)
step 31860 	 loss = 0.477, train_acc = 0.800 (3.283 sec/step)
step 31870 	 loss = 0.020, train_acc = 1.000 (3.315 sec/step)
step 31880 	 loss = 0.195, train_acc = 0.900 (3.283 sec/step)
step 31890 	 loss = 0.914, train_acc = 0.900 (3.414 sec/step)
step 31900 	 loss = 0.015, train_acc = 1.000 (3.266 sec/step)
step 31910 	 loss = 1.402, train_acc = 0.800 (3.353 sec/step)
step 31920 	 loss = 0.538, train_acc = 0.900 (3.305 sec/step)
step 31930 	 loss = 0.023, train_acc = 1.000 (3.366 sec/step)
step 31940 	 loss = 0.915, train_acc = 0.800 (3.338 sec/step)
step 31950 	 loss = 0.207, train_acc = 0.900 (3.303 sec/step)
step 31960 	 loss = 0.652, train_acc = 0.800 (3.292 sec/step)
step 31970 	 loss = 0.003, train_acc = 1.000 (3.274 sec/step)
step 31980 	 loss = 0.045, train_acc = 1.000 (3.272 sec/step)
step 31990 	 loss = 0.050, train_acc = 1.000 (3.298 sec/step)
step 32000 	 loss = 0.112, train_acc = 1.000 (3.303 sec/step)
step 32010 	 loss = 0.699, train_acc = 0.900 (3.333 sec/step)
step 32020 	 loss = 0.026, train_acc = 1.000 (3.251 sec/step)
step 32030 	 loss = 0.426, train_acc = 0.800 (3.258 sec/step)
step 32040 	 loss = 0.465, train_acc = 0.900 (3.369 sec/step)
step 32050 	 loss = 0.699, train_acc = 0.800 (3.323 sec/step)
step 32060 	 loss = 0.439, train_acc = 0.800 (3.304 sec/step)
step 32070 	 loss = 0.015, train_acc = 1.000 (3.313 sec/step)
step 32080 	 loss = 0.004, train_acc = 1.000 (3.284 sec/step)
step 32090 	 loss = 0.001, train_acc = 1.000 (3.297 sec/step)
step 32100 	 loss = 0.000, train_acc = 1.000 (3.290 sec/step)
step 32110 	 loss = 0.607, train_acc = 0.800 (3.292 sec/step)
step 32120 	 loss = 0.161, train_acc = 1.000 (3.337 sec/step)
step 32130 	 loss = 0.840, train_acc = 0.600 (3.375 sec/step)
step 32140 	 loss = 0.012, train_acc = 1.000 (3.327 sec/step)
step 32150 	 loss = 0.946, train_acc = 0.700 (3.303 sec/step)
step 32160 	 loss = 0.201, train_acc = 1.000 (3.284 sec/step)
step 32170 	 loss = 0.015, train_acc = 1.000 (3.306 sec/step)
step 32180 	 loss = 0.008, train_acc = 1.000 (3.274 sec/step)
step 32190 	 loss = 1.019, train_acc = 0.800 (3.320 sec/step)
step 32200 	 loss = 2.159, train_acc = 0.500 (3.292 sec/step)
step 32210 	 loss = 1.230, train_acc = 0.800 (3.297 sec/step)
step 32220 	 loss = 2.087, train_acc = 0.500 (3.298 sec/step)
step 32230 	 loss = 1.304, train_acc = 0.600 (3.306 sec/step)
step 32240 	 loss = 1.508, train_acc = 0.500 (3.354 sec/step)
step 32250 	 loss = 1.173, train_acc = 0.800 (3.310 sec/step)
step 32260 	 loss = 1.201, train_acc = 0.800 (3.310 sec/step)
step 32270 	 loss = 0.557, train_acc = 0.900 (3.309 sec/step)
step 32280 	 loss = 0.244, train_acc = 1.000 (3.329 sec/step)
step 32290 	 loss = 0.223, train_acc = 0.900 (3.274 sec/step)
VALIDATION 	 acc = 0.526 (3.617 sec)
step 32300 	 loss = 0.077, train_acc = 1.000 (3.285 sec/step)
step 32310 	 loss = 0.181, train_acc = 0.900 (3.327 sec/step)
step 32320 	 loss = 0.033, train_acc = 1.000 (3.286 sec/step)
step 32330 	 loss = 0.058, train_acc = 1.000 (3.292 sec/step)
step 32340 	 loss = 0.171, train_acc = 0.900 (3.274 sec/step)
step 32350 	 loss = 1.088, train_acc = 0.800 (3.301 sec/step)
step 32360 	 loss = 0.941, train_acc = 0.800 (3.320 sec/step)
step 32370 	 loss = 0.104, train_acc = 0.900 (3.293 sec/step)
step 32380 	 loss = 0.081, train_acc = 0.900 (3.329 sec/step)
step 32390 	 loss = 0.409, train_acc = 0.900 (3.318 sec/step)
step 32400 	 loss = 0.006, train_acc = 1.000 (3.280 sec/step)
step 32410 	 loss = 0.000, train_acc = 1.000 (3.311 sec/step)
step 32420 	 loss = 0.851, train_acc = 0.900 (3.316 sec/step)
step 32430 	 loss = 0.037, train_acc = 1.000 (3.329 sec/step)
step 32440 	 loss = 0.854, train_acc = 0.700 (3.342 sec/step)
step 32450 	 loss = 0.045, train_acc = 1.000 (3.256 sec/step)
step 32460 	 loss = 0.550, train_acc = 0.900 (3.376 sec/step)
step 32470 	 loss = 0.015, train_acc = 1.000 (3.304 sec/step)
step 32480 	 loss = 0.048, train_acc = 1.000 (3.257 sec/step)
step 32490 	 loss = 0.131, train_acc = 0.900 (3.276 sec/step)
step 32500 	 loss = 0.562, train_acc = 0.700 (3.334 sec/step)
step 32510 	 loss = 0.135, train_acc = 0.900 (3.338 sec/step)
step 32520 	 loss = 0.236, train_acc = 0.900 (3.339 sec/step)
step 32530 	 loss = 0.084, train_acc = 1.000 (3.325 sec/step)
step 32540 	 loss = 0.004, train_acc = 1.000 (3.328 sec/step)
step 32550 	 loss = 0.094, train_acc = 1.000 (3.300 sec/step)
step 32560 	 loss = 0.369, train_acc = 0.800 (3.394 sec/step)
step 32570 	 loss = 0.088, train_acc = 1.000 (3.313 sec/step)
step 32580 	 loss = 0.002, train_acc = 1.000 (3.314 sec/step)
step 32590 	 loss = 0.010, train_acc = 1.000 (3.329 sec/step)
step 32600 	 loss = 0.165, train_acc = 0.900 (3.280 sec/step)
step 32610 	 loss = 0.183, train_acc = 0.900 (3.281 sec/step)
step 32620 	 loss = 0.322, train_acc = 0.900 (3.319 sec/step)
step 32630 	 loss = 0.354, train_acc = 0.900 (3.322 sec/step)
step 32640 	 loss = 0.172, train_acc = 1.000 (3.272 sec/step)
step 32650 	 loss = 0.211, train_acc = 0.900 (3.365 sec/step)
step 32660 	 loss = 0.249, train_acc = 0.800 (3.361 sec/step)
step 32670 	 loss = 0.171, train_acc = 0.900 (3.279 sec/step)
step 32680 	 loss = 0.602, train_acc = 0.900 (3.270 sec/step)
step 32690 	 loss = 0.027, train_acc = 1.000 (3.342 sec/step)
step 32700 	 loss = 0.840, train_acc = 0.700 (3.279 sec/step)
step 32710 	 loss = 0.324, train_acc = 0.900 (3.317 sec/step)
step 32720 	 loss = 0.184, train_acc = 0.900 (3.308 sec/step)
step 32730 	 loss = 0.941, train_acc = 0.700 (3.320 sec/step)
step 32740 	 loss = 0.251, train_acc = 0.900 (3.306 sec/step)
step 32750 	 loss = 0.203, train_acc = 0.900 (3.459 sec/step)
step 32760 	 loss = 0.931, train_acc = 0.900 (3.290 sec/step)
step 32770 	 loss = 0.111, train_acc = 0.900 (3.291 sec/step)
step 32780 	 loss = 0.033, train_acc = 1.000 (3.340 sec/step)
step 32790 	 loss = 0.024, train_acc = 1.000 (3.390 sec/step)
step 32800 	 loss = 1.066, train_acc = 0.900 (3.266 sec/step)
step 32810 	 loss = 0.397, train_acc = 0.900 (3.303 sec/step)
step 32820 	 loss = 0.303, train_acc = 0.800 (3.304 sec/step)
step 32830 	 loss = 0.180, train_acc = 1.000 (3.354 sec/step)
step 32840 	 loss = 0.022, train_acc = 1.000 (3.376 sec/step)
step 32850 	 loss = 0.003, train_acc = 1.000 (3.305 sec/step)
step 32860 	 loss = 0.006, train_acc = 1.000 (3.262 sec/step)
step 32870 	 loss = 0.435, train_acc = 0.800 (3.353 sec/step)
step 32880 	 loss = 0.190, train_acc = 0.900 (3.324 sec/step)
step 32890 	 loss = 0.039, train_acc = 1.000 (3.306 sec/step)
step 32900 	 loss = 0.078, train_acc = 1.000 (3.301 sec/step)
step 32910 	 loss = 0.090, train_acc = 1.000 (3.330 sec/step)
step 32920 	 loss = 0.298, train_acc = 0.900 (3.317 sec/step)
step 32930 	 loss = 0.240, train_acc = 0.900 (3.268 sec/step)
step 32940 	 loss = 1.241, train_acc = 0.800 (3.290 sec/step)
step 32950 	 loss = 0.035, train_acc = 1.000 (3.319 sec/step)
step 32960 	 loss = 0.002, train_acc = 1.000 (3.283 sec/step)
step 32970 	 loss = 0.138, train_acc = 0.900 (3.252 sec/step)
step 32980 	 loss = 0.068, train_acc = 1.000 (3.352 sec/step)
step 32990 	 loss = 0.214, train_acc = 1.000 (3.359 sec/step)
step 33000 	 loss = 0.593, train_acc = 0.900 (3.279 sec/step)
step 33010 	 loss = 0.004, train_acc = 1.000 (3.302 sec/step)
step 33020 	 loss = 0.730, train_acc = 0.800 (3.295 sec/step)
step 33030 	 loss = 0.176, train_acc = 0.900 (3.343 sec/step)
step 33040 	 loss = 0.000, train_acc = 1.000 (3.317 sec/step)
step 33050 	 loss = 0.055, train_acc = 1.000 (3.306 sec/step)
step 33060 	 loss = 0.287, train_acc = 0.900 (3.279 sec/step)
step 33070 	 loss = 0.447, train_acc = 0.900 (3.293 sec/step)
step 33080 	 loss = 0.026, train_acc = 1.000 (3.291 sec/step)
step 33090 	 loss = 0.022, train_acc = 1.000 (3.265 sec/step)
step 33100 	 loss = 2.532, train_acc = 0.800 (3.337 sec/step)
step 33110 	 loss = 0.315, train_acc = 0.900 (3.332 sec/step)
step 33120 	 loss = 0.463, train_acc = 0.800 (3.296 sec/step)
step 33130 	 loss = 0.004, train_acc = 1.000 (3.302 sec/step)
step 33140 	 loss = 0.780, train_acc = 0.800 (3.308 sec/step)
step 33150 	 loss = 0.163, train_acc = 1.000 (3.313 sec/step)
step 33160 	 loss = 0.549, train_acc = 0.900 (3.282 sec/step)
step 33170 	 loss = 0.157, train_acc = 0.900 (3.303 sec/step)
step 33180 	 loss = 0.027, train_acc = 1.000 (3.291 sec/step)
step 33190 	 loss = 0.033, train_acc = 1.000 (3.286 sec/step)
step 33200 	 loss = 0.002, train_acc = 1.000 (3.269 sec/step)
step 33210 	 loss = 0.124, train_acc = 1.000 (3.302 sec/step)
step 33220 	 loss = 0.281, train_acc = 0.800 (3.255 sec/step)
step 33230 	 loss = 0.004, train_acc = 1.000 (3.320 sec/step)
step 33240 	 loss = 0.085, train_acc = 1.000 (3.296 sec/step)
step 33250 	 loss = 0.054, train_acc = 1.000 (3.253 sec/step)
step 33260 	 loss = 0.260, train_acc = 0.900 (3.301 sec/step)
step 33270 	 loss = 0.736, train_acc = 0.900 (3.325 sec/step)
step 33280 	 loss = 0.357, train_acc = 0.800 (3.265 sec/step)
step 33290 	 loss = 0.137, train_acc = 0.900 (3.251 sec/step)
step 33300 	 loss = 0.011, train_acc = 1.000 (3.383 sec/step)
step 33310 	 loss = 1.115, train_acc = 0.800 (3.339 sec/step)
step 33320 	 loss = 0.280, train_acc = 0.900 (3.257 sec/step)
step 33330 	 loss = 0.261, train_acc = 0.900 (3.328 sec/step)
step 33340 	 loss = 0.373, train_acc = 0.900 (3.321 sec/step)
step 33350 	 loss = 0.675, train_acc = 0.700 (3.319 sec/step)
step 33360 	 loss = 0.082, train_acc = 1.000 (3.314 sec/step)
step 33370 	 loss = 0.004, train_acc = 1.000 (3.317 sec/step)
step 33380 	 loss = 0.225, train_acc = 0.900 (3.297 sec/step)
step 33390 	 loss = 0.035, train_acc = 1.000 (3.283 sec/step)
step 33400 	 loss = 0.051, train_acc = 1.000 (3.285 sec/step)
step 33410 	 loss = 0.063, train_acc = 1.000 (3.326 sec/step)
step 33420 	 loss = 0.000, train_acc = 1.000 (3.332 sec/step)
step 33430 	 loss = 0.060, train_acc = 1.000 (3.331 sec/step)
step 33440 	 loss = 0.096, train_acc = 1.000 (3.332 sec/step)
step 33450 	 loss = 0.101, train_acc = 0.900 (3.278 sec/step)
step 33460 	 loss = 0.085, train_acc = 1.000 (3.309 sec/step)
step 33470 	 loss = 0.245, train_acc = 0.900 (3.328 sec/step)
step 33480 	 loss = 0.186, train_acc = 0.900 (3.300 sec/step)
step 33490 	 loss = 0.070, train_acc = 1.000 (3.259 sec/step)
step 33500 	 loss = 0.192, train_acc = 0.900 (3.357 sec/step)
step 33510 	 loss = 0.171, train_acc = 0.900 (3.322 sec/step)
step 33520 	 loss = 0.189, train_acc = 0.900 (3.279 sec/step)
step 33530 	 loss = 0.183, train_acc = 1.000 (3.317 sec/step)
step 33540 	 loss = 0.241, train_acc = 0.900 (3.333 sec/step)
step 33550 	 loss = 0.859, train_acc = 0.700 (3.395 sec/step)
step 33560 	 loss = 0.678, train_acc = 0.900 (3.295 sec/step)
step 33570 	 loss = 0.312, train_acc = 0.900 (3.275 sec/step)
step 33580 	 loss = 0.723, train_acc = 0.800 (3.304 sec/step)
step 33590 	 loss = 0.718, train_acc = 0.800 (3.313 sec/step)
step 33600 	 loss = 0.154, train_acc = 0.900 (3.328 sec/step)
step 33610 	 loss = 0.062, train_acc = 1.000 (3.358 sec/step)
step 33620 	 loss = 0.092, train_acc = 1.000 (3.332 sec/step)
step 33630 	 loss = 0.445, train_acc = 0.900 (3.266 sec/step)
step 33640 	 loss = 0.027, train_acc = 1.000 (3.315 sec/step)
step 33650 	 loss = 0.322, train_acc = 0.900 (3.295 sec/step)
step 33660 	 loss = 0.006, train_acc = 1.000 (3.328 sec/step)
step 33670 	 loss = 0.061, train_acc = 1.000 (3.301 sec/step)
step 33680 	 loss = 0.349, train_acc = 0.900 (3.292 sec/step)
step 33690 	 loss = 1.927, train_acc = 0.600 (3.305 sec/step)
step 33700 	 loss = 0.012, train_acc = 1.000 (3.280 sec/step)
step 33710 	 loss = 0.316, train_acc = 0.900 (3.274 sec/step)
step 33720 	 loss = 0.208, train_acc = 0.900 (3.317 sec/step)
step 33730 	 loss = 0.070, train_acc = 1.000 (3.354 sec/step)
step 33740 	 loss = 0.224, train_acc = 0.900 (3.332 sec/step)
step 33750 	 loss = 0.077, train_acc = 1.000 (3.279 sec/step)
step 33760 	 loss = 0.044, train_acc = 1.000 (3.300 sec/step)
step 33770 	 loss = 0.142, train_acc = 1.000 (3.282 sec/step)
step 33780 	 loss = 0.186, train_acc = 0.900 (3.328 sec/step)
step 33790 	 loss = 0.399, train_acc = 0.800 (3.311 sec/step)
step 33800 	 loss = 0.285, train_acc = 0.900 (3.305 sec/step)
step 33810 	 loss = 0.181, train_acc = 0.900 (3.311 sec/step)
step 33820 	 loss = 0.530, train_acc = 0.900 (3.286 sec/step)
step 33830 	 loss = 0.208, train_acc = 0.900 (3.262 sec/step)
step 33840 	 loss = 1.580, train_acc = 0.700 (3.343 sec/step)
step 33850 	 loss = 1.374, train_acc = 0.600 (3.379 sec/step)
step 33860 	 loss = 0.364, train_acc = 0.900 (3.341 sec/step)
step 33870 	 loss = 0.278, train_acc = 0.900 (3.275 sec/step)
step 33880 	 loss = 0.215, train_acc = 0.900 (3.268 sec/step)
step 33890 	 loss = 0.001, train_acc = 1.000 (3.273 sec/step)
step 33900 	 loss = 0.220, train_acc = 0.900 (3.332 sec/step)
step 33910 	 loss = 0.031, train_acc = 1.000 (3.265 sec/step)
step 33920 	 loss = 0.229, train_acc = 0.900 (3.317 sec/step)
step 33930 	 loss = 0.570, train_acc = 0.800 (3.324 sec/step)
step 33940 	 loss = 0.614, train_acc = 0.800 (3.271 sec/step)
step 33950 	 loss = 0.305, train_acc = 0.900 (3.438 sec/step)
step 33960 	 loss = 0.055, train_acc = 1.000 (3.322 sec/step)
step 33970 	 loss = 0.243, train_acc = 0.900 (3.255 sec/step)
step 33980 	 loss = 0.917, train_acc = 0.900 (3.287 sec/step)
step 33990 	 loss = 0.214, train_acc = 0.900 (3.312 sec/step)
step 34000 	 loss = 0.108, train_acc = 1.000 (3.292 sec/step)
step 34010 	 loss = 0.074, train_acc = 1.000 (3.268 sec/step)
step 34020 	 loss = 0.562, train_acc = 0.700 (3.359 sec/step)
step 34030 	 loss = 0.308, train_acc = 0.900 (3.329 sec/step)
step 34040 	 loss = 0.022, train_acc = 1.000 (3.311 sec/step)
step 34050 	 loss = 0.071, train_acc = 1.000 (3.294 sec/step)
step 34060 	 loss = 0.006, train_acc = 1.000 (3.301 sec/step)
step 34070 	 loss = 0.227, train_acc = 0.900 (3.301 sec/step)
step 34080 	 loss = 0.725, train_acc = 0.800 (3.269 sec/step)
step 34090 	 loss = 0.066, train_acc = 1.000 (3.256 sec/step)
step 34100 	 loss = 0.635, train_acc = 0.800 (3.326 sec/step)
step 34110 	 loss = 0.647, train_acc = 0.800 (3.350 sec/step)
step 34120 	 loss = 0.085, train_acc = 1.000 (3.345 sec/step)
step 34130 	 loss = 0.131, train_acc = 0.900 (3.296 sec/step)
step 34140 	 loss = 1.139, train_acc = 0.800 (3.327 sec/step)
step 34150 	 loss = 0.007, train_acc = 1.000 (3.298 sec/step)
step 34160 	 loss = 0.113, train_acc = 1.000 (3.278 sec/step)
step 34170 	 loss = 0.009, train_acc = 1.000 (3.311 sec/step)
step 34180 	 loss = 0.016, train_acc = 1.000 (3.377 sec/step)
step 34190 	 loss = 0.121, train_acc = 0.900 (3.323 sec/step)
VALIDATION 	 acc = 0.543 (3.652 sec)
step 34200 	 loss = 0.018, train_acc = 1.000 (3.305 sec/step)
step 34210 	 loss = 0.087, train_acc = 1.000 (3.291 sec/step)
step 34220 	 loss = 0.208, train_acc = 0.900 (3.278 sec/step)
step 34230 	 loss = 0.143, train_acc = 1.000 (3.268 sec/step)
step 34240 	 loss = 0.023, train_acc = 1.000 (3.293 sec/step)
step 34250 	 loss = 0.536, train_acc = 0.800 (3.280 sec/step)
step 34260 	 loss = 0.003, train_acc = 1.000 (3.309 sec/step)
step 34270 	 loss = 0.016, train_acc = 1.000 (3.312 sec/step)
step 34280 	 loss = 0.173, train_acc = 0.900 (3.334 sec/step)
step 34290 	 loss = 0.277, train_acc = 0.800 (3.292 sec/step)
step 34300 	 loss = 0.279, train_acc = 0.900 (3.271 sec/step)
step 34310 	 loss = 1.419, train_acc = 0.800 (3.351 sec/step)
step 34320 	 loss = 0.183, train_acc = 0.900 (3.298 sec/step)
step 34330 	 loss = 0.378, train_acc = 0.900 (3.329 sec/step)
step 34340 	 loss = 0.001, train_acc = 1.000 (3.283 sec/step)
step 34350 	 loss = 0.570, train_acc = 0.800 (3.380 sec/step)
step 34360 	 loss = 0.126, train_acc = 1.000 (3.278 sec/step)
step 34370 	 loss = 0.004, train_acc = 1.000 (3.315 sec/step)
step 34380 	 loss = 0.004, train_acc = 1.000 (3.330 sec/step)
step 34390 	 loss = 0.000, train_acc = 1.000 (3.296 sec/step)
step 34400 	 loss = 0.157, train_acc = 1.000 (3.305 sec/step)
step 34410 	 loss = 0.180, train_acc = 0.900 (3.301 sec/step)
step 34420 	 loss = 1.285, train_acc = 0.800 (3.281 sec/step)
step 34430 	 loss = 0.141, train_acc = 0.900 (3.301 sec/step)
step 34440 	 loss = 0.344, train_acc = 0.900 (3.296 sec/step)
step 34450 	 loss = 0.026, train_acc = 1.000 (3.350 sec/step)
step 34460 	 loss = 0.180, train_acc = 1.000 (3.305 sec/step)
step 34470 	 loss = 1.182, train_acc = 0.700 (3.323 sec/step)
step 34480 	 loss = 0.950, train_acc = 0.800 (3.271 sec/step)
step 34490 	 loss = 0.032, train_acc = 1.000 (3.327 sec/step)
step 34500 	 loss = 0.006, train_acc = 1.000 (3.321 sec/step)
step 34510 	 loss = 0.425, train_acc = 0.800 (3.299 sec/step)
step 34520 	 loss = 0.344, train_acc = 0.800 (3.279 sec/step)
step 34530 	 loss = 0.539, train_acc = 0.800 (3.272 sec/step)
step 34540 	 loss = 0.781, train_acc = 0.800 (3.285 sec/step)
step 34550 	 loss = 0.079, train_acc = 1.000 (3.298 sec/step)
step 34560 	 loss = 0.047, train_acc = 1.000 (3.316 sec/step)
step 34570 	 loss = 0.009, train_acc = 1.000 (3.311 sec/step)
step 34580 	 loss = 0.066, train_acc = 1.000 (3.290 sec/step)
step 34590 	 loss = 0.045, train_acc = 1.000 (3.260 sec/step)
step 34600 	 loss = 1.290, train_acc = 0.700 (3.362 sec/step)
step 34610 	 loss = 0.229, train_acc = 0.900 (3.291 sec/step)
step 34620 	 loss = 0.214, train_acc = 0.900 (3.273 sec/step)
step 34630 	 loss = 0.159, train_acc = 0.900 (3.315 sec/step)
step 34640 	 loss = 1.275, train_acc = 0.800 (3.350 sec/step)
step 34650 	 loss = 0.351, train_acc = 0.900 (3.337 sec/step)
step 34660 	 loss = 0.028, train_acc = 1.000 (3.306 sec/step)
step 34670 	 loss = 0.189, train_acc = 0.900 (3.337 sec/step)
step 34680 	 loss = 0.001, train_acc = 1.000 (3.309 sec/step)
step 34690 	 loss = 1.692, train_acc = 0.800 (3.354 sec/step)
step 34700 	 loss = 0.834, train_acc = 0.700 (3.288 sec/step)
step 34710 	 loss = 0.315, train_acc = 0.900 (3.331 sec/step)
step 34720 	 loss = 0.076, train_acc = 1.000 (3.339 sec/step)
step 34730 	 loss = 0.012, train_acc = 1.000 (3.308 sec/step)
step 34740 	 loss = 0.039, train_acc = 1.000 (3.286 sec/step)
step 34750 	 loss = 0.006, train_acc = 1.000 (3.302 sec/step)
step 34760 	 loss = 0.333, train_acc = 0.900 (3.350 sec/step)
step 34770 	 loss = 0.200, train_acc = 0.900 (3.343 sec/step)
step 34780 	 loss = 0.103, train_acc = 1.000 (3.254 sec/step)
step 34790 	 loss = 0.037, train_acc = 1.000 (3.362 sec/step)
step 34800 	 loss = 0.039, train_acc = 1.000 (3.288 sec/step)
step 34810 	 loss = 2.006, train_acc = 0.900 (3.284 sec/step)
step 34820 	 loss = 0.839, train_acc = 0.700 (3.297 sec/step)
step 34830 	 loss = 0.207, train_acc = 1.000 (3.287 sec/step)
step 34840 	 loss = 0.039, train_acc = 1.000 (3.332 sec/step)
step 34850 	 loss = 0.295, train_acc = 0.900 (3.364 sec/step)
step 34860 	 loss = 0.002, train_acc = 1.000 (3.339 sec/step)
step 34870 	 loss = 0.486, train_acc = 0.900 (3.311 sec/step)
step 34880 	 loss = 0.040, train_acc = 1.000 (3.296 sec/step)
step 34890 	 loss = 0.052, train_acc = 1.000 (3.412 sec/step)
step 34900 	 loss = 0.164, train_acc = 1.000 (3.323 sec/step)
step 34910 	 loss = 0.854, train_acc = 0.800 (3.339 sec/step)
step 34920 	 loss = 0.507, train_acc = 0.800 (3.281 sec/step)
step 34930 	 loss = 0.211, train_acc = 1.000 (3.264 sec/step)
step 34940 	 loss = 0.121, train_acc = 0.900 (3.350 sec/step)
step 34950 	 loss = 0.379, train_acc = 0.900 (3.276 sec/step)
step 34960 	 loss = 0.051, train_acc = 1.000 (3.333 sec/step)
step 34970 	 loss = 0.287, train_acc = 0.900 (3.308 sec/step)
step 34980 	 loss = 0.012, train_acc = 1.000 (3.294 sec/step)
step 34990 	 loss = 0.004, train_acc = 1.000 (3.335 sec/step)
step 35000 	 loss = 0.121, train_acc = 0.900 (3.276 sec/step)
step 35010 	 loss = 1.074, train_acc = 0.700 (3.259 sec/step)
step 35020 	 loss = 0.002, train_acc = 1.000 (3.285 sec/step)
step 35030 	 loss = 0.281, train_acc = 0.900 (3.273 sec/step)
step 35040 	 loss = 0.176, train_acc = 1.000 (3.289 sec/step)
step 35050 	 loss = 0.011, train_acc = 1.000 (3.296 sec/step)
step 35060 	 loss = 0.305, train_acc = 0.800 (3.348 sec/step)
step 35070 	 loss = 0.211, train_acc = 0.900 (3.312 sec/step)
step 35080 	 loss = 0.171, train_acc = 0.900 (3.283 sec/step)
step 35090 	 loss = 0.722, train_acc = 0.800 (3.281 sec/step)
step 35100 	 loss = 0.367, train_acc = 0.800 (3.343 sec/step)
step 35110 	 loss = 0.697, train_acc = 0.800 (3.329 sec/step)
step 35120 	 loss = 0.012, train_acc = 1.000 (3.318 sec/step)
step 35130 	 loss = 0.016, train_acc = 1.000 (3.315 sec/step)
step 35140 	 loss = 0.456, train_acc = 0.900 (3.354 sec/step)
step 35150 	 loss = 0.139, train_acc = 0.900 (3.330 sec/step)
step 35160 	 loss = 0.002, train_acc = 1.000 (3.309 sec/step)
step 35170 	 loss = 0.249, train_acc = 0.900 (3.349 sec/step)
step 35180 	 loss = 0.364, train_acc = 0.900 (3.345 sec/step)
step 35190 	 loss = 0.284, train_acc = 0.900 (3.304 sec/step)
step 35200 	 loss = 0.008, train_acc = 1.000 (3.361 sec/step)
step 35210 	 loss = 0.093, train_acc = 1.000 (3.339 sec/step)
step 35220 	 loss = 1.192, train_acc = 0.900 (3.330 sec/step)
step 35230 	 loss = 0.124, train_acc = 0.900 (3.349 sec/step)
step 35240 	 loss = 0.071, train_acc = 1.000 (3.343 sec/step)
step 35250 	 loss = 0.043, train_acc = 1.000 (3.289 sec/step)
step 35260 	 loss = 0.085, train_acc = 1.000 (3.342 sec/step)
step 35270 	 loss = 0.219, train_acc = 0.900 (3.283 sec/step)
step 35280 	 loss = 0.525, train_acc = 0.800 (3.286 sec/step)
step 35290 	 loss = 0.154, train_acc = 0.900 (3.300 sec/step)
step 35300 	 loss = 0.076, train_acc = 1.000 (3.273 sec/step)
step 35310 	 loss = 0.317, train_acc = 0.900 (3.294 sec/step)
step 35320 	 loss = 0.003, train_acc = 1.000 (3.331 sec/step)
step 35330 	 loss = 0.124, train_acc = 0.900 (3.407 sec/step)
step 35340 	 loss = 0.205, train_acc = 0.800 (3.286 sec/step)
step 35350 	 loss = 0.825, train_acc = 0.800 (3.338 sec/step)
step 35360 	 loss = 0.091, train_acc = 1.000 (3.289 sec/step)
step 35370 	 loss = 0.064, train_acc = 1.000 (3.316 sec/step)
step 35380 	 loss = 0.020, train_acc = 1.000 (3.306 sec/step)
step 35390 	 loss = 0.219, train_acc = 1.000 (3.314 sec/step)
step 35400 	 loss = 0.166, train_acc = 1.000 (3.371 sec/step)
step 35410 	 loss = 0.140, train_acc = 0.900 (3.302 sec/step)
step 35420 	 loss = 0.074, train_acc = 1.000 (3.260 sec/step)
step 35430 	 loss = 0.006, train_acc = 1.000 (3.333 sec/step)
step 35440 	 loss = 0.024, train_acc = 1.000 (3.316 sec/step)
step 35450 	 loss = 0.074, train_acc = 1.000 (3.290 sec/step)
step 35460 	 loss = 0.221, train_acc = 0.900 (3.291 sec/step)
step 35470 	 loss = 0.359, train_acc = 0.900 (3.355 sec/step)
step 35480 	 loss = 0.028, train_acc = 1.000 (3.333 sec/step)
step 35490 	 loss = 0.006, train_acc = 1.000 (3.285 sec/step)
step 35500 	 loss = 0.011, train_acc = 1.000 (3.362 sec/step)
step 35510 	 loss = 0.328, train_acc = 0.900 (3.323 sec/step)
step 35520 	 loss = 0.630, train_acc = 0.800 (3.343 sec/step)
step 35530 	 loss = 0.136, train_acc = 1.000 (3.283 sec/step)
step 35540 	 loss = 0.113, train_acc = 0.900 (3.338 sec/step)
step 35550 	 loss = 0.628, train_acc = 0.900 (3.375 sec/step)
step 35560 	 loss = 0.395, train_acc = 0.900 (3.301 sec/step)
step 35570 	 loss = 0.027, train_acc = 1.000 (3.358 sec/step)
step 35580 	 loss = 0.056, train_acc = 1.000 (3.335 sec/step)
step 35590 	 loss = 0.796, train_acc = 0.800 (3.315 sec/step)
step 35600 	 loss = 0.047, train_acc = 1.000 (3.342 sec/step)
step 35610 	 loss = 0.927, train_acc = 0.800 (3.337 sec/step)
step 35620 	 loss = 0.426, train_acc = 0.800 (3.300 sec/step)
step 35630 	 loss = 0.174, train_acc = 0.900 (3.335 sec/step)
step 35640 	 loss = 0.015, train_acc = 1.000 (3.279 sec/step)
step 35650 	 loss = 1.106, train_acc = 0.700 (3.342 sec/step)
step 35660 	 loss = 0.023, train_acc = 1.000 (3.296 sec/step)
step 35670 	 loss = 0.173, train_acc = 1.000 (3.349 sec/step)
step 35680 	 loss = 0.142, train_acc = 1.000 (3.257 sec/step)
step 35690 	 loss = 0.233, train_acc = 0.900 (3.295 sec/step)
step 35700 	 loss = 0.752, train_acc = 0.900 (3.298 sec/step)
step 35710 	 loss = 0.583, train_acc = 0.900 (3.319 sec/step)
step 35720 	 loss = 0.015, train_acc = 1.000 (3.342 sec/step)
step 35730 	 loss = 0.010, train_acc = 1.000 (3.294 sec/step)
step 35740 	 loss = 0.041, train_acc = 1.000 (3.338 sec/step)
step 35750 	 loss = 1.817, train_acc = 0.700 (3.324 sec/step)
step 35760 	 loss = 0.122, train_acc = 1.000 (3.326 sec/step)
step 35770 	 loss = 0.328, train_acc = 0.800 (3.311 sec/step)
step 35780 	 loss = 0.056, train_acc = 1.000 (3.290 sec/step)
step 35790 	 loss = 0.081, train_acc = 1.000 (3.315 sec/step)
step 35800 	 loss = 0.154, train_acc = 0.900 (3.312 sec/step)
step 35810 	 loss = 0.496, train_acc = 0.800 (3.283 sec/step)
step 35820 	 loss = 0.005, train_acc = 1.000 (3.358 sec/step)
step 35830 	 loss = 0.238, train_acc = 1.000 (3.337 sec/step)
step 35840 	 loss = 1.900, train_acc = 0.700 (3.299 sec/step)
step 35850 	 loss = 0.064, train_acc = 1.000 (3.311 sec/step)
step 35860 	 loss = 0.333, train_acc = 0.900 (3.268 sec/step)
step 35870 	 loss = 0.017, train_acc = 1.000 (3.279 sec/step)
step 35880 	 loss = 0.014, train_acc = 1.000 (3.333 sec/step)
step 35890 	 loss = 0.570, train_acc = 0.600 (3.250 sec/step)
step 35900 	 loss = 0.267, train_acc = 1.000 (3.324 sec/step)
step 35910 	 loss = 0.229, train_acc = 0.900 (3.334 sec/step)
step 35920 	 loss = 0.063, train_acc = 1.000 (3.297 sec/step)
step 35930 	 loss = 0.393, train_acc = 0.900 (3.361 sec/step)
step 35940 	 loss = 0.170, train_acc = 0.900 (3.301 sec/step)
step 35950 	 loss = 0.372, train_acc = 0.900 (3.326 sec/step)
step 35960 	 loss = 0.011, train_acc = 1.000 (3.317 sec/step)
step 35970 	 loss = 0.009, train_acc = 1.000 (3.322 sec/step)
step 35980 	 loss = 0.310, train_acc = 0.900 (3.326 sec/step)
step 35990 	 loss = 0.311, train_acc = 0.900 (3.261 sec/step)
step 36000 	 loss = 0.349, train_acc = 0.900 (3.312 sec/step)
step 36010 	 loss = 1.281, train_acc = 0.700 (3.309 sec/step)
step 36020 	 loss = 0.099, train_acc = 0.900 (3.324 sec/step)
step 36030 	 loss = 1.412, train_acc = 0.600 (3.302 sec/step)
step 36040 	 loss = 1.029, train_acc = 0.600 (3.281 sec/step)
step 36050 	 loss = 0.151, train_acc = 1.000 (3.263 sec/step)
step 36060 	 loss = 1.142, train_acc = 0.700 (3.317 sec/step)
step 36070 	 loss = 1.882, train_acc = 0.600 (3.274 sec/step)
step 36080 	 loss = 0.465, train_acc = 0.900 (3.290 sec/step)
step 36090 	 loss = 0.902, train_acc = 0.700 (3.292 sec/step)
VALIDATION 	 acc = 0.509 (3.634 sec)
step 36100 	 loss = 0.179, train_acc = 0.900 (3.352 sec/step)
step 36110 	 loss = 0.150, train_acc = 1.000 (3.299 sec/step)
step 36120 	 loss = 0.272, train_acc = 0.900 (3.445 sec/step)
step 36130 	 loss = 0.133, train_acc = 0.900 (3.286 sec/step)
step 36140 	 loss = 1.044, train_acc = 0.900 (3.278 sec/step)
step 36150 	 loss = 0.549, train_acc = 0.900 (3.256 sec/step)
step 36160 	 loss = 0.389, train_acc = 0.900 (3.302 sec/step)
step 36170 	 loss = 0.128, train_acc = 0.900 (3.283 sec/step)
step 36180 	 loss = 0.000, train_acc = 1.000 (3.324 sec/step)
step 36190 	 loss = 0.046, train_acc = 1.000 (3.258 sec/step)
step 36200 	 loss = 0.248, train_acc = 0.900 (3.327 sec/step)
step 36210 	 loss = 0.344, train_acc = 0.900 (3.333 sec/step)
step 36220 	 loss = 0.044, train_acc = 1.000 (3.325 sec/step)
step 36230 	 loss = 0.394, train_acc = 0.900 (3.298 sec/step)
step 36240 	 loss = 0.839, train_acc = 0.900 (3.311 sec/step)
step 36250 	 loss = 0.041, train_acc = 1.000 (3.320 sec/step)
step 36260 	 loss = 0.609, train_acc = 0.900 (3.325 sec/step)
step 36270 	 loss = 0.086, train_acc = 0.900 (3.336 sec/step)
step 36280 	 loss = 0.053, train_acc = 1.000 (3.352 sec/step)
step 36290 	 loss = 0.731, train_acc = 0.900 (3.360 sec/step)
step 36300 	 loss = 0.567, train_acc = 0.900 (3.317 sec/step)
step 36310 	 loss = 0.187, train_acc = 0.900 (3.332 sec/step)
step 36320 	 loss = 0.442, train_acc = 0.800 (3.306 sec/step)
step 36330 	 loss = 0.065, train_acc = 1.000 (3.418 sec/step)
step 36340 	 loss = 0.331, train_acc = 0.900 (3.321 sec/step)
step 36350 	 loss = 0.067, train_acc = 1.000 (3.258 sec/step)
step 36360 	 loss = 0.614, train_acc = 0.800 (3.317 sec/step)
step 36370 	 loss = 0.014, train_acc = 1.000 (3.332 sec/step)
step 36380 	 loss = 0.066, train_acc = 1.000 (3.314 sec/step)
step 36390 	 loss = 0.293, train_acc = 0.900 (3.281 sec/step)
step 36400 	 loss = 0.916, train_acc = 0.900 (3.357 sec/step)
step 36410 	 loss = 0.017, train_acc = 1.000 (3.267 sec/step)
step 36420 	 loss = 0.022, train_acc = 1.000 (3.351 sec/step)
step 36430 	 loss = 0.019, train_acc = 1.000 (3.316 sec/step)
step 36440 	 loss = 0.013, train_acc = 1.000 (3.289 sec/step)
step 36450 	 loss = 0.124, train_acc = 0.900 (3.299 sec/step)
step 36460 	 loss = 1.535, train_acc = 0.800 (3.313 sec/step)
step 36470 	 loss = 0.026, train_acc = 1.000 (3.327 sec/step)
step 36480 	 loss = 0.001, train_acc = 1.000 (3.318 sec/step)
step 36490 	 loss = 0.030, train_acc = 1.000 (3.311 sec/step)
step 36500 	 loss = 0.838, train_acc = 0.800 (3.280 sec/step)
step 36510 	 loss = 0.765, train_acc = 0.800 (3.296 sec/step)
step 36520 	 loss = 0.020, train_acc = 1.000 (3.277 sec/step)
step 36530 	 loss = 0.289, train_acc = 0.900 (3.344 sec/step)
step 36540 	 loss = 0.013, train_acc = 1.000 (3.331 sec/step)
step 36550 	 loss = 0.302, train_acc = 0.800 (3.266 sec/step)
step 36560 	 loss = 0.001, train_acc = 1.000 (3.334 sec/step)
step 36570 	 loss = 0.463, train_acc = 0.900 (3.310 sec/step)
step 36580 	 loss = 0.866, train_acc = 0.800 (3.409 sec/step)
step 36590 	 loss = 0.054, train_acc = 1.000 (3.248 sec/step)
step 36600 	 loss = 0.007, train_acc = 1.000 (3.282 sec/step)
step 36610 	 loss = 0.002, train_acc = 1.000 (3.268 sec/step)
step 36620 	 loss = 0.152, train_acc = 0.900 (3.322 sec/step)
step 36630 	 loss = 0.092, train_acc = 1.000 (3.319 sec/step)
step 36640 	 loss = 0.921, train_acc = 0.800 (3.347 sec/step)
step 36650 	 loss = 0.049, train_acc = 1.000 (3.279 sec/step)
step 36660 	 loss = 0.970, train_acc = 0.900 (3.273 sec/step)
step 36670 	 loss = 0.303, train_acc = 0.900 (3.361 sec/step)
step 36680 	 loss = 0.002, train_acc = 1.000 (3.344 sec/step)
step 36690 	 loss = 0.767, train_acc = 0.700 (3.341 sec/step)
step 36700 	 loss = 0.620, train_acc = 0.800 (3.285 sec/step)
step 36710 	 loss = 0.018, train_acc = 1.000 (3.265 sec/step)
step 36720 	 loss = 0.002, train_acc = 1.000 (3.361 sec/step)
step 36730 	 loss = 0.234, train_acc = 0.900 (3.285 sec/step)
step 36740 	 loss = 0.071, train_acc = 1.000 (3.323 sec/step)
step 36750 	 loss = 0.000, train_acc = 1.000 (3.303 sec/step)
step 36760 	 loss = 1.732, train_acc = 0.800 (3.288 sec/step)
step 36770 	 loss = 0.098, train_acc = 0.900 (3.320 sec/step)
step 36780 	 loss = 0.583, train_acc = 0.900 (3.317 sec/step)
step 36790 	 loss = 0.015, train_acc = 1.000 (3.285 sec/step)
step 36800 	 loss = 0.724, train_acc = 0.900 (3.286 sec/step)
step 36810 	 loss = 0.098, train_acc = 1.000 (3.263 sec/step)
step 36820 	 loss = 0.691, train_acc = 0.900 (3.326 sec/step)
step 36830 	 loss = 0.213, train_acc = 1.000 (3.335 sec/step)
step 36840 	 loss = 0.077, train_acc = 0.900 (3.293 sec/step)
step 36850 	 loss = 0.655, train_acc = 0.900 (3.264 sec/step)
step 36860 	 loss = 0.400, train_acc = 0.800 (3.270 sec/step)
step 36870 	 loss = 0.077, train_acc = 1.000 (3.379 sec/step)
step 36880 	 loss = 0.205, train_acc = 0.900 (3.355 sec/step)
step 36890 	 loss = 0.003, train_acc = 1.000 (3.298 sec/step)
step 36900 	 loss = 0.157, train_acc = 0.900 (3.297 sec/step)
step 36910 	 loss = 0.211, train_acc = 0.900 (3.375 sec/step)
step 36920 	 loss = 0.017, train_acc = 1.000 (3.330 sec/step)
step 36930 	 loss = 0.624, train_acc = 0.900 (3.292 sec/step)
step 36940 	 loss = 0.145, train_acc = 1.000 (3.354 sec/step)
step 36950 	 loss = 0.100, train_acc = 0.900 (3.330 sec/step)
step 36960 	 loss = 0.009, train_acc = 1.000 (3.323 sec/step)
step 36970 	 loss = 0.011, train_acc = 1.000 (3.347 sec/step)
step 36980 	 loss = 0.959, train_acc = 0.900 (3.339 sec/step)
step 36990 	 loss = 0.376, train_acc = 0.900 (3.356 sec/step)
step 37000 	 loss = 0.198, train_acc = 1.000 (3.270 sec/step)
step 37010 	 loss = 0.064, train_acc = 1.000 (3.258 sec/step)
step 37020 	 loss = 0.017, train_acc = 1.000 (3.306 sec/step)
step 37030 	 loss = 0.001, train_acc = 1.000 (3.400 sec/step)
step 37040 	 loss = 0.019, train_acc = 1.000 (3.330 sec/step)
step 37050 	 loss = 0.049, train_acc = 1.000 (3.287 sec/step)
step 37060 	 loss = 0.075, train_acc = 1.000 (3.287 sec/step)
step 37070 	 loss = 0.307, train_acc = 0.900 (3.369 sec/step)
step 37080 	 loss = 0.187, train_acc = 0.900 (3.335 sec/step)
step 37090 	 loss = 0.951, train_acc = 0.800 (3.337 sec/step)
step 37100 	 loss = 0.017, train_acc = 1.000 (3.298 sec/step)
step 37110 	 loss = 0.031, train_acc = 1.000 (3.290 sec/step)
step 37120 	 loss = 0.225, train_acc = 0.900 (3.358 sec/step)
step 37130 	 loss = 0.145, train_acc = 0.900 (3.292 sec/step)
step 37140 	 loss = 0.079, train_acc = 1.000 (3.310 sec/step)
step 37150 	 loss = 0.016, train_acc = 1.000 (3.282 sec/step)
step 37160 	 loss = 0.016, train_acc = 1.000 (3.281 sec/step)
step 37170 	 loss = 0.099, train_acc = 1.000 (3.288 sec/step)
step 37180 	 loss = 0.025, train_acc = 1.000 (3.302 sec/step)
step 37190 	 loss = 0.003, train_acc = 1.000 (3.297 sec/step)
step 37200 	 loss = 0.011, train_acc = 1.000 (3.314 sec/step)
step 37210 	 loss = 0.776, train_acc = 0.800 (3.315 sec/step)
step 37220 	 loss = 0.004, train_acc = 1.000 (3.288 sec/step)
step 37230 	 loss = 0.222, train_acc = 0.900 (3.275 sec/step)
step 37240 	 loss = 0.692, train_acc = 0.700 (3.311 sec/step)
step 37250 	 loss = 0.033, train_acc = 1.000 (3.449 sec/step)
step 37260 	 loss = 0.100, train_acc = 1.000 (3.281 sec/step)
step 37270 	 loss = 0.002, train_acc = 1.000 (3.320 sec/step)
step 37280 	 loss = 1.029, train_acc = 0.900 (3.332 sec/step)
step 37290 	 loss = 0.308, train_acc = 0.900 (3.293 sec/step)
step 37300 	 loss = 0.091, train_acc = 1.000 (3.296 sec/step)
step 37310 	 loss = 0.678, train_acc = 0.800 (3.317 sec/step)
step 37320 	 loss = 0.536, train_acc = 0.900 (3.341 sec/step)
step 37330 	 loss = 0.049, train_acc = 1.000 (3.338 sec/step)
step 37340 	 loss = 0.074, train_acc = 1.000 (3.448 sec/step)
step 37350 	 loss = 0.095, train_acc = 1.000 (3.320 sec/step)
step 37360 	 loss = 0.007, train_acc = 1.000 (3.294 sec/step)
step 37370 	 loss = 0.143, train_acc = 0.900 (3.321 sec/step)
step 37380 	 loss = 0.120, train_acc = 0.900 (3.312 sec/step)
step 37390 	 loss = 1.175, train_acc = 0.800 (3.367 sec/step)
step 37400 	 loss = 0.000, train_acc = 1.000 (3.348 sec/step)
step 37410 	 loss = 0.016, train_acc = 1.000 (3.363 sec/step)
step 37420 	 loss = 0.199, train_acc = 0.900 (3.316 sec/step)
step 37430 	 loss = 0.304, train_acc = 0.900 (3.257 sec/step)
step 37440 	 loss = 0.293, train_acc = 0.800 (3.304 sec/step)
step 37450 	 loss = 0.174, train_acc = 0.900 (3.332 sec/step)
step 37460 	 loss = 0.039, train_acc = 1.000 (3.326 sec/step)
step 37470 	 loss = 0.055, train_acc = 1.000 (3.291 sec/step)
step 37480 	 loss = 0.028, train_acc = 1.000 (3.308 sec/step)
step 37490 	 loss = 0.040, train_acc = 1.000 (3.343 sec/step)
step 37500 	 loss = 0.035, train_acc = 1.000 (3.315 sec/step)
step 37510 	 loss = 0.097, train_acc = 1.000 (3.289 sec/step)
step 37520 	 loss = 0.225, train_acc = 0.900 (3.300 sec/step)
step 37530 	 loss = 0.426, train_acc = 0.800 (3.352 sec/step)
step 37540 	 loss = 0.341, train_acc = 0.800 (3.357 sec/step)
step 37550 	 loss = 0.450, train_acc = 0.900 (3.303 sec/step)
step 37560 	 loss = 0.032, train_acc = 1.000 (3.311 sec/step)
step 37570 	 loss = 0.003, train_acc = 1.000 (3.364 sec/step)
step 37580 	 loss = 0.602, train_acc = 0.800 (3.349 sec/step)
step 37590 	 loss = 0.131, train_acc = 0.900 (3.339 sec/step)
step 37600 	 loss = 0.087, train_acc = 1.000 (3.343 sec/step)
step 37610 	 loss = 0.001, train_acc = 1.000 (3.341 sec/step)
step 37620 	 loss = 0.117, train_acc = 0.900 (3.327 sec/step)
step 37630 	 loss = 0.511, train_acc = 0.900 (3.293 sec/step)
step 37640 	 loss = 0.356, train_acc = 0.800 (3.327 sec/step)
step 37650 	 loss = 0.250, train_acc = 0.900 (3.270 sec/step)
step 37660 	 loss = 0.219, train_acc = 0.900 (3.291 sec/step)
step 37670 	 loss = 0.008, train_acc = 1.000 (3.295 sec/step)
step 37680 	 loss = 0.005, train_acc = 1.000 (3.328 sec/step)
step 37690 	 loss = 0.157, train_acc = 1.000 (3.309 sec/step)
step 37700 	 loss = 0.060, train_acc = 1.000 (3.301 sec/step)
step 37710 	 loss = 0.499, train_acc = 0.900 (3.305 sec/step)
step 37720 	 loss = 0.067, train_acc = 1.000 (3.307 sec/step)
step 37730 	 loss = 0.283, train_acc = 0.900 (3.297 sec/step)
step 37740 	 loss = 0.386, train_acc = 0.900 (3.302 sec/step)
step 37750 	 loss = 0.793, train_acc = 0.800 (3.409 sec/step)
step 37760 	 loss = 0.314, train_acc = 0.800 (3.290 sec/step)
step 37770 	 loss = 0.171, train_acc = 1.000 (3.316 sec/step)
step 37780 	 loss = 0.001, train_acc = 1.000 (3.464 sec/step)
step 37790 	 loss = 0.169, train_acc = 0.900 (3.306 sec/step)
step 37800 	 loss = 0.180, train_acc = 0.900 (3.306 sec/step)
step 37810 	 loss = 0.219, train_acc = 0.900 (3.275 sec/step)
step 37820 	 loss = 0.162, train_acc = 0.900 (3.304 sec/step)
step 37830 	 loss = 0.041, train_acc = 1.000 (3.348 sec/step)
step 37840 	 loss = 0.408, train_acc = 0.900 (3.335 sec/step)
step 37850 	 loss = 0.035, train_acc = 1.000 (3.303 sec/step)
step 37860 	 loss = 0.048, train_acc = 1.000 (3.325 sec/step)
step 37870 	 loss = 0.051, train_acc = 1.000 (3.401 sec/step)
step 37880 	 loss = 1.381, train_acc = 0.700 (3.399 sec/step)
step 37890 	 loss = 0.050, train_acc = 1.000 (3.326 sec/step)
step 37900 	 loss = 0.167, train_acc = 0.900 (3.356 sec/step)
step 37910 	 loss = 0.070, train_acc = 1.000 (3.325 sec/step)
step 37920 	 loss = 0.062, train_acc = 1.000 (3.265 sec/step)
step 37930 	 loss = 0.326, train_acc = 0.800 (3.342 sec/step)
step 37940 	 loss = 0.025, train_acc = 1.000 (3.359 sec/step)
step 37950 	 loss = 0.808, train_acc = 0.700 (3.352 sec/step)
step 37960 	 loss = 0.237, train_acc = 0.900 (3.352 sec/step)
step 37970 	 loss = 0.130, train_acc = 0.900 (3.358 sec/step)
step 37980 	 loss = 0.094, train_acc = 1.000 (3.448 sec/step)
step 37990 	 loss = 1.008, train_acc = 0.700 (3.320 sec/step)
VALIDATION 	 acc = 0.563 (3.612 sec)
New Best Accuracy 0.563 > Old Best 0.561.  Saving...
The checkpoint has been created.
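(The "New Best Accuracy ... Saving..." / "The checkpoint has been created." messages above come from the snapshotting branch of the training loop: a checkpoint is written only when the latest validation accuracy beats the previous best. Below is a minimal, illustrative sketch of that logic, assuming a tf.train.Saver-based routine; best_val_acc, maybe_snapshot, and the checkpoint filename are assumptions for illustration, not the notebook's actual cell.)

# Hedged sketch, not the notebook's actual code: save a snapshot only when
# validation accuracy improves; with max_to_keep=SNAPSHOT_MAX the saver
# retains the last SNAPSHOT_MAX checkpoints written, i.e. the best ones.
saver = tf.train.Saver(max_to_keep=SNAPSHOT_MAX)
best_val_acc = 0.0

def maybe_snapshot(sess, step, val_acc):
    """Write a checkpoint to SNAPSHOT_DIR when val_acc beats the best so far."""
    global best_val_acc
    if val_acc > best_val_acc:
        print('New Best Accuracy %.3f > Old Best %.3f.  Saving...' % (val_acc, best_val_acc))
        best_val_acc = val_acc
        saver.save(sess, os.path.join(SNAPSHOT_DIR, 'model'), global_step=step)
        print('The checkpoint has been created.')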
step 38000 	 loss = 0.041, train_acc = 1.000 (3.308 sec/step)
step 38010 	 loss = 0.073, train_acc = 1.000 (3.287 sec/step)
step 38020 	 loss = 0.012, train_acc = 1.000 (3.322 sec/step)
step 38030 	 loss = 0.453, train_acc = 0.900 (3.308 sec/step)
step 38040 	 loss = 0.002, train_acc = 1.000 (3.328 sec/step)
step 38050 	 loss = 0.271, train_acc = 0.900 (3.276 sec/step)
step 38060 	 loss = 0.388, train_acc = 0.700 (3.282 sec/step)
step 38070 	 loss = 0.056, train_acc = 1.000 (3.277 sec/step)
step 38080 	 loss = 0.242, train_acc = 0.900 (3.454 sec/step)
step 38090 	 loss = 0.014, train_acc = 1.000 (3.312 sec/step)
step 38100 	 loss = 0.449, train_acc = 0.900 (3.286 sec/step)
step 38110 	 loss = 0.004, train_acc = 1.000 (3.282 sec/step)
step 38120 	 loss = 0.150, train_acc = 0.900 (3.307 sec/step)
step 38130 	 loss = 0.016, train_acc = 1.000 (3.309 sec/step)
step 38140 	 loss = 0.701, train_acc = 0.900 (3.316 sec/step)
step 38150 	 loss = 0.197, train_acc = 0.800 (3.329 sec/step)
step 38160 	 loss = 0.336, train_acc = 0.900 (3.354 sec/step)
step 38170 	 loss = 0.498, train_acc = 0.900 (3.377 sec/step)
step 38180 	 loss = 0.011, train_acc = 1.000 (3.280 sec/step)
step 38190 	 loss = 0.009, train_acc = 1.000 (3.345 sec/step)
step 38200 	 loss = 1.322, train_acc = 0.900 (3.276 sec/step)
step 38210 	 loss = 0.706, train_acc = 0.600 (3.287 sec/step)
step 38220 	 loss = 0.442, train_acc = 0.800 (3.308 sec/step)
step 38230 	 loss = 0.040, train_acc = 1.000 (3.320 sec/step)
step 38240 	 loss = 0.023, train_acc = 1.000 (3.390 sec/step)
step 38250 	 loss = 0.126, train_acc = 0.900 (3.372 sec/step)
step 38260 	 loss = 0.150, train_acc = 0.900 (3.327 sec/step)
step 38270 	 loss = 0.210, train_acc = 0.900 (3.298 sec/step)
step 38280 	 loss = 0.142, train_acc = 1.000 (3.310 sec/step)
step 38290 	 loss = 0.939, train_acc = 0.900 (3.344 sec/step)
step 38300 	 loss = 0.369, train_acc = 0.900 (3.302 sec/step)
step 38310 	 loss = 1.301, train_acc = 0.700 (3.334 sec/step)
step 38320 	 loss = 0.130, train_acc = 1.000 (3.294 sec/step)
step 38330 	 loss = 0.985, train_acc = 0.700 (3.275 sec/step)
step 38340 	 loss = 0.048, train_acc = 1.000 (3.276 sec/step)
step 38350 	 loss = 0.001, train_acc = 1.000 (3.291 sec/step)
step 38360 	 loss = 0.016, train_acc = 1.000 (3.275 sec/step)
step 38370 	 loss = 0.141, train_acc = 0.900 (3.330 sec/step)
step 38380 	 loss = 0.079, train_acc = 0.900 (3.269 sec/step)
step 38390 	 loss = 0.094, train_acc = 0.900 (3.358 sec/step)
step 38400 	 loss = 0.089, train_acc = 0.900 (3.309 sec/step)
step 38410 	 loss = 0.509, train_acc = 0.900 (3.288 sec/step)
step 38420 	 loss = 0.072, train_acc = 1.000 (3.364 sec/step)
step 38430 	 loss = 0.081, train_acc = 1.000 (3.346 sec/step)
step 38440 	 loss = 0.104, train_acc = 1.000 (3.267 sec/step)
step 38450 	 loss = 0.014, train_acc = 1.000 (3.337 sec/step)
step 38460 	 loss = 0.878, train_acc = 0.900 (3.359 sec/step)
step 38470 	 loss = 0.048, train_acc = 1.000 (3.271 sec/step)
step 38480 	 loss = 0.102, train_acc = 1.000 (3.296 sec/step)
step 38490 	 loss = 0.197, train_acc = 0.900 (3.293 sec/step)
step 38500 	 loss = 0.619, train_acc = 0.900 (3.325 sec/step)
step 38510 	 loss = 0.204, train_acc = 0.900 (3.358 sec/step)
step 38520 	 loss = 0.016, train_acc = 1.000 (3.345 sec/step)
step 38530 	 loss = 0.162, train_acc = 0.900 (3.318 sec/step)
step 38540 	 loss = 1.357, train_acc = 0.500 (3.334 sec/step)
step 38550 	 loss = 0.003, train_acc = 1.000 (3.347 sec/step)
step 38560 	 loss = 0.002, train_acc = 1.000 (3.308 sec/step)
step 38570 	 loss = 0.047, train_acc = 1.000 (3.311 sec/step)
step 38580 	 loss = 0.004, train_acc = 1.000 (3.305 sec/step)
step 38590 	 loss = 0.028, train_acc = 1.000 (3.317 sec/step)
step 38600 	 loss = 0.132, train_acc = 1.000 (3.318 sec/step)
step 38610 	 loss = 0.202, train_acc = 0.900 (3.307 sec/step)
step 38620 	 loss = 0.001, train_acc = 1.000 (3.296 sec/step)
step 38630 	 loss = 0.727, train_acc = 0.600 (3.365 sec/step)
step 38640 	 loss = 0.081, train_acc = 1.000 (3.287 sec/step)
step 38650 	 loss = 0.007, train_acc = 1.000 (3.343 sec/step)
step 38660 	 loss = 0.025, train_acc = 1.000 (3.272 sec/step)
step 38670 	 loss = 0.006, train_acc = 1.000 (3.298 sec/step)
step 38680 	 loss = 0.047, train_acc = 1.000 (3.376 sec/step)
step 38690 	 loss = 0.743, train_acc = 0.800 (3.386 sec/step)
step 38700 	 loss = 0.112, train_acc = 1.000 (3.282 sec/step)
step 38710 	 loss = 0.571, train_acc = 0.800 (3.359 sec/step)
step 38720 	 loss = 0.055, train_acc = 1.000 (3.294 sec/step)
step 38730 	 loss = 0.436, train_acc = 0.900 (3.269 sec/step)
step 38740 	 loss = 0.000, train_acc = 1.000 (3.312 sec/step)
step 38750 	 loss = 0.082, train_acc = 1.000 (3.334 sec/step)
step 38760 	 loss = 0.062, train_acc = 1.000 (3.342 sec/step)
step 38770 	 loss = 0.020, train_acc = 1.000 (3.295 sec/step)
step 38780 	 loss = 0.003, train_acc = 1.000 (3.308 sec/step)
step 38790 	 loss = 0.014, train_acc = 1.000 (3.287 sec/step)
step 38800 	 loss = 0.054, train_acc = 1.000 (3.365 sec/step)
step 38810 	 loss = 0.564, train_acc = 0.800 (3.320 sec/step)
step 38820 	 loss = 0.622, train_acc = 0.900 (3.319 sec/step)
step 38830 	 loss = 0.004, train_acc = 1.000 (3.369 sec/step)
step 38840 	 loss = 0.128, train_acc = 1.000 (3.311 sec/step)
step 38850 	 loss = 0.564, train_acc = 0.900 (3.297 sec/step)
step 38860 	 loss = 0.019, train_acc = 1.000 (3.276 sec/step)
step 38870 	 loss = 0.701, train_acc = 0.800 (3.319 sec/step)
step 38880 	 loss = 0.083, train_acc = 1.000 (3.259 sec/step)
step 38890 	 loss = 0.012, train_acc = 1.000 (3.361 sec/step)
step 38900 	 loss = 1.662, train_acc = 0.700 (3.405 sec/step)
step 38910 	 loss = 0.036, train_acc = 1.000 (3.276 sec/step)
step 38920 	 loss = 0.060, train_acc = 1.000 (3.301 sec/step)
step 38930 	 loss = 0.004, train_acc = 1.000 (3.337 sec/step)
step 38940 	 loss = 0.004, train_acc = 1.000 (3.301 sec/step)
step 38950 	 loss = 0.484, train_acc = 0.900 (3.320 sec/step)
step 38960 	 loss = 0.005, train_acc = 1.000 (3.309 sec/step)
step 38970 	 loss = 0.070, train_acc = 1.000 (3.346 sec/step)
step 38980 	 loss = 0.045, train_acc = 1.000 (3.321 sec/step)
step 38990 	 loss = 0.259, train_acc = 0.900 (3.297 sec/step)
step 39000 	 loss = 0.006, train_acc = 1.000 (3.329 sec/step)
step 39010 	 loss = 0.077, train_acc = 1.000 (3.313 sec/step)
step 39020 	 loss = 0.143, train_acc = 0.900 (3.265 sec/step)
step 39030 	 loss = 0.027, train_acc = 1.000 (3.284 sec/step)
step 39040 	 loss = 0.213, train_acc = 0.900 (3.339 sec/step)
step 39050 	 loss = 0.261, train_acc = 1.000 (3.318 sec/step)
step 39060 	 loss = 0.486, train_acc = 0.800 (3.339 sec/step)
step 39070 	 loss = 0.201, train_acc = 0.900 (3.266 sec/step)
step 39080 	 loss = 0.423, train_acc = 0.800 (3.292 sec/step)
step 39090 	 loss = 0.713, train_acc = 0.800 (3.316 sec/step)
step 39100 	 loss = 0.743, train_acc = 0.900 (3.380 sec/step)
step 39110 	 loss = 3.242, train_acc = 0.800 (3.344 sec/step)
step 39120 	 loss = 0.139, train_acc = 0.900 (3.300 sec/step)
step 39130 	 loss = 1.179, train_acc = 0.700 (3.341 sec/step)
step 39140 	 loss = 0.527, train_acc = 0.800 (3.263 sec/step)
step 39150 	 loss = 0.384, train_acc = 0.900 (3.275 sec/step)
step 39160 	 loss = 0.230, train_acc = 0.900 (3.297 sec/step)
step 39170 	 loss = 0.141, train_acc = 0.900 (3.288 sec/step)
step 39180 	 loss = 0.096, train_acc = 1.000 (3.301 sec/step)
step 39190 	 loss = 0.027, train_acc = 1.000 (3.289 sec/step)
step 39200 	 loss = 0.537, train_acc = 0.900 (3.321 sec/step)
step 39210 	 loss = 0.122, train_acc = 0.900 (3.328 sec/step)
step 39220 	 loss = 0.237, train_acc = 0.900 (3.317 sec/step)
step 39230 	 loss = 0.001, train_acc = 1.000 (3.273 sec/step)
step 39240 	 loss = 0.001, train_acc = 1.000 (3.265 sec/step)
step 39250 	 loss = 0.021, train_acc = 1.000 (3.310 sec/step)
step 39260 	 loss = 0.232, train_acc = 0.900 (3.338 sec/step)
step 39270 	 loss = 0.361, train_acc = 0.900 (3.351 sec/step)
step 39280 	 loss = 0.614, train_acc = 0.900 (3.320 sec/step)
step 39290 	 loss = 0.754, train_acc = 0.900 (3.312 sec/step)
step 39300 	 loss = 0.795, train_acc = 0.900 (3.292 sec/step)
step 39310 	 loss = 0.099, train_acc = 1.000 (3.372 sec/step)
step 39320 	 loss = 0.775, train_acc = 0.800 (3.364 sec/step)
step 39330 	 loss = 0.033, train_acc = 1.000 (3.343 sec/step)
step 39340 	 loss = 0.005, train_acc = 1.000 (3.348 sec/step)
step 39350 	 loss = 0.577, train_acc = 0.900 (3.322 sec/step)
step 39360 	 loss = 0.171, train_acc = 0.900 (3.307 sec/step)
step 39370 	 loss = 0.903, train_acc = 0.800 (3.317 sec/step)
step 39380 	 loss = 0.008, train_acc = 1.000 (3.332 sec/step)
step 39390 	 loss = 0.020, train_acc = 1.000 (3.322 sec/step)
step 39400 	 loss = 0.799, train_acc = 0.800 (3.288 sec/step)
step 39410 	 loss = 0.028, train_acc = 1.000 (3.275 sec/step)
step 39420 	 loss = 0.105, train_acc = 0.900 (3.320 sec/step)
step 39430 	 loss = 0.054, train_acc = 1.000 (3.278 sec/step)
step 39440 	 loss = 0.136, train_acc = 0.900 (3.346 sec/step)
step 39450 	 loss = 0.237, train_acc = 0.900 (3.327 sec/step)
step 39460 	 loss = 0.043, train_acc = 1.000 (3.305 sec/step)
step 39470 	 loss = 0.333, train_acc = 0.800 (3.352 sec/step)
step 39480 	 loss = 0.493, train_acc = 0.900 (3.383 sec/step)
step 39490 	 loss = 0.032, train_acc = 1.000 (3.341 sec/step)
step 39500 	 loss = 0.179, train_acc = 0.900 (3.327 sec/step)
step 39510 	 loss = 0.362, train_acc = 0.900 (3.351 sec/step)
step 39520 	 loss = 0.160, train_acc = 0.900 (3.262 sec/step)
step 39530 	 loss = 0.337, train_acc = 0.900 (3.292 sec/step)
step 39540 	 loss = 0.193, train_acc = 0.900 (3.271 sec/step)
step 39550 	 loss = 0.032, train_acc = 1.000 (3.367 sec/step)
step 39560 	 loss = 0.480, train_acc = 0.800 (3.339 sec/step)
step 39570 	 loss = 0.199, train_acc = 0.900 (3.304 sec/step)
step 39580 	 loss = 0.534, train_acc = 0.900 (3.297 sec/step)
step 39590 	 loss = 0.008, train_acc = 1.000 (3.354 sec/step)
step 39600 	 loss = 0.384, train_acc = 0.800 (3.299 sec/step)
step 39610 	 loss = 0.011, train_acc = 1.000 (3.268 sec/step)
step 39620 	 loss = 0.017, train_acc = 1.000 (3.261 sec/step)
step 39630 	 loss = 0.584, train_acc = 0.700 (3.292 sec/step)
step 39640 	 loss = 1.750, train_acc = 0.600 (3.389 sec/step)
step 39650 	 loss = 0.027, train_acc = 1.000 (3.261 sec/step)
step 39660 	 loss = 1.284, train_acc = 0.600 (3.281 sec/step)
step 39670 	 loss = 0.740, train_acc = 0.800 (3.323 sec/step)
step 39680 	 loss = 0.156, train_acc = 0.900 (3.363 sec/step)
step 39690 	 loss = 0.000, train_acc = 1.000 (3.300 sec/step)
step 39700 	 loss = 0.019, train_acc = 1.000 (3.342 sec/step)
step 39710 	 loss = 0.001, train_acc = 1.000 (3.333 sec/step)
step 39720 	 loss = 0.168, train_acc = 0.900 (3.271 sec/step)
step 39730 	 loss = 0.417, train_acc = 0.900 (3.378 sec/step)
step 39740 	 loss = 0.002, train_acc = 1.000 (3.318 sec/step)
step 39750 	 loss = 0.086, train_acc = 1.000 (3.337 sec/step)
step 39760 	 loss = 1.011, train_acc = 0.800 (3.322 sec/step)
step 39770 	 loss = 0.539, train_acc = 0.900 (3.260 sec/step)
step 39780 	 loss = 0.000, train_acc = 1.000 (3.337 sec/step)
step 39790 	 loss = 0.085, train_acc = 1.000 (3.388 sec/step)
step 39800 	 loss = 0.146, train_acc = 1.000 (3.320 sec/step)
step 39810 	 loss = 0.630, train_acc = 0.900 (3.356 sec/step)
step 39820 	 loss = 0.022, train_acc = 1.000 (3.362 sec/step)
step 39830 	 loss = 0.439, train_acc = 0.800 (3.267 sec/step)
step 39840 	 loss = 0.988, train_acc = 0.800 (3.356 sec/step)
step 39850 	 loss = 0.135, train_acc = 1.000 (3.446 sec/step)
step 39860 	 loss = 0.003, train_acc = 1.000 (3.267 sec/step)
step 39870 	 loss = 0.207, train_acc = 0.900 (3.335 sec/step)
step 39880 	 loss = 0.024, train_acc = 1.000 (3.312 sec/step)
step 39890 	 loss = 0.024, train_acc = 1.000 (3.297 sec/step)
VALIDATION 	 acc = 0.555 (3.625 sec)
step 39900 	 loss = 0.009, train_acc = 1.000 (3.283 sec/step)
step 39910 	 loss = 0.019, train_acc = 1.000 (3.302 sec/step)
step 39920 	 loss = 0.124, train_acc = 1.000 (3.374 sec/step)
step 39930 	 loss = 0.582, train_acc = 0.900 (3.387 sec/step)
step 39940 	 loss = 0.329, train_acc = 0.900 (3.295 sec/step)
step 39950 	 loss = 0.310, train_acc = 0.800 (3.359 sec/step)
step 39960 	 loss = 0.004, train_acc = 1.000 (3.279 sec/step)
step 39970 	 loss = 0.537, train_acc = 0.800 (3.334 sec/step)
step 39980 	 loss = 0.000, train_acc = 1.000 (3.296 sec/step)
step 39990 	 loss = 0.152, train_acc = 0.900 (3.298 sec/step)
step 40000 	 loss = 0.020, train_acc = 1.000 (3.320 sec/step)
step 40010 	 loss = 0.002, train_acc = 1.000 (3.324 sec/step)
step 40020 	 loss = 0.318, train_acc = 0.900 (3.298 sec/step)
step 40030 	 loss = 0.542, train_acc = 0.900 (3.326 sec/step)
step 40040 	 loss = 1.337, train_acc = 0.900 (3.350 sec/step)
step 40050 	 loss = 0.010, train_acc = 1.000 (3.266 sec/step)
step 40060 	 loss = 0.092, train_acc = 1.000 (3.353 sec/step)
step 40070 	 loss = 0.505, train_acc = 0.900 (3.338 sec/step)
step 40080 	 loss = 0.596, train_acc = 0.900 (3.317 sec/step)
step 40090 	 loss = 0.178, train_acc = 0.900 (3.324 sec/step)
step 40100 	 loss = 0.357, train_acc = 0.900 (3.321 sec/step)
step 40110 	 loss = 0.064, train_acc = 1.000 (3.328 sec/step)
step 40120 	 loss = 0.268, train_acc = 0.800 (3.344 sec/step)
step 40130 	 loss = 0.627, train_acc = 0.900 (3.325 sec/step)
step 40140 	 loss = 0.487, train_acc = 0.900 (3.313 sec/step)
step 40150 	 loss = 0.457, train_acc = 0.700 (3.349 sec/step)
step 40160 	 loss = 0.035, train_acc = 1.000 (3.381 sec/step)
step 40170 	 loss = 0.001, train_acc = 1.000 (3.329 sec/step)
step 40180 	 loss = 0.189, train_acc = 0.900 (3.268 sec/step)
step 40190 	 loss = 0.912, train_acc = 0.900 (3.352 sec/step)
step 40200 	 loss = 0.002, train_acc = 1.000 (3.279 sec/step)
step 40210 	 loss = 0.697, train_acc = 0.800 (3.325 sec/step)
step 40220 	 loss = 0.003, train_acc = 1.000 (3.341 sec/step)
step 40230 	 loss = 0.202, train_acc = 0.900 (3.289 sec/step)
step 40240 	 loss = 0.058, train_acc = 1.000 (3.296 sec/step)
step 40250 	 loss = 0.104, train_acc = 1.000 (3.295 sec/step)
step 40260 	 loss = 0.364, train_acc = 0.800 (3.340 sec/step)
step 40270 	 loss = 0.009, train_acc = 1.000 (3.312 sec/step)
step 40280 	 loss = 0.164, train_acc = 0.900 (3.324 sec/step)
step 40290 	 loss = 0.015, train_acc = 1.000 (3.310 sec/step)
step 40300 	 loss = 0.123, train_acc = 0.900 (3.280 sec/step)
step 40310 	 loss = 0.003, train_acc = 1.000 (3.298 sec/step)
step 40320 	 loss = 0.533, train_acc = 0.700 (3.296 sec/step)
step 40330 	 loss = 0.099, train_acc = 1.000 (3.369 sec/step)
step 40340 	 loss = 0.206, train_acc = 0.900 (3.334 sec/step)
step 40350 	 loss = 0.202, train_acc = 0.800 (3.289 sec/step)
step 40360 	 loss = 0.001, train_acc = 1.000 (3.392 sec/step)
step 40370 	 loss = 0.000, train_acc = 1.000 (3.342 sec/step)
step 40380 	 loss = 0.043, train_acc = 1.000 (3.312 sec/step)
step 40390 	 loss = 0.365, train_acc = 0.700 (3.316 sec/step)
step 40400 	 loss = 0.030, train_acc = 1.000 (3.287 sec/step)
step 40410 	 loss = 0.364, train_acc = 0.900 (3.265 sec/step)
step 40420 	 loss = 0.013, train_acc = 1.000 (3.293 sec/step)
step 40430 	 loss = 0.387, train_acc = 0.900 (3.304 sec/step)
step 40440 	 loss = 0.101, train_acc = 1.000 (3.267 sec/step)
step 40450 	 loss = 0.930, train_acc = 0.800 (3.352 sec/step)
step 40460 	 loss = 0.667, train_acc = 0.900 (3.393 sec/step)
step 40470 	 loss = 0.106, train_acc = 1.000 (3.329 sec/step)
step 40480 	 loss = 0.070, train_acc = 1.000 (3.407 sec/step)
step 40490 	 loss = 0.497, train_acc = 0.900 (3.318 sec/step)
step 40500 	 loss = 0.073, train_acc = 1.000 (3.396 sec/step)
step 40510 	 loss = 0.008, train_acc = 1.000 (3.274 sec/step)
step 40520 	 loss = 0.019, train_acc = 1.000 (3.324 sec/step)
step 40530 	 loss = 0.183, train_acc = 0.900 (3.305 sec/step)
step 40540 	 loss = 0.324, train_acc = 0.800 (3.275 sec/step)
step 40550 	 loss = 0.763, train_acc = 0.800 (3.324 sec/step)
step 40560 	 loss = 0.355, train_acc = 0.900 (3.303 sec/step)
step 40570 	 loss = 0.420, train_acc = 0.800 (3.271 sec/step)
step 40580 	 loss = 0.001, train_acc = 1.000 (3.353 sec/step)
step 40590 	 loss = 0.420, train_acc = 0.800 (3.291 sec/step)
step 40600 	 loss = 0.049, train_acc = 1.000 (3.292 sec/step)
step 40610 	 loss = 0.197, train_acc = 0.900 (3.335 sec/step)
step 40620 	 loss = 0.186, train_acc = 0.900 (3.343 sec/step)
step 40630 	 loss = 0.312, train_acc = 0.900 (3.298 sec/step)
step 40640 	 loss = 0.006, train_acc = 1.000 (3.318 sec/step)
step 40650 	 loss = 0.062, train_acc = 1.000 (3.322 sec/step)
step 40660 	 loss = 0.002, train_acc = 1.000 (3.318 sec/step)
step 40670 	 loss = 0.603, train_acc = 0.900 (3.331 sec/step)
step 40680 	 loss = 0.133, train_acc = 1.000 (3.298 sec/step)
step 40690 	 loss = 0.077, train_acc = 1.000 (3.331 sec/step)
step 40700 	 loss = 0.059, train_acc = 1.000 (3.276 sec/step)
step 40710 	 loss = 0.120, train_acc = 0.900 (3.291 sec/step)
step 40720 	 loss = 0.387, train_acc = 0.700 (3.298 sec/step)
step 40730 	 loss = 0.208, train_acc = 0.900 (3.325 sec/step)
step 40740 	 loss = 0.261, train_acc = 0.900 (3.346 sec/step)
step 40750 	 loss = 0.094, train_acc = 0.900 (3.280 sec/step)
step 40760 	 loss = 0.843, train_acc = 0.900 (3.336 sec/step)
step 40770 	 loss = 0.014, train_acc = 1.000 (3.322 sec/step)
step 40780 	 loss = 0.007, train_acc = 1.000 (3.312 sec/step)
step 40790 	 loss = 0.159, train_acc = 0.900 (3.296 sec/step)
step 40800 	 loss = 0.146, train_acc = 0.900 (3.282 sec/step)
step 40810 	 loss = 0.142, train_acc = 0.900 (3.316 sec/step)
step 40820 	 loss = 0.011, train_acc = 1.000 (3.359 sec/step)
step 40830 	 loss = 0.190, train_acc = 0.900 (3.257 sec/step)
step 40840 	 loss = 0.237, train_acc = 0.900 (3.298 sec/step)
step 40850 	 loss = 0.076, train_acc = 1.000 (3.334 sec/step)
step 40860 	 loss = 0.021, train_acc = 1.000 (3.303 sec/step)
step 40870 	 loss = 0.002, train_acc = 1.000 (3.302 sec/step)
step 40880 	 loss = 3.499, train_acc = 0.700 (3.302 sec/step)
step 40890 	 loss = 0.147, train_acc = 1.000 (3.466 sec/step)
step 40900 	 loss = 1.034, train_acc = 0.800 (3.312 sec/step)
step 40910 	 loss = 0.849, train_acc = 0.900 (3.339 sec/step)
step 40920 	 loss = 0.003, train_acc = 1.000 (3.347 sec/step)
step 40930 	 loss = 0.132, train_acc = 0.900 (3.330 sec/step)
step 40940 	 loss = 0.164, train_acc = 1.000 (3.323 sec/step)
step 40950 	 loss = 0.003, train_acc = 1.000 (3.301 sec/step)
step 40960 	 loss = 0.032, train_acc = 1.000 (3.332 sec/step)
step 40970 	 loss = 0.223, train_acc = 0.900 (3.303 sec/step)
step 40980 	 loss = 0.008, train_acc = 1.000 (3.311 sec/step)
step 40990 	 loss = 0.510, train_acc = 0.800 (3.286 sec/step)
step 41000 	 loss = 0.020, train_acc = 1.000 (3.350 sec/step)
step 41010 	 loss = 0.000, train_acc = 1.000 (3.274 sec/step)
step 41020 	 loss = 0.003, train_acc = 1.000 (3.330 sec/step)
step 41030 	 loss = 0.110, train_acc = 0.900 (3.341 sec/step)
step 41040 	 loss = 0.010, train_acc = 1.000 (3.330 sec/step)
step 41050 	 loss = 0.052, train_acc = 1.000 (3.364 sec/step)
step 41060 	 loss = 0.957, train_acc = 0.900 (3.330 sec/step)
step 41070 	 loss = 0.003, train_acc = 1.000 (3.289 sec/step)
step 41080 	 loss = 0.867, train_acc = 0.800 (3.303 sec/step)
step 41090 	 loss = 0.333, train_acc = 1.000 (3.313 sec/step)
step 41100 	 loss = 0.003, train_acc = 1.000 (3.320 sec/step)
step 41110 	 loss = 0.089, train_acc = 0.900 (3.309 sec/step)
step 41120 	 loss = 0.256, train_acc = 0.900 (3.295 sec/step)
step 41130 	 loss = 0.644, train_acc = 0.900 (3.404 sec/step)
step 41140 	 loss = 0.233, train_acc = 1.000 (3.328 sec/step)
step 41150 	 loss = 0.259, train_acc = 0.900 (3.350 sec/step)
step 41160 	 loss = 0.131, train_acc = 0.900 (3.305 sec/step)
step 41170 	 loss = 0.046, train_acc = 1.000 (3.308 sec/step)
step 41180 	 loss = 0.240, train_acc = 0.900 (3.356 sec/step)
step 41190 	 loss = 0.211, train_acc = 0.900 (3.382 sec/step)
step 41200 	 loss = 0.026, train_acc = 1.000 (3.302 sec/step)
step 41210 	 loss = 0.020, train_acc = 1.000 (3.295 sec/step)
step 41220 	 loss = 0.363, train_acc = 0.900 (3.301 sec/step)
step 41230 	 loss = 0.007, train_acc = 1.000 (3.350 sec/step)
step 41240 	 loss = 0.410, train_acc = 0.900 (3.375 sec/step)
step 41250 	 loss = 0.225, train_acc = 0.800 (3.299 sec/step)
step 41260 	 loss = 0.739, train_acc = 0.800 (3.341 sec/step)
step 41270 	 loss = 0.313, train_acc = 0.900 (3.277 sec/step)
step 41280 	 loss = 0.280, train_acc = 0.900 (3.316 sec/step)
step 41290 	 loss = 0.175, train_acc = 0.900 (3.315 sec/step)
step 41300 	 loss = 0.893, train_acc = 0.900 (3.323 sec/step)
step 41310 	 loss = 0.348, train_acc = 0.900 (3.281 sec/step)
step 41320 	 loss = 0.071, train_acc = 1.000 (3.313 sec/step)
step 41330 	 loss = 0.138, train_acc = 0.900 (3.344 sec/step)
step 41340 	 loss = 0.083, train_acc = 0.900 (3.368 sec/step)
step 41350 	 loss = 0.250, train_acc = 0.900 (3.337 sec/step)
step 41360 	 loss = 0.018, train_acc = 1.000 (3.354 sec/step)
step 41370 	 loss = 0.652, train_acc = 0.900 (3.318 sec/step)
step 41380 	 loss = 0.210, train_acc = 0.900 (3.299 sec/step)
step 41390 	 loss = 0.001, train_acc = 1.000 (3.314 sec/step)
step 41400 	 loss = 0.008, train_acc = 1.000 (3.316 sec/step)
step 41410 	 loss = 0.225, train_acc = 0.900 (3.323 sec/step)
step 41420 	 loss = 0.002, train_acc = 1.000 (3.306 sec/step)
step 41430 	 loss = 0.194, train_acc = 0.900 (3.306 sec/step)
step 41440 	 loss = 0.121, train_acc = 1.000 (3.336 sec/step)
step 41450 	 loss = 0.003, train_acc = 1.000 (3.259 sec/step)
step 41460 	 loss = 0.045, train_acc = 1.000 (3.319 sec/step)
step 41470 	 loss = 0.003, train_acc = 1.000 (3.305 sec/step)
step 41480 	 loss = 0.006, train_acc = 1.000 (3.342 sec/step)
step 41490 	 loss = 0.000, train_acc = 1.000 (3.286 sec/step)
step 41500 	 loss = 1.131, train_acc = 0.800 (3.346 sec/step)
step 41510 	 loss = 0.517, train_acc = 0.900 (3.319 sec/step)
step 41520 	 loss = 0.054, train_acc = 1.000 (3.336 sec/step)
step 41530 	 loss = 0.199, train_acc = 1.000 (3.346 sec/step)
step 41540 	 loss = 0.001, train_acc = 1.000 (3.478 sec/step)
step 41550 	 loss = 0.211, train_acc = 0.900 (3.346 sec/step)
step 41560 	 loss = 0.001, train_acc = 1.000 (3.340 sec/step)
step 41570 	 loss = 0.079, train_acc = 1.000 (3.322 sec/step)
step 41580 	 loss = 0.001, train_acc = 1.000 (3.323 sec/step)
step 41590 	 loss = 0.000, train_acc = 1.000 (3.285 sec/step)
step 41600 	 loss = 0.006, train_acc = 1.000 (3.324 sec/step)
step 41610 	 loss = 0.038, train_acc = 1.000 (3.307 sec/step)
step 41620 	 loss = 0.102, train_acc = 1.000 (3.310 sec/step)
step 41630 	 loss = 0.167, train_acc = 1.000 (3.323 sec/step)
step 41640 	 loss = 0.054, train_acc = 1.000 (3.333 sec/step)
step 41650 	 loss = 0.006, train_acc = 1.000 (3.336 sec/step)
step 41660 	 loss = 0.032, train_acc = 1.000 (3.316 sec/step)
step 41670 	 loss = 0.000, train_acc = 1.000 (3.361 sec/step)
step 41680 	 loss = 1.638, train_acc = 0.800 (3.393 sec/step)
step 41690 	 loss = 0.061, train_acc = 1.000 (3.292 sec/step)
step 41700 	 loss = 0.089, train_acc = 1.000 (3.280 sec/step)
step 41710 	 loss = 0.750, train_acc = 0.900 (3.344 sec/step)
step 41720 	 loss = 0.646, train_acc = 0.800 (3.268 sec/step)
step 41730 	 loss = 0.186, train_acc = 0.900 (3.368 sec/step)
step 41740 	 loss = 0.392, train_acc = 0.900 (3.328 sec/step)
step 41750 	 loss = 0.247, train_acc = 0.900 (3.310 sec/step)
step 41760 	 loss = 0.136, train_acc = 0.900 (3.321 sec/step)
step 41770 	 loss = 0.124, train_acc = 1.000 (3.428 sec/step)
step 41780 	 loss = 0.478, train_acc = 0.900 (3.344 sec/step)
step 41790 	 loss = 0.476, train_acc = 0.800 (3.345 sec/step)
VALIDATION 	 acc = 0.520 (3.633 sec)
step 41800 	 loss = 0.389, train_acc = 0.900 (3.304 sec/step)
step 41810 	 loss = 0.460, train_acc = 0.800 (3.281 sec/step)
step 41820 	 loss = 0.351, train_acc = 0.800 (3.290 sec/step)
step 41830 	 loss = 0.009, train_acc = 1.000 (3.405 sec/step)
step 41840 	 loss = 0.426, train_acc = 0.900 (3.350 sec/step)
step 41850 	 loss = 0.022, train_acc = 1.000 (3.346 sec/step)
step 41860 	 loss = 0.016, train_acc = 1.000 (3.299 sec/step)
step 41870 	 loss = 0.481, train_acc = 0.800 (3.266 sec/step)
step 41880 	 loss = 0.256, train_acc = 0.900 (3.327 sec/step)
step 41890 	 loss = 0.040, train_acc = 1.000 (3.304 sec/step)
step 41900 	 loss = 0.206, train_acc = 0.900 (3.289 sec/step)
step 41910 	 loss = 0.131, train_acc = 0.900 (3.273 sec/step)
step 41920 	 loss = 0.339, train_acc = 0.800 (3.318 sec/step)
step 41930 	 loss = 0.270, train_acc = 0.900 (3.303 sec/step)
step 41940 	 loss = 0.096, train_acc = 1.000 (3.322 sec/step)
step 41950 	 loss = 2.675, train_acc = 0.600 (3.363 sec/step)
step 41960 	 loss = 0.382, train_acc = 0.900 (3.383 sec/step)
step 41970 	 loss = 0.004, train_acc = 1.000 (3.357 sec/step)
step 41980 	 loss = 0.212, train_acc = 0.900 (3.291 sec/step)
step 41990 	 loss = 0.630, train_acc = 0.900 (3.276 sec/step)
step 42000 	 loss = 0.570, train_acc = 0.900 (3.480 sec/step)
step 42010 	 loss = 0.551, train_acc = 0.900 (3.301 sec/step)
step 42020 	 loss = 0.505, train_acc = 0.800 (3.314 sec/step)
step 42030 	 loss = 0.503, train_acc = 0.900 (3.352 sec/step)
step 42040 	 loss = 0.055, train_acc = 1.000 (3.328 sec/step)
step 42050 	 loss = 0.086, train_acc = 1.000 (3.374 sec/step)
step 42060 	 loss = 0.044, train_acc = 1.000 (3.324 sec/step)
step 42070 	 loss = 0.268, train_acc = 1.000 (3.333 sec/step)
step 42080 	 loss = 0.067, train_acc = 1.000 (3.344 sec/step)
step 42090 	 loss = 0.108, train_acc = 0.900 (3.332 sec/step)
step 42100 	 loss = 0.671, train_acc = 0.800 (3.316 sec/step)
step 42110 	 loss = 0.040, train_acc = 1.000 (3.319 sec/step)
step 42120 	 loss = 0.161, train_acc = 0.900 (3.319 sec/step)
step 42130 	 loss = 0.483, train_acc = 0.900 (3.344 sec/step)
step 42140 	 loss = 0.322, train_acc = 0.800 (3.289 sec/step)
step 42150 	 loss = 0.306, train_acc = 0.900 (3.304 sec/step)
step 42160 	 loss = 0.156, train_acc = 1.000 (3.325 sec/step)
step 42170 	 loss = 0.118, train_acc = 1.000 (3.354 sec/step)
step 42180 	 loss = 0.008, train_acc = 1.000 (3.331 sec/step)
step 42190 	 loss = 0.116, train_acc = 0.900 (3.342 sec/step)
step 42200 	 loss = 0.342, train_acc = 0.900 (3.296 sec/step)
step 42210 	 loss = 0.125, train_acc = 1.000 (3.360 sec/step)
step 42220 	 loss = 0.003, train_acc = 1.000 (3.308 sec/step)
step 42230 	 loss = 0.023, train_acc = 1.000 (3.298 sec/step)
step 42240 	 loss = 1.113, train_acc = 0.700 (3.279 sec/step)
step 42250 	 loss = 0.020, train_acc = 1.000 (3.360 sec/step)
step 42260 	 loss = 0.003, train_acc = 1.000 (3.332 sec/step)
step 42270 	 loss = 0.003, train_acc = 1.000 (3.317 sec/step)
step 42280 	 loss = 0.504, train_acc = 0.800 (3.352 sec/step)
step 42290 	 loss = 0.041, train_acc = 1.000 (3.310 sec/step)
step 42300 	 loss = 0.003, train_acc = 1.000 (3.290 sec/step)
step 42310 	 loss = 0.022, train_acc = 1.000 (3.312 sec/step)
step 42320 	 loss = 0.342, train_acc = 0.900 (3.305 sec/step)
step 42330 	 loss = 0.666, train_acc = 0.800 (3.362 sec/step)
step 42340 	 loss = 0.126, train_acc = 1.000 (3.314 sec/step)
step 42350 	 loss = 0.054, train_acc = 1.000 (3.282 sec/step)
step 42360 	 loss = 0.002, train_acc = 1.000 (3.345 sec/step)
step 42370 	 loss = 0.125, train_acc = 0.900 (3.286 sec/step)
step 42380 	 loss = 0.916, train_acc = 0.900 (3.306 sec/step)
step 42390 	 loss = 0.147, train_acc = 0.900 (3.336 sec/step)
step 42400 	 loss = 0.260, train_acc = 0.900 (3.280 sec/step)
step 42410 	 loss = 0.294, train_acc = 0.900 (3.374 sec/step)
step 42420 	 loss = 0.007, train_acc = 1.000 (3.330 sec/step)
step 42430 	 loss = 0.256, train_acc = 0.900 (3.334 sec/step)
step 42440 	 loss = 0.242, train_acc = 0.900 (3.303 sec/step)
step 42450 	 loss = 0.000, train_acc = 1.000 (3.294 sec/step)
step 42460 	 loss = 0.038, train_acc = 1.000 (3.268 sec/step)
step 42470 	 loss = 0.423, train_acc = 0.900 (3.296 sec/step)
step 42480 	 loss = 0.001, train_acc = 1.000 (3.278 sec/step)
step 42490 	 loss = 0.147, train_acc = 1.000 (3.286 sec/step)
step 42500 	 loss = 0.671, train_acc = 0.700 (3.313 sec/step)
step 42510 	 loss = 0.002, train_acc = 1.000 (3.307 sec/step)
step 42520 	 loss = 0.517, train_acc = 0.800 (3.260 sec/step)
step 42530 	 loss = 0.144, train_acc = 0.900 (3.324 sec/step)
step 42540 	 loss = 0.000, train_acc = 1.000 (3.357 sec/step)
step 42550 	 loss = 0.017, train_acc = 1.000 (3.250 sec/step)
step 42560 	 loss = 0.019, train_acc = 1.000 (3.269 sec/step)
step 42570 	 loss = 0.115, train_acc = 1.000 (3.322 sec/step)
step 42580 	 loss = 0.396, train_acc = 0.800 (3.335 sec/step)
step 42590 	 loss = 0.021, train_acc = 1.000 (3.297 sec/step)
step 42600 	 loss = 0.261, train_acc = 0.900 (3.306 sec/step)
step 42610 	 loss = 0.021, train_acc = 1.000 (3.339 sec/step)
step 42620 	 loss = 0.100, train_acc = 1.000 (3.308 sec/step)
step 42630 	 loss = 0.266, train_acc = 0.900 (3.293 sec/step)
step 42640 	 loss = 0.015, train_acc = 1.000 (3.338 sec/step)
step 42650 	 loss = 0.764, train_acc = 0.700 (3.283 sec/step)
step 42660 	 loss = 0.013, train_acc = 1.000 (3.310 sec/step)
step 42670 	 loss = 0.286, train_acc = 0.900 (3.364 sec/step)
step 42680 	 loss = 0.067, train_acc = 1.000 (3.284 sec/step)
step 42690 	 loss = 0.703, train_acc = 0.800 (3.305 sec/step)
step 42700 	 loss = 0.040, train_acc = 1.000 (3.300 sec/step)
step 42710 	 loss = 0.193, train_acc = 0.900 (3.268 sec/step)
step 42720 	 loss = 0.235, train_acc = 0.900 (3.447 sec/step)
step 42730 	 loss = 0.311, train_acc = 0.800 (3.288 sec/step)
step 42740 	 loss = 0.046, train_acc = 1.000 (3.419 sec/step)
step 42750 	 loss = 0.000, train_acc = 1.000 (3.319 sec/step)
step 42760 	 loss = 0.293, train_acc = 0.900 (3.319 sec/step)
step 42770 	 loss = 0.032, train_acc = 1.000 (3.299 sec/step)
step 42780 	 loss = 0.396, train_acc = 0.900 (3.304 sec/step)
step 42790 	 loss = 0.015, train_acc = 1.000 (3.315 sec/step)
step 42800 	 loss = 0.025, train_acc = 1.000 (3.302 sec/step)
step 42810 	 loss = 0.095, train_acc = 1.000 (3.307 sec/step)
step 42820 	 loss = 0.505, train_acc = 0.900 (3.317 sec/step)
step 42830 	 loss = 0.008, train_acc = 1.000 (3.323 sec/step)
step 42840 	 loss = 0.292, train_acc = 0.800 (3.337 sec/step)
step 42850 	 loss = 0.026, train_acc = 1.000 (3.339 sec/step)
step 42860 	 loss = 0.118, train_acc = 1.000 (3.335 sec/step)
step 42870 	 loss = 0.001, train_acc = 1.000 (3.344 sec/step)
step 42880 	 loss = 0.062, train_acc = 1.000 (3.264 sec/step)
step 42890 	 loss = 0.014, train_acc = 1.000 (3.296 sec/step)
step 42900 	 loss = 0.246, train_acc = 0.900 (3.321 sec/step)
step 42910 	 loss = 0.059, train_acc = 1.000 (3.333 sec/step)
step 42920 	 loss = 0.088, train_acc = 1.000 (3.331 sec/step)
step 42930 	 loss = 0.442, train_acc = 0.700 (3.346 sec/step)
step 42940 	 loss = 0.785, train_acc = 0.800 (3.281 sec/step)
step 42950 	 loss = 0.862, train_acc = 0.900 (3.286 sec/step)
step 42960 	 loss = 0.001, train_acc = 1.000 (3.317 sec/step)
step 42970 	 loss = 0.178, train_acc = 0.900 (3.306 sec/step)
step 42980 	 loss = 0.019, train_acc = 1.000 (3.295 sec/step)
step 42990 	 loss = 0.682, train_acc = 0.700 (3.310 sec/step)
step 43000 	 loss = 0.154, train_acc = 0.900 (3.327 sec/step)
step 43010 	 loss = 0.026, train_acc = 1.000 (3.306 sec/step)
step 43020 	 loss = 0.128, train_acc = 0.900 (3.293 sec/step)
step 43030 	 loss = 0.015, train_acc = 1.000 (3.340 sec/step)
step 43040 	 loss = 0.292, train_acc = 0.900 (3.301 sec/step)
step 43050 	 loss = 0.404, train_acc = 0.800 (3.337 sec/step)
step 43060 	 loss = 0.045, train_acc = 1.000 (3.326 sec/step)
step 43070 	 loss = 0.028, train_acc = 1.000 (3.317 sec/step)
step 43080 	 loss = 0.178, train_acc = 0.900 (3.316 sec/step)
step 43090 	 loss = 0.357, train_acc = 0.900 (3.357 sec/step)
step 43100 	 loss = 0.036, train_acc = 1.000 (3.314 sec/step)
step 43110 	 loss = 0.059, train_acc = 1.000 (3.276 sec/step)
step 43120 	 loss = 0.106, train_acc = 0.900 (3.286 sec/step)
step 43130 	 loss = 0.255, train_acc = 0.900 (3.303 sec/step)
step 43140 	 loss = 0.003, train_acc = 1.000 (3.291 sec/step)
step 43150 	 loss = 0.540, train_acc = 0.900 (3.322 sec/step)
step 43160 	 loss = 0.144, train_acc = 1.000 (3.309 sec/step)
step 43170 	 loss = 0.264, train_acc = 0.900 (3.296 sec/step)
step 43180 	 loss = 0.101, train_acc = 1.000 (3.301 sec/step)
step 43190 	 loss = 0.310, train_acc = 0.800 (3.333 sec/step)
step 43200 	 loss = 0.608, train_acc = 0.900 (3.285 sec/step)
step 43210 	 loss = 0.025, train_acc = 1.000 (3.290 sec/step)
step 43220 	 loss = 0.336, train_acc = 0.900 (3.316 sec/step)
step 43230 	 loss = 0.004, train_acc = 1.000 (3.264 sec/step)
step 43240 	 loss = 0.046, train_acc = 1.000 (3.315 sec/step)
step 43250 	 loss = 0.193, train_acc = 0.900 (3.327 sec/step)
step 43260 	 loss = 0.001, train_acc = 1.000 (3.364 sec/step)
step 43270 	 loss = 0.021, train_acc = 1.000 (3.354 sec/step)
step 43280 	 loss = 0.085, train_acc = 1.000 (3.305 sec/step)
step 43290 	 loss = 1.013, train_acc = 0.900 (3.298 sec/step)
step 43300 	 loss = 0.030, train_acc = 1.000 (3.318 sec/step)
step 43310 	 loss = 0.470, train_acc = 0.900 (3.302 sec/step)
step 43320 	 loss = 0.606, train_acc = 0.900 (3.294 sec/step)
step 43330 	 loss = 0.485, train_acc = 0.800 (3.275 sec/step)
step 43340 	 loss = 0.073, train_acc = 1.000 (3.339 sec/step)
step 43350 	 loss = 0.041, train_acc = 1.000 (3.357 sec/step)
step 43360 	 loss = 0.191, train_acc = 1.000 (3.362 sec/step)
step 43370 	 loss = 0.103, train_acc = 0.900 (3.315 sec/step)
step 43380 	 loss = 0.019, train_acc = 1.000 (3.306 sec/step)
step 43390 	 loss = 0.016, train_acc = 1.000 (3.438 sec/step)
step 43400 	 loss = 0.084, train_acc = 0.900 (3.318 sec/step)
step 43410 	 loss = 0.093, train_acc = 1.000 (3.345 sec/step)
step 43420 	 loss = 0.001, train_acc = 1.000 (3.330 sec/step)
step 43430 	 loss = 0.055, train_acc = 1.000 (3.357 sec/step)
step 43440 	 loss = 0.004, train_acc = 1.000 (3.318 sec/step)
step 43450 	 loss = 0.501, train_acc = 0.700 (3.357 sec/step)
step 43460 	 loss = 0.052, train_acc = 1.000 (3.319 sec/step)
step 43470 	 loss = 0.111, train_acc = 0.900 (3.326 sec/step)
step 43480 	 loss = 0.358, train_acc = 0.800 (3.320 sec/step)
step 43490 	 loss = 0.057, train_acc = 1.000 (3.309 sec/step)
step 43500 	 loss = 0.017, train_acc = 1.000 (3.342 sec/step)
step 43510 	 loss = 0.086, train_acc = 1.000 (3.361 sec/step)
step 43520 	 loss = 0.235, train_acc = 0.900 (3.284 sec/step)
step 43530 	 loss = 0.216, train_acc = 0.900 (3.279 sec/step)
step 43540 	 loss = 0.302, train_acc = 0.900 (3.272 sec/step)
step 43550 	 loss = 0.186, train_acc = 0.900 (3.346 sec/step)
step 43560 	 loss = 0.300, train_acc = 0.900 (3.323 sec/step)
step 43570 	 loss = 0.002, train_acc = 1.000 (3.340 sec/step)
step 43580 	 loss = 0.122, train_acc = 0.900 (3.321 sec/step)
step 43590 	 loss = 0.288, train_acc = 0.900 (3.374 sec/step)
step 43600 	 loss = 0.663, train_acc = 0.900 (3.281 sec/step)
step 43610 	 loss = 0.075, train_acc = 1.000 (3.317 sec/step)
step 43620 	 loss = 0.191, train_acc = 0.900 (3.280 sec/step)
step 43630 	 loss = 0.021, train_acc = 1.000 (3.301 sec/step)
step 43640 	 loss = 0.019, train_acc = 1.000 (3.300 sec/step)
step 43650 	 loss = 0.052, train_acc = 1.000 (3.339 sec/step)
step 43660 	 loss = 0.004, train_acc = 1.000 (3.283 sec/step)
step 43670 	 loss = 0.155, train_acc = 0.900 (3.353 sec/step)
step 43680 	 loss = 0.764, train_acc = 0.800 (3.281 sec/step)
step 43690 	 loss = 0.050, train_acc = 1.000 (3.330 sec/step)
VALIDATION 	 acc = 0.543 (3.631 sec)
step 43700 	 loss = 0.941, train_acc = 0.700 (3.344 sec/step)
step 43710 	 loss = 0.079, train_acc = 1.000 (3.295 sec/step)
step 43720 	 loss = 1.023, train_acc = 0.700 (3.295 sec/step)
step 43730 	 loss = 0.234, train_acc = 0.900 (3.330 sec/step)
step 43740 	 loss = 0.000, train_acc = 1.000 (3.374 sec/step)
step 43750 	 loss = 0.885, train_acc = 0.800 (3.318 sec/step)
step 43760 	 loss = 0.006, train_acc = 1.000 (3.315 sec/step)
step 43770 	 loss = 0.000, train_acc = 1.000 (3.266 sec/step)
step 43780 	 loss = 0.217, train_acc = 0.900 (3.329 sec/step)
step 43790 	 loss = 0.181, train_acc = 0.900 (3.364 sec/step)
step 43800 	 loss = 0.999, train_acc = 0.800 (3.299 sec/step)
step 43810 	 loss = 0.460, train_acc = 0.900 (3.333 sec/step)
step 43820 	 loss = 0.125, train_acc = 0.900 (3.290 sec/step)
step 43830 	 loss = 0.937, train_acc = 0.900 (3.355 sec/step)
step 43840 	 loss = 0.101, train_acc = 1.000 (3.330 sec/step)
step 43850 	 loss = 0.008, train_acc = 1.000 (3.306 sec/step)
step 43860 	 loss = 0.056, train_acc = 1.000 (3.346 sec/step)
step 43870 	 loss = 0.062, train_acc = 1.000 (3.324 sec/step)
step 43880 	 loss = 0.054, train_acc = 1.000 (3.350 sec/step)
step 43890 	 loss = 0.375, train_acc = 0.900 (3.316 sec/step)
step 43900 	 loss = 0.302, train_acc = 0.900 (3.447 sec/step)
step 43910 	 loss = 0.516, train_acc = 0.900 (3.279 sec/step)
step 43920 	 loss = 0.064, train_acc = 1.000 (3.306 sec/step)
step 43930 	 loss = 0.001, train_acc = 1.000 (3.307 sec/step)
step 43940 	 loss = 0.605, train_acc = 0.900 (3.355 sec/step)
step 43950 	 loss = 0.132, train_acc = 1.000 (3.307 sec/step)
step 43960 	 loss = 0.003, train_acc = 1.000 (3.359 sec/step)
step 43970 	 loss = 0.132, train_acc = 0.900 (3.352 sec/step)
step 43980 	 loss = 0.013, train_acc = 1.000 (3.306 sec/step)
step 43990 	 loss = 0.236, train_acc = 0.900 (3.327 sec/step)
step 44000 	 loss = 0.002, train_acc = 1.000 (3.276 sec/step)
step 44010 	 loss = 0.754, train_acc = 0.800 (3.317 sec/step)
step 44020 	 loss = 0.232, train_acc = 1.000 (3.315 sec/step)
step 44030 	 loss = 0.149, train_acc = 0.900 (3.351 sec/step)
step 44040 	 loss = 0.151, train_acc = 0.900 (3.311 sec/step)
step 44050 	 loss = 0.579, train_acc = 0.900 (3.295 sec/step)
step 44060 	 loss = 0.049, train_acc = 1.000 (3.317 sec/step)
step 44070 	 loss = 0.057, train_acc = 1.000 (3.317 sec/step)
step 44080 	 loss = 0.000, train_acc = 1.000 (3.281 sec/step)
step 44090 	 loss = 0.140, train_acc = 0.900 (3.360 sec/step)
step 44100 	 loss = 0.330, train_acc = 0.900 (3.301 sec/step)
step 44110 	 loss = 0.596, train_acc = 0.800 (3.312 sec/step)
step 44120 	 loss = 0.558, train_acc = 0.800 (3.347 sec/step)
step 44130 	 loss = 0.018, train_acc = 1.000 (3.287 sec/step)
step 44140 	 loss = 0.276, train_acc = 0.900 (3.275 sec/step)
step 44150 	 loss = 0.608, train_acc = 0.900 (3.308 sec/step)
step 44160 	 loss = 0.934, train_acc = 0.800 (3.339 sec/step)
step 44170 	 loss = 0.001, train_acc = 1.000 (3.357 sec/step)
step 44180 	 loss = 0.256, train_acc = 0.900 (3.296 sec/step)
step 44190 	 loss = 0.431, train_acc = 0.900 (3.326 sec/step)
step 44200 	 loss = 0.075, train_acc = 1.000 (3.274 sec/step)
step 44210 	 loss = 0.233, train_acc = 0.800 (3.300 sec/step)
step 44220 	 loss = 0.062, train_acc = 1.000 (3.298 sec/step)
step 44230 	 loss = 0.249, train_acc = 0.900 (3.299 sec/step)
step 44240 	 loss = 0.652, train_acc = 0.900 (3.325 sec/step)
step 44250 	 loss = 0.109, train_acc = 1.000 (3.339 sec/step)
step 44260 	 loss = 0.001, train_acc = 1.000 (3.282 sec/step)
step 44270 	 loss = 0.019, train_acc = 1.000 (3.287 sec/step)
step 44280 	 loss = 0.002, train_acc = 1.000 (3.289 sec/step)
step 44290 	 loss = 0.371, train_acc = 0.800 (3.342 sec/step)
step 44300 	 loss = 0.443, train_acc = 0.800 (3.319 sec/step)
step 44310 	 loss = 0.003, train_acc = 1.000 (3.298 sec/step)
step 44320 	 loss = 0.001, train_acc = 1.000 (3.304 sec/step)
step 44330 	 loss = 0.185, train_acc = 0.900 (3.304 sec/step)
step 44340 	 loss = 0.880, train_acc = 0.900 (3.351 sec/step)
step 44350 	 loss = 0.001, train_acc = 1.000 (3.306 sec/step)
step 44360 	 loss = 0.031, train_acc = 1.000 (3.344 sec/step)
step 44370 	 loss = 0.176, train_acc = 1.000 (3.278 sec/step)
step 44380 	 loss = 0.006, train_acc = 1.000 (3.453 sec/step)
step 44390 	 loss = 0.232, train_acc = 0.900 (3.282 sec/step)
step 44400 	 loss = 0.553, train_acc = 0.800 (3.281 sec/step)
step 44410 	 loss = 0.024, train_acc = 1.000 (3.352 sec/step)
step 44420 	 loss = 0.001, train_acc = 1.000 (3.273 sec/step)
step 44430 	 loss = 0.113, train_acc = 1.000 (3.348 sec/step)
step 44440 	 loss = 0.146, train_acc = 0.900 (3.359 sec/step)
step 44450 	 loss = 0.048, train_acc = 1.000 (3.404 sec/step)
step 44460 	 loss = 0.302, train_acc = 0.900 (3.323 sec/step)
step 44470 	 loss = 0.185, train_acc = 1.000 (3.265 sec/step)
step 44480 	 loss = 0.012, train_acc = 1.000 (3.299 sec/step)
step 44490 	 loss = 0.008, train_acc = 1.000 (3.368 sec/step)
step 44500 	 loss = 0.025, train_acc = 1.000 (3.308 sec/step)
step 44510 	 loss = 0.411, train_acc = 0.900 (3.346 sec/step)
step 44520 	 loss = 0.001, train_acc = 1.000 (3.338 sec/step)
step 44530 	 loss = 0.045, train_acc = 1.000 (3.310 sec/step)
step 44540 	 loss = 0.597, train_acc = 0.800 (3.358 sec/step)
step 44550 	 loss = 0.112, train_acc = 1.000 (3.299 sec/step)
step 44560 	 loss = 0.509, train_acc = 0.900 (3.282 sec/step)
step 44570 	 loss = 0.005, train_acc = 1.000 (3.332 sec/step)
step 44580 	 loss = 0.054, train_acc = 1.000 (3.371 sec/step)
step 44590 	 loss = 0.000, train_acc = 1.000 (3.386 sec/step)
step 44600 	 loss = 0.023, train_acc = 1.000 (3.280 sec/step)
step 44610 	 loss = 0.040, train_acc = 1.000 (3.347 sec/step)
step 44620 	 loss = 0.427, train_acc = 0.900 (3.352 sec/step)
step 44630 	 loss = 0.253, train_acc = 0.900 (3.302 sec/step)
step 44640 	 loss = 0.063, train_acc = 1.000 (3.345 sec/step)
step 44650 	 loss = 0.001, train_acc = 1.000 (3.322 sec/step)
step 44660 	 loss = 0.099, train_acc = 1.000 (3.348 sec/step)
step 44670 	 loss = 0.845, train_acc = 0.900 (3.332 sec/step)
step 44680 	 loss = 0.001, train_acc = 1.000 (3.319 sec/step)
step 44690 	 loss = 0.924, train_acc = 0.900 (3.385 sec/step)
step 44700 	 loss = 0.329, train_acc = 0.800 (3.340 sec/step)
step 44710 	 loss = 0.056, train_acc = 1.000 (3.323 sec/step)
step 44720 	 loss = 0.017, train_acc = 1.000 (3.354 sec/step)
step 44730 	 loss = 0.000, train_acc = 1.000 (3.337 sec/step)
step 44740 	 loss = 0.589, train_acc = 0.900 (3.323 sec/step)
step 44750 	 loss = 0.957, train_acc = 0.900 (3.331 sec/step)
step 44760 	 loss = 0.317, train_acc = 0.900 (3.315 sec/step)
step 44770 	 loss = 0.174, train_acc = 0.900 (3.347 sec/step)
step 44780 	 loss = 0.088, train_acc = 1.000 (3.289 sec/step)
step 44790 	 loss = 2.106, train_acc = 0.800 (3.355 sec/step)
step 44800 	 loss = 0.006, train_acc = 1.000 (3.281 sec/step)
step 44810 	 loss = 0.151, train_acc = 0.900 (3.272 sec/step)
step 44820 	 loss = 0.001, train_acc = 1.000 (3.284 sec/step)
step 44830 	 loss = 0.136, train_acc = 1.000 (3.307 sec/step)
step 44840 	 loss = 0.055, train_acc = 1.000 (3.333 sec/step)
step 44850 	 loss = 0.475, train_acc = 0.900 (3.306 sec/step)
step 44860 	 loss = 0.007, train_acc = 1.000 (3.336 sec/step)
step 44870 	 loss = 0.015, train_acc = 1.000 (3.349 sec/step)
step 44880 	 loss = 0.039, train_acc = 1.000 (3.410 sec/step)
step 44890 	 loss = 0.295, train_acc = 0.900 (3.342 sec/step)
step 44900 	 loss = 0.201, train_acc = 0.900 (3.326 sec/step)
step 44910 	 loss = 0.287, train_acc = 0.900 (3.324 sec/step)
step 44920 	 loss = 0.097, train_acc = 1.000 (3.288 sec/step)
step 44930 	 loss = 0.236, train_acc = 0.900 (3.343 sec/step)
step 44940 	 loss = 0.014, train_acc = 1.000 (3.354 sec/step)
step 44950 	 loss = 0.010, train_acc = 1.000 (3.366 sec/step)
step 44960 	 loss = 0.062, train_acc = 1.000 (3.374 sec/step)
step 44970 	 loss = 0.836, train_acc = 0.900 (3.334 sec/step)
step 44980 	 loss = 0.001, train_acc = 1.000 (3.306 sec/step)
step 44990 	 loss = 0.062, train_acc = 1.000 (3.343 sec/step)
step 45000 	 loss = 0.001, train_acc = 1.000 (3.307 sec/step)
step 45010 	 loss = 0.242, train_acc = 0.900 (3.369 sec/step)
step 45020 	 loss = 0.069, train_acc = 1.000 (3.385 sec/step)
step 45030 	 loss = 0.003, train_acc = 1.000 (3.426 sec/step)
step 45040 	 loss = 0.707, train_acc = 0.900 (3.326 sec/step)
step 45050 	 loss = 0.407, train_acc = 0.900 (3.344 sec/step)
step 45060 	 loss = 0.205, train_acc = 0.900 (3.314 sec/step)
step 45070 	 loss = 0.021, train_acc = 1.000 (3.302 sec/step)
step 45080 	 loss = 0.023, train_acc = 1.000 (3.364 sec/step)
step 45090 	 loss = 0.174, train_acc = 0.900 (3.283 sec/step)
step 45100 	 loss = 0.351, train_acc = 0.800 (3.358 sec/step)
step 45110 	 loss = 0.183, train_acc = 1.000 (3.325 sec/step)
step 45120 	 loss = 0.002, train_acc = 1.000 (3.334 sec/step)
step 45130 	 loss = 1.899, train_acc = 0.900 (3.337 sec/step)
step 45140 	 loss = 0.226, train_acc = 0.800 (3.336 sec/step)
step 45150 	 loss = 1.213, train_acc = 0.800 (3.299 sec/step)
step 45160 	 loss = 0.132, train_acc = 0.900 (3.372 sec/step)
step 45170 	 loss = 0.053, train_acc = 1.000 (3.322 sec/step)
step 45180 	 loss = 0.071, train_acc = 1.000 (3.291 sec/step)
step 45190 	 loss = 0.157, train_acc = 0.900 (3.327 sec/step)
step 45200 	 loss = 0.572, train_acc = 0.900 (3.328 sec/step)
step 45210 	 loss = 0.002, train_acc = 1.000 (3.272 sec/step)
step 45220 	 loss = 0.001, train_acc = 1.000 (3.304 sec/step)
step 45230 	 loss = 0.031, train_acc = 1.000 (3.316 sec/step)
step 45240 	 loss = 0.086, train_acc = 1.000 (3.272 sec/step)
step 45250 	 loss = 0.004, train_acc = 1.000 (3.319 sec/step)
step 45260 	 loss = 0.029, train_acc = 1.000 (3.361 sec/step)
step 45270 	 loss = 0.353, train_acc = 0.700 (3.381 sec/step)
step 45280 	 loss = 0.030, train_acc = 1.000 (3.370 sec/step)
step 45290 	 loss = 0.113, train_acc = 0.900 (3.292 sec/step)
step 45300 	 loss = 0.163, train_acc = 0.900 (3.305 sec/step)
step 45310 	 loss = 0.305, train_acc = 0.900 (3.337 sec/step)
step 45320 	 loss = 0.000, train_acc = 1.000 (3.392 sec/step)
step 45330 	 loss = 0.003, train_acc = 1.000 (3.321 sec/step)
step 45340 	 loss = 0.039, train_acc = 1.000 (3.281 sec/step)
step 45350 	 loss = 0.059, train_acc = 1.000 (3.307 sec/step)
step 45360 	 loss = 0.185, train_acc = 0.900 (3.319 sec/step)
step 45370 	 loss = 0.029, train_acc = 1.000 (3.282 sec/step)
step 45380 	 loss = 0.068, train_acc = 1.000 (3.274 sec/step)
step 45390 	 loss = 0.004, train_acc = 1.000 (3.359 sec/step)
step 45400 	 loss = 0.001, train_acc = 1.000 (3.336 sec/step)
step 45410 	 loss = 0.730, train_acc = 0.900 (3.311 sec/step)
step 45420 	 loss = 0.060, train_acc = 1.000 (3.346 sec/step)
step 45430 	 loss = 0.100, train_acc = 1.000 (3.345 sec/step)
step 45440 	 loss = 0.269, train_acc = 0.900 (3.315 sec/step)
step 45450 	 loss = 0.023, train_acc = 1.000 (3.415 sec/step)
step 45460 	 loss = 0.036, train_acc = 1.000 (3.316 sec/step)
step 45470 	 loss = 0.000, train_acc = 1.000 (3.336 sec/step)
step 45480 	 loss = 0.010, train_acc = 1.000 (3.297 sec/step)
step 45490 	 loss = 0.000, train_acc = 1.000 (3.351 sec/step)
step 45500 	 loss = 0.628, train_acc = 0.800 (3.343 sec/step)
step 45510 	 loss = 0.188, train_acc = 0.900 (3.337 sec/step)
step 45520 	 loss = 3.747, train_acc = 0.900 (3.357 sec/step)
step 45530 	 loss = 0.220, train_acc = 0.900 (3.300 sec/step)
step 45540 	 loss = 0.501, train_acc = 0.900 (3.295 sec/step)
step 45550 	 loss = 0.104, train_acc = 1.000 (3.304 sec/step)
step 45560 	 loss = 0.297, train_acc = 0.900 (3.396 sec/step)
step 45570 	 loss = 0.030, train_acc = 1.000 (3.325 sec/step)
step 45580 	 loss = 0.017, train_acc = 1.000 (3.307 sec/step)
step 45590 	 loss = 0.940, train_acc = 0.800 (3.321 sec/step)
VALIDATION 	 acc = 0.545 (3.618 sec)
step 45600 	 loss = 0.291, train_acc = 0.900 (3.336 sec/step)
step 45610 	 loss = 0.378, train_acc = 0.900 (3.352 sec/step)
step 45620 	 loss = 0.119, train_acc = 1.000 (3.316 sec/step)
step 45630 	 loss = 0.110, train_acc = 1.000 (3.282 sec/step)
step 45640 	 loss = 0.262, train_acc = 0.800 (3.347 sec/step)
step 45650 	 loss = 0.123, train_acc = 1.000 (3.284 sec/step)
step 45660 	 loss = 0.003, train_acc = 1.000 (3.342 sec/step)
step 45670 	 loss = 0.230, train_acc = 0.900 (3.325 sec/step)
step 45680 	 loss = 0.014, train_acc = 1.000 (3.343 sec/step)
step 45690 	 loss = 0.002, train_acc = 1.000 (3.326 sec/step)
step 45700 	 loss = 0.362, train_acc = 0.900 (3.275 sec/step)
step 45710 	 loss = 0.074, train_acc = 1.000 (3.335 sec/step)
step 45720 	 loss = 0.477, train_acc = 0.800 (3.408 sec/step)
step 45730 	 loss = 0.142, train_acc = 0.900 (3.402 sec/step)
step 45740 	 loss = 0.512, train_acc = 0.900 (3.344 sec/step)
step 45750 	 loss = 0.001, train_acc = 1.000 (3.320 sec/step)
step 45760 	 loss = 0.601, train_acc = 0.800 (3.408 sec/step)
step 45770 	 loss = 0.584, train_acc = 0.900 (3.322 sec/step)
step 45780 	 loss = 0.798, train_acc = 0.900 (3.309 sec/step)
step 45790 	 loss = 0.190, train_acc = 1.000 (3.307 sec/step)
step 45800 	 loss = 0.112, train_acc = 1.000 (3.268 sec/step)
step 45810 	 loss = 0.039, train_acc = 1.000 (3.300 sec/step)
step 45820 	 loss = 0.020, train_acc = 1.000 (3.343 sec/step)
step 45830 	 loss = 0.321, train_acc = 0.900 (3.318 sec/step)
step 45840 	 loss = 0.454, train_acc = 0.800 (3.326 sec/step)
step 45850 	 loss = 0.271, train_acc = 0.900 (3.297 sec/step)
step 45860 	 loss = 0.078, train_acc = 1.000 (3.348 sec/step)
step 45870 	 loss = 0.444, train_acc = 0.800 (3.393 sec/step)
step 45880 	 loss = 0.080, train_acc = 1.000 (3.336 sec/step)
step 45890 	 loss = 0.234, train_acc = 0.900 (3.318 sec/step)
step 45900 	 loss = 1.558, train_acc = 0.600 (3.298 sec/step)
step 45910 	 loss = 0.251, train_acc = 0.900 (3.365 sec/step)
step 45920 	 loss = 0.699, train_acc = 0.900 (3.348 sec/step)
step 45930 	 loss = 0.354, train_acc = 0.900 (3.366 sec/step)
step 45940 	 loss = 0.363, train_acc = 0.900 (3.276 sec/step)
step 45950 	 loss = 0.334, train_acc = 0.900 (3.342 sec/step)
step 45960 	 loss = 0.619, train_acc = 0.800 (3.371 sec/step)
step 45970 	 loss = 0.001, train_acc = 1.000 (3.374 sec/step)
step 45980 	 loss = 0.013, train_acc = 1.000 (3.407 sec/step)
step 45990 	 loss = 0.389, train_acc = 0.900 (3.303 sec/step)
step 46000 	 loss = 0.007, train_acc = 1.000 (3.302 sec/step)
step 46010 	 loss = 0.108, train_acc = 1.000 (3.368 sec/step)
step 46020 	 loss = 0.205, train_acc = 0.800 (3.298 sec/step)
step 46030 	 loss = 0.010, train_acc = 1.000 (3.378 sec/step)
step 46040 	 loss = 0.140, train_acc = 0.900 (3.366 sec/step)
step 46050 	 loss = 0.189, train_acc = 1.000 (3.423 sec/step)
step 46060 	 loss = 0.227, train_acc = 0.900 (3.295 sec/step)
step 46070 	 loss = 1.088, train_acc = 0.800 (3.303 sec/step)
step 46080 	 loss = 0.088, train_acc = 0.900 (3.328 sec/step)
step 46090 	 loss = 0.173, train_acc = 0.900 (3.285 sec/step)
step 46100 	 loss = 0.567, train_acc = 0.700 (3.302 sec/step)
step 46110 	 loss = 0.200, train_acc = 0.900 (3.350 sec/step)
step 46120 	 loss = 0.160, train_acc = 0.900 (3.323 sec/step)
step 46130 	 loss = 0.535, train_acc = 0.900 (3.432 sec/step)
step 46140 	 loss = 0.005, train_acc = 1.000 (3.320 sec/step)
step 46150 	 loss = 0.174, train_acc = 0.900 (3.360 sec/step)
step 46160 	 loss = 0.406, train_acc = 0.800 (3.271 sec/step)
step 46170 	 loss = 0.030, train_acc = 1.000 (3.307 sec/step)
step 46180 	 loss = 0.193, train_acc = 1.000 (3.311 sec/step)
step 46190 	 loss = 0.029, train_acc = 1.000 (3.332 sec/step)
step 46200 	 loss = 0.211, train_acc = 0.900 (3.413 sec/step)
step 46210 	 loss = 0.681, train_acc = 0.800 (3.309 sec/step)
step 46220 	 loss = 0.194, train_acc = 0.900 (3.376 sec/step)
step 46230 	 loss = 0.724, train_acc = 0.600 (3.353 sec/step)
step 46240 	 loss = 0.963, train_acc = 0.900 (3.313 sec/step)
step 46250 	 loss = 0.495, train_acc = 0.800 (3.362 sec/step)
step 46260 	 loss = 0.208, train_acc = 0.900 (3.323 sec/step)
step 46270 	 loss = 0.136, train_acc = 1.000 (3.284 sec/step)
step 46280 	 loss = 0.641, train_acc = 0.900 (3.350 sec/step)
step 46290 	 loss = 0.027, train_acc = 1.000 (3.283 sec/step)
step 46300 	 loss = 0.014, train_acc = 1.000 (3.334 sec/step)
step 46310 	 loss = 0.471, train_acc = 0.900 (3.345 sec/step)
step 46320 	 loss = 0.218, train_acc = 0.900 (3.321 sec/step)
step 46330 	 loss = 0.057, train_acc = 1.000 (3.352 sec/step)
step 46340 	 loss = 0.061, train_acc = 1.000 (3.297 sec/step)
step 46350 	 loss = 0.047, train_acc = 1.000 (3.370 sec/step)
step 46360 	 loss = 0.249, train_acc = 0.800 (3.304 sec/step)
step 46370 	 loss = 0.002, train_acc = 1.000 (3.322 sec/step)
step 46380 	 loss = 0.063, train_acc = 1.000 (3.317 sec/step)
step 46390 	 loss = 0.288, train_acc = 0.900 (3.342 sec/step)
step 46400 	 loss = 0.044, train_acc = 1.000 (3.335 sec/step)
step 46410 	 loss = 0.175, train_acc = 0.900 (3.338 sec/step)
step 46420 	 loss = 0.034, train_acc = 1.000 (3.420 sec/step)
step 46430 	 loss = 0.000, train_acc = 1.000 (3.327 sec/step)
step 46440 	 loss = 0.243, train_acc = 0.900 (3.364 sec/step)
step 46450 	 loss = 0.899, train_acc = 0.900 (3.327 sec/step)
step 46460 	 loss = 0.401, train_acc = 0.900 (3.277 sec/step)
step 46470 	 loss = 1.277, train_acc = 0.900 (3.305 sec/step)
step 46480 	 loss = 0.606, train_acc = 0.900 (3.342 sec/step)
step 46490 	 loss = 0.274, train_acc = 0.900 (3.416 sec/step)
step 46500 	 loss = 0.368, train_acc = 0.900 (3.294 sec/step)
step 46510 	 loss = 0.496, train_acc = 0.900 (3.322 sec/step)
step 46520 	 loss = 0.220, train_acc = 0.900 (3.360 sec/step)
step 46530 	 loss = 0.178, train_acc = 0.900 (3.338 sec/step)
step 46540 	 loss = 0.618, train_acc = 0.800 (3.361 sec/step)
step 46550 	 loss = 0.393, train_acc = 0.800 (3.326 sec/step)
step 46560 	 loss = 0.409, train_acc = 0.900 (3.303 sec/step)
step 46570 	 loss = 0.000, train_acc = 1.000 (3.348 sec/step)
step 46580 	 loss = 0.240, train_acc = 0.800 (3.297 sec/step)
step 46590 	 loss = 0.176, train_acc = 0.900 (3.311 sec/step)
step 46600 	 loss = 0.191, train_acc = 0.900 (3.370 sec/step)
step 46610 	 loss = 0.071, train_acc = 1.000 (3.372 sec/step)
step 46620 	 loss = 0.008, train_acc = 1.000 (3.312 sec/step)
step 46630 	 loss = 0.041, train_acc = 1.000 (3.326 sec/step)
step 46640 	 loss = 0.082, train_acc = 1.000 (3.312 sec/step)
step 46650 	 loss = 0.192, train_acc = 0.900 (3.328 sec/step)
step 46660 	 loss = 0.039, train_acc = 1.000 (3.348 sec/step)
step 46670 	 loss = 0.288, train_acc = 0.800 (3.408 sec/step)
step 46680 	 loss = 0.106, train_acc = 1.000 (3.342 sec/step)
step 46690 	 loss = 0.078, train_acc = 1.000 (3.361 sec/step)
step 46700 	 loss = 0.027, train_acc = 1.000 (3.311 sec/step)
step 46710 	 loss = 0.259, train_acc = 0.900 (3.331 sec/step)
step 46720 	 loss = 0.055, train_acc = 1.000 (3.375 sec/step)
step 46730 	 loss = 0.018, train_acc = 1.000 (3.298 sec/step)
step 46740 	 loss = 0.446, train_acc = 0.900 (3.308 sec/step)
step 46750 	 loss = 0.052, train_acc = 1.000 (3.367 sec/step)
step 46760 	 loss = 0.106, train_acc = 1.000 (3.361 sec/step)
step 46770 	 loss = 0.000, train_acc = 1.000 (3.321 sec/step)
step 46780 	 loss = 0.015, train_acc = 1.000 (3.333 sec/step)
step 46790 	 loss = 0.035, train_acc = 1.000 (3.357 sec/step)
step 46800 	 loss = 1.581, train_acc = 0.900 (3.304 sec/step)
step 46810 	 loss = 0.378, train_acc = 0.800 (3.281 sec/step)
step 46820 	 loss = 0.004, train_acc = 1.000 (3.327 sec/step)
step 46830 	 loss = 0.168, train_acc = 0.900 (3.358 sec/step)
step 46840 	 loss = 1.065, train_acc = 0.900 (3.280 sec/step)
step 46850 	 loss = 0.113, train_acc = 0.900 (3.340 sec/step)
step 46860 	 loss = 0.235, train_acc = 0.900 (3.299 sec/step)
step 46870 	 loss = 0.482, train_acc = 0.800 (3.308 sec/step)
step 46880 	 loss = 0.052, train_acc = 1.000 (3.294 sec/step)
step 46890 	 loss = 0.492, train_acc = 0.800 (3.342 sec/step)
step 46900 	 loss = 0.003, train_acc = 1.000 (3.287 sec/step)
step 46910 	 loss = 0.169, train_acc = 0.900 (3.346 sec/step)
step 46920 	 loss = 0.237, train_acc = 0.900 (3.321 sec/step)
step 46930 	 loss = 0.068, train_acc = 1.000 (3.331 sec/step)
step 46940 	 loss = 0.002, train_acc = 1.000 (3.363 sec/step)
step 46950 	 loss = 0.126, train_acc = 0.900 (3.368 sec/step)
step 46960 	 loss = 0.036, train_acc = 1.000 (3.284 sec/step)
step 46970 	 loss = 0.017, train_acc = 1.000 (3.383 sec/step)
step 46980 	 loss = 0.001, train_acc = 1.000 (3.310 sec/step)
step 46990 	 loss = 1.210, train_acc = 0.900 (3.285 sec/step)
step 47000 	 loss = 0.259, train_acc = 0.900 (3.294 sec/step)
step 47010 	 loss = 0.383, train_acc = 0.800 (3.304 sec/step)
step 47020 	 loss = 0.109, train_acc = 1.000 (3.368 sec/step)
step 47030 	 loss = 0.021, train_acc = 1.000 (3.273 sec/step)
step 47040 	 loss = 0.000, train_acc = 1.000 (3.324 sec/step)
step 47050 	 loss = 0.318, train_acc = 0.900 (3.289 sec/step)
step 47060 	 loss = 0.001, train_acc = 1.000 (3.330 sec/step)
step 47070 	 loss = 0.870, train_acc = 0.800 (3.284 sec/step)
step 47080 	 loss = 0.640, train_acc = 0.800 (3.268 sec/step)
step 47090 	 loss = 0.367, train_acc = 0.800 (3.350 sec/step)
step 47100 	 loss = 0.380, train_acc = 0.900 (3.358 sec/step)
step 47110 	 loss = 0.372, train_acc = 0.900 (3.293 sec/step)
step 47120 	 loss = 0.880, train_acc = 0.800 (3.285 sec/step)
step 47130 	 loss = 0.098, train_acc = 1.000 (3.301 sec/step)
step 47140 	 loss = 0.259, train_acc = 0.900 (3.282 sec/step)
step 47150 	 loss = 0.072, train_acc = 1.000 (3.291 sec/step)
step 47160 	 loss = 0.087, train_acc = 0.900 (3.381 sec/step)
step 47170 	 loss = 0.000, train_acc = 1.000 (3.286 sec/step)
step 47180 	 loss = 0.014, train_acc = 1.000 (3.310 sec/step)
step 47190 	 loss = 0.006, train_acc = 1.000 (3.322 sec/step)
step 47200 	 loss = 0.316, train_acc = 0.900 (3.308 sec/step)
step 47210 	 loss = 0.539, train_acc = 0.800 (3.323 sec/step)
step 47220 	 loss = 0.019, train_acc = 1.000 (3.324 sec/step)
step 47230 	 loss = 0.032, train_acc = 1.000 (3.352 sec/step)
step 47240 	 loss = 0.007, train_acc = 1.000 (3.358 sec/step)
step 47250 	 loss = 0.225, train_acc = 0.800 (3.352 sec/step)
step 47260 	 loss = 0.004, train_acc = 1.000 (3.364 sec/step)
step 47270 	 loss = 0.003, train_acc = 1.000 (3.349 sec/step)
step 47280 	 loss = 0.156, train_acc = 0.900 (3.358 sec/step)
step 47290 	 loss = 0.003, train_acc = 1.000 (3.357 sec/step)
step 47300 	 loss = 0.254, train_acc = 0.900 (3.291 sec/step)
step 47310 	 loss = 0.100, train_acc = 1.000 (3.304 sec/step)
step 47320 	 loss = 0.317, train_acc = 0.900 (3.324 sec/step)
step 47330 	 loss = 0.074, train_acc = 1.000 (3.283 sec/step)
step 47340 	 loss = 0.050, train_acc = 1.000 (3.276 sec/step)
step 47350 	 loss = 0.003, train_acc = 1.000 (3.342 sec/step)
step 47360 	 loss = 0.270, train_acc = 0.900 (3.324 sec/step)
step 47370 	 loss = 0.067, train_acc = 1.000 (3.322 sec/step)
step 47380 	 loss = 0.369, train_acc = 0.900 (3.317 sec/step)
step 47390 	 loss = 0.416, train_acc = 0.900 (3.353 sec/step)
step 47400 	 loss = 0.246, train_acc = 0.900 (3.327 sec/step)
step 47410 	 loss = 0.002, train_acc = 1.000 (3.354 sec/step)
step 47420 	 loss = 0.017, train_acc = 1.000 (3.399 sec/step)
step 47430 	 loss = 0.166, train_acc = 0.900 (3.347 sec/step)
step 47440 	 loss = 0.494, train_acc = 0.900 (3.352 sec/step)
step 47450 	 loss = 0.256, train_acc = 0.900 (3.327 sec/step)
step 47460 	 loss = 0.084, train_acc = 1.000 (3.379 sec/step)
step 47470 	 loss = 0.011, train_acc = 1.000 (3.424 sec/step)
step 47480 	 loss = 0.006, train_acc = 1.000 (3.316 sec/step)
step 47490 	 loss = 0.678, train_acc = 0.900 (3.325 sec/step)
VALIDATION 	 acc = 0.546 (3.628 sec)
step 47500 	 loss = 0.967, train_acc = 0.800 (3.339 sec/step)
step 47510 	 loss = 0.106, train_acc = 1.000 (3.303 sec/step)
step 47520 	 loss = 0.051, train_acc = 1.000 (3.348 sec/step)
step 47530 	 loss = 0.149, train_acc = 0.900 (3.296 sec/step)
step 47540 	 loss = 0.044, train_acc = 1.000 (3.348 sec/step)
step 47550 	 loss = 0.349, train_acc = 0.900 (3.342 sec/step)
step 47560 	 loss = 0.277, train_acc = 0.800 (3.372 sec/step)
step 47570 	 loss = 0.001, train_acc = 1.000 (3.292 sec/step)
step 47580 	 loss = 0.184, train_acc = 0.900 (3.336 sec/step)
step 47590 	 loss = 0.002, train_acc = 1.000 (3.319 sec/step)
step 47600 	 loss = 0.113, train_acc = 0.900 (3.301 sec/step)
step 47610 	 loss = 0.067, train_acc = 1.000 (3.358 sec/step)
step 47620 	 loss = 0.110, train_acc = 1.000 (3.303 sec/step)
step 47630 	 loss = 0.063, train_acc = 1.000 (3.328 sec/step)
step 47640 	 loss = 0.076, train_acc = 1.000 (3.360 sec/step)
step 47650 	 loss = 0.414, train_acc = 0.800 (3.326 sec/step)
step 47660 	 loss = 0.005, train_acc = 1.000 (3.283 sec/step)
step 47670 	 loss = 0.040, train_acc = 1.000 (3.361 sec/step)
step 47680 	 loss = 0.001, train_acc = 1.000 (3.323 sec/step)
step 47690 	 loss = 0.026, train_acc = 1.000 (3.347 sec/step)
step 47700 	 loss = 1.146, train_acc = 0.900 (3.309 sec/step)
step 47710 	 loss = 0.771, train_acc = 0.900 (3.351 sec/step)
step 47720 	 loss = 0.081, train_acc = 1.000 (3.361 sec/step)
step 47730 	 loss = 0.194, train_acc = 0.900 (3.322 sec/step)
step 47740 	 loss = 0.158, train_acc = 1.000 (3.343 sec/step)
step 47750 	 loss = 0.294, train_acc = 0.900 (3.303 sec/step)
step 47760 	 loss = 0.354, train_acc = 0.900 (3.342 sec/step)
step 47770 	 loss = 0.312, train_acc = 0.800 (3.361 sec/step)
step 47780 	 loss = 0.006, train_acc = 1.000 (3.322 sec/step)
step 47790 	 loss = 0.101, train_acc = 1.000 (3.287 sec/step)
step 47800 	 loss = 0.548, train_acc = 0.800 (3.315 sec/step)
step 47810 	 loss = 0.702, train_acc = 0.800 (3.367 sec/step)
step 47820 	 loss = 0.025, train_acc = 1.000 (3.301 sec/step)
step 47830 	 loss = 0.198, train_acc = 0.900 (3.298 sec/step)
step 47840 	 loss = 1.347, train_acc = 0.800 (3.313 sec/step)
step 47850 	 loss = 0.327, train_acc = 0.800 (3.309 sec/step)
step 47860 	 loss = 0.450, train_acc = 0.900 (3.293 sec/step)
step 47870 	 loss = 0.045, train_acc = 1.000 (3.332 sec/step)
step 47880 	 loss = 0.018, train_acc = 1.000 (3.334 sec/step)
step 47890 	 loss = 0.084, train_acc = 1.000 (3.302 sec/step)
step 47900 	 loss = 1.012, train_acc = 0.700 (3.377 sec/step)
step 47910 	 loss = 0.088, train_acc = 1.000 (3.300 sec/step)
step 47920 	 loss = 0.262, train_acc = 0.900 (3.334 sec/step)
step 47930 	 loss = 0.009, train_acc = 1.000 (3.323 sec/step)
step 47940 	 loss = 1.024, train_acc = 0.800 (3.352 sec/step)
step 47950 	 loss = 0.361, train_acc = 0.900 (3.297 sec/step)
step 47960 	 loss = 0.001, train_acc = 1.000 (3.316 sec/step)
step 47970 	 loss = 0.348, train_acc = 0.900 (3.284 sec/step)
step 47980 	 loss = 0.301, train_acc = 0.800 (3.297 sec/step)
step 47990 	 loss = 0.226, train_acc = 0.900 (3.357 sec/step)
step 48000 	 loss = 0.076, train_acc = 1.000 (3.304 sec/step)
step 48010 	 loss = 1.307, train_acc = 0.900 (3.360 sec/step)
step 48020 	 loss = 0.620, train_acc = 0.700 (3.346 sec/step)
step 48030 	 loss = 0.632, train_acc = 0.800 (3.276 sec/step)
step 48040 	 loss = 0.270, train_acc = 0.900 (3.371 sec/step)
step 48050 	 loss = 0.000, train_acc = 1.000 (3.345 sec/step)
step 48060 	 loss = 0.115, train_acc = 1.000 (3.296 sec/step)
step 48070 	 loss = 0.043, train_acc = 1.000 (3.361 sec/step)
step 48080 	 loss = 0.061, train_acc = 1.000 (3.325 sec/step)
step 48090 	 loss = 0.019, train_acc = 1.000 (3.353 sec/step)
step 48100 	 loss = 0.300, train_acc = 0.900 (3.359 sec/step)
step 48110 	 loss = 0.095, train_acc = 1.000 (3.315 sec/step)
step 48120 	 loss = 0.002, train_acc = 1.000 (3.343 sec/step)
step 48130 	 loss = 0.130, train_acc = 0.900 (3.332 sec/step)
step 48140 	 loss = 0.950, train_acc = 0.800 (3.272 sec/step)
step 48150 	 loss = 0.381, train_acc = 0.900 (3.360 sec/step)
step 48160 	 loss = 0.435, train_acc = 0.900 (3.353 sec/step)
step 48170 	 loss = 0.703, train_acc = 0.900 (3.331 sec/step)
step 48180 	 loss = 0.528, train_acc = 0.900 (3.360 sec/step)
step 48190 	 loss = 0.113, train_acc = 0.900 (3.368 sec/step)
step 48200 	 loss = 0.340, train_acc = 0.900 (3.360 sec/step)
step 48210 	 loss = 0.039, train_acc = 1.000 (3.328 sec/step)
step 48220 	 loss = 0.028, train_acc = 1.000 (3.290 sec/step)
step 48230 	 loss = 0.012, train_acc = 1.000 (3.294 sec/step)
step 48240 	 loss = 0.000, train_acc = 1.000 (3.332 sec/step)
step 48250 	 loss = 0.198, train_acc = 0.900 (3.309 sec/step)
step 48260 	 loss = 0.065, train_acc = 1.000 (3.313 sec/step)
step 48270 	 loss = 0.673, train_acc = 0.900 (3.324 sec/step)
step 48280 	 loss = 0.491, train_acc = 0.900 (3.301 sec/step)
step 48290 	 loss = 0.048, train_acc = 1.000 (3.323 sec/step)
step 48300 	 loss = 0.001, train_acc = 1.000 (3.298 sec/step)
step 48310 	 loss = 0.031, train_acc = 1.000 (3.293 sec/step)
step 48320 	 loss = 0.001, train_acc = 1.000 (3.312 sec/step)
step 48330 	 loss = 0.000, train_acc = 1.000 (3.325 sec/step)
step 48340 	 loss = 0.859, train_acc = 0.700 (3.318 sec/step)
step 48350 	 loss = 0.032, train_acc = 1.000 (3.354 sec/step)
step 48360 	 loss = 0.802, train_acc = 0.800 (3.330 sec/step)
step 48370 	 loss = 0.029, train_acc = 1.000 (3.349 sec/step)
step 48380 	 loss = 0.004, train_acc = 1.000 (3.365 sec/step)
step 48390 	 loss = 0.891, train_acc = 0.900 (3.292 sec/step)
step 48400 	 loss = 0.000, train_acc = 1.000 (3.349 sec/step)
step 48410 	 loss = 0.234, train_acc = 0.900 (3.286 sec/step)
step 48420 	 loss = 0.038, train_acc = 1.000 (3.352 sec/step)
step 48430 	 loss = 0.027, train_acc = 1.000 (3.347 sec/step)
step 48440 	 loss = 0.023, train_acc = 1.000 (3.289 sec/step)
step 48450 	 loss = 0.022, train_acc = 1.000 (3.301 sec/step)
step 48460 	 loss = 0.002, train_acc = 1.000 (3.317 sec/step)
step 48470 	 loss = 0.902, train_acc = 0.900 (3.315 sec/step)
step 48480 	 loss = 0.156, train_acc = 0.900 (3.322 sec/step)
step 48490 	 loss = 0.227, train_acc = 0.800 (3.282 sec/step)
step 48500 	 loss = 0.167, train_acc = 0.900 (3.385 sec/step)
step 48510 	 loss = 0.318, train_acc = 0.800 (3.302 sec/step)
step 48520 	 loss = 0.486, train_acc = 0.900 (3.277 sec/step)
step 48530 	 loss = 0.903, train_acc = 0.800 (3.362 sec/step)
step 48540 	 loss = 0.375, train_acc = 0.900 (3.342 sec/step)
step 48550 	 loss = 0.285, train_acc = 0.800 (3.432 sec/step)
step 48560 	 loss = 0.012, train_acc = 1.000 (3.326 sec/step)
step 48570 	 loss = 0.744, train_acc = 0.800 (3.364 sec/step)
step 48580 	 loss = 0.480, train_acc = 0.900 (3.318 sec/step)
step 48590 	 loss = 0.369, train_acc = 0.800 (3.311 sec/step)
step 48600 	 loss = 0.121, train_acc = 0.900 (3.332 sec/step)
step 48610 	 loss = 0.089, train_acc = 1.000 (3.283 sec/step)
step 48620 	 loss = 0.060, train_acc = 1.000 (3.313 sec/step)
step 48630 	 loss = 0.177, train_acc = 0.900 (3.304 sec/step)
step 48640 	 loss = 0.394, train_acc = 0.800 (3.335 sec/step)
step 48650 	 loss = 0.009, train_acc = 1.000 (3.312 sec/step)
step 48660 	 loss = 0.417, train_acc = 0.800 (3.332 sec/step)
step 48670 	 loss = 0.561, train_acc = 0.800 (3.294 sec/step)
step 48680 	 loss = 0.107, train_acc = 0.900 (3.315 sec/step)
step 48690 	 loss = 0.006, train_acc = 1.000 (3.308 sec/step)
step 48700 	 loss = 0.000, train_acc = 1.000 (3.338 sec/step)
step 48710 	 loss = 0.042, train_acc = 1.000 (3.302 sec/step)
step 48720 	 loss = 0.276, train_acc = 0.800 (3.395 sec/step)
step 48730 	 loss = 0.003, train_acc = 1.000 (3.315 sec/step)
step 48740 	 loss = 0.354, train_acc = 0.900 (3.387 sec/step)
step 48750 	 loss = 0.028, train_acc = 1.000 (3.305 sec/step)
step 48760 	 loss = 0.049, train_acc = 1.000 (3.400 sec/step)
step 48770 	 loss = 0.005, train_acc = 1.000 (3.307 sec/step)
step 48780 	 loss = 0.236, train_acc = 0.900 (3.326 sec/step)
step 48790 	 loss = 0.199, train_acc = 0.900 (3.313 sec/step)
step 48800 	 loss = 0.022, train_acc = 1.000 (3.355 sec/step)
step 48810 	 loss = 0.314, train_acc = 0.900 (3.386 sec/step)
step 48820 	 loss = 0.002, train_acc = 1.000 (3.284 sec/step)
step 48830 	 loss = 0.340, train_acc = 0.800 (3.324 sec/step)
step 48840 	 loss = 0.025, train_acc = 1.000 (3.297 sec/step)
step 48850 	 loss = 1.268, train_acc = 0.800 (3.347 sec/step)
step 48860 	 loss = 0.309, train_acc = 0.900 (3.341 sec/step)
step 48870 	 loss = 0.475, train_acc = 0.900 (3.378 sec/step)
step 48880 	 loss = 1.126, train_acc = 0.700 (3.363 sec/step)
step 48890 	 loss = 1.105, train_acc = 0.800 (3.288 sec/step)
step 48900 	 loss = 0.110, train_acc = 1.000 (3.361 sec/step)
step 48910 	 loss = 0.224, train_acc = 0.900 (3.317 sec/step)
step 48920 	 loss = 0.105, train_acc = 1.000 (3.349 sec/step)
step 48930 	 loss = 0.452, train_acc = 0.800 (3.293 sec/step)
step 48940 	 loss = 0.015, train_acc = 1.000 (3.341 sec/step)
step 48950 	 loss = 0.672, train_acc = 0.800 (3.295 sec/step)
step 48960 	 loss = 0.000, train_acc = 1.000 (3.309 sec/step)
step 48970 	 loss = 0.004, train_acc = 1.000 (3.285 sec/step)
step 48980 	 loss = 0.159, train_acc = 0.900 (3.292 sec/step)
step 48990 	 loss = 3.243, train_acc = 0.700 (3.295 sec/step)
step 49000 	 loss = 0.079, train_acc = 1.000 (3.344 sec/step)
step 49010 	 loss = 0.235, train_acc = 0.900 (3.348 sec/step)
step 49020 	 loss = 0.070, train_acc = 0.900 (3.311 sec/step)
step 49030 	 loss = 2.552, train_acc = 0.600 (3.346 sec/step)
step 49040 	 loss = 0.357, train_acc = 0.900 (3.402 sec/step)
step 49050 	 loss = 0.094, train_acc = 1.000 (3.375 sec/step)
step 49060 	 loss = 0.015, train_acc = 1.000 (3.329 sec/step)
step 49070 	 loss = 0.000, train_acc = 1.000 (3.340 sec/step)
step 49080 	 loss = 0.004, train_acc = 1.000 (3.317 sec/step)
step 49090 	 loss = 0.001, train_acc = 1.000 (3.366 sec/step)
step 49100 	 loss = 0.370, train_acc = 0.900 (3.322 sec/step)
step 49110 	 loss = 0.084, train_acc = 1.000 (3.326 sec/step)
step 49120 	 loss = 0.029, train_acc = 1.000 (3.268 sec/step)
step 49130 	 loss = 0.906, train_acc = 0.700 (3.301 sec/step)
step 49140 	 loss = 1.694, train_acc = 0.500 (3.374 sec/step)
step 49150 	 loss = 0.166, train_acc = 0.900 (3.317 sec/step)
step 49160 	 loss = 0.343, train_acc = 0.900 (3.322 sec/step)
step 49170 	 loss = 0.542, train_acc = 0.700 (3.368 sec/step)
step 49180 	 loss = 0.009, train_acc = 1.000 (3.362 sec/step)
step 49190 	 loss = 0.008, train_acc = 1.000 (3.289 sec/step)
step 49200 	 loss = 0.662, train_acc = 0.900 (3.292 sec/step)
step 49210 	 loss = 0.347, train_acc = 0.900 (3.376 sec/step)
step 49220 	 loss = 0.140, train_acc = 0.900 (3.340 sec/step)
step 49230 	 loss = 0.181, train_acc = 0.900 (3.360 sec/step)
step 49240 	 loss = 4.162, train_acc = 0.700 (3.323 sec/step)
step 49250 	 loss = 0.155, train_acc = 0.900 (3.307 sec/step)
step 49260 	 loss = 0.167, train_acc = 0.900 (3.330 sec/step)
step 49270 	 loss = 0.306, train_acc = 0.900 (3.361 sec/step)
step 49280 	 loss = 0.306, train_acc = 0.900 (3.344 sec/step)
step 49290 	 loss = 0.111, train_acc = 0.900 (3.293 sec/step)
step 49300 	 loss = 0.155, train_acc = 0.900 (3.319 sec/step)
step 49310 	 loss = 0.141, train_acc = 0.900 (3.306 sec/step)
step 49320 	 loss = 0.308, train_acc = 0.800 (3.376 sec/step)
step 49330 	 loss = 1.215, train_acc = 0.600 (3.320 sec/step)
step 49340 	 loss = 0.984, train_acc = 0.700 (3.332 sec/step)
step 49350 	 loss = 0.090, train_acc = 0.900 (3.270 sec/step)
step 49360 	 loss = 0.516, train_acc = 0.800 (3.331 sec/step)
step 49370 	 loss = 0.104, train_acc = 0.900 (3.335 sec/step)
step 49380 	 loss = 0.066, train_acc = 1.000 (3.320 sec/step)
step 49390 	 loss = 0.104, train_acc = 0.900 (3.318 sec/step)
VALIDATION 	 acc = 0.533 (3.640 sec)
step 49400 	 loss = 0.001, train_acc = 1.000 (3.292 sec/step)
step 49410 	 loss = 0.152, train_acc = 0.900 (3.410 sec/step)
step 49420 	 loss = 0.000, train_acc = 1.000 (3.353 sec/step)
step 49430 	 loss = 0.006, train_acc = 1.000 (3.323 sec/step)
step 49440 	 loss = 0.002, train_acc = 1.000 (3.364 sec/step)
step 49450 	 loss = 0.345, train_acc = 0.900 (3.304 sec/step)
step 49460 	 loss = 0.370, train_acc = 0.900 (3.322 sec/step)
step 49470 	 loss = 0.296, train_acc = 0.900 (3.415 sec/step)
step 49480 	 loss = 0.000, train_acc = 1.000 (3.285 sec/step)
step 49490 	 loss = 0.039, train_acc = 1.000 (3.350 sec/step)
step 49500 	 loss = 0.004, train_acc = 1.000 (3.296 sec/step)
step 49510 	 loss = 0.033, train_acc = 1.000 (3.319 sec/step)
step 49520 	 loss = 0.216, train_acc = 0.900 (3.328 sec/step)
step 49530 	 loss = 0.152, train_acc = 0.900 (3.371 sec/step)
step 49540 	 loss = 0.257, train_acc = 0.900 (3.313 sec/step)
step 49550 	 loss = 0.013, train_acc = 1.000 (3.305 sec/step)
step 49560 	 loss = 0.287, train_acc = 0.800 (3.287 sec/step)
step 49570 	 loss = 0.026, train_acc = 1.000 (3.344 sec/step)
step 49580 	 loss = 0.044, train_acc = 1.000 (3.307 sec/step)
step 49590 	 loss = 0.607, train_acc = 0.900 (3.316 sec/step)
step 49600 	 loss = 0.395, train_acc = 0.800 (3.280 sec/step)
step 49610 	 loss = 0.161, train_acc = 1.000 (3.340 sec/step)
step 49620 	 loss = 0.002, train_acc = 1.000 (3.297 sec/step)
step 49630 	 loss = 0.035, train_acc = 1.000 (3.413 sec/step)
step 49640 	 loss = 0.017, train_acc = 1.000 (3.326 sec/step)
step 49650 	 loss = 0.585, train_acc = 0.800 (3.272 sec/step)
step 49660 	 loss = 0.288, train_acc = 0.900 (3.302 sec/step)
step 49670 	 loss = 0.005, train_acc = 1.000 (3.307 sec/step)
step 49680 	 loss = 0.000, train_acc = 1.000 (3.318 sec/step)
step 49690 	 loss = 0.026, train_acc = 1.000 (3.364 sec/step)
step 49700 	 loss = 0.023, train_acc = 1.000 (3.312 sec/step)
step 49710 	 loss = 1.758, train_acc = 0.900 (3.297 sec/step)
step 49720 	 loss = 0.002, train_acc = 1.000 (3.352 sec/step)
step 49730 	 loss = 0.006, train_acc = 1.000 (3.366 sec/step)
step 49740 	 loss = 0.055, train_acc = 1.000 (3.335 sec/step)
step 49750 	 loss = 0.001, train_acc = 1.000 (3.299 sec/step)
step 49760 	 loss = 0.004, train_acc = 1.000 (3.337 sec/step)
step 49770 	 loss = 0.142, train_acc = 0.900 (3.478 sec/step)
step 49780 	 loss = 1.062, train_acc = 0.900 (3.293 sec/step)
step 49790 	 loss = 0.000, train_acc = 1.000 (3.331 sec/step)
step 49800 	 loss = 2.561, train_acc = 0.600 (3.357 sec/step)
step 49810 	 loss = 0.148, train_acc = 0.900 (3.309 sec/step)
step 49820 	 loss = 0.002, train_acc = 1.000 (3.328 sec/step)
step 49830 	 loss = 0.045, train_acc = 1.000 (3.349 sec/step)
step 49840 	 loss = 0.245, train_acc = 0.800 (3.310 sec/step)
step 49850 	 loss = 0.797, train_acc = 0.800 (3.351 sec/step)
step 49860 	 loss = 1.001, train_acc = 0.900 (3.278 sec/step)
step 49870 	 loss = 0.000, train_acc = 1.000 (3.347 sec/step)
step 49880 	 loss = 0.004, train_acc = 1.000 (3.317 sec/step)
step 49890 	 loss = 0.052, train_acc = 1.000 (3.348 sec/step)
step 49900 	 loss = 0.022, train_acc = 1.000 (3.369 sec/step)
step 49910 	 loss = 0.073, train_acc = 1.000 (3.356 sec/step)
step 49920 	 loss = 0.783, train_acc = 0.900 (3.294 sec/step)
step 49930 	 loss = 0.299, train_acc = 1.000 (3.319 sec/step)
step 49940 	 loss = 0.007, train_acc = 1.000 (3.312 sec/step)
step 49950 	 loss = 0.000, train_acc = 1.000 (3.339 sec/step)
step 49960 	 loss = 0.001, train_acc = 1.000 (3.318 sec/step)
step 49970 	 loss = 0.022, train_acc = 1.000 (3.292 sec/step)
step 49980 	 loss = 0.100, train_acc = 1.000 (3.323 sec/step)
step 49990 	 loss = 0.042, train_acc = 1.000 (3.315 sec/step)
step 50000 	 loss = 1.661, train_acc = 0.900 (3.307 sec/step)
step 50010 	 loss = 0.034, train_acc = 1.000 (3.334 sec/step)
step 50020 	 loss = 0.156, train_acc = 1.000 (3.299 sec/step)
step 50030 	 loss = 0.055, train_acc = 1.000 (3.320 sec/step)
step 50040 	 loss = 0.006, train_acc = 1.000 (3.303 sec/step)
step 50050 	 loss = 0.203, train_acc = 0.900 (3.299 sec/step)
step 50060 	 loss = 0.051, train_acc = 1.000 (3.337 sec/step)
step 50070 	 loss = 0.020, train_acc = 1.000 (3.305 sec/step)
step 50080 	 loss = 0.017, train_acc = 1.000 (3.315 sec/step)
step 50090 	 loss = 0.069, train_acc = 1.000 (3.330 sec/step)
step 50100 	 loss = 0.036, train_acc = 1.000 (3.341 sec/step)
step 50110 	 loss = 0.046, train_acc = 1.000 (3.289 sec/step)
step 50120 	 loss = 0.006, train_acc = 1.000 (3.356 sec/step)
step 50130 	 loss = 0.258, train_acc = 0.900 (3.395 sec/step)
step 50140 	 loss = 0.000, train_acc = 1.000 (3.322 sec/step)
step 50150 	 loss = 0.000, train_acc = 1.000 (3.339 sec/step)
step 50160 	 loss = 0.659, train_acc = 0.900 (3.362 sec/step)
step 50170 	 loss = 0.003, train_acc = 1.000 (3.347 sec/step)
step 50180 	 loss = 0.013, train_acc = 1.000 (3.375 sec/step)
step 50190 	 loss = 0.004, train_acc = 1.000 (3.277 sec/step)
step 50200 	 loss = 0.511, train_acc = 0.900 (3.314 sec/step)
step 50210 	 loss = 0.000, train_acc = 1.000 (3.314 sec/step)
step 50220 	 loss = 1.102, train_acc = 0.900 (3.308 sec/step)
step 50230 	 loss = 0.039, train_acc = 1.000 (3.320 sec/step)
step 50240 	 loss = 0.017, train_acc = 1.000 (3.317 sec/step)
step 50250 	 loss = 0.000, train_acc = 1.000 (3.364 sec/step)
step 50260 	 loss = 0.000, train_acc = 1.000 (3.369 sec/step)
step 50270 	 loss = 0.128, train_acc = 0.900 (3.283 sec/step)
step 50280 	 loss = 0.150, train_acc = 1.000 (3.333 sec/step)
step 50290 	 loss = 0.002, train_acc = 1.000 (3.345 sec/step)
step 50300 	 loss = 0.072, train_acc = 1.000 (3.382 sec/step)
step 50310 	 loss = 0.749, train_acc = 0.800 (3.303 sec/step)
step 50320 	 loss = 0.169, train_acc = 0.900 (3.306 sec/step)
step 50330 	 loss = 0.016, train_acc = 1.000 (3.337 sec/step)
step 50340 	 loss = 0.334, train_acc = 0.900 (3.332 sec/step)
step 50350 	 loss = 0.368, train_acc = 0.800 (3.361 sec/step)
step 50360 	 loss = 0.107, train_acc = 1.000 (3.347 sec/step)
step 50370 	 loss = 0.031, train_acc = 1.000 (3.347 sec/step)
step 50380 	 loss = 0.010, train_acc = 1.000 (3.332 sec/step)
step 50390 	 loss = 0.010, train_acc = 1.000 (3.289 sec/step)
step 50400 	 loss = 0.024, train_acc = 1.000 (3.321 sec/step)
step 50410 	 loss = 0.186, train_acc = 0.900 (3.345 sec/step)
step 50420 	 loss = 0.012, train_acc = 1.000 (3.282 sec/step)
step 50430 	 loss = 1.736, train_acc = 0.800 (3.287 sec/step)
step 50440 	 loss = 1.029, train_acc = 0.600 (3.328 sec/step)
step 50450 	 loss = 0.052, train_acc = 1.000 (3.307 sec/step)
step 50460 	 loss = 0.133, train_acc = 0.900 (3.383 sec/step)
step 50470 	 loss = 0.284, train_acc = 0.900 (3.284 sec/step)
step 50480 	 loss = 0.481, train_acc = 0.900 (3.355 sec/step)
step 50490 	 loss = 0.632, train_acc = 0.900 (3.307 sec/step)
step 50500 	 loss = 0.010, train_acc = 1.000 (3.289 sec/step)
step 50510 	 loss = 0.003, train_acc = 1.000 (3.308 sec/step)
step 50520 	 loss = 0.002, train_acc = 1.000 (3.343 sec/step)
step 50530 	 loss = 0.008, train_acc = 1.000 (3.345 sec/step)
step 50540 	 loss = 0.123, train_acc = 0.900 (3.309 sec/step)
step 50550 	 loss = 0.516, train_acc = 0.900 (3.363 sec/step)
step 50560 	 loss = 0.060, train_acc = 1.000 (3.321 sec/step)
step 50570 	 loss = 0.004, train_acc = 1.000 (3.347 sec/step)
step 50580 	 loss = 0.022, train_acc = 1.000 (3.341 sec/step)
step 50590 	 loss = 0.088, train_acc = 1.000 (3.280 sec/step)
step 50600 	 loss = 0.187, train_acc = 0.900 (3.395 sec/step)
step 50610 	 loss = 0.096, train_acc = 0.900 (3.340 sec/step)
step 50620 	 loss = 0.425, train_acc = 0.900 (3.328 sec/step)
step 50630 	 loss = 0.010, train_acc = 1.000 (3.310 sec/step)
step 50640 	 loss = 0.017, train_acc = 1.000 (3.385 sec/step)
step 50650 	 loss = 0.180, train_acc = 0.900 (3.331 sec/step)
step 50660 	 loss = 0.113, train_acc = 0.900 (3.333 sec/step)
step 50670 	 loss = 0.175, train_acc = 0.900 (3.322 sec/step)
step 50680 	 loss = 0.000, train_acc = 1.000 (3.356 sec/step)
step 50690 	 loss = 0.006, train_acc = 1.000 (3.309 sec/step)
step 50700 	 loss = 0.629, train_acc = 0.900 (3.330 sec/step)
step 50710 	 loss = 0.228, train_acc = 0.900 (3.342 sec/step)
step 50720 	 loss = 0.022, train_acc = 1.000 (3.363 sec/step)
step 50730 	 loss = 0.150, train_acc = 0.900 (3.327 sec/step)
step 50740 	 loss = 0.334, train_acc = 0.900 (3.301 sec/step)
step 50750 	 loss = 0.705, train_acc = 0.900 (3.302 sec/step)
step 50760 	 loss = 0.803, train_acc = 0.700 (3.404 sec/step)
step 50770 	 loss = 0.000, train_acc = 1.000 (3.287 sec/step)
step 50780 	 loss = 0.774, train_acc = 0.800 (3.346 sec/step)
step 50790 	 loss = 0.151, train_acc = 0.900 (3.305 sec/step)
step 50800 	 loss = 0.077, train_acc = 1.000 (3.355 sec/step)
step 50810 	 loss = 0.048, train_acc = 1.000 (3.286 sec/step)
step 50820 	 loss = 0.066, train_acc = 1.000 (3.326 sec/step)
step 50830 	 loss = 0.041, train_acc = 1.000 (3.324 sec/step)
step 50840 	 loss = 0.138, train_acc = 0.900 (3.303 sec/step)
step 50850 	 loss = 0.010, train_acc = 1.000 (3.344 sec/step)
step 50860 	 loss = 0.008, train_acc = 1.000 (3.306 sec/step)
step 50870 	 loss = 0.022, train_acc = 1.000 (3.315 sec/step)
step 50880 	 loss = 0.950, train_acc = 0.700 (3.329 sec/step)
step 50890 	 loss = 0.014, train_acc = 1.000 (3.359 sec/step)
step 50900 	 loss = 0.845, train_acc = 0.700 (3.348 sec/step)
step 50910 	 loss = 0.596, train_acc = 0.900 (3.319 sec/step)
step 50920 	 loss = 0.114, train_acc = 1.000 (3.318 sec/step)
step 50930 	 loss = 0.071, train_acc = 1.000 (3.317 sec/step)
step 50940 	 loss = 0.047, train_acc = 1.000 (3.378 sec/step)
step 50950 	 loss = 1.509, train_acc = 0.800 (3.362 sec/step)
step 50960 	 loss = 1.076, train_acc = 0.900 (3.345 sec/step)
step 50970 	 loss = 0.082, train_acc = 1.000 (3.388 sec/step)
step 50980 	 loss = 0.002, train_acc = 1.000 (3.331 sec/step)
step 50990 	 loss = 0.532, train_acc = 0.900 (3.341 sec/step)
step 51000 	 loss = 0.043, train_acc = 1.000 (3.374 sec/step)
step 51010 	 loss = 0.367, train_acc = 0.800 (3.359 sec/step)
step 51020 	 loss = 0.002, train_acc = 1.000 (3.286 sec/step)
step 51030 	 loss = 0.075, train_acc = 1.000 (3.349 sec/step)
step 51040 	 loss = 0.076, train_acc = 1.000 (3.374 sec/step)
step 51050 	 loss = 0.073, train_acc = 1.000 (3.272 sec/step)
step 51060 	 loss = 0.353, train_acc = 0.900 (3.306 sec/step)
step 51070 	 loss = 0.004, train_acc = 1.000 (3.324 sec/step)
step 51080 	 loss = 0.442, train_acc = 0.900 (3.308 sec/step)
step 51090 	 loss = 0.556, train_acc = 0.900 (3.311 sec/step)
step 51100 	 loss = 0.064, train_acc = 1.000 (3.356 sec/step)
step 51110 	 loss = 0.086, train_acc = 1.000 (3.287 sec/step)
step 51120 	 loss = 0.232, train_acc = 0.900 (3.364 sec/step)
step 51130 	 loss = 0.569, train_acc = 0.900 (3.324 sec/step)
step 51140 	 loss = 0.052, train_acc = 1.000 (3.381 sec/step)
step 51150 	 loss = 0.001, train_acc = 1.000 (3.321 sec/step)
step 51160 	 loss = 0.101, train_acc = 0.900 (3.337 sec/step)
step 51170 	 loss = 0.000, train_acc = 1.000 (3.378 sec/step)
step 51180 	 loss = 0.529, train_acc = 0.800 (3.342 sec/step)
step 51190 	 loss = 0.187, train_acc = 0.900 (3.303 sec/step)
step 51200 	 loss = 0.365, train_acc = 0.900 (3.442 sec/step)
step 51210 	 loss = 0.002, train_acc = 1.000 (3.288 sec/step)
step 51220 	 loss = 0.350, train_acc = 0.900 (3.336 sec/step)
step 51230 	 loss = 0.071, train_acc = 1.000 (3.332 sec/step)
step 51240 	 loss = 0.071, train_acc = 1.000 (3.376 sec/step)
step 51250 	 loss = 0.004, train_acc = 1.000 (3.319 sec/step)
step 51260 	 loss = 0.814, train_acc = 0.800 (3.363 sec/step)
step 51270 	 loss = 0.383, train_acc = 0.900 (3.312 sec/step)
step 51280 	 loss = 0.007, train_acc = 1.000 (3.356 sec/step)
step 51290 	 loss = 0.002, train_acc = 1.000 (3.380 sec/step)
VALIDATION 	 acc = 0.560 (3.644 sec)
step 51300 	 loss = 0.397, train_acc = 0.900 (3.319 sec/step)
step 51310 	 loss = 0.023, train_acc = 1.000 (3.357 sec/step)
step 51320 	 loss = 0.460, train_acc = 0.900 (3.361 sec/step)
step 51330 	 loss = 0.078, train_acc = 1.000 (3.324 sec/step)
step 51340 	 loss = 0.101, train_acc = 1.000 (3.280 sec/step)
step 51350 	 loss = 0.804, train_acc = 0.800 (3.310 sec/step)
step 51360 	 loss = 0.124, train_acc = 0.900 (3.324 sec/step)
step 51370 	 loss = 0.009, train_acc = 1.000 (3.348 sec/step)
step 51380 	 loss = 0.047, train_acc = 1.000 (3.303 sec/step)
step 51390 	 loss = 0.085, train_acc = 0.900 (3.354 sec/step)
step 51400 	 loss = 0.063, train_acc = 1.000 (3.277 sec/step)
step 51410 	 loss = 1.496, train_acc = 0.900 (3.314 sec/step)
step 51420 	 loss = 0.084, train_acc = 1.000 (3.372 sec/step)
step 51430 	 loss = 0.003, train_acc = 1.000 (3.466 sec/step)
step 51440 	 loss = 0.050, train_acc = 1.000 (3.349 sec/step)
step 51450 	 loss = 2.182, train_acc = 0.700 (3.396 sec/step)
step 51460 	 loss = 0.036, train_acc = 1.000 (3.346 sec/step)
step 51470 	 loss = 0.007, train_acc = 1.000 (3.334 sec/step)
step 51480 	 loss = 0.000, train_acc = 1.000 (3.356 sec/step)
step 51490 	 loss = 0.001, train_acc = 1.000 (3.304 sec/step)
step 51500 	 loss = 0.513, train_acc = 0.900 (3.357 sec/step)
step 51510 	 loss = 0.074, train_acc = 1.000 (3.330 sec/step)
step 51520 	 loss = 0.293, train_acc = 0.900 (3.356 sec/step)
step 51530 	 loss = 0.256, train_acc = 0.900 (3.278 sec/step)
step 51540 	 loss = 0.140, train_acc = 1.000 (3.338 sec/step)
step 51550 	 loss = 0.093, train_acc = 1.000 (3.279 sec/step)
step 51560 	 loss = 0.063, train_acc = 1.000 (3.350 sec/step)
step 51570 	 loss = 0.004, train_acc = 1.000 (3.296 sec/step)
step 51580 	 loss = 1.831, train_acc = 0.800 (3.345 sec/step)
step 51590 	 loss = 0.106, train_acc = 1.000 (3.323 sec/step)
step 51600 	 loss = 0.242, train_acc = 1.000 (3.300 sec/step)
step 51610 	 loss = 0.220, train_acc = 0.900 (3.337 sec/step)
step 51620 	 loss = 0.000, train_acc = 1.000 (3.285 sec/step)
step 51630 	 loss = 0.003, train_acc = 1.000 (3.313 sec/step)
step 51640 	 loss = 0.003, train_acc = 1.000 (3.282 sec/step)
step 51650 	 loss = 0.045, train_acc = 1.000 (3.360 sec/step)
step 51660 	 loss = 0.083, train_acc = 1.000 (3.322 sec/step)
step 51670 	 loss = 0.027, train_acc = 1.000 (3.316 sec/step)
step 51680 	 loss = 0.210, train_acc = 0.900 (3.338 sec/step)
step 51690 	 loss = 0.608, train_acc = 0.800 (3.362 sec/step)
step 51700 	 loss = 0.283, train_acc = 0.900 (3.315 sec/step)
step 51710 	 loss = 0.248, train_acc = 0.900 (3.300 sec/step)
step 51720 	 loss = 0.553, train_acc = 0.800 (3.369 sec/step)
step 51730 	 loss = 0.154, train_acc = 0.900 (3.279 sec/step)
step 51740 	 loss = 0.515, train_acc = 0.900 (3.409 sec/step)
step 51750 	 loss = 0.029, train_acc = 1.000 (3.384 sec/step)
step 51760 	 loss = 2.004, train_acc = 0.800 (3.318 sec/step)
step 51770 	 loss = 0.012, train_acc = 1.000 (3.377 sec/step)
step 51780 	 loss = 0.191, train_acc = 0.900 (3.301 sec/step)
step 51790 	 loss = 0.176, train_acc = 0.900 (3.321 sec/step)
step 51800 	 loss = 0.036, train_acc = 1.000 (3.327 sec/step)
step 51810 	 loss = 0.369, train_acc = 0.800 (3.333 sec/step)
step 51820 	 loss = 0.166, train_acc = 0.900 (3.358 sec/step)
step 51830 	 loss = 0.018, train_acc = 1.000 (3.294 sec/step)
step 51840 	 loss = 0.193, train_acc = 0.900 (3.379 sec/step)
step 51850 	 loss = 0.019, train_acc = 1.000 (3.322 sec/step)
step 51860 	 loss = 0.167, train_acc = 0.900 (3.347 sec/step)
step 51870 	 loss = 0.263, train_acc = 0.900 (3.368 sec/step)
step 51880 	 loss = 0.139, train_acc = 1.000 (3.365 sec/step)
step 51890 	 loss = 0.406, train_acc = 0.900 (3.331 sec/step)
step 51900 	 loss = 0.662, train_acc = 0.900 (3.283 sec/step)
step 51910 	 loss = 0.020, train_acc = 1.000 (3.348 sec/step)
step 51920 	 loss = 0.473, train_acc = 0.800 (3.310 sec/step)
step 51930 	 loss = 0.074, train_acc = 1.000 (3.435 sec/step)
step 51940 	 loss = 0.177, train_acc = 1.000 (3.309 sec/step)
step 51950 	 loss = 0.032, train_acc = 1.000 (3.418 sec/step)
step 51960 	 loss = 0.715, train_acc = 0.800 (3.313 sec/step)
step 51970 	 loss = 0.051, train_acc = 1.000 (3.342 sec/step)
step 51980 	 loss = 0.089, train_acc = 1.000 (3.367 sec/step)
step 51990 	 loss = 0.822, train_acc = 0.900 (3.362 sec/step)
step 52000 	 loss = 0.722, train_acc = 0.900 (3.347 sec/step)
step 52010 	 loss = 0.225, train_acc = 0.900 (3.328 sec/step)
step 52020 	 loss = 0.043, train_acc = 1.000 (3.328 sec/step)
step 52030 	 loss = 0.588, train_acc = 0.800 (3.344 sec/step)
step 52040 	 loss = 0.026, train_acc = 1.000 (3.315 sec/step)
step 52050 	 loss = 0.042, train_acc = 1.000 (3.314 sec/step)
step 52060 	 loss = 0.207, train_acc = 0.900 (3.314 sec/step)
step 52070 	 loss = 0.386, train_acc = 0.900 (3.266 sec/step)
step 52080 	 loss = 0.273, train_acc = 0.900 (3.307 sec/step)
step 52090 	 loss = 0.209, train_acc = 0.900 (3.357 sec/step)
step 52100 	 loss = 0.114, train_acc = 0.900 (3.434 sec/step)
step 52110 	 loss = 0.053, train_acc = 1.000 (3.291 sec/step)
step 52120 	 loss = 0.013, train_acc = 1.000 (3.314 sec/step)
step 52130 	 loss = 0.537, train_acc = 0.800 (3.349 sec/step)
step 52140 	 loss = 0.007, train_acc = 1.000 (3.323 sec/step)
step 52150 	 loss = 0.001, train_acc = 1.000 (3.358 sec/step)
step 52160 	 loss = 0.070, train_acc = 1.000 (3.359 sec/step)
step 52170 	 loss = 0.326, train_acc = 0.900 (3.296 sec/step)
step 52180 	 loss = 0.027, train_acc = 1.000 (3.356 sec/step)
step 52190 	 loss = 0.002, train_acc = 1.000 (3.347 sec/step)
step 52200 	 loss = 0.043, train_acc = 1.000 (3.344 sec/step)
step 52210 	 loss = 0.600, train_acc = 0.800 (3.320 sec/step)
step 52220 	 loss = 0.031, train_acc = 1.000 (3.289 sec/step)
step 52230 	 loss = 0.001, train_acc = 1.000 (3.314 sec/step)
step 52240 	 loss = 0.307, train_acc = 0.900 (3.304 sec/step)
step 52250 	 loss = 0.629, train_acc = 0.900 (3.339 sec/step)
step 52260 	 loss = 0.008, train_acc = 1.000 (3.328 sec/step)
step 52270 	 loss = 0.007, train_acc = 1.000 (3.303 sec/step)
step 52280 	 loss = 0.024, train_acc = 1.000 (3.336 sec/step)
step 52290 	 loss = 0.656, train_acc = 0.700 (3.293 sec/step)
step 52300 	 loss = 0.013, train_acc = 1.000 (3.356 sec/step)
step 52310 	 loss = 0.080, train_acc = 1.000 (3.332 sec/step)
step 52320 	 loss = 0.187, train_acc = 0.900 (3.327 sec/step)
step 52330 	 loss = 0.012, train_acc = 1.000 (3.293 sec/step)
step 52340 	 loss = 0.044, train_acc = 1.000 (3.364 sec/step)
step 52350 	 loss = 0.811, train_acc = 0.700 (3.340 sec/step)
step 52360 	 loss = 0.105, train_acc = 1.000 (3.340 sec/step)
step 52370 	 loss = 0.133, train_acc = 0.900 (3.336 sec/step)
step 52380 	 loss = 0.084, train_acc = 1.000 (3.334 sec/step)
step 52390 	 loss = 0.338, train_acc = 0.900 (3.309 sec/step)
step 52400 	 loss = 0.784, train_acc = 0.900 (3.310 sec/step)
step 52410 	 loss = 0.026, train_acc = 1.000 (3.308 sec/step)
step 52420 	 loss = 0.792, train_acc = 0.900 (3.330 sec/step)
step 52430 	 loss = 0.368, train_acc = 0.900 (3.370 sec/step)
step 52440 	 loss = 2.462, train_acc = 0.700 (3.347 sec/step)
step 52450 	 loss = 0.204, train_acc = 0.900 (3.339 sec/step)
step 52460 	 loss = 0.327, train_acc = 0.900 (3.342 sec/step)
step 52470 	 loss = 0.005, train_acc = 1.000 (3.307 sec/step)
step 52480 	 loss = 0.092, train_acc = 0.900 (3.340 sec/step)
step 52490 	 loss = 0.018, train_acc = 1.000 (3.311 sec/step)
step 52500 	 loss = 0.037, train_acc = 1.000 (3.367 sec/step)
step 52510 	 loss = 0.100, train_acc = 1.000 (3.347 sec/step)
step 52520 	 loss = 0.336, train_acc = 0.800 (3.338 sec/step)
step 52530 	 loss = 0.620, train_acc = 0.900 (3.287 sec/step)
step 52540 	 loss = 0.003, train_acc = 1.000 (3.300 sec/step)
step 52550 	 loss = 0.127, train_acc = 0.900 (3.327 sec/step)
step 52560 	 loss = 0.025, train_acc = 1.000 (3.278 sec/step)
step 52570 	 loss = 1.291, train_acc = 0.700 (3.323 sec/step)
step 52580 	 loss = 0.003, train_acc = 1.000 (3.302 sec/step)
step 52590 	 loss = 0.006, train_acc = 1.000 (3.303 sec/step)
step 52600 	 loss = 0.156, train_acc = 0.900 (3.340 sec/step)
step 52610 	 loss = 0.230, train_acc = 0.800 (3.294 sec/step)
step 52620 	 loss = 0.007, train_acc = 1.000 (3.327 sec/step)
step 52630 	 loss = 0.048, train_acc = 1.000 (3.325 sec/step)
step 52640 	 loss = 0.695, train_acc = 0.800 (3.346 sec/step)
step 52650 	 loss = 0.550, train_acc = 0.900 (3.325 sec/step)
step 52660 	 loss = 0.257, train_acc = 0.800 (3.307 sec/step)
step 52670 	 loss = 0.389, train_acc = 0.800 (3.330 sec/step)
step 52680 	 loss = 0.080, train_acc = 0.900 (3.365 sec/step)
step 52690 	 loss = 0.399, train_acc = 0.800 (3.305 sec/step)
step 52700 	 loss = 0.007, train_acc = 1.000 (3.310 sec/step)
step 52710 	 loss = 0.115, train_acc = 0.900 (3.342 sec/step)
step 52720 	 loss = 0.436, train_acc = 0.800 (3.284 sec/step)
step 52730 	 loss = 0.606, train_acc = 0.800 (3.410 sec/step)
step 52740 	 loss = 0.248, train_acc = 0.900 (3.317 sec/step)
step 52750 	 loss = 0.028, train_acc = 1.000 (3.306 sec/step)
step 52760 	 loss = 0.004, train_acc = 1.000 (3.300 sec/step)
step 52770 	 loss = 0.008, train_acc = 1.000 (3.330 sec/step)
step 52780 	 loss = 0.919, train_acc = 0.900 (3.322 sec/step)
step 52790 	 loss = 0.152, train_acc = 0.900 (3.316 sec/step)
step 52800 	 loss = 0.011, train_acc = 1.000 (3.372 sec/step)
step 52810 	 loss = 0.009, train_acc = 1.000 (3.281 sec/step)
step 52820 	 loss = 0.434, train_acc = 0.900 (3.326 sec/step)
step 52830 	 loss = 0.416, train_acc = 0.900 (3.342 sec/step)
step 52840 	 loss = 0.026, train_acc = 1.000 (3.292 sec/step)
step 52850 	 loss = 0.283, train_acc = 0.900 (3.321 sec/step)
step 52860 	 loss = 0.120, train_acc = 0.900 (3.358 sec/step)
step 52870 	 loss = 0.026, train_acc = 1.000 (3.407 sec/step)
step 52880 	 loss = 1.019, train_acc = 0.900 (3.456 sec/step)
step 52890 	 loss = 0.009, train_acc = 1.000 (3.373 sec/step)
step 52900 	 loss = 0.823, train_acc = 0.700 (3.311 sec/step)
step 52910 	 loss = 0.244, train_acc = 0.800 (3.316 sec/step)
step 52920 	 loss = 0.094, train_acc = 0.900 (3.345 sec/step)
step 52930 	 loss = 0.301, train_acc = 0.800 (3.321 sec/step)
step 52940 	 loss = 0.115, train_acc = 1.000 (3.345 sec/step)
step 52950 	 loss = 0.001, train_acc = 1.000 (3.361 sec/step)
step 52960 	 loss = 0.020, train_acc = 1.000 (3.305 sec/step)
step 52970 	 loss = 0.132, train_acc = 0.900 (3.289 sec/step)
step 52980 	 loss = 0.179, train_acc = 0.900 (3.277 sec/step)
step 52990 	 loss = 0.043, train_acc = 1.000 (3.353 sec/step)
step 53000 	 loss = 0.052, train_acc = 1.000 (3.371 sec/step)
step 53010 	 loss = 0.355, train_acc = 0.900 (3.335 sec/step)
step 53020 	 loss = 0.041, train_acc = 1.000 (3.330 sec/step)
step 53030 	 loss = 0.080, train_acc = 1.000 (3.343 sec/step)
step 53040 	 loss = 0.113, train_acc = 0.900 (3.350 sec/step)
step 53050 	 loss = 0.009, train_acc = 1.000 (3.334 sec/step)
step 53060 	 loss = 0.011, train_acc = 1.000 (3.331 sec/step)
step 53070 	 loss = 0.002, train_acc = 1.000 (3.360 sec/step)
step 53080 	 loss = 0.229, train_acc = 0.900 (3.382 sec/step)
step 53090 	 loss = 0.233, train_acc = 0.900 (3.281 sec/step)
step 53100 	 loss = 0.568, train_acc = 0.800 (3.314 sec/step)
step 53110 	 loss = 0.311, train_acc = 0.800 (3.399 sec/step)
step 53120 	 loss = 0.008, train_acc = 1.000 (3.364 sec/step)
step 53130 	 loss = 0.043, train_acc = 1.000 (3.323 sec/step)
step 53140 	 loss = 0.476, train_acc = 0.700 (3.349 sec/step)
step 53150 	 loss = 0.032, train_acc = 1.000 (3.304 sec/step)
step 53160 	 loss = 0.002, train_acc = 1.000 (3.340 sec/step)
step 53170 	 loss = 0.022, train_acc = 1.000 (3.351 sec/step)
step 53180 	 loss = 0.356, train_acc = 0.900 (3.278 sec/step)
step 53190 	 loss = 0.003, train_acc = 1.000 (3.362 sec/step)
VALIDATION 	 acc = 0.530 (3.617 sec)
step 53200 	 loss = 0.003, train_acc = 1.000 (3.308 sec/step)
step 53210 	 loss = 0.115, train_acc = 1.000 (3.348 sec/step)
step 53220 	 loss = 0.009, train_acc = 1.000 (3.352 sec/step)
step 53230 	 loss = 1.210, train_acc = 0.700 (3.268 sec/step)
step 53240 	 loss = 0.013, train_acc = 1.000 (3.315 sec/step)
step 53250 	 loss = 0.078, train_acc = 1.000 (3.311 sec/step)
step 53260 	 loss = 0.527, train_acc = 0.800 (3.353 sec/step)
step 53270 	 loss = 0.032, train_acc = 1.000 (3.314 sec/step)
step 53280 	 loss = 0.019, train_acc = 1.000 (3.428 sec/step)
step 53290 	 loss = 0.110, train_acc = 0.900 (3.319 sec/step)
step 53300 	 loss = 0.043, train_acc = 1.000 (3.317 sec/step)
step 53310 	 loss = 0.042, train_acc = 1.000 (3.306 sec/step)
step 53320 	 loss = 0.000, train_acc = 1.000 (3.324 sec/step)
step 53330 	 loss = 0.001, train_acc = 1.000 (3.412 sec/step)
step 53340 	 loss = 0.379, train_acc = 0.900 (3.301 sec/step)
step 53350 	 loss = 0.034, train_acc = 1.000 (3.292 sec/step)
step 53360 	 loss = 0.523, train_acc = 0.900 (3.286 sec/step)
step 53370 	 loss = 0.044, train_acc = 1.000 (3.330 sec/step)
step 53380 	 loss = 0.008, train_acc = 1.000 (3.360 sec/step)
step 53390 	 loss = 0.000, train_acc = 1.000 (3.316 sec/step)
step 53400 	 loss = 0.047, train_acc = 1.000 (3.351 sec/step)
step 53410 	 loss = 0.226, train_acc = 0.900 (3.323 sec/step)
step 53420 	 loss = 0.036, train_acc = 1.000 (3.317 sec/step)
step 53430 	 loss = 0.221, train_acc = 0.900 (3.323 sec/step)
step 53440 	 loss = 0.026, train_acc = 1.000 (3.369 sec/step)
step 53450 	 loss = 0.412, train_acc = 0.900 (3.366 sec/step)
step 53460 	 loss = 0.387, train_acc = 0.900 (3.294 sec/step)
step 53470 	 loss = 0.069, train_acc = 1.000 (3.335 sec/step)
step 53480 	 loss = 0.018, train_acc = 1.000 (3.419 sec/step)
step 53490 	 loss = 0.049, train_acc = 1.000 (3.349 sec/step)
step 53500 	 loss = 0.010, train_acc = 1.000 (3.375 sec/step)
step 53510 	 loss = 0.311, train_acc = 0.900 (3.400 sec/step)
step 53520 	 loss = 0.190, train_acc = 0.900 (3.353 sec/step)
step 53530 	 loss = 0.586, train_acc = 0.800 (3.405 sec/step)
step 53540 	 loss = 0.870, train_acc = 0.900 (3.346 sec/step)
step 53550 	 loss = 0.322, train_acc = 0.800 (3.374 sec/step)
step 53560 	 loss = 0.202, train_acc = 0.900 (3.331 sec/step)
step 53570 	 loss = 0.027, train_acc = 1.000 (3.290 sec/step)
step 53580 	 loss = 0.002, train_acc = 1.000 (3.290 sec/step)
step 53590 	 loss = 0.081, train_acc = 0.900 (3.319 sec/step)
step 53600 	 loss = 0.126, train_acc = 0.900 (3.333 sec/step)
step 53610 	 loss = 0.017, train_acc = 1.000 (3.297 sec/step)
step 53620 	 loss = 0.169, train_acc = 0.900 (3.331 sec/step)
step 53630 	 loss = 0.017, train_acc = 1.000 (3.327 sec/step)
step 53640 	 loss = 1.014, train_acc = 0.900 (3.331 sec/step)
step 53650 	 loss = 0.124, train_acc = 0.900 (3.317 sec/step)
step 53660 	 loss = 0.021, train_acc = 1.000 (3.345 sec/step)
step 53670 	 loss = 0.000, train_acc = 1.000 (3.351 sec/step)
step 53680 	 loss = 1.055, train_acc = 0.800 (3.346 sec/step)
step 53690 	 loss = 0.320, train_acc = 0.900 (3.339 sec/step)
step 53700 	 loss = 0.922, train_acc = 0.900 (3.287 sec/step)
step 53710 	 loss = 0.491, train_acc = 0.900 (3.325 sec/step)
step 53720 	 loss = 0.220, train_acc = 0.900 (3.322 sec/step)
step 53730 	 loss = 0.717, train_acc = 0.700 (3.352 sec/step)
step 53740 	 loss = 0.067, train_acc = 1.000 (3.299 sec/step)
step 53750 	 loss = 0.000, train_acc = 1.000 (3.319 sec/step)
step 53760 	 loss = 0.295, train_acc = 0.900 (3.402 sec/step)
step 53770 	 loss = 0.273, train_acc = 0.900 (3.283 sec/step)
step 53780 	 loss = 0.044, train_acc = 1.000 (3.337 sec/step)
step 53790 	 loss = 0.002, train_acc = 1.000 (3.426 sec/step)
step 53800 	 loss = 0.464, train_acc = 0.900 (3.293 sec/step)
step 53810 	 loss = 0.136, train_acc = 1.000 (3.287 sec/step)
step 53820 	 loss = 0.039, train_acc = 1.000 (3.371 sec/step)
step 53830 	 loss = 0.008, train_acc = 1.000 (3.429 sec/step)
step 53840 	 loss = 0.174, train_acc = 0.900 (3.346 sec/step)
step 53850 	 loss = 0.215, train_acc = 1.000 (3.333 sec/step)
step 53860 	 loss = 0.100, train_acc = 1.000 (3.295 sec/step)
step 53870 	 loss = 0.008, train_acc = 1.000 (3.363 sec/step)
step 53880 	 loss = 0.275, train_acc = 0.900 (3.374 sec/step)
step 53890 	 loss = 0.331, train_acc = 0.900 (3.364 sec/step)
step 53900 	 loss = 0.200, train_acc = 0.900 (3.298 sec/step)
step 53910 	 loss = 0.121, train_acc = 0.900 (3.320 sec/step)
step 53920 	 loss = 0.171, train_acc = 0.900 (3.345 sec/step)
step 53930 	 loss = 1.172, train_acc = 0.600 (3.353 sec/step)
step 53940 	 loss = 0.348, train_acc = 0.900 (3.304 sec/step)
step 53950 	 loss = 0.013, train_acc = 1.000 (3.373 sec/step)
step 53960 	 loss = 0.003, train_acc = 1.000 (3.363 sec/step)
step 53970 	 loss = 0.052, train_acc = 1.000 (3.322 sec/step)
step 53980 	 loss = 0.030, train_acc = 1.000 (3.325 sec/step)
step 53990 	 loss = 0.147, train_acc = 0.900 (3.285 sec/step)
step 54000 	 loss = 0.007, train_acc = 1.000 (3.353 sec/step)
step 54010 	 loss = 0.718, train_acc = 0.600 (3.314 sec/step)
step 54020 	 loss = 0.023, train_acc = 1.000 (3.320 sec/step)
step 54030 	 loss = 0.001, train_acc = 1.000 (3.325 sec/step)
step 54040 	 loss = 0.053, train_acc = 1.000 (3.297 sec/step)
step 54050 	 loss = 0.138, train_acc = 1.000 (3.324 sec/step)
step 54060 	 loss = 0.053, train_acc = 1.000 (3.333 sec/step)
step 54070 	 loss = 0.073, train_acc = 0.900 (3.275 sec/step)
step 54080 	 loss = 0.322, train_acc = 0.900 (3.335 sec/step)
step 54090 	 loss = 0.000, train_acc = 1.000 (3.319 sec/step)
step 54100 	 loss = 0.043, train_acc = 1.000 (3.327 sec/step)
step 54110 	 loss = 0.003, train_acc = 1.000 (3.374 sec/step)
step 54120 	 loss = 0.293, train_acc = 0.900 (3.292 sec/step)
step 54130 	 loss = 0.244, train_acc = 0.900 (3.276 sec/step)
step 54140 	 loss = 0.098, train_acc = 0.900 (3.379 sec/step)
step 54150 	 loss = 0.002, train_acc = 1.000 (3.296 sec/step)
step 54160 	 loss = 0.097, train_acc = 1.000 (3.346 sec/step)
step 54170 	 loss = 0.101, train_acc = 0.900 (3.353 sec/step)
step 54180 	 loss = 0.342, train_acc = 0.900 (3.309 sec/step)
step 54190 	 loss = 0.022, train_acc = 1.000 (3.305 sec/step)
step 54200 	 loss = 0.001, train_acc = 1.000 (3.317 sec/step)
step 54210 	 loss = 0.059, train_acc = 1.000 (3.329 sec/step)
step 54220 	 loss = 0.680, train_acc = 0.900 (3.317 sec/step)
step 54230 	 loss = 0.156, train_acc = 0.900 (3.351 sec/step)
step 54240 	 loss = 0.068, train_acc = 1.000 (3.289 sec/step)
step 54250 	 loss = 0.027, train_acc = 1.000 (3.297 sec/step)
step 54260 	 loss = 0.002, train_acc = 1.000 (3.359 sec/step)
step 54270 	 loss = 0.000, train_acc = 1.000 (3.479 sec/step)
step 54280 	 loss = 0.050, train_acc = 1.000 (3.345 sec/step)
step 54290 	 loss = 0.002, train_acc = 1.000 (3.299 sec/step)
step 54300 	 loss = 0.337, train_acc = 0.800 (3.360 sec/step)
step 54310 	 loss = 0.231, train_acc = 0.900 (3.324 sec/step)
step 54320 	 loss = 0.251, train_acc = 0.900 (3.340 sec/step)
step 54330 	 loss = 0.256, train_acc = 0.900 (3.438 sec/step)
step 54340 	 loss = 0.073, train_acc = 1.000 (3.295 sec/step)
step 54350 	 loss = 0.005, train_acc = 1.000 (3.332 sec/step)
step 54360 	 loss = 0.008, train_acc = 1.000 (3.317 sec/step)
step 54370 	 loss = 0.389, train_acc = 0.900 (3.332 sec/step)
step 54380 	 loss = 0.029, train_acc = 1.000 (3.431 sec/step)
step 54390 	 loss = 0.164, train_acc = 0.900 (3.368 sec/step)
step 54400 	 loss = 0.370, train_acc = 0.700 (3.340 sec/step)
step 54410 	 loss = 0.066, train_acc = 1.000 (3.358 sec/step)
step 54420 	 loss = 0.789, train_acc = 0.800 (3.342 sec/step)
step 54430 	 loss = 0.147, train_acc = 0.900 (3.387 sec/step)
step 54440 	 loss = 0.000, train_acc = 1.000 (3.328 sec/step)
step 54450 	 loss = 0.044, train_acc = 1.000 (3.333 sec/step)
step 54460 	 loss = 0.049, train_acc = 1.000 (3.374 sec/step)
step 54470 	 loss = 0.015, train_acc = 1.000 (3.312 sec/step)
step 54480 	 loss = 0.002, train_acc = 1.000 (3.345 sec/step)
step 54490 	 loss = 1.987, train_acc = 0.900 (3.429 sec/step)
step 54500 	 loss = 0.024, train_acc = 1.000 (3.365 sec/step)
step 54510 	 loss = 0.037, train_acc = 1.000 (3.384 sec/step)
step 54520 	 loss = 0.483, train_acc = 0.800 (3.294 sec/step)
step 54530 	 loss = 0.002, train_acc = 1.000 (3.360 sec/step)
step 54540 	 loss = 0.096, train_acc = 0.900 (3.312 sec/step)
step 54550 	 loss = 0.090, train_acc = 1.000 (3.293 sec/step)
step 54560 	 loss = 0.509, train_acc = 0.800 (3.328 sec/step)
step 54570 	 loss = 0.110, train_acc = 0.900 (3.357 sec/step)
step 54580 	 loss = 0.174, train_acc = 1.000 (3.316 sec/step)
step 54590 	 loss = 0.018, train_acc = 1.000 (3.348 sec/step)
step 54600 	 loss = 0.098, train_acc = 0.900 (3.317 sec/step)
step 54610 	 loss = 0.484, train_acc = 0.900 (3.345 sec/step)
step 54620 	 loss = 0.014, train_acc = 1.000 (3.328 sec/step)
step 54630 	 loss = 0.064, train_acc = 1.000 (3.312 sec/step)
step 54640 	 loss = 0.291, train_acc = 0.800 (3.313 sec/step)
step 54650 	 loss = 0.321, train_acc = 0.800 (3.286 sec/step)
step 54660 	 loss = 0.026, train_acc = 1.000 (3.396 sec/step)
step 54670 	 loss = 0.034, train_acc = 1.000 (3.382 sec/step)
step 54680 	 loss = 0.798, train_acc = 0.900 (3.312 sec/step)
step 54690 	 loss = 0.282, train_acc = 0.900 (3.310 sec/step)
step 54700 	 loss = 0.001, train_acc = 1.000 (3.366 sec/step)
step 54710 	 loss = 0.001, train_acc = 1.000 (3.310 sec/step)
step 54720 	 loss = 0.399, train_acc = 0.900 (3.292 sec/step)
step 54730 	 loss = 0.286, train_acc = 0.900 (3.334 sec/step)
step 54740 	 loss = 0.006, train_acc = 1.000 (3.315 sec/step)
step 54750 	 loss = 0.580, train_acc = 0.800 (3.368 sec/step)
step 54760 	 loss = 0.006, train_acc = 1.000 (3.351 sec/step)
step 54770 	 loss = 0.467, train_acc = 0.900 (3.285 sec/step)
step 54780 	 loss = 0.002, train_acc = 1.000 (3.303 sec/step)
step 54790 	 loss = 0.004, train_acc = 1.000 (3.340 sec/step)
step 54800 	 loss = 0.017, train_acc = 1.000 (3.327 sec/step)
step 54810 	 loss = 0.570, train_acc = 0.900 (3.350 sec/step)
step 54820 	 loss = 0.078, train_acc = 1.000 (3.317 sec/step)
step 54830 	 loss = 0.751, train_acc = 0.800 (3.313 sec/step)
step 54840 	 loss = 0.623, train_acc = 0.800 (3.335 sec/step)
step 54850 	 loss = 0.032, train_acc = 1.000 (3.362 sec/step)
step 54860 	 loss = 0.481, train_acc = 0.800 (3.345 sec/step)
step 54870 	 loss = 0.717, train_acc = 0.800 (3.378 sec/step)
step 54880 	 loss = 0.003, train_acc = 1.000 (3.378 sec/step)
step 54890 	 loss = 0.904, train_acc = 0.900 (3.394 sec/step)
step 54900 	 loss = 0.317, train_acc = 0.900 (3.316 sec/step)
step 54910 	 loss = 0.320, train_acc = 0.800 (3.333 sec/step)
step 54920 	 loss = 0.853, train_acc = 0.800 (3.318 sec/step)
step 54930 	 loss = 0.063, train_acc = 1.000 (3.380 sec/step)
step 54940 	 loss = 0.001, train_acc = 1.000 (3.352 sec/step)
step 54950 	 loss = 0.413, train_acc = 0.900 (3.351 sec/step)
step 54960 	 loss = 0.003, train_acc = 1.000 (3.308 sec/step)
step 54970 	 loss = 0.000, train_acc = 1.000 (3.295 sec/step)
step 54980 	 loss = 0.033, train_acc = 1.000 (3.358 sec/step)
step 54990 	 loss = 0.399, train_acc = 0.900 (3.431 sec/step)
step 55000 	 loss = 0.161, train_acc = 0.900 (3.329 sec/step)
step 55010 	 loss = 0.005, train_acc = 1.000 (3.288 sec/step)
step 55020 	 loss = 0.113, train_acc = 0.900 (3.335 sec/step)
step 55030 	 loss = 0.156, train_acc = 0.900 (3.334 sec/step)
step 55040 	 loss = 0.250, train_acc = 0.800 (3.374 sec/step)
step 55050 	 loss = 0.014, train_acc = 1.000 (3.277 sec/step)
step 55060 	 loss = 0.378, train_acc = 0.900 (3.339 sec/step)
step 55070 	 loss = 0.295, train_acc = 0.900 (3.304 sec/step)
step 55080 	 loss = 0.201, train_acc = 1.000 (3.324 sec/step)
step 55090 	 loss = 0.378, train_acc = 0.900 (3.402 sec/step)
VALIDATION 	 acc = 0.538 (3.616 sec)
step 55100 	 loss = 0.843, train_acc = 0.800 (3.343 sec/step)
step 55110 	 loss = 0.023, train_acc = 1.000 (3.307 sec/step)
step 55120 	 loss = 1.256, train_acc = 0.800 (3.296 sec/step)
step 55130 	 loss = 0.548, train_acc = 0.900 (3.348 sec/step)
step 55140 	 loss = 0.608, train_acc = 0.900 (3.347 sec/step)
step 55150 	 loss = 0.006, train_acc = 1.000 (3.315 sec/step)
step 55160 	 loss = 0.000, train_acc = 1.000 (3.435 sec/step)
step 55170 	 loss = 0.086, train_acc = 0.900 (3.378 sec/step)
step 55180 	 loss = 0.040, train_acc = 1.000 (3.348 sec/step)
step 55190 	 loss = 0.281, train_acc = 0.900 (3.332 sec/step)
step 55200 	 loss = 0.031, train_acc = 1.000 (3.341 sec/step)
step 55210 	 loss = 0.032, train_acc = 1.000 (3.293 sec/step)
step 55220 	 loss = 0.563, train_acc = 0.700 (3.331 sec/step)
step 55230 	 loss = 0.326, train_acc = 0.900 (3.300 sec/step)
step 55240 	 loss = 0.004, train_acc = 1.000 (3.317 sec/step)
step 55250 	 loss = 0.006, train_acc = 1.000 (3.331 sec/step)
step 55260 	 loss = 0.003, train_acc = 1.000 (3.453 sec/step)
step 55270 	 loss = 0.027, train_acc = 1.000 (3.285 sec/step)
step 55280 	 loss = 0.339, train_acc = 0.900 (3.321 sec/step)
step 55290 	 loss = 0.034, train_acc = 1.000 (3.281 sec/step)
step 55300 	 loss = 0.575, train_acc = 0.800 (3.324 sec/step)
step 55310 	 loss = 0.002, train_acc = 1.000 (3.356 sec/step)
step 55320 	 loss = 0.042, train_acc = 1.000 (3.320 sec/step)
step 55330 	 loss = 0.231, train_acc = 0.900 (3.355 sec/step)
step 55340 	 loss = 0.002, train_acc = 1.000 (3.339 sec/step)
step 55350 	 loss = 0.000, train_acc = 1.000 (3.313 sec/step)
step 55360 	 loss = 0.006, train_acc = 1.000 (3.292 sec/step)
step 55370 	 loss = 0.017, train_acc = 1.000 (3.364 sec/step)
step 55380 	 loss = 0.112, train_acc = 1.000 (3.343 sec/step)
step 55390 	 loss = 0.137, train_acc = 1.000 (3.291 sec/step)
step 55400 	 loss = 0.000, train_acc = 1.000 (3.301 sec/step)
step 55410 	 loss = 0.262, train_acc = 0.900 (3.324 sec/step)
step 55420 	 loss = 0.002, train_acc = 1.000 (3.341 sec/step)
step 55430 	 loss = 0.073, train_acc = 1.000 (3.318 sec/step)
step 55440 	 loss = 0.029, train_acc = 1.000 (3.322 sec/step)
step 55450 	 loss = 0.042, train_acc = 1.000 (3.338 sec/step)
step 55460 	 loss = 0.009, train_acc = 1.000 (3.307 sec/step)
step 55470 	 loss = 0.000, train_acc = 1.000 (3.324 sec/step)
step 55480 	 loss = 0.001, train_acc = 1.000 (3.350 sec/step)
step 55490 	 loss = 0.001, train_acc = 1.000 (3.344 sec/step)
step 55500 	 loss = 0.455, train_acc = 0.900 (3.321 sec/step)
step 55510 	 loss = 0.406, train_acc = 0.900 (3.307 sec/step)
step 55520 	 loss = 0.117, train_acc = 0.900 (3.347 sec/step)
step 55530 	 loss = 0.262, train_acc = 0.900 (3.381 sec/step)
step 55540 	 loss = 3.158, train_acc = 0.900 (3.348 sec/step)
step 55550 	 loss = 0.019, train_acc = 1.000 (3.362 sec/step)
step 55560 	 loss = 2.476, train_acc = 0.800 (3.368 sec/step)
step 55570 	 loss = 0.002, train_acc = 1.000 (3.292 sec/step)
step 55580 	 loss = 0.014, train_acc = 1.000 (3.351 sec/step)
step 55590 	 loss = 0.112, train_acc = 1.000 (3.320 sec/step)
step 55600 	 loss = 0.109, train_acc = 0.900 (3.418 sec/step)
step 55610 	 loss = 0.663, train_acc = 0.900 (3.320 sec/step)
step 55620 	 loss = 0.047, train_acc = 1.000 (3.365 sec/step)
step 55630 	 loss = 0.347, train_acc = 0.800 (3.350 sec/step)
step 55640 	 loss = 0.472, train_acc = 0.900 (3.316 sec/step)
step 55650 	 loss = 0.583, train_acc = 0.800 (3.295 sec/step)
step 55660 	 loss = 0.025, train_acc = 1.000 (3.316 sec/step)
step 55670 	 loss = 0.000, train_acc = 1.000 (3.309 sec/step)
step 55680 	 loss = 0.426, train_acc = 0.700 (3.333 sec/step)
step 55690 	 loss = 0.088, train_acc = 1.000 (3.337 sec/step)
step 55700 	 loss = 1.171, train_acc = 0.700 (3.416 sec/step)
step 55710 	 loss = 0.018, train_acc = 1.000 (3.426 sec/step)
step 55720 	 loss = 0.235, train_acc = 1.000 (3.295 sec/step)
step 55730 	 loss = 0.442, train_acc = 0.900 (3.286 sec/step)
step 55740 	 loss = 0.016, train_acc = 1.000 (3.342 sec/step)
step 55750 	 loss = 1.202, train_acc = 0.800 (3.316 sec/step)
step 55760 	 loss = 0.021, train_acc = 1.000 (3.318 sec/step)
step 55770 	 loss = 0.000, train_acc = 1.000 (3.313 sec/step)
step 55780 	 loss = 0.002, train_acc = 1.000 (3.360 sec/step)
step 55790 	 loss = 0.027, train_acc = 1.000 (3.298 sec/step)
step 55800 	 loss = 0.087, train_acc = 0.900 (3.329 sec/step)
step 55810 	 loss = 0.135, train_acc = 1.000 (3.357 sec/step)
step 55820 	 loss = 0.045, train_acc = 1.000 (3.329 sec/step)
step 55830 	 loss = 0.068, train_acc = 1.000 (3.317 sec/step)
step 55840 	 loss = 0.000, train_acc = 1.000 (3.335 sec/step)
step 55850 	 loss = 0.005, train_acc = 1.000 (3.318 sec/step)
step 55860 	 loss = 0.021, train_acc = 1.000 (3.305 sec/step)
step 55870 	 loss = 0.088, train_acc = 1.000 (3.318 sec/step)
step 55880 	 loss = 0.021, train_acc = 1.000 (3.327 sec/step)
step 55890 	 loss = 0.164, train_acc = 0.900 (3.350 sec/step)
step 55900 	 loss = 0.124, train_acc = 1.000 (3.352 sec/step)
step 55910 	 loss = 0.049, train_acc = 1.000 (3.367 sec/step)
step 55920 	 loss = 0.128, train_acc = 1.000 (3.369 sec/step)
step 55930 	 loss = 0.054, train_acc = 1.000 (3.367 sec/step)
step 55940 	 loss = 0.218, train_acc = 0.800 (3.327 sec/step)
step 55950 	 loss = 0.094, train_acc = 1.000 (3.322 sec/step)
step 55960 	 loss = 0.143, train_acc = 1.000 (3.328 sec/step)
step 55970 	 loss = 0.002, train_acc = 1.000 (3.313 sec/step)
step 55980 	 loss = 0.223, train_acc = 0.900 (3.351 sec/step)
step 55990 	 loss = 0.156, train_acc = 0.900 (3.302 sec/step)
step 56000 	 loss = 0.381, train_acc = 0.800 (3.325 sec/step)
step 56010 	 loss = 0.263, train_acc = 0.900 (3.312 sec/step)
step 56020 	 loss = 0.054, train_acc = 1.000 (3.338 sec/step)
step 56030 	 loss = 0.000, train_acc = 1.000 (3.353 sec/step)
step 56040 	 loss = 0.001, train_acc = 1.000 (3.465 sec/step)
step 56050 	 loss = 0.285, train_acc = 0.900 (3.302 sec/step)
step 56060 	 loss = 0.019, train_acc = 1.000 (3.347 sec/step)
step 56070 	 loss = 0.033, train_acc = 1.000 (3.287 sec/step)
step 56080 	 loss = 0.003, train_acc = 1.000 (3.298 sec/step)
step 56090 	 loss = 0.045, train_acc = 1.000 (3.433 sec/step)
step 56100 	 loss = 0.002, train_acc = 1.000 (3.328 sec/step)
step 56110 	 loss = 0.120, train_acc = 0.900 (3.300 sec/step)
step 56120 	 loss = 0.001, train_acc = 1.000 (3.330 sec/step)
step 56130 	 loss = 0.025, train_acc = 1.000 (3.386 sec/step)
step 56140 	 loss = 0.245, train_acc = 0.900 (3.375 sec/step)
step 56150 	 loss = 0.102, train_acc = 0.900 (3.354 sec/step)
step 56160 	 loss = 0.069, train_acc = 1.000 (3.342 sec/step)
step 56170 	 loss = 0.138, train_acc = 1.000 (3.408 sec/step)
step 56180 	 loss = 0.445, train_acc = 0.800 (3.372 sec/step)
step 56190 	 loss = 0.095, train_acc = 1.000 (3.370 sec/step)
step 56200 	 loss = 0.003, train_acc = 1.000 (3.399 sec/step)
step 56210 	 loss = 0.303, train_acc = 0.900 (3.337 sec/step)
step 56220 	 loss = 0.019, train_acc = 1.000 (3.278 sec/step)
step 56230 	 loss = 0.153, train_acc = 1.000 (3.350 sec/step)
step 56240 	 loss = 0.677, train_acc = 0.800 (3.300 sec/step)
step 56250 	 loss = 0.012, train_acc = 1.000 (3.316 sec/step)
step 56260 	 loss = 0.006, train_acc = 1.000 (3.381 sec/step)
step 56270 	 loss = 0.487, train_acc = 0.900 (3.305 sec/step)
step 56280 	 loss = 0.016, train_acc = 1.000 (3.323 sec/step)
step 56290 	 loss = 0.397, train_acc = 0.800 (3.274 sec/step)
step 56300 	 loss = 0.110, train_acc = 0.900 (3.344 sec/step)
step 56310 	 loss = 0.893, train_acc = 0.900 (3.295 sec/step)
step 56320 	 loss = 0.034, train_acc = 1.000 (3.278 sec/step)
step 56330 	 loss = 0.026, train_acc = 1.000 (3.339 sec/step)
step 56340 	 loss = 0.036, train_acc = 1.000 (3.320 sec/step)
step 56350 	 loss = 0.329, train_acc = 0.900 (3.352 sec/step)
step 56360 	 loss = 0.091, train_acc = 1.000 (3.336 sec/step)
step 56370 	 loss = 0.048, train_acc = 1.000 (3.321 sec/step)
step 56380 	 loss = 0.232, train_acc = 0.900 (3.422 sec/step)
step 56390 	 loss = 0.005, train_acc = 1.000 (3.396 sec/step)
step 56400 	 loss = 0.067, train_acc = 1.000 (3.307 sec/step)
step 56410 	 loss = 1.117, train_acc = 0.800 (3.364 sec/step)
step 56420 	 loss = 0.064, train_acc = 1.000 (3.395 sec/step)
step 56430 	 loss = 0.578, train_acc = 0.900 (3.314 sec/step)
step 56440 	 loss = 0.008, train_acc = 1.000 (3.321 sec/step)
step 56450 	 loss = 0.025, train_acc = 1.000 (3.287 sec/step)
step 56460 	 loss = 0.201, train_acc = 0.900 (3.366 sec/step)
step 56470 	 loss = 0.045, train_acc = 1.000 (3.367 sec/step)
step 56480 	 loss = 0.017, train_acc = 1.000 (3.351 sec/step)
step 56490 	 loss = 0.209, train_acc = 0.900 (3.363 sec/step)
step 56500 	 loss = 0.043, train_acc = 1.000 (3.369 sec/step)
step 56510 	 loss = 0.019, train_acc = 1.000 (3.304 sec/step)
step 56520 	 loss = 0.754, train_acc = 0.800 (3.365 sec/step)
step 56530 	 loss = 0.083, train_acc = 1.000 (3.346 sec/step)
step 56540 	 loss = 0.103, train_acc = 1.000 (3.430 sec/step)
step 56550 	 loss = 0.264, train_acc = 0.900 (3.356 sec/step)
step 56560 	 loss = 0.544, train_acc = 0.900 (3.339 sec/step)
step 56570 	 loss = 0.195, train_acc = 0.800 (3.342 sec/step)
step 56580 	 loss = 0.553, train_acc = 0.800 (3.313 sec/step)
step 56590 	 loss = 0.239, train_acc = 0.900 (3.362 sec/step)
step 56600 	 loss = 0.371, train_acc = 0.800 (3.305 sec/step)
step 56610 	 loss = 0.072, train_acc = 1.000 (3.320 sec/step)
step 56620 	 loss = 0.091, train_acc = 1.000 (3.303 sec/step)
step 56630 	 loss = 0.019, train_acc = 1.000 (3.305 sec/step)
step 56640 	 loss = 0.129, train_acc = 0.900 (3.370 sec/step)
step 56650 	 loss = 0.149, train_acc = 0.900 (3.333 sec/step)
step 56660 	 loss = 0.097, train_acc = 0.900 (3.275 sec/step)
step 56670 	 loss = 0.001, train_acc = 1.000 (3.325 sec/step)
step 56680 	 loss = 0.023, train_acc = 1.000 (3.317 sec/step)
step 56690 	 loss = 0.029, train_acc = 1.000 (3.336 sec/step)
step 56700 	 loss = 0.166, train_acc = 0.900 (3.383 sec/step)
step 56710 	 loss = 2.615, train_acc = 0.900 (3.312 sec/step)
step 56720 	 loss = 0.040, train_acc = 1.000 (3.349 sec/step)
step 56730 	 loss = 0.483, train_acc = 0.900 (3.329 sec/step)
step 56740 	 loss = 0.014, train_acc = 1.000 (3.352 sec/step)
step 56750 	 loss = 1.337, train_acc = 0.800 (3.435 sec/step)
step 56760 	 loss = 0.093, train_acc = 1.000 (3.317 sec/step)
step 56770 	 loss = 0.006, train_acc = 1.000 (3.345 sec/step)
step 56780 	 loss = 0.152, train_acc = 0.900 (3.292 sec/step)
step 56790 	 loss = 0.720, train_acc = 0.900 (3.329 sec/step)
step 56800 	 loss = 0.015, train_acc = 1.000 (3.355 sec/step)
step 56810 	 loss = 0.027, train_acc = 1.000 (3.354 sec/step)
step 56820 	 loss = 0.085, train_acc = 1.000 (3.422 sec/step)
step 56830 	 loss = 0.247, train_acc = 0.900 (3.406 sec/step)
step 56840 	 loss = 1.283, train_acc = 0.800 (3.293 sec/step)
step 56850 	 loss = 0.020, train_acc = 1.000 (3.333 sec/step)
step 56860 	 loss = 0.054, train_acc = 1.000 (3.316 sec/step)
step 56870 	 loss = 0.011, train_acc = 1.000 (3.336 sec/step)
step 56880 	 loss = 0.127, train_acc = 0.900 (3.350 sec/step)
step 56890 	 loss = 0.001, train_acc = 1.000 (3.299 sec/step)
step 56900 	 loss = 0.035, train_acc = 1.000 (3.315 sec/step)
step 56910 	 loss = 0.112, train_acc = 0.900 (3.307 sec/step)
step 56920 	 loss = 0.002, train_acc = 1.000 (3.315 sec/step)
step 56930 	 loss = 0.018, train_acc = 1.000 (3.324 sec/step)
step 56940 	 loss = 0.072, train_acc = 1.000 (3.387 sec/step)
step 56950 	 loss = 0.078, train_acc = 1.000 (3.365 sec/step)
step 56960 	 loss = 0.156, train_acc = 0.900 (3.399 sec/step)
step 56970 	 loss = 0.000, train_acc = 1.000 (3.390 sec/step)
step 56980 	 loss = 0.001, train_acc = 1.000 (3.379 sec/step)
step 56990 	 loss = 0.104, train_acc = 0.900 (3.360 sec/step)
VALIDATION 	 acc = 0.515 (3.616 sec)
step 57000 	 loss = 0.114, train_acc = 1.000 (3.286 sec/step)
step 57010 	 loss = 0.029, train_acc = 1.000 (3.302 sec/step)
step 57020 	 loss = 0.031, train_acc = 1.000 (3.330 sec/step)
step 57030 	 loss = 0.386, train_acc = 0.900 (3.316 sec/step)
step 57040 	 loss = 0.117, train_acc = 1.000 (3.381 sec/step)
step 57050 	 loss = 0.037, train_acc = 1.000 (3.453 sec/step)
step 57060 	 loss = 0.048, train_acc = 1.000 (3.345 sec/step)
step 57070 	 loss = 0.001, train_acc = 1.000 (3.314 sec/step)
step 57080 	 loss = 0.000, train_acc = 1.000 (3.328 sec/step)
step 57090 	 loss = 0.000, train_acc = 1.000 (3.372 sec/step)
step 57100 	 loss = 0.049, train_acc = 1.000 (3.357 sec/step)
step 57110 	 loss = 0.000, train_acc = 1.000 (3.324 sec/step)
step 57120 	 loss = 0.307, train_acc = 0.900 (3.387 sec/step)
step 57130 	 loss = 0.133, train_acc = 0.900 (3.355 sec/step)
step 57140 	 loss = 0.364, train_acc = 0.800 (3.350 sec/step)
step 57150 	 loss = 0.514, train_acc = 0.900 (3.362 sec/step)
step 57160 	 loss = 0.191, train_acc = 0.900 (3.284 sec/step)
step 57170 	 loss = 0.068, train_acc = 1.000 (3.302 sec/step)
step 57180 	 loss = 0.232, train_acc = 0.900 (3.329 sec/step)
step 57190 	 loss = 0.078, train_acc = 1.000 (3.389 sec/step)
step 57200 	 loss = 1.412, train_acc = 0.900 (3.327 sec/step)
step 57210 	 loss = 0.020, train_acc = 1.000 (3.315 sec/step)
step 57220 	 loss = 2.202, train_acc = 0.800 (3.369 sec/step)
step 57230 	 loss = 0.056, train_acc = 1.000 (3.350 sec/step)
step 57240 	 loss = 0.292, train_acc = 0.900 (3.360 sec/step)
step 57250 	 loss = 0.462, train_acc = 0.900 (3.335 sec/step)
step 57260 	 loss = 0.014, train_acc = 1.000 (3.320 sec/step)
step 57270 	 loss = 0.083, train_acc = 1.000 (3.349 sec/step)
step 57280 	 loss = 0.001, train_acc = 1.000 (3.307 sec/step)
step 57290 	 loss = 0.036, train_acc = 1.000 (3.323 sec/step)
step 57300 	 loss = 0.073, train_acc = 1.000 (3.416 sec/step)
step 57310 	 loss = 0.011, train_acc = 1.000 (3.297 sec/step)
step 57320 	 loss = 2.913, train_acc = 0.600 (3.317 sec/step)
step 57330 	 loss = 0.253, train_acc = 0.900 (3.330 sec/step)
step 57340 	 loss = 0.900, train_acc = 0.800 (3.334 sec/step)
step 57350 	 loss = 0.293, train_acc = 0.900 (3.371 sec/step)
step 57360 	 loss = 1.017, train_acc = 0.700 (3.321 sec/step)
step 57370 	 loss = 0.026, train_acc = 1.000 (3.366 sec/step)
step 57380 	 loss = 0.013, train_acc = 1.000 (3.385 sec/step)
step 57390 	 loss = 0.005, train_acc = 1.000 (3.357 sec/step)
step 57400 	 loss = 0.889, train_acc = 0.900 (3.314 sec/step)
step 57410 	 loss = 0.228, train_acc = 0.900 (3.361 sec/step)
step 57420 	 loss = 0.157, train_acc = 0.900 (3.345 sec/step)
step 57430 	 loss = 0.382, train_acc = 0.900 (3.357 sec/step)
step 57440 	 loss = 0.434, train_acc = 0.900 (3.367 sec/step)
step 57450 	 loss = 0.091, train_acc = 0.900 (3.313 sec/step)
step 57460 	 loss = 0.014, train_acc = 1.000 (3.351 sec/step)
step 57470 	 loss = 0.320, train_acc = 0.900 (3.289 sec/step)
step 57480 	 loss = 0.054, train_acc = 1.000 (3.305 sec/step)
step 57490 	 loss = 0.292, train_acc = 0.900 (3.293 sec/step)
step 57500 	 loss = 0.006, train_acc = 1.000 (3.332 sec/step)
step 57510 	 loss = 0.123, train_acc = 0.900 (3.375 sec/step)
step 57520 	 loss = 0.089, train_acc = 1.000 (3.332 sec/step)
step 57530 	 loss = 0.189, train_acc = 0.900 (3.362 sec/step)
step 57540 	 loss = 0.066, train_acc = 1.000 (3.327 sec/step)
step 57550 	 loss = 0.070, train_acc = 1.000 (3.343 sec/step)
step 57560 	 loss = 0.115, train_acc = 1.000 (3.295 sec/step)
step 57570 	 loss = 0.240, train_acc = 0.900 (3.344 sec/step)
step 57580 	 loss = 0.592, train_acc = 0.900 (3.394 sec/step)
step 57590 	 loss = 0.610, train_acc = 0.800 (3.367 sec/step)
step 57600 	 loss = 0.055, train_acc = 1.000 (3.375 sec/step)
step 57610 	 loss = 0.554, train_acc = 0.900 (3.334 sec/step)
step 57620 	 loss = 0.190, train_acc = 0.900 (3.325 sec/step)
step 57630 	 loss = 0.023, train_acc = 1.000 (3.443 sec/step)
step 57640 	 loss = 0.926, train_acc = 0.900 (3.284 sec/step)
step 57650 	 loss = 0.114, train_acc = 0.900 (3.371 sec/step)
step 57660 	 loss = 0.052, train_acc = 1.000 (3.349 sec/step)
step 57670 	 loss = 0.424, train_acc = 0.900 (3.404 sec/step)
step 57680 	 loss = 0.172, train_acc = 0.900 (3.327 sec/step)
step 57690 	 loss = 0.056, train_acc = 1.000 (3.303 sec/step)
step 57700 	 loss = 0.387, train_acc = 0.900 (3.383 sec/step)
step 57710 	 loss = 0.001, train_acc = 1.000 (3.310 sec/step)
step 57720 	 loss = 0.387, train_acc = 0.800 (3.328 sec/step)
step 57730 	 loss = 0.086, train_acc = 1.000 (3.354 sec/step)
step 57740 	 loss = 0.033, train_acc = 1.000 (3.325 sec/step)
step 57750 	 loss = 0.001, train_acc = 1.000 (3.321 sec/step)
step 57760 	 loss = 0.874, train_acc = 0.900 (3.353 sec/step)
step 57770 	 loss = 0.448, train_acc = 0.900 (3.328 sec/step)
step 57780 	 loss = 0.035, train_acc = 1.000 (3.343 sec/step)
step 57790 	 loss = 0.009, train_acc = 1.000 (3.386 sec/step)
step 57800 	 loss = 0.249, train_acc = 0.800 (3.335 sec/step)
step 57810 	 loss = 0.077, train_acc = 1.000 (3.333 sec/step)
step 57820 	 loss = 0.095, train_acc = 0.900 (3.414 sec/step)
step 57830 	 loss = 0.003, train_acc = 1.000 (3.318 sec/step)
step 57840 	 loss = 0.014, train_acc = 1.000 (3.352 sec/step)
step 57850 	 loss = 0.134, train_acc = 0.900 (3.366 sec/step)
step 57860 	 loss = 0.465, train_acc = 0.900 (3.354 sec/step)
step 57870 	 loss = 0.080, train_acc = 1.000 (3.348 sec/step)
step 57880 	 loss = 0.102, train_acc = 0.900 (3.307 sec/step)
step 57890 	 loss = 0.219, train_acc = 0.900 (3.391 sec/step)
step 57900 	 loss = 0.629, train_acc = 0.800 (3.378 sec/step)
step 57910 	 loss = 0.271, train_acc = 0.900 (3.355 sec/step)
step 57920 	 loss = 0.095, train_acc = 1.000 (3.289 sec/step)
step 57930 	 loss = 0.213, train_acc = 0.900 (3.292 sec/step)
step 57940 	 loss = 22.855, train_acc = 0.900 (3.337 sec/step)
step 57950 	 loss = 0.772, train_acc = 0.800 (3.319 sec/step)
step 57960 	 loss = 0.513, train_acc = 0.800 (3.291 sec/step)
step 57970 	 loss = 0.111, train_acc = 0.900 (3.356 sec/step)
step 57980 	 loss = 0.019, train_acc = 1.000 (3.335 sec/step)
step 57990 	 loss = 0.440, train_acc = 0.900 (3.304 sec/step)
step 58000 	 loss = 1.077, train_acc = 0.700 (3.342 sec/step)
step 58010 	 loss = 0.011, train_acc = 1.000 (3.340 sec/step)
step 58020 	 loss = 0.011, train_acc = 1.000 (3.377 sec/step)
step 58030 	 loss = 0.208, train_acc = 0.900 (3.290 sec/step)
step 58040 	 loss = 0.014, train_acc = 1.000 (3.335 sec/step)
step 58050 	 loss = 0.003, train_acc = 1.000 (3.467 sec/step)
step 58060 	 loss = 0.096, train_acc = 0.900 (3.341 sec/step)
step 58070 	 loss = 0.178, train_acc = 0.900 (3.326 sec/step)
step 58080 	 loss = 0.061, train_acc = 1.000 (3.335 sec/step)
step 58090 	 loss = 0.436, train_acc = 0.900 (3.358 sec/step)
step 58100 	 loss = 0.068, train_acc = 1.000 (3.321 sec/step)
step 58110 	 loss = 0.558, train_acc = 0.800 (3.364 sec/step)
step 58120 	 loss = 0.003, train_acc = 1.000 (3.292 sec/step)
step 58130 	 loss = 0.092, train_acc = 0.900 (3.341 sec/step)
step 58140 	 loss = 0.066, train_acc = 1.000 (3.286 sec/step)
step 58150 	 loss = 0.733, train_acc = 0.700 (3.333 sec/step)
step 58160 	 loss = 0.161, train_acc = 0.900 (3.319 sec/step)
step 58170 	 loss = 0.191, train_acc = 1.000 (3.317 sec/step)
step 58180 	 loss = 0.135, train_acc = 0.900 (3.342 sec/step)
step 58190 	 loss = 0.210, train_acc = 0.900 (3.386 sec/step)
step 58200 	 loss = 0.001, train_acc = 1.000 (3.292 sec/step)
step 58210 	 loss = 0.000, train_acc = 1.000 (3.319 sec/step)
step 58220 	 loss = 0.002, train_acc = 1.000 (3.299 sec/step)
step 58230 	 loss = 0.015, train_acc = 1.000 (3.335 sec/step)
step 58240 	 loss = 0.022, train_acc = 1.000 (3.370 sec/step)
step 58250 	 loss = 0.002, train_acc = 1.000 (3.314 sec/step)
step 58260 	 loss = 0.073, train_acc = 0.900 (3.294 sec/step)
step 58270 	 loss = 0.002, train_acc = 1.000 (3.294 sec/step)
step 58280 	 loss = 0.068, train_acc = 1.000 (3.335 sec/step)
step 58290 	 loss = 0.002, train_acc = 1.000 (3.400 sec/step)
step 58300 	 loss = 0.096, train_acc = 0.900 (3.365 sec/step)
step 58310 	 loss = 0.287, train_acc = 0.900 (3.355 sec/step)
step 58320 	 loss = 0.136, train_acc = 1.000 (3.319 sec/step)
step 58330 	 loss = 0.296, train_acc = 0.800 (3.373 sec/step)
step 58340 	 loss = 0.470, train_acc = 0.900 (3.319 sec/step)
step 58350 	 loss = 0.010, train_acc = 1.000 (3.336 sec/step)
step 58360 	 loss = 0.013, train_acc = 1.000 (3.339 sec/step)
step 58370 	 loss = 0.127, train_acc = 0.900 (3.338 sec/step)
step 58380 	 loss = 0.160, train_acc = 1.000 (3.339 sec/step)
step 58390 	 loss = 0.254, train_acc = 0.900 (3.367 sec/step)
step 58400 	 loss = 1.397, train_acc = 0.900 (3.330 sec/step)
step 58410 	 loss = 0.011, train_acc = 1.000 (3.347 sec/step)
step 58420 	 loss = 0.007, train_acc = 1.000 (3.311 sec/step)
step 58430 	 loss = 0.020, train_acc = 1.000 (3.400 sec/step)
step 58440 	 loss = 0.002, train_acc = 1.000 (3.432 sec/step)
step 58450 	 loss = 0.056, train_acc = 1.000 (3.341 sec/step)
step 58460 	 loss = 0.018, train_acc = 1.000 (3.332 sec/step)
step 58470 	 loss = 0.022, train_acc = 1.000 (3.368 sec/step)
step 58480 	 loss = 0.022, train_acc = 1.000 (3.421 sec/step)
step 58490 	 loss = 0.006, train_acc = 1.000 (3.370 sec/step)
step 58500 	 loss = 3.795, train_acc = 0.800 (3.322 sec/step)
step 58510 	 loss = 0.948, train_acc = 0.800 (3.364 sec/step)
step 58520 	 loss = 0.470, train_acc = 0.800 (3.387 sec/step)
step 58530 	 loss = 0.174, train_acc = 0.900 (3.376 sec/step)
step 58540 	 loss = 0.457, train_acc = 0.900 (3.352 sec/step)
step 58550 	 loss = 0.808, train_acc = 0.900 (3.377 sec/step)
step 58560 	 loss = 0.127, train_acc = 0.900 (3.289 sec/step)
step 58570 	 loss = 0.026, train_acc = 1.000 (3.327 sec/step)
step 58580 	 loss = 0.051, train_acc = 1.000 (3.386 sec/step)
step 58590 	 loss = 0.084, train_acc = 1.000 (3.330 sec/step)
step 58600 	 loss = 0.326, train_acc = 0.900 (3.320 sec/step)
step 58610 	 loss = 0.617, train_acc = 0.900 (3.503 sec/step)
step 58620 	 loss = 0.004, train_acc = 1.000 (3.327 sec/step)
step 58630 	 loss = 0.001, train_acc = 1.000 (3.319 sec/step)
step 58640 	 loss = 0.211, train_acc = 0.900 (3.329 sec/step)
step 58650 	 loss = 0.182, train_acc = 0.900 (3.381 sec/step)
step 58660 	 loss = 0.079, train_acc = 1.000 (3.347 sec/step)
step 58670 	 loss = 0.000, train_acc = 1.000 (3.349 sec/step)
step 58680 	 loss = 0.436, train_acc = 0.800 (3.312 sec/step)
step 58690 	 loss = 0.000, train_acc = 1.000 (3.349 sec/step)
step 58700 	 loss = 1.377, train_acc = 0.800 (3.350 sec/step)
step 58710 	 loss = 0.002, train_acc = 1.000 (3.363 sec/step)
step 58720 	 loss = 0.138, train_acc = 0.900 (3.370 sec/step)
step 58730 	 loss = 0.292, train_acc = 0.900 (3.340 sec/step)
step 58740 	 loss = 0.002, train_acc = 1.000 (3.362 sec/step)
step 58750 	 loss = 0.092, train_acc = 1.000 (3.333 sec/step)
step 58760 	 loss = 0.005, train_acc = 1.000 (3.327 sec/step)
step 58770 	 loss = 0.003, train_acc = 1.000 (3.328 sec/step)
step 58780 	 loss = 0.301, train_acc = 0.900 (3.370 sec/step)
step 58790 	 loss = 0.002, train_acc = 1.000 (3.364 sec/step)
step 58800 	 loss = 0.021, train_acc = 1.000 (3.306 sec/step)
step 58810 	 loss = 0.000, train_acc = 1.000 (3.380 sec/step)
step 58820 	 loss = 0.005, train_acc = 1.000 (3.366 sec/step)
step 58830 	 loss = 0.000, train_acc = 1.000 (3.356 sec/step)
step 58840 	 loss = 0.977, train_acc = 0.700 (3.272 sec/step)
step 58850 	 loss = 0.168, train_acc = 1.000 (3.330 sec/step)
step 58860 	 loss = 0.101, train_acc = 1.000 (3.320 sec/step)
step 58870 	 loss = 0.194, train_acc = 0.900 (3.315 sec/step)
step 58880 	 loss = 0.076, train_acc = 1.000 (3.370 sec/step)
step 58890 	 loss = 0.077, train_acc = 0.900 (3.281 sec/step)
VALIDATION 	 acc = 0.548 (3.602 sec)
step 58900 	 loss = 0.004, train_acc = 1.000 (3.302 sec/step)
step 58910 	 loss = 0.606, train_acc = 0.700 (3.317 sec/step)
step 58920 	 loss = 0.319, train_acc = 0.900 (3.324 sec/step)
step 58930 	 loss = 0.016, train_acc = 1.000 (3.350 sec/step)
step 58940 	 loss = 0.114, train_acc = 0.900 (3.363 sec/step)
step 58950 	 loss = 0.001, train_acc = 1.000 (3.301 sec/step)
step 58960 	 loss = 0.766, train_acc = 0.800 (3.343 sec/step)
step 58970 	 loss = 0.516, train_acc = 0.900 (3.323 sec/step)
step 58980 	 loss = 0.417, train_acc = 0.900 (3.299 sec/step)
step 58990 	 loss = 0.016, train_acc = 1.000 (3.346 sec/step)
step 59000 	 loss = 0.163, train_acc = 0.900 (3.337 sec/step)
step 59010 	 loss = 0.002, train_acc = 1.000 (3.375 sec/step)
step 59020 	 loss = 0.075, train_acc = 1.000 (3.348 sec/step)
step 59030 	 loss = 0.025, train_acc = 1.000 (3.357 sec/step)
step 59040 	 loss = 0.048, train_acc = 1.000 (3.294 sec/step)
step 59050 	 loss = 0.111, train_acc = 1.000 (3.375 sec/step)
step 59060 	 loss = 0.317, train_acc = 0.900 (3.344 sec/step)
step 59070 	 loss = 0.008, train_acc = 1.000 (3.380 sec/step)
step 59080 	 loss = 0.056, train_acc = 1.000 (3.346 sec/step)
step 59090 	 loss = 0.344, train_acc = 0.900 (3.319 sec/step)
step 59100 	 loss = 0.010, train_acc = 1.000 (3.320 sec/step)
step 59110 	 loss = 0.106, train_acc = 1.000 (3.354 sec/step)
step 59120 	 loss = 0.153, train_acc = 0.900 (3.328 sec/step)
step 59130 	 loss = 0.029, train_acc = 1.000 (3.323 sec/step)
step 59140 	 loss = 0.001, train_acc = 1.000 (3.360 sec/step)
step 59150 	 loss = 0.062, train_acc = 1.000 (3.378 sec/step)
step 59160 	 loss = 0.141, train_acc = 0.900 (3.325 sec/step)
step 59170 	 loss = 0.141, train_acc = 1.000 (3.387 sec/step)
step 59180 	 loss = 0.128, train_acc = 1.000 (3.389 sec/step)
step 59190 	 loss = 0.008, train_acc = 1.000 (3.317 sec/step)
step 59200 	 loss = 0.001, train_acc = 1.000 (3.316 sec/step)
step 59210 	 loss = 1.259, train_acc = 0.700 (3.345 sec/step)
step 59220 	 loss = 0.086, train_acc = 1.000 (3.360 sec/step)
step 59230 	 loss = 0.003, train_acc = 1.000 (3.333 sec/step)
step 59240 	 loss = 0.027, train_acc = 1.000 (3.328 sec/step)
step 59250 	 loss = 0.071, train_acc = 1.000 (3.335 sec/step)
step 59260 	 loss = 0.021, train_acc = 1.000 (3.386 sec/step)
step 59270 	 loss = 0.035, train_acc = 1.000 (3.396 sec/step)
step 59280 	 loss = 0.003, train_acc = 1.000 (3.381 sec/step)
step 59290 	 loss = 0.000, train_acc = 1.000 (3.312 sec/step)
step 59300 	 loss = 0.077, train_acc = 0.900 (3.364 sec/step)
step 59310 	 loss = 0.098, train_acc = 0.900 (3.300 sec/step)
step 59320 	 loss = 1.005, train_acc = 0.800 (3.365 sec/step)
step 59330 	 loss = 0.281, train_acc = 0.900 (3.391 sec/step)
step 59340 	 loss = 0.064, train_acc = 1.000 (3.342 sec/step)
step 59350 	 loss = 0.827, train_acc = 0.900 (3.322 sec/step)
step 59360 	 loss = 0.000, train_acc = 1.000 (3.363 sec/step)
step 59370 	 loss = 0.011, train_acc = 1.000 (3.365 sec/step)
step 59380 	 loss = 0.754, train_acc = 0.900 (3.359 sec/step)
step 59390 	 loss = 0.075, train_acc = 1.000 (3.297 sec/step)
step 59400 	 loss = 0.001, train_acc = 1.000 (3.317 sec/step)
step 59410 	 loss = 0.138, train_acc = 0.900 (3.374 sec/step)
step 59420 	 loss = 0.122, train_acc = 1.000 (3.345 sec/step)
step 59430 	 loss = 0.012, train_acc = 1.000 (3.335 sec/step)
step 59440 	 loss = 0.633, train_acc = 0.900 (3.407 sec/step)
step 59450 	 loss = 0.002, train_acc = 1.000 (3.395 sec/step)
step 59460 	 loss = 0.378, train_acc = 0.900 (3.333 sec/step)
step 59470 	 loss = 0.011, train_acc = 1.000 (3.376 sec/step)
step 59480 	 loss = 0.156, train_acc = 0.900 (3.312 sec/step)
step 59490 	 loss = 1.326, train_acc = 0.700 (3.318 sec/step)
step 59500 	 loss = 1.687, train_acc = 0.700 (3.318 sec/step)
step 59510 	 loss = 0.005, train_acc = 1.000 (3.329 sec/step)
step 59520 	 loss = 0.565, train_acc = 0.800 (3.474 sec/step)
step 59530 	 loss = 0.001, train_acc = 1.000 (3.359 sec/step)
step 59540 	 loss = 0.005, train_acc = 1.000 (3.355 sec/step)
step 59550 	 loss = 1.581, train_acc = 0.900 (3.353 sec/step)
step 59560 	 loss = 0.395, train_acc = 0.900 (3.316 sec/step)
step 59570 	 loss = 0.271, train_acc = 0.900 (3.329 sec/step)
step 59580 	 loss = 0.434, train_acc = 0.800 (3.310 sec/step)
step 59590 	 loss = 0.044, train_acc = 1.000 (3.407 sec/step)
step 59600 	 loss = 0.416, train_acc = 0.800 (3.330 sec/step)
step 59610 	 loss = 0.027, train_acc = 1.000 (3.301 sec/step)
step 59620 	 loss = 0.346, train_acc = 0.800 (3.352 sec/step)
step 59630 	 loss = 0.438, train_acc = 0.700 (3.320 sec/step)
step 59640 	 loss = 0.011, train_acc = 1.000 (3.365 sec/step)
step 59650 	 loss = 0.558, train_acc = 0.900 (3.397 sec/step)
step 59660 	 loss = 0.044, train_acc = 1.000 (3.397 sec/step)
step 59670 	 loss = 0.008, train_acc = 1.000 (3.351 sec/step)
step 59680 	 loss = 0.003, train_acc = 1.000 (3.327 sec/step)
step 59690 	 loss = 0.028, train_acc = 1.000 (3.338 sec/step)
step 59700 	 loss = 0.004, train_acc = 1.000 (3.356 sec/step)
step 59710 	 loss = 0.090, train_acc = 1.000 (3.317 sec/step)
step 59720 	 loss = 0.953, train_acc = 0.700 (3.341 sec/step)
step 59730 	 loss = 0.251, train_acc = 0.900 (3.333 sec/step)
step 59740 	 loss = 0.049, train_acc = 1.000 (3.298 sec/step)
step 59750 	 loss = 0.419, train_acc = 0.800 (3.335 sec/step)
step 59760 	 loss = 0.392, train_acc = 0.800 (3.364 sec/step)
step 59770 	 loss = 0.652, train_acc = 0.800 (3.380 sec/step)
step 59780 	 loss = 0.172, train_acc = 0.900 (3.335 sec/step)
step 59790 	 loss = 0.087, train_acc = 0.900 (3.328 sec/step)
step 59800 	 loss = 0.041, train_acc = 1.000 (3.327 sec/step)
step 59810 	 loss = 0.085, train_acc = 1.000 (3.322 sec/step)
step 59820 	 loss = 0.004, train_acc = 1.000 (3.387 sec/step)
step 59830 	 loss = 0.040, train_acc = 1.000 (3.391 sec/step)
step 59840 	 loss = 0.475, train_acc = 0.900 (3.314 sec/step)
step 59850 	 loss = 0.062, train_acc = 1.000 (3.311 sec/step)
step 59860 	 loss = 0.389, train_acc = 0.900 (3.347 sec/step)
step 59870 	 loss = 0.037, train_acc = 1.000 (3.285 sec/step)
step 59880 	 loss = 0.103, train_acc = 1.000 (3.362 sec/step)
step 59890 	 loss = 0.357, train_acc = 0.800 (3.349 sec/step)
step 59900 	 loss = 0.003, train_acc = 1.000 (3.459 sec/step)
step 59910 	 loss = 0.001, train_acc = 1.000 (3.358 sec/step)
step 59920 	 loss = 0.338, train_acc = 0.900 (3.341 sec/step)
step 59930 	 loss = 0.134, train_acc = 0.900 (3.353 sec/step)
step 59940 	 loss = 0.012, train_acc = 1.000 (3.352 sec/step)
step 59950 	 loss = 0.373, train_acc = 0.900 (3.321 sec/step)
step 59960 	 loss = 0.228, train_acc = 0.900 (3.332 sec/step)
step 59970 	 loss = 0.359, train_acc = 0.900 (3.306 sec/step)
step 59980 	 loss = 0.003, train_acc = 1.000 (3.390 sec/step)
step 59990 	 loss = 0.098, train_acc = 0.900 (3.383 sec/step)
step 60000 	 loss = 0.022, train_acc = 1.000 (3.365 sec/step)
step 60010 	 loss = 0.004, train_acc = 1.000 (3.310 sec/step)
step 60020 	 loss = 0.000, train_acc = 1.000 (3.333 sec/step)
step 60030 	 loss = 0.352, train_acc = 0.900 (3.347 sec/step)
step 60040 	 loss = 0.022, train_acc = 1.000 (3.388 sec/step)
step 60050 	 loss = 0.156, train_acc = 0.900 (3.377 sec/step)
step 60060 	 loss = 0.027, train_acc = 1.000 (3.297 sec/step)
step 60070 	 loss = 0.406, train_acc = 0.900 (3.307 sec/step)
step 60080 	 loss = 0.012, train_acc = 1.000 (3.328 sec/step)
step 60090 	 loss = 0.415, train_acc = 0.800 (3.340 sec/step)
step 60100 	 loss = 0.022, train_acc = 1.000 (3.321 sec/step)
step 60110 	 loss = 0.028, train_acc = 1.000 (3.399 sec/step)
step 60120 	 loss = 0.002, train_acc = 1.000 (3.317 sec/step)
step 60130 	 loss = 0.000, train_acc = 1.000 (3.321 sec/step)
step 60140 	 loss = 0.000, train_acc = 1.000 (3.396 sec/step)
step 60150 	 loss = 0.026, train_acc = 1.000 (3.304 sec/step)
step 60160 	 loss = 0.015, train_acc = 1.000 (3.353 sec/step)
step 60170 	 loss = 0.446, train_acc = 0.900 (3.379 sec/step)
step 60180 	 loss = 0.187, train_acc = 0.900 (3.320 sec/step)
step 60190 	 loss = 0.000, train_acc = 1.000 (3.305 sec/step)
step 60200 	 loss = 0.072, train_acc = 1.000 (3.346 sec/step)
step 60210 	 loss = 0.184, train_acc = 0.900 (3.326 sec/step)
step 60220 	 loss = 0.012, train_acc = 1.000 (3.417 sec/step)
step 60230 	 loss = 0.011, train_acc = 1.000 (3.325 sec/step)
step 60240 	 loss = 0.126, train_acc = 0.900 (3.323 sec/step)
step 60250 	 loss = 0.901, train_acc = 0.800 (3.306 sec/step)
step 60260 	 loss = 0.022, train_acc = 1.000 (3.420 sec/step)
step 60270 	 loss = 0.235, train_acc = 0.900 (3.286 sec/step)
step 60280 	 loss = 0.019, train_acc = 1.000 (3.302 sec/step)
step 60290 	 loss = 0.172, train_acc = 0.900 (3.290 sec/step)
step 60300 	 loss = 0.525, train_acc = 0.900 (3.287 sec/step)
step 60310 	 loss = 0.702, train_acc = 0.900 (3.280 sec/step)
step 60320 	 loss = 0.004, train_acc = 1.000 (3.413 sec/step)
step 60330 	 loss = 0.002, train_acc = 1.000 (3.358 sec/step)
step 60340 	 loss = 0.530, train_acc = 0.800 (3.372 sec/step)
step 60350 	 loss = 0.011, train_acc = 1.000 (3.327 sec/step)
step 60360 	 loss = 0.062, train_acc = 1.000 (3.329 sec/step)
step 60370 	 loss = 0.017, train_acc = 1.000 (3.433 sec/step)
step 60380 	 loss = 0.806, train_acc = 0.900 (3.303 sec/step)
step 60390 	 loss = 0.216, train_acc = 0.900 (3.308 sec/step)
step 60400 	 loss = 0.057, train_acc = 1.000 (3.383 sec/step)
step 60410 	 loss = 0.037, train_acc = 1.000 (3.288 sec/step)
step 60420 	 loss = 0.048, train_acc = 1.000 (3.365 sec/step)
step 60430 	 loss = 0.015, train_acc = 1.000 (3.316 sec/step)
step 60440 	 loss = 0.568, train_acc = 0.900 (3.305 sec/step)
step 60450 	 loss = 0.438, train_acc = 0.900 (3.367 sec/step)
step 60460 	 loss = 0.021, train_acc = 1.000 (3.364 sec/step)
step 60470 	 loss = 0.003, train_acc = 1.000 (3.359 sec/step)
step 60480 	 loss = 0.001, train_acc = 1.000 (3.344 sec/step)
step 60490 	 loss = 0.152, train_acc = 0.800 (3.306 sec/step)
step 60500 	 loss = 0.991, train_acc = 0.700 (3.368 sec/step)
step 60510 	 loss = 0.145, train_acc = 1.000 (3.325 sec/step)
step 60520 	 loss = 0.013, train_acc = 1.000 (3.328 sec/step)
step 60530 	 loss = 0.023, train_acc = 1.000 (3.299 sec/step)
step 60540 	 loss = 0.004, train_acc = 1.000 (3.384 sec/step)
step 60550 	 loss = 0.288, train_acc = 0.900 (3.439 sec/step)
step 60560 	 loss = 0.088, train_acc = 1.000 (3.324 sec/step)
step 60570 	 loss = 0.027, train_acc = 1.000 (3.321 sec/step)
step 60580 	 loss = 0.018, train_acc = 1.000 (3.327 sec/step)
step 60590 	 loss = 0.001, train_acc = 1.000 (3.390 sec/step)
step 60600 	 loss = 0.000, train_acc = 1.000 (3.307 sec/step)
step 60610 	 loss = 0.218, train_acc = 0.900 (3.286 sec/step)
step 60620 	 loss = 0.088, train_acc = 0.900 (3.314 sec/step)
step 60630 	 loss = 0.155, train_acc = 0.900 (3.318 sec/step)
step 60640 	 loss = 0.036, train_acc = 1.000 (3.290 sec/step)
step 60650 	 loss = 0.023, train_acc = 1.000 (3.325 sec/step)
step 60660 	 loss = 0.172, train_acc = 1.000 (3.342 sec/step)
step 60670 	 loss = 0.002, train_acc = 1.000 (3.359 sec/step)
step 60680 	 loss = 0.709, train_acc = 0.900 (3.315 sec/step)
step 60690 	 loss = 0.080, train_acc = 1.000 (3.295 sec/step)
step 60700 	 loss = 0.019, train_acc = 1.000 (3.330 sec/step)
step 60710 	 loss = 0.037, train_acc = 1.000 (3.365 sec/step)
step 60720 	 loss = 0.274, train_acc = 0.900 (3.420 sec/step)
step 60730 	 loss = 0.001, train_acc = 1.000 (3.325 sec/step)
step 60740 	 loss = 0.213, train_acc = 0.900 (3.338 sec/step)
step 60750 	 loss = 0.228, train_acc = 0.900 (3.367 sec/step)
step 60760 	 loss = 0.002, train_acc = 1.000 (3.321 sec/step)
step 60770 	 loss = 0.489, train_acc = 0.800 (3.323 sec/step)
step 60780 	 loss = 0.003, train_acc = 1.000 (3.436 sec/step)
step 60790 	 loss = 0.037, train_acc = 1.000 (3.333 sec/step)
VALIDATION 	 acc = 0.536 (3.648 sec)
step 60800 	 loss = 0.478, train_acc = 0.900 (3.355 sec/step)
step 60810 	 loss = 0.040, train_acc = 1.000 (3.366 sec/step)
step 60820 	 loss = 0.227, train_acc = 0.900 (3.282 sec/step)
step 60830 	 loss = 0.129, train_acc = 1.000 (3.345 sec/step)
step 60840 	 loss = 0.008, train_acc = 1.000 (3.309 sec/step)
step 60850 	 loss = 0.290, train_acc = 0.900 (3.334 sec/step)
step 60860 	 loss = 0.051, train_acc = 1.000 (3.346 sec/step)
step 60870 	 loss = 0.276, train_acc = 0.800 (3.360 sec/step)
step 60880 	 loss = 0.320, train_acc = 0.800 (3.305 sec/step)
step 60890 	 loss = 0.720, train_acc = 0.800 (3.324 sec/step)
step 60900 	 loss = 0.092, train_acc = 1.000 (3.362 sec/step)
step 60910 	 loss = 0.001, train_acc = 1.000 (3.394 sec/step)
step 60920 	 loss = 0.000, train_acc = 1.000 (3.362 sec/step)
step 60930 	 loss = 0.022, train_acc = 1.000 (3.361 sec/step)
step 60940 	 loss = 0.004, train_acc = 1.000 (3.291 sec/step)
step 60950 	 loss = 0.025, train_acc = 1.000 (3.393 sec/step)
step 60960 	 loss = 0.391, train_acc = 0.900 (3.365 sec/step)
step 60970 	 loss = 0.202, train_acc = 0.900 (3.287 sec/step)
step 60980 	 loss = 0.023, train_acc = 1.000 (3.365 sec/step)
step 60990 	 loss = 0.000, train_acc = 1.000 (3.335 sec/step)
step 61000 	 loss = 0.673, train_acc = 0.700 (3.359 sec/step)
step 61010 	 loss = 0.014, train_acc = 1.000 (3.345 sec/step)
step 61020 	 loss = 1.154, train_acc = 0.800 (3.381 sec/step)
step 61030 	 loss = 0.173, train_acc = 0.900 (3.306 sec/step)
step 61040 	 loss = 0.278, train_acc = 0.900 (3.370 sec/step)
step 61050 	 loss = 0.022, train_acc = 1.000 (3.346 sec/step)
step 61060 	 loss = 0.001, train_acc = 1.000 (3.332 sec/step)
step 61070 	 loss = 0.017, train_acc = 1.000 (3.282 sec/step)
step 61080 	 loss = 0.397, train_acc = 0.900 (3.377 sec/step)
step 61090 	 loss = 0.204, train_acc = 0.900 (3.314 sec/step)
step 61100 	 loss = 0.108, train_acc = 1.000 (3.302 sec/step)
step 61110 	 loss = 0.016, train_acc = 1.000 (3.368 sec/step)
step 61120 	 loss = 0.948, train_acc = 0.900 (3.309 sec/step)
step 61130 	 loss = 0.337, train_acc = 0.900 (3.339 sec/step)
step 61140 	 loss = 0.120, train_acc = 0.900 (3.365 sec/step)
step 61150 	 loss = 0.255, train_acc = 0.900 (3.345 sec/step)
step 61160 	 loss = 0.006, train_acc = 1.000 (3.317 sec/step)
step 61170 	 loss = 0.011, train_acc = 1.000 (3.309 sec/step)
step 61180 	 loss = 0.000, train_acc = 1.000 (3.311 sec/step)
step 61190 	 loss = 0.000, train_acc = 1.000 (3.440 sec/step)
step 61200 	 loss = 0.049, train_acc = 1.000 (3.320 sec/step)
step 61210 	 loss = 0.629, train_acc = 0.900 (3.323 sec/step)
step 61220 	 loss = 0.188, train_acc = 0.900 (3.329 sec/step)
step 61230 	 loss = 0.007, train_acc = 1.000 (3.370 sec/step)
step 61240 	 loss = 0.568, train_acc = 0.900 (3.409 sec/step)
step 61250 	 loss = 0.300, train_acc = 0.900 (3.337 sec/step)
step 61260 	 loss = 0.406, train_acc = 0.900 (3.373 sec/step)
step 61270 	 loss = 0.195, train_acc = 0.900 (3.318 sec/step)
step 61280 	 loss = 0.017, train_acc = 1.000 (3.321 sec/step)
step 61290 	 loss = 0.071, train_acc = 1.000 (3.371 sec/step)
step 61300 	 loss = 0.507, train_acc = 0.900 (3.296 sec/step)
step 61310 	 loss = 0.001, train_acc = 1.000 (3.334 sec/step)
step 61320 	 loss = 0.035, train_acc = 1.000 (3.385 sec/step)
step 61330 	 loss = 0.375, train_acc = 0.900 (3.297 sec/step)
step 61340 	 loss = 0.421, train_acc = 0.900 (3.326 sec/step)
step 61350 	 loss = 0.011, train_acc = 1.000 (3.342 sec/step)
step 61360 	 loss = 0.041, train_acc = 1.000 (3.324 sec/step)
step 61370 	 loss = 0.036, train_acc = 1.000 (3.322 sec/step)
step 61380 	 loss = 0.072, train_acc = 1.000 (3.323 sec/step)
step 61390 	 loss = 0.001, train_acc = 1.000 (3.329 sec/step)
step 61400 	 loss = 0.026, train_acc = 1.000 (3.333 sec/step)
step 61410 	 loss = 0.459, train_acc = 0.900 (3.307 sec/step)
step 61420 	 loss = 0.125, train_acc = 1.000 (3.359 sec/step)
step 61430 	 loss = 3.224, train_acc = 0.400 (3.398 sec/step)
step 61440 	 loss = 0.107, train_acc = 1.000 (3.299 sec/step)
step 61450 	 loss = 0.395, train_acc = 0.900 (3.302 sec/step)
step 61460 	 loss = 0.060, train_acc = 1.000 (3.372 sec/step)
step 61470 	 loss = 0.330, train_acc = 0.900 (3.311 sec/step)
step 61480 	 loss = 0.315, train_acc = 0.900 (3.351 sec/step)
step 61490 	 loss = 0.415, train_acc = 0.700 (3.367 sec/step)
step 61500 	 loss = 0.526, train_acc = 0.700 (3.289 sec/step)
step 61510 	 loss = 0.185, train_acc = 0.900 (3.336 sec/step)
step 61520 	 loss = 0.001, train_acc = 1.000 (3.359 sec/step)
step 61530 	 loss = 0.397, train_acc = 0.900 (3.350 sec/step)
step 61540 	 loss = 0.430, train_acc = 0.900 (3.293 sec/step)
step 61550 	 loss = 0.177, train_acc = 0.900 (3.380 sec/step)
step 61560 	 loss = 0.365, train_acc = 0.800 (3.373 sec/step)
step 61570 	 loss = 0.304, train_acc = 0.900 (3.362 sec/step)
step 61580 	 loss = 0.010, train_acc = 1.000 (3.413 sec/step)
step 61590 	 loss = 0.106, train_acc = 1.000 (3.360 sec/step)
step 61600 	 loss = 0.326, train_acc = 0.900 (3.368 sec/step)
step 61610 	 loss = 0.583, train_acc = 0.900 (3.354 sec/step)
step 61620 	 loss = 0.040, train_acc = 1.000 (3.341 sec/step)
step 61630 	 loss = 0.163, train_acc = 1.000 (3.326 sec/step)
step 61640 	 loss = 0.207, train_acc = 0.900 (3.314 sec/step)
step 61650 	 loss = 0.157, train_acc = 1.000 (3.313 sec/step)
step 61660 	 loss = 0.090, train_acc = 1.000 (3.294 sec/step)
step 61670 	 loss = 0.038, train_acc = 1.000 (3.396 sec/step)
step 61680 	 loss = 0.095, train_acc = 0.900 (3.395 sec/step)
step 61690 	 loss = 0.006, train_acc = 1.000 (3.378 sec/step)
step 61700 	 loss = 0.046, train_acc = 1.000 (3.325 sec/step)
step 61710 	 loss = 0.016, train_acc = 1.000 (3.356 sec/step)
step 61720 	 loss = 0.009, train_acc = 1.000 (3.350 sec/step)
step 61730 	 loss = 0.046, train_acc = 1.000 (3.367 sec/step)
step 61740 	 loss = 0.160, train_acc = 0.900 (3.361 sec/step)
step 61750 	 loss = 0.534, train_acc = 0.900 (3.307 sec/step)
step 61760 	 loss = 0.003, train_acc = 1.000 (3.331 sec/step)
step 61770 	 loss = 0.048, train_acc = 1.000 (3.308 sec/step)
step 61780 	 loss = 0.011, train_acc = 1.000 (3.402 sec/step)
step 61790 	 loss = 0.054, train_acc = 1.000 (3.345 sec/step)
step 61800 	 loss = 0.761, train_acc = 0.900 (3.303 sec/step)
step 61810 	 loss = 0.438, train_acc = 0.900 (3.395 sec/step)
step 61820 	 loss = 0.019, train_acc = 1.000 (3.337 sec/step)
step 61830 	 loss = 0.007, train_acc = 1.000 (3.297 sec/step)
step 61840 	 loss = 0.455, train_acc = 0.800 (3.318 sec/step)
step 61850 	 loss = 0.645, train_acc = 0.800 (3.383 sec/step)
step 61860 	 loss = 0.276, train_acc = 0.900 (3.361 sec/step)
step 61870 	 loss = 0.000, train_acc = 1.000 (3.387 sec/step)
step 61880 	 loss = 0.375, train_acc = 0.900 (3.327 sec/step)
step 61890 	 loss = 0.014, train_acc = 1.000 (3.374 sec/step)
step 61900 	 loss = 0.001, train_acc = 1.000 (3.326 sec/step)
step 61910 	 loss = 0.009, train_acc = 1.000 (3.321 sec/step)
step 61920 	 loss = 0.297, train_acc = 0.900 (3.400 sec/step)
step 61930 	 loss = 0.029, train_acc = 1.000 (3.295 sec/step)
step 61940 	 loss = 0.282, train_acc = 0.900 (3.339 sec/step)
step 61950 	 loss = 1.076, train_acc = 0.700 (3.329 sec/step)
step 61960 	 loss = 1.149, train_acc = 0.900 (3.328 sec/step)
step 61970 	 loss = 0.060, train_acc = 1.000 (3.379 sec/step)
step 61980 	 loss = 0.037, train_acc = 1.000 (3.352 sec/step)
step 61990 	 loss = 0.126, train_acc = 0.900 (3.341 sec/step)
step 62000 	 loss = 0.405, train_acc = 0.900 (3.313 sec/step)
step 62010 	 loss = 0.518, train_acc = 0.900 (3.351 sec/step)
step 62020 	 loss = 0.001, train_acc = 1.000 (3.282 sec/step)
step 62030 	 loss = 0.013, train_acc = 1.000 (3.349 sec/step)
step 62040 	 loss = 0.225, train_acc = 1.000 (3.401 sec/step)
step 62050 	 loss = 0.002, train_acc = 1.000 (3.300 sec/step)
step 62060 	 loss = 0.147, train_acc = 0.900 (3.351 sec/step)
step 62070 	 loss = 0.121, train_acc = 0.900 (3.299 sec/step)
step 62080 	 loss = 0.787, train_acc = 0.900 (3.348 sec/step)
step 62090 	 loss = 0.017, train_acc = 1.000 (3.371 sec/step)
step 62100 	 loss = 0.000, train_acc = 1.000 (3.348 sec/step)
step 62110 	 loss = 0.011, train_acc = 1.000 (3.364 sec/step)
step 62120 	 loss = 0.002, train_acc = 1.000 (3.370 sec/step)
step 62130 	 loss = 0.103, train_acc = 0.900 (3.368 sec/step)
step 62140 	 loss = 0.057, train_acc = 1.000 (3.363 sec/step)
step 62150 	 loss = 0.023, train_acc = 1.000 (3.325 sec/step)
step 62160 	 loss = 0.105, train_acc = 1.000 (3.339 sec/step)
step 62170 	 loss = 0.009, train_acc = 1.000 (3.371 sec/step)
step 62180 	 loss = 0.025, train_acc = 1.000 (3.329 sec/step)
step 62190 	 loss = 0.134, train_acc = 0.900 (3.331 sec/step)
step 62200 	 loss = 0.460, train_acc = 0.800 (3.305 sec/step)
step 62210 	 loss = 0.117, train_acc = 0.900 (3.327 sec/step)
step 62220 	 loss = 0.010, train_acc = 1.000 (3.368 sec/step)
step 62230 	 loss = 0.108, train_acc = 1.000 (3.337 sec/step)
step 62240 	 loss = 0.004, train_acc = 1.000 (3.302 sec/step)
step 62250 	 loss = 0.000, train_acc = 1.000 (3.433 sec/step)
step 62260 	 loss = 0.196, train_acc = 0.900 (3.303 sec/step)
step 62270 	 loss = 0.001, train_acc = 1.000 (3.400 sec/step)
step 62280 	 loss = 0.124, train_acc = 1.000 (3.387 sec/step)
step 62290 	 loss = 0.482, train_acc = 0.800 (3.392 sec/step)
step 62300 	 loss = 0.004, train_acc = 1.000 (3.296 sec/step)
step 62310 	 loss = 0.012, train_acc = 1.000 (3.292 sec/step)
step 62320 	 loss = 2.936, train_acc = 0.700 (3.322 sec/step)
step 62330 	 loss = 0.108, train_acc = 0.900 (3.318 sec/step)
step 62340 	 loss = 0.099, train_acc = 1.000 (3.330 sec/step)
step 62350 	 loss = 0.502, train_acc = 0.800 (3.374 sec/step)
step 62360 	 loss = 0.261, train_acc = 0.900 (3.394 sec/step)
step 62370 	 loss = 1.421, train_acc = 0.800 (3.298 sec/step)
step 62380 	 loss = 0.008, train_acc = 1.000 (3.335 sec/step)
step 62390 	 loss = 0.107, train_acc = 1.000 (3.481 sec/step)
step 62400 	 loss = 0.012, train_acc = 1.000 (3.401 sec/step)
step 62410 	 loss = 0.119, train_acc = 1.000 (3.361 sec/step)
step 62420 	 loss = 0.032, train_acc = 1.000 (3.367 sec/step)
step 62430 	 loss = 0.275, train_acc = 0.900 (3.348 sec/step)
step 62440 	 loss = 0.783, train_acc = 0.800 (3.344 sec/step)
step 62450 	 loss = 0.259, train_acc = 0.900 (3.429 sec/step)
step 62460 	 loss = 0.010, train_acc = 1.000 (3.329 sec/step)
step 62470 	 loss = 0.171, train_acc = 0.900 (3.353 sec/step)
step 62480 	 loss = 0.065, train_acc = 1.000 (3.352 sec/step)
step 62490 	 loss = 0.238, train_acc = 0.900 (3.335 sec/step)
step 62500 	 loss = 0.033, train_acc = 1.000 (3.451 sec/step)
step 62510 	 loss = 0.110, train_acc = 0.900 (3.322 sec/step)
step 62520 	 loss = 0.075, train_acc = 1.000 (3.374 sec/step)
step 62530 	 loss = 0.386, train_acc = 0.900 (3.329 sec/step)
step 62540 	 loss = 0.048, train_acc = 1.000 (3.344 sec/step)
step 62550 	 loss = 0.107, train_acc = 0.900 (3.386 sec/step)
step 62560 	 loss = 0.104, train_acc = 1.000 (3.336 sec/step)
step 62570 	 loss = 0.005, train_acc = 1.000 (3.307 sec/step)
step 62580 	 loss = 0.240, train_acc = 0.800 (3.304 sec/step)
step 62590 	 loss = 0.010, train_acc = 1.000 (3.319 sec/step)
step 62600 	 loss = 0.114, train_acc = 1.000 (3.375 sec/step)
step 62610 	 loss = 0.019, train_acc = 1.000 (3.332 sec/step)
step 62620 	 loss = 1.106, train_acc = 0.800 (3.363 sec/step)
step 62630 	 loss = 0.047, train_acc = 1.000 (3.383 sec/step)
step 62640 	 loss = 0.154, train_acc = 1.000 (3.370 sec/step)
step 62650 	 loss = 0.009, train_acc = 1.000 (3.316 sec/step)
step 62660 	 loss = 0.217, train_acc = 0.900 (3.337 sec/step)
step 62670 	 loss = 0.013, train_acc = 1.000 (3.318 sec/step)
step 62680 	 loss = 0.004, train_acc = 1.000 (3.333 sec/step)
step 62690 	 loss = 1.417, train_acc = 0.500 (3.293 sec/step)
VALIDATION 	 acc = 0.532 (3.630 sec)
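
The interleaved `VALIDATION` lines report mean accuracy over the held-out split rather than the 10-image training batches. The cell that runs this evaluation is not shown here, so the following is only a minimal sketch of how such a pass could be computed with the arrays set up earlier in the notebook; `model.x`, `model.y`, `model.accuracy`, and `model.phase` are assumed attribute names, not confirmed by the source.

    # Hypothetical sketch of the periodic validation pass behind the
    # "VALIDATION  acc = ..." lines. Attribute names on `model` and the
    # `phase` feed are assumptions about the unshown training-loop cell.
    import numpy as np  # already imported at the top of the notebook

    def run_validation(sess, model, data_x, data_y, validation_indices, batch_size):
        """Average per-batch accuracy over the held-out validation indices."""
        accs = []
        for start in range(0, len(validation_indices), batch_size):
            idx = validation_indices[start:start + batch_size]
            acc = sess.run(model.accuracy,
                           feed_dict={model.x: data_x[idx],
                                      model.y: data_y[idx],
                                      model.phase: False})  # e.g. disable dropout at eval time
            accs.append(acc)
        return float(np.mean(accs))
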
step 62700 	 loss = 0.341, train_acc = 0.900 (3.294 sec/step)
step 62710 	 loss = 0.173, train_acc = 0.900 (3.383 sec/step)
step 62720 	 loss = 0.000, train_acc = 1.000 (3.322 sec/step)
step 62730 	 loss = 0.027, train_acc = 1.000 (3.329 sec/step)
step 62740 	 loss = 0.027, train_acc = 1.000 (3.327 sec/step)
step 62750 	 loss = 0.108, train_acc = 1.000 (3.326 sec/step)
step 62760 	 loss = 0.013, train_acc = 1.000 (3.481 sec/step)
step 62770 	 loss = 57.153, train_acc = 0.800 (3.299 sec/step)
step 62780 	 loss = 0.101, train_acc = 1.000 (3.380 sec/step)
step 62790 	 loss = 1.384, train_acc = 0.900 (3.343 sec/step)
step 62800 	 loss = 0.322, train_acc = 0.900 (3.345 sec/step)
step 62810 	 loss = 0.143, train_acc = 0.900 (3.339 sec/step)
step 62820 	 loss = 0.038, train_acc = 1.000 (3.321 sec/step)
step 62830 	 loss = 0.087, train_acc = 0.900 (3.352 sec/step)
step 62840 	 loss = 0.997, train_acc = 0.700 (3.330 sec/step)
step 62850 	 loss = 0.882, train_acc = 0.900 (3.301 sec/step)
step 62860 	 loss = 0.360, train_acc = 0.900 (3.385 sec/step)
step 62870 	 loss = 0.017, train_acc = 1.000 (3.327 sec/step)
step 62880 	 loss = 1.133, train_acc = 0.900 (3.363 sec/step)
step 62890 	 loss = 0.996, train_acc = 0.800 (3.344 sec/step)
step 62900 	 loss = 1.042, train_acc = 0.800 (3.332 sec/step)
step 62910 	 loss = 0.048, train_acc = 1.000 (3.315 sec/step)
step 62920 	 loss = 0.058, train_acc = 1.000 (3.353 sec/step)
step 62930 	 loss = 0.210, train_acc = 0.900 (3.339 sec/step)
step 62940 	 loss = 0.376, train_acc = 0.900 (3.350 sec/step)
step 62950 	 loss = 0.331, train_acc = 0.900 (3.301 sec/step)
step 62960 	 loss = 0.002, train_acc = 1.000 (3.429 sec/step)
step 62970 	 loss = 0.000, train_acc = 1.000 (3.363 sec/step)
step 62980 	 loss = 0.083, train_acc = 1.000 (3.354 sec/step)
step 62990 	 loss = 0.002, train_acc = 1.000 (3.387 sec/step)
step 63000 	 loss = 0.001, train_acc = 1.000 (3.358 sec/step)
step 63010 	 loss = 1.813, train_acc = 0.800 (3.343 sec/step)
step 63020 	 loss = 0.385, train_acc = 0.900 (3.304 sec/step)
step 63030 	 loss = 0.019, train_acc = 1.000 (3.369 sec/step)
step 63040 	 loss = 0.083, train_acc = 1.000 (3.309 sec/step)
step 63050 	 loss = 0.028, train_acc = 1.000 (3.326 sec/step)
step 63060 	 loss = 0.080, train_acc = 1.000 (3.305 sec/step)
step 63070 	 loss = 0.341, train_acc = 0.900 (3.298 sec/step)
step 63080 	 loss = 0.002, train_acc = 1.000 (3.396 sec/step)
step 63090 	 loss = 0.282, train_acc = 0.900 (3.308 sec/step)
step 63100 	 loss = 1.305, train_acc = 0.800 (3.322 sec/step)
step 63110 	 loss = 0.446, train_acc = 0.900 (3.369 sec/step)
step 63120 	 loss = 0.002, train_acc = 1.000 (3.319 sec/step)
step 63130 	 loss = 0.158, train_acc = 1.000 (3.332 sec/step)
step 63140 	 loss = 0.497, train_acc = 0.900 (3.328 sec/step)
step 63150 	 loss = 0.004, train_acc = 1.000 (3.358 sec/step)
step 63160 	 loss = 0.003, train_acc = 1.000 (3.381 sec/step)
step 63170 	 loss = 0.006, train_acc = 1.000 (3.388 sec/step)
step 63180 	 loss = 0.171, train_acc = 0.900 (3.341 sec/step)
step 63190 	 loss = 0.062, train_acc = 1.000 (3.331 sec/step)
step 63200 	 loss = 0.568, train_acc = 0.800 (3.379 sec/step)
step 63210 	 loss = 0.001, train_acc = 1.000 (3.351 sec/step)
step 63220 	 loss = 0.047, train_acc = 1.000 (3.354 sec/step)
step 63230 	 loss = 0.356, train_acc = 0.900 (3.334 sec/step)
step 63240 	 loss = 0.013, train_acc = 1.000 (3.377 sec/step)
step 63250 	 loss = 0.009, train_acc = 1.000 (3.296 sec/step)
step 63260 	 loss = 0.001, train_acc = 1.000 (3.321 sec/step)
step 63270 	 loss = 0.083, train_acc = 0.900 (3.344 sec/step)
step 63280 	 loss = 0.022, train_acc = 1.000 (3.305 sec/step)
step 63290 	 loss = 0.050, train_acc = 1.000 (3.301 sec/step)
step 63300 	 loss = 0.289, train_acc = 0.900 (3.333 sec/step)
step 63310 	 loss = 0.042, train_acc = 1.000 (3.361 sec/step)
step 63320 	 loss = 0.004, train_acc = 1.000 (3.339 sec/step)
step 63330 	 loss = 0.293, train_acc = 0.900 (3.326 sec/step)
step 63340 	 loss = 0.040, train_acc = 1.000 (3.352 sec/step)
step 63350 	 loss = 1.088, train_acc = 0.900 (3.352 sec/step)
step 63360 	 loss = 1.159, train_acc = 0.800 (3.368 sec/step)
step 63370 	 loss = 1.595, train_acc = 0.800 (3.305 sec/step)
step 63380 	 loss = 0.000, train_acc = 1.000 (3.408 sec/step)
step 63390 	 loss = 0.481, train_acc = 0.800 (3.427 sec/step)
step 63400 	 loss = 0.873, train_acc = 0.900 (3.309 sec/step)
step 63410 	 loss = 0.389, train_acc = 0.800 (3.318 sec/step)
step 63420 	 loss = 0.003, train_acc = 1.000 (3.392 sec/step)
step 63430 	 loss = 0.373, train_acc = 0.900 (3.344 sec/step)
step 63440 	 loss = 0.005, train_acc = 1.000 (3.361 sec/step)
step 63450 	 loss = 0.077, train_acc = 1.000 (3.317 sec/step)
step 63460 	 loss = 0.041, train_acc = 1.000 (3.337 sec/step)
step 63470 	 loss = 0.434, train_acc = 0.900 (3.341 sec/step)
step 63480 	 loss = 0.794, train_acc = 0.600 (3.368 sec/step)
step 63490 	 loss = 0.162, train_acc = 0.900 (3.337 sec/step)
step 63500 	 loss = 0.737, train_acc = 0.900 (3.302 sec/step)
step 63510 	 loss = 0.177, train_acc = 1.000 (3.366 sec/step)
step 63520 	 loss = 0.114, train_acc = 0.900 (3.312 sec/step)
step 63530 	 loss = 0.006, train_acc = 1.000 (3.339 sec/step)
step 63540 	 loss = 0.018, train_acc = 1.000 (3.316 sec/step)
step 63550 	 loss = 0.067, train_acc = 1.000 (3.336 sec/step)
step 63560 	 loss = 0.043, train_acc = 1.000 (3.408 sec/step)
step 63570 	 loss = 0.113, train_acc = 0.900 (3.393 sec/step)
step 63580 	 loss = 0.053, train_acc = 1.000 (3.314 sec/step)
step 63590 	 loss = 0.190, train_acc = 0.900 (3.395 sec/step)
step 63600 	 loss = 0.074, train_acc = 1.000 (3.470 sec/step)
step 63610 	 loss = 0.408, train_acc = 0.800 (3.346 sec/step)
step 63620 	 loss = 0.106, train_acc = 1.000 (3.391 sec/step)
step 63630 	 loss = 0.061, train_acc = 1.000 (3.362 sec/step)
step 63640 	 loss = 0.651, train_acc = 0.900 (3.356 sec/step)
step 63650 	 loss = 0.605, train_acc = 0.900 (3.346 sec/step)
step 63660 	 loss = 0.002, train_acc = 1.000 (3.344 sec/step)
step 63670 	 loss = 0.224, train_acc = 0.900 (3.368 sec/step)
step 63680 	 loss = 0.009, train_acc = 1.000 (3.406 sec/step)
step 63690 	 loss = 0.176, train_acc = 0.900 (3.374 sec/step)
step 63700 	 loss = 0.093, train_acc = 1.000 (3.397 sec/step)
step 63710 	 loss = 0.518, train_acc = 0.800 (3.336 sec/step)
step 63720 	 loss = 0.058, train_acc = 1.000 (3.325 sec/step)
step 63730 	 loss = 0.067, train_acc = 1.000 (3.315 sec/step)
step 63740 	 loss = 0.028, train_acc = 1.000 (3.312 sec/step)
step 63750 	 loss = 0.001, train_acc = 1.000 (3.372 sec/step)
step 63760 	 loss = 0.002, train_acc = 1.000 (3.305 sec/step)
step 63770 	 loss = 1.677, train_acc = 0.900 (3.306 sec/step)
step 63780 	 loss = 0.316, train_acc = 0.900 (3.325 sec/step)
step 63790 	 loss = 0.123, train_acc = 0.900 (3.331 sec/step)
step 63800 	 loss = 0.108, train_acc = 1.000 (3.336 sec/step)
step 63810 	 loss = 0.024, train_acc = 1.000 (3.347 sec/step)
step 63820 	 loss = 0.111, train_acc = 1.000 (3.307 sec/step)
step 63830 	 loss = 0.055, train_acc = 1.000 (3.386 sec/step)
step 63840 	 loss = 0.941, train_acc = 0.900 (3.341 sec/step)
step 63850 	 loss = 0.017, train_acc = 1.000 (3.327 sec/step)
step 63860 	 loss = 0.046, train_acc = 1.000 (3.383 sec/step)
step 63870 	 loss = 0.010, train_acc = 1.000 (3.302 sec/step)
step 63880 	 loss = 0.006, train_acc = 1.000 (3.297 sec/step)
step 63890 	 loss = 0.023, train_acc = 1.000 (3.375 sec/step)
step 63900 	 loss = 0.788, train_acc = 0.800 (3.329 sec/step)
step 63910 	 loss = 0.594, train_acc = 0.900 (3.325 sec/step)
step 63920 	 loss = 0.336, train_acc = 0.900 (3.327 sec/step)
step 63930 	 loss = 0.113, train_acc = 0.900 (3.337 sec/step)
step 63940 	 loss = 0.008, train_acc = 1.000 (3.364 sec/step)
step 63950 	 loss = 0.134, train_acc = 1.000 (3.525 sec/step)
step 63960 	 loss = 0.103, train_acc = 1.000 (3.320 sec/step)
step 63970 	 loss = 0.561, train_acc = 0.800 (3.296 sec/step)
step 63980 	 loss = 0.208, train_acc = 0.800 (3.333 sec/step)
step 63990 	 loss = 0.000, train_acc = 1.000 (3.366 sec/step)
step 64000 	 loss = 0.152, train_acc = 0.900 (3.348 sec/step)
step 64010 	 loss = 0.436, train_acc = 0.800 (3.362 sec/step)
step 64020 	 loss = 0.539, train_acc = 0.800 (3.331 sec/step)
step 64030 	 loss = 0.085, train_acc = 1.000 (3.486 sec/step)
step 64040 	 loss = 0.006, train_acc = 1.000 (3.377 sec/step)
step 64050 	 loss = 0.141, train_acc = 1.000 (3.371 sec/step)
step 64060 	 loss = 0.013, train_acc = 1.000 (3.365 sec/step)
step 64070 	 loss = 0.174, train_acc = 0.900 (3.375 sec/step)
step 64080 	 loss = 0.024, train_acc = 1.000 (3.378 sec/step)
step 64090 	 loss = 0.882, train_acc = 0.700 (3.292 sec/step)
step 64100 	 loss = 0.037, train_acc = 1.000 (3.374 sec/step)
step 64110 	 loss = 0.007, train_acc = 1.000 (3.303 sec/step)
step 64120 	 loss = 0.046, train_acc = 1.000 (3.314 sec/step)
step 64130 	 loss = 0.636, train_acc = 0.900 (3.345 sec/step)
step 64140 	 loss = 0.003, train_acc = 1.000 (3.331 sec/step)
step 64150 	 loss = 0.007, train_acc = 1.000 (3.339 sec/step)
step 64160 	 loss = 0.012, train_acc = 1.000 (3.346 sec/step)
step 64170 	 loss = 0.395, train_acc = 0.800 (3.357 sec/step)
step 64180 	 loss = 0.231, train_acc = 1.000 (3.305 sec/step)
step 64190 	 loss = 0.008, train_acc = 1.000 (3.336 sec/step)
step 64200 	 loss = 0.305, train_acc = 0.900 (3.315 sec/step)
step 64210 	 loss = 0.303, train_acc = 0.900 (3.311 sec/step)
step 64220 	 loss = 0.324, train_acc = 0.900 (3.367 sec/step)
step 64230 	 loss = 0.072, train_acc = 1.000 (3.381 sec/step)
step 64240 	 loss = 0.160, train_acc = 0.900 (3.376 sec/step)
step 64250 	 loss = 0.002, train_acc = 1.000 (3.381 sec/step)
step 64260 	 loss = 0.008, train_acc = 1.000 (3.372 sec/step)
step 64270 	 loss = 0.128, train_acc = 0.900 (3.358 sec/step)
step 64280 	 loss = 0.010, train_acc = 1.000 (3.352 sec/step)
step 64290 	 loss = 0.370, train_acc = 0.900 (3.320 sec/step)
step 64300 	 loss = 0.012, train_acc = 1.000 (3.407 sec/step)
step 64310 	 loss = 0.412, train_acc = 0.800 (3.320 sec/step)
step 64320 	 loss = 0.010, train_acc = 1.000 (3.319 sec/step)
step 64330 	 loss = 0.461, train_acc = 0.800 (3.331 sec/step)
step 64340 	 loss = 0.429, train_acc = 0.800 (3.324 sec/step)
step 64350 	 loss = 0.015, train_acc = 1.000 (3.363 sec/step)
step 64360 	 loss = 0.242, train_acc = 0.800 (3.315 sec/step)
step 64370 	 loss = 0.279, train_acc = 0.900 (3.354 sec/step)
step 64380 	 loss = 0.228, train_acc = 0.900 (3.384 sec/step)
step 64390 	 loss = 0.005, train_acc = 1.000 (3.409 sec/step)
step 64400 	 loss = 0.054, train_acc = 1.000 (3.370 sec/step)
step 64410 	 loss = 0.090, train_acc = 0.900 (3.357 sec/step)
step 64420 	 loss = 0.029, train_acc = 1.000 (3.367 sec/step)
step 64430 	 loss = 0.144, train_acc = 0.900 (3.324 sec/step)
step 64440 	 loss = 0.246, train_acc = 0.800 (3.387 sec/step)
step 64450 	 loss = 0.033, train_acc = 1.000 (3.295 sec/step)
step 64460 	 loss = 0.024, train_acc = 1.000 (3.341 sec/step)
step 64470 	 loss = 0.001, train_acc = 1.000 (3.385 sec/step)
step 64480 	 loss = 0.013, train_acc = 1.000 (3.299 sec/step)
step 64490 	 loss = 0.000, train_acc = 1.000 (3.385 sec/step)
step 64500 	 loss = 0.001, train_acc = 1.000 (3.346 sec/step)
step 64510 	 loss = 0.026, train_acc = 1.000 (3.332 sec/step)
step 64520 	 loss = 0.910, train_acc = 0.800 (3.309 sec/step)
step 64530 	 loss = 1.037, train_acc = 0.700 (3.321 sec/step)
step 64540 	 loss = 0.187, train_acc = 1.000 (3.360 sec/step)
step 64550 	 loss = 0.020, train_acc = 1.000 (3.387 sec/step)
step 64560 	 loss = 0.045, train_acc = 1.000 (3.391 sec/step)
step 64570 	 loss = 0.248, train_acc = 0.900 (3.305 sec/step)
step 64580 	 loss = 0.357, train_acc = 0.900 (3.398 sec/step)
step 64590 	 loss = 0.008, train_acc = 1.000 (3.384 sec/step)
VALIDATION 	 acc = 0.565 (3.652 sec)
New Best Accuracy 0.565 > Old Best 0.563.  Saving...
The checkpoint has been created.
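
The `New Best Accuracy ... Saving...` / `The checkpoint has been created.` pair corresponds to snapshotting only when validation accuracy improves on the best value seen so far. The notebook's training loop (and the `save` helper imported from `utils`) is not shown, so the sketch below is an assumption about that logic, written directly against `tf.train.Saver`; `saver`, `maybe_snapshot`, and `best_val_acc` are illustrative names only.

    # Minimal sketch, assuming a plain tf.train.Saver stands in for the
    # notebook's own `save` helper. SNAPSHOT_MAX and SNAPSHOT_DIR come from
    # the hyperparameter cell; everything else here is hypothetical.
    saver = tf.train.Saver(max_to_keep=SNAPSHOT_MAX)  # keep only the most recent best snapshots

    def maybe_snapshot(sess, step, val_acc, best_val_acc):
        """Write a checkpoint only when validation accuracy improves; return the new best."""
        if val_acc > best_val_acc:
            print('New Best Accuracy %.3f > Old Best %.3f.  Saving...' % (val_acc, best_val_acc))
            saver.save(sess, os.path.join(SNAPSHOT_DIR, 'model'), global_step=step)
            print('The checkpoint has been created.')
            return val_acc
        return best_val_acc
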
step 64600 	 loss = 1.461, train_acc = 0.800 (3.366 sec/step)
step 64610 	 loss = 0.018, train_acc = 1.000 (3.394 sec/step)
step 64620 	 loss = 0.458, train_acc = 0.800 (3.351 sec/step)
step 64630 	 loss = 0.754, train_acc = 0.900 (3.353 sec/step)
step 64640 	 loss = 0.007, train_acc = 1.000 (3.329 sec/step)
step 64650 	 loss = 0.247, train_acc = 0.900 (3.343 sec/step)
step 64660 	 loss = 0.001, train_acc = 1.000 (3.308 sec/step)
step 64670 	 loss = 0.060, train_acc = 1.000 (3.351 sec/step)
step 64680 	 loss = 0.201, train_acc = 0.900 (3.376 sec/step)
step 64690 	 loss = 0.532, train_acc = 0.900 (3.286 sec/step)
step 64700 	 loss = 0.003, train_acc = 1.000 (3.373 sec/step)
step 64710 	 loss = 0.000, train_acc = 1.000 (3.360 sec/step)
step 64720 	 loss = 2.202, train_acc = 0.900 (3.358 sec/step)
step 64730 	 loss = 0.201, train_acc = 0.900 (3.308 sec/step)
step 64740 	 loss = 0.030, train_acc = 1.000 (3.293 sec/step)
step 64750 	 loss = 0.633, train_acc = 0.900 (3.370 sec/step)
step 64760 	 loss = 0.627, train_acc = 0.900 (3.348 sec/step)
step 64770 	 loss = 0.113, train_acc = 1.000 (3.380 sec/step)
step 64780 	 loss = 0.077, train_acc = 1.000 (3.343 sec/step)
step 64790 	 loss = 0.037, train_acc = 1.000 (3.307 sec/step)
step 64800 	 loss = 0.141, train_acc = 1.000 (3.328 sec/step)
step 64810 	 loss = 0.030, train_acc = 1.000 (3.292 sec/step)
step 64820 	 loss = 0.031, train_acc = 1.000 (3.328 sec/step)
step 64830 	 loss = 0.010, train_acc = 1.000 (3.301 sec/step)
step 64840 	 loss = 0.422, train_acc = 0.900 (3.350 sec/step)
step 64850 	 loss = 0.712, train_acc = 0.800 (3.366 sec/step)
step 64860 	 loss = 0.008, train_acc = 1.000 (3.368 sec/step)
step 64870 	 loss = 0.007, train_acc = 1.000 (3.321 sec/step)
step 64880 	 loss = 0.000, train_acc = 1.000 (3.351 sec/step)
step 64890 	 loss = 0.400, train_acc = 0.900 (3.326 sec/step)
step 64900 	 loss = 0.100, train_acc = 0.900 (3.305 sec/step)
step 64910 	 loss = 0.214, train_acc = 0.900 (3.353 sec/step)
step 64920 	 loss = 0.101, train_acc = 0.900 (3.356 sec/step)
step 64930 	 loss = 0.019, train_acc = 1.000 (3.376 sec/step)
step 64940 	 loss = 0.077, train_acc = 1.000 (3.325 sec/step)
step 64950 	 loss = 0.354, train_acc = 0.900 (3.354 sec/step)
step 64960 	 loss = 0.534, train_acc = 0.800 (3.418 sec/step)
step 64970 	 loss = 0.348, train_acc = 0.900 (3.354 sec/step)
step 64980 	 loss = 0.640, train_acc = 0.900 (3.342 sec/step)
step 64990 	 loss = 0.570, train_acc = 0.900 (3.333 sec/step)
step 65000 	 loss = 0.061, train_acc = 1.000 (3.302 sec/step)
step 65010 	 loss = 0.590, train_acc = 0.900 (3.316 sec/step)
step 65020 	 loss = 0.254, train_acc = 0.900 (3.359 sec/step)
step 65030 	 loss = 0.083, train_acc = 1.000 (3.377 sec/step)
step 65040 	 loss = 0.330, train_acc = 0.900 (3.339 sec/step)
step 65050 	 loss = 0.107, train_acc = 0.900 (3.299 sec/step)
step 65060 	 loss = 0.007, train_acc = 1.000 (3.392 sec/step)
step 65070 	 loss = 0.003, train_acc = 1.000 (3.313 sec/step)
step 65080 	 loss = 0.003, train_acc = 1.000 (3.304 sec/step)
step 65090 	 loss = 0.396, train_acc = 0.800 (3.325 sec/step)
step 65100 	 loss = 0.014, train_acc = 1.000 (3.324 sec/step)
step 65110 	 loss = 0.134, train_acc = 0.900 (3.318 sec/step)
step 65120 	 loss = 0.113, train_acc = 0.900 (3.315 sec/step)
step 65130 	 loss = 0.323, train_acc = 0.900 (3.334 sec/step)
step 65140 	 loss = 0.021, train_acc = 1.000 (3.356 sec/step)
step 65150 	 loss = 0.000, train_acc = 1.000 (3.311 sec/step)
step 65160 	 loss = 0.000, train_acc = 1.000 (3.321 sec/step)
step 65170 	 loss = 0.014, train_acc = 1.000 (3.316 sec/step)
step 65180 	 loss = 0.374, train_acc = 0.900 (3.367 sec/step)
step 65190 	 loss = 0.003, train_acc = 1.000 (3.339 sec/step)
step 65200 	 loss = 0.057, train_acc = 1.000 (3.400 sec/step)
step 65210 	 loss = 0.145, train_acc = 0.900 (3.295 sec/step)
step 65220 	 loss = 0.188, train_acc = 1.000 (3.325 sec/step)
step 65230 	 loss = 0.245, train_acc = 0.900 (3.314 sec/step)
step 65240 	 loss = 0.054, train_acc = 1.000 (3.315 sec/step)
step 65250 	 loss = 0.003, train_acc = 1.000 (3.369 sec/step)
step 65260 	 loss = 1.461, train_acc = 0.700 (3.331 sec/step)
step 65270 	 loss = 0.029, train_acc = 1.000 (3.367 sec/step)
step 65280 	 loss = 0.885, train_acc = 0.900 (3.384 sec/step)
step 65290 	 loss = 0.038, train_acc = 1.000 (3.333 sec/step)
step 65300 	 loss = 0.003, train_acc = 1.000 (3.327 sec/step)
step 65310 	 loss = 0.097, train_acc = 0.900 (3.418 sec/step)
step 65320 	 loss = 1.101, train_acc = 0.700 (3.329 sec/step)
step 65330 	 loss = 0.004, train_acc = 1.000 (3.360 sec/step)
step 65340 	 loss = 0.016, train_acc = 1.000 (3.374 sec/step)
step 65350 	 loss = 0.003, train_acc = 1.000 (3.374 sec/step)
step 65360 	 loss = 0.231, train_acc = 0.900 (3.376 sec/step)
step 65370 	 loss = 0.338, train_acc = 0.800 (3.338 sec/step)
step 65380 	 loss = 0.006, train_acc = 1.000 (3.357 sec/step)
step 65390 	 loss = 0.024, train_acc = 1.000 (3.363 sec/step)
step 65400 	 loss = 0.235, train_acc = 0.900 (3.327 sec/step)
step 65410 	 loss = 0.000, train_acc = 1.000 (3.363 sec/step)
step 65420 	 loss = 0.896, train_acc = 0.900 (3.326 sec/step)
step 65430 	 loss = 0.140, train_acc = 0.900 (3.334 sec/step)
step 65440 	 loss = 0.002, train_acc = 1.000 (3.368 sec/step)
step 65450 	 loss = 0.325, train_acc = 0.900 (3.316 sec/step)
step 65460 	 loss = 0.003, train_acc = 1.000 (3.350 sec/step)
step 65470 	 loss = 0.133, train_acc = 0.900 (3.350 sec/step)
step 65480 	 loss = 0.003, train_acc = 1.000 (3.377 sec/step)
step 65490 	 loss = 0.517, train_acc = 0.800 (3.310 sec/step)
step 65500 	 loss = 0.002, train_acc = 1.000 (3.331 sec/step)
step 65510 	 loss = 0.024, train_acc = 1.000 (3.316 sec/step)
step 65520 	 loss = 0.119, train_acc = 0.900 (3.303 sec/step)
step 65530 	 loss = 0.322, train_acc = 0.900 (3.321 sec/step)
step 65540 	 loss = 0.288, train_acc = 0.900 (3.285 sec/step)
step 65550 	 loss = 0.418, train_acc = 0.900 (3.314 sec/step)
step 65560 	 loss = 0.000, train_acc = 1.000 (3.287 sec/step)
step 65570 	 loss = 0.004, train_acc = 1.000 (3.354 sec/step)
step 65580 	 loss = 0.064, train_acc = 1.000 (3.323 sec/step)
step 65590 	 loss = 0.036, train_acc = 1.000 (3.315 sec/step)
step 65600 	 loss = 0.734, train_acc = 0.900 (3.325 sec/step)
step 65610 	 loss = 0.094, train_acc = 1.000 (3.335 sec/step)
step 65620 	 loss = 0.007, train_acc = 1.000 (3.381 sec/step)
step 65630 	 loss = 0.193, train_acc = 0.900 (3.316 sec/step)
step 65640 	 loss = 0.013, train_acc = 1.000 (3.362 sec/step)
step 65650 	 loss = 0.599, train_acc = 0.800 (3.319 sec/step)
step 65660 	 loss = 0.397, train_acc = 0.900 (3.386 sec/step)
step 65670 	 loss = 0.057, train_acc = 1.000 (3.366 sec/step)
step 65680 	 loss = 0.008, train_acc = 1.000 (3.309 sec/step)
step 65690 	 loss = 3.145, train_acc = 0.900 (3.349 sec/step)
step 65700 	 loss = 0.059, train_acc = 1.000 (3.310 sec/step)
step 65710 	 loss = 0.024, train_acc = 1.000 (3.396 sec/step)
step 65720 	 loss = 0.011, train_acc = 1.000 (3.366 sec/step)
step 65730 	 loss = 0.072, train_acc = 1.000 (3.304 sec/step)
step 65740 	 loss = 0.885, train_acc = 0.800 (3.329 sec/step)
step 65750 	 loss = 0.323, train_acc = 0.900 (3.302 sec/step)
step 65760 	 loss = 0.453, train_acc = 0.900 (3.317 sec/step)
step 65770 	 loss = 0.018, train_acc = 1.000 (3.313 sec/step)
step 65780 	 loss = 0.032, train_acc = 1.000 (3.350 sec/step)
step 65790 	 loss = 0.786, train_acc = 0.900 (3.348 sec/step)
step 65800 	 loss = 0.032, train_acc = 1.000 (3.350 sec/step)
step 65810 	 loss = 0.022, train_acc = 1.000 (3.395 sec/step)
step 65820 	 loss = 0.019, train_acc = 1.000 (3.348 sec/step)
step 65830 	 loss = 0.001, train_acc = 1.000 (3.367 sec/step)
step 65840 	 loss = 0.238, train_acc = 0.900 (3.303 sec/step)
step 65850 	 loss = 0.159, train_acc = 0.900 (3.471 sec/step)
step 65860 	 loss = 0.129, train_acc = 1.000 (3.299 sec/step)
step 65870 	 loss = 0.357, train_acc = 0.800 (3.338 sec/step)
step 65880 	 loss = 0.002, train_acc = 1.000 (3.288 sec/step)
step 65890 	 loss = 0.039, train_acc = 1.000 (3.369 sec/step)
step 65900 	 loss = 0.000, train_acc = 1.000 (3.316 sec/step)
step 65910 	 loss = 0.032, train_acc = 1.000 (3.354 sec/step)
step 65920 	 loss = 0.017, train_acc = 1.000 (3.316 sec/step)
step 65930 	 loss = 0.304, train_acc = 0.900 (3.300 sec/step)
step 65940 	 loss = 0.225, train_acc = 0.900 (3.307 sec/step)
step 65950 	 loss = 0.011, train_acc = 1.000 (3.325 sec/step)
step 65960 	 loss = 0.410, train_acc = 0.800 (3.380 sec/step)
step 65970 	 loss = 0.001, train_acc = 1.000 (3.319 sec/step)
step 65980 	 loss = 0.497, train_acc = 0.800 (3.320 sec/step)
step 65990 	 loss = 0.770, train_acc = 0.800 (3.321 sec/step)
step 66000 	 loss = 0.002, train_acc = 1.000 (3.352 sec/step)
step 66010 	 loss = 1.430, train_acc = 0.900 (3.299 sec/step)
step 66020 	 loss = 0.492, train_acc = 0.900 (3.360 sec/step)
step 66030 	 loss = 0.058, train_acc = 1.000 (3.346 sec/step)
step 66040 	 loss = 0.062, train_acc = 1.000 (3.304 sec/step)
step 66050 	 loss = 0.145, train_acc = 0.900 (3.353 sec/step)
step 66060 	 loss = 0.007, train_acc = 1.000 (3.305 sec/step)
step 66070 	 loss = 0.970, train_acc = 0.900 (3.364 sec/step)
step 66080 	 loss = 0.092, train_acc = 1.000 (3.314 sec/step)
step 66090 	 loss = 0.003, train_acc = 1.000 (3.338 sec/step)
step 66100 	 loss = 0.000, train_acc = 1.000 (3.345 sec/step)
step 66110 	 loss = 0.024, train_acc = 1.000 (3.351 sec/step)
step 66120 	 loss = 0.089, train_acc = 1.000 (3.344 sec/step)
step 66130 	 loss = 0.410, train_acc = 0.900 (3.363 sec/step)
step 66140 	 loss = 0.055, train_acc = 1.000 (3.309 sec/step)
step 66150 	 loss = 2.566, train_acc = 0.800 (3.354 sec/step)
step 66160 	 loss = 0.003, train_acc = 1.000 (3.328 sec/step)
step 66170 	 loss = 0.016, train_acc = 1.000 (3.376 sec/step)
step 66180 	 loss = 0.210, train_acc = 0.900 (3.300 sec/step)
step 66190 	 loss = 0.021, train_acc = 1.000 (3.385 sec/step)
step 66200 	 loss = 1.411, train_acc = 0.800 (3.391 sec/step)
step 66210 	 loss = 0.083, train_acc = 1.000 (3.369 sec/step)
step 66220 	 loss = 0.001, train_acc = 1.000 (3.443 sec/step)
step 66230 	 loss = 0.021, train_acc = 1.000 (3.316 sec/step)
step 66240 	 loss = 0.109, train_acc = 0.900 (3.326 sec/step)
step 66250 	 loss = 0.071, train_acc = 1.000 (3.309 sec/step)
step 66260 	 loss = 0.107, train_acc = 1.000 (3.320 sec/step)
step 66270 	 loss = 2.036, train_acc = 0.700 (3.337 sec/step)
step 66280 	 loss = 0.055, train_acc = 1.000 (3.355 sec/step)
step 66290 	 loss = 0.032, train_acc = 1.000 (3.373 sec/step)
step 66300 	 loss = 0.000, train_acc = 1.000 (3.336 sec/step)
step 66310 	 loss = 0.000, train_acc = 1.000 (3.320 sec/step)
step 66320 	 loss = 0.808, train_acc = 0.800 (3.340 sec/step)
step 66330 	 loss = 0.002, train_acc = 1.000 (3.387 sec/step)
step 66340 	 loss = 0.104, train_acc = 1.000 (3.370 sec/step)
step 66350 	 loss = 0.288, train_acc = 0.900 (3.321 sec/step)
step 66360 	 loss = 0.326, train_acc = 0.900 (3.315 sec/step)
step 66370 	 loss = 0.375, train_acc = 0.900 (3.309 sec/step)
step 66380 	 loss = 0.108, train_acc = 1.000 (3.346 sec/step)
step 66390 	 loss = 0.126, train_acc = 1.000 (3.347 sec/step)
step 66400 	 loss = 0.024, train_acc = 1.000 (3.323 sec/step)
step 66410 	 loss = 0.024, train_acc = 1.000 (3.339 sec/step)
step 66420 	 loss = 0.001, train_acc = 1.000 (3.382 sec/step)
step 66430 	 loss = 0.027, train_acc = 1.000 (3.349 sec/step)
step 66440 	 loss = 0.032, train_acc = 1.000 (3.411 sec/step)
step 66450 	 loss = 1.278, train_acc = 0.900 (3.335 sec/step)
step 66460 	 loss = 0.264, train_acc = 0.900 (3.368 sec/step)
step 66470 	 loss = 0.008, train_acc = 1.000 (3.322 sec/step)
step 66480 	 loss = 0.453, train_acc = 0.900 (3.339 sec/step)
step 66490 	 loss = 0.259, train_acc = 0.900 (3.305 sec/step)
VALIDATION 	 acc = 0.549 (3.668 sec)
step 66500 	 loss = 0.153, train_acc = 0.900 (3.321 sec/step)
step 66510 	 loss = 0.005, train_acc = 1.000 (3.325 sec/step)
step 66520 	 loss = 0.061, train_acc = 1.000 (3.333 sec/step)
step 66530 	 loss = 0.149, train_acc = 0.900 (3.313 sec/step)
step 66540 	 loss = 0.026, train_acc = 1.000 (3.392 sec/step)
step 66550 	 loss = 0.180, train_acc = 0.900 (3.356 sec/step)
step 66560 	 loss = 0.547, train_acc = 0.800 (3.412 sec/step)
step 66570 	 loss = 0.042, train_acc = 1.000 (3.382 sec/step)
step 66580 	 loss = 0.001, train_acc = 1.000 (3.328 sec/step)
step 66590 	 loss = 0.103, train_acc = 1.000 (3.339 sec/step)
step 66600 	 loss = 0.103, train_acc = 0.900 (3.335 sec/step)
step 66610 	 loss = 0.570, train_acc = 0.900 (3.391 sec/step)
step 66620 	 loss = 0.334, train_acc = 0.900 (3.346 sec/step)
step 66630 	 loss = 0.014, train_acc = 1.000 (3.356 sec/step)
step 66640 	 loss = 0.188, train_acc = 0.900 (3.325 sec/step)
step 66650 	 loss = 0.100, train_acc = 1.000 (3.307 sec/step)
step 66660 	 loss = 0.102, train_acc = 1.000 (3.306 sec/step)
step 66670 	 loss = 0.076, train_acc = 1.000 (3.363 sec/step)
step 66680 	 loss = 0.011, train_acc = 1.000 (3.352 sec/step)
step 66690 	 loss = 0.004, train_acc = 1.000 (3.378 sec/step)
step 66700 	 loss = 0.050, train_acc = 1.000 (3.341 sec/step)
step 66710 	 loss = 0.387, train_acc = 0.900 (3.353 sec/step)
step 66720 	 loss = 0.018, train_acc = 1.000 (3.386 sec/step)
step 66730 	 loss = 0.281, train_acc = 0.900 (3.314 sec/step)
step 66740 	 loss = 0.462, train_acc = 0.800 (3.315 sec/step)
step 66750 	 loss = 0.002, train_acc = 1.000 (3.297 sec/step)
step 66760 	 loss = 0.006, train_acc = 1.000 (3.322 sec/step)
step 66770 	 loss = 0.097, train_acc = 0.900 (3.341 sec/step)
step 66780 	 loss = 0.000, train_acc = 1.000 (3.382 sec/step)
step 66790 	 loss = 0.000, train_acc = 1.000 (3.339 sec/step)
step 66800 	 loss = 0.202, train_acc = 0.900 (3.345 sec/step)
step 66810 	 loss = 0.395, train_acc = 0.900 (3.383 sec/step)
step 66820 	 loss = 0.070, train_acc = 1.000 (3.431 sec/step)
step 66830 	 loss = 0.030, train_acc = 1.000 (3.319 sec/step)
step 66840 	 loss = 0.048, train_acc = 1.000 (3.303 sec/step)
step 66850 	 loss = 0.572, train_acc = 0.900 (3.342 sec/step)
step 66860 	 loss = 0.575, train_acc = 0.900 (3.334 sec/step)
step 66870 	 loss = 0.028, train_acc = 1.000 (3.327 sec/step)
step 66880 	 loss = 0.040, train_acc = 1.000 (3.329 sec/step)
step 66890 	 loss = 0.001, train_acc = 1.000 (3.298 sec/step)
step 66900 	 loss = 0.017, train_acc = 1.000 (3.388 sec/step)
step 66910 	 loss = 0.003, train_acc = 1.000 (3.351 sec/step)
step 66920 	 loss = 1.072, train_acc = 0.900 (3.377 sec/step)
step 66930 	 loss = 0.037, train_acc = 1.000 (3.364 sec/step)
step 66940 	 loss = 0.005, train_acc = 1.000 (3.314 sec/step)
step 66950 	 loss = 0.282, train_acc = 0.800 (3.331 sec/step)
step 66960 	 loss = 0.362, train_acc = 0.900 (3.382 sec/step)
step 66970 	 loss = 0.000, train_acc = 1.000 (3.339 sec/step)
step 66980 	 loss = 0.002, train_acc = 1.000 (3.366 sec/step)
step 66990 	 loss = 1.038, train_acc = 0.900 (3.337 sec/step)
step 67000 	 loss = 0.161, train_acc = 0.900 (3.395 sec/step)
step 67010 	 loss = 0.020, train_acc = 1.000 (3.371 sec/step)
step 67020 	 loss = 0.000, train_acc = 1.000 (3.334 sec/step)
step 67030 	 loss = 0.233, train_acc = 0.900 (3.363 sec/step)
step 67040 	 loss = 1.429, train_acc = 0.900 (3.348 sec/step)
step 67050 	 loss = 0.024, train_acc = 1.000 (3.365 sec/step)
step 67060 	 loss = 0.000, train_acc = 1.000 (3.364 sec/step)
step 67070 	 loss = 0.621, train_acc = 0.900 (3.340 sec/step)
step 67080 	 loss = 0.365, train_acc = 0.900 (3.356 sec/step)
step 67090 	 loss = 0.055, train_acc = 1.000 (3.365 sec/step)
step 67100 	 loss = 0.030, train_acc = 1.000 (3.327 sec/step)
step 67110 	 loss = 0.010, train_acc = 1.000 (3.365 sec/step)
step 67120 	 loss = 0.018, train_acc = 1.000 (3.340 sec/step)
step 67130 	 loss = 0.019, train_acc = 1.000 (3.348 sec/step)
step 67140 	 loss = 0.045, train_acc = 1.000 (3.335 sec/step)
step 67150 	 loss = 0.010, train_acc = 1.000 (3.353 sec/step)
step 67160 	 loss = 0.110, train_acc = 0.900 (3.367 sec/step)
step 67170 	 loss = 0.010, train_acc = 1.000 (3.296 sec/step)
step 67180 	 loss = 0.024, train_acc = 1.000 (3.286 sec/step)
step 67190 	 loss = 0.011, train_acc = 1.000 (3.373 sec/step)
step 67200 	 loss = 0.147, train_acc = 0.900 (3.374 sec/step)
step 67210 	 loss = 0.048, train_acc = 1.000 (3.347 sec/step)
step 67220 	 loss = 0.012, train_acc = 1.000 (3.379 sec/step)
step 67230 	 loss = 0.008, train_acc = 1.000 (3.367 sec/step)
step 67240 	 loss = 0.737, train_acc = 0.900 (3.285 sec/step)
step 67250 	 loss = 0.019, train_acc = 1.000 (3.310 sec/step)
step 67260 	 loss = 0.767, train_acc = 0.900 (3.438 sec/step)
step 67270 	 loss = 0.008, train_acc = 1.000 (3.378 sec/step)
step 67280 	 loss = 0.003, train_acc = 1.000 (3.366 sec/step)
step 67290 	 loss = 0.206, train_acc = 0.900 (3.387 sec/step)
step 67300 	 loss = 0.168, train_acc = 0.900 (3.401 sec/step)
step 67310 	 loss = 0.039, train_acc = 1.000 (3.346 sec/step)
step 67320 	 loss = 0.000, train_acc = 1.000 (3.381 sec/step)
step 67330 	 loss = 0.014, train_acc = 1.000 (3.325 sec/step)
step 67340 	 loss = 0.002, train_acc = 1.000 (3.336 sec/step)
step 67350 	 loss = 0.027, train_acc = 1.000 (3.343 sec/step)
step 67360 	 loss = 0.027, train_acc = 1.000 (3.341 sec/step)
step 67370 	 loss = 0.360, train_acc = 0.900 (3.400 sec/step)
step 67380 	 loss = 0.008, train_acc = 1.000 (3.347 sec/step)
step 67390 	 loss = 0.006, train_acc = 1.000 (3.354 sec/step)
step 67400 	 loss = 0.441, train_acc = 0.900 (3.331 sec/step)
step 67410 	 loss = 0.297, train_acc = 0.900 (3.358 sec/step)
step 67420 	 loss = 0.046, train_acc = 1.000 (3.365 sec/step)
step 67430 	 loss = 0.008, train_acc = 1.000 (3.364 sec/step)
step 67440 	 loss = 0.003, train_acc = 1.000 (3.347 sec/step)
step 67450 	 loss = 0.079, train_acc = 1.000 (3.389 sec/step)
step 67460 	 loss = 0.008, train_acc = 1.000 (3.307 sec/step)
step 67470 	 loss = 0.002, train_acc = 1.000 (3.363 sec/step)
step 67480 	 loss = 0.289, train_acc = 0.900 (3.353 sec/step)
step 67490 	 loss = 0.232, train_acc = 0.900 (3.378 sec/step)
step 67500 	 loss = 0.000, train_acc = 1.000 (3.374 sec/step)
step 67510 	 loss = 0.031, train_acc = 1.000 (3.329 sec/step)
step 67520 	 loss = 0.748, train_acc = 0.800 (3.336 sec/step)
step 67530 	 loss = 0.019, train_acc = 1.000 (3.296 sec/step)
step 67540 	 loss = 1.572, train_acc = 0.900 (3.338 sec/step)
step 67550 	 loss = 0.001, train_acc = 1.000 (3.290 sec/step)
step 67560 	 loss = 0.093, train_acc = 1.000 (3.351 sec/step)
step 67570 	 loss = 0.023, train_acc = 1.000 (3.299 sec/step)
step 67580 	 loss = 0.011, train_acc = 1.000 (3.332 sec/step)
step 67590 	 loss = 0.006, train_acc = 1.000 (3.331 sec/step)
step 67600 	 loss = 0.037, train_acc = 1.000 (3.325 sec/step)
step 67610 	 loss = 0.410, train_acc = 0.900 (3.375 sec/step)
step 67620 	 loss = 0.004, train_acc = 1.000 (3.373 sec/step)
step 67630 	 loss = 0.047, train_acc = 1.000 (3.327 sec/step)
step 67640 	 loss = 0.048, train_acc = 1.000 (3.322 sec/step)
step 67650 	 loss = 0.001, train_acc = 1.000 (3.287 sec/step)
step 67660 	 loss = 0.057, train_acc = 1.000 (3.324 sec/step)
step 67670 	 loss = 0.337, train_acc = 0.900 (3.333 sec/step)
step 67680 	 loss = 0.035, train_acc = 1.000 (3.339 sec/step)
step 67690 	 loss = 0.145, train_acc = 0.900 (3.390 sec/step)
step 67700 	 loss = 1.018, train_acc = 0.800 (3.372 sec/step)
step 67710 	 loss = 0.040, train_acc = 1.000 (3.387 sec/step)
step 67720 	 loss = 0.895, train_acc = 0.900 (3.385 sec/step)
step 67730 	 loss = 0.268, train_acc = 0.900 (3.426 sec/step)
step 67740 	 loss = 0.258, train_acc = 0.900 (3.343 sec/step)
step 67750 	 loss = 0.067, train_acc = 1.000 (3.393 sec/step)
step 67760 	 loss = 0.071, train_acc = 1.000 (3.348 sec/step)
step 67770 	 loss = 0.196, train_acc = 0.900 (3.326 sec/step)
step 67780 	 loss = 0.382, train_acc = 0.900 (3.390 sec/step)
step 67790 	 loss = 0.257, train_acc = 0.900 (3.317 sec/step)
step 67800 	 loss = 0.816, train_acc = 0.900 (3.340 sec/step)
step 67810 	 loss = 0.141, train_acc = 0.900 (3.342 sec/step)
step 67820 	 loss = 0.237, train_acc = 0.900 (3.343 sec/step)
step 67830 	 loss = 0.013, train_acc = 1.000 (3.331 sec/step)
step 67840 	 loss = 0.080, train_acc = 0.900 (3.375 sec/step)
step 67850 	 loss = 0.133, train_acc = 0.900 (3.328 sec/step)
step 67860 	 loss = 0.322, train_acc = 0.800 (3.362 sec/step)
step 67870 	 loss = 0.028, train_acc = 1.000 (3.304 sec/step)
step 67880 	 loss = 0.019, train_acc = 1.000 (3.361 sec/step)
step 67890 	 loss = 0.010, train_acc = 1.000 (3.360 sec/step)
step 67900 	 loss = 0.091, train_acc = 1.000 (3.336 sec/step)
step 67910 	 loss = 0.010, train_acc = 1.000 (3.331 sec/step)
step 67920 	 loss = 0.075, train_acc = 1.000 (3.339 sec/step)
step 67930 	 loss = 0.028, train_acc = 1.000 (3.327 sec/step)
step 67940 	 loss = 0.006, train_acc = 1.000 (3.371 sec/step)
step 67950 	 loss = 0.386, train_acc = 0.900 (3.295 sec/step)
step 67960 	 loss = 0.002, train_acc = 1.000 (3.328 sec/step)
step 67970 	 loss = 0.559, train_acc = 0.900 (3.329 sec/step)
step 67980 	 loss = 0.225, train_acc = 1.000 (3.366 sec/step)
step 67990 	 loss = 0.327, train_acc = 0.800 (3.352 sec/step)
step 68000 	 loss = 0.246, train_acc = 0.900 (3.339 sec/step)
step 68010 	 loss = 0.036, train_acc = 1.000 (3.345 sec/step)
step 68020 	 loss = 0.023, train_acc = 1.000 (3.307 sec/step)
step 68030 	 loss = 1.235, train_acc = 0.600 (3.335 sec/step)
step 68040 	 loss = 0.008, train_acc = 1.000 (3.307 sec/step)
step 68050 	 loss = 0.155, train_acc = 1.000 (3.359 sec/step)
step 68060 	 loss = 0.257, train_acc = 0.900 (3.327 sec/step)
step 68070 	 loss = 0.003, train_acc = 1.000 (3.302 sec/step)
step 68080 	 loss = 0.017, train_acc = 1.000 (3.339 sec/step)
step 68090 	 loss = 0.000, train_acc = 1.000 (3.347 sec/step)
step 68100 	 loss = 0.002, train_acc = 1.000 (3.462 sec/step)
step 68110 	 loss = 0.077, train_acc = 1.000 (3.343 sec/step)
step 68120 	 loss = 0.071, train_acc = 1.000 (3.382 sec/step)
step 68130 	 loss = 0.207, train_acc = 0.900 (3.355 sec/step)
step 68140 	 loss = 0.837, train_acc = 0.900 (3.310 sec/step)
step 68150 	 loss = 0.010, train_acc = 1.000 (3.402 sec/step)
step 68160 	 loss = 1.068, train_acc = 0.800 (3.362 sec/step)
step 68170 	 loss = 0.157, train_acc = 1.000 (3.331 sec/step)
step 68180 	 loss = 0.000, train_acc = 1.000 (3.419 sec/step)
step 68190 	 loss = 0.407, train_acc = 0.800 (3.323 sec/step)
step 68200 	 loss = 0.129, train_acc = 1.000 (3.360 sec/step)
step 68210 	 loss = 0.117, train_acc = 0.900 (3.322 sec/step)
step 68220 	 loss = 0.063, train_acc = 1.000 (3.326 sec/step)
step 68230 	 loss = 0.006, train_acc = 1.000 (3.330 sec/step)
step 68240 	 loss = 0.250, train_acc = 0.900 (3.352 sec/step)
step 68250 	 loss = 0.259, train_acc = 0.900 (3.365 sec/step)
step 68260 	 loss = 0.044, train_acc = 1.000 (3.393 sec/step)
step 68270 	 loss = 0.000, train_acc = 1.000 (3.318 sec/step)
step 68280 	 loss = 0.096, train_acc = 0.900 (3.363 sec/step)
step 68290 	 loss = 0.313, train_acc = 0.900 (3.378 sec/step)
step 68300 	 loss = 0.047, train_acc = 1.000 (3.317 sec/step)
step 68310 	 loss = 0.542, train_acc = 0.800 (3.302 sec/step)
step 68320 	 loss = 0.663, train_acc = 0.800 (3.316 sec/step)
step 68330 	 loss = 0.212, train_acc = 0.900 (3.345 sec/step)
step 68340 	 loss = 0.768, train_acc = 0.900 (3.383 sec/step)
step 68350 	 loss = 0.009, train_acc = 1.000 (3.308 sec/step)
step 68360 	 loss = 0.002, train_acc = 1.000 (3.375 sec/step)
step 68370 	 loss = 0.014, train_acc = 1.000 (3.468 sec/step)
step 68380 	 loss = 0.000, train_acc = 1.000 (3.296 sec/step)
step 68390 	 loss = 0.141, train_acc = 0.900 (3.335 sec/step)
VALIDATION 	 acc = 0.535 (3.631 sec)
step 68400 	 loss = 0.004, train_acc = 1.000 (3.368 sec/step)
step 68410 	 loss = 0.045, train_acc = 1.000 (3.346 sec/step)
step 68420 	 loss = 0.022, train_acc = 1.000 (3.330 sec/step)
step 68430 	 loss = 0.007, train_acc = 1.000 (3.420 sec/step)
step 68440 	 loss = 0.039, train_acc = 1.000 (3.304 sec/step)
step 68450 	 loss = 0.002, train_acc = 1.000 (3.335 sec/step)
step 68460 	 loss = 0.013, train_acc = 1.000 (3.328 sec/step)
step 68470 	 loss = 0.019, train_acc = 1.000 (3.376 sec/step)
step 68480 	 loss = 0.001, train_acc = 1.000 (3.347 sec/step)
step 68490 	 loss = 0.000, train_acc = 1.000 (3.331 sec/step)
step 68500 	 loss = 0.082, train_acc = 0.900 (3.359 sec/step)
step 68510 	 loss = 2.563, train_acc = 0.800 (3.309 sec/step)
step 68520 	 loss = 0.533, train_acc = 0.800 (3.376 sec/step)
step 68530 	 loss = 0.000, train_acc = 1.000 (3.355 sec/step)
step 68540 	 loss = 0.043, train_acc = 1.000 (3.361 sec/step)
step 68550 	 loss = 0.003, train_acc = 1.000 (3.335 sec/step)
step 68560 	 loss = 0.006, train_acc = 1.000 (3.406 sec/step)
step 68570 	 loss = 0.001, train_acc = 1.000 (3.322 sec/step)
step 68580 	 loss = 0.125, train_acc = 0.900 (3.331 sec/step)
step 68590 	 loss = 0.379, train_acc = 0.900 (3.345 sec/step)
step 68600 	 loss = 0.158, train_acc = 0.900 (3.353 sec/step)
step 68610 	 loss = 0.146, train_acc = 1.000 (3.432 sec/step)
step 68620 	 loss = 0.013, train_acc = 1.000 (3.322 sec/step)
step 68630 	 loss = 0.006, train_acc = 1.000 (3.367 sec/step)
step 68640 	 loss = 0.487, train_acc = 0.800 (3.328 sec/step)
step 68650 	 loss = 0.216, train_acc = 0.900 (3.347 sec/step)
step 68660 	 loss = 0.810, train_acc = 0.600 (3.355 sec/step)
step 68670 	 loss = 1.631, train_acc = 0.600 (3.342 sec/step)
step 68680 	 loss = 0.062, train_acc = 1.000 (3.345 sec/step)
step 68690 	 loss = 1.012, train_acc = 0.600 (3.324 sec/step)
step 68700 	 loss = 0.832, train_acc = 0.800 (3.326 sec/step)
step 68710 	 loss = 0.406, train_acc = 0.900 (3.307 sec/step)
step 68720 	 loss = 0.037, train_acc = 1.000 (3.346 sec/step)
step 68730 	 loss = 0.056, train_acc = 1.000 (3.317 sec/step)
step 68740 	 loss = 0.012, train_acc = 1.000 (3.396 sec/step)
step 68750 	 loss = 0.003, train_acc = 1.000 (3.323 sec/step)
step 68760 	 loss = 0.104, train_acc = 0.900 (3.320 sec/step)
step 68770 	 loss = 0.352, train_acc = 0.900 (3.341 sec/step)
step 68780 	 loss = 1.068, train_acc = 0.800 (3.358 sec/step)
step 68790 	 loss = 0.004, train_acc = 1.000 (3.338 sec/step)
step 68800 	 loss = 1.441, train_acc = 0.600 (3.328 sec/step)
step 68810 	 loss = 0.009, train_acc = 1.000 (3.385 sec/step)
step 68820 	 loss = 0.799, train_acc = 0.900 (3.487 sec/step)
step 68830 	 loss = 0.011, train_acc = 1.000 (3.329 sec/step)
step 68840 	 loss = 1.080, train_acc = 0.800 (3.364 sec/step)
step 68850 	 loss = 0.604, train_acc = 0.700 (3.374 sec/step)
step 68860 	 loss = 0.089, train_acc = 1.000 (3.351 sec/step)
step 68870 	 loss = 0.001, train_acc = 1.000 (3.329 sec/step)
step 68880 	 loss = 0.084, train_acc = 1.000 (3.384 sec/step)
step 68890 	 loss = 0.026, train_acc = 1.000 (3.354 sec/step)
step 68900 	 loss = 0.005, train_acc = 1.000 (3.362 sec/step)
step 68910 	 loss = 0.000, train_acc = 1.000 (3.364 sec/step)
step 68920 	 loss = 0.386, train_acc = 0.900 (3.358 sec/step)
step 68930 	 loss = 0.539, train_acc = 0.700 (3.375 sec/step)
step 68940 	 loss = 1.127, train_acc = 0.700 (3.311 sec/step)
step 68950 	 loss = 0.002, train_acc = 1.000 (3.307 sec/step)
step 68960 	 loss = 0.321, train_acc = 0.800 (3.346 sec/step)
step 68970 	 loss = 0.631, train_acc = 0.800 (3.355 sec/step)
step 68980 	 loss = 0.357, train_acc = 0.900 (3.325 sec/step)
step 68990 	 loss = 0.330, train_acc = 0.900 (3.291 sec/step)
step 69000 	 loss = 0.136, train_acc = 0.900 (3.411 sec/step)
step 69010 	 loss = 0.017, train_acc = 1.000 (3.310 sec/step)
step 69020 	 loss = 0.012, train_acc = 1.000 (3.313 sec/step)
step 69030 	 loss = 0.489, train_acc = 0.800 (3.335 sec/step)
step 69040 	 loss = 0.109, train_acc = 1.000 (3.345 sec/step)
step 69050 	 loss = 0.916, train_acc = 0.800 (3.349 sec/step)
step 69060 	 loss = 0.025, train_acc = 1.000 (3.362 sec/step)
step 69070 	 loss = 0.188, train_acc = 0.900 (3.380 sec/step)
step 69080 	 loss = 0.087, train_acc = 1.000 (3.381 sec/step)
step 69090 	 loss = 0.061, train_acc = 1.000 (3.335 sec/step)
step 69100 	 loss = 0.077, train_acc = 1.000 (3.351 sec/step)
step 69110 	 loss = 0.004, train_acc = 1.000 (3.306 sec/step)
step 69120 	 loss = 0.073, train_acc = 1.000 (3.334 sec/step)
step 69130 	 loss = 0.001, train_acc = 1.000 (3.332 sec/step)
step 69140 	 loss = 0.006, train_acc = 1.000 (3.328 sec/step)
step 69150 	 loss = 0.076, train_acc = 1.000 (3.352 sec/step)
step 69160 	 loss = 0.002, train_acc = 1.000 (3.341 sec/step)
step 69170 	 loss = 0.056, train_acc = 1.000 (3.379 sec/step)
step 69180 	 loss = 0.337, train_acc = 0.900 (3.366 sec/step)
step 69190 	 loss = 0.065, train_acc = 1.000 (3.382 sec/step)
step 69200 	 loss = 0.240, train_acc = 0.900 (3.322 sec/step)
step 69210 	 loss = 0.004, train_acc = 1.000 (3.370 sec/step)
step 69220 	 loss = 0.120, train_acc = 1.000 (3.306 sec/step)
step 69230 	 loss = 0.087, train_acc = 1.000 (3.357 sec/step)
step 69240 	 loss = 0.311, train_acc = 0.900 (3.404 sec/step)
step 69250 	 loss = 0.008, train_acc = 1.000 (3.351 sec/step)
step 69260 	 loss = 0.087, train_acc = 0.900 (3.416 sec/step)
step 69270 	 loss = 0.166, train_acc = 0.900 (3.366 sec/step)
step 69280 	 loss = 0.264, train_acc = 0.900 (3.329 sec/step)
step 69290 	 loss = 0.009, train_acc = 1.000 (3.370 sec/step)
step 69300 	 loss = 0.037, train_acc = 1.000 (3.351 sec/step)
step 69310 	 loss = 0.012, train_acc = 1.000 (3.336 sec/step)
step 69320 	 loss = 0.341, train_acc = 0.900 (3.297 sec/step)
step 69330 	 loss = 0.034, train_acc = 1.000 (3.334 sec/step)
step 69340 	 loss = 0.000, train_acc = 1.000 (3.348 sec/step)
step 69350 	 loss = 0.561, train_acc = 0.900 (3.318 sec/step)
step 69360 	 loss = 0.057, train_acc = 1.000 (3.472 sec/step)
step 69370 	 loss = 0.068, train_acc = 1.000 (3.294 sec/step)
step 69380 	 loss = 0.003, train_acc = 1.000 (3.323 sec/step)
step 69390 	 loss = 0.043, train_acc = 1.000 (3.380 sec/step)
step 69400 	 loss = 0.000, train_acc = 1.000 (3.338 sec/step)
step 69410 	 loss = 0.856, train_acc = 0.800 (3.380 sec/step)
step 69420 	 loss = 0.002, train_acc = 1.000 (3.329 sec/step)
step 69430 	 loss = 0.036, train_acc = 1.000 (3.350 sec/step)
step 69440 	 loss = 0.095, train_acc = 1.000 (3.331 sec/step)
step 69450 	 loss = 0.000, train_acc = 1.000 (3.369 sec/step)
step 69460 	 loss = 0.095, train_acc = 1.000 (3.298 sec/step)
step 69470 	 loss = 0.013, train_acc = 1.000 (3.377 sec/step)
step 69480 	 loss = 0.216, train_acc = 1.000 (3.343 sec/step)
step 69490 	 loss = 0.009, train_acc = 1.000 (3.290 sec/step)
step 69500 	 loss = 0.007, train_acc = 1.000 (3.324 sec/step)
step 69510 	 loss = 0.000, train_acc = 1.000 (3.393 sec/step)
step 69520 	 loss = 0.171, train_acc = 0.900 (3.340 sec/step)
step 69530 	 loss = 0.007, train_acc = 1.000 (3.399 sec/step)
step 69540 	 loss = 0.335, train_acc = 0.900 (3.335 sec/step)
step 69550 	 loss = 0.003, train_acc = 1.000 (3.383 sec/step)
step 69560 	 loss = 0.062, train_acc = 1.000 (3.407 sec/step)
step 69570 	 loss = 1.152, train_acc = 0.800 (3.299 sec/step)
step 69580 	 loss = 0.001, train_acc = 1.000 (3.333 sec/step)
step 69590 	 loss = 0.453, train_acc = 0.900 (3.323 sec/step)
step 69600 	 loss = 0.001, train_acc = 1.000 (3.353 sec/step)
step 69610 	 loss = 0.050, train_acc = 1.000 (3.336 sec/step)
step 69620 	 loss = 0.008, train_acc = 1.000 (3.372 sec/step)
step 69630 	 loss = 0.003, train_acc = 1.000 (3.377 sec/step)
step 69640 	 loss = 0.137, train_acc = 0.900 (3.382 sec/step)
step 69650 	 loss = 0.041, train_acc = 1.000 (3.318 sec/step)
step 69660 	 loss = 0.234, train_acc = 0.900 (3.308 sec/step)
step 69670 	 loss = 0.137, train_acc = 1.000 (3.356 sec/step)
step 69680 	 loss = 0.095, train_acc = 1.000 (3.425 sec/step)
step 69690 	 loss = 0.003, train_acc = 1.000 (3.365 sec/step)
step 69700 	 loss = 0.025, train_acc = 1.000 (3.332 sec/step)
step 69710 	 loss = 0.013, train_acc = 1.000 (3.356 sec/step)
step 69720 	 loss = 0.031, train_acc = 1.000 (3.403 sec/step)
step 69730 	 loss = 0.467, train_acc = 0.800 (3.370 sec/step)
step 69740 	 loss = 0.003, train_acc = 1.000 (3.347 sec/step)
step 69750 	 loss = 0.666, train_acc = 0.900 (3.334 sec/step)
step 69760 	 loss = 0.042, train_acc = 1.000 (3.322 sec/step)
step 69770 	 loss = 1.016, train_acc = 0.900 (3.358 sec/step)
step 69780 	 loss = 0.095, train_acc = 1.000 (3.302 sec/step)
step 69790 	 loss = 0.057, train_acc = 1.000 (3.325 sec/step)
step 69800 	 loss = 0.797, train_acc = 0.700 (3.362 sec/step)
step 69810 	 loss = 0.239, train_acc = 0.900 (3.343 sec/step)
step 69820 	 loss = 0.107, train_acc = 0.900 (3.290 sec/step)
step 69830 	 loss = 0.448, train_acc = 0.900 (3.348 sec/step)
step 69840 	 loss = 0.176, train_acc = 0.900 (3.361 sec/step)
step 69850 	 loss = 0.045, train_acc = 1.000 (3.371 sec/step)
step 69860 	 loss = 0.771, train_acc = 0.900 (3.369 sec/step)
step 69870 	 loss = 0.376, train_acc = 0.900 (3.325 sec/step)
step 69880 	 loss = 0.083, train_acc = 1.000 (3.391 sec/step)
step 69890 	 loss = 0.007, train_acc = 1.000 (3.314 sec/step)
step 69900 	 loss = 0.722, train_acc = 0.800 (3.378 sec/step)
step 69910 	 loss = 0.846, train_acc = 0.900 (3.354 sec/step)
step 69920 	 loss = 0.007, train_acc = 1.000 (3.317 sec/step)
step 69930 	 loss = 0.004, train_acc = 1.000 (3.335 sec/step)
step 69940 	 loss = 0.249, train_acc = 0.900 (3.316 sec/step)
step 69950 	 loss = 0.066, train_acc = 1.000 (3.333 sec/step)
step 69960 	 loss = 0.000, train_acc = 1.000 (3.314 sec/step)
step 69970 	 loss = 0.360, train_acc = 0.900 (3.353 sec/step)
step 69980 	 loss = 0.173, train_acc = 0.900 (3.366 sec/step)
step 69990 	 loss = 0.726, train_acc = 0.900 (3.298 sec/step)
step 70000 	 loss = 0.007, train_acc = 1.000 (3.339 sec/step)
step 70010 	 loss = 0.334, train_acc = 0.900 (3.407 sec/step)
step 70020 	 loss = 0.000, train_acc = 1.000 (3.400 sec/step)
step 70030 	 loss = 0.296, train_acc = 0.900 (3.351 sec/step)
step 70040 	 loss = 0.480, train_acc = 0.900 (3.333 sec/step)
step 70050 	 loss = 0.202, train_acc = 0.900 (3.328 sec/step)
step 70060 	 loss = 0.613, train_acc = 0.800 (3.320 sec/step)
step 70070 	 loss = 0.003, train_acc = 1.000 (3.361 sec/step)
step 70080 	 loss = 0.045, train_acc = 1.000 (3.416 sec/step)
step 70090 	 loss = 0.066, train_acc = 1.000 (3.303 sec/step)
step 70100 	 loss = 0.004, train_acc = 1.000 (3.367 sec/step)
step 70110 	 loss = 0.107, train_acc = 0.900 (3.341 sec/step)
step 70120 	 loss = 0.055, train_acc = 1.000 (3.347 sec/step)
step 70130 	 loss = 0.652, train_acc = 0.900 (3.364 sec/step)
step 70140 	 loss = 1.502, train_acc = 0.800 (3.333 sec/step)
step 70150 	 loss = 0.071, train_acc = 1.000 (3.316 sec/step)
step 70160 	 loss = 0.019, train_acc = 1.000 (3.303 sec/step)
step 70170 	 loss = 0.108, train_acc = 0.900 (3.338 sec/step)
step 70180 	 loss = 0.000, train_acc = 1.000 (3.342 sec/step)
step 70190 	 loss = 0.513, train_acc = 0.900 (3.314 sec/step)
step 70200 	 loss = 0.048, train_acc = 1.000 (3.288 sec/step)
step 70210 	 loss = 0.052, train_acc = 1.000 (3.366 sec/step)
step 70220 	 loss = 0.025, train_acc = 1.000 (3.355 sec/step)
step 70230 	 loss = 0.078, train_acc = 1.000 (3.406 sec/step)
step 70240 	 loss = 0.254, train_acc = 0.800 (3.318 sec/step)
step 70250 	 loss = 0.000, train_acc = 1.000 (3.295 sec/step)
step 70260 	 loss = 1.741, train_acc = 0.900 (3.324 sec/step)
step 70270 	 loss = 0.033, train_acc = 1.000 (3.377 sec/step)
step 70280 	 loss = 1.482, train_acc = 0.800 (3.357 sec/step)
step 70290 	 loss = 0.578, train_acc = 0.800 (3.295 sec/step)
VALIDATION 	 acc = 0.508 (3.602 sec)
step 70300 	 loss = 0.260, train_acc = 0.900 (3.337 sec/step)
step 70310 	 loss = 0.279, train_acc = 0.900 (3.340 sec/step)
step 70320 	 loss = 0.211, train_acc = 0.900 (3.328 sec/step)
step 70330 	 loss = 1.099, train_acc = 0.600 (3.376 sec/step)
step 70340 	 loss = 0.008, train_acc = 1.000 (3.343 sec/step)
step 70350 	 loss = 0.314, train_acc = 0.900 (3.369 sec/step)
step 70360 	 loss = 0.001, train_acc = 1.000 (3.367 sec/step)
step 70370 	 loss = 0.003, train_acc = 1.000 (3.395 sec/step)
step 70380 	 loss = 0.002, train_acc = 1.000 (3.394 sec/step)
step 70390 	 loss = 0.132, train_acc = 0.900 (3.358 sec/step)
step 70400 	 loss = 0.000, train_acc = 1.000 (3.337 sec/step)
step 70410 	 loss = 0.000, train_acc = 1.000 (3.356 sec/step)
step 70420 	 loss = 0.010, train_acc = 1.000 (3.330 sec/step)
step 70430 	 loss = 0.000, train_acc = 1.000 (3.408 sec/step)
step 70440 	 loss = 0.004, train_acc = 1.000 (3.388 sec/step)
step 70450 	 loss = 0.322, train_acc = 0.900 (3.446 sec/step)
step 70460 	 loss = 0.068, train_acc = 1.000 (3.335 sec/step)
step 70470 	 loss = 0.600, train_acc = 0.900 (3.304 sec/step)
step 70480 	 loss = 0.876, train_acc = 0.800 (3.307 sec/step)
step 70490 	 loss = 0.291, train_acc = 0.900 (3.335 sec/step)
step 70500 	 loss = 0.559, train_acc = 0.800 (3.321 sec/step)
step 70510 	 loss = 0.087, train_acc = 1.000 (3.348 sec/step)
step 70520 	 loss = 0.535, train_acc = 0.800 (3.352 sec/step)
step 70530 	 loss = 0.182, train_acc = 0.900 (3.325 sec/step)
step 70540 	 loss = 0.040, train_acc = 1.000 (3.407 sec/step)
step 70550 	 loss = 0.180, train_acc = 0.900 (3.382 sec/step)
step 70560 	 loss = 0.223, train_acc = 1.000 (3.325 sec/step)
step 70570 	 loss = 0.104, train_acc = 0.900 (3.417 sec/step)
step 70580 	 loss = 0.053, train_acc = 1.000 (3.336 sec/step)
step 70590 	 loss = 0.969, train_acc = 0.800 (3.324 sec/step)
step 70600 	 loss = 0.055, train_acc = 1.000 (3.358 sec/step)
step 70610 	 loss = 0.113, train_acc = 1.000 (3.405 sec/step)
step 70620 	 loss = 0.010, train_acc = 1.000 (3.453 sec/step)
step 70630 	 loss = 1.448, train_acc = 0.900 (3.388 sec/step)
step 70640 	 loss = 0.108, train_acc = 1.000 (3.355 sec/step)
step 70650 	 loss = 0.049, train_acc = 1.000 (3.366 sec/step)
step 70660 	 loss = 0.315, train_acc = 0.900 (3.401 sec/step)
step 70670 	 loss = 0.078, train_acc = 0.900 (3.359 sec/step)
step 70680 	 loss = 0.015, train_acc = 1.000 (3.322 sec/step)
step 70690 	 loss = 0.000, train_acc = 1.000 (3.352 sec/step)
step 70700 	 loss = 1.544, train_acc = 0.800 (3.376 sec/step)
step 70710 	 loss = 0.112, train_acc = 1.000 (3.351 sec/step)
step 70720 	 loss = 0.305, train_acc = 0.900 (3.355 sec/step)
step 70730 	 loss = 0.538, train_acc = 0.800 (3.394 sec/step)
step 70740 	 loss = 0.136, train_acc = 1.000 (3.340 sec/step)
step 70750 	 loss = 0.025, train_acc = 1.000 (3.308 sec/step)
step 70760 	 loss = 0.365, train_acc = 0.800 (3.304 sec/step)
step 70770 	 loss = 0.206, train_acc = 0.900 (3.299 sec/step)
step 70780 	 loss = 0.003, train_acc = 1.000 (3.375 sec/step)
step 70790 	 loss = 0.474, train_acc = 0.900 (3.310 sec/step)
step 70800 	 loss = 0.020, train_acc = 1.000 (3.346 sec/step)
step 70810 	 loss = 0.080, train_acc = 1.000 (3.349 sec/step)
step 70820 	 loss = 0.143, train_acc = 0.900 (3.366 sec/step)
step 70830 	 loss = 0.097, train_acc = 1.000 (3.335 sec/step)
step 70840 	 loss = 0.066, train_acc = 1.000 (3.325 sec/step)
step 70850 	 loss = 0.009, train_acc = 1.000 (3.369 sec/step)
step 70860 	 loss = 0.013, train_acc = 1.000 (3.367 sec/step)
step 70870 	 loss = 0.001, train_acc = 1.000 (3.381 sec/step)
step 70880 	 loss = 0.147, train_acc = 0.900 (3.372 sec/step)
step 70890 	 loss = 0.220, train_acc = 0.900 (3.344 sec/step)
step 70900 	 loss = 0.660, train_acc = 0.900 (3.360 sec/step)
step 70910 	 loss = 0.162, train_acc = 0.900 (3.332 sec/step)
step 70920 	 loss = 0.055, train_acc = 1.000 (3.377 sec/step)
step 70930 	 loss = 2.714, train_acc = 0.800 (3.373 sec/step)
step 70940 	 loss = 0.084, train_acc = 1.000 (3.357 sec/step)
step 70950 	 loss = 1.315, train_acc = 0.800 (3.325 sec/step)
step 70960 	 loss = 0.446, train_acc = 0.900 (3.316 sec/step)
step 70970 	 loss = 0.932, train_acc = 0.900 (3.302 sec/step)
step 70980 	 loss = 0.014, train_acc = 1.000 (3.354 sec/step)
step 70990 	 loss = 0.293, train_acc = 0.900 (3.320 sec/step)
step 71000 	 loss = 0.279, train_acc = 1.000 (3.334 sec/step)
step 71010 	 loss = 0.483, train_acc = 0.900 (3.335 sec/step)
step 71020 	 loss = 0.384, train_acc = 0.900 (3.366 sec/step)
step 71030 	 loss = 0.387, train_acc = 0.900 (3.391 sec/step)
step 71040 	 loss = 0.008, train_acc = 1.000 (3.370 sec/step)
step 71050 	 loss = 0.104, train_acc = 1.000 (3.299 sec/step)
step 71060 	 loss = 0.111, train_acc = 0.900 (3.391 sec/step)
step 71070 	 loss = 0.000, train_acc = 1.000 (3.344 sec/step)
step 71080 	 loss = 0.893, train_acc = 0.900 (3.336 sec/step)
step 71090 	 loss = 0.001, train_acc = 1.000 (3.365 sec/step)
step 71100 	 loss = 0.004, train_acc = 1.000 (3.361 sec/step)
step 71110 	 loss = 0.306, train_acc = 0.900 (3.339 sec/step)
step 71120 	 loss = 0.139, train_acc = 0.900 (3.350 sec/step)
step 71130 	 loss = 0.313, train_acc = 0.800 (3.372 sec/step)
step 71140 	 loss = 0.362, train_acc = 0.900 (3.332 sec/step)
step 71150 	 loss = 0.338, train_acc = 1.000 (3.403 sec/step)
step 71160 	 loss = 0.200, train_acc = 0.900 (3.340 sec/step)
step 71170 	 loss = 0.076, train_acc = 1.000 (3.387 sec/step)
step 71180 	 loss = 0.061, train_acc = 1.000 (3.362 sec/step)
step 71190 	 loss = 0.057, train_acc = 1.000 (3.401 sec/step)
step 71200 	 loss = 0.248, train_acc = 0.900 (3.317 sec/step)
step 71210 	 loss = 0.005, train_acc = 1.000 (3.325 sec/step)
step 71220 	 loss = 0.000, train_acc = 1.000 (3.320 sec/step)
step 71230 	 loss = 0.008, train_acc = 1.000 (3.368 sec/step)
step 71240 	 loss = 0.164, train_acc = 0.900 (3.345 sec/step)
step 71250 	 loss = 0.009, train_acc = 1.000 (3.324 sec/step)
step 71260 	 loss = 0.384, train_acc = 0.900 (3.326 sec/step)
step 71270 	 loss = 0.064, train_acc = 1.000 (3.333 sec/step)
step 71280 	 loss = 0.000, train_acc = 1.000 (3.395 sec/step)
step 71290 	 loss = 0.042, train_acc = 1.000 (3.320 sec/step)
step 71300 	 loss = 0.078, train_acc = 1.000 (3.321 sec/step)
step 71310 	 loss = 0.289, train_acc = 0.900 (3.324 sec/step)
step 71320 	 loss = 0.839, train_acc = 0.900 (3.337 sec/step)
step 71330 	 loss = 0.002, train_acc = 1.000 (3.411 sec/step)
step 71340 	 loss = 0.145, train_acc = 0.900 (3.371 sec/step)
step 71350 	 loss = 0.120, train_acc = 0.900 (3.309 sec/step)
step 71360 	 loss = 0.362, train_acc = 0.800 (3.358 sec/step)
step 71370 	 loss = 0.003, train_acc = 1.000 (3.338 sec/step)
step 71380 	 loss = 0.006, train_acc = 1.000 (3.311 sec/step)
step 71390 	 loss = 0.010, train_acc = 1.000 (3.362 sec/step)
step 71400 	 loss = 1.456, train_acc = 0.800 (3.330 sec/step)
step 71410 	 loss = 0.003, train_acc = 1.000 (3.376 sec/step)
step 71420 	 loss = 0.000, train_acc = 1.000 (3.316 sec/step)
step 71430 	 loss = 0.511, train_acc = 0.800 (3.321 sec/step)
step 71440 	 loss = 0.001, train_acc = 1.000 (3.335 sec/step)
step 71450 	 loss = 0.949, train_acc = 0.900 (3.404 sec/step)
step 71460 	 loss = 0.002, train_acc = 1.000 (3.321 sec/step)
step 71470 	 loss = 0.273, train_acc = 0.900 (3.499 sec/step)
step 71480 	 loss = 0.000, train_acc = 1.000 (3.366 sec/step)
step 71490 	 loss = 0.022, train_acc = 1.000 (3.379 sec/step)
step 71500 	 loss = 0.704, train_acc = 0.800 (3.359 sec/step)
step 71510 	 loss = 0.032, train_acc = 1.000 (3.323 sec/step)
step 71520 	 loss = 0.040, train_acc = 1.000 (3.369 sec/step)
step 71530 	 loss = 0.042, train_acc = 1.000 (3.334 sec/step)
step 71540 	 loss = 1.366, train_acc = 0.900 (3.359 sec/step)
step 71550 	 loss = 0.266, train_acc = 0.900 (3.334 sec/step)
step 71560 	 loss = 1.424, train_acc = 0.800 (3.371 sec/step)
step 71570 	 loss = 0.029, train_acc = 1.000 (3.353 sec/step)
step 71580 	 loss = 0.164, train_acc = 0.900 (3.293 sec/step)
step 71590 	 loss = 0.104, train_acc = 0.900 (3.390 sec/step)
step 71600 	 loss = 0.003, train_acc = 1.000 (3.361 sec/step)
step 71610 	 loss = 0.281, train_acc = 0.800 (3.298 sec/step)
step 71620 	 loss = 0.537, train_acc = 0.700 (3.369 sec/step)
step 71630 	 loss = 0.167, train_acc = 0.900 (3.346 sec/step)
step 71640 	 loss = 0.045, train_acc = 1.000 (3.374 sec/step)
step 71650 	 loss = 1.603, train_acc = 0.800 (3.383 sec/step)
step 71660 	 loss = 0.002, train_acc = 1.000 (3.362 sec/step)
step 71670 	 loss = 0.005, train_acc = 1.000 (3.319 sec/step)
step 71680 	 loss = 0.015, train_acc = 1.000 (3.397 sec/step)
step 71690 	 loss = 0.081, train_acc = 0.900 (3.383 sec/step)
step 71700 	 loss = 0.001, train_acc = 1.000 (3.320 sec/step)
step 71710 	 loss = 0.207, train_acc = 0.900 (3.377 sec/step)
step 71720 	 loss = 0.236, train_acc = 0.900 (3.366 sec/step)
step 71730 	 loss = 0.001, train_acc = 1.000 (3.305 sec/step)
step 71740 	 loss = 0.256, train_acc = 0.900 (3.315 sec/step)
step 71750 	 loss = 0.048, train_acc = 1.000 (3.374 sec/step)
step 71760 	 loss = 0.256, train_acc = 0.900 (3.350 sec/step)
step 71770 	 loss = 0.167, train_acc = 0.900 (3.367 sec/step)
step 71780 	 loss = 0.198, train_acc = 0.900 (3.333 sec/step)
step 71790 	 loss = 0.063, train_acc = 1.000 (3.341 sec/step)
step 71800 	 loss = 0.010, train_acc = 1.000 (3.347 sec/step)
step 71810 	 loss = 1.281, train_acc = 0.800 (3.302 sec/step)
step 71820 	 loss = 0.360, train_acc = 0.900 (3.334 sec/step)
step 71830 	 loss = 0.009, train_acc = 1.000 (3.302 sec/step)
step 71840 	 loss = 0.299, train_acc = 0.900 (3.360 sec/step)
step 71850 	 loss = 0.088, train_acc = 1.000 (3.356 sec/step)
step 71860 	 loss = 0.015, train_acc = 1.000 (3.341 sec/step)
step 71870 	 loss = 2.245, train_acc = 0.900 (3.340 sec/step)
step 71880 	 loss = 0.053, train_acc = 1.000 (3.334 sec/step)
step 71890 	 loss = 0.066, train_acc = 1.000 (3.331 sec/step)
step 71900 	 loss = 0.845, train_acc = 0.900 (3.416 sec/step)
step 71910 	 loss = 0.259, train_acc = 0.900 (3.312 sec/step)
step 71920 	 loss = 0.060, train_acc = 1.000 (3.344 sec/step)
step 71930 	 loss = 1.144, train_acc = 0.800 (3.314 sec/step)
step 71940 	 loss = 0.041, train_acc = 1.000 (3.354 sec/step)
step 71950 	 loss = 0.005, train_acc = 1.000 (3.368 sec/step)
step 71960 	 loss = 0.524, train_acc = 0.900 (3.319 sec/step)
step 71970 	 loss = 0.001, train_acc = 1.000 (3.377 sec/step)
step 71980 	 loss = 0.020, train_acc = 1.000 (3.365 sec/step)
step 71990 	 loss = 0.252, train_acc = 0.800 (3.334 sec/step)
step 72000 	 loss = 0.051, train_acc = 1.000 (3.379 sec/step)
step 72010 	 loss = 0.416, train_acc = 0.800 (3.378 sec/step)
step 72020 	 loss = 0.003, train_acc = 1.000 (3.299 sec/step)
step 72030 	 loss = 0.136, train_acc = 0.900 (3.319 sec/step)
step 72040 	 loss = 0.076, train_acc = 1.000 (3.341 sec/step)
step 72050 	 loss = 0.004, train_acc = 1.000 (3.403 sec/step)
step 72060 	 loss = 0.003, train_acc = 1.000 (3.356 sec/step)
step 72070 	 loss = 0.016, train_acc = 1.000 (3.370 sec/step)
step 72080 	 loss = 0.154, train_acc = 0.900 (3.402 sec/step)
step 72090 	 loss = 0.366, train_acc = 0.900 (3.364 sec/step)
step 72100 	 loss = 0.226, train_acc = 0.900 (3.306 sec/step)
step 72110 	 loss = 0.001, train_acc = 1.000 (3.334 sec/step)
step 72120 	 loss = 0.410, train_acc = 0.900 (3.474 sec/step)
step 72130 	 loss = 0.020, train_acc = 1.000 (3.333 sec/step)
step 72140 	 loss = 0.004, train_acc = 1.000 (3.367 sec/step)
step 72150 	 loss = 0.026, train_acc = 1.000 (3.304 sec/step)
step 72160 	 loss = 0.020, train_acc = 1.000 (3.394 sec/step)
step 72170 	 loss = 0.041, train_acc = 1.000 (3.333 sec/step)
step 72180 	 loss = 0.014, train_acc = 1.000 (3.349 sec/step)
step 72190 	 loss = 0.119, train_acc = 0.900 (3.308 sec/step)
VALIDATION 	 acc = 0.537 (3.616 sec)
step 72200 	 loss = 0.045, train_acc = 1.000 (3.335 sec/step)
step 72210 	 loss = 0.088, train_acc = 1.000 (3.391 sec/step)
step 72220 	 loss = 0.050, train_acc = 1.000 (3.359 sec/step)
step 72230 	 loss = 0.001, train_acc = 1.000 (3.330 sec/step)
step 72240 	 loss = 0.000, train_acc = 1.000 (3.313 sec/step)
step 72250 	 loss = 0.031, train_acc = 1.000 (3.332 sec/step)
step 72260 	 loss = 0.010, train_acc = 1.000 (3.354 sec/step)
step 72270 	 loss = 0.002, train_acc = 1.000 (3.350 sec/step)
step 72280 	 loss = 0.061, train_acc = 1.000 (3.318 sec/step)
step 72290 	 loss = 0.095, train_acc = 1.000 (3.362 sec/step)
step 72300 	 loss = 0.006, train_acc = 1.000 (3.294 sec/step)
step 72310 	 loss = 0.034, train_acc = 1.000 (3.331 sec/step)
step 72320 	 loss = 0.601, train_acc = 0.900 (3.347 sec/step)
step 72330 	 loss = 0.532, train_acc = 0.800 (3.331 sec/step)
step 72340 	 loss = 0.002, train_acc = 1.000 (3.310 sec/step)
step 72350 	 loss = 0.290, train_acc = 0.900 (3.311 sec/step)
step 72360 	 loss = 0.041, train_acc = 1.000 (3.341 sec/step)
step 72370 	 loss = 0.297, train_acc = 0.900 (3.393 sec/step)
step 72380 	 loss = 0.503, train_acc = 0.900 (3.374 sec/step)
step 72390 	 loss = 0.057, train_acc = 1.000 (3.424 sec/step)
step 72400 	 loss = 0.377, train_acc = 0.800 (3.333 sec/step)
step 72410 	 loss = 0.155, train_acc = 0.900 (3.402 sec/step)
step 72420 	 loss = 0.075, train_acc = 1.000 (3.287 sec/step)
step 72430 	 loss = 0.000, train_acc = 1.000 (3.411 sec/step)
step 72440 	 loss = 0.000, train_acc = 1.000 (3.367 sec/step)
step 72450 	 loss = 0.300, train_acc = 0.900 (3.389 sec/step)
step 72460 	 loss = 0.997, train_acc = 0.700 (3.375 sec/step)
step 72470 	 loss = 0.298, train_acc = 0.900 (3.339 sec/step)
step 72480 	 loss = 0.004, train_acc = 1.000 (3.296 sec/step)
step 72490 	 loss = 0.011, train_acc = 1.000 (3.323 sec/step)
step 72500 	 loss = 0.020, train_acc = 1.000 (3.399 sec/step)
step 72510 	 loss = 0.019, train_acc = 1.000 (3.409 sec/step)
step 72520 	 loss = 0.106, train_acc = 1.000 (3.389 sec/step)
step 72530 	 loss = 0.003, train_acc = 1.000 (3.335 sec/step)
step 72540 	 loss = 0.000, train_acc = 1.000 (3.343 sec/step)
step 72550 	 loss = 0.000, train_acc = 1.000 (3.309 sec/step)
step 72560 	 loss = 0.043, train_acc = 1.000 (3.370 sec/step)
step 72570 	 loss = 0.456, train_acc = 0.900 (3.382 sec/step)
step 72580 	 loss = 0.464, train_acc = 0.900 (3.313 sec/step)
step 72590 	 loss = 0.016, train_acc = 1.000 (3.347 sec/step)
step 72600 	 loss = 0.135, train_acc = 0.900 (3.394 sec/step)
step 72610 	 loss = 0.038, train_acc = 1.000 (3.369 sec/step)
step 72620 	 loss = 0.230, train_acc = 0.900 (3.325 sec/step)
step 72630 	 loss = 0.573, train_acc = 0.900 (3.315 sec/step)
step 72640 	 loss = 0.072, train_acc = 1.000 (3.349 sec/step)
step 72650 	 loss = 0.000, train_acc = 1.000 (3.389 sec/step)
step 72660 	 loss = 0.051, train_acc = 1.000 (3.342 sec/step)
step 72670 	 loss = 0.032, train_acc = 1.000 (3.357 sec/step)
step 72680 	 loss = 0.111, train_acc = 0.900 (3.306 sec/step)
step 72690 	 loss = 0.131, train_acc = 0.900 (3.414 sec/step)
step 72700 	 loss = 0.001, train_acc = 1.000 (3.366 sec/step)
step 72710 	 loss = 0.004, train_acc = 1.000 (3.362 sec/step)
step 72720 	 loss = 0.148, train_acc = 0.900 (3.391 sec/step)
step 72730 	 loss = 0.026, train_acc = 1.000 (3.330 sec/step)
step 72740 	 loss = 0.126, train_acc = 0.900 (3.298 sec/step)
step 72750 	 loss = 0.015, train_acc = 1.000 (3.393 sec/step)
step 72760 	 loss = 0.279, train_acc = 0.900 (3.393 sec/step)
step 72770 	 loss = 0.021, train_acc = 1.000 (3.351 sec/step)
step 72780 	 loss = 0.438, train_acc = 0.900 (3.377 sec/step)
step 72790 	 loss = 0.494, train_acc = 0.800 (3.338 sec/step)
step 72800 	 loss = 0.405, train_acc = 0.900 (3.380 sec/step)
step 72810 	 loss = 0.004, train_acc = 1.000 (3.350 sec/step)
step 72820 	 loss = 0.000, train_acc = 1.000 (3.394 sec/step)
step 72830 	 loss = 0.546, train_acc = 0.900 (3.375 sec/step)
step 72840 	 loss = 0.059, train_acc = 1.000 (3.353 sec/step)
step 72850 	 loss = 0.451, train_acc = 0.900 (3.383 sec/step)
step 72860 	 loss = 0.088, train_acc = 0.900 (3.338 sec/step)
step 72870 	 loss = 0.263, train_acc = 1.000 (3.346 sec/step)
step 72880 	 loss = 0.487, train_acc = 0.900 (3.304 sec/step)
step 72890 	 loss = 0.253, train_acc = 0.900 (3.380 sec/step)
step 72900 	 loss = 0.004, train_acc = 1.000 (3.330 sec/step)
step 72910 	 loss = 0.032, train_acc = 1.000 (3.374 sec/step)
step 72920 	 loss = 0.136, train_acc = 1.000 (3.333 sec/step)
step 72930 	 loss = 0.015, train_acc = 1.000 (3.358 sec/step)
step 72940 	 loss = 0.001, train_acc = 1.000 (3.317 sec/step)
step 72950 	 loss = 0.066, train_acc = 1.000 (3.345 sec/step)
step 72960 	 loss = 0.852, train_acc = 0.800 (3.390 sec/step)
step 72970 	 loss = 0.017, train_acc = 1.000 (3.335 sec/step)
step 72980 	 loss = 0.267, train_acc = 0.900 (3.375 sec/step)
step 72990 	 loss = 0.003, train_acc = 1.000 (3.371 sec/step)
step 73000 	 loss = 0.056, train_acc = 1.000 (3.316 sec/step)
step 73010 	 loss = 0.797, train_acc = 0.900 (3.328 sec/step)
step 73020 	 loss = 0.095, train_acc = 1.000 (3.363 sec/step)
step 73030 	 loss = 0.145, train_acc = 0.900 (3.367 sec/step)
step 73040 	 loss = 2.310, train_acc = 0.800 (3.309 sec/step)
step 73050 	 loss = 0.415, train_acc = 0.800 (3.369 sec/step)
step 73060 	 loss = 0.085, train_acc = 1.000 (3.294 sec/step)
step 73070 	 loss = 0.027, train_acc = 1.000 (3.325 sec/step)
step 73080 	 loss = 0.017, train_acc = 1.000 (3.401 sec/step)
step 73090 	 loss = 0.097, train_acc = 1.000 (3.291 sec/step)
step 73100 	 loss = 0.193, train_acc = 0.900 (3.361 sec/step)
step 73110 	 loss = 0.079, train_acc = 1.000 (3.338 sec/step)
step 73120 	 loss = 0.023, train_acc = 1.000 (3.362 sec/step)
step 73130 	 loss = 0.225, train_acc = 0.900 (3.342 sec/step)
step 73140 	 loss = 0.004, train_acc = 1.000 (3.330 sec/step)
step 73150 	 loss = 0.000, train_acc = 1.000 (3.346 sec/step)
step 73160 	 loss = 0.001, train_acc = 1.000 (3.311 sec/step)
step 73170 	 loss = 0.476, train_acc = 0.800 (3.327 sec/step)
step 73180 	 loss = 0.396, train_acc = 0.900 (3.364 sec/step)
step 73190 	 loss = 0.845, train_acc = 0.900 (3.368 sec/step)
step 73200 	 loss = 0.053, train_acc = 1.000 (3.365 sec/step)
step 73210 	 loss = 0.131, train_acc = 0.900 (3.366 sec/step)
step 73220 	 loss = 0.001, train_acc = 1.000 (3.320 sec/step)
step 73230 	 loss = 0.003, train_acc = 1.000 (3.367 sec/step)
step 73240 	 loss = 0.102, train_acc = 1.000 (3.337 sec/step)
step 73250 	 loss = 0.119, train_acc = 0.900 (3.313 sec/step)
step 73260 	 loss = 0.370, train_acc = 0.800 (3.308 sec/step)
step 73270 	 loss = 0.126, train_acc = 0.900 (3.330 sec/step)
step 73280 	 loss = 1.347, train_acc = 0.800 (3.324 sec/step)
step 73290 	 loss = 0.105, train_acc = 0.900 (3.366 sec/step)
step 73300 	 loss = 0.043, train_acc = 1.000 (3.365 sec/step)
step 73310 	 loss = 0.469, train_acc = 0.900 (3.316 sec/step)
step 73320 	 loss = 0.130, train_acc = 0.900 (3.313 sec/step)
step 73330 	 loss = 0.024, train_acc = 1.000 (3.339 sec/step)
step 73340 	 loss = 0.153, train_acc = 0.900 (3.322 sec/step)
step 73350 	 loss = 0.081, train_acc = 0.900 (3.344 sec/step)
step 73360 	 loss = 0.268, train_acc = 0.900 (3.307 sec/step)
step 73370 	 loss = 0.122, train_acc = 0.900 (3.360 sec/step)
step 73380 	 loss = 0.022, train_acc = 1.000 (3.467 sec/step)
step 73390 	 loss = 0.002, train_acc = 1.000 (3.331 sec/step)
step 73400 	 loss = 1.210, train_acc = 0.900 (3.358 sec/step)
step 73410 	 loss = 0.438, train_acc = 0.900 (3.331 sec/step)
step 73420 	 loss = 0.381, train_acc = 0.900 (3.381 sec/step)
step 73430 	 loss = 0.068, train_acc = 1.000 (3.386 sec/step)
step 73440 	 loss = 0.082, train_acc = 0.900 (3.322 sec/step)
step 73450 	 loss = 0.198, train_acc = 0.900 (3.339 sec/step)
step 73460 	 loss = 0.000, train_acc = 1.000 (3.365 sec/step)
step 73470 	 loss = 0.505, train_acc = 0.800 (3.378 sec/step)
step 73480 	 loss = 0.129, train_acc = 1.000 (3.302 sec/step)
step 73490 	 loss = 1.487, train_acc = 0.700 (3.301 sec/step)
step 73500 	 loss = 0.944, train_acc = 0.800 (3.356 sec/step)
step 73510 	 loss = 0.113, train_acc = 1.000 (3.364 sec/step)
step 73520 	 loss = 0.001, train_acc = 1.000 (3.322 sec/step)
step 73530 	 loss = 0.110, train_acc = 1.000 (3.353 sec/step)
step 73540 	 loss = 0.281, train_acc = 0.900 (3.341 sec/step)
step 73550 	 loss = 0.681, train_acc = 0.800 (3.308 sec/step)
step 73560 	 loss = 0.016, train_acc = 1.000 (3.333 sec/step)
step 73570 	 loss = 0.145, train_acc = 0.900 (3.361 sec/step)
step 73580 	 loss = 0.168, train_acc = 0.900 (3.288 sec/step)
step 73590 	 loss = 0.834, train_acc = 0.700 (3.345 sec/step)
step 73600 	 loss = 0.021, train_acc = 1.000 (3.309 sec/step)
step 73610 	 loss = 0.530, train_acc = 0.900 (3.314 sec/step)
step 73620 	 loss = 0.189, train_acc = 0.900 (3.378 sec/step)
step 73630 	 loss = 0.017, train_acc = 1.000 (3.334 sec/step)
step 73640 	 loss = 0.009, train_acc = 1.000 (3.354 sec/step)
step 73650 	 loss = 0.003, train_acc = 1.000 (3.303 sec/step)
step 73660 	 loss = 0.005, train_acc = 1.000 (3.338 sec/step)
step 73670 	 loss = 0.533, train_acc = 0.900 (3.382 sec/step)
step 73680 	 loss = 0.409, train_acc = 0.800 (3.327 sec/step)
step 73690 	 loss = 0.000, train_acc = 1.000 (3.317 sec/step)
step 73700 	 loss = 0.010, train_acc = 1.000 (3.359 sec/step)
step 73710 	 loss = 0.010, train_acc = 1.000 (3.361 sec/step)
step 73720 	 loss = 0.053, train_acc = 1.000 (3.370 sec/step)
step 73730 	 loss = 0.073, train_acc = 1.000 (3.392 sec/step)
step 73740 	 loss = 0.069, train_acc = 1.000 (3.349 sec/step)
step 73750 	 loss = 0.072, train_acc = 1.000 (3.356 sec/step)
step 73760 	 loss = 0.011, train_acc = 1.000 (3.365 sec/step)
step 73770 	 loss = 0.169, train_acc = 0.900 (3.329 sec/step)
step 73780 	 loss = 0.049, train_acc = 1.000 (3.319 sec/step)
step 73790 	 loss = 0.003, train_acc = 1.000 (3.365 sec/step)
step 73800 	 loss = 0.040, train_acc = 1.000 (3.279 sec/step)
step 73810 	 loss = 0.002, train_acc = 1.000 (3.352 sec/step)
step 73820 	 loss = 0.000, train_acc = 1.000 (3.357 sec/step)
step 73830 	 loss = 0.086, train_acc = 1.000 (3.346 sec/step)
step 73840 	 loss = 1.437, train_acc = 0.900 (3.369 sec/step)
step 73850 	 loss = 0.612, train_acc = 0.800 (3.285 sec/step)
step 73860 	 loss = 0.553, train_acc = 0.800 (3.297 sec/step)
step 73870 	 loss = 0.000, train_acc = 1.000 (3.374 sec/step)
step 73880 	 loss = 0.000, train_acc = 1.000 (3.326 sec/step)
step 73890 	 loss = 0.004, train_acc = 1.000 (3.365 sec/step)
step 73900 	 loss = 0.348, train_acc = 0.900 (3.375 sec/step)
step 73910 	 loss = 0.000, train_acc = 1.000 (3.406 sec/step)
step 73920 	 loss = 0.009, train_acc = 1.000 (3.324 sec/step)
step 73930 	 loss = 0.153, train_acc = 0.900 (3.315 sec/step)
step 73940 	 loss = 0.006, train_acc = 1.000 (3.376 sec/step)
step 73950 	 loss = 0.279, train_acc = 0.900 (3.356 sec/step)
step 73960 	 loss = 0.042, train_acc = 1.000 (3.359 sec/step)
step 73970 	 loss = 0.000, train_acc = 1.000 (3.478 sec/step)
step 73980 	 loss = 0.568, train_acc = 0.900 (3.320 sec/step)
step 73990 	 loss = 0.036, train_acc = 1.000 (3.320 sec/step)
step 74000 	 loss = 0.001, train_acc = 1.000 (3.383 sec/step)
step 74010 	 loss = 0.009, train_acc = 1.000 (3.365 sec/step)
step 74020 	 loss = 0.033, train_acc = 1.000 (3.318 sec/step)
step 74030 	 loss = 0.001, train_acc = 1.000 (3.337 sec/step)
step 74040 	 loss = 0.169, train_acc = 0.900 (3.365 sec/step)
step 74050 	 loss = 0.521, train_acc = 0.900 (3.323 sec/step)
step 74060 	 loss = 0.034, train_acc = 1.000 (3.389 sec/step)
step 74070 	 loss = 0.036, train_acc = 1.000 (3.370 sec/step)
step 74080 	 loss = 0.230, train_acc = 0.900 (3.337 sec/step)
step 74090 	 loss = 0.594, train_acc = 0.800 (3.366 sec/step)
VALIDATION 	 acc = 0.567 (3.636 sec)
New Best Accuracy 0.567 > Old Best 0.565.  Saving...
The checkpoint has been created.
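(Aside on the snapshotting behaviour reflected in the two lines above: a checkpoint is written only when the validation accuracy beats the previous best, and at most SNAPSHOT_MAX snapshots are kept around. Below is a minimal sketch of how such a check could be wired up with a plain tf.train.Saver; the names maybe_snapshot, val_acc and best_val_acc are hypothetical, and only SNAPSHOT_DIR / SNAPSHOT_MAX come from the hyperparameter cell — this is not necessarily how the notebook's own save helper works.)

saver = tf.train.Saver(max_to_keep=SNAPSHOT_MAX)  # retain only the last SNAPSHOT_MAX checkpoints
best_val_acc = 0.0

def maybe_snapshot(sess, val_acc, step):
    """Hypothetical helper: save a checkpoint only when validation accuracy improves."""
    global best_val_acc
    if val_acc > best_val_acc:
        print('New Best Accuracy %.3f > Old Best %.3f.  Saving...' % (val_acc, best_val_acc))
        best_val_acc = val_acc
        saver.save(sess, os.path.join(SNAPSHOT_DIR, 'model.ckpt'), global_step=step)
        print('The checkpoint has been created.')

(Called once per validation pass, e.g. maybe_snapshot(sess, acc, step), this reproduces the "New Best Accuracy ... Saving..." / "The checkpoint has been created." pattern seen in the log.)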
step 74100 	 loss = 0.001, train_acc = 1.000 (3.373 sec/step)
step 74110 	 loss = 0.001, train_acc = 1.000 (3.382 sec/step)
step 74120 	 loss = 0.001, train_acc = 1.000 (3.396 sec/step)
step 74130 	 loss = 0.585, train_acc = 0.800 (3.354 sec/step)
step 74140 	 loss = 0.004, train_acc = 1.000 (3.389 sec/step)
step 74150 	 loss = 0.239, train_acc = 0.800 (3.380 sec/step)
step 74160 	 loss = 0.272, train_acc = 0.900 (3.352 sec/step)
step 74170 	 loss = 0.051, train_acc = 1.000 (3.303 sec/step)
step 74180 	 loss = 0.000, train_acc = 1.000 (3.327 sec/step)
step 74190 	 loss = 0.358, train_acc = 0.900 (3.315 sec/step)
step 74200 	 loss = 0.014, train_acc = 1.000 (3.346 sec/step)
step 74210 	 loss = 0.026, train_acc = 1.000 (3.318 sec/step)
step 74220 	 loss = 0.898, train_acc = 0.900 (3.348 sec/step)
step 74230 	 loss = 0.163, train_acc = 0.900 (3.374 sec/step)
step 74240 	 loss = 0.040, train_acc = 1.000 (3.348 sec/step)
step 74250 	 loss = 0.204, train_acc = 0.900 (3.369 sec/step)
step 74260 	 loss = 0.044, train_acc = 1.000 (3.378 sec/step)
step 74270 	 loss = 0.004, train_acc = 1.000 (3.299 sec/step)
step 74280 	 loss = 0.010, train_acc = 1.000 (3.343 sec/step)
step 74290 	 loss = 0.558, train_acc = 0.900 (3.384 sec/step)
step 74300 	 loss = 0.354, train_acc = 0.900 (3.363 sec/step)
step 74310 	 loss = 0.055, train_acc = 1.000 (3.387 sec/step)
step 74320 	 loss = 0.000, train_acc = 1.000 (3.342 sec/step)
step 74330 	 loss = 0.001, train_acc = 1.000 (3.388 sec/step)
step 74340 	 loss = 0.072, train_acc = 1.000 (3.351 sec/step)
step 74350 	 loss = 0.007, train_acc = 1.000 (3.303 sec/step)
step 74360 	 loss = 0.151, train_acc = 1.000 (3.338 sec/step)
step 74370 	 loss = 0.064, train_acc = 1.000 (3.359 sec/step)
step 74380 	 loss = 0.067, train_acc = 1.000 (3.370 sec/step)
step 74390 	 loss = 1.199, train_acc = 0.600 (3.294 sec/step)
step 74400 	 loss = 1.026, train_acc = 0.700 (3.335 sec/step)
step 74410 	 loss = 0.694, train_acc = 0.800 (3.325 sec/step)
step 74420 	 loss = 0.676, train_acc = 0.700 (3.406 sec/step)
step 74430 	 loss = 1.819, train_acc = 0.700 (3.343 sec/step)
step 74440 	 loss = 2.009, train_acc = 0.700 (3.384 sec/step)
step 74450 	 loss = 0.003, train_acc = 1.000 (3.327 sec/step)
step 74460 	 loss = 0.164, train_acc = 0.900 (3.351 sec/step)
step 74470 	 loss = 0.005, train_acc = 1.000 (3.332 sec/step)
step 74480 	 loss = 0.038, train_acc = 1.000 (3.312 sec/step)
step 74490 	 loss = 0.304, train_acc = 0.900 (3.403 sec/step)
step 74500 	 loss = 0.228, train_acc = 0.900 (3.333 sec/step)
step 74510 	 loss = 0.203, train_acc = 0.900 (3.341 sec/step)
step 74520 	 loss = 0.035, train_acc = 1.000 (3.316 sec/step)
step 74530 	 loss = 0.009, train_acc = 1.000 (3.328 sec/step)
step 74540 	 loss = 0.504, train_acc = 0.900 (3.335 sec/step)
step 74550 	 loss = 0.658, train_acc = 0.800 (3.375 sec/step)
step 74560 	 loss = 0.130, train_acc = 0.900 (3.367 sec/step)
step 74570 	 loss = 0.531, train_acc = 0.900 (3.313 sec/step)
step 74580 	 loss = 0.356, train_acc = 0.900 (3.314 sec/step)
step 74590 	 loss = 0.018, train_acc = 1.000 (3.302 sec/step)
step 74600 	 loss = 0.007, train_acc = 1.000 (3.367 sec/step)
step 74610 	 loss = 0.121, train_acc = 0.900 (3.383 sec/step)
step 74620 	 loss = 0.089, train_acc = 1.000 (3.403 sec/step)
step 74630 	 loss = 0.421, train_acc = 0.900 (3.323 sec/step)
step 74640 	 loss = 0.279, train_acc = 0.800 (3.314 sec/step)
step 74650 	 loss = 0.232, train_acc = 0.900 (3.342 sec/step)
step 74660 	 loss = 0.000, train_acc = 1.000 (3.393 sec/step)
step 74670 	 loss = 0.076, train_acc = 0.900 (3.360 sec/step)
step 74680 	 loss = 0.388, train_acc = 0.900 (3.360 sec/step)
step 74690 	 loss = 0.006, train_acc = 1.000 (3.395 sec/step)
step 74700 	 loss = 0.043, train_acc = 1.000 (3.314 sec/step)
step 74710 	 loss = 0.306, train_acc = 0.900 (3.347 sec/step)
step 74720 	 loss = 0.014, train_acc = 1.000 (3.293 sec/step)
step 74730 	 loss = 0.253, train_acc = 0.900 (3.327 sec/step)
step 74740 	 loss = 0.022, train_acc = 1.000 (3.353 sec/step)
step 74750 	 loss = 0.415, train_acc = 0.900 (3.396 sec/step)
step 74760 	 loss = 0.000, train_acc = 1.000 (3.396 sec/step)
step 74770 	 loss = 0.403, train_acc = 0.900 (3.352 sec/step)
step 74780 	 loss = 0.001, train_acc = 1.000 (3.308 sec/step)
step 74790 	 loss = 1.747, train_acc = 0.900 (3.347 sec/step)
step 74800 	 loss = 0.021, train_acc = 1.000 (3.334 sec/step)
step 74810 	 loss = 0.294, train_acc = 0.900 (3.357 sec/step)
step 74820 	 loss = 0.001, train_acc = 1.000 (3.315 sec/step)
step 74830 	 loss = 0.952, train_acc = 0.900 (3.368 sec/step)
step 74840 	 loss = 0.000, train_acc = 1.000 (3.351 sec/step)
step 74850 	 loss = 0.091, train_acc = 1.000 (3.379 sec/step)
step 74860 	 loss = 0.016, train_acc = 1.000 (3.343 sec/step)
step 74870 	 loss = 0.001, train_acc = 1.000 (3.392 sec/step)
step 74880 	 loss = 0.055, train_acc = 1.000 (3.375 sec/step)
step 74890 	 loss = 0.000, train_acc = 1.000 (3.443 sec/step)
step 74900 	 loss = 0.652, train_acc = 0.800 (3.318 sec/step)
step 74910 	 loss = 0.221, train_acc = 0.900 (3.295 sec/step)
step 74920 	 loss = 0.078, train_acc = 1.000 (3.314 sec/step)
step 74930 	 loss = 0.006, train_acc = 1.000 (3.349 sec/step)
step 74940 	 loss = 0.229, train_acc = 0.900 (3.375 sec/step)
step 74950 	 loss = 0.000, train_acc = 1.000 (3.361 sec/step)
step 74960 	 loss = 0.007, train_acc = 1.000 (3.388 sec/step)
step 74970 	 loss = 0.023, train_acc = 1.000 (3.293 sec/step)
step 74980 	 loss = 0.000, train_acc = 1.000 (3.370 sec/step)
step 74990 	 loss = 0.010, train_acc = 1.000 (3.375 sec/step)
step 75000 	 loss = 0.086, train_acc = 0.900 (3.343 sec/step)
step 75010 	 loss = 0.005, train_acc = 1.000 (3.325 sec/step)
step 75020 	 loss = 0.006, train_acc = 1.000 (3.364 sec/step)
step 75030 	 loss = 0.009, train_acc = 1.000 (3.295 sec/step)
step 75040 	 loss = 0.884, train_acc = 0.800 (3.351 sec/step)
step 75050 	 loss = 0.092, train_acc = 0.900 (3.324 sec/step)
step 75060 	 loss = 0.001, train_acc = 1.000 (3.456 sec/step)
step 75070 	 loss = 0.007, train_acc = 1.000 (3.367 sec/step)
step 75080 	 loss = 0.544, train_acc = 0.900 (3.386 sec/step)
step 75090 	 loss = 0.435, train_acc = 0.700 (3.349 sec/step)
step 75100 	 loss = 0.027, train_acc = 1.000 (3.340 sec/step)
step 75110 	 loss = 0.376, train_acc = 0.800 (3.369 sec/step)
step 75120 	 loss = 0.012, train_acc = 1.000 (3.334 sec/step)
step 75130 	 loss = 0.002, train_acc = 1.000 (3.343 sec/step)
step 75140 	 loss = 0.131, train_acc = 0.900 (3.350 sec/step)
step 75150 	 loss = 0.049, train_acc = 1.000 (3.350 sec/step)
step 75160 	 loss = 0.858, train_acc = 0.900 (3.371 sec/step)
step 75170 	 loss = 0.157, train_acc = 0.900 (3.389 sec/step)
step 75180 	 loss = 0.303, train_acc = 0.900 (3.373 sec/step)
step 75190 	 loss = 0.030, train_acc = 1.000 (3.351 sec/step)
step 75200 	 loss = 0.147, train_acc = 0.900 (3.348 sec/step)
step 75210 	 loss = 0.001, train_acc = 1.000 (3.354 sec/step)
step 75220 	 loss = 0.001, train_acc = 1.000 (3.399 sec/step)
step 75230 	 loss = 0.564, train_acc = 0.800 (3.377 sec/step)
step 75240 	 loss = 0.134, train_acc = 0.900 (3.386 sec/step)
step 75250 	 loss = 0.003, train_acc = 1.000 (3.319 sec/step)
step 75260 	 loss = 0.000, train_acc = 1.000 (3.349 sec/step)
step 75270 	 loss = 0.172, train_acc = 0.900 (3.417 sec/step)
step 75280 	 loss = 0.052, train_acc = 1.000 (3.368 sec/step)
step 75290 	 loss = 0.004, train_acc = 1.000 (3.397 sec/step)
step 75300 	 loss = 0.003, train_acc = 1.000 (3.422 sec/step)
step 75310 	 loss = 0.035, train_acc = 1.000 (3.327 sec/step)
step 75320 	 loss = 0.003, train_acc = 1.000 (3.359 sec/step)
step 75330 	 loss = 0.450, train_acc = 0.900 (3.330 sec/step)
step 75340 	 loss = 0.018, train_acc = 1.000 (3.300 sec/step)
step 75350 	 loss = 0.055, train_acc = 1.000 (3.395 sec/step)
step 75360 	 loss = 0.058, train_acc = 1.000 (3.298 sec/step)
step 75370 	 loss = 0.200, train_acc = 0.900 (3.343 sec/step)
step 75380 	 loss = 0.089, train_acc = 1.000 (3.362 sec/step)
step 75390 	 loss = 0.004, train_acc = 1.000 (3.335 sec/step)
step 75400 	 loss = 0.069, train_acc = 1.000 (3.298 sec/step)
step 75410 	 loss = 0.000, train_acc = 1.000 (3.367 sec/step)
step 75420 	 loss = 0.017, train_acc = 1.000 (3.328 sec/step)
step 75430 	 loss = 0.231, train_acc = 0.900 (3.333 sec/step)
step 75440 	 loss = 0.161, train_acc = 0.900 (3.338 sec/step)
step 75450 	 loss = 0.173, train_acc = 0.900 (3.352 sec/step)
step 75460 	 loss = 0.157, train_acc = 0.900 (3.353 sec/step)
step 75470 	 loss = 0.003, train_acc = 1.000 (3.369 sec/step)
step 75480 	 loss = 0.021, train_acc = 1.000 (3.364 sec/step)
step 75490 	 loss = 0.002, train_acc = 1.000 (3.320 sec/step)
step 75500 	 loss = 0.100, train_acc = 0.900 (3.358 sec/step)
step 75510 	 loss = 0.000, train_acc = 1.000 (3.404 sec/step)
step 75520 	 loss = 0.330, train_acc = 0.900 (3.352 sec/step)
step 75530 	 loss = 0.033, train_acc = 1.000 (3.364 sec/step)
step 75540 	 loss = 0.036, train_acc = 1.000 (3.314 sec/step)
step 75550 	 loss = 0.744, train_acc = 0.800 (3.322 sec/step)
step 75560 	 loss = 0.296, train_acc = 0.900 (3.348 sec/step)
step 75570 	 loss = 0.034, train_acc = 1.000 (3.306 sec/step)
step 75580 	 loss = 0.023, train_acc = 1.000 (3.359 sec/step)
step 75590 	 loss = 0.012, train_acc = 1.000 (3.359 sec/step)
step 75600 	 loss = 0.047, train_acc = 1.000 (3.345 sec/step)
step 75610 	 loss = 1.568, train_acc = 0.700 (3.381 sec/step)
step 75620 	 loss = 0.199, train_acc = 0.900 (3.371 sec/step)
step 75630 	 loss = 0.038, train_acc = 1.000 (3.335 sec/step)
step 75640 	 loss = 0.025, train_acc = 1.000 (3.358 sec/step)
step 75650 	 loss = 0.116, train_acc = 0.900 (3.370 sec/step)
step 75660 	 loss = 0.940, train_acc = 0.900 (3.320 sec/step)
step 75670 	 loss = 0.533, train_acc = 0.800 (3.338 sec/step)
step 75680 	 loss = 0.527, train_acc = 0.800 (3.317 sec/step)
step 75690 	 loss = 0.001, train_acc = 1.000 (3.328 sec/step)
step 75700 	 loss = 0.037, train_acc = 1.000 (3.358 sec/step)
step 75710 	 loss = 0.033, train_acc = 1.000 (3.345 sec/step)
step 75720 	 loss = 0.233, train_acc = 0.800 (3.410 sec/step)
step 75730 	 loss = 0.543, train_acc = 0.800 (3.333 sec/step)
step 75740 	 loss = 0.567, train_acc = 0.800 (3.318 sec/step)
step 75750 	 loss = 0.828, train_acc = 0.800 (3.300 sec/step)
step 75760 	 loss = 0.714, train_acc = 0.800 (3.328 sec/step)
step 75770 	 loss = 0.986, train_acc = 0.800 (3.321 sec/step)
step 75780 	 loss = 0.271, train_acc = 0.800 (3.439 sec/step)
step 75790 	 loss = 1.535, train_acc = 0.900 (3.353 sec/step)
step 75800 	 loss = 0.000, train_acc = 1.000 (3.358 sec/step)
step 75810 	 loss = 0.136, train_acc = 1.000 (3.322 sec/step)
step 75820 	 loss = 0.938, train_acc = 0.700 (3.377 sec/step)
step 75830 	 loss = 0.235, train_acc = 1.000 (3.348 sec/step)
step 75840 	 loss = 0.014, train_acc = 1.000 (3.358 sec/step)
step 75850 	 loss = 0.828, train_acc = 0.800 (3.344 sec/step)
step 75860 	 loss = 0.030, train_acc = 1.000 (3.334 sec/step)
step 75870 	 loss = 0.022, train_acc = 1.000 (3.332 sec/step)
step 75880 	 loss = 0.142, train_acc = 1.000 (3.346 sec/step)
step 75890 	 loss = 0.000, train_acc = 1.000 (3.407 sec/step)
step 75900 	 loss = 0.542, train_acc = 0.800 (3.308 sec/step)
step 75910 	 loss = 0.156, train_acc = 0.900 (3.308 sec/step)
step 75920 	 loss = 0.298, train_acc = 0.800 (3.343 sec/step)
step 75930 	 loss = 0.027, train_acc = 1.000 (3.346 sec/step)
step 75940 	 loss = 0.078, train_acc = 1.000 (3.293 sec/step)
step 75950 	 loss = 0.004, train_acc = 1.000 (3.389 sec/step)
step 75960 	 loss = 0.102, train_acc = 0.900 (3.326 sec/step)
step 75970 	 loss = 0.004, train_acc = 1.000 (3.340 sec/step)
step 75980 	 loss = 0.004, train_acc = 1.000 (3.389 sec/step)
step 75990 	 loss = 0.172, train_acc = 0.900 (3.398 sec/step)
VALIDATION 	 acc = 0.562 (3.647 sec)
step 76000 	 loss = 0.075, train_acc = 1.000 (3.342 sec/step)
step 76010 	 loss = 0.004, train_acc = 1.000 (3.335 sec/step)
step 76020 	 loss = 0.001, train_acc = 1.000 (3.385 sec/step)
step 76030 	 loss = 0.002, train_acc = 1.000 (3.337 sec/step)
step 76040 	 loss = 0.001, train_acc = 1.000 (3.375 sec/step)
step 76050 	 loss = 0.425, train_acc = 0.900 (3.365 sec/step)
step 76060 	 loss = 0.007, train_acc = 1.000 (3.309 sec/step)
step 76070 	 loss = 0.087, train_acc = 1.000 (3.293 sec/step)
step 76080 	 loss = 0.590, train_acc = 0.800 (3.420 sec/step)
step 76090 	 loss = 0.006, train_acc = 1.000 (3.305 sec/step)
step 76100 	 loss = 0.024, train_acc = 1.000 (3.372 sec/step)
step 76110 	 loss = 0.220, train_acc = 0.900 (3.344 sec/step)
step 76120 	 loss = 0.113, train_acc = 1.000 (3.330 sec/step)
step 76130 	 loss = 0.006, train_acc = 1.000 (3.351 sec/step)
step 76140 	 loss = 0.001, train_acc = 1.000 (3.449 sec/step)
step 76150 	 loss = 0.006, train_acc = 1.000 (3.316 sec/step)
step 76160 	 loss = 0.052, train_acc = 1.000 (3.319 sec/step)
step 76170 	 loss = 5.901, train_acc = 0.800 (3.321 sec/step)
step 76180 	 loss = 0.341, train_acc = 0.900 (3.359 sec/step)
step 76190 	 loss = 0.181, train_acc = 1.000 (3.304 sec/step)
step 76200 	 loss = 0.143, train_acc = 1.000 (3.367 sec/step)
step 76210 	 loss = 0.864, train_acc = 0.800 (3.339 sec/step)
step 76220 	 loss = 0.176, train_acc = 0.900 (3.345 sec/step)
step 76230 	 loss = 0.002, train_acc = 1.000 (3.331 sec/step)
step 76240 	 loss = 1.145, train_acc = 0.800 (3.312 sec/step)
step 76250 	 loss = 0.466, train_acc = 0.900 (3.355 sec/step)
step 76260 	 loss = 0.028, train_acc = 1.000 (3.351 sec/step)
step 76270 	 loss = 0.219, train_acc = 0.900 (3.344 sec/step)
step 76280 	 loss = 0.105, train_acc = 1.000 (3.336 sec/step)
step 76290 	 loss = 0.003, train_acc = 1.000 (3.297 sec/step)
step 76300 	 loss = 0.000, train_acc = 1.000 (3.394 sec/step)
step 76310 	 loss = 0.237, train_acc = 0.900 (3.319 sec/step)
step 76320 	 loss = 0.770, train_acc = 0.900 (3.361 sec/step)
step 76330 	 loss = 0.045, train_acc = 1.000 (3.317 sec/step)
step 76340 	 loss = 0.004, train_acc = 1.000 (3.308 sec/step)
step 76350 	 loss = 0.001, train_acc = 1.000 (3.382 sec/step)
step 76360 	 loss = 0.062, train_acc = 1.000 (3.385 sec/step)
step 76370 	 loss = 0.000, train_acc = 1.000 (3.321 sec/step)
step 76380 	 loss = 0.297, train_acc = 0.900 (3.327 sec/step)
step 76390 	 loss = 0.002, train_acc = 1.000 (3.323 sec/step)
step 76400 	 loss = 0.195, train_acc = 0.900 (3.302 sec/step)
step 76410 	 loss = 0.998, train_acc = 0.700 (3.432 sec/step)
step 76420 	 loss = 0.000, train_acc = 1.000 (3.305 sec/step)
step 76430 	 loss = 0.107, train_acc = 0.900 (3.311 sec/step)
step 76440 	 loss = 0.013, train_acc = 1.000 (3.306 sec/step)
step 76450 	 loss = 0.023, train_acc = 1.000 (3.332 sec/step)
step 76460 	 loss = 0.108, train_acc = 0.900 (3.353 sec/step)
step 76470 	 loss = 0.522, train_acc = 0.900 (3.355 sec/step)
step 76480 	 loss = 0.525, train_acc = 0.900 (3.298 sec/step)
step 76490 	 loss = 0.502, train_acc = 0.900 (3.330 sec/step)
step 76500 	 loss = 0.388, train_acc = 0.700 (3.383 sec/step)
step 76510 	 loss = 0.002, train_acc = 1.000 (3.321 sec/step)
step 76520 	 loss = 0.001, train_acc = 1.000 (3.323 sec/step)
step 76530 	 loss = 0.504, train_acc = 0.800 (3.313 sec/step)
step 76540 	 loss = 0.303, train_acc = 0.900 (3.384 sec/step)
step 76550 	 loss = 0.142, train_acc = 0.900 (3.344 sec/step)
step 76560 	 loss = 0.535, train_acc = 0.900 (3.381 sec/step)
step 76570 	 loss = 0.079, train_acc = 1.000 (3.351 sec/step)
step 76580 	 loss = 0.036, train_acc = 1.000 (3.370 sec/step)
step 76590 	 loss = 0.036, train_acc = 1.000 (3.382 sec/step)
step 76600 	 loss = 0.019, train_acc = 1.000 (3.381 sec/step)
step 76610 	 loss = 0.024, train_acc = 1.000 (3.346 sec/step)
step 76620 	 loss = 0.047, train_acc = 1.000 (3.371 sec/step)
step 76630 	 loss = 0.790, train_acc = 0.800 (3.364 sec/step)
step 76640 	 loss = 0.014, train_acc = 1.000 (3.348 sec/step)
step 76650 	 loss = 0.004, train_acc = 1.000 (3.377 sec/step)
step 76660 	 loss = 0.051, train_acc = 1.000 (3.365 sec/step)
step 76670 	 loss = 0.374, train_acc = 0.900 (3.412 sec/step)
step 76680 	 loss = 0.080, train_acc = 1.000 (3.370 sec/step)
step 76690 	 loss = 0.118, train_acc = 0.900 (3.345 sec/step)
step 76700 	 loss = 0.418, train_acc = 0.900 (3.337 sec/step)
step 76710 	 loss = 0.061, train_acc = 1.000 (3.338 sec/step)
step 76720 	 loss = 0.453, train_acc = 0.900 (3.403 sec/step)
step 76730 	 loss = 0.037, train_acc = 1.000 (3.397 sec/step)
step 76740 	 loss = 0.023, train_acc = 1.000 (3.351 sec/step)
step 76750 	 loss = 0.020, train_acc = 1.000 (3.334 sec/step)
step 76760 	 loss = 0.296, train_acc = 0.900 (3.328 sec/step)
step 76770 	 loss = 0.184, train_acc = 0.900 (3.339 sec/step)
step 76780 	 loss = 0.128, train_acc = 0.900 (3.394 sec/step)
step 76790 	 loss = 0.256, train_acc = 0.900 (3.321 sec/step)
step 76800 	 loss = 0.584, train_acc = 0.800 (3.329 sec/step)
step 76810 	 loss = 0.126, train_acc = 0.900 (3.349 sec/step)
step 76820 	 loss = 0.042, train_acc = 1.000 (3.340 sec/step)
step 76830 	 loss = 0.001, train_acc = 1.000 (3.360 sec/step)
step 76840 	 loss = 0.047, train_acc = 1.000 (3.347 sec/step)
step 76850 	 loss = 0.131, train_acc = 1.000 (3.336 sec/step)
step 76860 	 loss = 0.320, train_acc = 0.900 (3.364 sec/step)
step 76870 	 loss = 0.309, train_acc = 0.900 (3.313 sec/step)
step 76880 	 loss = 0.001, train_acc = 1.000 (3.327 sec/step)
step 76890 	 loss = 0.006, train_acc = 1.000 (3.308 sec/step)
step 76900 	 loss = 0.011, train_acc = 1.000 (3.355 sec/step)
step 76910 	 loss = 0.854, train_acc = 0.700 (3.368 sec/step)
step 76920 	 loss = 0.089, train_acc = 0.900 (3.352 sec/step)
step 76930 	 loss = 0.004, train_acc = 1.000 (3.303 sec/step)
step 76940 	 loss = 0.075, train_acc = 1.000 (3.411 sec/step)
step 76950 	 loss = 0.022, train_acc = 1.000 (3.360 sec/step)
step 76960 	 loss = 0.044, train_acc = 1.000 (3.361 sec/step)
step 76970 	 loss = 0.014, train_acc = 1.000 (3.326 sec/step)
step 76980 	 loss = 2.429, train_acc = 0.800 (3.318 sec/step)
step 76990 	 loss = 0.016, train_acc = 1.000 (3.309 sec/step)
step 77000 	 loss = 0.108, train_acc = 0.900 (3.369 sec/step)
step 77010 	 loss = 0.157, train_acc = 1.000 (3.342 sec/step)
step 77020 	 loss = 0.036, train_acc = 1.000 (3.415 sec/step)
step 77030 	 loss = 0.039, train_acc = 1.000 (3.376 sec/step)
step 77040 	 loss = 0.247, train_acc = 0.900 (3.296 sec/step)
step 77050 	 loss = 2.511, train_acc = 0.900 (3.344 sec/step)
step 77060 	 loss = 0.417, train_acc = 0.800 (3.336 sec/step)
step 77070 	 loss = 0.502, train_acc = 0.900 (3.386 sec/step)
step 77080 	 loss = 1.226, train_acc = 0.800 (3.344 sec/step)
step 77090 	 loss = 0.987, train_acc = 0.800 (3.324 sec/step)
step 77100 	 loss = 0.244, train_acc = 0.900 (3.340 sec/step)
step 77110 	 loss = 0.011, train_acc = 1.000 (3.344 sec/step)
step 77120 	 loss = 0.030, train_acc = 1.000 (3.342 sec/step)
step 77130 	 loss = 0.146, train_acc = 0.900 (3.358 sec/step)
step 77140 	 loss = 0.100, train_acc = 0.900 (3.360 sec/step)
step 77150 	 loss = 0.026, train_acc = 1.000 (3.375 sec/step)
step 77160 	 loss = 0.113, train_acc = 1.000 (3.329 sec/step)
step 77170 	 loss = 0.006, train_acc = 1.000 (3.341 sec/step)
step 77180 	 loss = 0.056, train_acc = 1.000 (3.366 sec/step)
step 77190 	 loss = 0.170, train_acc = 0.900 (3.347 sec/step)
step 77200 	 loss = 0.004, train_acc = 1.000 (3.338 sec/step)
step 77210 	 loss = 0.010, train_acc = 1.000 (3.346 sec/step)
step 77220 	 loss = 0.015, train_acc = 1.000 (3.375 sec/step)
step 77230 	 loss = 0.244, train_acc = 0.900 (3.323 sec/step)
step 77240 	 loss = 0.077, train_acc = 0.900 (3.370 sec/step)
step 77250 	 loss = 0.000, train_acc = 1.000 (3.383 sec/step)
step 77260 	 loss = 0.246, train_acc = 0.900 (3.438 sec/step)
step 77270 	 loss = 0.371, train_acc = 0.900 (3.393 sec/step)
step 77280 	 loss = 0.003, train_acc = 1.000 (3.364 sec/step)
step 77290 	 loss = 0.057, train_acc = 1.000 (3.357 sec/step)
step 77300 	 loss = 0.000, train_acc = 1.000 (3.353 sec/step)
step 77310 	 loss = 0.000, train_acc = 1.000 (3.382 sec/step)
step 77320 	 loss = 0.021, train_acc = 1.000 (3.351 sec/step)
step 77330 	 loss = 0.000, train_acc = 1.000 (3.438 sec/step)
step 77340 	 loss = 0.132, train_acc = 0.900 (3.365 sec/step)
step 77350 	 loss = 0.437, train_acc = 0.900 (3.310 sec/step)
step 77360 	 loss = 0.181, train_acc = 0.900 (3.383 sec/step)
step 77370 	 loss = 0.368, train_acc = 0.900 (3.323 sec/step)
step 77380 	 loss = 0.035, train_acc = 1.000 (3.378 sec/step)
step 77390 	 loss = 0.133, train_acc = 0.900 (3.306 sec/step)
step 77400 	 loss = 0.219, train_acc = 0.900 (3.375 sec/step)
step 77410 	 loss = 0.112, train_acc = 0.900 (3.321 sec/step)
step 77420 	 loss = 0.028, train_acc = 1.000 (3.340 sec/step)
step 77430 	 loss = 0.000, train_acc = 1.000 (3.328 sec/step)
step 77440 	 loss = 0.237, train_acc = 0.900 (3.361 sec/step)
step 77450 	 loss = 0.197, train_acc = 0.900 (3.306 sec/step)
step 77460 	 loss = 0.053, train_acc = 1.000 (3.357 sec/step)
step 77470 	 loss = 0.311, train_acc = 0.800 (3.359 sec/step)
step 77480 	 loss = 0.857, train_acc = 0.900 (3.337 sec/step)
step 77490 	 loss = 0.526, train_acc = 0.900 (3.325 sec/step)
step 77500 	 loss = 0.089, train_acc = 1.000 (3.454 sec/step)
step 77510 	 loss = 0.017, train_acc = 1.000 (3.441 sec/step)
step 77520 	 loss = 0.000, train_acc = 1.000 (3.450 sec/step)
step 77530 	 loss = 0.776, train_acc = 0.900 (3.315 sec/step)
step 77540 	 loss = 0.015, train_acc = 1.000 (3.334 sec/step)
step 77550 	 loss = 0.100, train_acc = 1.000 (3.394 sec/step)
step 77560 	 loss = 0.626, train_acc = 0.800 (3.319 sec/step)
step 77570 	 loss = 0.001, train_acc = 1.000 (3.355 sec/step)
step 77580 	 loss = 0.004, train_acc = 1.000 (3.329 sec/step)
step 77590 	 loss = 0.065, train_acc = 1.000 (3.332 sec/step)
step 77600 	 loss = 0.005, train_acc = 1.000 (3.318 sec/step)
step 77610 	 loss = 0.001, train_acc = 1.000 (3.432 sec/step)
step 77620 	 loss = 0.080, train_acc = 0.900 (3.331 sec/step)
step 77630 	 loss = 0.096, train_acc = 0.900 (3.338 sec/step)
step 77640 	 loss = 0.002, train_acc = 1.000 (3.406 sec/step)
step 77650 	 loss = 0.089, train_acc = 0.900 (3.342 sec/step)
step 77660 	 loss = 0.897, train_acc = 0.800 (3.363 sec/step)
step 77670 	 loss = 0.240, train_acc = 0.900 (3.370 sec/step)
step 77680 	 loss = 0.024, train_acc = 1.000 (3.350 sec/step)
step 77690 	 loss = 0.322, train_acc = 0.900 (3.350 sec/step)
step 77700 	 loss = 0.081, train_acc = 0.900 (3.399 sec/step)
step 77710 	 loss = 0.093, train_acc = 1.000 (3.332 sec/step)
step 77720 	 loss = 0.069, train_acc = 1.000 (3.373 sec/step)
step 77730 	 loss = 0.091, train_acc = 1.000 (3.361 sec/step)
step 77740 	 loss = 0.006, train_acc = 1.000 (3.469 sec/step)
step 77750 	 loss = 0.097, train_acc = 1.000 (3.350 sec/step)
step 77760 	 loss = 0.266, train_acc = 0.900 (3.344 sec/step)
step 77770 	 loss = 0.661, train_acc = 0.900 (3.353 sec/step)
step 77780 	 loss = 0.394, train_acc = 0.900 (3.378 sec/step)
step 77790 	 loss = 0.117, train_acc = 0.900 (3.333 sec/step)
step 77800 	 loss = 1.358, train_acc = 0.800 (3.313 sec/step)
step 77810 	 loss = 0.462, train_acc = 0.900 (3.353 sec/step)
step 77820 	 loss = 0.029, train_acc = 1.000 (3.364 sec/step)
step 77830 	 loss = 0.719, train_acc = 0.800 (3.398 sec/step)
step 77840 	 loss = 0.091, train_acc = 0.900 (3.350 sec/step)
step 77850 	 loss = 0.655, train_acc = 0.900 (3.360 sec/step)
step 77860 	 loss = 0.008, train_acc = 1.000 (3.337 sec/step)
step 77870 	 loss = 0.091, train_acc = 0.900 (3.398 sec/step)
step 77880 	 loss = 0.017, train_acc = 1.000 (3.352 sec/step)
step 77890 	 loss = 1.218, train_acc = 0.900 (3.315 sec/step)
VALIDATION 	 acc = 0.527 (3.627 sec)
step 77900 	 loss = 0.098, train_acc = 1.000 (3.411 sec/step)
step 77910 	 loss = 0.025, train_acc = 1.000 (3.305 sec/step)
step 77920 	 loss = 0.220, train_acc = 0.900 (3.330 sec/step)
step 77930 	 loss = 0.153, train_acc = 0.900 (3.402 sec/step)
step 77940 	 loss = 0.007, train_acc = 1.000 (3.370 sec/step)
step 77950 	 loss = 0.159, train_acc = 0.900 (3.338 sec/step)
step 77960 	 loss = 0.085, train_acc = 0.900 (3.418 sec/step)
step 77970 	 loss = 0.003, train_acc = 1.000 (3.326 sec/step)
step 77980 	 loss = 0.185, train_acc = 0.800 (3.333 sec/step)
step 77990 	 loss = 0.003, train_acc = 1.000 (3.324 sec/step)
step 78000 	 loss = 0.040, train_acc = 1.000 (3.356 sec/step)
step 78010 	 loss = 0.008, train_acc = 1.000 (3.340 sec/step)
step 78020 	 loss = 0.928, train_acc = 0.800 (3.354 sec/step)
step 78030 	 loss = 0.003, train_acc = 1.000 (3.350 sec/step)
step 78040 	 loss = 0.012, train_acc = 1.000 (3.371 sec/step)
step 78050 	 loss = 0.347, train_acc = 0.900 (3.372 sec/step)
step 78060 	 loss = 0.939, train_acc = 0.900 (3.406 sec/step)
step 78070 	 loss = 0.034, train_acc = 1.000 (3.375 sec/step)
step 78080 	 loss = 0.038, train_acc = 1.000 (3.330 sec/step)
step 78090 	 loss = 0.104, train_acc = 1.000 (3.332 sec/step)
step 78100 	 loss = 0.145, train_acc = 1.000 (3.381 sec/step)
step 78110 	 loss = 0.104, train_acc = 1.000 (3.387 sec/step)
step 78120 	 loss = 0.161, train_acc = 0.900 (3.439 sec/step)
step 78130 	 loss = 0.000, train_acc = 1.000 (3.333 sec/step)
step 78140 	 loss = 0.134, train_acc = 0.900 (3.305 sec/step)
step 78150 	 loss = 0.067, train_acc = 1.000 (3.303 sec/step)
step 78160 	 loss = 0.048, train_acc = 1.000 (3.375 sec/step)
step 78170 	 loss = 2.449, train_acc = 0.700 (3.345 sec/step)
step 78180 	 loss = 0.220, train_acc = 0.900 (3.331 sec/step)
step 78190 	 loss = 0.247, train_acc = 0.900 (3.375 sec/step)
step 78200 	 loss = 0.026, train_acc = 1.000 (3.326 sec/step)
step 78210 	 loss = 2.128, train_acc = 0.900 (3.342 sec/step)
step 78220 	 loss = 0.041, train_acc = 1.000 (3.340 sec/step)
step 78230 	 loss = 0.041, train_acc = 1.000 (3.320 sec/step)
step 78240 	 loss = 0.004, train_acc = 1.000 (3.332 sec/step)
step 78250 	 loss = 0.002, train_acc = 1.000 (3.303 sec/step)
step 78260 	 loss = 0.001, train_acc = 1.000 (3.347 sec/step)
step 78270 	 loss = 0.037, train_acc = 1.000 (3.307 sec/step)
step 78280 	 loss = 0.003, train_acc = 1.000 (3.387 sec/step)
step 78290 	 loss = 0.013, train_acc = 1.000 (3.388 sec/step)
step 78300 	 loss = 0.008, train_acc = 1.000 (3.380 sec/step)
step 78310 	 loss = 0.008, train_acc = 1.000 (3.347 sec/step)
step 78320 	 loss = 0.001, train_acc = 1.000 (3.324 sec/step)
step 78330 	 loss = 0.328, train_acc = 0.900 (3.348 sec/step)
step 78340 	 loss = 0.527, train_acc = 0.800 (3.379 sec/step)
step 78350 	 loss = 0.003, train_acc = 1.000 (3.376 sec/step)
step 78360 	 loss = 0.043, train_acc = 1.000 (3.345 sec/step)
step 78370 	 loss = 0.037, train_acc = 1.000 (3.368 sec/step)
step 78380 	 loss = 0.301, train_acc = 0.900 (3.362 sec/step)
step 78390 	 loss = 0.029, train_acc = 1.000 (3.341 sec/step)
step 78400 	 loss = 0.033, train_acc = 1.000 (3.342 sec/step)
step 78410 	 loss = 0.015, train_acc = 1.000 (3.385 sec/step)
step 78420 	 loss = 0.410, train_acc = 0.900 (3.326 sec/step)
step 78430 	 loss = 1.462, train_acc = 0.600 (3.398 sec/step)
step 78440 	 loss = 0.388, train_acc = 0.900 (3.313 sec/step)
step 78450 	 loss = 0.285, train_acc = 0.900 (3.362 sec/step)
step 78460 	 loss = 0.068, train_acc = 1.000 (3.371 sec/step)
step 78470 	 loss = 0.391, train_acc = 0.900 (3.298 sec/step)
step 78480 	 loss = 0.172, train_acc = 0.900 (3.342 sec/step)
step 78490 	 loss = 0.007, train_acc = 1.000 (3.341 sec/step)
step 78500 	 loss = 0.082, train_acc = 1.000 (3.307 sec/step)
step 78510 	 loss = 0.329, train_acc = 0.900 (3.382 sec/step)
step 78520 	 loss = 0.001, train_acc = 1.000 (3.346 sec/step)
step 78530 	 loss = 0.500, train_acc = 0.900 (3.374 sec/step)
step 78540 	 loss = 0.500, train_acc = 0.800 (3.412 sec/step)
step 78550 	 loss = 0.178, train_acc = 0.900 (3.337 sec/step)
step 78560 	 loss = 0.000, train_acc = 1.000 (3.331 sec/step)
step 78570 	 loss = 0.005, train_acc = 1.000 (3.384 sec/step)
step 78580 	 loss = 0.087, train_acc = 0.900 (3.338 sec/step)
step 78590 	 loss = 0.001, train_acc = 1.000 (3.344 sec/step)
step 78600 	 loss = 0.123, train_acc = 0.900 (3.353 sec/step)
step 78610 	 loss = 0.167, train_acc = 0.900 (3.368 sec/step)
step 78620 	 loss = 0.373, train_acc = 0.900 (3.383 sec/step)
step 78630 	 loss = 0.173, train_acc = 0.900 (3.384 sec/step)
step 78640 	 loss = 0.010, train_acc = 1.000 (3.353 sec/step)
step 78650 	 loss = 0.000, train_acc = 1.000 (3.362 sec/step)
step 78660 	 loss = 0.190, train_acc = 0.900 (3.363 sec/step)
step 78670 	 loss = 0.033, train_acc = 1.000 (3.334 sec/step)
step 78680 	 loss = 0.070, train_acc = 1.000 (3.305 sec/step)
step 78690 	 loss = 0.053, train_acc = 1.000 (3.358 sec/step)
step 78700 	 loss = 0.066, train_acc = 1.000 (3.360 sec/step)
step 78710 	 loss = 0.000, train_acc = 1.000 (3.382 sec/step)
step 78720 	 loss = 0.006, train_acc = 1.000 (3.352 sec/step)
step 78730 	 loss = 0.007, train_acc = 1.000 (3.297 sec/step)
step 78740 	 loss = 0.105, train_acc = 1.000 (3.332 sec/step)
step 78750 	 loss = 0.249, train_acc = 0.900 (3.351 sec/step)
step 78760 	 loss = 0.016, train_acc = 1.000 (3.363 sec/step)
step 78770 	 loss = 1.158, train_acc = 0.900 (3.340 sec/step)
step 78780 	 loss = 0.528, train_acc = 0.900 (3.332 sec/step)
step 78790 	 loss = 0.000, train_acc = 1.000 (3.357 sec/step)
step 78800 	 loss = 0.020, train_acc = 1.000 (3.329 sec/step)
step 78810 	 loss = 0.174, train_acc = 0.900 (3.344 sec/step)
step 78820 	 loss = 0.084, train_acc = 1.000 (3.339 sec/step)
step 78830 	 loss = 0.018, train_acc = 1.000 (3.337 sec/step)
step 78840 	 loss = 0.004, train_acc = 1.000 (3.345 sec/step)
step 78850 	 loss = 0.000, train_acc = 1.000 (3.371 sec/step)
step 78860 	 loss = 0.032, train_acc = 1.000 (3.304 sec/step)
step 78870 	 loss = 0.002, train_acc = 1.000 (3.353 sec/step)
step 78880 	 loss = 0.001, train_acc = 1.000 (3.318 sec/step)
step 78890 	 loss = 0.112, train_acc = 0.900 (3.298 sec/step)
step 78900 	 loss = 0.000, train_acc = 1.000 (3.316 sec/step)
step 78910 	 loss = 0.030, train_acc = 1.000 (3.337 sec/step)
step 78920 	 loss = 0.085, train_acc = 0.900 (3.378 sec/step)
step 78930 	 loss = 0.010, train_acc = 1.000 (3.342 sec/step)
step 78940 	 loss = 0.236, train_acc = 0.900 (3.337 sec/step)
step 78950 	 loss = 0.239, train_acc = 0.900 (3.370 sec/step)
step 78960 	 loss = 0.166, train_acc = 0.900 (3.303 sec/step)
step 78970 	 loss = 0.003, train_acc = 1.000 (3.307 sec/step)
step 78980 	 loss = 0.000, train_acc = 1.000 (3.332 sec/step)
step 78990 	 loss = 0.428, train_acc = 0.900 (3.332 sec/step)
step 79000 	 loss = 0.753, train_acc = 0.900 (3.300 sec/step)
step 79010 	 loss = 0.001, train_acc = 1.000 (3.363 sec/step)
step 79020 	 loss = 0.000, train_acc = 1.000 (3.301 sec/step)
step 79030 	 loss = 0.028, train_acc = 1.000 (3.402 sec/step)
step 79040 	 loss = 1.720, train_acc = 0.800 (3.352 sec/step)
step 79050 	 loss = 0.519, train_acc = 0.800 (3.321 sec/step)
step 79060 	 loss = 0.031, train_acc = 1.000 (3.334 sec/step)
step 79070 	 loss = 0.251, train_acc = 0.900 (3.395 sec/step)
step 79080 	 loss = 0.011, train_acc = 1.000 (3.326 sec/step)
step 79090 	 loss = 0.009, train_acc = 1.000 (3.343 sec/step)
step 79100 	 loss = 0.312, train_acc = 0.800 (3.353 sec/step)
step 79110 	 loss = 0.336, train_acc = 0.900 (3.347 sec/step)
step 79120 	 loss = 0.653, train_acc = 0.900 (3.404 sec/step)
step 79130 	 loss = 0.002, train_acc = 1.000 (3.341 sec/step)
step 79140 	 loss = 0.348, train_acc = 0.800 (3.327 sec/step)
step 79150 	 loss = 0.901, train_acc = 0.700 (3.331 sec/step)
step 79160 	 loss = 0.001, train_acc = 1.000 (3.327 sec/step)
step 79170 	 loss = 0.019, train_acc = 1.000 (3.359 sec/step)
step 79180 	 loss = 0.017, train_acc = 1.000 (3.340 sec/step)
step 79190 	 loss = 0.824, train_acc = 0.900 (3.389 sec/step)
step 79200 	 loss = 0.000, train_acc = 1.000 (3.360 sec/step)
step 79210 	 loss = 0.565, train_acc = 0.800 (3.370 sec/step)
step 79220 	 loss = 0.022, train_acc = 1.000 (3.385 sec/step)
step 79230 	 loss = 0.006, train_acc = 1.000 (3.341 sec/step)
step 79240 	 loss = 0.365, train_acc = 0.900 (3.382 sec/step)
step 79250 	 loss = 0.071, train_acc = 1.000 (3.328 sec/step)
step 79260 	 loss = 0.129, train_acc = 0.900 (3.380 sec/step)
step 79270 	 loss = 0.224, train_acc = 0.800 (3.340 sec/step)
step 79280 	 loss = 0.170, train_acc = 0.900 (3.319 sec/step)
step 79290 	 loss = 0.248, train_acc = 0.900 (3.403 sec/step)
step 79300 	 loss = 0.035, train_acc = 1.000 (3.354 sec/step)
step 79310 	 loss = 0.000, train_acc = 1.000 (3.353 sec/step)
step 79320 	 loss = 0.278, train_acc = 0.900 (3.348 sec/step)
step 79330 	 loss = 0.024, train_acc = 1.000 (3.378 sec/step)
step 79340 	 loss = 0.183, train_acc = 1.000 (3.396 sec/step)
step 79350 	 loss = 0.029, train_acc = 1.000 (3.339 sec/step)
step 79360 	 loss = 0.000, train_acc = 1.000 (3.410 sec/step)
step 79370 	 loss = 0.042, train_acc = 1.000 (3.330 sec/step)
step 79380 	 loss = 0.101, train_acc = 1.000 (3.321 sec/step)
step 79390 	 loss = 0.453, train_acc = 0.900 (3.390 sec/step)
step 79400 	 loss = 0.061, train_acc = 1.000 (3.311 sec/step)
step 79410 	 loss = 0.702, train_acc = 0.900 (3.319 sec/step)
step 79420 	 loss = 0.052, train_acc = 1.000 (3.380 sec/step)
step 79430 	 loss = 0.746, train_acc = 0.700 (3.314 sec/step)
step 79440 	 loss = 0.032, train_acc = 1.000 (3.379 sec/step)
step 79450 	 loss = 0.256, train_acc = 0.900 (3.332 sec/step)
step 79460 	 loss = 0.027, train_acc = 1.000 (3.310 sec/step)
step 79470 	 loss = 0.001, train_acc = 1.000 (3.362 sec/step)
step 79480 	 loss = 0.333, train_acc = 0.900 (3.402 sec/step)
step 79490 	 loss = 0.052, train_acc = 1.000 (3.419 sec/step)
step 79500 	 loss = 0.051, train_acc = 1.000 (3.378 sec/step)
step 79510 	 loss = 0.262, train_acc = 0.900 (3.362 sec/step)
step 79520 	 loss = 0.000, train_acc = 1.000 (3.359 sec/step)
step 79530 	 loss = 0.313, train_acc = 0.900 (3.370 sec/step)
step 79540 	 loss = 0.578, train_acc = 0.900 (3.410 sec/step)
step 79550 	 loss = 0.013, train_acc = 1.000 (3.340 sec/step)
step 79560 	 loss = 0.110, train_acc = 1.000 (3.303 sec/step)
step 79570 	 loss = 0.352, train_acc = 0.900 (3.339 sec/step)
step 79580 	 loss = 0.263, train_acc = 0.900 (3.367 sec/step)
step 79590 	 loss = 0.767, train_acc = 0.800 (3.362 sec/step)
step 79600 	 loss = 0.236, train_acc = 0.900 (3.380 sec/step)
step 79610 	 loss = 0.006, train_acc = 1.000 (3.330 sec/step)
step 79620 	 loss = 0.023, train_acc = 1.000 (3.358 sec/step)
step 79630 	 loss = 0.278, train_acc = 0.800 (3.362 sec/step)
step 79640 	 loss = 0.011, train_acc = 1.000 (3.388 sec/step)
step 79650 	 loss = 0.001, train_acc = 1.000 (3.390 sec/step)
step 79660 	 loss = 0.011, train_acc = 1.000 (3.330 sec/step)
step 79670 	 loss = 0.000, train_acc = 1.000 (3.349 sec/step)
step 79680 	 loss = 0.002, train_acc = 1.000 (3.371 sec/step)
step 79690 	 loss = 0.001, train_acc = 1.000 (3.444 sec/step)
step 79700 	 loss = 2.495, train_acc = 0.900 (3.356 sec/step)
step 79710 	 loss = 0.014, train_acc = 1.000 (3.349 sec/step)
step 79720 	 loss = 0.455, train_acc = 0.900 (3.370 sec/step)
step 79730 	 loss = 0.045, train_acc = 1.000 (3.357 sec/step)
step 79740 	 loss = 0.004, train_acc = 1.000 (3.455 sec/step)
step 79750 	 loss = 0.003, train_acc = 1.000 (3.306 sec/step)
step 79760 	 loss = 0.087, train_acc = 1.000 (3.363 sec/step)
step 79770 	 loss = 0.800, train_acc = 0.700 (3.365 sec/step)
step 79780 	 loss = 0.298, train_acc = 0.900 (3.315 sec/step)
step 79790 	 loss = 1.122, train_acc = 0.800 (3.328 sec/step)
VALIDATION 	 acc = 0.540 (3.629 sec)
step 79800 	 loss = 0.225, train_acc = 0.900 (3.332 sec/step)
step 79810 	 loss = 0.669, train_acc = 0.900 (3.331 sec/step)
step 79820 	 loss = 0.010, train_acc = 1.000 (3.332 sec/step)
step 79830 	 loss = 0.468, train_acc = 0.900 (3.377 sec/step)
step 79840 	 loss = 0.014, train_acc = 1.000 (3.392 sec/step)
step 79850 	 loss = 0.356, train_acc = 0.900 (3.354 sec/step)
step 79860 	 loss = 0.285, train_acc = 0.900 (3.312 sec/step)
step 79870 	 loss = 0.019, train_acc = 1.000 (3.303 sec/step)
step 79880 	 loss = 0.007, train_acc = 1.000 (3.362 sec/step)
step 79890 	 loss = 0.001, train_acc = 1.000 (3.409 sec/step)
step 79900 	 loss = 0.378, train_acc = 0.900 (3.321 sec/step)
step 79910 	 loss = 0.000, train_acc = 1.000 (3.399 sec/step)
step 79920 	 loss = 0.019, train_acc = 1.000 (3.381 sec/step)
step 79930 	 loss = 0.155, train_acc = 1.000 (3.376 sec/step)
step 79940 	 loss = 0.137, train_acc = 0.900 (3.363 sec/step)
step 79950 	 loss = 0.484, train_acc = 0.900 (3.344 sec/step)
step 79960 	 loss = 0.253, train_acc = 0.900 (3.349 sec/step)
step 79970 	 loss = 0.022, train_acc = 1.000 (3.371 sec/step)
step 79980 	 loss = 0.000, train_acc = 1.000 (3.370 sec/step)
step 79990 	 loss = 0.351, train_acc = 0.800 (3.378 sec/step)
step 80000 	 loss = 0.513, train_acc = 0.700 (3.392 sec/step)
step 80010 	 loss = 0.002, train_acc = 1.000 (3.346 sec/step)
step 80020 	 loss = 0.338, train_acc = 0.800 (3.389 sec/step)
step 80030 	 loss = 0.000, train_acc = 1.000 (3.344 sec/step)
step 80040 	 loss = 0.003, train_acc = 1.000 (3.488 sec/step)
step 80050 	 loss = 0.114, train_acc = 0.900 (3.314 sec/step)
step 80060 	 loss = 0.001, train_acc = 1.000 (3.357 sec/step)
step 80070 	 loss = 0.000, train_acc = 1.000 (3.330 sec/step)
step 80080 	 loss = 0.337, train_acc = 0.800 (3.345 sec/step)
step 80090 	 loss = 0.105, train_acc = 0.900 (3.389 sec/step)
step 80100 	 loss = 0.132, train_acc = 0.900 (3.391 sec/step)
step 80110 	 loss = 0.007, train_acc = 1.000 (3.318 sec/step)
step 80120 	 loss = 1.010, train_acc = 0.800 (3.364 sec/step)
step 80130 	 loss = 0.436, train_acc = 0.900 (3.343 sec/step)
step 80140 	 loss = 0.898, train_acc = 0.700 (3.400 sec/step)
step 80150 	 loss = 0.003, train_acc = 1.000 (3.349 sec/step)
step 80160 	 loss = 2.037, train_acc = 0.800 (3.351 sec/step)
step 80170 	 loss = 0.067, train_acc = 1.000 (3.341 sec/step)
step 80180 	 loss = 0.001, train_acc = 1.000 (3.378 sec/step)
step 80190 	 loss = 0.145, train_acc = 0.900 (3.309 sec/step)
step 80200 	 loss = 0.297, train_acc = 0.800 (3.337 sec/step)
step 80210 	 loss = 0.552, train_acc = 0.900 (3.396 sec/step)
step 80220 	 loss = 0.024, train_acc = 1.000 (3.369 sec/step)
step 80230 	 loss = 0.350, train_acc = 0.800 (3.367 sec/step)
step 80240 	 loss = 0.031, train_acc = 1.000 (3.317 sec/step)
step 80250 	 loss = 0.834, train_acc = 0.800 (3.366 sec/step)
step 80260 	 loss = 0.006, train_acc = 1.000 (3.368 sec/step)
step 80270 	 loss = 0.397, train_acc = 0.800 (3.371 sec/step)
step 80280 	 loss = 0.359, train_acc = 0.900 (3.341 sec/step)
step 80290 	 loss = 0.010, train_acc = 1.000 (3.390 sec/step)
step 80300 	 loss = 0.662, train_acc = 0.800 (3.302 sec/step)
step 80310 	 loss = 0.014, train_acc = 1.000 (3.349 sec/step)
step 80320 	 loss = 0.012, train_acc = 1.000 (3.341 sec/step)
step 80330 	 loss = 0.356, train_acc = 0.900 (3.318 sec/step)
step 80340 	 loss = 0.544, train_acc = 0.900 (3.397 sec/step)
step 80350 	 loss = 0.510, train_acc = 0.800 (3.367 sec/step)
step 80360 	 loss = 0.424, train_acc = 0.900 (3.313 sec/step)
step 80370 	 loss = 0.305, train_acc = 0.900 (3.425 sec/step)
step 80380 	 loss = 0.081, train_acc = 0.900 (3.423 sec/step)
step 80390 	 loss = 0.201, train_acc = 0.900 (3.329 sec/step)
step 80400 	 loss = 0.005, train_acc = 1.000 (3.312 sec/step)
step 80410 	 loss = 0.010, train_acc = 1.000 (3.397 sec/step)
step 80420 	 loss = 0.039, train_acc = 1.000 (3.377 sec/step)
step 80430 	 loss = 0.245, train_acc = 0.900 (3.342 sec/step)
step 80440 	 loss = 0.016, train_acc = 1.000 (3.387 sec/step)
step 80450 	 loss = 0.003, train_acc = 1.000 (3.361 sec/step)
step 80460 	 loss = 0.001, train_acc = 1.000 (3.345 sec/step)
step 80470 	 loss = 0.364, train_acc = 0.900 (3.357 sec/step)
step 80480 	 loss = 0.002, train_acc = 1.000 (3.349 sec/step)
step 80490 	 loss = 0.018, train_acc = 1.000 (3.372 sec/step)
step 80500 	 loss = 0.034, train_acc = 1.000 (3.365 sec/step)
step 80510 	 loss = 0.035, train_acc = 1.000 (3.449 sec/step)
step 80520 	 loss = 0.187, train_acc = 0.900 (3.415 sec/step)
step 80530 	 loss = 0.116, train_acc = 1.000 (3.353 sec/step)
step 80540 	 loss = 0.003, train_acc = 1.000 (3.307 sec/step)
step 80550 	 loss = 0.032, train_acc = 1.000 (3.390 sec/step)
step 80560 	 loss = 0.057, train_acc = 1.000 (3.322 sec/step)
step 80570 	 loss = 0.063, train_acc = 1.000 (3.319 sec/step)
step 80580 	 loss = 0.037, train_acc = 1.000 (3.356 sec/step)
step 80590 	 loss = 0.005, train_acc = 1.000 (3.322 sec/step)
step 80600 	 loss = 0.000, train_acc = 1.000 (3.307 sec/step)
step 80610 	 loss = 0.379, train_acc = 0.900 (3.417 sec/step)
step 80620 	 loss = 0.002, train_acc = 1.000 (3.341 sec/step)
step 80630 	 loss = 0.119, train_acc = 0.900 (3.388 sec/step)
step 80640 	 loss = 1.182, train_acc = 0.800 (3.372 sec/step)
step 80650 	 loss = 0.670, train_acc = 0.900 (3.364 sec/step)
step 80660 	 loss = 0.045, train_acc = 1.000 (3.353 sec/step)
step 80670 	 loss = 0.063, train_acc = 1.000 (3.394 sec/step)
step 80680 	 loss = 0.012, train_acc = 1.000 (3.385 sec/step)
step 80690 	 loss = 0.009, train_acc = 1.000 (3.333 sec/step)
step 80700 	 loss = 0.611, train_acc = 0.800 (3.350 sec/step)
step 80710 	 loss = 0.002, train_acc = 1.000 (3.364 sec/step)
step 80720 	 loss = 0.080, train_acc = 1.000 (3.372 sec/step)
step 80730 	 loss = 0.440, train_acc = 0.900 (3.318 sec/step)
step 80740 	 loss = 0.372, train_acc = 0.800 (3.393 sec/step)
step 80750 	 loss = 0.172, train_acc = 0.900 (3.378 sec/step)
step 80760 	 loss = 0.001, train_acc = 1.000 (3.336 sec/step)
step 80770 	 loss = 0.002, train_acc = 1.000 (3.322 sec/step)
step 80780 	 loss = 0.005, train_acc = 1.000 (3.387 sec/step)
step 80790 	 loss = 1.402, train_acc = 0.800 (3.332 sec/step)
step 80800 	 loss = 0.025, train_acc = 1.000 (3.355 sec/step)
step 80810 	 loss = 0.230, train_acc = 0.900 (3.308 sec/step)
step 80820 	 loss = 0.369, train_acc = 0.900 (3.329 sec/step)
step 80830 	 loss = 0.330, train_acc = 0.900 (3.318 sec/step)
step 80840 	 loss = 0.003, train_acc = 1.000 (3.369 sec/step)
step 80850 	 loss = 0.121, train_acc = 0.900 (3.314 sec/step)
step 80860 	 loss = 0.210, train_acc = 0.900 (3.325 sec/step)
step 80870 	 loss = 0.566, train_acc = 0.800 (3.321 sec/step)
step 80880 	 loss = 0.110, train_acc = 1.000 (3.318 sec/step)
step 80890 	 loss = 0.394, train_acc = 0.800 (3.375 sec/step)
step 80900 	 loss = 0.111, train_acc = 0.900 (3.387 sec/step)
step 80910 	 loss = 0.004, train_acc = 1.000 (3.313 sec/step)
step 80920 	 loss = 0.106, train_acc = 1.000 (3.381 sec/step)
step 80930 	 loss = 0.032, train_acc = 1.000 (3.315 sec/step)
step 80940 	 loss = 0.228, train_acc = 0.900 (3.371 sec/step)
step 80950 	 loss = 0.046, train_acc = 1.000 (3.360 sec/step)
step 80960 	 loss = 0.044, train_acc = 1.000 (3.300 sec/step)
step 80970 	 loss = 0.364, train_acc = 0.900 (3.348 sec/step)
step 80980 	 loss = 0.138, train_acc = 1.000 (3.396 sec/step)
step 80990 	 loss = 0.002, train_acc = 1.000 (3.385 sec/step)
step 81000 	 loss = 0.224, train_acc = 0.900 (3.323 sec/step)
step 81010 	 loss = 0.209, train_acc = 0.900 (3.370 sec/step)
step 81020 	 loss = 0.019, train_acc = 1.000 (3.325 sec/step)
step 81030 	 loss = 0.042, train_acc = 1.000 (3.381 sec/step)
step 81040 	 loss = 0.706, train_acc = 0.800 (3.359 sec/step)
step 81050 	 loss = 0.002, train_acc = 1.000 (3.362 sec/step)
step 81060 	 loss = 0.004, train_acc = 1.000 (3.362 sec/step)
step 81070 	 loss = 0.004, train_acc = 1.000 (3.344 sec/step)
step 81080 	 loss = 0.001, train_acc = 1.000 (3.329 sec/step)
step 81090 	 loss = 0.387, train_acc = 0.900 (3.374 sec/step)
step 81100 	 loss = 0.000, train_acc = 1.000 (3.366 sec/step)
step 81110 	 loss = 0.243, train_acc = 0.900 (3.366 sec/step)
step 81120 	 loss = 0.425, train_acc = 0.900 (3.366 sec/step)
step 81130 	 loss = 0.001, train_acc = 1.000 (3.347 sec/step)
step 81140 	 loss = 0.589, train_acc = 0.900 (3.387 sec/step)
step 81150 	 loss = 0.015, train_acc = 1.000 (3.406 sec/step)
step 81160 	 loss = 0.022, train_acc = 1.000 (3.324 sec/step)
step 81170 	 loss = 0.013, train_acc = 1.000 (3.398 sec/step)
step 81180 	 loss = 0.002, train_acc = 1.000 (3.368 sec/step)
step 81190 	 loss = 0.384, train_acc = 0.800 (3.406 sec/step)
step 81200 	 loss = 0.005, train_acc = 1.000 (3.315 sec/step)
step 81210 	 loss = 0.301, train_acc = 0.900 (3.356 sec/step)
step 81220 	 loss = 0.677, train_acc = 0.800 (3.358 sec/step)
step 81230 	 loss = 0.079, train_acc = 1.000 (3.315 sec/step)
step 81240 	 loss = 0.074, train_acc = 0.900 (3.368 sec/step)
step 81250 	 loss = 0.431, train_acc = 0.700 (3.396 sec/step)
step 81260 	 loss = 0.203, train_acc = 0.800 (3.345 sec/step)
step 81270 	 loss = 0.126, train_acc = 0.900 (3.377 sec/step)
step 81280 	 loss = 0.001, train_acc = 1.000 (3.322 sec/step)
step 81290 	 loss = 0.064, train_acc = 1.000 (3.353 sec/step)
step 81300 	 loss = 0.079, train_acc = 1.000 (3.345 sec/step)
step 81310 	 loss = 0.135, train_acc = 0.900 (3.320 sec/step)
step 81320 	 loss = 0.063, train_acc = 1.000 (3.313 sec/step)
step 81330 	 loss = 0.000, train_acc = 1.000 (3.368 sec/step)
step 81340 	 loss = 0.009, train_acc = 1.000 (3.376 sec/step)
step 81350 	 loss = 0.000, train_acc = 1.000 (3.355 sec/step)
step 81360 	 loss = 1.780, train_acc = 0.900 (3.365 sec/step)
step 81370 	 loss = 0.022, train_acc = 1.000 (3.364 sec/step)
step 81380 	 loss = 0.184, train_acc = 0.900 (3.378 sec/step)
step 81390 	 loss = 0.000, train_acc = 1.000 (3.382 sec/step)
step 81400 	 loss = 1.196, train_acc = 0.600 (3.322 sec/step)
step 81410 	 loss = 0.753, train_acc = 0.900 (3.317 sec/step)
step 81420 	 loss = 0.746, train_acc = 0.800 (3.367 sec/step)
step 81430 	 loss = 0.325, train_acc = 0.900 (3.339 sec/step)
step 81440 	 loss = 0.126, train_acc = 1.000 (3.321 sec/step)
step 81450 	 loss = 0.730, train_acc = 0.800 (3.402 sec/step)
step 81460 	 loss = 0.226, train_acc = 0.900 (3.342 sec/step)
step 81470 	 loss = 0.012, train_acc = 1.000 (3.310 sec/step)
step 81480 	 loss = 0.000, train_acc = 1.000 (3.382 sec/step)
step 81490 	 loss = 0.006, train_acc = 1.000 (3.370 sec/step)
step 81500 	 loss = 0.016, train_acc = 1.000 (3.359 sec/step)
step 81510 	 loss = 0.014, train_acc = 1.000 (3.379 sec/step)
step 81520 	 loss = 0.017, train_acc = 1.000 (3.365 sec/step)
step 81530 	 loss = 0.267, train_acc = 0.900 (3.367 sec/step)
step 81540 	 loss = 0.271, train_acc = 0.900 (3.375 sec/step)
step 81550 	 loss = 0.242, train_acc = 0.900 (3.338 sec/step)
step 81560 	 loss = 0.058, train_acc = 1.000 (3.346 sec/step)
step 81570 	 loss = 0.000, train_acc = 1.000 (3.357 sec/step)
step 81580 	 loss = 0.024, train_acc = 1.000 (3.338 sec/step)
step 81590 	 loss = 0.003, train_acc = 1.000 (3.306 sec/step)
step 81600 	 loss = 0.047, train_acc = 1.000 (3.377 sec/step)
step 81610 	 loss = 0.601, train_acc = 0.900 (3.317 sec/step)
step 81620 	 loss = 0.000, train_acc = 1.000 (3.330 sec/step)
step 81630 	 loss = 0.219, train_acc = 0.900 (3.303 sec/step)
step 81640 	 loss = 1.199, train_acc = 0.900 (3.373 sec/step)
step 81650 	 loss = 0.001, train_acc = 1.000 (3.334 sec/step)
step 81660 	 loss = 0.001, train_acc = 1.000 (3.404 sec/step)
step 81670 	 loss = 0.049, train_acc = 1.000 (3.358 sec/step)
step 81680 	 loss = 0.002, train_acc = 1.000 (3.399 sec/step)
step 81690 	 loss = 0.116, train_acc = 1.000 (3.396 sec/step)
VALIDATION 	 acc = 0.538 (3.642 sec)
step 81700 	 loss = 0.424, train_acc = 0.900 (3.376 sec/step)
step 81710 	 loss = 0.060, train_acc = 1.000 (3.292 sec/step)
step 81720 	 loss = 0.127, train_acc = 0.900 (3.323 sec/step)
step 81730 	 loss = 0.405, train_acc = 0.900 (3.399 sec/step)
step 81740 	 loss = 1.225, train_acc = 0.800 (3.481 sec/step)
step 81750 	 loss = 0.022, train_acc = 1.000 (3.410 sec/step)
step 81760 	 loss = 0.482, train_acc = 0.800 (3.303 sec/step)
step 81770 	 loss = 0.498, train_acc = 0.800 (3.312 sec/step)
step 81780 	 loss = 0.321, train_acc = 0.900 (3.332 sec/step)
step 81790 	 loss = 0.146, train_acc = 0.900 (3.329 sec/step)
step 81800 	 loss = 0.064, train_acc = 1.000 (3.303 sec/step)
step 81810 	 loss = 0.000, train_acc = 1.000 (3.324 sec/step)
step 81820 	 loss = 0.003, train_acc = 1.000 (3.319 sec/step)
step 81830 	 loss = 0.006, train_acc = 1.000 (3.294 sec/step)
step 81840 	 loss = 0.962, train_acc = 0.700 (3.340 sec/step)
step 81850 	 loss = 0.443, train_acc = 0.800 (3.387 sec/step)
step 81860 	 loss = 0.218, train_acc = 0.900 (3.350 sec/step)
step 81870 	 loss = 0.307, train_acc = 0.900 (3.391 sec/step)
step 81880 	 loss = 0.949, train_acc = 0.800 (3.329 sec/step)
step 81890 	 loss = 0.019, train_acc = 1.000 (3.336 sec/step)
step 81900 	 loss = 0.307, train_acc = 0.800 (3.321 sec/step)
step 81910 	 loss = 0.226, train_acc = 0.900 (3.462 sec/step)
step 81920 	 loss = 0.092, train_acc = 1.000 (3.371 sec/step)
step 81930 	 loss = 0.078, train_acc = 0.900 (3.324 sec/step)
step 81940 	 loss = 0.033, train_acc = 1.000 (3.344 sec/step)
step 81950 	 loss = 0.044, train_acc = 1.000 (3.334 sec/step)
step 81960 	 loss = 0.065, train_acc = 1.000 (3.442 sec/step)
step 81970 	 loss = 0.954, train_acc = 0.900 (3.315 sec/step)
step 81980 	 loss = 0.001, train_acc = 1.000 (3.343 sec/step)
step 81990 	 loss = 0.055, train_acc = 1.000 (3.388 sec/step)
step 82000 	 loss = 0.021, train_acc = 1.000 (3.349 sec/step)
step 82010 	 loss = 0.051, train_acc = 1.000 (3.368 sec/step)
step 82020 	 loss = 0.062, train_acc = 1.000 (3.303 sec/step)
step 82030 	 loss = 0.503, train_acc = 0.900 (3.383 sec/step)
step 82040 	 loss = 1.443, train_acc = 0.600 (3.304 sec/step)
step 82050 	 loss = 0.134, train_acc = 0.900 (3.343 sec/step)
step 82060 	 loss = 0.087, train_acc = 0.900 (3.336 sec/step)
step 82070 	 loss = 0.140, train_acc = 0.900 (3.372 sec/step)
step 82080 	 loss = 0.073, train_acc = 1.000 (3.333 sec/step)
step 82090 	 loss = 0.000, train_acc = 1.000 (3.382 sec/step)
step 82100 	 loss = 0.022, train_acc = 1.000 (3.344 sec/step)
step 82110 	 loss = 0.298, train_acc = 0.900 (3.394 sec/step)
step 82120 	 loss = 0.364, train_acc = 0.900 (3.376 sec/step)
step 82130 	 loss = 0.255, train_acc = 0.900 (3.365 sec/step)
step 82140 	 loss = 0.200, train_acc = 0.900 (3.353 sec/step)
step 82150 	 loss = 0.460, train_acc = 0.700 (3.359 sec/step)
step 82160 	 loss = 0.025, train_acc = 1.000 (3.358 sec/step)
step 82170 	 loss = 0.019, train_acc = 1.000 (3.370 sec/step)
step 82180 	 loss = 0.000, train_acc = 1.000 (3.355 sec/step)
step 82190 	 loss = 0.255, train_acc = 0.900 (3.339 sec/step)
step 82200 	 loss = 0.275, train_acc = 0.800 (3.332 sec/step)
step 82210 	 loss = 0.108, train_acc = 1.000 (3.348 sec/step)
step 82220 	 loss = 0.122, train_acc = 1.000 (3.371 sec/step)
step 82230 	 loss = 0.012, train_acc = 1.000 (3.374 sec/step)
step 82240 	 loss = 0.003, train_acc = 1.000 (3.344 sec/step)
step 82250 	 loss = 0.015, train_acc = 1.000 (3.390 sec/step)
step 82260 	 loss = 0.240, train_acc = 0.900 (3.395 sec/step)
step 82270 	 loss = 0.334, train_acc = 0.900 (3.362 sec/step)
step 82280 	 loss = 0.520, train_acc = 0.900 (3.351 sec/step)
step 82290 	 loss = 0.015, train_acc = 1.000 (3.325 sec/step)
step 82300 	 loss = 0.019, train_acc = 1.000 (3.339 sec/step)
step 82310 	 loss = 0.181, train_acc = 0.900 (3.350 sec/step)
step 82320 	 loss = 0.046, train_acc = 1.000 (3.357 sec/step)
step 82330 	 loss = 0.067, train_acc = 1.000 (3.371 sec/step)
step 82340 	 loss = 0.005, train_acc = 1.000 (3.351 sec/step)
step 82350 	 loss = 0.371, train_acc = 0.800 (3.302 sec/step)
step 82360 	 loss = 0.320, train_acc = 0.900 (3.328 sec/step)
step 82370 	 loss = 0.007, train_acc = 1.000 (3.398 sec/step)
step 82380 	 loss = 0.302, train_acc = 0.900 (3.372 sec/step)
step 82390 	 loss = 0.201, train_acc = 0.900 (3.311 sec/step)
step 82400 	 loss = 0.892, train_acc = 0.900 (3.377 sec/step)
step 82410 	 loss = 1.543, train_acc = 0.700 (3.293 sec/step)
step 82420 	 loss = 0.112, train_acc = 1.000 (3.388 sec/step)
step 82430 	 loss = 0.122, train_acc = 0.900 (3.319 sec/step)
step 82440 	 loss = 0.322, train_acc = 0.900 (3.368 sec/step)
step 82450 	 loss = 0.014, train_acc = 1.000 (3.368 sec/step)
step 82460 	 loss = 0.007, train_acc = 1.000 (3.460 sec/step)
step 82470 	 loss = 0.200, train_acc = 0.900 (3.367 sec/step)
step 82480 	 loss = 0.543, train_acc = 0.900 (3.371 sec/step)
step 82490 	 loss = 0.325, train_acc = 0.900 (3.308 sec/step)
step 82500 	 loss = 0.065, train_acc = 1.000 (3.357 sec/step)
step 82510 	 loss = 0.049, train_acc = 1.000 (3.304 sec/step)
step 82520 	 loss = 0.023, train_acc = 1.000 (3.352 sec/step)
step 82530 	 loss = 0.002, train_acc = 1.000 (3.334 sec/step)
step 82540 	 loss = 0.015, train_acc = 1.000 (3.430 sec/step)
step 82550 	 loss = 0.000, train_acc = 1.000 (3.324 sec/step)
step 82560 	 loss = 0.006, train_acc = 1.000 (3.391 sec/step)
step 82570 	 loss = 1.362, train_acc = 0.800 (3.353 sec/step)
step 82580 	 loss = 0.207, train_acc = 0.900 (3.384 sec/step)
step 82590 	 loss = 0.258, train_acc = 0.900 (3.395 sec/step)
step 82600 	 loss = 0.441, train_acc = 0.900 (3.455 sec/step)
step 82610 	 loss = 0.172, train_acc = 0.900 (3.365 sec/step)
step 82620 	 loss = 0.000, train_acc = 1.000 (3.373 sec/step)
step 82630 	 loss = 0.570, train_acc = 0.900 (3.383 sec/step)
step 82640 	 loss = 0.343, train_acc = 0.900 (3.370 sec/step)
step 82650 	 loss = 0.000, train_acc = 1.000 (3.351 sec/step)
step 82660 	 loss = 0.188, train_acc = 0.900 (3.462 sec/step)
step 82670 	 loss = 0.007, train_acc = 1.000 (3.341 sec/step)
step 82680 	 loss = 0.063, train_acc = 1.000 (3.329 sec/step)
step 82690 	 loss = 0.009, train_acc = 1.000 (3.358 sec/step)
step 82700 	 loss = 0.006, train_acc = 1.000 (3.360 sec/step)
step 82710 	 loss = 0.682, train_acc = 0.800 (3.375 sec/step)
step 82720 	 loss = 0.067, train_acc = 1.000 (3.336 sec/step)
step 82730 	 loss = 0.684, train_acc = 0.800 (3.380 sec/step)
step 82740 	 loss = 0.017, train_acc = 1.000 (3.362 sec/step)
step 82750 	 loss = 0.844, train_acc = 0.800 (3.311 sec/step)
step 82760 	 loss = 0.251, train_acc = 0.900 (3.379 sec/step)
step 82770 	 loss = 0.041, train_acc = 1.000 (3.373 sec/step)
step 82780 	 loss = 0.012, train_acc = 1.000 (3.329 sec/step)
step 82790 	 loss = 1.963, train_acc = 0.800 (3.393 sec/step)
step 82800 	 loss = 0.268, train_acc = 0.900 (3.335 sec/step)
step 82810 	 loss = 0.352, train_acc = 0.900 (3.347 sec/step)
step 82820 	 loss = 0.004, train_acc = 1.000 (3.386 sec/step)
step 82830 	 loss = 0.141, train_acc = 0.900 (3.367 sec/step)
step 82840 	 loss = 0.119, train_acc = 0.900 (3.324 sec/step)
step 82850 	 loss = 0.028, train_acc = 1.000 (3.329 sec/step)
step 82860 	 loss = 0.249, train_acc = 0.900 (3.326 sec/step)
step 82870 	 loss = 0.140, train_acc = 1.000 (3.310 sec/step)
step 82880 	 loss = 0.076, train_acc = 1.000 (3.354 sec/step)
step 82890 	 loss = 0.025, train_acc = 1.000 (3.377 sec/step)
step 82900 	 loss = 0.000, train_acc = 1.000 (3.400 sec/step)
step 82910 	 loss = 0.001, train_acc = 1.000 (3.360 sec/step)
step 82920 	 loss = 1.432, train_acc = 0.700 (3.374 sec/step)
step 82930 	 loss = 1.172, train_acc = 0.900 (3.331 sec/step)
step 82940 	 loss = 0.024, train_acc = 1.000 (3.348 sec/step)
step 82950 	 loss = 0.630, train_acc = 0.900 (3.346 sec/step)
step 82960 	 loss = 0.316, train_acc = 0.900 (3.309 sec/step)
step 82970 	 loss = 0.195, train_acc = 0.900 (3.424 sec/step)
step 82980 	 loss = 0.004, train_acc = 1.000 (3.375 sec/step)
step 82990 	 loss = 0.000, train_acc = 1.000 (3.430 sec/step)
step 83000 	 loss = 0.177, train_acc = 0.900 (3.471 sec/step)
step 83010 	 loss = 0.469, train_acc = 1.000 (3.381 sec/step)
step 83020 	 loss = 0.413, train_acc = 0.900 (3.403 sec/step)
step 83030 	 loss = 0.064, train_acc = 1.000 (3.314 sec/step)
step 83040 	 loss = 0.392, train_acc = 0.900 (3.384 sec/step)
step 83050 	 loss = 1.652, train_acc = 0.800 (3.388 sec/step)
step 83060 	 loss = 0.026, train_acc = 1.000 (3.343 sec/step)
step 83070 	 loss = 0.364, train_acc = 0.900 (3.334 sec/step)
step 83080 	 loss = 0.113, train_acc = 0.900 (3.364 sec/step)
step 83090 	 loss = 0.012, train_acc = 1.000 (3.343 sec/step)
step 83100 	 loss = 0.052, train_acc = 1.000 (3.324 sec/step)
step 83110 	 loss = 0.001, train_acc = 1.000 (3.380 sec/step)
step 83120 	 loss = 0.064, train_acc = 1.000 (3.380 sec/step)
step 83130 	 loss = 0.012, train_acc = 1.000 (3.345 sec/step)
step 83140 	 loss = 0.040, train_acc = 1.000 (3.330 sec/step)
step 83150 	 loss = 0.023, train_acc = 1.000 (3.368 sec/step)
step 83160 	 loss = 0.077, train_acc = 1.000 (3.375 sec/step)
step 83170 	 loss = 0.009, train_acc = 1.000 (3.345 sec/step)
step 83180 	 loss = 0.029, train_acc = 1.000 (3.344 sec/step)
step 83190 	 loss = 0.094, train_acc = 0.900 (3.450 sec/step)
step 83200 	 loss = 0.857, train_acc = 0.900 (3.316 sec/step)
step 83210 	 loss = 0.179, train_acc = 0.900 (3.446 sec/step)
step 83220 	 loss = 0.151, train_acc = 0.900 (3.350 sec/step)
step 83230 	 loss = 0.000, train_acc = 1.000 (3.374 sec/step)
step 83240 	 loss = 0.115, train_acc = 0.900 (3.341 sec/step)
step 83250 	 loss = 0.215, train_acc = 0.900 (3.341 sec/step)
step 83260 	 loss = 0.301, train_acc = 0.900 (3.392 sec/step)
step 83270 	 loss = 0.079, train_acc = 1.000 (3.312 sec/step)
step 83280 	 loss = 0.008, train_acc = 1.000 (3.355 sec/step)
step 83290 	 loss = 0.022, train_acc = 1.000 (3.382 sec/step)
step 83300 	 loss = 0.002, train_acc = 1.000 (3.353 sec/step)
step 83310 	 loss = 0.024, train_acc = 1.000 (3.368 sec/step)
step 83320 	 loss = 0.002, train_acc = 1.000 (3.308 sec/step)
step 83330 	 loss = 0.055, train_acc = 1.000 (3.387 sec/step)
step 83340 	 loss = 0.320, train_acc = 0.900 (3.364 sec/step)
step 83350 	 loss = 0.167, train_acc = 0.900 (3.433 sec/step)
step 83360 	 loss = 0.269, train_acc = 0.900 (3.353 sec/step)
step 83370 	 loss = 0.000, train_acc = 1.000 (3.361 sec/step)
step 83380 	 loss = 0.450, train_acc = 0.800 (3.397 sec/step)
step 83390 	 loss = 0.000, train_acc = 1.000 (3.346 sec/step)
step 83400 	 loss = 0.398, train_acc = 0.900 (3.388 sec/step)
step 83410 	 loss = 0.000, train_acc = 1.000 (3.345 sec/step)
step 83420 	 loss = 0.029, train_acc = 1.000 (3.306 sec/step)
step 83430 	 loss = 0.021, train_acc = 1.000 (3.328 sec/step)
step 83440 	 loss = 0.171, train_acc = 0.900 (3.346 sec/step)
step 83450 	 loss = 0.153, train_acc = 0.900 (3.365 sec/step)
step 83460 	 loss = 0.007, train_acc = 1.000 (3.360 sec/step)
step 83470 	 loss = 0.037, train_acc = 1.000 (3.328 sec/step)
step 83480 	 loss = 0.020, train_acc = 1.000 (3.413 sec/step)
step 83490 	 loss = 0.271, train_acc = 0.900 (3.374 sec/step)
step 83500 	 loss = 0.298, train_acc = 0.900 (3.328 sec/step)
step 83510 	 loss = 0.013, train_acc = 1.000 (3.366 sec/step)
step 83520 	 loss = 0.005, train_acc = 1.000 (3.308 sec/step)
step 83530 	 loss = 0.069, train_acc = 1.000 (3.301 sec/step)
step 83540 	 loss = 0.442, train_acc = 0.900 (3.432 sec/step)
step 83550 	 loss = 0.027, train_acc = 1.000 (3.335 sec/step)
step 83560 	 loss = 0.907, train_acc = 0.700 (3.372 sec/step)
step 83570 	 loss = 0.778, train_acc = 0.800 (3.399 sec/step)
step 83580 	 loss = 0.207, train_acc = 0.900 (3.380 sec/step)
step 83590 	 loss = 0.036, train_acc = 1.000 (3.353 sec/step)
VALIDATION 	 acc = 0.529 (3.609 sec)
step 83600 	 loss = 0.496, train_acc = 0.900 (3.394 sec/step)
step 83610 	 loss = 0.010, train_acc = 1.000 (3.321 sec/step)
step 83620 	 loss = 0.342, train_acc = 0.900 (3.418 sec/step)
step 83630 	 loss = 0.076, train_acc = 1.000 (3.399 sec/step)
step 83640 	 loss = 0.027, train_acc = 1.000 (3.394 sec/step)
step 83650 	 loss = 0.314, train_acc = 0.800 (3.335 sec/step)
step 83660 	 loss = 0.000, train_acc = 1.000 (3.376 sec/step)
step 83670 	 loss = 0.003, train_acc = 1.000 (3.342 sec/step)
step 83680 	 loss = 0.002, train_acc = 1.000 (3.411 sec/step)
step 83690 	 loss = 0.016, train_acc = 1.000 (3.345 sec/step)
step 83700 	 loss = 0.227, train_acc = 0.900 (3.420 sec/step)
step 83710 	 loss = 0.001, train_acc = 1.000 (3.362 sec/step)
step 83720 	 loss = 0.553, train_acc = 0.900 (3.390 sec/step)
step 83730 	 loss = 0.037, train_acc = 1.000 (3.312 sec/step)
step 83740 	 loss = 0.001, train_acc = 1.000 (3.376 sec/step)
step 83750 	 loss = 0.146, train_acc = 0.900 (3.332 sec/step)
step 83760 	 loss = 0.219, train_acc = 1.000 (3.382 sec/step)
step 83770 	 loss = 0.007, train_acc = 1.000 (3.381 sec/step)
step 83780 	 loss = 0.002, train_acc = 1.000 (3.340 sec/step)
step 83790 	 loss = 0.000, train_acc = 1.000 (3.376 sec/step)
step 83800 	 loss = 0.094, train_acc = 1.000 (3.374 sec/step)
step 83810 	 loss = 0.034, train_acc = 1.000 (3.388 sec/step)
step 83820 	 loss = 0.009, train_acc = 1.000 (3.402 sec/step)
step 83830 	 loss = 0.014, train_acc = 1.000 (3.408 sec/step)
step 83840 	 loss = 0.037, train_acc = 1.000 (3.338 sec/step)
step 83850 	 loss = 0.207, train_acc = 1.000 (3.385 sec/step)
step 83860 	 loss = 0.015, train_acc = 1.000 (3.383 sec/step)
step 83870 	 loss = 0.000, train_acc = 1.000 (3.364 sec/step)
step 83880 	 loss = 0.606, train_acc = 0.800 (3.348 sec/step)
step 83890 	 loss = 0.319, train_acc = 0.900 (3.379 sec/step)
step 83900 	 loss = 0.029, train_acc = 1.000 (3.312 sec/step)
step 83910 	 loss = 0.064, train_acc = 1.000 (3.365 sec/step)
step 83920 	 loss = 0.015, train_acc = 1.000 (3.377 sec/step)
step 83930 	 loss = 0.014, train_acc = 1.000 (3.375 sec/step)
step 83940 	 loss = 0.013, train_acc = 1.000 (3.325 sec/step)
step 83950 	 loss = 0.001, train_acc = 1.000 (3.371 sec/step)
step 83960 	 loss = 0.161, train_acc = 0.900 (3.338 sec/step)
step 83970 	 loss = 0.035, train_acc = 1.000 (3.352 sec/step)
step 83980 	 loss = 0.158, train_acc = 0.900 (3.371 sec/step)
step 83990 	 loss = 0.206, train_acc = 0.900 (3.379 sec/step)
step 84000 	 loss = 0.963, train_acc = 0.600 (3.341 sec/step)
step 84010 	 loss = 0.022, train_acc = 1.000 (3.308 sec/step)
step 84020 	 loss = 0.123, train_acc = 1.000 (3.315 sec/step)
step 84030 	 loss = 0.084, train_acc = 1.000 (3.320 sec/step)
step 84040 	 loss = 0.215, train_acc = 0.900 (3.319 sec/step)
step 84050 	 loss = 0.141, train_acc = 0.900 (3.353 sec/step)
step 84060 	 loss = 0.045, train_acc = 1.000 (3.338 sec/step)
step 84070 	 loss = 0.000, train_acc = 1.000 (3.319 sec/step)
step 84080 	 loss = 0.826, train_acc = 0.900 (3.370 sec/step)
step 84090 	 loss = 0.042, train_acc = 1.000 (3.379 sec/step)
step 84100 	 loss = 0.106, train_acc = 1.000 (3.368 sec/step)
step 84110 	 loss = 0.028, train_acc = 1.000 (3.337 sec/step)
step 84120 	 loss = 0.849, train_acc = 0.600 (3.340 sec/step)
step 84130 	 loss = 0.356, train_acc = 0.900 (3.368 sec/step)
step 84140 	 loss = 0.121, train_acc = 0.900 (3.387 sec/step)
step 84150 	 loss = 0.100, train_acc = 0.900 (3.349 sec/step)
step 84160 	 loss = 0.002, train_acc = 1.000 (3.340 sec/step)
step 84170 	 loss = 0.000, train_acc = 1.000 (3.354 sec/step)
step 84180 	 loss = 0.002, train_acc = 1.000 (3.378 sec/step)
step 84190 	 loss = 0.890, train_acc = 0.800 (3.349 sec/step)
step 84200 	 loss = 0.113, train_acc = 1.000 (3.308 sec/step)
step 84210 	 loss = 0.000, train_acc = 1.000 (3.326 sec/step)
step 84220 	 loss = 1.380, train_acc = 0.700 (3.400 sec/step)
step 84230 	 loss = 1.040, train_acc = 0.800 (3.330 sec/step)
step 84240 	 loss = 0.138, train_acc = 1.000 (3.321 sec/step)
step 84250 	 loss = 0.598, train_acc = 0.900 (3.358 sec/step)
step 84260 	 loss = 0.030, train_acc = 1.000 (3.308 sec/step)
step 84270 	 loss = 0.202, train_acc = 0.900 (3.328 sec/step)
step 84280 	 loss = 0.000, train_acc = 1.000 (3.323 sec/step)
step 84290 	 loss = 0.245, train_acc = 0.900 (3.344 sec/step)
step 84300 	 loss = 0.017, train_acc = 1.000 (3.317 sec/step)
step 84310 	 loss = 0.061, train_acc = 1.000 (3.347 sec/step)
step 84320 	 loss = 0.474, train_acc = 0.900 (3.374 sec/step)
step 84330 	 loss = 0.431, train_acc = 0.900 (3.334 sec/step)
step 84340 	 loss = 0.018, train_acc = 1.000 (3.335 sec/step)
step 84350 	 loss = 0.021, train_acc = 1.000 (3.399 sec/step)
step 84360 	 loss = 0.147, train_acc = 0.900 (3.357 sec/step)
step 84370 	 loss = 0.104, train_acc = 0.900 (3.388 sec/step)
step 84380 	 loss = 0.271, train_acc = 0.900 (3.316 sec/step)
step 84390 	 loss = 0.001, train_acc = 1.000 (3.330 sec/step)
step 84400 	 loss = 0.003, train_acc = 1.000 (3.369 sec/step)
step 84410 	 loss = 0.297, train_acc = 0.900 (3.399 sec/step)
step 84420 	 loss = 0.050, train_acc = 1.000 (3.311 sec/step)
step 84430 	 loss = 0.282, train_acc = 0.900 (3.338 sec/step)
step 84440 	 loss = 0.002, train_acc = 1.000 (3.360 sec/step)
step 84450 	 loss = 0.007, train_acc = 1.000 (3.363 sec/step)
step 84460 	 loss = 1.746, train_acc = 0.900 (3.338 sec/step)
step 84470 	 loss = 0.858, train_acc = 0.900 (3.349 sec/step)
step 84480 	 loss = 0.133, train_acc = 1.000 (3.352 sec/step)
step 84490 	 loss = 0.008, train_acc = 1.000 (3.352 sec/step)
step 84500 	 loss = 0.119, train_acc = 0.900 (3.360 sec/step)
step 84510 	 loss = 0.036, train_acc = 1.000 (3.332 sec/step)
step 84520 	 loss = 0.602, train_acc = 0.900 (3.379 sec/step)
step 84530 	 loss = 0.000, train_acc = 1.000 (3.335 sec/step)
step 84540 	 loss = 0.011, train_acc = 1.000 (3.310 sec/step)
step 84550 	 loss = 0.062, train_acc = 1.000 (3.361 sec/step)
step 84560 	 loss = 0.631, train_acc = 0.900 (3.353 sec/step)
step 84570 	 loss = 0.938, train_acc = 0.800 (3.347 sec/step)
step 84580 	 loss = 0.550, train_acc = 0.900 (3.409 sec/step)
step 84590 	 loss = 0.145, train_acc = 0.900 (3.399 sec/step)
step 84600 	 loss = 0.036, train_acc = 1.000 (3.366 sec/step)
step 84610 	 loss = 0.078, train_acc = 1.000 (3.315 sec/step)
step 84620 	 loss = 0.055, train_acc = 1.000 (3.349 sec/step)
step 84630 	 loss = 0.073, train_acc = 1.000 (3.374 sec/step)
step 84640 	 loss = 0.339, train_acc = 0.900 (3.402 sec/step)
step 84650 	 loss = 0.031, train_acc = 1.000 (3.390 sec/step)
step 84660 	 loss = 0.503, train_acc = 0.900 (3.330 sec/step)
step 84670 	 loss = 0.103, train_acc = 0.900 (3.392 sec/step)
step 84680 	 loss = 0.101, train_acc = 0.900 (3.399 sec/step)
step 84690 	 loss = 0.705, train_acc = 0.800 (3.339 sec/step)
step 84700 	 loss = 0.013, train_acc = 1.000 (3.342 sec/step)
step 84710 	 loss = 0.069, train_acc = 1.000 (3.340 sec/step)
step 84720 	 loss = 0.041, train_acc = 1.000 (3.371 sec/step)
step 84730 	 loss = 0.219, train_acc = 0.900 (3.344 sec/step)
step 84740 	 loss = 0.102, train_acc = 0.900 (3.393 sec/step)
step 84750 	 loss = 1.308, train_acc = 0.800 (3.425 sec/step)
step 84760 	 loss = 0.177, train_acc = 0.900 (3.324 sec/step)
step 84770 	 loss = 0.294, train_acc = 0.900 (3.317 sec/step)
step 84780 	 loss = 0.027, train_acc = 1.000 (3.348 sec/step)
step 84790 	 loss = 0.317, train_acc = 0.900 (3.297 sec/step)
step 84800 	 loss = 0.266, train_acc = 0.900 (3.310 sec/step)
step 84810 	 loss = 0.027, train_acc = 1.000 (3.376 sec/step)
step 84820 	 loss = 0.000, train_acc = 1.000 (3.348 sec/step)
step 84830 	 loss = 0.000, train_acc = 1.000 (3.334 sec/step)
step 84840 	 loss = 0.000, train_acc = 1.000 (3.478 sec/step)
step 84850 	 loss = 0.349, train_acc = 0.900 (3.364 sec/step)
step 84860 	 loss = 0.638, train_acc = 0.800 (3.370 sec/step)
step 84870 	 loss = 0.853, train_acc = 0.800 (3.361 sec/step)
step 84880 	 loss = 0.029, train_acc = 1.000 (3.384 sec/step)
step 84890 	 loss = 0.051, train_acc = 1.000 (3.364 sec/step)
step 84900 	 loss = 0.000, train_acc = 1.000 (3.336 sec/step)
step 84910 	 loss = 0.004, train_acc = 1.000 (3.331 sec/step)
step 84920 	 loss = 1.200, train_acc = 0.800 (3.353 sec/step)
step 84930 	 loss = 0.629, train_acc = 0.900 (3.380 sec/step)
step 84940 	 loss = 1.038, train_acc = 0.900 (3.331 sec/step)
step 84950 	 loss = 0.380, train_acc = 0.900 (3.368 sec/step)
step 84960 	 loss = 0.213, train_acc = 0.900 (3.334 sec/step)
step 84970 	 loss = 0.035, train_acc = 1.000 (3.411 sec/step)
step 84980 	 loss = 0.675, train_acc = 0.800 (3.312 sec/step)
step 84990 	 loss = 0.642, train_acc = 0.900 (3.314 sec/step)
step 85000 	 loss = 0.068, train_acc = 1.000 (3.346 sec/step)
step 85010 	 loss = 0.000, train_acc = 1.000 (3.410 sec/step)
step 85020 	 loss = 0.083, train_acc = 0.900 (3.342 sec/step)
step 85030 	 loss = 0.151, train_acc = 1.000 (3.378 sec/step)
step 85040 	 loss = 0.079, train_acc = 1.000 (3.371 sec/step)
step 85050 	 loss = 1.184, train_acc = 0.900 (3.353 sec/step)
step 85060 	 loss = 0.005, train_acc = 1.000 (3.350 sec/step)
step 85070 	 loss = 0.352, train_acc = 0.900 (3.399 sec/step)
step 85080 	 loss = 0.066, train_acc = 1.000 (3.369 sec/step)
step 85090 	 loss = 0.078, train_acc = 1.000 (3.321 sec/step)
step 85100 	 loss = 0.074, train_acc = 1.000 (3.339 sec/step)
step 85110 	 loss = 0.180, train_acc = 0.900 (3.355 sec/step)
step 85120 	 loss = 0.086, train_acc = 0.900 (3.360 sec/step)
step 85130 	 loss = 0.001, train_acc = 1.000 (3.367 sec/step)
step 85140 	 loss = 0.393, train_acc = 0.800 (3.380 sec/step)
step 85150 	 loss = 0.073, train_acc = 1.000 (3.362 sec/step)
step 85160 	 loss = 0.000, train_acc = 1.000 (3.372 sec/step)
step 85170 	 loss = 0.141, train_acc = 1.000 (3.341 sec/step)
step 85180 	 loss = 0.001, train_acc = 1.000 (3.361 sec/step)
step 85190 	 loss = 0.013, train_acc = 1.000 (3.382 sec/step)
step 85200 	 loss = 0.000, train_acc = 1.000 (3.383 sec/step)
step 85210 	 loss = 0.046, train_acc = 1.000 (3.375 sec/step)
step 85220 	 loss = 0.517, train_acc = 0.800 (3.458 sec/step)
step 85230 	 loss = 0.064, train_acc = 1.000 (3.310 sec/step)
step 85240 	 loss = 4.633, train_acc = 0.800 (3.341 sec/step)
step 85250 	 loss = 0.105, train_acc = 1.000 (3.365 sec/step)
step 85260 	 loss = 0.041, train_acc = 1.000 (3.391 sec/step)
step 85270 	 loss = 0.797, train_acc = 0.700 (3.341 sec/step)
step 85280 	 loss = 0.043, train_acc = 1.000 (3.350 sec/step)
step 85290 	 loss = 0.004, train_acc = 1.000 (3.409 sec/step)
step 85300 	 loss = 0.009, train_acc = 1.000 (3.352 sec/step)
step 85310 	 loss = 0.392, train_acc = 0.800 (3.365 sec/step)
step 85320 	 loss = 0.798, train_acc = 0.800 (3.359 sec/step)
step 85330 	 loss = 0.094, train_acc = 1.000 (3.319 sec/step)
step 85340 	 loss = 0.405, train_acc = 0.900 (3.367 sec/step)
step 85350 	 loss = 0.003, train_acc = 1.000 (3.337 sec/step)
step 85360 	 loss = 0.869, train_acc = 0.900 (3.375 sec/step)
step 85370 	 loss = 0.002, train_acc = 1.000 (3.388 sec/step)
step 85380 	 loss = 0.014, train_acc = 1.000 (3.298 sec/step)
step 85390 	 loss = 0.203, train_acc = 0.900 (3.373 sec/step)
step 85400 	 loss = 0.007, train_acc = 1.000 (3.309 sec/step)
step 85410 	 loss = 0.156, train_acc = 0.900 (3.410 sec/step)
step 85420 	 loss = 0.127, train_acc = 0.900 (3.419 sec/step)
step 85430 	 loss = 0.025, train_acc = 1.000 (3.381 sec/step)
step 85440 	 loss = 0.013, train_acc = 1.000 (3.397 sec/step)
step 85450 	 loss = 0.194, train_acc = 0.900 (3.394 sec/step)
step 85460 	 loss = 0.069, train_acc = 1.000 (3.377 sec/step)
step 85470 	 loss = 0.077, train_acc = 1.000 (3.340 sec/step)
step 85480 	 loss = 0.033, train_acc = 1.000 (3.370 sec/step)
step 85490 	 loss = 0.003, train_acc = 1.000 (3.472 sec/step)
VALIDATION 	 acc = 0.534 (3.626 sec)
step 85500 	 loss = 0.070, train_acc = 1.000 (3.355 sec/step)
step 85510 	 loss = 0.002, train_acc = 1.000 (3.353 sec/step)
step 85520 	 loss = 0.093, train_acc = 1.000 (3.390 sec/step)
step 85530 	 loss = 1.716, train_acc = 0.800 (3.352 sec/step)
step 85540 	 loss = 0.001, train_acc = 1.000 (3.397 sec/step)
step 85550 	 loss = 0.018, train_acc = 1.000 (3.438 sec/step)
step 85560 	 loss = 0.014, train_acc = 1.000 (3.408 sec/step)
step 85570 	 loss = 0.051, train_acc = 1.000 (3.404 sec/step)
step 85580 	 loss = 0.023, train_acc = 1.000 (3.383 sec/step)
step 85590 	 loss = 0.404, train_acc = 0.900 (3.390 sec/step)
step 85600 	 loss = 0.298, train_acc = 0.900 (3.441 sec/step)
step 85610 	 loss = 0.426, train_acc = 0.900 (3.304 sec/step)
step 85620 	 loss = 0.194, train_acc = 0.900 (3.324 sec/step)
step 85630 	 loss = 0.000, train_acc = 1.000 (3.345 sec/step)
step 85640 	 loss = 0.059, train_acc = 1.000 (3.335 sec/step)
step 85650 	 loss = 0.029, train_acc = 1.000 (3.352 sec/step)
step 85660 	 loss = 0.063, train_acc = 1.000 (3.361 sec/step)
step 85670 	 loss = 0.330, train_acc = 0.900 (3.307 sec/step)
step 85680 	 loss = 0.000, train_acc = 1.000 (3.326 sec/step)
step 85690 	 loss = 0.278, train_acc = 0.800 (3.420 sec/step)
step 85700 	 loss = 0.231, train_acc = 0.900 (3.352 sec/step)
step 85710 	 loss = 0.249, train_acc = 0.900 (3.439 sec/step)
step 85720 	 loss = 0.385, train_acc = 0.800 (3.390 sec/step)
step 85730 	 loss = 0.196, train_acc = 0.900 (3.378 sec/step)
step 85740 	 loss = 0.725, train_acc = 0.700 (3.343 sec/step)
step 85750 	 loss = 0.055, train_acc = 1.000 (3.361 sec/step)
step 85760 	 loss = 0.016, train_acc = 1.000 (3.368 sec/step)
step 85770 	 loss = 0.008, train_acc = 1.000 (3.358 sec/step)
step 85780 	 loss = 0.000, train_acc = 1.000 (3.332 sec/step)
step 85790 	 loss = 0.000, train_acc = 1.000 (3.377 sec/step)
step 85800 	 loss = 0.004, train_acc = 1.000 (3.353 sec/step)
step 85810 	 loss = 0.218, train_acc = 0.800 (3.317 sec/step)
step 85820 	 loss = 0.102, train_acc = 0.900 (3.394 sec/step)
step 85830 	 loss = 0.179, train_acc = 0.900 (3.334 sec/step)
step 85840 	 loss = 0.152, train_acc = 0.900 (3.381 sec/step)
step 85850 	 loss = 0.012, train_acc = 1.000 (3.351 sec/step)
step 85860 	 loss = 0.022, train_acc = 1.000 (3.342 sec/step)
step 85870 	 loss = 0.003, train_acc = 1.000 (3.362 sec/step)
step 85880 	 loss = 0.017, train_acc = 1.000 (3.334 sec/step)
step 85890 	 loss = 0.160, train_acc = 1.000 (3.349 sec/step)
step 85900 	 loss = 0.003, train_acc = 1.000 (3.314 sec/step)
step 85910 	 loss = 0.100, train_acc = 0.900 (3.322 sec/step)
step 85920 	 loss = 0.001, train_acc = 1.000 (3.369 sec/step)
step 85930 	 loss = 0.231, train_acc = 0.900 (3.387 sec/step)
step 85940 	 loss = 0.145, train_acc = 1.000 (3.464 sec/step)
step 85950 	 loss = 0.000, train_acc = 1.000 (3.391 sec/step)
step 85960 	 loss = 0.080, train_acc = 0.900 (3.343 sec/step)
step 85970 	 loss = 0.271, train_acc = 0.800 (3.382 sec/step)
step 85980 	 loss = 0.079, train_acc = 0.900 (3.355 sec/step)
step 85990 	 loss = 0.008, train_acc = 1.000 (3.382 sec/step)
step 86000 	 loss = 0.005, train_acc = 1.000 (3.383 sec/step)
step 86010 	 loss = 0.043, train_acc = 1.000 (3.331 sec/step)
step 86020 	 loss = 0.314, train_acc = 0.800 (3.341 sec/step)
step 86030 	 loss = 0.012, train_acc = 1.000 (3.359 sec/step)
step 86040 	 loss = 0.116, train_acc = 1.000 (3.366 sec/step)
step 86050 	 loss = 0.276, train_acc = 0.900 (3.324 sec/step)
step 86060 	 loss = 0.225, train_acc = 0.900 (3.396 sec/step)
step 86070 	 loss = 0.080, train_acc = 0.900 (3.345 sec/step)
step 86080 	 loss = 0.118, train_acc = 1.000 (3.381 sec/step)
step 86090 	 loss = 0.663, train_acc = 0.800 (3.381 sec/step)
step 86100 	 loss = 0.461, train_acc = 0.800 (3.409 sec/step)
step 86110 	 loss = 0.029, train_acc = 1.000 (3.348 sec/step)
step 86120 	 loss = 0.000, train_acc = 1.000 (3.350 sec/step)
step 86130 	 loss = 0.048, train_acc = 1.000 (3.377 sec/step)
step 86140 	 loss = 0.230, train_acc = 0.900 (3.464 sec/step)
step 86150 	 loss = 0.080, train_acc = 0.900 (3.308 sec/step)
step 86160 	 loss = 0.005, train_acc = 1.000 (3.306 sec/step)
step 86170 	 loss = 0.027, train_acc = 1.000 (3.339 sec/step)
step 86180 	 loss = 0.008, train_acc = 1.000 (3.321 sec/step)
step 86190 	 loss = 0.004, train_acc = 1.000 (3.330 sec/step)
step 86200 	 loss = 0.564, train_acc = 0.700 (3.364 sec/step)
step 86210 	 loss = 0.081, train_acc = 0.900 (3.389 sec/step)
step 86220 	 loss = 0.006, train_acc = 1.000 (3.355 sec/step)
step 86230 	 loss = 0.147, train_acc = 0.900 (3.350 sec/step)
step 86240 	 loss = 0.016, train_acc = 1.000 (3.353 sec/step)
step 86250 	 loss = 0.733, train_acc = 0.900 (3.388 sec/step)
step 86260 	 loss = 0.005, train_acc = 1.000 (3.385 sec/step)
step 86270 	 loss = 0.041, train_acc = 1.000 (3.378 sec/step)
step 86280 	 loss = 0.168, train_acc = 0.900 (3.357 sec/step)
step 86290 	 loss = 0.029, train_acc = 1.000 (3.335 sec/step)
step 86300 	 loss = 0.750, train_acc = 0.900 (3.390 sec/step)
step 86310 	 loss = 0.003, train_acc = 1.000 (3.347 sec/step)
step 86320 	 loss = 0.051, train_acc = 1.000 (3.364 sec/step)
step 86330 	 loss = 0.245, train_acc = 1.000 (3.332 sec/step)
step 86340 	 loss = 0.691, train_acc = 0.900 (3.367 sec/step)
step 86350 	 loss = 0.187, train_acc = 0.900 (3.311 sec/step)
step 86360 	 loss = 0.079, train_acc = 1.000 (3.398 sec/step)
step 86370 	 loss = 0.014, train_acc = 1.000 (3.391 sec/step)
step 86380 	 loss = 0.025, train_acc = 1.000 (3.323 sec/step)
step 86390 	 loss = 0.001, train_acc = 1.000 (3.377 sec/step)
step 86400 	 loss = 0.422, train_acc = 0.900 (3.399 sec/step)
step 86410 	 loss = 0.565, train_acc = 0.800 (3.363 sec/step)
step 86420 	 loss = 0.454, train_acc = 0.800 (3.341 sec/step)
step 86430 	 loss = 0.606, train_acc = 0.900 (3.325 sec/step)
step 86440 	 loss = 0.106, train_acc = 1.000 (3.363 sec/step)
step 86450 	 loss = 0.457, train_acc = 0.800 (3.412 sec/step)
step 86460 	 loss = 0.062, train_acc = 1.000 (3.384 sec/step)
step 86470 	 loss = 0.207, train_acc = 0.800 (3.332 sec/step)
step 86480 	 loss = 0.025, train_acc = 1.000 (3.357 sec/step)
step 86490 	 loss = 0.005, train_acc = 1.000 (3.346 sec/step)
step 86500 	 loss = 0.004, train_acc = 1.000 (3.363 sec/step)
step 86510 	 loss = 0.001, train_acc = 1.000 (3.372 sec/step)
step 86520 	 loss = 0.036, train_acc = 1.000 (3.354 sec/step)
step 86530 	 loss = 0.485, train_acc = 0.700 (3.431 sec/step)
step 86540 	 loss = 0.426, train_acc = 0.800 (3.346 sec/step)
step 86550 	 loss = 0.305, train_acc = 0.900 (3.334 sec/step)
step 86560 	 loss = 0.050, train_acc = 1.000 (3.351 sec/step)
step 86570 	 loss = 0.015, train_acc = 1.000 (3.316 sec/step)
step 86580 	 loss = 0.075, train_acc = 1.000 (3.347 sec/step)
step 86590 	 loss = 0.216, train_acc = 0.900 (3.355 sec/step)
step 86600 	 loss = 0.000, train_acc = 1.000 (3.359 sec/step)
step 86610 	 loss = 0.002, train_acc = 1.000 (3.315 sec/step)
step 86620 	 loss = 0.030, train_acc = 1.000 (3.332 sec/step)
step 86630 	 loss = 0.452, train_acc = 0.800 (3.345 sec/step)
step 86640 	 loss = 0.061, train_acc = 1.000 (3.372 sec/step)
step 86650 	 loss = 0.018, train_acc = 1.000 (3.365 sec/step)
step 86660 	 loss = 0.085, train_acc = 1.000 (3.346 sec/step)
step 86670 	 loss = 0.005, train_acc = 1.000 (3.351 sec/step)
step 86680 	 loss = 0.001, train_acc = 1.000 (3.351 sec/step)
step 86690 	 loss = 0.101, train_acc = 1.000 (3.313 sec/step)
step 86700 	 loss = 0.320, train_acc = 0.900 (3.348 sec/step)
step 86710 	 loss = 0.012, train_acc = 1.000 (3.324 sec/step)
step 86720 	 loss = 0.146, train_acc = 0.900 (3.329 sec/step)
step 86730 	 loss = 0.345, train_acc = 0.900 (3.385 sec/step)
step 86740 	 loss = 0.042, train_acc = 1.000 (3.368 sec/step)
step 86750 	 loss = 0.001, train_acc = 1.000 (3.394 sec/step)
step 86760 	 loss = 0.078, train_acc = 0.900 (3.369 sec/step)
step 86770 	 loss = 0.067, train_acc = 1.000 (3.351 sec/step)
step 86780 	 loss = 0.012, train_acc = 1.000 (3.429 sec/step)
step 86790 	 loss = 0.005, train_acc = 1.000 (3.362 sec/step)
step 86800 	 loss = 0.000, train_acc = 1.000 (3.354 sec/step)
step 86810 	 loss = 0.019, train_acc = 1.000 (3.361 sec/step)
step 86820 	 loss = 0.021, train_acc = 1.000 (3.336 sec/step)
step 86830 	 loss = 0.238, train_acc = 0.900 (3.350 sec/step)
step 86840 	 loss = 0.002, train_acc = 1.000 (3.339 sec/step)
step 86850 	 loss = 0.012, train_acc = 1.000 (3.384 sec/step)
step 86860 	 loss = 0.064, train_acc = 1.000 (3.417 sec/step)
step 86870 	 loss = 0.087, train_acc = 1.000 (3.397 sec/step)
step 86880 	 loss = 0.311, train_acc = 0.900 (3.337 sec/step)
step 86890 	 loss = 0.257, train_acc = 0.800 (3.346 sec/step)
step 86900 	 loss = 0.089, train_acc = 1.000 (3.388 sec/step)
step 86910 	 loss = 0.197, train_acc = 0.900 (3.320 sec/step)
step 86920 	 loss = 0.002, train_acc = 1.000 (3.328 sec/step)
step 86930 	 loss = 0.228, train_acc = 0.900 (3.395 sec/step)
step 86940 	 loss = 0.540, train_acc = 0.900 (3.346 sec/step)
step 86950 	 loss = 1.907, train_acc = 0.500 (3.327 sec/step)
step 86960 	 loss = 0.150, train_acc = 0.900 (3.368 sec/step)
step 86970 	 loss = 0.005, train_acc = 1.000 (3.388 sec/step)
step 86980 	 loss = 0.015, train_acc = 1.000 (3.367 sec/step)
step 86990 	 loss = 0.002, train_acc = 1.000 (3.358 sec/step)
step 87000 	 loss = 0.000, train_acc = 1.000 (3.358 sec/step)
step 87010 	 loss = 0.014, train_acc = 1.000 (3.356 sec/step)
step 87020 	 loss = 0.001, train_acc = 1.000 (3.337 sec/step)
step 87030 	 loss = 3.799, train_acc = 0.900 (3.345 sec/step)
step 87040 	 loss = 0.195, train_acc = 0.900 (3.327 sec/step)
step 87050 	 loss = 0.295, train_acc = 0.800 (3.324 sec/step)
step 87060 	 loss = 0.606, train_acc = 0.900 (3.335 sec/step)
step 87070 	 loss = 0.325, train_acc = 0.900 (3.310 sec/step)
step 87080 	 loss = 0.397, train_acc = 0.800 (3.362 sec/step)
step 87090 	 loss = 0.004, train_acc = 1.000 (3.335 sec/step)
step 87100 	 loss = 0.202, train_acc = 0.900 (3.389 sec/step)
step 87110 	 loss = 0.002, train_acc = 1.000 (3.348 sec/step)
step 87120 	 loss = 1.199, train_acc = 0.800 (3.389 sec/step)
step 87130 	 loss = 0.216, train_acc = 0.900 (3.353 sec/step)
step 87140 	 loss = 0.002, train_acc = 1.000 (3.372 sec/step)
step 87150 	 loss = 0.019, train_acc = 1.000 (3.429 sec/step)
step 87160 	 loss = 0.009, train_acc = 1.000 (3.358 sec/step)
step 87170 	 loss = 1.511, train_acc = 0.900 (3.383 sec/step)
step 87180 	 loss = 0.062, train_acc = 1.000 (3.403 sec/step)
step 87190 	 loss = 0.326, train_acc = 0.900 (3.368 sec/step)
step 87200 	 loss = 0.360, train_acc = 0.900 (3.327 sec/step)
step 87210 	 loss = 0.139, train_acc = 1.000 (3.350 sec/step)
step 87220 	 loss = 0.025, train_acc = 1.000 (3.358 sec/step)
step 87230 	 loss = 0.200, train_acc = 1.000 (3.331 sec/step)
step 87240 	 loss = 0.039, train_acc = 1.000 (3.388 sec/step)
step 87250 	 loss = 0.006, train_acc = 1.000 (3.363 sec/step)
step 87260 	 loss = 0.203, train_acc = 0.900 (3.360 sec/step)
step 87270 	 loss = 0.015, train_acc = 1.000 (3.387 sec/step)
step 87280 	 loss = 0.148, train_acc = 0.900 (3.390 sec/step)
step 87290 	 loss = 0.545, train_acc = 0.800 (3.378 sec/step)
step 87300 	 loss = 1.441, train_acc = 0.900 (3.397 sec/step)
step 87310 	 loss = 0.006, train_acc = 1.000 (3.356 sec/step)
step 87320 	 loss = 0.063, train_acc = 1.000 (3.360 sec/step)
step 87330 	 loss = 0.018, train_acc = 1.000 (3.317 sec/step)
step 87340 	 loss = 0.068, train_acc = 1.000 (3.329 sec/step)
step 87350 	 loss = 0.033, train_acc = 1.000 (3.338 sec/step)
step 87360 	 loss = 0.926, train_acc = 0.700 (3.360 sec/step)
step 87370 	 loss = 0.115, train_acc = 1.000 (3.375 sec/step)
step 87380 	 loss = 0.001, train_acc = 1.000 (3.340 sec/step)
step 87390 	 loss = 0.245, train_acc = 0.900 (3.365 sec/step)
VALIDATION 	 acc = 0.546 (3.616 sec)
step 87400 	 loss = 0.003, train_acc = 1.000 (3.349 sec/step)
step 87410 	 loss = 0.004, train_acc = 1.000 (3.302 sec/step)
step 87420 	 loss = 0.022, train_acc = 1.000 (3.338 sec/step)
step 87430 	 loss = 0.262, train_acc = 0.800 (3.345 sec/step)
step 87440 	 loss = 0.389, train_acc = 0.800 (3.372 sec/step)
step 87450 	 loss = 0.232, train_acc = 0.900 (3.406 sec/step)
step 87460 	 loss = 0.004, train_acc = 1.000 (3.393 sec/step)
step 87470 	 loss = 0.000, train_acc = 1.000 (3.311 sec/step)
step 87480 	 loss = 0.154, train_acc = 0.900 (3.337 sec/step)
step 87490 	 loss = 0.143, train_acc = 0.900 (3.371 sec/step)
step 87500 	 loss = 0.015, train_acc = 1.000 (3.334 sec/step)
step 87510 	 loss = 0.000, train_acc = 1.000 (3.384 sec/step)
step 87520 	 loss = 0.002, train_acc = 1.000 (3.379 sec/step)
step 87530 	 loss = 0.002, train_acc = 1.000 (3.331 sec/step)
step 87540 	 loss = 0.057, train_acc = 1.000 (3.391 sec/step)
step 87550 	 loss = 0.895, train_acc = 0.700 (3.348 sec/step)
step 87560 	 loss = 0.098, train_acc = 1.000 (3.353 sec/step)
step 87570 	 loss = 0.056, train_acc = 1.000 (3.345 sec/step)
step 87580 	 loss = 0.061, train_acc = 1.000 (3.367 sec/step)
step 87590 	 loss = 0.285, train_acc = 0.900 (3.377 sec/step)
step 87600 	 loss = 0.306, train_acc = 0.900 (3.389 sec/step)
step 87610 	 loss = 0.009, train_acc = 1.000 (3.335 sec/step)
step 87620 	 loss = 0.007, train_acc = 1.000 (3.369 sec/step)
step 87630 	 loss = 0.166, train_acc = 0.900 (3.328 sec/step)
step 87640 	 loss = 0.186, train_acc = 0.900 (3.377 sec/step)
step 87650 	 loss = 0.132, train_acc = 1.000 (3.351 sec/step)
step 87660 	 loss = 0.202, train_acc = 0.900 (3.337 sec/step)
step 87670 	 loss = 0.001, train_acc = 1.000 (3.392 sec/step)
step 87680 	 loss = 0.149, train_acc = 0.900 (3.324 sec/step)
step 87690 	 loss = 0.016, train_acc = 1.000 (3.357 sec/step)
step 87700 	 loss = 0.306, train_acc = 0.900 (3.342 sec/step)
step 87710 	 loss = 0.106, train_acc = 0.900 (3.317 sec/step)
step 87720 	 loss = 0.021, train_acc = 1.000 (3.381 sec/step)
step 87730 	 loss = 0.374, train_acc = 0.900 (3.356 sec/step)
step 87740 	 loss = 0.195, train_acc = 0.900 (3.356 sec/step)
step 87750 	 loss = 0.354, train_acc = 0.900 (3.355 sec/step)
step 87760 	 loss = 0.012, train_acc = 1.000 (3.377 sec/step)
step 87770 	 loss = 0.271, train_acc = 0.900 (3.315 sec/step)
step 87780 	 loss = 0.002, train_acc = 1.000 (3.451 sec/step)
step 87790 	 loss = 0.000, train_acc = 1.000 (3.310 sec/step)
step 87800 	 loss = 0.748, train_acc = 0.700 (3.357 sec/step)
step 87810 	 loss = 0.001, train_acc = 1.000 (3.326 sec/step)
step 87820 	 loss = 0.020, train_acc = 1.000 (3.381 sec/step)
step 87830 	 loss = 0.027, train_acc = 1.000 (3.331 sec/step)
step 87840 	 loss = 0.000, train_acc = 1.000 (3.308 sec/step)
step 87850 	 loss = 0.062, train_acc = 1.000 (3.386 sec/step)
step 87860 	 loss = 0.001, train_acc = 1.000 (3.398 sec/step)
step 87870 	 loss = 0.580, train_acc = 0.900 (3.342 sec/step)
step 87880 	 loss = 0.270, train_acc = 0.900 (3.345 sec/step)
step 87890 	 loss = 0.321, train_acc = 0.900 (3.317 sec/step)
step 87900 	 loss = 0.141, train_acc = 0.900 (3.375 sec/step)
step 87910 	 loss = 0.243, train_acc = 0.800 (3.365 sec/step)
step 87920 	 loss = 0.007, train_acc = 1.000 (3.377 sec/step)
step 87930 	 loss = 0.210, train_acc = 0.800 (3.366 sec/step)
step 87940 	 loss = 0.008, train_acc = 1.000 (3.440 sec/step)
step 87950 	 loss = 0.003, train_acc = 1.000 (3.357 sec/step)
step 87960 	 loss = 0.000, train_acc = 1.000 (3.323 sec/step)
step 87970 	 loss = 0.017, train_acc = 1.000 (3.388 sec/step)
step 87980 	 loss = 0.050, train_acc = 1.000 (3.354 sec/step)
step 87990 	 loss = 0.062, train_acc = 1.000 (3.392 sec/step)
step 88000 	 loss = 0.072, train_acc = 1.000 (3.315 sec/step)
step 88010 	 loss = 0.000, train_acc = 1.000 (3.332 sec/step)
step 88020 	 loss = 0.166, train_acc = 0.900 (3.359 sec/step)
step 88030 	 loss = 0.011, train_acc = 1.000 (3.327 sec/step)
step 88040 	 loss = 0.031, train_acc = 1.000 (3.329 sec/step)
step 88050 	 loss = 0.007, train_acc = 1.000 (3.343 sec/step)
step 88060 	 loss = 0.002, train_acc = 1.000 (3.364 sec/step)
step 88070 	 loss = 0.003, train_acc = 1.000 (3.306 sec/step)
step 88080 	 loss = 0.003, train_acc = 1.000 (3.369 sec/step)
step 88090 	 loss = 0.087, train_acc = 1.000 (3.414 sec/step)
step 88100 	 loss = 0.023, train_acc = 1.000 (3.330 sec/step)
step 88110 	 loss = 0.056, train_acc = 1.000 (3.368 sec/step)
step 88120 	 loss = 0.344, train_acc = 0.900 (3.367 sec/step)
step 88130 	 loss = 0.002, train_acc = 1.000 (3.322 sec/step)
step 88140 	 loss = 0.042, train_acc = 1.000 (3.353 sec/step)
step 88150 	 loss = 2.070, train_acc = 0.500 (3.380 sec/step)
step 88160 	 loss = 0.725, train_acc = 0.900 (3.360 sec/step)
step 88170 	 loss = 0.051, train_acc = 1.000 (3.373 sec/step)
step 88180 	 loss = 0.026, train_acc = 1.000 (3.307 sec/step)
step 88190 	 loss = 0.009, train_acc = 1.000 (3.356 sec/step)
step 88200 	 loss = 0.000, train_acc = 1.000 (3.313 sec/step)
step 88210 	 loss = 0.005, train_acc = 1.000 (3.302 sec/step)
step 88220 	 loss = 0.006, train_acc = 1.000 (3.324 sec/step)
step 88230 	 loss = 0.006, train_acc = 1.000 (3.341 sec/step)
step 88240 	 loss = 0.404, train_acc = 0.900 (3.363 sec/step)
step 88250 	 loss = 0.984, train_acc = 0.800 (3.315 sec/step)
step 88260 	 loss = 0.400, train_acc = 0.800 (3.330 sec/step)
step 88270 	 loss = 0.000, train_acc = 1.000 (3.309 sec/step)
step 88280 	 loss = 0.012, train_acc = 1.000 (3.325 sec/step)
step 88290 	 loss = 0.798, train_acc = 0.800 (3.488 sec/step)
step 88300 	 loss = 0.462, train_acc = 0.700 (3.303 sec/step)
step 88310 	 loss = 0.002, train_acc = 1.000 (3.358 sec/step)
step 88320 	 loss = 0.381, train_acc = 0.900 (3.384 sec/step)
step 88330 	 loss = 0.006, train_acc = 1.000 (3.363 sec/step)
step 88340 	 loss = 0.004, train_acc = 1.000 (3.346 sec/step)
step 88350 	 loss = 1.116, train_acc = 0.900 (3.340 sec/step)
step 88360 	 loss = 0.016, train_acc = 1.000 (3.340 sec/step)
step 88370 	 loss = 0.003, train_acc = 1.000 (3.390 sec/step)
step 88380 	 loss = 0.000, train_acc = 1.000 (3.325 sec/step)
step 88390 	 loss = 0.044, train_acc = 1.000 (3.320 sec/step)
step 88400 	 loss = 0.365, train_acc = 0.900 (3.348 sec/step)
step 88410 	 loss = 0.236, train_acc = 0.900 (3.343 sec/step)
step 88420 	 loss = 0.036, train_acc = 1.000 (3.375 sec/step)
step 88430 	 loss = 0.100, train_acc = 0.900 (3.333 sec/step)
step 88440 	 loss = 0.066, train_acc = 1.000 (3.334 sec/step)
step 88450 	 loss = 0.010, train_acc = 1.000 (3.311 sec/step)
step 88460 	 loss = 0.150, train_acc = 1.000 (3.321 sec/step)
step 88470 	 loss = 0.127, train_acc = 1.000 (3.339 sec/step)
step 88480 	 loss = 0.080, train_acc = 1.000 (3.356 sec/step)
step 88490 	 loss = 0.014, train_acc = 1.000 (3.437 sec/step)
step 88500 	 loss = 0.008, train_acc = 1.000 (3.391 sec/step)
step 88510 	 loss = 0.000, train_acc = 1.000 (3.377 sec/step)
step 88520 	 loss = 0.000, train_acc = 1.000 (3.378 sec/step)
step 88530 	 loss = 0.000, train_acc = 1.000 (3.373 sec/step)
step 88540 	 loss = 0.001, train_acc = 1.000 (3.354 sec/step)
step 88550 	 loss = 0.016, train_acc = 1.000 (3.371 sec/step)
step 88560 	 loss = 0.000, train_acc = 1.000 (3.387 sec/step)
step 88570 	 loss = 0.000, train_acc = 1.000 (3.316 sec/step)
step 88580 	 loss = 0.520, train_acc = 0.900 (3.387 sec/step)
step 88590 	 loss = 0.054, train_acc = 1.000 (3.350 sec/step)
step 88600 	 loss = 0.029, train_acc = 1.000 (3.326 sec/step)
step 88610 	 loss = 0.256, train_acc = 0.900 (3.351 sec/step)
step 88620 	 loss = 0.211, train_acc = 0.900 (3.378 sec/step)
step 88630 	 loss = 0.007, train_acc = 1.000 (3.349 sec/step)
step 88640 	 loss = 0.099, train_acc = 0.900 (3.377 sec/step)
step 88650 	 loss = 0.264, train_acc = 0.900 (3.349 sec/step)
step 88660 	 loss = 0.011, train_acc = 1.000 (3.347 sec/step)
step 88670 	 loss = 0.776, train_acc = 0.900 (3.379 sec/step)
step 88680 	 loss = 0.105, train_acc = 0.900 (3.342 sec/step)
step 88690 	 loss = 0.873, train_acc = 0.900 (3.371 sec/step)
step 88700 	 loss = 0.000, train_acc = 1.000 (3.394 sec/step)
step 88710 	 loss = 0.160, train_acc = 0.900 (3.363 sec/step)
step 88720 	 loss = 0.022, train_acc = 1.000 (3.361 sec/step)
step 88730 	 loss = 0.077, train_acc = 1.000 (3.364 sec/step)
step 88740 	 loss = 0.056, train_acc = 1.000 (3.328 sec/step)
step 88750 	 loss = 0.032, train_acc = 1.000 (3.450 sec/step)
step 88760 	 loss = 0.028, train_acc = 1.000 (3.349 sec/step)
step 88770 	 loss = 0.083, train_acc = 0.900 (3.356 sec/step)
step 88780 	 loss = 0.004, train_acc = 1.000 (3.354 sec/step)
step 88790 	 loss = 0.053, train_acc = 1.000 (3.299 sec/step)
step 88800 	 loss = 0.115, train_acc = 1.000 (3.325 sec/step)
step 88810 	 loss = 0.079, train_acc = 1.000 (3.341 sec/step)
step 88820 	 loss = 0.425, train_acc = 0.900 (3.353 sec/step)
step 88830 	 loss = 0.012, train_acc = 1.000 (3.333 sec/step)
step 88840 	 loss = 0.292, train_acc = 1.000 (3.336 sec/step)
step 88850 	 loss = 0.068, train_acc = 1.000 (3.319 sec/step)
step 88860 	 loss = 0.133, train_acc = 0.900 (3.393 sec/step)
step 88870 	 loss = 0.215, train_acc = 0.900 (3.347 sec/step)
step 88880 	 loss = 2.363, train_acc = 0.800 (3.389 sec/step)
step 88890 	 loss = 0.023, train_acc = 1.000 (3.375 sec/step)
step 88900 	 loss = 0.001, train_acc = 1.000 (3.355 sec/step)
step 88910 	 loss = 0.000, train_acc = 1.000 (3.408 sec/step)
step 88920 	 loss = 0.027, train_acc = 1.000 (3.299 sec/step)
step 88930 	 loss = 0.002, train_acc = 1.000 (3.350 sec/step)
step 88940 	 loss = 0.208, train_acc = 0.900 (3.356 sec/step)
step 88950 	 loss = 0.027, train_acc = 1.000 (3.321 sec/step)
step 88960 	 loss = 1.254, train_acc = 0.700 (3.375 sec/step)
step 88970 	 loss = 0.038, train_acc = 1.000 (3.348 sec/step)
step 88980 	 loss = 0.013, train_acc = 1.000 (3.386 sec/step)
step 88990 	 loss = 0.030, train_acc = 1.000 (3.375 sec/step)
step 89000 	 loss = 0.587, train_acc = 0.900 (3.347 sec/step)
step 89010 	 loss = 0.056, train_acc = 1.000 (3.469 sec/step)
step 89020 	 loss = 0.080, train_acc = 0.900 (3.319 sec/step)
step 89030 	 loss = 0.092, train_acc = 1.000 (3.299 sec/step)
step 89040 	 loss = 0.321, train_acc = 1.000 (3.409 sec/step)
step 89050 	 loss = 0.392, train_acc = 0.900 (3.359 sec/step)
step 89060 	 loss = 0.093, train_acc = 0.900 (3.337 sec/step)
step 89070 	 loss = 0.022, train_acc = 1.000 (3.319 sec/step)
step 89080 	 loss = 0.004, train_acc = 1.000 (3.320 sec/step)
step 89090 	 loss = 0.357, train_acc = 0.900 (3.343 sec/step)
step 89100 	 loss = 0.361, train_acc = 0.800 (3.384 sec/step)
step 89110 	 loss = 0.002, train_acc = 1.000 (3.381 sec/step)
step 89120 	 loss = 0.000, train_acc = 1.000 (3.357 sec/step)
step 89130 	 loss = 0.002, train_acc = 1.000 (3.356 sec/step)
step 89140 	 loss = 0.009, train_acc = 1.000 (3.320 sec/step)
step 89150 	 loss = 0.004, train_acc = 1.000 (3.333 sec/step)
step 89160 	 loss = 0.003, train_acc = 1.000 (3.343 sec/step)
step 89170 	 loss = 0.000, train_acc = 1.000 (3.378 sec/step)
step 89180 	 loss = 0.068, train_acc = 1.000 (3.409 sec/step)
step 89190 	 loss = 0.002, train_acc = 1.000 (3.316 sec/step)
step 89200 	 loss = 0.627, train_acc = 0.900 (3.339 sec/step)
step 89210 	 loss = 0.062, train_acc = 1.000 (3.342 sec/step)
step 89220 	 loss = 0.210, train_acc = 0.900 (3.307 sec/step)
step 89230 	 loss = 0.091, train_acc = 0.900 (3.351 sec/step)
step 89240 	 loss = 0.524, train_acc = 0.900 (3.380 sec/step)
step 89250 	 loss = 0.020, train_acc = 1.000 (3.359 sec/step)
step 89260 	 loss = 0.000, train_acc = 1.000 (3.349 sec/step)
step 89270 	 loss = 1.367, train_acc = 0.800 (3.308 sec/step)
step 89280 	 loss = 0.006, train_acc = 1.000 (3.426 sec/step)
step 89290 	 loss = 0.410, train_acc = 0.900 (3.394 sec/step)
VALIDATION 	 acc = 0.525 (3.631 sec)
step 89300 	 loss = 0.247, train_acc = 0.900 (3.386 sec/step)
step 89310 	 loss = 0.023, train_acc = 1.000 (3.323 sec/step)
step 89320 	 loss = 0.030, train_acc = 1.000 (3.345 sec/step)
step 89330 	 loss = 1.750, train_acc = 0.900 (3.455 sec/step)
step 89340 	 loss = 0.072, train_acc = 1.000 (3.347 sec/step)
step 89350 	 loss = 0.120, train_acc = 1.000 (3.347 sec/step)
step 89360 	 loss = 0.172, train_acc = 0.900 (3.307 sec/step)
step 89370 	 loss = 0.009, train_acc = 1.000 (3.327 sec/step)
step 89380 	 loss = 0.895, train_acc = 0.900 (3.398 sec/step)
step 89390 	 loss = 0.072, train_acc = 1.000 (3.382 sec/step)
step 89400 	 loss = 0.017, train_acc = 1.000 (3.329 sec/step)
step 89410 	 loss = 0.123, train_acc = 0.900 (3.376 sec/step)
step 89420 	 loss = 0.001, train_acc = 1.000 (3.332 sec/step)
step 89430 	 loss = 0.000, train_acc = 1.000 (3.385 sec/step)
step 89440 	 loss = 0.000, train_acc = 1.000 (3.327 sec/step)
step 89450 	 loss = 0.396, train_acc = 0.900 (3.344 sec/step)
step 89460 	 loss = 0.112, train_acc = 1.000 (3.328 sec/step)
step 89470 	 loss = 0.072, train_acc = 0.900 (3.335 sec/step)
step 89480 	 loss = 0.011, train_acc = 1.000 (3.365 sec/step)
step 89490 	 loss = 0.001, train_acc = 1.000 (3.347 sec/step)
step 89500 	 loss = 0.734, train_acc = 0.900 (3.343 sec/step)
step 89510 	 loss = 0.935, train_acc = 0.900 (3.317 sec/step)
step 89520 	 loss = 0.455, train_acc = 0.800 (3.351 sec/step)
step 89530 	 loss = 0.672, train_acc = 0.900 (3.403 sec/step)
step 89540 	 loss = 0.142, train_acc = 0.900 (3.352 sec/step)
step 89550 	 loss = 0.001, train_acc = 1.000 (3.373 sec/step)
step 89560 	 loss = 0.001, train_acc = 1.000 (3.342 sec/step)
step 89570 	 loss = 0.018, train_acc = 1.000 (3.336 sec/step)
step 89580 	 loss = 0.012, train_acc = 1.000 (3.379 sec/step)
step 89590 	 loss = 0.013, train_acc = 1.000 (3.340 sec/step)
step 89600 	 loss = 0.088, train_acc = 1.000 (3.315 sec/step)
step 89610 	 loss = 0.046, train_acc = 1.000 (3.353 sec/step)
step 89620 	 loss = 0.039, train_acc = 1.000 (3.374 sec/step)
step 89630 	 loss = 0.100, train_acc = 1.000 (3.349 sec/step)
step 89640 	 loss = 0.008, train_acc = 1.000 (3.398 sec/step)
step 89650 	 loss = 0.041, train_acc = 1.000 (3.411 sec/step)
step 89660 	 loss = 0.022, train_acc = 1.000 (3.314 sec/step)
step 89670 	 loss = 0.000, train_acc = 1.000 (3.341 sec/step)
step 89680 	 loss = 0.000, train_acc = 1.000 (3.337 sec/step)
step 89690 	 loss = 0.000, train_acc = 1.000 (3.377 sec/step)
step 89700 	 loss = 0.275, train_acc = 0.800 (3.371 sec/step)
step 89710 	 loss = 0.500, train_acc = 0.800 (3.299 sec/step)
step 89720 	 loss = 0.313, train_acc = 0.900 (3.382 sec/step)
step 89730 	 loss = 0.284, train_acc = 0.900 (3.383 sec/step)
step 89740 	 loss = 0.011, train_acc = 1.000 (3.338 sec/step)
step 89750 	 loss = 0.003, train_acc = 1.000 (3.387 sec/step)
step 89760 	 loss = 0.084, train_acc = 1.000 (3.355 sec/step)
step 89770 	 loss = 0.349, train_acc = 0.900 (3.361 sec/step)
step 89780 	 loss = 0.015, train_acc = 1.000 (3.488 sec/step)
step 89790 	 loss = 0.092, train_acc = 1.000 (3.324 sec/step)
step 89800 	 loss = 0.067, train_acc = 1.000 (3.342 sec/step)
step 89810 	 loss = 0.017, train_acc = 1.000 (3.378 sec/step)
step 89820 	 loss = 0.013, train_acc = 1.000 (3.354 sec/step)
step 89830 	 loss = 0.170, train_acc = 1.000 (3.328 sec/step)
step 89840 	 loss = 0.009, train_acc = 1.000 (3.345 sec/step)
step 89850 	 loss = 0.044, train_acc = 1.000 (3.377 sec/step)
step 89860 	 loss = 0.197, train_acc = 0.900 (3.345 sec/step)
step 89870 	 loss = 0.048, train_acc = 1.000 (3.343 sec/step)
step 89880 	 loss = 0.355, train_acc = 0.900 (3.306 sec/step)
step 89890 	 loss = 0.015, train_acc = 1.000 (3.344 sec/step)
step 89900 	 loss = 0.221, train_acc = 0.900 (3.375 sec/step)
step 89910 	 loss = 0.010, train_acc = 1.000 (3.388 sec/step)
step 89920 	 loss = 0.134, train_acc = 0.900 (3.341 sec/step)
step 89930 	 loss = 0.516, train_acc = 0.900 (3.409 sec/step)
step 89940 	 loss = 0.929, train_acc = 0.700 (3.348 sec/step)
step 89950 	 loss = 1.144, train_acc = 0.900 (3.357 sec/step)
step 89960 	 loss = 0.112, train_acc = 1.000 (3.399 sec/step)
step 89970 	 loss = 0.043, train_acc = 1.000 (3.398 sec/step)
step 89980 	 loss = 0.020, train_acc = 1.000 (3.363 sec/step)
step 89990 	 loss = 0.200, train_acc = 0.900 (3.349 sec/step)
step 90000 	 loss = 0.765, train_acc = 0.800 (3.375 sec/step)
step 90010 	 loss = 0.067, train_acc = 1.000 (3.332 sec/step)
step 90020 	 loss = 0.369, train_acc = 0.900 (3.368 sec/step)
step 90030 	 loss = 0.256, train_acc = 0.800 (3.330 sec/step)
step 90040 	 loss = 0.238, train_acc = 0.900 (3.316 sec/step)
step 90050 	 loss = 0.000, train_acc = 1.000 (3.374 sec/step)
step 90060 	 loss = 0.270, train_acc = 0.900 (3.330 sec/step)
step 90070 	 loss = 0.001, train_acc = 1.000 (3.395 sec/step)
step 90080 	 loss = 0.003, train_acc = 1.000 (3.404 sec/step)
step 90090 	 loss = 0.077, train_acc = 0.900 (3.368 sec/step)
step 90100 	 loss = 0.001, train_acc = 1.000 (3.338 sec/step)
step 90110 	 loss = 0.007, train_acc = 1.000 (3.403 sec/step)
step 90120 	 loss = 0.138, train_acc = 0.900 (3.363 sec/step)
step 90130 	 loss = 0.039, train_acc = 1.000 (3.385 sec/step)
step 90140 	 loss = 0.030, train_acc = 1.000 (3.360 sec/step)
step 90150 	 loss = 0.982, train_acc = 0.900 (3.331 sec/step)
step 90160 	 loss = 0.066, train_acc = 1.000 (3.369 sec/step)
step 90170 	 loss = 0.077, train_acc = 0.900 (3.393 sec/step)
step 90180 	 loss = 0.012, train_acc = 1.000 (3.291 sec/step)
step 90190 	 loss = 0.375, train_acc = 0.800 (3.343 sec/step)
step 90200 	 loss = 0.004, train_acc = 1.000 (3.396 sec/step)
step 90210 	 loss = 0.000, train_acc = 1.000 (3.357 sec/step)
step 90220 	 loss = 0.241, train_acc = 0.900 (3.403 sec/step)
step 90230 	 loss = 0.013, train_acc = 1.000 (3.389 sec/step)
step 90240 	 loss = 0.023, train_acc = 1.000 (3.407 sec/step)
step 90250 	 loss = 0.001, train_acc = 1.000 (3.343 sec/step)
step 90260 	 loss = 0.035, train_acc = 1.000 (3.311 sec/step)
step 90270 	 loss = 0.000, train_acc = 1.000 (3.325 sec/step)
step 90280 	 loss = 0.097, train_acc = 0.900 (3.322 sec/step)
step 90290 	 loss = 0.959, train_acc = 0.800 (3.355 sec/step)
step 90300 	 loss = 0.596, train_acc = 0.800 (3.414 sec/step)
step 90310 	 loss = 0.667, train_acc = 0.900 (3.374 sec/step)
step 90320 	 loss = 0.105, train_acc = 0.900 (3.309 sec/step)
step 90330 	 loss = 0.473, train_acc = 0.800 (3.330 sec/step)
step 90340 	 loss = 0.055, train_acc = 1.000 (3.350 sec/step)
step 90350 	 loss = 0.000, train_acc = 1.000 (3.449 sec/step)
step 90360 	 loss = 0.017, train_acc = 1.000 (3.363 sec/step)
step 90370 	 loss = 0.118, train_acc = 1.000 (3.388 sec/step)
step 90380 	 loss = 0.072, train_acc = 1.000 (3.380 sec/step)
step 90390 	 loss = 0.000, train_acc = 1.000 (3.378 sec/step)
step 90400 	 loss = 0.374, train_acc = 0.900 (3.321 sec/step)
step 90410 	 loss = 0.017, train_acc = 1.000 (3.326 sec/step)
step 90420 	 loss = 0.048, train_acc = 1.000 (3.334 sec/step)
step 90430 	 loss = 1.225, train_acc = 0.800 (3.320 sec/step)
step 90440 	 loss = 0.082, train_acc = 1.000 (3.354 sec/step)
step 90450 	 loss = 0.005, train_acc = 1.000 (3.357 sec/step)
step 90460 	 loss = 0.000, train_acc = 1.000 (3.369 sec/step)
step 90470 	 loss = 0.005, train_acc = 1.000 (3.422 sec/step)
step 90480 	 loss = 1.927, train_acc = 0.800 (3.407 sec/step)
step 90490 	 loss = 0.714, train_acc = 0.800 (3.348 sec/step)
step 90500 	 loss = 0.047, train_acc = 1.000 (3.451 sec/step)
step 90510 	 loss = 0.412, train_acc = 0.900 (3.361 sec/step)
step 90520 	 loss = 0.307, train_acc = 0.900 (3.371 sec/step)
step 90530 	 loss = 0.005, train_acc = 1.000 (3.453 sec/step)
step 90540 	 loss = 0.012, train_acc = 1.000 (3.305 sec/step)
step 90550 	 loss = 0.008, train_acc = 1.000 (3.357 sec/step)
step 90560 	 loss = 0.506, train_acc = 0.900 (3.327 sec/step)
step 90570 	 loss = 0.058, train_acc = 1.000 (3.334 sec/step)
step 90580 	 loss = 0.015, train_acc = 1.000 (3.393 sec/step)
step 90590 	 loss = 0.033, train_acc = 1.000 (3.375 sec/step)
step 90600 	 loss = 0.000, train_acc = 1.000 (3.325 sec/step)
step 90610 	 loss = 0.057, train_acc = 1.000 (3.321 sec/step)
step 90620 	 loss = 0.001, train_acc = 1.000 (3.360 sec/step)
step 90630 	 loss = 0.000, train_acc = 1.000 (3.377 sec/step)
step 90640 	 loss = 0.577, train_acc = 0.900 (3.317 sec/step)
step 90650 	 loss = 0.207, train_acc = 0.900 (3.305 sec/step)
step 90660 	 loss = 0.228, train_acc = 0.900 (3.406 sec/step)
step 90670 	 loss = 0.000, train_acc = 1.000 (3.338 sec/step)
step 90680 	 loss = 0.981, train_acc = 0.700 (3.337 sec/step)
step 90690 	 loss = 0.524, train_acc = 0.900 (3.386 sec/step)
step 90700 	 loss = 0.215, train_acc = 1.000 (3.350 sec/step)
step 90710 	 loss = 0.056, train_acc = 1.000 (3.361 sec/step)
step 90720 	 loss = 0.210, train_acc = 0.900 (3.332 sec/step)
step 90730 	 loss = 0.212, train_acc = 0.900 (3.360 sec/step)
step 90740 	 loss = 1.112, train_acc = 0.800 (3.389 sec/step)
step 90750 	 loss = 0.582, train_acc = 0.900 (3.352 sec/step)
step 90760 	 loss = 0.691, train_acc = 0.900 (3.397 sec/step)
step 90770 	 loss = 0.006, train_acc = 1.000 (3.382 sec/step)
step 90780 	 loss = 0.000, train_acc = 1.000 (3.357 sec/step)
step 90790 	 loss = 0.290, train_acc = 0.900 (3.332 sec/step)
step 90800 	 loss = 0.096, train_acc = 0.900 (3.346 sec/step)
step 90810 	 loss = 0.304, train_acc = 0.900 (3.395 sec/step)
step 90820 	 loss = 0.003, train_acc = 1.000 (3.301 sec/step)
step 90830 	 loss = 0.261, train_acc = 0.900 (3.367 sec/step)
step 90840 	 loss = 0.667, train_acc = 0.800 (3.348 sec/step)
step 90850 	 loss = 0.702, train_acc = 0.800 (3.338 sec/step)
step 90860 	 loss = 0.045, train_acc = 1.000 (3.383 sec/step)
step 90870 	 loss = 0.019, train_acc = 1.000 (3.311 sec/step)
step 90880 	 loss = 0.419, train_acc = 0.900 (3.393 sec/step)
step 90890 	 loss = 0.006, train_acc = 1.000 (3.382 sec/step)
step 90900 	 loss = 0.017, train_acc = 1.000 (3.402 sec/step)
step 90910 	 loss = 0.046, train_acc = 1.000 (3.430 sec/step)
step 90920 	 loss = 0.000, train_acc = 1.000 (3.363 sec/step)
step 90930 	 loss = 0.267, train_acc = 0.900 (3.337 sec/step)
step 90940 	 loss = 0.060, train_acc = 1.000 (3.357 sec/step)
step 90950 	 loss = 0.387, train_acc = 0.900 (3.334 sec/step)
step 90960 	 loss = 0.031, train_acc = 1.000 (3.324 sec/step)
step 90970 	 loss = 0.016, train_acc = 1.000 (3.318 sec/step)
step 90980 	 loss = 0.033, train_acc = 1.000 (3.325 sec/step)
step 90990 	 loss = 0.001, train_acc = 1.000 (3.330 sec/step)
step 91000 	 loss = 0.000, train_acc = 1.000 (3.405 sec/step)
step 91010 	 loss = 0.699, train_acc = 0.900 (3.354 sec/step)
step 91020 	 loss = 0.029, train_acc = 1.000 (3.354 sec/step)
step 91030 	 loss = 0.332, train_acc = 0.800 (3.354 sec/step)
step 91040 	 loss = 0.077, train_acc = 1.000 (3.349 sec/step)
step 91050 	 loss = 0.499, train_acc = 0.900 (3.362 sec/step)
step 91060 	 loss = 0.107, train_acc = 0.900 (3.354 sec/step)
step 91070 	 loss = 0.033, train_acc = 1.000 (3.386 sec/step)
step 91080 	 loss = 0.066, train_acc = 1.000 (3.349 sec/step)
step 91090 	 loss = 0.510, train_acc = 0.900 (3.342 sec/step)
step 91100 	 loss = 0.051, train_acc = 1.000 (3.327 sec/step)
step 91110 	 loss = 0.553, train_acc = 0.900 (3.365 sec/step)
step 91120 	 loss = 0.160, train_acc = 0.900 (3.308 sec/step)
step 91130 	 loss = 0.299, train_acc = 0.900 (3.317 sec/step)
step 91140 	 loss = 0.114, train_acc = 0.900 (3.478 sec/step)
step 91150 	 loss = 0.036, train_acc = 1.000 (3.366 sec/step)
step 91160 	 loss = 0.061, train_acc = 1.000 (3.368 sec/step)
step 91170 	 loss = 0.099, train_acc = 1.000 (3.311 sec/step)
step 91180 	 loss = 0.107, train_acc = 1.000 (3.306 sec/step)
step 91190 	 loss = 0.560, train_acc = 0.800 (3.337 sec/step)
VALIDATION 	 acc = 0.536 (3.647 sec)
step 91200 	 loss = 0.192, train_acc = 0.900 (3.381 sec/step)
step 91210 	 loss = 0.160, train_acc = 0.900 (3.417 sec/step)
step 91220 	 loss = 0.000, train_acc = 1.000 (3.376 sec/step)
step 91230 	 loss = 0.534, train_acc = 0.900 (3.413 sec/step)
step 91240 	 loss = 0.167, train_acc = 0.900 (3.380 sec/step)
step 91250 	 loss = 1.197, train_acc = 0.900 (3.362 sec/step)
step 91260 	 loss = 0.018, train_acc = 1.000 (3.322 sec/step)
step 91270 	 loss = 0.225, train_acc = 0.900 (3.368 sec/step)
step 91280 	 loss = 0.001, train_acc = 1.000 (3.390 sec/step)
step 91290 	 loss = 0.568, train_acc = 0.900 (3.325 sec/step)
step 91300 	 loss = 0.000, train_acc = 1.000 (3.340 sec/step)
step 91310 	 loss = 0.000, train_acc = 1.000 (3.312 sec/step)
step 91320 	 loss = 0.012, train_acc = 1.000 (3.389 sec/step)
step 91330 	 loss = 0.141, train_acc = 0.900 (3.315 sec/step)
step 91340 	 loss = 0.192, train_acc = 0.900 (3.357 sec/step)
step 91350 	 loss = 0.057, train_acc = 1.000 (3.335 sec/step)
step 91360 	 loss = 0.045, train_acc = 1.000 (3.399 sec/step)
step 91370 	 loss = 0.164, train_acc = 0.900 (3.329 sec/step)
step 91380 	 loss = 0.010, train_acc = 1.000 (3.334 sec/step)
step 91390 	 loss = 0.519, train_acc = 0.900 (3.372 sec/step)
step 91400 	 loss = 0.164, train_acc = 0.900 (3.378 sec/step)
step 91410 	 loss = 0.278, train_acc = 0.800 (3.423 sec/step)
step 91420 	 loss = 1.797, train_acc = 0.700 (3.353 sec/step)
step 91430 	 loss = 0.023, train_acc = 1.000 (3.313 sec/step)
step 91440 	 loss = 0.313, train_acc = 0.900 (3.331 sec/step)
step 91450 	 loss = 0.401, train_acc = 0.900 (3.356 sec/step)
step 91460 	 loss = 0.070, train_acc = 1.000 (3.323 sec/step)
step 91470 	 loss = 0.110, train_acc = 0.900 (3.350 sec/step)
step 91480 	 loss = 0.003, train_acc = 1.000 (3.342 sec/step)
step 91490 	 loss = 0.000, train_acc = 1.000 (3.357 sec/step)
step 91500 	 loss = 0.011, train_acc = 1.000 (3.331 sec/step)
step 91510 	 loss = 0.346, train_acc = 0.900 (3.382 sec/step)
step 91520 	 loss = 0.004, train_acc = 1.000 (3.445 sec/step)
step 91530 	 loss = 0.001, train_acc = 1.000 (3.361 sec/step)
step 91540 	 loss = 0.060, train_acc = 1.000 (3.364 sec/step)
step 91550 	 loss = 0.337, train_acc = 0.900 (3.372 sec/step)
step 91560 	 loss = 0.711, train_acc = 0.800 (3.454 sec/step)
step 91570 	 loss = 0.023, train_acc = 1.000 (3.354 sec/step)
step 91580 	 loss = 0.000, train_acc = 1.000 (3.328 sec/step)
step 91590 	 loss = 0.000, train_acc = 1.000 (3.334 sec/step)
step 91600 	 loss = 1.819, train_acc = 0.900 (3.335 sec/step)
step 91610 	 loss = 0.010, train_acc = 1.000 (3.332 sec/step)
step 91620 	 loss = 0.848, train_acc = 0.800 (3.337 sec/step)
step 91630 	 loss = 0.023, train_acc = 1.000 (3.323 sec/step)
step 91640 	 loss = 0.207, train_acc = 0.900 (3.339 sec/step)
step 91650 	 loss = 0.033, train_acc = 1.000 (3.367 sec/step)
step 91660 	 loss = 0.305, train_acc = 0.800 (3.368 sec/step)
step 91670 	 loss = 0.000, train_acc = 1.000 (3.345 sec/step)
step 91680 	 loss = 0.103, train_acc = 1.000 (3.368 sec/step)
step 91690 	 loss = 0.005, train_acc = 1.000 (3.324 sec/step)
step 91700 	 loss = 0.398, train_acc = 0.900 (3.498 sec/step)
step 91710 	 loss = 0.042, train_acc = 1.000 (3.372 sec/step)
step 91720 	 loss = 0.004, train_acc = 1.000 (3.362 sec/step)
step 91730 	 loss = 0.054, train_acc = 1.000 (3.391 sec/step)
step 91740 	 loss = 0.246, train_acc = 0.900 (3.358 sec/step)
step 91750 	 loss = 0.084, train_acc = 0.900 (3.426 sec/step)
step 91760 	 loss = 0.079, train_acc = 0.900 (3.325 sec/step)
step 91770 	 loss = 0.772, train_acc = 0.900 (3.313 sec/step)
step 91780 	 loss = 0.542, train_acc = 0.800 (3.396 sec/step)
step 91790 	 loss = 0.178, train_acc = 0.900 (3.341 sec/step)
step 91800 	 loss = 0.213, train_acc = 0.900 (3.350 sec/step)
step 91810 	 loss = 0.007, train_acc = 1.000 (3.354 sec/step)
step 91820 	 loss = 0.140, train_acc = 0.900 (3.405 sec/step)
step 91830 	 loss = 0.170, train_acc = 0.900 (3.323 sec/step)
step 91840 	 loss = 0.053, train_acc = 1.000 (3.300 sec/step)
step 91850 	 loss = 0.215, train_acc = 0.900 (3.349 sec/step)
step 91860 	 loss = 0.246, train_acc = 0.900 (3.349 sec/step)
step 91870 	 loss = 0.042, train_acc = 1.000 (3.307 sec/step)
step 91880 	 loss = 0.005, train_acc = 1.000 (3.326 sec/step)
step 91890 	 loss = 0.195, train_acc = 0.900 (3.345 sec/step)
step 91900 	 loss = 0.045, train_acc = 1.000 (3.371 sec/step)
step 91910 	 loss = 0.184, train_acc = 0.900 (3.337 sec/step)
step 91920 	 loss = 0.002, train_acc = 1.000 (3.350 sec/step)
step 91930 	 loss = 0.012, train_acc = 1.000 (3.314 sec/step)
step 91940 	 loss = 0.006, train_acc = 1.000 (3.458 sec/step)
step 91950 	 loss = 0.057, train_acc = 1.000 (3.372 sec/step)
step 91960 	 loss = 0.440, train_acc = 0.900 (3.355 sec/step)
step 91970 	 loss = 0.103, train_acc = 1.000 (3.394 sec/step)
step 91980 	 loss = 0.006, train_acc = 1.000 (3.362 sec/step)
step 91990 	 loss = 0.003, train_acc = 1.000 (3.355 sec/step)
step 92000 	 loss = 0.046, train_acc = 1.000 (3.368 sec/step)
step 92010 	 loss = 0.555, train_acc = 0.900 (3.390 sec/step)
step 92020 	 loss = 0.503, train_acc = 0.900 (3.340 sec/step)
step 92030 	 loss = 0.003, train_acc = 1.000 (3.371 sec/step)
step 92040 	 loss = 0.223, train_acc = 0.900 (3.329 sec/step)
step 92050 	 loss = 0.017, train_acc = 1.000 (3.334 sec/step)
step 92060 	 loss = 0.135, train_acc = 0.900 (3.313 sec/step)
step 92070 	 loss = 0.127, train_acc = 0.900 (3.378 sec/step)
step 92080 	 loss = 0.111, train_acc = 1.000 (3.391 sec/step)
step 92090 	 loss = 0.000, train_acc = 1.000 (3.374 sec/step)
step 92100 	 loss = 0.076, train_acc = 1.000 (3.338 sec/step)
step 92110 	 loss = 0.011, train_acc = 1.000 (3.348 sec/step)
step 92120 	 loss = 0.035, train_acc = 1.000 (3.328 sec/step)
step 92130 	 loss = 0.009, train_acc = 1.000 (3.374 sec/step)
step 92140 	 loss = 0.051, train_acc = 1.000 (3.396 sec/step)
step 92150 	 loss = 0.001, train_acc = 1.000 (3.344 sec/step)
step 92160 	 loss = 0.002, train_acc = 1.000 (3.411 sec/step)
step 92170 	 loss = 0.000, train_acc = 1.000 (3.367 sec/step)
step 92180 	 loss = 0.449, train_acc = 0.800 (3.348 sec/step)
step 92190 	 loss = 0.055, train_acc = 1.000 (3.353 sec/step)
step 92200 	 loss = 0.002, train_acc = 1.000 (3.390 sec/step)
step 92210 	 loss = 0.023, train_acc = 1.000 (3.423 sec/step)
step 92220 	 loss = 0.011, train_acc = 1.000 (3.333 sec/step)
step 92230 	 loss = 0.195, train_acc = 0.900 (3.367 sec/step)
step 92240 	 loss = 0.115, train_acc = 0.900 (3.430 sec/step)
step 92250 	 loss = 0.475, train_acc = 0.900 (3.374 sec/step)
step 92260 	 loss = 0.255, train_acc = 0.900 (3.324 sec/step)
step 92270 	 loss = 0.066, train_acc = 1.000 (3.341 sec/step)
step 92280 	 loss = 0.000, train_acc = 1.000 (3.479 sec/step)
step 92290 	 loss = 0.000, train_acc = 1.000 (3.337 sec/step)
step 92300 	 loss = 0.602, train_acc = 0.800 (3.358 sec/step)
step 92310 	 loss = 0.001, train_acc = 1.000 (3.333 sec/step)
step 92320 	 loss = 0.019, train_acc = 1.000 (3.367 sec/step)
step 92330 	 loss = 0.312, train_acc = 0.900 (3.333 sec/step)
step 92340 	 loss = 1.062, train_acc = 0.900 (3.374 sec/step)
step 92350 	 loss = 0.042, train_acc = 1.000 (3.357 sec/step)
step 92360 	 loss = 0.065, train_acc = 1.000 (3.411 sec/step)
step 92370 	 loss = 0.045, train_acc = 1.000 (3.371 sec/step)
step 92380 	 loss = 0.001, train_acc = 1.000 (3.414 sec/step)
step 92390 	 loss = 0.002, train_acc = 1.000 (3.387 sec/step)
step 92400 	 loss = 0.211, train_acc = 0.900 (3.379 sec/step)
step 92410 	 loss = 0.000, train_acc = 1.000 (3.349 sec/step)
step 92420 	 loss = 0.001, train_acc = 1.000 (3.342 sec/step)
step 92430 	 loss = 0.004, train_acc = 1.000 (3.396 sec/step)
step 92440 	 loss = 0.452, train_acc = 0.900 (3.405 sec/step)
step 92450 	 loss = 0.099, train_acc = 1.000 (3.384 sec/step)
step 92460 	 loss = 0.056, train_acc = 1.000 (3.335 sec/step)
step 92470 	 loss = 0.028, train_acc = 1.000 (3.363 sec/step)
step 92480 	 loss = 0.001, train_acc = 1.000 (3.359 sec/step)
step 92490 	 loss = 1.065, train_acc = 0.900 (3.383 sec/step)
step 92500 	 loss = 0.328, train_acc = 0.900 (3.356 sec/step)
step 92510 	 loss = 0.035, train_acc = 1.000 (3.336 sec/step)
step 92520 	 loss = 0.000, train_acc = 1.000 (3.369 sec/step)
step 92530 	 loss = 0.258, train_acc = 0.900 (3.340 sec/step)
step 92540 	 loss = 0.018, train_acc = 1.000 (3.352 sec/step)
step 92550 	 loss = 0.006, train_acc = 1.000 (3.362 sec/step)
step 92560 	 loss = 0.008, train_acc = 1.000 (3.374 sec/step)
step 92570 	 loss = 0.006, train_acc = 1.000 (3.402 sec/step)
step 92580 	 loss = 3.099, train_acc = 0.800 (3.319 sec/step)
step 92590 	 loss = 1.690, train_acc = 0.400 (3.368 sec/step)
step 92600 	 loss = 0.068, train_acc = 1.000 (3.410 sec/step)
step 92610 	 loss = 0.003, train_acc = 1.000 (3.389 sec/step)
step 92620 	 loss = 0.061, train_acc = 1.000 (3.369 sec/step)
step 92630 	 loss = 0.000, train_acc = 1.000 (3.377 sec/step)
step 92640 	 loss = 0.108, train_acc = 0.900 (3.351 sec/step)
step 92650 	 loss = 0.107, train_acc = 0.900 (3.391 sec/step)
step 92660 	 loss = 0.015, train_acc = 1.000 (3.331 sec/step)
step 92670 	 loss = 0.128, train_acc = 0.900 (3.346 sec/step)
step 92680 	 loss = 0.040, train_acc = 1.000 (3.331 sec/step)
step 92690 	 loss = 0.005, train_acc = 1.000 (3.344 sec/step)
step 92700 	 loss = 0.008, train_acc = 1.000 (3.357 sec/step)
step 92710 	 loss = 0.377, train_acc = 0.900 (3.353 sec/step)
step 92720 	 loss = 0.229, train_acc = 0.900 (3.360 sec/step)
step 92730 	 loss = 0.004, train_acc = 1.000 (3.313 sec/step)
step 92740 	 loss = 0.008, train_acc = 1.000 (3.332 sec/step)
step 92750 	 loss = 0.058, train_acc = 1.000 (3.354 sec/step)
step 92760 	 loss = 0.000, train_acc = 1.000 (3.354 sec/step)
step 92770 	 loss = 0.043, train_acc = 1.000 (3.344 sec/step)
step 92780 	 loss = 0.006, train_acc = 1.000 (3.339 sec/step)
step 92790 	 loss = 1.034, train_acc = 0.800 (3.340 sec/step)
step 92800 	 loss = 0.121, train_acc = 0.900 (3.342 sec/step)
step 92810 	 loss = 0.035, train_acc = 1.000 (3.338 sec/step)
step 92820 	 loss = 0.496, train_acc = 0.800 (3.349 sec/step)
step 92830 	 loss = 0.520, train_acc = 0.800 (3.353 sec/step)
step 92840 	 loss = 0.293, train_acc = 0.900 (3.384 sec/step)
step 92850 	 loss = 0.001, train_acc = 1.000 (3.357 sec/step)
step 92860 	 loss = 0.171, train_acc = 1.000 (3.310 sec/step)
step 92870 	 loss = 0.010, train_acc = 1.000 (3.409 sec/step)
step 92880 	 loss = 0.000, train_acc = 1.000 (3.427 sec/step)
step 92890 	 loss = 0.000, train_acc = 1.000 (3.373 sec/step)
step 92900 	 loss = 0.000, train_acc = 1.000 (3.362 sec/step)
step 92910 	 loss = 0.001, train_acc = 1.000 (3.305 sec/step)
step 92920 	 loss = 0.097, train_acc = 1.000 (3.309 sec/step)
step 92930 	 loss = 0.044, train_acc = 1.000 (3.366 sec/step)
step 92940 	 loss = 0.008, train_acc = 1.000 (3.382 sec/step)
step 92950 	 loss = 0.114, train_acc = 0.900 (3.354 sec/step)
step 92960 	 loss = 0.000, train_acc = 1.000 (3.385 sec/step)
step 92970 	 loss = 0.000, train_acc = 1.000 (3.378 sec/step)
step 92980 	 loss = 0.030, train_acc = 1.000 (3.328 sec/step)
step 92990 	 loss = 0.000, train_acc = 1.000 (3.324 sec/step)
step 93000 	 loss = 0.021, train_acc = 1.000 (3.363 sec/step)
step 93010 	 loss = 0.209, train_acc = 0.900 (3.372 sec/step)
step 93020 	 loss = 0.005, train_acc = 1.000 (3.400 sec/step)
step 93030 	 loss = 0.227, train_acc = 0.800 (3.431 sec/step)
step 93040 	 loss = 0.378, train_acc = 0.900 (3.412 sec/step)
step 93050 	 loss = 0.000, train_acc = 1.000 (3.372 sec/step)
step 93060 	 loss = 0.000, train_acc = 1.000 (3.340 sec/step)
step 93070 	 loss = 0.408, train_acc = 0.900 (3.336 sec/step)
step 93080 	 loss = 0.250, train_acc = 0.900 (3.334 sec/step)
step 93090 	 loss = 0.162, train_acc = 0.900 (3.374 sec/step)
VALIDATION 	 acc = 0.530 (3.619 sec)
step 93100 	 loss = 0.268, train_acc = 0.900 (3.385 sec/step)
step 93110 	 loss = 0.108, train_acc = 1.000 (3.375 sec/step)
step 93120 	 loss = 2.377, train_acc = 0.900 (3.364 sec/step)
step 93130 	 loss = 0.047, train_acc = 1.000 (3.397 sec/step)
step 93140 	 loss = 0.002, train_acc = 1.000 (3.356 sec/step)
step 93150 	 loss = 0.580, train_acc = 0.900 (3.344 sec/step)
step 93160 	 loss = 0.000, train_acc = 1.000 (3.417 sec/step)
step 93170 	 loss = 0.035, train_acc = 1.000 (3.401 sec/step)
step 93180 	 loss = 0.000, train_acc = 1.000 (3.347 sec/step)
step 93190 	 loss = 0.024, train_acc = 1.000 (3.321 sec/step)
step 93200 	 loss = 0.070, train_acc = 1.000 (3.433 sec/step)
step 93210 	 loss = 0.001, train_acc = 1.000 (3.370 sec/step)
step 93220 	 loss = 0.026, train_acc = 1.000 (3.345 sec/step)
step 93230 	 loss = 0.045, train_acc = 1.000 (3.425 sec/step)
step 93240 	 loss = 0.007, train_acc = 1.000 (3.339 sec/step)
step 93250 	 loss = 0.092, train_acc = 1.000 (3.382 sec/step)
step 93260 	 loss = 0.161, train_acc = 0.900 (3.396 sec/step)
step 93270 	 loss = 0.190, train_acc = 0.900 (3.370 sec/step)
step 93280 	 loss = 0.447, train_acc = 0.800 (3.388 sec/step)
step 93290 	 loss = 0.080, train_acc = 0.900 (3.363 sec/step)
step 93300 	 loss = 0.006, train_acc = 1.000 (3.336 sec/step)
step 93310 	 loss = 0.018, train_acc = 1.000 (3.393 sec/step)
step 93320 	 loss = 0.104, train_acc = 1.000 (3.359 sec/step)
step 93330 	 loss = 0.025, train_acc = 1.000 (3.315 sec/step)
step 93340 	 loss = 0.048, train_acc = 1.000 (3.498 sec/step)
step 93350 	 loss = 0.000, train_acc = 1.000 (3.300 sec/step)
step 93360 	 loss = 0.762, train_acc = 0.800 (3.393 sec/step)
step 93370 	 loss = 0.012, train_acc = 1.000 (3.347 sec/step)
step 93380 	 loss = 0.143, train_acc = 1.000 (3.354 sec/step)
step 93390 	 loss = 0.022, train_acc = 1.000 (3.319 sec/step)
step 93400 	 loss = 0.390, train_acc = 0.800 (3.348 sec/step)
step 93410 	 loss = 0.027, train_acc = 1.000 (3.358 sec/step)
step 93420 	 loss = 0.007, train_acc = 1.000 (3.354 sec/step)
step 93430 	 loss = 0.001, train_acc = 1.000 (3.344 sec/step)
step 93440 	 loss = 0.003, train_acc = 1.000 (3.359 sec/step)
step 93450 	 loss = 0.440, train_acc = 0.900 (3.403 sec/step)
step 93460 	 loss = 0.016, train_acc = 1.000 (3.376 sec/step)
step 93470 	 loss = 0.014, train_acc = 1.000 (3.349 sec/step)
step 93480 	 loss = 0.006, train_acc = 1.000 (3.345 sec/step)
step 93490 	 loss = 0.180, train_acc = 0.900 (3.434 sec/step)
step 93500 	 loss = 0.374, train_acc = 0.700 (3.370 sec/step)
step 93510 	 loss = 0.001, train_acc = 1.000 (3.338 sec/step)
step 93520 	 loss = 0.000, train_acc = 1.000 (3.365 sec/step)
step 93530 	 loss = 0.123, train_acc = 0.900 (3.398 sec/step)
step 93540 	 loss = 0.035, train_acc = 1.000 (3.333 sec/step)
step 93550 	 loss = 0.006, train_acc = 1.000 (3.361 sec/step)
step 93560 	 loss = 0.382, train_acc = 0.800 (3.351 sec/step)
step 93570 	 loss = 0.012, train_acc = 1.000 (3.417 sec/step)
step 93580 	 loss = 0.004, train_acc = 1.000 (3.375 sec/step)
step 93590 	 loss = 0.022, train_acc = 1.000 (3.349 sec/step)
step 93600 	 loss = 0.000, train_acc = 1.000 (3.407 sec/step)
step 93610 	 loss = 0.561, train_acc = 0.900 (3.334 sec/step)
step 93620 	 loss = 0.186, train_acc = 1.000 (3.359 sec/step)
step 93630 	 loss = 0.206, train_acc = 0.900 (3.416 sec/step)
step 93640 	 loss = 0.592, train_acc = 0.800 (3.393 sec/step)
step 93650 	 loss = 1.064, train_acc = 0.900 (3.305 sec/step)
step 93660 	 loss = 0.002, train_acc = 1.000 (3.335 sec/step)
step 93670 	 loss = 0.285, train_acc = 0.900 (3.381 sec/step)
step 93680 	 loss = 0.501, train_acc = 0.800 (3.357 sec/step)
step 93690 	 loss = 0.003, train_acc = 1.000 (3.408 sec/step)
step 93700 	 loss = 0.031, train_acc = 1.000 (3.370 sec/step)
step 93710 	 loss = 0.021, train_acc = 1.000 (3.423 sec/step)
step 93720 	 loss = 1.496, train_acc = 0.900 (3.370 sec/step)
step 93730 	 loss = 0.040, train_acc = 1.000 (3.415 sec/step)
step 93740 	 loss = 0.004, train_acc = 1.000 (3.394 sec/step)
step 93750 	 loss = 0.643, train_acc = 0.900 (3.321 sec/step)
step 93760 	 loss = 0.597, train_acc = 0.900 (3.361 sec/step)
step 93770 	 loss = 0.023, train_acc = 1.000 (3.344 sec/step)
step 93780 	 loss = 0.596, train_acc = 0.800 (3.369 sec/step)
step 93790 	 loss = 0.065, train_acc = 1.000 (3.489 sec/step)
step 93800 	 loss = 0.319, train_acc = 0.900 (3.315 sec/step)
step 93810 	 loss = 0.011, train_acc = 1.000 (3.342 sec/step)
step 93820 	 loss = 0.302, train_acc = 1.000 (3.345 sec/step)
step 93830 	 loss = 0.001, train_acc = 1.000 (3.359 sec/step)
step 93840 	 loss = 0.010, train_acc = 1.000 (3.319 sec/step)
step 93850 	 loss = 0.101, train_acc = 1.000 (3.319 sec/step)
step 93860 	 loss = 0.149, train_acc = 1.000 (3.369 sec/step)
step 93870 	 loss = 0.933, train_acc = 0.800 (3.370 sec/step)
step 93880 	 loss = 0.001, train_acc = 1.000 (3.373 sec/step)
step 93890 	 loss = 0.957, train_acc = 0.900 (3.385 sec/step)
step 93900 	 loss = 0.121, train_acc = 1.000 (3.374 sec/step)
step 93910 	 loss = 0.003, train_acc = 1.000 (3.391 sec/step)
step 93920 	 loss = 0.001, train_acc = 1.000 (3.411 sec/step)
step 93930 	 loss = 0.076, train_acc = 1.000 (3.326 sec/step)
step 93940 	 loss = 0.004, train_acc = 1.000 (3.367 sec/step)
step 93950 	 loss = 0.725, train_acc = 0.700 (3.393 sec/step)
step 93960 	 loss = 0.001, train_acc = 1.000 (3.316 sec/step)
step 93970 	 loss = 0.207, train_acc = 0.900 (3.457 sec/step)
step 93980 	 loss = 0.089, train_acc = 0.900 (3.387 sec/step)
step 93990 	 loss = 0.000, train_acc = 1.000 (3.370 sec/step)
step 94000 	 loss = 0.011, train_acc = 1.000 (3.385 sec/step)
step 94010 	 loss = 0.051, train_acc = 1.000 (3.398 sec/step)
step 94020 	 loss = 0.000, train_acc = 1.000 (3.396 sec/step)
step 94030 	 loss = 0.168, train_acc = 0.900 (3.344 sec/step)
step 94040 	 loss = 0.428, train_acc = 0.800 (3.387 sec/step)
step 94050 	 loss = 0.034, train_acc = 1.000 (3.406 sec/step)
step 94060 	 loss = 1.102, train_acc = 0.900 (3.351 sec/step)
step 94070 	 loss = 0.421, train_acc = 0.900 (3.359 sec/step)
step 94080 	 loss = 0.000, train_acc = 1.000 (3.376 sec/step)
step 94090 	 loss = 0.069, train_acc = 1.000 (3.359 sec/step)
step 94100 	 loss = 0.814, train_acc = 0.800 (3.388 sec/step)
step 94110 	 loss = 0.057, train_acc = 1.000 (3.369 sec/step)
step 94120 	 loss = 0.050, train_acc = 1.000 (3.437 sec/step)
step 94130 	 loss = 0.000, train_acc = 1.000 (3.416 sec/step)
step 94140 	 loss = 0.000, train_acc = 1.000 (3.381 sec/step)
step 94150 	 loss = 0.089, train_acc = 1.000 (3.357 sec/step)
step 94160 	 loss = 0.007, train_acc = 1.000 (3.349 sec/step)
step 94170 	 loss = 0.060, train_acc = 1.000 (3.390 sec/step)
step 94180 	 loss = 0.052, train_acc = 1.000 (3.319 sec/step)
step 94190 	 loss = 0.006, train_acc = 1.000 (3.376 sec/step)
step 94200 	 loss = 0.158, train_acc = 1.000 (3.359 sec/step)
step 94210 	 loss = 0.004, train_acc = 1.000 (3.329 sec/step)
step 94220 	 loss = 0.000, train_acc = 1.000 (3.386 sec/step)
step 94230 	 loss = 0.654, train_acc = 0.900 (3.392 sec/step)
step 94240 	 loss = 0.007, train_acc = 1.000 (3.338 sec/step)
step 94250 	 loss = 0.247, train_acc = 0.900 (3.325 sec/step)
step 94260 	 loss = 0.012, train_acc = 1.000 (3.419 sec/step)
step 94270 	 loss = 1.301, train_acc = 0.700 (3.382 sec/step)
step 94280 	 loss = 0.007, train_acc = 1.000 (3.389 sec/step)
step 94290 	 loss = 0.234, train_acc = 0.900 (3.346 sec/step)
step 94300 	 loss = 0.023, train_acc = 1.000 (3.359 sec/step)
step 94310 	 loss = 0.000, train_acc = 1.000 (3.377 sec/step)
step 94320 	 loss = 0.009, train_acc = 1.000 (3.448 sec/step)
step 94330 	 loss = 0.021, train_acc = 1.000 (3.390 sec/step)
step 94340 	 loss = 0.181, train_acc = 0.900 (3.359 sec/step)
step 94350 	 loss = 0.000, train_acc = 1.000 (3.359 sec/step)
step 94360 	 loss = 0.206, train_acc = 0.900 (3.333 sec/step)
step 94370 	 loss = 0.480, train_acc = 0.800 (3.349 sec/step)
step 94380 	 loss = 1.140, train_acc = 0.800 (3.401 sec/step)
step 94390 	 loss = 0.364, train_acc = 0.800 (3.387 sec/step)
step 94400 	 loss = 0.009, train_acc = 1.000 (3.368 sec/step)
step 94410 	 loss = 0.923, train_acc = 0.900 (3.351 sec/step)
step 94420 	 loss = 0.157, train_acc = 0.900 (3.329 sec/step)
step 94430 	 loss = 1.186, train_acc = 0.900 (3.374 sec/step)
step 94440 	 loss = 0.463, train_acc = 0.800 (3.351 sec/step)
step 94450 	 loss = 0.336, train_acc = 0.900 (3.362 sec/step)
step 94460 	 loss = 0.014, train_acc = 1.000 (3.347 sec/step)
step 94470 	 loss = 0.598, train_acc = 0.800 (3.465 sec/step)
step 94480 	 loss = 0.008, train_acc = 1.000 (3.372 sec/step)
step 94490 	 loss = 0.234, train_acc = 0.800 (3.359 sec/step)
step 94500 	 loss = 0.613, train_acc = 0.900 (3.362 sec/step)
step 94510 	 loss = 0.112, train_acc = 0.900 (3.346 sec/step)
step 94520 	 loss = 0.400, train_acc = 0.900 (3.373 sec/step)
step 94530 	 loss = 0.351, train_acc = 0.900 (3.327 sec/step)
step 94540 	 loss = 0.222, train_acc = 0.900 (3.338 sec/step)
step 94550 	 loss = 0.032, train_acc = 1.000 (3.340 sec/step)
step 94560 	 loss = 0.000, train_acc = 1.000 (3.359 sec/step)
step 94570 	 loss = 0.042, train_acc = 1.000 (3.339 sec/step)
step 94580 	 loss = 0.150, train_acc = 0.900 (3.338 sec/step)
step 94590 	 loss = 0.103, train_acc = 1.000 (3.355 sec/step)
step 94600 	 loss = 0.150, train_acc = 0.900 (3.344 sec/step)
step 94610 	 loss = 0.028, train_acc = 1.000 (3.395 sec/step)
step 94620 	 loss = 0.372, train_acc = 0.900 (3.349 sec/step)
step 94630 	 loss = 0.001, train_acc = 1.000 (3.375 sec/step)
step 94640 	 loss = 0.239, train_acc = 0.900 (3.383 sec/step)
step 94650 	 loss = 0.002, train_acc = 1.000 (3.361 sec/step)
step 94660 	 loss = 0.005, train_acc = 1.000 (3.379 sec/step)
step 94670 	 loss = 0.002, train_acc = 1.000 (3.392 sec/step)
step 94680 	 loss = 0.001, train_acc = 1.000 (3.329 sec/step)
step 94690 	 loss = 0.015, train_acc = 1.000 (3.351 sec/step)
step 94700 	 loss = 0.097, train_acc = 0.900 (3.359 sec/step)
step 94710 	 loss = 0.015, train_acc = 1.000 (3.346 sec/step)
step 94720 	 loss = 0.028, train_acc = 1.000 (3.327 sec/step)
step 94730 	 loss = 1.136, train_acc = 0.800 (3.367 sec/step)
step 94740 	 loss = 0.452, train_acc = 0.900 (3.399 sec/step)
step 94750 	 loss = 0.298, train_acc = 0.900 (3.344 sec/step)
step 94760 	 loss = 0.388, train_acc = 0.900 (3.362 sec/step)
step 94770 	 loss = 0.004, train_acc = 1.000 (3.374 sec/step)
step 94780 	 loss = 0.003, train_acc = 1.000 (3.365 sec/step)
step 94790 	 loss = 0.006, train_acc = 1.000 (3.333 sec/step)
step 94800 	 loss = 0.036, train_acc = 1.000 (3.350 sec/step)
step 94810 	 loss = 0.017, train_acc = 1.000 (3.367 sec/step)
step 94820 	 loss = 0.001, train_acc = 1.000 (3.339 sec/step)
step 94830 	 loss = 0.001, train_acc = 1.000 (3.423 sec/step)
step 94840 	 loss = 0.000, train_acc = 1.000 (3.389 sec/step)
step 94850 	 loss = 0.000, train_acc = 1.000 (3.375 sec/step)
step 94860 	 loss = 0.431, train_acc = 0.900 (3.352 sec/step)
step 94870 	 loss = 0.137, train_acc = 0.900 (3.369 sec/step)
step 94880 	 loss = 0.238, train_acc = 0.900 (3.385 sec/step)
step 94890 	 loss = 0.026, train_acc = 1.000 (3.390 sec/step)
step 94900 	 loss = 0.085, train_acc = 1.000 (3.366 sec/step)
step 94910 	 loss = 0.173, train_acc = 0.900 (3.379 sec/step)
step 94920 	 loss = 0.633, train_acc = 0.900 (3.403 sec/step)
step 94930 	 loss = 0.188, train_acc = 0.900 (3.374 sec/step)
step 94940 	 loss = 0.191, train_acc = 0.900 (3.331 sec/step)
step 94950 	 loss = 0.001, train_acc = 1.000 (3.366 sec/step)
step 94960 	 loss = 0.690, train_acc = 0.900 (3.374 sec/step)
step 94970 	 loss = 0.145, train_acc = 1.000 (3.373 sec/step)
step 94980 	 loss = 0.152, train_acc = 0.900 (3.412 sec/step)
step 94990 	 loss = 0.307, train_acc = 0.900 (3.355 sec/step)
VALIDATION 	 acc = 0.531 (3.659 sec)
step 95000 	 loss = 0.541, train_acc = 0.900 (3.357 sec/step)
step 95010 	 loss = 0.008, train_acc = 1.000 (3.390 sec/step)
step 95020 	 loss = 0.027, train_acc = 1.000 (3.348 sec/step)
step 95030 	 loss = 0.594, train_acc = 0.700 (3.359 sec/step)
step 95040 	 loss = 0.005, train_acc = 1.000 (3.400 sec/step)
step 95050 	 loss = 0.088, train_acc = 0.900 (3.338 sec/step)
step 95060 	 loss = 0.019, train_acc = 1.000 (3.346 sec/step)
step 95070 	 loss = 0.021, train_acc = 1.000 (3.375 sec/step)
step 95080 	 loss = 0.000, train_acc = 1.000 (3.385 sec/step)
step 95090 	 loss = 0.001, train_acc = 1.000 (3.342 sec/step)
step 95100 	 loss = 0.383, train_acc = 0.800 (3.344 sec/step)
step 95110 	 loss = 1.527, train_acc = 0.900 (3.419 sec/step)
step 95120 	 loss = 0.628, train_acc = 0.900 (3.377 sec/step)
step 95130 	 loss = 0.077, train_acc = 1.000 (3.383 sec/step)
step 95140 	 loss = 0.024, train_acc = 1.000 (3.320 sec/step)
step 95150 	 loss = 1.150, train_acc = 0.900 (3.373 sec/step)
step 95160 	 loss = 0.338, train_acc = 0.800 (3.418 sec/step)
step 95170 	 loss = 0.000, train_acc = 1.000 (3.366 sec/step)
step 95180 	 loss = 0.648, train_acc = 0.900 (3.342 sec/step)
step 95190 	 loss = 0.000, train_acc = 1.000 (3.358 sec/step)
step 95200 	 loss = 0.014, train_acc = 1.000 (3.339 sec/step)
step 95210 	 loss = 0.051, train_acc = 1.000 (3.337 sec/step)
step 95220 	 loss = 0.000, train_acc = 1.000 (3.356 sec/step)
step 95230 	 loss = 0.074, train_acc = 1.000 (3.341 sec/step)
step 95240 	 loss = 1.468, train_acc = 0.900 (3.323 sec/step)
step 95250 	 loss = 0.048, train_acc = 1.000 (3.387 sec/step)
step 95260 	 loss = 0.158, train_acc = 0.900 (3.399 sec/step)
step 95270 	 loss = 0.021, train_acc = 1.000 (3.364 sec/step)
step 95280 	 loss = 0.018, train_acc = 1.000 (3.398 sec/step)
step 95290 	 loss = 0.225, train_acc = 0.900 (3.364 sec/step)
step 95300 	 loss = 0.235, train_acc = 0.900 (3.375 sec/step)
step 95310 	 loss = 0.042, train_acc = 1.000 (3.373 sec/step)
step 95320 	 loss = 0.173, train_acc = 0.800 (3.379 sec/step)
step 95330 	 loss = 1.579, train_acc = 0.900 (3.334 sec/step)
step 95340 	 loss = 0.045, train_acc = 1.000 (3.322 sec/step)
step 95350 	 loss = 0.137, train_acc = 0.900 (3.386 sec/step)
step 95360 	 loss = 0.013, train_acc = 1.000 (3.386 sec/step)
step 95370 	 loss = 0.011, train_acc = 1.000 (3.320 sec/step)
step 95380 	 loss = 0.023, train_acc = 1.000 (3.501 sec/step)
step 95390 	 loss = 0.319, train_acc = 0.900 (3.317 sec/step)
step 95400 	 loss = 0.797, train_acc = 0.800 (3.489 sec/step)
step 95410 	 loss = 0.704, train_acc = 0.800 (3.334 sec/step)
step 95420 	 loss = 1.111, train_acc = 0.900 (3.419 sec/step)
step 95430 	 loss = 0.069, train_acc = 1.000 (3.374 sec/step)
step 95440 	 loss = 0.000, train_acc = 1.000 (3.448 sec/step)
step 95450 	 loss = 0.001, train_acc = 1.000 (3.334 sec/step)
step 95460 	 loss = 0.006, train_acc = 1.000 (3.345 sec/step)
step 95470 	 loss = 0.000, train_acc = 1.000 (3.320 sec/step)
step 95480 	 loss = 0.162, train_acc = 0.900 (3.331 sec/step)
step 95490 	 loss = 0.083, train_acc = 1.000 (3.329 sec/step)
step 95500 	 loss = 0.042, train_acc = 1.000 (3.315 sec/step)
step 95510 	 loss = 0.016, train_acc = 1.000 (3.348 sec/step)
step 95520 	 loss = 0.026, train_acc = 1.000 (3.446 sec/step)
step 95530 	 loss = 0.375, train_acc = 0.900 (3.372 sec/step)
step 95540 	 loss = 0.952, train_acc = 0.800 (3.365 sec/step)
step 95550 	 loss = 0.001, train_acc = 1.000 (3.330 sec/step)
step 95560 	 loss = 0.174, train_acc = 0.900 (3.354 sec/step)
step 95570 	 loss = 0.061, train_acc = 1.000 (3.355 sec/step)
step 95580 	 loss = 0.034, train_acc = 1.000 (3.359 sec/step)
step 95590 	 loss = 0.048, train_acc = 1.000 (3.412 sec/step)
step 95600 	 loss = 0.594, train_acc = 0.800 (3.371 sec/step)
step 95610 	 loss = 0.011, train_acc = 1.000 (3.370 sec/step)
step 95620 	 loss = 0.031, train_acc = 1.000 (3.363 sec/step)
step 95630 	 loss = 0.052, train_acc = 1.000 (3.342 sec/step)
step 95640 	 loss = 0.405, train_acc = 0.900 (3.414 sec/step)
step 95650 	 loss = 0.011, train_acc = 1.000 (3.358 sec/step)
step 95660 	 loss = 0.025, train_acc = 1.000 (3.349 sec/step)
step 95670 	 loss = 0.420, train_acc = 0.900 (3.307 sec/step)
step 95680 	 loss = 0.000, train_acc = 1.000 (3.353 sec/step)
step 95690 	 loss = 0.013, train_acc = 1.000 (3.364 sec/step)
step 95700 	 loss = 0.209, train_acc = 0.800 (3.329 sec/step)
step 95710 	 loss = 0.005, train_acc = 1.000 (3.371 sec/step)
step 95720 	 loss = 0.307, train_acc = 0.900 (3.319 sec/step)
step 95730 	 loss = 0.638, train_acc = 0.800 (3.469 sec/step)
step 95740 	 loss = 0.013, train_acc = 1.000 (3.358 sec/step)
step 95750 	 loss = 0.009, train_acc = 1.000 (3.376 sec/step)
step 95760 	 loss = 0.139, train_acc = 0.900 (3.383 sec/step)
step 95770 	 loss = 0.027, train_acc = 1.000 (3.342 sec/step)
step 95780 	 loss = 0.012, train_acc = 1.000 (3.392 sec/step)
step 95790 	 loss = 0.191, train_acc = 0.800 (3.324 sec/step)
step 95800 	 loss = 0.098, train_acc = 0.900 (3.317 sec/step)
step 95810 	 loss = 0.213, train_acc = 0.900 (3.425 sec/step)
step 95820 	 loss = 0.006, train_acc = 1.000 (3.436 sec/step)
step 95830 	 loss = 0.009, train_acc = 1.000 (3.420 sec/step)
step 95840 	 loss = 0.082, train_acc = 1.000 (3.365 sec/step)
step 95850 	 loss = 0.034, train_acc = 1.000 (3.357 sec/step)
step 95860 	 loss = 0.037, train_acc = 1.000 (3.408 sec/step)
step 95870 	 loss = 0.011, train_acc = 1.000 (3.317 sec/step)
step 95880 	 loss = 0.024, train_acc = 1.000 (3.343 sec/step)
step 95890 	 loss = 0.075, train_acc = 1.000 (3.312 sec/step)
step 95900 	 loss = 0.000, train_acc = 1.000 (3.364 sec/step)
step 95910 	 loss = 0.012, train_acc = 1.000 (3.388 sec/step)
step 95920 	 loss = 0.000, train_acc = 1.000 (3.343 sec/step)
step 95930 	 loss = 0.016, train_acc = 1.000 (3.370 sec/step)
step 95940 	 loss = 0.096, train_acc = 0.900 (3.386 sec/step)
step 95950 	 loss = 0.002, train_acc = 1.000 (3.337 sec/step)
step 95960 	 loss = 0.002, train_acc = 1.000 (3.330 sec/step)
step 95970 	 loss = 0.004, train_acc = 1.000 (3.479 sec/step)
step 95980 	 loss = 0.051, train_acc = 1.000 (3.354 sec/step)
step 95990 	 loss = 0.105, train_acc = 0.900 (3.368 sec/step)
step 96000 	 loss = 0.021, train_acc = 1.000 (3.377 sec/step)
step 96010 	 loss = 0.000, train_acc = 1.000 (3.304 sec/step)
step 96020 	 loss = 0.025, train_acc = 1.000 (3.478 sec/step)
step 96030 	 loss = 0.148, train_acc = 0.900 (3.368 sec/step)
step 96040 	 loss = 0.870, train_acc = 0.900 (3.404 sec/step)
step 96050 	 loss = 0.042, train_acc = 1.000 (3.376 sec/step)
step 96060 	 loss = 0.023, train_acc = 1.000 (3.362 sec/step)
step 96070 	 loss = 0.150, train_acc = 0.900 (3.316 sec/step)
step 96080 	 loss = 0.110, train_acc = 0.900 (3.389 sec/step)
step 96090 	 loss = 0.000, train_acc = 1.000 (3.363 sec/step)
step 96100 	 loss = 0.036, train_acc = 1.000 (3.369 sec/step)
step 96110 	 loss = 1.131, train_acc = 0.700 (3.423 sec/step)
step 96120 	 loss = 0.004, train_acc = 1.000 (3.369 sec/step)
step 96130 	 loss = 0.944, train_acc = 0.900 (3.355 sec/step)
step 96140 	 loss = 0.557, train_acc = 0.900 (3.375 sec/step)
step 96150 	 loss = 0.477, train_acc = 0.900 (3.319 sec/step)
step 96160 	 loss = 0.194, train_acc = 0.900 (3.374 sec/step)
step 96170 	 loss = 0.985, train_acc = 0.800 (3.333 sec/step)
step 96180 	 loss = 0.003, train_acc = 1.000 (3.380 sec/step)
step 96190 	 loss = 0.000, train_acc = 1.000 (3.356 sec/step)
step 96200 	 loss = 0.030, train_acc = 1.000 (3.375 sec/step)
step 96210 	 loss = 0.003, train_acc = 1.000 (3.331 sec/step)
step 96220 	 loss = 0.000, train_acc = 1.000 (3.405 sec/step)
step 96230 	 loss = 0.019, train_acc = 1.000 (3.354 sec/step)
step 96240 	 loss = 0.047, train_acc = 1.000 (3.407 sec/step)
step 96250 	 loss = 0.111, train_acc = 1.000 (3.322 sec/step)
step 96260 	 loss = 0.089, train_acc = 0.900 (3.365 sec/step)
step 96270 	 loss = 0.178, train_acc = 0.800 (3.414 sec/step)
step 96280 	 loss = 0.074, train_acc = 1.000 (3.345 sec/step)
step 96290 	 loss = 0.139, train_acc = 1.000 (3.397 sec/step)
step 96300 	 loss = 0.001, train_acc = 1.000 (3.372 sec/step)
step 96310 	 loss = 0.100, train_acc = 0.900 (3.346 sec/step)
step 96320 	 loss = 1.353, train_acc = 0.900 (3.321 sec/step)
step 96330 	 loss = 0.625, train_acc = 0.800 (3.353 sec/step)
step 96340 	 loss = 0.308, train_acc = 0.900 (3.320 sec/step)
step 96350 	 loss = 0.289, train_acc = 0.900 (3.385 sec/step)
step 96360 	 loss = 0.498, train_acc = 0.900 (3.353 sec/step)
step 96370 	 loss = 0.000, train_acc = 1.000 (3.413 sec/step)
step 96380 	 loss = 0.008, train_acc = 1.000 (3.340 sec/step)
step 96390 	 loss = 0.020, train_acc = 1.000 (3.385 sec/step)
step 96400 	 loss = 0.000, train_acc = 1.000 (3.365 sec/step)
step 96410 	 loss = 0.084, train_acc = 0.900 (3.302 sec/step)
step 96420 	 loss = 0.468, train_acc = 0.900 (3.367 sec/step)
step 96430 	 loss = 0.131, train_acc = 0.900 (3.361 sec/step)
step 96440 	 loss = 0.177, train_acc = 0.800 (3.327 sec/step)
step 96450 	 loss = 0.000, train_acc = 1.000 (3.393 sec/step)
step 96460 	 loss = 0.002, train_acc = 1.000 (3.379 sec/step)
step 96470 	 loss = 0.001, train_acc = 1.000 (3.397 sec/step)
step 96480 	 loss = 0.021, train_acc = 1.000 (3.394 sec/step)
step 96490 	 loss = 0.001, train_acc = 1.000 (3.374 sec/step)
step 96500 	 loss = 0.000, train_acc = 1.000 (3.369 sec/step)
step 96510 	 loss = 0.000, train_acc = 1.000 (3.342 sec/step)
step 96520 	 loss = 0.610, train_acc = 0.700 (3.373 sec/step)
step 96530 	 loss = 0.009, train_acc = 1.000 (3.385 sec/step)
step 96540 	 loss = 0.218, train_acc = 0.900 (3.372 sec/step)
step 96550 	 loss = 0.018, train_acc = 1.000 (3.335 sec/step)
step 96560 	 loss = 0.092, train_acc = 1.000 (3.347 sec/step)
step 96570 	 loss = 0.002, train_acc = 1.000 (3.397 sec/step)
step 96580 	 loss = 0.060, train_acc = 1.000 (3.376 sec/step)
step 96590 	 loss = 1.709, train_acc = 0.800 (3.372 sec/step)
step 96600 	 loss = 0.857, train_acc = 0.800 (3.322 sec/step)
step 96610 	 loss = 0.030, train_acc = 1.000 (3.326 sec/step)
step 96620 	 loss = 0.107, train_acc = 0.900 (3.446 sec/step)
step 96630 	 loss = 0.155, train_acc = 0.900 (3.371 sec/step)
step 96640 	 loss = 0.264, train_acc = 0.900 (3.397 sec/step)
step 96650 	 loss = 0.172, train_acc = 0.900 (3.365 sec/step)
step 96660 	 loss = 0.185, train_acc = 0.900 (3.333 sec/step)
step 96670 	 loss = 0.005, train_acc = 1.000 (3.392 sec/step)
step 96680 	 loss = 0.001, train_acc = 1.000 (3.364 sec/step)
step 96690 	 loss = 0.113, train_acc = 0.900 (3.354 sec/step)
step 96700 	 loss = 0.441, train_acc = 0.800 (3.323 sec/step)
step 96710 	 loss = 0.033, train_acc = 1.000 (3.375 sec/step)
step 96720 	 loss = 0.011, train_acc = 1.000 (3.410 sec/step)
step 96730 	 loss = 1.104, train_acc = 0.800 (3.346 sec/step)
step 96740 	 loss = 0.051, train_acc = 1.000 (3.343 sec/step)
step 96750 	 loss = 0.136, train_acc = 0.900 (3.366 sec/step)
step 96760 	 loss = 0.499, train_acc = 0.900 (3.395 sec/step)
step 96770 	 loss = 0.098, train_acc = 1.000 (3.323 sec/step)
step 96780 	 loss = 0.048, train_acc = 1.000 (3.386 sec/step)
step 96790 	 loss = 0.011, train_acc = 1.000 (3.367 sec/step)

In [ ]:
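'''
Post-hoc sketch (not part of the original run): parse the printed training log
above into arrays so loss, train_acc, and validation accuracy can be plotted.
In the log, train_acc is frequently 1.000 while validation accuracy hovers
around 0.53, so a quick plot helps see the gap. Assumes the cell output has
been saved to a plain-text file; 'training_log.txt' is a placeholder name.
'''
import re
from matplotlib import pyplot as plt

step_re = re.compile(r'step (\d+)\s+loss = ([\d.]+), train_acc = ([\d.]+)')
val_re  = re.compile(r'VALIDATION\s+acc = ([\d.]+)')

steps, losses, train_accs, val_accs = [], [], [], []
with open('training_log.txt') as f:
    for line in f:
        m = step_re.search(line)
        if m:
            steps.append(int(m.group(1)))
            losses.append(float(m.group(2)))
            train_accs.append(float(m.group(3)))
            continue
        m = val_re.search(line)
        if m:
            # Tag each validation accuracy with the most recent training step seen
            val_accs.append((steps[-1] if steps else 0, float(m.group(1))))

plt.figure(figsize=(10, 4))
plt.subplot(1, 2, 1)
plt.plot(steps, losses, lw=0.5)
plt.xlabel('step')
plt.ylabel('training loss')
plt.subplot(1, 2, 2)
plt.plot(steps, train_accs, lw=0.5, label='train_acc')
if val_accs:
    vs, va = zip(*val_accs)
    plt.plot(vs, va, 'o-', label='validation acc')
plt.xlabel('step')
plt.legend()
plt.tight_layout()
plt.show()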