BiRNN-multi-GPU

Author: Justin Tan

RNN model for rare decay identification in TensorFlow. Bidirectional.

June update: Multi-GPU support


In [1]:
import tensorflow as tf
import numpy as np
import pandas as pd
import time, os

class config(object):
    # Set network parameters
    # Empirically, depth more important than layer size - output dimension
    mode = 'kst'
    channel = 'rho0'
    n_particles = 5
    n_features = 100
    seq_length = n_features/n_particles
    rnn_cell = 'gru'#'lru_cell' # 'gru'
    hidden_units = 256  # Number of neurons per RNN Cell
    keep_prob = 1.0
    input_keep_prob = 0.9
    recurrent_keep_prob = 0.9
    num_epochs = 64
    batch_size = 512
    n_layers = 3 # Note: 3 layers is considered 'deep'
    learning_rate = 1e-3
    lr_epoch_decay = 0.999
    ema_decay = 0.999
    n_classes = 2
    n_gpus = 4

class directories(object):
    data = 'data'
    tensorboard = 'tensorboard'
    checkpoints = 'checkpoints'
    samples = 'samples'
    
architecture = '{} - {} | Base cell: {} | Hidden units: {} | Layers: {} | Batch: {} | Epochs: {}'.format(
    config.channel, config.mode, config.rnn_cell, config.hidden_units, config.n_layers, config.batch_size, config.num_epochs)

class reader():
    def __init__(self, df):
        
        self.df = df
        self.batch_size = config.batch_size
        self.steps_per_epoch = len(df) // config.batch_size
        self.epochs = 0
        self.proceed = True
        self.shuffle()

    def shuffle(self):
        self.df = self.df.sample(frac=1).reset_index(drop=True)
        self.df_X = self.df.drop('Labels', axis = 1)
        self.df_y = self.df['Labels']
        self.pointer = 0

    def next_batch(self, batch_size):
        if self.pointer + 1 >= self.steps_per_epoch:
            inputs = self.df_X.iloc[self.pointer*batch_size:]
            targets = self.df_y.iloc[self.pointer*batch_size:]
            self.epochs += 1
            self.shuffle()
            self.proceed = False
            
        inputs = self.df_X.iloc[self.pointer*batch_size:(self.pointer+1)*batch_size]
        targets = self.df_y.iloc[self.pointer*batch_size:(self.pointer+1)*batch_size]
        self.pointer += 1
                
        return inputs, targets

def save_summary(config, delta_t, train_acc, test_acc):
    import json
    summary = {
        'Timestamp': time.strftime('%c'),
        'Base cell': config.rnn_cell,
        'Hidden units': config.hidden_units,
        'Layers': config.n_layers,
        'Batch_size': config.batch_size,
        'Seq_length': config.seq_length,
        'Dropout': config.keep_prob,
        'Epochs': config.num_epochs,
        'Time': delta_t,
        'Final train acc': train_acc,
        'Final test acc': test_acc
    }
    # Writing JSON data
    if os.path.isfile('rnn_summary.json'):
        with open('rnn_summary_{}.json.format(config.name)', 'r+') as f:
            new = json.load(f)
        new.append(summary)
        with open('rnn_summary.json', 'w') as f:
            json.dump(new, f, indent = 4)
    else:
        with open('rnn_summary.json', 'w') as f:
             json.dump([summary], f, indent = 4)
                
def p_ordering(df):
    # Drop errors, order particles by momentum
    df = df.drop([column for column in df.columns if column.endswith('Err')], axis = 1)
    labels = df['Labels']
    blocks = np.split(df.drop('Labels', axis = 1), config.n_particles, axis = 1)
    cols_p = [column for column in df.columns if column.endswith('cms_p')]
    p_mean = [df[column].mean() for column in cols_p]
    p_ordered_frames = [blocks[i] for i in np.argsort(p_mean)]
    p_ordered_frames.append(labels)
    
    df_p_ordered = pd.concat(p_ordered_frames, axis = 1)
    return df_p_ordered
                
def load_data(file_name, test_size = 0.05):
    from sklearn.model_selection import train_test_split
    df = pd.read_hdf(file_name, 'df')
    df = p_ordering(df)
    df_X_train, df_X_test, df_y_train, df_y_test = train_test_split(df.drop('Labels', axis = 1),
                                                                    df['Labels'], test_size = test_size, random_state=42)
    return df_X_train, df_X_test, df_y_train, df_y_test

def plot_ROC_curve(network_output, y_true, meta = ''):
#     import matplotlib as mpl
#     mpl.use('pgf')
    import matplotlib.pyplot as plt
    import seaborn as sns
    from sklearn.metrics import roc_curve, auc
    y_score = network_output[:,1]
    
    # Compute ROC curve, integrate
    fpr, tpr, thresholds = roc_curve(y_true, y_score)    
    roc_auc = auc(fpr, tpr)
    
    plt.figure()
    plt.axes([.1,.1,.8,.7])
    plt.figtext(.5,.9, r'$\mathrm{Receiver \;Operating \;Characteristic}$', fontsize=15, ha='center')
    plt.figtext(.5,.85, meta, fontsize=10,ha='center')
    plt.plot(fpr, tpr, color='darkorange',
                     lw=2, label='ROC (area = %0.3f)' % roc_auc)
    plt.plot([0, 1], [0, 1], color='navy', lw=1.0, linestyle='--')
    plt.xlim([-0.05, 1.05])
    plt.ylim([-0.05, 1.05])
    plt.xlabel(r'$\mathrm{False \;Positive \;Rate}$')
    plt.ylabel(r'$\mathrm{True \;Positive \;Rate}$')
    plt.legend(loc="lower right")
    plt.savefig(os.path.join('graphs', '{}_{}_ROC.pdf'.format(config.channel, config.mode)), format='pdf', dpi=1000)
    #plt.savefig(os.path.join('graphs', '{}_{}_ROC.pgf'.format(config.channel, config.mode)), format='pgf', dpi=1000)
    print('AUC: {:.4f}'.format(roc_auc))
    plt.show()
    plt.gcf().clear()

Read Data


In [2]:
test_file = '/data/projects/punim0011/jtan/data/rnn/rnn_B02rho0gamma_kst.h5'
assert config.batch_size % config.n_gpus == 0, 'Batch size must be divisible by number of GPUs'

df_X_train, df_X_test, df_y_train, df_y_test = load_data(test_file)
df_train = pd.concat([df_X_train, df_y_train], axis = 1)
df_test = pd.concat([df_X_test, df_y_test], axis = 1)

config.n_features = df_train.shape[1] - 1
config.seq_length = config.n_features//config.n_particles
config.steps_per_epoch = len(df_X_train) // config.batch_size
assert config.seq_length == config.n_features/config.n_particles, 'Discrepancy in input feature dimension'

readerTrain = reader(df_train)
readerTest = reader(df_test)

RNN construction


In [3]:
def layer_weights(shape, name = 'weights'):
    # Return weight tensor of given shape using Xavier initialization
    W = tf.get_variable(name, shape = shape, initializer=tf.contrib.layers.xavier_initializer())
    return W

def layer_biases(shape, name = 'biases'):
    # Return bias tensor of given shape with small initialized constant value
    b = tf.get_variable(name, shape = shape, initializer = tf.constant_initializer(0.01))
    return b

def BN_layer_ops(x, shape, name, keep_prob, phase, activation=tf.nn.relu):
    # High-level implementation of BN
    with tf.variable_scope(name) as scope:
         # scope.reuse_variables() # otherwise tf.get_variable() checks that already existing vars are not shared by accident
        weights = layer_weights(shape = shape)
        biases = layer_biases(shape = [shape[1]])
        z_BN = tf.matmul(x, weights) + biases
        
        # Place BN transform before non-linearity - update to TF 1.2!
        theta_BN = tf.contrib.layers.batch_norm(z_BN, center=True, scale=True,is_training=phase, 
                                                decay=0.99, zero_debias_moving_mean=True, scope='bn', fused = True)
        BN_actv = activation(theta_BN)
        BN_layer_output = tf.nn.dropout(BN_actv, keep_prob)

    return BN_layer_output

def build_network(x, n_layers, hidden_layer_nodes, keep_prob, training_phase):
    assert n_layers == len(hidden_layer_nodes), 'Specified layer nodes and number of layers do not correspond.'
    layers = [x]
    with tf.variable_scope('BN_layers') as scope:
        hidden_1 = BN_layer_ops(x, shape = [config.n_features, hidden_layer_nodes[0]], name = 'BNhidden0',
                                keep_prob = keep_prob, phase = training_phase)
        layers.append(hidden_1)
        for n in range(0,n_layers-1):
            hidden_n = BN_layer_ops(layers[-1], shape = [hidden_layer_nodes[n], hidden_layer_nodes[n+1]], name = 'BNhidden{}'.format(n+1),
                                   keep_prob = keep_prob, phase = training_phase)
            layers.append(hidden_n)
        readout = readout_ops(layers[-1], shape = [hidden_layer_nodes[-1], config.n_classes], name = 'readout')
        
    return readout

def average_gradients(tower_grads):
    """ Calculate the average gradient for each shared variable across all towers.
    Args:
    tower_grads: Nested list of (gradient, variable) tuples. The outer list
      is over individual gradients. The inner list is over the gradient
      calculation for each tower.
    Returns:
     List of pairs of (gradient, variable) where the gradient has been averaged
     across all towers.
    """
    average_grads = []
    for grad_var_pair in zip(*tower_grads):
        # Note that each grad_and_vars looks like the following:
        #   ((grad0_gpu0, var0_gpu0), ... , (grad0_gpuN, var0_gpuN))
        grads = []
        for g, _ in grad_var_pair:
            # Add 0 dimension to the gradients to represent the tower.
            expanded_g = tf.expand_dims(g, 0)
            # Append on a 'tower' dimension which we will average over below.
            grads.append(expanded_g)
        # Average over the 'tower' dimension.
        grad = tf.concat(axis=0, values=grads)
        grad = tf.reduce_mean(grad, 0)

        # Keep in mind that the Variables are redundant because they are shared
        # across towers. So just return the first tower's pointer to
        # the Variable.
        v = grad_var_pair[0][1]
        gv_pair = (grad, v)
        average_grads.append(gv_pair)
        
    return average_grads

In [4]:
class BiRNN():
    def __init__(self, config, training = True):
        # Placeholders for feed_dict
        self.inputs = tf.placeholder(tf.float32, shape = [None, config.n_features])
        self.targets = tf.placeholder(tf.int32, shape = [None])
        self.keep_prob = tf.placeholder(tf.float32) # Dropout on input connections
        self.global_step = tf.get_variable('global_step', [], initializer=tf.constant_initializer(0), trainable=False)
        beta = tf.train.exponential_decay(config.learning_rate, self.global_step, 
                                               decay_steps = config.steps_per_epoch, decay_rate = config.lr_epoch_decay, staircase=True)
        
        # Reshape input to batch_size x n_particles x seq_length tensor, split batches
        # evenly across gpus
        rnn_inputs = tf.reshape(self.inputs, [-1, config.n_particles, config.seq_length])
        rnn_input_batches = tf.split(rnn_inputs, config.n_gpus, axis = 0)
        label_batches = tf.split(self.targets, config.n_gpus, axis = 0)
        opt = tf.train.AdamOptimizer(beta)    
    
        # Choose rnn cell type
        if config.rnn_cell == 'lstm':
            args = {'num_units': config.hidden_units, 'forget_bias': 1.0, 'state_is_tuple': True}
            base_cell = tf.nn.rnn_cell.LSTMCell
        elif config.rnn_cell == 'gru':
            args = {'num_units': config.hidden_units}
            base_cell = tf.nn.rnn_cell.GRUCell
        elif config.rnn_cell == 'layer-norm':
            args = {'num_units': config.hidden_units, 'forget_bias': 1.0, 'dropout_keep_prob': config.recurrent_keep_prob}
            base_cell = tf.contrib.rnn.LayerNormBasicLSTMCell
        else:
            args = {'num_units': config.hidden_units, 'forget_bias': 1.0, 'dropout_keep_prob': config.recurrent_keep_prob}
            base_cell = tf.contrib.rnn.LayerNormBasicLSTMCell

        self.cell = base_cell

        def tower_computation(scope, inputs, labels, n_gpu):
            if training and config.input_keep_prob < 1:
                rnn_inputs = tf.nn.dropout(inputs, self.keep_prob)
                fwd_cells = [tf.nn.rnn_cell.DropoutWrapper(
                    self.cell(**args), input_keep_prob = config.input_keep_prob) for _ in range(config.n_layers)]
                bwd_cells = [tf.nn.rnn_cell.DropoutWrapper(
                    self.cell(**args), input_keep_prob = config.input_keep_prob) for _ in range(config.n_layers)]
            else:
                fwd_cells = [self.cell(**args) for _ in range(config.n_layers)]
                bwd_cells = [self.cell(**args) for _ in range(config.n_layers)]

            fwd_init = [fwd_cell.zero_state(config.batch_size/config.n_gpus, tf.float32) for fwd_cell in fwd_cells]
            bwd_init = [bwd_cell.zero_state(config.batch_size/config.n_gpus, tf.float32) for bwd_cell in bwd_cells]

            birnn_output, _, _ = tf.contrib.rnn.stack_bidirectional_dynamic_rnn(
                cells_fw = fwd_cells,
                cells_bw = bwd_cells,
                inputs = rnn_inputs,
                initial_states_fw = fwd_init,
                initial_states_bw = bwd_init,
                sequence_length = np.ones(config.batch_size//config.n_gpus)*config.n_particles,
                parallel_iterations = 64)

            # Extract output from last time step
            outputs = tf.split(birnn_output, 2, axis = 2)
            output_fwd = outputs[0][:,-1,:]
            output_bwd = outputs[1][:,-1,:]

            with tf.variable_scope('softmax'):
                W_f = layer_weights(shape = [config.hidden_units, config.n_classes], name = 'smx_W_fwd')
                W_b = layer_weights(shape = [config.hidden_units, config.n_classes], name = 'smx_W_bwd')
                softmax_b = layer_biases(shape = [config.n_classes], name = 'smx_b')
                logits_RNN = tf.matmul(output_fwd, W_f) + tf.matmul(output_bwd, W_b) + softmax_b  # Unormalized log probabilties for next char

            cross_entropy = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(logits = logits_RNN, labels = labels))
            tf.add_to_collection('losses_collection', cross_entropy)

            # Assemble all of the losses for the current tower only.
            losses = tf.get_collection('losses_collection', scope)
            for l in losses:
                tf.summary.scalar('xentropy_{}-raw'.format(n_gpu), l)

            return cross_entropy, logits_RNN

        # Calculate gradients for each model tower
        tower_grads, tower_readouts, tower_losses, tower_summaries = [], [], [], []
        for gpu in range(config.n_gpus):
            with tf.device('/gpu:{}'.format(gpu)):
                with tf.variable_scope('vDNN', reuse=(gpu > 0)):
                    with tf.name_scope('tower_{}'.format(gpu)) as scope:
                        # Load one batch per GPU
                        input_batch, label_batch = rnn_input_batches[gpu], label_batches[gpu]

                        # Calculate loss for one tower of the model. Construct the entire model,
                        # but share the variable across all towers
                        loss, readout = tower_computation(scope, input_batch, label_batch, gpu)
                        # Reuse variables for the next tower, retain the summaries from the final tower.
                        #tf.get_variable_scope().reuse_variables()
                        summaries = tf.get_collection(tf.GraphKeys.SUMMARIES, scope)

                        # Retain batch norm update operations only from the final tower.
                        # batchnorm_updates = tf.get_collection(tf.GraphKeys.UPDATE_OPS, scope)

                        # Calculate the gradients for given batch on this tower
                        grads = opt.compute_gradients(loss)
                        tower_grads.append(grads)
                        tower_readouts.append(readout)
                        tower_summaries.append(summaries)
                        tower_losses.append(loss)
                        
        # Synchronize all towers
        mean_grads = average_gradients(tower_grads)
        self.readout = tf.concat(tower_readouts, axis = 0)

        # Evaluation metrics
        self.cross_entropy = tf.reduce_mean(tower_losses)
        self.prediction = tf.nn.softmax(self.readout)
        correct_prediction = tf.equal(tf.cast(tf.argmax(self.readout, 1), tf.int32), self.targets)
        self.accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
        _, self.auc_op = tf.metrics.auc(predictions = tf.argmax(self.readout,1), labels = self.targets, num_thresholds = 512)

        # Track moving average of trainable variables
        self.ema = tf.train.ExponentialMovingAverage(decay = config.ema_decay, num_updates = self.global_step)
        maintain_averages_op = self.ema.apply(tf.trainable_variables())

        # Apply the gradients to adjust the shared variables.
        apply_gradient_op = opt.apply_gradients(mean_grads, global_step=self.global_step)

        # Group all updates to into a single train op.
        #batchnorm_updates_op = tf.group(*batchnorm_updates)
        self.train_op = tf.group(apply_gradient_op, maintain_averages_op)#, batchnorm_updates_op)
        
        saver = tf.train.Saver(tf.global_variables())

        # Build the summary operation from the last tower summaries
        tower_summaries.append(tf.summary.scalar('cross_entropy', self.cross_entropy))
        tower_summaries.append(tf.summary.scalar('accuracy', self.accuracy))
        tower_summaries.append(tf.summary.scalar('auc', self.auc_op))
        tower_summaries.append(tf.summary.scalar('global_step', self.global_step))
        tower_summaries.append(tf.summary.scalar('learning_rate', beta))
        self.merge_op = tf.summary.merge(tower_summaries)
        
    def predict(self, ckpt, metaGraph = None):
        pin_cpu = tf.ConfigProto(allow_soft_placement=True, log_device_placement=True, device_count = {'GPU':0})
    
        # Restore the moving average version of the learned variables for eval.
        #variable_averages = tf.train.ExponentialMovingAverage(config.ema_decay)
        variables_to_restore = self.ema.variables_to_restore()
        #variables_to_restore = variable_averages.variables_to_restore()
        saver = tf.train.Saver(variables_to_restore)
        
        with tf.Session(config=pin_cpu) as sess:
            # Initialize variables
            init_op = tf.global_variables_initializer()
            sess.run(init_op)
            sess.run(tf.local_variables_initializer())
            start_time = time.time()
            assert (ckpt.model_checkpoint_path or metaGraph), 'Missing checkpoint file!'
            
            if metaGraph:
                saver = tf.train.import_meta_graph(metaGraph)
                saver.restore(sess, os.path.splitext(metaGraph)[0])
                print('{} restored.'.format(metaGraph))
            else:    
                saver.restore(sess, ckpt.model_checkpoint_path)
                print('{} restored.'.format(ckpt.model_checkpoint_path))

            # Make predictions using the trained model
            feed_dict_test = {self.inputs: df_X_test.values, self.targets: df_y_test.values, self.keep_prob: 1.0}#, self.training_phase: False}
            network_output_test, final_v_acc, final_v_auc = sess.run(
                [self.prediction, self.accuracy, self.auc_op], feed_dict = feed_dict_test)

            print("Validation accuracy: {:g}\nValidation AUC: {:g}".format(final_v_acc, final_v_auc))
            
            plot_ROC_curve(network_output = network_output_test, y_true = df_y_test.values,
                           meta = architecture + ' | Test accuracy: {}'.format(final_v_acc))            
            delta_t = time.time() - start_time
            print("Inference complete. Duration: %g s" %(delta_t))
            
            return network_output_test

In [5]:
def train(config, restore = False):
    
    biRNN = BiRNN(config, training = True)
    start_time = time.time()
    v_acc_best = 0.
    global_step = 0
    global_epoch = 0
    
    saver = tf.train.Saver()
    train_writer = tf.summary.FileWriter(
        os.path.join(directories.tensorboard, 'train_{}'.format(time.strftime('%d-%m_%I:%M'))), graph = tf.get_default_graph())
    test_writer = tf.summary.FileWriter(os.path.join(directories.tensorboard, 'test_{}'.format(time.strftime('%d-%m_%I:%M'))))
    ckpt = tf.train.get_checkpoint_state(directories.checkpoints)
    
    with tf.Session(config=tf.ConfigProto(allow_soft_placement=True, log_device_placement=True)) as sess:
        # Initialize variables
        init_op = tf.global_variables_initializer()
        sess.run(init_op)
        
        if restore and ckpt.model_checkpoint_path:
            saver.restore(sess, ckpt.model_checkpoint_path)
            global_epoch = ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1]
            assert (type(global_epoch) == int), 'Epoch number untracked'
            print('{} restored at epoch {}.'.format(ckpt.model_checkpoint_path, global_epoch))              
            
        for epoch in range(global_epoch,config.num_epochs):
            
            readerTrain.proceed = True
            step = 0
            # Save every 8 epochs    
            if epoch % 8 == 0:
                save_path = saver.save(sess,
                                       os.path.join(directories.checkpoints,'biRNN_{}_{}_epoch{}.ckpt'.format(config.mode, config.channel, epoch)),
                                       global_step = epoch)
                print('Graph saved to file: {}'.format(save_path))
            
            print('(*) Entering Epoch {} ({:.3f} s)'.format(epoch, time.time() - start_time))

            while(readerTrain.proceed):
                # Iterate through entire corpus
                x_train, y_train = readerTrain.next_batch(config.batch_size)
                feed_dict_train = {biRNN.inputs: x_train.values, biRNN.targets: y_train.values, biRNN.keep_prob: config.keep_prob}
                t_op = sess.run(biRNN.train_op, feed_dict = feed_dict_train)
                step += 1

                if step % (config.steps_per_epoch // 8) == 0:            
                    # Evaluate model
                    improved = ''
                    sess.run(tf.local_variables_initializer())

                    x_test, y_test = readerTest.next_batch(config.batch_size)
                    feed_dict_test = {biRNN.inputs: x_test.values, biRNN.targets: y_test.values, biRNN.keep_prob: 1.0}

                    t_acc, t_summary = sess.run([biRNN.accuracy, biRNN.merge_op],
                                                        feed_dict = feed_dict_train)
                    v_acc, v_loss, v_auc, v_summary, = sess.run([biRNN.accuracy, biRNN.cross_entropy, biRNN.auc_op, biRNN.merge_op],
                                                        feed_dict = feed_dict_test)

                    train_writer.add_summary(t_summary, step)
                    test_writer.add_summary(v_summary, step)
                    
                    if epoch > 8 and v_acc > v_acc_best:
                        v_acc_best = v_acc
                        improved = '*'
                        save_path = saver.save(sess, os.path.join(directories.checkpoints, 'best.ckpt'), global_step = epoch)
                    
                    print('Epoch {}, Step {} | Training Acc: {:.3f} | Test Acc: {:.3f} | Test Loss: {:.3f} | Test AUC {:.3f} ({:.2f} s) {}'
                          .format(epoch, step, t_acc, v_acc, v_loss, v_auc, time.time() - start_time, improved))

        save_path = saver.save(sess, os.path.join(directories.checkpoints, 'biRNN_end'),
                               global_step = epoch)
        print('Metagraph aved to file: {}'.format(save_path))
        print('Architecture: {}'.format(architecture))

#         final_train_accuracy = biRNN.accuracy.eval(feed_dict = {biRNN.inputs: df_X_train.values,
#                                                                 biRNN.targets: df_y_train.values, biRNN.keep_prob: 1.0})
#         final_test_accuracy = biRNN.accuracy.eval(feed_dict = {biRNN.inputs: df_X_test.values,
#                                                                biRNN.targets: df_y_test.values, biRNN.keep_prob: 1.0})
#         delta_t = time.time() - start_time 
#     print("Training Complete. Time elapsed: {:.3f} s".format(delta_t))
#     print("Train accuracy: %g\nValidation accuracy: %g" %(final_train_accuracy, final_test_accuracy))
#     save_summary(config, delta_t, final_train_accuracy, final_test_accuracy)

In [6]:
train(config)#, restore = True)


Graph saved to file: checkpoints/biRNN_kst_rho0_epoch0.ckpt-0
(*) Entering Epoch 0 (7.151 s)
Epoch 0, Step 118 | Training Acc: 0.598 | Test Acc: 0.627 | Test Loss: 0.652 | Test AUC 0.500 (14.77 s) 
Epoch 0, Step 236 | Training Acc: 0.611 | Test Acc: 0.594 | Test Loss: 0.655 | Test AUC 0.548 (20.49 s) 
Epoch 0, Step 354 | Training Acc: 0.617 | Test Acc: 0.625 | Test Loss: 0.629 | Test AUC 0.558 (26.21 s) 
Epoch 0, Step 472 | Training Acc: 0.641 | Test Acc: 0.639 | Test Loss: 0.633 | Test AUC 0.578 (31.96 s) 
Epoch 0, Step 590 | Training Acc: 0.740 | Test Acc: 0.688 | Test Loss: 0.557 | Test AUC 0.687 (37.69 s) 
Epoch 0, Step 708 | Training Acc: 0.689 | Test Acc: 0.713 | Test Loss: 0.557 | Test AUC 0.652 (43.42 s) 
Epoch 0, Step 826 | Training Acc: 0.699 | Test Acc: 0.732 | Test Loss: 0.539 | Test AUC 0.708 (49.13 s) 
Epoch 0, Step 944 | Training Acc: 0.721 | Test Acc: 0.715 | Test Loss: 0.533 | Test AUC 0.675 (54.83 s) 
(*) Entering Epoch 1 (55.334 s)
Epoch 1, Step 118 | Training Acc: 0.754 | Test Acc: 0.736 | Test Loss: 0.533 | Test AUC 0.727 (61.04 s) 
Epoch 1, Step 236 | Training Acc: 0.750 | Test Acc: 0.742 | Test Loss: 0.508 | Test AUC 0.716 (66.69 s) 
Epoch 1, Step 354 | Training Acc: 0.734 | Test Acc: 0.688 | Test Loss: 0.584 | Test AUC 0.659 (72.42 s) 
Epoch 1, Step 472 | Training Acc: 0.746 | Test Acc: 0.734 | Test Loss: 0.550 | Test AUC 0.688 (78.11 s) 
Epoch 1, Step 590 | Training Acc: 0.744 | Test Acc: 0.760 | Test Loss: 0.511 | Test AUC 0.735 (83.83 s) 
Epoch 1, Step 708 | Training Acc: 0.785 | Test Acc: 0.715 | Test Loss: 0.540 | Test AUC 0.713 (89.56 s) 
Epoch 1, Step 826 | Training Acc: 0.721 | Test Acc: 0.727 | Test Loss: 0.534 | Test AUC 0.686 (95.32 s) 
Epoch 1, Step 944 | Training Acc: 0.736 | Test Acc: 0.752 | Test Loss: 0.517 | Test AUC 0.737 (101.03 s) 
(*) Entering Epoch 2 (101.495 s)
Epoch 2, Step 118 | Training Acc: 0.748 | Test Acc: 0.744 | Test Loss: 0.513 | Test AUC 0.710 (107.23 s) 
Epoch 2, Step 236 | Training Acc: 0.736 | Test Acc: 0.736 | Test Loss: 0.524 | Test AUC 0.714 (113.05 s) 
Epoch 2, Step 354 | Training Acc: 0.779 | Test Acc: 0.770 | Test Loss: 0.482 | Test AUC 0.730 (118.84 s) 
Epoch 2, Step 472 | Training Acc: 0.738 | Test Acc: 0.768 | Test Loss: 0.474 | Test AUC 0.705 (124.58 s) 
Epoch 2, Step 590 | Training Acc: 0.760 | Test Acc: 0.760 | Test Loss: 0.466 | Test AUC 0.754 (130.38 s) 
Epoch 2, Step 708 | Training Acc: 0.764 | Test Acc: 0.758 | Test Loss: 0.504 | Test AUC 0.751 (136.13 s) 
Epoch 2, Step 826 | Training Acc: 0.787 | Test Acc: 0.775 | Test Loss: 0.452 | Test AUC 0.765 (141.95 s) 
Epoch 2, Step 944 | Training Acc: 0.793 | Test Acc: 0.779 | Test Loss: 0.454 | Test AUC 0.778 (147.68 s) 
(*) Entering Epoch 3 (148.139 s)
Epoch 3, Step 118 | Training Acc: 0.816 | Test Acc: 0.771 | Test Loss: 0.471 | Test AUC 0.780 (153.88 s) 
Epoch 3, Step 236 | Training Acc: 0.824 | Test Acc: 0.793 | Test Loss: 0.405 | Test AUC 0.792 (159.61 s) 
Epoch 3, Step 354 | Training Acc: 0.812 | Test Acc: 0.773 | Test Loss: 0.464 | Test AUC 0.786 (165.35 s) 
Epoch 3, Step 472 | Training Acc: 0.811 | Test Acc: 0.818 | Test Loss: 0.397 | Test AUC 0.796 (171.10 s) 
Epoch 3, Step 590 | Training Acc: 0.828 | Test Acc: 0.812 | Test Loss: 0.424 | Test AUC 0.804 (176.85 s) 
Epoch 3, Step 708 | Training Acc: 0.809 | Test Acc: 0.795 | Test Loss: 0.423 | Test AUC 0.794 (182.58 s) 
Epoch 3, Step 826 | Training Acc: 0.803 | Test Acc: 0.781 | Test Loss: 0.451 | Test AUC 0.791 (188.33 s) 
Epoch 3, Step 944 | Training Acc: 0.824 | Test Acc: 0.791 | Test Loss: 0.454 | Test AUC 0.789 (194.06 s) 
(*) Entering Epoch 4 (194.519 s)
Epoch 4, Step 118 | Training Acc: 0.768 | Test Acc: 0.787 | Test Loss: 0.416 | Test AUC 0.774 (200.22 s) 
Epoch 4, Step 236 | Training Acc: 0.809 | Test Acc: 0.785 | Test Loss: 0.448 | Test AUC 0.785 (205.94 s) 
Epoch 4, Step 354 | Training Acc: 0.773 | Test Acc: 0.818 | Test Loss: 0.400 | Test AUC 0.784 (211.66 s) 
Epoch 4, Step 472 | Training Acc: 0.803 | Test Acc: 0.807 | Test Loss: 0.433 | Test AUC 0.784 (217.37 s) 
Epoch 4, Step 590 | Training Acc: 0.768 | Test Acc: 0.805 | Test Loss: 0.425 | Test AUC 0.765 (223.08 s) 
Epoch 4, Step 708 | Training Acc: 0.801 | Test Acc: 0.799 | Test Loss: 0.409 | Test AUC 0.784 (228.79 s) 
Epoch 4, Step 826 | Training Acc: 0.805 | Test Acc: 0.816 | Test Loss: 0.390 | Test AUC 0.799 (234.52 s) 
Epoch 4, Step 944 | Training Acc: 0.820 | Test Acc: 0.814 | Test Loss: 0.423 | Test AUC 0.800 (240.24 s) 
(*) Entering Epoch 5 (240.710 s)
Epoch 5, Step 118 | Training Acc: 0.797 | Test Acc: 0.732 | Test Loss: 0.521 | Test AUC 0.740 (246.44 s) 
Epoch 5, Step 236 | Training Acc: 0.797 | Test Acc: 0.785 | Test Loss: 0.446 | Test AUC 0.778 (252.15 s) 
Epoch 5, Step 354 | Training Acc: 0.799 | Test Acc: 0.799 | Test Loss: 0.457 | Test AUC 0.788 (257.86 s) 
Epoch 5, Step 472 | Training Acc: 0.855 | Test Acc: 0.789 | Test Loss: 0.426 | Test AUC 0.819 (263.60 s) 
Epoch 5, Step 590 | Training Acc: 0.797 | Test Acc: 0.824 | Test Loss: 0.407 | Test AUC 0.784 (269.32 s) 
Epoch 5, Step 708 | Training Acc: 0.799 | Test Acc: 0.795 | Test Loss: 0.411 | Test AUC 0.769 (275.05 s) 
Epoch 5, Step 826 | Training Acc: 0.812 | Test Acc: 0.803 | Test Loss: 0.417 | Test AUC 0.801 (280.79 s) 
Epoch 5, Step 944 | Training Acc: 0.803 | Test Acc: 0.803 | Test Loss: 0.416 | Test AUC 0.780 (286.53 s) 
(*) Entering Epoch 6 (287.000 s)
Epoch 6, Step 118 | Training Acc: 0.812 | Test Acc: 0.803 | Test Loss: 0.418 | Test AUC 0.801 (292.73 s) 
Epoch 6, Step 236 | Training Acc: 0.826 | Test Acc: 0.811 | Test Loss: 0.382 | Test AUC 0.807 (298.52 s) 
Epoch 6, Step 354 | Training Acc: 0.820 | Test Acc: 0.844 | Test Loss: 0.393 | Test AUC 0.816 (304.25 s) 
Epoch 6, Step 472 | Training Acc: 0.797 | Test Acc: 0.803 | Test Loss: 0.424 | Test AUC 0.796 (310.00 s) 
Epoch 6, Step 590 | Training Acc: 0.807 | Test Acc: 0.846 | Test Loss: 0.360 | Test AUC 0.814 (315.75 s) 
Epoch 6, Step 708 | Training Acc: 0.797 | Test Acc: 0.822 | Test Loss: 0.389 | Test AUC 0.785 (321.47 s) 
Epoch 6, Step 826 | Training Acc: 0.840 | Test Acc: 0.809 | Test Loss: 0.399 | Test AUC 0.809 (327.18 s) 
Epoch 6, Step 944 | Training Acc: 0.777 | Test Acc: 0.801 | Test Loss: 0.399 | Test AUC 0.785 (332.91 s) 
(*) Entering Epoch 7 (333.370 s)
Epoch 7, Step 118 | Training Acc: 0.812 | Test Acc: 0.822 | Test Loss: 0.384 | Test AUC 0.799 (339.10 s) 
Epoch 7, Step 236 | Training Acc: 0.799 | Test Acc: 0.809 | Test Loss: 0.424 | Test AUC 0.784 (344.85 s) 
Epoch 7, Step 354 | Training Acc: 0.857 | Test Acc: 0.824 | Test Loss: 0.386 | Test AUC 0.827 (350.58 s) 
Epoch 7, Step 472 | Training Acc: 0.857 | Test Acc: 0.807 | Test Loss: 0.415 | Test AUC 0.808 (356.30 s) 
Epoch 7, Step 590 | Training Acc: 0.846 | Test Acc: 0.803 | Test Loss: 0.427 | Test AUC 0.811 (362.03 s) 
Epoch 7, Step 708 | Training Acc: 0.830 | Test Acc: 0.809 | Test Loss: 0.400 | Test AUC 0.811 (367.79 s) 
Epoch 7, Step 826 | Training Acc: 0.822 | Test Acc: 0.799 | Test Loss: 0.423 | Test AUC 0.791 (373.53 s) 
Epoch 7, Step 944 | Training Acc: 0.828 | Test Acc: 0.807 | Test Loss: 0.397 | Test AUC 0.801 (379.26 s) 
Graph saved to file: checkpoints/biRNN_kst_rho0_epoch8.ckpt-8
(*) Entering Epoch 8 (380.906 s)
Epoch 8, Step 118 | Training Acc: 0.824 | Test Acc: 0.797 | Test Loss: 0.448 | Test AUC 0.793 (386.63 s) 
Epoch 8, Step 236 | Training Acc: 0.820 | Test Acc: 0.799 | Test Loss: 0.430 | Test AUC 0.787 (392.34 s) 
Epoch 8, Step 354 | Training Acc: 0.816 | Test Acc: 0.777 | Test Loss: 0.445 | Test AUC 0.787 (398.06 s) 
Epoch 8, Step 472 | Training Acc: 0.820 | Test Acc: 0.787 | Test Loss: 0.451 | Test AUC 0.793 (403.76 s) 
Epoch 8, Step 590 | Training Acc: 0.809 | Test Acc: 0.822 | Test Loss: 0.404 | Test AUC 0.805 (409.45 s) 
Epoch 8, Step 708 | Training Acc: 0.844 | Test Acc: 0.783 | Test Loss: 0.419 | Test AUC 0.815 (415.14 s) 
Epoch 8, Step 826 | Training Acc: 0.832 | Test Acc: 0.771 | Test Loss: 0.435 | Test AUC 0.790 (420.98 s) 
Epoch 8, Step 944 | Training Acc: 0.812 | Test Acc: 0.789 | Test Loss: 0.452 | Test AUC 0.779 (426.70 s) 
(*) Entering Epoch 9 (427.160 s)
Epoch 9, Step 118 | Training Acc: 0.807 | Test Acc: 0.801 | Test Loss: 0.411 | Test AUC 0.801 (433.81 s) *
Epoch 9, Step 236 | Training Acc: 0.812 | Test Acc: 0.799 | Test Loss: 0.428 | Test AUC 0.791 (439.54 s) 
Epoch 9, Step 354 | Training Acc: 0.818 | Test Acc: 0.840 | Test Loss: 0.362 | Test AUC 0.827 (446.17 s) *
Epoch 9, Step 472 | Training Acc: 0.828 | Test Acc: 0.812 | Test Loss: 0.388 | Test AUC 0.805 (451.91 s) 
Epoch 9, Step 590 | Training Acc: 0.822 | Test Acc: 0.830 | Test Loss: 0.402 | Test AUC 0.808 (457.62 s) 
Epoch 9, Step 708 | Training Acc: 0.846 | Test Acc: 0.838 | Test Loss: 0.398 | Test AUC 0.832 (463.33 s) 
Epoch 9, Step 826 | Training Acc: 0.805 | Test Acc: 0.820 | Test Loss: 0.388 | Test AUC 0.803 (469.07 s) 
Epoch 9, Step 944 | Training Acc: 0.854 | Test Acc: 0.826 | Test Loss: 0.386 | Test AUC 0.829 (474.81 s) 
(*) Entering Epoch 10 (475.276 s)
Epoch 10, Step 118 | Training Acc: 0.818 | Test Acc: 0.818 | Test Loss: 0.393 | Test AUC 0.813 (481.01 s) 
Epoch 10, Step 236 | Training Acc: 0.814 | Test Acc: 0.814 | Test Loss: 0.406 | Test AUC 0.804 (486.77 s) 
Epoch 10, Step 354 | Training Acc: 0.777 | Test Acc: 0.803 | Test Loss: 0.402 | Test AUC 0.779 (492.48 s) 
Epoch 10, Step 472 | Training Acc: 0.836 | Test Acc: 0.785 | Test Loss: 0.420 | Test AUC 0.797 (498.21 s) 
Epoch 10, Step 590 | Training Acc: 0.863 | Test Acc: 0.797 | Test Loss: 0.438 | Test AUC 0.810 (503.90 s) 
Epoch 10, Step 708 | Training Acc: 0.824 | Test Acc: 0.832 | Test Loss: 0.361 | Test AUC 0.813 (509.65 s) 
Epoch 10, Step 826 | Training Acc: 0.840 | Test Acc: 0.834 | Test Loss: 0.394 | Test AUC 0.833 (515.38 s) 
Epoch 10, Step 944 | Training Acc: 0.787 | Test Acc: 0.795 | Test Loss: 0.395 | Test AUC 0.795 (521.18 s) 
(*) Entering Epoch 11 (521.646 s)
Epoch 11, Step 118 | Training Acc: 0.814 | Test Acc: 0.820 | Test Loss: 0.400 | Test AUC 0.805 (527.38 s) 
Epoch 11, Step 236 | Training Acc: 0.838 | Test Acc: 0.811 | Test Loss: 0.407 | Test AUC 0.819 (533.11 s) 
Epoch 11, Step 354 | Training Acc: 0.807 | Test Acc: 0.814 | Test Loss: 0.417 | Test AUC 0.796 (538.83 s) 
Epoch 11, Step 472 | Training Acc: 0.859 | Test Acc: 0.848 | Test Loss: 0.375 | Test AUC 0.840 (545.50 s) *
Epoch 11, Step 590 | Training Acc: 0.807 | Test Acc: 0.824 | Test Loss: 0.377 | Test AUC 0.806 (551.22 s) 
Epoch 11, Step 708 | Training Acc: 0.811 | Test Acc: 0.803 | Test Loss: 0.452 | Test AUC 0.784 (556.93 s) 
Epoch 11, Step 826 | Training Acc: 0.844 | Test Acc: 0.814 | Test Loss: 0.412 | Test AUC 0.820 (562.66 s) 
Epoch 11, Step 944 | Training Acc: 0.822 | Test Acc: 0.820 | Test Loss: 0.380 | Test AUC 0.801 (568.38 s) 
(*) Entering Epoch 12 (568.838 s)
Epoch 12, Step 118 | Training Acc: 0.822 | Test Acc: 0.811 | Test Loss: 0.418 | Test AUC 0.804 (574.59 s) 
Epoch 12, Step 236 | Training Acc: 0.834 | Test Acc: 0.807 | Test Loss: 0.428 | Test AUC 0.810 (580.34 s) 
Epoch 12, Step 354 | Training Acc: 0.820 | Test Acc: 0.852 | Test Loss: 0.361 | Test AUC 0.828 (587.05 s) *
Epoch 12, Step 472 | Training Acc: 0.797 | Test Acc: 0.775 | Test Loss: 0.449 | Test AUC 0.765 (592.79 s) 
Epoch 12, Step 590 | Training Acc: 0.797 | Test Acc: 0.834 | Test Loss: 0.383 | Test AUC 0.803 (598.52 s) 
Epoch 12, Step 708 | Training Acc: 0.807 | Test Acc: 0.820 | Test Loss: 0.367 | Test AUC 0.800 (604.27 s) 
Epoch 12, Step 826 | Training Acc: 0.818 | Test Acc: 0.816 | Test Loss: 0.389 | Test AUC 0.796 (610.00 s) 
Epoch 12, Step 944 | Training Acc: 0.828 | Test Acc: 0.811 | Test Loss: 0.408 | Test AUC 0.808 (615.75 s) 
(*) Entering Epoch 13 (616.214 s)
Epoch 13, Step 118 | Training Acc: 0.807 | Test Acc: 0.830 | Test Loss: 0.386 | Test AUC 0.809 (621.95 s) 
Epoch 13, Step 236 | Training Acc: 0.830 | Test Acc: 0.812 | Test Loss: 0.409 | Test AUC 0.813 (627.70 s) 
Epoch 13, Step 354 | Training Acc: 0.801 | Test Acc: 0.805 | Test Loss: 0.425 | Test AUC 0.797 (633.43 s) 
Epoch 13, Step 472 | Training Acc: 0.844 | Test Acc: 0.820 | Test Loss: 0.414 | Test AUC 0.817 (639.19 s) 
Epoch 13, Step 590 | Training Acc: 0.840 | Test Acc: 0.822 | Test Loss: 0.394 | Test AUC 0.823 (644.92 s) 
Epoch 13, Step 708 | Training Acc: 0.832 | Test Acc: 0.840 | Test Loss: 0.389 | Test AUC 0.820 (650.66 s) 
Epoch 13, Step 826 | Training Acc: 0.809 | Test Acc: 0.799 | Test Loss: 0.418 | Test AUC 0.793 (656.41 s) 
Epoch 13, Step 944 | Training Acc: 0.818 | Test Acc: 0.791 | Test Loss: 0.429 | Test AUC 0.785 (662.15 s) 
(*) Entering Epoch 14 (662.612 s)
Epoch 14, Step 118 | Training Acc: 0.787 | Test Acc: 0.822 | Test Loss: 0.370 | Test AUC 0.788 (668.34 s) 
Epoch 14, Step 236 | Training Acc: 0.830 | Test Acc: 0.801 | Test Loss: 0.404 | Test AUC 0.809 (674.08 s) 
Epoch 14, Step 354 | Training Acc: 0.793 | Test Acc: 0.775 | Test Loss: 0.460 | Test AUC 0.759 (679.82 s) 
Epoch 14, Step 472 | Training Acc: 0.836 | Test Acc: 0.822 | Test Loss: 0.376 | Test AUC 0.817 (685.57 s) 
Epoch 14, Step 590 | Training Acc: 0.793 | Test Acc: 0.809 | Test Loss: 0.415 | Test AUC 0.777 (691.28 s) 
Epoch 14, Step 708 | Training Acc: 0.809 | Test Acc: 0.785 | Test Loss: 0.467 | Test AUC 0.772 (697.02 s) 
Epoch 14, Step 826 | Training Acc: 0.834 | Test Acc: 0.832 | Test Loss: 0.395 | Test AUC 0.819 (702.76 s) 
Epoch 14, Step 944 | Training Acc: 0.828 | Test Acc: 0.830 | Test Loss: 0.377 | Test AUC 0.800 (708.52 s) 
(*) Entering Epoch 15 (708.981 s)
Epoch 15, Step 118 | Training Acc: 0.822 | Test Acc: 0.807 | Test Loss: 0.420 | Test AUC 0.798 (714.71 s) 
Epoch 15, Step 236 | Training Acc: 0.818 | Test Acc: 0.824 | Test Loss: 0.380 | Test AUC 0.808 (720.47 s) 
Epoch 15, Step 354 | Training Acc: 0.822 | Test Acc: 0.805 | Test Loss: 0.420 | Test AUC 0.793 (726.18 s) 
Epoch 15, Step 472 | Training Acc: 0.848 | Test Acc: 0.832 | Test Loss: 0.391 | Test AUC 0.833 (731.91 s) 
Epoch 15, Step 590 | Training Acc: 0.816 | Test Acc: 0.807 | Test Loss: 0.410 | Test AUC 0.800 (737.64 s) 
Epoch 15, Step 708 | Training Acc: 0.834 | Test Acc: 0.824 | Test Loss: 0.370 | Test AUC 0.819 (743.39 s) 
Epoch 15, Step 826 | Training Acc: 0.828 | Test Acc: 0.832 | Test Loss: 0.403 | Test AUC 0.815 (749.13 s) 
Epoch 15, Step 944 | Training Acc: 0.850 | Test Acc: 0.820 | Test Loss: 0.403 | Test AUC 0.831 (754.87 s) 
Graph saved to file: checkpoints/biRNN_kst_rho0_epoch16.ckpt-16
(*) Entering Epoch 16 (756.352 s)
Epoch 16, Step 118 | Training Acc: 0.814 | Test Acc: 0.811 | Test Loss: 0.418 | Test AUC 0.805 (762.11 s) 
Epoch 16, Step 236 | Training Acc: 0.826 | Test Acc: 0.773 | Test Loss: 0.487 | Test AUC 0.778 (767.85 s) 
Epoch 16, Step 354 | Training Acc: 0.811 | Test Acc: 0.795 | Test Loss: 0.434 | Test AUC 0.787 (773.59 s) 
Epoch 16, Step 472 | Training Acc: 0.816 | Test Acc: 0.850 | Test Loss: 0.355 | Test AUC 0.822 (779.33 s) 
Epoch 16, Step 590 | Training Acc: 0.801 | Test Acc: 0.834 | Test Loss: 0.372 | Test AUC 0.818 (785.10 s) 
Epoch 16, Step 708 | Training Acc: 0.828 | Test Acc: 0.840 | Test Loss: 0.393 | Test AUC 0.832 (790.83 s) 
Epoch 16, Step 826 | Training Acc: 0.828 | Test Acc: 0.822 | Test Loss: 0.387 | Test AUC 0.822 (796.56 s) 
Epoch 16, Step 944 | Training Acc: 0.797 | Test Acc: 0.789 | Test Loss: 0.433 | Test AUC 0.780 (802.28 s) 
(*) Entering Epoch 17 (802.739 s)
Epoch 17, Step 118 | Training Acc: 0.842 | Test Acc: 0.814 | Test Loss: 0.392 | Test AUC 0.809 (808.45 s) 
Epoch 17, Step 236 | Training Acc: 0.791 | Test Acc: 0.865 | Test Loss: 0.335 | Test AUC 0.822 (815.08 s) *
Epoch 17, Step 354 | Training Acc: 0.816 | Test Acc: 0.814 | Test Loss: 0.404 | Test AUC 0.813 (820.78 s) 
Epoch 17, Step 472 | Training Acc: 0.799 | Test Acc: 0.791 | Test Loss: 0.428 | Test AUC 0.803 (826.51 s) 
Epoch 17, Step 590 | Training Acc: 0.809 | Test Acc: 0.818 | Test Loss: 0.387 | Test AUC 0.805 (832.22 s) 
Epoch 17, Step 708 | Training Acc: 0.814 | Test Acc: 0.828 | Test Loss: 0.361 | Test AUC 0.809 (837.93 s) 
Epoch 17, Step 826 | Training Acc: 0.807 | Test Acc: 0.830 | Test Loss: 0.389 | Test AUC 0.798 (843.62 s) 
Epoch 17, Step 944 | Training Acc: 0.850 | Test Acc: 0.809 | Test Loss: 0.395 | Test AUC 0.819 (849.35 s) 
(*) Entering Epoch 18 (849.801 s)
Epoch 18, Step 118 | Training Acc: 0.859 | Test Acc: 0.830 | Test Loss: 0.385 | Test AUC 0.834 (855.54 s) 
Epoch 18, Step 236 | Training Acc: 0.830 | Test Acc: 0.846 | Test Loss: 0.364 | Test AUC 0.820 (861.29 s) 
Epoch 18, Step 354 | Training Acc: 0.863 | Test Acc: 0.791 | Test Loss: 0.424 | Test AUC 0.812 (866.98 s) 
Epoch 18, Step 472 | Training Acc: 0.822 | Test Acc: 0.836 | Test Loss: 0.371 | Test AUC 0.807 (872.69 s) 
Epoch 18, Step 590 | Training Acc: 0.824 | Test Acc: 0.824 | Test Loss: 0.374 | Test AUC 0.812 (878.40 s) 
Epoch 18, Step 708 | Training Acc: 0.812 | Test Acc: 0.797 | Test Loss: 0.417 | Test AUC 0.802 (884.10 s) 
Epoch 18, Step 826 | Training Acc: 0.855 | Test Acc: 0.797 | Test Loss: 0.446 | Test AUC 0.814 (889.82 s) 
Epoch 18, Step 944 | Training Acc: 0.846 | Test Acc: 0.793 | Test Loss: 0.412 | Test AUC 0.803 (895.53 s) 
(*) Entering Epoch 19 (895.989 s)
Epoch 19, Step 118 | Training Acc: 0.842 | Test Acc: 0.803 | Test Loss: 0.419 | Test AUC 0.809 (901.68 s) 
Epoch 19, Step 236 | Training Acc: 0.783 | Test Acc: 0.838 | Test Loss: 0.387 | Test AUC 0.795 (907.40 s) 
Epoch 19, Step 354 | Training Acc: 0.820 | Test Acc: 0.797 | Test Loss: 0.420 | Test AUC 0.790 (913.10 s) 
Epoch 19, Step 472 | Training Acc: 0.811 | Test Acc: 0.803 | Test Loss: 0.400 | Test AUC 0.801 (918.79 s) 
Epoch 19, Step 590 | Training Acc: 0.822 | Test Acc: 0.828 | Test Loss: 0.412 | Test AUC 0.815 (924.49 s) 
Epoch 19, Step 708 | Training Acc: 0.807 | Test Acc: 0.828 | Test Loss: 0.369 | Test AUC 0.813 (930.18 s) 
Epoch 19, Step 826 | Training Acc: 0.822 | Test Acc: 0.822 | Test Loss: 0.359 | Test AUC 0.812 (935.88 s) 
Epoch 19, Step 944 | Training Acc: 0.818 | Test Acc: 0.807 | Test Loss: 0.418 | Test AUC 0.799 (941.59 s) 
(*) Entering Epoch 20 (942.048 s)
Epoch 20, Step 118 | Training Acc: 0.857 | Test Acc: 0.822 | Test Loss: 0.406 | Test AUC 0.838 (947.75 s) 
Epoch 20, Step 236 | Training Acc: 0.848 | Test Acc: 0.842 | Test Loss: 0.369 | Test AUC 0.838 (953.46 s) 
Epoch 20, Step 354 | Training Acc: 0.840 | Test Acc: 0.830 | Test Loss: 0.374 | Test AUC 0.830 (959.18 s) 
Epoch 20, Step 472 | Training Acc: 0.836 | Test Acc: 0.826 | Test Loss: 0.373 | Test AUC 0.822 (964.89 s) 
Epoch 20, Step 590 | Training Acc: 0.834 | Test Acc: 0.838 | Test Loss: 0.360 | Test AUC 0.823 (970.59 s) 
Epoch 20, Step 708 | Training Acc: 0.846 | Test Acc: 0.809 | Test Loss: 0.399 | Test AUC 0.822 (976.28 s) 
Epoch 20, Step 826 | Training Acc: 0.824 | Test Acc: 0.797 | Test Loss: 0.425 | Test AUC 0.800 (982.06 s) 
Epoch 20, Step 944 | Training Acc: 0.822 | Test Acc: 0.832 | Test Loss: 0.415 | Test AUC 0.822 (987.76 s) 
(*) Entering Epoch 21 (988.219 s)
Epoch 21, Step 118 | Training Acc: 0.826 | Test Acc: 0.787 | Test Loss: 0.453 | Test AUC 0.796 (993.92 s) 
Epoch 21, Step 236 | Training Acc: 0.822 | Test Acc: 0.805 | Test Loss: 0.455 | Test AUC 0.790 (999.70 s) 
Epoch 21, Step 354 | Training Acc: 0.797 | Test Acc: 0.814 | Test Loss: 0.388 | Test AUC 0.793 (1005.47 s) 
Epoch 21, Step 472 | Training Acc: 0.822 | Test Acc: 0.816 | Test Loss: 0.387 | Test AUC 0.806 (1011.15 s) 
Epoch 21, Step 590 | Training Acc: 0.816 | Test Acc: 0.811 | Test Loss: 0.425 | Test AUC 0.810 (1016.86 s) 
Epoch 21, Step 708 | Training Acc: 0.859 | Test Acc: 0.840 | Test Loss: 0.408 | Test AUC 0.834 (1022.57 s) 
Epoch 21, Step 826 | Training Acc: 0.803 | Test Acc: 0.832 | Test Loss: 0.368 | Test AUC 0.809 (1028.27 s) 
Epoch 21, Step 944 | Training Acc: 0.834 | Test Acc: 0.834 | Test Loss: 0.379 | Test AUC 0.831 (1033.98 s) 
(*) Entering Epoch 22 (1034.431 s)
Epoch 22, Step 118 | Training Acc: 0.842 | Test Acc: 0.824 | Test Loss: 0.376 | Test AUC 0.811 (1040.17 s) 
Epoch 22, Step 236 | Training Acc: 0.826 | Test Acc: 0.805 | Test Loss: 0.422 | Test AUC 0.793 (1045.88 s) 
Epoch 22, Step 354 | Training Acc: 0.846 | Test Acc: 0.840 | Test Loss: 0.370 | Test AUC 0.830 (1051.57 s) 
Epoch 22, Step 472 | Training Acc: 0.807 | Test Acc: 0.842 | Test Loss: 0.379 | Test AUC 0.814 (1057.31 s) 
Epoch 22, Step 590 | Training Acc: 0.836 | Test Acc: 0.820 | Test Loss: 0.398 | Test AUC 0.814 (1063.06 s) 
Epoch 22, Step 708 | Training Acc: 0.850 | Test Acc: 0.828 | Test Loss: 0.380 | Test AUC 0.831 (1068.82 s) 
Epoch 22, Step 826 | Training Acc: 0.793 | Test Acc: 0.836 | Test Loss: 0.367 | Test AUC 0.796 (1074.58 s) 
Epoch 22, Step 944 | Training Acc: 0.799 | Test Acc: 0.863 | Test Loss: 0.355 | Test AUC 0.817 (1080.28 s) 
(*) Entering Epoch 23 (1080.734 s)
Epoch 23, Step 118 | Training Acc: 0.832 | Test Acc: 0.811 | Test Loss: 0.406 | Test AUC 0.812 (1086.43 s) 
Epoch 23, Step 236 | Training Acc: 0.838 | Test Acc: 0.812 | Test Loss: 0.408 | Test AUC 0.810 (1092.15 s) 
Epoch 23, Step 354 | Training Acc: 0.834 | Test Acc: 0.797 | Test Loss: 0.410 | Test AUC 0.783 (1097.85 s) 
Epoch 23, Step 472 | Training Acc: 0.836 | Test Acc: 0.828 | Test Loss: 0.395 | Test AUC 0.832 (1103.56 s) 
Epoch 23, Step 590 | Training Acc: 0.814 | Test Acc: 0.846 | Test Loss: 0.344 | Test AUC 0.826 (1109.25 s) 
Epoch 23, Step 708 | Training Acc: 0.816 | Test Acc: 0.848 | Test Loss: 0.353 | Test AUC 0.821 (1114.94 s) 
Epoch 23, Step 826 | Training Acc: 0.795 | Test Acc: 0.826 | Test Loss: 0.393 | Test AUC 0.804 (1120.65 s) 
Epoch 23, Step 944 | Training Acc: 0.814 | Test Acc: 0.832 | Test Loss: 0.375 | Test AUC 0.800 (1126.38 s) 
Graph saved to file: checkpoints/biRNN_kst_rho0_epoch24.ckpt-24
(*) Entering Epoch 24 (1127.801 s)
Epoch 24, Step 118 | Training Acc: 0.820 | Test Acc: 0.822 | Test Loss: 0.376 | Test AUC 0.816 (1133.54 s) 
Epoch 24, Step 236 | Training Acc: 0.818 | Test Acc: 0.805 | Test Loss: 0.409 | Test AUC 0.801 (1139.25 s) 
Epoch 24, Step 354 | Training Acc: 0.805 | Test Acc: 0.824 | Test Loss: 0.370 | Test AUC 0.800 (1144.99 s) 
Epoch 24, Step 472 | Training Acc: 0.816 | Test Acc: 0.811 | Test Loss: 0.396 | Test AUC 0.798 (1150.78 s) 
Epoch 24, Step 590 | Training Acc: 0.811 | Test Acc: 0.816 | Test Loss: 0.405 | Test AUC 0.807 (1156.51 s) 
Epoch 24, Step 708 | Training Acc: 0.838 | Test Acc: 0.832 | Test Loss: 0.356 | Test AUC 0.819 (1162.21 s) 
Epoch 24, Step 826 | Training Acc: 0.803 | Test Acc: 0.822 | Test Loss: 0.372 | Test AUC 0.793 (1167.95 s) 
Epoch 24, Step 944 | Training Acc: 0.840 | Test Acc: 0.811 | Test Loss: 0.407 | Test AUC 0.821 (1173.68 s) 
(*) Entering Epoch 25 (1174.132 s)
Epoch 25, Step 118 | Training Acc: 0.863 | Test Acc: 0.828 | Test Loss: 0.393 | Test AUC 0.844 (1179.85 s) 
Epoch 25, Step 236 | Training Acc: 0.809 | Test Acc: 0.844 | Test Loss: 0.346 | Test AUC 0.822 (1185.58 s) 
Epoch 25, Step 354 | Training Acc: 0.807 | Test Acc: 0.828 | Test Loss: 0.359 | Test AUC 0.805 (1191.30 s) 
Epoch 25, Step 472 | Training Acc: 0.865 | Test Acc: 0.809 | Test Loss: 0.406 | Test AUC 0.830 (1197.01 s) 
Epoch 25, Step 590 | Training Acc: 0.811 | Test Acc: 0.799 | Test Loss: 0.410 | Test AUC 0.785 (1202.70 s) 
Epoch 25, Step 708 | Training Acc: 0.838 | Test Acc: 0.822 | Test Loss: 0.394 | Test AUC 0.814 (1208.46 s) 
Epoch 25, Step 826 | Training Acc: 0.818 | Test Acc: 0.803 | Test Loss: 0.410 | Test AUC 0.806 (1214.23 s) 
Epoch 25, Step 944 | Training Acc: 0.840 | Test Acc: 0.812 | Test Loss: 0.386 | Test AUC 0.818 (1219.95 s) 
(*) Entering Epoch 26 (1220.404 s)
Epoch 26, Step 118 | Training Acc: 0.820 | Test Acc: 0.818 | Test Loss: 0.380 | Test AUC 0.808 (1226.14 s) 
Epoch 26, Step 236 | Training Acc: 0.846 | Test Acc: 0.803 | Test Loss: 0.415 | Test AUC 0.812 (1231.85 s) 
Epoch 26, Step 354 | Training Acc: 0.852 | Test Acc: 0.848 | Test Loss: 0.366 | Test AUC 0.837 (1237.62 s) 
Epoch 26, Step 472 | Training Acc: 0.816 | Test Acc: 0.820 | Test Loss: 0.430 | Test AUC 0.808 (1243.41 s) 
Epoch 26, Step 590 | Training Acc: 0.855 | Test Acc: 0.826 | Test Loss: 0.353 | Test AUC 0.827 (1249.17 s) 
Epoch 26, Step 708 | Training Acc: 0.824 | Test Acc: 0.836 | Test Loss: 0.350 | Test AUC 0.824 (1254.91 s) 
Epoch 26, Step 826 | Training Acc: 0.838 | Test Acc: 0.818 | Test Loss: 0.398 | Test AUC 0.822 (1260.64 s) 
Epoch 26, Step 944 | Training Acc: 0.822 | Test Acc: 0.811 | Test Loss: 0.414 | Test AUC 0.804 (1266.39 s) 
(*) Entering Epoch 27 (1266.854 s)
Epoch 27, Step 118 | Training Acc: 0.773 | Test Acc: 0.816 | Test Loss: 0.393 | Test AUC 0.783 (1272.60 s) 
Epoch 27, Step 236 | Training Acc: 0.818 | Test Acc: 0.846 | Test Loss: 0.358 | Test AUC 0.819 (1278.33 s) 
Epoch 27, Step 354 | Training Acc: 0.812 | Test Acc: 0.812 | Test Loss: 0.415 | Test AUC 0.802 (1284.05 s) 
Epoch 27, Step 472 | Training Acc: 0.844 | Test Acc: 0.824 | Test Loss: 0.388 | Test AUC 0.826 (1289.75 s) 
Epoch 27, Step 590 | Training Acc: 0.844 | Test Acc: 0.846 | Test Loss: 0.360 | Test AUC 0.841 (1295.46 s) 
Epoch 27, Step 708 | Training Acc: 0.840 | Test Acc: 0.809 | Test Loss: 0.417 | Test AUC 0.821 (1301.17 s) 
Epoch 27, Step 826 | Training Acc: 0.803 | Test Acc: 0.801 | Test Loss: 0.415 | Test AUC 0.790 (1306.93 s) 
Epoch 27, Step 944 | Training Acc: 0.844 | Test Acc: 0.805 | Test Loss: 0.378 | Test AUC 0.818 (1312.66 s) 
(*) Entering Epoch 28 (1313.121 s)
Epoch 28, Step 118 | Training Acc: 0.814 | Test Acc: 0.822 | Test Loss: 0.385 | Test AUC 0.815 (1318.82 s) 
Epoch 28, Step 236 | Training Acc: 0.818 | Test Acc: 0.812 | Test Loss: 0.403 | Test AUC 0.803 (1324.58 s) 
Epoch 28, Step 354 | Training Acc: 0.814 | Test Acc: 0.822 | Test Loss: 0.398 | Test AUC 0.804 (1330.30 s) 
Epoch 28, Step 472 | Training Acc: 0.818 | Test Acc: 0.820 | Test Loss: 0.385 | Test AUC 0.820 (1336.02 s) 
Epoch 28, Step 590 | Training Acc: 0.818 | Test Acc: 0.824 | Test Loss: 0.418 | Test AUC 0.807 (1341.73 s) 
Epoch 28, Step 708 | Training Acc: 0.814 | Test Acc: 0.820 | Test Loss: 0.407 | Test AUC 0.812 (1347.46 s) 
Epoch 28, Step 826 | Training Acc: 0.842 | Test Acc: 0.805 | Test Loss: 0.409 | Test AUC 0.814 (1353.18 s) 
Epoch 28, Step 944 | Training Acc: 0.846 | Test Acc: 0.828 | Test Loss: 0.347 | Test AUC 0.832 (1358.89 s) 
(*) Entering Epoch 29 (1359.348 s)
Epoch 29, Step 118 | Training Acc: 0.822 | Test Acc: 0.820 | Test Loss: 0.391 | Test AUC 0.815 (1365.07 s) 
Epoch 29, Step 236 | Training Acc: 0.832 | Test Acc: 0.814 | Test Loss: 0.400 | Test AUC 0.815 (1370.86 s) 
Epoch 29, Step 354 | Training Acc: 0.822 | Test Acc: 0.824 | Test Loss: 0.373 | Test AUC 0.802 (1376.61 s) 
Epoch 29, Step 472 | Training Acc: 0.834 | Test Acc: 0.848 | Test Loss: 0.366 | Test AUC 0.827 (1382.37 s) 
Epoch 29, Step 590 | Training Acc: 0.846 | Test Acc: 0.807 | Test Loss: 0.424 | Test AUC 0.807 (1388.13 s) 
Epoch 29, Step 708 | Training Acc: 0.865 | Test Acc: 0.824 | Test Loss: 0.370 | Test AUC 0.831 (1394.04 s) 
Epoch 29, Step 826 | Training Acc: 0.826 | Test Acc: 0.803 | Test Loss: 0.405 | Test AUC 0.791 (1399.80 s) 
Epoch 29, Step 944 | Training Acc: 0.793 | Test Acc: 0.803 | Test Loss: 0.408 | Test AUC 0.789 (1405.54 s) 
(*) Entering Epoch 30 (1405.999 s)
Epoch 30, Step 118 | Training Acc: 0.816 | Test Acc: 0.828 | Test Loss: 0.379 | Test AUC 0.818 (1411.72 s) 
Epoch 30, Step 236 | Training Acc: 0.854 | Test Acc: 0.828 | Test Loss: 0.404 | Test AUC 0.815 (1417.46 s) 
Epoch 30, Step 354 | Training Acc: 0.822 | Test Acc: 0.824 | Test Loss: 0.413 | Test AUC 0.790 (1423.17 s) 
Epoch 30, Step 472 | Training Acc: 0.824 | Test Acc: 0.824 | Test Loss: 0.404 | Test AUC 0.823 (1428.90 s) 
Epoch 30, Step 590 | Training Acc: 0.828 | Test Acc: 0.803 | Test Loss: 0.421 | Test AUC 0.796 (1434.66 s) 
Epoch 30, Step 708 | Training Acc: 0.811 | Test Acc: 0.826 | Test Loss: 0.390 | Test AUC 0.793 (1440.39 s) 
Epoch 30, Step 826 | Training Acc: 0.816 | Test Acc: 0.807 | Test Loss: 0.411 | Test AUC 0.799 (1446.11 s) 
Epoch 30, Step 944 | Training Acc: 0.824 | Test Acc: 0.832 | Test Loss: 0.362 | Test AUC 0.815 (1451.87 s) 
(*) Entering Epoch 31 (1452.335 s)
Epoch 31, Step 118 | Training Acc: 0.805 | Test Acc: 0.842 | Test Loss: 0.381 | Test AUC 0.812 (1458.07 s) 
Epoch 31, Step 236 | Training Acc: 0.861 | Test Acc: 0.818 | Test Loss: 0.391 | Test AUC 0.826 (1463.80 s) 
Epoch 31, Step 354 | Training Acc: 0.855 | Test Acc: 0.844 | Test Loss: 0.379 | Test AUC 0.842 (1469.55 s) 
Epoch 31, Step 472 | Training Acc: 0.840 | Test Acc: 0.822 | Test Loss: 0.391 | Test AUC 0.817 (1475.27 s) 
Epoch 31, Step 590 | Training Acc: 0.814 | Test Acc: 0.801 | Test Loss: 0.425 | Test AUC 0.794 (1480.99 s) 
Epoch 31, Step 708 | Training Acc: 0.824 | Test Acc: 0.830 | Test Loss: 0.416 | Test AUC 0.807 (1486.71 s) 
Epoch 31, Step 826 | Training Acc: 0.820 | Test Acc: 0.816 | Test Loss: 0.398 | Test AUC 0.807 (1492.44 s) 
Epoch 31, Step 944 | Training Acc: 0.834 | Test Acc: 0.814 | Test Loss: 0.413 | Test AUC 0.807 (1498.17 s) 
Graph saved to file: checkpoints/biRNN_kst_rho0_epoch32.ckpt-32
(*) Entering Epoch 32 (1499.635 s)
Epoch 32, Step 118 | Training Acc: 0.820 | Test Acc: 0.820 | Test Loss: 0.391 | Test AUC 0.804 (1505.39 s) 
Epoch 32, Step 236 | Training Acc: 0.820 | Test Acc: 0.824 | Test Loss: 0.382 | Test AUC 0.819 (1511.11 s) 
Epoch 32, Step 354 | Training Acc: 0.848 | Test Acc: 0.840 | Test Loss: 0.362 | Test AUC 0.836 (1516.86 s) 
Epoch 32, Step 472 | Training Acc: 0.826 | Test Acc: 0.830 | Test Loss: 0.356 | Test AUC 0.821 (1522.59 s) 
Epoch 32, Step 590 | Training Acc: 0.842 | Test Acc: 0.818 | Test Loss: 0.413 | Test AUC 0.815 (1528.43 s) 
Epoch 32, Step 708 | Training Acc: 0.828 | Test Acc: 0.818 | Test Loss: 0.403 | Test AUC 0.807 (1534.22 s) 
Epoch 32, Step 826 | Training Acc: 0.822 | Test Acc: 0.828 | Test Loss: 0.386 | Test AUC 0.813 (1540.00 s) 
Epoch 32, Step 944 | Training Acc: 0.826 | Test Acc: 0.818 | Test Loss: 0.400 | Test AUC 0.812 (1545.73 s) 
(*) Entering Epoch 33 (1546.188 s)
Epoch 33, Step 118 | Training Acc: 0.809 | Test Acc: 0.842 | Test Loss: 0.384 | Test AUC 0.819 (1551.89 s) 
Epoch 33, Step 236 | Training Acc: 0.824 | Test Acc: 0.844 | Test Loss: 0.340 | Test AUC 0.819 (1557.61 s) 
Epoch 33, Step 354 | Training Acc: 0.828 | Test Acc: 0.824 | Test Loss: 0.405 | Test AUC 0.809 (1563.39 s) 
Epoch 33, Step 472 | Training Acc: 0.844 | Test Acc: 0.855 | Test Loss: 0.341 | Test AUC 0.847 (1569.14 s) 
Epoch 33, Step 590 | Training Acc: 0.857 | Test Acc: 0.809 | Test Loss: 0.401 | Test AUC 0.818 (1575.05 s) 
Epoch 33, Step 708 | Training Acc: 0.838 | Test Acc: 0.828 | Test Loss: 0.407 | Test AUC 0.816 (1580.77 s) 
Epoch 33, Step 826 | Training Acc: 0.832 | Test Acc: 0.852 | Test Loss: 0.340 | Test AUC 0.834 (1586.50 s) 
Epoch 33, Step 944 | Training Acc: 0.816 | Test Acc: 0.826 | Test Loss: 0.391 | Test AUC 0.811 (1592.24 s) 
(*) Entering Epoch 34 (1592.697 s)
Epoch 34, Step 118 | Training Acc: 0.818 | Test Acc: 0.863 | Test Loss: 0.348 | Test AUC 0.828 (1598.44 s) 
Epoch 34, Step 236 | Training Acc: 0.812 | Test Acc: 0.811 | Test Loss: 0.411 | Test AUC 0.798 (1604.18 s) 
Epoch 34, Step 354 | Training Acc: 0.812 | Test Acc: 0.812 | Test Loss: 0.386 | Test AUC 0.805 (1609.92 s) 
Epoch 34, Step 472 | Training Acc: 0.838 | Test Acc: 0.822 | Test Loss: 0.380 | Test AUC 0.822 (1615.75 s) 
Epoch 34, Step 590 | Training Acc: 0.812 | Test Acc: 0.842 | Test Loss: 0.379 | Test AUC 0.814 (1621.59 s) 
Epoch 34, Step 708 | Training Acc: 0.816 | Test Acc: 0.826 | Test Loss: 0.393 | Test AUC 0.805 (1627.38 s) 
Epoch 34, Step 826 | Training Acc: 0.820 | Test Acc: 0.828 | Test Loss: 0.386 | Test AUC 0.819 (1633.30 s) 
Epoch 34, Step 944 | Training Acc: 0.818 | Test Acc: 0.836 | Test Loss: 0.373 | Test AUC 0.819 (1639.23 s) 
(*) Entering Epoch 35 (1639.691 s)
Epoch 35, Step 118 | Training Acc: 0.832 | Test Acc: 0.812 | Test Loss: 0.432 | Test AUC 0.812 (1645.51 s) 
Epoch 35, Step 236 | Training Acc: 0.844 | Test Acc: 0.857 | Test Loss: 0.355 | Test AUC 0.846 (1651.24 s) 
Epoch 35, Step 354 | Training Acc: 0.854 | Test Acc: 0.807 | Test Loss: 0.385 | Test AUC 0.815 (1656.99 s) 
Epoch 35, Step 472 | Training Acc: 0.809 | Test Acc: 0.811 | Test Loss: 0.412 | Test AUC 0.793 (1662.71 s) 
Epoch 35, Step 590 | Training Acc: 0.799 | Test Acc: 0.846 | Test Loss: 0.361 | Test AUC 0.815 (1668.44 s) 
Epoch 35, Step 708 | Training Acc: 0.824 | Test Acc: 0.809 | Test Loss: 0.404 | Test AUC 0.807 (1674.23 s) 
Epoch 35, Step 826 | Training Acc: 0.842 | Test Acc: 0.869 | Test Loss: 0.329 | Test AUC 0.849 (1680.96 s) *
Epoch 35, Step 944 | Training Acc: 0.789 | Test Acc: 0.809 | Test Loss: 0.393 | Test AUC 0.793 (1686.69 s) 
(*) Entering Epoch 36 (1687.151 s)
Epoch 36, Step 118 | Training Acc: 0.812 | Test Acc: 0.805 | Test Loss: 0.432 | Test AUC 0.801 (1692.89 s) 
Epoch 36, Step 236 | Training Acc: 0.865 | Test Acc: 0.844 | Test Loss: 0.356 | Test AUC 0.844 (1698.76 s) 
Epoch 36, Step 354 | Training Acc: 0.842 | Test Acc: 0.824 | Test Loss: 0.403 | Test AUC 0.819 (1704.61 s) 
Epoch 36, Step 472 | Training Acc: 0.838 | Test Acc: 0.820 | Test Loss: 0.397 | Test AUC 0.818 (1710.41 s) 
Epoch 36, Step 590 | Training Acc: 0.834 | Test Acc: 0.814 | Test Loss: 0.417 | Test AUC 0.814 (1716.78 s) 
Epoch 36, Step 708 | Training Acc: 0.818 | Test Acc: 0.809 | Test Loss: 0.415 | Test AUC 0.807 (1726.92 s) 
Epoch 36, Step 826 | Training Acc: 0.830 | Test Acc: 0.812 | Test Loss: 0.437 | Test AUC 0.809 (1737.13 s) 
Epoch 36, Step 944 | Training Acc: 0.840 | Test Acc: 0.824 | Test Loss: 0.419 | Test AUC 0.822 (1747.27 s) 
(*) Entering Epoch 37 (1747.921 s)
Epoch 37, Step 118 | Training Acc: 0.822 | Test Acc: 0.840 | Test Loss: 0.355 | Test AUC 0.820 (1758.07 s) 
Epoch 37, Step 236 | Training Acc: 0.850 | Test Acc: 0.797 | Test Loss: 0.419 | Test AUC 0.818 (1768.23 s) 
Epoch 37, Step 354 | Training Acc: 0.840 | Test Acc: 0.822 | Test Loss: 0.380 | Test AUC 0.824 (1778.39 s) 
Epoch 37, Step 472 | Training Acc: 0.820 | Test Acc: 0.805 | Test Loss: 0.397 | Test AUC 0.805 (1788.58 s) 
Epoch 37, Step 590 | Training Acc: 0.830 | Test Acc: 0.824 | Test Loss: 0.401 | Test AUC 0.824 (1798.84 s) 
Epoch 37, Step 708 | Training Acc: 0.820 | Test Acc: 0.857 | Test Loss: 0.346 | Test AUC 0.830 (1808.99 s) 
Epoch 37, Step 826 | Training Acc: 0.842 | Test Acc: 0.820 | Test Loss: 0.378 | Test AUC 0.822 (1819.13 s) 
Epoch 37, Step 944 | Training Acc: 0.820 | Test Acc: 0.850 | Test Loss: 0.335 | Test AUC 0.818 (1829.38 s) 
(*) Entering Epoch 38 (1830.047 s)
Epoch 38, Step 118 | Training Acc: 0.795 | Test Acc: 0.824 | Test Loss: 0.398 | Test AUC 0.802 (1840.29 s) 
Epoch 38, Step 236 | Training Acc: 0.822 | Test Acc: 0.793 | Test Loss: 0.414 | Test AUC 0.800 (1850.50 s) 
Epoch 38, Step 354 | Training Acc: 0.854 | Test Acc: 0.812 | Test Loss: 0.398 | Test AUC 0.820 (1860.76 s) 
Epoch 38, Step 472 | Training Acc: 0.805 | Test Acc: 0.820 | Test Loss: 0.403 | Test AUC 0.809 (1870.93 s) 
Epoch 38, Step 590 | Training Acc: 0.832 | Test Acc: 0.812 | Test Loss: 0.395 | Test AUC 0.809 (1881.14 s) 
Epoch 38, Step 708 | Training Acc: 0.824 | Test Acc: 0.807 | Test Loss: 0.406 | Test AUC 0.806 (1891.36 s) 
Epoch 38, Step 826 | Training Acc: 0.842 | Test Acc: 0.809 | Test Loss: 0.414 | Test AUC 0.819 (1901.55 s) 
Epoch 38, Step 944 | Training Acc: 0.820 | Test Acc: 0.807 | Test Loss: 0.401 | Test AUC 0.800 (1911.53 s) 
(*) Entering Epoch 39 (1912.183 s)
Epoch 39, Step 118 | Training Acc: 0.828 | Test Acc: 0.832 | Test Loss: 0.375 | Test AUC 0.813 (1922.34 s) 
Epoch 39, Step 236 | Training Acc: 0.828 | Test Acc: 0.834 | Test Loss: 0.384 | Test AUC 0.819 (1932.56 s) 
Epoch 39, Step 354 | Training Acc: 0.818 | Test Acc: 0.809 | Test Loss: 0.371 | Test AUC 0.795 (1942.75 s) 
Epoch 39, Step 472 | Training Acc: 0.809 | Test Acc: 0.850 | Test Loss: 0.354 | Test AUC 0.818 (1952.97 s) 
Epoch 39, Step 590 | Training Acc: 0.828 | Test Acc: 0.838 | Test Loss: 0.384 | Test AUC 0.820 (1963.12 s) 
Epoch 39, Step 708 | Training Acc: 0.805 | Test Acc: 0.785 | Test Loss: 0.417 | Test AUC 0.791 (1973.31 s) 
Epoch 39, Step 826 | Training Acc: 0.820 | Test Acc: 0.807 | Test Loss: 0.415 | Test AUC 0.806 (1983.55 s) 
Epoch 39, Step 944 | Training Acc: 0.830 | Test Acc: 0.818 | Test Loss: 0.418 | Test AUC 0.812 (1993.73 s) 
Graph saved to file: checkpoints/biRNN_kst_rho0_epoch40.ckpt-40
(*) Entering Epoch 40 (1995.423 s)
Epoch 40, Step 118 | Training Acc: 0.814 | Test Acc: 0.844 | Test Loss: 0.359 | Test AUC 0.819 (2005.59 s) 
Epoch 40, Step 236 | Training Acc: 0.822 | Test Acc: 0.842 | Test Loss: 0.361 | Test AUC 0.820 (2015.80 s) 
Epoch 40, Step 354 | Training Acc: 0.807 | Test Acc: 0.828 | Test Loss: 0.393 | Test AUC 0.807 (2025.99 s) 
Epoch 40, Step 472 | Training Acc: 0.832 | Test Acc: 0.832 | Test Loss: 0.374 | Test AUC 0.834 (2036.16 s) 
Epoch 40, Step 590 | Training Acc: 0.828 | Test Acc: 0.842 | Test Loss: 0.362 | Test AUC 0.832 (2046.40 s) 
Epoch 40, Step 708 | Training Acc: 0.834 | Test Acc: 0.854 | Test Loss: 0.359 | Test AUC 0.828 (2056.56 s) 
Epoch 40, Step 826 | Training Acc: 0.844 | Test Acc: 0.812 | Test Loss: 0.396 | Test AUC 0.822 (2066.73 s) 
Epoch 40, Step 944 | Training Acc: 0.852 | Test Acc: 0.822 | Test Loss: 0.384 | Test AUC 0.818 (2076.91 s) 
(*) Entering Epoch 41 (2077.551 s)
Epoch 41, Step 118 | Training Acc: 0.824 | Test Acc: 0.834 | Test Loss: 0.396 | Test AUC 0.820 (2087.71 s) 
Epoch 41, Step 236 | Training Acc: 0.855 | Test Acc: 0.807 | Test Loss: 0.445 | Test AUC 0.820 (2097.98 s) 
Epoch 41, Step 354 | Training Acc: 0.850 | Test Acc: 0.838 | Test Loss: 0.372 | Test AUC 0.828 (2108.14 s) 
Epoch 41, Step 472 | Training Acc: 0.824 | Test Acc: 0.826 | Test Loss: 0.387 | Test AUC 0.806 (2118.30 s) 
Epoch 41, Step 590 | Training Acc: 0.832 | Test Acc: 0.826 | Test Loss: 0.376 | Test AUC 0.805 (2128.47 s) 
Epoch 41, Step 708 | Training Acc: 0.781 | Test Acc: 0.830 | Test Loss: 0.369 | Test AUC 0.799 (2138.65 s) 
Epoch 41, Step 826 | Training Acc: 0.863 | Test Acc: 0.826 | Test Loss: 0.383 | Test AUC 0.834 (2148.83 s) 
Epoch 41, Step 944 | Training Acc: 0.840 | Test Acc: 0.826 | Test Loss: 0.391 | Test AUC 0.824 (2159.05 s) 
(*) Entering Epoch 42 (2159.693 s)
Epoch 42, Step 118 | Training Acc: 0.842 | Test Acc: 0.844 | Test Loss: 0.338 | Test AUC 0.833 (2169.88 s) 
Epoch 42, Step 236 | Training Acc: 0.826 | Test Acc: 0.840 | Test Loss: 0.357 | Test AUC 0.824 (2180.06 s) 
Epoch 42, Step 354 | Training Acc: 0.857 | Test Acc: 0.818 | Test Loss: 0.405 | Test AUC 0.829 (2190.25 s) 
Epoch 42, Step 472 | Training Acc: 0.832 | Test Acc: 0.816 | Test Loss: 0.392 | Test AUC 0.811 (2200.49 s) 
Epoch 42, Step 590 | Training Acc: 0.820 | Test Acc: 0.820 | Test Loss: 0.414 | Test AUC 0.812 (2210.69 s) 
Epoch 42, Step 708 | Training Acc: 0.828 | Test Acc: 0.830 | Test Loss: 0.372 | Test AUC 0.816 (2220.94 s) 
Epoch 42, Step 826 | Training Acc: 0.812 | Test Acc: 0.805 | Test Loss: 0.421 | Test AUC 0.793 (2231.14 s) 
Epoch 42, Step 944 | Training Acc: 0.816 | Test Acc: 0.824 | Test Loss: 0.396 | Test AUC 0.807 (2241.34 s) 
(*) Entering Epoch 43 (2241.984 s)
Epoch 43, Step 118 | Training Acc: 0.842 | Test Acc: 0.812 | Test Loss: 0.368 | Test AUC 0.829 (2252.24 s) 
Epoch 43, Step 236 | Training Acc: 0.812 | Test Acc: 0.842 | Test Loss: 0.368 | Test AUC 0.820 (2262.44 s) 
Epoch 43, Step 354 | Training Acc: 0.846 | Test Acc: 0.842 | Test Loss: 0.371 | Test AUC 0.831 (2272.62 s) 
Epoch 43, Step 472 | Training Acc: 0.836 | Test Acc: 0.793 | Test Loss: 0.423 | Test AUC 0.807 (2282.85 s) 
Epoch 43, Step 590 | Training Acc: 0.844 | Test Acc: 0.820 | Test Loss: 0.396 | Test AUC 0.816 (2292.91 s) 
Epoch 43, Step 708 | Training Acc: 0.832 | Test Acc: 0.820 | Test Loss: 0.387 | Test AUC 0.821 (2303.08 s) 
Epoch 43, Step 826 | Training Acc: 0.842 | Test Acc: 0.809 | Test Loss: 0.390 | Test AUC 0.818 (2313.31 s) 
Epoch 43, Step 944 | Training Acc: 0.867 | Test Acc: 0.832 | Test Loss: 0.379 | Test AUC 0.846 (2323.47 s) 
(*) Entering Epoch 44 (2324.117 s)
Epoch 44, Step 118 | Training Acc: 0.820 | Test Acc: 0.830 | Test Loss: 0.368 | Test AUC 0.810 (2334.27 s) 
Epoch 44, Step 236 | Training Acc: 0.834 | Test Acc: 0.861 | Test Loss: 0.335 | Test AUC 0.843 (2344.44 s) 
Epoch 44, Step 354 | Training Acc: 0.828 | Test Acc: 0.820 | Test Loss: 0.385 | Test AUC 0.796 (2354.63 s) 
Epoch 44, Step 472 | Training Acc: 0.848 | Test Acc: 0.828 | Test Loss: 0.401 | Test AUC 0.818 (2364.80 s) 
Epoch 44, Step 590 | Training Acc: 0.840 | Test Acc: 0.822 | Test Loss: 0.404 | Test AUC 0.825 (2375.02 s) 
Epoch 44, Step 708 | Training Acc: 0.814 | Test Acc: 0.834 | Test Loss: 0.389 | Test AUC 0.817 (2385.19 s) 
Epoch 44, Step 826 | Training Acc: 0.838 | Test Acc: 0.857 | Test Loss: 0.358 | Test AUC 0.838 (2395.36 s) 
Epoch 44, Step 944 | Training Acc: 0.850 | Test Acc: 0.793 | Test Loss: 0.426 | Test AUC 0.812 (2405.58 s) 
(*) Entering Epoch 45 (2406.222 s)
Epoch 45, Step 118 | Training Acc: 0.834 | Test Acc: 0.838 | Test Loss: 0.338 | Test AUC 0.828 (2416.49 s) 
Epoch 45, Step 236 | Training Acc: 0.807 | Test Acc: 0.824 | Test Loss: 0.363 | Test AUC 0.798 (2426.73 s) 
Epoch 45, Step 354 | Training Acc: 0.854 | Test Acc: 0.824 | Test Loss: 0.376 | Test AUC 0.829 (2436.47 s) 
Epoch 45, Step 472 | Training Acc: 0.805 | Test Acc: 0.834 | Test Loss: 0.392 | Test AUC 0.808 (2442.29 s) 
Epoch 45, Step 590 | Training Acc: 0.846 | Test Acc: 0.855 | Test Loss: 0.359 | Test AUC 0.843 (2448.06 s) 
Epoch 45, Step 708 | Training Acc: 0.811 | Test Acc: 0.816 | Test Loss: 0.363 | Test AUC 0.811 (2453.82 s) 
Epoch 45, Step 826 | Training Acc: 0.855 | Test Acc: 0.838 | Test Loss: 0.379 | Test AUC 0.839 (2459.60 s) 
Epoch 45, Step 944 | Training Acc: 0.822 | Test Acc: 0.840 | Test Loss: 0.397 | Test AUC 0.825 (2465.44 s) 
(*) Entering Epoch 46 (2465.898 s)
Epoch 46, Step 118 | Training Acc: 0.830 | Test Acc: 0.832 | Test Loss: 0.392 | Test AUC 0.819 (2471.66 s) 
Epoch 46, Step 236 | Training Acc: 0.846 | Test Acc: 0.781 | Test Loss: 0.450 | Test AUC 0.808 (2477.47 s) 
Epoch 46, Step 354 | Training Acc: 0.830 | Test Acc: 0.816 | Test Loss: 0.378 | Test AUC 0.808 (2483.23 s) 
Epoch 46, Step 472 | Training Acc: 0.863 | Test Acc: 0.797 | Test Loss: 0.435 | Test AUC 0.826 (2490.28 s) 
Epoch 46, Step 590 | Training Acc: 0.795 | Test Acc: 0.822 | Test Loss: 0.391 | Test AUC 0.791 (2500.42 s) 
Epoch 46, Step 708 | Training Acc: 0.846 | Test Acc: 0.828 | Test Loss: 0.370 | Test AUC 0.831 (2510.68 s) 
Epoch 46, Step 826 | Training Acc: 0.838 | Test Acc: 0.828 | Test Loss: 0.381 | Test AUC 0.823 (2520.78 s) 
Epoch 46, Step 944 | Training Acc: 0.818 | Test Acc: 0.850 | Test Loss: 0.364 | Test AUC 0.824 (2530.94 s) 
(*) Entering Epoch 47 (2531.594 s)
Epoch 47, Step 118 | Training Acc: 0.844 | Test Acc: 0.803 | Test Loss: 0.412 | Test AUC 0.812 (2541.74 s) 
Epoch 47, Step 236 | Training Acc: 0.848 | Test Acc: 0.826 | Test Loss: 0.409 | Test AUC 0.816 (2551.92 s) 
Epoch 47, Step 354 | Training Acc: 0.838 | Test Acc: 0.816 | Test Loss: 0.414 | Test AUC 0.806 (2562.10 s) 
Epoch 47, Step 472 | Training Acc: 0.840 | Test Acc: 0.840 | Test Loss: 0.347 | Test AUC 0.821 (2572.30 s) 
Epoch 47, Step 590 | Training Acc: 0.824 | Test Acc: 0.826 | Test Loss: 0.390 | Test AUC 0.811 (2582.47 s) 
Epoch 47, Step 708 | Training Acc: 0.828 | Test Acc: 0.812 | Test Loss: 0.405 | Test AUC 0.805 (2592.66 s) 
Epoch 47, Step 826 | Training Acc: 0.818 | Test Acc: 0.779 | Test Loss: 0.424 | Test AUC 0.775 (2602.83 s) 
Epoch 47, Step 944 | Training Acc: 0.820 | Test Acc: 0.822 | Test Loss: 0.393 | Test AUC 0.820 (2613.02 s) 
Graph saved to file: checkpoints/biRNN_kst_rho0_epoch48.ckpt-48
(*) Entering Epoch 48 (2614.737 s)
Epoch 48, Step 118 | Training Acc: 0.844 | Test Acc: 0.840 | Test Loss: 0.366 | Test AUC 0.823 (2624.96 s) 
Epoch 48, Step 236 | Training Acc: 0.832 | Test Acc: 0.836 | Test Loss: 0.387 | Test AUC 0.821 (2635.14 s) 
Epoch 48, Step 354 | Training Acc: 0.803 | Test Acc: 0.838 | Test Loss: 0.340 | Test AUC 0.817 (2645.31 s) 
Epoch 48, Step 472 | Training Acc: 0.855 | Test Acc: 0.834 | Test Loss: 0.376 | Test AUC 0.834 (2655.54 s) 
Epoch 48, Step 590 | Training Acc: 0.857 | Test Acc: 0.820 | Test Loss: 0.425 | Test AUC 0.827 (2665.70 s) 
Epoch 48, Step 708 | Training Acc: 0.838 | Test Acc: 0.809 | Test Loss: 0.408 | Test AUC 0.806 (2675.89 s) 
Epoch 48, Step 826 | Training Acc: 0.844 | Test Acc: 0.826 | Test Loss: 0.373 | Test AUC 0.828 (2685.93 s) 
Epoch 48, Step 944 | Training Acc: 0.832 | Test Acc: 0.811 | Test Loss: 0.430 | Test AUC 0.813 (2696.12 s) 
(*) Entering Epoch 49 (2696.761 s)
Epoch 49, Step 118 | Training Acc: 0.822 | Test Acc: 0.867 | Test Loss: 0.324 | Test AUC 0.828 (2706.91 s) 
Epoch 49, Step 236 | Training Acc: 0.814 | Test Acc: 0.834 | Test Loss: 0.391 | Test AUC 0.811 (2717.13 s) 
Epoch 49, Step 354 | Training Acc: 0.816 | Test Acc: 0.803 | Test Loss: 0.419 | Test AUC 0.799 (2727.37 s) 
Epoch 49, Step 472 | Training Acc: 0.838 | Test Acc: 0.816 | Test Loss: 0.401 | Test AUC 0.813 (2737.58 s) 
Epoch 49, Step 590 | Training Acc: 0.844 | Test Acc: 0.805 | Test Loss: 0.409 | Test AUC 0.804 (2747.74 s) 
Epoch 49, Step 708 | Training Acc: 0.852 | Test Acc: 0.811 | Test Loss: 0.383 | Test AUC 0.814 (2757.88 s) 
Epoch 49, Step 826 | Training Acc: 0.826 | Test Acc: 0.852 | Test Loss: 0.341 | Test AUC 0.829 (2768.02 s) 
Epoch 49, Step 944 | Training Acc: 0.826 | Test Acc: 0.842 | Test Loss: 0.381 | Test AUC 0.822 (2778.14 s) 
(*) Entering Epoch 50 (2778.786 s)
Epoch 50, Step 118 | Training Acc: 0.832 | Test Acc: 0.820 | Test Loss: 0.383 | Test AUC 0.819 (2788.95 s) 
Epoch 50, Step 236 | Training Acc: 0.828 | Test Acc: 0.846 | Test Loss: 0.340 | Test AUC 0.831 (2799.14 s) 
Epoch 50, Step 354 | Training Acc: 0.822 | Test Acc: 0.807 | Test Loss: 0.416 | Test AUC 0.808 (2809.30 s) 
Epoch 50, Step 472 | Training Acc: 0.832 | Test Acc: 0.848 | Test Loss: 0.362 | Test AUC 0.835 (2819.48 s) 
Epoch 50, Step 590 | Training Acc: 0.854 | Test Acc: 0.844 | Test Loss: 0.376 | Test AUC 0.840 (2829.67 s) 
Epoch 50, Step 708 | Training Acc: 0.834 | Test Acc: 0.836 | Test Loss: 0.380 | Test AUC 0.826 (2839.88 s) 
Epoch 50, Step 826 | Training Acc: 0.842 | Test Acc: 0.836 | Test Loss: 0.374 | Test AUC 0.832 (2850.02 s) 
Epoch 50, Step 944 | Training Acc: 0.852 | Test Acc: 0.809 | Test Loss: 0.407 | Test AUC 0.818 (2860.23 s) 
(*) Entering Epoch 51 (2860.876 s)
Epoch 51, Step 118 | Training Acc: 0.836 | Test Acc: 0.811 | Test Loss: 0.409 | Test AUC 0.812 (2870.97 s) 
Epoch 51, Step 236 | Training Acc: 0.828 | Test Acc: 0.832 | Test Loss: 0.370 | Test AUC 0.825 (2881.16 s) 
Epoch 51, Step 354 | Training Acc: 0.826 | Test Acc: 0.809 | Test Loss: 0.410 | Test AUC 0.801 (2891.39 s) 
Epoch 51, Step 472 | Training Acc: 0.863 | Test Acc: 0.814 | Test Loss: 0.386 | Test AUC 0.824 (2901.57 s) 
Epoch 51, Step 590 | Training Acc: 0.795 | Test Acc: 0.828 | Test Loss: 0.385 | Test AUC 0.797 (2911.76 s) 
Epoch 51, Step 708 | Training Acc: 0.816 | Test Acc: 0.842 | Test Loss: 0.369 | Test AUC 0.810 (2921.94 s) 
Epoch 51, Step 826 | Training Acc: 0.812 | Test Acc: 0.824 | Test Loss: 0.380 | Test AUC 0.812 (2932.12 s) 
Epoch 51, Step 944 | Training Acc: 0.859 | Test Acc: 0.830 | Test Loss: 0.398 | Test AUC 0.831 (2942.38 s) 
(*) Entering Epoch 52 (2943.029 s)
Epoch 52, Step 118 | Training Acc: 0.867 | Test Acc: 0.840 | Test Loss: 0.378 | Test AUC 0.843 (2953.25 s) 
Epoch 52, Step 236 | Training Acc: 0.861 | Test Acc: 0.830 | Test Loss: 0.381 | Test AUC 0.838 (2963.45 s) 
Epoch 52, Step 354 | Training Acc: 0.846 | Test Acc: 0.855 | Test Loss: 0.328 | Test AUC 0.835 (2973.70 s) 
Epoch 52, Step 472 | Training Acc: 0.791 | Test Acc: 0.848 | Test Loss: 0.363 | Test AUC 0.816 (2983.94 s) 
Epoch 52, Step 590 | Training Acc: 0.834 | Test Acc: 0.844 | Test Loss: 0.377 | Test AUC 0.821 (2994.15 s) 
Epoch 52, Step 708 | Training Acc: 0.820 | Test Acc: 0.805 | Test Loss: 0.408 | Test AUC 0.790 (3004.38 s) 
Epoch 52, Step 826 | Training Acc: 0.832 | Test Acc: 0.816 | Test Loss: 0.388 | Test AUC 0.807 (3014.59 s) 
Epoch 52, Step 944 | Training Acc: 0.828 | Test Acc: 0.824 | Test Loss: 0.393 | Test AUC 0.813 (3024.79 s) 
(*) Entering Epoch 53 (3025.434 s)
Epoch 53, Step 118 | Training Acc: 0.822 | Test Acc: 0.803 | Test Loss: 0.414 | Test AUC 0.794 (3035.65 s) 
Epoch 53, Step 236 | Training Acc: 0.820 | Test Acc: 0.787 | Test Loss: 0.449 | Test AUC 0.800 (3045.87 s) 
Epoch 53, Step 354 | Training Acc: 0.852 | Test Acc: 0.812 | Test Loss: 0.412 | Test AUC 0.823 (3056.05 s) 
Epoch 53, Step 472 | Training Acc: 0.854 | Test Acc: 0.818 | Test Loss: 0.395 | Test AUC 0.841 (3066.08 s) 
Epoch 53, Step 590 | Training Acc: 0.828 | Test Acc: 0.844 | Test Loss: 0.391 | Test AUC 0.827 (3076.32 s) 
Epoch 53, Step 708 | Training Acc: 0.857 | Test Acc: 0.834 | Test Loss: 0.365 | Test AUC 0.840 (3086.54 s) 
Epoch 53, Step 826 | Training Acc: 0.848 | Test Acc: 0.828 | Test Loss: 0.372 | Test AUC 0.827 (3096.76 s) 
Epoch 53, Step 944 | Training Acc: 0.832 | Test Acc: 0.828 | Test Loss: 0.408 | Test AUC 0.820 (3106.97 s) 
(*) Entering Epoch 54 (3107.612 s)
Epoch 54, Step 118 | Training Acc: 0.830 | Test Acc: 0.840 | Test Loss: 0.334 | Test AUC 0.826 (3117.80 s) 
Epoch 54, Step 236 | Training Acc: 0.828 | Test Acc: 0.807 | Test Loss: 0.410 | Test AUC 0.809 (3127.99 s) 
Epoch 54, Step 354 | Training Acc: 0.812 | Test Acc: 0.836 | Test Loss: 0.341 | Test AUC 0.812 (3138.19 s) 
Epoch 54, Step 472 | Training Acc: 0.816 | Test Acc: 0.807 | Test Loss: 0.423 | Test AUC 0.798 (3148.37 s) 
Epoch 54, Step 590 | Training Acc: 0.818 | Test Acc: 0.818 | Test Loss: 0.410 | Test AUC 0.812 (3158.64 s) 
Epoch 54, Step 708 | Training Acc: 0.852 | Test Acc: 0.834 | Test Loss: 0.387 | Test AUC 0.832 (3168.84 s) 
Epoch 54, Step 826 | Training Acc: 0.818 | Test Acc: 0.797 | Test Loss: 0.458 | Test AUC 0.800 (3179.03 s) 
Epoch 54, Step 944 | Training Acc: 0.820 | Test Acc: 0.824 | Test Loss: 0.397 | Test AUC 0.811 (3189.21 s) 
(*) Entering Epoch 55 (3189.863 s)
Epoch 55, Step 118 | Training Acc: 0.844 | Test Acc: 0.814 | Test Loss: 0.415 | Test AUC 0.816 (3200.04 s) 
Epoch 55, Step 236 | Training Acc: 0.826 | Test Acc: 0.848 | Test Loss: 0.364 | Test AUC 0.813 (3210.23 s) 
Epoch 55, Step 354 | Training Acc: 0.848 | Test Acc: 0.836 | Test Loss: 0.403 | Test AUC 0.834 (3220.47 s) 
Epoch 55, Step 472 | Training Acc: 0.836 | Test Acc: 0.855 | Test Loss: 0.336 | Test AUC 0.830 (3230.62 s) 
Epoch 55, Step 590 | Training Acc: 0.857 | Test Acc: 0.840 | Test Loss: 0.358 | Test AUC 0.841 (3240.87 s) 
Epoch 55, Step 708 | Training Acc: 0.818 | Test Acc: 0.803 | Test Loss: 0.405 | Test AUC 0.794 (3251.07 s) 
Epoch 55, Step 826 | Training Acc: 0.834 | Test Acc: 0.814 | Test Loss: 0.375 | Test AUC 0.822 (3261.13 s) 
Epoch 55, Step 944 | Training Acc: 0.828 | Test Acc: 0.826 | Test Loss: 0.370 | Test AUC 0.814 (3271.30 s) 
Graph saved to file: checkpoints/biRNN_kst_rho0_epoch56.ckpt-56
(*) Entering Epoch 56 (3273.042 s)
Epoch 56, Step 118 | Training Acc: 0.842 | Test Acc: 0.818 | Test Loss: 0.387 | Test AUC 0.823 (3283.24 s) 
Epoch 56, Step 236 | Training Acc: 0.807 | Test Acc: 0.805 | Test Loss: 0.415 | Test AUC 0.794 (3293.43 s) 
Epoch 56, Step 354 | Training Acc: 0.852 | Test Acc: 0.822 | Test Loss: 0.371 | Test AUC 0.832 (3303.60 s) 
Epoch 56, Step 472 | Training Acc: 0.846 | Test Acc: 0.824 | Test Loss: 0.367 | Test AUC 0.824 (3313.80 s) 
Epoch 56, Step 590 | Training Acc: 0.820 | Test Acc: 0.816 | Test Loss: 0.415 | Test AUC 0.801 (3324.04 s) 
Epoch 56, Step 708 | Training Acc: 0.814 | Test Acc: 0.848 | Test Loss: 0.358 | Test AUC 0.823 (3334.35 s) 
Epoch 56, Step 826 | Training Acc: 0.844 | Test Acc: 0.834 | Test Loss: 0.379 | Test AUC 0.834 (3344.63 s) 
Epoch 56, Step 944 | Training Acc: 0.809 | Test Acc: 0.812 | Test Loss: 0.389 | Test AUC 0.801 (3354.81 s) 
(*) Entering Epoch 57 (3355.471 s)
Epoch 57, Step 118 | Training Acc: 0.834 | Test Acc: 0.826 | Test Loss: 0.384 | Test AUC 0.826 (3365.69 s) 
Epoch 57, Step 236 | Training Acc: 0.832 | Test Acc: 0.850 | Test Loss: 0.351 | Test AUC 0.830 (3375.91 s) 
Epoch 57, Step 354 | Training Acc: 0.822 | Test Acc: 0.842 | Test Loss: 0.328 | Test AUC 0.813 (3386.15 s) 
Epoch 57, Step 472 | Training Acc: 0.861 | Test Acc: 0.840 | Test Loss: 0.364 | Test AUC 0.834 (3396.42 s) 
Epoch 57, Step 590 | Training Acc: 0.854 | Test Acc: 0.801 | Test Loss: 0.414 | Test AUC 0.825 (3406.63 s) 
Epoch 57, Step 708 | Training Acc: 0.793 | Test Acc: 0.814 | Test Loss: 0.392 | Test AUC 0.801 (3416.83 s) 
Epoch 57, Step 826 | Training Acc: 0.846 | Test Acc: 0.787 | Test Loss: 0.428 | Test AUC 0.808 (3427.04 s) 
Epoch 57, Step 944 | Training Acc: 0.812 | Test Acc: 0.805 | Test Loss: 0.397 | Test AUC 0.807 (3437.23 s) 
(*) Entering Epoch 58 (3437.889 s)
Epoch 58, Step 118 | Training Acc: 0.857 | Test Acc: 0.809 | Test Loss: 0.421 | Test AUC 0.815 (3447.96 s) 
Epoch 58, Step 236 | Training Acc: 0.855 | Test Acc: 0.812 | Test Loss: 0.379 | Test AUC 0.821 (3457.74 s) 
Epoch 58, Step 354 | Training Acc: 0.811 | Test Acc: 0.828 | Test Loss: 0.382 | Test AUC 0.814 (3463.59 s) 
Epoch 58, Step 472 | Training Acc: 0.830 | Test Acc: 0.826 | Test Loss: 0.389 | Test AUC 0.808 (3469.47 s) 
Epoch 58, Step 590 | Training Acc: 0.840 | Test Acc: 0.830 | Test Loss: 0.388 | Test AUC 0.827 (3475.26 s) 
Epoch 58, Step 708 | Training Acc: 0.855 | Test Acc: 0.844 | Test Loss: 0.363 | Test AUC 0.830 (3481.02 s) 
Epoch 58, Step 826 | Training Acc: 0.850 | Test Acc: 0.840 | Test Loss: 0.375 | Test AUC 0.829 (3486.80 s) 
Epoch 58, Step 944 | Training Acc: 0.826 | Test Acc: 0.832 | Test Loss: 0.375 | Test AUC 0.811 (3492.59 s) 
(*) Entering Epoch 59 (3493.054 s)
Epoch 59, Step 118 | Training Acc: 0.824 | Test Acc: 0.818 | Test Loss: 0.425 | Test AUC 0.819 (3498.85 s) 
Epoch 59, Step 236 | Training Acc: 0.850 | Test Acc: 0.828 | Test Loss: 0.391 | Test AUC 0.835 (3504.64 s) 
Epoch 59, Step 354 | Training Acc: 0.830 | Test Acc: 0.836 | Test Loss: 0.373 | Test AUC 0.821 (3510.35 s) 
Epoch 59, Step 472 | Training Acc: 0.816 | Test Acc: 0.832 | Test Loss: 0.358 | Test AUC 0.813 (3516.08 s) 
Epoch 59, Step 590 | Training Acc: 0.840 | Test Acc: 0.842 | Test Loss: 0.372 | Test AUC 0.836 (3521.82 s) 
Epoch 59, Step 708 | Training Acc: 0.840 | Test Acc: 0.850 | Test Loss: 0.362 | Test AUC 0.840 (3527.56 s) 
Epoch 59, Step 826 | Training Acc: 0.852 | Test Acc: 0.838 | Test Loss: 0.404 | Test AUC 0.837 (3533.29 s) 
Epoch 59, Step 944 | Training Acc: 0.834 | Test Acc: 0.830 | Test Loss: 0.371 | Test AUC 0.822 (3539.01 s) 
(*) Entering Epoch 60 (3539.468 s)
Epoch 60, Step 118 | Training Acc: 0.803 | Test Acc: 0.838 | Test Loss: 0.376 | Test AUC 0.818 (3545.22 s) 
Epoch 60, Step 236 | Training Acc: 0.807 | Test Acc: 0.789 | Test Loss: 0.438 | Test AUC 0.784 (3550.97 s) 
Epoch 60, Step 354 | Training Acc: 0.842 | Test Acc: 0.811 | Test Loss: 0.391 | Test AUC 0.812 (3556.70 s) 
Epoch 60, Step 472 | Training Acc: 0.830 | Test Acc: 0.830 | Test Loss: 0.366 | Test AUC 0.819 (3562.44 s) 
Epoch 60, Step 590 | Training Acc: 0.840 | Test Acc: 0.820 | Test Loss: 0.382 | Test AUC 0.821 (3568.20 s) 
Epoch 60, Step 708 | Training Acc: 0.822 | Test Acc: 0.828 | Test Loss: 0.401 | Test AUC 0.814 (3573.93 s) 
Epoch 60, Step 826 | Training Acc: 0.838 | Test Acc: 0.826 | Test Loss: 0.396 | Test AUC 0.822 (3579.70 s) 
Epoch 60, Step 944 | Training Acc: 0.848 | Test Acc: 0.842 | Test Loss: 0.366 | Test AUC 0.837 (3585.49 s) 
(*) Entering Epoch 61 (3585.947 s)
Epoch 61, Step 118 | Training Acc: 0.859 | Test Acc: 0.830 | Test Loss: 0.390 | Test AUC 0.838 (3591.68 s) 
Epoch 61, Step 236 | Training Acc: 0.818 | Test Acc: 0.846 | Test Loss: 0.375 | Test AUC 0.825 (3597.45 s) 
Epoch 61, Step 354 | Training Acc: 0.824 | Test Acc: 0.807 | Test Loss: 0.379 | Test AUC 0.798 (3603.19 s) 
Epoch 61, Step 472 | Training Acc: 0.850 | Test Acc: 0.830 | Test Loss: 0.352 | Test AUC 0.830 (3608.94 s) 
Epoch 61, Step 590 | Training Acc: 0.822 | Test Acc: 0.830 | Test Loss: 0.371 | Test AUC 0.822 (3614.70 s) 
Epoch 61, Step 708 | Training Acc: 0.816 | Test Acc: 0.803 | Test Loss: 0.426 | Test AUC 0.801 (3620.45 s) 
Epoch 61, Step 826 | Training Acc: 0.836 | Test Acc: 0.816 | Test Loss: 0.377 | Test AUC 0.818 (3626.20 s) 
Epoch 61, Step 944 | Training Acc: 0.848 | Test Acc: 0.834 | Test Loss: 0.421 | Test AUC 0.831 (3631.94 s) 
(*) Entering Epoch 62 (3632.399 s)
Epoch 62, Step 118 | Training Acc: 0.816 | Test Acc: 0.803 | Test Loss: 0.418 | Test AUC 0.794 (3638.14 s) 
Epoch 62, Step 236 | Training Acc: 0.865 | Test Acc: 0.830 | Test Loss: 0.376 | Test AUC 0.840 (3643.86 s) 
Epoch 62, Step 354 | Training Acc: 0.824 | Test Acc: 0.807 | Test Loss: 0.404 | Test AUC 0.808 (3649.65 s) 
Epoch 62, Step 472 | Training Acc: 0.812 | Test Acc: 0.820 | Test Loss: 0.400 | Test AUC 0.809 (3655.44 s) 
Epoch 62, Step 590 | Training Acc: 0.803 | Test Acc: 0.791 | Test Loss: 0.424 | Test AUC 0.793 (3661.21 s) 
Epoch 62, Step 708 | Training Acc: 0.816 | Test Acc: 0.818 | Test Loss: 0.416 | Test AUC 0.805 (3666.98 s) 
Epoch 62, Step 826 | Training Acc: 0.822 | Test Acc: 0.832 | Test Loss: 0.372 | Test AUC 0.816 (3672.72 s) 
Epoch 62, Step 944 | Training Acc: 0.811 | Test Acc: 0.822 | Test Loss: 0.390 | Test AUC 0.802 (3678.46 s) 
(*) Entering Epoch 63 (3678.923 s)
Epoch 63, Step 118 | Training Acc: 0.852 | Test Acc: 0.828 | Test Loss: 0.352 | Test AUC 0.828 (3684.67 s) 
Epoch 63, Step 236 | Training Acc: 0.834 | Test Acc: 0.830 | Test Loss: 0.374 | Test AUC 0.824 (3690.40 s) 
Epoch 63, Step 354 | Training Acc: 0.822 | Test Acc: 0.838 | Test Loss: 0.373 | Test AUC 0.817 (3696.15 s) 
Epoch 63, Step 472 | Training Acc: 0.816 | Test Acc: 0.838 | Test Loss: 0.355 | Test AUC 0.811 (3701.87 s) 
Epoch 63, Step 590 | Training Acc: 0.854 | Test Acc: 0.814 | Test Loss: 0.375 | Test AUC 0.832 (3707.61 s) 
Epoch 63, Step 708 | Training Acc: 0.850 | Test Acc: 0.836 | Test Loss: 0.348 | Test AUC 0.835 (3713.35 s) 
Epoch 63, Step 826 | Training Acc: 0.834 | Test Acc: 0.809 | Test Loss: 0.410 | Test AUC 0.808 (3719.15 s) 
Epoch 63, Step 944 | Training Acc: 0.834 | Test Acc: 0.852 | Test Loss: 0.358 | Test AUC 0.829 (3724.89 s) 
Metagraph aved to file: checkpoints/biRNN_end-63
Architecture: rho0 - kst | Base cell: gru | Hidden units: 256 | Layers: 3 | Batch: 512 | Epochs: 64

In [4]:
def cell_dropout(base_cell, keep_prob):
    # Apply dropout between RNN layers - only on the output
    cell_dropout = tf.contrib.rnn.DropoutWrapper(base_cell, output_keep_prob=keep_prob)
    return cell_dropout

def layer_weights(shape, name = 'weights'):
    # Return weight tensor of given shape using Xavier initialization
    W = tf.get_variable(name, shape = shape, initializer=tf.contrib.layers.xavier_initializer())
    return W

def layer_biases(shape, name = 'biases'):
    # Return bias tensor of given shape with small initialized constant value
    b = tf.get_variable(name, shape = shape, initializer = tf.constant_initializer(0.01))
    return b

class CharRNN():
    def __init__(self, config, training = True, sample = False):
        self.config = config
        self.scope = 'train'

        if sample:
            # Configure graph to generate characters
            self.config.batch_size = 1
            self.config.seq_length = 1
                
        # Placeholders for feed_dict
        self.inputs = tf.placeholder(tf.int32, shape = [None, self.config.seq_length])
        self.targets = tf.placeholder(tf.int32, shape = [None, self.config.seq_length])
        self.keep_prob = tf.placeholder(tf.float32) # Dropout on input connections

        # Initialize embedding matrix to be uniform in the unit cube
        # embeds char IDs into dense representation - the RNN state size
        embeddings = tf.Variable(
              tf.random_uniform([config.vocab_size, config.hidden_units], -1.0, 1.0))
        rnn_inputs = tf.nn.embedding_lookup(embeddings, self.inputs)
        
        # Place operations necessary to perform inference onto graph
        if config.rnn_cell == 'lstm':
            base_cell = tf.contrib.rnn.LSTMCell(num_units = config.hidden_units, forget_bias = 1.0, state_is_tuple = True)
        elif config.rnn_cell == 'gru':
            base_cell = tf.contrib.rnn.GRUCell(num_units = config.hidden_units)
        elif config.rnn_cell == 'layer-norm':
            base_cell = tf.contrib.rnn.LayerNormBasicLSTMCell(num_units = config.hidden_units,
                                                          forget_bias = 1.0, dropout_keep_prob = config.recurrent_keep_prob)
        else:
            base_cell = tf.contrib.rnn.BasicRNNCell(num_units = config.hidden_units)

        self.cell = base_cell
        # Apply Dropout operator on non-recurrent connections
        if training and config.input_keep_prob < 1:
            rnn_inputs = tf.nn.dropout(rnn_inputs, config.keep_prob)
            self.cell = tf.contrib.rnn.DropoutWrapper(base_cell, input_keep_prob=config.input_keep_prob)

        # Wrap stacked cells into a single cell
        self.multicell = tf.contrib.rnn.MultiRNNCell(
            [self.cell for _ in range(config.n_layers)], state_is_tuple=True)

        # Accept previous hidden state as input
        self.zero_state = self.multicell.zero_state(self.config.batch_size, tf.float32)
        self.init_state = self.zero_state

        # Outputs shaped [batch_size, max_time, cell.output_size]
        rnn_outputs, self.final_state = tf.nn.dynamic_rnn(
            cell = self.multicell, inputs = rnn_inputs, initial_state = self.init_state, scope = self.scope)

        # Flatten outputs across batch_size, sequence length dimensions
        flat_rnn_outputs = tf.reshape(rnn_outputs, [-1, config.hidden_units])
        flat_targets = tf.reshape(self.targets, [-1])

        with tf.variable_scope('softmax_{}'.format(self.scope)):
            softmax_W = layer_weights(shape = [config.hidden_units, config.vocab_size], name = 'smx_W')
            softmax_b = layer_biases(shape = [config.vocab_size], name = 'smx_b')

        self.logits_RNN = tf.matmul(flat_rnn_outputs, softmax_W) + softmax_b  # Unormalized log probabilties for next char
        self.predictions = tf.nn.softmax(self.logits_RNN)
        
        self.cross_entropy = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(logits = self.logits_RNN, labels = flat_targets))
        tf.summary.scalar('cross_entropy', self.cross_entropy)
        
        # Anneal learning rate
        global_step = tf.Variable(0, trainable=False)
        learning_rate = tf.train.exponential_decay(config.learning_rate, global_step,
                                                       decay_steps = reader.steps_per_epoch, decay_rate = config.lr_epoch_decay, staircase=True)

        self.train_op = tf.train.AdamOptimizer(config.learning_rate).minimize(self.cross_entropy, name = 'optimizer',
                                                                              global_step = global_step)
        
    def sample(self, ckpt, char2ix, seed = 'The ', sample_length = 1000, simple = True, use_temperature = False, temperature = 5,test = False):
        ''' Samples a sequence of characters from a saved model with given seed
        '''
        chars = []
        ix2char = dict(zip(char2ix.values(), char2ix.keys()))

        with tf.Session() as sess:
            saver = tf.train.Saver()
            init_op = tf.global_variables_initializer()
            sess.run(init_op)

            if ckpt.model_checkpoint_path:
#                 saver = tf.train.import_meta_graph('checkpoints/fields/char-RNN_fields_epoch49.ckpt-49.meta')
#                 saver.restore(sess, 'checkpoints/fields/char-RNN_fields_epoch49.ckpt-49')
                saver.restore(sess, ckpt.model_checkpoint_path)
                print('{} restored.'.format(ckpt.model_checkpoint_path))

            # Begin confused, condition upon given seed
            state = sess.run(self.zero_state)

            for char in seed:
                ix = char2ix[char]
                feed_dict_sample = {self.inputs: np.array([[ix]]), self.init_state: state}
                state = sess.run(self.final_state, feed_dict = feed_dict_sample)
                chars.append(ix)

            current_char = chars[-1]
            
            def weighted_pick(weights):
                t = np.cumsum(weights)
                s = np.sum(weights)
                return(int(np.searchsorted(t, np.random.rand(1)*s)))
            
            # Get predictions
            from scipy.misc import logsumexp

            def log_softmax(vec):
                return vec - logsumexp(vec)

            def softmax(vec):
                return np.exp(log_softmax(vec))
            
            for n in range(sample_length):
                feed_dict_sample = {self.inputs: np.array([[current_char]]), self.init_state: state}
                self.logits, preds, state = sess.run([self.logits_RNN, self.predictions, self.final_state], feed_dict = feed_dict_sample)

                if use_temperature:
                    logits = np.squeeze(self.logits)
                    logits = np.asarray(logits, np.float64)
                    logits /= temperature
                    x = logits - np.max(logits)
                    
                    #boltzmann_factor = np.exp(x)
                    #preds = boltzmann_factor/np.sum(boltzmann_factor)
                    preds = np.exp(x)/np.sum(np.exp(x))

                if simple:
                    current_char = np.random.choice(config.vocab_size, 1, p = np.squeeze(preds))[0]
                else:
                    dist = np.random.multinomial(n = 100, pvals = np.squeeze(preds))
                    current_char = np.argmax(dist)
                
                if test:
                    current_char = weighted_pick(preds[0])
                    
                chars.append(current_char)

            chars = [ix2char[ix] for ix in chars]
            sample = ''.join(chars)
            print(sample)
            with open(os.path.join(directories.samples, 'sample_{}'.format(config.name)), 'w') as f:
                 json.dump(sample, f)           

def train(config, restore = False):
    
    charRNN = CharRNN(config, training = True)

    saver = tf.train.Saver()
    merge_op = tf.summary.merge_all()
    train_writer = tf.summary.FileWriter(
        os.path.join(directories.tensorboard, 'train_{}'.format(time.strftime('%d-%m_%I:%M'))), graph = tf.get_default_graph())
    test_writer = tf.summary.FileWriter(os.path.join(directories.tensorboard, 'test_{}'.format(time.strftime('%d-%m_%I:%M'))))
    ckpt = tf.train.get_checkpoint_state(directories.checkpoints)
    
    with tf.Session() as sess:
        # Initialize variables
        init_op = tf.global_variables_initializer()
        sess.run(init_op)
        
        if restore and ckpt.model_checkpoint_path:
            print('{} restored.'.format(ckpt.model_checkpoint_path))
#             saver = tf.train.import_meta_graph('checkpoints/char-RNN__epoch49.ckpt-49.meta')
#             saver.restore(sess, 'checkpoints/char-RNN__epoch49.ckpt-49')
            saver.restore(sess, ckpt.model_checkpoint_path)
            
        start_time = time.time()

        for epoch in range(config.num_epochs):
            # Reset RNN memory
            state = sess.run(charRNN.zero_state)
            reader.proceed = True
            step = 0
            total_loss = 0.0
            epoch_mean_loss = []
            # Save every epoch
            save_path = saver.save(sess,
                                   os.path.join(directories.checkpoints,'char-RNN_{}_epoch{}.ckpt'.format(config.name, epoch)),
                                   global_step = epoch)
            print('(*) Entering Epoch {} ({:.3f} s)'.format(epoch, time.time() - start_time))
            print('Metagraph saved to file: {}'.format(save_path))

            while(reader.proceed):
                # Iterate through entire corpus
                batch_inputs, batch_targets = reader.next_batch(config.batch_size, config.seq_length)
                feed_dict_train = {charRNN.inputs: batch_inputs, charRNN.targets: batch_targets, charRNN.init_state: state}
                t_loss, state, t_op = sess.run([charRNN.cross_entropy, charRNN.final_state, charRNN.train_op], feed_dict = feed_dict_train)
                step += 1

                if step % (reader.steps_per_epoch // 4) == 0:
                    # Evaluate model                
                    val_inputs, val_targets = reader.next_batch(config.batch_size, config.seq_length)
                    feed_dict_val = {charRNN.inputs: val_inputs, charRNN.targets: val_targets, charRNN.init_state: state}

                    train_summary = sess.run(merge_op, feed_dict = feed_dict_train)
                    v_loss, v_summary, = sess.run([charRNN.cross_entropy, merge_op], feed_dict = feed_dict_val)

                    train_writer.add_summary(train_summary, step)
                    test_writer.add_summary(v_summary, step)
                    print('Epoch {}, Step {} | Training Loss (mean): {:.3f} ({:.3f}) | Validation Loss {:.3f}'
                          .format(epoch, step, t_loss, total_loss/step, v_loss))

                total_loss += t_loss

            epoch_mean_loss.append(total_loss/step)

        save_path = saver.save(sess, os.path.join(directories.checkpoints, 'char-RNN_end'),
                               global_step = epoch)
        print('Metagraph saved to file: {}'.format(save_path))
        delta_t = time.time() - start_time

        print("Training Complete. Time elapsed: %g s\n" %(delta_t))
        print("Average train accuracy on final epoch: {:.3f}".format(epoch_mean_loss[-1]))
        print('Architecture: {}\n'.format(architecture))
        save_summary(config, delta_t, epoch_mean_loss[-1])

In [ ]:
train(config)#, restore = True)


(*) Entering Epoch 0 (0.188 s)
Metagraph saved to file: checkpoints/lesmis_wp/char-RNN_lesmis_wp_epoch0.ckpt-0
Epoch 0, Step 97 | Training Loss (mean): 2.434 (2.813) | Validation Loss 2.417
Epoch 0, Step 194 | Training Loss (mean): 2.201 (2.557) | Validation Loss 2.198
Epoch 0, Step 291 | Training Loss (mean): 2.069 (2.417) | Validation Loss 2.051
Epoch 0, Step 388 | Training Loss (mean): 2.036 (2.322) | Validation Loss 1.984
(*) Entering Epoch 1 (1241.416 s)
Metagraph saved to file: checkpoints/lesmis_wp/char-RNN_lesmis_wp_epoch1.ckpt-1
Epoch 1, Step 97 | Training Loss (mean): 1.926 (1.948) | Validation Loss 1.909
Epoch 1, Step 194 | Training Loss (mean): 1.882 (1.929) | Validation Loss 1.875
Epoch 1, Step 291 | Training Loss (mean): 1.835 (1.905) | Validation Loss 1.842
(*) Entering Epoch 2 (2474.725 s)
Metagraph saved to file: checkpoints/lesmis_wp/char-RNN_lesmis_wp_epoch2.ckpt-2
Epoch 2, Step 97 | Training Loss (mean): 1.794 (1.772) | Validation Loss 1.770
Epoch 2, Step 194 | Training Loss (mean): 1.737 (1.769) | Validation Loss 1.746
Epoch 2, Step 291 | Training Loss (mean): 1.693 (1.757) | Validation Loss 1.719
(*) Entering Epoch 3 (3725.976 s)
Metagraph saved to file: checkpoints/lesmis_wp/char-RNN_lesmis_wp_epoch3.ckpt-3
Epoch 3, Step 97 | Training Loss (mean): 1.700 (1.673) | Validation Loss 1.680
Epoch 3, Step 194 | Training Loss (mean): 1.653 (1.675) | Validation Loss 1.662
Epoch 3, Step 291 | Training Loss (mean): 1.618 (1.667) | Validation Loss 1.642
(*) Entering Epoch 4 (4962.074 s)
Metagraph saved to file: checkpoints/lesmis_wp/char-RNN_lesmis_wp_epoch4.ckpt-4
Epoch 4, Step 97 | Training Loss (mean): 1.641 (1.606) | Validation Loss 1.615
Epoch 4, Step 194 | Training Loss (mean): 1.593 (1.611) | Validation Loss 1.603
Epoch 4, Step 291 | Training Loss (mean): 1.562 (1.606) | Validation Loss 1.587
(*) Entering Epoch 5 (6205.252 s)
Metagraph saved to file: checkpoints/lesmis_wp/char-RNN_lesmis_wp_epoch5.ckpt-5
Epoch 5, Step 97 | Training Loss (mean): 1.591 (1.557) | Validation Loss 1.567
Epoch 5, Step 194 | Training Loss (mean): 1.561 (1.564) | Validation Loss 1.562
Epoch 5, Step 291 | Training Loss (mean): 1.520 (1.560) | Validation Loss 1.547
(*) Entering Epoch 6 (7448.962 s)
Metagraph saved to file: checkpoints/lesmis_wp/char-RNN_lesmis_wp_epoch6.ckpt-6
Epoch 6, Step 97 | Training Loss (mean): 1.558 (1.518) | Validation Loss 1.527
Epoch 6, Step 194 | Training Loss (mean): 1.523 (1.526) | Validation Loss 1.521
Epoch 6, Step 291 | Training Loss (mean): 1.487 (1.524) | Validation Loss 1.519
(*) Entering Epoch 7 (8689.193 s)
Metagraph saved to file: checkpoints/lesmis_wp/char-RNN_lesmis_wp_epoch7.ckpt-7
Epoch 7, Step 97 | Training Loss (mean): 1.530 (1.488) | Validation Loss 1.496
Epoch 7, Step 194 | Training Loss (mean): 1.495 (1.496) | Validation Loss 1.495
Epoch 7, Step 291 | Training Loss (mean): 1.458 (1.494) | Validation Loss 1.487
(*) Entering Epoch 8 (9926.365 s)
Metagraph saved to file: checkpoints/lesmis_wp/char-RNN_lesmis_wp_epoch8.ckpt-8
Epoch 8, Step 97 | Training Loss (mean): 1.504 (1.462) | Validation Loss 1.472
Epoch 8, Step 194 | Training Loss (mean): 1.470 (1.471) | Validation Loss 1.468
Epoch 8, Step 291 | Training Loss (mean): 1.437 (1.470) | Validation Loss 1.466
(*) Entering Epoch 9 (11159.768 s)
Metagraph saved to file: checkpoints/lesmis_wp/char-RNN_lesmis_wp_epoch9.ckpt-9
Epoch 9, Step 97 | Training Loss (mean): 1.486 (1.440) | Validation Loss 1.453
Epoch 9, Step 194 | Training Loss (mean): 1.447 (1.450) | Validation Loss 1.453
Epoch 9, Step 291 | Training Loss (mean): 1.411 (1.449) | Validation Loss 1.451
(*) Entering Epoch 10 (12391.148 s)
Metagraph saved to file: checkpoints/lesmis_wp/char-RNN_lesmis_wp_epoch10.ckpt-10
Epoch 10, Step 97 | Training Loss (mean): 1.469 (1.422) | Validation Loss 1.436
Epoch 10, Step 194 | Training Loss (mean): 1.434 (1.432) | Validation Loss 1.432
Epoch 10, Step 291 | Training Loss (mean): 1.403 (1.431) | Validation Loss 1.434
(*) Entering Epoch 11 (13627.608 s)
Metagraph saved to file: checkpoints/lesmis_wp/char-RNN_lesmis_wp_epoch11.ckpt-11
Epoch 11, Step 97 | Training Loss (mean): 1.451 (1.406) | Validation Loss 1.419
Epoch 11, Step 194 | Training Loss (mean): 1.420 (1.416) | Validation Loss 1.421
Epoch 11, Step 291 | Training Loss (mean): 1.385 (1.416) | Validation Loss 1.415
(*) Entering Epoch 12 (14864.453 s)
Metagraph saved to file: checkpoints/lesmis_wp/char-RNN_lesmis_wp_epoch12.ckpt-12
Epoch 12, Step 97 | Training Loss (mean): 1.437 (1.392) | Validation Loss 1.409
Epoch 12, Step 194 | Training Loss (mean): 1.406 (1.402) | Validation Loss 1.408
Epoch 12, Step 291 | Training Loss (mean): 1.366 (1.402) | Validation Loss 1.407
(*) Entering Epoch 13 (16098.245 s)
Metagraph saved to file: checkpoints/lesmis_wp/char-RNN_lesmis_wp_epoch13.ckpt-13
Epoch 13, Step 97 | Training Loss (mean): 1.421 (1.379) | Validation Loss 1.393
Epoch 13, Step 194 | Training Loss (mean): 1.393 (1.390) | Validation Loss 1.397
Epoch 13, Step 291 | Training Loss (mean): 1.355 (1.390) | Validation Loss 1.393
(*) Entering Epoch 14 (17319.559 s)
Metagraph saved to file: checkpoints/lesmis_wp/char-RNN_lesmis_wp_epoch14.ckpt-14
Epoch 14, Step 97 | Training Loss (mean): 1.412 (1.368) | Validation Loss 1.381
Epoch 14, Step 194 | Training Loss (mean): 1.381 (1.379) | Validation Loss 1.386
Epoch 14, Step 291 | Training Loss (mean): 1.345 (1.379) | Validation Loss 1.383
(*) Entering Epoch 15 (18535.697 s)
Metagraph saved to file: checkpoints/lesmis_wp/char-RNN_lesmis_wp_epoch15.ckpt-15
Epoch 15, Step 97 | Training Loss (mean): 1.401 (1.357) | Validation Loss 1.375
Epoch 15, Step 194 | Training Loss (mean): 1.369 (1.368) | Validation Loss 1.372
Epoch 15, Step 291 | Training Loss (mean): 1.339 (1.369) | Validation Loss 1.377
(*) Entering Epoch 16 (19761.228 s)
Metagraph saved to file: checkpoints/lesmis_wp/char-RNN_lesmis_wp_epoch16.ckpt-16
Epoch 16, Step 97 | Training Loss (mean): 1.390 (1.349) | Validation Loss 1.365
Epoch 16, Step 194 | Training Loss (mean): 1.362 (1.360) | Validation Loss 1.365
Epoch 16, Step 291 | Training Loss (mean): 1.331 (1.360) | Validation Loss 1.364
(*) Entering Epoch 17 (20980.771 s)
Metagraph saved to file: checkpoints/lesmis_wp/char-RNN_lesmis_wp_epoch17.ckpt-17
Epoch 17, Step 97 | Training Loss (mean): 1.386 (1.340) | Validation Loss 1.362
Epoch 17, Step 194 | Training Loss (mean): 1.359 (1.351) | Validation Loss 1.358
Epoch 17, Step 291 | Training Loss (mean): 1.324 (1.352) | Validation Loss 1.361
(*) Entering Epoch 18 (22215.376 s)
Metagraph saved to file: checkpoints/lesmis_wp/char-RNN_lesmis_wp_epoch18.ckpt-18
Epoch 18, Step 291 | Training Loss (mean): 1.316 (1.344) | Validation Loss 1.354
(*) Entering Epoch 19 (23450.234 s)
Metagraph saved to file: checkpoints/lesmis_wp/char-RNN_lesmis_wp_epoch19.ckpt-19
Epoch 19, Step 97 | Training Loss (mean): 1.372 (1.326) | Validation Loss 1.343
Epoch 19, Step 194 | Training Loss (mean): 1.340 (1.337) | Validation Loss 1.342
Epoch 19, Step 291 | Training Loss (mean): 1.310 (1.337) | Validation Loss 1.346
(*) Entering Epoch 20 (24684.626 s)
Metagraph saved to file: checkpoints/lesmis_wp/char-RNN_lesmis_wp_epoch20.ckpt-20
Epoch 20, Step 97 | Training Loss (mean): 1.364 (1.319) | Validation Loss 1.336
Epoch 20, Step 194 | Training Loss (mean): 1.334 (1.330) | Validation Loss 1.338
Epoch 20, Step 291 | Training Loss (mean): 1.303 (1.331) | Validation Loss 1.340
(*) Entering Epoch 21 (25900.511 s)
Metagraph saved to file: checkpoints/lesmis_wp/char-RNN_lesmis_wp_epoch21.ckpt-21
Epoch 21, Step 97 | Training Loss (mean): 1.359 (1.313) | Validation Loss 1.334
Epoch 21, Step 194 | Training Loss (mean): 1.331 (1.324) | Validation Loss 1.332
Epoch 21, Step 291 | Training Loss (mean): 1.295 (1.325) | Validation Loss 1.338
(*) Entering Epoch 22 (27126.018 s)
Metagraph saved to file: checkpoints/lesmis_wp/char-RNN_lesmis_wp_epoch22.ckpt-22
Epoch 22, Step 97 | Training Loss (mean): 1.355 (1.307) | Validation Loss 1.325
Epoch 22, Step 194 | Training Loss (mean): 1.328 (1.319) | Validation Loss 1.325
Epoch 22, Step 291 | Training Loss (mean): 1.294 (1.319) | Validation Loss 1.331
(*) Entering Epoch 23 (28355.545 s)
Metagraph saved to file: checkpoints/lesmis_wp/char-RNN_lesmis_wp_epoch23.ckpt-23
Epoch 23, Step 97 | Training Loss (mean): 1.353 (1.302) | Validation Loss 1.325
Epoch 23, Step 194 | Training Loss (mean): 1.316 (1.313) | Validation Loss 1.320
Epoch 23, Step 291 | Training Loss (mean): 1.285 (1.314) | Validation Loss 1.325
(*) Entering Epoch 24 (29571.776 s)
Metagraph saved to file: checkpoints/lesmis_wp/char-RNN_lesmis_wp_epoch24.ckpt-24
Epoch 24, Step 97 | Training Loss (mean): 1.341 (1.297) | Validation Loss 1.320
Epoch 24, Step 194 | Training Loss (mean): 1.313 (1.308) | Validation Loss 1.316
Epoch 24, Step 291 | Training Loss (mean): 1.284 (1.309) | Validation Loss 1.322
Epoch 57, Step 97 | Training Loss (mean): 1.256 (1.211) | Validation Loss 1.245

In [5]:
with open(os.path.join(directories.checkpoints, 'char2ix_{}.json'.format(os.path.splitext(os.path.basename(file_name))[0])), 'r') as f:
    char2ix = json.load(f)
char2ix = {key:int(char2ix[key]) for key in char2ix}
assert char2ix == reader.char2ix, 'Discrepancy in char-index map!'

ckpt = tf.train.get_checkpoint_state(directories.checkpoints)
model = CharRNN(config, training = False, sample = True)

In [15]:
model.sample(ckpt, char2ix = reader.char2ix, seed = 'The ', sample_length = 3000, simple = False, use_temperature = True, temperature = 2.3)


checkpoints/lesmis_wp/char-RNN_lesmis_wp_epoch57.ckpt-57 restored.
The later the appearance of the poor child of the door of the corner of the army and with the countess and a great manner of the village of which she was not the following part of the wall of the other soul in the midst of the Rue Plumet, and the most resembles a little being called him a strange feeling of probably he remained and the one in the position and the catastrophe of the barricade and the other being had a red stone of the very soul of a conscience of a few party in a real defense of the walls of an end of the commander-in-chief became forty-pieces of the morning, and again and respectfully as if a child was a ball. A strange and service of the house, the countess began to have a resistance with the contrary to conceal him from the direction of the convent which is the second life, and which was the fact that the most expression of the same moment the latter was being restored in the presence of a sort of person who was in the shadow, and she still in the wall of his eyes had come to the ground and particularly all the man and had been able to say to the same time, which had seen the least stars and a spot in the garden on the depths of the first place of her assistance of the princess continued to understand that he was in the porch which considered it in his head and stopped and asked him and always ready to contemplate the Rue de la Chanvrerie was sure to let him so that he had not been waiting to find out the expression of the room.

"I am sure your life when I will have a little commander-in-chief's book and began to return to the forest of the barricade should be seen to say something and an air of interests that they had already been an infantry and galloped up and had not a dead man who was what he had been a family and smile of the barricade was a commander-in-chief and the enemy's horse and holding his eyes and which had gone to the wall.

"And what is the princess, the general who had been a fire and disappeared and from a state of the Rue de la Chanvrerie, in a broad a conversation of the end of the same time that he was to come and I don't know how to conceal the good God was always the army and exclaimed:--

"Ah, you will be merely anything always continued to speak to the back of the earth for him, who had been the count was so on a respect of the presence of the state of a man who had even everything to be a woman to the poor commander-in-chief and perhaps in the corner of the streets, and which was plain to which she was at the midst of the news of the Tsar, the third point of former in the ancient and subject of the confidence of the assassination of a step he was doing in the fact that he had sent to a dream. He had the old man, who was a sort of soldiers who stood before the last of the police soldiers and the carriage and the child was on the first time that he had supposed that he had a brother of Austerlitz and a sort of soldiers for the princess' and the table as the old countess took a carts and still presented the