Welcome to the notebook that trains the models to extract the rod positions and angles from the table video

Welcome to foosbot


In [1]:
#!pip install keras
#!pip install numpy
#!pip install imageio
#!pip install matplotlib
#!pip install opencv-python

In [1]:
from __future__ import print_function


from video_file import *

import importlib
try:
    importlib.reload(video_file)  # pick up local edits to video_file during development
except:
    pass

import cv2
import sys
import os
import csv
import numpy as np
from random import randint
from random import shuffle

from PIL import Image
import imageio
import itertools as it

import tensorflow as tf
import keras
print("Keras version %s" % keras.__version__)
from keras.models import Sequential
from keras.layers import Dense, Dropout
from keras import backend as K

print("Tensorflow version %s" % tf.__version__)

import pprint
pp = pprint.PrettyPrinter(depth=6)


# Create the image transformer
transformer = VideoTransform( zoom_range=0.1, rotation_range=5, width_shift_range=0.1, height_shift_range=0.1, shear_range= 0.1, fill_mode='nearest', vertical_flip=False, horizontal_flip=True, horizontal_flip_invert_indices = [], horizontal_flip_reverse_indices = [0], data_format='channels_last' )


# Paths relative to current python file.
data_path  = ".\\..\\..\\TrainingData\\Processed\\RodTrainingData\\Result\\settings.tsv"


Using TensorFlow backend.
Keras version 2.0.4
Tensorflow version 1.1.0

In [2]:
print("Opening training frames from config %s." % (data_path))

position_rel_indexes = [0]
frame_rel_indexes = [0]

training = TrainingInput(transformer, data_path, position_rel_indexes, frame_rel_indexes, 0.2)


Opening training frames from config .\..\..\TrainingData\Processed\RodTrainingData\Result\settings.tsv.
Creating training chunk from .\..\..\TrainingData\Processed\RodTrainingData\Result\chunk0.avi
.\..\..\TrainingData\Processed\RodTrainingData\Result\chunk0.avi
added 3054 new frames for a total of 3054
Creating training chunk from .\..\..\TrainingData\Processed\RodTrainingData\Result\chunk1.avi
.\..\..\TrainingData\Processed\RodTrainingData\Result\chunk1.avi
added 4401 new frames for a total of 7455
Creating training chunk from .\..\..\TrainingData\Processed\RodTrainingData\Result\chunk2.avi
.\..\..\TrainingData\Processed\RodTrainingData\Result\chunk2.avi
added 4500 new frames for a total of 11955
Creating training chunk from .\..\..\TrainingData\Processed\RodTrainingData\Result\chunk3.avi
.\..\..\TrainingData\Processed\RodTrainingData\Result\chunk3.avi
added 4405 new frames for a total of 16360
Creating training chunk from .\..\..\TrainingData\Processed\RodTrainingData\Result\chunk4.avi
.\..\..\TrainingData\Processed\RodTrainingData\Result\chunk4.avi
added 5194 new frames for a total of 21554
Creating training chunk from .\..\..\TrainingData\Processed\RodTrainingData\Result\chunk5.avi
.\..\..\TrainingData\Processed\RodTrainingData\Result\chunk5.avi
added 6453 new frames for a total of 28007
Creating training chunk from .\..\..\TrainingData\Processed\RodTrainingData\Result\chunk6.avi
.\..\..\TrainingData\Processed\RodTrainingData\Result\chunk6.avi
added 5664 new frames for a total of 33671
Creating training chunk from .\..\..\TrainingData\Processed\RodTrainingData\Result\chunk7.avi
.\..\..\TrainingData\Processed\RodTrainingData\Result\chunk7.avi
added 3711 new frames for a total of 37382
Creating training chunk from .\..\..\TrainingData\Processed\RodTrainingData\Result\chunk8.avi
.\..\..\TrainingData\Processed\RodTrainingData\Result\chunk8.avi
added 4688 new frames for a total of 42070
Creating training chunk from .\..\..\TrainingData\Processed\RodTrainingData\Result\chunk9.avi
.\..\..\TrainingData\Processed\RodTrainingData\Result\chunk9.avi
added 1146 new frames for a total of 43216
Creating training chunk from .\..\..\TrainingData\Processed\RodTrainingData\Result\chunk10.avi
.\..\..\TrainingData\Processed\RodTrainingData\Result\chunk10.avi
added 1761 new frames for a total of 44977
Creating training chunk from .\..\..\TrainingData\Processed\RodTrainingData\Result\chunk13.avi
.\..\..\TrainingData\Processed\RodTrainingData\Result\chunk13.avi
added 1257 new frames for a total of 46234
Creating training chunk from .\..\..\TrainingData\Processed\RodTrainingData\Result\chunk14.avi
.\..\..\TrainingData\Processed\RodTrainingData\Result\chunk14.avi
added 1217 new frames for a total of 47451
Creating training chunk from .\..\..\TrainingData\Processed\RodTrainingData\Result\chunk15.avi
.\..\..\TrainingData\Processed\RodTrainingData\Result\chunk15.avi
added 1303 new frames for a total of 48754

In [3]:
# https://stanford.edu/~shervine/blog/keras-generator-multiprocessing.html
import threading  # required for the lock used below

class threadsafe_iter(object):
    """
    Takes an iterator/generator and makes it thread-safe by
    serializing calls to the `next` method of the given iterator/generator.
    """
    def __init__(self, it):
        self.it = it
        self.lock = threading.Lock()

    def __iter__(self):
        return self

    def __next__(self):
        with self.lock:
            return self.it.__next__()

# https://stanford.edu/~shervine/blog/keras-generator-multiprocessing.html
def threadsafe_generator(f):
    """
    A decorator that takes a generator function and makes it thread-safe.
    """
    def g(*a, **kw):
        return threadsafe_iter(f(*a, **kw))
    return g



# Define our training and validation iterators
@threadsafe_generator
def TrainGen(model, training):
    while True:
        #print("TrainGen restarting training input.")
        model.reset_states()
        training.move_first_training_frame()
        (frames, output, reset_memory) = training.get_next_training_frame()
        while frames is not None:
            yield (frames, output)
            (frames, output, reset_memory) = training.get_next_training_frame()
            
            if reset_memory or frames is None:
                model.reset_states()
                
@threadsafe_generator
def ValidateGen(model, training):
    while True:
        #print("Validation restarting training input.")
        model.reset_states()
        training.move_first_validation_frame()
        (frames, output, reset_memory) = training.get_next_validation_frame()
        while frames is not None:
            yield (frames, output)
            (frames, output, reset_memory) = training.get_next_validation_frame()
            
            if reset_memory or frames is None:
                model.reset_states()

# Generators for training the position
@threadsafe_generator
def TrainBatchGen(batch_size, model, training):
    gen = TrainGen(model, training)
    while True:
        # Build the next batch
        batch_frames = np.zeros(shape=(batch_size, training.depth, training.height, training.width, training.channels), dtype=np.float32)
        batch_outputs = np.zeros(shape=(batch_size, 1), dtype=np.float32)
        for i in range(batch_size):
            (frames, output) = next(gen)
            batch_frames[i,:,:,:,:] = frames
            batch_outputs[i,:] = output[0] # Train just the current rod position as the output
            #batch_outputs[i,:] = output[3:6] - output[0:3] # Train the difference in the three rod positions as output
            #batch_outputs[i,:] = output
            
        
        #pp.pprint("Yielding batch")
        #pp.pprint(batch_outputs)
        yield (batch_frames, batch_outputs)
        #pp.pprint("Yielded batch")

@threadsafe_generator
def ValidateBatchGen(batch_size, model, training):
    gen = ValidateGen(model, training)
    while True:
        # Build the next batch
        batch_frames = np.zeros(shape=(batch_size, training.depth, training.height, training.width, training.channels), dtype=np.float32)
        batch_outputs = np.zeros(shape=(batch_size, 1), dtype=np.float32)
        for i in range(batch_size):
            (frames, output) = next(gen)
            batch_frames[i,:,:,:,:] = frames
            batch_outputs[i,:] = output[0] # Train just the current rod position as the output
            #batch_outputs[i,:] = output[3:6] - output[0:3] # Train the difference in the three rod positions as output
            #batch_outputs[i,:] = output
        
        #pp.pprint("Yielding batch")
        #pp.pprint(batch_outputs)
        yield (batch_frames, batch_outputs)
        #pp.pprint("Yielded batch")
        
        
    
# Helper function to plot our validation result
import matplotlib
import matplotlib.image as mpimg
import matplotlib.pyplot as plt
import cv2
import pandas as pd
%matplotlib inline


def plot_validate(generator, model, count, name):
    #plot_validate(ValidateBatchGen(batch_size, model), model, 2000, "Position prediction")
    
    outputs_predicted = None
    outputs_true = None
    
    while outputs_predicted is None or outputs_predicted.shape[0] < count:
        
        (new_frames, new_outputs_true) = next(generator)
        if outputs_true is None:
            outputs_true = new_outputs_true
        else:
            outputs_true = np.concatenate( (outputs_true, new_outputs_true), axis=0 )
        
        
        new_outputs_predicted = model.predict(new_frames, batch_size=new_frames.shape[0], verbose=0)
        if outputs_predicted is None:
            outputs_predicted = new_outputs_predicted
        else:
            outputs_predicted = np.concatenate( (outputs_predicted, new_outputs_predicted), axis=0 )
    
    #(frames, outputs_true) = next(ValidateBatchGen(2000))
    #frames = np.squeeze(frames, axis=(1,))
    #validate_in, validate_out
    #frames = validate_in
    #outputs_true =validate_out
    
    print("Predicted.")
    
    
    pp.pprint(outputs_true)
    pp.pprint(outputs_predicted)
    
    
    #plt.figure(figsize=(8,30))
    plt.figure()
    
    #plt.subplot(111)
    plt.plot(range(count), outputs_true[0:count,0], range(count), outputs_predicted[0:count,0])
    plt.ylabel("Rod 1: %s" % name)
    plt.title("First %i output recordings (true vs. predicted)" % count)
    plt.grid(True)
    
    
    
    
    #plt.figure(figsize=(8,30))
    #plt.subplot(111)
    plt.figure()
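    # Sorting by the true value makes systematic over- or under-prediction visible as an offset between the two curves.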
    
    true, predicted = zip(*sorted(zip(outputs_true[0:count,0], outputs_predicted[0:count,0])))
    plt.plot(range(count), true, range(count), predicted)
    plt.ylabel("Rod 1: %s" % name)
    plt.title("Outputs sorted by true value (true vs. predicted)")
    plt.grid(True)

    
    
    plt.show()

Inspect the input training frames


In [19]:
import matplotlib
import matplotlib.image as mpimg
import matplotlib.pyplot as plt
import cv2
import pandas as pd
%matplotlib inline

#training.move_first_training_frame()

for k in range(10):
    (frame, position, reset) = training.get_next_training_frame()
    data = np.zeros(shape=(np.shape(frame)[1], np.shape(frame)[2] * np.shape(frame)[0], 3), dtype=np.float32)
    for i in range(np.shape(frame)[0]):
        tmp = frame[i,:,:,:]
        data[:,i*np.shape(frame)[2]:(i+1)*np.shape(frame)[2],:] = tmp


    plt.imshow(data)
    plt.show()
    pp.pprint(position)

#training.move_first_training_frame()

print("Shape of training input:")
pp.pprint(np.shape(frame))

print("Shape of training output:")
pp.pprint(np.shape(position))

print("Corresponding Positions:")
pd.DataFrame(position)
pp.pprint(position)


[0.5]
[0.0]
[0.0]
[0.5]
[0.5]
[0.0]
[0.0]
[0.5]
[0.0]
[0.5]
Shape of training input:
(1, 90, 320, 3)
Shape of training output:
(1,)
Corresponding Positions:
[0.5]

Specify the model structure we will use


In [6]:
from keras.models import Sequential
from keras.layers import *
from keras.models import Model


number_of_frames = 1
image_height       = training.height
image_width        = training.width
image_depth        = training.depth
image_channels     = training.channels
output_size        = 1

# Model options
batch_size = 10
cnn_kernel_count = 120

# Build the model
pp.pprint("Input shape without batches:")
pp.pprint((image_depth, image_height, image_width, image_channels))

# Used to give fixed names to the layers for transferring the model
conv_num = 0 
pool_num = 0
dense_num = 0
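# (Keras's load_weights(..., by_name=True), used later in the notebook, matches layers by
#  these names, so the trained convolutional base can be reused when the architecture is
#  rebuilt for the angle model.)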

 # Input shape with batch: (batch_size, 1, 90, 320, 3)
# Build a functional model design
inputs = Input(shape=(number_of_frames, image_height, image_width, image_channels,),
               batch_shape=(batch_size, number_of_frames, image_height, image_width, image_channels),
              name="Input")
x = Conv3D(cnn_kernel_count,
           kernel_size = (1, 3, 3),
           padding = "same",
           activation = "relu",
           name = "conv3d_%i"%conv_num)(inputs)
conv_num+=1

x = Conv3D(cnn_kernel_count,
           kernel_size = (1, 3, 3),
           padding = "same",
           activation = "relu",
           name = "conv3d_%i"%conv_num)(x)
conv_num+=1

# Downsample before the next convolution block
x = MaxPooling3D( pool_size=(1, 2, 2),
                  name = "max_pooling3d_%i"%pool_num)(x) # (batch_size, 1, 45, 160, 120)
pool_num+=1

x = Conv3D(cnn_kernel_count,
           kernel_size = (1, 3, 3),
           padding = "same",
           activation = "relu",
           name = "conv3d_%i"%conv_num)(x)
conv_num+=1
x = Conv3D(cnn_kernel_count,
           kernel_size = (1, 3, 3),
           padding = "same",
           activation = "relu",
           name = "conv3d_%i"%conv_num)(x)
conv_num+=1

x = MaxPooling3D( pool_size=(1, 1, 2),
                  name = "max_pooling3d_%i"%pool_num)(x) # (batch_size, 1, 45, 80, 120)
pool_num+=1

x = Conv3D(cnn_kernel_count,
           kernel_size = (1, 3, 3),
           padding = "same",
           activation = "relu",
           name = "conv3d_%i"%conv_num)(x)
conv_num+=1
x = Conv3D(cnn_kernel_count,
           kernel_size = (1, 3, 3),
           padding = "same",
           activation = "relu",
           name = "conv3d_%i"%conv_num)(x)
conv_num+=1
x = MaxPooling3D( pool_size=(1, 2, 2),
                  name = "max_pooling3d_%i"%pool_num)(x)
pool_num+=1

x = Conv3D(cnn_kernel_count,
           kernel_size = (1, 3, 3),
           padding = "same",
           activation = "relu",
           name = "conv3d_%i"%conv_num)(x)
conv_num+=1
x = Conv3D(cnn_kernel_count,
           kernel_size = (1, 3, 3),
           padding = "same",
           activation = "relu",
           name = "conv3d_%i"%conv_num)(x)
conv_num+=1
x = MaxPooling3D( pool_size=(1, 2, 2),
                  name = "max_pooling3d_%i"%pool_num)(x)
pool_num+=1



x = Conv3D(cnn_kernel_count,
           kernel_size = (1, 3, 3),
           padding = "same",
           activation = "relu",
           name = "conv3d_%i"%conv_num)(x)
conv_num+=1
x = Conv3D(cnn_kernel_count,
           kernel_size = (1, 3, 3),
           padding = "same",
           activation = "relu",
           name = "conv3d_%i"%conv_num)(x)
conv_num+=1
x = MaxPooling3D( pool_size=(1, 1, 2),
                  name = "max_pooling3d_%i"%pool_num)(x)
pool_num+=1


x = Flatten()(x)

x = Dense(128, activation='relu',name="dense_%i"%dense_num)(x)
dense_num+=1
x = Dropout(0.5)(x)
x = Dense(128, activation='relu',name="dense_%i"%dense_num)(x)
dense_num+=1
x = Dropout(0.5)(x)
x = Dense(64, activation='relu',name="dense_%i"%dense_num)(x)
dense_num+=1
x = Dropout(0.5)(x)
predictions = Dense(output_size, activation='linear',name="dense_%i"%dense_num)(x)
dense_num+=1

model = Model(inputs=inputs, outputs=predictions)


# Regression on the rod position: mean squared error loss
model.compile(optimizer=keras.optimizers.RMSprop(lr=0.0001),
              loss='mean_squared_error',
              metrics=['accuracy'])


model.summary()


'Input shape without batches:'
(1, 90, 320, 3)
_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
Input (InputLayer)           (10, 1, 90, 320, 3)       0         
_________________________________________________________________
conv3d_0 (Conv3D)            (10, 1, 90, 320, 120)     3360      
_________________________________________________________________
conv3d_1 (Conv3D)            (10, 1, 90, 320, 120)     129720    
_________________________________________________________________
max_pooling3d_0 (MaxPooling3 (10, 1, 45, 160, 120)     0         
_________________________________________________________________
conv3d_2 (Conv3D)            (10, 1, 45, 160, 120)     129720    
_________________________________________________________________
conv3d_3 (Conv3D)            (10, 1, 45, 160, 120)     129720    
_________________________________________________________________
max_pooling3d_1 (MaxPooling3 (10, 1, 45, 80, 120)      0         
_________________________________________________________________
conv3d_4 (Conv3D)            (10, 1, 45, 80, 120)      129720    
_________________________________________________________________
conv3d_5 (Conv3D)            (10, 1, 45, 80, 120)      129720    
_________________________________________________________________
max_pooling3d_2 (MaxPooling3 (10, 1, 22, 40, 120)      0         
_________________________________________________________________
conv3d_6 (Conv3D)            (10, 1, 22, 40, 120)      129720    
_________________________________________________________________
conv3d_7 (Conv3D)            (10, 1, 22, 40, 120)      129720    
_________________________________________________________________
max_pooling3d_3 (MaxPooling3 (10, 1, 11, 20, 120)      0         
_________________________________________________________________
conv3d_8 (Conv3D)            (10, 1, 11, 20, 120)      129720    
_________________________________________________________________
conv3d_9 (Conv3D)            (10, 1, 11, 20, 120)      129720    
_________________________________________________________________
max_pooling3d_4 (MaxPooling3 (10, 1, 11, 10, 120)      0         
_________________________________________________________________
flatten_1 (Flatten)          (10, 13200)               0         
_________________________________________________________________
dense_0 (Dense)              (10, 128)                 1689728   
_________________________________________________________________
dropout_1 (Dropout)          (10, 128)                 0         
_________________________________________________________________
dense_1 (Dense)              (10, 128)                 16512     
_________________________________________________________________
dropout_2 (Dropout)          (10, 128)                 0         
_________________________________________________________________
dense_2 (Dense)              (10, 64)                  8256      
_________________________________________________________________
dropout_3 (Dropout)          (10, 64)                  0         
_________________________________________________________________
dense_3 (Dense)              (10, 1)                   65        
=================================================================
Total params: 2,885,401
Trainable params: 2,885,401
Non-trainable params: 0
_________________________________________________________________

Train our model to identify the rod positions


In [7]:
def mse(y_true, y_pred):
    return K.square(y_pred - y_true)*0.001 # Scaled-down MSE so the Keras progress bar prints the metric in scientific notation (e.g. a loss of 0.0604 shows as 6.04e-05), giving extra visible precision.


model.compile(optimizer=keras.optimizers.RMSprop(lr=0.0005),
              loss='mean_squared_error',
              metrics=[mse])

print("Updated learner.")

# Train the model to predict the rod position; the prediction is fed to the robot AI as its control signal.
WEIGHTS_FNAME = '.\\RodAngle\\pos_cnn_weights_%i.hdf'
MODELS_FNAME = '.\\RodAngle\\pos_cnn_models_%i.h5'


batches_training_per_epoch = int(training.get_training_count() / batch_size)
batches_validation_per_epoch = int(training.get_validation_count() / batch_size)
print("Batch size %i: %i training batches, %i validation batches" % (batch_size, batches_training_per_epoch, batches_validation_per_epoch) )

model.reset_states()
for epoch in range(10000):
    try:
        model.fit_generator(TrainBatchGen(batch_size, model, training), batches_training_per_epoch, epochs=epoch+1, verbose=1, callbacks=None, class_weight=None, max_q_size=10, workers=1, validation_data=ValidateBatchGen(batch_size, model, training), validation_steps = batches_validation_per_epoch, pickle_safe=False, initial_epoch=epoch)
        model.save_weights(WEIGHTS_FNAME % epoch)
        model.save(MODELS_FNAME % epoch)
        print(("Wrote model to " + WEIGHTS_FNAME )  % epoch)
    except KeyboardInterrupt:
        print("\r\nUser stopped the training.")
        break


Updated learner.
Batch size 10: 3900 training batches, 975 validation batches
Epoch 1/1
3900/3900 [==============================] - 2119s - loss: 0.0604 - mse: 6.0406e-05 - val_loss: 0.1081 - val_mse: 1.0807e-04
Wrote model to .\RodAngle\pos_cnn_weights_0.hdf
Epoch 2/2
3900/3900 [==============================] - 2103s - loss: 0.0196 - mse: 1.9590e-05 - val_loss: 0.1151 - val_mse: 1.1508e-04
Wrote model to .\RodAngle\pos_cnn_weights_1.hdf
Epoch 3/3
3900/3900 [==============================] - 2102s - loss: 0.0160 - mse: 1.6013e-05 - val_loss: 0.1139 - val_mse: 1.1389e-04
Wrote model to .\RodAngle\pos_cnn_weights_2.hdf
Epoch 4/4
3900/3900 [==============================] - 2103s - loss: 0.0142 - mse: 1.4235e-05 - val_loss: 0.0884 - val_mse: 8.8446e-05
Wrote model to .\RodAngle\pos_cnn_weights_3.hdf
Epoch 5/5
3900/3900 [==============================] - 2103s - loss: 0.0170 - mse: 1.6985e-05 - val_loss: 0.1020 - val_mse: 1.0197e-04
Wrote model to .\RodAngle\pos_cnn_weights_4.hdf
Epoch 6/6
3900/3900 [==============================] - 2103s - loss: 0.0579 - mse: 5.7864e-05 - val_loss: 0.1218 - val_mse: 1.2183e-04
Wrote model to .\RodAngle\pos_cnn_weights_5.hdf
Epoch 7/7
3900/3900 [==============================] - 2103s - loss: 0.0134 - mse: 1.3370e-05 - val_loss: 0.1091 - val_mse: 1.0910e-04
Wrote model to .\RodAngle\pos_cnn_weights_6.hdf
Epoch 8/8
3900/3900 [==============================] - 2103s - loss: 0.0202 - mse: 2.0216e-05 - val_loss: 0.1058 - val_mse: 1.0580e-04
Wrote model to .\RodAngle\pos_cnn_weights_7.hdf
Epoch 9/9
3900/3900 [==============================] - 2103s - loss: 0.0182 - mse: 1.8206e-05 - val_loss: 0.0838 - val_mse: 8.3825e-05
Wrote model to .\RodAngle\pos_cnn_weights_8.hdf
Epoch 10/10
3900/3900 [==============================] - 2103s - loss: 0.0160 - mse: 1.5984e-05 - val_loss: 0.0657 - val_mse: 6.5662e-05
Wrote model to .\RodAngle\pos_cnn_weights_9.hdf
Epoch 11/11
3900/3900 [==============================] - 2103s - loss: 0.0145 - mse: 1.4547e-05 - val_loss: 0.1014 - val_mse: 1.0140e-04
Wrote model to .\RodAngle\pos_cnn_weights_10.hdf
Epoch 12/12
3900/3900 [==============================] - 2103s - loss: 0.0148 - mse: 1.4788e-05 - val_loss: 0.0898 - val_mse: 8.9792e-05
Wrote model to .\RodAngle\pos_cnn_weights_11.hdf
Epoch 13/13
3900/3900 [==============================] - 2103s - loss: 0.0147 - mse: 1.4673e-05 - val_loss: 0.1079 - val_mse: 1.0785e-04
Wrote model to .\RodAngle\pos_cnn_weights_12.hdf
Epoch 14/14
3900/3900 [==============================] - 2103s - loss: 0.0126 - mse: 1.2556e-05 - val_loss: 0.0688 - val_mse: 6.8816e-05
Wrote model to .\RodAngle\pos_cnn_weights_13.hdf
Epoch 15/15
1107/3900 [=======>......................] - ETA: 1399s - loss: 0.0132 - mse: 1.3174e-05
User stopped the training.

Test the best trained model's ability to extract the rod positions


In [12]:
# Load the best model result
epoch = 13
WEIGHTS_FNAME = '.\\RodAngle\\pos_cnn_weights_%i.hdf'
MODELS_FNAME = '.\\RodAngle\\pos_cnn_models_%i.h5'
model.load_weights(WEIGHTS_FNAME % epoch, by_name=True)
print("Loaded model.")


Loaded model.

In [13]:
# Plot the real versus predicted values for some of the validation data
plot_validate(ValidateBatchGen(batch_size, model, training), model, 2000, "Position prediction")


Predicted.
array([[ 0.25945947],
       [ 0.25945947],
       [ 0.28648648],
       ..., 
       [ 0.91666669],
       [ 0.89705884],
       [ 0.89215684]], dtype=float32)
array([[ 0.36889505],
       [ 0.33275491],
       [ 0.38173857],
       ..., 
       [ 0.68189257],
       [ 0.67548656],
       [ 0.66991365]], dtype=float32)

In [20]:
def mse(y_true, y_pred):
    return K.square(y_pred - y_true)*0.001 # Scaled-down MSE so the Keras progress bar prints the metric in scientific notation, giving extra visible precision.


model.compile(optimizer=keras.optimizers.RMSprop(lr=0.00005),
              loss='mean_squared_error',
              metrics=[mse])

print("Updated learner.")

batches_training_per_epoch = int(training.get_training_count() / batch_size)
batches_validation_per_epoch = int(training.get_validation_count() / batch_size)
print("Batch size %i: %i training batches, %i validation batches" % (batch_size, batches_training_per_epoch, batches_validation_per_epoch) )

start_epoch = epoch
model.reset_states()
for epoch in range(start_epoch,10000):
    try:
        model.fit_generator(TrainBatchGen(batch_size, model, training), batches_training_per_epoch, epochs=epoch+1, verbose=1, callbacks=None, class_weight=None, max_q_size=10, workers=1, validation_data=ValidateBatchGen(batch_size, model, training), validation_steps = batches_validation_per_epoch, pickle_safe=False, initial_epoch=epoch)
        model.save_weights(WEIGHTS_FNAME % epoch)
        model.save(MODELS_FNAME % epoch)
        print(("Wrote model to " + WEIGHTS_FNAME )  % epoch)
    except KeyboardInterrupt:
        print("\r\nUser stopped the training.")
        break


Updated learner.
Batch size 10: 1199 training batches, 299 validation batches
Epoch 1/1
   1/1199 [..............................] - ETA: 2347s - loss: 0.1177 - mse: 1.1772e-04
User stopped the training.

In [11]:
# Plot the real versus predicted values for some of the validation data
plot_validate(ValidateBatchGen(batch_size, model, training), model, 2000, "Position prediction")


Predicted.
array([[ 0.25945947],
       [ 0.25945947],
       [ 0.28648648],
       ..., 
       [ 0.91666669],
       [ 0.89705884],
       [ 0.89215684]], dtype=float32)
array([[ 0.32116389],
       [ 0.32118678],
       [ 0.33413178],
       ..., 
       [ 0.90547734],
       [ 0.85345852],
       [ 0.85565698]], dtype=float32)

In [8]:
# Load the best model result
epoch = 24
WEIGHTS_FNAME = '.\\RodAngle\\pos_cnn_weights_%i.hdf'
MODELS_FNAME = '.\\RodAngle\\pos_cnn_models_%i.h5'
model.load_weights(WEIGHTS_FNAME % epoch, by_name=True)
print("Loaded model.")


Loaded model.

In [9]:
def mse(y_true, y_pred):
    return K.square(y_pred - y_true)*0.001 # Scaled-down MSE so the Keras progress bar prints the metric in scientific notation, giving extra visible precision.


model.compile(optimizer=keras.optimizers.RMSprop(lr=0.00001),
              loss='mean_squared_error',
              metrics=[mse])

print("Updated learner.")

batches_training_per_epoch = int(training.get_training_count() / batch_size)
batches_validation_per_epoch = int(training.get_validation_count() / batch_size)
print("Batch size %i: %i training batches, %i validation batches" % (batch_size, batches_training_per_epoch, batches_validation_per_epoch) )

start_epoch = epoch
model.reset_states()
for epoch in range(start_epoch,10000):
    try:
        model.fit_generator(TrainBatchGen(batch_size, model, training), batches_training_per_epoch, epochs=epoch+1, verbose=1, callbacks=None, class_weight=None, max_q_size=10, workers=1, validation_data=ValidateBatchGen(batch_size, model, training), validation_steps = batches_validation_per_epoch, pickle_safe=False, initial_epoch=epoch)
        model.save_weights(WEIGHTS_FNAME % epoch)
        model.save(MODELS_FNAME % epoch)
        print(("Wrote model to " + WEIGHTS_FNAME )  % epoch)
    except KeyboardInterrupt:
        print("\r\nUser stopped the training.")
        break


Updated learner.
Batch size 10: 3900 training batches, 975 validation batches
Epoch 25/25
3900/3900 [==============================] - 1780s - loss: 0.0068 - mse: 6.8101e-06 - val_loss: 0.0050 - val_mse: 5.0152e-06
Wrote model to .\RodAngle\pos_cnn_weights_24.hdf
Epoch 26/26
3900/3900 [==============================] - 1770s - loss: 0.0066 - mse: 6.5850e-06 - val_loss: 0.0043 - val_mse: 4.3282e-06
Wrote model to .\RodAngle\pos_cnn_weights_25.hdf
Epoch 27/27
 740/3900 [====>.........................] - ETA: 1314s - loss: 0.0077 - mse: 7.6595e-06
User stopped the training.

In [ ]:
# Load the best model result
epoch = 25
WEIGHTS_FNAME = '.\\RodAngle\\pos_cnn_weights_%i.hdf'
MODELS_FNAME = '.\\RodAngle\\pos_cnn_models_%i.h5'
model.load_weights(WEIGHTS_FNAME % epoch, by_name=True)
print("Loaded model.")

# Plot the real versus predicted values for some of the validation data
plot_validate(ValidateBatchGen(batch_size, model, training), model, 2000, "Position prediction")


Loaded model.

Transfer the model as a base to predict the rod angles
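
The cells below rebuild the same architecture on the angle data. Because every layer carries a fixed name, the position model's convolutional weights can seed this new model via Keras's by-name weight loading, although the load itself is not shown in this excerpt. A minimal sketch, assuming the position weights saved earlier (the epoch index and file are illustrative) and that any layers whose shapes differ in the angle model, such as the first dense layer after the smaller flatten, are renamed or excluded:

# Illustrative transfer step, not part of the recorded run:
model.load_weights('.\\RodAngle\\pos_cnn_weights_25.hdf', by_name=True)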


In [5]:
# Paths relative to current python file.


print("Opening training frames from config %s." % (data_path))

position_rel_indexes = [0]
frame_rel_indexes = [0]

# Create the image transformer
data_path  = ".\\..\\..\\TrainingData\\Processed\\RodTrainingDataAngles\\Result\\settings_just_two.tsv"
transformer = VideoTransform( zoom_range=0.1, rotation_range=5, width_shift_range=0.1, height_shift_range=0.1, shear_range= 0.1, fill_mode='nearest', vertical_flip=False, horizontal_flip=True, horizontal_flip_invert_indices = [], horizontal_flip_reverse_indices = [], data_format='channels_last' )
training = TrainingInput(transformer, data_path, position_rel_indexes, frame_rel_indexes, 0.2)


Opening training frames from config .\..\..\TrainingData\Processed\RodTrainingData\Result\settings.tsv.
Creating training chunk from .\..\..\TrainingData\Processed\RodTrainingDataAngles\Result\chunk0.avi
.\..\..\TrainingData\Processed\RodTrainingDataAngles\Result\chunk0.avi
added 3204 new frames for a total of 3204
Creating training chunk from .\..\..\TrainingData\Processed\RodTrainingDataAngles\Result\chunk3.avi
.\..\..\TrainingData\Processed\RodTrainingDataAngles\Result\chunk3.avi
added 1667 new frames for a total of 4871

In [5]:
from keras.models import Sequential
from keras.layers import *
from keras.models import Model


number_of_frames = 1
image_height       = training.height
image_width        = training.width
image_depth        = training.depth
image_channels     = training.channels
output_size        = 1

# Model options
batch_size = 1
cnn_kernel_count = 120

# Build the model
pp.pprint("Input shape without batches:")
pp.pprint((image_depth, image_height, image_width, image_channels))

# Used to give fixed names to the layers for transferring the model
conv_num = 0 
pool_num = 0
dense_num = 0

 # Input shape with batch: (batch_size, 1, 90, 320, 3)
# Build a functional model design
inputs = Input(shape=(number_of_frames, image_height, image_width, image_channels,),
               batch_shape=(batch_size, number_of_frames, image_height, image_width, image_channels),
              name="Input")
x = Conv3D(cnn_kernel_count,
           kernel_size = (1, 3, 3),
           padding = "same",
           activation = "relu",
           name = "conv3d_%i"%conv_num)(inputs)
conv_num+=1

x = Conv3D(cnn_kernel_count,
           kernel_size = (1, 3, 3),
           padding = "same",
           activation = "relu",
           name = "conv3d_%i"%conv_num)(x)
conv_num+=1

# Downsample before the next convolution block
x = MaxPooling3D( pool_size=(1, 2, 2),
                  name = "max_pooling3d_%i"%pool_num)(x) # (batch_size, 1, 45, 160, 120)
pool_num+=1

x = Conv3D(cnn_kernel_count,
           kernel_size = (1, 3, 3),
           padding = "same",
           activation = "relu",
           name = "conv3d_%i"%conv_num)(x)
conv_num+=1
x = Conv3D(cnn_kernel_count,
           kernel_size = (1, 3, 3),
           padding = "same",
           activation = "relu",
           name = "conv3d_%i"%conv_num)(x)
conv_num+=1

x = MaxPooling3D( pool_size=(1, 1, 2),
                  name = "max_pooling3d_%i"%pool_num)(x) # (batch_size, 1, 45, 80, 120)
pool_num+=1

x = Conv3D(cnn_kernel_count,
           kernel_size = (1, 3, 3),
           padding = "same",
           activation = "relu",
           name = "conv3d_%i"%conv_num)(x)
conv_num+=1
x = Conv3D(cnn_kernel_count,
           kernel_size = (1, 3, 3),
           padding = "same",
           activation = "relu",
           name = "conv3d_%i"%conv_num)(x)
conv_num+=1
x = MaxPooling3D( pool_size=(1, 2, 2),
                  name = "max_pooling3d_%i"%pool_num)(x)
pool_num+=1

x = Conv3D(cnn_kernel_count,
           kernel_size = (1, 3, 3),
           padding = "same",
           activation = "relu",
           name = "conv3d_%i"%conv_num)(x)
conv_num+=1
x = Conv3D(cnn_kernel_count,
           kernel_size = (1, 3, 3),
           padding = "same",
           activation = "relu",
           name = "conv3d_%i"%conv_num)(x)
conv_num+=1
x = MaxPooling3D( pool_size=(1, 2, 2),
                  name = "max_pooling3d_%i"%pool_num)(x)
pool_num+=1


x = Conv3D(cnn_kernel_count,
           kernel_size = (1, 3, 3),
           padding = "same",
           activation = "relu",
           name = "conv3d_%i"%conv_num)(x)
conv_num+=1
x = Conv3D(cnn_kernel_count,
           kernel_size = (1, 3, 3),
           padding = "same",
           activation = "relu",
           name = "conv3d_%i"%conv_num)(x)
conv_num+=1
x = MaxPooling3D( pool_size=(1, 2, 2),
                  name = "max_pooling3d_%i"%pool_num)(x)
pool_num+=1


x = Conv3D(cnn_kernel_count,
           kernel_size = (1, 3, 3),
           padding = "same",
           activation = "relu",
           name = "conv3d_%i"%conv_num)(x)
conv_num+=1
x = Conv3D(cnn_kernel_count,
           kernel_size = (1, 3, 3),
           padding = "same",
           activation = "relu",
           name = "conv3d_%i"%conv_num)(x)
conv_num+=1
x = MaxPooling3D( pool_size=(1, 1, 2),
                  name = "max_pooling3d_%i"%pool_num)(x)
pool_num+=1


x = Flatten()(x)

x = Dense(128, activation='relu',name="dense_%i"%dense_num)(x)
dense_num+=1
x = Dropout(0.5)(x)
x = Dense(128, activation='relu',name="dense_%i"%dense_num)(x)
dense_num+=1
x = Dropout(0.5)(x)
x = Dense(64, activation='relu',name="dense_%i"%dense_num)(x)
dense_num+=1
x = Dropout(0.5)(x)
predictions = Dense(output_size, activation='linear',name="dense_%i"%dense_num)(x)
dense_num+=1

model = Model(inputs=inputs, outputs=predictions)


# Regression on the rod angle: mean squared error loss
model.compile(optimizer=keras.optimizers.RMSprop(lr=0.0001),
              loss='mean_squared_error',
              metrics=['accuracy'])


model.summary()


'Input shape without batches:'
(1, 90, 320, 3)
_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
Input (InputLayer)           (1, 1, 90, 320, 3)        0         
_________________________________________________________________
conv3d_0 (Conv3D)            (1, 1, 90, 320, 120)      3360      
_________________________________________________________________
conv3d_1 (Conv3D)            (1, 1, 90, 320, 120)      129720    
_________________________________________________________________
max_pooling3d_0 (MaxPooling3 (1, 1, 45, 160, 120)      0         
_________________________________________________________________
conv3d_2 (Conv3D)            (1, 1, 45, 160, 120)      129720    
_________________________________________________________________
conv3d_3 (Conv3D)            (1, 1, 45, 160, 120)      129720    
_________________________________________________________________
max_pooling3d_1 (MaxPooling3 (1, 1, 45, 80, 120)       0         
_________________________________________________________________
conv3d_4 (Conv3D)            (1, 1, 45, 80, 120)       129720    
_________________________________________________________________
conv3d_5 (Conv3D)            (1, 1, 45, 80, 120)       129720    
_________________________________________________________________
max_pooling3d_2 (MaxPooling3 (1, 1, 22, 40, 120)       0         
_________________________________________________________________
conv3d_6 (Conv3D)            (1, 1, 22, 40, 120)       129720    
_________________________________________________________________
conv3d_7 (Conv3D)            (1, 1, 22, 40, 120)       129720    
_________________________________________________________________
max_pooling3d_3 (MaxPooling3 (1, 1, 11, 20, 120)       0         
_________________________________________________________________
conv3d_8 (Conv3D)            (1, 1, 11, 20, 120)       129720    
_________________________________________________________________
conv3d_9 (Conv3D)            (1, 1, 11, 20, 120)       129720    
_________________________________________________________________
max_pooling3d_4 (MaxPooling3 (1, 1, 5, 10, 120)        0         
_________________________________________________________________
conv3d_10 (Conv3D)           (1, 1, 5, 10, 120)        129720    
_________________________________________________________________
conv3d_11 (Conv3D)           (1, 1, 5, 10, 120)        129720    
_________________________________________________________________
max_pooling3d_5 (MaxPooling3 (1, 1, 5, 5, 120)         0         
_________________________________________________________________
flatten_1 (Flatten)          (1, 3000)                 0         
_________________________________________________________________
dense_0 (Dense)              (1, 128)                  384128    
_________________________________________________________________
dropout_1 (Dropout)          (1, 128)                  0         
_________________________________________________________________
dense_1 (Dense)              (1, 128)                  16512     
_________________________________________________________________
dropout_2 (Dropout)          (1, 128)                  0         
_________________________________________________________________
dense_2 (Dense)              (1, 64)                   8256      
_________________________________________________________________
dropout_3 (Dropout)          (1, 64)                   0         
_________________________________________________________________
dense_3 (Dense)              (1, 1)                    65        
=================================================================
Total params: 1,839,241
Trainable params: 1,839,241
Non-trainable params: 0
_________________________________________________________________

In [12]:
def mse_wrap(y_true, y_pred):
    # Wrapped MSE: a rod rotation of -1 is the same physical angle as +1, so take the
    # smallest of the three candidate differences (d, d+2, d-2) before squaring.
    return K.square( K.min( K.abs( K.concatenate([y_pred - y_true, y_pred - y_true + 2, y_pred - y_true -2])), axis=1 ) )

def mse(y_true, y_pred):
    return K.square(y_pred - y_true)
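
# Quick illustrative check of the wrap-around idea (not part of the recorded run): a
# prediction of -0.95 against a true angle of +0.95 differs by 1.9 directly, but the
# wrapped distance min(|d|, |d+2|, |d-2|) is 0.1, so mse_wrap scores it as ~0.01 instead of ~3.61.
d = -0.95 - 0.95
assert abs(min(abs(d), abs(d + 2), abs(d - 2)) ** 2 - 0.01) < 1e-9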


model.compile(optimizer=keras.optimizers.RMSprop(lr=0.001),
              loss=[mse],
              metrics=[mse])

print("Updated learner.")

WEIGHTS_FNAME = '.\\RodAngle\\angle_weights_%i.hdf'
MODELS_FNAME = '.\\RodAngle\\angle_models_%i.h5'

batches_training_per_epoch = int(training.get_training_count() / batch_size)
batches_validation_per_epoch = int(training.get_validation_count() / batch_size)
print("Batch size %i: %i training batches, %i validation batches" % (batch_size, batches_training_per_epoch, batches_validation_per_epoch) )

epoch = 0
model.reset_states()

model.compile(optimizer=keras.optimizers.RMSprop(lr=0.00001),
              loss=[mse_wrap],
              metrics=[mse])
print("Updated learner.")



# Create the image transformer
data_path  = ".\\..\\..\\TrainingData\\Processed\\RodTrainingDataAngles\\Result\\settings_just_two.tsv"
transformer = VideoTransform( zoom_range=0.1, rotation_range=5, width_shift_range=0.1, height_shift_range=0.1, shear_range= 0.1, fill_mode='nearest', vertical_flip=False, horizontal_flip=True, horizontal_flip_invert_indices = [], horizontal_flip_reverse_indices = [], data_format='channels_last' )
training = TrainingInput(transformer, data_path, position_rel_indexes, frame_rel_indexes, 0.2)
lr = 0.0001
print("Updated lr to %f" % lr)
model.compile(optimizer=keras.optimizers.RMSprop(lr=lr),
              loss=[mse_wrap],
              metrics=[mse])

start_epoch = epoch + 1
for epoch in range(start_epoch,5):
    try:
        model.fit_generator(TrainBatchGen(batch_size, model, training), batches_training_per_epoch, epochs=epoch+1, verbose=1, callbacks=None, class_weight=None, max_q_size=10, workers=1, validation_data=ValidateBatchGen(batch_size, model, training), validation_steps = batches_validation_per_epoch, pickle_safe=False, initial_epoch=epoch)
        model.save_weights(WEIGHTS_FNAME % epoch)
        model.save(MODELS_FNAME % epoch)
        print(("Wrote model to " + WEIGHTS_FNAME )  % epoch)
    except KeyboardInterrupt:
        print("\r\nUser stopped the training.")
        assert(False)
        break
        
# Plot the real versus predicted values for some of the validation data
plot_validate(ValidateBatchGen(batch_size, model, training), model, 2000, "Angle prediction")       


data_path  = ".\\..\\..\\TrainingData\\Processed\\RodTrainingDataAngles\\Result\\settings.tsv"
transformer = VideoTransform( zoom_range=0.1, rotation_range=5, width_shift_range=0.1, height_shift_range=0.1, shear_range= 0.1, fill_mode='nearest', vertical_flip=False, horizontal_flip=True, horizontal_flip_invert_indices = [], horizontal_flip_reverse_indices = [], data_format='channels_last' )
training = TrainingInput(transformer, data_path, position_rel_indexes, frame_rel_indexes, 0.2)

lr = 0.0001
print("Updated lr to %f" % lr)
model.compile(optimizer=keras.optimizers.RMSprop(lr=lr),
              loss=[mse_wrap],
              metrics=[mse])

start_epoch = epoch + 1
for epoch in range(start_epoch,20):
    try:
        model.fit_generator(TrainBatchGen(batch_size, model, training), batches_training_per_epoch, epochs=epoch+1, verbose=1, callbacks=None, class_weight=None, max_q_size=10, workers=1, validation_data=ValidateBatchGen(batch_size, model, training), validation_steps = batches_validation_per_epoch, pickle_safe=False, initial_epoch=epoch)
        model.save_weights(WEIGHTS_FNAME % epoch)
        model.save(MODELS_FNAME % epoch)
        print(("Wrote model to " + WEIGHTS_FNAME )  % epoch)
    except KeyboardInterrupt:
        print("\r\nUser stopped the training.")
        assert(False)
        break

# Plot the real versus predicted values for some of the validation data
plot_validate(ValidateBatchGen(batch_size, model, training), model, 2000, "Angle prediction")        

lr = 0.00001
print("Updated lr to %f" % lr)
model.compile(optimizer=keras.optimizers.RMSprop(lr=lr),
              loss=[mse_wrap],
              metrics=[mse])

start_epoch = epoch + 1
for epoch in range(start_epoch,40):
    try:
        model.fit_generator(TrainBatchGen(batch_size, model, training), batches_training_per_epoch, epochs=epoch+1, verbose=1, callbacks=None, class_weight=None, max_q_size=10, workers=1, validation_data=ValidateBatchGen(batch_size, model, training), validation_steps = batches_validation_per_epoch, pickle_safe=False, initial_epoch=epoch)
        model.save_weights(WEIGHTS_FNAME % epoch)
        model.save(MODELS_FNAME % epoch)
        print(("Wrote model to " + WEIGHTS_FNAME )  % epoch)
    except KeyboardInterrupt:
        print("\r\nUser stopped the training.")
        assert(False)
        break


# Plot the real versus predicted values for some of the validation data
plot_validate(ValidateBatchGen(batch_size, model, training), model, 2000, "Angle prediction")

lr = 0.000001
print("Updated lr to %f" % lr)
model.compile(optimizer=keras.optimizers.RMSprop(lr=lr),
              loss=[mse_wrap],
              metrics=[mse])

start_epoch = epoch + 1
for epoch in range(start_epoch,60):
    try:
        model.fit_generator(TrainBatchGen(batch_size, model, training), batches_training_per_epoch, epochs=epoch+1, verbose=1, callbacks=None, class_weight=None, max_q_size=10, workers=1, validation_data=ValidateBatchGen(batch_size, model, training), validation_steps = batches_validation_per_epoch, pickle_safe=False, initial_epoch=epoch)
        model.save_weights(WEIGHTS_FNAME % epoch)
        model.save(MODELS_FNAME % epoch)
        print(("Wrote model to " + WEIGHTS_FNAME )  % epoch)
    except KeyboardInterrupt:
        print("\r\nUser stopped the training.")
        assert(False)
        break


# Plot the real versus predicted values for some of the validation data
plot_validate(ValidateBatchGen(batch_size, model, training), model, 2000, "Angle prediction")
        
lr = 0.0000001
print("Updated lr to %f" % lr)
model.compile(optimizer=keras.optimizers.RMSprop(lr=lr),
              loss=[mse_wrap],
              metrics=[mse])

start_epoch = epoch + 1
for epoch in range(start_epoch,90):
    try:
        model.fit_generator(TrainBatchGen(batch_size, model, training), batches_training_per_epoch, epochs=epoch+1, verbose=1, callbacks=None, class_weight=None, max_q_size=10, workers=1, validation_data=ValidateBatchGen(batch_size, model, training), validation_steps = batches_validation_per_epoch, pickle_safe=False, initial_epoch=epoch)
        model.save_weights(WEIGHTS_FNAME % epoch)
        model.save(MODELS_FNAME % epoch)
        print(("Wrote model to " + WEIGHTS_FNAME )  % epoch)
    except KeyboardInterrupt:
        print("\r\nUser stopped the training.")
        assert(False)
        break


# Plot the real versus predicted values for some of the validation data
plot_validate(ValidateBatchGen(batch_size, model, training), model, 2000, "Angle prediction")

lr = 0.00000001
print("Updated lr to %f" % lr)
model.compile(optimizer=keras.optimizers.RMSprop(lr=lr),
              loss=[mse_wrap],
              metrics=[mse])

start_epoch = epoch + 1
for epoch in range(start_epoch,120):
    try:
        model.fit_generator(TrainBatchGen(batch_size, model, training), batches_training_per_epoch, epochs=epoch+1, verbose=1, callbacks=None, class_weight=None, max_q_size=10, workers=1, validation_data=ValidateBatchGen(batch_size, model, training), validation_steps = batches_validation_per_epoch, pickle_safe=False, initial_epoch=epoch)
        model.save_weights(WEIGHTS_FNAME % epoch)
        model.save(MODELS_FNAME % epoch)
        print(("Wrote model to " + WEIGHTS_FNAME )  % epoch)
    except KeyboardInterrupt:
        print("\r\nUser stopped the training.")
        assert(False)
        break


Updated learner.
Batch size 1: 3897 training batches, 974 validation batches
Updated learner.
Creating training chunk from .\..\..\TrainingData\Processed\RodTrainingDataAngles\Result\chunk0.avi
.\..\..\TrainingData\Processed\RodTrainingDataAngles\Result\chunk0.avi
added 3204 new frames for a total of 3204
Creating training chunk from .\..\..\TrainingData\Processed\RodTrainingDataAngles\Result\chunk3.avi
.\..\..\TrainingData\Processed\RodTrainingDataAngles\Result\chunk3.avi
added 1667 new frames for a total of 4871
Updated lr to 0.000100
Epoch 2/2
3897/3897 [==============================] - 272s - loss: 0.0441 - mse: 0.0453 - val_loss: 0.0014 - val_mse: 0.0014
Wrote model to .\RodAngle\angle_weights_1.hdf
Epoch 3/3
3897/3897 [==============================] - 274s - loss: 0.0220 - mse: 0.0220 - val_loss: 0.0045 - val_mse: 0.0045
Wrote model to .\RodAngle\angle_weights_2.hdf
Epoch 4/4
3897/3897 [==============================] - 274s - loss: 0.0136 - mse: 0.0136 - val_loss: 0.0019 - val_mse: 0.0019
Wrote model to .\RodAngle\angle_weights_3.hdf
Epoch 5/5
3897/3897 [==============================] - 273s - loss: 0.0103 - mse: 0.0103 - val_loss: 1.9171e-04 - val_mse: 1.9171e-04
Wrote model to .\RodAngle\angle_weights_4.hdf
Predicted.
array([[ 0. ],
       [ 0. ],
       [ 0. ],
       ..., 
       [ 0.5],
       [ 0.5],
       [ 0.5]], dtype=float32)
array([[-0.0061878 ],
       [-0.00453545],
       [-0.01286873],
       ..., 
       [ 0.47895461],
       [ 0.48467016],
       [ 0.50686169]], dtype=float32)
Creating training chunk from .\..\..\TrainingData\Processed\RodTrainingDataAngles\Result\chunk0.avi
.\..\..\TrainingData\Processed\RodTrainingDataAngles\Result\chunk0.avi
added 3204 new frames for a total of 3204
Creating training chunk from .\..\..\TrainingData\Processed\RodTrainingDataAngles\Result\chunk3.avi
.\..\..\TrainingData\Processed\RodTrainingDataAngles\Result\chunk3.avi
added 1667 new frames for a total of 4871
Creating training chunk from .\..\..\TrainingData\Processed\RodTrainingDataAngles\Result\chunk4.avi
.\..\..\TrainingData\Processed\RodTrainingDataAngles\Result\chunk4.avi
added 1172 new frames for a total of 6043
Creating training chunk from .\..\..\TrainingData\Processed\RodTrainingDataAngles\Result\chunk5.avi
.\..\..\TrainingData\Processed\RodTrainingDataAngles\Result\chunk5.avi
added 2190 new frames for a total of 8233
Creating training chunk from .\..\..\TrainingData\Processed\RodTrainingDataAngles\Result\chunk6.avi
.\..\..\TrainingData\Processed\RodTrainingDataAngles\Result\chunk6.avi
added 1644 new frames for a total of 9877
Updated lr to 0.000100
Epoch 6/6
3897/3897 [==============================] - 256s - loss: 0.0436 - mse: 0.0461 - val_loss: 0.0101 - val_mse: 0.0101
Wrote model to .\RodAngle\angle_weights_5.hdf
Epoch 7/7
3897/3897 [==============================] - 255s - loss: 0.0311 - mse: 0.0320 - val_loss: 0.0086 - val_mse: 0.0086
Wrote model to .\RodAngle\angle_weights_6.hdf
Epoch 8/8
3897/3897 [==============================] - 255s - loss: 0.0266 - mse: 0.0275 - val_loss: 0.0119 - val_mse: 0.0119
Wrote model to .\RodAngle\angle_weights_7.hdf
Epoch 9/9
3897/3897 [==============================] - 255s - loss: 0.0225 - mse: 0.0225 - val_loss: 0.0036 - val_mse: 0.0036
Wrote model to .\RodAngle\angle_weights_8.hdf
Epoch 10/10
3897/3897 [==============================] - 255s - loss: 0.0212 - mse: 0.0212 - val_loss: 0.0057 - val_mse: 0.0057
Wrote model to .\RodAngle\angle_weights_9.hdf
Epoch 11/11
3897/3897 [==============================] - 255s - loss: 0.0238 - mse: 0.0341 - val_loss: 0.0237 - val_mse: 0.0237
Wrote model to .\RodAngle\angle_weights_10.hdf
Epoch 12/12
3897/3897 [==============================] - 255s - loss: 0.0202 - mse: 0.0212 - val_loss: 0.0077 - val_mse: 0.0077
Wrote model to .\RodAngle\angle_weights_11.hdf
Epoch 13/13
3897/3897 [==============================] - 255s - loss: 0.0180 - mse: 0.0180 - val_loss: 0.0011 - val_mse: 0.0011
Wrote model to .\RodAngle\angle_weights_12.hdf
Epoch 14/14
3897/3897 [==============================] - 255s - loss: 0.0189 - mse: 0.0212 - val_loss: 0.0018 - val_mse: 0.0018
Wrote model to .\RodAngle\angle_weights_13.hdf
Epoch 15/15
3897/3897 [==============================] - 255s - loss: 0.0176 - mse: 0.0199 - val_loss: 0.0014 - val_mse: 0.0014
Wrote model to .\RodAngle\angle_weights_14.hdf
Epoch 16/16
3897/3897 [==============================] - 255s - loss: 0.0167 - mse: 0.0167 - val_loss: 9.5043e-04 - val_mse: 9.5043e-04
Wrote model to .\RodAngle\angle_weights_15.hdf
Epoch 17/17
3897/3897 [==============================] - 255s - loss: 0.0168 - mse: 0.0168 - val_loss: 8.7654e-04 - val_mse: 8.7654e-04
Wrote model to .\RodAngle\angle_weights_16.hdf
Epoch 18/18
3897/3897 [==============================] - 255s - loss: 0.0171 - mse: 0.0171 - val_loss: 9.6707e-04 - val_mse: 9.6707e-04
Wrote model to .\RodAngle\angle_weights_17.hdf
Epoch 19/19
3897/3897 [==============================] - 255s - loss: 0.0183 - mse: 0.0232 - val_loss: 0.0019 - val_mse: 0.0019
Wrote model to .\RodAngle\angle_weights_18.hdf
Epoch 20/20
3897/3897 [==============================] - 255s - loss: 0.0160 - mse: 0.0182 - val_loss: 0.0035 - val_mse: 0.0035
Wrote model to .\RodAngle\angle_weights_19.hdf
Predicted.
array([[-0.5],
       [-0.5],
       [-0.5],
       ..., 
       [ 0.5],
       [ 0.5],
       [ 0.5]], dtype=float32)
array([[-0.41053873],
       [-0.3870396 ],
       [-0.45672327],
       ..., 
       [ 0.42535621],
       [ 0.48632437],
       [ 0.50587344]], dtype=float32)
Updated lr to 0.000010
Epoch 21/21
3897/3897 [==============================] - 257s - loss: 0.0109 - mse: 0.0109 - val_loss: 0.0038 - val_mse: 0.0038
Wrote model to .\RodAngle\angle_weights_20.hdf
Epoch 22/22
3897/3897 [==============================] - 256s - loss: 0.0102 - mse: 0.0102 - val_loss: 2.3554e-04 - val_mse: 2.3554e-04
Wrote model to .\RodAngle\angle_weights_21.hdf
Epoch 23/23
3897/3897 [==============================] - 256s - loss: 0.0107 - mse: 0.0107 - val_loss: 1.9943e-04 - val_mse: 1.9943e-04
Wrote model to .\RodAngle\angle_weights_22.hdf
Epoch 24/24
3897/3897 [==============================] - 256s - loss: 0.0103 - mse: 0.0103 - val_loss: 0.0018 - val_mse: 0.0018
Wrote model to .\RodAngle\angle_weights_23.hdf
Epoch 25/25
3897/3897 [==============================] - 256s - loss: 0.0109 - mse: 0.0109 - val_loss: 4.6945e-04 - val_mse: 4.6945e-04
Wrote model to .\RodAngle\angle_weights_24.hdf
Epoch 26/26
3897/3897 [==============================] - 256s - loss: 0.0105 - mse: 0.0105 - val_loss: 0.0023 - val_mse: 0.0023
Wrote model to .\RodAngle\angle_weights_25.hdf
Epoch 27/27
3897/3897 [==============================] - 256s - loss: 0.0103 - mse: 0.0103 - val_loss: 0.0018 - val_mse: 0.0018
Wrote model to .\RodAngle\angle_weights_26.hdf
Epoch 28/28
3897/3897 [==============================] - 256s - loss: 0.0101 - mse: 0.0101 - val_loss: 3.9144e-04 - val_mse: 3.9144e-04
Wrote model to .\RodAngle\angle_weights_27.hdf
Epoch 29/29
3897/3897 [==============================] - 256s - loss: 0.0103 - mse: 0.0103 - val_loss: 7.1851e-04 - val_mse: 7.1851e-04
Wrote model to .\RodAngle\angle_weights_28.hdf
Epoch 30/30
3897/3897 [==============================] - 256s - loss: 0.0106 - mse: 0.0106 - val_loss: 0.0034 - val_mse: 0.0034
Wrote model to .\RodAngle\angle_weights_29.hdf
Epoch 31/31
3897/3897 [==============================] - 256s - loss: 0.0110 - mse: 0.0110 - val_loss: 0.0016 - val_mse: 0.0016
Wrote model to .\RodAngle\angle_weights_30.hdf
Epoch 32/32
1621/3897 [===========>..................] - ETA: 263s - loss: 0.0105 - mse: 0.0105
User stopped the training.
---------------------------------------------------------------------------
KeyboardInterrupt                         Traceback (most recent call last)
<ipython-input-12-1ca49e780838> in <module>()
     93     try:
---> 94         model.fit_generator(TrainBatchGen(batch_size, model, training), batches_training_per_epoch, epochs=epoch+1, verbose=1, callbacks=None, class_weight=None, max_q_size=10, workers=1, validation_data=ValidateBatchGen(batch_size, model, training), validation_steps = batches_validation_per_epoch, pickle_safe=False, initial_epoch=epoch)
     95         model.save_weights(WEIGHTS_FNAME % epoch)

C:\local\Anaconda3-4.1.1-Windows-x86_64\lib\site-packages\keras\legacy\interfaces.py in wrapper(*args, **kwargs)
     87                               '` call to the Keras 2 API: ' + signature, stacklevel=2)
---> 88             return func(*args, **kwargs)
     89         wrapper._legacy_support_signature = inspect.getargspec(func)

C:\local\Anaconda3-4.1.1-Windows-x86_64\lib\site-packages\keras\engine\training.py in fit_generator(self, generator, steps_per_epoch, epochs, verbose, callbacks, validation_data, validation_steps, class_weight, max_q_size, workers, pickle_safe, initial_epoch)
   1889                                                sample_weight=sample_weight,
-> 1890                                                class_weight=class_weight)
   1891 

C:\local\Anaconda3-4.1.1-Windows-x86_64\lib\site-packages\keras\engine\training.py in train_on_batch(self, x, y, sample_weight, class_weight)
   1632         self._make_train_function()
-> 1633         outputs = self.train_function(ins)
   1634         if len(outputs) == 1:

C:\local\Anaconda3-4.1.1-Windows-x86_64\lib\site-packages\keras\backend\tensorflow_backend.py in __call__(self, inputs)
   2228         updated = session.run(self.outputs + [self.updates_op],
-> 2229                               feed_dict=feed_dict)
   2230         return updated[:len(self.outputs)]

C:\local\Anaconda3-4.1.1-Windows-x86_64\lib\site-packages\tensorflow\python\client\session.py in run(self, fetches, feed_dict, options, run_metadata)
    777       result = self._run(None, fetches, feed_dict, options_ptr,
--> 778                          run_metadata_ptr)
    779       if run_metadata:

C:\local\Anaconda3-4.1.1-Windows-x86_64\lib\site-packages\tensorflow\python\client\session.py in _run(self, handle, fetches, feed_dict, options, run_metadata)
    981       results = self._do_run(handle, final_targets, final_fetches,
--> 982                              feed_dict_string, options, run_metadata)
    983     else:

C:\local\Anaconda3-4.1.1-Windows-x86_64\lib\site-packages\tensorflow\python\client\session.py in _do_run(self, handle, target_list, fetch_list, feed_dict, options, run_metadata)
   1031       return self._do_call(_run_fn, self._session, feed_dict, fetch_list,
-> 1032                            target_list, options, run_metadata)
   1033     else:

C:\local\Anaconda3-4.1.1-Windows-x86_64\lib\site-packages\tensorflow\python\client\session.py in _do_call(self, fn, *args)
   1038     try:
-> 1039       return fn(*args)
   1040     except errors.OpError as e:

C:\local\Anaconda3-4.1.1-Windows-x86_64\lib\site-packages\tensorflow\python\client\session.py in _run_fn(session, feed_dict, fetch_list, target_list, options, run_metadata)
   1020                                  feed_dict, fetch_list, target_list,
-> 1021                                  status, run_metadata)
   1022 

KeyboardInterrupt: 

During handling of the above exception, another exception occurred:

AssertionError                            Traceback (most recent call last)
<ipython-input-12-1ca49e780838> in <module>()
     98     except KeyboardInterrupt:
     99         print("\r\nUser stopped the training.")
--> 100         assert(False)
    101         break
    102 

AssertionError: 

In [13]:
# Plot the real versus predicted values for some of the validation data
plot_validate(ValidateBatchGen(batch_size, model, training), model, 2000, "Angle prediction")


Predicted.
array([[ 0.  ],
       [ 0.  ],
       [ 0.  ],
       ..., 
       [-0.25],
       [-0.25],
       [-0.25]], dtype=float32)
array([[ 0.00430454],
       [ 0.0032889 ],
       [ 0.00440927],
       ..., 
       [-0.22995049],
       [-0.22813967],
       [-0.22288063]], dtype=float32)
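plot_validate itself is defined earlier in the notebook and is not shown in this section. Purely as a reading aid, here is a minimal sketch of what a helper with this signature might do, assuming the generator yields (frames, targets) batches, that the targets are a single array, and that matplotlib is available; the real implementation may differ:

import numpy as np
import matplotlib.pyplot as plt

def plot_validate_sketch(gen, model, count, title):
    # Illustrative stand-in for plot_validate: gather `count` validation samples,
    # predict with the model, print both arrays and plot actual vs. predicted.
    truths, preds = [], []
    seen = 0
    while seen < count:
        frames, targets = next(gen)              # assumed (inputs, targets) batches
        truths.append(np.asarray(targets))
        preds.append(model.predict_on_batch(frames))
        seen += len(truths[-1])
    y_true = np.concatenate(truths)[:count]
    y_pred = np.concatenate(preds)[:count]
    print("Predicted.")
    print(repr(y_true))
    print(repr(y_pred))
    plt.figure()
    plt.plot(y_true, label="actual")
    plt.plot(y_pred, label="predicted")
    plt.title(title)
    plt.legend()
    plt.show()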

In [14]:
lr = 0.0001
print("Updated lr to %f" % lr)
model.compile(optimizer=keras.optimizers.RMSprop(lr=lr),
              loss=[mse_wrap],
              metrics=[mse])

start_epoch = epoch + 1
for epoch in range(start_epoch,60):
    try:
        model.fit_generator(TrainBatchGen(batch_size, model, training), batches_training_per_epoch, epochs=epoch+1, verbose=1, callbacks=None, class_weight=None, max_q_size=10, workers=1, validation_data=ValidateBatchGen(batch_size, model, training), validation_steps = batches_validation_per_epoch, pickle_safe=False, initial_epoch=epoch)
        model.save_weights(WEIGHTS_FNAME % epoch)
        model.save(MODELS_FNAME % epoch)
        print(("Wrote model to " + WEIGHTS_FNAME )  % epoch)
    except KeyboardInterrupt:
        print("\r\nUser stopped the training.")
        # Deliberately fail the cell here so the remaining stages below do not run after a manual stop (the break is never reached).
        assert(False)
        break

# Plot the real versus predicted values for some of the validation data
plot_validate(ValidateBatchGen(batch_size, model, training), model, 2000, "Angle prediction")        

lr = 0.00001
print("Updated lr to %f" % lr)
model.compile(optimizer=keras.optimizers.RMSprop(lr=lr),
              loss=[mse_wrap],
              metrics=[mse])

start_epoch = epoch + 1
for epoch in range(start_epoch,300):
    try:
        model.fit_generator(TrainBatchGen(batch_size, model, training), batches_training_per_epoch, epochs=epoch+1, verbose=1, callbacks=None, class_weight=None, max_q_size=10, workers=1, validation_data=ValidateBatchGen(batch_size, model, training), validation_steps = batches_validation_per_epoch, pickle_safe=False, initial_epoch=epoch)
        model.save_weights(WEIGHTS_FNAME % epoch)
        model.save(MODELS_FNAME % epoch)
        print(("Wrote model to " + WEIGHTS_FNAME )  % epoch)
    except KeyboardInterrupt:
        print("\r\nUser stopped the training.")
        assert(False)
        break


Updated lr to 0.000100
Epoch 33/33
3897/3897 [==============================] - 307s - loss: 0.0196 - mse: 0.0262 - val_loss: 0.0075 - val_mse: 0.0075
Wrote model to .\RodAngle\angle_weights_32.hdf
Epoch 34/34
3897/3897 [==============================] - 265s - loss: 0.0160 - mse: 0.0160 - val_loss: 0.0150 - val_mse: 0.0150
Wrote model to .\RodAngle\angle_weights_33.hdf
Epoch 35/35
3897/3897 [==============================] - 258s - loss: 0.0146 - mse: 0.0146 - val_loss: 0.0045 - val_mse: 0.0045
Wrote model to .\RodAngle\angle_weights_34.hdf
Epoch 36/36
3897/3897 [==============================] - 259s - loss: 0.0148 - mse: 0.0148 - val_loss: 0.0034 - val_mse: 0.0034
Wrote model to .\RodAngle\angle_weights_35.hdf
Epoch 37/37
3897/3897 [==============================] - 258s - loss: 0.0148 - mse: 0.0148 - val_loss: 1.7341e-04 - val_mse: 1.7341e-04
Wrote model to .\RodAngle\angle_weights_36.hdf
Epoch 38/38
3897/3897 [==============================] - 258s - loss: 0.0137 - mse: 0.0137 - val_loss: 6.6750e-04 - val_mse: 6.6750e-04
Wrote model to .\RodAngle\angle_weights_37.hdf
Epoch 39/39
3897/3897 [==============================] - 258s - loss: 0.0142 - mse: 0.0142 - val_loss: 0.0028 - val_mse: 0.0028
Wrote model to .\RodAngle\angle_weights_38.hdf
Epoch 40/40
3897/3897 [==============================] - 259s - loss: 0.0134 - mse: 0.0134 - val_loss: 0.0042 - val_mse: 0.0042
Wrote model to .\RodAngle\angle_weights_39.hdf
Epoch 41/41
3897/3897 [==============================] - 258s - loss: 0.0133 - mse: 0.0133 - val_loss: 0.0082 - val_mse: 0.0082
Wrote model to .\RodAngle\angle_weights_40.hdf
Epoch 42/42
3897/3897 [==============================] - 258s - loss: 0.0127 - mse: 0.0127 - val_loss: 0.0016 - val_mse: 0.0016
Wrote model to .\RodAngle\angle_weights_41.hdf
Epoch 43/43
3897/3897 [==============================] - 259s - loss: 0.0127 - mse: 0.0128 - val_loss: 2.5043e-04 - val_mse: 2.5043e-04
Wrote model to .\RodAngle\angle_weights_42.hdf
Epoch 44/44
3897/3897 [==============================] - 259s - loss: 0.0127 - mse: 0.0127 - val_loss: 0.0027 - val_mse: 0.0027
Wrote model to .\RodAngle\angle_weights_43.hdf
Epoch 45/45
3897/3897 [==============================] - 259s - loss: 0.0135 - mse: 0.0135 - val_loss: 5.1221e-04 - val_mse: 5.1221e-04
Wrote model to .\RodAngle\angle_weights_44.hdf
Epoch 46/46
3897/3897 [==============================] - 259s - loss: 0.0125 - mse: 0.0125 - val_loss: 8.5243e-04 - val_mse: 8.5243e-04
Wrote model to .\RodAngle\angle_weights_45.hdf
Epoch 47/47
3897/3897 [==============================] - 260s - loss: 0.0119 - mse: 0.0119 - val_loss: 0.0020 - val_mse: 0.0020
Wrote model to .\RodAngle\angle_weights_46.hdf
Epoch 48/48
3897/3897 [==============================] - 260s - loss: 0.0118 - mse: 0.0118 - val_loss: 0.0012 - val_mse: 0.0012
Wrote model to .\RodAngle\angle_weights_47.hdf
Epoch 49/49
3897/3897 [==============================] - 260s - loss: 0.0123 - mse: 0.0123 - val_loss: 0.0020 - val_mse: 0.0020
Wrote model to .\RodAngle\angle_weights_48.hdf
Epoch 50/50
3897/3897 [==============================] - 260s - loss: 0.0116 - mse: 0.0116 - val_loss: 0.0064 - val_mse: 0.0064
Wrote model to .\RodAngle\angle_weights_49.hdf
Epoch 51/51
3897/3897 [==============================] - 260s - loss: 0.0112 - mse: 0.0112 - val_loss: 0.0044 - val_mse: 0.0044
Wrote model to .\RodAngle\angle_weights_50.hdf
Epoch 52/52
3897/3897 [==============================] - 260s - loss: 0.0122 - mse: 0.0122 - val_loss: 0.0055 - val_mse: 0.0055
Wrote model to .\RodAngle\angle_weights_51.hdf
Epoch 53/53
3897/3897 [==============================] - 260s - loss: 0.0120 - mse: 0.0120 - val_loss: 0.0022 - val_mse: 0.0022
Wrote model to .\RodAngle\angle_weights_52.hdf
Epoch 54/54
3897/3897 [==============================] - 261s - loss: 0.0105 - mse: 0.0105 - val_loss: 0.0014 - val_mse: 0.0014
Wrote model to .\RodAngle\angle_weights_53.hdf
Epoch 55/55
3897/3897 [==============================] - 260s - loss: 0.0116 - mse: 0.0116 - val_loss: 0.0019 - val_mse: 0.0019
Wrote model to .\RodAngle\angle_weights_54.hdf
Epoch 56/56
3897/3897 [==============================] - 260s - loss: 0.0114 - mse: 0.0114 - val_loss: 0.0023 - val_mse: 0.0023
Wrote model to .\RodAngle\angle_weights_55.hdf
Epoch 57/57
3897/3897 [==============================] - 260s - loss: 0.0118 - mse: 0.0118 - val_loss: 0.0016 - val_mse: 0.0016
Wrote model to .\RodAngle\angle_weights_56.hdf
Epoch 58/58
3897/3897 [==============================] - 260s - loss: 0.0111 - mse: 0.0111 - val_loss: 7.0891e-04 - val_mse: 7.0891e-04
Wrote model to .\RodAngle\angle_weights_57.hdf
Epoch 59/59
3897/3897 [==============================] - 259s - loss: 0.0118 - mse: 0.0118 - val_loss: 0.0039 - val_mse: 0.0039
Wrote model to .\RodAngle\angle_weights_58.hdf
Epoch 60/60
3897/3897 [==============================] - 259s - loss: 0.0119 - mse: 0.0119 - val_loss: 0.0021 - val_mse: 0.0021
Wrote model to .\RodAngle\angle_weights_59.hdf
Predicted.
array([[ 0.  ],
       [ 0.  ],
       [ 0.  ],
       ..., 
       [-0.25],
       [-0.25],
       [-0.25]], dtype=float32)
array([[-0.00871182],
       [-0.00920091],
       [-0.00936639],
       ..., 
       [-0.24458215],
       [-0.25713637],
       [-0.2376346 ]], dtype=float32)
Updated lr to 0.000010
Epoch 61/61
3897/3897 [==============================] - 256s - loss: 0.0086 - mse: 0.0086 - val_loss: 7.4868e-04 - val_mse: 7.4868e-04
Wrote model to .\RodAngle\angle_weights_60.hdf
Epoch 62/62
3897/3897 [==============================] - 255s - loss: 0.0085 - mse: 0.0085 - val_loss: 4.3631e-04 - val_mse: 4.3631e-04
Wrote model to .\RodAngle\angle_weights_61.hdf
Epoch 63/63
3897/3897 [==============================] - 255s - loss: 0.0091 - mse: 0.0091 - val_loss: 5.9308e-04 - val_mse: 5.9308e-04
Wrote model to .\RodAngle\angle_weights_62.hdf
Epoch 64/64
3897/3897 [==============================] - 255s - loss: 0.0085 - mse: 0.0085 - val_loss: 0.0021 - val_mse: 0.0021
Wrote model to .\RodAngle\angle_weights_63.hdf
Epoch 65/65
3897/3897 [==============================] - 255s - loss: 0.0086 - mse: 0.0086 - val_loss: 0.0017 - val_mse: 0.0017
Wrote model to .\RodAngle\angle_weights_64.hdf
Epoch 66/66
3897/3897 [==============================] - 255s - loss: 0.0087 - mse: 0.0087 - val_loss: 0.0022 - val_mse: 0.0022
Wrote model to .\RodAngle\angle_weights_65.hdf
Epoch 67/67
3897/3897 [==============================] - 255s - loss: 0.0088 - mse: 0.0088 - val_loss: 5.0133e-04 - val_mse: 5.0133e-04
Wrote model to .\RodAngle\angle_weights_66.hdf
Epoch 68/68
3897/3897 [==============================] - 255s - loss: 0.0087 - mse: 0.0087 - val_loss: 3.4003e-04 - val_mse: 3.4003e-04
Wrote model to .\RodAngle\angle_weights_67.hdf
Epoch 69/69
3897/3897 [==============================] - 255s - loss: 0.0088 - mse: 0.0088 - val_loss: 0.0021 - val_mse: 0.0021
Wrote model to .\RodAngle\angle_weights_68.hdf
Epoch 70/70
3897/3897 [==============================] - 255s - loss: 0.0094 - mse: 0.0094 - val_loss: 2.0874e-04 - val_mse: 2.0874e-04
Wrote model to .\RodAngle\angle_weights_69.hdf
Epoch 71/71
3897/3897 [==============================] - 255s - loss: 0.0087 - mse: 0.0087 - val_loss: 8.9519e-05 - val_mse: 8.9519e-05
Wrote model to .\RodAngle\angle_weights_70.hdf
Epoch 72/72
3897/3897 [==============================] - 255s - loss: 0.0089 - mse: 0.0089 - val_loss: 1.6339e-04 - val_mse: 1.6339e-04
Wrote model to .\RodAngle\angle_weights_71.hdf
Epoch 73/73
3897/3897 [==============================] - 255s - loss: 0.0086 - mse: 0.0086 - val_loss: 0.0016 - val_mse: 0.0016
Wrote model to .\RodAngle\angle_weights_72.hdf
Epoch 74/74
3897/3897 [==============================] - 255s - loss: 0.0092 - mse: 0.0092 - val_loss: 0.0011 - val_mse: 0.0011
Wrote model to .\RodAngle\angle_weights_73.hdf
Epoch 75/75
3897/3897 [==============================] - 255s - loss: 0.0093 - mse: 0.0093 - val_loss: 0.0024 - val_mse: 0.0024
Wrote model to .\RodAngle\angle_weights_74.hdf
Epoch 76/76
3897/3897 [==============================] - 255s - loss: 0.0087 - mse: 0.0087 - val_loss: 0.0014 - val_mse: 0.0014
Wrote model to .\RodAngle\angle_weights_75.hdf
Epoch 77/77
3897/3897 [==============================] - 255s - loss: 0.0089 - mse: 0.0089 - val_loss: 9.4685e-04 - val_mse: 9.4685e-04
Wrote model to .\RodAngle\angle_weights_76.hdf
Epoch 78/78
3897/3897 [==============================] - 255s - loss: 0.0085 - mse: 0.0085 - val_loss: 4.7389e-04 - val_mse: 4.7389e-04
Wrote model to .\RodAngle\angle_weights_77.hdf
Epoch 79/79
3897/3897 [==============================] - 255s - loss: 0.0088 - mse: 0.0088 - val_loss: 5.8094e-04 - val_mse: 5.8094e-04
Wrote model to .\RodAngle\angle_weights_78.hdf
Epoch 80/80
3897/3897 [==============================] - 255s - loss: 0.0092 - mse: 0.0092 - val_loss: 4.0367e-04 - val_mse: 4.0367e-04
Wrote model to .\RodAngle\angle_weights_79.hdf
Epoch 81/81
3897/3897 [==============================] - 255s - loss: 0.0086 - mse: 0.0086 - val_loss: 2.0779e-04 - val_mse: 2.0779e-04
Wrote model to .\RodAngle\angle_weights_80.hdf
Epoch 82/82
3897/3897 [==============================] - 255s - loss: 0.0091 - mse: 0.0091 - val_loss: 0.0022 - val_mse: 0.0022
Wrote model to .\RodAngle\angle_weights_81.hdf
Epoch 83/83
3897/3897 [==============================] - 255s - loss: 0.0093 - mse: 0.0093 - val_loss: 0.0016 - val_mse: 0.0016
Wrote model to .\RodAngle\angle_weights_82.hdf
Epoch 84/84
3897/3897 [==============================] - 255s - loss: 0.0087 - mse: 0.0087 - val_loss: 0.0011 - val_mse: 0.0011
Wrote model to .\RodAngle\angle_weights_83.hdf
Epoch 85/85
3897/3897 [==============================] - 255s - loss: 0.0095 - mse: 0.0095 - val_loss: 4.5566e-04 - val_mse: 4.5566e-04
Wrote model to .\RodAngle\angle_weights_84.hdf
Epoch 86/86
3897/3897 [==============================] - 255s - loss: 0.0090 - mse: 0.0090 - val_loss: 0.0035 - val_mse: 0.0035
Wrote model to .\RodAngle\angle_weights_85.hdf
Epoch 87/87
3897/3897 [==============================] - 255s - loss: 0.0089 - mse: 0.0089 - val_loss: 0.0029 - val_mse: 0.0029
Wrote model to .\RodAngle\angle_weights_86.hdf
Epoch 88/88
3897/3897 [==============================] - 255s - loss: 0.0090 - mse: 0.0090 - val_loss: 3.3710e-04 - val_mse: 3.3710e-04
Wrote model to .\RodAngle\angle_weights_87.hdf
Epoch 89/89
3897/3897 [==============================] - 255s - loss: 0.0101 - mse: 0.0101 - val_loss: 3.7786e-04 - val_mse: 3.7786e-04
Wrote model to .\RodAngle\angle_weights_88.hdf
Epoch 90/90
3897/3897 [==============================] - 255s - loss: 0.0092 - mse: 0.0092 - val_loss: 9.9336e-04 - val_mse: 9.9336e-04
Wrote model to .\RodAngle\angle_weights_89.hdf
Epoch 91/91
3897/3897 [==============================] - 255s - loss: 0.0095 - mse: 0.0095 - val_loss: 0.0036 - val_mse: 0.0036
Wrote model to .\RodAngle\angle_weights_90.hdf
Epoch 92/92
3897/3897 [==============================] - 255s - loss: 0.0090 - mse: 0.0090 - val_loss: 0.0049 - val_mse: 0.0049
Wrote model to .\RodAngle\angle_weights_91.hdf
Epoch 93/93
3897/3897 [==============================] - 255s - loss: 0.0093 - mse: 0.0093 - val_loss: 0.0018 - val_mse: 0.0018
Wrote model to .\RodAngle\angle_weights_92.hdf
Epoch 94/94
3897/3897 [==============================] - 255s - loss: 0.0089 - mse: 0.0089 - val_loss: 2.2369e-04 - val_mse: 2.2369e-04
Wrote model to .\RodAngle\angle_weights_93.hdf
Epoch 95/95
3897/3897 [==============================] - 255s - loss: 0.0094 - mse: 0.0094 - val_loss: 0.0010 - val_mse: 0.0010
Wrote model to .\RodAngle\angle_weights_94.hdf
Epoch 96/96
3897/3897 [==============================] - 255s - loss: 0.0092 - mse: 0.0092 - val_loss: 2.0304e-04 - val_mse: 2.0304e-04
Wrote model to .\RodAngle\angle_weights_95.hdf
Epoch 97/97
3897/3897 [==============================] - 255s - loss: 0.0095 - mse: 0.0095 - val_loss: 0.0049 - val_mse: 0.0049
Wrote model to .\RodAngle\angle_weights_96.hdf
Epoch 98/98
3897/3897 [==============================] - 255s - loss: 0.0094 - mse: 0.0094 - val_loss: 3.7581e-04 - val_mse: 3.7581e-04
Wrote model to .\RodAngle\angle_weights_97.hdf
Epoch 99/99
3897/3897 [==============================] - 255s - loss: 0.0093 - mse: 0.0093 - val_loss: 0.0032 - val_mse: 0.0032
Wrote model to .\RodAngle\angle_weights_98.hdf
Epoch 100/100
3897/3897 [==============================] - 255s - loss: 0.0095 - mse: 0.0095 - val_loss: 0.0016 - val_mse: 0.0016
Wrote model to .\RodAngle\angle_weights_99.hdf
Epoch 101/101
3897/3897 [==============================] - 255s - loss: 0.0085 - mse: 0.0085 - val_loss: 0.0012 - val_mse: 0.0012
Wrote model to .\RodAngle\angle_weights_100.hdf
Epoch 102/102
3897/3897 [==============================] - 255s - loss: 0.0092 - mse: 0.0092 - val_loss: 5.9304e-04 - val_mse: 5.9304e-04
Wrote model to .\RodAngle\angle_weights_101.hdf
Epoch 103/103
3897/3897 [==============================] - 255s - loss: 0.0099 - mse: 0.0099 - val_loss: 0.0024 - val_mse: 0.0024
Wrote model to .\RodAngle\angle_weights_102.hdf
Epoch 104/104
3897/3897 [==============================] - 255s - loss: 0.0092 - mse: 0.0092 - val_loss: 2.9068e-04 - val_mse: 2.9068e-04
Wrote model to .\RodAngle\angle_weights_103.hdf
Epoch 105/105
3897/3897 [==============================] - 255s - loss: 0.0092 - mse: 0.0092 - val_loss: 0.0064 - val_mse: 0.0064
Wrote model to .\RodAngle\angle_weights_104.hdf
Epoch 106/106
3897/3897 [==============================] - 255s - loss: 0.0096 - mse: 0.0096 - val_loss: 0.0032 - val_mse: 0.0032
Wrote model to .\RodAngle\angle_weights_105.hdf
Epoch 107/107
3897/3897 [==============================] - 255s - loss: 0.0096 - mse: 0.0096 - val_loss: 0.0042 - val_mse: 0.0042
Wrote model to .\RodAngle\angle_weights_106.hdf
Epoch 108/108
3897/3897 [==============================] - 255s - loss: 0.0092 - mse: 0.0092 - val_loss: 0.0050 - val_mse: 0.0050
Wrote model to .\RodAngle\angle_weights_107.hdf
Epoch 109/109
3897/3897 [==============================] - 255s - loss: 0.0093 - mse: 0.0093 - val_loss: 0.0026 - val_mse: 0.0026
Wrote model to .\RodAngle\angle_weights_108.hdf
Epoch 110/110
3897/3897 [==============================] - 255s - loss: 0.0093 - mse: 0.0093 - val_loss: 0.0024 - val_mse: 0.0024
Wrote model to .\RodAngle\angle_weights_109.hdf
Epoch 111/111
3897/3897 [==============================] - 255s - loss: 0.0096 - mse: 0.0096 - val_loss: 0.0036 - val_mse: 0.0036
Wrote model to .\RodAngle\angle_weights_110.hdf
Epoch 112/112
3897/3897 [==============================] - 255s - loss: 0.0091 - mse: 0.0091 - val_loss: 0.0014 - val_mse: 0.0014
Wrote model to .\RodAngle\angle_weights_111.hdf
Epoch 113/113
3897/3897 [==============================] - 255s - loss: 0.0090 - mse: 0.0090 - val_loss: 0.0021 - val_mse: 0.0021
Wrote model to .\RodAngle\angle_weights_112.hdf
Epoch 114/114
3897/3897 [==============================] - 255s - loss: 0.0099 - mse: 0.0099 - val_loss: 0.0035 - val_mse: 0.0035
Wrote model to .\RodAngle\angle_weights_113.hdf
Epoch 115/115
3897/3897 [==============================] - 255s - loss: 0.0096 - mse: 0.0096 - val_loss: 0.0020 - val_mse: 0.0020
Wrote model to .\RodAngle\angle_weights_114.hdf
Epoch 116/116
3897/3897 [==============================] - 255s - loss: 0.0099 - mse: 0.0099 - val_loss: 0.0011 - val_mse: 0.0011
Wrote model to .\RodAngle\angle_weights_115.hdf
Epoch 117/117
3897/3897 [==============================] - 255s - loss: 0.0096 - mse: 0.0096 - val_loss: 0.0029 - val_mse: 0.0029
Wrote model to .\RodAngle\angle_weights_116.hdf
Epoch 118/118
3897/3897 [==============================] - 255s - loss: 0.0094 - mse: 0.0094 - val_loss: 0.0021 - val_mse: 0.0021
Wrote model to .\RodAngle\angle_weights_117.hdf
Epoch 119/119
3897/3897 [==============================] - 255s - loss: 0.0096 - mse: 0.0096 - val_loss: 3.7451e-04 - val_mse: 3.7451e-04
Wrote model to .\RodAngle\angle_weights_118.hdf
Epoch 120/120
3897/3897 [==============================] - 255s - loss: 0.0095 - mse: 0.0095 - val_loss: 0.0060 - val_mse: 0.0060
Wrote model to .\RodAngle\angle_weights_119.hdf
Epoch 121/121
3897/3897 [==============================] - 255s - loss: 0.0095 - mse: 0.0095 - val_loss: 0.0042 - val_mse: 0.0042
Wrote model to .\RodAngle\angle_weights_120.hdf
Epoch 122/122
3897/3897 [==============================] - 255s - loss: 0.0095 - mse: 0.0095 - val_loss: 2.0626e-04 - val_mse: 2.0626e-04
Wrote model to .\RodAngle\angle_weights_121.hdf
Epoch 123/123
3897/3897 [==============================] - 255s - loss: 0.0101 - mse: 0.0101 - val_loss: 0.0059 - val_mse: 0.0059
Wrote model to .\RodAngle\angle_weights_122.hdf
Epoch 124/124
3897/3897 [==============================] - 255s - loss: 0.0100 - mse: 0.0100 - val_loss: 0.0040 - val_mse: 0.0040
Wrote model to .\RodAngle\angle_weights_123.hdf
Epoch 125/125
3897/3897 [==============================] - 255s - loss: 0.0095 - mse: 0.0095 - val_loss: 0.0055 - val_mse: 0.0055
Wrote model to .\RodAngle\angle_weights_124.hdf
Epoch 126/126
3897/3897 [==============================] - 255s - loss: 0.0092 - mse: 0.0092 - val_loss: 0.0055 - val_mse: 0.0055
Wrote model to .\RodAngle\angle_weights_125.hdf
Epoch 127/127
3897/3897 [==============================] - 255s - loss: 0.0098 - mse: 0.0098 - val_loss: 0.0029 - val_mse: 0.0029
Wrote model to .\RodAngle\angle_weights_126.hdf
Epoch 128/128
3897/3897 [==============================] - 255s - loss: 0.0100 - mse: 0.0100 - val_loss: 0.0042 - val_mse: 0.0042
Wrote model to .\RodAngle\angle_weights_127.hdf
Epoch 129/129
3897/3897 [==============================] - 255s - loss: 0.0101 - mse: 0.0101 - val_loss: 0.0014 - val_mse: 0.0014
Wrote model to .\RodAngle\angle_weights_128.hdf
Epoch 130/130
3897/3897 [==============================] - 255s - loss: 0.0097 - mse: 0.0097 - val_loss: 2.1375e-04 - val_mse: 2.1375e-04
Wrote model to .\RodAngle\angle_weights_129.hdf
Epoch 131/131
3897/3897 [==============================] - 255s - loss: 0.0095 - mse: 0.0095 - val_loss: 0.0057 - val_mse: 0.0057
Wrote model to .\RodAngle\angle_weights_130.hdf
Epoch 132/132
3897/3897 [==============================] - 255s - loss: 0.0105 - mse: 0.0105 - val_loss: 4.5525e-04 - val_mse: 4.5525e-04
Wrote model to .\RodAngle\angle_weights_131.hdf
Epoch 133/133
3897/3897 [==============================] - 255s - loss: 0.0098 - mse: 0.0098 - val_loss: 3.9326e-04 - val_mse: 3.9326e-04
Wrote model to .\RodAngle\angle_weights_132.hdf
Epoch 134/134
3897/3897 [==============================] - 255s - loss: 0.0097 - mse: 0.0097 - val_loss: 0.0027 - val_mse: 0.0027
Wrote model to .\RodAngle\angle_weights_133.hdf
Epoch 135/135
3897/3897 [==============================] - 255s - loss: 0.0097 - mse: 0.0097 - val_loss: 0.0083 - val_mse: 0.0083
Wrote model to .\RodAngle\angle_weights_134.hdf
Epoch 136/136
3897/3897 [==============================] - 255s - loss: 0.0098 - mse: 0.0098 - val_loss: 0.0043 - val_mse: 0.0043
Wrote model to .\RodAngle\angle_weights_135.hdf
Epoch 137/137
3897/3897 [==============================] - 255s - loss: 0.0096 - mse: 0.0096 - val_loss: 0.0028 - val_mse: 0.0028
Wrote model to .\RodAngle\angle_weights_136.hdf
Epoch 138/138
3897/3897 [==============================] - 255s - loss: 0.0097 - mse: 0.0097 - val_loss: 9.5030e-04 - val_mse: 9.5030e-04
Wrote model to .\RodAngle\angle_weights_137.hdf
Epoch 139/139
3897/3897 [==============================] - 255s - loss: 0.0103 - mse: 0.0103 - val_loss: 0.0055 - val_mse: 0.0055
Wrote model to .\RodAngle\angle_weights_138.hdf
Epoch 140/140
3897/3897 [==============================] - 255s - loss: 0.0100 - mse: 0.0100 - val_loss: 0.0027 - val_mse: 0.0027
Wrote model to .\RodAngle\angle_weights_139.hdf
Epoch 141/141
3897/3897 [==============================] - 255s - loss: 0.0094 - mse: 0.0094 - val_loss: 3.5672e-04 - val_mse: 3.5672e-04
Wrote model to .\RodAngle\angle_weights_140.hdf
Epoch 142/142
3897/3897 [==============================] - 255s - loss: 0.0097 - mse: 0.0097 - val_loss: 0.0022 - val_mse: 0.0022
Wrote model to .\RodAngle\angle_weights_141.hdf
Epoch 143/143
3897/3897 [==============================] - 255s - loss: 0.0100 - mse: 0.0100 - val_loss: 0.0027 - val_mse: 0.0027
Wrote model to .\RodAngle\angle_weights_142.hdf
Epoch 144/144
3897/3897 [==============================] - 255s - loss: 0.0097 - mse: 0.0097 - val_loss: 0.0029 - val_mse: 0.0029
Wrote model to .\RodAngle\angle_weights_143.hdf
Epoch 145/145
3897/3897 [==============================] - 255s - loss: 0.0098 - mse: 0.0098 - val_loss: 0.0087 - val_mse: 0.0087
Wrote model to .\RodAngle\angle_weights_144.hdf
Epoch 146/146
3897/3897 [==============================] - 255s - loss: 0.0098 - mse: 0.0098 - val_loss: 0.0026 - val_mse: 0.0026
Wrote model to .\RodAngle\angle_weights_145.hdf
Epoch 147/147
3897/3897 [==============================] - 255s - loss: 0.0100 - mse: 0.0100 - val_loss: 0.0047 - val_mse: 0.0047
Wrote model to .\RodAngle\angle_weights_146.hdf
Epoch 148/148
3897/3897 [==============================] - 255s - loss: 0.0104 - mse: 0.0104 - val_loss: 2.5371e-04 - val_mse: 2.5371e-04
Wrote model to .\RodAngle\angle_weights_147.hdf
Epoch 149/149
3897/3897 [==============================] - 255s - loss: 0.0098 - mse: 0.0098 - val_loss: 0.0054 - val_mse: 0.0054
Wrote model to .\RodAngle\angle_weights_148.hdf
Epoch 150/150
3897/3897 [==============================] - 255s - loss: 0.0105 - mse: 0.0105 - val_loss: 0.0015 - val_mse: 0.0015
Wrote model to .\RodAngle\angle_weights_149.hdf
Epoch 151/151
3897/3897 [==============================] - 265s - loss: 0.0103 - mse: 0.0103 - val_loss: 0.0061 - val_mse: 0.0061
Wrote model to .\RodAngle\angle_weights_150.hdf
Epoch 152/152
 407/3897 [==>...........................] - ETA: 264s - loss: 0.0099 - mse: 0.0099
User stopped the training.
---------------------------------------------------------------------------
KeyboardInterrupt                         Traceback (most recent call last)
<ipython-input-14-feae26146dc4> in <module>()
     31     try:
---> 32         model.fit_generator(TrainBatchGen(batch_size, model, training), batches_training_per_epoch, epochs=epoch+1, verbose=1, callbacks=None, class_weight=None, max_q_size=10, workers=1, validation_data=ValidateBatchGen(batch_size, model, training), validation_steps = batches_validation_per_epoch, pickle_safe=False, initial_epoch=epoch)
     33         model.save_weights(WEIGHTS_FNAME % epoch)

C:\local\Anaconda3-4.1.1-Windows-x86_64\lib\site-packages\keras\legacy\interfaces.py in wrapper(*args, **kwargs)
     87                               '` call to the Keras 2 API: ' + signature, stacklevel=2)
---> 88             return func(*args, **kwargs)
     89         wrapper._legacy_support_signature = inspect.getargspec(func)

C:\local\Anaconda3-4.1.1-Windows-x86_64\lib\site-packages\keras\engine\training.py in fit_generator(self, generator, steps_per_epoch, epochs, verbose, callbacks, validation_data, validation_steps, class_weight, max_q_size, workers, pickle_safe, initial_epoch)
   1889                                                sample_weight=sample_weight,
-> 1890                                                class_weight=class_weight)
   1891 

C:\local\Anaconda3-4.1.1-Windows-x86_64\lib\site-packages\keras\engine\training.py in train_on_batch(self, x, y, sample_weight, class_weight)
   1632         self._make_train_function()
-> 1633         outputs = self.train_function(ins)
   1634         if len(outputs) == 1:

C:\local\Anaconda3-4.1.1-Windows-x86_64\lib\site-packages\keras\backend\tensorflow_backend.py in __call__(self, inputs)
   2228         updated = session.run(self.outputs + [self.updates_op],
-> 2229                               feed_dict=feed_dict)
   2230         return updated[:len(self.outputs)]

C:\local\Anaconda3-4.1.1-Windows-x86_64\lib\site-packages\tensorflow\python\client\session.py in run(self, fetches, feed_dict, options, run_metadata)
    777       result = self._run(None, fetches, feed_dict, options_ptr,
--> 778                          run_metadata_ptr)
    779       if run_metadata:

C:\local\Anaconda3-4.1.1-Windows-x86_64\lib\site-packages\tensorflow\python\client\session.py in _run(self, handle, fetches, feed_dict, options, run_metadata)
    981       results = self._do_run(handle, final_targets, final_fetches,
--> 982                              feed_dict_string, options, run_metadata)
    983     else:

C:\local\Anaconda3-4.1.1-Windows-x86_64\lib\site-packages\tensorflow\python\client\session.py in _do_run(self, handle, target_list, fetch_list, feed_dict, options, run_metadata)
   1031       return self._do_call(_run_fn, self._session, feed_dict, fetch_list,
-> 1032                            target_list, options, run_metadata)
   1033     else:

C:\local\Anaconda3-4.1.1-Windows-x86_64\lib\site-packages\tensorflow\python\client\session.py in _do_call(self, fn, *args)
   1038     try:
-> 1039       return fn(*args)
   1040     except errors.OpError as e:

C:\local\Anaconda3-4.1.1-Windows-x86_64\lib\site-packages\tensorflow\python\client\session.py in _run_fn(session, feed_dict, fetch_list, target_list, options, run_metadata)
   1020                                  feed_dict, fetch_list, target_list,
-> 1021                                  status, run_metadata)
   1022 

KeyboardInterrupt: 

During handling of the above exception, another exception occurred:

AssertionError                            Traceback (most recent call last)
<ipython-input-14-feae26146dc4> in <module>()
     36     except KeyboardInterrupt:
     37         print("\r\nUser stopped the training.")
---> 38         assert(False)
     39         break

AssertionError: 

In [15]:
# Plot the real versus predicted values for some of the validation data
plot_validate(ValidateBatchGen(batch_size, model, training), model, 2000, "Angle prediction")


Predicted.
array([[ 0. ],
       [ 0. ],
       [ 0. ],
       ..., 
       [ 0.5],
       [ 0.5],
       [ 0.5]], dtype=float32)
array([[-0.01090559],
       [-0.00516437],
       [-0.0193336 ],
       ..., 
       [ 0.39560071],
       [ 0.37355471],
       [ 0.35544991]], dtype=float32)

In [ ]:
data_path  = ".\\..\\..\\TrainingData\\Processed\\RodTrainingDataAngles\\Result\\settings_full.tsv"
transformer = VideoTransform( zoom_range=0.1, rotation_range=5, width_shift_range=0.1, height_shift_range=0.1, shear_range= 0.1, fill_mode='nearest', vertical_flip=False, horizontal_flip=True, horizontal_flip_invert_indices = [], horizontal_flip_reverse_indices = [], data_format='channels_last' )
training = TrainingInput(transformer, data_path, position_rel_indexes, frame_rel_indexes, 0.2)
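# Note: batches_training_per_epoch and batches_validation_per_epoch are not
# recalculated for the new angles dataset here, so the stages below still run
# 3897 steps per epoch from the earlier counts; a later cell recomputes them
# (11996 training / 2999 validation batches at batch size 1).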

lr = 0.0001
print("Updated lr to %f" % lr)
model.compile(optimizer=keras.optimizers.RMSprop(lr=lr),
              loss=[mse_wrap],
              metrics=[mse])

start_epoch = epoch + 1
for epoch in range(start_epoch,175):
    try:
        model.fit_generator(TrainBatchGen(batch_size, model, training), batches_training_per_epoch, epochs=epoch+1, verbose=1, callbacks=None, class_weight=None, max_q_size=10, workers=1, validation_data=ValidateBatchGen(batch_size, model, training), validation_steps = batches_validation_per_epoch, pickle_safe=False, initial_epoch=epoch)
        model.save_weights(WEIGHTS_FNAME % epoch)
        model.save(MODELS_FNAME % epoch)
        print(("Wrote model to " + WEIGHTS_FNAME )  % epoch)
    except KeyboardInterrupt:
        print("\r\nUser stopped the training.")
        assert(False)
        break

# Plot the real versus predicted values for some of the validation data
plot_validate(ValidateBatchGen(batch_size, model, training), model, 2000, "Angle prediction")        

lr = 0.00001
print("Updated lr to %f" % lr)
model.compile(optimizer=keras.optimizers.RMSprop(lr=lr),
              loss=[mse_wrap],
              metrics=[mse])

start_epoch = epoch + 1
for epoch in range(start_epoch,200):
    try:
        model.fit_generator(TrainBatchGen(batch_size, model, training), batches_training_per_epoch, epochs=epoch+1, verbose=1, callbacks=None, class_weight=None, max_q_size=10, workers=1, validation_data=ValidateBatchGen(batch_size, model, training), validation_steps = batches_validation_per_epoch, pickle_safe=False, initial_epoch=epoch)
        model.save_weights(WEIGHTS_FNAME % epoch)
        model.save(MODELS_FNAME % epoch)
        print(("Wrote model to " + WEIGHTS_FNAME )  % epoch)
    except KeyboardInterrupt:
        print("\r\nUser stopped the training.")
        assert(False)
        break

# Plot the real versus predicted values for some of the validation data
plot_validate(ValidateBatchGen(batch_size, model, training), model, 2000, "Angle prediction")        

lr = 0.000001
print("Updated lr to %f" % lr)
model.compile(optimizer=keras.optimizers.RMSprop(lr=lr),
              loss=[mse_wrap],
              metrics=[mse])

start_epoch = epoch + 1
for epoch in range(start_epoch,300):
    try:
        model.fit_generator(TrainBatchGen(batch_size, model, training), batches_training_per_epoch, epochs=epoch+1, verbose=1, callbacks=None, class_weight=None, max_q_size=10, workers=1, validation_data=ValidateBatchGen(batch_size, model, training), validation_steps = batches_validation_per_epoch, pickle_safe=False, initial_epoch=epoch)
        model.save_weights(WEIGHTS_FNAME % epoch)
        model.save(MODELS_FNAME % epoch)
        print(("Wrote model to " + WEIGHTS_FNAME )  % epoch)
    except KeyboardInterrupt:
        print("\r\nUser stopped the training.")
        assert(False)
        break


Creating training chunk from .\..\..\TrainingData\Processed\RodTrainingDataAngles\Result\chunk0.avi
.\..\..\TrainingData\Processed\RodTrainingDataAngles\Result\chunk0.avi
added 3204 new frames for a total of 3204
Creating training chunk from .\..\..\TrainingData\Processed\RodTrainingDataAngles\Result\chunk1.avi
.\..\..\TrainingData\Processed\RodTrainingDataAngles\Result\chunk1.avi
added 2763 new frames for a total of 5967
Creating training chunk from .\..\..\TrainingData\Processed\RodTrainingDataAngles\Result\chunk2.avi
.\..\..\TrainingData\Processed\RodTrainingDataAngles\Result\chunk2.avi
added 2355 new frames for a total of 8322
Creating training chunk from .\..\..\TrainingData\Processed\RodTrainingDataAngles\Result\chunk3.avi
.\..\..\TrainingData\Processed\RodTrainingDataAngles\Result\chunk3.avi
added 1667 new frames for a total of 9989
Creating training chunk from .\..\..\TrainingData\Processed\RodTrainingDataAngles\Result\chunk4.avi
.\..\..\TrainingData\Processed\RodTrainingDataAngles\Result\chunk4.avi
added 1172 new frames for a total of 11161
Creating training chunk from .\..\..\TrainingData\Processed\RodTrainingDataAngles\Result\chunk5.avi
.\..\..\TrainingData\Processed\RodTrainingDataAngles\Result\chunk5.avi
added 2190 new frames for a total of 13351
Creating training chunk from .\..\..\TrainingData\Processed\RodTrainingDataAngles\Result\chunk6.avi
.\..\..\TrainingData\Processed\RodTrainingDataAngles\Result\chunk6.avi
added 1644 new frames for a total of 14995
Updated lr to 0.000100
Epoch 153/153
3897/3897 [==============================] - 263s - loss: 0.1176 - mse: 0.1856 - val_loss: 0.0195 - val_mse: 0.0195
Wrote model to .\RodAngle\angle_weights_152.hdf
Epoch 154/154
3897/3897 [==============================] - 261s - loss: 0.0834 - mse: 0.1186 - val_loss: 0.0896 - val_mse: 0.1567
Wrote model to .\RodAngle\angle_weights_153.hdf
Epoch 155/155
3897/3897 [==============================] - 262s - loss: 0.0637 - mse: 0.1212 - val_loss: 0.0217 - val_mse: 0.0217
Wrote model to .\RodAngle\angle_weights_154.hdf
Epoch 156/156
3897/3897 [==============================] - 262s - loss: 0.0602 - mse: 0.1339 - val_loss: 0.0822 - val_mse: 0.1536
Wrote model to .\RodAngle\angle_weights_155.hdf
Epoch 157/157
3897/3897 [==============================] - 262s - loss: 0.0579 - mse: 0.1621 - val_loss: 0.0164 - val_mse: 0.0164
Wrote model to .\RodAngle\angle_weights_156.hdf
Epoch 158/158
3897/3897 [==============================] - 262s - loss: 0.0639 - mse: 0.1994 - val_loss: 0.0233 - val_mse: 0.0310
Wrote model to .\RodAngle\angle_weights_157.hdf
Epoch 159/159
3897/3897 [==============================] - 262s - loss: 0.0528 - mse: 0.1641 - val_loss: 0.0416 - val_mse: 0.1535
Wrote model to .\RodAngle\angle_weights_158.hdf
Epoch 160/160
3897/3897 [==============================] - 261s - loss: 0.0483 - mse: 0.1421 - val_loss: 0.0145 - val_mse: 0.0281
Wrote model to .\RodAngle\angle_weights_159.hdf
Epoch 161/161
3897/3897 [==============================] - 261s - loss: 0.0406 - mse: 0.1448 - val_loss: 0.0242 - val_mse: 0.0242
Wrote model to .\RodAngle\angle_weights_160.hdf
Epoch 162/162
3897/3897 [==============================] - 262s - loss: 0.0467 - mse: 0.1470 - val_loss: 0.0599 - val_mse: 0.1578
Wrote model to .\RodAngle\angle_weights_161.hdf
Epoch 163/163
3897/3897 [==============================] - 262s - loss: 0.0414 - mse: 0.1639 - val_loss: 0.0515 - val_mse: 0.1086
Wrote model to .\RodAngle\angle_weights_162.hdf
Epoch 164/164
3897/3897 [==============================] - 261s - loss: 0.0413 - mse: 0.1737 - val_loss: 0.0100 - val_mse: 0.0100
Wrote model to .\RodAngle\angle_weights_163.hdf
Epoch 165/165
3897/3897 [==============================] - 263s - loss: 0.0427 - mse: 0.1629 - val_loss: 0.1223 - val_mse: 0.3238
Wrote model to .\RodAngle\angle_weights_164.hdf
Epoch 166/166
3897/3897 [==============================] - 263s - loss: 0.0425 - mse: 0.1806 - val_loss: 0.0629 - val_mse: 0.2097
Wrote model to .\RodAngle\angle_weights_165.hdf
Epoch 167/167
3897/3897 [==============================] - 264s - loss: 0.0434 - mse: 0.1849 - val_loss: 0.0532 - val_mse: 0.1891
Wrote model to .\RodAngle\angle_weights_166.hdf
Epoch 168/168
3897/3897 [==============================] - 278s - loss: 0.0448 - mse: 0.1803 - val_loss: 0.0259 - val_mse: 0.1421
Wrote model to .\RodAngle\angle_weights_167.hdf
Epoch 169/169
3897/3897 [==============================] - 281s - loss: 0.0497 - mse: 0.1833 - val_loss: 0.0379 - val_mse: 0.2231
Wrote model to .\RodAngle\angle_weights_168.hdf
Epoch 170/170
3897/3897 [==============================] - 279s - loss: 0.0410 - mse: 0.1627 - val_loss: 0.0605 - val_mse: 0.1905
Wrote model to .\RodAngle\angle_weights_169.hdf
Epoch 171/171
3897/3897 [==============================] - 279s - loss: 0.0457 - mse: 0.1674 - val_loss: 0.0596 - val_mse: 0.1863
Wrote model to .\RodAngle\angle_weights_170.hdf
Epoch 172/172
3897/3897 [==============================] - 282s - loss: 0.0443 - mse: 0.1992 - val_loss: 0.0787 - val_mse: 0.2641
Wrote model to .\RodAngle\angle_weights_171.hdf
Epoch 173/173
3897/3897 [==============================] - 283s - loss: 0.0441 - mse: 0.1845 - val_loss: 0.0039 - val_mse: 0.0039
Wrote model to .\RodAngle\angle_weights_172.hdf
Epoch 174/174
3897/3897 [==============================] - 282s - loss: 0.0420 - mse: 0.1762 - val_loss: 0.0022 - val_mse: 0.0022
Wrote model to .\RodAngle\angle_weights_173.hdf
Epoch 175/175
3897/3897 [==============================] - 274s - loss: 0.0440 - mse: 0.1540 - val_loss: 0.0482 - val_mse: 0.1985
Wrote model to .\RodAngle\angle_weights_174.hdf
Predicted.
array([[-1.  ],
       [-0.75],
       [-0.5 ],
       ..., 
       [ 0.  ],
       [ 0.  ],
       [ 0.  ]], dtype=float32)
array([[-0.23341319],
       [-0.30664638],
       [-0.20474419],
       ..., 
       [ 0.0200251 ],
       [-0.01645105],
       [ 0.0200251 ]], dtype=float32)
Updated lr to 0.000010
Epoch 176/176
3897/3897 [==============================] - 261s - loss: 0.0274 - mse: 0.1505 - val_loss: 0.0283 - val_mse: 0.1144
Wrote model to .\RodAngle\angle_weights_175.hdf
Epoch 177/177
3897/3897 [==============================] - 282s - loss: 0.0263 - mse: 0.1479 - val_loss: 0.0241 - val_mse: 0.1023
Wrote model to .\RodAngle\angle_weights_176.hdf
Epoch 178/178
3897/3897 [==============================] - 268s - loss: 0.0261 - mse: 0.1476 - val_loss: 0.0270 - val_mse: 0.1181
Wrote model to .\RodAngle\angle_weights_177.hdf
Epoch 179/179
3897/3897 [==============================] - 265s - loss: 0.0250 - mse: 0.1599 - val_loss: 0.0115 - val_mse: 0.0115
Wrote model to .\RodAngle\angle_weights_178.hdf
Epoch 180/180
3897/3897 [==============================] - 278s - loss: 0.0257 - mse: 0.1501 - val_loss: 0.0088 - val_mse: 0.0088
Wrote model to .\RodAngle\angle_weights_179.hdf
Epoch 181/181
3897/3897 [==============================] - 278s - loss: 0.0249 - mse: 0.1513 - val_loss: 0.0088 - val_mse: 0.0088
Wrote model to .\RodAngle\angle_weights_180.hdf
Epoch 182/182
3897/3897 [==============================] - 274s - loss: 0.0261 - mse: 0.1507 - val_loss: 0.0371 - val_mse: 0.1874
Wrote model to .\RodAngle\angle_weights_181.hdf
Epoch 183/183
3897/3897 [==============================] - 273s - loss: 0.0257 - mse: 0.1560 - val_loss: 0.0445 - val_mse: 0.1892
Wrote model to .\RodAngle\angle_weights_182.hdf
Epoch 184/184
3897/3897 [==============================] - 278s - loss: 0.0253 - mse: 0.1353 - val_loss: 0.0329 - val_mse: 0.1951
Wrote model to .\RodAngle\angle_weights_183.hdf
Epoch 185/185
3897/3897 [==============================] - 278s - loss: 0.0253 - mse: 0.1471 - val_loss: 0.0387 - val_mse: 0.1817
Wrote model to .\RodAngle\angle_weights_184.hdf
Epoch 186/186
3897/3897 [==============================] - 278s - loss: 0.0233 - mse: 0.1508 - val_loss: 0.0074 - val_mse: 0.0074
Wrote model to .\RodAngle\angle_weights_185.hdf
Epoch 187/187
3897/3897 [==============================] - 273s - loss: 0.0244 - mse: 0.1403 - val_loss: 0.0465 - val_mse: 0.2001
Wrote model to .\RodAngle\angle_weights_186.hdf
Epoch 188/188
3897/3897 [==============================] - 277s - loss: 0.0252 - mse: 0.1263 - val_loss: 0.0083 - val_mse: 0.0083
Wrote model to .\RodAngle\angle_weights_187.hdf
Epoch 189/189
3897/3897 [==============================] - 292s - loss: 0.0249 - mse: 0.1627 - val_loss: 0.0278 - val_mse: 0.1769
Wrote model to .\RodAngle\angle_weights_188.hdf
Epoch 190/190
3897/3897 [==============================] - 289s - loss: 0.0236 - mse: 0.1470 - val_loss: 0.0119 - val_mse: 0.0119
Wrote model to .\RodAngle\angle_weights_189.hdf
Epoch 191/191
3897/3897 [==============================] - 292s - loss: 0.0241 - mse: 0.1650 - val_loss: 0.0507 - val_mse: 0.1612
Wrote model to .\RodAngle\angle_weights_190.hdf
Epoch 192/192
3897/3897 [==============================] - 292s - loss: 0.0256 - mse: 0.1544 - val_loss: 0.0395 - val_mse: 0.1845
Wrote model to .\RodAngle\angle_weights_191.hdf
Epoch 193/193
3897/3897 [==============================] - 292s - loss: 0.0257 - mse: 0.1511 - val_loss: 0.0364 - val_mse: 0.1526
Wrote model to .\RodAngle\angle_weights_192.hdf
Epoch 194/194
3897/3897 [==============================] - 293s - loss: 0.0259 - mse: 0.1555 - val_loss: 0.0165 - val_mse: 0.0165
Wrote model to .\RodAngle\angle_weights_193.hdf
Epoch 195/195
3897/3897 [==============================] - 293s - loss: 0.0234 - mse: 0.1416 - val_loss: 0.0428 - val_mse: 0.1688
Wrote model to .\RodAngle\angle_weights_194.hdf
Epoch 196/196
3897/3897 [==============================] - 292s - loss: 0.0243 - mse: 0.1552 - val_loss: 0.0088 - val_mse: 0.0309
Wrote model to .\RodAngle\angle_weights_195.hdf
Epoch 197/197
3897/3897 [==============================] - 293s - loss: 0.0256 - mse: 0.1525 - val_loss: 0.0418 - val_mse: 0.2144
Wrote model to .\RodAngle\angle_weights_196.hdf
Epoch 198/198
3897/3897 [==============================] - 295s - loss: 0.0254 - mse: 0.1626 - val_loss: 0.0261 - val_mse: 0.1060
Wrote model to .\RodAngle\angle_weights_197.hdf
Epoch 199/199
3897/3897 [==============================] - 295s - loss: 0.0234 - mse: 0.1591 - val_loss: 0.0424 - val_mse: 0.1951
Wrote model to .\RodAngle\angle_weights_198.hdf
Epoch 200/200
3897/3897 [==============================] - 276s - loss: 0.0224 - mse: 0.1603 - val_loss: 0.0259 - val_mse: 0.1062
Wrote model to .\RodAngle\angle_weights_199.hdf
Predicted.
array([[ 0.5       ],
       [ 0.5       ],
       [ 0.5       ],
       ..., 
       [-0.55555558],
       [-0.33333334],
       [-0.11111111]], dtype=float32)
array([[ 0.35905775],
       [ 0.32842273],
       [ 0.33210465],
       ..., 
       [-0.64380592],
       [-0.19681916],
       [-0.00815234]], dtype=float32)
Updated lr to 0.000001
Epoch 201/201
 301/3897 [=>............................] - ETA: 255s - loss: 0.0246 - mse: 0.1381
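The cell above repeats the same compile-then-fit block once per learning-rate stage. As a sketch only (train_stage is a hypothetical helper that reuses the notebook's existing model, training data, batch counts and filename templates, and re-raises on Ctrl-C instead of using assert), the stages could be folded into one function:

def train_stage(model, training, lr, start_epoch, end_epoch):
    # Hypothetical helper: recompile at the given learning rate, then run the
    # same per-epoch fit/save loop used throughout this notebook.
    print("Updated lr to %f" % lr)
    model.compile(optimizer=keras.optimizers.RMSprop(lr=lr),
                  loss=[mse_wrap],
                  metrics=[mse])
    for epoch in range(start_epoch, end_epoch):
        try:
            model.fit_generator(TrainBatchGen(batch_size, model, training),
                                batches_training_per_epoch,
                                epochs=epoch + 1, verbose=1,
                                validation_data=ValidateBatchGen(batch_size, model, training),
                                validation_steps=batches_validation_per_epoch,
                                max_q_size=10, workers=1,
                                initial_epoch=epoch)
            model.save_weights(WEIGHTS_FNAME % epoch)
            model.save(MODELS_FNAME % epoch)
            print(("Wrote model to " + WEIGHTS_FNAME) % epoch)
        except KeyboardInterrupt:
            print("\r\nUser stopped the training.")
            raise
    return end_epoch  # becomes the next stage's start_epoch

# e.g. next_epoch = train_stage(model, training, 0.0001,   epoch + 1,  175)
#      next_epoch = train_stage(model, training, 0.00001,  next_epoch, 200)
#      next_epoch = train_stage(model, training, 0.000001, next_epoch, 300)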

In [8]:
WEIGHTS_FNAME = '.\\RodAngle\\angle_weights_%i.hdf'
MODELS_FNAME = '.\\RodAngle\\angle_models_%i.h5'

data_path  = ".\\..\\..\\TrainingData\\Processed\\RodTrainingDataAngles\\Result\\settings_full.tsv"
transformer = VideoTransform( zoom_range=0.1, rotation_range=5, width_shift_range=0.1, height_shift_range=0.1, shear_range= 0.1, fill_mode='nearest', vertical_flip=False, horizontal_flip=True, horizontal_flip_invert_indices = [], horizontal_flip_reverse_indices = [], data_format='channels_last' )
training = TrainingInput(transformer, data_path, position_rel_indexes, frame_rel_indexes, 0.2)

batches_training_per_epoch = int(training.get_training_count() / batch_size)
batches_validation_per_epoch = int(training.get_validation_count() / batch_size)
print("Batch size %i: %i training batches, %i validation batches" % (batch_size, batches_training_per_epoch, batches_validation_per_epoch) )



# Load the best model result
epoch = 195
model.load_weights(WEIGHTS_FNAME % epoch, by_name=True)
print("Loaded model.")

# Plot the real versus predicted values for some of the validation data
plot_validate(ValidateBatchGen(batch_size, model, training), model, 2000, "Angle prediction")


Creating training chunk from .\..\..\TrainingData\Processed\RodTrainingDataAngles\Result\chunk0.avi
.\..\..\TrainingData\Processed\RodTrainingDataAngles\Result\chunk0.avi
added 3204 new frames for a total of 3204
Creating training chunk from .\..\..\TrainingData\Processed\RodTrainingDataAngles\Result\chunk1.avi
.\..\..\TrainingData\Processed\RodTrainingDataAngles\Result\chunk1.avi
added 2763 new frames for a total of 5967
Creating training chunk from .\..\..\TrainingData\Processed\RodTrainingDataAngles\Result\chunk2.avi
.\..\..\TrainingData\Processed\RodTrainingDataAngles\Result\chunk2.avi
added 2355 new frames for a total of 8322
Creating training chunk from .\..\..\TrainingData\Processed\RodTrainingDataAngles\Result\chunk3.avi
.\..\..\TrainingData\Processed\RodTrainingDataAngles\Result\chunk3.avi
added 1667 new frames for a total of 9989
Creating training chunk from .\..\..\TrainingData\Processed\RodTrainingDataAngles\Result\chunk4.avi
.\..\..\TrainingData\Processed\RodTrainingDataAngles\Result\chunk4.avi
added 1172 new frames for a total of 11161
Creating training chunk from .\..\..\TrainingData\Processed\RodTrainingDataAngles\Result\chunk5.avi
.\..\..\TrainingData\Processed\RodTrainingDataAngles\Result\chunk5.avi
added 2190 new frames for a total of 13351
Creating training chunk from .\..\..\TrainingData\Processed\RodTrainingDataAngles\Result\chunk6.avi
.\..\..\TrainingData\Processed\RodTrainingDataAngles\Result\chunk6.avi
added 1644 new frames for a total of 14995
Batch size 1: 11996 training batches, 2999 validation batches
Loaded model.
Predicted.
array([[ 1.        ],
       [ 0.875     ],
       [ 0.75      ],
       ..., 
       [ 0.41666666],
       [ 0.5       ],
       [ 0.58333331]], dtype=float32)
array([[ 0.6827268 ],
       [ 0.50471801],
       [ 0.46920675],
       ..., 
       [ 0.28260672],
       [ 0.43673974],
       [ 0.43864328]], dtype=float32)

In [11]:
def mse(y_true, y_pred):
    return K.square(y_pred - y_true)

def mse_wrap(y_true, y_pred):
    # Wrapped MSE: -1 is the same as 1 for rod rotation, so take the error the short way around the wrap.
    return K.square( K.min( K.abs( K.concatenate([y_pred - y_true, y_pred - y_true + 2, y_pred - y_true - 2])), axis=1 ) )
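# Worked example of the wrap (illustrative numbers only):
#   y_true = 0.95, y_pred = -0.95  ->  y_pred - y_true = -1.90
#   plain mse:  (-1.90)**2                              = 3.61
#   mse_wrap:   min(|-1.90|, |-1.90 + 2|, |-1.90 - 2|)  = 0.10, squared = 0.01
# A prediction just across the -1/+1 seam is penalised the short way around
# the rotation instead of the long way.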

lr = 0.000001
print("Updated lr to %f" % lr)
model.compile(optimizer=keras.optimizers.RMSprop(lr=lr),
              loss=[mse_wrap],
              metrics=[mse])

start_epoch = epoch + 1
for epoch in range(start_epoch,400):
    try:
        model.fit_generator(TrainBatchGen(batch_size, model, training), batches_training_per_epoch, epochs=epoch+1, verbose=1, callbacks=None, class_weight=None, max_q_size=10, workers=1, validation_data=ValidateBatchGen(batch_size, model, training), validation_steps = batches_validation_per_epoch, pickle_safe=False, initial_epoch=epoch)
        model.save_weights(WEIGHTS_FNAME % epoch)
        model.save(MODELS_FNAME % epoch)
        print(("Wrote model to " + WEIGHTS_FNAME )  % epoch)
    except KeyboardInterrupt:
        print("\r\nUser stopped the training.")
        assert(False)
        break


Updated lr to 0.000001
Epoch 197/197
11996/11996 [==============================] - 826s - loss: 0.0244 - mse: 0.1534 - val_loss: 0.0274 - val_mse: 0.1167
Wrote model to .\RodAngle\angle_weights_196.hdf
Epoch 198/198
11996/11996 [==============================] - 820s - loss: 0.0230 - mse: 0.1531 - val_loss: 0.0258 - val_mse: 0.1166
Wrote model to .\RodAngle\angle_weights_197.hdf
Epoch 199/199
11996/11996 [==============================] - 819s - loss: 0.0230 - mse: 0.1538 - val_loss: 0.0265 - val_mse: 0.1182
Wrote model to .\RodAngle\angle_weights_198.hdf
Epoch 200/200
11996/11996 [==============================] - 820s - loss: 0.0239 - mse: 0.1530 - val_loss: 0.0260 - val_mse: 0.1158
Wrote model to .\RodAngle\angle_weights_199.hdf
Epoch 201/201
11996/11996 [==============================] - 824s - loss: 0.0241 - mse: 0.1562 - val_loss: 0.0261 - val_mse: 0.1206
Wrote model to .\RodAngle\angle_weights_200.hdf
Epoch 202/202
11996/11996 [==============================] - 833s - loss: 0.0248 - mse: 0.1609 - val_loss: 0.0273 - val_mse: 0.1182
Wrote model to .\RodAngle\angle_weights_201.hdf
Epoch 203/203
11996/11996 [==============================] - 830s - loss: 0.0232 - mse: 0.1523 - val_loss: 0.0266 - val_mse: 0.1179
Wrote model to .\RodAngle\angle_weights_202.hdf
Epoch 204/204
 1243/11996 [==>...........................] - ETA: 695s - loss: 0.0246 - mse: 0.1937
User stopped the training.
---------------------------------------------------------------------------
KeyboardInterrupt                         Traceback (most recent call last)
<ipython-input-11-48d327b88b8b> in <module>()
     16     try:
---> 17         model.fit_generator(TrainBatchGen(batch_size, model, training), batches_training_per_epoch, epochs=epoch+1, verbose=1, callbacks=None, class_weight=None, max_q_size=10, workers=1, validation_data=ValidateBatchGen(batch_size, model, training), validation_steps = batches_validation_per_epoch, pickle_safe=False, initial_epoch=epoch)
     18         model.save_weights(WEIGHTS_FNAME % epoch)

C:\local\Anaconda3-4.1.1-Windows-x86_64\lib\site-packages\keras\legacy\interfaces.py in wrapper(*args, **kwargs)
     87                               '` call to the Keras 2 API: ' + signature, stacklevel=2)
---> 88             return func(*args, **kwargs)
     89         wrapper._legacy_support_signature = inspect.getargspec(func)

C:\local\Anaconda3-4.1.1-Windows-x86_64\lib\site-packages\keras\engine\training.py in fit_generator(self, generator, steps_per_epoch, epochs, verbose, callbacks, validation_data, validation_steps, class_weight, max_q_size, workers, pickle_safe, initial_epoch)
   1889                                                sample_weight=sample_weight,
-> 1890                                                class_weight=class_weight)
   1891 

C:\local\Anaconda3-4.1.1-Windows-x86_64\lib\site-packages\keras\engine\training.py in train_on_batch(self, x, y, sample_weight, class_weight)
   1632         self._make_train_function()
-> 1633         outputs = self.train_function(ins)
   1634         if len(outputs) == 1:

C:\local\Anaconda3-4.1.1-Windows-x86_64\lib\site-packages\keras\backend\tensorflow_backend.py in __call__(self, inputs)
   2228         updated = session.run(self.outputs + [self.updates_op],
-> 2229                               feed_dict=feed_dict)
   2230         return updated[:len(self.outputs)]

C:\local\Anaconda3-4.1.1-Windows-x86_64\lib\site-packages\tensorflow\python\client\session.py in run(self, fetches, feed_dict, options, run_metadata)
    777       result = self._run(None, fetches, feed_dict, options_ptr,
--> 778                          run_metadata_ptr)
    779       if run_metadata:

C:\local\Anaconda3-4.1.1-Windows-x86_64\lib\site-packages\tensorflow\python\client\session.py in _run(self, handle, fetches, feed_dict, options, run_metadata)
    981       results = self._do_run(handle, final_targets, final_fetches,
--> 982                              feed_dict_string, options, run_metadata)
    983     else:

C:\local\Anaconda3-4.1.1-Windows-x86_64\lib\site-packages\tensorflow\python\client\session.py in _do_run(self, handle, target_list, fetch_list, feed_dict, options, run_metadata)
   1031       return self._do_call(_run_fn, self._session, feed_dict, fetch_list,
-> 1032                            target_list, options, run_metadata)
   1033     else:

C:\local\Anaconda3-4.1.1-Windows-x86_64\lib\site-packages\tensorflow\python\client\session.py in _do_call(self, fn, *args)
   1038     try:
-> 1039       return fn(*args)
   1040     except errors.OpError as e:

C:\local\Anaconda3-4.1.1-Windows-x86_64\lib\site-packages\tensorflow\python\client\session.py in _run_fn(session, feed_dict, fetch_list, target_list, options, run_metadata)
   1020                                  feed_dict, fetch_list, target_list,
-> 1021                                  status, run_metadata)
   1022 

KeyboardInterrupt: 

During handling of the above exception, another exception occurred:

AssertionError                            Traceback (most recent call last)
<ipython-input-11-48d327b88b8b> in <module>()
     21     except KeyboardInterrupt:
     22         print("\r\nUser stopped the training.")
---> 23         assert(False)
     24         break

AssertionError: 
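For a rough sense of scale of the plateaued validation loss above: the loss is in the notebook's normalized rotation units, where -1 and 1 describe the same pose. Assuming one unit of this scale corresponds to 180 degrees of rod rotation (an assumption; the actual scaling lives in the preprocessing pipeline), a wrapped val_loss around 0.027 would be roughly a 30-degree RMS angular error:

import math

val_loss_wrapped = 0.027        # typical val_loss from the epochs above, in squared normalized units
DEG_PER_UNIT = 180.0            # assumption: the [-1, 1] range spans one full 360-degree wrap
rms_error_deg = math.sqrt(val_loss_wrapped) * DEG_PER_UNIT
print("approx RMS angle error: %.1f degrees" % rms_error_deg)      # ~29.6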

In [ ]:
WEIGHTS_FNAME = '.\\RodAngle\\angle_weights_%i.hdf'
MODELS_FNAME = '.\\RodAngle\\angle_models_%i.h5'

data_path  = ".\\..\\..\\TrainingData\\Processed\\RodTrainingDataAngles\\Result\\settings_full.tsv"
transformer = VideoTransform( zoom_range=0.1, rotation_range=5, width_shift_range=0.1, height_shift_range=0.1, shear_range= 0.1, fill_mode='nearest', vertical_flip=False, horizontal_flip=True, horizontal_flip_invert_indices = [], horizontal_flip_reverse_indices = [], data_format='channels_last' )
training = TrainingInput(transformer, data_path, position_rel_indexes, frame_rel_indexes, 0.2)

batches_training_per_epoch = int(training.get_training_count() / batch_size)
batches_validation_per_epoch = int(training.get_validation_count() / batch_size)
print("Batch size %i: %i training batches, %i validation batches" % (batch_size, batches_training_per_epoch, batches_validation_per_epoch) )



# Load the best model result
epoch = 202
model.load_weights(WEIGHTS_FNAME % epoch, by_name=True)
print("Loaded model.")

# Plot the real versus predicted values for some of the validation data
plot_validate(ValidateBatchGen(batch_size, model, training), model, 2000, "Angle prediction")

def mse(y_true, y_pred):
    return K.square(y_pred - y_true)

def mse_wrap(y_true, y_pred):
    # Wrapped MSE: -1 is the same as 1 for rod rotation, so take the error the short way around the wrap.
    return K.square( K.min( K.abs( K.concatenate([y_pred - y_true, y_pred - y_true + 2, y_pred - y_true - 2])), axis=1 ) )

lr = 0.000001
print("Updated lr to %f" % lr)
model.compile(optimizer=keras.optimizers.RMSprop(lr=lr),
              loss=[mse_wrap],
              metrics=[mse])

start_epoch = epoch + 1
for epoch in range(start_epoch,400):
    try:
        model.fit_generator(TrainBatchGen(batch_size, model, training), batches_training_per_epoch, epochs=epoch+1, verbose=1, callbacks=None, class_weight=None, max_q_size=10, workers=1, validation_data=ValidateBatchGen(batch_size, model, training), validation_steps = batches_validation_per_epoch, pickle_safe=False, initial_epoch=epoch)
        model.save_weights(WEIGHTS_FNAME % epoch)
        model.save(MODELS_FNAME % epoch)
        print(("Wrote model to " + WEIGHTS_FNAME )  % epoch)
    except KeyboardInterrupt:
        print("\r\nUser stopped the training.")
        assert(False)
        break


Creating training chunk from .\..\..\TrainingData\Processed\RodTrainingDataAngles\Result\chunk0.avi
.\..\..\TrainingData\Processed\RodTrainingDataAngles\Result\chunk0.avi
added 3204 new frames for a total of 3204
Creating training chunk from .\..\..\TrainingData\Processed\RodTrainingDataAngles\Result\chunk1.avi
.\..\..\TrainingData\Processed\RodTrainingDataAngles\Result\chunk1.avi
added 2763 new frames for a total of 5967
Creating training chunk from .\..\..\TrainingData\Processed\RodTrainingDataAngles\Result\chunk2.avi
.\..\..\TrainingData\Processed\RodTrainingDataAngles\Result\chunk2.avi
added 2355 new frames for a total of 8322
Creating training chunk from .\..\..\TrainingData\Processed\RodTrainingDataAngles\Result\chunk3.avi
.\..\..\TrainingData\Processed\RodTrainingDataAngles\Result\chunk3.avi
added 1667 new frames for a total of 9989
Creating training chunk from .\..\..\TrainingData\Processed\RodTrainingDataAngles\Result\chunk4.avi
.\..\..\TrainingData\Processed\RodTrainingDataAngles\Result\chunk4.avi
added 1172 new frames for a total of 11161
Creating training chunk from .\..\..\TrainingData\Processed\RodTrainingDataAngles\Result\chunk5.avi
.\..\..\TrainingData\Processed\RodTrainingDataAngles\Result\chunk5.avi
added 2190 new frames for a total of 13351
Creating training chunk from .\..\..\TrainingData\Processed\RodTrainingDataAngles\Result\chunk6.avi
.\..\..\TrainingData\Processed\RodTrainingDataAngles\Result\chunk6.avi
added 1644 new frames for a total of 14995
Batch size 1: 11996 training batches, 2999 validation batches
Loaded model.
Predicted.
array([[-1.        ],
       [-0.75      ],
       [-0.5       ],
       ..., 
       [ 0.33333334],
       [ 0.16666667],
       [ 0.        ]], dtype=float32)
array([[-0.40722042],
       [-0.35155037],
       [-0.25303647],
       ..., 
       [ 0.31336245],
       [ 0.29311302],
       [ 0.00632197]], dtype=float32)
Updated lr to 0.000001
Epoch 204/204
11996/11996 [==============================] - 798s - loss: 0.0233 - mse: 0.1544 - val_loss: 0.0279 - val_mse: 0.1179
Wrote model to .\RodAngle\angle_weights_203.hdf
Epoch 205/205
11996/11996 [==============================] - 792s - loss: 0.0238 - mse: 0.1625 - val_loss: 0.0287 - val_mse: 0.1176
Wrote model to .\RodAngle\angle_weights_204.hdf
Epoch 206/206
11996/11996 [==============================] - 794s - loss: 0.0231 - mse: 0.1538 - val_loss: 0.0321 - val_mse: 0.1528
Wrote model to .\RodAngle\angle_weights_205.hdf
Epoch 207/207
11996/11996 [==============================] - 794s - loss: 0.0230 - mse: 0.1550 - val_loss: 0.0286 - val_mse: 0.1181
Wrote model to .\RodAngle\angle_weights_206.hdf
Epoch 208/208
11996/11996 [==============================] - 794s - loss: 0.0237 - mse: 0.1619 - val_loss: 0.0272 - val_mse: 0.1145
Wrote model to .\RodAngle\angle_weights_207.hdf
Epoch 209/209
11996/11996 [==============================] - 821s - loss: 0.0233 - mse: 0.1594 - val_loss: 0.0272 - val_mse: 0.1163
Wrote model to .\RodAngle\angle_weights_208.hdf
Epoch 210/210
11996/11996 [==============================] - 814s - loss: 0.0239 - mse: 0.1580 - val_loss: 0.0262 - val_mse: 0.1192
Wrote model to .\RodAngle\angle_weights_209.hdf
Epoch 211/211
 4367/11996 [=========>....................] - ETA: 459s - loss: 0.0226 - mse: 0.1371