In [1]:
#!pip install keras
#!pip install numpy
#!pip install imageio
#!pip install matplotlib
#!pip install opencv-python
In [1]:
from __future__ import print_function
import importlib
# Import the module object itself: `from video_file import *` alone does NOT
# bind the name `video_file`, so the original `importlib.reload(video_file)`
# raised NameError and was silently swallowed by a bare except. Importing the
# module first makes the reload actually work, and the reload is harmless on
# the first run.
import video_file
importlib.reload(video_file)
from video_file import *
import cv2
import sys
import os
import csv
import threading  # used by threadsafe_iter below; was missing entirely
import numpy as np
from random import randint
from random import shuffle
from PIL import Image
import imageio
import itertools as it
import tensorflow as tf
import keras
print("Keras version %s" % keras.__version__)
from keras.models import Sequential
from keras.layers import Dense, Dropout
from keras import backend as K
print("Tensorflow version %s" % tf.__version__)
import pprint
pp = pprint.PrettyPrinter(depth=6)

# Create the image transformer used for data augmentation.
transformer = VideoTransform(zoom_range=0.1, rotation_range=20,
                             width_shift_range=0.1, height_shift_range=0.1,
                             shear_range=0.1, fill_mode='nearest',
                             vertical_flip=False, horizontal_flip=True,
                             horizontal_flip_invert_indices=[],
                             horizontal_flip_reverse_indices=[0, 1, 2, 3, 4, 5],
                             data_format='channels_last')

# Paths relative to current python file.
# (Fixed a stray third backslash that was before "AmateurDefender".)
data_path = ".\\..\\..\\TrainingData\\Processed\\AmateurDefender\\Result\\settings.tsv"
In [2]:
print("Opening training frames from config %s." % (data_path))

# Targets: the current rod positions (offset 0) plus a future position
# (relative index 3) — the future value is the control signal to learn.
position_rel_indexes = [0, 3]

# Inputs: the most recent `number_of_frames` frames, expressed as offsets
# relative to the current frame, e.g. [-2, -1, 0] for 3 frames.
number_of_frames = 3
frame_rel_indexes = [i - number_of_frames + 1 for i in range(number_of_frames)]
pp.pprint(frame_rel_indexes)

# Final argument holds out 20% of the data for validation.
training = TrainingInput(transformer, data_path, position_rel_indexes, frame_rel_indexes, 0.2)
In [7]:
# Define our training and validation iterators
# https://stanford.edu/~shervine/blog/keras-generator-multiprocessing.html
class threadsafe_iter(object):
    """
    Takes an iterator/generator and makes it thread-safe by
    serializing calls to the `next` method of the given iterator/generator.

    See https://stanford.edu/~shervine/blog/keras-generator-multiprocessing.html
    """
    def __init__(self, it):
        # Local import: `threading` is not imported at the top of this
        # notebook, so the original code raised NameError here. Importing
        # locally keeps the class self-contained.
        import threading
        self.it = it
        self.lock = threading.Lock()

    def __iter__(self):
        return self

    def __next__(self):
        # Serialize access so concurrent Keras workers cannot interleave
        # next() calls on the underlying generator.
        with self.lock:
            return self.it.__next__()
# https://stanford.edu/~shervine/blog/keras-generator-multiprocessing.html
def threadsafe_generator(f):
    """Decorator: wrap a generator function so the generator it returns is
    safe to consume from multiple threads (see threadsafe_iter)."""
    def wrapped(*args, **kwargs):
        return threadsafe_iter(f(*args, **kwargs))
    return wrapped
# Define our training and validation iterators
@threadsafe_generator
def TrainGen(model, training):
    """Endlessly yield (frames, output) pairs from the training split.

    Resets the model's recurrent/stateful-layer memory at the start of each
    full pass and whenever the source flags a discontinuity (reset_memory),
    so state never carries across unrelated clips.
    """
    while True:
        #print("TrainGen restarting training input.")
        model.reset_states()
        training.move_first_training_frame()
        (frames, output, reset_memory) = training.get_next_training_frame()
        while frames is not None:
            yield (frames, output)
            (frames, output, reset_memory) = training.get_next_training_frame()
            # Clear stateful-layer memory at clip boundaries / end of data.
            if reset_memory or frames is None:
                model.reset_states()
@threadsafe_generator
def ValidateGen(model, training):
    """Endlessly yield (frames, output) pairs from the validation split.

    Mirrors TrainGen: model state is reset at the start of every pass and at
    each reset_memory boundary so stateful layers see clean clip boundaries.
    """
    while True:
        #print("Validation restarting training input.")
        model.reset_states()
        training.move_first_validation_frame()
        (frames, output, reset_memory) = training.get_next_validation_frame()
        while frames is not None:
            yield (frames, output)
            (frames, output, reset_memory) = training.get_next_validation_frame()
            # Clear stateful-layer memory at clip boundaries / end of data.
            if reset_memory or frames is None:
                model.reset_states()
# Generators for training the position
@threadsafe_generator
def TrainBatchGen(batch_size, model, training):
    """Batch up TrainGen samples; targets are the 3 current rod positions."""
    single = TrainGen(model, training)
    while True:
        frames_batch = np.zeros(shape=(batch_size, training.depth, training.height, training.width, training.channels), dtype=np.float32)
        targets = np.zeros(shape=(batch_size, 3), dtype=np.float32)
        for row in range(batch_size):
            frames, output = next(single)
            frames_batch[row] = frames
            # Only the first 3 values (current rod positions) are trained here.
            targets[row] = output[0:3]
        yield (frames_batch, targets)
#pp.pprint("Yielded batch")
@threadsafe_generator
def ValidateBatchGen(batch_size, model, training):
    """Batch up ValidateGen samples; targets are the 3 current rod positions."""
    single = ValidateGen(model, training)
    while True:
        frames_batch = np.zeros(shape=(batch_size, training.depth, training.height, training.width, training.channels), dtype=np.float32)
        targets = np.zeros(shape=(batch_size, 3), dtype=np.float32)
        for row in range(batch_size):
            frames, output = next(single)
            frames_batch[row] = frames
            # Only the first 3 values (current rod positions) are used here.
            targets[row] = output[0:3]
        yield (frames_batch, targets)
@threadsafe_generator
def TrainBatchGenDpos(batch_size, model, training):
    """Batch up TrainGen samples; targets are the per-rod position deltas
    (future position minus current position) for the 3 rods."""
    single = TrainGen(model, training)
    while True:
        frames_batch = np.zeros(shape=(batch_size, training.depth, training.height, training.width, training.channels), dtype=np.float32)
        targets = np.zeros(shape=(batch_size, 3), dtype=np.float32)
        for row in range(batch_size):
            frames, output = next(single)
            frames_batch[row] = frames
            # output[0:3] = current rod positions, output[3:6] = future ones.
            for rod in range(3):
                targets[row, rod] = output[rod + 3] - output[rod]
        yield (frames_batch, targets)
@threadsafe_generator
def ValidateBatchGenDpos(batch_size, model, training):
    """Batch up ValidateGen samples; targets are the per-rod position deltas
    (future position minus current position) for the 3 rods."""
    single = ValidateGen(model, training)
    while True:
        frames_batch = np.zeros(shape=(batch_size, training.depth, training.height, training.width, training.channels), dtype=np.float32)
        targets = np.zeros(shape=(batch_size, 3), dtype=np.float32)
        for row in range(batch_size):
            frames, output = next(single)
            frames_batch[row] = frames
            # output[0:3] = current rod positions, output[3:6] = future ones.
            for rod in range(3):
                targets[row, rod] = output[rod + 3] - output[rod]
        yield (frames_batch, targets)
# Helper function to plot our validation result
import matplotlib
import matplotlib.image as mpimg
import matplotlib.pyplot as plt
import cv2
import pandas as pd
%matplotlib inline
def plot_validate(generator, model, count, name):
    """Draw at least `count` samples from `generator`, predict with `model`,
    and plot true vs. predicted values per rod.

    Two figures are produced: one in sample order, and one sorted by the true
    value (the sorted view makes systematic prediction bias easy to see).

    Fixes over the original: the hard-coded "First 200" title now reflects
    `count`, and the six copy-pasted subplot stanzas (with inconsistent
    marker styling) are replaced by one helper.
    """
    outputs_predicted = None
    outputs_true = None
    # Accumulate whole batches until at least `count` predictions collected.
    while outputs_predicted is None or outputs_predicted.shape[0] < count:
        (new_frames, new_outputs_true) = next(generator)
        new_outputs_predicted = model.predict(new_frames, batch_size=new_frames.shape[0], verbose=0)
        if outputs_true is None:
            outputs_true = new_outputs_true
            outputs_predicted = new_outputs_predicted
        else:
            outputs_true = np.concatenate((outputs_true, new_outputs_true), axis=0)
            outputs_predicted = np.concatenate((outputs_predicted, new_outputs_predicted), axis=0)
    print("Predicted.")
    pp.pprint(outputs_true)
    pp.pprint(outputs_predicted)

    def _plot_rods(sort_by_true):
        # One figure with one subplot per rod; optionally sorted by the
        # true value so over/under-prediction trends stand out.
        plt.figure(figsize=(8, 30))
        for rod in range(3):
            plt.subplot(6, 1, rod + 1)
            true = outputs_true[0:count, rod]
            predicted = outputs_predicted[0:count, rod]
            if sort_by_true:
                true, predicted = zip(*sorted(zip(true, predicted)))
            plt.plot(range(count), true,
                     range(count), predicted,
                     marker='.', markersize=2, linewidth=0.1, markerfacecolor='black')
            plt.ylabel("Rod %i: %s" % (rod + 1, name))
            plt.title("First %i output recordings" % count)
            plt.grid(True)

    _plot_rods(sort_by_true=False)
    _plot_rods(sort_by_true=True)
    plt.show()
def mse(y_true, y_pred):
    """Scaled squared error: the 0.001 factor shrinks the displayed metric so
    Keras' progress output shows more significant digits of the MSE."""
    diff = y_pred - y_true
    return K.square(diff) * 0.001
In [4]:
import matplotlib
import matplotlib.image as mpimg
import matplotlib.pyplot as plt
import cv2
import pandas as pd
%matplotlib inline
# Sanity-check the pipeline: show the first 10 training samples as a strip of
# their input frames, with the corresponding target positions printed below.
training.move_first_training_frame()
for k in range(10):
    (frame, position, reset) = training.get_next_training_frame()
    # Tile the frames of one sample horizontally into a single image:
    # frame axis 0 = time, axes 1/2 = height/width, axis 3 = channels.
    data = np.zeros(shape=(np.shape(frame)[1], np.shape(frame)[2] * np.shape(frame)[0], 3), dtype=np.float32)
    for i in range(np.shape(frame)[0]):
        tmp = frame[i,:,:,:]
        data[:,i*np.shape(frame)[2]:(i+1)*np.shape(frame)[2],:] = tmp
    fig, ax = plt.subplots(figsize=(18, 2))
    plt.imshow(data)
    plt.show()
    pp.pprint(position)
# Rewind so later cells start from the first training frame again.
training.move_first_training_frame()
print("Shape of training input:")
pp.pprint(np.shape(frame))
print("Shape of training output:")
pp.pprint(np.shape(position))
print("Corresponding Positions:")
pd.DataFrame(position)
pp.pprint(position)
In [5]:
from keras.models import Sequential
from keras.layers import *
from keras.models import Model
image_height = training.height
image_width = training.width
image_depth = training.depth
image_channels = training.channels
output_size = 3

# Model options
batch_size = 10
lstm_output_size = 300  # unused in this model; kept for later LSTM experiments
cnn_kernel_count = 60

# Build the model
pp.pprint("Input shape without batches:")
pp.pprint((image_depth, image_height, image_width, image_channels))

# Counters used to give fixed, sequential names to the layers so weights can
# later be transferred between models with load_weights(..., by_name=True).
conv_num = 0
pool_num = 0
dense_num = 0

# Build a functional model design.
# NOTE(review): the time axis below uses number_of_frames while the printout
# above uses image_depth (= training.depth) — presumably equal; confirm.
inputs = Input(shape=(number_of_frames, image_height, image_width, image_channels,),
               name="Input")

# Stage 1: two 3x3x3 space-time convolutions, then spatial pooling.
x = Conv3D(cnn_kernel_count,
           kernel_size=(3, 3, 3),
           padding="same",
           activation="relu",
           name="conv3d_%i" % conv_num)(inputs)
conv_num += 1
x = Conv3D(cnn_kernel_count,
           kernel_size=(3, 3, 3),
           padding="same",
           activation="relu",
           name="conv3d_%i" % conv_num)(x)
conv_num += 1
x = MaxPooling3D(pool_size=(1, 2, 2),
                 name="max_pooling3d_%i" % pool_num)(x)
pool_num += 1

# Stage 2: two more space-time convolutions; the pool collapses the time
# axis (pool_size 3 over the 3 frames) as well as halving space.
x = Conv3D(cnn_kernel_count,
           kernel_size=(3, 3, 3),
           padding="same",
           activation="relu",
           name="conv3d_%i" % conv_num)(x)
conv_num += 1
x = Conv3D(cnn_kernel_count,
           kernel_size=(3, 3, 3),
           padding="same",
           activation="relu",
           name="conv3d_%i" % conv_num)(x)
conv_num += 1
x = MaxPooling3D(pool_size=(3, 2, 2),
                 name="max_pooling3d_%i" % pool_num)(x)
pool_num += 1

# Stage 3: purely spatial (1x3x3) convolutions now that time is collapsed.
x = Conv3D(cnn_kernel_count,
           kernel_size=(1, 3, 3),
           padding="same",
           activation="relu",
           name="conv3d_%i" % conv_num)(x)
conv_num += 1
x = Conv3D(cnn_kernel_count,
           kernel_size=(1, 3, 3),
           padding="same",
           activation="relu",
           name="conv3d_%i" % conv_num)(x)
conv_num += 1
x = MaxPooling3D(pool_size=(1, 2, 2),
                 name="max_pooling3d_%i" % pool_num)(x)
pool_num += 1

# Stage 4: final spatial convolutions; last pool only halves the width.
x = Conv3D(cnn_kernel_count,
           kernel_size=(1, 3, 3),
           padding="same",
           activation="relu",
           name="conv3d_%i" % conv_num)(x)
conv_num += 1
x = Conv3D(cnn_kernel_count,
           kernel_size=(1, 3, 3),
           padding="same",
           activation="relu",
           name="conv3d_%i" % conv_num)(x)
conv_num += 1
x = MaxPooling3D(pool_size=(1, 1, 2),
                 name="max_pooling3d_%i" % pool_num)(x)
pool_num += 1

# Dense head: 128 -> 64 -> 64 with dropout, then 3 linear outputs (one
# position value per rod).
x = Flatten()(x)
x = Dense(128, activation='relu', name="dense_%i" % dense_num)(x)
dense_num += 1
x = Dropout(0.5)(x)
x = Dense(64, activation='relu', name="dense_%i" % dense_num)(x)
dense_num += 1
x = Dropout(0.5)(x)
x = Dense(64, activation='relu', name="dense_%i" % dense_num)(x)
dense_num += 1
x = Dropout(0.5)(x)
predictions = Dense(3, activation='linear', name="dense_%i" % dense_num)(x)
dense_num += 1

model = Model(inputs=inputs, outputs=predictions)
# Regression model: mean squared error loss. The original tracked 'accuracy',
# which is meaningless for a continuous linear output — track MSE instead.
model.compile(optimizer=keras.optimizers.RMSprop(lr=0.0001),
              loss='mean_squared_error',
              metrics=['mse'])
model.summary()
In [7]:
def mse(y_true, y_pred):
    """Scaled squared error metric: the 0.001 factor makes Keras' progress
    output show more significant digits of the (small) MSE values."""
    return K.square(y_pred - y_true)*0.001
print("Updated learner.")

# Train the model to predict the current rod positions — the base task before
# learning the control signal for the robot AI.
WEIGHTS_FNAME = 'pos_cnn_weights_%i.hdf'
MODELS_FNAME = 'pos_cnn_models_%i.h5'
batches_training_per_epoch = int(training.get_training_count() / batch_size)
batches_validation_per_epoch = int(training.get_validation_count() / batch_size)
print("Batch size %i: %i training batches, %i validation batches" % (batch_size, batches_training_per_epoch, batches_validation_per_epoch) )

model.compile(optimizer=keras.optimizers.RMSprop(lr=0.0001),
              loss='mean_squared_error',
              metrics=[mse])
model.reset_states()

# Phase 1: up to 30 epochs at lr=1e-4, checkpointing after every epoch.
for epoch in range(30):
    try:
        model.fit_generator(TrainBatchGen(batch_size, model, training), batches_training_per_epoch, epochs=epoch+1, verbose=1, callbacks=None, class_weight=None, max_q_size=10, workers=10, validation_data=ValidateBatchGen(batch_size, model, training), validation_steps = batches_validation_per_epoch, pickle_safe=False, initial_epoch=epoch)
        model.save_weights(WEIGHTS_FNAME % epoch)
        model.save(MODELS_FNAME % epoch)
        print(("Wrote model to " + WEIGHTS_FNAME ) % epoch)
        plot_validate(ValidateBatchGen(batch_size, model, training), model, 2000, "Position prediction")
    except KeyboardInterrupt:
        print("\r\nUser stopped the training.")
        break

# Phase 2: continue at lr=1e-5, resuming the epoch numbering where phase 1
# stopped. The original `for epoch in range(300)` restarted the counter at 0,
# which overwrote the checkpoints written by phase 1 and mislabeled Keras'
# epoch logs via initial_epoch.
model.compile(optimizer=keras.optimizers.RMSprop(lr=0.00001),
              loss='mean_squared_error',
              metrics=[mse])
for epoch in range(epoch + 1, 300):
    try:
        model.fit_generator(TrainBatchGen(batch_size, model, training), batches_training_per_epoch, epochs=epoch+1, verbose=1, callbacks=None, class_weight=None, max_q_size=10, workers=10, validation_data=ValidateBatchGen(batch_size, model, training), validation_steps = batches_validation_per_epoch, pickle_safe=False, initial_epoch=epoch)
        model.save_weights(WEIGHTS_FNAME % epoch)
        model.save(MODELS_FNAME % epoch)
        print(("Wrote model to " + WEIGHTS_FNAME ) % epoch)
        plot_validate(ValidateBatchGen(batch_size, model, training), model, 2000, "Position prediction")
    except KeyboardInterrupt:
        print("\r\nUser stopped the training.")
        break
In [8]:
# Phase 3: continue fine-tuning at lr=1e-6.
# NOTE(review): this loop restarts `epoch` at 0, so it overwrites the
# checkpoint files written by the earlier loops for epochs 0..N — confirm
# that is intended.
model.compile(optimizer=keras.optimizers.RMSprop(lr=0.000001),
              loss='mean_squared_error',
              metrics=[mse])
for epoch in range(300):
    try:
        # One epoch per call; epochs/initial_epoch keep Keras' logging aligned.
        model.fit_generator(TrainBatchGen(batch_size, model, training), batches_training_per_epoch, epochs=epoch+1, verbose=1, callbacks=None, class_weight=None, max_q_size=10, workers=10, validation_data=ValidateBatchGen(batch_size, model, training), validation_steps = batches_validation_per_epoch, pickle_safe=False, initial_epoch=epoch)
        model.save_weights(WEIGHTS_FNAME % epoch)
        model.save(MODELS_FNAME % epoch)
        print(("Wrote model to " + WEIGHTS_FNAME ) % epoch)
        plot_validate(ValidateBatchGen(batch_size, model, training), model, 2000, "Position prediction")
    except KeyboardInterrupt:
        print("\r\nUser stopped the training.")
        break
In [9]:
# Load the best model result
epoch = 8
WEIGHTS_FNAME = 'pos_cnn_weights_%i.hdf'
# by_name=True: weights are matched to layers by their fixed names, so this
# also works for models that share only some layers.
model.load_weights(WEIGHTS_FNAME % epoch, by_name=True)
print("Loaded model.")
In [10]:
# Plot the real versus predicted values for some of the validation data
# (2000 samples drawn from the stateful validation generator).
plot_validate(ValidateBatchGen(batch_size, model, training), model, 2000, "Position prediction")
The theory is that the model has learned filters that successfully extract the rod positions — information essential for deciding how the rods should be moved. We therefore use this position-tracking model as the starting point for the real AI, which predicts the rod movements that will occur next.
In [6]:
# Load the best model result (repeat of the earlier load cell, kept so this
# section can be run independently).
epoch = 8
WEIGHTS_FNAME = 'pos_cnn_weights_%i.hdf'
# by_name=True: weights are matched to layers by their fixed names.
model.load_weights(WEIGHTS_FNAME % epoch, by_name=True)
print("Loaded model.")
In [13]:
def mse(y_true, y_pred):
    """Scaled squared error metric: the 0.001 factor makes Keras' progress
    output show more significant digits of the (small) MSE values."""
    return K.square(y_pred - y_true)*0.001
print("Updated learner.")

# Train the model to predict the future position delta per rod. This is the
# control signal to the robot AI.
WEIGHTS_FNAME = 'dpos_cnn_weights_%i.hdf'
MODELS_FNAME = 'dpos_cnn_models_%i.h5'
batches_training_per_epoch = int(training.get_training_count() / batch_size)
batches_validation_per_epoch = int(training.get_validation_count() / batch_size)
print("Batch size %i: %i training batches, %i validation batches" % (batch_size, batches_training_per_epoch, batches_validation_per_epoch) )

model.compile(optimizer=keras.optimizers.RMSprop(lr=0.0001),
              loss='mean_squared_error',
              metrics=[mse])
model.reset_states()

# Phase 1: up to 15 epochs at lr=1e-4; plot every 3rd epoch.
for epoch in range(15):
    try:
        model.fit_generator(TrainBatchGenDpos(batch_size, model, training), batches_training_per_epoch, epochs=epoch+1, verbose=1, callbacks=None, class_weight=None, max_q_size=10, workers=10, validation_data=ValidateBatchGenDpos(batch_size, model, training), validation_steps = batches_validation_per_epoch, pickle_safe=False, initial_epoch=epoch)
        model.save_weights(WEIGHTS_FNAME % epoch)
        model.save(MODELS_FNAME % epoch)
        print(("Wrote model to " + WEIGHTS_FNAME ) % epoch)
        if epoch % 3 == 0:
            plot_validate(ValidateBatchGenDpos(batch_size, model, training), model, 2000, "Position prediction")
    except KeyboardInterrupt:
        print("\r\nUser stopped the training.")
        break

# Phase 2: continue at lr=1e-5, resuming the epoch numbering where phase 1
# stopped. The original `for epoch in range(300)` restarted the counter at 0,
# overwriting the checkpoints written by phase 1.
model.compile(optimizer=keras.optimizers.RMSprop(lr=0.00001),
              loss='mean_squared_error',
              metrics=[mse])
for epoch in range(epoch + 1, 300):
    try:
        model.fit_generator(TrainBatchGenDpos(batch_size, model, training), batches_training_per_epoch, epochs=epoch+1, verbose=1, callbacks=None, class_weight=None, max_q_size=10, workers=10, validation_data=ValidateBatchGenDpos(batch_size, model, training), validation_steps = batches_validation_per_epoch, pickle_safe=False, initial_epoch=epoch)
        model.save_weights(WEIGHTS_FNAME % epoch)
        model.save(MODELS_FNAME % epoch)
        print(("Wrote model to " + WEIGHTS_FNAME ) % epoch)
        if epoch % 3 == 0:
            plot_validate(ValidateBatchGenDpos(batch_size, model, training), model, 2000, "Position prediction")
    except KeyboardInterrupt:
        print("\r\nUser stopped the training.")
        break
In [14]:
# Load the best position prediction model as the starting point
epoch = 14
WEIGHTS_FNAME = 'dpos_cnn_weights_%i.hdf'
model.load_weights(WEIGHTS_FNAME % epoch)
print("Loaded model.")
# Plot the real versus predicted values for some of the validation data
plot_validate(ValidateBatchGenDpos(batch_size, model, training), model, 2000, "Difference in position")
# Continue fine-tuning at lr=1e-5.
# NOTE(review): this loop restarts `epoch` at 0 and so overwrites earlier
# dpos checkpoints; MODELS_FNAME is the leftover value from the previous
# training cell — confirm both are intended.
model.compile(optimizer=keras.optimizers.RMSprop(lr=0.00001),
              loss='mean_squared_error',
              metrics=[mse])
for epoch in range(300):
    try:
        model.fit_generator(TrainBatchGenDpos(batch_size, model, training), batches_training_per_epoch, epochs=epoch+1, verbose=1, callbacks=None, class_weight=None, max_q_size=10, workers=10, validation_data=ValidateBatchGenDpos(batch_size, model, training), validation_steps = batches_validation_per_epoch, pickle_safe=False, initial_epoch=epoch)
        model.save_weights(WEIGHTS_FNAME % epoch)
        model.save(MODELS_FNAME % epoch)
        print(("Wrote model to " + WEIGHTS_FNAME ) % epoch)
        if epoch % 3 == 0:
            plot_validate(ValidateBatchGenDpos(batch_size, model, training), model, 2000, "Position prediction")
    except KeyboardInterrupt:
        print("\r\nUser stopped the training.")
        break
In [15]:
# Plot the real versus predicted values for some of the validation data
# (2000 samples of the per-rod position deltas).
plot_validate(ValidateBatchGenDpos(batch_size, model, training), model, 2000, "Difference in position")
In [16]:
# Continue fine-tuning at lr=1e-6, resuming from the leftover `epoch` value
# of the previous training cell.
# NOTE(review): range(epoch, 300) re-runs and re-saves the last completed
# epoch number once — confirm intended (range(epoch+1, 300) would not).
model.compile(optimizer=keras.optimizers.RMSprop(lr=0.000001),
              loss='mean_squared_error',
              metrics=[mse])
for epoch in range(epoch,300):
    try:
        model.fit_generator(TrainBatchGenDpos(batch_size, model, training), batches_training_per_epoch, epochs=epoch+1, verbose=1, callbacks=None, class_weight=None, max_q_size=10, workers=10, validation_data=ValidateBatchGenDpos(batch_size, model, training), validation_steps = batches_validation_per_epoch, pickle_safe=False, initial_epoch=epoch)
        model.save_weights(WEIGHTS_FNAME % epoch)
        model.save(MODELS_FNAME % epoch)
        print(("Wrote model to " + WEIGHTS_FNAME ) % epoch)
        if epoch % 6 == 0:
            plot_validate(ValidateBatchGenDpos(batch_size, model, training), model, 2000, "Position prediction")
    except KeyboardInterrupt:
        print("\r\nUser stopped the training.")
        break
In [17]:
# Bump the learning rate slightly (5e-6) and keep fine-tuning, resuming from
# the leftover `epoch` value of the previous cell.
model.compile(optimizer=keras.optimizers.RMSprop(lr=0.000005),
              loss='mean_squared_error',
              metrics=[mse])
for epoch in range(epoch,300):
    try:
        model.fit_generator(TrainBatchGenDpos(batch_size, model, training), batches_training_per_epoch, epochs=epoch+1, verbose=1, callbacks=None, class_weight=None, max_q_size=10, workers=10, validation_data=ValidateBatchGenDpos(batch_size, model, training), validation_steps = batches_validation_per_epoch, pickle_safe=False, initial_epoch=epoch)
        model.save_weights(WEIGHTS_FNAME % epoch)
        model.save(MODELS_FNAME % epoch)
        print(("Wrote model to " + WEIGHTS_FNAME ) % epoch)
        if epoch % 6 == 0:
            plot_validate(ValidateBatchGenDpos(batch_size, model, training), model, 2000, "Position prediction")
    except KeyboardInterrupt:
        print("\r\nUser stopped the training.")
        break
In [ ]:
# Switch to a larger batch size (40) for the remaining fine-tuning and
# recompute the batches-per-epoch counts accordingly.
batch_size = 40
batches_training_per_epoch = int(training.get_training_count() / batch_size)
batches_validation_per_epoch = int(training.get_validation_count() / batch_size)
print("%i training batches, %i validation batches" % (batches_training_per_epoch, batches_validation_per_epoch) )
model.compile(optimizer=keras.optimizers.RMSprop(lr=0.00005),
              loss='mean_squared_error',
              metrics=[mse])
# Resume from the leftover `epoch` value of the previous cell.
for epoch in range(epoch,300):
    try:
        model.fit_generator(TrainBatchGenDpos(batch_size, model, training), batches_training_per_epoch, epochs=epoch+1, verbose=1, callbacks=None, class_weight=None, max_q_size=10, workers=10, validation_data=ValidateBatchGenDpos(batch_size, model, training), validation_steps = batches_validation_per_epoch, pickle_safe=False, initial_epoch=epoch)
        model.save_weights(WEIGHTS_FNAME % epoch)
        model.save(MODELS_FNAME % epoch)
        print(("Wrote model to " + WEIGHTS_FNAME ) % epoch)
        if epoch % 6 == 0:
            plot_validate(ValidateBatchGenDpos(batch_size, model, training), model, 2000, "Position prediction")
    except KeyboardInterrupt:
        print("\r\nUser stopped the training.")
        break
In [ ]:
# Load the best model result
epoch = 22
WEIGHTS_FNAME = 'dpos_cnn_weights_%i.hdf'
MODELS_FNAME = 'dpos_cnn_models_%i.h5'
# by_name=True: weights are matched to layers by their fixed names.
model.load_weights(WEIGHTS_FNAME % epoch, by_name=True)
print("Loaded model.")
batch_size = 40
batches_training_per_epoch = int(training.get_training_count() / batch_size)
batches_validation_per_epoch = int(training.get_validation_count() / batch_size)
print("%i training batches, %i validation batches" % (batches_training_per_epoch, batches_validation_per_epoch) )
model.compile(optimizer=keras.optimizers.RMSprop(lr=0.00005),
              loss='mean_squared_error',
              metrics=[mse])
# Resume fine-tuning from epoch 22 (re-saves epoch 22 on its first pass).
for epoch in range(epoch,300):
    try:
        model.fit_generator(TrainBatchGenDpos(batch_size, model, training), batches_training_per_epoch, epochs=epoch+1, verbose=1, callbacks=None, class_weight=None, max_q_size=10, workers=10, validation_data=ValidateBatchGenDpos(batch_size, model, training), validation_steps = batches_validation_per_epoch, pickle_safe=False, initial_epoch=epoch)
        model.save_weights(WEIGHTS_FNAME % epoch)
        model.save(MODELS_FNAME % epoch)
        print(("Wrote model to " + WEIGHTS_FNAME ) % epoch)
        if epoch % 6 == 0:
            plot_validate(ValidateBatchGenDpos(batch_size, model, training), model, 2000, "Position prediction")
    except KeyboardInterrupt:
        print("\r\nUser stopped the training.")
        break
In [ ]:
# Long fine-tuning run at lr=5e-6, up to epoch 1000, without the periodic
# validation plots.
model.compile(optimizer=keras.optimizers.RMSprop(lr=0.000005),
              loss='mean_squared_error',
              metrics=[mse])
print("Updated learner.")
# Train the model to predict the future position. This is the control signal to the robot AI
WEIGHTS_FNAME = 'dpos_cnn_weights_%i.hdf'
MODELS_FNAME = 'dpos_cnn_models_%i.h5'
# Resume numbering after the last completed epoch (no re-save of it).
start_epoch = epoch+1
batches_training_per_epoch = int(training.get_training_count() / batch_size)
batches_validation_per_epoch = int(training.get_validation_count() / batch_size)
print("%i training batches, %i validation batches" % (batches_training_per_epoch, batches_validation_per_epoch) )
for epoch in range(start_epoch,1000):
    try:
        # Note workers=1 here (earlier cells used 10): single-threaded feeding.
        model.fit_generator(TrainBatchGenDpos(batch_size, model, training), batches_training_per_epoch, epochs=epoch+1, verbose=1, callbacks=None, class_weight=None, max_q_size=10, workers=1, validation_data=ValidateBatchGenDpos(batch_size, model, training), validation_steps = batches_validation_per_epoch, pickle_safe=False, initial_epoch=epoch)
        model.save_weights(WEIGHTS_FNAME % epoch)
        model.save(MODELS_FNAME % epoch)
        print(("Wrote model to " + WEIGHTS_FNAME ) % epoch)
    except KeyboardInterrupt:
        print("\r\nUser stopped the training.")
        break
In [5]:
# Load the best position prediction model as the starting point
epoch = 65
WEIGHTS_FNAME = 'dpos_cnn_weights_%i.hdf'
model.load_weights(WEIGHTS_FNAME % epoch)
print("Loaded model.")
# Plot the real versus predicted values for some of the validation data
plot_validate(ValidateBatchGenDpos(batch_size, model, training), model, 2000, "Difference in position")
In [ ]:
In [7]:
# NOTE(review): `mse` is redefined here identically to earlier cells;
# consider keeping a single definition near the top of the notebook.
def mse(y_true, y_pred):
    """Scaled squared error so Keras' progress output shows more digits."""
    return K.square(y_pred - y_true)*0.001
model.compile(optimizer=keras.optimizers.RMSprop(lr=0.000005),
              loss='mean_squared_error',
              metrics=[mse])
print("Updated learner.")
# Plot the real versus predicted values for some of the validation data
plot_validate(ValidateBatchGenDpos(batch_size, model, training), model, 2000, "Difference in position")
# Train the model to predict the future position. This is the control signal to the robot AI
WEIGHTS_FNAME = 'dpos_cnn_weights_%i.hdf'
MODELS_FNAME = 'dpos_cnn_models_%i.h5'
# Resume numbering after the last completed epoch.
start_epoch = epoch+1
batches_training_per_epoch = int(training.get_training_count() / batch_size)
batches_validation_per_epoch = int(training.get_validation_count() / batch_size)
print("%i training batches, %i validation batches" % (batches_training_per_epoch, batches_validation_per_epoch) )
for epoch in range(start_epoch,1000):
    try:
        model.fit_generator(TrainBatchGenDpos(batch_size, model, training), batches_training_per_epoch, epochs=epoch+1, verbose=1, callbacks=None, class_weight=None, max_q_size=10, workers=1, validation_data=ValidateBatchGenDpos(batch_size, model, training), validation_steps = batches_validation_per_epoch, pickle_safe=False, initial_epoch=epoch)
        model.save_weights(WEIGHTS_FNAME % epoch)
        model.save(MODELS_FNAME % epoch)
        print(("Wrote model to " + WEIGHTS_FNAME ) % epoch)
    except KeyboardInterrupt:
        print("\r\nUser stopped the training.")
        break
In [8]:
# Plot the real versus predicted values for some of the validation data
# (final check of the fine-tuned delta-position model).
plot_validate(ValidateBatchGenDpos(batch_size, model, training), model, 2000, "Difference in position")