In this notebook the MRD models over the entire dataset are trained. These models are used in several experiments and so must be trained prior to running each experiment. Executing this notebook is highly time-consuming: each model takes approximately 3 hours to train, giving a total duration of around 3 days.
These models are not used in Exp1 and Exp4, so it is possible to run Exp1 and Exp4 prior to running this notebook.
In [ ]:
# import the modules
import os
import GPy
import csv
import numpy as np
import cPickle as pickle
from matplotlib import pyplot as plt
%matplotlib notebook
In [ ]:
# Experiment grid: 4 shirts x 6 postures, one trial each.
nShr = 4
nPos = 6
# feature dimensions and dict keys for each recorded modality
dims = [1,7500,8]
keys = ['Time','Cloud','TopCoord']
# trial names of the form K1S<shirt>P<posture>T1, shirt-major order
names = ['K1S%dP%dT1' % (nS+1,nP+1) for nS in range(nShr) for nP in range(nPos)]
# create models folder
dName = 'Models'
if not os.path.exists(dName):
    os.makedirs(dName)
# load dataset; use a with-block so the file handle is closed
# (the original pickle.load(open(...)) never closed it)
with open('Data/Data.p','rb') as dFile:
    Data = pickle.load(dFile)
In [ ]:
# Latent-space configuration shared by all MRD models.
qDim = 15                 # total latent dimensionality
qDims = [10, 5]           # per-view share: kinect cloud first, then mocap
# index ranges of the latent space assigned to each view
qDVals = [np.arange(qDims[0]), np.arange(qDims[0], qDims[0] + qDims[1])]
nInducing = 100           # number of inducing inputs per model
In [ ]:
# Training configuration for the cross-validation loop below.
samplingFreq = 2          # keep every 2nd frame of the training trials
nTrials = len(names)
# target signal-to-noise ratios used to fix noise variances during init
SNR0, SNR1 = 1000, 100
# iteration budgets for the optimization phases
initVardistIters = 1500   # phase 1: variational distribution
initMod0Iters = 500       # phase 2a: model 0 alone
initMod1Iters = 500       # phase 2b: model 1 alone
trainIters = 1500         # phase 3: joint unconstrained training
In [ ]:
# cross validation over the dataset
kinectExt = 'C'
kinectDim = 7500
kinectKey = 'Cloud'
mocapDim = 8
mocapExt = 'T'
mocapKey = 'TopCoord'
keys = [kinectKey,mocapKey]
# model name
expName = '%s%s' % (kinectExt,mocapExt)
# YNames variable
dims = [kinectDim, mocapDim]
YNames = [kinectKey, mocapKey]
nShr = 4
nPos = 6
for sInd in range(nShr):
for pInd in range(nPos):
valData = {}
testData = {}
trainData = {}
testInd = sInd*nPos+pInd
valInd = sInd*nPos+(pInd+1)%nPos
trainInd = [sInd*nPos+ind for ind in range(nPos)]
del trainInd[pInd]
print 'Cycle:%d' % (pInd+1)
print valInd, testInd, trainInd
for key,dim in zip(keys,dims):
trD = np.empty((0,dim))
for ind in trainInd:
trD = np.concatenate((trD,Data[names[ind]][key][::samplingFreq,:]),axis=0)
trainData[key] = trD
valData[key] = Data[names[valInd]][key]
testData[key] = Data[names[testInd]][key]
# choosing the training dataset
nSamples = trainData[kinectKey].shape[0]
trainList = [trainData[kinectKey], trainData[mocapKey]]
# initializing the latent space
scales = []
inputX = np.zeros((nSamples,qDim))
for qD,qDV,Y in zip(qDims, qDVals, trainList):
x,frcs = GPy.util.initialization.initialize_latent('PCA',qD, Y)
scales.extend(frcs)
inputX[:,qDV] = x
scales = np.asarray(scales)
print scales
# setting up the kernel
mrdKernels = []
for Y in trainList:
mrdKernels.append(GPy.kern.RBF(qDim, variance=1., lengthscale=1./scales, ARD = True))
# initializing MRD model
mrdModel = GPy.models.MRD(trainList, input_dim=qDim, num_inducing=nInducing, kernel=mrdKernels,
X=inputX, name='%s%d%d' % (expName,sInd,pInd+1))
print 'Setup Model!'
# Phase 1: Optimizaition by fixing variance parameters
var0 = mrdModel.Y0.Y.var()
var1 = mrdModel.Y1.Y.var()
mrdModel.Y0.rbf.variance.fix(var0)
mrdModel.Y1.rbf.variance.fix(var1)
mrdModel.Y0.Gaussian_noise.variance.fix(var0/SNR0)
mrdModel.Y1.Gaussian_noise.variance.fix(var1/SNR1)
mrdModel.optimize(messages=True, max_iters=initVardistIters)
# Phase 2: Optimize each model individually
# constrain space 0
mrdModel.Y1.constrain_fixed()
mrdModel.optimize(messages=True, max_iters=initMod0Iters)
# constrain space 1
mrdModel.Y0.constrain_fixed()
mrdModel.Y1.unconstrain_fixed()
mrdModel.Y1.rbf.variance.fix(var1)
mrdModel.Y1.Gaussian_noise.variance.fix(var1/SNR1)
mrdModel.optimize(messages=True, max_iters=initMod1Iters)
# Phase 3: Optimize the model without any constraints
# training without constraints
mrdModel.Y0.unconstrain_fixed()
mrdModel.Y1.unconstrain_fixed()
mrdModel.optimize(messages=True, max_iters=trainIters)
print 'Training Done!'
# save the model
mrdModel = pickle.dump(mrdModel, open('Models/Model%d%d.p' % (sInd+1,pInd+1),'wb'))
print 'Saving Done!'
In [ ]: