Experiment 7: TRo Journal


In this experiment, the generalization of the cloth models to unseen T-shirts on the mannequin is verified. The evaluation is performed using RMSE, NRMSE, and Pearson correlation as the metrics. In this notebook, the MRD cloth models are trained with one T-shirt held out for test inference.


In [ ]:
# import the modules
import os
import sys
import csv

import GPy
import numpy as np
import cPickle as pickle
import scipy.stats as stats
import sklearn.metrics as metrics
from matplotlib import pyplot as plt

%matplotlib notebook

Data Loading



In [ ]:
# load all the files and create Data
trackPath = '../Data/Tracks/'
mocapPath = '../Data/MocapData/'
kinectPath = '../Data/KinectData/'

# enumerate sequence names: 4 shirts x 6 poses, kinect 1, trial 1
nShr = 4
nPos = 6
names = ['K1S%dP%dT1' % (shirt + 1, pose + 1)
         for shirt in range(nShr)
         for pose in range(nPos)]

# create directory for results
dName = '../Models/Exp7'
if not os.path.exists(dName):
    os.makedirs(dName)

# load the preprocessed data dictionary keyed by sequence name
Data = pickle.load(open('../Data/Data.p','rb'))

In [ ]:
# set the overall parameters for bgplvm
qDim = 15

# latent dimensions allotted to the kinect and mocap views respectively
qDims = [10, 5]
# index ranges into the shared latent space for each view
qDVals = [np.arange(qDims[0]), np.arange(qDims[0], qDims[0] + qDims[1])]

# number of inducing inputs for the sparse GP approximation
nInducing = 100

In [ ]:
# main loop
samplingFreq = 2

# optimization variables: initial signal-to-noise ratios per view and
# iteration budgets for each training phase
SNR0 = 1000
SNR1 = 100

initVardistIters = 2000   # phase 1: variational distribution only
initMod0Iters = 500       # phase 2a: view 0 alone
initMod1Iters = 500       # phase 2b: view 1 alone
trainIters = 1500         # phase 3: unconstrained joint training

In [ ]:
# observation-space settings for the two views
kinectExt = 'C'
kinectDim = 7500
kinectKey = 'Cloud'

mocapDim = 8
mocapExt = 'T'
mocapKey = 'TopCoord'

keys = [kinectKey, mocapKey]
dims = [kinectDim, mocapDim]
YNames = [kinectKey, mocapKey]
expName = '%s%s' % (kinectExt, mocapExt)

# leave-one-shirt-out splits over the 24 sequences (4 shirts x 6 poses):
# for cycle s, TestInd[s] holds the 6 sequences of the held-out shirt
ValInd = [[6,7,14,15,22,23],[4,5,12,13,20,21],[2,3,10,11,18,19],[0,1,8,9,16,17]]
TestInd = [[0,1,2,3,4,5],[6,7,8,9,10,11],[12,13,14,15,16,17],[18,19,20,21,22,23]]
TrainInd = [[8,9,16,17,18,19],[0,1,14,15,22,23],[4,5,6,7,20,21],[2,3,10,11,12,13]]

# BUG FIX: the loop bound was the undefined name `nShirts` (NameError);
# the shirt count is `nShr`, defined in the data-loading cell
for sInd in range(nShr):
    valData = {}
    testData = {}
    trainData = {}

    valInd = ValInd[sInd]
    testInd = TestInd[sInd]
    trainInd = TrainInd[sInd]

    print('Cycle:%d' % (sInd + 1))
    print('%s %s %s' % (valInd, testInd, trainInd))

    # assemble the training matrices, sub-sampling every `samplingFreq` frame;
    # a single concatenate avoids the quadratic copy of concatenating in a loop
    for key, dim in zip(keys, dims):
        chunks = [Data[names[ind]][key][::samplingFreq, :] for ind in trainInd]
        # the empty seed preserves the (0, dim) result if trainInd were empty
        trainData[key] = np.concatenate([np.empty((0, dim))] + chunks, axis=0)

    # choosing the training dataset
    nSamples = trainData[kinectKey].shape[0]
    trainList = [trainData[kinectKey], trainData[mocapKey]]

    # initialize the shared latent space: PCA per view into its latent slice
    scales = []
    inputX = np.zeros((nSamples, qDim))

    for qD, qDV, Y in zip(qDims, qDVals, trainList):
        x, frcs = GPy.util.initialization.initialize_latent('PCA', qD, Y)
        scales.extend(frcs)
        inputX[:, qDV] = x

    scales = np.asarray(scales)
    print(scales)

    # one ARD RBF kernel per view, lengthscales seeded from the PCA fractions
    mrdKernels = []
    for Y in trainList:
        mrdKernels.append(GPy.kern.RBF(qDim, variance=1., lengthscale=1./scales, ARD=True))

    # initializing MRD model
    # BUG FIX: the model name used the stale loop variable `pInd` (left over
    # from the names loop in an earlier cell) and `sInd` instead of `sInd+1`;
    # it now matches the saved filename '%s%d' % (expName, sInd+1)
    mrdModel = GPy.models.MRD(trainList, input_dim=qDim, num_inducing=nInducing,
                              kernel=mrdKernels, X=inputX,
                              name='%s%d' % (expName, sInd + 1))
    print('Setup Model!')

    # Phase 1: optimize the variational distribution with the signal and
    # noise variances fixed at the target SNRs
    var0 = mrdModel.Y0.Y.var()
    var1 = mrdModel.Y1.Y.var()

    mrdModel.Y0.rbf.variance.fix(var0)
    mrdModel.Y1.rbf.variance.fix(var1)

    mrdModel.Y0.Gaussian_noise.variance.fix(var0/SNR0)
    mrdModel.Y1.Gaussian_noise.variance.fix(var1/SNR1)

    mrdModel.optimize(messages=True, max_iters=initVardistIters)

    # Phase 2: optimize each view individually

    # optimize view 0 while view 1 is held fixed
    mrdModel.Y1.constrain_fixed()
    mrdModel.optimize(messages=True, max_iters=initMod0Iters)

    # optimize view 1 (variances re-fixed at the phase-1 SNR) while view 0 is held
    mrdModel.Y0.constrain_fixed()
    mrdModel.Y1.unconstrain_fixed()
    mrdModel.Y1.rbf.variance.fix(var1)
    mrdModel.Y1.Gaussian_noise.variance.fix(var1/SNR1)
    mrdModel.optimize(messages=True, max_iters=initMod1Iters)

    # Phase 3: joint optimization without any constraints
    mrdModel.Y0.unconstrain_fixed()
    mrdModel.Y1.unconstrain_fixed()
    mrdModel.optimize(messages=True, max_iters=trainIters)

    print('Training Done!')

    # plot the learned relevance scales and the dominant latent dimensions
    mrdModel.plot_scales(sharex=True, sharey=False, titles=YNames)
    mrdModel.plot_latent(which_indices=[0, 1])

    # save the model
    # BUG FIX: the result of pickle.dump (None) was assigned back to
    # `mrdModel`, destroying the trained model; the file handle also leaked.
    with open('../Models/Exp7/%s%d.p' % (expName, sInd + 1), 'wb') as f:
        pickle.dump(mrdModel, f)

    print('Saving Done!')