Experiment 5: TRo Journal


Compare the predictive performance of MRD with various feature representations for the feature space and the observation space.

In this IPython notebook, the predictive performance of three different feature representations of each observation space is evaluated. The evaluation metrics are RMS error, normalized RMS error and Pearson correlation.
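
As a quick reference, the three metrics can be sketched on toy data. The cell below is purely illustrative (it is not part of the evaluation pipeline, and yTrue/yPred are made-up arrays); the inference function defined later applies the same formulas per output dimension.

In [ ]:
# illustrative sketch of the three evaluation metrics on toy data
import numpy as np
import scipy.stats as stats
import sklearn.metrics as metrics

yTrue = np.random.rand(100,3)
yPred = yTrue + 0.05*np.random.randn(100,3)

# RMS error per output dimension
rmse = np.sqrt(metrics.mean_squared_error(yTrue, yPred, multioutput='raw_values'))

# normalized RMS error: RMSE divided by the range of each output dimension
nrmse = rmse/(yTrue.max(axis=0) - yTrue.min(axis=0))

# Pearson correlation per output dimension
corr = np.array([stats.pearsonr(yTrue[:,d], yPred[:,d])[0] for d in range(yTrue.shape[1])])

print rmse.mean(), nrmse.mean(), corr.mean()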


In [ ]:
# import the modules
import os
import sys
import GPy
import csv
import numpy as np
import cPickle as pickle
import scipy.stats as stats
import sklearn.metrics as metrics
from matplotlib import pyplot as plt

%matplotlib notebook

In [ ]:
# load the dataset
Data = pickle.load(open('../Data/FeatureData.p','rb'))

names = ['K1S1P1T1','K1S1P2T1','K1S1P3T1','K1S1P4T1','K1S1P5T1','K1S1P6T1']
nTrials = len(names)

# create directory for results
dName = '../Results/Exp5'
if not os.path.exists(dName):
    os.makedirs(dName)
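
FeatureData.p is assumed to store a dict keyed by trial name, where each trial maps feature keys (e.g. 'Cloud', 'TopCoord') to arrays of shape (samples, dimension); this is how the loops below index it. A quick sanity check of those shapes:

In [ ]:
# sanity check: print the array shapes stored for the first trial
# (assumes each trial entry maps feature keys to (nSamples x nDim) numpy arrays)
trial = Data[names[0]]
for key in sorted(trial.keys()):
    print key, trial[key].shape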

Inference Function

Given a trained MRD model, infer the latent points for each input-modality observation and predict the corresponding output-modality observation, then report RMSE, normalized RMSE and Pearson correlation for the validation and test splits.

In [ ]:
# function to compute reconstruction error
def reconstructionError(model, valData, testData, mKey, kKey, plotFlag=False, optimizeFlag=False):    
    nSamplesVal = valData[mKey].shape[0]
    nSamplesTest = testData[mKey].shape[0]
    
    nDimIn = valData[kKey].shape[1]
    nDimOut = valData[mKey].shape[1]
    
    qDim = model.X.mean.shape[1]
    
    # allocate arrays for the predicted outputs on the validation and test sets
    predictVal = np.zeros((nSamplesVal,nDimOut))
    predictTest = np.zeros((nSamplesTest,nDimOut))

    print 'Finished Init!'
    
    # infer latent points for the validation inputs and predict the outputs
    for n in range(nSamplesVal):
        yIn = valData[kKey][n,:]

        [xPredict, infX] = model.Y0.infer_newX(yIn[None,:], optimize=False)
        yOut = model.predict(xPredict.mean, Yindex=1)
        predictVal[n,:] = yOut[0]
        sys.stdout.write('.')
    print '\nFinished Train!'
    
    # infer latent points for the test inputs and predict the outputs
    for n in range(nSamplesTest):
        yIn = testData[kKey][n,:]

        [xPredict, infX] = model.Y0.infer_newX(yIn[None,:], optimize=optimizeFlag)
        yOut = model.predict(xPredict.mean, Yindex=1)
        predictTest[n,:] = yOut[0]
        sys.stdout.write('.')
    print '\nFinished Test!'
        
    # plot the results
    if plotFlag:
        # plot of the predicted values
        fig = plt.figure()
        for i in range(8):
            fig.add_subplot(421+i)
            plt.plot(range(nSamplesVal),valData[mKey][:,i])
            plt.plot(range(nSamplesVal),predictVal[:,i])
            plt.legend(['True','Predict'])
        plt.suptitle('Train Prediction')

        fig = plt.figure()
        for i in range(8):
            fig.add_subplot(421+i)
            plt.plot(range(nSamplesTest),testData[mKey][:,i])
            plt.plot(range(nSamplesTest),predictTest[:,i])
            plt.legend(['True','Predict'])
        plt.suptitle('Test Prediction')
    
    # RMSE per output dimension
    valErrors1 = np.sqrt(metrics.mean_squared_error(valData[mKey],predictVal,multioutput='raw_values'))
    testErrors1 = np.sqrt(metrics.mean_squared_error(testData[mKey],predictTest,multioutput='raw_values'))

    # normalized RMSE: RMSE divided by the range of each output dimension
    valErrors2 = np.divide(valErrors1, valData[mKey].max(axis=0) - valData[mKey].min(axis=0))
    testErrors2 = np.divide(testErrors1, testData[mKey].max(axis=0) - testData[mKey].min(axis=0))

    # Pearson correlation per output dimension
    valCorr = np.zeros((1,nDimOut))
    testCorr = np.zeros((1,nDimOut))
    for d in range(nDimOut):
        valCorr[0,d],_ = stats.pearsonr(valData[mKey][:,d],predictVal[:,d])
        testCorr[0,d],_ = stats.pearsonr(testData[mKey][:,d],predictTest[:,d])

    print 'NRMSE: Train, Test'
    print valErrors2.mean(), testErrors2.mean()

    print 'Correlation: Train, Test'
    print valCorr.mean(), testCorr.mean()
    
    results = {}
    valResults = {}
    testResults = {}

    valResults['corr'] = valCorr
    valResults['pred'] = predictVal
    valResults['rmse'] = valErrors1
    valResults['nrmse'] = valErrors2

    testResults['corr'] = testCorr
    testResults['pred'] = predictTest
    testResults['rmse'] = testErrors1
    testResults['nrmse'] = testErrors2
    
    results['train'] = valResults
    results['test'] = testResults
    return results

Kinect Experimental Results

Evaluate each Kinect feature representation (Color, Depth, Cloud, ESF) for predicting the TopCoord observation space, running one leave-one-out cycle per trial.

In [ ]:
# main loop
mocapDim = 8
mocapExt = 'T'
mocapKey = 'TopCoord'

kinectExts = ['CO','D','C','E']
kinectDims = [2500,2500,7500,640]
kinectKeys = ['Color','Depth','Cloud','ESF']

for kinectKey, kinectDim, kinectExt in zip(kinectKeys, kinectDims, kinectExts):
    dims = [kinectDim,mocapDim]
    keys = [kinectKey,mocapKey]
    expName = '%s%s' % (kinectExt,mocapExt)

    print 'Modality: %s' % (kinectKey)
    
    for K in range(nTrials):
        valData = {}
        testData = {}
        
        testInd = K
        valInd = (K+1) % nTrials
    
        print 'Cycle:%d' % (K+1)
        print valInd, testInd
    
        for key,dim in zip(keys,dims):
            valData[key] = Data[names[valInd]][key]
            testData[key] = Data[names[testInd]][key]
            
        mrdModel = pickle.load(open('../Models/Exp5/%s%d.p' % (expName,K+1), 'rb'))
        results = reconstructionError(mrdModel,valData,testData,'TopCoord',kinectKey,optimizeFlag=True,plotFlag=True)
        pickle.dump(results, open('../Results/Exp5/%sRes%d.p' % (expName,K+1), 'wb'))

Mocap Experimental Results

Evaluate each mocap feature representation (Marker, CircleMarker, CircleParam) as the observation space predicted from the Cloud feature space, running one leave-one-out cycle per trial.

In [ ]:
# main loop
kinectExt = 'C'
kinectDim = 7500
kinectKey = 'Cloud'

mocapDims = [36,60,21]
mocapExts = ['M','CM','CP']
mocapKeys = ['Marker','CircleMarker','CircleParam']

for mocapKey, mocapDim, mocapExt in zip(mocapKeys, mocapDims, mocapExts):
    dims = [kinectDim,mocapDim]
    keys = [kinectKey,mocapKey]
    expName = '%s%s' % (kinectExt,mocapExt)

    print 'Modality: %s' % (mocapKey)
    
    for K in range(nTrials):
        valData = {}
        testData = {}
        
        testInd = K
        valInd = (K+1) % nTrials
    
        print 'Cycle:%d' % (K+1)
        print valInd, testInd
    
        for key,dim in zip(keys,dims):
            valData[key] = Data[names[valInd]][key]
            testData[key] = Data[names[testInd]][key]
            
        mrdModel = pickle.load(open('../Models/Exp5/%s%d.p' % (expName,K+1), 'rb'))
        results = reconstructionError(mrdModel,valData,testData,mocapKey,kinectKey,optimizeFlag=True,plotFlag=True)
        pickle.dump(results, open('../Results/Exp5/%sRes%d.p' % (expName,K+1), 'wb'))

Compute RMSE and Formatting

Reload results stored in the older format (with 'val' and 'err' keys), compute the missing RMSE metric and rewrite them in the 'train'/'test' dictionary layout used above.

In [ ]:
# main loop with update to results
mocapDim = 8
mocapExt = 'T'
mocapKey = 'TopCoord'

kinectExts = ['CO','D','C','E']
kinectDims = [2500,2500,7500,640]
kinectKeys = ['Color','Depth','Cloud','ESF']

for kinectKey, kinectDim, kinectExt in zip(kinectKeys, kinectDims, kinectExts):
    dims = [kinectDim,mocapDim]
    keys = [kinectKey,mocapKey]
    expName = '%s%s' % (kinectExt,mocapExt)

    print 'Modality: %s' % (kinectKey)
    
    for K in range(nTrials):
        valData = {}
        testData = {}
        
        testInd = K
        valInd = (K+1) % nTrials
    
        print 'Cycle:%d' % (K+1)
        print valInd, testInd
    
        for key,dim in zip(keys,dims):
            valData[key] = Data[names[valInd]][key]
            testData[key] = Data[names[testInd]][key]
            
        results = pickle.load(open('Results/%sRes%d.p' % (expName,K+1), 'rb'))
        
        newResults = {'train':{}, 'test':{}}
        newResults['train']['corr'] = results['val']['corr']
        newResults['train']['pred'] = results['val']['pred']
        newResults['train']['nrmse'] = results['val']['err']
        newResults['train']['rmse'] = np.sqrt(metrics.mean_squared_error(valData['TopCoord'],results['val']['pred'],multioutput='raw_values'))
        
        newResults['test']['corr'] = results['test']['corr']
        newResults['test']['pred'] = results['test']['pred']
        newResults['test']['nrmse'] = results['test']['err']
        newResults['test']['rmse'] = np.sqrt(metrics.mean_squared_error(testData['TopCoord'],results['test']['pred'],multioutput='raw_values'))
        pickle.dump(newResults, open('../Results/Exp5/%sRes%d.p' % (expName,K+1), 'wb'))

In [ ]:
# main loop with update to results
kinectExt = 'C'
kinectDim = 7500
kinectKey = 'Cloud'

mocapDims = [36,60,21]
mocapExts = ['M','CM','CP']
mocapKeys = ['Marker','CircleMarker','CircleParam']

for mocapKey, mocapDim, mocapExt in zip(mocapKeys, mocapDims, mocapExts):
    dims = [kinectDim,mocapDim]
    keys = [kinectKey,mocapKey]
    expName = '%s%s' % (kinectExt,mocapExt)

    print 'Modality: %s' % (mocapKey)
    
    for K in range(nTrials):
        valData = {}
        testData = {}
        
        testInd = K
        valInd = (K+1) % nTrials
    
        print 'Cycle:%d' % (K+1)
        print valInd, testInd
    
        for key,dim in zip(keys,dims):
            valData[key] = Data[names[valInd]][key]
            testData[key] = Data[names[testInd]][key]
            
        results = pickle.load(open('Results/%sRes%d.p' % (expName,K+1), 'rb'))
        
        newResults = {'train':{}, 'test':{}}
        newResults['train']['corr'] = results['val']['corr']
        newResults['train']['pred'] = results['val']['pred']
        newResults['train']['nrmse'] = results['val']['err']
        newResults['train']['rmse'] = np.sqrt(metrics.mean_squared_error(valData[mocapKey],results['val']['pred'],multioutput='raw_values'))
        
        newResults['test']['corr'] = results['test']['corr']
        newResults['test']['pred'] = results['test']['pred']
        newResults['test']['nrmse'] = results['test']['err']
        newResults['test']['rmse'] = np.sqrt(metrics.mean_squared_error(testData[mocapKey],results['test']['pred'],multioutput='raw_values'))
        pickle.dump(newResults, open('../Results/Exp5/%sRes%d.p' % (expName,K+1), 'wb'))
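
As a possible follow-up (not part of the original notebook), the per-trial result files written above can be aggregated into mean test scores per feature representation. A minimal sketch, assuming the experiment names and file layout used in this notebook:

In [ ]:
# aggregate mean test NRMSE and correlation over all leave-one-out cycles
# (illustrative sketch; expNames mirror the experiment names used above)
expNames = ['COT','DT','CT','ET','CM','CCM','CCP']
for expName in expNames:
    nrmse = []
    corr = []
    for K in range(nTrials):
        res = pickle.load(open('../Results/Exp5/%sRes%d.p' % (expName,K+1), 'rb'))
        nrmse.append(res['test']['nrmse'].mean())
        corr.append(res['test']['corr'].mean())
    print '%s: NRMSE %.3f, Corr %.3f' % (expName, np.mean(nrmse), np.mean(corr))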