Experiment 1: TRo Journal


Compare the performance of BGPLVM with a linear latent variable model (PCA) for cloth state estimation. Experimental results show that BGPLVM has significantly better performance and generalization capability than PCA.

The evaluation metrics are RMS error, normalized RMS error and Pearson correlation. Statistical significance was evaluated using a one-sided Wilcoxon signed-rank test (implemented below with an exact critical-value lookup table).


In [ ]:
# import the modules
import os
import sys
import GPy
import csv
import numpy as np
import cPickle as pickle
import matplotlib.cm as cm
import scipy.stats as stats
from GPy.plotting import Tango
import sklearn.metrics as metrics
from matplotlib import pyplot as plt

%matplotlib notebook

Data Loading



In [ ]:
# load all the files and create Data
trackPath = '../Data/Tracks/'
mocapPath = '../Data/MocapData/'
kinectPath = '../Data/KinectData/'

# trial names; presumably K=kinect, S=subject, P=posture, T=trial -- TODO confirm
names = ['K1S1P1T1','K1S1P2T1','K1S1P3T1','K1S1P4T1','K1S1P5T1','K1S1P6T1',
         'K1S2P2T1','K1S2P3T1','K1S2P4T1','K1S3P2T1','K1S3P3T1','K1S3P4T1']

# two files per trial: a track file (stored under 'Time') and a kinect cloud file
nTypes = 2

dataFormats = ['','Cloud']
dataKeys = ['Time','Cloud']

paths = [trackPath, kinectPath]

# read all the files and save to dicts:
# Data[trialName][key] is a float ndarray parsed from the corresponding csv file
Data = {}
# loop over all names
for fileName in names:
    data = {}
    for ind in range(nTypes):
        dataName = paths[ind] + fileName + dataFormats[ind]
        # with-statement closes each csv file (the original leaked the handles)
        with open(dataName, 'rb') as csvFile:
            reader = csv.reader(csvFile, delimiter=',')
            d = np.array(list(reader))
        data[dataKeys[ind]] = d.astype('float')
    Data[fileName] = data

In [ ]:
# create train, val and test data
trainNames = ['K1S1P2T1','K1S1P3T1','K1S1P4T1','K1S1P5T1','K1S1P6T1']
testNames = ['K1S1P1T1','K1S2P2T1','K1S2P3T1','K1S2P4T1','K1S3P2T1','K1S3P3T1','K1S3P4T1']

# keep every 4th frame to reduce redundancy between consecutive frames
samplingFreq = 4

# output dimensionality of one cloud frame; presumably 2500 points x 3 coords -- TODO confirm
dim = 7500
key = 'Cloud'

testSize = 0        # total number of test frames (accumulated below)
trainSize = 0       # total number of training frames
testData = []       # one subsampled array per test trial
testSizes = []      # frames per test trial
trainTraj = []      # per-trial training arrays (kept for per-trajectory metrics later)
trainSizes = []     # frames per training trial
trainData = np.empty((0,dim))

# training trials are both kept separately (trainTraj) and concatenated (trainData)
for fileName in trainNames:
    trainTraj.append(Data[fileName][key][::samplingFreq,:])
    trainSizes.append(trainTraj[-1].shape[0])
    trainData = np.concatenate((trainData,Data[fileName][key][::samplingFreq,:]),axis=0)
trainSize += trainData.shape[0]

# test trials stay separate so metrics can be computed per trial
for fileName in testNames:
    testData.append(Data[fileName][key][::samplingFreq,:])
    testSizes.append(testData[-1].shape[0])
    testSize += testSizes[-1]
# Python 2 print statement
print trainSizes, testSizes

Model Training



In [ ]:
# Training Cloud BGPLVM Model

# model name
expName = 'bgplvmModel'

# set the overall parameters for bgplvm
qDim = 15                        # dimensionality of the latent space
nSamples = trainData.shape[0]    # number of training frames

# set the number of inducing inputs (size of the sparse approximation)
nInducing = 50

In [ ]:
# fit the linear PCA baseline; its projection also initializes the BGPLVM latent space
from sklearn.decomposition import PCA

pca = PCA(n_components=qDim)
pca.fit(trainData)

# normalize explained-variance ratios so the largest is 1 (analogue of ARD weights)
scalesPCA = pca.explained_variance_ratio_
scalesPCA = scalesPCA/scalesPCA.max()

# latent projection and linear reconstruction of the training data
trainX = pca.transform(trainData)
trainOut = pca.inverse_transform(trainX)

In [ ]:
# setting up the kernel: ARD RBF whose per-dimension lengthscales are initialized
# from the PCA variance ratios (informative dimensions start with short lengthscales)
kernel = GPy.kern.RBF(qDim, variance=1., lengthscale=1./scalesPCA, ARD = True)

# experiment with different X initializations (here: the PCA projection trainX)
bgplvmModel = GPy.models.BayesianGPLVM(trainData,input_dim=qDim,num_inducing=nInducing,kernel=kernel,X=trainX)

# Phase 1: optimize the variational distribution with the variance
# parameters fixed to a fixed signal-to-noise ratio
SNR = 1000
var = bgplvmModel.Y.var()

bgplvmModel.rbf.variance.fix(var)
bgplvmModel.Gaussian_noise.variance.fix(var/SNR)

initVardistIters = 2000
bgplvmModel.optimize(messages=True, max_iters=initVardistIters)

# Phase 2: Optimize the model without any constraints
trainIters = 1000
bgplvmModel.unconstrain_fixed()
bgplvmModel.optimize(messages=True, max_iters=trainIters)

# Save the model to file; the with-statement closes the handle even on error
# (the original passed an anonymous open() to pickle.dump and leaked it)
dName = '../Models/Exp1'
if not os.path.exists(dName):
    os.makedirs(dName)
with open('../Models/Exp1/model.p', 'wb') as modelFile:
    pickle.dump(bgplvmModel, modelFile)

Analysis Functions



In [ ]:
# function to compute NRMSE, RMSE and Pearson correlation between predictions and ground truth
def computeMetrics(predData, trueData):
    """Compute error and correlation metrics over the informative output dimensions.

    Parameters
    ----------
    predData : ndarray, shape (nSamples, nDims)
        Predicted outputs.
    trueData : ndarray, shape (nSamples, nDims)
        Ground-truth outputs, same shape as predData.

    Returns
    -------
    tuple (nrmse, rmse, corr) where
        nrmse : float -- mean over dimensions of RMSE normalized by the true range,
        rmse  : float -- mean over dimensions of the raw RMSE,
        corr  : ndarray (2,) -- mean Pearson r and mean p-value over dimensions.
    """
    # keep only dimensions that vary in BOTH signals; constant dimensions carry
    # no information and would make the Pearson correlation undefined
    out1 = np.nonzero(trueData.std(axis=0))
    out2 = np.nonzero(predData.std(axis=0))
    validDims = np.intersect1d(out1[0], out2[0])
    predCData = predData[:, validDims]
    trueCData = trueData[:, validDims]

    # per-dimension RMSE, computed once and reused for both error metrics
    # (equivalent to sqrt of sklearn mean_squared_error with multioutput='raw_values')
    rmsePerDim = np.sqrt(np.mean((predCData - trueCData)**2, axis=0))

    # normalized RMSE: divide by the observed range of the true signal per dimension
    normErrPerDim = np.divide(rmsePerDim, trueCData.max(axis=0) - trueCData.min(axis=0))

    # per-dimension Pearson correlation coefficient (row 0) and p-value (row 1)
    corr = np.zeros((2, validDims.shape[0]))
    for d in range(validDims.shape[0]):
        corr[0, d], corr[1, d] = stats.pearsonr(predCData[:, d], trueCData[:, d])

    return normErrPerDim.mean(), rmsePerDim.mean(), corr.mean(axis=1)

In [ ]:
# this lookup table for generating p-values was taken from the following link:
# https://onlinecourses.science.psu.edu/stat414/sites/onlinecourses.science.psu.edu.stat414/files/lesson48/ExactW_Table.pdf
W = {5: {'max': 15, 'min': 0, 'p': 0.05},
     6: {'max': 21, 'min': 0, 'p': 0.025},
     7: {'max': 28, 'min': 0, 'p': 0.01}}

# function to compute wilcoxon metric for one-sided x > y, greater than test
def wilcoxon(x, y):
    """One-sided Wilcoxon signed-rank statistic with table-based significance.

    Returns (T, result): T is the sum of ranks of the positive paired
    differences x - y; result is the tabulated significance level when T
    falls outside the critical interval for the sample size, else None.
    """
    x, y = np.asarray(x), np.asarray(y)

    # paired differences, with exact ties (zero differences) discarded
    diffs = x - y
    diffs = np.compress(np.not_equal(diffs, 0), diffs, axis=-1)

    # ranks of the absolute differences (ties receive average ranks)
    ranks = stats.rankdata(abs(diffs))

    # T statistic: total rank mass carried by the positive differences
    T = np.sum((diffs > 0) * ranks, axis=0)

    # look up the exact critical values for this effective sample size
    n = diffs.shape[0]
    critical = W[n]

    # outside [min, max] -> significant at the tabulated level
    result = critical['p'] if (T >= critical['max'] or T <= critical['min']) else None

    return T, result

Model Analysis



In [ ]:
# performing latent point inference with the trained BGPLVM model
bgplvmModel = pickle.load(open('../Models/Exp1/model.p','rb'))

# infer latent points and reconstructions for every training frame
trainX = np.zeros((trainData.shape[0],qDim))
trainOut = np.zeros(trainData.shape)
for i in range(trainData.shape[0]):
    # NOTE(review): optimize=False here but optimize=True for test frames below -- confirm intentional
    [xPredict, infX] = bgplvmModel.infer_newX(np.atleast_2d(trainData[i,:]), optimize=False)
    trainX[i,:] = xPredict.mean
    trainOut[i,:],_ = bgplvmModel.predict(np.atleast_2d(trainX[i,:]))
    sys.stdout.write('.')  # progress indicator, one dot per frame
sys.stdout.write('\n')

# index 0 of these lists holds the training split; test trials are appended below
bgplvmX = [trainX]
bgplvmOut = [trainOut]

# loop over test trials
bgplvmMainX = np.zeros((0,qDim))
bgplvmMainOut = np.zeros((0,dim))
for ind in range(len(testData)):
    testX = np.zeros((testData[ind].shape[0],qDim))
    testOut = np.zeros(testData[ind].shape)
    for i in range(testData[ind].shape[0]):
        # test frames are unseen, so the variational posterior over X is optimized
        [xPredict, infX] = bgplvmModel.infer_newX(np.atleast_2d(testData[ind][i,:]), optimize=True)
        testX[i,:] = xPredict.mean
        testOut[i,:],_ = bgplvmModel.predict(np.atleast_2d(xPredict.mean))
        sys.stdout.write('.')
    
    bgplvmX.append(testX.copy())
    bgplvmOut.append(testOut.copy())
    
    # concatenated latent points / reconstructions over all test trials
    bgplvmMainX = np.concatenate((bgplvmMainX,testX),axis=0)
    bgplvmMainOut = np.concatenate((bgplvmMainOut,testOut),axis=0)
    sys.stdout.write('\n')
    
# ARD weights of the trained kernel, normalized so the largest is 1
scalesBGPLVM = bgplvmModel.kern.input_sensitivity(summarize=False)
scalesBGPLVM =  scalesBGPLVM/scalesBGPLVM.max()

In [ ]:
# refit PCA (same settings as the earlier cell) and project train + all test trials
from sklearn.decomposition import PCA

pca = PCA(n_components=qDim)
pca.fit(trainData)

# normalized explained-variance ratios (PCA analogue of the ARD weights)
scalesPCA = pca.explained_variance_ratio_
scalesPCA = scalesPCA/scalesPCA.max()

trainX = pca.transform(trainData)
trainOut = pca.inverse_transform(trainX)

# index 0 holds the training split, mirroring the BGPLVM result lists
pcaX = [trainX]
pcaOut = [trainOut]

pcaMainX = np.zeros((0,qDim))
pcaMainOut = np.zeros((0,dim))

# project each test trial into the PCA space and linearly reconstruct it
for ind in range(len(testData)):
    testX = pca.transform(testData[ind])
    testOut = pca.inverse_transform(testX)
    
    pcaX.append(testX.copy())
    pcaOut.append(testOut.copy())

    # concatenated latent points / reconstructions over all test trials
    pcaMainX = np.concatenate((pcaMainX,testX),axis=0)
    pcaMainOut = np.concatenate((pcaMainOut,testOut),axis=0)

In [ ]:
# assemble prediction and latent-space results and persist them to disk
trueOut = [trainData] + testData

# per-trial reconstructions: index 0 is the training split, 1..N the test trials
predictData = {}
predictData['PCA'] = pcaOut
predictData['True'] = trueOut
predictData['BGPLVM'] = bgplvmOut

# concatenated latent embeddings plus the per-dimension relevance scales
latentData = {}
latentData['PCA'] = pcaMainX
latentData['PCATrain'] = trainX
latentData['BGPLVM'] = bgplvmMainX
latentData['PCAScales'] = scalesPCA
latentData['BGPLVMScales'] = scalesBGPLVM

dName = '../Results/Exp1'
if not os.path.exists(dName):
    os.makedirs(dName)
# with-statements close the file handles immediately after writing
# (the original passed anonymous open() objects to pickle.dump and leaked them)
with open('../Results/Exp1/latentData.p', 'wb') as latentFile:
    pickle.dump(latentData, latentFile)
with open('../Results/Exp1/predictData.p', 'wb') as predictFile:
    pickle.dump(predictData, predictFile)

In [ ]:
# load the data saved by the previous cell
latentData = pickle.load(open('../Results/Exp1/latentData.p','rb'))
predictData = pickle.load(open('../Results/Exp1/predictData.p','rb'))

# initialize variables to compute metrics
nTrials = len(predictData['PCA'])
dims = predictData['PCA'][0].shape[1]
results = {'PCA':{'Err':[],'Corr':[],'pval':[],'NormErr':[]}, 
           'BGPLVM':{'Err':[],'Corr':[],'pval':[],'NormErr':[]}}

# loop over the trials (trial 0 = training split, 1..N = test trials)
for nTrial in range(nTrials):
    pcaData = predictData['PCA'][nTrial]
    trueData = predictData['True'][nTrial]
    bgplvmData = predictData['BGPLVM'][nTrial]

    # computeMetrics returns (NRMSE, RMSE, [mean Pearson r, mean p-value])
    pcaNormErr, pcaErr, pcaCorr = computeMetrics(pcaData,trueData)
    bgplvmNormErr, bgplvmErr, bgplvmCorr = computeMetrics(bgplvmData,trueData)
    
    results['PCA']['Err'].append(pcaErr)
    results['PCA']['Corr'].append(pcaCorr[0])
    results['PCA']['pval'].append(pcaCorr[1])
    results['PCA']['NormErr'].append(pcaNormErr)
    
    results['BGPLVM']['Err'].append(bgplvmErr)
    results['BGPLVM']['Corr'].append(bgplvmCorr[0])
    results['BGPLVM']['pval'].append(bgplvmCorr[1])
    results['BGPLVM']['NormErr'].append(bgplvmNormErr)

    # Python 2 print statements reporting per-trial metrics
    print 'Trial: %d' % nTrial
    print 'Errs: PCA: %f, BGPLVM: %f' % (pcaErr,bgplvmErr)
    print 'Corrs: PCA: %f, BGPLVM: %f' % (pcaCorr[0],bgplvmCorr[0])
    print 'Norm Errs: PCA: %f, BGPLVM: %f' % (pcaNormErr,bgplvmNormErr)
    print 'Corr p-value: PCA: %f, BGPLVM: %f' % (pcaCorr[1],bgplvmCorr[1])

In [ ]:
# p-values for the test data
results['Stats'] = {'Err':{},'Corr':{},'NormErr':{}}

# slice [1:] drops the training trial, keeping only the 7 test trials
pcaTestErr = np.asarray(results['PCA']['Err'][1:])
pcaTestCorr = np.asarray(results['PCA']['Corr'][1:])
pcaTestNormErr = np.asarray(results['PCA']['NormErr'][1:])

bgplvmTestErr = np.asarray(results['BGPLVM']['Err'][1:])
bgplvmTestCorr = np.asarray(results['BGPLVM']['Corr'][1:])
bgplvmTestNormErr = np.asarray(results['BGPLVM']['NormErr'][1:])

# one-sided Wilcoxon signed-rank tests on paired per-trial metrics (BGPLVM vs PCA)
tTestErr, pTTestErr = wilcoxon(bgplvmTestErr, pcaTestErr)
tTestCorr, pTTestCorr = wilcoxon(bgplvmTestCorr, pcaTestCorr)
tTestNormErr, pTTestNormErr = wilcoxon(bgplvmTestNormErr, pcaTestNormErr)

results['Stats']['Err']['tStat'] = tTestErr
results['Stats']['Err']['pVal'] = pTTestErr
results['Stats']['Corr']['tStat'] = tTestCorr
results['Stats']['Corr']['pVal'] = pTTestCorr
results['Stats']['NormErr']['tStat'] = tTestNormErr
results['Stats']['NormErr']['pVal'] = pTTestNormErr
# Python 2 print statement; None means "not significant at any tabulated level"
print results['Stats']['Err']['pVal'], results['Stats']['NormErr']['pVal'], results['Stats']['Corr']['pVal']

In [ ]:
# save results pickle file
# create the output directory first (it is never created elsewhere, so the dump
# would fail on a fresh checkout) and close the handle via a with-statement
if not os.path.exists('Result'):
    os.makedirs('Result')
with open('Result/metricData.p', 'wb') as resultFile:
    pickle.dump(results, resultFile)

In [ ]:
# initialize variables to compute per-training-trajectory metrics
nTrials = len(trainTraj)
results = {'PCA':{'Err':[],'Corr':[],'pval':[],'NormErr':[]}, 
           'BGPLVM':{'Err':[],'Corr':[],'pval':[],'NormErr':[]}}

# working copies of the concatenated training reconstructions (consumed below)
pcaPred = predictData['PCA'][0].copy()
bgplvmPred = predictData['BGPLVM'][0].copy()

# loop over the trials, peeling `size` rows off the front of each prediction
# array so that each slice lines up with the corresponding training trajectory
for nTrial,size in enumerate(trainSizes):
    pcaData = pcaPred[:size,:]
    trueData = trainTraj[nTrial]
    bgplvmData = bgplvmPred[:size,:]

    # drop the rows just consumed so the next iteration starts at the next trial
    pcaPred = np.delete(pcaPred,(range(size)),axis=0)
    bgplvmPred = np.delete(bgplvmPred,(range(size)),axis=0)

    # computeMetrics returns (NRMSE, RMSE, [mean Pearson r, mean p-value])
    pcaNormErr, pcaErr, pcaCorr = computeMetrics(pcaData,trueData)
    bgplvmNormErr, bgplvmErr, bgplvmCorr = computeMetrics(bgplvmData,trueData)
    
    results['PCA']['Err'].append(pcaErr)
    results['PCA']['Corr'].append(pcaCorr[0])
    results['PCA']['NormErr'].append(pcaNormErr)
    
    results['BGPLVM']['Err'].append(bgplvmErr)
    results['BGPLVM']['Corr'].append(bgplvmCorr[0])
    results['BGPLVM']['NormErr'].append(bgplvmNormErr)

    # Python 2 print statements reporting per-trajectory metrics
    print 'Trial: %d' % nTrial
    print 'Errs: PCA: %f, BGPLVM: %f' % (pcaErr,bgplvmErr)
    print 'Corrs: PCA: %f, BGPLVM: %f' % (pcaCorr[0],bgplvmCorr[0])
    print 'Norm Errs: PCA: %f, BGPLVM: %f' % (pcaNormErr,bgplvmNormErr)

In [ ]:
# p-values for the training-trajectory data
results['Stats'] = {'Err':{},'Corr':{},'NormErr':{}}

# all 5 training trajectories (no slicing here, unlike the test-set cell)
pcaTrainErr = np.asarray(results['PCA']['Err'])
pcaTrainCorr = np.asarray(results['PCA']['Corr'])
pcaTrainNormErr = np.asarray(results['PCA']['NormErr'])

bgplvmTrainErr = np.asarray(results['BGPLVM']['Err'])
bgplvmTrainCorr = np.asarray(results['BGPLVM']['Corr'])
bgplvmTrainNormErr = np.asarray(results['BGPLVM']['NormErr'])

# one-sided Wilcoxon signed-rank tests on paired per-trajectory metrics
tTrainErr, pTTrainErr = wilcoxon(bgplvmTrainErr, pcaTrainErr)
tTrainCorr, pTTrainCorr = wilcoxon(bgplvmTrainCorr, pcaTrainCorr)
tTrainNormErr, pTTrainNormErr = wilcoxon(bgplvmTrainNormErr, pcaTrainNormErr)

results['Stats']['Err']['tStat'] = tTrainErr
results['Stats']['Err']['pVal'] = pTTrainErr
results['Stats']['Corr']['tStat'] = tTrainCorr
results['Stats']['Corr']['pVal'] = pTTrainCorr
results['Stats']['NormErr']['tStat'] = tTrainNormErr
results['Stats']['NormErr']['pVal'] = pTTrainNormErr
# Python 2 print statement; None means "not significant at any tabulated level"
print results['Stats']['Err']['pVal'], results['Stats']['NormErr']['pVal'], results['Stats']['Corr']['pVal']

In [ ]:
# save results pickle file
# create the output directory first (it is never created elsewhere, so the dump
# would fail on a fresh checkout) and close the handle via a with-statement
if not os.path.exists('Result'):
    os.makedirs('Result')
with open('Result/metricData2.p', 'wb') as resultFile:
    pickle.dump(results, resultFile)

Plotting Functions



In [ ]:
# plot colors shared by all figures below (GPy Tango palette hex strings)
red = Tango.colorsHex['mediumRed']
blue = Tango.colorsHex['mediumBlue']

In [ ]:
def plotScales(scales, options, yThresh=0.05):
    """Bar plot of normalized latent-dimension relevance scales.

    Parameters
    ----------
    scales : ndarray (qDim,)
        Normalized relevance per latent dimension (ARD weights or eigenvalues).
    options : dict
        Must provide 'title' and 'ylabel' strings.
    yThresh : float
        Relevance threshold; only used by the optional threshold line (commented out).

    Returns the matplotlib axis so callers can tweak/save the figure.
    """
    fSize = 15
    fig = plt.figure()
    ax = fig.add_subplot(111)

    # one bar per latent dimension, labeled 1..qDim for readability
    x = np.arange(1,scales.shape[0]+1)
    ax.bar(x, height=scales, width=0.8, align='center', color=blue, edgecolor='k', linewidth=1.3)        
    #ax.plot([0.4, scales.shape[0]+0.6], [yThresh, yThresh], '--', linewidth=3, color=red)
    
    # setting the bar plot parameters
    ax.set_xticklabels('')
    ax.set_xlim(.4, scales.shape[0]+.6)
    ax.tick_params(axis='both', labelsize=fSize)
    # range (not Py2-only xrange) keeps this cell portable across Python versions
    ax.set_xticks(range(1,scales.shape[0]+1))
    ax.set_title(options['title'], fontsize=fSize)
    ax.set_ylabel(options['ylabel'], fontsize=fSize)
    ax.set_xlabel('Latent Dimensions', fontsize=fSize)
    plt.tight_layout()
    return ax

In [ ]:
def pcaLatent(pcaInput, plotIndices = [0,1], maxPoints = [1000,500]):
    """Scatter plot of train/test data in two chosen PCA latent dimensions.

    Parameters
    ----------
    pcaInput : sequence of two ndarrays
        pcaInput[0] = training latent points, pcaInput[1] = test latent points.
    plotIndices : list of two ints
        Which latent dimensions to plot on x and y.
    maxPoints : list of two ints
        Maximum [train, test] points drawn (random subsample above this).

    Returns the matplotlib axis.
    """
    # plotting variable initialization
    s = 100
    fSize = 15
    resolution = 50  # NOTE(review): unused in this function (used by bgplvmLatent)

    testMarker = 'o'
    trainMarker = 'o'

    fig = plt.figure()
    ax = fig.add_subplot(111)

    # get latent space data
    testData = pcaInput[1]    
    trainData = pcaInput[0]
    
    # subsample latent points for easier visualization
    if trainData.shape[0] > maxPoints[0]:
        sample = np.random.choice(trainData.shape[0], size=maxPoints[0], replace=False)
        trainData = trainData[sample]
    
    if testData.shape[0] > maxPoints[1]:
        sample = np.random.choice(testData.shape[0], size=maxPoints[1], replace=False)
        testData = testData[sample]
    
    # per-point colors: train = blue, test = red
    testLabels = [red]*testData.shape[0]
    trainLabels = [blue]*trainData.shape[0]
        
    # variables for plotting (qDim and nSamples are kept for symmetry; unused here)
    qDim = trainData.shape[1]
    input1, input2 = plotIndices
    nSamples = trainData.shape[0]
        
    # compute plot limits from the training points, padded by 10% of the range
    xmin, ymin = trainData[:, [input1, input2]].min(0)
    xmax, ymax = trainData[:, [input1, input2]].max(0)
    x_r, y_r = xmax-xmin, ymax-ymin
    xmin -= .1*x_r
    xmax += .1*x_r
    ymin -= .1*y_r
    ymax += .1*y_r

    trainHandle = ax.scatter(trainData[:, input1], trainData[:, input2], marker=trainMarker, s=s, c=trainLabels, 
                            linewidth=.2, edgecolor='k', alpha=1.)
    testHandle = ax.scatter(testData[:, input1], testData[:, input2], marker=testMarker, s=s, c=testLabels, 
                            linewidth=.2, edgecolor='k', alpha=1.)
    
    ax.grid(b=False)
    ax.set_aspect('auto')
    ax.legend(['Train','Test'],loc=1)
    ax.tick_params(axis='both', labelsize=fSize)
    # axis labels are 1-indexed for human readers
    ax.set_xlabel('Latent Dimension %i' % (input1+1), fontsize=fSize)
    ax.set_ylabel('Latent Dimension %i' % (input2+1), fontsize=fSize)

    ax.set_xlim((xmin, xmax))
    ax.set_ylim((ymin, ymax))
    
    fig.tight_layout()
    fig.canvas.draw()
    plt.show()

    return ax

In [ ]:
def bgplvmLatent(model, testData, plotIndices = [0,1], maxPoints = [1000,500]):
    """Scatter plot of BGPLVM train/test latent points over the model's
    predictive-variance background in two chosen latent dimensions.

    Parameters
    ----------
    model : GPy BayesianGPLVM
        Trained model; training latent means are read from model.X.mean.
    testData : ndarray
        Test latent points (rows) to overlay.
    plotIndices : list of two ints
        Which latent dimensions to plot on x and y.
    maxPoints : list of two ints
        Maximum [train, test] points drawn (random subsample above this).

    Returns the matplotlib axis.
    """
    # plotting variable initialization
    s = 100
    fSize = 15
    resolution = 50  # grid resolution of the variance background

    testMarker = 'o'
    trainMarker = 'o'
    
    fig = plt.figure()
    ax = fig.add_subplot(111)
    
    # get latent space plot parameters
    trainData = model.X.mean
    qDim = model.X.mean.shape[1]
    input1, input2 = plotIndices
    nSamples = model.X.mean.shape[0]
    
    # subsample latent points for easier visualization
    if trainData.shape[0] > maxPoints[0]:
        sample = np.random.choice(trainData.shape[0], size=maxPoints[0], replace=False)
        trainData = trainData[sample]
    
    if testData.shape[0] > maxPoints[1]:
        sample = np.random.choice(testData.shape[0], size=maxPoints[1], replace=False)
        testData = testData[sample]
    
    
    # per-point colors: train = blue, test = red
    testLabels = [red]*testData.shape[0]
    trainLabels = [blue]*trainData.shape[0]
    
    # compute plot limits from the training points, padded by 10% of the range
    xmin, ymin = trainData[:, [input1, input2]].min(0)
    xmax, ymax = trainData[:, [input1, input2]].max(0)
    x_r, y_r = xmax-xmin, ymax-ymin
    xmin -= .1*x_r
    xmax += .1*x_r
    ymin -= .1*y_r
    ymax += .1*y_r

    # negative log predictive variance at a grid point, with the non-plotted
    # latent dimensions fixed at zero; var[:, :1] keeps the first output column
    def plotFunction(x):
        Xtest_full = np.zeros((x.shape[0], qDim))
        Xtest_full[:, [input1, input2]] = x
        _, var = model.predict(np.atleast_2d(Xtest_full))
        var = var[:, :1]
        return -np.log(var)

    # evaluate the variance on a resolution x resolution grid over the plot area
    x, y = np.mgrid[xmin:xmax:1j*resolution, ymin:ymax:1j*resolution]
    gridData = np.hstack((x.flatten()[:, None], y.flatten()[:, None]))
    gridVariance = (plotFunction(gridData)).reshape((resolution, resolution))

    # NOTE(review): plt.imshow draws on the current axes -- presumably ax; confirm
    varianceHandle = plt.imshow(gridVariance.T, interpolation='bilinear', origin='lower', cmap=cm.gray,
                                extent=(xmin, xmax, ymin, ymax))

    # test and training plotting
    trainHandle = ax.scatter(trainData[:, input1], trainData[:, input2], marker=trainMarker, s=s, c=trainLabels, 
                            linewidth=.2, edgecolor='k', alpha=1.)
    testHandle = ax.scatter(testData[:, input1], testData[:, input2], marker=testMarker, s=s, c=testLabels, 
                            linewidth=.2, edgecolor='k', alpha=1.)
    
    ax.grid(b=False)
    ax.set_aspect('auto')
    ax.legend(['Train','Test'],loc=1)
    ax.tick_params(axis='both', labelsize=fSize)
    # axis labels are 1-indexed for human readers
    ax.set_xlabel('Latent Dimension %i' % (input1+1), fontsize=fSize)
    ax.set_ylabel('Latent Dimension %i' % (input2+1), fontsize=fSize)

    ax.set_xlim((xmin, xmax))
    ax.set_ylim((ymin, ymax))

    fig.canvas.draw()
    fig.tight_layout()
    fig.canvas.draw()
    plt.show()

    return ax

In [ ]:
# function to plot error bars
def plotErrorBars(mE, sE, xLabels, legend, colors, ylabel='NRMSE', 
                  legendLoc=1, title='Comparison', ylimit=[0.,1.], 
                  xlimit=[-0.1,2.1]):
    """Grouped bar chart with error bars comparing methods across conditions.

    mE, sE : ndarrays of shape (nMethods, nConditions) holding means and spreads.
    xLabels / legend / colors : per-condition tick labels, per-method legend
    entries and bar colors. Returns the matplotlib axis.
    """
    fontSize = 15
    nConditions = mE.shape[1]
    barWidth = 0.8 / mE.shape[0]

    groupPos = np.arange(nConditions)
    fig, ax = plt.subplots()

    # one bar series per method, offset within each condition group
    for m in range(mE.shape[0]):
        bars = ax.bar(0.3 + groupPos + m * barWidth, mE[m, :], yerr=sE[m, :],
                      width=barWidth, color=colors[m], ecolor='k')

    ax.set_ylim(ylimit)
    ax.set_xlim(xlimit)
    ax.set_xticks(groupPos + 0.5)
    ax.set_title(title, fontsize=fontSize)
    ax.set_ylabel(ylabel, fontsize=fontSize)
    ax.legend(legend, loc=legendLoc, fontsize=fontSize)
    ax.set_xticklabels(xLabels, fontsize=fontSize)

    # enlarge the y tick labels to match the rest of the figure
    for tick in ax.yaxis.get_major_ticks():
        tick.label.set_fontsize(fontSize)

    plt.tight_layout()
    plt.show()
    return ax

Model Plots



In [ ]:
# load the saved metrics, latent embeddings, predictions and trained model
# NOTE(review): metricData lives under 'Result/' while the rest uses
# '../Results/Exp1/' -- presumably intentional, but worth confirming
metricData = pickle.load(open('Result/metricData.p','rb'))
latentData = pickle.load(open('../Results/Exp1/latentData.p','rb'))
predictData = pickle.load(open('../Results/Exp1/predictData.p','rb'))

# Performing latent point inference
bgplvmModel = pickle.load(open('../Models/Exp1/model.p','rb'))

In [ ]:
# ARD weight bar plot plus two latent-space views of the BGPLVM model
options = {'title':'','ylabel':'ARD Weight'}
plotScales(latentData['BGPLVMScales'], options)
plt.savefig('Result/bgplvmLatentScales.pdf', format='pdf')

# latent dimensions 1-2 (default plotIndices)
bgplvmLatent(bgplvmModel, latentData['BGPLVM'])
plt.savefig('Result/bgplvmLatentSpace1.pdf', format='pdf')

# latent dimensions 1-3
bgplvmLatent(bgplvmModel, latentData['BGPLVM'], plotIndices=[0,2])
plt.savefig('Result/bgplvmLatentSpace2.pdf',format='pdf')

In [ ]:
# eigenvalue bar plot plus two latent-space views of the PCA baseline
options = {'title':'','ylabel':'Eigen Value/Variance'}
plotScales(latentData['PCAScales'],options,yThresh=0.05)
plt.savefig('Result/pcaLatentScales.pdf', format='pdf')

# [train, test] latent point arrays expected by pcaLatent
pcaPlot = [latentData['PCATrain'],latentData['PCA']]
# latent dimensions 1-2 (default plotIndices)
pcaLatent(pcaPlot)
plt.savefig('Result/pcaLatentSpace1.pdf', format='pdf')

# latent dimensions 1-3
pcaLatent(pcaPlot, plotIndices=[0,2])
plt.savefig('Result/pcaLatentSpace2.pdf', format='pdf')

In [ ]:
# shared styling for the PCA-vs-BGPLVM comparison bar charts below
colors = ['r','b']
legend = ['PCA','BGPLVM']
xLabels = ['Train','Test']

In [ ]:
# RMSE comparison: train bar (single value, no spread) and mean/std over the test trials
pcaME = np.asarray([metricData['PCA']['Err'][0],np.asarray(metricData['PCA']['Err'][1:]).mean()])
bgplvmME = np.asarray([metricData['BGPLVM']['Err'][0],np.asarray(metricData['BGPLVM']['Err'][1:]).mean()])

pcaSE = np.asarray([0.0,np.asarray(metricData['PCA']['Err'][1:]).std()])
bgplvmSE = np.asarray([0.0,np.asarray(metricData['BGPLVM']['Err'][1:]).std()])

mE = np.asarray([pcaME,bgplvmME])
sE = np.asarray([pcaSE,bgplvmSE])
ax = plotErrorBars(mE, sE, xLabels, legend, colors, ylabel='RMSE', title='', ylimit=[0.01,0.03], legendLoc=2)

# significance annotation ('**') with a bracket over the test-condition bars
x = 1.5
y = max(bgplvmME[1]+bgplvmSE[1]/2, pcaME[1]+pcaSE[1]/2)
dx = abs(0.4)

text = '**'
ax.annotate(text, xy=(1.47,1.07*y), fontsize=15, fontweight='bold')
props = {'connectionstyle':'bar', 'arrowstyle':'-', 'shrinkA':15, 'shrinkB':15, 'lw':2}
ax.annotate('', xy=(1.3,y), xytext=(1.7,y), arrowprops=props)

plt.savefig('Result/bgplvmRMSE.pdf', format='pdf')

In [ ]:
# NRMSE comparison: train bar (single value, no spread) and mean/std over the test trials
pcaME = np.asarray([metricData['PCA']['NormErr'][0],np.asarray(metricData['PCA']['NormErr'][1:]).mean()])
bgplvmME = np.asarray([metricData['BGPLVM']['NormErr'][0],np.asarray(metricData['BGPLVM']['NormErr'][1:]).mean()])

pcaSE = np.asarray([0.0,np.asarray(metricData['PCA']['NormErr'][1:]).std()])
bgplvmSE = np.asarray([0.0,np.asarray(metricData['BGPLVM']['NormErr'][1:]).std()])

mE = np.asarray([pcaME,bgplvmME])
sE = np.asarray([pcaSE,bgplvmSE])
ax = plotErrorBars(mE, sE, xLabels, legend, colors, ylabel='NRMSE', title='', ylimit=[0.05,0.25], legendLoc=2)

# significance annotation ('**') with a bracket over the test-condition bars
x = 1.5
y = max(bgplvmME[1]+bgplvmSE[1]/2, pcaME[1]+pcaSE[1]/2)
dx = abs(0.4)

text = '**'
ax.annotate(text, xy=(1.47,1.07*y), fontsize=15, fontweight='bold')
props = {'connectionstyle':'bar', 'arrowstyle':'-', 'shrinkA':15, 'shrinkB':15, 'lw':2}
ax.annotate('', xy=(1.3,y), xytext=(1.7,y), arrowprops=props)

plt.savefig('Result/bgplvmNRMSE.pdf', format='pdf')

In [ ]:
# Pearson correlation comparison: train bar and mean/std over the test trials
pcaMC = np.asarray([metricData['PCA']['Corr'][0],np.asarray(metricData['PCA']['Corr'][1:]).mean()])
bgplvmMC = np.asarray([metricData['BGPLVM']['Corr'][0],np.asarray(metricData['BGPLVM']['Corr'][1:]).mean()])

pcaSC = np.asarray([0.0,np.asarray(metricData['PCA']['Corr'][1:]).std()])
bgplvmSC = np.asarray([0.0,np.asarray(metricData['BGPLVM']['Corr'][1:]).std()])

mC = np.asarray([pcaMC,bgplvmMC])
sC = np.asarray([pcaSC,bgplvmSC])
ax = plotErrorBars(mC, sC, xLabels, legend, colors, ylabel='Correlation', title='', ylimit=[0.5,0.85], legendLoc=1)

# significance annotation ('**') with a bracket over the test-condition bars
x = 1.5
y = max(bgplvmMC[1]+bgplvmSC[1]/2, pcaMC[1]+pcaSC[1]/2)
dx = abs(0.4)

text = '**'
ax.annotate(text, xy=(1.47,1.05*y), fontsize=15, fontweight='bold')
props = {'connectionstyle':'bar', 'arrowstyle':'-', 'shrinkA':15, 'shrinkB':15, 'lw':2}
ax.annotate('', xy=(1.3,y), xytext=(1.7,y), arrowprops=props)

plt.savefig('Result/bgplvmCorr.pdf', format='pdf')