In [ ]:
import scipy.io
import numpy as np
from menpo.warp import warp
from menpo.warp.base import map_coordinates_interpolator, matlab_interpolator
from menpo.transform import Affine
from menpo.align.lucaskanade import InverseCompositional
from menpo.align.lucaskanade.similaritymeasure import *
import matplotlib.pyplot as plt
from menpo.convolution import log_gabor
from collections import OrderedDict

In [ ]:
def compute_rms_point_error(test_pts, template_affine, M):
    # RMS distance between the perturbed target points and the template
    # control points mapped through the current affine transform M
    iteration_pts = M.apply(template_affine.T)
    diff_pts = test_pts - iteration_pts
    return np.sqrt(np.mean(diff_pts ** 2))
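
A quick sanity check of this error measure (a minimal sketch on toy data; the points and the shift below are hypothetical values, not experiment data):

In [ ]:
# With an identity warp the recovered points equal the template points, so the
# error reduces to the RMS difference between test and template coordinates
toy_pts = np.array([[0.0, 0.0, 0.0],
                    [10.0, 0.0, 0.0],
                    [5.0, 10.0, 0.0],
                    [5.0, 5.0, 10.0]])
identity = Affine(np.eye(4))
# Shifting every point by 1 along x gives sqrt(4 * 1 / 12) ~ 0.577
print compute_rms_point_error(toy_pts + [1.0, 0.0, 0.0], toy_pts.T, identity)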

In [ ]:
def my_test_affine(tdata, pt_offsets, alg_list, n_iters, n_freq_tests, spatial_sigma, interpolator, verbose):
    # tdata has three fields:
    #     tdata.img1      greyscale target image
    #     tdata.img2      greyscale template image
    #     tdata.tmplt     [x_start, y_start, z_start, x_end, y_end, z_end]
    #                     template cuboid corners
    #
    # pt_offsets is an N x 12 matrix of (x1, y1, z1, ..., x4, y4, z4) deltas
    # for each of the four affine control points. Zero mean, unit variance:
    #     pt_offsets = np.random.randn(N, 12)
    #
    # alg_list is an ordered dictionary of similarity measures to use:
    #   {'Measure name': (MeasureClass, constructor args, constructor keyword arguments)}
    # e.g.
    #   {'LeastSquares': (LeastSquares, None, None)}

    # Target affine warp control points - a tetrahedron on the template cuboid
    
    minX = tdata.tmplt[0]
    minY = tdata.tmplt[1]
    minZ = tdata.tmplt[2]
    maxX = tdata.tmplt[3]
    maxY = tdata.tmplt[4]
    maxZ = tdata.tmplt[5]


    target_affine = np.array([[minX, minY, minZ],
                              [maxX, minY, minZ],
                              [minX + ((maxX - minX) / 2.0) - 0.5, maxY, minZ],
                              [minX + ((maxX - minX) / 2.0) - 0.5, minY + ((maxY - minY) / 2.0) - 0.5, maxZ]])

    # Template image dimensions (formerly had +1)
    template_width = maxX - minX
    template_height = maxY - minY
    template_depth = maxZ - minZ
    tmplt_shape = (template_height, template_width, template_depth)

    # Template affine warp control points
    template_affine = np.array([[0,                  0,                   0],      
                                [template_width,     0,                   0],
                                [template_width / 2.0, template_height,   0],
                                [template_width / 2.0, template_height / 2.0, template_depth]]).T

    # Initial warp parameters. Unperturbed translation (the -1 converts the
    # 1-based corner coordinates stored in the .mat file to 0-based indices)
    p_init = np.eye(4)
    p_init[:, 3] = [tdata.tmplt[0] - 1, tdata.tmplt[1] - 1,
                    tdata.tmplt[2] - 1, 1]

    # Translate by 0.5 pixels to avoid identity warp. Warping introduces a little
    # smoothing and this avoids the case where the first iteration for a forwards
    # algorithm is on the "unsmoothed" unwarped image
    p_init[0, 3] += 0.5
    p_init[1, 3] += 0.5
    p_init[2, 3] += 0.5

    # Pick a total of n_freq_tests point offsets from pt_offsets at random
    ind = np.random.randint(0, pt_offsets.shape[0], n_freq_tests)
    # Uncomment this line to randomise the noise added
    # pt_offsets1 = pt_offsets[ind, :]
    pt_offsets1 = pt_offsets

    # Scale point offsets to have required sigma
    pt_offsets1 = pt_offsets1 * spatial_sigma

    img = tdata.img1
    results = {}

    if verbose:
        plt.close()
        f = plt.figure()
        plt.ion()
        # matplotlib subplot indices are 1-based
        f.add_subplot(2, 1, 1)
        img_plot = plt.imshow(img[:, :, 0], cmap=plt.cm.Greys_r)
        f.add_subplot(2, 1, 2)

    # Run
    for offset_idx in xrange(n_freq_tests):
        # Test points: apply current point offset to target points
        test_pts = target_affine + np.reshape(pt_offsets1[offset_idx, :], [4,3])
        # Solve for the affine warp taking the template control points on to
        # the test points: M satisfies M * [template; 1] = [test_pts; 1] in
        # homogeneous coordinates, i.e. M1 * M.T = M2
        M1 = np.vstack([template_affine, np.ones([1, template_affine.shape[1]])]).T
        M2 = np.vstack([test_pts.T, np.ones([1, test_pts.shape[0]])]).T
        M = np.linalg.solve(M1, M2).T

        # Warp original image to get test "template" image
        tmplt = warp(tdata.img2, tmplt_shape, Affine(M), interpolator=interpolator)
        if verbose:
            tmplt_plot = plt.imshow(tmplt[:, :, 0], cmap=plt.cm.Greys_r)
            print 'Initial RMS: {0}'.format(compute_rms_point_error(test_pts, template_affine, Affine(p_init)))

        # Run each algorithm
        for sim_measure in alg_list:
            # Allow the passing of arguments in to the instantiated class
            clz = alg_list[sim_measure][0]
            args = alg_list[sim_measure][1]
            kwargs = alg_list[sim_measure][2]

            args = args if args is not None else []
            kwargs = kwargs if kwargs is not None else {}
            metric = clz(*args, **kwargs)

            final_transform = InverseCompositional(img, tmplt, metric, Affine(p_init), interpolator=interpolator).align(max_iters=n_iters)
            rms_pt_error = compute_rms_point_error(test_pts, template_affine, final_transform)

            if verbose:
                print '{0}: {1}'.format(sim_measure, rms_pt_error)

                img_plot.set_array(img[:, :, 0])
                tmplt_plot.set_array(tmplt[:, :, 0])
                plt.draw()

            results.setdefault(sim_measure, []).append(rms_pt_error)

    return results
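
The four-point affine solve inside my_test_affine can be verified in isolation. This sketch (hypothetical toy points and warp, not experiment data) maps the control points through a known 3D affine warp A and recovers it with the same np.linalg.solve construction:

In [ ]:
src = np.array([[0.0, 0.0, 0.0],
                [10.0, 0.0, 0.0],
                [5.0, 10.0, 0.0],
                [5.0, 5.0, 10.0]]).T                   # 3 x 4, like template_affine
A = np.eye(4)
A[:3, :3] = [[1.1, 0.0, 0.0],
             [0.2, 0.9, 0.0],
             [0.0, 0.1, 1.0]]
A[:3, 3] = [2.0, -1.0, 0.5]                            # known ground-truth warp
src_h = np.vstack([src, np.ones([1, src.shape[1]])])   # homogeneous, 4 x 4
dst = A.dot(src_h)[:3].T                               # warped points, 4 x 3
M1 = np.vstack([src, np.ones([1, src.shape[1]])]).T
M2 = np.vstack([dst.T, np.ones([1, dst.shape[0]])]).T
M = np.linalg.solve(M1, M2).T
print np.allclose(M, A)                                # True: the warp is recovered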

In [ ]:
# Load datasets
np.set_printoptions(suppress=False, linewidth=600)
pt_offset = scipy.io.loadmat('/home/pts08/Dropbox/lk/face_recognition_2011/data/affine_pt_offset_3d.mat')['pt_offset']
visual_human = scipy.io.loadmat('/home/pts08/Dropbox/lk/face_recognition_2011/data/visual_human_small.mat')

coords = visual_human['tmplt1'][0]
tmplt = np.array(visual_human['tmplt_img'], dtype=np.float64)

# Pre-compute template dimensions
# (coords is [x_start, y_start, z_start, x_end, y_end, z_end])
tmplt_width = coords[3] - coords[0]
tmplt_height = coords[4] - coords[1]
tmplt_depth = coords[5] - coords[2]

# Set up experiment variables
verbose = True
n_iters = 30                     # Number of gradient descent iterations
n_freq_tests = 30                # Number of frequency of convergence tests
max_spatial_error = 3.0          # Max location error for deciding convergence
all_spc_sig = np.arange(1, 11)   # All spatial sigmas, 1 to 10 inclusive
interpolator = map_coordinates_interpolator
# Gabor variables
blank_image = np.ones([tmplt_height, tmplt_width, tmplt_depth])

alg_list = OrderedDict([('LeastSquares', (LeastSquares, None, None)),
                        ('ECC', (ECC, None, None)),
                        # ('IRLS', (IRLS, None, None)),
                        ('GaborFourier', (GaborFourier, None, None)),
                        ('GradientImages', (GradientImages, None, None)),
                        ('GradientCorrelation', (GradientCorrelation, None, None))])

results = np.zeros([len(all_spc_sig), len(alg_list)])
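
Each alg_list value is a (class, args, kwargs) triple that my_test_affine unpacks when instantiating a measure. A minimal sketch of that dispatch, using a hypothetical stand-in class (DummyMeasure is not part of menpo):

In [ ]:
class DummyMeasure(object):
    def __init__(self, shape=None, filter_bank=None):
        self.shape = shape
        self.filter_bank = filter_bank

clz, args, kwargs = (DummyMeasure, [(64, 64, 64)], {'filter_bank': None})
metric = clz(*(args if args is not None else []),
             **(kwargs if kwargs is not None else {}))
print metric.shape   # prints (64, 64, 64)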

In [ ]:
# Run experiment
# Simple attribute container standing in for the MATLAB tdata struct
class TData(object):
    pass

tdata = TData()
tdata.img1 = tmplt
tdata.img2 = tmplt
tdata.tmplt = coords

# Matrix S for Gabor-Fourier method
# Thanks to Peter Kovesi's Gabor Filters
# http://www.csse.uwa.edu.au/~pk/
S = log_gabor(blank_image, num_scales=32)[2]
alg_list['GaborFourier'] = (GaborFourier, [blank_image.shape], {'filter_bank': S})

# Run tests
for sigma_ind, current_sigma in enumerate(all_spc_sig):
    res = my_test_affine(tdata, pt_offset, alg_list, n_iters, n_freq_tests, current_sigma, interpolator, verbose)

    for measure_ind, sim_measure in enumerate(alg_list):
        measure_results = res[sim_measure]
        # Count the tests that converged to within the error threshold
        n_converge = sum(1 for error in measure_results if error < max_spatial_error)
        results[sigma_ind, measure_ind] = n_converge


# Save out results just in case
scipy.io.savemat('3dresults.mat', {'results': results})

In [ ]:
# Plot results
# Convert convergence counts to frequency of convergence per (sigma, measure)
mean_results = results / float(n_freq_tests)


line_styles = ['k--D', 'y:^', 'r:*', 'g:^', 'b-s']
lines = []
for i in xrange(mean_results.shape[1]):
    lines.append(all_spc_sig)
    lines.append(mean_results[:, i])
    lines.append(line_styles[i])

p = plt.plot(*lines)

legend_labels = list(alg_list.keys())
plt.yticks(np.linspace(0, 1, 11))
plt.xticks(all_spc_sig)
plt.ylabel('Frequency of Convergence')
plt.xlabel('Point Standard Deviation')
plt.legend(p, legend_labels)
plt.title('Visual Human: with Smoothing')

plt.show()