In [ ]:
import menpo.io as mio
from menpo.feature import no_op, igo, hog, sparse_hog, lbp

# function to load and pre-process a landmarked image database
def load_database(path_to_images, crop_percentage, max_images=None):
    images = []
    # load landmarked images
    for i in mio.import_images(path_to_images, max_images=max_images, verbose=True):
        # crop image
        i.crop_to_landmarks_proportion_inplace(crop_percentage)
        
        # convert it to grayscale if needed
        if i.n_channels == 3:
            i = i.as_greyscale(mode='luminosity')
            
        # compute the feature representation (no_op = raw pixels) and append it
        images.append(no_op(i))
    return images
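
The first cell imports igo, hog, sparse_hog and lbp as well, but only no_op (raw pixels) is actually applied. As a sketch only (hypothetical helper, reusing the same menpo calls as load_database above), the feature could be made a parameter so the rest of the notebook stays unchanged:

In [ ]:
import menpo.io as mio
from menpo.feature import no_op

# same loading/cropping/greyscale logic as load_database, with the feature as a parameter
def load_database_with_features(path_to_images, crop_percentage, features=no_op,
                                max_images=None):
    images = []
    for i in mio.import_images(path_to_images, max_images=max_images, verbose=True):
        # crop around the landmarks
        i.crop_to_landmarks_proportion_inplace(crop_percentage)
        # convert to greyscale if needed
        if i.n_channels == 3:
            i = i.as_greyscale(mode='luminosity')
        # compute the chosen feature (e.g. no_op, hog, igo, lbp) and store it
        images.append(features(i))
    return images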

In [ ]:
images = load_database('/mnt/data/nontas/train200/', 0.5)
n_channels = images[0].n_channels
patch_shape = (17, 17)

In [ ]:
import numpy as np
from menpo.visualize import print_dynamic, progress_bar_str

def extract_patch_vectors(image, group, label, patch_size,
                          normalize_patches=False):
    r"""
    returns a numpy.array of size (16*16*36) x 68
    """
    # extract patches
    patches = image.extract_patches_around_landmarks(
        group=group, label=label, patch_size=patch_size,
        as_single_array=not normalize_patches)

    # vectorize patches
    if normalize_patches:
        # initialize output matrix
        patches_vectors = np.empty(
            (np.prod(patches[0].shape) * patches[0].n_channels, len(patches)))

        # extract each vector
        for p in range(len(patches)):
            # normalize part
            patches[p].normalize_norm_inplace()

            # extract vector
            patches_vectors[:, p] = patches[p].as_vector()
    else:
        # initialize output matrix
        patches_vectors = np.empty((np.prod(patches.shape[1:]),
                                    patches.shape[0]))

        # extract each vector
        for p in range(patches.shape[0]):
            patches_vectors[:, p] = patches[p, ...].ravel()

    # return vectorized parts
    return patches_vectors
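
# Quick self-check (illustrative only, random data with made-up sizes): the column-filling
# loop in the else-branch of extract_patch_vectors is equivalent to one reshape + transpose.
_fake = np.random.rand(68, 1, 17, 17)   # pretend (n_points, n_channels, h, w) patches
_loop = np.empty((np.prod(_fake.shape[1:]), _fake.shape[0]))
for _p in range(_fake.shape[0]):
    _loop[:, _p] = _fake[_p, ...].ravel()
assert np.array_equal(_loop, _fake.reshape(_fake.shape[0], -1).T)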


def _warp_images_joan(images, group, label, patch_size, level_str, verbose):
    r"""
    returns numpy.array of size (16*16*36) x n_images x 68
    """
    # find length of each patch and number of points
    patches_len = np.prod(patch_size) * images[0].n_channels
    n_points = images[0].landmarks[group][label].n_points

    # initialize the output array: (patches_len, n_points, n_images)
    patches_array = np.empty((patches_len, n_points, len(images)))

    # extract parts
    for c, i in enumerate(images):
        # print progress
        if verbose:
            print_dynamic('{}Extracting patches from images - {}'.format(
                level_str,
                progress_bar_str(float(c + 1) / len(images),
                                 show_bar=False)))

        # extract patches from this image
        patches_vectors = extract_patch_vectors(
            i, group=group, label=label, patch_size=patch_size,
            normalize_patches=False)

        # store
        patches_array[..., c] = patches_vectors

    # move the image axis in front of the points axis: (patches_len, n_images, n_points)
    return np.rollaxis(patches_array, 2, 1)

def _build_appearance_model_joan(warped_images, n_appearance_parameters, level_str, verbose):
    # build appearance model
    if verbose:
        print_dynamic('{}Training appearance distribution per '
                      'patch'.format(level_str))
    n_points = warped_images.shape[-1]
    patch_len = warped_images.shape[0]
    app_len = patch_len * n_points
    app_mean = np.empty(app_len)
    app_cov = np.zeros((app_len, app_len))
    for e in range(n_points):
        # print progress
        if verbose:
            print_dynamic('{}Training appearance distribution '
                          'per patch - {}'.format(
                          level_str,
                          progress_bar_str(float(e + 1) / n_points,
                                           show_bar=False)))
        # find indices in target mean and covariance matrices
        i_from = e * patch_len
        i_to = (e + 1) * patch_len
        # compute and store mean
        app_mean[i_from:i_to] = np.mean(warped_images[..., e], axis=1)
        # compute the patch covariance and store its rank-truncated (pseudo-)inverse
        cov_mat = np.cov(warped_images[..., e])
        u, s, vt = np.linalg.svd(cov_mat)
        u = u[:, :n_appearance_parameters]
        s = s[:n_appearance_parameters]
        vt = vt[:n_appearance_parameters, :]
        app_cov[i_from:i_to, i_from:i_to] = u.dot(np.diag(1 / s)).dot(vt)
    return app_mean, app_cov
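
The SVD block above keeps the leading n_appearance_parameters components and stores, per patch, a rank-truncated (pseudo-)inverse of the patch covariance. A small self-contained check with random data and made-up sizes (not part of the original notebook): when no components are dropped, U diag(1/s) Vt coincides with the Moore-Penrose pseudo-inverse.

In [ ]:
import numpy as np

x = np.random.rand(10, 50)                 # 10 variables, 50 observations
cov_mat = np.cov(x)                        # (10, 10), full rank here
u, s, vt = np.linalg.svd(cov_mat)
k = 10                                     # keep everything; the builder truncates to n_appearance_parameters
inv_approx = u[:, :k].dot(np.diag(1.0 / s[:k])).dot(vt[:k, :])
print np.allclose(inv_approx, np.linalg.pinv(cov_mat))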

In [ ]:
from cvpr15.builder import _build_appearance_model, _warp_images
import timeit

In [ ]:
%%time
# %%time (rather than %%timeit) so that warped_images1/app1 stay in the namespace for the cells below
warped_images1 = _warp_images_joan(images, 'PTS', 'all', patch_shape, 'Joan: ', True)
app1 = _build_appearance_model_joan(warped_images1, 200, 'Joan: ', True)

In [ ]:
%%time
# %%time keeps warped_images2/app2 available for the comparison cells below
warped_images2 = _warp_images(images, 'PTS', 'all', patch_shape, 'Nontas: ', True)
app2 = _build_appearance_model(warped_images2, 68, patch_shape, n_channels, 200, 'Nontas: ', True)

In [ ]:
print app1[0].shape, app1[1].shape
print app2[0].shape, app2[1].shape

In [ ]:
print np.array_equal(app1[0], app2[0])
print np.array_equal(app1[1], app2[1])

In [ ]:
print warped_images1.shape
print warped_images2.shape

In [ ]:
p = 37
i = 10

from menpo.image import Image
import numpy as np
%matplotlib inline
m1 = warped_images1[:, i, p].reshape(patch_shape[0], patch_shape[1], n_channels)
Image(m1).view(channels=0)

patch_len = np.prod(patch_shape) * n_channels
i_from = p * patch_len
i_to = (p + 1) * patch_len
m2 = warped_images2[i_from:i_to, i].reshape(patch_shape[0], patch_shape[1], n_channels)
Image(m2).view_new(channels=0)

print np.array_equal(m1, m2)

In [ ]:
e = 37
d1 = np.cov(warped_images1[..., e])

patch_len = np.prod(patch_shape) * n_channels
i_from = e * patch_len
i_to = (e + 1) * patch_len
d2 = np.cov(warped_images2[i_from:i_to, :])

print np.array_equal(d1, d2)
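
Note that np.cov treats each row as a variable and each column as an observation, so both d1 and d2 above are patch_len x patch_len matrices. A toy check with made-up sizes:

In [ ]:
import numpy as np

x = np.random.rand(4, 30)    # 4 variables, 30 observations
print np.cov(x).shape        # (4, 4)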

In [ ]:
patch_len1 = warped_images1.shape[0]
patch_len2 = np.prod(patch_shape) * n_channels

print patch_len1, patch_len2
