In [3]:
import pp, sys
from sklearn.datasets import fetch_olivetti_faces
from sklearn.preprocessing import StandardScaler
from sklearn.cross_validation import train_test_split

import OVFM.Model as md
import OVFM.FeatureMap as fm
import OVFM.Risk as rsk
import OVFM.LearningRate as lr
import OVFM.DataGeneration as dg
import OVFM.SGD as sgd

import numpy as np
import scipy as sp
import time as time

import matplotlib.pyplot as plt
import matplotlib.cm as cm

from sklearn.ensemble import ExtraTreesRegressor
from sklearn.linear_model import RidgeCV

In [4]:
def generate_features_train( nb_points, nb_gaussian, d, sigma = None ):
    
    nb_points = nb_points / nb_gaussian
    
    if sigma is None:
        sigma = []
    # sequence of random pd matrix
        for i in xrange( nb_gaussian ):
            temp = 2 * np.random.rand( d, d ) - 1
            temp = np.dot( temp, temp.T )
            sigma.append( temp / np.linalg.norm( temp, 2 ) )

    # Generate centroids
    centers = [ 10 * np.random.rand( d ) for i in xrange( nb_gaussian ) ]
    
    # Generate data
    gen = [ np.random.multivariate_normal( centers[ i ], sigma[ i ], nb_points ) for i in xrange( nb_gaussian ) ]
    return sklearn.preprocessing.MinMaxScaler( ( -1, 1 ) ).fit_transform( np.concatenate( gen, axis = 0 ) )
    
def generate_features_test( nb_points, d ):
    return sklearn.preprocessing.MinMaxScaler( ( -1, 1 ) ).fit_transform( np.random.rand( nb_points, d ) )

In [5]:
n = 1e2
p = 50
d = 50
x = 2 * np.random.random( ( n, d ) ) - 1


/Library/Python/2.7/site-packages/ipykernel/__main__.py:4: DeprecationWarning: using a non-integer number instead of an integer will result in an error in the future

In [6]:
A = 2 * np.random.rand( p, p ) - 1
A = np.dot( A, A.T )
U, S, V = np.linalg.svd( A )
A = np.dot( S[ :30 ] * U[ :, :30 ], V[ :30, : ] )
A = A / np.linalg.norm( A, 2 )
# A = np.eye( p )
sigma_b = np.median( sp.spatial.distance.pdist( x ) )
gamma_b = .1 / ( 2 * sigma_b ** 2 )
model_gen = md.Model( fm.DecomposableFF( gamma_b, d, 1000, A, eps_or_rank = 1e-3 ) )

In [7]:
model_gen.coefs2D = np.random.normal( 0, 1, model_gen.coefs2D.shape )
y = model_gen( x.T ).T

In [8]:
A = 2 * np.random.rand( p, p ) - 1
A = np.dot( A, A.T )
A = A / np.linalg.norm( A, 2 )
yn = y + 0. * np.std( y ) * np.random.multivariate_normal( np.zeros( p ), A, y.shape[ 0 ] )

X_train, X_test, y_train, y_test = train_test_split( x, yn, train_size = .75 )
print np.std( y )


0.497315197072

In [9]:
# gammas = [ 0.1 * gamma, gamma, 10 * gamma ]
modules = ( 'OVFM.Model', 'OVFM.FeatureMap', 'OVFM.Risk', 'OVFM.LearningRate', 'OVFM.SGD', 'numpy', 'sklearn.cross_validation' )

def MSE( p, y ):
#         with warnings.catch_warnings():
#             warnings.filterwarnings('error')
#             try:
#                 return np.mean( ( p - y ) ** 2 )
#             except Warning:
#                 return np.inf
    return numpy.mean( ( p - y ) ** 2 )

def RMSE( p, y ):
#         with warnings.catch_warnings():
#             warnings.filterwarnings('error')
#             try:
#                 return np.sqrt( MSE( p, y ) )
#             except Warning:
#                 return np.inf
    return numpy.sqrt( numpy.mean( ( p - y ) ** 2 ) )

def dojob( c_gamma, c_lambda, c_D, c_d, c_p, X_train, y_train ):
                    
    best_err = numpy.full( ( 10, 3 ), numpy.inf )
    best_eta0 = numpy.full( ( 10, 3 ), numpy.inf )
    for fold in xrange( 10 ):
        c_X_train, c_X_validation, c_y_train, c_y_validation = sklearn.cross_validation.train_test_split( X_train, y_train, train_size = 0.666667 )
    
        A = numpy.cov( c_y_train.T )
        A = A / numpy.linalg.norm( A )
        models = [
        OVFM.Model.Model( OVFM.FeatureMap.DecomposableFF( c_gamma, c_d, c_D, numpy.eye( c_p ), eps_or_rank = 0 ) ),
        OVFM.Model.Model( OVFM.FeatureMap.DecomposableFF( c_gamma, c_d, c_D, A, eps_or_rank = 0 ) ),
        OVFM.Model.Model( OVFM.FeatureMap.DecomposableFF( c_gamma, c_d, c_D, A, eps_or_rank = 1e-3 ) ) ] 
        
        risk = OVFM.Risk.ElasticNet( c_lambda, 0, 0 )
        lb = OVFM.LearningRate.Constant( 0. ) # no bias

        for i, c_model in enumerate( models ):
            for eta0 in numpy.logspace( 3, -3, 10, base = numpy.e ):
                lc = OVFM.LearningRate.InvScaling( eta0, c_lambda, 1 )
                opt = OVFM.SGD.SGD( risk, c_X_train.shape[ 0 ], lc, lb, 1, 0 )
                c_model.reset( )
                try:
                    opt.fit( c_model, c_X_train.T, c_y_train.T )
                    err = RMSE( c_model( c_X_validation.T ).T, c_y_validation )
                except (RuntimeError):
                    err = numpy.inf
                if err < best_err[ fold, i ]:
                    best_err[ fold, i ] = err
                    best_eta0[ fold, i ] = eta0
    return best_eta0, best_err

In [ ]:
# tuple of all parallel python servers to connect with
ppservers = ()
#ppservers = ("10.0.0.1",)

if len( sys.argv ) > 1:
    ncpus = 7
    # Creates jobserver with ncpus workers
    job_server = pp.Server( ncpus, ppservers=ppservers )
else:
    # Creates jobserver with automatically detected number of workers
    job_server = pp.Server( ppservers=ppservers )

print "Starting pp with", job_server.get_ncpus( ), "workers"

In [ ]:
jobs = []
sigma_b = np.median( sp.spatial.distance.pdist( X_train ) )
gamma_b = 1. / ( 2 * sigma_b ** 2 )
for gamma in np.logspace( np.log( gamma_b ) - 2, np.log( gamma_b ) + 2, 5, base = np.e ):
    for lbda in np.concatenate( ( np.zeros( 1 ), np.logspace( -10, 0, 10 ) ) ):
        jobs.append( ( gamma, lbda, job_server.submit( dojob, ( gamma, lbda, 1000, d, p, X_train, y_train ), depfuncs = ( MSE, RMSE ), modules = modules ) ) )

# inputs = (100000, 100100, 100200, 100300, 100400, 100500, 100600, 100700)
# jobs = [(input, job_server.submit(sum_primes,(input,), (isprime,), ("math",))) for input in inputs]
# for input, job in jobs:
#     print "Sum of primes below", input, "is", job()

# print "Time elapsed: ", time.time() - start_time, "s"

In [ ]:
for param1, param2, job in jobs:
    print param1, param2, job( )

In [ ]:
job_server.print_stats( )

In [14]:



Out[14]:
(array([[ 0.04978707,  0.04978707,  1.39561243],
        [ 1.39561243,  0.04978707,  2.71828183],
        [ 0.04978707,  1.39561243,  0.04978707],
        [ 0.36787944,  2.71828183,  0.04978707],
        [ 0.04978707,  2.71828183,  2.71828183],
        [ 0.71653131,  5.29449005,  0.04978707],
        [ 2.71828183,  0.04978707,  0.04978707],
        [ 0.04978707,  0.04978707,  0.04978707],
        [ 0.04978707,  0.04978707,  0.04978707],
        [ 0.71653131,  0.04978707,  0.04978707]]),
 array([[ 0.50250025,  0.50222055,  0.49949018],
        [ 0.46689279,  0.49673446,  0.49315526],
        [ 0.50751982,  0.50408958,  0.50724478],
        [ 0.50078369,  0.49206942,  0.50235782],
        [ 0.49904856,  0.4912833 ,  0.49796796],
        [ 0.49126911,  0.49363938,  0.49729146],
        [ 0.46112324,  0.50248283,  0.50226096],
        [ 0.50593493,  0.50499286,  0.50484003],
        [ 0.51008747,  0.50912997,  0.50896067],
        [ 0.49108894,  0.50222303,  0.5023363 ]]))

In [13]:
import OVFM.Model, OVFM.FeatureMap, OVFM.Risk, OVFM.LearningRate, OVFM.SGD, numpy, sklearn.cross_validation

In [15]:
dojob( gamma_b, 0, 1000, d, p, X_train, y_train )


Out[15]:
(array([[ 0.09697197,  2.71828183,  1.39561243],
        [ 0.09697197,  2.71828183,  2.71828183],
        [ 0.09697197,  2.71828183,  2.71828183],
        [ 0.09697197,  1.39561243,  1.39561243],
        [ 0.09697197,  2.71828183,  1.39561243],
        [ 0.36787944,  2.71828183,  2.71828183],
        [ 0.09697197,  1.39561243,  2.71828183],
        [ 0.09697197,  2.71828183,  2.71828183],
        [ 0.09697197,  2.71828183,  2.71828183],
        [ 0.09697197,  2.71828183,  2.71828183]]),
 array([[ 0.1170101 ,  0.16431526,  0.1781379 ],
        [ 0.1142335 ,  0.18390876,  0.16967584],
        [ 0.11603348,  0.18048793,  0.15896251],
        [ 0.10656607,  0.17718634,  0.19111925],
        [ 0.12127663,  0.17140013,  0.18393752],
        [ 0.11228207,  0.18304017,  0.21602844],
        [ 0.11264791,  0.20272027,  0.1721232 ],
        [ 0.11883531,  0.16344578,  0.16654305],
        [ 0.12246104,  0.17651035,  0.17855761],
        [ 0.1191152 ,  0.19400248,  0.19584962]]))

In [ ]: