In [2]:
from __future__ import print_function
from hyperopt import Trials, STATUS_OK, tpe
from hyperas import optim
from hyperas.distributions import choice, uniform

from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation
from keras.optimizers import RMSprop

from keras.datasets import mnist
from keras.utils import np_utils


Using Theano backend.

In [3]:
def data():
    '''
    Data providing function:

    Loads MNIST, flattens each 28x28 image into a 784-dim float32 vector
    scaled to [0, 1], and one-hot encodes the integer labels.

    This function is separated from model() so that hyperopt
    won't reload data for each evaluation run.

    Returns:
        (X_train, Y_train, X_test, Y_test) where X_* have shape
        (n_samples, 784) and Y_* are one-hot label matrices with 10 columns.
    '''
    (X_train, y_train), (X_test, y_test) = mnist.load_data()
    # Derive sample counts from the arrays instead of hard-coding
    # 60000/10000, so the function also works if the dataset is truncated.
    X_train = X_train.reshape(X_train.shape[0], 784)
    X_test = X_test.reshape(X_test.shape[0], 784)
    X_train = X_train.astype('float32')
    X_test = X_test.astype('float32')
    X_train /= 255  # scale raw pixel values 0-255 down to 0.0-1.0
    X_test /= 255
    nb_classes = 10  # digits 0-9
    Y_train = np_utils.to_categorical(y_train, nb_classes)
    Y_test = np_utils.to_categorical(y_test, nb_classes)
    return X_train, Y_train, X_test, Y_test

In [4]:
def model(X_train, Y_train, X_test, Y_test):
    '''
    Model providing function:

    Create Keras model with double curly brackets dropped-in as needed.
    NOTE: the {{...}} expressions are hyperas template placeholders that are
    textually substituted with sampled hyperparameters before execution; their
    order in the source determines the search-space labels (Dropout, Dense,
    Dropout_1, batch_size), so do not reorder or reformat them.

    Return value has to be a valid python dictionary with two customary keys:
        - loss: Specify a numeric evaluation metric to be minimized
        - status: Just use STATUS_OK and see hyperopt documentation if not feasible
    The last one is optional, though recommended, namely:
        - model: specify the model just created so that we can later use it again.
    '''
    model = Sequential()
    model.add(Dense(512, input_shape=(784,)))  # input: flattened 28x28 MNIST images
    model.add(Activation('relu'))
    model.add(Dropout({{uniform(0, 1)}}))  # dropout rate sampled uniformly from [0, 1]
    model.add(Dense({{choice([256, 512, 1024])}}))  # hidden-layer width is a search choice
    model.add(Activation('relu'))
    model.add(Dropout({{uniform(0, 1)}}))  # second, independently sampled dropout rate
    model.add(Dense(10))  # one output unit per digit class
    model.add(Activation('softmax'))

    rms = RMSprop()
    model.compile(loss='categorical_crossentropy', optimizer=rms, metrics=['accuracy'])

    # Single epoch keeps each of the search trials cheap; verbose=2 prints
    # one summary line per epoch instead of a per-batch progress bar.
    model.fit(X_train, Y_train,
              batch_size={{choice([64, 128])}},
              nb_epoch=1,
              verbose=2,
              validation_data=(X_test, Y_test))
    score, acc = model.evaluate(X_test, Y_test, verbose=0)
    print('Test accuracy:', acc)
    # hyperopt minimizes 'loss', so negate accuracy to maximize it.
    return {'loss': -acc, 'status': STATUS_OK, 'model': model}

In [7]:
# Load the data once, then let hyperas run a TPE search over the
# hyperparameter space defined by the {{...}} templates in model().
X_train, Y_train, X_test, Y_test = data()

best_run, best_model = optim.minimize(model=model,
                                      data=data,
                                      algo=tpe.suggest,  # Tree-structured Parzen Estimator
                                      max_evals=5,  # number of hyperparameter trials
                                      trials=Trials(),
                                      notebook_name='simple_notebook')  # hyperas needs the notebook filename to read the cell source


>>> Imports:
from __future__ import print_function

try:
    from hyperopt import Trials, STATUS_OK, tpe
except:
    pass

try:
    from hyperas import optim
except:
    pass

try:
    from hyperas.distributions import choice, uniform
except:
    pass

try:
    from keras.models import Sequential
except:
    pass

try:
    from keras.layers.core import Dense, Dropout, Activation
except:
    pass

try:
    from keras.optimizers import RMSprop
except:
    pass

try:
    from keras.datasets import mnist
except:
    pass

try:
    from keras.utils import np_utils
except:
    pass

>>> Hyperas search space:

def get_space():
    return {
        'Dropout': hp.uniform('Dropout', 0, 1),
        'Dense': hp.choice('Dense', [256, 512, 1024]),
        'Dropout_1': hp.uniform('Dropout_1', 0, 1),
        'batch_size': hp.choice('batch_size', [64, 128]),
    }

>>> Data
  1: 
  2: '''
  3: Data providing function:
  4: 
  5: This function is separated from model() so that hyperopt
  6: won't reload data for each evaluation run.
  7: '''
  8: (X_train, y_train), (X_test, y_test) = mnist.load_data()
  9: X_train = X_train.reshape(60000, 784)
 10: X_test = X_test.reshape(10000, 784)
 11: X_train = X_train.astype('float32')
 12: X_test = X_test.astype('float32')
 13: X_train /= 255
 14: X_test /= 255
 15: nb_classes = 10
 16: Y_train = np_utils.to_categorical(y_train, nb_classes)
 17: Y_test = np_utils.to_categorical(y_test, nb_classes)
 18: 
 19: 
 20: 
>>> Resulting replaced keras model:

   1: def keras_fmin_fnct(space):
   2: 
   3:     '''
   4:     Model providing function:
   5: 
   6:     Create Keras model with double curly brackets dropped-in as needed.
   7:     Return value has to be a valid python dictionary with two customary keys:
   8:         - loss: Specify a numeric evaluation metric to be minimized
   9:         - status: Just use STATUS_OK and see hyperopt documentation if not feasible
  10:     The last one is optional, though recommended, namely:
  11:         - model: specify the model just created so that we can later use it again.
  12:     '''
  13:     model = Sequential()
  14:     model.add(Dense(512, input_shape=(784,)))
  15:     model.add(Activation('relu'))
  16:     model.add(Dropout(space['Dropout']))
  17:     model.add(Dense(space['Dense']))
  18:     model.add(Activation('relu'))
  19:     model.add(Dropout(space['Dropout_1']))
  20:     model.add(Dense(10))
  21:     model.add(Activation('softmax'))
  22: 
  23:     rms = RMSprop()
  24:     model.compile(loss='categorical_crossentropy', optimizer=rms, metrics=['accuracy'])
  25: 
  26:     model.fit(X_train, Y_train,
  27:               batch_size=space['batch_size'],
  28:               nb_epoch=1,
  29:               verbose=2,
  30:               validation_data=(X_test, Y_test))
  31:     score, acc = model.evaluate(X_test, Y_test, verbose=0)
  32:     print('Test accuracy:', acc)
  33:     return {'loss': -acc, 'status': STATUS_OK, 'model': model}
  34: 
Train on 60000 samples, validate on 10000 samples
Epoch 1/1
10s - loss: 0.3744 - acc: 0.8864 - val_loss: 0.1486 - val_acc: 0.9551
Test accuracy: 0.9551
Train on 60000 samples, validate on 10000 samples
Epoch 1/1
10s - loss: 0.2633 - acc: 0.9201 - val_loss: 0.1164 - val_acc: 0.9674
Test accuracy: 0.9674
Train on 60000 samples, validate on 10000 samples
Epoch 1/1
7s - loss: 0.5879 - acc: 0.8161 - val_loss: 0.1910 - val_acc: 0.9451
Test accuracy: 0.9451
Train on 60000 samples, validate on 10000 samples
Epoch 1/1
10s - loss: 0.5738 - acc: 0.8219 - val_loss: 0.1962 - val_acc: 0.9457
Test accuracy: 0.9457
Train on 60000 samples, validate on 10000 samples
Epoch 1/1
10s - loss: 0.4174 - acc: 0.8727 - val_loss: 0.1367 - val_acc: 0.9616
Test accuracy: 0.9616

In [6]:
print("Evalutation of best performing model:")
print(best_model.evaluate(X_test, Y_test))


Evaluation of best performing model:
10000/10000 [==============================] - 0s     
[0.11977583827276249, 0.96419999999999995]