In [ ]:
# Import libraries
import cPickle, gzip
import numpy as np
from time import sleep
import dreaml as dm
from dreaml.server import start
import dreaml.transformations as trans
from dreaml.dataframe.transform import ContinuousTransform
# Load data from files
f = gzip.open('mnist.pkl.gz', 'rb')
train_set, valid_set, test_set = cPickle.load(f)
f.close()
n_train=1000
n_test=100
X_train = train_set[0][0:n_train,:]
y_train = train_set[1][0:n_train,None]
X_test = valid_set[0][0:n_test,:]
y_test = valid_set[1][0:n_test,None]
df = dm.DataFrame()
# start(df)
df["data/train/", "input/raw/"] = dm.DataFrame.from_matrix(X_train)
df["data/train/", "input/label/"] = dm.DataFrame.from_matrix(y_train)
df["data/test/", "input/raw/"] = dm.DataFrame.from_matrix(X_test)
df["data/test/", "input/label/"] = dm.DataFrame.from_matrix(y_test)
Here, we construct a simple neural network transform with the ability to add layers and change the optimizer while training. Note that this code is largely identical to the Keras example; it is simply wrapped inside the ContinuousTransform class.
In [ ]:
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation
from keras.optimizers import SGD
class SimpleNN(ContinuousTransform):
    def init_func(self,target_df,X_train_df,y_train_df,X_test_df,y_test_df):
        # Build the initial Keras model: one hidden tanh layer with dropout,
        # followed by a 64-unit softmax output (modified later via add_layers)
        model = Sequential()
        model.add(Dense(64, input_dim=784, init='uniform'))
        model.add(Activation('tanh'))
        model.add(Dropout(0.5))
        model.add(Dense(64, init='uniform'))
        model.add(Activation('softmax'))
        self.opt = SGD(lr=0.1, decay=1e-6, momentum=0.9, nesterov=True)
        model.compile(loss='categorical_crossentropy', optimizer=self.opt)
        self.model = model
        self.target = target_df
        # Training parameters exposed for interactive modification
        self.nb_epoch = 2
        self.batch_size = 16
        self.verbose = 0
        target_df.set_matrix(0)

    def continuous_func(self,target_df,X_train_df,y_train_df,X_test_df,y_test_df):
        # Each pass fits for nb_epoch epochs, then writes the test score to the target block
        self.model.fit(X_train_df.get_matrix(),
                       y_train_df.get_matrix(),
                       nb_epoch=self.nb_epoch,
                       batch_size=self.batch_size,
                       verbose=self.verbose)
        score = self.model.evaluate(X_test_df.get_matrix(),
                                    y_test_df.get_matrix(),
                                    batch_size=self.batch_size,
                                    verbose=self.verbose)
        target_df.set_matrix(np.array([[score]]))

    def add_layers(self, layers, idx):
        # Pause training, drop all layers after idx, append the new layers, and resume
        self.target.stop()
        while len(self.model.layers) > idx:
            self.model.layers.pop()
        for l in layers:
            self.model.add(l)
        self.model.compile(loss='categorical_crossentropy', optimizer=self.opt)
        self.target.go()

    def set_optimizer(self, opt):
        # Pause training, recompile with the new optimizer, and resume
        self.target.stop()
        self.opt = opt
        self.model.compile(loss='categorical_crossentropy', optimizer=self.opt)
        self.target.go()
df["output/","score/"] = SimpleNN(df["data/train/","input/raw/"],
df["data/train/","input/label/"],
df["data/test/","input/raw/"],
df["data/test/","input/label/"])
The class allows us to replace all layers after a given index in the model. In this example, we replace the last layer (a single softmax activation) with a series of three layers, followed by a final softmax activation.
In [ ]:
new_layers = [
    Activation('tanh'),
    Dropout(0.5),
    Dense(10, init='uniform'),
    Activation('softmax')
]
df["output/","score/"].T.add_layers(new_layers,4)
We can also change the optimizer being used. Here we adjust the learning rate and momentum, and replace the previous optimizer.
In [ ]:
new_opt = SGD(lr=0.01,decay=1e-6,momentum=0.8,nesterov=True)
df["output/","score/"].T.set_optimizer(new_opt)
Lastly, we can directly set any parameters we've exposed in the class. In this case, these are the number of epochs and the batch size, along with a verbosity flag.
In [ ]:
df["output/","score/"].T.nb_epoch = 4
df["output/","score/"].T.batch_size = 32
df["output/","score/"].T.verbose = 1