In [1]:
%load_ext autoreload
%autoreload 2

In [2]:
import numpy as np
from keras.models import Sequential
from keras.layers.convolutional import Convolution2D, MaxPooling2D
from keras.layers.core import Dense, Dropout, Activation, Flatten
from keras.optimizers import SGD
import tables
import os
import theano


Using Theano backend.
Using gpu device 0: GRID K520 (CNMeM is disabled)

Loading LMDB dataset


In [3]:
import lmdb
import caffe

In [4]:
env = lmdb.open("/home/ubuntu/dataset/binary/DR_128_128_800_lmdb/train_db/", readonly=True)
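
A quick sanity check before iterating: lmdb's stat API reports how many records the database holds (a minimal sketch against the env opened above).

In [ ]:
# Number of key/value pairs in the training LMDB
print env.stat()['entries']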

In [ ]:
with env.begin() as txn:
    cursor = txn.cursor()
    for key, val in cursor:
        # each value is the raw serialized record for this key
        print type(val)
        break
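
Each value in this LMDB is a serialized caffe_pb2.Datum (the usual format written by Caffe/DIGITS); a sketch of decoding one record into a (channels, height, width) numpy array and its label, assuming that format:

In [ ]:
from caffe.proto import caffe_pb2

with env.begin() as txn:
    cursor = txn.cursor()
    for key, val in cursor:
        datum = caffe_pb2.Datum()
        datum.ParseFromString(val)            # parse the protobuf record
        img = caffe.io.datum_to_array(datum)  # (channels, height, width) array
        print key, img.shape, datum.label
        break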

Loading HDF5 dataset


In [ ]:
train_filename = "data/train_128_db.h5"
test_filename = "data/test_128_db.h5"

In [ ]:
fp_train = tables.open_file(train_filename, mode='r+')
X_train, y_train = fp_train.root.X_train, fp_train.root.y_train

In [ ]:
fp_test = tables.open_file(test_filename, mode='r+')
X_test, y_test = fp_test.root.X_test, fp_test.root.y_test
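
Before building the model it is worth confirming that the arrays have the layout the network below expects, i.e. (N, 3, 128, 128) images with matching label arrays; a minimal check:

In [ ]:
print "X_train:", X_train.shape, X_train.dtype
print "y_train:", y_train.shape, y_train.dtype
print "X_test: ", X_test.shape, X_test.dtype
print "y_test: ", y_test.shape, y_test.dtype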

In [ ]:


In [ ]:
nn = Sequential()
nn.add(Convolution2D(32, 3, 3, border_mode='same', input_shape=(3, 128, 128), activation='relu'))
print "Output shape after %d layers -" % len(nn.layers), nn.output_shape
nn.add(Convolution2D(32, 3, 3, activation='relu'))
print "Output shape after %d layers -" % len(nn.layers), nn.output_shape
nn.add(MaxPooling2D(pool_size=(3, 3)))
print "Output shape after %d layers -" % len(nn.layers), nn.output_shape
nn.add(Convolution2D(64, 3, 3, border_mode='same', activation='relu'))
print "Output shape after %d layers -" % len(nn.layers), nn.output_shape
nn.add(MaxPooling2D(pool_size=(3, 3)))
print "Output shape after %d layers -" % len(nn.layers), nn.output_shape
nn.add(Dropout(0.5))

nn.add(Flatten())
print "Output shape after %d layers -" % len(nn.layers), nn.output_shape

nn.add(Dense(128, activation='relu'))
nn.add(Dropout(0.5))
print "Output shape after %d layers -" % len(nn.layers), nn.output_shape

nn.add(Dense(5, activation='softmax'))

sgd = SGD(lr=0.01, momentum=0.9, decay=1e-4, nesterov=True)
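
For reference, the shapes printed above work out as follows (channels-first ordering, as the Theano backend expects): the 'same' convolution keeps (32, 128, 128), the 'valid' convolution trims it to (32, 126, 126), the first 3x3 pooling gives (32, 42, 42), the second convolution keeps (64, 42, 42), the second pooling gives (64, 14, 14), and flattening yields 64 * 14 * 14 = 12544 features feeding the 128-unit dense layer and the 5-way softmax.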

In [ ]:
nn.compile(loss='categorical_crossentropy', optimizer=sgd)
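
With a 5-way softmax and categorical cross-entropy, the labels fed to fit must be one-hot encoded. If y_train/y_test are stored as integer grades (0-4), a sketch of the conversion using Keras' np_utils (if the HDF5 files already store one-hot labels, this step is unnecessary and y_train can be passed to fit directly):

In [ ]:
from keras.utils import np_utils

# Convert integer class labels to one-hot vectors of length 5
Y_train = np_utils.to_categorical(np.array(y_train), 5)
Y_test = np_utils.to_categorical(np.array(y_test), 5)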

In [ ]:


In [ ]:
batch = np.random.randint(0, X_train.shape[0], size=16)  # sample 16 random row indices from the training set
print batch

In [ ]:
nn.fit(X_train, y_train, batch_size=32, nb_epoch=20, show_accuracy=True, shuffle=True, validation_split=0.2, verbose=2)
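
Calling fit directly on the PyTables arrays streams every epoch through the HDF5 file; if that proves too slow or the data does not fit in memory, the random-index sampling from the earlier cell can be turned into a manual training loop. A sketch using train_on_batch (illustrative epoch/batch counts; assumes y_train rows are already one-hot, per the note above):

In [ ]:
nb_epoch = 20
batch_size = 32
nb_batches = X_train.shape[0] // batch_size

for epoch in range(nb_epoch):
    losses = []
    for _ in range(nb_batches):
        # Sample a random mini-batch of row indices and read those rows from the HDF5 arrays
        idx = np.random.randint(0, X_train.shape[0], size=batch_size)
        X_batch = np.array([X_train[i] for i in idx])
        y_batch = np.array([y_train[i] for i in idx])
        losses.append(nn.train_on_batch(X_batch, y_batch))
    print "Epoch %d - mean loss %.4f" % (epoch, np.mean(losses))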

In [ ]:
fp_train.close()
fp_test.close()