In [1]:
from __future__ import division, print_function, absolute_import

import tflearn
from tflearn.layers.core import input_data, dropout, fully_connected
from tflearn.layers.conv import conv_2d, max_pool_2d
from tflearn.layers.normalization import local_response_normalization
from tflearn.layers.estimator import regression
import GenSyntheticMNSITFixedWidthModule as GenDataset  # local module, used below to build the dataset
import numpy as np
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split

In [2]:
# Constants
DATASET_SIZE = 10000  # number of synthetic images to generate
WIDTH_NUMS = 2        # digits per image (image width = 28 * WIDTH_NUMS)

In [3]:
def dense_to_one_hot(labels_dense, num_classes=10):
    """Convert class labels from scalars to one-hot vectors."""
    num_labels = labels_dense.shape[0]
    index_offset = np.arange(num_labels) * num_classes
    labels_one_hot = np.zeros((num_labels, num_classes))
    # Row i gets a 1 at column labels_dense[i] via flat indexing
    labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1
    return labels_one_hot
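
As a quick sanity check on illustrative labels (not taken from the dataset):

# Expect a 3x10 matrix with a single 1 per row, at columns 2, 0 and 9
dense_to_one_hot(np.array([2, 0, 9]))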

In [4]:
# Get the dataset
X, Y = GenDataset.getDataSet(WIDTH_NUMS, DATASET_SIZE)
X = X.reshape([-1, 28, 28 * WIDTH_NUMS, 1])
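
GenDataset is a local helper module that is not included with this notebook. A minimal sketch of what its getDataSet could look like, assuming standard MNIST loaded through tflearn.datasets.mnist and WIDTH_NUMS randomly chosen digits concatenated side by side (only the name and signature come from the call above; the internals are an assumption):

import tflearn.datasets.mnist as mnist

def getDataSet(width_nums, dataset_size):
    # Tile `width_nums` random MNIST digits into one 28 x (28 * width_nums) image
    mx, my, _, _ = mnist.load_data(one_hot=False)
    mx = mx.reshape([-1, 28, 28])
    idx = np.random.randint(0, mx.shape[0], size=(dataset_size, width_nums))
    X = np.concatenate([mx[idx[:, i]] for i in range(width_nums)], axis=2)
    Y = my[idx]  # integer labels, shape (dataset_size, width_nums)
    return X, Y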

In [5]:
print(X.shape)


(10000, 28, 56, 1)

In [6]:
# Split into train and test sets with a random boolean mask
# (the test set doubles as the validation set during training)
ratio = 0.8  # fraction of samples that go to the training set
randIdx = np.random.random(DATASET_SIZE) <= ratio
X_train = X[randIdx]
Y_train = Y[randIdx]
X_test = X[~randIdx]
Y_test = Y[~randIdx]
# One list entry per digit position, each an (n_samples, 10) one-hot matrix
Y_train = [dense_to_one_hot(Y_train[:, idx]) for idx in range(Y_train.shape[1])]
Y_test = [dense_to_one_hot(Y_test[:, idx]) for idx in range(Y_test.shape[1])]
del X, Y  # release some memory


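Since train_test_split is already imported, the same split could be done in one call (a sketch; it produces an exact 80/20 split where the boolean mask above is only approximate, and it would still need the per-column one-hot conversion afterwards):

X_train, X_test, Y_train, Y_test = train_test_split(X, Y, train_size=ratio)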

In [7]:
# Visualize a random training sample with its labels
%matplotlib inline
idx = np.random.randint(0, X_train.shape[0])
print([Y_train[i][idx] for i in range(len(Y_train))])
print(X_train[idx].shape)
plt.imshow(np.squeeze(X_train[idx]), cmap='gray')


[array([ 0.,  0.,  0.,  0.,  0.,  0.,  1.,  0.,  0.,  0.]), array([ 0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  1.])]
(28, 56, 1)
Out[7]:
<matplotlib.image.AxesImage at 0x7f7b8973cc50>
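
To display the two labels as plain digits rather than one-hot rows, argmax recovers them:

print([int(np.argmax(Y_train[i][idx])) for i in range(len(Y_train))])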

In [8]:
# Building the convolutional network: a shared conv trunk followed by
# two independent fully-connected heads, one per digit position
network = input_data(shape=[None, 28, 28 * WIDTH_NUMS, 1], name='input')
network = conv_2d(network, 32, 3, activation='relu', regularizer="L2")
network = max_pool_2d(network, 2)
network = local_response_normalization(network)
network = conv_2d(network, 64, 3, activation='relu', regularizer="L2")
network = max_pool_2d(network, 2)
network = local_response_normalization(network)
# Head 1: predicts the first digit
fc_1 = fully_connected(network, 128, activation='tanh')
fc_1 = dropout(fc_1, 0.8)
# Head 2: predicts the second digit
fc_2 = fully_connected(network, 128, activation='tanh')
fc_2 = dropout(fc_2, 0.8)
softmax1 = fully_connected(fc_1, 10, activation='softmax')
softmax2 = fully_connected(fc_2, 10, activation='softmax')
# One regression (loss + optimizer) per head, then merge so a single
# DNN can train both targets jointly
network1 = regression(softmax1, optimizer='adam', learning_rate=0.01,
                      loss='categorical_crossentropy', name='target1')
network2 = regression(softmax2, optimizer='adam', learning_rate=0.01,
                      loss='categorical_crossentropy', name='target2')
network = tflearn.merge([network1, network2], mode='elemwise_sum')

In [9]:
model = tflearn.DNN(network, tensorboard_verbose=1)
model.fit({'input': X_train}, {'target1': Y_train[0], 'target2': Y_train[1]},
          validation_set=(X_test, [Y_test[0], Y_test[1]]),
          n_epoch=5, snapshot_step=100, show_metric=True,
          run_id='convnet_mnist_')


Training Step: 630  | total loss: 0.10910
| Adam_0 | epoch: 005 | loss: 0.07524 - acc: 0.9823 | val_loss: 0.10004 - val_acc: 0.9666 -- iter: 8022/8022
| Adam_1 | epoch: 005 | loss: 0.03386 - acc: 0.9895 | val_loss: 0.10465 - val_acc: 0.9712 -- iter: 8022/8022
--
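
To read each head's prediction separately after training, one option is to wrap each softmax layer in a lightweight DNN that reuses the trained session (a common tflearn pattern for inspecting individual layers; head1/head2 are names introduced here):

# Decode per-digit predictions from each head with argmax
head1 = tflearn.DNN(softmax1, session=model.session)
head2 = tflearn.DNN(softmax2, session=model.session)
sample = X_test[:1]
print(np.argmax(head1.predict(sample), axis=1)[0],
      np.argmax(head2.predict(sample), axis=1)[0])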

In [ ]: