In [1]:
from __future__ import division, print_function, absolute_import

import tflearn
from tflearn.layers.core import input_data, dropout, fully_connected, flatten
from tflearn.layers.conv import conv_2d, max_pool_2d
from tflearn.layers.normalization import local_response_normalization
from tflearn.layers.estimator import regression
import matplotlib.pyplot as plt
import random
import numpy as np

In [2]:
# Data loading and preprocessing
import tflearn.datasets.mnist as mnist
X, Y, testX, testY = mnist.load_data(one_hot=False)  # integer labels; one-hot targets are built by hand below
X = X.reshape([-1, 28, 28, 1])
testX = testX.reshape([-1, 28, 28, 1])


Extracting mnist/train-images-idx3-ubyte.gz
Extracting mnist/train-labels-idx1-ubyte.gz
Extracting mnist/t10k-images-idx3-ubyte.gz
Extracting mnist/t10k-labels-idx1-ubyte.gz
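
As a quick sanity check, the shapes can be printed (tflearn's MNIST loader is expected to return 55000 training and 10000 test images):

In [ ]:
# Confirm the reshape to NHWC image tensors
print(X.shape, Y.shape)          # expected: (55000, 28, 28, 1) (55000,)
print(testX.shape, testY.shape)  # expected: (10000, 28, 28, 1) (10000,)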

In [3]:
NUM_SAMPLES_TO_GEN = 100
WIDTH_SAMPLE = 2
NUM_IMGS_AVAILABLE = X.shape[0]

In [4]:
X_seq = np.empty(shape=(NUM_SAMPLES_TO_GEN, 28, 28 * WIDTH_SAMPLE, 1), dtype='float32')
Y_seq = np.zeros(shape=(NUM_SAMPLES_TO_GEN, 10**WIDTH_SAMPLE), dtype='float32')
for i in range(NUM_SAMPLES_TO_GEN):  # for each sample to generate
    # Pick WIDTH_SAMPLE random digits (np.random.randint's upper bound is exclusive)
    indices = np.random.randint(0, NUM_IMGS_AVAILABLE, size=WIDTH_SAMPLE)
    X_seq[i] = np.concatenate(X[indices], axis=1)  # stitch the digits side by side
    # Reading left to right, indices[0] is the tens digit and indices[1] the units
    Y_seq_index = Y[indices[0]] * 10 + Y[indices[1]]
    Y_seq[i][Y_seq_index] = 1.  # convert to one-hot
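
Since matplotlib and random are already imported, one generated pair can be eyeballed against its decoded label; a minimal sketch:

In [ ]:
# Show one stitched two-digit image with the label recovered from its one-hot row
i = random.randrange(NUM_SAMPLES_TO_GEN)
plt.imshow(X_seq[i].reshape(28, 28 * WIDTH_SAMPLE), cmap='gray')
plt.title('label: %02d' % np.argmax(Y_seq[i]))
plt.show()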

In [5]:
# Building convolutional network
network = input_data(shape=[None, 28, 28 * WIDTH_SAMPLE, 1], name='input')
network = conv_2d(network, 32, 3, activation='relu', regularizer="L2")
network = max_pool_2d(network, 2)
network = local_response_normalization(network)
network = conv_2d(network, 64, 3, activation='relu', regularizer="L2")
network = max_pool_2d(network, 2)
network = local_response_normalization(network)
network = flatten(network)
network = fully_connected(network, 128, activation='tanh')
network = dropout(network, 0.8)
network = fully_connected(network, 256, activation='tanh')
network = dropout(network, 0.8)
network = fully_connected(network, 10**WIDTH_SAMPLE, activation='softmax')  # one class per WIDTH_SAMPLE-digit number
network = regression(network, optimizer='adam', learning_rate=0.01,
                     loss='categorical_crossentropy', name='target')
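
For reference, the first fully connected layer receives 7 * 14 * 64 = 6272 features: conv_2d keeps the spatial size (padding defaults to 'same'), while each max_pool_2d halves it, taking the 28 x 56 input down to 7 x 14 over 64 channels. A quick arithmetic check:

In [ ]:
# Two 2x2 max-pools halve both spatial dimensions twice
h, w = 28 // 4, (28 * WIDTH_SAMPLE) // 4
print(h, w, h * w * 64)  # 7 14 6272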

In [6]:
# Training
model = tflearn.DNN(network, tensorboard_verbose=0)
model.fit({'input': X_seq}, {'target': Y_seq}, n_epoch=5,
          validation_set=0.1, run_id='convnet_mnist4')


Training Step: 10  | total loss: 3.97562
| Adam | epoch: 005 | loss: 3.97562 | val_loss: 7.16499 -- iter: 90/90
--
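
With only 100 training samples the loss stays high, as the log above shows. A sketch of how test accuracy could be measured, building pairs from the test split the same way (NUM_TEST_SAMPLES is an illustrative name):

In [ ]:
# Build held-out pairs from the test split and score the model
NUM_TEST_SAMPLES = 100
Xt = np.empty((NUM_TEST_SAMPLES, 28, 28 * WIDTH_SAMPLE, 1), dtype='float32')
Yt = np.zeros((NUM_TEST_SAMPLES, 10**WIDTH_SAMPLE), dtype='float32')
for i in range(NUM_TEST_SAMPLES):
    idx = np.random.randint(0, testX.shape[0], size=WIDTH_SAMPLE)
    Xt[i] = np.concatenate(testX[idx], axis=1)
    Yt[i][testY[idx[0]] * 10 + testY[idx[1]]] = 1.
print(model.evaluate(Xt, Yt))  # tflearn.DNN.evaluate returns [accuracy]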
