In [1]:
from __future__ import print_function
import gzip
import itertools
import pickle
import os
import sys
import numpy as np
import lasagne
import theano
import theano.tensor as T
import time

In [2]:
# Hyperparameters / configuration for the whole notebook.
DATA_FILENAME = 'mnist.pkl.gz'  # gzipped pickle holding (train, valid, test) splits
NUM_EPOCHS = 200                # maximum number of passes over the training set
BATCH_SIZE = 600                # examples per mini-batch (baked into the model's input shape)
LEARNING_RATE = 0.01            # step size for nesterov_momentum updates
MOMENTUM = 0.9                  # momentum coefficient for nesterov_momentum

In [3]:
def load_data(data):
    """Unpack the (train, valid, test) splits from `data`, reshape the
    images for convolutional layers, and move everything into Theano
    shared variables.

    Returns a dict with the shared tensors plus bookkeeping metadata
    (example counts, input height/width, number of output classes).
    """
    (X_train, y_train), (X_valid, y_valid), (X_test, y_test) = data

    # Convolutional layers expect 4D input: (num_examples, channels, rows, cols).
    X_train = X_train.reshape((-1, 1, 28, 28))
    X_valid = X_valid.reshape((-1, 1, 28, 28))
    X_test = X_test.reshape((-1, 1, 28, 28))

    def shared_inputs(X):
        # Store inputs as theano.config.floatX so they can live on the device.
        return theano.shared(lasagne.utils.floatX(X))

    def shared_targets(y):
        # Labels are cast to int32 for the categorical cross-entropy objective.
        return T.cast(theano.shared(y), 'int32')

    return dict(
        X_train=shared_inputs(X_train),
        y_train=shared_targets(y_train),
        X_valid=shared_inputs(X_valid),
        y_valid=shared_targets(y_valid),
        X_test=shared_inputs(X_test),
        y_test=shared_targets(y_test),
        num_examples_train=X_train.shape[0],
        num_examples_valid=X_valid.shape[0],
        num_examples_test=X_test.shape[0],
        input_height=X_train.shape[2],
        input_width=X_train.shape[3],
        output_dim=10,
        )

In [4]:
def build_model(input_width, input_height, output_dim,
                batch_size=BATCH_SIZE):
    """Build the MNIST convnet: two conv+max-pool stages, one
    dropout-regularised dense layer, and a softmax output layer.

    Parameters
    ----------
    input_width, input_height : int
        Spatial dimensions of the input images (28x28 for MNIST).
    output_dim : int
        Number of output classes.
    batch_size : int
        Mini-batch size baked into the input layer's fixed shape.

    Returns
    -------
    The output (softmax) layer; the full network is reachable from it.
    """
    # Lasagne's 4D input convention is (batch, channels, rows, cols),
    # i.e. height before width.  The original code passed width before
    # height, which only went unnoticed because MNIST images are square.
    l_in = lasagne.layers.InputLayer(
        shape=(batch_size, 1, input_height, input_width),
        )

    # Stage 1: 32 5x5 filters + 2x2 max-pooling.
    l_conv1 = lasagne.layers.Conv2DLayer(
        l_in,
        num_filters=32,
        filter_size=(5, 5),
        nonlinearity=lasagne.nonlinearities.rectify,
        W=lasagne.init.GlorotUniform(),
        )
    l_pool1 = lasagne.layers.MaxPool2DLayer(l_conv1, pool_size=(2, 2))

    # Stage 2: same shape of filters on the pooled feature maps.
    l_conv2 = lasagne.layers.Conv2DLayer(
        l_pool1,
        num_filters=32,
        filter_size=(5, 5),
        nonlinearity=lasagne.nonlinearities.rectify,
        W=lasagne.init.GlorotUniform(),
        )
    l_pool2 = lasagne.layers.MaxPool2DLayer(l_conv2, pool_size=(2, 2))

    # Fully-connected classifier head with dropout for regularisation.
    l_hidden1 = lasagne.layers.DenseLayer(
        l_pool2,
        num_units=256,
        nonlinearity=lasagne.nonlinearities.rectify,
        W=lasagne.init.GlorotUniform(),
        )
    l_hidden1_dropout = lasagne.layers.DropoutLayer(l_hidden1, p=0.5)

    l_out = lasagne.layers.DenseLayer(
        l_hidden1_dropout,
        num_units=output_dim,
        nonlinearity=lasagne.nonlinearities.softmax,
        W=lasagne.init.GlorotUniform(),
        )

    return l_out

In [6]:
def create_iter_functions(dataset, output_layer,
                          X_tensor_type=T.matrix,
                          batch_size=BATCH_SIZE,
                          learning_rate=LEARNING_RATE, momentum=MOMENTUM):
    """Create functions for training, validation and testing to iterate one
       epoch.

    `dataset` must hold Theano shared variables under the keys
    X_train/y_train, X_valid/y_valid and X_test/y_test (as produced by
    `load_data`).  Each returned function takes a single batch index and
    operates on the corresponding mini-batch slice of the shared data.
    """
    # Symbolic batch index; the data itself stays on the device in shared
    # variables and each compiled function slices out one mini-batch.
    batch_index = T.iscalar('batch_index')
    X_batch = X_tensor_type('x')  # caller passes T.tensor4 for 4D image batches
    y_batch = T.ivector('y')
    # Symbolic half-open slice [b*batch_size, (b+1)*batch_size).
    batch_slice = slice(batch_index * batch_size,
                        (batch_index + 1) * batch_size)

    # NOTE(review): deprecated pre-release Lasagne Objective API; presumably
    # get_loss aggregates the per-example cross-entropy over the batch --
    # confirm against the Lasagne 0.1-dev docs before porting forward.
    objective = lasagne.objectives.Objective(output_layer,
        loss_function=lasagne.objectives.categorical_crossentropy)

    # Training loss keeps dropout active; evaluation loss disables it
    # via deterministic=True.
    loss_train = objective.get_loss(X_batch, target=y_batch)
    loss_eval = objective.get_loss(X_batch, target=y_batch,
                                   deterministic=True)

    # Predicted class = argmax over the softmax outputs (dropout disabled).
    pred = T.argmax(
        lasagne.layers.get_output(output_layer, X_batch, deterministic=True),
        axis=1)
    accuracy = T.mean(T.eq(pred, y_batch), dtype=theano.config.floatX)

    # Nesterov-momentum SGD updates for every trainable parameter.
    all_params = lasagne.layers.get_all_params(output_layer)
    updates = lasagne.updates.nesterov_momentum(
        loss_train, all_params, learning_rate, momentum)

    # `givens` substitutes the mini-batch slice of the shared data for the
    # symbolic inputs, so no host-to-device transfer happens per call.
    iter_train = theano.function(
        [batch_index], loss_train,
        updates=updates,
        givens={
            X_batch: dataset['X_train'][batch_slice],
            y_batch: dataset['y_train'][batch_slice],
        },
    )

    iter_valid = theano.function(
        [batch_index], [loss_eval, accuracy],
        givens={
            X_batch: dataset['X_valid'][batch_slice],
            y_batch: dataset['y_valid'][batch_slice],
        },
    )

    iter_test = theano.function(
        [batch_index], [loss_eval, accuracy],
        givens={
            X_batch: dataset['X_test'][batch_slice],
            y_batch: dataset['y_test'][batch_slice],
        },
    )

    return dict(
        train=iter_train,
        valid=iter_valid,
        test=iter_test,
    )

In [7]:
def train(iter_funcs, dataset, batch_size=BATCH_SIZE):
    """Train the model with `dataset` with mini-batch training. Each
       mini-batch has `batch_size` recordings.

    Infinite generator: each iteration runs one full pass over the
    training batches and one over the validation batches, then yields a
    dict with the epoch number and the averaged statistics.  The caller
    decides when to stop consuming epochs.
    """
    # Trailing examples that do not fill a whole batch are dropped.
    num_batches_train = dataset['num_examples_train'] // batch_size
    num_batches_valid = dataset['num_examples_valid'] // batch_size

    epoch = 0
    while True:
        epoch += 1

        # One pass over the training set (updates parameters as a side effect).
        train_losses = [iter_funcs['train'](i)
                        for i in range(num_batches_train)]

        # One pass over the validation set; each call returns (loss, accuracy).
        valid_results = [iter_funcs['valid'](i)
                         for i in range(num_batches_valid)]
        valid_losses = [loss for loss, _ in valid_results]
        valid_accuracies = [acc for _, acc in valid_results]

        yield {
            'number': epoch,
            'train_loss': np.mean(train_losses),
            'valid_loss': np.mean(valid_losses),
            'valid_accuracy': np.mean(valid_accuracies),
        }

In [14]:
# Load the data, build and compile the network, then run the training loop.
print("Loading data...")
with gzip.open(DATA_FILENAME, 'rb') as f:
    # NOTE(review): pickle.load on an untrusted file can execute arbitrary
    # code -- only use with a trusted local copy of mnist.pkl.gz.
    data = pickle.load(f)
dataset = load_data(data)

print("Building model and compiling functions...")
output_layer = build_model(
    input_height=dataset['input_height'],
    input_width=dataset['input_width'],
    output_dim=dataset['output_dim'],
    )

iter_funcs = create_iter_functions(
    dataset,
    output_layer,
    X_tensor_type=T.tensor4,  # image batches are 4D, not flat vectors
    )

num_epochs = NUM_EPOCHS

print("Starting training...")
now = time.time()
try:
    for epoch in train(iter_funcs, dataset):
        print("Epoch {} of {} took {:.3f}s".format(
            epoch['number'], num_epochs, time.time() - now))
        now = time.time()
        print("  training loss:\t\t{:.6f}".format(epoch['train_loss']))
        print("  validation loss:\t\t{:.6f}".format(epoch['valid_loss']))
        # str.format() does not interpret "%", so the original "%%" printed a
        # literal double percent sign (visible in the recorded output);
        # a single "%" is correct here.
        print("  validation accuracy:\t\t{:.2f} %".format(
            epoch['valid_accuracy'] * 100))

        if epoch['number'] >= num_epochs:
            break

# Allow interrupting a long run without a traceback; results so far stand.
except KeyboardInterrupt:
    pass


Loading data...
Building model and compiling functions...
Starting training...
Epoch 1 of 200 took 174.946s
  training loss:		1.242629
  validation loss:		0.271462
  validation accuracy:		91.96 %%
Epoch 2 of 200 took 200.097s
  training loss:		0.312536
  validation loss:		0.155670
  validation accuracy:		95.55 %%
Epoch 3 of 200 took 184.285s
  training loss:		0.213655
  validation loss:		0.118474
  validation accuracy:		96.52 %%
Epoch 4 of 200 took 177.598s
  training loss:		0.169883
  validation loss:		0.096493
  validation accuracy:		97.16 %%
Epoch 5 of 200 took 178.313s
  training loss:		0.146444
  validation loss:		0.083608
  validation accuracy:		97.43 %%
Epoch 6 of 200 took 177.332s
  training loss:		0.128278
  validation loss:		0.075484
  validation accuracy:		97.62 %%
Epoch 7 of 200 took 176.976s
  training loss:		0.115729
  validation loss:		0.068976
  validation accuracy:		97.85 %%
Epoch 8 of 200 took 174.416s
  training loss:		0.105179
  validation loss:		0.063809
  validation accuracy:		97.98 %%
Epoch 9 of 200 took 174.995s
  training loss:		0.096124
  validation loss:		0.059951
  validation accuracy:		98.05 %%
Epoch 10 of 200 took 174.222s
  training loss:		0.091027
  validation loss:		0.057243
  validation accuracy:		98.16 %%
Epoch 11 of 200 took 174.762s
  training loss:		0.084521
  validation loss:		0.054204
  validation accuracy:		98.23 %%
Epoch 12 of 200 took 180.207s
  training loss:		0.080658
  validation loss:		0.051951
  validation accuracy:		98.29 %%
Epoch 13 of 200 took 174.119s
  training loss:		0.074935
  validation loss:		0.049074
  validation accuracy:		98.39 %%
Epoch 14 of 200 took 173.336s
  training loss:		0.073975
  validation loss:		0.047464
  validation accuracy:		98.49 %%
Epoch 15 of 200 took 175.313s
  training loss:		0.071554
  validation loss:		0.046353
  validation accuracy:		98.52 %%
Epoch 16 of 200 took 173.701s
  training loss:		0.067563
  validation loss:		0.045016
  validation accuracy:		98.55 %%
Epoch 17 of 200 took 173.395s
  training loss:		0.064036
  validation loss:		0.044432
  validation accuracy:		98.55 %%
Epoch 18 of 200 took 177.765s
  training loss:		0.062768
  validation loss:		0.042111
  validation accuracy:		98.67 %%
Epoch 19 of 200 took 187.817s
  training loss:		0.059874
  validation loss:		0.041446
  validation accuracy:		98.64 %%
Epoch 20 of 200 took 181.003s
  training loss:		0.055150
  validation loss:		0.041254
  validation accuracy:		98.58 %%
Epoch 21 of 200 took 176.452s
  training loss:		0.054561
  validation loss:		0.040863
  validation accuracy:		98.69 %%
Epoch 22 of 200 took 175.830s
  training loss:		0.053435
  validation loss:		0.039351
  validation accuracy:		98.74 %%
Epoch 23 of 200 took 176.127s
  training loss:		0.051261
  validation loss:		0.038018
  validation accuracy:		98.76 %%
Epoch 24 of 200 took 173.406s
  training loss:		0.051460
  validation loss:		0.038752
  validation accuracy:		98.79 %%
Epoch 25 of 200 took 175.422s
  training loss:		0.048922
  validation loss:		0.037568
  validation accuracy:		98.71 %%
Epoch 26 of 200 took 174.217s
  training loss:		0.047735
  validation loss:		0.036557
  validation accuracy:		98.84 %%
Epoch 27 of 200 took 173.807s
  training loss:		0.046517
  validation loss:		0.038296
  validation accuracy:		98.78 %%
Epoch 28 of 200 took 173.757s
  training loss:		0.045695
  validation loss:		0.038220
  validation accuracy:		98.75 %%
Epoch 29 of 200 took 177.346s
  training loss:		0.043490
  validation loss:		0.036492
  validation accuracy:		98.82 %%
Epoch 30 of 200 took 190.207s
  training loss:		0.042334
  validation loss:		0.035961
  validation accuracy:		98.83 %%
Epoch 31 of 200 took 194.386s
  training loss:		0.042641
  validation loss:		0.035814
  validation accuracy:		98.83 %%
Epoch 32 of 200 took 183.520s
  training loss:		0.040549
  validation loss:		0.035080
  validation accuracy:		98.83 %%
Epoch 33 of 200 took 200.670s
  training loss:		0.039924
  validation loss:		0.035014
  validation accuracy:		98.81 %%
Epoch 34 of 200 took 213.694s
  training loss:		0.037751
  validation loss:		0.035072
  validation accuracy:		98.89 %%
Epoch 35 of 200 took 209.604s
  training loss:		0.037396
  validation loss:		0.034472
  validation accuracy:		98.89 %%
Epoch 36 of 200 took 207.461s
  training loss:		0.037071
  validation loss:		0.034251
  validation accuracy:		98.91 %%
Epoch 37 of 200 took 223.871s
  training loss:		0.036224
  validation loss:		0.034188
  validation accuracy:		98.85 %%
Epoch 38 of 200 took 213.409s
  training loss:		0.036672
  validation loss:		0.034382
  validation accuracy:		98.88 %%
Epoch 39 of 200 took 210.354s
  training loss:		0.036229
  validation loss:		0.032623
  validation accuracy:		98.90 %%
Epoch 40 of 200 took 227.577s
  training loss:		0.032412
  validation loss:		0.032998
  validation accuracy:		98.93 %%
Epoch 41 of 200 took 233.708s
  training loss:		0.033724
  validation loss:		0.032734
  validation accuracy:		98.91 %%
Epoch 42 of 200 took 232.047s
  training loss:		0.032555
  validation loss:		0.031924
  validation accuracy:		98.94 %%
Epoch 43 of 200 took 224.897s
  training loss:		0.031350
  validation loss:		0.032070
  validation accuracy:		98.91 %%
Epoch 44 of 200 took 211.136s
  training loss:		0.030594
  validation loss:		0.031882
  validation accuracy:		98.98 %%
Epoch 45 of 200 took 215.065s
  training loss:		0.031042
  validation loss:		0.031792
  validation accuracy:		98.95 %%
Epoch 46 of 200 took 193.095s
  training loss:		0.030222
  validation loss:		0.032438
  validation accuracy:		98.95 %%
Epoch 47 of 200 took 211.300s
  training loss:		0.029151
  validation loss:		0.031527
  validation accuracy:		98.99 %%
Epoch 48 of 200 took 211.457s
  training loss:		0.028870
  validation loss:		0.030687
  validation accuracy:		99.00 %%
Epoch 49 of 200 took 212.478s
  training loss:		0.029732
  validation loss:		0.032273
  validation accuracy:		99.03 %%
Epoch 50 of 200 took 217.122s
  training loss:		0.028872
  validation loss:		0.031430
  validation accuracy:		99.00 %%
Epoch 51 of 200 took 211.695s
  training loss:		0.028159
  validation loss:		0.031146
  validation accuracy:		98.98 %%
Epoch 52 of 200 took 214.241s
  training loss:		0.026880
  validation loss:		0.030874
  validation accuracy:		98.98 %%
Epoch 53 of 200 took 199.208s
  training loss:		0.027076
  validation loss:		0.030525
  validation accuracy:		99.01 %%
Epoch 54 of 200 took 177.037s
  training loss:		0.025250
  validation loss:		0.031327
  validation accuracy:		98.98 %%
Epoch 55 of 200 took 173.345s
  training loss:		0.025180
  validation loss:		0.030590
  validation accuracy:		99.05 %%
Epoch 56 of 200 took 174.460s
  training loss:		0.024713
  validation loss:		0.030517
  validation accuracy:		99.15 %%
Epoch 57 of 200 took 173.859s
  training loss:		0.024192
  validation loss:		0.032367
  validation accuracy:		98.96 %%
Epoch 58 of 200 took 173.373s
  training loss:		0.022888
  validation loss:		0.030038
  validation accuracy:		99.05 %%
Epoch 59 of 200 took 173.642s
  training loss:		0.024369
  validation loss:		0.029788
  validation accuracy:		99.02 %%
Epoch 60 of 200 took 173.307s
  training loss:		0.024130
  validation loss:		0.031348
  validation accuracy:		99.03 %%
Epoch 61 of 200 took 177.779s
  training loss:		0.023749
  validation loss:		0.031098
  validation accuracy:		99.03 %%
Epoch 62 of 200 took 173.809s
  training loss:		0.023018
  validation loss:		0.029426
  validation accuracy:		99.15 %%
Epoch 63 of 200 took 173.768s
  training loss:		0.022541
  validation loss:		0.030060
  validation accuracy:		99.04 %%
Epoch 64 of 200 took 173.607s
  training loss:		0.023230
  validation loss:		0.029961
  validation accuracy:		99.02 %%
Epoch 65 of 200 took 173.441s
  training loss:		0.022102
  validation loss:		0.030130
  validation accuracy:		99.02 %%
Epoch 66 of 200 took 173.400s
  training loss:		0.021488
  validation loss:		0.030411
  validation accuracy:		99.08 %%
Epoch 67 of 200 took 173.466s
  training loss:		0.020972
  validation loss:		0.031858
  validation accuracy:		99.07 %%
Epoch 68 of 200 took 173.515s
  training loss:		0.020636
  validation loss:		0.030347
  validation accuracy:		99.07 %%
Epoch 69 of 200 took 173.349s
  training loss:		0.019832
  validation loss:		0.030090
  validation accuracy:		99.02 %%
Epoch 70 of 200 took 173.625s
  training loss:		0.019862
  validation loss:		0.031033
  validation accuracy:		99.05 %%
Epoch 71 of 200 took 172.969s
  training loss:		0.020034
  validation loss:		0.031447
  validation accuracy:		98.98 %%
Epoch 72 of 200 took 173.418s
  training loss:		0.019389
  validation loss:		0.030132
  validation accuracy:		99.05 %%
Epoch 73 of 200 took 173.417s
  training loss:		0.019370
  validation loss:		0.030179
  validation accuracy:		99.07 %%
Epoch 74 of 200 took 173.494s
  training loss:		0.018577
  validation loss:		0.029999
  validation accuracy:		99.02 %%
Epoch 75 of 200 took 173.757s
  training loss:		0.018030
  validation loss:		0.030717
  validation accuracy:		99.10 %%
Epoch 76 of 200 took 173.805s
  training loss:		0.018003
  validation loss:		0.032598
  validation accuracy:		99.07 %%
Epoch 77 of 200 took 174.282s
  training loss:		0.018229
  validation loss:		0.031366
  validation accuracy:		99.07 %%
Epoch 78 of 200 took 173.229s
  training loss:		0.018791
  validation loss:		0.030535
  validation accuracy:		99.04 %%
Epoch 79 of 200 took 173.326s
  training loss:		0.017671
  validation loss:		0.029910
  validation accuracy:		99.05 %%
Epoch 80 of 200 took 173.271s
  training loss:		0.016922
  validation loss:		0.029760
  validation accuracy:		99.05 %%
Epoch 81 of 200 took 188.473s
  training loss:		0.016296
  validation loss:		0.030195
  validation accuracy:		99.04 %%
Epoch 82 of 200 took 177.257s
  training loss:		0.016402
  validation loss:		0.032094
  validation accuracy:		99.03 %%
Epoch 83 of 200 took 173.379s
  training loss:		0.015976
  validation loss:		0.031474
  validation accuracy:		99.08 %%
Epoch 84 of 200 took 173.666s
  training loss:		0.016204
  validation loss:		0.031227
  validation accuracy:		99.07 %%
Epoch 85 of 200 took 173.240s
  training loss:		0.015237
  validation loss:		0.030762
  validation accuracy:		99.11 %%
Epoch 86 of 200 took 173.635s
  training loss:		0.016069
  validation loss:		0.031290
  validation accuracy:		99.11 %%
Epoch 87 of 200 took 173.369s
  training loss:		0.016258
  validation loss:		0.031846
  validation accuracy:		98.97 %%
Epoch 88 of 200 took 173.688s
  training loss:		0.016570
  validation loss:		0.029569
  validation accuracy:		99.07 %%
Epoch 89 of 200 took 173.453s
  training loss:		0.014587
  validation loss:		0.030260
  validation accuracy:		99.10 %%
Epoch 90 of 200 took 173.283s
  training loss:		0.015476
  validation loss:		0.030566
  validation accuracy:		99.07 %%
Epoch 91 of 200 took 173.277s
  training loss:		0.014944
  validation loss:		0.030642
  validation accuracy:		99.07 %%
Epoch 92 of 200 took 173.101s
  training loss:		0.015682
  validation loss:		0.030824
  validation accuracy:		99.10 %%
Epoch 93 of 200 took 173.125s
  training loss:		0.015075
  validation loss:		0.030568
  validation accuracy:		99.07 %%
Epoch 94 of 200 took 173.226s
  training loss:		0.014875
  validation loss:		0.030515
  validation accuracy:		99.10 %%
Epoch 95 of 200 took 173.520s
  training loss:		0.014603
  validation loss:		0.029510
  validation accuracy:		99.09 %%
Epoch 96 of 200 took 173.266s
  training loss:		0.013721
  validation loss:		0.029602
  validation accuracy:		99.17 %%
Epoch 97 of 200 took 173.625s
  training loss:		0.013248
  validation loss:		0.030373
  validation accuracy:		99.17 %%
Epoch 98 of 200 took 173.514s
  training loss:		0.013398
  validation loss:		0.029816
  validation accuracy:		99.16 %%
Epoch 99 of 200 took 173.203s
  training loss:		0.014257
  validation loss:		0.030079
  validation accuracy:		99.14 %%
Epoch 100 of 200 took 173.496s
  training loss:		0.012927
  validation loss:		0.029108
  validation accuracy:		99.13 %%
Epoch 101 of 200 took 173.308s
  training loss:		0.013704
  validation loss:		0.029488
  validation accuracy:		99.17 %%
Epoch 102 of 200 took 173.142s
  training loss:		0.013214
  validation loss:		0.031418
  validation accuracy:		99.14 %%
Epoch 103 of 200 took 173.765s
  training loss:		0.013729
  validation loss:		0.030360
  validation accuracy:		99.09 %%
Epoch 104 of 200 took 173.540s
  training loss:		0.012829
  validation loss:		0.030199
  validation accuracy:		99.08 %%
Epoch 105 of 200 took 173.516s
  training loss:		0.013062
  validation loss:		0.030782
  validation accuracy:		99.19 %%
Epoch 106 of 200 took 173.323s
  training loss:		0.011011
  validation loss:		0.030828
  validation accuracy:		99.15 %%
Epoch 107 of 200 took 173.742s
  training loss:		0.012798
  validation loss:		0.030354
  validation accuracy:		99.11 %%
Epoch 108 of 200 took 173.056s
  training loss:		0.012867
  validation loss:		0.030212
  validation accuracy:		99.14 %%
Epoch 109 of 200 took 173.140s
  training loss:		0.013131
  validation loss:		0.031482
  validation accuracy:		99.15 %%
Epoch 110 of 200 took 173.496s
  training loss:		0.012420
  validation loss:		0.032017
  validation accuracy:		99.14 %%
Epoch 111 of 200 took 172.997s
  training loss:		0.011440
  validation loss:		0.032787
  validation accuracy:		99.09 %%
Epoch 112 of 200 took 172.993s
  training loss:		0.011258
  validation loss:		0.031192
  validation accuracy:		99.12 %%
Epoch 113 of 200 took 173.472s
  training loss:		0.012212
  validation loss:		0.032544
  validation accuracy:		99.13 %%
Epoch 114 of 200 took 173.709s
  training loss:		0.011390
  validation loss:		0.031495
  validation accuracy:		99.15 %%
Epoch 115 of 200 took 173.143s
  training loss:		0.012341
  validation loss:		0.031368
  validation accuracy:		99.11 %%
Epoch 116 of 200 took 173.905s
  training loss:		0.011554
  validation loss:		0.031450
  validation accuracy:		99.22 %%
Epoch 117 of 200 took 173.465s
  training loss:		0.011585
  validation loss:		0.030420
  validation accuracy:		99.18 %%
Epoch 118 of 200 took 173.470s
  training loss:		0.011125
  validation loss:		0.030189
  validation accuracy:		99.06 %%
Epoch 119 of 200 took 173.364s
  training loss:		0.011183
  validation loss:		0.029895
  validation accuracy:		99.14 %%
Epoch 120 of 200 took 173.013s
  training loss:		0.010501
  validation loss:		0.031817
  validation accuracy:		99.09 %%
Epoch 121 of 200 took 173.426s
  training loss:		0.010348
  validation loss:		0.030283
  validation accuracy:		99.13 %%
Epoch 122 of 200 took 173.486s
  training loss:		0.011029
  validation loss:		0.030284
  validation accuracy:		99.21 %%
Epoch 123 of 200 took 173.068s
  training loss:		0.010203
  validation loss:		0.030166
  validation accuracy:		99.13 %%
Epoch 124 of 200 took 173.361s
  training loss:		0.010677
  validation loss:		0.029371
  validation accuracy:		99.21 %%
Epoch 125 of 200 took 173.556s
  training loss:		0.010247
  validation loss:		0.031291
  validation accuracy:		99.14 %%
Epoch 126 of 200 took 173.440s
  training loss:		0.010270
  validation loss:		0.030648
  validation accuracy:		99.18 %%
Epoch 127 of 200 took 173.519s
  training loss:		0.009961
  validation loss:		0.029987
  validation accuracy:		99.20 %%
Epoch 128 of 200 took 173.411s
  training loss:		0.010196
  validation loss:		0.030124
  validation accuracy:		99.19 %%
Epoch 129 of 200 took 173.282s
  training loss:		0.010351
  validation loss:		0.031019
  validation accuracy:		99.18 %%
Epoch 130 of 200 took 173.441s
  training loss:		0.009544
  validation loss:		0.029925
  validation accuracy:		99.16 %%
Epoch 131 of 200 took 173.688s
  training loss:		0.009518
  validation loss:		0.030982
  validation accuracy:		99.14 %%
Epoch 132 of 200 took 173.318s
  training loss:		0.009921
  validation loss:		0.031488
  validation accuracy:		99.07 %%
Epoch 133 of 200 took 173.605s
  training loss:		0.009429
  validation loss:		0.033190
  validation accuracy:		99.07 %%
Epoch 134 of 200 took 173.292s
  training loss:		0.009619
  validation loss:		0.031526
  validation accuracy:		99.09 %%
Epoch 135 of 200 took 173.586s
  training loss:		0.009090
  validation loss:		0.030581
  validation accuracy:		99.13 %%
Epoch 136 of 200 took 173.902s
  training loss:		0.008871
  validation loss:		0.032506
  validation accuracy:		99.11 %%
Epoch 137 of 200 took 173.446s
  training loss:		0.008844
  validation loss:		0.032717
  validation accuracy:		99.16 %%
Epoch 138 of 200 took 173.528s
  training loss:		0.008788
  validation loss:		0.031533
  validation accuracy:		99.16 %%
Epoch 139 of 200 took 173.068s
  training loss:		0.008951
  validation loss:		0.031313
  validation accuracy:		99.16 %%
Epoch 140 of 200 took 173.499s
  training loss:		0.008819
  validation loss:		0.032017
  validation accuracy:		99.06 %%
Epoch 141 of 200 took 173.498s
  training loss:		0.009247
  validation loss:		0.030869
  validation accuracy:		99.18 %%
Epoch 142 of 200 took 173.311s
  training loss:		0.008288
  validation loss:		0.032200
  validation accuracy:		99.09 %%
Epoch 143 of 200 took 172.996s
  training loss:		0.009039
  validation loss:		0.030575
  validation accuracy:		99.16 %%
Epoch 144 of 200 took 173.492s
  training loss:		0.009006
  validation loss:		0.032816
  validation accuracy:		99.17 %%
Epoch 145 of 200 took 173.828s
  training loss:		0.008361
  validation loss:		0.031514
  validation accuracy:		99.14 %%
Epoch 146 of 200 took 173.400s
  training loss:		0.007450
  validation loss:		0.032183
  validation accuracy:		99.17 %%
Epoch 147 of 200 took 173.457s
  training loss:		0.008543
  validation loss:		0.030296
  validation accuracy:		99.14 %%
Epoch 148 of 200 took 173.298s
  training loss:		0.008174
  validation loss:		0.032220
  validation accuracy:		99.14 %%
Epoch 149 of 200 took 173.503s
  training loss:		0.007685
  validation loss:		0.032013
  validation accuracy:		99.06 %%
Epoch 150 of 200 took 173.853s
  training loss:		0.008260
  validation loss:		0.033074
  validation accuracy:		99.11 %%
Epoch 151 of 200 took 173.243s
  training loss:		0.007655
  validation loss:		0.032096
  validation accuracy:		99.15 %%
Epoch 152 of 200 took 173.461s
  training loss:		0.008345
  validation loss:		0.031973
  validation accuracy:		99.07 %%
Epoch 153 of 200 took 173.656s
  training loss:		0.007511
  validation loss:		0.031457
  validation accuracy:		99.17 %%
Epoch 154 of 200 took 173.543s
  training loss:		0.007568
  validation loss:		0.032725
  validation accuracy:		99.10 %%
Epoch 155 of 200 took 173.361s
  training loss:		0.007542
  validation loss:		0.032427
  validation accuracy:		99.13 %%
Epoch 156 of 200 took 176.414s
  training loss:		0.007517
  validation loss:		0.031518
  validation accuracy:		99.17 %%
Epoch 157 of 200 took 174.121s
  training loss:		0.007363
  validation loss:		0.031342
  validation accuracy:		99.14 %%
Epoch 158 of 200 took 173.858s
  training loss:		0.007550
  validation loss:		0.032905
  validation accuracy:		99.07 %%
Epoch 159 of 200 took 173.612s
  training loss:		0.007767
  validation loss:		0.031707
  validation accuracy:		99.08 %%
Epoch 160 of 200 took 173.592s
  training loss:		0.007429
  validation loss:		0.031664
  validation accuracy:		99.15 %%
Epoch 161 of 200 took 173.539s
  training loss:		0.007225
  validation loss:		0.031905
  validation accuracy:		99.16 %%
Epoch 162 of 200 took 174.078s
  training loss:		0.007293
  validation loss:		0.035006
  validation accuracy:		99.17 %%
Epoch 163 of 200 took 173.878s
  training loss:		0.007529
  validation loss:		0.032926
  validation accuracy:		99.14 %%
Epoch 164 of 200 took 173.257s
  training loss:		0.007543
  validation loss:		0.033868
  validation accuracy:		99.20 %%
Epoch 165 of 200 took 174.006s
  training loss:		0.007537
  validation loss:		0.033564
  validation accuracy:		99.09 %%
Epoch 166 of 200 took 173.641s
  training loss:		0.006623
  validation loss:		0.031516
  validation accuracy:		99.17 %%
Epoch 167 of 200 took 173.759s
  training loss:		0.007161
  validation loss:		0.031353
  validation accuracy:		99.20 %%
Epoch 168 of 200 took 173.669s
  training loss:		0.007037
  validation loss:		0.031438
  validation accuracy:		99.17 %%
Epoch 169 of 200 took 173.841s
  training loss:		0.006709
  validation loss:		0.032151
  validation accuracy:		99.19 %%
Epoch 170 of 200 took 173.770s
  training loss:		0.007442
  validation loss:		0.033068
  validation accuracy:		99.09 %%
Epoch 171 of 200 took 173.268s
  training loss:		0.007050
  validation loss:		0.031315
  validation accuracy:		99.14 %%
Epoch 172 of 200 took 173.509s
  training loss:		0.006382
  validation loss:		0.031659
  validation accuracy:		99.16 %%
Epoch 173 of 200 took 173.764s
  training loss:		0.006387
  validation loss:		0.032373
  validation accuracy:		99.15 %%
Epoch 174 of 200 took 173.544s
  training loss:		0.006617
  validation loss:		0.031862
  validation accuracy:		99.18 %%
Epoch 175 of 200 took 173.413s
  training loss:		0.006631
  validation loss:		0.032079
  validation accuracy:		99.16 %%
Epoch 176 of 200 took 173.690s
  training loss:		0.006243
  validation loss:		0.032685
  validation accuracy:		99.13 %%
Epoch 177 of 200 took 173.864s
  training loss:		0.005994
  validation loss:		0.032110
  validation accuracy:		99.17 %%
Epoch 178 of 200 took 173.476s
  training loss:		0.005927
  validation loss:		0.033062
  validation accuracy:		99.19 %%
Epoch 179 of 200 took 173.957s
  training loss:		0.006503
  validation loss:		0.032676
  validation accuracy:		99.13 %%
Epoch 180 of 200 took 176.436s
  training loss:		0.006018
  validation loss:		0.031489
  validation accuracy:		99.13 %%
Epoch 181 of 200 took 173.747s
  training loss:		0.006394
  validation loss:		0.033384
  validation accuracy:		99.13 %%
Epoch 182 of 200 took 173.802s
  training loss:		0.006820
  validation loss:		0.032817
  validation accuracy:		99.14 %%
Epoch 183 of 200 took 173.908s
  training loss:		0.006171
  validation loss:		0.032586
  validation accuracy:		99.18 %%
Epoch 184 of 200 took 173.744s
  training loss:		0.006022
  validation loss:		0.033232
  validation accuracy:		99.13 %%
Epoch 185 of 200 took 173.798s
  training loss:		0.006279
  validation loss:		0.031606
  validation accuracy:		99.18 %%
Epoch 186 of 200 took 173.944s
  training loss:		0.006711
  validation loss:		0.031931
  validation accuracy:		99.07 %%
Epoch 187 of 200 took 173.707s
  training loss:		0.005398
  validation loss:		0.033390
  validation accuracy:		99.17 %%
Epoch 188 of 200 took 173.770s
  training loss:		0.005815
  validation loss:		0.032712
  validation accuracy:		99.18 %%
Epoch 189 of 200 took 173.708s
  training loss:		0.004947
  validation loss:		0.032752
  validation accuracy:		99.18 %%
Epoch 190 of 200 took 173.928s
  training loss:		0.005936
  validation loss:		0.033004
  validation accuracy:		99.21 %%
Epoch 191 of 200 took 173.967s
  training loss:		0.005618
  validation loss:		0.033511
  validation accuracy:		99.15 %%
Epoch 192 of 200 took 173.576s
  training loss:		0.006218
  validation loss:		0.032888
  validation accuracy:		99.21 %%
Epoch 193 of 200 took 174.078s
  training loss:		0.005733
  validation loss:		0.033180
  validation accuracy:		99.20 %%
Epoch 194 of 200 took 173.684s
  training loss:		0.006005
  validation loss:		0.031962
  validation accuracy:		99.21 %%
Epoch 195 of 200 took 173.845s
  training loss:		0.005988
  validation loss:		0.033247
  validation accuracy:		99.11 %%
Epoch 196 of 200 took 173.504s
  training loss:		0.005639
  validation loss:		0.035591
  validation accuracy:		99.20 %%
Epoch 197 of 200 took 173.544s
  training loss:		0.005416
  validation loss:		0.031794
  validation accuracy:		99.12 %%
Epoch 198 of 200 took 173.600s
  training loss:		0.005694
  validation loss:		0.032934
  validation accuracy:		99.18 %%
Epoch 199 of 200 took 173.856s
  training loss:		0.005443
  validation loss:		0.033474
  validation accuracy:		99.18 %%
Epoch 200 of 200 took 173.266s
  training loss:		0.005285
  validation loss:		0.033518
  validation accuracy:		99.20 %%
/Users/dikien/anaconda/lib/python2.7/site-packages/Lasagne-0.1.dev0-py2.7.egg/lasagne/init.py:86: UserWarning: The uniform initializer no longer uses Glorot et al.'s approach to determine the bounds, but defaults to the range (-0.01, 0.01) instead. Please use the new GlorotUniform initializer to get the old behavior. GlorotUniform is now the default for all layers.
  warnings.warn("The uniform initializer no longer uses Glorot et al.'s "
/Users/dikien/anaconda/lib/python2.7/site-packages/Lasagne-0.1.dev0-py2.7.egg/lasagne/layers/helper.py:69: UserWarning: get_all_layers() has been changed to return layers in topological order. The former implementation is still available as get_all_layers_old(), but will be removed before the first release of Lasagne. To ignore this warning, use `warnings.filterwarnings('ignore', '.*topo.*')`.
  warnings.warn("get_all_layers() has been changed to return layers in "

In [4]:
# Re-load the dataset (duplicates the loading in the training cell above;
# lets this cell run standalone after a kernel restart).
print("Loading data...")
with gzip.open(DATA_FILENAME, 'rb') as f:
    data = pickle.load(f)
dataset = load_data(data)


Loading data...

In [7]:
# Quick sanity check of the loaded metadata (bare last expression displays it).
# dataset['input_height']
# dataset['input_width']
dataset['output_dim']


Out[7]:
10