In [3]:
'''Trains a simple convnet on the MNIST dataset for ONLY digits 2 and 7.
Gets to 98.25% test accuracy after 12 epochs
(there is still a lot of margin for parameter tuning).
4 seconds per epoch on a 2 GHz Intel Core i5.
'''

from __future__ import print_function
import keras
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten
from keras.layers import Conv2D, MaxPooling2D
from keras import backend as K
import numpy as np

batch_size = 128
num_classes = 2
epochs = 12

# input image dimensions
img_rows, img_cols = 28, 28

# the data, shuffled and split between train and test sets
(x_train, y_train), (x_test, y_test) = mnist.load_data()

# Only look at 2s and 7s
train_picks = np.logical_or(y_train == 2, y_train == 7)
test_picks = np.logical_or(y_test == 2, y_test == 7)

x_train = x_train[train_picks]
x_test = x_test[test_picks]
y_train = np.array(y_train[train_picks] == 7, dtype=int)
y_test = np.array(y_test[test_picks] == 7, dtype=int)


if K.image_data_format() == 'channels_first':
    x_train = x_train.reshape(x_train.shape[0], 1, img_rows, img_cols)
    x_test = x_test.reshape(x_test.shape[0], 1, img_rows, img_cols)
    input_shape = (1, img_rows, img_cols)
else:
    x_train = x_train.reshape(x_train.shape[0], img_rows, img_cols, 1)
    x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, 1)
    input_shape = (img_rows, img_cols, 1)

x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train /= 255
x_test /= 255
print('x_train shape:', x_train.shape)
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')

# convert class vectors to binary class matrices
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)

model = Sequential()
model.add(Conv2D(4, kernel_size=(3, 3), activation='relu', input_shape=input_shape))
model.add(Conv2D(8, (3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(16, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(2, activation='softmax'))

model.compile(loss=keras.losses.categorical_crossentropy,
              optimizer=keras.optimizers.Adadelta(),
              metrics=['accuracy'])

model.fit(x_train, y_train,
          batch_size=batch_size,
          epochs=epochs,
          verbose=1,
          validation_data=(x_test, y_test))
score = model.evaluate(x_test, y_test, verbose=0)
print('Test loss:', score[0])
print('Test accuracy:', score[1])


Downloading data from https://s3.amazonaws.com/img-datasets/mnist.npz
---------------------------------------------------------------------------
KeyboardInterrupt                         Traceback (most recent call last)
<ipython-input-3-179e27036aa7> in <module>()
     22 
     23 # the data, shuffled and split between train and test sets
---> 24 (x_train, y_train), (x_test, y_test) = mnist.load_data()
     25 
     26 #Only look at 3s and 8s

/opt/conda/lib/python3.6/site-packages/keras/datasets/mnist.py in load_data(path)
     13         Tuple of Numpy arrays: `(x_train, y_train), (x_test, y_test)`.
     14     """
---> 15     path = get_file(path, origin='https://s3.amazonaws.com/img-datasets/mnist.npz')
     16     f = np.load(path)
     17     x_train = f['x_train']

/opt/conda/lib/python3.6/site-packages/keras/utils/data_utils.py in get_file(fname, origin, untar, md5_hash, file_hash, cache_subdir, hash_algorithm, extract, archive_format, cache_dir)
    199             try:
    200                 urlretrieve(origin, fpath,
--> 201                             functools.partial(dl_progress, progbar=progbar))
    202             except URLError as e:
    203                 raise Exception(error_msg.format(origin, e.errno, e.reason))

/opt/conda/lib/python3.6/urllib/request.py in urlretrieve(url, filename, reporthook, data)
    275 
    276             while True:
--> 277                 block = fp.read(bs)
    278                 if not block:
    279                     break

/opt/conda/lib/python3.6/http/client.py in read(self, amt)
    447             # Amount is given, implement using readinto
    448             b = bytearray(amt)
--> 449             n = self.readinto(b)
    450             return memoryview(b)[:n].tobytes()
    451         else:

/opt/conda/lib/python3.6/http/client.py in readinto(self, b)
    491         # connection, and the user is reading more bytes than will be provided
    492         # (for example, reading in 1k chunks)
--> 493         n = self.fp.readinto(b)
    494         if not n and b:
    495             # Ideally, we would raise IncompleteRead if the content-length

/opt/conda/lib/python3.6/socket.py in readinto(self, b)
    584         while True:
    585             try:
--> 586                 return self._sock.recv_into(b)
    587             except timeout:
    588                 self._timeout_occurred = True

/opt/conda/lib/python3.6/ssl.py in recv_into(self, buffer, nbytes, flags)
   1000                   "non-zero flags not allowed in calls to recv_into() on %s" %
   1001                   self.__class__)
-> 1002             return self.read(nbytes, buffer)
   1003         else:
   1004             return socket.recv_into(self, buffer, nbytes, flags)

/opt/conda/lib/python3.6/ssl.py in read(self, len, buffer)
    863             raise ValueError("Read on closed or unwrapped SSL socket.")
    864         try:
--> 865             return self._sslobj.read(len, buffer)
    866         except SSLError as x:
    867             if x.args[0] == SSL_ERROR_EOF and self.suppress_ragged_eofs:

/opt/conda/lib/python3.6/ssl.py in read(self, len, buffer)
    623         """
    624         if buffer is not None:
--> 625             v = self._sslobj.read(len, buffer)
    626         else:
    627             v = self._sslobj.read(len)

KeyboardInterrupt: 
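The download above was interrupted before it completed. If a copy of mnist.npz has already been fetched (for example, manually from the same URL), the arrays can be read straight from the local file, mirroring what mnist.load_data does internally in the traceback above. A minimal sketch, assuming the archive sits in the working directory:

import numpy as np

local_path = 'mnist.npz'  # assumed local copy of the archive

# Same four arrays that mnist.load_data() extracts from the .npz file
f = np.load(local_path)
x_train, y_train = f['x_train'], f['y_train']
x_test, y_test = f['x_test'], f['y_test']
f.close()

Placing the complete file at ~/.keras/datasets/mnist.npz should also let the original load_data call skip the download.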

In [2]:
from keras.wrappers.scikit_learn import KerasClassifier
from sklearn.model_selection import GridSearchCV
def build_model(optimizer):
    model = Sequential()
    model.add(Conv2D(4, kernel_size=(3, 3), activation='relu', input_shape=input_shape))
    model.add(Conv2D(8, (3, 3), activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))
    model.add(Flatten())
    model.add(Dense(16, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(2, activation='softmax'))

    model.compile(loss=keras.losses.categorical_crossentropy,
                  optimizer=optimizer,
                  metrics=['accuracy'])
    return model

classifier = KerasClassifier(build_fn=build_model)
parameters = {
    'optimizer': ['adam', 'rmsprop']
}
grid_search = GridSearchCV(estimator=classifier,
                           param_grid=parameters,
                           scoring='accuracy',
                           cv=10)
grid_search = grid_search.fit(x_train, y_train)
best_parameters = grid_search.best_params_
best_accuracy = grid_search.best_score_


Using TensorFlow backend.
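Once the search finishes, the chosen optimizer and its cross-validated score can be read back from best_parameters and best_accuracy. Note that, as written, each fit inside the grid search falls back on Keras' fit defaults (a single epoch); the training settings can instead be routed through the wrapper. A sketch reusing the batch_size and epochs defined earlier (the values are not tuned):

classifier = KerasClassifier(build_fn=build_model,
                             epochs=epochs,
                             batch_size=batch_size,
                             verbose=0)

print('Best parameters:', best_parameters)
print('Best cross-validated accuracy:', best_accuracy)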

In [ ]: