In [3]:
# These are all the modules we'll be using later. Make sure you can import them
# before proceeding further.
from __future__ import print_function
import matplotlib.pyplot as plt
import numpy as np
import os
import sys
from scipy import ndimage


# Configure the matplotlib backend to plot inline in IPython
%matplotlib inline

In [4]:
image_size = 28  # Pixel width and height.
pixel_depth = 255.0  # Number of levels per pixel.
# One folder per letter class, e.g. .\notMNIST_small\A
# (the original raw strings r'.\\...' contained doubled backslashes)
folders = [os.path.join('.', 'notMNIST_small', letter)
           for letter in 'ABCDEFGHIJ']

def load_letter(folder, min_num_images):
    """Load and normalize the data for a single letter label."""
    image_files = os.listdir(folder)
    dataset = np.ndarray(shape=(len(image_files), image_size, image_size),
                         dtype=np.float32)
    print(folder)
    num_images = 0
    for image in image_files:
        image_file = os.path.join(folder, image)
        try:
            # Scale raw pixels from [0, 255] to [-0.5, 0.5].
            # Note: scipy.ndimage.imread is deprecated (removed in SciPy 1.2);
            # on newer installs imageio.imread is a drop-in replacement here.
            image_data = (ndimage.imread(image_file).astype(float) -
                          pixel_depth / 2) / pixel_depth
            if image_data.shape != (image_size, image_size):
                raise Exception('Unexpected image shape: %s' % str(image_data.shape))
            dataset[num_images, :, :] = image_data
            num_images += 1
        except IOError as e:
            print('Could not read:', image_file, ':', e, "- it's ok, skipping.")

    # Trim the preallocated array down to the images actually read.
    dataset = dataset[0:num_images, :, :]
    if num_images < min_num_images:
        raise Exception('Many fewer images than expected: %d < %d' %
                        (num_images, min_num_images))

    print('Full dataset tensor:', dataset.shape)
    print('Mean:', np.mean(dataset))
    print('Standard deviation:', np.std(dataset))
    return dataset

X_datasets = []
Y_datasets = []
for idx, folder in enumerate(folders):
    X_datasets.append(load_letter(folder, 1800))
    # Build one-hot labels: each row is all zeros except column idx.
    labels = np.zeros((X_datasets[-1].shape[0], len(folders)))
    labels[:, idx] = 1
    Y_datasets.append(labels)


.\notMNIST_small\A
Could not read: .\notMNIST_small\A\RGVtb2NyYXRpY2FCb2xkT2xkc3R5bGUgQm9sZC50dGY=.png : cannot identify image file '.\\notMNIST_small\\A\\RGVtb2NyYXRpY2FCb2xkT2xkc3R5bGUgQm9sZC50dGY=.png' - it's ok, skipping.
Full dataset tensor: (1872, 28, 28)
Mean: -0.132626
Standard deviation: 0.445128
.\notMNIST_small\B
Full dataset tensor: (1873, 28, 28)
Mean: 0.00535609
Standard deviation: 0.457115
.\notMNIST_small\C
Full dataset tensor: (1873, 28, 28)
Mean: -0.141521
Standard deviation: 0.44269
.\notMNIST_small\D
Full dataset tensor: (1873, 28, 28)
Mean: -0.0492167
Standard deviation: 0.459759
.\notMNIST_small\E
Full dataset tensor: (1873, 28, 28)
Mean: -0.0599148
Standard deviation: 0.45735
.\notMNIST_small\F
Could not read: .\notMNIST_small\F\Q3Jvc3NvdmVyIEJvbGRPYmxpcXVlLnR0Zg==.png : cannot identify image file '.\\notMNIST_small\\F\\Q3Jvc3NvdmVyIEJvbGRPYmxpcXVlLnR0Zg==.png' - it's ok, skipping.
Full dataset tensor: (1872, 28, 28)
Mean: -0.118185
Standard deviation: 0.452279
.\notMNIST_small\G
Full dataset tensor: (1872, 28, 28)
Mean: -0.0925503
Standard deviation: 0.449006
.\notMNIST_small\H
Full dataset tensor: (1872, 28, 28)
Mean: -0.0586893
Standard deviation: 0.458759
.\notMNIST_small\I
Full dataset tensor: (1872, 28, 28)
Mean: 0.0526451
Standard deviation: 0.471894
.\notMNIST_small\J
Full dataset tensor: (1872, 28, 28)
Mean: -0.151689
Standard deviation: 0.448014
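
A quick sanity check before merging the classes: the normalization maps raw pixels from [0, 255] to [-0.5, 0.5], and the small set should be roughly balanced across letters. A minimal sketch, assuming the loading cell above has run:

In [ ]:
# Verify the value range and per-class counts of the loaded tensors.
for letter, data in zip('ABCDEFGHIJ', X_datasets):
    assert -0.5 <= data.min() <= data.max() <= 0.5
    print(letter, data.shape[0], 'images')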

In [5]:
from sklearn.model_selection import train_test_split

X_datasets2 = np.concatenate(X_datasets)
Y_datasets2 = np.concatenate(Y_datasets)
print("Combined dataset shape:", X_datasets2.shape)
# Note: no random_state is set, so the split differs between runs.
X_trains, X_tests, Y_trains, Y_tests = train_test_split(
    X_datasets2, Y_datasets2, test_size=0.25)
print("Test samples:", Y_tests.shape[0])
print("Training samples:", Y_trains.shape[0])
plt.imshow(X_tests[0], cmap='gray')


Combined dataset shape: (18724, 28, 28)
Test samples: 4681
Training samples: 14043
Out[5]:
<matplotlib.image.AxesImage at 0x2b628a59940>
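
Re-reading ~18k PNGs on every restart is slow, so it can help to persist the split once. A minimal sketch using NumPy's compressed .npz format (the file name notMNIST_split.npz is an arbitrary choice, not part of the original run):

In [ ]:
# Save the train/test split; reload later with np.load('notMNIST_split.npz').
np.savez_compressed('notMNIST_split.npz',
                    X_trains=X_trains, X_tests=X_tests,
                    Y_trains=Y_trains, Y_tests=Y_tests)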

In [15]:
# Create first network with Keras
from keras.models import Sequential
from keras.layers import Dense, Activation, Reshape
from keras.callbacks import EarlyStopping, ModelCheckpoint

callbacks = [
    # With the default patience=0, training stops as soon as val_loss fails
    # to improve by at least min_delta (see epoch 4 below).
    EarlyStopping(monitor='val_loss', min_delta=0.00001, verbose=1),
    # A more forgiving alternative: EarlyStopping(monitor='val_loss', patience=2)
    ModelCheckpoint(filepath='./weights.net', verbose=1, save_best_only=True),
]

# fix random seed for reproducibility
seed = 7
np.random.seed(seed)

input_dim = X_trains[0].shape[0] * X_trains[0].shape[1]
print((X_trains[0].shape[0], X_trains[0].shape[1]))
print(Y_trains[0].shape[0])

# create model (Keras 2 API: kernel_initializer instead of the old init=)
model = Sequential()
model.add(Reshape((input_dim,),
                  input_shape=(X_trains[0].shape[0], X_trains[0].shape[1])))
model.add(Dense(input_dim, kernel_initializer='uniform', activation='relu'))
model.add(Dense(input_dim, kernel_initializer='uniform', activation='relu'))
model.add(Dense(input_dim // 2, kernel_initializer='uniform', activation='sigmoid'))
model.add(Dense(Y_trains[0].shape[0], kernel_initializer='uniform', name="output"))

model.add(Activation('softmax', name="softmax"))
model.summary()

# Compile model
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
# Fit the model
model.fit(X_trains,
          Y_trains,
          epochs=50,
          batch_size=10,
          verbose=2,
          validation_split=0.25,
          callbacks=callbacks)
# Evaluate on the held-out test set: returns [loss, accuracy]
results = model.evaluate(X_tests, Y_tests, batch_size=32, verbose=1)
print(results)


(28, 28)
10
_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
reshape_8 (Reshape)          (None, 784)               0         
_________________________________________________________________
dense_21 (Dense)             (None, 784)               615440    
_________________________________________________________________
dense_22 (Dense)             (None, 784)               615440    
_________________________________________________________________
dense_23 (Dense)             (None, 392)               307720    
_________________________________________________________________
output (Dense)               (None, 10)                3930      
_________________________________________________________________
softmax (Activation)         (None, 10)                0         
=================================================================
Total params: 1,542,530
Trainable params: 1,542,530
Non-trainable params: 0
_________________________________________________________________
Train on 10532 samples, validate on 3511 samples
Epoch 1/50
Epoch 00001: val_loss improved from inf to 0.35558, saving model to ./weights.net
 - 41s - loss: 0.5302 - acc: 0.8430 - val_loss: 0.3556 - val_acc: 0.8949
Epoch 2/50
Epoch 00002: val_loss improved from 0.35558 to 0.32717, saving model to ./weights.net
 - 42s - loss: 0.3396 - acc: 0.9006 - val_loss: 0.3272 - val_acc: 0.9032
Epoch 3/50
Epoch 00003: val_loss improved from 0.32717 to 0.31285, saving model to ./weights.net
 - 40s - loss: 0.2781 - acc: 0.9187 - val_loss: 0.3128 - val_acc: 0.8949
Epoch 4/50
Epoch 00004: val_loss did not improve
 - 40s - loss: 0.2370 - acc: 0.9275 - val_loss: 0.3356 - val_acc: 0.9015
Epoch 00004: early stopping
4681/4681 [==============================] - 1s 216us/step
[0.360274024178803, 0.90536210212766599]
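
The parameter counts above follow inputs × units + units: 784·784 + 784 = 615,440 for each 784-unit layer, 784·392 + 392 = 307,720, and 392·10 + 10 = 3,930, for 1,542,530 in total. Also note that early stopping fired one epoch after the best val_loss, so the weights in memory are from epoch 4 while the checkpoint on disk holds epoch 3. A minimal sketch for scoring the best checkpoint instead (ModelCheckpoint saves the full model by default, so load_model can restore it):

In [ ]:
# Restore the best checkpoint (epoch 3 here) and evaluate it on the test set.
from keras.models import load_model
best_model = load_model('./weights.net')
print(best_model.evaluate(X_tests, Y_tests, batch_size=32, verbose=1))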

In [14]:
classes = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J']
predictions = model.predict(X_tests)
# Collect the indices of misclassified test samples.
errors = []
for idx, res in enumerate(predictions):
    cla_pre = classes[np.argmax(res)]
    cla_tar = classes[np.argmax(Y_tests[idx])]
    if cla_pre != cla_tar:  # was '==', which collected the *correct* predictions
        errors.append(idx)

# Show the first few misclassified images with predicted vs. target labels.
problems = 10
fig, axes = plt.subplots(problems, figsize=(10, 10))
fig.tight_layout()
for idx in range(problems):
    err = errors[idx]
    cla_pre = classes[np.argmax(predictions[err])]
    cla_tar = classes[np.argmax(Y_tests[err])]

    axes[idx].imshow(X_tests[err], cmap='gray')
    axes[idx].set_title("predicted=%s  target=%s" % (cla_pre, cla_tar), fontsize=10)
    axes[idx].set_xticks([])
    axes[idx].set_yticks([])
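
Beyond eyeballing individual mistakes, a confusion matrix shows which letter pairs the ~9.5% test error concentrates on. A minimal sketch with scikit-learn, assuming the prediction cell above has run:

In [ ]:
# Rows are true classes A-J, columns are predicted classes.
from sklearn.metrics import confusion_matrix
y_true = np.argmax(Y_tests, axis=1)
y_pred = np.argmax(predictions, axis=1)
print(confusion_matrix(y_true, y_pred))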


