In [53]:
# Import libraries
import tensorflow as tf
import numpy as np
from tensorflow.examples.tutorials.mnist import input_data

In [54]:
mnist = input_data.read_data_sets('MNIST_data/', one_hot=True)


Extracting MNIST_data/train-images-idx3-ubyte.gz
Extracting MNIST_data/train-labels-idx1-ubyte.gz
Extracting MNIST_data/t10k-images-idx3-ubyte.gz
Extracting MNIST_data/t10k-labels-idx1-ubyte.gz
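Note: the tensorflow.examples.tutorials.mnist helper was removed in later TensorFlow releases. A minimal sketch of loading the same data through tf.keras.datasets instead (an assumption: a TensorFlow build that bundles tf.keras; the Keras loader returns a 60000/10000 split rather than read_data_sets' 55000/5000/10000, and the reshape/one-hot steps below mirror what one_hot=True does):

# Sketch only: alternative loader for TensorFlow versions without input_data
(x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()
trX = x_train.reshape(-1, 784).astype('float32') / 255.0  # flatten and scale to [0, 1]
teX = x_test.reshape(-1, 784).astype('float32') / 255.0
trY = np.eye(10)[y_train]  # one-hot encode the integer labels
teY = np.eye(10)[y_test]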

In [55]:
trX, trY, teX, teY = mnist.train.images, mnist.train.labels, mnist.test.images, mnist.test.labels

print(mnist)
print(trX)
print(trY)
print(teX)
print(teY)


Datasets(train=<tensorflow.contrib.learn.python.learn.datasets.mnist.DataSet object at 0x000000000B394E48>, validation=<tensorflow.contrib.learn.python.learn.datasets.mnist.DataSet object at 0x000000000B394DA0>, test=<tensorflow.contrib.learn.python.learn.datasets.mnist.DataSet object at 0x000000000B394F60>)
[[ 0.  0.  0. ...,  0.  0.  0.]
 [ 0.  0.  0. ...,  0.  0.  0.]
 [ 0.  0.  0. ...,  0.  0.  0.]
 ..., 
 [ 0.  0.  0. ...,  0.  0.  0.]
 [ 0.  0.  0. ...,  0.  0.  0.]
 [ 0.  0.  0. ...,  0.  0.  0.]]
[[ 0.  0.  0. ...,  1.  0.  0.]
 [ 0.  0.  0. ...,  0.  0.  0.]
 [ 0.  0.  0. ...,  0.  0.  0.]
 ..., 
 [ 0.  0.  0. ...,  0.  0.  0.]
 [ 0.  0.  0. ...,  0.  0.  0.]
 [ 0.  0.  0. ...,  0.  1.  0.]]
[[ 0.  0.  0. ...,  0.  0.  0.]
 [ 0.  0.  0. ...,  0.  0.  0.]
 [ 0.  0.  0. ...,  0.  0.  0.]
 ..., 
 [ 0.  0.  0. ...,  0.  0.  0.]
 [ 0.  0.  0. ...,  0.  0.  0.]
 [ 0.  0.  0. ...,  0.  0.  0.]]
[[ 0.  0.  0. ...,  1.  0.  0.]
 [ 0.  0.  1. ...,  0.  0.  0.]
 [ 0.  1.  0. ...,  0.  0.  0.]
 ..., 
 [ 0.  0.  0. ...,  0.  0.  0.]
 [ 0.  0.  0. ...,  0.  0.  0.]
 [ 0.  0.  0. ...,  0.  0.  0.]]

In [56]:
trX = trX.reshape(-1, 28, 28, 1)  # reshape to (batch, height, width, channels)
teX = teX.reshape(-1, 28, 28, 1)
X = tf.placeholder("float", [None, 28, 28, 1])  # input images
Y = tf.placeholder("float", [None, 10])         # one-hot labels
print(trX)
print(trY)


(output truncated: trX now prints as a 4-D array of shape (55000, 28, 28, 1), almost entirely zeros in the displayed border pixels; trY is unchanged, a 55000 x 10 one-hot label matrix)
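A shape check is more informative here than the full array dumps; a quick sketch (the 55000 train / 10000 test counts are the standard read_data_sets split):

print(trX.shape)  # (55000, 28, 28, 1)
print(teX.shape)  # (10000, 28, 28, 1)
print(trY.shape)  # (55000, 10)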

In [57]:
def init_weights(shape):
    return tf.Variable(tf.random_normal(shape, stddev=0.01))

w = init_weights([3, 3, 1, 32])       # 3x3 patch, 1 input channel, 32 output channels
w2 = init_weights([3, 3, 32, 64])     # 3x3 patch, 32 input channels, 64 output channels
w3 = init_weights([3, 3, 64, 128])    # 3x3 patch, 64 input channels, 128 output channels
w4 = init_weights([128 * 4 * 4, 625]) # fully connected layer: the previous layer's 3-D output (128 x 4 x 4) flattened to 1-D; output dim 625
w_o = init_weights([625, 10])         # output layer: input dim 625, output dim 10 (one per class label)
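Why w4's input dimension is 128 * 4 * 4: each max-pool in the model below uses a 2x2 window with stride 2 and SAME padding, which halves the spatial size with ceiling rounding, so the 28x28 input shrinks 28 -> 14 -> 7 -> 4 over the three blocks while the last conv layer has 128 channels. A sketch of that arithmetic:

import math
size = 28
for _ in range(3):               # three 2x2, stride-2 max-pools with SAME padding
    size = math.ceil(size / 2)   # SAME padding rounds up, so 7 -> 4
print(size, 128 * size * size)   # prints: 4 2048, the 128*4*4 input dim of w4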

In [58]:
def model(X, w, w2, w3, w4, w_o, p_keep_conv, p_keep_hidden):
    # first conv + max-pool block, followed by dropout
    l1a = tf.nn.relu(tf.nn.conv2d(X, w, strides=[1, 1, 1, 1], padding='SAME'))
    l1 = tf.nn.max_pool(l1a, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
    l1 = tf.nn.dropout(l1, p_keep_conv)
    # second conv + max-pool block, followed by dropout
    l2a = tf.nn.relu(tf.nn.conv2d(l1, w2, strides=[1, 1, 1, 1], padding='SAME'))
    l2 = tf.nn.max_pool(l2a, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
    l2 = tf.nn.dropout(l2, p_keep_conv)
    # third conv + max-pool block; flatten the 3-D output to 1-D before dropout
    l3a = tf.nn.relu(tf.nn.conv2d(l2, w3, strides=[1, 1, 1, 1], padding='SAME'))
    l3 = tf.nn.max_pool(l3a, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
    l3 = tf.reshape(l3, [-1, w4.get_shape().as_list()[0]])
    l3 = tf.nn.dropout(l3, p_keep_conv)
    # fully connected layer, followed by dropout
    l4 = tf.nn.relu(tf.matmul(l3, w4))
    l4 = tf.nn.dropout(l4, p_keep_hidden)
    # output layer
    pyx = tf.matmul(l4, w_o)
    return pyx  # return the predicted logits

p_keep_conv = tf.placeholder("float")    # keep probability for the conv layers' dropout
p_keep_hidden = tf.placeholder("float")  # keep probability for the fully connected layer's dropout
py_x = model(X, w, w2, w3, w4, w_o, p_keep_conv, p_keep_hidden)
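A quick static-shape check (a sketch; it reads the graph's inferred shapes, so no session is needed) confirms the model emits one logit per class:

print(py_x.get_shape())  # (?, 10): unknown batch size, 10 class logits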

In [ ]:
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=py_x, labels=Y))
train_op = tf.train.RMSPropOptimizer(0.001, 0.9).minimize(cost)
predict_op = tf.argmax(py_x, 1)  # predicted class = index of the largest logit
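As an aside, accuracy could also be computed inside the graph instead of with NumPy as done in the training loop below; a minimal sketch (the names correct and accuracy are illustrative, not part of the original notebook):

correct = tf.equal(predict_op, tf.argmax(Y, 1))          # per-example hit/miss, both int64
accuracy = tf.reduce_mean(tf.cast(correct, tf.float32))  # fraction of correct predictions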

In [ ]:
batch_size = 128
test_size = 256
with tf.Session() as sess:
    tf.global_variables_initializer().run()
    for i in range(100):
        # batch boundaries: (0, 128), (128, 256), ...
        training_batch = zip(range(0, len(trX), batch_size), range(batch_size, len(trX) + 1, batch_size))
        for start, end in training_batch:
            sess.run(train_op, feed_dict={X: trX[start:end], Y: trY[start:end], p_keep_conv: 0.8, p_keep_hidden: 0.5})
        # evaluate on a random subset of test_size test images, with dropout disabled
        test_indices = np.arange(len(teX))
        np.random.shuffle(test_indices)
        test_indices = test_indices[0:test_size]
        print(i, np.mean(np.argmax(teY[test_indices], axis=1) ==
                         sess.run(predict_op, feed_dict={X: teX[test_indices], p_keep_conv: 1.0, p_keep_hidden: 1.0})))


0 0.94140625
1 0.98828125
2 0.99609375
3 0.9921875
4 1.0
5 0.9921875
6 0.984375
7 0.984375
8 0.99609375
9 0.99609375
10 1.0
11 1.0
12 0.984375
13 0.9921875
14 1.0
15 0.98046875
16 0.9921875
17 0.9921875
18 0.99609375
19 0.98828125
20 0.98828125
21 0.99609375
22 1.0
23 0.99609375
24 0.98828125
25 0.99609375
26 0.9921875
27 0.9921875
28 1.0
29 0.99609375
30 0.99609375
31 0.98046875
32 1.0
33 0.9921875
34 0.99609375
35 0.99609375
36 0.99609375
37 0.9921875
38 0.9921875
39 0.99609375
40 0.9921875
41 0.99609375
42 0.9921875
43 0.9921875
44 0.9921875
45 1.0
46 0.98828125
47 0.99609375
48 0.99609375
49 0.9921875
50 0.98828125
51 0.9921875
52 1.0
53 0.99609375
54 0.99609375
55 0.99609375
56 0.9921875
57 0.98828125
58 0.99609375
59 0.99609375
60 1.0
61 1.0
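A note on the batching scheme above: zip stops at the shorter of its two ranges, so when len(trX) is not a multiple of batch_size the final partial batch is silently skipped each epoch (with 55000 samples and batch_size 128, the last 88 samples). A tiny illustration with hypothetical small numbers:

pairs = list(zip(range(0, 10, 4), range(4, 11, 4)))
print(pairs)  # [(0, 4), (4, 8)]: samples 8 and 9 never appear in a batch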