Restart the kernel once before training, so the graph below is built fresh.
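
A kernel restart clears TensorFlow's default graph, so re-running the cells below does not pile duplicate ops and variables onto a graph built earlier. As a minimal sketch (my own note, not in the original notebook), tf.reset_default_graph() has the same effect in TF 1.x without restarting:

import tensorflow as tf

# Drop every op and variable registered on the default graph so the
# model-building cells can be re-run from a clean slate.
tf.reset_default_graph()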


In [1]:
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
from PIL import Image
%matplotlib inline

In [2]:
from tensorflow.examples.tutorials.mnist import input_data

In [3]:
mnist = input_data.read_data_sets('/Users/jaegyuhan/PythonEx_1/mnist_data', one_hot=True)


Extracting /Users/jaegyuhan/PythonEx_1/mnist_data/train-images-idx3-ubyte.gz
Extracting /Users/jaegyuhan/PythonEx_1/mnist_data/train-labels-idx1-ubyte.gz
Extracting /Users/jaegyuhan/PythonEx_1/mnist_data/t10k-images-idx3-ubyte.gz
Extracting /Users/jaegyuhan/PythonEx_1/mnist_data/t10k-labels-idx1-ubyte.gz
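
With one_hot=True each label is returned as a 10-dimensional indicator vector instead of an integer class index. A quick check (my own addition):

print(mnist.train.labels[0].shape)  # (10,)
print(mnist.train.labels[0])        # e.g. a 7 would appear as [0. 0. 0. 0. 0. 0. 0. 1. 0. 0.]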

In [4]:
X = tf.placeholder(dtype=tf.float32, shape=[None, 784])  # flattened 28x28 pixel images
Y = tf.placeholder(dtype=tf.float32, shape=[None, 10])   # one-hot digit labels

In [5]:
# Hidden layer 1: 784 -> 512, ReLU activation
W1 = tf.Variable(tf.random_normal(shape=[784, 512], stddev=0.01), name="w1val")
B1 = tf.Variable(tf.random_normal(shape=[512], stddev=0.01), name="b1val")
L1 = tf.nn.relu(tf.add(tf.matmul(X, W1), B1))

In [6]:
# Hidden layer 2: 512 -> 256, ReLU activation
W2 = tf.Variable(tf.random_normal(shape=[512, 256], stddev=0.01), name="w2val")
B2 = tf.Variable(tf.random_normal(shape=[256], stddev=0.01), name="b2val")
L2 = tf.nn.relu(tf.add(tf.matmul(L1, W2), B2))

In [7]:
# Hidden layer 3: 256 -> 128, ReLU activation
W3 = tf.Variable(tf.random_normal(shape=[256, 128], stddev=0.01), name="w3val")
B3 = tf.Variable(tf.random_normal(shape=[128], stddev=0.01), name="b3val")
L3 = tf.nn.relu(tf.add(tf.matmul(L2, W3), B3))

In [8]:
# Output layer: 128 -> 10 raw logits
W4 = tf.Variable(tf.random_normal(shape=[128, 10], stddev=0.01), name="w4val")
B4 = tf.Variable(tf.random_normal(shape=[10], stddev=0.01), name="b4val")
model = tf.add(tf.matmul(L3, W4), B4)  # TODO: try applying the softmax activation (see the note below the loss cell)

In [9]:
param_list = [W1, W2, W3, W4, B1, B2, B3, B4]
saver = tf.train.Saver(param_list)  # checkpoint only the listed variables
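
Because the Saver is built over an explicit list, only these eight variables are written to the checkpoint. After the training cell below has saved it, the stored names and shapes can be verified with tf.train.list_variables (a quick check, my own addition):

for name, shape in tf.train.list_variables("./chkp_save4/mnist"):
    print(name, shape)  # w1val [784, 512], b1val [512], ...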

In [10]:
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=model, labels=Y))
optimizer = tf.train.AdamOptimizer(learning_rate=0.001).minimize(cost)
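
softmax_cross_entropy_with_logits applies softmax to the logits internally, which is why model returns raw scores. If class probabilities are needed at inference time, a separate op can be added (a minimal sketch; the name probs is my own):

probs = tf.nn.softmax(model)  # probabilities for inference only; the loss already softmaxes internally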

In [11]:
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    
    batch_size = 100
    total_batch = int(mnist.train.num_examples / batch_size)
    print("배치 토탈 : ",total_batch)
    
    for epoch in range(15):
        total_cost = 0
        
        for i in range(total_batch):
            batch_xs, batch_ys = mnist.train.next_batch(batch_size)
            
            _, cost_val = sess.run([optimizer, cost], feed_dict={X: batch_xs, Y: batch_ys})
            total_cost += cost_val
            
        
        print("Epoch:","%04d" % (epoch + 1), "Avg. cost =", "{:.3f}".format(total_cost / total_batch))
    
    
    print('Optimization complete!')
    saver.save(sess, "./chkp_save4/mnist")
    
#     # inspect the learned first-layer parameters
#     w1_val, b1_val = sess.run([W1, B1])
#     print(w1_val)
    
    is_correct = tf.equal(tf.argmax(model, 1), tf.argmax(Y, 1))
    accuracy = tf.reduce_mean(tf.cast(is_correct, tf.float32))
    print("Accuracy:", sess.run(accuracy, feed_dict={X: mnist.test.images, Y: mnist.test.labels}))
    
#     # predict on a custom preprocessed image array `ll` of shape [N, 784] (not defined in this notebook)
#     predict = sess.run([model], feed_dict={X: ll})
#     print(predict)
#     predict = np.array(predict)
#     print("shape:", predict.shape)
#     print("result:", np.argmax(predict[0], axis=1))


Total batches: 550
Epoch: 0001 Avg. cost = 0.508
Epoch: 0002 Avg. cost = 0.169
Epoch: 0003 Avg. cost = 0.111
Epoch: 0004 Avg. cost = 0.076
Epoch: 0005 Avg. cost = 0.060
Epoch: 0006 Avg. cost = 0.045
Epoch: 0007 Avg. cost = 0.034
Epoch: 0008 Avg. cost = 0.030
Epoch: 0009 Avg. cost = 0.025
Epoch: 0010 Avg. cost = 0.022
Epoch: 0011 Avg. cost = 0.021
Epoch: 0012 Avg. cost = 0.017
Epoch: 0013 Avg. cost = 0.013
Epoch: 0014 Avg. cost = 0.015
Epoch: 0015 Avg. cost = 0.014
Optimization complete!
Accuracy: 0.9783
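
As a follow-up sketch (my own illustration, not part of the original notebook), the checkpoint saved above can be restored into a fresh session to classify images; mnist.test.images[:1] stands in for the custom image array ll from the commented-out block:

with tf.Session() as sess:
    # load the trained parameters instead of re-initializing them
    saver.restore(sess, "./chkp_save4/mnist")

    sample = mnist.test.images[:1]                   # one flattened 28x28 test image
    logits = sess.run(model, feed_dict={X: sample})  # raw scores for the 10 digit classes
    print("predicted digit:", np.argmax(logits, axis=1)[0])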

In [ ]: