In [1]:
import tensorflow as tf

In [2]:
hello = tf.constant("Hi")

In [3]:
print(hello)


Tensor("Const:0", shape=(), dtype=string)

In [4]:
# Tensor: TensorFlow's basic data type (a handle to a node in the graph)
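For instance, a Tensor carries a dtype and a shape that can be inspected without running the graph. A quick check, using the hello constant defined above:

print(hello.dtype)  # <dtype: 'string'>
print(hello.shape)  # () -- a rank-0 (scalar) tensor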

In [5]:
a = tf.constant(10)
b = tf.constant(2)
c = a + b

In [6]:
print(c)


Tensor("add:0", shape=(), dtype=int32)

In [7]:
# TensorFlow first builds a computation graph, then executes it in a session.
# This is deferred (lazy) execution: c above is a graph node, not the value 12.

In [9]:
sess = tf.Session()
print(sess.run(c))


12

In [11]:
print(sess.run(hello))


b'Hi'

In [12]:
sess.close()

In [13]:
# A placeholder is a graph input (parameter); its value is supplied at run time via feed_dict

In [15]:
# None lets the first (batch) dimension take any size, hence shape=(?, 3) below
X = tf.placeholder(tf.float32, [None, 3])
print(X)


Tensor("Placeholder_1:0", shape=(?, 3), dtype=float32)

In [16]:
x_data = [[1,2,3], [4,5,6]]

In [18]:
W = tf.Variable(tf.random_normal([3,2]))
b = tf.Variable(tf.random_normal([2,1]))

In [19]:
expr = tf.matmul(X,W) + b

In [23]:
sess = tf.Session()
sess.run(tf.global_variables_initializer())
print(x_data)
print(sess.run(W))
print(sess.run(b))
print(sess.run(expr, feed_dict={X: x_data}))
# feed_dict: supplies the input values to use when running the graph
sess.close()


[[1, 2, 3], [4, 5, 6]]
[[-0.72649789 -0.31295726]
 [ 1.18099403 -0.71925688]
 [-1.98263812 -2.10048032]]
[[-1.28041577]
 [-1.60697615]]
[[ -5.59283924  -9.33332729]
 [-10.50382614 -19.05797195]]

In [24]:
x_data


Out[24]:
[[1, 2, 3], [4, 5, 6]]

In [25]:
x_data = [1, 2, 3]
y_data = [1, 2, 3]

In [26]:
W = tf.Variable(tf.random_uniform([1], -1.0, 1.0))

In [27]:
# tf.random_uniform(shape, minval=0, maxval=None, dtype=tf.float32, seed=None, name=None)
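As a quick sanity check, one can sample from this distribution directly (the values differ on every run):

with tf.Session() as s:
    print(s.run(tf.random_uniform([3], -1.0, 1.0)))  # three floats drawn from [-1.0, 1.0)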

In [28]:
b = tf.Variable(tf.random_uniform([1], -1.0, 1.0))

In [29]:
X = tf.placeholder(tf.float32, name="X")

In [31]:
Y = tf.placeholder(tf.float32, name="Y")

In [33]:
hypothesis = W * X + b

In [34]:
cost = tf.reduce_mean(tf.square(hypothesis-Y))

In [35]:
optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.1)

In [36]:
train_op = optimizer.minimize(cost)
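Under the hood, minimize() computes the gradients of the cost and applies an update step. A rough sketch of the equivalent manual wiring with TF 1.x's tf.gradients (for intuition only; train_op above is what the training loop actually uses):

dW, db = tf.gradients(cost, [W, b])             # d(cost)/dW and d(cost)/db
manual_step = tf.group(W.assign(W - 0.1 * dW),  # same 0.1 learning rate as above
                       b.assign(b - 0.1 * db))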

In [42]:
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    
    for step in range(10):
        _, cost_val = sess.run([train_op, cost], feed_dict={X:x_data, Y: y_data})
        
        print(step, cost_val, sess.run(W), sess.run(b))
    
    print("X: 5, Y:", sess.run(hypothesis, feed_dict={X:5}))
    print("X: 5, Y:", sess.run(hypothesis, feed_dict={X:2.5}))


0 23.2728 [ 1.17120934] [ 0.16678452]
1 0.27883 [ 0.94470012] [ 0.06494389]
2 0.00412319 [ 0.97033584] [ 0.07407507]
3 0.00080411 [ 0.96839243] [ 0.07112574]
4 0.0007286 [ 0.96944255] [ 0.06954362]
5 0.000693551 [ 0.9701454] [ 0.06785788]
6 0.000660599 [ 0.97086656] [ 0.06622814]
7 0.000629222 [ 0.97156656] [ 0.06463589]
8 0.000599332 [ 0.97225004] [ 0.06308208]
9 0.000570862 [ 0.9729172] [ 0.06156565]
X: 5, Y: [ 4.92615175]
X: 2.5, Y: [ 2.49385858]

4. Implementing a Basic Neural Network


In [43]:
import numpy as np

In [44]:
x_data = np.array([[0,0], [1,0], [1,1], [0,0], [0,0], [0,1]])

In [45]:
y_data = np.array([
    [1, 0, 0],
    [0, 1, 0],
    [0, 0, 1],
    [1, 0, 0],
    [1, 0, 0],
    [0, 0, 1]
])

In [46]:
X = tf.placeholder(tf.float32)

In [47]:
Y = tf.placeholder(tf.float32)

In [69]:
W1 = tf.Variable(tf.random_uniform([2, 10], -1., 1.))
W2 = tf.Variable(tf.random_uniform([10, 3], -1., 1.))

In [70]:
b1 = tf.Variable(tf.zeros([10]))
b2 = tf.Variable(tf.zeros([3]))

In [73]:
L1 = tf.add(tf.matmul(X, W1), b1)

In [74]:
L1 = tf.nn.relu(L1)

In [75]:
model = tf.add(tf.matmul(L1, W2), b2)

In [77]:
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=Y, logits=model))
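softmax_cross_entropy_with_logits fuses the softmax and the cross-entropy into one numerically stable op; conceptually it computes the following (a sketch for intuition, not a drop-in replacement):

probs = tf.nn.softmax(model)  # logits -> class probabilities
manual_cost = tf.reduce_mean(-tf.reduce_sum(Y * tf.log(probs), axis=1))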

In [82]:
# optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.01)
optimizer = tf.train.AdamOptimizer(learning_rate=0.01)
train_op = optimizer.minimize(cost)

In [83]:
init = tf.global_variables_initializer()
sess = tf.Session()
sess.run(init)

for step in range(100):
    sess.run(train_op, feed_dict={X:x_data, Y:y_data})
    
    if (step+1)%10 == 0:
        print(step+1, sess.run(cost, feed_dict={X:x_data, Y:y_data}))
        
prediction = tf.argmax(model, axis=1)
target = tf.argmax(Y, axis=1)
print("예측값: ", sess.run(prediction, feed_dict={X:x_data}))
print("실제값: ", sess.run(target, feed_dict={Y:y_data}))

is_correct = tf.equal(prediction, target)
accuracy = tf.reduce_mean(tf.cast(is_correct, tf.float32))
print("정확도: {:.2f}".format(sess.run(accuracy*100, feed_dict={X: x_data, Y:y_data})))


10 0.820693
20 0.639553
30 0.514824
40 0.417581
50 0.332758
60 0.256476
70 0.190775
80 0.138239
90 0.0994742
100 0.0724157
Prediction:  [0 1 2 0 0 2]
Target:  [0 1 2 0 0 2]
Accuracy: 100.00

5. TensorBoard and Model Reuse


In [116]:
data = np.loadtxt("./sample_data.csv", delimiter=',', unpack=True, dtype='float32')

x_data = np.transpose(data[0:2])
y_data = np.transpose(data[2:])

In [117]:
data


Out[117]:
array([[ 0.,  1.,  1.,  0.,  0.,  0.],
       [ 0.,  0.,  1.,  0.,  0.,  1.],
       [ 1.,  0.,  0.,  1.,  1.,  0.],
       [ 0.,  1.,  0.,  0.,  0.,  0.],
       [ 0.,  0.,  1.,  0.,  0.,  1.]], dtype=float32)

In [119]:
# tf.reset_default_graph()
global_step = tf.Variable(0, trainable=False, name='global_step')

X = tf.placeholder(tf.float32)
Y = tf.placeholder(tf.float32)

with tf.name_scope('layer1'):
    W1 = tf.Variable(tf.random_uniform([2, 10], -1., 1.), name='W1')
    L1 = tf.nn.relu(tf.matmul(X, W1))

with tf.name_scope('layer2'):
    W2 = tf.Variable(tf.random_uniform([10, 20], -1., 1.), name='W2')
    L2 = tf.nn.relu(tf.matmul(L1, W2))

with tf.name_scope('output'):
    W3 = tf.Variable(tf.random_uniform([20, 3], -1., 1.), name='W3')
    model = tf.matmul(L2, W3)

with tf.name_scope('optimizer'):
    cost = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits(labels=Y, logits=model))

    optimizer = tf.train.AdamOptimizer(learning_rate=0.01)
    # Passing global_step makes the optimizer increment it by one on every training step.
    train_op = optimizer.minimize(cost, global_step=global_step)
    
    # Register the cost value as a summary so it can be tracked in TensorBoard
    tf.summary.scalar('cost', cost)

sess = tf.Session()
# Initialize the API for saving and restoring the model.
# tf.global_variables() selects all variables defined above to be saved or restored.
saver = tf.train.Saver(tf.global_variables())

ckpt = tf.train.get_checkpoint_state('./model')
if ckpt and tf.train.checkpoint_exists(ckpt.model_checkpoint_path):
    saver.restore(sess, ckpt.model_checkpoint_path)
else:
    sess.run(tf.global_variables_initializer())

# Collect every summary registered above into a single op
merged = tf.summary.merge_all()
# Set the directory where the graph and summary values will be written
writer = tf.summary.FileWriter('./logs', sess.graph)

# Run the optimization
for step in range(2):
    sess.run(train_op, feed_dict={X: x_data, Y: y_data})
    
    print("Step: {}".format(sess.run(global_step)),
          "Cost: {:.3f}".format(sess.run(cost, feed_dict={X: x_data, Y: y_data})))
    
    summary = sess.run(merged, feed_dict={X: x_data, Y: y_data})
    writer.add_summary(summary, global_step=sess.run(global_step))
    
saver.save(sess, './model/dnn.ckpt', global_step=global_step)
# Check the results
# Classes -- 0: other, 1: mammal, 2: bird
prediction = tf.argmax(model, 1)
target = tf.argmax(Y, 1)
print('Prediction:', sess.run(prediction, feed_dict={X: x_data}))
print('Target:', sess.run(target, feed_dict={Y: y_data}))

is_correct = tf.equal(prediction, target)
accuracy = tf.reduce_mean(tf.cast(is_correct, tf.float32))
print('Accuracy: {:.2f}'.format(sess.run(accuracy * 100, feed_dict={X: x_data, Y: y_data})))


Step: 1 Cost: 1.152
Step: 2 Cost: 1.045
Prediction: [0 1 1 0 0 2]
Target: [0 1 2 0 0 2]
Accuracy: 83.33
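With the summaries written to ./logs, the cost curve and the graph structure can be inspected by launching TensorBoard from a shell and opening the reported URL (typically http://localhost:6006):

tensorboard --logdir=./logs

Because the checkpoint in ./model is restored at the top of the cell, re-running it continues from the saved global_step instead of starting over.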

In [95]:
# tf.Variable(initial_value=None, trainable=True, collections=None, validate_shape=True, 
# caching_device=None, name=None, variable_def=None, dtype=None, expected_shape=None, import_scope=None)
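Of these arguments, trainable=False (used for global_step above) is worth noting: it keeps a variable out of the collection of variables that optimizers update. A minimal check:

counter = tf.Variable(0, trainable=False, name='counter')
print(any(v is counter for v in tf.trainable_variables()))  # False -- optimizers skip it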

In [2]:
from tensorflow.examples.tutorials.mnist import input_data

In [3]:
mnist = input_data.read_data_sets("./mnist/data/", one_hot=True)


Successfully downloaded train-images-idx3-ubyte.gz 9912422 bytes.
Extracting ./mnist/data/train-images-idx3-ubyte.gz
Successfully downloaded train-labels-idx1-ubyte.gz 28881 bytes.
Extracting ./mnist/data/train-labels-idx1-ubyte.gz
Successfully downloaded t10k-images-idx3-ubyte.gz 1648877 bytes.
Extracting ./mnist/data/t10k-images-idx3-ubyte.gz
Successfully downloaded t10k-labels-idx1-ubyte.gz 4542 bytes.
Extracting ./mnist/data/t10k-labels-idx1-ubyte.gz
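The returned object wraps the train/validation/test splits and exposes a next_batch helper for mini-batch training. A quick usage sketch:

batch_xs, batch_ys = mnist.train.next_batch(100)  # 100 flattened images with one-hot labels
print(batch_xs.shape, batch_ys.shape)             # (100, 784) (100, 10)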

In [4]:
# Each MNIST image is 28x28 = 784 pixels, flattened; labels are one-hot over the 10 digits
X = tf.placeholder(tf.float32, [None, 784])
Y = tf.placeholder(tf.float32, [None, 10])

In [ ]:
# First hidden layer weights: 784 inputs -> 256 hidden units (the 256 hidden size is an assumed choice)
W1 = tf.Variable(tf.random_normal([784, 256], stddev=0.01))