In [54]:
import tensorflow as tf
print(tf.__version__)
import numpy as np
In [55]:
# TF1 graph mode: tf.constant only adds a node to the default graph.
# Printing shows the Tensor object, not its value — a Session is needed to evaluate it.
hello = tf.constant('Hello, World')
print(hello)
In [56]:
a = tf.constant(10)
b = tf.constant(20)
c = a+b  # graph op (tf.add); the value is computed only when run in a Session
print(c)
In [57]:
# Evaluate the graph node `c` inside a Session (TF1 execution model).
with tf.Session() as sess:
print(sess.run(c))
In [58]:
with tf.Session() as sess:
# NOTE(review): tf.add(a,b) inside sess.run builds a brand-new graph op
# every time this cell executes — harmless here, but a TF1 anti-pattern.
print(sess.run(tf.add(a,b)))
In [59]:
with tf.Session() as sess:
# Fetching a list returns the evaluated values in the same order.
aa,bb,cc = sess.run([a,b,c])
print(aa)
In [60]:
# Creates a placeholder with an unspecified number of rows (None) and 3 columns.
X = tf.placeholder(tf.float32, [None, 3])
print(X)
In [61]:
with tf.Session() as sess:
print(sess.run(X, feed_dict={X:[[1,2,3]]})) # the first X is the graph node to evaluate; the X key in feed_dict is the placeholder being fed
In [62]:
# Input batch: two rows of three features each (matches placeholder X above).
x_data = [[1,2,3],[4,5,6]]
In [63]:
W = tf.Variable(tf.random_normal([3,2])) #tf.random_normal initializes with random values drawn from a normal distribution
b = tf.Variable(tf.random_normal([2,1]))
In [64]:
# Affine expression: (2x3 input) @ (3x2 weights) -> 2x2, plus bias b (2x1, broadcast).
expr = tf.matmul(X,W) + b
In [65]:
with tf.Session() as sess:
# Variables must be initialized before their values can be read.
sess.run(tf.global_variables_initializer())
print(x_data)
print(sess.run(W))
print(sess.run(b))
print(sess.run(expr, feed_dict={X:x_data}))
In [66]:
# Broadcasting demo: adding a (2x2) matrix and a (2x1) column vector —
# the column vector is broadcast across the columns of mat_a.
mat_a = tf.constant(value=[2,2,2,2],dtype=tf.float32, shape=[2,2])
mat_b = tf.constant(value=[1,2], dtype=tf.float32, shape=[2,1])
with tf.Session() as sess:
print(sess.run(mat_a))
print(sess.run(mat_b))
print(sess.run(mat_a+mat_b))
In [67]:
# Toy data for linear regression: the model should learn y = x.
x_data = [1,2,3]
y_data = [1,2,3]
In [68]:
with tf.Session() as sess:
for i in range(10):
print(sess.run(tf.random_uniform(shape=[1], minval=0, maxval=9, dtype=tf.int32)))
In [69]:
# Trainable parameters for hypothesis = W*x + b, initialized uniformly in [-1, 1).
W = tf.Variable(tf.random_uniform(shape=[1], minval=-1.0, maxval=1.0))
b = tf.Variable(tf.random_uniform(shape=[1], minval=-1.0, maxval=1.0))
In [70]:
# Placeholders for input and target; shape is left unspecified and inferred from the feed.
X = tf.placeholder(dtype=tf.float32, name = "X")
Y = tf.placeholder(dtype=tf.float32, name = "Y")
In [71]:
# Linear model.
hypothesis = W * X + b
In [72]:
# Mean squared error loss.
cost = tf.reduce_mean(tf.square(hypothesis - Y))
In [73]:
# Vanilla gradient descent; minimize() builds the update op for W and b.
optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.1)
train_op = optimizer.minimize(cost)
In [74]:
with tf.Session() as sess:
# Variables must be initialized before training.
sess.run(tf.global_variables_initializer())
# 100 gradient-descent steps; each run of train_op updates W and b in place.
for step in range(100):
_, cost_val = sess.run([train_op, cost], feed_dict={X:x_data, Y:y_data})
print(step, cost_val, sess.run(W), sess.run(b))
# Predict for an unseen input (x = 5).
print("X: 5, Y: ", sess.run(hypothesis, feed_dict={X:5}))
In [75]:
x_data = np.array([[0,0],
[1,0],
[1,1],
[0,0],
[0,0],
[0,1]]) # features: [has wings, has fur] — 1 if present, 0 if not
In [76]:
# One-hot labels for three classes.
y_data = np.array([
[1,0,0], # other
[0,1,0], # mammal
[0,0,1], # bird
[1,0,0],
[1,0,0],
[0,0,1]
])
In [77]:
# Placeholders for features and one-hot labels (shapes inferred from the feed).
X = tf.placeholder(dtype=tf.float32)
Y = tf.placeholder(dtype=tf.float32)
In [78]:
# Single-layer weights (2 features -> 3 classes), uniform in [-1, 1), and a zero bias.
W = tf.Variable(tf.random_uniform([2,3], -1.,1.))
b = tf.Variable(tf.zeros([3]))
In [79]:
# Peek at what the initializers above produce (fresh ops, so fresh random values).
with tf.Session() as sess:
print(sess.run(tf.zeros([3])))
print(sess.run(tf.random_uniform([2,3], -1.,1.)))
In [80]:
# Affine layer followed by a ReLU activation.
L = tf.add(tf.matmul(X,W), b)
L = tf.nn.relu(L)
In [81]:
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
# NOTE(review): tf.matmul(X,W) here builds a new graph op on every execution
# of this cell; reuse a prebuilt node in real code.
print(sess.run(tf.matmul(X,W),feed_dict={X:x_data}))
In [82]:
model = tf.nn.softmax(L)
In [83]:
cost = tf.reduce_mean(-tf.reduce_sum(Y * tf.log(model), axis=1))
In [84]:
# %timeit micro-benchmarks a single expression (IPython line magic).
%timeit 3+5
In [85]:
%%timeit
# %%timeit (cell magic) times the whole cell body.
a = [1,2,3]
a = [x+1 for x in a]
In [86]:
# Gradient descent on the cross-entropy cost defined above.
optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.01)
train_op = optimizer.minimize(cost)
In [87]:
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
for step in range(100):
sess.run(train_op, feed_dict={X:x_data, Y:y_data})
# Report the loss every 10 steps.
if (step + 1) % 10 == 0:
print(step + 1, sess.run(cost, feed_dict={X:x_data, Y:y_data}))
In [94]:
prediction = tf.argmax(model, axis=1)
target = tf.argmax(Y, axis=1)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
print('예측값 : ', sess.run(prediction, feed_dict = {X: x_data}))
print('실제값 : ', sess.run(target, feed_dict = {Y:y_data}))
is_correct = tf.equal(prediction, target)
accuracy = tf.reduce_mean(tf.cast(is_correct, tf.float32))
print('정확도: %.2f' % sess.run(accuracy * 100, feed_dict={X: x_data, Y: y_data}))
In [ ]: