In [5]:
import tensorflow as tf
import numpy as np
In [6]:
print(tf.__version__)
In [7]:
data = [[2, 0],[4, 0], [6, 0], [8, 1], [10, 1], [12, 1], [14, 1]]
In [8]:
x_data = [x_row[0] for x_row in data]
In [9]:
print(x_data)
In [10]:
y_data = [y_row[1] for y_row in data]
In [11]:
print(y_data)
In [20]:
a = tf.Variable(tf.random_normal([1], dtype=tf.float64, seed=0))  # slope a
b = tf.Variable(tf.random_normal([1], dtype=tf.float64, seed=0))  # y-intercept b
In [21]:
y = 1 / (1 + np.e ** (a * x_data + b))  # sigmoid
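The model cell above writes the logistic curve directly with NumPy's constant e. For reference only (the helper name sigmoid below is mine, not part of the original notebook), the sketch defines the standard sigmoid and notes that the expression above is that sigmoid evaluated at -(a*x + b), so the fitted a comes out with the opposite sign of the usual convention.
In [ ]:
# Reference sketch only: standard sigmoid, assuming np is imported as above.
def sigmoid(z):
    return 1 / (1 + np.exp(-z))

print(sigmoid(np.array([-2.0, 0.0, 2.0])))  # ~0.12, 0.50, ~0.88
# The model cell computes 1 / (1 + e**(a*x + b)) == sigmoid(-(a*x + b)),
# so a is expected to converge to a negative value on this data.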
In [22]:
loss = -tf.reduce_mean(np.array(y_data) * tf.log(y) + (1 - np.array(y_data)) * tf.log(1 - y))  # binary cross-entropy
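The loss above is the binary cross-entropy averaged over the seven samples. As a hedged illustration (the prediction vector below is made up, not a training result), the same quantity can be computed in plain NumPy:
In [ ]:
# Illustration: binary cross-entropy in plain NumPy with made-up probabilities.
y_true = np.array(y_data, dtype=np.float64)
y_prob = np.array([0.1, 0.2, 0.3, 0.7, 0.8, 0.9, 0.95])  # hypothetical predictions
bce = -np.mean(y_true * np.log(y_prob) + (1 - y_true) * np.log(1 - y_prob))
print(bce)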
In [23]:
learning_rate = 0.5
gd = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss)
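minimize(loss) bundles the gradient computation and the parameter update into a single op. If it helps to see the pieces, the sketch below rebuilds an equivalent update step by hand with tf.gradients and tf.assign_sub; it is an illustration of what the optimizer does, not code from the original notebook.
In [ ]:
# Sketch: the same gradient-descent step written out manually (TF 1.x graph ops).
grad_a, grad_b = tf.gradients(loss, [a, b])
manual_step = tf.group(
    tf.assign_sub(a, learning_rate * grad_a),
    tf.assign_sub(b, learning_rate * grad_b),
)
# sess.run(manual_step) would perform one update, just like sess.run(gd).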
In [25]:
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for i in range(60001):
        sess.run(gd)
        if i % 6000 == 0:
            print("Epoch: %d, loss = %.4f, slope a = %.4f, y-intercept b = %.4f"
                  % (i, sess.run(loss), sess.run(a), sess.run(b)))
In [ ]: