In [5]:
import tensorflow as tf
import numpy as np

In [6]:
print(tf.__version__)


1.2.0

In [7]:
data = [[2, 0],[4, 0], [6, 0], [8, 1], [10, 1], [12, 1], [14, 1]]

In [8]:
x_data = [x_row[0] for x_row in data]

In [9]:
print(x_data)


[2, 4, 6, 8, 10, 12, 14]

In [10]:
y_data = [y_row[1] for y_row in data]

In [11]:
print(y_data)


[0, 0, 0, 1, 1, 1, 1]

In [20]:
a = tf.Variable(tf.random_normal([1], dtype=tf.float64, seed=0))
b = tf.Variable(tf.random_normal([1], dtype=tf.float64, seed=0))

In [21]:
y = 1 / (1 + np.e ** (a * x_data + b))  # sigmoid

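The cell above builds the sigmoid as 1 / (1 + e^(a*x + b)) directly with NumPy's constant e; note the exponent is +(a*x + b) rather than the more common -(a*x + b), so the slope a learned below ends up negative. A minimal NumPy-only sketch of the same curve, using hypothetical parameter values close to where training converges:

In [ ]:
def sigmoid(x, a, b):
    # Same parameterization as the cell above: 1 / (1 + e^(a*x + b)).
    return 1 / (1 + np.e ** (a * x + b))

# Hypothetical a and b, roughly in the range training reaches below.
print(sigmoid(np.array([2, 8, 14]), a=-5.0, b=35.0))
# Small x gives a value near 0, larger x a value near 1, matching y_data.
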
In [22]:
loss = -tf.reduce_mean(np.array(y_data) * tf.log(y) + (1-np.array(y_data)) * tf.log(1-y))

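The expression above is the binary cross-entropy, -mean(y*log(p) + (1-y)*log(1-p)), mixing the NumPy label array with the TensorFlow tensor y. A quick NumPy-only check of the same formula on made-up probabilities (the values here are illustrative, not model output):

In [ ]:
y_true = np.array([0, 0, 0, 1, 1, 1, 1], dtype=np.float64)
p_hat = np.array([0.1, 0.1, 0.2, 0.8, 0.9, 0.9, 0.95])  # made-up predictions

bce = -np.mean(y_true * np.log(p_hat) + (1 - y_true) * np.log(1 - p_hat))
print(bce)  # small because the made-up predictions agree with the labels
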
In [23]:
learning_rate = 0.5
gd = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss)

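GradientDescentOptimizer updates each variable with the plain rule param <- param - learning_rate * d(loss)/d(param). A one-step sketch of that rule in plain Python, with a hypothetical current value and gradient:

In [ ]:
lr = 0.5
param, grad = 0.18, 0.42     # hypothetical current value and gradient
param = param - lr * grad    # step against the gradient direction
print(param)                 # roughly -0.03
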
In [25]:
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    
    for i in range(60001):
        sess.run(gd)
        if i % 6000 == 0:
            print("Epoch: %.f, loss = %0.4f, 기울기 a = %0.4f,  y 절편 b = %0.4f" % (i, sess.run(loss), sess.run(a),  sess.run(b)))


Epoch: 0, loss = 1.2676, slope a = 0.1849, intercept b = -0.4334
Epoch: 6000, loss = 0.0152, slope a = -2.9211, intercept b = 20.2981
Epoch: 12000, loss = 0.0081, slope a = -3.5637, intercept b = 24.8010
Epoch: 18000, loss = 0.0055, slope a = -3.9557, intercept b = 27.5463
Epoch: 24000, loss = 0.0041, slope a = -4.2380, intercept b = 29.5231
Epoch: 30000, loss = 0.0033, slope a = -4.4586, intercept b = 31.0675
Epoch: 36000, loss = 0.0028, slope a = -4.6396, intercept b = 32.3346
Epoch: 42000, loss = 0.0024, slope a = -4.7930, intercept b = 33.4086
Epoch: 48000, loss = 0.0021, slope a = -4.9261, intercept b = 34.3406
Epoch: 54000, loss = 0.0019, slope a = -5.0436, intercept b = 35.1636
Epoch: 60000, loss = 0.0017, slope a = -5.1489, intercept b = 35.9005

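With the final values printed above (a ≈ -5.1489, b ≈ 35.9005), the fitted curve can be evaluated for new inputs without the session. A NumPy-only sketch of that prediction step (x_new is a hypothetical example):

In [ ]:
a_fit, b_fit = -5.1489, 35.9005   # final values from the run above
x_new = np.array([5, 7, 9])       # hypothetical new inputs
p = 1 / (1 + np.e ** (a_fit * x_new + b_fit))
print(p)  # probability of class 1; crosses 0.5 near x = -b_fit / a_fit ≈ 6.97
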
In [ ]: