In [19]:
import tensorflow as tf
print(tf.__version__)
import numpy
import matplotlib.pyplot as plt
%matplotlib inline


1.2.0

In [20]:
# TF 1.x graph construction: a scalar linear model y = W*x + b.
W = tf.Variable(tf.random_uniform([1],-1.0,1.0), name = 'weight')  # [1] is the shape: one scalar weight, initialized uniformly in [-1.0, 1.0)
b = tf.Variable(tf.zeros([1]), name = 'bias')

# Placeholders fed at run time with the training data (no shape given, so any batch size works).
X = tf.placeholder(tf.float32)
Y = tf.placeholder(tf.float32)


# Model prediction; element-wise over whatever is fed into X.
hypothesis = X * W + b

In [21]:
# Loss: mean squared error between predictions and targets.
cost = tf.reduce_mean(tf.square(hypothesis - Y))

In [22]:
# Plain gradient descent on the MSE loss.
# NOTE(review): learning rate is presumably kept this small because the targets
# below are on the order of 1e4 (y ~= 1000*x) — confirm if retuning.
optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.0015)
train = optimizer.minimize(cost)  # op that applies one gradient step to W and b

In [23]:
sess = tf.Session()
sess.run(tf.global_variables_initializer()) # We declared two variables above (W, b); they must be initialized before use.

In [24]:
# Synthetic training data: 200 points on the line y = 1000*x plus Gaussian noise.
import numpy as np

np.random.seed(42)  # fix the seed so the data — and hence the training run — is reproducible

num_points = 200
# x ~ N(5, 5) shifted by +15, i.e. centered near 20; y = 1000*x + N(0, 3)*1000 noise.
# Build the two lists directly instead of the intermediate vectors_set of pairs.
x_data = [float(np.random.normal(5, 5) + 15) for _ in range(num_points)]
y_data = [x * 1000 + float(np.random.normal(0, 3)) * 1000 for x in x_data]

In [27]:
# Run 10 gradient-descent steps, printing loss and parameters each step and
# overlaying the current fitted line on a scatter of the data.
plt.plot(x_data, y_data, 'ro')  # draw the raw data once, not on every iteration

for step in range(10):
    # train's result (None from minimize) was previously bound to an unused
    # variable; discard it explicitly.
    cost_val, W_val, b_val, _ = \
        sess.run([cost, W, b, train], feed_dict={X: x_data, Y: y_data})

    # The original guarded this with `if step % 1 == 0`, which is always true.
    print(step, cost_val, W_val, b_val)
    # W_val is a length-1 ndarray, so W_val * x_data broadcasts over the list.
    plt.plot(x_data, W_val * x_data + b_val)


0 1.06958e+07 [ 1068.6854248] [-1110.79711914]
1 1.06957e+07 [ 1068.69421387] [-1110.98522949]
2 1.06957e+07 [ 1068.703125] [-1111.17333984]
3 1.06957e+07 [ 1068.71203613] [-1111.36132812]
4 1.06957e+07 [ 1068.7208252] [-1111.54931641]
5 1.06956e+07 [ 1068.72973633] [-1111.73730469]
6 1.06956e+07 [ 1068.73852539] [-1111.9251709]
7 1.06956e+07 [ 1068.74743652] [-1112.11303711]
8 1.06956e+07 [ 1068.75634766] [-1112.30090332]
9 1.06955e+07 [ 1068.76513672] [-1112.48864746]

In [7]:
# Predict y for a single input x = 3.
# NOTE(review): execution count In[7] predates the In[27] training cell, so the
# saved output (~3.33) reflects pre-training weights; re-run after training.
print(sess.run(hypothesis, feed_dict={X:[3]}))


[ 3.33067632]

In [8]:
# More predictions: a single input, then a batch of two inputs (the unshaped
# placeholder X accepts either).
# NOTE(review): In[8] also predates the In[27] training run, so these saved
# outputs reflect pre-training weights.
print(sess.run(hypothesis, feed_dict={X:[2]}))
print(sess.run(hypothesis, feed_dict={X:[1.5, 3.5]}))


[ 2.33433247]
[ 1.83616066  3.82884812]

In [ ]: