In [19]:
import tensorflow as tf
print(tf.__version__)
import numpy
import matplotlib.pyplot as plt
%matplotlib inline
In [20]:
W = tf.Variable(tf.random_uniform([1], -1.0, 1.0), name='weight')  # [1] is the shape argument: a single scalar weight
b = tf.Variable(tf.zeros([1]), name = 'bias')
X = tf.placeholder(tf.float32)
Y = tf.placeholder(tf.float32)
hypothesis = X * W + b
In [21]:
cost = tf.reduce_mean(tf.square(hypothesis - Y))
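The cost is the mean squared error between the predictions and the targets. As a sanity check, the same quantity can be computed directly in NumPy (a minimal sketch with made-up arrays, not part of the original notebook):
import numpy as np
pred = np.array([1.0, 2.0, 3.0])   # hypothetical predictions
y = np.array([1.5, 2.0, 2.5])      # hypothetical targets
mse = np.mean((pred - y) ** 2)     # same value tf.reduce_mean(tf.square(pred - y)) would give
print(mse)                         # ~0.1667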
In [22]:
optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.0015)
train = optimizer.minimize(cost)
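optimizer.minimize(cost) builds an op that, each time it runs, computes the gradients of the cost with respect to W and b and takes one gradient-descent step. A rough NumPy sketch of the equivalent hand-rolled update (the toy data and variable names here are assumptions for illustration only):
import numpy as np
lr = 0.0015
x = np.array([1.0, 2.0, 3.0])
y = 2.0 * x + 1.0                      # toy targets
w, b = 0.0, 0.0
pred = w * x + b
grad_w = np.mean(2 * (pred - y) * x)   # d(MSE)/dw
grad_b = np.mean(2 * (pred - y))       # d(MSE)/db
w -= lr * grad_w                       # the kind of update GradientDescentOptimizer applies
b -= lr * grad_b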
In [23]:
sess = tf.Session()
sess.run(tf.global_variables_initializer())  # two variables (W, b) were declared above; they must be initialized before use
In [24]:
import numpy as np
num_points = 200
vectors_set = []
for i in range(num_points):
    x = np.random.normal(5, 5) + 15
    y = x * 1000 + np.random.normal(0, 3) * 1000
    vectors_set.append([x, y])
x_data = [v[0] for v in vectors_set]
y_data = [v[1] for v in vectors_set]
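The same data can be produced without the Python loop; a vectorized alternative (an equivalent sketch, not the original code, and it returns arrays rather than lists):
x_data = np.random.normal(5, 5, num_points) + 15
y_data = x_data * 1000 + np.random.normal(0, 3, num_points) * 1000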
In [27]:
for step in range(10):
    cost_val, W_val, b_val, train_val = \
        sess.run([cost, W, b, train], feed_dict={X: x_data, Y: y_data})
    if step % 1 == 0:  # print progress at every step
        print(step, cost_val, W_val, b_val)
plt.plot(x_data, y_data, 'ro')
plt.plot(x_data, W_val * np.array(x_data) + b_val)  # fitted regression line
In [7]:
print(sess.run(hypothesis, feed_dict={X:[3]}))
In [8]:
print(sess.run(hypothesis, feed_dict={X:[2]}))
print(sess.run(hypothesis, feed_dict={X:[1.5, 3.5]}))
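Because the model is a straight line, the same predictions can be reproduced directly from the fitted W_val and b_val printed during training (a quick consistency check, not from the original notebook):
print(W_val * 3 + b_val)                     # should match sess.run(hypothesis, feed_dict={X: [3]})
print(W_val * np.array([1.5, 3.5]) + b_val)  # should match the batch prediction above
sess.close()                                 # release the session when done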