In [5]:
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt

# Generate NUM_POINTS samples scattered around the line y = TRUE_W * x + TRUE_B.
NUM_POINTS = 1000
TRUE_W = 0.1         # slope of the underlying line
TRUE_B = 0.3         # intercept of the underlying line
X_STDDEV = 0.55      # std-dev of the sampled x values
NOISE_STDDEV = 0.03  # std-dev of the observation noise added to y
SEED = 42            # fixed seed so Restart-&-Run-All reproduces the same data

np.random.seed(SEED)

# Vectorized generation instead of a 1000-iteration Python loop.
xs = np.random.normal(0.0, X_STDDEV, NUM_POINTS)
ys = xs * TRUE_W + TRUE_B + np.random.normal(0.0, NOISE_STDDEV, NUM_POINTS)

# Keep the same list-of-[x, y]-pairs / list-of-scalars shapes that the
# rest of the notebook expects.
vectors_set = [[x, y] for x, y in zip(xs, ys)]
x_data = [v[0] for v in vectors_set]
y_data = [v[1] for v in vectors_set]

# Visualise the raw samples as a red scatter plot (explicit axes interface).
fig, ax = plt.subplots()
ax.scatter(x_data, y_data, c='r')
plt.show()



In [11]:
# 1-D weight W, initialised uniformly at random in [-1, 1).
W = tf.Variable(tf.random.uniform([1], -1.0, 1.0), name='W')
# 1-D bias b, initialised to 0.
b = tf.Variable(tf.zeros([1]), name='b')
# Predicted y for every sample (broadcast over x_data).
y = W * x_data + b


# Loss: mean squared error between predictions and the observed targets.
loss = tf.reduce_mean(tf.square(y - y_data), name='loss')
# Optimise W and b with plain gradient descent (learning rate 0.5).
optimizer = tf.train.GradientDescentOptimizer(0.5)
# One training op = one step that shrinks the loss.
train = optimizer.minimize(loss, name='train')

# Use a context manager so the session's resources are always released,
# even if an iteration raises (the original leaked the session).
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())

    # W, b and loss before any training step.
    print('W =', sess.run(W), 'b =', sess.run(b), 'loss =', sess.run(loss))

    for step in range(20):
        sess.run(train)
        # W, b and loss after each gradient-descent step.
        print('W =', sess.run(W), 'b =', sess.run(b), 'loss =', sess.run(loss))


('W =', array([-0.64143467], dtype=float32), 'b =', array([0.], dtype=float32), 'loss =', 0.24914397)
('W =', array([-0.4225526], dtype=float32), 'b =', array([0.28656858], dtype=float32), 'loss =', 0.083623685)
('W =', array([-0.2649762], dtype=float32), 'b =', array([0.29005358], dtype=float32), 'loss =', 0.041405633)
('W =', array([-0.15476394], dtype=float32), 'b =', array([0.29256248], dtype=float32), 'loss =', 0.020752372)
('W =', array([-0.07767799], dtype=float32), 'b =', array([0.29431725], dtype=float32), 'loss =', 0.010648707)
('W =', array([-0.02376165], dtype=float32), 'b =', array([0.2955446], dtype=float32), 'loss =', 0.005705947)
('W =', array([0.01394914], dtype=float32), 'b =', array([0.29640305], dtype=float32), 'loss =', 0.0032879272)
('W =', array([0.04032526], dtype=float32), 'b =', array([0.29700345], dtype=float32), 'loss =', 0.002105021)
('W =', array([0.05877355], dtype=float32), 'b =', array([0.29742342], dtype=float32), 'loss =', 0.0015263373)
('W =', array([0.07167687], dtype=float32), 'b =', array([0.29771715], dtype=float32), 'loss =', 0.001243243)
('W =', array([0.08070185], dtype=float32), 'b =', array([0.29792258], dtype=float32), 'loss =', 0.001104752)
('W =', array([0.08701421], dtype=float32), 'b =', array([0.2980663], dtype=float32), 'loss =', 0.0010370016)
('W =', array([0.09142927], dtype=float32), 'b =', array([0.29816678], dtype=float32), 'loss =', 0.0010038578)
('W =', array([0.09451731], dtype=float32), 'b =', array([0.2982371], dtype=float32), 'loss =', 0.0009876436)
('W =', array([0.09667718], dtype=float32), 'b =', array([0.29828626], dtype=float32), 'loss =', 0.0009797115)
('W =', array([0.09818786], dtype=float32), 'b =', array([0.29832065], dtype=float32), 'loss =', 0.0009758312)
('W =', array([0.09924448], dtype=float32), 'b =', array([0.2983447], dtype=float32), 'loss =', 0.0009739328)
('W =', array([0.09998351], dtype=float32), 'b =', array([0.2983615], dtype=float32), 'loss =', 0.00097300427)
('W =', array([0.1005004], dtype=float32), 'b =', array([0.29837328], dtype=float32), 'loss =', 0.00097254995)
('W =', array([0.10086194], dtype=float32), 'b =', array([0.2983815], dtype=float32), 'loss =', 0.00097232766)
('W =', array([0.10111482], dtype=float32), 'b =', array([0.29838726], dtype=float32), 'loss =', 0.0009722191)

In [ ]:


In [ ]: