In [2]:
import numpy as np
import tensorflow as tf
import tensorboard_jupyter as tb  # local helper for rendering the graph inline in Jupyter (not a standard package)
import matplotlib.pyplot as plt
from tensorflow.python.framework import ops
ops.reset_default_graph()
sess = tf.Session()
batch_size = 20
x_vals = np.random.normal(1, 0.1, 100)
y_vals = np.repeat(10., 100)
with tf.name_scope('x_data') as scope:
    x_data = tf.placeholder(shape=[None, 1], dtype=tf.float32)
with tf.name_scope('y_target') as scope:
    y_target = tf.placeholder(shape=[None, 1], dtype=tf.float32)
with tf.name_scope('A') as scope:
    A = tf.Variable(tf.random_normal(shape=[1, 1]))
# Matrix multiplication requires matmul, not multiply
with tf.name_scope('model') as scope:
    my_output = tf.matmul(x_data, A)
# L2 loss
with tf.name_scope('loss') as scope:
    loss = tf.reduce_mean(tf.square(my_output - y_target))
# Initialize the variables
init = tf.global_variables_initializer()
sess.run(init)
# Optimization method for the variables
my_opt = tf.train.GradientDescentOptimizer(learning_rate=0.02)
# Minimize the L2 loss
train_step = my_opt.minimize(loss)
# Mini-batch training (batch_size = 20)
loss_batch = []
for i in range(100):
    rand_index = np.random.choice(100, size=batch_size)
    rand_x = np.transpose([x_vals[rand_index]])
    rand_y = np.transpose([y_vals[rand_index]])
    sess.run(train_step, feed_dict={x_data: rand_x, y_target: rand_y})
    if (i+1) % 5 == 0:
        print('Step #' + str(i+1) + ' A = ' + str(sess.run(A)))
        temp_loss = sess.run(loss, feed_dict={x_data: rand_x, y_target: rand_y})
        print('Loss = ' + str(temp_loss))
        loss_batch.append(temp_loss)
tf.summary.FileWriter('./log/', sess.graph)
tb.show_graph(tf.get_default_graph().as_graph_def())
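As a sanity check (an added sketch, not part of the original cell): since y is fixed at 10 and x is drawn from N(1, 0.1), the closed-form least-squares slope for y ≈ A·x with no intercept is Σxy / Σx² ≈ 10 / (1 + 0.1²) ≈ 9.9, so the trained A above should converge near 10.
In [ ]:
# Closed-form least-squares slope for y ≈ A * x (no intercept);
# the A learned by gradient descent above should end up close to this value.
A_closed_form = np.dot(x_vals, y_vals) / np.dot(x_vals, x_vals)
print('Closed-form least-squares A:', A_closed_form)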
In [11]:
ops.reset_default_graph()
sess = tf.Session()
x_vals = np.random.normal(1, 0.1, 100)
y_vals = np.repeat(10., 100)
with tf.name_scope('x_data') as scope:
    # Not batching, so the shape is just [1]
    x_data = tf.placeholder(shape=[1], dtype=tf.float32)
with tf.name_scope('y_target') as scope:
    # Not batching, so the shape is just [1]
    y_target = tf.placeholder(shape=[1], dtype=tf.float32)
with tf.name_scope('A') as scope:
    # Not batching, so the shape is just [1]
    A = tf.Variable(tf.random_normal(shape=[1]))
# Operands are shape [1] here, so element-wise multiply instead of matmul
with tf.name_scope('model') as scope:
    my_output = tf.multiply(x_data, A)
# L2 loss
with tf.name_scope('loss') as scope:
    loss = tf.reduce_mean(tf.square(my_output - y_target))
# Initialize the variables
init = tf.global_variables_initializer()
sess.run(init)
# Optimization method for the variables
my_opt = tf.train.GradientDescentOptimizer(learning_rate=0.02)
# Minimize the L2 loss
train_step = my_opt.minimize(loss)
# Stochastic training (one sample per step)
loss_stochastic = []
for i in range(100):
    rand_index = np.random.choice(100)  # no size argument, so a single index is drawn
    rand_x = [x_vals[rand_index]]
    rand_y = [y_vals[rand_index]]
    sess.run(train_step, feed_dict={x_data: rand_x, y_target: rand_y})
    if (i+1) % 5 == 0:
        print('Step #' + str(i+1) + ' A = ' + str(sess.run(A)))
        temp_loss = sess.run(loss, feed_dict={x_data: rand_x, y_target: rand_y})
        print('Loss = ' + str(temp_loss))
        loss_stochastic.append(temp_loss)
tf.summary.FileWriter('./log/', sess.graph)
tb.show_graph(tf.get_default_graph().as_graph_def())
In [12]:
plt.plot(range(0, 100, 5), loss_stochastic, 'b-', label='Stochastic Loss')
plt.plot(range(0, 100, 5), loss_batch, 'r--', label='Batch Loss, size=20')
plt.legend(loc='upper right', prop={'size': 11})
plt.show()
# The batch loss decreases more smoothly
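To put a rough number on that smoothness, one can compare the spread of the two recorded loss curves (a minimal sketch reusing the loss_batch and loss_stochastic lists collected above):
In [ ]:
# The stochastic loss curve should show a much larger spread than the batch curve.
print('Std of batch loss:      %.4f' % np.std(loss_batch))
print('Std of stochastic loss: %.4f' % np.std(loss_stochastic))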