In [15]:
import numpy as np
import tensorflow as tf
import tensorboard_jupyter as tb  # notebook helper used below to render the graph inline
import matplotlib.pyplot as plt

sess = tf.Session()

## Regression example ##

# Draw 100 samples from a normal distribution with mean 1 and standard deviation 0.1
x_vals = np.random.normal(1, 0.1, 100)
x_data = tf.placeholder(shape=[1], dtype=tf.float32)
# 100 copies of the target value 10
y_vals = np.repeat(10., 100)
y_target = tf.placeholder(shape=[1], dtype=tf.float32)
# Initialize the model variable A with a random value
A = tf.Variable(tf.random_normal(shape=[1]))

# Add the multiplication operation to the computational graph
my_output = tf.multiply(x_data, A)
# L2 loss function
# Since x_data has mean 1 and the target is 10, A should converge to roughly 10
loss = tf.square(my_output - y_target)

# Initialize the variables
init = tf.global_variables_initializer()
sess.run(init)

# Gradient descent optimizer for the variables
my_opt = tf.train.GradientDescentOptimizer(learning_rate=0.02)
# Train by minimizing the L2 loss
train_step = my_opt.minimize(loss)

# Training loop: feed one randomly chosen sample per step (stochastic training)
for i in range(100):
    rand_index = np.random.choice(100)
    rand_x = [x_vals[rand_index]]
    rand_y = [y_vals[rand_index]]
    sess.run(train_step, feed_dict={x_data: rand_x, y_target: rand_y})
    if (i+1)%25 == 0:
        print('Step #' + str(i+1) + ' A = ' + str(sess.run(A)))
        print('Loss = ' + str(sess.run(loss, feed_dict={x_data:rand_x, y_target: rand_y})))

# Write the graph for TensorBoard and render it inline
writer = tf.summary.FileWriter('./log/', sess.graph)
tb.show_graph(tf.get_default_graph().as_graph_def())


Step #25 A = [ 6.56119394]
Loss = [ 7.05666208]
Step #50 A = [ 8.70613861]
Loss = [ 1.63538313]
Step #75 A = [ 9.47094059]
Loss = [ 0.13261081]
Step #100 A = [ 9.63837337]
Loss = [ 0.62042272]
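
Before moving on, the trained value can be checked against the closed-form optimum. For the loss (A*x - y)^2 summed over the data, the least-squares solution is A* = sum(x*y) / sum(x^2); with x drawn around mean 1 and targets of 10, this comes out near 9.9. A minimal sanity-check sketch, assuming x_vals and y_vals from the cell above are still in scope:

In [ ]:
# Closed-form least-squares optimum for y ≈ A * x
# Minimizing sum((A*x - y)^2) over A gives A* = sum(x*y) / sum(x^2)
A_optimal = np.sum(x_vals * y_vals) / np.sum(x_vals ** 2)
print('Closed-form optimum A = ' + str(A_optimal))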

In [25]:
## Classification example ##
from tensorflow.python.framework import ops
# Reset the default graph before building the new model
ops.reset_default_graph()

sess = tf.Session()

# 50 samples each from normal distributions with means -1 and 3
x_vals = np.concatenate((np.random.normal(-1, 1, 50),
                        np.random.normal(3, 1, 50)))

with tf.name_scope('x_data') as scope:
    x_data = tf.placeholder(shape=[1], dtype=tf.float32)    
    
y_vals = np.concatenate([np.repeat(0., 50), np.repeat(1., 50)])
with tf.name_scope('y_target') as scope:
    y_target = tf.placeholder(shape=[1], dtype=tf.float32)
    
# Initialize A around 10, far from its expected optimum of about -1
with tf.name_scope('A') as scope:
    A = tf.Variable(tf.random_normal(mean=10, shape=[1]))
    
# The model is a simple translation: sigmoid(x + A), with the sigmoid applied by the loss function
with tf.name_scope('Model') as scope:
    my_output = tf.add(x_data, A)
    
# Add a batch dimension, since the loss function expects batched data (shape [1] becomes [1, 1])
my_output_expanded = tf.expand_dims(my_output, 0)
y_target_expanded = tf.expand_dims(y_target, 0)

# Sigmoid cross-entropy loss function
xentropy = tf.nn.sigmoid_cross_entropy_with_logits(logits=my_output_expanded, labels=y_target_expanded)
# Optimizer
my_opt = tf.train.GradientDescentOptimizer(0.05)
train_step = my_opt.minimize(xentropy)

# Initialize the variables (after the full graph is defined)
init = tf.global_variables_initializer()
sess.run(init)

# Training loop: one random sample per step
for i in range(1400):
    rand_index = np.random.choice(100)
    rand_x = [x_vals[rand_index]]
    rand_y = [y_vals[rand_index]]
    sess.run(train_step, feed_dict={x_data: rand_x, y_target: rand_y})
    if (i+1)%200==0:
        print('Step #' + str(i+1) + ' A = ' + str(sess.run(A)))
        print('Loss = ' + str(sess.run(xentropy, feed_dict={x_data: rand_x, y_target: rand_y})))
        
# Write the graph for TensorBoard and render it inline
writer = tf.summary.FileWriter('./log/', sess.graph)
tb.show_graph(tf.get_default_graph().as_graph_def())


Step #200 A = [ 5.78850842]
Loss = [[ 4.13020277]]
Step #400 A = [ 1.72222722]
Loss = [[ 0.00621681]]
Step #600 A = [-0.06747835]
Loss = [[ 0.29462105]]
Step #800 A = [-0.84376633]
Loss = [[ 0.07802983]]
Step #1000 A = [-0.72108114]
Loss = [[ 0.11570743]]
Step #1200 A = [-0.91699666]
Loss = [[ 0.26338002]]
Step #1400 A = [-0.96741456]
Loss = [[ 0.06652448]]
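
Since the loss applies the sigmoid itself, the trained model predicts class 1 whenever sigmoid(x + A) > 0.5, i.e. whenever x > -A; with A converging to about -1, the decision boundary lands midway between the two cluster means of -1 and 3. A quick accuracy check along those lines, a sketch assuming x_vals, y_vals, A, and sess are still in scope:

In [ ]:
# Accuracy of the trained classifier: sigmoid(x + A) > 0.5 is equivalent to x > -A
A_val = sess.run(A)[0]
predictions = (x_vals > -A_val).astype(np.float32)
accuracy = np.mean(predictions == y_vals)
print('Decision boundary at x = ' + str(-A_val))
print('Accuracy = ' + str(accuracy))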

In [ ]: