Author: hdup
Contact:
hdup: huangdan@youhujia.com
evitself: evitself@gmail.com
In [1]:
import os
import numpy as np
import cv2
import matplotlib.pyplot as plt
import utils
import tensorflow as tf
%matplotlib inline
In [2]:
a = tf.constant(5.)
b = tf.constant(1.2345)
In [3]:
with tf.Session() as sess:
    print('a={0}, b={1}'.format(sess.run(a), sess.run(b)))
    print('a+b={0}'.format(sess.run(a + b)))
    print('a-b={0}'.format(sess.run(a - b)))
    print('a*b={0}'.format(sess.run(a * b)))
    print('a/b={0}'.format(sess.run(a / b)))
In [4]:
# placeholders with no fixed shape; values are supplied via feed_dict at run time
a = tf.placeholder(dtype=tf.float32)
b = tf.placeholder(dtype=tf.float32)
In [5]:
op_add = tf.add(a, b)
op_sub = tf.subtract(a, b)
op_mul = tf.multiply(a, b)
op_div = tf.divide(a, b)
In [6]:
with tf.Session() as sess:
    print('a+b={0}'.format(sess.run(op_add, feed_dict={a: 1.0, b: 0.5})))
    print('a-b={0}'.format(sess.run(op_sub, feed_dict={a: 1.0, b: 0.5})))
    print('a*b={0}'.format(sess.run(op_mul, feed_dict={a: 1.0, b: 0.5})))
    print('a/b={0}'.format(sess.run(op_div, feed_dict={a: 1.0, b: 0.5})))
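Note (a sketch added here, not in the original notebook): because the placeholders a and b were created without a fixed shape, the same op_add node also accepts whole NumPy arrays; va and vb below are just illustrative names.
In [ ]:
with tf.Session() as sess:
    va = np.array([1.0, 2.0, 3.0], dtype=np.float32)
    vb = np.array([0.5, 0.5, 0.5], dtype=np.float32)
    # element-wise addition over full arrays through the same graph
    print(sess.run(op_add, feed_dict={a: va, b: vb}))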
In [7]:
a = np.array([[1.0, 0.5]], dtype=np.float32)
b = np.array([[0.2, 0.3]], dtype=np.float32)
In [8]:
mat1 = tf.constant(a)
mat2 = tf.constant(b)
In [9]:
mat_mul1 = tf.matmul(mat1, mat2, transpose_b=True)  # (1,2) x (2,1) -> (1,1)
mat_mul2 = tf.matmul(mat1, mat2, transpose_a=True)  # (2,1) x (1,2) -> (2,2)
mat_add = tf.add(mat1, mat2)                        # element-wise +
mat_sub = tf.subtract(mat1, mat2)                   # element-wise -
mat_elemmul = tf.multiply(mat1, mat2)               # element-wise *
In [10]:
with tf.Session() as sess:
    dot_product1 = sess.run(mat_mul1)
    dot_product2 = sess.run(mat_mul2)
    add_ret = sess.run(mat_add)
    sub_ret = sess.run(mat_sub)
    elm_ret = sess.run(mat_elemmul)
    print('1), mat_a * mat_b^T: result is {0}, shape is {1}'.format(dot_product1, dot_product1.shape))
    print('2), mat_a^T * mat_b: result is {0}, shape is {1}'.format(dot_product2, dot_product2.shape))
    print('3), mat_a + mat_b: result is {0}, shape is {1}'.format(add_ret, add_ret.shape))
    print('4), mat_a - mat_b: result is {0}, shape is {1}'.format(sub_ret, sub_ret.shape))
    print('5), mat_a .* mat_b: result is {0}, shape is {1}'.format(elm_ret, elm_ret.shape))
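As a sanity check (a sketch, not in the original notebook), the same five results can be reproduced directly with the NumPy arrays a and b defined above; np.dot(a, b.T) corresponds to mat_mul1 and np.dot(a.T, b) to mat_mul2.
In [ ]:
print(np.dot(a, b.T))  # (1,2) x (2,1) -> (1,1), matches mat_mul1
print(np.dot(a.T, b))  # (2,1) x (1,2) -> (2,2), matches mat_mul2
print(a + b)           # element-wise add,      matches mat_add
print(a - b)           # element-wise subtract, matches mat_sub
print(a * b)           # element-wise multiply, matches mat_elemmul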
In [11]:
sample_cnt = 100
# synthetic training data on the line y = 0.375*x + 1.1
train_X = np.linspace(-3.0, 3.0, num=sample_cnt, dtype=np.float32).reshape((sample_cnt, 1))
train_y = train_X * 0.375 + 1.1
print(train_X.shape)
In [12]:
X = tf.placeholder(dtype=tf.float32)
y = tf.placeholder(dtype=tf.float32)
In [13]:
W = tf.Variable(tf.random_normal((1,)), name='weights')
b = tf.Variable(tf.random_normal((1,)), name='bias')
In [14]:
# Linear function
h = tf.add(tf.multiply(X, W), b)
# MSE cost function
diff = h - y
cost = tf.reduce_sum(tf.multiply(diff, diff)) / (2 * sample_cnt)
# Adam optimizer (a plain gradient-descent optimizer is kept below for comparison)
lr = 0.01
ad = tf.train.AdamOptimizer(learning_rate=lr).minimize(cost)
#gd = tf.train.GradientDescentOptimizer(learning_rate=lr).minimize(cost)
# initializer
init = tf.global_variables_initializer()
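The cost above is the mean squared error scaled by one half: the sum over the m = sample_cnt samples of (W*x_i + b - y_i)^2, divided by 2*m. An equivalent formulation (a sketch, not used by the training cell below) with tf.square and tf.reduce_mean, which already divides by the sample count:
In [ ]:
# equivalent MSE/2 cost; reduce_mean handles the division by sample_cnt
cost_alt = tf.reduce_mean(tf.square(h - y)) / 2.0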
In [15]:
with tf.Session() as sess:
    # first init all variables
    sess.run(init)
    # batch training
    for epoch in range(0, 1000):
        sess.run(ad, feed_dict={X: train_X, y: train_y})
        if (epoch + 1) % 100 == 0:
            cur_cost = sess.run(cost, feed_dict={X: train_X, y: train_y})
            print('epoch: {0}, cost: {1}, W: {2}, b: {3}'.format(epoch + 1, cur_cost, sess.run(W), sess.run(b)))
    # finish
    final_cost = sess.run(cost, feed_dict={X: train_X, y: train_y})
    print('training finished!')
    print('final cost: {0}, W: {1}, b: {2}'.format(final_cost, sess.run(W), sess.run(b)))
    # then plot some curves
    predictions = sess.run(h, feed_dict={X: train_X})
    plt.plot(train_X, train_y, 'r+', label='training')
    plt.plot(train_X, predictions, 'b--', label='fitted')
    plt.grid(True)
    plt.legend()
    plt.show()
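Since train_y was generated as train_X * 0.375 + 1.1, an ordinary least-squares fit should land near the values the optimizer converges to. A quick cross-check with NumPy (a sketch, not part of the original notebook):
In [ ]:
slope, intercept = np.polyfit(train_X.ravel(), train_y.ravel(), 1)
print('closed-form fit: W={0:.4f}, b={1:.4f}'.format(slope, intercept))  # expect roughly 0.375 and 1.1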