In [1]:
%matplotlib notebook
import matplotlib
import matplotlib.pyplot as plt
from IPython.display import Image
In [2]:
import numpy as np
import tensorflow as tf
import fnmatch, os
import time
In [4]:
import math
import random
batch_size = 100
# define the data: 100 sample points of sin(5x) on [0, pi), plus uniform noise
xx = [i*math.pi/100 for i in range(100)]
yy = [math.sin(v*5) for v in xx]
y_ = [[yy[i] + random.random() - 0.5] for i in range(batch_size)]  # noise in [-0.5, 0.5)
plt.figure(figsize=(12,5))
plt.plot(range(len(yy)), yy, label='sin')
plt.plot(range(len(y_)), y_, label='sin+noise')
plt.legend()
plt.show()
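For reference, the same data can be built with vectorized NumPy instead of list comprehensions (a minimal sketch, not part of the original notebook; the `_np` names are placeholders):
# Vectorized equivalent of the cell above (sketch)
xx_np = np.arange(100) * np.pi / 100        # 100 sample points in [0, pi)
yy_np = np.sin(5 * xx_np)                   # clean target: sin(5x)
y_np = yy_np + np.random.random(100) - 0.5  # uniform noise in [-0.5, 0.5)
y_np = y_np.reshape(-1, 1)                  # same (100, 1) shape as the y_ feed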
In [23]:
#model
tf.reset_default_graph()
hidden_size = 1
vector_size = 100
# define placeholders for the input data and the target
x = tf.placeholder(tf.float32, [vector_size, 1], name='x')
y = tf.placeholder(tf.float32, [vector_size, 1], name='y')
lr = tf.Variable(0.0, trainable=False, name='learning_rate')
# model parameters
Wxh = tf.Variable(tf.random_uniform((hidden_size, vector_size))*0.01, name='Wxh') # input to hidden
Why = tf.Variable(tf.random_uniform((vector_size, hidden_size))*0.01, name='Why') # hidden to output
bh = tf.Variable(tf.zeros((hidden_size, 1)), name='bh') # hidden bias
by = tf.Variable(tf.zeros((vector_size, 1)), name='by') # output bias
hidden = tf.tanh(tf.matmul(Wxh, x)+bh)       # (hidden_size, 1): a single hidden unit
pred = tf.tanh(tf.matmul(Why, hidden)+by)    # (vector_size, 1): one output per input point
# variant without the bias terms:
#hidden = tf.tanh(tf.matmul(Wxh, x))
#pred = tf.tanh(tf.matmul(Why, hidden))
# Sum of squared errors (reduce_sum, so the cost is not averaged over the batch)
cost = tf.reduce_sum(tf.pow(pred-y, 2))
# Gradient descent
optimizer = tf.train.GradientDescentOptimizer(lr).minimize(cost)
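The graph computes pred = tanh(Why * tanh(Wxh * x + bh) + by); with hidden_size = 1 the whole input is squeezed through a single hidden unit. A quick NumPy sketch of the shapes (random stand-in weights for illustration only, not the trained values):
Wxh_np = np.random.uniform(size=(1, 100)) * 0.01      # (hidden_size, vector_size)
Why_np = np.random.uniform(size=(100, 1)) * 0.01      # (vector_size, hidden_size)
bh_np = np.zeros((1, 1))
by_np = np.zeros((100, 1))
x_np = np.linspace(0, np.pi, 100).reshape(100, 1)     # (vector_size, 1)
hidden_np = np.tanh(np.matmul(Wxh_np, x_np) + bh_np)  # (1, 1): single hidden unit
pred_np = np.tanh(np.matmul(Why_np, hidden_np) + by_np)  # (100, 1)
print(hidden_np.shape, pred_np.shape)                 # (1, 1) (100, 1)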
In [24]:
epoch_count = 100
learning_rate = 0.02
decay_rate = 0.98
batch = 100
sess = tf.Session()
cost_optimisation = []
with sess.as_default():
    tf.global_variables_initializer().run()  # initialize_all_variables() is deprecated
    print("variables initialized")
    for e in range(epoch_count):
        # exponentially decay the learning rate each epoch
        sess.run(tf.assign(lr, learning_rate * (decay_rate ** e)))
        start = time.time()
        # Get training data; fresh noise is added every epoch
        x_ = [[xx[i]] for i in range(batch)]
        y_ = [[yy[i] + random.random() - 0.5] for i in range(batch)]
        feed = {x: x_, y: y_}
        # Run one optimization step and fetch the current cost
        train_loss, _ = sess.run([cost, optimizer], feed)
        end = time.time()
        cost_optimisation.append(train_loss)
        print("epoch {}/{}, train_loss = {:.6f}, time/batch = {:.3f}"
              .format(e, epoch_count, train_loss, end - start))
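The tf.assign call above implements exponential learning-rate decay, lr_e = learning_rate * decay_rate ** e. A small sketch of what that schedule looks like over this run (same constants as above):
for e in (0, 10, 50, 99):
    print('epoch {:2d}: lr = {:.6f}'.format(e, 0.02 * 0.98 ** e))
# epoch  0: lr = 0.020000
# epoch 10: lr = 0.016341
# epoch 50: lr = 0.007283
# epoch 99: lr = 0.002706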
In [25]:
# Check the cost
plt.figure(figsize=(12,5))
plt.plot(range(len(cost_optimisation)), cost_optimisation, label='cost')
plt.legend()
plt.show()
In [26]:
with sess.as_default():
    # run the trained model on the clean inputs (no target needed)
    x_ = [[xx[i]] for i in range(batch)]
    feed = {x: x_}
    [prediction] = sess.run([pred], feed)
In [27]:
plt.figure(figsize=(12,5))
plt.plot(range(len(prediction)), prediction, label='prediction')
plt.legend()
plt.show()
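To judge the fit, it can help to overlay the prediction on the clean target (a small sketch reusing yy and prediction from the cells above):
plt.figure(figsize=(12, 5))
plt.plot(range(len(yy)), yy, label='sin')
plt.plot(range(len(prediction)), prediction, label='prediction')
plt.legend()
plt.show()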
In [28]:
with sess.as_default():
    for var in tf.global_variables():  # all_variables() is deprecated
        if var in tf.trainable_variables():
            print('t', var.name, var.eval().shape, ':')  # 't' = trainable
            # render the weight matrix as a tiny image
            plt.figure(figsize=(1, 1))
            plt.figimage(var.eval(), label=var.name)
            plt.show()
        else:
            print('nt', var.name, var.eval().shape)  # 'nt' = not trainable
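figimage draws raw pixels at a fixed position, which is hard to read for a 1x100 weight row; plt.imshow with aspect='auto' is an alternative (a sketch reusing the open session, not part of the original notebook):
with sess.as_default():
    w = Wxh.eval()                 # shape (1, 100)
    plt.figure(figsize=(12, 1))
    plt.imshow(w, aspect='auto')   # stretch the row to a readable strip
    plt.title('Wxh')
    plt.colorbar()
    plt.show()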
Feedback welcome @dh7net