In [1]:
import tensorflow as tf
import numpy as np
import math
from matplotlib import pyplot as plt
from tqdm import tqdm_notebook
from IPython.display import clear_output
%matplotlib inline
# Model parameters for a 1-D harmonic oscillator solved by a small neural network.
sigmoid_ammount = 15   # number of hidden sigmoid units (sic: "amount" is misspelled)
m = 150                # number of points on the training time grid
mass = 1               # oscillator mass
k = 1                  # spring constant
om2 = k/mass           # omega^2 in the oscillator equation x'' + om2*x = 0
v_0 = 1                # initial velocity x'(0)
x_0 = 0                # initial position x(0)
# Initial total energy (Hamiltonian) -- conserved along the true trajectory.
ham_0 = (mass/2.0)*v_0*v_0 + (k/2.0)*x_0*x_0
#------------------------------------
sess = tf.Session()    # TF1 graph-mode session, shared by all later cells
In [2]:
#Time Parameter
# Training grid: m points over one full period [0, 2*pi], shaped (m, 1) so it
# can be fed straight into the matmul-based forward pass.
tTr = np.linspace(0, 2*math.pi, m).reshape(m,1)
time = tf.placeholder(tf.double)   # fed with an (n, 1) column of time values
#--------------------------------------------------------------------------------------
#Weights
# One hidden layer of `sigmoid_ammount` units, all parameters uniform in [-1, 1]:
#   V, B -- input weights / biases of the hidden sigmoids
#   W    -- output weights
#   C    -- output bias term (declared as a [1, sigmoid_ammount] row vector;
#           see the forward pass for how it is applied)
W = tf.get_variable("W", initializer = tf.random_uniform(shape=[1, sigmoid_ammount], dtype=tf.double, minval = -1, maxval = 1))
V = tf.get_variable("V", initializer = tf.random_uniform(shape=[1, sigmoid_ammount], dtype=tf.double, minval = -1, maxval = 1))
B = tf.get_variable("B", initializer = tf.random_uniform(shape=[1, sigmoid_ammount], dtype=tf.double, minval = -1, maxval = 1))
C = tf.get_variable("C", initializer = tf.random_uniform(shape=[1, sigmoid_ammount], dtype=tf.double, minval = -1, maxval = 1))
#--------------------------------------------------------------------------------------
#Some declarations
alpha = tf.get_variable("Learning_Rate", initializer=tf.constant(1e-3, dtype=tf.double))  # mutable learning rate
num = tf.constant(m, dtype=tf.double)   # m as a tensor, used to average the cost terms
#--------------------------------------------------------------------------------------
In [3]:
# Forward pass: single-hidden-layer network  x(t) = sum_j W_j * sigmoid(V_j*t + B_j) + bias.
sigmoids_matrix = tf.sigmoid(tf.matmul(time, V) + B)    # shape [n, sigmoid_ammount]
# BUG FIX: the bias used to be added as the full row vector C (shape
# [1, sigmoid_ammount]), which broadcast the [n, 1] matmul result to
# [n, sigmoid_ammount]. That (a) breaks the later reshape of the prediction to
# (400, 1) and (b) multiplies the tf.gradients-based derivatives (gradients of
# the SUM of outputs) by sigmoid_ammount. Collapsing C to a single scalar bias
# keeps C trainable while restoring the intended [n, 1] output shape.
approximator = tf.matmul(sigmoids_matrix, tf.transpose(W)) + tf.reduce_sum(C)
#--------------------------------------------------------------------------------------
# First and second time-derivatives of the approximator (velocity, acceleration).
vel = tf.gradients(approximator, time)[0]
acc = tf.gradients(vel, time)[0]
#--------------------------------------------------------------------------------------
# Cost terms:
#   eq       -- residual of the oscillator ODE  x'' + om2*x = 0
#   ham_diff -- deviation from the (conserved) initial Hamiltonian ham_0
eq = acc + om2*approximator
ham_diff = (mass/2)*tf.square(vel) + (k/2)*tf.square(approximator) - ham_0
#--------------------------------------------------------------------------------------
# Total cost: mean squared ODE residual + initial-condition penalties
# (x(0) = x_0, x'(0) = v_0) + mean squared energy-conservation violation.
J = (tf.reduce_sum(tf.square(eq))*(1/num) +
     tf.square(approximator[0][0] - x_0) + tf.square(vel[0][0] - v_0) +
     tf.reduce_sum(tf.square(ham_diff))*(1/num))
#--------------------------------------------------------------------------------------
# Gradients of the cost with respect to every trainable tensor.
grads_weights = tf.gradients(J, [W, V, B, C])
#--------------------------------------------------------------------------------------
# Plain gradient-descent update ops, one per weight tensor.
update_W = W.assign_sub(alpha*grads_weights[0])
update_V = V.assign_sub(alpha*grads_weights[1])
update_B = B.assign_sub(alpha*grads_weights[2])
update_C = C.assign_sub(alpha*grads_weights[3])
In [4]:
def init_weights():
    """(Re)initialize every TF variable in the default graph on the shared session."""
    sess.run(tf.global_variables_initializer())
#---------------------------------------------------------------------------
def get_weights():
    """Return the current values of the trainable weight tensors as one ndarray.

    BUG FIX: C was missing from the fetch list even though it is one of the
    four trainable weight tensors; it is now included, so the result stacks
    W, V, B and C (shape (4, 1, sigmoid_ammount) instead of (3, 1, sigmoid_ammount)).
    """
    weights = sess.run([W, V, B, C])
    return np.asarray(weights)
#---------------------------------------------------------------------------
def show_grads():
    """Evaluate the cost gradients on the training grid and return them as an ndarray."""
    return np.asarray(sess.run(grads_weights, {time: tTr}))
#---------------------------------------------------------------------------
def show_sigmoids(observation_time):
    """Plot every hidden-unit sigmoid evaluated on the given (n, 1) time grid."""
    matrix = sess.run(sigmoids_matrix, {time: observation_time})
    plt.title('Sigmoids system')
    plt.grid(True)
    # One curve per hidden unit: iterate over the columns of the activation matrix.
    for unit_activation in matrix.T:
        plt.plot(observation_time, unit_activation)
#---------------------------------------------------------------------------
def show_approx(observation_time):
    """Plot the network's current approximation x(t) on the given (n, 1) time grid."""
    prediction = sess.run(approximator, {time: observation_time})
    plt.grid(True)
    plt.title('Approximation')
    plt.plot(observation_time, prediction)
In [5]:
init_weights()
#---------------------------------------------------------------------------
# Initial weights of the network, plus the sigmoid basis visualized on a
# window much wider than the training interval [0, 2*pi].
initial_weights = get_weights()
print(get_weights())
some_time = np.linspace(-15, 7*math.pi, 300).reshape(300,1)
show_sigmoids(some_time)
In [6]:
# Network output on the wide window before any training.
show_approx(some_time)
In [7]:
# Training history, kept at module level so later cells can plot it.
Err = []
I = []
#---------------------------------------------------------------------------
def grad_descent(N, learn_rate):
    """Run up to N plain gradient-descent steps at the given learning rate.

    Fills the module-level history lists `I` (iteration index) and `Err`
    (1/J, so the curve rises as the cost falls) and stops early once J < 1e-6.
    Prints the current cost every 1800 iterations.
    """
    # BUG FIX: these lists used to be re-created as locals, which left the
    # module-level Err/I permanently empty and made the later
    # plt.plot(I, Err) cell plot nothing. Reset the globals instead.
    global Err, I
    Err = []
    I = []
    sess.run(tf.assign(alpha, learn_rate))
    for i in tqdm_notebook(range(N)):
        I.append(i)
        # BUG FIX: update_C was defined but never run, so the bias C stayed
        # frozen at its random initial value. Include it in the training step.
        _, _, _, _, j = sess.run([update_W, update_V, update_B, update_C, J],
                                 {time: tTr})
        Err.append(1/j)
        if i % 1800 == 0:
            clear_output()
            print(j)
        if j < 1e-6:
            break
#---------------------------------------------------------------------------
#---------------------------------------------------------------------------
In [8]:
# Train for up to 500000 steps at learning rate 10e-3 (i.e. 0.01).
grad_descent(500000, 10e-3)
In [9]:
np.set_printoptions(precision=4)
# Convergence history: Err holds 1/J per recorded step, so the curve rises as
# the cost falls. NOTE(review): this plots the module-level I/Err lists --
# confirm grad_descent writes to them rather than to shadowing locals.
plt.plot(I, Err)
Out[9]:
In [10]:
#print(show_grads())
In [11]:
#print(get_weights())
In [12]:
#show_sigmoids(some_time)
In [13]:
# Evaluate the trained network on a dense grid over the training interval and
# compare against the analytic solution x(t) = sin(t) (follows from x_0 = 0,
# v_0 = 1, om2 = 1).
observe_time = np.linspace(0, 2*math.pi, 400).reshape(400,1)
plt.title('Approximation and error')
plt.grid(True)
# NOTE(review): this reshape assumes the approximator yields exactly 400
# values; with C of shape [1, sigmoid_ammount] the broadcast output is
# [400, sigmoid_ammount], which would make this reshape fail -- confirm the
# intended bias shape in the forward pass.
approx = sess.run(approximator, {time: observe_time}).reshape((400,1))
true = np.sin(observe_time)
plt.plot(observe_time, approx)
plt.plot(observe_time, true - approx, 'red')
Out[13]:
In [14]:
# The approximation alone (same curve as in the previous figure, without the error).
plt.plot(observe_time, approx)
Out[14]:
In [15]:
# Mean squared error of the approximation against the analytic sin(t) curve.
err = true - approx
squared = np.square(err)
squared.sum() * (1 / squared.size)
Out[15]: