In [1]:
import tensorflow as tf
import numpy as np
import math
from matplotlib import pyplot as plt
from tqdm import tqdm_notebook
from IPython.display import clear_output
%matplotlib inline

#Model Parameters
sigmoid_ammount = 15  # number of hidden sigmoid units (NOTE: name has a typo, "amount"; kept for compatibility with later cells)
m = 150               # number of training collocation points in time

# Harmonic oscillator m*x'' + k*x = 0 with unit mass and stiffness,
# so omega^2 = k/mass = 1 and the exact solution with these initial
# conditions is x(t) = sin(t).
mass = 1
k = 1
om2 = k/mass   # omega squared
v_0 = 1        # initial velocity x'(0)
x_0 = 0        # initial position x(0)

# Conserved total energy H = (m/2)v^2 + (k/2)x^2 evaluated at t=0;
# used later as a physics (energy-conservation) penalty target.
ham_0 = (mass/2.0)*v_0*v_0 + (k/2.0)*x_0*x_0

#------------------------------------
# TensorFlow 1.x session shared by all later cells.
sess = tf.Session()


D:\Anaconda\lib\site-packages\h5py\__init__.py:36: FutureWarning: Conversion of the second argument of issubdtype from `float` to `np.floating` is deprecated. In future, it will be treated as `np.float64 == np.dtype(float).type`.
  from ._conv import register_converters as _register_converters

In [2]:
#Time Parameter
# Training grid: m points over one full period [0, 2*pi], column vector (m, 1).
tTr = np.linspace(0, 2*math.pi, m).reshape(m,1)
time = tf.placeholder(tf.double)  # fed with tTr (or any other (n,1) grid) at run time
#--------------------------------------------------------------------------------------
#Weights
# Single-hidden-layer network x(t) ~= sigmoid(t*V + B) @ W^T + C.
# W: output weights, V: input weights, B: hidden biases, C: output bias.
# All initialized uniformly in [-1, 1]; variable names must stay stable
# because tf.get_variable registers them in the graph by name.
W = tf.get_variable("W", initializer = tf.random_uniform(shape=[1, sigmoid_ammount], dtype=tf.double, minval = -1, maxval = 1))
V = tf.get_variable("V", initializer = tf.random_uniform(shape=[1, sigmoid_ammount], dtype=tf.double, minval = -1, maxval = 1))
B = tf.get_variable("B", initializer = tf.random_uniform(shape=[1, sigmoid_ammount], dtype=tf.double, minval = -1, maxval = 1))
C = tf.get_variable("C", initializer = tf.random_uniform(shape=[1, 1], dtype=tf.double, minval = -1, maxval = 1))
#--------------------------------------------------------------------------------------
#Some declarations
# Learning rate is a graph variable so it can be reassigned between runs
# (see grad_descent), and num is m as a float for averaging the cost terms.
alpha = tf.get_variable("Learning_Rate", initializer=tf.constant(1e-3, dtype=tf.double))
num = tf.constant(m, dtype=tf.double)
#--------------------------------------------------------------------------------------
#--------------------------------------------------------------------------------------

In [3]:
#Forward
# Hidden layer: (n, sigmoid_ammount) matrix of sigmoid activations over the fed times.
sigmoids_matrix = tf.sigmoid(tf.matmul(time, V) + B)
# Network output x(t): (n, 1) column of approximations.
approximator = tf.matmul(sigmoids_matrix, tf.transpose(W)) + C
#--------------------------------------------------------------------------------------
#First and second time-derivatives from forward (velocity and acceleration)
vel = tf.gradients(approximator, time)[0]
acc = tf.gradients(vel, time)[0]
#--------------------------------------------------------------------------------------
#Cost
# ODE residual x'' + omega^2 * x (zero for the exact solution).
eq = acc + om2*approximator
# Energy-conservation residual: H(t) - H(0) pointwise.
ham_diff = (mass/2)*tf.square(vel) + (k/2)*tf.square(approximator) - ham_0
#--------------------------------------------------------------------------------------
# Total cost: mean squared ODE residual + initial-condition penalties
# (x(0) = x_0, x'(0) = v_0, taken from the first grid point) + mean
# squared energy drift.
J = (tf.reduce_sum(tf.square(eq))*(1/num) +
     tf.square(approximator[0][0] - x_0) + tf.square(vel[0][0] - v_0) +
     tf.reduce_sum(tf.square(ham_diff))*(1/num))
#--------------------------------------------------------------------------------------
#Gradients
grads_weights = tf.gradients(J, [W, V, B, C])
#--------------------------------------------------------------------------------------
#Updates
# Plain gradient-descent assign ops, one per weight tensor.
# NOTE(review): these four assigns are run together in a single sess.run
# (see grad_descent); TF1 gives no ordering guarantee between the gradient
# reads and the assigns, so in principle some gradients could be computed
# against partially updated weights — consider tf.group with control
# dependencies if exact simultaneous updates matter. Verify before relying on it.
update_W = W.assign_sub(alpha*grads_weights[0]) 
update_V = V.assign_sub(alpha*grads_weights[1]) 
update_B = B.assign_sub(alpha*grads_weights[2])
update_C = C.assign_sub(alpha*grads_weights[3])

In [4]:
def init_weights():
    """(Re)initialize every graph variable in the shared session."""
    sess.run(tf.global_variables_initializer())
#---------------------------------------------------------------------------
def get_weights():
    """Fetch the current W, V, B values and stack them into one numpy array.

    Note: C (the output bias) is intentionally not included, matching the
    uniform [1, sigmoid_ammount] shapes that allow a single dense array.
    """
    return np.asarray(sess.run([W, V, B]))
#---------------------------------------------------------------------------
def show_grads():
    """Evaluate dJ/d[W, V, B, C] on the training grid and return them as a numpy array."""
    return np.asarray(sess.run(grads_weights, {time: tTr}))
#---------------------------------------------------------------------------
def show_sigmoids(observation_time):
    """Plot every hidden-unit sigmoid response over the given (n, 1) time grid."""
    activations = sess.run(sigmoids_matrix, {time: observation_time})
    plt.title('Sigmoids system')
    plt.grid(True)
    # activations has exactly sigmoid_ammount columns; iterate them directly.
    for unit_response in activations.T:
        plt.plot(observation_time, unit_response)
#---------------------------------------------------------------------------
def show_approx(observation_time):
    """Plot the network's approximation x(t) over the given (n, 1) time grid."""
    prediction = sess.run(approximator, {time: observation_time})
    plt.title('Approximation')
    plt.grid(True)
    plt.plot(observation_time, prediction)

In [5]:
init_weights()
#---------------------------------------------------------------------------
#Initial weights of network and sigmoids system
initial_weights = get_weights()
# Print the copy we already fetched instead of calling get_weights() a
# second time (the original ran two redundant session round-trips).
print(initial_weights)
# Wide observation window (beyond the training range) to inspect the
# untrained sigmoid basis.
some_time = np.linspace(-15, 7*math.pi, 300).reshape(300,1)
show_sigmoids(some_time)


[[[ 0.08897037  0.96238342 -0.81513181  0.00925487  0.64914619
   -0.40419568 -0.45439763  0.37897223 -0.80084921 -0.12828464
    0.9601263   0.15095864 -0.66270693 -0.03968295  0.40512793]]

 [[-0.55440435 -0.40915137  0.68379414  0.31386183  0.11665004
    0.42394517 -0.80429324  0.00414977  0.73976857  0.37561171
    0.61997304 -0.98547338 -0.8186879   0.4570231   0.68598012]]

 [[ 0.09561318 -0.93327773  0.99687951  0.98133496  0.25330972
   -0.32398041 -0.76254653 -0.25036711  0.22503453  0.63684453
    0.06473597 -0.19460776 -0.29063148 -0.37364887  0.39249743]]]

In [6]:
# Approximation produced by the untrained (random) weights over the wide window.
show_approx(some_time)



In [7]:
# Module-level training history: I holds iteration indices, Err holds 1/J
# (inverse cost) per step; both are plotted in a later cell.
Err = []
I = []
#---------------------------------------------------------------------------
def grad_descent(N, learn_rate):
    """Run up to N gradient-descent steps at the given learning rate.

    Populates the module-level I/Err history lists (reset on each call) and
    stops early once the cost J drops below 1e-6.
    """
    # BUG FIX: the original re-bound Err and I as function-locals here, which
    # shadowed the module-level lists — so the history plotted later was
    # always empty. Declare them global so the reset and appends hit the
    # lists the plotting cell actually reads.
    global Err, I
    Err = []
    I = []
    sess.run(tf.assign(alpha, learn_rate))
    for i in tqdm_notebook(range(N)):
        I.append(i)
        # One combined step: apply all four weight updates and read the cost.
        _, _, _, _, j = sess.run([update_W, update_V, update_B, update_C, J], {time: tTr})
        Err.append(1/j)  # inverse cost, so "higher is better" on the plot
        if i % 1800 == 0:
            clear_output()
            print(j)
        if j < 1e-6:  # converged: cost below tolerance
            break
#---------------------------------------------------------------------------
#---------------------------------------------------------------------------

In [8]:
# Train for up to 500k steps with learning rate 10e-3 (i.e. 0.01).
grad_descent(500000, 10e-3)


5.679381238834066e-05


In [9]:
np.set_printoptions(precision=4)
# Inverse-cost (1/J) history over the iterations recorded by grad_descent.
plt.plot(I, Err)


Out[9]:
[<matplotlib.lines.Line2D at 0x25674ffe550>]

In [10]:
#print(show_grads())

In [11]:
#print(get_weights())

In [12]:
#show_sigmoids(some_time)

In [13]:
# Compare the trained network against the exact solution sin(t) over one period.
observe_time = np.linspace(0, 2*math.pi, 400).reshape(400,1)
true = np.sin(observe_time)  # exact solution for x(0)=0, x'(0)=1, omega=1
approx = sess.run(approximator, {time: observe_time}).reshape((400,1))
plt.title('Approximation and error')
plt.grid(True)
plt.plot(observe_time, approx)
plt.plot(observe_time, true - approx, 'red')  # pointwise error curve


Out[13]:
[<matplotlib.lines.Line2D at 0x25675006438>]

In [14]:
# Trained approximation alone, without the error curve.
plt.plot(observe_time, approx)


Out[14]:
[<matplotlib.lines.Line2D at 0x256750817b8>]

In [15]:
# Mean squared error of the trained approximation against sin(t).
err = true - approx
(1/np.array(err).size)*np.sum(err**2)


Out[15]:
1.0167611420285465e-06