In [1]:
import tensorflow as tf
import numpy as np
import math
from matplotlib import pyplot as plt
from tqdm import tqdm_notebook
from IPython.display import clear_output
%matplotlib inline

#Model Parameters
sigmoid_ammount = 15
m = 150

mass = 1
k = 1
om2 = k/mass
v_0 = 1
x_0 = 0

ham_0 = (mass/2.0)*v_0*v_0 + (k/2.0)*x_0*x_0

#------------------------------------
sess = tf.Session()


D:\Anaconda\lib\site-packages\h5py\__init__.py:36: FutureWarning: Conversion of the second argument of issubdtype from `float` to `np.floating` is deprecated. In future, it will be treated as `np.float64 == np.dtype(float).type`.
  from ._conv import register_converters as _register_converters

In [2]:
#Time Parameter
tTr = np.linspace(0, 2*math.pi, m).reshape(m,1)
time = tf.placeholder(tf.double)
#--------------------------------------------------------------------------------------
#Weights
W = tf.get_variable("W", initializer = tf.random_uniform(shape=[1, sigmoid_ammount], dtype=tf.double, minval = -1, maxval = 1))
V = tf.get_variable("V", initializer = tf.random_uniform(shape=[1, sigmoid_ammount], dtype=tf.double, minval = -1, maxval = 1))
B = tf.get_variable("B", initializer = tf.random_uniform(shape=[1, sigmoid_ammount], dtype=tf.double, minval = -1, maxval = 1))
C = tf.get_variable("C", initializer = tf.random_uniform(shape=[1, sigmoid_ammount], dtype=tf.double, minval = -1, maxval = 1))
#--------------------------------------------------------------------------------------
#Some declarations
alpha = tf.get_variable("Learning_Rate", initializer=tf.constant(1e-3, dtype=tf.double))
num = tf.constant(m, dtype=tf.double)
#--------------------------------------------------------------------------------------

In [3]:
#Forward
sigmoids_matrix = tf.sigmoid(tf.matmul(time, V) + B)
approximator = tf.matmul(sigmoids_matrix, tf.transpose(W)) + C
#--------------------------------------------------------------------------------------
#First and second time-deriatives from forward
vel = tf.gradients(approximator, time)[0]
acc = tf.gradients(vel, time)[0]
#--------------------------------------------------------------------------------------
#Cost
eq = acc + om2*approximator
ham_diff = (mass/2)*tf.square(vel) + (k/2)*tf.square(approximator) - ham_0
#--------------------------------------------------------------------------------------
J = (tf.reduce_sum(tf.square(eq))*(1/num) +
     tf.square(approximator[0][0] - x_0) + tf.square(vel[0][0] - v_0) +
     tf.reduce_sum(tf.square(ham_diff))*(1/num))
#--------------------------------------------------------------------------------------
#Gradients
grads_weights = tf.gradients(J, [W, V, B, C])
#--------------------------------------------------------------------------------------
#Updates
update_W = W.assign_sub(alpha*grads_weights[0]) 
update_V = V.assign_sub(alpha*grads_weights[1]) 
update_B = B.assign_sub(alpha*grads_weights[2])
update_C = C.assign_sub(alpha*grads_weights[3])

In [4]:
def init_weights():
    init = tf.global_variables_initializer()
    sess.run(init)
#---------------------------------------------------------------------------
def get_weights():
    weights = sess.run([W, V, B])
    return np.asarray(weights)
#---------------------------------------------------------------------------
def show_grads():            
    grads = sess.run(grads_weights, {time: tTr})            
    return np.asarray(grads)
#---------------------------------------------------------------------------
def show_sigmoids(observation_time):
    matrix = sess.run(sigmoids_matrix, {time: observation_time})
    plt.title('Sigmoids system')
    plt.grid(True)
    for i in range(sigmoid_ammount):
        si = matrix[:,i] 
        plt.plot(observation_time, si)
#---------------------------------------------------------------------------
def show_approx(observation_time):
    approx = sess.run(approximator, {time: observation_time})
    plt.grid(True)
    plt.title('Approximation')
    plt.plot(observation_time, approx)

In [5]:
init_weights()
#---------------------------------------------------------------------------
#Initial weights of network and sigmoids system
initial_weights = get_weights()
print(get_weights())
some_time = np.linspace(-15, 7*math.pi, 300).reshape(300,1)
show_sigmoids(some_time)


[[[ 0.56088261  0.67774401 -0.80156586  0.81591878 -0.59244438
    0.21798489 -0.97787945  0.2067518  -0.5173751  -0.70575847
   -0.54294665  0.39829878 -0.97643218 -0.50741284  0.75200187]]

 [[-0.0339906   0.87918978  0.88684244 -0.50383131 -0.9116965
   -0.53103951  0.32268369  0.41167191  0.44549379  0.18948539
    0.83722292  0.06349334  0.04754673  0.0152478  -0.06706203]]

 [[ 0.69595807 -0.57585029 -0.92743877  0.31141205 -0.43628331
   -0.03755745 -0.69712927 -0.91386927  0.31602092  0.4792107
    0.04898627 -0.13976984  0.06143178  0.07202208 -0.32171573]]]

In [6]:
show_approx(some_time)



In [7]:
Err = []
I = []
#---------------------------------------------------------------------------
def grad_descent(N, learn_rate):
    Err = []
    I = []
    sess.run(tf.assign(alpha, learn_rate))
    for i in tqdm_notebook(range(N)):
        I.append(i)
        _, _, _, j = sess.run([update_W, update_V, update_B, J], {time: tTr})
        Err.append(1/j)
        if i%1800==0:
            clear_output()
            print(j)
        if (j<1e-6):
            break
#---------------------------------------------------------------------------

In [8]:
grad_descent(500000, 10e-3)


0.00024313096441867522


In [9]:
np.set_printoptions(precision=4)
plt.plot(I, Err)


Out[9]:
[<matplotlib.lines.Line2D at 0x215d3aaccf8>]

In [10]:
#print(show_grads())


[[[ 5.8920e-06  6.6583e-06  4.5851e-05 -2.0389e-05  3.2227e-06
   -4.2564e-06  6.8047e-05  8.7334e-06 -1.4212e-06 -2.9801e-06
    4.7215e-06 -2.7312e-05  4.8119e-05  3.7174e-06 -3.1894e-06]]

 [[ 3.3482e-06 -1.1263e-05  3.0043e-06  6.8261e-07 -2.2943e-05
   -3.6958e-06 -1.3517e-05 -1.5366e-08  2.8916e-07  3.1843e-06
   -1.6435e-06 -3.0172e-05  3.3610e-05 -1.7721e-06 -1.1625e-05]]

 [[-1.0831e-05 -2.9417e-05  8.8697e-05 -2.6011e-05  3.3773e-05
   -3.9329e-06  1.1798e-04  3.1170e-08  2.0707e-07  2.0310e-06
   -4.1324e-06  1.1790e-04 -1.7294e-04 -2.8601e-06 -1.0860e-05]]]

In [11]:
#print(get_weights())


[[[ 2.3627e+00  2.5724e+00 -3.9424e+00  2.0845e+00 -1.9324e+00
    4.4894e-01 -2.8999e+00 -3.4069e-03 -2.7573e-02 -2.8406e-01
    9.2021e-01  3.9671e+00 -4.0865e+00  6.7534e-01  1.2191e+00]]

 [[ 1.7907e-01  1.3813e+00  1.2077e+00 -8.1004e-01 -1.6436e+00
   -7.1805e-01  1.2713e+00  6.7127e-01  7.4893e-01  8.1065e-01
    1.7554e-01  7.0645e-01 -7.9515e-01  1.6878e-01 -7.8840e-01]]

 [[-4.3440e-01 -1.0426e+00 -5.1487e+00  1.6216e+00 -4.5409e-01
    6.4857e-02 -3.7775e+00 -8.1535e-01  2.6441e-01  3.8514e-01
   -1.7700e-01 -3.6415e+00  4.2786e+00  5.6236e-02  1.5917e-02]]]

In [12]:
#show_sigmoids(some_time)



In [13]:
observe_time = np.linspace(0, 2*math.pi, 400).reshape(400,1)
plt.title('Approximation and error')
plt.grid(True)
approx = sess.run(approximator, {time: observe_time}).reshape((400,1))
true = np.sin(observe_time)
plt.plot(observe_time, approx)
plt.plot(observe_time, true - approx, 'red')


Out[13]:
[<matplotlib.lines.Line2D at 0x215d25c1ac8>]

In [14]:
plt.plot(observe_time, approx)


Out[14]:
[<matplotlib.lines.Line2D at 0x215d2670c88>]

In [15]:
err = true - approx
np.sum(np.square(err))*(1/np.array(err).size)


Out[15]:
1.7251162137746908e-05