In [1]:
# Необходмые команды импорта.
import sys
import os
sys.path.append('../physlearn/')
sys.path.append('../source')
import numpy as np
from numpy import linalg as LA
import tensorflow as tf
from matplotlib import pylab as plt
from tqdm import tqdm_notebook
from IPython.display import clear_output
import numpy.random as rand
from physlearn.NeuralNet.NeuralNet import NeuralNet
from physlearn.Optimizer.NelderMead.NelderMead import NelderMead
import d1_osc
import ann_constructor
import math_util
from visualiser import Visualiser

# Model Parameters
sigmoid_ammount = 25
m = 400 # размер сеток обучения
M = 4 # количество выходных нейронов(базисных функций)
a = -10
b = 10
hidden_ammount = 25
# Сетка для обучения(пока нету оптимизатора, устройчивого к стохастической замене)
train_xi = np.linspace(a, b, m, endpoint=True).reshape(1, m) 
colloc_xi = np.linspace(a/2.0, b/2.0, M, endpoint=True).reshape(1, M)
#obs_xi = np.linspace(a, b, m, endpoint=True).reshape(1, m) 
%matplotlib inline


D:\Anaconda\lib\site-packages\h5py\__init__.py:36: FutureWarning: Conversion of the second argument of issubdtype from `float` to `np.floating` is deprecated. In future, it will be treated as `np.float64 == np.dtype(float).type`.
  from ._conv import register_converters as _register_converters

In [2]:
# ANN
net, net_output, net_sum, sess = ann_constructor.return_deep_net_expressions(M, sigmoid_ammount, hidden_ammount)
# Выражение, определяющеие образ выходов сети при действии гамильтонианом. Task-dependant
first_deriative = tf.gradients(net_output, net.x)[0]
net_images = (-(tf.gradients(first_deriative, net.x)[0]) + tf.multiply(tf.square(net.x), net.output))
net_images_sum = tf.reduce_sum(input_tensor = net_images, axis = 0)

def net_outs_value(x):
    return net.calc(net_output, {net.x : x})
def net_sum_value(x):
    return net.calc(net_sum, {net.x : x})
def net_images_value(x):
    return net.calc(net_images, {net.x: x})
def net_images_sum_value(x):
    return net.calc(net_images_sum, {net.x: x})

dim = net.return_unroll_dim()
print(dim)


804

In [11]:
net.calc(net.output-net_output, {net.x: train_xi})


Out[11]:
array([[0., 0., 0., ..., 0., 0., 0.],
       [0., 0., 0., ..., 0., 0., 0.],
       [0., 0., 0., ..., 0., 0., 0.],
       [0., 0., 0., ..., 0., 0., 0.]])

In [7]:
net.calc(tf.multiply(tf.square(net.x), net.output), {net.x: train_xi}).shape


Out[7]:
(4, 400)

In [ ]:


In [3]:
colloc_matrix = net.calc(net_output, {net.x : colloc_xi})
diag_colloc_matrix = np.eye(M)
diag_colloc_matrix.flat[:: M + 1] += -1 + colloc_matrix.diagonal()
normal_colloc_matrix = np.matmul(LA.inv(diag_colloc_matrix), colloc_matrix)
np.fill_diagonal(normal_colloc_matrix, 0)
linear_factor = math_util.norm(normal_colloc_matrix)
print('Colloc matrix: \n', colloc_matrix)
print('Diag of colloc matrix: \n', diag_colloc_matrix)
print('Normal colloc matrix: \n', np.matmul(LA.inv(diag_colloc_matrix), colloc_matrix))
print('Normal colloc matrix - diag: \n', normal_colloc_matrix)
print('Measure of linearity: \n', linear_factor)


Colloc matrix: 
 [[ 0.25272314 -0.12389086  2.0972873   0.323569  ]
 [ 4.15859794  4.76093516  5.49794994  4.9276182 ]
 [-3.91695295 -3.7778277  -2.09114314 -3.29920702]
 [-1.74645078 -1.42990465 -4.63065942  0.80616476]]
Diag of colloc matrix: 
 [[ 0.25272314  0.          0.          0.        ]
 [ 0.          4.76093516  0.          0.        ]
 [ 0.          0.         -2.09114314  0.        ]
 [ 0.          0.          0.          0.80616476]]
Normal colloc matrix: 
 [[ 1.         -0.49022364  8.29875463  1.28032996]
 [ 0.87348342  1.          1.15480463  1.03501057]
 [ 1.87311565  1.80658494  1.          1.57770501]
 [-2.16636954 -1.77371267 -5.74406081  1.        ]]
Normal colloc matrix - diag: 
 [[ 0.         -0.49022364  8.29875463  1.28032996]
 [ 0.87348342  0.          1.15480463  1.03501057]
 [ 1.87311565  1.80658494  0.          1.57770501]
 [-2.16636954 -1.77371267 -5.74406081  0.        ]]
Measure of linearity: 
 124.01159911521404

In [4]:
# Линейная регрессия
A = tf.transpose(net_output)
A_T = net_output
y = net_images_sum
y = tf.expand_dims(y, -1)
omega = tf.matmul(tf.matmul(tf.matrix_inverse(tf.matmul(A_T, A)), A_T), y)

# Определение функционала J
regression_fit = tf.matmul(tf.transpose(net_output), omega)
noninvariance_measure = (1/m) * tf.reduce_sum(tf.square(tf.expand_dims(net_images_sum, -1) - regression_fit))
J_expression = noninvariance_measure

def J(params):
    net.roll_matrixes(params)
    colloc_matrix = net.calc(net_output, {net.x : colloc_xi})
    diag_colloc_matrix = np.eye(M)
    diag_colloc_matrix.flat[:: M + 1] += -1 + colloc_matrix.diagonal()
    normal_colloc_matrix = np.matmul(LA.inv(diag_colloc_matrix), colloc_matrix)
    np.fill_diagonal(normal_colloc_matrix, 0)
    linear_factor = math_util.norm(normal_colloc_matrix)
    cost = net.calc(J_expression, {net.x : train_xi}) + linear_factor
    cost += np.sum(np.square(net.run(np.linspace(-7, 7, 2, endpoint=True).reshape(1, 2) )))
    cost += 2*np.sum(np.square(net.run(np.linspace(-8, 8, 2, endpoint=True).reshape(1, 2) )))
    cost += 3*np.sum(np.square(net.run(np.linspace(-11, 11, 2, endpoint=True).reshape(1, 2) )))
    return cost

# Оптимизация
iter_number_de = 1500
iter_number_nm = 700000
opt_de = DifferentialEvolution(amount_of_individuals = 5*dim, end_cond = iter_number_de, f=0.55 )
opt_nm = NelderMead(end_cond = iter_number_nm)

In [5]:
optimisation_result = opt_nm.optimize(func=J, dim=dim)
print("J after optimisation: ", J(optimisation_result))
net.roll_matrixes(optimisation_result)


100%|███████████████████████████████████████████████████████████████████████| 700000/700000 [1:05:05<00:00, 185.14it/s]
J after optimisation:  0.004834189848987851

In [ ]:


In [6]:
print("J after optimisation: ", J(optimisation_result))
net.roll_matrixes(optimisation_result)


J after optimisation:  0.004834189848987851

In [7]:
x_obss = np.linspace(-2, 2, 100).reshape(1, 100)
plt.plot(np.sum(x_obss, 0), net_images_value(x_obss)[3,:] )
plt.plot(np.sum(x_obss, 0), net_outs_value(x_obss)[3,:] )


Out[7]:
[<matplotlib.lines.Line2D at 0x1bad9461ac8>]

In [23]:
vis = Visualiser(net, net_output, net_sum, M)
vis.plot_four(np.linspace(-15, 15, m, endpoint=True).reshape(1, m) )


<Figure size 432x288 with 0 Axes>

In [9]:
d1_osc.show_wf(2,train_xi)



In [10]:
#outs_matrix = net.calc(net_output, {net.x : train_xi})
#outs_matrix_file = open("outs_matrix1.txt", "wb")
#np.savetxt(outs_matrix_file, outs_matrix, delimiter=' ')
#outs_matrix_file.close()

In [11]:
number_col = M
#net.roll_matrixes(optimisation_result)
#collocation_grid = rand.random((1,200)) * (np.abs(x_0_max) + np.abs(x_0_min)) - np.abs(x_0_min)
#collocation_grid = np.linspace(x_0_min, x_0_max, number_col, endpoint=True).reshape(1, number_col)
collocation_grid = np.linspace(a, b, number_col, endpoint=True).reshape(1, number_col)
hamiltonian_matrix = net_images_value(collocation_grid)
bind_matrix = net.run(collocation_grid)
from scipy.linalg import eig
#eigvals, eigvecs = eig(np.matmul(np.transpose(bind_matrix), hamiltonian_matrix), np.matmul(np.transpose(bind_matrix), bind_matrix))
eigvals, eigvecs = eig(hamiltonian_matrix, bind_matrix)
eigvals_norm = np.abs(eigvals)
eigvals_real = np.real(eigvals)
for eigval in eigvals:
    print(eigval)
#print(eigvecs[2,:])


(100+0j)
(99.9556853041122+0j)
(9.29777559084336+0j)
(11.111111111111112+0j)

In [12]:
x = np.linspace(0,25,1000)
i = 0

for eigval in eigvals:
    i+=1
    plt.plot(x, eigval+0*x)
    plt.plot(1+i, eigval, 'x')


D:\Anaconda\lib\site-packages\numpy\core\numeric.py:492: ComplexWarning: Casting complex values to real discards the imaginary part
  return array(a, dtype, copy=False, order=order)

In [22]:
x_obs = np.linspace(-15, 15, 1000).reshape(1, 1000)
for i in range(M):
    y = (net.run(x_obs)).transpose()*eigvecs[i,:]
    y = np.sum(y,1)
    y1 = net_images_value(x_obs)[i,:]
    plt.plot(np.sum(x_obs, 0), y)# - y1)



In [14]:
a = -10

In [15]:
x_obs = np.linspace(-3, 3, 1000).reshape(1, 1000)
k = 1

y = (net.run(x_obs)).transpose()*eigvecs[k,:]
plt.plot(np.sum(x_obs, 0), np.sum(y,1))

h = net_images_value(x_obs).transpose()*eigvecs[k,:]
plt.plot(np.sum(x_obs, 0), np.sum(h,1))


Out[15]:
[<matplotlib.lines.Line2D at 0x1badca803c8>]