In [1]:
# Required imports.
import sys, os
sys.path.append(os.path.join(sys.path[0], '../source/'))
sys.path.append(os.path.join(sys.path[0], '../../'))
import numpy as np
from numpy import linalg as LA
import tensorflow as tf
from matplotlib import pylab as plt
from tqdm import tqdm_notebook
from IPython.display import clear_output
import numpy.random as rand
from physlearn.NeuralNet.NeuralNetPro import NeuralNetPro
from physlearn.DifferentialEvolution import DifferentialEvolution
from physlearn.NelderMead import NelderMead
import d1_osc
import ann_constructor
import math_util
from visualiser import Visualiser

# Model Parameters
sigmoid_amount = 25
m = 400  # size of the training grid
M = 4    # number of output neurons (basis functions)
a = -10
b = 10
hidden_amount = 25
# Training grid (fixed for now: there is no optimizer robust to stochastic resampling yet)
train_xi = np.linspace(a, b, m, endpoint=True).reshape(1, m)
colloc_xi = np.linspace(a/2.0, b/2.0, M, endpoint=True).reshape(1, M)
#obs_xi = np.linspace(a, b, m, endpoint=True).reshape(1, m)
%matplotlib inline



In [2]:
# ANN: build the network and the TF expressions for its outputs.
net, net_output, net_sum, sess = ann_constructor.return_deep_net_expressions(M, sigmoid_amount, hidden_amount)
# Image of the network outputs under the Hamiltonian H = -d^2/dx^2 + x^2. Task-dependent.
first_derivative = tf.gradients(net_output, net.x)[0]
net_images = -tf.gradients(first_derivative, net.x)[0] + tf.multiply(tf.square(net.x), net_output)
net_images_sum = tf.reduce_sum(net_images, axis=0)

# Convenience wrappers: evaluate the graph expressions on a numpy grid x.
def net_outs_value(x):
    return net.calc(net_output, {net.x: x})

def net_sum_value(x):
    return net.calc(net_sum, {net.x: x})

def net_images_value(x):
    return net.calc(net_images, {net.x: x})

def net_images_sum_value(x):
    return net.calc(net_images_sum, {net.x: x})

dim = net.return_unroll_dim()
print(dim)


804
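As a sanity check of the Hamiltonian construction (a minimal sketch, separate from the pipeline): applying the same pattern of stacked tf.gradients to the known ground state psi_0(x) = exp(-x^2/2) should reproduce H psi_0 = 1 * psi_0, since the spectrum of H = -d^2/dx^2 + x^2 is E_n = 2n + 1.

# Standalone check: H = -d^2/dx^2 + x^2 applied to psi_0(x) = exp(-x^2 / 2)
# must give back psi_0 itself (E_0 = 1 in these units).
check_x = tf.placeholder(tf.float64, shape=(1, None))
psi0 = tf.exp(-tf.square(check_x) / 2.0)
psi0_prime = tf.gradients(psi0, check_x)[0]
h_psi0 = -tf.gradients(psi0_prime, check_x)[0] + tf.square(check_x) * psi0
with tf.Session() as check_sess:
    xs = np.linspace(-2.0, 2.0, 5).reshape(1, 5)
    print(check_sess.run(h_psi0 / psi0, {check_x: xs}))  # ~1.0 at every point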

In [3]:
# Diagnostic of linear (in)dependence of the outputs at the collocation points:
# row-normalize the collocation matrix by its diagonal, drop the diagonal,
# and take the norm of the off-diagonal remainder.
colloc_matrix = net.calc(net_output, {net.x: colloc_xi})
diag_colloc_matrix = np.diag(np.diag(colloc_matrix))
normal_colloc_matrix = np.matmul(LA.inv(diag_colloc_matrix), colloc_matrix)
np.fill_diagonal(normal_colloc_matrix, 0)
linear_factor = math_util.norm(normal_colloc_matrix)
print('Colloc matrix: \n', colloc_matrix)
print('Diag of colloc matrix: \n', diag_colloc_matrix)
print('Normal colloc matrix: \n', np.matmul(LA.inv(diag_colloc_matrix), colloc_matrix))
print('Normal colloc matrix - diag: \n', normal_colloc_matrix)
print('Measure of linearity: \n', linear_factor)


Colloc matrix: 
 [[ 1.70589771  1.51761565 -2.14432407 -1.0061478 ]
 [ 3.38547539  3.15554058  0.22704276  0.11723463]
 [ 1.70134839  1.74986225 -0.43451598 -0.07602597]
 [-1.30068093 -2.92549014 -4.0883426  -3.99291303]]
Diag of colloc matrix: 
 [[ 1.70589771  0.          0.          0.        ]
 [ 0.          3.15554058  0.          0.        ]
 [ 0.          0.         -0.43451598  0.        ]
 [ 0.          0.          0.         -3.99291303]]
Normal colloc matrix: 
 [[ 1.          0.88962875 -1.25700624 -0.58980547]
 [ 1.07286701  1.          0.07195051  0.037152  ]
 [-3.91550246 -4.02715281  1.          0.17496702]
 [ 0.32574737  0.73267064  1.02389974  1.        ]]
Normal colloc matrix - diag: 
 [[ 0.          0.88962875 -1.25700624 -0.58980547]
 [ 1.07286701  0.          0.07195051  0.037152  ]
 [-3.91550246 -4.02715281  0.          0.17496702]
 [ 0.32574737  0.73267064  1.02389974  0.        ]]
Measure of linearity: 
 37.14799627619677
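For reference, the same diagnostic in plain numpy (a sketch: np.linalg.norm, i.e. the Frobenius norm, stands in for math_util.norm, whose exact definition lives in math_util and may differ):

# Hypothetical standalone version of the diagnostic above.
def linearity_measure(colloc):
    # Divide each row by its diagonal entry, then keep only the off-diagonal part.
    normalized = colloc / np.diag(colloc)[:, None]
    np.fill_diagonal(normalized, 0.0)
    return np.linalg.norm(normalized)

print(linearity_measure(colloc_matrix))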

In [4]:
# Linear regression: least-squares fit of the summed Hamiltonian image onto
# the span of the network outputs (closed-form normal-equations solution).
A = tf.transpose(net_output)
A_T = net_output
y = net_images_sum
y = tf.expand_dims(y, -1)
omega = tf.matmul(tf.matmul(tf.matrix_inverse(tf.matmul(A_T, A)), A_T), y)

# Definition of the functional J: the mean squared residual of that fit,
# i.e. how far the span of the outputs is from being invariant under H.
regression_fit = tf.matmul(tf.transpose(net_output), omega)
noninvariance_measure = (1 / m) * tf.reduce_sum(tf.square(tf.expand_dims(net_images_sum, -1) - regression_fit))
J_expression = noninvariance_measure

def J(params):
    net.roll_matrixes(params)
    # Penalize near-linear dependence of the outputs at the collocation points
    # (same diagnostic as above).
    colloc_matrix = net.calc(net_output, {net.x: colloc_xi})
    diag_colloc_matrix = np.diag(np.diag(colloc_matrix))
    normal_colloc_matrix = np.matmul(LA.inv(diag_colloc_matrix), colloc_matrix)
    np.fill_diagonal(normal_colloc_matrix, 0)
    linear_factor = math_util.norm(normal_colloc_matrix)
    cost = net.calc(J_expression, {net.x: train_xi}) + linear_factor
    # Boundary penalties: push the outputs towards zero at x = +/-7, +/-8, +/-11,
    # with growing weight further out (bound states must decay).
    cost += np.sum(np.square(net.run(np.linspace(-7, 7, 2, endpoint=True).reshape(1, 2))))
    cost += 2 * np.sum(np.square(net.run(np.linspace(-8, 8, 2, endpoint=True).reshape(1, 2))))
    cost += 3 * np.sum(np.square(net.run(np.linspace(-11, 11, 2, endpoint=True).reshape(1, 2))))
    return cost

# Optimization
iter_number_de = 1500
iter_number_nm = 700000
opt_de = DifferentialEvolution(amount_of_individuals=5 * dim, end_cond=iter_number_de, f=0.55)
opt_nm = NelderMead(end_cond=iter_number_nm)
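The closed-form omega = (A^T A)^{-1} A^T y above is the normal-equations solution of an ordinary least-squares problem; the same fit and residual can be cross-checked outside the graph with the numpy wrappers defined earlier (a sketch):

# Cross-check of noninvariance_measure: ordinary least squares in numpy.
A_np = net_outs_value(train_xi).T                     # (m, M) design matrix
y_np = net_images_sum_value(train_xi).reshape(-1, 1)  # (m, 1) target
omega_np, *_ = np.linalg.lstsq(A_np, y_np, rcond=None)
print(np.mean((y_np - A_np @ omega_np) ** 2))         # should match net.calc(J_expression, {net.x: train_xi})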

In [5]:
optimisation_result = opt_nm.optimize(func=J, dim=dim)
print("J after optimisation: ", J(optimisation_result))
net.roll_matrixes(optimisation_result)


100%|█████████████████████████████████████████████████████████████████████████| 320000/320000 [29:43<00:00, 179.44it/s]
J after optimisation:  0.043128089858585725


In [8]:
x_obss = np.linspace(-2, 2, 100).reshape(1, 100)
plt.plot(np.sum(x_obss, 0), net_images_value(x_obss)[3, :])
plt.plot(np.sum(x_obss, 0), net_outs_value(x_obss)[3, :])


Out[8]:
[<matplotlib.lines.Line2D at 0x15da51cf438>]

In [9]:
vis = Visualiser(net, net_output, net_sum, M)
vis.plot_four(np.linspace(a+5, b-5, m, endpoint=True).reshape(1, m))


<Figure size 432x288 with 0 Axes>

In [22]:
d1_osc.show_wf(2, train_xi)



In [11]:
outs_matrix = net.calc(net_output, {net.x: train_xi})
np.savetxt("outs_matrix1.txt", outs_matrix, delimiter=' ')
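For later reuse, the saved matrix can be read back with np.loadtxt:

# Reload the saved output matrix (rows: basis functions, columns: grid points).
outs_matrix_loaded = np.loadtxt("outs_matrix1.txt", delimiter=' ')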

In [12]:
number_col = M
# Collocation grid for the discrete eigenproblem: as many points as basis functions.
#collocation_grid = rand.random((1, 200)) * (np.abs(x_0_max) + np.abs(x_0_min)) - np.abs(x_0_min)
#collocation_grid = np.linspace(x_0_min, x_0_max, number_col, endpoint=True).reshape(1, number_col)
collocation_grid = np.linspace(a, b, number_col, endpoint=True).reshape(1, number_col)
hamiltonian_matrix = net_images_value(collocation_grid)  # rows: (H psi_i) at the grid points
bind_matrix = net.run(collocation_grid)                  # rows: psi_i at the grid points
from scipy.linalg import eig
# Generalized eigenproblem: hamiltonian_matrix c = lambda * bind_matrix c.
#eigvals, eigvecs = eig(np.matmul(np.transpose(bind_matrix), hamiltonian_matrix), np.matmul(np.transpose(bind_matrix), bind_matrix))
eigvals, eigvecs = eig(hamiltonian_matrix, bind_matrix)
eigvals_norm = np.abs(eigvals)
eigvals_real = np.real(eigvals)
for eigval in eigvals:
    print(eigval)
#print(eigvecs[2,:])


(100+0j)
(99.98471124243152+0j)
(11.657328133478089+0j)
(11.111111111111112+0j)
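Here eig(A, B) solves the generalized problem A c = lambda * B c; for reference, the exact oscillator spectrum in these units is E_n = 2n + 1. The commented-out line in the cell above is a least-squares ("normal matrix") form of the same problem; a minimal sketch of that variant, reusing the matrices already computed:

# Least-squares ("normal matrix") variant from the commented-out line above;
# a sketch, same eig(A, B) convention.
H_ls = np.matmul(np.transpose(bind_matrix), hamiltonian_matrix)
S_ls = np.matmul(np.transpose(bind_matrix), bind_matrix)
eigvals_ls, eigvecs_ls = eig(H_ls, S_ls)
print(np.sort(eigvals_ls.real))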

In [13]:
x = np.linspace(0, 25, 1000)

# Plot each eigenvalue as a horizontal level; take the real part explicitly
# (the imaginary parts are zero here) to avoid a ComplexWarning.
for i, eigval in enumerate(eigvals, start=1):
    plt.plot(x, eigval.real + 0 * x)
    plt.plot(1 + i, eigval.real, 'x')



In [21]:
x_obs = np.linspace(-15, 15, 1000).reshape(1, 1000)
for i in range(M):
    # scipy's eig returns eigenvectors as columns: eigvecs[:, i] pairs with eigvals[i].
    y = (net.run(x_obs)).transpose() * eigvecs[:, i]
    y = np.sum(y, 1)
    y1 = net_images_value(x_obs)[i, :]
    plt.plot(np.sum(x_obs, 0), y)  # - y1



In [15]:
a = -10

In [18]:
x_obs = np.linspace(-3, 3, 1000).reshape(1, 1000)
k = 1

# Candidate eigenfunction: network outputs weighted by the k-th eigenvector.
y = (net.run(x_obs)).transpose() * eigvecs[:, k]
plt.plot(np.sum(x_obs, 0), np.sum(y, 1))

# Its image under the Hamiltonian with the same weights; for a true eigenpair
# the two curves differ only by the factor eigvals[k].
h = net_images_value(x_obs).transpose() * eigvecs[:, k]
plt.plot(np.sum(x_obs, 0), np.sum(h, 1))


Out[18]:
[<matplotlib.lines.Line2D at 0x15da7c6a6d8>]
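For a quantitative check, the residual H psi_k - E_k psi_k should vanish for a true eigenpair; a short sketch reusing y, h, and k from the cell above:

# Pointwise residual of the candidate eigenpair (eigvals[k], eigvecs[:, k]):
# near-zero on the interval would indicate a genuine eigenfunction.
residual_k = np.sum(h, 1) - eigvals[k].real * np.sum(y, 1)
plt.plot(np.sum(x_obs, 0), residual_k)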