In [1]:
# Required imports.
import sys, os
sys.path.append(os.path.join(sys.path[0], '../source/'))
sys.path.append(os.path.join(sys.path[0], '../../'))
import numpy as np
from numpy import linalg as LA
import tensorflow as tf
from matplotlib import pylab as plt
from tqdm import tqdm_notebook
from IPython.display import clear_output
import numpy.random as rand
from physlearn.NeuralNet.NeuralNetPro import NeuralNetPro
from physlearn.DifferentialEvolution import DifferentialEvolution
from physlearn.NelderMead import NelderMead
import d1_osc
import ann_constructor
import math_util
from visualiser import Visualiser
# Model parameters
sigmoid_ammount = 25  # sigmoid units per hidden layer (name sic: "amount")
m = 450 # size of the training grid
M = 10 # number of output neurons (basis functions)
a = -10  # left edge of the domain
b = 10  # right edge of the domain
hidden_ammount = 35  # hidden-layer width
# Training grid (fixed for now: no optimizer robust to stochastic resampling yet)
train_xi = np.linspace(a, b, m, endpoint=True).reshape(1, m)
# Collocation points used below to measure linear dependence of the outputs
colloc_xi = np.linspace(a/2.0 -1, b/2.0 + 1, M, endpoint=True).reshape(1, M)
#obs_xi = np.linspace(a, b, m, endpoint=True).reshape(1, m)
%matplotlib inline
In [2]:
# ANN: build the network and the TF expressions for its outputs.
net, net_output, net_sum, sess = ann_constructor.return_deep_net_expressions(M, sigmoid_ammount, hidden_ammount)
# Image of the network outputs under the operator -d^2/dx^2 + x^2
# (1D harmonic-oscillator Hamiltonian). Task-dependent.
first_deriative = tf.gradients(net_output, net.x)[0]
# NOTE(review): the potential term uses net.output while the derivative uses
# net_output — presumably the same tensor; confirm in ann_constructor.
net_images = (-(tf.gradients(first_deriative, net.x)[0]) + tf.multiply(tf.square(net.x), net.output))
net_images_sum = tf.reduce_sum(input_tensor = net_images, axis = 0)
def net_outs_value(x):
    """Evaluate the network output tensor on the grid ``x``."""
    feed = {net.x: x}
    return net.calc(net_output, feed)
def net_sum_value(x):
    """Evaluate the summed-outputs tensor on the grid ``x``."""
    feed = {net.x: x}
    return net.calc(net_sum, feed)
def net_images_value(x):
    """Evaluate the Hamiltonian images of the outputs on the grid ``x``."""
    feed = {net.x: x}
    return net.calc(net_images, feed)
def net_images_sum_value(x):
    """Evaluate the summed Hamiltonian images on the grid ``x``."""
    feed = {net.x: x}
    return net.calc(net_images_sum, feed)
# Total number of trainable parameters when unrolled into one flat vector.
dim = net.return_unroll_dim()
print(dim)
In [3]:
# Linearity diagnostic: evaluate the outputs on the collocation grid and
# normalize by the diagonal part; the norm of the off-diagonal remainder
# measures linear dependence between the basis functions.
colloc_matrix = net.calc(net_output, {net.x : colloc_xi})
diag_colloc_matrix = np.eye(M)
diag_colloc_matrix.flat[:: M + 1] += -1 + colloc_matrix.diagonal()
normal_colloc_matrix = np.matmul(LA.inv(diag_colloc_matrix), colloc_matrix)
# Keep a copy before zeroing the diagonal in place: the original recomputed
# the matrix inverse a second time at print just to show the pre-zeroed matrix.
normal_colloc_full = normal_colloc_matrix.copy()
np.fill_diagonal(normal_colloc_matrix, 0)
linear_factor = math_util.norm(normal_colloc_matrix)
print('Colloc matrix: \n', colloc_matrix)
print('Diag of colloc matrix: \n', diag_colloc_matrix)
print('Normal colloc matrix: \n', normal_colloc_full)
print('Normal colloc matrix - diag: \n', normal_colloc_matrix)
print('Measure of linearity: \n', linear_factor)
In [4]:
# Linear regression: least-squares fit of the summed H-images onto the span
# of the network outputs.
A = tf.transpose(net_output)  # design matrix, columns are the basis outputs
A_T = net_output
y = net_images_sum
y = tf.expand_dims(y, -1)  # column vector of targets
# Normal equations: omega = (A^T A)^{-1} A^T y  (TF1 API: tf.matrix_inverse)
omega = tf.matmul(tf.matmul(tf.matrix_inverse(tf.matmul(A_T, A)), A_T), y)
# Functional J: mean squared residual of the fit — how far the H-images lie
# outside the span of the outputs ("non-invariance" of the subspace).
regression_fit = tf.matmul(tf.transpose(net_output), omega)
noninvariance_measure = (1/m) * tf.reduce_sum(tf.square(tf.expand_dims(net_images_sum, -1) - regression_fit))
J_expression = noninvariance_measure
def J(params):
    """Cost functional for the optimizer.

    Loads ``params`` into the network, then sums:
    the non-invariance measure on the training grid, the linear-dependence
    factor on the collocation grid, and weighted boundary penalties that
    push the outputs toward zero at +/-7, +/-8 and +/-11.
    """
    net.roll_matrixes(params)
    # Linear-dependence penalty (same computation as the diagnostic cell above).
    colloc_matrix = net.calc(net_output, {net.x : colloc_xi})
    diag_colloc_matrix = np.eye(M)
    diag_colloc_matrix.flat[:: M + 1] += -1 + colloc_matrix.diagonal()
    normal_colloc_matrix = np.matmul(LA.inv(diag_colloc_matrix), colloc_matrix)
    np.fill_diagonal(normal_colloc_matrix, 0)
    linear_factor = math_util.norm(normal_colloc_matrix)
    cost = net.calc(J_expression, {net.x : train_xi}) + linear_factor
    # Boundary penalties: increasing weight at increasingly distant edges.
    for weight, edge in ((1, 7), (2, 8), (3, 11)):
        edge_points = np.linspace(-edge, edge, 2, endpoint=True).reshape(1, 2)
        cost += weight * np.sum(np.square(net.run(edge_points)))
    return cost
# Optimization setup
iter_number_de = 10000  # DE budget; opt_de is constructed but not used below
iter_number_nm = 1000000
opt_de = DifferentialEvolution(amount_of_individuals = 6*dim, end_cond = iter_number_de, f=0.55 )
opt_nm = NelderMead(end_cond = iter_number_nm)
In [5]:
# Run Nelder-Mead on J over the flat parameter vector, then load the
# resulting parameters back into the network.
optimisation_result = opt_nm.optimize(func=J, dim=dim)
print("J after optimisation: ", J(optimisation_result))
net.roll_matrixes(optimisation_result)
In [ ]:
In [6]:
# NOTE(review): duplicate of the previous cell's last two lines; redundant
# on a linear Restart-&-Run-All (J itself calls roll_matrixes as well).
print("J after optimisation: ", J(optimisation_result))
net.roll_matrixes(optimisation_result)
In [7]:
# Plot the 4th output (index 3) and its Hamiltonian image on a dense grid.
x_obss = np.linspace(-2, 2, 100).reshape(1, 100)
plt.plot(np.sum(x_obss, 0), net_images_value(x_obss)[3,:] )
plt.plot(np.sum(x_obss, 0), net_outs_value(x_obss)[3,:] )
Out[7]:
In [8]:
# Visualise the trained basis functions on a wide grid.
vis = Visualiser(net, net_output, net_sum, M)
vis.plot_four(np.linspace(-15, 15, m, endpoint=True).reshape(1, m) )
In [9]:
d1_osc.show_wf(2,train_xi)
In [10]:
# Commented-out: dump the output matrix on the training grid to a text file.
#outs_matrix = net.calc(net_output, {net.x : train_xi})
#outs_matrix_file = open("outs_matrix1.txt", "wb")
#np.savetxt(outs_matrix_file, outs_matrix, delimiter=' ')
#outs_matrix_file.close()
In [11]:
# Generalized eigenvalue problem on a collocation grid:
# hamiltonian_matrix rows are H-images, bind_matrix rows are raw outputs,
# both evaluated at number_col = M points so the matrices are square.
number_col = M
#net.roll_matrixes(optimisation_result)
#collocation_grid = rand.random((1,200)) * (np.abs(x_0_max) + np.abs(x_0_min)) - np.abs(x_0_min)
#collocation_grid = np.linspace(x_0_min, x_0_max, number_col, endpoint=True).reshape(1, number_col)
collocation_grid = np.linspace(a, b, number_col, endpoint=True).reshape(1, number_col)
hamiltonian_matrix = net_images_value(collocation_grid)
bind_matrix = net.run(collocation_grid)
# NOTE(review): mid-notebook import — should live in the top import cell.
from scipy.linalg import eig
#eigvals, eigvecs = eig(np.matmul(np.transpose(bind_matrix), hamiltonian_matrix), np.matmul(np.transpose(bind_matrix), bind_matrix))
eigvals, eigvecs = eig(hamiltonian_matrix, bind_matrix)
eigvals_norm = np.abs(eigvals)  # not used below
eigvals_real = np.real(eigvals)  # not used below
for eigval in eigvals:
    print(eigval)
#print(eigvecs[2,:])
#print(eigvecs[2,:])
In [12]:
# Plot each eigenvalue as a horizontal level line plus a marker at its index.
x = np.linspace(0, 25, 1000)
for i, eigval in enumerate(eigvals, start=1):
    plt.plot(x, eigval + 0 * x)
    plt.plot(1 + i, eigval, 'x')
In [44]:
# Residual between eigval_k * (outputs weighted by eigvecs row i) and the
# H-image of output i, plotted for every i.
# NOTE(review): `k` is only defined in a LATER cell (In [30]) — this cell
# raises NameError under Restart & Run All. Also, scipy.linalg.eig returns
# eigenvectors as columns (vr[:, i]); row-indexing eigvecs here may be a
# bug — confirm intent.
x_obs = np.linspace(-5, 5, 100).reshape(1, 100)
outputs_t = (net.run(x_obs)).transpose()
for i in range(M):
    approx = np.sum(eigvals[k] * outputs_t * eigvecs[i, :], 1)
    exact = net_images_value(x_obs)[i, :]
    plt.plot(np.sum(x_obs, 0), np.abs(approx - exact))
In [35]:
eigvals[k]
Out[35]:
In [14]:
a = -10
In [30]:
# Check eigenpair k of the generalized problem H c = E B c by plotting
# E_k * (outputs combined with c_k) against (H-images combined with c_k):
# the two curves should coincide where the eigenpair is accurate.
x_obs = np.linspace(-5, 5, 1000).reshape(1, 1000)
k = 5  # eigenpair index to inspect
# BUG FIX: scipy.linalg.eig returns eigenvectors as COLUMNS (vr[:, i]);
# the original indexed rows (eigvecs[k, :]), which takes component k of
# every eigenvector instead of eigenvector k.
y = eigvals[k] * (net.run(x_obs)).transpose() * eigvecs[:, k]
plt.plot(np.sum(x_obs, 0), np.sum(y, 1))
h = net_images_value(x_obs).transpose() * eigvecs[:, k]
plt.plot(np.sum(x_obs, 0), np.sum(h, 1))
Out[30]:
In [ ]: