In [1]:
# Required imports.
import sys, os
import numpy as np
from numpy import linalg as LA
import tensorflow as tf
from matplotlib import pylab as plt
from tqdm import tqdm_notebook
from IPython.display import clear_output
import numpy.random as rand
from DifferentialEvolution import DifferentialEvolution
sys.path.append(os.path.join(sys.path[0], '../../'))
from physlearn.NeuralNet.NeuralNetPro import NeuralNetPro
#from physlearn.DifferentialEvolution import DifferentialEvolution
from physlearn.NelderMead import NelderMead
%matplotlib inline
In [2]:
# Model Parameters
mass = 1              # particle mass
omega_square = 1      # squared oscillator frequency, omega^2
plank = 1             # reduced Planck constant (hbar)
sigmoid_ammount = 15  # number of sigmoid neurons in the hidden layer
m = 50                # number of collocation points
N = 15                # number of network output neurons
x_0_min = -3
x_0_max = 3
x_collocations = np.linspace(x_0_min, x_0_max, m, endpoint=True).reshape(1, m)
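For reference: with mass, omega_square, and plank all set to 1, the analytic harmonic-oscillator levels E_n = plank * sqrt(omega_square) * (n + 1/2) reduce to n + 1/2. A minimal sketch (illustrative names only) that prints the first few levels for comparison with the eigenvalues computed later:
n_levels = np.arange(10)                                    # quantum numbers n = 0..9
analytic_levels = plank * np.sqrt(omega_square) * (n_levels + 0.5)
print(analytic_levels)                                      # [0.5 1.5 2.5 ... 9.5]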
In [3]:
# Building the network
net = NeuralNetPro(-2, 2)
net.add_input_layer(1)
net.add(sigmoid_ammount, tf.sigmoid)
net.add_output_layer(N, net.linear)
net.compile()
net.set_random_matrixes()
# "Вытаскивание" выражений для выходов и свертки выходов в
net_output = net.return_graph()
net_conv = tf.reduce_sum(net_output, 0)
sess = net.return_session()
def net_conv_value(x):
    return net.calc(net_conv, {net.x: x})
dim = net.return_unroll_dim()
print(dim)
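A quick shape check helps confirm the wiring: the net maps a 1 x m grid of points to N output rows, and net_conv sums those rows pointwise. A minimal sketch, assuming net.run and net.calc return NumPy arrays as they are used elsewhere in this notebook:
outs = net.run(x_collocations)              # raw outputs, expected shape (N, m)
conv = net_conv_value(x_collocations)       # pointwise sum of the outputs, expected shape (m,)
print(outs.shape, conv.shape)
print(np.allclose(outs.sum(axis=0), conv))  # True if both evaluate the same graph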
In [4]:
# Expression for the image of the network outputs under the action of the Hamiltonian:
# H = -(hbar^2 / 2m) d^2/dx^2 + (m omega^2 / 2) x^2
first_derivative = tf.gradients(net_output, net.x)[0]
net_images = (-(plank * plank / (2 * mass)) * tf.gradients(first_derivative, net.x)[0]
              + (omega_square * mass / 2) * tf.multiply(tf.square(net.x), net_output))
net_images_conv = tf.reduce_sum(net_images, 0)
def net_images_value(x):
    return net.calc(net_images, {net.x: x})

def net_images_conv_value(x):
    return net.calc(net_images_conv, {net.x: x})
# Linear regression: least-squares weights for projecting the summed image onto the raw outputs
A = tf.transpose(net_output)
y = net_images_conv
y = tf.expand_dims(y, -1)
omega = tf.matmul(tf.matmul(tf.matrix_inverse(tf.matmul(tf.transpose(A), A)), tf.transpose(A)), y)
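The omega expression above is the normal-equations solution omega = (A^T A)^(-1) A^T y of the least-squares problem min ||A*omega - y||^2: the weights that best represent the summed image as a linear combination of the raw outputs. The same fit in plain NumPy, where np.linalg.lstsq is the numerically safer equivalent of the explicit inverse (a sketch under the shapes used above; A_np, y_np, w_np are illustrative names):
A_np = net.run(x_collocations).T                    # design matrix, shape (m, N)
y_np = net_images_conv_value(x_collocations)        # target, shape (m,)
w_np, *_ = np.linalg.lstsq(A_np, y_np, rcond=None)  # least-squares weights, shape (N,)
print(w_np)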
In [5]:
def stochastic_grid1():
    # Uniform random collocation grid on [x_0_min, x_0_max]
    grid = rand.random((1, m)) * (np.abs(x_0_max) + np.abs(x_0_min)) - np.abs(x_0_min)
    return grid
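Note the abs-based mapping lands in [x_0_min, x_0_max] only because the interval is symmetric about zero; a more general form would be x_0_min + rand.random((1, m)) * (x_0_max - x_0_min). A quick sanity check (g is an illustrative name):
g = stochastic_grid1()
print(g.shape, g.min(), g.max())  # expect (1, 50) with values inside [-3, 3]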
In [6]:
# Expression for the cost function:
J_expression = (1 / m) * tf.reduce_sum(tf.square(net_images_conv - tf.reduce_sum(tf.multiply(omega, net_output), 0)))

def J(params, grid):
    net.roll_matrixes(params)
    cost = net.calc(J_expression, {net.x: grid})
    return cost

print(J(np.random.uniform(-5, 5, dim), stochastic_grid1()))
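The cost J is the mean squared residual left after projecting the summed image onto the span of the raw outputs; it vanishes exactly when the outputs form an invariant subspace of the Hamiltonian. A pure-NumPy restatement of the same quantity (a sketch, assuming outputs outs and images imgs of shape (N, m); J_numpy is an illustrative name):
def J_numpy(outs, imgs):
    A_mat = outs.T                                     # (m, N) design matrix
    y_vec = imgs.sum(axis=0)                           # summed image, shape (m,)
    w, *_ = np.linalg.lstsq(A_mat, y_vec, rcond=None)  # least-squares weights
    r = y_vec - A_mat @ w                              # projection residual
    return np.mean(r ** 2)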
In [18]:
x_for_test = np.linspace(x_0_min, x_0_max, m, endpoint=True).reshape(1, m)
A_test = tf.transpose(net_output)
y_test = net_conv
y_test = tf.expand_dims(y_test, -1)
omega_test = tf.matmul(tf.matmul(tf.matrix_inverse(tf.matmul(tf.transpose(A_test), A_test)), tf.transpose(A_test)), y_test)
In [ ]:
print(net.calc(omega_test, {net.x: x_for_test}))
In [7]:
iter_number = 15000
opt = DifferentialEvolution(dim, iter_number, min_element=-2, max_element=2)
In [8]:
optimisation_result = opt.optimize_stochastic(func=J, grid_func=stochastic_grid1, dim=dim)
In [9]:
i_list = np.linspace(0, iter_number, iter_number)
cost_list = opt.return_cost_list()
plt.plot(i_list, np.power(cost_list, -1))  # plot 1/J so late-stage convergence shows as growth
Out[9]:
[figure: inverse of the cost function vs. iteration number]
In [10]:
this_grid = stochastic_grid1()
print(J(optimisation_result, this_grid), this_grid)
In [11]:
net.roll_matrixes(optimisation_result)
# N collocation points make both matrices square (N x N) for the generalized eigenproblem
collocation_grid = rand.random((1, N)) * (np.abs(x_0_max) + np.abs(x_0_min)) - np.abs(x_0_min)
hamiltonian_matrix = net_images_value(collocation_grid)  # (H phi_i)(x_j)
bind_matrix = net.run(collocation_grid)                  # phi_i(x_j)
In [12]:
from scipy.linalg import eig
eigvals, eigvecs = eig(hamiltonian_matrix, bind_matrix)
eigvals_norm = np.abs(eigvals)
eigvals_real = np.real(eigvals)
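scipy.linalg.eig with two arguments solves the generalized problem H c = E B c, so the recovered E should approximate the oscillator levels n + 1/2 in these units, with the lowest levels the most trustworthy. A hedged comparison against the analytic spectrum (illustrative names, assuming the optimization converged):
levels = np.sort(eigvals_real)           # computed levels, ascending
expected = np.arange(levels.size) + 0.5  # analytic levels n + 1/2 in these units
for computed, exact in zip(levels[:5], expected[:5]):
    print('computed %8.4f   analytic %8.4f' % (computed, exact))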
In [13]:
for eigval in eigvals:
    print(eigval)
#print(eigvecs[2,:])
In [14]:
x = np.linspace(0, 10, 1000)
i = 0
for eigval in eigvals_real[1:11]:
    i += 1
    plt.plot(x, eigval + 0 * x)
    plt.plot(1 + i, eigval, 'x')
In [15]:
x_obs = np.linspace(x_0_min, x_0_max, 10000).reshape(1, 10000)
for i in range(5):
    # scipy.linalg.eig returns eigenvectors as columns, so take eigvecs[:, i]
    y = net_images_value(x_obs).transpose() * eigvecs[:, i]
    y = np.sum(y, 1)
    plt.plot(np.linspace(x_0_min, x_0_max, 10000), y)
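These curves are unnormalized. A sketch of L2-normalizing one of them on the plotting grid via the trapezoidal rule (dx, psi, norm are illustrative names):
dx = (x_0_max - x_0_min) / (x_obs.size - 1)           # grid spacing
psi = np.sum(net_images_value(x_obs).transpose() * eigvecs[:, 0], 1)
norm = np.sqrt(np.trapz(psi ** 2, dx=dx))             # L2 norm on the grid
plt.plot(x_obs[0], psi / norm)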
In [16]:
# Visualization functions:
x_observe = np.linspace(-10, 10, 1000).reshape(1, 1000)
#---------------------------------------------------------------------------
def show_outputs_conv(x):
    y = net_conv_value(x)
    plt.title('Output:')
    plt.grid(True)
    plt.plot(x[0], y)
#---------------------------------------------------------------------------
def show_outputs(x):
    y = net.run(x)
    plt.title('Outputs:')
    plt.grid(True)
    for i in range(N):
        net_i = y[i, :]
        plt.plot(x[0], net_i)
#---------------------------------------------------------------------------
def show_images(x):
    y = net_images_value(x)
    plt.title('Images:')
    plt.grid(True)
    for i in range(N):
        net_image_i = y[i, :]
        plt.plot(x[0], net_image_i)
#---------------------------------------------------------------------------
def show_images_conv(x):
    y = net_images_conv_value(x)
    plt.title('Images convolution:')
    plt.grid(True)
    plt.plot(x[0], y)
#---------------------------------------------------------------------------
def plot_four(x):
    y1 = net.run(x)
    y2 = net_images_value(x)
    y3 = net_conv_value(x)
    y4 = net_images_conv_value(x)
    fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2)
    ax1.set_title('Original net outputs convolution:')
    ax1.plot(x[0], y3, 'x')
    ax2.set_title('Image of net outputs convolution:')
    ax2.plot(x[0], y4, 'x')
    ax3.set_title('Original net outputs:')
    for i in range(N):
        net_i = y1[i, :]
        ax3.plot(x[0], net_i)
    ax4.set_title('Image of net outputs:')
    for i in range(N):
        net_image_i = y2[i, :]
        ax4.plot(x[0], net_image_i)
    fig.subplots_adjust(left=0, bottom=0, right=2, top=2, hspace=0.2, wspace=0.2)
#---------------------------------------------------------------------------
In [17]:
plot_four(np.linspace(-3,3,1000).reshape(1, 1000))