In [1]:
# Required imports.
import sys, os
sys.path.append(os.path.join(sys.path[0], '../source/'))
sys.path.append(os.path.join(sys.path[0], '../../'))
import numpy as np
from numpy import linalg as LA
import tensorflow as tf
from matplotlib import pylab as plt
from tqdm import tqdm_notebook
from IPython.display import clear_output
import numpy.random as rand
from physlearn.NeuralNet.NeuralNetPro import NeuralNetPro
from physlearn.DifferentialEvolution import DifferentialEvolution
from physlearn.NelderMead import NelderMead
import d1_osc
import ann_constructor
import math_util
# Model Parameters
sigmoid_ammount = 30
m = 500 # size of the training grid
M = 4 # number of output neurons (basis functions)
hidden_ammount = 25
a = -6
b = 6
# Fixed training grid (no optimizer robust to stochastic resampling yet).
# Shape (1, m): one input feature, m sample points on [a, b].
train_xi = np.linspace(a, b, m, endpoint=True).reshape(1, m)
%matplotlib inline
In [2]:
# ANN
# Build the network and the TF expressions for its outputs and their sum.
net, net_output, net_sum, sess = ann_constructor.return_deep_net_expressions(M, sigmoid_ammount, hidden_ammount)
# Image of the net outputs under the Hamiltonian H = -d^2/dx^2 + x^2
# (1-D harmonic oscillator). Task-dependent.
first_deriative = tf.gradients(net_output, net.x)[0]
# NOTE(review): the potential term uses net.output while the derivative terms
# use net_output -- presumably the same tensor; confirm in ann_constructor.
net_images = (-(tf.gradients(first_deriative, net.x)[0]) + tf.multiply(tf.square(net.x), net.output))
# Sum of the images over the M output neurons (axis 0).
net_images_sum = tf.reduce_sum(input_tensor = net_images, axis = 0)
def net_outs_value(x):
    """Evaluate every network output at the sample points x."""
    feed = {net.x: x}
    return net.calc(net_output, feed)
def net_sum_value(x):
    """Evaluate the sum of the network outputs at the sample points x."""
    feed = {net.x: x}
    return net.calc(net_sum, feed)
def net_images_value(x):
    """Evaluate the Hamiltonian image of each network output at x."""
    feed = {net.x: x}
    return net.calc(net_images, feed)
def net_images_sum_value(x):
    """Evaluate the summed Hamiltonian image at the sample points x."""
    feed = {net.x: x}
    return net.calc(net_images_sum, feed)
# Total number of scalar parameters in the unrolled weight vector.
dim = net.return_unroll_dim()
print(dim)
In [3]:
# Linear regression (least squares via the normal equations):
# find omega minimizing ||A @ omega - y||, where the columns of A are the
# network outputs on the grid and y is the summed image.
A = tf.transpose(net_output)
A_T = net_output
y = net_images_sum
y = tf.expand_dims(y, -1)
# omega = (A^T A)^{-1} A^T y
# NOTE(review): tf.matrix_inverse is the TF1 name (tf.linalg.inv in TF2);
# tf.linalg.lstsq would be numerically safer -- verify the TF version in use.
omega = tf.matmul(tf.matmul(tf.matrix_inverse(tf.matmul(A_T, A)), A_T), y)
# Functional J: mean squared residual of the regression fit, i.e. a measure
# of how far the summed image is from the span of the network outputs.
regression_fit = tf.matmul(tf.transpose(net_output), omega)
noninvariance_measure = (1/m) * tf.reduce_sum(tf.square(tf.expand_dims(net_images_sum, -1) - regression_fit))
J_expression = noninvariance_measure
def J(params):
    """Objective for the derivative-free optimizer.

    Rolls the unrolled weight vector ``params`` back into the network,
    then returns the non-invariance measure on the training grid plus
    penalties pushing the outputs toward zero outside [a, b].

    params -- 1-D array of length ``dim`` supplied by the optimizer.
    Returns a scalar cost (float).
    """
    # Load the candidate weights into the network (side effect on `net`).
    net.roll_matrixes(params)
    # Mean squared residual of fitting the summed image by the outputs.
    # (Removed a dead `net.calc(net_output, ...)` evaluation whose result
    # was only consumed by commented-out scratch code.)
    cost = net.calc(J_expression, {net.x: train_xi})
    # Decay penalties: two probe points just beyond the training interval,
    # the farther pair weighted more heavily.
    near_probe = np.linspace(a - 3, b + 3, 2, endpoint=True).reshape(1, 2)
    far_probe = np.linspace(a - 4, b + 4, 2, endpoint=True).reshape(1, 2)
    cost += np.sum(np.square(net.run(near_probe)))
    cost += 2 * np.sum(np.square(net.run(far_probe)))
    return cost
# Optimization settings.
iter_number = 7000
# Alternative population-based optimizer, kept for reference:
#opt = DifferentialEvolution(amount_of_individuals = 6*dim, end_cond = iter_number, f=0.65 )
opt = NelderMead(end_cond = iter_number)
In [4]:
# Run the derivative-free minimization of J over the unrolled weights.
optimisation_result = opt.optimize(func=J, dim=dim)
# Note: calling J also rolls the optimal weights back into the network.
print("J after optimisation: ", J(optimisation_result))
In [5]:
# Функции визуализации:
#---------------------------------------------------------------------------
def show_outputs_sum(x):
    """Plot the sum of the network outputs over the grid x (shape (1, m))."""
    summed = net_sum_value(x)
    plt.title('Output:')
    plt.grid(True)
    plt.plot(x[0], summed)
#---------------------------------------------------------------------------
def show_outputs(x):
    """Plot each of the M network outputs on a single axis."""
    outs = net.run(x)
    plt.title('Outputs:')
    plt.grid(True)
    for row in outs[:M]:
        plt.plot(x[0], row)
#---------------------------------------------------------------------------
def show_images(x):
    """Plot the Hamiltonian image of each of the M outputs on one axis."""
    images = net_images_value(x)
    plt.title('Images:')
    plt.grid(True)
    for row in images[:M]:
        plt.plot(x[0], row)
#---------------------------------------------------------------------------
def show_images_sum(x):
    """Plot the sum of the Hamiltonian images over the grid x (shape (1, m))."""
    y = net_images_sum_value(x)
    # Fixed title: it was 'Output:', copy-pasted from show_outputs_sum,
    # which made the two plots indistinguishable.
    plt.title('Images sum:')
    plt.grid(True)
    plt.plot(x[0], y)
#---------------------------------------------------------------------------
def plot_four(x):
    """Draw a 2x2 panel: summed outputs, summed images, per-neuron outputs,
    and per-neuron images, all over the grid x (shape (1, m)).

    Fixes: the original called plt.figure() immediately before plt.subplots(),
    leaking an extra blank figure, and then shadowed the axes returned by
    plt.subplots() with redundant plt.subplot(22x) calls.
    """
    outs = net.run(x)
    images = net_images_value(x)
    outs_sum = net_sum_value(x)
    images_sum = net_images_sum_value(x)
    fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2)
    ax1.set_title('Sum of original net outputs:')
    ax1.plot(x[0], outs_sum, 'x')
    ax2.set_title('Sum of net images:')
    ax2.plot(x[0], images_sum, 'x')
    ax3.set_title('Original net outputs:')
    for i in range(M):
        ax3.plot(x[0], outs[i, :])
    ax4.set_title('Image of net outputs:')
    for i in range(M):
        ax4.plot(x[0], images[i, :])
    fig.subplots_adjust(left = 0, bottom = 0, right = 2, top = 2, hspace = 0.2, wspace = 0.2)
#---------------------------------------------------------------------------
In [6]:
# Visualize the trained network over the training interval.
plot_four(np.linspace(a, b, m, endpoint=True).reshape(1,m))
In [8]:
# NOTE(review): presumably plots the analytic oscillator wavefunction n=0
# on the training grid for comparison -- confirm in d1_osc.
d1_osc.show_wf(0, train_xi)
In [ ]:
# Disabled export of the output matrix to a text file; the bare triple-quoted
# string below is a no-op kept for reference.
'''
outs_matrix = net.calc(net_output, {net.x : train_xi})
outs_matrix_file = open("outs_matrix1.txt", "wb")
np.savetxt(outs_matrix_file, outs_matrix, delimiter=' ')
outs_matrix_file.close()
'''
print('done')
In [ ]:
In [ ]:
In [ ]:
In [ ]: