In [1]:
# Required imports.
import sys, os
sys.path.append(os.path.join(sys.path[0], '../source/'))
sys.path.append(os.path.join(sys.path[0], '../../'))
import numpy as np
from numpy import linalg as LA
import tensorflow as tf
from matplotlib import pylab as plt
from tqdm import tqdm_notebook
from IPython.display import clear_output
import numpy.random as rand
from physlearn.NeuralNet.NeuralNetPro import NeuralNetPro
from physlearn.DifferentialEvolution import DifferentialEvolution
import d1_osc
import ann_constructor
import math_util

# Model parameters
sigmoid_amount = 20
m = 500  # training grid size
M = 4    # number of output neurons (basis functions)
a = -4
b = 4
# Training grid (fixed for now: there is no optimizer yet that is robust to stochastic resampling)
train_xi = np.linspace(a, b, m, endpoint=True).reshape(1, m)

%matplotlib inline


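For context: the operator assembled in the next cell is the dimensionless 1D harmonic-oscillator Hamiltonian

    H psi(x) = -psi''(x) + x^2 * psi(x),

whose exact spectrum is E_n = 2n + 1 (n = 0, 1, 2, ...), with Gaussian-times-Hermite eigenfunctions. These known values serve only as a sanity reference for the trained outputs below.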

In [2]:
# ANN construction
net, net_output, net_sum, sess = ann_constructor.return_net_expressions(M, sigmoid_amount)
# Expression for the image of the network outputs under the Hamiltonian
# H = -d^2/dx^2 + x^2 (1D harmonic oscillator). Task-dependent.
first_derivative = tf.gradients(net_output, net.x)[0]
net_images = -tf.gradients(first_derivative, net.x)[0] + tf.multiply(tf.square(net.x), net.output)
net_images_sum = tf.reduce_sum(input_tensor=net_images, axis=0)

def net_outs_value(x):
    return net.calc(net_output, {net.x : x})
def net_sum_value(x):
    return net.calc(net_sum, {net.x : x})
def net_images_value(x):
    return net.calc(net_images, {net.x: x})
def net_images_sum_value(x):
    return net.calc(net_images_sum, {net.x: x})

dim = net.return_unroll_dim()
print(dim)


124
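The nested tf.gradients pattern above is what turns the network outputs into their Hamiltonian images. As a standalone sanity sketch (TF 1.x style, matching the session-based code here; x_check, psi_check and the temporary session are names introduced only for this check), applying the same pattern to the known ground state exp(-x^2/2) should reproduce it exactly, since H psi_0 = 1 * psi_0:

x_check = tf.placeholder(tf.float64, shape=(1, None))
psi_check = tf.exp(-tf.square(x_check) / 2)          # exact ground state
dpsi = tf.gradients(psi_check, x_check)[0]           # first derivative
h_psi = -tf.gradients(dpsi, x_check)[0] + tf.multiply(tf.square(x_check), psi_check)

grid = np.linspace(a, b, 11).reshape(1, 11)
with tf.Session() as check_sess:
    ratio = check_sess.run(h_psi / psi_check, {x_check: grid})
print(ratio)  # every entry should be ~1.0 (the ground-state eigenvalue)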

In [3]:
# Definition of the functional J.
# special_matrix[k, i] = (H psi_k)(x_i) / psi_k(x_i): for an exact eigenfunction every
# entry of row k equals the eigenvalue E_k, so its variance along the grid vanishes.
special_matrix = tf.multiply(net_images, tf.pow(net_output, -1))

def nonortho_factor(net_outputs_matrix):
    # Non-orthogonality penalty: norm of the off-diagonal part of the
    # diagonally normalized Gram matrix of the network outputs.
    gram_matrix = np.matmul(net_outputs_matrix, np.transpose(net_outputs_matrix))
    diag_gram_matrix = np.eye(M)
    diag_gram_matrix.flat[:: M + 1] += -1 + gram_matrix.diagonal()
    normal_gram_matrix = np.matmul(LA.inv(diag_gram_matrix), gram_matrix)
    np.fill_diagonal(normal_gram_matrix, 0)
    res = math_util.norm(normal_gram_matrix)
    return res

def J(params):
    net.roll_matrixes(params) 
    #outs_matrix = net.calc(net_output, {net.x : train_xi})
    #nonortho_factor_value = nonortho_factor(outs_matrix)
    noneigen_factor = np.sum(np.var(net.calc(special_matrix, {net.x : train_xi}), axis = 1 ))
    cost = noneigen_factor
    #cost += 2*nonortho_factor_value 
    return cost

# Optimization
iter_number = 1500
opt = DifferentialEvolution(amount_of_individuals=6*dim, end_cond=iter_number, f=0.75)
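Why the variance term in J drives the outputs toward eigenfunctions: for an exact eigenfunction the ratio (H psi)(x)/psi(x) takes the same value at every grid point, so its variance along the grid is zero, while any non-eigenfunction gives a spread of values. A minimal NumPy-only illustration (independent of the graph above; the finite-difference helper and test functions are introduced just for this sketch):

x_fd = np.linspace(a, b, m)
h_fd = x_fd[1] - x_fd[0]

def fd_hamiltonian_ratio(psi_vals):
    # Central-difference (H psi)/psi with H = -d2/dx2 + x^2; endpoints dropped.
    d2 = (psi_vals[2:] - 2 * psi_vals[1:-1] + psi_vals[:-2]) / h_fd**2
    return (-d2 + x_fd[1:-1]**2 * psi_vals[1:-1]) / psi_vals[1:-1]

print(np.var(fd_hamiltonian_ratio(np.exp(-x_fd**2 / 2))))  # ground state: ~0 up to discretization error
print(np.var(fd_hamiltonian_ratio(np.exp(-x_fd**2))))      # not an eigenfunction: ratio = 2 - 3x^2, large variance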

In [4]:
optimisation_result = opt.optimize(func=J, dim=dim)
print("J after optimisation: ", J(optimisation_result))


100%|██████████████████████████████████████████████████████████████████████████████| 1500/1500 [16:44<00:00,  1.49it/s]
J after optimisation:  65.39999550712277

In [5]:
# Visualization functions:

#---------------------------------------------------------------------------
def show_outputs_sum(x):
    y = net_sum_value(x)
    plt.title('Output:')
    plt.grid(True)
    plt.plot(x[0], y)
    
#---------------------------------------------------------------------------
def show_outputs(x):
    y = net.run(x)
    plt.title('Outputs:')
    plt.grid(True)
    for i in range(M):
        net_i = y[i,:] 
        plt.plot(x[0], net_i)
#---------------------------------------------------------------------------
def show_images(x):
    y = net_images_value(x)
    plt.title('Images:')
    plt.grid(True)
    for i in range(M):
        net_image_i = y[i,:] 
        plt.plot(x[0], net_image_i)
#---------------------------------------------------------------------------
def show_images_sum(x):
    y = net_images_sum_value(x)
    plt.title('Images sum:')
    plt.grid(True)
    plt.plot(x[0], y)    
#---------------------------------------------------------------------------
def plot_four(x):
    y1 = net.run(x)
    y2 = net_images_value(x)
    y3 = net_sum_value(x)
    y4 = net_images_sum_value(x)
    
    # A single subplots() call creates the figure and its four axes
    # (an extra plt.figure() here would leave an empty stray figure).
    fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2)

    ax1.set_title('Sum of original net outputs:')
    ax1.plot(x[0], y3, 'x')

    ax2.set_title('Sum of net images:')
    ax2.plot(x[0], y4, 'x')

    ax3.set_title('Original net outputs:')
    for i in range(M):
        ax3.plot(x[0], y1[i, :])

    ax4.set_title('Image of net outputs:')
    for i in range(M):
        ax4.plot(x[0], y2[i, :])

    fig.subplots_adjust(left=0, bottom=0, right=2, top=2, hspace=0.2, wspace=0.2)
#---------------------------------------------------------------------------

In [6]:
plot_four(np.linspace(a, b, m, endpoint=True).reshape(1,m))


[Figure: four-panel plot, sums of the net outputs and of their Hamiltonian images (top row), individual outputs and images (bottom row).]

In [7]:
d1_osc.show_wf(0, train_xi.reshape(m,1))


---------------------------------------------------------------------------
ValueError                                Traceback (most recent call last)
<ipython-input-7-c636ef721e4f> in <module>()
----> 1 d1_osc.show_wf(0,train_xi.reshape(m,1))

~\Documents\GitKraken\SPBU_COMP_PHYS_NN_QM\ANN_SOLVER\current_state\source\d1_osc.py in show_wf(n, x)
     22     plt.title('Output:')
     23     plt.grid(True)
---> 24     plt.plot(x[0], wf(n,x), "r--")

(... matplotlib-internal frames omitted ...)

ValueError: x and y must have same first dimension, but have shapes (1,) and (500,)
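The ValueError is a shape issue, not a problem inside d1_osc: show_wf plots x[0] against wf(n, x), and with the grid reshaped to (m, 1) the slice x[0] is a single-element row, while wf returns one value per grid point. A quick standalone check of the two layouts (illustrative names):

x_col = train_xi.reshape(m, 1)   # column layout, as passed above
print(x_col[0].shape)            # (1,)   -> only one x value reaches the plot's x-axis
print(train_xi[0].shape)         # (500,) -> the (1, m) layout keeps all grid points in x[0]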

In [ ]:
outs_matrix = net.calc(net_output, {net.x: train_xi})
with open("outs_matrix_with_variance.txt", "wb") as outs_matrix_file:
    np.savetxt(outs_matrix_file, outs_matrix, delimiter=' ')
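To reload the saved matrix in a later session (a minimal sketch; the filename is the one written above):

outs_matrix_loaded = np.loadtxt("outs_matrix_with_variance.txt")
print(outs_matrix_loaded.shape)  # expected (M, m), i.e. (4, 500)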
