In [1]:
# Required imports.
import sys, os
import numpy as np
from numpy import linalg as LA
import tensorflow as tf
from matplotlib import pylab as plt
from tqdm import tqdm_notebook
from IPython.display import clear_output
import numpy.random as rand
from DifferentialEvolution import DifferentialEvolution

sys.path.append(os.path.join(sys.path[0], '../../'))
from physlearn.NeuralNet.NeuralNetPro import NeuralNetPro
#from physlearn.DifferentialEvolution import DifferentialEvolution
from physlearn.NelderMead import NelderMead
%matplotlib inline


C:\Work\Programms\Anaconda3\lib\site-packages\h5py\__init__.py:36: FutureWarning: Conversion of the second argument of issubdtype from `float` to `np.floating` is deprecated. In future, it will be treated as `np.float64 == np.dtype(float).type`.
  from ._conv import register_converters as _register_converters

In [2]:
# Model Parameters
mass = 1
omega_square = 1
plank = 1
sigmoid_ammount = 15  # number of hidden sigmoid neurons
m = 50   # number of collocation points
N = 15   # number of network output neurons
x_0_min = -3  # left edge of the collocation interval
x_0_max = 3   # right edge of the collocation interval

x_collocations = np.linspace(x_0_min, x_0_max, m, endpoint=True).reshape(1, m)
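With plank = mass = omega_square = 1 the operator being approximated is the harmonic-oscillator Hamiltonian H = -(1/2) d^2/dx^2 + (1/2) x^2, whose exact spectrum is E_n = n + 1/2 (n = 0, 1, 2, ...); these values are the reference for the generalized eigenvalues computed below.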

In [3]:
# Build the network
net = NeuralNetPro(-2,2)
net.add_input_layer(1)
net.add(sigmoid_ammount, tf.sigmoid)
net.add_output_layer(N, net.linear)
net.compile()

net.set_random_matrixes()

# "Вытаскивание" выражений для выходов и свертки выходов в 
net_output = net.return_graph()
net_conv = tf.reduce_sum(net_output, 0)
sess = net.return_session()

def net_conv_value(x):
    return net.calc(net_conv, {net.x : x})

dim = net.return_unroll_dim()
print(dim)


270
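The unrolled dimension is consistent with the architecture: the hidden layer contributes 1·15 weights + 15 biases and the linear output layer 15·15 weights + 15 biases, i.e. 15 + 15 + 225 + 15 = 270 trainable parameters.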

In [4]:
# Expression defining the image of the network outputs under the action of the Hamiltonian.

first_deriative = tf.gradients(net_output, net.x)[0]

net_images = (-(plank*plank/(2*mass)) * tf.gradients(first_deriative, net.x)[0]
              + (omega_square*mass/2) * tf.multiply(tf.square(net.x), net.output))

net_images_conv = tf.reduce_sum(net_images, 0)

def net_images_value(x):
    return net.calc(net_images, {net.x: x})

def net_images_conv_value(x):
    return net.calc(net_images_conv, {net.x: x})

# Linear regression: least-squares coefficients omega = (A^T A)^(-1) A^T y
A = tf.transpose(net_output)
y = net_images_conv
y = tf.expand_dims(y, -1)
omega = tf.matmul(tf.matmul(tf.matrix_inverse(tf.matmul(tf.transpose(A), A)), tf.transpose(A)), y)
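The expression above is the normal-equations form of ordinary least squares, omega = (A^T A)^(-1) A^T y. For reference, the same coefficients could be reproduced outside the TensorFlow graph with NumPy; the sketch below is only illustrative and assumes net.run returns the (N, m) matrix of output values on a grid:

# Illustrative NumPy counterpart of the in-graph regression (a sketch, not executed in this run)
A_np = net.run(x_collocations).T                              # (m, N) outputs at the collocation points
y_np = net_images_conv_value(x_collocations).reshape(-1, 1)   # (m, 1) Hamiltonian image of the summed output
omega_np, residuals, rank, sv = np.linalg.lstsq(A_np, y_np, rcond=None)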

In [5]:
def stochstic_grid1():
    # Random collocation grid: m points drawn uniformly from [x_0_min, x_0_max]
    grid = rand.random((1, m)) * (np.abs(x_0_max) + np.abs(x_0_min)) - np.abs(x_0_min)
    return grid

In [6]:
# Expression for the cost function:
J_expression = (1/m) * tf.reduce_sum(tf.square(net_images_conv - tf.reduce_sum(tf.multiply(omega,net_output), 0)))

def J(params, grid):
    net.roll_matrixes(params)    
    cost = net.calc(J_expression, {net.x : grid})
    return cost

print(J(np.random.uniform(-5, 5, dim), stochstic_grid1()))


8.577208021224427
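J measures how far the Hamiltonian image of the summed output deviates from the best linear combination of the outputs themselves; driving J to zero therefore pushes the span of the N outputs towards an (approximately) invariant subspace of H.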

In [18]:
x_for_test = np.linspace(x_0_min, x_0_max, m, endpoint=True).reshape(1, m) 
A_test = tf.transpose(net_output)
y_test = net_conv
y_test = tf.expand_dims(y, -1)
omega_test = tf.matmul(tf.matmul(tf.matrix_inverse(tf.matmul(tf.transpose(A_test), A_test)), tf.transpose(A_test)), y_test)


---------------------------------------------------------------------------
ValueError                                Traceback (most recent call last)
C:\Work\Programms\Anaconda3\lib\site-packages\tensorflow\python\framework\op_def_library.py in _apply_op_helper(self, op_type_name, name, **keywords)
    509                 as_ref=input_arg.is_ref,
--> 510                 preferred_dtype=default_dtype)
    511           except TypeError as err:

C:\Work\Programms\Anaconda3\lib\site-packages\tensorflow\python\framework\ops.py in internal_convert_to_tensor(value, dtype, name, as_ref, preferred_dtype, ctx)
   1035     if ret is None:
-> 1036       ret = conversion_func(value, dtype=dtype, name=name, as_ref=as_ref)
   1037 

C:\Work\Programms\Anaconda3\lib\site-packages\tensorflow\python\framework\ops.py in _TensorTensorConversionFunction(t, dtype, name, as_ref)
    878         "Tensor conversion requested dtype %s for Tensor with dtype %s: %r" %
--> 879         (dtype.name, t.dtype.name, str(t)))
    880   return t

ValueError: Tensor conversion requested dtype float64 for Tensor with dtype complex128: 'Tensor("ExpandDims_1:0", shape=(10000, 1), dtype=complex128)'

During handling of the above exception, another exception occurred:

TypeError                                 Traceback (most recent call last)
<ipython-input-18-d47613cbdd34> in <module>()
      3 y_test = net_conv
      4 y_test = tf.expand_dims(y, -1)
----> 5 omega_test = tf.matmul(tf.matmul(tf.matrix_inverse(tf.matmul(tf.transpose(A_test), A_test)), tf.transpose(A_test)), y_test)

C:\Work\Programms\Anaconda3\lib\site-packages\tensorflow\python\ops\math_ops.py in matmul(a, b, transpose_a, transpose_b, adjoint_a, adjoint_b, a_is_sparse, b_is_sparse, name)
   2062     else:
   2063       return gen_math_ops._mat_mul(
-> 2064           a, b, transpose_a=transpose_a, transpose_b=transpose_b, name=name)
   2065 
   2066 

C:\Work\Programms\Anaconda3\lib\site-packages\tensorflow\python\ops\gen_math_ops.py in _mat_mul(a, b, transpose_a, transpose_b, name)
   2788     _, _, _op = _op_def_lib._apply_op_helper(
   2789         "MatMul", a=a, b=b, transpose_a=transpose_a, transpose_b=transpose_b,
-> 2790         name=name)
   2791     _result = _op.outputs[:]
   2792     _inputs_flat = _op.inputs

C:\Work\Programms\Anaconda3\lib\site-packages\tensorflow\python\framework\op_def_library.py in _apply_op_helper(self, op_type_name, name, **keywords)
    544                   "%s type %s of argument '%s'." %
    545                   (prefix, dtypes.as_dtype(attrs[input_arg.type_attr]).name,
--> 546                    inferred_from[input_arg.type_attr]))
    547 
    548           types = [values.dtype]

TypeError: Input 'b' of 'MatMul' Op has type complex128 that does not match type float64 of argument 'a'.
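The error is triggered by the typo on the fourth line of the cell: tf.expand_dims(y, -1) should read y_test. By the time this cell was executed, the name y held the complex NumPy array of length 10000 produced in In [15], hence the complex128 / float64 mismatch in the traceback.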

In [ ]:
print(net.calc(omega_test, {net.x: x_for_test}))

In [ ]:


In [7]:
iter_number = 15000
opt = DifferentialEvolution(dim, iter_number, min_element = -2, max_element = 2)

In [8]:
optimisation_result = opt.optimize_stochastic(func=J, grid_func=stochstic_grid1, dim=dim)


100%|████████████████████████████████████████████████████████████████████████████| 15000/15000 [30:43<00:00,  8.14it/s]
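The run takes about half an hour at roughly 8 iterations per second. In stochastic mode the optimizer presumably draws a fresh random grid from grid_func at every iteration, so the population is never fitted to one fixed set of collocation points.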

In [9]:
i_list = np.linspace(0, iter_number, iter_number)
cost_list = opt.return_cost_list()
plt.plot(i_list, np.power(cost_list, -1))


Out[9]:
[<matplotlib.lines.Line2D at 0x213548b2828>]

In [10]:
this_grid = stochstic_grid1()
print(J(optimisation_result, this_grid), this_grid)


1.2125283944228286e-06 [[-1.38966563e+00  2.64520288e+00  9.44218915e-01 -2.14065494e+00
  -2.53323626e+00  1.33858052e+00  5.04765720e-01 -8.04718515e-01
  -2.65808419e+00  2.97227662e+00  1.48345050e+00 -5.16287753e-01
  -5.31739462e-02 -8.88158271e-01 -4.66240437e-01 -2.24522686e+00
  -7.09232515e-01  2.72697419e-01  9.84753390e-01  8.17158549e-01
   1.99725485e+00 -2.26604629e+00  5.86967014e-01 -2.25674913e+00
   6.29612269e-01  1.52435809e+00  1.97370413e+00  1.14733286e+00
  -6.99067786e-01 -2.28369265e+00  2.81360031e+00  2.63983862e+00
  -2.46960804e+00  2.43611758e+00  3.61345173e-01  9.39852108e-01
   2.63171444e+00  6.45100339e-01 -1.25753381e+00 -1.19848884e+00
   1.64309825e+00  2.47031544e+00  1.70909788e+00  2.68892662e+00
   6.47850758e-04 -1.46454397e+00 -2.89076063e+00 -1.47825446e+00
  -2.54025196e+00 -1.08915136e+00]]

In [11]:
net.roll_matrixes(optimisation_result)
# N collocation points, so that the matrices below are square (N x N) for the generalized eigenproblem
collocation_grid = rand.random((1, N)) * (np.abs(x_0_max) + np.abs(x_0_min)) - np.abs(x_0_min)
hamiltonian_matrix = net_images_value(collocation_grid)
bind_matrix = net.run(collocation_grid)

In [12]:
from scipy.linalg import eig
eigvals, eigvecs = eig(hamiltonian_matrix, bind_matrix)
eigvals_norm = np.abs(eigvals)
eigvals_real = np.real(eigvals)
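scipy.linalg.eig(A, B) solves the generalized eigenvalue problem A v = lambda B v. Here A holds the Hamiltonian images and B the raw outputs evaluated at the same N points, so the eigenvalues approximate the spectrum of H restricted to the subspace spanned by the network outputs (to be compared with the exact E_n = n + 1/2).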

In [13]:
for eigval in eigvals:
    print(eigval)
#print(eigvecs[2,:])


(-271.1712705393781+0j)
(3.7071906114016318+4.61980573794218j)
(3.7071906114016313-4.61980573794218j)
(5.556815367450572+0j)
(-0.13057991572511848+1.9546231885305658j)
(-0.13057991572511848-1.954623188530566j)
(2.477616490604321+0j)
(-0.6594504981304619+0.2458866894159289j)
(-0.6594504981304617-0.2458866894159289j)
(1.6046646279014012+0j)
(-0.17644721662730906+0j)
(0.1773058163437089+0.004852864091740892j)
(0.17730581634370896-0.004852864091740892j)
(0.7938717553927267+0j)
(0.8285147997575589+0j)

In [14]:
x = np.linspace(0,10,1000)
i = 0

for eigval in eigvals_real[1:11]:
    i+=1
    plt.plot(x, eigval+0*x)
    plt.plot(1+i, eigval, 'x')



In [15]:
x_obs = np.linspace(x_0_min, x_0_max, 10000).reshape(1, 10000)
for i in range(5):
    y = (net_images_value(x_obs).transpose())*eigvecs[i,:]
    y = np.sum(y,1)
    plt.plot(np.linspace(x_0_min, x_0_max, 10000), y)


C:\Work\Programms\Anaconda3\lib\site-packages\numpy\core\numeric.py:492: ComplexWarning: Casting complex values to real discards the imaginary part
  return array(a, dtype, copy=False, order=order)
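Note that scipy.linalg.eig returns eigenvectors as the columns of eigvecs, so the coefficient vector of the i-th mode is eigvecs[:, i]; the loop above indexes rows, which looks like an oversight. Plotting with net_images_value also shows H applied to the reconstructed functions rather than the functions themselves (for a true eigenfunction the two differ only by the factor lambda).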

In [16]:
# Visualization functions:
x_observe = np.linspace(-10,10,1000).reshape(1, 1000)
#---------------------------------------------------------------------------
def show_outputs_conv(x):
    y = net_conv_value(x)
    
    plt.title('Output:')
    plt.grid(True)
    plt.plot(x[0], y)
    
#---------------------------------------------------------------------------
def show_outputs(x):
    y = net.run(x)
    plt.title('Outputs:')
    plt.grid(True)
    for i in range(N):
        net_i = y[i,:] 
        plt.plot(x[0], net_i)
#---------------------------------------------------------------------------
def show_images(x):
    y = net_images_value(x)
    plt.title('Images:')
    plt.grid(True)
    for i in range(N):
        net_image_i = y[i,:] 
        plt.plot(x[0], net_image_i)
#---------------------------------------------------------------------------
def show_images_conv(x):
    y = net_images_conv_value(x)
    
    plt.title('Output:')
    plt.grid(True)
    plt.plot(x[0], y)    
#---------------------------------------------------------------------------
def plot_four(x):
    y1 = net.run(x)
    y2 = net_images_value(x)
    y3 = net_conv_value(x)
    y4 = net_images_conv_value(x)
    
    fig = plt.figure()  # redundant: plt.subplots below creates its own figure (hence the empty Figure repr in the output)
    fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2)

    ax1 = plt.subplot(221)
    ax1.set_title('Original net outputs convolution:')
    ax1.plot(x[0], y3, 'x')

    ax2 = plt.subplot(222)
    ax2.set_title('Image of net outputs convolution:')
    ax2.plot(x[0], y4, 'x') 

    ax3 = plt.subplot(223)
    ax3.set_title('Original net outputs:')
    for i in range(N):
        net_i = y1[i,:] 
        ax3.plot(x[0], net_i)
        
    ax4 = plt.subplot(224)
    ax4.set_title('Image of net outputs:')
    for i in range(N):
        net_image_i = y2[i,:] 
        ax4.plot(x[0], net_image_i)
        
    fig.subplots_adjust(left = 0, bottom = 0, right = 2, top = 2, hspace = 0.2, wspace = 0.2)
#---------------------------------------------------------------------------

In [17]:
plot_four(np.linspace(-3,3,1000).reshape(1, 1000))


<matplotlib.figure.Figure at 0x21354923ef0>