In [60]:
import torch as T
import math
import numpy as np
from matplotlib import mlab
from matplotlib import pylab as plt
from torch.autograd import Variable, grad
from tqdm import tqdm_notebook
from IPython.display import clear_output
%matplotlib inline
# Equation: x''(t) + omega^2 * x(t) = 0
# Initial conditions: x(0) = x0 = 0, x'(0) = v0 = 1
# Loss: j = (eq(t))^2 + (H(t) - E)^2 + (x(0))^2 + (x'(0) - 1)^2
# Basis functions: sigmoid(Vi * t + Bi)

# Basis size and number of training points:
ammount = 1
m = 100

# Initial conditions and physical parameters of the system:
x0 = 0.0
v0 = 1.0
M = 1.0
k = 1.0
omega2 = k / M
# Total energy of the oscillator at t = 0:
E = 0.5 * (M * v0 ** 2 + k * x0 ** 2)

# Plotting domain: a dense grid for the smooth reference curve and a
# coarser grid of m points used for training.
timeCont = T.linspace(0, 2 * math.pi, 150)
timeTrain = T.linspace(0, 2 * math.pi, m)

# Reference curve omega * sin(omega * t) evaluated on both grids.
omega = math.sqrt(omega2)
f1 = (omega * T.sin(omega * timeCont)).numpy()
f2 = (omega * T.sin(omega * timeTrain)).numpy()
plt.plot(timeCont.numpy(), f1, 'green')
plt.plot(timeTrain.numpy(), f2, 'x')
Out[60]:
In [61]:
# Trainable parameters of the single basis function: amplitude W,
# frequency V and phase B (all scalar tensors tracked by autograd).
W = Variable(T.ones(1), requires_grad=True)
V = Variable(T.ones(1), requires_grad=True)
B = Variable(T.zeros(1), requires_grad=True)
# Earlier random-initialization experiments, kept for reference:
#W = Variable(T.Tensor(ammount).uniform_(-1, 1), requires_grad=True)
#V = Variable(T.Tensor(ammount).uniform_(-1, 1), requires_grad=True)
#B = Variable(T.Tensor(ammount).uniform_(-3,1), requires_grad=True)
#.random_(0,3)-1
In [62]:
# Disabled (stringified) cell: would plot each weighted sigmoid basis
# function over a wide time range. Kept for reference; note the loop body
# lost its indentation during notebook export.
'''
t = Variable(T.linspace(-15, 8*math.pi, 250))
I = Variable(T.Tensor(t.size()).fill_(1))
z = T.ger(V, t) + T.ger(B, I)
sigmoids = T.mul(T.sigmoid(z), T.ger(W,I))
s=sigmoids.data
for i in range(ammount):
plt.plot(t.data.numpy(), s[i].numpy())
'''
print(1)  # placeholder output so the disabled cell still runs
In [63]:
#фактически x(t), прямой проход инс
def forward1(t):
    """Network output x(t): a W-weighted sum of sigmoids of V*t + B.

    t is a 1-D tensor of time points; returns a tensor of the same length.
    Uses the global parameters W, V, B.
    """
    ones = Variable(T.Tensor(t.size()).fill_(1))
    pre_act = T.ger(V, t) + T.ger(B, ones)
    weighted = T.sigmoid(pre_act) * T.ger(W, ones)
    return T.sum(weighted, 0)
def forward3(t):
    """Closed-form ansatz x(t) = W * sin(V*t + B) — a single sine basis.

    Uses the global parameters W, V, B.
    """
    phase = V * t + B
    return T.sin(phase) * W
def forward2(t):
    """Network output x(t) like forward1 but with a sine activation.

    t is a 1-D tensor of time points; returns a tensor of the same length.
    Uses the global parameters W, V, B.
    """
    ones = Variable(T.Tensor(t.size()).fill_(1))
    pre_act = T.ger(V, t) + T.ger(B, ones)
    weighted = T.sin(pre_act) * T.ger(W, ones)
    return T.sum(weighted, 0)
def J():
    """Physics-informed loss for the oscillator x''(t) + omega2*x(t) = 0.

    Returns the sum of:
      * the squared ODE residual over the training grid `timeTrain`, and
      * penalties for the initial conditions x(0) = 0 and x'(0) = 1.
    Uses the globals timeTrain, m, omega2 and the model forward3 (and,
    through it, the parameters W, V, B).
    """
    tTr = Variable(timeTrain, requires_grad=True)
    X = forward3(tTr)
    # First backward pass with create_graph=True makes v = dx/dt itself
    # differentiable w.r.t. the parameters.
    X.backward(Variable(T.Tensor(m).fill_(1)), retain_graph=True, create_graph=True)
    v = tTr.grad.clone()
    tTr.grad.data.zero_()
    # BUG FIX: the second pass also needs create_graph=True; otherwise the
    # acceleration `a` is detached from the graph and the residual term
    # contributes no gradient through a to W, V, B.
    v.backward(Variable(T.Tensor(m).fill_(1)), retain_graph=True, create_graph=True)
    a = tTr.grad.clone()
    # ODE residual. BUG FIX: restore the omega2 factor from the equation
    # (omega2 == 1 here, so the numerical value is unchanged).
    e = a + omega2 * X
    # BUG FIX: use X[0] / v[0] instead of X.data[0] / v.data[0] so the
    # initial-condition penalties stay in the autograd graph and actually
    # train the parameters. timeTrain[0] == 0, so X[0] is x(0) and v[0]
    # is x'(0).
    return T.sum(e ** 2) + X[0] ** 2 + (v[0] - 1) ** 2
In [64]:
# DEBUG CODE (disabled): adaptive learning-rate / parameter-rollback
# experiment, kept as a stringified snippet for reference. The snippet
# lost its indentation during notebook export.
#
'''
if j.data[0] > jLast[0]:
alpha = alpha*0.9
W.data = wt
V.data = vt
B.data = bt
wt = W.data
vt = V.data
bt = B.data
if i==6000:
alpha = 10**-7
if i==14000:
alpha = 10**-8
'''
#
# Disabled quick-look plot of the current network output:
#Y = forward2(Variable(timeCont))
#plt.plot(timeCont.numpy(), Y.data.numpy(),'red')
Out[64]:
In [65]:
# Disabled (stringified) gradient-descent training loop, kept for
# reference. Re-enable by removing the quotes and setting N > 0.
# BUG FIX inside the snippet: the B update previously subtracted alpha*wg
# (W's gradient) instead of alpha*bg. Also restored the loop-body
# indentation that was lost during notebook export.
'''
N = 0
alpha = 10**-8
I = []
Er = []
Wgrad = []
#jLast = T.Tensor([10**5])
for i in tqdm_notebook(range(N)):
    I.append(i)
    j = J()
    j.backward()
    Er.append(j.data[0])
    Wgrad.append(W.grad.data)
    bg = B.grad.data
    vg = V.grad.data
    wg = W.grad.data
    B = Variable(B.data - alpha*bg, requires_grad=True)
    V = Variable(V.data - alpha*vg, requires_grad=True)
    W = Variable(W.data - alpha*wg, requires_grad=True)
    if i%400==0:
        clear_output()
        print(j.data)
    #jLast[0] = j.data[0]
print(alpha)
'''
print(1)  # placeholder output so the disabled cell still runs
In [66]:
# Disabled (stringified) cell: would plot the error x(t) - sin(t) between
# the trained network and the analytic solution. Kept for reference.
'''
Y = forward2(Variable(timeCont))
plt.plot(timeCont.numpy(), (Y-T.sin(Variable(timeCont))).data.numpy(),'red')
'''
print(1)  # placeholder output so the disabled cell still runs
In [67]:
#plt.plot(I, Er, 'red')
In [68]:
# Disabled (stringified) plot of the individual weighted sigmoid basis
# functions, kept for reference (indentation restored after export loss).
'''
t = Variable(T.linspace(-15, 8*math.pi, 250))
I = Variable(T.Tensor(t.size()).fill_(1))
z = T.ger(V, t) + T.ger(B, I)
sigmoids = T.mul(T.sigmoid(z), T.ger(W,I))
s=sigmoids.data
for i in range(ammount):
    plt.plot(t.data.numpy(), s[i].numpy())
'''
# BUG FIX: tTr was previously defined only inside J() (as a local) or in a
# later cell, so this cell failed under Restart & Run All. Define it here
# before use.
tTr = Variable(timeTrain, requires_grad=True)
x = forward3(tTr)
# Display the autograd node that produced x (sanity check that x is part
# of a graph).
x.grad_fn
Out[68]:
In [73]:
# Reset the parameters to their initial values and evaluate the loss once.
W = Variable(T.Tensor([1]), requires_grad=True)
V = Variable(T.Tensor([1]), requires_grad=True)
B = Variable(T.Tensor([0]), requires_grad=True)
tTr = Variable(timeTrain, requires_grad = True)
# Disabled manual first/second derivative computation, kept for reference.
# NOTE(review): it relies on `x` from an earlier cell (hidden state).
'''
x.backward(Variable(T.Tensor(m).fill_(1)), retain_graph = True, create_graph = True)
v = tTr.grad.clone()
tTr.grad.data.zero_()
v.backward(Variable(T.Tensor(m).fill_(1)))
a = tTr.grad.clone()
#j0=J()
#j0.backward()
'''
# Single forward/backward pass: show the loss value and the gradient of
# the loss with respect to the frequency parameter V.
j=J()
print(j)
j.backward()
print(V.grad)
In [70]:
# Plot the current network output x(t) over the training grid.
time=tTr
x = forward3(tTr)
plt.plot(time.data.numpy(), x.data.numpy(),'red')
Out[70]:
In [71]:
# NOTE(review): `v` is defined only inside J() (as a local) and in the
# stringified snippet above, so this cell fails under Restart & Run All —
# it relies on leftover kernel state from a previous session.
plt.plot(time.data.numpy(), v.data.numpy(),'red')
In [ ]:
# NOTE(review): like `v` above, `a` is never defined at module level on a
# fresh kernel — this cell also depends on stale kernel state.
plt.plot(time.data.numpy(), a.data.numpy(),'red')
In [ ]: