In [19]:
import torch as T
import math
import numpy as np
from matplotlib import mlab
from matplotlib import pylab as plt
from torch.autograd import Variable, grad
%matplotlib inline

# Equation:  x''(t) + omega^2 * x(t) = 0
# ICs:       x(0) = x0,  x'(0) = v0
# Loss:      j = (eq(t))^2 + (H(t) - E)^2 + (x(0) - x0)^2 + (x'(0) - v0)^2
# Basis:     sigmoid(V_i * t + B_i)

# Basis size and training-grid size:
ammount = 1   # number of basis functions (sic: "amount"; kept for compatibility)
m = 30        # number of training points

# Initial conditions and system parameters:
x0 = 0.0
v0 = 1.0
M = 1.0        # mass
k = 1.0        # spring constant
omega2 = k/M   # omega^2
E = 0.5*(M*v0**2+k*x0**2)  # conserved total energy

# Plotting domain:

timeCont = T.linspace(0, 2*math.pi, 150)   # dense grid for smooth curves
timeTrain = T.linspace(0, 2*math.pi, m)    # training grid

# Exact solution x(t) = x0*cos(omega*t) + (v0/omega)*sin(omega*t).
# (The original computed sin(omega*t)*omega, which coincides with the
#  exact solution only for x0 = 0, omega = 1 — the current parameters.)
omega = math.sqrt(omega2)
f1 = (x0*T.cos(timeCont*omega) + (v0/omega)*T.sin(timeCont*omega)).numpy()
f2 = (x0*T.cos(timeTrain*omega) + (v0/omega)*T.sin(timeTrain*omega)).numpy()

plt.plot(timeCont.numpy(), f1, 'green')
plt.plot(timeTrain.numpy(), f2, 'x')


Out[19]:
[<matplotlib.lines.Line2D at 0x7fca03c693c8>]

In [ ]:


In [20]:
# Trainable parameters, one scalar per basis function ("ammount" of them):
#   W - output weights: random_(0,3) draws ints {0,1,2}, minus 1 -> {-1,0,1}
#   V - input slopes, uniform near 1
#   B - biases, uniform over roughly (-3*pi, 0.2)
#   G - additive output offsets, small uniform values
W = Variable(T.Tensor(ammount).random_(0,3)-1, requires_grad=True)
V = Variable(T.Tensor(ammount).uniform_(0.8, 1.2), requires_grad=True)
B = Variable(T.Tensor(ammount).uniform_((-3)*math.pi, 0.2), requires_grad=True)
G = Variable(T.Tensor(ammount).uniform_(-0.1, 0.1), requires_grad=True)
print(B)


Variable containing:
-3.2439
[torch.FloatTensor of size 1]


In [21]:
# Trial solution x(t) of the network and the loss functional J.
# (These definitions were previously disabled inside a string literal in
# favor of a cos(t) debug stub, which left J undefined for the training
# loop below and broke Restart-and-Run-All.)

def forward(t):
    """Network forward pass: x(t_j) = sum_i W_i*sigmoid(V_i*t_j + B_i) + G_i."""
    I = Variable(T.Tensor(t.size()).fill_(1))
    z = T.ger(V, t) + T.ger(B, I)   # z[i, j] = V_i*t_j + B_i
    return T.sum(T.mul(T.sigmoid(z), T.ger(W, I)) + T.ger(G, I), 0)

def J():
    """Weighted loss: ODE residual + initial conditions + energy conservation."""
    tTr = Variable(timeTrain, requires_grad=True)
    x = forward(tTr)
    ones = T.ones(m)
    # First and second time derivatives via autograd.  grad_outputs must be
    # a vector of ones (the original passed tTr itself, which weights the
    # derivative by t, and let two backward() calls accumulate into
    # tTr.grad, corrupting the second derivative).
    v = grad(x, tTr, grad_outputs=ones, create_graph=True)[0]
    a = grad(v, tTr, grad_outputs=ones, create_graph=True)[0]
    e = a + omega2*x                 # ODE residual x'' + omega^2 * x
    H = 0.5*M*v*v + 0.5*k*x*x        # energy along the trajectory
    # Initial-condition terms use x[0]/v[0] (not .data) so they stay in the
    # graph and contribute gradients to the parameters.
    return 20*(T.sum(e**2))/m + 7*x[0]**2 + 12*(v[0]-1)**2 + 20*(T.sum((H-E)**2))/m

In [22]:
# Disabled debug snippet, kept as a bare string (its repr is the cell output):
# it plotted each weighted sigmoid basis function over a wide interval.
'''
t = Variable(T.linspace(-15, 8*math.pi, 250))
I = Variable(T.Tensor(t.size()).fill_(1))
z = T.ger(V, t) + T.ger(B, I)
sigmoids = T.mul(T.sigmoid(z), T.ger(W,I)) + T.ger(G,I)   
s=sigmoids.data
for i in range(ammount):
    plt.plot(t.data.numpy(), s[i].numpy())
'''


Out[22]:
'\nt = Variable(T.linspace(-15, 8*math.pi, 250))\nI = Variable(T.Tensor(t.size()).fill_(1))\nz = T.ger(V, t) + T.ger(B, I)\nsigmoids = T.mul(T.sigmoid(z), T.ger(W,I)) + T.ger(G,I)   \ns=sigmoids.data\nfor i in range(ammount):\n    plt.plot(t.data.numpy(), s[i].numpy())\n'

In [23]:
#plt.plot(t.data.numpy(), T.sum(s,0).numpy(), 'red')

In [25]:
# DEBUG CELL: sanity-check x(t) and its derivatives on a wide interval.
t = Variable(T.linspace(-15, 8*math.pi, 250), requires_grad = True)

x = forward(t)
ones = T.ones(t.size())
# First derivative dx/dt.  Use torch.autograd.grad with grad_outputs of
# ones: the original passed t itself as grad_variables (weighting the
# derivative by t), and its second backward() accumulated into t.grad,
# so `a` was really v + a.
v = grad(x, t, grad_outputs=ones, create_graph=True)[0]
# Second derivative d2x/dt2 — grad() returns it directly, no accumulation.
a = grad(v, t, grad_outputs=ones, create_graph=True)[0]

plt.plot(t.data.numpy(), x.data.numpy())


Out[25]:
[<matplotlib.lines.Line2D at 0x7fca02a21978>]

In [26]:
# NOTE(review): `v` here is t.grad from backward(x, grad_variables=t) in the
# debug cell, i.e. the derivative weighted by t — not the plain x'(t); verify.
plt.plot(t.data.numpy(), v.data.numpy())


Out[26]:
[<matplotlib.lines.Line2D at 0x7fca029bbb38>]

In [27]:
# NOTE(review): `a` is t.grad after TWO backward() calls, so the first call's
# contribution is accumulated in — confirm before trusting this as x''(t).
plt.plot(t.data.numpy(), a.data.numpy())


Out[27]:
[<matplotlib.lines.Line2D at 0x7fca028de5c0>]

In [ ]:
# Plain gradient descent with rollback: if the loss increases, restore the
# previous parameters and shrink the step size.
N = 5000                  # number of iterations
alpha = 10**-6            # learning rate
I = []                    # iteration indices (for the loss curve)
Er = []                   # loss history
jPr = T.Tensor([10**5])   # previous loss (large sentinel)

# Snapshots of the last accepted parameters.  Initialized here so the
# rollback branch cannot hit uninitialized names on the first iteration
# (the original assigned wt/vt/bt/gt only after the branch, so an initial
# loss above the sentinel raised NameError).
wt, vt, bt, gt = W.data, V.data, B.data, G.data

for i in range(N):
    I.append(i)
    j = J()
    j.backward()
    Er.append(j.data[0])
    bg = B.grad
    vg = V.grad
    wg = W.grad
    gg = G.grad
    # Rollback: loss went up -> restore the snapshot and shrink the step.
    if j.data[0]>jPr[0]:
        alpha = alpha*0.2
        W.data = wt
        V.data = vt
        B.data = bt
        G.data = gt
    wt = W.data
    vt = V.data
    bt = B.data
    gt = G.data

    # Gradient step; rewrapping in fresh Variables clears stale .grad so the
    # next backward() does not accumulate onto old gradients.
    B = B - alpha*bg
    V = V - alpha*vg
    W = W - alpha*wg
    G = G - alpha*gg
    W = Variable(W.data, requires_grad=True)
    V = Variable(V.data, requires_grad=True)
    B = Variable(B.data, requires_grad=True)
    G = Variable(G.data, requires_grad=True)
    jPr = j.data

In [ ]:
# Plot the trained trial solution x(t) on the dense grid.
Y = forward(Variable(timeCont))
plt.plot(timeCont.numpy(), Y.data.numpy(),'red')

In [ ]:
# Loss history over training iterations.
plt.plot(I, Er, 'red')

In [ ]:
# Plot each weighted basis function W_i*sigmoid(V_i*t + B_i) + G_i separately.
# NOTE(review): this rebinds `t` (without requires_grad) and shadows the
# iteration-index list `I` from the training cell with a tensor of ones.
t = Variable(T.linspace(-15, 8*math.pi, 250))
I = Variable(T.Tensor(t.size()).fill_(1))
z = T.ger(V, t) + T.ger(B, I)
sigmoids = T.mul(T.sigmoid(z), T.ger(W,I)) + T.ger(G,I)   
s=sigmoids.data
for i in range(ammount):
    plt.plot(t.data.numpy(), s[i].numpy())

In [ ]:
# Raw per-basis values from the previous cell (ammount x 250 tensor).
print(s)

In [ ]:
# ODE residual e = x'' + omega^2 * x on the training grid; should be close
# to zero for a well-trained network.
tTr = Variable(timeTrain, requires_grad = True)
x = forward(tTr)
ones = T.ones(m)
# Derivatives via torch.autograd.grad: grad_outputs of ones yields the raw
# derivative (the original passed tTr as grad_variables, weighting it by t),
# and grad() avoids the double-backward accumulation into tTr.grad that
# made `a` equal v + a.
v = grad(x, tTr, grad_outputs=ones, create_graph=True)[0]
a = grad(v, tTr, grad_outputs=ones, create_graph=True)[0]
e = a + omega2*x
e