In [1]:
import torch
from torch.autograd import Variable

In [2]:
x = Variable(torch.ones(2, 2), requires_grad=True)  # wrap a 2x2 tensor of ones and track operations on it
print(x)


Variable containing:
 1  1
 1  1
[torch.FloatTensor of size 2x2]
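
Note: the Variable API used throughout this notebook is from pre-0.4 PyTorch. Since PyTorch 0.4, Variable has been merged into Tensor, so the modern equivalent of the cell above (a minimal sketch) is:

import torch
x = torch.ones(2, 2, requires_grad=True)  # requires_grad now lives on the tensor itself
print(x)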


In [4]:
y = x + 2
print(y)


Variable containing:
 3  3
 3  3
[torch.FloatTensor of size 2x2]


In [5]:
print(y.grad_fn)


<torch.autograd.function.AddConstantBackward object at 0x00000253ABF8FBE8>
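
y was produced by an operation (adding a constant to x), so autograd records a grad_fn for it. A user-created leaf Variable has none; a quick check:

print(x.grad_fn)  # None: x is a leaf created by the user, not by an operation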

In [7]:
z = y * y * 3
out = z.mean()  # reduce to a scalar so backward() needs no gradient argument
print(z, out)


Variable containing:
 27  27
 27  27
[torch.FloatTensor of size 2x2]
 Variable containing:
 27
[torch.FloatTensor of size 1]


In [8]:
out.backward()  # compute d(out)/dx for every leaf that requires grad

In [9]:
print(x.grad)


Variable containing:
 4.5000  4.5000
 4.5000  4.5000
[torch.FloatTensor of size 2x2]
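
Why 4.5 everywhere: out = (1/4) * sum_i 3*(x_i + 2)^2, so d(out)/dx_i = (3/2)*(x_i + 2), and at x_i = 1 this is (3/2)*3 = 4.5.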


In [11]:
x = torch.randn(3)
x = Variable(x, requires_grad=True)

y = x * 2
while y.data.norm() < 1000:  # keep doubling until the L2 norm reaches 1000
    y = y * 2
print(y)


Variable containing:
 -419.2958
  136.8486
-1727.5154
[torch.FloatTensor of size 3]


In [35]:
gradients = torch.FloatTensor([0.1, 1.0, 0.0001])  # the vector v for the vector-Jacobian product
y.backward(gradients)
print(x.grad)


Variable containing:
  409.6000
 4096.0000
    0.4096
[torch.FloatTensor of size 3]
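
Because y is not a scalar, backward requires a gradients vector v and computes the vector-Jacobian product J^T v rather than a plain gradient. Here the loop has doubled y until y = 4096 * x (the factor can be read off the output: 409.6 / 0.1 = 4096), so the Jacobian is 4096 times the identity and x.grad is simply 4096 * gradients. A quick sanity check against the live session (a sketch):

print(y.data / x.data)  # every entry should be 4096 in this run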


In [17]:
torch.norm  # norm


Out[17]:
<function torch._C.norm>

In [16]:
y.data.norm(p=10)


Out[16]:
1727.5155034377074
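
norm(p=10) computes the 10-norm (sum_i |y_i|^10)^(1/10), which is dominated by the largest-magnitude entry; that is why the result sits barely above |-1727.5154|. A hand-rolled check (a sketch using the same y):

print(y.data.abs().pow(10).sum() ** 0.1)  # ~= 1727.5155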

In [23]:
a = torch.randn(4, 2)
print(a)


-0.2103 -1.2730
-0.4334 -0.3583
-0.5065 -2.1537
 0.2715  1.7556
[torch.FloatTensor of size 4x2]


In [25]:
torch.norm(a, 0, 1)


Out[25]:
 2
 2
 2
 2
[torch.FloatTensor of size 4x1]
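
With p=0, torch.norm does not compute a true norm; it counts the nonzero entries along the given dimension. Every row of a has two nonzero entries, hence the column of 2s.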

In [32]:
torch.norm(a, 1, -1)


Out[32]:
 1.4833
 0.7917
 2.6602
 2.0272
[torch.FloatTensor of size 4x1]
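
With p=1 and dim=-1 this is the L1 norm of each row, i.e. the sum of absolute values: for the first row, |-0.2103| + |-1.2730| = 1.4833, matching the output.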

In [51]:
x1 = torch.ones(3)
x1 = Variable(x1, requires_grad=True)

y1 = x1 * 2
while y1.data.norm() < 1000:
    y1 = y1 * 2

z = y1 * 10

In [48]:
gradients = torch.FloatTensor([0.1, 1.0, 0.0001])
z.backward(gradients)

print(x1.grad)


Variable containing:
  2048.0000
 20480.0000
     2.0480
[torch.FloatTensor of size 3]
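
For reference: starting from x1 = ones(3), the loop doubles y1 until y1 = 1024 * x1 (norm 1024 * sqrt(3) ~= 1774 >= 1000), so z = 10240 * x1 and a single backward pass should give x1.grad = 10240 * gradients, i.e. (1024, 10240, 1.024). The printed values are exactly twice that, which is what gradient accumulation produces when backward runs more than once without clearing the grads (the out-of-order execution counts, In [48] before In [51], suggest these cells were re-run). To reset between runs, something like:

x1.grad.data.zero_()  # clear accumulated gradients before calling backward again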

