In [2]:
import torch
In [9]:
def activation(x):
""" sigmoid activation function
Arguments
---------
x: torch.Tensor
"""
return 1 / (1 + torch.exp(-x))
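A quick sanity check (a sketch, not part of the original run): the hand-rolled sigmoid should agree with PyTorch's built-in torch.sigmoid.
In [ ]:
# the manual sigmoid should match torch.sigmoid elementwise
x = torch.randn(3)
assert torch.allclose(activation(x), torch.sigmoid(x))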
In [32]:
torch.manual_seed(7)
features = torch.randn((1, 5))
weights = torch.rand_like(features)
bias = torch.randn((1, 1))
In [41]:
activation(torch.sum(features * weights) + bias)
Out[41]:
In [58]:
# matrix multiplication is more efficient than elementwise * followed by sum
activation(torch.mm(features, weights.view((5, 1))) + bias)
Out[58]:
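Both formulations compute the same weighted sum, so their outputs should match exactly; a quick check (a sketch, not part of the original run):
In [ ]:
# elementwise multiply-and-sum vs. matrix multiplication: same result
out_elementwise = activation(torch.sum(features * weights) + bias)
out_matmul = activation(torch.mm(features, weights.view((5, 1))) + bias)
assert torch.allclose(out_elementwise, out_matmul)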
In [59]:
print(weights)
print(weights.t())
# reshape: returns a tensor with the same data; a view of the original when
# possible, otherwise a copy in new memory
print(weights.reshape((5, 1)))
# resize_: reshapes the same tensor in place; if the new shape has fewer or
# more elements than the original, data is dropped or uninitialized memory
# is exposed
print(weights.resize_((5, 1)))
# view: returns a new tensor sharing the original's memory (no copy);
# raises an error if the new shape doesn't match the number of elements
print(weights.view((5, 1)))
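Because a view shares storage with its source tensor, in-place edits through the view are visible in the original. A small sketch on a throwaway tensor (not part of the original run):
In [ ]:
# a view shares memory with its source tensor
t = torch.arange(6.)
v = t.view(2, 3)
v[0, 0] = 100.
print(t)  # the first element of t is now 100.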
In [76]:
torch.manual_seed(7)
features = torch.randn((1, 3))
n_input = features.shape[1]
n_hidden = 2
n_output = 1
W1 = torch.randn(n_input, n_hidden)
W2 = torch.randn(n_hidden, n_output)
B1 = torch.randn((1, n_hidden))
B2 = torch.randn((1, n_output))
In [79]:
H1 = activation(torch.mm(features, W1) + B1)
H2 = activation(torch.mm(H1, W2) + B2)
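The same forward pass can be written with torch.nn modules. A sketch (not part of the original run); note that nn.Linear stores its weight as (out_features, in_features), so the manual weight matrices are transposed when copied in:
In [ ]:
import torch.nn as nn
model = nn.Sequential(
    nn.Linear(n_input, n_hidden),
    nn.Sigmoid(),
    nn.Linear(n_hidden, n_output),
    nn.Sigmoid(),
)
with torch.no_grad():
    model[0].weight.copy_(W1.t())
    model[0].bias.copy_(B1.view(-1))
    model[2].weight.copy_(W2.t())
    model[2].bias.copy_(B2.view(-1))
# with the weights copied over, the module output matches the manual H2
assert torch.allclose(model(features), H2)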
In [80]:
H2
Out[80]:
In [82]:
import numpy as np
In [92]:
a = np.random.rand(4, 3)
print(a)
In [93]:
b = torch.from_numpy(a)
print(b)
In [94]:
b.numpy()
Out[94]:
The memory is shared between the NumPy array and the tensor, so changing one in place changes the other.
In [95]:
b.mul_(2)
Out[95]:
In [96]:
a
Out[96]:
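To get independent copies instead of shared memory (a sketch, not part of the original run): torch.tensor() always copies its input, and NumPy's .copy() detaches the array from the tensor's storage.
In [ ]:
b_copy = torch.tensor(a)      # copies, does not share memory with a
a_copy = b.numpy().copy()     # copies, does not share memory with b
b_copy.mul_(2)
print(a)  # unchanged, since b_copy has its own storage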