In [59]:
import os
import glob
import cv2
import numpy as np
from numpy.linalg import inv

import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim as optim
import torch.utils.data as data_utils
import torchvision.models as models
import torchvision.datasets as datasets
import torchvision.transforms as transforms
from torch.autograd import Variable
In [5]:
# wrap a Tensor in a Variable, then edit its underlying data through numpy
a = torch.Tensor([[1,2,3],[4,5,6]])
a = Variable(a)
a_np = a.data.numpy()

# fancy indexing: set elements (0,0) and (1,2) to -1
row = [0,1]
col = [0,2]
a_np[row, col] = -1
print a_np

# rebuild a Variable from the modified numpy array and reshape it to a column
a = Variable(torch.from_numpy(a_np))
print a
data = a.view(-1,1)
print data
In [31]:
# linspace(1,4,3) -> [1.0, 2.5, 4.0]; casting to LongTensor truncates to [1, 2, 4]
x = torch.linspace(1,4,3).type(torch.LongTensor)
print x

# clone() copies the data, so modifying y leaves x untouched
y = x.clone()
y[0] = 0
y = (y+2).clamp(3,4)
print x,y

# numpy's equivalent of clamp is clip
a = np.linspace(1,3,3, dtype=np.uint32)
print a.clip(2,3)
In [35]:
# range(1,2) yields only i = 1
for i in range(1,2):
    print i
In [62]:
# basic numpy operations on a 2x3 array
a = np.array([[1,2,3],[4,5,6]])
print a.transpose()     # 3x2 transpose
print a.size            # total number of elements: 6
print a.sum(axis=1)     # row sums: [ 6 15]
print np.diag([1,2,3])  # 3x3 diagonal matrix
print np.pi
In [66]:
import torch
from torch.autograd import Variable
class MyReLU(torch.autograd.Function):
    """
    We can implement our own custom autograd Functions by subclassing
    torch.autograd.Function and implementing the forward and backward passes
    which operate on Tensors.
    """
    def forward(self, input):
        """
        In the forward pass we receive a Tensor containing the input and return a
        Tensor containing the output. You can cache arbitrary Tensors for use in the
        backward pass using the save_for_backward method.
        """
        a = torch.zeros(2,3)
        # cache the input (plus an extra tensor, as an experiment) for backward;
        # note that the legacy API may only accept input/output tensors here
        self.save_for_backward(input, a)
        return input.clamp(min=0)

    def backward(self, grad_output):
        """
        In the backward pass we receive a Tensor containing the gradient of the loss
        with respect to the output, and we need to compute the gradient of the loss
        with respect to the input.
        """
        input, a = self.saved_tensors
        # ReLU passes the gradient through where the input was non-negative
        # and zeroes it where the input was negative
        grad_input = grad_output.clone()
        grad_input[input < 0] = 0
        return grad_input
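A minimal usage sketch of the class above, assuming the same legacy (pre-0.4) Variable-based autograd API used throughout this notebook: old-style Functions are instantiated and then applied like a function, and the custom backward is invoked by .backward(). If the legacy machinery rejects the extra zeros tensor passed to save_for_backward, saving only input avoids the issue.
In [ ]:
# usage sketch: apply the custom ReLU to a Variable and backpropagate through it
x = Variable(torch.randn(2,3), requires_grad=True)
relu = MyReLU()        # old-style Functions are instantiated per use
y = relu(x)
loss = y.sum()
loss.backward()
print x
print y
print x.grad           # 1 where x >= 0, 0 where x < 0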