In [1]:
    
import torch
import numpy as np

# Toy dataset: y = 2x + 1 on five points, shaped (5, 1) for nn.Linear
x = np.array([0, 1, 2, 3, 4]).astype('float32').reshape(-1, 1)
y = x * 2 + 1

class Model(torch.nn.Module):
    def __init__(self):
        super(Model, self).__init__()
        self.layer = torch.nn.Linear(1, 1)
    def forward(self, x):
        return self.layer(x)

model = Model()
optimizer = torch.optim.SGD(model.parameters(), lr=0.01)

# Train on the first two samples only, minimizing the summed squared error
for epoch in range(1000):
    x_tr = torch.from_numpy(x[:2, :1]).type(torch.FloatTensor)
    y_tr = torch.from_numpy(y[:2, :1]).type(torch.FloatTensor)
    y_pr = model(x_tr)
    loss = torch.pow(torch.abs(y_tr - y_pr), 2)
    optimizer.zero_grad()
    torch.sum(loss).backward()
    optimizer.step()

# Predict on all five points with the trained model
print(model(torch.from_numpy(x).type(torch.FloatTensor)).detach().numpy())
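The hand-written loss torch.pow(torch.abs(y_tr - y_pr), 2) summed over the batch is the same quantity the built-in criterion torch.nn.MSELoss(reduction='sum') computes, so the loop can also be written with it. A minimal sketch, assuming the model, optimizer, x and y defined in the cell above:

# Sketch: same training loop with the built-in sum-of-squared-errors criterion
criterion = torch.nn.MSELoss(reduction='sum')
x_tr = torch.from_numpy(x[:2, :1]).type(torch.FloatTensor)
y_tr = torch.from_numpy(y[:2, :1]).type(torch.FloatTensor)
for epoch in range(1000):
    y_pr = model(x_tr)
    loss = criterion(y_pr, y_tr)   # equals torch.sum((y_tr - y_pr) ** 2)
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()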
    
    
In [2]:
    
import torch
import numpy as np

x = np.array([0, 1, 2, 3, 4]).astype('float32').reshape(-1, 1)
y = x * 2 + 1

class Model(torch.nn.Module):
    def __init__(self):
        super(Model, self).__init__()
        self.layer = torch.nn.Linear(1, 1)
    def forward(self, x):
        return self.layer(x)

model = Model()
optimizer = torch.optim.SGD(model.parameters(), lr=0.01)

# Randomly initialized weight and bias before training
print('w=', list(model.parameters())[0].detach().numpy())
print('b=', list(model.parameters())[1].detach().numpy())
print()

for epoch in range(1000):
    x_tr = torch.from_numpy(x[:2, :1]).type(torch.FloatTensor)
    y_tr = torch.from_numpy(y[:2, :1]).type(torch.FloatTensor)
    y_pr = model(x_tr)
    loss = torch.pow(torch.abs(y_tr - y_pr), 2)
    # Show predictions, targets, and per-sample loss for the first few epochs
    if epoch < 3:
        print(f'Epoch:{epoch}')
        print('y_pr:', y_pr.detach().numpy())
        print('y_tr:', y[:2, :1])
        print('loss:', loss.detach().numpy())
        print()
    optimizer.zero_grad()
    torch.sum(loss).backward()
    optimizer.step()

print(model(torch.from_numpy(x).type(torch.FloatTensor)).detach().numpy())
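After the loop finishes, the learned parameters can be read directly off the layer. A quick check, assuming training converged, is that they end up near the true slope 2 and intercept 1:

# Sketch: inspect the fitted parameters after training
print('w=', model.layer.weight.item())   # should be close to 2
print('b=', model.layer.bias.item())     # should be close to 1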
    
    
In [3]:
    
import torch
import numpy as np

x = np.array([0, 1, 2, 3, 4]).astype('float32').reshape(-1, 1)
y = x * 2 + 1

class Model(torch.nn.Module):
    def __init__(self):
        super(Model, self).__init__()
        self.layer = torch.nn.Linear(1, 1)
        self.optimizer = torch.optim.SGD(self.parameters(), lr=0.01)
    def forward(self, x):
        # Accept numpy input and convert it to a float tensor
        x = torch.from_numpy(x).type(torch.FloatTensor)
        return self.layer(x)
    def fit(self, x, y, epochs):
        # Keras-style training loop; call self(x) rather than the global model
        for epoch in range(epochs):
            y_tr = torch.from_numpy(y).type(torch.FloatTensor)
            y_pr = self(x)
            loss = torch.pow(torch.abs(y_tr - y_pr), 2)
            self.optimizer.zero_grad()
            torch.sum(loss).backward()
            self.optimizer.step()

model = Model()
model.fit(x[:2], y[:2], epochs=1000)
print(model(x))
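Since forward() does the numpy-to-tensor conversion itself, the fitted model can be called on plain numpy arrays. A small usage sketch with two hypothetical extra inputs that are not part of the original data:

# Sketch: predictions on unseen inputs (hypothetical points 5 and 6)
x_new = np.array([5, 6]).astype('float32').reshape(-1, 1)
print(model(x_new).detach().numpy())   # should be close to [11, 13] if training converged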
    
    
In [4]:
    
import torch
import numpy as np
    
In [5]:
    
if torch.cuda.is_available():
    device = torch.device('cuda')
else:
    device = torch.device('cpu')
    
print('Using PyTorch version:', torch.__version__, ' Device:', device)
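The same device selection is often written as a single conditional expression; an equivalent sketch:

# Sketch: one-line device selection
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')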
    
    
In [6]:
    
x = np.array([0, 1, 2, 3, 4]).astype('float32').reshape(-1, 1)
y = x * 2 + 1

class Model(torch.nn.Module):
    def __init__(self):
        super(Model, self).__init__()
        self.layer = torch.nn.Linear(1, 1)
        self.optimizer = torch.optim.SGD(self.parameters(), lr=0.01)
    def forward(self, x):
        # Convert numpy input to a float tensor and move it to the chosen device
        x = torch.from_numpy(x).type(torch.FloatTensor).to(device)
        return self.layer(x)
    def fit(self, x, y, epochs):
        for epoch in range(epochs):
            y_tr = torch.from_numpy(y).type(torch.FloatTensor).to(device)
            y_pr = self(x)
            loss = torch.pow(torch.abs(y_tr - y_pr), 2)
            self.optimizer.zero_grad()
            torch.sum(loss).backward()
            self.optimizer.step()

model = Model().to(device)
model.fit(x[:2], y[:2], epochs=1000)
print(model(x))
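When the model sits on the GPU, model(x) returns a CUDA tensor; to get a numpy array it has to be moved back to the CPU first. A minimal sketch:

# Sketch: pull GPU predictions back to host memory before converting to numpy
pred = model(x).detach().cpu().numpy()
print(pred)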
    
    