In [ ]:
import numpy as np
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
In [ ]:
# Synthetic data: 800 samples with 69 features, integer class labels in [0, 10)
train_X = np.random.normal(scale=2, size=(800, 69))
train_y = np.random.randint(0, high=10, size=(800, 1), dtype=np.int64)
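A quick sanity check (optional, not part of the original flow): confirm the shapes and label range before training. The data is pure noise, so any fit only measures memorization.
In [ ]:
# Inspect the synthetic data: 800 samples, 69 features, labels in [0, 10)
print(train_X.shape, train_y.shape)
print(train_y.min(), train_y.max())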
In [ ]:
class FullyConnectedLayers(nn.Module):
    def __init__(self, inputs, targets, targets_size=10, learning_rate=1e-4):
        super().__init__()  # call the parent class's initializer
        self._train_X = inputs
        self._train_y = targets
        self._train_X_size = inputs.shape[1]
        self._train_y_size = targets_size
        self._learning_rate = learning_rate
        self._fc1 = nn.Linear(self._train_X_size, self._train_X_size)
        self._ac1 = nn.ReLU()  # activation function
        self._fc2 = nn.Linear(self._train_X_size, self._train_y_size)
        self._loss_function = nn.CrossEntropyLoss()
        self._optimizer = torch.optim.SGD(self.parameters(), lr=learning_rate)
    def forward(self, X):
        out = self._fc1(X)
        out = self._ac1(out)
        out = self._fc2(out)
        return out
    def fit(self, training_epochs=1e3, display=1e2):
        display = int(display)
        # the full training set is used as a single batch each epoch
        inputs = torch.as_tensor(self._train_X, dtype=torch.float32)
        targets = torch.as_tensor(self._train_y.flatten(), dtype=torch.int64)
        for epoch in range(int(training_epochs)):
            self._optimizer.zero_grad()  # clear the gradients of all optimized parameters
            outputs = self.forward(inputs)  # forward pass through the network
            self._loss = self._loss_function(outputs, targets)  # compute the batch loss
            self._loss.backward()  # back-propagate the error
            self._optimizer.step()
            if (epoch + 1) % display == 0:
                print('Epoch (%d/%d), loss: %.4f' % (epoch + 1, training_epochs, self._loss.item()))
    def pred(self, X):
        # inference only: no gradients needed
        with torch.no_grad():
            outputs = self.forward(torch.as_tensor(X, dtype=torch.float32))
        _, output_labels = torch.max(outputs, 1)
        return output_labels
a = FullyConnectedLayers(train_X, train_y, 10)
a.fit(1e3, 1e2)
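A side check worth knowing (a sketch, not part of the model): nn.CrossEntropyLoss fuses log-softmax with negative log-likelihood, which is why forward() returns raw logits and fit() passes integer class labels rather than one-hot vectors.
In [ ]:
# Illustration: CrossEntropyLoss == LogSoftmax followed by NLLLoss on raw logits
logits = torch.randn(4, 10)          # hypothetical batch of 4 samples, 10 classes
labels = torch.tensor([1, 0, 3, 9])  # integer class indices, not one-hot
manual = nn.NLLLoss()(nn.LogSoftmax(dim=1)(logits), labels)
fused = nn.CrossEntropyLoss()(logits, labels)
print(torch.allclose(manual, fused))  # prints True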
In [ ]:
a.pred(train_X)
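Training accuracy is a quick final sanity check; a minimal sketch, assuming pred() returns a label tensor as defined above (with random labels, about 10% is chance level, so anything higher reflects memorization of the noise):
In [ ]:
# Compare predicted labels against the training labels
pred_labels = a.pred(train_X)
accuracy = (pred_labels.numpy() == train_y.flatten()).mean()
print('training accuracy: %.3f' % accuracy)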
In [ ]: