In [6]:
%matplotlib inline
In [13]:
import torch
import torch.nn as nn
from torch.autograd import Variable
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
Note: inputs to the model must be `FloatTensor`, and the class labels must be `LongTensor` (required by `nn.CrossEntropyLoss`).
In [48]:
# Hyperparameters for the Iris logistic-regression experiment.
input_size = 4       # four features per iris sample
num_classes = 3      # three iris species
num_epochs = 10000
learning_rate = 0.01

# Load the Iris dataset and hold out a third of it for validation.
iris = load_iris()
X_train, X_test, y_train, y_test = train_test_split(
    iris.data, iris.target, test_size=0.33, random_state=5)

# Standardize features: fit the scaler on the training split only,
# then apply the same transform to the held-out split.
scaler = StandardScaler()
X_train = scaler.fit_transform(X_train)
X_test = scaler.transform(X_test)
class LogisticRegression(nn.Module):
    """Multinomial logistic regression: a single affine layer.

    No softmax is applied here because nn.CrossEntropyLoss expects
    raw logits and applies log-softmax internally.
    """

    def __init__(self, input_size, num_classes):
        super(LogisticRegression, self).__init__()
        self.linear = nn.Linear(input_size, num_classes)

    def forward(self, x):
        return self.linear(x)
# Model, loss, and optimizer.
# CrossEntropyLoss combines log-softmax and negative log-likelihood,
# so the model outputs raw logits.
model = LogisticRegression(input_size, num_classes)
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate)
def train(X_train, y_train):
    """Run one full-batch gradient step and return the training loss.

    Args:
        X_train: numpy array of shape (n_samples, n_features), floats.
        y_train: numpy array of integer class labels.

    Returns:
        The scalar training loss as a Python float.
    """
    # Plain tensors replace the deprecated Variable wrapper
    # (autograd is built into Tensor since PyTorch 0.4).
    inputs = torch.from_numpy(X_train).float()
    targets = torch.from_numpy(y_train).long()

    optimizer.zero_grad()
    outputs = model(inputs)
    loss = criterion(outputs, targets)
    loss.backward()
    optimizer.step()

    # loss.data[0] raises IndexError on 0-dim tensors in PyTorch >= 0.4;
    # item() is the supported way to extract a Python number.
    return loss.item()
def valid(X_test, y_test):
    """Evaluate the model on held-out data.

    Args:
        X_test: numpy array of shape (n_samples, n_features), floats.
        y_test: numpy array of integer class labels.

    Returns:
        Tuple (val_loss, val_acc) of Python floats.
    """
    inputs = torch.from_numpy(X_test).float()
    targets = torch.from_numpy(y_test).long()

    # Evaluation needs no gradients; no_grad saves memory and time.
    with torch.no_grad():
        outputs = model(inputs)
        val_loss = criterion(outputs, targets)

        # Accuracy: fraction of samples whose argmax logit matches
        # the label.  .item() avoids integer-tensor division, which
        # would truncate (or return a tensor) instead of a float.
        _, predicted = torch.max(outputs, 1)
        correct = (predicted == targets).sum().item()
        val_acc = correct / targets.size(0)

    return val_loss.item(), val_acc
loss_list = []
val_loss_list = []
val_acc_list = []

for epoch in range(num_epochs):
    # Reshuffle the training set each epoch (training is full-batch,
    # so this only changes sample order within the single batch).
    perm = np.random.permutation(len(X_train))
    X_train, y_train = X_train[perm], y_train[perm]

    loss = train(X_train, y_train)
    val_loss, val_acc = valid(X_test, y_test)

    if epoch % 1000 == 0:
        print('epoch %d, loss: %.4f val_loss: %.4f val_acc: %.4f'
              % (epoch, loss, val_loss, val_acc))

    # logging
    loss_list.append(loss)
    val_loss_list.append(val_loss)
    val_acc_list.append(val_acc)
In [49]:
# Learning curves: losses in one figure, validation accuracy in another.
# plt.plot(y) uses range(len(y)) on the x-axis, which equals num_epochs here.
plt.figure()
plt.plot(loss_list, 'r-', label='train_loss')
plt.plot(val_loss_list, 'b-', label='val_loss')
plt.legend()

plt.figure()
plt.plot(val_acc_list, 'g-', label='val_acc')
plt.legend()
Out[49]:
In [1]:
import torch
import torch.nn as nn
import torchvision.datasets as dsets
import torchvision.transforms as transforms
from torch.autograd import Variable
In [2]:
# Hyperparameters for the MNIST experiment.
input_size = 784      # 28 * 28 pixels, flattened
num_classes = 10      # digits 0-9
num_epochs = 20
batch_size = 100
learning_rate = 0.001

# MNIST dataset: downloaded to ./data on first run; ToTensor() converts
# images to float tensors.
train_dataset = dsets.MNIST(
    root='./data', train=True, transform=transforms.ToTensor(),
    download=True)
test_dataset = dsets.MNIST(
    root='./data', train=False, transform=transforms.ToTensor())

# Mini-batch loaders; only the training stream is shuffled.
train_loader = torch.utils.data.DataLoader(
    dataset=train_dataset, batch_size=batch_size, shuffle=True)
test_loader = torch.utils.data.DataLoader(
    dataset=test_dataset, batch_size=batch_size, shuffle=False)
In [3]:
print(len(train_dataset))   # number of training images
print(len(test_dataset))    # number of test images

# Peek at a single mini-batch.  The iterator method .next() is
# Python 2 only (removed in Python 3); the builtin next() works in both.
image, label = next(iter(train_loader))
print(type(image), type(label))
print(image.size(), label.size())
In [7]:
class LogisticRegression(nn.Module):
    """Single linear layer mapping flattened images to class logits."""

    def __init__(self, input_size, num_classes):
        super(LogisticRegression, self).__init__()
        # No softmax here: nn.CrossEntropyLoss consumes raw logits.
        self.linear = nn.Linear(input_size, num_classes)

    def forward(self, x):
        logits = self.linear(x)
        return logits
# Model, loss, and optimizer for the MNIST run.
model = LogisticRegression(input_size, num_classes)
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate)
def train(train_loader):
    """Train for one epoch; return the mean per-batch loss as a float.

    Args:
        train_loader: DataLoader yielding (images, labels) mini-batches.

    Returns:
        Mean training loss over the epoch (Python float).
    """
    model.train()
    running_loss = 0.0
    for images, labels in train_loader:
        # Flatten (N, 1, 28, 28) image batches into (N, 784) rows.
        images = images.view(-1, 28 * 28)

        optimizer.zero_grad()
        outputs = model(images)
        loss = criterion(outputs, labels)
        loss.backward()
        optimizer.step()

        # Accumulate a Python float.  Accumulating the loss tensor
        # itself keeps the whole autograd graph alive for the entire
        # epoch (memory leak), and .data[0] fails on 0-dim tensors
        # in PyTorch >= 0.4.
        running_loss += loss.item()

    return running_loss / len(train_loader)
def valid(test_loader):
    """Evaluate on the test set; return (mean_loss, accuracy) as floats.

    Args:
        test_loader: DataLoader yielding (images, labels) mini-batches.

    Returns:
        Tuple (val_loss, val_acc): mean per-batch loss and overall
        classification accuracy, both Python floats.
    """
    model.eval()
    running_loss = 0.0
    correct = 0
    total = 0
    # Evaluation needs no gradients; no_grad saves memory and time.
    with torch.no_grad():
        for images, labels in test_loader:
            images = images.view(-1, 28 * 28)
            outputs = model(images)
            # .item() extracts Python numbers; summing tensors here
            # would retain graphs / return tensors instead of floats.
            running_loss += criterion(outputs, labels).item()

            _, predicted = torch.max(outputs, 1)
            correct += (predicted == labels).sum().item()
            total += labels.size(0)

    val_loss = running_loss / len(test_loader)
    val_acc = correct / total
    return val_loss, val_acc
loss_list, val_loss_list, val_acc_list = [], [], []

for epoch in range(num_epochs):
    epoch_loss = train(train_loader)
    epoch_val_loss, epoch_val_acc = valid(test_loader)

    print('epoch %d, loss: %.4f val_loss: %.4f val_acc: %.4f'
          % (epoch, epoch_loss, epoch_val_loss, epoch_val_acc))

    # logging
    loss_list.append(epoch_loss)
    val_loss_list.append(epoch_val_loss)
    val_acc_list.append(epoch_val_acc)
In [8]:
import matplotlib.pyplot as plt
%matplotlib inline
# Learning curves for the MNIST run: losses in one figure, accuracy in
# another.  plt.plot(y) defaults to range(len(y)) on the x-axis, which
# equals num_epochs here.
plt.figure()
plt.plot(loss_list, 'r-', label='train_loss')
plt.plot(val_loss_list, 'b-', label='val_loss')
plt.legend()
plt.grid()

plt.figure()
plt.plot(val_acc_list, 'g-', label='val_acc')
plt.legend()
plt.grid()
In [ ]: