In [3]:
import pandas as pd
import numpy as np
from keras.models import Sequential
from keras.layers import Dense
from keras import losses
from keras import datasets # mnist
from keras.utils import np_utils
import matplotlib.pyplot as plt
import seaborn as sns
sns.set()
import os
os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"]="2"
In [4]:
# check the data
(X_train, y_train), (X_test, y_test) = datasets.mnist.load_data()
# normalize pixel values from [0, 255] to [0, 1]
X_train = X_train / 255.0
X_test = X_test / 255.0
print(X_train.shape)
print(y_train.shape)
print(X_test.shape)
print(y_test.shape)
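A quick sanity check, not in the original run, that the scaling behaves as expected:
In [ ]:
# after dividing by 255, pixel values should lie in [0, 1]
print(X_train.min(), X_train.max())  # expected: 0.0 1.0
print(X_train.dtype)                 # float64 after the division (uint8 / float)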
In [5]:
print(X_train[0][:2])
print(y_train[0])
In [6]:
plt.figure(figsize=(6,12))
for i in range(6):
    n = np.random.randint(0, len(X_train))
    plt.subplot(320 + 1 + i)
    plt.imshow(X_train[n])
    plt.title(y_train[n])
In [7]:
Y_train = np_utils.to_categorical(y_train)
Y_test = np_utils.to_categorical(y_test)
n = np.random.randint(0, 100)
print(y_train[n])
print(Y_train[n])
print(y_test[n])
print(Y_test[n])
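For reference, `to_categorical` is equivalent to indexing an identity matrix; a minimal NumPy sketch of the same encoding (the `Y_manual` name is illustrative):
In [ ]:
# one-hot encode with np.eye: row i of the identity matrix is the code for class i
Y_manual = np.eye(10)[y_train]
print(np.array_equal(Y_manual, Y_train))  # should print True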
In [8]:
X_train = X_train.reshape(-1, X_train.shape[1]*X_train.shape[2])
X_test = X_test.reshape(-1, X_test.shape[1]*X_test.shape[2])
print(X_train.shape)
print(X_test.shape)
In [9]:
class ANN(Sequential):
    def __init__(self, input_dim, hidden_dim, output_dim):
        super().__init__()
        self.add(Dense(hidden_dim, input_dim=input_dim, activation="relu", name="input_Layer"))
        self.add(Dense(output_dim, activation="softmax", name="output_Layer"))
        self.compile(loss=losses.categorical_crossentropy,
                     optimizer="adam", metrics=['acc'])
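Subclassing `Sequential` here just bundles the layer stack with the compile call; for comparison, a minimal inline sketch of the same architecture (the `ann_inline` name is illustrative and unused below):
In [ ]:
# the same model built without the subclass
ann_inline = Sequential()
ann_inline.add(Dense(100, input_dim=784, activation="relu"))
ann_inline.add(Dense(10, activation="softmax"))
ann_inline.compile(loss=losses.categorical_crossentropy, optimizer="adam", metrics=['acc'])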
In [10]:
# define the model dimensions
input_dim = X_train.shape[1]
hidden_dim = 100
output_dim = Y_train.shape[1]
# create the model
model = ANN(input_dim, hidden_dim, output_dim)
In [11]:
model.summary()
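The summary's parameter count can be verified by hand: a Dense layer has inputs × units weights plus units biases. A small arithmetic check:
In [ ]:
# Dense params = inputs * units + units (biases)
hidden_params = 784 * 100 + 100   # 78,500
output_params = 100 * 10 + 10     # 1,010
print(hidden_params + output_params)  # 79,510 -- should match model.summary()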
In [12]:
%%time
history = model.fit(X_train, Y_train, epochs=10, batch_size=100, validation_split=0.2)
In [13]:
loss, accuracy = model.evaluate(X_test, Y_test, batch_size=100)
print("Test Loss\t:\t{:2.4f}".format(loss))
print("Test Accuracy\t:\t{:2.4f}".format(accuracy))
In [14]:
h = pd.DataFrame(history.history)
h = h.reset_index()
In [15]:
ax = h.plot(x="index", y=["loss", "val_loss"])
ax.set_ylabel("loss")
ax2 = ax.twinx()
ax2.set_ylabel("acc")
h.plot(x="index", y=["acc", "val_acc"], ax=ax2, colormap='viridis',)
ax.set_xlabel("epochs")
plt.grid(False)
plt.show()
In [16]:
yhat_test = model.predict(X_test, batch_size=32)
In [17]:
plt.figure(figsize=(6,12))
for i in range(6):
    n = np.random.randint(0, len(X_test))
    plt.subplot(320 + 1 + i)
    plt.imshow(X_test[n].reshape(28, 28))
    plt.title("R:{}, P:{}".format(y_test[n], np.argmax(yhat_test[n])))  # R: real label, P: predicted
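Beyond spot-checking six samples, an aggregate error count over the whole test set (a small addition, assuming `yhat_test` from the cell above):
In [ ]:
# compare the argmax of the softmax outputs against the integer labels
wrong = np.sum(np.argmax(yhat_test, axis=1) != y_test)
print("misclassified: {} / {}".format(wrong, len(y_test)))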
In [18]:
## Additional: Deep NN
In [19]:
class DNN(Sequential):
    def __init__(self, input_dim, hidden_dim, output_dim, depth=5):
        super().__init__()
        self.add(Dense(hidden_dim, input_dim=input_dim, activation="relu", name="input_Layer"))
        for x in range(depth):
            self.add(Dense(hidden_dim, activation="relu", name="hidden_layer_{:02}".format(x)))
        self.add(Dense(output_dim, activation="softmax", name="output_Layer"))
        self.compile(loss=losses.categorical_crossentropy,
                     optimizer="adam", metrics=['acc'])
In [20]:
# define the model dimensions
input_dim = X_train.shape[1]
hidden_dim = 100
output_dim = Y_train.shape[1]
# create the model
dnn = DNN(input_dim, hidden_dim, output_dim, depth=20)
In [21]:
dnn.summary()
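The same per-layer formula accounts for the deeper model's parameter count, assuming the constructor above (one entry layer, 20 hidden layers of 100 units, one output layer):
In [ ]:
entry  = 784 * 100 + 100          # 78,500
hidden = 20 * (100 * 100 + 100)   # 20 * 10,100 = 202,000
output = 100 * 10 + 10            # 1,010
print(entry + hidden + output)    # 281,510 -- should match dnn.summary()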
In [22]:
history = dnn.fit(X_train, Y_train, epochs=10, batch_size=100, validation_split=0.2)
In [23]:
loss, accuracy = dnn.evaluate(X_test, Y_test, batch_size=100)  # evaluate the DNN, not the earlier ANN
print("Test Loss\t:\t{:2.4f}".format(loss))
print("Test Accuracy\t:\t{:2.4f}".format(accuracy))
In [24]:
h = pd.DataFrame(history.history)
h = h.reset_index()
In [25]:
ax = h.plot(x="index", y=["loss", "val_loss"])
ax.set_ylabel("loss")
ax2 = ax.twinx()
ax2.set_ylabel("acc")
h.plot(x="index", y=["acc", "val_acc"], ax=ax2, colormap='viridis',)
ax.set_xlabel("epochs")
plt.grid(False)
plt.show()
In [26]:
yhat_test = dnn.predict(X_test, batch_size=32)  # predict with the DNN, not the earlier ANN
In [27]:
plt.figure(figsize=(6,12))
for i in range(6):
    n = np.random.randint(0, len(X_test))
    plt.subplot(320 + 1 + i)
    plt.imshow(X_test[n].reshape(28, 28))
    plt.title("R:{}, P:{}".format(y_test[n], np.argmax(yhat_test[n])))