In [1]:
# Classification DNN model implementation ########################
from keras import layers, models
 
class DNN(models.Sequential):
    def __init__(self, Nin, Nh_l, Nout):
        super().__init__()
        # Two fully connected hidden layers with ReLU activations
        self.add(layers.Dense(Nh_l[0], activation='relu',
                 input_shape=(Nin,), name='Hidden-1'))
        self.add(layers.Dense(Nh_l[1], activation='relu',
                 name='Hidden-2'))
        # Softmax output over the Nout classes
        self.add(layers.Dense(Nout, activation='softmax'))
        # Categorical cross-entropy matches the one-hot encoded labels
        self.compile(loss='categorical_crossentropy',
                     optimizer='adam',
                     metrics=['accuracy'])
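
# Subclassing models.Sequential bundles the architecture and the compile
# settings into a single constructor call. For comparison, a minimal sketch
# of the same network written with the Keras functional API; make_dnn is a
# hypothetical helper, not part of the listing above.
from keras import layers, models

def make_dnn(Nin, Nh_l, Nout):
    # Same topology as the DNN class above, built functionally
    x = layers.Input(shape=(Nin,))
    h = layers.Dense(Nh_l[0], activation='relu', name='Hidden-1')(x)
    h = layers.Dense(Nh_l[1], activation='relu', name='Hidden-2')(h)
    y = layers.Dense(Nout, activation='softmax')(h)
    model = models.Model(x, y)
    model.compile(loss='categorical_crossentropy', optimizer='adam',
                  metrics=['accuracy'])
    return model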

        
# Data preparation ##############################
from ann_mnist_cl import Data_func
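
# The ann_mnist_cl module is not listed in this section. A minimal sketch of
# what Data_func is assumed to do, based on standard MNIST preprocessing:
# load the digits, flatten each 28x28 image into a 784-vector, scale pixels
# to [0, 1], and one-hot encode the labels.
from keras import datasets
from keras.utils import np_utils

def Data_func():
    (X_train, y_train), (X_test, y_test) = datasets.mnist.load_data()
    Y_train = np_utils.to_categorical(y_train)  # one-hot encode labels
    Y_test = np_utils.to_categorical(y_test)
    L, W, H = X_train.shape
    X_train = X_train.reshape(-1, W * H).astype('float32') / 255.0
    X_test = X_test.reshape(-1, W * H).astype('float32') / 255.0
    return (X_train, Y_train), (X_test, Y_test)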


# Analysis of training results ##############################
from ann_mnist_cl import plot_loss, plot_acc
import matplotlib.pyplot as plt
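
# plot_loss and plot_acc also come from ann_mnist_cl and are assumed to be
# thin wrappers over the Keras History object, roughly along these lines.
# The 'acc'/'val_acc' key names match the old Keras shown in the logs below;
# newer Keras uses 'accuracy'/'val_accuracy'.
import matplotlib.pyplot as plt

def plot_loss(history):
    plt.plot(history.history['loss'])
    plt.plot(history.history['val_loss'])
    plt.ylabel('Loss')
    plt.xlabel('Epoch')
    plt.legend(['Training', 'Validation'], loc=0)

def plot_acc(history):
    plt.plot(history.history['acc'])
    plt.plot(history.history['val_acc'])
    plt.ylabel('Accuracy')
    plt.xlabel('Epoch')
    plt.legend(['Training', 'Validation'], loc=0)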


# Classification DNN training and testing ####################
def main():
    Nin = 784
    Nh_l = [100, 50]
    number_of_class = 10
    Nout = number_of_class

    (X_train, Y_train), (X_test, Y_test) = Data_func()
    model = DNN(Nin, Nh_l, Nout)
    history = model.fit(X_train, Y_train, epochs=10, batch_size=100, validation_split=0.2)
    
    performance_test = model.evaluate(X_test, Y_test, batch_size=100)
    print('Test Loss and Accuracy ->', performance_test)

    plot_acc(history)
    plt.show()
    plot_loss(history)
    plt.show()
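
# In the standalone script version of this example, the definition above
# would presumably end with the usual entry-point guard so the module can
# also be imported without side effects; in the notebook, cell [3] below
# runs the same steps inline instead.
if __name__ == '__main__':
    main()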


Using TensorFlow backend.

In [3]:
Nin = 784
Nh_l = [100, 50]
number_of_class = 10
Nout = number_of_class

(X_train, Y_train), (X_test, Y_test) = Data_func()
model = DNN(Nin, Nh_l, Nout)
history = model.fit(X_train, Y_train, epochs=10, batch_size=100, validation_split=0.2)

performance_test = model.evaluate(X_test, Y_test, batch_size=100)
print('Test Loss and Accuracy ->', performance_test)

plot_acc(history)
plt.show()
plot_loss(history)
plt.show()


Train on 48000 samples, validate on 12000 samples
Epoch 1/10
48000/48000 [==============================] - 63s - loss: 0.3744 - acc: 0.8948 - val_loss: 0.2011 - val_acc: 0.9424
Epoch 2/10
48000/48000 [==============================] - 2s - loss: 0.1618 - acc: 0.9522 - val_loss: 0.1479 - val_acc: 0.9557
Epoch 3/10
48000/48000 [==============================] - 2s - loss: 0.1160 - acc: 0.9651 - val_loss: 0.1189 - val_acc: 0.9645
Epoch 4/10
48000/48000 [==============================] - 2s - loss: 0.0902 - acc: 0.9731 - val_loss: 0.1060 - val_acc: 0.9657
Epoch 5/10
48000/48000 [==============================] - 2s - loss: 0.0723 - acc: 0.9780 - val_loss: 0.1183 - val_acc: 0.9640
Epoch 6/10
48000/48000 [==============================] - 2s - loss: 0.0598 - acc: 0.9814 - val_loss: 0.0957 - val_acc: 0.9699
Epoch 7/10
48000/48000 [==============================] - 2s - loss: 0.0474 - acc: 0.9851 - val_loss: 0.0938 - val_acc: 0.9734
Epoch 8/10
48000/48000 [==============================] - 2s - loss: 0.0389 - acc: 0.9881 - val_loss: 0.0945 - val_acc: 0.9722
Epoch 9/10
48000/48000 [==============================] - 2s - loss: 0.0323 - acc: 0.9904 - val_loss: 0.0913 - val_acc: 0.9745
Epoch 10/10
48000/48000 [==============================] - 2s - loss: 0.0284 - acc: 0.9913 - val_loss: 0.0946 - val_acc: 0.9747
Test Loss and Accuracy -> [0.089154908367272578, 0.97389999389648441]
