예제 6-2 합성곱 계층을 이용한 AE 전체 코드


In [2]:
# Notebook setup cell: inline plotting, live-reload of edited modules, and
# common imports. (Originally loaded from a personal header via %load.)
# %load /home/sjkim/.jupyter/head.py
%matplotlib inline
# autoreload 2: re-import all modules on every cell execution, so edits to
# ex6_2_ae_conv_mnist_mc.py are picked up without restarting the kernel.
%load_ext autoreload 
%autoreload 2
from importlib import reload

import matplotlib.pyplot as plt
import numpy as np

import pandas as pd
import os
# Uncomment to pin the process to GPU 0 — must run before TensorFlow/Keras is imported.
#os.environ["CUDA_VISIBLE_DEVICES"]="0"

# seaborn
# Optional seaborn styling for figures; left disabled in this run.
#import seaborn as sns
#sns.set( style = 'white', font_scale = 1.7)
#sns.set_style('ticks')
#plt.rcParams['savefig.dpi'] = 200

# font for matplotlib
# Optional Korean (NanumGothic) font registration for matplotlib labels;
# left disabled — path is machine-specific.
#import matplotlib
#import matplotlib.font_manager as fm
#fm.get_fontconfig_fonts()
#font_location = '/usr/share/fonts/truetype/nanum/NanumGothicBold.ttf'
#font_name = fm.FontProperties(fname=font_location).get_name()
#matplotlib.rc('font', family=font_name)

The autoreload extension is already loaded. To reload it, use:
  %reload_ext autoreload

In [2]:
# Local example module — presumably defines the convolutional autoencoder for
# MNIST and its `main()` entry point (not visible here; confirm in the .py file).
# The "Using TensorFlow backend." message below shows it imports Keras on load.
import ex6_2_ae_conv_mnist_mc as example


Using TensorFlow backend.

In [3]:
# Run the full example: per the captured output, this trains for 20 epochs on a
# 48000/12000 train/validation split and ends by printing the test-set input and
# reconstruction shapes, both (10000, 28, 28, 1).
example.main()


Train on 48000 samples, validate on 12000 samples
Epoch 1/20
48000/48000 [==============================] - 5s - loss: 0.2409 - acc: 0.8058 - val_loss: 0.1511 - val_acc: 0.7957
Epoch 2/20
48000/48000 [==============================] - 3s - loss: 0.1323 - acc: 0.8043 - val_loss: 0.1230 - val_acc: 0.8051
Epoch 3/20
48000/48000 [==============================] - 3s - loss: 0.1167 - acc: 0.8082 - val_loss: 0.1147 - val_acc: 0.8120
Epoch 4/20
48000/48000 [==============================] - 3s - loss: 0.1096 - acc: 0.8098 - val_loss: 0.1076 - val_acc: 0.8095
Epoch 5/20
48000/48000 [==============================] - 3s - loss: 0.1054 - acc: 0.8107 - val_loss: 0.1006 - val_acc: 0.8121
Epoch 6/20
48000/48000 [==============================] - 3s - loss: 0.1020 - acc: 0.8113 - val_loss: 0.1023 - val_acc: 0.8109
Epoch 7/20
48000/48000 [==============================] - 3s - loss: 0.1000 - acc: 0.8117 - val_loss: 0.0979 - val_acc: 0.8133
Epoch 8/20
48000/48000 [==============================] - 3s - loss: 0.0983 - acc: 0.8120 - val_loss: 0.0964 - val_acc: 0.8135
Epoch 9/20
48000/48000 [==============================] - 3s - loss: 0.0967 - acc: 0.8122 - val_loss: 0.0980 - val_acc: 0.8120
Epoch 10/20
48000/48000 [==============================] - 3s - loss: 0.0955 - acc: 0.8124 - val_loss: 0.0946 - val_acc: 0.8138
Epoch 11/20
48000/48000 [==============================] - 3s - loss: 0.0948 - acc: 0.8125 - val_loss: 0.0958 - val_acc: 0.8125
Epoch 12/20
48000/48000 [==============================] - 3s - loss: 0.0940 - acc: 0.8127 - val_loss: 0.0952 - val_acc: 0.8142
Epoch 13/20
48000/48000 [==============================] - 3s - loss: 0.0932 - acc: 0.8128 - val_loss: 0.0935 - val_acc: 0.8130
Epoch 14/20
48000/48000 [==============================] - 3s - loss: 0.0926 - acc: 0.8129 - val_loss: 0.0946 - val_acc: 0.8127
Epoch 15/20
48000/48000 [==============================] - 3s - loss: 0.0924 - acc: 0.8129 - val_loss: 0.0934 - val_acc: 0.8144
Epoch 16/20
48000/48000 [==============================] - 3s - loss: 0.0920 - acc: 0.8130 - val_loss: 0.0941 - val_acc: 0.8128
Epoch 17/20
48000/48000 [==============================] - 3s - loss: 0.0918 - acc: 0.8131 - val_loss: 0.0928 - val_acc: 0.8146
Epoch 18/20
48000/48000 [==============================] - 3s - loss: 0.0914 - acc: 0.8131 - val_loss: 0.0938 - val_acc: 0.8129
Epoch 19/20
48000/48000 [==============================] - 3s - loss: 0.0911 - acc: 0.8132 - val_loss: 0.0923 - val_acc: 0.8146
Epoch 20/20
48000/48000 [==============================] - 3s - loss: 0.0906 - acc: 0.8133 - val_loss: 0.0912 - val_acc: 0.8134
(10000, 28, 28, 1) (10000, 28, 28, 1)

In [ ]: