In [6]:
# %load /home/sjkim/.jupyter/head.py
# Standard notebook preamble: inline matplotlib output plus live reloading of
# edited local modules (%autoreload 2 re-imports them before each cell runs).
%matplotlib inline
%load_ext autoreload 
%autoreload 2
from importlib import reload

import matplotlib.pyplot as plt
import numpy as np

import pandas as pd
import os
# Uncomment to pin this kernel to a single GPU device.
#os.environ["CUDA_VISIBLE_DEVICES"]="0"

# seaborn — optional plot styling, disabled by default.
#import seaborn as sns
#sns.set( style = 'white', font_scale = 1.7)
#sns.set_style('ticks')
#plt.rcParams['savefig.dpi'] = 200

# font for matplotlib — optional Korean (NanumGothic) font setup, disabled by default.
#import matplotlib
#import matplotlib.font_manager as fm
#fm.get_fontconfig_fonts()
#font_location = '/usr/share/fonts/truetype/nanum/NanumGothicBold.ttf'
#font_name = fm.FontProperties(fname=font_location).get_name()
#matplotlib.rc('font', family=font_name)


The autoreload extension is already loaded. To reload it, use:
  %reload_ext autoreload

In [2]:
# Import the MNIST ANN classifier module, then force a reload so edits made to
# ex2_1_ann_mnist_cl.py since the first import are picked up by this kernel.
# (`reload` comes from importlib, imported in the preamble cell.)
import ex2_1_ann_mnist_cl
reload(ex2_1_ann_mnist_cl)


Using TensorFlow backend.
Out[2]:
<module 'ex2_1_ann_mnist_cl' from '/home/sjkim/Dropbox/Aspuru-Guzik/python_lab/py3/keraspp/ex2_1_ann_mnist_cl.py'>

In [3]:
ex2_1_ann_mnist_cl.main()


Train on 48000 samples, validate on 12000 samples
Epoch 1/15
48000/48000 [==============================] - 3s - loss: 0.3949 - acc: 0.8909 - val_loss: 0.2161 - val_acc: 0.9404
Epoch 2/15
48000/48000 [==============================] - 2s - loss: 0.1849 - acc: 0.9471 - val_loss: 0.1638 - val_acc: 0.9551
Epoch 3/15
48000/48000 [==============================] - 2s - loss: 0.1372 - acc: 0.9606 - val_loss: 0.1363 - val_acc: 0.9604
Epoch 4/15
48000/48000 [==============================] - 2s - loss: 0.1088 - acc: 0.9679 - val_loss: 0.1277 - val_acc: 0.9647
Epoch 5/15
48000/48000 [==============================] - 2s - loss: 0.0896 - acc: 0.9745 - val_loss: 0.1093 - val_acc: 0.9682
Epoch 6/15
48000/48000 [==============================] - 2s - loss: 0.0745 - acc: 0.9786 - val_loss: 0.1084 - val_acc: 0.9687
Epoch 7/15
48000/48000 [==============================] - 2s - loss: 0.0634 - acc: 0.9817 - val_loss: 0.0996 - val_acc: 0.9694
Epoch 8/15
48000/48000 [==============================] - 1s - loss: 0.0527 - acc: 0.9849 - val_loss: 0.0977 - val_acc: 0.9693
Epoch 9/15
48000/48000 [==============================] - 1s - loss: 0.0449 - acc: 0.9876 - val_loss: 0.0957 - val_acc: 0.9723
Epoch 10/15
48000/48000 [==============================] - 1s - loss: 0.0398 - acc: 0.9887 - val_loss: 0.0936 - val_acc: 0.9734
Epoch 11/15
48000/48000 [==============================] - 2s - loss: 0.0332 - acc: 0.9910 - val_loss: 0.0918 - val_acc: 0.9723
Epoch 12/15
48000/48000 [==============================] - 2s - loss: 0.0280 - acc: 0.9928 - val_loss: 0.0911 - val_acc: 0.9737
Epoch 13/15
48000/48000 [==============================] - 2s - loss: 0.0247 - acc: 0.9935 - val_loss: 0.0962 - val_acc: 0.9725
Epoch 14/15
48000/48000 [==============================] - 2s - loss: 0.0207 - acc: 0.9951 - val_loss: 0.0947 - val_acc: 0.9720
Epoch 15/15
48000/48000 [==============================] - 2s - loss: 0.0180 - acc: 0.9960 - val_loss: 0.0961 - val_acc: 0.9730
 9900/10000 [============================>.] - ETA: 0sTest Loss and Accuracy -> [0.081038881501881405, 0.97699997305870057]

In [ ]: