In [1]:
from keras.models import Sequential, model_from_json
from keras.layers import Dense
import numpy as np
import os

# Fix the RNG seed so weight initialization (and thus results) are reproducible.
np.random.seed(7)

# Pima Indians diabetes dataset: CSV with 8 numeric features + 1 binary label per row.
dataset = np.loadtxt('pima-indians-diabetes.data', delimiter=',')
X = dataset[:, 0:8]  # features: columns 0..7
Y = dataset[:, 8]    # label: column 8 (0/1 diabetes outcome)


Using TensorFlow backend.

In [ ]:
# Build a small fully-connected binary classifier: 8 inputs -> 12 -> 8 -> 1.
# Passing the layer list to the Sequential constructor is equivalent to
# calling .add() once per layer.
model = Sequential([
    Dense(12, input_dim=8, kernel_initializer='uniform', activation='relu'),
    Dense(8, kernel_initializer='uniform', activation='relu'),
    Dense(1, kernel_initializer='uniform', activation='sigmoid'),
])
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
model.fit(X, Y, epochs=150, batch_size=10, verbose=0)

In [3]:
# Evaluate on the training data; scores[0] is the loss, scores[1] the accuracy.
scores = model.evaluate(X, Y, verbose=0)
metric_name = model.metrics_names[1]
accuracy_pct = scores[1] * 100
print('%s: %.2f%%' % (metric_name, accuracy_pct))


acc: 77.60%

In [9]:
# Persist the architecture (JSON) separately from the learned weights (HDF5).
with open('model.json', 'w') as json_file:
    json_file.write(model.to_json())

# Weights go to HDF5 alongside the JSON architecture file.
model.save_weights('model.h5')
print('saved model to disk')


saved model to disk

In [10]:
%ls


170518-keras-mnist.ipynb
170519-lstm-text-generation.ipynb
170522-inception-v3.ipynb
170523-googlenet.ipynb
170525-resnet.ipynb
170526-airline-passengers.ipynb
170526-pima-indians.ipynb
170531-multi-task-learning.ipynb
170602-saliency-map.ipynb
170602-vae2.ipynb
170605-cross-validation.ipynb
170606-ssd.ipynb
170608-babi-rnn.ipynb
170612-mnist-dcgan.ipynb
170612-slack-notifier.ipynb
170613-lfw-dcgan.ipynb
170615-keras-dcgan.ipynb
170615-mnist-dcgan.ipynb
170622-lstm-forecast.ipynb
170704-imdb-cnn-lstm.ipynb
170704-imdb-cnn.ipynb
170711-music-tagging.ipynb
170713-coco-test-1.ipynb
170714-coco-test-2.ipynb
170727-pose-estimation.ipynb
170808-neural-style-transfer.ipynb
170818-neural-style-transfer-examples.ipynb
170908-how-to-develop-lstms-in-keras.ipynb
170914-urban-sound-classification.ipynb
171002-keras-tensorboard.ipynb
171024-my-generator.ipynb
171218-sequence-echo-problem.ipynb
171225-encoder-decoder-with-attention.ipynb
180102-checkpoint.ipynb
180102-dropout-regularization.ipynb
180102-functional-api.ipynb
180102-learning-rate-scheduler.ipynb
180102-save-and-load-model.ipynb
180102-visualize-model.ipynb
__pycache__/
attention_decoder.py
cnn.png
create_pose_estimation_data.py
data/
dcgan_mnist.py
dcgan_mnist1/
hyperas_tutorial.py
ionosphere.data
model.h5
model.json
model.png
model_plot.png
multilayer_perceptron_graph.png
multiple_inputs.png
multiple_output.png
pima-indians-diabetes.data
results/
rnn.png
shared_feature_extractor.png
shared_input_layer.png
sonar.all-data
train_pose_estimation.py
utils2.py
vgg16_avg.py

In [18]:
# load json and create model
# Use a context manager so the file handle is closed even if read() raises,
# instead of the manual open/close pair.
with open('model.json', 'r') as json_file:
    loaded_model_json = json_file.read()

In [19]:
# Rebuild the architecture from the saved JSON description.
loaded_model = model_from_json(loaded_model_json)

# load weights into new model
# The HDF5 weights must match the architecture defined by the JSON above.
loaded_model.load_weights('model.h5')

In [20]:
loaded_model.summary()


_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
dense_1 (Dense)              (None, 12)                108       
_________________________________________________________________
dense_2 (Dense)              (None, 8)                 104       
_________________________________________________________________
dense_3 (Dense)              (None, 1)                 9         
=================================================================
Total params: 221
Trainable params: 221
Non-trainable params: 0
_________________________________________________________________

In [21]:
# Recompile before evaluating. Use 'adam' to match the optimizer the original
# model was trained with (the earlier cell used 'rmsprop', which is inconsistent;
# the optimizer choice does not affect evaluate() results, only further training).
loaded_model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
scores = loaded_model.evaluate(X, Y, verbose=0)
print('%s: %.2f%%' % (loaded_model.metrics_names[1], scores[1] * 100))


acc: 77.60%

In [ ]: