In [1]:
from __future__ import print_function
# to be able to see plots
%matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
try:
    import cPickle  # Python 2
except ImportError:
    import pickle as cPickle  # Python 3 fallback
import sys
sys.path.append("../tools")
from tools import collage
# Limit TensorFlow to a fraction of GPU memory so the GPU can be shared.
# Not needed on a dedicated machine.
gpu_memory_usage = 0.33
import tensorflow as tf
from keras.backend.tensorflow_backend import set_session
config = tf.ConfigProto()
config.gpu_options.per_process_gpu_memory_fraction = gpu_memory_usage
set_session(tf.Session(config=config))
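On TensorFlow 2 / tf.keras the ConfigProto/Session API above no longer exists; a rough equivalent sketch (an assumption, not tested against this notebook's library versions) is:
In [ ]:
# TF2 sketch of the same GPU-sharing idea
gpus = tf.config.experimental.list_physical_devices('GPU')
if gpus:
    # Allocate GPU memory on demand instead of grabbing it all up front
    tf.config.experimental.set_memory_growth(gpus[0], True)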
In [2]:
from tools import readCIFAR, mapLabelsOneHot
# First run ../data/downloadCIFAR.sh
trnData, tstData, trnLabels, tstLabels = readCIFAR('../data/cifar-10-batches-py')
plt.subplot(1, 2, 1)
img = collage(trnData[:16])
print(img.shape)
plt.imshow(img)
plt.subplot(1, 2, 2)
img = collage(tstData[:16])
plt.imshow(img)
plt.show()
trnLabels = mapLabelsOneHot(trnLabels)
tstLabels = mapLabelsOneHot(tstLabels)
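mapLabelsOneHot comes from the course tools module; for reference, a minimal NumPy equivalent (a sketch assuming the labels are integer class indices 0..9):
In [ ]:
# Minimal one-hot encoding sketch (assumes integer labels in [0, 10))
def one_hot(labels, num_classes=10):
    return np.eye(num_classes, dtype=np.float32)[np.asarray(labels, dtype=np.int64)]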
In [3]:
# Rescale pixel values from [0, 255] to [-0.5, 0.5]
trnData = trnData.astype(np.float32) / 255.0 - 0.5
tstData = tstData.astype(np.float32) / 255.0 - 0.5
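A quick sanity check (not in the original notebook) confirms the rescaling:
In [ ]:
# Values should now lie in [-0.5, 0.5]
print(trnData.dtype, trnData.min(), trnData.max())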
In [4]:
from keras.layers import Input, Reshape, Dense, Dropout, Flatten
from keras.layers import Activation
from keras.models import Model
from keras import regularizers
w_decay = 0.0001
w_reg = regularizers.l2(w_decay)
def get_simple_FC_network(input_data, layer_count, layer_dim):
    # Flatten the 32x32x3 image into a vector, then stack fully connected layers
    net = Flatten()(input_data)
    for i in range(layer_count):
        # L2 weight decay via the w_reg regularizer defined above
        net = Dense(layer_dim, activation='relu', kernel_regularizer=w_reg)(net)
    # 10-way softmax output, one unit per CIFAR-10 class
    net = Dense(10, name='out', activation='softmax')(net)
    return net
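Dropout is imported above but never used; a hedged variant (a sketch, not part of the original exercise) that adds dropout after each hidden layer:
In [ ]:
# Sketch: same topology with dropout regularization between hidden layers
def get_FC_network_with_dropout(input_data, layer_count, layer_dim, drop_rate=0.5):
    net = Flatten()(input_data)
    for i in range(layer_count):
        net = Dense(layer_dim, activation='relu')(net)
        net = Dropout(drop_rate)(net)
    net = Dense(10, name='out', activation='softmax')(net)
    return net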
In [20]:
from keras import optimizers
from keras.models import Model
from keras import losses
from keras import metrics
input_data = Input(shape=(32, 32, 3), name='data')
net = get_simple_FC_network(input_data, 5, 256)
model = Model(inputs=[input_data], outputs=[net])
# Build the fully connected classification model
print('Model')
model.summary()
model.compile(
    loss=losses.categorical_crossentropy,
    optimizer=optimizers.Adam(lr=0.001),
    metrics=[metrics.categorical_accuracy])
In [26]:
from keras import backend
history = model.fit(
    x=trnData, y=trnLabels,
    batch_size=64, epochs=10, verbose=1,
    validation_data=(tstData, tstLabels), shuffle=True)
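The History object returned by fit records per-epoch metrics; a short follow-up (not in the original notebook) plots the learning curves:
In [ ]:
# Plot training vs. validation loss from the History object
plt.plot(history.history['loss'], label='train loss')
plt.plot(history.history['val_loss'], label='val loss')
plt.xlabel('epoch')
plt.ylabel('loss')
plt.legend()
plt.show()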
In [41]:
classProb = model.predict(x=tstData[0:2])
print('Class probabilities:', classProb, '\n')
loss, acc = model.evaluate(x=tstData, y=tstLabels, batch_size=1024)
print()
print('loss', loss)
print('acc', acc)
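To make the two predicted distributions above readable, a sketch that maps argmax indices to names, assuming the standard CIFAR-10 class order:
In [ ]:
# Standard CIFAR-10 class order (assumption: readCIFAR preserves it)
cifar10_classes = ['airplane', 'automobile', 'bird', 'cat', 'deer',
                   'dog', 'frog', 'horse', 'ship', 'truck']
for i, p in enumerate(classProb):
    print('sample', i, '->', cifar10_classes[p.argmax()], '(p=%.3f)' % p.max())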
In [47]:
classProb = model.predict(x=tstData)
print(classProb.shape)
# Probability assigned to the true class of each test image
correctProb = (classProb * tstLabels).sum(axis=1)
# Highest probability assigned to any wrong class
wrongProb = (classProb * (1-tstLabels)).max(axis=1)
print(correctProb.shape, wrongProb.shape)
# A sample is classified correctly iff the true class scores highest
accuracy = (correctProb > wrongProb).mean()
print('Accuracy: ', accuracy)
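The same number can be cross-checked with a plain argmax comparison (equivalent as long as there are no probability ties):
In [ ]:
# Cross-check: argmax-based accuracy should match the value above
predicted = classProb.argmax(axis=1)
true = tstLabels.argmax(axis=1)
print('Accuracy (argmax):', (predicted == true).mean())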