In [1]:
import numpy as np
from keras.models import Model
from keras.layers import Input
from keras.layers.normalization import BatchNormalization
from keras import backend as K
import json
from collections import OrderedDict
In [2]:
def format_decimal(arr, places=8):
    """Round every value in `arr` to `places` decimal places.

    Returns a new list; used to keep the generated JSON fixtures compact
    and stable across runs.
    """
    scale = 10 ** places
    return [round(value * scale) / scale for value in arr]
In [3]:
# Accumulates one fixture per test case (input, weights, expected output),
# keyed by test-case name; OrderedDict preserves case order in the JSON dump.
DATA = OrderedDict()
[normalization.BatchNormalization.0] epsilon=1e-05, axis=-1, center=True, scale=True
In [4]:
data_in_shape = (4, 3)
norm = BatchNormalization(epsilon=1e-05, axis=-1, center=True, scale=True)
layer_0 = Input(shape=data_in_shape)
layer_1 = norm(layer_0)
model = Model(inputs=layer_0, outputs=layer_1)

# Replace the freshly initialized weights with seeded random values so the
# generated fixture is reproducible from run to run.
weights = []
for idx, template in enumerate(model.get_weights()):
    np.random.seed(1000 + idx)
    sample = np.random.random(template.shape)
    # index 3 is moving_variance, which must remain positive;
    # every other weight is drawn uniformly from [-1, 1)
    weights.append(sample if idx == 3 else 2 * sample - 1)
model.set_weights(weights)

for label, w in zip(['gamma', 'beta', 'moving_mean', 'moving_variance'], weights):
    print(label, 'shape:', w.shape)
    print(label + ':', format_decimal(w.ravel().tolist()))

# Push a random input through the layer and capture the expected output.
data_in = 2 * np.random.random(data_in_shape) - 1
result = model.predict(np.array([data_in]))
data_out_shape = result[0].shape
data_in_formatted = format_decimal(data_in.ravel().tolist())
data_out_formatted = format_decimal(result[0].ravel().tolist())

print('')
print('in shape:', data_in_shape)
print('in:', data_in_formatted)
print('out shape:', data_out_shape)
print('out:', data_out_formatted)

DATA['normalization.BatchNormalization.0'] = {
    'input': {'data': data_in_formatted, 'shape': data_in_shape},
    'weights': [{'data': format_decimal(w.ravel().tolist()), 'shape': w.shape} for w in weights],
    'expected': {'data': data_out_formatted, 'shape': data_out_shape}
}
[normalization.BatchNormalization.1] epsilon=1e-02, axis=-1, center=True, scale=True
In [5]:
data_in_shape = (4, 3)
norm = BatchNormalization(epsilon=1e-02, axis=-1, center=True, scale=True)
layer_0 = Input(shape=data_in_shape)
layer_1 = norm(layer_0)
model = Model(inputs=layer_0, outputs=layer_1)

# Replace the freshly initialized weights with seeded random values so the
# generated fixture is reproducible from run to run.
weights = []
for idx, template in enumerate(model.get_weights()):
    np.random.seed(1010 + idx)
    sample = np.random.random(template.shape)
    # index 3 is moving_variance, which must remain positive;
    # every other weight is drawn uniformly from [-1, 1)
    weights.append(sample if idx == 3 else 2 * sample - 1)
model.set_weights(weights)

for label, w in zip(['gamma', 'beta', 'moving_mean', 'moving_variance'], weights):
    print(label, 'shape:', w.shape)
    print(label + ':', format_decimal(w.ravel().tolist()))

# Push a random input through the layer and capture the expected output.
data_in = 2 * np.random.random(data_in_shape) - 1
result = model.predict(np.array([data_in]))
data_out_shape = result[0].shape
data_in_formatted = format_decimal(data_in.ravel().tolist())
data_out_formatted = format_decimal(result[0].ravel().tolist())

print('')
print('in shape:', data_in_shape)
print('in:', data_in_formatted)
print('out shape:', data_out_shape)
print('out:', data_out_formatted)

DATA['normalization.BatchNormalization.1'] = {
    'input': {'data': data_in_formatted, 'shape': data_in_shape},
    'weights': [{'data': format_decimal(w.ravel().tolist()), 'shape': w.shape} for w in weights],
    'expected': {'data': data_out_formatted, 'shape': data_out_shape}
}
[normalization.BatchNormalization.2] epsilon=1e-05, axis=1, center=True, scale=True
In [6]:
data_in_shape = (4, 3, 2)
# axis=1 normalizes over the first non-batch dimension of the 4D batched tensor
norm = BatchNormalization(epsilon=1e-05, axis=1, center=True, scale=True)
layer_0 = Input(shape=data_in_shape)
layer_1 = norm(layer_0)
model = Model(inputs=layer_0, outputs=layer_1)

# Replace the freshly initialized weights with seeded random values so the
# generated fixture is reproducible from run to run.
weights = []
for idx, template in enumerate(model.get_weights()):
    np.random.seed(1020 + idx)
    sample = np.random.random(template.shape)
    # index 3 is moving_variance, which must remain positive;
    # every other weight is drawn uniformly from [-1, 1)
    weights.append(sample if idx == 3 else 2 * sample - 1)
model.set_weights(weights)

for label, w in zip(['gamma', 'beta', 'moving_mean', 'moving_variance'], weights):
    print(label, 'shape:', w.shape)
    print(label + ':', format_decimal(w.ravel().tolist()))

# Push a random input through the layer and capture the expected output.
data_in = 2 * np.random.random(data_in_shape) - 1
result = model.predict(np.array([data_in]))
data_out_shape = result[0].shape
data_in_formatted = format_decimal(data_in.ravel().tolist())
data_out_formatted = format_decimal(result[0].ravel().tolist())

print('')
print('in shape:', data_in_shape)
print('in:', data_in_formatted)
print('out shape:', data_out_shape)
print('out:', data_out_formatted)

DATA['normalization.BatchNormalization.2'] = {
    'input': {'data': data_in_formatted, 'shape': data_in_shape},
    'weights': [{'data': format_decimal(w.ravel().tolist()), 'shape': w.shape} for w in weights],
    'expected': {'data': data_out_formatted, 'shape': data_out_shape}
}
[normalization.BatchNormalization.3] epsilon=1e-05, axis=2, center=True, scale=True
In [7]:
data_in_shape = (4, 3, 2)
norm = BatchNormalization(epsilon=1e-05, axis=2, center=True, scale=True)
layer_0 = Input(shape=data_in_shape)
layer_1 = norm(layer_0)
model = Model(inputs=layer_0, outputs=layer_1)

# Replace the freshly initialized weights with seeded random values so the
# generated fixture is reproducible from run to run.
weights = []
for idx, template in enumerate(model.get_weights()):
    np.random.seed(1030 + idx)
    sample = np.random.random(template.shape)
    # index 3 is moving_variance, which must remain positive;
    # every other weight is drawn uniformly from [-1, 1)
    weights.append(sample if idx == 3 else 2 * sample - 1)
model.set_weights(weights)

for label, w in zip(['gamma', 'beta', 'moving_mean', 'moving_variance'], weights):
    print(label, 'shape:', w.shape)
    print(label + ':', format_decimal(w.ravel().tolist()))

# Push a random input through the layer and capture the expected output.
data_in = 2 * np.random.random(data_in_shape) - 1
result = model.predict(np.array([data_in]))
data_out_shape = result[0].shape
data_in_formatted = format_decimal(data_in.ravel().tolist())
data_out_formatted = format_decimal(result[0].ravel().tolist())

print('')
print('in shape:', data_in_shape)
print('in:', data_in_formatted)
print('out shape:', data_out_shape)
print('out:', data_out_formatted)

DATA['normalization.BatchNormalization.3'] = {
    'input': {'data': data_in_formatted, 'shape': data_in_shape},
    'weights': [{'data': format_decimal(w.ravel().tolist()), 'shape': w.shape} for w in weights],
    'expected': {'data': data_out_formatted, 'shape': data_out_shape}
}
[normalization.BatchNormalization.4] epsilon=1e-05, axis=3, center=True, scale=False
In [8]:
data_in_shape = (4, 3, 2)
# scale=False drops gamma, so the layer has only beta, moving_mean, moving_variance
norm = BatchNormalization(epsilon=1e-05, axis=3, center=True, scale=False)
layer_0 = Input(shape=data_in_shape)
layer_1 = norm(layer_0)
model = Model(inputs=layer_0, outputs=layer_1)

# Replace the freshly initialized weights with seeded random values so the
# generated fixture is reproducible from run to run.
weights = []
for idx, template in enumerate(model.get_weights()):
    np.random.seed(1040 + idx)
    sample = np.random.random(template.shape)
    # index 2 is moving_variance, which must remain positive;
    # every other weight is drawn uniformly from [-1, 1)
    weights.append(sample if idx == 2 else 2 * sample - 1)
model.set_weights(weights)

for label, w in zip(['beta', 'moving_mean', 'moving_variance'], weights):
    print(label, 'shape:', w.shape)
    print(label + ':', format_decimal(w.ravel().tolist()))

# Push a random input through the layer and capture the expected output.
data_in = 2 * np.random.random(data_in_shape) - 1
result = model.predict(np.array([data_in]))
data_out_shape = result[0].shape
data_in_formatted = format_decimal(data_in.ravel().tolist())
data_out_formatted = format_decimal(result[0].ravel().tolist())

print('')
print('in shape:', data_in_shape)
print('in:', data_in_formatted)
print('out shape:', data_out_shape)
print('out:', data_out_formatted)

DATA['normalization.BatchNormalization.4'] = {
    'input': {'data': data_in_formatted, 'shape': data_in_shape},
    'weights': [{'data': format_decimal(w.ravel().tolist()), 'shape': w.shape} for w in weights],
    'expected': {'data': data_out_formatted, 'shape': data_out_shape}
}
[normalization.BatchNormalization.5] epsilon=1e-05, axis=-1, center=False, scale=True
In [9]:
data_in_shape = (4, 3, 2)
# center=False drops beta, so the layer has only gamma, moving_mean, moving_variance
norm = BatchNormalization(epsilon=1e-05, axis=-1, center=False, scale=True)
layer_0 = Input(shape=data_in_shape)
layer_1 = norm(layer_0)
model = Model(inputs=layer_0, outputs=layer_1)

# Replace the freshly initialized weights with seeded random values so the
# generated fixture is reproducible from run to run.
weights = []
for idx, template in enumerate(model.get_weights()):
    np.random.seed(1050 + idx)
    sample = np.random.random(template.shape)
    # index 2 is moving_variance, which must remain positive;
    # every other weight is drawn uniformly from [-1, 1)
    weights.append(sample if idx == 2 else 2 * sample - 1)
model.set_weights(weights)

for label, w in zip(['gamma', 'moving_mean', 'moving_variance'], weights):
    print(label, 'shape:', w.shape)
    print(label + ':', format_decimal(w.ravel().tolist()))

# Push a random input through the layer and capture the expected output.
data_in = 2 * np.random.random(data_in_shape) - 1
result = model.predict(np.array([data_in]))
data_out_shape = result[0].shape
data_in_formatted = format_decimal(data_in.ravel().tolist())
data_out_formatted = format_decimal(result[0].ravel().tolist())

print('')
print('in shape:', data_in_shape)
print('in:', data_in_formatted)
print('out shape:', data_out_shape)
print('out:', data_out_formatted)

DATA['normalization.BatchNormalization.5'] = {
    'input': {'data': data_in_formatted, 'shape': data_in_shape},
    'weights': [{'data': format_decimal(w.ravel().tolist()), 'shape': w.shape} for w in weights],
    'expected': {'data': data_out_formatted, 'shape': data_out_shape}
}
[normalization.BatchNormalization.6] epsilon=0.001, axis=-1, center=False, scale=False
In [10]:
data_in_shape = (4, 3, 2)
# center=False and scale=False leave only the moving statistics as weights
norm = BatchNormalization(epsilon=0.001, axis=-1, center=False, scale=False)
layer_0 = Input(shape=data_in_shape)
layer_1 = norm(layer_0)
model = Model(inputs=layer_0, outputs=layer_1)

# Replace the freshly initialized weights with seeded random values so the
# generated fixture is reproducible from run to run.
weights = []
for idx, template in enumerate(model.get_weights()):
    np.random.seed(1060 + idx)
    sample = np.random.random(template.shape)
    # index 1 is moving_variance, which must remain positive;
    # moving_mean is drawn uniformly from [-1, 1)
    weights.append(sample if idx == 1 else 2 * sample - 1)
model.set_weights(weights)

for label, w in zip(['moving_mean', 'moving_variance'], weights):
    print(label, 'shape:', w.shape)
    print(label + ':', format_decimal(w.ravel().tolist()))

# Push a random input through the layer and capture the expected output.
data_in = 2 * np.random.random(data_in_shape) - 1
result = model.predict(np.array([data_in]))
data_out_shape = result[0].shape
data_in_formatted = format_decimal(data_in.ravel().tolist())
data_out_formatted = format_decimal(result[0].ravel().tolist())

print('')
print('in shape:', data_in_shape)
print('in:', data_in_formatted)
print('out shape:', data_out_shape)
print('out:', data_out_formatted)

DATA['normalization.BatchNormalization.6'] = {
    'input': {'data': data_in_formatted, 'shape': data_in_shape},
    'weights': [{'data': format_decimal(w.ravel().tolist()), 'shape': w.shape} for w in weights],
    'expected': {'data': data_out_formatted, 'shape': data_out_shape}
}
In [11]:
import os

# Serialize the accumulated fixtures to the shared test-data directory.
filename = '../../../test/data/layers/normalization/BatchNormalization.json'
# exist_ok=True replaces the racy exists()-then-makedirs() check.
os.makedirs(os.path.dirname(filename), exist_ok=True)
with open(filename, 'w') as f:
    json.dump(DATA, f)
In [12]:
# Echo the full fixture payload into the notebook output for quick inspection.
print(json.dumps(DATA))
In [ ]: