In [1]:
import numpy as np
from keras.models import Model
from keras.layers import Input
from keras.layers.recurrent import GRU
from keras import backend as K
import json
from collections import OrderedDict


Using TensorFlow backend.

In [2]:
def format_decimal(arr, places=6):
    """Round every value of ``arr`` to ``places`` decimal places.

    Scales each value up, rounds to the nearest integer, and scales back
    down, returning a new list of plain floats (default precision: 6).
    """
    scale = 10 ** places
    return [round(value * scale) / scale for value in arr]

In [3]:
DATA = OrderedDict()

GRU

[recurrent.GRU.0] units=4, activation='tanh', recurrent_activation='hard_sigmoid'

Note: dropout_W and dropout_U are applied only during the training phase


In [4]:
# Fixture [recurrent.GRU.0]: 3 timesteps x 6 features into a 4-unit GRU.
data_in_shape = (3, 6)
rnn = GRU(4, activation='tanh', recurrent_activation='hard_sigmoid')

layer_0 = Input(shape=data_in_shape)
layer_1 = rnn(layer_0)
model = Model(inputs=layer_0, outputs=layer_1)

# set weights to random (use seed for reproducibility)
weights = []
for i, w in enumerate(model.get_weights()):
    # One seed per weight tensor so each is individually reproducible.
    np.random.seed(3200 + i)
    weights.append(2 * np.random.random(w.shape) - 1)  # uniform in [-1, 1)
model.set_weights(weights)
# W = kernel, U = recurrent kernel, b = bias (matches printed shapes below).
weight_names = ['W', 'U', 'b']
for w_i, w_name in enumerate(weight_names):
    print('{} shape:'.format(w_name), weights[w_i].shape)
    print('{}:'.format(w_name), format_decimal(weights[w_i].ravel().tolist()))

# NOTE(review): data_in consumes the RNG state left by the last seeded loop
# iteration above — reordering these statements would change the fixture.
data_in = 2 * np.random.random(data_in_shape) - 1
result = model.predict(np.array([data_in]))  # batch of one sample
data_out_shape = result[0].shape
data_in_formatted = format_decimal(data_in.ravel().tolist())
data_out_formatted = format_decimal(result[0].ravel().tolist())
print('')
print('in shape:', data_in_shape)
print('in:', data_in_formatted)
print('out shape:', data_out_shape)
print('out:', data_out_formatted)

# Record input, weights, and expected output for this configuration.
DATA['recurrent.GRU.0'] = {
    'input': {'data': data_in_formatted, 'shape': data_in_shape},
    'weights': [{'data': format_decimal(w.ravel().tolist()), 'shape': w.shape} for w in weights],
    'expected': {'data': data_out_formatted, 'shape': data_out_shape}
}


W shape: (6, 12)
W: [0.697096, 0.937488, -0.449098, -0.484192, -0.296977, 0.766173, 0.375647, -0.31032, -0.893983, 0.551514, 0.512208, -0.022663, -0.777151, 0.762656, 0.955093, -0.7102, -0.343035, 0.429084, -0.176999, -0.504458, -0.978595, 0.01322, 0.785201, 0.872206, -0.944044, 0.136217, -0.501474, 0.860549, 0.400717, -0.952791, -0.724148, -0.777265, 0.969193, -0.9457, -0.88104, 0.573352, -0.53497, 0.543619, 0.248223, -0.550226, 0.764797, 0.219472, -0.974674, -0.096673, 0.125632, 0.176088, -0.007492, -0.416477, -0.893533, 0.022808, -0.815785, 0.623421, -0.805923, -0.797787, 0.764992, -0.673555, -0.713329, 0.799281, 0.980194, -0.395521, 0.537878, -0.777262, -0.006721, 0.93244, 0.750308, 0.268049, 0.878764, 0.172846, 0.613674, 0.733389, -0.18969, -0.281979]
U shape: (4, 12)
U: [0.293987, 0.510798, -0.867003, -0.537004, 0.153043, 0.868432, 0.303538, -0.833902, -0.421654, 0.022877, -0.490379, 0.830018, -0.568055, 0.362359, -0.964449, -0.883199, 0.980361, -0.398021, -0.145153, -0.875784, -0.82698, -0.832323, 0.522688, -0.290755, -0.102632, 0.516158, 0.776809, -0.635952, -0.301458, 0.321256, -0.257592, 0.457013, -0.483288, -0.684349, -0.141722, 0.44671, 0.385804, -0.557622, -0.200272, -0.195853, 0.144566, -0.188024, 0.569759, -0.81958, -0.992319, 0.752181, 0.1356, 0.572831]
b shape: (12,)
b: [0.300373, -0.397273, -0.197073, 0.545033, -0.983067, 0.346379, 0.955756, 0.958477, -0.57945, 0.7951, 0.368559, -0.906396]

in shape: (3, 6)
in: [-0.096074, 0.639699, 0.415126, 0.709671, -0.932882, 0.360813, 0.055085, -0.150315, -0.825055, 0.664181, -0.893701, -0.63904, -0.341407, 0.479979, 0.168984, -0.374535, 0.02818, -0.765662]
out shape: (4,)
out: [-0.453688, -0.088839, 0.237924, -0.523194]

In [5]:
[w.shape for w in model.get_weights()]


Out[5]:
[(6, 12), (4, 12), (12,)]

[recurrent.GRU.1] units=5, activation='sigmoid', recurrent_activation='sigmoid'

Note: dropout_W and dropout_U are applied only during the training phase


In [6]:
# Fixture [recurrent.GRU.1]: 8 timesteps x 5 features into a 5-unit GRU
# with sigmoid for both the output and recurrent activations.
data_in_shape = (8, 5)
rnn = GRU(5, activation='sigmoid', recurrent_activation='sigmoid')

layer_0 = Input(shape=data_in_shape)
layer_1 = rnn(layer_0)
model = Model(inputs=layer_0, outputs=layer_1)

# set weights to random (use seed for reproducibility)
weights = []
for i, w in enumerate(model.get_weights()):
    np.random.seed(3300 + i)  # one seed per weight tensor
    weights.append(2 * np.random.random(w.shape) - 1)  # uniform in [-1, 1)
model.set_weights(weights)
weight_names = ['W', 'U', 'b']  # kernel, recurrent kernel, bias
for w_i, w_name in enumerate(weight_names):
    print('{} shape:'.format(w_name), weights[w_i].shape)
    print('{}:'.format(w_name), format_decimal(weights[w_i].ravel().tolist()))

# NOTE(review): data_in relies on the RNG state left by the loop above.
data_in = 2 * np.random.random(data_in_shape) - 1
result = model.predict(np.array([data_in]))  # batch of one sample
data_out_shape = result[0].shape
data_in_formatted = format_decimal(data_in.ravel().tolist())
data_out_formatted = format_decimal(result[0].ravel().tolist())
print('')
print('in shape:', data_in_shape)
print('in:', data_in_formatted)
print('out shape:', data_out_shape)
print('out:', data_out_formatted)

# Record input, weights, and expected output for this configuration.
DATA['recurrent.GRU.1'] = {
    'input': {'data': data_in_formatted, 'shape': data_in_shape},
    'weights': [{'data': format_decimal(w.ravel().tolist()), 'shape': w.shape} for w in weights],
    'expected': {'data': data_out_formatted, 'shape': data_out_shape}
}


W shape: (5, 15)
W: [0.299086, -0.606833, -0.606176, -0.787071, 0.651687, 0.533268, -0.031304, 0.761436, -0.233954, 0.250473, 0.336694, -0.819566, 0.386506, -0.310632, 0.534265, 0.326778, 0.986252, 0.550256, -0.428584, 0.729528, 0.753243, 0.052566, 0.112301, 0.943392, 0.84211, -0.032087, -0.617971, 0.363577, 0.075713, 0.981932, -0.449437, -0.591187, 0.139301, 0.590188, -0.713359, 0.848149, 0.620145, -0.334172, -0.684686, 0.235886, 0.906112, -0.58247, -0.606377, 0.399036, -0.040617, 0.66917, 0.945858, -0.222578, 0.448616, -0.670496, 0.969414, 0.702519, 0.544102, -0.795606, -0.477415, 0.013275, 0.810969, -0.519873, 0.888266, -0.353263, 0.394745, 0.481698, 0.489525, -0.222827, -0.586108, 0.113738, -0.762384, 0.225851, -0.173929, -0.491298, -0.0369, -0.388108, 0.401269, -0.024319, 0.139985]
U shape: (5, 15)
U: [-0.304258, -0.082662, 0.360337, -0.033337, 0.634706, -0.178816, 0.315423, -0.180654, -0.614839, 0.521472, -0.330505, -0.505923, -0.631878, 0.258902, 0.241568, -0.688406, -0.172362, -0.391257, 0.522173, 0.797502, -0.575558, 0.151381, -0.547897, 0.516589, 0.708659, 0.482547, -0.34562, 0.422216, 0.970023, -0.876834, 0.197523, 0.947844, -0.225032, -0.578899, 0.335104, -0.718726, 0.982918, 0.710863, -0.737148, -0.950417, 0.325266, -0.921167, -0.994423, 0.173532, 0.865162, 0.624344, 0.7721, -0.799441, -0.962392, -0.08485, -0.988859, -0.037766, -0.095967, -0.930576, 0.724299, 0.777163, 0.778067, 0.058835, 0.014762, -0.408893, -0.261168, 0.042962, -0.110324, -0.20591, -0.040286, 0.133582, 0.706208, 0.392852, 0.112108, 0.054984, 0.656253, -0.39117, 0.640926, 0.263237, -0.956473]
b shape: (15,)
b: [0.811563, -0.388325, 0.885488, 0.230234, -0.244712, 0.761297, -0.705815, 0.470388, -0.573381, -0.43489, -0.242117, 0.251692, -0.751239, 0.84564, -0.942882]

in shape: (8, 5)
in: [0.957995, -0.833377, -0.37798, 0.722882, -0.38416, 0.713205, -0.313798, -0.5528, 0.197124, -0.99759, 0.484412, 0.170152, -0.494716, -0.809929, 0.43214, 0.63091, -0.782599, 0.806579, 0.299779, 0.302272, -0.475303, 0.087694, -0.845931, 0.315783, -0.248644, 0.665153, -0.693905, -0.71389, -0.484642, 0.724727, 0.001392, -0.690386, -0.684477, -0.682144, 0.29142, -0.121511, 0.799387, 0.23656, 0.378234, -0.141114]
out shape: (5,)
out: [0.433866, 0.4875, 0.365915, 0.714999, 0.264424]

[recurrent.GRU.2] units=4, activation='tanh', recurrent_activation='hard_sigmoid', return_sequences=True

Note: dropout_W and dropout_U are applied only during the training phase


In [7]:
# Fixture [recurrent.GRU.2]: same 4-unit GRU as GRU.0 but with
# return_sequences=True, so the output has one vector per timestep.
data_in_shape = (3, 6)
rnn = GRU(4, activation='tanh', recurrent_activation='hard_sigmoid',
          return_sequences=True)

layer_0 = Input(shape=data_in_shape)
layer_1 = rnn(layer_0)
model = Model(inputs=layer_0, outputs=layer_1)

# set weights to random (use seed for reproducibility)
weights = []
for i, w in enumerate(model.get_weights()):
    np.random.seed(3400 + i)  # one seed per weight tensor
    weights.append(2 * np.random.random(w.shape) - 1)  # uniform in [-1, 1)
model.set_weights(weights)
weight_names = ['W', 'U', 'b']  # kernel, recurrent kernel, bias
for w_i, w_name in enumerate(weight_names):
    print('{} shape:'.format(w_name), weights[w_i].shape)
    print('{}:'.format(w_name), format_decimal(weights[w_i].ravel().tolist()))

# NOTE(review): data_in relies on the RNG state left by the loop above.
data_in = 2 * np.random.random(data_in_shape) - 1
result = model.predict(np.array([data_in]))  # batch of one sample
data_out_shape = result[0].shape
data_in_formatted = format_decimal(data_in.ravel().tolist())
data_out_formatted = format_decimal(result[0].ravel().tolist())
print('')
print('in shape:', data_in_shape)
print('in:', data_in_formatted)
print('out shape:', data_out_shape)
print('out:', data_out_formatted)

# Record input, weights, and expected output for this configuration.
DATA['recurrent.GRU.2'] = {
    'input': {'data': data_in_formatted, 'shape': data_in_shape},
    'weights': [{'data': format_decimal(w.ravel().tolist()), 'shape': w.shape} for w in weights],
    'expected': {'data': data_out_formatted, 'shape': data_out_shape}
}


W shape: (6, 12)
W: [-0.045589, 0.415186, -0.562532, 0.417194, 0.595636, 0.384863, -0.421095, 0.531931, 0.892653, -0.9421, -0.522872, -0.37874, -0.768283, -0.196357, -0.818039, -0.631257, -0.405011, -0.035917, -0.48787, 0.181399, 0.150278, -0.910744, 0.68533, 0.571771, 0.898532, -0.136768, 0.451804, -0.831859, -0.132937, 0.876735, -0.625141, -0.551269, -0.848617, 0.044549, 0.095396, -0.729275, -0.497799, 0.038413, -0.642936, -0.653779, -0.157369, 0.070241, -0.217814, 0.126628, -0.093442, 0.335803, -0.931704, -0.584418, 0.233299, 0.773364, 0.632209, -0.883479, 0.311433, 0.495002, -0.81312, 0.246855, -0.342407, 0.894092, 0.620033, -0.811121, -0.515191, -0.73913, 0.715419, 0.905782, 0.713213, -0.788392, -0.313119, -0.246659, 0.173484, 0.805644, -0.818834, -0.333024]
U shape: (4, 12)
U: [-0.720918, -0.952173, -0.727704, 0.156292, -0.355836, -0.862534, 0.167887, 0.9923, -0.726801, 0.346909, 0.339642, 0.91009, 0.52891, -0.857623, -0.906373, 0.492599, -0.313538, 0.513243, 0.839592, -0.334972, 0.62071, 0.163758, 0.921592, -0.119355, -0.548986, 0.315309, 0.148678, 0.69909, 0.744981, -0.897808, -0.621434, 0.44988, -0.244279, 0.919685, -0.626255, -0.924122, 0.05482, -0.812786, 0.03547, 0.715238, -0.864506, -0.593804, -0.610785, 0.264904, 0.837017, 0.437136, -0.550154, -0.96061]
b shape: (12,)
b: [-0.836587, 0.897901, -0.267459, -0.930645, -0.409861, -0.508697, -0.23829, 0.215855, -0.570529, 0.272606, -0.304086, -0.907375]

in shape: (3, 6)
in: [-0.030361, 0.792806, 0.0388, -0.782223, 0.098008, -0.99904, 0.356238, -0.490761, 0.905586, 0.839691, -0.300254, 0.452917, 0.765016, -0.422445, 0.569223, 0.937541, 0.56795, 0.097106]
out shape: (3, 4)
out: [-0.339067, -0.175526, 0.673247, 0.209448, -0.552767, 0.089528, -0.005182, -0.539873, -0.54038, 0.089528, -0.446479, -0.974843]

[recurrent.GRU.3] units=4, activation='tanh', recurrent_activation='hard_sigmoid', return_sequences=False, go_backwards=True

Note: dropout_W and dropout_U are applied only during the training phase


In [8]:
# Fixture [recurrent.GRU.3]: 4-unit GRU processing the sequence in
# reverse (go_backwards=True), returning only the final state.
data_in_shape = (3, 6)
rnn = GRU(4, activation='tanh', recurrent_activation='hard_sigmoid',
          return_sequences=False, go_backwards=True)

layer_0 = Input(shape=data_in_shape)
layer_1 = rnn(layer_0)
model = Model(inputs=layer_0, outputs=layer_1)

# set weights to random (use seed for reproducibility)
weights = []
for i, w in enumerate(model.get_weights()):
    np.random.seed(3410 + i)  # one seed per weight tensor
    weights.append(2 * np.random.random(w.shape) - 1)  # uniform in [-1, 1)
model.set_weights(weights)
weight_names = ['W', 'U', 'b']  # kernel, recurrent kernel, bias
for w_i, w_name in enumerate(weight_names):
    print('{} shape:'.format(w_name), weights[w_i].shape)
    print('{}:'.format(w_name), format_decimal(weights[w_i].ravel().tolist()))

# NOTE(review): data_in relies on the RNG state left by the loop above.
data_in = 2 * np.random.random(data_in_shape) - 1
result = model.predict(np.array([data_in]))  # batch of one sample
data_out_shape = result[0].shape
data_in_formatted = format_decimal(data_in.ravel().tolist())
data_out_formatted = format_decimal(result[0].ravel().tolist())
print('')
print('in shape:', data_in_shape)
print('in:', data_in_formatted)
print('out shape:', data_out_shape)
print('out:', data_out_formatted)

# Record input, weights, and expected output for this configuration.
DATA['recurrent.GRU.3'] = {
    'input': {'data': data_in_formatted, 'shape': data_in_shape},
    'weights': [{'data': format_decimal(w.ravel().tolist()), 'shape': w.shape} for w in weights],
    'expected': {'data': data_out_formatted, 'shape': data_out_shape}
}


W shape: (6, 12)
W: [-0.148836, -0.691623, -0.259353, 0.398967, 0.178434, -0.938177, 0.563832, -0.586575, -0.831798, 0.956819, -0.259577, -0.699289, 0.686745, 0.695789, -0.490455, 0.714114, -0.011839, 0.660732, 0.882546, 0.913245, 0.912888, -0.132109, 0.756624, 0.10571, -0.164867, -0.525355, -0.843445, 0.350467, 0.161281, 0.130997, 0.965612, -0.793093, 0.092593, 0.497265, 0.125284, -0.769866, 0.652151, -0.229839, 0.589556, 0.452079, -0.812629, -0.003714, 0.129934, -0.042171, 0.373928, 0.830522, 0.650339, -0.614568, 0.009416, -0.738254, -0.319814, -0.713525, 0.087051, 0.076582, 0.114581, 0.615372, -0.6656, 0.490681, 0.617056, 0.503751, 0.451805, 0.024864, -0.916711, 0.07667, 0.956528, -0.946518, -0.217943, 0.475209, 0.263357, 0.798242, -0.480103, 0.82406]
U shape: (4, 12)
U: [0.967138, -0.583039, 0.764855, -0.532093, 0.047324, -0.375864, 0.930763, -0.094277, -0.033638, 0.956969, -0.126438, 0.333421, -0.002563, 0.398083, -0.486576, 0.67156, -0.702687, -0.406143, 0.33233, 0.895912, 0.630308, -0.581735, 0.129525, -0.323832, 0.276425, 0.167898, 0.309367, -0.35013, -0.784394, 0.59119, -0.459017, 0.130826, -0.699233, -0.004449, -0.204699, -0.267522, -0.847513, 0.773701, 0.289397, 0.63212, 0.728434, -0.420141, -0.84435, -0.390801, -0.433072, -0.512504, 0.615271, -0.253916]
b shape: (12,)
b: [-0.572009, -0.16708, 0.633717, 0.544638, 0.822347, -0.329096, 0.199946, 0.91608, -0.404574, 0.092205, -0.023165, 0.905883]

in shape: (3, 6)
in: [0.608946, -0.551183, 0.190791, -0.894874, 0.734435, -0.380768, 0.038316, -0.58664, -0.250221, -0.567826, 0.1872, 0.457072, -0.79909, 0.817308, -0.535968, -0.519832, 0.958321, 0.525862]
out shape: (4,)
out: [-0.98589, -0.34488, -0.117773, 0.665576]

[recurrent.GRU.4] units=4, activation='tanh', recurrent_activation='hard_sigmoid', return_sequences=True, go_backwards=True

Note: dropout_W and dropout_U are applied only during the training phase


In [9]:
# Fixture [recurrent.GRU.4]: reversed sequence (go_backwards=True) with
# one output vector per timestep (return_sequences=True).
data_in_shape = (3, 6)
rnn = GRU(4, activation='tanh', recurrent_activation='hard_sigmoid',
          return_sequences=True, go_backwards=True)

layer_0 = Input(shape=data_in_shape)
layer_1 = rnn(layer_0)
model = Model(inputs=layer_0, outputs=layer_1)

# set weights to random (use seed for reproducibility)
weights = []
for i, w in enumerate(model.get_weights()):
    np.random.seed(3420 + i)  # one seed per weight tensor
    weights.append(2 * np.random.random(w.shape) - 1)  # uniform in [-1, 1)
model.set_weights(weights)
weight_names = ['W', 'U', 'b']  # kernel, recurrent kernel, bias
for w_i, w_name in enumerate(weight_names):
    print('{} shape:'.format(w_name), weights[w_i].shape)
    print('{}:'.format(w_name), format_decimal(weights[w_i].ravel().tolist()))

# NOTE(review): data_in relies on the RNG state left by the loop above.
data_in = 2 * np.random.random(data_in_shape) - 1
result = model.predict(np.array([data_in]))  # batch of one sample
data_out_shape = result[0].shape
data_in_formatted = format_decimal(data_in.ravel().tolist())
data_out_formatted = format_decimal(result[0].ravel().tolist())
print('')
print('in shape:', data_in_shape)
print('in:', data_in_formatted)
print('out shape:', data_out_shape)
print('out:', data_out_formatted)

# Record input, weights, and expected output for this configuration.
DATA['recurrent.GRU.4'] = {
    'input': {'data': data_in_formatted, 'shape': data_in_shape},
    'weights': [{'data': format_decimal(w.ravel().tolist()), 'shape': w.shape} for w in weights],
    'expected': {'data': data_out_formatted, 'shape': data_out_shape}
}


W shape: (6, 12)
W: [0.648076, -0.933145, 0.632527, -0.887257, -0.868064, 0.509119, -0.489015, 0.342717, -0.074426, 0.269493, -0.159285, -0.541295, -0.617557, 0.667622, -0.126333, 0.623244, 0.494329, -0.353027, -0.071929, 0.76814, 0.086752, -0.231308, -0.706655, -0.892407, 0.328747, -0.663853, -0.883796, 0.58082, 0.89732, -0.889811, -0.146597, -0.508468, -0.934769, 0.803009, -0.79129, -0.680897, -0.526831, 0.452929, -0.76019, 0.431171, -0.094593, -0.803631, 0.852033, 0.420535, 0.617888, 0.614191, 0.754506, -0.365128, 0.752598, 0.185452, 0.423028, 0.840781, -0.046601, 0.902557, 0.538487, -0.300339, 0.882854, -0.8739, -0.428781, -0.963806, 0.044708, 0.568021, -0.259802, 0.367364, 0.734628, 0.239464, -0.96882, -0.13658, 0.112533, -0.858009, -0.241363, 0.854742]
U shape: (4, 12)
U: [-0.848935, -0.07433, -0.244574, -0.054626, 0.537405, 0.675859, -0.404406, 0.340232, -0.156816, -0.452044, 0.167286, 0.378355, -0.479426, 0.432736, -0.001522, 0.636069, 0.637094, 0.051329, -0.729471, 0.933768, 0.135844, 0.991456, -0.631282, 0.993896, -0.001499, -0.147161, -0.08554, 0.161971, 0.088088, -0.890515, 0.20275, -0.694628, 0.137755, -0.009775, -0.504511, 0.221326, 0.786296, 0.131173, -0.065861, -0.289775, 0.163677, -0.60089, -0.858084, 0.977572, -0.372745, 0.283967, 0.129185, -0.898048]
b shape: (12,)
b: [0.698817, -0.044763, -0.496604, -0.075629, -0.967465, -0.953896, 0.33352, 0.815975, -0.285307, -0.483249, -0.981167, -0.253059]

in shape: (3, 6)
in: [0.148534, 0.417965, 0.375558, -0.600416, -0.887717, 0.317562, 0.434389, 0.646947, -0.644747, -0.575691, -0.547667, 0.196421, 0.426908, -0.03732, -0.837063, 0.387356, 0.710446, 0.013828]
out shape: (3, 4)
out: [0.251305, -0.373722, -0.142272, -0.324048, -0.079071, -0.629247, -0.421678, 0.141942, -0.373374, -0.362104, -0.74397, 0.126051]

[recurrent.GRU.5] units=4, activation='tanh', recurrent_activation='hard_sigmoid', return_sequences=False, go_backwards=False, stateful=True

Note: dropout_W and dropout_U are applied only during the training phase

To test statefulness, model.predict is run twice


In [10]:
# Fixture [recurrent.GRU.5]: stateful 4-unit GRU. Stateful layers need a
# fixed batch size, hence Input(batch_shape=(1, ...)) below.
data_in_shape = (3, 6)
rnn = GRU(4, activation='tanh', recurrent_activation='hard_sigmoid',
          return_sequences=False, go_backwards=False, stateful=True)

layer_0 = Input(batch_shape=(1, *data_in_shape))
layer_1 = rnn(layer_0)
model = Model(inputs=layer_0, outputs=layer_1)

# set weights to random (use seed for reproducibility)
weights = []
for i, w in enumerate(model.get_weights()):
    np.random.seed(3430 + i)  # one seed per weight tensor
    weights.append(2 * np.random.random(w.shape) - 1)  # uniform in [-1, 1)
model.set_weights(weights)
weight_names = ['W', 'U', 'b']  # kernel, recurrent kernel, bias
for w_i, w_name in enumerate(weight_names):
    print('{} shape:'.format(w_name), weights[w_i].shape)
    print('{}:'.format(w_name), format_decimal(weights[w_i].ravel().tolist()))

# NOTE(review): data_in relies on the RNG state left by the loop above.
data_in = 2 * np.random.random(data_in_shape) - 1
# predict twice: the second call starts from the state left by the first,
# so the recorded expected output exercises statefulness.
result = model.predict(np.array([data_in]))
result = model.predict(np.array([data_in]))
data_out_shape = result[0].shape
data_in_formatted = format_decimal(data_in.ravel().tolist())
data_out_formatted = format_decimal(result[0].ravel().tolist())
print('')
print('in shape:', data_in_shape)
print('in:', data_in_formatted)
print('out shape:', data_out_shape)
print('out:', data_out_formatted)

# Record input, weights, and expected output for this configuration.
DATA['recurrent.GRU.5'] = {
    'input': {'data': data_in_formatted, 'shape': data_in_shape},
    'weights': [{'data': format_decimal(w.ravel().tolist()), 'shape': w.shape} for w in weights],
    'expected': {'data': data_out_formatted, 'shape': data_out_shape}
}


W shape: (6, 12)
W: [-0.015897, -0.848443, 0.842792, -0.465152, 0.3481, 0.510389, -0.992778, 0.369654, -0.615604, 0.620224, -0.214609, 0.504147, 0.473761, -0.745675, -0.300108, -0.423315, 0.696664, -0.815214, 0.252845, -0.388892, -0.653816, -0.322302, 0.265343, 0.342551, 0.18721, 0.170705, 0.00931, 0.715875, -0.547358, 0.726838, 0.736064, -0.266672, -0.67036, -0.882757, 0.809491, 0.564659, 0.22527, -0.019071, -0.746865, 0.02245, 0.097309, 0.497686, -0.982907, 0.503759, -0.193199, 0.695506, -0.960113, -0.530728, 0.720679, -0.187994, -0.166245, 0.806344, 0.280325, 0.337285, 0.27085, -0.626485, -0.369051, 0.022973, -0.705744, 0.729512, 0.914495, -0.690124, 0.881943, -0.648586, -0.293915, 0.636509, 0.511375, 0.85435, 0.781066, -0.613855, -0.276003, 0.478627]
U shape: (4, 12)
U: [-0.435635, 0.900124, -0.334948, -0.436874, -0.888002, -0.8859, -0.881562, -0.74586, -0.022979, 0.870013, 0.061461, -0.53529, -0.090523, -0.32069, 0.61625, -0.343037, 0.915704, 0.69609, -0.16974, 0.211096, -0.361093, 0.343673, -0.083551, -0.168075, 0.40166, -0.017995, 0.576888, 0.492146, -0.620208, 0.603125, -0.721616, -0.293558, 0.917852, -0.514209, 0.344444, 0.900205, -0.993519, -0.283809, 0.024229, -0.799192, 0.418639, 0.120696, -0.813529, -0.768004, 0.433383, 0.87709, 0.474692, -0.894814]
b shape: (12,)
b: [-0.10129, -0.229923, -0.993001, -0.052356, 0.618518, 0.084778, -0.689832, 0.746462, 0.66411, -0.940729, -0.393391, -0.246194]

in shape: (3, 6)
in: [0.206338, -0.706156, -0.817432, 0.682606, 0.267345, 0.597849, -0.391708, -0.844586, -0.116337, -0.533634, 0.865085, -0.333647, -0.365342, -0.680547, 0.952109, 0.26761, -0.637081, 0.998968]
out shape: (4,)
out: [0.699378, -0.448309, -0.305413, -0.383354]

[recurrent.GRU.6] units=4, activation='tanh', recurrent_activation='hard_sigmoid', return_sequences=True, go_backwards=False, stateful=True

Note: dropout_W and dropout_U are applied only during the training phase

To test statefulness, model.predict is run twice


In [11]:
# Fixture [recurrent.GRU.6]: stateful 4-unit GRU returning the full
# output sequence; fixed batch size of 1 required by stateful=True.
data_in_shape = (3, 6)
rnn = GRU(4, activation='tanh', recurrent_activation='hard_sigmoid',
          return_sequences=True, go_backwards=False, stateful=True)

layer_0 = Input(batch_shape=(1, *data_in_shape))
layer_1 = rnn(layer_0)
model = Model(inputs=layer_0, outputs=layer_1)

# set weights to random (use seed for reproducibility)
weights = []
for i, w in enumerate(model.get_weights()):
    np.random.seed(3440 + i)  # one seed per weight tensor
    weights.append(2 * np.random.random(w.shape) - 1)  # uniform in [-1, 1)
model.set_weights(weights)
weight_names = ['W', 'U', 'b']  # kernel, recurrent kernel, bias
for w_i, w_name in enumerate(weight_names):
    print('{} shape:'.format(w_name), weights[w_i].shape)
    print('{}:'.format(w_name), format_decimal(weights[w_i].ravel().tolist()))

# NOTE(review): data_in relies on the RNG state left by the loop above.
data_in = 2 * np.random.random(data_in_shape) - 1
# predict twice so the second call starts from the carried-over state.
result = model.predict(np.array([data_in]))
result = model.predict(np.array([data_in]))
data_out_shape = result[0].shape
data_in_formatted = format_decimal(data_in.ravel().tolist())
data_out_formatted = format_decimal(result[0].ravel().tolist())
print('')
print('in shape:', data_in_shape)
print('in:', data_in_formatted)
print('out shape:', data_out_shape)
print('out:', data_out_formatted)

# Record input, weights, and expected output for this configuration.
DATA['recurrent.GRU.6'] = {
    'input': {'data': data_in_formatted, 'shape': data_in_shape},
    'weights': [{'data': format_decimal(w.ravel().tolist()), 'shape': w.shape} for w in weights],
    'expected': {'data': data_out_formatted, 'shape': data_out_shape}
}


W shape: (6, 12)
W: [0.400696, -0.641997, -0.427212, 0.92815, -0.382307, 0.52579, -0.298955, 0.804293, 0.060837, -0.381843, -0.362404, -0.287894, -0.133715, -0.250107, 0.133557, 0.809601, 0.224464, 0.192648, -0.383252, -0.479287, 0.488092, 0.453058, 0.651348, -0.637466, 0.143476, 0.115498, 0.175809, 0.231472, -0.573236, 0.892225, 0.386284, -0.419826, 0.048051, -0.244259, -0.39078, -0.93408, 0.591446, -0.780403, 0.23196, 0.678271, 0.774315, -0.219007, -0.997067, 0.589348, -0.760609, -0.615731, 0.303225, -0.111519, 0.960942, 0.894508, 0.69549, -0.682337, -0.264404, -0.572363, 0.127237, -0.160132, 0.202618, -0.393438, -0.461551, -0.034192, 0.520993, 0.760177, -0.104188, 0.917771, 0.907846, 0.334309, -0.616382, -0.073938, -0.103726, -0.852162, -0.673798, -0.657648]
U shape: (4, 12)
U: [-0.309794, -0.535705, 0.711138, -0.263219, -0.80297, -0.224219, -0.877424, 0.563619, 0.954281, 0.955728, 0.31396, -0.130807, 0.305157, 0.875891, 0.073604, -0.03227, -0.826057, 0.447289, -0.742758, 0.208603, 0.335053, 0.463562, 0.822418, 0.826141, 0.425398, 0.945678, 0.975818, 0.847521, 0.780927, -0.711789, 0.929333, 0.781502, 0.869627, -0.932976, -0.93481, 0.950563, 0.548142, -0.860462, 0.264768, -0.704064, -0.412027, -0.611868, -0.614491, -0.601713, -0.860569, -0.885433, 0.166167, 0.876076]
b shape: (12,)
b: [0.123421, 0.116533, 0.272969, -0.457375, -0.10058, -0.106149, -0.439683, 0.505106, -0.805833, 0.345413, 0.200024, -0.417246]

in shape: (3, 6)
in: [-0.190503, -0.799225, -0.252618, 0.498488, -0.087763, -0.647562, 0.829396, -0.913196, -0.828914, 0.11347, -0.781162, 0.908826, 0.859648, 0.893554, 0.960515, -0.894929, 0.903788, -0.51676]
out shape: (3, 4)
out: [-0.655994, 0.381562, 0.159134, 0.189835, -0.766225, -0.164506, -0.347471, 0.281128, -0.417517, 0.212996, -0.235514, -0.513604]

[recurrent.GRU.7] units=4, activation='tanh', recurrent_activation='hard_sigmoid', return_sequences=False, go_backwards=True, stateful=True

Note: dropout_W and dropout_U are applied only during the training phase

To test statefulness, model.predict is run twice


In [12]:
# Fixture [recurrent.GRU.7]: stateful 4-unit GRU processing the sequence
# in reverse; fixed batch size of 1 required by stateful=True.
data_in_shape = (3, 6)
rnn = GRU(4, activation='tanh', recurrent_activation='hard_sigmoid',
          return_sequences=False, go_backwards=True, stateful=True)

layer_0 = Input(batch_shape=(1, *data_in_shape))
layer_1 = rnn(layer_0)
model = Model(inputs=layer_0, outputs=layer_1)

# set weights to random (use seed for reproducibility)
weights = []
for i, w in enumerate(model.get_weights()):
    np.random.seed(3450 + i)  # one seed per weight tensor
    weights.append(2 * np.random.random(w.shape) - 1)  # uniform in [-1, 1)
model.set_weights(weights)
weight_names = ['W', 'U', 'b']  # kernel, recurrent kernel, bias
for w_i, w_name in enumerate(weight_names):
    print('{} shape:'.format(w_name), weights[w_i].shape)
    print('{}:'.format(w_name), format_decimal(weights[w_i].ravel().tolist()))

# NOTE(review): data_in relies on the RNG state left by the loop above.
data_in = 2 * np.random.random(data_in_shape) - 1
# predict twice so the second call starts from the carried-over state.
result = model.predict(np.array([data_in]))
result = model.predict(np.array([data_in]))
data_out_shape = result[0].shape
data_in_formatted = format_decimal(data_in.ravel().tolist())
data_out_formatted = format_decimal(result[0].ravel().tolist())
print('')
print('in shape:', data_in_shape)
print('in:', data_in_formatted)
print('out shape:', data_out_shape)
print('out:', data_out_formatted)

# Record input, weights, and expected output for this configuration.
DATA['recurrent.GRU.7'] = {
    'input': {'data': data_in_formatted, 'shape': data_in_shape},
    'weights': [{'data': format_decimal(w.ravel().tolist()), 'shape': w.shape} for w in weights],
    'expected': {'data': data_out_formatted, 'shape': data_out_shape}
}


W shape: (6, 12)
W: [-0.217059, 0.926079, 0.878897, 0.908534, -0.783196, 0.29837, 0.900327, 0.92828, -0.895611, 0.798379, 0.289136, -0.506593, 0.211057, -0.470939, -0.313951, 0.070627, -0.366853, -0.049493, 0.707295, 0.968283, 0.146539, 0.481093, -0.59495, -0.950117, 0.537342, -0.216253, -0.628889, -0.759876, 0.092087, 0.030619, -0.586226, 0.665932, 0.421089, 0.999477, 0.35168, -0.953635, 0.429368, 0.114386, 0.665266, -0.876856, -0.714418, 0.858883, -0.206244, -0.748219, 0.314382, -0.480597, -0.066145, -0.809664, 0.265962, 0.380994, -0.456802, 0.190172, -0.500332, 0.061274, -0.507235, 0.805938, -0.373262, -0.814196, -0.280043, 0.682193, 0.647611, -0.035544, 0.582232, 0.183355, 0.214989, -0.313518, 0.893282, 0.802617, 0.69754, 0.797573, 0.351413, 0.306177]
U shape: (4, 12)
U: [-0.563067, 0.600078, 0.415698, 0.75817, -0.229433, 0.753535, 0.899258, 0.302955, -0.502078, 0.82962, 0.547417, 0.035067, 0.267238, 0.608234, 0.248494, 0.371422, -0.285179, -0.42698, -0.941637, -0.595394, 0.115438, -0.691169, 0.559936, -0.631186, 0.341637, -0.738756, 0.332916, -0.513288, -0.025353, -0.430303, -0.082212, 0.663043, -0.270141, -0.133259, 0.364972, -0.152163, 0.429373, -0.956845, -0.419642, -0.166387, -0.770657, -0.057249, -0.432069, -0.766248, 0.091082, -0.73226, -0.747741, -0.265191]
b shape: (12,)
b: [-0.116782, -0.060653, 0.65511, -0.562505, 0.189572, 0.351985, 0.453275, -0.350892, 0.22263, -0.583627, -0.26432, 0.614658]

in shape: (3, 6)
in: [0.258393, -0.716408, -0.874891, -0.5957, -0.156024, 0.504423, -0.764552, -0.203444, 0.980501, 0.442658, -0.69405, 0.845894, -0.934893, -0.649584, -0.119074, 0.935229, -0.748855, -0.463104]
out shape: (4,)
out: [-0.104133, 0.252092, 0.170733, 0.132856]

[recurrent.GRU.8] units=4, activation='tanh', recurrent_activation='hard_sigmoid', use_bias=False, return_sequences=True, go_backwards=True, stateful=True

Note: dropout_W and dropout_U are applied only during the training phase

To test statefulness, model.predict is run twice


In [13]:
# Fixture [recurrent.GRU.8]: stateful, reversed, sequence-returning GRU
# with use_bias=False — so only W and U weights exist (no bias vector).
data_in_shape = (3, 6)
rnn = GRU(4, activation='tanh', recurrent_activation='hard_sigmoid', use_bias=False,
          return_sequences=True, go_backwards=True, stateful=True)

layer_0 = Input(batch_shape=(1, *data_in_shape))
layer_1 = rnn(layer_0)
model = Model(inputs=layer_0, outputs=layer_1)

# set weights to random (use seed for reproducibility)
weights = []
for i, w in enumerate(model.get_weights()):
    np.random.seed(3460 + i)  # one seed per weight tensor
    weights.append(2 * np.random.random(w.shape) - 1)  # uniform in [-1, 1)
model.set_weights(weights)
weight_names = ['W', 'U']  # kernel, recurrent kernel; no bias (use_bias=False)
for w_i, w_name in enumerate(weight_names):
    print('{} shape:'.format(w_name), weights[w_i].shape)
    print('{}:'.format(w_name), format_decimal(weights[w_i].ravel().tolist()))

# NOTE(review): data_in relies on the RNG state left by the loop above.
data_in = 2 * np.random.random(data_in_shape) - 1
# predict twice so the second call starts from the carried-over state.
result = model.predict(np.array([data_in]))
result = model.predict(np.array([data_in]))
data_out_shape = result[0].shape
data_in_formatted = format_decimal(data_in.ravel().tolist())
data_out_formatted = format_decimal(result[0].ravel().tolist())
print('')
print('in shape:', data_in_shape)
print('in:', data_in_formatted)
print('out shape:', data_out_shape)
print('out:', data_out_formatted)

# Record input, weights, and expected output for this configuration.
DATA['recurrent.GRU.8'] = {
    'input': {'data': data_in_formatted, 'shape': data_in_shape},
    'weights': [{'data': format_decimal(w.ravel().tolist()), 'shape': w.shape} for w in weights],
    'expected': {'data': data_out_formatted, 'shape': data_out_shape}
}


W shape: (6, 12)
W: [0.493731, -0.713054, -0.991724, -0.182448, 0.590974, -0.95971, 0.402518, 0.575599, 0.348871, 0.587656, -0.091027, 0.610543, 0.546701, 0.805702, -0.571142, -0.803143, -0.821461, 0.473713, 0.468774, -0.395489, -0.420674, 0.179859, 0.287319, 0.595934, -0.970525, 0.623482, 0.93883, -0.601646, -0.307388, -0.734456, 0.608166, -0.696768, -0.292043, -0.582354, 0.483963, -0.879414, -0.905422, 0.66025, -0.490664, -0.675897, -0.367564, 0.413074, -0.348958, 0.095513, 0.073838, 0.923831, 0.546994, -0.594654, 0.403964, -0.652478, 0.659219, -0.887595, -0.519658, -0.518417, -0.719567, -0.381194, 0.936127, -0.347308, -0.432567, -0.923838, 0.745346, 0.408598, 0.195032, -0.291758, 0.012271, 0.68258, 0.258998, 0.253195, -0.945687, -0.701285, -0.098939, -0.959876]
U shape: (4, 12)
U: [-0.510224, -0.375143, -0.999832, -0.621992, 0.347463, 0.437596, -0.840274, 0.699169, 0.022476, -0.416013, -0.694025, 0.437842, 0.467612, -0.732654, 0.131544, -0.578074, -0.016291, 0.11982, 0.7398, -0.782659, 0.71942, -0.179374, -0.639908, -0.717196, 0.676085, 0.204119, -0.956782, 0.05779, 0.048135, 0.830161, 0.559749, 0.751911, 0.560842, 0.54528, 0.343392, 0.194211, -0.840363, 0.556398, 0.214783, -0.188248, 0.507066, 0.593836, -0.739215, -0.787099, 0.047721, 0.154225, -0.330886, 0.132199]

in shape: (3, 6)
in: [0.708902, 0.846182, 0.97007, -0.306318, -0.159615, 0.958509, 0.471753, 0.847227, -0.152287, 0.274365, 0.255755, 0.973133, -0.63889, -0.010724, 0.709579, -0.195852, 0.280868, 0.487307]
out shape: (3, 4)
out: [-0.503121, -0.461341, 0.437258, -0.679647, -0.509266, -0.246599, 0.514353, -0.423158, -0.568067, -0.308138, 0.620688, -0.419014]

Export for Keras.js tests


In [14]:
print(json.dumps(DATA))


{"recurrent.GRU.0": {"input": {"data": [-0.096074, 0.639699, 0.415126, 0.709671, -0.932882, 0.360813, 0.055085, -0.150315, -0.825055, 0.664181, -0.893701, -0.63904, -0.341407, 0.479979, 0.168984, -0.374535, 0.02818, -0.765662], "shape": [3, 6]}, "weights": [{"data": [0.697096, 0.937488, -0.449098, -0.484192, -0.296977, 0.766173, 0.375647, -0.31032, -0.893983, 0.551514, 0.512208, -0.022663, -0.777151, 0.762656, 0.955093, -0.7102, -0.343035, 0.429084, -0.176999, -0.504458, -0.978595, 0.01322, 0.785201, 0.872206, -0.944044, 0.136217, -0.501474, 0.860549, 0.400717, -0.952791, -0.724148, -0.777265, 0.969193, -0.9457, -0.88104, 0.573352, -0.53497, 0.543619, 0.248223, -0.550226, 0.764797, 0.219472, -0.974674, -0.096673, 0.125632, 0.176088, -0.007492, -0.416477, -0.893533, 0.022808, -0.815785, 0.623421, -0.805923, -0.797787, 0.764992, -0.673555, -0.713329, 0.799281, 0.980194, -0.395521, 0.537878, -0.777262, -0.006721, 0.93244, 0.750308, 0.268049, 0.878764, 0.172846, 0.613674, 0.733389, -0.18969, -0.281979], "shape": [6, 12]}, {"data": [0.293987, 0.510798, -0.867003, -0.537004, 0.153043, 0.868432, 0.303538, -0.833902, -0.421654, 0.022877, -0.490379, 0.830018, -0.568055, 0.362359, -0.964449, -0.883199, 0.980361, -0.398021, -0.145153, -0.875784, -0.82698, -0.832323, 0.522688, -0.290755, -0.102632, 0.516158, 0.776809, -0.635952, -0.301458, 0.321256, -0.257592, 0.457013, -0.483288, -0.684349, -0.141722, 0.44671, 0.385804, -0.557622, -0.200272, -0.195853, 0.144566, -0.188024, 0.569759, -0.81958, -0.992319, 0.752181, 0.1356, 0.572831], "shape": [4, 12]}, {"data": [0.300373, -0.397273, -0.197073, 0.545033, -0.983067, 0.346379, 0.955756, 0.958477, -0.57945, 0.7951, 0.368559, -0.906396], "shape": [12]}], "expected": {"data": [-0.453688, -0.088839, 0.237924, -0.523194], "shape": [4]}}, "recurrent.GRU.1": {"input": {"data": [0.957995, -0.833377, -0.37798, 0.722882, -0.38416, 0.713205, -0.313798, -0.5528, 0.197124, -0.99759, 0.484412, 0.170152, -0.494716, -0.809929, 0.43214, 0.63091, 
-0.782599, 0.806579, 0.299779, 0.302272, -0.475303, 0.087694, -0.845931, 0.315783, -0.248644, 0.665153, -0.693905, -0.71389, -0.484642, 0.724727, 0.001392, -0.690386, -0.684477, -0.682144, 0.29142, -0.121511, 0.799387, 0.23656, 0.378234, -0.141114], "shape": [8, 5]}, "weights": [{"data": [0.299086, -0.606833, -0.606176, -0.787071, 0.651687, 0.533268, -0.031304, 0.761436, -0.233954, 0.250473, 0.336694, -0.819566, 0.386506, -0.310632, 0.534265, 0.326778, 0.986252, 0.550256, -0.428584, 0.729528, 0.753243, 0.052566, 0.112301, 0.943392, 0.84211, -0.032087, -0.617971, 0.363577, 0.075713, 0.981932, -0.449437, -0.591187, 0.139301, 0.590188, -0.713359, 0.848149, 0.620145, -0.334172, -0.684686, 0.235886, 0.906112, -0.58247, -0.606377, 0.399036, -0.040617, 0.66917, 0.945858, -0.222578, 0.448616, -0.670496, 0.969414, 0.702519, 0.544102, -0.795606, -0.477415, 0.013275, 0.810969, -0.519873, 0.888266, -0.353263, 0.394745, 0.481698, 0.489525, -0.222827, -0.586108, 0.113738, -0.762384, 0.225851, -0.173929, -0.491298, -0.0369, -0.388108, 0.401269, -0.024319, 0.139985], "shape": [5, 15]}, {"data": [-0.304258, -0.082662, 0.360337, -0.033337, 0.634706, -0.178816, 0.315423, -0.180654, -0.614839, 0.521472, -0.330505, -0.505923, -0.631878, 0.258902, 0.241568, -0.688406, -0.172362, -0.391257, 0.522173, 0.797502, -0.575558, 0.151381, -0.547897, 0.516589, 0.708659, 0.482547, -0.34562, 0.422216, 0.970023, -0.876834, 0.197523, 0.947844, -0.225032, -0.578899, 0.335104, -0.718726, 0.982918, 0.710863, -0.737148, -0.950417, 0.325266, -0.921167, -0.994423, 0.173532, 0.865162, 0.624344, 0.7721, -0.799441, -0.962392, -0.08485, -0.988859, -0.037766, -0.095967, -0.930576, 0.724299, 0.777163, 0.778067, 0.058835, 0.014762, -0.408893, -0.261168, 0.042962, -0.110324, -0.20591, -0.040286, 0.133582, 0.706208, 0.392852, 0.112108, 0.054984, 0.656253, -0.39117, 0.640926, 0.263237, -0.956473], "shape": [5, 15]}, {"data": [0.811563, -0.388325, 0.885488, 0.230234, -0.244712, 0.761297, -0.705815, 0.470388, 
-0.573381, -0.43489, -0.242117, 0.251692, -0.751239, 0.84564, -0.942882], "shape": [15]}], "expected": {"data": [0.433866, 0.4875, 0.365915, 0.714999, 0.264424], "shape": [5]}}, "recurrent.GRU.2": {"input": {"data": [-0.030361, 0.792806, 0.0388, -0.782223, 0.098008, -0.99904, 0.356238, -0.490761, 0.905586, 0.839691, -0.300254, 0.452917, 0.765016, -0.422445, 0.569223, 0.937541, 0.56795, 0.097106], "shape": [3, 6]}, "weights": [{"data": [-0.045589, 0.415186, -0.562532, 0.417194, 0.595636, 0.384863, -0.421095, 0.531931, 0.892653, -0.9421, -0.522872, -0.37874, -0.768283, -0.196357, -0.818039, -0.631257, -0.405011, -0.035917, -0.48787, 0.181399, 0.150278, -0.910744, 0.68533, 0.571771, 0.898532, -0.136768, 0.451804, -0.831859, -0.132937, 0.876735, -0.625141, -0.551269, -0.848617, 0.044549, 0.095396, -0.729275, -0.497799, 0.038413, -0.642936, -0.653779, -0.157369, 0.070241, -0.217814, 0.126628, -0.093442, 0.335803, -0.931704, -0.584418, 0.233299, 0.773364, 0.632209, -0.883479, 0.311433, 0.495002, -0.81312, 0.246855, -0.342407, 0.894092, 0.620033, -0.811121, -0.515191, -0.73913, 0.715419, 0.905782, 0.713213, -0.788392, -0.313119, -0.246659, 0.173484, 0.805644, -0.818834, -0.333024], "shape": [6, 12]}, {"data": [-0.720918, -0.952173, -0.727704, 0.156292, -0.355836, -0.862534, 0.167887, 0.9923, -0.726801, 0.346909, 0.339642, 0.91009, 0.52891, -0.857623, -0.906373, 0.492599, -0.313538, 0.513243, 0.839592, -0.334972, 0.62071, 0.163758, 0.921592, -0.119355, -0.548986, 0.315309, 0.148678, 0.69909, 0.744981, -0.897808, -0.621434, 0.44988, -0.244279, 0.919685, -0.626255, -0.924122, 0.05482, -0.812786, 0.03547, 0.715238, -0.864506, -0.593804, -0.610785, 0.264904, 0.837017, 0.437136, -0.550154, -0.96061], "shape": [4, 12]}, {"data": [-0.836587, 0.897901, -0.267459, -0.930645, -0.409861, -0.508697, -0.23829, 0.215855, -0.570529, 0.272606, -0.304086, -0.907375], "shape": [12]}], "expected": {"data": [-0.339067, -0.175526, 0.673247, 0.209448, -0.552767, 0.089528, -0.005182, -0.539873, 
-0.54038, 0.089528, -0.446479, -0.974843], "shape": [3, 4]}}, "recurrent.GRU.3": {"input": {"data": [0.608946, -0.551183, 0.190791, -0.894874, 0.734435, -0.380768, 0.038316, -0.58664, -0.250221, -0.567826, 0.1872, 0.457072, -0.79909, 0.817308, -0.535968, -0.519832, 0.958321, 0.525862], "shape": [3, 6]}, "weights": [{"data": [-0.148836, -0.691623, -0.259353, 0.398967, 0.178434, -0.938177, 0.563832, -0.586575, -0.831798, 0.956819, -0.259577, -0.699289, 0.686745, 0.695789, -0.490455, 0.714114, -0.011839, 0.660732, 0.882546, 0.913245, 0.912888, -0.132109, 0.756624, 0.10571, -0.164867, -0.525355, -0.843445, 0.350467, 0.161281, 0.130997, 0.965612, -0.793093, 0.092593, 0.497265, 0.125284, -0.769866, 0.652151, -0.229839, 0.589556, 0.452079, -0.812629, -0.003714, 0.129934, -0.042171, 0.373928, 0.830522, 0.650339, -0.614568, 0.009416, -0.738254, -0.319814, -0.713525, 0.087051, 0.076582, 0.114581, 0.615372, -0.6656, 0.490681, 0.617056, 0.503751, 0.451805, 0.024864, -0.916711, 0.07667, 0.956528, -0.946518, -0.217943, 0.475209, 0.263357, 0.798242, -0.480103, 0.82406], "shape": [6, 12]}, {"data": [0.967138, -0.583039, 0.764855, -0.532093, 0.047324, -0.375864, 0.930763, -0.094277, -0.033638, 0.956969, -0.126438, 0.333421, -0.002563, 0.398083, -0.486576, 0.67156, -0.702687, -0.406143, 0.33233, 0.895912, 0.630308, -0.581735, 0.129525, -0.323832, 0.276425, 0.167898, 0.309367, -0.35013, -0.784394, 0.59119, -0.459017, 0.130826, -0.699233, -0.004449, -0.204699, -0.267522, -0.847513, 0.773701, 0.289397, 0.63212, 0.728434, -0.420141, -0.84435, -0.390801, -0.433072, -0.512504, 0.615271, -0.253916], "shape": [4, 12]}, {"data": [-0.572009, -0.16708, 0.633717, 0.544638, 0.822347, -0.329096, 0.199946, 0.91608, -0.404574, 0.092205, -0.023165, 0.905883], "shape": [12]}], "expected": {"data": [-0.98589, -0.34488, -0.117773, 0.665576], "shape": [4]}}, "recurrent.GRU.4": {"input": {"data": [0.148534, 0.417965, 0.375558, -0.600416, -0.887717, 0.317562, 0.434389, 0.646947, -0.644747, -0.575691, 
-0.547667, 0.196421, 0.426908, -0.03732, -0.837063, 0.387356, 0.710446, 0.013828], "shape": [3, 6]}, "weights": [{"data": [0.648076, -0.933145, 0.632527, -0.887257, -0.868064, 0.509119, -0.489015, 0.342717, -0.074426, 0.269493, -0.159285, -0.541295, -0.617557, 0.667622, -0.126333, 0.623244, 0.494329, -0.353027, -0.071929, 0.76814, 0.086752, -0.231308, -0.706655, -0.892407, 0.328747, -0.663853, -0.883796, 0.58082, 0.89732, -0.889811, -0.146597, -0.508468, -0.934769, 0.803009, -0.79129, -0.680897, -0.526831, 0.452929, -0.76019, 0.431171, -0.094593, -0.803631, 0.852033, 0.420535, 0.617888, 0.614191, 0.754506, -0.365128, 0.752598, 0.185452, 0.423028, 0.840781, -0.046601, 0.902557, 0.538487, -0.300339, 0.882854, -0.8739, -0.428781, -0.963806, 0.044708, 0.568021, -0.259802, 0.367364, 0.734628, 0.239464, -0.96882, -0.13658, 0.112533, -0.858009, -0.241363, 0.854742], "shape": [6, 12]}, {"data": [-0.848935, -0.07433, -0.244574, -0.054626, 0.537405, 0.675859, -0.404406, 0.340232, -0.156816, -0.452044, 0.167286, 0.378355, -0.479426, 0.432736, -0.001522, 0.636069, 0.637094, 0.051329, -0.729471, 0.933768, 0.135844, 0.991456, -0.631282, 0.993896, -0.001499, -0.147161, -0.08554, 0.161971, 0.088088, -0.890515, 0.20275, -0.694628, 0.137755, -0.009775, -0.504511, 0.221326, 0.786296, 0.131173, -0.065861, -0.289775, 0.163677, -0.60089, -0.858084, 0.977572, -0.372745, 0.283967, 0.129185, -0.898048], "shape": [4, 12]}, {"data": [0.698817, -0.044763, -0.496604, -0.075629, -0.967465, -0.953896, 0.33352, 0.815975, -0.285307, -0.483249, -0.981167, -0.253059], "shape": [12]}], "expected": {"data": [0.251305, -0.373722, -0.142272, -0.324048, -0.079071, -0.629247, -0.421678, 0.141942, -0.373374, -0.362104, -0.74397, 0.126051], "shape": [3, 4]}}, "recurrent.GRU.5": {"input": {"data": [0.206338, -0.706156, -0.817432, 0.682606, 0.267345, 0.597849, -0.391708, -0.844586, -0.116337, -0.533634, 0.865085, -0.333647, -0.365342, -0.680547, 0.952109, 0.26761, -0.637081, 0.998968], "shape": [3, 6]}, 
"weights": [{"data": [-0.015897, -0.848443, 0.842792, -0.465152, 0.3481, 0.510389, -0.992778, 0.369654, -0.615604, 0.620224, -0.214609, 0.504147, 0.473761, -0.745675, -0.300108, -0.423315, 0.696664, -0.815214, 0.252845, -0.388892, -0.653816, -0.322302, 0.265343, 0.342551, 0.18721, 0.170705, 0.00931, 0.715875, -0.547358, 0.726838, 0.736064, -0.266672, -0.67036, -0.882757, 0.809491, 0.564659, 0.22527, -0.019071, -0.746865, 0.02245, 0.097309, 0.497686, -0.982907, 0.503759, -0.193199, 0.695506, -0.960113, -0.530728, 0.720679, -0.187994, -0.166245, 0.806344, 0.280325, 0.337285, 0.27085, -0.626485, -0.369051, 0.022973, -0.705744, 0.729512, 0.914495, -0.690124, 0.881943, -0.648586, -0.293915, 0.636509, 0.511375, 0.85435, 0.781066, -0.613855, -0.276003, 0.478627], "shape": [6, 12]}, {"data": [-0.435635, 0.900124, -0.334948, -0.436874, -0.888002, -0.8859, -0.881562, -0.74586, -0.022979, 0.870013, 0.061461, -0.53529, -0.090523, -0.32069, 0.61625, -0.343037, 0.915704, 0.69609, -0.16974, 0.211096, -0.361093, 0.343673, -0.083551, -0.168075, 0.40166, -0.017995, 0.576888, 0.492146, -0.620208, 0.603125, -0.721616, -0.293558, 0.917852, -0.514209, 0.344444, 0.900205, -0.993519, -0.283809, 0.024229, -0.799192, 0.418639, 0.120696, -0.813529, -0.768004, 0.433383, 0.87709, 0.474692, -0.894814], "shape": [4, 12]}, {"data": [-0.10129, -0.229923, -0.993001, -0.052356, 0.618518, 0.084778, -0.689832, 0.746462, 0.66411, -0.940729, -0.393391, -0.246194], "shape": [12]}], "expected": {"data": [0.699378, -0.448309, -0.305413, -0.383354], "shape": [4]}}, "recurrent.GRU.6": {"input": {"data": [-0.190503, -0.799225, -0.252618, 0.498488, -0.087763, -0.647562, 0.829396, -0.913196, -0.828914, 0.11347, -0.781162, 0.908826, 0.859648, 0.893554, 0.960515, -0.894929, 0.903788, -0.51676], "shape": [3, 6]}, "weights": [{"data": [0.400696, -0.641997, -0.427212, 0.92815, -0.382307, 0.52579, -0.298955, 0.804293, 0.060837, -0.381843, -0.362404, -0.287894, -0.133715, -0.250107, 0.133557, 0.809601, 0.224464, 
0.192648, -0.383252, -0.479287, 0.488092, 0.453058, 0.651348, -0.637466, 0.143476, 0.115498, 0.175809, 0.231472, -0.573236, 0.892225, 0.386284, -0.419826, 0.048051, -0.244259, -0.39078, -0.93408, 0.591446, -0.780403, 0.23196, 0.678271, 0.774315, -0.219007, -0.997067, 0.589348, -0.760609, -0.615731, 0.303225, -0.111519, 0.960942, 0.894508, 0.69549, -0.682337, -0.264404, -0.572363, 0.127237, -0.160132, 0.202618, -0.393438, -0.461551, -0.034192, 0.520993, 0.760177, -0.104188, 0.917771, 0.907846, 0.334309, -0.616382, -0.073938, -0.103726, -0.852162, -0.673798, -0.657648], "shape": [6, 12]}, {"data": [-0.309794, -0.535705, 0.711138, -0.263219, -0.80297, -0.224219, -0.877424, 0.563619, 0.954281, 0.955728, 0.31396, -0.130807, 0.305157, 0.875891, 0.073604, -0.03227, -0.826057, 0.447289, -0.742758, 0.208603, 0.335053, 0.463562, 0.822418, 0.826141, 0.425398, 0.945678, 0.975818, 0.847521, 0.780927, -0.711789, 0.929333, 0.781502, 0.869627, -0.932976, -0.93481, 0.950563, 0.548142, -0.860462, 0.264768, -0.704064, -0.412027, -0.611868, -0.614491, -0.601713, -0.860569, -0.885433, 0.166167, 0.876076], "shape": [4, 12]}, {"data": [0.123421, 0.116533, 0.272969, -0.457375, -0.10058, -0.106149, -0.439683, 0.505106, -0.805833, 0.345413, 0.200024, -0.417246], "shape": [12]}], "expected": {"data": [-0.655994, 0.381562, 0.159134, 0.189835, -0.766225, -0.164506, -0.347471, 0.281128, -0.417517, 0.212996, -0.235514, -0.513604], "shape": [3, 4]}}, "recurrent.GRU.7": {"input": {"data": [0.258393, -0.716408, -0.874891, -0.5957, -0.156024, 0.504423, -0.764552, -0.203444, 0.980501, 0.442658, -0.69405, 0.845894, -0.934893, -0.649584, -0.119074, 0.935229, -0.748855, -0.463104], "shape": [3, 6]}, "weights": [{"data": [-0.217059, 0.926079, 0.878897, 0.908534, -0.783196, 0.29837, 0.900327, 0.92828, -0.895611, 0.798379, 0.289136, -0.506593, 0.211057, -0.470939, -0.313951, 0.070627, -0.366853, -0.049493, 0.707295, 0.968283, 0.146539, 0.481093, -0.59495, -0.950117, 0.537342, -0.216253, -0.628889, 
-0.759876, 0.092087, 0.030619, -0.586226, 0.665932, 0.421089, 0.999477, 0.35168, -0.953635, 0.429368, 0.114386, 0.665266, -0.876856, -0.714418, 0.858883, -0.206244, -0.748219, 0.314382, -0.480597, -0.066145, -0.809664, 0.265962, 0.380994, -0.456802, 0.190172, -0.500332, 0.061274, -0.507235, 0.805938, -0.373262, -0.814196, -0.280043, 0.682193, 0.647611, -0.035544, 0.582232, 0.183355, 0.214989, -0.313518, 0.893282, 0.802617, 0.69754, 0.797573, 0.351413, 0.306177], "shape": [6, 12]}, {"data": [-0.563067, 0.600078, 0.415698, 0.75817, -0.229433, 0.753535, 0.899258, 0.302955, -0.502078, 0.82962, 0.547417, 0.035067, 0.267238, 0.608234, 0.248494, 0.371422, -0.285179, -0.42698, -0.941637, -0.595394, 0.115438, -0.691169, 0.559936, -0.631186, 0.341637, -0.738756, 0.332916, -0.513288, -0.025353, -0.430303, -0.082212, 0.663043, -0.270141, -0.133259, 0.364972, -0.152163, 0.429373, -0.956845, -0.419642, -0.166387, -0.770657, -0.057249, -0.432069, -0.766248, 0.091082, -0.73226, -0.747741, -0.265191], "shape": [4, 12]}, {"data": [-0.116782, -0.060653, 0.65511, -0.562505, 0.189572, 0.351985, 0.453275, -0.350892, 0.22263, -0.583627, -0.26432, 0.614658], "shape": [12]}], "expected": {"data": [-0.104133, 0.252092, 0.170733, 0.132856], "shape": [4]}}, "recurrent.GRU.8": {"input": {"data": [0.708902, 0.846182, 0.97007, -0.306318, -0.159615, 0.958509, 0.471753, 0.847227, -0.152287, 0.274365, 0.255755, 0.973133, -0.63889, -0.010724, 0.709579, -0.195852, 0.280868, 0.487307], "shape": [3, 6]}, "weights": [{"data": [0.493731, -0.713054, -0.991724, -0.182448, 0.590974, -0.95971, 0.402518, 0.575599, 0.348871, 0.587656, -0.091027, 0.610543, 0.546701, 0.805702, -0.571142, -0.803143, -0.821461, 0.473713, 0.468774, -0.395489, -0.420674, 0.179859, 0.287319, 0.595934, -0.970525, 0.623482, 0.93883, -0.601646, -0.307388, -0.734456, 0.608166, -0.696768, -0.292043, -0.582354, 0.483963, -0.879414, -0.905422, 0.66025, -0.490664, -0.675897, -0.367564, 0.413074, -0.348958, 0.095513, 0.073838, 0.923831, 
0.546994, -0.594654, 0.403964, -0.652478, 0.659219, -0.887595, -0.519658, -0.518417, -0.719567, -0.381194, 0.936127, -0.347308, -0.432567, -0.923838, 0.745346, 0.408598, 0.195032, -0.291758, 0.012271, 0.68258, 0.258998, 0.253195, -0.945687, -0.701285, -0.098939, -0.959876], "shape": [6, 12]}, {"data": [-0.510224, -0.375143, -0.999832, -0.621992, 0.347463, 0.437596, -0.840274, 0.699169, 0.022476, -0.416013, -0.694025, 0.437842, 0.467612, -0.732654, 0.131544, -0.578074, -0.016291, 0.11982, 0.7398, -0.782659, 0.71942, -0.179374, -0.639908, -0.717196, 0.676085, 0.204119, -0.956782, 0.05779, 0.048135, 0.830161, 0.559749, 0.751911, 0.560842, 0.54528, 0.343392, 0.194211, -0.840363, 0.556398, 0.214783, -0.188248, 0.507066, 0.593836, -0.739215, -0.787099, 0.047721, 0.154225, -0.330886, 0.132199], "shape": [4, 12]}], "expected": {"data": [-0.503121, -0.461341, 0.437258, -0.679647, -0.509266, -0.246599, 0.514353, -0.423158, -0.568067, -0.308138, 0.620688, -0.419014], "shape": [3, 4]}}}

In [ ]: