In [1]:
import numpy as np
from keras.models import Sequential
from keras.layers import Dense, Activation, Convolution2D, Convolution1D
from keras.layers import Embedding, LSTM, SimpleRNN, TimeDistributed
from keras.layers import MaxPooling2D, MaxPooling1D
In [9]:
model_1 = Sequential([Dense(output_dim=20, input_shape=(784, ))])
model_2 = Sequential([Dense(output_dim=20, input_dim=784)])
x = np.random.uniform(size=(22, 784))
# Dense layer:
## input_shape (nb_samples, input_dim)
## output_shape (nb_samples, output_dim)
print (model_1.input_shape, model_1.output_shape)
print (model_2.input_shape, model_2.output_shape)
print (model_1.layers[0].get_output_shape_for((100, 784)))
output = model_1.predict(x)
print (output.shape)
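# Quick check: a Dense kernel should be stored as (input_dim, output_dim)
# with a (output_dim,) bias, mirroring the shapes printed above.
ws = model_1.layers[0].get_weights()
print ('dense weight_shape: {0}, bias_shape: {1}'.format(ws[0].shape, ws[1].shape)) # (784, 20), (20,)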
In [10]:
word_vec_dim = 150
voca_size = 2000
n_samples = 100
seq_length = 3
model_1 = Sequential([Embedding(output_dim = word_vec_dim, input_dim = voca_size)])
# Embedding layer:
## input_shape (nb_samples, sequence_length)
## output_shape (nb_samples, sequence_length, output_dim)
print (model_1.layers[0].input_shape, model_1.layers[0].output_shape)
x = np.random.randint(low=0, high=voca_size, size=(n_samples, seq_length))
output = model_1.predict(x)
print (output.shape) # (n_samples, seq_length, word_vec_dim)
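# Quick check: the Embedding weights should be a single lookup table of shape
# (voca_size, word_vec_dim); there is no bias.
ws = model_1.layers[0].get_weights()
print ('embedding weight_shape: {0}'.format(ws[0].shape)) # (2000, 150)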
In [11]:
input_dim = 50
output_dim = 150
seq_length = 10
model = Sequential()
#model.add(SimpleRNN(output_dim=output_dim, input_dim=input_dim, input_length=seq_length, return_sequences=True))
model.add(SimpleRNN(output_dim=output_dim, input_shape=(seq_length, input_dim), return_sequences=True))
# SimpleRNN
## input_shape (nb_samples, timesteps, input_dim)
## output_shape (nb_samples, timesteps, output_dim)
print (model.input_shape, model.output_shape)
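# Quick check on return_sequences: with return_sequences=False (the default)
# the RNN should emit only the last hidden state, collapsing the timesteps axis.
n_samples = 20
x = np.random.uniform(size=(n_samples, seq_length, input_dim))
print (model.predict(x).shape) # (20, 10, 150), since return_sequences=True above
model_last = Sequential([SimpleRNN(output_dim=output_dim, input_shape=(seq_length, input_dim))])
print (model_last.predict(x).shape) # (20, 150)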
In [12]:
input_dim = 150
hidden_dim = 300
seq_length = 10
n_samples = 20
model_1 = Sequential([LSTM(output_dim = hidden_dim, input_dim = input_dim, input_length = seq_length)])
# LSTM layer
## input_shape (nb_samples, timesteps, input_dim)
## output_shape:
#### return_sequences==True: (nb_samples, timesteps, output_dim)
#### return_sequences==False: (nb_samples, output_dim) ### only the last output is returned
print (model_1.layers[0].input_shape)
print (model_1.layers[0].output_shape)
x = np.random.uniform(size=(n_samples, seq_length, input_dim))
output = model_1.predict(x)
print (output.shape)
model_2 = Sequential([LSTM(output_dim = hidden_dim, input_dim = input_dim, input_length = seq_length,
return_sequences=True)]) ## return sequences
output = model_2.predict(x)
print (output.shape)
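# Sketch: return_sequences=True is what makes LSTMs stackable, since the next
# LSTM expects a full (nb_samples, timesteps, input_dim) sequence.
model_3 = Sequential([
    LSTM(output_dim=hidden_dim, input_dim=input_dim, input_length=seq_length, return_sequences=True),
    LSTM(output_dim=hidden_dim), # consumes the sequence, returns only the last state
])
print (model_3.predict(x).shape) # (20, 300)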
In [13]:
voca_size = 1000
seq_length = 10
word_vec_dim = 100
hidden_dim = 300
output_dim = 200
model = Sequential()
model.add(Embedding(output_dim=word_vec_dim, input_dim=voca_size, input_length=seq_length))
model.add(SimpleRNN(output_dim=hidden_dim, activation='sigmoid', return_sequences=True))
model.add(TimeDistributed(Dense(output_dim=output_dim, activation='softmax')))
print (model.input_shape, model.output_shape)
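# Quick check end to end: integer word ids go in, and a per-timestep softmax
# over output_dim classes comes out.
n_samples = 20
x = np.random.randint(low=0, high=voca_size, size=(n_samples, seq_length))
output = model.predict(x)
print (output.shape) # (20, 10, 200)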
In [14]:
nb_filter = 64
rf_size = (5, 5) # receptive field size
input_shape = (3, 256, 256) # 256x256 RGB picture
strides = (2, 2) # unused below; pass subsample=strides to Convolution2D to apply a stride
model = Sequential()
model.add(Convolution2D(nb_filter=nb_filter, nb_row=rf_size[0], nb_col=rf_size[1], input_shape=input_shape,
border_mode='same'))
model.add(MaxPooling2D(pool_size=(2, 2)))
# Convolution2D layer
## dim_ordering == 'th'
#### input_shape (nb_samples, channels, rows, cols)
#### output_shape (nb_samples, nb_filter, new_rows, new_cols)
## dim_ordering == 'tf'
#### input_shape (nb_samples, rows, cols, channels)
#### output_shape (nb_samples, new_rows, new_cols, nb_filter)
print ('input_shape: {0}'.format(model.layers[0].input_shape))
x = np.random.uniform(size=(1, 3, 256, 256))
output = model.predict(x)
print ('output_shape: {0}'.format(output.shape))
ws = model.layers[0].get_weights()
print ('conv layer: weight_shape: {0}, bias_shape: {1}'.format(ws[0].shape, ws[1].shape)) # 'th' kernel shape: (nb_filter, channels, nb_row, nb_col)
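# Sketch, assuming dim_ordering='th' as above: border_mode='same' pads so that
# new_rows == rows, while 'valid' shrinks to rows - nb_row + 1 = 252.
model_valid = Sequential([Convolution2D(nb_filter=nb_filter, nb_row=rf_size[0], nb_col=rf_size[1],
                                        input_shape=input_shape, border_mode='valid')])
print (model_valid.predict(x).shape) # (1, 64, 252, 252)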
In [15]:
nb_filter = 64
rf_size = 3
input_dim = 32 ## channels to Convolution1D
model = Sequential([Convolution1D(nb_filter=nb_filter, filter_length=rf_size, input_dim=input_dim)])
# Convolution1D
## input_shape (nb_samples, timesteps, channels)
## output_shape (nb_samples, new_timesteps, nb_filter)
print ('input_shape: {0}'.format(model.input_shape))
x = np.random.uniform(size=(1, 10, input_dim))
output = model.predict(x)
print ('output_shape: {0}'.format(output.shape))
ws = model.layers[0].get_weights()
print ('weight_shape: {0}, bias_shape: {1}'.format(ws[0].shape, ws[1].shape))
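# Sketch: with border_mode='valid' (the default) new_timesteps is
# 10 - 3 + 1 = 8; border_mode='same' keeps all 10 input timesteps.
model_same = Sequential([Convolution1D(nb_filter=nb_filter, filter_length=rf_size,
                                       input_dim=input_dim, border_mode='same')])
print (model_same.predict(x).shape) # (1, 10, 64)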
In [1]:
from keras.layers import Dense, LSTM, Embedding, Merge
from keras.models import Sequential, Model, model_from_yaml, model_from_json
from jupyter_notebook.datasets.importer.mnist_importer import MnistImporter
In [10]:
def build_model():
    model = Sequential(name='test')
    model.add(Dense(output_dim=100, input_dim=784))
    model.add(Dense(output_dim=10, activation='softmax'))
    return model

def reinstantiate_model(model):
    config = model.get_config()
    config[0]['model_name'] = model.name
    json_str = model.to_json()
    yaml_str = model.to_yaml()
    model_config = Sequential.from_config(config)
    model_config.name = config[0]['model_name']
    model_json = model_from_json(json_str)
    model_yaml = model_from_yaml(yaml_str)
    print (model.name, model_config.name, model_json.name, model_yaml.name)

model = build_model()
reinstantiate_model(model)
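# Sketch: the config round-trips above only cover the architecture; weights are
# saved and restored separately ('mlp_weights.h5' is just a placeholder path).
model.save_weights('mlp_weights.h5', overwrite=True)
model_clone = Sequential.from_config(model.get_config())
model_clone.load_weights('mlp_weights.h5')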
In [11]:
import json
json_str = model.to_json()
config = json.loads(json_str)
print (json_str)
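# Sketch: json.dumps with indent makes the nested layer configs easier to scan.
print (json.dumps(config, indent=2))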
In [12]:
def mlp_mnist():
    model = Sequential()
    model.add(Dense(input_shape=(784, ), output_dim=64, name='hidden_1', activation='relu'))
    model.add(Dense(output_dim=10, name='output', activation='softmax'))
    return model

model = mlp_mnist()
model_input = model.input
hidden_1 = model.get_layer('hidden_1')
layers = model.layers
print (model_input)
print (layers)
print (hidden_1)
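# Sketch: each layer also carries its own name and resolved output_shape, so
# the whole stack can be inspected without running data through it.
for layer in model.layers:
    print (layer.name, layer.output_shape)
model.summary()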
In [ ]: