In [1]:
from keras.layers import Input, Dense
from keras.models import Model


Using TensorFlow backend.

In [5]:
# 784-dimensional input (e.g. a flattened 28x28 image)
inputs = Input(shape=(784, ))

# Two 64-unit hidden layers followed by a 10-way softmax output
x = Dense(64, activation='relu')(inputs)
x = Dense(64, activation='relu')(x)
predictions = Dense(10, activation='softmax')(x)

In [8]:
model = Model(inputs=inputs, outputs=predictions)

In [9]:
model.summary()


_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
input_3 (InputLayer)         (None, 784)               0         
_________________________________________________________________
dense_3 (Dense)              (None, 64)                50240     
_________________________________________________________________
dense_4 (Dense)              (None, 64)                4160      
_________________________________________________________________
dense_5 (Dense)              (None, 10)                650       
=================================================================
Total params: 55,050
Trainable params: 55,050
Non-trainable params: 0
_________________________________________________________________

In [11]:
model.compile(optimizer='rmsprop',
              loss='categorical_crossentropy',
              metrics=['accuracy'])
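
A minimal training sketch (not run in this notebook), assuming hypothetical NumPy arrays data of shape (num_samples, 784) and one-hot labels of shape (num_samples, 10):

# 'data' and 'labels' are placeholder arrays, not defined anywhere in this notebook
model.fit(data, labels, epochs=10, batch_size=32)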

In [15]:
model


Out[15]:
<keras.engine.training.Model at 0x11f680438>

In [19]:
# A model built with the Functional API can itself be called on a tensor, just like a layer
x = Input(shape=(784, ))
y = model(x)

In [20]:
y


Out[20]:
<tf.Tensor 'model_1_1/dense_5/Softmax:0' shape=(?, 10) dtype=float32>

In [21]:
from keras.layers import TimeDistributed

In [22]:
input_sequences = Input(shape=(20, 784))

In [24]:
processed_sequences = TimeDistributed(model)(input_sequences)

In [25]:
processed_sequences


Out[25]:
<tf.Tensor 'time_distributed_2/Reshape_1:0' shape=(?, 20, 10) dtype=float32>
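
TimeDistributed applies the same model independently at each of the 20 time steps, which is why the (None, 20, 784) input comes out as (None, 20, 10): one 10-way softmax per step, with the model's weights shared across all steps.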

Multi-input and multi-output models


In [9]:
import keras
from keras.layers import Input, Embedding, LSTM, Dense
from keras.models import Model

In [2]:
# A sequence of 100 word IDs
# Each word ID is an integer between 1 and 10000
main_input = Input(shape=(100, ), dtype='int32', name='main_input')

In [3]:
# Embed each of the 10000 words into a 512-dimensional vector
# The sequence length is 100
x = Embedding(output_dim=512, input_dim=10000, input_length=100)(main_input)

In [4]:
# The LSTM output is a 32-dimensional vector
lstm_out = LSTM(32)(x)

In [6]:
# Auxiliary output: a sigmoid prediction made directly from the LSTM output
auxiliary_output = Dense(1, activation='sigmoid', name='aux_output')(lstm_out)

In [7]:
# A second, 5-dimensional input
auxiliary_input = Input(shape=(5, ), name='aux_input')

In [10]:
# Concatenate the 32-dim LSTM output with the 5-dim auxiliary input (32 + 5 = 37 dims)
x = keras.layers.concatenate([lstm_out, auxiliary_input])

In [11]:
x


Out[11]:
<tf.Tensor 'concatenate_1/concat:0' shape=(?, 37) dtype=float32>

In [12]:
x = Dense(64, activation='relu')(x)
x = Dense(64, activation='relu')(x)
x = Dense(64, activation='relu')(x)
main_output = Dense(1, activation='sigmoid', name='main_output')(x)

In [13]:
x


Out[13]:
<tf.Tensor 'dense_3/Relu:0' shape=(?, 64) dtype=float32>

In [15]:
model = Model(inputs=[main_input, auxiliary_input], outputs=[main_output, auxiliary_output])

In [16]:
model.summary()


____________________________________________________________________________________________________
Layer (type)                     Output Shape          Param #     Connected to                     
====================================================================================================
main_input (InputLayer)          (None, 100)           0                                            
____________________________________________________________________________________________________
embedding_1 (Embedding)          (None, 100, 512)      5120000     main_input[0][0]                 
____________________________________________________________________________________________________
lstm_1 (LSTM)                    (None, 32)            69760       embedding_1[0][0]                
____________________________________________________________________________________________________
aux_input (InputLayer)           (None, 5)             0                                            
____________________________________________________________________________________________________
concatenate_1 (Concatenate)      (None, 37)            0           lstm_1[0][0]                     
                                                                   aux_input[0][0]                  
____________________________________________________________________________________________________
dense_1 (Dense)                  (None, 64)            2432        concatenate_1[0][0]              
____________________________________________________________________________________________________
dense_2 (Dense)                  (None, 64)            4160        dense_1[0][0]                    
____________________________________________________________________________________________________
dense_3 (Dense)                  (None, 64)            4160        dense_2[0][0]                    
____________________________________________________________________________________________________
main_output (Dense)              (None, 1)             65          dense_3[0][0]                    
____________________________________________________________________________________________________
aux_output (Dense)               (None, 1)             33          lstm_1[0][0]                     
====================================================================================================
Total params: 5,200,610
Trainable params: 5,200,610
Non-trainable params: 0
____________________________________________________________________________________________________
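
With two outputs, compile takes one loss per output (or a single loss applied to both) plus optional loss_weights. A minimal sketch, assuming hypothetical arrays main_data (shape (n, 100), integer word IDs), aux_data (shape (n, 5)) and binary labels (shape (n, 1)):

# Down-weight the auxiliary loss relative to the main loss (0.2 is an arbitrary illustrative value)
model.compile(optimizer='rmsprop', loss='binary_crossentropy',
              loss_weights=[1., 0.2])

# 'main_data', 'aux_data' and 'labels' are placeholder arrays, not defined in this notebook
model.fit([main_data, aux_data], [labels, labels],
          epochs=50, batch_size=32)

Because the layers are named, the losses and data can also be passed as dicts keyed by 'main_input'/'aux_input' and 'main_output'/'aux_output'.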

Shared Layers


In [18]:
import keras
from keras.layers import Input, LSTM, Dense
from keras.models import Model

# Each tweet is represented as a sequence of 140 vectors of dimension 256
tweet_a = Input(shape=(140, 256))
tweet_b = Input(shape=(140, 256))

In [19]:
# A single LSTM layer whose weights are shared between both inputs
shared_lstm = LSTM(64)

In [20]:
encoded_a = shared_lstm(tweet_a)
encoded_b = shared_lstm(tweet_b)

In [21]:
merged_vector = keras.layers.concatenate([encoded_a, encoded_b], axis=-1)

In [22]:
predictions = Dense(1, activation='sigmoid')(merged_vector)

In [23]:
model = Model(inputs=[tweet_a, tweet_b], outputs=predictions)

In [24]:
model.summary()


____________________________________________________________________________________________________
Layer (type)                     Output Shape          Param #     Connected to                     
====================================================================================================
input_1 (InputLayer)             (None, 140, 256)      0                                            
____________________________________________________________________________________________________
input_2 (InputLayer)             (None, 140, 256)      0                                            
____________________________________________________________________________________________________
lstm_2 (LSTM)                    (None, 64)            82176       input_1[0][0]                    
                                                                   input_2[0][0]                    
____________________________________________________________________________________________________
concatenate_2 (Concatenate)      (None, 128)           0           lstm_2[0][0]                     
                                                                   lstm_2[1][0]                     
____________________________________________________________________________________________________
dense_4 (Dense)                  (None, 1)             129         concatenate_2[0][0]              
====================================================================================================
Total params: 82,305
Trainable params: 82,305
Non-trainable params: 0
____________________________________________________________________________________________________
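
To train the shared-LSTM model, a sketch assuming hypothetical arrays data_a and data_b of shape (n, 140, 256) and binary labels (e.g. whether the two tweets were written by the same person):

model.compile(optimizer='rmsprop', loss='binary_crossentropy', metrics=['accuracy'])

# 'data_a', 'data_b' and 'labels' are placeholder arrays, not defined in this notebook
model.fit([data_a, data_b], labels, epochs=10)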
