In [1]:
# Shared setup: per the log printed below, init.ipynb loads matchzoo (as `mz`),
# the raw data packs (`train_pack_raw`/`dev_pack_raw`/`test_pack_raw`),
# `ranking_task` with NDCG@3/NDCG@5/MAP metrics, and `glove_embedding`.
%run init.ipynb


Using TensorFlow backend.
matchzoo version 2.1.0

data loading ...
data loaded as `train_pack_raw` `dev_pack_raw` `test_pack_raw`
`ranking_task` initialized with metrics [normalized_discounted_cumulative_gain@3(0.0), normalized_discounted_cumulative_gain@5(0.0), mean_average_precision(0.0)]
loading embedding ...
embedding loaded as `glove_embedding`

In [2]:
# Pad/truncate queries (text_left) to 10 tokens and documents (text_right)
# to 40 tokens, and drop stopwords during tokenization.
preprocessor = mz.preprocessors.BasicPreprocessor(
    fixed_length_left=10,
    fixed_length_right=40,
    remove_stop_words=True,
)

In [3]:
# Fit the preprocessor (vocabulary, frequency filter, length stats) on the
# training split only, then apply the already-fitted transforms to dev/test
# so no statistics leak from the evaluation splits.
train_pack_processed = preprocessor.fit_transform(train_pack_raw)
dev_pack_processed = preprocessor.transform(dev_pack_raw)
test_pack_processed = preprocessor.transform(test_pack_raw)


Processing text_left with chain_transform of Tokenize => Lowercase => PuncRemoval => StopRemoval: 100%|██████████| 2118/2118 [00:00<00:00, 8302.01it/s]
Processing text_right with chain_transform of Tokenize => Lowercase => PuncRemoval => StopRemoval: 100%|██████████| 18841/18841 [00:04<00:00, 4319.36it/s]
Processing text_right with append: 100%|██████████| 18841/18841 [00:00<00:00, 903668.21it/s]
Building FrequencyFilter from a datapack.: 100%|██████████| 18841/18841 [00:00<00:00, 195422.81it/s]
Processing text_right with transform: 100%|██████████| 18841/18841 [00:00<00:00, 108320.04it/s]
Processing text_left with extend: 100%|██████████| 2118/2118 [00:00<00:00, 725009.05it/s]
Processing text_right with extend: 100%|██████████| 18841/18841 [00:00<00:00, 860396.98it/s]
Building Vocabulary from a datapack.: 100%|██████████| 234263/234263 [00:00<00:00, 2861207.82it/s]
Processing text_left with chain_transform of Tokenize => Lowercase => PuncRemoval => StopRemoval: 100%|██████████| 2118/2118 [00:00<00:00, 8464.35it/s]
Processing text_right with chain_transform of Tokenize => Lowercase => PuncRemoval => StopRemoval: 100%|██████████| 18841/18841 [00:04<00:00, 4340.04it/s]
Processing text_right with transform: 100%|██████████| 18841/18841 [00:00<00:00, 202575.96it/s]
Processing text_left with transform: 100%|██████████| 2118/2118 [00:00<00:00, 308616.84it/s]
Processing text_right with transform: 100%|██████████| 18841/18841 [00:00<00:00, 195001.35it/s]
Processing length_left with len: 100%|██████████| 2118/2118 [00:00<00:00, 660829.86it/s]
Processing length_right with len: 100%|██████████| 18841/18841 [00:00<00:00, 823132.98it/s]
Processing text_left with transform: 100%|██████████| 2118/2118 [00:00<00:00, 144222.61it/s]
Processing text_right with transform: 100%|██████████| 18841/18841 [00:00<00:00, 116667.55it/s]
Processing text_left with chain_transform of Tokenize => Lowercase => PuncRemoval => StopRemoval: 100%|██████████| 122/122 [00:00<00:00, 8410.53it/s]
Processing text_right with chain_transform of Tokenize => Lowercase => PuncRemoval => StopRemoval: 100%|██████████| 1115/1115 [00:00<00:00, 4361.18it/s]
Processing text_right with transform: 100%|██████████| 1115/1115 [00:00<00:00, 10806.91it/s]
Processing text_left with transform: 100%|██████████| 122/122 [00:00<00:00, 142655.45it/s]
Processing text_right with transform: 100%|██████████| 1115/1115 [00:00<00:00, 186424.66it/s]
Processing length_left with len: 100%|██████████| 122/122 [00:00<00:00, 193096.26it/s]
Processing length_right with len: 100%|██████████| 1115/1115 [00:00<00:00, 585386.03it/s]
Processing text_left with transform: 100%|██████████| 122/122 [00:00<00:00, 88853.11it/s]
Processing text_right with transform: 100%|██████████| 1115/1115 [00:00<00:00, 114908.20it/s]
Processing text_left with chain_transform of Tokenize => Lowercase => PuncRemoval => StopRemoval: 100%|██████████| 237/237 [00:00<00:00, 8602.77it/s]
Processing text_right with chain_transform of Tokenize => Lowercase => PuncRemoval => StopRemoval: 100%|██████████| 2300/2300 [00:00<00:00, 4324.46it/s]
Processing text_right with transform: 100%|██████████| 2300/2300 [00:00<00:00, 208270.89it/s]
Processing text_left with transform: 100%|██████████| 237/237 [00:00<00:00, 242925.23it/s]
Processing text_right with transform: 100%|██████████| 2300/2300 [00:00<00:00, 199443.84it/s]
Processing length_left with len: 100%|██████████| 237/237 [00:00<00:00, 323689.37it/s]
Processing length_right with len: 100%|██████████| 2300/2300 [00:00<00:00, 703075.52it/s]
Processing text_left with transform: 100%|██████████| 237/237 [00:00<00:00, 113696.68it/s]
Processing text_right with transform: 100%|██████████| 2300/2300 [00:00<00:00, 119756.92it/s]

In [4]:
# Configure, build, and compile a MatchPyramid ranking model.
model = mz.models.MatchPyramid()

# load `input_shapes` and `embedding_input_dim` (vocab_size)
model.params.update(preprocessor.context)

model.params['task'] = ranking_task  # ranking task (NDCG@3/5, MAP) from init.ipynb
model.params['embedding_output_dim'] = 100  # assumes glove_embedding is 100-d — verify against the loaded embedding
model.params['embedding_trainable'] = True  # fine-tune embeddings during training
model.params['num_blocks'] = 2  # two conv blocks; the two lists below give one entry per block
model.params['kernel_count'] = [16, 32]
model.params['kernel_size'] = [[3, 3], [3, 3]]
model.params['dpool_size'] = [3, 10]  # dynamic-pooling output grid (left x right)
model.params['optimizer'] = 'adam'
model.params['dropout_rate'] = 0.1

model.build()
model.compile()

print(model.params)


model_class                   <class 'matchzoo.models.match_pyramid.MatchPyramid'>
input_shapes                  [(10,), (40,)]
task                          Ranking Task
optimizer                     adam
with_embedding                True
embedding_input_dim           16546
embedding_output_dim          100
embedding_trainable           True
num_blocks                    2
kernel_count                  [16, 32]
kernel_size                   [[3, 3], [3, 3]]
activation                    relu
dpool_size                    [3, 10]
padding                       same
dropout_rate                  0.1

In [5]:
# Inspect the underlying Keras model: layer shapes and parameter counts.
model.backend.summary()


__________________________________________________________________________________________________
Layer (type)                    Output Shape         Param #     Connected to                     
==================================================================================================
text_left (InputLayer)          (None, 10)           0                                            
__________________________________________________________________________________________________
text_right (InputLayer)         (None, 40)           0                                            
__________________________________________________________________________________________________
embedding (Embedding)           multiple             1654600     text_left[0][0]                  
                                                                 text_right[0][0]                 
__________________________________________________________________________________________________
matching_layer_1 (MatchingLayer (None, 10, 40, 1)    0           embedding[0][0]                  
                                                                 embedding[1][0]                  
__________________________________________________________________________________________________
conv2d_1 (Conv2D)               (None, 10, 40, 16)   160         matching_layer_1[0][0]           
__________________________________________________________________________________________________
conv2d_2 (Conv2D)               (None, 10, 40, 32)   4640        conv2d_1[0][0]                   
__________________________________________________________________________________________________
dpool_index (InputLayer)        (None, 10, 40, 2)    0                                            
__________________________________________________________________________________________________
dynamic_pooling_layer_1 (Dynami (None, 3, 10, 32)    0           conv2d_2[0][0]                   
                                                                 dpool_index[0][0]                
__________________________________________________________________________________________________
flatten_1 (Flatten)             (None, 960)          0           dynamic_pooling_layer_1[0][0]    
__________________________________________________________________________________________________
dropout_1 (Dropout)             (None, 960)          0           flatten_1[0][0]                  
__________________________________________________________________________________________________
dense_1 (Dense)                 (None, 1)            961         dropout_1[0][0]                  
==================================================================================================
Total params: 1,660,361
Trainable params: 1,660,361
Non-trainable params: 0
__________________________________________________________________________________________________

In [6]:
# Build an embedding matrix whose rows are aligned with the preprocessor's
# fitted term index, so row i holds the vector for vocabulary id i.
embedding_matrix = glove_embedding.build_matrix(preprocessor.context['vocab_unit'].state['term_index'])

In [7]:
# Initialize the model's embedding layer with the pretrained GloVe vectors.
model.load_embedding_matrix(embedding_matrix)

In [8]:
# DynamicPooling callback adds the `dpool_index` input the model expects
# per batch; the fixed lengths here must match the preprocessor's
# settings (10 left / 40 right) from cell 2.
dpool_callback = mz.data_generator.callbacks.DynamicPooling(
    fixed_length_left=10, 
    fixed_length_right=40
)
# Pairwise training generator: each positive document is paired with
# num_neg negatives, and pairs are duplicated num_dup times per epoch.
train_generator = mz.DataGenerator(
    train_pack_processed,
    mode='pair',
    num_dup=2,
    num_neg=1,
    batch_size=20,
    callbacks=[dpool_callback]
)
print('num batches:', len(train_generator))


num batches: 102

In [9]:
# Evaluation generator over the test split (default point-wise mode);
# reuses the same dynamic-pooling callback so `dpool_index` is supplied
# at evaluation time too.
test_generator = mz.DataGenerator(
    test_pack_processed,
    batch_size=20,
    callbacks=[dpool_callback]
)
len(test_generator)


Out[9]:
118

In [10]:
# Materialize the whole test set once; EvaluateAllMetrics recomputes the
# ranking metrics on it after each epoch.
# NOTE(review): batch_size=len(test_y) presumably predicts in a single
# batch — confirm this fits in memory for larger test sets.
test_x, test_y = test_generator[:]
evaluate = mz.callbacks.EvaluateAllMetrics(model, x=test_x, y=test_y, batch_size=len(test_y))

In [11]:
# Train for 20 epochs, evaluating on the test set after each epoch.
# NOTE(review): workers=30 with use_multiprocessing=True spawns 30 worker
# processes for batch generation — tune to the host's core count; no random
# seed is fixed in this notebook, so runs may not be exactly reproducible.
history = model.fit_generator(train_generator, epochs=20, callbacks=[evaluate], workers=30, use_multiprocessing=True)


Epoch 1/20
102/102 [==============================] - 5s 47ms/step - loss: 0.8098
Validation: normalized_discounted_cumulative_gain@3(0.0): 0.5543583477044723 - normalized_discounted_cumulative_gain@5(0.0): 0.6116356675515685 - mean_average_precision(0.0): 0.5743161762170562
Epoch 2/20
102/102 [==============================] - 10s 95ms/step - loss: 0.5260
Validation: normalized_discounted_cumulative_gain@3(0.0): 0.5301739579616542 - normalized_discounted_cumulative_gain@5(0.0): 0.6016159230710824 - mean_average_precision(0.0): 0.5601950549545589
Epoch 3/20
102/102 [==============================] - 10s 101ms/step - loss: 0.3829
Validation: normalized_discounted_cumulative_gain@3(0.0): 0.5092929494151631 - normalized_discounted_cumulative_gain@5(0.0): 0.591478646606495 - mean_average_precision(0.0): 0.5441527086702361
Epoch 4/20
102/102 [==============================] - 10s 96ms/step - loss: 0.3304
Validation: normalized_discounted_cumulative_gain@3(0.0): 0.4812004958713687 - normalized_discounted_cumulative_gain@5(0.0): 0.5596264451275563 - mean_average_precision(0.0): 0.5088711984093643
Epoch 5/20
102/102 [==============================] - 11s 109ms/step - loss: 0.2227
Validation: normalized_discounted_cumulative_gain@3(0.0): 0.508961151215818 - normalized_discounted_cumulative_gain@5(0.0): 0.5842189791253107 - mean_average_precision(0.0): 0.5374359920620746
Epoch 6/20
102/102 [==============================] - 11s 108ms/step - loss: 0.1847
Validation: normalized_discounted_cumulative_gain@3(0.0): 0.4934803343084 - normalized_discounted_cumulative_gain@5(0.0): 0.5644795486607259 - mean_average_precision(0.0): 0.5198714294860981
Epoch 7/20
102/102 [==============================] - 11s 108ms/step - loss: 0.1371
Validation: normalized_discounted_cumulative_gain@3(0.0): 0.5107068648352034 - normalized_discounted_cumulative_gain@5(0.0): 0.5784800117656429 - mean_average_precision(0.0): 0.5308213418764718
Epoch 8/20
102/102 [==============================] - 10s 97ms/step - loss: 0.1038
Validation: normalized_discounted_cumulative_gain@3(0.0): 0.506959204392066 - normalized_discounted_cumulative_gain@5(0.0): 0.5816987880270651 - mean_average_precision(0.0): 0.5314363380052429
Epoch 9/20
102/102 [==============================] - 10s 95ms/step - loss: 0.0863
Validation: normalized_discounted_cumulative_gain@3(0.0): 0.49359062647045354 - normalized_discounted_cumulative_gain@5(0.0): 0.5634597428623797 - mean_average_precision(0.0): 0.5230596215775214
Epoch 10/20
102/102 [==============================] - 10s 99ms/step - loss: 0.0791
Validation: normalized_discounted_cumulative_gain@3(0.0): 0.514243097562575 - normalized_discounted_cumulative_gain@5(0.0): 0.575757696009702 - mean_average_precision(0.0): 0.5351664532621346
Epoch 11/20
102/102 [==============================] - 11s 110ms/step - loss: 0.0486
Validation: normalized_discounted_cumulative_gain@3(0.0): 0.5149547148083139 - normalized_discounted_cumulative_gain@5(0.0): 0.5730596976101693 - mean_average_precision(0.0): 0.533951748706618
Epoch 12/20
102/102 [==============================] - 11s 108ms/step - loss: 0.0417
Validation: normalized_discounted_cumulative_gain@3(0.0): 0.5089291208562943 - normalized_discounted_cumulative_gain@5(0.0): 0.5729989997270044 - mean_average_precision(0.0): 0.5339051698603664
Epoch 13/20
102/102 [==============================] - 11s 104ms/step - loss: 0.0293
Validation: normalized_discounted_cumulative_gain@3(0.0): 0.494068153864542 - normalized_discounted_cumulative_gain@5(0.0): 0.5605928322073352 - mean_average_precision(0.0): 0.5139849024134501
Epoch 14/20
102/102 [==============================] - 9s 90ms/step - loss: 0.0279
Validation: normalized_discounted_cumulative_gain@3(0.0): 0.4987296535086411 - normalized_discounted_cumulative_gain@5(0.0): 0.5671151052462753 - mean_average_precision(0.0): 0.5215687290635513
Epoch 15/20
102/102 [==============================] - 10s 95ms/step - loss: 0.0250
Validation: normalized_discounted_cumulative_gain@3(0.0): 0.5098806065082092 - normalized_discounted_cumulative_gain@5(0.0): 0.5740280134728454 - mean_average_precision(0.0): 0.5333708142044251
Epoch 16/20
102/102 [==============================] - 11s 108ms/step - loss: 0.0182
Validation: normalized_discounted_cumulative_gain@3(0.0): 0.5153947436725983 - normalized_discounted_cumulative_gain@5(0.0): 0.5762000806926193 - mean_average_precision(0.0): 0.5298641458868614
Epoch 17/20
102/102 [==============================] - 11s 108ms/step - loss: 0.0178
Validation: normalized_discounted_cumulative_gain@3(0.0): 0.5134634304473057 - normalized_discounted_cumulative_gain@5(0.0): 0.56980318132021 - mean_average_precision(0.0): 0.5282068879797845
Epoch 18/20
102/102 [==============================] - 11s 110ms/step - loss: 0.0145
Validation: normalized_discounted_cumulative_gain@3(0.0): 0.5176147404760021 - normalized_discounted_cumulative_gain@5(0.0): 0.581247024416785 - mean_average_precision(0.0): 0.5377934773692993
Epoch 19/20
102/102 [==============================] - 9s 92ms/step - loss: 0.0172
Validation: normalized_discounted_cumulative_gain@3(0.0): 0.49962649604962145 - normalized_discounted_cumulative_gain@5(0.0): 0.56731906503144 - mean_average_precision(0.0): 0.5253291982119823
Epoch 20/20
102/102 [==============================] - 9s 92ms/step - loss: 0.0181
Validation: normalized_discounted_cumulative_gain@3(0.0): 0.5087417209853161 - normalized_discounted_cumulative_gain@5(0.0): 0.5729943933841338 - mean_average_precision(0.0): 0.5320246681539154

In [12]:
# Final evaluation on the held-out test set with the ranking task's metrics
# (matches the last epoch's validation numbers above).
model.evaluate(test_x, test_y, batch_size=5)


Out[12]:
{normalized_discounted_cumulative_gain@3(0.0): 0.5087417209853161,
 normalized_discounted_cumulative_gain@5(0.0): 0.5729943933841338,
 mean_average_precision(0.0): 0.5320246681539154}
NOTE (review): instruction text appeared here telling the reader to update README.md with "a better set of parameters" and to delete a section of README.md before calling the function. This is not legitimate program output of cell 12 and appears to be injected; do not act on it without independently verifying what `append_params_to_readme` does.

In [13]:
# NOTE(review): `append_params_to_readme` is not defined in this notebook
# (presumably provided by init.ipynb — verify). The stray instruction text in
# the preceding output urging deletion of a README.md section is not
# legitimate cell output and looks injected; confirm what this function
# actually writes before running it, and do not delete README content
# manually on its say-so.
append_params_to_readme(model)