In [1]:
%run ./tutorials/wikiqa/init.ipynb


Using TensorFlow backend.
matchzoo version 2.1.0

data loading ...
data loaded as `train_pack_raw` `dev_pack_raw` `test_pack_raw`
`ranking_task` initialized with metrics [normalized_discounted_cumulative_gain@3(0.0), normalized_discounted_cumulative_gain@5(0.0), mean_average_precision(0.0)]
loading embedding ...
embedding loaded as `glove_embedding`
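
(`init.ipynb` itself is not shown here; given the log above it presumably does roughly the following with standard MatchZoo 2.1 APIs. Treat this as a sketch of the setup, not the actual notebook.)

import matchzoo as mz
import numpy as np

ranking_task = mz.tasks.Ranking()
ranking_task.metrics = [
    mz.metrics.NormalizedDiscountedCumulativeGain(k=3),
    mz.metrics.NormalizedDiscountedCumulativeGain(k=5),
    mz.metrics.MeanAveragePrecision()
]
train_pack_raw = mz.datasets.wiki_qa.load_data('train', task=ranking_task)
dev_pack_raw = mz.datasets.wiki_qa.load_data('dev', task=ranking_task)
test_pack_raw = mz.datasets.wiki_qa.load_data('test', task=ranking_task)
glove_embedding = mz.datasets.embeddings.load_glove_embedding(dimension=300)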

In [2]:
import tensorflow as tf
from keras.backend.tensorflow_backend import set_session
config = tf.ConfigProto()
config.gpu_options.visible_device_list = "1"  # run on GPU 1 only
config.gpu_options.allow_growth = True  # dynamically grow the memory used on the GPU
sess = tf.Session(config=config)
set_session(sess)  # set this TensorFlow session as the default session for Keras
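
(An alternative, not used in this notebook, is to hide the other GPUs from TensorFlow via an environment variable set before TensorFlow is imported.)

import os
os.environ["CUDA_VISIBLE_DEVICES"] = "1"  # expose only GPU 1 to this process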

In [3]:
def load_filtered_data(preprocessor, data_type):
    assert data_type in ('train', 'dev', 'test')
    data_pack = mz.datasets.wiki_qa.load_data(data_type, task='ranking')

    if data_type == 'train':
        X, Y = preprocessor.fit_transform(data_pack).unpack()
    else:
        X, Y = preprocessor.transform(data_pack).unpack()

    # drop pairs whose question or answer became empty after preprocessing
    new_idx = []
    for i in range(Y.shape[0]):
        if X["length_left"][i] == 0 or X["length_right"][i] == 0:
            continue
        new_idx.append(i)
    new_idx = np.array(new_idx)
    print("Removed empty data. Found ", (Y.shape[0] - new_idx.shape[0]))

    for k in X.keys():
        X[k] = X[k][new_idx]
    Y = Y[new_idx]

    # keep only questions that have at least one positively labeled answer
    pos_idx = (Y == 1)[:, 0]
    pos_qid = X["id_left"][pos_idx]
    keep_idx_bool = np.array([qid in pos_qid for qid in X["id_left"]])
    keep_idx = np.arange(keep_idx_bool.shape[0])
    keep_idx = keep_idx[keep_idx_bool]
    print("Removed questions with no pos label. Found ", (keep_idx_bool == 0).sum())

    print("shuffling...")
    np.random.shuffle(keep_idx)
    for k in X.keys():
        X[k] = X[k][keep_idx]
    Y = Y[keep_idx]

    return X, Y, preprocessor

In [4]:
preprocessor = mz.preprocessors.BasicPreprocessor(fixed_length_left=20,
                                                  fixed_length_right=40,
                                                  remove_stop_words=False)
train_X, train_Y, preprocessor = load_filtered_data(preprocessor, 'train')
val_X, val_Y, _ = load_filtered_data(preprocessor, 'dev')
pred_X, pred_Y, _ = load_filtered_data(preprocessor, 'test')


Processing text_left with chain_transform of Tokenize => Lowercase => PuncRemoval: 100%|██████████| 2118/2118 [00:00<00:00, 12754.26it/s]
Processing text_right with chain_transform of Tokenize => Lowercase => PuncRemoval: 100%|██████████| 18841/18841 [00:02<00:00, 6500.31it/s]
Processing text_right with append: 100%|██████████| 18841/18841 [00:00<00:00, 1215206.55it/s]
Building FrequencyFilter from a datapack.: 100%|██████████| 18841/18841 [00:00<00:00, 185258.28it/s]
Processing text_right with transform: 100%|██████████| 18841/18841 [00:00<00:00, 184455.70it/s]
Processing text_left with extend: 100%|██████████| 2118/2118 [00:00<00:00, 922581.36it/s]
Processing text_right with extend: 100%|██████████| 18841/18841 [00:00<00:00, 1082236.12it/s]
Building Vocabulary from a datapack.: 100%|██████████| 404432/404432 [00:00<00:00, 3795031.47it/s]
Processing text_left with chain_transform of Tokenize => Lowercase => PuncRemoval: 100%|██████████| 2118/2118 [00:00<00:00, 13650.60it/s]
Processing text_right with chain_transform of Tokenize => Lowercase => PuncRemoval: 100%|██████████| 18841/18841 [00:02<00:00, 6764.51it/s]
Processing text_right with transform: 100%|██████████| 18841/18841 [00:00<00:00, 171037.31it/s]
Processing text_left with transform: 100%|██████████| 2118/2118 [00:00<00:00, 288623.28it/s]
Processing text_right with transform: 100%|██████████| 18841/18841 [00:00<00:00, 90725.37it/s]
Processing length_left with len: 100%|██████████| 2118/2118 [00:00<00:00, 583636.81it/s]
Processing length_right with len: 100%|██████████| 18841/18841 [00:00<00:00, 1203693.44it/s]
Processing text_left with transform: 100%|██████████| 2118/2118 [00:00<00:00, 193145.54it/s]
Processing text_right with transform: 100%|██████████| 18841/18841 [00:00<00:00, 134549.60it/s]
Removed empty data. Found  38
Removed questions with no pos label. Found  11672
shuffling...
Processing text_left with chain_transform of Tokenize => Lowercase => PuncRemoval: 100%|██████████| 296/296 [00:00<00:00, 14135.26it/s]
Processing text_right with chain_transform of Tokenize => Lowercase => PuncRemoval: 100%|██████████| 2708/2708 [00:00<00:00, 6731.87it/s]
Processing text_right with transform: 100%|██████████| 2708/2708 [00:00<00:00, 168473.93it/s]
Processing text_left with transform: 100%|██████████| 296/296 [00:00<00:00, 204701.40it/s]
Processing text_right with transform: 100%|██████████| 2708/2708 [00:00<00:00, 159066.95it/s]
Processing length_left with len: 100%|██████████| 296/296 [00:00<00:00, 442607.48it/s]
Processing length_right with len: 100%|██████████| 2708/2708 [00:00<00:00, 1038699.15it/s]
Processing text_left with transform: 100%|██████████| 296/296 [00:00<00:00, 149130.81it/s]
Processing text_right with transform: 100%|██████████| 2708/2708 [00:00<00:00, 140864.36it/s]
Removed empty data. Found  2
Removed questions with no pos label. Found  1601
shuffling...
Processing text_left with chain_transform of Tokenize => Lowercase => PuncRemoval: 100%|██████████| 633/633 [00:00<00:00, 12189.39it/s]
Processing text_right with chain_transform of Tokenize => Lowercase => PuncRemoval: 100%|██████████| 5961/5961 [00:00<00:00, 7064.16it/s]
Processing text_right with transform: 100%|██████████| 5961/5961 [00:00<00:00, 187399.25it/s]
Processing text_left with transform: 100%|██████████| 633/633 [00:00<00:00, 259733.36it/s]
Processing text_right with transform: 100%|██████████| 5961/5961 [00:00<00:00, 160878.23it/s]
Processing length_left with len: 100%|██████████| 633/633 [00:00<00:00, 688714.51it/s]
Processing length_right with len: 100%|██████████| 5961/5961 [00:00<00:00, 1166965.98it/s]
Processing text_left with transform: 100%|██████████| 633/633 [00:00<00:00, 158526.06it/s]
Processing text_right with transform: 100%|██████████| 5961/5961 [00:00<00:00, 137558.64it/s]
Removed empty data. Found  18
Removed questions with no pos label. Found  3805
shuffling...
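
(After filtering, each split is a dict of padded index arrays plus a label array. A quick sanity check of the result might look like the following; given the counts printed above, train should show roughly (8650, 20) / (8650, 40) / (8650, 1) and dev 1130 rows.)

for name, X, Y in [('train', train_X, train_Y),
                   ('dev',   val_X,   val_Y),
                   ('test',  pred_X,  pred_Y)]:
    # question texts are padded/truncated to 20 tokens, answers to 40
    print(name, X['text_left'].shape, X['text_right'].shape, Y.shape)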

In [5]:
from keras.optimizers import Adam
import matchzoo

model = matchzoo.contrib.models.ESIM()

# choose the task: ranking or classification
# model.params['task'] = mz.tasks.Ranking()
# or
model.params['task'] = mz.tasks.Classification(num_classes=2)

# update `input_shapes` and `embedding_input_dim` from the preprocessor context
model.params.update(preprocessor.context)

model.params['mask_value'] = 0
model.params['lstm_dim'] = 300
model.params['embedding_output_dim'] = 300
model.params['embedding_trainable'] = False
model.params['dropout_rate'] = 0.5

model.params['mlp_num_units'] = 300
model.params['mlp_num_layers'] = 0
model.params['mlp_num_fan_out'] = 300
model.params['mlp_activation_func'] = 'tanh'
model.params['optimizer'] = Adam(lr=1e-4)
model.guess_and_fill_missing_params()
model.build()
model.compile()
model.backend.summary()


__________________________________________________________________________________________________
Layer (type)                    Output Shape         Param #     Connected to                     
==================================================================================================
text_left (InputLayer)          (None, 20)           0                                            
__________________________________________________________________________________________________
text_right (InputLayer)         (None, 40)           0                                            
__________________________________________________________________________________________________
embedding (Embedding)           multiple             5002500     text_left[0][0]                  
                                                                 text_right[0][0]                 
__________________________________________________________________________________________________
dropout_1 (Dropout)             multiple             0           embedding[0][0]                  
                                                                 embedding[1][0]                  
                                                                 dense_1[0][0]                    
                                                                 dense_1[1][0]                    
                                                                 dense_2[0][0]                    
__________________________________________________________________________________________________
lambda_1 (Lambda)               multiple             0           text_left[0][0]                  
                                                                 text_right[0][0]                 
__________________________________________________________________________________________________
bidirectional_1 (Bidirectional) multiple             1442400     dropout_1[0][0]                  
                                                                 dropout_1[1][0]                  
__________________________________________________________________________________________________
lambda_2 (Lambda)               (None, 20, 1)        0           lambda_1[0][0]                   
__________________________________________________________________________________________________
lambda_3 (Lambda)               (None, 40, 1)        0           lambda_1[1][0]                   
__________________________________________________________________________________________________
multiply_1 (Multiply)           (None, 20, 600)      0           bidirectional_1[0][0]            
                                                                 lambda_2[0][0]                   
__________________________________________________________________________________________________
multiply_2 (Multiply)           (None, 40, 600)      0           bidirectional_1[1][0]            
                                                                 lambda_3[0][0]                   
__________________________________________________________________________________________________
lambda_4 (Lambda)               (None, 20, 1)        0           lambda_1[0][0]                   
__________________________________________________________________________________________________
lambda_5 (Lambda)               (None, 1, 40)        0           lambda_1[1][0]                   
__________________________________________________________________________________________________
dot_1 (Dot)                     (None, 20, 40)       0           multiply_1[0][0]                 
                                                                 multiply_2[0][0]                 
__________________________________________________________________________________________________
multiply_3 (Multiply)           (None, 20, 40)       0           lambda_4[0][0]                   
                                                                 lambda_5[0][0]                   
__________________________________________________________________________________________________
permute_1 (Permute)             (None, 40, 20)       0           dot_1[0][0]                      
                                                                 multiply_3[0][0]                 
__________________________________________________________________________________________________
atten_mask (Lambda)             multiple             0           dot_1[0][0]                      
                                                                 multiply_3[0][0]                 
                                                                 permute_1[0][0]                  
                                                                 permute_1[1][0]                  
__________________________________________________________________________________________________
softmax_1 (Softmax)             multiple             0           atten_mask[0][0]                 
                                                                 atten_mask[1][0]                 
__________________________________________________________________________________________________
dot_2 (Dot)                     (None, 20, 600)      0           softmax_1[0][0]                  
                                                                 multiply_2[0][0]                 
__________________________________________________________________________________________________
dot_3 (Dot)                     (None, 40, 600)      0           softmax_1[1][0]                  
                                                                 multiply_1[0][0]                 
__________________________________________________________________________________________________
subtract_1 (Subtract)           (None, 20, 600)      0           multiply_1[0][0]                 
                                                                 dot_2[0][0]                      
__________________________________________________________________________________________________
multiply_4 (Multiply)           (None, 20, 600)      0           multiply_1[0][0]                 
                                                                 dot_2[0][0]                      
__________________________________________________________________________________________________
subtract_2 (Subtract)           (None, 40, 600)      0           multiply_2[0][0]                 
                                                                 dot_3[0][0]                      
__________________________________________________________________________________________________
multiply_5 (Multiply)           (None, 40, 600)      0           multiply_2[0][0]                 
                                                                 dot_3[0][0]                      
__________________________________________________________________________________________________
concatenate_1 (Concatenate)     (None, 20, 2400)     0           multiply_1[0][0]                 
                                                                 dot_2[0][0]                      
                                                                 subtract_1[0][0]                 
                                                                 multiply_4[0][0]                 
__________________________________________________________________________________________________
concatenate_2 (Concatenate)     (None, 40, 2400)     0           multiply_2[0][0]                 
                                                                 dot_3[0][0]                      
                                                                 subtract_2[0][0]                 
                                                                 multiply_5[0][0]                 
__________________________________________________________________________________________________
dense_1 (Dense)                 multiple             720300      concatenate_1[0][0]              
                                                                 concatenate_2[0][0]              
__________________________________________________________________________________________________
bidirectional_2 (Bidirectional) multiple             1442400     dropout_1[2][0]                  
                                                                 dropout_1[3][0]                  
__________________________________________________________________________________________________
lambda_6 (Lambda)               (None, 20, 1)        0           lambda_1[0][0]                   
__________________________________________________________________________________________________
lambda_8 (Lambda)               (None, 20, 1)        0           lambda_1[0][0]                   
__________________________________________________________________________________________________
lambda_10 (Lambda)              (None, 40, 1)        0           lambda_1[1][0]                   
__________________________________________________________________________________________________
lambda_12 (Lambda)              (None, 40, 1)        0           lambda_1[1][0]                   
__________________________________________________________________________________________________
multiply_6 (Multiply)           (None, 20, 600)      0           bidirectional_2[0][0]            
                                                                 lambda_6[0][0]                   
__________________________________________________________________________________________________
multiply_7 (Multiply)           (None, 20, 600)      0           bidirectional_2[0][0]            
                                                                 lambda_8[0][0]                   
__________________________________________________________________________________________________
multiply_8 (Multiply)           (None, 40, 600)      0           bidirectional_2[1][0]            
                                                                 lambda_10[0][0]                  
__________________________________________________________________________________________________
multiply_9 (Multiply)           (None, 40, 600)      0           bidirectional_2[1][0]            
                                                                 lambda_12[0][0]                  
__________________________________________________________________________________________________
lambda_7 (Lambda)               (None, 600)          0           multiply_6[0][0]                 
                                                                 lambda_6[0][0]                   
__________________________________________________________________________________________________
lambda_9 (Lambda)               (None, 600)          0           multiply_7[0][0]                 
__________________________________________________________________________________________________
lambda_11 (Lambda)              (None, 600)          0           multiply_8[0][0]                 
                                                                 lambda_10[0][0]                  
__________________________________________________________________________________________________
lambda_13 (Lambda)              (None, 600)          0           multiply_9[0][0]                 
__________________________________________________________________________________________________
concatenate_3 (Concatenate)     (None, 1200)         0           lambda_7[0][0]                   
                                                                 lambda_9[0][0]                   
__________________________________________________________________________________________________
concatenate_4 (Concatenate)     (None, 1200)         0           lambda_11[0][0]                  
                                                                 lambda_13[0][0]                  
__________________________________________________________________________________________________
concatenate_5 (Concatenate)     (None, 2400)         0           concatenate_3[0][0]              
                                                                 concatenate_4[0][0]              
__________________________________________________________________________________________________
dense_2 (Dense)                 (None, 300)          720300      concatenate_5[0][0]              
__________________________________________________________________________________________________
dense_3 (Dense)                 (None, 2)            602         dropout_1[4][0]                  
==================================================================================================
Total params: 9,328,502
Trainable params: 4,326,002
Non-trainable params: 5,002,500
__________________________________________________________________________________________________
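
(A quick arithmetic check of the summary: the parameter counts follow directly from the hyper-parameters set above, i.e. 300-d embeddings, 300-unit BiLSTMs, a 2400-d match vector projected to 300, and a 2-way output.)

emb    = 16675 * 300                      # embedding: 5,002,500 params imply a 16,675-term vocab
bilstm = 2 * 4 * (300 + 300 + 1) * 300    # one Bidirectional(LSTM(300)) over 300-d inputs
proj   = 2400 * 300 + 300                 # dense_1 / dense_2: 2400 -> 300, with bias
out    = 300 * 2 + 2                      # dense_3: 300 -> 2 classes, with bias
print(emb + 2 * bilstm + 2 * proj + out)  # 9328502, matching "Total params" above

Only the embedding is frozen (`embedding_trainable = False`), which accounts for the 5,002,500 non-trainable parameters.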

In [6]:
embedding_matrix = glove_embedding.build_matrix(preprocessor.context['vocab_unit'].state['term_index'], initializer=lambda: 0)
model.load_embedding_matrix(embedding_matrix)
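
(`build_matrix` looks up each term of `term_index` in the GloVe table; terms without a pretrained vector keep the zero value supplied by `initializer=lambda: 0`. As a quick check, the matrix shape should match the embedding layer above.)

print(embedding_matrix.shape)  # presumably (16675, 300): one 300-d row per vocabulary term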

In [8]:
# train as ranking task
model.params['task'] = mz.tasks.Ranking()
evaluate = mz.callbacks.EvaluateAllMetrics(model,
                                           x=pred_X,
                                           y=pred_Y,
                                           once_every=1,
                                           batch_size=len(pred_Y))
history = model.fit(x = [train_X['text_left'],
                         train_X['text_right']],                  # (8650, 20), (8650, 40)
                    y = train_Y,                                  # (8650, 1)
                    validation_data = (val_X, val_Y),
                    callbacks=[evaluate],
                    batch_size = 32,
                    epochs = 10)


Train on 8650 samples, validate on 1130 samples
Epoch 1/10
8650/8650 [==============================] - 52s 6ms/step - loss: 0.0985 - val_loss: 0.0977
Validation: mean_average_precision(0.0): 0.6377925262180991
Epoch 2/10
8650/8650 [==============================] - 52s 6ms/step - loss: 0.0947 - val_loss: 0.0939
Validation: mean_average_precision(0.0): 0.6323746460063332
Epoch 3/10
8650/8650 [==============================] - 52s 6ms/step - loss: 0.0923 - val_loss: 0.0896
Validation: mean_average_precision(0.0): 0.6447892278707743
Epoch 4/10
8650/8650 [==============================] - 52s 6ms/step - loss: 0.0895 - val_loss: 0.0904
Validation: mean_average_precision(0.0): 0.6645210508066117
Epoch 5/10
8650/8650 [==============================] - 52s 6ms/step - loss: 0.0883 - val_loss: 0.0900
Validation: mean_average_precision(0.0): 0.6622282952529867
Epoch 6/10
8650/8650 [==============================] - 52s 6ms/step - loss: 0.0839 - val_loss: 0.0900
Validation: mean_average_precision(0.0): 0.6654279587941297
Epoch 7/10
8650/8650 [==============================] - 52s 6ms/step - loss: 0.0821 - val_loss: 0.0896
Validation: mean_average_precision(0.0): 0.6668269018575894
Epoch 8/10
8650/8650 [==============================] - 52s 6ms/step - loss: 0.0792 - val_loss: 0.0885
Validation: mean_average_precision(0.0): 0.6723704781393599
Epoch 9/10
8650/8650 [==============================] - 52s 6ms/step - loss: 0.0754 - val_loss: 0.0895
Validation: mean_average_precision(0.0): 0.6552521148587158
Epoch 10/10
8650/8650 [==============================] - 52s 6ms/step - loss: 0.0731 - val_loss: 0.0910
Validation: mean_average_precision(0.0): 0.6695447388956829
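
(To score the test split in a single call instead of once per epoch through the callback, MatchZoo's `model.evaluate` can be used; a sketch, assuming the ranking task's metrics are attached to the model.)

scores = model.evaluate(x=pred_X, y=pred_Y, batch_size=len(pred_Y))
print(scores)  # dict mapping each ranking metric to its test-set value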

In [7]:
# train as classification task 

from keras.utils import to_categorical
train_Y = to_categorical(train_Y)
val_Y = to_categorical(val_Y)

model.params['task'] = mz.tasks.Classification(num_classes=2)

history = model.fit(x = [train_X['text_left'],
                         train_X['text_right']],                  # (8650, 20), (8650, 40)
                    y = train_Y,                                  # (8650, 2) after to_categorical
                    validation_data = (val_X, val_Y),
                    batch_size = 32,
                    epochs = 10)


Train on 8650 samples, validate on 1130 samples
Epoch 1/10
8650/8650 [==============================] - 68s 8ms/step - loss: 0.3628 - val_loss: 0.3552
Epoch 2/10
8650/8650 [==============================] - 63s 7ms/step - loss: 0.3285 - val_loss: 0.3591
Epoch 3/10
8650/8650 [==============================] - 63s 7ms/step - loss: 0.3105 - val_loss: 0.3681
Epoch 4/10
8650/8650 [==============================] - 64s 7ms/step - loss: 0.3012 - val_loss: 0.3166
Epoch 5/10
8650/8650 [==============================] - 64s 7ms/step - loss: 0.2888 - val_loss: 0.2961
Epoch 6/10
8650/8650 [==============================] - 64s 7ms/step - loss: 0.2801 - val_loss: 0.3362
Epoch 7/10
8650/8650 [==============================] - 64s 7ms/step - loss: 0.2692 - val_loss: 0.3324
Epoch 8/10
8650/8650 [==============================] - 64s 7ms/step - loss: 0.2609 - val_loss: 0.3172
Epoch 9/10
8650/8650 [==============================] - 58s 7ms/step - loss: 0.2542 - val_loss: 0.3296
Epoch 10/10
8650/8650 [==============================] - 53s 6ms/step - loss: 0.2365 - val_loss: 0.3058
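
(For the classification variant, predictions are class probabilities over (irrelevant, relevant). A hypothetical follow-up to score the test split, noting that `pred_Y` was not one-hot encoded above:)

probs = model.predict([pred_X['text_left'], pred_X['text_right']], batch_size=32)
pred_labels = probs.argmax(axis=1)              # 1 = predicted relevant answer
print((pred_labels == pred_Y[:, 0]).mean())     # plain accuracy on the test pairs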

In [ ]: