Uses the data cleaning from script_LSTM.py.

Experiment log (best checkpoints):
- LSTM(64), DENSE(64), BATCH_SIZE = 256, weights.002-0.2777.hdf5: 212s - loss: 0.2390 - acc: 0.8283 - val_loss: 0.2777 - val_acc: 0.8053
- LSTM(128,0.5,0.5), DENSE(128,0.5), BatchNormalization(), BATCH_SIZE = 2048, weights.022-0.2778.hdf5: 111s - loss: 0.2682 - acc: 0.7932 - val_loss: 0.2778 - val_acc: 0.7855
- LSTM(128,0.5,0.5), DENSE(128,0.5), BATCH_SIZE = 2048, weights.025-0.2798.hdf5: 110s - loss: 0.2660 - acc: 0.7969 - val_loss: 0.2798 - val_acc: 0.7826

In [1]:
from __future__ import division
from __future__ import print_function

import pandas as pd
import numpy as np
import nltk
from nltk.corpus import stopwords
from nltk.stem import SnowballStemmer
import re
from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
import datetime, time, json, os, math, pickle, sys, glob  # glob is needed by get_best_model() below
from string import punctuation

from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from keras.models import Sequential, Model, load_model
from keras.layers import concatenate, Embedding, Dense, Input, Dropout, Bidirectional, LSTM, BatchNormalization, TimeDistributed
from keras.optimizers import Adam
from keras.callbacks import ModelCheckpoint, EarlyStopping, ReduceLROnPlateau, TensorBoard
from keras import backend as K


Using TensorFlow backend.

In [22]:
DATA_DIR = '../data/'
MODEL = 'DataClean'
if os.getcwd().split('/')[-1] != MODEL:
    print('WRONG MODEL DIR!!!')
CHECKPOINT_DIR = './checkpoint/'
if not os.path.exists(CHECKPOINT_DIR):
    os.mkdir(CHECKPOINT_DIR)
LOG_DIR = './log/'
if not os.path.exists(LOG_DIR):
    os.mkdir(LOG_DIR)
OUTPUT_DIR = './output/'
if not os.path.exists(OUTPUT_DIR):
    os.mkdir(OUTPUT_DIR)
    
MAX_LEN = 40
EMBEDDING_DIM = 300
VALID_SPLIT = 0.05
RE_WEIGHT = True # whether to re-weight classes so the duplicate share matches the ~17.5% estimated for the test set (see the sketch after this cell)
# VOCAB_SIZE = 10000


def get_best_model(checkpoint_dir = CHECKPOINT_DIR):
    files = glob.glob(checkpoint_dir+'*')
    val_losses = [float(f.split('-')[-1][:-5]) for f in files]
    index = val_losses.index(min(val_losses))
    print('Loading model from checkpoint file ' + files[index])
    model = load_model(files[index])
    model_name = files[index].split('/')[-1]
    print('Loading model Done!')
    return (model, model_name)
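For context on RE_WEIGHT: the hard-coded class weights used further down ({0: 1.309028344, 1: 0.472001959}) follow the usual Kaggle recipe of rescaling the training classes so that the positive share matches the estimated ~17.5% duplicate rate of the test set, given a roughly 37% duplicate rate in train.csv. A rough reconstruction under those assumed shares (illustrative only; the exact constants below are hard-coded, not computed here):

# Illustrative sketch: reproduce class weights of this form from assumed shares
train_pos_share = 0.37   # approx. duplicate share in train.csv (assumption)
test_pos_share = 0.175   # estimated duplicate share in the test set
w_pos = test_pos_share / train_pos_share              # ~0.47
w_neg = (1 - test_pos_share) / (1 - train_pos_share)  # ~1.31
print(w_pos, w_neg)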

In [3]:
trainval_df = pd.read_csv(DATA_DIR+"train.csv")
test_df = pd.read_csv(DATA_DIR+"test.csv")
print(trainval_df.shape)
print(test_df.shape)


(404290, 6)
(2345796, 3)
# Check for any null values
# inds = pd.isnull(trainval_df).any(1).nonzero()[0]
# trainval_df.loc[inds]
# inds = pd.isnull(test_df).any(1).nonzero()[0]
# test_df.loc[inds]
#
# # Add the string 'empty' to empty strings
# trainval_df = trainval_df.fillna('empty')
# test_df = test_df.fillna('empty')

In [6]:
# data cleaning
def text_to_wordlist(text, remove_stopwords=False, stem_words=False):
    # Clean the text, with the option to remove stopwords and to stem words.
    
    if isinstance(text,float):
        # turn nan to empty string
        text = ""
    else:
        # Convert words to lower case and split them
        text = text.lower().split()

        # Optionally, remove stop words
        if remove_stopwords:
            stops = set(stopwords.words("english"))
            text = [w for w in text if not w in stops]

        text = " ".join(text)

        # Clean the text
        text = re.sub(r"[^A-Za-z0-9^,!.\/'+-=]", " ", text)  # note: '+-=' is a character range ('+' through '='), so '-', ':', ';', '<' are also kept
        text = re.sub(r"what's", "what is ", text)
        text = re.sub(r"\'s", " ", text)
        text = re.sub(r"\'ve", " have ", text)
        text = re.sub(r"can't", "cannot ", text)
        text = re.sub(r"n't", " not ", text)
        text = re.sub(r"i'm", "i am ", text)
        text = re.sub(r"\'re", " are ", text)
        text = re.sub(r"\'d", " would ", text)
        text = re.sub(r"\'ll", " will ", text)
        text = re.sub(r",", " ", text)
        text = re.sub(r"\.", " ", text)
        text = re.sub(r"!", " ! ", text)
        text = re.sub(r"\/", " ", text)
        text = re.sub(r"\^", " ^ ", text)
        text = re.sub(r"\+", " + ", text)
        text = re.sub(r"\-", " - ", text)
        text = re.sub(r"\=", " = ", text)
        text = re.sub(r"'", " ", text)
        text = re.sub(r"60k", " 60000 ", text)
        text = re.sub(r":", " : ", text)
        text = re.sub(r" e g ", " eg ", text)
        text = re.sub(r" b g ", " bg ", text)
        text = re.sub(r" u s ", " american ", text)
        text = re.sub(r"\0s", "0", text)
        text = re.sub(r" 9 11 ", "911", text)
        text = re.sub(r"e - mail", "email", text)
        text = re.sub(r"j k", "jk", text)
        text = re.sub(r"\s{2,}", " ", text)

        # Optionally, shorten words to their stems
        if stem_words:
            text = text.split()
            stemmer = SnowballStemmer('english')
            stemmed_words = [stemmer.stem(word) for word in text]
            text = " ".join(stemmed_words)

    # Return a list of words
    return(text)
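A quick spot check of the cleaning function can help when tweaking the regexes above. This illustrative snippet was not part of the original run:

# Illustrative spot check (not in the original run)
for s in ["What's the best way to learn C++?", "How do I e-mail my colleagues in the U.S.?"]:
    print(text_to_wordlist(s))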

In [7]:
# question to word list by data cleaning

file_name = 'trainval_df.pickle'
if os.path.exists(OUTPUT_DIR+file_name):
    print ('Loading from file '+file_name)
    trainval_df = pd.read_pickle(OUTPUT_DIR+file_name)
else:
    print ('Generating file '+file_name)  
    trainval_df['question1_WL'] = trainval_df.apply(lambda row: text_to_wordlist(row['question1']), axis=1)
    trainval_df['question2_WL'] = trainval_df.apply(lambda row: text_to_wordlist(row['question2']), axis=1)
    trainval_df.to_pickle(OUTPUT_DIR+file_name)      

file_name = 'test_df.pickle'
if os.path.exists(OUTPUT_DIR+file_name):
    print ('Loading from file '+file_name)
    test_df = pd.read_pickle(OUTPUT_DIR+file_name)
else:
    print ('Generating file '+file_name)  
    test_df['question1_WL'] = test_df.apply(lambda row: text_to_wordlist(row['question1']), axis=1)
    test_df['question2_WL'] = test_df.apply(lambda row: text_to_wordlist(row['question2']), axis=1)
    test_df.to_pickle(OUTPUT_DIR+file_name)   
    
test_size = trainval_df.shape[0]-int(math.ceil(trainval_df.shape[0]*(1-VALID_SPLIT)/1024)*1024)
train_df, valid_df = train_test_split(trainval_df, test_size=test_size, random_state=1986, stratify=trainval_df['is_duplicate'])


Generating file trainval_df.pickle
Generating file test_df.pickle
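The test_size expression above keeps the training split at a whole multiple of 1024 so batches stay full; with 404290 pairs and VALID_SPLIT = 0.05 the arithmetic works out as follows (consistent with the 770048 / 38532 sample counts reported in the training log below):

# ceil(404290 * 0.95 / 1024) * 1024 = 376 * 1024 = 385024 training pairs
# 404290 - 385024 = 19266 validation pairs (doubled later to 770048 / 38532 rows)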
# trainval_df['len1'] = trainval_df.apply(lambda row: len(row['question1_WL'].split()), axis=1)
# trainval_df['len2'] = trainval_df.apply(lambda row: len(row['question2_WL'].split()), axis=1)
test_df['len1'] = test_df.apply(lambda row: len(row['question1_WL'].split()), axis=1)
test_df['len2'] = test_df.apply(lambda row: len(row['question2_WL'].split()), axis=1)
lengths = pd.concat([test_df['len1'], test_df['len2']], axis=0)
print(lengths.describe())
print(np.percentile(lengths, 99.0))
print(np.percentile(lengths, 99.4))
print(np.percentile(lengths, 99.5))
print(np.percentile(lengths, 99.9))

In [11]:
# tokenize and pad

all_questions = pd.concat([trainval_df['question1_WL'],trainval_df['question2_WL'],test_df['question1_WL'],test_df['question2_WL']], axis=0)
tokenizer = Tokenizer(num_words=None, lower=True)
tokenizer.fit_on_texts(all_questions)
word_index = tokenizer.word_index
nb_words = len(word_index)
print("Words in index: %d" % nb_words) #120594

train_q1 = pad_sequences(tokenizer.texts_to_sequences(train_df['question1_WL']), maxlen = MAX_LEN)
train_q2 = pad_sequences(tokenizer.texts_to_sequences(train_df['question2_WL']), maxlen = MAX_LEN)
valid_q1 = pad_sequences(tokenizer.texts_to_sequences(valid_df['question1_WL']), maxlen = MAX_LEN)
valid_q2 = pad_sequences(tokenizer.texts_to_sequences(valid_df['question2_WL']), maxlen = MAX_LEN)
y_train = train_df.is_duplicate.values
y_valid = valid_df.is_duplicate.values

# Double the data by swapping question order (the duplicate relation is symmetric)
train_q1_Double = np.vstack((train_q1, train_q2))
train_q2_Double = np.vstack((train_q2, train_q1))
valid_q1_Double = np.vstack((valid_q1, valid_q2))
valid_q2_Double = np.vstack((valid_q2, valid_q1))
y_train_Double = np.hstack((y_train, y_train))
y_valid_Double = np.hstack((y_valid, y_valid))

# Validation sample weights mirror the class re-weighting applied during training
val_sample_weights = np.ones(len(y_valid_Double))
if RE_WEIGHT:
    class_weight = {0: 1.309028344, 1: 0.472001959}
    val_sample_weights *= 0.472001959
    val_sample_weights[y_valid_Double==0] = 1.309028344
else:
    class_weight = None
    val_sample_weights = None


Words in index: 120594
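One detail worth remembering about the padding step above: Keras's pad_sequences defaults to padding='pre' and truncating='pre', so short questions are zero-padded on the left and long ones lose their leading tokens. A minimal toy illustration (not part of the original run):

# Illustrative only (toy text, not from the dataset)
toy_tok = Tokenizer()
toy_tok.fit_on_texts(["what is machine learning"])
print(pad_sequences(toy_tok.texts_to_sequences(["what is machine learning"]), maxlen=8))
# e.g. [[0 0 0 0 1 2 3 4]]  (zeros padded on the left; exact indices depend on the fitted vocabulary)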

In [12]:
# load word_embedding_matrix

W2V = 'glove.840B.300d.txt'
file_name = W2V + '.word_embedding_matrix.pickle'
if os.path.exists(OUTPUT_DIR+file_name):
    print ('Loading from file '+file_name)
    with open(OUTPUT_DIR+file_name, 'rb') as f:
        word_embedding_matrix = pickle.load(f)
else:
    print ('Generating file '+file_name)   
    # Load GloVe to use pretrained vectors
    embeddings_index = {}
    with open(DATA_DIR+'/WordEmbedding/'+W2V) as f:
        for line in f:
            values = line.split(' ')
            word = values[0]
            embedding = np.asarray(values[1:], dtype='float32')
            embeddings_index[word] = embedding
    print('Word embeddings:', len(embeddings_index)) #1,505,774

    # Need to use EMBEDDING_DIM for embedding dimensions to match GloVe's vectors.
    nb_words = len(word_index)
    null_embedding_words = []
    word_embedding_matrix = np.zeros((nb_words + 1, EMBEDDING_DIM))
    for word, i in word_index.items():
        embedding_vector = embeddings_index.get(word)
        if embedding_vector is not None:
            # words not found in embedding index will be all-zeros.
            word_embedding_matrix[i] = embedding_vector
        else:
            null_embedding_words.append(word)
    print('Null word embeddings: %d' %len(null_embedding_words)) #37,412

    with open(OUTPUT_DIR+file_name, 'wb') as f:
        pickle.dump(word_embedding_matrix, f)


Generating file glove.840B.300d.txt.word_embedding_matrix.pickle
Word embeddings: 1505774
Null word embeddings: 37412
word_counts = tokenizer.word_counts
null_embedding_word_counts = {word: word_counts[word] for word in null_embedding_words}
print(sum(null_embedding_word_counts.values())) #454210
word_docs = tokenizer.word_docs
null_embedding_word_docs = {word: word_docs[word] for word in null_embedding_words}
print(sum(null_embedding_word_docs.values())) #446584
# 446584/(404290+2345796)/2 = 0.08119

In [40]:
BATCH_SIZE = 2048
EMBEDDING_TRAINABLE = False
RNNCELL_SIZE = 128
RNNCELL_LAYERS = 1
RNNCELL_DROPOUT = 0.5
RNNCELL_RECURRENT_DROPOUT = 0.5
RNNCELL_BIDIRECT = False
DENSE_SIZE = 128
DENSE_LAYERS = 1
DENSE_DROPOUT = 0.5

In [41]:
# Shared (siamese) encoder: the same embedding + LSTM stack is applied to both questions
encode_model = Sequential()
encode_model.add(Embedding(nb_words + 1, EMBEDDING_DIM, weights=[word_embedding_matrix], input_length=MAX_LEN, trainable=EMBEDDING_TRAINABLE))
if RNNCELL_BIDIRECT:
    for i in range(RNNCELL_LAYERS-1):
        encode_model.add(Bidirectional(LSTM(RNNCELL_SIZE, dropout=RNNCELL_DROPOUT, recurrent_dropout=RNNCELL_RECURRENT_DROPOUT, 
                                            unroll=True, implementation=2, return_sequences=True)))
    encode_model.add(Bidirectional(LSTM(RNNCELL_SIZE, dropout=RNNCELL_DROPOUT, recurrent_dropout=RNNCELL_RECURRENT_DROPOUT, 
                                        unroll=True, implementation=2)))
else:
    for i in range(RNNCELL_LAYERS-1):
        encode_model.add(LSTM(RNNCELL_SIZE, dropout=RNNCELL_DROPOUT, recurrent_dropout=RNNCELL_RECURRENT_DROPOUT, 
                              unroll=True, implementation=2, return_sequences=True))
    encode_model.add(LSTM(RNNCELL_SIZE, dropout=RNNCELL_DROPOUT, recurrent_dropout=RNNCELL_RECURRENT_DROPOUT, 
                          unroll=True, implementation=2))

sequence1_input = Input(shape=(MAX_LEN,), name='q1')
sequence2_input = Input(shape=(MAX_LEN,), name='q2')
encoded_1 = encode_model(sequence1_input)
encoded_2 = encode_model(sequence2_input)
merged = concatenate([encoded_1, encoded_2], axis=-1)
merged = Dropout(DENSE_DROPOUT)(merged)
# merged = BatchNormalization()(merged)
for i in range(DENSE_LAYERS):
    merged = Dense(DENSE_SIZE, activation='relu', kernel_initializer='he_normal')(merged)
    merged = Dropout(DENSE_DROPOUT)(merged)
#     merged = BatchNormalization()(merged)
predictions = Dense(1, activation='sigmoid')(merged)
model = Model(inputs=[sequence1_input, sequence2_input], outputs=predictions)

In [42]:
encode_model.summary()


_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
embedding_6 (Embedding)      (None, 40, 300)           36178500  
_________________________________________________________________
lstm_6 (LSTM)                (None, 128)               219648    
=================================================================
Total params: 36,398,148.0
Trainable params: 219,648.0
Non-trainable params: 36,178,500.0
_________________________________________________________________

In [43]:
model.summary()


____________________________________________________________________________________________________
Layer (type)                     Output Shape          Param #     Connected to                     
====================================================================================================
q1 (InputLayer)                  (None, 40)            0                                            
____________________________________________________________________________________________________
q2 (InputLayer)                  (None, 40)            0                                            
____________________________________________________________________________________________________
sequential_9 (Sequential)        (None, 128)           36398148                                     
____________________________________________________________________________________________________
concatenate_6 (Concatenate)      (None, 256)           0                                            
____________________________________________________________________________________________________
dropout_12 (Dropout)             (None, 256)           0                                            
____________________________________________________________________________________________________
dense_12 (Dense)                 (None, 128)           32896                                        
____________________________________________________________________________________________________
dropout_13 (Dropout)             (None, 128)           0                                            
____________________________________________________________________________________________________
dense_13 (Dense)                 (None, 1)             129                                          
====================================================================================================
Total params: 36,431,173.0
Trainable params: 252,673.0
Non-trainable params: 36,178,500.0
____________________________________________________________________________________________________

In [44]:
optimizer = Adam(lr=1e-3)
model.compile(optimizer=optimizer, loss='binary_crossentropy', metrics=['accuracy'])

callbacks = [EarlyStopping(monitor='val_loss', min_delta=0, patience=5, verbose=1),
             ModelCheckpoint(filepath=CHECKPOINT_DIR+'weights.{epoch:03d}-{val_loss:.4f}.hdf5', monitor='val_loss', verbose=1, save_best_only=True),
             TensorBoard(log_dir=LOG_DIR, histogram_freq=0, write_graph=False, write_images=True)]

print('BATCH_SIZE:', BATCH_SIZE)
model.fit({'q1': train_q1_Double, 'q2': train_q2_Double}, y_train_Double, 
          batch_size=BATCH_SIZE, epochs=100, verbose=2, callbacks=callbacks, 
          validation_data=({'q1': valid_q1_Double, 'q2': valid_q2_Double}, y_valid_Double, val_sample_weights), 
          shuffle=True, class_weight=class_weight, initial_epoch=0)


BATCH_SIZE: 2048
Train on 770048 samples, validate on 38532 samples
Epoch 1/100
Epoch 00000: val_loss improved from inf to 0.36693, saving model to ./checkpoint/weights.000-0.3669.hdf5
120s - loss: 0.4086 - acc: 0.6682 - val_loss: 0.3669 - val_acc: 0.6945
Epoch 2/100
Epoch 00001: val_loss improved from 0.36693 to 0.34885, saving model to ./checkpoint/weights.001-0.3489.hdf5
110s - loss: 0.3662 - acc: 0.7073 - val_loss: 0.3489 - val_acc: 0.7372
Epoch 3/100
Epoch 00002: val_loss improved from 0.34885 to 0.33290, saving model to ./checkpoint/weights.002-0.3329.hdf5
110s - loss: 0.3488 - acc: 0.7225 - val_loss: 0.3329 - val_acc: 0.7380
Epoch 4/100
Epoch 00003: val_loss improved from 0.33290 to 0.32665, saving model to ./checkpoint/weights.003-0.3267.hdf5
110s - loss: 0.3375 - acc: 0.7317 - val_loss: 0.3267 - val_acc: 0.7467
Epoch 5/100
Epoch 00004: val_loss improved from 0.32665 to 0.31697, saving model to ./checkpoint/weights.004-0.3170.hdf5
110s - loss: 0.3288 - acc: 0.7386 - val_loss: 0.3170 - val_acc: 0.7451
Epoch 6/100
Epoch 00005: val_loss improved from 0.31697 to 0.31174, saving model to ./checkpoint/weights.005-0.3117.hdf5
110s - loss: 0.3216 - acc: 0.7454 - val_loss: 0.3117 - val_acc: 0.7542
Epoch 7/100
Epoch 00006: val_loss improved from 0.31174 to 0.30906, saving model to ./checkpoint/weights.006-0.3091.hdf5
110s - loss: 0.3156 - acc: 0.7510 - val_loss: 0.3091 - val_acc: 0.7563
Epoch 8/100
Epoch 00007: val_loss improved from 0.30906 to 0.30893, saving model to ./checkpoint/weights.007-0.3089.hdf5
110s - loss: 0.3098 - acc: 0.7560 - val_loss: 0.3089 - val_acc: 0.7658
Epoch 9/100
Epoch 00008: val_loss improved from 0.30893 to 0.30424, saving model to ./checkpoint/weights.008-0.3042.hdf5
110s - loss: 0.3056 - acc: 0.7597 - val_loss: 0.3042 - val_acc: 0.7729
Epoch 10/100
Epoch 00009: val_loss improved from 0.30424 to 0.29670, saving model to ./checkpoint/weights.009-0.2967.hdf5
110s - loss: 0.3015 - acc: 0.7636 - val_loss: 0.2967 - val_acc: 0.7691
Epoch 11/100
Epoch 00010: val_loss improved from 0.29670 to 0.29603, saving model to ./checkpoint/weights.010-0.2960.hdf5
110s - loss: 0.2974 - acc: 0.7669 - val_loss: 0.2960 - val_acc: 0.7715
Epoch 12/100
Epoch 00011: val_loss improved from 0.29603 to 0.29311, saving model to ./checkpoint/weights.011-0.2931.hdf5
110s - loss: 0.2945 - acc: 0.7699 - val_loss: 0.2931 - val_acc: 0.7709
Epoch 13/100
Epoch 00012: val_loss did not improve
110s - loss: 0.2912 - acc: 0.7730 - val_loss: 0.2949 - val_acc: 0.7748
Epoch 14/100
Epoch 00013: val_loss did not improve
110s - loss: 0.2884 - acc: 0.7754 - val_loss: 0.2975 - val_acc: 0.7789
Epoch 15/100
Epoch 00014: val_loss improved from 0.29311 to 0.29163, saving model to ./checkpoint/weights.014-0.2916.hdf5
110s - loss: 0.2855 - acc: 0.7782 - val_loss: 0.2916 - val_acc: 0.7777
Epoch 16/100
Epoch 00015: val_loss improved from 0.29163 to 0.28918, saving model to ./checkpoint/weights.015-0.2892.hdf5
110s - loss: 0.2835 - acc: 0.7803 - val_loss: 0.2892 - val_acc: 0.7813
Epoch 17/100
Epoch 00016: val_loss improved from 0.28918 to 0.28915, saving model to ./checkpoint/weights.016-0.2892.hdf5
110s - loss: 0.2815 - acc: 0.7816 - val_loss: 0.2892 - val_acc: 0.7837
Epoch 18/100
Epoch 00017: val_loss did not improve
110s - loss: 0.2791 - acc: 0.7835 - val_loss: 0.2913 - val_acc: 0.7830
Epoch 19/100
Epoch 00018: val_loss did not improve
110s - loss: 0.2769 - acc: 0.7864 - val_loss: 0.2894 - val_acc: 0.7864
Epoch 20/100
Epoch 00019: val_loss improved from 0.28915 to 0.28266, saving model to ./checkpoint/weights.019-0.2827.hdf5
110s - loss: 0.2753 - acc: 0.7877 - val_loss: 0.2827 - val_acc: 0.7795
Epoch 21/100
Epoch 00020: val_loss did not improve
110s - loss: 0.2735 - acc: 0.7897 - val_loss: 0.2848 - val_acc: 0.7843
Epoch 22/100
Epoch 00021: val_loss did not improve
110s - loss: 0.2719 - acc: 0.7912 - val_loss: 0.2865 - val_acc: 0.7894
Epoch 23/100
Epoch 00022: val_loss improved from 0.28266 to 0.28210, saving model to ./checkpoint/weights.022-0.2821.hdf5
110s - loss: 0.2706 - acc: 0.7930 - val_loss: 0.2821 - val_acc: 0.7872
Epoch 24/100
Epoch 00023: val_loss did not improve
110s - loss: 0.2688 - acc: 0.7943 - val_loss: 0.2877 - val_acc: 0.7927
Epoch 25/100
Epoch 00024: val_loss improved from 0.28210 to 0.28169, saving model to ./checkpoint/weights.024-0.2817.hdf5
110s - loss: 0.2674 - acc: 0.7953 - val_loss: 0.2817 - val_acc: 0.7895
Epoch 26/100
Epoch 00025: val_loss improved from 0.28169 to 0.27978, saving model to ./checkpoint/weights.025-0.2798.hdf5
110s - loss: 0.2660 - acc: 0.7969 - val_loss: 0.2798 - val_acc: 0.7826
Epoch 27/100
Epoch 00026: val_loss did not improve
110s - loss: 0.2647 - acc: 0.7979 - val_loss: 0.2865 - val_acc: 0.7964
Epoch 28/100
Epoch 00027: val_loss did not improve
110s - loss: 0.2632 - acc: 0.7987 - val_loss: 0.2822 - val_acc: 0.7901
Epoch 29/100
Epoch 00028: val_loss did not improve
110s - loss: 0.2626 - acc: 0.7999 - val_loss: 0.2842 - val_acc: 0.7968
Epoch 30/100
Epoch 00029: val_loss did not improve
110s - loss: 0.2610 - acc: 0.8016 - val_loss: 0.2873 - val_acc: 0.7985
Epoch 31/100
Epoch 00030: val_loss did not improve
110s - loss: 0.2603 - acc: 0.8019 - val_loss: 0.2828 - val_acc: 0.7964
Epoch 32/100
Epoch 00031: val_loss did not improve
110s - loss: 0.2583 - acc: 0.8037 - val_loss: 0.2824 - val_acc: 0.7944
Epoch 00031: early stopping
Out[44]:
<keras.callbacks.History at 0x7fe77d401cd0>
# resume training
model, model_name = get_best_model()
# model = load_model(CHECKPOINT_DIR + 'weights.025-0.4508.hdf5')
# model_name = 'weights.025-0.4508.hdf5'
# print('model_name', model_name)

# # try increasing learningrate
# optimizer = Adam(lr=1e-4)
# model.compile(optimizer=optimizer, loss='binary_crossentropy', metrics=['accuracy'])

# callbacks = [ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=5, verbose=1),
#              EarlyStopping(monitor='val_loss', min_delta=0, patience=10, verbose=1),
#              ModelCheckpoint(filepath=CHECKPOINT_DIR+'weights.{epoch:03d}-{val_loss:.4f}.hdf5', monitor='val_loss', verbose=1, save_best_only=True),
#              TensorBoard(log_dir=LOG_DIR, histogram_freq=0, write_graph=False, write_images=True)]

print('BATCH_SIZE:', BATCH_SIZE)
model.fit({'q1': train_q1_Double, 'q2': train_q2_Double}, y_train_Double, 
          batch_size=BATCH_SIZE, epochs=100, verbose=2, callbacks=callbacks, 
          validation_data=({'q1': valid_q1_Double, 'q2': valid_q2_Double}, y_valid_Double, val_sample_weights), 
          shuffle=True, class_weight=class_weight, initial_epoch=)

In [38]:
model = load_model(CHECKPOINT_DIR + 'weights.022-0.2778.hdf5')
model_name = 'weights.022-0.2778.hdf5'
print('model_name', model_name)
val_loss = model.evaluate({'q1': valid_q1_Double, 'q2': valid_q2_Double}, y_valid_Double, sample_weight=val_sample_weights, batch_size=8192, verbose=2)
val_loss


model_name weights.022-0.2778.hdf5
Out[38]:
[0.27775588646789434, 0.78547702802869745]

In [39]:
# Create submission: average predictions over both question orders, since the trained model is not symmetric in (q1, q2)
test_q1 = pad_sequences(tokenizer.texts_to_sequences(test_df['question1_WL']), maxlen = MAX_LEN)
test_q2 = pad_sequences(tokenizer.texts_to_sequences(test_df['question2_WL']), maxlen = MAX_LEN)
predictions = model.predict({'q1': test_q1, 'q2': test_q2}, batch_size=8192, verbose=2)
predictions += model.predict({'q1': test_q2, 'q2': test_q1}, batch_size=8192, verbose=2)
predictions /= 2

submission = pd.DataFrame(predictions, columns=['is_duplicate'])
submission.insert(0, 'test_id', test_df.test_id)
file_name = MODEL+'_'+model_name+'_LSTM{:d}*{:d}_DENSE{:d}*{:d}_valloss{:.4f}.csv' \
.format(RNNCELL_SIZE,RNNCELL_LAYERS,DENSE_SIZE,DENSE_LAYERS,val_loss[0])
submission.to_csv(OUTPUT_DIR+file_name, index=False)
print(file_name)


DataClean_weights.022-0.2778.hdf5_LSTM128*1_DENSE128*1_valloss0.2778.csv
sys.stdout = open(OUTPUT_DIR+'training_output.txt', 'a')
history = model.fit({'q1': train_q1, 'q2': train_q2}, y_train, 
                    batch_size=BATCH_SIZE, epochs=3, verbose=2, callbacks=callbacks, 
                    validation_data=({'q1': valid_q1, 'q2': valid_q2}, y_valid), 
                    shuffle=True, initial_epoch=0)
sys.stdout = sys.__stdout__
summary_stats = pd.DataFrame({'epoch': [i + 1 for i in history.epoch],
                              'train_acc': history.history['acc'],
                              'valid_acc': history.history['val_acc'],
                              'train_loss': history.history['loss'],
                              'valid_loss': history.history['val_loss']})
summary_stats
plt.plot(summary_stats.train_loss) # blue
plt.plot(summary_stats.valid_loss) # green
plt.show()
# Alternative CNN / TimeDistributed-Dense architecture kept here for reference (not run in this notebook).
# Note: it uses the old Keras 1 Sequential `Merge` layer and needs extra imports, e.g.
#   from keras import initializers
#   from keras.layers import Convolution1D, Activation, Flatten, Lambda
# (Merge has no direct equivalent in the Keras 2 functional style used above.)

units = 128          # Number of nodes in the Dense layers
dropout = 0.25       # Percentage of nodes to drop
nb_filter = 32       # Number of filters to use in Convolution1D
filter_length = 3    # Length of filter for Convolution1D

# Initialize weights and biases for the Dense layers
weights = initializers.TruncatedNormal(mean=0.0, stddev=0.05, seed=2)
bias = 'zeros'

model1 = Sequential()
model1.add(Embedding(nb_words + 1, EMBEDDING_DIM, weights=[word_embedding_matrix], input_length=MAX_LEN, trainable=False))
model1.add(Convolution1D(filters=nb_filter, kernel_size=filter_length, padding='same'))
model1.add(BatchNormalization())
model1.add(Activation('relu'))
model1.add(Dropout(dropout))
model1.add(Convolution1D(filters=nb_filter, kernel_size=filter_length, padding='same'))
model1.add(BatchNormalization())
model1.add(Activation('relu'))
model1.add(Dropout(dropout))
model1.add(Flatten())

model2 = Sequential()
model2.add(Embedding(nb_words + 1, EMBEDDING_DIM, weights=[word_embedding_matrix], input_length=MAX_LEN, trainable=False))
model2.add(Convolution1D(filters=nb_filter, kernel_size=filter_length, padding='same'))
model2.add(BatchNormalization())
model2.add(Activation('relu'))
model2.add(Dropout(dropout))
model2.add(Convolution1D(filters=nb_filter, kernel_size=filter_length, padding='same'))
model2.add(BatchNormalization())
model2.add(Activation('relu'))
model2.add(Dropout(dropout))
model2.add(Flatten())

model3 = Sequential()
model3.add(Embedding(nb_words + 1, EMBEDDING_DIM, weights=[word_embedding_matrix], input_length=MAX_LEN, trainable=False))
model3.add(TimeDistributed(Dense(EMBEDDING_DIM)))
model3.add(BatchNormalization())
model3.add(Activation('relu'))
model3.add(Dropout(dropout))
model3.add(Lambda(lambda x: K.max(x, axis=1), output_shape=(EMBEDDING_DIM, )))

model4 = Sequential()
model4.add(Embedding(nb_words + 1, EMBEDDING_DIM, weights=[word_embedding_matrix], input_length=MAX_LEN, trainable=False))
model4.add(TimeDistributed(Dense(EMBEDDING_DIM)))
model4.add(BatchNormalization())
model4.add(Activation('relu'))
model4.add(Dropout(dropout))
model4.add(Lambda(lambda x: K.max(x, axis=1), output_shape=(EMBEDDING_DIM, )))

modela = Sequential()
modela.add(Merge([model1, model2], mode='concat'))
modela.add(Dense(units*2, kernel_initializer=weights, bias_initializer=bias))
modela.add(BatchNormalization())
modela.add(Activation('relu'))
modela.add(Dropout(dropout))
modela.add(Dense(units, kernel_initializer=weights, bias_initializer=bias))
modela.add(BatchNormalization())
modela.add(Activation('relu'))
modela.add(Dropout(dropout))

modelb = Sequential()
modelb.add(Merge([model3, model4], mode='concat'))
modelb.add(Dense(units*2, kernel_initializer=weights, bias_initializer=bias))
modelb.add(BatchNormalization())
modelb.add(Activation('relu'))
modelb.add(Dropout(dropout))
modelb.add(Dense(units, kernel_initializer=weights, bias_initializer=bias))
modelb.add(BatchNormalization())
modelb.add(Activation('relu'))
modelb.add(Dropout(dropout))

model = Sequential()
model.add(Merge([modela, modelb], mode='concat'))
model.add(Dense(units*2, kernel_initializer=weights, bias_initializer=bias))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(Dropout(dropout))
model.add(Dense(units, kernel_initializer=weights, bias_initializer=bias))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(Dropout(dropout))
model.add(Dense(units, kernel_initializer=weights, bias_initializer=bias))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(Dropout(dropout))
model.add(Dense(1, kernel_initializer=weights, bias_initializer=bias))
model.add(BatchNormalization())
model.add(Activation('sigmoid'))