Use GoogleNews-vectors-negative300.bin

LSTM(64) DENSE(64):                         weights.003-0.2828.hdf5   193s - loss: 0.2478 - acc: 0.8197 - val_loss: 0.2828 - val_acc: 0.7996
LSTM(64) DENSE(64) BATCH_SIZE 256 ==> 2048: weights.011-0.2908.hdf5    63s - loss: 0.2398 - acc: 0.8276 - val_loss: 0.2908 - val_acc: 0.8005
LSTM(64) DENSE(64) BATCH_SIZE 256 ==> 64:   weights.002-0.2822.hdf5   759s - loss: 0.2390 - acc: 0.8283 - val_loss: 0.2822 - val_acc: 0.8134
LSTM(64) DENSE(64) BATCH_SIZE 256 ==> 32:   weights.001-0.2812.hdf5  1541s - loss: 0.2622 - acc: 0.8065 - val_loss: 0.2812 - val_acc: 0.8008
import requests

def download_file_from_google_drive(id, destination):
    URL = "https://docs.google.com/uc?export=download"
    session = requests.Session()
    response = session.get(URL, params={'id': id}, stream=True)
    token = get_confirm_token(response)
    if token:
        params = {'id': id, 'confirm': token}
        response = session.get(URL, params=params, stream=True)
    save_response_content(response, destination)

def get_confirm_token(response):
    for key, value in response.cookies.items():
        if key.startswith('download_warning'):
            return value
    return None

def save_response_content(response, destination):
    CHUNK_SIZE = 32768
    with open(destination, "wb") as f:
        for chunk in response.iter_content(CHUNK_SIZE):
            if chunk:  # filter out keep-alive new chunks
                f.write(chunk)

file_id = '0B7XkCwpI5KDYNlNUTTlSS21pQmM'
destination = '../data/GoogleNews-vectors-negative300.bin.gz'
download_file_from_google_drive(file_id, destination)
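The downloaded archive is gzip-compressed, while the embedding loader further down reads the uncompressed GoogleNews-vectors-negative300.bin from DATA_DIR + '/WordEmbedding/'. A minimal sketch to decompress it into that layout (the decompression step itself is an addition; both paths are taken from this notebook):

import gzip, shutil, os

src = '../data/GoogleNews-vectors-negative300.bin.gz'
dst_dir = '../data/WordEmbedding/'
if not os.path.exists(dst_dir):
    os.mkdir(dst_dir)
# stream-decompress without loading the whole archive into memory
with gzip.open(src, 'rb') as f_in, open(dst_dir + 'GoogleNews-vectors-negative300.bin', 'wb') as f_out:
    shutil.copyfileobj(f_in, f_out)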

In [34]:
from __future__ import division
from __future__ import print_function

import pandas as pd
import numpy as np
import nltk
from nltk.corpus import stopwords
from nltk.stem import SnowballStemmer
import re
import glob
from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
import datetime, time, json, os, math, pickle, sys
from string import punctuation
from gensim.models import KeyedVectors

from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from keras.models import Sequential, Model, load_model
from keras.layers import concatenate, Embedding, Dense, Input, Dropout, Bidirectional, LSTM, BatchNormalization, TimeDistributed
from keras.optimizers import Adam
from keras.callbacks import ModelCheckpoint, EarlyStopping, ReduceLROnPlateau, TensorBoard
from keras import backend as K

In [42]:
DATA_DIR = '../data/'
MODEL = 'Baseline'
if os.getcwd().split('/')[-1] != MODEL:
    print('WRONG MODEL DIR!!!')
CHECKPOINT_DIR = './checkpoint/'
if not os.path.exists(CHECKPOINT_DIR):
    os.mkdir(CHECKPOINT_DIR)
LOG_DIR = './log/'
if not os.path.exists(LOG_DIR):
    os.mkdir(LOG_DIR)
OUTPUT_DIR = './output/'
if not os.path.exists(OUTPUT_DIR):
    os.mkdir(OUTPUT_DIR)
    
MAX_LEN = 40
EMBEDDING_DIM = 300
BATCH_SIZE = 32
VALID_SPLIT = 0.05
RE_WEIGHT = True # whether to re-weight classes to fit the 17.5% share in test set
# VOCAB_SIZE = 10000


def get_best_model(checkpoint_dir = CHECKPOINT_DIR):
    files = glob.glob(checkpoint_dir+'*')
    val_losses = [float(f.split('-')[-1][:-5]) for f in files]
    index = val_losses.index(min(val_losses))
    print('Loading model from checkpoint file ' + files[index])
    model = load_model(files[index])
    model_name = files[index].split('/')[-1]
    print('Loading model Done!')
    return (model, model_name)

In [36]:
trainval_df = pd.read_csv(DATA_DIR+"train.csv")
test_df = pd.read_csv(DATA_DIR+"test.csv")
print(trainval_df.shape)
print(test_df.shape)


(404290, 6)
(2345796, 3)
# Check for any null values
# inds = pd.isnull(trainval_df).any(1).nonzero()[0]
# trainval_df.loc[inds]
# inds = pd.isnull(test_df).any(1).nonzero()[0]
# test_df.loc[inds]

# # Add the string 'empty' to empty strings
# trainval_df = trainval_df.fillna('empty')
# test_df = test_df.fillna('empty')

In [4]:
# data cleaning
abbr_dict={
    "i'm":"i am",
    "'re":" are",
    "'s":" is",
    "'ve":" have",
    "'ll":" will",
    "n't":" not",
}

# split off punctuation as separate tokens
_WORD_SPLIT = re.compile("([.,!?\"':;)(])")

# stop_words = ['the','a','an','and','but','if','or','because','as','what','which','this','that','these','those','then',
#               'just','so','than','such','both','through','about','for','is','of','while','during','to','What','Which',
#               'Is','If','While','This']
# print('stop_words:', len(stop_words))

# # nltk.download("stopwords")
# stop_words = stopwords.words('english')
# print('stop_words:', len(stop_words))


def text_to_wordlist(text, abbr_dict=None, remove_stop_words=False, stem_words=False):
    
    if isinstance(text,float):
        # turn nan to empty string
        text = ""
    else:
#         Convert words to lower case and split them
#         text = text.lower()

#         # abbreviation replace
#         # Create a regular expression  from the dictionary keys
#         regex = re.compile("(%s)" % "|".join(map(re.escape, abbr_dict.keys())))
#         # For each match, look-up corresponding value in dictionary
#         text = regex.sub(lambda mo: abbr_dict[mo.string[mo.start():mo.end()]], text) 

        words = []
        for space_separated_fragment in text.strip().split():
            words.extend(_WORD_SPLIT.split(space_separated_fragment))
        text = [w for w in words if w]
        text = " ".join(text)

#         Remove punctuation from text
#         text = ''.join([c for c in text if c not in punctuation])

        # Optionally, remove stop words
        if remove_stop_words:
            text = text.split()
            text = [w for w in text if not w in stop_words]
            text = " ".join(text)

        # Optionally, shorten words to their stems
        if stem_words:
            text = text.split()
            stemmer = SnowballStemmer('english')
            stemmed_words = [stemmer.stem(word) for word in text]
            text = " ".join(stemmed_words)
        
    # Return a list of words
    return(text)
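
A quick check of the cleaning on a made-up example (the expected output follows from the punctuation-splitting regex above):

# text_to_wordlist("What's the best way to learn Python?")
# -> "What ' s the best way to learn Python ?"   (punctuation split into separate tokens; case kept, since lowercasing is commented out)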

trainval_df['len1'] = trainval_df.apply(lambda row: len(row['question1_WL'].split()), axis=1)
trainval_df['len2'] = trainval_df.apply(lambda row: len(row['question2_WL'].split()), axis=1)
test_df['len1'] = test_df.apply(lambda row: len(row['question1_WL'].split()), axis=1)
test_df['len2'] = test_df.apply(lambda row: len(row['question2_WL'].split()), axis=1)

lengths = pd.concat([trainval_df['len1'], trainval_df['len2']], axis=0)
print(lengths.describe())
print(np.percentile(lengths, 99.0))
print(np.percentile(lengths, 99.4))
print(np.percentile(lengths, 99.5))
print(np.percentile(lengths, 99.9))

In [39]:
# question to word list by data cleaning

file_name = 'trainval_df.pickle'
if os.path.exists(OUTPUT_DIR+file_name):
    print ('Loading from file '+file_name)
    trainval_df = pd.read_pickle(OUTPUT_DIR+file_name)
else:
    print ('Generating file '+file_name)  
    trainval_df['question1_WL'] = trainval_df.apply(lambda row: text_to_wordlist(row['question1']), axis=1)
    trainval_df['question2_WL'] = trainval_df.apply(lambda row: text_to_wordlist(row['question2']), axis=1)
    trainval_df.to_pickle(OUTPUT_DIR+file_name)      

file_name = 'test_df.pickle'
if os.path.exists(OUTPUT_DIR+file_name):
    print ('Loading from file '+file_name)
    test_df = pd.read_pickle(OUTPUT_DIR+file_name)
else:
    print ('Generating file '+file_name)  
    test_df['question1_WL'] = test_df.apply(lambda row: text_to_wordlist(row['question1']), axis=1)
    test_df['question2_WL'] = test_df.apply(lambda row: text_to_wordlist(row['question2']), axis=1)
    test_df.to_pickle(OUTPUT_DIR+file_name)   
    
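# hold out a validation set sized so that the training split is a multiple of 1024:
# ceil(404290 * 0.95 / 1024) * 1024 = 385024 rows stay in train, leaving test_size = 19266 for validation
# (doubled below to 770048 / 38532 samples, matching the fit() log further down)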
test_size = trainval_df.shape[0]-int(math.ceil(trainval_df.shape[0]*(1-VALID_SPLIT)/1024)*1024)
train_df, valid_df = train_test_split(trainval_df, test_size=test_size, random_state=1986, stratify=trainval_df['is_duplicate'])


Loading from file trainval_df.pickle
Loading from file test_df.pickle

In [40]:
# tokenize and pad

all_questions = pd.concat([trainval_df['question1_WL'],trainval_df['question2_WL'],test_df['question1_WL'],test_df['question2_WL']], axis=0)
tokenizer = Tokenizer(num_words=None, lower=True)
tokenizer.fit_on_texts(all_questions)
word_index = tokenizer.word_index
nb_words = len(word_index)
print("Words in index: %d" % nb_words) #126355

train_q1 = pad_sequences(tokenizer.texts_to_sequences(train_df['question1_WL']), maxlen = MAX_LEN)
train_q2 = pad_sequences(tokenizer.texts_to_sequences(train_df['question2_WL']), maxlen = MAX_LEN)
valid_q1 = pad_sequences(tokenizer.texts_to_sequences(valid_df['question1_WL']), maxlen = MAX_LEN)
valid_q2 = pad_sequences(tokenizer.texts_to_sequences(valid_df['question2_WL']), maxlen = MAX_LEN)
y_train = train_df.is_duplicate.values
y_valid = valid_df.is_duplicate.values

train_q1_Double = np.vstack((train_q1, train_q2))
train_q2_Double = np.vstack((train_q2, train_q1))
valid_q1_Double = np.vstack((valid_q1, valid_q2))
valid_q2_Double = np.vstack((valid_q2, valid_q1))
y_train_Double = np.hstack((y_train, y_train))
y_valid_Double = np.hstack((y_valid, y_valid))

val_sample_weights = np.ones(len(y_valid_Double))
if RE_WEIGHT:
    class_weight = {0: 1.309028344, 1: 0.472001959}
    val_sample_weights *= 0.472001959
    val_sample_weights[y_valid_Double==0] = 1.309028344
else:
    class_weight = None
    val_sample_weights = None


Words in index: 126355
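The hard-coded weights above (1.309028344 for class 0, 0.472001959 for class 1) are the values commonly shared for this competition; they re-scale the training positive rate (roughly 37% duplicates) toward the roughly 17.5% positive share assumed for the test set, as noted next to RE_WEIGHT. A minimal, hedged sketch of the same per-sample weighting written generically (make_sample_weights is a name introduced here, not part of the notebook):

# build per-sample weights from any class_weight mapping
# (equivalent to the hard-coded val_sample_weights block above)
def make_sample_weights(y, class_weight):
    w = np.ones(len(y), dtype=np.float64)
    for cls, cw in class_weight.items():
        w[y == cls] = cw
    return w

# val_sample_weights = make_sample_weights(y_valid_Double, class_weight)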

In [7]:
# load word_embedding_matrix

W2V = 'GoogleNews-vectors-negative300.bin'
file_name = W2V + '.word_embedding_matrix.pickle'
if os.path.exists(OUTPUT_DIR+file_name):
    print ('Loading from file '+file_name)
    with open(OUTPUT_DIR+file_name, 'rb') as f:
        word_embedding_matrix = pickle.load(f)
else:
    print ('Generating file '+file_name)   
    word2vec = KeyedVectors.load_word2vec_format(DATA_DIR+'/WordEmbedding/'+W2V, binary=True)
    print('Word embeddings:', len(word2vec.vocab)) #3,000,000

    nb_words = len(word_index)
    null_embedding_words = []
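    # index 0 is reserved for padding: Tokenizer word indices start at 1 and pad_sequences pads with 0, so row 0 stays all zeros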
    word_embedding_matrix = np.zeros((nb_words + 1, EMBEDDING_DIM))
    for word, i in word_index.items():
        if word in word2vec.vocab:
            word_embedding_matrix[i] = word2vec.word_vec(word)
        else:
            null_embedding_words.append(word)
    print('Null word embeddings: %d' %len(null_embedding_words)) #67,786

    with open(OUTPUT_DIR+file_name, 'wb') as f:
        pickle.dump(word_embedding_matrix, f)


Loading from file GoogleNews-vectors-negative300.bin.word_embedding_matrix.pickle
word_counts = tokenizer.word_counts
null_embedding_word_counts = {word: word_counts[word] for word in null_embedding_words}
print(sum(null_embedding_word_counts.values())) #454210

word_docs = tokenizer.word_docs
null_embedding_word_docs = {word: word_docs[word] for word in null_embedding_words}
print(sum(null_embedding_word_docs.values())) #446584
# 446584/(404290+2345796)/2 = 0.08119

In [29]:
EMBEDDING_TRAINABLE = False
RNNCELL_SIZE = 64
RNNCELL_LAYERS = 1
RNNCELL_DROPOUT = 0
RNNCELL_RECURRENT_DROPOUT = 0
RNNCELL_BIDIRECT = False
DENSE_SIZE = 64
DENSE_LAYERS = 1
DENSE_DROPOUT = 0

In [43]:
encode_model = Sequential()
encode_model.add(Embedding(nb_words + 1, EMBEDDING_DIM, weights=[word_embedding_matrix], input_length=MAX_LEN, trainable=EMBEDDING_TRAINABLE))
if RNNCELL_BIDIRECT:
    for i in range(RNNCELL_LAYERS-1):
        encode_model.add(Bidirectional(LSTM(RNNCELL_SIZE, dropout=RNNCELL_DROPOUT, recurrent_dropout=RNNCELL_RECURRENT_DROPOUT, 
                                            unroll=True, implementation=2, return_sequences=True)))
    encode_model.add(Bidirectional(LSTM(RNNCELL_SIZE, dropout=RNNCELL_DROPOUT, recurrent_dropout=RNNCELL_RECURRENT_DROPOUT, 
                                        unroll=True, implementation=2)))
else:
    for i in range(RNNCELL_LAYERS-1):
        encode_model.add(LSTM(RNNCELL_SIZE, dropout=RNNCELL_DROPOUT, recurrent_dropout=RNNCELL_RECURRENT_DROPOUT, 
                              unroll=True, implementation=2, return_sequences=True))
    encode_model.add(LSTM(RNNCELL_SIZE, dropout=RNNCELL_DROPOUT, recurrent_dropout=RNNCELL_RECURRENT_DROPOUT, 
                          unroll=True, implementation=2))

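# the same encode_model instance encodes both questions, so the two branches share all weights (a Siamese setup)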
sequence1_input = Input(shape=(MAX_LEN,), name='q1')
sequence2_input = Input(shape=(MAX_LEN,), name='q2')
encoded_1 = encode_model(sequence1_input)
encoded_2 = encode_model(sequence2_input)
merged = concatenate([encoded_1, encoded_2], axis=-1)
merged = Dropout(DENSE_DROPOUT)(merged)
# merged = BatchNormalization()(merged)
for i in range(DENSE_LAYERS):
    merged = Dense(DENSE_SIZE, activation='relu', kernel_initializer='he_normal')(merged)
    merged = Dropout(DENSE_DROPOUT)(merged)
predictions = Dense(1, activation='sigmoid')(merged)
model = Model(inputs=[sequence1_input, sequence2_input], outputs=predictions)

In [44]:
encode_model.summary()


_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
embedding_4 (Embedding)      (None, 40, 300)           37906800  
_________________________________________________________________
lstm_4 (LSTM)                (None, 64)                93440     
=================================================================
Total params: 38,000,240.0
Trainable params: 93,440.0
Non-trainable params: 37,906,800.0
_________________________________________________________________

In [ ]:
model.summary()


____________________________________________________________________________________________________
Layer (type)                     Output Shape          Param #     Connected to                     
====================================================================================================
q1 (InputLayer)                  (None, 40)            0                                            
____________________________________________________________________________________________________
q2 (InputLayer)                  (None, 40)            0                                            
____________________________________________________________________________________________________
sequential_8 (Sequential)        (None, 64)            38000240                                     
____________________________________________________________________________________________________
concatenate_4 (Concatenate)      (None, 128)           0                                            
____________________________________________________________________________________________________
dropout_7 (Dropout)              (None, 128)           0                                            
____________________________________________________________________________________________________
dense_7 (Dense)                  (None, 64)            8256                                         
____________________________________________________________________________________________________
dropout_8 (Dropout)              (None, 64)            0                                            
____________________________________________________________________________________________________
dense_8 (Dense)                  (None, 1)             65                                           
====================================================================================================
Total params: 38,008,561.0
Trainable params: 101,761.0
Non-trainable params: 37,906,800.0
____________________________________________________________________________________________________
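
As a sanity check on the summaries above: the frozen embedding contributes (126355 + 1) * 300 = 37,906,800 parameters; the LSTM(64) contributes 4 * (300 + 64 + 1) * 64 = 93,440; the first Dense(64) on the 128-dim concatenation contributes 128 * 64 + 64 = 8,256; and the final Dense(1) contributes 64 + 1 = 65, giving the 101,761 trainable parameters reported.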

In [ ]:
optimizer = Adam(lr=1e-3)
model.compile(optimizer=optimizer, loss='binary_crossentropy', metrics=['accuracy'])

callbacks = [ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=5, verbose=1),
             EarlyStopping(monitor='val_loss', min_delta=0, patience=10, verbose=1),
             ModelCheckpoint(filepath=CHECKPOINT_DIR+'weights.{epoch:03d}-{val_loss:.4f}.hdf5', monitor='val_loss', verbose=1, save_best_only=True),
             TensorBoard(log_dir=LOG_DIR, histogram_freq=0, write_graph=False, write_images=True)]
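# note: the ModelCheckpoint filename encodes val_loss, which get_best_model() above parses to pick the best checkpoint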

print('BATCH_SIZE:', BATCH_SIZE)
model.fit({'q1': train_q1_Double, 'q2': train_q2_Double}, y_train_Double, 
          batch_size=BATCH_SIZE, epochs=100, verbose=2, callbacks=callbacks, 
          validation_data=({'q1': valid_q1_Double, 'q2': valid_q2_Double}, y_valid_Double, val_sample_weights), 
          shuffle=True, class_weight=class_weight, initial_epoch=0)


BATCH_SIZE: 32
Train on 770048 samples, validate on 38532 samples
Epoch 1/100
Epoch 00000: val_loss improved from inf to 0.29426, saving model to ./checkpoint/weights.000-0.2943.hdf5
1542s - loss: 0.3256 - acc: 0.7399 - val_loss: 0.2943 - val_acc: 0.7728
Epoch 2/100
Epoch 00001: val_loss improved from 0.29426 to 0.28122, saving model to ./checkpoint/weights.001-0.2812.hdf5
1541s - loss: 0.2622 - acc: 0.8065 - val_loss: 0.2812 - val_acc: 0.8008
Epoch 3/100
Epoch 00002: val_loss did not improve
1544s - loss: 0.2295 - acc: 0.8377 - val_loss: 0.2914 - val_acc: 0.8147
Epoch 4/100

# resume training
model, model_name = get_best_model()
# model = load_model(CHECKPOINT_DIR + 'weights.025-0.4508.hdf5')
# model_name = 'weights.025-0.4508.hdf5'
# print('model_name', model_name)

# # try a smaller learning rate
# optimizer = Adam(lr=1e-4)
# model.compile(optimizer=optimizer, loss='binary_crossentropy', metrics=['accuracy'])

# callbacks = [ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=5, verbose=1),
#              EarlyStopping(monitor='val_loss', min_delta=0, patience=10, verbose=1),
#              ModelCheckpoint(filepath=CHECKPOINT_DIR+'weights.{epoch:03d}-{val_loss:.4f}.hdf5', monitor='val_loss', verbose=1, save_best_only=True),
#              TensorBoard(log_dir=LOG_DIR, histogram_freq=0, write_graph=False, write_images=True)]

print('BATCH_SIZE:', BATCH_SIZE)
model.fit({'q1': train_q1_Double, 'q2': train_q2_Double}, y_train_Double,
          batch_size=BATCH_SIZE, epochs=100, verbose=2, callbacks=callbacks,
          validation_data=({'q1': valid_q1_Double, 'q2': valid_q2_Double}, y_valid_Double, val_sample_weights),
          shuffle=True, class_weight=class_weight,
          initial_epoch=3)  # left blank in the original; 3 = the epoch index after the restored weights.002 checkpoint

In [37]:
model = load_model(CHECKPOINT_DIR + 'weights.002-0.2822.hdf5')
model_name = 'weights.002-0.2822.hdf5'
print('model_name', model_name)
val_loss = model.evaluate({'q1': valid_q1_Double, 'q2': valid_q2_Double}, y_valid_Double, sample_weight=val_sample_weights, batch_size=8192, verbose=2)
val_loss


model_name weights.002-0.2822.hdf5
Out[37]:
[0.28221574697640039, 0.81337590717791675]

In [41]:
#Create submission
test_q1 = pad_sequences(tokenizer.texts_to_sequences(test_df['question1_WL']), maxlen = MAX_LEN)
test_q2 = pad_sequences(tokenizer.texts_to_sequences(test_df['question2_WL']), maxlen = MAX_LEN)
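# predict with both question orders and average, mirroring the order-swapped doubling of the training data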
predictions = model.predict({'q1': test_q1, 'q2': test_q2}, batch_size=8192, verbose=2)
predictions += model.predict({'q1': test_q2, 'q2': test_q1}, batch_size=8192, verbose=2)
predictions /= 2

submission = pd.DataFrame(predictions, columns=['is_duplicate'])
submission.insert(0, 'test_id', test_df.test_id)
file_name = MODEL+'_'+model_name+'_LSTM{:d}*{:d}_DENSE{:d}*{:d}_valloss{:.4f}.csv' \
.format(RNNCELL_SIZE,RNNCELL_LAYERS,DENSE_SIZE,DENSE_LAYERS,val_loss[0])
submission.to_csv(OUTPUT_DIR+file_name, index=False)
print(file_name)


Baseline_weights.002-0.2822.hdf5_LSTM64*1_DENSE64*1_valloss0.2822.csv
sys.stdout = open(OUTPUT_DIR+'training_output.txt', 'a')
history = model.fit({'q1': train_q1, 'q2': train_q2}, y_train,
                    batch_size=BATCH_SIZE, epochs=3, verbose=2, callbacks=callbacks,
                    validation_data=({'q1': valid_q1, 'q2': valid_q2}, y_valid),
                    shuffle=True, initial_epoch=0)
sys.stdout = sys.__stdout__
summary_stats = pd.DataFrame({'epoch': [i + 1 for i in history.epoch],
                              'train_acc': history.history['acc'],
                              'valid_acc': history.history['val_acc'],
                              'train_loss': history.history['loss'],
                              'valid_loss': history.history['val_loss']})
summary_stats

plt.plot(summary_stats.train_loss)  # blue
plt.plot(summary_stats.valid_loss)  # green
plt.show()
# Alternative reference architecture (CNN + TimeDistributed branches, legacy Keras 1 Sequential/Merge style).
# Extra imports needed by this cell (not in the imports cell above); Merge is the legacy Keras 1 layer and
# may only exist as keras.legacy.layers.Merge (or not at all) on newer Keras 2 releases.
from keras import initializers
from keras.layers import Convolution1D, Activation, Flatten, Lambda, Merge

units = 128          # Number of nodes in the Dense layers
dropout = 0.25       # Percentage of nodes to drop
nb_filter = 32       # Number of filters to use in Convolution1D
filter_length = 3    # Length of filter for Convolution1D

# Initialize weights and biases for the Dense layers
weights = initializers.TruncatedNormal(mean=0.0, stddev=0.05, seed=2)
bias = 'zeros'

model1 = Sequential()
model1.add(Embedding(nb_words + 1, EMBEDDING_DIM, weights=[word_embedding_matrix], input_length=MAX_LEN, trainable=False))
model1.add(Convolution1D(filters=nb_filter, kernel_size=filter_length, padding='same'))
model1.add(BatchNormalization())
model1.add(Activation('relu'))
model1.add(Dropout(dropout))
model1.add(Convolution1D(filters=nb_filter, kernel_size=filter_length, padding='same'))
model1.add(BatchNormalization())
model1.add(Activation('relu'))
model1.add(Dropout(dropout))
model1.add(Flatten())

model2 = Sequential()
model2.add(Embedding(nb_words + 1, EMBEDDING_DIM, weights=[word_embedding_matrix], input_length=MAX_LEN, trainable=False))
model2.add(Convolution1D(filters=nb_filter, kernel_size=filter_length, padding='same'))
model2.add(BatchNormalization())
model2.add(Activation('relu'))
model2.add(Dropout(dropout))
model2.add(Convolution1D(filters=nb_filter, kernel_size=filter_length, padding='same'))
model2.add(BatchNormalization())
model2.add(Activation('relu'))
model2.add(Dropout(dropout))
model2.add(Flatten())

model3 = Sequential()
model3.add(Embedding(nb_words + 1, EMBEDDING_DIM, weights=[word_embedding_matrix], input_length=MAX_LEN, trainable=False))
model3.add(TimeDistributed(Dense(EMBEDDING_DIM)))
model3.add(BatchNormalization())
model3.add(Activation('relu'))
model3.add(Dropout(dropout))
model3.add(Lambda(lambda x: K.max(x, axis=1), output_shape=(EMBEDDING_DIM,)))

model4 = Sequential()
model4.add(Embedding(nb_words + 1, EMBEDDING_DIM, weights=[word_embedding_matrix], input_length=MAX_LEN, trainable=False))
model4.add(TimeDistributed(Dense(EMBEDDING_DIM)))
model4.add(BatchNormalization())
model4.add(Activation('relu'))
model4.add(Dropout(dropout))
model4.add(Lambda(lambda x: K.max(x, axis=1), output_shape=(EMBEDDING_DIM,)))

modela = Sequential()
modela.add(Merge([model1, model2], mode='concat'))
modela.add(Dense(units*2, kernel_initializer=weights, bias_initializer=bias))
modela.add(BatchNormalization())
modela.add(Activation('relu'))
modela.add(Dropout(dropout))
modela.add(Dense(units, kernel_initializer=weights, bias_initializer=bias))
modela.add(BatchNormalization())
modela.add(Activation('relu'))
modela.add(Dropout(dropout))

modelb = Sequential()
modelb.add(Merge([model3, model4], mode='concat'))
modelb.add(Dense(units*2, kernel_initializer=weights, bias_initializer=bias))
modelb.add(BatchNormalization())
modelb.add(Activation('relu'))
modelb.add(Dropout(dropout))
modelb.add(Dense(units, kernel_initializer=weights, bias_initializer=bias))
modelb.add(BatchNormalization())
modelb.add(Activation('relu'))
modelb.add(Dropout(dropout))

model = Sequential()
model.add(Merge([modela, modelb], mode='concat'))
model.add(Dense(units*2, kernel_initializer=weights, bias_initializer=bias))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(Dropout(dropout))
model.add(Dense(units, kernel_initializer=weights, bias_initializer=bias))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(Dropout(dropout))
model.add(Dense(units, kernel_initializer=weights, bias_initializer=bias))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(Dropout(dropout))
model.add(Dense(1, kernel_initializer=weights, bias_initializer=bias))
model.add(BatchNormalization())
model.add(Activation('sigmoid'))
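
This reference model is written against the legacy Sequential + Merge API from Keras 1; the functional concatenate() used for the baseline model above is the Keras 2 replacement for Merge(mode='concat'). A hedged, commented-out sketch of the equivalent merge for the first branch pair (variable names are the ones defined in this cell):

# merged_a = concatenate([model1.output, model2.output], axis=-1)  # Keras 2 functional equivalent of Merge(mode='concat')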