Feature: Out-Of-Fold Predictions from a Siamese LSTM with Attention

Imports

The pygoose utility package imports numpy, pandas, and matplotlib, as well as its kg helper module, into the root namespace.


In [1]:
from pygoose import *

In [2]:
import gc

In [3]:
from sklearn.model_selection import StratifiedKFold
from sklearn.metrics import *

In [4]:
kg.gpu.cuda_use_gpus(gpu_ids=0)

In [5]:
from keras import backend as K
from keras.models import Model, Sequential
from keras.layers import *
from keras.callbacks import EarlyStopping, ModelCheckpoint


Using TensorFlow backend.

Config

Automatically discover the paths to various data folders and compose the project structure.


In [6]:
project = kg.Project.discover()

Identifier for storing these features on disk and referring to them later.


In [7]:
feature_list_id = 'oofp_nn_siamese_lstm_attention'

Make subsequent NN runs reproducible.


In [8]:
RANDOM_SEED = 42

In [9]:
np.random.seed(RANDOM_SEED)

Read data

Word embedding lookup matrix.


In [10]:
embedding_matrix = kg.io.load(project.aux_dir + 'fasttext_vocab_embedding_matrix.pickle')

Padded sequences of word indices for every question.


In [11]:
X_train_q1 = kg.io.load(project.preprocessed_data_dir + 'sequences_q1_fasttext_train.pickle')
X_train_q2 = kg.io.load(project.preprocessed_data_dir + 'sequences_q2_fasttext_train.pickle')

In [12]:
X_test_q1 = kg.io.load(project.preprocessed_data_dir + 'sequences_q1_fasttext_test.pickle')
X_test_q2 = kg.io.load(project.preprocessed_data_dir + 'sequences_q2_fasttext_test.pickle')

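These arrays were produced in an earlier preprocessing notebook. As a minimal sketch of the idea (assuming a Keras Tokenizer whose word index matches the fastText embedding matrix; `tokenizer` here is illustrative, not the actual preprocessing code):

from keras.preprocessing.sequence import pad_sequences

# Hypothetical example: map one question to word indices, then pad/truncate
# it to a fixed length of 30 tokens (zeros are used for padding).
question_word_ids = tokenizer.texts_to_sequences(['how do i learn python quickly'])
padded = pad_sequences(question_word_ids, maxlen=30)
print(padded.shape)  # (1, 30)
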
In [13]:
y_train = kg.io.load(project.features_dir + 'y_train.pickle')

Word embedding properties.


In [14]:
EMBEDDING_DIM = embedding_matrix.shape[-1]
VOCAB_LENGTH = embedding_matrix.shape[0]
MAX_SEQUENCE_LENGTH = X_train_q1.shape[-1]

In [15]:
print(EMBEDDING_DIM, VOCAB_LENGTH, MAX_SEQUENCE_LENGTH)


300 101442 30

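In other words, every question is a vector of 30 word indices into a 101442 × 300 embedding matrix, so looking up one padded question yields a 30 × 300 matrix that feeds the LSTM. A quick check (not part of the original notebook):

print(embedding_matrix[X_train_q1[0]].shape)  # (30, 300)
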
Define models


In [16]:
def contrastive_loss(y_true, y_pred):
    """
    Contrastive loss from Hadsell-et-al.'06
    http://yann.lecun.com/exdb/publis/pdf/hadsell-chopra-lecun-06.pdf
    """    
    margin = 1
    return K.mean((1 - y_true) * K.square(y_pred) +
                   y_true * K.square(K.maximum(margin - y_pred, 0)))

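Note that with `y_true = 1` marking duplicate pairs, the first term pushes non-duplicate predictions toward 0 and the second pushes duplicate predictions toward the margin, so the sigmoid output behaves like a similarity score. A small NumPy sanity check (an illustrative sketch, not part of the original notebook):

def contrastive_loss_np(y_true, y_pred, margin=1.0):
    # NumPy mirror of the Keras loss above.
    return np.mean((1 - y_true) * np.square(y_pred) +
                   y_true * np.square(np.maximum(margin - y_pred, 0)))

y_true = np.array([1.0, 1.0, 0.0, 0.0])
y_pred = np.array([0.9, 0.1, 0.1, 0.9])
print(contrastive_loss_np(y_true, y_pred))  # ~0.41: the two confident mistakes dominate
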
In [17]:
class AttentionWithContext(Layer):
    """
    Attention operation, with a context/query vector, for temporal data.
    Supports Masking.
    
    Follows the work of Yang et al. [https://www.cs.cmu.edu/~diyiy/docs/naacl16.pdf]
    "Hierarchical Attention Networks for Document Classification" by using a context
    vector to assist the attention.
    
    # Input shape
        3D tensor with shape: `(samples, steps, features)`.
    # Output shape
        2D tensor with shape: `(samples, features)`.

    Just put it on top of an RNN Layer (GRU/LSTM/SimpleRNN) with return_sequences=True.
    
    The dimensions are inferred based on the output shape of the RNN.
    Example:
        model.add(LSTM(64, return_sequences=True))
        model.add(AttentionWithContext())
    """

    def __init__(self, init='glorot_uniform',
                 kernel_regularizer=None, bias_regularizer=None,
                 kernel_constraint=None, bias_constraint=None,  **kwargs):
        
        self.supports_masking = True
        self.init = initializers.get(init)
        self.kernel_initializer = self.init

        self.kernel_regularizer = regularizers.get(kernel_regularizer)
        self.bias_regularizer = regularizers.get(bias_regularizer)

        self.kernel_constraint = constraints.get(kernel_constraint)
        self.bias_constraint = constraints.get(bias_constraint)

        super(AttentionWithContext, self).__init__(**kwargs)

    def build(self, input_shape):
        self.kernel = self.add_weight(
            (input_shape[-1], 1),
            initializer=self.kernel_initializer,
            name='{}_W'.format(self.name),
            regularizer=self.kernel_regularizer,
            constraint=self.kernel_constraint
        )
        self.b = self.add_weight(
            (input_shape[1],),
            initializer='zero',
            name='{}_b'.format(self.name),
            regularizer=self.bias_regularizer,
            constraint=self.bias_constraint
        )
        self.u = self.add_weight(
            (input_shape[1],),
            initializer=self.kernel_initializer,
            name='{}_u'.format(self.name),
            regularizer=self.kernel_regularizer,
            constraint=self.kernel_constraint
        )
        self.built = True

    def compute_mask(self, input, mask):
        # The attention collapses the time dimension, so no mask is propagated downstream.
        return None

    def call(self, x, mask=None):
        # x: (batch, steps, features)
        multdata = K.dot(x, self.kernel)     # (batch, steps, features) . (features, 1) => (batch, steps, 1)
        multdata = K.squeeze(multdata, -1)   # (batch, steps)
        multdata = multdata + self.b         # (batch, steps) + (steps,)

        multdata = K.tanh(multdata)          # (batch, steps)

        multdata = multdata * self.u         # element-wise with the context vector u: (batch, steps)
        multdata = K.exp(multdata)           # (batch, steps)

        # Apply the mask after the exp; the weights are re-normalized below.
        if mask is not None:
            mask = K.cast(mask, K.floatx())  # (batch, steps)
            multdata = mask * multdata

        # In some cases, especially early in training, the sum may be almost zero,
        # which produces NaNs. As a workaround, add a small positive epsilon to the sum.
        multdata /= K.cast(K.sum(multdata, axis=1, keepdims=True) + K.epsilon(), K.floatx())
        multdata = K.expand_dims(multdata)   # (batch, steps, 1)
        weighted_input = x * multdata        # weight each timestep's features
        return K.sum(weighted_input, axis=1) # (batch, features)

    def compute_output_shape(self, input_shape):
        return (input_shape[0], input_shape[-1],)

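The layer reduces a `(samples, steps, features)` sequence to a `(samples, features)` summary by computing a normalized attention weight for each timestep. A minimal shape check (illustrative only; the layer sizes below are arbitrary):

check_input = Input(shape=(MAX_SEQUENCE_LENGTH, EMBEDDING_DIM))
check_lstm = LSTM(64, return_sequences=True)(check_input)  # (None, 30, 64)
check_attention = AttentionWithContext()(check_lstm)       # (None, 64)
Model(inputs=check_input, outputs=check_attention).summary()
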
In [18]:
def create_model(params):
    embedding_layer = Embedding(
        VOCAB_LENGTH,
        EMBEDDING_DIM,
        weights=[embedding_matrix],
        input_length=MAX_SEQUENCE_LENGTH,
        trainable=False,
    )
    lstm_layer = LSTM(
        params['num_lstm'],
        dropout=params['lstm_dropout_rate'],
        recurrent_dropout=params['lstm_dropout_rate'],
        return_sequences=True,
    )
    attention_layer = AttentionWithContext()

    sequence_1_input = Input(shape=(MAX_SEQUENCE_LENGTH,), dtype='int32')
    embedded_sequences_1 = embedding_layer(sequence_1_input)
    x1 = attention_layer(lstm_layer(embedded_sequences_1))

    sequence_2_input = Input(shape=(MAX_SEQUENCE_LENGTH,), dtype='int32')
    embedded_sequences_2 = embedding_layer(sequence_2_input)
    y1 = attention_layer(lstm_layer(embedded_sequences_2))

    merged = concatenate([x1, y1])
    merged = Dropout(params['dense_dropout_rate'])(merged)
    merged = BatchNormalization()(merged)

    merged = Dense(params['num_dense'], activation='relu')(merged)
    merged = Dropout(params['dense_dropout_rate'])(merged)
    merged = BatchNormalization()(merged)

    output = Dense(1, activation='sigmoid')(merged)

    model = Model(
        inputs=[sequence_1_input, sequence_2_input],
        outputs=output
    )

    model.compile(
        loss=contrastive_loss,
        optimizer='nadam',
        metrics=['accuracy']
    )

    return model

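Both questions pass through the same embedding, LSTM, and attention layers, so the two branches share weights (the "siamese" part); only the dense head after the concatenation is specific to the pair. To inspect the resulting architecture, one can build a throwaway instance with the hyperparameters chosen further below and print its summary (an optional check, not part of the original notebook):

create_model({
    'num_lstm': 254,
    'num_dense': 132,
    'lstm_dropout_rate': 0.324,
    'dense_dropout_rate': 0.164,
}).summary()
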
In [19]:
def predict(model, X_q1, X_q2):
    """
    Mirror the pairs, compute two separate predictions, and average them.
    """
    
    y1 = model.predict([X_q1, X_q2], batch_size=1024, verbose=1).reshape(-1)   
    y2 = model.predict([X_q2, X_q1], batch_size=1024, verbose=1).reshape(-1)    
    return (y1 + y2) / 2

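Because the dense head sees the two branch outputs concatenated in a fixed order, the model is not exactly symmetric in its inputs; averaging both orderings makes the final score order-invariant. An illustrative sketch of that property:

# For any scorer f, g(a, b) = (f(a, b) + f(b, a)) / 2 satisfies g(a, b) == g(b, a).
def symmetric_score(f, a, b):
    return (f(a, b) + f(b, a)) / 2
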
Partition the data


In [20]:
NUM_FOLDS = 5

In [21]:
kfold = StratifiedKFold(
    n_splits=NUM_FOLDS,
    shuffle=True,
    random_state=RANDOM_SEED
)

Create placeholders for out-of-fold predictions: one value per training example (filled in by the fold that holds it out), and one column per fold for the test set (averaged later).


In [22]:
y_train_oofp = np.zeros_like(y_train, dtype='float64')

In [23]:
y_test_oofp = np.zeros((len(X_test_q1), NUM_FOLDS))

Define hyperparameters


In [24]:
BATCH_SIZE = 2048

In [25]:
MAX_EPOCHS = 200

Best values picked by Bayesian optimization.


In [26]:
model_params = {
    'dense_dropout_rate': 0.164,
    'lstm_dropout_rate': 0.324,
    'num_dense': 132,
    'num_lstm': 254,
}

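The search itself is not included in this notebook. As a rough sketch of how such values could be obtained (assuming `hyperopt` and a hypothetical `train_and_score_one_fold` helper that trains on a single fold and returns its best validation loss; the optimizer actually used is not specified here):

from hyperopt import fmin, hp, tpe

space = {
    'dense_dropout_rate': hp.uniform('dense_dropout_rate', 0.1, 0.5),
    'lstm_dropout_rate': hp.uniform('lstm_dropout_rate', 0.1, 0.5),
    'num_dense': hp.quniform('num_dense', 64, 256, 1),
    'num_lstm': hp.quniform('num_lstm', 64, 300, 1),
}

def objective(params):
    # hyperopt samples quniform values as floats; cast the layer sizes back to int.
    params = dict(params, num_dense=int(params['num_dense']), num_lstm=int(params['num_lstm']))
    return train_and_score_one_fold(params)  # hypothetical helper

best_params = fmin(objective, space, algo=tpe.suggest, max_evals=50)
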
The path where the best weights of the current model will be saved.


In [27]:
model_checkpoint_path = project.temp_dir + 'fold-checkpoint-' + feature_list_id + '.h5'

Fit the folds and compute out-of-fold predictions


In [28]:
%%time

# Iterate through folds.
for fold_num, (ix_train, ix_val) in enumerate(kfold.split(X_train_q1, y_train)):
    
    # Augment the training set by mirroring the pairs.
    X_fold_train_q1 = np.vstack([X_train_q1[ix_train], X_train_q2[ix_train]])
    X_fold_train_q2 = np.vstack([X_train_q2[ix_train], X_train_q1[ix_train]])

    X_fold_val_q1 = np.vstack([X_train_q1[ix_val], X_train_q2[ix_val]])
    X_fold_val_q2 = np.vstack([X_train_q2[ix_val], X_train_q1[ix_val]])

    # Ground truth should also be "mirrored".
    y_fold_train = np.concatenate([y_train[ix_train], y_train[ix_train]])
    y_fold_val = np.concatenate([y_train[ix_val], y_train[ix_val]])
    
    print()
    print(f'Fitting fold {fold_num + 1} of {kfold.n_splits}')
    print()
    
    # Compile a new model.
    model = create_model(model_params)

    # Train.
    model.fit(
        [X_fold_train_q1, X_fold_train_q2], y_fold_train,
        validation_data=([X_fold_val_q1, X_fold_val_q2], y_fold_val),

        batch_size=BATCH_SIZE,
        epochs=MAX_EPOCHS,
        verbose=1,
        
        callbacks=[
            # Stop training when the validation loss stops improving.
            EarlyStopping(
                monitor='val_loss',
                min_delta=0.001,
                patience=3,
                verbose=1,
                mode='auto',
            ),
            # Save the weights of the best epoch.
            ModelCheckpoint(
                model_checkpoint_path,
                monitor='val_loss',
                save_best_only=True,
                verbose=2,
            ),
        ],
    )
        
    # Restore the best epoch.
    model.load_weights(model_checkpoint_path)
    
    # Compute out-of-fold predictions.
    y_train_oofp[ix_val] = predict(model, X_train_q1[ix_val], X_train_q2[ix_val])
    y_test_oofp[:, fold_num] = predict(model, X_test_q1, X_test_q2)
    
    # Clear GPU memory.
    K.clear_session()
    del X_fold_train_q1
    del X_fold_train_q2
    del X_fold_val_q1
    del X_fold_val_q2
    del model
    gc.collect()


Fitting fold 1 of 5

Train on 646862 samples, validate on 161718 samples
Epoch 1/200
645120/646862 [============================>.] - ETA: 0s - loss: 0.1843 - acc: 0.7180Epoch 00000: val_loss improved from inf to 0.21473, saving model to /home/yuriyguts/Projects/kaggle-quora-question-pairs/data/tmp/fold-checkpoint-oofp_nn_siamese_lstm_attention.h5
646862/646862 [==============================] - 79s - loss: 0.1842 - acc: 0.7181 - val_loss: 0.2147 - val_acc: 0.6840
Epoch 2/200
645120/646862 [============================>.] - ETA: 0s - loss: 0.1550 - acc: 0.7703Epoch 00001: val_loss improved from 0.21473 to 0.15562, saving model to /home/yuriyguts/Projects/kaggle-quora-question-pairs/data/tmp/fold-checkpoint-oofp_nn_siamese_lstm_attention.h5
646862/646862 [==============================] - 77s - loss: 0.1550 - acc: 0.7703 - val_loss: 0.1556 - val_acc: 0.7790
Epoch 3/200
645120/646862 [============================>.] - ETA: 0s - loss: 0.1422 - acc: 0.7921Epoch 00002: val_loss improved from 0.15562 to 0.13599, saving model to /home/yuriyguts/Projects/kaggle-quora-question-pairs/data/tmp/fold-checkpoint-oofp_nn_siamese_lstm_attention.h5
646862/646862 [==============================] - 78s - loss: 0.1422 - acc: 0.7920 - val_loss: 0.1360 - val_acc: 0.8013
Epoch 4/200
645120/646862 [============================>.] - ETA: 0s - loss: 0.1334 - acc: 0.8069Epoch 00003: val_loss improved from 0.13599 to 0.13269, saving model to /home/yuriyguts/Projects/kaggle-quora-question-pairs/data/tmp/fold-checkpoint-oofp_nn_siamese_lstm_attention.h5
646862/646862 [==============================] - 78s - loss: 0.1335 - acc: 0.8069 - val_loss: 0.1327 - val_acc: 0.8066
Epoch 5/200
645120/646862 [============================>.] - ETA: 0s - loss: 0.1269 - acc: 0.8174Epoch 00004: val_loss improved from 0.13269 to 0.12810, saving model to /home/yuriyguts/Projects/kaggle-quora-question-pairs/data/tmp/fold-checkpoint-oofp_nn_siamese_lstm_attention.h5
646862/646862 [==============================] - 77s - loss: 0.1268 - acc: 0.8174 - val_loss: 0.1281 - val_acc: 0.8144
Epoch 6/200
645120/646862 [============================>.] - ETA: 0s - loss: 0.1214 - acc: 0.8266Epoch 00005: val_loss improved from 0.12810 to 0.11976, saving model to /home/yuriyguts/Projects/kaggle-quora-question-pairs/data/tmp/fold-checkpoint-oofp_nn_siamese_lstm_attention.h5
646862/646862 [==============================] - 78s - loss: 0.1214 - acc: 0.8265 - val_loss: 0.1198 - val_acc: 0.8286
Epoch 7/200
645120/646862 [============================>.] - ETA: 0s - loss: 0.1169 - acc: 0.8334Epoch 00006: val_loss improved from 0.11976 to 0.11909, saving model to /home/yuriyguts/Projects/kaggle-quora-question-pairs/data/tmp/fold-checkpoint-oofp_nn_siamese_lstm_attention.h5
646862/646862 [==============================] - 78s - loss: 0.1169 - acc: 0.8334 - val_loss: 0.1191 - val_acc: 0.8290
Epoch 8/200
645120/646862 [============================>.] - ETA: 0s - loss: 0.1132 - acc: 0.8397Epoch 00007: val_loss did not improve
646862/646862 [==============================] - 77s - loss: 0.1132 - acc: 0.8397 - val_loss: 0.1232 - val_acc: 0.8225
Epoch 9/200
645120/646862 [============================>.] - ETA: 0s - loss: 0.1097 - acc: 0.8450Epoch 00008: val_loss improved from 0.11909 to 0.11520, saving model to /home/yuriyguts/Projects/kaggle-quora-question-pairs/data/tmp/fold-checkpoint-oofp_nn_siamese_lstm_attention.h5
646862/646862 [==============================] - 78s - loss: 0.1097 - acc: 0.8451 - val_loss: 0.1152 - val_acc: 0.8362
Epoch 10/200
645120/646862 [============================>.] - ETA: 0s - loss: 0.1069 - acc: 0.8500Epoch 00009: val_loss did not improve
646862/646862 [==============================] - 78s - loss: 0.1069 - acc: 0.8500 - val_loss: 0.1170 - val_acc: 0.8345
Epoch 11/200
645120/646862 [============================>.] - ETA: 0s - loss: 0.1046 - acc: 0.8535Epoch 00010: val_loss did not improve
646862/646862 [==============================] - 77s - loss: 0.1046 - acc: 0.8535 - val_loss: 0.1168 - val_acc: 0.8329
Epoch 12/200
645120/646862 [============================>.] - ETA: 0s - loss: 0.1021 - acc: 0.8574Epoch 00011: val_loss improved from 0.11520 to 0.11395, saving model to /home/yuriyguts/Projects/kaggle-quora-question-pairs/data/tmp/fold-checkpoint-oofp_nn_siamese_lstm_attention.h5
646862/646862 [==============================] - 77s - loss: 0.1021 - acc: 0.8574 - val_loss: 0.1140 - val_acc: 0.8392
Epoch 13/200
645120/646862 [============================>.] - ETA: 0s - loss: 0.1002 - acc: 0.8601Epoch 00012: val_loss improved from 0.11395 to 0.11264, saving model to /home/yuriyguts/Projects/kaggle-quora-question-pairs/data/tmp/fold-checkpoint-oofp_nn_siamese_lstm_attention.h5
646862/646862 [==============================] - 78s - loss: 0.1002 - acc: 0.8601 - val_loss: 0.1126 - val_acc: 0.8415
Epoch 14/200
645120/646862 [============================>.] - ETA: 0s - loss: 0.0985 - acc: 0.8630Epoch 00013: val_loss did not improve
646862/646862 [==============================] - 78s - loss: 0.0985 - acc: 0.8631 - val_loss: 0.1137 - val_acc: 0.8396
Epoch 15/200
645120/646862 [============================>.] - ETA: 0s - loss: 0.0968 - acc: 0.8655Epoch 00014: val_loss improved from 0.11264 to 0.11197, saving model to /home/yuriyguts/Projects/kaggle-quora-question-pairs/data/tmp/fold-checkpoint-oofp_nn_siamese_lstm_attention.h5
646862/646862 [==============================] - 79s - loss: 0.0968 - acc: 0.8655 - val_loss: 0.1120 - val_acc: 0.8426
Epoch 16/200
645120/646862 [============================>.] - ETA: 0s - loss: 0.0954 - acc: 0.8678Epoch 00015: val_loss did not improve
646862/646862 [==============================] - 79s - loss: 0.0954 - acc: 0.8678 - val_loss: 0.1137 - val_acc: 0.8393
Epoch 17/200
645120/646862 [============================>.] - ETA: 0s - loss: 0.0941 - acc: 0.8696Epoch 00016: val_loss did not improve
646862/646862 [==============================] - 79s - loss: 0.0941 - acc: 0.8696 - val_loss: 0.1126 - val_acc: 0.8423
Epoch 00016: early stopping
2344960/2345796 [============================>.] - ETA: 0s
Fitting fold 2 of 5

Train on 646862 samples, validate on 161718 samples
Epoch 1/200
645120/646862 [============================>.] - ETA: 0s - loss: 0.1846 - acc: 0.7179Epoch 00000: val_loss improved from inf to 0.20657, saving model to /home/yuriyguts/Projects/kaggle-quora-question-pairs/data/tmp/fold-checkpoint-oofp_nn_siamese_lstm_attention.h5
646862/646862 [==============================] - 80s - loss: 0.1845 - acc: 0.7180 - val_loss: 0.2066 - val_acc: 0.6524
Epoch 2/200
645120/646862 [============================>.] - ETA: 0s - loss: 0.1547 - acc: 0.7701Epoch 00001: val_loss improved from 0.20657 to 0.14955, saving model to /home/yuriyguts/Projects/kaggle-quora-question-pairs/data/tmp/fold-checkpoint-oofp_nn_siamese_lstm_attention.h5
646862/646862 [==============================] - 80s - loss: 0.1546 - acc: 0.7701 - val_loss: 0.1496 - val_acc: 0.7899
Epoch 3/200
645120/646862 [============================>.] - ETA: 0s - loss: 0.1417 - acc: 0.7918Epoch 00002: val_loss improved from 0.14955 to 0.13209, saving model to /home/yuriyguts/Projects/kaggle-quora-question-pairs/data/tmp/fold-checkpoint-oofp_nn_siamese_lstm_attention.h5
646862/646862 [==============================] - 80s - loss: 0.1417 - acc: 0.7918 - val_loss: 0.1321 - val_acc: 0.8093
Epoch 4/200
645120/646862 [============================>.] - ETA: 0s - loss: 0.1331 - acc: 0.8071Epoch 00003: val_loss did not improve
646862/646862 [==============================] - 74s - loss: 0.1331 - acc: 0.8071 - val_loss: 0.1323 - val_acc: 0.8092
Epoch 5/200
645120/646862 [============================>.] - ETA: 0s - loss: 0.1270 - acc: 0.8171Epoch 00004: val_loss improved from 0.13209 to 0.12600, saving model to /home/yuriyguts/Projects/kaggle-quora-question-pairs/data/tmp/fold-checkpoint-oofp_nn_siamese_lstm_attention.h5
646862/646862 [==============================] - 71s - loss: 0.1270 - acc: 0.8171 - val_loss: 0.1260 - val_acc: 0.8192
Epoch 6/200
645120/646862 [============================>.] - ETA: 0s - loss: 0.1216 - acc: 0.8258Epoch 00005: val_loss improved from 0.12600 to 0.12143, saving model to /home/yuriyguts/Projects/kaggle-quora-question-pairs/data/tmp/fold-checkpoint-oofp_nn_siamese_lstm_attention.h5
646862/646862 [==============================] - 73s - loss: 0.1216 - acc: 0.8258 - val_loss: 0.1214 - val_acc: 0.8262
Epoch 7/200
645120/646862 [============================>.] - ETA: 0s - loss: 0.1173 - acc: 0.8333Epoch 00006: val_loss did not improve
646862/646862 [==============================] - 71s - loss: 0.1173 - acc: 0.8333 - val_loss: 0.1222 - val_acc: 0.8253
Epoch 8/200
645120/646862 [============================>.] - ETA: 0s - loss: 0.1130 - acc: 0.8398Epoch 00007: val_loss improved from 0.12143 to 0.11808, saving model to /home/yuriyguts/Projects/kaggle-quora-question-pairs/data/tmp/fold-checkpoint-oofp_nn_siamese_lstm_attention.h5
646862/646862 [==============================] - 71s - loss: 0.1130 - acc: 0.8399 - val_loss: 0.1181 - val_acc: 0.8325
Epoch 9/200
645120/646862 [============================>.] - ETA: 0s - loss: 0.1099 - acc: 0.8451Epoch 00008: val_loss improved from 0.11808 to 0.11705, saving model to /home/yuriyguts/Projects/kaggle-quora-question-pairs/data/tmp/fold-checkpoint-oofp_nn_siamese_lstm_attention.h5
646862/646862 [==============================] - 71s - loss: 0.1099 - acc: 0.8451 - val_loss: 0.1170 - val_acc: 0.8340
Epoch 10/200
645120/646862 [============================>.] - ETA: 0s - loss: 0.1072 - acc: 0.8496Epoch 00009: val_loss did not improve
646862/646862 [==============================] - 72s - loss: 0.1072 - acc: 0.8495 - val_loss: 0.1178 - val_acc: 0.8333
Epoch 11/200
645120/646862 [============================>.] - ETA: 0s - loss: 0.1044 - acc: 0.8538Epoch 00010: val_loss did not improve
646862/646862 [==============================] - 77s - loss: 0.1044 - acc: 0.8538 - val_loss: 0.1172 - val_acc: 0.8350
Epoch 12/200
645120/646862 [============================>.] - ETA: 0s - loss: 0.1019 - acc: 0.8581Epoch 00011: val_loss improved from 0.11705 to 0.11340, saving model to /home/yuriyguts/Projects/kaggle-quora-question-pairs/data/tmp/fold-checkpoint-oofp_nn_siamese_lstm_attention.h5
646862/646862 [==============================] - 71s - loss: 0.1019 - acc: 0.8581 - val_loss: 0.1134 - val_acc: 0.8402
Epoch 13/200
645120/646862 [============================>.] - ETA: 0s - loss: 0.1002 - acc: 0.8601Epoch 00012: val_loss improved from 0.11340 to 0.11280, saving model to /home/yuriyguts/Projects/kaggle-quora-question-pairs/data/tmp/fold-checkpoint-oofp_nn_siamese_lstm_attention.h5
646862/646862 [==============================] - 74s - loss: 0.1002 - acc: 0.8602 - val_loss: 0.1128 - val_acc: 0.8414
Epoch 14/200
645120/646862 [============================>.] - ETA: 0s - loss: 0.0982 - acc: 0.8637Epoch 00013: val_loss did not improve
646862/646862 [==============================] - 72s - loss: 0.0982 - acc: 0.8636 - val_loss: 0.1153 - val_acc: 0.8390
Epoch 15/200
645120/646862 [============================>.] - ETA: 0s - loss: 0.0963 - acc: 0.8665Epoch 00014: val_loss did not improve
646862/646862 [==============================] - 71s - loss: 0.0963 - acc: 0.8665 - val_loss: 0.1150 - val_acc: 0.8390
Epoch 16/200
645120/646862 [============================>.] - ETA: 0s - loss: 0.0951 - acc: 0.8687Epoch 00015: val_loss improved from 0.11280 to 0.11239, saving model to /home/yuriyguts/Projects/kaggle-quora-question-pairs/data/tmp/fold-checkpoint-oofp_nn_siamese_lstm_attention.h5
646862/646862 [==============================] - 73s - loss: 0.0951 - acc: 0.8687 - val_loss: 0.1124 - val_acc: 0.8433
Epoch 17/200
645120/646862 [============================>.] - ETA: 0s - loss: 0.0942 - acc: 0.8698Epoch 00016: val_loss improved from 0.11239 to 0.11196, saving model to /home/yuriyguts/Projects/kaggle-quora-question-pairs/data/tmp/fold-checkpoint-oofp_nn_siamese_lstm_attention.h5
646862/646862 [==============================] - 71s - loss: 0.0942 - acc: 0.8697 - val_loss: 0.1120 - val_acc: 0.8433
Epoch 18/200
645120/646862 [============================>.] - ETA: 0s - loss: 0.0925 - acc: 0.8724Epoch 00017: val_loss did not improve
646862/646862 [==============================] - 72s - loss: 0.0925 - acc: 0.8725 - val_loss: 0.1122 - val_acc: 0.8439
Epoch 19/200
645120/646862 [============================>.] - ETA: 0s - loss: 0.0915 - acc: 0.8737Epoch 00018: val_loss improved from 0.11196 to 0.11112, saving model to /home/yuriyguts/Projects/kaggle-quora-question-pairs/data/tmp/fold-checkpoint-oofp_nn_siamese_lstm_attention.h5
646862/646862 [==============================] - 73s - loss: 0.0915 - acc: 0.8736 - val_loss: 0.1111 - val_acc: 0.8454
Epoch 20/200
645120/646862 [============================>.] - ETA: 0s - loss: 0.0906 - acc: 0.8754Epoch 00019: val_loss improved from 0.11112 to 0.10997, saving model to /home/yuriyguts/Projects/kaggle-quora-question-pairs/data/tmp/fold-checkpoint-oofp_nn_siamese_lstm_attention.h5
646862/646862 [==============================] - 71s - loss: 0.0906 - acc: 0.8754 - val_loss: 0.1100 - val_acc: 0.8475
Epoch 21/200
645120/646862 [============================>.] - ETA: 0s - loss: 0.0892 - acc: 0.8773Epoch 00020: val_loss improved from 0.10997 to 0.10888, saving model to /home/yuriyguts/Projects/kaggle-quora-question-pairs/data/tmp/fold-checkpoint-oofp_nn_siamese_lstm_attention.h5
646862/646862 [==============================] - 72s - loss: 0.0892 - acc: 0.8773 - val_loss: 0.1089 - val_acc: 0.8493
Epoch 22/200
645120/646862 [============================>.] - ETA: 0s - loss: 0.0887 - acc: 0.8778Epoch 00021: val_loss did not improve
646862/646862 [==============================] - 73s - loss: 0.0887 - acc: 0.8778 - val_loss: 0.1096 - val_acc: 0.8474
Epoch 23/200
645120/646862 [============================>.] - ETA: 0s - loss: 0.0878 - acc: 0.8793Epoch 00022: val_loss did not improve
646862/646862 [==============================] - 72s - loss: 0.0878 - acc: 0.8793 - val_loss: 0.1107 - val_acc: 0.8466
Epoch 24/200
645120/646862 [============================>.] - ETA: 0s - loss: 0.0868 - acc: 0.8808Epoch 00023: val_loss did not improve
646862/646862 [==============================] - 71s - loss: 0.0868 - acc: 0.8808 - val_loss: 0.1113 - val_acc: 0.8461
Epoch 25/200
645120/646862 [============================>.] - ETA: 0s - loss: 0.0863 - acc: 0.8818Epoch 00024: val_loss did not improve
646862/646862 [==============================] - 71s - loss: 0.0863 - acc: 0.8818 - val_loss: 0.1101 - val_acc: 0.8475
Epoch 00024: early stopping
80859/80859 [==============================] - 3s     
80859/80859 [==============================] - 3s     
2345796/2345796 [==============================] - 106s   
2345796/2345796 [==============================] - 112s   

Fitting fold 3 of 5

Train on 646864 samples, validate on 161716 samples
Epoch 1/200
645120/646864 [============================>.] - ETA: 0s - loss: 0.1856 - acc: 0.7159Epoch 00000: val_loss improved from inf to 0.20636, saving model to /home/yuriyguts/Projects/kaggle-quora-question-pairs/data/tmp/fold-checkpoint-oofp_nn_siamese_lstm_attention.h5
646864/646864 [==============================] - 76s - loss: 0.1856 - acc: 0.7160 - val_loss: 0.2064 - val_acc: 0.6640
Epoch 2/200
645120/646864 [============================>.] - ETA: 0s - loss: 0.1556 - acc: 0.7685Epoch 00001: val_loss improved from 0.20636 to 0.16753, saving model to /home/yuriyguts/Projects/kaggle-quora-question-pairs/data/tmp/fold-checkpoint-oofp_nn_siamese_lstm_attention.h5
646864/646864 [==============================] - 80s - loss: 0.1555 - acc: 0.7686 - val_loss: 0.1675 - val_acc: 0.7489
Epoch 3/200
645120/646864 [============================>.] - ETA: 0s - loss: 0.1423 - acc: 0.7911Epoch 00002: val_loss improved from 0.16753 to 0.13742, saving model to /home/yuriyguts/Projects/kaggle-quora-question-pairs/data/tmp/fold-checkpoint-oofp_nn_siamese_lstm_attention.h5
646864/646864 [==============================] - 76s - loss: 0.1423 - acc: 0.7911 - val_loss: 0.1374 - val_acc: 0.8001
Epoch 4/200
645120/646864 [============================>.] - ETA: 0s - loss: 0.1332 - acc: 0.8070Epoch 00003: val_loss improved from 0.13742 to 0.13103, saving model to /home/yuriyguts/Projects/kaggle-quora-question-pairs/data/tmp/fold-checkpoint-oofp_nn_siamese_lstm_attention.h5
646864/646864 [==============================] - 72s - loss: 0.1331 - acc: 0.8070 - val_loss: 0.1310 - val_acc: 0.8093
Epoch 5/200
645120/646864 [============================>.] - ETA: 0s - loss: 0.1265 - acc: 0.8181Epoch 00004: val_loss improved from 0.13103 to 0.12388, saving model to /home/yuriyguts/Projects/kaggle-quora-question-pairs/data/tmp/fold-checkpoint-oofp_nn_siamese_lstm_attention.h5
646864/646864 [==============================] - 72s - loss: 0.1265 - acc: 0.8181 - val_loss: 0.1239 - val_acc: 0.8209
Epoch 6/200
645120/646864 [============================>.] - ETA: 0s - loss: 0.1211 - acc: 0.8266Epoch 00005: val_loss improved from 0.12388 to 0.12240, saving model to /home/yuriyguts/Projects/kaggle-quora-question-pairs/data/tmp/fold-checkpoint-oofp_nn_siamese_lstm_attention.h5
646864/646864 [==============================] - 71s - loss: 0.1211 - acc: 0.8267 - val_loss: 0.1224 - val_acc: 0.8248
Epoch 7/200
645120/646864 [============================>.] - ETA: 0s - loss: 0.1167 - acc: 0.8340Epoch 00006: val_loss did not improve
646864/646864 [==============================] - 72s - loss: 0.1167 - acc: 0.8340 - val_loss: 0.1245 - val_acc: 0.8199
Epoch 8/200
645120/646864 [============================>.] - ETA: 0s - loss: 0.1128 - acc: 0.8403Epoch 00007: val_loss improved from 0.12240 to 0.11539, saving model to /home/yuriyguts/Projects/kaggle-quora-question-pairs/data/tmp/fold-checkpoint-oofp_nn_siamese_lstm_attention.h5
646864/646864 [==============================] - 79s - loss: 0.1128 - acc: 0.8402 - val_loss: 0.1154 - val_acc: 0.8355
Epoch 9/200
645120/646864 [============================>.] - ETA: 0s - loss: 0.1092 - acc: 0.8460Epoch 00008: val_loss did not improve
646864/646864 [==============================] - 72s - loss: 0.1092 - acc: 0.8460 - val_loss: 0.1162 - val_acc: 0.8362
Epoch 10/200
645120/646864 [============================>.] - ETA: 0s - loss: 0.1068 - acc: 0.8500Epoch 00009: val_loss improved from 0.11539 to 0.11494, saving model to /home/yuriyguts/Projects/kaggle-quora-question-pairs/data/tmp/fold-checkpoint-oofp_nn_siamese_lstm_attention.h5
646864/646864 [==============================] - 72s - loss: 0.1067 - acc: 0.8500 - val_loss: 0.1149 - val_acc: 0.8379
Epoch 11/200
645120/646864 [============================>.] - ETA: 0s - loss: 0.1038 - acc: 0.8549Epoch 00010: val_loss improved from 0.11494 to 0.11353, saving model to /home/yuriyguts/Projects/kaggle-quora-question-pairs/data/tmp/fold-checkpoint-oofp_nn_siamese_lstm_attention.h5
646864/646864 [==============================] - 72s - loss: 0.1038 - acc: 0.8549 - val_loss: 0.1135 - val_acc: 0.8395
Epoch 12/200
645120/646864 [============================>.] - ETA: 0s - loss: 0.1018 - acc: 0.8578Epoch 00011: val_loss improved from 0.11353 to 0.11215, saving model to /home/yuriyguts/Projects/kaggle-quora-question-pairs/data/tmp/fold-checkpoint-oofp_nn_siamese_lstm_attention.h5
646864/646864 [==============================] - 71s - loss: 0.1018 - acc: 0.8578 - val_loss: 0.1121 - val_acc: 0.8427
Epoch 13/200
645120/646864 [============================>.] - ETA: 0s - loss: 0.0997 - acc: 0.8609Epoch 00012: val_loss improved from 0.11215 to 0.11115, saving model to /home/yuriyguts/Projects/kaggle-quora-question-pairs/data/tmp/fold-checkpoint-oofp_nn_siamese_lstm_attention.h5
646864/646864 [==============================] - 71s - loss: 0.0997 - acc: 0.8609 - val_loss: 0.1112 - val_acc: 0.8446
Epoch 14/200
645120/646864 [============================>.] - ETA: 0s - loss: 0.0979 - acc: 0.8641Epoch 00013: val_loss did not improve
646864/646864 [==============================] - 73s - loss: 0.0979 - acc: 0.8641 - val_loss: 0.1153 - val_acc: 0.8383
Epoch 15/200
645120/646864 [============================>.] - ETA: 0s - loss: 0.0965 - acc: 0.8662Epoch 00014: val_loss did not improve
646864/646864 [==============================] - 80s - loss: 0.0965 - acc: 0.8662 - val_loss: 0.1172 - val_acc: 0.8369
Epoch 16/200
645120/646864 [============================>.] - ETA: 0s - loss: 0.0951 - acc: 0.8685Epoch 00015: val_loss did not improve
646864/646864 [==============================] - 81s - loss: 0.0951 - acc: 0.8685 - val_loss: 0.1131 - val_acc: 0.8412
Epoch 00015: early stopping
80858/80858 [==============================] - 4s     
2344960/2345796 [============================>.] - ETA: 0s
Fitting fold 4 of 5

Train on 646866 samples, validate on 161714 samples
Epoch 1/200
645120/646866 [============================>.] - ETA: 0s - loss: 0.1865 - acc: 0.7153Epoch 00000: val_loss improved from inf to 0.20801, saving model to /home/yuriyguts/Projects/kaggle-quora-question-pairs/data/tmp/fold-checkpoint-oofp_nn_siamese_lstm_attention.h5
646866/646866 [==============================] - 88s - loss: 0.1864 - acc: 0.7154 - val_loss: 0.2080 - val_acc: 0.6537
Epoch 2/200
645120/646866 [============================>.] - ETA: 0s - loss: 0.1565 - acc: 0.7676Epoch 00001: val_loss improved from 0.20801 to 0.15519, saving model to /home/yuriyguts/Projects/kaggle-quora-question-pairs/data/tmp/fold-checkpoint-oofp_nn_siamese_lstm_attention.h5
646866/646866 [==============================] - 86s - loss: 0.1565 - acc: 0.7676 - val_loss: 0.1552 - val_acc: 0.7859
Epoch 3/200
645120/646866 [============================>.] - ETA: 0s - loss: 0.1428 - acc: 0.7911Epoch 00002: val_loss improved from 0.15519 to 0.13651, saving model to /home/yuriyguts/Projects/kaggle-quora-question-pairs/data/tmp/fold-checkpoint-oofp_nn_siamese_lstm_attention.h5
646866/646866 [==============================] - 85s - loss: 0.1428 - acc: 0.7912 - val_loss: 0.1365 - val_acc: 0.8009
Epoch 4/200
645120/646866 [============================>.] - ETA: 0s - loss: 0.1343 - acc: 0.8054Epoch 00003: val_loss improved from 0.13651 to 0.12605, saving model to /home/yuriyguts/Projects/kaggle-quora-question-pairs/data/tmp/fold-checkpoint-oofp_nn_siamese_lstm_attention.h5
646866/646866 [==============================] - 73s - loss: 0.1343 - acc: 0.8053 - val_loss: 0.1261 - val_acc: 0.8188
Epoch 5/200
645120/646866 [============================>.] - ETA: 0s - loss: 0.1277 - acc: 0.8160Epoch 00004: val_loss improved from 0.12605 to 0.12560, saving model to /home/yuriyguts/Projects/kaggle-quora-question-pairs/data/tmp/fold-checkpoint-oofp_nn_siamese_lstm_attention.h5
646866/646866 [==============================] - 77s - loss: 0.1277 - acc: 0.8160 - val_loss: 0.1256 - val_acc: 0.8183
Epoch 6/200
645120/646866 [============================>.] - ETA: 0s - loss: 0.1220 - acc: 0.8260Epoch 00005: val_loss improved from 0.12560 to 0.12178, saving model to /home/yuriyguts/Projects/kaggle-quora-question-pairs/data/tmp/fold-checkpoint-oofp_nn_siamese_lstm_attention.h5
646866/646866 [==============================] - 77s - loss: 0.1221 - acc: 0.8260 - val_loss: 0.1218 - val_acc: 0.8252
Epoch 7/200
645120/646866 [============================>.] - ETA: 0s - loss: 0.1175 - acc: 0.8333Epoch 00006: val_loss improved from 0.12178 to 0.12102, saving model to /home/yuriyguts/Projects/kaggle-quora-question-pairs/data/tmp/fold-checkpoint-oofp_nn_siamese_lstm_attention.h5
646866/646866 [==============================] - 77s - loss: 0.1176 - acc: 0.8332 - val_loss: 0.1210 - val_acc: 0.8272
Epoch 8/200
645120/646866 [============================>.] - ETA: 0s - loss: 0.1137 - acc: 0.8394Epoch 00007: val_loss improved from 0.12102 to 0.12044, saving model to /home/yuriyguts/Projects/kaggle-quora-question-pairs/data/tmp/fold-checkpoint-oofp_nn_siamese_lstm_attention.h5
646866/646866 [==============================] - 86s - loss: 0.1137 - acc: 0.8394 - val_loss: 0.1204 - val_acc: 0.8281
Epoch 9/200
645120/646866 [============================>.] - ETA: 0s - loss: 0.1098 - acc: 0.8455Epoch 00008: val_loss improved from 0.12044 to 0.11472, saving model to /home/yuriyguts/Projects/kaggle-quora-question-pairs/data/tmp/fold-checkpoint-oofp_nn_siamese_lstm_attention.h5
646866/646866 [==============================] - 76s - loss: 0.1099 - acc: 0.8454 - val_loss: 0.1147 - val_acc: 0.8376
Epoch 10/200
645120/646866 [============================>.] - ETA: 0s - loss: 0.1073 - acc: 0.8490Epoch 00009: val_loss improved from 0.11472 to 0.11432, saving model to /home/yuriyguts/Projects/kaggle-quora-question-pairs/data/tmp/fold-checkpoint-oofp_nn_siamese_lstm_attention.h5
646866/646866 [==============================] - 85s - loss: 0.1073 - acc: 0.8490 - val_loss: 0.1143 - val_acc: 0.8370
Epoch 11/200
645120/646866 [============================>.] - ETA: 0s - loss: 0.1046 - acc: 0.8539Epoch 00010: val_loss did not improve
646866/646866 [==============================] - 80s - loss: 0.1045 - acc: 0.8539 - val_loss: 0.1170 - val_acc: 0.8352
Epoch 12/200
645120/646866 [============================>.] - ETA: 0s - loss: 0.1026 - acc: 0.8568Epoch 00011: val_loss improved from 0.11432 to 0.11395, saving model to /home/yuriyguts/Projects/kaggle-quora-question-pairs/data/tmp/fold-checkpoint-oofp_nn_siamese_lstm_attention.h5
646866/646866 [==============================] - 86s - loss: 0.1026 - acc: 0.8568 - val_loss: 0.1140 - val_acc: 0.8396
Epoch 13/200
645120/646866 [============================>.] - ETA: 0s - loss: 0.1003 - acc: 0.8604Epoch 00012: val_loss did not improve
646866/646866 [==============================] - 86s - loss: 0.1003 - acc: 0.8604 - val_loss: 0.1147 - val_acc: 0.8386
Epoch 00012: early stopping
80857/80857 [==============================] - 4s     
80857/80857 [==============================] - 4s     
2345796/2345796 [==============================] - 124s   

Fitting fold 5 of 5

Train on 646866 samples, validate on 161714 samples
Epoch 1/200
645120/646866 [============================>.] - ETA: 0s - loss: 0.1840 - acc: 0.7181Epoch 00000: val_loss improved from inf to 0.21199, saving model to /home/yuriyguts/Projects/kaggle-quora-question-pairs/data/tmp/fold-checkpoint-oofp_nn_siamese_lstm_attention.h5
646866/646866 [==============================] - 86s - loss: 0.1839 - acc: 0.7182 - val_loss: 0.2120 - val_acc: 0.6538
Epoch 2/200
645120/646866 [============================>.] - ETA: 0s - loss: 0.1550 - acc: 0.7697Epoch 00001: val_loss improved from 0.21199 to 0.15076, saving model to /home/yuriyguts/Projects/kaggle-quora-question-pairs/data/tmp/fold-checkpoint-oofp_nn_siamese_lstm_attention.h5
646866/646866 [==============================] - 86s - loss: 0.1550 - acc: 0.7698 - val_loss: 0.1508 - val_acc: 0.7914
Epoch 3/200
645120/646866 [============================>.] - ETA: 0s - loss: 0.1420 - acc: 0.7920Epoch 00002: val_loss improved from 0.15076 to 0.13195, saving model to /home/yuriyguts/Projects/kaggle-quora-question-pairs/data/tmp/fold-checkpoint-oofp_nn_siamese_lstm_attention.h5
646866/646866 [==============================] - 86s - loss: 0.1420 - acc: 0.7920 - val_loss: 0.1319 - val_acc: 0.8077
Epoch 4/200
645120/646866 [============================>.] - ETA: 0s - loss: 0.1335 - acc: 0.8062Epoch 00003: val_loss improved from 0.13195 to 0.12728, saving model to /home/yuriyguts/Projects/kaggle-quora-question-pairs/data/tmp/fold-checkpoint-oofp_nn_siamese_lstm_attention.h5
646866/646866 [==============================] - 85s - loss: 0.1335 - acc: 0.8061 - val_loss: 0.1273 - val_acc: 0.8165
Epoch 5/200
645120/646866 [============================>.] - ETA: 0s - loss: 0.1270 - acc: 0.8172Epoch 00004: val_loss improved from 0.12728 to 0.12308, saving model to /home/yuriyguts/Projects/kaggle-quora-question-pairs/data/tmp/fold-checkpoint-oofp_nn_siamese_lstm_attention.h5
646866/646866 [==============================] - 85s - loss: 0.1270 - acc: 0.8172 - val_loss: 0.1231 - val_acc: 0.8233
Epoch 6/200
645120/646866 [============================>.] - ETA: 0s - loss: 0.1213 - acc: 0.8263Epoch 00005: val_loss improved from 0.12308 to 0.12024, saving model to /home/yuriyguts/Projects/kaggle-quora-question-pairs/data/tmp/fold-checkpoint-oofp_nn_siamese_lstm_attention.h5
646866/646866 [==============================] - 86s - loss: 0.1213 - acc: 0.8263 - val_loss: 0.1202 - val_acc: 0.8273
Epoch 7/200
645120/646866 [============================>.] - ETA: 0s - loss: 0.1170 - acc: 0.8334Epoch 00006: val_loss did not improve
646866/646866 [==============================] - 85s - loss: 0.1170 - acc: 0.8335 - val_loss: 0.1211 - val_acc: 0.8270
Epoch 8/200
645120/646866 [============================>.] - ETA: 0s - loss: 0.1133 - acc: 0.8397Epoch 00007: val_loss improved from 0.12024 to 0.11713, saving model to /home/yuriyguts/Projects/kaggle-quora-question-pairs/data/tmp/fold-checkpoint-oofp_nn_siamese_lstm_attention.h5
646866/646866 [==============================] - 85s - loss: 0.1133 - acc: 0.8397 - val_loss: 0.1171 - val_acc: 0.8322
Epoch 9/200
645120/646866 [============================>.] - ETA: 0s - loss: 0.1100 - acc: 0.8452Epoch 00008: val_loss did not improve
646866/646866 [==============================] - 85s - loss: 0.1101 - acc: 0.8452 - val_loss: 0.1181 - val_acc: 0.8322
Epoch 10/200
645120/646866 [============================>.] - ETA: 0s - loss: 0.1073 - acc: 0.8490Epoch 00009: val_loss improved from 0.11713 to 0.11526, saving model to /home/yuriyguts/Projects/kaggle-quora-question-pairs/data/tmp/fold-checkpoint-oofp_nn_siamese_lstm_attention.h5
646866/646866 [==============================] - 85s - loss: 0.1073 - acc: 0.8490 - val_loss: 0.1153 - val_acc: 0.8365
Epoch 11/200
645120/646866 [============================>.] - ETA: 0s - loss: 0.1048 - acc: 0.8528Epoch 00010: val_loss improved from 0.11526 to 0.11182, saving model to /home/yuriyguts/Projects/kaggle-quora-question-pairs/data/tmp/fold-checkpoint-oofp_nn_siamese_lstm_attention.h5
646866/646866 [==============================] - 86s - loss: 0.1048 - acc: 0.8527 - val_loss: 0.1118 - val_acc: 0.8421
Epoch 12/200
645120/646866 [============================>.] - ETA: 0s - loss: 0.1026 - acc: 0.8564Epoch 00011: val_loss did not improve
646866/646866 [==============================] - 85s - loss: 0.1026 - acc: 0.8565 - val_loss: 0.1146 - val_acc: 0.8380
Epoch 13/200
645120/646866 [============================>.] - ETA: 0s - loss: 0.1004 - acc: 0.8597Epoch 00012: val_loss did not improve
646866/646866 [==============================] - 85s - loss: 0.1004 - acc: 0.8597 - val_loss: 0.1136 - val_acc: 0.8399
Epoch 14/200
645120/646866 [============================>.] - ETA: 0s - loss: 0.0987 - acc: 0.8624Epoch 00013: val_loss improved from 0.11182 to 0.11155, saving model to /home/yuriyguts/Projects/kaggle-quora-question-pairs/data/tmp/fold-checkpoint-oofp_nn_siamese_lstm_attention.h5
646866/646866 [==============================] - 85s - loss: 0.0987 - acc: 0.8624 - val_loss: 0.1116 - val_acc: 0.8433
Epoch 15/200
645120/646866 [============================>.] - ETA: 0s - loss: 0.0969 - acc: 0.8651Epoch 00014: val_loss improved from 0.11155 to 0.11119, saving model to /home/yuriyguts/Projects/kaggle-quora-question-pairs/data/tmp/fold-checkpoint-oofp_nn_siamese_lstm_attention.h5
646866/646866 [==============================] - 86s - loss: 0.0969 - acc: 0.8650 - val_loss: 0.1112 - val_acc: 0.8441
Epoch 00014: early stopping
80857/80857 [==============================] - 4s     
80857/80857 [==============================] - 4s     
2344960/2345796 [============================>.] - ETA: 0s
CPU times: user 1h 40min 1s, sys: 19min 13s, total: 1h 59min 15s
Wall time: 2h 13min 13s

In [29]:
cv_score = log_loss(y_train, y_train_oofp)
print('CV score:', cv_score)


CV score: 0.360265255459

Save features


In [30]:
features_train = y_train_oofp.reshape((-1, 1))

In [31]:
features_test = np.mean(y_test_oofp, axis=1).reshape((-1, 1))

In [32]:
print('X train:', features_train.shape)
print('X test: ', features_test.shape)


X train: (404290, 1)
X test:  (2345796, 1)

In [33]:
feature_names = [feature_list_id]

In [34]:
project.save_features(features_train, features_test, feature_names, feature_list_id)

Explore


In [35]:
pd.DataFrame(features_test).plot.hist()


Out[35]:
<matplotlib.axes._subplots.AxesSubplot at 0x7f6fcfa06940>