In [86]:
import math
import numpy as np
import pandas as pd
pd.set_option('mode.chained_assignment', None)

In [87]:
# Training and test data files
train_file = '../input/train.csv'
test_file = '../input/test.csv'
model_file_male = '../output/titanic.model.male.json'
model_weights_file_male = '../output/titanic.model.male.best.hdf5'
model_file_female = '../output/titanic.model.female.json'
model_weights_file_female = '../output/titanic.model.female.best.hdf5'
pred_file = '../output/gender_submission.csv'

In [88]:
# Prepare the data for training and testing
from sklearn.preprocessing import MinMaxScaler

title_list=['Mrs', 'Mr', 'Master', 'Miss', 'Major', 'Rev', 'Dr', 'Ms', 'Mlle',
            'Col', 'Capt', 'Mme', 'Countess', 'Don', 'Jonkheer']

import string
def substrings_in_string(big_string, substrings):
    for substring in substrings:
        if substring in big_string:
            return substring
    return np.nan

def prep_data(frame, augmentation=0):
    # Fill missing Age data with median 
    frame['Age'] = frame['Age'].fillna(frame['Age'].mean())
    
    # Generate data about whether adult or minor
    frame['Adult_Or_Minor'] = frame.apply(lambda row: 0 if row['Age'] < 18 else 1, axis=1)

    # Generate data about whether senior citizen
    frame['Senior_Citizen'] = frame.apply(lambda row: 0 if row['Age'] > 65 else 1, axis=1)

    # Fill missing Fare data with median
    frame['Fare'] = frame['Fare'].fillna(frame['Fare'].median())
    
    # Creating new family_size and fare per person columns 
    frame['Family_Size'] = frame['SibSp'] + frame['Parch'] + 1
    frame['Alone'] = frame.apply(lambda row: 1 if row['Family_Size'] == 1 else 0, axis=1)
    frame['Fare_Per_Person'] = frame['Fare']/frame['Family_Size']

    # Convert Sex to number
    #frame['Sex'] = pd.Categorical(frame['Sex']).codes
    frame.pop('Sex')

    # Generate data for missing Embarked and convert to number
    frame['Embarked'] = frame['Embarked'].fillna('X')
    frame['Embarked'] = pd.Categorical(frame['Embarked']).codes
    
    # Extract title from name
    frame['Title'] = frame['Name'].map(lambda x: substrings_in_string(x, title_list))
    frame['Title'] = pd.Categorical(frame['Title']).codes

    # Convert Name into characters
    frame['Name_Length'] = frame.apply(lambda row: len(row['Name']), axis=1)
    frame['Words_In_Name'] = frame.apply(lambda row: len(row['Name'].split()), axis=1)    
    frame.pop('Name')    
    
    # Convert Ticket into characters
    frame['Ticket_Length'] = frame.apply(lambda row: len(row['Ticket']), axis=1)
    frame.pop('Ticket')    
    
    # Convert Cabin column to whether in cabin
    frame['Cabin'] = frame['Cabin'].fillna('')
    frame['In_Cabin'] = frame.apply(lambda row: 1 if row['Cabin'] != '' else 0, axis=1)
    frame['Number_Of_Cabins'] = frame.apply(lambda row: len(row['Cabin'].split()), axis=1)    
    frame.pop('Cabin')
    
    frame.fillna(0, axis=1)
    
    # Introduce rows with some noise
    if augmentation > 0:
        print('Adding more rows to training data')
        row_count = frame.shape[0]
        print('Row count before: ', row_count)
        col_std = np.std(frame) 
        for i in range(0, row_count):
            rand = np.random.random_sample()
            if rand < augmentation:
                row1 = pd.Series(frame.iloc[i])
                row2 = pd.Series(frame.iloc[i])
                col_list = frame.columns.tolist()
                col_list.remove('PassengerId')
                col_list.remove('Survived')
                for col in col_list:
                    row1[col] = row1[col] + rand * col_std[col]
                    row2[col] = row2[col] - rand * col_std[col]
                frame = frame.append(row1)
                frame = frame.append(row2)
        row_count = frame.shape[0]
        print('Row count after: ', row_count)
    
    
    print("Before scaling: ")
    print(frame.head())
    
    # Scale everything except PassengerId
    min_max_scaler = MinMaxScaler()
    col_list = frame.columns.tolist()
    col_list.remove('PassengerId')
    frame = frame[col_list]
    np_scaled = min_max_scaler.fit_transform(frame)
    frame = pd.DataFrame(np_scaled)
    
    print("After scaling: ")
    print(frame.head())

    return frame

In [89]:
# Load training data
df_train_raw = pd.read_csv(train_file)
print(df_train_raw.shape)
df_train_raw.info()
df_train_raw.head()


(891, 12)
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 891 entries, 0 to 890
Data columns (total 12 columns):
PassengerId    891 non-null int64
Survived       891 non-null int64
Pclass         891 non-null int64
Name           891 non-null object
Sex            891 non-null object
Age            714 non-null float64
SibSp          891 non-null int64
Parch          891 non-null int64
Ticket         891 non-null object
Fare           891 non-null float64
Cabin          204 non-null object
Embarked       889 non-null object
dtypes: float64(2), int64(5), object(5)
memory usage: 83.6+ KB
Out[89]:
PassengerId Survived Pclass Name Sex Age SibSp Parch Ticket Fare Cabin Embarked
0 1 0 3 Braund, Mr. Owen Harris male 22.0 1 0 A/5 21171 7.2500 NaN S
1 2 1 1 Cumings, Mrs. John Bradley (Florence Briggs Th... female 38.0 1 0 PC 17599 71.2833 C85 C
2 3 1 3 Heikkinen, Miss. Laina female 26.0 0 0 STON/O2. 3101282 7.9250 NaN S
3 4 1 1 Futrelle, Mrs. Jacques Heath (Lily May Peel) female 35.0 1 0 113803 53.1000 C123 S
4 5 0 3 Allen, Mr. William Henry male 35.0 0 0 373450 8.0500 NaN S

In [90]:
# Prep training data
df_train_male_raw = df_train_raw[df_train_raw.Sex == 'male']
df_train_female_raw = df_train_raw[df_train_raw.Sex == 'female']
df_train_male = prep_data(df_train_male_raw, augmentation=0.05)
df_train_female = prep_data(df_train_female_raw, augmentation=0.015)


Adding more rows to training data
Row count before:  577
Row count after:  633
Before scaling: 
   PassengerId  Survived  Pclass        Age  SibSp  Parch     Fare  Embarked  \
0          1.0       0.0     3.0  22.000000    1.0    0.0   7.2500       2.0   
4          5.0       0.0     3.0  35.000000    0.0    0.0   8.0500       2.0   
5          6.0       0.0     3.0  30.726645    0.0    0.0   8.4583       1.0   
6          7.0       0.0     1.0  54.000000    0.0    0.0  51.8625       2.0   
7          8.0       0.0     3.0   2.000000    3.0    1.0  21.0750       2.0   

   Adult_Or_Minor  Senior_Citizen  Family_Size  Alone  Fare_Per_Person  Title  \
0             1.0             1.0          2.0    0.0           3.6250    7.0   
4             1.0             1.0          1.0    1.0           8.0500    7.0   
5             1.0             1.0          1.0    1.0           8.4583    7.0   
6             1.0             1.0          1.0    1.0          51.8625    7.0   
7             0.0             1.0          5.0    0.0           4.2150    6.0   

   Name_Length  Words_In_Name  Ticket_Length  In_Cabin  Number_Of_Cabins  
0         23.0            4.0            9.0       0.0               0.0  
4         24.0            4.0            6.0       0.0               0.0  
5         16.0            3.0            6.0       0.0               0.0  
6         23.0            4.0            5.0       1.0               1.0  
7         30.0            4.0            6.0       0.0               0.0  
After scaling: 
    0             1         2      3    4         5         6         7   \
0  0.0  9.962368e-01  0.271174  0.125  0.0  0.014151  0.996506  1.000000   
1  0.0  9.962368e-01  0.434531  0.000  0.0  0.015713  0.996506  1.000000   
2  0.0  9.962368e-01  0.380832  0.000  0.0  0.016510  0.498253  1.000000   
3  0.0  5.551115e-17  0.673285  0.000  0.0  0.101229  0.996506  1.000000   
4  0.0  9.962368e-01  0.019854  0.375  0.2  0.041136  0.996506  0.002787   

         8             9        10        11     12        13    14        15  \
0  0.998914  1.000000e-01  0.00419  0.007076  0.875  0.297297  0.25  0.400000   
1  0.998914  1.387779e-17  1.00000  0.015713  0.875  0.324324  0.25  0.200000   
2  0.998914  1.387779e-17  1.00000  0.016510  0.875  0.108108  0.00  0.200000   
3  0.998914  1.387779e-17  1.00000  0.101229  0.875  0.297297  0.25  0.133333   
4  0.998914  4.000000e-01  0.00419  0.008227  0.750  0.486486  0.25  0.200000   

         16        17  
0  0.003599  0.001520  
1  0.003599  0.001520  
2  0.003599  0.001520  
3  1.000000  0.334347  
4  0.003599  0.001520  
Adding more rows to training data
Row count before:  314
Row count after:  324
Before scaling: 
   PassengerId  Survived  Pclass   Age  SibSp  Parch     Fare  Embarked  \
1          2.0       1.0     1.0  38.0    1.0    0.0  71.2833       0.0   
2          3.0       1.0     3.0  26.0    0.0    0.0   7.9250       2.0   
3          4.0       1.0     1.0  35.0    1.0    0.0  53.1000       2.0   
8          9.0       1.0     3.0  27.0    0.0    2.0  11.1333       2.0   
9         10.0       1.0     2.0  14.0    1.0    0.0  30.0708       0.0   

   Adult_Or_Minor  Senior_Citizen  Family_Size  Alone  Fare_Per_Person  Title  \
1             1.0             1.0          2.0    0.0         35.64165    5.0   
2             1.0             1.0          1.0    1.0          7.92500    2.0   
3             1.0             1.0          2.0    0.0         26.55000    5.0   
8             1.0             1.0          3.0    0.0          3.71110    5.0   
9             0.0             1.0          2.0    0.0         15.03540    5.0   

   Name_Length  Words_In_Name  Ticket_Length  In_Cabin  Number_Of_Cabins  
1         51.0            7.0            8.0       1.0               1.0  
2         22.0            3.0           16.0       0.0               0.0  
3         44.0            7.0            6.0       1.0               1.0  
8         49.0            7.0            6.0       0.0               0.0  
9         35.0            5.0            6.0       0.0               0.0  
After scaling: 
    0         1         2         3         4         5         6         7   \
0  1.0  0.000000  0.598394  0.125374  0.000000  0.127642  0.000000  0.998875   
1  1.0  0.998734  0.405622  0.000427  0.000000  0.002324  0.666667  0.998875   
2  1.0  0.000000  0.550201  0.125374  0.000000  0.091677  0.666667  0.998875   
3  1.0  0.998734  0.421687  0.000427  0.333333  0.008670  0.666667  0.998875   
4  1.0  0.499367  0.212851  0.125374  0.000000  0.046127  0.000000  0.000000   

    8             9        10        11        12        13        14  \
0  0.0  1.000000e-01  0.00145  0.067507  0.833333  0.530303  0.363636   
1  0.0  1.387779e-17  1.00000  0.013288  0.333333  0.090909  0.000000   
2  0.0  1.000000e-01  0.00145  0.049722  0.833333  0.424242  0.363636   
3  0.0  2.000000e-01  0.00145  0.005045  0.833333  0.500000  0.363636   
4  0.0  1.000000e-01  0.00145  0.027197  0.833333  0.287879  0.181818   

         15        16        17  
0  0.307692  1.000000  0.250347  
1  0.923077  0.001367  0.000463  
2  0.153846  1.000000  0.250347  
3  0.153846  0.001367  0.000463  
4  0.153846  0.001367  0.000463  

In [91]:
# Construct the X array for males
X_train_male = np.array(df_train_male)[:,1:]
X_train_male = X_train_male.astype('float32')
print(X_train_male.shape)
print(X_train_male[0])

# Construct the X array for females
X_train_female = np.array(df_train_female)[:,1:]
X_train_female = X_train_female.astype('float32')
print(X_train_female.shape)
print(X_train_female[0])


(633, 17)
[ 0.9962368   0.27117366  0.125       0.          0.01415106  0.99650586
  1.          0.99891442  0.1         0.00418962  0.00707553  0.875
  0.2972973   0.25        0.40000001  0.00359913  0.00152015]
(324, 17)
[ 0.          0.59839356  0.12537391  0.          0.12764232  0.
  0.99887544  0.          0.1         0.00144972  0.06750725  0.83333331
  0.530303    0.36363637  0.30769232  1.          0.25034687]

In [92]:
# Extract survived data as predictions
from keras.utils.np_utils import to_categorical

y_train_male = np.array(df_train_male)[:,0]
y_train_male = y_train_male.astype('int')
y_train_male = to_categorical(y_train_male, 2)
print(y_train_male.shape)
print(y_train_male[0:5])

y_train_female = np.array(df_train_female)[:,0]
y_train_female = y_train_female.astype('int')
y_train_female = to_categorical(y_train_female, 2)
print(y_train_female.shape)
print(y_train_female[0:5])


(633, 2)
[[ 1.  0.]
 [ 1.  0.]
 [ 1.  0.]
 [ 1.  0.]
 [ 1.  0.]]
(324, 2)
[[ 0.  1.]
 [ 0.  1.]
 [ 0.  1.]
 [ 0.  1.]
 [ 0.  1.]]

In [93]:
# Load test data
df_test_raw = pd.read_csv(test_file)
print(df_test_raw.shape)
df_test_raw.head()
df_test_raw.info()


(418, 11)
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 418 entries, 0 to 417
Data columns (total 11 columns):
PassengerId    418 non-null int64
Pclass         418 non-null int64
Name           418 non-null object
Sex            418 non-null object
Age            332 non-null float64
SibSp          418 non-null int64
Parch          418 non-null int64
Ticket         418 non-null object
Fare           417 non-null float64
Cabin          91 non-null object
Embarked       418 non-null object
dtypes: float64(2), int64(4), object(5)
memory usage: 36.0+ KB

In [94]:
# Prepare the data for testing
df_test_male_raw = df_test_raw[df_test_raw.Sex == 'male']
df_test_female_raw = df_test_raw[df_test_raw.Sex == 'female']
df_test_male = prep_data(df_test_male_raw)
df_test_female = prep_data(df_test_female_raw)


Before scaling: 
   PassengerId  Pclass   Age  SibSp  Parch     Fare  Embarked  Adult_Or_Minor  \
0          892       3  34.5      0      0   7.8292         1               1   
2          894       2  62.0      0      0   9.6875         1               1   
3          895       3  27.0      0      0   8.6625         2               1   
5          897       3  14.0      0      0   9.2250         2               0   
7          899       2  26.0      1      1  29.0000         2               1   

   Senior_Citizen  Family_Size  Alone  Fare_Per_Person  Title  Name_Length  \
0               1            1      1         7.829200      3           16   
2               1            1      1         9.687500      3           25   
3               1            1      1         8.662500      3           16   
5               1            1      1         9.225000      3           26   
7               1            3      0         9.666667      3           28   

   Words_In_Name  Ticket_Length  In_Cabin  Number_Of_Cabins  
0              3              6         0                 0  
2              4              6         0                 0  
3              3              6         0                 0  
5              4              4         0                 0  
7              4              6         0                 0  
After scaling: 
    0         1      2         3         4    5    6    7    8    9   \
0  1.0  0.512524  0.000  0.000000  0.029840  0.5  1.0  1.0  0.0  1.0   
1  0.5  0.925004  0.000  0.000000  0.036922  0.5  1.0  1.0  0.0  1.0   
2  1.0  0.400030  0.000  0.000000  0.033016  1.0  1.0  1.0  0.0  1.0   
3  1.0  0.205040  0.000  0.000000  0.035160  1.0  0.0  1.0  0.0  1.0   
4  0.5  0.385031  0.125  0.111111  0.110529  1.0  1.0  1.0  0.2  0.0   

         10    11     12    13        14   15   16  
0  0.037017  0.75  0.075  0.00  0.200000  0.0  0.0  
1  0.045804  0.75  0.300  0.25  0.200000  0.0  0.0  
2  0.040957  0.75  0.075  0.00  0.200000  0.0  0.0  
3  0.043617  0.75  0.325  0.25  0.066667  0.0  0.0  
4  0.045705  0.75  0.375  0.25  0.200000  0.0  0.0  
Before scaling: 
    PassengerId  Pclass   Age  SibSp  Parch     Fare  Embarked  \
1           893       3  47.0      1      0   7.0000         2   
4           896       3  22.0      1      1  12.2875         2   
6           898       3  30.0      0      0   7.6292         1   
8           900       3  18.0      0      0   7.2292         0   
12          904       1  23.0      1      0  82.2667         2   

    Adult_Or_Minor  Senior_Citizen  Family_Size  Alone  Fare_Per_Person  \
1                1               1            2      0         3.500000   
4                1               1            3      0         4.095833   
6                1               1            1      1         7.629200   
8                1               1            1      1         7.229200   
12               1               1            2      0        41.133350   

    Title  Name_Length  Words_In_Name  Ticket_Length  In_Cabin  \
1       2           32              5              6         0   
4       2           44              6              7         0   
6       1           20              3              6         0   
8       2           41              6              4         0   
12      2           45              6              5         1   

    Number_Of_Cabins  
1                  0  
4                  0  
6                  0  
8                  0  
12                 1  
After scaling: 
    0         1      2         3         4    5    6    7    8    9   \
0  1.0  0.617566  0.125  0.000000  0.000099  1.0  1.0  1.0  0.1  0.0   
1  1.0  0.287881  0.125  0.111111  0.010561  1.0  1.0  1.0  0.2  0.0   
2  1.0  0.393380  0.000  0.000000  0.001344  0.5  1.0  1.0  0.0  1.0   
3  1.0  0.235131  0.000  0.000000  0.000552  0.0  1.0  1.0  0.0  1.0   
4  0.0  0.301068  0.125  0.000000  0.149030  1.0  1.0  1.0  0.1  0.0   

         10        11        12   13        14   15    16  
0  0.009145  0.666667  0.354167  0.4  0.142857  0.0  0.00  
1  0.011426  0.666667  0.604167  0.6  0.214286  0.0  0.00  
2  0.024950  0.333333  0.104167  0.0  0.142857  0.0  0.00  
3  0.023419  0.666667  0.541667  0.6  0.000000  0.0  0.00  
4  0.153188  0.666667  0.625000  0.6  0.071429  1.0  0.25  

In [95]:
# Construct the X array for males
X_test_male = np.array(df_test_male)[:,:]
X_test_male = X_test_male.astype('float32')
print(X_test_male.shape)
print(X_test_male[0])

# Construct the X array for females
X_test_female = np.array(df_test_female)[:,:]
X_test_female = X_test_female.astype('float32')
print(X_test_female.shape)
print(X_test_female[0])


(266, 17)
[ 1.          0.51252437  0.          0.          0.02983973  0.5         1.
  1.          0.          1.          0.03701749  0.75        0.075       0.
  0.2         0.          0.        ]
(152, 17)
[  1.00000000e+00   6.17565632e-01   1.25000000e-01   0.00000000e+00
   9.89356122e-05   1.00000000e+00   1.00000000e+00   1.00000000e+00
   1.00000001e-01   0.00000000e+00   9.14509129e-03   6.66666687e-01
   3.54166657e-01   4.00000006e-01   1.42857149e-01   0.00000000e+00
   0.00000000e+00]

In [96]:
# Build a training network

from keras.models import Sequential
from keras.layers import Dense, Dropout, RepeatVector, Flatten, Activation
from keras.callbacks import ModelCheckpoint, EarlyStopping
from keras.optimizers import SGD
from keras.layers.advanced_activations import LeakyReLU

def build_model(input_shape):
    model = Sequential()
    model.add(Dense(891, activation='relu', input_shape=input_shape))
    model.add(Dropout(0.25))
    model.add(Dense(445, activation='relu'))
    model.add(Dropout(0.5))
#    model.add(Dense(222, activation='relu'))
#    model.add(Dropout(0.75))
    model.add(Dense(2, activation='sigmoid'))
    model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])

    model.summary()
    
    return model

In [97]:
# Save the model
def save_model(model, model_file):
    model_json = model.to_json()
    with open(model_file, 'w') as json_file:
        json_file.write(model_json)

In [98]:
# Train the model
def train_model(model, model_weights_file, X_train, y_train):
    checkpointer = ModelCheckpoint(filepath=model_weights_file, verbose=1, save_best_only=True)
    stopper = EarlyStopping(monitor='val_loss', min_delta=1e-4, patience=20, verbose=1, mode='auto')
    hist = model.fit(X_train, y_train, epochs=200, batch_size=20, validation_split=0.3,
                     callbacks=[checkpointer, stopper], 
                     verbose=1, shuffle=True)

In [99]:
# Build and train model for males
model_male = build_model(input_shape=(X_train_male.shape[1],))
save_model(model_male, model_file_male)
train_model(model_male, model_weights_file_male, X_train_male, y_train_male)

# Load the weights that yielded the best validation accuracy
model_male.load_weights(model_weights_file_male)

# Evaluate the model on the training set
score_male = model_male.evaluate(X_train_male, y_train_male)
print("\nTraining Accuracy:", score_male[1])


_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
dense_41 (Dense)             (None, 891)               16038     
_________________________________________________________________
dropout_31 (Dropout)         (None, 891)               0         
_________________________________________________________________
dense_42 (Dense)             (None, 445)               396940    
_________________________________________________________________
dropout_32 (Dropout)         (None, 445)               0         
_________________________________________________________________
dense_43 (Dense)             (None, 2)                 892       
=================================================================
Total params: 413,870
Trainable params: 413,870
Non-trainable params: 0
_________________________________________________________________
Train on 443 samples, validate on 190 samples
Epoch 1/200
340/443 [======================>.......] - ETA: 0s - loss: 0.5105 - acc: 0.7765Epoch 00000: val_loss improved from inf to 0.38710, saving model to ../output/titanic.model.male.best.hdf5
443/443 [==============================] - 1s - loss: 0.4962 - acc: 0.7856 - val_loss: 0.3871 - val_acc: 0.8421
Epoch 2/200
400/443 [==========================>...] - ETA: 0s - loss: 0.4456 - acc: 0.8112Epoch 00001: val_loss improved from 0.38710 to 0.35259, saving model to ../output/titanic.model.male.best.hdf5
443/443 [==============================] - 0s - loss: 0.4560 - acc: 0.8093 - val_loss: 0.3526 - val_acc: 0.8579
Epoch 3/200
400/443 [==========================>...] - ETA: 0s - loss: 0.4306 - acc: 0.8287- ETA: 0s - loss: 0.4218 - acc: 0.828Epoch 00002: val_loss improved from 0.35259 to 0.35019, saving model to ../output/titanic.model.male.best.hdf5
443/443 [==============================] - 0s - loss: 0.4465 - acc: 0.8138 - val_loss: 0.3502 - val_acc: 0.8684
Epoch 4/200
340/443 [======================>.......] - ETA: 0s - loss: 0.4396 - acc: 0.8029Epoch 00003: val_loss improved from 0.35019 to 0.34500, saving model to ../output/titanic.model.male.best.hdf5
443/443 [==============================] - 0s - loss: 0.4381 - acc: 0.8138 - val_loss: 0.3450 - val_acc: 0.8684
Epoch 5/200
440/443 [============================>.] - ETA: 0s - loss: 0.4526 - acc: 0.8205Epoch 00004: val_loss did not improve
443/443 [==============================] - 0s - loss: 0.4515 - acc: 0.8217 - val_loss: 0.3702 - val_acc: 0.8711
Epoch 6/200
420/443 [===========================>..] - ETA: 0s - loss: 0.4275 - acc: 0.8310Epoch 00005: val_loss did not improve
443/443 [==============================] - 0s - loss: 0.4428 - acc: 0.8239 - val_loss: 0.3531 - val_acc: 0.8605
Epoch 7/200
400/443 [==========================>...] - ETA: 0s - loss: 0.4390 - acc: 0.8287Epoch 00006: val_loss improved from 0.34500 to 0.33666, saving model to ../output/titanic.model.male.best.hdf5
443/443 [==============================] - 0s - loss: 0.4366 - acc: 0.8273 - val_loss: 0.3367 - val_acc: 0.8737
Epoch 8/200
380/443 [========================>.....] - ETA: 0s - loss: 0.4285 - acc: 0.8474Epoch 00007: val_loss improved from 0.33666 to 0.33089, saving model to ../output/titanic.model.male.best.hdf5
443/443 [==============================] - 0s - loss: 0.4366 - acc: 0.8375 - val_loss: 0.3309 - val_acc: 0.8737
Epoch 9/200
360/443 [=======================>......] - ETA: 0s - loss: 0.4380 - acc: 0.8292Epoch 00008: val_loss improved from 0.33089 to 0.31552, saving model to ../output/titanic.model.male.best.hdf5
443/443 [==============================] - 0s - loss: 0.4180 - acc: 0.8341 - val_loss: 0.3155 - val_acc: 0.8737
Epoch 10/200
400/443 [==========================>...] - ETA: 0s - loss: 0.4191 - acc: 0.8275Epoch 00009: val_loss did not improve
443/443 [==============================] - 0s - loss: 0.4191 - acc: 0.8307 - val_loss: 0.3853 - val_acc: 0.8526
Epoch 11/200
380/443 [========================>.....] - ETA: 0s - loss: 0.4436 - acc: 0.8250Epoch 00010: val_loss did not improve
443/443 [==============================] - 0s - loss: 0.4368 - acc: 0.8352 - val_loss: 0.3289 - val_acc: 0.8789
Epoch 12/200
400/443 [==========================>...] - ETA: 0s - loss: 0.4058 - acc: 0.8325Epoch 00011: val_loss did not improve
443/443 [==============================] - 0s - loss: 0.4236 - acc: 0.8273 - val_loss: 0.3232 - val_acc: 0.8737
Epoch 13/200
420/443 [===========================>..] - ETA: 0s - loss: 0.4166 - acc: 0.8286Epoch 00012: val_loss did not improve
443/443 [==============================] - 0s - loss: 0.4112 - acc: 0.8330 - val_loss: 0.3159 - val_acc: 0.8789
Epoch 14/200
420/443 [===========================>..] - ETA: 0s - loss: 0.4224 - acc: 0.8381Epoch 00013: val_loss did not improve
443/443 [==============================] - 0s - loss: 0.4169 - acc: 0.8397 - val_loss: 0.3269 - val_acc: 0.8842
Epoch 15/200
380/443 [========================>.....] - ETA: 0s - loss: 0.4124 - acc: 0.8408Epoch 00014: val_loss improved from 0.31552 to 0.29921, saving model to ../output/titanic.model.male.best.hdf5
443/443 [==============================] - 0s - loss: 0.4093 - acc: 0.8431 - val_loss: 0.2992 - val_acc: 0.8842
Epoch 16/200
440/443 [============================>.] - ETA: 0s - loss: 0.4297 - acc: 0.8352Epoch 00015: val_loss did not improve
443/443 [==============================] - 0s - loss: 0.4325 - acc: 0.8341 - val_loss: 0.3263 - val_acc: 0.8737
Epoch 17/200
420/443 [===========================>..] - ETA: 0s - loss: 0.4160 - acc: 0.8476Epoch 00016: val_loss did not improve
443/443 [==============================] - 0s - loss: 0.4062 - acc: 0.8510 - val_loss: 0.3097 - val_acc: 0.8789
Epoch 18/200
380/443 [========================>.....] - ETA: 0s - loss: 0.4007 - acc: 0.8487Epoch 00017: val_loss did not improve
443/443 [==============================] - 0s - loss: 0.4037 - acc: 0.8488 - val_loss: 0.3251 - val_acc: 0.8974
Epoch 19/200
440/443 [============================>.] - ETA: 0s - loss: 0.4110 - acc: 0.8455Epoch 00018: val_loss did not improve
443/443 [==============================] - 0s - loss: 0.4091 - acc: 0.8465 - val_loss: 0.3027 - val_acc: 0.8789
Epoch 20/200
420/443 [===========================>..] - ETA: 0s - loss: 0.4083 - acc: 0.8500Epoch 00019: val_loss did not improve
443/443 [==============================] - 0s - loss: 0.4004 - acc: 0.8533 - val_loss: 0.3186 - val_acc: 0.9000
Epoch 21/200
380/443 [========================>.....] - ETA: 0s - loss: 0.4154 - acc: 0.8421Epoch 00020: val_loss improved from 0.29921 to 0.29878, saving model to ../output/titanic.model.male.best.hdf5
443/443 [==============================] - 0s - loss: 0.3963 - acc: 0.8555 - val_loss: 0.2988 - val_acc: 0.8789
Epoch 22/200
400/443 [==========================>...] - ETA: 0s - loss: 0.4225 - acc: 0.8425Epoch 00021: val_loss did not improve
443/443 [==============================] - 0s - loss: 0.4176 - acc: 0.8465 - val_loss: 0.3212 - val_acc: 0.8947
Epoch 23/200
400/443 [==========================>...] - ETA: 0s - loss: 0.3993 - acc: 0.8513Epoch 00022: val_loss did not improve
443/443 [==============================] - 0s - loss: 0.3983 - acc: 0.8488 - val_loss: 0.3333 - val_acc: 0.8789
Epoch 24/200
360/443 [=======================>......] - ETA: 0s - loss: 0.4139 - acc: 0.8486Epoch 00023: val_loss did not improve
443/443 [==============================] - 0s - loss: 0.4022 - acc: 0.8499 - val_loss: 0.2991 - val_acc: 0.8868
Epoch 25/200
440/443 [============================>.] - ETA: 0s - loss: 0.3814 - acc: 0.8545Epoch 00024: val_loss did not improve
443/443 [==============================] - 0s - loss: 0.3826 - acc: 0.8533 - val_loss: 0.3012 - val_acc: 0.8895
Epoch 26/200
400/443 [==========================>...] - ETA: 0s - loss: 0.4094 - acc: 0.8288Epoch 00025: val_loss improved from 0.29878 to 0.29464, saving model to ../output/titanic.model.male.best.hdf5
443/443 [==============================] - 0s - loss: 0.3984 - acc: 0.8363 - val_loss: 0.2946 - val_acc: 0.8868
Epoch 27/200
380/443 [========================>.....] - ETA: 0s - loss: 0.3750 - acc: 0.8592Epoch 00026: val_loss did not improve
443/443 [==============================] - 0s - loss: 0.3882 - acc: 0.8488 - val_loss: 0.3352 - val_acc: 0.9053
Epoch 28/200
440/443 [============================>.] - ETA: 0s - loss: 0.3759 - acc: 0.8580Epoch 00027: val_loss did not improve
443/443 [==============================] - 0s - loss: 0.3809 - acc: 0.8544 - val_loss: 0.2994 - val_acc: 0.9000
Epoch 29/200
440/443 [============================>.] - ETA: 0s - loss: 0.4135 - acc: 0.8432Epoch 00028: val_loss did not improve
443/443 [==============================] - 0s - loss: 0.4157 - acc: 0.8420 - val_loss: 0.3086 - val_acc: 0.8842
Epoch 30/200
440/443 [============================>.] - ETA: 0s - loss: 0.3991 - acc: 0.8580Epoch 00029: val_loss did not improve
443/443 [==============================] - 0s - loss: 0.3971 - acc: 0.8589 - val_loss: 0.2969 - val_acc: 0.8842
Epoch 31/200
360/443 [=======================>......] - ETA: 0s - loss: 0.3694 - acc: 0.8667Epoch 00030: val_loss did not improve
443/443 [==============================] - 0s - loss: 0.3865 - acc: 0.8578 - val_loss: 0.3028 - val_acc: 0.9000
Epoch 32/200
380/443 [========================>.....] - ETA: 0s - loss: 0.3652 - acc: 0.8592Epoch 00031: val_loss did not improve
443/443 [==============================] - 0s - loss: 0.3760 - acc: 0.8499 - val_loss: 0.3176 - val_acc: 0.8895
Epoch 33/200
380/443 [========================>.....] - ETA: 0s - loss: 0.3681 - acc: 0.8605Epoch 00032: val_loss improved from 0.29464 to 0.28783, saving model to ../output/titanic.model.male.best.hdf5
443/443 [==============================] - 0s - loss: 0.3808 - acc: 0.8589 - val_loss: 0.2878 - val_acc: 0.8895
Epoch 34/200
380/443 [========================>.....] - ETA: 0s - loss: 0.3750 - acc: 0.8566Epoch 00033: val_loss did not improve
443/443 [==============================] - 0s - loss: 0.3744 - acc: 0.8634 - val_loss: 0.2982 - val_acc: 0.8947
Epoch 35/200
420/443 [===========================>..] - ETA: 0s - loss: 0.3833 - acc: 0.8500Epoch 00034: val_loss did not improve
443/443 [==============================] - 0s - loss: 0.3829 - acc: 0.8499 - val_loss: 0.3288 - val_acc: 0.8605
Epoch 36/200
440/443 [============================>.] - ETA: 0s - loss: 0.3848 - acc: 0.8534Epoch 00035: val_loss did not improve
443/443 [==============================] - 0s - loss: 0.3848 - acc: 0.8521 - val_loss: 0.3005 - val_acc: 0.8816
Epoch 37/200
400/443 [==========================>...] - ETA: 0s - loss: 0.3940 - acc: 0.8487Epoch 00036: val_loss did not improve
443/443 [==============================] - 0s - loss: 0.3807 - acc: 0.8567 - val_loss: 0.2984 - val_acc: 0.8868
Epoch 38/200
400/443 [==========================>...] - ETA: 0s - loss: 0.3715 - acc: 0.8550Epoch 00037: val_loss did not improve
443/443 [==============================] - 0s - loss: 0.3780 - acc: 0.8510 - val_loss: 0.3026 - val_acc: 0.8921
Epoch 39/200
440/443 [============================>.] - ETA: 0s - loss: 0.3706 - acc: 0.8636Epoch 00038: val_loss did not improve
443/443 [==============================] - 0s - loss: 0.3732 - acc: 0.8623 - val_loss: 0.3205 - val_acc: 0.8632
Epoch 40/200
360/443 [=======================>......] - ETA: 0s - loss: 0.3634 - acc: 0.8681Epoch 00039: val_loss did not improve
443/443 [==============================] - 0s - loss: 0.3759 - acc: 0.8567 - val_loss: 0.3028 - val_acc: 0.8895
Epoch 41/200
340/443 [======================>.......] - ETA: 0s - loss: 0.3526 - acc: 0.8691Epoch 00040: val_loss did not improve
443/443 [==============================] - 0s - loss: 0.3812 - acc: 0.8544 - val_loss: 0.3047 - val_acc: 0.8895
Epoch 42/200
420/443 [===========================>..] - ETA: 0s - loss: 0.3606 - acc: 0.8607Epoch 00041: val_loss did not improve
443/443 [==============================] - 0s - loss: 0.3585 - acc: 0.8634 - val_loss: 0.2993 - val_acc: 0.8816
Epoch 43/200
380/443 [========================>.....] - ETA: 0s - loss: 0.3568 - acc: 0.8750Epoch 00042: val_loss did not improve
443/443 [==============================] - 0s - loss: 0.3601 - acc: 0.8657 - val_loss: 0.3017 - val_acc: 0.8947
Epoch 44/200
420/443 [===========================>..] - ETA: 0s - loss: 0.3724 - acc: 0.8571Epoch 00043: val_loss did not improve
443/443 [==============================] - 0s - loss: 0.3600 - acc: 0.8623 - val_loss: 0.2944 - val_acc: 0.8868
Epoch 45/200
420/443 [===========================>..] - ETA: 0s - loss: 0.3598 - acc: 0.8595Epoch 00044: val_loss did not improve
443/443 [==============================] - 0s - loss: 0.3696 - acc: 0.8555 - val_loss: 0.3087 - val_acc: 0.8842
Epoch 46/200
380/443 [========================>.....] - ETA: 0s - loss: 0.3424 - acc: 0.8776Epoch 00045: val_loss did not improve
443/443 [==============================] - 0s - loss: 0.3602 - acc: 0.8634 - val_loss: 0.3365 - val_acc: 0.8421
Epoch 47/200
440/443 [============================>.] - ETA: 0s - loss: 0.3558 - acc: 0.8625Epoch 00046: val_loss did not improve
443/443 [==============================] - 0s - loss: 0.3553 - acc: 0.8634 - val_loss: 0.3421 - val_acc: 0.8316
Epoch 48/200
380/443 [========================>.....] - ETA: 0s - loss: 0.3505 - acc: 0.8711Epoch 00047: val_loss did not improve
443/443 [==============================] - 0s - loss: 0.3462 - acc: 0.8725 - val_loss: 0.3229 - val_acc: 0.8632
Epoch 49/200
380/443 [========================>.....] - ETA: 0s - loss: 0.3320 - acc: 0.8737Epoch 00048: val_loss did not improve
443/443 [==============================] - 0s - loss: 0.3553 - acc: 0.8646 - val_loss: 0.3091 - val_acc: 0.8789
Epoch 50/200
440/443 [============================>.] - ETA: 0s - loss: 0.3446 - acc: 0.8648Epoch 00049: val_loss did not improve
443/443 [==============================] - 0s - loss: 0.3471 - acc: 0.8634 - val_loss: 0.3469 - val_acc: 0.8553
Epoch 51/200
420/443 [===========================>..] - ETA: 0s - loss: 0.3387 - acc: 0.8643Epoch 00050: val_loss did not improve
443/443 [==============================] - 0s - loss: 0.3362 - acc: 0.8646 - val_loss: 0.3030 - val_acc: 0.8763
Epoch 52/200
420/443 [===========================>..] - ETA: 0s - loss: 0.3571 - acc: 0.8500Epoch 00051: val_loss did not improve
443/443 [==============================] - 0s - loss: 0.3583 - acc: 0.8488 - val_loss: 0.3125 - val_acc: 0.8895
Epoch 53/200
420/443 [===========================>..] - ETA: 0s - loss: 0.3532 - acc: 0.8524Epoch 00052: val_loss did not improve
443/443 [==============================] - 0s - loss: 0.3481 - acc: 0.8567 - val_loss: 0.3780 - val_acc: 0.8289
Epoch 54/200
440/443 [============================>.] - ETA: 0s - loss: 0.3443 - acc: 0.8682Epoch 00053: val_loss did not improve
443/443 [==============================] - 0s - loss: 0.3469 - acc: 0.8646 - val_loss: 0.3205 - val_acc: 0.8632
Epoch 00053: early stopping
633/633 [==============================] - 0s     

Training Accuracy: 0.868088467615

In [100]:
# Build and train model for males
model_female = build_model(input_shape=(X_train_female.shape[1],))
save_model(model_female, model_file_female)
train_model(model_female, model_weights_file_female, X_train_female, y_train_female)

# Load the weights that yielded the best validation accuracy
model_female.load_weights(model_weights_file_female)

# Evaluate the model on the training set
score_female = model_female.evaluate(X_train_female, y_train_female)
print("\nTraining Accuracy:", score_female[1])


_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
dense_44 (Dense)             (None, 891)               16038     
_________________________________________________________________
dropout_33 (Dropout)         (None, 891)               0         
_________________________________________________________________
dense_45 (Dense)             (None, 445)               396940    
_________________________________________________________________
dropout_34 (Dropout)         (None, 445)               0         
_________________________________________________________________
dense_46 (Dense)             (None, 2)                 892       
=================================================================
Total params: 413,870
Trainable params: 413,870
Non-trainable params: 0
_________________________________________________________________
Train on 226 samples, validate on 98 samples
Epoch 1/200
200/226 [=========================>....] - ETA: 0s - loss: 0.6030 - acc: 0.7425Epoch 00000: val_loss improved from inf to 0.56775, saving model to ../output/titanic.model.female.best.hdf5
226/226 [==============================] - 0s - loss: 0.6102 - acc: 0.7323 - val_loss: 0.5678 - val_acc: 0.7245
Epoch 2/200
200/226 [=========================>....] - ETA: 0s - loss: 0.5209 - acc: 0.7425Epoch 00001: val_loss improved from 0.56775 to 0.50554, saving model to ../output/titanic.model.female.best.hdf5
226/226 [==============================] - 0s - loss: 0.5113 - acc: 0.7456 - val_loss: 0.5055 - val_acc: 0.7245
Epoch 3/200
220/226 [============================>.] - ETA: 0s - loss: 0.4755 - acc: 0.7477Epoch 00002: val_loss improved from 0.50554 to 0.49414, saving model to ../output/titanic.model.female.best.hdf5
226/226 [==============================] - 0s - loss: 0.4753 - acc: 0.7456 - val_loss: 0.4941 - val_acc: 0.7500
Epoch 4/200
180/226 [======================>.......] - ETA: 0s - loss: 0.4746 - acc: 0.7750Epoch 00003: val_loss improved from 0.49414 to 0.47348, saving model to ../output/titanic.model.female.best.hdf5
226/226 [==============================] - 0s - loss: 0.4605 - acc: 0.7677 - val_loss: 0.4735 - val_acc: 0.7551
Epoch 5/200
200/226 [=========================>....] - ETA: 0s - loss: 0.4571 - acc: 0.7675Epoch 00004: val_loss improved from 0.47348 to 0.47300, saving model to ../output/titanic.model.female.best.hdf5
226/226 [==============================] - 0s - loss: 0.4480 - acc: 0.7765 - val_loss: 0.4730 - val_acc: 0.7551
Epoch 6/200
160/226 [====================>.........] - ETA: 0s - loss: 0.3935 - acc: 0.7906Epoch 00005: val_loss did not improve
226/226 [==============================] - 0s - loss: 0.4415 - acc: 0.7788 - val_loss: 0.4895 - val_acc: 0.7755
Epoch 7/200
140/226 [=================>............] - ETA: 0s - loss: 0.4088 - acc: 0.7929Epoch 00006: val_loss improved from 0.47300 to 0.46387, saving model to ../output/titanic.model.female.best.hdf5
226/226 [==============================] - 0s - loss: 0.4418 - acc: 0.7765 - val_loss: 0.4639 - val_acc: 0.7653
Epoch 8/200
160/226 [====================>.........] - ETA: 0s - loss: 0.4586 - acc: 0.7844Epoch 00007: val_loss did not improve
226/226 [==============================] - 0s - loss: 0.4413 - acc: 0.7965 - val_loss: 0.4732 - val_acc: 0.7755
Epoch 9/200
220/226 [============================>.] - ETA: 0s - loss: 0.4364 - acc: 0.7795Epoch 00008: val_loss improved from 0.46387 to 0.45483, saving model to ../output/titanic.model.female.best.hdf5
226/226 [==============================] - 0s - loss: 0.4358 - acc: 0.7788 - val_loss: 0.4548 - val_acc: 0.7755
Epoch 10/200
200/226 [=========================>....] - ETA: 0s - loss: 0.4182 - acc: 0.7750Epoch 00009: val_loss improved from 0.45483 to 0.45298, saving model to ../output/titanic.model.female.best.hdf5
226/226 [==============================] - 0s - loss: 0.4260 - acc: 0.7854 - val_loss: 0.4530 - val_acc: 0.7857
Epoch 11/200
160/226 [====================>.........] - ETA: 0s - loss: 0.3939 - acc: 0.8188Epoch 00010: val_loss improved from 0.45298 to 0.43693, saving model to ../output/titanic.model.female.best.hdf5
226/226 [==============================] - 0s - loss: 0.4353 - acc: 0.7898 - val_loss: 0.4369 - val_acc: 0.8061
Epoch 12/200
220/226 [============================>.] - ETA: 0s - loss: 0.4271 - acc: 0.7750Epoch 00011: val_loss did not improve
226/226 [==============================] - 0s - loss: 0.4311 - acc: 0.7743 - val_loss: 0.4529 - val_acc: 0.7653
Epoch 13/200
180/226 [======================>.......] - ETA: 0s - loss: 0.4493 - acc: 0.7778Epoch 00012: val_loss did not improve
226/226 [==============================] - 0s - loss: 0.4297 - acc: 0.7876 - val_loss: 0.4464 - val_acc: 0.7755
Epoch 14/200
180/226 [======================>.......] - ETA: 0s - loss: 0.4112 - acc: 0.7944Epoch 00013: val_loss did not improve
226/226 [==============================] - 0s - loss: 0.4206 - acc: 0.7920 - val_loss: 0.4410 - val_acc: 0.7857
Epoch 15/200
220/226 [============================>.] - ETA: 0s - loss: 0.4085 - acc: 0.8000Epoch 00014: val_loss improved from 0.43693 to 0.42762, saving model to ../output/titanic.model.female.best.hdf5
226/226 [==============================] - 0s - loss: 0.4042 - acc: 0.8009 - val_loss: 0.4276 - val_acc: 0.8061
Epoch 16/200
140/226 [=================>............] - ETA: 0s - loss: 0.4016 - acc: 0.8143Epoch 00015: val_loss did not improve
226/226 [==============================] - 0s - loss: 0.4063 - acc: 0.8142 - val_loss: 0.4390 - val_acc: 0.7755
Epoch 17/200
200/226 [=========================>....] - ETA: 0s - loss: 0.3789 - acc: 0.7950Epoch 00016: val_loss improved from 0.42762 to 0.42718, saving model to ../output/titanic.model.female.best.hdf5
226/226 [==============================] - 0s - loss: 0.4028 - acc: 0.7832 - val_loss: 0.4272 - val_acc: 0.7959
Epoch 18/200
180/226 [======================>.......] - ETA: 0s - loss: 0.4253 - acc: 0.7972Epoch 00017: val_loss did not improve
226/226 [==============================] - 0s - loss: 0.4005 - acc: 0.8097 - val_loss: 0.4318 - val_acc: 0.7755
Epoch 19/200
220/226 [============================>.] - ETA: 0s - loss: 0.3895 - acc: 0.8114Epoch 00018: val_loss improved from 0.42718 to 0.41515, saving model to ../output/titanic.model.female.best.hdf5
226/226 [==============================] - 0s - loss: 0.4057 - acc: 0.7965 - val_loss: 0.4151 - val_acc: 0.8112
Epoch 20/200
200/226 [=========================>....] - ETA: 0s - loss: 0.3788 - acc: 0.8175Epoch 00019: val_loss did not improve
226/226 [==============================] - 0s - loss: 0.4210 - acc: 0.7920 - val_loss: 0.4235 - val_acc: 0.7602
Epoch 21/200
200/226 [=========================>....] - ETA: 0s - loss: 0.3950 - acc: 0.7950Epoch 00020: val_loss did not improve
226/226 [==============================] - 0s - loss: 0.4020 - acc: 0.7920 - val_loss: 0.4226 - val_acc: 0.8112
Epoch 22/200
200/226 [=========================>....] - ETA: 0s - loss: 0.3766 - acc: 0.8000Epoch 00021: val_loss improved from 0.41515 to 0.41086, saving model to ../output/titanic.model.female.best.hdf5
226/226 [==============================] - 0s - loss: 0.3875 - acc: 0.7987 - val_loss: 0.4109 - val_acc: 0.8061
Epoch 23/200
180/226 [======================>.......] - ETA: 0s - loss: 0.4001 - acc: 0.8000Epoch 00022: val_loss did not improve
226/226 [==============================] - 0s - loss: 0.3891 - acc: 0.8119 - val_loss: 0.4166 - val_acc: 0.8010
Epoch 24/200
220/226 [============================>.] - ETA: 0s - loss: 0.3948 - acc: 0.7932Epoch 00023: val_loss improved from 0.41086 to 0.40805, saving model to ../output/titanic.model.female.best.hdf5
226/226 [==============================] - 0s - loss: 0.3919 - acc: 0.7987 - val_loss: 0.4080 - val_acc: 0.8112
Epoch 25/200
200/226 [=========================>....] - ETA: 0s - loss: 0.3802 - acc: 0.7975Epoch 00024: val_loss did not improve
226/226 [==============================] - 0s - loss: 0.3914 - acc: 0.8009 - val_loss: 0.4176 - val_acc: 0.7959
Epoch 26/200
120/226 [==============>...............] - ETA: 0s - loss: 0.3728 - acc: 0.7958Epoch 00025: val_loss improved from 0.40805 to 0.39571, saving model to ../output/titanic.model.female.best.hdf5
226/226 [==============================] - 0s - loss: 0.3772 - acc: 0.7987 - val_loss: 0.3957 - val_acc: 0.8265
Epoch 27/200
120/226 [==============>...............] - ETA: 0s - loss: 0.3801 - acc: 0.8083Epoch 00026: val_loss did not improve
226/226 [==============================] - 0s - loss: 0.3703 - acc: 0.8208 - val_loss: 0.3997 - val_acc: 0.8163
Epoch 28/200
120/226 [==============>...............] - ETA: 0s - loss: 0.3017 - acc: 0.8708Epoch 00027: val_loss did not improve
226/226 [==============================] - 0s - loss: 0.3773 - acc: 0.8252 - val_loss: 0.4000 - val_acc: 0.8010
Epoch 29/200
140/226 [=================>............] - ETA: 0s - loss: 0.3683 - acc: 0.8214Epoch 00028: val_loss did not improve
226/226 [==============================] - 0s - loss: 0.3722 - acc: 0.8186 - val_loss: 0.4117 - val_acc: 0.8010
Epoch 30/200
140/226 [=================>............] - ETA: 0s - loss: 0.4101 - acc: 0.7964Epoch 00029: val_loss did not improve
226/226 [==============================] - 0s - loss: 0.3855 - acc: 0.8097 - val_loss: 0.4098 - val_acc: 0.7857
Epoch 31/200
140/226 [=================>............] - ETA: 0s - loss: 0.3264 - acc: 0.8821Epoch 00030: val_loss did not improve
226/226 [==============================] - 0s - loss: 0.3774 - acc: 0.8296 - val_loss: 0.4008 - val_acc: 0.8265
Epoch 32/200
120/226 [==============>...............] - ETA: 0s - loss: 0.3695 - acc: 0.8250Epoch 00031: val_loss did not improve
226/226 [==============================] - 0s - loss: 0.3718 - acc: 0.8296 - val_loss: 0.4017 - val_acc: 0.7908
Epoch 33/200
220/226 [============================>.] - ETA: 0s - loss: 0.3835 - acc: 0.8182Epoch 00032: val_loss did not improve
226/226 [==============================] - 0s - loss: 0.3871 - acc: 0.8186 - val_loss: 0.4072 - val_acc: 0.8112
Epoch 34/200
140/226 [=================>............] - ETA: 0s - loss: 0.3825 - acc: 0.8321Epoch 00033: val_loss improved from 0.39571 to 0.39241, saving model to ../output/titanic.model.female.best.hdf5
226/226 [==============================] - 0s - loss: 0.3569 - acc: 0.8274 - val_loss: 0.3924 - val_acc: 0.8163
Epoch 35/200
220/226 [============================>.] - ETA: 0s - loss: 0.3672 - acc: 0.8159Epoch 00034: val_loss did not improve
226/226 [==============================] - 0s - loss: 0.3679 - acc: 0.8164 - val_loss: 0.4248 - val_acc: 0.7959
Epoch 36/200
120/226 [==============>...............] - ETA: 0s - loss: 0.3497 - acc: 0.8292Epoch 00035: val_loss improved from 0.39241 to 0.38485, saving model to ../output/titanic.model.female.best.hdf5
226/226 [==============================] - 0s - loss: 0.3629 - acc: 0.8296 - val_loss: 0.3849 - val_acc: 0.8265
Epoch 37/200
120/226 [==============>...............] - ETA: 0s - loss: 0.3825 - acc: 0.8292Epoch 00036: val_loss did not improve
226/226 [==============================] - 0s - loss: 0.3690 - acc: 0.8186 - val_loss: 0.4220 - val_acc: 0.8061
Epoch 38/200
220/226 [============================>.] - ETA: 0s - loss: 0.3717 - acc: 0.8205Epoch 00037: val_loss did not improve
226/226 [==============================] - 0s - loss: 0.3677 - acc: 0.8208 - val_loss: 0.4200 - val_acc: 0.8061
Epoch 39/200
220/226 [============================>.] - ETA: 0s - loss: 0.3469 - acc: 0.8386Epoch 00038: val_loss did not improve
226/226 [==============================] - 0s - loss: 0.3517 - acc: 0.8341 - val_loss: 0.3974 - val_acc: 0.8265
Epoch 40/200
180/226 [======================>.......] - ETA: 0s - loss: 0.3441 - acc: 0.8306Epoch 00039: val_loss improved from 0.38485 to 0.37849, saving model to ../output/titanic.model.female.best.hdf5
226/226 [==============================] - 0s - loss: 0.3807 - acc: 0.8075 - val_loss: 0.3785 - val_acc: 0.8367
Epoch 41/200
220/226 [============================>.] - ETA: 0s - loss: 0.3626 - acc: 0.8114Epoch 00040: val_loss did not improve
226/226 [==============================] - 0s - loss: 0.3719 - acc: 0.8031 - val_loss: 0.3920 - val_acc: 0.8316
Epoch 42/200
220/226 [============================>.] - ETA: 0s - loss: 0.3379 - acc: 0.8455Epoch 00041: val_loss did not improve
226/226 [==============================] - 0s - loss: 0.3417 - acc: 0.8451 - val_loss: 0.3971 - val_acc: 0.7908
Epoch 43/200
120/226 [==============>...............] - ETA: 0s - loss: 0.3406 - acc: 0.8250Epoch 00042: val_loss did not improve
226/226 [==============================] - 0s - loss: 0.3497 - acc: 0.8385 - val_loss: 0.3983 - val_acc: 0.8214
Epoch 44/200
220/226 [============================>.] - ETA: 0s - loss: 0.3382 - acc: 0.8409Epoch 00043: val_loss did not improve
226/226 [==============================] - 0s - loss: 0.3382 - acc: 0.8407 - val_loss: 0.3930 - val_acc: 0.8367
Epoch 45/200
140/226 [=================>............] - ETA: 0s - loss: 0.3503 - acc: 0.8107Epoch 00044: val_loss did not improve
226/226 [==============================] - 0s - loss: 0.3472 - acc: 0.8142 - val_loss: 0.4067 - val_acc: 0.8061
Epoch 46/200
220/226 [============================>.] - ETA: 0s - loss: 0.3373 - acc: 0.8409Epoch 00045: val_loss did not improve
226/226 [==============================] - 0s - loss: 0.3341 - acc: 0.8451 - val_loss: 0.4189 - val_acc: 0.8214
Epoch 47/200
200/226 [=========================>....] - ETA: 0s - loss: 0.3119 - acc: 0.8475Epoch 00046: val_loss did not improve
226/226 [==============================] - 0s - loss: 0.3232 - acc: 0.8341 - val_loss: 0.3944 - val_acc: 0.8163
Epoch 48/200
180/226 [======================>.......] - ETA: 0s - loss: 0.3517 - acc: 0.8389Epoch 00047: val_loss did not improve
226/226 [==============================] - 0s - loss: 0.3325 - acc: 0.8496 - val_loss: 0.3875 - val_acc: 0.8367
Epoch 49/200
220/226 [============================>.] - ETA: 0s - loss: 0.3129 - acc: 0.8659Epoch 00048: val_loss did not improve
226/226 [==============================] - 0s - loss: 0.3267 - acc: 0.8606 - val_loss: 0.3883 - val_acc: 0.8367
Epoch 50/200
220/226 [============================>.] - ETA: 0s - loss: 0.3336 - acc: 0.8432Epoch 00049: val_loss did not improve
226/226 [==============================] - 0s - loss: 0.3317 - acc: 0.8473 - val_loss: 0.3964 - val_acc: 0.8010
Epoch 51/200
220/226 [============================>.] - ETA: 0s - loss: 0.3246 - acc: 0.8386Epoch 00050: val_loss did not improve
226/226 [==============================] - 0s - loss: 0.3308 - acc: 0.8341 - val_loss: 0.3942 - val_acc: 0.8163
Epoch 52/200
220/226 [============================>.] - ETA: 0s - loss: 0.3354 - acc: 0.8477Epoch 00051: val_loss did not improve
226/226 [==============================] - 0s - loss: 0.3342 - acc: 0.8429 - val_loss: 0.4509 - val_acc: 0.7806
Epoch 53/200
220/226 [============================>.] - ETA: 0s - loss: 0.3191 - acc: 0.8500Epoch 00052: val_loss did not improve
226/226 [==============================] - 0s - loss: 0.3145 - acc: 0.8540 - val_loss: 0.3912 - val_acc: 0.8163
Epoch 54/200
220/226 [============================>.] - ETA: 0s - loss: 0.3297 - acc: 0.8636Epoch 00053: val_loss did not improve
226/226 [==============================] - 0s - loss: 0.3251 - acc: 0.8673 - val_loss: 0.4105 - val_acc: 0.8367
Epoch 55/200
220/226 [============================>.] - ETA: 0s - loss: 0.3422 - acc: 0.8295Epoch 00054: val_loss did not improve
226/226 [==============================] - 0s - loss: 0.3437 - acc: 0.8252 - val_loss: 0.4034 - val_acc: 0.8214
Epoch 56/200
220/226 [============================>.] - ETA: 0s - loss: 0.3256 - acc: 0.8523Epoch 00055: val_loss did not improve
226/226 [==============================] - 0s - loss: 0.3218 - acc: 0.8562 - val_loss: 0.4186 - val_acc: 0.8214
Epoch 57/200
120/226 [==============>...............] - ETA: 0s - loss: 0.3314 - acc: 0.8667Epoch 00056: val_loss did not improve
226/226 [==============================] - 0s - loss: 0.3235 - acc: 0.8584 - val_loss: 0.3918 - val_acc: 0.8214
Epoch 58/200
200/226 [=========================>....] - ETA: 0s - loss: 0.3078 - acc: 0.8700Epoch 00057: val_loss did not improve
226/226 [==============================] - 0s - loss: 0.3081 - acc: 0.8628 - val_loss: 0.3842 - val_acc: 0.8673
Epoch 59/200
220/226 [============================>.] - ETA: 0s - loss: 0.2852 - acc: 0.8864Epoch 00058: val_loss did not improve
226/226 [==============================] - 0s - loss: 0.2947 - acc: 0.8761 - val_loss: 0.3838 - val_acc: 0.8316
Epoch 60/200
140/226 [=================>............] - ETA: 0s - loss: 0.3149 - acc: 0.8536Epoch 00059: val_loss did not improve
226/226 [==============================] - 0s - loss: 0.3381 - acc: 0.8274 - val_loss: 0.4032 - val_acc: 0.7959
Epoch 61/200
200/226 [=========================>....] - ETA: 0s - loss: 0.3160 - acc: 0.8550Epoch 00060: val_loss did not improve
226/226 [==============================] - 0s - loss: 0.3236 - acc: 0.8473 - val_loss: 0.4140 - val_acc: 0.8265
Epoch 00060: early stopping
 32/324 [=>............................] - ETA: 0s
Training Accuracy: 0.83487654321

In [101]:
# Predict for test data
y_test_male = model_male.predict(X_test_male)
print(y_test_male[0])

y_test_female = model_female.predict(X_test_female)
print(y_test_female[0])


[ 0.92958492  0.0684656 ]
[ 0.52061284  0.51384306]

In [102]:
# Save predictions
with open(pred_file, 'w') as f:
    f.write('PassengerId,Survived\n')
    for index, y_hat in enumerate(y_test_male):
        prediction = np.argmax(y_hat)
        f.write(str(int(df_test_male_raw.iloc[index]['PassengerId'])) + ',' + str(prediction)+'\n')
    for index, y_hat in enumerate(y_test_female):
        prediction = np.argmax(y_hat)
        f.write(str(int(df_test_female_raw.iloc[index]['PassengerId'])) + ',' + str(prediction)+'\n')
    f.close()

In [ ]: