In [ ]:
# data analysis and wrangling
import pandas as pd
import numpy as np
import random as rnd
# visualization
import seaborn as sns
import matplotlib.pyplot as plt
%matplotlib inline
In [ ]:
train_df = pd.read_csv('./train.csv')
test_df = pd.read_csv('./test.csv')
combine = [train_df, test_df]
In [ ]:
## Add Title: extract the honorific (Mr, Mrs, Miss, ...) from Name
for dataset in combine:
    dataset['Title'] = dataset.Name.str.extract(r' ([A-Za-z]+)\.', expand=False)
pd.crosstab(train_df['Title'], train_df['Survived'])
In [ ]:
## Consolidate rare titles and encode Title as an ordinal feature
for dataset in combine:
    dataset['Title'] = dataset.Name.str.extract(r' ([A-Za-z]+)\.', expand=False)
    dataset['Title'] = dataset['Title'].replace(['Lady', 'Countess', 'Capt', 'Col', 'Don', 'Dr', 'Sir', 'Major', 'Jonkheer', 'Dona'], 'Rare')
    dataset['Title'] = dataset['Title'].replace('Mlle', 'Miss')
    dataset['Title'] = dataset['Title'].replace('Ms', 'Miss')
    dataset['Title'] = dataset['Title'].replace('Mme', 'Mrs')
print(train_df[['Title', 'Survived']].groupby(['Title'], as_index=False).mean())
title_mapping = {"Mr": 1, "Miss": 2, "Mrs": 3, "Master": 4, "Rare": 5, "Rev": 6}
for dataset in combine:
    dataset['Title'] = dataset['Title'].map(title_mapping)
    dataset['Title'] = dataset['Title'].fillna(0)
In [ ]:
## Convert Sex to int
for dataset in combine:
    dataset['SexType'] = dataset['Sex'].map({'female': 1, 'male': 0}).astype(int)
pd.crosstab(train_df['SexType'], train_df['Survived'])
In [ ]:
## Fill missing ages with the median age of each (SexType, Pclass) group
guess_ages = np.zeros((2, 3))
guess_ages
for dataset in combine:
    for i in range(0, 2):      # SexType
        for j in range(0, 3):  # Pclass
            guess_df = dataset[(dataset['SexType'] == i) & (dataset['Pclass'] == j + 1)]['Age'].dropna()
            age_guess = guess_df.median()
            # Round the group median to the nearest 0.5
            guess_ages[i, j] = int(age_guess / 0.5 + 0.5) * 0.5
    for i in range(0, 2):
        for j in range(0, 3):
            dataset.loc[dataset.Age.isnull() & (dataset.SexType == i) & (dataset.Pclass == j + 1), 'Age'] = guess_ages[i, j]
    dataset['Age'] = dataset['Age'].astype(int)
train_df.Age
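In [ ]:
## Sketch only, not used by the pipeline above: the same median-per-group
## imputation can be written more compactly with groupby/transform. Note it
## skips the round-to-nearest-0.5 step, and by this point Age has already been
## filled, so running it here is a no-op.
for dataset in combine:
    dataset['Age'] = dataset['Age'].fillna(
        dataset.groupby(['SexType', 'Pclass'])['Age'].transform('median'))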
In [ ]:
for dataset in combine:
    dataset['FamilySize'] = dataset['SibSp'] + dataset['Parch']
train_df[['FamilySize', 'Survived']].groupby(['FamilySize'], as_index=False).mean().sort_values(by='Survived', ascending=False)
In [ ]:
## Add family features
for dataset in combine:
    dataset['IsAlone'] = 0
    dataset.loc[dataset['FamilySize'] == 0, 'IsAlone'] = 1
train_df[['IsAlone', 'Survived']].groupby(['IsAlone'], as_index=False).mean()
In [ ]:
## Bin Age into ordinal bands
for dataset in combine:
    dataset.loc[dataset['Age'] <= 16, 'Age'] = 0
    dataset.loc[(dataset['Age'] > 16) & (dataset['Age'] <= 32), 'Age'] = 1
    dataset.loc[(dataset['Age'] > 32) & (dataset['Age'] <= 48), 'Age'] = 2
    dataset.loc[(dataset['Age'] > 48) & (dataset['Age'] <= 64), 'Age'] = 3
    dataset.loc[dataset['Age'] > 64, 'Age'] = 4
train_df.head()
In [ ]:
## Add Age*Class
for dataset in combine:
    dataset['Age*Class'] = dataset.Age * dataset.Pclass
train_df.loc[:, ['Age*Class', 'Age', 'Pclass']].head(10)
In [ ]:
## Fill missing Embarked values with the most common port, 'S'
for dataset in combine:
    dataset['Embarked'] = dataset['Embarked'].fillna('S')
train_df[['Embarked', 'Survived']].groupby(['Embarked'], as_index=False).mean().sort_values(by='Survived', ascending=False)
In [ ]:
for dataset in combine:
    dataset['EmbarkedType'] = dataset['Embarked'].map({'S': 0, 'C': 1, 'Q': 2}).astype(int)
train_df.head()
In [ ]:
## Fill the missing Fare in the test set with the median fare
test_df['Fare'] = test_df['Fare'].fillna(test_df['Fare'].dropna().median())
In [ ]:
## Band Fare into quartiles (FareBand), then encode as ordinals
train_df['FareBand'] = pd.qcut(train_df['Fare'], 4)
train_df[['FareBand', 'Survived']].groupby(['FareBand'], as_index=False).mean().sort_values(by='FareBand', ascending=True)
for dataset in combine:
    dataset.loc[dataset['Fare'] <= 7.91, 'Fare'] = 0
    dataset.loc[(dataset['Fare'] > 7.91) & (dataset['Fare'] <= 14.454), 'Fare'] = 1
    dataset.loc[(dataset['Fare'] > 14.454) & (dataset['Fare'] <= 31), 'Fare'] = 2
    dataset.loc[dataset['Fare'] > 31, 'Fare'] = 3
    dataset['Fare'] = dataset['Fare'].astype(int)
train_df = train_df.drop(['FareBand'], axis=1)
combine = [train_df, test_df]
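In [ ]:
## Sketch only: the hard-coded cut points above (7.91, 14.454, 31) are the
## FareBand quartile edges. Had it been run instead of the manual thresholds,
## the same 0-3 encoding could be derived from the training data and reused on
## the test set (kept commented out because Fare is already encoded by now):
# _, fare_bins = pd.qcut(train_df['Fare'], 4, retbins=True)
# train_df['Fare'] = pd.qcut(train_df['Fare'], 4, labels=False)
# test_df['Fare'] = pd.cut(test_df['Fare'], bins=fare_bins, labels=False, include_lowest=True)
# # test fares outside the train range would need clipping before pd.cut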
In [ ]:
train_df
In [ ]:
##
## Predict
##
### cross validation
from sklearn.model_selection import cross_val_score
###
from sklearn.ensemble import RandomForestClassifier
import xgboost as xgb
In [ ]:
X_train = train_df.drop(["PassengerId", "Survived", "Name", "Sex", "FamilySize", "SibSp", "Parch", "Ticket", "Cabin", "Embarked", "EmbarkedType"], axis=1)
Y_train = train_df["Survived"]
X_test = test_df.drop(["PassengerId", "Name", "Sex", "FamilySize", "SibSp", "Parch", "Ticket", "Cabin", "Embarked", "EmbarkedType"], axis=1).copy()
print(X_train.shape, Y_train.shape, X_test.shape)
X_train
In [ ]:
# Random Forest
random_forest = RandomForestClassifier(n_estimators=200, max_depth=5)
random_forest.fit(X_train, Y_train)
Y_pred = random_forest.predict(X_test)
random_forest.score(X_train, Y_train)
acc_random_forest = round(random_forest.score(X_train, Y_train) * 100, 2)
print("Training Accuracy = %s" % acc_random_forest)
print("Cross Validation Accuracy = %s " % cross_val_score(random_forest, X_train, Y_train, cv=4).mean())
In [ ]:
gbm = xgb.XGBClassifier(max_depth=5, n_estimators=200, learning_rate=0.05).fit(X_train, Y_train)
Y_pred = gbm.predict(X_test)
acc_xgboost = round(gbm.score(X_train, Y_train) * 100, 2)
print("Training Accuracy = %s" % acc_xgboost)
print("Cross Validation Accuracy = %s " % cross_val_score(gbm, X_train, Y_train, cv=4).mean())
In [ ]:
submission = pd.DataFrame({
    "PassengerId": test_df["PassengerId"],
    "Survived": Y_pred
})
print(submission)
submission.to_csv('./my_submission2.csv', index=False)
## This submission scores 0.77033
In [ ]: