In [1]:
# Data analysis and preprocessing
import pandas as pd
import numpy as np
import random as rnd
# Data visualization
import seaborn as sns
import matplotlib.pyplot as plt
%matplotlib inline
# Machine learning algorithms
from sklearn.linear_model import LogisticRegression  # logistic regression
from sklearn.linear_model import Perceptron  # perceptron
from sklearn.linear_model import SGDClassifier  # stochastic gradient descent
from sklearn.svm import SVC, LinearSVC  # SVM
from sklearn.ensemble import RandomForestClassifier  # random forest
from sklearn.neighbors import KNeighborsClassifier  # KNN classifier
from sklearn.naive_bayes import GaussianNB  # naive Bayes
from sklearn.tree import DecisionTreeClassifier  # decision tree
In [2]:
train_df = pd.read_csv('./input/titanic/train.csv')
test_df = pd.read_csv('./input/titanic/test.csv')
combine = [train_df,test_df]
In [3]:
print(train_df.columns.values)
In [4]:
# Preview the first rows of the data
train_df.head()
Out[4]:
In [5]:
train_df.tail()
Out[5]:
In [6]:
# Inspect each feature's dtype and missing-value counts
train_df.info()
print('='*40)
test_df.info()
In [7]:
train_df.describe()
Out[7]:
In [8]:
# Correlation between Pclass and survival: the survival rate is clearly
# higher for Pclass = 1, so the feature is strongly correlated with the outcome
train_df[['Pclass', 'Survived']].groupby(['Pclass'], as_index=False).mean().sort_values(by='Survived', ascending=False)
Out[8]:
In [9]:
# Correlation between Sex and survival
train_df[['Sex', 'Survived']].groupby(['Sex'], as_index=False).mean().sort_values(by='Survived', ascending=False)
Out[9]:
In [10]:
train_df[['SibSp','Survived']].groupby(['SibSp'],as_index=False).mean().sort_values(by='Survived',ascending=False)
Out[10]:
In [11]:
train_df[['Parch','Survived']].groupby(['Parch'],as_index=False).mean().sort_values(by='Survived',ascending=False)
Out[11]:
In [12]:
g = sns.FacetGrid(train_df,col='Survived')
g.map(plt.hist,'Age',bins=20)
Out[12]:
In [13]:
grid = sns.FacetGrid(train_df, col='Survived', row='Pclass', height=2.2, aspect=1.6)
grid.map(plt.hist,'Age',bins=20,alpha=.5)
grid.add_legend()
Out[13]:
In [14]:
grid = sns.FacetGrid(train_df, row='Embarked', height=2.2, aspect=1.6)
grid.map(sns.pointplot,'Pclass','Survived','Sex',palette='deep')
grid.add_legend()
Out[14]:
In [15]:
grid = sns.FacetGrid(train_df, row='Embarked', col='Survived', height=2.2, aspect=1.6)
grid.map(sns.barplot,'Sex','Fare',alpha=.5,ci=None)
grid.add_legend()
Out[15]:
In [16]:
# Drop Ticket (high-cardinality) and Cabin (mostly missing) from both sets
print('Before', train_df.shape, test_df.shape, combine[0].shape, combine[1].shape)
train_df = train_df.drop(['Cabin', 'Ticket'], axis=1)
test_df = test_df.drop(['Cabin', 'Ticket'], axis=1)
combine = [train_df, test_df]
print('After', train_df.shape, test_df.shape, combine[0].shape, combine[1].shape)
In [17]:
for dataset in combine:
    # the title is the word immediately before the '.' in Name
    dataset['Title'] = dataset.Name.str.extract(r'([A-Za-z]+)\.', expand=False)
pd.crosstab(train_df['Title'], train_df['Sex'])
Out[17]:
In [18]:
# Group the uncommon titles under a single 'Rare' label and normalize the
# French variants to their English equivalents
for dataset in combine:
    dataset['Title'] = dataset['Title'].replace(['Lady', 'Countess', 'Capt', 'Col', 'Don', 'Dr', 'Major', 'Rev', 'Sir', 'Jonkheer', 'Dona'], 'Rare')
    dataset['Title'] = dataset['Title'].replace('Mlle', 'Miss')
    dataset['Title'] = dataset['Title'].replace('Ms', 'Miss')
    dataset['Title'] = dataset['Title'].replace('Mme', 'Mrs')
train_df[['Title', 'Survived']].groupby(['Title'], as_index=False).mean()
Out[18]:
In [19]:
# Map the categorical Title values to ordinal numbers
title_mapping = {'Mr': 1, 'Miss': 2, 'Mrs': 3, 'Master': 4, 'Rare': 5}
for dataset in combine:
    dataset['Title'] = dataset['Title'].map(title_mapping)
    dataset['Title'] = dataset['Title'].fillna(0)
train_df.head()
Out[19]:
In [20]:
# Name can now be safely dropped; PassengerId is not needed for training either
train_df = train_df.drop(['Name','PassengerId'],axis=1)
test_df = test_df.drop(['Name'],axis=1)
combine = [train_df,test_df]
train_df.shape,test_df.shape
Out[20]:
In [21]:
for dataset in combine:
    dataset['Sex'] = dataset['Sex'].map({'female': 1, 'male': 0}).astype(int)
train_df.head()
Out[21]:
In [22]:
grid = sns.FacetGrid(train_df, row='Pclass', col='Sex', height=2.2, aspect=1.6)
grid.map(plt.hist,'Age',alpha=.5,bins=20)
grid.add_legend()
Out[22]:
In [23]:
# Prepare an empty 2x3 array (Sex x Pclass) to hold the guessed ages
guess_ages = np.zeros((2,3))
guess_ages
Out[23]:
In [24]:
for dataset in combine:
    # median age for each Sex x Pclass combination
    for i in range(0, 2):
        for j in range(0, 3):
            guess_df = dataset[(dataset['Sex'] == i) & (dataset['Pclass'] == j+1)]['Age'].dropna()
            age_guess = guess_df.median()
            # round to the nearest 0.5
            guess_ages[i, j] = int(age_guess/0.5 + 0.5) * 0.5
    # fill missing ages with the guess for the matching Sex/Pclass group
    for i in range(0, 2):
        for j in range(0, 3):
            dataset.loc[(dataset.Age.isnull()) & (dataset.Sex == i) & (dataset.Pclass == j+1), 'Age'] = guess_ages[i, j]
    dataset['Age'] = dataset['Age'].astype(int)
train_df.head()
Out[24]:
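The loops above can be written more compactly with a grouped transform; a minimal sketch that fills each missing age with its Sex/Pclass group median (it skips the rounding to the nearest 0.5 used above):

for dataset in combine:
    dataset['Age'] = dataset.groupby(['Sex', 'Pclass'])['Age'] \
                            .transform(lambda s: s.fillna(s.median()))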
In [25]:
# Create age bands and check their correlation with Survived
train_df['AgeBand'] = pd.cut(train_df['Age'],5)
train_df[['AgeBand','Survived']].groupby(['AgeBand'],as_index=False).mean().sort_values(by='Survived',ascending=False)
Out[25]:
In [26]:
# Replace Age with the ordinal index of its band
for dataset in combine:
    dataset.loc[ dataset['Age'] <= 16, 'Age'] = 0
    dataset.loc[(dataset['Age'] > 16) & (dataset['Age'] <= 32), 'Age'] = 1
    dataset.loc[(dataset['Age'] > 32) & (dataset['Age'] <= 48), 'Age'] = 2
    dataset.loc[(dataset['Age'] > 48) & (dataset['Age'] <= 64), 'Age'] = 3
    dataset.loc[ dataset['Age'] > 64, 'Age'] = 4
train_df.head()
Out[26]:
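For reference, pd.cut can produce the same ordinal codes in a single call; a minimal sketch, assuming the same band edges as above:

for dataset in combine:
    dataset['Age'] = pd.cut(dataset['Age'], bins=[-1, 16, 32, 48, 64, 120],
                            labels=False)  # labels=False yields the bin index 0..4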
In [27]:
# Drop the temporary AgeBand feature
train_df = train_df.drop(['AgeBand'],axis=1)
combine = [train_df,test_df]
train_df.head()
Out[27]:
In [28]:
for dataset in combine:
    dataset['FamilySize'] = dataset['SibSp'] + dataset['Parch'] + 1
train_df[['FamilySize','Survived']].groupby(['FamilySize'],as_index=False).mean().sort_values(by='Survived',ascending=False)
Out[28]:
In [29]:
# Derive an IsAlone flag from FamilySize
for dataset in combine:
    dataset['IsAlone'] = 0
    dataset.loc[dataset['FamilySize'] == 1, 'IsAlone'] = 1
train_df[['IsAlone','Survived']].groupby(['IsAlone'],as_index=False).mean().sort_values(by='Survived',ascending=False)
Out[29]:
In [30]:
# Drop Parch, SibSp and FamilySize, keeping only IsAlone
train_df = train_df.drop(['Parch','SibSp','FamilySize'],axis=1)
test_df = test_df.drop(['Parch','SibSp','FamilySize'],axis=1)
combine = [train_df,test_df]
train_df.head()
Out[30]:
In [31]:
# Combine Age and Pclass into an interaction feature
for dataset in combine:
    dataset['Age*Class'] = dataset.Age * dataset.Pclass
train_df.loc[:,['Age*Class','Age','Pclass']].head(10)
Out[31]:
In [32]:
# Most frequent port of embarkation, used to fill the missing Embarked values
freq_port = train_df.Embarked.dropna().mode()[0]
freq_port
Out[32]:
In [33]:
for dataset in combine:
    dataset['Embarked'] = dataset['Embarked'].fillna(freq_port)
train_df[['Embarked','Survived']].groupby(['Embarked'],as_index=False).mean().sort_values(by='Survived',ascending=False)
Out[33]:
In [34]:
for dataset in combine:
    dataset['Embarked'] = dataset['Embarked'].map({'S': 0, 'C': 1, 'Q': 2}).astype(int)
train_df.head()
Out[34]:
In [35]:
# The test set has a missing Fare; fill it with the median
test_df['Fare'] = test_df['Fare'].fillna(test_df['Fare'].dropna().median())
test_df.head()
Out[35]:
In [36]:
# Create fare bands from quartiles and check their correlation with Survived
train_df['FareBand'] = pd.qcut(train_df['Fare'], 4)
train_df[['FareBand', 'Survived']].groupby(['FareBand'], as_index=False).mean().sort_values(by='FareBand', ascending=True)
Out[36]:
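Rather than hard-coding the quartile boundaries in the next cell, they can be read back from pd.qcut; a minimal sketch:

_, fare_edges = pd.qcut(train_df['Fare'], 4, retbins=True)  # retbins=True also returns the bin edges
print(fare_edges)  # approximately [0, 7.91, 14.454, 31, 512.33]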
In [37]:
for dataset in combine:
    dataset.loc[ dataset['Fare'] <= 7.91, 'Fare'] = 0
    dataset.loc[(dataset['Fare'] > 7.91) & (dataset['Fare'] <= 14.454), 'Fare'] = 1
    dataset.loc[(dataset['Fare'] > 14.454) & (dataset['Fare'] <= 31), 'Fare'] = 2
    dataset.loc[ dataset['Fare'] > 31, 'Fare'] = 3
    dataset['Fare'] = dataset['Fare'].astype(int)
train_df = train_df.drop(['FareBand'], axis=1)
combine = [train_df, test_df]
train_df.head(10)
Out[37]:
In [38]:
test_df.head(10)
Out[38]:
In [39]:
X_train = train_df.drop('Survived',axis=1)
Y_train = train_df['Survived']
X_test = test_df.drop('PassengerId',axis=1).copy()
X_train.shape,Y_train.shape,X_test.shape
Out[39]:
In [40]:
# Logistic Regression
logreg = LogisticRegression()
logreg.fit(X_train,Y_train)
Y_pred = logreg.predict(X_test)
print(Y_pred)
print('='*10)
print(logreg.score(X_train,Y_train))
acc_log = round(logreg.score(X_train,Y_train)*100,2)
print(acc_log)
We can use Logistic Regression to verify our assumptions and conclusions by inspecting the feature coefficients: a positive coefficient increases the log-odds of survival, while a negative one decreases it.
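As a quick aside, exponentiating a coefficient converts it into an odds ratio, which can be easier to read; a minimal sketch, assuming the logreg model fitted above (odds_ratios is a name introduced here for illustration):

odds_ratios = pd.Series(np.exp(logreg.coef_[0]), index=X_train.columns)
odds_ratios.sort_values(ascending=False)  # values > 1 raise the odds of survival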
In [41]:
coeff_df = pd.DataFrame(train_df.columns.delete(0))
coeff_df.columns=['Feature']
coeff_df['Correlation'] = pd.Series(logreg.coef_[0])
coeff_df.sort_values(by='Correlation',ascending=False)
Out[41]:
In [42]:
# Support Vector Machines
svc = SVC()
svc.fit(X_train,Y_train)
Y_predict = svc.predict(X_test)
print(Y_predict)
acc_svc = round(svc.score(X_train,Y_train)*100,2)
acc_svc
Out[42]:
In [43]:
# k-Nearest Neighbors
knn = KNeighborsClassifier(n_neighbors=3)
knn.fit(X_train,Y_train)
Y_predict = knn.predict(X_test)
print(Y_predict)
acc_knn = round(knn.score(X_train,Y_train)*100,2)
acc_knn
Out[43]:
In [44]:
# Gaussian Naive Bayes
nb = GaussianNB()
nb.fit(X_train,Y_train)
Y_predict = nb.predict(X_test)
print(Y_predict)
acc_nb = round(nb.score(X_train,Y_train)*100,2)
acc_nb
Out[44]:
In [45]:
# Perceptron
perceptron = Perceptron()
perceptron.fit(X_train,Y_train)
Y_predict = perceptron.predict(X_test)
print(Y_predict)
acc_perc = round(perceptron.score(X_train,Y_train)*100,2)
acc_perc
Out[45]:
In [46]:
# Linear SVC
linear_svc = LinearSVC()
linear_svc.fit(X_train,Y_train)
Y_predict = linear_svc.predict(X_test)
print(Y_predict)
acc_lsvc = round(linear_svc.score(X_train,Y_train)*100,2)
acc_lsvc
Out[46]:
In [47]:
# SGD
sgd = SGDClassifier()
sgd.fit(X_train,Y_train)
Y_predict= sgd.predict(X_test)
print(Y_predict)
acc_sgd = round(sgd.score(X_train,Y_train)*100,2)
acc_sgd
Out[47]:
In [48]:
# Decision Tree
decision_tree = DecisionTreeClassifier()
decision_tree.fit(X_train,Y_train)
Y_predict= decision_tree.predict(X_test)
print(Y_predict)
acc_dt = round(decision_tree.score(X_train,Y_train)*100,2)
acc_dt
Out[48]:
In [49]:
# Random Forest
random_forest = RandomForestClassifier(n_estimators=100)
random_forest.fit(X_train,Y_train)
Y_predict= random_forest.predict(X_test)
print(Y_predict)
acc_rf = round(random_forest.score(X_train,Y_train)*100,2)
acc_rf
Out[49]:
In [50]:
models = pd.DataFrame({
'Model': ['Support Vector Machines', 'KNN', 'Logistic Regression',
'Random Forest', 'Naive Bayes', 'Perceptron',
'Stochastic Gradient Descent', 'Linear SVC',
'Decision Tree'],
'Score': [acc_svc, acc_knn, acc_log,
acc_rf, acc_nb, acc_perc,
acc_sgd, acc_lsvc, acc_dt]})
models.sort_values(by='Score', ascending=False)
Out[50]:
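Note that every score above is accuracy on the training data, which flatters high-variance models such as the decision tree and random forest. A fairer comparison would use cross-validation; a minimal sketch:

from sklearn.model_selection import cross_val_score
cv_scores = cross_val_score(RandomForestClassifier(n_estimators=100), X_train, Y_train, cv=5)
print(cv_scores.mean(), cv_scores.std())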