In [64]:
import pandas as pd
import numpy as np
import re
import sklearn
import seaborn as sb
import matplotlib.pyplot as plt
%matplotlib inline 

import plotly.offline as ol
import plotly.graph_objs as go
import plotly.tools as tls

from sklearn.ensemble import RandomForestClassifier,GradientBoostingClassifier,AdaBoostClassifier,ExtraTreesClassifier
from sklearn.svm import SVC
from sklearn.cross_validation import KFold
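# Note: sklearn.cross_validation was renamed sklearn.model_selection in
# scikit-learn 0.18; the pre-0.18 KFold(n, n_folds=...) signature is used below.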

In [65]:
train = pd.read_csv('train.csv')
test = pd.read_csv('test.csv')

PassengerId = test['PassengerId']

train.head(3)
test.head(3)


Out[65]:
PassengerId Pclass Name Sex Age SibSp Parch Ticket Fare Cabin Embarked
0 892 3 Kelly, Mr. James male 34.5 0 0 330911 7.8292 NaN Q
1 893 3 Wilkes, Mrs. James (Ellen Needs) female 47.0 1 0 363272 7.0000 NaN S
2 894 2 Myles, Mr. Thomas Francis male 62.0 0 0 240276 9.6875 NaN Q

In [66]:
full_data = [train,test]

train['NameLength'] = train['Name'].apply(len)
test['NameLength'] = test['Name'].apply(len)

# Cabin is NaN (a float) when missing, so HasCabin flags whether a cabin value exists.
train['HasCabin'] = train['Cabin'].apply(lambda x:0 if type(x)==float else 1)
test['HasCabin'] = test['Cabin'].apply(lambda x:0 if type(x)==float else 1)

for dataset in full_data:
    dataset['FamilySize'] = dataset['SibSp'] + dataset['Parch'] +1

for dataset in full_data:
    dataset['IsAlone'] = 0
    dataset.loc[dataset['FamilySize'] == 1, 'IsAlone'] = 1

for dataset in full_data:
    dataset['Embarked'] = dataset['Embarked'].fillna('S')
    
for dataset in full_data:
    dataset['Fare'] = dataset['Fare'].fillna(train['Fare'].median())
train['CategoricalFare'] = pd.qcut(train['Fare'],4)
#train['CategoricalFare']

for dataset in full_data:
    age_avg = dataset['Age'].mean()
    age_std = dataset['Age'].std()
    age_null_count = dataset['Age'].isnull().sum()
    age_null_random_list = np.random.randint(int(age_avg - age_std), int(age_avg + age_std), size=age_null_count)
    dataset.loc[dataset['Age'].isnull(), 'Age'] = age_null_random_list
    dataset['Age'] = dataset['Age'].astype(int)
train['CategoricalAge'] = pd.qcut(train['Age'],5)
#train['Age']

def get_title(name):
    title_search = re.search(r' ([A-Za-z]+)\.', name)
    if title_search:
        return title_search.group(1)
    return ''
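# e.g. get_title('Kelly, Mr. James') -> 'Mr'; a name with no title yields ''.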

for dataset in full_data:
    dataset['Title'] = dataset['Name'].apply(get_title)

for dataset in full_data:
    dataset['Title'] = dataset['Title'].replace(['Lady', 
                                                 'Countess',
                                                 'Capt', 
                                                 'Col',
                                                 'Don', 
                                                 'Dr', 
                                                 'Major', 
                                                 'Rev', 
                                                 'Sir', 
                                                 'Jonkheer', 
                                                 'Dona'],'Rare')
    dataset['Title'] = dataset['Title'].replace('Mlle', 'Miss')
    dataset['Title'] = dataset['Title'].replace('Ms', 'Miss')
    dataset['Title'] = dataset['Title'].replace('Mme', 'Mrs')
        
for dataset in full_data:
    dataset['Sex'] = dataset['Sex'].map({'female':0,'male':1}).astype(int)
    
    title_mapping = {"Mr": 1, "Miss": 2, "Mrs": 3, "Master": 4, "Rare": 5}
    dataset['Title'] = dataset['Title'].map(title_mapping)
    dataset['Title'] = dataset['Title'].fillna(0)
    
    dataset['Embarked'] = dataset['Embarked'].map( {'S': 0, 'C': 1, 'Q': 2} ).astype(int)
    
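    # Fare bins use the quartile edges of CategoricalFare (pd.qcut above).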
    dataset.loc[ dataset['Fare'] <= 7.91, 'Fare']                               = 0
    dataset.loc[(dataset['Fare'] > 7.91) & (dataset['Fare'] <= 14.454), 'Fare'] = 1
    dataset.loc[(dataset['Fare'] > 14.454) & (dataset['Fare'] <= 31), 'Fare']   = 2
    dataset.loc[ dataset['Fare'] > 31, 'Fare']                                  = 3
    dataset['Fare'] = dataset['Fare'].astype(int)

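    # Age bands are fixed 16-year bins: 0-16, 16-32, 32-48, 48-64, 64+.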
    dataset.loc[ dataset['Age'] <= 16, 'Age']                          = 0
    dataset.loc[(dataset['Age'] > 16) & (dataset['Age'] <= 32), 'Age'] = 1
    dataset.loc[(dataset['Age'] > 32) & (dataset['Age'] <= 48), 'Age'] = 2
    dataset.loc[(dataset['Age'] > 48) & (dataset['Age'] <= 64), 'Age'] = 3
    dataset.loc[ dataset['Age'] > 64, 'Age']                           = 4


In [67]:
#feature selection
drop_elements = ['PassengerId','Name','Ticket','Cabin','SibSp']

train = train.drop(drop_elements, axis=1)
train = train.drop(['CategoricalAge','CategoricalFare'], axis=1)
test = test.drop(drop_elements, axis=1)
train.head(3)
test.head(3)


Out[67]:
Pclass Sex Age Parch Fare Embarked NameLength HasCabin FamilySize IsAlone Title
0 3 1 2 0 0 2 16 0 1 1 1
1 3 0 2 0 0 0 32 0 2 0 3
2 2 1 3 0 1 2 25 0 1 1 1

In [68]:
colormap = plt.cm.viridis
plt.figure(figsize=(12,12))
plt.title('Pearson Correlation of Features',y=1.05,size=20)
sb.heatmap(train.astype(float).corr(), linewidths=0.1, vmax=1.0, square=True, cmap=colormap, linecolor='black', annot=True)


Out[68]:
<matplotlib.axes._subplots.AxesSubplot at 0x11820a990>

In [69]:
ntrain = train.shape[0]
ntest = test.shape[0]
SEED = 0
NFOLDS = 5
kf = KFold(ntrain,n_folds=NFOLDS,random_state=SEED)

class SklearnHelper(object):
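    # Thin wrapper that fixes the random seed and exposes a uniform
    # train/predict interface, so the stacking code below can treat all
    # five first-level classifiers interchangeably.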
    def __init__(self,clf,seed=0,params=None):
        params = params or {}
        params['random_state'] = seed
        self.clf = clf(**params)
    
    def train(self,x_train,y_train):
        self.clf.fit(x_train,y_train)
        
    def predict(self,x):
        return self.clf.predict(x)
    
    def fit(self,x,y):
        return self.clf.fit(x,y)
    
    def feature_importances(self,x,y):
        importances = self.clf.fit(x,y).feature_importances_
        print importances
        return importances

In [78]:
def get_oof(clf,x_train,y_train,x_test):
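    # Out-of-fold (OOF) predictions: each training row is predicted by a fold
    # model that never saw it during fitting, so the level-2 features carry no
    # target leakage; test-set predictions are averaged over the NFOLDS models.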
    oof_train = np.zeros((ntrain,))
    oof_test = np.zeros((ntest,))
    oof_test_skf = np.empty((NFOLDS,ntest))
    
    for i, (train_index, test_index) in enumerate(kf):
        x_tr = x_train[train_index]
        y_tr = y_train[train_index]
        x_te = x_train[test_index]
        
        clf.train(x_tr,y_tr)
        
        oof_train[test_index] = clf.predict(x_te)
        oof_test_skf[i,:] = clf.predict(x_test)
        
    oof_test[:] = oof_test_skf.mean(axis=0)
    return oof_train.reshape(-1,1),oof_test.reshape(-1,1)

In [71]:
# Put in our parameters for said classifiers
# Random Forest parameters
rf_params = {
    'n_jobs': -1,
    'n_estimators': 500,
    'max_depth': 6,
    'min_samples_leaf': 2
}

# Extra Trees Parameters
et_params = {
    'n_jobs': -1,
    'n_estimators':500,
    #'max_features': 0.5,
    'max_depth': 8,
    'min_samples_leaf': 2,
    'verbose': 0
}

# AdaBoost parameters
ada_params = {
    'n_estimators': 500,
    'learning_rate' : 0.75
}

# Gradient Boosting parameters
gb_params = {
    'n_estimators': 500,
     #'max_features': 0.2,
    'max_depth': 5,
    'min_samples_leaf': 2,
    'verbose': 0
}

# Support Vector Classifier parameters 
svc_params = {
    'kernel' : 'linear',
    'C' : 0.025
    }

In [72]:
rf = SklearnHelper(clf=RandomForestClassifier,seed=SEED,params=rf_params)
et = SklearnHelper(clf=ExtraTreesClassifier,seed=SEED,params=et_params)
ada = SklearnHelper(clf=AdaBoostClassifier,seed=SEED,params=ada_params)
gb = SklearnHelper(clf=GradientBoostingClassifier,seed=SEED,params=gb_params)
svc = SklearnHelper(clf=SVC,seed=SEED,params=svc_params)

In [73]:
y_train = train['Survived'].ravel()
#y_train
train = train.drop(['Survived'],1)
x_train = train.values
#x_train
x_test = test.values
#x_test


In [82]:
et_oof_train,et_oof_test = get_oof(et,x_train,y_train,x_test)
#et_oof_train,et_oof_test
rf_oof_train, rf_oof_test = get_oof(rf,x_train, y_train, x_test) # Random Forest
ada_oof_train, ada_oof_test = get_oof(ada, x_train, y_train, x_test) # AdaBoost 
gb_oof_train, gb_oof_test = get_oof(gb,x_train, y_train, x_test) # Gradient Boost
svc_oof_train, svc_oof_test = get_oof(svc,x_train, y_train, x_test) # Support Vector Classifier

print"Training is complete"


Training is complete

In [83]:
rf_feature = rf.feature_importances(x_train,y_train)
et_feature = et.feature_importances(x_train, y_train)
ada_feature = ada.feature_importances(x_train, y_train)
gb_feature = gb.feature_importances(x_train,y_train)

#print rf_feature


[ 0.11188501  0.24096399  0.03377232  0.01913512  0.04858808  0.02331378
  0.11099576  0.06608898  0.06970233  0.0110007   0.26455392]
[ 0.11975994  0.38219929  0.02922989  0.01669369  0.05664727  0.02826898
  0.04771528  0.0835143   0.04503197  0.02085702  0.17008237]
[ 0.032  0.012  0.018  0.062  0.036  0.01   0.696  0.014  0.05   0.002
  0.068]
[ 0.06856557  0.04435239  0.10735643  0.03072722  0.11302656  0.05031039
  0.38997706  0.01907095  0.06650129  0.02060515  0.08950699]

In [98]:
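# Importance values copied by hand from the arrays printed by In [83] above.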
rf_feature = [0.11188501, 0.24096399, 0.03377232, 0.01913512, 0.04858808, 0.02331378,
              0.11099576, 0.06608898, 0.06970233, 0.0110007, 0.26455392]
et_feature = [0.11975994, 0.38219929, 0.02922989, 0.01669369, 0.05664727, 0.02826898,
              0.04771528, 0.0835143, 0.04503197, 0.02085702, 0.17008237]
ada_feature = [0.032, 0.012, 0.018, 0.062, 0.036, 0.01, 0.696, 0.014, 0.05, 0.002,
               0.068]
gb_feature = [0.06856557, 0.04435239, 0.10735643, 0.03072722, 0.11302656, 0.05031039,
              0.38997706, 0.01907095, 0.06650129, 0.02060515, 0.08950699]

cols = train.columns.values
# Create a dataframe with features
feature_dataframe = pd.DataFrame({
    'features': cols,
    'Random Forest feature importances': rf_feature,
    'Extra Trees  feature importances': et_feature,
    'AdaBoost feature importances': ada_feature,
    'Gradient Boost feature importances': gb_feature
})

In [99]:
# Scatter plots of each model's feature importances (one plot per model)
ol.init_notebook_mode()

importance_plots = [
    ('Random Forest feature importances', 'Random Forest Feature Importance'),
    ('Extra Trees  feature importances', 'Extra Trees Feature Importance'),
    ('AdaBoost feature importances', 'AdaBoost Feature Importance'),
    ('Gradient Boost feature importances', 'Gradient Boosting Feature Importance'),
]

for column, plot_title in importance_plots:
    trace = go.Scatter(
        y = feature_dataframe[column].values,
        x = feature_dataframe['features'].values,
        mode='markers',
        marker=dict(
            sizemode = 'diameter',
            sizeref = 1,
            size = 25,
            color = feature_dataframe[column].values,
            colorscale='Portland',
            showscale=True
        ),
        text = feature_dataframe['features'].values
    )
    layout = go.Layout(
        autosize= True,
        title= plot_title,
        hovermode= 'closest',
        yaxis=dict(
            title= 'Feature Importance',
            ticklen= 5,
            gridwidth= 2
        ),
        showlegend= False
    )
    fig = go.Figure(data=[trace], layout=layout)
    ol.iplot(fig, filename='scatter2010')



In [100]:
base_predictions_train = pd.DataFrame({
    'RandomForest': rf_oof_train.ravel(),
    'ExtraTrees': et_oof_train.ravel(),
    'AdaBoost': ada_oof_train.ravel(),
    'GradientBoost': gb_oof_train.ravel()
})
base_predictions_train.head()


Out[100]:
AdaBoost ExtraTrees GradientBoost RandomForest
0 0.0 0.0 0.0 0.0
1 1.0 1.0 1.0 1.0
2 1.0 0.0 1.0 0.0
3 1.0 1.0 1.0 1.0
4 0.0 0.0 0.0 0.0

In [102]:
data = [
    go.Heatmap(
        z=base_predictions_train.astype(float).corr().values,
        x=base_predictions_train.columns.values,
        y=base_predictions_train.columns.values,
        colorscale='hot',
        showscale=True,
        reversescale=True
    )
]
ol.iplot(data, filename='labelled-heatmap')



In [103]:
x_train = np.concatenate(( et_oof_train, rf_oof_train, ada_oof_train, gb_oof_train, svc_oof_train), axis=1)
x_test = np.concatenate(( et_oof_test, rf_oof_test, ada_oof_test, gb_oof_test, svc_oof_test), axis=1)
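# Each column of x_train/x_test is now one first-level model's predictions;
# together they form the feature matrix for the second-level learner.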

In [ ]:
# Next: build the second level of the stacked model on top of these first-level predictions.
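#
# A minimal sketch of that second level (an illustrative assumption, not the
# author's final model): fit a meta-classifier on the stacked out-of-fold
# predictions. GradientBoostingClassifier is already imported above; the
# hyperparameters here are placeholders, not tuned values.
meta = GradientBoostingClassifier(n_estimators=500, max_depth=4,
                                  min_samples_leaf=2, random_state=SEED)
meta.fit(x_train, y_train)
predictions = meta.predict(x_test).astype(int)

submission = pd.DataFrame({'PassengerId': PassengerId, 'Survived': predictions})
submission.to_csv('submission.csv', index=False)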