In [113]:
import pandas as pd
import numpy as np
from sklearn import tree, model_selection, ensemble, linear_model, preprocessing
import xgboost as xgb

def clean_data(data):
    data["Fare"] = data["Fare"].fillna(data["Fare"].dropna().median())
    data["Age"] = data["Age"].fillna(data["Age"].dropna().median())

    data.loc[data["Sex"] == "male", "Sex"] = 0
    data.loc[data["Sex"] == "female", "Sex"] = 1

    data["Embarked"] = data["Embarked"].fillna("S")
    data.loc[data["Embarked"] == "S", "Embarked"] = 0
    data.loc[data["Embarked"] == "C", "Embarked"] = 1
    data.loc[data["Embarked"] == "Q", "Embarked"] = 2
    
def write_prediction(prediction, name):
    PassengerId = np.array(test["PassengerId"]).astype(int)
    solution = pd.DataFrame(prediction, PassengerId, columns = ["Survived"])
    solution.to_csv(name, index_label = ["PassengerId"])

# Definition of the CategoricalEncoder class, copied from PR #9151.
# Just run this cell, or copy it to your code, no need to try to
# understand every line.

from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.utils import check_array
from sklearn.preprocessing import LabelEncoder
from scipy import sparse

class CategoricalEncoder(BaseEstimator, TransformerMixin):
    """Encode categorical features as a numeric array.
    The input to this transformer should be a matrix of integers or strings,
    denoting the values taken on by categorical (discrete) features.
    The features can be encoded using a one-hot aka one-of-K scheme
    (``encoding='onehot'``, the default) or converted to ordinal integers
    (``encoding='ordinal'``).
    This encoding is needed for feeding categorical data to many scikit-learn
    estimators, notably linear models and SVMs with the standard kernels.
    Read more in the :ref:`User Guide <preprocessing_categorical_features>`.
    Parameters
    ----------
    encoding : str, 'onehot', 'onehot-dense' or 'ordinal'
        The type of encoding to use (default is 'onehot'):
        - 'onehot': encode the features using a one-hot aka one-of-K scheme
          (or also called 'dummy' encoding). This creates a binary column for
          each category and returns a sparse matrix.
        - 'onehot-dense': the same as 'onehot' but returns a dense array
          instead of a sparse matrix.
        - 'ordinal': encode the features as ordinal integers. This results in
          a single column of integers (0 to n_categories - 1) per feature.
    categories : 'auto' or a list of lists/arrays of values.
        Categories (unique values) per feature:
        - 'auto' : Determine categories automatically from the training data.
        - list : ``categories[i]`` holds the categories expected in the ith
          column. The passed categories are sorted before encoding the data
          (used categories can be found in the ``categories_`` attribute).
    dtype : number type, default np.float64
        Desired dtype of output.
    handle_unknown : 'error' (default) or 'ignore'
        Whether to raise an error or ignore if an unknown categorical feature is
        present during transform (default is to raise). When this parameter
        is set to 'ignore' and an unknown category is encountered during
        transform, the resulting one-hot encoded columns for this feature
        will be all zeros.
        Ignoring unknown categories is not supported for
        ``encoding='ordinal'``.
    Attributes
    ----------
    categories_ : list of arrays
        The categories of each feature determined during fitting. When
        categories were specified manually, this holds the sorted categories
        (in order corresponding with output of `transform`).
    Examples
    --------
    Given a dataset with three features and two samples, we let the encoder
    find the maximum value per feature and transform the data to a binary
    one-hot encoding.
    >>> from sklearn.preprocessing import CategoricalEncoder
    >>> enc = CategoricalEncoder(handle_unknown='ignore')
    >>> enc.fit([[0, 0, 3], [1, 1, 0], [0, 2, 1], [1, 0, 2]])
    ... # doctest: +ELLIPSIS
    CategoricalEncoder(categories='auto', dtype=<... 'numpy.float64'>,
              encoding='onehot', handle_unknown='ignore')
    >>> enc.transform([[0, 1, 1], [1, 0, 4]]).toarray()
    array([[ 1.,  0.,  0.,  1.,  0.,  0.,  1.,  0.,  0.],
           [ 0.,  1.,  1.,  0.,  0.,  0.,  0.,  0.,  0.]])
    See also
    --------
    sklearn.preprocessing.OneHotEncoder : performs a one-hot encoding of
      integer ordinal features. The ``OneHotEncoder`` assumes that input
      features take on values in the range ``[0, max(feature)]`` instead of
      using the unique values.
    sklearn.feature_extraction.DictVectorizer : performs a one-hot encoding of
      dictionary items (also handles string-valued features).
    sklearn.feature_extraction.FeatureHasher : performs an approximate one-hot
      encoding of dictionary items or strings.
    """

    def __init__(self, encoding='onehot', categories='auto', dtype=np.float64,
                 handle_unknown='error'):
        self.encoding = encoding
        self.categories = categories
        self.dtype = dtype
        self.handle_unknown = handle_unknown

    def fit(self, X, y=None):
        """Fit the CategoricalEncoder to X.
        Parameters
        ----------
        X : array-like, shape [n_samples, n_feature]
            The data to determine the categories of each feature.
        Returns
        -------
        self
        """

        if self.encoding not in ['onehot', 'onehot-dense', 'ordinal']:
            template = ("encoding should be either 'onehot', 'onehot-dense' "
                        "or 'ordinal', got %s")
            raise ValueError(template % self.encoding)

        if self.handle_unknown not in ['error', 'ignore']:
            template = ("handle_unknown should be either 'error' or "
                        "'ignore', got %s")
            raise ValueError(template % self.handle_unknown)

        if self.encoding == 'ordinal' and self.handle_unknown == 'ignore':
            raise ValueError("handle_unknown='ignore' is not supported for"
                             " encoding='ordinal'")

        X = check_array(X, dtype=np.object, accept_sparse='csc', copy=True)
        n_samples, n_features = X.shape

        self._label_encoders_ = [LabelEncoder() for _ in range(n_features)]

        for i in range(n_features):
            le = self._label_encoders_[i]
            Xi = X[:, i]
            if self.categories == 'auto':
                le.fit(Xi)
            else:
                valid_mask = np.in1d(Xi, self.categories[i])
                if not np.all(valid_mask):
                    if self.handle_unknown == 'error':
                        diff = np.unique(Xi[~valid_mask])
                        msg = ("Found unknown categories {0} in column {1}"
                               " during fit".format(diff, i))
                        raise ValueError(msg)
                le.classes_ = np.array(np.sort(self.categories[i]))

        self.categories_ = [le.classes_ for le in self._label_encoders_]

        return self

    def transform(self, X):
        """Transform X using one-hot encoding.
        Parameters
        ----------
        X : array-like, shape [n_samples, n_features]
            The data to encode.
        Returns
        -------
        X_out : sparse matrix or a 2-d array
            Transformed input.
        """
        X = check_array(X, accept_sparse='csc', dtype=np.object, copy=True)
        n_samples, n_features = X.shape
        X_int = np.zeros_like(X, dtype=np.int)
        X_mask = np.ones_like(X, dtype=np.bool)

        for i in range(n_features):
            valid_mask = np.in1d(X[:, i], self.categories_[i])

            if not np.all(valid_mask):
                if self.handle_unknown == 'error':
                    diff = np.unique(X[~valid_mask, i])
                    msg = ("Found unknown categories {0} in column {1}"
                           " during transform".format(diff, i))
                    raise ValueError(msg)
                else:
                    # Set the problematic rows to an acceptable value and
                    # continue. The rows are marked in `X_mask` and will be
                    # removed later.
                    X_mask[:, i] = valid_mask
                    X[:, i][~valid_mask] = self.categories_[i][0]
            X_int[:, i] = self._label_encoders_[i].transform(X[:, i])

        if self.encoding == 'ordinal':
            return X_int.astype(self.dtype, copy=False)

        mask = X_mask.ravel()
        n_values = [cats.shape[0] for cats in self.categories_]
        n_values = np.array([0] + n_values)
        indices = np.cumsum(n_values)

        column_indices = (X_int + indices[:-1]).ravel()[mask]
        row_indices = np.repeat(np.arange(n_samples, dtype=np.int32),
                                n_features)[mask]
        data = np.ones(n_samples * n_features)[mask]

        out = sparse.csc_matrix((data, (row_indices, column_indices)),
                                shape=(n_samples, indices[-1]),
                                dtype=self.dtype).tocsr()
        if self.encoding == 'onehot-dense':
            return out.toarray()
        else:
            return out
        
def transform_data(data):
    data["AgeBucket"] = data["Age"] // 15 * 15
    data["RelativesOnboard"] = data["SibSp"] + data["Parch"]

In [56]:
data_path = 'd:/project/ml/data/titanic/'
train = pd.read_csv(data_path+'train.csv')
test = pd.read_csv(data_path+'test.csv')

train.head()


Out[56]:
PassengerId Survived Pclass Name Sex Age SibSp Parch Ticket Fare Cabin Embarked
0 1 0 3 Braund, Mr. Owen Harris male 22.0 1 0 A/5 21171 7.2500 NaN S
1 2 1 1 Cumings, Mrs. John Bradley (Florence Briggs Th... female 38.0 1 0 PC 17599 71.2833 C85 C
2 3 1 3 Heikkinen, Miss. Laina female 26.0 0 0 STON/O2. 3101282 7.9250 NaN S
3 4 1 1 Futrelle, Mrs. Jacques Heath (Lily May Peel) female 35.0 1 0 113803 53.1000 C123 S
4 5 0 3 Allen, Mr. William Henry male 35.0 0 0 373450 8.0500 NaN S

In [49]:
train.info()


<class 'pandas.core.frame.DataFrame'>
RangeIndex: 891 entries, 0 to 890
Data columns (total 12 columns):
PassengerId    891 non-null int64
Survived       891 non-null int64
Pclass         891 non-null int64
Name           891 non-null object
Sex            891 non-null object
Age            714 non-null float64
SibSp          891 non-null int64
Parch          891 non-null int64
Ticket         891 non-null object
Fare           891 non-null float64
Cabin          204 non-null object
Embarked       889 non-null object
dtypes: float64(2), int64(5), object(5)
memory usage: 83.6+ KB

In [50]:
train.describe()


Out[50]:
PassengerId Survived Pclass Age SibSp Parch Fare
count 891.000000 891.000000 891.000000 714.000000 891.000000 891.000000 891.000000
mean 446.000000 0.383838 2.308642 29.699118 0.523008 0.381594 32.204208
std 257.353842 0.486592 0.836071 14.526497 1.102743 0.806057 49.693429
min 1.000000 0.000000 1.000000 0.420000 0.000000 0.000000 0.000000
25% 223.500000 0.000000 2.000000 20.125000 0.000000 0.000000 7.910400
50% 446.000000 0.000000 3.000000 28.000000 0.000000 0.000000 14.454200
75% 668.500000 1.000000 3.000000 38.000000 1.000000 0.000000 31.000000
max 891.000000 1.000000 3.000000 80.000000 8.000000 6.000000 512.329200

In [4]:
clean_data(train)
clean_data(test)

In [11]:
y_train = train["Survived"].values
X_train = train[["Pclass", "Sex", "Age", "Fare"]].values

In [33]:
decision_tree = tree.DecisionTreeClassifier(random_state= 1)
decision_tree = decision_tree.fit(X_train, y_train)

print(decision_tree.feature_importances_)
# print(decision_tree.score(X_train, y_train))
scores = model_selection.cross_val_score(decision_tree, X_train, y_train, scoring='accuracy', cv=10)
scores.mean()


[ 0.11682997  0.23486832  0.31088095  0.23230415  0.04892927  0.02973698
  0.02645037]
Out[33]:
0.79471512881625239
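
The max_depth=7 / min_samples_split=2 settings in the next cell look hand-picked; one way such values could be chosen is a cross-validated grid search, sketched below with an illustrative (assumed) parameter grid.

In [ ]:
# Hypothetical sketch: choose tree depth and split size by grid search.
param_grid = {'max_depth': [3, 5, 7, 9], 'min_samples_split': [2, 4, 8]}
grid = model_selection.GridSearchCV(tree.DecisionTreeClassifier(random_state=1),
                                    param_grid, scoring='accuracy', cv=10)
grid.fit(X_train, y_train)
print(grid.best_params_, grid.best_score_)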

In [34]:
X_train = train[["Pclass", "Age", "Sex", "Fare", "SibSp", "Parch", "Embarked"]].values
decision_tree_two = tree.DecisionTreeClassifier(
    max_depth = 7,
    min_samples_split = 2,
    random_state = 1)
decision_tree_two = decision_tree_two.fit(X_train, y_train)
scores = model_selection.cross_val_score(decision_tree_two, X_train, y_train, scoring='accuracy', cv=10)
scores.mean()


Out[34]:
0.81939734422880495

In [32]:
logistic = linear_model.LogisticRegression()
logistic.fit(X_train, y_train)
scores = model_selection.cross_val_score(logistic, X_train, y_train, scoring='accuracy', cv=10)
scores.mean()


Out[32]:
0.7935410282601294
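
The logistic regression above runs on unscaled Age/Fare values; below is a hedged sketch of wrapping it with a StandardScaler (an assumption about what might help, not something evaluated in this notebook).

In [ ]:
# Hypothetical sketch: standardize the features before logistic regression.
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler

scaled_logistic = make_pipeline(StandardScaler(), linear_model.LogisticRegression())
scores = model_selection.cross_val_score(scaled_logistic, X_train, y_train,
                                         scoring='accuracy', cv=10)
scores.mean()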

In [36]:
gbm = ensemble.GradientBoostingClassifier(
    learning_rate = 0.005,
    min_samples_split=40,
    min_samples_leaf=1,
    max_features=2,
    max_depth=12,
    n_estimators=1500,
    subsample=0.75,
    random_state=1)
gbm = gbm.fit(X_train, y_train)

print(gbm.feature_importances_)
print(gbm.score(X_train, y_train))

scores = model_selection.cross_val_score(gbm, X_train, y_train, scoring='accuracy', cv=20)
scores.mean()


[ 0.05732547  0.32300303  0.11482147  0.36183847  0.05657822  0.04783569
  0.03859766]
0.94051627385
Out[36]:
0.82735287659200696

In [37]:
X_test = test[["Pclass", "Age", "Sex", "Fare", "SibSp", "Parch", "Embarked"]].values
# `forest` was fitted in an earlier cell that is no longer shown; a random forest is fitted again in In [110]-[112] below.
prediction_forest = forest.predict(X_test)
write_prediction(prediction_forest, "results/random_forest.csv")

In [114]:
params = {
    'max_depth': 7,
    'eta': 0.1,
    'subsample': 0.7,
    'silent': 1,
    'booster': 'gbtree',
    'objective': 'multi:softmax',
    'num_class': 2,
}

plst = list(params.items())
num_rounds = 100  # number of boosting rounds

# random_state has a big influence on the validation AUC

# xgb_val = xgb.DMatrix(val_X, label=val_y)
xgb_train = xgb.DMatrix(X_train, label=y_train)
xgb_test = xgb.DMatrix(X_test)

watchlist = [(xgb_train, 'train')]
# train the model
# early_stopping_rounds: when the number of rounds is large, training stops once the
# evaluation metric has not improved within this many rounds
model = xgb.train(plst, xgb_train, num_rounds, watchlist, early_stopping_rounds=100)

print("best best_ntree_limit", model.best_ntree_limit)
print('train bin', np.bincount(y_train))


[0]	train-merror:0.136925
Will train until train-merror hasn't improved in 100 rounds.
[1]	train-merror:0.124579
[2]	train-merror:0.127946
[3]	train-merror:0.114478
[4]	train-merror:0.114478
[5]	train-merror:0.114478
[6]	train-merror:0.118967
[7]	train-merror:0.114478
[8]	train-merror:0.114478
[9]	train-merror:0.112233
[10]	train-merror:0.114478
[11]	train-merror:0.109989
[12]	train-merror:0.109989
[13]	train-merror:0.109989
[14]	train-merror:0.107744
[15]	train-merror:0.107744
[16]	train-merror:0.107744
[17]	train-merror:0.107744
[18]	train-merror:0.108866
[19]	train-merror:0.105499
[20]	train-merror:0.104377
[21]	train-merror:0.103255
[22]	train-merror:0.102132
[23]	train-merror:0.102132
[24]	train-merror:0.102132
[25]	train-merror:0.103255
[26]	train-merror:0.102132
[27]	train-merror:0.099888
[28]	train-merror:0.10101
[29]	train-merror:0.10101
[30]	train-merror:0.10101
[31]	train-merror:0.099888
[32]	train-merror:0.10101
[33]	train-merror:0.097643
[34]	train-merror:0.098765
[35]	train-merror:0.095398
[36]	train-merror:0.095398
[37]	train-merror:0.095398
[38]	train-merror:0.093154
[39]	train-merror:0.093154
[40]	train-merror:0.089787
[41]	train-merror:0.088664
[42]	train-merror:0.087542
[43]	train-merror:0.088664
[44]	train-merror:0.08642
[45]	train-merror:0.08642
[46]	train-merror:0.08642
[47]	train-merror:0.08642
[48]	train-merror:0.08642
[49]	train-merror:0.08642
[50]	train-merror:0.085297
[51]	train-merror:0.085297
[52]	train-merror:0.084175
[53]	train-merror:0.085297
[54]	train-merror:0.085297
[55]	train-merror:0.084175
[56]	train-merror:0.083053
[57]	train-merror:0.083053
[58]	train-merror:0.083053
[59]	train-merror:0.08193
[60]	train-merror:0.080808
[61]	train-merror:0.079686
[62]	train-merror:0.078563
[63]	train-merror:0.078563
[64]	train-merror:0.078563
[65]	train-merror:0.080808
[66]	train-merror:0.080808
[67]	train-merror:0.08193
[68]	train-merror:0.080808
[69]	train-merror:0.080808
[70]	train-merror:0.079686
[71]	train-merror:0.079686
[72]	train-merror:0.078563
[73]	train-merror:0.076319
[74]	train-merror:0.078563
[75]	train-merror:0.078563
[76]	train-merror:0.078563
[77]	train-merror:0.076319
[78]	train-merror:0.076319
[79]	train-merror:0.076319
[80]	train-merror:0.075196
[81]	train-merror:0.075196
[82]	train-merror:0.075196
[83]	train-merror:0.075196
[84]	train-merror:0.075196
[85]	train-merror:0.075196
[86]	train-merror:0.074074
[87]	train-merror:0.075196
[88]	train-merror:0.075196
[89]	train-merror:0.075196
[90]	train-merror:0.075196
[91]	train-merror:0.075196
[92]	train-merror:0.075196
[93]	train-merror:0.075196
[94]	train-merror:0.075196
[95]	train-merror:0.075196
[96]	train-merror:0.075196
[97]	train-merror:0.074074
[98]	train-merror:0.074074
[99]	train-merror:0.074074
best_ntree_limit: 87
train bin [549 342]
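
Because the watchlist above only contains the training set, early stopping never reacts to held-out performance; the sketch below shows how the commented-out xgb_val idea could be wired in with a validation split (the split fraction and variable names are assumptions).

In [ ]:
# Hypothetical sketch: early stopping against a held-out validation set.
tr_X, val_X, tr_y, val_y = model_selection.train_test_split(
    X_train, y_train, test_size=0.2, random_state=1)
xgb_tr = xgb.DMatrix(tr_X, label=tr_y)
xgb_val = xgb.DMatrix(val_X, label=val_y)
model_val = xgb.train(plst, xgb_tr, num_rounds,
                      [(xgb_tr, 'train'), (xgb_val, 'val')],
                      early_stopping_rounds=20)
print(model_val.best_iteration, model_val.best_score)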

In [97]:
predict = model.predict(xgb_test)
predict.astype(int)


Out[97]:
array([0, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 1, 0, 1, 1, 0, 0, 1, 0, 1, 1, 1,
       1, 1, 0, 1, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 1, 0, 1, 1, 0,
       0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 1, 0, 0,
       1, 1, 0, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1, 0,
       1, 0, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1,
       0, 0, 1, 1, 1, 1, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
       0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 1, 1, 1, 1, 1,
       1, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 1, 1, 0,
       1, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0,
       0, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 0, 1, 0, 0,
       1, 1, 0, 0, 1, 0, 1, 0, 1, 1, 0, 1, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 0,
       0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 1,
       0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0,
       0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0,
       0, 1, 1, 0, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0,
       1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 1, 0, 0, 0,
       1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0,
       1, 1, 0, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 1, 1, 1, 1, 0, 0,
       1, 0, 0, 1])
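
The xgboost predictions above are never written out in this transcript; a hypothetical cell that would save them as a submission file (the file name is an assumption), reusing write_prediction from the top of the notebook:

In [ ]:
# Hypothetical: save the xgboost predictions as a Kaggle submission file.
write_prediction(predict.astype(int), "results/xgboost.csv")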

In [47]:
model = xgb.XGBClassifier()
scores = model_selection.cross_val_score(model, X_train, y_train, scoring='accuracy', cv=20)
scores.mean()


Out[47]:
0.82285792709705741
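
The default XGBClassifier above ignores the hand-tuned parameters used with xgb.train; a hedged sketch of mirroring some of them in the sklearn wrapper (the mapping is an assumption, not a tuned configuration):

In [ ]:
# Hypothetical sketch: pass xgb.train-style parameters to the sklearn wrapper.
model_tuned = xgb.XGBClassifier(max_depth=7, learning_rate=0.1,
                                subsample=0.7, n_estimators=100)
scores = model_selection.cross_val_score(model_tuned, X_train, y_train,
                                         scoring='accuracy', cv=20)
scores.mean()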

In [52]:
from sklearn.base import BaseEstimator, TransformerMixin

# A class to select numerical or categorical columns 
# since Scikit-Learn doesn't handle DataFrames yet
class DataFrameSelector(BaseEstimator, TransformerMixin):
    def __init__(self, attribute_names):
        self.attribute_names = attribute_names
    def fit(self, X, y=None):
        return self
    def transform(self, X):
        return X[self.attribute_names]
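
As a standalone illustration (hypothetical, not a cell that was run), the selector simply returns the named DataFrame columns:

In [ ]:
# Hypothetical illustration: DataFrameSelector returns just the requested columns.
DataFrameSelector(["Age", "Fare"]).fit_transform(train).head()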

In [106]:
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import Imputer

imputer = Imputer(strategy="median")

num_pipeline = Pipeline([
        ("select_numeric", DataFrameSelector(["Age", "AgeBucket","SibSp", "Parch", "RelativesOnboard", "Fare"])),
        ("imputer", Imputer(strategy="median")),
    ])

In [62]:
num_pipeline.fit_transform(train)


Out[62]:
array([[ 22.    ,   1.    ,   0.    ,   7.25  ],
       [ 38.    ,   1.    ,   0.    ,  71.2833],
       [ 26.    ,   0.    ,   0.    ,   7.925 ],
       ..., 
       [ 28.    ,   1.    ,   2.    ,  23.45  ],
       [ 26.    ,   0.    ,   0.    ,  30.    ],
       [ 32.    ,   0.    ,   0.    ,   7.75  ]])

In [58]:
# Inspired by stackoverflow.com/questions/25239958
class MostFrequentImputer(BaseEstimator, TransformerMixin):
    def fit(self, X, y=None):
        self.most_frequent = pd.Series([X[c].value_counts().index[0] for c in X],
                                       index=X.columns)
        return self
    def transform(self, X, y=None):
        return X.fillna(self.most_frequent)
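
A quick hypothetical check of what this imputer learns on the categorical columns used below; on this dataset the per-column modes should be Pclass 3, Sex male and Embarked S:

In [ ]:
# Hypothetical check: the per-column modes the imputer will fill NaNs with.
MostFrequentImputer().fit(train[["Pclass", "Sex", "Embarked"]]).most_frequent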

In [59]:
cat_pipeline = Pipeline([
        ("select_cat", DataFrameSelector(["Pclass", "Sex", "Embarked"])),
        ("imputer", MostFrequentImputer()),
        ("cat_encoder", CategoricalEncoder(encoding='onehot-dense')),
    ])

In [61]:
cat_pipeline.fit_transform(train)


Out[61]:
array([[ 0.,  0.,  1., ...,  0.,  0.,  1.],
       [ 1.,  0.,  0., ...,  1.,  0.,  0.],
       [ 0.,  0.,  1., ...,  0.,  0.,  1.],
       ..., 
       [ 0.,  0.,  1., ...,  0.,  0.,  1.],
       [ 1.,  0.,  0., ...,  1.,  0.,  0.],
       [ 0.,  0.,  1., ...,  0.,  1.,  0.]])

In [79]:
from sklearn.pipeline import FeatureUnion
preprocess_pipeline = FeatureUnion(transformer_list=[
        ("num_pipeline", num_pipeline),
        ("cat_pipeline", cat_pipeline),
    ])

In [80]:
X_train = preprocess_pipeline.fit_transform(train)
X_train


Out[80]:
array([[ 15.    ,   1.    ,   7.25  , ...,   0.    ,   0.    ,   1.    ],
       [ 30.    ,   1.    ,  71.2833, ...,   1.    ,   0.    ,   0.    ],
       [ 15.    ,   0.    ,   7.925 , ...,   0.    ,   0.    ,   1.    ],
       ..., 
       [ 15.    ,   3.    ,  23.45  , ...,   0.    ,   0.    ,   1.    ],
       [ 15.    ,   0.    ,  30.    , ...,   1.    ,   0.    ,   0.    ],
       [ 30.    ,   0.    ,   7.75  , ...,   0.    ,   1.    ,   0.    ]])

In [67]:
y_train = train["Survived"]

In [81]:
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import cross_val_score

forest_clf = RandomForestClassifier(random_state=42)
scores = cross_val_score(forest_clf, X_train, y_train, cv=10)
scores.mean()


Out[81]:
0.82611394847349895
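
A cheaper alternative to 10-fold CV for a quick read on a random forest is the out-of-bag estimate; a hedged sketch (n_estimators is raised here as an assumption, since the OOB estimate is noisy with few trees):

In [ ]:
# Hypothetical sketch: out-of-bag accuracy as a quick generalization estimate.
forest_oob = RandomForestClassifier(n_estimators=200, oob_score=True, random_state=42)
forest_oob.fit(X_train, y_train)
forest_oob.oob_score_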

In [71]:
train["AgeBucket"] = train["Age"] // 15 * 15
train[["AgeBucket", "Survived"]].groupby(['AgeBucket']).mean()


Out[71]:
Survived
AgeBucket
0.0 0.576923
15.0 0.362745
30.0 0.423256
45.0 0.404494
60.0 0.240000
75.0 1.000000

In [73]:
train["RelativesOnboard"] = train["SibSp"] + train["Parch"]
train[["RelativesOnboard", "Survived"]].groupby(['RelativesOnboard']).mean()


Out[73]:
Survived
RelativesOnboard
0 0.303538
1 0.552795
2 0.578431
3 0.724138
4 0.200000
5 0.136364
6 0.333333
7 0.000000
10 0.000000

In [107]:
# gbm
train = pd.read_csv(data_path+'train.csv')
test = pd.read_csv(data_path+'test.csv')
transform_data(train)
transform_data(test)
X_train = preprocess_pipeline.fit_transform(train)

gbm = ensemble.GradientBoostingClassifier(
    learning_rate = 0.005,
    min_samples_split=40,
    min_samples_leaf=1,
    max_features=2,
    max_depth=12,
    n_estimators=1500,
    subsample=0.75,
    random_state=1)
gbm = gbm.fit(X_train, y_train)

print(gbm.feature_importances_)
print(gbm.score(X_train, y_train))

# scores = model_selection.cross_val_score(gbm, X_train, y_train, scoring='accuracy', cv=20)
# scores.mean()
# 0.827


[ 0.1509222   0.12154798  0.45165627  0.0267628   0.01684683  0.03448312
  0.06623131  0.07221939  0.02101543  0.01435315  0.02396153]
0.910213243547
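
The importance array above is hard to read without column names; the hypothetical snippet below pairs it with names reconstructed by hand from the FeatureUnion order (numeric pipeline columns first, then the one-hot categories from cat_pipeline). The ordering and the length check are assumptions.

In [ ]:
# Hypothetical: label the feature importances (column order assumed from FeatureUnion).
num_cols = ["Age", "AgeBucket", "SibSp", "Parch", "RelativesOnboard", "Fare"]
cat_cols = ["{}={}".format(col, cat)
            for col, cats in zip(["Pclass", "Sex", "Embarked"],
                                 cat_pipeline.named_steps["cat_encoder"].categories_)
            for cat in cats]
feature_names = num_cols + cat_cols
if len(feature_names) == len(gbm.feature_importances_):
    print(sorted(zip(gbm.feature_importances_, feature_names), reverse=True))
else:
    print("name/importance count mismatch:", len(feature_names), len(gbm.feature_importances_))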

In [108]:
X_test = preprocess_pipeline.transform(test)  # apply the pipeline fitted on the training data
predict = gbm.predict(X_test)
write_prediction(predict, 'results/result.csv')
# GBM did not improve the leaderboard score: 0.76076

In [110]:
# Random forest
train = pd.read_csv(data_path+'train.csv')
test = pd.read_csv(data_path+'test.csv')
transform_data(train)
transform_data(test)
X_train = preprocess_pipeline.fit_transform(train)

forest = ensemble.RandomForestClassifier(
    max_depth = 7,
    min_samples_split = 4,
    n_estimators = 1000,
    random_state = 1,
    n_jobs = -1
)

scores = model_selection.cross_val_score(forest, X_train, y_train, scoring='accuracy', cv=10)
scores.mean()


Out[110]:
0.83166865282033819
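
Accuracy alone hides how the errors split between the two classes; below is a hedged sketch that uses out-of-fold predictions to look at the confusion matrix, precision and recall for this forest.

In [ ]:
# Hypothetical sketch: confusion matrix and precision/recall from out-of-fold predictions.
from sklearn.model_selection import cross_val_predict
from sklearn.metrics import confusion_matrix, precision_score, recall_score

oof_pred = cross_val_predict(forest, X_train, y_train, cv=10)
print(confusion_matrix(y_train, oof_pred))
print(precision_score(y_train, oof_pred), recall_score(y_train, oof_pred))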

In [111]:
forest = forest.fit(X_train, y_train)

In [112]:
X_test = preprocess_pipeline.transform(test)  # apply the pipeline fitted on the training data
predict = forest.predict(X_test)
write_prediction(predict, 'results/result.csv')
# Random forest improved the leaderboard score to 0.79

In [115]:
from sklearn.metrics import accuracy_score

In [117]:
cross_val_score?

In [ ]: