In [113]:
import pandas as pd
import numpy as np
from sklearn import tree, model_selection, ensemble, linear_model, preprocessing
import xgboost as xgb
def clean_data(data):
    # Fill missing fares and ages with the column medians
    data["Fare"] = data["Fare"].fillna(data["Fare"].dropna().median())
    data["Age"] = data["Age"].fillna(data["Age"].dropna().median())
    # Encode Sex as an integer: male -> 0, female -> 1
    data.loc[data["Sex"] == "male", "Sex"] = 0
    data.loc[data["Sex"] == "female", "Sex"] = 1
    # Fill missing embarkation ports with the most common value ("S"),
    # then encode Embarked as an integer: S -> 0, C -> 1, Q -> 2
    data["Embarked"] = data["Embarked"].fillna("S")
    data.loc[data["Embarked"] == "S", "Embarked"] = 0
    data.loc[data["Embarked"] == "C", "Embarked"] = 1
    data.loc[data["Embarked"] == "Q", "Embarked"] = 2
def write_prediction(prediction, name):
    # Build a submission DataFrame indexed by PassengerId (taken from the global
    # test DataFrame) and write it to CSV
    PassengerId = np.array(test["PassengerId"]).astype(int)
    solution = pd.DataFrame(prediction, PassengerId, columns = ["Survived"])
    solution.to_csv(name, index_label = ["PassengerId"])
# Definition of the CategoricalEncoder class, copied from PR #9151.
# Just run this cell, or copy it to your code, no need to try to
# understand every line.
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.utils import check_array
from sklearn.preprocessing import LabelEncoder
from scipy import sparse
class CategoricalEncoder(BaseEstimator, TransformerMixin):
"""Encode categorical features as a numeric array.
The input to this transformer should be a matrix of integers or strings,
denoting the values taken on by categorical (discrete) features.
The features can be encoded using a one-hot aka one-of-K scheme
(``encoding='onehot'``, the default) or converted to ordinal integers
(``encoding='ordinal'``).
This encoding is needed for feeding categorical data to many scikit-learn
estimators, notably linear models and SVMs with the standard kernels.
Read more in the :ref:`User Guide <preprocessing_categorical_features>`.
Parameters
----------
encoding : str, 'onehot', 'onehot-dense' or 'ordinal'
The type of encoding to use (default is 'onehot'):
- 'onehot': encode the features using a one-hot aka one-of-K scheme
(or also called 'dummy' encoding). This creates a binary column for
each category and returns a sparse matrix.
- 'onehot-dense': the same as 'onehot' but returns a dense array
instead of a sparse matrix.
- 'ordinal': encode the features as ordinal integers. This results in
a single column of integers (0 to n_categories - 1) per feature.
categories : 'auto' or a list of lists/arrays of values.
Categories (unique values) per feature:
- 'auto' : Determine categories automatically from the training data.
- list : ``categories[i]`` holds the categories expected in the ith
column. The passed categories are sorted before encoding the data
(used categories can be found in the ``categories_`` attribute).
dtype : number type, default np.float64
Desired dtype of output.
handle_unknown : 'error' (default) or 'ignore'
Whether to raise an error or ignore if an unknown categorical feature is
present during transform (default is to raise). When this parameter is
set to 'ignore' and an unknown category is encountered during transform,
the resulting one-hot encoded columns for this feature will be all zeros.
Ignoring unknown categories is not supported for
``encoding='ordinal'``.
Attributes
----------
categories_ : list of arrays
The categories of each feature determined during fitting. When
categories were specified manually, this holds the sorted categories
(in order corresponding with output of `transform`).
Examples
--------
Given a dataset with three features and two samples, we let the encoder
find the maximum value per feature and transform the data to a binary
one-hot encoding.
>>> from sklearn.preprocessing import CategoricalEncoder
>>> enc = CategoricalEncoder(handle_unknown='ignore')
>>> enc.fit([[0, 0, 3], [1, 1, 0], [0, 2, 1], [1, 0, 2]])
... # doctest: +ELLIPSIS
CategoricalEncoder(categories='auto', dtype=<... 'numpy.float64'>,
encoding='onehot', handle_unknown='ignore')
>>> enc.transform([[0, 1, 1], [1, 0, 4]]).toarray()
array([[ 1., 0., 0., 1., 0., 0., 1., 0., 0.],
[ 0., 1., 1., 0., 0., 0., 0., 0., 0.]])
See also
--------
sklearn.preprocessing.OneHotEncoder : performs a one-hot encoding of
integer ordinal features. The ``OneHotEncoder`` assumes that input
features take on values in the range ``[0, max(feature)]`` instead of
using the unique values.
sklearn.feature_extraction.DictVectorizer : performs a one-hot encoding of
dictionary items (also handles string-valued features).
sklearn.feature_extraction.FeatureHasher : performs an approximate one-hot
encoding of dictionary items or strings.
"""
def __init__(self, encoding='onehot', categories='auto', dtype=np.float64,
handle_unknown='error'):
self.encoding = encoding
self.categories = categories
self.dtype = dtype
self.handle_unknown = handle_unknown
def fit(self, X, y=None):
"""Fit the CategoricalEncoder to X.
Parameters
----------
X : array-like, shape [n_samples, n_feature]
The data to determine the categories of each feature.
Returns
-------
self
"""
if self.encoding not in ['onehot', 'onehot-dense', 'ordinal']:
template = ("encoding should be either 'onehot', 'onehot-dense' "
"or 'ordinal', got %s")
raise ValueError(template % self.encoding)
if self.handle_unknown not in ['error', 'ignore']:
template = ("handle_unknown should be either 'error' or "
"'ignore', got %s")
raise ValueError(template % self.handle_unknown)
if self.encoding == 'ordinal' and self.handle_unknown == 'ignore':
raise ValueError("handle_unknown='ignore' is not supported for"
" encoding='ordinal'")
X = check_array(X, dtype=np.object, accept_sparse='csc', copy=True)
n_samples, n_features = X.shape
self._label_encoders_ = [LabelEncoder() for _ in range(n_features)]
for i in range(n_features):
le = self._label_encoders_[i]
Xi = X[:, i]
if self.categories == 'auto':
le.fit(Xi)
else:
valid_mask = np.in1d(Xi, self.categories[i])
if not np.all(valid_mask):
if self.handle_unknown == 'error':
diff = np.unique(Xi[~valid_mask])
msg = ("Found unknown categories {0} in column {1}"
" during fit".format(diff, i))
raise ValueError(msg)
le.classes_ = np.array(np.sort(self.categories[i]))
self.categories_ = [le.classes_ for le in self._label_encoders_]
return self
def transform(self, X):
"""Transform X using one-hot encoding.
Parameters
----------
X : array-like, shape [n_samples, n_features]
The data to encode.
Returns
-------
X_out : sparse matrix or a 2-d array
Transformed input.
"""
X = check_array(X, accept_sparse='csc', dtype=np.object, copy=True)
n_samples, n_features = X.shape
X_int = np.zeros_like(X, dtype=np.int)
X_mask = np.ones_like(X, dtype=np.bool)
for i in range(n_features):
valid_mask = np.in1d(X[:, i], self.categories_[i])
if not np.all(valid_mask):
if self.handle_unknown == 'error':
diff = np.unique(X[~valid_mask, i])
msg = ("Found unknown categories {0} in column {1}"
" during transform".format(diff, i))
raise ValueError(msg)
else:
# Set the problematic rows to an acceptable value and
# continue. The rows are marked in `X_mask` and will be
# removed later.
X_mask[:, i] = valid_mask
X[:, i][~valid_mask] = self.categories_[i][0]
X_int[:, i] = self._label_encoders_[i].transform(X[:, i])
if self.encoding == 'ordinal':
return X_int.astype(self.dtype, copy=False)
mask = X_mask.ravel()
n_values = [cats.shape[0] for cats in self.categories_]
n_values = np.array([0] + n_values)
indices = np.cumsum(n_values)
column_indices = (X_int + indices[:-1]).ravel()[mask]
row_indices = np.repeat(np.arange(n_samples, dtype=np.int32),
n_features)[mask]
data = np.ones(n_samples * n_features)[mask]
out = sparse.csc_matrix((data, (row_indices, column_indices)),
shape=(n_samples, indices[-1]),
dtype=self.dtype).tocsr()
if self.encoding == 'onehot-dense':
return out.toarray()
else:
return out
def transform_data(data):
    # Feature engineering: bucket ages into 15-year bands and count the
    # relatives (siblings/spouses + parents/children) travelling with each passenger
    data["AgeBucket"] = data["Age"] // 15 * 15
    data["RelativesOnboard"] = data["SibSp"] + data["Parch"]
In [56]:
data_path = 'd:/project/ml/data/titanic/'
train = pd.read_csv(data_path+'train.csv')
test = pd.read_csv(data_path+'test.csv')
train.head()
Out[56]:
In [49]:
train.info()
In [50]:
train.describe()
Out[50]:
In [4]:
clean_data(train)
clean_data(test)
In [11]:
y_train = train["Survived"].values
X_train = train[["Pclass", "Sex", "Age", "Fare"]].values
In [33]:
decision_tree = tree.DecisionTreeClassifier(random_state= 1)
decision_tree = decision_tree.fit(X_train, y_train)
print(decision_tree.feature_importances_)
# print(decision_tree.score(X_train, y_train))
scores = model_selection.cross_val_score(decision_tree, X_train, y_train, scoring='accuracy', cv=10)
scores.mean()
Out[33]:
In [34]:
X_train = train[["Pclass", "Age", "Sex", "Fare", "SibSp", "Parch", "Embarked"]].values
decision_tree_two = tree.DecisionTreeClassifier(
max_depth = 7,
min_samples_split = 2,
random_state = 1)
decision_tree_two = decision_tree_two.fit(X_train, y_train)
scores = model_selection.cross_val_score(decision_tree_two, X_train, y_train, scoring='accuracy', cv=10)
scores.mean()
Out[34]:
In [35]:
Out[35]:
In [32]:
logistic = linear_model.LogisticRegression()
logistic.fit(X_train, y_train)
scores = model_selection.cross_val_score(logistic, X_train, y_train, scoring='accuracy', cv=10)
scores
Out[32]:
In [36]:
gbm = ensemble.GradientBoostingClassifier(
learning_rate = 0.005,
min_samples_split=40,
min_samples_leaf=1,
max_features=2,
max_depth=12,
n_estimators=1500,
subsample=0.75,
random_state=1)
gbm = gbm.fit(X_train, y_train)
print(gbm.feature_importances_)
print(gbm.score(X_train, y_train))
scores = model_selection.cross_val_score(gbm, X_train, y_train, scoring='accuracy', cv=20)
scores.mean()
Out[36]:
In [37]:
X_test = test[["Pclass", "Age", "Sex", "Fare", "SibSp", "Parch", "Embarked"]].values
prediction_forest = forest.predict(X_test)
write_prediction(prediction_forest, "results/random_forest.csv")
In [114]:
params = {'max_depth':7,'eta':0.1,'subsample':0.7,'silent':1,'booster':'gbtree','objective':'multi:softmax', 'num_class':2}
plst = list(params.items())
num_rounds = 100 # number of boosting rounds
# random_state is of big influence for val-auc
# xgb_val = xgb.DMatrix(val_X, label=val_y)
xgb_train = xgb.DMatrix(X_train, label=y_train)
xgb_test = xgb.DMatrix(X_test)
watchlist = [(xgb_train, 'train')]
# training model
# early_stopping_rounds: when num_rounds is large, stop training once the eval metric
# has not improved for the given number of rounds
model = xgb.train(plst, xgb_train, num_rounds, watchlist, early_stopping_rounds=100)
print("best best_ntree_limit", model.best_ntree_limit)
print('train bin', np.bincount(y_train))
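The commented-out xgb_val line hints at how early_stopping_rounds is meant to be used: it only has an effect when the watchlist contains a held-out evaluation set. A minimal sketch with an 80/20 split (the split fraction and the variable names below are illustrative, not from the notebook):

from sklearn.model_selection import train_test_split
tr_X, val_X, tr_y, val_y = train_test_split(X_train, y_train, test_size=0.2, random_state=1)
xgb_tr = xgb.DMatrix(tr_X, label=tr_y)
xgb_val = xgb.DMatrix(val_X, label=val_y)
# stop once the validation metric has not improved for 20 consecutive rounds
model_es = xgb.train(plst, xgb_tr, num_rounds,
                     evals=[(xgb_tr, 'train'), (xgb_val, 'val')],
                     early_stopping_rounds=20)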
In [97]:
predict = model.predict(xgb_test)
predict.astype(int)
Out[97]:
In [47]:
model = xgb.XGBClassifier()
scores = model_selection.cross_val_score(model, X_train, y_train, scoring='accuracy', cv=20)
scores.mean()
Out[47]:
In [52]:
from sklearn.base import BaseEstimator, TransformerMixin
# A class to select numerical or categorical columns
# since Scikit-Learn doesn't handle DataFrames yet
class DataFrameSelector(BaseEstimator, TransformerMixin):
def __init__(self, attribute_names):
self.attribute_names = attribute_names
def fit(self, X, y=None):
return self
def transform(self, X):
return X[self.attribute_names]
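The selector simply pulls the named columns out of a DataFrame so they can feed the rest of a pipeline. A small illustration, assuming the train DataFrame loaded earlier:

DataFrameSelector(["Age", "Fare"]).fit_transform(train).head()  # DataFrame with just those two columns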
In [106]:
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import Imputer
imputer = Imputer(strategy="median")
num_pipeline = Pipeline([
("select_numeric", DataFrameSelector(["Age", "AgeBucket","SibSp", "Parch", "RelativesOnboard", "Fare"])),
("imputer", Imputer(strategy="median")),
])
In [62]:
num_pipeline.fit_transform(train)
Out[62]:
In [58]:
# Inspired by stackoverflow.com/questions/25239958
class MostFrequentImputer(BaseEstimator, TransformerMixin):
def fit(self, X, y=None):
self.most_frequent = pd.Series([X[c].value_counts().index[0] for c in X],
index=X.columns)
return self
def transform(self, X, y=None):
return X.fillna(self.most_frequent)
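As a small illustration (assuming the train DataFrame loaded earlier; imputer_cat is just an illustrative name), the imputer fills the missing Embarked values with the most common port:

imputer_cat = MostFrequentImputer()
imputer_cat.fit_transform(train[["Embarked"]]).isnull().sum()  # no NaN left after imputation
imputer_cat.most_frequent                                      # most common value per column (here "S")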
In [59]:
cat_pipeline = Pipeline([
("select_cat", DataFrameSelector(["Pclass", "Sex", "Embarked"])),
("imputer", MostFrequentImputer()),
("cat_encoder", CategoricalEncoder(encoding='onehot-dense')),
])
In [61]:
cat_pipeline.fit_transform(train)
Out[61]:
In [79]:
from sklearn.pipeline import FeatureUnion
preprocess_pipeline = FeatureUnion(transformer_list=[
("num_pipeline", num_pipeline),
("cat_pipeline", cat_pipeline),
])
In [80]:
X_train = preprocess_pipeline.fit_transform(train)
X_train
Out[80]:
In [67]:
y_train = train["Survived"]
In [81]:
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import cross_val_score
forest_clf = RandomForestClassifier(random_state=42)
scores = cross_val_score(forest_clf, X_train, y_train, cv=10)
scores.mean()
Out[81]:
In [71]:
train["AgeBucket"] = train["Age"] // 15 * 15
train[["AgeBucket", "Survived"]].groupby(['AgeBucket']).mean()
Out[71]:
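The integer division by 15 followed by multiplication by 15 floors each age down to the lower edge of its 15-year band, for example:

for age in (4, 22, 37, 71):
    print(age, "->", age // 15 * 15)   # 0, 15, 30, 60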
In [73]:
train["RelativesOnboard"] = train["SibSp"] + train["Parch"]
train[["RelativesOnboard", "Survived"]].groupby(['RelativesOnboard']).mean()
Out[73]:
In [107]:
# gbm
train = pd.read_csv(data_path+'train.csv')
test = pd.read_csv(data_path+'test.csv')
transform_data(train)
transform_data(test)
X_train = preprocess_pipeline.fit_transform(train)
gbm = ensemble.GradientBoostingClassifier(
learning_rate = 0.005,
min_samples_split=40,
min_samples_leaf=1,
max_features=2,
max_depth=12,
n_estimators=1500,
subsample=0.75,
random_state=1)
gbm = gbm.fit(X_train, y_train)
print(gbm.feature_importances_)
print(gbm.score(X_train, y_train))
# scores = model_selection.cross_val_score(gbm, X_train, y_train, scoring='accuracy', cv=20)
# scores.mean()
# 0.827
In [108]:
X_test = preprocess_pipeline.transform(test)  # transform only, so the test set reuses the imputers/encoders fitted on train
predict = gbm.predict(X_test)
write_prediction(predict, 'results/result.csv')
# GBM did not improve the leaderboard score: 0.76076
In [110]:
# Random forest
train = pd.read_csv(data_path+'train.csv')
test = pd.read_csv(data_path+'test.csv')
transform_data(train)
transform_data(test)
X_train = preprocess_pipeline.fit_transform(train)
forest = ensemble.RandomForestClassifier(
max_depth = 7,
min_samples_split = 4,
n_estimators = 1000,
random_state = 1,
n_jobs = -1
)
scores = model_selection.cross_val_score(forest, X_train, y_train, scoring='accuracy', cv=10)
scores.mean()
Out[110]:
In [111]:
forest = forest.fit(X_train, y_train)
In [112]:
X_test = preprocess_pipeline.transform(test)  # transform only, reusing the fit from the training data
predict = forest.predict(X_test)
write_prediction(predict, 'results/result.csv')
# Random forest improved the leaderboard score to 0.79
In [115]:
from sklearn.metrics import accuracy_score
In [117]:
cross_val_score?
In [ ]: