The following example is taken from the scikit-learn feature importances documentation.
It has been extended so that it meets our iRF guidelines.
In [21]:
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_classification
from sklearn.ensemble import ExtraTreesClassifier
# Set seed for reproducibility
np.random.seed(200)
# Build a classification task using 3 informative features
X, y = make_classification(n_samples=1000,
                           n_features=10,
                           n_informative=3,
                           n_redundant=0,
                           n_repeated=0,
                           n_classes=2,
                           random_state=0,
                           shuffle=False)
# Check the shape of the data
print(X[:10])
print(X.shape)
print(y[:100])
print(y.shape)
In [22]:
# Build a forest and compute the feature importances
forest = ExtraTreesClassifier(n_estimators=250,
                              random_state=0)
forest.fit(X, y)
importances = forest.feature_importances_
std = np.std([tree.feature_importances_ for tree in forest.estimators_],
             axis=0)
indices = np.argsort(importances)[::-1]
# Check that the feature importances are normalized to sum to 1
print(sum(importances))
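As a quick sanity check (a minimal sketch, not part of the original example): scikit-learn's ensemble feature_importances_ is just the per-tree importances averaged over all trees, and since each tree's importances already sum to 1, the average does too.
In [ ]:
# Sketch: the ensemble importance should equal the mean of the per-tree importances
mean_importances = np.mean([tree.feature_importances_ for tree in forest.estimators_],
                           axis=0)
print(np.allclose(importances, mean_importances))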
In [18]:
# Print the feature ranking
print("Feature ranking:")
for f in range(X.shape[1]):
    print("%d. feature %d (%f)" % (f + 1, indices[f], importances[indices[f]]))
# Plot the feature importances of the forest
plt.figure()
plt.title("Feature importances")
plt.bar(range(X.shape[1]), importances[indices],
        color="r", yerr=std[indices], align="center")
plt.xticks(range(X.shape[1]), indices)
plt.xlim([-1, X.shape[1]])
plt.show()
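Because make_classification was called with shuffle=False, the 3 informative features sit in columns 0 through 2, so the top of the ranking should largely recover them. A quick check, as a sketch:
In [ ]:
# Sketch: with shuffle=False the informative features are columns 0, 1, 2,
# so the highest-ranked indices should mostly come from that set
print(sorted(indices[:3]))
print(set(indices[:3]) <= {0, 1, 2})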
In [36]:
# Load the library with the iris dataset
from sklearn.datasets import load_iris
# Load scikit's train-test split function
from sklearn.model_selection import train_test_split
# Load scikit's random forest classifier library
from sklearn.ensemble import RandomForestClassifier
# Load numpy
import numpy as np
# Set the random seed for reproducibility
np.random.seed(1015)
In [37]:
# Create an object called iris with the iris data
iris = load_iris()
print(iris.keys())
# Observe the 3 classes in the iris dataset
print(iris.target)
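The iris data is small and balanced. As a quick sketch, np.bincount shows how the 150 samples split across the 3 classes, and the feature_names/target_names attributes label the columns and classes:
In [ ]:
# Sketch: class counts plus the human-readable names shipped with the dataset
print(np.bincount(iris.target))
print(iris.target_names)
print(iris.feature_names)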
In [38]:
X_train, X_test, y_train, y_test = train_test_split(iris.data, iris.target)
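By default train_test_split holds out 25% of the rows and does not stratify. If keeping the class proportions balanced in both splits matters, a stratified split is one option (a sketch only; it is not used in the rest of this notebook):
In [ ]:
# Sketch: a stratified 75/25 split that preserves the class proportions
X_tr, X_te, y_tr, y_te = train_test_split(iris.data, iris.target,
                                          test_size=0.25, stratify=iris.target,
                                          random_state=1015)
print(np.bincount(y_tr), np.bincount(y_te))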
In [39]:
print(X_train.shape)
print(X_test.shape)
print(y_train.shape)
print(y_test.shape)
In [46]:
print(X_train[:10])
print(y_train[:10])
In [47]:
from sklearn.ensemble import RandomForestClassifier
For now, just fit using 2 trees to keep things simple.
In [56]:
rf = RandomForestClassifier(n_estimators=2)
rf.fit(X_train, y_train)
Out[56]:
In [57]:
rf.predict(X_test)
rf.score(X_test, y_test)
Out[57]:
That's an accuracy of 94% just using 2 trees!
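A single random train/test split scored with only 2 randomly grown trees gives a noisy estimate, so the 94% should be read loosely. One way to get a more stable number (a sketch, using cross_val_score from sklearn.model_selection) is to cross-validate:
In [ ]:
# Sketch: average accuracy of a 2-tree forest over 5 folds
from sklearn.model_selection import cross_val_score
scores = cross_val_score(RandomForestClassifier(n_estimators=2, random_state=1015),
                         iris.data, iris.target, cv=5)
print(scores.mean(), scores.std())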
In [58]:
importances = rf.feature_importances_
std = np.std([tree.feature_importances_ for tree in rf.estimators_],
             axis=0)
indices = np.argsort(importances)[::-1]
# Check that the feature importances are normalized to sum to 1
print(sum(importances))
In [61]:
# Print the feature ranking
print("Feature ranking:")
for f in range(X_train.shape[1]):
    print("%d. feature %d (%f)" % (f + 1, indices[f], importances[indices[f]]))
# Plot the feature importances of the forest
plt.figure()
plt.title("Feature importances")
plt.bar(range(X_train.shape[1]), importances[indices],
        color="r", yerr=std[indices], align="center")
plt.xticks(range(X_train.shape[1]), indices)
plt.xlim([-1, X_train.shape[1]])
plt.show()
In [62]:
importances
Out[62]:
In [63]:
indices
Out[63]:
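The raw column indices above are hard to interpret on their own. A short sketch using the feature_names attribute from load_iris maps the ranking to readable names:
In [ ]:
# Sketch: print the ranking with the iris feature names instead of bare indices
for rank, idx in enumerate(indices, start=1):
    print("%d. %s (%f)" % (rank, iris.feature_names[idx], importances[idx]))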