In [14]:
from sklearn.datasets import load_boston
import sklearn.ensemble
import sklearn.model_selection
import numpy as np

In [15]:
boston = load_boston()
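Note: load_boston was deprecated in scikit-learn 1.0 and removed in 1.2. On a recent scikit-learn, a minimal workaround is to pull the same data from OpenML instead (assuming the OpenML mirror of the dataset, data_id=531, is still available):

from sklearn.datasets import fetch_openml
boston = fetch_openml(data_id=531, as_frame=False)  # OpenML copy of the Boston housing data
boston.target = boston.target.astype(float)  # ensure a numeric regression target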

In [16]:
rf = sklearn.ensemble.RandomForestRegressor(n_estimators=1000)
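Fitting 1000 trees on a single core can be slow. Both n_jobs and random_state are standard RandomForestRegressor parameters, so a variant like the following trains in parallel and reproducibly:

rf = sklearn.ensemble.RandomForestRegressor(n_estimators=1000, n_jobs=-1, random_state=0)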

In [17]:
train, test, labels_train, labels_test = sklearn.model_selection.train_test_split(
    boston.data, boston.target, train_size=0.80, test_size=0.20)

In [18]:
rf.fit(train, labels_train)


Out[18]:
RandomForestRegressor(bootstrap=True, criterion='mse', max_depth=None,
           max_features='auto', max_leaf_nodes=None,
           min_impurity_decrease=0.0, min_impurity_split=None,
           min_samples_leaf=1, min_samples_split=2,
           min_weight_fraction_leaf=0.0, n_estimators=1000, n_jobs=1,
           oob_score=False, random_state=None, verbose=0, warm_start=False)

In [19]:
print('Random Forest MSError', np.mean((rf.predict(test) - labels_test) ** 2))


Random Forest MSError 12.492700792156878

In [20]:
print('MSError when predicting the mean', np.mean((labels_train.mean() - labels_test) ** 2))


MSError when predicting the mean 101.94960262930775
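For reference, the same two numbers can be computed with scikit-learn's built-in metric instead of by hand:

from sklearn.metrics import mean_squared_error
print('Random Forest MSError', mean_squared_error(labels_test, rf.predict(test)))
print('MSError when predicting the mean',
      mean_squared_error(labels_test, np.full_like(labels_test, labels_train.mean())))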

In [21]:
# Treat any column with at most 10 distinct values as categorical.
categorical_features = np.argwhere(
    np.array([len(set(boston.data[:, x])) for x in range(boston.data.shape[1])]) <= 10
).flatten()

In [22]:
import lime
import lime.lime_tabular

In [23]:
explainer = lime.lime_tabular.LimeTabularExplainer(
    train, feature_names=boston.feature_names, class_names=['price'],
    categorical_features=categorical_features, verbose=True, mode='regression')
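The explainer needs the training data to compute per-feature statistics for its perturbations. By default it also discretizes continuous features into quartile bins (discretize_continuous=True), which is why the explanations below report conditions such as '7.43 < LSTAT <= 11.73' rather than raw feature values.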

In [24]:
i = 25
exp = explainer.explain_instance(test[i], rf.predict, num_features=5)


Intercept 23.3472933614
Prediction_local [ 22.26816652]
Right: 22.0623
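With verbose=True, explain_instance prints the intercept of the fitted local linear model, that model's prediction for this instance (Prediction_local), and, after 'Right:', the random forest's own prediction, so you can check how closely the local surrogate tracks the black box at this point.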

In [25]:
exp.show_in_notebook(show_table=True)

(The interactive LIME explanation visualization renders inline here in the notebook.)

In [26]:
exp.as_list()


Out[26]:
[('7.43 < LSTAT <= 11.73', 1.6485568658614111),
 ('6.18 < RM <= 6.57', -1.5185815542882575),
 ('284.00 < TAX <= 341.00', -0.46847722088708765),
 ('0.27 < CRIM <= 4.28', -0.39482132673497322),
 ('5.81 < INDUS <= 9.90', -0.34580360269472482)]
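Each tuple pairs a discretized feature condition with its weight in the local linear model: for this instance, satisfying 7.43 < LSTAT <= 11.73 raises the local prediction by about 1.65, while the RM, TAX, CRIM and INDUS conditions pull it down. The explanation can also be written out as a standalone HTML page via lime's Explanation API (the filename here is just an example):

exp.save_to_file('boston_explanation.html')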