In [14]:
from sklearn.datasets import load_boston  # note: load_boston was removed in scikit-learn 1.2, so this notebook assumes an older scikit-learn release
import sklearn.ensemble
import sklearn.model_selection
import numpy as np
In [15]:
boston = load_boston()
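As a quick sanity check (not one of the original cells), the Bunch returned by load_boston carries the feature matrix, the target prices, and the feature names:

print(boston.data.shape)      # (506, 13): 506 neighborhoods, 13 features
print(boston.feature_names)   # CRIM, ZN, INDUS, CHAS, NOX, RM, AGE, DIS, RAD, TAX, PTRATIO, B, LSTAT
print(boston.target[:5])      # median home values, in $1000s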
In [16]:
rf = sklearn.ensemble.RandomForestRegressor(n_estimators=1000)
In [17]:
train, test, labels_train, labels_test = sklearn.model_selection.train_test_split(boston.data, boston.target, train_size=0.80, test_size=0.20)
In [18]:
rf.fit(train, labels_train)
Out[18]:
In [19]:
print('Random Forest MSError', np.mean((rf.predict(test) - labels_test) ** 2))
In [20]:
print('MSError when predicting the mean', np.mean((labels_train.mean() - labels_test) ** 2))
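For a second, scale-free check (an addition, not part of the original notebook), the test-set R² compares the forest against the same predict-the-mean baseline, which by construction scores 0:

from sklearn.metrics import r2_score
print('Random Forest R2', r2_score(labels_test, rf.predict(test)))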
In [21]:
categorical_features = np.argwhere(np.array([len(set(boston.data[:,x])) for x in range(boston.data.shape[1])]) <= 10).flatten()
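The line above treats any column with at most 10 distinct values as categorical; a small sketch to see which columns that picks up (in the Boston data it should be CHAS and RAD):

print(boston.feature_names[categorical_features])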
In [22]:
import lime
import lime.lime_tabular
In [23]:
explainer = lime.lime_tabular.LimeTabularExplainer(train, feature_names=boston.feature_names, class_names=['price'], categorical_features=categorical_features, verbose=True, mode='regression')
In [24]:
i = 25
exp = explainer.explain_instance(test[i], rf.predict, num_features=5)
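A note on the contract here (my reading, not stated in the original cells): explain_instance perturbs test[i], scores the perturbed samples with the supplied rf.predict (a 2-d array in, 1-d predictions out), and fits a locally weighted, sparse linear model restricted to num_features=5 features. The kind of call LIME makes internally:

rf.predict(test[i].reshape(1, -1))   # a single row reshaped to a 2-d array; returns a 1-d prediction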
In [25]:
exp.show_in_notebook(show_table=True)
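show_in_notebook renders inside Jupyter only; outside a notebook the same explanation can be written to HTML or drawn with matplotlib, a sketch using lime's Explanation methods (the file name here is arbitrary):

exp.save_to_file('lime_boston_explanation.html')   # standalone HTML page
fig = exp.as_pyplot_figure()                       # bar chart of the feature weights (needs matplotlib)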
In [26]:
exp.as_list()
Out[26]:
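as_list() returns human-readable (description, weight) pairs; for programmatic access, the same weights keyed by feature index are available via as_map() (a small follow-up, not in the original cells):

exp.as_map()   # {label: [(feature_index, weight), ...]}; regression explanations use a single label key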