In [ ]:
from sklearn import tree
from sklearn.model_selection import GridSearchCV
In [ ]:
if 'features_train' not in globals():
    %run ../dev/environment_setup.ipynb
In [ ]:
import pickle
from sklearn.model_selection import train_test_split
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_selection import SelectPercentile, f_classif

def preprocess2(percentile):
    ### the words (features) and authors (labels), already largely preprocessed
    ### this preprocessing will be repeated in the text learning mini-project
    word_data = pickle.load(open("../data/word_data.pkl", "rb"))
    authors = pickle.load(open("../data/email_authors.pkl", "rb"))

    ### test_size is the proportion of events assigned to the test set (the remainder go into training)
    features_train, features_test, labels_train, labels_test = train_test_split(
        word_data, authors, test_size=0.1, random_state=42)

    ### text vectorization -- go from strings to lists of numbers
    vectorizer = TfidfVectorizer(sublinear_tf=True, max_df=0.5,
                                 stop_words='english')
    features_train_transformed = vectorizer.fit_transform(features_train)
    features_test_transformed = vectorizer.transform(features_test)

    ### feature selection, because text is super high dimensional and
    ### can be really computationally chewy as a result
    selector = SelectPercentile(f_classif, percentile=percentile)
    selector.fit(features_train_transformed, labels_train)
    features_train_transformed = selector.transform(features_train_transformed).toarray()
    features_test_transformed = selector.transform(features_test_transformed).toarray()

    return features_train_transformed, features_test_transformed, labels_train, labels_test
In [ ]:
features_train, features_test, labels_train, labels_test = preprocess2(1)
In [ ]:
parameters = {"criterion": ["gini", "entropy"],
              "min_samples_split": [2, 10, 20],
              "max_depth": [None, 2, 5, 10],
              "min_samples_leaf": [1, 5, 10],
              "max_leaf_nodes": [None, 5, 10, 20],
              }
dt = tree.DecisionTreeClassifier()
clf = GridSearchCV(dt, parameters)
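The grid search object is only constructed here; the actual fitting happens inside grid_train_predict, a helper assumed to be defined in environment_setup.ipynb and not shown in this notebook. In case that helper is unavailable, a minimal sketch of fitting and scoring the grid search directly on the features returned by preprocess2 above (the accuracy_score import is the only name added here):
In [ ]:
from sklearn.metrics import accuracy_score
# exhaustively fit every parameter combination on the training split (can be slow for large grids)
clf.fit(features_train, labels_train)
# predict with the refit best estimator and report accuracy on the held-out test split
pred = clf.predict(features_test)
print("Test accuracy:", accuracy_score(labels_test, pred))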
In [ ]:
grid_train_predict("Decision Tree Focus on Best Parameters with GridSearchCV")
print(sorted(clf.cv_results_.keys()))
param = "Best Params: " + str(clf.best_params_)
print(param)
score = "Best Average Score: " + str(clf.best_score_)
print(score)
# print("BEST ESTIMATOR:")
# print(clf.best_estimator_)
# print("BEST SCORE:")
# print(clf.best_score_)
# print(clf.best_estimator_.score)
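The commented-out lines hint at inspecting the best estimator directly. A minimal sketch of scoring the refit best estimator on the held-out test split, assuming clf has already been fitted (e.g. inside grid_train_predict):
In [ ]:
from sklearn.metrics import accuracy_score
# the decision tree refit on the full training split with the best parameter combination
best_clf = clf.best_estimator_
test_acc = accuracy_score(labels_test, best_clf.predict(features_test))
print("Test accuracy of best estimator:", test_acc)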