Random forest classification


SparkContext and SparkSession


In [1]:
from pyspark import SparkContext
sc = SparkContext(master = 'local')

from pyspark.sql import SparkSession
spark = SparkSession.builder \
          .appName("Python Spark SQL basic example") \
          .config("spark.some.config.option", "some-value") \
          .getOrCreate()

Random forest classification with PySpark


In [2]:
cuse = spark.read.csv('data/cuse_binary.csv', header=True, inferSchema=True)
cuse.show(5)


+---+---------+---------+---+
|age|education|wantsMore|  y|
+---+---------+---------+---+
|<25|      low|      yes|  0|
|<25|      low|      yes|  0|
|<25|      low|      yes|  0|
|<25|      low|      yes|  0|
|<25|      low|      yes|  0|
+---+---------+---------+---+
only showing top 5 rows
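
Before engineering features, it can help to sanity-check the schema and the class balance of the label column y. A minimal, optional check (not part of the original pipeline):

# optional: inspect column types and the label distribution
cuse.printSchema()
cuse.groupBy('y').count().show()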

Process categorical columns

Categorical columns


In [7]:
from pyspark.ml.feature import StringIndexer, OneHotEncoder, VectorAssembler
from pyspark.ml import Pipeline

categorical_columns = cuse.columns[:-1]
categorical_columns


Out[7]:
['age', 'education', 'wantsMore']

Build StringIndexer stages


In [9]:
stringindexer_stages = [StringIndexer(inputCol=c, outputCol='stringindexed_' + c) for c in categorical_columns]
# encode label column and add it to stringindexer stages
stringindexer_stages += [StringIndexer(inputCol='y', outputCol='label')]
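
StringIndexer assigns index 0 to the most frequent category. If you want to see the mapping for one column, you can fit a single indexer and inspect its labels; a small optional check, assuming the first stage indexes the age column (which follows from the list-comprehension order above):

# optional: fit one indexer and look at the learned category-to-index mapping
age_indexer_model = stringindexer_stages[0].fit(cuse)
print(age_indexer_model.labels)   # labels[i] is the category mapped to index i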

Build OneHotEncoder stages


In [10]:
onehotencoder_stages = [OneHotEncoder(inputCol='stringindexed_' + c, outputCol='onehot_'+c) for c in categorical_columns]
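
With the default dropLast=True, OneHotEncoder encodes k categories as a dummy vector of length k - 1, which is why onehot_age has size 3 while onehot_education and onehot_wantsMore have size 1 in the transformed data shown later. A quick optional check of that setting:

# optional: confirm the dropLast behaviour of the encoders
print(onehotencoder_stages[0].getDropLast())   # True by default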

Build VectorAssembler stage


In [13]:
feature_columns = ['onehot_' + c for c in categorical_columns]
vectorassembler_stage = VectorAssembler(inputCols=feature_columns, outputCol='features')

Build pipeline model


In [15]:
all_stages = stringindexer_stages + onehotencoder_stages + [vectorassembler_stage]
pipeline = Pipeline(stages=all_stages)

Fit pipeline model


In [17]:
pipeline_model = pipeline.fit(cuse)
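
The fitted PipelineModel keeps its fitted stages in order. As an optional check, the label indexer (the fourth stage, given how all_stages was assembled above) shows which raw value of y was mapped to label 1.0:

# optional: inspect the fitted label indexer (index 3 assuming the stage order built above)
label_indexer_model = pipeline_model.stages[3]
print(label_indexer_model.labels)   # labels[0] maps to 0.0, labels[1] maps to 1.0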

Transform data


In [19]:
final_columns = feature_columns + ['features', 'label']
cuse_df = pipeline_model.transform(cuse).select(final_columns)
cuse_df.show(5)


+-------------+----------------+----------------+-------------------+-----+
|   onehot_age|onehot_education|onehot_wantsMore|           features|label|
+-------------+----------------+----------------+-------------------+-----+
|(3,[2],[1.0])|       (1,[],[])|   (1,[0],[1.0])|(5,[2,4],[1.0,1.0])|  0.0|
|(3,[2],[1.0])|       (1,[],[])|   (1,[0],[1.0])|(5,[2,4],[1.0,1.0])|  0.0|
|(3,[2],[1.0])|       (1,[],[])|   (1,[0],[1.0])|(5,[2,4],[1.0,1.0])|  0.0|
|(3,[2],[1.0])|       (1,[],[])|   (1,[0],[1.0])|(5,[2,4],[1.0,1.0])|  0.0|
|(3,[2],[1.0])|       (1,[],[])|   (1,[0],[1.0])|(5,[2,4],[1.0,1.0])|  0.0|
+-------------+----------------+----------------+-------------------+-----+
only showing top 5 rows

Split data into training and test datasets


In [20]:
train, test = cuse_df.randomSplit([0.8, 0.2], seed=1234)
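
A quick optional check that the split is roughly 80/20:

# optional: confirm the sizes of the two splits
print(train.count(), test.count())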

Build cross-validation model

Estimator


In [22]:
from pyspark.ml.classification import RandomForestClassifier

random_forest = RandomForestClassifier(featuresCol='features', labelCol='label')

Parameter grid


In [23]:
from pyspark.ml.tuning import ParamGridBuilder

param_grid = ParamGridBuilder().\
    addGrid(random_forest.maxDepth, [2, 3, 4]).\
    addGrid(random_forest.minInfoGain, [0.0, 0.1, 0.2, 0.3]).\
    build()
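
The builder expands the grid into every combination of the two parameters, i.e. 3 x 4 = 12 parameter maps, each of which is fitted on every cross-validation fold. You can confirm the grid size directly, since param_grid is just a Python list of ParamMaps:

# optional: number of parameter combinations to be evaluated
print(len(param_grid))   # 3 maxDepth values x 4 minInfoGain values = 12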

Evaluator


In [24]:
from pyspark.ml.evaluation import BinaryClassificationEvaluator

evaluator = BinaryClassificationEvaluator()

Build cross-validation model


In [27]:
from pyspark.ml.tuning import CrossValidator

crossvalidation = CrossValidator(estimator=random_forest, estimatorParamMaps=param_grid, evaluator=evaluator)
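
CrossValidator uses 3 folds by default. If you want more folds, at the cost of more model fits, numFolds can be set explicitly; a sketch of a variant of the cell above (not used further in this notebook):

# sketch: the same cross-validator with 5 folds instead of the default 3
crossvalidation_5fold = CrossValidator(estimator=random_forest,
                                       estimatorParamMaps=param_grid,
                                       evaluator=evaluator,
                                       numFolds=5)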

Fit cross-validation model


In [29]:
# Note: the cross-validator is fitted on the full cuse_df here rather than on `train` only,
# so the test rows also influence model selection; fit on `train` for a strict hold-out evaluation.
crossvalidation_mod = crossvalidation.fit(cuse_df)
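
After fitting, the CrossValidatorModel stores the average metric (here areaUnderROC) for each parameter map in avgMetrics, in the same order as param_grid. An optional way to see how each combination scored:

# optional: pair each parameter map with its mean cross-validated AUC
for params, auc in zip(param_grid, crossvalidation_mod.avgMetrics):
    print({p.name: v for p, v in params.items()}, auc)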

Prediction

Prediction on training data


In [30]:
pred_train = crossvalidation_mod.transform(train)
pred_train.show(5)


+----------+----------------+----------------+---------+-----+--------------------+--------------------+----------+
|onehot_age|onehot_education|onehot_wantsMore| features|label|       rawPrediction|         probability|prediction|
+----------+----------------+----------------+---------+-----+--------------------+--------------------+----------+
| (3,[],[])|       (1,[],[])|       (1,[],[])|(5,[],[])|  0.0|[9.61727693784312...|[0.48086384689215...|       1.0|
| (3,[],[])|       (1,[],[])|       (1,[],[])|(5,[],[])|  0.0|[9.61727693784312...|[0.48086384689215...|       1.0|
| (3,[],[])|       (1,[],[])|       (1,[],[])|(5,[],[])|  0.0|[9.61727693784312...|[0.48086384689215...|       1.0|
| (3,[],[])|       (1,[],[])|       (1,[],[])|(5,[],[])|  0.0|[9.61727693784312...|[0.48086384689215...|       1.0|
| (3,[],[])|       (1,[],[])|       (1,[],[])|(5,[],[])|  0.0|[9.61727693784312...|[0.48086384689215...|       1.0|
+----------+----------------+----------------+---------+-----+--------------------+--------------------+----------+
only showing top 5 rows

Prediction on test data


In [31]:
pred_test = crossvalidation_mod.transform(test)
pred_test.show(5)


+----------+----------------+----------------+---------+-----+--------------------+--------------------+----------+
|onehot_age|onehot_education|onehot_wantsMore| features|label|       rawPrediction|         probability|prediction|
+----------+----------------+----------------+---------+-----+--------------------+--------------------+----------+
| (3,[],[])|       (1,[],[])|       (1,[],[])|(5,[],[])|  0.0|[9.61727693784312...|[0.48086384689215...|       1.0|
| (3,[],[])|       (1,[],[])|       (1,[],[])|(5,[],[])|  0.0|[9.61727693784312...|[0.48086384689215...|       1.0|
| (3,[],[])|       (1,[],[])|       (1,[],[])|(5,[],[])|  0.0|[9.61727693784312...|[0.48086384689215...|       1.0|
| (3,[],[])|       (1,[],[])|       (1,[],[])|(5,[],[])|  0.0|[9.61727693784312...|[0.48086384689215...|       1.0|
| (3,[],[])|       (1,[],[])|       (1,[],[])|(5,[],[])|  0.0|[9.61727693784312...|[0.48086384689215...|       1.0|
+----------+----------------+----------------+---------+-----+--------------------+--------------------+----------+
only showing top 5 rows

Prediction performance

We evaluate the model with the area under the Receiver Operating Characteristic curve (AUC) on both the training and the test data.


In [39]:
print('Area under ROC on training data: ', evaluator.setMetricName('areaUnderROC').evaluate(pred_train), "\n"
     'Area under ROC on test data: ', evaluator.setMetricName('areaUnderROC').evaluate(pred_test))


Area under ROC on training data:  0.681918715706039 
Area under ROC on test data:  0.6755505721350122
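
The BinaryClassificationEvaluator reports AUC rather than plain accuracy. If you also want accuracy, a minimal sketch using MulticlassClassificationEvaluator, which provides an 'accuracy' metric and works for binary labels as well:

from pyspark.ml.evaluation import MulticlassClassificationEvaluator

# optional: plain classification accuracy on the test predictions
accuracy_evaluator = MulticlassClassificationEvaluator(labelCol='label',
                                                       predictionCol='prediction',
                                                       metricName='accuracy')
print('Accuracy on test data: ', accuracy_evaluator.evaluate(pred_test))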

Confusion matrix

Confusion matrix from training data


In [43]:
label_pred_train = pred_train.select('label', 'prediction')
label_pred_train.rdd.zipWithIndex().countByKey()


Out[43]:
defaultdict(int,
            {Row(label=0.0, prediction=0.0): 746,
             Row(label=0.0, prediction=1.0): 167,
             Row(label=1.0, prediction=0.0): 220,
             Row(label=1.0, prediction=1.0): 194})

Confusion matrix from test data


In [44]:
label_pred_test = pred_test.select('label', 'prediction')
label_pred_test.rdd.zipWithIndex().countByKey()


Out[44]:
defaultdict(int,
            {Row(label=0.0, prediction=0.0): 151,
             Row(label=0.0, prediction=1.0): 36,
             Row(label=1.0, prediction=0.0): 50,
             Row(label=1.0, prediction=1.0): 43})
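
The zipWithIndex/countByKey pattern above counts (label, prediction) pairs through the RDD API. A more readable alternative is the DataFrame crosstab method, which returns the confusion matrix as a small DataFrame:

# alternative confusion matrix using the DataFrame API
pred_test.crosstab('label', 'prediction').show()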

Best model and parameters


In [47]:
# these getters are not exposed on the Python model in older PySpark versions,
# so we read them from the underlying Java object
print('max depth: ', crossvalidation_mod.bestModel._java_obj.getMaxDepth(), "\n",
     'min information gain: ', crossvalidation_mod.bestModel._java_obj.getMinInfoGain())


max depth:  4 
 min information gain:  0.0
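
bestModel is the RandomForestClassificationModel refit with the winning parameters, so you can also inspect which one-hot features it relied on. A small optional sketch:

# optional: impurity-based feature importances of the best model, in the same
# order as the VectorAssembler inputs (onehot_age, onehot_education, onehot_wantsMore)
print(crossvalidation_mod.bestModel.featureImportances)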
