In [1]:
import findspark
findspark.init()
import pyspark
sc = pyspark.SparkContext()

In [2]:
from pyspark.mllib.linalg import Vectors
from pyspark.mllib.regression import LabeledPoint

from string import split,strip   # Python 2 string-module helpers used below to parse the feature names and CSV lines

from pyspark.mllib.tree import GradientBoostedTrees, GradientBoostedTreesModel
from pyspark.mllib.tree import RandomForest, RandomForestModel

from pyspark.mllib.util import MLUtils

Higgs data set

Data Set Information:
The data has been produced using Monte Carlo simulations. The first column is the class label (1 for signal, 0 for background), followed by 28 features. The first 21 features (columns 2-22) are kinematic properties measured by the particle detectors in the accelerator. The last seven features are functions of the first 21; these are high-level features derived by physicists to help discriminate between the two classes. There is interest in using deep learning methods to obviate the need for physicists to manually develop such features. Benchmark results using Bayesian Decision Trees from a standard physics package and 5-layer neural networks are presented in the original paper. The last 500,000 examples are used as a test set.


In [3]:
#define feature names
feature_text='lepton pT, lepton eta, lepton phi, missing energy magnitude, missing energy phi, jet 1 pt, jet 1 eta, jet 1 phi, jet 1 b-tag, jet 2 pt, jet 2 eta, jet 2 phi, jet 2 b-tag, jet 3 pt, jet 3 eta, jet 3 phi, jet 3 b-tag, jet 4 pt, jet 4 eta, jet 4 phi, jet 4 b-tag, m_jj, m_jjj, m_lv, m_jlv, m_bb, m_wbb, m_wwbb'
features=[strip(a) for a in split(feature_text,',')]
print len(features),features


28 ['lepton pT', 'lepton eta', 'lepton phi', 'missing energy magnitude', 'missing energy phi', 'jet 1 pt', 'jet 1 eta', 'jet 1 phi', 'jet 1 b-tag', 'jet 2 pt', 'jet 2 eta', 'jet 2 phi', 'jet 2 b-tag', 'jet 3 pt', 'jet 3 eta', 'jet 3 phi', 'jet 3 b-tag', 'jet 4 pt', 'jet 4 eta', 'jet 4 phi', 'jet 4 b-tag', 'm_jj', 'm_jjj', 'm_lv', 'm_jlv', 'm_bb', 'm_wbb', 'm_wwbb']

In [4]:
# create a directory called higgs, download and decompress HIGGS.csv.gz into it

from os.path import exists
if not exists('higgs'):
    print "creating directory higgs"
    !mkdir higgs
%cd higgs
if not exists('HIGGS.csv'):
    if not exists('HIGGS.csv.gz'):
        print 'downloading HIGGS.csv.gz'
        !curl -O http://archive.ics.uci.edu/ml/machine-learning-databases/00280/HIGGS.csv.gz
    print 'decompressing HIGGS.csv.gz --- May take 5-10 minutes'
    !gunzip -f HIGGS.csv.gz
!ls -l
%cd ..


/Users/alexegg/Development/DSE/DSE230/hmwk5/higgs
total 15694336
-rw-r--r--  1 alexegg  staff  8035497980 May 17 23:36 HIGGS.csv
/Users/alexegg/Development/DSE/DSE230/hmwk5
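
A quick sanity check (not part of the original notebook) can confirm the file layout before building RDDs: each line of HIGGS.csv should contain one label column followed by the 28 features, i.e. 29 comma-separated fields. This sketch assumes the file was decompressed to higgs/HIGGS.csv as above.

# Verify the first line of the CSV has 29 fields (1 label + 28 features).
with open('higgs/HIGGS.csv') as f:
    first_line = f.readline()
print len(first_line.split(','))   # expected: 29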

As in the previous notebook, create RDDs from the raw data and build Gradient Boosted Trees and Random Forest models. Consider sampling 1% of the data, since the full dataset is too large for a local machine.


In [5]:
# Read the file into an RDD
# If doing this on a real cluster, you need the file to be available on all nodes, ideally in HDFS.
path='higgs/HIGGS.csv'
inputRDD=sc.textFile(path)
inputRDD.first()   # peek at the raw first line (shown in the output below)


Out[5]:
u'1.000000000000000000e+00,8.692932128906250000e-01,-6.350818276405334473e-01,2.256902605295181274e-01,3.274700641632080078e-01,-6.899932026863098145e-01,7.542022466659545898e-01,-2.485731393098831177e-01,-1.092063903808593750e+00,0.000000000000000000e+00,1.374992132186889648e+00,-6.536741852760314941e-01,9.303491115570068359e-01,1.107436060905456543e+00,1.138904333114624023e+00,-1.578198313713073730e+00,-1.046985387802124023e+00,0.000000000000000000e+00,6.579295396804809570e-01,-1.045456994324922562e-02,-4.576716944575309753e-02,3.101961374282836914e+00,1.353760004043579102e+00,9.795631170272827148e-01,9.780761599540710449e-01,9.200048446655273438e-01,7.216574549674987793e-01,9.887509346008300781e-01,8.766783475875854492e-01'
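
As a side note (not in the original notebook), you can check how many partitions Spark split the roughly 8 GB text file into; sc.textFile also accepts a minPartitions argument if you want more parallelism.

# Number of partitions the input RDD was split into.
print inputRDD.getNumPartitions()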

In [6]:
# Transform the text RDD into an RDD of LabeledPoints
Data=inputRDD.map(lambda line: [float(strip(x)) for x in line.split(',')])\
     .map(lambda a: LabeledPoint(a[0], a[1:]))
Data.take(3)   # inspect the first three LabeledPoints (shown in the output below)


Out[6]:
[LabeledPoint(1.0, [0.869293212891,-0.635081827641,0.22569026053,0.327470064163,-0.689993202686,0.754202246666,-0.24857313931,-1.09206390381,0.0,1.37499213219,-0.653674185276,0.930349111557,1.10743606091,1.13890433311,-1.57819831371,-1.0469853878,0.0,0.65792953968,-0.0104545699432,-0.0457671694458,3.10196137428,1.35376000404,0.979563117027,0.978076159954,0.920004844666,0.721657454967,0.988750934601,0.876678347588]),
 LabeledPoint(1.0, [0.907542109489,0.329147279263,0.359411865473,1.4979698658,-0.313009530306,1.09553062916,-0.55752491951,-1.58822977543,2.1730761528,0.812581181526,-0.213641926646,1.27101457119,2.21487212181,0.499993950129,-1.26143181324,0.732156157494,0.0,0.398700892925,-1.13893008232,-0.000819110195152,0.0,0.302219897509,0.833048164845,0.985699653625,0.978098392487,0.779732167721,0.992355763912,0.798342585564]),
 LabeledPoint(1.0, [0.798834741116,1.47063875198,-1.63597476482,0.45377317071,0.425629168749,1.1048746109,1.28232228756,1.38166427612,0.0,0.851737201214,1.54065895081,-0.819689512253,2.21487212181,0.993489921093,0.356080114841,-0.208777546883,2.54822444916,1.25695455074,1.12884759903,0.900460839272,0.0,0.909753262997,1.1083304882,0.985692203045,0.95133125782,0.803251504898,0.865924417973,0.780117571354])]
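
Optionally (a small addition, not in the original notebook) you can check how balanced the two classes are before sampling. Note that countByValue() triggers a full pass over all ~11 million rows, so it takes a while on a local machine.

# Count how many examples carry each label (1.0 = signal, 0.0 = background).
print Data.map(lambda lp: lp.label).countByValue()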

In [7]:
Data1=Data.sample(False,0.01).cache()
(trainingData,testData)=Data1.randomSplit([0.7,0.3])

print 'Sizes: Data1=%d, trainingData=%d, testData=%d'%(Data1.count(),trainingData.cache().count(),testData.cache().count())


Sizes: Data1=109606, trainingData=76942, testData=32664
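
If you want the sample and the train/test split to be reproducible across runs, both sample() and randomSplit() accept an optional seed. A minimal variant of the cell above (the seed value 42 is arbitrary, not from the original notebook):

# Same 1% sample and 70/30 split, but with fixed seeds for reproducibility.
Data1 = Data.sample(False, 0.01, seed=42).cache()
(trainingData, testData) = Data1.randomSplit([0.7, 0.3], seed=42)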

In [8]:
from time import time
errors={}
for depth in [1,3,6,10]:
    start=time()
    model=GradientBoostedTrees.trainClassifier(trainingData, categoricalFeaturesInfo={}, numIterations=10, maxDepth=depth)
    #print model.toDebugString()
    errors[depth]={}
    dataSets={'train':trainingData,'test':testData}
    for name in dataSets.keys():  # Calculate errors on train and test sets
        data=dataSets[name]
        Predicted=model.predict(data.map(lambda x: x.features))

        LabelsAndPredictions = data.map(lambda lp: lp.label).zip(Predicted)
        Err = LabelsAndPredictions.filter(lambda (v,p): v != p).count()/float(data.count())
        errors[depth][name]=Err
    print depth,errors[depth],int(time()-start),'seconds'
print errors


1 {'test': 0.3709282390399216, 'train': 0.3719165085388994} 88 seconds
3 {'test': 0.3286798922361009, 'train': 0.32232070910556004} 98 seconds
6 {'test': 0.300330639235856, 'train': 0.29040056146188037} 146 seconds
10 {'test': 0.29977957384276266, 'train': 0.19645967092095346} 759 seconds
{1: {'test': 0.3709282390399216, 'train': 0.3719165085388994}, 10: {'test': 0.29977957384276266, 'train': 0.19645967092095346}, 3: {'test': 0.3286798922361009, 'train': 0.32232070910556004}, 6: {'test': 0.300330639235856, 'train': 0.29040056146188037}}
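
Since the errors dict maps depth to train/test error, a one-liner (not part of the original notebook) can pull out the depth with the lowest test error:

# Depth with the lowest test error for the boosted-trees runs.
best_depth = min(errors, key=lambda d: errors[d]['test'])
print 'best depth:', best_depth, ' test error:', errors[best_depth]['test']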

In [9]:
B10 = errors
# Plot train/test error vs. tree depth
%pylab inline
from plot_utils import *
make_figure([B10],['10Trees'],Title='Boosting using 1% of the data')


Populating the interactive namespace from numpy and matplotlib
WARNING: pylab import has clobbered these variables: ['split']
`%matplotlib` prevents importing * from pylab and numpy
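
plot_utils is a course-provided helper, so make_figure may not be available everywhere. A minimal matplotlib sketch (an addition, assuming the B10 dict from the previous cell) that draws the same train/test error curves:

import matplotlib.pyplot as plt
depths = sorted(B10.keys())
plt.plot(depths, [B10[d]['train'] for d in depths], label='train error')
plt.plot(depths, [B10[d]['test'] for d in depths], label='test error')
plt.xlabel('max depth')
plt.ylabel('error')
plt.legend()
plt.title('Boosting, 10 trees, 1% sample')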

In [10]:
from time import time
errors={}
for depth in [1,3,6,10,15,20]:
    start=time()
    model = RandomForest.trainClassifier(trainingData, numClasses=2, categoricalFeaturesInfo={},
                                     numTrees=10, featureSubsetStrategy="auto",
                                     impurity='gini', maxDepth=depth)
    errors[depth]={}
    dataSets={'train':trainingData,'test':testData}
    for name in dataSets.keys():  # Calculate errors on train and test sets
        data=dataSets[name]
        Predicted=model.predict(data.map(lambda x: x.features))

        LabelsAndPredictions = data.map(lambda lp: lp.label).zip(Predicted)
        Err = LabelsAndPredictions.filter(lambda (v,p): v != p).count()/float(data.count())
        errors[depth][name]=Err
    print depth,errors[depth],int(time()-start),'seconds'
print errors


1 {'test': 0.42444281165809455, 'train': 0.428842504743833} 30 seconds
3 {'test': 0.37236713201077637, 'train': 0.3727742975228094} 30 seconds
6 {'test': 0.32283247612049965, 'train': 0.31461360505315694} 61 seconds
10 {'test': 0.30330026940974775, 'train': 0.2656286553507837} 169 seconds
15 {'test': 0.30369826108253734, 'train': 0.13319123495620078} 524 seconds
20 {'test': 0.3152706343374969, 'train': 0.03383067765329729} 1146 seconds
{1: {'test': 0.42444281165809455, 'train': 0.428842504743833}, 3: {'test': 0.37236713201077637, 'train': 0.3727742975228094}, 6: {'test': 0.32283247612049965, 'train': 0.31461360505315694}, 10: {'test': 0.30330026940974775, 'train': 0.2656286553507837}, 15: {'test': 0.30369826108253734, 'train': 0.13319123495620078}, 20: {'test': 0.3152706343374969, 'train': 0.03383067765329729}}
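
For a quick numeric comparison (a small addition, not in the original notebook), the test errors of the two models can be printed side by side at the depths both were trained on; B10 holds the boosting results from the earlier cell and errors now holds the random-forest results:

# Side-by-side test errors: Gradient Boosted Trees vs Random Forest.
for depth in [1, 3, 6, 10]:
    print depth, 'GBT test:', B10[depth]['test'], ' RF test:', errors[depth]['test']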

In [13]:
RF_10trees = errors
# Plot train/test error vs. tree depth
make_figure([RF_10trees],['10Trees'],Title='Random Forests using 1% of the data')



In [14]:
make_figure([RF_10trees, B10],['RF 10 trees', 'GBT 10 trees'],Title='GBT vs RF')