In [ ]:
# Project base directory and raw data files
basedir = "/Users/chanjinpark/GitHub/NRFAnalysis/"
docs = sc.textFile(basedir + "data/docs")                                  # document ids, aligned line-by-line with corpus
corpus = sc.textFile(basedir + "data/corpus").map(lambda x: x.split(','))  # comma-separated tokens per document

# Map each document id to its first two area fields from meta.txt
meta = {}
with open(basedir + "data/meta.txt") as f:
    for l in f:
        docid = l[0:l.find('-')]
        m = l[l.find('-') + 1:]
        area = m.split(':::')[1].split(',')
        meta[docid] = area[0:2]

meta['2013R1A1A1004576'][1]  # sanity check: second area field of one project
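
The exact layout of meta.txt is not shown in this notebook; the slicing above implies each line starts with a project id, then '-', then ':::'-separated fields whose second field is a comma-separated list of areas. A minimal illustration with a made-up line (only the id is real, the other fields are placeholders):

In [ ]:
# Hypothetical meta.txt line, used only to illustrate the parsing above
sample = "2013R1A1A1004576-title:::areaA,areaB,areaC\n"
sample_id = sample[0:sample.find('-')]
fields = sample[sample.find('-') + 1:]
areas = fields.split(':::')[1].split(',')
sample_id, areas[0:2]  # ('2013R1A1A1004576', ['areaA', 'areaB'])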

# Index every distinct second area field as a float class label (required by MLlib)
crbindex = {}
for i, v in enumerate(sorted(set(map(lambda m: m[1], meta.values())))):
    crbindex[v] = float(i)
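
A quick sanity check on the label index, just to see how many classes the classifier will have to separate:

In [ ]:
# Number of distinct area classes and the first few (area, label) pairs
len(crbindex), sorted(crbindex.items(), key=lambda kv: kv[1])[:5]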

from pyspark.mllib.feature import HashingTF, IDF

# Hashed term frequencies, then IDF weighting; terms appearing in fewer than 2 documents get zero weight
hashingTF = HashingTF()
tf = hashingTF.transform(corpus)
tf.cache()
idf = IDF(minDocFreq=2).fit(tf)
tfidf = idf.transform(tf)
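
HashingTF hashes terms into a fixed-size feature space (2^20 dimensions by default), so each element of tfidf is a SparseVector. A quick peek at one vector, assuming the RDDs above loaded as expected:

In [ ]:
# One TF-IDF vector: total dimensionality and number of non-zero terms
v = tfidf.first()
v.size, len(v.indices)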

from pyspark.mllib.linalg import SparseVector, DenseVector
from pyspark.mllib.regression import LabeledPoint

# Pair each document id with its TF-IDF vector and label it with its area's class index
data = docs.zip(tfidf).map(lambda d: LabeledPoint(crbindex[meta[d[0]][1]], d[1]))  # alternative: DenseVector(d[1].toArray())
training, test = data.randomSplit([0.7, 0.3])
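
randomSplit is only approximately 70/30, so it can be worth confirming the actual split sizes before training:

In [ ]:
# Actual number of training and test documents
training.count(), test.count()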

from pyspark.mllib.classification import NaiveBayes, NaiveBayesModel
from pyspark.mllib.util import MLUtils
from pyspark.mllib.evaluation import MulticlassMetrics

# Multinomial Naive Bayes with smoothing parameter 1.0
model = NaiveBayes.train(training, 1.0)
predictionAndLabels = test.map(lambda p: (model.predict(p.features), p.label))
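
Before the full multiclass metrics, overall test accuracy can be read directly off the prediction/label pairs (a minimal sketch):

In [ ]:
# Fraction of test documents whose predicted class equals the true label
accuracy = predictionAndLabels.filter(lambda pl: pl[0] == pl[1]).count() / float(test.count())
print("Accuracy = %s" % accuracy)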


metrics = MulticlassMetrics(predictionAndLabels)

precision = metrics.precision()
recall = metrics.recall()
f1Score = metrics.fMeasure()
print("Summary Stats")
print("Precision = %s" % precision)
print("Recall = %s" % recall)
print("F1 Score = %s" % f1Score)

# Statistics by class
labels = data.map(lambda lp: lp.label).distinct().collect()

for label in sorted(labels):
    print("Class %s precision = %s" % (label, metrics.precision(label)))
    print("Class %s recall = %s" % (label, metrics.recall(label)))
    print("Class %s F1 Measure = %s" % (label, metrics.fMeasure(label, beta=1.0)))

# Weighted stats
print("Weighted recall = %s" % metrics.weightedRecall)
print("Weighted precision = %s" % metrics.weightedPrecision)
print("Weighted F(1) Score = %s" % metrics.weightedFMeasure())
print("Weighted F(0.5) Score = %s" % metrics.weightedFMeasure(beta=0.5))
print("Weighted false positive rate = %s" % metrics.weightedFalsePositiveRate)
