Naive Bayes - Trabalho

Questão 1

Implemente um classificador Naive Bayes para o problema de predizer a qualidade de um carro. Para este fim, utilizaremos um conjunto de dados referente à qualidade de carros, disponível no UCI. Este dataset de carros possui as seguintes features e classe:

Atributos

  1. buying: vhigh, high, med, low
  2. maint: vhigh, high, med, low
  3. doors: 2, 3, 4, 5, more
  4. persons: 2, 4, more
  5. lug_boot: small, med, big
  6. safety: low, med, high

Classes

  1. unacc, acc, good, vgood

Questão 2

Crie uma versão de sua implementação usando as funções disponíveis na biblioteca SciKitLearn para o Naive Bayes (veja aqui)

Questão 3

Analise a acurácia dos dois algoritmos e discuta a sua solução.


In [83]:
import csv
import random
import math
import pandas as pd
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report

def separateByClass(dataset):
    """Group the rows of *dataset* (a DataFrame) by class label.

    The class label is assumed to be the last column of each row.
    Returns a dict mapping class value -> list of row Series.
    """
    separated = {}
    for _, row in dataset.iterrows():
        # Index positionally with .iloc: plain row[-1] on a labeled Series
        # relies on the deprecated positional fallback and raises KeyError
        # on pandas >= 2.0.
        label = row.iloc[-1]
        separated.setdefault(label, []).append(row)
    return separated

def mean(numbers):
    """Return the arithmetic mean of *numbers* as a float."""
    total = 0.0
    for value in numbers:
        total += value
    return total / len(numbers)
 
def stdev(numbers):
    """Return the sample standard deviation (n-1 denominator) of *numbers*.

    Returns 0.0 when fewer than two samples are given — the original
    raised ZeroDivisionError for a single-row class.  A 0.0 result is
    already handled downstream, which substitutes a very large stdev so
    the attribute contributes an almost-uniform probability.
    """
    n = len(numbers)
    if n < 2:
        return 0.0
    avg = sum(numbers) / float(n)
    variance = sum((x - avg) ** 2 for x in numbers) / float(n - 1)
    return math.sqrt(variance)

def summarize(dataset):
    """Return per-attribute (mean, stdev) pairs for *dataset*.

    The trailing column is the class label and is excluded from the
    summaries.
    """
    columns = list(zip(*dataset))
    return [(mean(column), stdev(column)) for column in columns[:-1]]

def summarizeByClass(dataset):
    """Map each class value to the per-attribute summaries of its rows."""
    return {
        label: summarize(rows)
        for label, rows in separateByClass(dataset).items()
    }

def calculateProbability(x, mean, stdev):
    """Gaussian probability density of *x* under N(mean, stdev**2)."""
    squared_diff = math.pow(x - mean, 2)
    exponent = math.exp(-(squared_diff / (2 * math.pow(stdev, 2))))
    normalizer = math.sqrt(2 * math.pi) * stdev
    return exponent / normalizer

def calculateClassProbabilities(summaries, inputVector):
    """Return the unnormalized likelihood of *inputVector* per class.

    *summaries* maps class value -> list of (mean, stdev) per attribute.
    Likelihood is the product of the per-attribute Gaussian densities.
    """
    probabilities = {}
    for classValue, classSummaries in summaries.items():
        likelihood = 1
        for i, (mu, sigma) in enumerate(classSummaries):
            # A zero spread would make the Gaussian pdf blow up; substitute
            # a huge stdev so this attribute contributes almost uniformly.
            if sigma == 0.0:
                sigma = 10000.0
            likelihood *= calculateProbability(inputVector[i], mu, sigma)
        probabilities[classValue] = likelihood
    return probabilities

def predict(summaries, inputVector):
    """Return the class value with the highest likelihood for *inputVector*.

    Returns None when *summaries* is empty.
    """
    probabilities = calculateClassProbabilities(summaries, inputVector)
    best = None
    for label, probability in probabilities.items():
        if best is None or probability > best[1]:
            best = (label, probability)
    return best[0] if best is not None else None

def getPredictions(summaries, testSet):
    """Predict a class label for every row of the *testSet* DataFrame."""
    return [predict(summaries, row) for _, row in testSet.iterrows()]

def getAccuracy(testSet, predictions):
    """Percentage (0-100) of positions where predictions match *testSet*.

    *testSet* is a pandas Series of true labels; *predictions* is a
    parallel list of predicted labels.
    """
    total = len(testSet)
    hits = 0
    for position in range(total):
        if predictions[position] == testSet.iloc[position]:
            hits += 1
    return 100.0 * hits / float(total)

In [84]:
# Load the car-quality dataset and label-encode every column into integer
# codes so the Gaussian NB math can operate on numbers.
filename = 'carData.csv'
dataset = pd.read_csv(filename)
for i in range(0, dataset.shape[1]):
    dataset.iloc[:,i] = LabelEncoder().fit_transform(dataset.iloc[:,i])
    
# NOTE(review): the column names shown later ('vhigh', ..., 'unacc') look
# like a data row promoted to header, i.e. the CSV has no header line —
# confirm against the raw file.  'unacc' is treated as the class column.
Y = dataset.unacc.copy()
del dataset['unacc']
X = dataset

# 70/30 train/test split; no random_state is fixed, so results vary per run.
X_train, X_test, Y_train, Y_test = train_test_split(X,Y,test_size=0.3)

# Feature-only copy for scikit-learn, taken before the label is re-attached.
X_train_sk = X_train.copy()

# The hand-made NB expects the class as the LAST column of the training set.
idx = X_train.shape[1]
new_col = Y_train 
X_train.insert(loc=idx, column='Class', value=new_col)

X_train.head()


Out[84]:
vhigh vhigh.1 2 2.1 small low Class
611 0 0 2 2 2 1 2
1599 1 2 3 0 0 2 2
1011 2 0 1 1 1 2 2
733 0 2 3 0 1 0 2
1612 1 2 3 2 2 0 1

In [85]:
# "Train" the hand-made classifier: per-class (mean, stdev) per attribute.
summaries = summarizeByClass(X_train)
Y_test.head()


Out[85]:
958     0
1309    0
23      2
1598    2
1525    1
Name: unacc, dtype: int64

In [86]:
# Classify every test row with the hand-made model.
y_pred = getPredictions(summaries, X_test)

In [87]:
# Evaluate the hand-made model: accuracy plus per-class precision/recall/F1.
acc = getAccuracy(Y_test, y_pred)
print('Hand-made Gaussian NB\nAccuracy = ',acc)
print("\nClassification Report:")
# NOTE(review): classification_report matches target_names to the sorted
# encoded labels; LabelEncoder sorts alphabetically (acc, good, unacc,
# vgood), which does not match the order given here — verify the mapping
# before trusting the per-class names in the report.
print(classification_report(y_true=Y_test, y_pred=y_pred, target_names=["unacc", "acc", "good", "vgood"]))


Hand-made Gaussian NB
Accuracy =  66.85934489402698

Classification Report:
             precision    recall  f1-score   support

      unacc       0.48      0.75      0.58       126
        acc       0.24      1.00      0.38        18
       good       0.96      0.65      0.77       361
      vgood       0.00      0.00      0.00        14

avg / total       0.79      0.67      0.69       519

/Users/vilmabezerraalves/miniconda3/envs/DataScience/lib/python3.6/site-packages/sklearn/metrics/classification.py:1135: UndefinedMetricWarning: Precision and F-score are ill-defined and being set to 0.0 in labels with no predicted samples.
  'precision', 'predicted', average, warn_for)

In [88]:
# scikit-learn baseline: GaussianNB trained on the same encoded features.
from sklearn.naive_bayes import GaussianNB
naive = GaussianNB()
naive.fit(X_train_sk.values, Y_train.values)

y_pred_sk = naive.predict(X_test.values)
accSK = getAccuracy(Y_test, y_pred_sk)
print('SciKitLearn Gaussian NB\nAccuracy = ',accSK)
print("\nClassification Report:")
# NOTE(review): same caveat as the hand-made report — target_names must
# follow the LabelEncoder's alphabetical class order to label rows correctly.
print(classification_report(y_true=Y_test, y_pred=y_pred_sk, target_names=["unacc", "acc", "good", "vgood"]))


SciKitLearn Gaussian NB
Accuracy =  60.886319845857415

Classification Report:
             precision    recall  f1-score   support

      unacc       0.39      0.07      0.12       126
        acc       0.00      0.00      0.00        18
       good       0.84      0.81      0.83       361
      vgood       0.09      1.00      0.17        14

avg / total       0.68      0.61      0.61       519

/Users/vilmabezerraalves/miniconda3/envs/DataScience/lib/python3.6/site-packages/sklearn/metrics/classification.py:1135: UndefinedMetricWarning: Precision and F-score are ill-defined and being set to 0.0 in labels with no predicted samples.
  'precision', 'predicted', average, warn_for)

In [ ]: