In [1]:
import numpy as np
import pandas as pd
import os
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.metrics import fbeta_score
from sklearn.metrics import roc_curve, auc
from sklearn.metrics import precision_recall_curve
from sklearn.metrics import average_precision_score
%matplotlib inline
In [2]:
# Label-name -> class-index maps for the three label groups of the Planet
# Amazon dataset. Index order matches the order the names are listed in.
WEATHER_LABELS = {name: idx for idx, name in enumerate(
    ['clear', 'partly_cloudy', 'haze', 'cloudy'])}
COMMON_LABELS = {name: idx for idx, name in enumerate(
    ['primary', 'agriculture', 'road', 'water',
     'habitation', 'cultivation', 'bare_ground'])}
RARE_LABELS = {name: idx for idx, name in enumerate(
    ['selective_logging', 'artisinal_mine', 'blooming',
     'slash_burn', 'blow_down', 'conventional_mine'])}
# Grouped so a single NETWORK_ID index selects the active label set.
LABELS = [WEATHER_LABELS, COMMON_LABELS, RARE_LABELS]
In [3]:
# Selects which label group this notebook evaluates: 0 = weather,
# 1 = common, 2 = rare (index into LABELS defined above).
NETWORK_ID = 0
In [4]:
# Paths are relative to the notebook's working directory.
# (Single-argument os.path.join is a no-op, so the raw strings are used
# directly — the resulting values are identical.)
EVALUATION_PATH = r'../reports/planet_validation'  # label/prediction dumps read below
TF_RECORD_PATH = r'../data/processed/tfrecord_summary.txt'  # label-distribution summary
In [5]:
# Invert the active network's label map: class index -> label name.
# A dict comprehension replaces the original dict([[v, k] ...]) construction.
# NOTE(review): assumes indices are unique within the map — true for the
# dicts defined above, where each name gets a distinct index.
hashtable = {index: name for name, index in LABELS[NETWORK_ID].items()}
# Label names in index order, e.g. for plot axis labels below.
label_names = list(hashtable.values())
In [6]:
# Load the summary written by the TFRecord conversion step.
# NOTE(review): np.loadtxt returns float64 values even for integer counts,
# so entries need an int() cast before being used as array indices.
# Presumably the last entry is the number of training examples — see the
# train_size cell below; confirm against the file writer.
label_distribution = np.loadtxt(TF_RECORD_PATH)
In [10]:
# BUG FIX: np.loadtxt yields float64, so the original train_size was a numpy
# float; using it as a slice bound ([0:train_size, :] in the cells below)
# raises TypeError. Cast to int here so slicing works.
train_size = int(label_distribution[-1])
# 40483 is presumably the total number of images in the dataset — TODO:
# derive it from label_distribution instead of hardcoding. 20% held out.
validation_size = int(np.floor(40483 * 0.2))
In [ ]:
# Load the stored training-set labels and predictions for this network,
# keeping only the first train_size rows of each dump.
train_labels_file = os.path.join(EVALUATION_PATH, 'out_labels_train.txt')
train_preds_file = os.path.join(EVALUATION_PATH, 'out_predictions_train.txt')
labels = np.loadtxt(train_labels_file)[0:train_size, :]
preds = np.loadtxt(train_preds_file)[0:train_size, :]
In [ ]:
# Load the stored validation-set labels and predictions.
# BUG FIX: the original sliced with train_size, but these are the
# *validation* dumps — validation_size (computed above and otherwise unused)
# is the intended row bound.
# NOTE(review): this cell overwrites the `labels`/`preds` from the training
# cell above — only one of the two cells should be run per analysis pass.
labels = np.loadtxt(os.path.join(EVALUATION_PATH, 'out_labels_validation.txt'))[0 : validation_size, :]
preds = np.loadtxt(os.path.join(EVALUATION_PATH, 'out_predictions_validation.txt'))[0 : validation_size, :]