In [1]:
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
In [3]:
dfIn = pd.read_csv('30_10_2018_par0_training_history.csv', skiprows=0)
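The training-history CSV is assumed to contain the columns used in the cells below (epoch, loss, val_loss and the sel_acc_* / val_sel_acc_* metrics); the column set here is inferred from this notebook, not from the training script, so a quick check along these lines can catch a mismatched file early.
In [ ]:
# Optional sanity check: verify the columns used later in this notebook are present.
expected_cols = {'epoch', 'loss', 'val_loss',
                 'sel_acc_2', 'val_sel_acc_2',
                 'sel_acc_3', 'val_sel_acc_3',
                 'sel_acc_4', 'val_sel_acc_4'}
missing = expected_cols - set(dfIn.columns)
if missing:
    raise KeyError('missing columns in training history: %s' % sorted(missing))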
In [4]:
loss = dfIn['loss']
val_loss = dfIn['val_loss']
epoch = dfIn['epoch']
# print(loss, val_loss)
# loss.to_numpy()
In [5]:
ax = plt.gca()
ax.plot(epoch, loss)
ax.plot(epoch, val_loss)
ax.legend(('train', 'val'))
plt.title('Loss')
plt.xlabel("epoch")
plt.ylabel("loss")
plt.savefig('plots/plots_training/loss_par1.png', dpi=300, bbox_inches='tight')
In [6]:
ax = plt.gca()
ax.plot(epoch, dfIn['sel_acc_2'])
ax.plot(epoch, dfIn['val_sel_acc_2'])
ax.legend(('train', 'val'))
plt.title('Selection Accuracy')
plt.xlabel("epoch")
plt.ylabel("Purity")
plt.savefig('plots/plots_training/sec_acc_par0.png', dpi=300, bbox_inches='tight')
In [7]:
ax = plt.gca()
ax.plot(epoch, dfIn['sel_acc_3'])
ax.plot(epoch, dfIn['val_sel_acc_3'])
ax.legend(('train', 'val'))
plt.title('Selection Accuracy')
plt.xlabel("epoch")
plt.ylabel("Accuracy")
plt.savefig('plots/plots_training/sec_acc_3.pdf', dpi=300)
In [8]:
ax = plt.gca()
ax.plot(epoch, dfIn['sel_acc_4'])
ax.plot(epoch, dfIn['val_sel_acc_4'])
ax.legend(('train', 'val'))
plt.title('Selection Accuracy')
plt.xlabel("epoch")
plt.ylabel("Accuracy")
plt.savefig('plots/plots_training/sec_acc_4.pdf', dpi=300)
In [14]:
# Output of FeaturePermutation.py
# Par1
var_par1 = ['htt_eta', 'rel_jet_E_pt', 'htt_scalar_pt', 'jet_htt_deta', 'rel_jet_M_pt',
            'htt_pt', 'jet_pt', 'jet_eta', 'jet_deepFlavour', 'jet_htt_dphi']
var_score_par1 = [0.012734234, 0.02217102, 0.022484362, 0.024425447, 0.035648525,
                  0.04314232, 0.07358897, 0.12582594, 0.28503, 0.31471527]
# Par0 (same features, same order)
var = ['htt_eta', 'rel_jet_E_pt', 'htt_scalar_pt', 'jet_htt_deta', 'rel_jet_M_pt',
       'htt_pt', 'jet_pt', 'jet_eta', 'jet_deepFlavour', 'jet_htt_dphi']
var_score = [0.010668278, 0.01610607, 0.021612406, 0.02437216, 0.03577125,
             0.04414463, 0.07261199, 0.13032258, 0.26394898, 0.3217941]
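The scores above were copied from the output of FeaturePermutation.py, which is not included in this notebook. For reference, a minimal sketch of how such permutation-importance scores could be computed is shown below; `model`, `X_val`, `y_val` and `feature_names` are assumed placeholders rather than objects from the original script, and the metric index taken from `evaluate()` is likewise an assumption.
In [ ]:
# Hypothetical sketch of permutation feature importance
# (illustration only; not the original FeaturePermutation.py).
# Assumes a trained Keras-style `model` whose evaluate() returns [loss, metric, ...],
# validation arrays X_val (n_samples x n_features) and y_val,
# and a list `feature_names` matching the columns of X_val.
def permutation_importance(model, X_val, y_val, feature_names, n_repeats=5, seed=0):
    rng = np.random.default_rng(seed)
    baseline = model.evaluate(X_val, y_val, verbose=0)[1]
    scores = {}
    for j, name in enumerate(feature_names):
        drops = []
        for _ in range(n_repeats):
            X_perm = X_val.copy()
            X_perm[:, j] = rng.permutation(X_perm[:, j])  # break this feature's link to y
            permuted = model.evaluate(X_perm, y_val, verbose=0)[1]
            drops.append(baseline - permuted)  # larger drop = more important feature
        scores[name] = float(np.mean(drops))
    return scores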
In [15]:
ind = np.arange(len(var_score))
fig, ax = plt.subplots()
width = 0.35  # bar thickness (passed as the 'height' argument of barh)
p1 = ax.barh(ind + width/2, var_score, width)
p2 = ax.barh(ind - width/2, var_score_par1, width, color='mediumvioletred')
ax.set_title("Importance via feature permutation")
ax.set_ylabel('Feature')
ax.legend((p1[0], p2[0]), ('train with parity even', 'train with parity odd'))
ax.set_yticks(ind)
ax.set_yticklabels(var)
plt.xlabel("Importance via feature permutation")
ax.autoscale_view()
plt.savefig("feature_importance_both.png", dpi=300, bbox_inches='tight')
plt.show()
In [13]:
# For only one training
plt.barh(var, var_score, color='teal')
plt.ylabel("Feature")
plt.xlabel("Importance via feature permutation")
plt.grid(True)
plt.savefig("feature_importance_v2.png", dpi=300, bbox_inches='tight')
plt.show()
plt.close()