In [1]:
%pylab inline
figsize(8, 6)
In [2]:
import pandas
import numpy
from folding_group import FoldingGroupClassifier
from rep.data import LabeledDataStorage
from rep.report import ClassificationReport
from rep.report.metrics import RocAuc
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import roc_curve, roc_auc_score
In [3]:
from utils import get_N_B_events, get_events_number, get_events_statistics
In [4]:
import root_numpy
data_nan = pandas.DataFrame(root_numpy.root2array('datasets/data/csv/JPsiK/Tracks.root'))
In [5]:
data_nan.head()
Out[5]:
In [6]:
event_id_column = 'event_id'
event_id = data_nan.run.apply(str) + '_' + data_nan.event.apply(str)
data_nan['group_column'] = numpy.unique(event_id, return_inverse=True)[1]
data_nan[event_id_column] = event_id
In [7]:
get_events_statistics(data_nan)
Out[7]:
In [8]:
get_N_B_events()
Out[8]:
In [9]:
data = data_nan.dropna()
len(data_nan), len(data), get_events_statistics(data)
Out[9]:
In [10]:
from utils import add_diff_pt
# add diff pt
add_diff_pt(data)
# add cos(diff_phi)
data['cos_diff_phi'] = numpy.cos(data.diff_phi.values)
In [11]:
from itertools import combinations
PIDs = {'k': data.PIDNNk.values,
        'e': data.PIDNNe.values,
        'mu': data.PIDNNm.values,
        }
for (pid_name1, pid_values1), (pid_name2, pid_values2) in combinations(PIDs.items(), 2):
    data.loc[:, 'max_PID_{}_{}'.format(pid_name1, pid_name2)] = numpy.maximum(pid_values1, pid_values2)
    data.loc[:, 'sum_PID_{}_{}'.format(pid_name1, pid_name2)] = pid_values1 + pid_values2
In [12]:
data.loc[:, 'label'] = (data.signB.values * data.signTrack.values > 0) * 1
In [13]:
', '.join(data.columns)
Out[13]:
In [14]:
initial_cut = '(ghostProb < 0.4)'
data = data.query(initial_cut)
In [15]:
get_events_statistics(data)
Out[15]:
In [16]:
threshold_kaon = 0.
threshold_muon = 0.
threshold_electron = 0.
threshold_pion = 0.
threshold_proton = 0.
cut_pid = " ( (PIDNNk > {trk}) | (PIDNNm > {trm}) | (PIDNNe > {tre}) | (PIDNNpi > {trpi}) | (PIDNNp > {trp})) "
cut_pid = cut_pid.format(trk=threshold_kaon, trm=threshold_muon, tre=threshold_electron, trpi=threshold_pion,
                         trp=threshold_proton)
data = data.query(cut_pid)
In [17]:
get_events_statistics(data)
Out[17]:
In [18]:
from utils import compute_sum_of_charges
means = [compute_sum_of_charges(data[mask], name, bins=bins,
                                event_id_column=event_id_column) for mask, name, bins in
         zip([data.signB > -100,
              (data.IPs > 3) & ((abs(data.diff_eta) > 0.6) | (abs(data.diff_phi) > 0.825)),
              (abs(data.diff_eta) < 0.6) & (abs(data.diff_phi) < 0.825) & (data.IPs < 3)],
             ['full', 'OS', 'SS'], [21, 21, 21])]
Tagging efficiency is defined as
$$\epsilon_{tag} = \frac{N(\text{passed selection})}{N(\text{all events})},$$
$$\Delta\epsilon_{tag} = \frac{\sqrt{\epsilon_{tag}(1 - \epsilon_{tag}) \sum_{\text{all events}} sw_i^2}}{N(\text{all events})},$$
where $sw_i$ is the sPlot weight (sWeight for signal).

Not all events are available (some selections have already been applied), which is why we use
$$\Delta\epsilon_{tag} = \frac{\sqrt{N(\text{passed selection})}}{N(\text{all events})},$$
which is similar to the previous definition.
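For illustration only, a minimal sketch comparing the two uncertainty definitions (not part of the original analysis); it assumes a numpy array `sweights_all` of per-event signal sWeights for the full sample and a boolean mask `passed` marking events that survive the selection, both hypothetical names:
In [ ]:
def tagging_efficiency_with_error(sweights_all, passed):
    # sWeighted event counts before and after the selection
    n_all = sweights_all.sum()
    n_passed = sweights_all[passed].sum()
    eps = n_passed / n_all
    # full definition: binomial error scaled by the sum of squared sWeights
    delta_full = numpy.sqrt(eps * (1 - eps) * (sweights_all ** 2).sum()) / n_all
    # simplified definition, used when the full event sample is not available
    delta_simple = numpy.sqrt(n_passed) / n_all
    return eps, delta_full, delta_simple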
In [19]:
N_B_passed = float(get_events_number(data))
tagging_efficiency = N_B_passed / get_N_B_events()
tagging_efficiency_delta = sqrt(N_B_passed) / get_N_B_events()
tagging_efficiency, tagging_efficiency_delta
Out[19]:
In [20]:
hist(data.diff_pt.values, bins=100)
pass
In [21]:
_, take_indices = numpy.unique(data[event_id_column], return_index=True)
figure(figsize=[15, 5])
subplot(1, 2, 1)
hist(data.Bmass.values[take_indices], bins=100)
title('B mass hist')
xlabel('mass')
subplot(1, 2, 2)
hist(data.N_sig_sw.values[take_indices], bins=100, normed=True)
title('sWeights hist')
xlabel('signal sWeights')
plt.savefig('img/Bmass_less_PID.png' , format='png')
In [22]:
sweight_threshold = 1.
data_sw_passed = data[data.N_sig_sw > sweight_threshold]
data_sw_not_passed = data[data.N_sig_sw <= sweight_threshold]
get_events_statistics(data_sw_passed)
Out[22]:
In [23]:
_, take_indices = numpy.unique(data_sw_passed[event_id_column], return_index=True)
figure(figsize=[15, 5])
subplot(1, 2, 1)
hist(data_sw_passed.Bmass.values[take_indices], bins=100)
title('B mass hist for sWeight > 1 selection')
xlabel('mass')
subplot(1, 2, 2)
hist(data_sw_passed.N_sig_sw.values[take_indices], bins=100, normed=True)
title('sWeights hist for sWeight > 1 selection')
xlabel('signal sWeights')
plt.savefig('img/Bmass_selected_less_PID.png' , format='png')
In [24]:
hist(data_sw_passed.diff_pt.values, bins=100)
pass
In [25]:
features = list(set(data.columns) - {'index', 'run', 'event', 'i', 'signB', 'signTrack', 'N_sig_sw', 'Bmass', 'mult',
                                     'PIDNNp', 'PIDNNpi', 'label', 'thetaMin', 'Dist_phi', event_id_column,
                                     'mu_cut', 'e_cut', 'K_cut', 'ID', 'diff_phi', 'group_column'})
features
Out[25]:
In [26]:
figure(figsize=[15, 16])
bins = 60
step = 3
for i, (feature1, feature2) in enumerate(combinations(['PIDNNk', 'PIDNNm', 'PIDNNe', 'PIDNNp', 'PIDNNpi'], 2)):
    subplot(4, 3, i + 1)
    Z, (x, y) = numpy.histogramdd(data_sw_passed[[feature1, feature2]].values, bins=bins, range=([0, 1], [0, 1]))
    pcolor(numpy.log(Z).T, vmin=0)
    xlabel(feature1)
    ylabel(feature2)
    # place a tick every `step` bins, labelled with the corresponding bin edges
    xticks(numpy.arange(0, bins + 1, step), x[::step]), yticks(numpy.arange(0, bins + 1, step), y[::step])
plt.savefig('img/PID_selected_less_PID.png', format='png')
In [27]:
hist(data_sw_passed.diff_pt.values, bins=60, normed=True)
pass
In [28]:
figure(figsize=(20, 6))
subplot(1, 2, 1)
_, n_tracks = numpy.unique(data_sw_passed[event_id_column], return_counts=True)
hist(n_tracks, bins=100)
title('Number of tracks for events with sWeight > 1')
subplot(1, 2, 2)
_, n_tracks_all = numpy.unique(data[event_id_column], return_counts=True)
hist(n_tracks_all, bins=106)
title('Number of tracks')
plt.savefig('img/tracks_number_less_PID.png' , format='png')
In [29]:
figure(figsize=[15, 4])
for i, column in enumerate(['PIDNNm', 'PIDNNe', 'PIDNNk']):
    subplot(1, 3, i + 1)
    hist(data_sw_passed[column].values, bins=60, range=(0, 1), label=column)
    legend()
In [30]:
from decisiontrain import DecisionTrainClassifier
from rep.estimators import SklearnClassifier
from hep_ml.losses import LogLossFunction
In [31]:
data_sw_passed_lds = LabeledDataStorage(data_sw_passed, data_sw_passed.label.values, data_sw_passed.N_sig_sw.values)
In [32]:
tt_base = DecisionTrainClassifier(learning_rate=0.1, n_estimators=3000, depth=6,
                                  max_features=15, n_threads=14, loss=LogLossFunction(regularization=100))
tt_folding = FoldingGroupClassifier(SklearnClassifier(tt_base), n_folds=2, random_state=11,
                                    train_features=features, group_feature='group_column')
%time tt_folding.fit_lds(data_sw_passed_lds)
pass
In [33]:
import cPickle
with open('models/dt_full_group.pkl', 'w') as f:
    cPickle.dump(tt_folding, f)
In [34]:
# import cPickle
# with open('models/dt_full_group.pkl', 'r') as f:
#     tt_folding = cPickle.load(f)
In [35]:
comparison_report = tt_folding.test_on_lds(data_sw_passed_lds)
comparison_report.compute_metric(RocAuc())
Out[35]:
In [36]:
comparison_report.roc()
Out[36]:
In [37]:
lc = comparison_report.learning_curve(RocAuc(), steps=1)
In [38]:
lc
Out[38]:
In [39]:
comparison_report.feature_importance()
Out[39]:
In [40]:
from utils import get_result_with_bootstrap_for_given_part
In [42]:
result = get_result_with_bootstrap_for_given_part(tagging_efficiency, tagging_efficiency_delta, tt_folding,
                                                  [data_sw_passed, data_sw_not_passed], 'tt-log', get_N_B_events(),
                                                  logistic=True, n_calibrations=30)
result
Out[42]:
In [75]:
import utils
reload(utils)
Out[75]:
In [76]:
from utils import get_result_with_bootstrap_for_given_part
In [77]:
result = get_result_with_bootstrap_for_given_part(tagging_efficiency, tagging_efficiency_delta, tt_folding,
                                                  [data_sw_passed, data_sw_not_passed], 'tt-log', get_N_B_events(),
                                                  logistic=True, n_calibrations=1)
result
Out[77]:
In [45]:
result.to_csv('img/tracks.csv', index=False, header=True)
In [46]:
from utils import prepare_B_data_for_given_part
In [47]:
Bdata_prepared = prepare_B_data_for_given_part(tt_folding, [data_sw_passed, data_sw_not_passed],
                                               get_N_B_events(), logistic=True)
In [48]:
Bdata_prepared.to_csv('models/Bdata_tracks.csv', header=True, index=False)
In [ ]:
from utils import estimate_algorithm
In [91]:
import cPickle
with open('models/dt_MC.pkl', 'r') as f:
    tt_folding_MC = cPickle.load(f)
with open('models/calibrator_tracks_MC.pkl', 'r') as f:
    calibrator_tracks_MC = cPickle.load(f)
with open('models/calibrator_B_MC.pkl', 'r') as f:
    calibrator_B_MC = cPickle.load(f)
In [83]:
p_MC = tt_folding_MC.predict_proba(data)[:, 1]
roc_auc_score(data.label, p_MC, sample_weight=data.N_sig_sw.values.astype(float64))
Out[83]:
In [93]:
estimate_algorithm(tt_folding_MC, calibrator_tracks_MC, calibrator_B_MC, data, get_N_B_events())
Out[93]:
In [97]:
estimate_algorithm(tt_folding_MC, calibrator_tracks_MC, calibrator_B_MC, data, get_N_B_events(),
                   calib_part_itself=True, calib_itself=True)
Out[97]:
In [ ]: