In [1]:
%pylab inline
In [39]:
%run additional.ipynb
In [3]:
# Widen pandas column display so long estimator names are not truncated in tables.
pandas.set_option('display.max_colwidth', 120)
Preselections have been applied:
In [4]:
# Background is the minimum-bias mode 30000000; every other mode present in
# `empty_events` (defined in additional.ipynb) is treated as signal.
bck_train_mode_name = 30000000
sig_train_modes_names = list(set(empty_events.keys()) - {bck_train_mode_name})
sig_train_files = ['mod_{}.csv'.format(name) for name in sig_train_modes_names]
bck_train_files = 'mod_30000000.csv'
folder = "datasets/prepared_hlt_body/"
In [5]:
# Concatenate all per-mode signal CSVs into a single file (done once; skipped
# when the combined file already exists).  The original mixed plain string
# concatenation with os.path.join for the same path; build it once instead.
signal_path = os.path.join(folder, 'signal_hlt1.csv')
if not os.path.exists(signal_path):
    concat_files(folder, sig_train_files, signal_path)
In [6]:
# Load the merged signal sample and the minimum-bias background sample
# (tab-separated files produced by the preparation step).
signal_csv = os.path.join(folder, 'signal_hlt1.csv')
bck_csv = os.path.join(folder, bck_train_files)
signal_data = pandas.read_csv(signal_csv, sep='\t')
bck_data = pandas.read_csv(bck_csv, sep='\t')
In [7]:
# Inspect the available columns (rendered as the cell output).
signal_data.columns
Out[7]:
In [8]:
# Candidate/event statistics of the raw samples (statistic_length is defined
# in additional.ipynb).
print 'Signal', statistic_length(signal_data)
print 'Bck', statistic_length(bck_data)
In [9]:
# Total event counts including events that produced no candidates
# (those appear only in `empty_events`).
total_bck_events = statistic_length(bck_data)['Events'] + empty_events[bck_train_mode_name]
total_signal_events_by_mode = {
    mode: statistic_length(signal_data[signal_data['mode'] == mode])['Events'] + empty_events[mode]
    for mode in sig_train_modes_names
}
In [10]:
# Report totals; the trailing tuple expression is the cell's displayed output.
print 'Bck:', total_bck_events
'Signal:', total_signal_events_by_mode
Out[10]:
In [11]:
# Feature sets compared throughout the notebook:
#   variables_base       - baseline HLT1 2-body set (includes corrected mass mcor);
#   variables_mcor       - despite the name, the baseline set *without* mcor;
#   variables_additional - extra candidate variables tried on top of the baseline;
#   variables_new        - pt replaced by sumpt;
#   variables_new_minpt  - variables_new without minpt.
variables_base = ['mcor', 'chi2', 'pt', 'fdchi2', 'minpt', 'nlt16']
variables_mcor = ['chi2', 'pt', 'fdchi2', 'minpt', 'nlt16']
variables_additional = ['m', 'fdr', 'sumpt', 'sumipchi2', 'eta']
variables_new = ['chi2', 'minpt', 'sumpt', 'fdchi2', 'nlt16']
variables_new_minpt = ['chi2', 'sumpt', 'fdchi2', 'nlt16']
In [12]:
# HLT1 2-body preselection: keep only candidates with the pass_2body flag set.
signal_data = signal_data[signal_data['pass_2body'] == 1]
bck_data = bck_data[bck_data['pass_2body'] == 1]
In [13]:
# Statistics after the 2-body preselection.
print 'Signal', statistic_length(signal_data)
print 'Bck', statistic_length(bck_data)
In [14]:
# Event counts surviving the 2-body preselection.
total_signal_events_by_mode_presel = {
    mode: statistic_length(signal_data[signal_data['mode'] == mode])['Events']
    for mode in sig_train_modes_names
}
total_bck_events_presel = statistic_length(bck_data)['Events']
In [15]:
# Report preselected totals; the trailing tuple is the displayed output.
print 'Bck:', total_bck_events_presel
'Signal:', total_signal_events_by_mode_presel
Out[15]:
In [16]:
# Peek at the first rows of the preselected signal sample.
signal_data.head()
Out[16]:
In [17]:
# Split signal and background into train/test parts; 'unique' is the splitting
# strategy understood by prepare_data (defined in additional.ipynb).
ds_train_signal, ds_train_bck, ds_test_signal, ds_test_bck = prepare_data(signal_data, bck_data, 'unique')
In [18]:
# Statistics of the training halves.
print 'Signal', statistic_length(ds_train_signal)
print 'Bck', statistic_length(ds_train_bck)
In [19]:
# Combined training sample (background first, then signal).
train = pandas.concat([ds_train_bck, ds_train_signal])
In [20]:
# Statistics of the test halves.
print 'Signal', statistic_length(ds_test_signal)
print 'Bck', statistic_length(ds_test_bck)
In [21]:
# Combined test sample (background first, then signal).
test = pandas.concat([ds_test_bck, ds_test_signal])
In [22]:
# Total test-sample event counts: half of the events that failed the
# preselection (integer division, mirroring the train/test split) plus the
# events actually present in the test half.
total_test_bck_events = (total_bck_events - total_bck_events_presel) // 2 + statistic_length(ds_test_bck)['Events']
total_test_signal_events = {
    mode: (total_signal_events_by_mode[mode] - total_signal_events_by_mode_presel[mode]) // 2
          + statistic_length(ds_test_signal[ds_test_signal['mode'] == mode])['Events']
    for mode in sig_train_modes_names
}
In [23]:
# Report test totals; the trailing tuple is the displayed output.
print 'Bck total test events:', total_test_bck_events
'Signal total test events:', total_test_signal_events
Out[23]:
In [24]:
# Training sample with an additional corrected-mass cut
# (mcor <= 10e3 -- presumably MeV; TODO confirm units).
train_cut = train[train['mcor'] <= 10e3]
In [25]:
# Overlay normalized signal (blue) vs background (red) distributions
# for every baseline and additional variable.
for var in variables_base + variables_additional:
    sig_values = train.loc[train.signal == 1, var].values
    bck_values = train.loc[train.signal == 0, var].values
    hist(sig_values, color='b', bins=60, histtype='step', normed=True)
    hist(bck_values, color='r', bins=60, histtype='step', normed=True)
    title(var)
    show()
In [26]:
import cPickle
# Restore previously trained estimators if a saved model file exists;
# otherwise start with an empty collection (the original left `estimators`
# undefined when the file was missing, breaking the cells below).
# Pickle data is binary, so the file must be opened in binary mode.
if os.path.exists('models/hlt1_body2.pkl'):
    with open('models/hlt1_body2.pkl', 'rb') as file_mn:
        estimators = cPickle.load(file_mn)
else:
    estimators = {}
In [27]:
from rep_ef.estimators import EventFilterClassifier
In [27]:
# MatrixNet (EventFilter) model on the mcor-cut sample using the full baseline
# feature set; sync=False presumably submits training asynchronously -- confirm.
estimators['MN: mcor cut, mcor var'] = EventFilterClassifier(features=variables_base,
                                                             dataset_name='hlt1_2{random}',
                                                             iterations=5000, sync=False)
estimators['MN: mcor cut, mcor var'].fit(train_cut, train_cut['signal'])
Out[27]:
In [26]:
# Feature importances sorted by 'effect' (legacy pandas DataFrame.sort API).
estimators['MN: mcor cut, mcor var'].get_feature_importances().sort('effect')
Out[26]:
In [28]:
# Baseline MN model: full training sample, features without mcor.
estimators['MN'] = EventFilterClassifier(features=variables_mcor,
                                         dataset_name='hlt1_2{random}',
                                         iterations=5000, sync=False)
estimators['MN'].fit(train, train['signal'])
Out[28]:
In [27]:
# Importances of the baseline MN model.
estimators['MN'].get_feature_importances().sort('effect')
Out[27]:
In [29]:
# MN model with baseline plus additional features, on the mcor-cut sample.
estimators['additional features'] = EventFilterClassifier(features=variables_base + variables_additional,
                                                          dataset_name='hlt1_2{random}',
                                                          iterations=5000, sync=False)
estimators['additional features'].fit(train_cut, train_cut['signal'])
Out[29]:
In [28]:
# Importances of the additional-features model.
estimators['additional features'].get_feature_importances().sort('effect')
Out[28]:
In [31]:
# MN model with pt replaced by sumpt.
estimators['MN: pt->sumpt'] = EventFilterClassifier(features=variables_new,
                                                    dataset_name='hlt1_2{random}',
                                                    iterations=5000, sync=False)
estimators['MN: pt->sumpt'].fit(train, train['signal'])
Out[31]:
In [29]:
# Importances of the pt->sumpt model.
estimators['MN: pt->sumpt'].get_feature_importances().sort('effect')
Out[29]:
In [33]:
# MN model with pt->sumpt and minpt removed.
estimators['MN: pt->sumpt, remove minpt'] = EventFilterClassifier(features=variables_new_minpt,
                                                                  dataset_name='hlt1_2{random}',
                                                                  iterations=5000, sync=False)
estimators['MN: pt->sumpt, remove minpt'].fit(train, train['signal'])
Out[33]:
In [34]:
# Importances of the pt->sumpt, minpt-removed model.
estimators['MN: pt->sumpt, remove minpt'].get_feature_importances().sort('effect')
Out[34]:
In [35]:
# Discretization thresholds per variable for the BBDT-style MN model
# (values passed as `intervals` to EventFilterClassifier).
borders = {
    'chi2':   [1, 2.5, 5, 7.5, 10, 100],
    'sumpt':  [3000, 4000, 5000, 6000, 7500, 9000, 12e3, 23e3, 50e3],
    'fdchi2': [33, 125, 350, 780, 1800, 5000, 10000],
    'minpt':  [350, 500, 750, 1500, 3000, 5000],
    'nlt16':  [0.5],
}
In [36]:
# Same discretization thresholds, but without the minpt variable
# (for the minpt-removed BBDT model).
borders_minpt = {
    'chi2':   [1, 2.5, 5, 7.5, 10, 100],
    'sumpt':  [3000, 4000, 5000, 6000, 7500, 9000, 12e3, 23e3, 50e3],
    'fdchi2': [33, 125, 350, 780, 1800, 5000, 10000],
    'nlt16':  [0.5],
}
In [37]:
# BBDT-style MN model: same features as pt->sumpt, with variables binned
# by the `borders` intervals.
estimators['MN BBDT: pt->sumpt'] = EventFilterClassifier(features=variables_new,
                                                         dataset_name='hlt1_2{random}',
                                                         iterations=5000, sync=False,
                                                         intervals=borders)
estimators['MN BBDT: pt->sumpt'].fit(train, train['signal'])
Out[37]:
In [45]:
# Importances of the BBDT pt->sumpt model.
estimators['MN BBDT: pt->sumpt'].get_feature_importances().sort('effect')
Out[45]:
In [38]:
# BBDT-style MN model without minpt, binned by `borders_minpt`.
estimators['MN BBDT: pt->sumpt, remove minpt'] = EventFilterClassifier(features=variables_new_minpt,
                                                                       dataset_name='hlt1_2{random}',
                                                                       iterations=5000, sync=False,
                                                                       intervals=borders_minpt)
estimators['MN BBDT: pt->sumpt, remove minpt'].fit(train, train['signal'])
Out[38]:
In [46]:
# Importances of the BBDT minpt-removed model.
estimators['MN BBDT: pt->sumpt, remove minpt'].get_feature_importances().sort('effect')
Out[46]:
In [28]:
from rep.metaml import FoldingClassifier
from rep.estimators import SklearnClassifier
from sklearn.ensemble import RandomForestClassifier
In [30]:
# Folding random forest (out-of-fold predictions on the training set) trained
# on the sumpt-without-minpt features; computation is distributed through the
# 'ssh-ipy' IPython-parallel profile.
forest_base_partial = SklearnClassifier(RandomForestClassifier(n_estimators=300, min_samples_leaf=500, max_depth=7,
                                                               max_features=4))
forest_folding_top = FoldingClassifier(base_estimator=forest_base_partial, random_state=11,
                                       features=variables_new_minpt, ipc_profile='ssh-ipy')
forest_folding_top.fit(train, train['signal'])
Out[30]:
In [47]:
# Hierarchical scheme: keep events selected by the top-`rank` candidates of the
# folding forest (get_best_svr, defined in additional.ipynb -- presumably picks
# the best secondary vertices per event), then train an MN model with the
# baseline (mcor-including) features on those events.
for rank in range(1, 3):
    good_events = get_best_svr(train, forest_folding_top, rank)
    ef_good = EventFilterClassifier(features=variables_base,
                                    dataset_name='hlt1_2{random}',
                                    iterations=5000, sync=False)
    ef_good.fit(good_events, good_events['signal'])
    estimators['top-{} forest preselection, use mcor'.format(rank)] = ef_good
In [32]:
# Same hierarchical scheme, MN trained with the sumpt-without-minpt features.
for rank in range(1, 3):
    good_events = get_best_svr(train, forest_folding_top, rank)
    ef_good = EventFilterClassifier(features=variables_new_minpt,
                                    dataset_name='hlt1_2{random}',
                                    iterations=5000, sync=False)
    ef_good.fit(good_events, good_events['signal'])
    estimators['top-{} forest preselection'.format(rank)] = ef_good
In [33]:
# Second folding forest: same hyperparameters except max_features=5,
# trained on variables_new (sumpt set including minpt).
forest_base_partial = SklearnClassifier(RandomForestClassifier(n_estimators=300, min_samples_leaf=500, max_depth=7,
                                                               max_features=5))
forest_folding_top_minpt = FoldingClassifier(base_estimator=forest_base_partial, random_state=11,
                                             features=variables_new, ipc_profile='ssh-ipy')
forest_folding_top_minpt.fit(train, train['signal'])
Out[33]:
In [51]:
# Hierarchical scheme using the minpt-including forest, MN on variables_new.
for rank in range(1, 3):
    good_events = get_best_svr(train, forest_folding_top_minpt, rank)
    ef_good = EventFilterClassifier(features=variables_new,
                                    dataset_name='hlt1_2{random}',
                                    iterations=5000, sync=False)
    ef_good.fit(good_events, good_events['signal'])
    estimators['top-{} forest preselection, minpt'.format(rank)] = ef_good
In [54]:
# Importances of the top-1 hierarchical model (minpt variant).
estimators['top-1 forest preselection, minpt'].get_feature_importances()
Out[54]:
In [44]:
# Hierarchical scheme: minpt-including forest preselection, MN with baseline features.
for rank in range(1, 3):
    good_events = get_best_svr(train, forest_folding_top_minpt, rank)
    ef_good = EventFilterClassifier(features=variables_base,
                                    dataset_name='hlt1_2{random}',
                                    iterations=5000, sync=False)
    ef_good.fit(good_events, good_events['signal'])
    estimators['top-{} forest preselection, minpt, base'.format(rank)] = ef_good
In [55]:
# Importances of the top-1 hierarchical model (minpt forest, baseline MN features).
estimators['top-1 forest preselection, minpt, base'].get_feature_importances()
Out[55]:
In [36]:
# Third folding forest: trained on the baseline (mcor-including) features.
forest_base_partial = SklearnClassifier(RandomForestClassifier(n_estimators=300, min_samples_leaf=500, max_depth=7,
                                                               max_features=5))
forest_folding_top_mcor = FoldingClassifier(base_estimator=forest_base_partial, random_state=11,
                                            features=variables_base, ipc_profile='ssh-ipy')
forest_folding_top_mcor.fit(train, train['signal'])
Out[36]:
In [37]:
# Hierarchical scheme: mcor-based forest preselection, MN on variables_new.
for rank in range(1, 3):
    good_events = get_best_svr(train, forest_folding_top_mcor, rank)
    ef_good = EventFilterClassifier(features=variables_new,
                                    dataset_name='hlt1_2{random}',
                                    iterations=5000, sync=False)
    ef_good.fit(good_events, good_events['signal'])
    estimators['top-{} forest preselection, minpt; mcor for forest'.format(rank)] = ef_good
In [38]:
# Hierarchical scheme: mcor-based forest preselection, MN with baseline features.
for rank in range(1, 3):
    good_events = get_best_svr(train, forest_folding_top_mcor, rank)
    ef_good = EventFilterClassifier(features=variables_base,
                                    dataset_name='hlt1_2{random}',
                                    iterations=5000, sync=False)
    ef_good.fit(good_events, good_events['signal'])
    estimators['top-{} forest preselection, mcor'.format(rank)] = ef_good
In [57]:
import cPickle
# Persist all trained estimators.  Pickle data is binary, so the file must be
# opened in binary mode ('wb'); the original text mode can corrupt the dump
# on some platforms.
with open('models/hlt1_body2.pkl', 'wb') as file_mn:
    cPickle.dump(estimators, file_mn)
In [27]:
# List the names of all trained models.
estimators.keys()
Out[27]:
In [28]:
# For every classifier, find the probability threshold that yields the target
# output rate(s) on the background test sample, and record how many background
# events pass at each rate.
thresholds = dict()
RATE = [50000.]  # target rate points (presumably Hz, i.e. 50 kHz -- confirm)
events_pass = dict()
for name, cl in estimators.items():
    prob = cl.predict_proba(ds_test_bck)
    if 'mcor cut' not in name and 'additional' not in name:
        # Models trained without the mcor cut: threshold on the full test background.
        thr, result = calculate_thresholds(ds_test_bck, prob, total_test_bck_events, rates=RATE)
        for rate, val in result.items():
            # val[1] presumably is the passing-event count -- confirm in additional.ipynb.
            events_pass['{}-{}'.format(rate, name)] = val[1]
    else:
        # Models trained on the mcor-cut sample: evaluate on the same mcor subset.
        thr, result = calculate_thresholds(ds_test_bck[ds_test_bck['mcor'] <= 10e3],
                                           prob[numpy.array(ds_test_bck['mcor']) <= 10e3],
                                           total_test_bck_events, rates=RATE)
        for rate, val in result.items():
            events_pass['{}-{}'.format(rate, name)] = val[1]
    thresholds[name] = thr
    print name, result
In [35]:
# Per-mode efficiencies for the two models that were trained on the mcor-cut
# sample, evaluated on the mcor-cut portion of the test signal.
est_cut = dict([('MN: mcor cut, mcor var', estimators['MN: mcor cut, mcor var']),
                ('additional features', estimators['additional features'])])
train_modes_eff, statistic = result_statistic(est_cut, sig_train_modes_names,
                                              ds_test_signal[ds_test_signal['mcor'] <= 10e3],
                                              thresholds, RATE, total_test_signal_events)
In [36]:
# Per-mode efficiencies for the flat MN models evaluated on the full test signal.
flat_model_names = ['MN', 'MN: pt->sumpt', 'MN: pt->sumpt, remove minpt']
est = dict((key, estimators[key]) for key in flat_model_names)
train_modes_eff_rest, statistic_rest = result_statistic(est, sig_train_modes_names,
                                                        ds_test_signal,
                                                        thresholds, RATE, total_test_signal_events)
In [33]:
# Bar chart of per-mode efficiencies for the flat models, sorted by the
# baseline 'MN' model at the 50 kHz point.
from rep.plotting import BarComparePlot
BarComparePlot(OrderedDict(train_modes_eff.items() + train_modes_eff_rest.items()),
               sortby=('MN', 50000.0)).plot(new_plot=True, figsize=(24, 8), ylabel='efficiency', fontsize=22)
lgd = legend(bbox_to_anchor=(0.5, 1.4), loc='upper center', ncol=2, fontsize=22)
# plt.savefig('hlt1.pdf' , format='pdf', bbox_extra_artists=(lgd,), bbox_inches='tight')
In [37]:
# Hierarchical (forest-preselection) models and their flat MN/BBDT
# counterparts, collected in a fixed display order, then evaluated per mode.
hierarhical = ['top-1 forest preselection',
               'top-2 forest preselection',
               'top-1 forest preselection, use mcor',
               'top-2 forest preselection, use mcor',
               'top-2 forest preselection, minpt',
               'top-1 forest preselection, minpt',
               'top-2 forest preselection, minpt, base',
               'top-1 forest preselection, minpt, base',
               'top-1 forest preselection, minpt; mcor for forest',
               'top-2 forest preselection, minpt; mcor for forest',
               'top-1 forest preselection, mcor',
               'top-2 forest preselection, mcor',
               'MN: pt->sumpt',
               'MN BBDT: pt->sumpt',
               'MN: pt->sumpt, remove minpt',
               'MN BBDT: pt->sumpt, remove minpt'
               ]
compare_hierarhical = OrderedDict((key, estimators[key]) for key in hierarhical)
train_modes_eff_forest, statistic_forest = result_statistic(compare_hierarhical, sig_train_modes_names,
                                                            ds_test_signal,
                                                            thresholds, RATE, total_test_signal_events)
In [35]:
# Bar chart of the hierarchical-vs-flat comparison, sorted by the BBDT model.
BarComparePlot(OrderedDict([((key, 50000.), train_modes_eff_forest[(key, 50000.)]) for key in hierarhical]),
               sortby=('MN BBDT: pt->sumpt', 50000.0)).plot(new_plot=True, figsize=(24, 8),
                                                            ylabel='efficiency', fontsize=22)
lgd = legend(bbox_to_anchor=(0.5, 1.7), loc='upper center', ncol=2, fontsize=22)
In [36]:
# Tabulate the mcor-cut models' statistics.
pandas.DataFrame(statistic)
Out[36]:
In [37]:
# Tabulate the flat models' statistics.
pandas.DataFrame(statistic_rest)
Out[37]:
In [38]:
# Build a REP classification report over all flat and hierarchical models
# using the combined test sample.
from rep.data import LabeledDataStorage
from rep.report import ClassificationReport
lds = LabeledDataStorage(test, test['signal'])
report = ClassificationReport(OrderedDict(est.items() + compare_hierarhical.items()), lds)
In [39]:
# ROC curves of all models in the report.
report.roc().plot(new_plot=True)
In [40]:
# Per-mode topological efficiency metric at the 50 kHz rate point.
mode_metrics = {
    mode: {rate: generate_topo_metric(ds_test_bck, ds_test_signal[ds_test_signal['mode'] == mode],
                                      total_test_bck_events, total_test_signal_events[mode], rate)
           for rate in [50000.]}
    for mode in sig_train_modes_names
}
In [41]:
def generate_select_mode(mode):
    """Build a row mask factory: keep rows of the given signal mode plus all background rows."""
    def select_mode(pd):
        # A row is selected when it belongs to `mode` OR is background (signal == 0).
        modes = numpy.array(pd['mode'])
        labels = numpy.array(pd['signal'])
        return (modes == mode) | (labels == 0)
    return select_mode
In [42]:
# Build a staged (per-iteration) efficiency learning curve for every signal
# mode that still has test events.  The original also assigned an
# `eff_metrics = OrderedDict()` that was never used; it is removed here.
staged_eff_plots = []
for mode in sig_train_modes_names:
    select_mode = generate_select_mode(mode)
    if len(ds_test_signal[ds_test_signal['mode'] == mode]) <= 0:
        continue
    staged_eff_plots.append(report.learning_curve(mode_metrics[mode][50000.], mask=select_mode, steps=10,
                                                  metric_label='mode {}, rate {}, eff'.format(mode, 50000.)))
In [43]:
# Render each staged efficiency plot on its own figure.  The original used
# enumerate() but never used the index; iterate directly instead.
for eff_plot in staged_eff_plots:
    eff_plot.plot(new_plot=True)
In [29]:
# Numbers of test-half events that did not pass the 2-body preselection
# (half of each not-passed total, mirroring the train/test split).
not_passed_event_bck = (total_bck_events - total_bck_events_presel) // 2
not_passed_event_sig = sum(
    (total_signal_events_by_mode[mode] - total_signal_events_by_mode_presel[mode]) // 2
    for mode in sig_train_modes_names)
In [55]:
# Event-level ROC curves accounting for not-passed events; the `False`
# positional argument presumably disables per-channel normalization
# (plot_roc_events is defined in additional.ipynb -- confirm its signature).
plots = OrderedDict()
for key, value in est.items() + compare_hierarhical.items():
    plots[key] = plot_roc_events(value, ds_test_signal, ds_test_bck, key, False, not_passed_event_sig,
                                 not_passed_event_bck)
In [57]:
# Zoomed event-level ROC (low background-efficiency region) with a dashed
# vertical line at the background fraction corresponding to the 50 kHz rate
# of the baseline 'MN' model.
from rep.plotting import FunctionsPlot
FunctionsPlot(plots).plot(new_plot=True, xlim=(0, 0.1), ylim=(0., 1))
plot([1. * events_pass['50000.0-MN'] / (statistic_length(ds_test_bck)['Events'] + not_passed_event_bck)] * 2, [0., 1], 'b--', label='rate: 50 kHz')
lgd = legend(loc='upper center', fontsize=16, bbox_to_anchor=(0.5, 1.7), ncol=2)
In [44]:
# Rebuild the event-level ROC curves.
# NOTE(review): unlike the earlier call, no `False` flag is passed before the
# not-passed counts here, so the counts bind to different positional
# parameters -- verify against plot_roc_events' signature in additional.ipynb.
plots = OrderedDict()
for key, value in est.items() + compare_hierarhical.items():
    plots[key] = plot_roc_events(value, ds_test_signal, ds_test_bck, key, not_passed_event_sig, not_passed_event_bck)
In [45]:
# Full-range event-level ROC with the 50 kHz rate line of the 'MN' model.
from rep.plotting import FunctionsPlot
FunctionsPlot(plots).plot(new_plot=True, xlim=(0, 1), ylim=(0., 1))
plot([1. * events_pass['50000.0-MN'] / (statistic_length(ds_test_bck)['Events'] + not_passed_event_bck)] * 2, [0., 1], 'b--', label='rate: 50 kHz')
lgd = legend(loc='upper center', fontsize=16, bbox_to_anchor=(0.5, 1.7), ncol=2)
In [51]:
# Tightly zoomed ROC region.
# NOTE(review): the rate-line denominator here omits not_passed_event_bck,
# unlike the two previous cells -- confirm whether that is intentional.
FunctionsPlot(plots).plot(new_plot=True, xlim=(0.42, 0.44), ylim=(0.945, 0.965))
plot([1. * events_pass['50000.0-MN'] / statistic_length(ds_test_bck)['Events']] * 2, [0., 1], 'b--', label='rate: 50 kHz')
lgd = legend(loc='upper center', fontsize=16, bbox_to_anchor=(0.5, 1.7), ncol=2)
In [46]:
# Event-level ROC curves without per-channel normalization.
plots_unnormed = OrderedDict()
for key, value in est.items() + compare_hierarhical.items():
    plots_unnormed[key] = plot_roc_events(value, ds_test_signal, ds_test_bck, key, normed_channels=False)
In [47]:
# Full-range unnormalized ROC with the 50 kHz rate line (passed-events
# denominator only, consistent with the un-normalized curves).
from rep.plotting import FunctionsPlot
FunctionsPlot(plots_unnormed).plot(new_plot=True, xlim=(0, 1), ylim=(0., 1))
plot([1. * events_pass['50000.0-MN'] / statistic_length(ds_test_bck)['Events']] * 2, [0., 1], 'b--', label='rate: 50 kHz')
lgd = legend(loc='upper center', fontsize=16, bbox_to_anchor=(0.5, 1.7), ncol=2)
In [55]:
# Export the trained BBDT formula (MatrixNet .mx text format) for the sumpt model.
with open("bbdt_run2/hlt1_borders_sumpt.mx", "w") as f:
    f.write(estimators['MN BBDT: pt->sumpt'].formula_mx)
In [56]:
# Export the BBDT formula for the minpt-removed model.
with open("bbdt_run2/hlt1_borders_minpt.mx", "w") as f:
    f.write(estimators['MN BBDT: pt->sumpt, remove minpt'].formula_mx)