In [1]:
%%javascript
// Inject a clickable table of contents into this notebook (external helper script).
$.getScript('https://kmahelona.github.io/ipython_notebook_goodies/ipython_notebook_toc.js')
P. Schuster
University of Michigan
January 2018
Revisiting this in July 2018 for the energy-based `bhm`.
Goal: Calculate the number of events in a specific time or energy range in the $nn$ cloud on the bicorrelation distribution.
Start by loading some data. I will use the data from the combined data sets from Cf072115-Cf072215b.
In [1]:
# Imports: stdlib first, then third-party. The duplicate `import os` / `import sys`
# lines were removed, and the wildcard `from tqdm import *` is narrowed to the one
# name conventionally used (wildcard imports pollute the namespace).
import os
import sys

import matplotlib.colors
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import scipy.io as sio
from tqdm import tqdm

# Print entire arrays instead of truncating with '...'.
# BUG FIX: `threshold=np.nan` is rejected by modern NumPy (it was never a valid
# threshold); `sys.maxsize` is the documented way to disable truncation.
np.set_printoptions(threshold=sys.maxsize)
In [2]:
# Use seaborn's plain white theme for all figures in this notebook.
import seaborn as sns
sns.set_style('white')
In [3]:
# Make the project's analysis modules importable from this notebook.
sys.path.append('../scripts/')
import bicorr as bicorr
import bicorr_plot as bicorr_plot
import bicorr_e as bicorr_e
import bicorr_sums as bicorr_sums
In [4]:
# Reload the project modules automatically whenever their source changes on disk.
%load_ext autoreload
%autoreload 2
In [6]:
# Detector-pair table: one row per detector pair (channels, angle, ...).
det_df = bicorr.load_det_df()
In [31]:
# Lookup dicts between detector pair, flat pair index, and pair angle.
# NOTE(review): execution count 31 is out of order here — re-run top to bottom.
dict_pair_to_index, dict_index_to_pair, dict_pair_to_angle = bicorr.build_dict_det_pair(det_df)
In [7]:
# Inspect which processed data products are already available on disk.
os.listdir('../analysis/Cf072115_to_Cf072215b/datap')
Out[7]:
In [8]:
# Load the energy-space bicorrelation histogram master array (bhm_e),
# its energy bin edges, and a descriptive note for this data set.
bhm_e, e_bin_edges, note = bicorr_e.load_bhm_e('../analysis/Cf072115_to_Cf072215b/datap')
In [9]:
# Sanity-check the dimensions (presumably pairs x E bins x E bins — confirm).
bhm_e.shape
Out[9]:
In [10]:
# NOTE(review): leftover interactive help lookup — remove from the final notebook.
bicorr_e.build_bhp_e?
In [13]:
# Project the master histogram into a single bicorrelation plot (all pairs combined).
bhp_e = bicorr_e.build_bhp_e(bhm_e, e_bin_edges)[0]
In [17]:
bicorr_plot.bhp_e_plot(bhp_e, e_bin_edges, show_flag = True)
In [19]:
# Sum of counts (and its uncertainty) in the nn energy window.
bicorr_sums.calc_nn_sum_e(bhp_e, e_bin_edges)
Out[19]:
I'm going to add this to `det_df`.
In [20]:
det_df.head()  # preview the detector-pair table before adding new columns
Out[20]:
Produce `bhp` for all detector indices.
In [22]:
# Preallocate one bicorrelation plot per detector pair:
# shape = (n_pairs, n_energy_bins, n_energy_bins).
bhp_e = np.zeros((len(det_df),len(e_bin_edges)-1,len(e_bin_edges)-1))
bhp_e.shape
Out[22]:
In [26]:
# Fill each pair's slice; det_df's index matches the pair axis of `bhm_e`.
for index in det_df.index.values: # index is same as in `bhm`
    bhp_e[index,:,:] = bicorr_e.build_bhp_e(bhm_e,e_bin_edges,pair_is=[index])[0]
In [27]:
# Add per-pair columns for the nn doubles count (Ce) and its uncertainty.
det_df['Ce'] = np.nan
det_df['Ce_err'] = np.nan
In [38]:
# Energy window in MeV. 0.62 MeV is presumably the detector threshold — TODO confirm.
e_min = 0.62
e_max = 12
for index in det_df.index.values:
    det_df.loc[index,'Ce'], det_df.loc[index,'Ce_err'] = bicorr_sums.calc_nn_sum_e(bhp_e[index,:,:],e_bin_edges, e_min, e_max)
det_df.head()
Out[38]:
In [40]:
# Doubles counts vs. pair opening angle, with counting-statistics error bars.
plt.errorbar(det_df['angle'], det_df['Ce'], yerr=det_df['Ce_err'], fmt='.')
ax = plt.gca()
ax.set_xlabel('Angle')
ax.set_ylabel('Doubles counts')
ax.set_title('Doubles counts vs. angle')
plt.show()
There are two versions of `bicorr_hist_master`: `bhm_pos` and `bhm_neg`. Building them is part of the default flux data processing script in `bicorr.py`, as follows:
print('********* Build bhm: Positive time range **********')
build_bhm(folder_start,folder_end,dt_bin_edges = np.arange(0.0,200.25,0.25))
print('********* Build bhm: Negative time range **********')
build_bhm(folder_start,folder_end,dt_bin_edges = np.arange(-200.0,0.25,0.25),sparse_filename = 'sparse_bhm_neg')
Here I will load those arrays and perform the background subtraction, then generalize the technique in a new function. I will coarsen the time binning from 0.25 ns spacing to 1 ns spacing (a factor of 4) for both arrays.
In [7]:
# Load the positive-time sparse bicorrelation histogram and its time bin edges.
sparse_bhm_pos, dt_bin_edges_pos = bicorr.load_sparse_bhm(filepath = r'../analysis/Cf072115_to_Cf072215b/datap/')[0:2]
In [8]:
# Expand the sparse storage back into a dense array
# (presumably pair x interaction type x dt x dt — confirm against bicorr.py).
bhm_pos = bicorr.revive_sparse_bhm(sparse_bhm_pos, det_df, dt_bin_edges_pos)
In [8]:
# NOTE(review): duplicate execution count In [8] — cells were run out of order.
print('ORIGINAL ARRAYS')
print(bhm_pos.shape)
print(dt_bin_edges_pos.shape)
print(dt_bin_edges_pos[-10:])
In [9]:
# Coarsen the time binning by a factor of 4 (0.25 ns -> 1 ns bins).
bhm_pos, dt_bin_edges_pos = bicorr.coarsen_bhm(bhm_pos, dt_bin_edges_pos, 4)
In [10]:
print('COARSE ARRAYS')
print(bhm_pos.shape)
print(dt_bin_edges_pos.shape)
print(dt_bin_edges_pos[-10:])
In [12]:
# Same procedure for the negative-time (background) histogram.
sparse_bhm_neg, dt_bin_edges_neg = bicorr.load_sparse_bhm(filepath = r'../analysis/Cf072115_to_Cf072215b/datap/',filename='sparse_bhm_neg.npz')[0:2]
In [13]:
bhm_neg = bicorr.revive_sparse_bhm(sparse_bhm_neg, det_df, dt_bin_edges_neg)
In [14]:
bhm_neg, dt_bin_edges_neg = bicorr.coarsen_bhm(bhm_neg, dt_bin_edges_neg, 4)
In [15]:
# Project out the nn interaction type (type_is=[0]) for both time ranges.
bhp_nn_pos = bicorr.build_bhp(bhm_pos,dt_bin_edges_pos,type_is=[0])[0]
bhp_nn_neg = bicorr.build_bhp(bhm_neg,dt_bin_edges_neg,type_is=[0])[0]
Take a quick look at plots:
In [20]:
# Quick visual check of both time ranges before subtracting.
bicorr_plot.bhp_plot(bhp_nn_pos,dt_bin_edges_pos,title='Positive time range',show_flag=True)
In [21]:
bicorr_plot.bhp_plot(bhp_nn_neg,dt_bin_edges_neg,title='Negative time range',show_flag=True)
In [33]:
# Small synthetic example for checking how array flipping interacts with pcolormesh.
x = np.arange(-10, 1)   # 11 bin edges from -10 to 0 (step of 1 is the default)
y = x.T                 # .T is a no-op on a 1-D array; kept for symmetry with 2-D use
test_array_neg = np.zeros((10, 10))
for (i, j), value in {(0, 0): 3, (2, 9): 6, (9, 5): 10}.items():
    test_array_neg[i, j] = value
In [41]:
# Bin edges for the negative-time test plot.
# BUG FIX: `dt_test_neg` was never defined anywhere in the notebook, so this cell
# raised NameError on Restart & Run All. It is the negative-time counterpart of
# `dt_test_pos` defined below.
dt_test_neg = np.arange(-10, 1, 1)
x = dt_test_neg
y = dt_test_neg.T  # .T is a no-op on a 1-D array
plt.pcolormesh(x,y,test_array_neg.T,cmap='viridis')
plt.xlabel('Dimension 0')
plt.ylabel('Dimension 1')
plt.colorbar()
plt.show()
Flip the array. The result should look as follows:
In [37]:
# Flip the test array about both axes; for a 2-D array, a 180-degree rotation
# is exactly equivalent to test_array_neg[::-1, ::-1].
dt_test_pos = np.arange(0, 11)
test_array_pos = np.rot90(test_array_neg, 2)
In [42]:
# The flipped array should show the same features mirrored through the origin.
plt.pcolormesh(dt_test_pos,dt_test_pos,test_array_pos.T,cmap="viridis")
plt.xlabel('Dimension 0')
plt.ylabel('Dimension 1')
plt.colorbar()
plt.show()
Try it for a smaller size.
In [52]:
# Repeat the flip check on a 3x3 example that is easy to verify by eye.
x = np.arange(-3, 1)
y = x.T  # no-op on a 1-D array
test_array_neg = np.zeros((3, 3))
for (i, j), value in [((1, 1), 6), ((2, 2), 1), ((2, 0), 12)]:
    test_array_neg[i, j] = value
test_array_pos = test_array_neg[::-1, ::-1]
In [53]:
# Original 3x3 array...
plt.pcolormesh(x,y,test_array_neg.T,cmap='viridis')
plt.xlabel('Dimension 0')
plt.ylabel('Dimension 1')
plt.colorbar()
plt.show()
In [56]:
# ...and its flipped version: features should be mirrored through both axes.
plt.pcolormesh(x,y,test_array_pos.T,cmap='viridis')
plt.xlabel('Dimension 0')
plt.ylabel('Dimension 1')
plt.colorbar()
plt.show()
In [59]:
# Constant 3x3 array standing in for the positive-time distribution.
tens = np.full((3, 3), 10.0)
print(tens)
In [60]:
# The constant "signal" array on its own...
plt.pcolormesh(x,y,tens.T,cmap='viridis')
plt.xlabel('Dimension 0')
plt.ylabel('Dimension 1')
plt.colorbar()
plt.show()
In [61]:
# ...and a mock background subtraction: constant signal minus the flipped test array.
plt.pcolormesh(x,y,tens.T-test_array_pos.T,cmap='viridis')
plt.xlabel('Dimension 0')
plt.ylabel('Dimension 1')
plt.colorbar()
plt.show()
This subtraction looks good. Now back to the data.
Do this now for the negative time range.
In [22]:
# Flip the negative-time bhp onto the positive time axes for direct comparison.
bicorr_plot.bhp_plot(bhp_nn_neg[::-1,::-1],dt_bin_edges_pos,title='Negative data flipped to positive time range',show_flag=True)
Look more closely at a corner, positive and negative.
In [25]:
# Zoom on the corner nearest t = 0 in the raw negative-time array...
bicorr_plot.bhp_plot(bhp_nn_neg[:10,:10],dt_bin_edges_neg[:11],title='Negative time range',show_flag=True)
In [24]:
# ...and the matching corner after flipping; the two should mirror each other.
bicorr_plot.bhp_plot(bhp_nn_neg[::-1,::-1][-10:,-10:],dt_bin_edges_pos[-11:],title='Negative data flipped to positive time range',show_flag=True)
It looks like the flipping is happening correctly. So continue on.
In [23]:
# Background subtraction: positive-time minus flipped negative-time counts.
# Cast to a signed dtype first — the histograms presumably hold unsigned counts,
# and unsigned subtraction would wrap around instead of going negative
# (TODO confirm the stored dtype).
bhp_nn_diff = np.subtract(bhp_nn_pos.astype(np.int32),bhp_nn_neg[::-1,::-1].astype(np.int32))
bhp_nn_diff.shape
Out[23]:
In [24]:
bhp_nn_diff.dtype
Out[24]:
In [26]:
bicorr_plot.bhp_plot(bhp_nn_diff,dt_bin_edges_pos,title='Background-subtracted bhp',show_flag=True)
This looks great. I have to keep the positive and negative matrices for error propagation when I calculate the $nn$ count rate sum.
Question: should I work from `bhm` or `bhp`? `bhm` is in terms of the number of counts, while `bhp` may already be normalized. I think I should work from `bhp` and provide `norm_factor` as an optional input parameter. It is returned from `build_bhp`, so I will have to store it.
In [28]:
# Energy window (MeV) mapped to a time-of-flight window (ns).
# Energy and time of flight are inversely related, so emax -> tmin and emin -> tmax.
emin = 0.62
emax = 12
tmin = bicorr.convert_energy_to_time(emax)
tmax = bicorr.convert_energy_to_time(emin)
print(tmin,tmax)
In [29]:
dt_bin_edges_pos
Out[29]:
In [30]:
# First bin edge strictly above tmin / tmax gives the index window to sum over.
i_min = np.min(np.argwhere(tmin<dt_bin_edges_pos))
i_max = np.min(np.argwhere(tmax<dt_bin_edges_pos))
print(i_min,i_max)
In [31]:
# The time bin edges actually covered by the window.
dt_bin_edges_pos[i_min:i_max]
Out[31]:
In [32]:
# Total nn counts inside the square time window.
np.sum(bhp_nn_pos[i_min:i_max,i_min:i_max])
Out[32]:
Functionalized to `bicorr.calc_nn_sum`.
In [33]:
bicorr.calc_nn_sum(bhp_nn_pos, dt_bin_edges_pos)  # same windowed sum as above, now functionalized
Out[33]:
I am going to add some logic into the script to return the real `emin` and `emax` values that correspond to the bin edges (note `convert_time_to_energy` is a function, so it must be called on a bin edge, not indexed; energy decreases with time of flight, so the max-time edge gives the minimum energy):

# What are the energy bin limits that correspond to the bins?
emin_real = convert_time_to_energy(dt_bin_edges_pos[i_max])
emax_real = convert_time_to_energy(dt_bin_edges_pos[i_min])
energies_real = [emin_real, emax_real]
In [34]:
# Windowed counts: positive range (signal + background) and flipped negative range (background).
Cp = bicorr.calc_nn_sum(bhp_nn_pos, dt_bin_edges_pos)
Cn = bicorr.calc_nn_sum(bhp_nn_neg[::-1,::-1], dt_bin_edges_pos)
Cd = Cp-Cn  # background-subtracted counts
print('BR subtraction removes ', Cn/Cp*100, ' % of counts')
The background subtraction makes about a 1% correction.
The errors in the counts follow counting statistics:
In [35]:
# Counting statistics: var(Cp - Cn) = Cp + Cn for independent Poisson counts.
err_Cd = np.sqrt(Cp + Cn)
print('BR subtracted counts = ',Cd,' +/- ',err_Cd)
In [36]:
print('Relative error = ', err_Cd/Cd)
The `bicorr_hist_master` is not normalized, but when I calculate `bhp` with a `norm_factor` then it is normalized, so I am actually working with the following. Create new `bhp` arrays that are normalized.
In [37]:
# Total number of fissions for this data set, used to normalize counts per fission.
# TODO(review): confirm the provenance of this hard-coded value.
num_fissions = 2194651200.00
In [38]:
# Rebuild the nn projections normalized per fission; keep norm_factor for error propagation.
bhp_nn_pos, norm_factor = bicorr.build_bhp(bhm_pos,dt_bin_edges_pos,type_is=[0], num_fissions = num_fissions)
bhp_nn_neg = bicorr.build_bhp(bhm_neg,dt_bin_edges_neg,type_is=[0], num_fissions = num_fissions)[0]
Assuming there is no uncertainty in $F$, the errors in the relative counts are:
In [39]:
# Normalized windowed sums, same subtraction as before.
Np = bicorr.calc_nn_sum(bhp_nn_pos,dt_bin_edges_pos)
Nn = bicorr.calc_nn_sum(bhp_nn_neg[::-1,::-1],dt_bin_edges_pos)
Nd = Np-Nn
In [40]:
print('BR subtraction removes ', Nn/Np*100, ' % of counts')
In [41]:
# With N = C / norm_factor and err_C = sqrt(C),
# err_N = sqrt(C)/norm_factor = sqrt(N/norm_factor).
err_Np = np.sqrt(Np/norm_factor)
err_Nn = np.sqrt(Nn/norm_factor)
err_Nd = np.sqrt((Np+Nn)/norm_factor)
In [42]:
print('The BR_subtracted counts is ',Nd,' +/- ',err_Nd)
In [43]:
print('Relative error = ', err_Nd/Nd)
In [53]:
# Raw (unnormalized) counts, now using the functionalized background subtraction.
bhp_nn_pos = bicorr.build_bhp(bhm_pos,dt_bin_edges_pos,type_is=[0])[0]
bhp_nn_neg = bicorr.build_bhp(bhm_neg,dt_bin_edges_neg,type_is=[0])[0]
In [56]:
Cp, Cn, Cd, Cd_err = bicorr.calc_nn_sum_br(bhp_nn_pos,bhp_nn_neg,dt_bin_edges_pos)
print(Cp, Cn, Cd, Cd_err)
Relative counts:
In [57]:
# Normalized version; calc_nn_sum_br needs norm_factor to propagate errors correctly.
bhp_nn_pos = bicorr.build_bhp(bhm_pos,dt_bin_edges_pos,type_is=[0], num_fissions = num_fissions)[0]
bhp_nn_neg = bicorr.build_bhp(bhm_neg,dt_bin_edges_neg,type_is=[0], num_fissions = num_fissions)[0]
In [58]:
Np, Nn, Nd, err_Nd = bicorr.calc_nn_sum_br(bhp_nn_pos,bhp_nn_neg,dt_bin_edges_pos,norm_factor=norm_factor)
print(Np, Nn, Nd, err_Nd)
First create positive and negative `bhp` for all pairs.
In [67]:
# Per-pair nn projections: shape (n_pairs, n_dt_bins, n_dt_bins) for each time range.
bhp_nn_pos = np.zeros((len(det_df),len(dt_bin_edges_pos)-1,len(dt_bin_edges_pos)-1))
bhp_nn_neg = np.zeros((len(det_df),len(dt_bin_edges_neg)-1,len(dt_bin_edges_neg)-1))
In [69]:
# Fill each pair's slice; det_df's index matches the pair axis of `bhm`.
for index in det_df.index.values: # index is same as in `bhm`
    bhp_nn_pos[index,:,:] = bicorr.build_bhp(bhm_pos,dt_bin_edges_pos,type_is=[0],pair_is=[index])[0]
    bhp_nn_neg[index,:,:] = bicorr.build_bhp(bhm_neg,dt_bin_edges_neg,type_is=[0],pair_is=[index])[0]
Save these to file for future use. I will call the file `bhp_nn_all_pairs_1ns.npz`. I have already saved something similar, `bhp_nn_by_pair_1ns.npz`, but that one has already cut out the fission chamber neighbors; I'll leave them in for this one. I want to save:
In [74]:
# Check the target folder before writing.
os.listdir('../analysis/Cf072115_to_Cf072215b/datap/')
Out[74]:
In [75]:
# Save the per-pair projections for reuse. Only the positive-range bin edges are
# stored; NOTE(review): dt_bin_edges_neg is not saved — confirm it can be
# reconstructed (e.g. as the negation of the positive edges) before relying on this file.
np.savez('../analysis/Cf072115_to_Cf072215b/datap/bhp_nn_all_pairs_1ns',
         bhp_nn_pos = bhp_nn_pos,
         bhp_nn_neg = bhp_nn_neg,
         dt_bin_edges_pos = dt_bin_edges_pos)
In [ ]:
In [72]:
# NOTE(review): leftover namespace inspection; remove from the final notebook.
whos
Save these to file for future use. Then: calculate the sums for each pair by looping back through `bhp`; store the sums in a dataframe; and save the dataframe to file (tagged with `emin`, `emax`?). Make it easy to recalculate the sums and refill the dataframe for new `emin`, `emax` values so that I can calculate anisotropy vs. energy threshold.