In [3]:
import numpy as np
import logging
from time import time
from datetime import datetime
from os.path import join
from os import walk
%matplotlib notebook
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
from itertools import cycle
In [2]:
# Do not change
BLEDP_SAMPLE_RATE = 2e-6      # seconds per sample
BLEDP_CURRENT_MULT = 0.0625   # raw ADC counts to uA
BLEDP_CHANNELS = 8
FRAME_VALUE_MASK = 0xFFFFF    # 1048575: lower 20 bits carry the sample value
FRAME_CH_SHIFT = 26           # channel number sits in bits 26-28
FRAME_CH_MASK = 7
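To illustrate the frame layout these constants describe (channel number in bits 26-28, 20-bit sample value in the low bits), here is a minimal sketch that unpacks a made-up 32-bit frame word; the raw value is purely illustrative:
raw = (3 << FRAME_CH_SHIFT) | 12345               # hypothetical frame: channel 3, raw value 12345
channel = (raw >> FRAME_CH_SHIFT) & FRAME_CH_MASK
value_uA = (raw & FRAME_VALUE_MASK) * BLEDP_CURRENT_MULT
print(channel, value_uA)                          # -> 3 771.5625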
In [3]:
# Change as needed
LBL_DET = ['Si 300um', 'K6430', 'Si 300um ?', 'Si 100um ?', 'HS', 'CVD1', 'CVD2', 'Trg']
markers = cycle(['1', '2', '3', '4'])
In [4]:
# Do not modify
class BledpOfflineRetriever(object):
    """
    Manage BLEDPy offline data retrieval.
    """
    # retrieved = pyqtSignal() not needed in this context

    def __init__(self):
        super(BledpOfflineRetriever, self).__init__()
        self.frames = np.array([])
        self.channel = None  # 0=multi, ch from 1

    def load_data(self, filename, **kwargs):
        """Get multi channel data from file.
        """
        offset = kwargs.get('offset', 0)
        limit = kwargs.get('limit', 0)
        limit -= offset
        if limit <= 0:
            limit = -1
        tick = time()
        with open(filename, 'rb') as fp:
            # Peek at the first two frames to decide single vs multi channel
            self.frames = np.fromfile(fp, count=2, dtype='>u4')
            ch = self.get_channels()
            if ch[0] == ch[1]:
                self.channel = ch[0] + 1
                offset <<= 2  # single channel: 4 bytes per sample
            else:
                self.channel = 0
                offset <<= 5  # multi channel: 8 channels x 4 bytes per sample
                limit <<= 3
            fp.seek(offset)
            self.frames = np.fromfile(fp, count=limit, dtype='>u4')

    def get_values(self, y_curr=False):
        res = np.bitwise_and(FRAME_VALUE_MASK, self.frames)
        if y_curr:
            res = res.astype(np.float64)
            res *= BLEDP_CURRENT_MULT
        if not self.channel:
            res = res.reshape(res.size >> 3, BLEDP_CHANNELS).transpose()
        return res

    def get_datapack(self, x_time=False, y_curr=False):
        y = self.get_values(y_curr=y_curr)
        if y.ndim == 2:
            # Multi channel
            a = np.arange(0, y[0].size, 1.0)
            if x_time:
                a *= BLEDP_SAMPLE_RATE
            x = np.repeat(a, BLEDP_CHANNELS)
            x = x.reshape(y[0].size, BLEDP_CHANNELS).transpose()
            return np.array([(xi, yi) for xi, yi in zip(x, y)])
        # Single channel
        x = np.arange(y.size, dtype=float)
        if x_time:
            x *= BLEDP_SAMPLE_RATE
        return np.array([(x, y)])

    def get_channels(self, sl=slice(None, None)):
        res = np.right_shift(self.frames[sl], FRAME_CH_SHIFT)
        res = np.bitwise_and(FRAME_CH_MASK, res)
        return res


def average(arr, n):
    """Block-average arr over non-overlapping groups of n samples."""
    end = n * int(len(arr) / n)
    return np.mean(arr[:end].reshape(-1, n), 1)
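For reference, `average` does plain block averaging and silently drops any trailing samples that do not fill a complete block; a quick illustrative check:
print(average(np.arange(10.0), 3))   # -> [1. 4. 7.]; the trailing sample 9.0 is dropped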
In [5]:
def prepare_data(datapack, channels=[0, 2, 3, 5, 6], resample='1ms'):
    """Select channels from a datapack and optionally block-average them."""
    # Block sizes in raw samples (2 us each): 5 -> 10 us, 50 -> 100 us, 500 -> 1 ms
    resample_dict = {'no': 0, '10us': 5, '100us': 50, '1ms': 500}
    resample_sel = resample_dict[resample]
    x, y = [], []
    if resample_sel:
        for ch in channels:
            y.append(average(datapack[ch][1], resample_sel))
            x.append(datapack[ch][0][::resample_sel][:y[-1].size])
    else:
        for ch in channels:
            y.append(datapack[ch][1])
            x.append(datapack[ch][0])
    return x, y
In [6]:
retriever = BledpOfflineRetriever()
In [7]:
main_path = '/eos/project/c/cryoblm/2015_CryoIRRAD/data/'
folder = 'bledp/voltage_scans/VS_2015-11-01_16h'
filename = '2015-11-01_17h55m44s'
fpath = join(main_path, folder, filename)
print('File path: %s' % fpath)
In [8]:
off, lim = int(0.3/BLEDP_SAMPLE_RATE), int(0.9/BLEDP_SAMPLE_RATE)
retriever.load_data(fpath, offset=off, limit=lim)
dp = retriever.get_datapack(x_time=True, y_curr=True)
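For multi-channel data the datapack is a stacked array of (time, current) pairs, one per channel: dp[ch][0] is the time axis in seconds and dp[ch][1] the current in uA. A quick illustrative check:
print(dp.shape)   # expected (8, 2, n_samples) for multi-channel data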
In [9]:
selected_channels = [0, 2, 3, 5, 6]
# selected_channels = [0, 5]
x, y = prepare_data(dp, channels=selected_channels, resample='1ms')
In [10]:
fig = plt.figure('BLEDP Plotter', figsize=(8, 6))
ax = fig.add_subplot(111)
cmap = plt.get_cmap('jet')
colors = cmap(np.linspace(0, 1.0, BLEDP_CHANNELS))
for i, c in enumerate(selected_channels):
    ax.plot(x[i], y[i], c=colors[c], marker=next(markers), ls='-', label=LBL_DET[c])
ax.grid(True, ls='--')
ax.set_xlabel('Time (s)')
ax.set_ylabel('Current (uA)')
ax.legend(loc=1, numpoints=1, framealpha=.7, fancybox=True)
plt.show()
In [13]:
# Specify input path & file name
main_path = '/eos/project/c/cryoblm/2015_CryoIRRAD/data/'
# folder = 'bledp/voltage_scans/VS_2015-11-01_16h'
# filename = '2015-11-01_17h55m44s'
folder = 'bledp/2015-10-29/2015-10-29_03h'
filename = '2015-10-29_03h00m17s'
# Specify output path & file name
fn_in = join(main_path, folder, filename)
out_dir = join(main_path, 'bledp_extracted')
In [14]:
# Specify channels and resampling
selected_channels = [0, 5]
resample = '1ms' # can also be '10us', '100us' or 'no'
retriever = BledpOfflineRetriever()
off, lim = int(0.3/BLEDP_SAMPLE_RATE), int(0.9/BLEDP_SAMPLE_RATE)
retriever.load_data(fn_in, offset=off, limit=lim)
dp = retriever.get_datapack(x_time=True, y_curr=True)
x, y = prepare_data(dp, channels=selected_channels, resample=resample)
arry = np.array([np.array(yi) for yi in y])
arr = np.vstack((x[0], arry)).transpose()
fn_out = join(out_dir, filename+'.csv')
header = '{}\n{}\n{}'
hdr = header.format(filename,
                    'Time, ' + ', '.join([LBL_DET[i] for i in selected_channels]),
                    '(s), ' + ', '.join(['(uA)'] * len(selected_channels)))
np.savetxt(fn_out, arr, fmt='%.5f', delimiter=',', header=hdr)
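To sanity-check the extracted file, it can be read straight back with np.loadtxt (the '#'-prefixed header lines written by np.savetxt are skipped automatically); a minimal sketch, the check_* names are illustrative:
check = np.loadtxt(fn_out, delimiter=',', unpack=True)
check_t, check_y = check[0], check[1:]
print(check_t.size, check_y.shape)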
In [24]:
# Specify input path
main_path = '/eos/project/c/cryoblm/2015_CryoIRRAD/data/'
# folder = 'bledp/voltage_scans/VS_2015-11-01_16h'
# path = join(main_path, folder)
folder = 'bledp/2015-11-16/2015-11-16_08h'
path = join(main_path, folder)
# Specify output path
# CAUTION : it is ***much*** faster if you run it locally.
# Write out the files, and then let CERNBox upload them to the server.
out_dir = join(main_path, 'bledp_extracted')
print(path)
print(out_dir)
In [26]:
# %%script false  # Uncomment this line to skip running the directory extraction
# Specify channels and resampling.
# Also a file number limit if you want to process e.g. only 10 files
selected_channels = [0, 5]
resample = '1ms' # can also be '10us', '100us' or 'no'
flim = 10
retriever = BledpOfflineRetriever()
off, lim = int(0.3/BLEDP_SAMPLE_RATE), int(0.9/BLEDP_SAMPLE_RATE)
header = '{}\n{}\n{}'
cnt = 0
tick = time()
for root, dirs, files in walk(path):
    if cnt >= flim:
        break
    for filename in files:
        # TODO: also check that the filename matches the expected date pattern
        if filename.startswith('.'):
            continue
        fn_in = join(root, filename)
        retriever.load_data(fn_in, offset=off, limit=lim)
        dp = retriever.get_datapack(x_time=True, y_curr=True)
        x, y = prepare_data(dp, channels=selected_channels, resample=resample)
        arry = np.array([np.array(yi) for yi in y])
        arr = np.vstack((x[0], arry)).transpose()
        fn_out = join(out_dir, filename + '.csv')
        hdr = header.format(filename,
                            'Time, ' + ', '.join([LBL_DET[i] for i in selected_channels]),
                            '(s), ' + ', '.join(['(uA)'] * len(selected_channels)))
        np.savetxt(fn_out, arr, fmt='%.5f', delimiter=',', header=hdr)
        cnt += 1
        if cnt >= flim:
            break
print('Time Elapsed : %.3f sec' % (time() - tick))
print('Files extracted : %d' % cnt)
In [66]:
# Specify input path
main_path = '/eos/project/c/cryoblm/2015_CryoIRRAD/data/'
folder = 'bledp/voltage_scans/VS_2015-11-01_16h'
path = join(main_path, folder)
# Specify output path and filename
# CAUTION : it is ***much*** faster if you run it locally.
# Write out the files, and then let CERNBox upload them to the server.
out_dir = join(main_path, 'bledp_extracted')
fn_out = join(out_dir, '%s_out_sum.csv' % datetime.now().strftime('%Y%m%d_%H%M%S'))
print(fn_out)
In [67]:
# Set up a function that returns the results that you want from each file.
# Other per-file analyses can be defined the same way (see the sketch after this cell).
def sum_analysis(y_values):
    res = []
    for y in y_values:
        res.append(np.sum(y))
    return res
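As noted above, other per-file reductions can be plugged in the same way; a hypothetical alternative returning the mean current of each selected channel instead of the sum:
def mean_analysis(y_values):
    # Hypothetical example: mean of each selected channel trace
    return [np.mean(y) for y in y_values]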
In [68]:
with open(fn_out, 'w') as fp:
    header = "Analysis on {}\n{}\n{}\n"
    hdr = header.format(path,
                        'Timestamp, ' + ', '.join([LBL_DET[i] for i in selected_channels]),
                        '(timestamp), ' + ', '.join(['sum'] * len(selected_channels)))
    fp.write(hdr)
    out_line = "{}, {}\n"
    cnt = 0
    tick = time()
    for root, dirs, files in walk(path):
        for filename in files:
            # TODO: also check that the filename matches the expected date pattern
            if filename.startswith('.'):
                continue
            cnt += 1
            fn_in = join(root, filename)
            retriever.load_data(fn_in, offset=off, limit=lim)
            dp = retriever.get_datapack(x_time=True, y_curr=True)
            x, y = prepare_data(dp, channels=selected_channels, resample=resample)
            res = sum_analysis(y)
            res_str = map(str, res)
            # Parse the timestamp out of the filename so the plotting cell below can read it
            ts = datetime.strptime(filename, '%Y-%m-%d_%Hh%Mm%Ss').strftime('%Y-%m-%d %H:%M:%S')
            fp.write(out_line.format(ts, ", ".join(res_str)))
print('Time elapsed : %.3f sec' % (time() - tick))
print('Files processed: %d' % cnt)
In [44]:
# Specify the file - or leave it as-is to use the file created above.
fn = fn_out
# fn = join(out_dir, '20180328_110158_out_sum.csv') # or choose specifically
dt_fmt = "%Y-%m-%d %H:%M:%S"
with open(fn, 'r') as fp:
    hdr = [next(fp) for x in range(3)]
    data = np.loadtxt(fp, dtype=str, delimiter=',', unpack=True)
ts = [datetime.strptime(item, dt_fmt) for item in data[0]]
y = data[1:].astype(np.float64)
lbls = [item.strip() for item in hdr[1][:-1].split(',')[1:]]
fig = plt.figure("Sum results", figsize=(8, 6))
ax = fig.add_subplot(111)
for i, yi in enumerate(y):
    ax.plot_date(ts, yi, ls='None', marker=next(markers), label=lbls[i])
myFmt = mdates.DateFormatter('%H:%M:%S')
ax.xaxis.set_major_formatter(myFmt)
ax.set_xlabel('Time')
ax.set_ylabel('Sum')
ax.grid(True, ls='--')
ax.legend(loc=1, numpoints=1, framealpha=.7, fancybox=True)
fig.autofmt_xdate()
plt.show()
In [13]:
fin = '/eos/project/c/cryoblm/2015_CryoIRRAD/analysis/CI15_sync_bledp_vs_beam.csv'
fout = '/eos/project/c/cryoblm/2015_CryoIRRAD/analysis/CI15_sync_bledp_vs_beam_mod.csv'
print('FIN: %s' % fin)
tick = time()
with open(fin, 'r') as fp:
    hdr = next(fp)
    data = np.loadtxt(fp, dtype=str, delimiter=',', unpack=True)
new_ts = [datetime.strptime(item, '%Y-%m-%d %H:%M:%S').strftime('%Y-%m-%d_%Hh%Mm%Ss') for item in data[0]]
new_data = np.copy(data[1:])
new_data = np.vstack([new_ts, new_data])
np.savetxt(fout, new_data.transpose(), fmt='%s', delimiter=',', header=hdr.strip())
print('FOUT: %s' % fout)
print('Time Elapsed: %.3f sec' % (time()-tick))
In [ ]: