In [1]:
from __future__ import print_function, division
from datetime import datetime, timedelta
from pytimber import LoggingDB  # CERN logging-database client (CALS/Timber)
import numpy as np
from scipy.optimize import curve_fit
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
from matplotlib import rcParams
%matplotlib notebook
# Show full tick values instead of an offset (easier to read absolute times/values)
rcParams['axes.formatter.useoffset'] = False
# # Python 2 fallback: the `notebook` backend was called `nbagg` there
# from sys import version_info
# if version_info.major == 2:
# %matplotlib nbagg
from itertools import cycle
ldb = LoggingDB()
# A dictionary to store mean results (filled in progressively by the cells below)
bsrt = {}
In [2]:
# BSRT LogDB variable names for Beam 2, instrument 5L4.
# Each list carries the vertical ('V') then horizontal ('H') plane variant.
v_sigma = ['LHC.BSRT.5L4.B2:FIT_SIGMA_%s' % p for p in ('V', 'H')]
v_lsf = ['LHC.BSRT.5L4.B2:LSF_%s' % p for p in ('V', 'H')]
v_pos = ['LHC.BSRT.5L4.B2:FIT_POSITION_%s' % p for p in ('V', 'H')]
v_beta = ['LHC.BSRT.5L4.B2:BETA_%s' % p for p in ('V', 'H')]
v_eng = ['HX:ENG']  # machine energy variable
v_gd = ['LHC.BSRT.5L4.B2:GATE_DELAY']  # bunch IDs measured per acquisition
# ldb.search('LHC.BSRT.5L4.B2%')
In [3]:
# Select the LHC fill and print its beam-mode timeline
fillnum = 6371
fill = ldb.getLHCFillData(fillnum, unixtime=False)  # dict with startTime/endTime/beamModes
tf1, tf2 = fill['startTime'], fill['endTime']
print('Fill %s\n\tStart: %s\tEnd :%s' % (fill["fillNumber"], tf1, tf2))
for item in fill['beamModes']:
    print("Mode: %s\n\tStart: %s\tEnd :%s" % (item["mode"], item['startTime'], item['endTime']))
TODO: summarize the fill information printed above (which beam-mode window is analysed and why).
In [4]:
# Choose the analysis time window [t1, t2].
# Option 1: derive it from the fill's beam-mode boundaries.
mode = "STABLE"
for item in fill['beamModes']:
    if item["mode"] == mode:
        # Alternative paddings/offsets around the mode boundaries:
        # t1, t2 = item['startTime']-timedelta(minutes=20), item['endTime']+timedelta(minutes=20)
        # t1, t2 = item['startTime']+timedelta(minutes=10), item['endTime']
        # t1, t2 = item['startTime']+timedelta(hours=10), item['endTime']-timedelta(hours=10)
        t1, t2 = item['startTime'], item['endTime']
# OR.... Setting manually
# NOTE(review): the active manual assignment below ALWAYS overrides the loop result above;
# comment it out if the mode-derived window is wanted.
# R2707
# 20171102-162618 -> 20171102-163509 // this is still injection but only for B1
# t1, t2 = datetime(2017, 11, 2, 16, 26, 18), datetime(2017, 11, 2, 16, 35, 9)
# R2711
# 20171102-172324 -> 20171102-173222
# t1, t2 = datetime(2017, 11, 2, 17, 23, 24), datetime(2017, 11, 2, 17, 32, 22)
# R2717
# 20171102-190743 -> 20171102-191614
# t1, t2 = datetime(2017, 11, 2, 19, 7, 43), datetime(2017, 11, 2, 19, 16, 14)
# Whole Fill
# t1, t2 = tf1, tf2
# R2723
# 20171106-181313 -> 20171106-181918
# t1, t2 = datetime(2017, 11, 6, 18, 13, 13), datetime(2017, 11, 6, 18, 19, 18)
# R2737 - Fill 6371 (the window currently in use)
# XXX -> XXXX
t1, t2 = datetime(2017, 11, 9, 20, 0, 0), datetime(2017, 11, 9, 20, 20, 0)
# 2.5 TeV fills
# R2740
# 20171113-122424 -> 20171113-125856
# t1, t2 = datetime(2017, 11, 13, 12, 24, 24), datetime(2017, 11, 13, 12, 58, 56)
# R2741
# 20171114-093357 -> 20171114-100904
# t1, t2 = datetime(2017, 11, 14, 9, 33, 57), datetime(2017, 11, 14, 10, 9, 4)
# R2742
# 20171114-122633 -> 20171114-122959
# t1, t2 = datetime(2017, 11, 14, 12, 26, 33), datetime(2017, 11, 14, 12, 29, 59)
# R2744
# 20171114-132522 -> 20171114-140129
# t1, t2 = datetime(2017, 11, 14, 13, 25, 22), datetime(2017, 11, 14, 14, 1, 29)
# R2798
# 20171128-041028 -> 20171128-044040
# t1, t2 = datetime(2017, 11, 28, 4, 10, 28), datetime(2017, 11, 28, 4, 40, 40)
fmt = "%Y-%m-%d %H:%M:%S"
print('Getting Data for: %s until %s' % (t1.strftime(fmt), t2.strftime(fmt)))
In [5]:
# Beam energy over the window, re-sampled to 1-minute steps (REPEAT = hold last value)
d_eng = ldb.getScaled(v_eng, t1, t2, unixtime=False, scaleAlgorithm='REPEAT', scaleSize='1', scaleInterval='MINUTE')
# Conversion factor applied to the logged HX:ENG values to obtain GeV
# (presumably the bending-field/energy ratio — TODO confirm against the variable's units)
coeff = 8.33333333
minFmt = mdates.DateFormatter('%Y-%m-%d %H:%M')
f = plt.figure('Beam Energy')
ax = f.add_subplot(111)
for k, d in d_eng.items():
    print("%s\t: x: %s\ty: %s" % (k, str(d[0].size), str(d[1].shape)))
    x = d[0]
    y = np.round(d[1]/coeff, 0)  # round to whole GeV
    ax.plot(x, y, lw=3, marker='x', c='k', ms=5, ls='None', label=k)
    # Warn if the energy was not constant over the window (e.g. during a ramp)
    ls = len(set(y))
    if ls > 1:
        print('*** Energy has %d different values; take care' % ls)
ax.set_ylabel('Energy [GeV]', fontsize=12)
ax.legend(loc=2, numpoints=1, framealpha=.7)
ax.grid(which='both', lw=2)
ax.set_title('Beam Energy')
ax.xaxis.set_major_formatter(minFmt)
f.autofmt_xdate()
plt.show()
# NOTE(review): `y` leaks from the loop above; fine for the single v_eng variable,
# but would silently keep only the last variable if more were added.
bsrt['eng'] = y.mean()
print('\nBeam Energy: %d GeV' % (bsrt['eng']))
In [6]:
# Beta function at the BSRT per plane, 1-minute REPEAT re-sampling
d_beta = ldb.getScaled(v_beta, t1, t2, unixtime=False, scaleAlgorithm='REPEAT', scaleSize='1', scaleInterval='MINUTE')
minFmt = mdates.DateFormatter('%Y-%m-%d %H:%M')
f = plt.figure('BSRT Beta')
ax = f.add_subplot(111)
for k, d in d_beta.items():
    print("%s\t: x: %s\ty: %s" % (k, str(d[0].size), str(d[1].shape)))
    x = d[0]
    y = d[1]
    # The beta value should be constant for a fixed optics; warn otherwise
    ls = len(set(y))
    if ls > 1:
        print('*** Beta has %d different values; take care' % ls)
    # Store the mean per plane ('h'/'v' taken from the last char of the variable name)
    bsrt["b_%s" % k[-1].lower()] = y.mean()
    ax.plot(x, y, lw=3, label=k)
ax.set_ylabel('Value [m]', fontsize=12)
ax.legend(loc=3, numpoints=1, framealpha=.7)
ax.grid(which='both', lw=2)
ax.set_ylim((100, 400))
ax.set_title('[BSRT] Beta')
ax.xaxis.set_major_formatter(minFmt)
f.autofmt_xdate()
plt.show()
print('\nBSRT Beta:\n\tBSRT_BETA_H = %.3f\n\tBSRT_BETA_V = %.3f' % (bsrt['b_h'], bsrt['b_v']))
In [7]:
# LSF (line-spread factor) correction per plane, 1-minute REPEAT re-sampling
d_lsf = ldb.getScaled(v_lsf, t1, t2, unixtime=False, scaleAlgorithm='REPEAT', scaleSize='1', scaleInterval='MINUTE')
f = plt.figure('BSRT LSF')
ax = f.add_subplot(111)
x_lsf = []
for k, d in d_lsf.items():
    print("%s\t: x: %s\ty: %s" % (k, str(d[0].size), str(d[1].shape)))
    x = d[0]
    y = d[1]
    # LSF is a calibration constant; warn if it changed during the window
    ls = len(set(y))
    if ls > 1:
        print('*** LSF has %d different values; take care' % ls)
    ax.plot(x, y, lw=3, label=k)
    # Store the mean per plane under keys 'c_h' / 'c_v'
    bsrt["c_%s" % k[-1].lower()] = y.mean()
ax.set_ylabel('Value [m]', fontsize=12)
ax.legend(loc=3, numpoints=1, framealpha=.7)
ax.grid(which='both', lw=2)
ax.set_title('[BSRT] LSF')
ax.xaxis.set_major_formatter(minFmt)
f.autofmt_xdate()
# NOTE(review): keeps only the timestamps of the LAST plane plotted; currently unused downstream
x_lsf = x
plt.show()
print('\nBSRT LSF:\n\tBSRT_LSF_H = %.3f\n\tBSRT_LSF_V = %.3f' % (bsrt['c_h'], bsrt['c_v']))
In [8]:
window = 60  # intended moving-average window (samples); NOTE(review): currently unused
pmass = 0.938  # proton rest mass [GeV/c^2]
# Relativistic gamma used as emittance normalisation coefficient (E in GeV)
gamma = np.round(1+bsrt['eng']/pmass, 5)
def fill_array_with_nan(data):
    """Pad ragged rows of BSRT data into a rectangular 2-D float array.

    BSRT acquisitions can contain a different number of values per row;
    every row is padded with NaN up to the length of the longest row.

    Parameters
    ----------
    data : sequence of sequences of float
        The ragged per-acquisition measurements.

    Returns
    -------
    (set, numpy.ndarray)
        The set of distinct row lengths, and the ``(n_rows, max_len)``
        padded array with NaN in the unfilled tail of each row.
    """
    lens = np.array([len(row) for row in data])
    # Mask of valid places in each row
    mask = np.arange(lens.max()) < lens[:, None]
    # FIX: the `np.float` alias was removed in NumPy 1.24 — use the builtin `float`;
    # np.full also replaces the empty-then-assign-NaN two-step.
    ret = np.full(mask.shape, np.nan, dtype=float)
    ret[mask] = np.concatenate(data)
    return set(lens), ret
def mavg(a, n=60):
    """Simple trailing moving average with window ``n``.

    Returns an array of length ``len(a) - n + 1`` where element ``i`` is
    the arithmetic mean of ``a[i:i + n]`` (cumulative-sum trick).
    """
    csum = np.cumsum(a, dtype=float)
    csum[n:] = csum[n:] - csum[:-n]
    window_sums = csum[n - 1:]
    return window_sums / n
# Get the raw BSRT sigma measurements (no re-sampling) for the selected window
d_sigma = ldb.get(v_sigma, t1, t2, unixtime=False)
minFmt = mdates.DateFormatter('%Y-%m-%d %H:%M:%S')
ma = cycle(('+','1', 'x', '2', '3', '4', '|', '_'))  # distinct markers per trace
f = plt.figure('BSRT Sigma', figsize=(8, 12))
ax1 = f.add_subplot(311)  # raw sigma
ax2 = f.add_subplot(312)  # LSF-corrected sigma
ax3 = f.add_subplot(313)  # corrected emittance
for k, d in d_sigma.items():
    print("%s\t: x: %s\ty: %s" % (k, str(d[0].size), str(d[1].shape)))
    x = d[0]
    y = d[1]
    s = k[-1].lower()  # plane key: 'h' or 'v' (last char of the variable name)
    if len(y.shape) == 1:
        # Ragged rows came back as a 1-D object array: pad to a NaN-filled rectangle
        lens, y = fill_array_with_nan(d[1])
        print("\tLens: %s\ty: %s" % (lens, str(y.shape)))
    # Average each acquisition over its second axis (presumably the bunches), NaN-safe
    y = np.nanmean(y, axis=1)
    # Keep only values above the LSF floor; sigma <= LSF cannot be deconvolved below
    indx = np.where(y > bsrt["c_%s" % s])
    if indx[0].size < y.size:
        print('Attention: Some values [%d] are not valid: %s' % (y.size-indx[0].size, y[y <= bsrt["c_%s" % s]]))
    y = y[indx]
    x = x[indx]
    if y.size == 0:
        print('Problem: No values left for %s !' % k)
        ax1.plot(x, y, ls='None', marker=next(ma), label=k)
        # Abort the whole cell — nothing sensible can be computed for this plane
        raise KeyboardInterrupt
    # Deconvolve the LSF in quadrature, then convert to normalised emittance
    y_corr = np.sqrt(y**2 - bsrt["c_%s" % s]**2)
    y_emt = gamma*(y_corr**2/bsrt['b_%s' % s])
    # mu = y.mean()
    # print('%s:%s / %s:%s' % (type(x), x[:3], type(y), y[:3]))
    ax1.plot(x, y, ls='None', marker=next(ma), label=k)
    ax2.plot(x, y_corr, ls='None', marker=next(ma), label=k)
    ax3.plot(x, y_emt, ls='None', marker=next(ma), label=k)
    # Store the per-plane window means
    bsrt["s_%s" % s] = y.mean()
    bsrt["sc_%s" % s] = y_corr.mean()
    bsrt["ec_%s" % s] = y_emt.mean()
ylbls = ['Sigma [mm]', 'Sigma [mm]', 'Emittance']
titles = ["BSRT Sigma", "BSRT Sigma Corrected", "Emittance Corrected"]
for ax, ylbl, title in zip([ax1, ax2, ax3], ylbls, titles):
    ax.set_ylabel(ylbl, fontsize=12)
    ax.legend(loc=2, numpoints=1, framealpha=.7)
    ax.grid(which='both', lw=2)
    ax.set_title(title)
    ax.xaxis.set_major_formatter(minFmt)
f.autofmt_xdate()
plt.show()
try:
    print('\nBSRT RESULTS:\n')
    print('\tBSRT_LSF_H = %.3f\tBSRT_LSF_V = %.3f' % (bsrt['c_h'], bsrt['c_v']))
    print('\tBSRT_BETA_H = %.3f\tBSRT_BETA_V = %.3f' % (bsrt['b_h'], bsrt['b_v']))
    print('\tBEAM_ENERGY = %d GeV' % bsrt['eng'])
    print('\tNORM_COEFF = %f' % gamma)
    print()
    print('\tBSRT_SIGMA_H = %.3f\tBSRT_SIGMA_V = %.3f' % (bsrt['s_h'], bsrt['s_v']))
    print('\tBSRT_SIGMA_CORR_H = %.3f\tBSRT_SIGMA_CORR_V = %.3f' % (bsrt['sc_h'], bsrt['sc_v']))
    print('\tBSRT_EMT_CORR_H = %.3f\tBSRT_EMT_CORR_V = %.3f' % (bsrt['ec_h'], bsrt['ec_v']))
except KeyError:
    # Reached when a plane was aborted above and some bsrt keys are missing
    print('Problems')
In [13]:
def gaus(x, a, mu, sigma):
    """Unnormalised Gaussian: amplitude ``a``, mean ``mu``, width ``sigma``."""
    exponent = -(x - mu)**2 / ( 2*sigma**2 )
    return a*np.exp(exponent)
# Do not retrieve again if a previous cell already fetched the sigma data
if not d_sigma:
    d_sigma = ldb.get(v_sigma, t1, t2, unixtime=False)
# Gate delays: for each acquisition, the bunch IDs (BIDs) that were measured
d_bids = ldb.get(v_gd, t1, t2, unixtime=False)
minFmt = mdates.DateFormatter('%Y-%m-%d %H:%M:%S')
ma = cycle(('+','1', 'x', '2', '3', '4', '|', '_'))
x_b = np.arange(3564)  # one slot per possible bunch position in the ring
yb = d_bids[v_gd[0]][1]  # per-acquisition arrays of BIDs
f = plt.figure('BSRT Measurements Per BID', figsize=(8, 14))
ax1 = f.add_subplot(611)  # mean sigma per bunch (with error bars)
ax2 = f.add_subplot(612)  # number of measurements per bunch
ax3 = f.add_subplot(613)  # corrected sigma per bunch
ax4 = f.add_subplot(614)  # emittance per bunch
ax5 = f.add_subplot(615)  # RMS per bunch
ax6 = f.add_subplot(616)  # histogram of the per-bunch RMS + Gaussian fit
problem = False  # NOTE(review): never read below — dead variable
# Per-plane, per-bunch statistics. For each sigma variable: check consistency
# against the gate delays, bin the measurements by BID, compute mean/std/count,
# mask unphysical bunches, derive corrected sigma and emittance, and plot.
for k, d in d_sigma.items():
    print("%s\t: x: %s\ty: %s" % (k, str(d[0].size), str(d[1].shape)))
    x = d[0]
    y = d[1]
    s = k[-1].lower()  # plane key: 'h' or 'v'
    # Check consistency of data (v_sigma / v_gd) - should be same size and correct BIDs
    bids = {b: [] for b in range(3564)}
    for i in range(len(yb)):
        assert len(y[i]) == len(yb[i])
        for bid in yb[i]:
            assert bid in bids
    print('Datacheck OK [%s]' % s)
    # Create bid dataset, with the measurements for each BID
    for i in range(len(y)):
        for j, bid in enumerate(yb[i]):
            bids[bid].append(y[i][j])
    # Now do mean/std for each BID and create corresponding arrays
    spb = np.repeat(np.nan, len(list(bids.keys())))     # mean sigma per bunch
    spb_s = np.repeat(np.nan, len(list(bids.keys())))   # std per bunch
    spb_nm = np.repeat(np.nan, len(list(bids.keys())))  # number of measurements per bunch
    # Renamed from `k, d` to avoid shadowing the outer loop variables
    for bid, meas in bids.items():
        if meas:
            spb[bid] = np.mean(meas)
            spb_s[bid] = np.std(meas)
            spb_nm[bid] = len(meas)
    indx = np.where(~np.isnan(spb))[0]
    print("Number of bunches: %s" % (indx.size))
    # problem_indx holds positions WITHIN the indx subset, not BIDs
    problem_indx = np.where(spb[indx] <= bsrt["c_%s" % s])[0]
    if problem_indx.size > 0:
        # BUGFIX: map subset positions back to real BIDs before masking;
        # the previous `spb[problem_indx] = np.nan` blanked the wrong slots.
        bad_bids = indx[problem_indx]
        print('Attention: Bunches with problems: %s' % bad_bids)
        print('Setting them to nan')
        print('Problematic bunches: %s' % (bad_bids))
        spb[bad_bids] = np.nan
    # Now calculate also the corrected sigma and emittance
    spb_corr = np.sqrt(spb**2 - bsrt["c_%s" % s]**2)
    epb = gamma*(spb_corr**2/bsrt['b_%s' % s])
    # Save mean values to the dictionary - should be the same as before
    bsrt["s_%s" % s] = np.nanmean(spb)
    bsrt["sc_%s" % s] = np.nanmean(spb_corr)
    bsrt["ec_%s" % s] = np.nanmean(epb)
    # Plot
    ax1.errorbar(x_b, spb, yerr=spb_s, ls='None', marker=next(ma), label='Mean Sigma Per Bunch - ' + s)
    ax2.plot(x_b, spb_nm, ls='-', marker=next(ma), label='Number of measurements - ' + s)
    ax3.plot(x_b, spb_corr, ls='None', marker=next(ma), label='Mean Sigma Per Bunch Corrected - ' + s)
    ax4.plot(x_b, epb, ls='None', marker=next(ma), label='Emittance Per Bunch - ' + s)
    ax5.plot(x_b, spb_s, ls='None', marker=next(ma), label='RMS Per Bunch - ' + s)
    ax6.hist(spb_s[~np.isnan(spb_s)], bins=50, alpha=.5)
    # Fit a Gaussian to the RMS distribution (best-effort: skip if it fails)
    vals, bins = np.histogram(spb_s[~np.isnan(spb_s)], bins=50)
    cbins = (bins[:-1] + bins[1:]) / 2
    fit = True
    try:
        popt, pcov = curve_fit(gaus, cbins, vals)
    except RuntimeError:
        print('RuntimeError : No fit.')
        fit = False
    if fit:
        ax6.plot(cbins, gaus(cbins, *popt), ls='--', lw=3, c='r')
        print("Fit results: %s" % popt)
# Axis cosmetics and result printout.
# NOTE(review): `ma` was the marker cycle above; reusing the name here destroys it
# (harmless because the plotting loop is done, but worth renaming).
mi, ma = np.nanmin(spb_nm), np.nanmax(spb_nm)
ax1.set_xlim(-100, 3664)
ax1.legend(loc=1, numpoints=1)
ax1.set_xlabel('BID')
ax1.set_ylabel('Sigma, [mm]')
ax2.legend(loc=1, numpoints=1)
ax2.set_xlim(-100, 3664)
ax2.set_ylim(mi-ma*.1, ma+ma*.1)
ax2.set_xlabel('BID')
ax2.set_ylabel('# of measurements')
ax3.legend(loc=1, numpoints=1)
ax3.set_xlim(-100, 3664)
ax3.set_xlabel('BID')
ax3.set_ylabel('Sigma, [mm]')
ax4.legend(loc=1, numpoints=1)
ax4.set_xlim(-100, 3664)
ax4.set_xlabel('BID')
ax4.set_ylabel('Emittance')
ax5.legend(loc=1, numpoints=1)
ax5.set_xlim(-100, 3664)
ax5.set_xlabel('BID')
ax5.set_ylabel('RMS')
plt.show()
# Summary of the per-bunch analysis (should agree with the per-acquisition cell above)
print('\nBSRT RESULTS:\n')
print('\tBSRT_LSF_H = %.3f\tBSRT_LSF_V = %.3f' % (bsrt['c_h'], bsrt['c_v']))
print('\tBSRT_BETA_H = %.3f\tBSRT_BETA_V = %.3f' % (bsrt['b_h'], bsrt['b_v']))
print('\tBEAM_ENERGY = %d GeV' % bsrt['eng'])
print('\tNORM_COEFF = %f' % gamma)
print()
print('\tBSRT_SIGMA_H = %.3f\tBSRT_SIGMA_V = %.3f' % (bsrt['s_h'], bsrt['s_v']))
print('\tBSRT_SIGMA_CORR_H = %.3f\tBSRT_SIGMA_CORR_V = %.3f' % (bsrt['sc_h'], bsrt['sc_v']))
print('\tBSRT_EMT_CORR_H = %.3f\tBSRT_EMT_CORR_V = %.3f' % (bsrt['ec_h'], bsrt['ec_v']))
In [ ]:
# Extrapolate the corrected beam sizes from the BSRT to the BGV location
# using beta values from the LHC Optics Web (sigma scales with sqrt(beta)).
# bgv = {'b_h': 124.6, 'b_v': 153.9} # 6.5TeV - C7L4
# bgv = {'b_h': 132.0, 'b_v': 153.4} # 6.5TeV - B7L4
# bgv = {'b_h': 124.7, 'b_v': 153.9} # 450GeV - C7L4
# bgv = {'b_h': 132.1, 'b_v': 153.3} # 450GeV - B7L4
# bgv = {'b_h': 225.76, 'b_v': 158.93} # \pm .43, \pm .13 # NOPE - VdM ?
bgv = {'b_h': 125, 'b_v': 154}
bgv['sc_h'] = bsrt['sc_h']*np.sqrt(bgv['b_h'] / bsrt['b_h'])
bgv['sc_v'] = bsrt['sc_v']*np.sqrt(bgv['b_v'] / bsrt['b_v'])
bgv['ec_h'] = gamma*bgv['sc_h']**2/bgv['b_h']
bgv['ec_v'] = gamma*bgv['sc_v']**2/bgv['b_v']
print('\n*** Extrapolating @BGV - Corrected (LSF)')
print('\nBGV RESULTS:\n')
print('\tBGV_SIGMA_CORR_H = %.5f\t BGV_SIGMA_CORR_V = %.5f' % (bgv['sc_h'], bgv['sc_v']))
# FIX: pass scalars to %-formatting. The previous code formatted size-1 ndarrays
# (np.mean of a doubly-nested list, np.sqrt of a list), and implicit conversion of
# ndim>0 arrays to scalars is deprecated in NumPy >= 1.25.
print('\tBGV_SIGMA_MEAN = %.5f' % (np.mean([bgv['sc_h'], bgv['sc_v']])))
print('\tBGV_SIGMA_GEO_MEAN = %.5f' % (np.sqrt(bgv['sc_h']*bgv['sc_v'])))
print('\tBGV_EMT_CORR_H = %.5f\t BGV_EMT_CORR_V = %.5f' % (bgv['ec_h'], bgv['ec_v']))
In [ ]: