ELETTRA-27: IDs interaction
[1]:
# In this example the interaction of two EPU IDs is explored
# First, the corrector settings (deltas) are obtained for each ID separately
# Next, both IDs are turned on and the combined correction is applied
# An additional global tune correction can be performed after adding the IDs to better restore the tunes
# Extra care should be taken with vertical dispersion (here it is handled by additional weighting)
[2]:
# Import
import torch
from torch import Tensor
from pathlib import Path
import matplotlib
from matplotlib import pyplot as plt
from matplotlib.patches import Rectangle
matplotlib.rcParams['text.usetex'] = True
from model.library.element import Element
from model.library.line import Line
from model.library.quadrupole import Quadrupole
from model.library.matrix import Matrix
from model.command.external import load_lattice
from model.command.build import build
from model.command.tune import tune
from model.command.orbit import dispersion
from model.command.twiss import twiss
from model.command.advance import advance
from model.command.coupling import coupling
from model.command.wrapper import Wrapper
from model.command.wrapper import forward
from model.command.wrapper import inverse
from model.command.wrapper import normalize
[3]:
# Set data type and device
Element.dtype = dtype = torch.float64
Element.device = device = torch.device('cpu')
[4]:
# EPU-A model-free compensation
[5]:
# Load lattice (ELEGANT table)
# Note, lattice is allowed to have repeated elements
path = Path('elettra.lte')
data = load_lattice(path)
[6]:
# Build and setup lattice
ring:Line = build('RING', 'ELEGANT', data)
# Flatten sublines
ring.flatten()
# Remove all marker elements but the ones starting with MLL (long straight section centers)
ring.remove_group(pattern=r'^(?!MLL_).*', kinds=['Marker'])
# Replace all sextupoles with quadrupoles
def factory(element:Element) -> Quadrupole:
table = element.serialize
table.pop('ms', None)
return Quadrupole(**table)
ring.replace_group(pattern=r'', factory=factory, kinds=['Sextupole'])
# Set linear dipoles
def apply(element:Element) -> None:
element.linear = True
ring.apply(apply, kinds=['Dipole'])
# Merge drifts
ring.merge()
# Change lattice start
ring.start = "BPM_S01_01"
# Split BPMs
ring.split((None, ['BPM'], None, None))
# Roll lattice
ring.roll(1)
# Splice lattice
ring.splice()
# Describe
ring.describe
[6]:
{'BPM': 168, 'Drift': 708, 'Dipole': 156, 'Quadrupole': 360, 'Marker': 12}
[7]:
# Compute tunes (fractional part)
nux, nuy = tune(ring, [], matched=True, limit=1)
[8]:
# Compute dispersion
orbit = torch.tensor(4*[0.0], dtype=dtype)
etaqx, etapx, etaqy, etapy = dispersion(ring, orbit, [], limit=1)
[9]:
# Compute twiss parameters
ax, bx, ay, by = twiss(ring, [], matched=True, advance=True, full=False).T
[10]:
# Compute phase advances
mux, muy = advance(ring, [], alignment=False, matched=True).T
[11]:
# Compute coupling
c = coupling(ring, [])
[12]:
# Quadrupole names for global tune correction
QF = [f'QF_S{i:02}_{j:02}' for j in [2, 3] for i in range(1, 12 + 1)]
QD = [f'QD_S{i:02}_{j:02}' for j in [2, 3] for i in range(1, 12 + 1)]
[13]:
# Global tune response matrix
def global_observable(knobs):
kf, kd = knobs
kn = torch.stack(len(QF)*[kf] + len(QD)*[kd])
return tune(ring, [kn], ('kn', None, QF + QD, None), matched=True, limit=1)
knobs = torch.tensor([0.0, 0.0], dtype=dtype)
global_target = global_observable(knobs)
global_matrix = torch.func.jacfwd(global_observable)(knobs)
print(global_target)
print(global_matrix)
tensor([0.2994, 0.1608], dtype=torch.float64)
tensor([[ 5.8543, 2.0964],
[-2.9918, -1.2602]], dtype=torch.float64)
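# A minimal sketch (not part of the original example): since global_matrix is the jacobian
# of the tunes with respect to the two knobs, the knob deltas needed for a desired tune shift
# can be estimated by inverting it (dnu below is a hypothetical target shift)
dnu = torch.tensor([0.001, -0.001], dtype=dtype)
dknobs = torch.linalg.solve(global_matrix, dnu)
print(dknobs)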
[14]:
# Several local knobs can be used to correct ID effects
# Normal quadrupole correctors
nkn = ['OCT_S01_02', 'QF_S01_02', 'QD_S01_02', 'QD_S01_03', 'QF_S01_03', 'OCT_S01_03']
# Skew quadrupole correctors
nks = ['SD_S01_05', 'SH_S01_02', 'SH_S01_03', 'SD_S01_06']
[15]:
# Define knobs to magnets mixing matrices
Sn = torch.tensor([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0], [0.0, 0.0, 1.0], [0.0, 1.0, 0.0], [1.0, 0.0, 0.0]], dtype=dtype)
print(Sn)
print()
Ss = torch.tensor([[+1.0, 0.0], [0.0, +1.0], [0.0, -1.0], [-1.0, 0.0]], dtype=dtype)
print(Ss)
print()
tensor([[1., 0., 0.],
[0., 1., 0.],
[0., 0., 1.],
[0., 0., 1.],
[0., 1., 0.],
[1., 0., 0.]], dtype=torch.float64)
tensor([[ 1., 0.],
[ 0., 1.],
[ 0., -1.],
[-1., 0.]], dtype=torch.float64)
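# Quick check (a sketch, not part of the original example): the mixing matrices map the
# reduced knob vector onto magnet strengths, pairing the normal correctors symmetrically
# and the skew correctors antisymmetrically around the straight section
print(Sn @ torch.tensor([1.0, 2.0, 3.0], dtype=dtype))
print(Ss @ torch.tensor([1.0, 2.0], dtype=dtype))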
[16]:
# Define twiss observable (full coupled twiss)
def observable_twiss(kn, ks):
return twiss(ring, [kn, ks], ('kn', None, nkn, None), ('ks', None, nks, None), matched=True, advance=True, full=False, convert=False)
[17]:
# Define dispersion observable
w_etax = 1.0
w_etay = 8.0
def observable_dispersion(kn, ks):
orbit = torch.tensor(4*[0.0], dtype=dtype)
etax, _, etay, _ = dispersion(ring,
orbit,
[kn, ks],
('kn', None, nkn, None),
('ks', None, nks, None))
return torch.stack([w_etax*etax, w_etay*etay]).T
[18]:
# Construct the full target observable vector and the corresponding response matrix
def observable(knobs):
kn, ks = torch.split(knobs, [3, 2])
kn = Sn @ kn
ks = Ss @ ks
betas = observable_twiss(kn, ks)
etas = observable_dispersion(kn, ks)
return torch.cat([betas.flatten(), etas.flatten()])
knobs = torch.tensor((3 + 2)*[0.0], dtype=dtype)
print((target := observable(knobs)).shape)
print((matrix := torch.func.jacfwd(observable)(knobs)).shape)
torch.Size([5712])
torch.Size([5712, 5])
[19]:
# Define ID model
# Note, only the flattened upper-triangular part of the A and B matrices is passed
A = torch.tensor([[-0.03484222052711237, 1.0272120741819959E-7, -4.698931299341201E-9, 0.0015923185492594811],
[1.0272120579834892E-7, -0.046082787920135176, 0.0017792061173117564, 3.3551298301095784E-8],
[-4.6989312853101E-9, 0.0017792061173117072, 0.056853750760983084, -1.5929605363332683E-7],
[0.0015923185492594336, 3.3551298348653296E-8, -1.5929605261642905E-7, 0.08311631737263032]], dtype=dtype)
B = torch.tensor([[0.03649353186115209, 0.0015448347221877217, 0.00002719892025520868, -0.0033681183134964482],
[0.0015448347221877217, 0.13683886657005795, -0.0033198692682377406, 0.00006140578258682469],
[0.00002719892025520868, -0.0033198692682377406, -0.05260095308967722, 0.005019907688182885],
[-0.0033681183134964482, 0.00006140578258682469, 0.005019907688182885, -0.2531573249456863]], dtype=dtype)
ID = Matrix('ID',
length=0.0,
A=A[torch.triu(torch.ones_like(A, dtype=torch.bool))].tolist(),
B=B[torch.triu(torch.ones_like(B, dtype=torch.bool))].tolist())
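# Sketch (not part of the original example): the full symmetric matrix can be recovered
# from the flattened upper triangle, assuming A and B are symmetric as they are here
mask = torch.triu(torch.ones_like(A, dtype=torch.bool))
full = torch.zeros_like(A)
full[mask] = A[mask]
full = full + full.T - torch.diag(full.diagonal())
print(torch.allclose(full, A))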
[20]:
# Insert ID into the existing lattice
# This will replace the target marker
error = ring.clone()
error.flatten()
error.insert(ID, 'MLL_S01', position=0.0)
error.splice()
# Describe
error.describe
[20]:
{'BPM': 168,
'Drift': 708,
'Dipole': 156,
'Quadrupole': 360,
'Matrix': 1,
'Marker': 11}
[21]:
# Compute tunes (fractional part)
nux_id, nuy_id = tune(error, [], matched=True, limit=1)
[22]:
# Compute dispersion
orbit = torch.tensor(4*[0.0], dtype=dtype)
etaqx_id, etapx_id, etaqy_id, etapy_id = dispersion(error, orbit, [], limit=1)
[23]:
# Compute twiss parameters
ax_id, bx_id, ay_id, by_id = twiss(error, [], matched=True, advance=True, full=False).T
[24]:
# Compute phase advances
mux_id, muy_id = advance(error, [], alignment=False, matched=True).T
[25]:
# Compute coupling
c_id = coupling(error, [])
[26]:
# Tune shifts
print((nux - nux_id))
print((nuy - nuy_id))
tensor(0.0260, dtype=torch.float64)
tensor(-0.0114, dtype=torch.float64)
[27]:
# Coupling (minimal tune distance)
print(c)
print(c_id)
tensor(0., dtype=torch.float64)
tensor(0.0004, dtype=torch.float64)
[28]:
# Dispersion
plt.figure(figsize=(12, 4))
plt.errorbar(ring.locations().cpu().numpy(), (etaqx - etaqx_id).cpu().numpy(), fmt='-', marker='x', color='blue', alpha=0.75)
plt.errorbar(ring.locations().cpu().numpy(), (etaqy - etaqy_id).cpu().numpy(), fmt='-', marker='x', color='red', alpha=0.75)
plt.tight_layout()
plt.show()
[29]:
# Beta-beating
plt.figure(figsize=(12, 4))
plt.errorbar(ring.locations().cpu().numpy(), 100*((bx - bx_id)/bx).cpu().numpy(), fmt='-', marker='x', color='blue', alpha=0.75)
plt.errorbar(ring.locations().cpu().numpy(), 100*((by - by_id)/by).cpu().numpy(), fmt='-', marker='x', color='red', alpha=0.75)
plt.tight_layout()
plt.show()
print(100*(((bx - bx_id)/bx)**2).mean().sqrt())
print(100*(((by - by_id)/by)**2).mean().sqrt())
tensor(11.5994, dtype=torch.float64)
tensor(1.7916, dtype=torch.float64)
[30]:
# Phase advance
plt.figure(figsize=(12, 4))
plt.errorbar(ring.locations().cpu().numpy(), 100*((mux - mux_id)/mux).cpu().numpy(), fmt='-', marker='x', color='blue', alpha=0.75)
plt.errorbar(ring.locations().cpu().numpy(), 100*((muy - muy_id)/muy).cpu().numpy(), fmt='-', marker='x', color='red', alpha=0.75)
plt.tight_layout()
plt.show()
print(100*(((mux - mux_id)/mux)**2).mean().sqrt())
print(100*(((muy - muy_id)/muy)**2).mean().sqrt())
tensor(8.7941, dtype=torch.float64)
tensor(1.7778, dtype=torch.float64)
[31]:
# Define parametric observables for the perturbed lattice (these emulate measured tunes, twiss and dispersion)
w_etax = 1.0
w_etay = 8.0
def global_observable(knobs):
kf, kd = knobs
kn = torch.stack(len(QF)*[kf] + len(QD)*[kd])
return tune(error, [kn], ('kn', None, QF + QD, None), matched=True, limit=1)
def observable_twiss(kn, ks):
return twiss(error, [kn, ks], ('kn', None, nkn, None), ('ks', None, nks, None), matched=True, advance=True, full=False, convert=False)
def observable_dispersion(kn, ks):
orbit = torch.tensor(4*[0.0], dtype=dtype)
etax, _, etay, _ = dispersion(error,
orbit,
[kn, ks],
('kn', None, nkn, None),
('ks', None, nks, None))
return torch.stack([w_etax*etax, w_etay*etay]).T
def observable(knobs):
kn, ks = torch.split(knobs, [3, 2])
kn = Sn @ kn
ks = Ss @ ks
betas = observable_twiss(kn, ks)
etas = observable_dispersion(kn, ks)
return torch.cat([betas.flatten(), etas.flatten()])
[32]:
# Check the residual vector norm
global_knobs = torch.tensor(2*[0.0], dtype=dtype)
knobs = torch.tensor((3 + 2)*[0.0], dtype=dtype)
print(((global_observable(global_knobs) - global_target)**2).sum())
print(((observable(knobs) - target)**2).sum())
tensor(0.0008, dtype=torch.float64)
tensor(212.9162, dtype=torch.float64)
[33]:
# Optimization loop (local)
# Response matrix (jacobian)
M = matrix.clone()
# Weighting covariance (sensitivity) matrix
epsilon = 1.0E-9
C = M @ M.T
C = C + epsilon*torch.eye(len(C), dtype=dtype)
# Cholesky decomposition
L = torch.linalg.cholesky(C)
# Whiten response
M = torch.linalg.solve_triangular(L, M, upper=False)
# Additional weights
# Can be used to give extra weight to selected observables, e.g. the dispersion entries
weights = torch.ones(len(M), dtype=dtype)
weights = weights.sqrt()
# Whiten response with additional weights
M = M*weights.unsqueeze(1)
# Iterative correction
lr = 0.75
# Initial value
knobs = torch.tensor((3 + 2)*[0.0], dtype=dtype)
# Correction loop
for _ in range(8):
value = observable(knobs)
residual = target - value
residual = torch.linalg.solve_triangular(L, residual.unsqueeze(-1), upper=False).squeeze(-1)
residual = residual*weights
delta = torch.linalg.lstsq(M, residual, driver="gels").solution
knobs += lr*delta
print(((value - target)**2).sum())
print()
tensor(212.9162, dtype=torch.float64)
tensor(12.4881, dtype=torch.float64)
tensor(1.1070, dtype=torch.float64)
tensor(0.3135, dtype=torch.float64)
tensor(0.2637, dtype=torch.float64)
tensor(0.2607, dtype=torch.float64)
tensor(0.2605, dtype=torch.float64)
tensor(0.2605, dtype=torch.float64)
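# Note, with unit weights this loop is a damped Gauss-Newton iteration on whitened residuals,
# equivalent to a generalized least-squares step with covariance C = J J^T + eps*I
# Toy-size sketch with random data (not part of the original example) verifying the equivalence
J = torch.randn(20, 3, dtype=dtype)
r = torch.randn(20, dtype=dtype)
C_toy = J @ J.T + 1.0E-3*torch.eye(20, dtype=dtype)
L_toy = torch.linalg.cholesky(C_toy)
J_w = torch.linalg.solve_triangular(L_toy, J, upper=False)
r_w = torch.linalg.solve_triangular(L_toy, r.unsqueeze(-1), upper=False).squeeze(-1)
step_whitened = torch.linalg.lstsq(J_w, r_w, driver='gels').solution
step_gls = torch.linalg.solve(J.T @ torch.linalg.solve(C_toy, J), J.T @ torch.linalg.solve(C_toy, r))
print(torch.allclose(step_whitened, step_gls))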
[34]:
# Apply final local corrections
kn, ks = torch.split(knobs, [3, 2])
kn = Sn @ kn
ks = Ss @ ks
error.flatten()
for name, knob in zip(nkn, kn):
error[name].kn = (error[name].kn + knob).item()
for name, knob in zip(nks, ks):
error[name].ks = (error[name].ks + knob).item()
error.splice()
[35]:
# Optimization loop (global)
# Response matrix (jacobian)
M = global_matrix.clone()
# Weighting covariance (sensitivity) matrix
epsilon = 1.0E-9
C = M @ M.T
C = C + epsilon*torch.eye(len(C), dtype=dtype)
# Cholesky decomposition
L = torch.linalg.cholesky(C)
# Whiten response
M = torch.linalg.solve_triangular(L, M, upper=False)
# Additional weights
# Can be used to give extra weight to selected observables
weights = torch.ones(len(M), dtype=dtype)
weights = weights.sqrt()
# Whiten response with additional weights
M = M*weights.unsqueeze(1)
# Iterative correction
lr = 0.75
# Initial value
global_knobs = torch.tensor(2*[0.0], dtype=dtype)
# Correction loop
for _ in range(4):
value = global_observable(global_knobs)
residual = global_target - value
residual = torch.linalg.solve_triangular(L, residual.unsqueeze(-1), upper=False).squeeze(-1)
residual = residual*weights
delta = torch.linalg.lstsq(M, residual, driver="gels").solution
global_knobs += lr*delta
print(((value - global_target)**2).sum())
print()
tensor(0.0001, dtype=torch.float64)
tensor(9.0533e-06, dtype=torch.float64)
tensor(7.8180e-07, dtype=torch.float64)
tensor(7.1608e-08, dtype=torch.float64)
[36]:
# Apply final global corrections
kf, kd = global_knobs
error.flatten()
for name in QF:
    error[name].kn = (error[name].kn + kf).item()
for name in QD:
    error[name].kn = (error[name].kn + kd).item()
error.splice()
[37]:
# Compute tunes (fractional part)
nux_result, nuy_result = tune(error, [], matched=True, limit=1)
[38]:
# Compute dispersion
orbit = torch.tensor(4*[0.0], dtype=dtype)
etaqx_result, etapx_result, etaqy_result, etapy_result = dispersion(error, orbit, [], limit=1)
[39]:
# Compute twiss parameters
ax_result, bx_result, ay_result, by_result = twiss(error, [], matched=True, advance=True, full=False).T
[40]:
# Compute phase advances
mux_result, muy_result = advance(error, [], alignment=False, matched=True).T
[41]:
# Compute coupling
c_result = coupling(error, [])
[42]:
# Tune shifts
print((nux - nux_id).abs())
print((nuy - nuy_id).abs())
print()
print((nux - nux_result).abs())
print((nuy - nuy_result).abs())
print()
tensor(0.0260, dtype=torch.float64)
tensor(0.0114, dtype=torch.float64)
tensor(4.7075e-05, dtype=torch.float64)
tensor(6.7220e-05, dtype=torch.float64)
[43]:
# Coupling (minimal tune distance)
print(c)
print(c_id)
print(c_result)
tensor(0., dtype=torch.float64)
tensor(0.0004, dtype=torch.float64)
tensor(7.0523e-06, dtype=torch.float64)
[44]:
# Dispersion
plt.figure(figsize=(12, 4))
plt.errorbar(ring.locations().cpu().numpy(), (etaqx - etaqx_id).cpu().numpy(), fmt='-', marker='x', color='blue', alpha=0.75)
plt.errorbar(ring.locations().cpu().numpy(), (etaqy - etaqy_id).cpu().numpy(), fmt='-', marker='x', color='red', alpha=0.75)
plt.errorbar(ring.locations().cpu().numpy(), (etaqx - etaqx_result).cpu().numpy(), fmt='-', marker='o', color='blue', alpha=0.75)
plt.errorbar(ring.locations().cpu().numpy(), (etaqy - etaqy_result).cpu().numpy(), fmt='-', marker='o', color='red', alpha=0.75)
plt.tight_layout()
plt.show()
[45]:
# Beta-beating
plt.figure(figsize=(12, 4))
plt.errorbar(ring.locations().cpu().numpy(), 100*((bx - bx_id)/bx).cpu().numpy(), fmt='-', marker='x', color='blue', alpha=0.75)
plt.errorbar(ring.locations().cpu().numpy(), 100*((by - by_id)/by).cpu().numpy(), fmt='-', marker='x', color='red', alpha=0.75)
plt.errorbar(ring.locations().cpu().numpy(), 100*((bx - bx_result)/bx).cpu().numpy(), fmt='-', marker='o', color='blue', alpha=0.75)
plt.errorbar(ring.locations().cpu().numpy(), 100*((by - by_result)/by).cpu().numpy(), fmt='-', marker='o', color='red', alpha=0.75)
plt.tight_layout()
plt.show()
print(100*(((bx - bx_id)/bx)**2).mean().sqrt())
print(100*(((by - by_id)/by)**2).mean().sqrt())
print()
print(100*(((bx - bx_result)/bx)**2).mean().sqrt())
print(100*(((by - by_result)/by)**2).mean().sqrt())
print()
tensor(11.5994, dtype=torch.float64)
tensor(1.7916, dtype=torch.float64)
tensor(0.2067, dtype=torch.float64)
tensor(0.3851, dtype=torch.float64)
[46]:
# Phase advance
plt.figure(figsize=(12, 4))
plt.errorbar(ring.locations().cpu().numpy(), 100*((mux - mux_id)/mux).cpu().numpy(), fmt='-', marker='x', color='blue', alpha=0.75)
plt.errorbar(ring.locations().cpu().numpy(), 100*((muy - muy_id)/muy).cpu().numpy(), fmt='-', marker='x', color='red', alpha=0.75)
plt.errorbar(ring.locations().cpu().numpy(), 100*((mux - mux_result)/mux).cpu().numpy(), fmt='-', marker='o', color='blue', alpha=0.75)
plt.errorbar(ring.locations().cpu().numpy(), 100*((muy - muy_result)/muy).cpu().numpy(), fmt='-', marker='o', color='red', alpha=0.75)
plt.tight_layout()
plt.show()
print(100*(((mux - mux_id)/mux)**2).mean().sqrt())
print(100*(((muy - muy_id)/muy)**2).mean().sqrt())
print()
print(100*(((mux - mux_result)/mux)**2).mean().sqrt())
print(100*(((muy - muy_result)/muy)**2).mean().sqrt())
print()
tensor(8.7941, dtype=torch.float64)
tensor(1.7778, dtype=torch.float64)
tensor(0.3163, dtype=torch.float64)
tensor(0.3459, dtype=torch.float64)
[47]:
# Beta-beating and dispersion
bx_ref_bb = 100.0*(bx - bx_id) / bx
by_ref_bb = 100.0*(by - by_id) / by
bx_res_bb = 100.0*(bx - bx_result)/ bx
by_res_bb = 100.0*(by - by_result)/ by
def rms(x):
return (x**2).mean().sqrt()
rms_x_ref = rms(bx_ref_bb).item()
ptp_x_ref = (bx_ref_bb.max() - bx_ref_bb.min()).item()
rms_y_ref = rms(by_ref_bb).item()
ptp_y_ref = (by_ref_bb.max() - by_ref_bb.min()).item()
rms_x_res = rms(bx_res_bb).item()
ptp_x_res = (bx_res_bb.max() - bx_res_bb.min()).item()
rms_y_res = rms(by_res_bb).item()
ptp_y_res = (by_res_bb.max() - by_res_bb.min()).item()
s = ring.locations().cpu().numpy()
bx_ref_np = bx_ref_bb.cpu().numpy()
by_ref_np = by_ref_bb.cpu().numpy()
bx_res_np = bx_res_bb.cpu().numpy()
by_res_np = by_res_bb.cpu().numpy()
etax_ref = etaqx - etaqx_id
etay_ref = etaqy - etaqy_id
etax_res = etaqx - etaqx_result
etay_res = etaqy - etaqy_result
rms_etax_ref = rms(etax_ref).item()
ptp_etax_ref = (etax_ref.max() - etax_ref.min()).item()
rms_etay_ref = rms(etay_ref).item()
ptp_etay_ref = (etay_ref.max() - etay_ref.min()).item()
rms_etax_res = rms(etax_res).item()
ptp_etax_res = (etax_res.max() - etax_res.min()).item()
rms_etay_res = rms(etay_res).item()
ptp_etay_res = (etay_res.max() - etay_res.min()).item()
etax_ref_np = etax_ref.cpu().numpy()
etay_ref_np = etay_ref.cpu().numpy()
etax_res_np = etax_res.cpu().numpy()
etay_res_np = etay_res.cpu().numpy()
fig, (ax, ay) = plt.subplots(
2, 1, figsize=(16, 10),
sharex=True,
gridspec_kw={'hspace': 0.3}
)
ax.errorbar(s, bx_ref_np, fmt='-', marker='x', color='blue', alpha=0.75, lw=2.0, label=r'initial, $\beta_x$')
ax.errorbar(s, by_ref_np, fmt='-', marker='x', color='red', alpha=0.75, lw=2.0, label=r'initial, $\beta_y$')
ax.errorbar(s, bx_res_np, fmt='-', marker='o', color='blue', alpha=0.75, lw=2.0, label=r'final, $\beta_x$')
ax.errorbar(s, by_res_np, fmt='-', marker='o', color='red', alpha=0.75, lw=2.0, label=r'final, $\beta_y$')
ax.set_xlabel('s [m]', fontsize=18)
ax.set_ylabel(r'$\Delta \beta / \beta$ [\%]', fontsize=18)
ax.tick_params(width=2, labelsize=16)
ax.tick_params(axis='x', length=8, direction='in')
ax.tick_params(axis='y', length=8, direction='in')
title = (
rf'RMS$_x$={rms_x_ref:05.2f}\% \quad RMS$_y$={rms_y_ref:05.2f}\% \quad '
rf'PTP$_x$={ptp_x_ref:05.2f}\% \quad PTP$_y$={ptp_y_ref:05.2f}\% \quad '
rf'$\Delta \nu_x$={(lambda x: '-' if x < 0 else '~')(nux - nux_id)}{(nux - nux_id).abs().item():.4f} \quad $\Delta \nu_y$={(lambda x: '-' if x < 0 else '~')(nuy - nuy_id)}{(nuy - nuy_id).abs().item():.4f}'
rf'\quad C={c_id.item():.6f}'
)
ax.text(0.0, 1.10, title, transform=ax.transAxes, ha='left', va='bottom', fontsize=16, fontfamily='monospace')
title = (
rf'RMS$_x$={rms_x_res:05.2f}\% \quad RMS$_y$={rms_y_res:05.2f}\% \quad '
rf'PTP$_x$={ptp_x_res:05.2f}\% \quad PTP$_y$={ptp_y_res:05.2f}\% \quad '
rf'$\Delta \nu_x$={(lambda x: '-' if x < 0 else '~')(nux - nux_result)}{(nux - nux_result).abs().item():.4f} \quad $\Delta \nu_y$={(lambda x: '-' if x < 0 else '~')(nuy - nuy_result)}{(nuy - nuy_result).abs().item():.4f}'
rf'\quad C={c_result.item():.6f}'
)
ax.text(0.0, 1.025, title, transform=ax.transAxes, ha='left', va='bottom', fontsize=16, fontfamily='monospace')
ax.legend(loc='upper right', frameon=False, fontsize=14, ncol=4)
ax.set_ylim(-20, 20)
ay.errorbar(s, etax_ref_np, fmt='-', marker='x', color='blue', alpha=0.75, lw=2.0, label=r'initial, $\eta_x$')
ay.errorbar(s, etay_ref_np, fmt='-', marker='x', color='red', alpha=0.75, lw=2.0, label=r'initial, $\eta_y$')
ay.errorbar(s, etax_res_np, fmt='-', marker='o', color='blue',alpha=0.75, lw=2.0, label=r'final, $\eta_x$')
ay.errorbar(s, etay_res_np, fmt='-', marker='o', color='red', alpha=0.75, lw=2.0, label=r'final, $\eta_y$')
ay.set_xlabel('s [m]', fontsize=18)
ay.set_ylabel(r'$\Delta \eta$ [m]', fontsize=18)
ay.tick_params(width=2, labelsize=16)
ay.tick_params(axis='x', length=8, direction='in')
ay.tick_params(axis='y', length=8, direction='in')
title = (
rf'RMS$_x$={rms_etax_ref:.4E} m \quad RMS$_y$={rms_etay_ref:.4E} m \quad '
rf'PTP$_x$={ptp_etax_ref:.4E} m \quad PTP$_y$={ptp_etay_ref:.4E} m \quad '
)
ay.text(0.0, 1.125, title, transform=ay.transAxes, ha='left', va='bottom', fontsize=16, fontfamily='monospace')
title = (
rf'RMS$_x$={rms_etax_res:.4E} m \quad RMS$_y$={rms_etay_res:.4E} m \quad '
rf'PTP$_x$={ptp_etax_res:.4E} m \quad PTP$_y$={ptp_etay_res:.4E} m \quad '
)
ay.text(0.0, 1.05, title, transform=ay.transAxes, ha='left', va='bottom', fontsize=16, fontfamily='monospace')
plt.setp(ax.spines.values(), linewidth=2.0)
plt.setp(ay.spines.values(), linewidth=2.0)
plt.show()
[48]:
# Knobs
QF = [f'QF_S{i:02}_{j:02}' for j in [2, 3] for i in range(1, 12 + 1)]
QD = [f'QD_S{i:02}_{j:02}' for j in [2, 3] for i in range(1, 12 + 1)]
nkn_a = ['OCT_S01_02', 'QF_S01_02', 'QD_S01_02', 'QD_S01_03', 'QF_S01_03', 'OCT_S01_03']
nks_a = ['SD_S01_05', 'SH_S01_02', 'SH_S01_03', 'SD_S01_06']
ring.flatten()
kni = {name: ring[name].kn.item() for name in nkn_a}
ksi = {name: ring[name].ks.item() for name in nks_a}
kfi = {name: ring[name].kn.item() for name in QF}
kdi = {name: ring[name].kn.item() for name in QD}
error.flatten()
knf = {name: error[name].kn.item() for name in nkn_a}
ksf = {name: error[name].ks.item() for name in nks_a}
kff = {name: error[name].kn.item() for name in QF}
kdf = {name: error[name].kn.item() for name in QD}
print(dkf := [(kff[name]/kfi[name] - 1) for name in kfi if name not in nkn_a])
print()
print(dkd := [(kdf[name]/kdi[name] - 1) for name in kdi if name not in nkn_a])
print()
dkf_a, *_ = dkf
dkd_a, *_ = dkd
dk_a = {'DKF': dkf_a, 'DKD': dkd_a}
[-0.004463432367246667, -0.004463432367246667, -0.004463432367246667, -0.004463432367246667, -0.004463432367246667, -0.004463432367246667, -0.004463432367246667, -0.004463432367246667, -0.004463432367246667, -0.004463432367246667, -0.004463432367246667, -0.004463432367246667, -0.004463432367246667, -0.004463432367246667, -0.004463432367246667, -0.004463432367246667, -0.004463432367246667, -0.004463432367246667, -0.004463432367246667, -0.004463432367246667, -0.004463432367246667, -0.004463432367246667]
[-0.020078064001441742, -0.020078064001441742, -0.020078064001441742, -0.020078064001441742, -0.020078064001441742, -0.020078064001441742, -0.020078064001441742, -0.020078064001441742, -0.020078064001441742, -0.020078064001441742, -0.020078064001441742, -0.020078064001441742, -0.020078064001441742, -0.020078064001441742, -0.020078064001441742, -0.020078064001441742, -0.020078064001441742, -0.020078064001441742, -0.020078064001441742, -0.020078064001441742, -0.020078064001441742, -0.020078064001441742]
[49]:
import numpy as np
dkn_a = np.array([knf[i]/kni[i] - 1 for i in kni])
dks_a = np.array([ksf[i] - ksi[i] for i in ksi])
n_kn = len(dkn_a)
n_ks = len(dks_a)
n_dk = len(dk_a)
y_kn = np.arange(n_kn)
y_dk = np.arange(n_dk) + n_kn + 2*1
y_ks = np.arange(n_ks) + n_kn + n_dk + 2*2
fig, ax = plt.subplots(figsize=(8, 4))
ay = ax.twiny()
bar_kn = ax.barh(y_kn, dkn_a, height=0.6, alpha=1, label=r'normal', color='red')
bar_dk = ax.barh(y_dk, list(dk_a.values()), height=0.6, alpha=1, label=r'global', color='black')
bar_ks = ay.barh(y_ks, dks_a, height=0.6, alpha=1, label=r'skew', color='blue')
yticks = np.concatenate([y_kn, y_dk, y_ks])
yticklabels = [*kni.keys()] + [*dk_a.keys()] + [*ksi.keys()]
ax.set_yticks(yticks)
ax.set_yticklabels(yticklabels, fontsize=12)
ay.set_ylim(ax.get_ylim())
ax.axvline(0.0, color='black', linewidth=1.0, linestyle='--', alpha=0.5)
ay.axvline(0.0, color='black', linewidth=1.0, linestyle='--', alpha=0.5)
xmax = max(np.max(np.abs(dkn_a)), np.max(np.abs(list(dk_a.values()))))
ax.set_xlim(-1.1 * xmax, 1.1 * xmax)
xmax = np.max(np.abs(dks_a))
ay.set_xlim(-1.1 * xmax, 1.1 * xmax)
ax.tick_params(axis='x', length=6, width=1.5, direction='in', labelsize=12, bottom=True, top=False, labelbottom=True, labeltop=False)
ax.tick_params(axis='y', length=0, width=0, labelsize=12)
ay.tick_params(axis='x', length=6, width=1.5, direction='in', labelsize=12, bottom=False, top=True, labelbottom=False, labeltop=True)
ax.set_xlabel(r'FSE (normal \& global)', fontsize=12)
ay.set_xlabel(r'Delta (skew)', fontsize=12)
plt.setp(ax.spines.values(), linewidth=2.0)
plt.setp(ay.spines.values(), linewidth=2.0)
ax.spines['top'].set_visible(False)
ay.spines['bottom'].set_visible(False)
plt.tight_layout()
plt.show()
[50]:
# Store local and global correction deltas (EPU-A)
dkf = [(kff[name] - kfi[name]) for name in kfi if name not in nkn_a]
dkd = [(kdf[name] - kdi[name]) for name in kdi if name not in nkn_a]
dkf_a, *_ = dkf
dkd_a, *_ = dkd
dk_a = {'DKF': dkf_a, 'DKD': dkd_a}
dkn_a = np.array([knf[i] - kni[i] for i in kni])
dks_a = np.array([ksf[i] - ksi[i] for i in ksi])
dkn_a = {name: value for name, value in zip(nkn_a, dkn_a.tolist())}
dks_a = {name: value for name, value in zip(nks_a, dks_a.tolist())}
[51]:
# EPU-B model-free compensation
[52]:
# Load lattice (ELEGANT table)
# Note, lattice is allowed to have repeated elements
path = Path('elettra.lte')
data = load_lattice(path)
[53]:
# Build and setup lattice
ring:Line = build('RING', 'ELEGANT', data)
# Flatten sublines
ring.flatten()
# Remove all marker elements but the ones starting with MLL (long straight section centers)
ring.remove_group(pattern=r'^(?!MLL_).*', kinds=['Marker'])
# Replace all sextupoles with quadrupoles
def factory(element:Element) -> Quadrupole:
table = element.serialize
table.pop('ms', None)
return Quadrupole(**table)
ring.replace_group(pattern=r'', factory=factory, kinds=['Sextupole'])
# Set linear dipoles
def apply(element:Element) -> None:
element.linear = True
ring.apply(apply, kinds=['Dipole'])
# Merge drifts
ring.merge()
# Change lattice start
ring.start = "BPM_S01_01"
# Split BPMs
ring.split((None, ['BPM'], None, None))
# Roll lattice
ring.roll(1)
# Splice lattice
ring.splice()
# Describe
ring.describe
[53]:
{'BPM': 168, 'Drift': 708, 'Dipole': 156, 'Quadrupole': 360, 'Marker': 12}
[54]:
# Compute tunes (fractional part)
nux, nuy = tune(ring, [], matched=True, limit=1)
[55]:
# Compute dispersion
orbit = torch.tensor(4*[0.0], dtype=dtype)
etaqx, etapx, etaqy, etapy = dispersion(ring, orbit, [], limit=1)
[56]:
# Compute twiss parameters
ax, bx, ay, by = twiss(ring, [], matched=True, advance=True, full=False).T
[57]:
# Compute phase advances
mux, muy = advance(ring, [], alignment=False, matched=True).T
[58]:
# Compute coupling
c = coupling(ring, [])
[59]:
# Quadrupole names for global tune correction
QF = [f'QF_S{i:02}_{j:02}' for j in [2, 3] for i in range(1, 12 + 1)]
QD = [f'QD_S{i:02}_{j:02}' for j in [2, 3] for i in range(1, 12 + 1)]
[60]:
# Global tune response matrix
def global_observable(knobs):
kf, kd = knobs
kn = torch.stack(len(QF)*[kf] + len(QD)*[kd])
return tune(ring, [kn], ('kn', None, QF + QD, None), matched=True, limit=1)
knobs = torch.tensor([0.0, 0.0], dtype=dtype)
global_target = global_observable(knobs)
global_matrix = torch.func.jacfwd(global_observable)(knobs)
print(global_target)
print(global_matrix)
tensor([0.2994, 0.1608], dtype=torch.float64)
tensor([[ 5.8543, 2.0964],
[-2.9918, -1.2602]], dtype=torch.float64)
[61]:
# Several local knobs can be used to correct ID effects
# Normal quadrupole correctors
nkn = ['OCT_S02_02', 'QF_S02_02', 'QD_S02_02', 'QD_S02_03', 'QF_S02_03', 'OCT_S02_03']
# Skew quadrupole correctors
nks = ['SD_S02_05', 'SH_S02_02', 'SH_S02_03', 'SD_S02_06']
[62]:
# Define knobs to magnets mixing matrices
Sn = torch.tensor([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0], [0.0, 0.0, 1.0], [0.0, 1.0, 0.0], [1.0, 0.0, 0.0]], dtype=dtype)
print(Sn)
print()
Ss = torch.tensor([[+1.0, 0.0], [0.0, +1.0], [0.0, -1.0], [-1.0, 0.0]], dtype=dtype)
print(Ss)
print()
tensor([[1., 0., 0.],
[0., 1., 0.],
[0., 0., 1.],
[0., 0., 1.],
[0., 1., 0.],
[1., 0., 0.]], dtype=torch.float64)
tensor([[ 1., 0.],
[ 0., 1.],
[ 0., -1.],
[-1., 0.]], dtype=torch.float64)
[63]:
# Define twiss observable (full coupled twiss)
def observable_twiss(kn, ks):
return twiss(ring, [kn, ks], ('kn', None, nkn, None), ('ks', None, nks, None), matched=True, advance=True, full=False, convert=False)
[64]:
# Define dispersion observable
w_etax = 1.0
w_etay = 8.0
def observable_dispersion(kn, ks):
orbit = torch.tensor(4*[0.0], dtype=dtype)
etax, _, etay, _ = dispersion(ring,
orbit,
[kn, ks],
('kn', None, nkn, None),
('ks', None, nks, None))
return torch.stack([w_etax*etax, w_etay*etay]).T
[65]:
# Construct the full target observable vector and the corresponding response matrix
def observable(knobs):
kn, ks = torch.split(knobs, [3, 2])
kn = Sn @ kn
ks = Ss @ ks
betas = observable_twiss(kn, ks)
etas = observable_dispersion(kn, ks)
return torch.cat([betas.flatten(), etas.flatten()])
knobs = torch.tensor((3 + 2)*[0.0], dtype=dtype)
print((target := observable(knobs)).shape)
print((matrix := torch.func.jacfwd(observable)(knobs)).shape)
torch.Size([5712])
torch.Size([5712, 5])
[66]:
# Define ID model
# Note, only the flattened upper-triangular part of the A and B matrices is passed
# LPU (small effect on linear optics)
A = torch.tensor([[-0.00012383337794002073, -4.660270490330217E-11, 6.376863691496814E-10, 2.7230065924765254E-9],
[-4.660394652563561E-11, -0.00023205060950586665, -1.3479277794464672E-9, -2.355844533297301E-9],
[6.376863692762265E-10, -1.347927657735391E-9, 0.03660488622324795, 5.772998189350976E-9],
[2.7230065927230765E-9, -2.3558445782246365E-9, 5.772998640180359E-9, 0.06475766873116935]], dtype=dtype)
B = torch.tensor([[0.00012378599381951955, -1.6869692601073327E-7, 7.640630142947167E-8, -2.0548228907169046E-7],
[-1.6869692601073327E-7, 0.0006954279711404285, 1.3864566360308503E-7, -5.549940163648357E-7],
[7.640630142947167E-8, 1.3864566360308503E-7, -0.034632609324692906, 0.0024749659452940626],
[-2.0548228907169046E-7, -5.549940163648357E-7, 0.0024749659452940626, -0.19648005917364772]], dtype=dtype)
# EPU (overwrites the LPU matrices above; this is the model actually used)
A = torch.tensor([[-0.03484222052711237, 1.0272120741819959E-7, -4.698931299341201E-9, 0.0015923185492594811],
[1.0272120579834892E-7, -0.046082787920135176, 0.0017792061173117564, 3.3551298301095784E-8],
[-4.6989312853101E-9, 0.0017792061173117072, 0.056853750760983084, -1.5929605363332683E-7],
[0.0015923185492594336, 3.3551298348653296E-8, -1.5929605261642905E-7, 0.08311631737263032]], dtype=dtype)
B = torch.tensor([[0.03649353186115209, 0.0015448347221877217, 0.00002719892025520868, -0.0033681183134964482],
[0.0015448347221877217, 0.13683886657005795, -0.0033198692682377406, 0.00006140578258682469],
[0.00002719892025520868, -0.0033198692682377406, -0.05260095308967722, 0.005019907688182885],
[-0.0033681183134964482, 0.00006140578258682469, 0.005019907688182885, -0.2531573249456863]], dtype=dtype)
ID = Matrix('ID',
length=0.0,
A=A[torch.triu(torch.ones_like(A, dtype=torch.bool))].tolist(),
B=B[torch.triu(torch.ones_like(B, dtype=torch.bool))].tolist())
[67]:
# Insert ID into the existing lattice
# This will replace the target marker
error = ring.clone()
error.flatten()
error.insert(ID, 'MLL_S02', position=0.0)
error.splice()
# Describe
error.describe
[67]:
{'BPM': 168,
'Drift': 708,
'Dipole': 156,
'Quadrupole': 360,
'Marker': 11,
'Matrix': 1}
[68]:
# Compute tunes (fractional part)
nux_id, nuy_id = tune(error, [], matched=True, limit=1)
[69]:
# Compute dispersion
orbit = torch.tensor(4*[0.0], dtype=dtype)
etaqx_id, etapx_id, etaqy_id, etapy_id = dispersion(error, orbit, [], limit=1)
[70]:
# Compute twiss parameters
ax_id, bx_id, ay_id, by_id = twiss(error, [], matched=True, advance=True, full=False).T
[71]:
# Compute phase advances
mux_id, muy_id = advance(error, [], alignment=False, matched=True).T
[72]:
# Compute coupling
c_id = coupling(error, [])
[73]:
# Tune shifts
print((nux - nux_id))
print((nuy - nuy_id))
tensor(0.0260, dtype=torch.float64)
tensor(-0.0114, dtype=torch.float64)
[74]:
# Coupling (minimal tune distance)
print(c)
print(c_id)
tensor(0., dtype=torch.float64)
tensor(0.0004, dtype=torch.float64)
[75]:
# Dispersion
plt.figure(figsize=(12, 4))
plt.errorbar(ring.locations().cpu().numpy(), (etaqx - etaqx_id).cpu().numpy(), fmt='-', marker='x', color='blue', alpha=0.75)
plt.errorbar(ring.locations().cpu().numpy(), (etaqy - etaqy_id).cpu().numpy(), fmt='-', marker='x', color='red', alpha=0.75)
plt.tight_layout()
plt.show()
[76]:
# Beta-beating
plt.figure(figsize=(12, 4))
plt.errorbar(ring.locations().cpu().numpy(), 100*((bx - bx_id)/bx).cpu().numpy(), fmt='-', marker='x', color='blue', alpha=0.75)
plt.errorbar(ring.locations().cpu().numpy(), 100*((by - by_id)/by).cpu().numpy(), fmt='-', marker='x', color='red', alpha=0.75)
plt.tight_layout()
plt.show()
print(100*(((bx - bx_id)/bx)**2).mean().sqrt())
print(100*(((by - by_id)/by)**2).mean().sqrt())
tensor(11.5994, dtype=torch.float64)
tensor(1.7916, dtype=torch.float64)
[77]:
# Phase advance
plt.figure(figsize=(12, 4))
plt.errorbar(ring.locations().cpu().numpy(), 100*((mux - mux_id)/mux).cpu().numpy(), fmt='-', marker='x', color='blue', alpha=0.75)
plt.errorbar(ring.locations().cpu().numpy(), 100*((muy - muy_id)/muy).cpu().numpy(), fmt='-', marker='x', color='red', alpha=0.75)
plt.tight_layout()
plt.show()
print(100*(((mux - mux_id)/mux)**2).mean().sqrt())
print(100*(((muy - muy_id)/muy)**2).mean().sqrt())
tensor(8.7941, dtype=torch.float64)
tensor(1.7778, dtype=torch.float64)
[78]:
# Define parametric observables for the perturbed lattice (these emulate measured tunes, twiss and dispersion)
w_etax = 1.0
w_etay = 8.0
def global_observable(knobs):
kf, kd = knobs
kn = torch.stack(len(QF)*[kf] + len(QD)*[kd])
return tune(error, [kn], ('kn', None, QF + QD, None), matched=True, limit=1)
def observable_twiss(kn, ks):
return twiss(error, [kn, ks], ('kn', None, nkn, None), ('ks', None, nks, None), matched=True, advance=True, full=False, convert=False)
def observable_dispersion(kn, ks):
orbit = torch.tensor(4*[0.0], dtype=dtype)
etax, _, etay, _ = dispersion(error,
orbit,
[kn, ks],
('kn', None, nkn, None),
('ks', None, nks, None))
return torch.stack([w_etax*etax, w_etay*etay]).T
def observable(knobs):
kn, ks = torch.split(knobs, [3, 2])
kn = Sn @ kn
ks = Ss @ ks
betas = observable_twiss(kn, ks)
etas = observable_dispersion(kn, ks)
return torch.cat([betas.flatten(), etas.flatten()])
[79]:
# Check the residual vector norm
global_knobs = torch.tensor(2*[0.0], dtype=dtype)
knobs = torch.tensor((3 + 2)*[0.0], dtype=dtype)
print(((global_observable(global_knobs) - global_target)**2).sum())
print(((observable(knobs) - target)**2).sum())
tensor(0.0008, dtype=torch.float64)
tensor(212.9162, dtype=torch.float64)
[80]:
# Optimization loop (local)
# Response matrix (jacobian)
M = matrix.clone()
# Weighting covariance (sensitivity) matrix
epsilon = 1.0E-9
C = M @ M.T
C = C + epsilon*torch.eye(len(C), dtype=dtype)
# Cholesky decomposition
L = torch.linalg.cholesky(C)
# Whiten response
M = torch.linalg.solve_triangular(L, M, upper=False)
# Additional weights
# Can be used to give extra weight to selected observables, e.g. the dispersion entries
weights = torch.ones(len(M), dtype=dtype)
weights = weights.sqrt()
# Whiten response with additional weights
M = M*weights.unsqueeze(1)
# Iterative correction
lr = 0.75
# Initial value
knobs = torch.tensor((3 + 2)*[0.0], dtype=dtype)
# Correction loop
for _ in range(8):
value = observable(knobs)
residual = target - value
residual = torch.linalg.solve_triangular(L, residual.unsqueeze(-1), upper=False).squeeze(-1)
residual = residual*weights
delta = torch.linalg.lstsq(M, residual, driver="gels").solution
knobs += lr*delta
print(((value - target)**2).sum())
print()
tensor(212.9162, dtype=torch.float64)
tensor(12.4937, dtype=torch.float64)
tensor(1.1072, dtype=torch.float64)
tensor(0.3135, dtype=torch.float64)
tensor(0.2637, dtype=torch.float64)
tensor(0.2607, dtype=torch.float64)
tensor(0.2605, dtype=torch.float64)
tensor(0.2605, dtype=torch.float64)
[81]:
# Apply final local corrections
kn, ks = torch.split(knobs, [3, 2])
kn = Sn @ kn
ks = Ss @ ks
error.flatten()
for name, knob in zip(nkn, kn):
error[name].kn = (error[name].kn + knob).item()
for name, knob in zip(nks, ks):
error[name].ks = (error[name].ks + knob).item()
error.splice()
[82]:
# Optimization loop (global)
# Response matrix (jacobian)
M = global_matrix.clone()
# Weighting covariance (sensitivity) matrix
epsilon = 1.0E-9
C = M @ M.T
C = C + epsilon*torch.eye(len(C), dtype=dtype)
# Cholesky decomposition
L = torch.linalg.cholesky(C)
# Whiten response
M = torch.linalg.solve_triangular(L, M, upper=False)
# Additional weights
# Can be used to give extra weight to selected observables
weights = torch.ones(len(M), dtype=dtype)
weights = weights.sqrt()
# Whiten response with additional weights
M = M*weights.unsqueeze(1)
# Iterative correction
lr = 0.75
# Initial value
global_knobs = torch.tensor(2*[0.0], dtype=dtype)
# Correction loop
for _ in range(4):
value = global_observable(global_knobs)
residual = global_target - value
residual = torch.linalg.solve_triangular(L, residual.unsqueeze(-1), upper=False).squeeze(-1)
residual = residual*weights
delta = torch.linalg.lstsq(M, residual, driver="gels").solution
global_knobs += lr*delta
print(((value - global_target)**2).sum())
print()
tensor(0.0001, dtype=torch.float64)
tensor(9.0531e-06, dtype=torch.float64)
tensor(7.8179e-07, dtype=torch.float64)
tensor(7.1608e-08, dtype=torch.float64)
[83]:
# Apply final global corrections
kf, kd = global_knobs
error.flatten()
for name in QF:
    error[name].kn = (error[name].kn + kf).item()
for name in QD:
    error[name].kn = (error[name].kn + kd).item()
error.splice()
[84]:
# Compute tunes (fractional part)
nux_result, nuy_result = tune(error, [], matched=True, limit=1)
[85]:
# Compute dispersion
orbit = torch.tensor(4*[0.0], dtype=dtype)
etaqx_result, etapx_result, etaqy_result, etapy_result = dispersion(error, orbit, [], limit=1)
[86]:
# Compute twiss parameters
ax_result, bx_result, ay_result, by_result = twiss(error, [], matched=True, advance=True, full=False).T
[87]:
# Compute phase advances
mux_result, muy_result = advance(error, [], alignment=False, matched=True).T
[88]:
# Compute coupling
c_result = coupling(error, [])
[89]:
# Tune shifts
print((nux - nux_id).abs())
print((nuy - nuy_id).abs())
print()
print((nux - nux_result).abs())
print((nuy - nuy_result).abs())
print()
tensor(0.0260, dtype=torch.float64)
tensor(0.0114, dtype=torch.float64)
tensor(4.7077e-05, dtype=torch.float64)
tensor(6.7219e-05, dtype=torch.float64)
[90]:
# Coupling (minimal tune distance)
print(c)
print(c_id)
print(c_result)
tensor(0., dtype=torch.float64)
tensor(0.0004, dtype=torch.float64)
tensor(7.0240e-06, dtype=torch.float64)
[91]:
# Dispersion
plt.figure(figsize=(12, 4))
plt.errorbar(ring.locations().cpu().numpy(), (etaqx - etaqx_id).cpu().numpy(), fmt='-', marker='x', color='blue', alpha=0.75)
plt.errorbar(ring.locations().cpu().numpy(), (etaqy - etaqy_id).cpu().numpy(), fmt='-', marker='x', color='red', alpha=0.75)
plt.errorbar(ring.locations().cpu().numpy(), (etaqx - etaqx_result).cpu().numpy(), fmt='-', marker='o', color='blue', alpha=0.75)
plt.errorbar(ring.locations().cpu().numpy(), (etaqy - etaqy_result).cpu().numpy(), fmt='-', marker='o', color='red', alpha=0.75)
plt.tight_layout()
plt.show()
[92]:
# Beta-beating
plt.figure(figsize=(12, 4))
plt.errorbar(ring.locations().cpu().numpy(), 100*((bx - bx_id)/bx).cpu().numpy(), fmt='-', marker='x', color='blue', alpha=0.75)
plt.errorbar(ring.locations().cpu().numpy(), 100*((by - by_id)/by).cpu().numpy(), fmt='-', marker='x', color='red', alpha=0.75)
plt.errorbar(ring.locations().cpu().numpy(), 100*((bx - bx_result)/bx).cpu().numpy(), fmt='-', marker='o', color='blue', alpha=0.75)
plt.errorbar(ring.locations().cpu().numpy(), 100*((by - by_result)/by).cpu().numpy(), fmt='-', marker='o', color='red', alpha=0.75)
plt.tight_layout()
plt.show()
print(100*(((bx - bx_id)/bx)**2).mean().sqrt())
print(100*(((by - by_id)/by)**2).mean().sqrt())
print()
print(100*(((bx - bx_result)/bx)**2).mean().sqrt())
print(100*(((by - by_result)/by)**2).mean().sqrt())
print()
tensor(11.5994, dtype=torch.float64)
tensor(1.7916, dtype=torch.float64)
tensor(0.2067, dtype=torch.float64)
tensor(0.3851, dtype=torch.float64)
[93]:
# Phase advance
plt.figure(figsize=(12, 4))
plt.errorbar(ring.locations().cpu().numpy(), 100*((mux - mux_id)/mux).cpu().numpy(), fmt='-', marker='x', color='blue', alpha=0.75)
plt.errorbar(ring.locations().cpu().numpy(), 100*((muy - muy_id)/muy).cpu().numpy(), fmt='-', marker='x', color='red', alpha=0.75)
plt.errorbar(ring.locations().cpu().numpy(), 100*((mux - mux_result)/mux).cpu().numpy(), fmt='-', marker='o', color='blue', alpha=0.75)
plt.errorbar(ring.locations().cpu().numpy(), 100*((muy - muy_result)/muy).cpu().numpy(), fmt='-', marker='o', color='red', alpha=0.75)
plt.tight_layout()
plt.show()
print(100*(((mux - mux_id)/mux)**2).mean().sqrt())
print(100*(((muy - muy_id)/muy)**2).mean().sqrt())
print()
print(100*(((mux - mux_result)/mux)**2).mean().sqrt())
print(100*(((muy - muy_result)/muy)**2).mean().sqrt())
print()
tensor(8.7941, dtype=torch.float64)
tensor(1.7778, dtype=torch.float64)
tensor(0.3163, dtype=torch.float64)
tensor(0.3459, dtype=torch.float64)
[94]:
# Beta-beating and dispersion
bx_ref_bb = 100.0*(bx - bx_id) / bx
by_ref_bb = 100.0*(by - by_id) / by
bx_res_bb = 100.0*(bx - bx_result)/ bx
by_res_bb = 100.0*(by - by_result)/ by
def rms(x):
return (x**2).mean().sqrt()
rms_x_ref = rms(bx_ref_bb).item()
ptp_x_ref = (bx_ref_bb.max() - bx_ref_bb.min()).item()
rms_y_ref = rms(by_ref_bb).item()
ptp_y_ref = (by_ref_bb.max() - by_ref_bb.min()).item()
rms_x_res = rms(bx_res_bb).item()
ptp_x_res = (bx_res_bb.max() - bx_res_bb.min()).item()
rms_y_res = rms(by_res_bb).item()
ptp_y_res = (by_res_bb.max() - by_res_bb.min()).item()
s = ring.locations().cpu().numpy()
bx_ref_np = bx_ref_bb.cpu().numpy()
by_ref_np = by_ref_bb.cpu().numpy()
bx_res_np = bx_res_bb.cpu().numpy()
by_res_np = by_res_bb.cpu().numpy()
etax_ref = etaqx - etaqx_id
etay_ref = etaqy - etaqy_id
etax_res = etaqx - etaqx_result
etay_res = etaqy - etaqy_result
rms_etax_ref = rms(etax_ref).item()
ptp_etax_ref = (etax_ref.max() - etax_ref.min()).item()
rms_etay_ref = rms(etay_ref).item()
ptp_etay_ref = (etay_ref.max() - etay_ref.min()).item()
rms_etax_res = rms(etax_res).item()
ptp_etax_res = (etax_res.max() - etax_res.min()).item()
rms_etay_res = rms(etay_res).item()
ptp_etay_res = (etay_res.max() - etay_res.min()).item()
etax_ref_np = etax_ref.cpu().numpy()
etay_ref_np = etay_ref.cpu().numpy()
etax_res_np = etax_res.cpu().numpy()
etay_res_np = etay_res.cpu().numpy()
fig, (ax, ay) = plt.subplots(
2, 1, figsize=(16, 10),
sharex=True,
gridspec_kw={'hspace': 0.3}
)
ax.errorbar(s, bx_ref_np, fmt='-', marker='x', color='blue', alpha=0.75, lw=2.0, label=r'initial, $\beta_x$')
ax.errorbar(s, by_ref_np, fmt='-', marker='x', color='red', alpha=0.75, lw=2.0, label=r'initial, $\beta_y$')
ax.errorbar(s, bx_res_np, fmt='-', marker='o', color='blue', alpha=0.75, lw=2.0, label=r'final, $\beta_x$')
ax.errorbar(s, by_res_np, fmt='-', marker='o', color='red', alpha=0.75, lw=2.0, label=r'final, $\beta_y$')
ax.set_xlabel('s [m]', fontsize=18)
ax.set_ylabel(r'$\Delta \beta / \beta$ [\%]', fontsize=18)
ax.tick_params(width=2, labelsize=16)
ax.tick_params(axis='x', length=8, direction='in')
ax.tick_params(axis='y', length=8, direction='in')
title = (
rf'RMS$_x$={rms_x_ref:05.2f}\% \quad RMS$_y$={rms_y_ref:05.2f}\% \quad '
rf'PTP$_x$={ptp_x_ref:05.2f}\% \quad PTP$_y$={ptp_y_ref:05.2f}\% \quad '
rf'$\Delta \nu_x$={(lambda x: '-' if x < 0 else '~')(nux - nux_id)}{(nux - nux_id).abs().item():.4f} \quad $\Delta \nu_y$={(lambda x: '-' if x < 0 else '~')(nuy - nuy_id)}{(nuy - nuy_id).abs().item():.4f}'
rf'\quad C={c_id.item():.6f}'
)
ax.text(0.0, 1.10, title, transform=ax.transAxes, ha='left', va='bottom', fontsize=16, fontfamily='monospace')
title = (
rf'RMS$_x$={rms_x_res:05.2f}\% \quad RMS$_y$={rms_y_res:05.2f}\% \quad '
rf'PTP$_x$={ptp_x_res:05.2f}\% \quad PTP$_y$={ptp_y_res:05.2f}\% \quad '
rf'$\Delta \nu_x$={(lambda x: '-' if x < 0 else '~')(nux - nux_result)}{(nux - nux_result).abs().item():.4f} \quad $\Delta \nu_y$={(lambda x: '-' if x < 0 else '~')(nuy - nuy_result)}{(nuy - nuy_result).abs().item():.4f}'
rf'\quad C={c_result.item():.6f}'
)
ax.text(0.0, 1.025, title, transform=ax.transAxes, ha='left', va='bottom', fontsize=16, fontfamily='monospace')
ax.legend(loc='upper right', frameon=False, fontsize=14, ncol=4)
ax.set_ylim(-20, 20)
ay.errorbar(s, etax_ref_np, fmt='-', marker='x', color='blue', alpha=0.75, lw=2.0, label=r'initial, $\eta_x$')
ay.errorbar(s, etay_ref_np, fmt='-', marker='x', color='red', alpha=0.75, lw=2.0, label=r'initial, $\eta_y$')
ay.errorbar(s, etax_res_np, fmt='-', marker='o', color='blue',alpha=0.75, lw=2.0, label=r'final, $\eta_x$')
ay.errorbar(s, etay_res_np, fmt='-', marker='o', color='red', alpha=0.75, lw=2.0, label=r'final, $\eta_y$')
ay.set_xlabel('s [m]', fontsize=18)
ay.set_ylabel(r'$\Delta \eta$ [m]', fontsize=18)
ay.tick_params(width=2, labelsize=16)
ay.tick_params(axis='x', length=8, direction='in')
ay.tick_params(axis='y', length=8, direction='in')
title = (
rf'RMS$_x$={rms_etax_ref:.4E} m \quad RMS$_y$={rms_etay_ref:.4E} m \quad '
rf'PTP$_x$={ptp_etax_ref:.4E} m \quad PTP$_y$={ptp_etay_ref:.4E} m \quad '
)
ay.text(0.0, 1.125, title, transform=ay.transAxes, ha='left', va='bottom', fontsize=16, fontfamily='monospace')
title = (
rf'RMS$_x$={rms_etax_res:.4E} m \quad RMS$_y$={rms_etay_res:.4E} m \quad '
rf'PTP$_x$={ptp_etax_res:.4E} m \quad PTP$_y$={ptp_etay_res:.4E} m \quad '
)
ay.text(0.0, 1.05, title, transform=ay.transAxes, ha='left', va='bottom', fontsize=16, fontfamily='monospace')
plt.setp(ax.spines.values(), linewidth=2.0)
plt.setp(ay.spines.values(), linewidth=2.0)
plt.show()
[95]:
# Knobs
QF = [f'QF_S{i:02}_{j:02}' for j in [2, 3] for i in range(1, 12 + 1)]
QD = [f'QD_S{i:02}_{j:02}' for j in [2, 3] for i in range(1, 12 + 1)]
nkn_b = ['OCT_S02_02', 'QF_S02_02', 'QD_S02_02', 'QD_S02_03', 'QF_S02_03', 'OCT_S02_03']
nks_b = ['SD_S02_05', 'SH_S02_02', 'SH_S02_03', 'SD_S02_06']
ring.flatten()
kni = {name: ring[name].kn.item() for name in nkn_b}
ksi = {name: ring[name].ks.item() for name in nks_b}
kfi = {name: ring[name].kn.item() for name in QF}
kdi = {name: ring[name].kn.item() for name in QD}
error.flatten()
knf = {name: error[name].kn.item() for name in nkn_b}
ksf = {name: error[name].ks.item() for name in nks_b}
kff = {name: error[name].kn.item() for name in QF}
kdf = {name: error[name].kn.item() for name in QD}
print(dkf := [(kff[name] /kfi[name] - 1) for name in kfi if name not in nkn_b])
print()
print(dkd := [(kdf[name] /kdi[name] - 1) for name in kdi if name not in nkn_b])
print()
dkf_b, *_ = dkf
dkd_b, *_ = dkd
dk_b = {'DKF': dkf_b, 'DKD': dkd_b}
[-0.0044634344021683425, -0.0044634344021683425, -0.0044634344021683425, -0.0044634344021683425, -0.0044634344021683425, -0.0044634344021683425, -0.0044634344021683425, -0.0044634344021683425, -0.0044634344021683425, -0.0044634344021683425, -0.0044634344021683425, -0.0044634344021683425, -0.0044634344021683425, -0.0044634344021683425, -0.0044634344021683425, -0.0044634344021683425, -0.0044634344021683425, -0.0044634344021683425, -0.0044634344021683425, -0.0044634344021683425, -0.0044634344021683425, -0.0044634344021683425]
[-0.020078000014753794, -0.020078000014753794, -0.020078000014753794, -0.020078000014753794, -0.020078000014753794, -0.020078000014753794, -0.020078000014753794, -0.020078000014753794, -0.020078000014753794, -0.020078000014753794, -0.020078000014753794, -0.020078000014753794, -0.020078000014753794, -0.020078000014753794, -0.020078000014753794, -0.020078000014753794, -0.020078000014753794, -0.020078000014753794, -0.020078000014753794, -0.020078000014753794, -0.020078000014753794, -0.020078000014753794]
[96]:
import numpy as np
dkn_b = np.array([knf[i]/kni[i] - 1 for i in kni])
dks_b = np.array([ksf[i] - ksi[i] for i in ksi])
n_kn = len(dkn_b)
n_ks = len(dks_b)
n_dk = len(dk_b)
y_kn = np.arange(n_kn)
y_dk = np.arange(n_dk) + n_kn + 2*1
y_ks = np.arange(n_ks) + n_kn + n_dk + 2*2
fig, ax = plt.subplots(figsize=(8, 4))
ay = ax.twiny()
bar_kn = ax.barh(y_kn, dkn_b, height=0.6, alpha=1, label=r'normal', color='red')
bar_dk = ax.barh(y_dk, list(dk_b.values()), height=0.6, alpha=1, label=r'global', color='black')
bar_ks = ay.barh(y_ks, dks_b, height=0.6, alpha=1, label=r'skew', color='blue')
yticks = np.concatenate([y_kn, y_dk, y_ks])
yticklabels = [*kni.keys()] + [*dk_b.keys()] + [*ksi.keys()]
ax.set_yticks(yticks)
ax.set_yticklabels(yticklabels, fontsize=12)
ay.set_ylim(ax.get_ylim())
ax.axvline(0.0, color='black', linewidth=1.0, linestyle='--', alpha=0.5)
ay.axvline(0.0, color='black', linewidth=1.0, linestyle='--', alpha=0.5)
xmax = max(np.max(np.abs(dkn_b)), np.max(np.abs(list(dk_b.values()))))
ax.set_xlim(-1.1 * xmax, 1.1 * xmax)
xmax = np.max(np.abs(dks_b))
ay.set_xlim(-1.1 * xmax, 1.1 * xmax)
ax.tick_params(axis='x', length=6, width=1.5, direction='in', labelsize=12, bottom=True, top=False, labelbottom=True, labeltop=False)
ax.tick_params(axis='y', length=0, width=0, labelsize=12)
ay.tick_params(axis='x', length=6, width=1.5, direction='in', labelsize=12, bottom=False, top=True, labelbottom=False, labeltop=True)
ax.set_xlabel(r'FSE (normal \& global)', fontsize=12)
ay.set_xlabel(r'Delta (skew)', fontsize=12)
plt.setp(ax.spines.values(), linewidth=2.0)
plt.setp(ay.spines.values(), linewidth=2.0)
ax.spines['top'].set_visible(False)
ay.spines['bottom'].set_visible(False)
plt.tight_layout()
plt.show()
[97]:
# Store local and global correction deltas (EPU-B)
dkf = [(kff[name] - kfi[name]) for name in kfi if name not in nkn_b]
dkd = [(kdf[name] - kdi[name]) for name in kdi if name not in nkn_b]
dkf_b, *_ = dkf
dkd_b, *_ = dkd
dk_b = {'DKF': dkf_b, 'DKD': dkd_b}
dkn_b = np.array([knf[i] - kni[i] for i in kni])
dks_b = np.array([ksf[i] - ksi[i] for i in ksi])
dkn_b = {name: value for name, value in zip(nkn_b, dkn_b.tolist())}
dks_b = {name: value for name, value in zip(nks_b, dks_b.tolist())}
[98]:
# Interaction and compensation
[99]:
# Build and setup lattice
ring:Line = build('RING', 'ELEGANT', data)
# Flatten sublines
ring.flatten()
# Remove all marker elements but the ones starting with MLL (long straight section centers)
ring.remove_group(pattern=r'^(?!MLL_).*', kinds=['Marker'])
# Replace all sextupoles with quadrupoles
def factory(element:Element) -> Quadrupole:
table = element.serialize
table.pop('ms', None)
return Quadrupole(**table)
ring.replace_group(pattern=r'', factory=factory, kinds=['Sextupole'])
# Set linear dipoles
def apply(element:Element) -> None:
element.linear = True
ring.apply(apply, kinds=['Dipole'])
# Merge drifts
ring.merge()
# Change lattice start
ring.start = "BPM_S01_01"
# Split BPMs
ring.split((None, ['BPM'], None, None))
# Roll lattice
ring.roll(1)
# Splice lattice
ring.splice()
# Describe
ring.describe
[99]:
{'BPM': 168, 'Drift': 708, 'Dipole': 156, 'Quadrupole': 360, 'Marker': 12}
[100]:
# Define ID model
# Note, only the flattened upper-triangular part of the A and B matrices is passed
A = torch.tensor([[-0.03484222052711237, 1.0272120741819959E-7, -4.698931299341201E-9, 0.0015923185492594811],
[1.0272120579834892E-7, -0.046082787920135176, 0.0017792061173117564, 3.3551298301095784E-8],
[-4.6989312853101E-9, 0.0017792061173117072, 0.056853750760983084, -1.5929605363332683E-7],
[0.0015923185492594336, 3.3551298348653296E-8, -1.5929605261642905E-7, 0.08311631737263032]], dtype=dtype)
B = torch.tensor([[0.03649353186115209, 0.0015448347221877217, 0.00002719892025520868, -0.0033681183134964482],
[0.0015448347221877217, 0.13683886657005795, -0.0033198692682377406, 0.00006140578258682469],
[0.00002719892025520868, -0.0033198692682377406, -0.05260095308967722, 0.005019907688182885],
[-0.0033681183134964482, 0.00006140578258682469, 0.005019907688182885, -0.2531573249456863]], dtype=dtype)
ID = Matrix('ID',
length=0.0,
A=A[torch.triu(torch.ones_like(A, dtype=torch.bool))].tolist(),
B=B[torch.triu(torch.ones_like(B, dtype=torch.bool))].tolist())
[101]:
# Insert IDs into the existing lattice
# Note, inserting several IDs might render the lattice unstable
error = ring.clone()
error.flatten()
error.insert(ID, 'MLL_S01', position=0.0)
error.insert(ID, 'MLL_S02', position=0.0)
error.splice()
# Describe
error.describe
[101]:
{'BPM': 168,
'Drift': 708,
'Dipole': 156,
'Quadrupole': 360,
'Matrix': 1,
'Marker': 10}
[102]:
# Compute tunes (fractional part)
nux_id, nuy_id = tune(error, [], matched=True, limit=1)
[103]:
# Compute dispersion
orbit = torch.tensor(4*[0.0], dtype=dtype)
etaqx_id, etapx_id, etaqy_id, etapy_id = dispersion(error, orbit, [], limit=1)
[104]:
# Compute twiss parameters
ax_id, bx_id, ay_id, by_id = twiss(error, [], matched=True, advance=True, full=False).T
[105]:
# Compute phase advances
mux_id, muy_id = advance(error, [], alignment=False, matched=True).T
[106]:
# Compute coupling
c_id = coupling(error, [])
[107]:
# Tune shifts
print((nux - nux_id))
print((nuy - nuy_id))
tensor(0.0562, dtype=torch.float64)
tensor(-0.0229, dtype=torch.float64)
[108]:
# Coupling (minimal tune distance)
print(c)
print(c_id)
tensor(0., dtype=torch.float64)
tensor(0.0010, dtype=torch.float64)
[109]:
# Dispersion
plt.figure(figsize=(12, 4))
plt.errorbar(ring.locations().cpu().numpy(), (etaqx - etaqx_id).cpu().numpy(), fmt='-', marker='x', color='blue', alpha=0.75)
plt.errorbar(ring.locations().cpu().numpy(), (etaqy - etaqy_id).cpu().numpy(), fmt='-', marker='x', color='red', alpha=0.75)
plt.tight_layout()
plt.show()
[110]:
# Beta-beating
plt.figure(figsize=(12, 4))
plt.errorbar(ring.locations().cpu().numpy(), 100*((bx - bx_id)/bx).cpu().numpy(), fmt='-', marker='x', color='blue', alpha=0.75)
plt.errorbar(ring.locations().cpu().numpy(), 100*((by - by_id)/by).cpu().numpy(), fmt='-', marker='x', color='red', alpha=0.75)
plt.tight_layout()
plt.show()
print(100*(((bx - bx_id)/bx)**2).mean().sqrt())
print(100*(((by - by_id)/by)**2).mean().sqrt())
tensor(12.8784, dtype=torch.float64)
tensor(0.8258, dtype=torch.float64)
[111]:
# Phase advance
plt.figure(figsize=(12, 4))
plt.errorbar(ring.locations().cpu().numpy(), 100*((mux - mux_id)/mux).cpu().numpy(), fmt='-', marker='x', color='blue', alpha=0.75)
plt.errorbar(ring.locations().cpu().numpy(), 100*((muy - muy_id)/muy).cpu().numpy(), fmt='-', marker='x', color='red', alpha=0.75)
plt.tight_layout()
plt.show()
print(100*(((mux - mux_id)/mux)**2).mean().sqrt())
print(100*(((muy - muy_id)/muy)**2).mean().sqrt())
tensor(9.8666, dtype=torch.float64)
tensor(0.8816, dtype=torch.float64)
[112]:
# Apply final local corrections (ID-A)
kn = torch.tensor([*dkn_a.values()], dtype=dtype)
ks = torch.tensor([*dks_a.values()], dtype=dtype)
error.flatten()
for name, knob in zip(nkn_a, kn):
error[name].kn = (error[name].kn + knob).item()
for name, knob in zip(nks_a, ks):
error[name].ks = (error[name].ks + knob).item()
error.splice()
[113]:
# Apply final global corrections (ID-A)
kf, kd = torch.tensor([*dk_a.values()], dtype=dtype)
error.flatten()
for name in QF:
    if name not in nkn_a:
        error[name].kn = (error[name].kn + kf).item()
for name in QD:
    if name not in nkn_a:
        error[name].kn = (error[name].kn + kd).item()
error.splice()
[114]:
# Apply final local corrections (ID-B)
kn = torch.tensor([*dkn_b.values()], dtype=dtype)
ks = torch.tensor([*dks_b.values()], dtype=dtype)
error.flatten()
for name, knob in zip(nkn_b, kn):
error[name].kn = (error[name].kn + knob).item()
for name, knob in zip(nks_b, ks):
error[name].ks = (error[name].ks + knob).item()
error.splice()
[115]:
# Apply final global corrections (ID-B)
kf, kd = torch.tensor([*dk_b.values()], dtype=dtype)
error.flatten()
for name in QF:
    if name not in nkn_b:
        error[name].kn = (error[name].kn + kf).item()
for name in QD:
    if name not in nkn_b:
        error[name].kn = (error[name].kn + kd).item()
error.splice()
[116]:
# Note, it would be better to perform an additional global tune correction at this point
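# A minimal sketch of that extra global step (not executed in this example), reusing
# global_target, global_matrix and global_observable from above and then applying the
# resulting knob deltas to QF/QD as in the cells before:
# global_knobs = torch.tensor(2*[0.0], dtype=dtype)
# for _ in range(4):
#     residual = global_target - global_observable(global_knobs)
#     global_knobs += 0.75*torch.linalg.lstsq(global_matrix, residual, driver='gels').solution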
[117]:
# Compute tunes (fractional part)
nux_result, nuy_result = tune(error, [], matched=True, limit=1)
[118]:
# Compute dispersion
orbit = torch.tensor(4*[0.0], dtype=dtype)
etaqx_result, etapx_result, etaqy_result, etapy_result = dispersion(error, orbit, [], limit=1)
[119]:
# Compute twiss parameters
ax_result, bx_result, ay_result, by_result = twiss(error, [], matched=True, advance=True, full=False).T
[120]:
# Compute phase advances
mux_result, muy_result = advance(error, [], alignment=False, matched=True).T
[121]:
# Compute coupling
c_result = coupling(error, [])
[122]:
# Tune shifts
print((nux - nux_id).abs())
print((nuy - nuy_id).abs())
print()
print((nux - nux_result).abs())
print((nuy - nuy_result).abs())
print()
tensor(0.0562, dtype=torch.float64)
tensor(0.0229, dtype=torch.float64)
tensor(0.0009, dtype=torch.float64)
tensor(0.0008, dtype=torch.float64)
[123]:
# Coupling (minimal tune distance)
print(c)
print(c_id)
print(c_result)
tensor(0., dtype=torch.float64)
tensor(0.0010, dtype=torch.float64)
tensor(6.7645e-06, dtype=torch.float64)
[124]:
# Dispersion
plt.figure(figsize=(12, 4))
plt.errorbar(ring.locations().cpu().numpy(), (etaqx - etaqx_id).cpu().numpy(), fmt='-', marker='x', color='blue', alpha=0.75)
plt.errorbar(ring.locations().cpu().numpy(), (etaqy - etaqy_id).cpu().numpy(), fmt='-', marker='x', color='red', alpha=0.75)
plt.errorbar(ring.locations().cpu().numpy(), (etaqx - etaqx_result).cpu().numpy(), fmt='-', marker='o', color='blue', alpha=0.75)
plt.errorbar(ring.locations().cpu().numpy(), (etaqy - etaqy_result).cpu().numpy(), fmt='-', marker='o', color='red', alpha=0.75)
plt.tight_layout()
plt.show()
[125]:
# Beta-beating
plt.figure(figsize=(12, 4))
plt.errorbar(ring.locations().cpu().numpy(), 100*((bx - bx_id)/bx).cpu().numpy(), fmt='-', marker='x', color='blue', alpha=0.75)
plt.errorbar(ring.locations().cpu().numpy(), 100*((by - by_id)/by).cpu().numpy(), fmt='-', marker='x', color='red', alpha=0.75)
plt.errorbar(ring.locations().cpu().numpy(), 100*((bx - bx_result)/bx).cpu().numpy(), fmt='-', marker='o', color='blue', alpha=0.75)
plt.errorbar(ring.locations().cpu().numpy(), 100*((by - by_result)/by).cpu().numpy(), fmt='-', marker='o', color='red', alpha=0.75)
plt.tight_layout()
plt.show()
print(100*(((bx - bx_id)/bx)**2).mean().sqrt())
print(100*(((by - by_id)/by)**2).mean().sqrt())
print()
print(100*(((bx - bx_result)/bx)**2).mean().sqrt())
print(100*(((by - by_result)/by)**2).mean().sqrt())
print()
tensor(12.8784, dtype=torch.float64)
tensor(0.8258, dtype=torch.float64)
tensor(0.4160, dtype=torch.float64)
tensor(0.6207, dtype=torch.float64)
[126]:
# Phase advance
plt.figure(figsize=(12, 4))
plt.errorbar(ring.locations().cpu().numpy(), 100*((mux - mux_id)/mux).cpu().numpy(), fmt='-', marker='x', color='blue', alpha=0.75)
plt.errorbar(ring.locations().cpu().numpy(), 100*((muy - muy_id)/muy).cpu().numpy(), fmt='-', marker='x', color='red', alpha=0.75)
plt.errorbar(ring.locations().cpu().numpy(), 100*((mux - mux_result)/mux).cpu().numpy(), fmt='-', marker='o', color='blue', alpha=0.75)
plt.errorbar(ring.locations().cpu().numpy(), 100*((muy - muy_result)/muy).cpu().numpy(), fmt='-', marker='o', color='red', alpha=0.75)
plt.tight_layout()
plt.show()
print(100*(((mux - mux_id)/mux)**2).mean().sqrt())
print(100*(((muy - muy_id)/muy)**2).mean().sqrt())
print()
print(100*(((mux - mux_result)/mux)**2).mean().sqrt())
print(100*(((muy - muy_result)/muy)**2).mean().sqrt())
print()
tensor(9.8666, dtype=torch.float64)
tensor(0.8816, dtype=torch.float64)
tensor(0.4609, dtype=torch.float64)
tensor(0.5192, dtype=torch.float64)
[127]:
# Beta-beating and dispersion
bx_ref_bb = 100.0*(bx - bx_id) / bx
by_ref_bb = 100.0*(by - by_id) / by
bx_res_bb = 100.0*(bx - bx_result)/ bx
by_res_bb = 100.0*(by - by_result)/ by
def rms(x):
    return (x**2).mean().sqrt()
# Sign prefix for the annotations below ('~' renders as a space with usetex)
def sign(x):
    return '-' if x < 0 else '~'
rms_x_ref = rms(bx_ref_bb).item()
ptp_x_ref = (bx_ref_bb.max() - bx_ref_bb.min()).item()
rms_y_ref = rms(by_ref_bb).item()
ptp_y_ref = (by_ref_bb.max() - by_ref_bb.min()).item()
rms_x_res = rms(bx_res_bb).item()
ptp_x_res = (bx_res_bb.max() - bx_res_bb.min()).item()
rms_y_res = rms(by_res_bb).item()
ptp_y_res = (by_res_bb.max() - by_res_bb.min()).item()
s = ring.locations().cpu().numpy()
bx_ref_np = bx_ref_bb.cpu().numpy()
by_ref_np = by_ref_bb.cpu().numpy()
bx_res_np = bx_res_bb.cpu().numpy()
by_res_np = by_res_bb.cpu().numpy()
etax_ref = etaqx - etaqx_id
etay_ref = etaqy - etaqy_id
etax_res = etaqx - etaqx_result
etay_res = etaqy - etaqy_result
rms_etax_ref = rms(etax_ref).item()
ptp_etax_ref = (etax_ref.max() - etax_ref.min()).item()
rms_etay_ref = rms(etay_ref).item()
ptp_etay_ref = (etay_ref.max() - etay_ref.min()).item()
rms_etax_res = rms(etax_res).item()
ptp_etax_res = (etax_res.max() - etax_res.min()).item()
rms_etay_res = rms(etay_res).item()
ptp_etay_res = (etay_res.max() - etay_res.min()).item()
etax_ref_np = etax_ref.cpu().numpy()
etay_ref_np = etay_ref.cpu().numpy()
etax_res_np = etax_res.cpu().numpy()
etay_res_np = etay_res.cpu().numpy()
fig, (ax, ay) = plt.subplots(
2, 1, figsize=(16, 10),
sharex=True,
gridspec_kw={'hspace': 0.3}
)
ax.errorbar(s, bx_ref_np, fmt='-', marker='x', color='blue', alpha=0.75, lw=2.0, label=r'initial, $\beta_x$')
ax.errorbar(s, by_ref_np, fmt='-', marker='x', color='red', alpha=0.75, lw=2.0, label=r'initial, $\beta_y$')
ax.errorbar(s, bx_res_np, fmt='-', marker='o', color='blue', alpha=0.75, lw=2.0, label=r'final, $\beta_x$')
ax.errorbar(s, by_res_np, fmt='-', marker='o', color='red', alpha=0.75, lw=2.0, label=r'final, $\beta_y$')
ax.set_xlabel('s [m]', fontsize=18)
ax.set_ylabel(r'$\Delta \beta / \beta$ [\%]', fontsize=18)
ax.tick_params(width=2, labelsize=16)
ax.tick_params(axis='x', length=8, direction='in')
ax.tick_params(axis='y', length=8, direction='in')
title = (
rf'RMS$_x$={rms_x_ref:05.2f}\% \quad RMS$_y$={rms_y_ref:05.2f}\% \quad '
rf'PTP$_x$={ptp_x_ref:05.2f}\% \quad PTP$_y$={ptp_y_ref:05.2f}\% \quad '
rf'$\Delta \nu_x$={sign(nux - nux_id)}{(nux - nux_id).abs().item():.4f} \quad $\Delta \nu_y$={sign(nuy - nuy_id)}{(nuy - nuy_id).abs().item():.4f}'
rf'\quad C={c_id.item():.6f}'
)
ax.text(0.0, 1.10, title, transform=ax.transAxes, ha='left', va='bottom', fontsize=16, fontfamily='monospace')
title = (
rf'RMS$_x$={rms_x_res:05.2f}\% \quad RMS$_y$={rms_y_res:05.2f}\% \quad '
rf'PTP$_x$={ptp_x_res:05.2f}\% \quad PTP$_y$={ptp_y_res:05.2f}\% \quad '
rf'$\Delta \nu_x$={sign(nux - nux_result)}{(nux - nux_result).abs().item():.4f} \quad $\Delta \nu_y$={sign(nuy - nuy_result)}{(nuy - nuy_result).abs().item():.4f}'
rf'\quad C={c_result.item():.6f}'
)
ax.text(0.0, 1.025, title, transform=ax.transAxes, ha='left', va='bottom', fontsize=16, fontfamily='monospace')
ax.legend(loc='upper right', frameon=False, fontsize=14, ncol=4)
ax.set_ylim(-35, 35)
ay.errorbar(s, etax_ref_np, fmt='-', marker='x', color='blue', alpha=0.75, lw=2.0, label=r'initial, $\eta_x$')
ay.errorbar(s, etay_ref_np, fmt='-', marker='x', color='red', alpha=0.75, lw=2.0, label=r'initial, $\eta_y$')
ay.errorbar(s, etax_res_np, fmt='-', marker='o', color='blue', alpha=0.75, lw=2.0, label=r'final, $\eta_x$')
ay.errorbar(s, etay_res_np, fmt='-', marker='o', color='red', alpha=0.75, lw=2.0, label=r'final, $\eta_y$')
ay.set_xlabel('s [m]', fontsize=18)
ay.set_ylabel(r'$\Delta \eta$ [m]', fontsize=18)
ay.tick_params(width=2, labelsize=16)
ay.tick_params(axis='x', length=8, direction='in')
ay.tick_params(axis='y', length=8, direction='in')
title = (
rf'RMS$_x$={rms_etax_ref:.4E} m \quad RMS$_y$={rms_etay_ref:.4E} m \quad '
rf'PTP$_x$={ptp_etax_ref:.4E} m \quad PTP$_y$={ptp_etay_ref:.4E} m \quad '
)
ay.text(0.0, 1.125, title, transform=ay.transAxes, ha='left', va='bottom', fontsize=16, fontfamily='monospace')
title = (
rf'RMS$_x$={rms_etax_res:.4E} m \quad RMS$_y$={rms_etay_res:.4E} m \quad '
rf'PTP$_x$={ptp_etax_res:.4E} m \quad PTP$_y$={ptp_etay_res:.4E} m \quad '
)
ay.text(0.0, 1.05, title, transform=ay.transAxes, ha='left', va='bottom', fontsize=16, fontfamily='monospace')
plt.setp(ax.spines.values(), linewidth=2.0)
plt.setp(ay.spines.values(), linewidth=2.0)
plt.show()
[128]:
fig, ax = plt.subplots(1, 1, figsize=(16, 5))
ax.errorbar(s, bx_res_np, fmt='-', marker='o', color='blue', alpha=0.75, lw=2.0, label=r'final, $\beta_x$')
ax.errorbar(s, by_res_np, fmt='-', marker='o', color='red', alpha=0.75, lw=2.0, label=r'final, $\beta_y$')
ax.set_xlabel('s [m]', fontsize=18)
ax.set_ylabel(r'$\Delta \beta / \beta$ [\%]', fontsize=18)
ax.tick_params(width=2, labelsize=16)
ax.tick_params(axis='x', length=8, direction='in')
ax.tick_params(axis='y', length=8, direction='in')
title = (
rf'RMS$_x$={rms_x_ref:05.2f}\% \quad RMS$_y$={rms_y_ref:05.2f}\% \quad '
rf'PTP$_x$={ptp_x_ref:05.2f}\% \quad PTP$_y$={ptp_y_ref:05.2f}\% \quad '
rf'$\Delta \nu_x$={sign(nux - nux_id)}{(nux - nux_id).abs().item():.4f} \quad $\Delta \nu_y$={sign(nuy - nuy_id)}{(nuy - nuy_id).abs().item():.4f}'
rf'\quad C={c_id.item():.6f}'
)
ax.text(0.0, 1.10, title, transform=ax.transAxes, ha='left', va='bottom', fontsize=16, fontfamily='monospace')
title = (
rf'RMS$_x$={rms_x_res:05.2f}\% \quad RMS$_y$={rms_y_res:05.2f}\% \quad '
rf'PTP$_x$={ptp_x_res:05.2f}\% \quad PTP$_y$={ptp_y_res:05.2f}\% \quad '
rf'$\Delta \nu_x$={sign(nux - nux_result)}{(nux - nux_result).abs().item():.4f} \quad $\Delta \nu_y$={sign(nuy - nuy_result)}{(nuy - nuy_result).abs().item():.4f}'
rf'\quad C={c_result.item():.6f}'
)
ax.text(0.0, 1.025, title, transform=ax.transAxes, ha='left', va='bottom', fontsize=16, fontfamily='monospace')
ax.legend(loc='upper right', frameon=False, fontsize=14, ncol=4)
ax.set_ylim(-5, 5)
plt.setp(ax.spines.values(), linewidth=2.0)
plt.show()
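# Compact text summary of the comparison plotted above; all values were computed in the
# earlier cells, nothing new is evaluated here
print(f'beta-beating RMS [%]: x {rms_x_ref:6.2f} -> {rms_x_res:6.2f} | y {rms_y_ref:6.2f} -> {rms_y_res:6.2f}')
print(f'dispersion RMS   [m]: x {rms_etax_ref:10.3e} -> {rms_etax_res:10.3e} | y {rms_etay_ref:10.3e} -> {rms_etay_res:10.3e}')
print(f'tune shift |dnu|    : x {(nux - nux_id).abs().item():.4f} -> {(nux - nux_result).abs().item():.4f} | y {(nuy - nuy_id).abs().item():.4f} -> {(nuy - nuy_result).abs().item():.4f}')
print(f'coupling C          : {c_id.item():.6f} -> {c_result.item():.6f}')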