ELETTRA-09: Local ID correction (local correction: ORM)
[1]:
# In this example, model-free local ID correction is performed using ORM
[2]:
# Import
import torch
from torch import Tensor
from pathlib import Path
import matplotlib
from matplotlib import pyplot as plt
from matplotlib.patches import Rectangle
matplotlib.rcParams['text.usetex'] = True
from model.library.element import Element
from model.library.line import Line
from model.library.corrector import Corrector
from model.library.quadrupole import Quadrupole
from model.library.matrix import Matrix
from model.command.external import load_lattice
from model.command.build import build
from model.command.tune import tune
from model.command.orbit import dispersion
from model.command.orbit import ORM
from model.command.twiss import twiss
from model.command.advance import advance
from model.command.coupling import coupling
from model.command.wrapper import Wrapper
from model.command.wrapper import forward
from model.command.wrapper import inverse
from model.command.wrapper import normalize
[3]:
# Set data type and device
# Note, Element class attributes propagate to all lattice elements constructed below
Element.dtype = dtype = torch.float64
Element.device = device = torch.device('cpu')
[4]:
# Load lattice (ELEGANT table)
# Note, lattice is allowed to have repeated elements
# The parsed table is consumed by build() below
path = Path('elettra.lte')
data = load_lattice(path)
[5]:
# Define list of element names with trim coils
# Note, individual correctors are added separately
names = ['SD_S01_01', 'SD_S01_02', 'SD_S01_03', 'SD_S01_04', 'SD_S01_07', 'SD_S01_08', 'SD_S01_09', 'SD_S01_10', 'SF_S01_01', 'SF_S01_06', 'SH_S01_01', 'SH_S01_04', 'OCT_S01_01', 'OCT_S01_04', 'SD_S02_01', 'SD_S02_02', 'SD_S02_03', 'SD_S02_04', 'SD_S02_07', 'SD_S02_08', 'SD_S02_09', 'SD_S02_10', 'SF_S02_01', 'SF_S02_06', 'SH_S02_01', 'SH_S02_04', 'OCT_S02_01', 'OCT_S02_04', 'SD_S03_01', 'SD_S03_02', 'SD_S03_03', 'SD_S03_04', 'SD_S03_07', 'SD_S03_08', 'SD_S03_09', 'SD_S03_10', 'SF_S03_01', 'SF_S03_06', 'SH_S03_01', 'SH_S03_04', 'OCT_S03_01', 'OCT_S03_04', 'SD_S04_01', 'SD_S04_02', 'SD_S04_03', 'SD_S04_04', 'SD_S04_07', 'SD_S04_08', 'SD_S04_09', 'SD_S04_10', 'SF_S04_01', 'SF_S04_06', 'SH_S04_01', 'SH_S04_04', 'OCT_S04_01', 'OCT_S04_04', 'SD_S05_01', 'SD_S05_02', 'SD_S05_03', 'SD_S05_04', 'SD_S05_07', 'SD_S05_08', 'SD_S05_09', 'SD_S05_10', 'SF_S05_01', 'SF_S05_06', 'SH_S05_01', 'SH_S05_04', 'OCT_S05_01', 'OCT_S05_04', 'SD_S06_01', 'SD_S06_02', 'SD_S06_03', 'SD_S06_04', 'SD_S06_07', 'SD_S06_08', 'SD_S06_09', 'SD_S06_10', 'SF_S06_01', 'SF_S06_06', 'SH_S06_01', 'SH_S06_04', 'OCT_S06_01', 'OCT_S06_04', 'SD_S07_01', 'SD_S07_02', 'SD_S07_03', 'SD_S07_04', 'SD_S07_07', 'SD_S07_08', 'SD_S07_09', 'SD_S07_10', 'SF_S07_01', 'SF_S07_06', 'SH_S07_01', 'SH_S07_04', 'OCT_S07_01', 'OCT_S07_04', 'SD_S08_01', 'SD_S08_02', 'SD_S08_03', 'SD_S08_04', 'SD_S08_07', 'SD_S08_08', 'SD_S08_09', 'SD_S08_10', 'SF_S08_01', 'SF_S08_06', 'SH_S08_01', 'SH_S08_04', 'OCT_S08_01', 'OCT_S08_04', 'SD_S09_01', 'SD_S09_02', 'SD_S09_03', 'SD_S09_04', 'SD_S09_07', 'SD_S09_08', 'SD_S09_09', 'SD_S09_10', 'SF_S09_01', 'SF_S09_06', 'SH_S09_01', 'SH_S09_04', 'OCT_S09_01', 'OCT_S09_04', 'SD_S10_01', 'SD_S10_02', 'SD_S10_03', 'SD_S10_04', 'SD_S10_07', 'SD_S10_08', 'SD_S10_09', 'SD_S10_10', 'SF_S10_01', 'SF_S10_06', 'SH_S10_01', 'SH_S10_04', 'OCT_S10_01', 'OCT_S10_04', 'SD_S11_01', 'SD_S11_02', 'SD_S11_03', 'SD_S11_04', 'SD_S11_07', 'SD_S11_08', 'SD_S11_09', 'SD_S11_10', 'SF_S11_01', 'SF_S11_06', 'SH_S11_01', 
'SH_S11_04', 'OCT_S11_01', 'OCT_S11_04', 'SD_S12_01', 'SD_S12_02', 'SD_S12_03', 'SD_S12_04', 'SD_S12_07', 'SD_S12_08', 'SD_S12_09', 'SD_S12_10', 'SF_S12_01', 'SF_S12_06', 'SH_S12_01', 'SH_S12_04', 'OCT_S12_01', 'OCT_S12_04']
[6]:
# Build and setup lattice
ring:Line = build('RING', 'ELEGANT', data)
# Flatten sublines
ring.flatten()
# Remove all marker elements but the ones starting with MLL (long straight section centers)
ring.remove_group(pattern=r'^(?!MLL_).*', kinds=['Marker'])
# Replace all sextupoles with quadrupoles
# Note, the sextupole strength 'ms' is dropped, the replacement acts as a linear element
def factory(element:Element) -> Quadrupole:
    """Construct a quadrupole from a serialized sextupole (sextupole strength removed)."""
    table = element.serialize
    table.pop('ms', None)
    return Quadrupole(**table)
ring.replace_group(pattern=r'', factory=factory, kinds=['Sextupole'])
# Set linear dipoles
def apply(element:Element) -> None:
    """Switch an element to its linear transport model."""
    element.linear = True
ring.apply(apply, kinds=['Dipole'])
# Insert correctors
# Note, a corrector is pasted at each CH element and at each element with a trim coil
# NOTE(review): split((1 + 1, ...)) presumably splits the element in two and pastes
# the corrector in between — confirm against Line.split
for name, *_ in ring.layout():
    if name.startswith('CH'):
        corrector = Corrector(f'{name}_CXY', factor=1)
        ring.split((1 + 1, None, [name], None), paste=[corrector])
        continue
    if name in names:
        corrector = Corrector(f'{name}_CXY', factor=1)
        ring.split((1 + 1, None, [name], None), paste=[corrector])
# Merge drifts
ring.merge()
# Change lattice start
ring.start = "BPM_S01_01"
# Split BPMs
ring.split((None, ['BPM'], None, None))
# Roll lattice
ring.roll(1)
# Splice
ring.splice()
# Describe
ring.describe
[6]:
{'BPM': 168,
'Drift': 732,
'Dipole': 156,
'Quadrupole': 360,
'Corrector': 192,
'Marker': 12}
[7]:
# Compute ORM (2*168 x 2*192)
# Closed-orbit response of every BPM reading to every corrector setting
orbit = torch.zeros(4, dtype=dtype)
orm = ORM(ring, orbit, [], limit=1)
print(orm.shape)
torch.Size([336, 384])
[8]:
# Plot ORM (exclude skew blocks)
# Block layout:
# RXX RXY
# RYX RYY
# Exactly-zero entries (decoupled blocks) are masked with NaN so they render blank
response = orm.clone()
response[response == 0.0] = torch.nan
fig, ax = plt.subplots(figsize=(10, 10))
image = ax.imshow(response.cpu().numpy(), cmap='magma', interpolation='nearest')
position = ax.get_position()
cax = fig.add_axes([position.x1 + 0.01, position.y0, 0.02, position.height])
fig.colorbar(image, cax=cax)
plt.show()
[9]:
# Compute tunes (fractional part)
# Reference tunes of the bare lattice
nux, nuy = tune(ring, [], matched=True, limit=1)
[10]:
# Compute dispersion
# Reference dispersion of the bare lattice (coordinate and momentum components, both planes)
orbit = torch.tensor(4*[0.0], dtype=dtype)
etaqx, etapx, etaqy, etapy = dispersion(ring, orbit, [], limit=1)
[11]:
# Compute twiss parameters
# Reference alpha and beta values of the bare lattice at each observation point
ax, bx, ay, by = twiss(ring, [], matched=True, advance=True, full=False).T
[12]:
# Compute phase advances
# Reference phase advances of the bare lattice (both planes)
mux, muy = advance(ring, [], alignment=False, matched=True).T
[13]:
# Compute coupling
# Reference coupling of the bare lattice (zero for the ideal uncoupled machine, see below)
c = coupling(ring, [])
[14]:
# Several local knobs can be used to correct ID effects
# NOTE(review): the selected elements appear to sit in the first superperiod around
# the ID location — confirm against the lattice layout
# Normal quadrupole correctors
nkn = ['OCT_S01_02', 'QF_S01_02', 'QD_S01_02', 'QD_S01_03', 'QF_S01_03', 'OCT_S01_03']
# Skew quadrupole correctors
nks = ['SD_S01_05', 'SH_S01_02', 'SH_S01_03', 'SD_S01_06']
[15]:
# Define ORM observable
def observable_orm(kn: Tensor, ks: Tensor) -> Tensor:
    """Parametric ORM of the bare lattice.

    kn -- normal quadrupole strength deviations (one per name in nkn)
    ks -- skew quadrupole strength deviations (one per name in nks)
    Note, closes over module-level ring, orbit, nkn and nks.
    """
    orm = ORM(ring, orbit, [kn, ks], ('kn', None, nkn, None), ('ks', None, nks, None), limit=1)
    return orm
kn = torch.tensor(6*[0.0], dtype=dtype)
ks = torch.tensor(4*[0.0], dtype=dtype)
# Value and derivatives with respect to each knob group (forward-mode AD)
print(observable_orm(kn, ks))
print(torch.func.jacfwd(observable_orm, 0)(kn, ks))
print(torch.func.jacfwd(observable_orm, 1)(kn, ks))
tensor([[ 1.2608, 0.3460, -0.2254, ..., 0.0000, 0.0000, 0.0000],
[ 0.7149, 0.3956, 0.0488, ..., 0.0000, 0.0000, 0.0000],
[ 0.4009, 0.9655, 1.1580, ..., 0.0000, 0.0000, 0.0000],
...,
[ 0.0000, 0.0000, 0.0000, ..., 5.6044, 3.9261, 2.9078],
[ 0.0000, 0.0000, 0.0000, ..., 4.5002, 3.2682, 2.8291],
[ 0.0000, 0.0000, 0.0000, ..., 3.1543, 2.1224, 2.2974]],
dtype=torch.float64)
tensor([[[-3.8566e-01, -2.2381e+00, -7.5455e-01, -1.1784e-01, -2.5358e-01,
-2.2173e-02],
[ 5.0295e-02, 1.2518e-01, 8.2528e-03, -1.6907e-01, -4.2751e-01,
-5.0938e-02],
[ 1.7644e-01, 9.0914e-01, 2.8314e-01, -7.3071e-02, -2.0108e-01,
-2.6916e-02],
...,
[ 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00,
0.0000e+00],
[ 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00,
0.0000e+00],
[ 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00,
0.0000e+00]],
[[-2.5991e-01, -1.5344e+00, -5.2273e-01, -1.1730e-01, -2.7077e-01,
-2.8456e-02],
[ 3.3890e-02, 8.5809e-02, 5.7158e-03, -1.6831e-01, -4.5659e-01,
-6.5459e-02],
[ 1.1890e-01, 6.2329e-01, 1.9615e-01, -7.2744e-02, -2.1478e-01,
-3.4603e-02],
...,
[ 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00,
0.0000e+00],
[ 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00,
0.0000e+00],
[ 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00,
0.0000e+00]],
[[ 6.5381e-02, 2.3384e-01, 4.8616e-02, -1.9138e-01, -5.1419e-01,
-7.1631e-02],
[-8.5541e-03, -1.3152e-02, -5.4037e-04, -2.7465e-01, -8.6750e-01,
-1.6504e-01],
[-2.9931e-02, -9.5039e-02, -1.8249e-02, -1.1872e-01, -4.0817e-01,
-8.7285e-02],
...,
[ 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00,
0.0000e+00],
[ 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00,
0.0000e+00],
[ 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00,
0.0000e+00]],
...,
[[ 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00,
0.0000e+00],
[ 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00,
0.0000e+00],
[ 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00,
0.0000e+00],
...,
[-2.1395e-01, -2.6650e-01, -1.3570e-01, 2.8356e+00, 6.1249e+00,
7.1096e+00],
[-2.2715e-01, -3.0406e-01, -1.8010e-01, 2.0538e+00, 4.5012e+00,
5.2789e+00],
[-4.4112e-01, -6.3972e-01, -4.3370e-01, 1.7591e+00, 4.0770e+00,
4.9635e+00]],
[[ 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00,
0.0000e+00],
[ 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00,
0.0000e+00],
[ 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00,
0.0000e+00],
...,
[ 4.0765e-01, 1.7218e-01, 5.7125e-03, 2.7175e+00, 6.0054e+00,
7.0915e+00],
[ 4.3273e-01, 1.9570e-01, 7.5279e-03, 1.9683e+00, 4.4134e+00,
5.2655e+00],
[ 8.4024e-01, 4.1010e-01, 1.8029e-02, 1.6858e+00, 3.9976e+00,
4.9509e+00]],
[[ 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00,
0.0000e+00],
[ 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00,
0.0000e+00],
[ 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00,
0.0000e+00],
...,
[ 9.2441e-01, 5.5557e-01, 1.3736e-01, 2.1668e+00, 4.9211e+00,
5.9266e+00],
[ 9.8132e-01, 6.3254e-01, 1.8221e-01, 1.5694e+00, 3.6165e+00,
4.4005e+00],
[ 1.9055e+00, 1.3279e+00, 4.3861e-01, 1.3442e+00, 3.2759e+00,
4.1376e+00]]], dtype=torch.float64)
tensor([[[ 0.0000, 0.0000, 0.0000, 0.0000],
[ 0.0000, 0.0000, 0.0000, 0.0000],
[ 0.0000, 0.0000, 0.0000, 0.0000],
...,
[-0.3183, 0.4723, -0.6477, 1.1434],
[-0.3249, 0.5730, -0.4709, 0.8575],
[-0.6007, 1.2795, -0.4095, 0.8347]],
[[ 0.0000, 0.0000, 0.0000, 0.0000],
[ 0.0000, 0.0000, 0.0000, 0.0000],
[ 0.0000, 0.0000, 0.0000, 0.0000],
...,
[-0.2661, 0.3261, -0.6698, 0.8431],
[-0.2710, 0.3957, -0.4870, 0.6323],
[-0.4993, 0.8835, -0.4234, 0.6155]],
[[ 0.0000, 0.0000, 0.0000, 0.0000],
[ 0.0000, 0.0000, 0.0000, 0.0000],
[ 0.0000, 0.0000, 0.0000, 0.0000],
...,
[-0.2340, -0.0363, -1.1915, 0.2106],
[-0.2351, -0.0440, -0.8663, 0.1579],
[-0.4255, -0.0984, -0.7532, 0.1537]],
...,
[[ 0.0406, -0.3419, -0.6285, 0.5897],
[ 0.0189, 0.0126, -0.9466, 0.1015],
[-0.0019, 0.1344, -0.4206, -0.1470],
...,
[ 0.0000, 0.0000, 0.0000, 0.0000],
[ 0.0000, 0.0000, 0.0000, 0.0000],
[ 0.0000, 0.0000, 0.0000, 0.0000]],
[[-0.2335, 0.0505, -0.6094, 0.6009],
[-0.1472, -0.0019, -0.9178, 0.1034],
[-0.0155, -0.0199, -0.4078, -0.1497],
...,
[ 0.0000, 0.0000, 0.0000, 0.0000],
[ 0.0000, 0.0000, 0.0000, 0.0000],
[ 0.0000, 0.0000, 0.0000, 0.0000]],
[[-0.4527, 0.4102, -0.4928, 0.5141],
[-0.2792, -0.0151, -0.7422, 0.0885],
[-0.0257, -0.1612, -0.3298, -0.1281],
...,
[ 0.0000, 0.0000, 0.0000, 0.0000],
[ 0.0000, 0.0000, 0.0000, 0.0000],
[ 0.0000, 0.0000, 0.0000, 0.0000]]], dtype=torch.float64)
[16]:
# Construct full target observable vector and corresponding response matrix
# Note, knob vector sizes are derived from the knob name lists instead of magic numbers
def observable(knobs: Tensor) -> Tensor:
    """Flatten the parametric ORM into a single observable vector.

    knobs -- concatenated normal and skew knob deviations (len(nkn) + len(nks))
    """
    kn, ks = torch.split(knobs, [len(nkn), len(nks)])
    orm = observable_orm(kn, ks)
    return orm.flatten()
knobs = torch.zeros(len(nkn) + len(nks), dtype=dtype)
print((target := observable(knobs)).shape)
print((matrix := torch.func.jacfwd(observable)(knobs)).shape)
torch.Size([129024])
torch.Size([129024, 10])
[17]:
# To perform model free correction the response matrix should have full rank
# Rank equal to the number of knobs (10) means every knob acts independently on the ORM
print(torch.linalg.matrix_rank(matrix))
tensor(10)
[18]:
# Define ID model
# Note, only the flattened upper triangular part of the A and B matrices is passed
# Note, A and B are approximately symmetric, so the upper triangle defines them
A = torch.tensor([[-0.03484222052711237, 1.0272120741819959E-7, -4.698931299341201E-9, 0.0015923185492594811],
                  [1.0272120579834892E-7, -0.046082787920135176, 0.0017792061173117564, 3.3551298301095784E-8],
                  [-4.6989312853101E-9, 0.0017792061173117072, 0.056853750760983084, -1.5929605363332683E-7],
                  [0.0015923185492594336, 3.3551298348653296E-8, -1.5929605261642905E-7, 0.08311631737263032]], dtype=dtype)
B = torch.tensor([[0.03649353186115209, 0.0015448347221877217, 0.00002719892025520868, -0.0033681183134964482],
                  [0.0015448347221877217, 0.13683886657005795, -0.0033198692682377406, 0.00006140578258682469],
                  [0.00002719892025520868, -0.0033198692682377406, -0.05260095308967722, 0.005019907688182885],
                  [-0.0033681183134964482, 0.00006140578258682469, 0.005019907688182885, -0.2531573249456863]], dtype=dtype)
def upper_triangle(matrix: Tensor) -> list:
    """Flatten the upper triangular part (diagonal included) of a square matrix."""
    mask = torch.triu(torch.ones_like(matrix, dtype=torch.bool))
    return matrix[mask].tolist()
ID = Matrix('ID', length=0.0, A=upper_triangle(A), B=upper_triangle(B))
[19]:
# Insert ID into the existing lattice
# This will replace the target marker (MLL_S01, a long straight section center)
# Note, the bare lattice is cloned so that ring remains the unperturbed reference
error = ring.clone()
error.flatten()
error.insert(ID, 'MLL_S01', position=0.0)
error.splice()
# Describe
# One Marker is replaced by one Matrix relative to the bare lattice
error.describe
[19]:
{'BPM': 168,
'Drift': 732,
'Dipole': 156,
'Quadrupole': 360,
'Corrector': 192,
'Matrix': 1,
'Marker': 11}
[20]:
# Compute tunes (fractional part)
# Tunes of the lattice with the ID inserted
nux_id, nuy_id = tune(error, [], matched=True, limit=1)
[21]:
# Compute dispersion
# Dispersion of the lattice with the ID inserted
orbit = torch.tensor(4*[0.0], dtype=dtype)
etaqx_id, etapx_id, etaqy_id, etapy_id = dispersion(error, orbit, [], limit=1)
[22]:
# Compute twiss parameters
# Twiss parameters of the lattice with the ID inserted
ax_id, bx_id, ay_id, by_id = twiss(error, [], matched=True, advance=True, full=False).T
[23]:
# Compute phase advances
# Phase advances of the lattice with the ID inserted
mux_id, muy_id = advance(error, [], alignment=False, matched=True).T
[24]:
# Compute coupling
# Coupling of the lattice with the ID inserted
c_id = coupling(error, [])
[25]:
# Tune shifts
# Tune shifts introduced by the ID (bare minus perturbed)
print((nux - nux_id))
print((nuy - nuy_id))
tensor(0.0260, dtype=torch.float64)
tensor(-0.0114, dtype=torch.float64)
[26]:
# Coupling (minimal tune distance)
# The ID introduces a small amount of betatron coupling
print(c)
print(c_id)
tensor(0., dtype=torch.float64)
tensor(0.0004, dtype=torch.float64)
[27]:
# Dispersion distortion introduced by the ID (horizontal: blue, vertical: red)
locations = ring.locations().cpu().numpy()
fig, ax = plt.subplots(figsize=(12, 4))
ax.errorbar(locations, (etaqx - etaqx_id).cpu().numpy(), fmt='-', marker='x', color='blue', alpha=0.75)
ax.errorbar(locations, (etaqy - etaqy_id).cpu().numpy(), fmt='-', marker='x', color='red', alpha=0.75)
fig.tight_layout()
plt.show()
[28]:
# Beta-beating introduced by the ID, in percent (horizontal: blue, vertical: red)
locations = ring.locations().cpu().numpy()
ratio_bx = (bx - bx_id)/bx
ratio_by = (by - by_id)/by
fig, ax = plt.subplots(figsize=(12, 4))
ax.errorbar(locations, 100*ratio_bx.cpu().numpy(), fmt='-', marker='x', color='blue', alpha=0.75)
ax.errorbar(locations, 100*ratio_by.cpu().numpy(), fmt='-', marker='x', color='red', alpha=0.75)
fig.tight_layout()
plt.show()
# RMS beating in percent
print(100*(ratio_bx**2).mean().sqrt())
print(100*(ratio_by**2).mean().sqrt())
tensor(11.5994, dtype=torch.float64)
tensor(1.7916, dtype=torch.float64)
[29]:
# Phase advance deviation introduced by the ID, in percent (horizontal: blue, vertical: red)
locations = ring.locations().cpu().numpy()
ratio_mux = (mux - mux_id)/mux
ratio_muy = (muy - muy_id)/muy
fig, ax = plt.subplots(figsize=(12, 4))
ax.errorbar(locations, 100*ratio_mux.cpu().numpy(), fmt='-', marker='x', color='blue', alpha=0.75)
ax.errorbar(locations, 100*ratio_muy.cpu().numpy(), fmt='-', marker='x', color='red', alpha=0.75)
fig.tight_layout()
plt.show()
# RMS deviation in percent
print(100*(ratio_mux**2).mean().sqrt())
print(100*(ratio_muy**2).mean().sqrt())
tensor(8.7941, dtype=torch.float64)
tensor(1.7778, dtype=torch.float64)
[30]:
# Define parametric observable vector (emulate ORM measurement on the perturbed lattice)
# Note, these definitions intentionally shadow the ring-based versions defined above
def observable_orm(kn: Tensor, ks: Tensor) -> Tensor:
    """Parametric ORM of the lattice with the ID inserted."""
    orm = ORM(error, orbit, [kn, ks], ('kn', None, nkn, None), ('ks', None, nks, None), limit=1)
    return orm
def observable(knobs: Tensor) -> Tensor:
    """Flatten the parametric ORM into a single observable vector."""
    kn, ks = torch.split(knobs, [6, 4])
    orm = observable_orm(kn, ks)
    return orm.flatten()
[31]:
# Check the residual vector norm
# With zero knobs the residual is the full effect of the ID on the ORM
knobs = torch.tensor((6 + 4)*[0.0], dtype=dtype)
print(((observable(knobs) - target)**2).sum())
tensor(7992.7087, dtype=torch.float64)
[32]:
# Optimization loop (model free)
# Note, covariance matrix is not used here since the observable vector is too long
# An alternative would be to use fewer correctors and/or BPMs
# Iterative correction
lr = 0.75
# Initial value
knobs = torch.tensor((6 + 4)*[0.0], dtype=dtype)
for _ in range(16):
    # Emulated measurement on the perturbed lattice
    value = observable(knobs)
    # Damped Gauss-Newton step using the reference response matrix
    # (model free: the matrix is never recomputed during iterations)
    knobs -= lr*torch.linalg.lstsq(matrix, value - target, driver='gels').solution
    print((value - target).norm())
print()
tensor(89.4020, dtype=torch.float64)
tensor(27.8160, dtype=torch.float64)
tensor(13.4407, dtype=torch.float64)
tensor(7.4012, dtype=torch.float64)
tensor(5.2105, dtype=torch.float64)
tensor(4.7305, dtype=torch.float64)
tensor(4.6589, dtype=torch.float64)
tensor(4.6503, dtype=torch.float64)
tensor(4.6495, dtype=torch.float64)
tensor(4.6495, dtype=torch.float64)
tensor(4.6495, dtype=torch.float64)
tensor(4.6495, dtype=torch.float64)
tensor(4.6495, dtype=torch.float64)
tensor(4.6495, dtype=torch.float64)
tensor(4.6495, dtype=torch.float64)
tensor(4.6495, dtype=torch.float64)
[33]:
# Check the residual vector norm
# Residual after correction (compare with the uncorrected value above)
print(((observable(knobs) - target)**2).sum())
tensor(21.6177, dtype=torch.float64)
[34]:
# Apply final corrections
kn, ks = torch.split(knobs, [6, 4])
result = error.clone()
result.flatten()
# Add fitted strength deviations to the existing corrector settings
for name, knob in zip(nkn, kn):
    result[name].kn = (result[name].kn + knob).item()
for name, knob in zip(nks, ks):
    result[name].ks = (result[name].ks + knob).item()
result.splice()
[35]:
# Compute tunes (fractional part)
# Tunes of the corrected lattice
nux_result, nuy_result = tune(result, [], matched=True, limit=1)
[36]:
# Compute dispersion
# Dispersion of the corrected lattice
orbit = torch.tensor(4*[0.0], dtype=dtype)
etaqx_result, etapx_result, etaqy_result, etapy_result = dispersion(result, orbit, [], limit=1)
[37]:
# Compute twiss parameters
# Twiss parameters of the corrected lattice
ax_result, bx_result, ay_result, by_result = twiss(result, [], matched=True, advance=True, full=False).T
[38]:
# Compute phase advances
# Phase advances of the corrected lattice
mux_result, muy_result = advance(result, [], alignment=False, matched=True).T
[39]:
# Compute coupling
# Coupling of the corrected lattice
c_result = coupling(result, [])
[40]:
# Tune shifts
# Before correction (ID only) followed by after correction
print((nux - nux_id).abs())
print((nuy - nuy_id).abs())
print()
print((nux - nux_result).abs())
print((nuy - nuy_result).abs())
print()
tensor(0.0260, dtype=torch.float64)
tensor(0.0114, dtype=torch.float64)
tensor(0.0006, dtype=torch.float64)
tensor(2.8689e-05, dtype=torch.float64)
[41]:
# Coupling (minimal tune distance)
# Bare lattice, with ID, and after correction
print(c)
print(c_id)
print(c_result)
tensor(0., dtype=torch.float64)
tensor(0.0004, dtype=torch.float64)
tensor(2.6796e-06, dtype=torch.float64)
[42]:
# Dispersion: ID distortion (crosses) against residual after correction (circles)
locations = ring.locations().cpu().numpy()
fig, ax = plt.subplots(figsize=(12, 4))
ax.errorbar(locations, (etaqx - etaqx_id).cpu().numpy(), fmt='-', marker='x', color='blue', alpha=0.75)
ax.errorbar(locations, (etaqy - etaqy_id).cpu().numpy(), fmt='-', marker='x', color='red', alpha=0.75)
ax.errorbar(locations, (etaqx - etaqx_result).cpu().numpy(), fmt='-', marker='o', color='blue', alpha=0.75)
ax.errorbar(locations, (etaqy - etaqy_result).cpu().numpy(), fmt='-', marker='o', color='red', alpha=0.75)
fig.tight_layout()
plt.show()
[43]:
# Beta-beating in percent: ID distortion (crosses) against residual after correction (circles)
locations = ring.locations().cpu().numpy()
ratio_bx_id = (bx - bx_id)/bx
ratio_by_id = (by - by_id)/by
ratio_bx_result = (bx - bx_result)/bx
ratio_by_result = (by - by_result)/by
fig, ax = plt.subplots(figsize=(12, 4))
ax.errorbar(locations, 100*ratio_bx_id.cpu().numpy(), fmt='-', marker='x', color='blue', alpha=0.75)
ax.errorbar(locations, 100*ratio_by_id.cpu().numpy(), fmt='-', marker='x', color='red', alpha=0.75)
ax.errorbar(locations, 100*ratio_bx_result.cpu().numpy(), fmt='-', marker='o', color='blue', alpha=0.75)
ax.errorbar(locations, 100*ratio_by_result.cpu().numpy(), fmt='-', marker='o', color='red', alpha=0.75)
fig.tight_layout()
plt.show()
# RMS beating before and after correction, in percent
print(100*(ratio_bx_id**2).mean().sqrt())
print(100*(ratio_by_id**2).mean().sqrt())
print()
print(100*(ratio_bx_result**2).mean().sqrt())
print(100*(ratio_by_result**2).mean().sqrt())
print()
tensor(11.5994, dtype=torch.float64)
tensor(1.7916, dtype=torch.float64)
tensor(0.7689, dtype=torch.float64)
tensor(0.6475, dtype=torch.float64)
[44]:
# Phase advance deviation in percent: ID distortion (crosses) against residual after correction (circles)
locations = ring.locations().cpu().numpy()
ratio_mux_id = (mux - mux_id)/mux
ratio_muy_id = (muy - muy_id)/muy
ratio_mux_result = (mux - mux_result)/mux
ratio_muy_result = (muy - muy_result)/muy
fig, ax = plt.subplots(figsize=(12, 4))
ax.errorbar(locations, 100*ratio_mux_id.cpu().numpy(), fmt='-', marker='x', color='blue', alpha=0.75)
ax.errorbar(locations, 100*ratio_muy_id.cpu().numpy(), fmt='-', marker='x', color='red', alpha=0.75)
ax.errorbar(locations, 100*ratio_mux_result.cpu().numpy(), fmt='-', marker='o', color='blue', alpha=0.75)
ax.errorbar(locations, 100*ratio_muy_result.cpu().numpy(), fmt='-', marker='o', color='red', alpha=0.75)
fig.tight_layout()
plt.show()
# RMS deviation before and after correction, in percent
print(100*(ratio_mux_id**2).mean().sqrt())
print(100*(ratio_muy_id**2).mean().sqrt())
print()
print(100*(ratio_mux_result**2).mean().sqrt())
print(100*(ratio_muy_result**2).mean().sqrt())
print()
tensor(8.7941, dtype=torch.float64)
tensor(1.7778, dtype=torch.float64)
tensor(0.5063, dtype=torch.float64)
tensor(0.4383, dtype=torch.float64)