Example-53: Coupling (Minimal tune distance computation)
[1]:
# In this example, the computation of the minimal tune distance (dQmin) is illustrated
[2]:
# Import
from pprint import pprint
import torch
from torch import Tensor
from pathlib import Path
import matplotlib
from matplotlib import pyplot as plt
from model.library.line import Line
from model.library.gradient import Gradient
from model.command.external import load_sdds
from model.command.external import load_lattice
from model.command.build import build
from model.command.mapping import matrix
from model.command.tune import tune
from model.command.twiss import twiss
from model.command.coupling import coupling
[3]:
# Build and setup lattice
# Load ELEGANT table
path = Path('ic.lte')
data = load_lattice(path)
# Build lattice from the loaded table
ring:Line = build('RING', 'ELEGANT', data)
ring.flatten()
# Merge drifts
ring.merge()
# Set linear dipoles
for element in ring:
    if element.__class__.__name__ == 'Dipole':
        element.linear = True
# Add gradient element to the lattice end
G = Gradient('G')
ring.append(G)
# Count the number of elements of each kind
nb = ring.describe['BPM']
nq = ring.describe['Quadrupole']
ns = ring.describe['Sextupole']
[4]:
# The Gradient element is equivalent to the following thin linear transformation
def gradient(state, kn, ks):
    qx, px, qy, py = state
    return torch.stack([qx, px - kn*qx + ks*qy, qy, py + ks*qx + kn*qy])
state = torch.tensor([0.001, 0.0, 0.005, 0.0], dtype=torch.float64)
kn = torch.tensor(0.01, dtype=torch.float64)
ks = torch.tensor(0.01, dtype=torch.float64)
print(gradient(state, kn, ks))
print(torch.func.jacrev(gradient)(state, kn, ks))
print()
print(G(state, data={'kn': kn, 'ks': ks, 'dp': 0.0}))
print(torch.func.jacrev(lambda state: G(state, data={'kn': kn, 'ks': ks, 'dp': 0.0}))(state))
print()
tensor([1.0000e-03, 4.0000e-05, 5.0000e-03, 6.0000e-05], dtype=torch.float64)
tensor([[ 1.0000, 0.0000, 0.0000, 0.0000],
[-0.0100, 1.0000, 0.0100, 0.0000],
[ 0.0000, 0.0000, 1.0000, 0.0000],
[ 0.0100, 0.0000, 0.0100, 1.0000]], dtype=torch.float64)
tensor([1.0000e-03, 4.0000e-05, 5.0000e-03, 6.0000e-05], dtype=torch.float64)
tensor([[ 1.0000, 0.0000, 0.0000, 0.0000],
[-0.0100, 1.0000, 0.0100, 0.0000],
[ 0.0000, 0.0000, 1.0000, 0.0000],
[ 0.0100, 0.0000, 0.0100, 1.0000]], dtype=torch.float64)
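As a quick consistency check (a minimal sketch, reusing gradient, state, kn and ks from above), the Jacobian M of this thin kick should satisfy the symplectic condition M^T S M = S:
# Symplectic form S and the thin-kick Jacobian M (sketch)
S = torch.tensor([[0.0, 1.0, 0.0, 0.0],
                  [-1.0, 0.0, 0.0, 0.0],
                  [0.0, 0.0, 0.0, 1.0],
                  [0.0, 0.0, -1.0, 0.0]], dtype=torch.float64)
M = torch.func.jacrev(gradient)(state, kn, ks)
print(torch.allclose(M.T @ S @ M, S))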
[5]:
# Compute one-turn matrix without and with skew gradient error
state = torch.tensor(4*[0.0], dtype=torch.float64)
ks = torch.tensor([0.01], dtype=torch.float64)
fmt, *_ = matrix(ring, 0, len(ring) - 1, ('ks', None, ['G'], None), matched=False)
print(torch.func.jacrev(ring)(state))
print()
print(fmt(state, 0.0*ks))
print()
print(fmt(state, 1.0*ks))
print()
print(torch.func.jacrev(lambda state: G(state, data={'kn': 0.0, 'ks': 0.05, 'dp': 0.0}))(state) @ fmt(state, 0.0*ks))
print()
tensor([[-4.4769, -2.6375, 0.0000, 0.0000],
[ 8.9592, 5.0549, 0.0000, 0.0000],
[ 0.0000, 0.0000, 3.9054, -2.0383],
[ 0.0000, 0.0000, 5.5965, -2.6648]], dtype=torch.float64)
tensor([[-4.4769, -2.6375, 0.0000, 0.0000],
[ 8.9592, 5.0549, 0.0000, 0.0000],
[ 0.0000, 0.0000, 3.9054, -2.0383],
[ 0.0000, 0.0000, 5.5965, -2.6648]], dtype=torch.float64)
tensor([[-4.4769, -2.6375, 0.0000, 0.0000],
[ 8.9592, 5.0549, 0.0391, -0.0204],
[ 0.0000, 0.0000, 3.9054, -2.0383],
[-0.0448, -0.0264, 5.5965, -2.6648]], dtype=torch.float64)
tensor([[-4.4769, -2.6375, 0.0000, 0.0000],
[ 8.9592, 5.0549, 0.1953, -0.1019],
[ 0.0000, 0.0000, 3.9054, -2.0383],
[-0.2238, -0.1319, 5.5965, -2.6648]], dtype=torch.float64)
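Since the unperturbed one-turn matrix is block-diagonal, the fractional tunes can be read off its 2x2 blocks via cos(2 pi nu) = tr(M)/2 (a minimal sketch reusing fmt, state and ks from above; arccos folds the result into [0, 1/2], and stable blocks are assumed):
# Fractional tunes from the traces of the uncoupled 2x2 blocks (sketch)
m = fmt(state, 0.0*ks)
print(torch.arccos(m[:2, :2].trace()/2.0)/(2.0*torch.pi))
print(torch.arccos(m[2:, 2:].trace()/2.0)/(2.0*torch.pi))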
[6]:
# Compute and compare twiss parameters at the lattice start
ks = torch.tensor([0.01], dtype=torch.float64)
nux_a, nuy_a = tune(ring, [0.0*ks], ('ks', None, ['G'], None), matched=False)
ax_a, bx_a, ay_a, by_a = twiss(ring, [0.0*ks], ('ks', None, ['G'], None), matched=False)
nux_b, nuy_b = tune(ring, [1.0*ks], ('ks', None, ['G'], None), matched=False)
ax_b, bx_b, ay_b, by_b = twiss(ring, [1.0*ks], ('ks', None, ['G'], None), matched=False)
print(nux_a - nux_b)
print(nuy_a - nuy_b)
print()
print(ax_a - ax_b)
print(bx_a - bx_b)
print(ay_a - ay_b)
print(by_a - by_b)
print()
tensor(6.7364e-05, dtype=torch.float64)
tensor(-8.2241e-05, dtype=torch.float64)
tensor(0.0071, dtype=torch.float64)
tensor(0.0037, dtype=torch.float64)
tensor(-0.0039, dtype=torch.float64)
tensor(0.0021, dtype=torch.float64)
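The tune shifts from a skew gradient are expected to be second order in its amplitude, so doubling ks should roughly quadruple them (a quick scaling sketch, reusing the names from the cell above):
# Scaling check (sketch): ratios of tune shifts at 2*ks and 1*ks should be close to 4
nux_c, nuy_c = tune(ring, [2.0*ks], ('ks', None, ['G'], None), matched=False)
print((nux_a - nux_c)/(nux_a - nux_b))
print((nuy_a - nuy_c)/(nuy_a - nuy_b))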
[7]:
# dQmin (Edwards & Syphers, first order in skew amplitude and unperturbed tune difference)
(ks.squeeze()/(2.0*torch.pi)*(bx_a*by_a).sqrt()).abs()
[7]:
tensor(0.0043, dtype=torch.float64)
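For reference, the estimate evaluated above is (with $k_s$ the integrated skew gradient and $\beta_{x,y}$ the unperturbed beta functions at its location):

$$\Delta Q_{\min} \approx \left|\frac{k_s}{2\pi}\sqrt{\beta_x \beta_y}\right|$$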
[8]:
# dQmin (first order in skew amplitude)
# Note: fractional tunes in [0, 1/2] are assumed
mux = 2.0*torch.pi*(nux_a % 0.5)
muy = 2.0*torch.pi*(nuy_a % 0.5)
(ks.squeeze()/(torch.pi)*(bx_a*by_a).sqrt()*(mux.sin()*muy.sin()).abs().sqrt()/(mux.sin() + muy.sin())).abs()
[8]:
tensor(0.0042, dtype=torch.float64)
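This corresponds to (with $\mu_{x,y} = 2\pi\,(Q_{x,y} \bmod 1/2)$ the phase advances built from the fractional tunes, matching the code above):

$$\Delta Q_{\min} \approx \left|\frac{k_s}{\pi}\sqrt{\beta_x \beta_y}\,\frac{\sqrt{\left|\sin\mu_x \sin\mu_y\right|}}{\sin\mu_x + \sin\mu_y}\right|$$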
[9]:
# dQmin (TEAPOT manual, appendix G, 1996)
print(coupling(ring, [0.0*ks], ('ks', None, ['G'], None), matched=False))
print(coupling(ring, [1.0*ks], ('ks', None, ['G'], None), matched=False))
tensor(0., dtype=torch.float64)
tensor(0.0042, dtype=torch.float64)
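For a single skew error, dQmin is expected to scale linearly with the amplitude; a quick check (sketch) against the value above:
# Doubling the skew amplitude should roughly double dQmin (sketch)
print(coupling(ring, [2.0*ks], ('ks', None, ['G'], None), matched=False))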
[10]:
# Derivative
# Note: the derivative is not defined at zero skew amplitude
print(torch.func.jacrev(lambda ks: coupling(ring, [ks], ('ks', None, ['G'], None), matched=False))(ks))
tensor([0.4239], dtype=torch.float64)
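The automatic derivative can be cross-checked with a central finite difference (a minimal sketch; the step size is chosen ad hoc):
# Central finite difference approximation of d(dQmin)/d(ks) (sketch)
h = torch.tensor([1.0E-6], dtype=torch.float64)
fp = coupling(ring, [ks + h], ('ks', None, ['G'], None), matched=False)
fm = coupling(ring, [ks - h], ('ks', None, ['G'], None), matched=False)
print((fp - fm)/(2.0*h))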