{ "cells": [ { "attachments": {}, "cell_type": "markdown", "id": "65b5b531-909f-400f-8372-c10634059519", "metadata": {}, "source": [ "# Example-27: Normalize" ] }, { "cell_type": "code", "execution_count": 1, "id": "1152f21e-c8b1-4ca0-b0d6-b0e5940280f3", "metadata": {}, "outputs": [], "source": [ "# In this example normalized objective construction is illustrated" ] }, { "cell_type": "code", "execution_count": 2, "id": "c245aef8-7651-4b96-a74e-8e7d20cc306a", "metadata": {}, "outputs": [], "source": [ "# Import\n", "\n", "import torch\n", "from torch.utils.data import TensorDataset\n", "from torch.utils.data import DataLoader\n", "torch.set_printoptions(linewidth=128)\n", "\n", "import matplotlib\n", "from matplotlib import pyplot as plt\n", "matplotlib.rcParams['text.usetex'] = True\n", "\n", "from twiss import twiss\n", "\n", "from ndmap.signature import chop\n", "from ndmap.evaluate import evaluate\n", "from ndmap.pfp import parametric_fixed_point\n", "\n", "from model.library.drift import Drift\n", "from model.library.quadrupole import Quadrupole\n", "from model.library.sextupole import Sextupole\n", "from model.library.dipole import Dipole\n", "from model.library.line import Line\n", "\n", "from model.command.wrapper import group\n", "from model.command.wrapper import forward\n", "from model.command.wrapper import inverse\n", "from model.command.wrapper import normalize\n", "from model.command.wrapper import Wrapper" ] }, { "cell_type": "code", "execution_count": 3, "id": "65647630-f63f-4a50-92f3-3ebbde2ad3d9", "metadata": {}, "outputs": [], "source": [ "# Define simple FODO based lattice using nested lines\n", "\n", "DR = Drift('DR', 0.25)\n", "BM = Dipole('BM', 3.50, torch.pi/4.0)\n", "\n", "QF_A = Quadrupole('QF_A', 0.5, +0.20)\n", "QD_A = Quadrupole('QD_A', 0.5, -0.19)\n", "QF_B = Quadrupole('QF_B', 0.5, +0.20)\n", "QD_B = Quadrupole('QD_B', 0.5, -0.19)\n", "QF_C = Quadrupole('QF_C', 0.5, +0.20)\n", "QD_C = Quadrupole('QD_C', 0.5, -0.19)\n", "QF_D = Quadrupole('QF_D', 0.5, +0.20)\n", "QD_D = Quadrupole('QD_D', 0.5, -0.19)\n", "\n", "SF_A = Sextupole('SF_A', 0.25, 0.00)\n", "SD_A = Sextupole('SD_A', 0.25, 0.00)\n", "SF_B = Sextupole('SF_B', 0.25, 0.00)\n", "SD_B = Sextupole('SD_B', 0.25, 0.00)\n", "SF_C = Sextupole('SF_C', 0.25, 0.00)\n", "SD_C = Sextupole('SD_C', 0.25, 0.00)\n", "SF_D = Sextupole('SF_D', 0.25, 0.00)\n", "SD_D = Sextupole('SD_D', 0.25, 0.00)\n", "\n", "FODO_A = Line('FODO_A', [QF_A, DR, SF_A, DR, BM, DR, SD_A, DR, QD_A, QD_A, DR, SD_A, DR, BM, DR, SF_A, DR, QF_A], propagate=True, dp=0.0, exact=False, output=False, matrix=False)\n", "FODO_B = Line('FODO_B', [QF_B, DR, SF_B, DR, BM, DR, SD_B, DR, QD_B, QD_B, DR, SD_B, DR, BM, DR, SF_B, DR, QF_B], propagate=True, dp=0.0, exact=False, output=False, matrix=False)\n", "FODO_C = Line('FODO_C', [QF_C, DR, SF_C, DR, BM, DR, SD_C, DR, QD_C, QD_C, DR, SD_C, DR, BM, DR, SF_C, DR, QF_C], propagate=True, dp=0.0, exact=False, output=False, matrix=False)\n", "FODO_D = Line('FODO_D', [QF_D, DR, SF_D, DR, BM, DR, SD_D, DR, QD_D, QD_D, DR, SD_D, DR, BM, DR, SF_D, DR, QF_D], propagate=True, dp=0.0, exact=False, output=False, matrix=False)\n", "\n", "RING = Line('RING', [FODO_A, FODO_B, FODO_C, FODO_D], propagate=True, dp=0.0, exact=False, output=False, matrix=False)" ] }, { "cell_type": "code", "execution_count": 4, "id": "279e09d2-a58f-4654-b168-1268dc1f674e", "metadata": {}, "outputs": [], "source": [ "# Set parametric mapping\n", "\n", "ring, *_ = group(RING, 'FODO_A', 'FODO_D', ('ms', ['Sextupole'], None, None), ('dp', None, None, 
None), root=True)" ] }, { "cell_type": "code", "execution_count": 5, "id": "c2716680-1ddb-4df8-a394-84f2c25f0c7c", "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "tensor([ 0.0157, -0.0006, -0.0189, -0.0032], dtype=torch.float64)\n", "tensor([ 0.0157, -0.0006, -0.0189, -0.0032], dtype=torch.float64)\n" ] } ], "source": [ "# Construct normalized function\n", "\n", "fn = normalize(ring, [(None, None), (-10.0, 10.0), (-0.01, 0.01)])\n", "\n", "# Compare with original\n", "\n", "fp = torch.tensor([0.001, 0.0005, -0.010, 0.0025], dtype=torch.float64)\n", "ms = torch.tensor([1.0, -1.0, 0.5, 2.0, 4.0, -5.0, -1.0, 3.0], dtype=torch.float64)\n", "dp = torch.tensor([0.005], dtype=torch.float64)\n", "\n", "print(ring(fp, ms, dp))\n", "print(fn(*forward([fp, ms, dp], [(None, None), (-10.0, 10.0), (-0.01, 0.01)])))" ] }, { "cell_type": "code", "execution_count": 6, "id": "741bb7b8-7799-4422-93ec-5e19a697469f", "metadata": {}, "outputs": [], "source": [ "# Set deviation parameters\n", "\n", "fp = torch.tensor(4*[0.0], dtype=torch.float64)\n", "ms = torch.tensor(8*[0.0], dtype=torch.float64)\n", "dp = torch.tensor([0.0], dtype=torch.float64)" ] }, { "cell_type": "code", "execution_count": 7, "id": "c97a86a4-57ea-46ef-838c-725a23cdb794", "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "tensor([-2.0649, -0.8260], dtype=torch.float64)\n" ] } ], "source": [ "# Define parametric chromaticity function\n", "\n", "# Compute parametric fixed point (first order dispersion)\n", "\n", "pfp, *_ = parametric_fixed_point((0, 1), fp, [ms, dp], ring)\n", "chop(pfp)\n", "\n", "# Define ring around parametric fixed point\n", "\n", "def mapping(state, ms, dp):\n", " return ring(state + evaluate(pfp, [ms, dp]), ms, dp) - evaluate(pfp, [ms, dp])\n", "\n", "# Define tunes\n", "\n", "def tune(ms, dp):\n", " matrix = torch.func.jacrev(mapping)(fp, ms, dp)\n", " tunes, *_ = twiss(matrix)\n", " return tunes\n", "\n", "# Define chromaticity\n", "\n", "def chromaticity(ms):\n", " return torch.func.jacrev(tune, 1)(ms, dp).squeeze()\n", "\n", "# Compute natural chromaticity\n", "\n", "print(chromaticity(ms))" ] }, { "cell_type": "code", "execution_count": 8, "id": "c254e2a2-41bc-406c-a266-5739b44ca829", "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "tensor([ 0.7439, -1.2084, 0.7439, -1.2084, 0.7439, -1.2084, 0.7439, -1.2084], dtype=torch.float64)\n", "tensor([5.0000, 5.0000], dtype=torch.float64)\n" ] } ], "source": [ "# Chromaticity can be corrected in a single step\n", "\n", "# Compute starting values\n", "\n", "psix, psiy = chromaticity(ms)\n", "\n", "# Set target values\n", "\n", "psix_target = torch.tensor(5.0, dtype=torch.float64)\n", "psiy_target = torch.tensor(5.0, dtype=torch.float64)\n", "\n", "# Perform correction\n", "\n", "dpsix = psix - psix_target\n", "dpsiy = psiy - psiy_target\n", "\n", "solution = - torch.linalg.pinv((torch.func.jacrev(chromaticity)(ms)).squeeze()) @ torch.stack([dpsix, dpsiy])\n", "print(solution)\n", "\n", "# Test solution\n", "\n", "print(chromaticity(solution))" ] }, { "cell_type": "code", "execution_count": 9, "id": "6e1a5cdb-f6a9-4af3-aef8-768dbdaf7090", "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "tensor(5.6871e-15, dtype=torch.float64)\n", "tensor(1.1580e-14, dtype=torch.float64)\n", "tensor(9.1573, dtype=torch.float64) tensor(2.7830, dtype=torch.float64)\n", "tensor(8.9651, dtype=torch.float64) tensor(2.7280, dtype=torch.float64)\n",
"tensor(8.7737, dtype=torch.float64) tensor(2.6732, dtype=torch.float64)\n", "tensor(8.5832, dtype=torch.float64) tensor(2.6184, dtype=torch.float64)\n", "tensor(8.3937, dtype=torch.float64) tensor(2.5637, dtype=torch.float64)\n", "tensor(8.2052, dtype=torch.float64) tensor(2.5090, dtype=torch.float64)\n", "tensor(8.0177, dtype=torch.float64) tensor(2.4545, dtype=torch.float64)\n", "tensor(7.8314, dtype=torch.float64) tensor(2.4000, dtype=torch.float64)\n", "tensor(7.6464, dtype=torch.float64) tensor(2.3456, dtype=torch.float64)\n", "tensor(7.4627, dtype=torch.float64) tensor(2.2914, dtype=torch.float64)\n", "tensor(7.2804, dtype=torch.float64) tensor(2.2373, dtype=torch.float64)\n", "tensor(7.0995, dtype=torch.float64) tensor(2.1833, dtype=torch.float64)\n", "tensor(6.9202, dtype=torch.float64) tensor(2.1294, dtype=torch.float64)\n", "tensor(6.7426, dtype=torch.float64) tensor(2.0758, dtype=torch.float64)\n", "tensor(6.5666, dtype=torch.float64) tensor(2.0222, dtype=torch.float64)\n", "tensor(6.3924, dtype=torch.float64) tensor(1.9689, dtype=torch.float64)\n", "tensor(6.2200, dtype=torch.float64) tensor(1.9158, dtype=torch.float64)\n", "tensor(6.0495, dtype=torch.float64) tensor(1.8628, dtype=torch.float64)\n", "tensor(5.8808, dtype=torch.float64) tensor(1.8101, dtype=torch.float64)\n", "tensor(5.7141, dtype=torch.float64) tensor(1.7577, dtype=torch.float64)\n", "tensor(5.5492, dtype=torch.float64) tensor(1.7055, dtype=torch.float64)\n", "tensor(5.3862, dtype=torch.float64) tensor(1.6536, dtype=torch.float64)\n", "tensor(5.2250, dtype=torch.float64) tensor(1.6019, dtype=torch.float64)\n", "tensor(5.0655, dtype=torch.float64) tensor(1.5506, dtype=torch.float64)\n", "tensor(4.9077, dtype=torch.float64) tensor(1.4996, dtype=torch.float64)\n", "tensor(4.7514, dtype=torch.float64) tensor(1.4489, dtype=torch.float64)\n", "tensor(4.5965, dtype=torch.float64) tensor(1.3986, dtype=torch.float64)\n", "tensor(4.4428, dtype=torch.float64) tensor(1.3486, dtype=torch.float64)\n", "tensor(4.2902, dtype=torch.float64) tensor(1.2990, dtype=torch.float64)\n", "tensor(4.1384, dtype=torch.float64) tensor(1.2498, dtype=torch.float64)\n", "tensor(3.9873, dtype=torch.float64) tensor(1.2009, dtype=torch.float64)\n", "tensor(3.8366, dtype=torch.float64) tensor(1.1523, dtype=torch.float64)\n", "tensor(3.6862, dtype=torch.float64) tensor(1.1041, dtype=torch.float64)\n", "tensor(3.5359, dtype=torch.float64) tensor(1.0563, dtype=torch.float64)\n", "tensor(3.3857, dtype=torch.float64) tensor(1.0088, dtype=torch.float64)\n", "tensor(3.2353, dtype=torch.float64) tensor(0.9616, dtype=torch.float64)\n", "tensor(3.0848, dtype=torch.float64) tensor(0.9148, dtype=torch.float64)\n", "tensor(2.9343, dtype=torch.float64) tensor(0.8682, dtype=torch.float64)\n", "tensor(2.7836, dtype=torch.float64) tensor(0.8220, dtype=torch.float64)\n", "tensor(2.6331, dtype=torch.float64) tensor(0.7759, dtype=torch.float64)\n", "tensor(2.4828, dtype=torch.float64) tensor(0.7301, dtype=torch.float64)\n", "tensor(2.3329, dtype=torch.float64) tensor(0.6845, dtype=torch.float64)\n", "tensor(2.1835, dtype=torch.float64) tensor(0.6390, dtype=torch.float64)\n", "tensor(2.0349, dtype=torch.float64) tensor(0.5936, dtype=torch.float64)\n", "tensor(1.8870, dtype=torch.float64) tensor(0.5482, dtype=torch.float64)\n", "tensor(1.7399, dtype=torch.float64) tensor(0.5028, dtype=torch.float64)\n", "tensor(1.5934, dtype=torch.float64) tensor(0.4573, dtype=torch.float64)\n", "tensor(1.4474, dtype=torch.float64) tensor(0.4116, dtype=torch.float64)\n", 
"tensor(1.3016, dtype=torch.float64) tensor(0.3657, dtype=torch.float64)\n", "tensor(1.1557, dtype=torch.float64) tensor(0.3195, dtype=torch.float64)\n", "tensor(1.0093, dtype=torch.float64) tensor(0.2729, dtype=torch.float64)\n", "tensor(0.8622, dtype=torch.float64) tensor(0.2261, dtype=torch.float64)\n", "tensor(0.7146, dtype=torch.float64) tensor(0.1791, dtype=torch.float64)\n", "tensor(0.5667, dtype=torch.float64) tensor(0.1320, dtype=torch.float64)\n", "tensor(0.4196, dtype=torch.float64) tensor(0.0852, dtype=torch.float64)\n", "tensor(0.2748, dtype=torch.float64) tensor(0.0393, dtype=torch.float64)\n", "tensor(0.1340, dtype=torch.float64) tensor(0.0068, dtype=torch.float64)\n", "tensor(0.0262, dtype=torch.float64) tensor(0.0432, dtype=torch.float64)\n", "tensor(0.1620, dtype=torch.float64) tensor(0.0727, dtype=torch.float64)\n", "tensor(0.2647, dtype=torch.float64) tensor(0.0948, dtype=torch.float64)\n", "tensor(0.3263, dtype=torch.float64) tensor(0.1111, dtype=torch.float64)\n", "tensor(0.3638, dtype=torch.float64) tensor(0.1224, dtype=torch.float64)\n", "tensor(0.3894, dtype=torch.float64) tensor(0.1289, dtype=torch.float64)\n", "tensor(0.4067, dtype=torch.float64) tensor(0.1301, dtype=torch.float64)\n", "tensor(0.4121, dtype=torch.float64) tensor(0.1261, dtype=torch.float64)\n", "tensor(0.4008, dtype=torch.float64) tensor(0.1169, dtype=torch.float64)\n", "tensor(0.3711, dtype=torch.float64) tensor(0.1032, dtype=torch.float64)\n", "tensor(0.3262, dtype=torch.float64) tensor(0.0861, dtype=torch.float64)\n", "tensor(0.2728, dtype=torch.float64) tensor(0.0667, dtype=torch.float64)\n", "tensor(0.2177, dtype=torch.float64) tensor(0.0458, dtype=torch.float64)\n", "tensor(0.1600, dtype=torch.float64) tensor(0.0233, dtype=torch.float64)\n", "tensor(0.0867, dtype=torch.float64) tensor(0.0035, dtype=torch.float64)\n", "tensor(0.0227, dtype=torch.float64) tensor(0.0224, dtype=torch.float64)\n", "tensor(0.0708, dtype=torch.float64) tensor(0.0378, dtype=torch.float64)\n", "tensor(0.1198, dtype=torch.float64) tensor(0.0465, dtype=torch.float64)\n", "tensor(0.1470, dtype=torch.float64) tensor(0.0496, dtype=torch.float64)\n", "tensor(0.1569, dtype=torch.float64) tensor(0.0479, dtype=torch.float64)\n", "tensor(0.1535, dtype=torch.float64) tensor(0.0422, dtype=torch.float64)\n", "tensor(0.1370, dtype=torch.float64) tensor(0.0329, dtype=torch.float64)\n", "tensor(0.1064, dtype=torch.float64) tensor(0.0205, dtype=torch.float64)\n", "tensor(0.0647, dtype=torch.float64) tensor(0.0052, dtype=torch.float64)\n", "tensor(0.0201, dtype=torch.float64) tensor(0.0155, dtype=torch.float64)\n", "tensor(0.0578, dtype=torch.float64) tensor(0.0268, dtype=torch.float64)\n", "tensor(0.0859, dtype=torch.float64) tensor(0.0327, dtype=torch.float64)\n", "tensor(0.1052, dtype=torch.float64) tensor(0.0344, dtype=torch.float64)\n", "tensor(0.1163, dtype=torch.float64) tensor(0.0319, dtype=torch.float64)\n", "tensor(0.1068, dtype=torch.float64) tensor(0.0257, dtype=torch.float64)\n", "tensor(0.0814, dtype=torch.float64) tensor(0.0165, dtype=torch.float64)\n", "tensor(0.0557, dtype=torch.float64) tensor(0.0028, dtype=torch.float64)\n", "tensor(0.0120, dtype=torch.float64) tensor(0.0180, dtype=torch.float64)\n", "tensor(0.0825, dtype=torch.float64) tensor(0.0273, dtype=torch.float64)\n", "tensor(0.1034, dtype=torch.float64) tensor(0.0305, dtype=torch.float64)\n", "tensor(0.0967, dtype=torch.float64) tensor(0.0315, dtype=torch.float64)\n", "tensor(0.1101, dtype=torch.float64) tensor(0.0296, dtype=torch.float64)\n", 
"tensor(0.1152, dtype=torch.float64) tensor(0.0230, dtype=torch.float64)\n", "tensor(0.0848, dtype=torch.float64) tensor(0.0143, dtype=torch.float64)\n", "tensor(0.0472, dtype=torch.float64) tensor(0.0074, dtype=torch.float64)\n", "tensor(0.0454, dtype=torch.float64) tensor(0.0103, dtype=torch.float64)\n", "tensor(0.0325, dtype=torch.float64) tensor(0.0179, dtype=torch.float64)\n", "tensor(0.0641, dtype=torch.float64) tensor(0.0188, dtype=torch.float64)\n", "tensor(0.0619, dtype=torch.float64) tensor(0.0155, dtype=torch.float64)\n", "tensor(0.0520, dtype=torch.float64) tensor(0.0098, dtype=torch.float64)\n", "tensor(0.0419, dtype=torch.float64) tensor(0.0017, dtype=torch.float64)\n", "tensor(0.0103, dtype=torch.float64) tensor(0.0091, dtype=torch.float64)\n", "tensor(0.0345, dtype=torch.float64) tensor(0.0123, dtype=torch.float64)\n", "tensor(0.0390, dtype=torch.float64) tensor(0.0117, dtype=torch.float64)\n", "tensor(0.0417, dtype=torch.float64) tensor(0.0071, dtype=torch.float64)\n", "tensor(0.0244, dtype=torch.float64) tensor(0.0044, dtype=torch.float64)\n", "tensor(0.0279, dtype=torch.float64) tensor(0.0079, dtype=torch.float64)\n", "tensor(0.0253, dtype=torch.float64) tensor(0.0103, dtype=torch.float64)\n", "tensor(0.0361, dtype=torch.float64) tensor(0.0074, dtype=torch.float64)\n", "tensor(0.0241, dtype=torch.float64) tensor(0.0023, dtype=torch.float64)\n", "tensor(0.0138, dtype=torch.float64) tensor(0.0086, dtype=torch.float64)\n", "tensor(0.0475, dtype=torch.float64) tensor(0.0110, dtype=torch.float64)\n", "tensor(0.0466, dtype=torch.float64) tensor(0.0118, dtype=torch.float64)\n", "tensor(0.0417, dtype=torch.float64) tensor(0.0105, dtype=torch.float64)\n", "tensor(0.0462, dtype=torch.float64) tensor(0.0033, dtype=torch.float64)\n", "tensor(0.0105, dtype=torch.float64) tensor(0.0076, dtype=torch.float64)\n", "tensor(0.0357, dtype=torch.float64) tensor(0.0115, dtype=torch.float64)\n", "tensor(0.0363, dtype=torch.float64) tensor(0.0128, dtype=torch.float64)\n", "tensor(0.0477, dtype=torch.float64) tensor(0.0097, dtype=torch.float64)\n", "tensor(0.0349, dtype=torch.float64) tensor(0.0050, dtype=torch.float64)\n", "tensor(0.0254, dtype=torch.float64) tensor(0.0042, dtype=torch.float64)\n", "tensor(0.0137, dtype=torch.float64) tensor(0.0082, dtype=torch.float64)\n", "tensor(0.0353, dtype=torch.float64) tensor(0.0063, dtype=torch.float64)\n", "tensor(0.0205, dtype=torch.float64) tensor(0.0052, dtype=torch.float64)\n", "tensor(0.0327, dtype=torch.float64) tensor(0.0035, dtype=torch.float64)\n", "tensor(0.0119, dtype=torch.float64) tensor(0.0082, dtype=torch.float64)\n" ] } ], "source": [ "# Optimization (wrapping objective funtion and normalization)\n", "\n", "# Set model parameters\n", "# Parameters are not cloned inside the module on initialization, values will change during optimization!\n", "\n", "ms = torch.tensor(8*[0.0], dtype=torch.float64)\n", "ms, *_ = forward([ms], [(-10, 10)])\n", "\n", "# Define scalar objective function\n", "\n", "def objective(ms):\n", " psix, psiy = chromaticity(ms)\n", " return ((psix - psix_target)**2 + (psiy - psiy_target)**2).sqrt()\n", "\n", "print(objective(solution))\n", "\n", "# Define normalized objective\n", "\n", "objective = normalize(objective, [(-10.0, 10.0)])\n", "\n", "print(objective(*forward([solution], [(-10, 10)])))\n", "\n", "\n", "# Set model (forward returns evaluated objective)\n", "\n", "model = Wrapper(objective, ms)\n", "\n", "# Set optimizer\n", "\n", "optimizer = torch.optim.Adam(model.parameters(), lr=1.0E-3)\n", "\n", "# 
"# Perform optimization\n", "\n", "epochs = 128\n", "for epoch in range(epochs):\n", "\n", " # Evaluate model\n", " error = model()\n", " \n", " # Compute derivatives\n", " error.backward()\n", "\n", " # Perform optimization step\n", " optimizer.step()\n", "\n", " # Set gradient to zero\n", " optimizer.zero_grad()\n", "\n", " # Verbose\n", " knobs, *_ = [*model.parameters()]\n", " knobs, *_ = inverse([knobs], [(-10, 10)])\n", " print(error.detach(), (knobs.detach() - solution).norm())" ] }, { "cell_type": "code", "execution_count": 10, "id": "d01b07e7-488b-4ac7-8943-4bd94ffc0f77", "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "tensor([ 0.7439, -1.2084, 0.7439, -1.2084, 0.7439, -1.2084, 0.7439, -1.2084], dtype=torch.float64)\n", "tensor([ 0.7412, -1.2115, 0.7412, -1.2115, 0.7412, -1.2115, 0.7412, -1.2115], dtype=torch.float64)\n" ] } ], "source": [ "# Compare\n", "\n", "print(solution)\n", "print(*inverse([ms], [(-10, 10)]))" ] } ], "metadata": { "colab": { "collapsed_sections": [ "myt0_gMIOq7b", "5d97819c" ], "name": "03_frequency.ipynb", "provenance": [] }, "kernelspec": { "display_name": "Python 3 (ipykernel)", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.12.1" }, "latex_envs": { "LaTeX_envs_menu_present": true, "autoclose": false, "autocomplete": true, "bibliofile": "biblio.bib", "cite_by": "apalike", "current_citInitial": 1, "eqLabelWithNumbers": true, "eqNumInitial": 1, "hotkeys": { "equation": "Ctrl-E", "itemize": "Ctrl-I" }, "labels_anchors": false, "latex_user_defs": false, "report_style_numbering": false, "user_envs_cfg": false } }, "nbformat": 4, "nbformat_minor": 5 }