ketanmore committed on
Commit
3626765
·
verified ·
1 Parent(s): 0838c13

Upload layout-fine-tune.ipynb

Files changed (1)
  1. layout-fine-tune.ipynb +473 -0
layout-fine-tune.ipynb ADDED
@@ -0,0 +1,473 @@
+ {
+ "cells": [
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "# Loading Packages"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 1,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "import os\n",
+ "os.environ['HF_HOME'] = '/data2/ketan/orc/HF_Cache'\n",
+ "import torch\n",
+ "import torch.nn as nn\n",
+ "import torch.optim as optim\n",
+ "from torch.utils.data import DataLoader\n",
+ "# from transformers import SegformerConfig\n",
+ "# from surya.model.detection.segformer import SegformerForRegressionMask\n",
+ "from surya.input.processing import prepare_image_detection\n",
+ "from surya.model.detection.segformer import load_processor, load_model\n",
+ "from datasets import load_dataset\n",
+ "from tqdm import tqdm\n",
+ "from torch.utils.tensorboard import SummaryWriter\n",
+ "import torch.nn.functional as F\n",
+ "import numpy as np\n",
+ "from surya.layout import parallel_get_regions"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "# Initializing The Dataset And Model"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 2,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "device = torch.device(\"cuda:3\" if torch.cuda.is_available() else \"cpu\")\n",
+ "dataset = load_dataset(\"vikp/publaynet_bench\", split=\"train[:100]\")  # You can choose your own dataset"
+ ]
+ },
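+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Added sanity-check sketch (not part of the original run): each item carries\n",
+ "# the fields used during training -- a PIL page image, pixel-space boxes, and\n",
+ "# integer layout class ids.\n",
+ "sample = dataset[0]\n",
+ "print(sample['image'].size)        # (width, height) of the page image\n",
+ "print(sample['bboxes'][:3])        # [x_min, y_min, x_max, y_max] per region\n",
+ "print(sample['category_ids'][:3])  # layout class id per region"
+ ]
+ },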
+ {
+ "cell_type": "code",
+ "execution_count": 3,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "Loaded detection model vikp/surya_layout2 on device cuda with dtype torch.float16\n"
+ ]
+ },
+ {
+ "data": {
+ "text/plain": [
+ "SegformerForRegressionMask(\n",
+ "  (segformer): SegformerModel(\n",
+ "    (encoder): SegformerEncoder(\n",
+ "      (patch_embeddings): ModuleList(\n",
+ "        (0): SegformerOverlapPatchEmbeddings(\n",
+ "          (proj): Conv2d(3, 64, kernel_size=(7, 7), stride=(4, 4), padding=(3, 3))\n",
+ "          (layer_norm): LayerNorm((64,), eps=1e-05, elementwise_affine=True)\n",
+ "        )\n",
+ "        (1): SegformerOverlapPatchEmbeddings(\n",
+ "          (proj): Conv2d(64, 128, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1))\n",
+ "          (layer_norm): LayerNorm((128,), eps=1e-05, elementwise_affine=True)\n",
+ "        )\n",
+ "        (2): SegformerOverlapPatchEmbeddings(\n",
+ "          (proj): Conv2d(128, 320, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1))\n",
+ "          (layer_norm): LayerNorm((320,), eps=1e-05, elementwise_affine=True)\n",
+ "        )\n",
+ "        (3): SegformerOverlapPatchEmbeddings(\n",
+ "          (proj): Conv2d(320, 512, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1))\n",
+ "          (layer_norm): LayerNorm((512,), eps=1e-05, elementwise_affine=True)\n",
+ "        )\n",
+ "      )\n",
+ "      (block): ModuleList(\n",
+ "        (0): ModuleList(\n",
+ "          (0-2): 3 x SegformerLayer(\n",
+ "            (layer_norm_1): LayerNorm((64,), eps=1e-05, elementwise_affine=True)\n",
+ "            (attention): SegformerAttention(\n",
+ "              (self): SegformerEfficientSelfAttention(\n",
+ "                (query): Linear(in_features=64, out_features=64, bias=True)\n",
+ "                (key): Linear(in_features=64, out_features=64, bias=True)\n",
+ "                (value): Linear(in_features=64, out_features=64, bias=True)\n",
+ "                (sr): Conv2d(64, 64, kernel_size=(8, 8), stride=(8, 8))\n",
+ "                (layer_norm): LayerNorm((64,), eps=1e-05, elementwise_affine=True)\n",
+ "              )\n",
+ "              (output): SegformerSelfOutput(\n",
+ "                (dense): Linear(in_features=64, out_features=64, bias=True)\n",
+ "              )\n",
+ "            )\n",
+ "            (layer_norm_2): LayerNorm((64,), eps=1e-05, elementwise_affine=True)\n",
+ "            (mlp): SegformerMixFFN(\n",
+ "              (dense1): Linear(in_features=64, out_features=256, bias=True)\n",
+ "              (dwconv): SegformerDWConv(\n",
+ "                (dwconv): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=256)\n",
+ "              )\n",
+ "              (intermediate_act_fn): GELUActivation()\n",
+ "              (dense2): Linear(in_features=256, out_features=64, bias=True)\n",
+ "            )\n",
+ "          )\n",
+ "        )\n",
+ "        (1): ModuleList(\n",
+ "          (0-3): 4 x SegformerLayer(\n",
+ "            (layer_norm_1): LayerNorm((128,), eps=1e-05, elementwise_affine=True)\n",
+ "            (attention): SegformerAttention(\n",
+ "              (self): SegformerEfficientSelfAttention(\n",
+ "                (query): Linear(in_features=128, out_features=128, bias=True)\n",
+ "                (key): Linear(in_features=128, out_features=128, bias=True)\n",
+ "                (value): Linear(in_features=128, out_features=128, bias=True)\n",
+ "                (sr): Conv2d(128, 128, kernel_size=(4, 4), stride=(4, 4))\n",
+ "                (layer_norm): LayerNorm((128,), eps=1e-05, elementwise_affine=True)\n",
+ "              )\n",
+ "              (output): SegformerSelfOutput(\n",
+ "                (dense): Linear(in_features=128, out_features=128, bias=True)\n",
+ "              )\n",
+ "            )\n",
+ "            (layer_norm_2): LayerNorm((128,), eps=1e-05, elementwise_affine=True)\n",
+ "            (mlp): SegformerMixFFN(\n",
+ "              (dense1): Linear(in_features=128, out_features=512, bias=True)\n",
+ "              (dwconv): SegformerDWConv(\n",
+ "                (dwconv): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=512)\n",
+ "              )\n",
+ "              (intermediate_act_fn): GELUActivation()\n",
+ "              (dense2): Linear(in_features=512, out_features=128, bias=True)\n",
+ "            )\n",
+ "          )\n",
+ "        )\n",
+ "        (2): ModuleList(\n",
+ "          (0-8): 9 x SegformerLayer(\n",
+ "            (layer_norm_1): LayerNorm((320,), eps=1e-05, elementwise_affine=True)\n",
+ "            (attention): SegformerAttention(\n",
+ "              (self): SegformerEfficientSelfAttention(\n",
+ "                (query): Linear(in_features=320, out_features=320, bias=True)\n",
+ "                (key): Linear(in_features=320, out_features=320, bias=True)\n",
+ "                (value): Linear(in_features=320, out_features=320, bias=True)\n",
+ "                (sr): Conv2d(320, 320, kernel_size=(2, 2), stride=(2, 2))\n",
+ "                (layer_norm): LayerNorm((320,), eps=1e-05, elementwise_affine=True)\n",
+ "              )\n",
+ "              (output): SegformerSelfOutput(\n",
+ "                (dense): Linear(in_features=320, out_features=320, bias=True)\n",
+ "              )\n",
+ "            )\n",
+ "            (layer_norm_2): LayerNorm((320,), eps=1e-05, elementwise_affine=True)\n",
+ "            (mlp): SegformerMixFFN(\n",
+ "              (dense1): Linear(in_features=320, out_features=1280, bias=True)\n",
+ "              (dwconv): SegformerDWConv(\n",
+ "                (dwconv): Conv2d(1280, 1280, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=1280)\n",
+ "              )\n",
+ "              (intermediate_act_fn): GELUActivation()\n",
+ "              (dense2): Linear(in_features=1280, out_features=320, bias=True)\n",
+ "            )\n",
+ "          )\n",
+ "        )\n",
+ "        (3): ModuleList(\n",
+ "          (0-2): 3 x SegformerLayer(\n",
+ "            (layer_norm_1): LayerNorm((512,), eps=1e-05, elementwise_affine=True)\n",
+ "            (attention): SegformerAttention(\n",
+ "              (self): SegformerEfficientSelfAttention(\n",
+ "                (query): Linear(in_features=512, out_features=512, bias=True)\n",
+ "                (key): Linear(in_features=512, out_features=512, bias=True)\n",
+ "                (value): Linear(in_features=512, out_features=512, bias=True)\n",
+ "              )\n",
+ "              (output): SegformerSelfOutput(\n",
+ "                (dense): Linear(in_features=512, out_features=512, bias=True)\n",
+ "              )\n",
+ "            )\n",
+ "            (layer_norm_2): LayerNorm((512,), eps=1e-05, elementwise_affine=True)\n",
+ "            (mlp): SegformerMixFFN(\n",
+ "              (dense1): Linear(in_features=512, out_features=2048, bias=True)\n",
+ "              (dwconv): SegformerDWConv(\n",
+ "                (dwconv): Conv2d(2048, 2048, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=2048)\n",
+ "              )\n",
+ "              (intermediate_act_fn): GELUActivation()\n",
+ "              (dense2): Linear(in_features=2048, out_features=512, bias=True)\n",
+ "            )\n",
+ "          )\n",
+ "        )\n",
+ "      )\n",
+ "      (layer_norm): ModuleList(\n",
+ "        (0): LayerNorm((64,), eps=1e-05, elementwise_affine=True)\n",
+ "        (1): LayerNorm((128,), eps=1e-05, elementwise_affine=True)\n",
+ "        (2): LayerNorm((320,), eps=1e-05, elementwise_affine=True)\n",
+ "        (3): LayerNorm((512,), eps=1e-05, elementwise_affine=True)\n",
+ "      )\n",
+ "    )\n",
+ "  )\n",
+ "  (decode_head): SegformerForMaskDecodeHead(\n",
+ "    (linear_c): ModuleList(\n",
+ "      (0): SegformerForMaskMLP(\n",
+ "        (proj): Linear(in_features=64, out_features=192, bias=True)\n",
+ "      )\n",
+ "      (1): SegformerForMaskMLP(\n",
+ "        (proj): Linear(in_features=128, out_features=192, bias=True)\n",
+ "      )\n",
+ "      (2): SegformerForMaskMLP(\n",
+ "        (proj): Linear(in_features=320, out_features=192, bias=True)\n",
+ "      )\n",
+ "      (3): SegformerForMaskMLP(\n",
+ "        (proj): Linear(in_features=512, out_features=192, bias=True)\n",
+ "      )\n",
+ "    )\n",
+ "    (linear_fuse): Conv2d(768, 768, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
+ "    (batch_norm): BatchNorm2d(768, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
+ "    (activation): ReLU()\n",
+ "    (dropout): Dropout(p=0.1, inplace=False)\n",
+ "    (classifier): Conv2d(768, 12, kernel_size=(1, 1), stride=(1, 1))\n",
+ "  )\n",
+ ")"
+ ]
+ },
+ "execution_count": 3,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "model = load_model(\"vikp/surya_layout2\").to(device)\n",
+ "model.to(torch.float32)  # checkpoint loads in float16; cast to float32 for training"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 4,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# NOTE: this re-initializes every Conv2d/Linear weight, discarding the\n",
+ "# pretrained values loaded above. Skip this cell to fine-tune the pretrained\n",
+ "# model rather than train from scratch.\n",
+ "def initialize_weights(model):\n",
+ "    for module in model.modules():\n",
+ "        if isinstance(module, torch.nn.Conv2d) or isinstance(module, torch.nn.Linear):\n",
+ "            torch.nn.init.xavier_uniform_(module.weight)\n",
+ "            if module.bias is not None:\n",
+ "                torch.nn.init.zeros_(module.bias)\n",
+ "\n",
+ "initialize_weights(model)\n"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "# Helper Functions, Loss Function And Optimizer"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 5,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "optimizer = optim.Adam(model.parameters(), lr=1e-4)\n",
+ "log_dir = \"logs\"\n",
+ "checkpoint_dir = \"checkpoints\"\n",
+ "os.makedirs(log_dir, exist_ok=True)\n",
+ "os.makedirs(checkpoint_dir, exist_ok=True)\n",
+ "writer = SummaryWriter(log_dir=log_dir)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 6,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "def logits_to_mask(logits, labels, bboxes, original_size=(1200, 1200)):\n",
+ "    # Rasterize the ground-truth boxes into a per-class 0/1 target mask at the\n",
+ "    # spatial resolution of the model's logits.\n",
+ "    batch_size, num_classes, height, width = logits.shape\n",
+ "    mask = torch.zeros((batch_size, num_classes, height, width), dtype=torch.float32).to(logits.device)\n",
+ "\n",
+ "    for bbox, class_id in zip(bboxes, labels):\n",
+ "        x_min, y_min, x_max, y_max = bbox\n",
+ "\n",
+ "        # Scale pixel coordinates from the processed image size to the logits grid.\n",
+ "        x_min = int(x_min * width / original_size[0])\n",
+ "        y_min = int(y_min * height / original_size[1])\n",
+ "        x_max = int(x_max * width / original_size[0])\n",
+ "        y_max = int(y_max * height / original_size[1])\n",
+ "\n",
+ "        x_min = max(0, min(x_min, width - 1))\n",
+ "        y_min = max(0, min(y_min, height - 1))\n",
+ "        x_max = max(0, min(x_max, width - 1))\n",
+ "        y_max = max(0, min(y_max, height - 1))\n",
+ "\n",
+ "        if x_min < x_max and y_min < y_max:\n",
+ "            mask[:, class_id, y_min:y_max, x_min:x_max] = torch.maximum(\n",
+ "                mask[:, class_id, y_min:y_max, x_min:x_max], torch.tensor(1.0).to(logits.device)\n",
+ "            )\n",
+ "        else:\n",
+ "            print(f\"Invalid bounding box after adjustment: {bbox}, adjusted to: {(x_min, y_min, x_max, y_max)}\")\n",
+ "\n",
+ "    return mask\n",
+ "\n",
+ "\n",
+ "def loss_function(logits, mask):\n",
+ "    # Regress the raw logits directly onto the 0/1 target mask.\n",
+ "    loss_fn = torch.nn.MSELoss()\n",
+ "    loss = loss_fn(logits, mask)\n",
+ "    return loss"
+ ]
+ },
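+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Added worked example of the scaling above (a sketch; the shapes here are\n",
+ "# illustrative -- the real logits resolution depends on the model's output stride).\n",
+ "# For a 1200x1200 processed image and logits of shape (1, 12, 300, 300), a box\n",
+ "# [120, 240, 600, 600] lands at [30, 60, 150, 150] on the logits grid.\n",
+ "dummy_logits = torch.zeros(1, 12, 300, 300)\n",
+ "m = logits_to_mask(dummy_logits, labels=[2], bboxes=[[120, 240, 600, 600]])\n",
+ "print(m[0, 2].sum().item())  # 90 rows * 120 cols = 10800 cells set to 1"
+ ]
+ },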
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "# Fine-Tuning Process"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 7,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stderr",
+ "output_type": "stream",
+ "text": [
+ "Epoch 1/5: 100%|██████████| 100/100 [01:30<00:00, 1.11it/s]\n"
+ ]
+ },
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "Average Loss for Epoch 1: 0.0533\n"
+ ]
+ },
+ {
+ "name": "stderr",
+ "output_type": "stream",
+ "text": [
+ "Epoch 2/5: 100%|██████████| 100/100 [01:30<00:00, 1.11it/s]\n"
+ ]
+ },
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "Average Loss for Epoch 2: 0.0189\n"
+ ]
+ },
+ {
+ "name": "stderr",
+ "output_type": "stream",
+ "text": [
+ "Epoch 3/5: 35%|███▌ | 35/100 [00:31<00:58, 1.12it/s]"
+ ]
+ }
+ ],
+ "source": [
+ "num_epochs = 5\n",
+ "processor = load_processor()  # load once instead of once per training item\n",
+ "\n",
+ "for param in model.parameters():\n",
+ "    param.requires_grad = True\n",
+ "\n",
+ "model.train()\n",
+ "# Anomaly detection helps localize NaNs but slows training; disable once stable.\n",
+ "with torch.autograd.set_detect_anomaly(True):\n",
+ "    for epoch in range(num_epochs):\n",
+ "        avg_loss = 0.0\n",
+ "\n",
+ "        for idx, item in enumerate(tqdm(dataset, desc=f\"Epoch {epoch + 1}/{num_epochs}\")):\n",
+ "            images = [prepare_image_detection(img=item['image'], processor=processor)]\n",
+ "            images = torch.stack(images, dim=0).to(model.dtype).to(model.device)\n",
+ "\n",
+ "            optimizer.zero_grad()\n",
+ "            outputs = model(pixel_values=images)\n",
+ "            logits = outputs.logits\n",
+ "\n",
+ "            bboxes = item['bboxes']\n",
+ "            labels = item['category_ids']\n",
+ "            logits = torch.clamp(logits, min=-1e6, max=1e6)\n",
+ "            mask = logits_to_mask(logits, labels, bboxes)\n",
+ "\n",
+ "            logits = logits.to(torch.float32)\n",
+ "            mask = mask.to(torch.float32)\n",
+ "            loss = loss_function(logits, mask)\n",
+ "\n",
+ "            loss.backward()\n",
+ "\n",
+ "            for name, param in model.named_parameters():\n",
+ "                if param.grad is not None and torch.isnan(param.grad).any():\n",
+ "                    print(f\"NaN detected in gradients of {name}\")\n",
+ "                    break\n",
+ "\n",
+ "            torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=1.0)\n",
+ "            optimizer.step()\n",
+ "\n",
+ "            # Exponential moving average of the per-item loss for smoother logging.\n",
+ "            avg_loss = 0.9 * avg_loss + 0.1 * loss.item() if idx > 0 else loss.item()\n",
+ "\n",
+ "        writer.add_scalar('Training Loss', avg_loss, epoch + 1)\n",
+ "        print(f\"Average Loss for Epoch {epoch + 1}: {avg_loss:.4f}\")\n",
+ "\n",
+ "        torch.save(model.state_dict(), os.path.join(checkpoint_dir, f\"model_epoch_{epoch + 1}.pth\"))\n"
+ ]
+ },
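+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Added qualitative-check sketch (not part of the original run). Training\n",
+ "# regresses logits onto 0/1 masks, so thresholding the raw logits at 0.5\n",
+ "# gives a rough per-class region mask for a single page.\n",
+ "model.eval()\n",
+ "with torch.no_grad():\n",
+ "    img = prepare_image_detection(img=dataset[0]['image'], processor=processor)\n",
+ "    pred = model(pixel_values=img.unsqueeze(0).to(model.dtype).to(model.device)).logits\n",
+ "binary = (pred > 0.5).squeeze(0)  # (num_classes, H, W) boolean mask\n",
+ "print(binary.shape, binary.flatten(1).any(dim=1))  # which classes fired anywhere"
+ ]
+ },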
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "# Loading The Checkpoint"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "<All keys matched successfully>"
+ ]
+ },
+ "execution_count": 8,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "checkpoint_path = '/data2/ketan/orc/surya-layout-fine-tune/checkpoints/model_epoch_5.pth'\n",
+ "state_dict = torch.load(checkpoint_path, weights_only=True)\n",
+ "\n",
+ "model.load_state_dict(state_dict)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "model.to('cpu')\n",
+ "model.save_pretrained(\"fine-tuned-surya-model-layout\")"
+ ]
+ },
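+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Added reload sketch: load_model is assumed to accept a local checkpoint path\n",
+ "# via its checkpoint argument -- verify against your installed surya version.\n",
+ "reloaded = load_model(checkpoint=\"fine-tuned-surya-model-layout\").to(device)\n",
+ "reloaded.to(torch.float32)"
+ ]
+ }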
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": "Python 3",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.10.14"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 2
+ }