veb-101 committed
Commit d50a6b4 · 1 Parent(s): 47fbff8

Model file update

load_lightning_SD_to_Usual_SD.ipynb CHANGED
@@ -124,7 +124,7 @@
  {
  "data": {
  "text/html": [
- "Run data is saved locally in <code>c:\\Users\\vaibh\\OneDrive\\Desktop\\Work\\BigVision\\BLOG_POSTS\\Medical_segmentation\\GRADIO_APP\\UWMGI_Medical_Image_Segmentation\\wandb\\run-20230719_111329-pfqbikbe</code>"
+ "Run data is saved locally in <code>c:\\Users\\vaibh\\OneDrive\\Desktop\\Work\\BigVision\\BLOG_POSTS\\Medical_segmentation\\GRADIO_APP\\UWMGI_Medical_Image_Segmentation\\wandb\\run-20230719_204221-w5qu5rqw</code>"
  ],
  "text/plain": [
  "<IPython.core.display.HTML object>"
@@ -136,7 +136,7 @@
  {
  "data": {
  "text/html": [
- "Syncing run <strong><a href='https://wandb.ai/veb-101/UWMGI_Medical_Image_Segmentation/runs/pfqbikbe' target=\"_blank\">generous-music-2</a></strong> to <a href='https://wandb.ai/veb-101/UWMGI_Medical_Image_Segmentation' target=\"_blank\">Weights & Biases</a> (<a href='https://wandb.me/run' target=\"_blank\">docs</a>)<br/>"
+ "Syncing run <strong><a href='https://wandb.ai/veb-101/UWMGI_Medical_Image_Segmentation/runs/w5qu5rqw' target=\"_blank\">ethereal-bush-2</a></strong> to <a href='https://wandb.ai/veb-101/UWMGI_Medical_Image_Segmentation' target=\"_blank\">Weights & Biases</a> (<a href='https://wandb.me/run' target=\"_blank\">docs</a>)<br/>"
  ],
  "text/plain": [
  "<IPython.core.display.HTML object>"
@@ -160,7 +160,7 @@
  {
  "data": {
  "text/html": [
- " View run at <a href='https://wandb.ai/veb-101/UWMGI_Medical_Image_Segmentation/runs/pfqbikbe' target=\"_blank\">https://wandb.ai/veb-101/UWMGI_Medical_Image_Segmentation/runs/pfqbikbe</a>"
+ " View run at <a href='https://wandb.ai/veb-101/UWMGI_Medical_Image_Segmentation/runs/w5qu5rqw' target=\"_blank\">https://wandb.ai/veb-101/UWMGI_Medical_Image_Segmentation/runs/w5qu5rqw</a>"
  ],
  "text/plain": [
  "<IPython.core.display.HTML object>"
@@ -173,16 +173,17 @@
  "name": "stderr",
  "output_type": "stream",
  "text": [
- "\u001b[34m\u001b[1mwandb\u001b[0m: Downloading large artifact model-jsr2fn8v:v0, 977.89MB. 1 files... \n",
+ "\u001b[34m\u001b[1mwandb\u001b[0m: Downloading large artifact model-fpgquxev:v0, 977.89MB. 1 files... \n",
  "\u001b[34m\u001b[1mwandb\u001b[0m: 1 of 1 files downloaded. \n",
- "Done. 0:0:3.9\n"
+ "Done. 0:1:5.3\n"
  ]
  }
  ],
  "source": [
  "import wandb\n",
+ "\n",
  "run = wandb.init()\n",
- "artifact = run.use_artifact(r'veb-101/UM_medical_segmentation/model-jsr2fn8v:v0', type='model')\n",
+ "artifact = run.use_artifact(\"veb-101/UM_medical_segmentation/model-fpgquxev:v0\", type=\"model\")\n",
  "artifact_dir = artifact.download()"
  ]
  },
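(The hunk above re-points the download cell at the new W&B artifact model-fpgquxev:v0 and switches the artifact path to double quotes. For readers following along, here is a minimal, self-contained sketch of the download-and-convert pattern this notebook implements; the checkpoint filename model.ckpt and the "model." key prefix are assumptions typical of Lightning checkpoints, not taken from this diff.)

```python
import os

import torch
import wandb

# Pull the versioned model artifact from W&B; download() returns the
# local directory the artifact files were written to.
run = wandb.init()
artifact = run.use_artifact("veb-101/UM_medical_segmentation/model-fpgquxev:v0", type="model")
artifact_dir = artifact.download()

# A Lightning checkpoint stores the weights under the "state_dict" key,
# with each parameter name prefixed by the attribute that held the module
# inside the LightningModule (commonly "model."). Stripping that prefix
# yields the "usual" state dict the bare model can load.
# Filename and prefix below are illustrative assumptions.
ckpt = torch.load(os.path.join(artifact_dir, "model.ckpt"), map_location="cpu")
plain_state_dict = {k.removeprefix("model."): v for k, v in ckpt["state_dict"].items()}
```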
@@ -280,25 +281,32 @@
  "metadata": {},
  "outputs": [],
  "source": [
- "torch.save(model.state_dict(), \"Segformer_best_state_dict.ckpt\")"
+ "# torch.save(model.state_dict(), \"Segformer_best_state_dict.ckpt\")"
  ]
  },
  {
  "cell_type": "code",
- "execution_count": null,
+ "execution_count": 10,
  "metadata": {},
  "outputs": [],
  "source": [
  "model.save_pretrained(\"segformer_trained_weights\")"
  ]
  },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "To load the saved model, we simply need to pass the path to the directory \"segformer_trained_weights\"."
+ ]
+ },
  {
  "cell_type": "code",
  "execution_count": null,
  "metadata": {},
  "outputs": [],
  "source": [
- "model = get_model(model_path=os.path.join(os.getcwd(), \"segformer_trained_weights\"), num_classes=Configs.NUM_CLASSES)"
+ "# model = get_model(model_path=os.path.join(os.getcwd(), \"segformer_trained_weights\"), num_classes=Configs.NUM_CLASSES)"
  ]
  }
  ],
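(The hunk above comments out the raw torch.save / get_model pair in favour of the Hugging Face save_pretrained directory format, and adds a markdown cell describing the reload step. A minimal sketch of that round trip, assuming the model is a transformers SegformerForSemanticSegmentation; the backbone checkpoint and class count below are placeholders, since the notebook's get_model helper and Configs are not shown in this diff.)

```python
from transformers import SegformerForSemanticSegmentation

# Build a model (placeholder backbone and num_labels; the notebook's own
# get_model/Configs supply the real values), then persist it as a directory
# holding config.json plus pytorch_model.bin -- the LFS file updated below.
model = SegformerForSemanticSegmentation.from_pretrained("nvidia/mit-b0", num_labels=4)
model.save_pretrained("segformer_trained_weights")

# Reloading only needs the directory path, as the new markdown cell notes;
# no W&B access is required once the weights exist locally.
model = SegformerForSemanticSegmentation.from_pretrained("segformer_trained_weights")
```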
 
segformer_trained_weights/pytorch_model.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:9d4e3ff92a26341874655b112c2dc458cbb5ecf6d03f078dea8dd92ab0639e4a
+oid sha256:423ff60b52bdbc5c0ea00f1a5648c42eccf2bdfbab550304bc95e28eb594cf0e
 size 256300245