{ "cells": [ { "cell_type": "code", "execution_count": null, "id": "1e99de7a", "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "--2024-06-20 13:18:56-- https://docs-assets.developer.apple.com/ml-research/datasets/mobileclip/mobileclip_s0.pt\n", "Resolving docs-assets.developer.apple.com (docs-assets.developer.apple.com)... 17.253.73.203, 17.253.73.201\n", "Connecting to docs-assets.developer.apple.com (docs-assets.developer.apple.com)|17.253.73.203|:443... connected.\n", "HTTP request sent, awaiting response... 416 Requested Range Not Satisfiable\n", "\n", " The file is already fully retrieved; nothing to do.\n", "\n", "--2024-06-20 13:18:58-- https://raw.githubusercontent.com/apple/ml-mobileclip/main/mobileclip/configs/mobileclip_s0.json\n", "Resolving raw.githubusercontent.com (raw.githubusercontent.com)... 185.199.108.133, 185.199.109.133, 185.199.110.133, ...\n", "Connecting to raw.githubusercontent.com (raw.githubusercontent.com)|185.199.108.133|:443... connected.\n", "HTTP request sent, awaiting response... 416 Range Not Satisfiable\n", "\n", " The file is already fully retrieved; nothing to do.\n", "\n" ] } ], "source": [ "#!git clone https://huggingface.co/spaces/depth-anything/Depth-Anything-V2\n", "#!pip install -r Depth-Anything-V2/requirements.txt\n", "#!pip install -q --upgrade coremltools" ] }, { "cell_type": "code", "execution_count": 1, "id": "d6cb8a61", "metadata": {}, "outputs": [], "source": [ "import os\n", "os.environ['PYTORCH_ENABLE_MPS_FALLBACK'] = '1'" ] }, { "cell_type": "code", "execution_count": 2, "id": "801db364", "metadata": {}, "outputs": [ { "name": "stderr", "output_type": "stream", "text": [ "scikit-learn version 1.6.0 is not supported. Minimum required version: 0.17. Maximum required version: 1.5.1. Disabling scikit-learn conversion API.\n" ] } ], "source": [ "import torch\n", "import coremltools as ct\n", "import numpy as np\n", "from PIL import Image\n", "import tempfile\n", "from huggingface_hub import hf_hub_download\n", "import sys\n", "sys.path.append('./Depth-Anything-V2')\n", "\n" ] }, { "cell_type": "code", "execution_count": 15, "id": "73882c02", "metadata": {}, "outputs": [], "source": [ "from depth_anything_v2.dpt import DepthAnythingV2\n", "from depth_anything_v2.util.transform import Resize, NormalizeImage, PrepareForNet\n", "\n", "import torch.nn.functional as F" ] }, { "cell_type": "markdown", "id": "26f7dcff", "metadata": {}, "source": [ "# 1. 
Load Depth-Anything-V2's vitl checkpoint" ] }, { "cell_type": "code", "execution_count": 4, "id": "e67aa722", "metadata": {}, "outputs": [], "source": [ "DEVICE = 'cuda' if torch.cuda.is_available() else 'mps' if torch.backends.mps.is_available() else 'cpu'\n", "model_configs = {\n", " 'vits': {'encoder': 'vits', 'features': 64, 'out_channels': [48, 96, 192, 384]},\n", " 'vitb': {'encoder': 'vitb', 'features': 128, 'out_channels': [96, 192, 384, 768]},\n", " 'vitl': {'encoder': 'vitl', 'features': 256, 'out_channels': [256, 512, 1024, 1024]},\n", " 'vitg': {'encoder': 'vitg', 'features': 384, 'out_channels': [1536, 1536, 1536, 1536]}\n", "}\n", "encoder2name = {\n", " 'vits': 'Small',\n", " 'vitb': 'Base',\n", " 'vitl': 'Large',\n", " 'vitg': 'Giant', # we are undergoing company review procedures to release our giant model checkpoint\n", "}\n", "encoder = 'vitl'\n", "model_name = encoder2name[encoder]\n", "model = DepthAnythingV2(**model_configs[encoder])\n", "filepath = hf_hub_download(repo_id=f\"depth-anything/Depth-Anything-V2-{model_name}\", filename=f\"depth_anything_v2_{encoder}.pth\", repo_type=\"model\")\n", "state_dict = torch.load(filepath, map_location=\"cpu\")\n", "model.load_state_dict(state_dict)\n", "model = model.to(DEVICE).eval()" ] }, { "cell_type": "code", "execution_count": 8, "id": "a632e6b4", "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "(3024, 4032, 3)\n" ] } ], "source": [ "image = Image.open(\"./sample_images/IMG_4061.jpeg\")\n", "img = np.array(image)\n", "print(img.shape)\n", "h, w = img.shape[:2]\n", "depth = model.infer_image(img)\n", "depth = (depth - depth.min()) / (depth.max() - depth.min()) * 255.0\n", "depth = depth.astype(np.uint8)\n", "depth_image = Image.fromarray(depth)\n", "depth_image.save(\"depth_image.jpg\")" ] }, { "cell_type": "code", "execution_count": 36, "id": "77477217", "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "(3024, 4032, 3)\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "/Users/dadler/Projects/Glide/ai-bots/depth/./Depth-Anything-V2/depth_anything_v2/dinov2_layers/patch_embed.py:73: TracerWarning: Converting a tensor to a Python boolean might cause the trace to be incorrect. We can't record the data flow of Python values, so this value will be treated as a constant in the future. This means that the trace might not generalize to other inputs!\n", " assert H % patch_H == 0, f\"Input image height {H} is not a multiple of patch height {patch_H}\"\n", "/Users/dadler/Projects/Glide/ai-bots/depth/./Depth-Anything-V2/depth_anything_v2/dinov2_layers/patch_embed.py:74: TracerWarning: Converting a tensor to a Python boolean might cause the trace to be incorrect. We can't record the data flow of Python values, so this value will be treated as a constant in the future. This means that the trace might not generalize to other inputs!\n", " assert W % patch_W == 0, f\"Input image width {W} is not a multiple of patch width: {patch_W}\"\n", "/Users/dadler/Projects/Glide/ai-bots/depth/./Depth-Anything-V2/depth_anything_v2/dinov2.py:183: TracerWarning: Converting a tensor to a Python boolean might cause the trace to be incorrect. We can't record the data flow of Python values, so this value will be treated as a constant in the future. 
This means that the trace might not generalize to other inputs!\n", " if npatch == N and w == h:\n", "/Users/dadler/Projects/Glide/ai-bots/depth/./Depth-Anything-V2/depth_anything_v2/dpt.py:147: TracerWarning: Converting a tensor to a Python integer might cause the trace to be incorrect. We can't record the data flow of Python values, so this value will be treated as a constant in the future. This means that the trace might not generalize to other inputs!\n", " out = F.interpolate(out, (int(patch_h * 14), int(patch_w * 14)), mode=\"bilinear\", align_corners=True)\n" ] } ], "source": [ "original_image = Image.open(\"./sample_images/IMG_4061.jpeg\")\n", "original_img = np.array(original_image)\n", "print(original_img.shape)\n", "original_h, original_w = original_img.shape[:2]\n", "input_size = 518\n", "image = original_image.resize((input_size, input_size), Image.Resampling.BILINEAR)\n", "img = np.array(image)\n", "input_image, (h, w) = model.image2tensor(img, input_size)\n", "input_image = input_image.to(DEVICE)\n", "with torch.no_grad():\n", "    depth = model(input_image)\n", "    depth = F.interpolate(depth[:, None], (h, w), mode=\"bilinear\", align_corners=True)[0, 0]\n", "    depth = (depth - depth.min()) / (depth.max() - depth.min()) * 255.0\n", "    depth = depth.cpu().numpy().astype(np.uint8)\n", "depth_image = Image.fromarray(depth).resize((original_w, original_h), Image.Resampling.BILINEAR)\n", "depth_image.save(\"depth_image_2.jpg\")\n", "\n", "traced_model = torch.jit.trace(model, input_image)\n" ] }, { "cell_type": "code", "execution_count": 37, "id": "42632870", "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Traced PyTorch ImageEncoder ckpt out for jpg:\n", ">>> tensor([[3.8735, 3.9076, 4.0226, ..., 1.8554, 1.7260, 2.5633],\n", " [4.3636, 4.1100, 4.1624, ..., 2.1774, 2.2929, 2.2913],\n", " [4.3914, 4.2280, 4.2901, ..., 2.3076, 2.3133, 2.2698],\n", " ...,\n", " [5.8771, 5.8192, 5.8249, ..., 3.9578, 3.9079, 3.7710],\n", " [6.1631, 6.1475, 6.1688, ..., 4.2481, 4.2320, 4.0410],\n", " [6.4769, 6.4864, 6.4850, ..., 4.6766, 4.6218, 4.4442]],\n", " device='mps:0', grad_fn=)\n" ] } ], "source": [ "example_output = traced_model(input_image)\n", "print(\"Traced PyTorch ImageEncoder ckpt out for jpg:\\n>>>\", example_output[0, :10])" ] }, { "cell_type": "markdown", "id": "3c0d9c70", "metadata": {}, "source": [ "You can see that there is some loss of precision compared with the original model's output, but it is still acceptable; the next cell sketches a quick numerical check." ] },
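{ "cell_type": "markdown", "id": "2f9c4e1a", "metadata": {}, "source": [ "Optional sanity check (a sketch, not part of the original conversion flow): compare the eager model and the traced module on the same `input_image` tensor. The names `eager_out`, `traced_out`, and `max_abs_diff` are illustrative." ] }, { "cell_type": "code", "execution_count": null, "id": "8b3d5a6f", "metadata": {}, "outputs": [], "source": [ "# Optional sketch: quantify the eager vs. traced difference on the same input.\n", "with torch.no_grad():\n", "    eager_out = model(input_image)\n", "    traced_out = traced_model(input_image)\n", "max_abs_diff = (eager_out - traced_out).abs().max().item()\n", "print(f\"max |eager - traced| = {max_abs_diff:.6f}\")" ] },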
Export ImageEncoder" ] }, { "cell_type": "code", "execution_count": 38, "id": "ef7af5c5", "metadata": {}, "outputs": [], "source": [ "image_means = [0.485, 0.456, 0.406]\n", "image_stds = [0.229, 0.224, 0.225]" ] }, { "cell_type": "code", "execution_count": 73, "id": "8f66a99c", "metadata": {}, "outputs": [], "source": [ "import torchvision.transforms as transforms\n", "\n", "class Wrapper(torch.nn.Module): \n", " def __init__(self, model):\n", " super().__init__()\n", " _means = image_means\n", " _stds = image_stds\n", " self.model = model \n", " self.stds = torch.tensor(_stds).half()[:,None,None]\n", " self.means = torch.tensor(_means).half()[:,None,None]\n", "\n", " transform_model = torch.nn.Sequential(\n", " transforms.Normalize(mean=image_means, std=image_stds)\n", " )\n", "\n", " def forward(self, input): \n", " input = input/255.0\n", " intput = self.transform_model(input)\n", " output = self.model(input)\n", " output = (output - output.min()) / (output.max() - output.min()) \n", " # Fix \"Image output, 'depthOutput', must have rank 4. Instead it has rank 3\"\n", " output = output.unsqueeze(0)\n", " # Fix \"Shape of the RGB/BGR image output, 'depthOutput', must be of kind (1, 3, H, W), i.e., first two dimensions must be (1, 3), instead they are: (1, 1)\"ArithmeticError\n", " output = output.repeat(1, 3, 1, 1)\n", " output = output * 255.0\n", " return output\n", "\n", "# Instantiate the Wrapper model passing the original PyTorch FCN model\n", "wrapped_model = Wrapper(traced_model)" ] }, { "cell_type": "code", "execution_count": 74, "id": "b3da3350", "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "wrapped PyTorch ImageEncoder ckpt out for jpg:\n", ">>> tensor([[[1.3479e+00, 1.3024e+00, 1.3246e+00, ..., 3.6170e-02,\n", " 1.2884e-01, 4.5228e-01],\n", " [1.5584e+00, 1.4481e+00, 1.4059e+00, ..., 3.4862e-01,\n", " 3.9270e-01, 3.3447e-01],\n", " [1.6099e+00, 1.5023e+00, 1.5238e+00, ..., 3.6392e-01,\n", " 3.8963e-01, 4.5296e-01],\n", " ...,\n", " [1.0288e+02, 1.0318e+02, 1.0304e+02, ..., 1.0168e+02,\n", " 1.0194e+02, 1.0191e+02],\n", " [1.0353e+02, 1.0333e+02, 1.0334e+02, ..., 1.0216e+02,\n", " 1.0219e+02, 1.0212e+02],\n", " [1.0339e+02, 1.0290e+02, 1.0300e+02, ..., 1.0180e+02,\n", " 1.0220e+02, 1.0189e+02]],\n", "\n", " [[1.3479e+00, 1.3024e+00, 1.3246e+00, ..., 3.6170e-02,\n", " 1.2884e-01, 4.5228e-01],\n", " [1.5584e+00, 1.4481e+00, 1.4059e+00, ..., 3.4862e-01,\n", " 3.9270e-01, 3.3447e-01],\n", " [1.6099e+00, 1.5023e+00, 1.5238e+00, ..., 3.6392e-01,\n", " 3.8963e-01, 4.5296e-01],\n", " ...,\n", " [1.0288e+02, 1.0318e+02, 1.0304e+02, ..., 1.0168e+02,\n", " 1.0194e+02, 1.0191e+02],\n", " [1.0353e+02, 1.0333e+02, 1.0334e+02, ..., 1.0216e+02,\n", " 1.0219e+02, 1.0212e+02],\n", " [1.0339e+02, 1.0290e+02, 1.0300e+02, ..., 1.0180e+02,\n", " 1.0220e+02, 1.0189e+02]],\n", "\n", " [[1.3479e+00, 1.3024e+00, 1.3246e+00, ..., 3.6170e-02,\n", " 1.2884e-01, 4.5228e-01],\n", " [1.5584e+00, 1.4481e+00, 1.4059e+00, ..., 3.4862e-01,\n", " 3.9270e-01, 3.3447e-01],\n", " [1.6099e+00, 1.5023e+00, 1.5238e+00, ..., 3.6392e-01,\n", " 3.8963e-01, 4.5296e-01],\n", " ...,\n", " [1.0288e+02, 1.0318e+02, 1.0304e+02, ..., 1.0168e+02,\n", " 1.0194e+02, 1.0191e+02],\n", " [1.0353e+02, 1.0333e+02, 1.0334e+02, ..., 1.0216e+02,\n", " 1.0219e+02, 1.0212e+02],\n", " [1.0339e+02, 1.0290e+02, 1.0300e+02, ..., 1.0180e+02,\n", " 1.0220e+02, 1.0189e+02]]], device='mps:0')\n", "Traced wrapped PyTorch ImageEncoder ckpt out for jpg:\n", ">>> tensor([[[1.3479e+00, 1.3024e+00, 1.3246e+00, ..., 
3.6170e-02,\n", " 1.2884e-01, 4.5228e-01],\n", " [1.5584e+00, 1.4481e+00, 1.4059e+00, ..., 3.4862e-01,\n", " 3.9270e-01, 3.3447e-01],\n", " [1.6099e+00, 1.5023e+00, 1.5238e+00, ..., 3.6392e-01,\n", " 3.8963e-01, 4.5296e-01],\n", " ...,\n", " [1.0288e+02, 1.0318e+02, 1.0304e+02, ..., 1.0168e+02,\n", " 1.0194e+02, 1.0191e+02],\n", " [1.0353e+02, 1.0333e+02, 1.0334e+02, ..., 1.0216e+02,\n", " 1.0219e+02, 1.0212e+02],\n", " [1.0339e+02, 1.0290e+02, 1.0300e+02, ..., 1.0180e+02,\n", " 1.0220e+02, 1.0189e+02]],\n", "\n", " [[1.3479e+00, 1.3024e+00, 1.3246e+00, ..., 3.6170e-02,\n", " 1.2884e-01, 4.5228e-01],\n", " [1.5584e+00, 1.4481e+00, 1.4059e+00, ..., 3.4862e-01,\n", " 3.9270e-01, 3.3447e-01],\n", " [1.6099e+00, 1.5023e+00, 1.5238e+00, ..., 3.6392e-01,\n", " 3.8963e-01, 4.5296e-01],\n", " ...,\n", " [1.0288e+02, 1.0318e+02, 1.0304e+02, ..., 1.0168e+02,\n", " 1.0194e+02, 1.0191e+02],\n", " [1.0353e+02, 1.0333e+02, 1.0334e+02, ..., 1.0216e+02,\n", " 1.0219e+02, 1.0212e+02],\n", " [1.0339e+02, 1.0290e+02, 1.0300e+02, ..., 1.0180e+02,\n", " 1.0220e+02, 1.0189e+02]],\n", "\n", " [[1.3479e+00, 1.3024e+00, 1.3246e+00, ..., 3.6170e-02,\n", " 1.2884e-01, 4.5228e-01],\n", " [1.5584e+00, 1.4481e+00, 1.4059e+00, ..., 3.4862e-01,\n", " 3.9270e-01, 3.3447e-01],\n", " [1.6099e+00, 1.5023e+00, 1.5238e+00, ..., 3.6392e-01,\n", " 3.8963e-01, 4.5296e-01],\n", " ...,\n", " [1.0288e+02, 1.0318e+02, 1.0304e+02, ..., 1.0168e+02,\n", " 1.0194e+02, 1.0191e+02],\n", " [1.0353e+02, 1.0333e+02, 1.0334e+02, ..., 1.0216e+02,\n", " 1.0219e+02, 1.0212e+02],\n", " [1.0339e+02, 1.0290e+02, 1.0300e+02, ..., 1.0180e+02,\n", " 1.0220e+02, 1.0189e+02]]], device='mps:0')\n" ] } ], "source": [ "i = np.asarray(original_image.resize((518, 518)))\n", "i = i.astype(\"float32\")\n", "i = np.transpose(i, (2, 0, 1))\n", "i = np.expand_dims(i, 0)\n", "i = torch.from_numpy(i).to(DEVICE)\n", "\n", "with torch.no_grad():\n", "    out = wrapped_model(i)\n", "\n", "print(\"wrapped PyTorch ImageEncoder ckpt out for jpg:\\n>>>\", out[0, :10])\n", "\n", "traced_model_w = torch.jit.trace(wrapped_model, i)\n", "\n", "with torch.no_grad():\n", "    out = traced_model_w(i)\n", "\n", "print(\"Traced wrapped PyTorch ImageEncoder ckpt out for jpg:\\n>>>\", out[0, :10])" ] }, { "cell_type": "code", "execution_count": 86, "id": "db5cb9b9", "metadata": {}, "outputs": [ { "data": { "text/plain": [ "(torch.Size([1, 3, 518, 518]), torch.Size([1, 3, 518, 518]))" ] }, "execution_count": 86, "metadata": {}, "output_type": "execute_result" } ], "source": [ "i.shape, out.shape" ] }, { "cell_type": "code", "execution_count": 92, "id": "681683aa", "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "(1, 3, 518, 518) 255.0 0.0 104.07214\n", "(518, 518, 3) 255 0 103.57204722648738\n" ] } ], "source": [ "tmp = out.cpu().numpy()\n", "\n", "print(tmp.shape, tmp.max(), tmp.min(), tmp.mean())\n", "# Convert the (1, 3, 518, 518) float output to a (518, 518, 3) uint8 image\n", "tmp = np.transpose(tmp, (0, 2, 3, 1)).astype(np.uint8)\n", "tmp = tmp.squeeze()\n", "print(tmp.shape, tmp.max(), tmp.min(), tmp.mean())\n", "tmp_image = Image.fromarray(tmp).resize((original_w, original_h))\n", "tmp_image.save(\"depth_image_3.png\")" ] }, { "cell_type": "code", "execution_count": 71, "id": "9e4f00bd", "metadata": {}, "outputs": [ { "data": { "text/plain": [ "torch.Size([1, 3, 518, 518])" ] }, "execution_count": 71, "metadata": {}, "output_type": "execute_result" } ], "source": [ "i.shape" ] }, { "cell_type": "code", "execution_count": null, "id": "304ae7b0", "metadata": {}, "outputs": [ { "name": "stderr", "output_type": "stream", "text": [ "Converting PyTorch Frontend ==> MIL Ops: 100%|█████████▉| 1247/1248 [00:00<00:00, 6927.17 ops/s]\n", "Running MIL frontend_pytorch pipeline: 100%|██████████| 5/5 [00:00<00:00, 90.46 passes/s]\n", "Running MIL default pipeline: 100%|██████████| 89/89 [00:06<00:00, 13.75 passes/s]\n", "Running MIL backend_mlprogram pipeline: 100%|██████████| 12/12 [00:00<00:00, 99.10 passes/s]\n" ] } ], "source": [ "traced_model_w.eval()\n", "image_input = ct.ImageType(name=\"colorImage\", shape=i.shape)\n", "image_encoder_model = ct.converters.convert(\n", "    traced_model_w,\n", "    convert_to=\"mlprogram\",\n", "    inputs=[image_input],\n", "    outputs=[ct.ImageType(name=\"depthOutput\")],\n", "    minimum_deployment_target=ct.target.iOS16,\n", ")\n", "image_encoder_model.save(\"DepthAnything_v2_large.mlpackage\")" ] },
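{ "cell_type": "markdown", "id": "c1d2e3f4", "metadata": {}, "source": [ "Optional usage sketch (not part of the original notebook): load the saved package with coremltools and run it on the sample image. Core ML prediction requires macOS. The input name `colorImage`, the output name `depthOutput`, and the 518x518 input size come from the conversion call above; `mlmodel`, `test_image`, and the output filename are illustrative." ] }, { "cell_type": "code", "execution_count": null, "id": "e5f6a7b8", "metadata": {}, "outputs": [], "source": [ "# Optional sketch: run the exported Core ML package on the sample image (macOS only).\n", "mlmodel = ct.models.MLModel(\"DepthAnything_v2_large.mlpackage\")\n", "test_image = Image.open(\"./sample_images/IMG_4061.jpeg\").convert(\"RGB\").resize((518, 518))\n", "prediction = mlmodel.predict({\"colorImage\": test_image})\n", "# 'depthOutput' is declared as an ImageType, so coremltools returns a PIL image.\n", "depth_out = prediction[\"depthOutput\"]\n", "depth_out.resize((original_w, original_h)).save(\"depth_image_coreml.png\")" ] }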
[ { "name": "stderr", "output_type": "stream", "text": [ "Converting PyTorch Frontend ==> MIL Ops: 100%|█████████▉| 1247/1248 [00:00<00:00, 6927.17 ops/s]\n", "Running MIL frontend_pytorch pipeline: 100%|██████████| 5/5 [00:00<00:00, 90.46 passes/s]\n", "Running MIL default pipeline: 100%|██████████| 89/89 [00:06<00:00, 13.75 passes/s]\n", "Running MIL backend_mlprogram pipeline: 100%|██████████| 12/12 [00:00<00:00, 99.10 passes/s]\n" ] } ], "source": [ "traced_model_w.eval()\n", "image_input = ct.ImageType(name=\"colorImage\", shape=i.shape)\n", "image_encoder_model = ct.converters.convert(\n", " traced_model_w,\n", " convert_to=\"mlprogram\",\n", " inputs=[image_input],\n", " outputs=[ct.ImageType(name=\"depthOutput\")],\n", " minimum_deployment_target=ct.target.iOS16,\n", ")\n", "image_encoder_model.save(\"DepthAnything_v2_large.mlpackage\")" ] } ], "metadata": { "kernelspec": { "display_name": "pytorch2", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.10.14" } }, "nbformat": 4, "nbformat_minor": 5 }