SaiBrahmam committed
Commit 3cd9f43
1 Parent(s): 28a4341
Delete Untitled15 (1).ipynb
Files changed: Untitled15 (1).ipynb (+0 -1)
Untitled15 (1).ipynb
DELETED
@@ -1 +0,0 @@
{"cells":[{"cell_type":"code","execution_count":13,"metadata":{"colab":{"base_uri":"https://localhost:8080/"},"executionInfo":{"elapsed":12534,"status":"ok","timestamp":1682668051403,"user":{"displayName":"Sai Brahmam","userId":"06000535420940355016"},"user_tz":-330},"id":"LdTfL7M4zSW1","outputId":"e5b7341f-f5f9-493b-e0c0-67637d6e2981"},"outputs":[],"source":["# install requirements\n","import sys\n","if 'google.colab' in sys.modules:\n"," print('Running in Colab.')\n"," !pip install transformers timm fairscale streamlit\n"," !git clone https://github.com/salesforce/BLIP\n"," %cd BLIP"]},{"cell_type":"code","execution_count":15,"metadata":{"colab":{"base_uri":"https://localhost:8080/"},"executionInfo":{"elapsed":18766,"status":"ok","timestamp":1682668076211,"user":{"displayName":"Sai Brahmam","userId":"06000535420940355016"},"user_tz":-330},"id":"gOEsAx6T0Rxk","outputId":"24dd42ae-4e7a-4288-bfc6-ef966df39dd0"},"outputs":[{"ename":"ModuleNotFoundError","evalue":"No module named 'models'","output_type":"error","traceback":["\u001b[1;31m---------------------------------------------------------------------------\u001b[0m","\u001b[1;31mModuleNotFoundError\u001b[0m Traceback (most recent call last)","Cell \u001b[1;32mIn[15], line 24\u001b[0m\n\u001b[0;32m 21\u001b[0m image \u001b[39m=\u001b[39m transform(raw_image)\u001b[39m.\u001b[39munsqueeze(\u001b[39m0\u001b[39m)\u001b[39m.\u001b[39mto(device) \n\u001b[0;32m 22\u001b[0m \u001b[39mreturn\u001b[39;00m image\n\u001b[1;32m---> 24\u001b[0m \u001b[39mfrom\u001b[39;00m \u001b[39mmodels\u001b[39;00m\u001b[39m.\u001b[39;00m\u001b[39mblip\u001b[39;00m \u001b[39mimport\u001b[39;00m blip_decoder\n\u001b[0;32m 26\u001b[0m image_size \u001b[39m=\u001b[39m \u001b[39m384\u001b[39m\n\u001b[0;32m 28\u001b[0m model_url \u001b[39m=\u001b[39m \u001b[39m'\u001b[39m\u001b[39mhttps://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_capfilt_large.pth\u001b[39m\u001b[39m'\u001b[39m\n","\u001b[1;31mModuleNotFoundError\u001b[0m: No module named 'models'"]}],"source":["from PIL import Image\n","import requests\n","import torch\n","from torchvision import transforms\n","from torchvision.transforms.functional import InterpolationMode\n","\n","device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n","\n","def load_demo_image(image_size,device):\n"," img_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/demo.jpg' \n"," raw_image = Image.open(requests.get(img_url, stream=True).raw).convert('RGB') \n","\n"," w,h = raw_image.size\n"," display(raw_image.resize((w//5,h//5)))\n"," \n"," transform = transforms.Compose([\n"," transforms.Resize((image_size,image_size),interpolation=InterpolationMode.BICUBIC),\n"," transforms.ToTensor(),\n"," transforms.Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711))\n"," ]) \n"," image = transform(raw_image).unsqueeze(0).to(device) \n"," return image\n","\n","from models.blip import blip_decoder\n","\n","image_size = 384\n","\n","model_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_capfilt_large.pth'\n"," \n","model = blip_decoder(pretrained=model_url, image_size=image_size, vit='base')\n","model.eval()\n","model = model.to(device)\n","\n","import streamlit as st\n","\n","st.title(\"Image Captioning\")\n","\n","uploaded_file = st.file_uploader(\"Choose an image...\", type=[\"jpg\", \"jpeg\", \"png\"])\n","if uploaded_file is not None:\n"," image = Image.open(uploaded_file).convert('RGB')\n"," st.image(image, 
Cell 3 was empty. The notebook metadata records Colab provenance and a Python 3 kernel (version 3.9.13).
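A further note on Cell 2: Streamlit calls such as `st.title` and `st.file_uploader` render nothing when executed inside a notebook cell; the script has to be served by the Streamlit runtime. A sketch of how the app could have been launched from Colab instead — the file name app.py and the localtunnel step are assumptions, not part of the deleted notebook:

# First write the Streamlit portion of Cell 2 to a script, e.g. by starting a
# cell with the magic:
#   %%writefile app.py
# Then serve it; in Colab a tunnel is needed to reach the app from a browser:
!streamlit run app.py --server.port 8501 &>/dev/null &
!npx localtunnel --port 8501  # prints a public URL for the running app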