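# Reference only: commented-out CLIP image/text similarity example
# (not executed by this app; see the illustrative helper at the bottom of the file).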
# from PIL import Image
# import requests
# from transformers import CLIPProcessor, CLIPModel
# model = CLIPModel.from_pretrained("openai/clip-vit-base-patch32")
# processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
# url = "http://images.cocodataset.org/val2017/000000039769.jpg"
# image = Image.open(requests.get(url, stream=True).raw)
# inputs = processor(text=["a photo of a cat", "a photo of a dog"], images=image, return_tensors="pt", padding=True)
# outputs = model(**inputs)
# logits_per_image = outputs.logits_per_image # this is the image-text similarity score
# probs = logits_per_image.softmax(dim=1) # we can take the softmax to get the label probabilities
import os
import zipfile

import gdown

# Google Drive link to the archive file
archive_url = 'https://drive.google.com/uc?id=14QhofCbby053kWbVeWEBHCxOROQS-bjN'

# Destination directory within the Hugging Face Space (replace with your own if needed)
destination_dir = 'SaviAnna/PicturesText'
output_path = os.path.join(destination_dir, 'archive.zip')

# Make sure the destination directory exists before downloading
os.makedirs(destination_dir, exist_ok=True)

# Download the archive from Google Drive
gdown.download(archive_url, output_path, quiet=False)
# Directory to extract the archive into
extracted_directory = os.path.join(destination_dir, 'find_pic')

# Extract the archive
with zipfile.ZipFile(output_path, 'r') as zip_ref:
    zip_ref.extractall(extracted_directory)
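
# Illustrative sketch (defined but not called anywhere in this app): how the
# extracted images could be scored against text prompts using the commented-out
# CLIP example at the top of this file. The glob pattern ("*.jpg") and the
# default prompts are assumptions made for the example, not taken from the
# original code.
def score_images_with_clip(image_dir, prompts=("a photo of a cat", "a photo of a dog")):
    from pathlib import Path

    from PIL import Image
    from transformers import CLIPModel, CLIPProcessor

    model = CLIPModel.from_pretrained("openai/clip-vit-base-patch32")
    processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")

    results = {}
    for image_path in Path(image_dir).rglob("*.jpg"):  # assumed file extension
        image = Image.open(image_path)
        inputs = processor(text=list(prompts), images=image, return_tensors="pt", padding=True)
        # logits_per_image holds image-text similarity scores; softmax over
        # the prompts gives per-label probabilities for this image
        probs = model(**inputs).logits_per_image.softmax(dim=1)
        results[image_path.name] = probs.squeeze(0).tolist()
    return results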