# app.py — Image Captioning Space (author: Shriharshan, commit efb9c5f, 1.86 kB)
# Image captioning with ViT+GPT2
# Module-level setup: load the pretrained encoder-decoder captioning model,
# the ViT image feature extractor, and a GPT2 tokenizer used to decode the
# generated token ids back into text.
from PIL import Image
from transformers import VisionEncoderDecoderModel, ViTFeatureExtractor, PreTrainedTokenizerFast
import requests
# ViT encoder + GPT2 decoder fine-tuned for captioning (nlpconnect checkpoint).
model = VisionEncoderDecoderModel.from_pretrained("nlpconnect/vit-gpt2-image-captioning")
# NOTE(review): "extactor" is a typo for "extractor"; the name is kept as-is
# because the inference function below references this exact global.
vit_feature_extactor = ViTFeatureExtractor.from_pretrained("google/vit-base-patch16-224-in21k")
# NOTE(review): the distilgpt2 tokenizer is used here rather than the
# tokenizer bundled with the captioning checkpoint — presumably compatible
# since both are GPT2-family vocabularies; confirm against model card.
tokenizer = PreTrainedTokenizerFast.from_pretrained("distilgpt2")
# --- Scratch code kept from development; never executed -----------------
#url = 'https://d2gp644kobdlm6.cloudfront.net/wp-content/uploads/2016/06/bigstock-Shocked-and-surprised-boy-on-t-113798588-300x212.jpg'
# with Image.open(requests.get(url, stream=True).raw) as img:
# pixel_values = vit_feature_extactor(images=img, return_tensors="pt").pixel_values
# encoder_outputs = model.generate(pixel_values.to('cpu'),num_beams = 5)
# generated_senetences = tokenizer.batch_decode(encoder_outputs, skip_special_tokens=True,)
# generated_senetences
# generated_senetences[0].split(".")[0]
def vit2distilgpt2(img):
    """Generate a one-sentence caption for an image.

    Encodes the image with the ViT feature extractor, generates caption
    token ids with beam search, and decodes them to text.

    Args:
        img: a PIL.Image instance (as supplied by the Gradio image input).

    Returns:
        str: the first sentence of the top-beam generated caption.
    """
    # Preprocess the image into the pixel tensor the ViT encoder expects.
    pixel_values = vit_feature_extactor(images=img, return_tensors="pt").pixel_values
    # Beam search (5 beams) on CPU. The original code double-assigned the
    # result to an unused alias (`encoder_outputs = generated_ids = ...`);
    # the redundant name is removed here.
    generated_ids = model.generate(pixel_values.to('cpu'), num_beams=5)
    # Decode token ids back to text, dropping special tokens (BOS/EOS/pad).
    generated_sentences = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
    # Keep only the first sentence of the first (best) beam.
    return generated_sentences[0].split('.')[0]
import gradio as gr
# --- Gradio UI wiring ---------------------------------------------------
# NOTE(review): `gr.inputs` / `gr.outputs` and the `enable_queue` launch
# argument are the legacy (pre-3.x) Gradio API; kept as-is to match the
# gradio version this Space was pinned to.
inputs = [
gr.inputs.Image(type="pil",label="Original Images")  # image delivered to vit2distilgpt2 as a PIL.Image
]
outputs = [
gr.outputs.Textbox(label = "Caption")  # displays the generated caption string
]
title = "Image Captioning using ViT + GPT2"
description = "ViT and GPT2 are used to generate Image Caption for the uploaded image.COCO DataSet is used for Training"
# Example images expected to sit alongside this script in the repo root.
examples = [
["Image1.png"],
["Image2.png"],
["Image3.png"]
]
# Build the demo and launch it; debug=True prints errors to the console,
# enable_queue=True serializes requests (legacy queueing flag).
gr.Interface(
vit2distilgpt2,
inputs,
outputs,
title=title,
description=description,
examples=examples,
theme="huggingface",
).launch(debug=True, enable_queue=True)