from transformers import VisionEncoderDecoderModel, ViTFeatureExtractor, PreTrainedTokenizerFast

# Load the ViT-encoder / GPT-2-decoder captioning checkpoint, the ViT image
# preprocessor, and the tokenizer used to decode generated token ids.
# (Newer transformers releases prefer ViTImageProcessor over the deprecated
# ViTFeatureExtractor.)
model = VisionEncoderDecoderModel.from_pretrained("nlpconnect/vit-gpt2-image-captioning")
vit_feature_extractor = ViTFeatureExtractor.from_pretrained("google/vit-base-patch16-224-in21k")
tokenizer = PreTrainedTokenizerFast.from_pretrained("distilgpt2")


def vit2distilgpt2(img):
    # Preprocess the PIL image into pixel values for the ViT encoder.
    pixel_values = vit_feature_extractor(images=img, return_tensors="pt").pixel_values
    # Beam-search decode three candidate captions; generate() returns token
    # ids (not encoder states), so the variable is named accordingly.
    generated_ids = model.generate(pixel_values.to("cpu"), num_beams=5, num_return_sequences=3)
    generated_sentences = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
    return generated_sentences
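
# Optional smoke test, a minimal sketch assuming an example image such as
# "Image1.png" (listed below) sits next to this script; uncomment to run the
# captioning function outside the Gradio UI.
#
# from PIL import Image
# for caption in vit2distilgpt2(Image.open("Image1.png")):
#     print(caption)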


import gradio as gr

# Gradio 2.x-style input/output components.
inputs = [
    gr.inputs.Image(type="pil", label="Original Image")
]

outputs = [
    gr.outputs.Textbox(label="Caption 1"),
    gr.outputs.Textbox(label="Caption 2"),
    gr.outputs.Textbox(label="Caption 3"),
]
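
# Note: newer Gradio releases deprecate (and eventually remove) gr.inputs /
# gr.outputs in favor of top-level components; the rough equivalents would be
# gr.Image(type="pil", label="Original Image") and gr.Textbox(label=...).
# This script keeps the 2.x-style API used above.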

title = "Image Captioning using ViT + GPT2"
description = "ViT and GPT-2 are used to generate a caption for the uploaded image. The model was trained on the COCO dataset."
examples = [
    ["Image1.png"],
    ["Image2.png"],
    ["Image3.png"],
]

# Wire the captioning function into a Gradio Interface and launch the demo.
gr.Interface(
    vit2distilgpt2,
    inputs,
    outputs,
    title=title,
    description=description,
    examples=examples,
    theme="huggingface",
).launch(debug=True, enable_queue=True)