|
|
|
from PIL import Image |
|
from transformers import VisionEncoderDecoderModel, ViTFeatureExtractor, PreTrainedTokenizerFast |
|
import requests |
|
# Pretrained ViT-encoder + GPT2-decoder captioning model (weights fine-tuned by nlpconnect).
model = VisionEncoderDecoderModel.from_pretrained("nlpconnect/vit-gpt2-image-captioning")

# Preprocessor that converts PIL images into the pixel tensors the ViT encoder expects.
# NOTE(review): "extactor" is a typo for "extractor" — kept as-is because the name is
# referenced below; consider renaming both sites together.
vit_feature_extactor = ViTFeatureExtractor.from_pretrained("google/vit-base-patch16-224-in21k")

# Tokenizer used to decode the generated token ids back into caption text.
tokenizer = PreTrainedTokenizerFast.from_pretrained("distilgpt2")
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def vit2distilgpt2(img):
    """Generate a one-sentence caption for a PIL image.

    Args:
        img: input image (PIL.Image, as supplied by the Gradio Image input).

    Returns:
        str: the generated caption, truncated at the first period.
    """
    # Convert the image to the pixel tensor the ViT encoder expects.
    pixel_values = vit_feature_extactor(images=img, return_tensors="pt").pixel_values

    # Beam search (5 beams) on CPU; result is a batch of generated token ids,
    # not encoder outputs — the original misleadingly aliased it as `encoder_outputs`.
    generated_ids = model.generate(pixel_values.to("cpu"), num_beams=5)

    # Decode token ids to text, dropping padding/EOS special tokens.
    generated_sentences = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)

    # Keep only the first sentence of the first (and only) batch element.
    return generated_sentences[0].split(".")[0]
|
|
|
import gradio as gr

# --- UI components -------------------------------------------------------
# One image upload widget in, one text box out.
image_input = gr.inputs.Image(type="pil", label="Original Images")
caption_output = gr.outputs.Textbox(label="Caption")

# --- Page copy and sample images -----------------------------------------
title = "Image Captioning using ViT + GPT2"
description = "ViT and GPT2 are used to generate Image Caption for the uploaded image.COCO DataSet is used for Training"
examples = [["Image1.png"], ["Image2.png"], ["Image3.png"]]

# --- Wire everything together and serve ----------------------------------
demo = gr.Interface(
    vit2distilgpt2,
    [image_input],
    [caption_output],
    title=title,
    description=description,
    examples=examples,
    theme="huggingface",
)
demo.launch(debug=True, enable_queue=True)