Spaces:
Runtime error
Runtime error
File size: 1,752 Bytes
17ad24e 746dc09 17ad24e 746dc09 17ad24e |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 |
# -*- coding: utf-8 -*-
"""VIT_demo_main.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1h4UTeFydVldZcBzvaDxS0vtDgQA6rQ5K
"""
#pip install transformers -q
#pip install gradio -q
from transformers import VisionEncoderDecoderModel, ViTImageProcessor, AutoTokenizer
import torch
from PIL import Image
# Pretrained ViT-encoder + GPT-2-decoder checkpoint fine-tuned for image
# captioning; downloaded from the Hugging Face Hub on first run.
model = VisionEncoderDecoderModel.from_pretrained("nlpconnect/vit-gpt2-image-captioning")
# Image processor that resizes/normalizes a PIL image into ViT pixel values.
vit_feature_extractor = ViTImageProcessor.from_pretrained("nlpconnect/vit-gpt2-image-captioning")
# GPT-2 tokenizer used to decode generated token ids back into text.
tokenizer = AutoTokenizer.from_pretrained("nlpconnect/vit-gpt2-image-captioning")
def vit2distilgpt2(img):
    """Generate a one-sentence caption for *img* with the ViT-GPT2 model.

    Args:
        img: A ``PIL.Image`` (as supplied by the Gradio image input).

    Returns:
        str: The generated caption, truncated at the first period.
    """
    # Preprocess the image into the pixel-value tensor the ViT encoder expects.
    pixel_values = vit_feature_extractor(images=img, return_tensors="pt").pixel_values
    # model.generate returns generated token ids (the original code aliased
    # these as `encoder_outputs`, which was misleading — they are decoder
    # output ids, not encoder states). Beam search with 5 beams on CPU.
    generated_ids = model.generate(pixel_values.to("cpu"), num_beams=5)
    # Decode ids to text, dropping special tokens (BOS/EOS/pad).
    generated_sentences = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
    # Keep only the first sentence of the first (and only) decoded sequence.
    return generated_sentences[0].split(".")[0]
import gradio as gr

# NOTE(review): the original used `gr.inputs.Image` / `gr.outputs.Textbox`,
# namespaces that were removed in Gradio 3.x — on current Gradio the script
# fails at import time. The top-level components below are the drop-in
# replacements (same behavior: PIL image in, text caption out).
inputs = [
    gr.Image(type="pil", label="Original Image")
]
outputs = [
    gr.Textbox(label="Caption")
]

title = "Image Captioning with Visual Transformer using nlpconnect"
description = "ViT and GPT2 are used to generate Image Caption for the uploaded image. COCO Dataset was used for training."
article = " <a href='https://huggingface.co/nlpconnect/vit-gpt2-image-captioning'>Model Repo on Hugging Face Model Hub</a>"

# Example images; these files must sit alongside this script.
examples = [
    ["Img_3.jpg"],
    ["Img_1.jpg"],
    ["Img_2.jpg"],
]

# Build and serve the demo. The `theme="huggingface"` and
# `enable_queue=True` arguments were removed in newer Gradio releases;
# queueing is enabled via .queue() instead.
gr.Interface(
    vit2distilgpt2,
    inputs,
    outputs,
    title=title,
    description=description,
    article=article,
    examples=examples,
).queue().launch(debug=True)
|