import gradio as gr
import torch
from transformers import ViltProcessor, ViltForVisualQuestionAnswering

# Download a sample image to use in the examples gallery
torch.hub.download_url_to_file(
    "http://images.cocodataset.org/val2017/000000039769.jpg", "cats.jpg"
)

# Load the ViLT processor and the VQA model fine-tuned on VQAv2
processor = ViltProcessor.from_pretrained("dandelin/vilt-b32-finetuned-vqa")
model = ViltForVisualQuestionAnswering.from_pretrained("dandelin/vilt-b32-finetuned-vqa")


def answer_question(image, text):
    # Encode the image-question pair into model inputs
    encoding = processor(image, text, return_tensors="pt")

    # Forward pass (no gradients needed at inference time)
    with torch.no_grad():
        outputs = model(**encoding)

    # Pick the highest-scoring answer class and map it back to its label
    logits = outputs.logits
    idx = logits.argmax(-1).item()
    predicted_answer = model.config.id2label[idx]
    return predicted_answer


# Component classes (the old gr.inputs/gr.outputs namespaces were removed in Gradio 3+)
image = gr.Image(type="pil")
question = gr.Textbox(label="Question")
answer = gr.Textbox(label="Predicted answer")

examples = [
    ["cats.jpg", "How many cats are there?"],
    [
        "https://s3.geograph.org.uk/geophotos/06/21/24/6212487_1cca7f3f_1024x1024.jpg",
        "What is the color of the flower?",
    ],
    [
        "https://computing.ece.vt.edu/~harsh/visualAttention/ProjectWebpage/Figures/vqa_1.png",
        "What is the mustache made of?",
    ],
    [
        "https://computing.ece.vt.edu/~harsh/visualAttention/ProjectWebpage/Figures/vqa_2.png",
        "How many slices of pizza are there?",
    ],
    [
        "https://computing.ece.vt.edu/~harsh/visualAttention/ProjectWebpage/Figures/vqa_3.png",
        "Does it appear to be rainy?",
    ],
]

interface = gr.Interface(
    fn=answer_question,
    inputs=[image, question],
    outputs=answer,
    examples=examples,
)

# enable_queue is no longer an Interface argument; queue() replaces it
interface.queue().launch(debug=True)
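
# Optional extension (a sketch, not part of the original demo): instead of
# returning only the argmax answer, return the k highest-scoring candidates.
# The helper name `answer_question_topk` and the default k=5 are illustrative
# assumptions; the scores shown are raw logits, not calibrated probabilities.
def answer_question_topk(image, text, k=5):
    encoding = processor(image, text, return_tensors="pt")
    with torch.no_grad():
        outputs = model(**encoding)
    # Take the k highest-scoring answer classes from the logits
    top = outputs.logits.squeeze(0).topk(k)
    return {
        model.config.id2label[i.item()]: v.item()
        for i, v in zip(top.indices, top.values)
    }

# The returned dict could be wired to a gr.Label output instead of the
# Textbox above if you want Gradio to render the candidates as a ranked list.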