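# Gradio demo for MAPL: image captioning and visual question answering.
# Paper: https://arxiv.org/abs/2210.07179
# Code and weights: https://github.com/mair-lab/mapl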
import torch
from PIL import Image
import gradio as gr


# Run on the GPU when available, otherwise fall back to CPU
device = 'cuda' if torch.cuda.is_available() else 'cpu'

# Load the pretrained MAPL model from Torch Hub and run it in half precision
model = torch.hub.load('mair-lab/mapl', 'mapl')
model.eval()
model.to(device, torch.float16)


def predict(image: Image.Image, question: str) -> str:
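    """Caption the image when `question` is empty, otherwise answer the visual question."""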
    # Preprocess the image into a batched half-precision tensor
    pixel_values = model.image_transform(image).unsqueeze(0).to(device, torch.float16)

    # An empty question triggers captioning; otherwise wrap the question in a QA prompt
    input_ids = None
    if question:
        prompt = f"Please answer the question. Question: {question} Answer:" if '?' in question else question
        input_ids = model.text_transform(prompt).input_ids.to(device)

    # Generate token ids with beam search
    generated_ids = model.generate(
        pixel_values=pixel_values,
        input_ids=input_ids,
        max_new_tokens=100,
        num_beams=5
    )
    
    # Decode the generated ids back into text
    answer = model.text_processor.batch_decode(generated_ids, skip_special_tokens=True)[0].strip()

    return answer


# Warm-up / sanity-check call with a blank image before launching the interface
predict(image=Image.new('RGB', (224, 224)), question="")

# Gradio input and output components
image = gr.components.Image(type='pil', label="Image")
question = gr.components.Textbox(info="Ask a visual question or leave empty for captioning", placeholder="What is this?", label="Question")
answer = gr.components.Textbox(label="Answer")

interface = gr.Interface(
    fn=predict,
    inputs=[image, question],
    outputs=answer,
    title="MAPL🍁",
    description="Paper: [https://arxiv.org/abs/2210.07179](https://arxiv.org/abs/2210.07179)\nCode and weights: [https://github.com/mair-lab/mapl](https://github.com/mair-lab/mapl)",
    allow_flagging='never')
interface.launch()