# llava-1.5-dlai / app.py
import gradio as gr
import spaces
import torch
from transformers import AutoProcessor, LlavaForConditionalGeneration

model_id = "llava-hf/llava-1.5-7b-hf"
# LLaVA-1.5 chat template: the <image> placeholder marks where image tokens go.
prompt_format = "USER: <image>\n{}\nASSISTANT:"
# Sample COCO image, unused by the app itself; handy for local testing.
image_file = "http://images.cocodataset.org/val2017/000000039769.jpg"

# Load the weights in float16 and move the model to the GPU.
model = LlavaForConditionalGeneration.from_pretrained(
    model_id,
    torch_dtype=torch.float16,
    low_cpu_mem_usage=True,
).cuda()
processor = AutoProcessor.from_pretrained(model_id)

@spaces.GPU  # allocate a GPU for the duration of the call on ZeroGPU Spaces
def inference(text, image):
    prompt = prompt_format.format(text)
    # Keyword arguments avoid relying on the processor's positional argument
    # order, which has changed across transformers releases.
    inputs = processor(text=prompt, images=image, return_tensors="pt").to(0, torch.float16)
    output = model.generate(**inputs, max_new_tokens=1024)
    # Decode the generated sequence, dropping its first two tokens.
    return processor.decode(output[0][2:], skip_special_tokens=True)

# Minimal UI: a free-form text prompt plus an image in, generated text out.
gr.Interface(fn=inference, inputs=[gr.Text(), gr.Image()], outputs=gr.Text()).launch()
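
# Optional local smoke test: a minimal sketch of calling inference() directly,
# assuming `requests` and `Pillow` are installed and a CUDA GPU is available.
# It reuses the sample `image_file` defined above. Kept commented out because
# launch() above blocks, and the Space itself never calls inference() directly.
#
# from PIL import Image
# import requests
#
# image = Image.open(requests.get(image_file, stream=True).raw)
# print(inference("What do you see in this image?", image))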