import os
import gradio as gr
import torch
import PIL
from flamingo_mini import FlamingoConfig, FlamingoModel, FlamingoProcessor
EXAMPLES_DIR = 'examples'
DEFAULT_PROMPT = "<image>"

# load the pretrained flamingo-mini model and its processor,
# using the GPU if one is available
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model = FlamingoModel.from_pretrained('dhansmair/flamingo-mini')
model.to(device)
model.eval()
processor = FlamingoProcessor(model.config, load_vision_processor=True)
# set up some example images for the Gradio interface, if an examples/ directory exists
examples = []
if os.path.isdir(EXAMPLES_DIR):
    for file in os.listdir(EXAMPLES_DIR):
        path = os.path.join(EXAMPLES_DIR, file)
        examples.append([path, DEFAULT_PROMPT])
def predict_caption(image, prompt):
    """Generate a caption for the given PIL image, conditioned on the text prompt."""
    assert isinstance(prompt, str)
    features = processor.extract_features(image).to(device)
    caption = model.generate_captions(processor,
                                      visual_features=features,
                                      prompt=prompt)
    # generate_captions may return a list of captions; keep the first one
    if isinstance(caption, list):
        caption = caption[0]
    return caption
# build and launch the Gradio demo
iface = gr.Interface(fn=predict_caption,
                     inputs=[gr.Image(type="pil"), gr.Textbox(value=DEFAULT_PROMPT, label="Prompt")],
                     examples=examples,
                     outputs="text")
iface.launch()
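
# --- Optional local-testing sketch (not part of the original Space) ---
# Calls the captioning function directly, bypassing the Gradio UI.
# The file name 'examples/dog.jpg' below is a hypothetical placeholder.
#
# from PIL import Image
# print(predict_caption(Image.open('examples/dog.jpg'), DEFAULT_PROMPT))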