import subprocess

import gradio as gr
import spaces
import torch
from PIL import Image
from transformers import AutoProcessor, Idefics3ForConditionalGeneration

# Install flash-attn at startup (a common Hugging Face Spaces pattern); the
# CUDA build step is skipped via FLASH_ATTENTION_SKIP_CUDA_BUILD.
subprocess.run(
    "pip install flash-attn --no-build-isolation",
    env={"FLASH_ATTENTION_SKIP_CUDA_BUILD": "TRUE"},
    shell=True,
)
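
# Load the processor and model once at startup; bfloat16 halves the memory
# footprint of fp32 weights while staying numerically stable on recent GPUs.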
processor = AutoProcessor.from_pretrained("HuggingFaceTB/SmolVLM_converted_4")
model = Idefics3ForConditionalGeneration.from_pretrained(
    "HuggingFaceTB/SmolVLM_converted_4",
    torch_dtype=torch.bfloat16,
    # _attn_implementation="flash_attention_2",
).to("cuda")

BAD_WORDS_IDS = processor.tokenizer(["<image>", "<fake_token_around_image>", "<row_", "apiro", "\u2500lrow_", "row_1"], add_special_tokens=False).input_ids
EOS_WORDS_IDS = [processor.tokenizer.eos_token_id]
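
# Note: BAD_WORDS_IDS and EOS_WORDS_IDS are defined but never passed to
# model.generate() below; if desired, they could be wired in with, e.g.,
#   model.generate(**generation_args, bad_words_ids=BAD_WORDS_IDS)
# (an optional tweak, not part of the original generation call).
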
@spaces.GPU
def model_inference(
    images, text, assistant_prefix, decoding_strategy, temperature, max_new_tokens,
    repetition_penalty, top_p
):
    """Run one round of image+text generation and return the decoded answer."""
    if text == "" and not images:
        raise gr.Error("Please input a query and optionally image(s).")

    if text == "" and images:
        raise gr.Error("Please input a text query along with the image(s).")

    # The chat template below always includes an image slot, so an image is required.
    if not images:
        raise gr.Error("Please upload an image.")

    if isinstance(images, Image.Image):
        images = [images]
    resulting_messages = [
        {
            "role": "user",
            "content": [{"type": "image"}] + [
                {"type": "text", "text": text}
            ],
        }
    ]

    prompt = processor.apply_chat_template(resulting_messages, add_generation_prompt=True)
    # Seed the assistant's turn with the optional prefix (e.g. "Let's think step by step.")
    # so generation continues from it, as described in the demo's disclaimer.
    if assistant_prefix:
        prompt += f" {assistant_prefix}"

    inputs = processor(text=prompt, images=[images], return_tensors="pt")
    inputs = {k: v.to("cuda") for k, v in inputs.items()}
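
    # Base generation kwargs; strategy-specific sampling settings are added below.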
    generation_args = {
        "max_new_tokens": max_new_tokens,
        "repetition_penalty": repetition_penalty,
    }

    assert decoding_strategy in [
        "Greedy",
        "Top P Sampling",
    ]
    if decoding_strategy == "Greedy":
        generation_args["do_sample"] = False
    elif decoding_strategy == "Top P Sampling":
        generation_args["temperature"] = temperature
        generation_args["do_sample"] = True
        generation_args["top_p"] = top_p

    generation_args.update(inputs)

    # Generate, then decode only the newly generated tokens (everything past the prompt).
    generated_ids = model.generate(**generation_args)
    generated_texts = processor.batch_decode(
        generated_ids[:, generation_args["input_ids"].size(1):],
        skip_special_tokens=True,
    )
    return generated_texts[0]
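
# Build the Gradio UI: image and prompt inputs, generation controls, and example prompts.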
with gr.Blocks(fill_height=True) as demo:
    gr.Markdown("## IDEFICS3-Llama 🐶")
    gr.Markdown("Play with [HuggingFaceM4/Idefics3-8B-Llama3](https://huggingface.co/HuggingFaceM4/Idefics3-8B-Llama3) in this demo. To get started, upload an image and some text, or try one of the examples.")
    gr.Markdown("**Disclaimer:** Idefics3 does not include an RLHF alignment stage, so it may not consistently follow prompts or handle complex tasks. That doesn't mean it is incapable of doing so: in practice, adding a prefix to the assistant's response, such as `Let's think step by step.` for a reasoning question or `<html>` for HTML code generation, can significantly improve the output. You can also tune parameters such as the temperature when not using greedy decoding.")
    with gr.Column():
        image_input = gr.Image(label="Upload your Image", type="pil", scale=1)
        query_input = gr.Textbox(label="Prompt")
        assistant_prefix = gr.Textbox(label="Assistant Prefix", placeholder="Let's think step by step.")
        submit_btn = gr.Button("Submit")
        output = gr.Textbox(label="Output")
    with gr.Accordion(label="Example Inputs and Advanced Generation Parameters"):
        examples = [
            ["example_images/mmmu_example.jpeg", "Chase wants to buy 4 kilograms of oval beads and 5 kilograms of star-shaped beads. How much will he spend?", "Let's think step by step.", "Greedy", 0.4, 512, 1.2, 0.8],
            ["example_images/rococo_1.jpg", "What art era is this?", None, "Greedy", 0.4, 512, 1.2, 0.8],
            ["example_images/paper_with_text.png", "Read what's written on the paper.", None, "Greedy", 0.4, 512, 1.2, 0.8],
            ["example_images/dragons_playing.png", "What's unusual about this image?", None, "Greedy", 0.4, 512, 1.2, 0.8],
            ["example_images/example_images_ai2d_example_2.jpeg", "What happens to fish if pelicans increase?", None, "Greedy", 0.4, 512, 1.2, 0.8],
            ["example_images/travel_tips.jpg", "I want to go somewhere similar to the one in the photo. Give me destinations and travel tips.", None, "Greedy", 0.4, 512, 1.2, 0.8],
            ["example_images/dummy_pdf.png", "What percentage is the order status?", None, "Greedy", 0.4, 512, 1.2, 0.8],
            ["example_images/art_critic.png", "As an art critic AI assistant, could you describe this painting in detail and give a thorough critique?", None, "Greedy", 0.4, 512, 1.2, 0.8],
            ["example_images/s2w_example.png", "What is this UI about?", None, "Greedy", 0.4, 512, 1.2, 0.8],
        ]
        # Hyper-parameters for generation
        max_new_tokens = gr.Slider(
            minimum=8,
            maximum=1024,
            value=512,
            step=1,
            interactive=True,
            label="Maximum number of new tokens to generate",
        )
        repetition_penalty = gr.Slider(
            minimum=0.01,
            maximum=5.0,
            value=1.2,
            step=0.01,
            interactive=True,
            label="Repetition penalty",
            info="1.0 is equivalent to no penalty",
        )
        temperature = gr.Slider(
            minimum=0.0,
            maximum=5.0,
            value=0.4,
            step=0.1,
            interactive=True,
            label="Sampling temperature",
            info="Higher values will produce more diverse outputs.",
        )
        top_p = gr.Slider(
            minimum=0.01,
            maximum=0.99,
            value=0.8,
            step=0.01,
            interactive=True,
            label="Top P",
            info="Higher values sample more low-probability tokens.",
        )
        decoding_strategy = gr.Radio(
            [
                "Greedy",
                "Top P Sampling",
            ],
            value="Greedy",
            label="Decoding strategy",
            interactive=True,
            info="Greedy always picks the most likely next token; Top P samples from the smallest set of tokens whose probabilities sum to top_p.",
        )
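
    # Show the sampling-related controls only when a sampling strategy is selected.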
    decoding_strategy.change(
        fn=lambda selection: gr.Slider(visible=(selection == "Top P Sampling")),
        inputs=decoding_strategy,
        outputs=temperature,
    )
    decoding_strategy.change(
        fn=lambda selection: gr.Slider(visible=(selection == "Top P Sampling")),
        inputs=decoding_strategy,
        outputs=repetition_penalty,
    )
    decoding_strategy.change(
        fn=lambda selection: gr.Slider(visible=(selection == "Top P Sampling")),
        inputs=decoding_strategy,
        outputs=top_p,
    )
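
    # Pre-canned example rows that populate the inputs above.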
    gr.Examples(
        examples=examples,
        inputs=[image_input, query_input, assistant_prefix, decoding_strategy, temperature,
                max_new_tokens, repetition_penalty, top_p],
        outputs=output,
        fn=model_inference,
    )

    submit_btn.click(
        model_inference,
        inputs=[image_input, query_input, assistant_prefix, decoding_strategy, temperature,
                max_new_tokens, repetition_penalty, top_p],
        outputs=output,
    )

demo.launch(debug=True)