import gradio as gr
import spaces
from transformers import Qwen2VLForConditionalGeneration, AutoProcessor
from qwen_vl_utils import process_vision_info
import base64
from PIL import Image, ImageDraw
from io import BytesIO
import re


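# Load both checkpoints (and their matching processors) once at startup so the
# UI can switch between them without reloading weights.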
models = {
    "Qwen/Qwen2-VL-7B-Instruct": Qwen2VLForConditionalGeneration.from_pretrained("Qwen/Qwen2-VL-7B-Instruct", torch_dtype="auto", device_map="auto"),
    "Qwen/Qwen2-VL-2B-Instruct": Qwen2VLForConditionalGeneration.from_pretrained("Qwen/Qwen2-VL-2B-Instruct", torch_dtype="auto", device_map="auto")
}

processors = {
    "Qwen/Qwen2-VL-7B-Instruct": AutoProcessor.from_pretrained("Qwen/Qwen2-VL-7B-Instruct"),
    "Qwen/Qwen2-VL-2B-Instruct": AutoProcessor.from_pretrained("Qwen/Qwen2-VL-2B-Instruct")
}


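# Encode a PIL image as a base64 PNG string for embedding in a data URI.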
def image_to_base64(image):
    buffered = BytesIO()
    image.save(buffered, format="PNG")
    img_str = base64.b64encode(buffered.getvalue()).decode("utf-8")
    return img_str


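# Draw each [xmin, ymin, xmax, ymax] box onto the image in pixel coordinates.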
def draw_bounding_boxes(image, bounding_boxes, outline_color="red", line_width=2):
    # Draw on a copy so the caller's image is left unmodified.
    image = image.copy()
    draw = ImageDraw.Draw(image)
    for box in bounding_boxes:
        xmin, ymin, xmax, ymax = box
        draw.rectangle([xmin, ymin, xmax, ymax], outline=outline_color, width=line_width)
    return image


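# Qwen2-VL reports coordinates in a 1000x1000 reference frame (as requested by
# the system prompt below), so boxes must be mapped back to the real image size.
# Example: [100, 200, 300, 400] on an 800x600 image rescales to [80, 120, 240, 240].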
def rescale_bounding_boxes(bounding_boxes, original_width, original_height, scaled_width=1000, scaled_height=1000):
    x_scale = original_width / scaled_width
    y_scale = original_height / scaled_height
    rescaled_boxes = []
    for box in bounding_boxes:
        xmin, ymin, xmax, ymax = box
        rescaled_box = [
            xmin * x_scale,
            ymin * y_scale,
            xmax * x_scale,
            ymax * y_scale
        ]
        rescaled_boxes.append(rescaled_box)
    return rescaled_boxes


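# On ZeroGPU Spaces, this decorator requests a GPU for the duration of the call.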
@spaces.GPU
def run_example(image, text_input, system_prompt, model_id="Qwen/Qwen2-VL-7B-Instruct"):
    model = models[model_id].eval()
    processor = processors[model_id]

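    # Build a single-turn chat message: the image as a base64 data URI, followed
    # by the system prompt and the user's instruction.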
    messages = [
        {
            "role": "user",
            "content": [
                {"type": "image", "image": f"data:image;base64,{image_to_base64(image)}"},
                {"type": "text", "text": system_prompt},
                {"type": "text", "text": text_input},
            ],
        }
    ]

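    # Render the chat template, extract the vision inputs with qwen_vl_utils,
    # and pack text + images into model-ready tensors.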
    text = processor.apply_chat_template(
        messages, tokenize=False, add_generation_prompt=True
    )
    image_inputs, video_inputs = process_vision_info(messages)
    inputs = processor(
        text=[text],
        images=image_inputs,
        videos=video_inputs,
        padding=True,
        return_tensors="pt",
    )
    inputs = inputs.to("cuda")

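    # Generate, then drop the prompt tokens so only the new completion is decoded.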
    generated_ids = model.generate(**inputs, max_new_tokens=128)
    generated_ids_trimmed = [
        out_ids[len(in_ids) :] for in_ids, out_ids in zip(inputs.input_ids, generated_ids)
    ]
    output_text = processor.batch_decode(
        generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False
    )
    print(output_text)
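    # Pull every [xmin, ymin, xmax, ymax] quadruple out of the decoded text,
    # e.g. "[120, 85, 430, 512]" -> [120, 85, 430, 512].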
    pattern = r'\[\s*(\d+)\s*,\s*(\d+)\s*,\s*(\d+)\s*,\s*(\d+)\s*\]'
    matches = re.findall(pattern, str(output_text))
    parsed_boxes = [[int(num) for num in match] for match in matches]
    scaled_boxes = rescale_bounding_boxes(parsed_boxes, image.width, image.height)
    return output_text, parsed_boxes, draw_bounding_boxes(image, scaled_boxes)

css = """
  #output {
    height: 500px; 
    overflow: auto; 
    border: 1px solid #ccc; 
  }
"""
default_system_prompt = "You are a helpfull assistant to detect objects in images. When asked to detect elements based on a description you return bounding boxes for all elements in the form of [xmin, ymin, xmax, ymax] whith the values beeing scaled to 1000 by 1000 pixels. When there are more than one result, answer with a list of bounding boxes in the form of [[xmin, ymin, xmax, ymax], [xmin, ymin, xmax, ymax], ...]."

with gr.Blocks(css=css) as demo:
    gr.Markdown(
    """
    # Qwen2-VL Object Detection Demo

    Use the Qwen2-VL models to detect objects in an image. The 7B variant is noticeably more reliable than the 2B one.

    **Usage**: Use the keyword "detect" and a description of the target (see examples below).
    """)
    with gr.Tab(label="Qwen2-VL Input"):
        with gr.Row():
            with gr.Column():
                input_img = gr.Image(label="Input Image", type="pil")
                model_selector = gr.Dropdown(choices=list(models.keys()), label="Model", value="Qwen/Qwen2-VL-7B-Instruct")
                system_prompt = gr.Textbox(label="System Prompt", value=default_system_prompt)
                text_input = gr.Textbox(label="User Prompt")
                submit_btn = gr.Button(value="Submit")
            with gr.Column():
                model_output_text = gr.Textbox(label="Model Output Text")
                parsed_boxes = gr.Textbox(label="Parsed Boxes")
                annotated_image = gr.Image(label="Annotated Image")

        gr.Examples(
            examples=[
                ["assets/image1.jpg", "detect goats", default_system_prompt],
                ["assets/image2.jpg", "detect blue button", default_system_prompt],
                ["assets/image3.jpg", "detect person on bike", default_system_prompt],
            ],
            inputs=[input_img, text_input, system_prompt],
            outputs=[model_output_text, parsed_boxes, annotated_image],
            fn=run_example,
            cache_examples=True,
            label="Try examples"
        )

        submit_btn.click(run_example, [input_img, text_input, system_prompt, model_selector], [model_output_text, parsed_boxes, annotated_image])

demo.launch(debug=True)