VictorSanh committed on
Commit dd3cb2c
1 Parent(s): 1ff14cb

some heavy cleaning

app_dialogue.py CHANGED
@@ -165,7 +165,6 @@ EXAMPLES = [
 
 API_TOKEN = os.getenv("HF_AUTH_TOKEN")
 HF_WRITE_TOKEN = os.getenv("HF_WRITE_TOKEN")
-# IDEFICS_LOGO = "https://huggingface.co/spaces/HuggingFaceM4/idefics_playground/resolve/main/IDEFICS_logo.png"
 BOT_AVATAR = "IDEFICS_logo.png"
 
 
@@ -493,17 +492,6 @@ chatbot = gr.Chatbot(
 )
 
 # Using Flagging for saving dope and problematic examples
-# Dope examples flagging
-
-
-# gr.Markdown("""## How to use?
-
-# There are two ways to provide image inputs:
-# - Using the image box on the left panel
-# - Using the inline syntax: `text<fake_token_around_image><image:URL_IMAGE><fake_token_around_image>text`
-
-# The second syntax allows inputting an arbitrary number of images.""")
-
 image_flag = gr.Image(visible=False)
 with gr.Blocks(
     fill_height=True,
 
comparative_app.py DELETED
@@ -1,331 +0,0 @@
-import copy
-import os
-import random
-# import spaces
-import subprocess
-import time
-import torch
-
-from threading import Thread
-from typing import List, Dict, Union
-from urllib.parse import urlparse
-from PIL import Image
-
-import gradio as gr
-from transformers import AutoProcessor, TextIteratorStreamer
-from transformers import Idefics2ForConditionalGeneration
-
-# subprocess.run('pip install flash-attn --no-build-isolation', env={'FLASH_ATTENTION_SKIP_CUDA_BUILD': "TRUE"}, shell=True)
-
-DEVICE = torch.device("cuda")
-MODELS = {
-    "tr_288_cinco_final_sft_sphinx_11000": Idefics2ForConditionalGeneration.from_pretrained(
-        "HuggingFaceM4/idefics2-tfrm-compatible",
-        torch_dtype=torch.bfloat16,
-        _attn_implementation="flash_attention_2",
-        trust_remote_code=True,
-        token=os.environ["HF_AUTH_TOKEN"],
-        revision="2e56f9030ba9a17b6ebcd1c9ad5311d5fad0115f",
-    ).to(DEVICE),
-    "tr_290_bis_288_cinco_chatty_150": Idefics2ForConditionalGeneration.from_pretrained(
-        "HuggingFaceM4/idefics2-tfrm-compatible",
-        torch_dtype=torch.bfloat16,
-        _attn_implementation="flash_attention_2",
-        trust_remote_code=True,
-        token=os.environ["HF_AUTH_TOKEN"],
-        revision="3dc93be345d64fb6b1c550a233fe87ddb36f183d",
-    ).to(DEVICE),
-}
-PROCESSOR = AutoProcessor.from_pretrained(
-    "HuggingFaceM4/idefics2-tfrm-compatible",
-    token=os.environ["HF_AUTH_TOKEN"],
-)
-BAD_WORDS_IDS = PROCESSOR.tokenizer(["<image>", "<fake_token_around_image>"], add_special_tokens=False).input_ids
-EOS_WORDS_IDS = PROCESSOR.tokenizer("<end_of_utterance>", add_special_tokens=False).input_ids + [PROCESSOR.tokenizer.eos_token_id]
-
-SYSTEM_PROMPT = [  # Deactivating the system prompt for now, but if I were to reactivate it, I would need to a/ transform turns into dicts for applying the chat template, b/ manually overwrite the `default_template` to add the first line (that is not part of any turn), in particular for handling the bos_token.
-    # """The following is a conversation between a highly knowledgeable and intelligent visual AI assistant, called Assistant, and a human user, called User. In the following interactions, User and Assistant will converse in natural language, and Assistant will do its best to answer User’s questions. Assistant has the ability to perceive images and reason about the content of visual inputs. Assistant was built to be respectful, polite and inclusive. It knows a lot, and always tells the truth. When prompted with an image, it does not make up facts.
-
-    # The conversation begins:""",
-    # """\nUser:""",
-    # "https://i1.pickpik.com/photos/515/906/355/kittens-cats-pet-cute-preview.jpg",
-    # "Describe this image.<end_of_utterance>",
-    # """\nAssistant: Five kittens are standing together in the center of the photograph. From the left to right, there is one orange kitten, two kittens white and black stripes, and two brown kittens with an orange head. They are in bright green grass and it looks like they are moving forward.<end_of_utterance>""",
-    # "\nUser:How about this image?",
-    # "https://cdn.pixabay.com/photo/2017/09/25/13/12/puppy-2785074_1280.jpg",
-    # "Can you describe it too?<end_of_utterance>",
-    # """\nAssistant: A dog is lying on the floor, looking at the camera. It is looking directly at you. The dog has a white body and brown patches on its face and ears. Its eyes are dark. Its nose is black, and it has long, floppy ears, white paws, long fur, big eyes.<end_of_utterance>""",
-    # "\nUser: What kind of breed is it?<end_of_utterance>",
-    # """\nAssistant: It looks like a Cavalier King Charles Spaniel. These breeds are known for their gentle, affectionate nature. Their long ears and big, brown eyes give them a sweet, expressive face. Cavaliers are small dogs weighing between 13 and 20 pounds. They have a silky, medium-length coat that comes in various colors, including black and tan, tricolor, red and white, or black.<end_of_utterance>""",
-    # "\nUser: What can you tell me about this breed of dogs?<end_of_utterance>",
-    # """\nAssistant: One specific characteristic of the Cavalier King Charles Spaniel is its friendly and affectionate nature. This breed is known for being extremely sociable and forming strong bonds with their owners. They are often described as "velcro dogs" because they love to be close to their human companions, whether it's sitting on their laps, cuddling on the couch, or simply following them around the house.<end_of_utterance>""",
-    # "\nUser: ghjkhjabnufs<end_of_utterance>",
-    # """\nAssistant: That doesn’t seem to be a word. Could you ask me another way?<end_of_utterance>""",
-    # "\nUser: Do you like Cavalier King Charles Spaniel?<end_of_utterance>",
-    # """\nAssistant: I do not have personal opinions as I’m just a computer program. However, cavaliers are known for being excellent family pets due to their gentle and patient demeanor, making them great with children and other pets. Their affectionate nature also makes them ideal therapy dogs, as they can provide comfort and emotional support to those in need.<end_of_utterance>""",
-    # "\nUser: How many dogs do you see in this image?",
-    # "https://i.dailymail.co.uk/i/pix/2011/07/01/article-2010308-0CD22A8300000578-496_634x414.jpg",
-    # "<end_of_utterance>",
-    # """\nAssistant: There is no dogs in this image. The picture shows a tennis player jumping to volley the ball.<end_of_utterance>""",
-]
-
-API_TOKEN = os.getenv("HF_AUTH_TOKEN")
-# IDEFICS_LOGO = "https://huggingface.co/spaces/HuggingFaceM4/idefics_playground/resolve/main/IDEFICS_logo.png"
-BOT_AVATAR = "IDEFICS_logo.png"
-
-
-# Chatbot utils
-def turn_is_pure_media(turn):
-    return turn[1] is None
-
-
-def format_user_prompt_with_im_history_and_system_conditioning(
-    user_prompt, chat_history
-) -> List[Dict[str, Union[List, str]]]:
-    """
-    Produces the resulting list that needs to go inside the processor.
-    It handles the potential image(s), the history and the system conditioning.
-    """
-    resulting_list = copy.deepcopy(SYSTEM_PROMPT)
-
-    # Format history
-    for turn in chat_history:
-        if not resulting_list or (resulting_list and resulting_list[-1]["role"] != "user"):
-            resulting_list.append(
-                {
-                    "role": "user",
-                    "content": [],
-                }
-            )
-
-        if turn_is_pure_media(turn):
-            media = turn[0][0]
-            resulting_list[-1]["content"].append(Image.open(media))
-        else:
-            user_utterance, assistant_utterance = turn
-            resulting_list[-1]["content"].append(user_utterance.strip())
-            resulting_list.append(
-                {
-                    "role": "assistant",
-                    "content": [assistant_utterance]
-                }
-            )
-
-    # Format current input
-    if not user_prompt["files"]:
-        resulting_list.append(
-            {
-                "role": "user",
-                "content": [user_prompt['text']],
-            }
-        )
-    else:
-        # Choosing to put the image first (i.e. before the text), but this is an arbitrary choice.
-        resulting_list.append(
-            {
-                "role": "user",
-                "content": [Image.open(im['path']) for im in user_prompt['files']] + [user_prompt['text']],
-            }
-        )
-
-    return resulting_list
-
-
-def extract_images_from_msg_list(msg_list):
-    all_images = []
-    for msg in msg_list:
-        for c_ in msg["content"]:
-            if isinstance(c_, Image.Image):
-                all_images.append(c_)
-    return all_images
-
-
-# @spaces.GPU(duration=180)
-def model_inference(
-    user_prompt,
-    chat_history,
-    model_selector,
-    decoding_strategy,
-    temperature,
-    max_new_tokens,
-    repetition_penalty,
-    top_p,
-):
-    if user_prompt["text"].strip() == "" and not user_prompt["files"]:
-        raise gr.Error("Please input a query and optionally image(s).")
-
-    if user_prompt["text"].strip() == "" and user_prompt["files"]:
-        raise gr.Error("Please input a text query along the image(s).")
-
-    for file in user_prompt["files"]:
-        if not file["mime_type"].startswith("image/"):
-            gr.Error("Idefics2 only supports images. Please input a valid image.")
-
-    if chat_history:
-        raise gr.Error("The comparative mode only supports single turns")
-
-    if len(MODELS) <= 1:
-        raise gr.Error("There should be at least 2 models to compare (3 might be a stretch on your regular 80GB H100).")
-
-    streamer = TextIteratorStreamer(
-        PROCESSOR.tokenizer,
-        skip_prompt=True,
-        timeout=5.,
-    )
-
-    # Common parameters to all decoding strategies
-    # This documentation is useful to read: https://huggingface.co/docs/transformers/main/en/generation_strategies
-    generation_args = {
-        "max_new_tokens": max_new_tokens,
-        "repetition_penalty": repetition_penalty,
-        "bad_words_ids": BAD_WORDS_IDS,
-        "eos_token_id": EOS_WORDS_IDS,
-        "streamer": streamer,
-    }
-
-    assert decoding_strategy in [
-        "Greedy",
-        "Top P Sampling",
-    ]
-    if decoding_strategy == "Greedy":
-        generation_args["do_sample"] = False
-    elif decoding_strategy == "Top P Sampling":
-        generation_args["temperature"] = temperature
-        generation_args["do_sample"] = True
-        generation_args["top_p"] = top_p
-
-    # Creating model inputs
-    formated_prompt_list = format_user_prompt_with_im_history_and_system_conditioning(
-        user_prompt=user_prompt,
-        chat_history=chat_history,
-    )
-    inputs = PROCESSOR.apply_chat_template(formated_prompt_list, add_generation_prompt=True, return_tensors="pt")
-    inputs = {k: v.to(DEVICE) for k, v in inputs.items()}
-    generation_args.update(inputs)
-
-    # The regular non streaming generation mode
-    _ = generation_args.pop("streamer")
-
-    print(f"Input: {user_prompt}")
-    order = random.sample(range(len(MODELS)), len(MODELS))
-    result = ""
-    for idx, selected_model in enumerate(order):
-        generated_ids = MODELS[sorted(list(MODELS.keys()))[selected_model]].generate(**generation_args)
-        generated_text = PROCESSOR.batch_decode(generated_ids[:, generation_args["input_ids"].size(-1):], skip_special_tokens=True)[0]
-        result += f"**MODEL {chr(65+idx)}**:\n{generated_text}\n\n---------------------\n\n"
-        print(f"MODEL {chr(65+idx)}: {sorted(list(MODELS.keys()))[selected_model]}")
-        print("----------")
-    result = result[:-25]
-    return result
-
-    # # The streaming generation mode
-    # thread = Thread(
-    #     target=MODELS[model_selector].generate,
-    #     kwargs=generation_args,
-    # )
-    # thread.start()
-
-    # print("start generating")
-    # acc_text = ""
-    # for text_token in streamer:
-    #     time.sleep(0.04)
-    #     acc_text += text_token
-    #     if acc_text.endswith("<end_of_utterance>"):
-    #         acc_text = acc_text[:-18]
-    #     yield acc_text
-    # print("success - generated the following text:", acc_text)
-
-
-# Hyper-parameters for generation
-max_new_tokens = gr.Slider(
-    minimum=8,
-    maximum=1024,
-    value=512,
-    step=1,
-    interactive=True,
-    label="Maximum number of new tokens to generate",
-)
-repetition_penalty = gr.Slider(
-    minimum=0.01,
-    maximum=5.0,
-    value=1.0,
-    step=0.01,
-    interactive=True,
-    label="Repetition penalty",
-    info="1.0 is equivalent to no penalty",
-)
-decoding_strategy = gr.Radio(
-    [
-        "Greedy",
-        "Top P Sampling",
-    ],
-    value="Greedy",
-    label="Decoding strategy",
-    interactive=True,
-    info="Higher values is equivalent to sampling more low-probability tokens.",
-)
-temperature = gr.Slider(
-    minimum=0.0,
-    maximum=5.0,
-    value=0.4,
-    step=0.1,
-    interactive=True,
-    label="Sampling temperature",
-    info="Higher values will produce more diverse outputs.",
-)
-top_p = gr.Slider(
-    minimum=0.01,
-    maximum=0.99,
-    value=0.8,
-    step=0.01,
-    interactive=True,
-    label="Top P",
-    info="Higher values is equivalent to sampling more low-probability tokens.",
-)
-
-
-chatbot = gr.Chatbot(
-    label="IDEFICS2",
-    avatar_images=[None, BOT_AVATAR],
-    height=750,
-)
-
-
-with gr.Blocks(fill_height=True) as demo:
-    # model selector should be set to `visible=False` ultimately
-    with gr.Row(elem_id="model_selector_row"):
-        model_selector = gr.Dropdown(
-            choices=MODELS.keys(),
-            value=list(MODELS.keys())[0],
-            interactive=True,
-            show_label=False,
-            container=False,
-            label="Model",
-            visible=True,
-        )
-
-    decoding_strategy.change(
-        fn=lambda selection: gr.Slider(
-            visible=(
-                selection in ["contrastive_sampling", "beam_sampling", "Top P Sampling", "sampling_top_k"]
-            )
-        ),
-        inputs=decoding_strategy,
-        outputs=temperature,
-    )
-    decoding_strategy.change(
-        fn=lambda selection: gr.Slider(visible=(selection in ["Top P Sampling"])),
-        inputs=decoding_strategy,
-        outputs=top_p,
-    )
-
-    gr.ChatInterface(
-        fn=model_inference,
-        chatbot=chatbot,
-        # examples=[{"text": "hello"}, {"text": "hola"}, {"text": "merhaba"}],
-        title="Idefics2 Playground",
-        multimodal=True,
-        additional_inputs=[model_selector, decoding_strategy, temperature, max_new_tokens, repetition_penalty, top_p],
-    )
-
-demo.launch()
 
idefics1_app_dialogue.py DELETED
@@ -1,733 +0,0 @@
-import copy
-import hashlib
-import os
-import re
-import spaces
-import subprocess
-import torch
-import PIL
-
-from pathlib import Path
-from threading import Thread
-from typing import List, Optional, Tuple
-from urllib.parse import urlparse
-from PIL import Image
-
-import gradio as gr
-from gradio import processing_utils
-from gradio_client.client import DEFAULT_TEMP_DIR
-from transformers import AutoProcessor, AutoModelForCausalLM, TextIteratorStreamer, logging
-
-from utils import create_model_inputs
-
-
-subprocess.run('pip install flash-attn --no-build-isolation', env={'FLASH_ATTENTION_SKIP_CUDA_BUILD': "TRUE"}, shell=True)
-
-DEVICE = torch.device("cuda")
-MODELS = {
-    "282 - mix1 fixed - opt 23'000": AutoModelForCausalLM.from_pretrained(
-        "HuggingFaceM4/idefics2",
-        trust_remote_code=True,
-        torch_dtype=torch.bfloat16,
-        token=os.environ["HF_AUTH_TOKEN"],
-        revision="a1bc6a2b0f74cde25844144f602dde2808a564d9",
-    ).to(DEVICE),
-    "286 - mix6 tables - opt 20'000": AutoModelForCausalLM.from_pretrained(
-        "HuggingFaceM4/idefics2",
-        trust_remote_code=True,
-        torch_dtype=torch.bfloat16,
-        token=os.environ["HF_AUTH_TOKEN"],
-        revision="b473d49caa964991b40b79fe7cb27d51d4d023f6",
-    ).to(DEVICE),
-    # "285 - continued pretraining on text sft - opt 2'000": AutoModelForCausalLM.from_pretrained(
-    #     "HuggingFaceM4/idefics2",
-    #     trust_remote_code=True,
-    #     torch_dtype=torch.bfloat16,
-    #     token=os.environ["HF_AUTH_TOKEN"],
-    #     revision="b0a2a564e5dc311591886bb375e8d5a1aeaade83",
-    # ).to(DEVICE),
-}
-PROCESSOR = AutoProcessor.from_pretrained(
-    "HuggingFaceM4/idefics2",
-    token=os.environ["HF_AUTH_TOKEN"],
-)
-FAKE_TOK_AROUND_IMAGE = "<fake_token_around_image>"
-BOS_TOKEN = PROCESSOR.tokenizer.bos_token
-BAD_WORDS_IDS = PROCESSOR.tokenizer(["<image>", "<fake_token_around_image>"], add_special_tokens=False).input_ids
-EOS_WORDS_IDS = PROCESSOR.tokenizer("<end_of_utterance>", add_special_tokens=False).input_ids + [PROCESSOR.tokenizer.eos_token_id]
-IMAGE_SEQ_LEN = list(MODELS.values())[0].config.perceiver_config.resampler_n_latents
-
-SYSTEM_PROMPT = [
-    # """The following is a conversation between a highly knowledgeable and intelligent visual AI assistant, called Assistant, and a human user, called User. In the following interactions, User and Assistant will converse in natural language, and Assistant will do its best to answer User’s questions. Assistant has the ability to perceive images and reason about the content of visual inputs. Assistant was built to be respectful, polite and inclusive. It knows a lot, and always tells the truth. When prompted with an image, it does not make up facts.
-
-    # The conversation begins:""",
-    # """\nUser:""",
-    # "https://i1.pickpik.com/photos/515/906/355/kittens-cats-pet-cute-preview.jpg",
-    # "Describe this image.<end_of_utterance>",
-    # """\nAssistant: Five kittens are standing together in the center of the photograph. From the left to right, there is one orange kitten, two kittens white and black stripes, and two brown kittens with an orange head. They are in bright green grass and it looks like they are moving forward.<end_of_utterance>""",
-    # "\nUser:How about this image?",
-    # "https://cdn.pixabay.com/photo/2017/09/25/13/12/puppy-2785074_1280.jpg",
-    # "Can you describe it too?<end_of_utterance>",
-    # """\nAssistant: A dog is lying on the floor, looking at the camera. It is looking directly at you. The dog has a white body and brown patches on its face and ears. Its eyes are dark. Its nose is black, and it has long, floppy ears, white paws, long fur, big eyes.<end_of_utterance>""",
-    # "\nUser: What kind of breed is it?<end_of_utterance>",
-    # """\nAssistant: It looks like a Cavalier King Charles Spaniel. These breeds are known for their gentle, affectionate nature. Their long ears and big, brown eyes give them a sweet, expressive face. Cavaliers are small dogs weighing between 13 and 20 pounds. They have a silky, medium-length coat that comes in various colors, including black and tan, tricolor, red and white, or black.<end_of_utterance>""",
-    # "\nUser: What can you tell me about this breed of dogs?<end_of_utterance>",
-    # """\nAssistant: One specific characteristic of the Cavalier King Charles Spaniel is its friendly and affectionate nature. This breed is known for being extremely sociable and forming strong bonds with their owners. They are often described as "velcro dogs" because they love to be close to their human companions, whether it's sitting on their laps, cuddling on the couch, or simply following them around the house.<end_of_utterance>""",
-    # "\nUser: ghjkhjabnufs<end_of_utterance>",
-    # """\nAssistant: That doesn’t seem to be a word. Could you ask me another way?<end_of_utterance>""",
-    # "\nUser: Do you like Cavalier King Charles Spaniel?<end_of_utterance>",
-    # """\nAssistant: I do not have personal opinions as I’m just a computer program. However, cavaliers are known for being excellent family pets due to their gentle and patient demeanor, making them great with children and other pets. Their affectionate nature also makes them ideal therapy dogs, as they can provide comfort and emotional support to those in need.<end_of_utterance>""",
-    # "\nUser: How many dogs do you see in this image?",
-    # "https://i.dailymail.co.uk/i/pix/2011/07/01/article-2010308-0CD22A8300000578-496_634x414.jpg",
-    # "<end_of_utterance>",
-    # """\nAssistant: There is no dogs in this image. The picture shows a tennis player jumping to volley the ball.<end_of_utterance>""",
-]
-
-API_TOKEN = os.getenv("HF_AUTH_TOKEN")
-# IDEFICS_LOGO = "https://huggingface.co/spaces/HuggingFaceM4/idefics_playground/resolve/main/IDEFICS_logo.png"
-BOT_AVATAR = "IDEFICS_logo.png"
-
-
-# Monkey patch adapted from gradio.components.image.Image - mostly to make the `save` step optional in `pil_to_temp_file`
-def hash_bytes(bytes: bytes):
-    sha1 = hashlib.sha1()
-    sha1.update(bytes)
-    return sha1.hexdigest()
-
-
-def pil_to_temp_file(img: PIL.Image.Image, dir: str = DEFAULT_TEMP_DIR, format: str = "png") -> str:
-    """Save a PIL image into a temp file"""
-    bytes_data = processing_utils.encode_pil_to_bytes(img, format)
-    temp_dir = Path(dir) / hash_bytes(bytes_data)
-    temp_dir.mkdir(exist_ok=True, parents=True)
-    filename = str(temp_dir / f"image.{format}")
-    if not os.path.exists(filename):
-        img.save(filename, pnginfo=processing_utils.get_pil_metadata(img))
-    return filename
-
-
-def add_file(file):
-    return file.name, gr.update(label='🖼️ Uploaded!')
-
-
-# Utils to handle the image markdown display logic
-def split_str_on_im_markdown(string: str) -> List[str]:
-    """
-    Extract from a string (typically the user prompt string) the potential images from markdown
-    Examples:
-    - `User:![](/file=/my_temp/chicken_on_money.png)Describe this image.` would become `["User:", "/my_temp/chicken_on_money.png", "Describe this image."]`
-    """
-    IMAGES_PATTERN = re.compile(r"!\[[^\]]*\]\((.*?)\s*(\"(?:.*[^\"])\")?\s*\)")
-    parts = []
-    cursor = 0
-    for pattern in IMAGES_PATTERN.finditer(string):
-        start = pattern.start()
-        if start != cursor:
-            parts.append(string[cursor:start])
-        image_url = pattern.group(1)
-        if image_url.startswith("/file="):
-            image_url = image_url[6:]  # Remove the '/file=' prefix
-        parts.append(image_url)
-        cursor = pattern.end()
-    if cursor != len(string):
-        parts.append(string[cursor:])
-    return parts
-
-
-def is_image(string: str) -> bool:
-    """
-    Images can be passed in two ways: a local image path or a url.
-    """
-    return is_url(string) or string.startswith(DEFAULT_TEMP_DIR)
-
-
-def is_url(string: str) -> bool:
-    """
-    Checks if the passed string contains a valid url and nothing else. e.g. if a space is included,
-    the url is immediately invalidated.
-    """
-    if " " in string:
-        return False
-    result = urlparse(string)
-    return all([result.scheme, result.netloc])
-
-
-def isolate_images_urls(prompt_list: List) -> List:
-    """
-    Convert a full string prompt to the list format expected by the processor.
-    In particular, image urls (as delimited by <fake_token_around_image>) should be their own elements.
-    From:
-    ```
-    [
-        "bonjour<fake_token_around_image><image:IMG_URL><fake_token_around_image>hello",
-        PIL.Image.Image,
-        "Aurevoir",
-    ]
-    ```
-    to:
-    ```
-    [
-        "bonjour",
-        IMG_URL,
-        "hello",
-        PIL.Image.Image,
-        "Aurevoir",
-    ]
-    ```
-    """
-    linearized_list = []
-    for prompt in prompt_list:
-        # Prompt can be either a string, or a PIL image
-        if isinstance(prompt, PIL.Image.Image):
-            linearized_list.append(prompt)
-        elif isinstance(prompt, str):
-            if "<fake_token_around_image>" not in prompt:
-                linearized_list.append(prompt)
-            else:
-                prompt_splitted = prompt.split("<fake_token_around_image>")
-                for ps in prompt_splitted:
-                    if ps == "":
-                        continue
-                    if ps.startswith("<image:"):
-                        linearized_list.append(ps[7:-1])
-                    else:
-                        linearized_list.append(ps)
-        else:
-            raise TypeError(
-                f"Unrecognized type for `prompt`. Got {type(type(prompt))}. Was expecting something in [`str`,"
-                " `PIL.Image.Image`]"
-            )
-    return linearized_list
-
-
-def fetch_images(url_list: str) -> PIL.Image.Image:
-    """Fetching images"""
-    return PROCESSOR.image_processor.fetch_images(url_list)
-
-
-def handle_manual_images_in_user_prompt(user_prompt: str) -> List[str]:
-    """
-    Handle the case of images manually inputted as text (i.e. the `<fake_token_around_image><image:IMG_URL><fake_token_around_image>` syntax) in the user prompt
-    by fetching them, saving them locally and replacing the whole sub-sequence with the local image path.
-    """
-    if "<fake_token_around_image>" in user_prompt:
-        splitted_user_prompt = isolate_images_urls([user_prompt])
-        resulting_user_prompt = []
-        for u_p in splitted_user_prompt:
-            if is_url(u_p):
-                img = fetch_images([u_p])[0]
-                tmp_file = pil_to_temp_file(img)
-                resulting_user_prompt.append(tmp_file)
-            else:
-                resulting_user_prompt.append(u_p)
-        return resulting_user_prompt
-    else:
-        return [user_prompt]
-
-
-def prompt_list_to_markdown(prompt_list: List[str]) -> str:
-    """
-    Convert a user prompt in the list format (i.e. elements are either a PIL image or a string) into
-    the markdown format that is used for the chatbot history and rendering.
-    """
-    resulting_string = ""
-    for elem in prompt_list:
-        if is_image(elem):
-            if is_url(elem):
-                resulting_string += f"![]({elem})"
-            else:
-                resulting_string += f"![](/file={elem})"
-        else:
-            resulting_string += elem
-    return resulting_string
-
-
-def prompt_list_to_model_input(prompt_list: List[str]) -> Tuple[str, List[Image.Image]]:
-    """
-    Create the final input string and image list to feed to the model's processor.
-    """
-    images = []
-    for idx, part in enumerate(prompt_list):
-        if is_image(part):
-            if is_url(part):
-                images.append(fetch_images([part])[0])
-            else:
-                images.append(Image.open(part))
-            prompt_list[idx] = f"{FAKE_TOK_AROUND_IMAGE}{'<image>' * IMAGE_SEQ_LEN}{FAKE_TOK_AROUND_IMAGE}"
-    input_text = "".join(prompt_list)
-    input_text = input_text.replace(FAKE_TOK_AROUND_IMAGE * 2, FAKE_TOK_AROUND_IMAGE)
-    input_text = BOS_TOKEN + input_text.strip()
-    return input_text, images
-
-
-def remove_spaces_around_token(text: str) -> str:
-    pattern = r"\s*(<fake_token_around_image>)\s*"
-    replacement = r"\1"
-    result = re.sub(pattern, replacement, text)
-    return result
-
-
-# Chatbot utils
-def format_user_prompt_with_im_history_and_system_conditioning(
-    current_user_prompt_str: str, current_image: Optional[str], history: List[Tuple[str, str]]
-) -> Tuple[List[str], List[str]]:
-    """
-    Produces the resulting list that needs to go inside the processor.
-    It handles the potential image box input, the history and the system conditioning.
-    """
-    resulting_list = copy.deepcopy(SYSTEM_PROMPT)
-
-    # Format history
-    for turn in history:
-        user_utterance, assistant_utterance = turn
-        splitted_user_utterance = split_str_on_im_markdown(user_utterance)
-
-        optional_space = ""
-        if not is_image(splitted_user_utterance[0]):
-            optional_space = " "
-        resulting_list.append(f"\nUser:{optional_space}")
-        resulting_list.extend(splitted_user_utterance)
-        resulting_list.append(f"<end_of_utterance>\nAssistant: {assistant_utterance}")
-
-    # Format current input
-    current_user_prompt_str = remove_spaces_around_token(current_user_prompt_str)
-    if current_image is None:
-        if "![](" in current_user_prompt_str:
-            current_user_prompt_list = split_str_on_im_markdown(current_user_prompt_str)
-        else:
-            current_user_prompt_list = handle_manual_images_in_user_prompt(current_user_prompt_str)
-
-        optional_space = ""
-        if not is_image(current_user_prompt_list[0]):
-            # Check if the first element is an image (and more precisely a path to an image)
-            optional_space = " "
-        resulting_list.append(f"\nUser:{optional_space}")
-        resulting_list.extend(current_user_prompt_list)
-        resulting_list.append("<end_of_utterance>\nAssistant:")
-    else:
-        # Choosing to put the image first when the image is inputted through the UI, but this is an arbitrary choice.
-        resulting_list.extend(["\nUser:", current_image, f"{current_user_prompt_str}<end_of_utterance>\nAssistant:"])
-        current_user_prompt_list = [current_user_prompt_str]
-
-    return resulting_list, current_user_prompt_list
-
-
-textbox = gr.Textbox(
-    placeholder="Upload an image and send a message",
-    show_label=False,
-    # value="Describe the battle against the fierce dragons.",
-    visible=True,
-    container=False,
-    label="Text input",
-    scale=6,
-)
-with gr.Blocks(title="IDEFICS Playground", theme=gr.themes.Base()) as demo:
-    gr.HTML("""<h1 align="center">🐶 IDEFICS Playground</h1>""")
-    # with gr.Row(variant="panel"):
-    #     with gr.Column(scale=1):
-    #         gr.Image(IDEFICS_LOGO, elem_id="banner-image", show_label=False, show_download_button=False)
-    #     with gr.Column(scale=5):
-    #         gr.HTML("""
-    #             <p>This demo showcases <strong>IDEFICS</strong>, a open-access large visual language model. Like GPT-4, the multimodal model accepts arbitrary sequences of image and text inputs and produces text outputs. IDEFICS can answer questions about images, describe visual content, create stories grounded in multiple images, etc.</p>
-    #             <p>IDEFICS (which stands for <strong>I</strong>mage-aware <strong>D</strong>ecoder <strong>E</strong>nhanced à la <strong>F</strong>lamingo with <strong>I</strong>nterleaved <strong>C</strong>ross-attention<strong>S</strong>) is an open-access reproduction of <a href="https://huggingface.co/papers/2204.14198">Flamingo</a>, a closed-source visual language model developed by Deepmind. IDEFICS was built solely on publicly available data and models. It is currently the only visual language model of this scale (80 billion parameters) that is available in open-access.</p>
-    #             <p>📚 The variants available in this demo were fine-tuned on a mixture of supervised and instruction fine-tuning datasets to make the models more suitable in conversational settings. For more details, we refer to our <a href="https://huggingface.co/blog/idefics">blog post</a>.</p>
-    #             <p>🅿️ <strong>Intended uses:</strong> This demo along with the <a href="https://huggingface.co/models?sort=trending&amp;search=HuggingFaceM4%2Fidefics">supporting models</a> are provided as research artifacts to the community. We detail misuses and out-of-scope uses <a href="https://huggingface.co/HuggingFaceM4/idefics-80b#misuse-and-out-of-scope-use">here</a>.</p>
-    #             <p>⛔️ <strong>Limitations:</strong> The model can produce factually incorrect texts, hallucinate facts (with or without an image) and will struggle with small details in images. While the model will tend to refuse answering questionable user requests, it can produce problematic outputs (including racist, stereotypical, and disrespectful texts), in particular when prompted to do so. We encourage users to read our findings from evaluating the model for potential biases in the <a href="https://huggingface.co/HuggingFaceM4/idefics-80b#bias-evaluation">model card</a>.</p>
-    #         """)
-
-    with gr.Row(elem_id="model_selector_row"):
-        model_selector = gr.Dropdown(
-            choices=MODELS.keys(),
-            value="284 - neftune - opt 18'500",
-            interactive=True,
-            show_label=False,
-            container=False,
-            label="Model",
-            visible=True,
-        )
-
-    imagebox = gr.Image(type="filepath", label="Image input", visible=False)
-
-    with gr.Row():
-        # def prefetch_images_in_history(user_prompt_str):
-        #     """
-        #     Pre-fetch the images that are passed in the chatbot default history.
-        #     """
-        #     return prompt_list_to_markdown(handle_manual_images_in_user_prompt(user_prompt_str))
-
-        chatbot = gr.Chatbot(
-            elem_id="chatbot",
-            label="IDEFICS",
-            visible=True,
-            height=750,
-            avatar_images=[None, BOT_AVATAR]
-        )
-
-    with gr.Group():
-        with gr.Row():
-            textbox.render()
-            submit_btn = gr.Button(value="▶️ Submit", visible=True)
-            clear_btn = gr.ClearButton([textbox, imagebox, chatbot], value="🧹 Clear")
-            regenerate_btn = gr.Button(value="🔄 Regenerate", visible=True)
-            upload_btn = gr.UploadButton("📁 Upload image", file_types=["image"])
-
-    with gr.Row():
-        with gr.Accordion("Advanced settings", open=False, visible=True) as parameter_row:
-            max_new_tokens = gr.Slider(
-                minimum=8,
-                maximum=1024,
-                value=512,
-                step=1,
-                interactive=True,
-                label="Maximum number of new tokens to generate",
-            )
-            repetition_penalty = gr.Slider(
-                minimum=0.01,
-                maximum=5.0,
-                value=1.0,
-                step=0.01,
-                interactive=True,
-                label="Repetition penalty",
-                info="1.0 is equivalent to no penalty",
-            )
-            decoding_strategy = gr.Radio(
-                [
-                    "Greedy",
-                    "Top P Sampling",
-                ],
-                value="Greedy",
-                label="Decoding strategy",
-                interactive=True,
-                info="Higher values is equivalent to sampling more low-probability tokens.",
-            )
-            temperature = gr.Slider(
-                minimum=0.0,
-                maximum=5.0,
-                value=0.4,
-                step=0.1,
-                interactive=True,
-                visible=False,
-                label="Sampling temperature",
-                info="Higher values will produce more diverse outputs.",
-            )
-            decoding_strategy.change(
-                fn=lambda selection: gr.Slider(
-                    visible=(
-                        selection in ["contrastive_sampling", "beam_sampling", "Top P Sampling", "sampling_top_k"]
-                    )
-                ),
-                inputs=decoding_strategy,
-                outputs=temperature,
-            )
-            top_p = gr.Slider(
-                minimum=0.01,
-                maximum=0.99,
-                value=0.8,
-                step=0.01,
-                interactive=True,
-                visible=False,
-                label="Top P",
-                info="Higher values is equivalent to sampling more low-probability tokens.",
-            )
-            decoding_strategy.change(
-                fn=lambda selection: gr.Slider(visible=(selection in ["Top P Sampling"])),
-                inputs=decoding_strategy,
-                outputs=top_p,
-            )
-
-    @spaces.GPU(duration=180)
-    def model_inference(
-        model_selector,
-        user_prompt_str,
-        chat_history,
-        image,
-        decoding_strategy,
-        temperature,
-        max_new_tokens,
-        repetition_penalty,
-        top_p,
-    ):
-        if user_prompt_str.strip() == "" and image is None:
-            return "", None, chat_history
-
-        formated_prompt_list, user_prompt_list = format_user_prompt_with_im_history_and_system_conditioning(
-            current_user_prompt_str=user_prompt_str.strip(),
-            current_image=image,
-            history=chat_history,
-        )
-
-        streamer = TextIteratorStreamer(
-            PROCESSOR.tokenizer,
-            skip_prompt=True,
-        )
-
-        # Common parameters to all decoding strategies
-        # This documentation is useful to read: https://huggingface.co/docs/transformers/main/en/generation_strategies
-        generation_args = {
-            "max_new_tokens": max_new_tokens,
-            "repetition_penalty": repetition_penalty,
-            "bad_words_ids": BAD_WORDS_IDS,
-            "eos_token_id": EOS_WORDS_IDS,
-            "streamer": streamer,
-        }
-
-        assert decoding_strategy in [
-            "Greedy",
-            "Top P Sampling",
-        ]
-        if decoding_strategy == "Greedy":
-            generation_args["do_sample"] = False
-        elif decoding_strategy == "Top P Sampling":
-            generation_args["temperature"] = temperature
-            generation_args["do_sample"] = True
-            generation_args["top_p"] = top_p
-
-        if image is None:
-            # Case where there is no image OR the image is passed as `<fake_token_around_image><image:IMAGE_URL><fake_token_around_image>`
-            chat_history.append([prompt_list_to_markdown(user_prompt_list), ''])
-        else:
-            # Case where the image is passed through the Image Box.
-            # Convert the image into base64 for both passing it through the chat history and
-            # displaying the image inside the same bubble as the text.
-            chat_history.append(
-                [
-                    f"{prompt_list_to_markdown([image] + user_prompt_list)}",
-                    '',
-                ]
-            )
-
-        # Creating model inputs
-        input_text, images = prompt_list_to_model_input(formated_prompt_list)
-        inputs = create_model_inputs([input_text], [images])
-        inputs = {k: v.to(DEVICE) for k, v in inputs.items()}
-        generation_args.update(inputs)
-
-        thread = Thread(
-            target=MODELS[model_selector].generate,
-            kwargs=generation_args,
-        )
-
-        thread.start()
-        acc_text = ""
-        for idx, text_token in enumerate(streamer):
-
-            acc_text += text_token
-            last_turn = chat_history.pop(-1)
-            last_turn[-1] += acc_text
-            if last_turn[-1].endswith("\nUser"):
-                # Safeguard: sometimes (rarely), the model won't generate the token `<end_of_utterance>` and will go directly to generating `\nUser:`
-                # It will thus stop the generation on `\nUser:`. But when it exits, it will have already generated `\nUser`
-                # This post-processing ensures that we don't have an additional `\nUser` wandering around.
-                last_turn[-1] = last_turn[-1][:-5]
-            chat_history.append(last_turn)
-            yield "", None, chat_history
-            acc_text = ""
-
-    def process_example(message, image):
-        """
-        Same as `model_inference` but in greedy mode and with the 80b-instruct.
-        Specifically for pre-computing the default examples.
-        """
-        model_selector = "284 - neftune - opt 18'500"
-        user_prompt_str = message
-        chat_history = []
-        max_new_tokens = 512
-
-        formated_prompt_list, user_prompt_list = format_user_prompt_with_im_history_and_system_conditioning(
-            current_user_prompt_str=user_prompt_str.strip(),
-            current_image=image,
-            history=chat_history,
-        )
-
-        # Common parameters to all decoding strategies
-        # This documentation is useful to read: https://huggingface.co/docs/transformers/main/en/generation_strategies
-        generation_args = {
-            "max_new_tokens": max_new_tokens,
-            "repetition_penalty": None,
-            "bad_words_ids": BAD_WORDS_IDS,
-            "eos_token_id": EOS_WORDS_IDS,
-            "do_sample": False,
-        }
-
-        if image is None:
-            # Case where there is no image OR the image is passed as `<fake_token_around_image><image:IMAGE_URL><fake_token_around_image>`
-            chat_history.append([prompt_list_to_markdown(user_prompt_list), ''])
-        else:
-            # Case where the image is passed through the Image Box.
-            # Convert the image into base64 for both passing it through the chat history and
-            # displaying the image inside the same bubble as the text.
-            chat_history.append(
-                [
-                    f"{prompt_list_to_markdown([image] + user_prompt_list)}",
-                    '',
-                ]
-            )
-
-        # Creating model inputs
-        input_text, images = prompt_list_to_model_input(formated_prompt_list)
-        inputs = create_model_inputs([input_text], [images])
-        inputs = {k: v.to(DEVICE) for k, v in inputs.items()}
-        generation_args.update(inputs)
-
-        generated_ids = MODELS[model_selector].generate(**generation_args)
-        generated_text = PROCESSOR.batch_decode(generated_ids, skip_special_tokens=True)[0]
-
-        if generated_text.endswith("\nUser"):
-            generated_text = generated_text[:-5]
-
-        last_turn = chat_history.pop(-1)
-        last_turn[-1] += generated_text
-        chat_history.append(last_turn)
-        return "", None, chat_history
-
-    textbox.submit(
-        fn=model_inference,
-        inputs=[
-            model_selector,
-            textbox,
-            chatbot,
-            imagebox,
-            decoding_strategy,
-            temperature,
-            max_new_tokens,
-            repetition_penalty,
-            top_p,
-        ],
-        outputs=[textbox, imagebox, chatbot],
-    )
-    submit_btn.click(
-        fn=model_inference,
-        inputs=[
-            model_selector,
-            textbox,
-            chatbot,
-            imagebox,
-            decoding_strategy,
-            temperature,
-            max_new_tokens,
-            repetition_penalty,
-            top_p,
-        ],
-        outputs=[
-            textbox,
-            imagebox,
-            chatbot,
-        ],
-    )
-
-    def remove_last_turn(chat_history):
-        if len(chat_history) == 0:
-            return gr.Update(), gr.Update()
-        last_interaction = chat_history[-1]
-        chat_history = chat_history[:-1]
-        chat_update = gr.update(value=chat_history)
-        text_update = gr.update(value=last_interaction[0])
-        return chat_update, text_update
-
-    regenerate_btn.click(fn=remove_last_turn, inputs=chatbot, outputs=[chatbot, textbox]).then(
-        fn=model_inference,
-        inputs=[
-            model_selector,
-            textbox,
-            chatbot,
-            imagebox,
-            decoding_strategy,
-            temperature,
-            max_new_tokens,
-            repetition_penalty,
-            top_p,
-        ],
-        outputs=[
-            textbox,
-            imagebox,
-            chatbot,
-        ],
-    )
-
-    upload_btn.upload(add_file, [upload_btn], [imagebox, upload_btn], queue=False)
-    submit_btn.click(lambda: gr.update(label='📁 Upload image', interactive=True), [], upload_btn)
-    textbox.submit(lambda: gr.update(label='📁 Upload image', interactive=True), [], upload_btn)
-    clear_btn.click(lambda: gr.update(label='📁 Upload image', interactive=True), [], upload_btn)
-
-    # examples_path = os.path.dirname(__file__)
-    # gr.Examples(
-    #     examples=[
-    #         [
-    #             (
-    #                 "Which famous person does the person in the image look like? Could you craft an engaging narrative"
-    #                 " featuring this character from the image as the main protagonist?"
-    #             ),
-    #             f"{examples_path}/example_images/obama-harry-potter.jpg",
-    #         ],
-    #         [
-    #             "Can you describe the image? Do you think it's real?",
-    #             f"{examples_path}/example_images/rabbit_force.png",
-    #         ],
-    #         ["Explain this meme to me.", f"{examples_path}/example_images/meme_french.jpg"],
-    #         ["Give me a short and easy recipe for this dish.", f"{examples_path}/example_images/recipe_burger.webp"],
-    #         [
-    #             "I want to go somewhere similar to the one in the photo. Give me destinations and travel tips.",
-    #             f"{examples_path}/example_images/travel_tips.jpg",
-    #         ],
-    #         [
-    #             "Can you name the characters in the image and give their French names?",
-    #             f"{examples_path}/example_images/gaulois.png",
-    #         ],
-    #         ["Write a complete sales ad for this product.", f"{examples_path}/example_images/product_ad.jpg"],
-    #         [
-    #             (
-    #                 "As an art critic AI assistant, could you describe this painting in details and make a thorough"
-    #                 " critic?"
-    #             ),
-    #             f"{examples_path}/example_images/art_critic.png",
-    #         ],
-    #         [
-    #             "Can you tell me a very short story based on this image?",
-    #             f"{examples_path}/example_images/chicken_on_money.png",
-    #         ],
-    #         ["Write 3 funny meme texts about this image.", f"{examples_path}/example_images/elon_smoking.jpg"],
-    #         [
-    #             "Who is in this picture? Why do people find it surprising?",
-    #             f"{examples_path}/example_images/pope_doudoune.webp",
-    #         ],
-    #         ["What are the armed baguettes guarding?", f"{examples_path}/example_images/baguettes_guarding_paris.png"],
-    #         ["What is this animal and why is it unusual?", f"{examples_path}/example_images/blue_dog.png"],
-    #         [
-    #             "What is this object and do you think it is horrifying?",
-    #             f"{examples_path}/example_images/can_horror.png",
-    #         ],
-    #         [
-    #             (
-    #                 "What is this sketch for? How would you make an argument to prove this sketch was made by Picasso"
-    #                 " himself?"
-    #             ),
-    #             f"{examples_path}/example_images/cat_sketch.png",
-    #         ],
-    #         ["Which celebrity does this claymation figure look like?", f"{examples_path}/example_images/kanye.jpg"],
-    #         ["What can you tell me about the cap in this image?", f"{examples_path}/example_images/ironman_cap.png"],
-    #         [
-    #             "Can you write an advertisement for Coca-Cola based on this image?",
-    #             f"{examples_path}/example_images/polar_bear_coke.png",
-    #         ],
-    #         [
-    #             "What is happening in this image? Which famous personality does this person in center looks like?",
-    #             f"{examples_path}/example_images/gandhi_selfie.jpg",
-    #         ],
-    #         [
-    #             "What do you think the dog is doing and is it unusual?",
-    #             f"{examples_path}/example_images/surfing_dog.jpg",
-    #         ],
-    #     ],
-    #     inputs=[textbox, imagebox],
-    #     outputs=[textbox, imagebox, chatbot],
-    #     fn=process_example,
-    #     cache_examples=False,
-    #     examples_per_page=6,
-    #     label=(
-    #         "Click on any example below to get started.\nFor convenience, the model generations have been"
-    #         " pre-computed with `idefics-80b-instruct`."
-    #     ),
-    # )
-
-demo.queue(max_size=40)
-demo.launch()
 
idefics2_old_app_dialogue.py DELETED
@@ -1,417 +0,0 @@
1
- import copy
2
- import os
3
- import spaces
4
- import subprocess
5
- import time
6
- import torch
7
-
8
- from threading import Thread
9
- from typing import List, Tuple
10
- from urllib.parse import urlparse
11
- from PIL import Image
12
-
13
- import gradio as gr
14
- from gradio_client.client import DEFAULT_TEMP_DIR
15
- from transformers import AutoProcessor, AutoModelForCausalLM, TextIteratorStreamer
16
- from transformers.image_utils import to_numpy_array, PILImageResampling, ChannelDimension
17
- from transformers.image_transforms import resize, to_channel_dimension_format
18
-
19
- subprocess.run('pip install flash-attn --no-build-isolation', env={'FLASH_ATTENTION_SKIP_CUDA_BUILD': "TRUE"}, shell=True)
20
-
21
- DEVICE = torch.device("cuda")
22
- MODELS = {
23
- "tr_290_bis_288_cinco_chatty - opt 150": AutoModelForCausalLM.from_pretrained(
24
- "HuggingFaceM4/idefics2",
25
- trust_remote_code=True,
26
- torch_dtype=torch.bfloat16,
27
- token=os.environ["HF_AUTH_TOKEN"],
28
- revision="9e47f905a9e262451c749286fcb97516cedff6d3",
29
- ).to(DEVICE),
30
- "tr_288_cinco_final_sft_sphinx - opt 11'000": AutoModelForCausalLM.from_pretrained(
31
- "HuggingFaceM4/idefics2",
32
- trust_remote_code=True,
33
- torch_dtype=torch.bfloat16,
34
- token=os.environ["HF_AUTH_TOKEN"],
35
- revision="316ea4acf714760882ad89e364ae1f8c447ae82e",
36
- ).to(DEVICE),
37
- # "285 - continued pretraining on text sft - opt 2'000": AutoModelForCausalLM.from_pretrained(
38
- # "HuggingFaceM4/idefics2",
39
- # trust_remote_code=True,
40
- # torch_dtype=torch.bfloat16,
41
- # token=os.environ["HF_AUTH_TOKEN"],
42
- # revision="b0a2a564e5dc311591886bb375e8d5a1aeaade83",
43
- # ).to(DEVICE),
44
- }
45
- PROCESSOR = AutoProcessor.from_pretrained(
46
- "HuggingFaceM4/idefics2",
47
- token=os.environ["HF_AUTH_TOKEN"],
48
- )
49
- FAKE_TOK_AROUND_IMAGE = "<fake_token_around_image>"
50
- BOS_TOKEN = PROCESSOR.tokenizer.bos_token
51
- BAD_WORDS_IDS = PROCESSOR.tokenizer(["<image>", "<fake_token_around_image>"], add_special_tokens=False).input_ids
52
- EOS_WORDS_IDS = PROCESSOR.tokenizer("<end_of_utterance>", add_special_tokens=False).input_ids + [PROCESSOR.tokenizer.eos_token_id]
53
- IMAGE_SEQ_LEN = 64#list(MODELS.values())[0].config.perceiver_config.resampler_n_latents
54
-
55
- SYSTEM_PROMPT = [
56
- # """The following is a conversation between a highly knowledgeable and intelligent visual AI assistant, called Assistant, and a human user, called User. In the following interactions, User and Assistant will converse in natural language, and Assistant will do its best to answer User’s questions. Assistant has the ability to perceive images and reason about the content of visual inputs. Assistant was built to be respectful, polite and inclusive. It knows a lot, and always tells the truth. When prompted with an image, it does not make up facts.
57
-
58
- # The conversation begins:""",
59
- # """\nUser:""",
60
- # "https://i1.pickpik.com/photos/515/906/355/kittens-cats-pet-cute-preview.jpg",
61
- # "Describe this image.<end_of_utterance>",
62
- # """\nAssistant: Five kittens are standing together in the center of the photograph. From the left to right, there is one orange kitten, two kittens white and black stripes, and two brown kittens with an orange head. They are in bright green grass and it looks like they are moving forward.<end_of_utterance>""",
63
- # "\nUser:How about this image?",
64
- # "https://cdn.pixabay.com/photo/2017/09/25/13/12/puppy-2785074_1280.jpg",
65
- # "Can you describe it too?<end_of_utterance>",
66
- # """\nAssistant: A dog is lying on the floor, looking at the camera. It is looking directly at you. The dog has a white body and brown patches on its face and ears. Its eyes are dark. Its nose is black, and it has long, floppy ears, white paws, long fur, big eyes.<end_of_utterance>""",
67
- # "\nUser: What kind of breed is it?<end_of_utterance>",
68
- # """\nAssistant: It looks like a Cavalier King Charles Spaniel. These breeds are known for their gentle, affectionate nature. Their long ears and big, brown eyes give them a sweet, expressive face. Cavaliers are small dogs weighing between 13 and 20 pounds. They have a silky, medium-length coat that comes in various colors, including black and tan, tricolor, red and white, or black.<end_of_utterance>""",
69
- # "\nUser: What can you tell me about this breed of dogs?<end_of_utterance>",
70
- # """\nAssistant: One specific characteristic of the Cavalier King Charles Spaniel is its friendly and affectionate nature. This breed is known for being extremely sociable and forming strong bonds with their owners. They are often described as "velcro dogs" because they love to be close to their human companions, whether it's sitting on their laps, cuddling on the couch, or simply following them around the house.<end_of_utterance>""",
71
- # "\nUser: ghjkhjabnufs<end_of_utterance>",
72
- # """\nAssistant: That doesn’t seem to be a word. Could you ask me another way?<end_of_utterance>""",
73
- # "\nUser: Do you like Cavalier King Charles Spaniel?<end_of_utterance>",
74
- # """\nAssistant: I do not have personal opinions as I’m just a computer program. However, cavaliers are known for being excellent family pets due to their gentle and patient demeanor, making them great with children and other pets. Their affectionate nature also makes them ideal therapy dogs, as they can provide comfort and emotional support to those in need.<end_of_utterance>""",
75
- # "\nUser: How many dogs do you see in this image?",
76
- # "https://i.dailymail.co.uk/i/pix/2011/07/01/article-2010308-0CD22A8300000578-496_634x414.jpg",
77
- # "<end_of_utterance>",
78
- # """\nAssistant: There is no dogs in this image. The picture shows a tennis player jumping to volley the ball.<end_of_utterance>""",
79
- ]
80
-
81
- API_TOKEN = os.getenv("HF_AUTH_TOKEN")
82
- # IDEFICS_LOGO = "https://huggingface.co/spaces/HuggingFaceM4/idefics_playground/resolve/main/IDEFICS_logo.png"
83
- BOT_AVATAR = "IDEFICS_logo.png"
84
-
85
-
86
- # Model processing utils - these will be handled in the model processor directly ultimately
87
- def convert_to_rgb(image):
88
- # `image.convert("RGB")` would only work for .jpg images, as it creates a wrong background
89
- # for transparent images. The call to `alpha_composite` handles this case
90
- if image.mode == "RGB":
91
- return image
92
-
93
- image_rgba = image.convert("RGBA")
94
- background = Image.new("RGBA", image_rgba.size, (255, 255, 255))
95
- alpha_composite = Image.alpha_composite(background, image_rgba)
96
- alpha_composite = alpha_composite.convert("RGB")
97
- return alpha_composite
98
-
99
-
100
- def custom_transform(x):
101
- x = convert_to_rgb(x)
102
- x = to_numpy_array(x)
103
-
104
- height, width = x.shape[:2]
105
- aspect_ratio = width / height
106
- if width >= height and width > 980:
107
- width = 980
108
- height = int(width / aspect_ratio)
109
- elif height > width and height > 980:
110
- height = 980
111
- width = int(height * aspect_ratio)
112
- width = max(width, 378)
113
- height = max(height, 378)
114
-
115
- x = resize(x, (height, width), resample=PILImageResampling.BILINEAR)
116
- x = PROCESSOR.image_processor.rescale(x, scale=1 / 255)
117
- x = PROCESSOR.image_processor.normalize(
118
- x,
119
- mean=PROCESSOR.image_processor.image_mean,
120
- std=PROCESSOR.image_processor.image_std
121
- )
122
- x = to_channel_dimension_format(x, ChannelDimension.FIRST)
123
- x = torch.tensor(x)
124
- return x
125
-
-
- def create_model_inputs(
-     input_texts: List[str],
-     image_lists: List[List[Image.Image]],
- ):
-     """
-     All this logic will eventually be handled inside the model processor.
-     """
-     inputs = PROCESSOR.tokenizer(
-         input_texts,
-         return_tensors="pt",
-         add_special_tokens=False,
-         padding=True,
-     )
-
-     output_images = [
-         [PROCESSOR.image_processor(img, transform=custom_transform) for img in im_list]
-         for im_list in image_lists
-     ]
-     total_batch_size = len(output_images)
-     max_num_images = max([len(img_l) for img_l in output_images])
-     if max_num_images > 0:
-         max_height = max([i.size(2) for img_l in output_images for i in img_l])
-         max_width = max([i.size(3) for img_l in output_images for i in img_l])
-         padded_image_tensor = torch.zeros(total_batch_size, max_num_images, 3, max_height, max_width)
-         padded_pixel_attention_masks = torch.zeros(
-             total_batch_size, max_num_images, max_height, max_width, dtype=torch.bool
-         )
-         for batch_idx, img_l in enumerate(output_images):
-             for img_idx, img in enumerate(img_l):
-                 im_height, im_width = img.size()[2:]
-                 padded_image_tensor[batch_idx, img_idx, :, :im_height, :im_width] = img
-                 padded_pixel_attention_masks[batch_idx, img_idx, :im_height, :im_width] = True
-
-         inputs["pixel_values"] = padded_image_tensor
-         inputs["pixel_attention_mask"] = padded_pixel_attention_masks
-
-     return inputs
-
-
166
- # Chatbot utils
- def is_image(string: str) -> bool:
-     """
-     Images can be passed in two ways: as a local image path or as a url.
-     """
-     return is_url(string) or string.startswith(DEFAULT_TEMP_DIR)
-
-
- def is_url(string: str) -> bool:
-     """
-     Checks whether the passed string is a valid url and nothing else; e.g. if a space is included,
-     the url is immediately invalidated.
-     """
-     if " " in string:
-         return False
-     result = urlparse(string)
-     return all([result.scheme, result.netloc])
-
184
-
- def prompt_list_to_model_input(prompt_list: List[str]) -> Tuple[str, List[Image.Image]]:
-     """
-     Create the final input string and image list to feed to the model.
-     """
-     images = []
-     for idx, part in enumerate(prompt_list):
-         if is_image(part):
-             images.append(Image.open(part))
-             prompt_list[idx] = f"{FAKE_TOK_AROUND_IMAGE}{'<image>' * IMAGE_SEQ_LEN}{FAKE_TOK_AROUND_IMAGE}"
-     input_text = "".join(prompt_list)
-     input_text = input_text.replace(FAKE_TOK_AROUND_IMAGE * 2, FAKE_TOK_AROUND_IMAGE)
-     input_text = BOS_TOKEN + input_text.strip()
-     return input_text, images
-
-
- def turn_is_pure_media(turn):
-     return turn[1] is None
-
-
204
- def format_user_prompt_with_im_history_and_system_conditioning(
-     user_prompt, chat_history
- ) -> List[str]:
-     """
-     Produces the resulting list that needs to go inside the processor.
-     It handles the potential image(s), the history and the system conditioning.
-     """
-     resulting_list = copy.deepcopy(SYSTEM_PROMPT)
-
-     # Format history
-     for turn in chat_history:
-         if turn_is_pure_media(turn):
-             media = turn[0][0]
-             if resulting_list == [] or (resulting_list != [] and resulting_list[-1].endswith("<end_of_utterance>")):
-                 resulting_list.append("\nUser:")
-             resulting_list.append(media)
-         else:
-             user_utterance, assistant_utterance = turn
-             if resulting_list and is_image(resulting_list[-1]):  # means that previous `turn` in `chat_history` was a pure media
-                 resulting_list.append(f"{user_utterance.strip()}<end_of_utterance>\nAssistant: {assistant_utterance}<end_of_utterance>")
-             else:
-                 resulting_list.append(f"\nUser: {user_utterance.strip()}<end_of_utterance>\nAssistant: {assistant_utterance}<end_of_utterance>")
-
-     # Format current input
-     if not user_prompt["files"]:
-         resulting_list.append(f"\nUser: ")
-     else:
-         # Choosing to put the image first when the image is provided through the UI, but this is an arbitrary choice.
-         resulting_list.append("\nUser:")
-         resulting_list.extend([im["path"] for im in user_prompt["files"]])
-     resulting_list.append(f"{user_prompt['text']}<end_of_utterance>\nAssistant:")
-
-     return resulting_list
-
-
239
- @spaces.GPU(duration=180)
- def model_inference(
-     user_prompt,
-     chat_history,
-     model_selector,
-     decoding_strategy,
-     temperature,
-     max_new_tokens,
-     repetition_penalty,
-     top_p,
- ):
-     if user_prompt["text"].strip() == "" and not user_prompt["files"]:
-         gr.Error("Please input a query and optionally image(s).")
-
-     if user_prompt["text"].strip() == "" and user_prompt["files"]:
-         gr.Error("Please input a text query along with the image(s).")
-
-     for file in user_prompt["files"]:
-         if not file["mime_type"].startswith("image/"):
-             gr.Error("Idefics2 only supports images. Please input a valid image.")
-
-     formated_prompt_list = format_user_prompt_with_im_history_and_system_conditioning(
-         user_prompt=user_prompt,
-         chat_history=chat_history,
-     )
-
-     streamer = TextIteratorStreamer(
-         PROCESSOR.tokenizer,
-         skip_prompt=True,
-         timeout=5.,
-     )
-
-     # Common parameters to all decoding strategies
-     # This documentation is useful to read: https://huggingface.co/docs/transformers/main/en/generation_strategies
-     generation_args = {
-         "max_new_tokens": max_new_tokens,
-         "repetition_penalty": repetition_penalty,
-         "bad_words_ids": BAD_WORDS_IDS,
-         "eos_token_id": EOS_WORDS_IDS,
-         "streamer": streamer,
-     }
280
-
-     assert decoding_strategy in [
-         "Greedy",
-         "Top P Sampling",
-     ]
-     if decoding_strategy == "Greedy":
-         generation_args["do_sample"] = False
-     elif decoding_strategy == "Top P Sampling":
-         generation_args["temperature"] = temperature
-         generation_args["do_sample"] = True
-         generation_args["top_p"] = top_p
-
-
-     # Creating model inputs
-     input_text, images = prompt_list_to_model_input(formated_prompt_list)
-     inputs = create_model_inputs([input_text], [images])
-     inputs = {k: v.to(DEVICE) for k, v in inputs.items()}
-     generation_args.update(inputs)
-
-     # # The regular non-streaming generation mode
-     # _ = generation_args.pop("streamer")
-     # generated_ids = MODELS[model_selector].generate(**generation_args)
-     # generated_text = PROCESSOR.batch_decode(generated_ids, skip_special_tokens=True)[0]
-     # return generated_text
-
-     thread = Thread(
-         target=MODELS[model_selector].generate,
-         kwargs=generation_args,
-     )
-     thread.start()
-
-     print("start generating")
-     acc_text = ""
-     try:
-         for text_token in streamer:
-             acc_text += text_token
-             time.sleep(0.03)
-             yield acc_text
-     except Exception as e:
-         print("error")
-         gr.Error(e)
-     print(f"Success! Generated the following sequence: `{acc_text}`")
-
-
-
325
- # Hyper-parameters for generation
- max_new_tokens = gr.Slider(
-     minimum=8,
-     maximum=1024,
-     value=512,
-     step=1,
-     interactive=True,
-     label="Maximum number of new tokens to generate",
- )
- repetition_penalty = gr.Slider(
-     minimum=0.01,
-     maximum=5.0,
-     value=1.0,
-     step=0.01,
-     interactive=True,
-     label="Repetition penalty",
-     info="1.0 is equivalent to no penalty",
- )
- decoding_strategy = gr.Radio(
-     [
-         "Greedy",
-         "Top P Sampling",
-     ],
-     value="Greedy",
-     label="Decoding strategy",
-     interactive=True,
-     info="Higher values are equivalent to sampling more low-probability tokens.",
- )
- temperature = gr.Slider(
-     minimum=0.0,
-     maximum=5.0,
-     value=0.4,
-     step=0.1,
-     interactive=True,
-     label="Sampling temperature",
-     info="Higher values will produce more diverse outputs.",
- )
- top_p = gr.Slider(
-     minimum=0.01,
-     maximum=0.99,
-     value=0.8,
-     step=0.01,
-     interactive=True,
-     label="Top P",
-     info="Higher values are equivalent to sampling more low-probability tokens.",
- )
-
-
373
- chatbot = gr.Chatbot(
-     label="IDEFICS2",
-     avatar_images=[None, BOT_AVATAR],
-     height=500,
- )
-
-
- with gr.Blocks(fill_height=True) as demo:
-     # model selector should be set to `visible=False` ultimately
-     with gr.Row(elem_id="model_selector_row"):
-         model_selector = gr.Dropdown(
-             choices=MODELS.keys(),
-             value=list(MODELS.keys())[0],
-             interactive=True,
-             show_label=False,
-             container=False,
-             label="Model",
-             visible=True,
-         )
-
-     decoding_strategy.change(
-         fn=lambda selection: gr.Slider(
-             visible=(
-                 selection in ["contrastive_sampling", "beam_sampling", "Top P Sampling", "sampling_top_k"]
-             )
-         ),
-         inputs=decoding_strategy,
-         outputs=temperature,
-     )
-     decoding_strategy.change(
-         fn=lambda selection: gr.Slider(visible=(selection in ["Top P Sampling"])),
-         inputs=decoding_strategy,
-         outputs=top_p,
-     )
-
-     gr.ChatInterface(
-         fn=model_inference,
-         chatbot=chatbot,
-         # examples=[{"text": "hello"}, {"text": "hola"}, {"text": "merhaba"}],
-         title="Idefics2 Playground",
-         multimodal=True,
-         additional_inputs=[model_selector, decoding_strategy, temperature, max_new_tokens, repetition_penalty, top_p],
-     )
-
- demo.launch()
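
For reference, the streaming generation in the removed `model_inference` above follows the standard `transformers` pattern: `generate` runs in a background thread while the caller iterates over a `TextIteratorStreamer`. A minimal, self-contained sketch of that pattern ("gpt2" is a placeholder checkpoint chosen only so the snippet runs anywhere, not the model this Space serves):

from threading import Thread

from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer

tokenizer = AutoTokenizer.from_pretrained("gpt2")  # placeholder checkpoint
model = AutoModelForCausalLM.from_pretrained("gpt2")

inputs = tokenizer("The quick brown fox", return_tensors="pt")
# skip_prompt=True drops the prompt tokens so only newly generated text is
# streamed -- the same setting used in model_inference above.
streamer = TextIteratorStreamer(tokenizer, skip_prompt=True, timeout=5.0)

# generate() blocks until completion, so it runs in a worker thread while the
# main thread consumes partial text from the streamer as it is produced.
thread = Thread(target=model.generate, kwargs={**inputs, "max_new_tokens": 32, "streamer": streamer})
thread.start()

acc_text = ""
for text_token in streamer:
    acc_text += text_token  # in the app, each accumulation step is yielded to Gradio
print(acc_text)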
utils.py DELETED
@@ -1,93 +0,0 @@
- import os
- import torch
-
- from transformers import AutoProcessor
- from transformers.image_utils import to_numpy_array, PILImageResampling, ChannelDimension
- from transformers.image_transforms import resize, to_channel_dimension_format
- from typing import List
- from PIL import Image
-
-
- PROCESSOR = AutoProcessor.from_pretrained(
-     "HuggingFaceM4/idefics2",
-     token=os.environ["HF_AUTH_TOKEN"],
- )
-
-
- def convert_to_rgb(image):
-     # `image.convert("RGB")` would only work for .jpg images, as it creates a wrong background
-     # for transparent images. The call to `alpha_composite` handles this case
-     if image.mode == "RGB":
-         return image
-
-     image_rgba = image.convert("RGBA")
-     background = Image.new("RGBA", image_rgba.size, (255, 255, 255))
-     alpha_composite = Image.alpha_composite(background, image_rgba)
-     alpha_composite = alpha_composite.convert("RGB")
-     return alpha_composite
-
-
- def custom_transform(x):
-     x = convert_to_rgb(x)
-     x = to_numpy_array(x)
-
-     height, width = x.shape[:2]
-     aspect_ratio = width / height
-     if width >= height and width > 980:
-         width = 980
-         height = int(width / aspect_ratio)
-     elif height > width and height > 980:
-         height = 980
-         width = int(height * aspect_ratio)
-     width = max(width, 378)
-     height = max(height, 378)
-
-     x = resize(x, (height, width), resample=PILImageResampling.BILINEAR)
-     x = PROCESSOR.image_processor.rescale(x, scale=1 / 255)
-     x = PROCESSOR.image_processor.normalize(
-         x,
-         mean=PROCESSOR.image_processor.image_mean,
-         std=PROCESSOR.image_processor.image_std
-     )
-     x = to_channel_dimension_format(x, ChannelDimension.FIRST)
-     x = torch.tensor(x)
-     return x
-
-
- def create_model_inputs(
-     input_texts: List[str],
-     image_lists: List[List[Image.Image]],
- ):
-     """
-     All this logic will eventually be handled inside the model processor.
-     """
-     inputs = PROCESSOR.tokenizer(
-         input_texts,
-         return_tensors="pt",
-         add_special_tokens=False,
-         padding=True,
-     )
-
-     output_images = [
-         [PROCESSOR.image_processor(img, transform=custom_transform) for img in im_list]
-         for im_list in image_lists
-     ]
-     total_batch_size = len(output_images)
-     max_num_images = max([len(img_l) for img_l in output_images])
-     if max_num_images > 0:
-         max_height = max([i.size(2) for img_l in output_images for i in img_l])
-         max_width = max([i.size(3) for img_l in output_images for i in img_l])
-         padded_image_tensor = torch.zeros(total_batch_size, max_num_images, 3, max_height, max_width)
-         padded_pixel_attention_masks = torch.zeros(
-             total_batch_size, max_num_images, max_height, max_width, dtype=torch.bool
-         )
-         for batch_idx, img_l in enumerate(output_images):
-             for img_idx, img in enumerate(img_l):
-                 im_height, im_width = img.size()[2:]
-                 padded_image_tensor[batch_idx, img_idx, :, :im_height, :im_width] = img
-                 padded_pixel_attention_masks[batch_idx, img_idx, :im_height, :im_width] = True
-
-         inputs["pixel_values"] = padded_image_tensor
-         inputs["pixel_attention_mask"] = padded_pixel_attention_masks
-
-     return inputs
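
As a usage note for the removed helpers: `create_model_inputs` expects one already-templated prompt string per batch element and one image list per prompt, and returns the tokenizer output augmented with padded `pixel_values` and a `pixel_attention_mask`. A hedged sketch of a call site (the image path, the prompt text, and the 64-token image sequence length are placeholders, not values taken from this repo):

from PIL import Image

# Placeholder prompt: 64 stands in for the model's image sequence length
# (IMAGE_SEQ_LEN in the removed comparative_app.py).
prompt = "<fake_token_around_image>" + "<image>" * 64 + "<fake_token_around_image>Describe the image."
image = Image.open("cat.png")  # placeholder path

inputs = create_model_inputs([prompt], [[image]])
# Images are resized/normalized by custom_transform, then zero-padded to the
# largest (height, width) in the batch; the mask marks the real (unpadded) pixels.
print(inputs["pixel_values"].shape)          # (1, 1, 3, H, W)
print(inputs["pixel_attention_mask"].shape)  # (1, 1, H, W)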