qgallouedec HF staff committed on
Commit
668e702
1 Parent(s): 62f1873

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +34 -0
app.py ADDED
@@ -0,0 +1,34 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
import gradio as gr
import torch
# Fixed: AutoModelForVision2Seq was imported twice in the original line.
from transformers import AutoModelForVision2Seq, AutoProcessor, BitsAndBytesConfig

# 4-bit NF4 quantization (with double quantization) so the 8B model fits on a
# modest GPU; compute runs in float16.
quantization_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_use_double_quant=True,
    bnb_4bit_compute_dtype=torch.float16,
)

# Load the IDEFICS2-8B processor (tokenizer + image preprocessing) and the
# quantized vision-to-text model once at startup.
processor = AutoProcessor.from_pretrained("HuggingFaceM4/idefics2-8b")
model = AutoModelForVision2Seq.from_pretrained(
    "HuggingFaceM4/idefics2-8b",
    torch_dtype=torch.float16,
    quantization_config=quantization_config,
)
def respond(multimodal_input):
    """Answer a single gradio multimodal message (free text plus uploaded images).

    ``multimodal_input`` is the dict emitted by ``gr.MultimodalTextbox``:
    ``{"text": str, "files": list}``. Returns the model's generated text.
    """
    files = multimodal_input["files"]
    # One image placeholder per uploaded file, then the user's text prompt.
    user_content = [{"type": "image"} for _ in files]
    user_content.append({"type": "text", "text": multimodal_input["text"]})
    chat = [{"role": "user", "content": user_content}]

    prompt = processor.apply_chat_template(chat, add_generation_prompt=True)
    model_inputs = processor(text=prompt, images=[files], return_tensors="pt")
    model_inputs = {key: tensor.to(model.device) for key, tensor in model_inputs.items()}

    prompt_len = len(model_inputs["input_ids"][0])
    with torch.inference_mode():
        output_ids = model.generate(**model_inputs, max_new_tokens=500)

    # Strip the echoed prompt tokens; decode only the newly generated tail.
    completion_ids = output_ids[:, prompt_len:]
    return processor.batch_decode(completion_ids, skip_special_tokens=True)[0]
+
33
+
34
+ gr.Interface(respond, inputs=[gr.MultimodalTextbox(file_types=["image"], show_label=False)], outputs="text").launch()