qgallouedec committed
Commit bfcd10f
1 Parent(s): 1b4edc6

Update app.py

Files changed (1):
  app.py +17 -9
app.py CHANGED

@@ -1,17 +1,15 @@
  import gradio as gr
- from transformers import AutoModelForVision2Seq, AutoProcessor, AutoModelForVision2Seq, BitsAndBytesConfig
+ from transformers import AutoModelForVision2Seq, AutoProcessor, BitsAndBytesConfig
  import torch

+
+ model_id = "HuggingFaceM4/idefics2-8b"
+
  quantization_config = BitsAndBytesConfig(
      load_in_4bit=True, bnb_4bit_quant_type="nf4", bnb_4bit_use_double_quant=True, bnb_4bit_compute_dtype=torch.float16
  )
-
- processor = AutoProcessor.from_pretrained("HuggingFaceM4/idefics2-8b")
- model = AutoModelForVision2Seq.from_pretrained(
-     "HuggingFaceM4/idefics2-8b",
-     torch_dtype=torch.float16,
-     quantization_config=quantization_config,
- )
+ processor = AutoProcessor.from_pretrained(model_id)
+ model = AutoModelForVision2Seq.from_pretrained(model_id, torch_dtype=torch.float16, quantization_config=quantization_config)


  def respond(multimodal_input):

@@ -31,4 +29,14 @@ def respond(multimodal_input):
      return generated_text


- gr.Interface(respond, inputs=[gr.MultimodalTextbox(file_types=["image"], show_label=False)], outputs="text").launch()
+ gr.Interface(
+     respond,
+     inputs=[gr.MultimodalTextbox(file_types=["image"], show_label=False)],
+     outputs="text",
+     title="IDEFICS2-8B DPO",
+     description="Try IDEFICS2-8B fine-tuned using direct preference optimization (DPO) in this demo. Learn more about TRL's DPO integration for vision language models [here](https://huggingface.co/blog/dpo_vlm).",
+     examples=[
+         {"text": "What is the type of flower in the image and what insect is on it?", "files": ["./bee.jpg"]},
+         {"text": "Describe the image", "files": ["./howl.jpg"]},
+     ],
+ ).launch()
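
The body of respond is collapsed in this diff. For reference, below is a minimal sketch of what such a handler typically looks like for IDEFICS2 behind a gr.MultimodalTextbox, which passes a dict with "text" and "files" keys; it reuses the processor and model defined above. The chat-template message format follows the standard IDEFICS2 inference pattern, and the max_new_tokens value is an assumption for illustration, not the committed code.

from transformers.image_utils import load_image

def respond(multimodal_input):
    # gr.MultimodalTextbox yields {"text": str, "files": [paths]}.
    images = [load_image(path) for path in multimodal_input["files"]]
    # Build an IDEFICS2 chat message: one image placeholder per file, then the text.
    messages = [{
        "role": "user",
        "content": [{"type": "image"} for _ in images]
                   + [{"type": "text", "text": multimodal_input["text"]}],
    }]
    prompt = processor.apply_chat_template(messages, add_generation_prompt=True)
    inputs = processor(text=prompt, images=images, return_tensors="pt").to(model.device)
    generated_ids = model.generate(**inputs, max_new_tokens=500)  # assumed generation cap
    # Decode only the newly generated tokens, skipping the prompt.
    generated_text = processor.batch_decode(
        generated_ids[:, inputs["input_ids"].shape[1]:], skip_special_tokens=True
    )[0]
    return generated_text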