triphuong57 commited on
Commit
ad33b28
·
verified ·
1 Parent(s): 52d6487

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +8 -4
app.py CHANGED
@@ -1,5 +1,5 @@
1
  import gradio as gr
2
- from transformers import AutoProcessor, PaliGemmaForConditionalGeneration, AutoModelForSeq2SeqLM
3
  from peft import PeftModel
4
  import spaces
5
  import torch
@@ -8,13 +8,17 @@ import os
8
  token = os.getenv('token')
9
  HfFolder.save_token(token)
10
  device = "cuda"
11
-
12
- model = PaliGemmaForConditionalGeneration.from_pretrained("triphuong57/paligemma_lora").to(device)
13
  processor = AutoProcessor.from_pretrained("google/paligemma-3b-mix-224")
14
 
15
- @spaces.GPU(duration=180)
16
  def greet(image, prompt):
17
 
 
 
 
 
 
18
  model_inputs = processor(text=prompt, images=image, return_tensors="pt")
19
  input_len = model_inputs["input_ids"].shape[-1]
20
  with torch.inference_mode():
 
1
  import gradio as gr
2
+ from transformers import AutoProcessor, PaliGemmaForConditionalGeneration, BitsAndBytesConfig
3
  from peft import PeftModel
4
  import spaces
5
  import torch
 
8
  token = os.getenv('token')
9
  HfFolder.save_token(token)
10
  device = "cuda"
11
+ model = PaliGemmaForConditionalGeneration.from_pretrained("triphuong57/paligemma_lora")
 
12
  processor = AutoProcessor.from_pretrained("google/paligemma-3b-mix-224")
13
 
14
+ @spaces.GPU(duration=240)
15
  def greet(image, prompt):
16
 
17
+
18
+ # model = PaliGemmaForConditionalGeneration.from_pretrained("/folders", torch_dtype=torch.float16, quantization_config=quantization_config).to(device)
19
+
20
+
21
+ # # model = PeftModel(base_model, "/folders").to(device)
22
  model_inputs = processor(text=prompt, images=image, return_tensors="pt")
23
  input_len = model_inputs["input_ids"].shape[-1]
24
  with torch.inference_mode():