Ryukijano committed on
Commit 6b90443
1 Parent(s): bdfe3b8

Update app.py

Files changed (1)
  1. app.py +6 -6
app.py CHANGED
@@ -1,7 +1,7 @@
-# app.py for Hugging Face Space: Connecting Meta Llama 3.2 Vision, Segment Anything 2, and Diffusion Model
+# app.py for Hugging Face Space: Connecting Meta Llama 3.2 Vision, PaliGemma Segmentation, and Diffusion Model
 import gradio as gr
 import spaces  # Import the spaces module to use GPU-specific decorators
-from transformers import MllamaForConditionalGeneration, AutoProcessor, pipeline
+from transformers import PaliGemmaForConditionalGeneration, AutoProcessor, pipeline
 from diffusers import StableDiffusionPipeline
 import torch
 import os
@@ -12,7 +12,7 @@ hf_token = os.getenv("HF_TOKEN")  # Fetch token from repository secrets
 
 # Set up Meta Llama 3.2 Vision model (using private model with token)
 llama_vision_model_id = "meta-llama/Llama-3.2-11B-Vision-Instruct"
-vision_model = MllamaForConditionalGeneration.from_pretrained(
+vision_model = PaliGemmaForConditionalGeneration.from_pretrained(
     llama_vision_model_id,
     torch_dtype=torch.bfloat16,
     device_map="auto",
@@ -20,8 +20,8 @@ vision_model = MllamaForConditionalGeneration.from_pretrained(
 )
 processor = AutoProcessor.from_pretrained(llama_vision_model_id, token=hf_token)
 
-# Set up segmentation model using MaskFormer Swin Large from Hugging Face Hub
-segment_model_id = "facebook/maskformer-swin-large"
+# Set up segmentation model using PaliGemma from Hugging Face Hub
+segment_model_id = "google/paligemma-3b-mix-224"
 segment_pipe = pipeline(
     "image-segmentation",
     model=segment_model_id,
@@ -45,7 +45,7 @@ def process_image(image):
     output = vision_model.generate(**inputs, max_new_tokens=50)
     caption = processor.decode(output[0], skip_special_tokens=True)
 
-    # Step 2: Segment important parts of the image using MaskFormer Swin Large
+    # Step 2: Segment important parts of the image using PaliGemma
     segmented_result = segment_pipe(image=image)
     segments = segmented_result
 
 
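For comparison, the image-segmentation pipeline expects a dedicated segmentation architecture such as the MaskFormer family this file pointed at before this commit. A short sketch of that path, assuming a public MaskFormer checkpoint; the -ade variant below is an assumption chosen for illustration, and the image path is a placeholder:

# Short sketch (not this Space's code): the image-segmentation pipeline with a
# MaskFormer checkpoint.
from PIL import Image
from transformers import pipeline

segment_pipe = pipeline(
    "image-segmentation",
    model="facebook/maskformer-swin-large-ade",  # assumed example checkpoint
)
image = Image.open("example.jpg")  # illustrative input image
segments = segment_pipe(image)
for seg in segments:
    # Each entry holds a class label and a PIL mask for one segment.
    print(seg["label"], seg["mask"].size)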