onuralpszr committed on
Commit
47768b2
β€’
1 Parent(s): d31ac85

fix: πŸ› adjust intro markdown text

Browse files

Signed-off-by: Onuralp SEZER <thunderbirdtr@gmail.com>

Files changed (1) hide show
  1. app.py +38 -14
app.py CHANGED
@@ -15,6 +15,40 @@ MASK_ANNOTATOR = sv.MaskAnnotator()
15
  DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu")
16
  VIDEO_TARGET_DIRECTORY = "tmp"
17
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
18
  create_directory(directory_path=VIDEO_TARGET_DIRECTORY)
19
  model_id = "google/paligemma2-3b-pt-448"
20
  model = PaliGemmaForConditionalGeneration.from_pretrained(model_id).eval().to(DEVICE)
@@ -33,6 +67,9 @@ def paligemma_detection(input_image, input_text, max_new_tokens):
33
  result = processor.decode(generation, skip_special_tokens=True)
34
  return result
35
 
 
 
 
36
  def annotate_image(result, resolution_wh, class_names, cv_image):
37
 
38
  detections = sv.Detections.from_lmm(
@@ -134,20 +171,7 @@ def process_video(input_video, input_text, class_names, max_new_tokens, progress
134
  return video_path, results
135
 
136
  with gr.Blocks() as app:
137
- gr.Markdown("""
138
- ## PaliGemma 2 Detection with Supervision - Demo
139
-
140
- [![Github](https://img.shields.io/badge/Github-100000?style=flat&logo=github&logoColor=white)](https://github.com/google-research/big_vision/blob/main/big_vision/configs/proj/paligemma/README.md) [![Huggingface](https://img.shields.io/badge/Huggingface-FFD21E?style=flat&logo=Huggingface&logoColor=black)](https://huggingface.co/blog/paligemma) [![Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://github.com/merveenoyan/smol-vision/blob/main/Fine_tune_PaliGemma.ipynb) [![Paper](https://img.shields.io/badge/Arvix-B31B1B?style=flat&logo=arXiv&logoColor=white)](https://arxiv.org/abs/2412.03555) [![Supervision](https://img.shields.io/badge/Supervision-6706CE?style=flat&logo=Roboflow&logoColor=white)](https://supervision.roboflow.com/)
141
-
142
- PaliGemma 2 is an open vision-language model by Google, inspired by [PaLI-3](https://arxiv.org/abs/2310.09199) and
143
- built with open components such as the [SigLIP](https://arxiv.org/abs/2303.15343)
144
- vision model and the [Gemma 2](https://arxiv.org/abs/2408.00118) language model. PaliGemma 2 is designed as a versatile
145
- model for transfer to a wide range of vision-language tasks such as image and short video caption, visual question
146
- answering, text reading, object detection and object segmentation.
147
-
148
- This space show how to use PaliGemma 2 for object detection with supervision.
149
- You can input an image and a text prompt
150
- """)
151
 
152
  with gr.Tab("Image Detection"):
153
  with gr.Row():
 
15
  DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu")
16
  VIDEO_TARGET_DIRECTORY = "tmp"
17
 
18
+
19
+ INTRO_TEXT = """
20
+ ## PaliGemma 2 Detection with Supervision - Demo
21
+
22
+ <div style="display: flex; gap: 10px;">
23
+ <a href="https://github.com/google-research/big_vision/blob/main/big_vision/configs/proj/paligemma/README.md">
24
+ <img src="https://img.shields.io/badge/Github-100000?style=flat&logo=github&logoColor=white" alt="Github">
25
+ </a>
26
+ <a href="https://huggingface.co/blog/paligemma">
27
+ <img src="https://img.shields.io/badge/Huggingface-FFD21E?style=flat&logo=Huggingface&logoColor=black" alt="Huggingface">
28
+ </a>
29
+ <a href="https://github.com/merveenoyan/smol-vision/blob/main/Fine_tune_PaliGemma.ipynb">
30
+ <img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Colab">
31
+ </a>
32
+ <a href="https://arxiv.org/abs/2412.03555">
33
+ <img src="https://img.shields.io/badge/Arvix-B31B1B?style=flat&logo=arXiv&logoColor=white" alt="Paper">
34
+ </a>
35
+ <a href="https://supervision.roboflow.com/">
36
+ <img src="https://img.shields.io/badge/Supervision-6706CE?style=flat&logo=Roboflow&logoColor=white" alt="Supervision">
37
+ </a>
38
+ </div>
39
+
40
+
41
+ PaliGemma 2 is an open vision-language model by Google, inspired by [PaLI-3](https://arxiv.org/abs/2310.09199) and
42
+ built with open components such as the [SigLIP](https://arxiv.org/abs/2303.15343)
43
+ vision model and the [Gemma 2](https://arxiv.org/abs/2408.00118) language model. PaliGemma 2 is designed as a versatile
44
+ model for transfer to a wide range of vision-language tasks such as image and short video caption, visual question
45
+ answering, text reading, object detection and object segmentation.
46
+
47
+ This space shows how to use PaliGemma 2 for object detection with supervision.
48
+ You can input an image and a text prompt.
49
+ """
50
+
51
+
52
  create_directory(directory_path=VIDEO_TARGET_DIRECTORY)
53
  model_id = "google/paligemma2-3b-pt-448"
54
  model = PaliGemmaForConditionalGeneration.from_pretrained(model_id).eval().to(DEVICE)
 
67
  result = processor.decode(generation, skip_special_tokens=True)
68
  return result
69
 
70
+
71
+
72
+
73
  def annotate_image(result, resolution_wh, class_names, cv_image):
74
 
75
  detections = sv.Detections.from_lmm(
 
171
  return video_path, results
172
 
173
  with gr.Blocks() as app:
174
+ gr.Markdown(INTRO_TEXT)
 
 
 
 
 
 
 
 
 
 
 
 
 
175
 
176
  with gr.Tab("Image Detection"):
177
  with gr.Row():