Update app.py
app.py
CHANGED
@@ -2,9 +2,10 @@ import spaces
 import gradio as gr
 from transformers import AutoProcessor, AutoModelForCausalLM
 # import peft
-
+from pycocotools.coco import COCO
 import requests
 import copy
+import os
 
 from PIL import Image, ImageDraw, ImageFont
 import io
@@ -125,10 +126,31 @@ def process_image(image, task_prompt, text_input=None, model_id='dwb2023/florenc
     else:
         return "", None  # Return empty string and None for unknown task prompts
 
+
+def process_segmentation(image_path, annotation_file_path):
+    coco = COCO(annotation_file_path)
+    img_id = list(coco.imgs.keys())[0]
+    img_info = coco.loadImgs(img_id)[0]
+    ann_ids = coco.getAnnIds(imgIds=img_id)
+    anns = coco.loadAnns(ann_ids)
+
+    image = Image.open(image_path)
+    for ann in anns:
+        if 'segmentation' in ann and ann['segmentation']:
+            image = draw_polygons(image, {'polygons': ann['segmentation'], 'labels': [coco.loadCats(ann['category_id'])[0]['name']]})
+    return image
+
 single_task_list =[
     'Object Detection'
 ]
 
+# Define the path to the example images and annotations file
+example_image_dir = 'examples/bccd-test/'
+annotations_file_path = os.path.join(example_image_dir, '_annotations.coco.json')
+
+# Get the list of example image files
+example_images = [f for f in os.listdir(example_image_dir) if f.endswith('.jpg')]
+
 with gr.Blocks(theme="sudeepshouche/minimalist") as demo:
     gr.Markdown("## 🧬OmniScience - building teams of fine tuned VLM models for diagnosis and detection 🔧")
     gr.Markdown("- 🔬Florence-2 Model Proof of Concept, focusing on Object Detection <OD> tasks.")
@@ -171,6 +193,21 @@ with gr.Blocks(theme="sudeepshouche/minimalist") as demo:
 
         submit_btn.click(process_image, [input_img, task_prompt, model_selector], [output_text, output_img])
 
+    with gr.Tab(label="Segmentation"):
+        with gr.Row():
+            with gr.Column():
+                input_img = gr.Dropdown(choices=example_images, label="Input Picture", value=example_images[0])
+                submit_btn = gr.Button(value="Submit")
+            with gr.Column():
+                output_img = gr.Image(label="Output Image")
+
+        def process_segmentation_tab(image_name):
+            image_path = os.path.join(example_image_dir, image_name)
+            result_image = process_segmentation(image_path, annotations_file_path)
+            return result_image
+
+        submit_btn.click(process_segmentation_tab, input_img, output_img)
+
     gr.Markdown("## 🚀Other Cool Stuff:")
     gr.Markdown("- [Florence 2 Whitepaper](https://arxiv.org/pdf/2311.06242) - how I found out about the Roboflow 100 and the BCCD dataset. Turns out this nugget was from the original [Florence whitepaper](https://arxiv.org/pdf/2111.11432) but useful all the same!")
     gr.Markdown("- [Roboflow YouTube Video on Florence 2 fine-tuning](https://youtu.be/i3KjYgxNH6w?si=x1ZMg9hsNe25Y19-&t=1296) - bookmarked an 🧠insightful trade-off analysis of various VLMs.")
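Note: the added process_segmentation function relies on a draw_polygons helper that is already defined elsewhere in app.py and is not touched by this change. The sketch below is a hypothetical stand-in showing the shape of helper the call site assumes (a PIL image plus a dict whose 'polygons' entry holds flat COCO [x1, y1, x2, y2, ...] lists and whose 'labels' entry holds the category name); the real helper in the app may differ.

from PIL import ImageDraw

def draw_polygons(image, prediction, outline="red"):
    # Hypothetical stand-in for the existing app.py helper; signature assumed from the call site above.
    annotated = image.copy()
    draw = ImageDraw.Draw(annotated)
    label = prediction['labels'][0] if prediction['labels'] else ""
    for poly in prediction['polygons']:
        # COCO polygon segmentations are flat [x1, y1, x2, y2, ...] coordinate lists
        points = list(zip(poly[0::2], poly[1::2]))
        if len(points) < 3:
            continue  # skip degenerate polygons
        draw.polygon(points, outline=outline)
        draw.text(points[0], label, fill=outline)
    return annotated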
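A quick way to exercise the new segmentation path outside Gradio is sketched below, assuming it runs where process_segmentation is in scope (for example appended to app.py) and the example files referenced in the diff exist; the output filename is arbitrary. Note that process_segmentation always loads annotations for the first image id listed in the COCO file, so the polygons it draws come from that record regardless of which image file is passed in.

import os

example_image_dir = 'examples/bccd-test/'
annotations_file_path = os.path.join(example_image_dir, '_annotations.coco.json')
example_images = [f for f in os.listdir(example_image_dir) if f.endswith('.jpg')]

# Render the COCO segmentation overlay onto the first example image and save it for inspection
preview = process_segmentation(os.path.join(example_image_dir, example_images[0]), annotations_file_path)
preview.save('segmentation_preview.png')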