Zaheer committed on
Commit
517fa88
1 Parent(s): 326cdaf

Ready to Use

Browse files
Files changed (3) hide show
  1. README.md +3 -3
  2. app.py +82 -0
  3. requirements.txt +86 -0
README.md CHANGED
@@ -1,8 +1,8 @@
1
  ---
2
  title: Object Detection With YOLO
3
- emoji: 🐢
4
- colorFrom: pink
5
- colorTo: pink
6
  sdk: gradio
7
  sdk_version: 3.37.0
8
  app_file: app.py
 
1
  ---
2
  title: Object Detection With YOLO
3
+ emoji: ❤️‍🔥
4
+ colorFrom: purple
5
+ colorTo: white
6
  sdk: gradio
7
  sdk_version: 3.37.0
8
  app_file: app.py
app.py ADDED
@@ -0,0 +1,82 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from transformers import AutoFeatureExtractor, YolosForObjectDetection
2
+ import gradio as gr
3
+ from PIL import Image
4
+ import torch
5
+ import matplotlib.pyplot as plt
6
+ import io
7
+ import numpy as np
8
+
9
+
10
# Palette of matplotlib RGB triples cycled over when drawing detection boxes.
COLORS = [[0.000, 0.447, 0.741], [0.850, 0.325, 0.098], [0.929, 0.694, 0.125],
          [0.494, 0.184, 0.556], [0.466, 0.674, 0.188], [0.301, 0.745, 0.933]]


def process_class_list(classes_string: str):
    """Split a comma-separated string of class names into a stripped list.

    Falsy input (empty string or None) yields an empty list, meaning
    "no class filter".
    """
    if not classes_string:
        return []
    return [name.strip() for name in classes_string.split(",")]
16
+
17
def model_inference(img, model_name: str, prob_threshold: float, classes_to_show: str = ""):
    """Run YOLOS object detection on an image and return an annotated plot.

    Args:
        img: Input image as a numpy array (as delivered by the Gradio Image
            component).
        model_name: Checkpoint name under the ``hustvl`` namespace,
            e.g. ``"yolos-small"``.
        prob_threshold: Minimum class probability for a detection to be kept
            (the Gradio slider supplies a float in [0, 1]).
        classes_to_show: Optional comma-separated class names; when non-empty,
            only those classes are drawn.

    Returns:
        A PIL image of the input with detected bounding boxes drawn on it.

    Note:
        The original signature had ``classes_to_show = str`` — the *type*
        ``str`` as a default value (a typo for an annotation), which crashed
        in ``process_class_list`` when the argument was omitted. Fixed to a
        proper annotated default of ``""``.
    """
    # Loaded per call so the dropdown can switch checkpoints; simple but slow,
    # since the model is re-fetched on every inference.
    feature_extractor = AutoFeatureExtractor.from_pretrained(f"hustvl/{model_name}")
    model = YolosForObjectDetection.from_pretrained(f"hustvl/{model_name}")

    img = Image.fromarray(img)

    pixel_values = feature_extractor(img, return_tensors="pt").pixel_values

    with torch.no_grad():
        outputs = model(pixel_values, output_attentions=True)

    # Softmax over all classes, then drop the last column (the "no object"
    # class) before thresholding.
    probas = outputs.logits.softmax(-1)[0, :, :-1]
    keep = probas.max(-1).values > prob_threshold

    # Rescale boxes from model space to the original image size;
    # PIL gives (width, height), post_process expects (height, width).
    target_sizes = torch.tensor(img.size[::-1]).unsqueeze(0)
    postprocessed_outputs = feature_extractor.post_process(outputs, target_sizes)
    bboxes_scaled = postprocessed_outputs[0]['boxes']

    classes_list = process_class_list(classes_to_show)
    return plot_results(
        img, probas[keep], bboxes_scaled[keep], model, classes_list
    )
39
+
40
def plot_results(pil_img, prob, boxes, model, classes_list):
    """Draw detection boxes and class labels on *pil_img*.

    Args:
        pil_img: The source PIL image.
        prob: Per-detection class probabilities (one row per kept detection).
        boxes: Matching bounding boxes as (xmin, ymin, xmax, ymax) tensors.
        model: The YOLOS model; used only for its ``config.id2label`` mapping.
        classes_list: If non-empty, only classes named here are drawn.

    Returns:
        A PIL image rendering of the annotated matplotlib figure.
    """
    plt.figure(figsize=(16, 10))
    plt.imshow(pil_img)
    ax = plt.gca()
    # Cycle the small palette so any number of detections gets a colour.
    colors = COLORS * 100
    for p, (xmin, ymin, xmax, ymax), c in zip(prob, boxes.tolist(), colors):
        cl = p.argmax()
        object_class = model.config.id2label[cl.item()]

        # Optional class filter: skip anything the user did not ask for.
        if len(classes_list) > 0:
            if object_class not in classes_list:
                continue

        ax.add_patch(plt.Rectangle((xmin, ymin), xmax - xmin, ymax - ymin,
                                   fill=False, color=c, linewidth=3))
        text = f'{object_class}: {p[cl]:0.2f}'
        ax.text(xmin, ymin, text, fontsize=15,
                bbox=dict(facecolor='yellow', alpha=0.5))
    plt.axis('off')
    fig = plt.gcf()
    result = fig2img(fig)
    # Close the figure explicitly: the original leaked one open pyplot figure
    # per inference, which accumulates memory in a long-running Gradio app.
    plt.close(fig)
    return result
60
+
61
def fig2img(fig):
    """Render *fig* into an in-memory PNG buffer and return it as a PIL image.

    The buffer is intentionally left open: PIL loads the image lazily from it.
    """
    png_stream = io.BytesIO()
    fig.savefig(png_stream)
    png_stream.seek(0)
    image = Image.open(png_stream)
    return image
66
+
67
# --- Gradio UI -------------------------------------------------------------
# Image + model/threshold/class controls in, annotated image out.
description = """YOLOS - Object Detection"""

model_choice = gr.components.Dropdown(
    ["yolos-tiny", "yolos-small", "yolos-base", "yolos-small-300", "yolos-small-dwr"],
    value="yolos-small",
    label="YOLOS Model",
)
prob_threshold_slider = gr.components.Slider(
    minimum=0, maximum=1.0, step=0.01, value=0.9, label="Probability Threshold"
)
classes_to_show = gr.components.Textbox(
    placeholder="e.g. person, car , laptop", label="Classes to use (Optional)"
)
image_in = gr.components.Image()
image_out = gr.components.Image()

Iface = gr.Interface(
    fn=model_inference,
    inputs=[image_in, model_choice, prob_threshold_slider, classes_to_show],
    outputs=image_out,
    title="Object Detection With YOLO",
    description=description,
    theme='HaleyCH/HaleyCH_Theme',
).launch(share=True)
requirements.txt ADDED
@@ -0,0 +1,86 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ aiofiles==23.1.0
2
+ aiohttp==3.8.4
3
+ aiosignal==1.3.1
4
+ altair==5.0.1
5
+ annotated-types==0.5.0
6
+ anyio==3.7.1
7
+ async-timeout==4.0.2
8
+ attrs==23.1.0
9
+ certifi==2023.5.7
10
+ charset-normalizer==3.2.0
11
+ click==8.1.5
12
+ colorama==0.4.6
13
+ contourpy==1.1.0
14
+ cycler==0.11.0
15
+ exceptiongroup==1.1.2
16
+ fastapi==0.100.0
17
+ ffmpy==0.3.1
18
+ filelock==3.12.2
19
+ fonttools==4.41.0
20
+ frozenlist==1.4.0
21
+ fsspec==2023.6.0
22
+ gitdb==4.0.10
23
+ GitPython==3.1.32
24
+ gradio==3.37.0
25
+ gradio_client==0.2.10
26
+ h11==0.14.0
27
+ httpcore==0.17.3
28
+ httpx==0.24.1
29
+ huggingface-hub==0.16.4
30
+ idna==3.4
31
+ Jinja2==3.1.2
32
+ jsonschema==4.18.4
33
+ jsonschema-specifications==2023.6.1
34
+ kiwisolver==1.4.4
35
+ linkify-it-py==2.0.2
36
+ markdown-it-py==2.2.0
37
+ MarkupSafe==2.1.3
38
+ matplotlib==3.7.2
39
+ mdit-py-plugins==0.3.3
40
+ mdurl==0.1.2
41
+ mpmath==1.3.0
42
+ multidict==6.0.4
43
+ networkx==3.1
44
+ numpy==1.25.1
45
+ opencv-python==4.8.0.74
46
+ orjson==3.9.2
47
+ packaging==23.1
48
+ pandas==2.0.3
49
+ Pillow==10.0.0
50
+ psutil==5.9.5
51
+ pydantic==2.0.3
52
+ pydantic_core==2.3.0
53
+ pydub==0.25.1
54
+ pyparsing==3.0.9
55
+ python-dateutil==2.8.2
56
+ python-multipart==0.0.6
57
+ pytz==2023.3
58
+ PyYAML==6.0.1
59
+ referencing==0.29.3
60
+ regex==2023.6.3
61
+ requests==2.31.0
62
+ rpds-py==0.8.11
63
+ safetensors==0.3.1
64
+ scipy==1.11.1
65
+ seaborn==0.12.2
66
+ semantic-version==2.10.0
67
+ six==1.16.0
68
+ smmap==5.0.0
69
+ sniffio==1.3.0
70
+ starlette==0.27.0
71
+ sympy==1.12
72
+ thop==0.1.1.post2209072238
73
+ tokenizers==0.13.3
74
+ toolz==0.12.0
75
+ torch==2.0.1
76
+ torchvision==0.15.2
77
+ tqdm==4.65.0
78
+ transformers==4.30.2
79
+ typing_extensions==4.7.1
80
+ tzdata==2023.3
81
+ uc-micro-py==1.0.2
82
+ ultralytics==8.0.136
83
+ urllib3==2.0.3
84
+ uvicorn==0.23.1
85
+ websockets==11.0.3
86
+ yarl==1.9.2