kdevoe committed on
Commit a2876ac · verified · 1 Parent(s): b67e16c

Changing to different streaming demo

Files changed (1):
  1. app.py +46 -54
app.py CHANGED
@@ -1,59 +1,51 @@
-import cv2
 import gradio as gr
 import numpy as np
-from transformers import pipeline
-
-# Load YOLO model from Hugging Face's transformers library
-model = pipeline("object-detection", model="hustvl/yolos-tiny")
-
-# Function to capture and process video frames in real time
-def capture_and_detect():
-    cap = cv2.VideoCapture(0)  # OpenCV video capture from webcam
-
-    while True:
-        ret, frame = cap.read()
-        if not ret:
-            break
-
-        # Convert frame to RGB as required by YOLO model
-        rgb_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
-
-        # Perform object detection on the frame
-        results = model(rgb_frame)
-
-        # Draw bounding boxes and labels on the frame
-        for result in results:
-            label = result['label']
-            score = result['score']
-            box = result['box']
-            x1, y1, x2, y2 = int(box['xmin']), int(box['ymin']), int(box['xmax']), int(box['ymax'])
-
-            # Draw bounding box and label
-            cv2.rectangle(frame, (x1, y1), (x2, y2), (0, 255, 0), 2)
-            text = f"{label}: {score:.2f}"
-            cv2.putText(frame, text, (x1, y1 - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
-
-        # Convert BGR back to RGB for Gradio display
-        yield cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
-
-    cap.release()
-
-# Gradio Interface using real-time video capture and object detection
-def video_stream():
-    return capture_and_detect()
-
-# Create Gradio interface
-webcam_interface = gr.Interface(
-    fn=video_stream,
-    inputs=None,
-    outputs=gr.Image(),
-    live=True,
-    description="Real-Time Object Detection with YOLO and Gradio"
-)
-
-# Launch Gradio app
-if __name__ == "__main__":
-    webcam_interface.launch()
+import cv2
+
+def transform_cv2(frame, transform):
+    if transform == "cartoon":
+        # prepare color
+        img_color = cv2.pyrDown(cv2.pyrDown(frame))
+        for _ in range(6):
+            img_color = cv2.bilateralFilter(img_color, 9, 9, 7)
+        img_color = cv2.pyrUp(cv2.pyrUp(img_color))
+
+        # prepare edges
+        img_edges = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)
+        img_edges = cv2.adaptiveThreshold(
+            cv2.medianBlur(img_edges, 7),
+            255,
+            cv2.ADAPTIVE_THRESH_MEAN_C,
+            cv2.THRESH_BINARY,
+            9,
+            2,
+        )
+        img_edges = cv2.cvtColor(img_edges, cv2.COLOR_GRAY2RGB)
+        # combine color and edges
+        img = cv2.bitwise_and(img_color, img_edges)
+        return img
+    elif transform == "edges":
+        # perform edge detection
+        img = cv2.cvtColor(cv2.Canny(frame, 100, 200), cv2.COLOR_GRAY2BGR)
+        return img
+    else:
+        return np.flipud(frame)
+
+
+css = """.my-group {max-width: 500px !important; max-height: 500px !important;}
+.my-column {display: flex !important; justify-content: center !important; align-items: center !important};"""
+
+with gr.Blocks(css=css) as demo:
+    with gr.Column(elem_classes=["my-column"]):
+        with gr.Group(elem_classes=["my-group"]):
+            transform = gr.Dropdown(choices=["cartoon", "edges", "flip"],
+                                    value="flip", label="Transformation")
+            input_img = gr.Image(sources=["webcam"], type="numpy", streaming=True)
+            input_img.stream(transform_cv2, [input_img, transform], [input_img], time_limit=30, stream_every=0.1)
+
+
+demo.launch()
+
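For anyone reviewing the new demo, a quick way to sanity-check the three transforms without a webcam is to feed transform_cv2 a synthetic frame. A minimal sketch, assuming transform_cv2 (with its cv2 and numpy imports) has been copied from the new app.py into the session; app.py itself calls demo.launch() unguarded at module level, so importing it directly would start the server:

import numpy as np

# Synthetic 480x640 RGB frame standing in for one webcam capture.
frame = np.random.randint(0, 256, (480, 640, 3), dtype=np.uint8)

for name in ("cartoon", "edges", "flip"):
    out = transform_cv2(frame, name)  # assumed pasted in from app.py above
    print(name, out.shape, out.dtype)

The "cartoon" and "edges" branches return 3-channel uint8 images sized like the input (pyrUp exactly reverses pyrDown for even dimensions), and "flip" returns the frame mirrored vertically, so all three display cleanly in gr.Image.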