Raumkommander committed · commit a991bc1 · 1 parent: c99a62f

inital deployment1
Files changed:
- app.py +8 -66
- app_new.py +50 -0
- appcopy.py +86 -0
app.py
CHANGED
@@ -1,86 +1,28 @@
 import gradio as gr
 import cv2
-import torch
 import numpy as np
-from diffusers import StableDiffusionPipeline,AutoPipelineForImage2Image,AutoencoderTiny
-from transformers import AutoProcessor, AutoModel, AutoTokenizer
 from PIL import Image
 
-#
-#
-# def predict(prompt, frame):
-#     generator = torch.manual_seed(params.seed)
-#     steps = params.steps
-#     strength = params.strength
-#     if int(steps * strength) < 1:
-#         steps = math.ceil(1 / max(0.10, strength))
-#
-#     prompt = params.prompt
-#     prompt_embeds = None
-#
-#     results = self.pipe(
-#         image=frame,
-#         prompt_embeds=prompt_embeds,
-#         prompt=prompt,
-#         negative_prompt=params.negative_prompt,
-#         generator=generator,
-#         strength=strength,
-#         num_inference_steps=steps,
-#         guidance_scale=1.1,
-#         width=params.width,
-#         height=params.height,
-#         output_type="pil",
-#     )
-#
-#     nsfw_content_detected = (
-#         results.nsfw_content_detected[0]
-#         if "nsfw_content_detected" in results
-#         else False
-#     )
-#     if nsfw_content_detected:
-#         return None
-#     result_image = results.images[0]
-#
-#     return result_image
-#
-# def process_frame(frame, prompt="A futuristic landscape"):
-#     """Process a single frame using the real-time latent consistency model."""
-#
-#     # Convert frame to PIL image
-#     image = Image.fromarray(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)).resize((512, 512))
-#
-#     # Apply Real-Time Latent Consistency Model
-#     result = realtime_pipe(prompt=prompt, image=image, strength=0.5, guidance_scale=7.5).images[0]
-#     return np.array(result)
-
-def video_stream(prompt):
-    """Captures video feed from webcam and sends to the AI model."""
+def video_stream():
+    """Captures video feed from webcam and outputs the same stream to a different canvas."""
     cap = cv2.VideoCapture(0)
     while cap.isOpened():
         ret, frame = cap.read()
         if not ret:
             break
-
-        frame = process_frame(frame, prompt)
-        yield frame  # Return processed frame
+        yield frame
     cap.release()
 
-
 # Create Gradio App
 with gr.Blocks() as demo:
-    gr.Markdown("## 🎨 Real-Time AI-Enhanced Webcam using Latent Consistency Model (LCM)")
+    gr.Markdown("## 🎥 Webcam Stream with Output to a Separate Canvas")
 
     with gr.Row():
-        webcam_feed = gr.Camera(streaming=True, label="Live Webcam")
-        processed_image = gr.Image(label="AI-Enhanced Webcam Feed")
-
-    with gr.Row():
-        canvas_output = gr.Image(interactive=True, label="Canvas - Processed Image Output")
+        webcam_feed = gr.Video(label="Live Webcam", streaming=True)
+        canvas_output = gr.Image(label="Canvas - Output Stream")
 
-    prompt_input = gr.Textbox(label="Real-Time LCM Prompt", value="A futuristic landscape")
-    start_button = gr.Button("Start Real-Time AI Enhancement")
+    start_button = gr.Button("Start Streaming")
 
-    #start_button.click(fn=video_stream, inputs=[prompt_input], outputs=[processed_image, canvas_output])
+    start_button.click(fn=video_stream, inputs=[], outputs=[canvas_output])
 
 demo.launch(share=True)
-
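Note on the rewritten app.py: Gradio treats a generator handler as a stream and pushes each yielded value to the output component, so yielding frames from the click callback is the right shape. Two caveats: cv2.VideoCapture(0) opens a camera on the server, so a deployed Space will not see a visitor's webcam (browser-side capture is what the gr.Video/gr.Camera webcam components are for), and OpenCV returns BGR arrays while gr.Image renders RGB. A minimal self-contained sketch of the streaming pattern, assuming a local webcam (the function name is illustrative):

import cv2
import gradio as gr

def stream_webcam():
    """Yield RGB frames; Gradio streams each yielded value to the output."""
    cap = cv2.VideoCapture(0)  # device 0; assumes a webcam on this machine
    try:
        while cap.isOpened():
            ret, frame = cap.read()
            if not ret:
                break
            # OpenCV captures BGR; gr.Image expects RGB.
            yield cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
    finally:
        cap.release()

with gr.Blocks() as demo:
    canvas_output = gr.Image(label="Canvas - Output Stream")
    start_button = gr.Button("Start Streaming")
    start_button.click(fn=stream_webcam, inputs=[], outputs=canvas_output)

demo.queue().launch()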
app_new.py
CHANGED
@@ -0,0 +1,50 @@
+import gradio as gr
+import cv2
+import torch
+import numpy as np
+from diffusers import StableDiffusionPipeline
+from PIL import Image
+
+# Load the Real-Time Latent Consistency Model (LCM)
+device = "cuda" if torch.cuda.is_available() else "cpu"
+lcm_pipe = StableDiffusionPipeline.from_pretrained("latent-consistency/lcm-lora-sdv1-5").to(device)
+
+def process_frame(image, prompt="A futuristic landscape", negative_prompt="not blurry"):
+    """Modify the input image using the real-time latent consistency model (LCM)."""
+    image = image.resize((512, 512))
+    result = lcm_pipe(prompt=prompt, negative_prompt=negative_prompt, image=image, num_inference_steps=4, guidance_scale=7.5).images[0]
+    return np.array(result)
+
+def video_stream(prompt, negative_prompt):
+    """Captures video feed from webcam and sends it to LCM in real time."""
+    cap = cv2.VideoCapture(0)
+    while cap.isOpened():
+        ret, frame = cap.read()
+        if not ret:
+            break
+        image = Image.fromarray(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
+        frame = process_frame(image, prompt, negative_prompt)
+        yield frame
+    cap.release()
+
+# Create Gradio App
+with gr.Blocks() as demo:
+    gr.Markdown("## 🎨 Real-Time AI-Enhanced Webcam using Latent Consistency Model (LCM)")
+
+    with gr.Row():
+        webcam_feed = gr.Camera(streaming=True, label="Live Webcam")
+        processed_image = gr.Image(label="AI-Enhanced Webcam Feed")
+
+    with gr.Row():
+        image_input = gr.Image(type="pil", label="Upload Image for Processing")
+        canvas_output = gr.Image(interactive=True, label="Canvas - Processed Image Output")
+
+    prompt_input = gr.Textbox(label="Real-Time LCM Prompt", value="A futuristic landscape")
+    negative_prompt_input = gr.Textbox(label="Negative Prompt", value="")
+    start_button = gr.Button("Start Real-Time AI Enhancement")
+    process_button = gr.Button("Process Uploaded Image")
+
+    start_button.click(fn=video_stream, inputs=[prompt_input, negative_prompt_input], outputs=[processed_image, canvas_output])
+    process_button.click(fn=process_frame, inputs=[image_input, prompt_input, negative_prompt_input], outputs=[canvas_output])
+
+demo.launch(share=True)
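A caveat on the model loading in app_new.py: latent-consistency/lcm-lora-sdv1-5 hosts LCM-LoRA adapter weights, not a complete pipeline, and StableDiffusionPipeline is text-to-image, so the image= argument will not drive img2img. The pattern diffusers documents is to load a base checkpoint, switch to LCMScheduler, and attach the LoRA. A hedged sketch under those assumptions (the base model id and step/strength values are illustrative choices, not from this commit):

import torch
from diffusers import AutoPipelineForImage2Image, LCMScheduler
from PIL import Image

device = "cuda" if torch.cuda.is_available() else "cpu"

# Base SD 1.5 checkpoint (assumed id); the LCM-LoRA repo holds only adapter weights.
pipe = AutoPipelineForImage2Image.from_pretrained(
    "runwayml/stable-diffusion-v1-5",
    torch_dtype=torch.float16 if device == "cuda" else torch.float32,
).to(device)
pipe.scheduler = LCMScheduler.from_config(pipe.scheduler.config)
pipe.load_lora_weights("latent-consistency/lcm-lora-sdv1-5")

def process_frame(image: Image.Image, prompt: str) -> Image.Image:
    """Run one few-step LCM img2img pass over a single frame."""
    image = image.resize((512, 512))
    return pipe(
        prompt=prompt,
        image=image,
        strength=0.5,            # with 8 steps, about 4 effective denoising steps
        num_inference_steps=8,   # LCM targets roughly 2-8 steps
        guidance_scale=1.0,      # LCM-LoRA expects little or no CFG
    ).images[0]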
appcopy.py
ADDED
@@ -0,0 +1,86 @@
+import gradio as gr
+import cv2
+import torch
+import numpy as np
+from diffusers import StableDiffusionPipeline,AutoPipelineForImage2Image,AutoencoderTiny
+from transformers import AutoProcessor, AutoModel, AutoTokenizer
+from PIL import Image
+
+#
+#
+# def predict(prompt, frame):
+#     generator = torch.manual_seed(params.seed)
+#     steps = params.steps
+#     strength = params.strength
+#     if int(steps * strength) < 1:
+#         steps = math.ceil(1 / max(0.10, strength))
+#
+#     prompt = params.prompt
+#     prompt_embeds = None
+#
+#     results = self.pipe(
+#         image=frame,
+#         prompt_embeds=prompt_embeds,
+#         prompt=prompt,
+#         negative_prompt=params.negative_prompt,
+#         generator=generator,
+#         strength=strength,
+#         num_inference_steps=steps,
+#         guidance_scale=1.1,
+#         width=params.width,
+#         height=params.height,
+#         output_type="pil",
+#     )
+#
+#     nsfw_content_detected = (
+#         results.nsfw_content_detected[0]
+#         if "nsfw_content_detected" in results
+#         else False
+#     )
+#     if nsfw_content_detected:
+#         return None
+#     result_image = results.images[0]
+#
+#     return result_image
+#
+# def process_frame(frame, prompt="A futuristic landscape"):
+#     """Process a single frame using the real-time latent consistency model."""
+#
+#     # Convert frame to PIL image
+#     image = Image.fromarray(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)).resize((512, 512))
+#
+#     # Apply Real-Time Latent Consistency Model
+#     result = realtime_pipe(prompt=prompt, image=image, strength=0.5, guidance_scale=7.5).images[0]
+#     return np.array(result)
+
+def video_stream(prompt):
+    """Captures video feed from webcam and sends to the AI model."""
+    cap = cv2.VideoCapture(0)
+    while cap.isOpened():
+        ret, frame = cap.read()
+        if not ret:
+            break
+
+        frame = process_frame(frame, prompt)
+        yield frame  # Return processed frame
+    cap.release()
+
+
+# Create Gradio App
+with gr.Blocks() as demo:
+    gr.Markdown("## 🎨 Real-Time AI-Enhanced Webcam using Latent Consistency Model (LCM)")
+
+    with gr.Row():
+        webcam_feed = gr.Camera(streaming=True, label="Live Webcam")
+        processed_image = gr.Image(label="AI-Enhanced Webcam Feed")
+
+    with gr.Row():
+        canvas_output = gr.Image(interactive=True, label="Canvas - Processed Image Output")
+
+    prompt_input = gr.Textbox(label="Real-Time LCM Prompt", value="A futuristic landscape")
+    start_button = gr.Button("Start Real-Time AI Enhancement")
+
+    #start_button.click(fn=video_stream, inputs=[prompt_input], outputs=[processed_image, canvas_output])
+
+demo.launch(share=True)
+
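The commented-out predict() preserved in appcopy.py encodes one detail worth keeping: in diffusers img2img, roughly int(num_inference_steps * strength) denoising steps actually run, so a low strength can round the schedule down to zero. Its guard, pulled out as a standalone helper (the helper name is illustrative):

import math

def effective_steps(steps: int, strength: float) -> int:
    """Img2img executes about int(steps * strength) denoising steps."""
    if int(steps * strength) < 1:
        # Raise the step count so at least one denoising step survives rounding.
        steps = math.ceil(1 / max(0.10, strength))
    return steps

assert effective_steps(4, 0.5) == 4   # int(2.0) = 2 effective steps, no bump
assert effective_steps(4, 0.1) == 10  # int(0.4) = 0 -> bumped to 10 steps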