Raumkommander committed
Commit 9b1ccc9 · 1 Parent(s): 2b288f7
inital deployment1
Files changed:
- .DS_Store  +0 -0
- .gitignore +1 -0
- app.py     +57 -17
.DS_Store ADDED
Binary file (6.15 kB).
.gitignore ADDED
@@ -0,0 +1 @@
+Real-Time-Latent-Consistency-Model/
app.py CHANGED
@@ -2,7 +2,7 @@ import gradio as gr
 import cv2
 import torch
 import numpy as np
-from diffusers import StableDiffusionPipeline
+from diffusers import StableDiffusionPipeline, AutoPipelineForImage2Image, AutoencoderTiny
 from transformers import AutoProcessor, AutoModel, AutoTokenizer
 from PIL import Image
 
@@ -10,24 +10,64 @@ from PIL import Image
 device = "cuda" if torch.cuda.is_available() else "cpu"
 ##realtime_pipe = StableDiffusionPipeline.from_pretrained("radames/Real-Time-Latent-Consistency-Model").to(device)
 
-# Load the model (optimized for inference)
-model_id = "radames/Real-Time-Latent-Consistency-Model"
+# Load the model (optimized for inference)#
+#model_id = "radames/Real-Time-Latent-Consistency-Model"
 
-
+# model_id = "stabilityai/sd-turbo"
+# AutoPipelineForImage2Image.from_pretrained(base_model)
+#
+# tokenizer = AutoTokenizer.from_pretrained(model_id)
+#
+# realtime_pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16)
+# realtime_pipe.to("cuda")  # Use GPU for faster inference
 
-realtime_pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16)
-realtime_pipe.to("cuda")  # Use GPU for faster inference
 
-
-
-
-
-
-
-
-
-
+#
+#
+# def predict(prompt, frame):
+#     generator = torch.manual_seed(params.seed)
+#     steps = params.steps
+#     strength = params.strength
+#     if int(steps * strength) < 1:
+#         steps = math.ceil(1 / max(0.10, strength))
+#
+#     prompt = params.prompt
+#     prompt_embeds = None
+#
+#     results = self.pipe(
+#         image=frame,
+#         prompt_embeds=prompt_embeds,
+#         prompt=prompt,
+#         negative_prompt=params.negative_prompt,
+#         generator=generator,
+#         strength=strength,
+#         num_inference_steps=steps,
+#         guidance_scale=1.1,
+#         width=params.width,
+#         height=params.height,
+#         output_type="pil",
+#     )
+#
+#     nsfw_content_detected = (
+#         results.nsfw_content_detected[0]
+#         if "nsfw_content_detected" in results
+#         else False
+#     )
+#     if nsfw_content_detected:
+#         return None
+#     result_image = results.images[0]
+#
+#     return result_image
+#
+# def process_frame(frame, prompt="A futuristic landscape"):
+#     """Process a single frame using the real-time latent consistency model."""
+#
+#     # Convert frame to PIL image
+#     image = Image.fromarray(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)).resize((512, 512))
+#
+#     # Apply Real-Time Latent Consistency Model
+#     result = realtime_pipe(prompt=prompt, image=image, strength=0.5, guidance_scale=7.5).images[0]
+#     return np.array(result)
 
 def video_stream(prompt):
     """Captures video feed from webcam and sends to the AI model."""
@@ -56,7 +96,7 @@ with gr.Blocks() as demo:
     prompt_input = gr.Textbox(label="Real-Time LCM Prompt", value="A futuristic landscape")
     start_button = gr.Button("Start Real-Time AI Enhancement")
 
-    start_button.click(fn=video_stream, inputs=[prompt_input], outputs=[processed_image, canvas_output])
+    #start_button.click(fn=video_stream, inputs=[prompt_input], outputs=[processed_image, canvas_output])
 
 demo.launch(share=True)
 
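Note on the loading block: the commented-out lines sketch two directions at once, the original radames/Real-Time-Latent-Consistency-Model checkpoint and an AutoPipelineForImage2Image built on stabilityai/sd-turbo (the AutoPipelineForImage2Image.from_pretrained(base_model) line references a base_model name that is never defined in this revision, and the tokenizer line is unnecessary, since diffusers pipelines bundle their own tokenizer). A minimal sketch of the img2img variant the new imports point toward, assuming the sd-turbo checkpoint and the madebyollin/taesd tiny VAE that AutoencoderTiny is commonly paired with; an illustration, not the committed code:

    # Sketch only: a fast image-to-image pipeline for per-frame inference.
    # Checkpoint ids are assumptions inferred from the imports and comments.
    import torch
    from diffusers import AutoPipelineForImage2Image, AutoencoderTiny

    device = "cuda" if torch.cuda.is_available() else "cpu"
    dtype = torch.float16 if device == "cuda" else torch.float32

    realtime_pipe = AutoPipelineForImage2Image.from_pretrained(
        "stabilityai/sd-turbo", torch_dtype=dtype
    )
    # Swap in the tiny autoencoder to cut per-frame VAE decode latency.
    realtime_pipe.vae = AutoencoderTiny.from_pretrained("madebyollin/taesd", torch_dtype=dtype)
    realtime_pipe.to(device)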
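Note on the commented-out predict(): it appears to be lifted from a class elsewhere, since it references self.pipe and a params object that this script never defines, so it would not run even if uncommented. A self-contained port under those assumptions, with explicit arguments standing in for params (all names here are hypothetical):

    import math
    import torch

    def predict(pipe, prompt, frame, seed=42, steps=4, strength=0.5,
                negative_prompt=None):
        # Hypothetical standalone port of the commented-out method above.
        generator = torch.manual_seed(seed)
        # Img2img runs roughly int(steps * strength) denoising steps, so raise
        # steps whenever that product would round down to zero.
        if int(steps * strength) < 1:
            steps = math.ceil(1 / max(0.10, strength))

        results = pipe(
            image=frame,
            prompt=prompt,
            negative_prompt=negative_prompt,
            generator=generator,
            strength=strength,
            num_inference_steps=steps,
            guidance_scale=1.1,
            output_type="pil",
        )
        # Some pipelines attach NSFW flags to their output; treat a flagged
        # frame as "no result" instead of returning a blacked-out image.
        nsfw = getattr(results, "nsfw_content_detected", None)
        if nsfw and nsfw[0]:
            return None
        return results.images[0]

The width/height arguments from the original are dropped here because standard img2img pipelines take their output size from the input image.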
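Note on process_frame(): its real work is the color-space round trip, since OpenCV delivers BGR uint8 arrays while diffusers pipelines expect RGB PIL images. A runnable sketch of the same logic, with the pipeline passed in explicitly (an assumption; the commented code uses a module-level realtime_pipe):

    import cv2
    import numpy as np
    from PIL import Image

    def process_frame(pipe, frame, prompt="A futuristic landscape"):
        # OpenCV frames are BGR; convert to RGB and resize to the model's input size.
        image = Image.fromarray(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)).resize((512, 512))
        result = pipe(prompt=prompt, image=image, strength=0.5,
                      guidance_scale=7.5).images[0]
        # Back to an RGB ndarray, which Gradio image components accept directly.
        return np.array(result)

The strength=0.5 and guidance_scale=7.5 values mirror the commented code; for sd-turbo specifically, very few steps and a guidance scale near 0 are the usual settings.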
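Note on the UI hunk: commenting out start_button.click() removes the only event handler, so the Start button in this revision does nothing when clicked. For reference, a minimal sketch of the wiring pattern (the two image components are assumptions; the diff does not show the full Blocks layout):

    import gradio as gr

    def video_stream(prompt):
        # Stand-in for the app's webcam loop, which yields
        # (processed_image, canvas_output) pairs as a generator.
        yield None, None

    with gr.Blocks() as demo:
        processed_image = gr.Image(label="Processed Frame")  # assumed component
        canvas_output = gr.Image(label="Canvas")             # assumed component
        prompt_input = gr.Textbox(label="Real-Time LCM Prompt", value="A futuristic landscape")
        start_button = gr.Button("Start Real-Time AI Enhancement")

        # This is the call the commit comments out; without it the button is inert.
        start_button.click(fn=video_stream, inputs=[prompt_input],
                           outputs=[processed_image, canvas_output])

    demo.launch(share=True)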