Raumkommander committed on
Commit 9cbc798 · 1 Parent(s): f1e3f4b

initial deployment1

Files changed (1)
  1. app.py +7 -21
app.py CHANGED
@@ -10,22 +10,8 @@ from PIL import Image
 device = "cuda" if torch.cuda.is_available() else "cpu"
 realtime_pipe = StableDiffusionImg2ImgPipeline.from_pretrained("radames/Real-Time-Latent-Consistency-Model").to(device)

-def apply_color_filter(frame, filter_type="None"):
-    """Apply a color filter to the frame."""
-    if filter_type == "Red":
-        frame[:, :, 1] = 0  # Remove green channel
-        frame[:, :, 2] = 0  # Remove blue channel
-    elif filter_type == "Green":
-        frame[:, :, 0] = 0  # Remove red channel
-        frame[:, :, 2] = 0  # Remove blue channel
-    elif filter_type == "Blue":
-        frame[:, :, 0] = 0  # Remove red channel
-        frame[:, :, 1] = 0  # Remove green channel
-    return frame
-
-def process_frame(frame, filter_type="None", prompt="A futuristic landscape"):
-    """Process a single frame by applying a color filter and real-time latent consistency model."""
-    frame = apply_color_filter(frame, filter_type)
+def process_frame(frame, prompt="A futuristic landscape"):
+    """Process a single frame using the real-time latent consistency model."""

     # Convert frame to PIL image
     image = Image.fromarray(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)).resize((512, 512))
@@ -34,15 +20,15 @@ def process_frame(frame, filter_type="None", prompt="A futuristic landscape"):
     result = realtime_pipe(prompt=prompt, image=image, strength=0.5, guidance_scale=7.5).images[0]
     return np.array(result)

-def video_stream(filter_type, prompt):
-    """Captures video feed from webcam, applies color filter, and sends to the AI model."""
+def video_stream(prompt):
+    """Captures video feed from webcam and sends to the AI model."""
     cap = cv2.VideoCapture(0)
     while cap.isOpened():
         ret, frame = cap.read()
         if not ret:
             break

-        frame = process_frame(frame, filter_type, prompt)
+        frame = process_frame(frame, prompt)
         yield frame  # Return processed frame
     cap.release()

@@ -53,10 +39,10 @@ with gr.Blocks() as demo:
     with gr.Row():
         webcam_feed = gr.Camera(streaming=True, label="Live Webcam")
         processed_image = gr.Image(label="AI-Enhanced Webcam Feed")
+        canvas = gr.Image(interactive=True, label="Canvas - Edit Processed Image")

-    filter_selector = gr.Radio(["None", "Red", "Green", "Blue"], label="Color Filter")
     prompt_input = gr.Textbox(label="Real-Time Latent Consistency Model Prompt", value="A futuristic landscape")

-    webcam_feed.change(fn=video_stream, inputs=[filter_selector, prompt_input], outputs=processed_image)
+    webcam_feed.change(fn=video_stream, inputs=[prompt_input], outputs=[processed_image, canvas])

 demo.launch(share=True)
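
This change wires `webcam_feed.change` to two output components while `video_stream` still yields a single frame. Gradio expects a handler bound to multiple outputs to return or yield one value per output component, so a generator feeding both `processed_image` and `canvas` would need to yield a pair. A minimal sketch under that assumption, reusing `process_frame`, `cv2`, and the component names already present in this file; sending the same processed frame to both outputs is an illustrative choice, not part of the commit:

def video_stream(prompt):
    """Capture webcam frames, process them, and yield one value per output component."""
    cap = cv2.VideoCapture(0)
    while cap.isOpened():
        ret, frame = cap.read()
        if not ret:
            break
        result = process_frame(frame, prompt)
        # Two outputs are registered ([processed_image, canvas]), so yield a pair;
        # here the same processed frame is sent to both components.
        yield result, result
    cap.release()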