radames committed on
Commit
0822be9
1 Parent(s): f45636e
app-controlnetlora.py DELETED
@@ -1,315 +0,0 @@
-import asyncio
-import json
-import logging
-import traceback
-from pydantic import BaseModel
-
-from fastapi import FastAPI, WebSocket, HTTPException, WebSocketDisconnect
-from fastapi.middleware.cors import CORSMiddleware
-from fastapi.responses import (
-    StreamingResponse,
-    JSONResponse,
-    HTMLResponse,
-    FileResponse,
-)
-
-from diffusers import (
-    StableDiffusionControlNetImg2ImgPipeline,
-    ControlNetModel,
-    LCMScheduler,
-)
-from compel import Compel
-import torch
-
-from canny_gpu import SobelOperator
-
-try:
-    import intel_extension_for_pytorch as ipex
-except ImportError:
-    pass
-from PIL import Image
-import numpy as np
-import gradio as gr
-import io
-import uuid
-import os
-import time
-import psutil
-
-
-MAX_QUEUE_SIZE = int(os.environ.get("MAX_QUEUE_SIZE", 0))
-TIMEOUT = float(os.environ.get("TIMEOUT", 0))
-SAFETY_CHECKER = os.environ.get("SAFETY_CHECKER", None)
-TORCH_COMPILE = os.environ.get("TORCH_COMPILE", None)
-
-WIDTH = 512
-HEIGHT = 512
-
-# check if MPS is available (macOS only: M1/M2/M3 chips)
-mps_available = hasattr(torch.backends, "mps") and torch.backends.mps.is_available()
-xpu_available = hasattr(torch, "xpu") and torch.xpu.is_available()
-device = torch.device(
-    "cuda" if torch.cuda.is_available() else "xpu" if xpu_available else "cpu"
-)
-
-# torch.float16 saves GPU memory (MPS falls back to float32 below)
-torch_dtype = torch.float16
-
-print(f"TIMEOUT: {TIMEOUT}")
-print(f"SAFETY_CHECKER: {SAFETY_CHECKER}")
-print(f"MAX_QUEUE_SIZE: {MAX_QUEUE_SIZE}")
-print(f"device: {device}")
-
-if mps_available:
-    # MPS is detected, but the pipeline runs on CPU with float32 here
-    device = torch.device("cpu")
-    torch_dtype = torch.float32
-
-controlnet_canny = ControlNetModel.from_pretrained(
-    "lllyasviel/control_v11p_sd15_canny", torch_dtype=torch_dtype
-).to(device)
-
-canny_torch = SobelOperator(device=device)
-
-models_id = [
-    "plasmo/woolitize",
-    "nitrosocke/Ghibli-Diffusion",
-    "nitrosocke/mo-di-diffusion",
-]
-lcm_lora_id = "latent-consistency/lcm-lora-sdv1-5"
-
-# keep the safety checker only when explicitly enabled
-safety_kwargs = {} if SAFETY_CHECKER == "True" else {"safety_checker": None}
-pipes = {}
-for model_id in models_id:
-    pipe = StableDiffusionControlNetImg2ImgPipeline.from_pretrained(
-        model_id,
-        controlnet=controlnet_canny,
-        **safety_kwargs,
-    )
-    pipes[model_id] = pipe
-
-for pipe in pipes.values():
-    pipe.scheduler = LCMScheduler.from_config(pipe.scheduler.config)
-    pipe.set_progress_bar_config(disable=True)
-    pipe.to(device=device, dtype=torch_dtype)
-
-    if psutil.virtual_memory().total < 64 * 1024**3:
-        pipe.enable_attention_slicing()
-
-    # Load LCM LoRA
-    pipe.load_lora_weights(lcm_lora_id, adapter_name="lcm")
-
-    compel_proc = Compel(
-        tokenizer=pipe.tokenizer,
-        text_encoder=pipe.text_encoder,
-        truncate_long_prompts=False,
-    )
-    if TORCH_COMPILE:
-        pipe.unet = torch.compile(pipe.unet, mode="reduce-overhead", fullgraph=True)
-        pipe.vae = torch.compile(pipe.vae, mode="reduce-overhead", fullgraph=True)
-
-    pipe(
-        prompt="warmup",
-        image=[Image.new("RGB", (768, 768))],
-        control_image=[Image.new("RGB", (768, 768))],
-    )
-
-
-user_queue_map = {}
-
-
-class InputParams(BaseModel):
-    seed: int = 2159232
-    prompt: str
-    guidance_scale: float = 8.0
-    strength: float = 0.5
-    steps: int = 4
-    lcm_steps: int = 50
-    width: int = WIDTH
-    height: int = HEIGHT
-    controlnet_scale: float = 0.8
-    controlnet_start: float = 0.0
-    controlnet_end: float = 1.0
-    canny_low_threshold: float = 0.31
-    canny_high_threshold: float = 0.78
-    debug_canny: bool = False
-    model_id: str = "nitrosocke/Ghibli-Diffusion"
-
-
-def predict(input_image: Image.Image, params: InputParams):
-    generator = torch.manual_seed(params.seed)
-
-    control_image = canny_torch(
-        input_image, params.canny_low_threshold, params.canny_high_threshold
-    )
-    prompt_embeds = compel_proc(params.prompt)
-    pipe = pipes[params.model_id]
-    results = pipe(
-        control_image=control_image,
-        prompt_embeds=prompt_embeds,
-        generator=generator,
-        image=input_image,
-        strength=params.strength,
-        num_inference_steps=params.steps,
-        guidance_scale=params.guidance_scale,
-        width=params.width,
-        height=params.height,
-        output_type="pil",
-        controlnet_conditioning_scale=params.controlnet_scale,
-        control_guidance_start=params.controlnet_start,
-        control_guidance_end=params.controlnet_end,
-    )
-    nsfw_content_detected = (
-        results.nsfw_content_detected[0]
-        if "nsfw_content_detected" in results
-        else False
-    )
-    if nsfw_content_detected:
-        return None
-    result_image = results.images[0]
-    if params.debug_canny:
-        # paste control_image on top of result_image
-        w0, h0 = (200, 200)
-        control_image = control_image.resize((w0, h0))
-        w1, h1 = result_image.size
-        result_image.paste(control_image, (w1 - w0, h1 - h0))
-
-    return result_image
-
-
-app = FastAPI()
-app.add_middleware(
-    CORSMiddleware,
-    allow_origins=["*"],
-    allow_credentials=True,
-    allow_methods=["*"],
-    allow_headers=["*"],
-)
-
-
-@app.websocket("/ws")
-async def websocket_endpoint(websocket: WebSocket):
-    await websocket.accept()
-    if MAX_QUEUE_SIZE > 0 and len(user_queue_map) >= MAX_QUEUE_SIZE:
-        print("Server is full")
-        await websocket.send_json({"status": "error", "message": "Server is full"})
-        await websocket.close()
-        return
-
-    try:
-        uid = str(uuid.uuid4())
-        print(f"New user connected: {uid}")
-        await websocket.send_json(
-            {"status": "success", "message": "Connected", "userId": uid}
-        )
-        user_queue_map[uid] = {"queue": asyncio.Queue()}
-        await websocket.send_json(
-            {"status": "start", "message": "Start Streaming", "userId": uid}
-        )
-        await handle_websocket_data(websocket, uid)
-    except WebSocketDisconnect as e:
-        logging.error(f"WebSocket Error: {e}, {uid}")
-        traceback.print_exc()
-    finally:
-        print(f"User disconnected: {uid}")
-        queue_value = user_queue_map.pop(uid, None)
-        queue = queue_value.get("queue") if queue_value else None
-        if queue:
-            while not queue.empty():
-                try:
-                    queue.get_nowait()
-                except asyncio.QueueEmpty:
-                    continue
-
-
-@app.get("/queue_size")
-async def get_queue_size():
-    queue_size = len(user_queue_map)
-    return JSONResponse({"queue_size": queue_size})
-
-
-@app.get("/stream/{user_id}")
-async def stream(user_id: uuid.UUID):
-    uid = str(user_id)
-    try:
-        user_queue = user_queue_map[uid]
-        queue = user_queue["queue"]
-
-        async def generate():
-            while True:
-                data = await queue.get()
-                input_image = data["image"]
-                params = data["params"]
-                if input_image is None:
-                    continue
-
-                image = predict(
-                    input_image,
-                    params,
-                )
-                if image is None:
-                    continue
-                frame_data = io.BytesIO()
-                image.save(frame_data, format="JPEG")
-                frame_data = frame_data.getvalue()
-                if frame_data is not None and len(frame_data) > 0:
-                    yield b"--frame\r\nContent-Type: image/jpeg\r\n\r\n" + frame_data + b"\r\n"
-
-                await asyncio.sleep(1.0 / 120.0)
-
-        return StreamingResponse(
-            generate(), media_type="multipart/x-mixed-replace;boundary=frame"
-        )
-    except Exception as e:
-        logging.error(f"Streaming Error: {e}, {user_queue_map}")
-        traceback.print_exc()
-        raise HTTPException(status_code=404, detail="User not found")
-
-
-async def handle_websocket_data(websocket: WebSocket, user_id: uuid.UUID):
-    uid = str(user_id)
-    user_queue = user_queue_map[uid]
-    queue = user_queue["queue"]
-    if not queue:
-        return HTTPException(status_code=404, detail="User not found")
-    last_time = time.time()
-    try:
-        while True:
-            data = await websocket.receive_bytes()
-            params = await websocket.receive_json()
-            params = InputParams(**params)
-            pil_image = Image.open(io.BytesIO(data))
-
-            while not queue.empty():
-                try:
-                    queue.get_nowait()
-                except asyncio.QueueEmpty:
-                    continue
-            await queue.put({"image": pil_image, "params": params})
-            if TIMEOUT > 0 and time.time() - last_time > TIMEOUT:
-                await websocket.send_json(
-                    {
-                        "status": "timeout",
-                        "message": "Your session has ended",
-                        "userId": uid,
-                    }
-                )
-                await websocket.close()
-                return
-
-    except Exception as e:
-        logging.error(f"Error: {e}")
-        traceback.print_exc()
-
-
-@app.get("/", response_class=HTMLResponse)
-async def root():
-    return FileResponse("./static/controlnetlora.html")
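
For reference, a minimal client sketch for the `/ws` protocol this file implemented: each iteration sends one binary JPEG frame followed by a JSON parameter payload, and generated frames are read from the MJPEG endpoint `/stream/{userId}`. The host/port and the `websockets` dependency are assumptions; the ASGI runner (e.g. uvicorn) that served the app is not part of this commit.

```python
# Hypothetical client sketch; assumes the app runs at localhost:7860 and that
# the `websockets` package is installed (pip install websockets).
import asyncio
import json

import websockets


async def drive(jpeg_path: str):
    async with websockets.connect("ws://localhost:7860/ws") as ws:
        # The server greets with a "success" message carrying the userId,
        # then a "start" message once streaming may begin.
        user_id = json.loads(await ws.recv())["userId"]
        await ws.recv()  # {"status": "start", ...}
        print(f"open http://localhost:7860/stream/{user_id} to watch the output")
        frame = open(jpeg_path, "rb").read()
        params = {
            "prompt": "ghibli style portrait of a person",
            "model_id": "nitrosocke/Ghibli-Diffusion",
        }
        while True:
            await ws.send(frame)               # binary frame first ...
            await ws.send(json.dumps(params))  # ... then its parameters
            await asyncio.sleep(1 / 30)


asyncio.run(drive("frame.jpg"))
```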
static/controlnetlora.html DELETED
@@ -1,446 +0,0 @@
-<!doctype html>
-<html>
-
-<head>
-    <meta charset="UTF-8">
-    <title>Real-Time Latent Consistency Model ControlNet LoRA</title>
-    <meta name="viewport" content="width=device-width, initial-scale=1.0">
-    <script
-        src="https://cdnjs.cloudflare.com/ajax/libs/iframe-resizer/4.3.1/iframeResizer.contentWindow.min.js"></script>
-    <script src="https://cdn.jsdelivr.net/npm/piexifjs@1.0.6/piexif.min.js"></script>
-    <script src="https://cdn.tailwindcss.com"></script>
-    <style type="text/tailwindcss">
-        .button {
-            @apply bg-gray-700 hover:bg-gray-800 text-white font-normal p-2 rounded disabled:bg-gray-300 dark:disabled:bg-gray-700 disabled:cursor-not-allowed dark:disabled:text-black
-        }
-    </style>
-    <script type="module">
-        const getValue = (id) => {
-            const el = document.querySelector(`${id}`)
-            if (el.type === "checkbox")
-                return el.checked;
-            return el.value;
-        }
-        const startBtn = document.querySelector("#start");
-        const stopBtn = document.querySelector("#stop");
-        const videoEl = document.querySelector("#webcam");
-        const imageEl = document.querySelector("#player");
-        const queueSizeEl = document.querySelector("#queue_size");
-        const errorEl = document.querySelector("#error");
-        const snapBtn = document.querySelector("#snap");
-        const webcamsEl = document.querySelector("#webcams");
-
-        function LCMLive(webcamVideo, liveImage) {
-            let websocket;
-
-            async function start() {
-                return new Promise((resolve, reject) => {
-                    const websocketURL = `${window.location.protocol === "https:" ? "wss" : "ws"
-                        }:${window.location.host}/ws`;
-
-                    const socket = new WebSocket(websocketURL);
-                    socket.onopen = () => {
-                        console.log("Connected to websocket");
-                    };
-                    socket.onclose = () => {
-                        console.log("Disconnected from websocket");
-                        stop();
-                        resolve({ "status": "disconnected" });
-                    };
-                    socket.onerror = (err) => {
-                        console.error(err);
-                        reject(err);
-                    };
-                    socket.onmessage = (event) => {
-                        const data = JSON.parse(event.data);
-                        switch (data.status) {
-                            case "success":
-                                break;
-                            case "start":
-                                const userId = data.userId;
-                                initVideoStream(userId);
-                                break;
-                            case "timeout":
-                                stop();
-                                resolve({ "status": "timeout" });
-                                break;
-                            case "error":
-                                stop();
-                                reject(data.message);
-
-                        }
-                    };
-                    websocket = socket;
-                })
-            }
-            function switchCamera() {
-                const constraints = {
-                    audio: false,
-                    video: { width: 1024, height: 1024, deviceId: mediaDevices[webcamsEl.value].deviceId }
-                };
-                navigator.mediaDevices
-                    .getUserMedia(constraints)
-                    .then((mediaStream) => {
-                        webcamVideo.removeEventListener("timeupdate", videoTimeUpdateHandler);
-                        webcamVideo.srcObject = mediaStream;
-                        webcamVideo.onloadedmetadata = () => {
-                            webcamVideo.play();
-                            webcamVideo.addEventListener("timeupdate", videoTimeUpdateHandler);
-                        };
-                    })
-                    .catch((err) => {
-                        console.error(`${err.name}: ${err.message}`);
-                    });
-            }
-
-            async function videoTimeUpdateHandler() {
-                const model_id = getValue("input[name=base_model]:checked");
-
-                const [WIDTH, HEIGHT] = [512, 512];
-
-                const canvas = new OffscreenCanvas(WIDTH, HEIGHT);
-                const videoW = webcamVideo.videoWidth;
-                const videoH = webcamVideo.videoHeight;
-                const aspectRatio = WIDTH / HEIGHT;
-
-                const ctx = canvas.getContext("2d");
-                ctx.drawImage(webcamVideo, videoW / 2 - videoH * aspectRatio / 2, 0, videoH * aspectRatio, videoH, 0, 0, WIDTH, HEIGHT)
-                const blob = await canvas.convertToBlob({ type: "image/jpeg", quality: 1 });
-                websocket.send(blob);
-                websocket.send(JSON.stringify({
-                    "seed": getValue("#seed"),
-                    "prompt": getValue("#prompt"),
-                    "guidance_scale": getValue("#guidance-scale"),
-                    "strength": getValue("#strength"),
-                    "steps": getValue("#steps"),
-                    "width": WIDTH,
-                    "height": HEIGHT,
-                    "controlnet_scale": getValue("#controlnet_scale"),
-                    "controlnet_start": getValue("#controlnet_start"),
-                    "controlnet_end": getValue("#controlnet_end"),
-                    "canny_low_threshold": getValue("#canny_low_threshold"),
-                    "canny_high_threshold": getValue("#canny_high_threshold"),
-                    "debug_canny": getValue("#debug_canny"),
-                    "model_id": model_id
-                }));
-            }
-            let mediaDevices = [];
-            async function initVideoStream(userId) {
-                liveImage.src = `/stream/${userId}`;
-                await navigator.mediaDevices.enumerateDevices()
-                    .then(devices => {
-                        const cameras = devices.filter(device => device.kind === 'videoinput');
-                        mediaDevices = cameras;
-                        webcamsEl.innerHTML = "";
-                        cameras.forEach((camera, index) => {
-                            const option = document.createElement("option");
-                            option.value = index;
-                            option.innerText = camera.label;
-                            webcamsEl.appendChild(option);
-                            option.selected = index === 0;
-                        });
-                        webcamsEl.addEventListener("change", switchCamera);
-                    })
-                    .catch(err => {
-                        console.error(err);
-                    });
-                const constraints = {
-                    audio: false,
-                    video: { width: 1024, height: 1024, deviceId: mediaDevices[0].deviceId }
-                };
-                navigator.mediaDevices
-                    .getUserMedia(constraints)
-                    .then((mediaStream) => {
-                        webcamVideo.srcObject = mediaStream;
-                        webcamVideo.onloadedmetadata = () => {
-                            webcamVideo.play();
-                            webcamVideo.addEventListener("timeupdate", videoTimeUpdateHandler);
-                        };
-                    })
-                    .catch((err) => {
-                        console.error(`${err.name}: ${err.message}`);
-                    });
-            }
-
-
-            async function stop() {
-                websocket.close();
-                navigator.mediaDevices.getUserMedia({ video: true }).then((mediaStream) => {
-                    mediaStream.getTracks().forEach((track) => track.stop());
-                });
-                webcamVideo.removeEventListener("timeupdate", videoTimeUpdateHandler);
-                webcamsEl.removeEventListener("change", switchCamera);
-                webcamVideo.srcObject = null;
-            }
-            return {
-                start,
-                stop
-            }
-        }
-        function toggleMessage(type) {
-            errorEl.hidden = false;
-            errorEl.scrollIntoView();
-            switch (type) {
-                case "error":
-                    errorEl.innerText = "Too many users are using the same GPU, please try again later.";
-                    errorEl.classList.add("bg-red-300", "text-red-900");
-                    break;
-                case "success":
-                    errorEl.innerText = "Your session has ended, please start a new one.";
-                    errorEl.classList.add("bg-green-300", "text-green-900");
-                    break;
-            }
-            setTimeout(() => {
-                errorEl.hidden = true;
-            }, 2000);
-        }
-        function snapImage() {
-            try {
-                const zeroth = {};
-                const exif = {};
-                const gps = {};
-                zeroth[piexif.ImageIFD.Make] = "LCM Image-to-Image ControlNet";
-                zeroth[piexif.ImageIFD.ImageDescription] = `prompt: ${getValue("#prompt")} | seed: ${getValue("#seed")} | guidance_scale: ${getValue("#guidance-scale")} | strength: ${getValue("#strength")} | controlnet_start: ${getValue("#controlnet_start")} | controlnet_end: ${getValue("#controlnet_end")} | steps: ${getValue("#steps")}`;
-                zeroth[piexif.ImageIFD.Software] = "https://github.com/radames/Real-Time-Latent-Consistency-Model";
-                exif[piexif.ExifIFD.DateTimeOriginal] = new Date().toISOString();
-
-                const exifObj = { "0th": zeroth, "Exif": exif, "GPS": gps };
-                const exifBytes = piexif.dump(exifObj);
-
-                const canvas = document.createElement("canvas");
-                canvas.width = imageEl.naturalWidth;
-                canvas.height = imageEl.naturalHeight;
-                const ctx = canvas.getContext("2d");
-                ctx.drawImage(imageEl, 0, 0);
-                const dataURL = canvas.toDataURL("image/jpeg");
-                const withExif = piexif.insert(exifBytes, dataURL);
-
-                const a = document.createElement("a");
-                a.href = withExif;
-                a.download = `lcm_txt_2_img${Date.now()}.jpg`;
-                a.click();
-            } catch (err) {
-                console.log(err);
-            }
-        }
-        const models_id = {
-            "nitrosocke/Ghibli-Diffusion": "ghibli style",
-            "nitrosocke/mo-di-diffusion": "modern disney style",
-            "plasmo/woolitize": "woolitize"
-        }
-
-        document.addEventListener("DOMContentLoaded", () => {
-            const models_options = document.querySelector("#models_options");
-            Object.entries(models_id).forEach(([model, activation], i) => {
-                const modelEl = document.createElement("div");
-                modelEl.innerHTML = `
-                <input type="radio" id="${model}" name="base_model" value="${model}" class="cursor-pointer" ${i === 0 ? "checked" : ""}>
-                <label for="${model}" class="text-sm cursor-pointer" title="Use the keyword on your prompt: ${activation}">${model}: <b>${activation}</b>
-                <a href="https://hf.co/${model}" title="Model link on Hugging Face" target="_blank" class="text-sm text-blue-500 underline hover:no-underline">⤴️</a></label>
-                `;
-                models_options.appendChild(modelEl);
-            })
-            models_options.addEventListener("change", () => {
-                const model = getValue("input[name=base_model]:checked");
-                const prompt = getValue("#prompt");
-                const activation = models_id[model];
-                if (prompt.includes(activation))
-                    return;
-                document.querySelector("#prompt").value = `${activation} portrait of a person`;
-            })
-
-        })
-        const lcmLive = LCMLive(videoEl, imageEl);
-        startBtn.addEventListener("click", async () => {
-            try {
-                startBtn.disabled = true;
-                snapBtn.disabled = false;
-                const res = await lcmLive.start();
-                startBtn.disabled = false;
-                if (res.status === "timeout")
-                    toggleMessage("success")
-            } catch (err) {
-                console.log(err);
-                toggleMessage("error")
-                startBtn.disabled = false;
-            }
-        });
-        stopBtn.addEventListener("click", () => {
-            lcmLive.stop();
-        });
-        window.addEventListener("beforeunload", () => {
-            lcmLive.stop();
-        });
-        snapBtn.addEventListener("click", snapImage);
-        setInterval(() =>
-            fetch("/queue_size")
-                .then((res) => res.json())
-                .then((data) => {
-                    queueSizeEl.innerText = data.queue_size;
-                })
-                .catch((err) => {
-                    console.log(err);
-                })
-            , 5000);
-    </script>
-</head>
-
-<body class="text-black dark:bg-gray-900 dark:text-white">
-    <div class="fixed right-2 top-2 p-4 font-bold text-sm rounded-lg max-w-xs text-center" id="error">
-    </div>
-    <main class="container mx-auto px-4 py-4 max-w-4xl flex flex-col gap-4">
-        <article class="text-center max-w-xl mx-auto">
-            <h1 class="text-3xl font-bold">Real-Time Latent Consistency Model</h1>
-            <h2 class="text-2xl font-bold mb-4">ControlNet LoRA</h2>
-            <p class="text-sm">
-                This demo showcases the
-                <a href="https://huggingface.co/blog/lcm_lora" target="_blank"
-                    class="text-blue-500 underline hover:no-underline">LCM LoRA</a> ControlNet pipeline
-                using <a
-                    href="https://huggingface.co/docs/diffusers/api/pipelines/latent_consistency_models#latent-consistency-models"
-                    target="_blank" class="text-blue-500 underline hover:no-underline">Diffusers</a> with an MJPEG
-                stream server.
-            </p>
-            <p class="text-sm">
-                There are <span id="queue_size" class="font-bold">0</span> user(s) sharing the same GPU.
-            </p>
-        </article>
-        <div>
-            <h2 class="font-medium">Prompt</h2>
-            <p class="text-sm text-gray-500">
-                Change the prompt to generate different images; accepts <a
-                    href="https://github.com/damian0815/compel/blob/main/doc/syntax.md" target="_blank"
-                    class="text-blue-500 underline hover:no-underline">Compel</a> syntax.
-            </p>
-            <div class="flex text-normal px-1 py-1 border border-gray-700 rounded-md items-center">
-                <textarea id="prompt" class="font-light w-full px-3 py-2 mx-1 outline-none dark:text-black"
-                    title="Prompt, this is an example, feel free to modify"
-                    placeholder="Add your prompt here...">ghibli style portrait of a person</textarea>
-            </div>
-        </div>
-        <!-- -->
-        <label class="font-medium" for="base_model">Base Model</label>
-        <fieldset class="flex flex-col gap-2" id="models_options">
-        </fieldset>
-        <!-- -->
-        <div class="">
-            <details>
-                <summary class="font-medium cursor-pointer">Advanced Options</summary>
-                <div class="grid grid-cols-3 sm:grid-cols-6 items-center gap-3 py-3">
-                    <label for="webcams" class="text-sm font-medium">Camera Options: </label>
-                    <select id="webcams" class="text-sm border-2 border-gray-500 rounded-md font-light dark:text-black">
-                    </select>
-                    <div></div>
-                    <label class="text-sm font-medium" for="steps">Inference Steps
-                    </label>
-                    <input type="range" id="steps" name="steps" min="2" max="10" value="4"
-                        oninput="this.nextElementSibling.value = Number(this.value)">
-                    <output class="text-xs w-[50px] text-center font-light px-1 py-1 border border-gray-700 rounded-md">
-                        4</output>
-                    <label class="text-sm font-medium" for="guidance-scale">Guidance Scale
-                    </label>
-                    <input type="range" id="guidance-scale" name="guidance-scale" min="0" max="5" step="0.001"
-                        value="0.3" oninput="this.nextElementSibling.value = Number(this.value).toFixed(2)">
-                    <output class="text-xs w-[50px] text-center font-light px-1 py-1 border border-gray-700 rounded-md">
-                        0.3</output>
-                    <!-- -->
-                    <label class="text-sm font-medium" for="strength">Strength</label>
-                    <input type="range" id="strength" name="strength" min="0.1" max="1" step="0.0001" value="0.50"
-                        oninput="this.nextElementSibling.value = Number(this.value).toFixed(2)">
-                    <output class="text-xs w-[50px] text-center font-light px-1 py-1 border border-gray-700 rounded-md">
-                        0.5</output>
-                    <!-- -->
-                    <label class="text-sm font-medium" for="controlnet_scale">ControlNet Condition Scale</label>
-                    <input type="range" id="controlnet_scale" name="controlnet_scale" min="0.0" max="1" step="0.001"
-                        value="0.80" oninput="this.nextElementSibling.value = Number(this.value).toFixed(2)">
-                    <output class="text-xs w-[50px] text-center font-light px-1 py-1 border border-gray-700 rounded-md">
-                        0.8</output>
-                    <!-- -->
-                    <label class="text-sm font-medium" for="controlnet_start">ControlNet Guidance Start</label>
-                    <input type="range" id="controlnet_start" name="controlnet_start" min="0.0" max="1.0" step="0.001"
-                        value="0.0" oninput="this.nextElementSibling.value = Number(this.value).toFixed(2)">
-                    <output class="text-xs w-[50px] text-center font-light px-1 py-1 border border-gray-700 rounded-md">
-                        0.0</output>
-                    <!-- -->
-                    <label class="text-sm font-medium" for="controlnet_end">ControlNet Guidance End</label>
-                    <input type="range" id="controlnet_end" name="controlnet_end" min="0.0" max="1.0" step="0.001"
-                        value="0.8" oninput="this.nextElementSibling.value = Number(this.value).toFixed(2)">
-                    <output class="text-xs w-[50px] text-center font-light px-1 py-1 border border-gray-700 rounded-md">
-                        0.8</output>
-                    <!-- -->
-                    <label class="text-sm font-medium" for="canny_low_threshold">Canny Low Threshold</label>
-                    <input type="range" id="canny_low_threshold" name="canny_low_threshold" min="0.0" max="1.0"
-                        step="0.001" value="0.1"
-                        oninput="this.nextElementSibling.value = Number(this.value).toFixed(2)">
-                    <output class="text-xs w-[50px] text-center font-light px-1 py-1 border border-gray-700 rounded-md">
-                        0.1</output>
-                    <!-- -->
-                    <label class="text-sm font-medium" for="canny_high_threshold">Canny High Threshold</label>
-                    <input type="range" id="canny_high_threshold" name="canny_high_threshold" min="0.0" max="1.0"
-                        step="0.001" value="0.2"
-                        oninput="this.nextElementSibling.value = Number(this.value).toFixed(2)">
-                    <output class="text-xs w-[50px] text-center font-light px-1 py-1 border border-gray-700 rounded-md">
-                        0.2</output>
-                    <!-- -->
-                    <label class="text-sm font-medium" for="seed">Seed</label>
-                    <input type="number" id="seed" name="seed" value="299792458"
-                        class="font-light border border-gray-700 text-right rounded-md p-2 dark:text-black">
-                    <button
-                        onclick="document.querySelector('#seed').value = Math.floor(Math.random() * Number.MAX_SAFE_INTEGER)"
-                        class="button">
-                        Rand
-                    </button>
-                    <!-- -->
-                    <!-- -->
-                    <!-- <label class="text-sm font-medium" for="dimension">Image Dimensions</label>
-                    <div class="col-span-2 flex gap-2">
-                        <div class="flex gap-1">
-                            <input type="radio" id="dimension512" name="dimension" value="[512,512]" checked
-                                class="cursor-pointer">
-                            <label for="dimension512" class="text-sm cursor-pointer">512x512</label>
-                        </div>
-                        <div class="flex gap-1">
-                            <input type="radio" id="dimension768" name="dimension" value="[768,768]"
-                                class="cursor-pointer">
-                            <label for="dimension768" class="text-sm cursor-pointer">768x768</label>
-                        </div>
-                    </div> -->
-                    <!-- -->
-                    <!-- -->
-                    <label class="text-sm font-medium" for="debug_canny">Debug Canny</label>
-                    <div class="col-span-2 flex gap-2">
-                        <input type="checkbox" id="debug_canny" name="debug_canny" class="cursor-pointer">
-                        <label for="debug_canny" class="text-sm cursor-pointer"></label>
-                    </div>
-                    <div></div>
-                    <!-- -->
-                </div>
-            </details>
-        </div>
-        <div class="flex gap-3">
-            <button id="start" class="button">
-                Start
-            </button>
-            <button id="stop" class="button">
-                Stop
-            </button>
-            <button id="snap" disabled class="button ml-auto">
-                Snapshot
-            </button>
-        </div>
-        <div class="relative rounded-lg border border-slate-300 overflow-hidden">
-            <img id="player" class="w-full aspect-square rounded-lg"
-                src="data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAQAAAC1HAwCAAAAC0lEQVR42mNkYAAAAAYAAjCB0C8AAAAASUVORK5CYII=">
-            <div class="absolute top-0 left-0 w-1/4 aspect-square">
-                <video id="webcam" class="w-full aspect-square relative z-10 object-cover" playsinline autoplay muted
-                    loop></video>
-                <svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 448 448" width="100"
-                    class="w-full p-4 absolute top-0 opacity-20 z-0">
-                    <path fill="currentColor"
-                        d="M224 256a128 128 0 1 0 0-256 128 128 0 1 0 0 256zm-45.7 48A178.3 178.3 0 0 0 0 482.3 29.7 29.7 0 0 0 29.7 512h388.6a29.7 29.7 0 0 0 29.7-29.7c0-98.5-79.8-178.3-178.3-178.3h-91.4z" />
-                </svg>
-            </div>
-        </div>
-    </main>
-</body>
-
-</html>
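
Taken together, the two deleted files amount to the following core diffusers recipe. This is a condensed, standalone sketch, not the app itself: `input.jpg`, the CUDA device, and the `guidance_scale` value are assumptions, and the app computed its canny map on the GPU with `canny_gpu.SobelOperator` rather than reusing the input image.

```python
# Standalone sketch of the pipeline the deleted app served: SD 1.5 + canny
# ControlNet + the LCM-LoRA adapter for few-step img2img.
import torch
from diffusers import (
    ControlNetModel,
    LCMScheduler,
    StableDiffusionControlNetImg2ImgPipeline,
)
from diffusers.utils import load_image

controlnet = ControlNetModel.from_pretrained(
    "lllyasviel/control_v11p_sd15_canny", torch_dtype=torch.float16
)
pipe = StableDiffusionControlNetImg2ImgPipeline.from_pretrained(
    "nitrosocke/Ghibli-Diffusion",
    controlnet=controlnet,
    safety_checker=None,
    torch_dtype=torch.float16,
).to("cuda")  # placeholder device assumption
pipe.scheduler = LCMScheduler.from_config(pipe.scheduler.config)
pipe.load_lora_weights("latent-consistency/lcm-lora-sdv1-5")

init_image = load_image("input.jpg").resize((512, 512))  # placeholder input
result = pipe(
    prompt="ghibli style portrait of a person",
    image=init_image,
    control_image=init_image,  # stand-in: the app passed a canny edge map here
    num_inference_steps=4,
    guidance_scale=1.0,
    strength=0.5,
).images[0]
result.save("output.jpg")
```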
static/tailwind.config.js DELETED
File without changes