<script lang="ts">
import { onMount } from 'svelte';
import { PUBLIC_BASE_URL } from '$env/static/public';
import type { FieldProps, PipelineInfo } from '$lib/types';
import { PipelineMode } from '$lib/types';
import ImagePlayer from '$lib/components/ImagePlayer.svelte';
import VideoInput from '$lib/components/VideoInput.svelte';
import Button from '$lib/components/Button.svelte';
import PipelineOptions from '$lib/components/PipelineOptions.svelte';
import Spinner from '$lib/icons/spinner.svelte';
import { lcmLiveStatus, lcmLiveActions, LCMLiveStatus } from '$lib/lcmLive';
import {
mediaStreamActions,
mediaStreamStatus,
onFrameChangeStore,
MediaStreamStatusEnum
} from '$lib/mediaStream';
import { getPipelineValues, deboucedPipelineValues } from '$lib/store';
let pipelineParams: FieldProps[];
let pipelineInfo: PipelineInfo;
let isImageMode: boolean = false;
let maxQueueSize: number = 0;
onMount(() => {
getSettings();
});
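// Fetch pipeline metadata from the backend's /settings endpoint and derive
// the editable parameters, the input mode (image vs. text) and the queue limit.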
async function getSettings() {
const settings = await fetch(`${PUBLIC_BASE_URL}/settings`).then((r) => r.json());
pipelineParams = Object.values(settings.input_params.properties);
pipelineInfo = settings.info.properties;
isImageMode = pipelineInfo.input_mode.default === PipelineMode.IMAGE;
maxQueueSize = settings.max_queue_size;
pipelineParams = pipelineParams.filter((e) => e?.disabled !== true);
}
// In image mode, once both the LCM websocket and the webcam stream are connected,
// send the current pipeline values followed by the latest webcam frame
$: {
if (
isImageMode &&
$lcmLiveStatus === LCMLiveStatus.CONNECTED &&
$mediaStreamStatus === MediaStreamStatusEnum.CONNECTED
) {
lcmLiveActions.send(getPipelineValues());
lcmLiveActions.send($onFrameChangeStore.blob);
}
}
// Send the debounced pipeline values (the prompt in text mode) whenever they change while connected
$: {
if ($lcmLiveStatus === LCMLiveStatus.CONNECTED) {
lcmLiveActions.send($deboucedPipelineValues);
}
}
$: isLCMRunning = $lcmLiveStatus !== LCMLiveStatus.DISCONNECTED;
let disabled = false;
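// Toggle the live session: in image mode the webcam devices are enumerated and the
// stream started before connecting to the LCM stream; `disabled` guards the button
// while the connection is being established.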
async function toggleLcmLive() {
if (!isLCMRunning) {
if (isImageMode) {
await mediaStreamActions.enumerateDevices();
await mediaStreamActions.start();
}
disabled = true;
await lcmLiveActions.start();
disabled = false;
} else {
if (isImageMode) {
mediaStreamActions.stop();
}
lcmLiveActions.stop();
}
}
</script>
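<!-- Error banner; left empty here and presumably filled in at runtime (e.g. by the lcmLive error handling). -->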
<div class="fixed right-2 top-2 max-w-xs rounded-lg p-4 text-center text-sm font-bold" id="error" />
<main class="container mx-auto flex max-w-4xl flex-col gap-3 px-4 py-4">
<article class="mx-auto max-w-xl text-center">
<h1 class="text-3xl font-bold">Real-Time Latent Consistency Model</h1>
<p class="py-2 text-sm">
This demo showcases the
<a
href="https://huggingface.co/blog/lcm_lora"
target="_blank"
class="text-blue-500 underline hover:no-underline">LCM LoRA</a
>
image-to-image pipeline using
<a
href="https://huggingface.co/docs/diffusers/main/en/using-diffusers/lcm#performing-inference-with-lcm"
target="_blank"
class="text-blue-500 underline hover:no-underline">Diffusers</a
> with an MJPEG stream server.
</p>
{#if maxQueueSize > 0}
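<!-- The #queue_size counter starts at 0 and is presumably updated at runtime with the current number of connected users. -->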
<p class="text-sm">
There are <span id="queue_size" class="font-bold">0</span> user(s) sharing the same GPU,
which affects real-time performance. The maximum queue size is {maxQueueSize}.
<a
href="https://huggingface.co/spaces/radames/Real-Time-Latent-Consistency-Model?duplicate=true"
target="_blank"
class="text-blue-500 underline hover:no-underline">Duplicate</a
> and run it on your own GPU.
</p>
{/if}
</article>
{#if pipelineParams}
<header>
<h2 class="font-medium">Prompt</h2>
<p class="text-sm text-gray-500">
Change the prompt to generate different images; it accepts <a
href="https://github.com/damian0815/compel/blob/main/doc/syntax.md"
target="_blank"
class="text-blue-500 underline hover:no-underline">Compel</a
> syntax.
</p>
</header>
<PipelineOptions {pipelineParams}></PipelineOptions>
<div class="flex gap-3">
<Button on:click={toggleLcmLive} {disabled}>
{#if isLCMRunning}
Stop
{:else}
Start
{/if}
</Button>
<Button disabled={isLCMRunning} classList={'ml-auto'}>Snapshot</Button>
</div>
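<!-- ImagePlayer presumably renders the MJPEG output stream from the server; in image mode the webcam preview (VideoInput) is slotted inside it. -->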
<ImagePlayer>
{#if isImageMode}
<VideoInput></VideoInput>
{/if}
</ImagePlayer>
{:else}
<!-- loading -->
<div class="flex items-center justify-center gap-3 py-48 text-2xl">
<Spinner classList={'animate-spin opacity-50'}></Spinner>
<p>Loading...</p>
</div>
{/if}
</main>
<style lang="postcss">
:global(html) {
@apply text-black dark:bg-gray-900 dark:text-white;
}
</style>