# x-mas / app.py — Hugging Face Space (FLUX.1-dev + Xmas Realpix LoRA demo)
# (Header reconstructed from HF web-page chrome: commit 26c3a03, ~9.57 kB.)
import spaces
import gradio as gr
import torch
from PIL import Image
from diffusers import DiffusionPipeline
import random
from transformers import pipeline

# Deterministic cuDNN kernels so a fixed seed reproduces the same image;
# TF32 matmuls trade a little precision for speed on Ampere+ GPUs.
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
torch.backends.cuda.matmul.allow_tf32 = True

# Korean->English translator: Korean prompts are translated before generation
# (see translate_and_generate below).
translator = pipeline("translation", model="Helsinki-NLP/opus-mt-ko-en")

# Base FLUX.1-dev pipeline with a Christmas-themed LoRA adapter loaded on top.
base_model = "black-forest-labs/FLUX.1-dev"
pipe = DiffusionPipeline.from_pretrained(base_model, torch_dtype=torch.bfloat16)
lora_repo = "strangerzonehf/Flux-Xmas-Realpix-LoRA"
trigger_word = ""  # NOTE(review): empty trigger word is appended to every prompt — confirm intended
pipe.load_lora_weights(lora_repo)
pipe.to("cuda")

MAX_SEED = 2**32-1  # seeds are kept within unsigned 32-bit range
@spaces.GPU()
def translate_and_generate(prompt, cfg_scale, steps, randomize_seed, seed, width, height, lora_scale, progress=gr.Progress(track_tqdm=True)):
    """Generate an image from a prompt, translating Korean input to English first.

    Args:
        prompt: User prompt, Korean or English.
        cfg_scale: Classifier-free guidance scale.
        steps: Number of inference steps.
        randomize_seed: When True, `seed` is replaced by a random one.
        seed: RNG seed (echoed back so the UI can display the seed used).
        width, height: Output image dimensions in pixels.
        lora_scale: LoRA adapter strength passed via joint_attention_kwargs.
        progress: Gradio progress tracker; track_tqdm=True surfaces the
            pipeline's own per-step tqdm progress in the UI.

    Returns:
        (PIL.Image, int): The generated image and the seed actually used.
    """
    def contains_korean(text):
        # Hangul syllables occupy the contiguous Unicode block U+AC00..U+D7A3.
        return any('\uac00' <= char <= '\ud7a3' for char in text)

    if contains_korean(prompt):
        # The generation model works best with English captions.
        actual_prompt = translator(prompt)[0]['translation_text']
    else:
        actual_prompt = prompt

    if randomize_seed:
        seed = random.randint(0, MAX_SEED)
    generator = torch.Generator(device="cuda").manual_seed(seed)

    progress(0, "Starting image generation...")
    # The pipeline runs all `steps` iterations internally; real per-step
    # progress comes from track_tqdm above. (A previous version looped over
    # `steps` here purely to fake progress messages before inference began,
    # and crashed with ZeroDivisionError whenever steps < 10.)
    image = pipe(
        prompt=f"{actual_prompt} {trigger_word}",
        num_inference_steps=steps,
        guidance_scale=cfg_scale,
        width=width,
        height=height,
        generator=generator,
        joint_attention_kwargs={"scale": lora_scale},
    ).images[0]
    progress(100, "Completed!")
    return image, seed
# Default example used to pre-populate the UI on page load (see load_example).
example_image_path = "example0.webp"
example_prompt = """Cozy winter scene with a Christmas atmosphere: a snow-covered cabin in the forest, warm light glowing from the windows, surrounded by sparkling Christmas decorations and a beautifully adorned Christmas tree. The sky is filled with stars, and soft snowflakes are gently falling, creating a serene and warm ambiance"""
example_cfg_scale = 3.2
example_steps = 32
example_width = 1152
example_height = 896
example_seed = 3981632454
example_lora_scale = 0.85
def load_example():
    """Return the example settings (and preview image) that seed the UI on load.

    Returns the tuple wired to [prompt, cfg_scale, steps, randomize_seed, seed,
    width, height, lora_scale, result] in app.load(). If the bundled example
    image is missing or unreadable, the image slot is None so the app still
    starts instead of crashing on the first page load.
    """
    try:
        example_image = Image.open(example_image_path)
    except OSError:
        # Missing/corrupt asset: degrade gracefully, leave the preview empty.
        example_image = None
    return example_prompt, example_cfg_scale, example_steps, True, example_seed, example_width, example_height, example_lora_scale, example_image
# Page styling: translucent blurred panels over a fixed full-page background
# image, a pinned audio-control bar, and a highlighted generate button.
# (A leftover editing placeholder line of invalid CSS was removed from the
# end of this stylesheet.)
css = """
.container {
    max-width: 1400px;
    margin: auto;
    padding: 20px;
    position: relative;
    min-height: 100vh;
}
body {
    background-image: url('file/example0.webp');
    background-size: cover;
    background-position: center;
    background-attachment: fixed;
    margin: 0;
    padding: 0;
}
.header {
    text-align: center;
    margin-bottom: 30px;
    color: white;
    text-shadow: 2px 2px 4px rgba(0,0,0,0.7);
}
.generate-btn {
    background-color: #2ecc71 !important;
    color: white !important;
    margin: 20px auto !important;
    display: block !important;
    width: 200px !important;
}
.generate-btn:hover {
    background-color: #27ae60 !important;
}
.parameter-box, .result-box, .prompt-box, .audio-controls {
    background-color: rgba(245, 246, 250, 0.8);
    padding: 20px;
    border-radius: 10px;
    margin: 10px 0;
    backdrop-filter: blur(5px);
}
.result-box {
    width: 90%;
    margin: 20px auto;
}
.image-output {
    margin: 0 auto;
    display: block;
    max-width: 800px !important;
}
.prompt-box {
    width: 90%;
    margin: 20px auto;
}
.accordion {
    margin-top: 20px;
    background-color: rgba(245, 246, 250, 0.8);
    backdrop-filter: blur(5px);
}
.audio-controls {
    position: fixed;
    bottom: 20px;
    right: 20px;
    z-index: 1000;
    display: flex;
    gap: 10px;
    align-items: center;
}
.audio-btn {
    padding: 10px 20px;
    border-radius: 5px;
    border: none;
    background: rgba(46, 204, 113, 0.9);
    color: white;
    cursor: pointer;
}
.audio-btn:hover {
    background: rgba(39, 174, 96, 0.9);
}
"""
with gr.Blocks(css=css) as app:
    # NOTE: a previous version injected gr.HTML(f"<script>{js_code}</script>")
    # here, but `js_code` was never defined, crashing the app with NameError at
    # startup. The audio controls below carry their own inline <script>.
    with gr.Column(elem_classes="container"):
        gr.Markdown("# 🎄 X-MAS LoRA", elem_classes="header")

        # Generated-image output (declared exactly once; a duplicate of this
        # group later in the layout previously shadowed this component).
        with gr.Group(elem_classes="result-box"):
            gr.Markdown("### 🖼️ Generated Image")
            result = gr.Image(label="Result", elem_classes="image-output")

        # Prompt input (Korean or English) and the generate button.
        with gr.Group(elem_classes="prompt-box"):
            prompt = gr.TextArea(
                label="✍️ Your Prompt (한글 또는 영어)",
                placeholder="이미지를 설명하세요...",
                lines=5
            )
            generate_button = gr.Button(
                "🚀 Generate Image",
                elem_classes="generate-btn"
            )

        # Background-music controls: plain HTML5 <audio> elements driven by
        # inline JavaScript; Gradio only injects this markup.
        gr.HTML("""
        <div class="audio-controls">
            <button class="audio-btn" onclick="toggleAudio(1)">Play Music 1</button>
            <button class="audio-btn" onclick="toggleAudio(2)">Play Music 2</button>
            <button class="audio-btn" onclick="stopAllAudio()">Stop All</button>
        </div>
        <audio id="bgMusic1" loop style="display: none;">
            <source src="file/1.mp3" type="audio/mp3">
        </audio>
        <audio id="bgMusic2" loop style="display: none;">
            <source src="file/2.mp3" type="audio/mp3">
        </audio>
        <script>
        let currentlyPlaying = null;

        function toggleAudio(num) {
            const audio = document.getElementById('bgMusic' + num);
            const otherAudio = document.getElementById('bgMusic' + (num === 1 ? 2 : 1));

            if (currentlyPlaying === audio) {
                audio.pause();
                currentlyPlaying = null;
            } else {
                if (currentlyPlaying) {
                    currentlyPlaying.pause();
                }
                otherAudio.pause();
                audio.play();
                currentlyPlaying = audio;
            }
        }

        function stopAllAudio() {
            document.getElementById('bgMusic1').pause();
            document.getElementById('bgMusic2').pause();
            currentlyPlaying = null;
        }
        </script>
        """)

        # Advanced generation options, collapsed by default.
        with gr.Accordion("🎨 Advanced Options", open=False, elem_classes="accordion"):
            with gr.Group(elem_classes="parameter-box"):
                gr.Markdown("### 🎛️ Generation Parameters")
                with gr.Row():
                    with gr.Column():
                        cfg_scale = gr.Slider(
                            label="CFG Scale",
                            minimum=1,
                            maximum=20,
                            step=0.5,
                            value=example_cfg_scale
                        )
                        steps = gr.Slider(
                            label="Steps",
                            minimum=1,
                            maximum=100,
                            step=1,
                            value=example_steps
                        )
                        lora_scale = gr.Slider(
                            label="LoRA Scale",
                            minimum=0,
                            maximum=1,
                            step=0.01,
                            value=example_lora_scale
                        )

            with gr.Group(elem_classes="parameter-box"):
                gr.Markdown("### 📐 Image Dimensions")
                with gr.Row():
                    width = gr.Slider(
                        label="Width",
                        minimum=256,
                        maximum=1536,
                        step=64,
                        value=example_width
                    )
                    height = gr.Slider(
                        label="Height",
                        minimum=256,
                        maximum=1536,
                        step=64,
                        value=example_height
                    )

            with gr.Group(elem_classes="parameter-box"):
                gr.Markdown("### 🎲 Seed Settings")
                with gr.Row():
                    randomize_seed = gr.Checkbox(
                        True,
                        label="Randomize seed"
                    )
                    seed = gr.Slider(
                        label="Seed",
                        minimum=0,
                        maximum=MAX_SEED,
                        step=1,
                        value=example_seed
                    )

    # Pre-populate the UI with the bundled example on page load.
    app.load(
        load_example,
        inputs=[],
        outputs=[prompt, cfg_scale, steps, randomize_seed, seed, width, height, lora_scale, result]
    )

    # Wire the generate button to the inference function.
    generate_button.click(
        translate_and_generate,
        inputs=[prompt, cfg_scale, steps, randomize_seed, seed, width, height, lora_scale],
        outputs=[result, seed]
    )
# Enable request queueing and start the Gradio server.
app.queue()
app.launch()  # js=js_code removed (js_code was never defined)