import spaces
import gradio as gr
import torch
from PIL import Image
from diffusers import DiffusionPipeline
import random
from transformers import pipeline
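
# Reproducibility / performance flags: deterministic cuDNN kernels, no autotune
# benchmarking, and TF32 matmuls enabled for faster inference on Ampere+ GPUs.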
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
torch.backends.cuda.matmul.allow_tf32 = True
# λ²ˆμ—­ λͺ¨λΈ μ΄ˆκΈ°ν™”
translator = pipeline("translation", model="Helsinki-NLP/opus-mt-ko-en")
# κΈ°λ³Έ λͺ¨λΈ 및 LoRA μ„€μ •
base_model = "black-forest-labs/FLUX.1-dev"
model_lora_repo = "Motas/Flux_Fashion_Photography_Style"
clothes_lora_repo = "prithivMLmods/Canopus-Clothing-Flux-LoRA"
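
# Load the FLUX.1-dev base pipeline in bfloat16 and move it to the GPU.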
pipe = DiffusionPipeline.from_pretrained(base_model, torch_dtype=torch.bfloat16)
pipe.to("cuda")
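
# Largest value usable as an unsigned 32-bit seed.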
MAX_SEED = 2**32-1
# μ˜ˆμ‹œ ν”„λ‘¬ν”„νŠΈ μ •μ˜
model_examples = [
    "professional fashion model wearing elegant black dress in studio lighting",
    "fashion model in casual street wear, urban background",
    "high fashion model in avant-garde outfit on runway",
]

clothes_examples = [
    "luxurious red evening gown with detailed embroidery",
    "casual denim jacket with vintage wash",
    "modern minimalist white blazer with clean lines",
]
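
# Main generation entry point; @spaces.GPU() requests a GPU for the duration of
# the call when the app runs on Hugging Face Spaces (e.g. ZeroGPU hardware).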
@spaces.GPU()
def generate_fashion(prompt, mode, cfg_scale, steps, randomize_seed, seed, width, height, lora_scale, progress=gr.Progress(track_tqdm=True)):
    if not prompt:
        return None, seed
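
    # Detect Hangul syllables (U+AC00 - U+D7A3); Korean prompts are translated
    # to English before being passed to the pipeline.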
    def contains_korean(text):
        return any(ord('κ°€') <= ord(char) <= ord('힣') for char in text)

    if contains_korean(prompt):
        actual_prompt = translator(prompt)[0]['translation_text']
    else:
        actual_prompt = prompt
    # Pick the LoRA that matches the selected mode; the values must match the
    # gr.Radio choices defined below ("Person" / "Clothes").
    pipe.unload_lora_weights()  # drop any previously loaded LoRA so repeated calls don't stack adapters
    if mode == "Person":
        pipe.load_lora_weights(model_lora_repo)
        trigger_word = "fashion photography, professional model"
    else:
        pipe.load_lora_weights(clothes_lora_repo)
        trigger_word = "upper clothing, fashion item"
    if randomize_seed:
        seed = random.randint(0, MAX_SEED)
    generator = torch.Generator(device="cuda").manual_seed(seed)
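
    # Run the diffusion pipeline; joint_attention_kwargs passes the LoRA scale
    # through to the attention processors.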
    image = pipe(
        prompt=f"{actual_prompt} {trigger_word}",
        num_inference_steps=steps,
        guidance_scale=cfg_scale,
        width=width,
        height=height,
        generator=generator,
        joint_attention_kwargs={"scale": lora_scale},
    ).images[0]

    return image, seed
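
# Gradio UI: mode selector, prompt box, per-mode examples, and generation options.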
with gr.Blocks(theme="Yntec/HaleyCH_Theme_Orange") as app:
    gr.Markdown("# 🎭 Fashion AI Studio")

    with gr.Column():
        mode = gr.Radio(
            choices=["Person", "Clothes"],
            label="Generation Mode",
            value="Person"
        )

        prompt = gr.TextArea(
            label="✍️ Prompt (Korean supported)",
            placeholder="Describe the fashion model or clothing item to generate",
            lines=3
        )
# μ˜ˆμ‹œ μ„Ήμ…˜μ„ λͺ¨λ“œλ³„λ‘œ 뢄리
        with gr.Column(visible=True) as model_examples_container:
            gr.Examples(
                examples=model_examples,
                inputs=prompt,
                label="Examples (person)"
            )

        with gr.Column(visible=False) as clothes_examples_container:
            gr.Examples(
                examples=clothes_examples,
                inputs=prompt,
                label="Examples (clothes)"
            )
        result = gr.Image(label="Generated Image")
        generate_button = gr.Button("πŸš€ START")
        with gr.Accordion("🎨 OPTION", open=False):
            with gr.Row():
                cfg_scale = gr.Slider(label="CFG Scale", minimum=1, maximum=20, value=7.0)
                steps = gr.Slider(label="Steps", minimum=1, maximum=100, value=30)
                lora_scale = gr.Slider(label="LoRA Scale", minimum=0, maximum=1, value=0.85)
            with gr.Row():
                width = gr.Slider(label="Width", minimum=256, maximum=1536, value=512)
                height = gr.Slider(label="Height", minimum=256, maximum=1536, value=768)
            with gr.Row():
                randomize_seed = gr.Checkbox(True, label="Randomize seed")
                seed = gr.Slider(label="Seed", minimum=0, maximum=MAX_SEED, value=42)
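
    # Toggle which example block is visible when the mode selection changes.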
    def update_visibility(mode):
        return (
            gr.update(visible=(mode == "Person")),
            gr.update(visible=(mode == "Clothes"))
        )
    mode.change(
        fn=update_visibility,
        inputs=[mode],
        outputs=[model_examples_container, clothes_examples_container]
    )

    generate_button.click(
        generate_fashion,
        inputs=[prompt, mode, cfg_scale, steps, randomize_seed, seed, width, height, lora_scale],
        outputs=[result, seed]
    )

if __name__ == "__main__":
    app.launch(share=True)