import spaces
import gradio as gr
import torch
from PIL import Image
from diffusers import DiffusionPipeline
import random
from transformers import pipeline

torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
torch.backends.cuda.matmul.allow_tf32 = True

# Initialize the Korean -> English translation model
translator = pipeline("translation", model="Helsinki-NLP/opus-mt-ko-en")

# Base model and LoRA repositories
base_model = "black-forest-labs/FLUX.1-dev"
model_lora_repo = "Motas/Flux_Fashion_Photography_Style"
clothes_lora_repo = "prithivMLmods/Canopus-Clothing-Flux-LoRA"

pipe = DiffusionPipeline.from_pretrained(base_model, torch_dtype=torch.bfloat16)
pipe.to("cuda")

MAX_SEED = 2**32 - 1

# Example prompts for each mode
model_examples = [
    "professional fashion model wearing elegant black dress in studio lighting",
    "fashion model in casual street wear, urban background",
    "high fashion model in avant-garde outfit on runway"
]

clothes_examples = [
    "luxurious red evening gown with detailed embroidery",
    "casual denim jacket with vintage wash",
    "modern minimalist white blazer with clean lines"
]


@spaces.GPU()
def generate_fashion(prompt, mode, cfg_scale, steps, randomize_seed, seed, width, height, lora_scale, progress=gr.Progress(track_tqdm=True)):
    if not prompt:
        return None, seed

    def contains_korean(text):
        return any(ord('가') <= ord(char) <= ord('힣') for char in text)

    # Translate Korean prompts to English before generation
    if contains_korean(prompt):
        translated = translator(prompt)[0]['translation_text']
        actual_prompt = translated
    else:
        actual_prompt = prompt

    # Load the LoRA weights and trigger word that match the selected mode
    if mode == "Person":
        pipe.load_lora_weights(model_lora_repo)
        trigger_word = "fashion photography, professional model"
    else:
        pipe.load_lora_weights(clothes_lora_repo)
        trigger_word = "upper clothing, fashion item"

    if randomize_seed:
        seed = random.randint(0, MAX_SEED)
    generator = torch.Generator(device="cuda").manual_seed(seed)

    image = pipe(
        prompt=f"{actual_prompt} {trigger_word}",
        num_inference_steps=steps,
        guidance_scale=cfg_scale,
        width=width,
        height=height,
        generator=generator,
        joint_attention_kwargs={"scale": lora_scale},
    ).images[0]

    return image, seed


with gr.Blocks(theme="Yntec/HaleyCH_Theme_Orange") as app:
    gr.Markdown("# 🎭 Fashion AI Studio")

    with gr.Column():
        mode = gr.Radio(
            choices=["Person", "Clothes"],
            label="Generation",
            value="Person"
        )

        prompt = gr.TextArea(
            label="✍️ Prompt (Korean supported)",
            placeholder="Text Input Prompt",
            lines=3
        )

        # Separate example sections per mode
        with gr.Column(visible=True) as model_examples_container:
            gr.Examples(
                examples=model_examples,
                inputs=prompt,
                label="Examples (person)"
            )

        with gr.Column(visible=False) as clothes_examples_container:
            gr.Examples(
                examples=clothes_examples,
                inputs=prompt,
                label="Examples (clothes)"
            )

        result = gr.Image(label="Generated Image")
        generate_button = gr.Button("🚀 START")

        with gr.Accordion("🎨 OPTION", open=False):
            with gr.Row():
                cfg_scale = gr.Slider(label="CFG Scale", minimum=1, maximum=20, value=7.0)
                steps = gr.Slider(label="Steps", minimum=1, maximum=100, value=30)
                lora_scale = gr.Slider(label="LoRA Scale", minimum=0, maximum=1, value=0.85)

            with gr.Row():
                width = gr.Slider(label="Width", minimum=256, maximum=1536, value=512)
                height = gr.Slider(label="Height", minimum=256, maximum=1536, value=768)

            with gr.Row():
                randomize_seed = gr.Checkbox(True, label="Randomize Seed")
                seed = gr.Slider(label="Seed", minimum=0, maximum=MAX_SEED, value=42)

    # Toggle which example set is visible based on the selected mode
    def update_visibility(mode):
        return (
            gr.update(visible=(mode == "Person")),
            gr.update(visible=(mode == "Clothes"))
        )

    mode.change(
        fn=update_visibility,
        inputs=[mode],
        outputs=[model_examples_container, clothes_examples_container]
    )

    generate_button.click(
        generate_fashion,
        inputs=[prompt, mode, cfg_scale, steps, randomize_seed, seed, width, height, lora_scale],
        outputs=[result, seed]
    )

if __name__ == "__main__":
    app.launch(share=True)