import random

import gradio as gr
import numpy as np
import spaces
import torch
from diffusers import FluxPipeline
from diffusers.utils import export_to_gif
from PIL import Image
from transformers import pipeline

# The model renders a 1024x256 "film strip" that is later split into four 256x256 frames.
HEIGHT = 256
WIDTH = 1024
MAX_SEED = np.iinfo(np.int32).max

device = "cuda" if torch.cuda.is_available() else "cpu"

# FLUX.1-dev text-to-image pipeline used to render the frame strip.
pipe = FluxPipeline.from_pretrained(
    "black-forest-labs/FLUX.1-dev",
    torch_dtype=torch.bfloat16,
).to(device)

# Korean-to-English translation model; FLUX prompts work best in English.
translator = pipeline("translation", model="Helsinki-NLP/opus-mt-ko-en")


def split_image(input_image, num_splits=4):
    """Crop the wide strip into `num_splits` square frames of HEIGHT x HEIGHT pixels."""
    output_images = []
    for i in range(num_splits):
        left = i * HEIGHT
        right = (i + 1) * HEIGHT
        box = (left, 0, right, HEIGHT)
        output_images.append(input_image.crop(box))
    return output_images


def translate_to_english(text):
    return translator(text)[0]["translation_text"]


@spaces.GPU()
def predict(
    prompt,
    seed=42,
    randomize_seed=False,
    guidance_scale=5.0,
    num_inference_steps=28,
    progress=gr.Progress(track_tqdm=True),
):
    # Translate the prompt to English if it contains Hangul (compatibility Jamo or syllable blocks).
    if any("\u3131" <= char <= "\u318E" or "\uAC00" <= char <= "\uD7A3" for char in prompt):
        prompt = translate_to_english(prompt)

    prompt_template = f"""
    A side by side 4 frame image showing consecutive stills from a looped gif moving from left to right.
    The gif is of {prompt}.
    """

    if randomize_seed:
        seed = random.randint(0, MAX_SEED)

    image = pipe(
        prompt=prompt_template,
        guidance_scale=guidance_scale,
        num_inference_steps=num_inference_steps,
        num_images_per_prompt=1,
        generator=torch.Generator("cpu").manual_seed(seed),
        height=HEIGHT,
        width=WIDTH,
    ).images[0]

    # Return the GIF path, the full frame strip, and the seed actually used.
    return export_to_gif(split_image(image, 4), "flux.gif", fps=4), image, seed


css = """
footer {visibility: hidden;}
"""

# Example prompts are intentionally in Korean to exercise the translation step:
# "a cat waving its paws in the air", "a panda shaking its hips from side to side",
# "a flower blooming".
examples = [
    "고양이가 공중에서 발을 흔드는 모습",
    "팬더가 엉덩이를 좌우로 흔드는 모습",
    "꽃이 피어나는 과정",
]

with gr.Blocks(theme="Nymbo/Nymbo_Theme", css=css) as demo:
    with gr.Column(elem_id="col-container"):
        with gr.Row():
            prompt = gr.Text(
                label="Prompt",
                show_label=False,
                max_lines=1,
                placeholder="Enter a prompt",
            )
            submit = gr.Button("Submit", scale=0)
        output = gr.Image(label="GIF", show_label=False)
        output_stills = gr.Image(label="Stills", show_label=False, elem_id="stills")
        with gr.Accordion("Advanced settings", open=False):
            seed = gr.Slider(
                label="Seed",
                minimum=0,
                maximum=MAX_SEED,
                step=1,
                value=0,
            )
            randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
            with gr.Row():
                guidance_scale = gr.Slider(
                    label="Guidance scale",
                    minimum=1,
                    maximum=15,
                    step=0.1,
                    value=3.5,
                )
                num_inference_steps = gr.Slider(
                    label="Number of inference steps",
                    minimum=1,
                    maximum=50,
                    step=1,
                    value=28,
                )
        gr.Examples(
            examples=examples,
            fn=predict,
            inputs=[prompt],
            outputs=[output, output_stills, seed],
            cache_examples="lazy",
        )
        gr.on(
            triggers=[submit.click, prompt.submit],
            fn=predict,
            inputs=[prompt, seed, randomize_seed, guidance_scale, num_inference_steps],
            outputs=[output, output_stills, seed],
        )

demo.launch()