# flux-labs / app.py
import os
import gradio as gr
import torch
import numpy as np
import random
from diffusers import FluxPipeline, FluxTransformer2DModel
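# `spaces` is the Hugging Face Spaces helper package (it provides the @spaces.GPU decorator
# for ZeroGPU); it is imported for the Space runtime but not used directly below.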
import spaces
from translatepy import Translator
# Environment settings: enable hf_transfer for faster downloads from the Hugging Face Hub
os.environ["HF_HUB_ENABLE_HF_TRANSFER"] = "1"
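# translatepy Translator instance, used in generate_image() to convert prompts of any language to English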
translator = Translator()
HF_TOKEN = os.environ.get("HF_TOKEN", None)
# Constants
model = "black-forest-labs/FLUX.1-dev"  # base pipeline repo
MAX_SEED = np.iinfo(np.int32).max  # upper bound for random seeds (max 32-bit signed int)
# CSS and JS settings
CSS = """
footer {
    visibility: hidden;
}
"""

# Small JS snippet that forces the dark theme by redirecting to ?__theme=dark
JS = """function () {
    const gradioURL = window.location.href;
    if (!gradioURL.endsWith('?__theme=dark')) {
        window.location.replace(gradioURL + '?__theme=dark');
    }
}"""
# Initialize `pipe` to None globally
pipe = None

# Attempt to load the model: the FLUX.1-merged transformer is swapped into the FLUX.1-dev pipeline
try:
    transformer = FluxTransformer2DModel.from_pretrained(
        "sayakpaul/FLUX.1-merged", torch_dtype=torch.bfloat16
    )
    if torch.cuda.is_available():
        pipe = FluxPipeline.from_pretrained(
            model,
            transformer=transformer,
            torch_dtype=torch.bfloat16,
        ).to("cuda")
    else:
        print("CUDA is not available. Check your GPU settings.")
except Exception as e:
    print(f"Failed to load the model: {e}")
# Image generation function
def generate_image(prompt, width=1024, height=1024, scales=5, steps=4, seed=-1, nums=1, progress=gr.Progress(track_tqdm=True)):
    if pipe is None:
        print("Model is not loaded properly. Please check the logs for details.")
        # Raise a Gradio error instead of returning a plain string, since the outputs are a Gallery and a Slider
        raise gr.Error("Model not loaded. Please check the logs for details.")

    # A seed of -1 means "pick a random seed"
    if seed == -1:
        seed = random.randint(0, MAX_SEED)
    seed = int(seed)

    # Translate the prompt to English so prompts in any language can be used
    text = str(translator.translate(prompt, 'English'))
    generator = torch.Generator().manual_seed(seed)

    try:
        images = pipe(
            prompt=text,
            height=height,
            width=width,
            guidance_scale=scales,
            num_inference_steps=steps,
            max_sequence_length=512,
            num_images_per_prompt=nums,
            generator=generator,
        ).images
    except Exception as e:
        print(f"Error generating image: {e}")
        raise gr.Error("Error during image generation.")

    return images, seed
# Build and launch the Gradio interface
with gr.Blocks(css=CSS, js=JS, theme="soft") as demo:
    gr.HTML("<h1><center>Flux Labs</center></h1>")
    gr.HTML("<p><center>Model Now: <a href='https://huggingface.co/sayakpaul/FLUX.1-merged'>FLUX.1 Merged</a><br>🙇‍♂️ Frequent model changes</center></p>")
    with gr.Row():
        with gr.Column(scale=4):
            img = gr.Gallery(label='Flux Generated Image', columns=1, preview=True, height=600)
            prompt = gr.Textbox(label='Enter Your Prompt (Multi-Language)', placeholder="Enter prompt...", scale=6)
            sendBtn = gr.Button(scale=1, variant='primary')
            with gr.Accordion("Advanced Options", open=True):
                width = gr.Slider(label="Width", minimum=512, maximum=1280, step=8, value=1024)
                height = gr.Slider(label="Height", minimum=512, maximum=1280, step=8, value=1024)
                scales = gr.Slider(label="Guidance", minimum=3.5, maximum=7, step=0.1, value=3.5)
                steps = gr.Slider(label="Steps", minimum=1, maximum=100, step=1, value=4)
                seed = gr.Slider(label="Seed (-1 for random)", minimum=-1, maximum=MAX_SEED, step=1, value=0)
                nums = gr.Slider(label="Number of Images", minimum=1, maximum=4, step=1, value=1)

    # Both clicking the button and pressing Enter in the prompt box trigger generation;
    # the seed that was actually used is written back to the seed slider.
    sendBtn.click(fn=generate_image, inputs=[prompt, width, height, scales, steps, seed, nums], outputs=[img, seed])
    prompt.submit(fn=generate_image, inputs=[prompt, width, height, scales, steps, seed, nums], outputs=[img, seed])

demo.queue().launch()