# app.py — "text3d" Hugging Face Space: FLUX.1-dev + Hyper-SD 8-step LoRA
# image generator with a Gradio UI and Korean→English prompt translation.
import spaces
import argparse
import os
import time
from os import path
import shutil
from datetime import datetime

from safetensors.torch import load_file
from huggingface_hub import hf_hub_download
import gradio as gr
import torch
from diffusers import FluxPipeline
from PIL import Image
from transformers import pipeline

# Hugging Face token — required for the gated FLUX.1-dev model below.
HF_TOKEN = os.getenv("HF_TOKEN")
if HF_TOKEN is None:
    raise ValueError("HF_TOKEN environment variable is not set")

# Setup and initialization code
cache_path = path.join(path.dirname(path.abspath(__file__)), "models")
PERSISTENT_DIR = os.environ.get("PERSISTENT_DIR", ".")
gallery_path = path.join(PERSISTENT_DIR, "gallery")

# Point all HF cache variables at the local models directory BEFORE the first
# model download. (Originally the translator pipeline was constructed before
# these variables were set, so its weights bypassed the configured cache.)
# NOTE(review): some of these variables may already have been read at import
# time by huggingface_hub — confirm cache placement on a cold start.
os.environ["TRANSFORMERS_CACHE"] = cache_path
os.environ["HF_HUB_CACHE"] = cache_path
os.environ["HF_HOME"] = cache_path

torch.backends.cuda.matmul.allow_tf32 = True

# Korean -> English translator used to preprocess non-English prompts.
translator = pipeline("translation", model="Helsinki-NLP/opus-mt-ko-en")

# Create gallery directory if it doesn't exist
if not path.exists(gallery_path):
    os.makedirs(gallery_path, exist_ok=True)
class timer:
    """Context manager that prints wall-clock duration of the wrapped code.

    Prints "<label> starts" on entry and "<label> took Xs" on exit,
    where X is the elapsed time rounded to two decimals.
    """

    def __init__(self, method_name="timed process"):
        # Label shown in both the start and end messages.
        self.method = method_name

    def __enter__(self):
        self.start = time.time()
        print(f"{self.method} starts")

    def __exit__(self, exc_type, exc_val, exc_tb):
        elapsed = round(time.time() - self.start, 2)
        print(f"{self.method} took {str(elapsed)}s")
# Model initialization
if not path.exists(cache_path):
    os.makedirs(cache_path, exist_ok=True)

# Load the gated FLUX.1-dev base model. `token=` replaces the deprecated
# `use_auth_token=` keyword, which has been removed from recent
# huggingface_hub / diffusers releases and would raise a TypeError there.
pipe = FluxPipeline.from_pretrained(
    "black-forest-labs/FLUX.1-dev",
    torch_dtype=torch.bfloat16,
    token=HF_TOKEN,
)

# Load the Hyper-SD 8-step LoRA (authenticated download) and fuse it so the
# pipeline can generate plausible images in ~8 inference steps.
pipe.load_lora_weights(
    hf_hub_download(
        "ByteDance/Hyper-SD",
        "Hyper-FLUX.1-dev-8steps-lora.safetensors",
        token=HF_TOKEN,
    )
)
pipe.fuse_lora(lora_scale=0.125)
pipe.to(device="cuda", dtype=torch.bfloat16)
def save_image(image):
    """Persist a generated image into the gallery directory.

    Accepts either a PIL ``Image`` or an array convertible via
    ``Image.fromarray``. Returns the saved file path, or ``None`` on any
    failure — callers treat ``None`` as "generated but not persisted",
    so this function must never raise.
    """
    try:
        # Idempotent; replaces the original exists-check + nested try.
        os.makedirs(gallery_path, exist_ok=True)

        # Timestamp plus random suffix keeps concurrent saves collision-free.
        timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
        random_suffix = os.urandom(4).hex()
        filename = f"generated_{timestamp}_{random_suffix}.png"
        filepath = os.path.join(gallery_path, filename)

        if not isinstance(image, Image.Image):
            image = Image.fromarray(image)
        # `quality` is a JPEG-only option and was silently ignored for PNG;
        # PNG is lossless, so it is dropped here.
        image.save(filepath, "PNG")

        if not os.path.exists(filepath):
            print(f"Warning: Failed to verify saved image at {filepath}")
            return None
        return filepath
    except Exception as e:
        print(f"Error in save_image: {str(e)}")
        return None
# Create Gradio interface
with gr.Blocks(theme=gr.themes.Soft()) as demo:
    with gr.Row():
        # Left column: prompt entry and generation controls.
        with gr.Column(scale=3):
            prompt = gr.Textbox(
                label="Image Description",
                placeholder="Describe the image you want to create...",
                lines=3
            )

            with gr.Accordion("Advanced Settings", open=False):
                with gr.Row():
                    height = gr.Slider(
                        label="Height",
                        minimum=256,
                        maximum=1152,
                        step=64,
                        value=1024
                    )
                    width = gr.Slider(
                        label="Width",
                        minimum=256,
                        maximum=1152,
                        step=64,
                        value=1024
                    )

                with gr.Row():
                    # Default of 8 matches the fused Hyper-SD 8-step LoRA.
                    steps = gr.Slider(
                        label="Inference Steps",
                        minimum=6,
                        maximum=25,
                        step=1,
                        value=8
                    )
                    scales = gr.Slider(
                        label="Guidance Scale",
                        minimum=0.0,
                        maximum=5.0,
                        step=0.1,
                        value=3.5
                    )

                def get_random_seed():
                    # Fresh pseudo-random seed in [0, 1000000).
                    return torch.randint(0, 1000000, (1,)).item()

                seed = gr.Number(
                    label="Seed (random by default, set for reproducibility)",
                    value=get_random_seed(),
                    precision=0
                )
                randomize_seed = gr.Button("🎲 Randomize Seed", elem_classes=["generate-btn"])

            generate_btn = gr.Button(
                "✨ Generate Image",
                elem_classes=["generate-btn"]
            )

        # Right column: generated image display.
        with gr.Column(scale=4, elem_classes=["fixed-width"]):
            output = gr.Image(
                label="Generated Image",
                elem_id="output-image",
                elem_classes=["output-image", "fixed-width"]
            )

    @spaces.GPU
    def process_and_save_image(height, width, steps, scales, prompt, seed):
        """Translate/format the prompt, run the FLUX pipeline, save and
        return the generated image (or None on failure)."""
        global pipe

        # Korean detection and translation
        def contains_korean(text):
            # True if any character is in the Hangul syllables range (가..힣).
            return any(ord('가') <= ord(c) <= ord('힣') for c in text)

        # Prompt preprocessing
        if contains_korean(prompt):
            # Translate Korean to English
            translated = translator(prompt)[0]['translation_text']
            prompt = translated

        # Enforce prompt format: style keywords plus a white background.
        formatted_prompt = f"wbgmsst, 3D, {prompt} ,white background"

        with torch.inference_mode(), torch.autocast("cuda", dtype=torch.bfloat16), timer("inference"):
            try:
                generated_image = pipe(
                    prompt=[formatted_prompt],
                    generator=torch.Generator().manual_seed(int(seed)),
                    num_inference_steps=int(steps),
                    guidance_scale=float(scales),
                    height=int(height),
                    width=int(width),
                    max_sequence_length=256
                ).images[0]

                # Saving is best-effort: on failure we warn but still return
                # the image so the UI can display it.
                saved_path = save_image(generated_image)
                if saved_path is None:
                    print("Warning: Failed to save generated image")
                return generated_image
            except Exception as e:
                print(f"Error in image generation: {str(e)}")
                return None

    def update_seed():
        # Click handler that pushes a new random seed into the Number widget.
        return get_random_seed()

    generate_btn.click(
        process_and_save_image,
        inputs=[height, width, steps, scales, prompt, seed],
        outputs=output
    )

    randomize_seed.click(
        update_seed,
        outputs=[seed]
    )

    # NOTE(review): this second handler re-randomizes the seed on every
    # generate click, so the displayed seed no longer matches the image just
    # produced — confirm that pre-rolling the next seed is intended.
    generate_btn.click(
        update_seed,
        outputs=[seed]
    )

if __name__ == "__main__":
    # Allow Gradio to serve saved files from the persistent gallery location.
    demo.launch(allowed_paths=[PERSISTENT_DIR])