import os
import random

import spaces
import gradio as gr
import torch
from diffusers import DiffusionPipeline

# Initialize models
device = "cuda" if torch.cuda.is_available() else "cpu"
dtype = torch.bfloat16
huggingface_token = os.getenv("HUGGINGFACE_TOKEN")

# Initialize the base model and move it to the GPU
base_model = "black-forest-labs/FLUX.1-dev"
pipe = DiffusionPipeline.from_pretrained(
    base_model, torch_dtype=dtype, token=huggingface_token
).to(device)

# Load LoRA weights
pipe.load_lora_weights("gokaygokay/Flux-Detailer-LoRA")

MAX_SEED = 2**32 - 1


@spaces.GPU(duration=75)
def generate_image(prompt, steps, seed, cfg_scale, width, height, lora_scale):
    # Run the FLUX pipeline with a fixed seed and the LoRA attention scale applied
    generator = torch.Generator(device=device).manual_seed(seed)
    image = pipe(
        prompt=prompt,
        num_inference_steps=steps,
        guidance_scale=cfg_scale,
        width=width,
        height=height,
        generator=generator,
        joint_attention_kwargs={"scale": lora_scale},
    ).images[0]
    return image


def run_lora(prompt, cfg_scale, steps, randomize_seed, seed, width, height, lora_scale):
    # Optionally draw a fresh seed, then generate the image and return it with the seed used
    if randomize_seed:
        seed = random.randint(0, MAX_SEED)
    image = generate_image(prompt, steps, seed, cfg_scale, width, height, lora_scale)
    return image, seed


custom_css = """
.input-group, .output-group {
    border: 1px solid #e0e0e0;
    border-radius: 10px;
    padding: 20px;
    margin-bottom: 20px;
    background-color: #f9f9f9;
}
.submit-btn {
    background-color: #2980b9 !important;
    color: white !important;
}
.submit-btn:hover {
    background-color: #3498db !important;
}
"""

title = """