import gradio as gr
import numpy as np
import random
import torch
import spaces
import re
from diffusers import DiffusionPipeline
from huggingface_hub import hf_hub_download


def feifeimodload():
    """Load DarkIdol-flux, fuse the feifei LoRA into it, and return the pipeline."""
    dtype = torch.bfloat16
    device = "cuda" if torch.cuda.is_available() else "cpu"

    pipe = DiffusionPipeline.from_pretrained(
        "aifeifei798/DarkIdol-flux-v1", torch_dtype=dtype
    ).to(device)

    pipe.load_lora_weights(
        hf_hub_download("aifeifei798/feifei-flux-lora-v1.1", "feifei-v1.1.safetensors"),
        adapter_name="feifei",
    )
    pipe.set_adapters(
        ["feifei"],
        adapter_weights=[0.85],
    )
    # Fuse the LoRA into the base weights, then drop the adapter layers to free memory.
    pipe.fuse_lora(
        adapter_names=["feifei"],
        lora_scale=1.0,
    )
    # pipe.enable_sequential_cpu_offload()
    pipe.vae.enable_slicing()
    pipe.vae.enable_tiling()
    pipe.unload_lora_weights()
    torch.cuda.empty_cache()
    return pipe


pipe = feifeimodload()
MAX_SEED = np.iinfo(np.int32).max
MAX_IMAGE_SIZE = 2048


@spaces.GPU()
def infer(
    prompt,
    seed=42,
    randomize_seed=False,
    width=1024,
    height=1024,
    num_inference_steps=4,
    progress=gr.Progress(track_tqdm=True),
):
    if randomize_seed:
        seed = random.randint(0, MAX_SEED)
    generator = torch.Generator().manual_seed(seed)

    # prompt = f"feifei, real model girl in real life, {prompt}, slight smile, Master of Light and Shadow."
    # Steer generic subjects toward the LoRA trigger phrase; "young woman" is
    # handled first so the generic "woman" rule does not leave a stray "young".
    prompt = re.sub("young woman", "sexy feifei", prompt)
    prompt = re.sub("woman", "sexy feifei", prompt)
    prompt = re.sub("girl", "sexy feifei", prompt)
    prompt = re.sub("model", "sexy feifei", prompt)

    image = pipe(
        prompt=prompt,
        width=width,
        height=height,
        num_inference_steps=num_inference_steps,
        generator=generator,
        guidance_scale=3.5,
    ).images[0]
    return image, seed

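# Illustration (not part of the original app): the re.sub calls in infer() above
# are plain substring replacements, so e.g.
#     re.sub("young woman", "sexy feifei", "a young woman on the beach")
# returns "a sexy feifei on the beach", while prompts that never mention
# woman/girl/model pass through unchanged.
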
examples = [
    "jpop a girl in bikini",
    "kpop a girl in bikini",
    "a girl in bikini",
    "DarkIdol-flux + feifei-flux-lora",
    "real model slight smile girl in real life",
    "real model smile girl in real life",
    "real model girl in real life",
    "The image is a portrait of a young woman dressed as an angel. She is sitting on the floor with her legs spread apart and her arms resting on her knees. The woman has long blonde hair styled in a half-up, half-down look with a golden halo on her head. She has large white wings that are spread out behind her, covering her breasts. The wings are also white and appear to be made of a shiny material. The woman is wearing a white corset with a black harness and thigh-high boots. The corset has a high neckline and long sleeves, and the boots are white with a pointed toe. She also has a pair of black stockings on her feet. The background is a plain grey color. The overall mood of the image is sensual and angelic.",
    "A stunning high-resolution photo of a Japanese cosplay model in a sensual outfit, captured at the Tokyo Comic Convention, showcases exquisite lighting, vibrant colors, and a flawless composition, with impeccable attention to detail and a unique creative flair.",
    "Create a high-resolution, professionally lit and styled photo of a Japanese female model in a luxurious winter fashion shoot on a beach, showcasing natural lighting, consistent aesthetics, balanced composition, intricate details, harmonious colors, flawless execution, emotional expression, creativity, and a unique concept, optimized for technical excellence and featuring master-level lighting, color, and styling.",
    "Capture a serene Japanese model in a snow-covered street, clad in a sensual Balenciaga winter outfit, evoking a sense of intimacy and luxury, with a harmonious blend of warm and cool tones, subtle shadows, and meticulous details, conveying a narrative of elegance and poise.",
    "A high-resolution photograph of a Japanese female model posing for a Louis Vuitton brand advertisement, featuring natural lighting effects, a consistent style, balanced composition, rich details, harmonious colors, no visible flaws, emotional expression, creativity, and uniqueness, with optimized technical parameters, master-level lighting, master-level color, and master-level styling.",
    "A high-resolution photograph of a Japanese female model in a serene, natural setting, with soft, warm lighting, and a minimalist aesthetic, showcasing an elegant fragrance bottle and the model's effortless, emotive expression, with impeccable styling, and a muted color palette, evoking a sense of understated luxury and refinement.",
    "A high-resolution photograph of a Japanese female model posing beside a sleek, red Ferrari, bathed in warm, golden light, with subtle shadows accentuating her curves and the car's contours, set against a blurred, gradient blue background, with the model's elegant, flowing gown and the Ferrari's metallic sheen perfectly complementing each other in a masterful display of color, texture, and composition.",
]

css = """
#col-container {
    margin: 0 auto;
    max-width: 520px;
}
"""

with gr.Blocks(css=css) as demo:
    with gr.Column(elem_id="col-container"):
        gr.Markdown("""# DarkIdol-flux + feifei-flux-lora
DarkIdol-flux + feifei-flux-lora is a text-to-image model designed to create aesthetic, detailed, and diverse images from text prompts in just 6-8 steps, with enhanced image quality, typography, handling of complex prompts, and resource efficiency.
        """)

        with gr.Row():
            prompt = gr.Text(
                label="Prompt",
                show_label=False,
                max_lines=12,
                placeholder="Enter your prompt",
                value="jpop a girl in bikini",
                container=False,
            )
            run_button = gr.Button("Run")

        result = gr.Image(label="Result", show_label=False, height=520)

        with gr.Accordion("Advanced Settings", open=False):
            seed = gr.Slider(
                label="Seed",
                minimum=0,
                maximum=MAX_SEED,
                step=1,
                value=0,
            )
            randomize_seed = gr.Checkbox(label="Randomize seed", value=True)

            with gr.Row():
                width = gr.Slider(
                    label="Width",
                    minimum=256,
                    maximum=MAX_IMAGE_SIZE,
                    step=64,
                    value=896,
                )
                height = gr.Slider(
                    label="Height",
                    minimum=256,
                    maximum=MAX_IMAGE_SIZE,
                    step=64,
                    value=1152,
                )

            with gr.Row():
                num_inference_steps = gr.Slider(
                    label="Number of inference steps",
                    minimum=1,
                    maximum=50,
                    step=1,
                    value=8,
                )

        gr.Examples(
            examples=examples,
            fn=infer,
            inputs=[prompt],
            outputs=[result, seed],
            cache_examples=False,
        )

    gr.on(
        triggers=[run_button.click, prompt.submit],
        fn=infer,
        inputs=[prompt, seed, randomize_seed, width, height, num_inference_steps],
        outputs=[result, seed],
    )

demo.launch()