import os
import random
import uuid
from datetime import datetime

import gradio as gr
import numpy as np
import spaces
import torch
from diffusers import DiffusionPipeline
from PIL import Image

# Permanent storage directory for generated images.
# Gradio/Spaces handles persistence of this folder between restarts.
SAVE_DIR = "saved_images"
os.makedirs(SAVE_DIR, exist_ok=True)

device = "cuda" if torch.cuda.is_available() else "cpu"
repo_id = "black-forest-labs/FLUX.1-dev"
adapter_id = "seawolf2357/flux-lora-military-artillery-k9"

# Load the base FLUX pipeline once at import time and attach the LoRA adapter.
pipeline = DiffusionPipeline.from_pretrained(repo_id, torch_dtype=torch.bfloat16)
pipeline.load_lora_weights(adapter_id)
pipeline = pipeline.to(device)

MAX_SEED = np.iinfo(np.int32).max
MAX_IMAGE_SIZE = 1024


def save_generated_image(image, prompt):
    """Save *image* into SAVE_DIR under a unique name and log its metadata.

    Args:
        image: PIL image returned by the pipeline.
        prompt: Text prompt used to generate the image.

    Returns:
        The file path the image was saved to.
    """
    # Unique filename: timestamp plus a short random suffix to avoid
    # collisions when two images are generated within the same second.
    timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
    unique_id = str(uuid.uuid4())[:8]
    filename = f"{timestamp}_{unique_id}.png"
    filepath = os.path.join(SAVE_DIR, filename)

    image.save(filepath)

    # Append a "filename|prompt|timestamp" record.
    # FIX: the previous code wrote the literal string "(unknown)" instead of
    # the actual filename, making the metadata useless for lookups.
    metadata_file = os.path.join(SAVE_DIR, "metadata.txt")
    with open(metadata_file, "a", encoding="utf-8") as f:
        f.write(f"{filename}|{prompt}|{timestamp}\n")

    return filepath


def load_generated_images():
    """Return paths of all saved images in SAVE_DIR, newest first."""
    if not os.path.exists(SAVE_DIR):
        return []

    image_files = [
        os.path.join(SAVE_DIR, f)
        for f in os.listdir(SAVE_DIR)
        if f.endswith((".png", ".jpg", ".jpeg", ".webp"))
    ]
    # FIX: use modification time rather than getctime — on Linux getctime is
    # inode-change time, not creation time; mtime is the portable key here.
    image_files.sort(key=os.path.getmtime, reverse=True)
    return image_files


def load_predefined_images():
    """Return an empty list — predefined gallery images are not used."""
    return []


@spaces.GPU(duration=120)
def inference(
    prompt: str,
    seed: int,
    randomize_seed: bool,
    width: int,
    height: int,
    guidance_scale: float,
    num_inference_steps: int,
    lora_scale: float,
    progress: gr.Progress = gr.Progress(track_tqdm=True),
):
    """Generate one image with the LoRA-augmented FLUX pipeline.

    Returns:
        (image, seed, gallery) — the generated PIL image, the seed actually
        used (possibly randomized), and the refreshed gallery file list.
    """
    if randomize_seed:
        seed = random.randint(0, MAX_SEED)
    generator = torch.Generator(device=device).manual_seed(seed)

    image = pipeline(
        prompt=prompt,
        guidance_scale=guidance_scale,
        num_inference_steps=num_inference_steps,
        width=width,
        height=height,
        generator=generator,
        # Per-call LoRA strength, forwarded to the attention processors.
        joint_attention_kwargs={"scale": lora_scale},
    ).images[0]

    filepath = save_generated_image(image, prompt)

    return image, seed, load_generated_images()


examples = [
    "K9 Thunder self-propelled artillery positioned on a strategic hilltop at dawn, its 155mm barrel gleaming in the morning light. The massive tracks leave deep impressions in the terrain, while advanced fire control systems and thermal imaging displays glow inside the cabin. The morning mist swirls around its imposing silhouette. [trigger]",
    "K9 Thunder demonstrating rapid fire capability during a winter exercise, snow flying from its tracks as it maneuvers. The 52-caliber gun barrel elevates smoothly while automated loading systems efficiently handle the ammunition. Artillery crew in winter camouflage coordinate the precision fire mission. [trigger]",
    "K9 Thunder crossing a reinforced bridge during monsoon season, its advanced suspension system handling the challenging terrain. Water beads off its defensive armor plating while the turret rotates, testing weapon stabilization. The military green finish contrasts with the stormy sky. [trigger]",
    "K9 Thunder in desert deployment configuration, its advanced cooling systems handling the extreme heat. The extended range ammunition is being loaded while diagnostic systems monitor performance. Sand-colored camouflage blends with the arid environment as heat waves distort the horizon. [trigger]",
    "K9 Thunder conducting a night fire mission, muzzle flash illuminating the darkness. The automated fire control system displays light up the cabin interior while thermal imaging systems scan the target area. The powerful engine rumbles as it adjusts position between salvos. [trigger]",
    "K9 Thunder during a combined arms exercise, coordinating with reconnaissance drones and forward observers. Its digital battle management system processes target data while the hydraulic stabilizers deploy. Modern combat networking equipment connects it to the battlefield grid as supporting units move into position. [trigger]",
]

css = """
footer {
    visibility: hidden;
}
"""

with gr.Blocks(theme="Yntec/HaleyCH_Theme_Orange", css=css, analytics_enabled=False) as demo:
    gr.HTML('FLUX LoRA Military K9')
    gr.HTML('😄Image to Video Explore: https://huggingface.co/spaces/ginigen/theater')

    with gr.Tabs() as tabs:
        with gr.Tab("Generation"):
            with gr.Column(elem_id="col-container"):
                with gr.Row():
                    prompt = gr.Text(
                        label="Prompt",
                        show_label=False,
                        max_lines=1,
                        placeholder="Enter your prompt",
                        container=False,
                    )
                    run_button = gr.Button("Run", scale=0)

                result = gr.Image(label="Result", show_label=False)

                with gr.Accordion("Advanced Settings", open=False):
                    seed = gr.Slider(
                        label="Seed",
                        minimum=0,
                        maximum=MAX_SEED,
                        step=1,
                        value=42,
                    )
                    randomize_seed = gr.Checkbox(label="Randomize seed", value=True)

                    with gr.Row():
                        width = gr.Slider(
                            label="Width",
                            minimum=256,
                            maximum=MAX_IMAGE_SIZE,
                            step=32,
                            value=1024,
                        )
                        height = gr.Slider(
                            label="Height",
                            minimum=256,
                            maximum=MAX_IMAGE_SIZE,
                            step=32,
                            value=768,
                        )

                    with gr.Row():
                        guidance_scale = gr.Slider(
                            label="Guidance scale",
                            minimum=0.0,
                            maximum=10.0,
                            step=0.1,
                            value=3.5,
                        )
                        num_inference_steps = gr.Slider(
                            label="Number of inference steps",
                            minimum=1,
                            maximum=50,
                            step=1,
                            value=30,
                        )
                        lora_scale = gr.Slider(
                            label="LoRA scale",
                            minimum=0.0,
                            maximum=1.0,
                            step=0.1,
                            value=1.0,
                        )

                gr.Examples(
                    examples=examples,
                    inputs=[prompt],
                    outputs=[result, seed],
                )

        with gr.Tab("Gallery"):
            gallery_header = gr.Markdown("### Generated Images Gallery")
            generated_gallery = gr.Gallery(
                label="Generated Images",
                columns=6,
                show_label=False,
                value=load_generated_images(),
                elem_id="generated_gallery",
                height="auto",
            )
            refresh_btn = gr.Button("🔄 Refresh Gallery")

    # Event handlers
    def refresh_gallery():
        """Re-scan SAVE_DIR so the gallery reflects newly generated images."""
        return load_generated_images()

    refresh_btn.click(
        fn=refresh_gallery,
        inputs=None,
        outputs=generated_gallery,
    )

    # Both the Run button and pressing Enter in the prompt box trigger a run.
    gr.on(
        triggers=[run_button.click, prompt.submit],
        fn=inference,
        inputs=[
            prompt,
            seed,
            randomize_seed,
            width,
            height,
            guidance_scale,
            num_inference_steps,
            lora_scale,
        ],
        outputs=[result, seed, generated_gallery],
    )

demo.queue()
demo.launch()