|
import torch |
|
from PIL import Image |
|
from diffusers import StableDiffusionImg2ImgPipeline |
|
import gradio as gr |
|
from random import randint |
|
from all_models import models |
|
|
|
|
|
def load_pipelines(models):
    """Build one img2img pipeline per model name, skipping failures.

    Each entry is loaded from the local ``models/<name>`` directory and moved
    to CUDA when available, otherwise CPU. Models whose checkpoints cannot be
    loaded are reported on stdout and simply omitted from the result.

    Args:
        models: Iterable of model directory names under ``models/``.

    Returns:
        Dict mapping model name -> loaded StableDiffusionImg2ImgPipeline.
    """
    # Resolve the target device once instead of per model.
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    loaded = {}
    for model in models:
        try:
            pipeline = StableDiffusionImg2ImgPipeline.from_pretrained(
                f'models/{model}', torch_dtype=torch.float32
            )
            loaded[model] = pipeline.to(device)
        except Exception as e:
            # Best-effort: a single bad checkpoint must not take down the app.
            print(f"Failed to load model {model}: {e}")
    return loaded
|
|
|
# Eagerly load every configured model once at startup; models that fail to
# load are skipped, so this dict may hold fewer entries than `models`.
pipelines = load_pipelines(models)
|
|
|
def generate_image(content_img, model_choice, prompt, guidance_scale, strength):
    """Run img2img generation on the uploaded image with the chosen model.

    Args:
        content_img: Filepath of the uploaded image (the Gradio component uses
            ``type="filepath"``), or None when nothing was uploaded.
        model_choice: Key into the module-level ``pipelines`` dict.
        prompt: Text prompt guiding the generation.
        guidance_scale: Classifier-free guidance scale.
        strength: Img2img transformation strength in (0, 1].

    Returns:
        The generated PIL image, or None when input is missing or the chosen
        model failed to load.
    """
    # Guard: nothing to do without an image or a successfully-loaded model.
    if content_img is None or model_choice not in pipelines:
        return None

    # Pipelines expect RGB; 512x512 matches the SD v1 training resolution.
    content_image = Image.open(content_img).convert("RGB").resize((512, 512))

    pipe = pipelines[model_choice]

    # BUG FIX: diffusers pipelines take no `seed` kwarg — passing one raises
    # TypeError. Seeding is done by handing the pipeline a torch.Generator
    # (created on the pipeline's own device) via the `generator` argument.
    seed_value = torch.randint(0, 4294967295, size=(1,)).item()
    generator = torch.Generator(device=pipe.device).manual_seed(seed_value)

    result = pipe(
        prompt=prompt,
        image=content_image,
        guidance_scale=guidance_scale,
        strength=strength,
        num_inference_steps=100,
        generator=generator,
    ).images[0]

    return result
|
|
|
# UI layout: components must be created inside the Blocks context and before
# the .click() wiring that references them.
with gr.Blocks() as demo:

    gr.Markdown("# Image-to-Image Generator with Model Selection")



    # Row 1: source image upload ("filepath" hands generate_image a path str).
    with gr.Row():

        content_img = gr.Image(type="filepath", label="Upload Content Image")



    # Row 2: model selection (full `models` list is shown even if some failed
    # to load; generate_image falls back to None for those) and text prompt.
    with gr.Row():

        model_choice = gr.Dropdown(models, label="Choose a Model")

        prompt = gr.Textbox(label="Enter Prompt")



    # Row 3: generation knobs forwarded straight to the diffusers pipeline.
    with gr.Row():

        guidance_scale = gr.Slider(5.0, 20.0, value=7.5, label="Guidance Scale")

        strength = gr.Slider(0.1, 1.0, value=0.5, label="Strength")



    generate_btn = gr.Button("Generate Image")

    output_image = gr.Image(label="Generated Image")



    # Input order must match generate_image's positional parameters.
    generate_btn.click(generate_image, inputs=[content_img, model_choice, prompt, guidance_scale, strength], outputs=output_image)



# Blocking call: starts the local Gradio server.
demo.launch()
|
|