Spaces:
Sleeping
Sleeping
File size: 1,434 Bytes
7091ee2 507ba49 7091ee2 507ba49 d327496 464c338 d327496 7091ee2 507ba49 7091ee2 507ba49 7091ee2 507ba49 d327496 507ba49 464c338 7091ee2 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 |
import gradio as gr
from diffusers import DiffusionPipeline
# Module-level pipeline handle; stays None until load_cust() succeeds.
pipeline = None

# Load the pipeline and LoRA weights
def load_cust(modelsyu):
    """Load a DiffusionPipeline into the module-level ``pipeline`` global.

    Args:
        modelsyu: Hugging Face model id or local path, forwarded to
            ``DiffusionPipeline.from_pretrained``.

    Returns:
        A short status string shown in the UI's output Textbox.
    """
    # BUG FIX: the original assigned `pipeline` as a *local* variable, so the
    # loaded model was never visible to generate_image() (which always saw
    # the global None and reported "Pipeline not loaded").
    global pipeline
    pipeline = DiffusionPipeline.from_pretrained(modelsyu)
    # BUG FIX: the original immediately called `pipeline()` with no prompt
    # (which raises for text-to-image pipelines) and returned that result
    # into a gr.Textbox. Return a plain status string for the textbox instead.
    return f"Loaded model: {modelsyu}"
def generate_image(prompt, negative_prompt):
    """Run the globally loaded pipeline and return the first generated image.

    If no pipeline has been loaded yet, a human-readable error string is
    returned instead so the UI can display it.
    """
    global pipeline
    # Guard clause: nothing to do until load_cust() has populated the global.
    if pipeline is None:
        return "Pipeline not loaded. Please load the models first."
    # Generate and hand back only the first image of the batch.
    result = pipeline(prompt, negative_prompt=negative_prompt)
    return result.images[0]
# ---- Gradio UI wiring ----
with gr.Blocks() as demo:
    gr.Markdown("# Text to Image Generation Custom models Demo")

    # Prompt inputs and the generate trigger.
    prompt = gr.Textbox(label="Prompt", placeholder="Enter your text prompt here")
    negative_prompt = gr.Textbox(label="Negative Prompt", placeholder="Enter your negative prompt here")
    submit_button = gr.Button("Generate Image")

    # Model loading controls, tucked into an accordion.
    with gr.Accordion('Load your custom models first'):
        basem = gr.Textbox(label="Your Lora model")
        exports = gr.Button("Load your models")
        outputid = gr.Textbox(label="output", interactive=False)
        exports.click(load_cust, inputs=[basem], outputs=[outputid])

    # Result display, wired to the generate button.
    output_image = gr.Image(label="Generated Image")
    submit_button.click(generate_image, inputs=[prompt, negative_prompt], outputs=output_image)

# Launch the demo
demo.launch()
|