"""Gradio demo that generates background images with Stable Diffusion v1.4."""

import logging
import os
from contextlib import nullcontext
from pathlib import Path

import gradio as gr
import torch
from torch import autocast
from diffusers import StableDiffusionPipeline
from huggingface_hub import notebook_login
from PIL import Image

# SECURITY FIX: the token was previously hard-coded in source, leaking the
# credential to anyone with repo access. Read it from the environment instead
# (set HUGGING_FACE_HUB_TOKEN before launching, or log in via `huggingface-cli login`).
token = os.environ.get("HUGGING_FACE_HUB_TOKEN")

logging.disable(logging.WARNING)

torch.cuda.empty_cache()
torch.manual_seed(3407)  # fixed seed for reproducible generations

model_id = "CompVis/stable-diffusion-v1-4"
device = "cuda" if torch.cuda.is_available() else "cpu"
# autocast only applies on CUDA; use a no-op context manager on CPU.
context = autocast if device == "cuda" else nullcontext

# Downloads the model weights on first run; requires an authorized HF account
# for this gated model.
pipe = StableDiffusionPipeline.from_pretrained(model_id, use_auth_token=token).to(device)


def infer(prompt, samples):
    """Generate `samples` images for `prompt`.

    Args:
        prompt: text prompt describing the desired image.
        samples: number of images to generate (the prompt is replicated
            `samples` times and run as one batch).

    Returns:
        A list of PIL images produced by the pipeline.
    """
    with context(device):
        images = pipe(samples * [prompt], guidance_scale=7.5).images
    return images


demo = gr.Blocks()
with demo:
    text = gr.Textbox(lines=7, placeholder="Enter your prompt to generate a background image... something like - Photorealistic scenery of bookshelf in a room")
    samples = gr.Slider(label="Number of Images", minimum=1, maximum=5, value=2, step=1)
    btn = gr.Button("Generate images", variant="primary").style(
        margin=False,
        rounded=(False, True, True, False),
    )
    gallery = gr.Gallery(label="Generated images", show_label=True).style(grid=(1, 3), height="auto")

    # Both pressing Enter in the textbox and clicking the button trigger generation.
    text.submit(infer, inputs=[text, samples], outputs=gallery)
    btn.click(infer, inputs=[text, samples], outputs=gallery, show_progress=True, status_tracker=None)

demo.launch()