import numpy as np
import torch
import gradio as gr
from torch import autocast
from PIL import Image
from diffusers import StableDiffusionImg2ImgPipeline

# Load the img2img pipeline in half precision on the GPU.
device = "cuda"
model_id_or_path = "CompVis/stable-diffusion-v1-4"
pipe = StableDiffusionImg2ImgPipeline.from_pretrained(
    model_id_or_path,
    revision="fp16",
    torch_dtype=torch.float16,
    use_auth_token=True,  # requires `huggingface-cli login`; never commit a raw token
)
# Alternatively, download the weights with
# `git clone https://huggingface.co/CompVis/stable-diffusion-v1-4`
# and pass `model_id_or_path="./stable-diffusion-v1-4"`
# without having to use `use_auth_token=True`.
pipe = pipe.to(device)


def diffuse(x, param):
    """Run img2img on the uploaded picture, with the prompt chosen by `param`."""
    print("in callback")
    init_image = Image.fromarray(np.uint8(x)).resize((768, 512))

    # Map the selected landmark to a prompt.
    prompt = "st petersburg logo"
    if param == "Hermitage":
        prompt = "st petersburg logo winter palace image on background hermitage vector style"
    elif param == "Kazan Cathedral":
        prompt = "st petersburg logo kazansky sobor image on background"
    elif param == "Bridges":
        prompt = "st petersburg logo bridges over neva image on background beautiful high quality"

    with autocast("cuda"):
        images = pipe(
            prompt=prompt,
            init_image=init_image,
            strength=0.7,
            guidance_scale=7.5,
        ).images
    return [images[0], param]


def flip_image(x, param):
    """Debug stand-in for `diffuse`: mirrors the image instead of running the model."""
    return [np.fliplr(x), "the function received: " + param]


with gr.Blocks() as demo:
    gr.Markdown("The word 'Saint Petersburg'")
    with gr.Tab("Saint Petersburg"):
        with gr.Row():
            image_input = gr.Image()
            param_input = gr.Radio(
                ["Hermitage", "Bridges", "Kazan Cathedral"],
                label="What does Saint Petersburg mean to you?",
            )
            image_output = gr.Image()
            param_out = gr.Markdown()
        image_button = gr.Button("GET IMAGE")
        image_button.click(diffuse, [image_input, param_input], [image_output, param_out])

demo.launch()
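# Compatibility note: the pipeline call above matches older diffusers releases.
# Recent diffusers versions renamed the img2img argument `init_image` to `image`,
# deprecated the `revision="fp16"` weights branch and `use_auth_token`, and no
# longer need autocast once the weights are loaded in float16. A minimal sketch
# for a recent release, assuming you have run `huggingface-cli login` beforehand:
#
#   pipe = StableDiffusionImg2ImgPipeline.from_pretrained(
#       model_id_or_path, torch_dtype=torch.float16
#   ).to(device)
#   images = pipe(prompt=prompt, image=init_image,
#                 strength=0.7, guidance_scale=7.5).images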