import gradio as gr

# Launches a minimal demo UI for the classic-anim-diffusion model.
# NOTE(review): .launch() blocks, so the `img_to_img` definition below is
# never reached while the interface is running — this line and the function
# look like they were pasted together from two different files; confirm.
gr.Interface.load("models/nitrosocke/classic-anim-diffusion").launch()

def img_to_img(model_path, prompt, n_images, neg_prompt, img, strength, guidance, steps, width, height, generator, seed):
    """Run image-to-image generation, lazily (re)loading the pipeline.

    Reloads the global `pipe` as a StableDiffusionImg2ImgPipeline whenever the
    requested model changes or the last call used a different mode; then resizes
    the input image to fit within (width, height) and invokes the pipeline.

    NOTE(review): `result` is computed but never returned — the function body
    looks truncated (the original app presumably returns the generated images);
    confirm against the full file.

    Args:
        model_path: identifier/path of the model to load (compared against the
            global `current_model_path` to decide whether to reload).
        prompt: text prompt; the current model's `prefix` is prepended.
        n_images: number of images to generate per prompt.
        neg_prompt: negative prompt passed to the pipeline.
        img: input PIL image (must expose .width/.height/.resize).
        strength: img2img denoising strength.
        guidance: classifier-free guidance scale.
        steps: number of inference steps (cast to int).
        width, height: target bounding box used only to compute the resize
            ratio — the explicit width/height pipeline args are commented out.
        generator: torch Generator controlling sampling randomness.
        seed: only referenced in the commented-out status message below.
    """
    # `datetime` is not imported in this chunk — presumably imported at the
    # top of the full file; verify.
    print(f"{datetime.datetime.now()} img_to_img, model: {model_path}")

    # Module-level pipeline cache shared with the (unseen) txt2img path.
    global last_mode
    global pipe
    global current_model_path

    # Reload the pipeline only when the model or the generation mode changed.
    if model_path != current_model_path or last_mode != "img2img":
        current_model_path = model_path
        update_state(f"Loading {current_model.name} image-to-image model...")

        if is_colab or current_model == custom_model:
            # Colab / custom-model path: disable the safety checker by
            # replacing it with a pass-through lambda.
            # NOTE(review): newer diffusers versions expect the second element
            # to be a list of booleans (one per image), not a bare False —
            # confirm against the pinned diffusers version.
            pipe = StableDiffusionImg2ImgPipeline.from_pretrained(
                current_model_path,
                torch_dtype=torch.float16,
                scheduler=DPMSolverMultistepScheduler.from_pretrained(current_model.path, subfolder="scheduler"),
                safety_checker=lambda images, clip_input: (images, False)
            )
        else:
            # Default path: keep the model's bundled safety checker.
            pipe = StableDiffusionImg2ImgPipeline.from_pretrained(
                current_model_path,
                torch_dtype=torch.float16,
                scheduler=DPMSolverMultistepScheduler.from_pretrained(current_model.path, subfolder="scheduler")
            )
        # pipe = pipe.to("cpu")
        # pipe = current_model.pipe_i2i

        if torch.cuda.is_available():
            pipe = pipe.to("cuda")
            pipe.enable_xformers_memory_efficient_attention()
        last_mode = "img2img"

    # Prepend the model-specific style prefix (e.g. a trigger token).
    prompt = current_model.prefix + prompt

    # Scale the input so it fits inside the requested width x height box
    # while preserving aspect ratio.
    ratio = min(height / img.height, width / img.width)
    img = img.resize((int(img.width * ratio), int(img.height * ratio)), Image.LANCZOS)

    result = pipe(
        prompt,
        negative_prompt = neg_prompt,
        num_images_per_prompt=n_images,
        image = img,
        num_inference_steps = int(steps),
        strength = strength,
        guidance_scale = guidance,
        # width = width,
        # height = height,
        generator = generator,
        callback=pipe_callback)

    # update_state(f"Done. Seed: {seed}")