|
# Gradio playground for a collection of fine-tuned Stable Diffusion models
# (text-to-image and image-to-image), built on the Diffusers library.

import os
import sys

import gradio as gr
import torch
from PIL import Image

from datasets import load_dataset
from diffusers import (
    AutoencoderKL,
    DPMSolverMultistepScheduler,
    StableDiffusionImg2ImgPipeline,
    StableDiffusionPipeline,
    UNet2DConditionModel,
)
from transformers import pipeline, set_seed
from huggingface_hub import login
from huggingface_hub.inference_api import InferenceApi

import utils
from share_btn import community_icon_html, loading_icon_html, share_js

# Make the bundled BLIP and CLIP checkouts importable.
sys.path.append('src/blip')
sys.path.append('src/clip')
|
|
|
|
|
|
|
|
|
|
|
|
|
is_colab = utils.is_google_colab()

# Log in to the Hugging Face Hub (prompts for an access token if none is cached).
login()

# Hosted Inference API client. The token is assumed to come from the HF_API_TOKEN
# environment variable; adjust this to however the token is provided in your setup.
API_TOKEN = os.environ.get("HF_API_TOKEN")
inference_api = InferenceApi(repo_id="bert-base-uncased", token=API_TOKEN)
|
|
|
# Reference datasets pulled from the Hub.
teyvat_dataset = load_dataset("Fazzie/Teyvat")
animechan_dataset = load_dataset("Guizmus/AnimeChanStyle")
diffusiondb_dataset = load_dataset("poloclub/diffusiondb")
|
|
|
|
|
|
pipeline = DiffusionPipeline.from_pretrained("flax/waifu-diffusion") |
|
pipeline = DiffusionPipeline.from_pretrained("flax/Cyberpunk-Anime-Diffusion") |
|
pipeline = DiffusionPipeline.from_pretrained("technillogue/waifu-diffusion") |
|
pipeline = DiffusionPipeline.from_pretrained("svjack/Stable-Diffusion-Pokemon-en") |
|
pipeline = DiffusionPipeline.from_pretrained("AdamOswald1/Idk") |
|
pipeline = DiffusionPipeline.from_pretrained("katakana/2D-Mix") |
|
|
|
class Model:
    def __init__(self, name, path, prefix):
        self.name = name
        self.path = path
        self.prefix = prefix
        self.pipe_t2i = None
        self.pipe_i2i = None
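
# Catalogue of selectable checkpoints. The dropdown below lists entries by name;
# when several entries share a name, the first entry with that name is used.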
|
|
|
models = [ |
|
Model("Custom model", "", ""), |
|
Model("Arcane", "nitrosocke/Arcane-Diffusion", "arcane style"), |
|
Model("Archer", "nitrosocke/archer-diffusion", "archer style"), |
|
Model("Elden Ring", "nitrosocke/elden-ring-diffusion", "elden ring style"), |
|
Model("Spider-Verse", "nitrosocke/spider-verse-diffusion", "spiderverse style"), |
|
Model("Modern Disney", "nitrosocke/modern-disney-diffusion", "modern disney style"), |
|
Model("Classic Disney", "nitrosocke/classic-anim-diffusion", "classic disney style"), |
|
Model("Waifu", "hakurei/waifu-diffusion", ""), |
|
Model("Pokémon", "lambdalabs/sd-pokemon-diffusers", "pokemon style"), |
|
Model("Pokémon", "svjack/Stable-Diffusion-Pokemon-en", "pokemon style"), |
|
Model("Pony Diffusion", "AstraliteHeart/pony-diffusion", "pony style"), |
|
Model("Robo Diffusion", "nousr/robo-diffusion", "robo style"), |
|
Model("Cyberpunk Anime", "DGSpitzer/Cyberpunk-Anime-Diffusion, flax/Cyberpunk-Anime-Diffusion", "cyberpunk style"), |
|
Model("Cyberpunk Anime", "DGSpitzer/Cyberpunk-Anime-Diffusion", "cyberpunk style"), |
|
Model("Cyberpunk Anime", "flax/Cyberpunk-Anime-Diffusion", "cyberpunk style"), |
|
Model("Cyberware", "Eppinette/Cyberware", "cyberware"), |
|
Model("Tron Legacy", "dallinmackay/Tron-Legacy-diffusion", "trnlgcy"), |
|
Model("Waifu", "flax/waifu-diffusion", ""), |
|
Model("Dark Souls", "Guizmus/DarkSoulsDiffusion", "dark souls style"), |
|
Model("Waifu", "technillogue/waifu-diffusion", ""), |
|
Model("Ouroborus", "Eppinette/Ouroboros", "m_ouroboros style"), |
|
Model("Ouroborus alt", "Eppinette/Ouroboros", "m_ouroboros"), |
|
Model("Waifu", "Eppinette/Mona", "Mona"), |
|
Model("Waifu", "Eppinette/Mona", "Mona Woman"), |
|
Model("Waifu", "Eppinette/Mona", "Mona Genshin"), |
|
Model("Genshin", "Eppinette/Mona", "Mona"), |
|
Model("Genshin", "Eppinette/Mona", "Mona Woman"), |
|
Model("Genshin", "Eppinette/Mona", "Mona Genshin"), |
|
Model("Space Machine", "rabidgremlin/sd-db-epic-space-machine", "EpicSpaceMachine"), |
|
Model("Spacecraft", "rabidgremlin/sd-db-epic-space-machine", "EpicSpaceMachine"), |
|
Model("TARDIS", "Guizmus/Tardisfusion", "Classic Tardis style"), |
|
Model("TARDIS", "Guizmus/Tardisfusion", "Modern Tardis style"), |
|
Model("TARDIS", "Guizmus/Tardisfusion", "Tardis Box style"), |
|
Model("Spacecraft", "Guizmus/Tardisfusion", "Classic Tardis style"), |
|
Model("Spacecraft", "Guizmus/Tardisfusion", "Modern Tardis style"), |
|
Model("Spacecraft", "Guizmus/Tardisfusion", "Tardis Box style"), |
|
Model("CLIP", "EleutherAI/clip-guided-diffusion", "CLIP"), |
|
Model("Face Swap", "felixrosberg/face-swap", "faceswap"), |
|
Model("Face Swap", "felixrosberg/face-swap", "faceswap with"), |
|
Model("Face Swap", "felixrosberg/face-swap", "faceswapped"), |
|
Model("Face Swap", "felixrosberg/face-swap", "faceswapped with"), |
|
Model("Face Swap", "felixrosberg/face-swap", "face on"), |
|
Model("Waifu", "Fampai/lumine_genshin_impact", "lumine_genshin"), |
|
Model("Waifu", "Fampai/lumine_genshin_impact", "lumine"), |
|
Model("Waifu", "Fampai/lumine_genshin_impact", "Lumine Genshin"), |
|
Model("Waifu", "Fampai/lumine_genshin_impact", "Lumine_genshin"), |
|
Model("Waifu", "Fampai/lumine_genshin_impact", "Lumine_Genshin"), |
|
Model("Waifu", "Fampai/lumine_genshin_impact", "Lumine"), |
|
Model("Genshin", "Fampai/lumine_genshin_impact", "Lumine_genshin"), |
|
Model("Genshin", "Fampai/lumine_genshin_impact", "Lumine_Genshin"), |
|
Model("Genshin", "Fampai/lumine_genshin_impact", "Lumine"), |
|
Model("Genshin", "Fampai/lumine_genshin_impact", "Lumine Genshin"), |
|
Model("Genshin", "Fampai/lumine_genshin_impact", "lumine"), |
|
Model("Genshin", "sd-concepts-library/ganyu-genshin-impact", "Ganyu"), |
|
Model("Genshin", "sd-concepts-library/ganyu-genshin-impact", "Ganyu Woman"), |
|
Model("Genshin", "sd-concepts-library/ganyu-genshin-impact", "Ganyu Genshin"), |
|
Model("Waifu", "sd-concepts-library/ganyu-genshin-impact", "Ganyu"), |
|
Model("Waifu", "sd-concepts-library/ganyu-genshin-impact", "Ganyu Woman"), |
|
Model("Waifu", "sd-concepts-library/ganyu-genshin-impact", "Ganyu Genshin"), |
|
Model("Waifu", "Fampai/raiden_genshin_impact", "raiden_ei"), |
|
Model("Waifu", "Fampai/raiden_genshin_impact", "Raiden Ei"), |
|
Model("Waifu", "Fampai/raiden_genshin_impact", "Ei Genshin"), |
|
Model("Waifu", "Fampai/raiden_genshin_impact", "Raiden Genshin"), |
|
Model("Waifu", "Fampai/raiden_genshin_impact", "Raiden_Genshin"), |
|
Model("Waifu", "Fampai/raiden_genshin_impact", "Ei_Genshin"), |
|
Model("Waifu", "Fampai/raiden_genshin_impact", "Raiden"), |
|
Model("Waifu", "Fampai/raiden_genshin_impact", "Ei"), |
|
Model("Genshin", "Fampai/raiden_genshin_impact", "Raiden Ei"), |
|
Model("Genshin", "Fampai/raiden_genshin_impact", "raiden_ei"), |
|
Model("Genshin", "Fampai/raiden_genshin_impact", "Raiden"), |
|
Model("Genshin", "Fampai/raiden_genshin_impact", "Raiden Genshin"), |
|
Model("Genshin", "Fampai/raiden_genshin_impact", "Ei Genshin"), |
|
Model("Genshin", "Fampai/raiden_genshin_impact", "Raiden_Genshin"), |
|
Model("Genshin", "Fampai/raiden_genshin_impact", "Ei_Genshin"), |
|
Model("Genshin", "Fampai/raiden_genshin_impact", "Ei"), |
|
Model("Waifu", "Fampai/hutao_genshin_impact", "hutao_genshin"), |
|
Model("Waifu", "Fampai/hutao_genshin_impact", "HuTao_Genshin"), |
|
Model("Waifu", "Fampai/hutao_genshin_impact", "HuTao Genshin"), |
|
Model("Waifu", "Fampai/hutao_genshin_impact", "HuTao"), |
|
Model("Waifu", "Fampai/hutao_genshin_impact", "hutao_genshin"), |
|
Model("Genshin", "Fampai/hutao_genshin_impact", "hutao_genshin"), |
|
Model("Genshin", "Fampai/hutao_genshin_impact", "HuTao_Genshin"), |
|
Model("Genshin", "Fampai/hutao_genshin_impact", "HuTao Genshin"), |
|
Model("Genshin", "Fampai/hutao_genshin_impact", "HuTao"), |
|
Model("Genshin", "Fampai/lumine_genshin_impact, Eppinette/Mona, sd-concepts-library/ganyu-genshin-impact, Fampai/raiden_genshin_impact, Fampai/hutao_genshin_impact", "Female"), |
|
Model("Genshin", "Fampai/lumine_genshin_impact, Eppinette/Mona, sd-concepts-library/ganyu-genshin-impact, Fampai/raiden_genshin_impact, Fampai/hutao_genshin_impact", "female"), |
|
Model("Genshin", "Fampai/lumine_genshin_impact, Eppinette/Mona, sd-concepts-library/ganyu-genshin-impact, Fampai/raiden_genshin_impact, Fampai/hutao_genshin_impact", "Woman"), |
|
Model("Genshin", "Fampai/lumine_genshin_impact, Eppinette/Mona, sd-concepts-library/ganyu-genshin-impact, Fampai/raiden_genshin_impact, Fampai/hutao_genshin_impact", "woman"), |
|
Model("Genshin", "Fampai/lumine_genshin_impact, Eppinette/Mona, sd-concepts-library/ganyu-genshin-impact, Fampai/raiden_genshin_impact, Fampai/hutao_genshin_impact", "Girl"), |
|
Model("Genshin", "Fampai/lumine_genshin_impact, Eppinette/Mona, sd-concepts-library/ganyu-genshin-impact, Fampai/raiden_genshin_impact, Fampai/hutao_genshin_impact", "girl"), |
|
Model("Genshin", "Fampai/lumine_genshin_impact", "Female"), |
|
Model("Genshin", "Fampai/lumine_genshin_impact", "female"), |
|
Model("Genshin", "Fampai/lumine_genshin_impact", "Woman"), |
|
Model("Genshin", "Fampai/lumine_genshin_impact", "woman"), |
|
Model("Genshin", "Fampai/lumine_genshin_impact", "Girl"), |
|
Model("Genshin", "Fampai/lumine_genshin_impact", "girl"), |
|
Model("Genshin", "Eppinette/Mona", "Female"), |
|
Model("Genshin", "Eppinette/Mona", "female"), |
|
Model("Genshin", "Eppinette/Mona", "Woman"), |
|
Model("Genshin", "Eppinette/Mona", "woman"), |
|
Model("Genshin", "Eppinette/Mona", "Girl"), |
|
Model("Genshin", "Eppinette/Mona", "girl"), |
|
Model("Genshin", "sd-concepts-library/ganyu-genshin-impact", "Female"), |
|
Model("Genshin", "sd-concepts-library/ganyu-genshin-impact", "female"), |
|
Model("Genshin", "sd-concepts-library/ganyu-genshin-impact", "Woman"), |
|
Model("Genshin", "sd-concepts-library/ganyu-genshin-impact", "woman"), |
|
Model("Genshin", "sd-concepts-library/ganyu-genshin-impact", "Girl"), |
|
Model("Genshin", "sd-concepts-library/ganyu-genshin-impact", "girl"), |
|
Model("Genshin", "Fampai/raiden_genshin_impact", "Female"), |
|
Model("Genshin", "Fampai/raiden_genshin_impact", "female"), |
|
Model("Genshin", "Fampai/raiden_genshin_impact", "Woman"), |
|
Model("Genshin", "Fampai/raiden_genshin_impact", "woman"), |
|
Model("Genshin", "Fampai/raiden_genshin_impact", "Girl"), |
|
Model("Genshin", "Fampai/raiden_genshin_impact", "girl"), |
|
Model("Genshin", "Fampai/hutao_genshin_impact", "Female"), |
|
Model("Genshin", "Fampai/hutao_genshin_impact", "female"), |
|
Model("Genshin", "Fampai/hutao_genshin_impact", "Woman"), |
|
Model("Genshin", "Fampai/hutao_genshin_impact", "woman"), |
|
Model("Genshin", "Fampai/hutao_genshin_impact", "Girl"), |
|
Model("Genshin", "Fampai/hutao_genshin_impact", "girl"), |
|
Model("Waifu", "crumb/genshin-stable-inversion, yuiqena/GenshinImpact, Fampai/lumine_genshin_impact, Eppinette/Mona, sd-concepts-library/ganyu-genshin-impact, Fampai/raiden_genshin_impact, Fampai/hutao_genshin_impact", "Genshin"), |
|
Model("Waifu", "crumb/genshin-stable-inversion, yuiqena/GenshinImpact, Fampai/lumine_genshin_impact, Eppinette/Mona, sd-concepts-library/ganyu-genshin-impact, Fampai/raiden_genshin_impact, Fampai/hutao_genshin_impact", "Genshin Impact"), |
|
Model("Genshin", "crumb/genshin-stable-inversion, yuiqena/GenshinImpact, Fampai/lumine_genshin_impact, Eppinette/Mona, sd-concepts-library/ganyu-genshin-impact, Fampai/raiden_genshin_impact, Fampai/hutao_genshin_impact", ""), |
|
Model("Waifu", "crumb/genshin-stable-inversion", "Genshin"), |
|
Model("Waifu", "crumb/genshin-stable-inversion", "Genshin Impact"), |
|
Model("Genshin", "crumb/genshin-stable-inversion", ""), |
|
Model("Waifu", "yuiqena/GenshinImpact", "Genshin"), |
|
Model("Waifu", "yuiqena/GenshinImpact", "Genshin Impact"), |
|
Model("Genshin", "yuiqena/GenshinImpact", ""), |
|
Model("Waifu", "hakurei/waifu-diffusion, flax/waifu-diffusion, technillogue/waifu-diffusion, Guizmus/AnimeChanStyle, katakana/2D-Mix", ""), |
|
Model("Pokémon", "lambdalabs/sd-pokemon-diffusers, svjack/Stable-Diffusion-Pokemon-en", "pokemon style"), |
|
Model("Pokémon", "lambdalabs/sd-pokemon-diffusers, svjack/Stable-Diffusion-Pokemon-en", ""), |
|
Model("Test", "AdamoOswald1/Idk", ""), |
|
Model("Anime", "Guizmus/AnimeChanStyle", "AnimeChan Style"), |
|
Model("Genshin", "Guizmus/AnimeChanStyle", "AnimeChan Style"), |
|
Model("Waifu", "Guizmus/AnimeChanStyle", "AnimeChan Style"), |
|
Model("Waifu", "Guizmus/AnimeChanStyle", "Genshin"), |
|
Model("Waifu", "Guizmus/AnimeChanStyle", "Genshin Impact"), |
|
Model("Genshin", "Guizmus/AnimeChanStyle", ""), |
|
Model("Anime", "Guizmus/AnimeChanStyle", ""), |
|
Model("Waifu", "Guizmus/AnimeChanStyle", ""), |
|
Model("Anime", "Guizmus/AnimeChanStyle, katakana/2D-Mix", ""), |
|
Model("Anime", "katakana/2D-Mix", "2D-Mix"), |
|
Model("Genshin", "katakana/2D-Mix", "2D-Mix"), |
|
Model("Waifu", "katakana/2D-Mix", "2D-Mix"), |
|
Model("Waifu", "katakana/2D-Mix", "Genshin"), |
|
Model("Waifu", "katakana/2D-Mix", "Genshin Impact"), |
|
Model("Genshin", "katakana/2D-Mix", ""), |
|
Model("Anime", "katakana/2D-Mix", ""), |
|
Model("Waifu", "katakana/2D-Mix", ""), |
|
Model("Beeple", "riccardogiorato/beeple-diffusion", "beeple style "), |
|
Model("Avatar", "riccardogiorato/avatar-diffusion", "avatartwow style "), |
|
Model("Poolsuite", "prompthero/poolsuite", "poolsuite style ") |
|
] |
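
# DPM-Solver++ multistep scheduler shared by every Stable Diffusion pipeline below.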
|
|
|
|
|
|
|
scheduler = DPMSolverMultistepScheduler(
    beta_start=0.00085,
    beta_end=0.012,
    beta_schedule="scaled_linear",
    num_train_timesteps=1000,
    trained_betas=None,
    predict_epsilon=True,
    thresholding=False,
    algorithm_type="dpmsolver++",
    solver_type="midpoint",
    lower_order_final=True,
)
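
# On Colab an extra "Custom model" entry is prepended so that any Hub checkpoint id
# can be pasted into the custom-model textbox.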
|
|
|
custom_model = None
if is_colab:
    models.insert(0, Model("Custom model", "", ""))
    custom_model = models[0]

last_mode = "txt2img"
current_model = models[1] if is_colab else models[0]
current_model_path = current_model.path
|
|
|
if is_colab:
    # On Colab, only the current model's text-to-image pipeline is built up front;
    # other checkpoints are loaded on demand inside txt_to_img / img_to_img.
    pipe = StableDiffusionPipeline.from_pretrained(current_model.path, torch_dtype=torch.float16, scheduler=scheduler)

    # GPT-2 based MagicPrompt pipeline for prompt generation.
    gpt2_pipe = pipeline('text-generation', model='Gustavosta/MagicPrompt-Stable-Diffusion', tokenizer='gpt2')

else:
    # Outside Colab, pre-build text-to-image and image-to-image pipelines for every
    # model, sharing the default model's VAE. Checkpoints that fail to load are skipped.
    vae = AutoencoderKL.from_pretrained(current_model.path, subfolder="vae", torch_dtype=torch.float16)
    loaded_models = []
    for model in models:
        try:
            unet = UNet2DConditionModel.from_pretrained(model.path, subfolder="unet", torch_dtype=torch.float16)
            model.pipe_t2i = StableDiffusionPipeline.from_pretrained(model.path, unet=unet, vae=vae, torch_dtype=torch.float16, scheduler=scheduler)
            model.pipe_i2i = StableDiffusionImg2ImgPipeline.from_pretrained(model.path, unet=unet, vae=vae, torch_dtype=torch.float16, scheduler=scheduler)
            loaded_models.append(model)
        except Exception:
            # Keep only the models that loaded successfully instead of mutating the
            # list while iterating over it.
            continue
    models = loaded_models
    pipe = models[0].pipe_t2i
|
|
|
if torch.cuda.is_available():
    pipe = pipe.to("cuda")

device = "GPU 🔥" if torch.cuda.is_available() else "CPU 🥶"
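
# Callback for the custom-model textbox (wired up only when running in Colab).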
|
|
|
def custom_model_changed(path):
    global current_model
    models[0].path = path
    current_model = models[0]
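
# Main Gradio callback: picks the requested model, then dispatches to image-to-image
# when an init image is provided and to text-to-image otherwise.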
|
|
|
def inference(model_name, prompt, guidance, steps, width=512, height=512, seed=0, img=None, strength=0.5, neg_prompt=""):

    global current_model
    for model in models:
        if model.name == model_name:
            current_model = model
            break
    model_path = current_model.path

    # A non-zero seed gives reproducible results; 0 means "random seed".
    if seed != 0:
        generator = torch.Generator("cuda" if torch.cuda.is_available() else "cpu").manual_seed(seed)
    else:
        generator = None

    if img is not None:
        return img_to_img(model_path, prompt, neg_prompt, img, strength, guidance, steps, width, height, generator)
    else:
        return txt_to_img(model_path, prompt, neg_prompt, guidance, steps, width, height, generator)
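
# Text-to-image generation; reloads or swaps the active pipeline when the selected
# model or the generation mode has changed since the last call.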
|
|
|
def txt_to_img(model_path, prompt, neg_prompt, guidance, steps, width, height, generator=None):

    global last_mode
    global pipe
    global current_model_path
    if model_path != current_model_path or last_mode != "txt2img":
        current_model_path = model_path

        if is_colab or current_model == custom_model:
            pipe = StableDiffusionPipeline.from_pretrained(current_model_path, torch_dtype=torch.float16, scheduler=scheduler)
        else:
            pipe.to("cpu")
            pipe = current_model.pipe_t2i

        if torch.cuda.is_available():
            pipe = pipe.to("cuda")
        last_mode = "txt2img"

    # Prepend the model-specific style prefix to the user prompt.
    prompt = f"{current_model.prefix} {prompt}" if current_model.prefix else prompt
    result = pipe(
        prompt,
        negative_prompt=neg_prompt,
        num_inference_steps=int(steps),
        guidance_scale=guidance,
        width=width,
        height=height,
        generator=generator)

    return result.images[0]
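
# Image-to-image generation; the init image is resized to fit the requested
# resolution before being passed to the pipeline.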
|
|
|
|
|
def img_to_img(model_path, prompt, neg_prompt, img, strength, guidance, steps, width, height, generator=None):

    global last_mode
    global pipe
    global current_model_path
    if model_path != current_model_path or last_mode != "img2img":
        current_model_path = model_path

        if is_colab or current_model == custom_model:
            pipe = StableDiffusionImg2ImgPipeline.from_pretrained(current_model_path, torch_dtype=torch.float16, scheduler=scheduler)
        else:
            pipe.to("cpu")
            pipe = current_model.pipe_i2i

        if torch.cuda.is_available():
            pipe = pipe.to("cuda")
        last_mode = "img2img"

    prompt = f"{current_model.prefix} {prompt}" if current_model.prefix else prompt
    # Scale the input image to fit within the requested width/height, keeping its aspect ratio.
    ratio = min(height / img.height, width / img.width)
    img = img.resize((int(img.width * ratio), int(img.height * ratio)), Image.LANCZOS)
    result = pipe(
        prompt,
        negative_prompt=neg_prompt,
        init_image=img,
        num_inference_steps=int(steps),
        strength=strength,
        guidance_scale=guidance,
        width=width,
        height=height,
        generator=generator)

    return result.images[0]
|
|
|
css = """.finetuned-diffusion-div div{display:inline-flex;align-items:center;gap:.8rem;font-size:1.75rem}.finetuned-diffusion-div div h1{font-weight:900;margin-bottom:7px}.finetuned-diffusion-div p{margin-bottom:10px;font-size:94%}a{text-decoration:underline}.tabs{margin-top:0;margin-bottom:0}#gallery{min-height:20rem} |
|
""" |
|
with gr.Blocks(css=css) as demo:
    gr.HTML(
        f"""
        <div class="finetuned-diffusion-div">
          <div>
            <h1>Playground Diffusion</h1>
          </div>
          <p>
            Demo for multiple fine-tuned Stable Diffusion models, trained on different styles: <br>
            <a href="https://huggingface.co/riccardogiorato/avatar-diffusion">Avatar</a>,<br/>
            <a href="https://huggingface.co/riccardogiorato/beeple-diffusion">Beeple</a>,<br/>
            <a href="https://huggingface.co/s3nh/beksinski-style-stable-diffusion">Beksinski</a>,<br/>
            Diffusers 🧨 SD models hosted on Hugging Face 🤗.
            Running on <b>{device}</b>{(" in a <b>Google Colab</b>." if is_colab else "")}
          </p>
        </div>
        """
    )
    with gr.Row():

        with gr.Column(scale=55):
            with gr.Group():
                model_name = gr.Dropdown(label="Model", choices=[m.name for m in models], value=current_model.name)
                with gr.Box(visible=False) as custom_model_group:
                    custom_model_path = gr.Textbox(label="Custom model path", placeholder="Path to model, e.g. nitrosocke/Arcane-Diffusion", interactive=True)
                    gr.HTML("<div><font size='2'>Custom models have to be downloaded first, so give it some time.</font></div>")

            with gr.Row():
                prompt = gr.Textbox(label="Prompt", show_label=False, max_lines=2, placeholder="Enter prompt. Style applied automatically").style(container=False)
                generate = gr.Button(value="Generate").style(rounded=(False, True, True, False))

            image_out = gr.Image(height=512)
|
|
|
|
|
|
|
|
|
        with gr.Column(scale=45):
            with gr.Tab("Options"):
                with gr.Group():
                    neg_prompt = gr.Textbox(label="Negative prompt", placeholder="What to exclude from the image")

                    with gr.Row():
                        guidance = gr.Slider(label="Guidance scale", value=7.5, maximum=15)
                        steps = gr.Slider(label="Steps", value=25, minimum=2, maximum=75, step=1)

                    with gr.Row():
                        width = gr.Slider(label="Width", value=512, minimum=64, maximum=1024, step=8)
                        height = gr.Slider(label="Height", value=512, minimum=64, maximum=1024, step=8)

                    seed = gr.Slider(0, 2147483647, label='Seed (0 = random)', value=0, step=1)

            with gr.Tab("Image to image"):
                with gr.Group():
                    image = gr.Image(label="Image", height=256, tool="editor", type="pil")
                    strength = gr.Slider(label="Transformation strength", minimum=0, maximum=1, step=0.01, value=0.5)
|
|
|
    if is_colab:
        model_name.change(lambda x: gr.update(visible=x == models[0].name), inputs=model_name, outputs=custom_model_group)
        custom_model_path.change(custom_model_changed, inputs=custom_model_path, outputs=None)

    inputs = [model_name, prompt, guidance, steps, width, height, seed, image, strength, neg_prompt]
    prompt.submit(inference, inputs=inputs, outputs=image_out)
    generate.click(inference, inputs=inputs, outputs=image_out)

if not is_colab:
    demo.queue(concurrency_count=1)

demo.launch(debug=is_colab, share=is_colab)