import gradio as gr
import os
import sys
from pathlib import Path
import random
import string
import time
from queue import Queue
from threading import Thread
import emoji
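
# Remote endpoints loaded through the Hugging Face hub (a descriptive note, not part of
# the original file):
#  - text_gen forwards short phrases to the MagicPrompt Space and returns an expanded prompt.
#  - proc1 sends a prompt to the hosted dreamlike-photoreal-2.0 model and returns an image.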
text_gen = gr.Interface.load("spaces/Dao3/MagicPrompt-Stable-Diffusion")


def get_prompts(prompt_text):
    if prompt_text:
        return text_gen("photo, " + prompt_text)
    else:
        return text_gen("")


proc1 = gr.Interface.load("models/dreamlike-art/dreamlike-photoreal-2.0")


# Restart the whole process every 9-10 minutes, presumably to clear accumulated state
# such as the in-process queue below; os.execl replaces the running script in place.
def restart_script_periodically():
    while True:
        random_time = random.randint(540, 600)
        time.sleep(random_time)
        os.execl(sys.executable, sys.executable, *sys.argv)


restart_thread = Thread(target=restart_script_periodically, daemon=True)
restart_thread.start()
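
# queue acts as a crude in-process rate limiter: each generation puts an item on the
# queue before calling the model and removes it when the call finishes, and new
# requests wait while queue_threshold or more calls are in flight.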
queue = Queue()
queue_threshold = 100
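
# add_random_noise perturbs the prompt so the two generations differ: noise_level * 5
# percent of the characters are replaced with random letters, punctuation, digits, or
# emoji. For example, noise_level=2.0 rewrites roughly 10% of the prompt's characters,
# while noise_level=0 leaves the prompt unchanged.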
def add_random_noise(prompt, noise_level=0.00):
    if noise_level == 0:
        noise_level = 0.00
    percentage_noise = noise_level * 5
    num_noise_chars = int(len(prompt) * (percentage_noise / 100))
    noise_indices = random.sample(range(len(prompt)), num_noise_chars)
    prompt_list = list(prompt)
    noise_chars = list(string.ascii_letters + string.punctuation + ' ' + string.digits)
    noise_chars.extend(['😍', '💩', '😂', '🤔', '😊', '🤗', '😭', '🙄', '😷', '🤯', '🤫', '🥴', '😴', '🤩', '🥳', '😔', '😩', '🤪', '😇', '🤢', '😈', '👹', '👻', '🤖', '👽', '💀', '🎃', '🎅', '🎄', '🎁', '🎂', '🎉', '🎈', '🎊', '🎮', '❤️', '💔', '💕', '💖', '💗', '🐶', '🐱', '🐭', '🐹', '🦊', '🐻', '🐨', '🐯', '🦁', '🐘', '🔥', '🌧️', '🌞', '🌈', '💥', '🌴', '🌊', '🌺', '🌻', '🌸', '🎨', '🌅', '🌌', '☁️', '⛈️', '❄️', '☀️', '🌤️', '⛅️', '🌥️', '🌦️', '🌧️', '🌩️', '🌨️', '🌫️', '☔️', '🌬️', '💨', '🌪️', '🌈'])
    for index in noise_indices:
        prompt_list[index] = random.choice(noise_chars)
    return "".join(prompt_list)


def send_it1(inputs, noise_level, proc1=proc1):
    prompt_with_noise = add_random_noise(inputs, noise_level)
    while queue.qsize() >= queue_threshold:
        time.sleep(2)
    queue.put(prompt_with_noise)
    try:
        output1 = proc1(prompt_with_noise)
    finally:
        # Release the slot so the queue cannot fill up permanently.
        queue.get()
    return output1


def send_it2(inputs, noise_level, proc1=proc1):
    prompt_with_noise = add_random_noise(inputs, noise_level)
    while queue.qsize() >= queue_threshold:
        time.sleep(2)
    queue.put(prompt_with_noise)
    try:
        output2 = proc1(prompt_with_noise)
    finally:
        # Release the slot so the queue cannot fill up permanently.
        queue.get()
    return output2


# def send_it3(inputs, noise_level, proc1=proc1):
#     prompt_with_noise = add_random_noise(inputs, noise_level)
#     while queue.qsize() >= queue_threshold:
#         time.sleep(2)
#     queue.put(prompt_with_noise)
#     output3 = proc1(prompt_with_noise)
#     return output3


# def send_it4(inputs, noise_level, proc1=proc1):
#     prompt_with_noise = add_random_noise(inputs, noise_level)
#     while queue.qsize() >= queue_threshold:
#         time.sleep(2)
#     queue.put(prompt_with_noise)
#     output4 = proc1(prompt_with_noise)
#     return output4
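
# UI: a short-prompt box feeding the MagicPrompt generator, a full-prompt box, a
# variation slider, and two image outputs rendered from the same (independently noised)
# prompt.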

with gr.Blocks(css='style.css') as demo:
    gr.HTML(
        """
        <div style="text-align: center; max-width: 650px; margin: 0 auto;">
          <div>
            <h2 style="font-weight: 900; font-size: 3rem; margin-bottom:20px;">
              幻梦成真-2.0
            </h2>
          </div>
          <p style="margin-bottom: 10px; font-size: 96%">
            Variation level: use the value to adjust how different the two images are. The larger the value, the greater the difference between the two images, and vice versa.
          </p>
          <p style="margin-bottom: 10px; font-size: 98%">
            ❤️ If you like this Space, click the ❤️ at the top! ❤️
          </p>
        </div>
        """
    )
    with gr.Column(elem_id="col-container"):
        with gr.Row(variant="compact"):
            input_text = gr.Textbox(
                label="Short Prompt",
                show_label=False,
                max_lines=2,
                placeholder="Type your idea (English keywords), then press the button on the right. No inspiration? Just press it!",
            ).style(
                container=False,
            )
            see_prompts = gr.Button("✨ Generate Prompt ✨").style(full_width=False)
        with gr.Row(variant="compact"):
            prompt = gr.Textbox(
                label="Prompt",
                show_label=False,
                max_lines=2,
                placeholder="Enter a full prompt here, or create one with the Generate Prompt button.",
            ).style(
                container=False,
            )
            run = gr.Button("✨ 幻梦成真 ✨").style(full_width=False)
        with gr.Row():
            with gr.Row():
                noise_level = gr.Slider(minimum=0.0, maximum=3, step=0.1, label="Variation level")
        with gr.Row():
            with gr.Row():
                output1 = gr.Image(label="Dreamlike-photoreal-2.0", show_label=False)
                output2 = gr.Image(label="Dreamlike-photoreal-2.0", show_label=False)
        # with gr.Row():
        #     output1 = gr.Image()

    see_prompts.click(get_prompts, inputs=[input_text], outputs=[prompt], queue=False)
    run.click(send_it1, inputs=[prompt, noise_level], outputs=[output1])
    run.click(send_it2, inputs=[prompt, noise_level], outputs=[output2])
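
    # NOTE: the functions below (txt_to_img, img_to_img, replace_nsfw_images) look like
    # leftovers from a diffusers-based template. They are never wired to the UI above and
    # reference names that this file neither defines nor imports (datetime, torch, Image,
    # StableDiffusionPipeline, StableDiffusionImg2ImgPipeline, DPMSolverMultistepScheduler,
    # current_model, current_model_path, last_mode, pipe, is_colab, custom_model,
    # update_state, pipe_callback), so they are effectively dead code.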
    def txt_to_img(model_path, prompt, n_images, neg_prompt, guidance, steps, width, height, generator, seed):
        print(f"{datetime.datetime.now()} txt_to_img, model: {current_model.name}")

        global last_mode
        global pipe
        global current_model_path
        if model_path != current_model_path or last_mode != "txt2img":
            current_model_path = model_path
            update_state(f"Loading {current_model.name} text-to-image model...")
            if is_colab or current_model == custom_model:
                pipe = StableDiffusionPipeline.from_pretrained(
                    current_model_path,
                    torch_dtype=torch.float16,
                    scheduler=DPMSolverMultistepScheduler.from_pretrained(current_model.path, subfolder="scheduler"),
                    safety_checker=lambda images, clip_input: (images, False)
                )
            else:
                pipe = StableDiffusionPipeline.from_pretrained(
                    current_model_path,
                    torch_dtype=torch.float16,
                    scheduler=DPMSolverMultistepScheduler.from_pretrained(current_model.path, subfolder="scheduler")
                )
            # pipe = pipe.to("cpu")
            # pipe = current_model.pipe_t2i

            if torch.cuda.is_available():
                pipe = pipe.to("cuda")
                pipe.enable_xformers_memory_efficient_attention()
            last_mode = "txt2img"

        prompt = current_model.prefix + prompt
        result = pipe(
            prompt,
            negative_prompt=neg_prompt,
            num_images_per_prompt=n_images,
            num_inference_steps=int(steps),
            guidance_scale=guidance,
            width=width,
            height=height,
            generator=generator,
            callback=pipe_callback)

        # update_state(f"Done. Seed: {seed}")

        return replace_nsfw_images(result)

    def img_to_img(model_path, prompt, n_images, neg_prompt, img, strength, guidance, steps, width, height, generator, seed):
        print(f"{datetime.datetime.now()} img_to_img, model: {model_path}")

        global last_mode
        global pipe
        global current_model_path
        if model_path != current_model_path or last_mode != "img2img":
            current_model_path = model_path
            update_state(f"Loading {current_model.name} image-to-image model...")
            if is_colab or current_model == custom_model:
                pipe = StableDiffusionImg2ImgPipeline.from_pretrained(
                    current_model_path,
                    torch_dtype=torch.float16,
                    scheduler=DPMSolverMultistepScheduler.from_pretrained(current_model.path, subfolder="scheduler"),
                    safety_checker=lambda images, clip_input: (images, False)
                )
            else:
                pipe = StableDiffusionImg2ImgPipeline.from_pretrained(
                    current_model_path,
                    torch_dtype=torch.float16,
                    scheduler=DPMSolverMultistepScheduler.from_pretrained(current_model.path, subfolder="scheduler")
                )
            # pipe = pipe.to("cpu")
            # pipe = current_model.pipe_i2i

            if torch.cuda.is_available():
                pipe = pipe.to("cuda")
                pipe.enable_xformers_memory_efficient_attention()
            last_mode = "img2img"

        prompt = current_model.prefix + prompt
        ratio = min(height / img.height, width / img.width)
        img = img.resize((int(img.width * ratio), int(img.height * ratio)), Image.LANCZOS)
        result = pipe(
            prompt,
            negative_prompt=neg_prompt,
            num_images_per_prompt=n_images,
            image=img,
            num_inference_steps=int(steps),
            strength=strength,
            guidance_scale=guidance,
            # width=width,
            # height=height,
            generator=generator,
            callback=pipe_callback)

        # update_state(f"Done. Seed: {seed}")

        return replace_nsfw_images(result)

    def replace_nsfw_images(results):
        if is_colab:
            return results.images

        for i in range(len(results.images)):
            if results.nsfw_content_detected[i]:
                results.images[i] = Image.open("nsfw.png")
        return results.images

    with gr.Row():
        gr.HTML(
            """
            <div class="footer">
                <p>
                    Built on the <a href="https://huggingface.co/dreamlike-art/dreamlike-photoreal-2.0">Dreamlike Photoreal 2.0</a> Stable Diffusion model. <a href="https://twitter.com/DavidJohnstonxx/">Original author of this demo: Phenomenon1981</a>
                </p>
            </div>
            <div class="acknowledgments" style="font-size: 115%">
                <p>
                    How this differs from <a href="https://huggingface.co/spaces/Dao3/DreamlikeArt-Diffusion-1.0">幻梦成真</a>: 幻梦显形 leans more dreamlike and unreal, while this model is more realistic. After all, the dream has "come true" here.
                </p>
            </div>
            <div class="acknowledgments" style="font-size: 115%">
                <p>
                    Recommendation: there is also a Chinese-language project, <a href="https://tiwenti.chat/">TiwenTi.chat</a>, a Chinese library of ChatGPT prompts organized by tool use cases and role-play use cases. Feel free to check it out and share it.
                </p>
            </div>
            """
        )

# Enable queuing so concurrent users share the remote endpoints, then launch the app.
demo.queue(concurrency_count=100)
demo.launch(enable_queue=True, inline=True)