# -*- coding: utf-8 -*-
"""Test_gradio_push.ipynb

Automatically generated by Colaboratory.

Original file is located at
    https://colab.research.google.com/drive/1mlZpAq-EWRmmLHH4Ok533awreqtJwzzW
"""

"""# HF Script

"""

# -*- coding: utf-8 -*-
"""Copy of Anime_Pack_Gradio.ipynb

Automatically generated by Colaboratory.

Original file is located at
    https://colab.research.google.com/drive/1RxVCwOkq3Q5qlEkQxhFGeUxICBujjEjR
"""

import os

from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

tokenizer = AutoTokenizer.from_pretrained("Helsinki-NLP/opus-mt-zh-en")

model = AutoModelForSeq2SeqLM.from_pretrained("Helsinki-NLP/opus-mt-zh-en")

import gradio as gr
from PIL import Image
from diffusers import (
    StableDiffusionControlNetPipeline,
    ControlNetModel,
    DPMSolverMultistepScheduler,
    StableDiffusionImg2ImgPipeline,
)

import torch
from controlnet_aux import HEDdetector
from diffusers.utils import load_image

import concurrent.futures
from compel import Compel

from transformers import pipeline

# Language classifier used to auto-detect Chinese prompts and switch prompt modes.
model_ckpt = "papluca/xlm-roberta-base-language-detection"
pipe = pipeline("text-classification", model=model_ckpt)
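
# Illustrative call/output shape for the classifier (the score value is an
# assumption for illustration, not captured from a real run):
#   pipe("你好,世界", top_k=1, truncation=True)
#   # -> [{'label': 'zh', 'score': 0.99}]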

HF_TOKEN = os.environ.get("HUGGING_FACE_HUB_TOKEN")

device = "cuda" if torch.cuda.is_available() else "cpu"

# Pipelines are created lazily (see the load_pipe_* functions below) so the app
# starts quickly and only pays the load/VRAM cost for features a user touches.
pipe_scribble, pipe_depth, pipe_img2img = None, None, None

# Quality boosters and negative prompts appended to every user prompt.
# The "+"/"++" suffixes are Compel prompt-weighting syntax.
hidden_booster_text = "masterpiece++, best quality++, ultra-detailed++, unity 8k wallpaper+, illustration+, anime style+, intricate, fluid simulation, sharp edges, glossy++, smooth++, detailed eyes++, best quality++, 4k++, 8k++, highres++, masterpiece++, ultra-detailed, realistic++, photorealistic++, photo-realistic++, depth of field, ultra-high definition, highly detailed, natural lighting, sharp focus, cinematic, hyperrealism, extremely detailed"
hidden_negative = "bad anatomy, disfigured, poorly drawn, deformed, mutation, malformation, deformed, mutated, disfigured, deformed eyes+, bad face++, bad hands, poorly drawn hands, malformed hands, extra arms++, extra legs++, fused body+, fused hands+, fused legs+, missing arms, missing limb, extra digit+, fewer digits, floating limbs, disconnected limbs, inaccurate limb, bad fingers, missing fingers, ugly face, long body++"
hidden_cn_booster_text = ",漂亮的脸"  # ", beautiful face" appended in Chinese prompt mode
hidden_cn_negative = ""

# HED edge detector used to turn uploaded images/sketches into scribble maps.
hed = HEDdetector.from_pretrained('lllyasviel/ControlNet')

# safety_checker/requires_safety_checker are pipeline arguments, not
# ControlNetModel arguments, so they are not passed here.
controlnet_scribble = ControlNetModel.from_pretrained(
    "lllyasviel/sd-controlnet-scribble", torch_dtype=torch.float16
)

# Monocular depth estimator feeding the depth ControlNet.
depth_estimator = pipeline('depth-estimation')

controlnet_depth = ControlNetModel.from_pretrained(
    "lllyasviel/sd-controlnet-depth", torch_dtype=torch.float16
)


def translate(prompt):
    """Translate a Chinese prompt into English with the MarianMT zh-en model."""
    translated = model.generate(**tokenizer(prompt, return_tensors="pt", padding=True))
    tgt_text = [tokenizer.decode(t, skip_special_tokens=True) for t in translated]
    # Join the decoded batch and drop the trailing period the model tends to emit.
    return ''.join(tgt_text)[:-1]
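
# Illustrative usage (the English output here is an assumption, not a captured run):
#   translate("漂亮的女孩,微笑")  # -> e.g. "Beautiful girl, smiling"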

     

def load_pipe_scribble():
    """Lazily build the scribble-ControlNet pipeline on first use."""
    global pipe_scribble
    if pipe_scribble is None:
        pipe_scribble = StableDiffusionControlNetPipeline.from_single_file(
            "https://huggingface.co/shellypeng/anime-god/blob/main/animeGod_v10.safetensors",
            controlnet=controlnet_scribble,
            torch_dtype=torch.float16,
            safety_checker=None,
            requires_safety_checker=False,
            token=HF_TOKEN,
        )
        pipe_scribble.load_lora_weights("shellypeng/lora2")
        pipe_scribble.fuse_lora(lora_scale=0.1)

        pipe_scribble.load_textual_inversion("shellypeng/textinv1")
        pipe_scribble.load_textual_inversion("shellypeng/textinv2")
        pipe_scribble.load_textual_inversion("shellypeng/textinv3")
        pipe_scribble.load_textual_inversion("shellypeng/textinv4")
        pipe_scribble.scheduler = DPMSolverMultistepScheduler.from_config(
            pipe_scribble.scheduler.config, use_karras_sigmas=True
        )
        pipe_scribble.to(device)
        # Disable the safety checker; the stub keeps the pipeline's expected
        # (images, nsfw_flags) return shape.
        pipe_scribble.safety_checker = lambda images, **kwargs: (images, [False] * len(images))


def load_pipe_depth():
    """Lazily build the depth-ControlNet pipeline on first use."""
    global pipe_depth
    if pipe_depth is None:
        pipe_depth = StableDiffusionControlNetPipeline.from_single_file(
            "https://huggingface.co/shellypeng/anime-god/blob/main/animeGod_v10.safetensors",
            controlnet=controlnet_depth,
            torch_dtype=torch.float16,
        )
        pipe_depth.load_lora_weights("shellypeng/lora1")
        pipe_depth.fuse_lora(lora_scale=0.3)

        pipe_depth.load_textual_inversion("shellypeng/textinv1")
        pipe_depth.load_textual_inversion("shellypeng/textinv2")
        pipe_depth.load_textual_inversion("shellypeng/textinv3")
        pipe_depth.load_textual_inversion("shellypeng/textinv4")
        pipe_depth.scheduler = DPMSolverMultistepScheduler.from_config(
            pipe_depth.scheduler.config, use_karras_sigmas=True
        )
        pipe_depth.to(device)
        # Same safety-checker stub as the scribble pipeline.
        pipe_depth.safety_checker = lambda images, **kwargs: (images, [False] * len(images))
        

def load_pipe_img2img():
    """Lazily build the img2img pipeline on first use."""
    global pipe_img2img
    if pipe_img2img is None:
        pipe_img2img = StableDiffusionImg2ImgPipeline.from_single_file(
            "https://huggingface.co/shellypeng/anime-god/blob/main/animeGod_v10.safetensors",
            torch_dtype=torch.float16,
            safety_checker=None,
            requires_safety_checker=False,
            token=HF_TOKEN,
        )
        pipe_img2img.load_lora_weights("shellypeng/lora1")
        pipe_img2img.fuse_lora(lora_scale=0.1)
        pipe_img2img.load_lora_weights("shellypeng/lora2", token=HF_TOKEN)
        pipe_img2img.fuse_lora(lora_scale=0.1)

        pipe_img2img.load_textual_inversion("shellypeng/textinv1")
        pipe_img2img.load_textual_inversion("shellypeng/textinv2")
        pipe_img2img.load_textual_inversion("shellypeng/textinv3")
        pipe_img2img.load_textual_inversion("shellypeng/textinv4")
        pipe_img2img.scheduler = DPMSolverMultistepScheduler.from_config(
            pipe_img2img.scheduler.config, use_karras_sigmas=True
        )
        pipe_img2img.to(device)
        # Same safety-checker stub as the other pipelines.
        pipe_img2img.safety_checker = lambda images, **kwargs: (images, [False] * len(images))
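
# Note: these lazy initializers are not guarded against two requests racing on
# the first call. A minimal thread-safety sketch (the lock and its name are an
# assumption, not part of the original script):
#   import threading
#   _pipe_lock = threading.Lock()
#   def load_pipe_img2img():
#       global pipe_img2img
#       with _pipe_lock:
#           if pipe_img2img is None:
#               ...  # build the pipeline as above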


def real_to_anime(text, input_img):
    """Depth-to-image: estimate a depth map from the uploaded photo, then
    generate four anime-style variants conditioned on it."""
    load_pipe_depth()
    input_img = Image.fromarray(input_img)
    input_img = load_image(input_img)
    input_img = depth_estimator(input_img)['depth']
    # Four independent samples from the same depth conditioning.
    res_image0 = pipe_depth(text, input_img, negative_prompt=hidden_negative, num_inference_steps=40).images[0]
    res_image1 = pipe_depth(text, input_img, negative_prompt=hidden_negative, num_inference_steps=40).images[0]
    res_image2 = pipe_depth(text, input_img, negative_prompt=hidden_negative, num_inference_steps=40).images[0]
    res_image3 = pipe_depth(text, input_img, negative_prompt=hidden_negative, num_inference_steps=40).images[0]

    return res_image0, res_image1, res_image2, res_image3
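
# The four sequential calls above could likely be batched into a single call
# with the standard diffusers argument num_images_per_prompt; a minimal sketch,
# left as a comment to preserve the per-image behavior of the handlers:
#   images = pipe_depth(text, input_img, negative_prompt=hidden_negative,
#                       num_inference_steps=40, num_images_per_prompt=4).images
#   res_image0, res_image1, res_image2, res_image3 = images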




def scribble_to_image(text, neg_prompt_box, input_img):
    """Scribble-to-image: extract a scribble map from the input with HED, then
    generate four anime-style variants conditioned on it. Chinese prompts are
    translated to English before generation."""
    load_pipe_scribble()
    input_img = Image.fromarray(input_img)
    input_img = hed(input_img, scribble=True)
    input_img = load_image(input_img)
    # If the classifier is confident the prompt is Chinese, translate it first.
    lang_check = pipe(text, top_k=1, truncation=True)[0]
    if lang_check['label'] == 'zh' and lang_check['score'] >= 0.85:
        text = translate(text)
    # Compel turns the "+"/"++" weighted prompt syntax into embeddings.
    compel_proc = Compel(tokenizer=pipe_scribble.tokenizer, text_encoder=pipe_scribble.text_encoder)
    prompt_embeds = compel_proc(text + hidden_booster_text)
    negative_prompt_embeds = compel_proc(neg_prompt_box + hidden_negative)

    res_image0 = pipe_scribble(image=input_img, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, num_inference_steps=40).images[0]
    res_image1 = pipe_scribble(image=input_img, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, num_inference_steps=40).images[0]
    res_image2 = pipe_scribble(image=input_img, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, num_inference_steps=40).images[0]
    res_image3 = pipe_scribble(image=input_img, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, num_inference_steps=40).images[0]

    return res_image0, res_image1, res_image2, res_image3

def real_img2img_to_anime(text, neg_prompt_box, input_img):
    """Img2img: restyle the uploaded photo in the anime style (strength 0.8),
    generating four variants. Chinese prompts are translated to English first."""
    load_pipe_img2img()
    input_img = Image.fromarray(input_img)
    input_img = load_image(input_img)
    lang_check = pipe(text, top_k=1, truncation=True)[0]
    if lang_check['label'] == 'zh' and lang_check['score'] >= 0.85:
        text = translate(text)

    compel_proc = Compel(tokenizer=pipe_img2img.tokenizer, text_encoder=pipe_img2img.text_encoder)
    prompt_embeds = compel_proc(text + hidden_booster_text)
    negative_prompt_embeds = compel_proc(neg_prompt_box + hidden_negative)

    res_image0 = pipe_img2img(image=input_img, strength=0.8, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, num_inference_steps=40).images[0]
    res_image1 = pipe_img2img(image=input_img, strength=0.8, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, num_inference_steps=40).images[0]
    res_image2 = pipe_img2img(image=input_img, strength=0.8, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, num_inference_steps=40).images[0]
    res_image3 = pipe_img2img(image=input_img, strength=0.8, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, num_inference_steps=40).images[0]

    return res_image0, res_image1, res_image2, res_image3

    


theme = gr.themes.Soft(
    primary_hue="orange",
    secondary_hue="orange",
).set(
    block_background_fill='*primary_50'
)



def zh_prompt_info(text, neg_text, chinese_check):
    """Sync the Chinese-prompt checkbox with the detected prompt language.

    Returns (new_checkbox_state, info_code), where info_code is "zh" or "en"
    when the mode should switch, "invalid" when the prompt and negative prompt
    mix languages, and "" when there is nothing to announce.
    """
    can_raise_info = ""
    lang_check = pipe(text, top_k=1, truncation=True)[0]
    neg_lang_check = pipe(neg_text, top_k=1, truncation=True)[0]
    if lang_check['label'] == 'zh' and lang_check['score'] >= 0.85:
        if not chinese_check:
            chinese_check = True
            can_raise_info = "zh"
        if neg_lang_check['label'] == 'en' and neg_lang_check['score'] >= 0.85:
            return True, "invalid"
    elif lang_check['label'] == 'en' and lang_check['score'] >= 0.85:
        if chinese_check:
            chinese_check = False
            can_raise_info = "en"
        if neg_lang_check['label'] == 'zh' and neg_lang_check['score'] >= 0.85:
            return False, "invalid"
    return chinese_check, can_raise_info


def mult_thread_img2img(prompt_box, neg_prompt_box, image_box):
    # Run generation in a worker thread so the Gradio event loop stays
    # responsive; only one task is submitted per event, so one worker suffices.
    with concurrent.futures.ThreadPoolExecutor(max_workers=1) as executor:
        future = executor.submit(real_img2img_to_anime, prompt_box, neg_prompt_box, image_box)
        image1, image2, image3, image4 = future.result()
    return image1, image2, image3, image4


def mult_thread_scribble(prompt_box, neg_prompt_box, image_box):
    with concurrent.futures.ThreadPoolExecutor(max_workers=1) as executor:
        future = executor.submit(scribble_to_image, prompt_box, neg_prompt_box, image_box)
        image1, image2, image3, image4 = future.result()
    return image1, image2, image3, image4


def mult_thread_live_scribble(prompt_box, neg_prompt_box, image_box):
    # The ImageEditor returns a dict of layers; use the flattened composite.
    image_box = image_box["composite"]
    with concurrent.futures.ThreadPoolExecutor(max_workers=1) as executor:
        future = executor.submit(scribble_to_image, prompt_box, neg_prompt_box, image_box)
        image1, image2, image3, image4 = future.result()
    return image1, image2, image3, image4


def mult_thread_lang_class(prompt_box, neg_prompt_box, chinese_check):
    with concurrent.futures.ThreadPoolExecutor(max_workers=1) as executor:
        future = executor.submit(zh_prompt_info, prompt_box, neg_prompt_box, chinese_check)
        chinese_check, can_raise_info = future.result()
    if can_raise_info == "zh":
        gr.Info("Chinese Language Detected, Switching to Chinese Prompt Mode")
    elif can_raise_info == "en":
        gr.Info("English Language Detected, Disabling Chinese Prompt Mode")
    elif can_raise_info == "invalid":
        # Surface the mixed-language case that zh_prompt_info already detects.
        gr.Warning("Please use the same language for the prompt and the negative prompt")
    return chinese_check


with gr.Blocks(theme=theme, css="footer {visibility: hidden}", title="ShellAI Apps") as iface:
    with gr.Tab("AnimeDepth(安妮深度)"):
        gr.Markdown(
            """
            # AnimeDepth(安妮深度)
            Turns a picture into anime-style images with the depth-to-image ControlNet.
            将图片用深度图的方式转为动漫风图片。
            """
        )
        with gr.Row(equal_height=True):
            with gr.Column():
                with gr.Row(equal_height=True):
                    with gr.Column(scale=4):
                        prompt_box = gr.Textbox(label="Prompt(提示词)", placeholder="Enter a prompt\n输入提示词", lines=3)
                        neg_prompt_box = gr.Textbox(label="Negative Prompt(负面提示词)", placeholder="Enter a negative prompt(things you don't want to include in the generated image)\n输入负面提示词:输入您不想生成的部分", lines=3)
                    with gr.Row(equal_height=True):
                        chinese_check = gr.Checkbox(label="Chinese Prompt Mode(中文提示词模式)", info="Click here to enable Chinese Prompting(点此触发中文提示词输入)")

                image_box = gr.Image(label="Input Image(上传图片)", height=400)
                gen_btn = gr.Button(value="Generate(生成)")
    
        with gr.Row(equal_height=True):
            image1 = gr.Image(label="Result 1(结果图 1)")
            image2 = gr.Image(label="Result 2(结果图 2)")
            image3 = gr.Image(label="Result 3(结果图 3)")
            image4 = gr.Image(label="Result 4(结果图 4)")
        example_img2img = [
            ["漂亮的女孩,微笑,长发,黑发,粉色外套,白色内衬,优雅,红色背景,红色窗帘", "低画质", "sunmi.jpg"],
            ["Beautiful girl, smiling, bun, bun hair, black hair, beautiful eyes, black dress, elegant, red carpet photo","ugly, bad quality", "emma.jpg"]
        ]
        
        # gr.Examples(examples=example_img2img, inputs=[prompt_box, neg_prompt_box, image_box], outputs=[image1, image2, image3, image4], fn=mult_thread_img2img, cache_examples=True)

        gr.on(triggers=[prompt_box.submit, gen_btn.click], fn=mult_thread_lang_class, inputs=[prompt_box, neg_prompt_box, chinese_check], outputs=[chinese_check], show_progress=False)
        gr.on(triggers=[prompt_box.submit, gen_btn.click], fn=real_to_anime, inputs=[prompt_box, image_box], outputs=[image1, image2, image3, image4])
    
    with gr.Tab("Animefier(安妮漫风)"):
        gr.Markdown(
            """
            # Animefier(安妮漫风)
            Turns realistic photos into anime-style images.
            将真实图片转为动漫风图片。
            """
        )
        with gr.Row(equal_height=True):
            with gr.Column():
                with gr.Row(equal_height=True):
                    with gr.Column(scale=4):
                        prompt_box = gr.Textbox(label="Prompt(提示词)", placeholder="Enter a prompt\n输入提示词", lines=3)
                        neg_prompt_box = gr.Textbox(label="Negative Prompt(负面提示词)", placeholder="Enter a negative prompt(things you don't want to include in the generated image)\n输入负面提示词:输入您不想生成的部分", lines=3)
                    with gr.Row(equal_height=True):
                        chinese_check = gr.Checkbox(label="Chinese Prompt Mode(中文提示词模式)", info="Click here to enable Chinese Prompting(点此触发中文提示词输入)")

                image_box = gr.Image(label="Input Image(上传图片)", height=400)
                gen_btn = gr.Button(value="Generate(生成)")
    
        with gr.Row(equal_height=True):
            image1 = gr.Image(label="Result 1(结果图 1)")
            image2 = gr.Image(label="Result 2(结果图 2)")
            image3 = gr.Image(label="Result 3(结果图 3)")
            image4 = gr.Image(label="Result 4(结果图 4)")
        example_img2img = [
            ["漂亮的女孩,微笑,长发,黑发,粉色外套,白色内衬,优雅,红色背景,红色窗帘", "低画质", "sunmi.jpg"],
            ["Beautiful girl, smiling, bun, bun hair, black hair, beautiful eyes, black dress, elegant, red carpet photo","ugly, bad quality", "emma.jpg"]
        ]
        
        # gr.Examples(examples=example_img2img, inputs=[prompt_box, neg_prompt_box, image_box], outputs=[image1, image2, image3, image4], fn=mult_thread_img2img, cache_examples=True)

        gr.on(triggers=[prompt_box.submit, gen_btn.click], fn=mult_thread_lang_class, inputs=[prompt_box, neg_prompt_box, chinese_check], outputs=[chinese_check], show_progress=False)
        gr.on(triggers=[prompt_box.submit, gen_btn.click], fn=mult_thread_img2img, inputs=[prompt_box, neg_prompt_box, image_box], outputs=[image1, image2, image3, image4])
    with gr.Tab("Live Sketch(实时涂鸦)"):
        gr.Markdown(
            """
            # Live Sketch(实时涂鸦)
            Draw sketches/scribbles live and turn them into anime-style images.
            实时涂鸦,将粗线条涂鸦转为动漫风图片。
            """
        )
        with gr.Row(equal_height=True):
            with gr.Column():
                with gr.Row(equal_height=True):
                    with gr.Column(scale=4):
                        prompt_box = gr.Textbox(label="Prompt(提示词)", placeholder="Enter a prompt\n输入提示词", lines=3)
                        neg_prompt_box = gr.Textbox(label="Negative Prompt(负面提示词)", placeholder="Enter a negative prompt(things you don't want to include in the generated image)\n输入负面提示词:输入您不想生成的部分", lines=3)
                    with gr.Row(equal_height=True):
                        chinese_check = gr.Checkbox(label="Chinese Prompt Mode(中文提示词模式)", info="Click here to enable Chinese Prompting(点此触发中文提示词输入)")
                image_box = gr.ImageEditor(sources=(), brush=gr.Brush(default_size=5, color_mode="fixed", colors=["#000000"]), height=400)

                gen_btn = gr.Button(value="Generate(生成)")
        with gr.Row(equal_height=True):
            image1 = gr.Image(label="Result 1(结果图 1)")
            image2 = gr.Image(label="Result 2(结果图 2)")
            image3 = gr.Image(label="Result 3(结果图 3)")
            image4 = gr.Image(label="Result 4(结果图 4)")
        # sketch_image_box.change(fn=mult_thread_scribble, inputs=[prompt_box, neg_prompt_box, sketch_image_box], outputs=[image1, image2, image3, image4])
        example_scribble_live2img = [
            ["帅气的男孩,橙色头发++,皱眉,闭眼,深蓝色开襟毛衣,白色内衬,酷,冷漠,帅气,硝烟背景", "劣质", "sketch_boy.png"],
            ["a beautiful girl spreading her arms, blue hair, long hair, hat with flowers on its edge, smiling++, dynamic, black dress, park background, birds, trees, flowers, grass","ugly, worst quality", "girl_spread.jpg"]
        ]
        
        # gr.Examples(examples=example_scribble_live2img, inputs=[prompt_box, neg_prompt_box, image_box], outputs=[image1, image2, image3, image4], fn=mult_thread_live_scribble, cache_examples=True)

        gr.on(triggers=[prompt_box.submit, gen_btn.click], fn=mult_thread_lang_class, inputs=[prompt_box, neg_prompt_box, chinese_check], outputs=[chinese_check], show_progress=False)
        gr.on(triggers=[prompt_box.submit, gen_btn.click], fn=mult_thread_live_scribble, inputs=[prompt_box, neg_prompt_box, image_box], outputs=[image1, image2, image3, image4])

    with gr.Tab("AniSketch(安妮涂鸦)"):
        gr.Markdown(
            """
            # AniSketch(安妮涂鸦)
            Turns sketches/scribbles into anime-style images.
            将草图、粗线条涂鸦转为动漫风图片。
            """
        )
        with gr.Row(equal_height=True):
            with gr.Column():
                with gr.Row(equal_height=True):
                    with gr.Column(scale=4):
                        prompt_box = gr.Textbox(label="Prompt(提示词)", placeholder="Enter a prompt\n输入提示词", lines=3)
                        neg_prompt_box = gr.Textbox(label="Negative Prompt(负面提示词)", placeholder="Enter a negative prompt(things you don't want to include in the generated image)\n输入负面提示词:输入您不想生成的部分", lines=3)
                    with gr.Row(equal_height=True):
                        chinese_check = gr.Checkbox(label="Chinese Prompt Mode(中文提示词模式)", info="Click here to enable Chinese Prompting(点此触发中文提示词输入)")
                image_box = gr.Image(label="Input Image(上传图片)", height=400)

                gen_btn = gr.Button(value="Generate(生成)")
        with gr.Row(equal_height=True):
            image1 = gr.Image(label="Result 1(结果图 1)")
            image2 = gr.Image(label="Result 2(结果图 2)")
            image3 = gr.Image(label="Result 3(结果图 3)")
            image4 = gr.Image(label="Result 4(结果图 4)")
        example_scribble2img = [
            ["漂亮的女人,散开的长发,巫师,巫师袍,微笑,拍手,优雅,成熟,月夜背景", "水印", "final_witch.jpg"],
            ["a man wearing a chinese clothes, closed eyes, handsome face, dragon on the clothes, expressionless face, indifferent, chinese building background","poor quality", "chinese_man.jpg"]
        ]
        
        # gr.Examples(examples=example_scribble2img, inputs=[prompt_box, neg_prompt_box, image_box], outputs=[image1, image2, image3, image4], fn=mult_thread_scribble, cache_examples=True)

        gr.on(triggers=[prompt_box.submit, gen_btn.click], fn=mult_thread_lang_class, inputs=[prompt_box, neg_prompt_box, chinese_check], outputs=[chinese_check], show_progress=False)
        gr.on(triggers=[prompt_box.submit, gen_btn.click], fn=mult_thread_scribble, inputs=[prompt_box, neg_prompt_box, image_box], outputs=[image1, image2, image3, image4])


def run():
    # Queue requests so multiple users can generate concurrently.
    iface.queue(default_concurrency_limit=20).launch(debug=True, share=True)


if __name__ == "__main__":
    run()
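
# share=True creates a temporary public gradio.live link when run locally or in
# Colab; on a hosted Space the app is served directly and the flag is typically ignored.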

"""# Separator

"""