shellypeng committed on
Commit
8cd27b5
·
verified ·
1 Parent(s): 12f4e07

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +79 -67
app.py CHANGED
@@ -51,6 +51,7 @@ pipe = pipeline("text-classification", model=model_ckpt)
51
  HF_TOKEN = os.environ.get("HUGGING_FACE_HUB_TOKEN")
52
 
53
  device="cuda" if torch.cuda.is_available() else "cpu"
 
54
 
55
  hidden_booster_text = "masterpiece++, best quality++, ultra-detailed+ +, unity 8k wallpaper+, illustration+, anime style+, intricate, fluid simulation, sharp edges. glossy++, Smooth++, detailed eyes++, best quality++,4k++,8k++,highres++,masterpiece++,ultra- detailed,realistic++,photorealistic++,photo-realistic++,depth of field, ultra-high definition, highly detailed, natural lighting, sharp focus, cinematic, hyperrealism,extremely detailed"
56
  hidden_negative = "bad anatomy, disfigured, poorly drawn,deformed, mutation, malformation, deformed, mutated, disfigured, deformed eyes+, bad face++, bad hands, poorly drawn hands, malformed hands, extra arms++, extra legs++, Fused body+, Fused hands+, Fused legs+, missing arms, missing limb, extra digit+, fewer digits, floating limbs, disconnected limbs, inaccurate limb, bad fingers, missing fingers, ugly face, long body++"
@@ -64,54 +65,81 @@ def translate(prompt):
64
  tgt_text = ''.join(tgt_text)[:-1]
65
  return tgt_text
66
 
 
67
 
68
- hed = HEDdetector.from_pretrained('lllyasviel/ControlNet')
 
 
69
 
70
- controlnet_scribble = ControlNetModel.from_pretrained(
71
- "lllyasviel/sd-controlnet-scribble", torch_dtype=torch.float16, safety_checker=None, requires_safety_checker=False, )
72
-
73
- pipe_scribble = StableDiffusionControlNetPipeline.from_single_file(
74
- "https://huggingface.co/shellypeng/anime-god/blob/main/animeGod_v10.safetensors", controlnet=controlnet_scribble, safety_checker=None, requires_safety_checker=False,
75
- torch_dtype=torch.float16, token=HF_TOKEN
76
- )
77
-
78
- pipe_scribble.load_lora_weights("shellypeng/lora2")
79
- pipe_scribble.fuse_lora(lora_scale=0.1)
80
-
81
- pipe_scribble.load_textual_inversion("shellypeng/textinv1")
82
- pipe_scribble.load_textual_inversion("shellypeng/textinv2")
83
- pipe_scribble.load_textual_inversion("shellypeng/textinv3")
84
- pipe_scribble.load_textual_inversion("shellypeng/textinv4")
85
- pipe_scribble.scheduler = DPMSolverMultistepScheduler.from_config(pipe_scribble.scheduler.config, use_karras_sigmas=True)
86
- pipe_scribble.safety_checker = None
87
- pipe_scribble.requires_safety_checker = False
88
- pipe_scribble.to(device)
89
- pipe_scribble.safety_checker = lambda images, **kwargs: (images, [False] * len(images))
90
-
91
-
92
- depth_estimator = pipeline('depth-estimation')
93
-
94
- controlnet_depth = ControlNetModel.from_pretrained(
95
- "lllyasviel/sd-controlnet-depth", torch_dtype=torch.float16
96
- )
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
97
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
98
 
99
- pipe_depth = StableDiffusionControlNetPipeline.from_single_file(
100
- "https://huggingface.co/shellypeng/anime-god/blob/main/animeGod_v10.safetensors", controlnet=controlnet_depth,
101
- torch_dtype=torch.float16,
102
- )
103
- pipe_depth.load_lora_weights("shellypeng/lora1")
104
- pipe_depth.fuse_lora(lora_scale=1.5)
105
-
106
- pipe_depth.load_textual_inversion("shellypeng/textinv1")
107
- pipe_depth.load_textual_inversion("shellypeng/textinv2")
108
- pipe_depth.load_textual_inversion("shellypeng/textinv3")
109
- pipe_depth.load_textual_inversion("shellypeng/textinv4")
110
- pipe_depth.scheduler = DPMSolverMultistepScheduler.from_config(pipe_depth.scheduler.config, use_karras_sigmas=True)
111
- def dummy(images, **kwargs):
112
- return images, False
113
- pipe_depth.safety_checker = lambda images, **kwargs: (images, [False] * len(images))
114
- pipe_depth.to(device)
115
 
116
  def real_to_anime(text, input_img):
117
  """
@@ -119,6 +147,7 @@ def real_to_anime(text, input_img):
119
  include Adetailer, detail tweaker lora, prompt backend include: beautiful eyes, beautiful face, beautiful hand, (maybe infer from user's prompt for gesture and facial
120
  expression to improve hand)
121
  """
 
122
  input_img = Image.fromarray(input_img)
123
  input_img = load_image(input_img)
124
  input_img = depth_estimator(input_img)['depth']
@@ -138,7 +167,7 @@ def scribble_to_image(text, neg_prompt_box, input_img):
138
  include Adetailer, detail tweaker lora, prompt backend include: beautiful eyes, beautiful face, beautiful hand, (maybe infer from user's prompt for gesture and facial
139
  expression to improve hand)
140
  """
141
-
142
 
143
 
144
  # if auto detect detects chinese => auto turn on chinese prompting checkbox
@@ -170,6 +199,7 @@ def real_img2img_to_anime(text, neg_prompt_box, input_img):
170
  include Adetailer, detail tweaker lora, prompt backend include: beautiful eyes, beautiful face, beautiful hand, (maybe infer from user's prompt for gesture and facial
171
  expression to improve hand)
172
  """
 
173
  input_img = Image.fromarray(input_img)
174
  input_img = load_image(input_img)
175
  lang_check_label = pipe(text, top_k=1, truncation=True)[0]['label']
@@ -202,24 +232,6 @@ theme = gr.themes.Soft(
202
  )
203
 
204
 
205
- pipe_img2img = StableDiffusionImg2ImgPipeline.from_single_file("https://huggingface.co/shellypeng/anime-god/blob/main/animeGod_v10.safetensors",
206
- torch_dtype=torch.float16, safety_checker=None, requires_safety_checker=False, token=HF_TOKEN)
207
-
208
- pipe_img2img.load_lora_weights("shellypeng/lora1")
209
- pipe_img2img.fuse_lora(lora_scale=0.1)
210
- pipe_img2img.load_lora_weights("shellypeng/lora2", token=HF_TOKEN)
211
- pipe_img2img.fuse_lora(lora_scale=0.1)
212
-
213
- pipe_img2img.load_textual_inversion("shellypeng/textinv1")
214
- pipe_img2img.load_textual_inversion("shellypeng/textinv2")
215
- pipe_img2img.load_textual_inversion("shellypeng/textinv3")
216
- pipe_img2img.load_textual_inversion("shellypeng/textinv4")
217
- pipe_img2img.scheduler = DPMSolverMultistepScheduler.from_config(pipe_img2img.scheduler.config, use_karras_sigmas=True)
218
- pipe_img2img.safety_checker = None
219
- pipe_img2img.requires_safety_checker = False
220
- pipe_img2img.to(device)
221
-
222
- pipe_img2img.safety_checker = lambda images, **kwargs: (images, [False] * len(images))
223
 
224
  def zh_prompt_info(text, neg_text, chinese_check):
225
  can_raise_info = ""
@@ -302,7 +314,7 @@ with gr.Blocks(theme=theme, css="footer {visibility: hidden}", title="ShellAI Ap
302
  ["Beautiful girl, smiling, bun, bun hair, black hair, beautiful eyes, black dress, elegant, red carpet photo","ugly, bad quality", "emma.jpg"]
303
  ]
304
 
305
- gr.Examples(examples=example_img2img, inputs=[prompt_box, neg_prompt_box, image_box], outputs=[image1, image2, image3, image4], fn=mult_thread_img2img, cache_examples=True)
306
 
307
  gr.on(triggers=[prompt_box.submit, gen_btn.click],fn=mult_thread_lang_class, inputs=[prompt_box, neg_prompt_box, chinese_check], outputs=[chinese_check], show_progress=False)
308
  gr.on(triggers=[prompt_box.submit, gen_btn.click],fn=real_to_anime, inputs=[prompt_box, image_box], outputs=[image1, image2, image3, image4])
@@ -337,7 +349,7 @@ with gr.Blocks(theme=theme, css="footer {visibility: hidden}", title="ShellAI Ap
337
  ["Beautiful girl, smiling, bun, bun hair, black hair, beautiful eyes, black dress, elegant, red carpet photo","ugly, bad quality", "emma.jpg"]
338
  ]
339
 
340
- gr.Examples(examples=example_img2img, inputs=[prompt_box, neg_prompt_box, image_box], outputs=[image1, image2, image3, image4], fn=mult_thread_img2img, cache_examples=True)
341
 
342
  gr.on(triggers=[prompt_box.submit, gen_btn.click],fn=mult_thread_lang_class, inputs=[prompt_box, neg_prompt_box, chinese_check], outputs=[chinese_check], show_progress=False)
343
  gr.on(triggers=[prompt_box.submit, gen_btn.click],fn=mult_thread_img2img, inputs=[prompt_box, neg_prompt_box, image_box], outputs=[image1, image2, image3, image4])
@@ -371,7 +383,7 @@ with gr.Blocks(theme=theme, css="footer {visibility: hidden}", title="ShellAI Ap
371
  ["a beautiful girl spreading her arms, blue hair, long hair, hat with flowers on its edge, smiling++, dynamic, black dress, park background, birds, trees, flowers, grass","ugly, worst quality", "girl_spread.jpg"]
372
  ]
373
 
374
- gr.Examples(examples=example_scribble_live2img, inputs=[prompt_box, neg_prompt_box, image_box], outputs=[image1, image2, image3, image4], fn=mult_thread_live_scribble, cache_examples=True)
375
 
376
  gr.on(triggers=[prompt_box.submit, gen_btn.click],fn=mult_thread_lang_class, inputs=[prompt_box, neg_prompt_box, chinese_check], outputs=[chinese_check], show_progress=False)
377
  gr.on(triggers=[prompt_box.submit, gen_btn.click],fn=mult_thread_live_scribble, inputs=[prompt_box, neg_prompt_box, image_box], outputs=[image1, image2, image3, image4])
@@ -405,7 +417,7 @@ with gr.Blocks(theme=theme, css="footer {visibility: hidden}", title="ShellAI Ap
405
  ["a man wearing a chinese clothes, closed eyes, handsome face, dragon on the clothes, expressionless face, indifferent, chinese building background","poor quality", "chinese_man.jpg"]
406
  ]
407
 
408
- gr.Examples(examples=example_scribble2img, inputs=[prompt_box, neg_prompt_box, image_box], outputs=[image1, image2, image3, image4], fn=mult_thread_scribble, cache_examples=True)
409
 
410
  gr.on(triggers=[prompt_box.submit, gen_btn.click],fn=mult_thread_lang_class, inputs=[prompt_box, neg_prompt_box, chinese_check], outputs=[chinese_check], show_progress=False)
411
  gr.on(triggers=[prompt_box.submit, gen_btn.click],fn=mult_thread_scribble, inputs=[prompt_box, neg_prompt_box, image_box], outputs=[image1, image2, image3, image4])
 
51
  HF_TOKEN = os.environ.get("HUGGING_FACE_HUB_TOKEN")
52
 
53
  device="cuda" if torch.cuda.is_available() else "cpu"
54
+ pipe_scribble, pipe_depth, pipe_img2img = None, None, None
55
 
56
  hidden_booster_text = "masterpiece++, best quality++, ultra-detailed+ +, unity 8k wallpaper+, illustration+, anime style+, intricate, fluid simulation, sharp edges. glossy++, Smooth++, detailed eyes++, best quality++,4k++,8k++,highres++,masterpiece++,ultra- detailed,realistic++,photorealistic++,photo-realistic++,depth of field, ultra-high definition, highly detailed, natural lighting, sharp focus, cinematic, hyperrealism,extremely detailed"
57
  hidden_negative = "bad anatomy, disfigured, poorly drawn,deformed, mutation, malformation, deformed, mutated, disfigured, deformed eyes+, bad face++, bad hands, poorly drawn hands, malformed hands, extra arms++, extra legs++, Fused body+, Fused hands+, Fused legs+, missing arms, missing limb, extra digit+, fewer digits, floating limbs, disconnected limbs, inaccurate limb, bad fingers, missing fingers, ugly face, long body++"
 
65
  tgt_text = ''.join(tgt_text)[:-1]
66
  return tgt_text
67
 
68
+
69
 
70
def load_pipe_scribble():
    """Lazily build the scribble-ControlNet pipeline on first use.

    Creates the HED edge detector and the anime-style Stable Diffusion
    ControlNet pipeline, caching both in module-level globals so that
    subsequent calls are no-ops.
    """
    # BUG FIX: `global` is required here. Both names are assigned inside
    # this function, which would otherwise make them function-local and
    # cause `if pipe_scribble is None` to raise UnboundLocalError on the
    # very first call; `hed` must also land at module scope for the
    # scribble handlers to use it.
    global pipe_scribble, hed
    if pipe_scribble is None:
        hed = HEDdetector.from_pretrained('lllyasviel/ControlNet')

        controlnet_scribble = ControlNetModel.from_pretrained(
            "lllyasviel/sd-controlnet-scribble", torch_dtype=torch.float16, safety_checker=None, requires_safety_checker=False, )

        pipe_scribble = StableDiffusionControlNetPipeline.from_single_file(
            "https://huggingface.co/shellypeng/anime-god/blob/main/animeGod_v10.safetensors", controlnet=controlnet_scribble, safety_checker=None, requires_safety_checker=False,
            torch_dtype=torch.float16, token=HF_TOKEN
        )

        # Low-strength detail LoRA plus negative-embedding textual inversions.
        pipe_scribble.load_lora_weights("shellypeng/lora2")
        pipe_scribble.fuse_lora(lora_scale=0.1)

        pipe_scribble.load_textual_inversion("shellypeng/textinv1")
        pipe_scribble.load_textual_inversion("shellypeng/textinv2")
        pipe_scribble.load_textual_inversion("shellypeng/textinv3")
        pipe_scribble.load_textual_inversion("shellypeng/textinv4")
        pipe_scribble.scheduler = DPMSolverMultistepScheduler.from_config(pipe_scribble.scheduler.config, use_karras_sigmas=True)
        # Disable the safety checker; the final stub mimics the diffusers
        # interface by returning the images with all-False NSFW flags.
        pipe_scribble.safety_checker = None
        pipe_scribble.requires_safety_checker = False
        pipe_scribble.to(device)
        pipe_scribble.safety_checker = lambda images, **kwargs: (images, [False] * len(images))
94
+
95
+
96
def load_pipe_depth():
    """Lazily build the depth-ControlNet pipeline on first use.

    Creates the depth-estimation pipeline and the anime-style Stable
    Diffusion ControlNet pipeline, caching both in module-level globals.
    `real_to_anime` relies on `depth_estimator` being visible at module
    scope after this runs.
    """
    # BUG FIX: `global` is required here. Both names are assigned inside
    # this function, which would otherwise make them function-local and
    # cause `if pipe_depth is None` to raise UnboundLocalError, and would
    # leave `depth_estimator` invisible to the callers that need it.
    global pipe_depth, depth_estimator
    if pipe_depth is None:
        depth_estimator = pipeline('depth-estimation')

        controlnet_depth = ControlNetModel.from_pretrained(
            "lllyasviel/sd-controlnet-depth", torch_dtype=torch.float16
        )

        pipe_depth = StableDiffusionControlNetPipeline.from_single_file(
            "https://huggingface.co/shellypeng/anime-god/blob/main/animeGod_v10.safetensors", controlnet=controlnet_depth,
            torch_dtype=torch.float16,
        )
        pipe_depth.load_lora_weights("shellypeng/lora1")
        pipe_depth.fuse_lora(lora_scale=1.5)

        pipe_depth.load_textual_inversion("shellypeng/textinv1")
        pipe_depth.load_textual_inversion("shellypeng/textinv2")
        pipe_depth.load_textual_inversion("shellypeng/textinv3")
        pipe_depth.load_textual_inversion("shellypeng/textinv4")
        pipe_depth.scheduler = DPMSolverMultistepScheduler.from_config(pipe_depth.scheduler.config, use_karras_sigmas=True)
        # Disable the safety checker with a stub that returns the images
        # with all-False NSFW flags. (The original's unused nested `dummy`
        # helper was dead code and has been removed.)
        pipe_depth.safety_checker = lambda images, **kwargs: (images, [False] * len(images))
        pipe_depth.to(device)
121
+
122
def load_pipe_img2img():
    """Lazily build the img2img pipeline on first use.

    Creates the anime-style Stable Diffusion img2img pipeline and caches
    it in the module-level `pipe_img2img` so subsequent calls are no-ops.
    """
    # BUG FIX: `global` is required here. `pipe_img2img` is assigned
    # inside this function, which would otherwise make it function-local
    # and cause `if pipe_img2img is None` to raise UnboundLocalError on
    # the very first call.
    global pipe_img2img
    if pipe_img2img is None:
        pipe_img2img = StableDiffusionImg2ImgPipeline.from_single_file("https://huggingface.co/shellypeng/anime-god/blob/main/animeGod_v10.safetensors",
                                                                       torch_dtype=torch.float16, safety_checker=None, requires_safety_checker=False, token=HF_TOKEN)

        # Two detail LoRAs fused at low strength, plus negative-embedding
        # textual inversions.
        pipe_img2img.load_lora_weights("shellypeng/lora1")
        pipe_img2img.fuse_lora(lora_scale=0.1)
        pipe_img2img.load_lora_weights("shellypeng/lora2", token=HF_TOKEN)
        pipe_img2img.fuse_lora(lora_scale=0.1)

        pipe_img2img.load_textual_inversion("shellypeng/textinv1")
        pipe_img2img.load_textual_inversion("shellypeng/textinv2")
        pipe_img2img.load_textual_inversion("shellypeng/textinv3")
        pipe_img2img.load_textual_inversion("shellypeng/textinv4")
        pipe_img2img.scheduler = DPMSolverMultistepScheduler.from_config(pipe_img2img.scheduler.config, use_karras_sigmas=True)
        # Disable the safety checker; the final stub mimics the diffusers
        # interface by returning the images with all-False NSFW flags.
        pipe_img2img.safety_checker = None
        pipe_img2img.requires_safety_checker = False
        pipe_img2img.to(device)

        pipe_img2img.safety_checker = lambda images, **kwargs: (images, [False] * len(images))
142
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
143
 
144
  def real_to_anime(text, input_img):
145
  """
 
147
  include Adetailer, detail tweaker lora, prompt backend include: beautiful eyes, beautiful face, beautiful hand, (maybe infer from user's prompt for gesture and facial
148
  expression to improve hand)
149
  """
150
+ load_pipe_depth()
151
  input_img = Image.fromarray(input_img)
152
  input_img = load_image(input_img)
153
  input_img = depth_estimator(input_img)['depth']
 
167
  include Adetailer, detail tweaker lora, prompt backend include: beautiful eyes, beautiful face, beautiful hand, (maybe infer from user's prompt for gesture and facial
168
  expression to improve hand)
169
  """
170
+ load_pipe_scribble()
171
 
172
 
173
  # if auto detect detects chinese => auto turn on chinese prompting checkbox
 
199
  include Adetailer, detail tweaker lora, prompt backend include: beautiful eyes, beautiful face, beautiful hand, (maybe infer from user's prompt for gesture and facial
200
  expression to improve hand)
201
  """
202
+ load_pipe_img2img()
203
  input_img = Image.fromarray(input_img)
204
  input_img = load_image(input_img)
205
  lang_check_label = pipe(text, top_k=1, truncation=True)[0]['label']
 
232
  )
233
 
234
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
235
 
236
  def zh_prompt_info(text, neg_text, chinese_check):
237
  can_raise_info = ""
 
314
  ["Beautiful girl, smiling, bun, bun hair, black hair, beautiful eyes, black dress, elegant, red carpet photo","ugly, bad quality", "emma.jpg"]
315
  ]
316
 
317
+ # gr.Examples(examples=example_img2img, inputs=[prompt_box, neg_prompt_box, image_box], outputs=[image1, image2, image3, image4], fn=mult_thread_img2img, cache_examples=True)
318
 
319
  gr.on(triggers=[prompt_box.submit, gen_btn.click],fn=mult_thread_lang_class, inputs=[prompt_box, neg_prompt_box, chinese_check], outputs=[chinese_check], show_progress=False)
320
  gr.on(triggers=[prompt_box.submit, gen_btn.click],fn=real_to_anime, inputs=[prompt_box, image_box], outputs=[image1, image2, image3, image4])
 
349
  ["Beautiful girl, smiling, bun, bun hair, black hair, beautiful eyes, black dress, elegant, red carpet photo","ugly, bad quality", "emma.jpg"]
350
  ]
351
 
352
+ # gr.Examples(examples=example_img2img, inputs=[prompt_box, neg_prompt_box, image_box], outputs=[image1, image2, image3, image4], fn=mult_thread_img2img, cache_examples=True)
353
 
354
  gr.on(triggers=[prompt_box.submit, gen_btn.click],fn=mult_thread_lang_class, inputs=[prompt_box, neg_prompt_box, chinese_check], outputs=[chinese_check], show_progress=False)
355
  gr.on(triggers=[prompt_box.submit, gen_btn.click],fn=mult_thread_img2img, inputs=[prompt_box, neg_prompt_box, image_box], outputs=[image1, image2, image3, image4])
 
383
  ["a beautiful girl spreading her arms, blue hair, long hair, hat with flowers on its edge, smiling++, dynamic, black dress, park background, birds, trees, flowers, grass","ugly, worst quality", "girl_spread.jpg"]
384
  ]
385
 
386
+ # gr.Examples(examples=example_scribble_live2img, inputs=[prompt_box, neg_prompt_box, image_box], outputs=[image1, image2, image3, image4], fn=mult_thread_live_scribble, cache_examples=True)
387
 
388
  gr.on(triggers=[prompt_box.submit, gen_btn.click],fn=mult_thread_lang_class, inputs=[prompt_box, neg_prompt_box, chinese_check], outputs=[chinese_check], show_progress=False)
389
  gr.on(triggers=[prompt_box.submit, gen_btn.click],fn=mult_thread_live_scribble, inputs=[prompt_box, neg_prompt_box, image_box], outputs=[image1, image2, image3, image4])
 
417
  ["a man wearing a chinese clothes, closed eyes, handsome face, dragon on the clothes, expressionless face, indifferent, chinese building background","poor quality", "chinese_man.jpg"]
418
  ]
419
 
420
+ # gr.Examples(examples=example_scribble2img, inputs=[prompt_box, neg_prompt_box, image_box], outputs=[image1, image2, image3, image4], fn=mult_thread_scribble, cache_examples=True)
421
 
422
  gr.on(triggers=[prompt_box.submit, gen_btn.click],fn=mult_thread_lang_class, inputs=[prompt_box, neg_prompt_box, chinese_check], outputs=[chinese_check], show_progress=False)
423
  gr.on(triggers=[prompt_box.submit, gen_btn.click],fn=mult_thread_scribble, inputs=[prompt_box, neg_prompt_box, image_box], outputs=[image1, image2, image3, image4])