John6666 committed on
Commit
bc443ff
·
verified ·
1 Parent(s): 2a2efee

Upload 3 files

Browse files
Files changed (3) hide show
  1. app.py +7 -10
  2. constants.py +70 -2
  3. requirements.txt +1 -1
app.py CHANGED
@@ -7,6 +7,7 @@ from stablepy import (
7
  check_scheduler_compatibility,
8
  TASK_AND_PREPROCESSORS,
9
  FACE_RESTORATION_MODELS,
 
10
  )
11
  from constants import (
12
  DIRECTORY_UPSCALERS,
@@ -20,16 +21,12 @@ from constants import (
20
  MODEL_TYPE_TASK,
21
  POST_PROCESSING_SAMPLER,
22
  DIFFUSERS_CONTROLNET_MODEL,
23
-
 
24
  )
25
  from stablepy.diffusers_vanilla.style_prompt_config import STYLE_NAMES
26
  import torch
27
  import re
28
- from stablepy import (
29
- scheduler_names,
30
- IP_ADAPTERS_SD,
31
- IP_ADAPTERS_SDXL,
32
- )
33
  import time
34
  from PIL import ImageFile
35
  from utils import (
@@ -54,6 +51,9 @@ import warnings
54
  from stablepy import logger
55
  from diffusers import FluxPipeline
56
  # import urllib.parse
 
 
 
57
 
58
  ImageFile.LOAD_TRUNCATED_IMAGES = True
59
  torch.backends.cuda.matmul.allow_tf32 = True
@@ -1202,7 +1202,7 @@ with gr.Blocks(theme='NoCrypt/miku@>=1.2.2', elem_id="main", fill_width=True, cs
1202
  info="The maximum proportional size of the generated image based on the uploaded image."
1203
  )
1204
  with gr.Row():
1205
- controlnet_model_gui = gr.Dropdown(label="ControlNet model", choices=DIFFUSERS_CONTROLNET_MODEL, value=DIFFUSERS_CONTROLNET_MODEL[0])
1206
  control_net_output_scaling_gui = gr.Slider(minimum=0, maximum=5.0, step=0.1, value=1, label="ControlNet Output Scaling in UNet")
1207
  control_net_start_threshold_gui = gr.Slider(minimum=0, maximum=1, step=0.01, value=0, label="ControlNet Start Threshold (%)")
1208
  control_net_stop_threshold_gui = gr.Slider(minimum=0, maximum=1, step=0.01, value=1, label="ControlNet Stop Threshold (%)")
@@ -1234,9 +1234,6 @@ with gr.Blocks(theme='NoCrypt/miku@>=1.2.2', elem_id="main", fill_width=True, cs
1234
 
1235
  with gr.Accordion("IP-Adapter", open=False, visible=True) as menu_ipa:
1236
 
1237
- IP_MODELS = sorted(list(set(IP_ADAPTERS_SD + IP_ADAPTERS_SDXL)))
1238
- MODE_IP_OPTIONS = ["original", "style", "layout", "style+layout"]
1239
-
1240
  with gr.Accordion("IP-Adapter 1", open=True, visible=True):
1241
  with gr.Row():
1242
  image_ip1 = gr.Image(label="IP Image", type="filepath")
 
7
  check_scheduler_compatibility,
8
  TASK_AND_PREPROCESSORS,
9
  FACE_RESTORATION_MODELS,
10
+ scheduler_names,
11
  )
12
  from constants import (
13
  DIRECTORY_UPSCALERS,
 
21
  MODEL_TYPE_TASK,
22
  POST_PROCESSING_SAMPLER,
23
  DIFFUSERS_CONTROLNET_MODEL,
24
+ IP_MODELS,
25
+ MODE_IP_OPTIONS,
26
  )
27
  from stablepy.diffusers_vanilla.style_prompt_config import STYLE_NAMES
28
  import torch
29
  import re
 
 
 
 
 
30
  import time
31
  from PIL import ImageFile
32
  from utils import (
 
51
  from stablepy import logger
52
  from diffusers import FluxPipeline
53
  # import urllib.parse
54
+ import subprocess
55
+
56
+ subprocess.run("rm -rf /data-nvme/zerogpu-offload/*", env={}, shell=True)
57
 
58
  ImageFile.LOAD_TRUNCATED_IMAGES = True
59
  torch.backends.cuda.matmul.allow_tf32 = True
 
1202
  info="The maximum proportional size of the generated image based on the uploaded image."
1203
  )
1204
  with gr.Row():
1205
+ controlnet_model_gui = gr.Dropdown(label="ControlNet model", choices=DIFFUSERS_CONTROLNET_MODEL, value=DIFFUSERS_CONTROLNET_MODEL[0], allow_custom_value=True)
1206
  control_net_output_scaling_gui = gr.Slider(minimum=0, maximum=5.0, step=0.1, value=1, label="ControlNet Output Scaling in UNet")
1207
  control_net_start_threshold_gui = gr.Slider(minimum=0, maximum=1, step=0.01, value=0, label="ControlNet Start Threshold (%)")
1208
  control_net_stop_threshold_gui = gr.Slider(minimum=0, maximum=1, step=0.01, value=1, label="ControlNet Stop Threshold (%)")
 
1234
 
1235
  with gr.Accordion("IP-Adapter", open=False, visible=True) as menu_ipa:
1236
 
 
 
 
1237
  with gr.Accordion("IP-Adapter 1", open=True, visible=True):
1238
  with gr.Row():
1239
  image_ip1 = gr.Image(label="IP Image", type="filepath")
constants.py CHANGED
@@ -5,6 +5,8 @@ from stablepy import (
5
  SD15_TASKS,
6
  SDXL_TASKS,
7
  ALL_BUILTIN_UPSCALERS,
 
 
8
  )
9
 
10
  # - **Download Models**
@@ -19,6 +21,7 @@ DOWNLOAD_LORA = "https://huggingface.co/Leopain/color/resolve/main/Coloring_book
19
  LOAD_DIFFUSERS_FORMAT_MODEL = [
20
  'stabilityai/stable-diffusion-xl-base-1.0',
21
  'Laxhar/noobai-XL-1.1',
 
22
  'black-forest-labs/FLUX.1-dev',
23
  'John6666/blue-pencil-flux1-v021-fp8-flux',
24
  'John6666/wai-ani-flux-v10forfp8-fp8-flux',
@@ -31,7 +34,7 @@ LOAD_DIFFUSERS_FORMAT_MODEL = [
31
  'mikeyandfriends/PixelWave_FLUX.1-dev_03',
32
  'terminusresearch/FluxBooru-v0.3',
33
  'black-forest-labs/FLUX.1-schnell',
34
- 'ostris/OpenFLUX.1',
35
  'shuttleai/shuttle-3-diffusion',
36
  'Laxhar/noobai-XL-1.0',
37
  'John6666/noobai-xl-nai-xl-epsilonpred10version-sdxl',
@@ -125,6 +128,7 @@ LOAD_DIFFUSERS_FORMAT_MODEL = [
125
  'John6666/duchaiten-pony-real-v20-sdxl',
126
  'John6666/duchaiten-pony-xl-no-score-v70-sdxl',
127
  'Spestly/OdysseyXL-3.0',
 
128
  'KBlueLeaf/Kohaku-XL-Zeta',
129
  'cagliostrolab/animagine-xl-3.1',
130
  'yodayo-ai/kivotos-xl-2.0',
@@ -336,6 +340,20 @@ POST_PROCESSING_SAMPLER = ["Use same sampler"] + [
336
  name_s for name_s in scheduler_names if "Auto-Loader" not in name_s
337
  ]
338
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
339
  SUBTITLE_GUI = (
340
  "### This demo uses [diffusers](https://github.com/huggingface/diffusers)"
341
  " to perform different tasks in image generation."
@@ -356,7 +374,9 @@ EXAMPLES_GUI_HELP = (
356
  3. ControlNet Canny SDXL
357
  4. Optical pattern (Optical illusion) SDXL
358
  5. Convert an image to a coloring drawing
359
- 6. ControlNet OpenPose SD 1.5 and Latent upscale
 
 
360
 
361
  - Different tasks can be performed, such as img2img or using the IP adapter, to preserve a person's appearance or a specific style based on an image.
362
  """
@@ -483,6 +503,54 @@ EXAMPLES_GUI = [
483
  35,
484
  False,
485
  ],
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
486
  [
487
  "1girl,face,curly hair,red hair,white background,",
488
  "(worst quality:2),(low quality:2),(normal quality:2),lowres,watermark,",
 
5
  SD15_TASKS,
6
  SDXL_TASKS,
7
  ALL_BUILTIN_UPSCALERS,
8
+ IP_ADAPTERS_SD,
9
+ IP_ADAPTERS_SDXL,
10
  )
11
 
12
  # - **Download Models**
 
21
  LOAD_DIFFUSERS_FORMAT_MODEL = [
22
  'stabilityai/stable-diffusion-xl-base-1.0',
23
  'Laxhar/noobai-XL-1.1',
24
+ 'Laxhar/noobai-XL-Vpred-1.0',
25
  'black-forest-labs/FLUX.1-dev',
26
  'John6666/blue-pencil-flux1-v021-fp8-flux',
27
  'John6666/wai-ani-flux-v10forfp8-fp8-flux',
 
34
  'mikeyandfriends/PixelWave_FLUX.1-dev_03',
35
  'terminusresearch/FluxBooru-v0.3',
36
  'black-forest-labs/FLUX.1-schnell',
37
+ # 'ostris/OpenFLUX.1',
38
  'shuttleai/shuttle-3-diffusion',
39
  'Laxhar/noobai-XL-1.0',
40
  'John6666/noobai-xl-nai-xl-epsilonpred10version-sdxl',
 
128
  'John6666/duchaiten-pony-real-v20-sdxl',
129
  'John6666/duchaiten-pony-xl-no-score-v70-sdxl',
130
  'Spestly/OdysseyXL-3.0',
131
+ 'Spestly/OdysseyXL-4.0',
132
  'KBlueLeaf/Kohaku-XL-Zeta',
133
  'cagliostrolab/animagine-xl-3.1',
134
  'yodayo-ai/kivotos-xl-2.0',
 
340
  name_s for name_s in scheduler_names if "Auto-Loader" not in name_s
341
  ]
342
 
343
+ IP_MODELS = []
344
+ ALL_IPA = sorted(set(IP_ADAPTERS_SD + IP_ADAPTERS_SDXL))
345
+
346
+ for origin_name in ALL_IPA:
347
+ suffixes = []
348
+ if origin_name in IP_ADAPTERS_SD:
349
+ suffixes.append("sd1.5")
350
+ if origin_name in IP_ADAPTERS_SDXL:
351
+ suffixes.append("sdxl")
352
+ ref_name = f"{origin_name} ({'/'.join(suffixes)})"
353
+ IP_MODELS.append((ref_name, origin_name))
354
+
355
+ MODE_IP_OPTIONS = ["original", "style", "layout", "style+layout"]
356
+
357
  SUBTITLE_GUI = (
358
  "### This demo uses [diffusers](https://github.com/huggingface/diffusers)"
359
  " to perform different tasks in image generation."
 
374
  3. ControlNet Canny SDXL
375
  4. Optical pattern (Optical illusion) SDXL
376
  5. Convert an image to a coloring drawing
377
+ 6. V prediction model inference
378
+ 7. V prediction model sd_embed variant inference
379
+ 8. ControlNet OpenPose SD 1.5 and Latent upscale
380
 
381
  - Different tasks can be performed, such as img2img or using the IP adapter, to preserve a person's appearance or a specific style based on an image.
382
  """
 
503
  35,
504
  False,
505
  ],
506
+ [
507
+ "[mochizuki_shiina], [syuri22], newest, reimu, solo, outdoors, water, flower, lantern",
508
+ "worst quality, normal quality, old, sketch,",
509
+ 28,
510
+ 7.0,
511
+ -1,
512
+ "None",
513
+ 0.33,
514
+ "DPM 3M Ef",
515
+ 1600,
516
+ 1024,
517
+ "Laxhar/noobai-XL-Vpred-1.0",
518
+ "txt2img",
519
+ "color_image.png", # img control
520
+ 1024, # img resolution
521
+ 0.35, # strength
522
+ 1.0, # cn scale
523
+ 0.0, # cn start
524
+ 1.0, # cn end
525
+ "Classic",
526
+ None,
527
+ 30,
528
+ False,
529
+ ],
530
+ [
531
+ "[mochizuki_shiina], [syuri22], newest, multiple girls, 2girls, earrings, jewelry, gloves, purple eyes, black hair, looking at viewer, nail polish, hat, smile, open mouth, fingerless gloves, sleeveless, :d, upper body, blue eyes, closed mouth, black gloves, hands up, long hair, shirt, bare shoulders, white headwear, blush, black headwear, blue nails, upper teeth only, short hair, white gloves, white shirt, teeth, rabbit hat, star earrings, purple nails, pink hair, detached sleeves, fingernails, fake animal ears, animal hat, sleeves past wrists, black shirt, medium hair, fur trim, sleeveless shirt, turtleneck, long sleeves, rabbit ears, star \\(symbol\\)",
532
+ "worst quality, normal quality, old, sketch,",
533
+ 28,
534
+ 7.0,
535
+ -1,
536
+ "None",
537
+ 0.33,
538
+ "DPM 3M Ef",
539
+ 1600,
540
+ 1024,
541
+ "Laxhar/noobai-XL-Vpred-1.0",
542
+ "txt2img",
543
+ "color_image.png", # img control
544
+ 1024, # img resolution
545
+ 0.35, # strength
546
+ 1.0, # cn scale
547
+ 0.0, # cn start
548
+ 1.0, # cn end
549
+ "Classic-sd_embed",
550
+ None,
551
+ 30,
552
+ False,
553
+ ],
554
  [
555
  "1girl,face,curly hair,red hair,white background,",
556
  "(worst quality:2),(low quality:2),(normal quality:2),lowres,watermark,",
requirements.txt CHANGED
@@ -1,4 +1,4 @@
1
- git+https://github.com/R3gm/stablepy.git@47c19f5 # -b refactor_sampler_fix
2
  torch==2.2.0
3
  numpy<2
4
  gdown
 
1
+ stablepy==0.6.0
2
  torch==2.2.0
3
  numpy<2
4
  gdown