adamelliotfields committed · verified
Commit 1b15230 · Parent(s): ae4d37d

Remove custom pipelines

Files changed (2)
  1. lib/config.py +30 -57
  2. lib/pipelines.py +0 -241
lib/config.py CHANGED
@@ -1,35 +1,20 @@
-import os
-from importlib import import_module
-from importlib.util import find_spec
 from types import SimpleNamespace
 from warnings import filterwarnings

 from diffusers import (
-    DDIMScheduler,
     DEISMultistepScheduler,
     DPMSolverMultistepScheduler,
     EulerAncestralDiscreteScheduler,
     EulerDiscreteScheduler,
-    PNDMScheduler,
+    StableDiffusionControlNetImg2ImgPipeline,
+    StableDiffusionControlNetPipeline,
+    StableDiffusionImg2ImgPipeline,
+    StableDiffusionPipeline,
     UniPCMultistepScheduler,
 )
 from diffusers.utils import logging as diffusers_logging
 from transformers import logging as transformers_logging

-from .pipelines import (
-    CustomStableDiffusionControlNetImg2ImgPipeline,
-    CustomStableDiffusionControlNetPipeline,
-    CustomStableDiffusionImg2ImgPipeline,
-    CustomStableDiffusionPipeline,
-)
-
-# Improved GPU handling and progress bars; set before importing spaces
-os.environ["ZEROGPU_V2"] = "1"
-
-# Errors if enabled and not installed
-if find_spec("hf_transfer"):
-    os.environ["HF_HUB_ENABLE_HF_TRANSFER"] = "1"
-
 filterwarnings("ignore", category=FutureWarning, module="diffusers")
 filterwarnings("ignore", category=FutureWarning, module="transformers")

@@ -37,7 +22,7 @@ diffusers_logging.set_verbosity_error()
 transformers_logging.set_verbosity_error()

 # Standard Stable Diffusion 1.5 file structure
-sd_files = [
+_sd_files = [
     "feature_extractor/preprocessor_config.json",
     "safety_checker/config.json",
     "scheduler/scheduler_config.json",
@@ -56,36 +41,29 @@ sd_files = [

 # Using namespace instead of dataclass for simplicity
 Config = SimpleNamespace(
-    HF_TOKEN=os.environ.get("HF_TOKEN", None),
-    ZERO_GPU=import_module("spaces").config.Config.zero_gpu,
-    # TODO: fix model config redundancy
-    HF_MODELS={
-        # downloaded on startup
+    PIPELINES={
+        "txt2img": StableDiffusionPipeline,
+        "img2img": StableDiffusionImg2ImgPipeline,
+        "controlnet_txt2img": StableDiffusionControlNetPipeline,
+        "controlnet_img2img": StableDiffusionControlNetImg2ImgPipeline,
+    },
+    HF_REPOS={
         "ai-forever/Real-ESRGAN": ["RealESRGAN_x2.pth", "RealESRGAN_x4.pth"],
         "cyberdelia/CyberRealistic": ["CyberRealistic_V5_FP16.safetensors"],
         "fluently/Fluently-v4": ["Fluently-v4.safetensors"],
+        "h94/IP-Adapter": [
+            "models/ip-adapter-full-face_sd15.safetensors",
+            "models/ip-adapter-plus_sd15.safetensors",
+            "models/image_encoder/model.safetensors",
+        ],
         "lllyasviel/control_v11p_sd15_canny": ["diffusion_pytorch_model.fp16.safetensors"],
-        "Lykon/dreamshaper-8": [*sd_files],
+        "Lykon/dreamshaper-8": _sd_files,
         "s6yx/ReV_Animated": ["rev_1.2.2/rev_1.2.2-fp16.safetensors"],
         "SG161222/Realistic_Vision_V5.1_noVAE": ["Realistic_Vision_V5.1_fp16-no-ema.safetensors"],
-        "stable-diffusion-v1-5/stable-diffusion-v1-5": [*sd_files],
+        "stable-diffusion-v1-5/stable-diffusion-v1-5": _sd_files,
         "XpucT/Deliberate": ["Deliberate_v6.safetensors"],
         "XpucT/Reliberate": ["Reliberate_v3.safetensors"],
     },
-    MONO_FONTS=["monospace"],
-    SANS_FONTS=[
-        "sans-serif",
-        "Apple Color Emoji",
-        "Segoe UI Emoji",
-        "Segoe UI Symbol",
-        "Noto Color Emoji",
-    ],
-    PIPELINES={
-        "txt2img": CustomStableDiffusionPipeline,
-        "img2img": CustomStableDiffusionImg2ImgPipeline,
-        "controlnet_txt2img": CustomStableDiffusionControlNetPipeline,
-        "controlnet_img2img": CustomStableDiffusionControlNetImg2ImgPipeline,
-    },
     MODEL="XpucT/Reliberate",
     MODELS=[
         "cyberdelia/CyberRealistic",
@@ -97,25 +75,21 @@ Config = SimpleNamespace(
         "XpucT/Deliberate",
         "XpucT/Reliberate",
     ],
-    # Single-file model weights
-    MODEL_CHECKPOINTS={
-        # keep keys lowercase for case-insensitive matching in the loader
-        "cyberdelia/cyberrealistic": "CyberRealistic_V5_FP16.safetensors",
-        "fluently/fluently-v4": "Fluently-v4.safetensors",
-        "s6yx/rev_animated": "rev_1.2.2/rev_1.2.2-fp16.safetensors",
-        "sg161222/realistic_vision_v5.1_novae": "Realistic_Vision_V5.1_fp16-no-ema.safetensors",
-        "xpuct/deliberate": "Deliberate_v6.safetensors",
-        "xpuct/reliberate": "Reliberate_v3.safetensors",
-    },
-    SCHEDULER="UniPC 2M",
+    SINGLE_FILE_MODELS=[
+        "cyberdelia/CyberRealistic",
+        "fluently/Fluently-v4",
+        "s6yx/ReV_Animated",
+        "SG161222/Realistic_Vision_V5.1_noVAE",
+        "XpucT/Deliberate",
+        "XpucT/Reliberate",
+    ],
+    SCHEDULER="UniPC",
     SCHEDULERS={
-        "DDIM": DDIMScheduler,
-        "DEIS 2M": DEISMultistepScheduler,
+        "DEIS": DEISMultistepScheduler,
         "DPM++ 2M": DPMSolverMultistepScheduler,
         "Euler": EulerDiscreteScheduler,
         "Euler a": EulerAncestralDiscreteScheduler,
-        "PNDM": PNDMScheduler,
-        "UniPC 2M": UniPCMultistepScheduler,
+        "UniPC": UniPCMultistepScheduler,
     },
     ANNOTATOR="canny",
     ANNOTATORS={
@@ -124,7 +98,6 @@ Config = SimpleNamespace(
     WIDTH=512,
     HEIGHT=512,
     NUM_IMAGES=1,
-    SEED=-1,
     GUIDANCE_SCALE=6,
     INFERENCE_STEPS=40,
     DENOISING_STRENGTH=0.8,
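
Context for the config changes above (not part of this commit): a minimal sketch of how a loader might consume the stock diffusers classes now referenced in PIPELINES, dispatching on the new SINGLE_FILE_MODELS list. The load_pipeline helper, the lib.config import path, and the assumption that a single-file model's checkpoint is the first entry in its HF_REPOS list are all hypothetical; the Space's actual loader is not shown here.

import torch
from huggingface_hub import hf_hub_download

from lib.config import Config  # assumed import path for the file shown above


def load_pipeline(kind: str, model: str):
    """Hypothetical helper: build a stock diffusers pipeline from Config."""
    pipeline_cls = Config.PIPELINES[kind]  # e.g. StableDiffusionPipeline for "txt2img"
    if model in Config.SINGLE_FILE_MODELS:
        # Assumption: the first HF_REPOS entry is the single-file .safetensors checkpoint.
        checkpoint = hf_hub_download(model, Config.HF_REPOS[model][0])
        return pipeline_cls.from_single_file(checkpoint, torch_dtype=torch.float16)
    # Diffusers-layout repos (e.g. Lykon/dreamshaper-8) load with from_pretrained.
    # The controlnet_* entries would additionally need a loaded ControlNetModel passed as controlnet=.
    return pipeline_cls.from_pretrained(model, torch_dtype=torch.float16)


pipe = load_pipeline("txt2img", Config.MODEL)  # "XpucT/Reliberate" is a single-file model

The new h94/IP-Adapter files also line up with diffusers' built-in IP-Adapter loader, e.g. pipe.load_ip_adapter("h94/IP-Adapter", subfolder="models", weight_name="ip-adapter-plus_sd15.safetensors").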
lib/pipelines.py DELETED
@@ -1,241 +0,0 @@
-import os
-from importlib import import_module
-
-from diffusers import (
-    StableDiffusionControlNetImg2ImgPipeline,
-    StableDiffusionControlNetPipeline,
-    StableDiffusionImg2ImgPipeline,
-    StableDiffusionPipeline,
-)
-from diffusers.loaders.single_file import (
-    SINGLE_FILE_OPTIONAL_COMPONENTS,
-    load_single_file_sub_model,
-)
-from diffusers.loaders.single_file_utils import fetch_diffusers_config, load_single_file_checkpoint
-from diffusers.models.modeling_utils import _LOW_CPU_MEM_USAGE_DEFAULT
-from diffusers.pipelines.pipeline_loading_utils import (
-    ALL_IMPORTABLE_CLASSES,
-    _get_pipeline_class,
-    load_sub_model,
-)
-from diffusers.utils import logging
-from huggingface_hub import snapshot_download
-from huggingface_hub.utils import validate_hf_hub_args
-
-
-class CustomDiffusionMixin:
-    r"""
-    Overrides DiffusionPipeline `from_pretrained` and `from_single_file` methods to allow passing a progress function.
-    """
-
-    # Copied from https://github.com/huggingface/diffusers/blob/v0.30.3/src/diffusers/pipelines/pipeline_utils.py#L480
-    @classmethod
-    @validate_hf_hub_args
-    def from_pretrained(cls, pretrained_model_name_or_path, progress=None, **kwargs):
-        torch_dtype = kwargs.pop("torch_dtype", None)
-        variant = kwargs.pop("variant", None)
-        token = kwargs.pop("token", None)
-
-        # download the checkpoints and configs
-        cached_folder = cls.download(
-            pretrained_model_name_or_path,
-            variant=variant,
-            token=token,
-            **kwargs,
-        )
-
-        # pop out "_ignore_files" as it is only needed for download
-        config_dict = cls.load_config(cached_folder)
-        config_dict.pop("_ignore_files", None)
-
-        # Define which model components should load variants.
-        # We retrieve the information by matching whether variant model checkpoints exist in the subfolders.
-        # Example: `diffusion_pytorch_model.safetensors` -> `diffusion_pytorch_model.fp16.safetensors` with variant being `"fp16"`.
-        model_variants = {}
-        if variant is not None:
-            for folder in os.listdir(cached_folder):
-                folder_path = os.path.join(cached_folder, folder)
-                is_folder = os.path.isdir(folder_path) and folder in config_dict
-                variant_exists = is_folder and any(
-                    p.split(".")[1].startswith(variant) for p in os.listdir(folder_path)
-                )
-                if variant_exists:
-                    model_variants[folder] = variant
-
-        # load the pipeline class
-        pipeline_class = _get_pipeline_class(cls, config=config_dict)
-
-        # define expected modules given pipeline signature and define non-None initialized modules (=`init_kwargs`)
-        expected_modules, optional_kwargs = cls._get_signature_keys(pipeline_class)
-        passed_class_obj = {k: kwargs.pop(k) for k in expected_modules if k in kwargs}
-        passed_pipe_kwargs = {k: kwargs.pop(k) for k in optional_kwargs if k in kwargs}
-
-        def load_module(name, value):
-            if value[0] is None:
-                return False
-            if name in passed_class_obj and passed_class_obj[name] is None:
-                return False
-            return True
-
-        init_dict, _, _ = pipeline_class.extract_init_dict(config_dict, **kwargs)
-        init_kwargs = {
-            k: init_dict.pop(k)
-            for k in optional_kwargs
-            if k in init_dict and k not in pipeline_class._optional_components
-        }
-        init_kwargs = {**init_kwargs, **passed_pipe_kwargs}
-        init_dict = {k: v for k, v in init_dict.items() if load_module(k, v)}
-
-        # load each module in the pipeline
-        pipelines = import_module("diffusers.pipelines")
-        tqdm = logging.tqdm if progress is None else progress.tqdm
-        for name, (library_name, class_name) in tqdm(
-            sorted(init_dict.items()),
-            desc="Loading pipeline components",
-        ):
-            # use passed sub model or load class_name from library_name
-            loaded_sub_model = None
-            if name in passed_class_obj:
-                # passed as an argument like "scheduler"
-                loaded_sub_model = passed_class_obj[name]
-            else:
-                loaded_sub_model = load_sub_model(
-                    library_name=library_name,
-                    class_name=class_name,
-                    importable_classes=ALL_IMPORTABLE_CLASSES,
-                    pipelines=pipelines,
-                    is_pipeline_module=hasattr(pipelines, library_name),
-                    pipeline_class=pipeline_class,
-                    torch_dtype=torch_dtype,
-                    provider=None,
-                    sess_options=None,
-                    device_map=None,
-                    max_memory=None,
-                    offload_folder=None,
-                    offload_state_dict=False,
-                    model_variants=model_variants,
-                    name=name,
-                    from_flax=False,
-                    variant=variant,
-                    low_cpu_mem_usage=_LOW_CPU_MEM_USAGE_DEFAULT,
-                    cached_folder=cached_folder,
-                )
-            init_kwargs[name] = loaded_sub_model
-
-        # potentially add passed objects if expected
-        missing_modules = set(expected_modules) - set(init_kwargs.keys())
-        if len(missing_modules) > 0:
-            for module in missing_modules:
-                init_kwargs[module] = passed_class_obj.get(module, None)
-
-        # instantiate the pipeline
-        model = pipeline_class(**init_kwargs)
-
-        # save where the model was instantiated from
-        model.register_to_config(_name_or_path=pretrained_model_name_or_path)
-        return model
-
-    # Copied from https://github.com/huggingface/diffusers/blob/v0.30.3/src/diffusers/loaders/single_file.py#L270
-    @classmethod
-    @validate_hf_hub_args
-    def from_single_file(cls, pretrained_model_link_or_path, progress=None, **kwargs):
-        token = kwargs.pop("token", None)
-        torch_dtype = kwargs.pop("torch_dtype", None)
-
-        # load the pipeline class
-        pipeline_class = _get_pipeline_class(cls, config=None)
-        checkpoint = load_single_file_checkpoint(pretrained_model_link_or_path, token=token)
-
-        config = fetch_diffusers_config(checkpoint)
-        default_pretrained_model_config_name = config["pretrained_model_name_or_path"]
-
-        # attempt to download the config files for the pipeline
-        cached_model_config_path = snapshot_download(
-            default_pretrained_model_config_name,
-            token=token,
-            allow_patterns=["**/*.json", "*.json", "*.txt", "**/*.txt", "**/*.model"],
-        )
-
-        # pop out "_ignore_files" as it is only needed for download
-        config_dict = pipeline_class.load_config(cached_model_config_path)
-        config_dict.pop("_ignore_files", None)
-
-        # define expected modules given pipeline signature and define non-None initialized modules (=`init_kwargs`)
-        expected_modules, optional_kwargs = pipeline_class._get_signature_keys(cls)
-        passed_class_obj = {k: kwargs.pop(k) for k in expected_modules if k in kwargs}
-        passed_pipe_kwargs = {k: kwargs.pop(k) for k in optional_kwargs if k in kwargs}
-
-        def load_module(name, value):
-            if value[0] is None:
-                return False
-            if name in passed_class_obj and passed_class_obj[name] is None:
-                return False
-            if name in SINGLE_FILE_OPTIONAL_COMPONENTS:
-                return False
-            return True
-
-        init_dict, _, _ = pipeline_class.extract_init_dict(config_dict, **kwargs)
-        init_kwargs = {k: init_dict.pop(k) for k in optional_kwargs if k in init_dict}
-        init_kwargs = {**init_kwargs, **passed_pipe_kwargs}
-        init_dict = {k: v for k, v in init_dict.items() if load_module(k, v)}
-
-        # load each module in the pipeline
-        pipelines = import_module("diffusers.pipelines")
-        tqdm = logging.tqdm if progress is None else progress.tqdm
-        for name, (library_name, class_name) in tqdm(
-            sorted(init_dict.items()),
-            desc="Loading pipeline components",
-        ):
-            # use passed sub model or load class_name from library_name
-            loaded_sub_model = None
-            if name in passed_class_obj:
-                # passed as an argument like "scheduler"
-                loaded_sub_model = passed_class_obj[name]
-            else:
-                loaded_sub_model = load_single_file_sub_model(
-                    library_name=library_name,
-                    class_name=class_name,
-                    name=name,
-                    checkpoint=checkpoint,
-                    is_pipeline_module=hasattr(pipelines, library_name),
-                    cached_model_config_path=cached_model_config_path,
-                    pipelines=pipelines,
-                    torch_dtype=torch_dtype,
-                    **kwargs,
-                )
-            init_kwargs[name] = loaded_sub_model
-
-        # potentially add passed objects if expected
-        missing_modules = set(expected_modules) - set(init_kwargs.keys())
-        if len(missing_modules) > 0:
-            for module in missing_modules:
-                init_kwargs[module] = passed_class_obj.get(module, None)
-
-        # instantiate the pipeline
-        pipe = pipeline_class(**init_kwargs)
-
-        # save where the model was instantiated from
-        pipe.register_to_config(_name_or_path=pretrained_model_link_or_path)
-        return pipe
-
-
-class CustomStableDiffusionPipeline(CustomDiffusionMixin, StableDiffusionPipeline):
-    pass
-
-
-class CustomStableDiffusionImg2ImgPipeline(CustomDiffusionMixin, StableDiffusionImg2ImgPipeline):
-    pass
-
-
-class CustomStableDiffusionControlNetPipeline(
-    CustomDiffusionMixin,
-    StableDiffusionControlNetPipeline,
-):
-    pass
-
-
-class CustomStableDiffusionControlNetImg2ImgPipeline(
-    CustomDiffusionMixin,
-    StableDiffusionControlNetImg2ImgPipeline,
-):
-    pass
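
For context (not part of the diff): the deleted CustomDiffusionMixin only changed how the stock loaders report progress, swapping diffusers' logging.tqdm for a caller-supplied progress.tqdm. A minimal sketch of the stock behaviour the app falls back to, assuming a diffusers-layout repo from HF_REPOS; diffusers' own "Loading pipeline components" bar stands in for the injected one.

import torch
from diffusers import StableDiffusionPipeline
from diffusers.utils import logging as diffusers_logging

# Diffusers prints its own "Loading pipeline components" bar when progress bars are enabled,
# so no from_pretrained override is needed just for load-time feedback.
diffusers_logging.enable_progress_bar()

pipe = StableDiffusionPipeline.from_pretrained(
    "Lykon/dreamshaper-8",      # diffusers-layout repo listed in Config.HF_REPOS
    torch_dtype=torch.float16,  # fp16 weights, matching the checkpoints used elsewhere
)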