Commit f493ca6 (1 parent: 8eec2bd), committed by diffusers-bot

Upload folder using huggingface_hub

main/lpw_stable_diffusion_xl.py CHANGED
@@ -24,12 +24,7 @@ from diffusers import DiffusionPipeline, StableDiffusionXLPipeline
 from diffusers.image_processor import PipelineImageInput, VaeImageProcessor
 from diffusers.loaders import FromSingleFileMixin, IPAdapterMixin, LoraLoaderMixin, TextualInversionLoaderMixin
 from diffusers.models import AutoencoderKL, ImageProjection, UNet2DConditionModel
-from diffusers.models.attention_processor import (
-    AttnProcessor2_0,
-    LoRAAttnProcessor2_0,
-    LoRAXFormersAttnProcessor,
-    XFormersAttnProcessor,
-)
+from diffusers.models.attention_processor import AttnProcessor2_0, XFormersAttnProcessor
 from diffusers.pipelines.pipeline_utils import StableDiffusionMixin
 from diffusers.pipelines.stable_diffusion_xl.pipeline_output import StableDiffusionXLPipelineOutput
 from diffusers.schedulers import KarrasDiffusionSchedulers
@@ -1292,12 +1287,7 @@ class SDXLLongPromptWeightingPipeline(
         self.vae.to(dtype=torch.float32)
         use_torch_2_0_or_xformers = isinstance(
             self.vae.decoder.mid_block.attentions[0].processor,
-            (
-                AttnProcessor2_0,
-                XFormersAttnProcessor,
-                LoRAXFormersAttnProcessor,
-                LoRAAttnProcessor2_0,
-            ),
+            (AttnProcessor2_0, XFormersAttnProcessor),
         )
         # if xformers or torch_2_0 is used attention block does not need
         # to be in float32 which can save lots of memory
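
The change is the same in every file of this commit: the LoRA-specific attention processor classes (LoRAAttnProcessor2_0, LoRAXFormersAttnProcessor) are dropped from the attention_processor import, and the isinstance check inside each pipeline's VAE-upcasting helper is narrowed to the two classes that remain. A minimal sketch of the resulting pattern, assuming a standard SDXL checkpoint; the checkpoint id and the pipe variable are illustrative, not part of the diff:

    import torch
    from diffusers import StableDiffusionXLPipeline
    from diffusers.models.attention_processor import AttnProcessor2_0, XFormersAttnProcessor

    # Illustrative checkpoint; any SDXL checkpoint behaves the same way.
    pipe = StableDiffusionXLPipeline.from_pretrained(
        "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16
    )

    dtype = pipe.vae.dtype  # remember the original (e.g. fp16) dtype
    pipe.vae.to(dtype=torch.float32)  # upcast the whole VAE to avoid fp16 overflow
    use_torch_2_0_or_xformers = isinstance(
        pipe.vae.decoder.mid_block.attentions[0].processor,
        (AttnProcessor2_0, XFormersAttnProcessor),
    )
    # with torch 2.0 SDPA or xformers attention, the attention block itself is
    # numerically stable in fp16, so it can be cast back to save memory
    if use_torch_2_0_or_xformers:
        pipe.vae.post_quant_conv.to(dtype)
        pipe.vae.decoder.conv_in.to(dtype)
        pipe.vae.decoder.mid_block.to(dtype)

Only the membership tuple changes in this commit; the surrounding upcasting logic is untouched.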
main/pipeline_demofusion_sdxl.py CHANGED
@@ -16,12 +16,7 @@ from diffusers.loaders import (
     TextualInversionLoaderMixin,
 )
 from diffusers.models import AutoencoderKL, UNet2DConditionModel
-from diffusers.models.attention_processor import (
-    AttnProcessor2_0,
-    LoRAAttnProcessor2_0,
-    LoRAXFormersAttnProcessor,
-    XFormersAttnProcessor,
-)
+from diffusers.models.attention_processor import AttnProcessor2_0, XFormersAttnProcessor
 from diffusers.models.lora import adjust_lora_scale_text_encoder
 from diffusers.pipelines.pipeline_utils import DiffusionPipeline, StableDiffusionMixin
 from diffusers.schedulers import KarrasDiffusionSchedulers
@@ -612,12 +607,7 @@ class DemoFusionSDXLPipeline(
         self.vae.to(dtype=torch.float32)
         use_torch_2_0_or_xformers = isinstance(
             self.vae.decoder.mid_block.attentions[0].processor,
-            (
-                AttnProcessor2_0,
-                XFormersAttnProcessor,
-                LoRAXFormersAttnProcessor,
-                LoRAAttnProcessor2_0,
-            ),
+            (AttnProcessor2_0, XFormersAttnProcessor),
         )
         # if xformers or torch_2_0 is used attention block does not need
         # to be in float32 which can save lots of memory
main/pipeline_sdxl_style_aligned.py CHANGED
@@ -46,8 +46,6 @@ from diffusers.models.attention_processor import (
     Attention,
     AttnProcessor2_0,
     FusedAttnProcessor2_0,
-    LoRAAttnProcessor2_0,
-    LoRAXFormersAttnProcessor,
     XFormersAttnProcessor,
 )
 from diffusers.models.lora import adjust_lora_scale_text_encoder
@@ -1153,8 +1151,6 @@ class StyleAlignedSDXLPipeline(
             (
                 AttnProcessor2_0,
                 XFormersAttnProcessor,
-                LoRAXFormersAttnProcessor,
-                LoRAAttnProcessor2_0,
                 FusedAttnProcessor2_0,
             ),
         )
main/pipeline_stable_diffusion_xl_controlnet_adapter.py CHANGED
@@ -25,12 +25,7 @@ from transformers import CLIPTextModel, CLIPTextModelWithProjection, CLIPTokeniz
 from diffusers.image_processor import PipelineImageInput, VaeImageProcessor
 from diffusers.loaders import FromSingleFileMixin, StableDiffusionXLLoraLoaderMixin, TextualInversionLoaderMixin
 from diffusers.models import AutoencoderKL, ControlNetModel, MultiAdapter, T2IAdapter, UNet2DConditionModel
-from diffusers.models.attention_processor import (
-    AttnProcessor2_0,
-    LoRAAttnProcessor2_0,
-    LoRAXFormersAttnProcessor,
-    XFormersAttnProcessor,
-)
+from diffusers.models.attention_processor import AttnProcessor2_0, XFormersAttnProcessor
 from diffusers.models.lora import adjust_lora_scale_text_encoder
 from diffusers.pipelines.controlnet.multicontrolnet import MultiControlNetModel
 from diffusers.pipelines.pipeline_utils import DiffusionPipeline, StableDiffusionMixin
@@ -797,12 +792,7 @@ class StableDiffusionXLControlNetAdapterPipeline(
         self.vae.to(dtype=torch.float32)
         use_torch_2_0_or_xformers = isinstance(
             self.vae.decoder.mid_block.attentions[0].processor,
-            (
-                AttnProcessor2_0,
-                XFormersAttnProcessor,
-                LoRAXFormersAttnProcessor,
-                LoRAAttnProcessor2_0,
-            ),
+            (AttnProcessor2_0, XFormersAttnProcessor),
         )
         # if xformers or torch_2_0 is used attention block does not need
         # to be in float32 which can save lots of memory
main/pipeline_stable_diffusion_xl_controlnet_adapter_inpaint.py CHANGED
@@ -44,12 +44,7 @@ from diffusers.models import (
     T2IAdapter,
     UNet2DConditionModel,
 )
-from diffusers.models.attention_processor import (
-    AttnProcessor2_0,
-    LoRAAttnProcessor2_0,
-    LoRAXFormersAttnProcessor,
-    XFormersAttnProcessor,
-)
+from diffusers.models.attention_processor import AttnProcessor2_0, XFormersAttnProcessor
 from diffusers.models.lora import adjust_lora_scale_text_encoder
 from diffusers.pipelines.controlnet.multicontrolnet import MultiControlNetModel
 from diffusers.pipelines.pipeline_utils import StableDiffusionMixin
@@ -1135,12 +1130,7 @@ class StableDiffusionXLControlNetAdapterInpaintPipeline(
         self.vae.to(dtype=torch.float32)
         use_torch_2_0_or_xformers = isinstance(
             self.vae.decoder.mid_block.attentions[0].processor,
-            (
-                AttnProcessor2_0,
-                XFormersAttnProcessor,
-                LoRAXFormersAttnProcessor,
-                LoRAAttnProcessor2_0,
-            ),
+            (AttnProcessor2_0, XFormersAttnProcessor),
         )
         # if xformers or torch_2_0 is used attention block does not need
         # to be in float32 which can save lots of memory
main/pipeline_stable_diffusion_xl_differential_img2img.py CHANGED
@@ -37,8 +37,6 @@ from diffusers.loaders import (
 from diffusers.models import AutoencoderKL, ImageProjection, UNet2DConditionModel
 from diffusers.models.attention_processor import (
     AttnProcessor2_0,
-    LoRAAttnProcessor2_0,
-    LoRAXFormersAttnProcessor,
     XFormersAttnProcessor,
 )
 from diffusers.models.lora import adjust_lora_scale_text_encoder
@@ -854,8 -852,6 @@ class StableDiffusionXLDifferentialImg2ImgPipeline(
             (
                 AttnProcessor2_0,
                 XFormersAttnProcessor,
-                LoRAXFormersAttnProcessor,
-                LoRAAttnProcessor2_0,
             ),
         )
         # if xformers or torch_2_0 is used attention block does not need
main/pipeline_stable_diffusion_xl_ipex.py CHANGED
@@ -34,8 +34,6 @@ from diffusers.loaders import (
 from diffusers.models import AutoencoderKL, UNet2DConditionModel
 from diffusers.models.attention_processor import (
     AttnProcessor2_0,
-    LoRAAttnProcessor2_0,
-    LoRAXFormersAttnProcessor,
     XFormersAttnProcessor,
 )
 from diffusers.models.lora import adjust_lora_scale_text_encoder
@@ -662,8 +660,6 @@ class StableDiffusionXLPipelineIpex(
             (
                 AttnProcessor2_0,
                 XFormersAttnProcessor,
-                LoRAXFormersAttnProcessor,
-                LoRAAttnProcessor2_0,
             ),
         )
         # if xformers or torch_2_0 is used attention block does not need