Skip to content

Commit 8eb1731

Browse files
authored
[LoRA] get rid of the legacy lora remnants and make our codebase lighter (#8623)
* get rid of the legacy lora remnants and make our codebase lighter * fix deprecated lora argument * fix * empty commit to trigger ci * remove print * empty
1 parent c71c19c commit 8eb1731

File tree

43 files changed

+34
-578
lines changed

Some content is hidden

Large commits have some content hidden by default. Use the search box below to find content that may be hidden.

43 files changed

+34
-578
lines changed

docs/source/en/api/attnprocessor.md

Lines changed: 0 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -41,12 +41,6 @@ An attention processor is a class for applying different types of attention mech
4141
## FusedAttnProcessor2_0
4242
[[autodoc]] models.attention_processor.FusedAttnProcessor2_0
4343

44-
## LoRAAttnAddedKVProcessor
45-
[[autodoc]] models.attention_processor.LoRAAttnAddedKVProcessor
46-
47-
## LoRAXFormersAttnProcessor
48-
[[autodoc]] models.attention_processor.LoRAXFormersAttnProcessor
49-
5044
## SlicedAttnProcessor
5145
[[autodoc]] models.attention_processor.SlicedAttnProcessor
5246

examples/community/lpw_stable_diffusion_xl.py

Lines changed: 2 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -24,12 +24,7 @@
2424
from diffusers.image_processor import PipelineImageInput, VaeImageProcessor
2525
from diffusers.loaders import FromSingleFileMixin, IPAdapterMixin, LoraLoaderMixin, TextualInversionLoaderMixin
2626
from diffusers.models import AutoencoderKL, ImageProjection, UNet2DConditionModel
27-
from diffusers.models.attention_processor import (
28-
AttnProcessor2_0,
29-
LoRAAttnProcessor2_0,
30-
LoRAXFormersAttnProcessor,
31-
XFormersAttnProcessor,
32-
)
27+
from diffusers.models.attention_processor import AttnProcessor2_0, XFormersAttnProcessor
3328
from diffusers.pipelines.pipeline_utils import StableDiffusionMixin
3429
from diffusers.pipelines.stable_diffusion_xl.pipeline_output import StableDiffusionXLPipelineOutput
3530
from diffusers.schedulers import KarrasDiffusionSchedulers
@@ -1292,12 +1287,7 @@ def upcast_vae(self):
12921287
self.vae.to(dtype=torch.float32)
12931288
use_torch_2_0_or_xformers = isinstance(
12941289
self.vae.decoder.mid_block.attentions[0].processor,
1295-
(
1296-
AttnProcessor2_0,
1297-
XFormersAttnProcessor,
1298-
LoRAXFormersAttnProcessor,
1299-
LoRAAttnProcessor2_0,
1300-
),
1290+
(AttnProcessor2_0, XFormersAttnProcessor),
13011291
)
13021292
# if xformers or torch_2_0 is used attention block does not need
13031293
# to be in float32 which can save lots of memory

examples/community/pipeline_demofusion_sdxl.py

Lines changed: 2 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -16,12 +16,7 @@
1616
TextualInversionLoaderMixin,
1717
)
1818
from diffusers.models import AutoencoderKL, UNet2DConditionModel
19-
from diffusers.models.attention_processor import (
20-
AttnProcessor2_0,
21-
LoRAAttnProcessor2_0,
22-
LoRAXFormersAttnProcessor,
23-
XFormersAttnProcessor,
24-
)
19+
from diffusers.models.attention_processor import AttnProcessor2_0, XFormersAttnProcessor
2520
from diffusers.models.lora import adjust_lora_scale_text_encoder
2621
from diffusers.pipelines.pipeline_utils import DiffusionPipeline, StableDiffusionMixin
2722
from diffusers.schedulers import KarrasDiffusionSchedulers
@@ -612,12 +607,7 @@ def upcast_vae(self):
612607
self.vae.to(dtype=torch.float32)
613608
use_torch_2_0_or_xformers = isinstance(
614609
self.vae.decoder.mid_block.attentions[0].processor,
615-
(
616-
AttnProcessor2_0,
617-
XFormersAttnProcessor,
618-
LoRAXFormersAttnProcessor,
619-
LoRAAttnProcessor2_0,
620-
),
610+
(AttnProcessor2_0, XFormersAttnProcessor),
621611
)
622612
# if xformers or torch_2_0 is used attention block does not need
623613
# to be in float32 which can save lots of memory

examples/community/pipeline_sdxl_style_aligned.py

Lines changed: 0 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -46,8 +46,6 @@
4646
Attention,
4747
AttnProcessor2_0,
4848
FusedAttnProcessor2_0,
49-
LoRAAttnProcessor2_0,
50-
LoRAXFormersAttnProcessor,
5149
XFormersAttnProcessor,
5250
)
5351
from diffusers.models.lora import adjust_lora_scale_text_encoder
@@ -1153,8 +1151,6 @@ def upcast_vae(self):
11531151
(
11541152
AttnProcessor2_0,
11551153
XFormersAttnProcessor,
1156-
LoRAXFormersAttnProcessor,
1157-
LoRAAttnProcessor2_0,
11581154
FusedAttnProcessor2_0,
11591155
),
11601156
)

examples/community/pipeline_stable_diffusion_xl_controlnet_adapter.py

Lines changed: 2 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -25,12 +25,7 @@
2525
from diffusers.image_processor import PipelineImageInput, VaeImageProcessor
2626
from diffusers.loaders import FromSingleFileMixin, StableDiffusionXLLoraLoaderMixin, TextualInversionLoaderMixin
2727
from diffusers.models import AutoencoderKL, ControlNetModel, MultiAdapter, T2IAdapter, UNet2DConditionModel
28-
from diffusers.models.attention_processor import (
29-
AttnProcessor2_0,
30-
LoRAAttnProcessor2_0,
31-
LoRAXFormersAttnProcessor,
32-
XFormersAttnProcessor,
33-
)
28+
from diffusers.models.attention_processor import AttnProcessor2_0, XFormersAttnProcessor
3429
from diffusers.models.lora import adjust_lora_scale_text_encoder
3530
from diffusers.pipelines.controlnet.multicontrolnet import MultiControlNetModel
3631
from diffusers.pipelines.pipeline_utils import DiffusionPipeline, StableDiffusionMixin
@@ -797,12 +792,7 @@ def upcast_vae(self):
797792
self.vae.to(dtype=torch.float32)
798793
use_torch_2_0_or_xformers = isinstance(
799794
self.vae.decoder.mid_block.attentions[0].processor,
800-
(
801-
AttnProcessor2_0,
802-
XFormersAttnProcessor,
803-
LoRAXFormersAttnProcessor,
804-
LoRAAttnProcessor2_0,
805-
),
795+
(AttnProcessor2_0, XFormersAttnProcessor),
806796
)
807797
# if xformers or torch_2_0 is used attention block does not need
808798
# to be in float32 which can save lots of memory

examples/community/pipeline_stable_diffusion_xl_controlnet_adapter_inpaint.py

Lines changed: 2 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -44,12 +44,7 @@
4444
T2IAdapter,
4545
UNet2DConditionModel,
4646
)
47-
from diffusers.models.attention_processor import (
48-
AttnProcessor2_0,
49-
LoRAAttnProcessor2_0,
50-
LoRAXFormersAttnProcessor,
51-
XFormersAttnProcessor,
52-
)
47+
from diffusers.models.attention_processor import AttnProcessor2_0, XFormersAttnProcessor
5348
from diffusers.models.lora import adjust_lora_scale_text_encoder
5449
from diffusers.pipelines.controlnet.multicontrolnet import MultiControlNetModel
5550
from diffusers.pipelines.pipeline_utils import StableDiffusionMixin
@@ -1135,12 +1130,7 @@ def upcast_vae(self):
11351130
self.vae.to(dtype=torch.float32)
11361131
use_torch_2_0_or_xformers = isinstance(
11371132
self.vae.decoder.mid_block.attentions[0].processor,
1138-
(
1139-
AttnProcessor2_0,
1140-
XFormersAttnProcessor,
1141-
LoRAXFormersAttnProcessor,
1142-
LoRAAttnProcessor2_0,
1143-
),
1133+
(AttnProcessor2_0, XFormersAttnProcessor),
11441134
)
11451135
# if xformers or torch_2_0 is used attention block does not need
11461136
# to be in float32 which can save lots of memory

examples/community/pipeline_stable_diffusion_xl_differential_img2img.py

Lines changed: 0 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -37,8 +37,6 @@
3737
from diffusers.models import AutoencoderKL, ImageProjection, UNet2DConditionModel
3838
from diffusers.models.attention_processor import (
3939
AttnProcessor2_0,
40-
LoRAAttnProcessor2_0,
41-
LoRAXFormersAttnProcessor,
4240
XFormersAttnProcessor,
4341
)
4442
from diffusers.models.lora import adjust_lora_scale_text_encoder
@@ -854,8 +852,6 @@ def upcast_vae(self):
854852
(
855853
AttnProcessor2_0,
856854
XFormersAttnProcessor,
857-
LoRAXFormersAttnProcessor,
858-
LoRAAttnProcessor2_0,
859855
),
860856
)
861857
# if xformers or torch_2_0 is used attention block does not need

examples/community/pipeline_stable_diffusion_xl_ipex.py

Lines changed: 0 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -34,8 +34,6 @@
3434
from diffusers.models import AutoencoderKL, UNet2DConditionModel
3535
from diffusers.models.attention_processor import (
3636
AttnProcessor2_0,
37-
LoRAAttnProcessor2_0,
38-
LoRAXFormersAttnProcessor,
3937
XFormersAttnProcessor,
4038
)
4139
from diffusers.models.lora import adjust_lora_scale_text_encoder
@@ -662,8 +660,6 @@ def upcast_vae(self):
662660
(
663661
AttnProcessor2_0,
664662
XFormersAttnProcessor,
665-
LoRAXFormersAttnProcessor,
666-
LoRAAttnProcessor2_0,
667663
),
668664
)
669665
# if xformers or torch_2_0 is used attention block does not need

0 commit comments

Comments (0)