diff --git a/docs/source/en/api/attnprocessor.md b/docs/source/en/api/attnprocessor.md
index 7a4812e0961e..0b11c1f5bc5d 100644
--- a/docs/source/en/api/attnprocessor.md
+++ b/docs/source/en/api/attnprocessor.md
@@ -17,6 +17,9 @@ An attention processor is a class for applying different types of attention mech
 ## CustomDiffusionAttnProcessor
 [[autodoc]] models.attention_processor.CustomDiffusionAttnProcessor
 
+## CustomDiffusionAttnProcessor2_0
+[[autodoc]] models.attention_processor.CustomDiffusionAttnProcessor2_0
+
 ## AttnAddedKVProcessor
 [[autodoc]] models.attention_processor.AttnAddedKVProcessor
 
@@ -39,4 +42,4 @@ An attention processor is a class for applying different types of attention mech
 [[autodoc]] models.attention_processor.SlicedAttnProcessor
 
 ## SlicedAttnAddedKVProcessor
-[[autodoc]] models.attention_processor.SlicedAttnAddedKVProcessor
\ No newline at end of file
+[[autodoc]] models.attention_processor.SlicedAttnAddedKVProcessor
diff --git a/src/diffusers/models/attention_processor.py b/src/diffusers/models/attention_processor.py
index ea226836932e..46e9b92ca91c 100644
--- a/src/diffusers/models/attention_processor.py
+++ b/src/diffusers/models/attention_processor.py
@@ -1468,7 +1468,8 @@ def __call__(self, attn: Attention, hidden_states, encoder_hidden_states=None, a
 class CustomDiffusionAttnProcessor2_0(nn.Module):
     r"""
-    Processor for implementing attention for the Custom Diffusion method
+    Processor for implementing attention for the Custom Diffusion method using PyTorch 2.0’s memory-efficient scaled
+    dot-product attention.
 
     Args:
         train_kv (`bool`, defaults to `True`):
@@ -1758,6 +1759,7 @@ def __call__(self, attn: "Attention", hidden_states, encoder_hidden_states=None,
     LoRAAttnAddedKVProcessor,
     CustomDiffusionAttnProcessor,
     CustomDiffusionXFormersAttnProcessor,
+    CustomDiffusionAttnProcessor2_0,
 ]
 
 LORA_ATTENTION_PROCESSORS = (
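
Reviewer note, not part of the patch: below is a minimal sketch of how the newly documented CustomDiffusionAttnProcessor2_0 might be attached to a Stable Diffusion UNet on PyTorch >= 2.0, mirroring the block-traversal pattern used by the Custom Diffusion training script. The checkpoint id and the train_kv/train_q_out choices are illustrative assumptions, not taken from this diff.

# Hedged sketch: install CustomDiffusionAttnProcessor2_0 on every attention
# layer of a UNet; requires torch >= 2.0 for scaled_dot_product_attention.
from diffusers import UNet2DConditionModel
from diffusers.models.attention_processor import CustomDiffusionAttnProcessor2_0

# Checkpoint id is an assumption for illustration.
unet = UNet2DConditionModel.from_pretrained(
    "runwayml/stable-diffusion-v1-5", subfolder="unet"
)

attn_procs = {}
for name in unet.attn_processors.keys():
    # "attn1" layers are self-attention and have no text conditioning.
    is_cross = not name.endswith("attn1.processor")
    cross_attention_dim = unet.config.cross_attention_dim if is_cross else None
    # Recover the hidden size of the block this processor belongs to.
    if name.startswith("mid_block"):
        hidden_size = unet.config.block_out_channels[-1]
    elif name.startswith("up_blocks"):
        block_id = int(name[len("up_blocks.")])
        hidden_size = list(reversed(unet.config.block_out_channels))[block_id]
    else:  # down_blocks
        block_id = int(name[len("down_blocks.")])
        hidden_size = unet.config.block_out_channels[block_id]
    # Train new K/V projections only on cross-attention layers (assumption).
    attn_procs[name] = CustomDiffusionAttnProcessor2_0(
        train_kv=is_cross,
        train_q_out=False,
        hidden_size=hidden_size,
        cross_attention_dim=cross_attention_dim,
    )
unet.set_attn_processor(attn_procs)

Functionally this plays the same role as CustomDiffusionXFormersAttnProcessor, but it relies on the native torch.nn.functional.scaled_dot_product_attention rather than the xformers dependency, which is why the patch registers it alongside the other cross-attention processors.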