From 7b782bc96b0939bbdc809b8b88f40d3fdd284fa5 Mon Sep 17 00:00:00 2001
From: Ruoxi
Date: Wed, 16 Aug 2023 09:07:25 +0800
Subject: [PATCH 1/2] Update attnprocessor.md

---
 docs/source/en/api/attnprocessor.md | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/docs/source/en/api/attnprocessor.md b/docs/source/en/api/attnprocessor.md
index 7a4812e0961e..0b11c1f5bc5d 100644
--- a/docs/source/en/api/attnprocessor.md
+++ b/docs/source/en/api/attnprocessor.md
@@ -17,6 +17,9 @@ An attention processor is a class for applying different types of attention mech
 ## CustomDiffusionAttnProcessor
 [[autodoc]] models.attention_processor.CustomDiffusionAttnProcessor
 
+## CustomDiffusionAttnProcessor2_0
+[[autodoc]] models.attention_processor.CustomDiffusionAttnProcessor2_0
+
 ## AttnAddedKVProcessor
 [[autodoc]] models.attention_processor.AttnAddedKVProcessor
 
@@ -39,4 +42,4 @@ An attention processor is a class for applying different types of attention mech
 [[autodoc]] models.attention_processor.SlicedAttnProcessor
 
 ## SlicedAttnAddedKVProcessor
-[[autodoc]] models.attention_processor.SlicedAttnAddedKVProcessor
\ No newline at end of file
+[[autodoc]] models.attention_processor.SlicedAttnAddedKVProcessor

From 32ece777434ba9b5674228ddab731e55efe5c347 Mon Sep 17 00:00:00 2001
From: Ruoxi
Date: Wed, 16 Aug 2023 09:10:59 +0800
Subject: [PATCH 2/2] Update attention_processor.py

---
 src/diffusers/models/attention_processor.py | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/src/diffusers/models/attention_processor.py b/src/diffusers/models/attention_processor.py
index ea226836932e..46e9b92ca91c 100644
--- a/src/diffusers/models/attention_processor.py
+++ b/src/diffusers/models/attention_processor.py
@@ -1468,7 +1468,8 @@ def __call__(self, attn: Attention, hidden_states, encoder_hidden_states=None, a
 
 class CustomDiffusionAttnProcessor2_0(nn.Module):
     r"""
-    Processor for implementing attention for the Custom Diffusion method.
+    Processor for implementing attention for the Custom Diffusion method
+    using PyTorch 2.0's memory-efficient scaled dot-product attention.
 
     Args:
         train_kv (`bool`, defaults to `True`):
@@ -1758,6 +1759,7 @@ def __call__(self, attn: "Attention", hidden_states, encoder_hidden_states=None,
     LoRAAttnAddedKVProcessor,
     CustomDiffusionAttnProcessor,
     CustomDiffusionXFormersAttnProcessor,
+    CustomDiffusionAttnProcessor2_0,
 ]
 
 LORA_ATTENTION_PROCESSORS = (
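
Note (not part of the patches themselves): below is a minimal sketch of how the newly documented and exported `CustomDiffusionAttnProcessor2_0` is typically wired into a UNet, mirroring the Custom Diffusion training setup in diffusers. The checkpoint name is an illustrative placeholder, and the name-parsing and `hasattr` fallback check follow the pattern used in the library's Custom Diffusion example rather than anything introduced by this patch series.

```python
# Sketch only: pick the PyTorch 2.0 processor when the fused SDPA kernel is
# available, then build one processor per attention module of a UNet.
import torch.nn.functional as F
from diffusers import UNet2DConditionModel
from diffusers.models.attention_processor import (
    CustomDiffusionAttnProcessor,
    CustomDiffusionAttnProcessor2_0,
)

# PyTorch >= 2.0 exposes the fused kernel as F.scaled_dot_product_attention;
# older versions fall back to the vanilla processor.
attention_class = (
    CustomDiffusionAttnProcessor2_0
    if hasattr(F, "scaled_dot_product_attention")
    else CustomDiffusionAttnProcessor
)

unet = UNet2DConditionModel.from_pretrained(
    "runwayml/stable-diffusion-v1-5", subfolder="unet"  # illustrative checkpoint
)

attn_procs = {}
for name in unet.attn_processors.keys():
    # Self-attention layers ("attn1") take no cross-attention input and stay
    # frozen; only the cross-attention K/V projections are trained.
    is_cross = not name.endswith("attn1.processor")
    cross_attention_dim = unet.config.cross_attention_dim if is_cross else None

    # Recover the channel width of the block this processor belongs to.
    if name.startswith("mid_block"):
        hidden_size = unet.config.block_out_channels[-1]
    elif name.startswith("up_blocks"):
        block_id = int(name[len("up_blocks.")])
        hidden_size = list(reversed(unet.config.block_out_channels))[block_id]
    else:  # down_blocks
        block_id = int(name[len("down_blocks.")])
        hidden_size = unet.config.block_out_channels[block_id]

    attn_procs[name] = attention_class(
        train_kv=is_cross,
        train_q_out=False,
        hidden_size=hidden_size,
        cross_attention_dim=cross_attention_dim,
    )

unet.set_attn_processor(attn_procs)
```

Because the 2.0 processor takes the same constructor arguments as the plain `CustomDiffusionAttnProcessor`, the fallback swap above is a one-line change, which is also why adding it to the `AttentionProcessor` union in the second patch is sufficient to integrate it.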