diff --git a/src/diffusers/pipeline_utils.py b/src/diffusers/pipeline_utils.py
index 35996d65072c..81118967aade 100644
--- a/src/diffusers/pipeline_utils.py
+++ b/src/diffusers/pipeline_utils.py
@@ -32,7 +32,19 @@
 from .configuration_utils import ConfigMixin
 from .dynamic_modules_utils import get_class_from_dynamic_module
 from .schedulers.scheduling_utils import SCHEDULER_CONFIG_NAME
-from .utils import CONFIG_NAME, DIFFUSERS_CACHE, ONNX_WEIGHTS_NAME, WEIGHTS_NAME, BaseOutput, logging
+from .utils import (
+    CONFIG_NAME,
+    DIFFUSERS_CACHE,
+    ONNX_WEIGHTS_NAME,
+    WEIGHTS_NAME,
+    BaseOutput,
+    is_transformers_available,
+    logging,
+)
+
+
+if is_transformers_available():
+    from transformers import PreTrainedModel
 
 
 INDEX_FILE = "diffusion_pytorch_model.bin"
@@ -338,6 +350,7 @@ def from_pretrained(cls, pretrained_model_name_or_path: Optional[Union[str, os.P
         custom_pipeline = kwargs.pop("custom_pipeline", None)
         provider = kwargs.pop("provider", None)
         sess_options = kwargs.pop("sess_options", None)
+        device_map = kwargs.pop("device_map", None)
 
         # 1. Download the checkpoints and configs
         # use snapshot download here to get it working from from_pretrained
@@ -463,6 +476,13 @@ def from_pretrained(cls, pretrained_model_name_or_path: Optional[Union[str, os.P
                 loading_kwargs["provider"] = provider
                 loading_kwargs["sess_options"] = sess_options
 
+            if (
+                issubclass(class_obj, diffusers.ModelMixin)
+                or is_transformers_available()
+                and issubclass(class_obj, PreTrainedModel)
+            ):
+                loading_kwargs["device_map"] = device_map
+
             # check if the module is in a subdirectory
             if os.path.isdir(os.path.join(cached_folder, name)):
                 loaded_sub_model = load_method(os.path.join(cached_folder, name), **loading_kwargs)
diff --git a/src/diffusers/pipelines/stable_diffusion/safety_checker.py b/src/diffusers/pipelines/stable_diffusion/safety_checker.py
index 773a7d4b2107..3984171f57db 100644
--- a/src/diffusers/pipelines/stable_diffusion/safety_checker.py
+++ b/src/diffusers/pipelines/stable_diffusion/safety_checker.py
@@ -19,6 +19,8 @@ def cosine_distance(image_embeds, text_embeds):
 class StableDiffusionSafetyChecker(PreTrainedModel):
     config_class = CLIPConfig
 
+    _no_split_modules = ["CLIPEncoderLayer"]
+
     def __init__(self, config: CLIPConfig):
         super().__init__(config)
 
@@ -28,8 +30,8 @@ def __init__(self, config: CLIPConfig):
         self.concept_embeds = nn.Parameter(torch.ones(17, config.projection_dim), requires_grad=False)
         self.special_care_embeds = nn.Parameter(torch.ones(3, config.projection_dim), requires_grad=False)
 
-        self.register_buffer("concept_embeds_weights", torch.ones(17))
-        self.register_buffer("special_care_embeds_weights", torch.ones(3))
+        self.concept_embeds_weights = nn.Parameter(torch.ones(17), requires_grad=False)
+        self.special_care_embeds_weights = nn.Parameter(torch.ones(3), requires_grad=False)
 
     @torch.no_grad()
     def forward(self, clip_input, images):
diff --git a/tests/test_pipelines.py b/tests/test_pipelines.py
index 4bf1e5e47c8e..30beb033fca7 100644
--- a/tests/test_pipelines.py
+++ b/tests/test_pipelines.py
@@ -17,12 +17,15 @@
 import os
 import random
 import tempfile
+import tracemalloc
 import unittest
 
 import numpy as np
 import torch
 
+import accelerate
 import PIL
+import transformers
 from diffusers import (
     AutoencoderKL,
     DDIMPipeline,
@@ -50,6 +53,7 @@
 from diffusers.schedulers.scheduling_utils import SCHEDULER_CONFIG_NAME
 from diffusers.utils import CONFIG_NAME, WEIGHTS_NAME, floats_tensor, load_image, slow, torch_device
 from diffusers.utils.testing_utils import get_tests_dir
+from packaging import version
 from PIL import Image
 from transformers import CLIPFeatureExtractor, CLIPModel, CLIPTextConfig, CLIPTextModel, CLIPTokenizer
 
@@ -2034,3 +2038,53 @@ def test_callback_fn(step: int, timestep: int, latents: np.ndarray) -> None:
         pipe(prompt=prompt, num_inference_steps=5, guidance_scale=7.5, callback=test_callback_fn, callback_steps=1)
         assert test_callback_fn.has_been_called
         assert number_of_steps == 6
+
+    @slow
+    @unittest.skipIf(torch_device == "cpu", "Stable diffusion is supposed to run on GPU")
+    def test_stable_diffusion_accelerate_load_works(self):
+        if version.parse(version.parse(transformers.__version__).base_version) < version.parse("4.23"):
+            return
+
+        if version.parse(version.parse(accelerate.__version__).base_version) < version.parse("0.14"):
+            return
+
+        model_id = "CompVis/stable-diffusion-v1-4"
+        _ = StableDiffusionPipeline.from_pretrained(
+            model_id, revision="fp16", torch_dtype=torch.float16, use_auth_token=True, device_map="auto"
+        ).to(torch_device)
+
+    @slow
+    @unittest.skipIf(torch_device == "cpu", "This test is supposed to run on GPU")
+    def test_stable_diffusion_accelerate_load_reduces_memory_footprint(self):
+        if version.parse(version.parse(transformers.__version__).base_version) < version.parse("4.23"):
+            return
+
+        if version.parse(version.parse(accelerate.__version__).base_version) < version.parse("0.14"):
+            return
+
+        pipeline_id = "CompVis/stable-diffusion-v1-4"
+
+        torch.cuda.empty_cache()
+        gc.collect()
+
+        tracemalloc.start()
+        pipeline_normal_load = StableDiffusionPipeline.from_pretrained(
+            pipeline_id, revision="fp16", torch_dtype=torch.float16, use_auth_token=True
+        )
+        pipeline_normal_load.to(torch_device)
+        _, peak_normal = tracemalloc.get_traced_memory()
+        tracemalloc.stop()
+
+        del pipeline_normal_load
+        torch.cuda.empty_cache()
+        gc.collect()
+
+        tracemalloc.start()
+        _ = StableDiffusionPipeline.from_pretrained(
+            pipeline_id, revision="fp16", torch_dtype=torch.float16, use_auth_token=True, device_map="auto"
+        )
+        _, peak_accelerate = tracemalloc.get_traced_memory()
+
+        tracemalloc.stop()
+
+        assert peak_accelerate < peak_normal
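
Note for reviewers: the new `test_stable_diffusion_accelerate_load_works` test above doubles as a usage sketch of the kwarg this diff introduces. Pipelines can now be loaded with `device_map="auto"`, which is forwarded to every sub-model that is a `diffusers.ModelMixin` or a `transformers.PreTrainedModel`, so accelerate can place the weights without first materializing a full copy on CPU. A minimal sketch, assuming transformers >= 4.23, accelerate >= 0.14, a CUDA device, and Hub authentication (all taken from the version guards and arguments in the tests; nothing below is new API beyond the `device_map` kwarg added here):

    import torch
    from diffusers import StableDiffusionPipeline

    # device_map="auto" is passed through to each ModelMixin / PreTrainedModel
    # sub-model, letting accelerate dispatch weights directly to the available
    # devices and reducing peak host memory during loading.
    pipe = StableDiffusionPipeline.from_pretrained(
        "CompVis/stable-diffusion-v1-4",
        revision="fp16",
        torch_dtype=torch.float16,
        use_auth_token=True,
        device_map="auto",
    ).to("cuda")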