@@ -81,7 +81,6 @@ class AltDiffusionImg2ImgPipeline(DiffusionPipeline):
             Model that extracts features from generated images to be used as inputs for the `safety_checker`.
     """
 
-    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.AltDiffusionPipeline.__init__
     def __init__(
         self,
         vae: AutoencoderKL,

Review comment from the author on the removed line above: "These lines ended up here by accident before, because we have a `# Copied from` on the whole class."
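For context, the mechanism behind that comment works roughly as sketched below. This is a minimal sketch: the exact source class path and replace rule on the real class-level annotation are assumptions, not quoted from the repository.

from diffusers import DiffusionPipeline

# A class-level annotation like the following tells the copy checker to mirror
# the entire body of the source class, applying the textual rule after "with":
#
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.StableDiffusionImg2ImgPipeline with Stable->Alt
class AltDiffusionImg2ImgPipeline(DiffusionPipeline):
    # Method-level `# Copied from ...StableDiffusionPipeline.<method>` comments
    # inside the source class get mirrored too, and the Stable->Alt rule
    # rewrites them into `...AltDiffusionPipeline.<method>`, a target that does
    # not exist in that module. Hence the deletions in this diff.
    ...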
@@ -148,7 +147,6 @@ def __init__(
             feature_extractor=feature_extractor,
         )
 
-    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.AltDiffusionPipeline.enable_attention_slicing
     def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
         r"""
         Enable sliced attention computation.
@@ -168,7 +166,6 @@ def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
             slice_size = self.unet.config.attention_head_dim // 2
         self.unet.set_attention_slice(slice_size)
 
-    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.AltDiffusionPipeline.disable_attention_slicing
     def disable_attention_slicing(self):
         r"""
         Disable sliced attention computation. If `enable_attention_slicing` was previously invoked, this method will go
@@ -177,7 +174,6 @@ def disable_attention_slicing(self):
         # set slice_size = `None` to disable `attention slicing`
         self.enable_attention_slicing(None)
 
-    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.AltDiffusionPipeline.enable_sequential_cpu_offload
     def enable_sequential_cpu_offload(self, gpu_id=0):
         r"""
         Offloads all models to CPU using accelerate, significantly reducing memory usage. When called, unet,
@@ -196,7 +192,6 @@ def enable_sequential_cpu_offload(self, gpu_id=0):
             cpu_offload(cpu_offloaded_model, device)
 
     @property
-    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.AltDiffusionPipeline._execution_device
     def _execution_device(self):
         r"""
         Returns the device on which the pipeline's models will be executed. After calling
@@ -214,7 +209,6 @@ def _execution_device(self):
                 return torch.device(module._hf_hook.execution_device)
         return self.device
 
-    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.AltDiffusionPipeline.enable_xformers_memory_efficient_attention
     def enable_xformers_memory_efficient_attention(self):
         r"""
         Enable memory efficient attention as implemented in xformers.
@@ -227,14 +221,12 @@ def enable_xformers_memory_efficient_attention(self):
         """
         self.unet.set_use_memory_efficient_attention_xformers(True)
 
-    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.AltDiffusionPipeline.disable_xformers_memory_efficient_attention
     def disable_xformers_memory_efficient_attention(self):
         r"""
         Disable memory efficient attention as implemented in xformers.
         """
         self.unet.set_use_memory_efficient_attention_xformers(False)
 
-    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.AltDiffusionPipeline._encode_prompt
     def _encode_prompt(self, prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt):
         r"""
         Encodes the prompt into text encoder hidden states.
@@ -340,7 +332,6 @@ def _encode_prompt(self, prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt):
 
         return text_embeddings
 
-    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.AltDiffusionPipeline.run_safety_checker
     def run_safety_checker(self, image, device, dtype):
         if self.safety_checker is not None:
             safety_checker_input = self.feature_extractor(self.numpy_to_pil(image), return_tensors="pt").to(device)
@@ -351,7 +342,6 @@ def run_safety_checker(self, image, device, dtype):
             has_nsfw_concept = None
         return image, has_nsfw_concept
 
-    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.AltDiffusionPipeline.decode_latents
     def decode_latents(self, latents):
         latents = 1 / 0.18215 * latents
         image = self.vae.decode(latents).sample
@@ -360,7 +350,6 @@ def decode_latents(self, latents):
         image = image.cpu().permute(0, 2, 3, 1).float().numpy()
         return image
 
-    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.AltDiffusionPipeline.prepare_extra_step_kwargs
     def prepare_extra_step_kwargs(self, generator, eta):
         # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
         # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
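The methods touched above are the pipeline's standard memory-saving helpers; only their `# Copied from` annotations change in this diff. A usage sketch, assuming a CUDA device; the checkpoint id below is illustrative, substitute the one you actually use.

import torch
from diffusers import AltDiffusionImg2ImgPipeline

# Repo id is a placeholder for any AltDiffusion img2img checkpoint.
pipe = AltDiffusionImg2ImgPipeline.from_pretrained("BAAI/AltDiffusion-m9", torch_dtype=torch.float16)
pipe = pipe.to("cuda")

# Trade a little speed for a smaller peak memory footprint:
pipe.enable_attention_slicing()
pipe.enable_xformers_memory_efficient_attention()  # requires the xformers package

# Or offload submodules to CPU between forward passes
# (use instead of .to("cuda"), not together with it):
# pipe.enable_sequential_cpu_offload()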
utils/check_copies.py (4 additions, 0 deletions)

@@ -153,6 +153,10 @@ def is_copy_consistent(filename, overwrite=False):
         observed_code_lines = lines[start_index:line_index]
         observed_code = "".join(observed_code_lines)
 
+        # Remove any nested `Copied from` comments to avoid circular copies
+        theoretical_code = [line for line in theoretical_code.split("\n") if _re_copy_warning.search(line) is None]
+        theoretical_code = "\n".join(theoretical_code)
+
         # Before comparing, use the `replace_pattern` on the original code.
         if len(replace_pattern) > 0:
             patterns = replace_pattern.replace("with", "").split(",")
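A self-contained sketch of what the new filtering step does. The regex below is an approximation of the script's `_re_copy_warning` pattern, not a copy of it:

import re

# Approximation of `_re_copy_warning`: matches a `# Copied from diffusers....`
# comment line, with any leading indentation.
_re_copy_warning = re.compile(r"^\s*#\s*Copied from\s+diffusers\.")

theoretical_code = (
    "    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.AltDiffusionPipeline.decode_latents\n"
    "    def decode_latents(self, latents):\n"
    "        latents = 1 / 0.18215 * latents"
)

# The new step: drop any nested `Copied from` lines from the reference code
# before comparing it against the observed copy.
filtered = [line for line in theoretical_code.split("\n") if _re_copy_warning.search(line) is None]
print("\n".join(filtered))

With the nested comment stripped out of `theoretical_code`, the consistency check no longer chases a `Copied from` line that the replace pattern has rewritten to point at a non-existent target.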