Commit 71c8224

Moving certain pipelines slow tests to nightly (#4469)
* move audioldm tests to nightly
* move kandinsky im2img ddpm test to nightly
* move flax dpm test to nightly
* move diffedit dpm test to nightly
* move fp16 slow tests to nightly
1 parent 4367b8a · commit 71c8224
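For context, `nightly` and `slow` are opt-in test markers exposed via `diffusers.utils` / `diffusers.utils.testing_utils`. Below is a minimal sketch of how such an environment-variable gate is typically implemented; the `RUN_NIGHTLY` variable name and the helper function are assumptions for illustration, not code from this commit:

# Minimal sketch of an env-var-gated test marker (illustrative only).
import os
import unittest


def parse_flag_from_env(key, default=False):
    # Treat "yes"/"true"/"1" (any case) as enabled.
    value = os.environ.get(key, "")
    return value.lower() in ("yes", "true", "1") if value else default


_run_nightly_tests = parse_flag_from_env("RUN_NIGHTLY", default=False)


def nightly(test_case):
    # Skip the decorated test (or test class) unless RUN_NIGHTLY is set,
    # so these expensive cases only run in the scheduled nightly CI job.
    return unittest.skipUnless(_run_nightly_tests, "test is nightly")(test_case)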

File tree

5 files changed: +114 −5 lines changed

  tests/pipelines/audioldm/test_audioldm.py
  tests/pipelines/kandinsky/test_kandinsky_img2img.py
  tests/pipelines/stable_diffusion_2/test_stable_diffusion_diffedit.py
  tests/pipelines/stable_diffusion_2/test_stable_diffusion_flax.py
  tests/pipelines/unidiffuser/test_unidiffuser.py


tests/pipelines/audioldm/test_audioldm.py

Lines changed: 22 additions & 1 deletion
@@ -36,7 +36,7 @@
     PNDMScheduler,
     UNet2DConditionModel,
 )
-from diffusers.utils import is_xformers_available, slow, torch_device
+from diffusers.utils import is_xformers_available, nightly, slow, torch_device
 from diffusers.utils.testing_utils import enable_full_determinism
 
 from ..pipeline_params import TEXT_TO_AUDIO_BATCH_PARAMS, TEXT_TO_AUDIO_PARAMS
@@ -408,6 +408,27 @@ def test_audioldm(self):
         max_diff = np.abs(expected_slice - audio_slice).max()
         assert max_diff < 1e-2
 
+
+@nightly
+class AudioLDMPipelineNightlyTests(unittest.TestCase):
+    def tearDown(self):
+        super().tearDown()
+        gc.collect()
+        torch.cuda.empty_cache()
+
+    def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0):
+        generator = torch.Generator(device=generator_device).manual_seed(seed)
+        latents = np.random.RandomState(seed).standard_normal((1, 8, 128, 16))
+        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
+        inputs = {
+            "prompt": "A hammer hitting a wooden surface",
+            "latents": latents,
+            "generator": generator,
+            "num_inference_steps": 3,
+            "guidance_scale": 2.5,
+        }
+        return inputs
+
     def test_audioldm_lms(self):
         audioldm_pipe = AudioLDMPipeline.from_pretrained("cvssp/audioldm")
         audioldm_pipe.scheduler = LMSDiscreteScheduler.from_config(audioldm_pipe.scheduler.config)
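Note how the move works in this and the following files: the new `@nightly` class is inserted just above untouched methods such as `def test_audioldm_lms(self):` (shown as unchanged diff context), so those methods quietly become members of the new nightly class rather than the earlier slow-test class. A schematic illustration with hypothetical class and test names, not code from the commit:

import unittest

from diffusers.utils import nightly, slow  # markers used by this commit


@slow
class ExamplePipelineSlowTests(unittest.TestCase):  # hypothetical name
    def test_cheap_enough_for_slow_ci(self):
        pass


# Newly inserted block: every def that follows it in the file, even if the
# diff shows it as unchanged context, is now collected under this class.
@nightly
class ExamplePipelineNightlyTests(unittest.TestCase):  # hypothetical name
    def test_expensive(self):
        pass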

tests/pipelines/kandinsky/test_kandinsky_img2img.py

Lines changed: 11 additions & 1 deletion
@@ -31,7 +31,7 @@
     VQModel,
 )
 from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
-from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
+from diffusers.utils import floats_tensor, load_image, load_numpy, nightly, slow, torch_device
 from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
 
 from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
@@ -344,6 +344,16 @@ def test_kandinsky_img2img(self):
 
         assert_mean_pixel_difference(image, expected_image)
 
+
+@nightly
+@require_torch_gpu
+class KandinskyImg2ImgPipelineNightlyTests(unittest.TestCase):
+    def tearDown(self):
+        # clean up the VRAM after each test
+        super().tearDown()
+        gc.collect()
+        torch.cuda.empty_cache()
+
     def test_kandinsky_img2img_ddpm(self):
         expected_image = load_numpy(
             "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"

tests/pipelines/stable_diffusion_2/test_stable_diffusion_diffedit.py

Lines changed: 20 additions & 1 deletion
@@ -32,7 +32,7 @@
     StableDiffusionDiffEditPipeline,
     UNet2DConditionModel,
 )
-from diffusers.utils import load_image, slow
+from diffusers.utils import load_image, nightly, slow
 from diffusers.utils.testing_utils import enable_full_determinism, floats_tensor, require_torch_gpu, torch_device
 
 from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
@@ -347,6 +347,25 @@ def test_stable_diffusion_diffedit_full(self):
         )
         assert np.abs((expected_image - image).max()) < 5e-1
 
+
+@nightly
+@require_torch_gpu
+class StableDiffusionDiffEditPipelineNightlyTests(unittest.TestCase):
+    def tearDown(self):
+        super().tearDown()
+        gc.collect()
+        torch.cuda.empty_cache()
+
+    @classmethod
+    def setUpClass(cls):
+        raw_image = load_image(
+            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/diffedit/fruit.png"
+        )
+
+        raw_image = raw_image.convert("RGB").resize((768, 768))
+
+        cls.raw_image = raw_image
+
     def test_stable_diffusion_diffedit_dpm(self):
         generator = torch.manual_seed(0)

tests/pipelines/stable_diffusion_2/test_stable_diffusion_flax.py

Lines changed: 10 additions & 1 deletion
@@ -17,7 +17,7 @@
 import unittest
 
 from diffusers import FlaxDPMSolverMultistepScheduler, FlaxStableDiffusionPipeline
-from diffusers.utils import is_flax_available, slow
+from diffusers.utils import is_flax_available, nightly, slow
 from diffusers.utils.testing_utils import require_flax
 
 
@@ -65,6 +65,15 @@ def test_stable_diffusion_flax(self):
         print(f"output_slice: {output_slice}")
         assert jnp.abs(output_slice - expected_slice).max() < 1e-2
 
+
+@nightly
+@require_flax
+class FlaxStableDiffusion2PipelineNightlyTests(unittest.TestCase):
+    def tearDown(self):
+        # clean up the VRAM after each test
+        super().tearDown()
+        gc.collect()
+
     def test_stable_diffusion_dpm_flax(self):
         model_id = "stabilityai/stable-diffusion-2"
         scheduler, scheduler_params = FlaxDPMSolverMultistepScheduler.from_pretrained(model_id, subfolder="scheduler")

tests/pipelines/unidiffuser/test_unidiffuser.py

Lines changed: 51 additions & 1 deletion
@@ -20,7 +20,7 @@
     UniDiffuserPipeline,
     UniDiffuserTextDecoder,
 )
-from diffusers.utils import floats_tensor, load_image, randn_tensor, slow, torch_device
+from diffusers.utils import floats_tensor, load_image, nightly, randn_tensor, slow, torch_device
 from diffusers.utils.testing_utils import require_torch_gpu
 
 from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
@@ -619,6 +619,56 @@ def test_unidiffuser_default_img2text_v1(self):
         expected_text_prefix = "An astronaut"
         assert text[0][: len(expected_text_prefix)] == expected_text_prefix
 
+
+@nightly
+@require_torch_gpu
+class UniDiffuserPipelineNightlyTests(unittest.TestCase):
+    def tearDown(self):
+        super().tearDown()
+        gc.collect()
+        torch.cuda.empty_cache()
+
+    def get_inputs(self, device, seed=0, generate_latents=False):
+        generator = torch.manual_seed(seed)
+        image = load_image(
+            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/unidiffuser/unidiffuser_example_image.jpg"
+        )
+        inputs = {
+            "prompt": "an elephant under the sea",
+            "image": image,
+            "generator": generator,
+            "num_inference_steps": 3,
+            "guidance_scale": 8.0,
+            "output_type": "numpy",
+        }
+        if generate_latents:
+            latents = self.get_fixed_latents(device, seed=seed)
+            for latent_name, latent_tensor in latents.items():
+                inputs[latent_name] = latent_tensor
+        return inputs
+
+    def get_fixed_latents(self, device, seed=0):
+        if type(device) == str:
+            device = torch.device(device)
+        latent_device = torch.device("cpu")
+        generator = torch.Generator(device=latent_device).manual_seed(seed)
+        # Hardcode the shapes for now.
+        prompt_latents = randn_tensor((1, 77, 768), generator=generator, device=device, dtype=torch.float32)
+        vae_latents = randn_tensor((1, 4, 64, 64), generator=generator, device=device, dtype=torch.float32)
+        clip_latents = randn_tensor((1, 1, 512), generator=generator, device=device, dtype=torch.float32)
+
+        # Move latents onto desired device.
+        prompt_latents = prompt_latents.to(device)
+        vae_latents = vae_latents.to(device)
+        clip_latents = clip_latents.to(device)
+
+        latents = {
+            "prompt_latents": prompt_latents,
+            "vae_latents": vae_latents,
+            "clip_latents": clip_latents,
+        }
+        return latents
+
     def test_unidiffuser_default_joint_v1_fp16(self):
         pipe = UniDiffuserPipeline.from_pretrained("thu-ml/unidiffuser-v1", torch_dtype=torch.float16)
         pipe.to(torch_device)
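The `get_fixed_latents` helper above pins the prompt, VAE, and CLIP noise tensors so the nightly UniDiffuser tests produce reproducible outputs. A small self-contained sketch of the underlying idea, with the prompt-latent shape copied from the diff and everything else illustrative:

# Seeding a CPU generator makes randn_tensor reproducible across runs,
# which is what get_fixed_latents relies on; illustrative sketch only.
import torch
from diffusers.utils import randn_tensor

generator_a = torch.Generator(device="cpu").manual_seed(0)
generator_b = torch.Generator(device="cpu").manual_seed(0)

prompt_latents_a = randn_tensor((1, 77, 768), generator=generator_a, dtype=torch.float32)
prompt_latents_b = randn_tensor((1, 77, 768), generator=generator_b, dtype=torch.float32)

assert torch.equal(prompt_latents_a, prompt_latents_b)  # same seed -> identical latents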
