Skip to content

Commit 2952406

Browse files
committed
Add draft tests for DiffEdit
1 parent 2cf2fa9 commit 2952406

File tree

1 file changed

+85
-0
lines changed

1 file changed

+85
-0
lines changed

tests/pipelines/stable_diffusion_2/test_stable_diffusion_diffedit.py

Lines changed: 85 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -27,6 +27,8 @@
2727
AutoencoderKL,
2828
DDIMInverseScheduler,
2929
DDIMScheduler,
30+
DPMSolverMultistepInverseScheduler,
31+
DPMSolverMultistepScheduler,
3032
StableDiffusionDiffEditPipeline,
3133
UNet2DConditionModel,
3234
)
@@ -74,6 +76,13 @@ def get_dummy_components(self):
7476
clip_sample=False,
7577
set_alpha_to_zero=False,
7678
)
79+
inverse_scheduler = DPMSolverMultistepInverseScheduler(
80+
beta_start=0.00085,
81+
beta_end=0.012,
82+
beta_schedule="scaled_linear",
83+
clip_sample=False,
84+
set_alpha_to_zero=False,
85+
)
7786
torch.manual_seed(0)
7887
vae = AutoencoderKL(
7988
block_out_channels=[32, 64],
@@ -249,6 +258,31 @@ def test_inversion(self):
249258
max_diff = np.abs(image_slice.flatten() - expected_slice).max()
250259
self.assertLessEqual(max_diff, 1e-3)
251260

261+
def test_inversion_dpm(self):
    """Check image inversion when both schedulers use the DPM-Solver++ multistep family.

    Swaps the pipeline's forward scheduler for ``DPMSolverMultistepScheduler`` and
    its inverse scheduler for ``DPMSolverMultistepInverseScheduler``, runs
    ``pipe.invert`` on the dummy inputs on CPU, and compares a corner slice of the
    inverted image against a reference slice.
    """
    device = "cpu"

    components = self.get_dummy_components()

    # Both schedulers must be replaced as a matched pair: __call__ uses
    # "scheduler" while invert() uses "inverse_scheduler". The draft assigned
    # only "scheduler" (and to the *inverse* class), which left invert() on the
    # default DDIM inverse scheduler. Note clip_sample / set_alpha_to_zero are
    # DDIM-only arguments and are not accepted by the DPM-Solver constructors.
    scheduler_args = {
        "beta_start": 0.00085,
        "beta_end": 0.012,
        "beta_schedule": "scaled_linear",
    }
    components["scheduler"] = DPMSolverMultistepScheduler(**scheduler_args)
    components["inverse_scheduler"] = DPMSolverMultistepInverseScheduler(**scheduler_args)

    pipe = self.pipeline_class(**components)
    pipe.to(device)
    pipe.set_progress_bar_config(disable=None)

    inputs = self.get_dummy_inversion_inputs(device)
    image = pipe.invert(**inputs).images
    image_slice = image[0, -1, -3:, -3:]

    # Two dummy images, 32x32, 3 channels.
    self.assertEqual(image.shape, (2, 32, 32, 3))
    # NOTE(review): reference slice copied from the draft; it was produced with
    # the mis-wired schedulers, so it may need regenerating once the DPM pair
    # is actually exercised — confirm against a fresh run.
    expected_slice = np.array(
        [0.5150, 0.5134, 0.5043, 0.5376, 0.4694, 0.51050, 0.5015, 0.4407, 0.4799],
    )
    max_diff = np.abs(image_slice.flatten() - expected_slice).max()
    self.assertLessEqual(max_diff, 1e-3)
285+
252286

253287
@require_torch_gpu
254288
@slow
@@ -313,3 +347,54 @@ def test_stable_diffusion_diffedit_full(self):
313347
/ 255
314348
)
315349
assert np.abs((expected_image - image).max()) < 5e-1
350+
351+
def test_stable_diffusion_diffedit_dpm(self):
    """Slow end-to-end DiffEdit run using the DPM-Solver++ multistep scheduler pair.

    Loads Stable Diffusion 2.1 in fp16, generates an edit mask from a
    source/target prompt pair, inverts the source image, performs the guided
    edit, and compares the output against a stored reference rendering.
    """
    rng = torch.manual_seed(0)

    pipe = StableDiffusionDiffEditPipeline.from_pretrained(
        "stabilityai/stable-diffusion-2-1", safety_checker=None, torch_dtype=torch.float16
    )
    # Replace both schedulers as a matched pair; the inverse scheduler is
    # built from the (already swapped) forward scheduler's config.
    pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
    pipe.inverse_scheduler = DPMSolverMultistepInverseScheduler.from_config(pipe.scheduler.config)
    pipe.enable_model_cpu_offload()
    pipe.set_progress_bar_config(disable=None)

    source_prompt = "a bowl of fruit"
    target_prompt = "a bowl of pears"

    # Step 1: mask of the region that differs between the two prompts.
    mask = pipe.generate_mask(
        image=self.raw_image,
        source_prompt=source_prompt,
        target_prompt=target_prompt,
        generator=rng,
    )

    # Step 2: partial inversion of the source image (70% of the trajectory).
    latents = pipe.invert(
        prompt=source_prompt,
        image=self.raw_image,
        inpaint_strength=0.7,
        generator=rng,
        num_inference_steps=25,
    ).latents

    # Step 3: masked denoising toward the target prompt.
    result = pipe(
        prompt=target_prompt,
        mask_image=mask,
        image_latents=latents,
        generator=rng,
        negative_prompt=source_prompt,
        inpaint_strength=0.7,
        num_inference_steps=25,
        output_type="numpy",
    ).images[0]

    reference_url = (
        "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
        "/diffedit/pears.png"
    )
    expected_image = np.array(load_image(reference_url).resize((768, 768))) / 255

    # Loose tolerance: fp16 + scheduler swap makes exact reproduction fragile.
    assert np.abs((expected_image - result).max()) < 5e-1

0 commit comments

Comments
 (0)