
Commit a7ae808

increase tolerance
1 parent ea01a4c · commit a7ae808

File tree: 2 files changed (+14 / -13 lines)


tests/models/test_models_unet_2d.py

Lines changed: 8 additions & 7 deletions
@@ -21,12 +21,13 @@
 import torch

 from diffusers import UNet2DConditionModel, UNet2DModel
-from diffusers.utils import floats_tensor, load_numpy, require_torch_gpu, slow, torch_all_close, torch_device
+from diffusers.utils import floats_tensor, load_numpy, logging, require_torch_gpu, slow, torch_all_close, torch_device
 from parameterized import parameterized

 from ..test_modeling_common import ModelTesterMixin


+logger = logging.get_logger(__name__)
 torch.backends.cuda.matmul.allow_tf32 = False


@@ -464,7 +465,7 @@ def test_compvis_sd_v1_4(self, seed, timestep, expected_slice):
         output_slice = sample[-1, -2:, -2:, :2].flatten().float().cpu()
         expected_output_slice = torch.tensor(expected_slice)

-        assert torch_all_close(output_slice, expected_output_slice, atol=1e-4)
+        assert torch_all_close(output_slice, expected_output_slice, atol=1e-3)

     @parameterized.expand(
         [
@@ -490,7 +491,7 @@ def test_compvis_sd_v1_4_fp16(self, seed, timestep, expected_slice):
         output_slice = sample[-1, -2:, -2:, :2].flatten().float().cpu()
         expected_output_slice = torch.tensor(expected_slice)

-        assert torch_all_close(output_slice, expected_output_slice, atol=1e-4)
+        assert torch_all_close(output_slice, expected_output_slice, atol=5e-3)

     @parameterized.expand(
         [
@@ -515,7 +516,7 @@ def test_compvis_sd_v1_5(self, seed, timestep, expected_slice):
         output_slice = sample[-1, -2:, -2:, :2].flatten().float().cpu()
         expected_output_slice = torch.tensor(expected_slice)

-        assert torch_all_close(output_slice, expected_output_slice, atol=1e-4)
+        assert torch_all_close(output_slice, expected_output_slice, atol=1e-3)

     @parameterized.expand(
         [
@@ -541,7 +542,7 @@ def test_compvis_sd_v1_5_fp16(self, seed, timestep, expected_slice):
         output_slice = sample[-1, -2:, -2:, :2].flatten().float().cpu()
         expected_output_slice = torch.tensor(expected_slice)

-        assert torch_all_close(output_slice, expected_output_slice, atol=1e-4)
+        assert torch_all_close(output_slice, expected_output_slice, atol=5e-3)

     @parameterized.expand(
         [
@@ -566,7 +567,7 @@ def test_compvis_sd_inpaint(self, seed, timestep, expected_slice):
         output_slice = sample[-1, -2:, -2:, :2].flatten().float().cpu()
         expected_output_slice = torch.tensor(expected_slice)

-        assert torch_all_close(output_slice, expected_output_slice, atol=1e-4)
+        assert torch_all_close(output_slice, expected_output_slice, atol=1e-3)

     @parameterized.expand(
         [
@@ -592,4 +593,4 @@ def test_compvis_sd_inpaint_fp16(self, seed, timestep, expected_slice):
         output_slice = sample[-1, -2:, -2:, :2].flatten().float().cpu()
         expected_output_slice = torch.tensor(expected_slice)

-        assert torch_all_close(output_slice, expected_output_slice, atol=1e-4)
+        assert torch_all_close(output_slice, expected_output_slice, atol=5e-3)

tests/models/test_models_vae.py

Lines changed: 6 additions & 6 deletions
@@ -185,7 +185,7 @@ def test_stable_diffusion(self, seed, expected_slice):
         output_slice = sample[-1, -2:, -2:, :2].flatten().float().cpu()
         expected_output_slice = torch.tensor(expected_slice)

-        assert torch_all_close(output_slice, expected_output_slice, atol=1e-4)
+        assert torch_all_close(output_slice, expected_output_slice, atol=1e-3)

     @parameterized.expand(
         [
@@ -209,7 +209,7 @@ def test_stable_diffusion_fp16(self, seed, expected_slice):
         output_slice = sample[-1, -2:, :2, -2:].flatten().float().cpu()
         expected_output_slice = torch.tensor(expected_slice)

-        assert torch_all_close(output_slice, expected_output_slice, atol=1e-4)
+        assert torch_all_close(output_slice, expected_output_slice, atol=5e-3)

     @parameterized.expand(
         [
@@ -231,7 +231,7 @@ def test_stable_diffusion_mode(self, seed, expected_slice):
         output_slice = sample[-1, -2:, -2:, :2].flatten().float().cpu()
         expected_output_slice = torch.tensor(expected_slice)

-        assert torch_all_close(output_slice, expected_output_slice, atol=1e-4)
+        assert torch_all_close(output_slice, expected_output_slice, atol=1e-3)

     @parameterized.expand(
         [
@@ -254,7 +254,7 @@ def test_stable_diffusion_decode(self, seed, expected_slice):
         output_slice = sample[-1, -2:, :2, -2:].flatten().cpu()
         expected_output_slice = torch.tensor(expected_slice)

-        assert torch_all_close(output_slice, expected_output_slice, atol=1e-4)
+        assert torch_all_close(output_slice, expected_output_slice, atol=1e-3)

     @parameterized.expand(
         [
@@ -276,7 +276,7 @@ def test_stable_diffusion_decode_fp16(self, seed, expected_slice):
         output_slice = sample[-1, -2:, :2, -2:].flatten().float().cpu()
         expected_output_slice = torch.tensor(expected_slice)

-        assert torch_all_close(output_slice, expected_output_slice, atol=1e-4)
+        assert torch_all_close(output_slice, expected_output_slice, atol=5e-3)

     @parameterized.expand(
         [
@@ -300,4 +300,4 @@ def test_stable_diffusion_encode_sample(self, seed, expected_slice):
         output_slice = sample[0, -1, -3:, -3:].flatten().cpu()
         expected_output_slice = torch.tensor(expected_slice)

-        assert torch_all_close(output_slice, expected_output_slice, atol=1e-4)
+        assert torch_all_close(output_slice, expected_output_slice, atol=1e-3)
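
Taken together, the commit loosens every slice comparison from atol=1e-4 to 1e-3 for fp32 runs and 5e-3 for fp16 runs. Below is a minimal sketch of that pattern, not taken from the commit: check_slice is a hypothetical helper, and plain torch.allclose stands in for the torch_all_close test utility imported from diffusers.utils.

import torch

def check_slice(sample: torch.Tensor, expected_slice, fp16: bool = False) -> None:
    # Compare the same corner slice the tests inspect, cast to float so fp16
    # outputs can be checked against fp32 expected values.
    output_slice = sample[-1, -2:, -2:, :2].flatten().float().cpu()
    expected_output_slice = torch.tensor(expected_slice)
    # Tolerances as set by this commit: fp16 runs get more slack than fp32 runs.
    atol = 5e-3 if fp16 else 1e-3
    max_diff = (output_slice - expected_output_slice).abs().max().item()
    assert torch.allclose(output_slice, expected_output_slice, atol=atol), (
        f"max abs diff {max_diff:.2e} exceeds atol={atol}"
    )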
