1 change: 1 addition & 0 deletions src/diffusers/utils/__init__.py
@@ -43,6 +43,7 @@
from .testing_utils import (
floats_tensor,
load_image,
+    load_numpy,
parse_flag_from_env,
require_torch_gpu,
slow,
18 changes: 17 additions & 1 deletion src/diffusers/utils/testing_utils.py
@@ -4,11 +4,14 @@
import random
import re
import unittest
+import urllib.parse
from distutils.util import strtobool
-from io import StringIO
+from io import BytesIO, StringIO
from pathlib import Path
from typing import Union

+import numpy as np
+
import PIL.Image
import PIL.ImageOps
import requests
@@ -165,6 +168,19 @@ def load_image(image: Union[str, PIL.Image.Image]) -> PIL.Image.Image:
return image


+def load_numpy(path) -> np.ndarray:
+    if not path.startswith("http://") and not path.startswith("https://"):
+        path = os.path.join(
+            "https://huggingface.co/datasets/fusing/diffusers-testing/resolve/main", urllib.parse.quote(path)
+        )
+
+    response = requests.get(path)
+    response.raise_for_status()
+    array = np.load(BytesIO(response.content))
+
+    return array
+
+
# --- pytest conf functions --- #

# to avoid multiple invocation from tests/conftest.py and examples/conftest.py - make sure it's called only once
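For reference, a minimal usage sketch of the new helper. This is a sketch only: it assumes the fixture named below exists in the fusing/diffusers-testing dataset and that network access is available. Bare filenames are resolved against that dataset; full http(s) URLs are fetched as-is.

import numpy as np

from diffusers.utils import load_numpy

# Bare filenames are resolved against the diffusers-testing dataset on the Hub.
latents = load_numpy("gaussian_noise_s=0_shape=4_4_64_64.npy")
assert isinstance(latents, np.ndarray)
assert latents.shape == (4, 4, 64, 64)  # shape encoded in the fixture name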
14 changes: 7 additions & 7 deletions tests/models/test_models_unet_2d.py
@@ -21,7 +21,7 @@
import torch

from diffusers import UNet2DConditionModel, UNet2DModel
-from diffusers.utils import floats_tensor, require_torch_gpu, slow, torch_all_close, torch_device
+from diffusers.utils import floats_tensor, load_numpy, require_torch_gpu, slow, torch_all_close, torch_device
from parameterized import parameterized

from ..test_modeling_common import ModelTesterMixin
@@ -411,18 +411,18 @@ def test_forward_with_norm_groups(self):

@slow
class UNet2DConditionModelIntegrationTests(unittest.TestCase):
+    def get_file_format(self, seed, shape):
+        return f"gaussian_noise_s={seed}_shape={'_'.join([str(s) for s in shape])}.npy"
+
def tearDown(self):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()

def get_latents(self, seed=0, shape=(4, 4, 64, 64), fp16=False):
-        batch_size, channels, height, width = shape
-        generator = torch.Generator(device=torch_device).manual_seed(seed)
dtype = torch.float16 if fp16 else torch.float32
-        image = torch.randn(batch_size, channels, height, width, device=torch_device, generator=generator, dtype=dtype)
-
+        image = torch.from_numpy(load_numpy(self.get_file_format(seed, shape))).to(torch_device).to(dtype)
return image

def get_unet_model(self, fp16=False, model_id="CompVis/stable-diffusion-v1-4"):
@@ -437,9 +437,9 @@ def get_unet_model(self, fp16=False, model_id="CompVis/stable-diffusion-v1-4"):
return model

def get_encoder_hidden_states(self, seed=0, shape=(4, 77, 768), fp16=False):
-        generator = torch.Generator(device=torch_device).manual_seed(seed)
dtype = torch.float16 if fp16 else torch.float32
-        return torch.randn(shape, device=torch_device, generator=generator, dtype=dtype)
+        hidden_states = torch.from_numpy(load_numpy(self.get_file_format(seed, shape))).to(torch_device).to(dtype)
+        return hidden_states

@parameterized.expand(
[
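A quick sanity check of the fixture-naming convention these tests rely on (values chosen to match the get_latents defaults; illustrative only):

seed, shape = 0, (4, 4, 64, 64)
name = f"gaussian_noise_s={seed}_shape={'_'.join([str(s) for s in shape])}.npy"
# This is the file that get_latents(seed=0) fetches through load_numpy.
assert name == "gaussian_noise_s=0_shape=4_4_64_64.npy"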
12 changes: 6 additions & 6 deletions tests/models/test_models_vae.py
@@ -20,7 +20,7 @@

from diffusers import AutoencoderKL
from diffusers.modeling_utils import ModelMixin
-from diffusers.utils import floats_tensor, require_torch_gpu, slow, torch_all_close, torch_device
+from diffusers.utils import floats_tensor, load_numpy, require_torch_gpu, slow, torch_all_close, torch_device
from parameterized import parameterized

from ..test_modeling_common import ModelTesterMixin
@@ -136,18 +136,18 @@ def test_output_pretrained(self):

@slow
class AutoencoderKLIntegrationTests(unittest.TestCase):
+    def get_file_format(self, seed, shape):
+        return f"gaussian_noise_s={seed}_shape={'_'.join([str(s) for s in shape])}.npy"
+
def tearDown(self):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()

-    def get_sd_image(self, seed=0, shape=(4, 3, 512, 512), fp16=False):
-        batch_size, channels, height, width = shape
-        generator = torch.Generator(device=torch_device).manual_seed(seed)
+    def get_sd_image(self, seed=0, shape=(4, 4, 64, 64), fp16=False):
dtype = torch.float16 if fp16 else torch.float32
-        image = torch.randn(batch_size, channels, height, width, device=torch_device, generator=generator, dtype=dtype)
-
+        image = torch.from_numpy(load_numpy(self.get_file_format(seed, shape))).to(torch_device).to(dtype)
return image

def get_sd_vae_model(self, model_id="CompVis/stable-diffusion-v1-4", fp16=False):