[Tests] Better prints (#1043)
patrickvonplaten authored Oct 28, 2022
1 parent 8d6487f commit c4ef1ef
Showing 4 changed files with 37 additions and 21 deletions.
10 changes: 9 additions & 1 deletion src/diffusers/utils/__init__.py
@@ -40,7 +40,15 @@


if is_torch_available():
-    from .testing_utils import floats_tensor, load_image, parse_flag_from_env, require_torch_gpu, slow, torch_device
+    from .testing_utils import (
+        floats_tensor,
+        load_image,
+        parse_flag_from_env,
+        require_torch_gpu,
+        slow,
+        torch_all_close,
+        torch_device,
+    )


logger = get_logger(__name__)
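A note on this hunk: the single-line import is split and gains torch_all_close, the helper added in testing_utils.py below. Because the re-export sits under is_torch_available(), the name is only importable when PyTorch is installed; test files can then pull it straight from the package utils, as the test diffs below do (illustrative import, assuming torch is present):

from diffusers.utils import torch_all_close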
8 changes: 8 additions & 0 deletions src/diffusers/utils/testing_utils.py
@@ -34,6 +34,14 @@
torch_device = "mps" if (mps_backend_registered and torch.backends.mps.is_available()) else torch_device


+def torch_all_close(a, b, *args, **kwargs):
+    if not is_torch_available():
+        raise ValueError("PyTorch needs to be installed to use this function.")
+    if not torch.allclose(a, b, *args, **kwargs):
+        assert False, f"Max diff is absolute {(a - b).abs().max()}. Diff tensor is {(a - b).abs()}."
+    return True


def get_tests_dir(append_path=None):
"""
Args:
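This helper is the heart of the "better prints": a plain assert torch.allclose(...) fails with a bare AssertionError, while torch_all_close raises with the maximum absolute difference and the full diff tensor. A minimal sketch of the behavior, using illustrative tensors (not part of the commit):

import torch

from diffusers.utils import torch_all_close

a = torch.tensor([1.0, 2.0, 3.0])
b = torch.tensor([1.0, 2.0, 3.5])

try:
    # Extra positional/keyword arguments are forwarded to torch.allclose,
    # so the rtol/atol values used by the tests behave exactly as before.
    torch_all_close(a, b, atol=1e-4)
except AssertionError as err:
    # e.g. "Max diff is absolute 0.5. Diff tensor is tensor([0.0000, 0.0000, 0.5000])."
    print(err)

Since the helper raises on mismatch and returns True on success, it can replace torch.allclose inside both bare assert statements and self.assertTrue(...) calls, which is exactly what the two test files below do.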
24 changes: 12 additions & 12 deletions tests/models/test_models_unet_2d.py
@@ -21,7 +21,7 @@
import torch

from diffusers import UNet2DConditionModel, UNet2DModel
-from diffusers.utils import floats_tensor, require_torch_gpu, slow, torch_device
+from diffusers.utils import floats_tensor, require_torch_gpu, slow, torch_all_close, torch_device
from parameterized import parameterized

from ..test_modeling_common import ModelTesterMixin
@@ -156,7 +156,7 @@ def test_from_pretrained_accelerate_wont_change_results(self):
model_normal_load.eval()
arr_normal_load = model_normal_load(noise, time_step)["sample"]

-        assert torch.allclose(arr_accelerate, arr_normal_load, rtol=1e-3)
+        assert torch_all_close(arr_accelerate, arr_normal_load, rtol=1e-3)

@unittest.skipIf(torch_device != "cuda", "This test is supposed to run on GPU")
def test_memory_footprint_gets_reduced(self):
@@ -207,7 +207,7 @@ def test_output_pretrained(self):
expected_output_slice = torch.tensor([-13.3258, -20.1100, -15.9873, -17.6617, -23.0596, -17.9419, -13.3675, -16.1889, -12.3800])
# fmt: on

-        self.assertTrue(torch.allclose(output_slice, expected_output_slice, rtol=1e-3))
+        self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1e-3))


class UNet2DConditionModelTests(ModelTesterMixin, unittest.TestCase):
@@ -287,7 +287,7 @@ def test_gradient_checkpointing(self):
named_params = dict(model.named_parameters())
named_params_2 = dict(model_2.named_parameters())
for name, param in named_params.items():
-            self.assertTrue(torch.allclose(param.grad.data, named_params_2[name].grad.data, atol=5e-5))
+            self.assertTrue(torch_all_close(param.grad.data, named_params_2[name].grad.data, atol=5e-5))


class NCSNppModelTests(ModelTesterMixin, unittest.TestCase):
@@ -377,7 +377,7 @@ def test_output_pretrained_ve_mid(self):
expected_output_slice = torch.tensor([-4836.2231, -6487.1387, -3816.7969, -7964.9253, -10966.2842, -20043.6016, 8137.0571, 2340.3499, 544.6114])
# fmt: on

-        self.assertTrue(torch.allclose(output_slice, expected_output_slice, rtol=1e-2))
+        self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1e-2))

def test_output_pretrained_ve_large(self):
model = UNet2DModel.from_pretrained("fusing/ncsnpp-ffhq-ve-dummy-update")
@@ -402,7 +402,7 @@ def test_output_pretrained_ve_large(self):
expected_output_slice = torch.tensor([-0.0325, -0.0900, -0.0869, -0.0332, -0.0725, -0.0270, -0.0101, 0.0227, 0.0256])
# fmt: on

-        self.assertTrue(torch.allclose(output_slice, expected_output_slice, rtol=1e-2))
+        self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1e-2))

def test_forward_with_norm_groups(self):
# not required for this model
@@ -464,7 +464,7 @@ def test_compvis_sd_v1_4(self, seed, timestep, expected_slice):
output_slice = sample[-1, -2:, -2:, :2].flatten().float().cpu()
expected_output_slice = torch.tensor(expected_slice)

-        assert torch.allclose(output_slice, expected_output_slice, atol=1e-4)
+        assert torch_all_close(output_slice, expected_output_slice, atol=1e-4)

@parameterized.expand(
[
@@ -490,7 +490,7 @@ def test_compvis_sd_v1_4_fp16(self, seed, timestep, expected_slice):
output_slice = sample[-1, -2:, -2:, :2].flatten().float().cpu()
expected_output_slice = torch.tensor(expected_slice)

-        assert torch.allclose(output_slice, expected_output_slice, atol=1e-4)
+        assert torch_all_close(output_slice, expected_output_slice, atol=1e-4)

@parameterized.expand(
[
@@ -515,7 +515,7 @@ def test_compvis_sd_v1_5(self, seed, timestep, expected_slice):
output_slice = sample[-1, -2:, -2:, :2].flatten().float().cpu()
expected_output_slice = torch.tensor(expected_slice)

-        assert torch.allclose(output_slice, expected_output_slice, atol=1e-4)
+        assert torch_all_close(output_slice, expected_output_slice, atol=1e-4)

@parameterized.expand(
[
@@ -541,7 +541,7 @@ def test_compvis_sd_v1_5_fp16(self, seed, timestep, expected_slice):
output_slice = sample[-1, -2:, -2:, :2].flatten().float().cpu()
expected_output_slice = torch.tensor(expected_slice)

-        assert torch.allclose(output_slice, expected_output_slice, atol=1e-4)
+        assert torch_all_close(output_slice, expected_output_slice, atol=1e-4)

@parameterized.expand(
[
@@ -566,7 +566,7 @@ def test_compvis_sd_inpaint(self, seed, timestep, expected_slice):
output_slice = sample[-1, -2:, -2:, :2].flatten().float().cpu()
expected_output_slice = torch.tensor(expected_slice)

-        assert torch.allclose(output_slice, expected_output_slice, atol=1e-4)
+        assert torch_all_close(output_slice, expected_output_slice, atol=1e-4)

@parameterized.expand(
[
@@ -592,4 +592,4 @@ def test_compvis_sd_inpaint_fp16(self, seed, timestep, expected_slice):
output_slice = sample[-1, -2:, -2:, :2].flatten().float().cpu()
expected_output_slice = torch.tensor(expected_slice)

-        assert torch.allclose(output_slice, expected_output_slice, atol=1e-4)
+        assert torch_all_close(output_slice, expected_output_slice, atol=1e-4)
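Every call site in this file follows the same recipe: pick eight values from a corner of the model output, build the expected tensor, and compare under an absolute tolerance. A condensed sketch of that recipe (the check_output_slice helper name and its default tolerance are illustrative, not part of the commit):

import torch

from diffusers.utils import torch_all_close

def check_output_slice(sample, expected_slice, atol=1e-4):
    # Same slicing convention as the tests above: eight values taken from
    # the last batch element of the 4-D output tensor, flattened on CPU.
    output_slice = sample[-1, -2:, -2:, :2].flatten().float().cpu()
    expected_output_slice = torch.tensor(expected_slice)
    # Raises with the max absolute diff on mismatch; returns True otherwise.
    return torch_all_close(output_slice, expected_output_slice, atol=atol)

With torch_all_close in place, a regression in any of these slices now reports how far off the values are instead of failing with a bare assertion.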
16 changes: 8 additions & 8 deletions tests/models/test_models_vae.py
@@ -20,7 +20,7 @@

from diffusers import AutoencoderKL
from diffusers.modeling_utils import ModelMixin
-from diffusers.utils import floats_tensor, require_torch_gpu, slow, torch_device
+from diffusers.utils import floats_tensor, require_torch_gpu, slow, torch_all_close, torch_device
from parameterized import parameterized

from ..test_modeling_common import ModelTesterMixin
@@ -131,7 +131,7 @@ def test_output_pretrained(self):
[-0.2421, 0.4642, 0.2507, -0.0438, 0.0682, 0.3160, -0.2018, -0.0727, 0.2485]
)

-        self.assertTrue(torch.allclose(output_slice, expected_output_slice, rtol=1e-2))
+        self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1e-2))


@slow
@@ -185,7 +185,7 @@ def test_stable_diffusion(self, seed, expected_slice):
output_slice = sample[-1, -2:, -2:, :2].flatten().float().cpu()
expected_output_slice = torch.tensor(expected_slice)

-        assert torch.allclose(output_slice, expected_output_slice, atol=1e-4)
+        assert torch_all_close(output_slice, expected_output_slice, atol=1e-4)

@parameterized.expand(
[
@@ -209,7 +209,7 @@ def test_stable_diffusion_fp16(self, seed, expected_slice):
output_slice = sample[-1, -2:, :2, -2:].flatten().float().cpu()
expected_output_slice = torch.tensor(expected_slice)

-        assert torch.allclose(output_slice, expected_output_slice, atol=1e-4)
+        assert torch_all_close(output_slice, expected_output_slice, atol=1e-4)

@parameterized.expand(
[
@@ -231,7 +231,7 @@ def test_stable_diffusion_mode(self, seed, expected_slice):
output_slice = sample[-1, -2:, -2:, :2].flatten().float().cpu()
expected_output_slice = torch.tensor(expected_slice)

-        assert torch.allclose(output_slice, expected_output_slice, atol=1e-4)
+        assert torch_all_close(output_slice, expected_output_slice, atol=1e-4)

@parameterized.expand(
[
@@ -254,7 +254,7 @@ def test_stable_diffusion_decode(self, seed, expected_slice):
output_slice = sample[-1, -2:, :2, -2:].flatten().cpu()
expected_output_slice = torch.tensor(expected_slice)

-        assert torch.allclose(output_slice, expected_output_slice, atol=1e-4)
+        assert torch_all_close(output_slice, expected_output_slice, atol=1e-4)

@parameterized.expand(
[
@@ -276,7 +276,7 @@ def test_stable_diffusion_decode_fp16(self, seed, expected_slice):
output_slice = sample[-1, -2:, :2, -2:].flatten().float().cpu()
expected_output_slice = torch.tensor(expected_slice)

-        assert torch.allclose(output_slice, expected_output_slice, atol=1e-4)
+        assert torch_all_close(output_slice, expected_output_slice, atol=1e-4)

@parameterized.expand(
[
@@ -300,4 +300,4 @@ def test_stable_diffusion_encode_sample(self, seed, expected_slice):
output_slice = sample[0, -1, -3:, -3:].flatten().cpu()
expected_output_slice = torch.tensor(expected_slice)

-        assert torch.allclose(output_slice, expected_output_slice, atol=1e-4)
+        assert torch_all_close(output_slice, expected_output_slice, atol=1e-4)
