Skip to content

Commit 68161e9

Browse files
ezyang and NicolasHug authored
Add opcheck testing for nms (#7961)
Signed-off-by: Edward Z. Yang <ezyang@meta.com> Co-authored-by: Nicolas Hug <nh.nicolas.hug@gmail.com> Co-authored-by: Nicolas Hug <contact@nicolas-hug.com>
1 parent e3fb8c0 commit 68161e9

File tree

5 files changed

+33
-13
lines changed

5 files changed

+33
-13
lines changed

.github/scripts/unittest.sh

+1-1
Original file line number · Diff line number · Diff line change
@@ -8,7 +8,7 @@ set -euo pipefail
88
eval "$($(which conda) shell.bash hook)" && conda deactivate && conda activate ci
99

1010
echo '::group::Install testing utilities'
11-
pip install --progress-bar=off pytest pytest-mock pytest-cov
11+
pip install --progress-bar=off pytest pytest-mock pytest-cov expecttest
1212
echo '::endgroup::'
1313

1414
python test/smoke_test.py

test/conftest.py

+1
Original file line numberDiff line numberDiff line change
@@ -19,6 +19,7 @@ def pytest_configure(config):
1919
config.addinivalue_line("markers", "needs_cuda: mark for tests that rely on a CUDA device")
2020
config.addinivalue_line("markers", "needs_mps: mark for tests that rely on a MPS device")
2121
config.addinivalue_line("markers", "dont_collect: mark for tests that should not be collected")
22+
config.addinivalue_line("markers", "opcheck_only_one: only opcheck one parametrization")
2223

2324

2425
def pytest_collection_modifyitems(items):

test/optests_failures_dict.json

+5
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,5 @@
1+
{
2+
"_description": "This is a dict containing failures for tests autogenerated by generate_opcheck_tests. For more details, please see https://docs.google.com/document/d/1Pj5HRZvdOq3xpFpbEjUZp2hBovhy7Wnxw14m6lF2154/edit",
3+
"_version": 1,
4+
"data": {}
5+
}

test/test_models.py

+1-11
Original file line numberDiff line numberDiff line change
@@ -25,16 +25,6 @@
2525
SKIP_BIG_MODEL = os.getenv("SKIP_BIG_MODEL", "1") == "1"
2626

2727

28-
@contextlib.contextmanager
29-
def disable_tf32():
30-
previous = torch.backends.cudnn.allow_tf32
31-
torch.backends.cudnn.allow_tf32 = False
32-
try:
33-
yield
34-
finally:
35-
torch.backends.cudnn.allow_tf32 = previous
36-
37-
3828
def list_model_fns(module):
3929
return [get_model_builder(name) for name in list_models(module)]
4030

@@ -681,7 +671,7 @@ def test_vitc_models(model_fn, dev):
681671
test_classification_model(model_fn, dev)
682672

683673

684-
@disable_tf32() # see: https://github.com/pytorch/vision/issues/7618
674+
@torch.backends.cudnn.flags(allow_tf32=False) # see: https://github.com/pytorch/vision/issues/7618
685675
@pytest.mark.parametrize("model_fn", list_model_fns(models))
686676
@pytest.mark.parametrize("dev", cpu_and_cuda())
687677
def test_classification_model(model_fn, dev):

test/test_ops.py

+25-1
Original file line numberDiff line numberDiff line change
@@ -10,6 +10,7 @@
1010
import torch
1111
import torch.fx
1212
import torch.nn.functional as F
13+
import torch.testing._internal.optests as optests
1314
from common_utils import assert_equal, cpu_and_cuda, cpu_and_cuda_and_mps, needs_cuda, needs_mps
1415
from PIL import Image
1516
from torch import nn, Tensor
@@ -19,6 +20,14 @@
1920
from torchvision.models.feature_extraction import get_graph_node_names
2021

2122

23+
OPTESTS = [
24+
"test_schema",
25+
"test_autograd_registration",
26+
"test_faketensor",
27+
"test_aot_dispatch_dynamic",
28+
]
29+
30+
2231
# Context manager for setting deterministic flag and automatically
2332
# resetting it to its original value
2433
class DeterministicGuard:
@@ -462,7 +471,7 @@ def test_boxes_shape(self):
462471

463472
@pytest.mark.parametrize("aligned", (True, False))
464473
@pytest.mark.parametrize("device", cpu_and_cuda_and_mps())
465-
@pytest.mark.parametrize("x_dtype", (torch.float16, torch.float32, torch.float64), ids=str)
474+
@pytest.mark.parametrize("x_dtype", (torch.float16, torch.float32, torch.float64)) # , ids=str)
466475
@pytest.mark.parametrize("contiguous", (True, False))
467476
@pytest.mark.parametrize("deterministic", (True, False))
468477
def test_forward(self, device, contiguous, deterministic, aligned, x_dtype, rois_dtype=None):
@@ -712,6 +721,7 @@ def _create_tensors_with_iou(self, N, iou_thresh):
712721

713722
@pytest.mark.parametrize("iou", (0.2, 0.5, 0.8))
714723
@pytest.mark.parametrize("seed", range(10))
724+
@pytest.mark.opcheck_only_one()
715725
def test_nms_ref(self, iou, seed):
716726
torch.random.manual_seed(seed)
717727
err_msg = "NMS incompatible between CPU and reference implementation for IoU={}"
@@ -732,6 +742,7 @@ def test_nms_input_errors(self):
732742

733743
@pytest.mark.parametrize("iou", (0.2, 0.5, 0.8))
734744
@pytest.mark.parametrize("scale, zero_point", ((1, 0), (2, 50), (3, 10)))
745+
@pytest.mark.opcheck_only_one()
735746
def test_qnms(self, iou, scale, zero_point):
736747
# Note: we compare qnms vs nms instead of qnms vs reference implementation.
737748
# This is because with the int conversion, the trick used in _create_tensors_with_iou
@@ -759,6 +770,7 @@ def test_qnms(self, iou, scale, zero_point):
759770
),
760771
)
761772
@pytest.mark.parametrize("iou", (0.2, 0.5, 0.8))
773+
@pytest.mark.opcheck_only_one()
762774
def test_nms_gpu(self, iou, device, dtype=torch.float64):
763775
dtype = torch.float32 if device == "mps" else dtype
764776
tol = 1e-3 if dtype is torch.half else 1e-5
@@ -778,6 +790,7 @@ def test_nms_gpu(self, iou, device, dtype=torch.float64):
778790
@needs_cuda
779791
@pytest.mark.parametrize("iou", (0.2, 0.5, 0.8))
780792
@pytest.mark.parametrize("dtype", (torch.float, torch.half))
793+
@pytest.mark.opcheck_only_one()
781794
def test_autocast(self, iou, dtype):
782795
with torch.cuda.amp.autocast():
783796
self.test_nms_gpu(iou=iou, dtype=dtype, device="cuda")
@@ -789,6 +802,7 @@ def test_autocast(self, iou, dtype):
789802
pytest.param("mps", marks=pytest.mark.needs_mps),
790803
),
791804
)
805+
@pytest.mark.opcheck_only_one()
792806
def test_nms_float16(self, device):
793807
boxes = torch.tensor(
794808
[
@@ -805,6 +819,7 @@ def test_nms_float16(self, device):
805819
assert_equal(keep32, keep16)
806820

807821
@pytest.mark.parametrize("seed", range(10))
822+
@pytest.mark.opcheck_only_one()
808823
def test_batched_nms_implementations(self, seed):
809824
"""Make sure that both implementations of batched_nms yield identical results"""
810825
torch.random.manual_seed(seed)
@@ -830,6 +845,15 @@ def test_batched_nms_implementations(self, seed):
830845
torch.testing.assert_close(empty, ops.batched_nms(empty, None, None, None))
831846

832847

848+
optests.generate_opcheck_tests(
849+
testcase=TestNMS,
850+
namespaces=["torchvision"],
851+
failures_dict_path=os.path.join(os.path.dirname(__file__), "optests_failures_dict.json"),
852+
additional_decorators=[],
853+
test_utils=OPTESTS,
854+
)
855+
856+
833857
class TestDeformConv:
834858
dtype = torch.float64
835859

0 commit comments

Comments (0)