@@ -16,7 +16,7 @@ def nms(boxes: Tensor, scores: Tensor, iou_threshold: float) -> Tensor:
16
16
to their intersection-over-union (IoU).
17
17
18
18
NMS iteratively removes lower scoring boxes which have an
19
- IoU greater than iou_threshold with another (higher scoring)
19
+ IoU greater than ``iou_threshold`` with another (higher scoring)
20
20
box.
21
21
22
22
If multiple boxes have the exact same score and satisfy the IoU
@@ -114,7 +114,12 @@ def _batched_nms_vanilla(
114
114
115
115
def remove_small_boxes(boxes: Tensor, min_size: float) -> Tensor:
116
116
"""
117
- Remove boxes which contains at least one side smaller than min_size.
117
+ Remove every box from ``boxes`` which contains at least one side length
118
+ that is smaller than ``min_size``.
119
+
120
+ .. note::
121
+ For sanitizing a :class:`~torchvision.tv_tensors.BoundingBoxes` object, consider using
122
+ the transform :func:`~torchvision.transforms.v2.SanitizeBoundingBoxes` instead.
118
123
119
124
Args:
120
125
boxes (Tensor[N, 4]): boxes in ``(x1, y1, x2, y2)`` format
@@ -123,7 +128,7 @@ def remove_small_boxes(boxes: Tensor, min_size: float) -> Tensor:
123
128
124
129
Returns:
125
130
Tensor[K]: indices of the boxes that have both sides
126
- larger than min_size
131
+ larger than ``min_size``
127
132
"""
128
133
if not torch.jit.is_scripting() and not torch.jit.is_tracing():
129
134
_log_api_usage_once(remove_small_boxes)
@@ -135,7 +140,11 @@ def remove_small_boxes(boxes: Tensor, min_size: float) -> Tensor:
135
140
136
141
def clip_boxes_to_image(boxes: Tensor, size: Tuple[int, int]) -> Tensor:
137
142
"""
138
- Clip boxes so that they lie inside an image of size `size`.
143
+ Clip boxes so that they lie inside an image of size ``size``.
144
+
145
+ .. note::
146
+ For clipping a :class:`~torchvision.tv_tensors.BoundingBoxes` object, consider using
147
+ the transform :func:`~torchvision.transforms.v2.ClampBoundingBoxes` instead.
139
148
140
149
Args:
141
150
boxes (Tensor[N, 4]): boxes in ``(x1, y1, x2, y2)`` format
@@ -167,15 +176,22 @@ def clip_boxes_to_image(boxes: Tensor, size: Tuple[int, int]) -> Tensor:
167
176
168
177
def box_convert(boxes: Tensor, in_fmt: str, out_fmt: str) -> Tensor:
169
178
"""
170
- Converts boxes from given in_fmt to out_fmt.
171
- Supported in_fmt and out_fmt are:
179
+ Converts :class:`torch.Tensor` boxes from a given ``in_fmt`` to ``out_fmt``.
180
+
181
+ .. note::
182
+ For converting a :class:`torch.Tensor` or a :class:`~torchvision.tv_tensors.BoundingBoxes` object
183
+ between different formats,
184
+ consider using :func:`~torchvision.transforms.v2.functional.convert_bounding_box_format` instead.
185
+ Or see the corresponding transform :func:`~torchvision.transforms.v2.ConvertBoundingBoxFormat`.
186
+
187
+ Supported ``in_fmt`` and ``out_fmt`` strings are:
172
188
173
- 'xyxy': boxes are represented via corners, x1, y1 being top left and x2, y2 being bottom right.
189
+ ``'xyxy'``: boxes are represented via corners, x1, y1 being top left and x2, y2 being bottom right.
174
190
This is the format that torchvision utilities expect.
175
191
176
- 'xywh' : boxes are represented via corner, width and height, x1, y2 being top left, w, h being width and height.
192
+ ``'xywh'``: boxes are represented via corner, width and height, x1, y1 being top left, w, h being width and height.
177
193
178
- 'cxcywh' : boxes are represented via centre, width and height, cx, cy being center of box, w, h
194
+ ``'cxcywh'``: boxes are represented via centre, width and height, cx, cy being center of box, w, h
179
195
being width and height.
180
196
181
197
Args:
0 commit comments