From 23e6c55055b303aac1ded6b515e602487f3de475 Mon Sep 17 00:00:00 2001 From: aljazkonec1 Date: Thu, 28 Nov 2024 14:32:04 +0100 Subject: [PATCH] added dai.ImgTransformations attributes for messages. --- depthai_nodes/ml/messages/classification.py | 26 +++++++++++ depthai_nodes/ml/messages/clusters.py | 26 +++++++++++ depthai_nodes/ml/messages/img_detections.py | 26 +++++++++++ depthai_nodes/ml/messages/keypoints.py | 26 +++++++++++ depthai_nodes/ml/messages/lines.py | 26 +++++++++++ depthai_nodes/ml/messages/map.py | 26 +++++++++++ depthai_nodes/ml/messages/prediction.py | 26 +++++++++++ depthai_nodes/ml/messages/segmentation.py | 26 +++++++++++ depthai_nodes/ml/parsers/classification.py | 1 + .../ml/parsers/classification_sequence.py | 1 + depthai_nodes/ml/parsers/fastsam.py | 2 +- depthai_nodes/ml/parsers/hrnet.py | 1 + depthai_nodes/ml/parsers/image_output.py | 1 + depthai_nodes/ml/parsers/keypoints.py | 1 + depthai_nodes/ml/parsers/lane_detection.py | 8 ++-- depthai_nodes/ml/parsers/map_output.py | 2 + .../ml/parsers/mediapipe_palm_detection.py | 5 ++- depthai_nodes/ml/parsers/mlsd.py | 2 + depthai_nodes/ml/parsers/ppdet.py | 8 ++-- depthai_nodes/ml/parsers/regression.py | 1 + depthai_nodes/ml/parsers/scrfd.py | 7 +-- depthai_nodes/ml/parsers/segmentation.py | 2 + .../ml/parsers/superanimal_landmarker.py | 1 + depthai_nodes/ml/parsers/utils/ppdet.py | 43 +++++-------------- depthai_nodes/ml/parsers/yolo.py | 1 + depthai_nodes/ml/parsers/yunet.py | 1 + 26 files changed, 253 insertions(+), 43 deletions(-) diff --git a/depthai_nodes/ml/messages/classification.py b/depthai_nodes/ml/messages/classification.py index d9a54fc1..7f539686 100644 --- a/depthai_nodes/ml/messages/classification.py +++ b/depthai_nodes/ml/messages/classification.py @@ -14,6 +14,8 @@ class Classifications(dai.Buffer): A list of classes. scores : NDArray[np.float32] Corresponding probability scores. + transformation : dai.ImgTransformation + Image transformation object. """ def __init__(self): @@ -21,6 +23,7 @@ def __init__(self): dai.Buffer.__init__(self) self._classes: List[str] = [] self._scores: NDArray[np.float32] = np.array([]) + self._transformation: dai.ImgTransformation = None @property def classes(self) -> List: @@ -91,3 +94,26 @@ def top_score(self) -> float: @rtype: float """ return self._scores[0] + + @property + def transformation(self) -> dai.ImgTransformation: + """Returns the Image Transformation object. + + @return: The Image Transformation object. + @rtype: dai.ImgTransformation + """ + return self._transformation + + @transformation.setter + def transformation(self, value: dai.ImgTransformation): + """Sets the Image Transformation object. + + @param value: The Image Transformation object. + @type value: dai.ImgTransformation + @raise TypeError: If value is not a dai.ImgTransformation object. + """ + if not isinstance(value, dai.ImgTransformation): + raise TypeError( + f"Transformation must be a dai.ImgTransformation object, instead got {type(value)}." + ) + self._transformation = value diff --git a/depthai_nodes/ml/messages/clusters.py b/depthai_nodes/ml/messages/clusters.py index dff27642..47095e9b 100644 --- a/depthai_nodes/ml/messages/clusters.py +++ b/depthai_nodes/ml/messages/clusters.py @@ -73,12 +73,15 @@ class Clusters(dai.Buffer): ---------- clusters : List[Cluster] List of clusters. + transformation : dai.ImgTransformation + Image transformation object. 
""" def __init__(self): """Initializes the Clusters object.""" super().__init__() self._clusters: List[Cluster] = [] + self._transformation: dai.ImgTransformation = None @property def clusters(self) -> List[Cluster]: @@ -103,3 +106,26 @@ def clusters(self, value: List[Cluster]): if not all(isinstance(cluster, Cluster) for cluster in value): raise ValueError("Clusters must be a list of Cluster objects.") self._clusters = value + + @property + def transformation(self) -> dai.ImgTransformation: + """Returns the Image Transformation object. + + @return: The Image Transformation object. + @rtype: dai.ImgTransformation + """ + return self._transformation + + @transformation.setter + def transformation(self, value: dai.ImgTransformation): + """Sets the Image Transformation object. + + @param value: The Image Transformation object. + @type value: dai.ImgTransformation + @raise TypeError: If value is not a dai.ImgTransformation object. + """ + if not isinstance(value, dai.ImgTransformation): + raise TypeError( + f"Transformation must be a dai.ImgTransformation object, instead got {type(value)}." + ) + self._transformation = value diff --git a/depthai_nodes/ml/messages/img_detections.py b/depthai_nodes/ml/messages/img_detections.py index 52865940..71903dc9 100644 --- a/depthai_nodes/ml/messages/img_detections.py +++ b/depthai_nodes/ml/messages/img_detections.py @@ -161,6 +161,8 @@ class ImgDetectionsExtended(dai.Buffer): Image detections with keypoints. masks: np.ndarray The segmentation masks of the image. All masks are stored in a single numpy array. + transformation : dai.ImgTransformation + Image transformation object. """ def __init__(self) -> None: @@ -168,6 +170,7 @@ def __init__(self) -> None: super().__init__() self._detections: List[ImgDetectionExtended] = [] self._masks: SegmentationMask = SegmentationMask() + self._transformation: dai.ImgTransformation = None @property def detections(self) -> List[ImgDetectionExtended]: @@ -226,3 +229,26 @@ def masks(self, value: NDArray[np.int16]): masks_msg = SegmentationMask() masks_msg.mask = value self._masks = masks_msg + + @property + def transformation(self) -> dai.ImgTransformation: + """Returns the Image Transformation object. + + @return: The Image Transformation object. + @rtype: dai.ImgTransformation + """ + return self._transformation + + @transformation.setter + def transformation(self, value: dai.ImgTransformation): + """Sets the Image Transformation object. + + @param value: The Image Transformation object. + @type value: dai.ImgTransformation + @raise TypeError: If value is not a dai.ImgTransformation object. + """ + if not isinstance(value, dai.ImgTransformation): + raise TypeError( + f"Transformation must be a dai.ImgTransformation object, instead got {type(value)}." + ) + self._transformation = value diff --git a/depthai_nodes/ml/messages/keypoints.py b/depthai_nodes/ml/messages/keypoints.py index e0766ed5..a65df5bb 100644 --- a/depthai_nodes/ml/messages/keypoints.py +++ b/depthai_nodes/ml/messages/keypoints.py @@ -127,12 +127,15 @@ class Keypoints(dai.Buffer): ---------- keypoints: List[Keypoint] List of Keypoint objects, each representing a keypoint. + transformation : dai.ImgTransformation + Image transformation object. 
""" def __init__(self): """Initializes the Keypoints object.""" super().__init__() self._keypoints: List[Keypoint] = [] + self._transformation: dai.ImgTransformation = None @property def keypoints(self) -> List[Keypoint]: @@ -157,3 +160,26 @@ def keypoints(self, value: List[Keypoint]): if not all(isinstance(item, Keypoint) for item in value): raise ValueError("keypoints must be a list of Keypoint objects.") self._keypoints = value + + @property + def transformation(self) -> dai.ImgTransformation: + """Returns the Image Transformation object. + + @return: The Image Transformation object. + @rtype: dai.ImgTransformation + """ + return self._transformation + + @transformation.setter + def transformation(self, value: dai.ImgTransformation): + """Sets the Image Transformation object. + + @param value: The Image Transformation object. + @type value: dai.ImgTransformation + @raise TypeError: If value is not a dai.ImgTransformation object. + """ + if not isinstance(value, dai.ImgTransformation): + raise TypeError( + f"Transformation must be a dai.ImgTransformation object, instead got {type(value)}." + ) + self._transformation = value diff --git a/depthai_nodes/ml/messages/lines.py b/depthai_nodes/ml/messages/lines.py index 94c198d1..24d47bda 100644 --- a/depthai_nodes/ml/messages/lines.py +++ b/depthai_nodes/ml/messages/lines.py @@ -100,12 +100,15 @@ class Lines(dai.Buffer): ---------- lines : List[Line] List of detected lines. + transformation : dai.ImgTransformation + Image transformation object. """ def __init__(self): """Initializes the Lines object.""" super().__init__() self._lines: List[Line] = [] + self._transformation: dai.ImgTransformation = None @property def lines(self) -> List[Line]: @@ -130,3 +133,26 @@ def lines(self, value: List[Line]): if not all(isinstance(item, Line) for item in value): raise ValueError("Lines must be a list of Line objects.") self._lines = value + + @property + def transformation(self) -> dai.ImgTransformation: + """Returns the Image Transformation object. + + @return: The Image Transformation object. + @rtype: dai.ImgTransformation + """ + return self._transformation + + @transformation.setter + def transformation(self, value: dai.ImgTransformation): + """Sets the Image Transformation object. + + @param value: The Image Transformation object. + @type value: dai.ImgTransformation + @raise TypeError: If value is not a dai.ImgTransformation object. + """ + if not isinstance(value, dai.ImgTransformation): + raise TypeError( + f"Transformation must be a dai.ImgTransformation object, instead got {type(value)}." + ) + self._transformation = value diff --git a/depthai_nodes/ml/messages/map.py b/depthai_nodes/ml/messages/map.py index c57b5152..15ba0064 100644 --- a/depthai_nodes/ml/messages/map.py +++ b/depthai_nodes/ml/messages/map.py @@ -14,6 +14,8 @@ class Map2D(dai.Buffer): 2D Map width. height : int 2D Map height. + transformation : dai.ImgTransformation + Image transformation object. """ def __init__(self): @@ -22,6 +24,7 @@ def __init__(self): self._map: NDArray[np.float32] = np.array([]) self._width: int = None self._height: int = None + self._transformation: dai.ImgTransformation = None @property def map(self) -> NDArray[np.float32]: @@ -71,3 +74,26 @@ def height(self) -> int: @rtype: int """ return self._height + + @property + def transformation(self) -> dai.ImgTransformation: + """Returns the Image Transformation object. + + @return: The Image Transformation object. 
+ @rtype: dai.ImgTransformation + """ + return self._transformation + + @transformation.setter + def transformation(self, value: dai.ImgTransformation): + """Sets the Image Transformation object. + + @param value: The Image Transformation object. + @type value: dai.ImgTransformation + @raise TypeError: If value is not a dai.ImgTransformation object. + """ + if not isinstance(value, dai.ImgTransformation): + raise TypeError( + f"Transformation must be a dai.ImgTransformation object, instead got {type(value)}." + ) + self._transformation = value diff --git a/depthai_nodes/ml/messages/prediction.py b/depthai_nodes/ml/messages/prediction.py index 91e4226e..3d53ada5 100644 --- a/depthai_nodes/ml/messages/prediction.py +++ b/depthai_nodes/ml/messages/prediction.py @@ -48,12 +48,15 @@ class Predictions(dai.Buffer): ---------- predictions : List[Prediction] List of predictions. + transformation : dai.ImgTransformation + Image transformation object. """ def __init__(self): """Initializes the Predictions object.""" super().__init__() self._predictions: List[Prediction] = [] + self._transformation: dai.ImgTransformation = None @property def predictions(self) -> List[Prediction]: @@ -89,3 +92,26 @@ def prediction(self) -> float: @rtype: float """ return self._predictions[0].prediction + + @property + def transformation(self) -> dai.ImgTransformation: + """Returns the Image Transformation object. + + @return: The Image Transformation object. + @rtype: dai.ImgTransformation + """ + return self._transformation + + @transformation.setter + def transformation(self, value: dai.ImgTransformation): + """Sets the Image Transformation object. + + @param value: The Image Transformation object. + @type value: dai.ImgTransformation + @raise TypeError: If value is not a dai.ImgTransformation object. + """ + if not isinstance(value, dai.ImgTransformation): + raise TypeError( + f"Transformation must be a dai.ImgTransformation object, instead got {type(value)}." + ) + self._transformation = value diff --git a/depthai_nodes/ml/messages/segmentation.py b/depthai_nodes/ml/messages/segmentation.py index ca0b5787..e2ce9a47 100644 --- a/depthai_nodes/ml/messages/segmentation.py +++ b/depthai_nodes/ml/messages/segmentation.py @@ -12,12 +12,15 @@ class SegmentationMask(dai.Buffer): ---------- mask: NDArray[np.int16] Segmentation mask. + transformation : dai.ImgTransformation + Image transformation object. """ def __init__(self): """Initializes the SegmentationMask object.""" super().__init__() self._mask: NDArray[np.int16] = np.array([]) + self._transformation: dai.ImgTransformation = None @property def mask(self) -> NDArray[np.int16]: @@ -48,3 +51,26 @@ def mask(self, value: NDArray[np.int16]): if np.any((value < -1)): raise ValueError("Mask must be an array of integers larger or equal to -1.") self._mask = value + + @property + def transformation(self) -> dai.ImgTransformation: + """Returns the Image Transformation object. + + @return: The Image Transformation object. + @rtype: dai.ImgTransformation + """ + return self._transformation + + @transformation.setter + def transformation(self, value: dai.ImgTransformation): + """Sets the Image Transformation object. + + @param value: The Image Transformation object. + @type value: dai.ImgTransformation + @raise TypeError: If value is not a dai.ImgTransformation object. + """ + if not isinstance(value, dai.ImgTransformation): + raise TypeError( + f"Transformation must be a dai.ImgTransformation object, instead got {type(value)}." 
+ ) + self._transformation = value diff --git a/depthai_nodes/ml/parsers/classification.py b/depthai_nodes/ml/parsers/classification.py index e4d86106..916af2f9 100644 --- a/depthai_nodes/ml/parsers/classification.py +++ b/depthai_nodes/ml/parsers/classification.py @@ -141,6 +141,7 @@ def run(self): scores = softmax(scores) msg = create_classification_message(self.classes, scores) + msg.transformation = output.getTransformation() msg.setTimestamp(output.getTimestamp()) self.out.send(msg) diff --git a/depthai_nodes/ml/parsers/classification_sequence.py b/depthai_nodes/ml/parsers/classification_sequence.py index 6af2d707..a325f222 100644 --- a/depthai_nodes/ml/parsers/classification_sequence.py +++ b/depthai_nodes/ml/parsers/classification_sequence.py @@ -178,6 +178,7 @@ def run(self): ignored_indexes=self.ignored_indexes, concatenate_classes=self.concatenate_classes, ) + msg.transformation = output.getTransformation() msg.setTimestamp(output.getTimestamp()) self.out.send(msg) diff --git a/depthai_nodes/ml/parsers/fastsam.py b/depthai_nodes/ml/parsers/fastsam.py index 7c4c947d..b3e35c20 100644 --- a/depthai_nodes/ml/parsers/fastsam.py +++ b/depthai_nodes/ml/parsers/fastsam.py @@ -353,6 +353,6 @@ def run(self): results_masks = merge_masks(results_masks) segmentation_message = create_segmentation_message(results_masks) + segmentation_message.transformation = output.getTransformation() segmentation_message.setTimestamp(output.getTimestamp()) - self.out.send(segmentation_message) diff --git a/depthai_nodes/ml/parsers/hrnet.py b/depthai_nodes/ml/parsers/hrnet.py index 4be4de39..3626ef2a 100644 --- a/depthai_nodes/ml/parsers/hrnet.py +++ b/depthai_nodes/ml/parsers/hrnet.py @@ -113,5 +113,6 @@ def run(self): confidence_threshold=self.score_threshold, ) keypoints_message.setTimestamp(output.getTimestamp()) + keypoints_message.transformation = output.getTransformation() self.out.send(keypoints_message) diff --git a/depthai_nodes/ml/parsers/image_output.py b/depthai_nodes/ml/parsers/image_output.py index 2cd1f54b..b796d711 100644 --- a/depthai_nodes/ml/parsers/image_output.py +++ b/depthai_nodes/ml/parsers/image_output.py @@ -113,5 +113,6 @@ def run(self): is_bgr=self.output_is_bgr, ) image_message.setTimestamp(output.getTimestamp()) + image_message.transformation = output.getTransformation() self.out.send(image_message) diff --git a/depthai_nodes/ml/parsers/keypoints.py b/depthai_nodes/ml/parsers/keypoints.py index 4acd6efa..ce76fb16 100644 --- a/depthai_nodes/ml/parsers/keypoints.py +++ b/depthai_nodes/ml/parsers/keypoints.py @@ -172,5 +172,6 @@ def run(self): msg = create_keypoints_message(keypoints) msg.setTimestamp(output.getTimestamp()) + msg.transformation = output.getTransformation() self.out.send(msg) diff --git a/depthai_nodes/ml/parsers/lane_detection.py b/depthai_nodes/ml/parsers/lane_detection.py index b9769bd5..b51246b3 100644 --- a/depthai_nodes/ml/parsers/lane_detection.py +++ b/depthai_nodes/ml/parsers/lane_detection.py @@ -201,6 +201,8 @@ def run(self): y=y, ) - message = create_cluster_message(points) - message.setTimestamp(output.getTimestamp()) - self.out.send(message) + msg = create_cluster_message(points) + msg.setTimestamp(output.getTimestamp()) + msg.transformation = output.getTransformation() + + self.out.send(msg) diff --git a/depthai_nodes/ml/parsers/map_output.py b/depthai_nodes/ml/parsers/map_output.py index e9e39c1b..8de18f56 100644 --- a/depthai_nodes/ml/parsers/map_output.py +++ b/depthai_nodes/ml/parsers/map_output.py @@ -106,4 +106,6 @@ def run(self): map=map, 
min_max_scaling=self.min_max_scaling ) map_message.setTimestamp(output.getTimestamp()) + map_message.transformation = output.getTransformation() + self.out.send(map_message) diff --git a/depthai_nodes/ml/parsers/mediapipe_palm_detection.py b/depthai_nodes/ml/parsers/mediapipe_palm_detection.py index e7c61e01..5b22905b 100644 --- a/depthai_nodes/ml/parsers/mediapipe_palm_detection.py +++ b/depthai_nodes/ml/parsers/mediapipe_palm_detection.py @@ -192,9 +192,12 @@ def run(self): bboxes = np.clip(bboxes, 0, 1) points = np.clip(points, 0, 1) + angles = np.round(angles, 0) detections_msg = create_detection_message( - bboxes=bboxes, scores=scores, angles=angles, keypoints=points + bboxes=bboxes, scores=scores, angles=angles ) detections_msg.setTimestamp(output.getTimestamp()) + detections_msg.transformation = output.getTransformation() + self.out.send(detections_msg) diff --git a/depthai_nodes/ml/parsers/mlsd.py b/depthai_nodes/ml/parsers/mlsd.py index 0525a94b..696f4167 100644 --- a/depthai_nodes/ml/parsers/mlsd.py +++ b/depthai_nodes/ml/parsers/mlsd.py @@ -171,4 +171,6 @@ def run(self): message = create_line_detection_message(lines, np.array(scores)) message.setTimestamp(output.getTimestamp()) + message.transformation = output.getTransformation() + self.out.send(message) diff --git a/depthai_nodes/ml/parsers/ppdet.py b/depthai_nodes/ml/parsers/ppdet.py index b64ea769..dfe11645 100644 --- a/depthai_nodes/ml/parsers/ppdet.py +++ b/depthai_nodes/ml/parsers/ppdet.py @@ -114,7 +114,7 @@ def run(self): _, _, height, width = predictions.shape - bboxes, angles, corners, scores = parse_paddle_detection_outputs( + bboxes, angles, scores = parse_paddle_detection_outputs( predictions, self.mask_threshold, self.conf_threshold, @@ -122,8 +122,10 @@ def run(self): width=width, height=height, ) - - message = create_detection_message(bboxes, scores, angles=angles) + message = create_detection_message( + bboxes=bboxes, scores=scores, angles=angles + ) message.setTimestamp(output.getTimestamp()) + message.transformation = output.getTransformation() self.out.send(message) diff --git a/depthai_nodes/ml/parsers/regression.py b/depthai_nodes/ml/parsers/regression.py index c7be585c..ed232c0b 100644 --- a/depthai_nodes/ml/parsers/regression.py +++ b/depthai_nodes/ml/parsers/regression.py @@ -88,5 +88,6 @@ def run(self): regression_message = create_regression_message(predictions=predictions) regression_message.setTimestamp(output.getTimestamp()) + regression_message.transformation = output.getTransformation() self.out.send(regression_message) diff --git a/depthai_nodes/ml/parsers/scrfd.py b/depthai_nodes/ml/parsers/scrfd.py index 76ecc2e5..e626baa7 100644 --- a/depthai_nodes/ml/parsers/scrfd.py +++ b/depthai_nodes/ml/parsers/scrfd.py @@ -213,9 +213,10 @@ def run(self): ) bboxes = xyxy_to_xywh(bboxes) bboxes = np.clip(bboxes, 0, 1) - detection_msg = create_detection_message( + message = create_detection_message( bboxes=bboxes, scores=scores, keypoints=keypoints ) - detection_msg.setTimestamp(output.getTimestamp()) + message.setTimestamp(output.getTimestamp()) + message.transformation = output.getTransformation() - self.out.send(detection_msg) + self.out.send(message) diff --git a/depthai_nodes/ml/parsers/segmentation.py b/depthai_nodes/ml/parsers/segmentation.py index 3b3e346d..621c2796 100644 --- a/depthai_nodes/ml/parsers/segmentation.py +++ b/depthai_nodes/ml/parsers/segmentation.py @@ -153,4 +153,6 @@ def run(self): mask_message = create_segmentation_message(class_map) mask_message.setTimestamp(output.getTimestamp()) 
+ mask_message.transformation = output.getTransformation() + self.out.send(mask_message) diff --git a/depthai_nodes/ml/parsers/superanimal_landmarker.py b/depthai_nodes/ml/parsers/superanimal_landmarker.py index 82b3c874..768d6a44 100644 --- a/depthai_nodes/ml/parsers/superanimal_landmarker.py +++ b/depthai_nodes/ml/parsers/superanimal_landmarker.py @@ -99,5 +99,6 @@ def run(self): msg = create_keypoints_message(keypoints, scores, self.score_threshold) msg.setTimestamp(output.getTimestamp()) + msg.transformation = output.getTransformation() self.out.send(msg) diff --git a/depthai_nodes/ml/parsers/utils/ppdet.py b/depthai_nodes/ml/parsers/utils/ppdet.py index 8971afa5..a35d3658 100644 --- a/depthai_nodes/ml/parsers/utils/ppdet.py +++ b/depthai_nodes/ml/parsers/utils/ppdet.py @@ -81,42 +81,22 @@ def _box_score(predictions: np.ndarray, _corners: np.ndarray) -> float: def _unclip( box: np.ndarray, - corners: np.ndarray, - width: int, - height: int, - unclip_ratio: float = 2, + unclip_ratio: float = 3, ) -> Tuple[np.ndarray, np.ndarray]: """Internal function to dilate the bounding box area by a specified ratio. @param box: The rotated bounding box. @type box: np.ndarray - @param corners: The corners of the bounding box. - @type corners: np.ndarray - @param width: The width of the model output predictions. - @type width: int - @param height: The height of the model output predictions. - @type height: int @param unclip_ratio: The ratio to dilate the bounding box area by. - @type unclip_ratio: float = 2 + @type unclip_ratio: float = 3 @return: The dilated bounding box corners. @rtype: np.ndarray """ - perimiter = cv2.arcLength(corners, True) - area = cv2.contourArea(corners) - dilation_pixels = ( - int(-perimiter / 8 + np.sqrt(perimiter**2 / 64 + area * unclip_ratio / 4)) + 1 - ) - corners = _dilate_box(corners, dilation_pixels) - - for point in corners: - point[0] = min(max(point[0], 0), width - 1) - point[1] = min(max(point[1], 0), height - 1) + box[2] = box[2] * np.sqrt(unclip_ratio) + box[3] = box[3] * unclip_ratio - box[2] = np.linalg.norm(corners[0] - corners[1]) - box[3] = np.linalg.norm(corners[1] - corners[2]) - - return box, np.array(corners, dtype=float) + return box def parse_paddle_detection_outputs( @@ -161,8 +141,6 @@ def parse_paddle_detection_outputs( kernel = np.ones((5, 5), np.uint8) mask = cv2.dilate(mask.astype(np.uint8), kernel, iterations=1) - src_h, src_w = predictions.shape[:2] - outs = cv2.findContours( (mask * 255).astype(np.uint8), cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE ) @@ -177,7 +155,6 @@ def parse_paddle_detection_outputs( boxes = [] scores = [] - corners_array = [] angles = [] for contour in contours[:num_contours]: box, corners = _get_mini_boxes(contour) @@ -188,8 +165,7 @@ def parse_paddle_detection_outputs( if score < bbox_threshold: continue - box, corners = _unclip(box, corners, src_w, src_h) - corners_array.append(corners) + box = _unclip(box) boxes.append(box[:4]) scores.append(score) angles.append(box[4]) @@ -200,6 +176,9 @@ def parse_paddle_detection_outputs( boxes[:, 1] /= height boxes[:, 2] /= width boxes[:, 3] /= height - corners = np.clip(corners, 0, 1) - return boxes, np.array(angles), np.array(corners_array), np.array(scores) + boxes = np.clip(boxes, 0.0, 1.0) + angles = np.array(angles) + angles = np.round(angles, 0) + + return boxes, angles, np.array(scores) diff --git a/depthai_nodes/ml/parsers/yolo.py b/depthai_nodes/ml/parsers/yolo.py index 445b5d84..8dbf27c7 100644 --- a/depthai_nodes/ml/parsers/yolo.py +++ b/depthai_nodes/ml/parsers/yolo.py @@ 
-397,5 +397,6 @@ def run(self): ) detections_message.setTimestamp(output.getTimestamp()) + detections_message.transformation = output.getTransformation() self.out.send(detections_message) diff --git a/depthai_nodes/ml/parsers/yunet.py b/depthai_nodes/ml/parsers/yunet.py index e2f9de13..597d7d21 100644 --- a/depthai_nodes/ml/parsers/yunet.py +++ b/depthai_nodes/ml/parsers/yunet.py @@ -296,5 +296,6 @@ def run(self): ) detections_message.setTimestamp(output.getTimestamp()) + detections_message.transformation = output.getTransformation() self.out.send(detections_message)
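Illustrative usage: the sketch below shows one way a host-side consumer could read the new `transformation` attribute that the parsers now copy from `output.getTransformation()`. The handler name and queue handling are hypothetical; only the `transformation` property, its `None` default, the import path (taken from depthai_nodes/ml/messages/img_detections.py), and the setter's TypeError check come from the code added in this patch.

# Minimal host-side sketch (hypothetical consumer of the updated messages).
from depthai_nodes.ml.messages.img_detections import ImgDetectionsExtended

def handle_detections(msg: ImgDetectionsExtended) -> None:
    # Parsers now attach the dai.ImgTransformation taken from the NN output;
    # it stays None if the producer did not set it.
    if msg.transformation is None:
        print("No ImgTransformation attached to this message.")
        return
    # The transformation describes how the NN input frame was derived from the
    # source frame, so downstream code can use it to map normalized outputs
    # back to source-frame coordinates (remapping calls not shown here).
    print(f"Detections carry transformation: {msg.transformation}")

# The new setter validates the type, so assigning anything other than a
# dai.ImgTransformation raises TypeError:
msg = ImgDetectionsExtended()
try:
    msg.transformation = "not a transformation"  # wrong type on purpose
except TypeError as err:
    print(err)

Keeping the transformation on the message itself means the parsers only forward what the NN output already carries, and each downstream node can decide whether and how to remap coordinates to the source frame.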