diff --git a/paddleseg/models/losses/lovasz_loss.py b/paddleseg/models/losses/lovasz_loss.py
index 4385c979fe..82dfaf9597 100644
--- a/paddleseg/models/losses/lovasz_loss.py
+++ b/paddleseg/models/losses/lovasz_loss.py
@@ -124,8 +124,12 @@ def lovasz_hinge_flat(logits, labels):
     signs = 2. * labels - 1.
     signs.stop_gradient = True
     errors = 1. - logits * signs
-    errors_sorted, perm = paddle._C_ops.argsort(errors, 'axis', 0, 'descending',
-                                                True)
+    if hasattr(paddle, "_legacy_C_ops"):
+        errors_sorted, perm = paddle._legacy_C_ops.argsort(errors, 'axis', 0,
+                                                           'descending', True)
+    else:
+        errors_sorted, perm = paddle._C_ops.argsort(errors, 'axis', 0,
+                                                    'descending', True)
     errors_sorted.stop_gradient = False
     gt_sorted = paddle.gather(labels, perm)
     grad = lovasz_grad(gt_sorted)
@@ -181,8 +185,12 @@ def lovasz_softmax_flat(probas, labels, classes='present'):
         else:
             class_pred = probas[:, c]
         errors = paddle.abs(fg - class_pred)
-        errors_sorted, perm = paddle._C_ops.argsort(errors, 'axis', 0,
-                                                    'descending', True)
+        if hasattr(paddle, "_legacy_C_ops"):
+            errors_sorted, perm = paddle._legacy_C_ops.argsort(
+                errors, 'axis', 0, 'descending', True)
+        else:
+            errors_sorted, perm = paddle._C_ops.argsort(errors, 'axis', 0,
+                                                        'descending', True)
         errors_sorted.stop_gradient = False
         fg_sorted = paddle.gather(fg, perm)
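
For reference, a minimal standalone sketch of the compatibility pattern this patch applies (not part of the diff itself): newer Paddle releases moved the old attribute-style C++ ops out of paddle._C_ops, keeping them available as paddle._legacy_C_ops, so the code probes for the new module with hasattr and falls back to the old name otherwise. The sample tensor and variable names below are illustrative assumptions; the argsort call signature is taken verbatim from the patch.

    # Illustrative sketch of the hasattr-based version shim used in the patch.
    import paddle

    x = paddle.to_tensor([3., 1., 2.])
    if hasattr(paddle, "_legacy_C_ops"):
        # Paddle versions where the old attribute-style ops live under
        # paddle._legacy_C_ops
        sorted_x, perm = paddle._legacy_C_ops.argsort(x, 'axis', 0,
                                                      'descending', True)
    else:
        # Older Paddle versions expose the same op under paddle._C_ops
        sorted_x, perm = paddle._C_ops.argsort(x, 'axis', 0, 'descending', True)

    print(sorted_x.numpy())  # [3. 2. 1.]
    print(perm.numpy())      # [0 2 1]

Probing for the module once with hasattr keeps both code paths identical apart from the op namespace, so the two call sites in lovasz_hinge_flat and lovasz_softmax_flat behave the same across Paddle versions.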