[PIR] Open unittest for PIR #60877

Merged · 1 commit · Jan 18, 2024
48 changes: 31 additions & 17 deletions python/paddle/nn/functional/loss.py
@@ -984,7 +984,7 @@ def hsigmoid_loss(
     if num_classes < 2:
         raise ValueError(f'Expected num_classes >= 2 (got {num_classes})')
 
-    if in_dynamic_or_pir_mode():
+    if in_dynamic_mode():
         out, _, _ = _C_ops.hsigmoid_loss(
             input,
             label,
@@ -997,27 +997,41 @@ def hsigmoid_loss(
             is_sparse,
         )
         return out
-    else:
-        check_variable_and_dtype(
-            input, 'input', ['float32', 'float64'], 'hsigmoid_loss'
-        )
-        check_variable_and_dtype(label, 'label', ['int64'], 'hsigmoid_loss')
-        check_variable_and_dtype(
-            weight, 'weight', ['float32', 'float64'], 'hsigmoid_loss'
-        )
-        if bias is not None:
-            check_variable_and_dtype(
-                bias, 'bias', ['float32', 'float64'], 'hsigmoid_loss'
-            )
-        if path_table is not None:
-            check_variable_and_dtype(
-                path_table, 'path_table', ['int64'], 'hsigmoid_loss'
-            )
-        if path_code is not None:
-            check_variable_and_dtype(
-                path_code, 'path_code', ['int64'], 'hsigmoid_loss'
-            )
+
+    check_variable_and_dtype(
+        input, 'input', ['float32', 'float64'], 'hsigmoid_loss'
+    )
+    check_variable_and_dtype(label, 'label', ['int64'], 'hsigmoid_loss')
+    check_variable_and_dtype(
+        weight, 'weight', ['float32', 'float64'], 'hsigmoid_loss'
+    )
+    if bias is not None:
+        check_variable_and_dtype(
+            bias, 'bias', ['float32', 'float64'], 'hsigmoid_loss'
+        )
+    if path_table is not None:
+        check_variable_and_dtype(
+            path_table, 'path_table', ['int64'], 'hsigmoid_loss'
+        )
+    if path_code is not None:
+        check_variable_and_dtype(
+            path_code, 'path_code', ['int64'], 'hsigmoid_loss'
+        )
 
+    if in_pir_mode():
+        out, _, _ = _C_ops.hsigmoid_loss(
+            input,
+            label,
+            weight,
+            bias,
+            path_table,
+            path_code,
+            num_classes,
+            is_sparse,
+            is_sparse,
+        )
+        return out
+    else:
         attrs = {
             "num_classes": num_classes,
             "is_sparse": is_sparse,

Review comment (Contributor, attached to the first `check_variable_and_dtype` call; translated from Chinese): "Check logic like this under PIR still needs further discussion."
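Every API in this PR is restructured the same way: the dygraph fast path keeps `in_dynamic_mode()`, the input checks move out of the legacy-only `else:` so they also run under PIR, a new `in_pir_mode()` branch dispatches to `_C_ops`, and the `LayerHelper` code stays as the legacy fallback. As a sketch of what the relocated checks buy (not from the PR; shapes and `num_classes` are arbitrary, and note that plain `paddle.enable_static()` uses the legacy IR unless the PIR flag is switched on, e.g. by the `test_with_pir_api` harness used in the tests below), an unsupported dtype passed to `hsigmoid_loss` in a static program should now be rejected by `check_variable_and_dtype` under PIR as well:

import paddle
import paddle.nn.functional as F

paddle.enable_static()

with paddle.static.program_guard(paddle.static.Program()):
    # int32 is not in ['float32', 'float64'], so check_variable_and_dtype
    # rejects the input -- after this PR, in PIR static mode too.
    x = paddle.static.data(name='x', shape=[4, 3], dtype='int32')
    label = paddle.static.data(name='label', shape=[4, 1], dtype='int64')
    weight = paddle.static.data(name='w', shape=[9, 3], dtype='float32')
    try:
        F.hsigmoid_loss(x, label, num_classes=10, weight=weight)
    except TypeError as e:
        print('rejected as expected:', e)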
84 changes: 49 additions & 35 deletions python/paddle/tensor/linalg.py
@@ -27,7 +27,12 @@
     convert_dtype,
 )
 from ..common_ops_import import Variable
-from ..framework import LayerHelper, in_dynamic_mode, in_dynamic_or_pir_mode
+from ..framework import (
+    LayerHelper,
+    in_dynamic_mode,
+    in_dynamic_or_pir_mode,
+    in_pir_mode,
+)
 from .creation import full
 from .manipulation import cast
 from .math import _get_reduce_axis
@@ -2830,26 +2835,30 @@ def eigh(x, UPLO='L', name=None):
             [ 0.3826833963394165j , -0.9238795042037964j ]])
 
     """
-    if in_dynamic_or_pir_mode():
+    if in_dynamic_mode():
         return _C_ops.eigh(x, UPLO)
-    else:
 
-        def __check_input(x, UPLO):
-            x_shape = list(x.shape)
-            if len(x.shape) < 2:
-                raise ValueError(
-                    "Input(input) only support >=2 tensor, but received "
-                    "length of Input(input) is %s." % len(x.shape)
-                )
-            if x_shape[-1] != x_shape[-2]:
-                raise ValueError(
-                    f"The input matrix must be batches of square matrices. But received x's dimention: {x_shape}"
-                )
-            if UPLO != 'L' and UPLO != 'U':
-                raise ValueError(
-                    f"UPLO must be L or U. But received UPLO is: {UPLO}"
-                )
+    def __check_input(x, UPLO):
+        x_shape = list(x.shape)
+        if len(x.shape) < 2:
+            raise ValueError(
+                "Input(input) only support >=2 tensor, but received "
+                "length of Input(input) is %s." % len(x.shape)
+            )
+        if x_shape[-1] != x_shape[-2]:
+            raise ValueError(
+                f"The input matrix must be batches of square matrices. But received x's dimention: {x_shape}"
+            )
+        if UPLO != 'L' and UPLO != 'U':
+            raise ValueError(
+                f"UPLO must be L or U. But received UPLO is: {UPLO}"
+            )
 
+    if in_pir_mode():
+        __check_input(x, UPLO)
+        return _C_ops.eigh(x, UPLO)
+
+    else:
         __check_input(x, UPLO)
 
         helper = LayerHelper('eigh', **locals())
@@ -3342,27 +3351,32 @@ def eigvalsh(x, UPLO='L', name=None):
             Tensor(shape=[2], dtype=float32, place=Place(cpu), stop_gradient=True,
             [0.17157286, 5.82842731])
     """
-    if in_dynamic_or_pir_mode():
+    if in_dynamic_mode():
         values, _ = _C_ops.eigvalsh(x, UPLO, x.stop_gradient)
         return values
-    else:
 
-        def __check_input(x, UPLO):
-            x_shape = list(x.shape)
-            if len(x.shape) < 2:
-                raise ValueError(
-                    "Input(input) only support >=2 tensor, but received "
-                    "length of Input(input) is %s." % len(x.shape)
-                )
-            if x_shape[-1] != x_shape[-2]:
-                raise ValueError(
-                    f"The input matrix must be batches of square matrices. But received x's dimention: {x_shape}"
-                )
-            if UPLO != 'L' and UPLO != 'U':
-                raise ValueError(
-                    f"UPLO must be L or U. But received UPLO is: {UPLO}"
-                )
+    def __check_input(x, UPLO):
+        x_shape = list(x.shape)
+        if len(x.shape) < 2:
+            raise ValueError(
+                "Input(input) only support >=2 tensor, but received "
+                "length of Input(input) is %s." % len(x.shape)
+            )
+        if x_shape[-1] != x_shape[-2]:
+            raise ValueError(
+                f"The input matrix must be batches of square matrices. But received x's dimention: {x_shape}"
+            )
+        if UPLO != 'L' and UPLO != 'U':
+            raise ValueError(
+                f"UPLO must be L or U. But received UPLO is: {UPLO}"
+            )
 
+    if in_pir_mode():
+        __check_input(x, UPLO)
+        values, _ = _C_ops.eigvalsh(x, UPLO, x.stop_gradient)
+        return values
+
+    else:
         __check_input(x, UPLO)
 
         helper = LayerHelper('eigvalsh', **locals())
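Net effect for both `eigh` and `eigvalsh`: the `__check_input` validation, previously unreachable whenever `in_dynamic_or_pir_mode()` short-circuited, now also guards the PIR branch. A usage sketch of the shape check firing before any op is added to the static program (not part of the PR; the 4x3 shape is arbitrary):

import paddle

paddle.enable_static()

with paddle.static.program_guard(paddle.static.Program()):
    # 4 x 3 is not square, so __check_input raises ValueError before
    # _C_ops.eigh is reached -- now in PIR static mode as well.
    x = paddle.static.data(name='x', shape=[4, 3], dtype='float32')
    try:
        paddle.linalg.eigh(x, UPLO='L')
    except ValueError as e:
        print(e)  # "The input matrix must be batches of square matrices..."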
21 changes: 14 additions & 7 deletions python/paddle/tensor/logic.py
@@ -27,7 +27,12 @@
 from paddle.tensor.math import broadcast_shape
 from paddle.utils.inplace_utils import inplace_apis_in_dygraph_only
 
-from ..framework import LayerHelper, in_dynamic_mode, in_dynamic_or_pir_mode
+from ..framework import (
+    LayerHelper,
+    in_dynamic_mode,
+    in_dynamic_or_pir_mode,
+    in_pir_mode,
+)
 
 __all__ = []
 
@@ -363,14 +368,16 @@ def is_empty(x, name=None):
         False)
 
     """
-    if in_dynamic_or_pir_mode():
+    if in_dynamic_mode():
         return _C_ops.is_empty(x)
-    else:
-        check_variable_and_dtype(
-            x, 'x', ['float32', 'float64', 'int32', 'int64'], 'is_empty'
-        )
-        check_type(name, "name", (str, type(None)), "is_empty")
 
+    check_variable_and_dtype(
+        x, 'x', ['float32', 'float64', 'int32', 'int64'], 'is_empty'
+    )
+    check_type(name, "name", (str, type(None)), "is_empty")
+    if in_pir_mode():
+        return _C_ops.is_empty(x)
+    else:
         helper = LayerHelper("is_empty", **locals())
         cond = helper.create_variable_for_type_inference(dtype='bool')
         cond.stop_gradient = True
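`is_empty` is the smallest instance of the PR's pattern, so it reads well condensed. A sketch of the resulting control flow (the `append_op` tail is reconstructed from the usual legacy path, which this diff truncates, so treat it as an assumption):

from paddle import _C_ops
from paddle.base.data_feeder import check_type, check_variable_and_dtype
from paddle.framework import LayerHelper, in_dynamic_mode, in_pir_mode


def is_empty_sketch(x, name=None):
    # 1. Dygraph: no Python-level checks, straight to the C++ op.
    if in_dynamic_mode():
        return _C_ops.is_empty(x)

    # 2. Checks now run for BOTH static modes; before this PR,
    #    in_dynamic_or_pir_mode() returned above and PIR skipped them.
    check_variable_and_dtype(
        x, 'x', ['float32', 'float64', 'int32', 'int64'], 'is_empty'
    )
    check_type(name, "name", (str, type(None)), "is_empty")

    # 3. PIR static graph: validated inputs, same C++ entry point.
    if in_pir_mode():
        return _C_ops.is_empty(x)

    # 4. Legacy static graph (append_op call reconstructed, not from the diff).
    helper = LayerHelper("is_empty", **locals())
    cond = helper.create_variable_for_type_inference(dtype='bool')
    cond.stop_gradient = True
    helper.append_op(
        type='is_empty', inputs={'X': [x]}, outputs={'Out': [cond]}
    )
    return cond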
48 changes: 43 additions & 5 deletions python/paddle/tensor/manipulation.py
@@ -818,13 +818,9 @@ def crop(x, shape=None, offsets=None, name=None):
     if shape is None:
         shape = x.shape
 
-    if in_dynamic_or_pir_mode():
+    if in_dynamic_mode():
         return _C_ops.crop(x, shape, offsets)
 
-    out = helper.create_variable_for_type_inference(x.dtype)
-    ipts = {'X': x}
-    attrs = {}
-
     def _attr_shape_check(shape_val):
         if not isinstance(shape_val, int):
             raise TypeError(
@@ -854,6 +850,48 @@ def _attr_offsets_check(offset_val):
                 % str(offset_val)
             )
 
+    if in_pir_mode():
+        if isinstance(offsets, paddle.pir.Value):
+            offsets.stop_gradient = True
+        elif paddle.utils._contain_var(offsets):
+            new_offsets_tensor = []
+            for dim in offsets:
+                if isinstance(dim, paddle.pir.Value):
+                    dim.stop_gradient = True
+                    new_offsets_tensor.append(dim)
+                else:
+                    _attr_offsets_check(dim)
+                    temp_out = fill_constant([1], 'int32', dim, force_cpu=True)
+                    new_offsets_tensor.append(temp_out)
+            offsets = new_offsets_tensor
+        else:
+            for offset in offsets:
+                _attr_offsets_check(offset)
+
+        if isinstance(shape, paddle.pir.Value):
+            shape.stop_gradient = True
+        elif paddle.utils._contain_var(shape):
+            new_shape_tensor = []
+            for dim_size in shape:
+                if isinstance(dim_size, paddle.pir.Value):
+                    dim_size.stop_gradient = True
+                    new_shape_tensor.append(dim_size)
+                else:
+                    _attr_shape_check(dim_size)
+                    temp_out = fill_constant(
+                        [1], 'int32', dim_size, force_cpu=True
+                    )
+                    new_shape_tensor.append(temp_out)
+            shape = new_shape_tensor
+        else:
+            for dim_size in shape:
+                _attr_shape_check(dim_size)
+        return _C_ops.crop(x, shape, offsets)
+
+    out = helper.create_variable_for_type_inference(x.dtype)
+    ipts = {'X': x}
+    attrs = {}
+
     if isinstance(offsets, Variable):
         offsets.stop_gradient = True
         ipts['Offsets'] = offsets
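The new PIR branch mirrors the legacy `Variable` handling for `paddle.pir.Value`: a whole-tensor `shape`/`offsets` gets `stop_gradient = True`, and lists mixing Python ints with tensors go through `_attr_*_check` plus `fill_constant`. A usage sketch of that mixed-list path (not from the PR; shapes are arbitrary):

import paddle

paddle.enable_static()

with paddle.static.program_guard(paddle.static.Program()):
    x = paddle.static.data(name='x', shape=[3, 5], dtype='float32')
    # A shape list mixing a 1-D int tensor with a Python int takes the
    # _contain_var branch: the int is validated by _attr_shape_check and
    # wrapped via fill_constant; the tensor entry gets stop_gradient=True.
    dim0 = paddle.full([1], 2, dtype='int32')
    out = paddle.crop(x, shape=[dim0, 3], offsets=[0, 1])
    print(out.shape)  # first dim is dynamic at compile time, e.g. [-1, 3]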
5 changes: 5 additions & 0 deletions test/legacy_test/test_activation_op.py
@@ -1133,6 +1133,7 @@ def test_backward(self):
 
 
 class TestCoshOpError(unittest.TestCase):
+    @test_with_pir_api
     def test_errors(self):
         with static_guard():
             with program_guard(Program()):
@@ -1331,6 +1332,7 @@ def test_dygraph_api(self):
         for r in [out1, out2]:
             np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
 
+    @test_with_pir_api
     def test_errors(self):
         with static_guard():
             with paddle.static.program_guard(paddle.static.Program()):
@@ -1398,6 +1400,7 @@ def test_dygraph_api(self):
         for r in [out1, out2]:
             np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
 
+    @test_with_pir_api
     def test_errors(self):
         with static_guard():
             with paddle.static.program_guard(paddle.static.Program()):
@@ -3122,6 +3125,7 @@ def test_base_api(self):
         out = paddle.nn.functional.hardswish(x)
         np.testing.assert_allclose(out_ref, out.numpy(), rtol=1e-05)
 
+    @test_with_pir_api
     def test_errors(self):
         with static_guard():
             with paddle.static.program_guard(paddle.static.Program()):
@@ -4531,6 +4535,7 @@ def test_base_api(self):
         out = paddle.nn.functional.hardsigmoid(x, slope=0.2)
         np.testing.assert_allclose(out_ref, out.numpy(), rtol=1e-05)
 
+    @test_with_pir_api
     def test_errors(self):
         with static_guard():
             with paddle.static.program_guard(paddle.static.Program()):
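All test-side changes in this PR are this one-liner: `@test_with_pir_api` on error-checking tests, which re-runs the decorated body with the PIR API enabled in addition to the legacy program. A hedged sketch of the resulting shape of such a test, modeled on `TestCoshOpError` above (the decorator's exact mechanics live in `paddle.pir_utils` and are not shown in this diff):

import unittest

import paddle
from paddle.pir_utils import test_with_pir_api

paddle.enable_static()


class TestCoshOpErrorSketch(unittest.TestCase):
    @test_with_pir_api
    def test_errors(self):
        # The decorator runs this body twice: once on the legacy IR and
        # once with PIR switched on, so the dtype check is exercised in
        # both static modes.
        with paddle.static.program_guard(paddle.static.Program()):
            x_int32 = paddle.static.data(
                name='x_int32', shape=[12, 10], dtype='int32'
            )
            self.assertRaises(TypeError, paddle.cosh, x_int32)


if __name__ == '__main__':
    unittest.main()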
2 changes: 2 additions & 0 deletions test/legacy_test/test_crop_tensor_op.py
@@ -18,6 +18,7 @@
 from op_test import OpTest
 
 import paddle
+from paddle.pir_utils import test_with_pir_api
 
 
 def crop(data, offsets, crop_shape):
@@ -225,6 +226,7 @@ def initTestCase(self):
 
 
 class TestCropTensorException(unittest.TestCase):
+    @test_with_pir_api
     def test_exception(self):
         input1 = paddle.static.data(
             name="input1", shape=[2, 3, 6, 6], dtype="float32"
1 change: 1 addition & 0 deletions test/legacy_test/test_diagflat.py
@@ -21,6 +21,7 @@
 from paddle.static import Program, program_guard
 
 
+@test_with_pir_api
 class TestDiagFlatError(unittest.TestCase):
     def test_errors(self):
         paddle.enable_static()
1 change: 1 addition & 0 deletions test/legacy_test/test_eigh_op.py
@@ -223,6 +223,7 @@ def init_input_shape(self):
 
 
 class TestEighAPIError(unittest.TestCase):
+    @test_with_pir_api
     def test_error(self):
         main_prog = paddle.static.Program()
         startup_prog = paddle.static.Program()
1 change: 1 addition & 0 deletions test/legacy_test/test_eigvalsh_op.py
@@ -204,6 +204,7 @@ def init_input_shape(self):
 
 
 class TestEigvalshAPIError(unittest.TestCase):
+    @test_with_pir_api
     def test_error(self):
         main_prog = paddle.static.Program()
         startup_prog = paddle.static.Program()
2 changes: 2 additions & 0 deletions test/legacy_test/test_fold_op.py
@@ -20,6 +20,7 @@
 import paddle
 from paddle import base
 from paddle.base import core
+from paddle.pir_utils import test_with_pir_api
 
 paddle.enable_static()
 
@@ -190,6 +191,7 @@ def test_info(self):
 
 
 class TestFoldOpError(unittest.TestCase):
+    @test_with_pir_api
    def test_errors(self):
         from paddle.base.framework import Program, program_guard
         from paddle.nn.functional import fold
2 changes: 2 additions & 0 deletions test/legacy_test/test_histogram_op.py
@@ -104,6 +104,7 @@ def net_func():
         with self.assertRaises(ValueError):
             self.run_network(net_func)
 
+    @test_with_pir_api
     def test_min_max_range_error(self):
         """Test range of min, max is not finite"""
 
@@ -116,6 +117,7 @@ def net_func():
         with self.assertRaises(TypeError):
             self.run_network(net_func)
 
+    @test_with_pir_api
     def test_type_errors(self):
         with paddle.static.program_guard(paddle.static.Program()):
             # The input type must be Variable.