Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

[Macro] Increase macro constant MAX_RANK_SUPPORTED #63061

Merged
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
18 commits
Select commit Hold shift + click to select a range
0c50e10
Merge pull request #187 from PaddlePaddle/develop
HydrogenSulfate Mar 16, 2024
cec4508
Merge branch 'develop' of https://github.com/HydrogenSulfate/Paddle i…
HydrogenSulfate Mar 21, 2024
0ff0e3e
Merge branch 'develop' of https://github.com/HydrogenSulfate/Paddle i…
HydrogenSulfate Mar 21, 2024
09324cc
Merge branch 'develop' of https://github.com/HydrogenSulfate/Paddle i…
HydrogenSulfate Mar 25, 2024
34ff96e
Merge branch 'develop' of https://github.com/HydrogenSulfate/Paddle i…
HydrogenSulfate Mar 25, 2024
7d0a4ca
Merge pull request #193 from PaddlePaddle/develop
HydrogenSulfate Mar 26, 2024
fe2148f
Merge branch 'develop' of https://github.com/HydrogenSulfate/Paddle i…
HydrogenSulfate Mar 27, 2024
5c559d3
Merge branch 'develop' of https://github.com/HydrogenSulfate/Paddle i…
HydrogenSulfate Mar 27, 2024
f2218ce
Merge branch 'develop' of https://github.com/HydrogenSulfate/Paddle i…
HydrogenSulfate Mar 27, 2024
e70e6fb
increase MAX_RANK_SUPPORTED to 8 for supporting expand op with input …
HydrogenSulfate Mar 27, 2024
b97808a
update code
HydrogenSulfate Mar 28, 2024
a0d69cf
add unit test for rank 6/7/8 for expand_v2
HydrogenSulfate Mar 28, 2024
92e5800
correct expand shape for TestExpandPirValueListShape
HydrogenSulfate Mar 28, 2024
96ca9dd
support rank 7/8 for ExpandGradKernel
HydrogenSulfate Mar 28, 2024
e6a560d
larger numel than 100
HydrogenSulfate Mar 28, 2024
0a5944a
support 8D input for expand_as kernel
HydrogenSulfate Mar 29, 2024
6e87086
update code
HydrogenSulfate Mar 31, 2024
bd02bfa
disable cinn_test for expand_ZeroDim tests
HydrogenSulfate Apr 1, 2024
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion paddle/fluid/operators/expand_as_v2_op.h
Original file line number Diff line number Diff line change
Expand Up @@ -19,7 +19,7 @@ limitations under the License. */
#include "paddle/fluid/framework/operator.h"
#include "paddle/fluid/operators/eigen/eigen_function.h"

#define MAX_RANK_SUPPORTED 6
#define MAX_RANK_SUPPORTED 8

namespace paddle {
namespace operators {
Expand Down
11 changes: 6 additions & 5 deletions paddle/fluid/operators/expand_op.cc
Original file line number Diff line number Diff line change
Expand Up @@ -44,10 +44,11 @@ class ExpandOp : public framework::OperatorWithKernel {
static_cast<size_t>(x_dims.size())));
PADDLE_ENFORCE_LE(
x_dims.size(),
6,
MAX_RANK_SUPPORTED,
platform::errors::InvalidArgument(
"The number of dimensions of the input for Op(expand) "
"must not be greater than 6, but the value received is %d.",
"must not be greater than %d, but the value received is %d.",
MAX_RANK_SUPPORTED,
x_dims.size()));

std::vector<int64_t> out_shape(x_dims.size());
Expand Down Expand Up @@ -98,7 +99,7 @@ class ExpandOpMaker : public framework::OpProtoAndCheckerMaker {
public:
void Make() override {
AddInput("X",
"(Tensor, default Tensor<float>). A tensor with rank in [1, 6]."
"(Tensor, default Tensor<float>). A tensor with rank in [1, 8]."
"X is the input to be expanded.");
AddInput("ExpandTimes",
"(Tensor<int>), optional). If provided, expand according to "
Expand All @@ -112,7 +113,7 @@ class ExpandOpMaker : public framework::OpProtoAndCheckerMaker {
.AsDuplicable()
.AsDispensable();
AddOutput("Out",
"(Tensor, default Tensor<float>). A tensor with rank in [1, 6]."
"(Tensor, default Tensor<float>). A tensor with rank in [1, 8]."
"The rank of Output(Out) have the same with Input(X). "
"After expanding, size of each dimension of Output(Out) is equal "
"to size of the corresponding dimension of Input(X) multiplying "
Expand All @@ -123,7 +124,7 @@ class ExpandOpMaker : public framework::OpProtoAndCheckerMaker {
AddComment(R"DOC(
Expand operator tiles the input by given times number. You should set times
number for each dimension by providing attribute 'expand_times'. The rank of X
should be in [1, 6]. Please note that size of 'expand_times' must be the same
should be in [1, 8]. Please note that size of 'expand_times' must be the same
with X's rank. Following is a using case:
Input(X) is a 3-D tensor with shape [2, 3, 1]:
[
Expand Down
17 changes: 15 additions & 2 deletions paddle/fluid/operators/expand_op.h
Original file line number Diff line number Diff line change
Expand Up @@ -21,7 +21,7 @@ limitations under the License. */
#include "paddle/fluid/framework/operator.h"
#include "paddle/fluid/operators/eigen/eigen_function.h"

#define MAX_RANK_SUPPORTED 6
#define MAX_RANK_SUPPORTED 8

namespace paddle {
namespace operators {
Expand Down Expand Up @@ -128,6 +128,12 @@ class ExpandKernel : public framework::OpKernel<T> {
case 6:
Expand<6>(context);
break;
case 7:
Expand<7>(context);
break;
case 8:
Expand<8>(context);
break;
}
}

Expand Down Expand Up @@ -249,10 +255,17 @@ class ExpandGradKernel : public framework::OpKernel<T> {
case 6:
ExpandBackward<6>(context, reshape_dims_vec, reduce_dims_vec);
break;
case 7:
ExpandBackward<7>(context, reshape_dims_vec, reduce_dims_vec);
break;
case 8:
ExpandBackward<8>(context, reshape_dims_vec, reduce_dims_vec);
break;
default:
PADDLE_THROW(platform::errors::InvalidArgument(
"Only support tensor with rank being between 1 and 6. But "
"Only support tensor with rank being between 1 and %d. But "
"received tensor's rank = %d.",
MAX_RANK_SUPPORTED,
dims));
}
}
Expand Down
2 changes: 1 addition & 1 deletion paddle/fluid/operators/expand_v2_op.h
Original file line number Diff line number Diff line change
Expand Up @@ -22,7 +22,7 @@ limitations under the License. */
#include "paddle/fluid/framework/operator.h"
#include "paddle/fluid/operators/eigen/eigen_function.h"

#define MAX_RANK_SUPPORTED 6
#define MAX_RANK_SUPPORTED 8

namespace paddle {
namespace operators {
Expand Down
2 changes: 1 addition & 1 deletion paddle/fluid/prim/api/manual_prim/utils/utils.h
Original file line number Diff line number Diff line change
Expand Up @@ -88,7 +88,7 @@ static phi::DDim get_reduce_dims(const phi::DDim& x_dims,
* y_dims = [2, 1, 6, 1] <-- shaped are right-aligned for comparison
* <-- broadcast -->
* z_dims = [10, 2, 4, 6, 5]
* ==> reduce_dims_from_z_to_x = [0, 1, 3]
* ==> reduce_dims_from_z_to_x = [1, 3]
* ==> reduce_dims_from_z_to_y = [0, 2, 4]
*/
auto out_dims = paddle::operators::details::BroadcastTwoDims(x_dims, y_dims);
Expand Down
2 changes: 1 addition & 1 deletion paddle/phi/infermeta/binary.cc
Original file line number Diff line number Diff line change
Expand Up @@ -1532,7 +1532,7 @@ void ExpandAsInferMeta(const MetaTensor& x,
const MetaTensor& y,
const std::vector<int>& target_shape,
MetaTensor* out) {
#define MAX_RANK_SUPPORTED 6
#define MAX_RANK_SUPPORTED 8
auto x_dims = x.dims();
PADDLE_ENFORCE_GE(
target_shape.size(),
Expand Down
18 changes: 10 additions & 8 deletions paddle/phi/infermeta/unary.cc
Original file line number Diff line number Diff line change
Expand Up @@ -1219,7 +1219,7 @@ void EinsumRawInferMeta(const std::vector<const MetaTensor*>& inputs,
void ExpandInferMeta(const MetaTensor& x,
const IntArray& shape,
MetaTensor* out) {
#define MAX_RANK_SUPPORTED 6
#define EXPAND_MAX_RANK_SUPPORTED 8
auto x_dims = x.dims();
auto expand_shape = shape.GetData();

Expand All @@ -1238,11 +1238,11 @@ void ExpandInferMeta(const MetaTensor& x,
static_cast<size_t>(x_dims.size())));
PADDLE_ENFORCE_LE(
expand_shape.size(),
MAX_RANK_SUPPORTED,
EXPAND_MAX_RANK_SUPPORTED,
phi::errors::InvalidArgument("The number of elements (%d) of 'shape' for "
"must not be greater than %d.",
expand_shape.size(),
MAX_RANK_SUPPORTED));
EXPAND_MAX_RANK_SUPPORTED));
PADDLE_ENFORCE_GE(
expand_shape.size(),
0,
Expand Down Expand Up @@ -1283,6 +1283,7 @@ void ExpandInferMeta(const MetaTensor& x,
if (out_rank > 0 && out_shape[0] == x_dims[0]) {
out->share_lod(x);
}
#undef EXPAND_MAX_RANK_SUPPORTED
}

void FillAnyLikeInferMeta(const MetaTensor& x,
Expand Down Expand Up @@ -4722,7 +4723,7 @@ void TileInferMeta(const MetaTensor& x,
const IntArray& repeat_times,
MetaTensor* out,
MetaConfig config) {
#define MAX_RANK_SUPPORTED 6
#define TILE_MAX_RANK_SUPPORTED 6

auto repeat_times_data = repeat_times.GetData();
auto x_dims = x.dims();
Expand All @@ -4732,19 +4733,19 @@ void TileInferMeta(const MetaTensor& x,

PADDLE_ENFORCE_LE(
x_dims.size(),
MAX_RANK_SUPPORTED,
TILE_MAX_RANK_SUPPORTED,
errors::InvalidArgument(
"The rank of the input 'x' for tile op "
"must not be greater than %d, but the value received is %d.",
MAX_RANK_SUPPORTED,
TILE_MAX_RANK_SUPPORTED,
x_dims.size()));
PADDLE_ENFORCE_LE(
repeat_times_data.size(),
MAX_RANK_SUPPORTED,
TILE_MAX_RANK_SUPPORTED,
errors::InvalidArgument(
"The size of the shape of input 'repeat_times' for tile op "
"must not be greater than %d, but the value received is %d.",
MAX_RANK_SUPPORTED,
TILE_MAX_RANK_SUPPORTED,
repeat_times_data.size()));
PADDLE_ENFORCE_GE(
repeat_times_data.size(),
Expand Down Expand Up @@ -4785,6 +4786,7 @@ void TileInferMeta(const MetaTensor& x,
out->share_lod(x);
}
out->set_dtype(x.dtype());
#undef TILE_MAX_RANK_SUPPORTED
}

void TopKInferMeta(const MetaTensor& x,
Expand Down
4 changes: 3 additions & 1 deletion paddle/phi/kernels/funcs/eigen/broadcast.cc
Original file line number Diff line number Diff line change
Expand Up @@ -73,7 +73,9 @@ struct EigenBroadcastGrad<Eigen::DefaultDevice, T, Rank> {
template struct FUNCTOR<Eigen::DefaultDevice, T, 3>; \
template struct FUNCTOR<Eigen::DefaultDevice, T, 4>; \
template struct FUNCTOR<Eigen::DefaultDevice, T, 5>; \
template struct FUNCTOR<Eigen::DefaultDevice, T, 6>
template struct FUNCTOR<Eigen::DefaultDevice, T, 6>; \
template struct FUNCTOR<Eigen::DefaultDevice, T, 7>; \
template struct FUNCTOR<Eigen::DefaultDevice, T, 8>
INSTANTIATION(EigenBroadcast, bool);
INSTANTIATION(EigenBroadcast, dtype::float16);
INSTANTIATION(EigenBroadcast, dtype::bfloat16);
Expand Down
4 changes: 3 additions & 1 deletion paddle/phi/kernels/funcs/eigen/broadcast.cu
Original file line number Diff line number Diff line change
Expand Up @@ -72,7 +72,9 @@ struct EigenBroadcastGrad<Eigen::GpuDevice, T, Rank> {
template struct FUNCTOR<Eigen::GpuDevice, T, 3>; \
template struct FUNCTOR<Eigen::GpuDevice, T, 4>; \
template struct FUNCTOR<Eigen::GpuDevice, T, 5>; \
template struct FUNCTOR<Eigen::GpuDevice, T, 6>
template struct FUNCTOR<Eigen::GpuDevice, T, 6>; \
template struct FUNCTOR<Eigen::GpuDevice, T, 7>; \
template struct FUNCTOR<Eigen::GpuDevice, T, 8>
INSTANTIATION(EigenBroadcast, bool);
INSTANTIATION(EigenBroadcast, dtype::float16);
INSTANTIATION(EigenBroadcast, dtype::bfloat16);
Expand Down
11 changes: 10 additions & 1 deletion paddle/phi/kernels/impl/expand_as_grad_kernel_impl.h
Original file line number Diff line number Diff line change
Expand Up @@ -116,10 +116,19 @@ void ExpandAsGradKernel(const Context& context,
ExpandAsBackward<Context, T, 6>(
context, out_grad, reshape_dims_vec, reduce_dims_vec, in_grad);
break;
case 7:
ExpandAsBackward<Context, T, 7>(
context, out_grad, reshape_dims_vec, reduce_dims_vec, in_grad);
break;
case 8:
ExpandAsBackward<Context, T, 8>(
context, out_grad, reshape_dims_vec, reduce_dims_vec, in_grad);
break;
default:
PADDLE_THROW(errors::InvalidArgument(
"Only support tensor with rank being between 1 and 6. But "
"Only support tensor with rank being between 1 and %d. But "
"received tensor's rank = %d.",
MAX_RANK_SUPPORTED,
dims));
}
}
Expand Down
8 changes: 7 additions & 1 deletion paddle/phi/kernels/impl/expand_as_kernel_impl.h
Original file line number Diff line number Diff line change
Expand Up @@ -20,7 +20,7 @@
#include "paddle/phi/kernels/funcs/eigen/common.h"
#include "paddle/phi/kernels/funcs/eigen/eigen_function.h"

#define MAX_RANK_SUPPORTED 6
#define MAX_RANK_SUPPORTED 8

namespace phi {

Expand Down Expand Up @@ -158,6 +158,12 @@ void ExpandAsKernel(const Context& ctx,
case 6:
ExpandAs<Context, T, 6>(ctx, x, real_target_shape, out);
break;
case 7:
ExpandAs<Context, T, 7>(ctx, x, real_target_shape, out);
break;
case 8:
ExpandAs<Context, T, 8>(ctx, x, real_target_shape, out);
break;
}
}

Expand Down
11 changes: 10 additions & 1 deletion paddle/phi/kernels/impl/expand_grad_kernel_impl.h
Original file line number Diff line number Diff line change
Expand Up @@ -128,10 +128,19 @@ void ExpandGradKernel(const Context& ctx,
ExpandBackward<Context, T, 6>(
ctx, out_grad, reshape_dims_vec, reduce_dims_vec, in_grad);
break;
case 7:
ExpandBackward<Context, T, 7>(
ctx, out_grad, reshape_dims_vec, reduce_dims_vec, in_grad);
break;
case 8:
ExpandBackward<Context, T, 8>(
ctx, out_grad, reshape_dims_vec, reduce_dims_vec, in_grad);
break;
default:
PADDLE_THROW(phi::errors::InvalidArgument(
"Only support tensor with rank being between 1 and 6. But "
"Only support tensor with rank being between 1 and %d. But "
"received tensor's rank = %d.",
MAX_RANK_SUPPORTED,
dims));
}
}
Expand Down
8 changes: 7 additions & 1 deletion paddle/phi/kernels/impl/expand_kernel_impl.h
Original file line number Diff line number Diff line change
Expand Up @@ -19,7 +19,7 @@

#include "paddle/phi/kernels/funcs/eigen/common.h"
#include "paddle/phi/kernels/funcs/eigen/eigen_function.h"
#define MAX_RANK_SUPPORTED 6
#define MAX_RANK_SUPPORTED 8

namespace phi {
using Tensor = DenseTensor;
Expand Down Expand Up @@ -169,6 +169,12 @@ void ExpandKernel(const Context& ctx,
case 6:
Expand<Context, T, 6>(ctx, x, shape, out);
break;
case 7:
Expand<Context, T, 7>(ctx, x, shape, out);
break;
case 8:
Expand<Context, T, 8>(ctx, x, shape, out);
break;
}
}

Expand Down
2 changes: 1 addition & 1 deletion paddle/phi/kernels/xpu/expand_as_kernel.cc
Original file line number Diff line number Diff line change
Expand Up @@ -17,7 +17,7 @@
#include "paddle/phi/backends/xpu/enforce_xpu.h"
#include "paddle/phi/core/kernel_registry.h"

#define MAX_RANK_SUPPORTED 6
#define MAX_RANK_SUPPORTED 8

namespace phi {

Expand Down
2 changes: 1 addition & 1 deletion test/legacy_test/op_test.py
Original file line number Diff line number Diff line change
Expand Up @@ -527,7 +527,7 @@ def is_complex_test():
not in check_shape_white_list.NEED_TO_FIX_OP_LIST
):
raise AssertionError(
"Input's shape should be large than or equal to 100 for "
"Number of element(s) of input should be large than or equal to 100 for "
+ cls.op_type
+ " Op."
)
Expand Down
Loading