Skip to content

Commit

Permalink
【Error Message No. 17】 paddle/cinn/frontend/decomposer/* (#64310)
Browse files Browse the repository at this point in the history
* fix

* fix
  • Loading branch information
enkilee authored May 22, 2024
1 parent bd3934d commit fa8b4f3
Show file tree
Hide file tree
Showing 7 changed files with 164 additions and 74 deletions.
2 changes: 0 additions & 2 deletions paddle/cinn/frontend/computation.cc
Original file line number Diff line number Diff line change
Expand Up @@ -209,7 +209,6 @@ void CinnComputation::SetTensorData(hlir::framework::Tensor &t,
true,
phi::errors::InvalidArgument("The size of the input data is not equal to "
"the size of the tensor."));
CHECK_EQ(size, t->shape().numel() * t->type().bytes());
context_->target.arch.Visit(adt::match{
[&](common::UnknownArch) { CINN_NOT_IMPLEMENTED; },
[&](common::X86Arch) { memcpy(tdata, data, size); },
Expand All @@ -233,7 +232,6 @@ void CinnComputation::GetTensorData(hlir::framework::Tensor &t,
true,
phi::errors::InvalidArgument("The size of the input data is not equal to "
"the size of the tensor."));
CHECK_EQ(size, t->shape().numel() * t->type().bytes());
context_->target.arch.Visit(adt::match{
[&](common::UnknownArch) { CINN_NOT_IMPLEMENTED; },
[&](common::X86Arch) { memcpy(data, tdata, size); },
Expand Down
49 changes: 33 additions & 16 deletions paddle/cinn/frontend/decomposer/activation.cc
Original file line number Diff line number Diff line change
Expand Up @@ -14,16 +14,21 @@

#include "paddle/cinn/frontend/decomposer_registry.h"
#include "paddle/cinn/frontend/syntax.h"
#include "paddle/common/enforce.h"

namespace cinn {
namespace frontend {
namespace decomposer {

void relu(const Instruction& instr, const DecomposerContext& context) {
CHECK_EQ(instr->inputs.size(), 1UL)
<< " 1 input tensor for " << instr->op_type;
CHECK_EQ(instr->outputs.size(), 1UL)
<< "1 output tensor for " << instr->op_type;
PADDLE_ENFORCE_EQ(
instr->inputs.size(),
1UL,
phi::errors::InvalidArgument("1 input tensor for ", instr->op_type));
PADDLE_ENFORCE_EQ(
instr->outputs.size(),
1UL,
phi::errors::InvalidArgument("1 output tensor for ", instr->op_type));
auto x = instr->inputs[0];
auto output = instr->outputs[0];
auto* builder = context.builder();
Expand All @@ -39,10 +44,14 @@ void relu(const Instruction& instr, const DecomposerContext& context) {
}

void relu_grad(const Instruction& instr, const DecomposerContext& context) {
CHECK_EQ(instr->inputs.size(), 2UL)
<< " 2 input tensors for " << instr->op_type;
CHECK_EQ(instr->outputs.size(), 1UL)
<< "1 output tensor for " << instr->op_type;
PADDLE_ENFORCE_EQ(
instr->inputs.size(),
2UL,
phi::errors::InvalidArgument(" 2 input tensors for ", instr->op_type));
PADDLE_ENFORCE_EQ(
instr->outputs.size(),
1UL,
phi::errors::InvalidArgument("1 output tensor for ", instr->op_type));
auto dout = instr->inputs[0];
auto out = instr->inputs[1];
auto dx = instr->outputs[0];
Expand All @@ -60,10 +69,14 @@ void relu_grad(const Instruction& instr, const DecomposerContext& context) {
}

void gelu(const Instruction& instr, const DecomposerContext& context) {
CHECK_EQ(instr->inputs.size(), 1UL)
<< " 1 input tensor for " << instr->op_type;
CHECK_EQ(instr->outputs.size(), 1UL)
<< "1 output tensor for " << instr->op_type;
PADDLE_ENFORCE_EQ(
instr->inputs.size(),
1UL,
phi::errors::InvalidArgument(" 1 input tensor for ", instr->op_type));
PADDLE_ENFORCE_EQ(
instr->outputs.size(),
1UL,
phi::errors::InvalidArgument("1 output tensor for ", instr->op_type));
auto x = instr->inputs[0];
auto output = instr->outputs[0];
auto* builder = context.builder();
Expand All @@ -86,10 +99,14 @@ void gelu(const Instruction& instr, const DecomposerContext& context) {
}

void softmax(const Instruction& instr, const DecomposerContext& context) {
CHECK_EQ(instr->inputs.size(), 1UL)
<< " 1 input tensor for " << instr->op_type;
CHECK_EQ(instr->outputs.size(), 1UL)
<< "1 output tensor for " << instr->op_type;
PADDLE_ENFORCE_EQ(
instr->inputs.size(),
1UL,
phi::errors::InvalidArgument(" 1 input tensor for ", instr->op_type));
PADDLE_ENFORCE_EQ(
instr->outputs.size(),
1UL,
phi::errors::InvalidArgument("1 output tensor for ", instr->op_type));
auto x = instr->inputs[0];
auto output = instr->outputs[0];
auto* builder = context.builder();
Expand Down
105 changes: 74 additions & 31 deletions paddle/cinn/frontend/decomposer/batch_norm.cc
Original file line number Diff line number Diff line change
Expand Up @@ -14,7 +14,7 @@

#include "paddle/cinn/frontend/decomposer_registry.h"
#include "paddle/cinn/frontend/syntax.h"

#include "paddle/common/enforce.h"
namespace cinn {
namespace frontend {
namespace decomposer {
Expand All @@ -25,9 +25,12 @@ struct BatchNormHelper {
const std::vector<int>& arg_param_shape,
std::string data_layout,
std::string bn_op_type) {
CHECK_EQ(arg_x_shape.size(), 4UL)
<< "Only 4-D input tensor is supported, but get " << arg_x_shape.size()
<< "-D input tensor.";
PADDLE_ENFORCE_EQ(arg_x_shape.size(),
4UL,
phi::errors::InvalidArgument(
"Only 4-D input tensor is supported, but get %d",
arg_x_shape.size(),
"-D input tensor."));

builder = net_builder;
x_shape = arg_x_shape;
Expand Down Expand Up @@ -162,21 +165,34 @@ struct BatchNormHelper {

void batch_norm_train(const Instruction& instr,
const DecomposerContext& context) {
CHECK_EQ(instr->inputs.size(), 5UL)
<< "The number of the given inputs is not equal to the required for op "
<< instr->op_type;
CHECK_EQ(instr->outputs.size(), 5UL)
<< "The number of the given outputs is not equal to the required for op "
<< instr->op_type;
PADDLE_ENFORCE_EQ(
instr->inputs.size(),
5UL,
phi::errors::InvalidArgument(
"The number of the given inputs is not equal to the required"));
PADDLE_ENFORCE_EQ(
instr->outputs.size(),
5UL,
phi::errors::InvalidArgument(
"The number of the given outputs is not equal to the required"));

auto& x = instr->inputs[0];
auto& scale = instr->inputs[1];
auto& bias = instr->inputs[2];
auto& moving_mean = instr->inputs[3];
auto& moving_variance = instr->inputs[4];
CHECK_EQ(scale->type, bias->type);
CHECK_EQ(scale->type, moving_mean->type);
CHECK_EQ(scale->type, moving_variance->type);
PADDLE_ENFORCE_EQ(
scale->type == bias->type,
true,
phi::errors::InvalidArgument("The type of scale and bias is not equal"));
PADDLE_ENFORCE_EQ(scale->type == moving_mean->type,
true,
phi::errors::InvalidArgument(
"The type of scale and moving_mean is not equal"));
PADDLE_ENFORCE_EQ(scale->type == moving_variance->type,
true,
phi::errors::InvalidArgument(
"The type of scale and moving_variance is not equal"));

float epsilon = instr.GetAttrs<float>("epsilon");
float momentum = instr.GetAttrs<float>("momentum");
Expand Down Expand Up @@ -219,21 +235,34 @@ void batch_norm_train(const Instruction& instr,

void batch_norm_grad(const Instruction& instr,
const DecomposerContext& context) {
CHECK_EQ(instr->inputs.size(), 5UL)
<< " The number of the given inputs is not equal to the required "
<< instr->op_type;
CHECK_EQ(instr->outputs.size(), 3UL)
<< " The number of the given outputs is not equal to the required"
<< instr->op_type;
PADDLE_ENFORCE_EQ(
instr->inputs.size(),
5UL,
phi::errors::InvalidArgument(
"The number of the given inputs is not equal to the required"));
PADDLE_ENFORCE_EQ(
instr->outputs.size(),
3UL,
phi::errors::InvalidArgument(
"The number of the given outputs is not equal to the required"));

auto& y_grad = instr->inputs[0];
auto& x = instr->inputs[1];
auto& scale = instr->inputs[2];
auto& save_mean = instr->inputs[3];
auto& save_variance = instr->inputs[4];
CHECK_EQ(y_grad->type, x->type);
CHECK_EQ(scale->type, save_mean->type);
CHECK_EQ(scale->type, save_variance->type);
PADDLE_ENFORCE_EQ(
y_grad->type == x->type,
true,
phi::errors::InvalidArgument("The type of y_grad and x is not equal"));
PADDLE_ENFORCE_EQ(scale->type == save_mean->type,
true,
phi::errors::InvalidArgument(
"The type of scale and save_mean is not equal"));
PADDLE_ENFORCE_EQ(scale->type == save_variance->type,
true,
phi::errors::InvalidArgument(
"The type of scale and save_variance is not equal"));

auto epsilon = instr.GetAttrs<float>("epsilon");
auto layout = instr.GetAttrs<std::string>("data_layout");
Expand Down Expand Up @@ -304,21 +333,35 @@ void batch_norm_grad(const Instruction& instr,
}

void batch_norm(const Instruction& instr, const DecomposerContext& context) {
CHECK_EQ(instr->inputs.size(), 5UL)
<< "The number of the given inputs is not equal to the required for op "
<< instr->op_type;
CHECK_EQ(instr->outputs.size(), 1UL)
<< "The number of the given outputs is not equal to the required for op "
<< instr->op_type;
PADDLE_ENFORCE_EQ(
instr->inputs.size(),
5UL,
phi::errors::InvalidArgument(
"The number of the given inputs is not equal to the required"));

PADDLE_ENFORCE_EQ(
instr->outputs.size(),
1UL,
phi::errors::InvalidArgument(
"The number of the given outputs is not equal to the required"));

auto& x = instr->inputs[0];
auto& scale = instr->inputs[1];
auto& bias = instr->inputs[2];
auto& moving_mean = instr->inputs[3];
auto& moving_variance = instr->inputs[4];
CHECK_EQ(scale->type, bias->type);
CHECK_EQ(scale->type, moving_mean->type);
CHECK_EQ(scale->type, moving_variance->type);
PADDLE_ENFORCE_EQ(
scale->type == bias->type,
true,
phi::errors::InvalidArgument("The type of scale and bias is not equal"));
PADDLE_ENFORCE_EQ(scale->type == moving_mean->type,
true,
phi::errors::InvalidArgument(
"The type of scale and moving_mean is not equal"));
PADDLE_ENFORCE_EQ(scale->type == moving_variance->type,
true,
phi::errors::InvalidArgument(
"The type of scale and moving_variance is not equal"));

float epsilon = instr.GetAttrs<float>("epsilon");
float momentum = instr.GetAttrs<float>("momentum");
Expand Down
13 changes: 8 additions & 5 deletions paddle/cinn/frontend/decomposer/elementwise.cc
Original file line number Diff line number Diff line change
Expand Up @@ -14,16 +14,19 @@

#include "paddle/cinn/frontend/decomposer_registry.h"
#include "paddle/cinn/frontend/syntax.h"

#include "paddle/common/enforce.h"
namespace cinn {
namespace frontend {
namespace decomposer {

void sum(const Instruction& instr, const DecomposerContext& context) {
CHECK_GT(instr->inputs.size(), 0UL)
<< "At least 1 input tensor for " << instr->op_type;
CHECK_EQ(instr->outputs.size(), 1UL)
<< "1 output tensor for " << instr->op_type;
PADDLE_ENFORCE_GT(instr->inputs.size(),
0UL,
phi::errors::InvalidArgument("At least 1 input tensor."));
PADDLE_ENFORCE_EQ(
instr->outputs.size(),
1UL,
phi::errors::InvalidArgument("Output size should be 1 tensor."));
auto inputs = instr->inputs;
auto output = instr->outputs[0];
auto* builder = context.builder();
Expand Down
24 changes: 16 additions & 8 deletions paddle/cinn/frontend/decomposer/test_helper.cc
Original file line number Diff line number Diff line change
Expand Up @@ -13,7 +13,7 @@
// limitations under the License.

#include "paddle/cinn/frontend/decomposer/test_helper.h"

#include "paddle/common/enforce.h"
namespace cinn::frontend {

void RunDecomposer(Program* prog,
Expand Down Expand Up @@ -55,14 +55,20 @@ void CopyFromVector<bool>(const std::vector<bool>& vec,
auto* data = tensor->mutable_data<bool>(target);

size_t numel = tensor->shape().numel();
CHECK_EQ(vec.size(), numel);
PADDLE_ENFORCE_EQ(vec.size(),
numel,
phi::errors::InvalidArgument(
"The size of the input vector should be equal to the "
"number of elements in the tensor."));

#ifdef CINN_WITH_CUDA
// Why not use vector<bool>? Because, to optimize space, each value is stored
// in a single bit, so vector<bool> doesn't have a data() function.
CHECK_EQ(sizeof(bool), sizeof(char))
<< "The test need ensure the byte size of bool equal to the byte size of "
"char.";
PADDLE_ENFORCE_EQ(sizeof(bool) == sizeof(char),
true,
phi::errors::InvalidArgument(
"The test need ensure the byte size of bool equal to "
"the byte size of char."));

std::vector<char> vec_char(numel);
for (int i = 0; i < numel; ++i) vec_char[i] = static_cast<char>(vec[i]);
Expand All @@ -84,9 +90,11 @@ void CopyToVector<bool>(const hlir::framework::Tensor tensor,
#ifdef CINN_WITH_CUDA
// Why not use vector<bool>? Because, to optimize space, each value is stored
// in a single bit, so vector<bool> doesn't have a data() function.
CHECK_EQ(sizeof(bool), sizeof(char))
<< "The test need ensure the byte size of bool equal to the byte size of "
"char.";
PADDLE_ENFORCE_EQ(
sizeof(bool) == sizeof(char),
true,
phi::errors::InvalidArgument("The test need ensure the byte size of bool "
"equal to the byte size of char."));

std::vector<char> vec_char(numel);
cudaMemcpy(
Expand Down
26 changes: 20 additions & 6 deletions paddle/cinn/frontend/decomposer/test_helper.h
Original file line number Diff line number Diff line change
Expand Up @@ -32,7 +32,7 @@
#include "paddle/cinn/hlir/framework/tensor.h"
#include "paddle/cinn/hlir/op/use_ops.h"
#include "paddle/cinn/hlir/pass/use_pass.h"

#include "paddle/common/enforce.h"
namespace cinn::frontend {

using CPUKernelFunc = std::function<void(const std::vector<size_t>& lengths,
Expand Down Expand Up @@ -83,7 +83,11 @@ void CopyFromVector(const std::vector<T>& vec,
auto* data = tensor->mutable_data<T>(target);

size_t numel = tensor->shape().numel();
CHECK_EQ(vec.size(), numel);
PADDLE_ENFORCE_EQ(vec.size(),
numel,
phi::errors::InvalidArgument(
"The size of the input vector should be equal to the "
"number of elements in the tensor."));

if (target == cinn::common::DefaultNVGPUTarget()) {
#ifdef CINN_WITH_CUDA
Expand Down Expand Up @@ -127,7 +131,11 @@ void CheckOutput(const std::vector<T>& actual,
const std::vector<T>& expect,
float atol = 1e-8,
float rtol = 1e-5) {
CHECK_EQ(actual.size(), expect.size());
PADDLE_ENFORCE_EQ(actual.size(),
expect.size(),
phi::errors::InvalidArgument(
"The size of the actual result should be equal to the "
"size of the expected result."));

auto allclose = [](T a, T e, float atol, float rtol) {
return abs(a - e) <= (atol + rtol * abs(e));
Expand Down Expand Up @@ -159,7 +167,11 @@ void CheckOutput(const std::vector<T>& actual,
<< " (expect), maximum_relative_diff=" << max_diff
<< " (absolute_diff=" << abs((actual[offset] - expect[offset]))
<< ")";
CHECK_EQ(num_diffs, 0);
PADDLE_ENFORCE_EQ(
num_diffs,
0,
phi::errors::InvalidArgument("The actual result is different from the "
"expected result, please check the log."));
}

template <typename T>
Expand Down Expand Up @@ -229,8 +241,10 @@ void RunAndCheckShape(NetBuilder* builder,

for (size_t i = 0; i < output_names.size(); ++i) {
auto tensor = scope->GetTensor(output_names[i]);
CHECK_EQ(tensor->shape().data() == output_shapes[i], true)
<< "The " << i << "-th shape is expected to be " << output_shapes[i];
PADDLE_ENFORCE_EQ(tensor->shape().data() == output_shapes[i],
true,
phi::errors::InvalidArgument(
"The shape of the output tensor is not correct"));
if (output_vecs) {
std::vector<T> vec;
CopyToVector<T>(tensor, &vec);
Expand Down
Loading

0 comments on commit fa8b4f3

Please sign in to comment.