Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

[WIP]【Hackathon 5th No.11】为 Paddle 新增 igamma / igammac API #59368

Closed
wants to merge 11 commits into from
18 changes: 18 additions & 0 deletions paddle/phi/api/yaml/ops.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -1220,6 +1220,24 @@
func : i1e
backward : i1e_grad

- op : igamma
  args : (Tensor x, Tensor a)
  output : Tensor(out)
  infer_meta :
    func : IgammaInferMeta
  kernel :
    func : igamma
  inplace : (x -> out)

- op : igammac
  args : (Tensor x, Tensor a)
  output : Tensor(out)
  infer_meta :
    func : IgammacInferMeta
  kernel :
    func : igammac
  inplace : (x -> out)

- op : imag
args : (Tensor x)
output : Tensor (out)
Expand Down
44 changes: 44 additions & 0 deletions paddle/phi/infermeta/unary.cc
Original file line number Diff line number Diff line change
Expand Up @@ -5265,6 +5265,50 @@ void StridedUnChangedInferMeta(const MetaTensor& x, MetaTensor* out) {
out->set_strides(x.strides());
}

void IgammaInferMeta(const MetaTensor& x,
                     const MetaTensor& a,
                     MetaTensor* out) {
  // igamma(x, a) is element-wise, so the two inputs must agree in rank.
  // Validate before propagating meta to the output.
  // NOTE(review): only the rank is compared here; the element-wise kernels
  // appear to assume identical shapes — consider comparing full dims.
  auto x_dims = x.dims();
  auto a_dims = a.dims();
  int x_dims_size = x_dims.size();
  int a_dims_size = a_dims.size();
  PADDLE_ENFORCE_EQ(
      x_dims_size,
      a_dims_size,
      phi::errors::InvalidArgument(
          "The size of tensor x and tensor a should be same dimension,"
          "but received tensor x (%d) and tensor a (%d).",
          x_dims_size,
          a_dims_size));
  // share_meta already propagates dims, dtype and layout from x, so the
  // previous explicit set_dtype/set_dims calls were redundant.
  out->share_meta(x);
}

void IgammacInferMeta(const MetaTensor& x,
                      const MetaTensor& a,
                      MetaTensor* out) {
  // igammac(x, a) is element-wise, so the two inputs must agree in rank.
  // Validate before propagating meta to the output.
  // NOTE(review): only the rank is compared here; the element-wise kernels
  // appear to assume identical shapes — consider comparing full dims.
  auto x_dims = x.dims();
  auto a_dims = a.dims();
  int x_dims_size = x_dims.size();
  int a_dims_size = a_dims.size();
  PADDLE_ENFORCE_EQ(
      x_dims_size,
      a_dims_size,
      phi::errors::InvalidArgument(
          "The size of tensor x and tensor a should be same dimension,"
          "but received tensor x (%d) and tensor a (%d).",
          x_dims_size,
          a_dims_size));
  // share_meta already propagates dims, dtype and layout from x, so the
  // previous explicit set_dtype/set_dims calls were redundant.
  out->share_meta(x);
}

} // namespace phi

PD_REGISTER_INFER_META_FN(flatten, phi::FlattenInferMeta);
6 changes: 6 additions & 0 deletions paddle/phi/infermeta/unary.h
Original file line number Diff line number Diff line change
Expand Up @@ -304,6 +304,12 @@ void IsEmptyInferMeta(const MetaTensor& x, MetaTensor* out);

void IsfiniteInferMeta(const MetaTensor& input, MetaTensor* out);

// Infers output meta for igamma(x, a): out shares x's meta; x and a are
// required to have the same rank (see IgammaInferMeta in unary.cc).
// NOTE(review): a two-input infer meta may belong in binary.h/binary.cc
// rather than unary — confirm project convention.
void IgammaInferMeta(const MetaTensor& x, const MetaTensor& a, MetaTensor* out);

// Infers output meta for igammac(x, a); same contract as IgammaInferMeta.
void IgammacInferMeta(const MetaTensor& x,
                      const MetaTensor& a,
                      MetaTensor* out);

void KthvalueInferMeta(const MetaTensor& x,
int k,
int axis,
Expand Down
41 changes: 41 additions & 0 deletions paddle/phi/kernels/cpu/igamma_kernel.cc
Original file line number Diff line number Diff line change
@@ -0,0 +1,41 @@
/* Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/phi/kernels/igamma_kernel.h"

#include "paddle/phi/backends/cpu/cpu_context.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/kernels/funcs/for_range.h"
#include "paddle/phi/kernels/impl/igamma_kernel_impl.h"

namespace phi {

template <typename T, typename Context>
void IgammaKernel(const Context& ctx,
const DenseTensor& x,
const DenseTensor& a,
DenseTensor* out) {
const int64_t size = x.numel();
const T* x_data = x.data<T>();
const T* a_data = a.data<T>();
T* out_data = ctx.template Alloc<T>(out);

phi::funcs::ForRange<Context> for_range(ctx, size);
IgammaFunctor<T> functor(x_data, a_data, out_data, size);
for_range(functor);
}

} // namespace phi

PD_REGISTER_KERNEL(igamma, CPU, ALL_LAYOUT, phi::IgammaKernel, float, double) {}
42 changes: 42 additions & 0 deletions paddle/phi/kernels/cpu/igammac_kernel.cc
Original file line number Diff line number Diff line change
@@ -0,0 +1,42 @@
/* Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/phi/kernels/igammac_kernel.h"

#include "paddle/phi/backends/cpu/cpu_context.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/kernels/funcs/for_range.h"
#include "paddle/phi/kernels/impl/igamma_kernel_impl.h"

namespace phi {

template <typename T, typename Context>
void IgammacKernel(const Context& ctx,
const DenseTensor& x,
const DenseTensor& a,
DenseTensor* out) {
const int64_t size = x.numel();
const T* x_data = x.data<T>();
const T* a_data = a.data<T>();
T* out_data = ctx.template Alloc<T>(out);

phi::funcs::ForRange<Context> for_range(ctx, size);
IgammacFunctor<T> functor(x_data, a_data, out_data, size);
for_range(functor);
}

} // namespace phi

PD_REGISTER_KERNEL(
igammac, CPU, ALL_LAYOUT, phi::IgammacKernel, float, double) {}
35 changes: 35 additions & 0 deletions paddle/phi/kernels/gpu/igamma_kernel.cu
Original file line number Diff line number Diff line change
@@ -0,0 +1,35 @@
/* Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/phi/kernels/igamma_kernel.h"

#include "paddle/phi/backends/gpu/gpu_context.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/kernels/funcs/elementwise_base.h"
#include "paddle/phi/kernels/impl/igamma_kernel_impl.h"

namespace phi {

// GPU kernel for igamma: delegates the element-wise computation to
// ElementwiseKernel with CudaIgammaFunctor (defined in igamma_kernel_impl.h).
// out is allocated here with x's dtype.
template <typename T, typename Context>
void IgammaKernel(const Context& ctx,
                  const DenseTensor& x,
                  const DenseTensor& a,
                  DenseTensor* out) {
  ctx.template Alloc<T>(out);
  std::vector<const DenseTensor*> ins = {&x, &a};
  std::vector<DenseTensor*> outs = {out};
  auto functor = CudaIgammaFunctor<T>();
  phi::funcs::ElementwiseKernel<T>(ctx, ins, &outs, functor);
}

} // namespace phi

PD_REGISTER_KERNEL(igamma, GPU, ALL_LAYOUT, phi::IgammaKernel, float, double) {}
39 changes: 39 additions & 0 deletions paddle/phi/kernels/gpu/igammac_kernel.cu
Original file line number Diff line number Diff line change
@@ -0,0 +1,39 @@
/* Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/phi/kernels/igammac_kernel.h"

#include "paddle/phi/backends/gpu/gpu_context.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/kernels/funcs/elementwise_base.h"
#include "paddle/phi/kernels/impl/igamma_kernel_impl.h"

namespace phi {

template <typename T, typename Context>
void IgammacKernel(const Context& ctx,
const DenseTensor& x,
const DenseTensor& a,
DenseTensor* out) {
ctx.template Alloc<T>(out);
std::vector<const DenseTensor*> ins = {&x, &a};
std::vector<DenseTensor*> outs = {out};
auto functor = CudaIgammacFunctor<T>();
phi::funcs::ElementwiseKernel<T>(ctx, ins, &outs, functor);
}

} // namespace phi

PD_REGISTER_KERNEL(
igammac, GPU, ALL_LAYOUT, phi::IgammacKernel, float, double) {}
34 changes: 34 additions & 0 deletions paddle/phi/kernels/igamma_kernel.h
Original file line number Diff line number Diff line change
@@ -0,0 +1,34 @@
// Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#pragma once

#include "paddle/phi/core/dense_tensor.h"

namespace phi {

/**
 * @brief Computes the element-wise incomplete gamma function igamma(x, a).
 *        NOTE(review): the original comment described the modified Bessel
 *        function of order 0 — a copy-paste leftover from the i0 kernel.
 * @param ctx device context
 * @param x The first input tensor of igamma
 * @param a The second input tensor of igamma; assumed to match x's shape —
 *        confirm against IgammaInferMeta
 * @param out The output tensor of the igamma kernel; it has the same shape
 *        and dtype as the inputs, one result per input element
 */
template <typename T, typename Context>
void IgammaKernel(const Context& ctx,
                  const DenseTensor& x,
                  const DenseTensor& a,
                  DenseTensor* out);

} // namespace phi
34 changes: 34 additions & 0 deletions paddle/phi/kernels/igammac_kernel.h
Original file line number Diff line number Diff line change
@@ -0,0 +1,34 @@
// Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#pragma once

#include "paddle/phi/core/dense_tensor.h"

namespace phi {

/**
 * @brief Computes the element-wise complementary incomplete gamma function
 *        igammac(x, a).
 *        NOTE(review): the original comment described the modified Bessel
 *        function of order 0 — a copy-paste leftover from the i0 kernel.
 * @param ctx device context
 * @param x The first input tensor of igammac
 * @param a The second input tensor of igammac; assumed to match x's shape —
 *        confirm against IgammacInferMeta
 * @param out The output tensor of the igammac kernel; it has the same shape
 *        and dtype as the inputs, one result per input element
 */
template <typename T, typename Context>
void IgammacKernel(const Context& ctx,
                   const DenseTensor& x,
                   const DenseTensor& a,
                   DenseTensor* out);

} // namespace phi
Loading