[Paddle Tensor Phase 2: API support for 0-size Tensor] paddle.full_like supports 0-size tensor - part #70077
Conversation
Your PR was submitted successfully. Thank you for contributing to this open-source project!
if (out->numel() == 0) {
  return;  // Handle 0-size Tensor: no allocation or computation needed.
}
Got it, thanks.
It looks like the other places already add this in the surrounding context.
if (out->numel() == 0) {
  return;
}
Should this be moved to below line 109?
out->Resize(x.dims());
if (out->numel() == 0) {
  return;
}
- out->Resize is newly added logic; move it inside the if branch.
- The if branch is missing an Alloc call.
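For illustration, a minimal sketch of one way to read these two suggestions (hedged: the exact placement in the kernel is an assumption, and dev_ctx.template Alloc<T>(out) is the usual PHI allocation call):

// Hedged sketch of the suggested structure, not the merged code:
// keep the newly added Resize inside the 0-size branch and allocate the
// (zero-byte) output buffer before the early return, so the output's
// metadata and allocation stay valid even though no values are written.
if (out->numel() == 0) {
  out->Resize(x.dims());
  dev_ctx.template Alloc<T>(out);
  return;
}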
@@ -105,12 +105,12 @@ void FullBatchSizeLikeKernel(const Context& dev_ctx,
                             int out_batch_size_dim,
                             DenseTensor* out) {
  if (x.lod().size() && x_batch_size_dim == 0) {
    // set the correct batch size for the DenseTensor.
Do not delete the comment.
} else {
  FullLikeKernel<T, Context>(dev_ctx, x, val, dtype, out);
}
FullLikeKernel<T, Context>(dev_ctx, x, val, dtype, out);
}
This part probably doesn't need to be changed.
shape = [2, 3]
value = 5
The shape here is not 0-size; what is this unit test meant to cover?
)
def test_full_kernel_gpu(self):
    paddle.disable_static()
    shape = [2, 3]
Same as above.
Does this unit test file include a case that tests 0-size?
Got it, thanks.
Could you please take a look? I'm not sure whether this is correct.
LGTM
class TestFullKernelZeroSize(unittest.TestCase):
    def test_full_kernel_cpu_zero_size(self):
        paddle.disable_static()
        value = 5
        dtype = "int32"
        shape = [0, 3]
        tensor = paddle.full(shape, value, dtype=dtype)
        expected = np.full(shape, value, dtype=dtype)
        self.assertTrue(np.array_equal(tensor.numpy(), expected))
        paddle.enable_static()

    @unittest.skipIf(
        not core.is_compiled_with_cuda(), "Paddle is not compiled with CUDA"
    )
    def test_full_kernel_gpu_zero_size(self):
        paddle.disable_static()
        paddle.set_device("gpu:0")
        value = 5.5
        dtype = "float32"
        shape = [0, 3]
        tensor = paddle.full(shape, value, dtype=dtype)
        expected = np.full(shape, value, dtype=dtype)
        self.assertTrue(np.array_equal(tensor.numpy(), expected))
        paddle.enable_static()


class TestFullLikeKernelZeroSize(unittest.TestCase):
    def test_full_like_kernel_cpu_zero_size(self):
        paddle.disable_static()
        base_tensor = paddle.to_tensor(np.empty((0, 2), dtype=np.float32))
        value = 10.0
        result = paddle.full_like(base_tensor, value, dtype="float32")
        expected = np.full_like(base_tensor.numpy(), value)
        self.assertTrue(np.array_equal(result.numpy(), expected))
        paddle.enable_static()

    @unittest.skipIf(
        not core.is_compiled_with_cuda(), "Paddle is not compiled with CUDA"
    )
    def test_full_like_kernel_gpu_zero_size(self):
        paddle.disable_static()
        base_tensor = paddle.to_tensor(
            np.empty((0, 3), dtype=np.float32), place=paddle.CUDAPlace(0)
        )
        value = 20.0
        result = paddle.full_like(base_tensor, value, dtype="float32")
        expected = np.full_like(base_tensor.numpy(), value)
        self.assertTrue(np.array_equal(result.numpy(), expected))
        paddle.enable_static()
- Use a guard for the enable/disable static switching instead of calling the two functions manually.
- For checks like self.assertTrue(np.array_equal/allclose(result.numpy(), expected)), use np.testing.assert_array_equal/allclose instead.
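For illustration, a hedged sketch of how one of these tests might look after both suggestions are applied (the guard helper is an assumption; paddle.base.dygraph.guard() is one commonly used dygraph guard, and the class/method names below are placeholders):

import unittest

import numpy as np

import paddle


class TestFullLikeKernelZeroSizeSketch(unittest.TestCase):
    def test_full_like_kernel_cpu_zero_size(self):
        # Assumption: a dygraph guard replaces the manual
        # paddle.disable_static() / paddle.enable_static() pair.
        with paddle.base.dygraph.guard():
            base_tensor = paddle.to_tensor(np.empty((0, 2), dtype=np.float32))
            result = paddle.full_like(base_tensor, 10.0, dtype="float32")
            expected = np.full_like(base_tensor.numpy(), 10.0)
            # np.testing assertions print a useful diff on failure,
            # unlike a bare self.assertTrue.
            np.testing.assert_array_equal(result.numpy(), expected)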
@SCUcookie Please update the unit test code in the next PR according to the review comments.
PR Category
User Experience
PR Types
Bug fixes
Description
Add handling for the case where the input is a 0-size tensor.
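For context, a minimal dygraph example (not taken from the PR itself) of what 0-size support means for paddle.full_like:

import numpy as np

import paddle

paddle.disable_static()

# A 0-size tensor: one dimension is 0, so it contains no elements.
x = paddle.to_tensor(np.empty((0, 3), dtype=np.float32))

# With this change, full_like returns an empty tensor with the same shape
# instead of failing on the 0-size input.
y = paddle.full_like(x, 5.0)
print(y.shape)  # [0, 3]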