[Paddle Tensor No.12][BUPT] Add Tensor.__dlpack_device__ #69632
Conversation
Your PR was submitted successfully. Thank you for your contribution to this open-source project!
test/legacy_test/test_dlpack.py
Outdated
with dygraph_guard():
    tensor_cpu = paddle.to_tensor([1, 2, 3], place=base.CPUPlace())
    device_type, device_id = tensor_cpu.__dlpack_device__()
    self.assertEqual(device_type, 1)
Don't hard-code numbers for device_type; use DLDeviceType.xxx instead.
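For reference, the DLPack specification fixes these integer codes, which is what the hard-coded 1/2/14 in the test correspond to. A minimal sketch of such an enum (the actual DLDeviceType in Paddle may live in a different module and carry more members):

from enum import IntEnum


class DLDeviceType(IntEnum):
    # Integer codes defined by the DLPack specification (dlpack.h).
    kDLCPU = 1        # plain host memory
    kDLCUDA = 2       # CUDA device memory
    kDLCUDAHost = 3   # CUDA pinned host memory
    kDLOneAPI = 14    # value 14; the test above uses this code for XPU


# With named members, an assertion reads as intent rather than a magic number:
# self.assertEqual(device_type, DLDeviceType.kDLCPU)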
test/legacy_test/test_dlpack.py
Outdated
def test_dlpack_device(self):
    with dygraph_guard():
        tensor_cpu = paddle.to_tensor([1, 2, 3], place=base.CPUPlace())
        device_type, device_id = tensor_cpu.__dlpack_device__()
        self.assertEqual(device_type, 1)
        self.assertEqual(device_id, 0)
        if paddle.is_compiled_with_cuda():
            tensor_cuda = paddle.to_tensor(
                [1, 2, 3], place=base.CUDAPlace(0)
            )
            device_type, device_id = tensor_cuda.__dlpack_device__()
            self.assertEqual(device_type, 2)
            self.assertEqual(device_id, 0)
        if paddle.is_compiled_with_cuda():
            tensor_pinned = paddle.to_tensor(
                [1, 2, 3], place=base.CUDAPinnedPlace()
            )
            device_type, device_id = tensor_pinned.__dlpack_device__()
            self.assertEqual(device_type, 1)
            self.assertEqual(device_id, 0)
        if paddle.is_compiled_with_xpu():
            tensor_xpu = paddle.to_tensor([1, 2, 3], place=base.XPUPlace(0))
            device_type, device_id = tensor_xpu.__dlpack_device__()
            self.assertEqual(device_type, 14)
            self.assertEqual(device_id, 0)
When writing code, please add blank lines between blocks with different logic; otherwise everything is crammed together and hard to read.
Suggested change:

def test_dlpack_device(self):
    with dygraph_guard():
        tensor_cpu = paddle.to_tensor([1, 2, 3], place=base.CPUPlace())
        device_type, device_id = tensor_cpu.__dlpack_device__()
        self.assertEqual(device_type, 1)
        self.assertEqual(device_id, 0)

        if paddle.is_compiled_with_cuda():
            tensor_cuda = paddle.to_tensor(
                [1, 2, 3], place=base.CUDAPlace(0)
            )
            device_type, device_id = tensor_cuda.__dlpack_device__()
            self.assertEqual(device_type, 2)
            self.assertEqual(device_id, 0)

        if paddle.is_compiled_with_cuda():
            tensor_pinned = paddle.to_tensor(
                [1, 2, 3], place=base.CUDAPinnedPlace()
            )
            device_type, device_id = tensor_pinned.__dlpack_device__()
            self.assertEqual(device_type, 1)
            self.assertEqual(device_id, 0)

        if paddle.is_compiled_with_xpu():
            tensor_xpu = paddle.to_tensor([1, 2, 3], place=base.XPUPlace(0))
            device_type, device_id = tensor_xpu.__dlpack_device__()
            self.assertEqual(device_type, 14)
            self.assertEqual(device_id, 0)
import paddle
from paddle import base
from paddle.utils.dlpack import DLDeviceType  # assumed import location of the enum
import torch


def test():
    # cpu
    tensor_cpu = paddle.to_tensor([1, 2, 3], place=base.CPUPlace())
    paddle_device_type, paddle_device_id = tensor_cpu.__dlpack_device__()
    torch_cpu = torch.randn(100, 10000, device='cpu')
    device_type_torch = DLDeviceType.kDLCPU if torch_cpu.device.type == "cpu" else -1
    device_id_torch = torch_cpu.device.index
    print("paddle_cpu_device:", paddle_device_type, paddle_device_id)
    print("torch_cpu_device:", device_type_torch, device_id_torch)

    # gpu
    tensor_gpu = paddle.to_tensor([1, 2, 3], place=base.CUDAPlace(0))
    torch_gpu = torch.randn(100, 10000, device='cuda')
    paddle_device_type, paddle_device_id = tensor_gpu.__dlpack_device__()
    device_type_torch = DLDeviceType.kDLCUDA if torch_gpu.device.type == "cuda" else -1
    device_id_torch = torch_gpu.device.index
    print("paddle_gpu_device:", paddle_device_type, paddle_device_id)
    print("torch_gpu_device:", device_type_torch, device_id_torch)

Output:
LGTM
self.assertEqual(device_type, DLDeviceType.kDLCPU)
self.assertEqual(device_id, None)

# 测试 CUDA
Try to avoid Chinese in code: "测试" --> "test".
elif place.is_cpu_place():
    return DLDeviceType.kDLCPU, None
elif place.is_cuda_pinned_place():
    return DLDeviceType.kDLCUDAHost, 0
Why does cpu_place return None while cuda_pinned_place returns 0 as the second value?
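For illustration only, one way to make the second element uniform is to report an integer device id for every place. A hedged sketch under those assumptions (is_gpu_place/gpu_device_id are assumed Place accessors, and the id chosen for CPU/pinned memory here is not necessarily what the PR finally adopts):

from paddle.utils.dlpack import DLDeviceType  # assumed import location of the enum


def dlpack_device(place):
    # Map a Paddle place to a DLPack (device_type, device_id) pair.
    if place.is_gpu_place():
        return DLDeviceType.kDLCUDA, place.gpu_device_id()
    elif place.is_cpu_place():
        return DLDeviceType.kDLCPU, 0          # host memory: report id 0 for consistency
    elif place.is_cuda_pinned_place():
        return DLDeviceType.kDLCUDAHost, 0     # pinned host memory: id 0 as well
    else:
        raise ValueError(f"Unsupported place for __dlpack_device__: {place}")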
LGTM
PR Category
User Experience
PR Types
New features
Description
Paddle Tensor standardization: add Tensor.__dlpack_device__
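A minimal usage sketch of the new protocol method (illustrative; the reported values follow the mapping discussed in the review threads above):

import paddle

x = paddle.to_tensor([1, 2, 3])
device_type, device_id = x.__dlpack_device__()
# On CPU this yields the kDLCPU code (1); on CUDA device 0 it yields the
# kDLCUDA code (2) with id 0. The exact device_id reported for CPU (0 or None)
# follows the implementation discussed in the review above.
print(device_type, device_id)

# A DLPack consumer can use this pair to decide how to import the tensor,
# e.g. via paddle.utils.dlpack.from_dlpack / to_dlpack.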