
Commit 3289717

[TVM FFI] Bump tvm ffi to 0.1.0b20 in unittests (PaddlePaddle#75902)
1 parent b91c61d commit 3289717

2 files changed: +23 −28 lines

python/unittest_py/requirements.txt

Lines changed: 1 addition & 1 deletion
@@ -20,4 +20,4 @@ xdoctest==1.3.0
 ubelt==1.3.3 # just for xdoctest
 mypy==1.17.1
 soundfile
-apache-tvm-ffi==0.1.0b16
+apache-tvm-ffi==0.1.0b20
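
The pin moves from 0.1.0b16 to 0.1.0b20, and the test changes below assume the newer wheel's API surface. A quick standard-library sanity check before running the suite (the asserted value simply mirrors the pin above):

from importlib.metadata import version

# Fail fast if an older apache-tvm-ffi build is still installed,
# since the tests below use the newer accessor-style TensorView API.
assert version("apache-tvm-ffi") == "0.1.0b20"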

test/legacy_test/test_tvm_ffi.py

Lines changed: 22 additions & 27 deletions
@@ -49,14 +49,14 @@ def test_c_dlpack_exchange_api_cpu(self):
         cpp_source = r"""
         void add_one_cpu(tvm::ffi::TensorView x, tvm::ffi::TensorView y) {
           // implementation of a library function
-          TVM_FFI_ICHECK(x->ndim == 1) << "x must be a 1D tensor";
+          TVM_FFI_ICHECK(x.ndim() == 1) << "x must be a 1D tensor";
           DLDataType f32_dtype{kDLFloat, 32, 1};
-          TVM_FFI_ICHECK(x->dtype == f32_dtype) << "x must be a float tensor";
-          TVM_FFI_ICHECK(y->ndim == 1) << "y must be a 1D tensor";
-          TVM_FFI_ICHECK(y->dtype == f32_dtype) << "y must be a float tensor";
-          TVM_FFI_ICHECK(x->shape[0] == y->shape[0]) << "x and y must have the same shape";
-          for (int i = 0; i < x->shape[0]; ++i) {
-            static_cast<float*>(y->data)[i] = static_cast<float*>(x->data)[i] + 1;
+          TVM_FFI_ICHECK(x.dtype() == f32_dtype) << "x must be a float tensor";
+          TVM_FFI_ICHECK(y.ndim() == 1) << "y must be a 1D tensor";
+          TVM_FFI_ICHECK(y.dtype() == f32_dtype) << "y must be a float tensor";
+          TVM_FFI_ICHECK(x.size(0) == y.size(0)) << "x and y must have the same shape";
+          for (int i = 0; i < x.size(0); ++i) {
+            static_cast<float*>(y.data_ptr())[i] = static_cast<float*>(x.data_ptr())[i] + 1;
           }
         }
         """
@@ -92,22 +92,22 @@ def test_c_dlpack_exchange_api_gpu(self):
 
         void add_one_cuda(tvm::ffi::TensorView x, tvm::ffi::TensorView y) {
           // implementation of a library function
-          TVM_FFI_ICHECK(x->ndim == 1) << "x must be a 1D tensor";
+          TVM_FFI_ICHECK(x.ndim() == 1) << "x must be a 1D tensor";
           DLDataType f32_dtype{kDLFloat, 32, 1};
-          TVM_FFI_ICHECK(x->dtype == f32_dtype) << "x must be a float tensor";
-          TVM_FFI_ICHECK(y->ndim == 1) << "y must be a 1D tensor";
-          TVM_FFI_ICHECK(y->dtype == f32_dtype) << "y must be a float tensor";
-          TVM_FFI_ICHECK(x->shape[0] == y->shape[0]) << "x and y must have the same shape";
+          TVM_FFI_ICHECK(x.dtype() == f32_dtype) << "x must be a float tensor";
+          TVM_FFI_ICHECK(y.ndim() == 1) << "y must be a 1D tensor";
+          TVM_FFI_ICHECK(y.dtype() == f32_dtype) << "y must be a float tensor";
+          TVM_FFI_ICHECK(x.size(0) == y.size(0)) << "x and y must have the same shape";
 
-          int64_t n = x->shape[0];
+          int64_t n = x.size(0);
           int64_t nthread_per_block = 256;
           int64_t nblock = (n + nthread_per_block - 1) / nthread_per_block;
           // Obtain the current stream from the environment by calling TVMFFIEnvGetStream
           cudaStream_t stream = static_cast<cudaStream_t>(
-              TVMFFIEnvGetStream(x->device.device_type, x->device.device_id));
+              TVMFFIEnvGetStream(x.device().device_type, x.device().device_id));
           // launch the kernel
-          AddOneKernel<<<nblock, nthread_per_block, 0, stream>>>(static_cast<float*>(x->data),
-                                                                 static_cast<float*>(y->data), n);
+          AddOneKernel<<<nblock, nthread_per_block, 0, stream>>>(static_cast<float*>(x.data_ptr()),
+                                                                 static_cast<float*>(y.data_ptr()), n);
         }
         """
         mod: Module = tvm_ffi.cpp.load_inline(
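
The GPU hunk makes the same accessor migration and keeps the stream contract: the kernel launches on whatever stream TVMFFIEnvGetStream reports for the tensor's device, now read through x.device() instead of x->device. A sketch of the GPU call path under the same assumptions as above (cuda_sources as the keyword for device code is also an assumption):

import numpy as np
import paddle
import tvm_ffi

mod = tvm_ffi.cpp.load_inline(
    name="add_one_cuda_mod",     # hypothetical module name
    cuda_sources=cpp_source,     # the CUDA source shown above
    functions=["add_one_cuda"],
)
x = paddle.ones([3], dtype="float32").cuda()
y = paddle.zeros([3], dtype="float32").cuda()
mod.add_one_cuda(x, y)           # runs on the stream the environment reports
np.testing.assert_allclose(y.numpy(), [2.0, 2.0, 2.0])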
@@ -123,23 +123,18 @@ def test_c_dlpack_exchange_api_gpu(self):
         np.testing.assert_allclose(y.numpy(), [2.0, 2.0, 2.0])
 
     def test_c_dlpack_exchange_api_alloc_tensor(self):
-        if platform.system() == "Windows":
-            # Temporary skip this test case on windows because return owned tensor created by
-            # TVMFFIEnvGetTensorAllocator will cause double free error
-            return
         cpp_source = r"""
         inline tvm::ffi::Tensor alloc_tensor(tvm::ffi::Shape shape, DLDataType dtype, DLDevice device) {
-          return tvm::ffi::Tensor::FromDLPackAlloc(TVMFFIEnvGetTensorAllocator(), shape, dtype, device);
+          return tvm::ffi::Tensor::FromEnvAlloc(TVMFFIEnvTensorAlloc, shape, dtype, device);
         }
 
         tvm::ffi::Tensor add_one_cpu(tvm::ffi::TensorView x) {
-          TVM_FFI_ICHECK(x->ndim == 1) << "x must be a 1D tensor";
+          TVM_FFI_ICHECK(x.ndim() == 1) << "x must be a 1D tensor";
           DLDataType f32_dtype{kDLFloat, 32, 1};
-          TVM_FFI_ICHECK(x->dtype == f32_dtype) << "x must be a float tensor";
-          tvm::ffi::Shape x_shape(x->shape, x->shape + x->ndim);
-          tvm::ffi::Tensor y = alloc_tensor(x_shape, f32_dtype, x->device);
-          for (int i = 0; i < x->shape[0]; ++i) {
-            static_cast<float*>(y->data)[i] = static_cast<float*>(x->data)[i] + 1;
+          TVM_FFI_ICHECK(x.dtype() == f32_dtype) << "x must be a float tensor";
+          tvm::ffi::Tensor y = alloc_tensor(x.shape(), f32_dtype, x.device());
+          for (int i = 0; i < x.size(0); ++i) {
+            static_cast<float*>(y.data_ptr())[i] = static_cast<float*>(x.data_ptr())[i] + 1;
           }
           return y;
         }
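
Here the allocation hook changes from Tensor::FromDLPackAlloc(TVMFFIEnvGetTensorAllocator(), ...) to Tensor::FromEnvAlloc(TVMFFIEnvTensorAlloc, ...), so the returned tensor is allocated through the environment's tensor allocator registered by the host framework; with that change, the Windows skip for the old double-free on owned tensors is dropped. A sketch of the round trip, assuming mod is built as in the CPU sketch above but with this hunk's source and functions=["add_one_cpu"]:

x = paddle.ones([3], dtype="float32")
y = mod.add_one_cpu(x)   # y is created inside C++ via the env allocator
np.testing.assert_allclose(y.numpy(), [2.0, 2.0, 2.0])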
