Skip to content

Commit 9760a63

Browse files
mikaylagawarecki authored and pytorchmergebot committed
Test that TORCH_FEATURE_VERSION guards are used where needed (pytorch#167962)
Splits each torch library registration in the 2.10 folder into its own file -- I had a script that parsed kernel.cpp to do this but I felt like forcing this responsibility on the user might be less error prone Compiles each file targetting 2.9 and asserts that compilation fails. (There are 2 2.9 kernels we use as negative tests where compilation is expected to succeed) Pull Request resolved: pytorch#167962 Approved by: https://github.com/janeyx99 ghstack dependencies: pytorch#168025, pytorch#167802, pytorch#167803, pytorch#167804
1 parent 2e907f4 commit 9760a63

22 files changed

+812
-244
lines changed
Lines changed: 20 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,20 @@
1+
#include <torch/csrc/stable/library.h>
2+
#include <torch/csrc/stable/tensor.h>
3+
4+
using torch::stable::Tensor;
5+
6+
// Returns the raw data pointer of `t` as an integer address so it can cross
// the stable ABI boundary as a plain int. When `mutable_` is true the
// mutable pointer accessor is used; otherwise the const accessor is used.
uint64_t get_any_data_ptr(Tensor t, bool mutable_) {
  if (!mutable_) {
    return reinterpret_cast<uint64_t>(t.const_data_ptr());
  }
  return reinterpret_cast<uint64_t>(t.mutable_data_ptr());
}
13+
14+
// Declares the schema for get_any_data_ptr on the libtorch_agnostic_2_10
// library. The returned pointer address is exposed as a schema `int`.
STABLE_TORCH_LIBRARY_FRAGMENT(libtorch_agnostic_2_10, m) {
  m.def("get_any_data_ptr(Tensor t, bool mutable_) -> int");
}
17+
18+
// Registers the boxed CPU/CompositeExplicitAutograd kernel for
// get_any_data_ptr; TORCH_BOX adapts the unboxed C++ function to the
// stable boxed calling convention.
STABLE_TORCH_LIBRARY_IMPL(libtorch_agnostic_2_10, CompositeExplicitAutograd, m) {
  m.impl("get_any_data_ptr", TORCH_BOX(&get_any_data_ptr));
}
Lines changed: 34 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,34 @@
1+
#include <torch/csrc/stable/library.h>
2+
#include <torch/csrc/stable/tensor.h>
3+
#include <torch/headeronly/core/ScalarType.h>
4+
5+
using torch::stable::Tensor;
6+
7+
// Returns the typed data pointer of `t` as an integer address, dispatching on
// `dtype` to instantiate the templated accessor. `mutable_` selects
// mutable_data_ptr<T>() vs const_data_ptr<T>(). Returns 0 for any dtype not
// covered by the cases below.
uint64_t get_template_any_data_ptr(Tensor t, torch::headeronly::ScalarType dtype, bool mutable_) {
// Each case returns immediately, so no `break` is needed.
#define DEFINE_CASE(T, name) \
  case torch::headeronly::ScalarType::name: { \
    if (mutable_) { \
      return reinterpret_cast<uint64_t>(t.mutable_data_ptr<T>()); \
    } else { \
      return reinterpret_cast<uint64_t>(t.const_data_ptr<T>()); \
    } \
  }
  switch (dtype) {
    // per aten/src/ATen/templates/TensorMethods.cpp:
    AT_FORALL_SCALAR_TYPES_WITH_COMPLEX(DEFINE_CASE)
    // The wider unsigned types are listed explicitly; they are not emitted
    // by the FORALL macro above (duplicate case labels would otherwise fail
    // to compile).
    DEFINE_CASE(uint16_t, UInt16)
    DEFINE_CASE(uint32_t, UInt32)
    DEFINE_CASE(uint64_t, UInt64)
    default:
      return 0;
  }
#undef DEFINE_CASE
}
27+
28+
// Declares the schema for get_template_any_data_ptr; the dtype to dispatch
// on is passed explicitly as a schema ScalarType argument.
STABLE_TORCH_LIBRARY_FRAGMENT(libtorch_agnostic_2_10, m) {
  m.def("get_template_any_data_ptr(Tensor t, ScalarType dtype, bool mutable_) -> int");
}
31+
32+
// Registers the boxed CompositeExplicitAutograd kernel for
// get_template_any_data_ptr.
STABLE_TORCH_LIBRARY_IMPL(libtorch_agnostic_2_10, CompositeExplicitAutograd, m) {
  m.impl("get_template_any_data_ptr", TORCH_BOX(&get_template_any_data_ptr));
}

test/cpp_extensions/libtorch_agnostic_2_10_extension/libtorch_agnostic_2_10/csrc/kernel.cpp

Lines changed: 0 additions & 244 deletions
This file was deleted.
Lines changed: 41 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,41 @@
1+
#include <torch/csrc/stable/library.h>
2+
#include <torch/csrc/stable/ops.h>
3+
#include <torch/csrc/stable/tensor.h>
4+
5+
#include <vector>
6+
7+
using torch::stable::Tensor;
8+
9+
// Declare my__foreach_mul (defined in my__foreach_mul.cpp)
10+
extern std::vector<Tensor> my__foreach_mul(
11+
torch::headeronly::HeaderOnlyArrayRef<Tensor> self,
12+
torch::headeronly::HeaderOnlyArrayRef<Tensor> other);
13+
14+
// Helper function for cloning
15+
// Thin helper that clones a tensor via the stable-ABI clone op, so the
// kernels in this file share a single cloning entry point.
Tensor my_clone(Tensor t) {
  Tensor copied = clone(t);
  return copied;
}
18+
19+
// Clones each input tensor twice and multiplies the clones pairwise via
// my__foreach_mul. Exercises that my__foreach_mul accepts brace-init
// (std::initializer_list) arguments in addition to std::vector.
std::vector<Tensor> make_tensor_clones_and_call_foreach(Tensor t1, Tensor t2) {
  const Tensor first_of_t1 = my_clone(t1);
  const Tensor second_of_t1 = my_clone(t1);
  const Tensor first_of_t2 = my_clone(t2);
  const Tensor second_of_t2 = my_clone(t2);
  // Brace-init lists bind to HeaderOnlyArrayRef<Tensor> here; the result is
  // elementwise {t1*t1, t2*t2} on the cloned tensors.
  return my__foreach_mul({first_of_t1, first_of_t2}, {second_of_t1, second_of_t2});
}
28+
29+
// Declares the schema for make_tensor_clones_and_call_foreach; the result is
// a list of tensors (Tensor[]) matching the std::vector<Tensor> return type.
STABLE_TORCH_LIBRARY_FRAGMENT(libtorch_agnostic_2_10, m) {
  m.def(
      "make_tensor_clones_and_call_foreach(Tensor t1, Tensor t2) -> Tensor[]");
}
33+
34+
// Registers the boxed CompositeExplicitAutograd kernel for
// make_tensor_clones_and_call_foreach.
STABLE_TORCH_LIBRARY_IMPL(
    libtorch_agnostic_2_10,
    CompositeExplicitAutograd,
    m) {
  m.impl(
      "make_tensor_clones_and_call_foreach",
      TORCH_BOX(&make_tensor_clones_and_call_foreach));
}
Lines changed: 40 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,40 @@
1+
// This is duplicated from the libtorch_agnostic_2_9_extension
2+
// as a negative test for test_version_compatibility.py
3+
4+
#include <torch/csrc/stable/library.h>
5+
#include <torch/csrc/stable/tensor.h>
6+
#include <torch/csrc/stable/ops.h>
7+
#include <torch/headeronly/util/Exception.h>
8+
#include <torch/headeronly/core/ScalarType.h>
9+
#include <torch/headeronly/core/Dispatch_v2.h>
10+
#include <torch/headeronly/core/TensorAccessor.h>
11+
12+
#include "tensor_accessor_kernel.h"
13+
14+
using torch::stable::Tensor;
15+
16+
// Matrix-vector product (res = m @ v) on CPU, implemented through the
// header-only TensorAccessor machinery (Accessor_cpu comes from
// tensor_accessor_kernel.h). Validates shapes/dtype/device up front, then
// dispatches over floating dtypes to run the typed kernel.
Tensor mv_tensor_accessor_cpu(Tensor m, Tensor v) {
  STD_TORCH_CHECK(m.dim() == 2, "m must be 2D");
  STD_TORCH_CHECK(v.dim() == 1, "v must be 1D");
  STD_TORCH_CHECK(m.size(1) == v.size(0), "m.shape[1] == v.shape[0] must hold");
  STD_TORCH_CHECK(m.scalar_type() == v.scalar_type(), "m and v must have the same dtype");
  STD_TORCH_CHECK(m.device() == v.device(), "m and v must be on the same device");
  // Output has one row per row of m; new_empty inherits dtype/device from m.
  Tensor res = new_empty(m, {m.size(0)});
  // THO_DISPATCH_V2 instantiates the lambda with `scalar_t` bound to each
  // floating dtype; AT_WRAP protects the lambda's commas from the macro.
  THO_DISPATCH_V2(m.scalar_type(), "mv_tensor_accessor_cpu",
                  AT_WRAP(([&]() {
                    auto resa = Accessor_cpu<scalar_t, 1>(reinterpret_cast<scalar_t*>(res.data_ptr()), res.sizes().data(), res.strides().data());
                    auto ma = Accessor_cpu<scalar_t, 2>(reinterpret_cast<scalar_t*>(m.data_ptr()), m.sizes().data(), m.strides().data());
                    auto va = Accessor_cpu<scalar_t, 1>(reinterpret_cast<scalar_t*>(v.data_ptr()), v.sizes().data(), v.strides().data());
                    mv_tensor_accessor_kernel<Accessor_cpu, scalar_t>(resa, ma, va);
                  })),
                  AT_FLOATING_TYPES);
  return res;
}
33+
34+
// Declares the schema for mv_tensor_accessor_cpu.
//
// Fix: the schema must match the C++ kernel signature, which takes only
// (Tensor m, Tensor v) and allocates the result internally. The original
// schema declared an extra leading "Tensor res" argument, so the boxed
// 2-argument kernel registered below would be called with 3 stack values —
// an arity mismatch at dispatch time.
STABLE_TORCH_LIBRARY_FRAGMENT(libtorch_agnostic_2_10, m) {
  m.def("mv_tensor_accessor_cpu(Tensor m, Tensor v) -> Tensor");
}
37+
38+
// Registers the boxed CompositeExplicitAutograd kernel for
// mv_tensor_accessor_cpu.
STABLE_TORCH_LIBRARY_IMPL(libtorch_agnostic_2_10, CompositeExplicitAutograd, m) {
  m.impl("mv_tensor_accessor_cpu", TORCH_BOX(&mv_tensor_accessor_cpu));
}

0 commit comments

Comments
 (0)