1 | | -#ifndef C10_MACROS_EXPORT_H_ |
2 | | -#define C10_MACROS_EXPORT_H_ |
3 | | - |
4 | | -#ifndef C10_USING_CUSTOM_GENERATED_MACROS |
5 | | -#include <torch/headeronly/macros/cmake_macros.h> |
6 | | -#endif // C10_USING_CUSTOM_GENERATED_MACROS |
7 | | - |
8 | 1 | #include <torch/headeronly/macros/Export.h> |
9 | | - |
10 | | -// This one is being used by libtorch.so |
11 | | -#ifdef CAFFE2_BUILD_MAIN_LIB |
12 | | -#define TORCH_API C10_EXPORT |
13 | | -#else |
14 | | -#define TORCH_API C10_IMPORT |
15 | | -#endif |
16 | | - |
17 | | -// You may be wondering: Whose brilliant idea was it to split torch_cuda into |
18 | | -// two pieces with confusing names? |
19 | | -// Once upon a time, there _was_ only TORCH_CUDA_API. All was happy until we |
20 | | -// tried to compile PyTorch for CUDA 11.1, which ran into relocation marker |
21 | | -// issues when linking big binaries. |
22 | | -// (https://github.com/pytorch/pytorch/issues/39968) We had two choices: |
23 | | -// (1) Stop supporting so many GPU architectures |
24 | | -// (2) Do something else |
25 | | -// We chose #2 and decided to split the behemoth that was torch_cuda into two |
26 | | -// smaller libraries, one with most of the core kernel functions (torch_cuda_cu) |
27 | | -// and the other that had..well..everything else (torch_cuda_cpp). The idea was |
28 | | -// this: instead of linking our static libraries (like the hefty |
29 | | -// libcudnn_static.a) with another huge library, torch_cuda, and run into pesky |
30 | | -// relocation marker issues, we could link our static libraries to a smaller |
31 | | -// part of torch_cuda (torch_cuda_cpp) and avoid the issues. |
32 | | - |
33 | | -// libtorch_cuda_cu.so |
34 | | -#ifdef TORCH_CUDA_CU_BUILD_MAIN_LIB |
35 | | -#define TORCH_CUDA_CU_API C10_EXPORT |
36 | | -#elif defined(BUILD_SPLIT_CUDA) |
37 | | -#define TORCH_CUDA_CU_API C10_IMPORT |
38 | | -#endif |
39 | | - |
40 | | -// libtorch_cuda_cpp.so |
41 | | -#ifdef TORCH_CUDA_CPP_BUILD_MAIN_LIB |
42 | | -#define TORCH_CUDA_CPP_API C10_EXPORT |
43 | | -#elif defined(BUILD_SPLIT_CUDA) |
44 | | -#define TORCH_CUDA_CPP_API C10_IMPORT |
45 | | -#endif |
46 | | - |
47 | | -// libtorch_cuda.so (where torch_cuda_cu and torch_cuda_cpp are a part of the |
48 | | -// same api) |
49 | | -#ifdef TORCH_CUDA_BUILD_MAIN_LIB |
50 | | -#define TORCH_CUDA_CPP_API C10_EXPORT |
51 | | -#define TORCH_CUDA_CU_API C10_EXPORT |
52 | | -#elif !defined(BUILD_SPLIT_CUDA) |
53 | | -#define TORCH_CUDA_CPP_API C10_IMPORT |
54 | | -#define TORCH_CUDA_CU_API C10_IMPORT |
55 | | -#endif |
56 | | - |
57 | | -#if defined(TORCH_HIP_BUILD_MAIN_LIB) |
58 | | -#define TORCH_HIP_CPP_API C10_EXPORT |
59 | | -#define TORCH_HIP_API C10_EXPORT |
60 | | -#else |
61 | | -#define TORCH_HIP_CPP_API C10_IMPORT |
62 | | -#define TORCH_HIP_API C10_IMPORT |
63 | | -#endif |
64 | | - |
65 | | -#if defined(TORCH_XPU_BUILD_MAIN_LIB) |
66 | | -#define TORCH_XPU_API C10_EXPORT |
67 | | -#else |
68 | | -#define TORCH_XPU_API C10_IMPORT |
69 | | -#endif |
70 | | - |
71 | | -// Enums only need to be exported on windows for non-CUDA files |
72 | | -#if defined(_WIN32) && defined(__CUDACC__) |
73 | | -#define C10_API_ENUM C10_API |
74 | | -#else |
75 | | -#define C10_API_ENUM |
76 | | -#endif |
77 | | - |
78 | | -#endif // C10_MACROS_EXPORT_H_ |