-
Notifications
You must be signed in to change notification settings - Fork 3
Expand file tree
/
Copy pathipc.cpp
More file actions
121 lines (106 loc) · 3.53 KB
/
ipc.cpp
File metadata and controls
121 lines (106 loc) · 3.53 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
#include "ipc.h"

#include <cerrno>
#include <cstddef>
#include <iostream>
#include <stdexcept>
#include <utility>

#include <pybind11/pybind11.h>

#define MACRO_STRINGIFY(x) #x
#ifdef __APPLE__
#include "ipc_apple.h"
// Apple/Metal build: the mach-port entry points are real; CUDA IPC is a stub.

// Wrap the shared surface referenced by `machPort` as a Metal-backed tensor
// object (delegates to the platform implementation in ipc_apple.h).
py::object
initialize_from_mach_port(unsigned int machPort, int width, int height) {
    return mtl_tensor_from_mach_port(machPort, width, height);
}

// Normalize a Metal tensor for consumption on the Python side.
py::object normalize_apple_mtl_tensor(py::object tensor) {
    return normalize_apple_mtl_tensor_impl(std::move(tensor));
}

// CUDA IPC is unavailable on Apple builds: return None to signal "unsupported".
// NOTE: the return type is py::object rather than py::capsule — constructing a
// py::capsule from None would throw a type_error at runtime instead of
// returning None to the caller.
py::object mtl_tensor_from_cuda_mem_handle(
    const char *cuda_ipc_handle, int width, int height
) {
    return py::none();
}
#elif HAS_CUDA
#include "ipc_cuda.h"
// CUDA build: the mach-port entry points are stubs; CUDA IPC interop is real.

py::object
initialize_from_mach_port(unsigned int machPort, int width, int height) {
    return py::none();
}
py::object normalize_apple_mtl_tensor(py::object tensor) { return py::none(); }

// Import a CUDA IPC memory handle and expose it as a DLPack capsule.
// The capsule destructor runs the DLManagedTensor's own deleter exactly once
// when Python releases the capsule.
py::object mtl_tensor_from_cuda_mem_handle(
    const char *cuda_ipc_handle, int width, int height
) {
    DLManagedTensor *tensor = mtl_tensor_from_cuda_ipc_handle(
        reinterpret_cast<void *>(const_cast<char *>(cuda_ipc_handle)),
        width,
        height
    );
    if (!tensor) {
        throw std::runtime_error(
            "Failed to create DLManagedTensor from CUDA IPC handle"
        );
    }
    // "dltensor" is the capsule name required by the DLPack protocol.
    return py::capsule(tensor, "dltensor", [](void *ptr) {
        DLManagedTensor *managed_tensor = static_cast<DLManagedTensor *>(ptr);
        if (managed_tensor && managed_tensor->deleter) {
            managed_tensor->deleter(managed_tensor);
        }
    });
}
#else
// Neither Apple nor CUDA: every GPU interop entry point is a stub that
// returns None (py::object return type — see note in the Apple branch).
py::object
initialize_from_mach_port(unsigned int machPort, int width, int height) {
    return py::none();
}
py::object normalize_apple_mtl_tensor(py::object tensor) { return py::none(); }
py::object mtl_tensor_from_cuda_mem_handle(
    const char *cuda_ipc_handle, int width, int height
) {
    return py::none();
}
#endif
// #include "ipc_boost.hpp"
#include "ipc_noboost.hpp"
// Create the shared-memory channel used for Python/Java IPC.
//
// Delegates to create_shared_memory_impl(); on failure, logs the underlying
// error message and the current errno to stderr, then rethrows the failure
// as std::runtime_error so pybind11 surfaces it to Python.
//
// Returns the port the channel was actually bound to (relevant when
// find_free_port is true — presumably the impl probes for a free one;
// confirm against create_shared_memory_impl).
int initialize_shared_memory(
    int port,
    const char *initial_data,
    size_t data_size,
    size_t action_size,
    bool find_free_port
) {
    try {
        return create_shared_memory_impl(
            port, initial_data, data_size, action_size, find_free_port
        );
    } catch (const std::exception &err) {
        // Log both the exception text and errno before propagating, since the
        // rethrown runtime_error carries only the message string.
        std::cerr << err.what() << std::endl;
        std::cerr << "Failed to initialize shared memory: errno=" << errno
                  << std::endl;
        throw std::runtime_error(err.what());
    }
}
// Write `action_size` bytes from `data` into the Python-to-Java shared-memory
// segment named `p2j_memory_name`. Thin forwarding wrapper over the
// backend-selected implementation (ipc_noboost.hpp).
void write_to_shared_memory(
    const char *p2j_memory_name, const char *data, size_t action_size
) {
    write_to_shared_memory_impl(p2j_memory_name, data, action_size);
}
// Read the latest payload from the Java-to-Python shared-memory channel and
// return it to Python as a bytes object. Thin forwarding wrapper over the
// backend-selected implementation (ipc_noboost.hpp).
py::bytes read_from_shared_memory(
    const char *p2j_memory_name, const char *j2p_memory_name
) {
    return read_from_shared_memory_impl(p2j_memory_name, j2p_memory_name);
}
// Tear down the shared-memory segment `memory_name`; when
// `release_semaphores` is set, the associated semaphores are released as
// well. Thin forwarding wrapper over the backend-selected implementation.
void destroy_shared_memory(const char *memory_name, bool release_semaphores) {
    destroy_shared_memory_impl(memory_name, release_semaphores);
}
// Python extension-module entry point: registers every IPC function defined
// above under the module name `craftground_native`.
PYBIND11_MODULE(craftground_native, m) {
    m.doc() = "Craftground Native Module";
    // GPU tensor interop — platform stubs return None where unsupported.
    m.def("initialize_from_mach_port", &initialize_from_mach_port);
    m.def("normalize_apple_mtl_tensor", &normalize_apple_mtl_tensor);
    m.def("mtl_tensor_from_cuda_mem_handle", &mtl_tensor_from_cuda_mem_handle);
    // Shared-memory channel lifecycle and data transfer.
    m.def("initialize_shared_memory", &initialize_shared_memory);
    m.def("write_to_shared_memory", &write_to_shared_memory);
    m.def("read_from_shared_memory", &read_from_shared_memory);
    m.def("destroy_shared_memory", &destroy_shared_memory);
    // shared_memory_exists is presumably declared in ipc_noboost.hpp — it is
    // not defined in this file; verify against that header.
    m.def("shared_memory_exists", &shared_memory_exists);
#ifdef VERSION_INFO
    // VERSION_INFO is injected by the build system; stringify it for Python.
    m.attr("__version__") = MACRO_STRINGIFY(VERSION_INFO);
#else
    m.attr("__version__") = "dev";
#endif
}