1313#include < hpx/modules/functional.hpp>
1414#include < hpx/modules/futures.hpp>
1515#include < hpx/modules/memory.hpp>
16+ #include < hpx/modules/mpi_base.hpp>
1617#include < hpx/modules/runtime_local.hpp>
1718#include < hpx/modules/threading_base.hpp>
18- #include < hpx/mpi_base/mpi.hpp>
1919
2020#include < atomic>
2121#include < cstddef>
2525#include < utility>
2626#include < vector>
2727
28- namespace hpx { namespace mpi { namespace experimental {
28+ namespace hpx :: mpi:: experimental {
2929
3030 // -----------------------------------------------------------------
3131 namespace detail {
3232
33- using request_callback_function_type =
33+ HPX_CXX_EXPORT using request_callback_function_type =
3434 hpx::move_only_function<void (int )>;
3535
36- HPX_CORE_EXPORT void add_request_callback (
36+ HPX_CXX_EXPORT HPX_CORE_EXPORT void add_request_callback (
3737 request_callback_function_type&& f, MPI_Request req);
38- HPX_CORE_EXPORT void register_polling (hpx::threads::thread_pool_base&);
39- HPX_CORE_EXPORT void unregister_polling (
38+ HPX_CXX_EXPORT HPX_CORE_EXPORT void register_polling (
39+ hpx::threads::thread_pool_base&);
40+ HPX_CXX_EXPORT HPX_CORE_EXPORT void unregister_polling (
4041 hpx::threads::thread_pool_base&);
4142
4243 } // namespace detail
4344
4445 // by convention the title is 7 chars (for alignment)
45- using print_on = debug::enable_print<false >;
46- static constexpr print_on mpi_debug (" MPI_FUT" );
46+ HPX_CXX_EXPORT using print_on = debug::enable_print<false >;
47+ HPX_CXX_EXPORT static constexpr print_on mpi_debug (" MPI_FUT" );
4748
4849 namespace detail {
4950
50- using mutex_type = hpx::spinlock;
51+ HPX_CXX_EXPORT using mutex_type = hpx::spinlock;
5152
5253 // mutex needed to protect mpi request vector, note that the
5354 // mpi poll function takes place inside the main scheduling loop
5455 // of hpx and not on an hpx worker thread, so we must use std::mutex
55- HPX_CORE_EXPORT mutex_type& get_vector_mtx ();
56+ HPX_CXX_EXPORT HPX_CORE_EXPORT mutex_type& get_vector_mtx ();
5657
5758 // -----------------------------------------------------------------
5859 // An implementation of future_data for MPI
59- struct future_data : hpx::lcos::detail::future_data<int >
60+ HPX_CXX_EXPORT struct future_data : hpx::lcos::detail::future_data<int >
6061 {
6162 HPX_NON_COPYABLE (future_data);
6263
@@ -107,12 +108,12 @@ namespace hpx { namespace mpi { namespace experimental {
107108
108109 // -----------------------------------------------------------------
109110 // intrusive pointer for future_data
110- using future_data_ptr = hpx::intrusive_ptr<future_data>;
111+ HPX_CXX_EXPORT using future_data_ptr = hpx::intrusive_ptr<future_data>;
111112
112113 // -----------------------------------------------------------------
113114 // a convenience structure to hold state vars
114115 // used extensively with debug::print to display rank etc
115- struct mpi_info
116+ HPX_CXX_EXPORT struct mpi_info
116117 {
117118 bool error_handler_initialized_ = false ;
118119 int rank_ = -1 ;
@@ -124,66 +125,70 @@ namespace hpx { namespace mpi { namespace experimental {
124125 };
125126
126127 // an instance of mpi_info that we store data in
127- HPX_CORE_EXPORT mpi_info& get_mpi_info ();
128+ HPX_CXX_EXPORT HPX_CORE_EXPORT mpi_info& get_mpi_info ();
128129
129130 // stream operator to display debug mpi_info
130- HPX_CORE_EXPORT std::ostream& operator <<(
131+ HPX_CXX_EXPORT HPX_CORE_EXPORT std::ostream& operator <<(
131132 std::ostream& os, mpi_info const & i);
132133
133134 // -----------------------------------------------------------------
134135 // an MPI error handling type that we can use to intercept
135136 // MPI errors if we enable the error handler
136- HPX_CORE_EXPORT extern MPI_Errhandler hpx_mpi_errhandler;
137+ HPX_CXX_EXPORT HPX_CORE_EXPORT extern MPI_Errhandler hpx_mpi_errhandler;
137138
138139 // function that converts an MPI error into an exception
139- HPX_CORE_EXPORT void hpx_MPI_Handler (MPI_Comm*, int * errorcode, ...);
140+ HPX_CXX_EXPORT HPX_CORE_EXPORT void hpx_MPI_Handler (
141+ MPI_Comm*, int * errorcode, ...);
140142
141143 // -----------------------------------------------------------------
142144 // we track requests and callbacks in two vectors even though
143145 // we have the request stored in the request_callback vector already
144146 // the reason for this is because we can use MPI_Testany
145147 // with a vector of requests to save overheads compared
146148 // to testing one by one every item (using a list)
147- HPX_CORE_EXPORT std::vector<MPI_Request>& get_requests_vector ();
149+ HPX_CXX_EXPORT HPX_CORE_EXPORT std::vector<MPI_Request>&
150+ get_requests_vector ();
148151
149152 // -----------------------------------------------------------------
150153 // define a lockfree queue type to place requests in prior to handling
151154 // this is done only to avoid taking a lock every time a request is
152155 // returned from MPI. Instead the requests are placed into a queue
153156 // and the polling code pops them prior to calling Testany
154- using queue_type = concurrency::ConcurrentQueue<future_data_ptr>;
157+ HPX_CXX_EXPORT using queue_type =
158+ concurrency::ConcurrentQueue<future_data_ptr>;
155159
156160 // -----------------------------------------------------------------
157161 // used internally to query how many requests are 'in flight'
158162 // these are requests that are being polled for actively
159163 // and not the same as the requests enqueued
160- HPX_CORE_EXPORT std::size_t get_number_of_active_requests ();
161-
164+ HPX_CXX_EXPORT HPX_CORE_EXPORT std::size_t
165+ get_number_of_active_requests ();
162166 } // namespace detail
163167
164168 // -----------------------------------------------------------------
165169 // set an error handler for communicators that will be called
166170 // on any error instead of the default behavior of program termination
167- HPX_CORE_EXPORT void set_error_handler ();
171+ HPX_CXX_EXPORT HPX_CORE_EXPORT void set_error_handler ();
168172
169173 // -----------------------------------------------------------------
170174 // return a future object from a user supplied MPI_Request
171- HPX_CORE_EXPORT hpx::future<void > get_future (MPI_Request request);
175+ HPX_CXX_EXPORT HPX_CORE_EXPORT hpx::future<void > get_future (
176+ MPI_Request request);
172177
173178 // -----------------------------------------------------------------
174179 // return a future from an async call to MPI_Ixxx function
175180 namespace detail {
176181
177- template <typename F, typename ... Ts>
182+ HPX_CXX_EXPORT template <typename F, typename ... Ts>
178183 hpx::future<int > async (F f, Ts&&... ts)
179184 {
180185 // create a future data shared state
181186 detail::future_data_ptr data =
182187 new detail::future_data (detail::future_data::init_no_addref{});
183188
184189 // invoke the call to MPI_Ixxx, ignore the returned result for now
185- int result = f ( HPX_FORWARD (Ts, ts)..., &data-> request_ );
186- HPX_UNUSED (result );
190+ [[maybe_unused]] int result =
191+ f ( HPX_FORWARD (Ts, ts)..., &data-> request_ );
187192
188193 // Add callback after the request has been filled
189194 data->add_callback ();
@@ -198,13 +203,15 @@ namespace hpx { namespace mpi { namespace experimental {
198203 // Background progress function for MPI async operations
199204 // Checks for completed MPI_Requests and sets mpi::experimental::future ready
200205 // when found
201- HPX_CORE_EXPORT hpx::threads::policies::detail::polling_status poll ();
206+ HPX_CXX_EXPORT HPX_CORE_EXPORT
207+ hpx::threads::policies::detail::polling_status
208+ poll ();
202209
203210 // -----------------------------------------------------------------
204211 // This is not completely safe as it will return when the request vector is
205212 // empty, but cannot guarantee that other communications are not about
206213 // to be launched in outstanding continuations etc.
207- inline void wait ()
214+ HPX_CXX_EXPORT inline void wait ()
208215 {
209216 hpx::util::yield_while ([]() {
210217 std::unique_lock<detail::mutex_type> lk (
@@ -217,7 +224,7 @@ namespace hpx { namespace mpi { namespace experimental {
217224 });
218225 }
219226
220- template <typename F>
227+ HPX_CXX_EXPORT template <typename F>
221228 inline void wait (F&& f)
222229 {
223230 hpx::util::yield_while ([&]() {
@@ -234,16 +241,17 @@ namespace hpx { namespace mpi { namespace experimental {
234241 // initialize the hpx::mpi background request handler
235242 // All ranks should call this function,
236243 // but only one thread per rank needs to do so
237- HPX_CORE_EXPORT void init (bool init_mpi = false ,
244+ HPX_CXX_EXPORT HPX_CORE_EXPORT void init (bool init_mpi = false ,
238245 std::string const & pool_name = " " , bool init_errorhandler = false );
239246
240247 // -----------------------------------------------------------------
241- HPX_CORE_EXPORT void finalize (std::string const & pool_name = " " );
248+ HPX_CXX_EXPORT HPX_CORE_EXPORT void finalize (
249+ std::string const & pool_name = " " );
242250
243251 // -----------------------------------------------------------------
244252 // This RAII helper class assumes that MPI initialization/finalization is
245253 // handled elsewhere
246- struct [[nodiscard]] enable_user_polling
254+ HPX_CXX_EXPORT struct [[nodiscard]] enable_user_polling
247255 {
248256 enable_user_polling (
249257 std::string const & pool_name = " " , bool init_errorhandler = false )
@@ -262,9 +270,9 @@ namespace hpx { namespace mpi { namespace experimental {
262270 };
263271
264272 // -----------------------------------------------------------------
265- template <typename ... Args>
273+ HPX_CXX_EXPORT template <typename ... Args>
266274 inline void debug (Args&&... args)
267275 {
268276 mpi_debug.debug (detail::get_mpi_info (), HPX_FORWARD (Args, args)...);
269277 }
270- }}} // namespace hpx::mpi::experimental
278+ } // namespace hpx::mpi::experimental
0 commit comments