@@ -128,7 +128,7 @@ void exec_post_wrapper(Executor & exec, F &&f, Ts &&...ts) {
 }
 
 template <typename Executor, typename F, typename... Ts>
-hpx::lcos::future<void> exec_async_wrapper(Executor & exec, F &&f, Ts &&...ts) {
+hpx::future<void> exec_async_wrapper(Executor & exec, F &&f, Ts &&...ts) {
   return hpx::async(exec, std::forward<F>(f), std::forward<Ts>(ts)...);
 }
 
@@ -148,9 +148,9 @@ template <typename Executor> class aggregated_function_call {
   std::atomic<size_t> slice_counter = 0;
 
   /// Promise to be set when all slices have visited this function call
-  /* hpx::lcos::local::promise<void> slices_ready_promise; */
+  /* hpx::local::promise<void> slices_ready_promise; */
   /// Tracks if all slices have visited this function call
-  /* hpx::lcos::future<void> all_slices_ready = slices_ready_promise.get_future(); */
+  /* hpx::future<void> all_slices_ready = slices_ready_promise.get_future(); */
   /// How many slices can we expect?
   const size_t number_slices;
   const bool async_mode;
@@ -168,7 +168,7 @@ template <typename Executor> class aggregated_function_call {
   aggregation_mutex_t debug_mut;
 #endif
 
-  std::vector<hpx::lcos::local::promise<void>> potential_async_promises{};
+  std::vector<hpx::local::promise<void>> potential_async_promises{};
 
 public:
   aggregated_function_call(const size_t number_slices, bool async_mode, Executor &exec)
@@ -182,7 +182,7 @@ template <typename Executor> class aggregated_function_call {
     // assert(!all_slices_ready.valid());
   }
   /// Returns true if all required slices have visited this point
-  bool sync_aggregation_slices(hpx::lcos::future<void> &stream_future) {
+  bool sync_aggregation_slices(hpx::future<void> &stream_future) {
     assert(!async_mode);
     assert(potential_async_promises.empty());
     const size_t local_counter = slice_counter++;
@@ -192,7 +192,7 @@ template <typename Executor> class aggregated_function_call {
     else return false;
   }
   template <typename F, typename... Ts>
-  void post_when(hpx::lcos::future<void> &stream_future, F &&f, Ts &&...ts) {
+  void post_when(hpx::future<void> &stream_future, F &&f, Ts &&...ts) {
 #if !(defined(NDEBUG)) && defined(DEBUG_AGGREGATION_CALLS)
     // needed for concurrent access to function_tuple and debug_type_information
     // Not required for normal use
@@ -265,7 +265,7 @@ template <typename Executor> class aggregated_function_call {
     }
   }
   template <typename F, typename... Ts>
-  hpx::lcos::future<void> async_when(hpx::lcos::future<void> &stream_future,
+  hpx::future<void> async_when(hpx::future<void> &stream_future,
                                      F &&f, Ts &&...ts) {
 #if !(defined(NDEBUG)) && defined(DEBUG_AGGREGATION_CALLS)
     // needed for concurrent access to function_tuple and debug_type_information
@@ -330,7 +330,7 @@ template <typename Executor> class aggregated_function_call {
     assert(local_counter < number_slices);
     assert(slice_counter < number_slices + 1);
     assert(potential_async_promises.size() == number_slices);
-    hpx::lcos::future<void> ret_fut =
+    hpx::future<void> ret_fut =
         potential_async_promises[local_counter].get_future();
     if (local_counter == number_slices - 1) {
       /* slices_ready_promise.set_value(); */
@@ -347,15 +347,15 @@ template <typename Executor> class aggregated_function_call {
     return ret_fut;
   }
   template <typename F, typename... Ts>
-  hpx::lcos::shared_future<void> wrap_async(hpx::lcos::future<void> &stream_future,
+  hpx::shared_future<void> wrap_async(hpx::future<void> &stream_future,
                                             F &&f, Ts &&...ts) {
     assert(async_mode);
     assert(!potential_async_promises.empty());
     const size_t local_counter = slice_counter++;
     assert(local_counter < number_slices);
     assert(slice_counter < number_slices + 1);
     assert(potential_async_promises.size() == number_slices);
-    hpx::lcos::shared_future<void> ret_fut =
+    hpx::shared_future<void> ret_fut =
         potential_async_promises[local_counter].get_shared_future();
     if (local_counter == number_slices - 1) {
       auto fut = f(std::forward<Ts>(ts)...);
@@ -496,11 +496,11 @@ template <typename Executor> class aggregated_executor {
     launch_counter++;
   }
   template <typename F, typename... Ts>
-  hpx::lcos::future<void> async(F &&f, Ts &&...ts) {
+  hpx::future<void> async(F &&f, Ts &&...ts) {
     // we should only execute function calls once all slices
     // have been given away (-> Executor Slices start)
     assert(parent.slices_exhausted == true);
-    hpx::lcos::future<void> ret_fut = parent.async(
+    hpx::future<void> ret_fut = parent.async(
         launch_counter, std::forward<F>(f), std::forward<Ts>(ts)...);
     launch_counter++;
     return ret_fut;
@@ -525,11 +525,11 @@ template <typename Executor> class aggregated_executor {
   }
 
   template <typename F, typename... Ts>
-  hpx::lcos::shared_future<void> wrap_async(F &&f, Ts &&...ts) {
+  hpx::shared_future<void> wrap_async(F &&f, Ts &&...ts) {
     // we should only execute function calls once all slices
     // have been given away (-> Executor Slices start)
     assert(parent.slices_exhausted == true);
-    hpx::lcos::shared_future<void> ret_fut = parent.wrap_async(
+    hpx::shared_future<void> ret_fut = parent.wrap_async(
         launch_counter, std::forward<F>(f), std::forward<Ts>(ts)...);
     launch_counter++;
     return ret_fut;
@@ -557,10 +557,10 @@ template <typename Executor> class aggregated_executor {
 
   //===============================================================================
 
-  hpx::lcos::local::promise<void> slices_full_promise;
+  hpx::local::promise<void> slices_full_promise;
   /// Promises with the slice executors -- to be set when the starting criteria
   /// is met
-  std::vector<hpx::lcos::local::promise<executor_slice>> executor_slices;
+  std::vector<hpx::local::promise<executor_slice>> executor_slices;
   /// List of aggregated function calls - function will be launched when all
   /// slices have called it
   std::deque<aggregated_function_call<Executor>> function_calls;
@@ -715,8 +715,8 @@ template <typename Executor> class aggregated_executor {
   //===============================================================================
   // Public Interface
 public:
-  hpx::lcos::future<void> current_continuation;
-  hpx::lcos::future<void> last_stream_launch_done;
+  hpx::future<void> current_continuation;
+  hpx::future<void> last_stream_launch_done;
   std::atomic<size_t> overall_launch_counter = 0;
 
   /// Only meant to be accessed by the slice executors
@@ -764,7 +764,7 @@ template <typename Executor> class aggregated_executor {
 
   /// Only meant to be accessed by the slice executors
   template <typename F, typename... Ts>
-  hpx::lcos::future<void> async(const size_t slice_launch_counter, F &&f,
+  hpx::future<void> async(const size_t slice_launch_counter, F &&f,
                                 Ts &&...ts) {
     std::lock_guard<aggregation_mutex_t> guard(mut);
     assert(slices_exhausted == true);
@@ -785,7 +785,7 @@ template <typename Executor> class aggregated_executor {
   }
   /// Only meant to be accessed by the slice executors
   template <typename F, typename... Ts>
-  hpx::lcos::shared_future<void> wrap_async(const size_t slice_launch_counter, F &&f,
+  hpx::shared_future<void> wrap_async(const size_t slice_launch_counter, F &&f,
                                             Ts &&...ts) {
     std::lock_guard<aggregation_mutex_t> guard(mut);
     assert(slices_exhausted == true);
@@ -810,7 +810,7 @@ template <typename Executor> class aggregated_executor {
     return !slices_exhausted;
   }
 
-  std::optional<hpx::lcos::future<executor_slice>> request_executor_slice() {
+  std::optional<hpx::future<executor_slice>> request_executor_slice() {
     std::lock_guard<aggregation_mutex_t> guard(mut);
     if (!slices_exhausted) {
       const size_t local_slice_id = ++current_slices;
@@ -839,14 +839,14 @@ template <typename Executor> class aggregated_executor {
         dealloc_counter = 0;
 
         if (mode == aggregated_executor_modes::STRICT ) {
-          slices_full_promise = hpx::lcos::local::promise<void>{};
+          slices_full_promise = hpx::local::promise<void>{};
         }
       }
 
       // Create Executor Slice future -- that will be returned later
-      hpx::lcos::future<executor_slice> ret_fut;
+      hpx::future<executor_slice> ret_fut;
       if (local_slice_id < max_slices) {
-        executor_slices.emplace_back(hpx::lcos::local::promise<executor_slice>{});
+        executor_slices.emplace_back(hpx::local::promise<executor_slice>{});
         ret_fut =
             executor_slices[local_slice_id - 1].get_future();
       } else {
@@ -871,7 +871,7 @@ template <typename Executor> class aggregated_executor {
                         gpu_id));
       // Renew promise that all slices will be ready as the primary launch
       // criteria...
-      hpx::lcos::shared_future<void> fut;
+      hpx::shared_future<void> fut;
       if (mode == aggregated_executor_modes::EAGER ||
           mode == aggregated_executor_modes::ENDLESS) {
         // Fallback launch condidtion: Launch as soon as the underlying stream
@@ -922,7 +922,7 @@ template <typename Executor> class aggregated_executor {
       return ret_fut;
     } else {
       // Return empty optional as failure
-      return std::optional<hpx::lcos::future<executor_slice>>{};
+      return std::optional<hpx::future<executor_slice>>{};
     }
   }
   size_t launched_slices;
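
Taken together, the hunks above apply one mechanical rename: the deprecated hpx::lcos spellings (hpx::lcos::future, hpx::lcos::shared_future, hpx::lcos::local::promise) are replaced by their current top-level counterparts (hpx::future, hpx::shared_future, hpx::local::promise); signatures and behavior are otherwise unchanged. The standalone sketch below is illustrative only and not part of the patch; it assumes an HPX build that provides the umbrella headers hpx/hpx.hpp and hpx/hpx_main.hpp and shows the renamed promise/future types in isolation, reusing the slices_ready_promise / all_slices_ready names from the diff.

    // Illustrative sketch (not from the patch): the renamed HPX types used above.
    #include <hpx/hpx_main.hpp> // lets plain main() run inside the HPX runtime
    #include <hpx/hpx.hpp>

    int main() {
      // hpx::local::promise replaces hpx::lcos::local::promise
      hpx::local::promise<void> slices_ready_promise;
      // hpx::future replaces hpx::lcos::future
      hpx::future<void> all_slices_ready = slices_ready_promise.get_future();

      // hpx::async still returns a future, now spelled hpx::future
      hpx::future<void> work = hpx::async([] { /* aggregated work would run here */ });

      slices_ready_promise.set_value();
      all_slices_ready.get();
      work.get();
      return 0;
    }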