@@ -333,7 +333,7 @@ class BspScheduleCS : public BspSchedule<Graph_t> {
 
         // The data structure stores for each processor a set of tuples representing required sends.
         // Each tuple is (communication_cost, source_node, destination_processor).
-        std::vector<std::set<std::tuple<v_commw_t<Graph_t>, vertex_idx_t<Graph_t>, unsigned>>> require_sending(BspSchedule<Graph_t>::instance->numberOfProcessors());
+        std::vector<std::set<std::tuple<v_commw_t<Graph_t>, vertex_idx_t<Graph_t>, unsigned>, std::greater<>>> require_sending(BspSchedule<Graph_t>::instance->numberOfProcessors());
 
         for (unsigned proc = 0; proc < BspSchedule<Graph_t>::instance->numberOfProcessors(); proc++) {
             for (const auto &node : step_proc_node_list[0][proc]) {
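The comparator change here is what enables the loop rewrite further down. Since `std::tuple` compares lexicographically, the communication cost (the tuple's first element) is the primary sort key, and `std::greater<>` makes the set iterate in descending cost order. A minimal standalone sketch of that behavior (the type aliases are hypothetical stand-ins for `v_commw_t<Graph_t>` and `vertex_idx_t<Graph_t>`, not the project's types):

```cpp
#include <cstddef>
#include <iostream>
#include <set>
#include <tuple>

int main() {
    // Hypothetical stand-ins for v_commw_t<Graph_t> and vertex_idx_t<Graph_t>.
    using comm_cost_t = unsigned;
    using vertex_t = std::size_t;
    using send_t = std::tuple<comm_cost_t, vertex_t, unsigned>;

    // std::tuple compares lexicographically, so the communication cost
    // (the first element) is the primary sort key; std::greater<> flips
    // the set's order to descending.
    std::set<send_t, std::greater<>> require_sending{
        {3, 7, 1}, {9, 2, 0}, {5, 4, 2}};

    // Forward iteration now visits the most expensive sends first: 9, 5, 3.
    for (const auto &[cost, node, dest] : require_sending)
        std::cout << cost << " -> node " << node << " -> proc " << dest << '\n';
}
```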
@@ -391,9 +391,9 @@ class BspScheduleCS : public BspSchedule<Graph_t> {
            if (require_sending[proc].empty() ||
                std::get<0>(*require_sending[proc].rbegin()) + send_cost[proc] >
                    max_comm_cost)
-               continue; // This check is not strictly necessary with the new loop but can be a fast exit.
-           auto iter = require_sending[proc].rbegin();
-           while (iter != require_sending[proc].rend()) {
+               continue;
+           auto iter = require_sending[proc].begin();
+           while (iter != require_sending[proc].end()) {
                const auto &[comm_cost, node_to_send, dest_proc] = *iter;
                if (comm_cost + send_cost[proc] > max_comm_cost ||
                    comm_cost + receive_cost[dest_proc] > max_comm_cost) {
@@ -403,7 +403,7 @@ class BspScheduleCS : public BspSchedule<Graph_t> {
                node_to_proc_been_sent[node_to_send][dest_proc] = true;
                send_cost[proc] += comm_cost;
                receive_cost[dest_proc] += comm_cost;
-               iter = std::make_reverse_iterator(require_sending[proc].erase(std::next(iter).base()));
+               iter = require_sending[proc].erase(iter);
                if (require_sending[proc].empty() ||
                    std::get<0>(*require_sending[proc].rbegin()) + send_cost[proc] >
                        max_comm_cost)
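With the set now sorted descending, the previous reverse traversal of an ascending set becomes a plain forward traversal, and the awkward `iter = std::make_reverse_iterator(require_sending[proc].erase(std::next(iter).base()))` round-trip reduces to `iter = require_sending[proc].erase(iter)`, since `std::set::erase(iterator)` already returns an iterator to the successor. A simplified sketch of the erase-while-iterating pattern under the same greedy-budget idea (a single cost budget stands in for the separate send/receive budgets above; all names are illustrative):

```cpp
#include <iostream>
#include <set>

int main() {
    // Costs stand in for the (cost, node, dest) tuples; `budget` plays the
    // role of max_comm_cost, `spent` the processor's running send cost.
    std::set<unsigned, std::greater<>> costs{9, 5, 3, 2};
    unsigned budget = 10, spent = 0;

    auto iter = costs.begin();
    while (iter != costs.end()) {
        if (*iter + spent > budget) {
            ++iter; // does not fit; smaller costs later in the set still might
            continue;
        }
        spent += *iter;
        // set::erase(iterator) returns the iterator to the next element,
        // so no reverse-iterator round-trip is needed.
        iter = costs.erase(iter);
    }
    std::cout << "sent " << spent << ", " << costs.size() << " deferred\n";
}
```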