
Commit fe354f3

carlo-galcodebot authored and committed
sched: add comments and clean code in srb0 sched
Signed-off-by: Carlo Galiotto <[email protected]>
1 parent ca25bc8 commit fe354f3

9 files changed, +74 -58 lines changed


lib/scheduler/CMakeLists.txt

Lines changed: 0 additions & 1 deletion
@@ -40,7 +40,6 @@ set(SOURCES
   cell_scheduler.cpp
   scheduler_factory.cpp
   scheduler_impl.cpp
-  support/csi_rs_helpers.h
   support/csi_rs_helper.cpp)

 add_library(srsran_sched STATIC ${SOURCES}

lib/scheduler/policy/scheduler_time_rr.cpp

Lines changed: 1 addition & 1 deletion
@@ -229,7 +229,7 @@ static alloc_outcome alloc_dl_ue(const ue& u,
   const slot_point pdcch_slot = res_grid.get_pdcch_slot(ue_cc.cell_index);

   if (ue_cc.is_in_fallback_mode()) {
-    // Skip allocation for UEs in fallback mode, as it is handled by the SRB0 scheduler.
+    // Skip allocation for UEs in fallback mode, as it is handled by the SRB fallback scheduler.
     return alloc_outcome::skip_ue;
   }

lib/scheduler/pucch_scheduling/pucch_allocator_impl.cpp

Lines changed: 4 additions & 4 deletions
@@ -112,10 +112,10 @@ optional<unsigned> pucch_allocator_impl::alloc_common_pucch_harq_ack_ue(cell_res
   }

   if (has_common_pucch_f1_grant(tcrnti, pucch_slot_alloc.slot)) {
-    logger.debug(
-        "tc-rnti={}: PUCCH common not allocated for slot={}. Cause: existing grant for this UE at the same slot",
-        tcrnti,
-        pucch_slot_alloc.slot);
+    logger.debug("tc-rnti={}: PUCCH common not allocated for slot={}. Cause: a grant for this UE already exists in the "
+                 "same slot",
+                 tcrnti,
+                 pucch_slot_alloc.slot);
     return nullopt;
   }

lib/scheduler/support/csi_rs_helper.cpp

Lines changed: 3 additions & 4 deletions
@@ -20,16 +20,15 @@ bool srsran::csi_helper::is_csi_rs_slot(const cell_configuration& cell_cfg, slot
     return false;
   }

-  for (unsigned i = 0; i != cell_cfg.zp_csi_rs_list.size(); ++i) {
-    const zp_csi_rs_resource& zp_csi = cell_cfg.zp_csi_rs_list[i];
+  // for (unsigned i = 0; i != cell_cfg.zp_csi_rs_list.size(); ++i)
+  for (const auto& zp_csi : cell_cfg.zp_csi_rs_list) {
     if (zp_csi.offset.has_value() and zp_csi.period.has_value() and
         (sl_tx - *zp_csi.offset).to_uint() % (unsigned)*zp_csi.period == 0) {
       return true;
     }
   }

-  for (unsigned i = 0; i != cell_cfg.nzp_csi_rs_list.size(); ++i) {
-    const nzp_csi_rs_resource& nzp_csi = cell_cfg.nzp_csi_rs_list[i];
+  for (const auto& nzp_csi : cell_cfg.nzp_csi_rs_list) {
     if (nzp_csi.csi_res_offset.has_value() and nzp_csi.csi_res_period.has_value() and
         (sl_tx - *nzp_csi.csi_res_offset).to_uint() % (unsigned)*nzp_csi.csi_res_period == 0) {
       return true;

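The loops above decide whether a slot carries CSI-RS by testing it against each resource's configured offset and period. As a rough illustration of that periodicity test, here is a self-contained sketch using plain unsigned slot indices instead of srsRAN's slot_point type; the struct and field names are illustrative, not the library's API.

#include <optional>

// Illustrative stand-in for a periodically transmitted resource; the real
// ZP/NZP CSI-RS resources carry their offset/period in optional fields, as
// tested in the loops above.
struct periodic_resource {
  std::optional<unsigned> offset; // slot offset of the resource
  std::optional<unsigned> period; // periodicity in slots
};

// Returns true if the resource is configured and sl_tx falls on its period.
// Assumes sl_tx >= offset; srsRAN's slot_point handles slot wrap-around,
// which this sketch does not.
inline bool is_active_in_slot(const periodic_resource& res, unsigned sl_tx)
{
  return res.offset.has_value() and res.period.has_value() and
         (sl_tx - *res.offset) % *res.period == 0;
}

int main()
{
  periodic_resource res{3U, 20U};
  // Slot 43 satisfies (43 - 3) % 20 == 0, so the resource is active there.
  return is_active_in_slot(res, 43U) ? 0 : 1;
}

The two range-based loops introduced in this hunk simply apply this kind of test to every configured ZP and NZP CSI-RS resource.
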
lib/scheduler/ue_scheduling/ue.h

Lines changed: 5 additions & 2 deletions
@@ -131,10 +131,13 @@ class ue
   /// to derive the required transport block size for an DL grant.
   /// param[in] lcid If the LCID is provided, the method will return the number of pending bytes for that LCID.
   /// Otherwise it will return the sum of all LCIDs pending bytes, excluding SRB0.
+  /// \return The number of DL pending bytes that are not already allocated in a DL HARQ.
   unsigned pending_dl_newtx_bytes(lcid_t lcid = lcid_t::INVALID_LCID) const;

-  /// \brief Computes the number of DL pending bytes that are not already allocated in a DL HARQ for SRB0. The value
-  /// is used to derive the required transport block size for an DL grant.
+  /// \brief Computes the number of DL pending bytes that are not already allocated in a DL HARQ for SRB0 or SRB1. The
+  /// value is used to derive the required transport block size for a DL grant.
+  /// param[in] is_srb0 Tells whether to compute the number of DL pending bytes for SRB0 (if true) or SRB1 (if false).
+  /// \return The number of DL pending bytes.
   unsigned pending_dl_srb0_or_srb1_newtx_bytes(bool is_srb0) const;

   /// \brief Computes the number of UL pending bytes that are not already allocated in a UL HARQ. The value is used

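To make the intent of the is_srb0 selector concrete, here is a toy usage sketch; the toy_ue type and its hard-coded buffer values are invented for illustration and are not srsRAN code.

#include <cstdio>

// Toy stand-in for the UE interface documented above: only the SRB0/SRB1
// pending-bytes query from this hunk is modeled, with fabricated values.
struct toy_ue {
  unsigned pending_dl_srb0_or_srb1_newtx_bytes(bool is_srb0) const
  {
    // e.g. an RRC message waiting on SRB0, nothing pending on SRB1.
    return is_srb0 ? 87U : 0U;
  }
};

int main()
{
  toy_ue u;
  // The fallback scheduler would size the grant (and hence the TBS) from
  // this value, and skip the UE when nothing is pending.
  std::printf("pending SRB0 bytes: %u\n", u.pending_dl_srb0_or_srb1_newtx_bytes(true));
  std::printf("pending SRB1 bytes: %u\n", u.pending_dl_srb0_or_srb1_newtx_bytes(false));
  return 0;
}
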
lib/scheduler/ue_scheduling/ue_event_manager.cpp

Lines changed: 1 addition & 1 deletion
@@ -79,7 +79,7 @@ class ue_event_manager::ue_dl_buffer_occupancy_manager final : public scheduler_
     // Forward DL BO update to UE.
     u.handle_dl_buffer_state_indication(dl_bo);
     if (dl_bo.lcid == LCID_SRB0 or (u.get_pcell().is_in_fallback_mode() and dl_bo.lcid == LCID_SRB1)) {
-      // Signal SRB0 scheduler with the new SRB0 buffer state.
+      // Signal SRB fallback scheduler with the new SRB0/SRB1 buffer state.
       parent.du_cells[u.get_pcell().cell_index].srb0_sched->handle_dl_buffer_state_indication_srb(
           dl_bo.ue_index, dl_bo.lcid == LCID_SRB0);
     }

lib/scheduler/ue_scheduling/ue_srb0_scheduler.cpp

Lines changed: 39 additions & 34 deletions
@@ -41,6 +41,8 @@ ue_srb0_scheduler::ue_srb0_scheduler(const scheduler_ue_expert_config& expert_cf

 void ue_srb0_scheduler::run_slot(cell_resource_allocator& res_alloc)
 {
+  // Update the HARQ processes of UE with ongoing transmissions to check which ones still need to be acked or
+  // retransmitted.
   update_ongoing_ue_retxs();

   if (ues.empty()) {
@@ -61,7 +63,7 @@ void ue_srb0_scheduler::run_slot(cell_resource_allocator& res_alloc)
     auto* h_dl = next_ue_harq_retx->get_harq_process();

     if (h_dl->has_pending_retx()) {
-      optional<std::pair<slot_point, slot_point>> most_recent_tx_ack = get_most_recent_slot_tx(u.ue_index);
+      optional<most_recent_tx_slots> most_recent_tx_ack = get_most_recent_slot_tx(u.ue_index);
       if (next_ue_harq_retx->is_srb0) {
         schedule_srb(res_alloc, u, next_ue_harq_retx->is_srb0, h_dl, most_recent_tx_ack);
       } else {
@@ -72,7 +74,7 @@ void ue_srb0_scheduler::run_slot(cell_resource_allocator& res_alloc)
     ++next_ue_harq_retx;
   }

-  // Schedule SRB0 messages.
+  // Schedule SRB0 messages before SRB1, as we prioritize SRB0 over SRB1.
   auto next_ue = pending_ues.begin();
   while (next_ue != pending_ues.end()) {
     if (not next_ue->is_srb0) {
@@ -86,8 +88,8 @@ void ue_srb0_scheduler::run_slot(cell_resource_allocator& res_alloc)
       continue;
     }

-    auto&                                       u                  = ues[next_ue->ue_index];
-    optional<std::pair<slot_point, slot_point>> most_recent_tx_ack = get_most_recent_slot_tx(u.ue_index);
+    auto&                          u                  = ues[next_ue->ue_index];
+    optional<most_recent_tx_slots> most_recent_tx_ack = get_most_recent_slot_tx(u.ue_index);
     if (u.has_pending_dl_newtx_bytes(LCID_SRB0) and schedule_srb(res_alloc, u, true, nullptr, most_recent_tx_ack)) {
       next_ue = pending_ues.erase(next_ue);
     } else {
@@ -109,18 +111,20 @@ void ue_srb0_scheduler::run_slot(cell_resource_allocator& res_alloc)
       continue;
     }

-    auto&                                       u                  = ues[next_ue->ue_index];
-    optional<std::pair<slot_point, slot_point>> most_recent_tx_ack = get_most_recent_slot_tx(u.ue_index);
+    auto&                          u                  = ues[next_ue->ue_index];
+    optional<most_recent_tx_slots> most_recent_tx_ack = get_most_recent_slot_tx(u.ue_index);
+    // NOTE: Since SRB1 data can be segmented, it could happen that not all the SRB1 bytes are scheduled at once. The
+    // scheduler will attempt to allocate those remaining bytes in the following slots. The policy we adopt in this
+    // scheduler is to schedule first all possible grants to a given UE (to speed up the re-establishment and
+    // re-configuration). Only after the SRB1 buffer of that UE is emptied, we move on to the next UE.
     if (u.has_pending_dl_newtx_bytes(LCID_SRB1) and schedule_srb(res_alloc, u, false, nullptr, most_recent_tx_ack)) {
+      // If all bytes of SRB1 are scheduled, remove UE.
       if (not u.has_pending_dl_newtx_bytes(LCID_SRB1)) {
         logger.debug("rnti={}: Removing UE from list, as SRB1 buffer is empty.", u.crnti);
         next_ue = pending_ues.erase(next_ue);
       }
-      // Don't increase the iterator here, as we give priority to the same UE if there left are bytes in the SRB1
-      // buffer.
-      // NOTE: The policy we adopt in this scheduler is to schedule first the all possible grants to a given UE,
-      // to speed up the re-establishment and re-configuration for that UE. Only after the SRB1 buffer of the UE is
-      // emptied, we move on to the next UE.
+      // Don't increase the iterator here, as we give priority to the same UE, if there are still some SRB1 bytes left
+      // in the buffer.
     } else {
       ++next_ue;
     }
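
The NOTE added above describes the policy for segmented SRB1 data: keep scheduling grants for the same UE and only advance the iterator once that UE's SRB1 buffer is empty (or an allocation fails). The following is a minimal, self-contained sketch of that loop shape, not srsRAN code: a std::vector of per-UE pending-byte counters stands in for the pending_ues list and try_schedule_srb1() stands in for schedule_srb().

#include <algorithm>
#include <vector>

// Stand-in for schedule_srb(): pretend a grant can carry up to max_per_grant
// bytes, drain that much from the UE's SRB1 buffer and report success.
static bool try_schedule_srb1(unsigned& pending_bytes, unsigned max_per_grant)
{
  if (pending_bytes == 0) {
    return false;
  }
  pending_bytes -= std::min(pending_bytes, max_per_grant);
  return true;
}

// Mirrors the loop shape above: stay on the same UE until its buffer is empty
// (or a grant cannot be allocated), then move to the next pending UE.
static void schedule_pending_srb1(std::vector<unsigned>& pending_ues, unsigned max_per_grant)
{
  auto next_ue = pending_ues.begin();
  while (next_ue != pending_ues.end()) {
    if (*next_ue != 0 and try_schedule_srb1(*next_ue, max_per_grant)) {
      if (*next_ue == 0) {
        // SRB1 buffer emptied: remove the UE from the pending list.
        next_ue = pending_ues.erase(next_ue);
      }
      // Otherwise keep the iterator on this UE so its remaining segments are
      // served before the next UE is considered.
    } else {
      ++next_ue;
    }
  }
}

int main()
{
  std::vector<unsigned> pending_ues = {300U, 120U}; // pending SRB1 bytes per UE
  schedule_pending_srb1(pending_ues, /*max_per_grant=*/100U);
  return 0;
}
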
@@ -129,7 +133,7 @@ void ue_srb0_scheduler::run_slot(cell_resource_allocator& res_alloc)

 void ue_srb0_scheduler::handle_dl_buffer_state_indication_srb(du_ue_index_t ue_index, bool is_srb0)
 {
-  is_srb0 ? pending_ues.push_back({ue_index, is_srb0}) : pending_ues.push_back({ue_index, is_srb0});
+  pending_ues.push_back({ue_index, is_srb0});
 }

 static slot_point get_next_srb_slot(const cell_configuration& cell_cfg, slot_point sl_tx)
@@ -148,11 +152,11 @@ static slot_point get_next_srb_slot(const cell_configuration& cell_cfg, slot_poi
   return next_candidate_slot;
 }

-bool ue_srb0_scheduler::schedule_srb(cell_resource_allocator&                    res_alloc,
-                                     ue&                                         u,
-                                     bool                                        is_srb0,
-                                     dl_harq_process*                            h_dl_retx,
-                                     optional<std::pair<slot_point, slot_point>> most_recent_tx_ack_slots)
+bool ue_srb0_scheduler::schedule_srb(cell_resource_allocator&       res_alloc,
+                                     ue&                            u,
+                                     bool                           is_srb0,
+                                     dl_harq_process*               h_dl_retx,
+                                     optional<most_recent_tx_slots> most_recent_tx_ack_slots)
 {
   const auto& bwp_cfg_common = cell_cfg.dl_cfg_common.init_dl_bwp;
   // Search valid PDSCH time domain resource.
@@ -172,19 +176,19 @@ bool ue_srb0_scheduler::schedule_srb(cell_resource_allocator&
   }

   if (most_recent_tx_ack_slots.has_value() and
-      sched_ref_slot + max_dl_slots_ahead_sched < most_recent_tx_ack_slots.value().first) {
+      sched_ref_slot + max_dl_slots_ahead_sched < most_recent_tx_ack_slots.value().most_recent_ack_slot) {
     return false;
   }

   // We keep track of the number of scheduling attempts for the given UE.
   unsigned sched_attempts_cnt = 0;
   slot_point next_slot =
-      most_recent_tx_ack_slots.has_value() and most_recent_tx_ack_slots.value().first > sched_ref_slot
-          ? most_recent_tx_ack_slots.value().first
+      most_recent_tx_ack_slots.has_value() and most_recent_tx_ack_slots.value().most_recent_tx_slot > sched_ref_slot
+          ? most_recent_tx_ack_slots.value().most_recent_tx_slot
           : sched_ref_slot;

   while (next_slot <= sched_ref_slot + max_dl_slots_ahead_sched) {
-    unsigned offset_to_sched_ref_slot = static_cast<unsigned>(next_slot - sched_ref_slot);
+    auto offset_to_sched_ref_slot = static_cast<unsigned>(next_slot - sched_ref_slot);
     const cell_slot_resource_allocator& pdcch_alloc = res_alloc[offset_to_sched_ref_slot];

     for (unsigned time_res_idx = 0; time_res_idx != bwp_cfg_common.pdsch_common.pdsch_td_alloc_list.size();
@@ -229,12 +233,15 @@ bool ue_srb0_scheduler::schedule_srb(cell_resource_allocator&
       return false;
     }

+    // As it is not possible to schedule a PDSCH whose related PUCCH falls in a slot that is the same as or older than
+    // the most recent already scheduled ACK slot (for the same UE), whenever we detect this is the case we skip the
+    // allocation in advance.
     slot_point most_recent_ack_slot = pdsch_alloc.slot;
     if (most_recent_tx_ack_slots.has_value()) {
-      if (pdsch_alloc.slot + dci_1_0_k1_values.back() <= most_recent_tx_ack_slots.value().second) {
+      if (pdsch_alloc.slot + dci_1_0_k1_values.back() <= most_recent_tx_ack_slots.value().most_recent_ack_slot) {
         continue;
       }
-      most_recent_ack_slot = most_recent_tx_ack_slots.value().second;
+      most_recent_ack_slot = most_recent_tx_ack_slots.value().most_recent_ack_slot;
     }

     dl_harq_process* candidate_h_dl =
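
The comment introduced above motivates an early skip: if even the largest candidate k1 value (the PDSCH-to-PUCCH slot offset) cannot place the HARQ-ACK strictly after the most recent ACK slot already scheduled for this UE, the PDSCH candidate is not worth trying. A small illustrative guard with plain unsigned slot indices follows; the function name, array size and k1 values are made up, not srsRAN API.

#include <array>

// Returns true if a PDSCH in pdsch_slot can still produce a HARQ-ACK strictly
// after the most recent ACK slot already scheduled for this UE, given the
// candidate k1 offsets sorted in ascending order (so .back() is the largest).
inline bool pdsch_slot_is_viable(unsigned pdsch_slot,
                                 unsigned most_recent_ack_slot,
                                 const std::array<unsigned, 4>& k1_values)
{
  // Mirrors the "continue" above: skip the slot when even the largest k1
  // lands on or before the most recent ACK slot.
  return pdsch_slot + k1_values.back() > most_recent_ack_slot;
}
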
@@ -460,7 +467,6 @@ dl_harq_process* ue_srb0_scheduler::schedule_srb1(ue& u,

   ue_grant_crbs = rb_helper::find_empty_interval_of_length(used_crbs, final_nof_prbs, 0);
   if (ue_grant_crbs.empty() or ue_grant_crbs.length() < final_nof_prbs) {
-    // return alloc_outcome::skip_ue;
     return nullptr;
   }

@@ -472,7 +478,6 @@ dl_harq_process* ue_srb0_scheduler::schedule_srb1(ue& u,
   grant_prbs_mcs mcs_prbs_estimate = ue_pcell.required_dl_prbs(pdsch_td_cfg, pending_bytes, dci_type);

   if (mcs_prbs_estimate.n_prbs == 0) {
-    // return alloc_outcome::skip_ue;
     return nullptr;
   }

@@ -487,7 +492,6 @@ dl_harq_process* ue_srb0_scheduler::schedule_srb1(ue& u,

   ue_grant_crbs = rb_helper::find_empty_interval_of_length(used_crbs, mcs_prbs_estimate.n_prbs, 0);
   if (ue_grant_crbs.empty() or (set_min_nof_prbs and ue_grant_crbs.length() < min_nof_prbs_partial_slots)) {
-    // return alloc_outcome::skip_ue;
     return nullptr;
   }

@@ -643,7 +647,7 @@ void ue_srb0_scheduler::fill_srb1_grant(ue& u,
                                         bool is_retx)
 {
   // Allocate DL HARQ.
-  // NOTE: We do not multiplex the SRBO PUCCH with existing PUCCH HARQs, thus both DAI and HARQ-ACK bit index are 0.
+  // NOTE: We do not multiplex the SRB1 PUCCH with existing PUCCH HARQs, thus both DAI and HARQ-ACK bit index are 0.
   static constexpr uint8_t srb1_dai = 0;
   if (not is_retx) {
     const bool is_fallback = true;
@@ -749,22 +753,23 @@ const pdsch_time_domain_resource_allocation& ue_srb0_scheduler::get_pdsch_td_cfg
   return cell_cfg.dl_cfg_common.init_dl_bwp.pdsch_common.pdsch_td_alloc_list[pdsch_time_res_idx];
 }

-optional<std::pair<slot_point, slot_point>> ue_srb0_scheduler::get_most_recent_slot_tx(du_ue_index_t ue_idx) const
+optional<ue_srb0_scheduler::most_recent_tx_slots> ue_srb0_scheduler::get_most_recent_slot_tx(du_ue_index_t ue_idx) const
 {
-  optional<std::pair<slot_point, slot_point>> most_recent_tx_ack_slot;
+  optional<ue_srb0_scheduler::most_recent_tx_slots> most_recent_tx_ack_slot;
   for (const auto& ue_proc : ongoing_ues_ack_retxs) {
     if (ue_proc.ue_index == ue_idx) {
       slot_point h_dl_slot_tx = ue_proc.get_harq_process()->slot_tx();
       slot_point h_dl_slot_ack = ue_proc.get_harq_process()->slot_ack();
       if (not most_recent_tx_ack_slot.has_value()) {
-        most_recent_tx_ack_slot.emplace(h_dl_slot_tx, h_dl_slot_ack);
+        most_recent_tx_ack_slot.emplace(
+            most_recent_tx_slots{.most_recent_tx_slot = h_dl_slot_tx, .most_recent_ack_slot = h_dl_slot_ack});
         continue;
       }
-      if (h_dl_slot_tx > most_recent_tx_ack_slot.value().first) {
-        most_recent_tx_ack_slot.value().first = h_dl_slot_tx;
+      if (h_dl_slot_tx > most_recent_tx_ack_slot.value().most_recent_tx_slot) {
+        most_recent_tx_ack_slot.value().most_recent_tx_slot = h_dl_slot_tx;
       }
-      if (h_dl_slot_ack > most_recent_tx_ack_slot.value().second) {
-        most_recent_tx_ack_slot.value().second = h_dl_slot_ack;
+      if (h_dl_slot_ack > most_recent_tx_ack_slot.value().most_recent_ack_slot) {
+        most_recent_tx_ack_slot.value().most_recent_ack_slot = h_dl_slot_ack;
       }
     }
   }

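The last hunk replaces the anonymous std::pair with the named most_recent_tx_slots struct and keeps, per UE, the maximum PDSCH (tx) slot and the maximum ACK slot across its ongoing HARQ processes. Below is a standalone sketch of that aggregation using plain unsigned slot numbers and an invented harq_info record instead of the real HARQ process and slot_point types.

#include <optional>
#include <vector>

// Invented, simplified record of one ongoing HARQ transmission.
struct harq_info {
  unsigned ue_index;
  unsigned tx_slot;  // slot of the PDSCH transmission
  unsigned ack_slot; // slot where the HARQ-ACK (PUCCH) is expected
};

// Named pair of slots, mirroring ue_srb0_scheduler::most_recent_tx_slots.
struct most_recent_tx_slots {
  unsigned most_recent_tx_slot;
  unsigned most_recent_ack_slot;
};

// Returns the most recent tx/ack slots among the UE's ongoing HARQs, or
// nullopt if the UE has none (same shape as get_most_recent_slot_tx()).
std::optional<most_recent_tx_slots> get_most_recent_slot_tx(const std::vector<harq_info>& ongoing, unsigned ue_idx)
{
  std::optional<most_recent_tx_slots> result;
  for (const auto& h : ongoing) {
    if (h.ue_index != ue_idx) {
      continue;
    }
    if (not result.has_value()) {
      result.emplace(most_recent_tx_slots{h.tx_slot, h.ack_slot});
      continue;
    }
    if (h.tx_slot > result->most_recent_tx_slot) {
      result->most_recent_tx_slot = h.tx_slot;
    }
    if (h.ack_slot > result->most_recent_ack_slot) {
      result->most_recent_ack_slot = h.ack_slot;
    }
  }
  return result;
}

int main()
{
  std::vector<harq_info> ongoing = {{1, 100, 104}, {1, 102, 108}, {2, 99, 103}};
  auto slots = get_most_recent_slot_tx(ongoing, 1);
  // For UE 1 this yields most_recent_tx_slot = 102 and most_recent_ack_slot = 108.
  return slots.has_value() ? 0 : 1;
}

Naming the fields makes call sites such as the comparisons in schedule_srb self-describing (most_recent_ack_slot instead of .second).
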
lib/scheduler/ue_scheduling/ue_srb0_scheduler.h

Lines changed: 20 additions & 10 deletions
@@ -42,12 +42,18 @@ class ue_srb0_scheduler
   /// Erase the UEs' HARQ processes that have been acked from the SRB scheduler cache.
   void update_ongoing_ue_retxs();

+  // Holds the most recent slot with PDSCH for SRB0/SRB1 and the most recent slot with the corresponding PUCCH.
+  struct most_recent_tx_slots {
+    slot_point most_recent_tx_slot;
+    slot_point most_recent_ack_slot;
+  };
+
   /// \brief Tries to schedule SRB0 message for a UE. Returns true if successful, false otherwise.
-  bool schedule_srb(cell_resource_allocator&                    res_alloc,
-                    ue&                                         u,
-                    bool                                        is_srb0,
-                    dl_harq_process*                            h_dl_retx,
-                    optional<std::pair<slot_point, slot_point>> most_recent_tx_ack_slots);
+  bool schedule_srb(cell_resource_allocator&       res_alloc,
+                    ue&                            u,
+                    bool                           is_srb0,
+                    dl_harq_process*               h_dl_retx,
+                    optional<most_recent_tx_slots> most_recent_tx_ack_slots);

   /// \brief Tries to schedule SRB0 message for a UE and a specific PDSCH TimeDomain Resource and Search Space.
   dl_harq_process* schedule_srb0(ue& u,
@@ -134,15 +140,19 @@ class ue_srb0_scheduler

   void store_harq_tx(du_ue_index_t ue_index, dl_harq_process* h_dl, bool is_srb0);

-  // If there is any pending SRB0 or SRB1 transmissions for the UE, the function returns the most recent slot with PDSCH
-  // for SRB0/SRB1 (first element of the pair) and the most recent slot with the corresponding PUCCH (first element of
-  // the pair).
-  optional<std::pair<slot_point, slot_point>> get_most_recent_slot_tx(du_ue_index_t ue_idx) const;
+  // If there are any pending SRB0 or SRB1 transmissions for the UE, the function returns the most recent slot with
+  // PDSCH for SRB0/SRB1 and the most recent slot with the corresponding PUCCH.
+  optional<most_recent_tx_slots> get_most_recent_slot_tx(du_ue_index_t ue_idx) const;

   const scheduler_ue_expert_config& expert_cfg;
   const cell_configuration& cell_cfg;
   // TODO: Find proper values for these 2 parameters.
-  const unsigned max_dl_slots_ahead_sched = 10U;
+  // Set the max number of slots the scheduler can look ahead in the resource grid (with respect to the current slot)
+  // to find PDSCH space for SRB0 or SRB1.
+  const unsigned max_dl_slots_ahead_sched = 10U;
+  // Set the max number of attempts the scheduler can make while running through the nested loops over the PDSCH time
+  // allocation indices and the look-ahead slots. This is to avoid excessively long iterations in case of a large
+  // number of PDSCH time allocation indices.
   const unsigned max_sched_attempts_per_ue = 10U;
   pdcch_resource_allocator& pdcch_sch;
   pucch_allocator& pucch_alloc;

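The two new comments document the bounds used in schedule_srb: the scheduler searches at most max_dl_slots_ahead_sched slots past the reference slot, and gives up on a UE after max_sched_attempts_per_ue candidate (slot, PDSCH time-domain index) combinations. A compact sketch of that doubly bounded search follows; try_allocate is a stub and the numbers are made up, so this only illustrates the loop/bound structure.

#include <cstdio>

// Stub for "does this (slot offset, PDSCH time-domain index) candidate fit?".
// Always failing here just exercises the attempt/look-ahead bounds.
static bool try_allocate(unsigned slot_offset, unsigned time_res_idx)
{
  (void)slot_offset;
  (void)time_res_idx;
  return false;
}

int main()
{
  const unsigned max_dl_slots_ahead_sched  = 10U; // look-ahead bound (slots)
  const unsigned max_sched_attempts_per_ue = 10U; // attempt bound per UE
  const unsigned nof_pdsch_td_res          = 4U;  // made-up list size

  unsigned attempts  = 0;
  bool     allocated = false;
  // Outer loop over the look-ahead slots, inner loop over the PDSCH
  // time-domain resources; both cut short by the attempt counter.
  for (unsigned offset = 0; offset <= max_dl_slots_ahead_sched and not allocated; ++offset) {
    for (unsigned td_idx = 0; td_idx != nof_pdsch_td_res; ++td_idx) {
      if (attempts >= max_sched_attempts_per_ue) {
        std::printf("giving up after %u attempts\n", attempts);
        return 0;
      }
      ++attempts;
      if (try_allocate(offset, td_idx)) {
        allocated = true;
        break;
      }
    }
  }
  std::printf("allocated=%d after %u attempts\n", allocated ? 1 : 0, attempts);
  return 0;
}
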
tests/unittests/scheduler/multiple_ue_sched_test.cpp

Lines changed: 1 addition & 1 deletion
@@ -507,7 +507,7 @@ class scheduler_impl_tester
     pdu.ue_index = bench->rnti_to_du_ue_index(pdu.crnti);

     uci_indication::uci_pdu::uci_pusch_pdu pusch_pdu{};
-    // Auto ACK latest_harq_states.
+    // Auto ACK harqs.
     if (pusch.uci->harq.has_value()) {
       pusch_pdu.harqs.resize(pusch.uci->harq->harq_ack_nof_bits, mac_harq_ack_report_status::ack);
     }
