Commit 51c0185

sched: integration of new cell harq system in the scheduler
1 parent e249f59

37 files changed: +954 −847 lines

lib/scheduler/cell/cell_harq_manager.cpp

Lines changed: 2 additions & 1 deletion
@@ -695,7 +695,8 @@ int ul_harq_process_handle::ul_crc_info(bool ack)
 {
   if (impl->status != harq_state_t::waiting_ack) {
     // HARQ is not expecting CRC info.
-    harq_repo->logger.warning("rnti={} h_id={}: CRC arrived for UL HARQ not expecting it", impl->rnti, impl->h_id);
+    harq_repo->logger.warning(
+        "rnti={} h_id={}: Discarding CRC. Cause: UL HARQ process is not expecting any CRC", impl->rnti, impl->h_id);
     return -1;
   }
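
Note: the hunk above only rewords the warning, but it makes the contract of ul_crc_info() explicit — a CRC indication arriving while the UL HARQ process is not in the waiting_ack state is discarded and the call returns -1. A minimal caller-side sketch of that contract, assuming only what this commit shows (the optional-returning ul_harq() accessor and the negative return value); the names ue_harqs, h_id and crc_ok are placeholders:

std::optional<ul_harq_process_handle> h = ue_harqs.ul_harq(h_id);
if (h.has_value()) {
  // A negative return means the CRC was discarded because the process was not awaiting it.
  if (h->ul_crc_info(crc_ok) < 0) {
    // Nothing to update for this HARQ process.
  }
}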

lib/scheduler/cell/cell_harq_manager.h

Lines changed: 4 additions & 4 deletions
@@ -464,28 +464,28 @@ class unique_ue_harq_entity

   std::optional<dl_harq_process_handle> dl_harq(harq_id_t h_id)
   {
-    if (get_dl_ue().harqs[h_id].status != harq_utils::harq_state_t::empty) {
+    if (h_id < get_dl_ue().harqs.size() and get_dl_ue().harqs[h_id].status != harq_utils::harq_state_t::empty) {
       return dl_harq_process_handle{cell_harq_mgr->dl, get_dl_ue().harqs[h_id]};
     }
     return std::nullopt;
   }
   std::optional<const dl_harq_process_handle> dl_harq(harq_id_t h_id) const
   {
-    if (get_dl_ue().harqs[h_id].status != harq_utils::harq_state_t::empty) {
+    if (h_id < get_dl_ue().harqs.size() and get_dl_ue().harqs[h_id].status != harq_utils::harq_state_t::empty) {
       return dl_harq_process_handle{cell_harq_mgr->dl, cell_harq_mgr->dl.ues[ue_index].harqs[h_id]};
     }
     return std::nullopt;
   }
   std::optional<ul_harq_process_handle> ul_harq(harq_id_t h_id)
   {
-    if (get_ul_ue().harqs[h_id].status != harq_utils::harq_state_t::empty) {
+    if (h_id < get_ul_ue().harqs.size() and get_ul_ue().harqs[h_id].status != harq_utils::harq_state_t::empty) {
       return ul_harq_process_handle{cell_harq_mgr->ul, get_ul_ue().harqs[h_id]};
     }
     return std::nullopt;
   }
   std::optional<const ul_harq_process_handle> ul_harq(harq_id_t h_id) const
   {
-    if (get_ul_ue().harqs[h_id].status != harq_utils::harq_state_t::empty) {
+    if (h_id < get_ul_ue().harqs.size() and get_ul_ue().harqs[h_id].status != harq_utils::harq_state_t::empty) {
       return ul_harq_process_handle{cell_harq_mgr->ul, cell_harq_mgr->ul.ues[ue_index].harqs[h_id]};
     }
     return std::nullopt;
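
Note: with the added bounds check, dl_harq()/ul_harq() now return std::nullopt both for an out-of-range HARQ ID and for a process in the empty state, so callers test the optional instead of indexing blindly. A short usage sketch mirroring the call sites updated in scheduler_time_pf.cpp below; ue_harqs stands in for a unique_ue_harq_entity and handle_retx() is a hypothetical helper:

for (unsigned i = 0; i != ue_harqs.nof_dl_harqs(); ++i) {
  std::optional<dl_harq_process_handle> h = ue_harqs.dl_harq(to_harq_id(i));
  if (h.has_value() and h->has_pending_retx()) {
    // The process exists and is not empty, so the handle is safe to dereference.
    handle_retx(*h);
  }
}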

lib/scheduler/common_scheduling/ra_scheduler.cpp

Lines changed: 0 additions & 6 deletions
@@ -165,12 +165,6 @@ void ra_scheduler::precompute_msg3_pdus()
   msg3_data.resize(pusch_td_alloc_list.size());

   for (unsigned i = 0; i != msg3_data.size(); ++i) {
-    // Create a dummy HARQ used to fill DCI and PUSCH.
-    harq_logger dummy_harq_logger{logger, to_rnti(0x4601), cell_cfg.cell_index, false};
-    ul_harq_process dummy_h_ul(to_harq_id(0), dummy_harq_logger);
-    const slot_point dummy_slot{to_numerology_value(get_ul_bwp_cfg().scs), 0};
-    dummy_h_ul.new_tx(dummy_slot, sched_cfg.max_nof_msg3_harq_retxs);
-
     // Compute the required PRBs and TBS for Msg3.
     const pusch_config_params pusch_cfg = get_pusch_config_f0_0_tc_rnti(cell_cfg, pusch_td_alloc_list[i]);
     const sch_prbs_tbs prbs_tbs =

lib/scheduler/policy/scheduler_time_pf.cpp

Lines changed: 46 additions & 50 deletions
@@ -54,7 +54,7 @@ void scheduler_time_pf::dl_sched(ue_pdsch_allocator& pdsch_alloc,
     ue.save_dl_alloc(alloc_result.alloc_bytes);
     // Re-add the UE to the queue if scheduling of re-transmission fails so that scheduling of retransmission are
     // attempted again before scheduling new transmissions.
-    if (ue.dl_retx_h != nullptr and alloc_result.status == alloc_status::invalid_params) {
+    if (ue.dl_retx_h.has_value() and alloc_result.status == alloc_status::invalid_params) {
       dl_queue.push(&ue);
     }
     dl_queue.pop();
@@ -99,7 +99,7 @@ void scheduler_time_pf::ul_sched(ue_pusch_allocator& pusch_alloc,
     ue.save_ul_alloc(alloc_result.alloc_bytes);
     // Re-add the UE to the queue if scheduling of re-transmission fails so that scheduling of retransmission are
     // attempted again before scheduling new transmissions.
-    if (ue.ul_retx_h != nullptr and alloc_result.status == alloc_status::invalid_params) {
+    if (ue.ul_retx_h.has_value() and alloc_result.status == alloc_status::invalid_params) {
       ul_queue.push(&ue);
     }
     ul_queue.pop();
@@ -115,24 +115,24 @@ alloc_result scheduler_time_pf::try_dl_alloc(ue_ctxt& ctxt,
   alloc_result alloc_result = {alloc_status::invalid_params};
   ue_pdsch_grant grant{&ues[ctxt.ue_index], ctxt.cell_index};
   // Prioritize reTx over newTx.
-  if (ctxt.dl_retx_h != nullptr) {
-    grant.h_id = ctxt.dl_retx_h->id;
+  if (ctxt.dl_retx_h.has_value()) {
+    grant.h_id = ctxt.dl_retx_h->id();
     alloc_result = pdsch_alloc.allocate_dl_grant(grant);
     if (alloc_result.status == alloc_status::success) {
-      ctxt.dl_retx_h = nullptr;
+      ctxt.dl_retx_h.reset();
     }
     // Return result here irrespective of the outcome so that reTxs of UEs are scheduled before scheduling newTxs of
     // UEs.
     return alloc_result;
   }

-  if (ctxt.dl_newtx_h != nullptr) {
-    grant.h_id = ctxt.dl_newtx_h->id;
+  if (ctxt.has_empty_dl_harq) {
+    grant.h_id = INVALID_HARQ_ID;
     grant.recommended_nof_bytes = ues[ctxt.ue_index].pending_dl_newtx_bytes();
     grant.max_nof_rbs = max_rbs;
     alloc_result = pdsch_alloc.allocate_dl_grant(grant);
     if (alloc_result.status == alloc_status::success) {
-      ctxt.dl_newtx_h = nullptr;
+      ctxt.has_empty_dl_harq = false;
     }
     return alloc_result;
   }
@@ -148,24 +148,24 @@ alloc_result scheduler_time_pf::try_ul_alloc(ue_ctxt& ctxt,
   alloc_result alloc_result = {alloc_status::invalid_params};
   ue_pusch_grant grant{&ues[ctxt.ue_index], ctxt.cell_index};
   // Prioritize reTx over newTx.
-  if (ctxt.ul_retx_h != nullptr) {
-    grant.h_id = ctxt.ul_retx_h->id;
+  if (ctxt.ul_retx_h.has_value()) {
+    grant.h_id = ctxt.ul_retx_h->id();
     alloc_result = pusch_alloc.allocate_ul_grant(grant);
     if (alloc_result.status == alloc_status::success) {
-      ctxt.ul_retx_h = nullptr;
+      ctxt.ul_retx_h.reset();
     }
     // Return result here irrespective of the outcome so that reTxs of UEs are scheduled before scheduling newTxs of
     // UEs.
     return alloc_result;
   }

-  if (ctxt.ul_newtx_h != nullptr) {
-    grant.h_id = ctxt.ul_newtx_h->id;
+  if (ctxt.has_empty_ul_harq) {
+    grant.h_id = INVALID_HARQ_ID;
     grant.recommended_nof_bytes = ues[ctxt.ue_index].pending_ul_newtx_bytes();
     grant.max_nof_rbs = max_rbs;
     alloc_result = pusch_alloc.allocate_ul_grant(grant);
     if (alloc_result.status == alloc_status::success) {
-      ctxt.ul_newtx_h = nullptr;
+      ctxt.has_empty_ul_harq = false;
     }
     return alloc_result;
   }
@@ -177,8 +177,8 @@ alloc_result scheduler_time_pf::try_ul_alloc(ue_ctxt& ctxt,

 void scheduler_time_pf::ue_ctxt::compute_dl_prio(const slice_ue& u, ran_slice_id_t slice_id)
 {
-  dl_retx_h = nullptr;
-  dl_newtx_h = nullptr;
+  dl_retx_h.reset();
+  has_empty_dl_harq = false;
   dl_prio = 0;
   const ue_cell* ue_cc = u.find_cell(cell_index);
   if (ue_cc == nullptr) {
@@ -188,23 +188,21 @@ void scheduler_time_pf::ue_ctxt::compute_dl_prio(const slice_ue& u, ran_slice_id
                 "policy scheduler called for UE={} in fallback",
                 ue_cc->ue_index);

-  static_vector<const dl_harq_process*, MAX_NOF_HARQS> dl_harq_candidates;
-  // Create list of DL HARQ processes with pending retx, sorted from oldest to newest.
+  std::optional<dl_harq_process_handle> oldest_dl_harq_candidate;
   for (unsigned i = 0; i != ue_cc->harqs.nof_dl_harqs(); ++i) {
-    const dl_harq_process& h = ue_cc->harqs.dl_harq(i);
-    if (h.has_pending_retx() and not h.last_alloc_params().is_fallback and
-        h.last_alloc_params().tb[0]->slice_id == slice_id) {
-      dl_harq_candidates.push_back(&h);
+    std::optional<dl_harq_process_handle> h = ue_cc->harqs.dl_harq(to_harq_id(i));
+    if (h.has_value() and h->has_pending_retx() and not h->get_grant_params().is_fallback and
+        h->get_grant_params().slice_id == slice_id) {
+      if (not oldest_dl_harq_candidate.has_value() or oldest_dl_harq_candidate->uci_slot() > h->uci_slot()) {
+        oldest_dl_harq_candidate = h;
+      }
     }
   }
-  std::sort(dl_harq_candidates.begin(),
-            dl_harq_candidates.end(),
-            [](const dl_harq_process* lhs, const dl_harq_process* rhs) { return lhs->slot_ack() < rhs->slot_ack(); });

   // Calculate DL priority.
-  dl_retx_h = dl_harq_candidates.empty() ? nullptr : dl_harq_candidates.front();
-  dl_newtx_h = ue_cc->harqs.find_empty_dl_harq();
-  if (dl_retx_h != nullptr or (dl_newtx_h != nullptr and u.has_pending_dl_newtx_bytes())) {
+  dl_retx_h = oldest_dl_harq_candidate;
+  has_empty_dl_harq = ue_cc->harqs.has_empty_dl_harqs();
+  if (dl_retx_h.has_value() or (has_empty_dl_harq and u.has_pending_dl_newtx_bytes())) {
     // NOTE: It does not matter whether it's a reTx or newTx since DL priority is computed based on estimated
     // instantaneous achievable rate to the average throughput of the user.
     // [Implementation-defined] We consider only the SearchSpace defined in UE dedicated configuration.
@@ -235,8 +233,8 @@ void scheduler_time_pf::ue_ctxt::compute_dl_prio(const slice_ue& u, ran_slice_id
     std::optional<sch_mcs_index> mcs = ue_cc->link_adaptation_controller().calculate_dl_mcs(pdsch_cfg.mcs_table);
     if (not mcs.has_value()) {
       // CQI is either 0, or > 15.
-      dl_retx_h = nullptr;
-      dl_newtx_h = nullptr;
+      has_empty_dl_harq = false;
+      dl_retx_h = std::nullopt;
       return;
     }

@@ -251,15 +249,15 @@ void scheduler_time_pf::ue_ctxt::compute_dl_prio(const slice_ue& u, ran_slice_id
     }
     return;
   }
-  dl_newtx_h = nullptr;
+  has_empty_dl_harq = false;
 }

 void scheduler_time_pf::ue_ctxt::compute_ul_prio(const slice_ue& u,
                                                  const ue_resource_grid_view& res_grid,
                                                  ran_slice_id_t slice_id)
 {
-  ul_retx_h = nullptr;
-  ul_newtx_h = nullptr;
+  ul_retx_h.reset();
+  has_empty_ul_harq = false;
   ul_prio = 0;
   sr_ind_received = false;
   const ue_cell* ue_cc = u.find_cell(cell_index);
@@ -270,23 +268,21 @@ void scheduler_time_pf::ue_ctxt::compute_ul_prio(const slice_ue& u,
                 "policy scheduler called for UE={} in fallback",
                 ue_cc->ue_index);

-  static_vector<const ul_harq_process*, MAX_NOF_HARQS> ul_harq_candidates;
-  // Create list of UL HARQ processes with pending retx, sorted from oldest to newest.
+  std::optional<ul_harq_process_handle> oldest_ul_harq_candidate;
   for (unsigned i = 0; i != ue_cc->harqs.nof_ul_harqs(); ++i) {
-    const ul_harq_process& h = ue_cc->harqs.ul_harq(i);
-    if (h.has_pending_retx() and h.last_tx_params().slice_id == slice_id) {
-      ul_harq_candidates.push_back(&h);
+    std::optional<ul_harq_process_handle> h = ue_cc->harqs.ul_harq(to_harq_id(i));
+    if (h.has_value() and h->has_pending_retx() and h->get_grant_params().slice_id == slice_id) {
+      if (not oldest_ul_harq_candidate.has_value() or oldest_ul_harq_candidate->pusch_slot() > h->pusch_slot()) {
+        oldest_ul_harq_candidate = h;
+      }
     }
   }
-  std::sort(ul_harq_candidates.begin(),
-            ul_harq_candidates.end(),
-            [](const ul_harq_process* lhs, const ul_harq_process* rhs) { return lhs->slot_ack() < rhs->slot_ack(); });

   // Calculate UL priority.
-  ul_retx_h = ul_harq_candidates.empty() ? nullptr : ul_harq_candidates.front();
-  ul_newtx_h = ue_cc->harqs.find_empty_ul_harq();
-  sr_ind_received = u.has_pending_sr();
-  if (ul_retx_h != nullptr or (ul_newtx_h != nullptr and u.pending_ul_newtx_bytes() > 0)) {
+  ul_retx_h = oldest_ul_harq_candidate;
+  has_empty_ul_harq = ue_cc->harqs.has_empty_ul_harqs();
+  sr_ind_received = u.has_pending_sr();
+  if (ul_retx_h.has_value() or (has_empty_ul_harq and u.pending_ul_newtx_bytes() > 0)) {
     // NOTE: It does not matter whether it's a reTx or newTx since UL priority is computed based on estimated
     // instantaneous achievable rate to the average throughput of the user.
     // [Implementation-defined] We consider only the SearchSpace defined in UE dedicated configuration.
@@ -340,7 +336,7 @@ void scheduler_time_pf::ue_ctxt::compute_ul_prio(const slice_ue& u,
     }
     return;
   }
-  ul_newtx_h = nullptr;
+  has_empty_ul_harq = false;
 }

 void scheduler_time_pf::ue_ctxt::save_dl_alloc(uint32_t alloc_bytes)
@@ -368,8 +364,8 @@ void scheduler_time_pf::ue_ctxt::save_ul_alloc(uint32_t alloc_bytes)
 bool scheduler_time_pf::ue_dl_prio_compare::operator()(const scheduler_time_pf::ue_ctxt* lhs,
                                                        const scheduler_time_pf::ue_ctxt* rhs) const
 {
-  const bool is_lhs_retx = lhs->dl_retx_h != nullptr;
-  const bool is_rhs_retx = rhs->dl_retx_h != nullptr;
+  const bool is_lhs_retx = lhs->dl_retx_h.has_value();
+  const bool is_rhs_retx = rhs->dl_retx_h.has_value();

   // First, prioritize UEs with re-transmissions.
   // ReTx in one UE and not in other UE.
@@ -383,8 +379,8 @@ bool scheduler_time_pf::ue_dl_prio_compare::operator()(const scheduler_time_pf::
 bool scheduler_time_pf::ue_ul_prio_compare::operator()(const scheduler_time_pf::ue_ctxt* lhs,
                                                        const scheduler_time_pf::ue_ctxt* rhs) const
 {
-  const bool is_lhs_retx = lhs->ul_retx_h != nullptr;
-  const bool is_rhs_retx = rhs->ul_retx_h != nullptr;
+  const bool is_lhs_retx = lhs->ul_retx_h.has_value();
+  const bool is_rhs_retx = rhs->ul_retx_h.has_value();
   // First, prioritize UEs with pending SR.
   // SR indication in one UE and not in other UE.
   if (lhs->sr_ind_received != rhs->sr_ind_received) {
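
Note: the policy no longer builds a static_vector of retransmission candidates and sorts it; since only the front element was ever used, it now keeps a single std::optional holding the oldest pending-retx HARQ, comparing uci_slot() for DL and pusch_slot() for UL in one pass. A self-contained sketch of that running-minimum pattern, with illustrative types rather than the scheduler's own:

#include <optional>
#include <vector>

struct retx_candidate {
  unsigned slot; // stands in for uci_slot() / pusch_slot()
  bool pending_retx;
};

// Keep only the oldest schedulable candidate instead of collecting and sorting all of them.
std::optional<retx_candidate> find_oldest_retx(const std::vector<retx_candidate>& harqs)
{
  std::optional<retx_candidate> oldest;
  for (const retx_candidate& h : harqs) {
    if (h.pending_retx and (not oldest.has_value() or oldest->slot > h.slot)) {
      oldest = h; // running minimum: one pass, no intermediate container.
    }
  }
  return oldest;
}

The result is the oldest candidate by the chosen slot key, obtained without the O(N log N) sort or the temporary container of the previous implementation.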

lib/scheduler/policy/scheduler_time_pf.h

Lines changed: 6 additions & 6 deletions
@@ -59,10 +59,10 @@ class scheduler_time_pf : public scheduler_policy
     /// UL priority value of the UE.
     double ul_prio = 0;

-    const dl_harq_process* dl_retx_h = nullptr;
-    const dl_harq_process* dl_newtx_h = nullptr;
-    const ul_harq_process* ul_retx_h = nullptr;
-    const ul_harq_process* ul_newtx_h = nullptr;
+    bool has_empty_dl_harq = false;
+    bool has_empty_ul_harq = false;
+    std::optional<dl_harq_process_handle> dl_retx_h;
+    std::optional<ul_harq_process_handle> ul_retx_h;
     /// Flag indicating whether SR indication from the UE is received or not.
     bool sr_ind_received = false;

@@ -113,7 +113,7 @@ class scheduler_time_pf : public scheduler_policy
     // Adapter of the priority_queue push method to avoid adding candidates with skip priority level.
     void push(ue_ctxt* elem)
     {
-      if (elem->dl_retx_h == nullptr and elem->dl_newtx_h == nullptr) {
+      if (not elem->dl_retx_h.has_value() and not elem->has_empty_dl_harq) {
         return;
       }
       base_type::push(elem);
@@ -138,7 +138,7 @@ class scheduler_time_pf : public scheduler_policy
     // Adapter of the priority_queue push method to avoid adding candidates with skip priority level.
     void push(ue_ctxt* elem)
     {
-      if (elem->ul_retx_h == nullptr and elem->ul_newtx_h == nullptr) {
+      if (not elem->ul_retx_h.has_value() and not elem->has_empty_ul_harq) {
         return;
       }
       base_type::push(elem);
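
Note: the ue_ctxt now carries optional retx handles plus has_empty_dl_harq/has_empty_ul_harq flags, and the queue adapters above skip UEs that have neither a pending retransmission nor an empty HARQ process. A self-contained sketch of that filtering-push adapter, using illustrative types in place of ue_ctxt and the real comparators:

#include <queue>
#include <vector>

struct candidate {
  bool retx_pending = false;   // stands in for dl_retx_h.has_value()
  bool has_empty_harq = false; // stands in for has_empty_dl_harq
  double prio = 0;
};

struct prio_compare {
  bool operator()(const candidate* lhs, const candidate* rhs) const { return lhs->prio < rhs->prio; }
};

// priority_queue adapter whose push() drops candidates that cannot be scheduled at all.
class candidate_queue : public std::priority_queue<candidate*, std::vector<candidate*>, prio_compare>
{
  using base_type = std::priority_queue<candidate*, std::vector<candidate*>, prio_compare>;

public:
  void push(candidate* elem)
  {
    if (not elem->retx_pending and not elem->has_empty_harq) {
      return; // Nothing schedulable for this UE: keep it out of the queue.
    }
    base_type::push(elem);
  }
};

As in the header above, push() hides the base-class overload, so callers going through the adapter get the filtering automatically.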
