sched-nr: fix sched not allocating CCCH at low MCS

Signed-off-by: Carlo Galiotto <carlo@srs.io>
Branch: master
Author: Carlo Galiotto (committed by carlo-gal, 3 years ago)
Parent: 65dae777b1
Commit: 2b3158536a
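Background on the fix (editor's note, not part of the commit message): SRB0/CCCH carries the RRC Setup over RLC TM, which cannot segment, so the whole message must fit in a single MAC PDU; at a very low MCS the grant's TBS can be too small for that. The diff below therefore makes the PDU builder report whether the CCCH payload would need segmentation, lets the PDSCH allocator retry with an MCS floor of 4, and feeds build_pdu() the TBS in bytes rather than bits. A toy illustration of the fit check (values and names are made up, not the scheduler's API):

// Editor's sketch: the grant TBS is tracked in bits, the pending SRB0/CCCH payload
// in bytes, so the TBS has to be divided by 8 before checking for segmentation.
#include <cstdint>
#include <cstdio>

int main() {
  uint32_t tbs_bits   = 120; // TBS of a very low-MCS grant, as the HARQ stores it
  uint32_t ccch_bytes = 30;  // pending SRB0/CCCH bytes (e.g. an RRC Setup)
  bool     fits       = ccch_bytes <= tbs_bits / 8;
  std::printf("CCCH fits unsegmented: %s\n", fits ? "yes" : "no"); // prints "no"
  return 0;
}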

@@ -61,7 +61,7 @@ public:
   struct pdu_builder {
     pdu_builder() = default;
     explicit pdu_builder(uint32_t cc_, ue_buffer_manager& parent_) : cc(cc_), parent(&parent_) {}
-    void alloc_subpdus(uint32_t rem_bytes, sched_nr_interface::dl_pdu_t& pdu);
+    bool alloc_subpdus(uint32_t rem_bytes, sched_nr_interface::dl_pdu_t& pdu);
 
   private:
     uint32_t cc = SRSRAN_MAX_CARRIERS;
@ -180,9 +180,9 @@ public:
ul_harq_proc* find_empty_ul_harq() { return ue->harq_ent.find_empty_ul_harq(); } ul_harq_proc* find_empty_ul_harq() { return ue->harq_ent.find_empty_ul_harq(); }
/// Build PDU with MAC CEs and MAC SDUs /// Build PDU with MAC CEs and MAC SDUs
void build_pdu(uint32_t rem_bytes, sched_nr_interface::dl_pdu_t& pdu) bool build_pdu(uint32_t rem_bytes, sched_nr_interface::dl_pdu_t& pdu)
{ {
ue->pdu_builder.alloc_subpdus(rem_bytes, pdu); return ue->pdu_builder.alloc_subpdus(rem_bytes, pdu);
} }
/// Channel Information Getters /// Channel Information Getters

@@ -353,33 +353,51 @@ alloc_result bwp_slot_allocator::alloc_pdsch(slot_ue& ue, uint32_t ss_id, const
   slot_cfg.idx = ue.pdsch_slot.to_uint();
   // Value 0.95 is from TS 38.214 v15.14.00, Section 5.1.3, page 17
   const static float max_R = 0.95;
-  double R_prime = max_R;
+  double R_prime;
+  const static int min_MCS_ccch = 4;
+  // The purpose of the external loop is to reset the MCS to a min value of 4 if there are not enough PRBs to
+  // allocate the SRB0/CCCH. This loop only affects the low MCS values
   while (true) {
-    // Generate PDSCH
-    bool success = ue->phy().get_pdsch_cfg(slot_cfg, pdcch.dci, pdsch.sch);
-    srsran_assert(success, "Error converting DCI to grant");
-    if (ue.h_dl->nof_retx() != 0) {
-      srsran_assert(pdsch.sch.grant.tb[0].tbs == (int)ue.h_dl->tbs(), "The TBS did not remain constant in retx");
+    // The purpose of the internal loop is to decrease the MCS if the effective coderate is too high. This loop
+    // only affects the high MCS values
+    while (true) {
+      // Generate PDSCH
+      bool success = ue->phy().get_pdsch_cfg(slot_cfg, pdcch.dci, pdsch.sch);
+      srsran_assert(success, "Error converting DCI to grant");
+      if (ue.h_dl->nof_retx() != 0) {
+        srsran_assert(pdsch.sch.grant.tb[0].tbs == (int)ue.h_dl->tbs(), "The TBS did not remain constant in retx");
+      }
+      R_prime = pdsch.sch.grant.tb[0].R_prime;
+      if (ue.h_dl->nof_retx() > 0 or R_prime < max_R or mcs <= 0) {
+        break;
+      }
+      // Decrease MCS if first tx and rate is too high
+      mcs--;
+      pdcch.dci.mcs = mcs;
     }
-    R_prime = pdsch.sch.grant.tb[0].R_prime;
-    if (ue.h_dl->nof_retx() > 0 or R_prime < max_R or mcs <= 0) {
+    if (R_prime >= max_R and mcs == 0) {
+      logger.warning("Couldn't find mcs that leads to R<0.95");
+    }
+    ue.h_dl->set_mcs(mcs);
+    ue.h_dl->set_tbs(pdsch.sch.grant.tb[0].tbs); // set HARQ TBS
+    pdsch.sch.grant.tb[0].softbuffer.tx = ue.h_dl->get_softbuffer().get();
+    pdsch.data[0] = ue.h_dl->get_tx_pdu()->get();
+    // Select scheduled LCIDs and update UE buffer state
+    bwp_pdsch_slot.dl.data.emplace_back();
+    // NOTE: ue.h_dl->tbs() has to be converted from bits to bytes
+    bool segmented_ccch_pdu = not ue.build_pdu(ue.h_dl->tbs() / 8, bwp_pdsch_slot.dl.data.back());
+    if (segmented_ccch_pdu) {
+      // In case of segmented PDU for CCCH, set minimum MCS to 4 and re-run the outer while loop
+      bwp_pdsch_slot.dl.data.pop_back();
+      mcs           = min_MCS_ccch;
+      pdcch.dci.mcs = mcs;
+      logger.warning(
+          "SCHED: MCS increased to min value %d to allocate SRB0/CCCH for rnti=0x%x", min_MCS_ccch, ue->rnti);
+    } else {
       break;
     }
-    // Decrease MCS if first tx and rate is too high
-    mcs--;
-    pdcch.dci.mcs = mcs;
-  }
-  if (R_prime >= max_R and mcs == 0) {
-    logger.warning("Couldn't find mcs that leads to R<0.95");
   }
-  ue.h_dl->set_mcs(mcs);
-  ue.h_dl->set_tbs(pdsch.sch.grant.tb[0].tbs); // set HARQ TBS
-  pdsch.sch.grant.tb[0].softbuffer.tx = ue.h_dl->get_softbuffer().get();
-  pdsch.data[0] = ue.h_dl->get_tx_pdu()->get();
-  // Select scheduled LCIDs and update UE buffer state
-  bwp_pdsch_slot.dl.data.emplace_back();
-  ue.build_pdu(ue.h_dl->tbs(), bwp_pdsch_slot.dl.data.back());
 
   // Generate PUCCH
   bwp_uci_slot.pending_acks.emplace_back();
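Taken together, the two loops in the hunk above amount to a clamp-and-search over the MCS: the inner loop lowers the MCS while the effective coderate is at or above 0.95, and the outer loop raises it back to a floor of 4 whenever the resulting TBS would force SRB0/CCCH segmentation. A stand-alone sketch of that flow, with toy effective_rate()/tbs_bytes() models standing in for get_pdsch_cfg() and build_pdu() (numbers and helper names are invented, not the real scheduler code):

#include <cstdio>

constexpr float max_R        = 0.95f; // TS 38.214 Sec. 5.1.3 limit quoted in the diff
constexpr int   min_mcs_ccch = 4;     // MCS floor the commit introduces for SRB0/CCCH

// Toy models: coderate grows and TBS grows as the MCS increases.
float    effective_rate(int mcs) { return 0.60f + 0.015f * mcs; }
unsigned tbs_bytes(int mcs) { return 8u + 6u * static_cast<unsigned>(mcs); }
bool     would_segment_ccch(int mcs, unsigned ccch_bytes) { return ccch_bytes > tbs_bytes(mcs); }

int select_mcs(int mcs, unsigned ccch_bytes) {
  while (true) {
    // Inner loop: lower the MCS while the effective coderate is >= 0.95 (high-MCS side).
    while (mcs > 0 and effective_rate(mcs) >= max_R) {
      --mcs;
    }
    // Outer check: if SRB0/CCCH would be segmented at this MCS, bump it to the floor
    // of 4 and rebuild the grant; otherwise keep the candidate MCS (low-MCS side).
    if (not would_segment_ccch(mcs, ccch_bytes) or mcs >= min_mcs_ccch) {
      return mcs;
    }
    mcs = min_mcs_ccch;
  }
}

int main() {
  std::printf("chosen mcs = %d\n", select_mcs(/*mcs=*/1, /*ccch_bytes=*/30)); // prints 4
  return 0;
}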

@@ -27,8 +27,16 @@ int ue_buffer_manager::get_dl_tx_total() const
   return total_bytes;
 }
 
-void ue_buffer_manager::pdu_builder::alloc_subpdus(uint32_t rem_bytes, sched_nr_interface::dl_pdu_t& pdu)
+// Return true if there is no SRB0/CCCH MAC PDU segmentation, false otherwise
+bool ue_buffer_manager::pdu_builder::alloc_subpdus(uint32_t rem_bytes, sched_nr_interface::dl_pdu_t& pdu)
 {
+  // In case of SRB0/CCCH PDUs, we need to check whether there is PDU segmentation; if LCID = 0 has an empty buffer,
+  // no need to perform this check
+  bool check_ccch_pdu_segmentation =
+      parent->get_dl_tx_total(srsran::mac_sch_subpdu_nr::nr_lcid_sch_t::CCCH) > 0 ? true : false;
+
+  // First step: allocate MAC CEs until resources allow
+  srsran::deque<ce_t> restore_ces;
   for (ce_t ce : parent->pending_ces) {
     if (ce.cc == cc) {
       // Note: This check also avoids thread collisions across UE carriers
@@ -38,17 +46,35 @@ void ue_buffer_manager::pdu_builder::alloc_subpdus(uint32_t rem_bytes, sched_nr_
       }
       rem_bytes -= size_ce;
       pdu.subpdus.push_back(ce.lcid);
-      parent->pending_ces.pop_front();
+      // If there is possibility of CCCH segmentation, we need to save the MAC CEs in a tmp queue to be later restored
+      if (check_ccch_pdu_segmentation) {
+        restore_ces.push_back(parent->pending_ces.front());
+        parent->pending_ces.pop_front();
+      }
     }
   }
 
+  // Second step: allocate the remaining LCIDs (LCIDs for MAC CEs are addressed above)
   for (uint32_t lcid = 0; rem_bytes > 0 and is_lcid_valid(lcid); ++lcid) {
     uint32_t pending_lcid_bytes = parent->get_dl_tx_total(lcid);
+    // Verify if the TBS is big enough to store the entire CCCH buffer
+    // Note: (pending_lcid_bytes > rem_bytes) implies (check_ccch_pdu_segmentation == true)
+    if (lcid == srsran::mac_sch_subpdu_nr::nr_lcid_sch_t::CCCH and pending_lcid_bytes > rem_bytes) {
+      // restore the MAC CEs as they were at the beginning of the function
+      for (ce_t ce : restore_ces) {
+        parent->pending_ces.push_back(ce);
+      }
+      // double check if this line is required
+      pdu.subpdus.clear();
+      return false;
+    }
     if (pending_lcid_bytes > 0) {
       rem_bytes -= std::min(rem_bytes, pending_lcid_bytes);
       pdu.subpdus.push_back(lcid);
     }
   }
+  return true;
 }
 
 ///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
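The MAC CE handling above is a tentative allocation with rollback: CEs popped off pending_ces are remembered in restore_ces so they can be pushed back, and the partially built PDU discarded, if the CCCH payload turns out not to fit. A minimal generic sketch of that pattern using std::deque (container names and values are illustrative, not the scheduler's types):

// Editor's sketch of the tentative-allocation-with-rollback idea used for MAC CEs.
#include <cstdio>
#include <deque>

int main() {
  std::deque<int> pending = {1, 2, 3};  // stand-in for pending_ces (CE LCIDs)
  std::deque<int> restore;              // stand-in for restore_ces
  std::deque<int> pdu;                  // stand-in for pdu.subpdus

  // Tentatively move CEs into the PDU, remembering them for a possible rollback.
  while (!pending.empty()) {
    pdu.push_back(pending.front());
    restore.push_back(pending.front());
    pending.pop_front();
  }

  bool ccch_fits = false;               // pretend the CCCH payload did not fit
  if (!ccch_fits) {
    // Roll back: put the CEs back and discard the partially built PDU.
    for (int ce : restore) {
      pending.push_back(ce);
    }
    pdu.clear();
  }

  std::printf("pending=%zu pdu=%zu\n", pending.size(), pdu.size()); // pending=3 pdu=0
  return 0;
}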
