sched-nr: simplify code for MCS with SRB0/CCCH

Signed-off-by: Carlo Galiotto <carlo@srs.io>
master
Carlo Galiotto 3 years ago committed by carlo-gal
parent c935484102
commit f73286727e

@ -217,7 +217,7 @@ enable = false
#init_dl_cqi=5
#max_sib_coderate=0.3
#pdcch_cqi_offset=0
#nr_pdsch_mcs=28
nr_pdsch_mcs=28
#nr_pusch_mcs=28
#####################################################################

@ -61,7 +61,8 @@ public:
struct pdu_builder {
pdu_builder() = default;
explicit pdu_builder(uint32_t cc_, ue_buffer_manager& parent_) : cc(cc_), parent(&parent_) {}
bool alloc_subpdus(uint32_t rem_bytes, sched_nr_interface::dl_pdu_t& pdu, bool reset_buf_states = false);
bool alloc_subpdus(uint32_t rem_bytes, sched_nr_interface::dl_pdu_t& pdu);
uint32_t pending_bytes(uint32_t lcid) const { return parent->get_dl_tx(lcid); }
private:
uint32_t cc = SRSRAN_MAX_CARRIERS;
@ -182,9 +183,11 @@ public:
/// Build PDU with MAC CEs and MAC SDUs
bool build_pdu(uint32_t rem_bytes, sched_nr_interface::dl_pdu_t& pdu, bool reset_buf_states = false)
{
return ue->pdu_builder.alloc_subpdus(rem_bytes, pdu, reset_buf_states);
return ue->pdu_builder.alloc_subpdus(rem_bytes, pdu);
}
bool get_pending_bytes(uint32_t lcid) const { return ue->pdu_builder.pending_bytes(lcid); }
/// Channel Information Getters
uint32_t dl_cqi() const { return ue->dl_cqi; }
uint32_t ul_cqi() const { return ue->ul_cqi; }

@ -13,6 +13,7 @@
#include "srsgnb/hdr/stack/mac/sched_nr_grant_allocator.h"
#include "srsgnb/hdr/stack/mac/sched_nr_bwp.h"
#include "srsgnb/hdr/stack/mac/sched_nr_helpers.h"
#include "srsran/mac/mac_sch_pdu_nr.h"
namespace srsenb {
namespace sched_nr_impl {
@ -326,8 +327,9 @@ alloc_result bwp_slot_allocator::alloc_pdsch(slot_ue& ue, uint32_t ss_id, const
// Allocate PDSCH
pdsch_t& pdsch = bwp_pdcch_slot.pdschs.alloc_ue_pdsch_unchecked(ss_id, dci_fmt, dl_grant, ue.cfg(), pdcch.dci);
// Allocate HARQ
// Select MCS and Allocate HARQ
int mcs = ue->fixed_pdsch_mcs();
const static int min_MCS_ccch = 4;
if (ue.h_dl->empty()) {
if (mcs < 0) {
mcs = srsran_ra_nr_cqi_to_mcs(/* cqi */ ue.dl_cqi(),
@ -341,6 +343,13 @@ alloc_result bwp_slot_allocator::alloc_pdsch(slot_ue& ue, uint32_t ss_id, const
mcs = 0;
}
}
// Overwrite MCS if there are pending bytes for LCID. The optimal way would be to verify that there are pending
// bytes and that the MAC SDU for CCCH gets segmented. But since the event of segmentation happens at most a couple
// of times (e.g., to send msg4/RRCSetup), we opt for the less optimal but simpler approach.
if (ue.get_pending_bytes(srsran::mac_sch_subpdu_nr::nr_lcid_sch_t::CCCH) and mcs < min_MCS_ccch) {
mcs = min_MCS_ccch;
logger.info("SCHED: MCS increased to min value %d to allocate SRB0/CCCH for rnti=0x%x", min_MCS_ccch, ue->rnti);
}
bool success = ue.h_dl->new_tx(ue.pdsch_slot, ue.uci_slot, dl_grant, mcs, 4, pdcch.dci);
srsran_assert(success, "Failed to allocate DL HARQ");
} else {
@ -354,10 +363,6 @@ alloc_result bwp_slot_allocator::alloc_pdsch(slot_ue& ue, uint32_t ss_id, const
// Value 0.95 is from TS 38.214 v15.14.00, Section 5.1.3, page 17
const static float max_R = 0.95;
double R_prime;
const static int min_MCS_ccch = 4;
// The purpose of the external loop is to reset the MCS to a min value of 4 if there are not enough PRBs to
// allocate the SRB0/CCCH. This loop only affects the low MCS values
while (true) {
// The purpose of the internal loop is to decrease the MCS if the effective coderate is too high. This loop
// only affects the high MCS values
while (true) {
@ -368,7 +373,8 @@ alloc_result bwp_slot_allocator::alloc_pdsch(slot_ue& ue, uint32_t ss_id, const
srsran_assert(pdsch.sch.grant.tb[0].tbs == (int)ue.h_dl->tbs(), "The TBS did not remain constant in retx");
}
R_prime = pdsch.sch.grant.tb[0].R_prime;
if (ue.h_dl->nof_retx() > 0 or R_prime < max_R or mcs <= 0) {
if (ue.h_dl->nof_retx() > 0 or R_prime < max_R or mcs <= 0 or
(ue.get_pending_bytes(srsran::mac_sch_subpdu_nr::nr_lcid_sch_t::CCCH) and mcs <= min_MCS_ccch)) {
break;
}
// Decrease MCS if first tx and rate is too high
@ -378,6 +384,7 @@ alloc_result bwp_slot_allocator::alloc_pdsch(slot_ue& ue, uint32_t ss_id, const
if (R_prime >= max_R and mcs == 0) {
logger.warning("Couldn't find mcs that leads to R<0.95");
}
ue.h_dl->set_mcs(mcs);
ue.h_dl->set_tbs(pdsch.sch.grant.tb[0].tbs); // set HARQ TBS
pdsch.sch.grant.tb[0].softbuffer.tx = ue.h_dl->get_softbuffer().get();
@ -385,25 +392,10 @@ alloc_result bwp_slot_allocator::alloc_pdsch(slot_ue& ue, uint32_t ss_id, const
// Select scheduled LCIDs and update UE buffer state
bwp_pdsch_slot.dl.data.emplace_back();
// NOTES: 1) ue.h_dl->tbs() has to be converted from bits to bytes
// 2) In case of CCCH segmentation, we'll need to repeat the scheduling with a higher MCS. Hence, the
// function ue.build_pdu() will reset the LCIDs and UE buffer states as before its execution if the flag
// "mcs<min_MCS_ccch" is true
bool segmented_ccch_pdu = not ue.build_pdu(ue.h_dl->tbs() / 8, bwp_pdsch_slot.dl.data.back(), mcs < min_MCS_ccch);
if (segmented_ccch_pdu and mcs < min_MCS_ccch) {
// In case of segmented PDU for CCCH, set minimum MCS to 4 and re-run the outer while loop
bwp_pdsch_slot.dl.data.pop_back();
mcs = min_MCS_ccch;
pdcch.dci.mcs = mcs;
logger.info("SCHED: MCS increased to min value %d to allocate SRB0/CCCH for rnti=0x%x", min_MCS_ccch, ue->rnti);
} else if (segmented_ccch_pdu /* and mcs >= min_MCS_ccch */) {
// With MCS >= then min_MCS_ccch, it is not possible to allocate SRB0/CCCH without PDU segmentation
logger.error("SCHED: Insufficient resources to allocate SRB0/CCCH without PDU segmentation for rnti=0x%x",
ue->rnti);
break;
} else {
break;
}
// NOTE: ue.h_dl->tbs() has to be converted from bits to bytes
bool segmented_ccch_pdu = not ue.build_pdu(ue.h_dl->tbs() / 8, bwp_pdsch_slot.dl.data.back());
if (segmented_ccch_pdu) {
logger.error("SCHED: Insufficient resources to allocate SRB0/CCCH for rnti=0x%x", ue->rnti);
}
// Generate PUCCH

@ -36,15 +36,8 @@ int ue_buffer_manager::get_dl_tx_total() const
LCIDs as before running this function
* @return true if there is no SRB0/CCCH MAC PDU segmentation, false otherwise
*/
bool ue_buffer_manager::pdu_builder::alloc_subpdus(uint32_t rem_bytes,
sched_nr_interface::dl_pdu_t& pdu,
bool reset_buf_states)
bool ue_buffer_manager::pdu_builder::alloc_subpdus(uint32_t rem_bytes, sched_nr_interface::dl_pdu_t& pdu)
{
// In case of SRB0/CCCH PDUs, we need to check whether there is PDU segmentation; if LCID = 0 has empty buffer, no
// need to perform this check
bool check_ccch_pdu_segmentation =
parent->get_dl_tx_total(srsran::mac_sch_subpdu_nr::nr_lcid_sch_t::CCCH) > 0 ? true : false;
// First step: allocate MAC CEs until resources allow
srsran::deque<ce_t> restore_ces;
for (ce_t ce : parent->pending_ces) {
@ -56,10 +49,6 @@ bool ue_buffer_manager::pdu_builder::alloc_subpdus(uint32_t
}
rem_bytes -= size_ce;
pdu.subpdus.push_back(ce.lcid);
// If there is possibility of CCCH segmentation, we need to save the MAC CEs in a tmp queue to be later restored
if (check_ccch_pdu_segmentation and reset_buf_states) {
restore_ces.push_back(parent->pending_ces.front());
}
parent->pending_ces.pop_front();
}
}
@ -67,19 +56,9 @@ bool ue_buffer_manager::pdu_builder::alloc_subpdus(uint32_t
// Second step: allocate the remaining LCIDs (LCIDs for MAC CEs are addressed above)
for (uint32_t lcid = 0; rem_bytes > 0 and is_lcid_valid(lcid); ++lcid) {
uint32_t pending_lcid_bytes = parent->get_dl_tx_total(lcid);
// Verify if the TBS is big enough to store the entire CCCH buffer
// Note: (pending_lcid_bytes > rem_bytes) implies (check_ccch_pdu_segmentation == true)
// Return false if the TBS is too small to store the entire CCCH buffer without segmentation
if (lcid == srsran::mac_sch_subpdu_nr::nr_lcid_sch_t::CCCH and pending_lcid_bytes > rem_bytes) {
if (reset_buf_states) {
// restore the MAC CEs as they were at the beginning of the function
for (ce_t ce : restore_ces) {
parent->pending_ces.push_back(ce);
}
// double check if this line is required
pdu.subpdus.clear();
} else {
pdu.subpdus.push_back(lcid);
}
return false;
}
if (pending_lcid_bytes > 0) {

Loading…
Cancel
Save