From 8564996eafb26415e05f85b186a29b768bf28f69 Mon Sep 17 00:00:00 2001
From: Francisco
Date: Tue, 11 May 2021 20:29:04 +0100
Subject: [PATCH] fix sched_dl_cqi method to compute cqi-optimal rbgmask

---
 srsenb/hdr/stack/mac/sched_common.h           |  3 +-
 .../stack/mac/sched_ue_ctrl/sched_dl_cqi.h    | 27 +++++------
 srsenb/hdr/stack/mac/schedulers/sched_base.h  |  2 +-
 srsenb/src/stack/mac/CMakeLists.txt           |  2 +-
 .../stack/mac/sched_ue_ctrl/sched_dl_cqi.cc   | 46 +++++++++++++++++++
 srsenb/src/stack/mac/schedulers/sched_base.cc | 15 ++++--
 srsenb/test/mac/sched_cqi_test.cc             |  4 +-
 7 files changed, 76 insertions(+), 23 deletions(-)
 create mode 100644 srsenb/src/stack/mac/sched_ue_ctrl/sched_dl_cqi.cc

diff --git a/srsenb/hdr/stack/mac/sched_common.h b/srsenb/hdr/stack/mac/sched_common.h
index 8f71e55ef..11dc02a25 100644
--- a/srsenb/hdr/stack/mac/sched_common.h
+++ b/srsenb/hdr/stack/mac/sched_common.h
@@ -25,6 +25,7 @@ namespace srsenb {
 
 constexpr float tti_duration_ms = 1;
 constexpr uint32_t NOF_AGGR_LEVEL = 4;
+constexpr uint32_t MAX_NOF_RBGS = 25;
 
 /***********************
  * Helper Types
@@ -85,7 +86,7 @@ public:
 using pdcch_mask_t = srsran::bounded_bitset;
 
 //! Bitmask that stores the allocared DL RBGs
-using rbgmask_t = srsran::bounded_bitset<25, true>;
+using rbgmask_t = srsran::bounded_bitset<MAX_NOF_RBGS, true>;
 //! Bitmask that stores the allocated UL PRBs
 using prbmask_t = srsran::bounded_bitset<100, true>;
 
diff --git a/srsenb/hdr/stack/mac/sched_ue_ctrl/sched_dl_cqi.h b/srsenb/hdr/stack/mac/sched_ue_ctrl/sched_dl_cqi.h
index 317cba0cf..1d54e0c7d 100644
--- a/srsenb/hdr/stack/mac/sched_ue_ctrl/sched_dl_cqi.h
+++ b/srsenb/hdr/stack/mac/sched_ue_ctrl/sched_dl_cqi.h
@@ -88,6 +88,16 @@ public:
 
   int get_avg_cqi() const { return get_grant_avg_cqi(rbg_interval(0, cell_nof_rbg)); }
 
+  /// Get CQI of RBG
+  int get_rbg_cqi(uint32_t rbg) const
+  {
+    if (not subband_cqi_enabled()) {
+      return static_cast<int>(wb_cqi_avg);
+    }
+    uint32_t sb_idx = rbg_to_sb_index(rbg);
+    return bp_list[get_bp_index(sb_idx)].last_feedback_tti.is_valid() ? subband_cqi[sb_idx] : wb_cqi_avg;
+  }
+
   /// Get average CQI in given RBG interval
   int get_grant_avg_cqi(rbg_interval interv) const
   {
@@ -126,21 +136,12 @@ public:
   }
 
   /// Get CQI-optimal RBG mask
-  rbgmask_t get_optim_rbg_mask(uint32_t req_rbgs) const
+  rbgmask_t get_optim_rbgmask(uint32_t req_rbgs) const
   {
-    req_rbgs = std::min(req_rbgs, cell_nof_rbg);
-    rbgmask_t mask(cell_nof_rbg);
-    if (not subband_cqi_enabled()) {
-      mask.fill(0, req_rbgs);
-      return mask;
-    }
-    srsran::bounded_vector sorted_cqis = subband_cqi;
-    std::partial_sort(sorted_cqis.begin(), sorted_cqis.begin() + req_rbgs, sorted_cqis.end());
-    for (uint32_t i = 0; i < req_rbgs; ++i) {
-      mask.set(i);
-    }
-    return mask;
+    rbgmask_t rbgmask(cell_nof_rbg);
+    return get_optim_rbgmask(rbgmask, req_rbgs);
   }
+  rbgmask_t get_optim_rbgmask(const rbgmask_t& dl_mask, uint32_t req_rbgs) const;
 
   /// TS 36.321, 7.2.2 - Parameter N
   uint32_t nof_subbands() const { return subband_cqi.size(); }
diff --git a/srsenb/hdr/stack/mac/schedulers/sched_base.h b/srsenb/hdr/stack/mac/schedulers/sched_base.h
index e984739ae..6555a482a 100644
--- a/srsenb/hdr/stack/mac/schedulers/sched_base.h
+++ b/srsenb/hdr/stack/mac/schedulers/sched_base.h
@@ -43,7 +43,7 @@ rbg_interval find_empty_rbg_interval(uint32_t max_nof_rbgs, const rbgmask_t& cur
  * @param current_mask bitmask of occupied RBGs, where to search for available RBGs
 * @return bitmask of found RBGs. If a valid mask wasn't found, bitmask::size() == 0
 */
-rbgmask_t compute_rbgmask_greedy(uint32_t max_nof_rbgs, bool is_contiguous, const rbgmask_t& current_mask);
+rbgmask_t find_available_rbgmask(uint32_t max_nof_rbgs, bool is_contiguous, const rbgmask_t& current_mask);
 
 /**
  * Finds a range of L contiguous PRBs that are empty
diff --git a/srsenb/src/stack/mac/CMakeLists.txt b/srsenb/src/stack/mac/CMakeLists.txt
index 3c18027f7..ee9f8e88d 100644
--- a/srsenb/src/stack/mac/CMakeLists.txt
+++ b/srsenb/src/stack/mac/CMakeLists.txt
@@ -9,7 +9,7 @@ add_subdirectory(schedulers)
 
 set(SOURCES mac.cc ue.cc sched.cc sched_carrier.cc sched_grid.cc sched_ue_ctrl/sched_harq.cc sched_ue.cc
-            sched_ue_ctrl/sched_lch.cc sched_ue_ctrl/sched_ue_cell.cc sched_phy_ch/sf_cch_allocator.cc sched_phy_ch/sched_dci.cc sched_helpers.cc)
+            sched_ue_ctrl/sched_lch.cc sched_ue_ctrl/sched_ue_cell.cc sched_ue_ctrl/sched_dl_cqi.cc sched_phy_ch/sf_cch_allocator.cc sched_phy_ch/sched_dci.cc sched_helpers.cc)
 add_library(srsenb_mac STATIC ${SOURCES} $)
 
 set(SOURCES mac_nr.cc)
diff --git a/srsenb/src/stack/mac/sched_ue_ctrl/sched_dl_cqi.cc b/srsenb/src/stack/mac/sched_ue_ctrl/sched_dl_cqi.cc
new file mode 100644
index 000000000..f3702b332
--- /dev/null
+++ b/srsenb/src/stack/mac/sched_ue_ctrl/sched_dl_cqi.cc
@@ -0,0 +1,46 @@
+/**
+ *
+ * \section COPYRIGHT
+ *
+ * Copyright 2013-2021 Software Radio Systems Limited
+ *
+ * By using this file, you agree to the terms and conditions set
+ * forth in the LICENSE file which can be found at the top level of
+ * the distribution.
+ *
+ */
+
+#include "srsenb/hdr/stack/mac/sched_ue_ctrl/sched_dl_cqi.h"
+#include "srsenb/hdr/stack/mac/schedulers/sched_base.h"
+
+using namespace srsenb;
+
+rbgmask_t sched_dl_cqi::get_optim_rbgmask(const rbgmask_t& dl_mask, uint32_t req_rbgs) const
+{
+  req_rbgs = std::min(req_rbgs, cell_nof_rbg);
+  if (not subband_cqi_enabled()) {
+    // in case of wideband, just find any available RBGs
+    return find_available_rbgmask(req_rbgs, false, dl_mask);
+  }
+
+  rbgmask_t emptymask = ~dl_mask;
+  if (emptymask.none() or req_rbgs >= emptymask.size() or emptymask.count() <= req_rbgs) {
+    return emptymask;
+  }
+
+  // Sort the positions of the free RBGs by decreasing CQI, so that the best RBGs come first
+  srsran::bounded_vector<uint32_t, MAX_NOF_RBGS> sorted_cqi_pos;
+  for (int pos = emptymask.find_lowest(0, emptymask.size(), true); pos >= 0;
+       pos = emptymask.find_lowest(pos + 1, emptymask.size(), true)) {
+    sorted_cqi_pos.push_back(pos);
+  }
+  std::stable_sort(sorted_cqi_pos.begin(), sorted_cqi_pos.end(), [this](uint32_t lhs, uint32_t rhs) {
+    return get_rbg_cqi(lhs) > get_rbg_cqi(rhs);
+  });
+
+  // Keep the req_rbgs RBGs with highest CQI and clear the rest from the mask
+  for (size_t i = req_rbgs; i < sorted_cqi_pos.size(); ++i) {
+    emptymask.set(sorted_cqi_pos[i], false);
+  }
+
+  return emptymask;
+}
diff --git a/srsenb/src/stack/mac/schedulers/sched_base.cc b/srsenb/src/stack/mac/schedulers/sched_base.cc
index 0eb785a5f..755656a1a 100644
--- a/srsenb/src/stack/mac/schedulers/sched_base.cc
+++ b/srsenb/src/stack/mac/schedulers/sched_base.cc
@@ -49,11 +49,16 @@ RBInterval find_contiguous_interval(const RBMask& in_mask, uint32_t max_size)
  * DL Helper methods
 ***************************/
 
-rbgmask_t find_available_rb_mask(const rbgmask_t& in_mask, uint32_t max_size)
+rbgmask_t find_available_rbgmask(const rbgmask_t& in_mask, uint32_t max_size)
 {
   // 1's for free RBs
   rbgmask_t localmask = ~(in_mask);
 
+  if (max_size >= localmask.size() or max_size >= localmask.count()) {
+    // shortcut in case rbg count < max_size
+    return localmask;
+  }
+
   uint32_t i = 0, nof_alloc = 0;
   for (; i < localmask.size() and nof_alloc < max_size; ++i) {
     if (localmask.test(i)) {
@@ -69,7 +74,7 @@ rbg_interval find_empty_rbg_interval(uint32_t max_nof_rbgs, const rbgmask_t& cur
   return find_contiguous_interval(current_mask, max_nof_rbgs);
 }
 
-rbgmask_t compute_rbgmask_greedy(uint32_t max_nof_rbgs, bool is_contiguous, const rbgmask_t& current_mask)
+rbgmask_t find_available_rbgmask(uint32_t max_nof_rbgs, bool is_contiguous, const rbgmask_t& current_mask)
 {
   // Allocate enough RBs that accommodate pending data
   rbgmask_t newtx_mask(current_mask.size());
@@ -77,7 +82,7 @@ rbgmask_t compute_rbgmask_greedy(uint32_t max_nof_rbgs, bool is_contiguous, cons
     rbg_interval interv = find_contiguous_interval(current_mask, max_nof_rbgs);
     newtx_mask.fill(interv.start(), interv.stop());
   } else {
-    newtx_mask = find_available_rb_mask(current_mask, max_nof_rbgs);
+    newtx_mask = find_available_rbgmask(current_mask, max_nof_rbgs);
   }
   return newtx_mask;
 }
@@ -128,7 +133,7 @@ alloc_result try_dl_retx_alloc(sf_sched& tti_sched, sched_ue& ue, const dl_harq_
   // If previous mask does not fit, find another with exact same number of rbgs
   size_t nof_rbg = retx_mask.count();
   bool is_contiguous_alloc = ue.get_dci_format() == SRSRAN_DCI_FORMAT1A;
-  retx_mask = compute_rbgmask_greedy(nof_rbg, is_contiguous_alloc, tti_sched.get_dl_mask());
+  retx_mask = find_available_rbgmask(nof_rbg, is_contiguous_alloc, tti_sched.get_dl_mask());
   if (retx_mask.count() == nof_rbg) {
     return tti_sched.alloc_dl_user(&ue, retx_mask, h.get_id());
   }
@@ -155,7 +160,7 @@ alloc_result try_dl_newtx_alloc_greedy(sf_sched& tti_sched, sched_ue& ue, const
 
   // Find RBG mask that accommodates pending data
   bool is_contiguous_alloc = ue.get_dci_format() == SRSRAN_DCI_FORMAT1A;
-  rbgmask_t newtxmask = compute_rbgmask_greedy(req_rbgs.stop(), is_contiguous_alloc, current_mask);
+  rbgmask_t newtxmask = find_available_rbgmask(req_rbgs.stop(), is_contiguous_alloc, current_mask);
   if (newtxmask.none() or newtxmask.count() < req_rbgs.start()) {
     return alloc_result::no_sch_space;
   }
diff --git a/srsenb/test/mac/sched_cqi_test.cc b/srsenb/test/mac/sched_cqi_test.cc
index 349401e8b..99a16dd06 100644
--- a/srsenb/test/mac/sched_cqi_test.cc
+++ b/srsenb/test/mac/sched_cqi_test.cc
@@ -45,7 +45,7 @@ void test_sched_cqi_one_subband_cqi()
   TESTASSERT(ue_cqi.get_grant_avg_cqi(mask) > 0 and ue_cqi.get_grant_avg_cqi(mask) < 5);
 
   // TEST: Get optimal RBG mask in terms of CQI
-  mask = ue_cqi.get_optim_rbg_mask(5);
+  mask = ue_cqi.get_optim_rbgmask(5);
   TESTASSERT(mask.count() == 5);
   for (uint32_t i = 0; i < 5; ++i) {
     TESTASSERT(mask.test(i) > 0);
@@ -73,7 +73,7 @@ void test_sched_cqi_wideband_cqi()
   TESTASSERT(ue_cqi.get_grant_avg_cqi(mask) == 5);
 
   // TEST: Get optimal RBG mask in terms of CQI
-  mask = ue_cqi.get_optim_rbg_mask(5);
+  mask = ue_cqi.get_optim_rbgmask(5);
   TESTASSERT(mask.count() == 5);
   for (uint32_t i = 0; i < 5; ++i) {
     TESTASSERT(mask.test(i) > 0);
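
The core of the new sched_dl_cqi::get_optim_rbgmask overload is to keep, among the RBGs left free by dl_mask, the req_rbgs positions with the highest per-RBG CQI. The standalone sketch below mirrors that selection on plain standard-library types so it can be compiled in isolation; it is only an illustration, and the names nof_rbgs, dl_mask and cqi_per_rbg are hypothetical stand-ins for the scheduler's real state.

// Illustrative sketch only: mimics the CQI-optimal RBG selection of get_optim_rbgmask
// with std::bitset/std::vector instead of srsran types. All names here are hypothetical.
#include <algorithm>
#include <bitset>
#include <cstdint>
#include <cstdio>
#include <vector>

int main()
{
  constexpr uint32_t nof_rbgs = 6;
  std::bitset<nof_rbgs> dl_mask("000011");                  // RBGs 0 and 1 are already allocated
  std::vector<int> cqi_per_rbg = {7, 9, 3, 12, 5, 10};      // per-RBG CQI (index = RBG position)
  uint32_t req_rbgs = 2;                                    // number of RBGs requested by the grant

  // Collect the positions of the free RBGs
  std::vector<uint32_t> free_pos;
  for (uint32_t i = 0; i < nof_rbgs; ++i) {
    if (not dl_mask.test(i)) {
      free_pos.push_back(i);
    }
  }

  // Sort free positions by decreasing CQI and keep the req_rbgs best ones
  std::stable_sort(free_pos.begin(), free_pos.end(), [&](uint32_t lhs, uint32_t rhs) {
    return cqi_per_rbg[lhs] > cqi_per_rbg[rhs];
  });
  std::bitset<nof_rbgs> optim_mask;
  for (uint32_t i = 0; i < req_rbgs and i < free_pos.size(); ++i) {
    optim_mask.set(free_pos[i]);
  }

  // With the values above, RBGs 3 (CQI 12) and 5 (CQI 10) are selected -> mask "101000"
  std::printf("optimal mask: %s\n", optim_mask.to_string().c_str());
  return 0;
}

In the patch itself the same idea is expressed with srsran::bounded_bitset and srsran::bounded_vector, and the per-RBG quality comes from get_rbg_cqi(), which falls back to the wideband average when no sub-band report is available.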