From eafc003671d12f59eb3f756bd780cf7847665eef Mon Sep 17 00:00:00 2001
From: Francisco Paisana
Date: Fri, 9 Oct 2020 17:08:53 +0100
Subject: [PATCH] Altered the way the scheduling decision for a given TTI is
 generated. Instead of generating one CC in each dl_sched()/ul_sched() call,
 all CC decisions are generated at once. This avoids race conditions with
 calls from the RRC to the MAC to configure UEs.

---
 srsenb/hdr/stack/mac/scheduler.h          |   8 +-
 srsenb/hdr/stack/mac/scheduler_carrier.h  |   4 +-
 srsenb/hdr/stack/mac/scheduler_grid.h     |   4 +-
 srsenb/src/stack/mac/scheduler.cc         | 100 +++++++++++++---
 srsenb/src/stack/mac/scheduler_carrier.cc |  83 +++++++++---
 srsenb/src/stack/mac/scheduler_grid.cc    |   8 +-
 6 files changed, 116 insertions(+), 91 deletions(-)

diff --git a/srsenb/hdr/stack/mac/scheduler.h b/srsenb/hdr/stack/mac/scheduler.h
index 1ac14646a..3f683522d 100644
--- a/srsenb/hdr/stack/mac/scheduler.h
+++ b/srsenb/hdr/stack/mac/scheduler.h
@@ -138,6 +138,8 @@ public:
   class carrier_sched;
 
 protected:
+  void new_tti(srslte::tti_point tti_rx);
+  bool is_generated(srslte::tti_point, uint32_t enb_cc_idx) const;
   // Helper methods
   template <typename Func>
   int ue_db_access(uint16_t rnti, Func, const char* func_name = nullptr);
@@ -156,9 +158,9 @@ protected:
   // Storage of past scheduling results
   sched_result_list sched_results;
 
-  uint32_t   last_tti = 0;
-  std::mutex sched_mutex;
-  bool       configured = false;
+  srslte::tti_point last_tti;
+  std::mutex        sched_mutex;
+  bool              configured = false;
 };
 
 } // namespace srsenb
diff --git a/srsenb/hdr/stack/mac/scheduler_carrier.h b/srsenb/hdr/stack/mac/scheduler_carrier.h
index 319e919b1..5d0303725 100644
--- a/srsenb/hdr/stack/mac/scheduler_carrier.h
+++ b/srsenb/hdr/stack/mac/scheduler_carrier.h
@@ -40,7 +40,7 @@ public:
   void reset();
   void carrier_cfg(const sched_cell_params_t& sched_params_);
   void set_dl_tti_mask(uint8_t* tti_mask, uint32_t nof_sfs);
-  const cc_sched_result& generate_tti_result(uint32_t tti_rx);
+  const cc_sched_result& generate_tti_result(srslte::tti_point tti_rx);
   int dl_rach_info(dl_sched_rar_info_t rar_info);
 
   // getters
@@ -54,7 +54,7 @@ private:
   //! Compute UL scheduler result for given TTI
   int alloc_ul_users(sf_sched* tti_sched);
   //!
Get sf_sched for a given TTI - sf_sched* get_sf_sched(uint32_t tti_rx); + sf_sched* get_sf_sched(srslte::tti_point tti_rx); // args const sched_cell_params_t* cc_cfg = nullptr; diff --git a/srsenb/hdr/stack/mac/scheduler_grid.h b/srsenb/hdr/stack/mac/scheduler_grid.h index bf302959b..d2f2c6f7c 100644 --- a/srsenb/hdr/stack/mac/scheduler_grid.h +++ b/srsenb/hdr/stack/mac/scheduler_grid.h @@ -53,6 +53,8 @@ struct cc_sched_result { rbgmask_t dl_mask = {}; ///< Accumulation of all DL RBG allocations prbmask_t ul_mask = {}; ///< Accumulation of all UL PRB allocations pdcch_mask_t pdcch_mask = {}; ///< Accumulation of all CCE allocations + + bool is_generated(srslte::tti_point tti_rx) const { return srslte::tti_point{tti_params.tti_rx} == tti_rx; } }; struct sf_sched_result { @@ -279,7 +281,7 @@ public: // Control/Configuration Methods sf_sched(); void init(const sched_cell_params_t& cell_params_); - void new_tti(uint32_t tti_rx_, sf_sched_result* cc_results); + void new_tti(srslte::tti_point tti_rx_, sf_sched_result* cc_results); // DL alloc methods alloc_outcome_t alloc_bc(uint32_t aggr_lvl, uint32_t sib_idx, uint32_t sib_ntx); diff --git a/srsenb/src/stack/mac/scheduler.cc b/srsenb/src/stack/mac/scheduler.cc index 3de0f22bd..b163e959b 100644 --- a/srsenb/src/stack/mac/scheduler.cc +++ b/srsenb/src/stack/mac/scheduler.cc @@ -30,6 +30,8 @@ #define Console(fmt, ...) srslte::console(fmt, ##__VA_ARGS__) #define Error(fmt, ...) srslte::logmap::get("MAC ")->error(fmt, ##__VA_ARGS__) +using srslte::tti_point; + namespace srsenb { namespace sched_utils { @@ -238,7 +240,7 @@ void sched::phy_config_enabled(uint16_t rnti, bool enabled) { // TODO: Check if correct use of last_tti ue_db_access( - rnti, [this, enabled](sched_ue& ue) { ue.phy_config_enabled(last_tti, enabled); }, __PRETTY_FUNCTION__); + rnti, [this, enabled](sched_ue& ue) { ue.phy_config_enabled(last_tti.to_uint(), enabled); }, __PRETTY_FUNCTION__); } int sched::bearer_ue_cfg(uint16_t rnti, uint32_t lc_id, sched_interface::ue_bearer_cfg_t* cfg_) @@ -263,8 +265,9 @@ uint32_t sched::get_ul_buffer(uint16_t rnti) { // TODO: Check if correct use of last_tti uint32_t ret = SRSLTE_ERROR; - ue_db_access( - rnti, [this, &ret](sched_ue& ue) { ret = ue.get_pending_ul_new_data(last_tti, -1); }, __PRETTY_FUNCTION__); + ue_db_access(rnti, + [this, &ret](sched_ue& ue) { ret = ue.get_pending_ul_new_data(last_tti.to_uint(), -1); }, + __PRETTY_FUNCTION__); return ret; } @@ -281,8 +284,7 @@ int sched::dl_mac_buffer_state(uint16_t rnti, uint32_t ce_code, uint32_t nof_cmd int sched::dl_ack_info(uint32_t tti, uint16_t rnti, uint32_t enb_cc_idx, uint32_t tb_idx, bool ack) { int ret = -1; - ue_db_access( - rnti, [&](sched_ue& ue) { ret = ue.set_ack_info(tti, enb_cc_idx, tb_idx, ack); }, __PRETTY_FUNCTION__); + ue_db_access(rnti, [&](sched_ue& ue) { ret = ue.set_ack_info(tti, enb_cc_idx, tb_idx, ack); }, __PRETTY_FUNCTION__); return ret; } @@ -330,14 +332,12 @@ int sched::ul_buffer_add(uint16_t rnti, uint32_t lcid, uint32_t bytes) int sched::ul_phr(uint16_t rnti, int phr) { - return ue_db_access( - rnti, [phr](sched_ue& ue) { ue.ul_phr(phr); }, __PRETTY_FUNCTION__); + return ue_db_access(rnti, [phr](sched_ue& ue) { ue.ul_phr(phr); }, __PRETTY_FUNCTION__); } int sched::ul_sr_info(uint32_t tti, uint16_t rnti) { - return ue_db_access( - rnti, [](sched_ue& ue) { ue.set_sr(); }, __PRETTY_FUNCTION__); + return ue_db_access(rnti, [](sched_ue& ue) { ue.set_sr(); }, __PRETTY_FUNCTION__); } void sched::set_dl_tti_mask(uint8_t* tti_mask, uint32_t nof_sfs) @@ -348,31 +348,28 @@ 
void sched::set_dl_tti_mask(uint8_t* tti_mask, uint32_t nof_sfs)
 
 void sched::tpc_inc(uint16_t rnti)
 {
-  ue_db_access(
-      rnti, [](sched_ue& ue) { ue.tpc_inc(); }, __PRETTY_FUNCTION__);
+  ue_db_access(rnti, [](sched_ue& ue) { ue.tpc_inc(); }, __PRETTY_FUNCTION__);
 }
 
 void sched::tpc_dec(uint16_t rnti)
 {
-  ue_db_access(
-      rnti, [](sched_ue& ue) { ue.tpc_dec(); }, __PRETTY_FUNCTION__);
+  ue_db_access(rnti, [](sched_ue& ue) { ue.tpc_dec(); }, __PRETTY_FUNCTION__);
 }
 
 std::array<int, SRSLTE_MAX_CARRIERS> sched::get_enb_ue_cc_map(uint16_t rnti)
 {
   std::array<int, SRSLTE_MAX_CARRIERS> ret{};
   ret.fill(-1); // -1 for inactive carriers
-  ue_db_access(
-      rnti,
-      [this, &ret](sched_ue& ue) {
-        for (size_t enb_cc_idx = 0; enb_cc_idx < carrier_schedulers.size(); ++enb_cc_idx) {
-          auto p = ue.get_cell_index(enb_cc_idx);
-          if (p.second < SRSLTE_MAX_CARRIERS) {
-            ret[enb_cc_idx] = p.second;
-          }
-        }
-      },
-      __PRETTY_FUNCTION__);
+  ue_db_access(rnti,
+               [this, &ret](sched_ue& ue) {
+                 for (size_t enb_cc_idx = 0; enb_cc_idx < carrier_schedulers.size(); ++enb_cc_idx) {
+                   auto p = ue.get_cell_index(enb_cc_idx);
+                   if (p.second < SRSLTE_MAX_CARRIERS) {
+                     ret[enb_cc_idx] = p.second;
+                   }
+                 }
+               },
+               __PRETTY_FUNCTION__);
   return ret;
 }
 
@@ -383,46 +380,69 @@ std::array<int, SRSLTE_MAX_CARRIERS> sched::get_enb_ue_cc_map(uint16_t rnti)
  *******************************************************/
 
 // Downlink Scheduler API
-int sched::dl_sched(uint32_t tti_tx_dl, uint32_t cc_idx, sched_interface::dl_sched_res_t& sched_result)
+int sched::dl_sched(uint32_t tti_tx_dl, uint32_t enb_cc_idx, sched_interface::dl_sched_res_t& sched_result)
 {
   if (!configured) {
     return 0;
   }
 
   std::lock_guard<std::mutex> lock(sched_mutex);
-  uint32_t tti_rx = sched_utils::tti_subtract(tti_tx_dl, FDD_HARQ_DELAY_UL_MS);
-  last_tti = sched_utils::max_tti(last_tti, tti_rx);
+  if (enb_cc_idx >= carrier_schedulers.size()) {
+    return 0;
+  }
 
-  if (cc_idx < carrier_schedulers.size()) {
-    // Compute scheduling Result for tti_rx
-    const cc_sched_result& tti_sched = carrier_schedulers[cc_idx]->generate_tti_result(tti_rx);
+  tti_point tti_rx = tti_point{tti_tx_dl} - FDD_HARQ_DELAY_UL_MS;
+  new_tti(tti_rx);
 
-    // copy result
-    sched_result = tti_sched.dl_sched_result;
-  }
+  // copy result
+  sched_result = sched_results.get_sf(tti_rx)->get_cc(enb_cc_idx)->dl_sched_result;
 
   return 0;
 }
 
 // Uplink Scheduler API
-int sched::ul_sched(uint32_t tti, uint32_t cc_idx, srsenb::sched_interface::ul_sched_res_t& sched_result)
+int sched::ul_sched(uint32_t tti, uint32_t enb_cc_idx, srsenb::sched_interface::ul_sched_res_t& sched_result)
 {
   if (!configured) {
     return 0;
   }
 
   std::lock_guard<std::mutex> lock(sched_mutex);
+  if (enb_cc_idx >= carrier_schedulers.size()) {
+    return 0;
+  }
+
   // Compute scheduling Result for tti_rx
-  uint32_t tti_rx = sched_utils::tti_subtract(tti, FDD_HARQ_DELAY_UL_MS + FDD_HARQ_DELAY_DL_MS);
+  tti_point tti_rx = tti_point{tti} - FDD_HARQ_DELAY_UL_MS - FDD_HARQ_DELAY_DL_MS;
+  new_tti(tti_rx);
+
+  // copy result
+  sched_result = sched_results.get_sf(tti_rx)->get_cc(enb_cc_idx)->ul_sched_result;
 
-  if (cc_idx < carrier_schedulers.size()) {
-    const cc_sched_result& tti_sched = carrier_schedulers[cc_idx]->generate_tti_result(tti_rx);
+  return SRSLTE_SUCCESS;
+}
 
-    // copy result
-    sched_result = tti_sched.ul_sched_result;
+/// Generate scheduling decision for tti_rx, if it wasn't already generated
+/// NOTE: The scheduling decision is made for all CCs in a single call/lock, otherwise the UE can have different
+/// configurations (e.g. different set of activated SCells) in different CC decisions
+void sched::new_tti(tti_point tti_rx)
+{
+  last_tti = std::max(last_tti, tti_rx);
+
+  // Generate sched results for all CCs, if not yet generated
+  for (size_t cc_idx = 0; cc_idx < carrier_schedulers.size(); ++cc_idx) {
+    if (not is_generated(tti_rx, cc_idx)) {
+      carrier_schedulers[cc_idx]->generate_tti_result(tti_rx);
+    }
   }
+}
 
-  return SRSLTE_SUCCESS;
+/// Check if TTI result is generated
+bool sched::is_generated(srslte::tti_point tti_rx, uint32_t enb_cc_idx) const
+{
+  const sf_sched_result* sf_result = sched_results.get_sf(tti_rx);
+  return sf_result != nullptr and sf_result->get_cc(enb_cc_idx) != nullptr and
+         sf_result->get_cc(enb_cc_idx)->is_generated(tti_rx);
 }
 
 // Common way to access ue_db elements in a read locking way
diff --git a/srsenb/src/stack/mac/scheduler_carrier.cc b/srsenb/src/stack/mac/scheduler_carrier.cc
index 8fc024bfe..1f7cf12fd 100644
--- a/srsenb/src/stack/mac/scheduler_carrier.cc
+++ b/srsenb/src/stack/mac/scheduler_carrier.cc
@@ -26,6 +26,8 @@
 
 namespace srsenb {
 
+using srslte::tti_point;
+
 /*******************************************************
  *        Broadcast (SIB+Paging) scheduling
  *******************************************************/
@@ -136,7 +138,9 @@ void bc_sched::reset()
  *******************************************************/
 
 ra_sched::ra_sched(const sched_cell_params_t& cfg_, std::map<uint16_t, sched_ue>& ue_db_) :
-  cc_cfg(&cfg_), log_h(srslte::logmap::get("MAC")), ue_db(&ue_db_)
+  cc_cfg(&cfg_),
+  log_h(srslte::logmap::get("MAC")),
+  ue_db(&ue_db_)
 {}
 
 // Schedules RAR
@@ -306,55 +310,50 @@ void sched::carrier_sched::set_dl_tti_mask(uint8_t* tti_mask, uint32_t nof_sfs)
   sf_dl_mask.assign(tti_mask, tti_mask + nof_sfs);
 }
 
-const cc_sched_result& sched::carrier_sched::generate_tti_result(uint32_t tti_rx)
+const cc_sched_result& sched::carrier_sched::generate_tti_result(tti_point tti_rx)
 {
-  cc_sched_result* cc_result = prev_sched_results->get_cc(srslte::tti_point{tti_rx}, enb_cc_idx);
-
-  // if it is the first time tti is run, reset vars
-  if (cc_result == nullptr) {
-    sf_sched* tti_sched = get_sf_sched(tti_rx);
-    sf_sched_result* sf_result = prev_sched_results->get_sf(srslte::tti_point{tti_rx});
-    cc_result = sf_result->new_cc(enb_cc_idx);
+  sf_sched* tti_sched = get_sf_sched(tti_rx);
+  sf_sched_result* sf_result = prev_sched_results->get_sf(tti_rx);
+  cc_sched_result* cc_result = sf_result->new_cc(enb_cc_idx);
 
-    bool dl_active = sf_dl_mask[tti_sched->get_tti_tx_dl() % sf_dl_mask.size()] == 0;
+  bool dl_active = sf_dl_mask[tti_sched->get_tti_tx_dl() % sf_dl_mask.size()] == 0;
 
-    /* Schedule PHICH */
-    for (auto& ue_pair : *ue_db) {
-      tti_sched->alloc_phich(&ue_pair.second, &cc_result->ul_sched_result);
-    }
+  /* Schedule PHICH */
+  for (auto& ue_pair : *ue_db) {
+    tti_sched->alloc_phich(&ue_pair.second, &cc_result->ul_sched_result);
+  }
 
-    /* Schedule DL control data */
-    if (dl_active) {
-      /* Schedule Broadcast data (SIB and paging) */
-      bc_sched_ptr->dl_sched(tti_sched);
+  /* Schedule DL control data */
+  if (dl_active) {
+    /* Schedule Broadcast data (SIB and paging) */
+    bc_sched_ptr->dl_sched(tti_sched);
 
-      /* Schedule RAR */
-      ra_sched_ptr->dl_sched(tti_sched);
+    /* Schedule RAR */
+    ra_sched_ptr->dl_sched(tti_sched);
 
-      /* Schedule Msg3 */
-      sf_sched* sf_msg3_sched = get_sf_sched(tti_rx + MSG3_DELAY_MS);
-      ra_sched_ptr->ul_sched(tti_sched, sf_msg3_sched);
-    }
+    /* Schedule Msg3 */
+    sf_sched* sf_msg3_sched = get_sf_sched(tti_rx + MSG3_DELAY_MS);
+    ra_sched_ptr->ul_sched(tti_sched, sf_msg3_sched);
+  }
 
-    /*
Prioritize PDCCH scheduling for DL and UL data in a RoundRobin fashion */ - if ((tti_rx % 2) == 0) { - alloc_ul_users(tti_sched); - } + /* Prioritize PDCCH scheduling for DL and UL data in a RoundRobin fashion */ + if ((tti_rx.to_uint() % 2) == 0) { + alloc_ul_users(tti_sched); + } - /* Schedule DL user data */ - alloc_dl_users(tti_sched); + /* Schedule DL user data */ + alloc_dl_users(tti_sched); - if ((tti_rx % 2) == 1) { - alloc_ul_users(tti_sched); - } + if ((tti_rx.to_uint() % 2) == 1) { + alloc_ul_users(tti_sched); + } - /* Select the winner DCI allocation combination, store all the scheduling results */ - tti_sched->generate_sched_results(); + /* Select the winner DCI allocation combination, store all the scheduling results */ + tti_sched->generate_sched_results(); - /* Reset ue harq pending ack state, clean-up blocked pids */ - for (auto& user : *ue_db) { - user.second.finish_tti(cc_result->tti_params, enb_cc_idx); - } + /* Reset ue harq pending ack state, clean-up blocked pids */ + for (auto& user : *ue_db) { + user.second.finish_tti(cc_result->tti_params, enb_cc_idx); } return *cc_result; @@ -386,10 +385,10 @@ int sched::carrier_sched::alloc_ul_users(sf_sched* tti_sched) return SRSLTE_SUCCESS; } -sf_sched* sched::carrier_sched::get_sf_sched(uint32_t tti_rx) +sf_sched* sched::carrier_sched::get_sf_sched(tti_point tti_rx) { - sf_sched* ret = &sf_scheds[tti_rx % sf_scheds.size()]; - if (ret->get_tti_rx() != tti_rx) { + sf_sched* ret = &sf_scheds[tti_rx.to_uint() % sf_scheds.size()]; + if (ret->get_tti_rx() != tti_rx.to_uint()) { sf_sched_result* sf_res = prev_sched_results->get_sf(srslte::tti_point{tti_rx}); if (sf_res == nullptr) { // Reset if tti_rx has not been yet set in the sched results diff --git a/srsenb/src/stack/mac/scheduler_grid.cc b/srsenb/src/stack/mac/scheduler_grid.cc index 27806c6d8..68ef564c2 100644 --- a/srsenb/src/stack/mac/scheduler_grid.cc +++ b/srsenb/src/stack/mac/scheduler_grid.cc @@ -25,6 +25,8 @@ #include "srslte/common/logmap.h" #include +using srslte::tti_point; + namespace srsenb { const char* alloc_outcome_t::to_string() const @@ -563,7 +565,7 @@ bool sf_grid_t::find_ul_alloc(uint32_t L, prb_interval* alloc) const * TTI resource Scheduling Methods *******************************************************/ -sf_sched::sf_sched() : log_h(srslte::logmap::get("MAC ")) {} +sf_sched::sf_sched() : log_h(srslte::logmap::get("MAC")) {} void sf_sched::init(const sched_cell_params_t& cell_params_) { @@ -578,7 +580,7 @@ void sf_sched::init(const sched_cell_params_t& cell_params_) } } -void sf_sched::new_tti(uint32_t tti_rx_, sf_sched_result* cc_results_) +void sf_sched::new_tti(tti_point tti_rx_, sf_sched_result* cc_results_) { // reset internal state bc_allocs.clear(); @@ -586,7 +588,7 @@ void sf_sched::new_tti(uint32_t tti_rx_, sf_sched_result* cc_results_) data_allocs.clear(); ul_data_allocs.clear(); - tti_params = tti_params_t{tti_rx_}; + tti_params = tti_params_t{tti_rx_.to_uint()}; tti_alloc.new_tti(tti_params); cc_results = cc_results_;
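Note on the locking pattern above: dl_sched()/ul_sched() are invoked once per carrier by the PHY workers, while the RRC configures UEs through the same sched_mutex. Whichever per-carrier call arrives first for a given tti_rx now runs new_tti(), which generates the results for every CC before the mutex is released; the remaining calls for that TTI only find the cached cc_sched_result and copy it. The sketch below is a minimal standalone illustration of that pattern; toy_sched, cc_result, run_scheduler and config_ue are illustrative stand-ins, not the real srsenb types.

// Minimal sketch: the first per-carrier call for a TTI generates the
// decision for ALL carriers inside the critical section; subsequent calls
// for the same TTI just copy the cached result.
#include <cstdint>
#include <mutex>
#include <vector>

struct cc_result {
  uint32_t tti_rx    = 0;
  bool     generated = false;
  // the real cc_sched_result also carries the DL/UL allocation tables
};

class toy_sched
{
public:
  explicit toy_sched(size_t nof_carriers) : results(nof_carriers) {}

  // Called once per carrier (possibly from different PHY worker threads).
  cc_result dl_sched(uint32_t tti_rx, uint32_t enb_cc_idx)
  {
    std::lock_guard<std::mutex> lock(mutex);
    if (enb_cc_idx >= results.size()) {
      return {};
    }
    new_tti(tti_rx); // no-op for carriers already generated for this TTI
    return results[enb_cc_idx];
  }

  // UE (re)configuration takes the same mutex, so it can never interleave
  // with the per-CC decisions of a single TTI.
  void config_ue() { std::lock_guard<std::mutex> lock(mutex); /* ... */ }

private:
  void new_tti(uint32_t tti_rx)
  {
    for (cc_result& cc : results) {
      if (!(cc.generated && cc.tti_rx == tti_rx)) {
        cc = run_scheduler(tti_rx); // expensive per-CC scheduling decision
      }
    }
  }

  cc_result run_scheduler(uint32_t tti_rx) { return {tti_rx, true}; }

  std::mutex             mutex;
  std::vector<cc_result> results;
};

The design point is that correctness comes from doing all per-CC decisions inside one critical section: a UE reconfiguration is observed either by all CCs of a TTI or by none, which is exactly the race the commit message describes.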
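The patch also continues the migration from raw uint32_t TTI counters to srslte::tti_point (e.g. tti_point{tti_tx_dl} - FDD_HARQ_DELAY_UL_MS replacing sched_utils::tti_subtract()). The following rough sketch shows the motivation, assuming only that LTE TTI counters wrap at 10240 (1024 frames x 10 subframes); toy_tti_point is a hypothetical mini version of srslte::tti_point, not its real interface.

// Why a tti_point-style wrapper instead of raw uint32_t arithmetic:
// subtracting a HARQ delay near the wrap point must wrap around instead of
// underflowing to a huge unsigned value.
#include <cstdint>
#include <cstdio>

constexpr uint32_t NOF_TTIS = 10240; // LTE TTI period

struct toy_tti_point {
  uint32_t count = 0;
  uint32_t to_uint() const { return count; }
};

toy_tti_point operator-(toy_tti_point tti, uint32_t offset)
{
  // wrap-safe subtraction modulo the 10240-TTI period
  return {(tti.count + NOF_TTIS - (offset % NOF_TTIS)) % NOF_TTIS};
}

int main()
{
  uint32_t tti_tx_dl = 2; // shortly after the counter wrapped
  uint32_t harq_dly  = 4; // e.g. FDD_HARQ_DELAY_UL_MS

  printf("raw subtraction: %u\n", tti_tx_dl - harq_dly);                          // 4294967294
  printf("wrap-safe      : %u\n", (toy_tti_point{tti_tx_dl} - harq_dly).to_uint()); // 10238
}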