From 4f0b954cde18fb6f16ae1bb3c440c70e6eefa439 Mon Sep 17 00:00:00 2001
From: Francisco Paisana
Date: Mon, 5 Jul 2021 18:13:41 +0100
Subject: [PATCH] sched,nr: changed scheduler nr api to not require
 slot_indication function

---
 srsenb/hdr/stack/mac/nr/sched_nr.h           |  13 +-
 srsenb/hdr/stack/mac/nr/sched_nr_interface.h |  40 ++-----
 srsenb/hdr/stack/mac/nr/sched_nr_pdcch.h     |  18 +--
 srsenb/hdr/stack/mac/nr/sched_nr_rb_grid.h   |  13 +-
 srsenb/hdr/stack/mac/nr/sched_nr_worker.h    |  28 +++--
 srsenb/src/stack/mac/nr/sched_nr.cc          | 119 +++++++++++++++----
 srsenb/src/stack/mac/nr/sched_nr_pdcch.cc    |   8 +-
 srsenb/src/stack/mac/nr/sched_nr_rb_grid.cc  |  25 ++--
 srsenb/src/stack/mac/nr/sched_nr_worker.cc   | 100 +++++++++-------
 srsenb/test/mac/nr/sched_nr_sim_ue.cc        |   7 +-
 srsenb/test/mac/nr/sched_nr_sim_ue.h         |  10 +-
 srsenb/test/mac/nr/sched_nr_test.cc          | 103 +++++++++-------
 12 files changed, 289 insertions(+), 195 deletions(-)

diff --git a/srsenb/hdr/stack/mac/nr/sched_nr.h b/srsenb/hdr/stack/mac/nr/sched_nr.h
index fd6a6ddad..4a0cbf4ea 100644
--- a/srsenb/hdr/stack/mac/nr/sched_nr.h
+++ b/srsenb/hdr/stack/mac/nr/sched_nr.h
@@ -30,6 +30,7 @@ class sched_worker_manager;
 }
 
 class ue_event_manager;
+class sched_result_manager;
 
 class sched_nr final : public sched_nr_interface
 {
@@ -39,13 +40,14 @@ public:
   int  cell_cfg(srsran::const_span<cell_cfg_t> cell_list) override;
   void ue_cfg(uint16_t rnti, const ue_cfg_t& cfg) override;
 
-  void slot_indication(tti_point tti_rx) override;
-  int  generate_sched_result(tti_point tti_rx, uint32_t cc, tti_request_t& tti_req) override;
-
   void dl_ack_info(uint16_t rnti, uint32_t cc, uint32_t pid, uint32_t tb_idx, bool ack) override;
   void ul_sr_info(tti_point tti_rx, uint16_t rnti) override;
 
+  int get_dl_sched(tti_point pdsch_tti, uint32_t cc, dl_sched_t& result) override;
+  int get_ul_sched(tti_point pdcch_tti, uint32_t cc, ul_sched_t& result) override;
+
 private:
+  int  generate_slot_result(tti_point pdcch_tti, uint32_t cc);
   void ue_cfg_impl(uint16_t rnti, const ue_cfg_t& cfg);
 
   // args
@@ -59,8 +61,11 @@ private:
   std::mutex ue_db_mutex;
   ue_map_t   ue_db;
 
-  // management of PHY UE feedback
+  // management of UE feedback
   std::unique_ptr<ue_event_manager> pending_events;
+
+  // management of Sched Result buffering
+  std::unique_ptr<sched_result_manager> pending_results;
 };
 
 } // namespace srsenb
diff --git a/srsenb/hdr/stack/mac/nr/sched_nr_interface.h b/srsenb/hdr/stack/mac/nr/sched_nr_interface.h
index d19855129..2b82bf83e 100644
--- a/srsenb/hdr/stack/mac/nr/sched_nr_interface.h
+++ b/srsenb/hdr/stack/mac/nr/sched_nr_interface.h
@@ -84,23 +84,8 @@ public:
 
   ///// Sched Result /////
 
-  using pdcch_dl_t = mac_interface_phy_nr::pdcch_dl_t;
-  using pdcch_ul_t = mac_interface_phy_nr::pdcch_ul_t;
-
-  struct pdsch_grant {
-    srsran_dci_dl_nr_t dci = {};
-  };
-  using pdsch_list_t = srsran::bounded_vector<pdsch_grant, MAX_GRANTS>;
-
-  struct dl_tti_request_t {
-    tti_point    pdsch_tti;
-    pdsch_list_t pdschs;
-  };
-
-  struct pusch_grant {
-    srsran_dci_ul_nr_t dci = {};
-  };
-  using pusch_list_t = srsran::bounded_vector<pusch_grant, MAX_GRANTS>;
+  using dl_sched_t = mac_interface_phy_nr::dl_sched_t;
+  using ul_sched_t = mac_interface_phy_nr::ul_sched_t;
 
   struct pucch_resource_grant {
     uint16_t rnti;
@@ -112,22 +97,11 @@ public:
   };
   using pucch_list_t = srsran::bounded_vector<pucch_grant, MAX_GRANTS>;
 
-  struct ul_tti_request_t {
-    tti_point    pusch_tti;
-    pusch_list_t puschs;
-    pucch_list_t pucchs;
-  };
-
-  struct tti_request_t {
-    dl_tti_request_t dl_res;
-    ul_tti_request_t ul_res;
-  };
-
-  virtual ~sched_nr_interface()                                = default;
-  virtual int  cell_cfg(srsran::const_span<cell_cfg_t> ue_cfg) = 0;
-  virtual void ue_cfg(uint16_t rnti, const ue_cfg_t& ue_cfg)   = 0;
-  virtual void slot_indication(tti_point tti_rx)               = 0;
-  virtual int  generate_sched_result(tti_point tti_rx, uint32_t cc, tti_request_t& result) = 0;
+  virtual ~sched_nr_interface()                                                = default;
+  virtual int  cell_cfg(srsran::const_span<cell_cfg_t> ue_cfg)                 = 0;
+  virtual void ue_cfg(uint16_t rnti, const ue_cfg_t& ue_cfg)                   = 0;
+  virtual int  get_dl_sched(tti_point tti_rx, uint32_t cc, dl_sched_t& result) = 0;
+  virtual int  get_ul_sched(tti_point tti_rx, uint32_t cc, ul_sched_t& result) = 0;
 
   virtual void dl_ack_info(uint16_t rnti, uint32_t cc, uint32_t pid, uint32_t tb_idx, bool ack) = 0;
   virtual void ul_sr_info(tti_point, uint16_t rnti)                                             = 0;
 
diff --git a/srsenb/hdr/stack/mac/nr/sched_nr_pdcch.h b/srsenb/hdr/stack/mac/nr/sched_nr_pdcch.h
index 261156990..f7fe40bda 100644
--- a/srsenb/hdr/stack/mac/nr/sched_nr_pdcch.h
+++ b/srsenb/hdr/stack/mac/nr/sched_nr_pdcch.h
@@ -29,11 +29,11 @@ enum class pdcch_grant_type_t { sib, dl_data, ul_data };
 
 class slot_ue;
 
-using bwp_cfg_t    = sched_nr_interface::bwp_cfg_t;
-using pdsch_list_t = sched_nr_interface::pdsch_list_t;
-using pdsch_grant  = sched_nr_interface::pdsch_grant;
-using pusch_list_t = sched_nr_interface::pusch_list_t;
-using pusch_grant  = sched_nr_interface::pusch_grant;
+using bwp_cfg_t       = sched_nr_interface::bwp_cfg_t;
+using pdcch_dl_t      = mac_interface_phy_nr::pdcch_dl_t;
+using pdcch_ul_t      = mac_interface_phy_nr::pdcch_ul_t;
+using pdcch_dl_list_t = srsran::bounded_vector<pdcch_dl_t, MAX_GRANTS>;
+using pdcch_ul_list_t = srsran::bounded_vector<pdcch_ul_t, MAX_GRANTS>;
 
 class coreset_region
 {
@@ -41,8 +41,8 @@ public:
   coreset_region(const bwp_cfg_t& bwp_cfg_,
                  uint32_t         coreset_id_,
                  uint32_t         slot_idx,
-                 pdsch_list_t&    pdcch_dl_list,
-                 pusch_list_t&    pdcch_ul_list);
+                 pdcch_dl_list_t& pdcch_dl_list,
+                 pdcch_ul_list_t& pdcch_ul_list);
   void reset();
 
   /**
@@ -76,8 +76,8 @@ private:
     slot_ue* ue;
   };
   srsran::bounded_vector<alloc_record, 2 * MAX_GRANTS> dci_list;
-  pdsch_list_t& pdcch_dl_list;
-  pusch_list_t& pdcch_ul_list;
+  pdcch_dl_list_t& pdcch_dl_list;
+  pdcch_ul_list_t& pdcch_ul_list;
 
   // DFS decision tree of PDCCH grants
   struct tree_node {
diff --git a/srsenb/hdr/stack/mac/nr/sched_nr_rb_grid.h b/srsenb/hdr/stack/mac/nr/sched_nr_rb_grid.h
index c73e78272..92a15f54c 100644
--- a/srsenb/hdr/stack/mac/nr/sched_nr_rb_grid.h
+++ b/srsenb/hdr/stack/mac/nr/sched_nr_rb_grid.h
@@ -25,11 +25,6 @@ namespace sched_nr_impl {
 using pdsch_bitmap = srsran::bounded_bitset<25, true>;
 using pusch_bitmap = srsran::bounded_bitset<25, true>;
 
-using pdsch_t      = sched_nr_interface::pdsch_grant;
-using pdsch_list_t = sched_nr_interface::pdsch_list_t;
-
-using pusch_list = sched_nr_interface::pusch_list_t;
-
 const static size_t MAX_CORESET_PER_BWP = 3;
 using slot_coreset_list                 = srsran::bounded_vector<coreset_region, MAX_CORESET_PER_BWP>;
 
@@ -39,8 +34,8 @@ struct bwp_slot_grid {
   bool              is_dl, is_ul;
   pdsch_bitmap      dl_rbgs;
   pusch_bitmap      ul_rbgs;
-  pdsch_list_t      pdschs;
-  pusch_list_t      puschs;
+  pdcch_dl_list_t   dl_pdcchs;
+  pdcch_ul_list_t   ul_pdcchs;
   slot_coreset_list coresets;
   pucch_list_t      pucchs;
 
@@ -52,8 +47,8 @@ struct bwp_slot_grid {
 struct bwp_res_grid {
   bwp_res_grid(const sched_cell_params& cell_cfg_, uint32_t bwp_id_);
 
-  bwp_slot_grid&       operator[](tti_point tti) { return slots[tti.sf_idx()]; };
-  const bwp_slot_grid& operator[](tti_point tti) const { return slots[tti.sf_idx()]; };
+  bwp_slot_grid&       operator[](tti_point tti) { return slots[tti.to_uint() % slots.capacity()]; };
+  const bwp_slot_grid& operator[](tti_point tti) const { return slots[tti.to_uint() % slots.capacity()]; };
   uint32_t id() const { return bwp_id; }
   uint32_t nof_prbs() const { return cell_cfg->cell_cfg.nof_prb; }
   const sched_cell_params& cell_params() const { return *cell_cfg; }
diff --git a/srsenb/hdr/stack/mac/nr/sched_nr_worker.h b/srsenb/hdr/stack/mac/nr/sched_nr_worker.h
index b7331a24b..0b4d4c45f 100644
--- a/srsenb/hdr/stack/mac/nr/sched_nr_worker.h
+++ b/srsenb/hdr/stack/mac/nr/sched_nr_worker.h
@@ -22,12 +22,12 @@
 #include "srsran/adt/span.h"
 #include <condition_variable>
 #include <mutex>
-#include <semaphore.h>
 
 namespace srsenb {
 namespace sched_nr_impl {
 
-using slot_res_t = sched_nr_interface::tti_request_t;
+using dl_sched_t = sched_nr_interface::dl_sched_t;
+using ul_sched_t = sched_nr_interface::ul_sched_t;
 
 class slot_cc_worker
 {
@@ -55,27 +55,31 @@ private:
 
 class sched_worker_manager
 {
+  struct slot_worker_ctxt {
+    std::mutex                  slot_mutex; // lock of all workers of the same slot.
+    std::condition_variable     cvar;
+    tti_point                   tti_rx;
+    int                         nof_workers_waiting = 0;
+    std::atomic<int>            worker_count{0}; // variable shared across slot_cc_workers
+    std::vector<slot_cc_worker> workers;
+  };
+
 public:
   explicit sched_worker_manager(ue_map_t& ue_db_, const sched_params& cfg_);
   sched_worker_manager(const sched_worker_manager&) = delete;
   sched_worker_manager(sched_worker_manager&&)      = delete;
   ~sched_worker_manager();
 
-  void reserve_workers(tti_point tti_rx);
-  void start_tti(tti_point tti_rx);
-  bool run_tti(tti_point tti_rx, uint32_t cc, sched_nr_interface::tti_request_t& req);
-  void end_tti(tti_point tti_rx);
+  void start_slot(tti_point tti_rx, srsran::move_callback<void()> process_feedback);
+  bool run_slot(tti_point tti_rx, uint32_t cc);
+  void release_slot(tti_point tti_rx);
+  bool get_sched_result(tti_point pdcch_tti, uint32_t cc, dl_sched_t& dl_res, ul_sched_t& ul_res);
 
 private:
   const sched_params& cfg;
   ue_map_t&           ue_db;
+  std::mutex          ue_db_mutex;
 
-  struct slot_worker_ctxt {
-    sem_t     sf_sem; // lock of all workers of the same slot. unlocked by last slot_cc_worker
-    tti_point tti_rx;
-    std::atomic<int>            worker_count{0}; // variable shared across slot_cc_workers
-    std::vector<slot_cc_worker> workers;
-  };
   std::vector<std::unique_ptr<slot_worker_ctxt> > slot_ctxts;
 
   srsran::bounded_vector<cell_res_grid, SCHED_NR_MAX_CARRIERS> cell_grid_list;
diff --git a/srsenb/src/stack/mac/nr/sched_nr.cc b/srsenb/src/stack/mac/nr/sched_nr.cc
index ff386dd4b..bd9022ff2 100644
--- a/srsenb/src/stack/mac/nr/sched_nr.cc
+++ b/srsenb/src/stack/mac/nr/sched_nr.cc
@@ -16,10 +16,7 @@
 
 namespace srsenb {
 
-using sched_nr_impl::sched_worker_manager;
-using sched_nr_impl::ue;
-using sched_nr_impl::ue_carrier;
-using sched_nr_impl::ue_map_t;
+using namespace sched_nr_impl;
 
 static int assert_ue_cfg_valid(uint16_t rnti, const sched_nr_interface::ue_cfg_t& uecfg);
 
@@ -44,7 +41,7 @@ public:
     feedback_list.back().cc       = cc;
     feedback_list.back().callback = std::move(event);
   }
-  void new_tti()
+  void new_slot()
   {
     {
       std::lock_guard<std::mutex> lock(common_mutex);
@@ -81,6 +78,68 @@ private:
 
 ///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
 
+class sched_result_manager
+{
+public:
+  explicit sched_result_manager(uint32_t nof_cc_)
+  {
+    for (auto& v : results) {
+      v.resize(nof_cc_);
+    }
+  }
+
+  dl_sched_t& add_dl_result(tti_point tti, uint32_t cc)
+  {
+    if (not has_dl_result(tti, cc)) {
+      results[tti.to_uint()][cc].tti_dl = tti;
+      results[tti.to_uint()][cc].dl_res = {};
+    }
+    return results[tti.to_uint()][cc].dl_res;
+  }
+  ul_sched_t& add_ul_result(tti_point tti, uint32_t cc)
+  {
+    if (not has_ul_result(tti, cc)) {
+      results[tti.to_uint()][cc].tti_ul = tti;
+      results[tti.to_uint()][cc].ul_res = {};
+    }
+    return results[tti.to_uint()][cc].ul_res;
+  }
+
+  bool has_dl_result(tti_point tti, uint32_t cc) const { return results[tti.to_uint()][cc].tti_dl == tti; }
+
+  bool has_ul_result(tti_point tti, uint32_t cc) const { return results[tti.to_uint()][cc].tti_ul == tti; }
+
+  dl_sched_t pop_dl_result(tti_point tti, uint32_t cc)
+  {
+    if (has_dl_result(tti, cc)) {
+      results[tti.to_uint()][cc].tti_dl.reset();
+      return results[tti.to_uint()][cc].dl_res;
+    }
+    return {};
+  }
+
+  ul_sched_t pop_ul_result(tti_point tti, uint32_t cc)
+  {
+    if (has_ul_result(tti, cc)) {
+      results[tti.to_uint()][cc].tti_ul.reset();
+      return results[tti.to_uint()][cc].ul_res;
+    }
+    return {};
+  }
+
+private:
+  struct slot_result_t {
+    tti_point  tti_dl;
+    tti_point  tti_ul;
+    dl_sched_t dl_res;
+    ul_sched_t ul_res;
+  };
+
+  srsran::circular_array<std::vector<slot_result_t>, TTIMOD_SZ> results;
+};
+
+///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+
 sched_nr::sched_nr(const sched_cfg_t& sched_cfg) :
   cfg(sched_cfg), pending_events(new ue_event_manager(ue_db)), logger(srslog::fetch_basic_logger("MAC"))
 {}
@@ -94,6 +153,7 @@ int sched_nr::cell_cfg(srsran::const_span<cell_cfg_t> cell_list)
     cfg.cells.emplace_back(cc, cell_list[cc], cfg.sched_cfg);
   }
 
+  pending_results.reset(new sched_result_manager(cell_list.size()));
   sched_workers.reset(new sched_nr_impl::sched_worker_manager(ue_db, cfg));
 
   return SRSRAN_SUCCESS;
 }
@@ -113,33 +173,50 @@ void sched_nr::ue_cfg_impl(uint16_t rnti, const ue_cfg_t& uecfg)
   }
 }
 
-void sched_nr::slot_indication(tti_point tti_rx)
+/// Generate {tti,cc} scheduling decision
+int sched_nr::generate_slot_result(tti_point pdcch_tti, uint32_t cc)
 {
-  // Lock slot workers for provided tti_rx
-  sched_workers->reserve_workers(tti_rx);
+  tti_point tti_rx = pdcch_tti - TX_ENB_DELAY;
 
-  {
+  // Lock carrier workers for provided tti_rx
+  sched_workers->start_slot(tti_rx, [this]() {
+    // In case it is first worker for the given slot
     // synchronize {tti,cc} state. e.g. reserve UE resources for {tti,cc} decision, process feedback
-    std::lock_guard<std::mutex> lock(ue_db_mutex);
-    // Process pending events
-    pending_events->new_tti();
+    pending_events->new_slot();
+  });
 
-    sched_workers->start_tti(tti_rx);
+  // unlocked, parallel region
+  bool all_workers_finished = sched_workers->run_slot(tti_rx, cc);
+
+  if (all_workers_finished) {
+    // once all workers of the same subframe finished, synchronize sched outcome with ue_db
+    sched_workers->release_slot(tti_rx);
   }
+
+  // Copy results to intermediate buffer
+  dl_sched_t& dl_res = pending_results->add_dl_result(pdcch_tti, cc);
+  ul_sched_t& ul_res = pending_results->add_ul_result(pdcch_tti, cc);
+  sched_workers->get_sched_result(pdcch_tti, cc, dl_res, ul_res);
+
+  return SRSRAN_SUCCESS;
 }
 
-/// Generate {tti,cc} scheduling decision
-int sched_nr::generate_sched_result(tti_point tti_rx, uint32_t cc, tti_request_t& req)
+int sched_nr::get_dl_sched(tti_point tti_tx, uint32_t cc, dl_sched_t& result)
 {
-  // unlocked, parallel region
-  bool all_workers_finished = sched_workers->run_tti(tti_rx, cc, req);
+  if (not pending_results->has_dl_result(tti_tx, cc)) {
+    generate_slot_result(tti_tx, cc);
+  }
 
-  if (all_workers_finished) {
-    // once all workers of the same subframe finished, synchronize sched outcome with ue_db
-    std::lock_guard<std::mutex> lock(ue_db_mutex);
-    sched_workers->end_tti(tti_rx);
+  result = pending_results->pop_dl_result(tti_tx, cc);
+  return SRSRAN_SUCCESS;
+}
+
+int sched_nr::get_ul_sched(tti_point tti_rx, uint32_t cc, ul_sched_t& result)
+{
+  if (not pending_results->has_ul_result(tti_rx, cc)) {
+    return SRSRAN_ERROR;
   }
+
+  result = pending_results->pop_ul_result(tti_rx, cc);
   return SRSRAN_SUCCESS;
 }
diff --git a/srsenb/src/stack/mac/nr/sched_nr_pdcch.cc b/srsenb/src/stack/mac/nr/sched_nr_pdcch.cc
index 298fcbfcd..526858bbd 100644
--- a/srsenb/src/stack/mac/nr/sched_nr_pdcch.cc
+++ b/srsenb/src/stack/mac/nr/sched_nr_pdcch.cc
@@ -19,8 +19,8 @@ namespace sched_nr_impl {
 coreset_region::coreset_region(const bwp_cfg_t& bwp_cfg_,
                                uint32_t         coreset_id_,
                                uint32_t         slot_idx_,
-                               pdsch_list_t&    dl_list_,
-                               pusch_list_t&    ul_list_) :
+                               pdcch_dl_list_t& dl_list_,
+                               pdcch_ul_list_t& ul_list_) :
   bwp_cfg(&bwp_cfg_),
   coreset_cfg(&bwp_cfg_.coresets[coreset_id_ - 1].value()),
   coreset_id(coreset_id_),
@@ -156,10 +156,10 @@ bool coreset_region::alloc_dfs_node(const alloc_record& record, uint32_t start_d
   alloc_dfs.push_back(node);
   // set new DCI position
   if (record.alloc_type == pdcch_grant_type_t::ul_data) {
-    pusch_grant& pdcch_ul     = pdcch_ul_list[record.idx];
+    pdcch_ul_t& pdcch_ul      = pdcch_ul_list[record.idx];
     pdcch_ul.dci.ctx.location = node.dci_pos;
   } else {
-    pdsch_grant& pdcch_dl     = pdcch_dl_list[record.idx];
+    pdcch_dl_t& pdcch_dl      = pdcch_dl_list[record.idx];
     pdcch_dl.dci.ctx.location = node.dci_pos;
   }
   return true;
diff --git a/srsenb/src/stack/mac/nr/sched_nr_rb_grid.cc b/srsenb/src/stack/mac/nr/sched_nr_rb_grid.cc
index f0a433c26..cd70a26ed 100644
--- a/srsenb/src/stack/mac/nr/sched_nr_rb_grid.cc
+++ b/srsenb/src/stack/mac/nr/sched_nr_rb_grid.cc
@@ -27,7 +27,7 @@ bwp_slot_grid::bwp_slot_grid(const sched_cell_params& cell_params, uint32_t bwp_
   is_ul(srsran_tdd_nr_is_ul(&cell_params.cell_cfg.tdd, NUMEROLOGY_IDX, slot_idx_))
 {
   const uint32_t coreset_id = 1; // Note: for now only one coreset per BWP supported
-  coresets.emplace_back(cell_params.cell_cfg.bwps[0], coreset_id, slot_idx_, pdschs, puschs);
+  coresets.emplace_back(cell_params.cell_cfg.bwps[0], coreset_id, slot_idx_, dl_pdcchs, ul_pdcchs);
 }
 
 void bwp_slot_grid::reset()
 {
@@ -37,15 +37,15 @@ void bwp_slot_grid::reset()
   }
   dl_rbgs.reset();
   ul_rbgs.reset();
-  pdschs.clear();
-  puschs.clear();
+  dl_pdcchs.clear();
+  ul_pdcchs.clear();
   pucchs.clear();
 }
 
 bwp_res_grid::bwp_res_grid(const sched_cell_params& cell_cfg_, uint32_t bwp_id_) : bwp_id(bwp_id_), cell_cfg(&cell_cfg_)
 {
-  for (uint32_t sl = 0; sl < SCHED_NR_NOF_SUBFRAMES; ++sl) {
-    slots.emplace_back(cell_cfg_, bwp_id, sl);
+  for (uint32_t sl = 0; sl < slots.capacity(); ++sl) {
+    slots.emplace_back(cell_cfg_, bwp_id, sl % static_cast<uint32_t>(SRSRAN_NSLOTS_PER_FRAME_NR(0u)));
   }
 }
 
@@ -75,7 +75,7 @@ alloc_result slot_bwp_sched::alloc_pdsch(slot_ue& ue, const rbgmask_t& dl_mask)
     logger.warning("SCHED: Trying to allocate PDSCH in TDD non-DL slot index=%d", bwp_pdsch_slot.slot_idx);
     return alloc_result::no_sch_space;
   }
-  pdsch_list_t& pdsch_grants = bwp_pdsch_slot.pdschs;
+  pdcch_dl_list_t& pdsch_grants = bwp_pdsch_slot.dl_pdcchs;
   if (pdsch_grants.full()) {
     logger.warning("SCHED: Maximum number of DL allocations reached");
     return alloc_result::no_grant_space;
@@ -109,7 +109,7 @@ alloc_result slot_bwp_sched::alloc_pdsch(slot_ue& ue, const rbgmask_t& dl_mask)
   }
 
   // Allocation Successful
-  pdsch_grant& pdcch = bwp_pdcch_slot.pdschs.back();
+  pdcch_dl_t& pdcch = bwp_pdcch_slot.dl_pdcchs.back();
   fill_dci_ue_cfg(ue, dl_mask, bwp_grid.cell_params(), pdcch.dci);
   pdsch_mask |= dl_mask;
   bwp_uci_slot.pucchs.emplace_back();
@@ -127,13 +127,14 @@ alloc_result slot_bwp_sched::alloc_pusch(slot_ue& ue, const rbgmask_t& ul_mask)
     logger.warning("SCHED: Trying to allocate PUSCH for rnti=0x%x with no available HARQs", ue.rnti);
     return alloc_result::no_rnti_opportunity;
  }
+  auto& bwp_pdcch_slot = bwp_grid[ue.pdcch_tti];
   auto& bwp_pusch_slot = bwp_grid[ue.pusch_tti];
   if (not bwp_pusch_slot.is_ul) {
     logger.warning("SCHED: Trying to allocate PUSCH in TDD non-UL slot index=%d", bwp_pusch_slot.slot_idx);
     return alloc_result::no_sch_space;
   }
-  pusch_list& pusch_grants = bwp_pusch_slot.puschs;
-  if (pusch_grants.full()) {
+  pdcch_ul_list_t& pdcchs = bwp_pdcch_slot.ul_pdcchs;
+  if (pdcchs.full()) {
     logger.warning("SCHED: Maximum number of UL allocations reached");
     return alloc_result::no_grant_space;
   }
@@ -142,7 +143,7 @@ alloc_result slot_bwp_sched::alloc_pusch(slot_ue& ue, const rbgmask_t& ul_mask)
     return alloc_result::sch_collision;
   }
   const uint32_t aggr_idx = 2, coreset_id = 0;
-  if (not bwp_grid[ue.pdcch_tti].coresets[coreset_id].alloc_dci(pdcch_grant_type_t::ul_data, aggr_idx, &ue)) {
+  if (not bwp_pdcch_slot.coresets[coreset_id].alloc_dci(pdcch_grant_type_t::ul_data, aggr_idx, &ue)) {
     // Could not find space in PDCCH
     return alloc_result::no_cch_space;
   }
@@ -159,8 +160,8 @@ alloc_result slot_bwp_sched::alloc_pusch(slot_ue& ue, const rbgmask_t& ul_mask)
   }
 
   // Allocation Successful
-  pdsch_grant& pdsch = bwp_grid[ue.pdcch_tti].pdschs.back();
-  fill_dci_ue_cfg(ue, ul_mask, bwp_grid.cell_params(), pdsch.dci);
+  pdcch_ul_t& pdcch = pdcchs.back();
+  fill_dci_ue_cfg(ue, ul_mask, bwp_grid.cell_params(), pdcch.dci);
   pusch_mask |= ul_mask;
 
   return alloc_result::success;
diff --git a/srsenb/src/stack/mac/nr/sched_nr_worker.cc b/srsenb/src/stack/mac/nr/sched_nr_worker.cc
index ab3b63d5c..f07fa9368 100644
--- a/srsenb/src/stack/mac/nr/sched_nr_worker.cc
+++ b/srsenb/src/stack/mac/nr/sched_nr_worker.cc
@@ -103,7 +103,6 @@ sched_worker_manager::sched_worker_manager(ue_map_t& ue_db_, const sched_params&
   slot_ctxts.resize(cfg.sched_cfg.nof_concurrent_subframes);
   for (size_t i = 0; i < cfg.sched_cfg.nof_concurrent_subframes; ++i) {
     slot_ctxts[i].reset(new slot_worker_ctxt());
-    sem_init(&slot_ctxts[i]->sf_sem, 0, 1);
     slot_ctxts[i]->workers.reserve(cfg.cells.size());
     for (uint32_t cc = 0; cc < cfg.cells.size(); ++cc) {
       slot_ctxts[i]->workers.emplace_back(cfg.cells[cc], cell_grid_list[cc]);
@@ -111,44 +110,47 @@ sched_worker_manager::sched_worker_manager(ue_map_t& ue_db_, const sched_params&
   }
 }
 
-sched_worker_manager::~sched_worker_manager()
-{
-  // acquire all slot worker contexts
-  for (auto& slot_ctxt : slot_ctxts) {
-    sem_wait(&slot_ctxt->sf_sem);
-  }
-  // destroy all slot worker contexts
-  for (auto& slot_ctxt : slot_ctxts) {
-    sem_destroy(&slot_ctxt->sf_sem);
-  }
-}
+sched_worker_manager::~sched_worker_manager() = default;
 
 sched_worker_manager::slot_worker_ctxt& sched_worker_manager::get_sf(tti_point tti_rx)
 {
   return *slot_ctxts[tti_rx.to_uint() % slot_ctxts.size()];
 }
 
-void sched_worker_manager::reserve_workers(tti_point tti_rx_)
+void sched_worker_manager::start_slot(tti_point tti_rx, srsran::move_callback<void()> process_feedback)
 {
-  // lock if slot worker is already being used
-  auto& sf_worker_ctxt = get_sf(tti_rx_);
-  sem_wait(&sf_worker_ctxt.sf_sem);
+  auto& sf_worker_ctxt = get_sf(tti_rx);
+
+  std::unique_lock<std::mutex> lock(sf_worker_ctxt.slot_mutex);
+  while ((sf_worker_ctxt.tti_rx.is_valid() and sf_worker_ctxt.tti_rx != tti_rx)) {
+    // wait for previous slot to finish
+    sf_worker_ctxt.nof_workers_waiting++;
+    sf_worker_ctxt.cvar.wait(lock);
+    sf_worker_ctxt.nof_workers_waiting--;
+  }
+  if (sf_worker_ctxt.tti_rx == tti_rx) {
+    // another worker with the same slot idx already started
+    return;
+  }
 
-  sf_worker_ctxt.tti_rx = tti_rx_;
-  sf_worker_ctxt.worker_count.store(static_cast<int>(sf_worker_ctxt.workers.size()), std::memory_order_relaxed);
-}
+  {
+    std::lock_guard<std::mutex> db_lock(ue_db_mutex);
 
-void sched_worker_manager::start_tti(tti_point tti_rx_)
-{
-  auto& sf_worker_ctxt = get_sf(tti_rx_);
-  srsran_assert(sf_worker_ctxt.tti_rx == tti_rx_, "invalid run_tti(tti, cc) arguments");
+    process_feedback();
 
-  for (uint32_t cc = 0; cc < sf_worker_ctxt.workers.size(); ++cc) {
-    sf_worker_ctxt.workers[cc].start(sf_worker_ctxt.tti_rx, ue_db);
+    for (uint32_t cc = 0; cc < sf_worker_ctxt.workers.size(); ++cc) {
+      sf_worker_ctxt.workers[cc].start(tti_rx, ue_db);
+    }
+  }
+
+  sf_worker_ctxt.tti_rx = tti_rx;
+  sf_worker_ctxt.worker_count.store(static_cast<int>(sf_worker_ctxt.workers.size()), std::memory_order_relaxed);
+  if (sf_worker_ctxt.nof_workers_waiting > 0) {
+    sf_worker_ctxt.cvar.notify_all();
   }
 }
 
-bool sched_worker_manager::run_tti(tti_point tti_rx_, uint32_t cc, slot_res_t& tti_req)
+bool sched_worker_manager::run_slot(tti_point tti_rx_, uint32_t cc)
 {
   auto& sf_worker_ctxt = get_sf(tti_rx_);
   srsran_assert(sf_worker_ctxt.tti_rx == tti_rx_, "invalid run_tti(tti, cc) arguments");
@@ -156,15 +158,6 @@ bool sched_worker_manager::run_tti(tti_point tti_rx_, uint32_t cc, slot_res_t& t
   // Get {tti, cc} scheduling decision
   sf_worker_ctxt.workers[cc].run();
 
-  // Copy requested TTI DL and UL sched result
-  tti_req.dl_res.pdsch_tti = tti_rx_ + TX_ENB_DELAY;
-  tti_req.dl_res.pdschs    = cell_grid_list[cc].bwps[0][tti_req.dl_res.pdsch_tti].pdschs;
-  cell_grid_list[cc].bwps[0][tti_req.dl_res.pdsch_tti].reset();
-  tti_req.ul_res.pusch_tti = tti_rx_ + TX_ENB_DELAY;
-  tti_req.ul_res.puschs    = cell_grid_list[cc].bwps[0][tti_req.ul_res.pusch_tti].puschs;
-  tti_req.ul_res.pucchs    = cell_grid_list[cc].bwps[0][tti_req.ul_res.pusch_tti].pucchs;
-  cell_grid_list[cc].bwps[0][tti_req.ul_res.pusch_tti].reset();
-
   // decrement the number of active workers
   int rem_workers = sf_worker_ctxt.worker_count.fetch_sub(1, std::memory_order_release) - 1;
   srsran_assert(rem_workers >= 0, "invalid number of calls to run_tti(tti, cc)");
@@ -172,18 +165,45 @@
   return rem_workers == 0;
 }
 
-void sched_worker_manager::end_tti(tti_point tti_rx_)
+void sched_worker_manager::release_slot(tti_point tti_rx_)
 {
   auto& sf_worker_ctxt = get_sf(tti_rx_);
   srsran_assert(sf_worker_ctxt.tti_rx == tti_rx_, "invalid run_tti(tti, cc) arguments");
   srsran_assert(sf_worker_ctxt.worker_count == 0, "invalid number of calls to run_tti(tti, cc)");
 
-  // All the workers of the same TTI have finished. Synchronize scheduling decisions with UEs state
-  for (slot_cc_worker& worker : sf_worker_ctxt.workers) {
-    worker.end_tti();
+  {
+    std::lock_guard<std::mutex> lock(ue_db_mutex);
+
+    // All the workers of the same slot have finished. Synchronize scheduling decisions with UEs state
+    for (slot_cc_worker& worker : sf_worker_ctxt.workers) {
+      worker.end_tti();
+    }
+  }
+
+  std::unique_lock<std::mutex> lock(sf_worker_ctxt.slot_mutex);
+  sf_worker_ctxt.tti_rx = {};
+  if (sf_worker_ctxt.nof_workers_waiting > 0) {
+    sf_worker_ctxt.cvar.notify_one();
   }
+}
+
+bool sched_worker_manager::get_sched_result(tti_point pdcch_tti, uint32_t cc, dl_sched_t& dl_res, ul_sched_t& ul_res)
+{
+  auto& pdcch_bwp_slot = cell_grid_list[cc].bwps[0][pdcch_tti];
+
+  dl_res.pdcch_dl = pdcch_bwp_slot.dl_pdcchs;
+  dl_res.pdcch_ul = pdcch_bwp_slot.ul_pdcchs;
+
+  ul_res.pucch.resize(pdcch_bwp_slot.pucchs.size());
+  for (uint32_t i = 0; i < pdcch_bwp_slot.pucchs.size(); ++i) {
+    ul_res.pucch[i].uci_cfg.pucch.rnti        = pdcch_bwp_slot.pucchs[i].resource.rnti;
+    ul_res.pucch[i].uci_cfg.pucch.resource_id = pdcch_bwp_slot.pucchs[i].resource.resource_id;
+  }
+
+  // clear up BWP slot
+  pdcch_bwp_slot.reset();
 
-  sem_post(&sf_worker_ctxt.sf_sem);
+  return true;
 }
 
 } // namespace sched_nr_impl
diff --git a/srsenb/test/mac/nr/sched_nr_sim_ue.cc b/srsenb/test/mac/nr/sched_nr_sim_ue.cc
index fa28063dd..8db2034e2 100644
--- a/srsenb/test/mac/nr/sched_nr_sim_ue.cc
+++ b/srsenb/test/mac/nr/sched_nr_sim_ue.cc
@@ -44,8 +44,8 @@ int sched_nr_ue_sim::update(const sched_nr_cc_output_res_t& cc_out)
 void sched_nr_ue_sim::update_dl_harqs(const sched_nr_cc_output_res_t& cc_out)
 {
   uint32_t cc = cc_out.cc;
-  for (uint32_t i = 0; i < cc_out.dl_cc_result->pdschs.size(); ++i) {
-    const auto& data = cc_out.dl_cc_result->pdschs[i];
+  for (uint32_t i = 0; i < cc_out.dl_cc_result->pdcch_dl.size(); ++i) {
+    const auto& data = cc_out.dl_cc_result->pdcch_dl[i];
     if (data.dci.ctx.rnti != ctxt.rnti) {
       continue;
     }
@@ -98,7 +98,7 @@ int sched_nr_sim_base::add_user(uint16_t rnti, const sched_nr_interface::ue_cfg_
   return SRSRAN_SUCCESS;
 }
 
-void sched_nr_sim_base::slot_indication(srsran::tti_point tti_rx)
+void sched_nr_sim_base::new_slot(srsran::tti_point tti_rx)
 {
   {
     std::unique_lock<std::mutex> lock(mutex);
@@ -113,7 +113,6 @@ void sched_nr_sim_base::slot_indication(srsran::tti_point tti_rx)
       apply_tti_events(ue.second.get_ctxt(), events);
     }
   }
-  sched_ptr->slot_indication(tti_rx);
 }
 
 void sched_nr_sim_base::update(sched_nr_cc_output_res_t& cc_out)
diff --git a/srsenb/test/mac/nr/sched_nr_sim_ue.h b/srsenb/test/mac/nr/sched_nr_sim_ue.h
index b6c8e5755..ccac6f2d0 100644
--- a/srsenb/test/mac/nr/sched_nr_sim_ue.h
+++ b/srsenb/test/mac/nr/sched_nr_sim_ue.h
@@ -20,10 +20,10 @@ namespace srsenb {
 
 struct sched_nr_cc_output_res_t {
-  tti_point                             tti_rx;
-  uint32_t                              cc;
-  sched_nr_interface::dl_tti_request_t* dl_cc_result;
-  sched_nr_interface::ul_tti_request_t* ul_cc_result;
+  tti_point                       tti_rx;
+  uint32_t                        cc;
+  sched_nr_interface::dl_sched_t* dl_cc_result;
+  sched_nr_interface::ul_sched_t* ul_cc_result;
 };
 
 struct ue_nr_cc_ctxt_t {
@@ -75,7 +75,7 @@ public:
 
   int add_user(uint16_t rnti, const sched_nr_interface::ue_cfg_t& ue_cfg_, uint32_t preamble_idx);
 
-  void slot_indication(srsran::tti_point tti_rx);
+  void new_slot(srsran::tti_point tti_rx);
   void update(sched_nr_cc_output_res_t& cc_out);
 
   sched_nr_ue_sim& at(uint16_t rnti) { return ue_db.at(rnti); }
diff --git a/srsenb/test/mac/nr/sched_nr_test.cc b/srsenb/test/mac/nr/sched_nr_test.cc
index 2130df547..60f1924c1 100644
--- a/srsenb/test/mac/nr/sched_nr_test.cc
+++ b/srsenb/test/mac/nr/sched_nr_test.cc
@@ -17,6 +17,8 @@
 
 namespace srsenb {
 
+using dl_sched_t = sched_nr_interface::dl_sched_t;
+
 srsran_coreset_t get_default_coreset()
 {
   srsran_coreset_t coreset{};
@@ -87,37 +89,46 @@ sched_nr_interface::ue_cfg_t get_default_ue_cfg(uint32_t nof_cc)
 }
 
 struct task_job_manager {
-  std::mutex              mutex;
-  std::condition_variable cond_var;
-  int                     tasks       = 0;
-  int                     res_count   = 0;
-  int                     pdsch_count = 0;
-  int                     max_tasks   = std::numeric_limits<int>::max() / 2;
-  srslog::basic_logger&   test_logger = srslog::fetch_basic_logger("TEST");
-
-  void start_task()
+  std::mutex            mutex;
+  int                   res_count   = 0;
+  int                   pdsch_count = 0;
+  srslog::basic_logger& test_logger = srslog::fetch_basic_logger("TEST");
+  struct slot_guard {
+    int                     count = 0;
+    std::condition_variable cvar;
+  };
+  srsran::bounded_vector<slot_guard, 10> slot_counter{};
+
+  explicit task_job_manager(int max_concurrent_slots = 4) : slot_counter(max_concurrent_slots) {}
+
+  void start_slot(tti_point tti, int nof_sectors)
   {
     std::unique_lock<std::mutex> lock(mutex);
-    while (tasks >= max_tasks) {
-      cond_var.wait(lock);
+    auto& sl = slot_counter[tti.to_uint() % slot_counter.size()];
+    while (sl.count > 0) {
+      sl.cvar.wait(lock);
     }
-    tasks++;
+    sl.count = nof_sectors;
   }
-  void finish_task(const sched_nr_interface::tti_request_t& res)
+  void finish_cc(tti_point tti, const dl_sched_t& dl_res, const sched_nr_interface::ul_sched_t& ul_res)
   {
     std::unique_lock<std::mutex> lock(mutex);
-    TESTASSERT(res.dl_res.pdschs.size() <= 1);
+    TESTASSERT(dl_res.pdcch_dl.size() <= 1);
     res_count++;
-    pdsch_count += res.dl_res.pdschs.size();
-    if (tasks-- >= max_tasks or tasks == 0) {
-      cond_var.notify_one();
+    pdsch_count += dl_res.pdcch_dl.size();
+    auto& sl = slot_counter[tti.to_uint() % slot_counter.size()];
+    if (--sl.count == 0) {
+      sl.cvar.notify_one();
     }
   }
   void wait_task_finish()
   {
     std::unique_lock<std::mutex> lock(mutex);
-    while (tasks > 0) {
-      cond_var.wait(lock);
+    for (auto& sl : slot_counter) {
+      while (sl.count > 0) {
+        sl.cvar.wait(lock);
+      }
+      sl.count = 1;
     }
   }
   void print_results() const
@@ -142,17 +153,19 @@ void sched_nr_cfg_serialized_test()
   sched_tester.add_user(0x46, uecfg, 0);
 
   for (uint32_t nof_ttis = 0; nof_ttis < max_nof_ttis; ++nof_ttis) {
-    tti_point tti(nof_ttis % 10240);
-    sched_tester.slot_indication(tti);
+    tti_point tti_rx(nof_ttis % 10240);
+    tti_point tti_tx = tti_rx + TX_ENB_DELAY;
+    tasks.start_slot(tti_rx, nof_sectors);
+    sched_tester.new_slot(tti_tx);
     for (uint32_t cc = 0; cc < cells_cfg.size(); ++cc) {
-      tasks.start_task();
-      sched_nr_interface::tti_request_t res;
-      TESTASSERT(sched_tester.get_sched()->generate_sched_result(tti, cc, res) == SRSRAN_SUCCESS);
-      sched_nr_cc_output_res_t out{tti, cc, &res.dl_res, &res.ul_res};
+      sched_nr_interface::dl_sched_t dl_res;
+      sched_nr_interface::ul_sched_t ul_res;
+      TESTASSERT(sched_tester.get_sched()->get_dl_sched(tti_tx, cc, dl_res) == SRSRAN_SUCCESS);
+      TESTASSERT(sched_tester.get_sched()->get_ul_sched(tti_tx, cc, ul_res) == SRSRAN_SUCCESS);
+      sched_nr_cc_output_res_t out{tti_tx, cc, &dl_res, &ul_res};
       sched_tester.update(out);
-      tasks.finish_task(res);
-      TESTASSERT(not srsran_tdd_nr_is_dl(&cells_cfg[cc].tdd, 0, (tti + TX_ENB_DELAY).sf_idx()) or
-                 res.dl_res.pdschs.size() == 1);
+      tasks.finish_cc(tti_rx, dl_res, ul_res);
+      TESTASSERT(not srsran_tdd_nr_is_dl(&cells_cfg[cc].tdd, 0, (tti_tx).sf_idx()) or dl_res.pdcch_dl.size() == 1);
     }
   }
 
@@ -162,11 +175,12 @@
 
 void sched_nr_cfg_parallel_cc_test()
 {
+  uint32_t         nof_sectors  = 4;
   uint32_t         max_nof_ttis = 1000;
   task_job_manager tasks;
 
   sched_nr_interface::sched_cfg_t             cfg;
-  std::vector<sched_nr_interface::cell_cfg_t> cells_cfg = get_default_cells_cfg(4);
+  std::vector<sched_nr_interface::cell_cfg_t> cells_cfg = get_default_cells_cfg(nof_sectors);
 
   sched_nr_sim_base sched_tester(cfg, cells_cfg, "Parallel CC Test");
 
@@ -174,16 +188,19 @@ void sched_nr_cfg_parallel_cc_test()
   sched_tester.add_user(0x46, uecfg, 0);
 
   for (uint32_t nof_ttis = 0; nof_ttis < max_nof_ttis; ++nof_ttis) {
-    tti_point tti(nof_ttis % 10240);
-    sched_tester.slot_indication(tti);
+    tti_point tti_rx(nof_ttis % 10240);
+    tti_point tti_tx = tti_rx + TX_ENB_DELAY;
+    tasks.start_slot(tti_tx, nof_sectors);
+    sched_tester.new_slot(tti_tx);
     for (uint32_t cc = 0; cc < cells_cfg.size(); ++cc) {
-      tasks.start_task();
-      srsran::get_background_workers().push_task([cc, tti, &tasks, &sched_tester]() {
-        sched_nr_interface::tti_request_t res;
-        TESTASSERT(sched_tester.get_sched()->generate_sched_result(tti, cc, res) == SRSRAN_SUCCESS);
-        sched_nr_cc_output_res_t out{tti, cc, &res.dl_res, &res.ul_res};
+      srsran::get_background_workers().push_task([cc, tti_tx, &tasks, &sched_tester]() {
+        sched_nr_interface::dl_sched_t dl_res;
+        sched_nr_interface::ul_sched_t ul_res;
+        TESTASSERT(sched_tester.get_sched()->get_dl_sched(tti_tx, cc, dl_res) == SRSRAN_SUCCESS);
+        TESTASSERT(sched_tester.get_sched()->get_ul_sched(tti_tx, cc, ul_res) == SRSRAN_SUCCESS);
+        sched_nr_cc_output_res_t out{tti_tx, cc, &dl_res, &ul_res};
         sched_tester.update(out);
-        tasks.finish_task(res);
+        tasks.finish_cc(tti_tx, dl_res, ul_res);
       });
     }
   }
@@ -191,6 +208,7 @@ void sched_nr_cfg_parallel_cc_test()
 
   tasks.wait_task_finish();
   tasks.print_results();
+  TESTASSERT(tasks.pdsch_count == (int)(max_nof_ttis * nof_sectors * 0.6));
 }
 
 void sched_nr_cfg_parallel_sf_test()
@@ -211,13 +229,14 @@ void sched_nr_cfg_parallel_sf_test()
 
   for (uint32_t nof_ttis = 0; nof_ttis < max_nof_ttis; ++nof_ttis) {
     tti_point tti(nof_ttis % 10240);
-    sched.slot_indication(tti);
+    tasks.start_slot(tti, nof_sectors);
     for (uint32_t cc = 0; cc < cells_cfg.size(); ++cc) {
-      tasks.start_task();
       srsran::get_background_workers().push_task([cc, &sched, tti, &tasks]() {
-        sched_nr_interface::tti_request_t res;
-        TESTASSERT(sched.generate_sched_result(tti, cc, res) == SRSRAN_SUCCESS);
-        tasks.finish_task(res);
+        sched_nr_interface::dl_sched_t dl_res;
+        sched_nr_interface::ul_sched_t ul_res;
+        TESTASSERT(sched.get_dl_sched(tti, cc, dl_res) == SRSRAN_SUCCESS);
+        TESTASSERT(sched.get_ul_sched(tti, cc, ul_res) == SRSRAN_SUCCESS);
+        tasks.finish_cc(tti, dl_res, ul_res);
      });
    }
  }
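
Reviewer note, not part of the patch: with slot_indication() removed, the PHY-side caller now pulls results directly. The first get_dl_sched() call for a given {tti, cc} lazily runs generate_slot_result(), while get_ul_sched() only pops an already buffered result (it returns SRSRAN_ERROR otherwise), so the DL getter must be invoked first for each {tti, cc}. A minimal sketch of such a driver loop, assuming the types introduced above; run_slot_driver and nof_carriers are illustrative names, not from the patch:

    #include "srsenb/hdr/stack/mac/nr/sched_nr.h"

    // Hypothetical per-slot driver: one getter pair per carrier, DL before UL.
    void run_slot_driver(srsenb::sched_nr& sched, srsran::tti_point tti_tx, uint32_t nof_carriers)
    {
      for (uint32_t cc = 0; cc < nof_carriers; ++cc) {
        srsenb::sched_nr_interface::dl_sched_t dl_res;
        srsenb::sched_nr_interface::ul_sched_t ul_res;
        // First call for {tti_tx, cc} triggers generate_slot_result() internally
        sched.get_dl_sched(tti_tx, cc, dl_res);
        // The UL result was buffered by the same slot run; pop it afterwards
        sched.get_ul_sched(tti_tx, cc, ul_res);
        // ... hand dl_res/ul_res over to the PHY for this slot and carrier ...
      }
    }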
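Reviewer note, not part of the patch: the per-slot sem_t gate in sched_worker_manager is replaced by a slot_mutex/cvar pair. The first worker reaching start_slot() for a new tti claims the context and runs the feedback callback, same-slot workers return immediately, and workers of a later slot wait until release_slot() invalidates tti_rx. The pattern reduces to the following self-contained sketch; slot_gate and its members are illustrative names, only the pattern mirrors the patch:

    #include <condition_variable>
    #include <mutex>

    struct slot_gate {
      std::mutex              mtx;
      std::condition_variable cvar;
      int                     active_slot = -1; // -1 == free, mirrors an invalid tti_rx
      int                     waiters     = 0;

      void start(int slot)
      {
        std::unique_lock<std::mutex> lock(mtx);
        // Block while the gate is still owned by a different slot
        while (active_slot != -1 and active_slot != slot) {
          ++waiters;
          cvar.wait(lock);
          --waiters;
        }
        if (active_slot == slot) {
          return; // another carrier worker already set this slot up
        }
        active_slot = slot; // first worker: per-slot setup would run here
        if (waiters > 0) {
          cvar.notify_all(); // wake workers of the same slot blocked above
        }
      }

      void release()
      {
        std::unique_lock<std::mutex> lock(mtx);
        active_slot = -1;
        if (waiters > 0) {
          cvar.notify_one(); // let the next slot claim the gate
        }
      }
    };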