From 43e67b8536ae3906ab0994bbcbbb468731e49a40 Mon Sep 17 00:00:00 2001
From: Francisco Paisana
Date: Thu, 5 Mar 2020 15:49:13 +0000
Subject: [PATCH] created a harq entity that handles all harq procs. This
 entity accepts the number of harq procs as an argument.

---
 srsenb/hdr/stack/mac/scheduler_harq.h    |  46 ++++--
 srsenb/hdr/stack/mac/scheduler_ue.h      |  27 +---
 srsenb/src/stack/mac/scheduler_grid.cc   |  25 ++--
 srsenb/src/stack/mac/scheduler_harq.cc   | 112 ++++++++++++---
 srsenb/src/stack/mac/scheduler_ue.cc     | 170 +++++------------------
 srsenb/test/mac/scheduler_test_common.cc |  12 +-
 srsenb/test/mac/scheduler_test_rand.cc   |  28 ++--
 7 files changed, 201 insertions(+), 219 deletions(-)

diff --git a/srsenb/hdr/stack/mac/scheduler_harq.h b/srsenb/hdr/stack/mac/scheduler_harq.h
index fc93f6bda..27e03c4bc 100644
--- a/srsenb/hdr/stack/mac/scheduler_harq.h
+++ b/srsenb/hdr/stack/mac/scheduler_harq.h
@@ -149,38 +149,56 @@ private:
   typedef srslte::bounded_bitset<100, true> prbmask_t;

-class dl_harq_entity : private std::vector<dl_harq_proc>
+class harq_entity
 {
-  using base_t = std::vector<dl_harq_proc>;
-
 public:
   static const bool is_async = ASYNC_DL_SCHED;

-  using base_t::const_iterator;
-  using base_t::iterator;
-  using base_t::operator[];
-  using base_t::begin;
-  using base_t::data;
-  using base_t::end;
-  using base_t::size;
+  harq_entity(size_t nof_dl_harqs, size_t nof_ul_harqs);
+  void reset();
+  void set_cfg(uint32_t max_retx);

-  explicit dl_harq_entity(size_t nof_harqs) : base_t(nof_harqs) {}
+  size_t nof_dl_harqs() const { return dl_harqs.size(); }
+  size_t nof_ul_harqs() const { return ul_harqs.size(); }
+  std::vector<dl_harq_proc>&       dl_harq_procs() { return dl_harqs; }
+  const std::vector<dl_harq_proc>& dl_harq_procs() const { return dl_harqs; }
+  std::vector<ul_harq_proc>&       ul_harq_procs() { return ul_harqs; }

   /**
    * Get the DL harq proc based on tti_tx_dl
    * @param tti_tx_dl assumed to always be equal or ahead in time in comparison to current harqs
    * @return pointer to found dl_harq
    */
-  dl_harq_proc* get_pending_harq(uint32_t tti_tx_dl);
+  dl_harq_proc* get_pending_dl_harq(uint32_t tti_tx_dl);
   /**
    * Get empty DL Harq
    * @param tti_tx_dl only used in case of sync dl sched
    * @return pointer to found dl_harq
    */
-  dl_harq_proc* get_empty_harq(uint32_t tti_tx_dl);
+  dl_harq_proc* get_empty_dl_harq(uint32_t tti_tx_dl);
+
+  /**
+   * Set ACK state for DL Harq Proc
+   * @param tti_rx tti the DL ACK was received
+   * @param tb_idx TB index for the given ACK
+   * @param ack true for ACK and false for NACK
+   * @return pair with pid and size of TB of the DL harq that was ACKed
+   */
+  std::pair<uint32_t, int> set_ack_info(uint32_t tti_rx, uint32_t tb_idx, bool ack);
+
+  //! Get UL Harq for a given tti_tx_ul
+  ul_harq_proc* get_ul_harq(uint32_t tti_tx_ul);
+
+  //! Resets pending harq ACKs and cleans UL Harqs with maxretx == 0
+  void reset_pending_data(uint32_t tti_rx);

 private:
-  dl_harq_proc* get_oldest_harq(uint32_t tti_tx_dl);
+  dl_harq_proc* get_oldest_dl_harq(uint32_t tti_tx_dl);
+
+  srslte::log_ref log_h;
+
+  std::vector<dl_harq_proc> dl_harqs;
+  std::vector<ul_harq_proc> ul_harqs;
 };

 } // namespace srsenb
diff --git a/srsenb/hdr/stack/mac/scheduler_ue.h b/srsenb/hdr/stack/mac/scheduler_ue.h
index 567852f8f..b7b06be48 100644
--- a/srsenb/hdr/stack/mac/scheduler_ue.h
+++ b/srsenb/hdr/stack/mac/scheduler_ue.h
@@ -50,14 +50,6 @@ struct sched_ue_carrier {
   void reset();
   void set_cfg(const sched_interface::ue_cfg_t& cfg); ///< reconfigure ue carrier

-  // Harq access
-  void          reset_old_pending_pids(uint32_t tti_rx);
-  dl_harq_proc* get_pending_dl_harq(uint32_t tti_tx_dl);
-  dl_harq_proc* get_empty_dl_harq(uint32_t tti_tx_dl);
-  int           set_ack_info(uint32_t tti_rx, uint32_t tb_idx, bool ack);
-  ul_harq_proc* get_ul_harq(uint32_t tti);
-  uint32_t      get_pending_ul_old_data();
-
   uint32_t get_aggr_level(uint32_t nof_bits);
   int      alloc_tbs(uint32_t nof_prb, uint32_t nof_re, uint32_t req_bytes, bool is_ul, int* mcs);
   int      alloc_tbs_dl(uint32_t nof_prb, uint32_t nof_re, uint32_t req_bytes, int* mcs);
@@ -67,8 +59,7 @@ struct sched_ue_carrier {
   bool is_active() const { return active; }
   void update_cell_activity();

-  std::array<dl_harq_proc, SCHED_MAX_HARQ_PROC> dl_harq = {};
-  std::array<ul_harq_proc, SCHED_MAX_HARQ_PROC> ul_harq = {};
+  harq_entity harq_ent;

   uint32_t dl_ri     = 0;
   uint32_t dl_ri_tti = 0;
@@ -121,7 +112,6 @@ public:
   void ul_phr(int phr);
   void mac_buffer_state(uint32_t ce_code);
   void ul_recv_len(uint32_t lcid, uint32_t len);
-  void set_dl_ant_info(const sched_interface::ant_info_ded_t& dedicated);
   void set_ul_cqi(uint32_t tti, uint32_t enb_cc_idx, uint32_t cqi, uint32_t ul_ch_code);
   void set_dl_ri(uint32_t tti, uint32_t enb_cc_idx, uint32_t ri);
@@ -137,8 +127,7 @@ public:
   void tpc_inc();
   void tpc_dec();

-  dl_harq_proc* find_dl_harq(uint32_t tti_rx, uint32_t cc_idx);
-  dl_harq_proc* get_dl_harq(uint32_t idx, uint32_t cc_idx);
+  const dl_harq_proc& get_dl_harq(uint32_t idx, uint32_t cc_idx) const;
   uint16_t get_rnti() const { return rnti; }
   std::pair<bool, uint32_t> get_cell_index(uint32_t enb_cc_idx) const;
   const sched_interface::ue_cfg_t& get_ue_cfg() const { return cfg; }
@@ -155,7 +144,6 @@ public:
   uint32_t get_pending_ul_old_data(uint32_t cc_idx);
   uint32_t get_pending_dl_new_data_total();

-  void          reset_pending_pids(uint32_t tti_rx, uint32_t cc_idx);
   dl_harq_proc* get_pending_dl_harq(uint32_t tti_tx_dl, uint32_t cc_idx);
   dl_harq_proc* get_empty_dl_harq(uint32_t tti_tx_dl, uint32_t cc_idx);
   ul_harq_proc* get_ul_harq(uint32_t tti, uint32_t cc_idx);
@@ -175,19 +163,19 @@ public:

   void set_needs_ta_cmd(uint32_t nof_ta_cmd);

-  int generate_format1(dl_harq_proc* h,
+  int generate_format1(uint32_t pid,
                        sched_interface::dl_sched_data_t* data,
                        uint32_t tti,
                        uint32_t cc_idx,
                        uint32_t cfi,
                        const rbgmask_t& user_mask);
-  int generate_format2a(dl_harq_proc* h,
+  int generate_format2a(uint32_t pid,
                         sched_interface::dl_sched_data_t* data,
                         uint32_t tti,
                         uint32_t cc_idx,
                         uint32_t cfi,
                         const rbgmask_t& user_mask);
-  int generate_format2(dl_harq_proc* h,
+  int generate_format2(uint32_t pid,
                        sched_interface::dl_sched_data_t* data,
                        uint32_t tti,
                        uint32_t cc_idx,
@@ -245,7 +233,7 @@ private:

   bool needs_cqi_unlocked(uint32_t tti, uint32_t cc_idx, bool will_send = false);

-  int generate_format2a_unlocked(dl_harq_proc* h,
+  int generate_format2a_unlocked(uint32_t pid,
                                  sched_interface::dl_sched_data_t* data,
                                  uint32_t tti,
                                  uint32_t cc_idx,
@@ -280,8 +268,7 @@ private:
   bool
phy_config_dedicated_enabled = false; - sched_interface::ant_info_ded_t dl_ant_info; - std::vector carriers; ///< map of UE CellIndex to carrier configuration + std::vector carriers; ///< map of UE CellIndex to carrier configuration // Control Element Command queue struct ce_cmd { diff --git a/srsenb/src/stack/mac/scheduler_grid.cc b/srsenb/src/stack/mac/scheduler_grid.cc index 91e8494d1..cb7414313 100644 --- a/srsenb/src/stack/mac/scheduler_grid.cc +++ b/srsenb/src/stack/mac/scheduler_grid.cc @@ -661,9 +661,9 @@ bool sf_sched::alloc_phich(sched_ue* user, sched_interface::ul_sched_res_t* ul_s if (h->has_pending_ack()) { phich_list.phich = h->get_pending_ack() ? phich_t::ACK : phich_t::NACK; phich_list.rnti = user->get_rnti(); - log_h->info("SCHED: Allocated PHICH for rnti=0x%x, value=%s\n", - user->get_rnti(), - phich_list.phich == phich_t::ACK ? "ACK" : "NACK"); + log_h->debug("SCHED: Allocated PHICH for rnti=0x%x, value=%s\n", + user->get_rnti(), + phich_list.phich == phich_t::ACK ? "ACK" : "NACK"); ul_sf_result->nof_phich_elems++; return true; @@ -805,21 +805,24 @@ void sf_sched::set_dl_data_sched_result(const pdcch_grid_t::alloc_result_t& dci_ // Generate DCI Format1/2/2A sched_ue* user = data_alloc.user_ptr; uint32_t cell_index = user->get_cell_index(cc_cfg->enb_cc_idx).second; - dl_harq_proc* h = user->get_dl_harq(data_alloc.pid, cell_index); uint32_t data_before = user->get_pending_dl_new_data(); srslte_dci_format_t dci_format = user->get_dci_format(); - bool is_newtx = h->is_empty(); + const dl_harq_proc& dl_harq = user->get_dl_harq(data_alloc.pid, cell_index); + bool is_newtx = dl_harq.is_empty(); int tbs = 0; switch (dci_format) { case SRSLTE_DCI_FORMAT1: - tbs = user->generate_format1(h, data, get_tti_tx_dl(), cell_index, tti_alloc.get_cfi(), data_alloc.user_mask); + tbs = user->generate_format1( + data_alloc.pid, data, get_tti_tx_dl(), cell_index, tti_alloc.get_cfi(), data_alloc.user_mask); break; case SRSLTE_DCI_FORMAT2: - tbs = user->generate_format2(h, data, get_tti_tx_dl(), cell_index, tti_alloc.get_cfi(), data_alloc.user_mask); + tbs = user->generate_format2( + data_alloc.pid, data, get_tti_tx_dl(), cell_index, tti_alloc.get_cfi(), data_alloc.user_mask); break; case SRSLTE_DCI_FORMAT2A: - tbs = user->generate_format2a(h, data, get_tti_tx_dl(), cell_index, tti_alloc.get_cfi(), data_alloc.user_mask); + tbs = user->generate_format2a( + data_alloc.pid, data, get_tti_tx_dl(), cell_index, tti_alloc.get_cfi(), data_alloc.user_mask); break; default: Error("DCI format (%d) not implemented\n", dci_format); @@ -829,7 +832,7 @@ void sf_sched::set_dl_data_sched_result(const pdcch_grid_t::alloc_result_t& dci_ log_h->warning("SCHED: DL %s failed rnti=0x%x, pid=%d, mask=%s, tbs=%d, buffer=%d\n", is_newtx ? "tx" : "retx", user->get_rnti(), - h->get_id(), + data_alloc.pid, data_alloc.user_mask.to_hex().c_str(), tbs, user->get_pending_dl_new_data()); @@ -841,11 +844,11 @@ void sf_sched::set_dl_data_sched_result(const pdcch_grid_t::alloc_result_t& dci_ !is_newtx ? 
"retx" : "tx", user->get_rnti(), cc_cfg->enb_cc_idx, - h->get_id(), + data_alloc.pid, data_alloc.user_mask.to_hex().c_str(), data->dci.location.L, data->dci.location.ncce, - h->nof_retx(0) + h->nof_retx(1), + dl_harq.nof_retx(0) + dl_harq.nof_retx(1), tbs, data_before, user->get_pending_dl_new_data()); diff --git a/srsenb/src/stack/mac/scheduler_harq.cc b/srsenb/src/stack/mac/scheduler_harq.cc index 841ae2d1d..4b10387d2 100644 --- a/srsenb/src/stack/mac/scheduler_harq.cc +++ b/srsenb/src/stack/mac/scheduler_harq.cc @@ -150,9 +150,9 @@ void harq_proc::new_retx_common(uint32_t tb_idx, uint32_t tti_, int* mcs, int* t void harq_proc::reset_pending_data_common() { // reuse harqs with no retxs - if (max_retx == 0 and !is_empty()) { - for (uint32_t tb = 0; tb < SRSLTE_MAX_TB; ++tb) { - active[tb] = false; + if (max_retx == 0 and not is_empty()) { + for (bool& tb : active) { + tb = false; } } } @@ -220,10 +220,9 @@ rbgmask_t dl_harq_proc::get_rbgmask() const return rbgmask; } -bool dl_harq_proc::has_pending_retx(uint32_t tb_idx, uint32_t current_tti) const +bool dl_harq_proc::has_pending_retx(uint32_t tb_idx, uint32_t tti_tx_dl) const { - uint32_t tti_diff = srslte_tti_interval(current_tti, tti); - // NOTE: tti may be ahead of current_tti due to thread flip + uint32_t tti_diff = srslte_tti_interval(tti_tx_dl, tti); return (tti_diff < (10240 / 2)) and (tti_diff >= SRSLTE_FDD_NOF_HARQ) and has_pending_retx_common(tb_idx); } @@ -307,24 +306,105 @@ uint32_t ul_harq_proc::get_pending_data() const * Harq Entity *******************/ -dl_harq_proc* dl_harq_entity::get_empty_harq(uint32_t tti_tx_dl) +harq_entity::harq_entity(size_t nof_dl_harqs, size_t nof_ul_harqs) : + dl_harqs(nof_dl_harqs), + ul_harqs(nof_ul_harqs), + log_h(srslte::logmap::get("MAC ")) +{ + for (uint32_t i = 0; i < dl_harqs.size(); ++i) { + dl_harqs[i].init(i); + } + for (uint32_t i = 0; i < ul_harqs.size(); ++i) { + ul_harqs[i].init(i); + } +} + +void harq_entity::reset() +{ + for (auto& h : dl_harqs) { + for (uint32_t tb = 0; tb < SRSLTE_MAX_TB; tb++) { + h.reset(tb); + } + } + for (auto& h : ul_harqs) { + for (uint32_t tb = 0; tb < SRSLTE_MAX_TB; tb++) { + h.reset(tb); + } + } +} + +void harq_entity::set_cfg(uint32_t max_retx) +{ + for (auto& h : dl_harqs) { + h.set_cfg(max_retx); + } + for (auto& h : ul_harqs) { + h.set_cfg(max_retx); + } +} + +dl_harq_proc* harq_entity::get_empty_dl_harq(uint32_t tti_tx_dl) { if (not is_async) { - dl_harq_proc* h = &(*this)[tti_tx_dl % size()]; + dl_harq_proc* h = &dl_harqs[tti_tx_dl % nof_dl_harqs()]; return h->is_empty() ? h : nullptr; } - auto it = std::find_if(begin(), end(), [](dl_harq_proc& h) { return h.is_empty(); }); - return it != end() ? &(*it) : nullptr; + auto it = std::find_if(dl_harqs.begin(), dl_harqs.end(), [](dl_harq_proc& h) { return h.is_empty(); }); + return it != dl_harqs.end() ? &(*it) : nullptr; } -dl_harq_proc* dl_harq_entity::get_pending_harq(uint32_t tti_tx_dl) +dl_harq_proc* harq_entity::get_pending_dl_harq(uint32_t tti_tx_dl) { if (not is_async) { - dl_harq_proc* h = &(*this)[tti_tx_dl % size()]; + dl_harq_proc* h = &dl_harqs[tti_tx_dl % nof_dl_harqs()]; return (h->has_pending_retx(0, tti_tx_dl) or h->has_pending_retx(1, tti_tx_dl)) ? 
h : nullptr; } - return get_oldest_harq(tti_tx_dl); + return get_oldest_dl_harq(tti_tx_dl); +} + +std::pair harq_entity::set_ack_info(uint32_t tti_rx, uint32_t tb_idx, bool ack) +{ + for (auto& h : dl_harqs) { + if (TTI_TX(h.get_tti()) == tti_rx) { + h.set_ack(tb_idx, ack); + return {h.get_id(), h.get_tbs(tb_idx)}; + } + } + return {dl_harqs.size(), -1}; +} + +ul_harq_proc* harq_entity::get_ul_harq(uint32_t tti_tx_ul) +{ + return &ul_harqs[tti_tx_ul % ul_harqs.size()]; +} + +void harq_entity::reset_pending_data(uint32_t tti_rx) +{ + uint32_t tti_tx_ul = TTI_RX_ACK(tti_rx); + uint32_t tti_tx_dl = TTI_TX(tti_rx); + + // Reset ACK state of UL Harq + get_ul_harq(tti_tx_ul)->reset_pending_data(); + + // Reset any DL harq which has 0 retxs + for (auto& h : dl_harqs) { + h.reset_pending_data(); + } + + // delete old DL harq procs + for (auto& h : dl_harqs) { + if (not h.is_empty()) { + uint32_t tti_diff = srslte_tti_interval(tti_tx_dl, h.get_tti()); + if (tti_diff > 100 and tti_diff < 10240 / 2) { + srslte::logmap::get("MAC")->info( + "SCHED: pid=%d is old. tti_pid=%d, now is %d, resetting\n", h.get_id(), h.get_tti(), tti_tx_dl); + for (uint32_t tb = 0; tb < SRSLTE_MAX_TB; tb++) { + h.reset(tb); + } + } + } + } } /** @@ -332,11 +412,11 @@ dl_harq_proc* dl_harq_entity::get_pending_harq(uint32_t tti_tx_dl) * @param tti_tx_dl assumed to always be equal or ahead in time in comparison to current harqs * @return pointer to found dl_harq */ -dl_harq_proc* dl_harq_entity::get_oldest_harq(uint32_t tti_tx_dl) +dl_harq_proc* harq_entity::get_oldest_dl_harq(uint32_t tti_tx_dl) { int oldest_idx = -1; uint32_t oldest_tti = 0; - for (const dl_harq_proc& h : *this) { + for (const dl_harq_proc& h : dl_harqs) { if (h.has_pending_retx(0, tti_tx_dl) or h.has_pending_retx(1, tti_tx_dl)) { uint32_t x = srslte_tti_interval(tti_tx_dl, h.get_tti()); if (x > oldest_tti) { @@ -345,7 +425,7 @@ dl_harq_proc* dl_harq_entity::get_oldest_harq(uint32_t tti_tx_dl) } } } - return (oldest_idx >= 0) ? &(*this)[oldest_idx] : nullptr; + return (oldest_idx >= 0) ? 
&dl_harqs[oldest_idx] : nullptr; } } // namespace srsenb diff --git a/srsenb/src/stack/mac/scheduler_ue.cc b/srsenb/src/stack/mac/scheduler_ue.cc index 0888570df..de826bfb0 100644 --- a/srsenb/src/stack/mac/scheduler_ue.cc +++ b/srsenb/src/stack/mac/scheduler_ue.cc @@ -238,12 +238,18 @@ bool sched_ue::pucch_sr_collision(uint32_t current_tti, uint32_t n_cce) return false; } -int sched_ue::set_ack_info(uint32_t tti, uint32_t enb_cc_idx, uint32_t tb_idx, bool ack) +int sched_ue::set_ack_info(uint32_t tti_rx, uint32_t enb_cc_idx, uint32_t tb_idx, bool ack) { int ret = -1; auto p = get_cell_index(enb_cc_idx); if (p.first) { - ret = carriers[p.second].set_ack_info(tti, tb_idx, ack); + std::pair p2 = carriers[p.second].harq_ent.set_ack_info(tti_rx, tb_idx, ack); + ret = p2.second; + if (ret > 0) { + Debug("SCHED: Set ACK=%d for rnti=0x%x, pid=%d, tb=%d, tti=%d\n", ack, rnti, p2.first, tb_idx, tti_rx); + } else { + Warning("SCHED: Received ACK info for unknown TTI=%d\n", tti_rx); + } } else { log_h->warning("Received DL ACK for invalid cell index %d\n", enb_cc_idx); } @@ -376,13 +382,14 @@ uint32_t sched_ue::allocate_mac_sdus(sched_interface::dl_sched_data_t* data, uin // Generates a Format1 dci // > return 0 if allocation is invalid -int sched_ue::generate_format1(dl_harq_proc* h, +int sched_ue::generate_format1(uint32_t pid, sched_interface::dl_sched_data_t* data, uint32_t tti_tx_dl, uint32_t cc_idx, uint32_t cfi, const rbgmask_t& user_mask) { + dl_harq_proc* h = &carriers[cc_idx].harq_ent.dl_harq_procs()[pid]; srslte_dci_dl_t* dci = &data->dci; int mcs = 0; @@ -471,26 +478,27 @@ int sched_ue::generate_format1(dl_harq_proc* h, } // Generates a Format2a dci -int sched_ue::generate_format2a(dl_harq_proc* h, +int sched_ue::generate_format2a(uint32_t pid, sched_interface::dl_sched_data_t* data, uint32_t tti, uint32_t cc_idx, uint32_t cfi, const rbgmask_t& user_mask) { - int ret = generate_format2a_unlocked(h, data, tti, cc_idx, cfi, user_mask); + int ret = generate_format2a_unlocked(pid, data, tti, cc_idx, cfi, user_mask); return ret; } // Generates a Format2a dci -int sched_ue::generate_format2a_unlocked(dl_harq_proc* h, +int sched_ue::generate_format2a_unlocked(uint32_t pid, sched_interface::dl_sched_data_t* data, uint32_t tti, uint32_t cc_idx, uint32_t cfi, const rbgmask_t& user_mask) { - bool tb_en[SRSLTE_MAX_TB] = {false}; + dl_harq_proc* h = &carriers[cc_idx].harq_ent.dl_harq_procs()[pid]; + bool tb_en[SRSLTE_MAX_TB] = {false}; srslte_dci_dl_t* dci = &data->dci; @@ -589,7 +597,7 @@ int sched_ue::generate_format2a_unlocked(dl_harq_proc* h, } // Generates a Format2 dci -int sched_ue::generate_format2(dl_harq_proc* h, +int sched_ue::generate_format2(uint32_t pid, sched_interface::dl_sched_data_t* data, uint32_t tti, uint32_t cc_idx, @@ -597,7 +605,7 @@ int sched_ue::generate_format2(dl_harq_proc* h, const rbgmask_t& user_mask) { /* Call Format 2a (common) */ - int ret = generate_format2a_unlocked(h, data, tti, cc_idx, cfi, user_mask); + int ret = generate_format2a_unlocked(pid, data, tti, cc_idx, cfi, user_mask); /* Compute precoding information */ data->dci.format = SRSLTE_DCI_FORMAT2; @@ -704,7 +712,7 @@ uint32_t sched_ue::get_max_retx() bool sched_ue::is_first_dl_tx() { for (const sched_ue_carrier& c : carriers) { - for (auto& h : c.dl_harq) { + for (const auto& h : c.harq_ent.dl_harq_procs()) { if (h.nof_tx(0) > 0) { return false; } @@ -836,7 +844,11 @@ uint32_t sched_ue::get_pending_ul_new_data_unlocked(uint32_t tti) // Private lock-free implementation uint32_t 
sched_ue::get_pending_ul_old_data_unlocked(uint32_t cc_idx) { - return carriers[cc_idx].get_pending_ul_old_data(); + uint32_t pending_data = 0; + for (auto& h : carriers[cc_idx].harq_ent.ul_harq_procs()) { + pending_data += h.get_pending_data(); + } + return pending_data; } uint32_t sched_ue::get_required_prb_dl(uint32_t cc_idx, uint32_t req_bytes, uint32_t nof_ctrl_symbols) @@ -891,16 +903,11 @@ bool sched_ue::is_sr_triggered() return sr; } -void sched_ue::reset_pending_pids(uint32_t tti_rx, uint32_t cc_idx) -{ - carriers[cc_idx].reset_old_pending_pids(tti_rx); -} - /* Gets HARQ process with oldest pending retx */ dl_harq_proc* sched_ue::get_pending_dl_harq(uint32_t tti_tx_dl, uint32_t ue_cc_idx) { if (ue_cc_idx < carriers.size() and carriers[ue_cc_idx].is_active()) { - return carriers[ue_cc_idx].get_pending_dl_harq(tti_tx_dl); + return carriers[ue_cc_idx].harq_ent.get_pending_dl_harq(tti_tx_dl); } return nullptr; } @@ -908,7 +915,7 @@ dl_harq_proc* sched_ue::get_pending_dl_harq(uint32_t tti_tx_dl, uint32_t ue_cc_i dl_harq_proc* sched_ue::get_empty_dl_harq(uint32_t tti_tx_dl, uint32_t ue_cc_idx) { if (ue_cc_idx < carriers.size() and carriers[ue_cc_idx].is_active()) { - return carriers[ue_cc_idx].get_empty_dl_harq(tti_tx_dl); + return carriers[ue_cc_idx].harq_ent.get_empty_dl_harq(tti_tx_dl); } return nullptr; } @@ -916,24 +923,14 @@ dl_harq_proc* sched_ue::get_empty_dl_harq(uint32_t tti_tx_dl, uint32_t ue_cc_idx ul_harq_proc* sched_ue::get_ul_harq(uint32_t tti_tx_ul, uint32_t ue_cc_idx) { if (ue_cc_idx < carriers.size() and carriers[ue_cc_idx].is_active()) { - return carriers[ue_cc_idx].get_ul_harq(tti_tx_ul); - } - return nullptr; -} - -dl_harq_proc* sched_ue::find_dl_harq(uint32_t tti_rx, uint32_t ue_cc_idx) -{ - for (auto& h : carriers[ue_cc_idx].dl_harq) { - if (h.get_tti() == tti_rx) { - return &h; - } + return carriers[ue_cc_idx].harq_ent.get_ul_harq(tti_tx_ul); } return nullptr; } -dl_harq_proc* sched_ue::get_dl_harq(uint32_t idx, uint32_t ue_cc_idx) +const dl_harq_proc& sched_ue::get_dl_harq(uint32_t idx, uint32_t ue_cc_idx) const { - return &carriers[ue_cc_idx].dl_harq[idx]; + return carriers[ue_cc_idx].harq_ent.dl_harq_procs()[idx]; } std::pair sched_ue::get_cell_index(uint32_t enb_cc_idx) const @@ -957,11 +954,8 @@ void sched_ue::finish_tti(const tti_params_t& tti_params, uint32_t enb_cc_idx) } uint32_t ue_cc_idx = p.second; - /* Reset pending ACKs and clean-up all the UL Harqs with maxretx == 0 */ - get_ul_harq(tti_params.tti_tx_ul, ue_cc_idx)->reset_pending_data(); - /* reset PIDs with pending data or blocked */ - reset_pending_pids(tti_params.tti_rx, ue_cc_idx); + carriers[ue_cc_idx].harq_ent.reset_pending_data(tti_params.tti_rx); } srslte_dci_format_t sched_ue::get_dci_format() @@ -1088,17 +1082,12 @@ sched_ue_carrier::sched_ue_carrier(const sched_interface::ue_cfg_t& cfg_, cell_params(&cell_cfg_), rnti(rnti_), log_h(srslte::logmap::get("MAC ")), - ue_cc_idx(ue_cc_idx_) + ue_cc_idx(ue_cc_idx_), + harq_ent(SCHED_MAX_HARQ_PROC, SCHED_MAX_HARQ_PROC) { // only PCell starts active. Remaining ones wait for valid CQI active = ue_cc_idx == 0; - // Init HARQ processes - for (uint32_t i = 0; i < dl_harq.size(); ++i) { - dl_harq[i].init(i); - ul_harq[i].init(i); - } - // set max mcs max_mcs_ul = cell_params->sched_cfg->pusch_max_mcs >= 0 ? cell_params->sched_cfg->pusch_max_mcs : 28; max_mcs_dl = cell_params->sched_cfg->pdsch_max_mcs >= 0 ? 
cell_params->sched_cfg->pdsch_max_mcs : 28; @@ -1128,12 +1117,7 @@ void sched_ue_carrier::reset() dl_cqi_tti = 0; ul_cqi = 1; ul_cqi_tti = 0; - for (uint32_t i = 0; i < dl_harq.size(); ++i) { - for (uint32_t tb = 0; tb < SRSLTE_MAX_TB; tb++) { - dl_harq[i].reset(tb); - ul_harq[i].reset(tb); - } - } + harq_ent.reset(); } void sched_ue_carrier::set_cfg(const sched_interface::ue_cfg_t& cfg_) @@ -1144,95 +1128,7 @@ void sched_ue_carrier::set_cfg(const sched_interface::ue_cfg_t& cfg_) } cfg = &cfg_; // Config HARQ processes - for (uint32_t i = 0; i < dl_harq.size(); ++i) { - dl_harq[i].set_cfg(cfg->maxharq_tx); - ul_harq[i].set_cfg(cfg->maxharq_tx); - } -} - -void sched_ue_carrier::reset_old_pending_pids(uint32_t tti_rx) -{ - uint32_t tti_tx_dl = TTI_TX(tti_rx), tti_tx_ul = TTI_RX_ACK(tti_rx); - - // UL Harqs - get_ul_harq(tti_tx_ul)->reset_pending_data(); - - // DL harqs - for (auto& h : dl_harq) { - h.reset_pending_data(); - if (not h.is_empty()) { - uint32_t tti_diff = srslte_tti_interval(tti_tx_dl, h.get_tti()); - if (tti_diff > 50 and tti_diff < 10240 / 2) { - log_h->info("SCHED: pid=%d is old. tti_pid=%d, now is %d, resetting\n", h.get_id(), h.get_tti(), tti_tx_dl); - for (uint32_t tb = 0; tb < SRSLTE_MAX_TB; tb++) { - h.reset(tb); - } - } - } - } -} - -dl_harq_proc* sched_ue_carrier::get_pending_dl_harq(uint32_t tti_tx_dl) -{ - if (not ASYNC_DL_SCHED) { - dl_harq_proc* h = &dl_harq[tti_tx_dl % SCHED_MAX_HARQ_PROC]; - return h->is_empty() ? nullptr : h; - } - - int oldest_idx = -1; - uint32_t oldest_tti = 0; - for (auto& h : dl_harq) { - if (h.has_pending_retx(0, tti_tx_dl) or h.has_pending_retx(1, tti_tx_dl)) { - uint32_t x = srslte_tti_interval(tti_tx_dl, h.get_tti()); - if (x > oldest_tti) { - oldest_idx = h.get_id(); - oldest_tti = x; - } - } - } - dl_harq_proc* h = nullptr; - if (oldest_idx >= 0) { - h = &dl_harq[oldest_idx]; - } - return h; -} - -dl_harq_proc* sched_ue_carrier::get_empty_dl_harq(uint32_t tti_tx_dl) -{ - if (not ASYNC_DL_SCHED) { - dl_harq_proc* h = &dl_harq[tti_tx_dl % SCHED_MAX_HARQ_PROC]; - return h->is_empty() ? nullptr : h; - } - - auto it = std::find_if(dl_harq.begin(), dl_harq.end(), [](dl_harq_proc& h) { return h.is_empty(); }); - return it != dl_harq.end() ? 
&(*it) : nullptr; -} - -int sched_ue_carrier::set_ack_info(uint32_t tti_rx, uint32_t tb_idx, bool ack) -{ - for (auto& h : dl_harq) { - if (TTI_TX(h.get_tti()) == tti_rx) { - Debug("SCHED: Set ACK=%d for rnti=0x%x, pid=%d, tb=%d, tti=%d\n", ack, rnti, h.get_id(), tb_idx, tti_rx); - h.set_ack(tb_idx, ack); - return h.get_tbs(tb_idx); - } - } - Warning("SCHED: Received ACK info for unknown TTI=%d\n", tti_rx); - return -1; -} - -ul_harq_proc* sched_ue_carrier::get_ul_harq(uint32_t tti) -{ - return &ul_harq[tti % SCHED_MAX_HARQ_PROC]; -} - -uint32_t sched_ue_carrier::get_pending_ul_old_data() -{ - uint32_t pending_data = 0; - for (auto& h : ul_harq) { - pending_data += h.get_pending_data(); - } - return pending_data; + harq_ent.set_cfg(cfg->maxharq_tx); } /* Find lowest DCI aggregation level supported by the UE spectral efficiency */ diff --git a/srsenb/test/mac/scheduler_test_common.cc b/srsenb/test/mac/scheduler_test_common.cc index 337ce7a3b..46ade7f50 100644 --- a/srsenb/test/mac/scheduler_test_common.cc +++ b/srsenb/test/mac/scheduler_test_common.cc @@ -686,7 +686,7 @@ int common_sched_tester::process_ack_txs() } const ack_info_t& dl_ack = ack_it.second; - srsenb::dl_harq_proc* h = ue_db[dl_ack.rnti].get_dl_harq(ack_it.second.dl_harq.get_id(), dl_ack.ue_cc_idx); + const srsenb::dl_harq_proc& h = ue_db[dl_ack.rnti].get_dl_harq(ack_it.second.dl_harq.get_id(), dl_ack.ue_cc_idx); const srsenb::dl_harq_proc& hack = dl_ack.dl_harq; CONDERROR(hack.is_empty(), "The acked DL harq was not active\n"); @@ -700,12 +700,12 @@ int common_sched_tester::process_ack_txs() CONDERROR(not ret, "The dl harq proc that was ACKed does not exist\n"); if (dl_ack.ack) { - CONDERROR(!h->is_empty(), "ACKed dl harq was not emptied\n"); - CONDERROR(h->has_pending_retx(0, tti_info.tti_params.tti_tx_dl), "ACKed dl harq still has pending retx\n"); + CONDERROR(!h.is_empty(), "ACKed dl harq was not emptied\n"); + CONDERROR(h.has_pending_retx(0, tti_info.tti_params.tti_tx_dl), "ACKed dl harq still has pending retx\n"); tester_log->info( "DL ACK tti=%u rnti=0x%x pid=%d\n", tti_info.tti_params.tti_rx, dl_ack.rnti, dl_ack.dl_harq.get_id()); } else { - CONDERROR(h->is_empty() and hack.nof_retx(0) + 1 < hack.max_nof_retx(), "NACKed DL harq got emptied\n"); + CONDERROR(h.is_empty() and hack.nof_retx(0) + 1 < hack.max_nof_retx(), "NACKed DL harq got emptied\n"); } } @@ -755,9 +755,9 @@ int common_sched_tester::schedule_acks() ack_data.tti = FDD_HARQ_DELAY_MS + tti_info.tti_params.tti_tx_dl; ack_data.enb_cc_idx = ccidx; ack_data.ue_cc_idx = ue_db[ack_data.rnti].get_cell_index(ccidx).second; - const srsenb::dl_harq_proc* dl_h = + const srsenb::dl_harq_proc& dl_h = ue_db[ack_data.rnti].get_dl_harq(tti_info.dl_sched_result[ccidx].data[i].dci.pid, ack_data.ue_cc_idx); - ack_data.dl_harq = *dl_h; + ack_data.dl_harq = dl_h; if (ack_data.dl_harq.nof_retx(0) == 0) { ack_data.ack = randf() > sim_args0.P_retx; } else { // always ack after three retxs diff --git a/srsenb/test/mac/scheduler_test_rand.cc b/srsenb/test/mac/scheduler_test_rand.cc index f2ff58dbe..41bfbbb8d 100644 --- a/srsenb/test/mac/scheduler_test_rand.cc +++ b/srsenb/test/mac/scheduler_test_rand.cc @@ -36,7 +36,6 @@ #include "srslte/interfaces/enb_interfaces.h" #include "srslte/interfaces/sched_interface.h" #include "srslte/phy/utils/debug.h" -#include "srslte/radio/radio.h" #include "scheduler_test_common.h" #include "scheduler_test_utils.h" @@ -176,8 +175,8 @@ void sched_tester::before_sched() tti_data.total_ues.has_ul_newtx |= d.has_ul_newtx; for (uint32_t i = 0; i < 2 * 
FDD_HARQ_DELAY_MS; ++i) { - const srsenb::dl_harq_proc* h = user->get_dl_harq(i, CARRIER_IDX); - tti_data.ue_data[rnti].dl_harqs[i] = *h; + const srsenb::dl_harq_proc& h = user->get_dl_harq(i, CARRIER_IDX); + tti_data.ue_data[rnti].dl_harqs[i] = h; } // NOTE: ACK might have just cleared the harq for tti_info.tti_params.tti_tx_ul tti_data.ue_data[rnti].ul_harq = *user->get_ul_harq(tti_info.tti_params.tti_tx_ul, CARRIER_IDX); @@ -284,24 +283,23 @@ int sched_tester::test_harqs() const auto& data = tti_info.dl_sched_result[CARRIER_IDX].data[i]; uint32_t h_id = data.dci.pid; uint16_t rnti = data.dci.rnti; - const srsenb::dl_harq_proc* h = ue_db[rnti].get_dl_harq(h_id, CARRIER_IDX); - CONDERROR(h == nullptr, "scheduled DL harq pid=%d does not exist\n", h_id); - CONDERROR(h->is_empty(), "Cannot schedule an empty harq proc\n"); - CONDERROR(h->get_tti() != tti_info.tti_params.tti_tx_dl, + const srsenb::dl_harq_proc& h = ue_db[rnti].get_dl_harq(h_id, CARRIER_IDX); + CONDERROR(h.is_empty(), "Cannot schedule an empty harq proc\n"); + CONDERROR(h.get_tti() != tti_info.tti_params.tti_tx_dl, "The scheduled DL harq pid=%d does not a valid tti=%u\n", h_id, tti_info.tti_params.tti_tx_dl); - CONDERROR(h->get_n_cce() != data.dci.location.ncce, "Harq DCI location does not match with result\n"); + CONDERROR(h.get_n_cce() != data.dci.location.ncce, "Harq DCI location does not match with result\n"); if (tti_data.ue_data[rnti].dl_harqs[h_id].has_pending_retx(0, tti_info.tti_params.tti_tx_dl)) { // retx - CONDERROR(tti_data.ue_data[rnti].dl_harqs[h_id].nof_retx(0) + 1 != h->nof_retx(0), + CONDERROR(tti_data.ue_data[rnti].dl_harqs[h_id].nof_retx(0) + 1 != h.nof_retx(0), "A dl harq of user rnti=0x%x was likely overwritten.\n", rnti); - CONDERROR(h->nof_retx(0) >= sim_args0.ue_cfg.maxharq_tx, + CONDERROR(h.nof_retx(0) >= sim_args0.ue_cfg.maxharq_tx, "The number of retx=%d exceeded its max=%d\n", - h->nof_retx(0), + h.nof_retx(0), sim_args0.ue_cfg.maxharq_tx); } else { // newtx - CONDERROR(h->nof_retx(0) != 0, "A new harq was scheduled but with invalid number of retxs\n"); + CONDERROR(h.nof_retx(0) != 0, "A new harq was scheduled but with invalid number of retxs\n"); } } @@ -362,11 +360,11 @@ int sched_tester::test_harqs() if (check_old_pids) { for (auto& user : ue_db) { for (int i = 0; i < 2 * FDD_HARQ_DELAY_MS; i++) { - if (not(user.second.get_dl_harq(i, CARRIER_IDX)->is_empty(0) and user.second.get_dl_harq(1, CARRIER_IDX))) { - if (srslte_tti_interval(tti_info.tti_params.tti_tx_dl, user.second.get_dl_harq(i, CARRIER_IDX)->get_tti()) > + if (not user.second.get_dl_harq(i, CARRIER_IDX).is_empty(0)) { + if (srslte_tti_interval(tti_info.tti_params.tti_tx_dl, user.second.get_dl_harq(i, CARRIER_IDX).get_tti()) > 49) { TESTERROR( - "The pid=%d for rnti=0x%x got old.\n", user.second.get_dl_harq(i, CARRIER_IDX)->get_id(), user.first); + "The pid=%d for rnti=0x%x got old.\n", user.second.get_dl_harq(i, CARRIER_IDX).get_id(), user.first); } } }
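For reviewers, the per-TTI flow this patch centralizes in the new entity is: pick the oldest DL harq with a pending retx (or an empty one for a new transmission), index the UL harq directly by tti_tx_ul, resolve DL ACKs by tti_rx inside the entity, and clear stale state at the end of the TTI. The following is a minimal, illustrative C++ sketch of that flow against the harq_entity interface declared above; it is not part of the patch, and the helper name run_tti is hypothetical.

// Illustrative sketch only (not part of the patch): per-TTI usage of the new
// harq_entity API, mirroring what sched_ue_carrier/sf_sched do after this change.
// Assumes the srsenb scheduler headers introduced above are on the include path.
#include <cstdint>
#include <cstdio>
#include <utility>

#include "srsenb/hdr/stack/mac/scheduler_harq.h"

static void run_tti(srsenb::harq_entity& harq_ent, uint32_t tti_tx_dl, uint32_t tti_tx_ul, uint32_t tti_rx)
{
  // Retransmissions first: oldest DL harq with a pending retx, if any.
  srsenb::dl_harq_proc* dl_h = harq_ent.get_pending_dl_harq(tti_tx_dl);
  if (dl_h == nullptr) {
    // Otherwise take an empty DL harq for a new transmission.
    dl_h = harq_ent.get_empty_dl_harq(tti_tx_dl);
  }

  // UL harqs are addressed directly by tti_tx_ul (synchronous UL HARQ).
  srsenb::ul_harq_proc* ul_h = harq_ent.get_ul_harq(tti_tx_ul);

  // DL ACK/NACK: the entity resolves the pid from tti_rx and returns {pid, tbs};
  // a negative tbs means no DL harq matched the given tti_rx.
  std::pair<uint32_t, int> res = harq_ent.set_ack_info(tti_rx, /*tb_idx=*/0, /*ack=*/true);
  if (res.second < 0) {
    std::printf("no DL harq matches tti_rx=%u\n", tti_rx);
  }

  // End of TTI: clear pending UL ACK state and reset DL harqs that got too old.
  harq_ent.reset_pending_data(tti_rx);

  (void)dl_h;
  (void)ul_h;
}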