From d8203acd568c8e79419f8ecc68fd7400b3c23d4f Mon Sep 17 00:00:00 2001
From: Francisco Paisana
Date: Fri, 6 Nov 2020 12:51:07 +0000
Subject: [PATCH] implement harq resuming functionality

---
 srsenb/hdr/stack/mac/scheduler_ue.h    |  2 +-
 srsenb/src/stack/mac/scheduler_grid.cc | 22 ++++++++++++++++++++--
 srsenb/src/stack/mac/scheduler_ue.cc   |  4 ++--
 srsenb/test/mac/scheduler_test_rand.cc | 10 +++++++++-
 4 files changed, 32 insertions(+), 6 deletions(-)

diff --git a/srsenb/hdr/stack/mac/scheduler_ue.h b/srsenb/hdr/stack/mac/scheduler_ue.h
index eee2cf8b9..2cbed4815 100644
--- a/srsenb/hdr/stack/mac/scheduler_ue.h
+++ b/srsenb/hdr/stack/mac/scheduler_ue.h
@@ -245,7 +245,7 @@ public:
   cc_sched_ue* find_ue_carrier(uint32_t enb_cc_idx);
   size_t nof_carriers_configured() const { return carriers.size(); }
   std::bitset<SRSLTE_MAX_CARRIERS> scell_activation_mask() const;
-  int find_enb_cc_idx(uint32_t enb_cc_idx) const;
+  int enb_to_ue_cc_idx(uint32_t enb_cc_idx) const;
 
   bool needs_cqi(uint32_t tti, uint32_t cc_idx, bool will_send = false);
   uint32_t get_max_retx();
diff --git a/srsenb/src/stack/mac/scheduler_grid.cc b/srsenb/src/stack/mac/scheduler_grid.cc
index 20e94e8c5..d91be7a19 100644
--- a/srsenb/src/stack/mac/scheduler_grid.cc
+++ b/srsenb/src/stack/mac/scheduler_grid.cc
@@ -600,7 +600,8 @@ void sf_sched::new_tti(tti_point tti_rx_, sf_sched_result* cc_results_)
     prbmask_t prach_mask{cc_cfg->nof_prb()};
     prach_mask.fill(cc_cfg->cfg.prach_freq_offset, cc_cfg->cfg.prach_freq_offset + 6);
     reserve_ul_prbs(prach_mask, cc_cfg->nof_prb() != 6);
-    log_h->debug("SCHED: Allocated PRACH RBs. Mask: 0x%s\n", prach_mask.to_hex().c_str());
+    log_h->debug(
+        "SCHED: Allocated PRACH RBs for tti_tx_ul=%d. Mask: 0x%s\n", tti_params.tti_tx_ul, prach_mask.to_hex().c_str());
   }
 
   // setup first prb to be used for msg3 alloc. Account for potential PRACH alloc
@@ -770,7 +771,7 @@ alloc_outcome_t sf_sched::alloc_dl_user(sched_ue* user, const rbgmask_t& user_ma
   }
 
   // Check if allocation would cause segmentation
-  uint32_t ue_cc_idx = user->find_enb_cc_idx(cc_cfg->enb_cc_idx);
+  uint32_t ue_cc_idx = user->enb_to_ue_cc_idx(cc_cfg->enb_cc_idx);
   const dl_harq_proc& h = user->get_dl_harq(pid, ue_cc_idx);
   if (h.is_empty()) {
     // It is newTx
@@ -1227,6 +1228,23 @@ void sf_sched::generate_sched_results(sched_ue_list& ue_db)
 {
   cc_sched_result* cc_result = cc_results->get_cc(cc_cfg->enb_cc_idx);
 
+  /* Resume UL HARQs with pending retxs that did not get allocated */
+  using phich_t = sched_interface::ul_sched_phich_t;
+  auto& phich_list = cc_result->ul_sched_result.phich;
+  for (uint32_t i = 0; i < cc_result->ul_sched_result.nof_phich_elems; ++i) {
+    auto& phich = phich_list[i];
+    if (phich.phich == phich_t::NACK) {
+      auto& ue = ue_db[phich.rnti];
+      int ue_cc_idx = ue.enb_to_ue_cc_idx(cc_cfg->enb_cc_idx);
+      ul_harq_proc* h = (ue_cc_idx >= 0) ? ue.get_ul_harq(get_tti_tx_ul(), ue_cc_idx) : nullptr;
+      if (not is_ul_alloc(ue.get_rnti()) and h != nullptr and not h->is_empty()) {
+        // There was a missed UL harq retx. Halt+Resume the HARQ
+        phich.phich = phich_t::ACK;
+        log_h->debug("SCHED: rnti=0x%x UL harq pid=%d is being resumed\n", ue.get_rnti(), h->get_id());
+      }
+    }
+  }
+
   /* Pick one of the possible DCI masks */
   pdcch_grid_t::alloc_result_t dci_result;
   //  tti_alloc.get_pdcch_grid().result_to_string();
diff --git a/srsenb/src/stack/mac/scheduler_ue.cc b/srsenb/src/stack/mac/scheduler_ue.cc
index dc32a3a2c..dbd169957 100644
--- a/srsenb/src/stack/mac/scheduler_ue.cc
+++ b/srsenb/src/stack/mac/scheduler_ue.cc
@@ -1200,7 +1200,7 @@ sched_dci_cce_t* sched_ue::get_locations(uint32_t enb_cc_idx, uint32_t cfi, uint
 
 cc_sched_ue* sched_ue::find_ue_carrier(uint32_t enb_cc_idx)
 {
-  int ue_cc_idx = find_enb_cc_idx(enb_cc_idx);
+  int ue_cc_idx = enb_to_ue_cc_idx(enb_cc_idx);
   return ue_cc_idx >= 0 ? &carriers[ue_cc_idx] : nullptr;
 }
 
@@ -1215,7 +1215,7 @@ std::bitset<SRSLTE_MAX_CARRIERS> sched_ue::scell_activation_mask() const
   return ret;
 }
 
-int sched_ue::find_enb_cc_idx(uint32_t enb_cc_idx) const
+int sched_ue::enb_to_ue_cc_idx(uint32_t enb_cc_idx) const
 {
   auto it = std::find_if(carriers.begin(), carriers.end(), [enb_cc_idx](const cc_sched_ue& c) {
     return c.get_cell_cfg()->enb_cc_idx == enb_cc_idx;
diff --git a/srsenb/test/mac/scheduler_test_rand.cc b/srsenb/test/mac/scheduler_test_rand.cc
index 8c6210aab..e13347529 100644
--- a/srsenb/test/mac/scheduler_test_rand.cc
+++ b/srsenb/test/mac/scheduler_test_rand.cc
@@ -324,7 +324,15 @@ int sched_tester::test_harqs()
       CONDERROR(not hprev.has_pending_ack(), "Alloc PHICH did not have any pending ack\n");
       bool maxretx_flag = hprev.nof_retx(0) + 1 >= hprev.max_nof_retx();
       if (phich.phich == sched_interface::ul_sched_phich_t::ACK) {
-        CONDERROR(!hprev.is_empty(), "ack phich for UL harq that is not empty\n");
+        // The harq can be either ACKed or Resumed
+        if (not hprev.is_empty()) {
+          // In case it was resumed
+          CONDERROR(h == nullptr or h->is_empty(), "Cannot resume empty UL harq\n");
+          for (uint32_t j = 0; j < tti_info.ul_sched_result[CARRIER_IDX].nof_dci_elems; ++j) {
+            auto& pusch = tti_info.ul_sched_result[CARRIER_IDX].pusch[j];
+            CONDERROR(pusch.dci.rnti == phich.rnti, "Cannot send PHICH::ACK for same harq that got UL grant.\n");
+          }
+        }
       } else {
         CONDERROR(h->get_pending_data() == 0 and !maxretx_flag, "NACKed harq has no pending data\n");
       }
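
Note on the approach: a PHICH NACK normally triggers a non-adaptive UL
retransmission on the same PRBs as the previous attempt. When the scheduler
was unable to reserve those PRBs for the retx, it now overwrites the PHICH
with an ACK, so the UE halts the HARQ process rather than retransmitting on
resources the scheduler did not set aside for it; the eNB can later resume
the HARQ with an adaptive UL grant. Below is a minimal, self-contained C++
sketch of that rule. The types and names used here (phich_t, phich_elem,
harq_state, resume_unallocated_harqs) are illustrative stand-ins, not the
scheduler's real interfaces.

#include <cstdint>
#include <cstdio>
#include <vector>

enum class phich_t { ACK, NACK };

struct phich_elem {
  uint16_t rnti;
  phich_t  phich;
};

struct harq_state {
  uint16_t rnti;
  uint32_t pid;
  bool     is_empty;     // true once the HARQ buffer has been flushed
  bool     has_ul_alloc; // true if this TTI carries a PUSCH grant for the UE
};

// Flip NACK->ACK for every HARQ that still has a pending retx but received
// no UL allocation in this TTI. The ACK halts the UE's non-adaptive retx;
// the HARQ is resumed later through an adaptive grant.
void resume_unallocated_harqs(std::vector<phich_elem>& phich_list, const std::vector<harq_state>& harqs)
{
  for (auto& phich : phich_list) {
    if (phich.phich != phich_t::NACK) {
      continue;
    }
    for (const auto& h : harqs) {
      if (h.rnti == phich.rnti and not h.is_empty and not h.has_ul_alloc) {
        phich.phich = phich_t::ACK;
        std::printf("SCHED: rnti=0x%x UL harq pid=%u is being resumed\n", h.rnti, h.pid);
      }
    }
  }
}

int main()
{
  std::vector<phich_elem> phich_list = {{0x46, phich_t::NACK}, {0x47, phich_t::NACK}};
  std::vector<harq_state> harqs      = {{0x46, 0, false, false},  // pending retx, no grant -> halted+resumed
                                        {0x47, 1, false, true}};  // pending retx, got a retx grant -> NACK kept
  resume_unallocated_harqs(phich_list, harqs);
  return 0;
}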