From 47b05118ad3f50e42a224cbc933c08fa5b0af6f6 Mon Sep 17 00:00:00 2001
From: Francisco Paisana
Date: Wed, 4 Mar 2020 17:05:07 +0000
Subject: [PATCH] simplified msg3 allocation. Now we can allocate resources
 ahead of time (e.g. msg3 is 2 TTIs ahead) using the sf_sched interface. It's
 guaranteed that the given allocations won't be erased when the respective
 TTI starts

---
 srsenb/hdr/stack/mac/scheduler_carrier.h  |   1 -
 srsenb/hdr/stack/mac/scheduler_grid.h     |  52 +++++----
 srsenb/src/stack/mac/scheduler.cc         |   4 +-
 srsenb/src/stack/mac/scheduler_carrier.cc |  68 +----------
 srsenb/src/stack/mac/scheduler_grid.cc    | 132 ++++++++++++++--------
 srsenb/test/mac/scheduler_test_rand.cc    |  13 +--
 6 files changed, 127 insertions(+), 143 deletions(-)

diff --git a/srsenb/hdr/stack/mac/scheduler_carrier.h b/srsenb/hdr/stack/mac/scheduler_carrier.h
index 12b78e23c..07202da94 100644
--- a/srsenb/hdr/stack/mac/scheduler_carrier.h
+++ b/srsenb/hdr/stack/mac/scheduler_carrier.h
@@ -44,7 +44,6 @@ public:
   const sf_sched* get_sf_sched_ptr(uint32_t tti_rx) const { return &sf_scheds[tti_rx % sf_scheds.size()]; }
 
 private:
-  void generate_phich(sf_sched* tti_sched);
   //! Compute DL scheduler result for given TTI
   void alloc_dl_users(sf_sched* tti_result);
   //! Compute UL scheduler result for given TTI
diff --git a/srsenb/hdr/stack/mac/scheduler_grid.h b/srsenb/hdr/stack/mac/scheduler_grid.h
index 93e2eea11..d335086de 100644
--- a/srsenb/hdr/stack/mac/scheduler_grid.h
+++ b/srsenb/hdr/stack/mac/scheduler_grid.h
@@ -176,6 +176,13 @@ public:
 class sf_sched : public dl_sf_sched_itf, public ul_sf_sched_itf
 {
 public:
+  struct sf_sched_result {
+    sched_interface::dl_sched_res_t dl_sched_result;
+    sched_interface::ul_sched_res_t ul_sched_result;
+    rbgmask_t                       dl_mask;    ///< Accumulation of all DL RBG allocations
+    prbmask_t                       ul_mask;    ///< Accumulation of all UL PRB allocations
+    pdcch_mask_t                    pdcch_mask; ///< Accumulation of all CCE allocations
+  };
   struct ctrl_alloc_t {
     size_t       dci_idx;
     rbg_range_t  rbg_range;
@@ -227,11 +234,8 @@ public:
   };
   typedef std::pair<alloc_outcome_t, ctrl_alloc_t> ctrl_code_t;
 
-  // TTI scheduler result
-  sched_interface::dl_sched_res_t dl_sched_result;
-  sched_interface::ul_sched_res_t ul_sched_result;
-
   // Control/Configuration Methods
+  sf_sched();
   void init(const sched_cell_params_t& cell_params_);
   void new_tti(uint32_t tti_rx_, uint32_t start_cfi);
   void reset();
@@ -243,10 +247,11 @@ public:
   bool reserve_dl_rbgs(uint32_t rbg_start, uint32_t rbg_end) { return tti_alloc.reserve_dl_rbgs(rbg_start, rbg_end); }
 
   // UL alloc methods
-  alloc_outcome_t alloc_msg3(const pending_msg3_t& msg3);
+  alloc_outcome_t alloc_msg3(sched_ue* user, const pending_msg3_t& msg3);
   alloc_outcome_t
   alloc_ul(sched_ue* user, ul_harq_proc::ul_alloc_t alloc, sf_sched::ul_alloc_t::type_t alloc_type, uint32_t mcs = 0);
   bool reserve_ul_prbs(const prbmask_t& ulmask, bool strict) { return tti_alloc.reserve_ul_prbs(ulmask, strict); }
+  bool alloc_phich(sched_ue* user);
 
   // compute DCIs and generate dl_sched_result/ul_sched_result for a given TTI
   void generate_sched_results();
@@ -262,12 +267,10 @@ public:
   uint32_t get_tti_tx_ul() const final { return tti_params.tti_tx_ul; }
 
   // getters
-  uint32_t get_tti_rx() const { return tti_params.tti_rx; }
-  const tti_params_t& get_tti_params() const { return tti_params; }
-  std::deque<pending_msg3_t>& get_pending_msg3() { return pending_msg3s; }
-  const std::deque<pending_msg3_t>& get_pending_msg3() const { return pending_msg3s; }
+  uint32_t            get_tti_rx() const { return tti_params.tti_rx; }
+  const tti_params_t& get_tti_params() const { return tti_params; }
 
-  const std::tuple<pdcch_mask_t, rbgmask_t, prbmask_t> last_sched_result_masks() const;
+  const sf_sched_result& last_sched_result() const { return *last_sf_result; }
 
 private:
   bool is_dl_alloc(sched_ue* user) const final;
@@ -289,19 +292,22 @@ private:
   srslte::log_ref log_h;
 
   // internal state
-  tti_params_t tti_params{10241};
-  sf_grid_t tti_alloc;
-  std::vector<bc_alloc_t> bc_allocs;
-  std::vector<rar_alloc_t> rar_allocs;
-  std::vector<dl_alloc_t> data_allocs;
-  std::vector<ul_alloc_t> ul_data_allocs;
-  std::deque<pending_msg3_t> pending_msg3s;
-  uint32_t last_msg3_prb = 0, max_msg3_prb = 0;
-
-  // Store last decisions
-  rbgmask_t last_dl_mask;
-  prbmask_t last_ul_mask;
-  pdcch_mask_t last_pdcch_mask;
+  sf_grid_t                      tti_alloc;
+  std::vector<bc_alloc_t>        bc_allocs;
+  std::vector<rar_alloc_t>       rar_allocs;
+  std::vector<dl_alloc_t>        data_allocs;
+  std::vector<ul_alloc_t>        ul_data_allocs;
+  uint32_t                       last_msg3_prb = 0, max_msg3_prb = 0;
+  std::array<sf_sched_result, 2> sched_result_resources = {};
+
+  // Next TTI state
+  tti_params_t                     tti_params{10241};
+  sf_sched_result*                 current_sf_result = nullptr;
+  sched_interface::dl_sched_res_t* dl_sched_result   = nullptr;
+  sched_interface::ul_sched_res_t* ul_sched_result   = nullptr;
+
+  // Last subframe scheduler result
+  sf_sched_result* last_sf_result = nullptr;
 };
 
 } // namespace srsenb
diff --git a/srsenb/src/stack/mac/scheduler.cc b/srsenb/src/stack/mac/scheduler.cc
index b748c94a4..7cf27905d 100644
--- a/srsenb/src/stack/mac/scheduler.cc
+++ b/srsenb/src/stack/mac/scheduler.cc
@@ -386,7 +386,7 @@ int sched::dl_sched(uint32_t tti, uint32_t cc_idx, sched_interface::dl_sched_res
     sf_sched* tti_sched = carrier_schedulers[cc_idx]->generate_tti_result(tti_rx);
 
     // copy result
-    sched_result = tti_sched->dl_sched_result;
+    sched_result = tti_sched->last_sched_result().dl_sched_result;
   }
 
   return 0;
@@ -407,7 +407,7 @@ int sched::ul_sched(uint32_t tti, uint32_t cc_idx, srsenb::sched_interface::ul_s
     sf_sched* tti_sched = carrier_schedulers[cc_idx]->generate_tti_result(tti_rx);
 
     // copy result
-    sched_result = tti_sched->ul_sched_result;
+    sched_result = tti_sched->last_sched_result().ul_sched_result;
   }
 
   return SRSLTE_SUCCESS;
diff --git a/srsenb/src/stack/mac/scheduler_carrier.cc b/srsenb/src/stack/mac/scheduler_carrier.cc
index 2aed9db5a..d5536c900 100644
--- a/srsenb/src/stack/mac/scheduler_carrier.cc
+++ b/srsenb/src/stack/mac/scheduler_carrier.cc
@@ -198,31 +198,6 @@ void ra_sched::dl_sched(srsenb::sf_sched* tti_sched)
   }
 }
 
-// Schedules Msg3
-void ra_sched::ul_sched(sf_sched* tti_sched)
-{
-  /* schedule pending Msg3s */
-  while (not tti_sched->get_pending_msg3().empty()) {
-    sf_sched::pending_msg3_t& msg3 = tti_sched->get_pending_msg3().front();
-
-    // Verify if user still exists
-    auto user_it = ue_db->find(msg3.rnti);
-    if (user_it == ue_db->end()) {
-      log_h->warning("SCHED: Msg3 allocated for user rnti=0x%x that no longer exists\n", msg3.rnti);
-      tti_sched->get_pending_msg3().pop_front();
-      continue;
-    }
-
-    // Allocate RBGs and HARQ for pending Msg3
-    ul_harq_proc::ul_alloc_t msg3_alloc = {msg3.n_prb, msg3.L};
-    if (not tti_sched->alloc_ul(&user_it->second, msg3_alloc, sf_sched::ul_alloc_t::MSG3, msg3.mcs)) {
-      log_h->warning("SCHED: Could not allocate msg3 within (%d,%d)\n", msg3.n_prb, msg3.n_prb + msg3.L);
-    }
-
-    tti_sched->get_pending_msg3().pop_front();
-  }
-}
-
 int ra_sched::dl_rach_info(dl_sched_rar_info_t rar_info)
 {
   log_h->info("SCHED: New PRACH tti=%d, preamble=%d, temp_crnti=0x%x, ta_cmd=%d, msg3_size=%d\n",
@@ -273,7 +248,7 @@ void ra_sched::sched_msg3(sf_sched* sf_msg3_sched, const sched_interface::dl_sch
     msg3.mcs  = grant.grant.trunc_mcs;
     msg3.rnti = grant.data.temp_crnti;
 
-    if (not sf_msg3_sched->alloc_msg3(msg3)) {
+    if (not sf_msg3_sched->alloc_msg3(&(*ue_db)[msg3.rnti], msg3)) {
       log_h->error(
           "SCHED: Failed to allocate Msg3 for rnti=0x%x at tti=%d\n", msg3.rnti, sf_msg3_sched->get_tti_tx_ul());
     } else {
@@ -350,7 +325,9 @@ sf_sched* sched::carrier_sched::generate_tti_result(uint32_t tti_rx)
     tti_sched->new_tti(tti_rx, start_cfi);
 
     /* Schedule PHICH */
-    generate_phich(tti_sched);
+    for (auto& ue_pair : *ue_db) {
+      tti_sched->alloc_phich(&ue_pair.second);
+    }
 
     /* Schedule DL control data */
     if (dl_active) {
@@ -379,7 +356,7 @@ sf_sched* sched::carrier_sched::generate_tti_result(uint32_t tti_rx)
     /* Enqueue Msg3s derived from allocated RARs */
     if (dl_active) {
       sf_sched* sf_msg3_sched = get_sf_sched(tti_rx + MSG3_DELAY_MS);
-      ra_sched_ptr->sched_msg3(sf_msg3_sched, tti_sched->dl_sched_result);
+      ra_sched_ptr->sched_msg3(sf_msg3_sched, tti_sched->last_sched_result().dl_sched_result);
     }
 
     /* clean-up blocked pids */
@@ -391,38 +368,6 @@ sf_sched* sched::carrier_sched::generate_tti_result(uint32_t tti_rx)
   return tti_sched;
 }
 
-void sched::carrier_sched::generate_phich(sf_sched* tti_sched)
-{
-  // Allocate user PHICHs
-  uint32_t nof_phich_elems = 0;
-  for (auto& ue_pair : *ue_db) {
-    sched_ue& user = ue_pair.second;
-    uint16_t  rnti = ue_pair.first;
-    auto      p    = user.get_cell_index(enb_cc_idx);
-    if (not p.first) {
-      // user does not support this carrier
-      continue;
-    }
-    uint32_t cell_index = p.second;
-
-    // user.has_pucch = false; // TODO: What is this for?
-
-    ul_harq_proc* h = user.get_ul_harq(tti_sched->get_tti_rx(), cell_index);
-
-    /* Indicate PHICH acknowledgment if needed */
-    if (h->has_pending_ack()) {
-      tti_sched->ul_sched_result.phich[nof_phich_elems].phich =
-          h->get_pending_ack() ? ul_sched_phich_t::ACK : ul_sched_phich_t::NACK;
-      tti_sched->ul_sched_result.phich[nof_phich_elems].rnti = rnti;
-      log_h->info("SCHED: Allocated PHICH for rnti=0x%x, value=%s\n",
-                  rnti,
-                  tti_sched->ul_sched_result.phich[nof_phich_elems].phich == ul_sched_phich_t::ACK ? "ACK" : "NACK");
-      nof_phich_elems++;
-    }
-  }
-  tti_sched->ul_sched_result.nof_phich_elems = nof_phich_elems;
-}
-
 void sched::carrier_sched::alloc_dl_users(sf_sched* tti_result)
 {
   if (sf_dl_mask[tti_result->get_tti_tx_dl() % sf_dl_mask.size()] != 0) {
@@ -451,9 +396,6 @@ int sched::carrier_sched::alloc_ul_users(sf_sched* tti_sched)
     log_h->debug("SCHED: Allocated PRACH RBs. Mask: 0x%s\n", prach_mask.to_hex().c_str());
   }
 
-  /* Allocate Msg3 if there's a pending RAR */
-  ra_sched_ptr->ul_sched(tti_sched);
-
   /* reserve PRBs for PUCCH */
   tti_sched->reserve_ul_prbs(pucch_mask, true);
 
diff --git a/srsenb/src/stack/mac/scheduler_grid.cc b/srsenb/src/stack/mac/scheduler_grid.cc
index 58131a632..970842b36 100644
--- a/srsenb/src/stack/mac/scheduler_grid.cc
+++ b/srsenb/src/stack/mac/scheduler_grid.cc
@@ -61,15 +61,24 @@ tti_params_t::tti_params_t(uint32_t tti_rx_) :
 
 void pdcch_grid_t::init(const sched_cell_params_t& cell_params_)
 {
-  cc_cfg = &cell_params_;
-  log_h  = srslte::logmap::get("MAC ");
+  cc_cfg       = &cell_params_;
+  log_h        = srslte::logmap::get("MAC ");
+  current_cfix = cc_cfg->sched_cfg->nof_ctrl_symbols - 1;
   reset();
 }
 
 void pdcch_grid_t::new_tti(const tti_params_t& tti_params_, uint32_t start_cfi)
 {
-  tti_params   = &tti_params_;
-  current_cfix = start_cfi - 1;
+  tti_params = &tti_params_;
+  set_cfi(start_cfi);
+}
+
+void pdcch_grid_t::reset()
+{
+  prev_start = 0;
+  prev_end   = 0;
+  dci_alloc_tree.clear();
+  nof_dci_allocs = 0;
 }
 
 const sched_dci_cce_t* pdcch_grid_t::get_cce_loc_table(alloc_type_t alloc_type, sched_ue* user) const
@@ -191,14 +200,6 @@ uint32_t pdcch_grid_t::nof_cces() const
   return cc_cfg->nof_cce_table[current_cfix];
 }
 
-void pdcch_grid_t::reset()
-{
-  prev_start = 0;
-  prev_end   = 0;
-  dci_alloc_tree.clear();
-  nof_dci_allocs = 0;
-}
-
 void pdcch_grid_t::get_allocs(alloc_result_t* vec, pdcch_mask_t* tot_mask, size_t idx) const
 {
   // if alloc tree is empty
@@ -273,6 +274,9 @@ void sf_grid_t::init(const sched_cell_params_t& cell_params_)
   si_n_rbg  = srslte::ceil_div(4, cc_cfg->P);
   rar_n_rbg = srslte::ceil_div(3, cc_cfg->P);
 
+  dl_mask.resize(nof_rbgs);
+  ul_mask.resize(cc_cfg->nof_prb());
+
   pdcch_alloc.init(*cc_cfg);
   reset();
 }
@@ -282,9 +286,6 @@ void sf_grid_t::new_tti(const tti_params_t& tti_params_, uint32_t start_cfi)
   tti_params = &tti_params_;
 
   // internal state
-  avail_rbg = nof_rbgs;
-  dl_mask.resize(nof_rbgs);
-  ul_mask.resize(cc_cfg->nof_prb());
   pdcch_alloc.new_tti(*tti_params, start_cfi);
 }
 
@@ -293,6 +294,7 @@ void sf_grid_t::reset()
   dl_mask.reset();
   ul_mask.reset();
   pdcch_alloc.reset();
+  avail_rbg = nof_rbgs;
 }
 
 //! Allocates CCEs and RBs for the given mask and allocation type (e.g. data, BC, RAR, paging)
@@ -410,10 +412,18 @@ bool sf_grid_t::reserve_ul_prbs(const prbmask_t& prbmask, bool strict)
  * TTI resource Scheduling Methods
  *******************************************************/
 
+sf_sched::sf_sched() :
+  current_sf_result(&sched_result_resources[0]),
+  dl_sched_result(&sched_result_resources[0].dl_sched_result),
+  ul_sched_result(&sched_result_resources[0].ul_sched_result),
+  last_sf_result(&sched_result_resources[1]),
+  log_h(srslte::logmap::get("MAC "))
+{
+}
+
 void sf_sched::init(const sched_cell_params_t& cell_params_)
 {
   cc_cfg = &cell_params_;
-  log_h  = srslte::logmap::get("MAC ");
   tti_alloc.init(*cc_cfg);
   max_msg3_prb = std::max(6u, cc_cfg->cfg.cell.nof_prb - (uint32_t)cc_cfg->cfg.nrb_pucch);
   reset();
@@ -423,28 +433,32 @@ void sf_sched::new_tti(uint32_t tti_rx_, uint32_t start_cfi)
 {
   tti_params = tti_params_t{tti_rx_};
   tti_alloc.new_tti(tti_params, start_cfi);
-
-  // reset sf result
-  dl_sched_result = {};
-  ul_sched_result = {};
-
-  // setup first prb to be used for msg3 alloc
-  last_msg3_prb           = cc_cfg->cfg.nrb_pucch;
-  uint32_t tti_msg3_alloc = TTI_ADD(tti_params.tti_tx_ul, MSG3_DELAY_MS);
-  if (srslte_prach_tti_opportunity_config_fdd(cc_cfg->cfg.prach_config, tti_msg3_alloc, -1)) {
-    last_msg3_prb = std::max(last_msg3_prb, cc_cfg->cfg.prach_freq_offset + 6);
-  }
 }
 
 void sf_sched::reset()
 {
+  /* Store last results */
+  std::swap(current_sf_result, last_sf_result);
+  last_sf_result->dl_mask = tti_alloc.get_dl_mask();
+  last_sf_result->ul_mask = tti_alloc.get_ul_mask();
+  dl_sched_result         = &current_sf_result->dl_sched_result;
+  ul_sched_result         = &current_sf_result->ul_sched_result;
+  *dl_sched_result        = {};
+  *ul_sched_result        = {};
+
   // reset internal state
   bc_allocs.clear();
   rar_allocs.clear();
   data_allocs.clear();
   ul_data_allocs.clear();
-  tti_alloc.reset();
+
+  // setup first prb to be used for msg3 alloc
+  last_msg3_prb           = cc_cfg->cfg.nrb_pucch;
+  uint32_t tti_msg3_alloc = TTI_ADD(tti_params.tti_tx_ul, MSG3_DELAY_MS);
+  if (srslte_prach_tti_opportunity_config_fdd(cc_cfg->cfg.prach_config, tti_msg3_alloc, -1)) {
+    last_msg3_prb = std::max(last_msg3_prb, cc_cfg->cfg.prach_freq_offset + 6);
+  }
 }
 
 bool sf_sched::is_dl_alloc(sched_ue* user) const
@@ -659,15 +673,38 @@ alloc_outcome_t sf_sched::alloc_ul_user(sched_ue* user, ul_harq_proc::ul_alloc_t
   return alloc_ul(user, alloc, alloc_type);
 }
 
-const std::tuple<pdcch_mask_t, rbgmask_t, prbmask_t> sf_sched::last_sched_result_masks() const
+bool sf_sched::alloc_phich(sched_ue* user)
 {
-  return {last_pdcch_mask, last_dl_mask, last_ul_mask};
+  using phich_t    = sched_interface::ul_sched_phich_t;
+  auto& phich_list = ul_sched_result->phich[ul_sched_result->nof_phich_elems];
+
+  auto p = user->get_cell_index(cc_cfg->enb_cc_idx);
+  if (not p.first) {
+    // user does not support this carrier
+    return false;
+  }
+  uint32_t cell_index = p.second;
+
+  ul_harq_proc* h = user->get_ul_harq(tti_params.tti_rx, cell_index);
+
+  /* Indicate PHICH acknowledgment if needed */
+  if (h->has_pending_ack()) {
+    phich_list.phich = h->get_pending_ack() ? phich_t::ACK : phich_t::NACK;
+    phich_list.rnti  = user->get_rnti();
+    log_h->info("SCHED: Allocated PHICH for rnti=0x%x, value=%s\n",
+                user->get_rnti(),
+                phich_list.phich == phich_t::ACK ? "ACK" : "NACK");
+
+    ul_sched_result->nof_phich_elems++;
+    return true;
+  }
+  return false;
 }
 
 void sf_sched::set_bc_sched_result(const pdcch_grid_t::alloc_result_t& dci_result)
 {
   for (const auto& bc_alloc : bc_allocs) {
-    sched_interface::dl_sched_bc_t* bc = &dl_sched_result.bc[dl_sched_result.nof_bc_elems];
+    sched_interface::dl_sched_bc_t* bc = &dl_sched_result->bc[dl_sched_result->nof_bc_elems];
 
     // assign NCCE/L
     bc->dci.location = dci_result[bc_alloc.dci_idx]->dci_pos;
@@ -729,14 +766,14 @@ void sf_sched::set_bc_sched_result(const pdcch_grid_t::alloc_result_t& dci_resul
             bc->dci.tb[0].mcs_idx);
     }
 
-    dl_sched_result.nof_bc_elems++;
+    dl_sched_result->nof_bc_elems++;
   }
 }
 
 void sf_sched::set_rar_sched_result(const pdcch_grid_t::alloc_result_t& dci_result)
 {
   for (const auto& rar_alloc : rar_allocs) {
-    sched_interface::dl_sched_rar_t* rar = &dl_sched_result.rar[dl_sched_result.nof_rar_elems];
+    sched_interface::dl_sched_rar_t* rar = &dl_sched_result->rar[dl_sched_result->nof_rar_elems];
 
     // Assign NCCE/L
     rar->dci.location = dci_result[rar_alloc.alloc_data.dci_idx]->dci_pos;
@@ -780,14 +817,14 @@ void sf_sched::set_rar_sched_result(const pdcch_grid_t::alloc_result_t& dci_resu
                  msg3_grant.grant.trunc_mcs);
     }
 
-    dl_sched_result.nof_rar_elems++;
+    dl_sched_result->nof_rar_elems++;
   }
 }
 
 void sf_sched::set_dl_data_sched_result(const pdcch_grid_t::alloc_result_t& dci_result)
 {
   for (const auto& data_alloc : data_allocs) {
-    sched_interface::dl_sched_data_t* data = &dl_sched_result.data[dl_sched_result.nof_data_elems];
+    sched_interface::dl_sched_data_t* data = &dl_sched_result->data[dl_sched_result->nof_data_elems];
 
     // Assign NCCE/L
     data->dci.location = dci_result[data_alloc.dci_idx]->dci_pos;
@@ -840,7 +877,7 @@ void sf_sched::set_dl_data_sched_result(const pdcch_grid_t::alloc_result_t& dci_
                  data_before,
                  user->get_pending_dl_new_data());
 
-    dl_sched_result.nof_data_elems++;
+    dl_sched_result->nof_data_elems++;
   }
 }
 
@@ -848,7 +885,7 @@ void sf_sched::set_ul_sched_result(const pdcch_grid_t::alloc_result_t& dci_resul
 {
   /* Set UL data DCI locs and format */
   for (const auto& ul_alloc : ul_data_allocs) {
-    sched_interface::ul_sched_data_t* pusch = &ul_sched_result.pusch[ul_sched_result.nof_dci_elems];
+    sched_interface::ul_sched_data_t* pusch = &ul_sched_result->pusch[ul_sched_result->nof_dci_elems];
 
     sched_ue* user       = ul_alloc.user_ptr;
     uint32_t  cell_index = user->get_cell_index(cc_cfg->enb_cc_idx).second;
@@ -898,14 +935,19 @@ void sf_sched::set_ul_sched_result(const pdcch_grid_t::alloc_result_t& dci_resul
                  pending_data_before,
                  user->get_pending_ul_old_data(cell_index));
 
-    ul_sched_result.nof_dci_elems++;
+    ul_sched_result->nof_dci_elems++;
   }
 }
 
-alloc_outcome_t sf_sched::alloc_msg3(const pending_msg3_t& msg3)
+alloc_outcome_t sf_sched::alloc_msg3(sched_ue* user, const pending_msg3_t& msg3)
 {
-  pending_msg3s.push_back(msg3);
-  return alloc_outcome_t::SUCCESS;
+  // Allocate RBGs and HARQ for pending Msg3
+  ul_harq_proc::ul_alloc_t msg3_alloc = {msg3.n_prb, msg3.L};
+  alloc_outcome_t          ret        = alloc_ul(user, msg3_alloc, sf_sched::ul_alloc_t::MSG3, msg3.mcs);
+  if (not ret) {
+    log_h->warning("SCHED: Could not allocate msg3 within (%d,%d)\n", msg3.n_prb, msg3.n_prb + msg3.L);
+  }
+  return ret;
 }
 
 void sf_sched::generate_sched_results()
@@ -913,10 +955,10 @@ void sf_sched::generate_sched_results()
   /* Pick one of the possible DCI masks */
   pdcch_grid_t::alloc_result_t dci_result;
   // tti_alloc.get_pdcch_grid().result_to_string();
-  tti_alloc.get_pdcch_grid().get_allocs(&dci_result, &last_pdcch_mask);
+  tti_alloc.get_pdcch_grid().get_allocs(&dci_result, &current_sf_result->pdcch_mask);
 
   /* Register final CFI */
-  dl_sched_result.cfi = tti_alloc.get_pdcch_grid().get_cfi();
+  dl_sched_result->cfi = tti_alloc.get_pdcch_grid().get_cfi();
 
   /* Generate DCI formats and fill sched_result structs */
   set_bc_sched_result(dci_result);
@@ -927,10 +969,6 @@ void sf_sched::generate_sched_results()
 
   set_ul_sched_result(dci_result);
 
-  /* Store last results */
-  last_dl_mask = tti_alloc.get_dl_mask();
-  last_ul_mask = tti_alloc.get_ul_mask();
-
   /* Reset all resources */
   reset();
 }
diff --git a/srsenb/test/mac/scheduler_test_rand.cc b/srsenb/test/mac/scheduler_test_rand.cc
index 4c991ed8f..840225add 100644
--- a/srsenb/test/mac/scheduler_test_rand.cc
+++ b/srsenb/test/mac/scheduler_test_rand.cc
@@ -62,8 +62,8 @@
  * - DL adaptive retx/new tx <=> PDCCH alloc
  *******************************************************/
 
-// uint32_t const seed = std::chrono::system_clock::now().time_since_epoch().count();
-uint32_t const seed = 2452071795;
+uint32_t const seed = std::chrono::system_clock::now().time_since_epoch().count();
+// uint32_t const seed = 2452071795;
 // uint32_t const seed = 1581009287; // prb==25
 bool check_old_pids = false;
 
@@ -253,7 +253,7 @@ int sched_tester::test_pdcch_collisions()
 
   /* verify if sched_result "used_cce" coincide with sched "used_cce" */
   auto* tti_alloc = carrier_schedulers[0]->get_sf_sched_ptr(tti_info.tti_params.tti_rx);
-  srsenb::pdcch_mask_t mask = std::get<0>(tti_alloc->last_sched_result_masks());
+  srsenb::pdcch_mask_t mask = tti_alloc->last_sched_result().pdcch_mask;
   if (used_cce != mask) {
     std::string mask_str = mask.to_string();
     TESTERROR("The used_cce do not match: (%s!=%s)\n", mask_str.c_str(), used_cce.to_string().c_str());
@@ -377,7 +377,6 @@ int sched_tester::test_harqs()
 int sched_tester::test_sch_collisions()
 {
   const srsenb::sf_sched* tti_sched = carrier_schedulers[CARRIER_IDX]->get_sf_sched_ptr(tti_info.tti_params.tti_rx);
-  const auto&             combined_sched_result = tti_sched->last_sched_result_masks();
 
   srsenb::prbmask_t ul_allocs(sched_cell_params[CARRIER_IDX].cfg.cell.nof_prb);
 
@@ -386,7 +385,7 @@ int sched_tester::test_sch_collisions()
                    tti_info.tti_params, tti_info.ul_sched_result[CARRIER_IDX], ul_allocs) == SRSLTE_SUCCESS);
 
   /* TEST: check whether cumulative UL PRB masks coincide */
-  if (ul_allocs != std::get<2>(combined_sched_result)) {
+  if (ul_allocs != tti_sched->last_sched_result().ul_mask) {
     TESTERROR("The UL PRB mask and the scheduler result UL mask are not consistent\n");
   }
 
@@ -416,10 +415,10 @@ int sched_tester::test_sch_collisions()
   }
 
   // TEST: check if resulting DL mask is equal to scheduler internal DL mask
-  if (rbgmask != std::get<1>(combined_sched_result)) {
+  if (rbgmask != tti_sched->last_sched_result().dl_mask) {
     TESTERROR("The DL PRB mask and the scheduler result DL mask are not consistent (%s!=%s)\n",
               rbgmask.to_string().c_str(),
-              std::get<1>(combined_sched_result).to_string().c_str());
+              tti_sched->last_sched_result().dl_mask.to_string().c_str());
   }
   return SRSLTE_SUCCESS;
 }
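
Illustrative sketch (not part of the patch above): sf_sched now owns two sf_sched_result buffers and swaps them inside sf_sched::reset(), so the result of the subframe that was just generated stays readable through last_sched_result() while the allocations for the next TTI are written into the other buffer. The standalone C++ example below mimics that double-buffering pattern with simplified, hypothetical names (SfResult, SfScheduler); it is not the srsENB API and only shows the idea.

#include <array>
#include <cstdio>
#include <string>
#include <utility>

// Simplified stand-in for the DL/UL results aggregated in sf_sched_result.
struct SfResult {
  int         nof_allocs = 0;
  std::string note;
};

// Minimal double-buffered result holder, mimicking the
// current_sf_result / last_sf_result pointer swap of sf_sched.
class SfScheduler
{
public:
  SfScheduler() : current_(&buffers_[0]), last_(&buffers_[1]) {}

  // Build the result of the TTI currently being scheduled into the "current" buffer.
  void add_alloc(const std::string& note)
  {
    current_->nof_allocs++;
    current_->note = note;
  }

  // Equivalent in spirit to sf_sched::reset(): publish the current buffer as
  // "last" and start writing the next TTI into the other buffer.
  void finish_tti()
  {
    std::swap(current_, last_);
    *current_ = {}; // clear the buffer that will hold the next TTI
  }

  // Readers (like sched::dl_sched/ul_sched copying results) only ever see the
  // completed buffer, which is untouched while the next TTI is being built.
  const SfResult& last_result() const { return *last_; }

private:
  std::array<SfResult, 2> buffers_{};
  SfResult*               current_;
  SfResult*               last_;
};

int main()
{
  SfScheduler sched;
  sched.add_alloc("tti 0: msg3 grant");
  sched.finish_tti();
  sched.add_alloc("tti 1: data grant"); // does not disturb the published tti 0 result
  std::printf("last TTI had %d alloc(s): %s\n",
              sched.last_result().nof_allocs,
              sched.last_result().note.c_str());
  return 0;
}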