From 73447972d823bcb6e40bff65a291cd066487b9eb Mon Sep 17 00:00:00 2001
From: Ismael Gomez
Date: Mon, 16 Mar 2020 13:10:21 +0100
Subject: [PATCH] Fix issue with simultaneous CQI and ACK/NACK transmission in
 CA (#1067)

* Fix memory corruption when phy calling mac scheduler and not yet initiated
* Do not drop CQI if collision with ACK/NACK and PUSCH
* Allocate CQI resources for SCell properly
* Use UE_PCELL_CC_IDX macro
* Protect ul_sched from being called if not yet started
---
 lib/include/srslte/phy/phch/cqi.h       |   2 +-
 lib/include/srslte/phy/phch/pucch_cfg.h |   2 +-
 srsenb/hdr/phy/phy_ue_db.h              |   1 +
 srsenb/hdr/stack/rrc/rrc.h              |  18 ++-
 srsenb/rr.conf.example                  |  12 +-
 srsenb/src/phy/cc_worker.cc             |   4 +-
 srsenb/src/phy/phy_ue_db.cc             |   7 +-
 srsenb/src/stack/mac/mac.cc             |   4 +-
 srsenb/src/stack/rrc/rrc.cc             | 167 +++++++++++++-----------
 srsenb/test/phy/enb_phy_test.cc         |  13 +-
 10 files changed, 128 insertions(+), 102 deletions(-)

diff --git a/lib/include/srslte/phy/phch/cqi.h b/lib/include/srslte/phy/phch/cqi.h
index 40e577b62..78008cc1d 100644
--- a/lib/include/srslte/phy/phch/cqi.h
+++ b/lib/include/srslte/phy/phch/cqi.h
@@ -55,7 +55,7 @@ typedef enum {
 typedef struct {
   bool     periodic_configured;
   bool     aperiodic_configured;
-  uint32_t pmi_idx;
+  uint16_t pmi_idx;
   uint32_t ri_idx;
   bool     ri_idx_present;
   bool     format_is_subband;
diff --git a/lib/include/srslte/phy/phch/pucch_cfg.h b/lib/include/srslte/phy/phch/pucch_cfg.h
index f9e172b88..e174ffefb 100644
--- a/lib/include/srslte/phy/phch/pucch_cfg.h
+++ b/lib/include/srslte/phy/phch/pucch_cfg.h
@@ -79,7 +79,7 @@ typedef struct SRSLTE_API {
 
   // PUCCH configuration generated during a call to encode/decode
   srslte_pucch_format_t format;
-  uint32_t              n_pucch;
+  uint16_t              n_pucch;
   uint8_t               pucch2_drs_bits[SRSLTE_PUCCH2_MAX_DMRS_BITS];
 
 } srslte_pucch_cfg_t;
diff --git a/srsenb/hdr/phy/phy_ue_db.h b/srsenb/hdr/phy/phy_ue_db.h
index 19509bc47..dea6cf870 100644
--- a/srsenb/hdr/phy/phy_ue_db.h
+++ b/srsenb/hdr/phy/phy_ue_db.h
@@ -265,6 +265,7 @@ public:
                     uint32_t          enb_cc_idx,
                     uint16_t          rnti,
                     bool              aperiodic_cqi_request,
+                    bool              is_pusch_available,
                     srslte_uci_cfg_t& uci_cfg) const;
 
   /**
diff --git a/srsenb/hdr/stack/rrc/rrc.h b/srsenb/hdr/stack/rrc/rrc.h
index 0ed1dfa27..1fd26b7c4 100644
--- a/srsenb/hdr/stack/rrc/rrc.h
+++ b/srsenb/hdr/stack/rrc/rrc.h
@@ -257,7 +257,7 @@ private:
     int sr_free();
 
     int  cqi_allocate(uint32_t period, uint16_t* pmi_idx, uint16_t* n_pucch);
-    void cqi_get(uint16_t* pmi_idx, uint16_t* n_pucch);
+    void cqi_get(uint16_t* pmi_idx, uint16_t* n_pucch, uint32_t ue_cc_idx);
     int  cqi_free();
 
     int ri_get(uint32_t m_ri, uint16_t* ri_idx);
@@ -321,15 +321,23 @@ private:
     bool     sr_allocated = false;
     uint32_t sr_N_pucch   = 0;
     uint32_t sr_I         = 0;
-    uint32_t cqi_pucch    = 0;
-    uint32_t cqi_idx      = 0;
     bool     cqi_allocated     = false;
     int      cqi_sched_sf_idx  = 0;
     int      cqi_sched_prb_idx = 0;
-    int get_drbid_config(asn1::rrc::drb_to_add_mod_s* drb, int drbid);
-    bool nas_pending = false;
+    bool                  nas_pending = false;
     srslte::byte_buffer_t erab_info;
+    const static uint32_t UE_PCELL_CC_IDX = 0;
+
+    typedef struct {
+      uint32_t idx;
+      uint32_t pucch_res;
+    } cqi_res_t;
+
+    std::map<uint32_t, cqi_res_t> cqi_res = {};
+
+    int get_drbid_config(asn1::rrc::drb_to_add_mod_s* drb, int drbid);
+
     ///< Helper to access a cell cfg based on ue_cc_idx
     cell_ctxt_t* get_ue_cc_cfg(uint32_t ue_cc_idx);
diff --git a/srsenb/rr.conf.example b/srsenb/rr.conf.example
index 3dd3fee4c..c9b466922 100644
--- a/srsenb/rr.conf.example
+++ b/srsenb/rr.conf.example
@@ -35,16 +35,16 @@ phy_cnfg =
   sched_request_cnfg =
   {
     dsr_trans_max = 64;
-    period = 20;        // in ms
-    subframe = [1];     // vector of subframe indices allowed for SR transmissions
-    nof_prb = 2;        // number of PRBs on each extreme used for SR (total prb is twice this number)
+    period = 20;          // in ms
+    subframe = [1, 11];   // vector of subframe indices allowed for SR transmissions
+    nof_prb = 2;          // number of PRBs on each extreme used for SR (total prb is twice this number)
   };
   cqi_report_cnfg =
   {
     mode = "periodic";
-    simultaneousAckCQI = true;
-    period = 40;        // in ms
-    subframe = [0];
+    simultaneousAckCQI = true;
+    period = 40;                // in ms
+    subframe = [0, 10, 20, 30]; // vector of subframe indices every period where CQI resources will be allocated
     nof_prb = 2;
     m_ri = 8; // RI period in CQI period
   };
diff --git a/srsenb/src/phy/cc_worker.cc b/srsenb/src/phy/cc_worker.cc
index 076e2cfd9..1bbe96f3a 100644
--- a/srsenb/src/phy/cc_worker.cc
+++ b/srsenb/src/phy/cc_worker.cc
@@ -311,7 +311,7 @@ int cc_worker::decode_pusch(stack_interface_phy_lte::ul_sched_grant_t* grants, u
       ue_db[rnti]->is_grant_available = true;
 
       // Fill UCI configuration
-      phy->ue_db.fill_uci_cfg(tti_rx, cc_idx, rnti, grants->dci.cqi_request, phy_cfg.ul_cfg.pusch.uci_cfg);
+      phy->ue_db.fill_uci_cfg(tti_rx, cc_idx, rnti, grants->dci.cqi_request, true, phy_cfg.ul_cfg.pusch.uci_cfg);
 
       // Compute UL grant
       srslte_pusch_grant_t& grant = phy_cfg.ul_cfg.pusch.grant;
@@ -400,7 +400,7 @@ int cc_worker::decode_pucch()
     srslte_ul_cfg_t ul_cfg = phy->ue_db.get_config(rnti, cc_idx).ul_cfg;
 
     // Check if user needs to receive PUCCH
-    if (phy->ue_db.fill_uci_cfg(tti_rx, cc_idx, rnti, false, ul_cfg.pucch.uci_cfg)) {
+    if (phy->ue_db.fill_uci_cfg(tti_rx, cc_idx, rnti, false, false, ul_cfg.pucch.uci_cfg)) {
       // Decode PUCCH
       if (srslte_enb_ul_get_pucch(&enb_ul, &ul_sf, &ul_cfg.pucch, &pucch_res)) {
         ERROR("Error getting PUCCH\n");
diff --git a/srsenb/src/phy/phy_ue_db.cc b/srsenb/src/phy/phy_ue_db.cc
index f60bfe4c3..322404a50 100644
--- a/srsenb/src/phy/phy_ue_db.cc
+++ b/srsenb/src/phy/phy_ue_db.cc
@@ -81,7 +81,7 @@ inline void phy_ue_db::_clear_tti_pending_rnti(uint32_t tti, uint16_t rnti)
     }
   }
 
-  // Copy essentials
+  // Copy essentials. It is assumed the PUCCH parameters are the same for all carriers
   pdsch_ack.transmission_mode      = ue.cell_info[0].phy_cfg.dl_cfg.tm;
   pdsch_ack.nof_cc                 = nof_active_cc;
   pdsch_ack.ack_nack_feedback_mode = ue.cell_info[0].phy_cfg.ul_cfg.pucch.ack_nack_feedback_mode;
@@ -373,6 +373,7 @@ bool phy_ue_db::fill_uci_cfg(uint32_t tti,
                              uint32_t          enb_cc_idx,
                              uint16_t          rnti,
                              bool              aperiodic_cqi_request,
+                             bool              is_pusch_available,
                              srslte_uci_cfg_t& uci_cfg) const
 {
   std::lock_guard<std::mutex> lock(mutex);
@@ -430,7 +431,9 @@ bool phy_ue_db::fill_uci_cfg(uint32_t tti,
     srslte_dl_sf_cfg_t dl_sf_cfg = {};
     dl_sf_cfg.tti                = tti;
     const srslte_cell_t& cell    = cell_cfg_list->at(ue.cell_info[0].enb_cc_idx).cell;
-    srslte_enb_dl_gen_ack(&cell, &dl_sf_cfg, &ue.pdsch_ack[TTIMOD(tti)], &uci_cfg);
+    srslte_pdsch_ack_t ack_info  = ue.pdsch_ack[TTIMOD(tti)];
+    ack_info.is_pusch_available  = is_pusch_available;
+    srslte_enb_dl_gen_ack(&cell, &dl_sf_cfg, &ack_info, &uci_cfg);
     uci_required |= (srslte_uci_cfg_total_ack(&uci_cfg) > 0);
 
   // Return whether UCI needs to be decoded
diff --git a/srsenb/src/stack/mac/mac.cc b/srsenb/src/stack/mac/mac.cc
index 0df536053..0bb7108e0 100644
--- a/srsenb/src/stack/mac/mac.cc
+++ b/srsenb/src/stack/mac/mac.cc
@@ -770,12 +770,12 @@ uint8_t* mac::assemble_si(const uint8_t enb_cc_idx, const uint32_t sib_index)
 
 int mac::get_ul_sched(uint32_t tti, ul_sched_list_t& ul_sched_res_list)
 {
-  log_h->step(tti);
-
   if (!started) {
     return SRSLTE_SUCCESS;
   }
 
+  log_h->step(tti);
+
   for (uint32_t enb_cc_idx = 0; enb_cc_idx < cell_config.size(); enb_cc_idx++) {
     ul_sched_t* phy_ul_sched_res = &ul_sched_res_list[enb_cc_idx];
diff --git a/srsenb/src/stack/rrc/rrc.cc b/srsenb/src/stack/rrc/rrc.cc
index e15c005a0..ff264032a 100644
--- a/srsenb/src/stack/rrc/rrc.cc
+++ b/srsenb/src/stack/rrc/rrc.cc
@@ -1069,7 +1069,7 @@ rrc::ue::ue(rrc* outer_rrc, uint16_t rnti_, const sched_interface::ue_cfg_t& sch
     parent->rrc_log->warning("No PCell set. Picking eNBccIdx=0 as PCell\n");
     current_sched_ue_cfg.supported_cc_list.resize(0);
     current_sched_ue_cfg.supported_cc_list[0].active     = true;
-    current_sched_ue_cfg.supported_cc_list[0].enb_cc_idx = 0;
+    current_sched_ue_cfg.supported_cc_list[0].enb_cc_idx = UE_PCELL_CC_IDX;
   }
 
   activity_timer = outer_rrc->timers->get_unique_timer();
@@ -1129,8 +1129,8 @@ void rrc::ue::set_activity_timeout(const activity_timeout_type_t type)
   switch (type) {
     case MSG3_RX_TIMEOUT:
       deadline_s  = 0;
-      deadline_ms =
-          static_cast<uint32_t>((get_ue_cc_cfg(0)->sib2.rr_cfg_common.rach_cfg_common.max_harq_msg3_tx + 1) * 16);
+      deadline_ms = static_cast<uint32_t>(
+          (get_ue_cc_cfg(UE_PCELL_CC_IDX)->sib2.rr_cfg_common.rach_cfg_common.max_harq_msg3_tx + 1) * 16);
       break;
     case UE_RESPONSE_RX_TIMEOUT:
       // Arbitrarily chosen value to complete each UE config step, i.e. security, bearer setup, etc.
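The two cc_worker.cc call sites earlier in this patch now tell fill_uci_cfg() whether the UCI will travel on PUSCH (decode_pusch passes true) or on PUCCH (decode_pucch passes false), which is how the "do not drop CQI if collision with ACK/NACK and PUSCH" fix reaches srslte_enb_dl_gen_ack(). A minimal sketch of that decision rule, assuming hypothetical names (uci_state, keep_periodic_cqi) that are not part of the srsLTE API:

// Hypothetical, simplified model of the UCI collision rule this patch encodes.
// It is an illustration only, not the srslte_enb_dl_gen_ack() implementation.
struct uci_state {
  bool harq_ack_pending;     // DL ACK/NACK due in this TTI
  bool periodic_cqi_due;     // periodic CQI scheduled in this TTI
  bool simultaneous_ack_cqi; // rr.conf cqi_report_cnfg.simultaneousAckCQI
};

static bool keep_periodic_cqi(const uci_state& s, bool is_pusch_available)
{
  if (!s.periodic_cqi_due) {
    return false; // nothing scheduled
  }
  if (!s.harq_ack_pending) {
    return true; // no collision with ACK/NACK
  }
  // On PUSCH all UCI bits are multiplexed with data, so the CQI is kept.
  // On PUCCH it survives only if simultaneous ACK/NACK + CQI is configured.
  return is_pusch_available || s.simultaneous_ack_cqi;
}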
@@ -1642,7 +1642,7 @@ void rrc::ue::send_connection_setup(bool is_setup)
   phy_cfg->cqi_report_cfg.cqi_report_periodic.set_setup();
   phy_cfg->cqi_report_cfg.cqi_report_periodic.setup().cqi_format_ind_periodic.set(
       cqi_report_periodic_c::setup_s_::cqi_format_ind_periodic_c_::types::wideband_cqi);
-  phy_cfg->cqi_report_cfg.cqi_report_periodic.setup().simul_ack_nack_and_cqi = false;
+  phy_cfg->cqi_report_cfg.cqi_report_periodic.setup().simul_ack_nack_and_cqi = parent->cfg.cqi_cfg.simultaneousAckCQI;
   if (is_setup) {
     if (cqi_allocate(parent->cfg.cqi_cfg.period,
                      &phy_cfg->cqi_report_cfg.cqi_report_periodic.setup().cqi_pmi_cfg_idx,
@@ -1651,8 +1651,9 @@ void rrc::ue::send_connection_setup(bool is_setup)
       return;
     }
   } else {
-    phy_cfg->cqi_report_cfg.cqi_report_periodic.setup().cqi_pucch_res_idx = (uint16_t)cqi_pucch;
-    phy_cfg->cqi_report_cfg.cqi_report_periodic.setup().cqi_pmi_cfg_idx   = (uint16_t)cqi_idx;
+    cqi_get(&phy_cfg->cqi_report_cfg.cqi_report_periodic.setup().cqi_pucch_res_idx,
+            &phy_cfg->cqi_report_cfg.cqi_report_periodic.setup().cqi_pmi_cfg_idx,
+            UE_PCELL_CC_IDX);
   }
 }
 phy_cfg->cqi_report_cfg.nom_pdsch_rs_epre_offset = 0;
@@ -1666,15 +1667,14 @@ void rrc::ue::send_connection_setup(bool is_setup)
     current_sched_ue_cfg.aperiodic_cqi_period                   = parent->cfg.cqi_cfg.period;
     current_sched_ue_cfg.dl_cfg.cqi_report.aperiodic_configured = true;
   } else {
-    current_sched_ue_cfg.dl_cfg.cqi_report.pmi_idx = cqi_idx;
+    cqi_get(&current_sched_ue_cfg.dl_cfg.cqi_report.pmi_idx, &current_sched_ue_cfg.pucch_cfg.n_pucch, UE_PCELL_CC_IDX);
     current_sched_ue_cfg.dl_cfg.cqi_report.periodic_configured = true;
   }
   current_sched_ue_cfg.dl_cfg.tm               = SRSLTE_TM1;
   current_sched_ue_cfg.pucch_cfg.I_sr          = sr_I;
   current_sched_ue_cfg.pucch_cfg.n_pucch_sr    = sr_N_pucch;
   current_sched_ue_cfg.pucch_cfg.sr_configured = true;
-  current_sched_ue_cfg.pucch_cfg.n_pucch       = cqi_pucch;
-  const sib_type2_s& sib2 = get_ue_cc_cfg(0)->sib2;
+  const sib_type2_s& sib2 = get_ue_cc_cfg(UE_PCELL_CC_IDX)->sib2;
   current_sched_ue_cfg.pucch_cfg.delta_pucch_shift = sib2.rr_cfg_common.pucch_cfg_common.delta_pucch_shift.to_number();
   current_sched_ue_cfg.pucch_cfg.N_cs              = sib2.rr_cfg_common.pucch_cfg_common.ncs_an;
   current_sched_ue_cfg.pucch_cfg.n_rb_2            = sib2.rr_cfg_common.pucch_cfg_common.nrb_cqi;
@@ -1785,7 +1785,8 @@ void rrc::ue::send_connection_reconf_upd(srslte::unique_byte_buffer_t pdu)
   phy_cfg->cqi_report_cfg.cqi_report_periodic.set_setup().cqi_format_ind_periodic.set(
       cqi_report_periodic_c::setup_s_::cqi_format_ind_periodic_c_::types::wideband_cqi);
   cqi_get(&phy_cfg->cqi_report_cfg.cqi_report_periodic.setup().cqi_pmi_cfg_idx,
-          &phy_cfg->cqi_report_cfg.cqi_report_periodic.setup().cqi_pucch_res_idx);
+          &phy_cfg->cqi_report_cfg.cqi_report_periodic.setup().cqi_pucch_res_idx,
+          UE_PCELL_CC_IDX);
   phy_cfg->cqi_report_cfg.cqi_report_periodic.setup().simul_ack_nack_and_cqi = parent->cfg.cqi_cfg.simultaneousAckCQI;
   if (parent->cfg.antenna_info.tx_mode == ant_info_ded_s::tx_mode_e_::tm3 ||
       parent->cfg.antenna_info.tx_mode == ant_info_ded_s::tx_mode_e_::tm4) {
@@ -1840,7 +1841,7 @@ void rrc::ue::send_connection_reconf(srslte::unique_byte_buffer_t pdu)
   } else {
     phy_cfg->cqi_report_cfg.cqi_report_periodic_present = true;
     auto& cqi_rep = phy_cfg->cqi_report_cfg.cqi_report_periodic.set_setup();
-    cqi_get(&cqi_rep.cqi_pmi_cfg_idx, &cqi_rep.cqi_pucch_res_idx);
+    cqi_get(&cqi_rep.cqi_pmi_cfg_idx, &cqi_rep.cqi_pucch_res_idx, UE_PCELL_CC_IDX);
     cqi_rep.cqi_format_ind_periodic.set(
         cqi_report_periodic_c::setup_s_::cqi_format_ind_periodic_c_::types::wideband_cqi);
     cqi_rep.simul_ack_nack_and_cqi = parent->cfg.cqi_cfg.simultaneousAckCQI;
@@ -1967,7 +1968,7 @@ void rrc::ue::fill_scell_to_addmod_list(asn1::rrc::rrc_conn_recfg_r8_ies_s* conn
 
   // Add all SCells configured for the current PCell
   uint32_t scell_idx = 1; // SCell start with 1, zero reserved for PCell
-  const cell_ctxt_t* pcell_cfg = get_ue_cc_cfg(0);
+  const cell_ctxt_t* pcell_cfg = get_ue_cc_cfg(UE_PCELL_CC_IDX);
   for (auto& scell : pcell_cfg->cell_cfg.scell_list) {
     // get corresponding eNB cell context for this scell
     const cell_ctxt_t* cc_cfg = parent->find_cell_ctxt(scell.cell_id);
@@ -2033,11 +2034,13 @@ void rrc::ue::fill_scell_to_addmod_list(asn1::rrc::rrc_conn_recfg_r8_ies_s* conn
     ul_cfg_ded.cqi_report_cfg_scell_r10_present                               = true;
     ul_cfg_ded.cqi_report_cfg_scell_r10.nom_pdsch_rs_epre_offset_r10          = 0;
     ul_cfg_ded.cqi_report_cfg_scell_r10.cqi_report_periodic_scell_r10_present = true;
+
+    // Get CQI allocation for secondary cell
     auto& cqi_setup = ul_cfg_ded.cqi_report_cfg_scell_r10.cqi_report_periodic_scell_r10.set_setup();
-    cqi_setup.cqi_pucch_res_idx_r10 = 0;
-    cqi_setup.cqi_pmi_cfg_idx       = cqi_idx + scell_idx; // Take next PMI idx starting from PCell
+    cqi_get(&cqi_setup.cqi_pmi_cfg_idx, &cqi_setup.cqi_pucch_res_idx_r10, scell_idx);
+
     cqi_setup.cqi_format_ind_periodic_r10.set_wideband_cqi_r10();
-    cqi_setup.simul_ack_nack_and_cqi = false;
+    cqi_setup.simul_ack_nack_and_cqi = parent->cfg.cqi_cfg.simultaneousAckCQI;
 #if SRS_ENABLED
     ul_cfg_ded.srs_ul_cfg_ded_r10_present = true;
     auto& srs_setup                       = ul_cfg_ded.srs_ul_cfg_ded_r10.set_setup();
@@ -2448,7 +2451,8 @@ void rrc::ue::sr_get(uint8_t* I_sr, uint16_t* N_pucch_sr)
 
 int rrc::ue::sr_allocate(uint32_t period, uint8_t* I_sr, uint16_t* N_pucch_sr)
 {
   uint32_t c                 = SRSLTE_CP_ISNORM(parent->cfg.cell.cp) ? 3 : 2;
-  uint32_t delta_pucch_shift = get_ue_cc_cfg(0)->sib2.rr_cfg_common.pucch_cfg_common.delta_pucch_shift.to_number();
+  uint32_t delta_pucch_shift =
+      get_ue_cc_cfg(UE_PCELL_CC_IDX)->sib2.rr_cfg_common.pucch_cfg_common.delta_pucch_shift.to_number();
 
   uint32_t max_users = 12 * c / delta_pucch_shift;
@@ -2485,8 +2489,8 @@ int rrc::ue::sr_allocate(uint32_t period, uint8_t* I_sr, uint16_t* N_pucch_sr)
 
   // Compute N_pucch_sr
   *N_pucch_sr = i_min * max_users + parent->sr_sched.nof_users[i_min][j_min];
-  if (get_ue_cc_cfg(0)->sib2.rr_cfg_common.pucch_cfg_common.ncs_an) {
-    *N_pucch_sr += get_ue_cc_cfg(0)->sib2.rr_cfg_common.pucch_cfg_common.ncs_an;
+  if (get_ue_cc_cfg(UE_PCELL_CC_IDX)->sib2.rr_cfg_common.pucch_cfg_common.ncs_an) {
+    *N_pucch_sr += get_ue_cc_cfg(UE_PCELL_CC_IDX)->sib2.rr_cfg_common.pucch_cfg_common.ncs_an;
   }
 
   // Allocate user
@@ -2521,84 +2525,93 @@ int rrc::ue::cqi_free()
   return 0;
 }
 
-void rrc::ue::cqi_get(uint16_t* pmi_idx, uint16_t* n_pucch)
+void rrc::ue::cqi_get(uint16_t* pmi_idx, uint16_t* n_pucch, uint32_t ue_cc_idx)
 {
-  *pmi_idx = cqi_idx;
-  *n_pucch = cqi_pucch;
+  if (cqi_res.count(ue_cc_idx)) {
+    *pmi_idx = cqi_res[ue_cc_idx].idx;
+    *n_pucch = cqi_res[ue_cc_idx].pucch_res;
+  } else {
+    parent->rrc_log->error("CQI resources for ue_cc_idx=%d have not been allocated\n", ue_cc_idx);
+  }
 }
 
 int rrc::ue::cqi_allocate(uint32_t period, uint16_t* pmi_idx, uint16_t* n_pucch)
 {
   uint32_t c                 = SRSLTE_CP_ISNORM(parent->cfg.cell.cp) ? 3 : 2;
-  uint32_t delta_pucch_shift = get_ue_cc_cfg(0)->sib2.rr_cfg_common.pucch_cfg_common.delta_pucch_shift.to_number();
+  uint32_t delta_pucch_shift =
+      get_ue_cc_cfg(UE_PCELL_CC_IDX)->sib2.rr_cfg_common.pucch_cfg_common.delta_pucch_shift.to_number();
 
   uint32_t max_users = 12 * c / delta_pucch_shift;
 
-  // Find freq-time resources with least number of users
-  int      i_min = 0, j_min = 0;
-  uint32_t min_users = std::numeric_limits<uint32_t>::max();
-  for (uint32_t i = 0; i < parent->cfg.cqi_cfg.nof_prb; i++) {
-    for (uint32_t j = 0; j < parent->cfg.cqi_cfg.nof_subframes; j++) {
-      if (parent->cqi_sched.nof_users[i][j] < min_users) {
-        i_min     = i;
-        j_min     = j;
-        min_users = parent->cqi_sched.nof_users[i][j];
+  // Allocate all CQI resources for all carriers now
+  for (uint32_t cc_idx = 0; cc_idx < get_ue_cc_cfg(UE_PCELL_CC_IDX)->cell_cfg.scell_list.size(); cc_idx++) {
+    // Find freq-time resources with least number of users
+    int      i_min = 0, j_min = 0;
+    uint32_t min_users = std::numeric_limits<uint32_t>::max();
+    for (uint32_t i = 0; i < parent->cfg.cqi_cfg.nof_prb; i++) {
+      for (uint32_t j = 0; j < parent->cfg.cqi_cfg.nof_subframes; j++) {
+        if (parent->cqi_sched.nof_users[i][j] < min_users) {
+          i_min     = i;
+          j_min     = j;
+          min_users = parent->cqi_sched.nof_users[i][j];
+        }
       }
     }
-  }
 
-  if (parent->cqi_sched.nof_users[i_min][j_min] > max_users) {
-    parent->rrc_log->error("Not enough PUCCH resources to allocate Scheduling Request\n");
-    return -1;
-  }
+    if (parent->cqi_sched.nof_users[i_min][j_min] > max_users) {
+      parent->rrc_log->error("Not enough PUCCH resources to allocate Scheduling Request\n");
+      return -1;
+    }
 
-  // Compute I_sr
-  if (period != 2 && period != 5 && period != 10 && period != 20 && period != 40 && period != 80 && period != 160 &&
-      period != 32 && period != 64 && period != 128) {
-    parent->rrc_log->error("Invalid CQI Report period %d ms\n", period);
-    return -1;
-  }
-  if (parent->cfg.cqi_cfg.sf_mapping[j_min] < period) {
-    if (period != 32 && period != 64 && period != 128) {
-      if (period > 2) {
-        *pmi_idx = period - 3 + parent->cfg.cqi_cfg.sf_mapping[j_min];
+    // Compute I_sr
+    if (period != 2 && period != 5 && period != 10 && period != 20 && period != 40 && period != 80 && period != 160 &&
+        period != 32 && period != 64 && period != 128) {
+      parent->rrc_log->error("Invalid CQI Report period %d ms\n", period);
+      return -1;
+    }
+    if (parent->cfg.cqi_cfg.sf_mapping[j_min] < period) {
+      if (period != 32 && period != 64 && period != 128) {
+        if (period > 2) {
+          *pmi_idx = period - 3 + parent->cfg.cqi_cfg.sf_mapping[j_min];
       } else {
-        *pmi_idx = parent->cfg.cqi_cfg.sf_mapping[j_min];
+          *pmi_idx = parent->cfg.cqi_cfg.sf_mapping[j_min];
       }
     } else {
-      if (period == 32) {
-        *pmi_idx = 318 + parent->cfg.cqi_cfg.sf_mapping[j_min];
-      } else if (period == 64) {
-        *pmi_idx = 350 + parent->cfg.cqi_cfg.sf_mapping[j_min];
-      } else {
-        *pmi_idx = 414 + parent->cfg.cqi_cfg.sf_mapping[j_min];
-      }
+        if (period == 32) {
+          *pmi_idx = 318 + parent->cfg.cqi_cfg.sf_mapping[j_min];
+        } else if (period == 64) {
+          *pmi_idx = 350 + parent->cfg.cqi_cfg.sf_mapping[j_min];
+        } else {
+          *pmi_idx = 414 + parent->cfg.cqi_cfg.sf_mapping[j_min];
+        }
       }
     } else {
-      parent->rrc_log->error(
-          "Allocating SR: invalid sf_idx=%d for period=%d\n", parent->cfg.cqi_cfg.sf_mapping[j_min], period);
-      return -1;
-    }
+      parent->rrc_log->error(
+          "Allocating CQI: invalid sf_idx=%d for period=%d\n", parent->cfg.cqi_cfg.sf_mapping[j_min], period);
+      return -1;
+    }
 
-  // Compute n_pucch_2
-  *n_pucch = i_min * max_users + parent->cqi_sched.nof_users[i_min][j_min];
-  if (get_ue_cc_cfg(0)->sib2.rr_cfg_common.pucch_cfg_common.ncs_an) {
-    *n_pucch += get_ue_cc_cfg(0)->sib2.rr_cfg_common.pucch_cfg_common.ncs_an;
-  }
+    // Compute n_pucch_2
+    *n_pucch = i_min * max_users + parent->cqi_sched.nof_users[i_min][j_min];
+    if (get_ue_cc_cfg(UE_PCELL_CC_IDX)->sib2.rr_cfg_common.pucch_cfg_common.ncs_an) {
+      *n_pucch += get_ue_cc_cfg(UE_PCELL_CC_IDX)->sib2.rr_cfg_common.pucch_cfg_common.ncs_an;
+    }
+    // Allocate user
+    parent->cqi_sched.nof_users[i_min][j_min]++;
+    cqi_sched_prb_idx = i_min;
+    cqi_sched_sf_idx  = j_min;
+    cqi_allocated     = true;
+    cqi_res[cc_idx].idx       = *pmi_idx;
+    cqi_res[cc_idx].pucch_res = *n_pucch;
 
-  // Allocate user
-  parent->cqi_sched.nof_users[i_min][j_min]++;
-  cqi_sched_prb_idx = i_min;
-  cqi_sched_sf_idx  = j_min;
-  cqi_allocated     = true;
-  cqi_idx           = *pmi_idx;
-  cqi_pucch         = *n_pucch;
-
-  parent->rrc_log->info("Allocated CQI resources for time-frequency slot (%d, %d), n_pucch_2=%d, pmi_cfg_idx=%d\n",
-                        cqi_sched_prb_idx,
-                        cqi_sched_sf_idx,
-                        *n_pucch,
-                        *pmi_idx);
+    parent->rrc_log->info(
+        "Allocated CQI resources for cc_idx=%d, time-frequency slot (%d, %d), n_pucch_2=%d, pmi_cfg_idx=%d\n",
+        cc_idx,
+        cqi_sched_prb_idx,
+        cqi_sched_sf_idx,
+        *n_pucch,
+        *pmi_idx);
+  }
 
   return 0;
 }
diff --git a/srsenb/test/phy/enb_phy_test.cc b/srsenb/test/phy/enb_phy_test.cc
index 69b3ed63c..fefec084e 100644
--- a/srsenb/test/phy/enb_phy_test.cc
+++ b/srsenb/test/phy/enb_phy_test.cc
@@ -404,7 +404,7 @@ public:
 
     notify_sr_detected();
 
-    log_h.info("Received SR tti=%d; rnti=x%x\n", tti, rnti);
+    log_h.info("Received SR tti=%d; rnti=0x%x\n", tti, rnti);
 
     return SRSLTE_SUCCESS;
   }
@@ -432,7 +432,7 @@ public:
 
     notify_cqi_info();
 
-    log_h.info("Received CQI tti=%d; rnti=x%x; cc_idx=%d; cqi=%d;\n", tti, rnti, cc_idx, cqi_value);
+    log_h.info("Received CQI tti=%d; rnti=0x%x; cc_idx=%d; cqi=%d;\n", tti, rnti, cc_idx, cqi_value);
 
     return SRSLTE_SUCCESS;
   }
@@ -443,7 +443,7 @@ public:
   }
   int ta_info(uint32_t tti, uint16_t rnti, float ta_us) override
   {
-    log_h.info("Received TA INFO tti=%d; rnti=x%x; ta=%.1f us\n", tti, rnti, ta_us);
+    log_h.info("Received TA INFO tti=%d; rnti=0x%x; ta=%.1f us\n", tti, rnti, ta_us);
     notify_ta_info();
     return 0;
   }
@@ -457,7 +457,7 @@ public:
     tti_dl_info.ack    = ack;
     tti_dl_info_ack_queue.push(tti_dl_info);
 
-    log_h.info("Received DL ACK tti=%d; rnti=x%x; cc=%d; tb=%d; ack=%d;\n", tti, rnti, cc_idx, tb_idx, ack);
+    log_h.info("Received DL ACK tti=%d; rnti=0x%x; cc=%d; tb=%d; ack=%d;\n", tti, rnti, cc_idx, tb_idx, ack);
     notify_ack_info();
     return 0;
   }
@@ -470,7 +470,7 @@ public:
     tti_ul_info.crc = crc_res;
     tti_ul_info_ack_queue.push(tti_ul_info);
 
-    log_h.info("Received UL ACK tti=%d; rnti=x%x; cc=%d; ack=%d;\n", tti, rnti, cc_idx, crc_res);
+    log_h.info("Received UL ACK tti=%d; rnti=0x%x; cc=%d; ack=%d;\n", tti, rnti, cc_idx, crc_res);
     notify_crc_info();
 
     return 0;
@@ -592,7 +592,7 @@ public:
     ul_sched.pusch[0].dci.type2_alloc.n_gap = srslte_ra_type2_t::SRSLTE_RA_TYPE2_NG1;
     ul_sched.pusch[0].dci.type2_alloc.mode  = srslte_ra_type2_t::SRSLTE_RA_TYPE2_LOC;
     ul_sched.pusch[0].dci.freq_hop_fl       = srslte_dci_ul_t::SRSLTE_RA_PUSCH_HOP_DISABLED;
-    ul_sched.pusch[0].dci.tb.mcs_idx        = 24;
+    ul_sched.pusch[0].dci.tb.mcs_idx        = 20; // Can't set it too high for grants with CQI and long ACK/NACK
     ul_sched.pusch[0].dci.tb.rv             = 0;
    ul_sched.pusch[0].dci.tb.ndi            = false;
    ul_sched.pusch[0].dci.tb.cw_idx         = 0;
@@ -935,6 +935,7 @@ public:
     ue_ul_cfg.ul_cfg.pusch.softbuffers.tx = &softbuffer_tx;
 
     ue_ul_cfg.grant_available = true;
+    pdsch_ack.is_pusch_available = true;
   }
 
   // Generate
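The rrc.cc changes above replace the single cqi_idx/cqi_pucch pair with a per-carrier map keyed by ue_cc_idx, so the PCell and every SCell get their own periodic CQI PUCCH resource instead of the SCell guessing cqi_idx + scell_idx and hard-coding cqi_pucch_res_idx_r10 = 0. A condensed sketch of that bookkeeping, assuming a hypothetical cqi_resource_pool wrapper and a plain-counter allocation policy (the real cqi_allocate() searches the least-loaded PRB/subframe slot according to the rr.conf cqi_report_cnfg settings):

#include <cstdint>
#include <cstdio>
#include <map>

// Simplified stand-in for the per-carrier CQI resource bookkeeping added in rrc.h:
// one {pmi_cfg_idx, n_pucch_2} pair per UE carrier index (0 = PCell, 1.. = SCells).
struct cqi_res_t {
  uint32_t idx;       // periodic CQI/PMI configuration index
  uint32_t pucch_res; // PUCCH format 2 resource (n_pucch_2)
};

class cqi_resource_pool
{
public:
  // Allocate one resource per carrier up front, as cqi_allocate() now does for
  // the PCell and every configured SCell. The counter-based policy here is an
  // illustration only.
  void allocate(uint32_t nof_carriers)
  {
    for (uint32_t cc_idx = 0; cc_idx < nof_carriers; cc_idx++) {
      res[cc_idx] = {next_idx++, next_pucch++};
    }
  }

  // Mirror of cqi_get(): fail loudly instead of returning a stale PCell value.
  bool get(uint32_t ue_cc_idx, uint16_t* pmi_idx, uint16_t* n_pucch) const
  {
    auto it = res.find(ue_cc_idx);
    if (it == res.end()) {
      std::printf("CQI resources for ue_cc_idx=%u have not been allocated\n",
                  static_cast<unsigned>(ue_cc_idx));
      return false;
    }
    *pmi_idx = static_cast<uint16_t>(it->second.idx);
    *n_pucch = static_cast<uint16_t>(it->second.pucch_res);
    return true;
  }

private:
  std::map<uint32_t, cqi_res_t> res;
  uint32_t next_idx = 0, next_pucch = 0;
};

Keying the allocations by ue_cc_idx is also what allows cqi_get() to report a missing allocation for a given carrier rather than silently handing back the PCell values.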