From 881e278a106249b217f117feb2f5ea29d1f8008f Mon Sep 17 00:00:00 2001 From: Ismael Gomez Date: Sat, 7 Jul 2018 12:17:55 +0200 Subject: [PATCH 1/5] Add rwlock to sched class --- srsenb/hdr/mac/scheduler.h | 6 +- srsenb/src/enb.cc | 14 +++++ srsenb/src/mac/scheduler.cc | 112 ++++++++++++++++++++++++++++-------- 3 files changed, 106 insertions(+), 26 deletions(-) diff --git a/srsenb/hdr/mac/scheduler.h b/srsenb/hdr/mac/scheduler.h index 0c69bcbef..519343097 100644 --- a/srsenb/hdr/mac/scheduler.h +++ b/srsenb/hdr/mac/scheduler.h @@ -151,8 +151,10 @@ private: metric_ul *ul_metric; srslte::log *log_h; rrc_interface_mac *rrc; - - cell_cfg_t cfg; + + pthread_rwlock_t rwlock; + + cell_cfg_t cfg; sched_args_t sched_cfg; const static int MAX_PRB = 100; diff --git a/srsenb/src/enb.cc b/srsenb/src/enb.cc index f1e237568..c5660e0f0 100644 --- a/srsenb/src/enb.cc +++ b/srsenb/src/enb.cc @@ -205,6 +205,20 @@ bool enb::init(all_args_t *args_) fprintf(stderr, "Error parsing DRB configuration\n"); return false; } + + uint32_t prach_freq_offset = rrc_cfg.sibs[1].sib.sib2.rr_config_common_sib.prach_cnfg.prach_cnfg_info.prach_freq_offset; + + if (prach_freq_offset + 6 > cell_cfg.nof_prb) { + fprintf(stderr, "Invalid PRACH configuration: frequency offset=%d outside bandwidth limits\n", prach_freq_offset); + return false; + } + + if (prach_freq_offset < rrc_cfg.cqi_cfg.nof_prb || prach_freq_offset < rrc_cfg.sr_cfg.nof_prb ) { + fprintf(stderr, "Invalid PRACH configuration: frequency offset=%d lower than CQI offset: %d or SR offset: %d\n", + prach_freq_offset, rrc_cfg.cqi_cfg.nof_prb, rrc_cfg.sr_cfg.nof_prb); + return false; + } + rrc_cfg.inactivity_timeout_ms = args->expert.rrc_inactivity_timer; rrc_cfg.enable_mbsfn = args->expert.enable_mbsfn; diff --git a/srsenb/src/mac/scheduler.cc b/srsenb/src/mac/scheduler.cc index 4bebfb3d9..8737d9168 100644 --- a/srsenb/src/mac/scheduler.cc +++ b/srsenb/src/mac/scheduler.cc @@ -62,11 +62,16 @@ sched::sched() : bc_aggr_level(0), rar_aggr_level(0), avail_rbg(0), P(0), start_ bzero(rar_locations[i], sizeof(sched_ue::sched_dci_cce_t) * 10); } reset(); + + pthread_rwlock_init(&rwlock, NULL); } sched::~sched() { srslte_regs_free(®s); + pthread_rwlock_wrlock(&rwlock); + pthread_rwlock_unlock(&rwlock); + pthread_rwlock_destroy(&rwlock); } void sched::init(rrc_interface_mac *rrc_, srslte::log* log) @@ -86,9 +91,11 @@ int sched::reset() bzero(pending_msg3, sizeof(pending_msg3_t)*10); bzero(pending_rar, sizeof(sched_rar_t)*SCHED_MAX_PENDING_RAR); bzero(pending_sibs, sizeof(sched_sib_t)*MAX_SIBS); + configured = false; + pthread_rwlock_wrlock(&rwlock); ue_db.clear(); - configured = false; - return 0; + pthread_rwlock_unlock(&rwlock); + return 0; } void sched::set_sched_cfg(sched_interface::sched_args_t* sched_cfg_) @@ -152,9 +159,11 @@ int sched::cell_cfg(sched_interface::cell_cfg_t* cell_cfg) int sched::ue_cfg(uint16_t rnti, sched_interface::ue_cfg_t *ue_cfg) { // Add or config user - ue_db[rnti].set_cfg(rnti, ue_cfg, &cfg, ®s, log_h); + pthread_rwlock_rdlock(&rwlock); + ue_db[rnti].set_cfg(rnti, ue_cfg, &cfg, ®s, log_h); ue_db[rnti].set_max_mcs(sched_cfg.pusch_max_mcs, sched_cfg.pdsch_max_mcs); ue_db[rnti].set_fixed_mcs(sched_cfg.pusch_mcs, sched_cfg.pdsch_mcs); + pthread_rwlock_unlock(&rwlock); return 0; } @@ -162,167 +171,198 @@ int sched::ue_cfg(uint16_t rnti, sched_interface::ue_cfg_t *ue_cfg) int sched::ue_rem(uint16_t rnti) { int ret = 0; + pthread_rwlock_wrlock(&rwlock); if (ue_db.count(rnti)) { ue_db.erase(rnti); } else { Error("User rnti=0x%x not found\n", rnti); ret = 
-1; } + pthread_rwlock_unlock(&rwlock); return ret; } bool sched::ue_exists(uint16_t rnti) { - return (ue_db.count(rnti) == 1); + pthread_rwlock_rdlock(&rwlock); + bool ret = (ue_db.count(rnti) == 1); + pthread_rwlock_unlock(&rwlock); + return ret; } void sched::phy_config_enabled(uint16_t rnti, bool enabled) { + pthread_rwlock_rdlock(&rwlock); if (ue_db.count(rnti)) { ue_db[rnti].phy_config_enabled(current_tti, enabled); } else { Error("User rnti=0x%x not found\n", rnti); } + pthread_rwlock_unlock(&rwlock); } int sched::bearer_ue_cfg(uint16_t rnti, uint32_t lc_id, sched_interface::ue_bearer_cfg_t *cfg) { int ret = 0; - if (ue_db.count(rnti)) { + pthread_rwlock_rdlock(&rwlock); + if (ue_db.count(rnti)) { ue_db[rnti].set_bearer_cfg(lc_id, cfg); } else { Error("User rnti=0x%x not found\n", rnti); ret = -1; } + pthread_rwlock_unlock(&rwlock); return ret; } int sched::bearer_ue_rem(uint16_t rnti, uint32_t lc_id) { int ret = 0; - if (ue_db.count(rnti)) { + pthread_rwlock_rdlock(&rwlock); + if (ue_db.count(rnti)) { ue_db[rnti].rem_bearer(lc_id); } else { Error("User rnti=0x%x not found\n", rnti); ret = -1; } + pthread_rwlock_unlock(&rwlock); return ret; } uint32_t sched::get_dl_buffer(uint16_t rnti) { uint32_t ret = 0; - if (ue_db.count(rnti)) { + pthread_rwlock_rdlock(&rwlock); + if (ue_db.count(rnti)) { ret = ue_db[rnti].get_pending_dl_new_data(current_tti); } else { Error("User rnti=0x%x not found\n", rnti); } + pthread_rwlock_unlock(&rwlock); return ret; } uint32_t sched::get_ul_buffer(uint16_t rnti) { uint32_t ret = 0; - if (ue_db.count(rnti)) { + pthread_rwlock_rdlock(&rwlock); + if (ue_db.count(rnti)) { ret = ue_db[rnti].get_pending_ul_new_data(current_tti); } else { Error("User rnti=0x%x not found\n", rnti); } + pthread_rwlock_unlock(&rwlock); return ret; } int sched::dl_rlc_buffer_state(uint16_t rnti, uint32_t lc_id, uint32_t tx_queue, uint32_t retx_queue) { int ret = 0; + pthread_rwlock_rdlock(&rwlock); if (ue_db.count(rnti)) { ue_db[rnti].dl_buffer_state(lc_id, tx_queue, retx_queue); } else { Error("User rnti=0x%x not found\n", rnti); ret = -1; } + pthread_rwlock_unlock(&rwlock); return ret; } int sched::dl_mac_buffer_state(uint16_t rnti, uint32_t ce_code) { int ret = 0; - if (ue_db.count(rnti)) { + pthread_rwlock_rdlock(&rwlock); + if (ue_db.count(rnti)) { ue_db[rnti].mac_buffer_state(ce_code); } else { Error("User rnti=0x%x not found\n", rnti); ret = -1; } + pthread_rwlock_unlock(&rwlock); return ret; } int sched::dl_ant_info(uint16_t rnti, LIBLTE_RRC_ANTENNA_INFO_DEDICATED_STRUCT *dl_ant_info) { int ret = 0; + pthread_rwlock_rdlock(&rwlock); if (ue_db.count(rnti)) { ue_db[rnti].set_dl_ant_info(dl_ant_info); } else { Error("User rnti=0x%x not found\n", rnti); ret = -1; } + pthread_rwlock_unlock(&rwlock); return ret; } int sched::dl_ack_info(uint32_t tti, uint16_t rnti, uint32_t tb_idx, bool ack) { int ret = 0; - if (ue_db.count(rnti)) { + pthread_rwlock_rdlock(&rwlock); + if (ue_db.count(rnti)) { ret = ue_db[rnti].set_ack_info(tti, tb_idx, ack); } else { Error("User rnti=0x%x not found\n", rnti); ret = -1; } + pthread_rwlock_unlock(&rwlock); return ret; } int sched::ul_crc_info(uint32_t tti, uint16_t rnti, bool crc) { int ret = 0; - if (ue_db.count(rnti)) { + pthread_rwlock_rdlock(&rwlock); + if (ue_db.count(rnti)) { ue_db[rnti].set_ul_crc(tti, crc); } else { Error("User rnti=0x%x not found\n", rnti); ret = -1; } + pthread_rwlock_unlock(&rwlock); return ret; } int sched::dl_ri_info(uint32_t tti, uint16_t rnti, uint32_t cqi_value) { int ret = 0; - if (ue_db.count(rnti)) { + 
pthread_rwlock_rdlock(&rwlock); + if (ue_db.count(rnti)) { ue_db[rnti].set_dl_ri(tti, cqi_value); } else { Error("User rnti=0x%x not found\n", rnti); ret = -1; } + pthread_rwlock_unlock(&rwlock); return ret; } int sched::dl_pmi_info(uint32_t tti, uint16_t rnti, uint32_t pmi_value) { int ret = 0; + pthread_rwlock_rdlock(&rwlock); if (ue_db.count(rnti)) { ue_db[rnti].set_dl_pmi(tti, pmi_value); } else { Error("User rnti=0x%x not found\n", rnti); ret = -1; } + pthread_rwlock_unlock(&rwlock); return ret; } int sched::dl_cqi_info(uint32_t tti, uint16_t rnti, uint32_t cqi_value) { int ret = 0; + pthread_rwlock_rdlock(&rwlock); if (ue_db.count(rnti)) { ue_db[rnti].set_dl_cqi(tti, cqi_value); } else { Error("User rnti=0x%x not found\n", rnti); ret = -1; } + pthread_rwlock_unlock(&rwlock); return ret; } @@ -345,79 +385,93 @@ int sched::dl_rach_info(uint32_t tti, uint32_t ra_id, uint16_t rnti, uint32_t es int sched::ul_cqi_info(uint32_t tti, uint16_t rnti, uint32_t cqi, uint32_t ul_ch_code) { int ret = 0; - if (ue_db.count(rnti)) { + pthread_rwlock_rdlock(&rwlock); + if (ue_db.count(rnti)) { ue_db[rnti].set_ul_cqi(tti, cqi, ul_ch_code); } else { Error("User rnti=0x%x not found\n", rnti); ret = -1; } + pthread_rwlock_unlock(&rwlock); return ret; } int sched::ul_bsr(uint16_t rnti, uint32_t lcid, uint32_t bsr, bool set_value) { int ret = 0; - if (ue_db.count(rnti)) { + pthread_rwlock_rdlock(&rwlock); + if (ue_db.count(rnti)) { ue_db[rnti].ul_buffer_state(lcid, bsr, set_value); } else { Error("User rnti=0x%x not found\n", rnti); ret = -1; } + pthread_rwlock_unlock(&rwlock); return ret; } int sched::ul_recv_len(uint16_t rnti, uint32_t lcid, uint32_t len) { int ret = 0; - if (ue_db.count(rnti)) { + pthread_rwlock_rdlock(&rwlock); + if (ue_db.count(rnti)) { ue_db[rnti].ul_recv_len(lcid, len); } else { Error("User rnti=0x%x not found\n", rnti); ret = -1; } + pthread_rwlock_unlock(&rwlock); return ret; } int sched::ul_phr(uint16_t rnti, int phr) { int ret = 0; - if (ue_db.count(rnti)) { + pthread_rwlock_rdlock(&rwlock); + if (ue_db.count(rnti)) { ue_db[rnti].ul_phr(phr); } else { Error("User rnti=0x%x not found\n", rnti); ret = -1; } + pthread_rwlock_unlock(&rwlock); return ret; } int sched::ul_sr_info(uint32_t tti, uint16_t rnti) { int ret = 0; - if (ue_db.count(rnti)) { + pthread_rwlock_rdlock(&rwlock); + if (ue_db.count(rnti)) { ue_db[rnti].set_sr();; } else { Error("User rnti=0x%x not found\n", rnti); ret = -1; } + pthread_rwlock_unlock(&rwlock); return ret; } void sched::tpc_inc(uint16_t rnti) { - if (ue_db.count(rnti)) { + pthread_rwlock_rdlock(&rwlock); + if (ue_db.count(rnti)) { ue_db[rnti].tpc_inc(); } else { Error("User rnti=0x%x not found\n", rnti); } + pthread_rwlock_unlock(&rwlock); } void sched::tpc_dec(uint16_t rnti) { - if (ue_db.count(rnti)) { + pthread_rwlock_rdlock(&rwlock); + if (ue_db.count(rnti)) { ue_db[rnti].tpc_dec(); } else { Error("User rnti=0x%x not found\n", rnti); } + pthread_rwlock_unlock(&rwlock); } /******************************************************* @@ -714,6 +768,8 @@ int sched::dl_sched(uint32_t tti, sched_interface::dl_sched_res_t* sched_result) rar_aggr_level = 2; bzero(sched_result, sizeof(sched_interface::dl_sched_res_t)); + pthread_rwlock_rdlock(&rwlock); + /* Schedule Broadcast data */ sched_result->nof_bc_elems += dl_sched_bc(sched_result->bc); @@ -722,7 +778,9 @@ int sched::dl_sched(uint32_t tti, sched_interface::dl_sched_res_t* sched_result) /* Schedule pending RLC data */ sched_result->nof_data_elems += dl_sched_data(sched_result->data); - + + 
pthread_rwlock_unlock(&rwlock); + /* Set CFI */ sched_result->cfi = current_cfi; @@ -733,10 +791,16 @@ int sched::dl_sched(uint32_t tti, sched_interface::dl_sched_res_t* sched_result) int sched::ul_sched(uint32_t tti, srsenb::sched_interface::ul_sched_res_t* sched_result) { typedef std::map::iterator it_t; + if (!configured) { return 0; } + if (cfg.prach_freq_offset + 6 > cfg.cell.nof_prb) { + fprintf(stderr, "Invalid PRACH configuration: frequency offset=%d outside bandwidth limits\n", cfg.prach_freq_offset); + return -1; + } + /* If dl_sched() not yet called this tti (this tti is +4ms advanced), reset CCE state */ if (TTI_TX(current_tti) != tti) { bzero(used_cce, MAX_CCE*sizeof(bool)); @@ -757,6 +821,8 @@ int sched::ul_sched(uint32_t tti, srsenb::sched_interface::ul_sched_res_t* sched bzero(sched_result, sizeof(sched_interface::ul_sched_res_t)); ul_metric->reset_allocation(cfg.cell.nof_prb); + pthread_rwlock_rdlock(&rwlock); + // Get HARQ process for this TTI for(it_t iter=ue_db.begin(); iter!=ue_db.end(); ++iter) { sched_ue *user = (sched_ue*) &iter->second; @@ -819,10 +885,6 @@ int sched::ul_sched(uint32_t tti, srsenb::sched_interface::ul_sched_res_t* sched ul_harq_proc::ul_alloc_t prach = {cfg.prach_freq_offset, 6}; if(!ul_metric->update_allocation(prach)) { log_h->warning("SCHED: Failed to allocate PRACH RBs within (%d,%d)\n", prach.RB_start, prach.RB_start + prach.L); - if (prach.RB_start + prach.L > cfg.cell.nof_prb) { - fprintf(stderr, "Invalid PRACH configuration: frequency offset=%d outside bandwidth limits\n", cfg.prach_freq_offset); - return -1; - } } else { log_h->debug("SCHED: Allocated PRACH RBs within (%d,%d)\n", prach.RB_start, prach.RB_start + prach.L); @@ -931,6 +993,8 @@ int sched::ul_sched(uint32_t tti, srsenb::sched_interface::ul_sched_res_t* sched user->get_ul_harq(current_tti)->reset_pending_data(); } + pthread_rwlock_unlock(&rwlock); + sched_result->nof_dci_elems = nof_dci_elems; sched_result->nof_phich_elems = nof_phich_elems; From 1e61dbceff054407a49b63a04725bc8ed42a4369 Mon Sep 17 00:00:00 2001 From: Ismael Gomez Date: Sat, 7 Jul 2018 12:43:50 +0200 Subject: [PATCH 2/5] Set initial AGC gain correctly --- lib/src/phy/rf/rf_uhd_imp.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/src/phy/rf/rf_uhd_imp.c b/lib/src/phy/rf/rf_uhd_imp.c index dc1bf6415..7fe291497 100644 --- a/lib/src/phy/rf/rf_uhd_imp.c +++ b/lib/src/phy/rf/rf_uhd_imp.c @@ -583,7 +583,7 @@ int rf_uhd_open_multi(char *args, void **h, uint32_t nof_channels) uhd_tx_metadata_make(&handler->tx_md, false, 0, 0, false, false); // Set starting gain to half maximum in case of using AGC - rf_uhd_set_rx_gain(handler, handler->info.max_tx_gain*0.7); + rf_uhd_set_rx_gain(handler, handler->info.max_rx_gain*0.7); #if HAVE_ASYNC_THREAD if (start_async_thread) { From 005fe87ae98952ed9e8eaeeb3e1e01351cc0f47d Mon Sep 17 00:00:00 2001 From: Ismael Gomez Date: Mon, 9 Jul 2018 00:26:58 +0200 Subject: [PATCH 3/5] RLC reestablish to re-enable tx_enabled --- lib/include/srslte/upper/rlc_common.h | 1 + lib/include/srslte/upper/rlc_tm.h | 1 + lib/include/srslte/upper/rlc_um.h | 1 + lib/src/upper/rlc.cc | 6 +----- lib/src/upper/rlc_am.cc | 6 ++++++ lib/src/upper/rlc_entity.cc | 2 +- lib/src/upper/rlc_tm.cc | 6 ++++++ lib/src/upper/rlc_um.cc | 6 ++++++ 8 files changed, 23 insertions(+), 6 deletions(-) diff --git a/lib/include/srslte/upper/rlc_common.h b/lib/include/srslte/upper/rlc_common.h index a5b2e0403..c9655efae 100644 --- a/lib/include/srslte/upper/rlc_common.h +++ 
b/lib/include/srslte/upper/rlc_common.h @@ -163,6 +163,7 @@ public: srslte::mac_interface_timers *mac_timers_) = 0; virtual void configure(srslte_rlc_config_t cnfg) = 0; virtual void stop() = 0; + virtual void reestablish() = 0; virtual void empty_queue() = 0; virtual rlc_mode_t get_mode() = 0; diff --git a/lib/include/srslte/upper/rlc_tm.h b/lib/include/srslte/upper/rlc_tm.h index 774011292..d78ab59c4 100644 --- a/lib/include/srslte/upper/rlc_tm.h +++ b/lib/include/srslte/upper/rlc_tm.h @@ -49,6 +49,7 @@ public: mac_interface_timers *mac_timers); void configure(srslte_rlc_config_t cnfg); void stop(); + void reestablish(); void empty_queue(); rlc_mode_t get_mode(); diff --git a/lib/include/srslte/upper/rlc_um.h b/lib/include/srslte/upper/rlc_um.h index 864edaded..f099f60cf 100644 --- a/lib/include/srslte/upper/rlc_um.h +++ b/lib/include/srslte/upper/rlc_um.h @@ -57,6 +57,7 @@ public: srsue::rrc_interface_rlc *rrc_, mac_interface_timers *mac_timers_); void configure(srslte_rlc_config_t cnfg); + void reestablish(); void stop(); void empty_queue(); bool is_mrb(); diff --git a/lib/src/upper/rlc.cc b/lib/src/upper/rlc.cc index 7fa514e08..c24f0076b 100644 --- a/lib/src/upper/rlc.cc +++ b/lib/src/upper/rlc.cc @@ -130,11 +130,7 @@ void rlc::reestablish() { // defaul lcid=0 is created void rlc::reset() { - for(uint32_t i=0; ideallocate(buf); } + tx_sdu_queue.reset(); +} + +void rlc_am::reestablish() { + stop(); + tx_enabled = true; } void rlc_am::stop() diff --git a/lib/src/upper/rlc_entity.cc b/lib/src/upper/rlc_entity.cc index 4d0573043..783a14982 100644 --- a/lib/src/upper/rlc_entity.cc +++ b/lib/src/upper/rlc_entity.cc @@ -87,7 +87,7 @@ void rlc_entity::configure(srslte_rlc_config_t cnfg) // Reestablishment stops the entity but does not destroy it. Mode will not change void rlc_entity::reestablish() { - rlc->stop(); + rlc->reestablish(); } // A call to stop() stops the entity and clears deletes the instance. Next time this entity can be used for other mode. 
diff --git a/lib/src/upper/rlc_tm.cc b/lib/src/upper/rlc_tm.cc index c24c1ce29..988acf2af 100644 --- a/lib/src/upper/rlc_tm.cc +++ b/lib/src/upper/rlc_tm.cc @@ -68,6 +68,12 @@ void rlc_tm::empty_queue() while(ul_queue.try_read(&buf)) { pool->deallocate(buf); } + ul_queue.reset(); +} + +void rlc_tm::reestablish() { + stop(); + tx_enabled = true; } void rlc_tm::stop() diff --git a/lib/src/upper/rlc_um.cc b/lib/src/upper/rlc_um.cc index 3ba471724..77eab5270 100644 --- a/lib/src/upper/rlc_um.cc +++ b/lib/src/upper/rlc_um.cc @@ -122,6 +122,7 @@ void rlc_um::empty_queue() { while(tx_sdu_queue.try_read(&buf)) { pool->deallocate(buf); } + tx_sdu_queue.reset(); } bool rlc_um::is_mrb() @@ -129,6 +130,11 @@ bool rlc_um::is_mrb() return cfg.is_mrb; } +void rlc_um::reestablish() { + stop(); + tx_enabled = true; +} + void rlc_um::stop() { // Empty tx_sdu_queue before locking the mutex From 074db8478cb8288b55ea962918482e2b3c450d9e Mon Sep 17 00:00:00 2001 From: Ismael Gomez Date: Tue, 10 Jul 2018 19:52:46 +0200 Subject: [PATCH 4/5] deallocate dedicatedInfoNAS in rrc --- lib/include/srslte/common/buffer_pool.h | 2 +- srsue/src/upper/nas.cc | 1 - srsue/src/upper/rrc.cc | 6 ++++++ 3 files changed, 7 insertions(+), 2 deletions(-) diff --git a/lib/include/srslte/common/buffer_pool.h b/lib/include/srslte/common/buffer_pool.h index 75c56fc0e..0a87c0df1 100644 --- a/lib/include/srslte/common/buffer_pool.h +++ b/lib/include/srslte/common/buffer_pool.h @@ -183,7 +183,7 @@ public: } b->reset(); if (!pool->deallocate(b)) { - fprintf(stderr, "Error deallocating PDU: Addr=0x%lx not found in pool\n", (uint64_t) b); + printf("Error deallocating PDU: Addr=0x%lx not found in pool\n", (uint64_t) b); } b = NULL; } diff --git a/srsue/src/upper/nas.cc b/srsue/src/upper/nas.cc index 2e491b733..6efcfc9a6 100644 --- a/srsue/src/upper/nas.cc +++ b/srsue/src/upper/nas.cc @@ -250,7 +250,6 @@ bool nas::rrc_connect() { } } else { nas_log->error("Could not establish RRC connection\n"); - pool->deallocate(dedicatedInfoNAS); } return false; } diff --git a/srsue/src/upper/rrc.cc b/srsue/src/upper/rrc.cc index 66a7de33e..aa6cb831a 100644 --- a/srsue/src/upper/rrc.cc +++ b/srsue/src/upper/rrc.cc @@ -526,6 +526,12 @@ bool rrc::connection_request(LIBLTE_RRC_CON_REQ_EST_CAUSE_ENUM cause, } } + if (!ret) { + rrc_log->warning("Could not estblish connection. Deallocating dedicatedInfoNAS PDU\n"); + pool->deallocate(this->dedicatedInfoNAS); + this->dedicatedInfoNAS = NULL; + } + pthread_mutex_unlock(&mutex); return ret; } From 9231bd0fe0266563afc57c0a3d773e692461ce91 Mon Sep 17 00:00:00 2001 From: Ismael Gomez Date: Wed, 11 Jul 2018 14:49:39 +0200 Subject: [PATCH 5/5] Fix Aperiodic CQI retx when TBS=0 --- srsue/hdr/mac/ul_harq.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/srsue/hdr/mac/ul_harq.h b/srsue/hdr/mac/ul_harq.h index 6fb27be8a..d62a94aba 100644 --- a/srsue/hdr/mac/ul_harq.h +++ b/srsue/hdr/mac/ul_harq.h @@ -227,7 +227,7 @@ private: if (grant->has_cqi_request && grant->phy_grant.ul.mcs.tbs == 0) { /* Only CQI reporting (without SCH) */ memcpy(&action->phy_grant.ul, &grant->phy_grant.ul, sizeof(srslte_ra_ul_grant_t)); - memcpy(&cur_grant, grant, sizeof(Tgrant)); + //memcpy(&cur_grant, grant, sizeof(Tgrant)); action->tx_enabled = true; action->rnti = grant->rnti; } else if ((!(grant->rnti_type == SRSLTE_RNTI_TEMP) && grant->ndi[0] != get_ndi() && harq_feedback) ||
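
Note on PATCH 1/5: the locking scheme added to sched follows one pattern throughout scheduler.cc. Calls that only look up or update an existing entry of ue_db (dl_ack_info, ul_crc_info, the CQI/BSR/PHR handlers, dl_sched, ul_sched) take the read lock, while reset() and ue_rem(), which clear or erase map entries, take the write lock. The sketch below is only an illustration of that pattern, not srsLTE code: the class and member names are simplified stand-ins, and it takes the write lock when inserting a user, whereas the patch itself guards ue_cfg() with the read lock.

// Standalone sketch of the ue_db locking pattern from PATCH 1/5 (illustration only).
// Names are stand-ins for the srsenb scheduler classes.
#include <pthread.h>
#include <stdint.h>
#include <cstdio>
#include <map>

struct ue_ctxt {
  uint32_t dl_buffer;
  ue_ctxt() : dl_buffer(0) {}
};

class ue_db_guarded {
public:
  ue_db_guarded()  { pthread_rwlock_init(&rwlock, NULL); }
  ~ue_db_guarded() { pthread_rwlock_destroy(&rwlock); }

  // Adding or removing a user changes the map structure -> write lock.
  // (The patch takes the write lock in ue_rem()/reset(); using it for insertion
  //  as well is a conservative choice made only in this sketch.)
  void add_user(uint16_t rnti) {
    pthread_rwlock_wrlock(&rwlock);
    ue_db[rnti] = ue_ctxt();
    pthread_rwlock_unlock(&rwlock);
  }
  void rem_user(uint16_t rnti) {
    pthread_rwlock_wrlock(&rwlock);
    ue_db.erase(rnti);
    pthread_rwlock_unlock(&rwlock);
  }

  // Per-UE state updates leave the map structure untouched -> read lock,
  // mirroring the rdlock/unlock pairs added around the ue_db accesses in the patch.
  int set_dl_buffer(uint16_t rnti, uint32_t bytes) {
    int ret = 0;
    pthread_rwlock_rdlock(&rwlock);
    std::map<uint16_t, ue_ctxt>::iterator it = ue_db.find(rnti);
    if (it != ue_db.end()) {
      it->second.dl_buffer = bytes;
    } else {
      fprintf(stderr, "User rnti=0x%x not found\n", rnti);
      ret = -1;
    }
    pthread_rwlock_unlock(&rwlock);
    return ret;
  }

private:
  pthread_rwlock_t rwlock;
  std::map<uint16_t, ue_ctxt> ue_db;
};

int main() {
  ue_db_guarded db;
  db.add_user(0x46);          // structural change under the write lock
  db.set_dl_buffer(0x46, 128); // per-UE update under the read lock
  db.rem_user(0x46);
  return 0;
}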
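
Note on PATCH 3/5: each RLC mode (TM, UM, AM) gains a reestablish() that reuses stop() to flush its queues and state and then sets tx_enabled back to true, so rlc_entity::reestablish() can call it instead of stop() and keep the entity usable. A minimal sketch of that split follows; it uses a stand-in class and a plain std::queue rather than the srsLTE types.

// Minimal sketch of the reestablish-vs-stop split from PATCH 3/5 (illustration only).
// rlc_mode_sketch stands in for rlc_tm / rlc_um / rlc_am.
#include <queue>

class rlc_mode_sketch {
public:
  rlc_mode_sketch() : tx_enabled(true) {}

  // stop(): drop all pending SDUs and disable transmission.
  void stop() {
    while (!tx_sdu_queue.empty()) {
      tx_sdu_queue.pop();
    }
    tx_enabled = false;
  }

  // reestablish(): same cleanup as stop(), but transmission is re-enabled,
  // which is what rlc_entity::reestablish() relies on after the patch.
  void reestablish() {
    stop();
    tx_enabled = true;
  }

  bool can_tx() const { return tx_enabled; }

private:
  std::queue<int> tx_sdu_queue;
  bool tx_enabled;
};

int main() {
  rlc_mode_sketch rlc;
  rlc.stop();         // on release: entity flushed, TX disabled
  rlc.reestablish();  // on re-establishment: flushed again, TX enabled
  return rlc.can_tx() ? 0 : 1;
}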