From a73cbcdc9d2c1ca32a1d596e55d82195d357f653 Mon Sep 17 00:00:00 2001
From: Francisco Paisana
Date: Fri, 8 Jan 2021 14:59:32 +0000
Subject: [PATCH] added mem_pool for growing object pools. Applied the mem pool
 to the rrc::ue creation

---
 lib/include/srslte/adt/mem_pool.h | 163 ++++++++++++++++++++++++++++++
 srsenb/hdr/stack/rrc/rrc.h        |   9 +-
 srsenb/src/stack/rrc/rrc.cc       |   3 +-
 srsenb/src/stack/rrc/rrc_ue.cc    |  16 +--
 4 files changed, 179 insertions(+), 12 deletions(-)
 create mode 100644 lib/include/srslte/adt/mem_pool.h

diff --git a/lib/include/srslte/adt/mem_pool.h b/lib/include/srslte/adt/mem_pool.h
new file mode 100644
index 000000000..87066487e
--- /dev/null
+++ b/lib/include/srslte/adt/mem_pool.h
@@ -0,0 +1,163 @@
+/**
+ *
+ * \section COPYRIGHT
+ *
+ * Copyright 2013-2020 Software Radio Systems Limited
+ *
+ * By using this file, you agree to the terms and conditions set
+ * forth in the LICENSE file which can be found at the top level of
+ * the distribution.
+ *
+ */
+
+#ifndef SRSLTE_MEM_POOL_H
+#define SRSLTE_MEM_POOL_H
+
+#include <cstdint>
+#include <memory>
+#include <mutex>
+
+namespace srslte {
+
+/// Stores provided mem blocks in a stack in a non-owning manner. Not thread-safe
+class memblock_stack
+{
+public:
+  memblock_stack() = default;
+
+  memblock_stack(const memblock_stack&) = delete;
+
+  memblock_stack(memblock_stack&& other) noexcept : head(other.head) { other.head = nullptr; }
+
+  memblock_stack& operator=(const memblock_stack&) = delete;
+
+  memblock_stack& operator=(memblock_stack&& other) noexcept
+  {
+    head       = other.head;
+    other.head = nullptr;
+    return *this;
+  }
+
+  void push(uint8_t* block) noexcept
+  {
+    // printf("head: %ld\n", (long)head);
+    node* next = ::new (block) node(head);
+    head       = next;
+  }
+
+  uint8_t* try_pop() noexcept
+  {
+    if (is_empty()) {
+      return nullptr;
+    }
+    node* last_head = head;
+    head            = head->prev;
+    return (uint8_t*)last_head;
+  }
+
+  bool is_empty() const { return head == nullptr; }
+
+  void clear() { head = nullptr; }
+
+private:
+  struct node {
+    node* prev;
+
+    explicit node(node* prev_) : prev(prev_) {}
+  };
+
+  node* head = nullptr;
+};
+
+/// memblock stack that mutexes pushing/popping
+class mutexed_memblock_stack
+{
+public:
+  mutexed_memblock_stack() = default;
+
+  mutexed_memblock_stack(const mutexed_memblock_stack&) = delete;
+
+  mutexed_memblock_stack(mutexed_memblock_stack&& other) noexcept
+  {
+    std::unique_lock<std::mutex> lk1(other.mutex, std::defer_lock);
+    std::unique_lock<std::mutex> lk2(mutex, std::defer_lock);
+    std::lock(lk1, lk2);
+    stack = std::move(other.stack);
+  }
+
+  mutexed_memblock_stack& operator=(const mutexed_memblock_stack&) = delete;
+
+  mutexed_memblock_stack& operator=(mutexed_memblock_stack&& other) noexcept
+  {
+    std::unique_lock<std::mutex> lk1(other.mutex, std::defer_lock);
+    std::unique_lock<std::mutex> lk2(mutex, std::defer_lock);
+    std::lock(lk1, lk2);
+    stack = std::move(other.stack);
+    return *this;
+  }
+
+  void push(uint8_t* block) noexcept
+  {
+    // auto t = time_prof(push_telapsed);
+    std::lock_guard<std::mutex> lock(mutex);
+    stack.push(block);
+  }
+
+  uint8_t* try_pop() noexcept
+  {
+    // auto t = time_prof(pop_telapsed);
+    std::lock_guard<std::mutex> lock(mutex);
+    uint8_t* block = stack.try_pop();
+    return block;
+  }
+
+  bool is_empty() const noexcept { return stack.is_empty(); }
+
+  void clear()
+  {
+    std::lock_guard<std::mutex> lock(mutex);
+    stack.clear();
+  }
+
+private:
+  memblock_stack stack;
+  std::mutex     mutex;
+};
+
+template <typename T>
+class single_thread_obj_pool
+{
+public:
+  /// single-thread obj pool deleter
+  struct obj_deleter {
+    explicit obj_deleter(single_thread_obj_pool<T>* pool_) : pool(pool_) {}
+    void operator()(void* block)
+    {
+      // run the destructor before handing the memory block back to the pool
+      static_cast<T*>(block)->~T();
+      pool->stack.push(static_cast<uint8_t*>(block));
+    }
+    single_thread_obj_pool<T>* pool;
+  };
+
+  using obj_ptr = std::unique_ptr<T, obj_deleter>;
+
+  /// allocate object
+  template <typename... Args>
+  obj_ptr make(Args&&... args)
+  {
+    uint8_t* block = stack.try_pop();
+    if (block == nullptr) {
+      block = new uint8_t[sizeof(T)];
+    }
+    new (block) T(std::forward<Args>(args)...);
+    return obj_ptr(reinterpret_cast<T*>(block), obj_deleter(this));
+  }
+
+  void reserve(size_t N)
+  {
+    for (size_t i = 0; i < N; ++i) {
+      stack.push(new uint8_t[sizeof(T)]);
+    }
+  }
+
+private:
+  memblock_stack stack;
+};
+template <typename T>
+using unique_pool_obj = typename single_thread_obj_pool<T>::obj_ptr;
+
+} // namespace srslte
+
+#endif // SRSLTE_MEM_POOL_H
diff --git a/srsenb/hdr/stack/rrc/rrc.h b/srsenb/hdr/stack/rrc/rrc.h
index 5c71a10bf..ed68daf77 100644
--- a/srsenb/hdr/stack/rrc/rrc.h
+++ b/srsenb/hdr/stack/rrc/rrc.h
@@ -17,6 +17,7 @@
 #include "rrc_cell_cfg.h"
 #include "rrc_metrics.h"
 #include "srsenb/hdr/stack/upper/common_enb.h"
+#include "srslte/adt/mem_pool.h"
 #include "srslte/common/block_queue.h"
 #include "srslte/common/buffer_pool.h"
 #include "srslte/common/common.h"
@@ -148,9 +149,9 @@ private:
   std::unique_ptr<enb_cell_common_list> cell_common_list;
 
   // state
-  std::unique_ptr<freq_res_common_list>          cell_res_list;
-  std::map<uint16_t, std::unique_ptr<ue> >       users; // NOTE: has to have fixed addr
-  std::map<uint32_t, asn1::rrc::paging_record_s> pending_paging;
+  std::unique_ptr<freq_res_common_list>            cell_res_list;
+  std::map<uint16_t, srslte::unique_pool_obj<ue> > users; // NOTE: has to have fixed addr
+  std::map<uint32_t, asn1::rrc::paging_record_s>   pending_paging;
 
   void process_release_complete(uint16_t rnti);
   void rem_user(uint16_t rnti);
@@ -190,6 +191,8 @@ private:
   void rem_user_thread(uint16_t rnti);
 
   std::mutex paging_mutex;
+
+  srslte::single_thread_obj_pool<ue> ue_pool;
 };
 
 } // namespace srsenb
diff --git a/srsenb/src/stack/rrc/rrc.cc b/srsenb/src/stack/rrc/rrc.cc
index 94d3514c7..a7a69297b 100644
--- a/srsenb/src/stack/rrc/rrc.cc
+++ b/srsenb/src/stack/rrc/rrc.cc
@@ -31,6 +31,7 @@ namespace srsenb {
 rrc::rrc(srslte::task_sched_handle task_sched_) : rrc_log("RRC"), task_sched(task_sched_)
 {
   pending_paging.clear();
+  ue_pool.reserve(10);
 }
 
 rrc::~rrc() {}
@@ -150,7 +151,7 @@ void rrc::add_user(uint16_t rnti, const sched_interface::ue_cfg_t& sched_ue_cfg)
   bool rnti_added = true;
   if (rnti != SRSLTE_MRNTI) {
     // only non-eMBMS RNTIs are present in user map
-    auto p     = users.insert(std::make_pair(rnti, std::unique_ptr<ue>(new ue{this, rnti, sched_ue_cfg})));
+    auto p     = users.insert(std::make_pair(rnti, ue_pool.make(this, rnti, sched_ue_cfg)));
     rnti_added = p.second and p.first->second->is_allocated();
   }
   if (rnti_added) {
diff --git a/srsenb/src/stack/rrc/rrc_ue.cc b/srsenb/src/stack/rrc/rrc_ue.cc
index 1df46ea74..6ffaaf314 100644
--- a/srsenb/src/stack/rrc/rrc_ue.cc
+++ b/srsenb/src/stack/rrc/rrc_ue.cc
@@ -347,18 +347,18 @@ void rrc::ue::handle_rrc_con_reest_req(rrc_conn_reest_request_s* msg)
            old_rnti);
 
   // Cancel Handover in Target eNB if on-going
-  parent->users[old_rnti]->mobility_handler->trigger(rrc_mobility::ho_cancel_ev{});
+  parent->users.at(old_rnti)->mobility_handler->trigger(rrc_mobility::ho_cancel_ev{});
 
   // Recover security setup
   const enb_cell_common* pcell_cfg = get_ue_cc_cfg(UE_PCELL_CC_IDX);
-  ue_security_cfg                  = parent->users[old_rnti]->ue_security_cfg;
+  ue_security_cfg                  = parent->users.at(old_rnti)->ue_security_cfg;
   ue_security_cfg.regenerate_keys_handover(pcell_cfg->cell_cfg.pci, pcell_cfg->cell_cfg.dl_earfcn);
 
   // send reestablishment and restore bearer configuration
-  send_connection_reest(parent->users[old_rnti]->ue_security_cfg.get_ncc());
+  send_connection_reest(parent->users.at(old_rnti)->ue_security_cfg.get_ncc());
 
   // Get PDCP entity state (required when using RLC AM)
-  for (const auto& erab_pair : parent->users[old_rnti]->bearer_list.get_erabs()) {
+  for (const auto& erab_pair : parent->users.at(old_rnti)->bearer_list.get_erabs()) {
     uint16_t lcid              = erab_pair.second.id - 2;
     old_reest_pdcp_state[lcid] = {};
     parent->pdcp->get_bearer_state(old_rnti, lcid, &old_reest_pdcp_state[lcid]);
@@ -374,9 +374,9 @@ void rrc::ue::handle_rrc_con_reest_req(rrc_conn_reest_request_s* msg)
   }
 
   // Make sure UE capabilities are copied over to new RNTI
-  eutra_capabilities          = parent->users[old_rnti]->eutra_capabilities;
-  eutra_capabilities_unpacked = parent->users[old_rnti]->eutra_capabilities_unpacked;
-  ue_capabilities             = parent->users[old_rnti]->ue_capabilities;
+  eutra_capabilities          = parent->users.at(old_rnti)->eutra_capabilities;
+  eutra_capabilities_unpacked = parent->users.at(old_rnti)->eutra_capabilities_unpacked;
+  ue_capabilities             = parent->users.at(old_rnti)->ue_capabilities;
   if (parent->rrc_log->get_level() == srslte::LOG_LEVEL_DEBUG) {
     asn1::json_writer js{};
     eutra_capabilities.to_json(js);
@@ -448,7 +448,7 @@ void rrc::ue::handle_rrc_con_reest_complete(rrc_conn_reest_complete_s* msg, srsl
   parent->pdcp->enable_encryption(rnti, RB_ID_SRB1);
 
   // Reestablish current DRBs during ConnectionReconfiguration
-  for (const auto& erab_pair : parent->users[old_reest_rnti]->bearer_list.get_erabs()) {
+  for (const auto& erab_pair : parent->users.at(old_reest_rnti)->bearer_list.get_erabs()) {
     const bearer_cfg_handler::erab_t& erab = erab_pair.second;
     bearer_list.add_erab(erab.id, erab.qos_params, erab.address, erab.teid_out, nullptr);
   }
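
As a quick illustration of the API added in mem_pool.h (a minimal sketch outside the patch itself, using a hypothetical big_object type as a stand-in for rrc::ue): reserve() pre-fills the free-block stack, make() pops a cached block (or heap-allocates one if the pool is empty) and placement-constructs the object, and the unique_pool_obj deleter returns the block to the pool instead of freeing it, mirroring how ue_pool is used in rrc.cc above.

#include "srslte/adt/mem_pool.h"

#include <cstdio>
#include <string>

// Hypothetical payload type standing in for rrc::ue; any constructible type works.
struct big_object {
  big_object(uint32_t rnti_, std::string name_) : rnti(rnti_), name(std::move(name_)) {}
  uint32_t    rnti;
  std::string name;
};

int main()
{
  srslte::single_thread_obj_pool<big_object> pool;
  pool.reserve(4); // pre-allocate 4 blocks, like ue_pool.reserve(10) in rrc::rrc()

  // make() forwards its arguments to big_object's constructor and returns a
  // unique_pool_obj whose deleter pushes the block back onto the pool's stack.
  srslte::unique_pool_obj<big_object> obj = pool.make(0x46u, "user1");
  std::printf("rnti=0x%x name=%s\n", (unsigned)obj->rnti, obj->name.c_str());

  obj.reset(); // block is returned to the pool and can be reused by the next make()
  return 0;
}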