From d1c5b000dcbd7833e2800f5ff257599084f8c671 Mon Sep 17 00:00:00 2001
From: Francisco
Date: Tue, 30 Mar 2021 14:00:20 +0100
Subject: [PATCH] adt - creation of pre-initialized object pool that leverages the background memory pool allocator

---
 lib/include/srsran/adt/circular_buffer.h      |   9 +-
 .../srsran/adt/pool/background_mem_pool.h     | 176 ++++++++++++++++++
 lib/include/srsran/adt/pool/pool_utils.h      |  44 +++++
 lib/test/adt/mem_pool_test.cc                 |  16 ++
 srsenb/hdr/stack/rrc/rrc_ue.h                 |   4 +-
 srsenb/src/stack/rrc/rrc_ue.cc                |   2 +-
 6 files changed, 240 insertions(+), 11 deletions(-)
 create mode 100644 lib/include/srsran/adt/pool/background_mem_pool.h
 create mode 100644 lib/include/srsran/adt/pool/pool_utils.h

diff --git a/lib/include/srsran/adt/circular_buffer.h b/lib/include/srsran/adt/circular_buffer.h
index 851366898..a88ef43c0 100644
--- a/lib/include/srsran/adt/circular_buffer.h
+++ b/lib/include/srsran/adt/circular_buffer.h
@@ -15,6 +15,7 @@
 
 #include "srsran/adt/detail/type_storage.h"
 #include "srsran/adt/expected.h"
+#include "srsran/adt/pool/pool_utils.h"
 #include "srsran/common/srsran_assert.h"
 
 #include
@@ -231,14 +232,6 @@ protected:
   size_t count = 0;
 };
 
-struct noop_operator {
-  template <typename T>
-  void operator()(const T&)
-  {
-    // noop
-  }
-};
-
 /**
  * Base common class for definition of blocking queue data structures with the following features:
  * - it stores pushed/popped samples in an internal circular buffer
diff --git a/lib/include/srsran/adt/pool/background_mem_pool.h b/lib/include/srsran/adt/pool/background_mem_pool.h
new file mode 100644
index 000000000..4af01f7ff
--- /dev/null
+++ b/lib/include/srsran/adt/pool/background_mem_pool.h
@@ -0,0 +1,176 @@
+/**
+ *
+ * \section COPYRIGHT
+ *
+ * Copyright 2013-2021 Software Radio Systems Limited
+ *
+ * By using this file, you agree to the terms and conditions set
+ * forth in the LICENSE file which can be found at the top level of
+ * the distribution.
+ *
+ */
+
+#ifndef SRSRAN_BACKGROUND_MEM_POOL_H
+#define SRSRAN_BACKGROUND_MEM_POOL_H
+
+#include "memblock_cache.h"
+#include "pool_utils.h"
+#include "srsran/common/srsran_assert.h"
+#include "srsran/common/thread_pool.h"
+#include <array>
+#include <mutex>
+#include <vector>
+
+namespace srsran {
+
+namespace detail {
+
+/**
+ * Pool specialized in allocating batches of objects in a preemptive way in a background thread to minimize latency.
+ * Note: The current implementation assumes that the pool object will outlive the background callbacks that allocate
+ * new batches.
+ * @tparam T individual object type that is being allocated
+ * @tparam BatchSize number of T objects in a batch
+ * @tparam ThresholdSize number of T objects below which a new batch needs to be allocated
+ */
+template <typename T,
+          size_t BatchSize,
+          size_t ThresholdSize,
+          typename CtorFunc    = default_ctor_operator<T>,
+          typename RecycleFunc = noop_operator>
+class base_background_pool
+{
+  static_assert(ThresholdSize > 0, "ThresholdSize needs to be positive");
+  static_assert(BatchSize > 1, "BatchSize needs to be higher than 1");
+
+public:
+  explicit base_background_pool(bool lazy_start = false, CtorFunc ctor_func_ = {}, RecycleFunc recycle_func_ = {}) :
+    ctor_func(ctor_func_), recycle_func(recycle_func_)
+  {
+    if (not lazy_start) {
+      allocate_batch_in_background();
+    }
+  }
+  base_background_pool(base_background_pool&&)      = delete;
+  base_background_pool(const base_background_pool&) = delete;
+  base_background_pool& operator=(base_background_pool&&) = delete;
+  base_background_pool& operator=(const base_background_pool&) = delete;
+  ~base_background_pool()
+  {
+    std::lock_guard<std::mutex> lock(mutex);
+    for (std::unique_ptr<batch_obj_t>& batch : batches) {
+      for (obj_storage_t& obj_store : *batch) {
+        obj_store.destroy();
+      }
+    }
+    batches.clear();
+  }
+
+  /// Allocate space for a new object. If the pool has no pre-reserved memory left, a new batch is allocated in the
+  /// caller's thread.
+  void* allocate_node(size_t sz)
+  {
+    srsran_assert(sz == sizeof(T), "Mismatch of allocated node size=%zd and object size=%zd", sz, sizeof(T));
+    std::lock_guard<std::mutex> lock(mutex);
+    void* block = obj_cache.try_pop();
+
+    if (block != nullptr) {
+      // allocation successful
+      if (obj_cache.size() < ThresholdSize) {
+        get_background_workers().push_task([this]() {
+          std::lock_guard<std::mutex> lock(mutex);
+          allocate_batch_();
+        });
+      }
+      return block;
+    }
+
+    // no cached memory left; try to allocate a new batch in the same thread as the caller.
+    allocate_batch_();
+    return obj_cache.try_pop();
+  }
+
+  void deallocate_node(void* p)
+  {
+    std::lock_guard<std::mutex> lock(mutex);
+    if (p != nullptr) {
+      recycle_func(static_cast<T*>(p));
+      obj_cache.push(static_cast<void*>(p));
+    }
+  }
+
+  void allocate_batch_in_background()
+  {
+    get_background_workers().push_task([this]() {
+      std::lock_guard<std::mutex> lock(mutex);
+      allocate_batch_();
+    });
+  }
+
+private:
+  using obj_storage_t = type_storage<T>;
+  using batch_obj_t   = std::array<obj_storage_t, BatchSize>;
+
+  /// Unprotected allocation of a new batch of objects
+  void allocate_batch_()
+  {
+    batch_obj_t* batch = new (std::nothrow) batch_obj_t();
+    if (batch == nullptr) {
+      srslog::fetch_basic_logger("POOL").warning("Failed to allocate new batch in background thread");
+      return;
+    }
+    batches.emplace_back(batch);
+    for (obj_storage_t& obj_store : *batch) {
+      ctor_func(static_cast<void*>(&obj_store));
+      obj_cache.push(static_cast<void*>(&obj_store));
+    }
+  }
+
+  CtorFunc    ctor_func;
+  RecycleFunc recycle_func;
+
+  // memory stack to cache allocated memory chunks
+  std::mutex                                 mutex;
+  memblock_cache                             obj_cache;
+  std::vector<std::unique_ptr<batch_obj_t> > batches;
+};
+
+} // namespace detail
+
+template <size_t ObjSize, size_t BatchSize, size_t ThresholdSize>
+using background_mem_pool =
+    detail::base_background_pool<typename std::aligned_storage<ObjSize, alignof(std::max_align_t)>::type,
+                                 BatchSize,
+                                 ThresholdSize>;
+
+template <typename T,
+          size_t BatchSize,
+          size_t ThresholdSize,
+          typename CtorFunc    = detail::default_ctor_operator<T>,
+          typename RecycleFunc = detail::noop_operator>
+class background_obj_pool
+{
+  using pool_type     = background_obj_pool<T, BatchSize, ThresholdSize, CtorFunc, RecycleFunc>;
+  using mem_pool_type = detail::base_background_pool<T, BatchSize, ThresholdSize, CtorFunc, RecycleFunc>;
+
+  struct pool_deleter {
+    mem_pool_type* pool;
+    explicit pool_deleter(mem_pool_type* pool_) : pool(pool_) {}
+    void operator()(void* ptr) { pool->deallocate_node(ptr); }
+  };
+
+public:
+  background_obj_pool(CtorFunc&& ctor_func = {}, RecycleFunc&& recycle_func = {}) :
+    pool(false, std::forward<CtorFunc>(ctor_func), std::forward<RecycleFunc>(recycle_func))
+  {}
+
+  unique_pool_ptr<T> allocate_object()
+  {
+    void* ptr = pool.allocate_node(sizeof(T));
+    return std::unique_ptr<T, pool_deleter>(static_cast<T*>(ptr), pool_deleter(&pool));
+  }
+
+private:
+  mem_pool_type pool;
+};
+
+} // namespace srsran
+
+#endif // SRSRAN_BACKGROUND_MEM_POOL_H
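For illustration only (this sketch is not part of the patch): one way client code could use the background_obj_pool defined above, assuming the interfaces introduced in this header and a running srsran::get_background_workers() thread pool. The dummy type and the 64/8 batch and threshold sizes are made-up placeholders.

#include "srsran/adt/pool/background_mem_pool.h"
#include <cstdio>

struct dummy {
  int value = 0;
};

int main()
{
  // One batch of 64 pre-constructed "dummy" objects is reserved up front; a new
  // batch is requested from the background workers once fewer than 8 remain cached.
  srsran::background_obj_pool<dummy, 64, 8> pool;

  // The returned object was already default-constructed when its batch was created.
  srsran::unique_pool_ptr<dummy> obj = pool.allocate_object();
  obj->value = 42;
  std::printf("pooled object value=%d\n", obj->value);

  // When "obj" goes out of scope its memory returns to the pool cache; the stored
  // object itself is only destroyed when the pool is destroyed.
  return 0;
}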
diff --git a/lib/include/srsran/adt/pool/pool_utils.h b/lib/include/srsran/adt/pool/pool_utils.h
new file mode 100644
index 000000000..6ff74acb4
--- /dev/null
+++ b/lib/include/srsran/adt/pool/pool_utils.h
@@ -0,0 +1,44 @@
+/**
+ *
+ * \section COPYRIGHT
+ *
+ * Copyright 2013-2021 Software Radio Systems Limited
+ *
+ * By using this file, you agree to the terms and conditions set
+ * forth in the LICENSE file which can be found at the top level of
+ * the distribution.
+ *
+ */
+
+#ifndef SRSRAN_POOL_UTILS_H
+#define SRSRAN_POOL_UTILS_H
+
+#include "../move_callback.h"
+#include <memory>
+
+namespace srsran {
+
+namespace detail {
+
+template <typename T>
+struct default_ctor_operator {
+  void operator()(void* ptr) { new (ptr) T(); }
+};
+
+struct noop_operator {
+  template <typename T>
+  void operator()(const T&)
+  {
+    // do nothing
+  }
+};
+
+} // namespace detail
+
+/// unique_ptr with a type-erased deleter, so that it can be used by any pool
+template <typename T>
+using unique_pool_ptr = std::unique_ptr<T, move_callback<void(void*)> >;
+
+} // namespace srsran
+
+#endif // SRSRAN_POOL_UTILS_H
diff --git a/lib/test/adt/mem_pool_test.cc b/lib/test/adt/mem_pool_test.cc
index a7f487268..f7089e688 100644
--- a/lib/test/adt/mem_pool_test.cc
+++ b/lib/test/adt/mem_pool_test.cc
@@ -10,6 +10,7 @@
  *
  */
 
+#include "srsran/adt/pool/background_mem_pool.h"
 #include "srsran/adt/pool/fixed_size_pool.h"
 #include "srsran/adt/pool/mem_pool.h"
 #include "srsran/common/test_common.h"
@@ -137,6 +138,20 @@ void test_fixedsize_pool()
     t.join();
   }
   fixed_pool->print_all_buffers();
+  TESTASSERT(C::default_ctor_counter == C::dtor_counter);
+}
+
+void test_background_pool()
+{
+  C::default_ctor_counter = 0;
+  C::dtor_counter         = 0;
+  {
+    srsran::background_obj_pool<C, 16, 4> obj_pool;
+
+    srsran::unique_pool_ptr<C> c = obj_pool.allocate_object();
+    TESTASSERT(C::default_ctor_counter == 16);
+  }
+  TESTASSERT(C::dtor_counter == 16);
 }
 
 int main(int argc, char** argv)
@@ -145,6 +160,7 @@
 
   test_nontrivial_obj_pool();
   test_fixedsize_pool();
+  test_background_pool();
 
   printf("Success\n");
   return 0;
diff --git a/srsenb/hdr/stack/rrc/rrc_ue.h b/srsenb/hdr/stack/rrc/rrc_ue.h
index 21bc01c92..b89b9daec 100644
--- a/srsenb/hdr/stack/rrc/rrc_ue.h
+++ b/srsenb/hdr/stack/rrc/rrc_ue.h
@@ -15,7 +15,7 @@
 
 #include "mac_controller.h"
 #include "rrc.h"
-#include "srsran/adt/pool/mem_pool.h"
+#include "srsran/adt/pool/background_mem_pool.h"
 #include "srsran/interfaces/enb_phy_interfaces.h"
 #include "srsran/interfaces/pdcp_interface_types.h"
 
@@ -153,7 +153,7 @@ public:
   void operator delete(void* ptr)noexcept;
   void operator delete[](void* ptr) = delete;
 
-  using ue_pool_t = srsran::background_allocator_obj_pool;
+  using ue_pool_t = srsran::background_obj_pool;
   static ue_pool_t* get_ue_pool();
 
 private:
diff --git a/srsenb/src/stack/rrc/rrc_ue.cc b/srsenb/src/stack/rrc/rrc_ue.cc
index afd7db63c..6efba1f16 100644
--- a/srsenb/src/stack/rrc/rrc_ue.cc
+++ b/srsenb/src/stack/rrc/rrc_ue.cc
@@ -65,7 +65,7 @@ int rrc::ue::init()
   return SRSRAN_SUCCESS;
 }
 
-srsran::background_allocator_obj_pool* rrc::ue::get_ue_pool()
+srsran::background_obj_pool* rrc::ue::get_ue_pool()
 {
   // Note: batch allocation is going to be explicitly called in enb class construction. The pool object, therefore,
   // will only be initialized if we instantiate an eNB
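As a further hypothetical sketch (again not part of the patch), the CtorFunc/RecycleFunc template parameters could be used to keep pooled objects initialized and to scrub them on release. The pdu_buffer type, the functors and the 32/8 sizes below are invented for illustration, and the sketch assumes the recycle hook receives a pointer to the object, as in deallocate_node() above.

#include "srsran/adt/pool/background_mem_pool.h"
#include <cstddef>
#include <cstring>
#include <new>

struct pdu_buffer {
  unsigned char data[512];
  std::size_t   len = 0;
};

// Invoked once per object slot when a batch is allocated.
struct pdu_ctor {
  void operator()(void* ptr) { new (ptr) pdu_buffer(); }
};

// Invoked every time an object is handed back to the pool.
struct pdu_recycle {
  void operator()(pdu_buffer* pdu)
  {
    pdu->len = 0;
    std::memset(pdu->data, 0, sizeof(pdu->data));
  }
};

int main()
{
  srsran::background_obj_pool<pdu_buffer, 32, 8, pdu_ctor, pdu_recycle> pool;

  srsran::unique_pool_ptr<pdu_buffer> pdu = pool.allocate_object();
  pdu->len = 100;
  // Releasing the pointer triggers pdu_recycle, which scrubs the buffer before
  // the memory is cached for reuse.
  return 0;
}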