From 460d7a8f4f3ef6a87ac9bdeb4b4aaabdfe3ab1eb Mon Sep 17 00:00:00 2001
From: Francisco
Date: Wed, 31 Mar 2021 21:00:16 +0100
Subject: [PATCH] fix memory pool test - placed the pool state into a
 shared_ptr so that the callbacks still have a valid handle when the pool is
 destroyed

---
 .../srsran/adt/pool/background_mem_pool.h     | 54 ++++++++++++-------
 lib/include/srsran/adt/pool/memblock_cache.h  |  3 +-
 lib/test/adt/mem_pool_test.cc                 | 14 +++--
 srsenb/src/stack/rrc/rrc_ue.cc                |  4 +-
 4 files changed, 48 insertions(+), 27 deletions(-)

diff --git a/lib/include/srsran/adt/pool/background_mem_pool.h b/lib/include/srsran/adt/pool/background_mem_pool.h
index 1b0db41c2..e3bdd144d 100644
--- a/lib/include/srsran/adt/pool/background_mem_pool.h
+++ b/lib/include/srsran/adt/pool/background_mem_pool.h
@@ -38,13 +38,17 @@ class base_background_pool
 {
   static_assert(ThresholdSize > 0, "ThresholdSize needs to be positive");
   static_assert(BatchSize > 1, "BatchSize needs to be higher than 1");
+  using pool_type = base_background_pool<T, BatchSize, ThresholdSize, CtorFunc, RecycleFunc>;
 
 public:
-  explicit base_background_pool(bool lazy_start = false, CtorFunc ctor_func_ = {}, RecycleFunc recycle_func_ = {}) :
-    ctor_func(ctor_func_), recycle_func(recycle_func_)
+  explicit base_background_pool(size_t      initial_size   = BatchSize,
+                                CtorFunc    ctor_func_     = {},
+                                RecycleFunc recycle_func_  = {}) :
+    ctor_func(ctor_func_), recycle_func(recycle_func_), state(std::make_shared<detached_pool_state>(this))
   {
-    if (not lazy_start) {
-      allocate_batch_in_background();
+    int nof_batches = ceilf(initial_size / (float)BatchSize);
+    while (nof_batches-- > 0) {
+      allocate_batch_();
     }
   }
   base_background_pool(base_background_pool&&)      = delete;
@@ -53,7 +57,8 @@ public:
   base_background_pool& operator=(const base_background_pool&) = delete;
   ~base_background_pool()
   {
-    std::lock_guard<std::mutex> lock(mutex);
+    std::lock_guard<std::mutex> lock(state->mutex);
+    state->pool = nullptr;
     for (std::unique_ptr<batch_obj_t>& batch : batches) {
       for (obj_storage_t& obj_store : *batch) {
         obj_store.destroy();
@@ -66,16 +71,13 @@ public:
   void* allocate_node(size_t sz)
   {
     srsran_assert(sz == sizeof(T), "Mismatch of allocated node size=%zd and object size=%zd", sz, sizeof(T));
-    std::lock_guard<std::mutex> lock(mutex);
+    std::lock_guard<std::mutex> lock(state->mutex);
     void* block = obj_cache.try_pop();
 
     if (block != nullptr) {
       // allocation successful
       if (obj_cache.size() < ThresholdSize) {
-        get_background_workers().push_task([this]() {
-          std::lock_guard<std::mutex> lock(mutex);
-          allocate_batch_();
-        });
+        allocate_batch_in_background();
       }
       return block;
     }
@@ -87,21 +89,24 @@ public:
 
   void deallocate_node(void* p)
   {
-    std::lock_guard<std::mutex> lock(mutex);
+    std::lock_guard<std::mutex> lock(state->mutex);
     recycle_func(static_cast<T*>(p));
     obj_cache.push(static_cast<void*>(p));
   }
 
   void allocate_batch_in_background()
   {
-    get_background_workers().push_task([this]() {
-      std::lock_guard<std::mutex> lock(mutex);
-      allocate_batch_();
+    std::shared_ptr<detached_pool_state> state_copy = state;
+    get_background_workers().push_task([state_copy]() {
+      std::lock_guard<std::mutex> lock(state_copy->mutex);
+      if (state_copy->pool != nullptr) {
+        state_copy->pool->allocate_batch_();
+      }
     });
   }
 
 private:
-  using obj_storage_t = type_storage<T, memblock_cache::min_memblock_size()>;
+  using obj_storage_t = type_storage<T, memblock_cache::min_memblock_size(), memblock_cache::min_memblock_align()>;
   using batch_obj_t   = std::array<obj_storage_t, BatchSize>;
 
   /// Unprotected allocation of new Batch of Objects
@@ -122,8 +127,14 @@ private:
   CtorFunc    ctor_func;
   RecycleFunc recycle_func;
 
+  struct detached_pool_state {
+    std::mutex mutex;
+    pool_type* pool;
+    explicit detached_pool_state(pool_type* pool_) : pool(pool_) {}
+  };
+  std::shared_ptr<detached_pool_state> state;
+
   // memory stack to cache allocated memory chunks
-  std::mutex     mutex;
   memblock_cache obj_cache;
   std::vector<std::unique_ptr<batch_obj_t> > batches;
 };
@@ -147,12 +158,17 @@ class background_obj_pool
   struct pool_deleter {
     mem_pool_type* pool;
     explicit pool_deleter(mem_pool_type* pool_) : pool(pool_) {}
-    void operator()(void* ptr) { pool->deallocate_node(ptr); }
+    void operator()(void* ptr)
+    {
+      if (ptr != nullptr) {
+        pool->deallocate_node(ptr);
+      }
+    }
   };
 
 public:
-  background_obj_pool(CtorFunc&& ctor_func = {}, RecycleFunc&& recycle_func = {}) :
-    pool(false, std::forward<CtorFunc>(ctor_func), std::forward<RecycleFunc>(recycle_func))
+  explicit background_obj_pool(size_t initial_size, CtorFunc&& ctor_func = {}, RecycleFunc&& recycle_func = {}) :
+    pool(initial_size, std::forward<CtorFunc>(ctor_func), std::forward<RecycleFunc>(recycle_func))
   {}
 
   unique_pool_ptr allocate_object()
diff --git a/lib/include/srsran/adt/pool/memblock_cache.h b/lib/include/srsran/adt/pool/memblock_cache.h
index 13ef4d07e..babeb1af5 100644
--- a/lib/include/srsran/adt/pool/memblock_cache.h
+++ b/lib/include/srsran/adt/pool/memblock_cache.h
@@ -27,6 +27,7 @@ class memblock_cache
 
 public:
   constexpr static size_t min_memblock_size() { return sizeof(node); }
+  constexpr static size_t min_memblock_align() { return alignof(node); }
 
   memblock_cache() = default;
 
@@ -46,7 +47,7 @@ public:
   template <typename T>
   void push(T* block) noexcept
   {
-    static_assert(sizeof(T) >= sizeof(node), "Provided memory block is too small");
+    static_assert(sizeof(T) >= sizeof(node) and alignof(T) >= alignof(node), "Provided memory block is too small");
     push(static_cast<void*>(block));
   }
 
diff --git a/lib/test/adt/mem_pool_test.cc b/lib/test/adt/mem_pool_test.cc
index 1b6d91acb..4fd8f1f5a 100644
--- a/lib/test/adt/mem_pool_test.cc
+++ b/lib/test/adt/mem_pool_test.cc
@@ -147,14 +147,18 @@ void test_background_pool()
   C::default_ctor_counter = 0;
   C::dtor_counter         = 0;
   {
-    srsran::background_obj_pool<C, 16, 4> obj_pool;
+    srsran::background_obj_pool<C, 16, 4> obj_pool(16);
+    std::vector<srsran::unique_pool_ptr<C> > objs;
 
-    srsran::unique_pool_ptr<C> c  = obj_pool.allocate_object();
-    srsran::unique_pool_ptr<C> c2 = obj_pool.allocate_object();
-    srsran::unique_pool_ptr<C> c3 = obj_pool.allocate_object();
+    for (size_t i = 0; i < 16 - 4; ++i) {
+      objs.push_back(obj_pool.allocate_object());
+    }
     TESTASSERT(C::default_ctor_counter == 16);
+
+    // This will trigger a new batch allocation in the background
+    objs.push_back(obj_pool.allocate_object());
   }
-  TESTASSERT(C::dtor_counter == 16 and C::dtor_counter == C::default_ctor_counter);
+  TESTASSERT(C::dtor_counter == C::default_ctor_counter);
 }
 
 int main(int argc, char** argv)
diff --git a/srsenb/src/stack/rrc/rrc_ue.cc b/srsenb/src/stack/rrc/rrc_ue.cc
index 8899fdfc0..6dd92f737 100644
--- a/srsenb/src/stack/rrc/rrc_ue.cc
+++ b/srsenb/src/stack/rrc/rrc_ue.cc
@@ -65,11 +65,11 @@ int rrc::ue::init()
   return SRSRAN_SUCCESS;
 }
 
-srsran::background_mem_pool* rrc::ue::get_ue_pool()
+rrc::ue::ue_pool_t* rrc::ue::get_ue_pool()
 {
   // Note: batch allocation is going to be explicitly called in enb class construction. The pool object, therefore,
   // will only be initialized if we instantiate an eNB
-  static rrc::ue::ue_pool_t ue_pool(true);
+  static rrc::ue::ue_pool_t ue_pool;
   return &ue_pool;
 }
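---

The lifetime fix above is an instance of the detached-state pattern: background tasks no longer capture `this`, they capture a `std::shared_ptr` to a small `detached_pool_state` that owns the mutex and a back-pointer to the pool. The pool destructor nulls that back-pointer under the state's mutex, so a task that fires after destruction still locks a live mutex, sees a null pool, and becomes a no-op. The following is a minimal, self-contained sketch of the same pattern; the names (`background_pool`, `allocate_batch`) and the plain `std::thread` standing in for srsRAN's `get_background_workers()` task queue are illustrative assumptions, not the srsRAN API:

    #include <cstdio>
    #include <memory>
    #include <mutex>
    #include <thread>

    class background_pool;

    // Shared handle that outlives the pool: tasks lock it and check `pool`.
    struct detached_pool_state {
      std::mutex       mutex;
      background_pool* pool; // nulled by ~background_pool()
      explicit detached_pool_state(background_pool* p) : pool(p) {}
    };

    class background_pool {
    public:
      background_pool() : state(std::make_shared<detached_pool_state>(this)) {}

      ~background_pool()
      {
        // Detach: pending tasks that still hold the state become no-ops.
        std::lock_guard<std::mutex> lock(state->mutex);
        state->pool = nullptr;
      }

      // Launches a task that captures the shared state, never `this`.
      // (A std::thread stands in for the worker-pool task queue.)
      std::thread allocate_batch_in_background()
      {
        std::shared_ptr<detached_pool_state> state_copy = state;
        return std::thread([state_copy]() {
          std::lock_guard<std::mutex> lock(state_copy->mutex);
          if (state_copy->pool != nullptr) {
            state_copy->pool->allocate_batch();
          } else {
            std::puts("pool already destroyed, skipping batch");
          }
        });
      }

    private:
      void allocate_batch() { std::puts("allocating batch"); }

      std::shared_ptr<detached_pool_state> state;
    };

    int main()
    {
      std::thread t;
      {
        background_pool pool;
        t = pool.allocate_batch_in_background();
      } // the pool may be destroyed before the task runs; the task stays safe
      t.join();
    }

The key design choice is that the mutex lives in the shared state rather than in the pool itself, so the task's lock/check sequence remains valid even after the pool object is gone.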