diff --git a/lib/include/srsran/adt/pool/background_mem_pool.h b/lib/include/srsran/adt/pool/background_mem_pool.h
index 4af01f7ff..1b0db41c2 100644
--- a/lib/include/srsran/adt/pool/background_mem_pool.h
+++ b/lib/include/srsran/adt/pool/background_mem_pool.h
@@ -33,11 +33,7 @@ namespace detail {
  * @tparam BatchSize number of T objects in a batch
  * @tparam ThresholdSize number of T objects below which a new batch needs to be allocated
  */
-template <typename T,
-          size_t BatchSize,
-          size_t ThresholdSize,
-          typename CtorFunc    = inplace_default_ctor_operator<T>,
-          typename RecycleFunc = noop_operator>
+template <typename T, size_t BatchSize, size_t ThresholdSize, typename CtorFunc, typename RecycleFunc>
 class base_background_pool
 {
   static_assert(ThresholdSize > 0, "ThresholdSize needs to be positive");
@@ -92,10 +88,8 @@ public:
   void deallocate_node(void* p)
   {
     std::lock_guard<std::mutex> lock(mutex);
-    if (p != nullptr) {
-      recycle_func(static_cast<T*>(p));
-      obj_cache.push(static_cast<T*>(p));
-    }
+    recycle_func(static_cast<T*>(p));
+    obj_cache.push(static_cast<T*>(p));
   }
 
   void allocate_batch_in_background()
@@ -107,22 +101,22 @@ public:
   }
 
 private:
-  using obj_storage_t = type_storage<T>;
+  using obj_storage_t = type_storage<T, memblock_cache::min_memblock_size()>;
   using batch_obj_t   = std::array<obj_storage_t, BatchSize>;
 
   /// Unprotected allocation of new Batch of Objects
   void allocate_batch_()
   {
-    batch_obj_t* batch = new batch_obj_t();
+    std::unique_ptr<batch_obj_t> batch(new batch_obj_t());
     if (batch == nullptr) {
       srslog::fetch_basic_logger("POOL").warning("Failed to allocate new batch in background thread");
       return;
     }
-    batches.emplace_back(batch);
     for (obj_storage_t& obj_store : *batch) {
-      ctor_func(static_cast<void*>(&obj_store));
-      obj_cache.push(static_cast<void*>(&obj_store));
+      ctor_func(obj_store.addr());
+      obj_cache.push(&obj_store.buffer);
     }
+    batches.emplace_back(std::move(batch));
   }
 
   CtorFunc    ctor_func;
@@ -138,7 +132,7 @@ private:
 
 template <typename T, size_t BatchSize, size_t ThresholdSize>
 using background_mem_pool =
-    detail::base_background_pool<typename std::aligned_storage<sizeof(T), alignof(T)>::type, BatchSize, ThresholdSize>;
+    detail::base_background_pool<T, BatchSize, ThresholdSize, detail::noop_operator, detail::noop_operator>;
 
 template <typename T,
-      stack.push(static_cast<uint8_t*>(p));
+      stack.push(p);
     }
   }
@@ -67,7 +67,7 @@ public:
   {
     static const size_t blocksize = std::max(sizeof(T), memblock_cache::min_memblock_size());
     for (size_t i = 0; i < N; ++i) {
-      stack.push(new uint8_t[blocksize]);
+      stack.push(static_cast<void*>(new uint8_t[blocksize]));
     }
   }
diff --git a/lib/include/srsran/adt/pool/memblock_cache.h b/lib/include/srsran/adt/pool/memblock_cache.h
index 660f02aa2..13ef4d07e 100644
--- a/lib/include/srsran/adt/pool/memblock_cache.h
+++ b/lib/include/srsran/adt/pool/memblock_cache.h
@@ -43,6 +43,13 @@ public:
     return *this;
   }
 
+  template <typename T>
+  void push(T* block) noexcept
+  {
+    static_assert(sizeof(T) >= sizeof(node), "Provided memory block is too small");
+    push(static_cast<void*>(block));
+  }
+
   void push(void* block) noexcept
   {
     node* next = ::new (block) node(head);
@@ -57,6 +64,7 @@ public:
     }
     node* last_head = head;
     head            = head->prev;
+    last_head->~node();
     count--;
     return static_cast<void*>(last_head);
   }
@@ -99,7 +107,8 @@ public:
     return *this;
   }
 
-  void push(void* block) noexcept
+  template <typename T>
+  void push(T* block) noexcept
   {
     std::lock_guard<std::mutex> lock(mutex);
     stack.push(block);
diff --git a/lib/test/adt/mem_pool_test.cc b/lib/test/adt/mem_pool_test.cc
index f7089e688..05c9849b5 100644
--- a/lib/test/adt/mem_pool_test.cc
+++ b/lib/test/adt/mem_pool_test.cc
@@ -148,10 +148,12 @@ void test_background_pool()
 
   {
     srsran::background_obj_pool<C, 16, 4> obj_pool;
-    srsran::unique_pool_ptr<C> c = obj_pool.allocate_object();
+    srsran::unique_pool_ptr<C> c  = obj_pool.allocate_object();
+    srsran::unique_pool_ptr<C> c2 = obj_pool.allocate_object();
+    srsran::unique_pool_ptr<C> c3 = obj_pool.allocate_object();
     TESTASSERT(C::default_ctor_counter == 16);
   }
-  TESTASSERT(C::dtor_counter == 16);
+  TESTASSERT(C::dtor_counter == 16 and C::dtor_counter == C::default_ctor_counter);
 }
 
 int main(int argc, char** argv)
diff --git a/srsenb/hdr/stack/rrc/rrc_ue.h b/srsenb/hdr/stack/rrc/rrc_ue.h
index b89b9daec..ed90b81a0 100644
--- a/srsenb/hdr/stack/rrc/rrc_ue.h
+++ b/srsenb/hdr/stack/rrc/rrc_ue.h
@@ -153,7 +153,7 @@ public:
   void operator delete(void* ptr)noexcept;
   void operator delete[](void* ptr) = delete;
 
-  using ue_pool_t = srsran::background_obj_pool<ue, 16, 4>;
+  using ue_pool_t = srsran::background_mem_pool<ue, 16, 4>;
   static ue_pool_t* get_ue_pool();
 
 private:
diff --git a/srsenb/src/stack/rrc/rrc_ue.cc b/srsenb/src/stack/rrc/rrc_ue.cc
index 6efba1f16..8899fdfc0 100644
--- a/srsenb/src/stack/rrc/rrc_ue.cc
+++ b/srsenb/src/stack/rrc/rrc_ue.cc
@@ -65,7 +65,7 @@ int rrc::ue::init()
   return SRSRAN_SUCCESS;
 }
 
-srsran::background_obj_pool<rrc::ue, 16, 4>* rrc::ue::get_ue_pool()
+srsran::background_mem_pool<rrc::ue, 16, 4>* rrc::ue::get_ue_pool()
 {
   // Note: batch allocation is going to be explicitly called in enb class construction. The pool object, therefore,
   // will only be initialized if we instantiate an eNB
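
Not part of the diff above: a minimal, self-contained sketch of the intrusive free-list pattern that memblock_cache implements, to illustrate why the new templated push() carries a static_assert and why the call sites above now cast to void* (or push a sufficiently large typed pointer) deliberately. Class and variable names below are illustrative stand-ins, not srsRAN's exact implementation.

#include <cassert>
#include <cstddef>
#include <cstdint>
#include <new>

// Simplified free-list cache in the spirit of srsran::memblock_cache (illustrative only).
// Every cached block is reused to store an intrusive "node" that links it into the free
// list, so a block must be at least sizeof(node) bytes.
class simple_memblock_cache
{
  struct node {
    node* prev;
    explicit node(node* prev_) : prev(prev_) {}
  };

public:
  constexpr static std::size_t min_memblock_size() { return sizeof(node); }

  // Typed push: rejects at compile time any block too small to hold the free-list node.
  template <typename T>
  void push(T* block) noexcept
  {
    static_assert(sizeof(T) >= sizeof(node), "Provided memory block is too small");
    push(static_cast<void*>(block));
  }

  void push(void* block) noexcept
  {
    // Build the list node in-place inside the cached block itself.
    node* next = ::new (block) node(head);
    head       = next;
    count++;
  }

  void* try_pop() noexcept
  {
    if (head == nullptr) {
      return nullptr;
    }
    node* last_head = head;
    head            = head->prev;
    last_head->~node(); // end the node's lifetime before handing the raw block back
    count--;
    return static_cast<void*>(last_head);
  }

  std::size_t size() const noexcept { return count; }

private:
  node*       head  = nullptr;
  std::size_t count = 0;
};

int main()
{
  simple_memblock_cache cache;
  alignas(std::max_align_t) static uint8_t block1[64];
  alignas(std::max_align_t) static uint8_t block2[64];
  // Cast to void* on purpose: pushing a uint8_t* would select the templated overload and
  // fail its static_assert, which mirrors the casts added in the reserve() hunk above.
  cache.push(static_cast<void*>(block1));
  cache.push(static_cast<void*>(block2));
  assert(cache.size() == 2);
  void* p = cache.try_pop();
  assert(p != nullptr && cache.size() == 1);
  return 0;
}

Under the same assumption, the pool-side change fits this pattern: each cached object slot is sized to at least min_memblock_size(), so pushing &obj_store.buffer into the cache can never corrupt a block that is smaller than the free-list node.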