adt - creation of a pre-initialized object pool that leverages the background memory pool allocator

master
Francisco 4 years ago committed by Francisco Paisana
parent 3daba090f6
commit d1c5b000dc

@@ -15,6 +15,7 @@
#include "srsran/adt/detail/type_storage.h"
#include "srsran/adt/expected.h"
#include "srsran/adt/pool/pool_utils.h"
#include "srsran/common/srsran_assert.h"
#include <array>
@@ -231,14 +232,6 @@ protected:
size_t count = 0;
};
struct noop_operator {
template <typename T>
void operator()(const T&)
{
// noop
}
};
/**
* Base common class for definition of blocking queue data structures with the following features:
* - it stores pushed/popped samples in an internal circular buffer

@@ -0,0 +1,176 @@
/**
*
* \section COPYRIGHT
*
* Copyright 2013-2021 Software Radio Systems Limited
*
* By using this file, you agree to the terms and conditions set
* forth in the LICENSE file which can be found at the top level of
* the distribution.
*
*/
#ifndef SRSRAN_BACKGROUND_MEM_POOL_H
#define SRSRAN_BACKGROUND_MEM_POOL_H
#include "memblock_cache.h"
#include "pool_utils.h"
#include "srsran/common/srsran_assert.h"
#include "srsran/common/thread_pool.h"
#include <memory>
#include <mutex>
#include <vector>
namespace srsran {
namespace detail {
/**
 * Pool specialized in allocating batches of objects in a preemptive way in a background thread to minimize latency.
 * Note: the current implementation assumes that the pool object will outlive the background callbacks that allocate
 * new batches.
 * @tparam T individual object type that is being allocated
 * @tparam BatchSize number of T objects in a batch
 * @tparam ThresholdSize number of T objects below which a new batch needs to be allocated
 */
template <typename T,
size_t BatchSize,
size_t ThresholdSize,
typename CtorFunc = default_ctor_operator<T>,
typename RecycleFunc = noop_operator>
class base_background_pool
{
static_assert(ThresholdSize > 0, "ThresholdSize needs to be positive");
static_assert(BatchSize > 1, "BatchSize needs to be higher than 1");
public:
explicit base_background_pool(bool lazy_start = false, CtorFunc ctor_func_ = {}, RecycleFunc recycle_func_ = {}) :
ctor_func(ctor_func_), recycle_func(recycle_func_)
{
if (not lazy_start) {
allocate_batch_in_background();
}
}
base_background_pool(base_background_pool&&) = delete;
base_background_pool(const base_background_pool&) = delete;
base_background_pool& operator=(base_background_pool&&) = delete;
base_background_pool& operator=(const base_background_pool&) = delete;
~base_background_pool()
{
std::lock_guard<std::mutex> lock(mutex);
for (std::unique_ptr<batch_obj_t>& batch : batches) {
for (obj_storage_t& obj_store : *batch) {
obj_store.destroy();
}
}
batches.clear();
}
/// Allocate space for a new object. If no memory is pre-reserved in the pool, a new batch is allocated in the caller's thread.
void* allocate_node(size_t sz)
{
srsran_assert(sz == sizeof(T), "Mismatch of allocated node size=%zu and object size=%zu", sz, sizeof(T));
std::lock_guard<std::mutex> lock(mutex);
void* block = obj_cache.try_pop();
if (block != nullptr) {
// allocation successful
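// cache is running low: schedule allocation of a refill batch on a background worker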
if (obj_cache.size() < ThresholdSize) {
get_background_workers().push_task([this]() {
std::lock_guard<std::mutex> lock(mutex);
allocate_batch_();
});
}
return block;
}
// cache is empty: try to allocate a new batch in the same thread as the caller.
allocate_batch_();
return obj_cache.try_pop();
}
void deallocate_node(void* p)
{
std::lock_guard<std::mutex> lock(mutex);
if (p != nullptr) {
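// run the recycle hook and return the block to the cache; the object's
// destructor is deferred until the pool itself is destroyed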
recycle_func(static_cast<void*>(p));
obj_cache.push(static_cast<uint8_t*>(p));
}
}
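/// Trigger allocation of a new batch of objects on a background worker thread.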
void allocate_batch_in_background()
{
get_background_workers().push_task([this]() {
std::lock_guard<std::mutex> lock(mutex);
allocate_batch_();
});
}
private:
using obj_storage_t = type_storage<T>;
using batch_obj_t = std::array<obj_storage_t, BatchSize>;
/// Unprotected allocation of new Batch of Objects
void allocate_batch_()
{
batch_obj_t* batch = new (std::nothrow) batch_obj_t();
if (batch == nullptr) {
srslog::fetch_basic_logger("POOL").warning("Failed to allocate new batch in background thread");
return;
}
batches.emplace_back(batch);
for (obj_storage_t& obj_store : *batch) {
ctor_func(static_cast<void*>(&obj_store));
obj_cache.push(static_cast<void*>(&obj_store));
}
}
CtorFunc ctor_func;
RecycleFunc recycle_func;
// memory stack caching pre-allocated memory blocks
std::mutex mutex;
memblock_cache obj_cache;
std::vector<std::unique_ptr<batch_obj_t> > batches;
};
} // namespace detail
template <typename T, size_t BatchSize, size_t ThresholdSize>
using background_mem_pool =
detail::base_background_pool<typename std::aligned_storage<sizeof(T), alignof(T)>::type, BatchSize, ThresholdSize>;
template <typename T,
size_t BatchSize,
size_t ThresholdSize,
typename CtorFunc = detail::default_ctor_operator<T>,
typename RecycleFunc = detail::noop_operator>
class background_obj_pool
{
using pool_type = background_obj_pool<T, BatchSize, ThresholdSize, CtorFunc, RecycleFunc>;
using mem_pool_type = detail::base_background_pool<T, BatchSize, ThresholdSize, CtorFunc, RecycleFunc>;
struct pool_deleter {
mem_pool_type* pool;
explicit pool_deleter(mem_pool_type* pool_) : pool(pool_) {}
void operator()(void* ptr) { pool->deallocate_node(ptr); }
};
public:
background_obj_pool(CtorFunc&& ctor_func = {}, RecycleFunc&& recycle_func = {}) :
pool(false, std::forward<CtorFunc>(ctor_func), std::forward<RecycleFunc>(recycle_func))
{}
unique_pool_ptr<T> allocate_object()
{
void* ptr = pool.allocate_node(sizeof(T));
return std::unique_ptr<T, pool_deleter>(static_cast<T*>(ptr), pool_deleter(&pool));
}
private:
mem_pool_type pool;
};
} // namespace srsran
#endif // SRSRAN_BACKGROUND_MEM_POOL_H
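For context, a minimal usage sketch of the new pool (the my_obj type and example() function below are hypothetical, for illustration only, and not part of this commit):

#include "srsran/adt/pool/background_mem_pool.h"

struct my_obj {
  int value = 0; // hypothetical payload type
};

void example()
{
  // 16 objects per batch; a refill batch is scheduled on a background worker
  // once fewer than 4 pre-constructed objects remain cached
  srsran::background_obj_pool<my_obj, 16, 4> pool;

  // pops an already-constructed object from the cache; the type-erased deleter
  // returns the storage to the pool instead of freeing it
  srsran::unique_pool_ptr<my_obj> obj = pool.allocate_object();
  obj->value = 42;
} // obj is recycled into the pool here; its destructor runs when the pool is destroyed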

@@ -0,0 +1,44 @@
/**
*
* \section COPYRIGHT
*
* Copyright 2013-2021 Software Radio Systems Limited
*
* By using this file, you agree to the terms and conditions set
* forth in the LICENSE file which can be found at the top level of
* the distribution.
*
*/
#ifndef SRSRAN_POOL_UTILS_H
#define SRSRAN_POOL_UTILS_H
#include "../move_callback.h"
#include <memory>
namespace srsran {
namespace detail {
template <typename T>
struct default_ctor_operator {
void operator()(void* ptr) { new (ptr) T(); }
};
struct noop_operator {
template <typename T>
void operator()(const T&)
{
// do nothing
}
};
} // namespace detail
/// unique_ptr with a type-erased deleter, so that it can be used with any pool
template <typename T>
using unique_pool_ptr = std::unique_ptr<T, srsran::move_callback<void(void*)> >;
} // namespace srsran
#endif // SRSRAN_POOL_UTILS_H
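As a hedged illustration of that type erasure (the widget type and make_widget() below are hypothetical): any callable with a void(void*) signature can serve as the deleter, so unique_pool_ptr<T> remains a single type regardless of which pool implementation produced the object.

#include "srsran/adt/pool/pool_utils.h"

struct widget {
  int id = 0; // hypothetical type, for illustration only
};

srsran::unique_pool_ptr<widget> make_widget()
{
  // a real pool would hand out cached storage together with a deleter that
  // recycles it; plain new/delete stands in here to keep the sketch self-contained
  return srsran::unique_pool_ptr<widget>(new widget(), [](void* p) {
    delete static_cast<widget*>(p);
  });
}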

@@ -10,6 +10,7 @@
*
*/
#include "srsran/adt/pool/background_mem_pool.h"
#include "srsran/adt/pool/fixed_size_pool.h"
#include "srsran/adt/pool/mem_pool.h"
#include "srsran/common/test_common.h"
@@ -137,6 +138,20 @@ void test_fixedsize_pool()
t.join();
}
fixed_pool->print_all_buffers();
TESTASSERT(C::default_ctor_counter == C::dtor_counter);
}
void test_background_pool()
{
C::default_ctor_counter = 0;
C::dtor_counter = 0;
{
srsran::background_obj_pool<C, 16, 4> obj_pool;
srsran::unique_pool_ptr<C> c = obj_pool.allocate_object();
TESTASSERT(C::default_ctor_counter == 16);
}
TESTASSERT(C::dtor_counter == 16);
}
int main(int argc, char** argv)
@@ -145,6 +160,7 @@ int main(int argc, char** argv)
test_nontrivial_obj_pool();
test_fixedsize_pool();
test_background_pool();
printf("Success\n");
return 0;
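For reference, the assertions in test_background_pool() hold because the pool eagerly constructs one full batch of 16 objects on construction, and destructors only run when the pool itself is destroyed. The instrumented type C is defined elsewhere in the test file; presumably it looks roughly like this sketch:

struct C {
  C() { default_ctor_counter++; }   // sketch only; the real definition lives elsewhere in the test
  ~C() { dtor_counter++; }
  static int default_ctor_counter;
  static int dtor_counter;
};
int C::default_ctor_counter = 0;
int C::dtor_counter = 0;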

@@ -15,7 +15,7 @@
#include "mac_controller.h"
#include "rrc.h"
#include "srsran/adt/pool/mem_pool.h"
#include "srsran/adt/pool/background_mem_pool.h"
#include "srsran/interfaces/enb_phy_interfaces.h"
#include "srsran/interfaces/pdcp_interface_types.h"
@@ -153,7 +153,7 @@
void operator delete(void* ptr) noexcept;
void operator delete[](void* ptr) = delete;
using ue_pool_t = srsran::background_allocator_obj_pool<ue, 16, 4>;
using ue_pool_t = srsran::background_obj_pool<ue, 16, 4>;
static ue_pool_t* get_ue_pool();
private:

@@ -65,7 +65,7 @@ int rrc::ue::init()
return SRSRAN_SUCCESS;
}
srsran::background_allocator_obj_pool<rrc::ue, 16, 4>* rrc::ue::get_ue_pool()
srsran::background_obj_pool<rrc::ue, 16, 4>* rrc::ue::get_ue_pool()
{
// Note: batch allocation is explicitly triggered during enb class construction. The pool object, therefore,
// will only be initialized if we instantiate an eNB
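A hedged sketch of that explicit warm-up (hypothetical call site; the enb constructor is not part of this diff):

// merely touching the singleton constructs the pool, whose constructor
// (lazy_start == false) immediately schedules construction of the first
// batch of 16 rrc::ue objects on a background worker
rrc::ue::get_ue_pool();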
