@@ -38,13 +38,17 @@ class base_background_pool
 {
   static_assert(ThresholdSize > 0, "ThresholdSize needs to be positive");
   static_assert(BatchSize > 1, "BatchSize needs to be higher than 1");
+  using pool_type = base_background_pool<T, BatchSize, ThresholdSize, CtorFunc, RecycleFunc>;
 
 public:
-  explicit base_background_pool(bool lazy_start = false, CtorFunc ctor_func_ = {}, RecycleFunc recycle_func_ = {}) :
-    ctor_func(ctor_func_), recycle_func(recycle_func_)
+  explicit base_background_pool(size_t initial_size = BatchSize,
+                                CtorFunc ctor_func_ = {},
+                                RecycleFunc recycle_func_ = {}) :
+    ctor_func(ctor_func_), recycle_func(recycle_func_), state(std::make_shared<detached_pool_state>(this))
   {
-    if (not lazy_start) {
-      allocate_batch_in_background();
+    int nof_batches = ceilf(initial_size / (float)BatchSize);
+    while (nof_batches-- > 0) {
+      allocate_batch_();
     }
   }
   base_background_pool(base_background_pool&&) = delete;
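Note on this hunk: the constructor drops the old lazy_start flag and instead pre-allocates, on the calling thread, enough batches to cover initial_size objects, rounding up to whole batches. For example, with BatchSize = 16 and initial_size = 40, ceilf(40 / 16.0f) gives 3, so three batches (48 object slots) are created up front. Later refills still happen asynchronously through allocate_batch_in_background() whenever the cache drops below ThresholdSize (see the allocate_node hunk below).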
@@ -53,7 +57,8 @@ public:
   base_background_pool& operator=(const base_background_pool&) = delete;
   ~base_background_pool()
   {
-    std::lock_guard<std::mutex> lock(mutex);
+    std::lock_guard<std::mutex> lock(state->mutex);
+    state->pool = nullptr;
     for (std::unique_ptr<batch_obj_t>& batch : batches) {
       for (obj_storage_t& obj_store : *batch) {
         obj_store.destroy();
@@ -66,16 +71,13 @@ public:
   void* allocate_node(size_t sz)
   {
     srsran_assert(sz == sizeof(T), "Mismatch of allocated node size=%zd and object size=%zd", sz, sizeof(T));
-    std::lock_guard<std::mutex> lock(mutex);
+    std::lock_guard<std::mutex> lock(state->mutex);
     void* block = obj_cache.try_pop();
 
     if (block != nullptr) {
       // allocation successful
       if (obj_cache.size() < ThresholdSize) {
-        get_background_workers().push_task([this]() {
-          std::lock_guard<std::mutex> lock(mutex);
-          allocate_batch_();
-        });
+        allocate_batch_in_background();
       }
       return block;
     }
@@ -87,21 +89,24 @@ public:
 
   void deallocate_node(void* p)
   {
-    std::lock_guard<std::mutex> lock(mutex);
+    std::lock_guard<std::mutex> lock(state->mutex);
     recycle_func(static_cast<void*>(p));
     obj_cache.push(static_cast<void*>(p));
   }
 
   void allocate_batch_in_background()
   {
-    get_background_workers().push_task([this]() {
-      std::lock_guard<std::mutex> lock(mutex);
-      allocate_batch_();
+    std::shared_ptr<detached_pool_state> state_copy = state;
+    get_background_workers().push_task([state_copy]() {
+      std::lock_guard<std::mutex> lock(state_copy->mutex);
+      if (state_copy->pool != nullptr) {
+        state_copy->pool->allocate_batch_();
+      }
     });
   }
 
 private:
-  using obj_storage_t = type_storage<T, memblock_cache::min_memblock_size()>;
+  using obj_storage_t = type_storage<T, memblock_cache::min_memblock_size(), memblock_cache::min_memblock_align()>;
   using batch_obj_t = std::array<obj_storage_t, BatchSize>;
 
   /// Unprotected allocation of new Batch of Objects
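The hunks above are the heart of the change: background refill tasks no longer capture `this`. Instead they copy a shared_ptr to a small detached_pool_state that the pool and every queued task co-own. The destructor locks that state and nulls its pool pointer, so a task that fires after the pool is gone takes the lock, finds nullptr, and returns instead of touching freed memory. Below is a minimal, self-contained sketch of the same pattern; widget_pool, worker_queue, and refill() are illustrative placeholders, not srsRAN APIs.

#include <functional>
#include <memory>
#include <mutex>
#include <queue>

// Stand-in for the project's background worker queue (hypothetical).
static std::queue<std::function<void()>> worker_queue;

class widget_pool
{
  // Shared state that outlives the pool; queued tasks co-own it.
  struct detached_state {
    explicit detached_state(widget_pool* p) : pool(p) {}
    std::mutex   mutex;
    widget_pool* pool;
  };

public:
  widget_pool() : state(std::make_shared<detached_state>(this)) {}

  ~widget_pool()
  {
    // Detach: pending tasks still hold the state, but will see pool == nullptr.
    std::lock_guard<std::mutex> lock(state->mutex);
    state->pool = nullptr;
  }

  void refill_in_background()
  {
    std::shared_ptr<detached_state> state_copy = state; // task owns the state, not the pool
    worker_queue.push([state_copy]() {
      std::lock_guard<std::mutex> lock(state_copy->mutex);
      if (state_copy->pool != nullptr) { // is the pool still alive?
        state_copy->pool->refill();
      }
    });
  }

private:
  void refill() { /* grow the object cache; runs under state->mutex */ }

  std::shared_ptr<detached_state> state;
};

int main()
{
  {
    widget_pool pool;
    pool.refill_in_background(); // task is queued but not yet executed
  } // pool destroyed here; its destructor nulls state->pool

  // Draining the queue now is safe: the task locks the state, sees nullptr, and returns.
  while (!worker_queue.empty()) {
    worker_queue.front()();
    worker_queue.pop();
  }
}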
@@ -122,8 +127,14 @@ private:
   CtorFunc ctor_func;
   RecycleFunc recycle_func;
 
+  struct detached_pool_state {
+    std::mutex mutex;
+    pool_type* pool;
+    explicit detached_pool_state(pool_type* pool_) : pool(pool_) {}
+  };
+  std::shared_ptr<detached_pool_state> state;
+
   // memory stack to cache allocate memory chunks
-  std::mutex mutex;
   memblock_cache obj_cache;
   std::vector<std::unique_ptr<batch_obj_t> > batches;
 };
@@ -147,12 +158,17 @@ class background_obj_pool
   struct pool_deleter {
     mem_pool_type* pool;
     explicit pool_deleter(mem_pool_type* pool_) : pool(pool_) {}
-    void operator()(void* ptr) { pool->deallocate_node(ptr); }
+    void operator()(void* ptr)
+    {
+      if (ptr != nullptr) {
+        pool->deallocate_node(ptr);
+      }
+    }
   };
 
 public:
-  background_obj_pool(CtorFunc&& ctor_func = {}, RecycleFunc&& recycle_func = {}) :
-    pool(false, std::forward<CtorFunc>(ctor_func), std::forward<RecycleFunc>(recycle_func))
+  explicit background_obj_pool(size_t initial_size, CtorFunc&& ctor_func = {}, RecycleFunc&& recycle_func = {}) :
+    pool(initial_size, std::forward<CtorFunc>(ctor_func), std::forward<RecycleFunc>(recycle_func))
   {}
 
   unique_pool_ptr<T> allocate_object()
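Two smaller points in this last hunk: pool_deleter::operator() now checks for nullptr before handing the block back, so invoking the deleter on an empty pointer is a harmless no-op, and background_obj_pool forwards an explicit initial_size to the underlying memory pool instead of the old boolean flag. For readers unfamiliar with the idiom, here is a toy sketch of how a custom deleter returns an object's memory to a pool rather than freeing it; toy_pool, widget, and the free allocate_object() below are illustrative stand-ins, not srsRAN types.

#include <cstddef>
#include <cstdio>
#include <cstdlib>
#include <memory>
#include <new>

struct widget {
  int id = 0;
};

struct toy_pool {
  void* allocate_node(std::size_t sz) { return std::malloc(sz); } // stands in for the cached allocation
  void  deallocate_node(void* p) { std::free(p); }                // stands in for pushing the block back to the cache
};

struct pool_deleter {
  toy_pool* pool;
  void operator()(widget* ptr)
  {
    if (ptr != nullptr) {         // tolerate empty pointers, as in the patch
      ptr->~widget();             // destroy the object in place...
      pool->deallocate_node(ptr); // ...then recycle its memory block
    }
  }
};

using unique_widget_ptr = std::unique_ptr<widget, pool_deleter>;

unique_widget_ptr allocate_object(toy_pool& pool)
{
  void* node = pool.allocate_node(sizeof(widget));
  return unique_widget_ptr(new (node) widget{}, pool_deleter{&pool});
}

int main()
{
  toy_pool pool;
  auto     w = allocate_object(pool);
  w->id      = 1;
  std::printf("widget id=%d\n", w->id);
  // When w goes out of scope its deleter runs: in-place destructor plus
  // deallocate_node(), never ::operator delete.
}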