mirror of https://github.com/pvnis/srsRAN_4G.git
Reimplement batch-based background object/memory pool
Main changes:
- addition of pool utilities
- the node size/alignment and batch allocation threshold are now runtime arguments
- the object pool and memory pool are no longer based on the same class; the object pool cannot use an intrusive free list because it would overwrite the object memory
parent
fbeb87c53e
commit
cdf72248f3
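Usage sketch (illustration only, not part of the commit): the snippet below contrasts the old and new instantiation, based on the constructors shown in the diff that follows. The pooled type `my_obj`, the chosen sizes, and the include paths are assumptions.

```cpp
#include "srsran/adt/pool/batch_mem_pool.h" // assumed header location
#include "srsran/adt/pool/obj_pool.h"       // assumed header location

struct my_obj { int value = 0; }; // hypothetical pooled type

void example()
{
  // Before this commit, batch size and refill threshold were template parameters:
  //   srsran::background_obj_pool<my_obj, /* BatchSize */ 16, /* ThresholdSize */ 4> pool(16);

  // After this commit, they are runtime constructor arguments:
  srsran::background_obj_pool<my_obj> obj_pool(/* nof_objs_per_batch */ 16, /* thres */ 4);
  srsran::unique_pool_ptr<my_obj>     obj = obj_pool.make();
  obj->value = 1;

  // The untyped memory pool additionally takes the node size at runtime:
  srsran::background_mem_pool mem_pool(/* nodes_per_batch */ 16, /* node_size */ 128, /* thres */ 4);
  void* node = mem_pool.allocate_node(128);
  mem_pool.deallocate_node(node);
}
```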
background_mem_pool.h (deleted)
@@ -1,192 +0,0 @@
/**
 *
 * \section COPYRIGHT
 *
 * Copyright 2013-2021 Software Radio Systems Limited
 *
 * By using this file, you agree to the terms and conditions set
 * forth in the LICENSE file which can be found at the top level of
 * the distribution.
 *
 */

#ifndef SRSRAN_BACKGROUND_MEM_POOL_H
#define SRSRAN_BACKGROUND_MEM_POOL_H

#include "common_pool.h"
#include "memblock_cache.h"
#include "pool_utils.h"
#include "srsran/common/srsran_assert.h"
#include "srsran/common/thread_pool.h"
#include <memory>
#include <mutex>
#include <vector>

namespace srsran {

namespace detail {

/**
 * Pool specialized in allocating batches of objects in a preemptive way in a background thread to minimize latency.
 * Note: Current implementation assumes that the pool object will outlive the background callbacks to allocate new
 * batches.
 * @tparam T individual object type that is being allocated
 * @tparam BatchSize number of T objects in a batch
 * @tparam ThresholdSize number of T objects below which a new batch needs to be allocated
 */
template <typename T, size_t BatchSize, size_t ThresholdSize, typename CtorFunc, typename RecycleFunc>
class base_background_pool
{
  static_assert(ThresholdSize > 0, "ThresholdSize needs to be positive");
  static_assert(BatchSize > 1, "BatchSize needs to be higher than 1");
  using pool_type = base_background_pool<T, BatchSize, ThresholdSize, CtorFunc, RecycleFunc>;

public:
  explicit base_background_pool(size_t        initial_size  = BatchSize,
                                CtorFunc&&    ctor_func_    = {},
                                RecycleFunc&& recycle_func_ = {}) :
    ctor_func(std::forward<CtorFunc>(ctor_func_)),
    recycle_func(std::forward<RecycleFunc>(recycle_func_)),
    state(std::make_shared<detached_pool_state>(this))
  {
    int nof_batches = ceilf(initial_size / (float)BatchSize);
    while (nof_batches-- > 0) {
      allocate_batch_();
    }
  }
  base_background_pool(base_background_pool&&)      = delete;
  base_background_pool(const base_background_pool&) = delete;
  base_background_pool& operator=(base_background_pool&&) = delete;
  base_background_pool& operator=(const base_background_pool&) = delete;
  ~base_background_pool()
  {
    std::lock_guard<std::mutex> lock(state->mutex);
    state->pool = nullptr;
    for (std::unique_ptr<batch_obj_t>& batch : batches) {
      for (obj_storage_t& obj_store : *batch) {
        obj_store.destroy();
      }
    }
    batches.clear();
  }

  /// Allocate new object space. If no memory is pre-reserved in the pool, malloc is called to allocate a new batch.
  void* allocate_node(size_t sz)
  {
    srsran_assert(sz == sizeof(T), "Mismatch of allocated node size=%zd and object size=%zd", sz, sizeof(T));
    std::lock_guard<std::mutex> lock(state->mutex);
    void*                       block = obj_cache.try_pop();

    if (block != nullptr) {
      // allocation successful
      if (obj_cache.size() < ThresholdSize) {
        allocate_batch_in_background();
      }
      return block;
    }

    // try allocation of new batch in same thread as caller.
    allocate_batch_();
    return obj_cache.try_pop();
  }

  void deallocate_node(void* p)
  {
    std::lock_guard<std::mutex> lock(state->mutex);
    recycle_func(*static_cast<T*>(p));
    obj_cache.push(static_cast<void*>(p));
  }

  void allocate_batch_in_background()
  {
    std::shared_ptr<detached_pool_state> state_copy = state;
    get_background_workers().push_task([state_copy]() {
      std::lock_guard<std::mutex> lock(state_copy->mutex);
      if (state_copy->pool != nullptr) {
        state_copy->pool->allocate_batch_();
      }
    });
  }

private:
  using obj_storage_t = type_storage<T, memblock_cache::min_memblock_size(), memblock_cache::min_memblock_align()>;
  using batch_obj_t   = std::array<obj_storage_t, BatchSize>;

  /// Unprotected allocation of new Batch of Objects
  void allocate_batch_()
  {
    std::unique_ptr<batch_obj_t> batch(new batch_obj_t());
    if (batch == nullptr) {
      srslog::fetch_basic_logger("POOL").warning("Failed to allocate new batch in background thread");
      return;
    }
    for (obj_storage_t& obj_store : *batch) {
      ctor_func(obj_store.addr());
      obj_cache.push(&obj_store.buffer);
    }
    batches.emplace_back(std::move(batch));
  }

  CtorFunc    ctor_func;
  RecycleFunc recycle_func;

  struct detached_pool_state {
    std::mutex mutex;
    pool_type* pool;
    explicit detached_pool_state(pool_type* pool_) : pool(pool_) {}
  };
  std::shared_ptr<detached_pool_state> state;

  // memory stack to cache allocated memory chunks
  memblock_cache                             obj_cache;
  std::vector<std::unique_ptr<batch_obj_t> > batches;
};

} // namespace detail

template <typename T, size_t BatchSize, size_t ThresholdSize>
using background_mem_pool = detail::base_background_pool<detail::type_storage<T>,
                                                         BatchSize,
                                                         ThresholdSize,
                                                         detail::noop_operator,
                                                         detail::noop_operator>;

template <typename T,
          size_t BatchSize,
          size_t ThresholdSize,
          typename CtorFunc    = detail::inplace_default_ctor_operator<T>,
          typename RecycleFunc = detail::noop_operator>
class background_obj_pool : public obj_pool_itf<T>
{
  using pool_type     = background_obj_pool<T, BatchSize, ThresholdSize, CtorFunc, RecycleFunc>;
  using mem_pool_type = detail::base_background_pool<T, BatchSize, ThresholdSize, CtorFunc, RecycleFunc>;

  struct pool_deleter {
    mem_pool_type* pool;
    explicit pool_deleter(mem_pool_type* pool_) : pool(pool_) {}
    void operator()(void* ptr)
    {
      if (ptr != nullptr) {
        pool->deallocate_node(ptr);
      }
    }
  };

public:
  explicit background_obj_pool(size_t initial_size, CtorFunc&& ctor_func = {}, RecycleFunc&& recycle_func = {}) :
    pool(initial_size, std::forward<CtorFunc>(ctor_func), std::forward<RecycleFunc>(recycle_func))
  {}

  unique_pool_ptr<T> allocate_object() final
  {
    void* ptr = pool.allocate_node(sizeof(T));
    return std::unique_ptr<T, pool_deleter>(static_cast<T*>(ptr), pool_deleter(&pool));
  }

private:
  mem_pool_type pool;
};

} // namespace srsran

#endif // SRSRAN_BACKGROUND_MEM_POOL_H
batch_mem_pool.h (new file)
@@ -0,0 +1,161 @@
/**
 *
 * \section COPYRIGHT
 *
 * Copyright 2013-2021 Software Radio Systems Limited
 *
 * By using this file, you agree to the terms and conditions set
 * forth in the LICENSE file which can be found at the top level of
 * the distribution.
 *
 */

#ifndef SRSRAN_BATCH_MEM_POOL_H
#define SRSRAN_BATCH_MEM_POOL_H

#include "memblock_cache.h"
#include "pool_utils.h"
#include "srsran/common/srsran_assert.h"
#include "srsran/common/thread_pool.h"
#include <memory>
#include <mutex>

namespace srsran {

/**
 * Non-thread-safe, node-based memory pool that allocates nodes in batches of "objs_per_batch" > 1, and caches
 * allocated blocks on deallocation
 */
class growing_batch_mem_pool
{
public:
  explicit growing_batch_mem_pool(size_t objs_per_batch_,
                                  size_t node_size_,
                                  size_t node_alignment_,
                                  int    init_size = -1) :
    objs_per_batch(objs_per_batch_),
    memblock_size(std::max(node_size_, free_memblock_list::min_memblock_size())),
    allocated(objs_per_batch * memblock_size, std::max(node_alignment_, free_memblock_list::min_memblock_align()))
  {
    size_t N = init_size < 0 ? objs_per_batch_ : init_size;
    while (N > cache_size()) {
      allocate_batch();
    }
  }
  ~growing_batch_mem_pool()
  {
    srsran_assert(cache_size() == size(), "Not all nodes have been deallocated yet (%zd < %zd)", cache_size(), size());
  }

  size_t get_node_max_size() const { return allocated.get_node_max_size(); }

  void clear()
  {
    free_list.clear();
    allocated.clear();
  }

  size_t cache_size() const { return free_list.size(); }
  size_t size() const { return allocated.size() * objs_per_batch; }

  void allocate_batch()
  {
    uint8_t* batch_payload = static_cast<uint8_t*>(allocated.allocate_block());
    for (size_t i = 0; i < objs_per_batch; ++i) {
      void* cache_node = batch_payload + i * memblock_size;
      free_list.push(cache_node);
    }
  }

  void* allocate_node()
  {
    if (free_list.empty()) {
      allocate_batch();
    }
    return free_list.pop();
  }

  void deallocate_node(void* ptr) { free_list.push(ptr); }

private:
  const size_t objs_per_batch;
  const size_t memblock_size;

  memblock_stack     allocated;
  free_memblock_list free_list;
};

/**
 * Thread-safe object pool specialized in allocating batches of objects in a preemptive way in a background thread
 * to minimize latency.
 * Note: The dispatched allocation jobs may outlive the pool. To handle this, the pool state is passed to jobs via a
 * shared ptr.
 */
class background_mem_pool
{
public:
  const size_t batch_threshold;

  explicit background_mem_pool(size_t nodes_per_batch_, size_t node_size_, size_t thres_, int initial_size = -1) :
    batch_threshold(thres_),
    state(std::make_shared<detached_pool_state>(this)),
    grow_pool(nodes_per_batch_, node_size_, detail::max_alignment, initial_size)
  {
    srsran_assert(batch_threshold > 1, "Invalid arguments for background memory pool");
  }
  ~background_mem_pool()
  {
    std::lock_guard<std::mutex> lock(state->mutex);
    state->pool = nullptr;
    grow_pool.clear();
  }

  /// Allocate new object space. If no memory is pre-reserved in the pool, malloc is called to allocate a new batch.
  void* allocate_node(size_t sz)
  {
    srsran_assert(sz <= grow_pool.get_node_max_size(),
                  "Mismatch of allocated node size=%zd and object size=%zd",
                  sz,
                  grow_pool.get_node_max_size());
    std::lock_guard<std::mutex> lock(state->mutex);
    void*                       node = grow_pool.allocate_node();

    if (grow_pool.size() < batch_threshold) {
      allocate_batch_in_background();
    }
    return node;
  }

  void deallocate_node(void* p)
  {
    std::lock_guard<std::mutex> lock(state->mutex);
    grow_pool.deallocate_node(p);
  }

  size_t get_node_max_size() const { return grow_pool.get_node_max_size(); }

private:
  void allocate_batch_in_background()
  {
    std::shared_ptr<detached_pool_state> state_copy = state;
    get_background_workers().push_task([state_copy]() {
      std::lock_guard<std::mutex> lock(state_copy->mutex);
      if (state_copy->pool != nullptr) {
        state_copy->pool->grow_pool.allocate_batch();
      }
    });
  }

  struct detached_pool_state {
    std::mutex           mutex;
    background_mem_pool* pool;
    explicit detached_pool_state(background_mem_pool* pool_) : pool(pool_) {}
  };
  std::shared_ptr<detached_pool_state> state;

  growing_batch_mem_pool grow_pool;
};

} // namespace srsran

#endif // SRSRAN_BATCH_MEM_POOL_H
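Usage sketch for the new header above (illustration only, not part of the diff): `background_mem_pool` hands out raw nodes of the size given at construction and pre-allocates further batches on the background worker pool when it runs low; object construction and destruction stay with the caller. The message type and sizes are hypothetical.

```cpp
#include <cstdint>
#include <new>
// plus the pool header shown above (path assumed): "srsran/adt/pool/batch_mem_pool.h"

struct my_msg { // hypothetical node type
  uint32_t id;
  char     payload[64];
};

void mem_pool_example()
{
  // 8 nodes per batch, each node at least sizeof(my_msg) bytes, refill threshold of 2.
  srsran::background_mem_pool pool(/* nodes_per_batch */ 8, /* node_size */ sizeof(my_msg), /* thres */ 2);

  // Pop a cached node and construct the object in place.
  void*   mem = pool.allocate_node(sizeof(my_msg));
  my_msg* msg = new (mem) my_msg{};
  msg->id     = 7;

  // The pool only recycles memory, so the caller destroys the object before returning the node.
  msg->~my_msg();
  pool.deallocate_node(mem);
}
```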
common_pool.h (deleted)
@@ -1,43 +0,0 @@
/**
 *
 * \section COPYRIGHT
 *
 * Copyright 2013-2021 Software Radio Systems Limited
 *
 * By using this file, you agree to the terms and conditions set
 * forth in the LICENSE file which can be found at the top level of
 * the distribution.
 *
 */

#ifndef SRSRAN_COMMON_POOL_H
#define SRSRAN_COMMON_POOL_H

#include "srsran/adt/move_callback.h"

namespace srsran {

/// unique ptr with type-erased dtor, so that it can be used by any pool
template <typename T>
using unique_pool_ptr = std::unique_ptr<T, srsran::move_callback<void(void*)> >;

/// Common object pool interface
template <typename T>
class obj_pool_itf
{
public:
  using object_type = T;

  obj_pool_itf()                    = default;
  obj_pool_itf(const obj_pool_itf&) = delete;
  obj_pool_itf(obj_pool_itf&&)      = delete;
  obj_pool_itf& operator=(const obj_pool_itf&) = delete;
  obj_pool_itf& operator=(obj_pool_itf&&) = delete;

  virtual ~obj_pool_itf() = default;
  virtual unique_pool_ptr<T> allocate_object() = 0;
};

} // namespace srsran

#endif // SRSRAN_COMMON_POOL_H
obj_pool.h (new file)
@@ -0,0 +1,193 @@
/**
 *
 * \section COPYRIGHT
 *
 * Copyright 2013-2021 Software Radio Systems Limited
 *
 * By using this file, you agree to the terms and conditions set
 * forth in the LICENSE file which can be found at the top level of
 * the distribution.
 *
 */

#ifndef SRSRAN_OBJ_POOL_H
#define SRSRAN_OBJ_POOL_H

#include "batch_mem_pool.h"
#include "memblock_cache.h"
#include "pool_interface.h"

namespace srsran {

template <typename T>
class background_obj_pool;

template <typename T>
class growing_batch_obj_pool final : public obj_pool_itf<T>
{
  static size_t memblock_size()
  {
    /// Node Structure [ node header | (pad to node alignment) | node size | (pad to node header alignment) ]
    return align_next(align_next(free_memblock_list::min_memblock_size(), alignof(T)) + sizeof(T),
                      free_memblock_list::min_memblock_align());
  }
  static size_t batch_size(size_t nof_objs_per_batch)
  {
    /// Batch Structure: [allocated stack header | (pad max alignment) | [memblock] x objs_per_batch ]
    return align_next(detail::max_alignment + (memblock_size() * nof_objs_per_batch), detail::max_alignment);
  }

public:
  using init_mem_oper_t = srsran::move_callback<void(void*)>;
  using recycle_oper_t  = srsran::move_callback<void(T&)>;

  explicit growing_batch_obj_pool(size_t          objs_per_batch_,
                                  int             init_size     = -1,
                                  init_mem_oper_t init_oper_    = detail::inplace_default_ctor_operator<T>{},
                                  recycle_oper_t  recycle_oper_ = detail::noop_operator{}) :
    objs_per_batch(objs_per_batch_),
    init_oper(std::move(init_oper_)),
    recycle_oper(std::move(recycle_oper_)),
    allocated(batch_size(objs_per_batch_), detail::max_alignment),
    cache(sizeof(T), alignof(T))
  {
    size_t N = init_size < 0 ? objs_per_batch_ : init_size;
    while (N > cache.size()) {
      allocate_batch();
    }
  }
  ~growing_batch_obj_pool() { clear(); }

  void clear()
  {
    if (not allocated.empty()) {
      srsran_assert(allocated.size() * objs_per_batch == cache_size(),
                    "Not all objects have been deallocated (%zd < %zd)",
                    cache_size(),
                    allocated.size() * objs_per_batch);
      while (not cache.empty()) {
        void* node_payload = cache.top();
        static_cast<T*>(node_payload)->~T();
        cache.pop();
      }
      allocated.clear();
    }
  }

  void allocate_batch()
  {
    uint8_t* batch_payload = static_cast<uint8_t*>(allocated.allocate_block());
    for (size_t i = 0; i < objs_per_batch; ++i) {
      void* cache_node = batch_payload + (i * cache.memblock_size);
      cache.push(cache_node);
      init_oper(cache.top());
    }
  }

  size_t cache_size() const { return cache.size(); }

private:
  friend class background_obj_pool<T>;

  T* do_allocate() final
  {
    if (cache.empty()) {
      allocate_batch();
    }
    void* top = cache.top();
    cache.pop();
    return static_cast<T*>(top);
  }

  void do_deallocate(void* payload_ptr) final
  {
    recycle_oper(*static_cast<T*>(payload_ptr));
    void* header_ptr = cache.get_node_header(payload_ptr);
    cache.push(header_ptr);
  }

  // args
  const size_t    objs_per_batch;
  init_mem_oper_t init_oper;
  recycle_oper_t  recycle_oper;

  memblock_stack     allocated;
  memblock_node_list cache;
};

/**
 * Thread-safe object pool specialized in allocating batches of objects in a preemptive way in a background thread
 * to minimize latency.
 * Note: The dispatched allocation jobs may outlive the pool. To handle this, the pool state is passed to jobs via a
 * shared ptr.
 */
template <typename T>
class background_obj_pool final : public obj_pool_itf<T>
{
public:
  using init_mem_oper_t = typename growing_batch_obj_pool<T>::init_mem_oper_t;
  using recycle_oper_t  = typename growing_batch_obj_pool<T>::recycle_oper_t;

  explicit background_obj_pool(size_t          nof_objs_per_batch,
                               size_t          thres_,
                               int             init_size     = -1,
                               init_mem_oper_t init_oper_    = detail::inplace_default_ctor_operator<T>{},
                               recycle_oper_t  recycle_oper_ = detail::noop_operator{}) :
    thres(thres_),
    state(std::make_shared<detached_pool_state>(this)),
    grow_pool(nof_objs_per_batch, init_size, std::move(init_oper_), std::move(recycle_oper_))
  {
    srsran_assert(thres_ > 1, "The provided threshold=%zd is not valid", thres_);
  }
  ~background_obj_pool()
  {
    std::lock_guard<std::mutex> lock(state->mutex);
    state->pool = nullptr;
    grow_pool.clear();
  }

  size_t cache_size() const { return grow_pool.cache_size(); }

private:
  T* do_allocate() final
  {
    std::lock_guard<std::mutex> lock(state->mutex);
    T* obj = grow_pool.do_allocate();
    if (grow_pool.cache_size() < thres) {
      allocate_batch_in_background_();
    }
    return obj;
  }
  void do_deallocate(void* ptr) final
  {
    std::lock_guard<std::mutex> lock(state->mutex);
    return grow_pool.do_deallocate(ptr);
  }

  void allocate_batch_in_background_()
  {
    std::shared_ptr<detached_pool_state> state_copy = state;
    get_background_workers().push_task([state_copy]() {
      std::lock_guard<std::mutex> lock(state_copy->mutex);
      if (state_copy->pool != nullptr) {
        state_copy->pool->grow_pool.allocate_batch();
      }
    });
  }

  size_t thres;

  // state of pool is detached because pool may be destroyed while batches are being allocated in the background
  struct detached_pool_state {
    std::mutex              mutex;
    background_obj_pool<T>* pool;
    explicit detached_pool_state(background_obj_pool<T>* pool_) : pool(pool_) {}
  };
  std::shared_ptr<detached_pool_state> state;

  growing_batch_obj_pool<T> grow_pool;
};

} // namespace srsran

#endif // SRSRAN_OBJ_POOL_H
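Usage sketch for the new object pool above (illustration only, not part of the diff): the init operator constructs each object in place when a batch is allocated, and the recycle operator runs when a `unique_pool_ptr` returns its object to the cache. The PDU type, sizes, and lambdas below are assumptions.

```cpp
#include <array>
#include <cstdint>
#include <new>
// plus the pool header shown above (path assumed): "srsran/adt/pool/obj_pool.h"

struct pdu_buffer { // hypothetical pooled type
  std::array<uint8_t, 1024> data;
  size_t                    len = 0;
};

void obj_pool_example()
{
  srsran::background_obj_pool<pdu_buffer> pool(
      /* nof_objs_per_batch */ 16,
      /* thres */ 4,
      /* init_size */ -1,
      [](void* mem) { new (mem) pdu_buffer(); },  // construct in place once per node
      [](pdu_buffer& pdu) { pdu.len = 0; });      // reset state when the node is recycled

  srsran::unique_pool_ptr<pdu_buffer> pdu = pool.make();
  pdu->len = 42;
  // When "pdu" goes out of scope, pool_deallocator invokes the recycle operator and pushes
  // the node back into the cache; the background worker refills the cache whenever it
  // drops below "thres".
}
```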
pool_interface.h (new file)
@@ -0,0 +1,63 @@
/**
 *
 * \section COPYRIGHT
 *
 * Copyright 2013-2021 Software Radio Systems Limited
 *
 * By using this file, you agree to the terms and conditions set
 * forth in the LICENSE file which can be found at the top level of
 * the distribution.
 *
 */

#ifndef SRSRAN_POOL_INTERFACE_H
#define SRSRAN_POOL_INTERFACE_H

#include "srsran/adt/move_callback.h"

namespace srsran {

/// Common object pool interface
template <typename T>
class obj_pool_itf
{
public:
  struct pool_deallocator {
    obj_pool_itf<T>* pool;
    explicit pool_deallocator(obj_pool_itf<T>* pool_ = nullptr) : pool(pool_) {}
    void operator()(void* ptr)
    {
      if (ptr != nullptr and pool != nullptr) {
        pool->do_deallocate(ptr);
      }
    }
  };
  using object_type = T;

  obj_pool_itf() = default;
  // Object pool address should not change
  obj_pool_itf(const obj_pool_itf&) = delete;
  obj_pool_itf(obj_pool_itf&&)      = delete;
  obj_pool_itf& operator=(const obj_pool_itf&) = delete;
  obj_pool_itf& operator=(obj_pool_itf&&) = delete;

  virtual ~obj_pool_itf() = default;

  std::unique_ptr<T, pool_deallocator> make()
  {
    return std::unique_ptr<T, pool_deallocator>(do_allocate(), pool_deallocator(this));
  }

private:
  // defined in child class
  virtual T*   do_allocate()             = 0;
  virtual void do_deallocate(void* ptr)  = 0;
};

/// unique ptr with type-erased dtor, so that it can be used by any object pool
template <typename T>
using unique_pool_ptr = std::unique_ptr<T, typename obj_pool_itf<T>::pool_deallocator>;

} // namespace srsran

#endif // SRSRAN_POOL_INTERFACE_H
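The interface above follows a non-virtual-interface pattern: `make()` is the public entry point, while `do_allocate()`/`do_deallocate()` are private hooks supplied by the concrete pool, and `pool_deallocator` stores a plain pool pointer instead of the previous type-erased `move_callback` deleter. Below is a minimal heap-backed implementation, purely as an illustration of the contract (not part of the commit).

```cpp
// Hypothetical pool that forwards every request to the heap; only the two hooks are needed.
template <typename T>
class heap_obj_pool final : public srsran::obj_pool_itf<T>
{
private:
  // Hand out a freshly constructed object for every request.
  T* do_allocate() final { return new T(); }
  // Destroy and free the object when the unique_pool_ptr releases it.
  void do_deallocate(void* ptr) final { delete static_cast<T*>(ptr); }
};

// Code written against the interface is oblivious to the pooling strategy:
void fill(srsran::obj_pool_itf<int>& pool)
{
  srsran::unique_pool_ptr<int> value = pool.make();
  *value = 5;
} // pool_deallocator returns the object through do_deallocate()
```

Note that the deleter stores a raw pool pointer, so a `unique_pool_ptr` must not outlive the pool that produced it.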