@@ -38,13 +38,17 @@ class base_background_pool
 {
   static_assert(ThresholdSize > 0, "ThresholdSize needs to be positive");
   static_assert(BatchSize > 1, "BatchSize needs to be higher than 1");
+  using pool_type = base_background_pool<T, BatchSize, ThresholdSize, CtorFunc, RecycleFunc>;
 
 public:
-  explicit base_background_pool(bool lazy_start = false, CtorFunc ctor_func_ = {}, RecycleFunc recycle_func_ = {}) :
-    ctor_func(ctor_func_), recycle_func(recycle_func_)
+  explicit base_background_pool(size_t initial_size = BatchSize,
+                                CtorFunc ctor_func_ = {},
+                                RecycleFunc recycle_func_ = {}) :
+    ctor_func(ctor_func_), recycle_func(recycle_func_), state(std::make_shared<detached_pool_state>(this))
   {
-    if (not lazy_start) {
-      allocate_batch_in_background();
+    int nof_batches = ceilf(initial_size / (float)BatchSize);
+    while (nof_batches-- > 0) {
+      allocate_batch_();
     }
   }
   base_background_pool(base_background_pool&&) = delete;
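Note: the `lazy_start` flag is gone; the constructor now eagerly pre-allocates enough batches to cover `initial_size` objects, rounding up to a whole number of batches. A free-standing sketch of that arithmetic (the `BatchSize` and `initial_size` values below are invented for illustration):

    #include <cmath>
    #include <cstddef>
    #include <cstdio>

    int main()
    {
      const std::size_t BatchSize    = 64;  // hypothetical template parameter
      const std::size_t initial_size = 100; // hypothetical constructor argument
      // Same rounding-up computation as the new constructor body.
      int nof_batches = static_cast<int>(std::ceil(initial_size / (float)BatchSize));
      std::printf("%d batches -> %zu objects pre-allocated\n",
                  nof_batches, nof_batches * BatchSize);
    }

With these values, ceil(100 / 64) = 2 batches, i.e. 128 objects, are allocated up front.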
@@ -53,7 +57,8 @@ public:
   base_background_pool& operator=(const base_background_pool&) = delete;
   ~base_background_pool()
   {
-    std::lock_guard<std::mutex> lock(mutex);
+    std::lock_guard<std::mutex> lock(state->mutex);
+    state->pool = nullptr;
     for (std::unique_ptr<batch_obj_t>& batch : batches) {
       for (obj_storage_t& obj_store : *batch) {
         obj_store.destroy();
@@ -66,16 +71,13 @@ public:
   void* allocate_node(size_t sz)
   {
     srsran_assert(sz == sizeof(T), "Mismatch of allocated node size=%zd and object size=%zd", sz, sizeof(T));
-    std::lock_guard<std::mutex> lock(mutex);
+    std::lock_guard<std::mutex> lock(state->mutex);
     void* block = obj_cache.try_pop();
 
     if (block != nullptr) {
       // allocation successful
       if (obj_cache.size() < ThresholdSize) {
-        get_background_workers().push_task([this]() {
-          std::lock_guard<std::mutex> lock(mutex);
-          allocate_batch_();
-        });
+        allocate_batch_in_background();
       }
       return block;
     }
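Note: the threshold-triggered refill now routes through `allocate_batch_in_background()` instead of pushing an ad-hoc lambda that captures `this` and locks the pool's own mutex. That inline task could run after the pool was destroyed. A minimal reduction of the hazard being removed (toy worker queue, invented names):

    #include <functional>
    #include <mutex>
    #include <queue>

    std::queue<std::function<void()>> workers; // stand-in for get_background_workers()

    struct pool {
      std::mutex mutex;
      void refill() { /* allocate a new batch */ }
      void risky_refill()
      {
        // Bug pattern removed by this patch: if *this is destroyed before the
        // queued task runs, the lambda locks a dead mutex and calls through a
        // dangling pointer.
        workers.push([this]() {
          std::lock_guard<std::mutex> lock(mutex);
          refill();
        });
      }
    };

The replacement helper (next hunk) captures a shared `detached_pool_state` instead of `this`.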
@@ -87,21 +89,24 @@ public:
 
   void deallocate_node(void* p)
   {
-    std::lock_guard<std::mutex> lock(mutex);
+    std::lock_guard<std::mutex> lock(state->mutex);
     recycle_func(static_cast<void*>(p));
     obj_cache.push(static_cast<void*>(p));
   }
 
   void allocate_batch_in_background()
   {
-    get_background_workers().push_task([this]() {
-      std::lock_guard<std::mutex> lock(mutex);
-      allocate_batch_();
+    std::shared_ptr<detached_pool_state> state_copy = state;
+    get_background_workers().push_task([state_copy]() {
+      std::lock_guard<std::mutex> lock(state_copy->mutex);
+      if (state_copy->pool != nullptr) {
+        state_copy->pool->allocate_batch_();
+      }
     });
   }
 
 private:
-  using obj_storage_t = type_storage<T, memblock_cache::min_memblock_size()>;
+  using obj_storage_t = type_storage<T, memblock_cache::min_memblock_size(), memblock_cache::min_memblock_align()>;
   using batch_obj_t   = std::array<obj_storage_t, BatchSize>;
 
   /// Unprotected allocation of new Batch of Objects
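Note: `obj_storage_t` now also forwards `memblock_cache::min_memblock_align()`, so the per-object storage satisfies the cache node's alignment as well as its minimum size. An illustrative reduction of the requirement (this is not the srsRAN `type_storage` API; `free_node` is an invented stand-in for the cache's intrusive link):

    #include <cstddef>

    struct free_node { free_node* next; }; // intrusive free-list link

    // A freed object's storage is reused as a free-list node, so the buffer
    // must be both large enough and aligned enough for T *and* free_node.
    template <typename T>
    struct obj_storage {
      alignas(alignof(T) > alignof(free_node) ? alignof(T) : alignof(free_node))
          unsigned char buf[sizeof(T) > sizeof(free_node) ? sizeof(T) : sizeof(free_node)];
    };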
@@ -122,8 +127,14 @@ private:
   CtorFunc    ctor_func;
   RecycleFunc recycle_func;
 
+  struct detached_pool_state {
+    std::mutex mutex;
+    pool_type* pool;
+    explicit detached_pool_state(pool_type* pool_) : pool(pool_) {}
+  };
+  std::shared_ptr<detached_pool_state> state;
+
   // memory stack to cache allocate memory chunks
-  std::mutex     mutex;
   memblock_cache obj_cache;
   std::vector<std::unique_ptr<batch_obj_t> > batches;
 };
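Note: `detached_pool_state` is the heart of the fix. The pool and every queued background task co-own the state; the destructor nulls `state->pool` under the lock, and a late-running task checks that pointer before touching the pool. A self-contained reduction of the handshake (toy types, invented names; requires C++14):

    #include <memory>
    #include <mutex>

    struct pool;

    struct detached_state {
      std::mutex mutex;
      pool*      owner;
      explicit detached_state(pool* p) : owner(p) {}
    };

    struct pool {
      std::shared_ptr<detached_state> state{std::make_shared<detached_state>(this)};

      ~pool()
      {
        std::lock_guard<std::mutex> lock(state->mutex);
        state->owner = nullptr; // detach: tasks still holding `state` become no-ops
      }

      auto make_task()
      {
        std::shared_ptr<detached_state> s = state; // task keeps the state alive
        return [s]() {
          std::lock_guard<std::mutex> lock(s->mutex);
          if (s->owner != nullptr) { // only act while the pool is still alive
            /* s->owner->allocate_batch_(); */
          }
        };
      }
    };

The shared_ptr capture guarantees the mutex outlives both parties, and the null check turns the old use-after-free into a benign no-op.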
@@ -147,12 +158,17 @@ class background_obj_pool
   struct pool_deleter {
     mem_pool_type* pool;
     explicit pool_deleter(mem_pool_type* pool_) : pool(pool_) {}
-    void operator()(void* ptr) { pool->deallocate_node(ptr); }
+    void operator()(void* ptr)
+    {
+      if (ptr != nullptr) {
+        pool->deallocate_node(ptr);
+      }
+    }
   };
 
 public:
-  background_obj_pool(CtorFunc&& ctor_func = {}, RecycleFunc&& recycle_func = {}) :
-    pool(false, std::forward<CtorFunc>(ctor_func), std::forward<RecycleFunc>(recycle_func))
+  explicit background_obj_pool(size_t initial_size, CtorFunc&& ctor_func = {}, RecycleFunc&& recycle_func = {}) :
+    pool(initial_size, std::forward<CtorFunc>(ctor_func), std::forward<RecycleFunc>(recycle_func))
   {}
 
   unique_pool_ptr<T> allocate_object()
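Note: callers must now size the pool up front, since `initial_size` has no default here. A hedged usage sketch (assumes the surrounding srsRAN pool headers and default template arguments for the batch parameters; the payload type and size are made up):

    struct my_ctx {
      // payload fields ...
    };

    // Pre-allocates roughly 512 objects, in BatchSize chunks, at construction.
    background_obj_pool<my_ctx> pool(512);

    // RAII handle: returning the object to the pool happens in pool_deleter,
    // which after this patch tolerates null pointers (e.g. moved-from handles).
    unique_pool_ptr<my_ctx> obj = pool.allocate_object();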