fix memory pool test - placed the pool state into a shared_ptr so that the callbacks still have a valid handle when the pool is destroyed

Branch: master
Author: Francisco, committed by Francisco Paisana, 4 years ago
parent 9bec13731a
commit 460d7a8f4f

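The fix works as follows: the mutex and a raw back-pointer to the pool move into a small state struct owned through a std::shared_ptr; every background task captures a copy of that shared_ptr instead of `this`, and the pool destructor nulls the back-pointer while holding the lock. A task that fires after the pool is gone then finds a null pointer and returns instead of dereferencing freed memory. Below is a minimal standalone sketch of that pattern, assuming nothing from srsRAN; demo_pool, detached_state, grow_in_background and the std::thread stand-in for the worker queue are all illustrative names.

#include <memory>
#include <mutex>
#include <thread>
#include <vector>

// Standalone sketch of the detached-state pattern (illustrative names, not the srsRAN API).
class demo_pool
{
public:
  demo_pool() : state(std::make_shared<detached_state>(this)) {}

  ~demo_pool()
  {
    // Detach: any task still holding the shared state sees a null pool and becomes a no-op.
    std::lock_guard<std::mutex> lock(state->mutex);
    state->pool = nullptr;
  }

  // Queue background work that may outlive this object (a plain std::thread stands in for
  // the worker queue the real pool uses).
  void grow_in_background(std::vector<std::thread>& workers)
  {
    std::shared_ptr<detached_state> state_copy = state; // keeps the state alive past ~demo_pool()
    workers.emplace_back([state_copy]() {
      std::lock_guard<std::mutex> lock(state_copy->mutex);
      if (state_copy->pool != nullptr) {
        state_copy->pool->grow(); // safe: the destructor cannot finish until it takes this lock
      }
    });
  }

private:
  struct detached_state {
    std::mutex mutex;
    demo_pool* pool;
    explicit detached_state(demo_pool* pool_) : pool(pool_) {}
  };

  void grow() { /* allocate another batch of objects; caller holds state->mutex */ }

  std::shared_ptr<detached_state> state;
};

int main()
{
  std::vector<std::thread> workers;
  {
    demo_pool pool;
    pool.grow_in_background(workers); // the queued task may run after the pool is gone
  }                                   // pool destroyed here
  for (std::thread& t : workers) {
    t.join(); // each pending task locks the shared state, sees pool == nullptr, and does nothing
  }
  return 0;
}

The real pool applies the same shape, with get_background_workers().push_task() as the queue and allocate_batch_() as the protected operation, as the diff below shows.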
@@ -38,13 +38,17 @@ class base_background_pool
 {
   static_assert(ThresholdSize > 0, "ThresholdSize needs to be positive");
   static_assert(BatchSize > 1, "BatchSize needs to be higher than 1");
+  using pool_type = base_background_pool<T, BatchSize, ThresholdSize, CtorFunc, RecycleFunc>;
 
 public:
-  explicit base_background_pool(bool lazy_start = false, CtorFunc ctor_func_ = {}, RecycleFunc recycle_func_ = {}) :
-    ctor_func(ctor_func_), recycle_func(recycle_func_)
+  explicit base_background_pool(size_t      initial_size   = BatchSize,
+                                CtorFunc    ctor_func_     = {},
+                                RecycleFunc recycle_func_  = {}) :
+    ctor_func(ctor_func_), recycle_func(recycle_func_), state(std::make_shared<detached_pool_state>(this))
   {
-    if (not lazy_start) {
-      allocate_batch_in_background();
+    int nof_batches = ceilf(initial_size / (float)BatchSize);
+    while (nof_batches-- > 0) {
+      allocate_batch_();
     }
   }
   base_background_pool(base_background_pool&&) = delete;
@@ -53,7 +57,8 @@ public:
   base_background_pool& operator=(const base_background_pool&) = delete;
   ~base_background_pool()
   {
-    std::lock_guard<std::mutex> lock(mutex);
+    std::lock_guard<std::mutex> lock(state->mutex);
+    state->pool = nullptr;
     for (std::unique_ptr<batch_obj_t>& batch : batches) {
       for (obj_storage_t& obj_store : *batch) {
         obj_store.destroy();
@@ -66,16 +71,13 @@ public:
 
   void* allocate_node(size_t sz)
   {
     srsran_assert(sz == sizeof(T), "Mismatch of allocated node size=%zd and object size=%zd", sz, sizeof(T));
-    std::lock_guard<std::mutex> lock(mutex);
+    std::lock_guard<std::mutex> lock(state->mutex);
     void* block = obj_cache.try_pop();
     if (block != nullptr) {
       // allocation successful
       if (obj_cache.size() < ThresholdSize) {
-        get_background_workers().push_task([this]() {
-          std::lock_guard<std::mutex> lock(mutex);
-          allocate_batch_();
-        });
+        allocate_batch_in_background();
       }
       return block;
     }
@@ -87,21 +89,24 @@ public:
 
   void deallocate_node(void* p)
   {
-    std::lock_guard<std::mutex> lock(mutex);
+    std::lock_guard<std::mutex> lock(state->mutex);
     recycle_func(static_cast<void*>(p));
     obj_cache.push(static_cast<void*>(p));
   }
 
   void allocate_batch_in_background()
   {
-    get_background_workers().push_task([this]() {
-      std::lock_guard<std::mutex> lock(mutex);
-      allocate_batch_();
+    std::shared_ptr<detached_pool_state> state_copy = state;
+    get_background_workers().push_task([state_copy]() {
+      std::lock_guard<std::mutex> lock(state_copy->mutex);
+      if (state_copy->pool != nullptr) {
+        state_copy->pool->allocate_batch_();
+      }
     });
   }
 
 private:
-  using obj_storage_t = type_storage<T, memblock_cache::min_memblock_size()>;
+  using obj_storage_t = type_storage<T, memblock_cache::min_memblock_size(), memblock_cache::min_memblock_align()>;
   using batch_obj_t   = std::array<obj_storage_t, BatchSize>;
 
   /// Unprotected allocation of new Batch of Objects
@@ -122,8 +127,14 @@ private:
   CtorFunc    ctor_func;
   RecycleFunc recycle_func;
 
+  struct detached_pool_state {
+    std::mutex mutex;
+    pool_type* pool;
+    explicit detached_pool_state(pool_type* pool_) : pool(pool_) {}
+  };
+  std::shared_ptr<detached_pool_state> state;
+
   // memory stack to cache allocate memory chunks
-  std::mutex     mutex;
   memblock_cache obj_cache;
   std::vector<std::unique_ptr<batch_obj_t> > batches;
 };
@@ -147,12 +158,17 @@ class background_obj_pool
   struct pool_deleter {
     mem_pool_type* pool;
     explicit pool_deleter(mem_pool_type* pool_) : pool(pool_) {}
-    void operator()(void* ptr) { pool->deallocate_node(ptr); }
+    void operator()(void* ptr)
+    {
+      if (ptr != nullptr) {
+        pool->deallocate_node(ptr);
+      }
+    }
   };
 
 public:
-  background_obj_pool(CtorFunc&& ctor_func = {}, RecycleFunc&& recycle_func = {}) :
-    pool(false, std::forward<CtorFunc>(ctor_func), std::forward<RecycleFunc>(recycle_func))
+  explicit background_obj_pool(size_t initial_size, CtorFunc&& ctor_func = {}, RecycleFunc&& recycle_func = {}) :
+    pool(initial_size, std::forward<CtorFunc>(ctor_func), std::forward<RecycleFunc>(recycle_func))
   {}
 
   unique_pool_ptr<T> allocate_object()

@@ -27,6 +27,7 @@ class memblock_cache
 public:
   constexpr static size_t min_memblock_size() { return sizeof(node); }
+  constexpr static size_t min_memblock_align() { return alignof(node); }
 
   memblock_cache() = default;
@@ -46,7 +47,7 @@ public:
 
   template <typename T>
   void push(T* block) noexcept
   {
-    static_assert(sizeof(T) >= sizeof(node), "Provided memory block is too small");
+    static_assert(sizeof(T) >= sizeof(node) and alignof(T) >= alignof(node), "Provided memory block is too small");
     push(static_cast<void*>(block));
   }

@@ -147,14 +147,18 @@ void test_background_pool()
 
   C::default_ctor_counter = 0;
   C::dtor_counter         = 0;
   {
-    srsran::background_obj_pool<C, 16, 4> obj_pool;
-    srsran::unique_pool_ptr<C> c  = obj_pool.allocate_object();
-    srsran::unique_pool_ptr<C> c2 = obj_pool.allocate_object();
-    srsran::unique_pool_ptr<C> c3 = obj_pool.allocate_object();
+    srsran::background_obj_pool<C, 16, 4>    obj_pool(16);
+    std::vector<srsran::unique_pool_ptr<C> > objs;
+    for (size_t i = 0; i < 16 - 4; ++i) {
+      objs.push_back(obj_pool.allocate_object());
+    }
     TESTASSERT(C::default_ctor_counter == 16);
+
+    // This will trigger a new batch allocation in the background
+    objs.push_back(obj_pool.allocate_object());
   }
-  TESTASSERT(C::dtor_counter == 16 and C::dtor_counter == C::default_ctor_counter);
+  TESTASSERT(C::dtor_counter == C::default_ctor_counter);
 }
 
 int main(int argc, char** argv)

@@ -65,11 +65,11 @@ int rrc::ue::init()
 
   return SRSRAN_SUCCESS;
 }
 
-srsran::background_mem_pool<rrc::ue, 16, 4>* rrc::ue::get_ue_pool()
+rrc::ue::ue_pool_t* rrc::ue::get_ue_pool()
 {
   // Note: batch allocation is going to be explicitly called in enb class construction. The pool object, therefore,
   // will only be initialized if we instantiate an eNB
-  static rrc::ue::ue_pool_t ue_pool(true);
+  static rrc::ue::ue_pool_t ue_pool;
   return &ue_pool;
 }
