implementation of object memory pool via class-specific operator new/delete

master
Francisco Paisana 4 years ago
parent ea74ca67eb
commit f8b7351e1b
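The change replaces the smart-pointer based obj_pool (a make() returning a unique_ptr with a custom obj_deleter) with a leaner big_obj_pool that only caches raw memory blocks; object construction and destruction move into the classes themselves via class-specific operator new/delete. A minimal, self-contained sketch of that pattern, with illustrative names only (widget and its cache are not part of the srslte API):

#include <cassert>
#include <cstddef>
#include <vector>

// The class owns a cache of fixed-size blocks and plugs it in through
// class-specific operator new/delete, so plain `new`/`delete` (and therefore
// std::unique_ptr) transparently reuse pooled memory.
class widget
{
public:
  void* operator new(size_t sz);
  void  operator delete(void* ptr) noexcept;

private:
  static std::vector<void*> cache; // blocks handed back by deleted objects
};
std::vector<void*> widget::cache;

void* widget::operator new(size_t sz)
{
  assert(sz == sizeof(widget)); // a derived class would break the fixed size
  if (not cache.empty()) {
    void* p = cache.back();
    cache.pop_back();
    return p; // reuse a cached block instead of touching the heap
  }
  return ::operator new(sz);
}

void widget::operator delete(void* ptr) noexcept
{
  cache.push_back(ptr); // keep the block around for the next widget
}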

@@ -13,6 +13,7 @@
#ifndef SRSLTE_MEM_POOL_H
#define SRSLTE_MEM_POOL_H
#include <cassert>
#include <cstdint>
#include <memory>
#include <mutex>
@@ -135,28 +136,17 @@ private:
};
/**
* Pool specialized for big objects. Created objects are of the same type, and are not contiguous in memory.
* Memory management of created objects is automatically handled. Relevant methods:
* - ::make(...) - create an object whose memory is automatically managed by the pool. The object dtor returns the
* allocated memory back to the pool
* Pool specialized for big objects. Created objects are not contiguous in memory.
* Relevant methods:
* - ::allocate_node(sz) - allocate a block of size sizeof(T), or reuse one already present in the cache
* - ::deallocate_node(void* p) - return the memory addressed by p back to the pool to be cached
* - ::reserve(N) - pre-reserve memory slots for faster object creation
* @tparam T object type
* @tparam ThreadSafe whether the object pool is thread-safe
*/
template <typename T, bool ThreadSafe = false>
class obj_pool
class big_obj_pool
{
/// single-thread obj pool deleter
struct obj_deleter {
explicit obj_deleter(obj_pool<T, ThreadSafe>* pool_) : pool(pool_) {}
void operator()(void* block)
{
static_cast<T*>(block)->~T();
pool->stack.push(static_cast<uint8_t*>(block));
}
obj_pool<T, ThreadSafe>* pool;
};
// memory stack type derivation (thread safe or not)
using stack_type = typename std::conditional<ThreadSafe, mutexed_memblock_stack, memblock_stack>::type;
@@ -164,24 +154,25 @@ class obj_pool
stack_type stack;
public:
using obj_ptr = std::unique_ptr<T, obj_deleter>;
~big_obj_pool() { clear(); }
~obj_pool()
/// Allocate space for a new object. If no memory is pre-reserved in the pool, a new block is heap-allocated.
void* allocate_node(size_t sz)
{
uint8_t* block = stack.try_pop();
while (block != nullptr) {
delete[] block;
block = stack.try_pop();
assert(sz == sizeof(T));
static const size_t blocksize = std::max(sizeof(T), memblock_stack::min_memblock_size());
uint8_t* block = stack.try_pop();
if (block == nullptr) {
block = new uint8_t[blocksize];
}
return block;
}
/// create new object with given arguments. If no memory is pre-reserved in the pool, malloc is called.
template <typename... Args>
obj_ptr make(Args&&... args)
void deallocate_node(void* p)
{
uint8_t* block = allocate_node();
new (block) T(std::forward<Args>(args)...);
return obj_ptr(reinterpret_cast<T*>(block), obj_deleter(this));
if (p != nullptr) {
stack.push(static_cast<uint8_t*>(p));
}
}
/// Pre-reserve N memory chunks for future object allocations
@@ -195,24 +186,15 @@ public:
size_t capacity() const { return stack.size(); }
private:
uint8_t* allocate_node()
void clear()
{
static const size_t blocksize = std::max(sizeof(T), memblock_stack::min_memblock_size());
uint8_t* block = stack.try_pop();
if (block == nullptr) {
block = new uint8_t[blocksize];
uint8_t* block = stack.try_pop();
while (block != nullptr) {
delete[] block;
block = stack.try_pop();
}
return block;
}
};
template <typename T>
using mutexed_pool_obj = obj_pool<T, true>;
template <typename T>
using unique_pool_obj = typename obj_pool<T, false>::obj_ptr;
template <typename T>
using unique_mutexed_pool_obj = typename obj_pool<T, true>::obj_ptr;
} // namespace srslte
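Taken together, the new surface is deliberately minimal. Below is a sketch of driving the reworked pool directly, assuming only the API shown above (my_obj is a placeholder type); unlike the removed ::make(), the pool no longer constructs or destroys objects, so the caller pairs placement-new with an explicit destructor call:

#include "srslte/adt/mem_pool.h"
#include <new>

struct my_obj {
  explicit my_obj(int v_) : v(v_) {}
  int v;
};

int main()
{
  srslte::big_obj_pool<my_obj> pool;
  pool.reserve(16); // pre-allocate blocks so later allocations skip the heap

  void*   mem = pool.allocate_node(sizeof(my_obj)); // pop or allocate a block
  my_obj* obj = new (mem) my_obj(42);               // construct in place

  obj->~my_obj();            // the pool does not run destructors anymore
  pool.deallocate_node(obj); // the block returns to the cache, not the heap
  return 0;
}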

@@ -19,17 +19,31 @@ public:
C() { default_ctor_counter++; }
~C() { dtor_counter++; }
void* operator new(size_t sz);
void operator delete(void* ptr) noexcept;
static int default_ctor_counter;
static int dtor_counter;
};
int C::default_ctor_counter = 0;
int C::dtor_counter = 0;
srslte::big_obj_pool<C> pool;
void* C::operator new(size_t sz)
{
return pool.allocate_node(sz);
}
void C::operator delete(void* ptr) noexcept
{
pool.deallocate_node(ptr);
}
int test_nontrivial_obj_pool()
{
// No object creation on reservation
{
srslte::obj_pool<C> pool;
pool.reserve(10);
}
TESTASSERT(C::default_ctor_counter == 0);
@@ -37,10 +51,10 @@ int test_nontrivial_obj_pool()
// default Ctor/Dtor are correctly called
{
srslte::obj_pool<C> pool;
pool.clear();
pool.reserve(10);
srslte::unique_pool_obj<C> c = pool.make();
std::unique_ptr<C> c(new C{});
}
TESTASSERT(C::default_ctor_counter == 1);
TESTASSERT(C::dtor_counter == 1);
@@ -49,11 +63,11 @@ int test_nontrivial_obj_pool()
C::default_ctor_counter = 0;
C::dtor_counter = 0;
{
srslte::obj_pool<C> pool;
pool.clear();
pool.reserve(10);
srslte::unique_pool_obj<C> c = pool.make();
srslte::unique_pool_obj<C> c2 = std::move(c);
std::unique_ptr<C> c(new C{});
auto c2 = std::move(c);
}
TESTASSERT(C::default_ctor_counter == 1);
TESTASSERT(C::dtor_counter == 1);
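The rewritten tests change in kind, not just in spelling: with C::operator new/delete routed through the pool, a plain std::unique_ptr<C> is sufficient, and the counters still balance because destruction happens in two observable steps:

// What `std::unique_ptr<C>` does on scope exit, in order:
//   1. c->~C();               // increments C::dtor_counter
//   2. C::operator delete(c); // pool.deallocate_node(c): the block is cached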

@@ -149,9 +149,9 @@ private:
std::unique_ptr<enb_cell_common_list> cell_common_list;
// state
std::unique_ptr<freq_res_common_list> cell_res_list;
std::map<uint16_t, srslte::unique_pool_obj<ue> > users; // NOTE: has to have fixed addr
std::map<uint32_t, asn1::rrc::paging_record_s> pending_paging;
std::unique_ptr<freq_res_common_list> cell_res_list;
std::map<uint16_t, std::unique_ptr<ue> > users; // NOTE: has to have fixed addr
std::map<uint32_t, asn1::rrc::paging_record_s> pending_paging;
void process_release_complete(uint16_t rnti);
void rem_user(uint16_t rnti);
@@ -192,7 +192,7 @@ private:
std::mutex paging_mutex;
srslte::obj_pool<ue, false> ue_pool;
static srslte::big_obj_pool<ue, false> ue_pool;
};
} // namespace srsenb

@@ -94,6 +94,11 @@ public:
bool is_csfb = false;
void* operator new(size_t sz);
void* operator new[](size_t sz) = delete;
void operator delete(void* ptr) noexcept;
void operator delete[](void* ptr) = delete;
private:
// args
srslte::byte_buffer_pool* pool = nullptr;
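Deleting the array forms above is deliberate: the backing pool caches blocks of exactly sizeof(ue), whereas new ue[N] would request N * sizeof(ue) plus array bookkeeping, so any array allocation becomes a compile-time error instead of a silent size mismatch.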

@@ -151,9 +151,9 @@ void rrc::add_user(uint16_t rnti, const sched_interface::ue_cfg_t& sched_ue_cfg)
bool rnti_added = true;
if (rnti != SRSLTE_MRNTI) {
// only non-eMBMS RNTIs are present in user map
auto p = users.insert(std::make_pair(rnti, ue_pool.make(this, rnti, sched_ue_cfg)));
auto p = users.insert(std::make_pair(rnti, std::unique_ptr<ue>{new ue(this, rnti, sched_ue_cfg)}));
if (ue_pool.capacity() <= 4) {
task_sched.defer_task([this]() { ue_pool.reserve(16); });
task_sched.defer_task([]() { rrc::ue_pool.reserve(16); });
}
rnti_added = p.second and p.first->second->is_allocated();
}
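Note the refill strategy here: once capacity() drops to 4 or fewer cached blocks, the pool is topped up to 16 via a deferred task, so the heap allocations run on the task scheduler's time rather than on the latency-sensitive path that admits a new RNTI. The lambda can now be capture-free because ue_pool is a static member.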
@@ -962,4 +962,7 @@ void rrc::tti_clock()
}
}
// definition of rrc static member
srslte::big_obj_pool<rrc::ue, false> rrc::ue_pool;
} // namespace srsenb
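The out-of-class definition above is required: before C++17 inline variables, a non-inline static data member declared in a header must be defined in exactly one translation unit. The generic shape of the idiom:

struct pool_t { /* storage and methods */ };
struct S {
  static pool_t pool; // header: declaration only, no storage
};
pool_t S::pool;       // one .cc file: the single definition that owns storage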

@@ -14,6 +14,7 @@
#include "srsenb/hdr/stack/rrc/mac_controller.h"
#include "srsenb/hdr/stack/rrc/rrc_mobility.h"
#include "srsenb/hdr/stack/rrc/ue_rr_cfg.h"
#include "srslte/adt/mem_pool.h"
#include "srslte/asn1/rrc_utils.h"
#include "srslte/common/enb_events.h"
#include "srslte/common/int_helpers.h"
@@ -56,6 +57,16 @@ rrc::ue::ue(rrc* outer_rrc, uint16_t rnti_, const sched_interface::ue_cfg_t& sch
rrc::ue::~ue() {}
void* rrc::ue::operator new(size_t sz)
{
assert(sz == sizeof(ue));
return rrc::ue_pool.allocate_node(sz);
}
void rrc::ue::operator delete(void* ptr) noexcept
{
rrc::ue_pool.deallocate_node(ptr);
}
rrc_state_t rrc::ue::get_state()
{
return state;
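The assert in rrc::ue::operator new above guards a subtle pitfall: a class-specific operator new is inherited, so an allocation of a subclass would otherwise be placed in a sizeof(ue)-sized block:

// Hypothetical subclass, invented for illustration (not in the codebase):
//   struct derived_ue : public rrc::ue { int extra; };
// `new derived_ue(...)` still resolves to rrc::ue::operator new, but with
// sz == sizeof(derived_ue) > sizeof(ue); the assert(sz == sizeof(ue)) turns
// a silent out-of-bounds construction into a debug-time failure.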
