adt - added background object pool test, and fixed existing bugs related to the creation of pools with objects that are too small to be used in free lists

master
Francisco 4 years ago committed by Francisco Paisana
parent d1c5b000dc
commit ec3cd9ffea
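
The bug this commit fixes comes from the intrusive free list used by the pools: a freed block is reused to store the free-list node itself, so any block smaller than that node gets written past its end when pushed. A minimal sketch of the invariant, with illustrative names (node, push and head here are placeholders, not the exact srsRAN identifiers):

#include <cstddef>
#include <new>

// A freed memory block is recycled as the free-list node itself, so the
// block must be at least sizeof(node) bytes; otherwise the placement-new
// below writes past the end of the block -- the bug class fixed here.
struct node {
  node* prev;
  explicit node(node* p) : prev(p) {}
};

static node* head = nullptr;

template <typename T>
void push(T* block)
{
  // Same compile-time guard this commit adds to the free-list cache push.
  static_assert(sizeof(T) >= sizeof(node), "Provided memory block is too small");
  head = ::new (static_cast<void*>(block)) node(head);
}
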

@@ -33,11 +33,7 @@ namespace detail {
* @tparam BatchSize number of T objects in a batch
* @tparam ThresholdSize number of T objects below which a new batch needs to be allocated
*/
template <typename T,
size_t BatchSize,
size_t ThresholdSize,
typename CtorFunc = default_ctor_operator<T>,
typename RecycleFunc = noop_operator>
template <typename T, size_t BatchSize, size_t ThresholdSize, typename CtorFunc, typename RecycleFunc>
class base_background_pool
{
static_assert(ThresholdSize > 0, "ThresholdSize needs to be positive");
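
To illustrate how BatchSize and ThresholdSize interact, here is a toy, single-threaded sketch of the contract described in the doc comment above; it is not the srsRAN implementation, which defers the refill to a background worker and recycles memory through the free-list cache:

#include <cstddef>
#include <vector>

// Toy pool: when fewer than ThresholdSize cached objects remain, allocate
// another BatchSize objects. srsRAN performs this refill on a background
// thread; here it is done inline to keep the sketch short, and cleanup of
// the allocated objects is omitted for brevity.
template <typename T, std::size_t BatchSize, std::size_t ThresholdSize>
struct toy_pool {
  static_assert(ThresholdSize > 0, "ThresholdSize needs to be positive");

  T* allocate()
  {
    if (cache.size() < ThresholdSize) {
      allocate_batch();
    }
    T* obj = cache.back();
    cache.pop_back();
    return obj;
  }

  void deallocate(T* obj) { cache.push_back(obj); }

private:
  void allocate_batch()
  {
    for (std::size_t i = 0; i < BatchSize; ++i) {
      cache.push_back(new T{});
    }
  }

  std::vector<T*> cache;
};

With toy_pool<my_obj, 16, 4>, the first allocate() fills the cache with 16 objects, and a refill is triggered again only once fewer than 4 cached objects remain.
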
@@ -92,10 +88,8 @@ public:
void deallocate_node(void* p)
{
std::lock_guard<std::mutex> lock(mutex);
if (p != nullptr) {
recycle_func(static_cast<void*>(p));
obj_cache.push(static_cast<uint8_t*>(p));
}
obj_cache.push(static_cast<void*>(p));
}
void allocate_batch_in_background()
@@ -107,22 +101,22 @@ public:
}
private:
using obj_storage_t = type_storage<T>;
using obj_storage_t = type_storage<T, memblock_cache::min_memblock_size()>;
using batch_obj_t = std::array<obj_storage_t, BatchSize>;
/// Unprotected allocation of new Batch of Objects
void allocate_batch_()
{
batch_obj_t* batch = new batch_obj_t();
std::unique_ptr<batch_obj_t> batch(new batch_obj_t());
if (batch == nullptr) {
srslog::fetch_basic_logger("POOL").warning("Failed to allocate new batch in background thread");
return;
}
batches.emplace_back(batch);
for (obj_storage_t& obj_store : *batch) {
ctor_func(static_cast<void*>(&obj_store));
obj_cache.push(static_cast<void*>(&obj_store));
ctor_func(obj_store.addr());
obj_cache.push(&obj_store.buffer);
}
batches.emplace_back(std::move(batch));
}
CtorFunc ctor_func;
@@ -138,7 +132,7 @@ private:
template <typename T, size_t BatchSize, size_t ThresholdSize>
using background_mem_pool =
detail::base_background_pool<typename std::aligned_storage<sizeof(T), alignof(T)>::type, BatchSize, ThresholdSize>;
detail::base_background_pool<T, BatchSize, ThresholdSize, detail::noop_operator, detail::noop_operator>;
template <typename T,
size_t BatchSize,

@@ -58,7 +58,7 @@ public:
void deallocate_node(void* p)
{
if (p != nullptr) {
stack.push(static_cast<uint8_t*>(p));
stack.push(p);
}
}
@@ -67,7 +67,7 @@ public:
{
static const size_t blocksize = std::max(sizeof(T), memblock_cache::min_memblock_size());
for (size_t i = 0; i < N; ++i) {
stack.push(new uint8_t[blocksize]);
stack.push(static_cast<void*>(new uint8_t[blocksize]));
}
}

@@ -43,6 +43,13 @@ public:
return *this;
}
template <typename T>
void push(T* block) noexcept
{
static_assert(sizeof(T) >= sizeof(node), "Provided memory block is too small");
push(static_cast<void*>(block));
}
void push(void* block) noexcept
{
node* next = ::new (block) node(head);
@@ -57,6 +64,7 @@ public:
}
node* last_head = head;
head = head->prev;
last_head->~node();
count--;
return static_cast<void*>(last_head);
}
@@ -99,7 +107,8 @@ public:
return *this;
}
void push(void* block) noexcept
template <typename T>
void push(T* block) noexcept
{
std::lock_guard<std::mutex> lock(mutex);
stack.push(block);

@@ -149,9 +149,11 @@ void test_background_pool()
srsran::background_obj_pool<C, 16, 4> obj_pool;
srsran::unique_pool_ptr<C> c = obj_pool.allocate_object();
srsran::unique_pool_ptr<C> c2 = obj_pool.allocate_object();
srsran::unique_pool_ptr<C> c3 = obj_pool.allocate_object();
TESTASSERT(C::default_ctor_counter == 16);
}
TESTASSERT(C::dtor_counter == 16);
TESTASSERT(C::dtor_counter == 16 and C::dtor_counter == C::default_ctor_counter);
}
int main(int argc, char** argv)

@@ -153,7 +153,7 @@ public:
void operator delete(void* ptr)noexcept;
void operator delete[](void* ptr) = delete;
using ue_pool_t = srsran::background_obj_pool<ue, 16, 4>;
using ue_pool_t = srsran::background_mem_pool<ue, 16, 4>;
static ue_pool_t* get_ue_pool();
private:

@@ -65,7 +65,7 @@ int rrc::ue::init()
return SRSRAN_SUCCESS;
}
srsran::background_obj_pool<rrc::ue, 16, 4>* rrc::ue::get_ue_pool()
srsran::background_mem_pool<rrc::ue, 16, 4>* rrc::ue::get_ue_pool()
{
// Note: batch allocation is going to be explicitly called in enb class construction. The pool object, therefore,
// will only be initialized if we instantiate an eNB
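
One way to read this note, sketched below with placeholder types (fake_pool and enb stand in for the real srsRAN classes, whose actual wiring may differ), is that the pool sits behind a function-local static and the first batch is requested explicitly during eNB construction:

#include <cstdio>

// Stand-in for the background pool type, only here to make the sketch compile.
struct fake_pool {
  fake_pool() { std::puts("pool constructed"); }
  void allocate_batch_in_background() { std::puts("first batch requested"); }
};

// Function-local static: the pool is constructed the first time it is asked
// for, i.e. only if an eNB is actually instantiated.
static fake_pool* get_ue_pool()
{
  static fake_pool pool;
  return &pool;
}

struct enb {
  enb() { get_ue_pool()->allocate_batch_in_background(); } // explicit first batch
};

int main()
{
  // No pool exists until an eNB is created on the next line.
  enb my_enb;
  (void)my_enb; // silence unused-variable warning
  return 0;
}
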
