mirror of https://github.com/pvnis/srsRAN_4G.git

Merge branch 'next' into agpl_next

# Conflicts:
#   srsue/test/mac_nr/mac_nr_test.cc

commit c0282856d0
@@ -0,0 +1,184 @@
/**
 *
 * \section COPYRIGHT
 *
 * Copyright 2013-2021 Software Radio Systems Limited
 *
 * By using this file, you agree to the terms and conditions set
 * forth in the LICENSE file which can be found at the top level of
 * the distribution.
 *
 */

#ifndef SRSRAN_BATCH_MEM_POOL_H
#define SRSRAN_BATCH_MEM_POOL_H

#include "memblock_cache.h"
#include "pool_utils.h"
#include "srsran/common/srsran_assert.h"
#include "srsran/common/thread_pool.h"
#include <memory>
#include <mutex>

namespace srsran {

/**
 * Non-thread-safe, node-based memory pool that allocates nodes in batches of "objs_per_batch" > 1, and caches
 * allocated blocks on deallocation
 */
class growing_batch_mem_pool
{
public:
  explicit growing_batch_mem_pool(size_t objs_per_batch_,
                                  size_t node_size_,
                                  size_t node_alignment_,
                                  int    init_size = -1) :
    objs_per_batch(objs_per_batch_),
    memblock_size(std::max(node_size_, free_memblock_list::min_memblock_size())),
    allocated(objs_per_batch * memblock_size, std::max(node_alignment_, free_memblock_list::min_memblock_align()))
  {
    size_t N = init_size < 0 ? objs_per_batch_ : init_size;
    while (N > cache_size()) {
      allocate_batch();
    }
  }
  ~growing_batch_mem_pool()
  {
    srsran_assert(cache_size() == size(), "Not all nodes have been deallocated yet (%zd < %zd)", cache_size(), size());
  }

  size_t get_node_max_size() const { return memblock_size; }

  void clear()
  {
    free_list.clear();
    allocated.clear();
  }

  size_t cache_size() const { return free_list.size(); }
  size_t size() const { return allocated.size() * objs_per_batch; }

  void allocate_batch()
  {
    uint8_t* batch_payload = static_cast<uint8_t*>(allocated.allocate_block());
    for (size_t i = 0; i < objs_per_batch; ++i) {
      void* cache_node = batch_payload + i * memblock_size;
      free_list.push(cache_node);
    }
  }

  void* allocate_node()
  {
    if (free_list.empty()) {
      allocate_batch();
    }
    return free_list.pop();
  }

  void deallocate_node(void* ptr) { free_list.push(ptr); }

private:
  const size_t objs_per_batch;
  const size_t memblock_size;

  memblock_stack     allocated;
  free_memblock_list free_list;
};

/**
 * Thread-safe object pool specialized in allocating batches of objects in a preemptive way in a background thread
 * to minimize latency.
 * Note: The dispatched allocation jobs may outlive the pool. To handle this, the pool state is passed to jobs via a
 * shared ptr.
 */
class background_mem_pool
{
public:
  const size_t batch_threshold;

  explicit background_mem_pool(size_t nodes_per_batch_, size_t node_size_, size_t thres_, int initial_size = -1) :
    batch_threshold(thres_),
    state(std::make_shared<detached_pool_state>(this)),
    grow_pool(nodes_per_batch_, node_size_, detail::max_alignment, initial_size)
  {
    srsran_assert(batch_threshold > 1, "Invalid arguments for background memory pool");
  }
  ~background_mem_pool()
  {
    std::lock_guard<std::mutex> lock(state->mutex);
    state->pool = nullptr;
    grow_pool.clear();
  }

  /// alloc new object space. If no memory is pre-reserved in the pool, malloc is called to allocate new batch.
  void* allocate_node(size_t sz)
  {
    srsran_assert(sz <= grow_pool.get_node_max_size(),
                  "Mismatch of allocated node size=%zd and object size=%zd",
                  sz,
                  grow_pool.get_node_max_size());
    std::lock_guard<std::mutex> lock(state->mutex);
    void*                       node = grow_pool.allocate_node();

    if (grow_pool.size() < batch_threshold) {
      allocate_batch_in_background_unlocked();
    }
    return node;
  }

  void deallocate_node(void* p)
  {
    std::lock_guard<std::mutex> lock(state->mutex);
    grow_pool.deallocate_node(p);
  }

  void allocate_batch()
  {
    std::lock_guard<std::mutex> lock(state->mutex);
    grow_pool.allocate_batch();
  }

  size_t get_node_max_size() const { return grow_pool.get_node_max_size(); }
  size_t cache_size() const
  {
    std::lock_guard<std::mutex> lock(state->mutex);
    return grow_pool.cache_size();
  }

private:
  void allocate_batch_in_background_unlocked()
  {
    if (state->dispatched) {
      // new batch allocation already ongoing
      return;
    }
    state->dispatched = true;
    std::shared_ptr<detached_pool_state> state_sptr = state;
    get_background_workers().push_task([state_sptr]() {
      std::lock_guard<std::mutex> lock(state_sptr->mutex);
      // check if pool has not been destroyed
      if (state_sptr->pool != nullptr) {
        auto* pool = state_sptr->pool;
        do {
          pool->grow_pool.allocate_batch();
        } while (pool->grow_pool.cache_size() < pool->batch_threshold);
      }
      state_sptr->dispatched = false;
    });
  }

  // State is stored in a shared_ptr that may outlive the pool.
  struct detached_pool_state {
    std::mutex           mutex;
    background_mem_pool* pool;
    bool                 dispatched = false;
    explicit detached_pool_state(background_mem_pool* pool_) : pool(pool_) {}
  };
  std::shared_ptr<detached_pool_state> state;

  growing_batch_mem_pool grow_pool;
};

} // namespace srsran

#endif // SRSRAN_BATCH_MEM_POOL_H
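A minimal usage sketch for background_mem_pool, assuming the header is reachable under the srsran/adt/pool/ include path used elsewhere in this diff and that the library's get_background_workers() thread pool is up; my_obj is a hypothetical payload type introduced only for illustration.

#include "srsran/adt/pool/batch_mem_pool.h"
#include <new>

struct my_obj { // hypothetical payload type, not part of srsRAN
  int values[32];
};

void background_mem_pool_example()
{
  // 8 nodes per batch, node size taken from my_obj, and a background refill
  // once fewer than 4 nodes remain cached.
  srsran::background_mem_pool pool(8, sizeof(my_obj), 4);

  void*   raw = pool.allocate_node(sizeof(my_obj)); // may kick off a batch allocation in the background
  my_obj* obj = new (raw) my_obj();                 // the pool hands out raw memory, so construction is manual

  obj->~my_obj();            // destructor is also manual
  pool.deallocate_node(raw); // node goes back to the cache for reuse
}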
@@ -0,0 +1,112 @@
/**
 *
 * \section COPYRIGHT
 *
 * Copyright 2013-2021 Software Radio Systems Limited
 *
 * By using this file, you agree to the terms and conditions set
 * forth in the LICENSE file which can be found at the top level of
 * the distribution.
 *
 */

#ifndef SRSRAN_CIRCULAR_MAP_STACK_POOL_H
#define SRSRAN_CIRCULAR_MAP_STACK_POOL_H

#include "batch_mem_pool.h"
#include "linear_allocator.h"
#include "srsran/adt/circular_array.h"
#include <mutex>

namespace srsran {

template <size_t NofStacks>
class circular_stack_pool
{
  struct mem_block_elem_t {
    std::mutex       mutex;
    size_t           key   = std::numeric_limits<size_t>::max();
    size_t           count = 0;
    linear_allocator alloc;

    void clear()
    {
      key   = std::numeric_limits<size_t>::max();
      count = 0;
      alloc.clear();
    }
  };

public:
  circular_stack_pool(size_t nof_objs_per_batch, size_t stack_size, size_t batch_thres, int initial_size = -1) :
    central_cache(std::min(NofStacks, nof_objs_per_batch), stack_size, batch_thres, initial_size),
    logger(srslog::fetch_basic_logger("POOL"))
  {}
  circular_stack_pool(circular_stack_pool&&)      = delete;
  circular_stack_pool(const circular_stack_pool&) = delete;
  circular_stack_pool& operator=(circular_stack_pool&&) = delete;
  circular_stack_pool& operator=(const circular_stack_pool&) = delete;
  ~circular_stack_pool()
  {
    for (mem_block_elem_t& elem : pools) {
      std::unique_lock<std::mutex> lock(elem.mutex);
      srsran_assert(elem.count == 0, "There are missing deallocations for stack id=%zd", elem.key);
      if (elem.alloc.is_init()) {
        void* ptr = elem.alloc.memblock_ptr();
        elem.alloc.clear();
        central_cache.deallocate_node(ptr);
      }
    }
  }

  void* allocate(size_t key, size_t size, size_t alignment) noexcept
  {
    size_t                       idx  = key % NofStacks;
    mem_block_elem_t&            elem = pools[idx];
    std::unique_lock<std::mutex> lock(elem.mutex);
    if (not elem.alloc.is_init()) {
      void* block = central_cache.allocate_node(central_cache.get_node_max_size());
      if (block == nullptr) {
        logger.warning("Failed to allocate memory block from central cache");
        return nullptr;
      }
      elem.key   = key;
      elem.alloc = linear_allocator(block, central_cache.get_node_max_size());
    }
    void* ptr = elem.alloc.allocate(size, alignment);
    if (ptr == nullptr) {
      logger.warning("No space left in memory block with key=%zd of circular stack pool", key);
    } else {
      elem.count++;
    }
    return ptr;
  }

  void deallocate(size_t key, void* p)
  {
    size_t                      idx  = key % NofStacks;
    mem_block_elem_t&           elem = pools[idx];
    std::lock_guard<std::mutex> lock(elem.mutex);
    elem.alloc.deallocate(p);
    elem.count--;
    if (elem.count == 0) {
      // return back to central cache
      void* ptr = elem.alloc.memblock_ptr();
      elem.clear();
      central_cache.deallocate_node(ptr);
    }
  }

  void allocate_batch() { central_cache.allocate_batch(); }

  size_t cache_size() const { return central_cache.cache_size(); }

private:
  srsran::circular_array<mem_block_elem_t, NofStacks> pools;
  srsran::background_mem_pool                         central_cache;
  srslog::basic_logger&                               logger;
};

} // namespace srsran

#endif // SRSRAN_CIRCULAR_MAP_STACK_POOL_H
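A sketch of how circular_stack_pool composes a per-key linear allocator on top of the central background cache. The include path and the 0x4601 key are assumptions for illustration; srslog and the background worker pool are assumed to be initialized.

#include "srsran/adt/pool/circular_map_stack_pool.h"
#include <cstddef>

void circular_stack_pool_example()
{
  // 4 blocks per batch, a 16 kB linear "stack" per key, refill threshold of 2 cached blocks.
  srsran::circular_stack_pool<64> pool(4, 16384, 2);

  const size_t key = 0x4601; // keys are folded onto one of the 64 slots via key % 64
  void* a = pool.allocate(key, 128, alignof(std::max_align_t));
  void* b = pool.allocate(key, 256, 64); // same key, bump-allocated from the same block
  if (a == nullptr || b == nullptr) {
    return; // block exhausted or central cache allocation failed
  }

  // deallocate() only decrements the per-key counter; the whole block returns
  // to the central cache once every allocation for this key has been released.
  pool.deallocate(key, b);
  pool.deallocate(key, a);
}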
@@ -0,0 +1,79 @@
/**
 *
 * \section COPYRIGHT
 *
 * Copyright 2013-2021 Software Radio Systems Limited
 *
 * By using this file, you agree to the terms and conditions set
 * forth in the LICENSE file which can be found at the top level of
 * the distribution.
 *
 */

#ifndef SRSRAN_LINEAR_ALLOCATOR_H
#define SRSRAN_LINEAR_ALLOCATOR_H

#include "pool_utils.h"
#include "srsran/common/srsran_assert.h"

namespace srsran {

class linear_allocator
{
public:
  linear_allocator() = default;
  linear_allocator(void* start_, void* end_) :
    start(static_cast<uint8_t*>(start_)), end(static_cast<uint8_t*>(end_)), cur(start)
  {}
  linear_allocator(void* start_, size_t sz) : start(static_cast<uint8_t*>(start_)), end(start + sz), cur(start) {}
  linear_allocator(const linear_allocator& other) = delete;
  linear_allocator(linear_allocator&& other) noexcept : start(other.start), end(other.end), cur(other.cur)
  {
    other.clear();
  }
  linear_allocator& operator=(const linear_allocator& other) = delete;
  linear_allocator& operator=(linear_allocator&& other) noexcept
  {
    start = other.start;
    end   = other.end;
    cur   = other.cur;
    other.clear();
    return *this;
  }

  void* allocate(size_t sz, size_t alignment)
  {
    void*    alloc_start = align_to(cur, alignment);
    uint8_t* new_cur     = static_cast<uint8_t*>(alloc_start) + sz;
    if (new_cur > end) {
      // Cannot fit allocation in memory block
      return nullptr;
    }
    cur = new_cur;
    return alloc_start;
  }

  void deallocate(void* p) { srsran_assert(p >= start and p < end, "pointer does not belong to pool"); }

  size_t nof_bytes_allocated() const { return cur - start; }
  size_t nof_bytes_left() const { return end - cur; }
  size_t size() const { return end - start; }
  bool   is_init() const { return start != end; }
  void*  memblock_ptr() { return static_cast<void*>(start); }

  void clear()
  {
    start = nullptr;
    cur   = nullptr;
    end   = nullptr;
  }

protected:
  uint8_t* start = nullptr;
  uint8_t* end   = nullptr;
  uint8_t* cur   = nullptr;
};

} // namespace srsran

#endif // SRSRAN_LINEAR_ALLOCATOR_H
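A small sketch of the bump-pointer behaviour of linear_allocator over a stack buffer; the include path is assumed to match the other pool headers in this diff.

#include "srsran/adt/pool/linear_allocator.h"
#include <cstdint>

void linear_allocator_example()
{
  alignas(64) uint8_t buffer[256];
  srsran::linear_allocator arena(buffer, sizeof(buffer));

  void* a = arena.allocate(24, 8);  // bumps the cursor by 24 bytes (8-byte aligned)
  void* b = arena.allocate(40, 64); // padding is inserted to reach 64-byte alignment
  (void)a;
  (void)b;

  // deallocate() only sanity-checks ownership; the memory is reclaimed all at
  // once when the block is returned or clear() is called.
  arena.deallocate(b);
  arena.deallocate(a);
  size_t used = arena.nof_bytes_allocated(); // includes the alignment padding
  (void)used;
}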
@@ -0,0 +1,219 @@
/**
 *
 * \section COPYRIGHT
 *
 * Copyright 2013-2021 Software Radio Systems Limited
 *
 * By using this file, you agree to the terms and conditions set
 * forth in the LICENSE file which can be found at the top level of
 * the distribution.
 *
 */

#ifndef SRSRAN_OBJ_POOL_H
#define SRSRAN_OBJ_POOL_H

#include "batch_mem_pool.h"
#include "memblock_cache.h"
#include "pool_interface.h"

namespace srsran {

template <typename T>
class background_obj_pool;

template <typename T>
class growing_batch_obj_pool final : public obj_pool_itf<T>
{
  static size_t memblock_size()
  {
    /// Node Structure [ node header | (pad to node alignment) | node size | (pad to node header alignment) ]
    return align_next(align_next(free_memblock_list::min_memblock_size(), alignof(T)) + sizeof(T),
                      free_memblock_list::min_memblock_align());
  }
  static size_t batch_size(size_t nof_objs_per_batch)
  {
    /// Batch Structure: [allocated stack header | (pad max alignment) | [memblock] x objs_per_batch ]
    return align_next(detail::max_alignment + (memblock_size() * nof_objs_per_batch), detail::max_alignment);
  }

public:
  using init_mem_oper_t = srsran::move_callback<void(void*)>;
  using recycle_oper_t  = srsran::move_callback<void(T&)>;

  explicit growing_batch_obj_pool(size_t          objs_per_batch_,
                                  int             init_size     = -1,
                                  init_mem_oper_t init_oper_    = detail::inplace_default_ctor_operator<T>{},
                                  recycle_oper_t  recycle_oper_ = detail::noop_operator{}) :
    objs_per_batch(objs_per_batch_),
    init_oper(std::move(init_oper_)),
    recycle_oper(std::move(recycle_oper_)),
    allocated(batch_size(objs_per_batch_), detail::max_alignment),
    cache(sizeof(T), alignof(T))
  {
    size_t N = init_size < 0 ? objs_per_batch_ : init_size;
    while (N > cache.size()) {
      allocate_batch();
    }
  }
  ~growing_batch_obj_pool() { clear(); }

  void clear()
  {
    if (not allocated.empty()) {
      srsran_assert(allocated.size() * objs_per_batch == cache_size(),
                    "Not all objects have been deallocated (%zd < %zd)",
                    cache_size(),
                    allocated.size() * objs_per_batch);
      while (not cache.empty()) {
        void* node_payload = cache.top();
        static_cast<T*>(node_payload)->~T();
        cache.pop();
      }
      allocated.clear();
    }
  }

  unique_pool_ptr<T> make() final
  {
    return unique_pool_ptr<T>(do_allocate(), [this](T* ptr) {
      // dtor is not called, as object is going to be recycled
      do_deallocate(ptr);
    });
  }

  void allocate_batch()
  {
    uint8_t* batch_payload = static_cast<uint8_t*>(allocated.allocate_block());
    for (size_t i = 0; i < objs_per_batch; ++i) {
      void* cache_node = batch_payload + (i * cache.memblock_size);
      cache.push(cache_node);
      init_oper(cache.top());
    }
  }

  size_t cache_size() const { return cache.size(); }

private:
  friend class background_obj_pool<T>;

  T* do_allocate()
  {
    if (cache.empty()) {
      allocate_batch();
    }
    void* top = cache.top();
    cache.pop();
    return static_cast<T*>(top);
  }

  void do_deallocate(T* payload_ptr)
  {
    recycle_oper(*payload_ptr);
    void* header_ptr = cache.get_node_header(static_cast<void*>(payload_ptr));
    cache.push(header_ptr);
  }

  // args
  const size_t    objs_per_batch;
  init_mem_oper_t init_oper;
  recycle_oper_t  recycle_oper;

  memblock_stack     allocated;
  memblock_node_list cache;
};

/**
 * Thread-safe object pool specialized in allocating batches of objects in a preemptive way in a background thread
 * to minimize latency.
 * Note: The dispatched allocation jobs may outlive the pool. To handle this, the pool state is passed to jobs via a
 * shared ptr.
 */
template <typename T>
class background_obj_pool final : public obj_pool_itf<T>
{
public:
  using init_mem_oper_t = typename growing_batch_obj_pool<T>::init_mem_oper_t;
  using recycle_oper_t  = typename growing_batch_obj_pool<T>::recycle_oper_t;

  explicit background_obj_pool(size_t          nof_objs_per_batch,
                               size_t          thres_,
                               int             init_size     = -1,
                               init_mem_oper_t init_oper_    = detail::inplace_default_ctor_operator<T>{},
                               recycle_oper_t  recycle_oper_ = detail::noop_operator{}) :
    thres(thres_),
    state(std::make_shared<detached_pool_state>(this)),
    grow_pool(nof_objs_per_batch, init_size, std::move(init_oper_), std::move(recycle_oper_))
  {
    srsran_assert(thres_ > 1, "The provided threshold=%zd is not valid", thres_);
  }
  ~background_obj_pool()
  {
    std::lock_guard<std::mutex> lock(state->mutex);
    state->pool = nullptr;
    grow_pool.clear();
  }

  unique_pool_ptr<T> make() final
  {
    return unique_pool_ptr<T>(do_allocate(), [this](T* ptr) {
      // dtor is not called, as object is going to be recycled
      do_deallocate(ptr);
    });
  }

  size_t cache_size() const { return grow_pool.cache_size(); }

private:
  T* do_allocate()
  {
    std::lock_guard<std::mutex> lock(state->mutex);
    T*                          obj = grow_pool.do_allocate();
    if (grow_pool.cache_size() < thres) {
      allocate_batch_in_background_();
    }
    return obj;
  }
  void do_deallocate(T* ptr)
  {
    std::lock_guard<std::mutex> lock(state->mutex);
    return grow_pool.do_deallocate(ptr);
  }

  void allocate_batch_in_background_()
  {
    if (state->dispatched) {
      // new batch allocation already ongoing
      return;
    }
    state->dispatched = true;
    std::shared_ptr<detached_pool_state> state_sptr = state;
    get_background_workers().push_task([state_sptr]() {
      std::lock_guard<std::mutex> lock(state_sptr->mutex);
      if (state_sptr->pool != nullptr) {
        auto* pool = state_sptr->pool;
        do {
          pool->grow_pool.allocate_batch();
        } while (pool->grow_pool.cache_size() < pool->thres);
      }
      state_sptr->dispatched = false;
    });
  }

  size_t thres;

  // state of pool is detached because pool may be destroyed while batches are being allocated in the background
  struct detached_pool_state {
    std::mutex              mutex;
    background_obj_pool<T>* pool;
    bool                    dispatched = false;
    explicit detached_pool_state(background_obj_pool<T>* pool_) : pool(pool_) {}
  };
  std::shared_ptr<detached_pool_state> state;

  growing_batch_obj_pool<T> grow_pool;
};

} // namespace srsran

#endif // SRSRAN_OBJ_POOL_H
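A usage sketch for background_obj_pool, which hands out pre-constructed objects and recycles them on release instead of destroying them. The include path and the big_buffer type are assumptions; the background worker pool is assumed to be running.

#include "srsran/adt/pool/obj_pool.h"
#include <array>
#include <cstdint>

struct big_buffer { // hypothetical pooled type, not part of srsRAN
  std::array<uint8_t, 2048> bytes;
};

void background_obj_pool_example()
{
  // Batches of 16 objects; a background refill is dispatched once fewer than 8 remain cached.
  srsran::background_obj_pool<big_buffer> pool(16, 8);

  // make() pops an already-constructed object; the type-erased deleter pushes
  // it back into the cache (optionally running the recycle operation) rather
  // than calling its destructor.
  srsran::unique_pool_ptr<big_buffer> obj = pool.make();
  obj->bytes[0] = 0xAA;

  obj.reset(); // returned to the pool cache, ready for the next make()
}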
@@ -0,0 +1,62 @@
/**
 *
 * \section COPYRIGHT
 *
 * Copyright 2013-2021 Software Radio Systems Limited
 *
 * By using this file, you agree to the terms and conditions set
 * forth in the LICENSE file which can be found at the top level of
 * the distribution.
 *
 */

#ifndef SRSRAN_POOL_INTERFACE_H
#define SRSRAN_POOL_INTERFACE_H

#include "srsran/adt/move_callback.h"
#include <memory>

namespace srsran {

/// unique ptr with type-erased dtor, so that it can be used by any object or memory pool
constexpr size_t unique_pool_deleter_small_buffer = sizeof(void*) * 2u;
template <typename T>
using unique_pool_ptr = std::unique_ptr<T, srsran::move_callback<void(T*), unique_pool_deleter_small_buffer> >;

/// Common object pool interface
template <typename T>
class obj_pool_itf
{
public:
  using object_type = T;

  obj_pool_itf() = default;
  // Object pool address should not change
  obj_pool_itf(const obj_pool_itf&) = delete;
  obj_pool_itf(obj_pool_itf&&)      = delete;
  obj_pool_itf& operator=(const obj_pool_itf&) = delete;
  obj_pool_itf& operator=(obj_pool_itf&&) = delete;

  virtual ~obj_pool_itf()           = default;
  virtual unique_pool_ptr<T> make() = 0;
};

/// Allocate object in memory pool
template <typename T, typename MemPool, typename... Args>
unique_pool_ptr<T> make_pool_obj_with_heap_fallback(MemPool& mempool, Args&&... args)
{
  void* block = mempool.allocate(sizeof(T), alignof(T));
  if (block == nullptr) {
    return unique_pool_ptr<T>(new T(std::forward<Args>(args)...), std::default_delete<T>());
  }
  new (block) T(std::forward<Args>(args)...);
  return unique_pool_ptr<T>(static_cast<T*>(block), [&mempool](T* ptr) {
    if (ptr != nullptr) {
      ptr->~T();
      mempool.deallocate(ptr);
    }
  });
}

} // namespace srsran

#endif // SRSRAN_POOL_INTERFACE_H
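A sketch of make_pool_obj_with_heap_fallback with a deliberately trivial pool. The malloc_pool adaptor and session type are hypothetical, used only to show the required MemPool surface (allocate(size, align) / deallocate(ptr)) and the heap fallback path.

#include "srsran/adt/pool/pool_interface.h"
#include <cstdlib>

// Hypothetical adaptor: any type exposing allocate(size, align) / deallocate(ptr)
// can be passed as the MemPool parameter.
struct malloc_pool {
  void* allocate(std::size_t size, std::size_t /*align*/) { return std::malloc(size); }
  void  deallocate(void* ptr) { std::free(ptr); }
};

struct session { // illustrative payload type
  explicit session(int id_) : id(id_) {}
  int id;
};

void pool_interface_example()
{
  malloc_pool pool;
  // The object is placement-constructed inside pool memory; the type-erased
  // deleter runs the destructor and hands the block back to the same pool.
  // Had allocate() returned nullptr, a plain heap allocation would be used instead.
  srsran::unique_pool_ptr<session> s = srsran::make_pool_obj_with_heap_fallback<session>(pool, 42);
  (void)s;
}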
@@ -0,0 +1,64 @@
/**
 *
 * \section COPYRIGHT
 *
 * Copyright 2013-2021 Software Radio Systems Limited
 *
 * By using this file, you agree to the terms and conditions set
 * forth in the LICENSE file which can be found at the top level of
 * the distribution.
 *
 */

#ifndef SRSRAN_POOL_UTILS_H
#define SRSRAN_POOL_UTILS_H

#include "../move_callback.h"
#include <memory>

namespace srsran {

namespace detail {

template <typename T>
struct inplace_default_ctor_operator {
  void operator()(void* ptr) { new (ptr) T(); }
};

struct noop_operator {
  template <typename T>
  void operator()(T&& t) const
  {
    // do nothing
  }
};

} // namespace detail

/// check if alignment is power of 2
constexpr bool is_valid_alignment(std::size_t alignment)
{
  return alignment && (alignment & (alignment - 1)) == 0u;
}

inline bool is_aligned(void* ptr, std::size_t alignment)
{
  return (reinterpret_cast<std::uintptr_t>(ptr) & (alignment - 1)) == 0;
}

constexpr std::uintptr_t align_next(std::uintptr_t pos, size_t alignment)
{
  return (pos + (alignment - 1)) & ~(alignment - 1);
}
inline void* align_to(void* pos, size_t alignment)
{
  return reinterpret_cast<void*>(align_next(reinterpret_cast<std::uintptr_t>(pos), alignment));
}
inline void* offset_byte_ptr(void* pos, size_t offset)
{
  return static_cast<void*>(static_cast<uint8_t*>(pos) + offset);
}

} // namespace srsran

#endif // SRSRAN_POOL_UTILS_H
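A brief sketch of the alignment helpers above; the include path is an assumption consistent with the other pool headers in this diff.

#include "srsran/adt/pool/pool_utils.h"

static_assert(srsran::is_valid_alignment(64), "64 is a power of two");
static_assert(srsran::align_next(100, 64) == 128, "100 rounds up to the next multiple of 64");
static_assert(srsran::align_next(128, 64) == 128, "already-aligned values are unchanged");

void pool_utils_example()
{
  alignas(64) unsigned char buf[256];
  void* p  = srsran::align_to(buf + 3, 16); // first 16-byte aligned address at or after buf+3
  bool  ok = srsran::is_aligned(p, 16);     // true
  (void)ok;
}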
@@ -0,0 +1,44 @@
/**
 *
 * \section COPYRIGHT
 *
 * Copyright 2013-2021 Software Radio Systems Limited
 *
 * By using this file, you agree to the terms and conditions set
 * forth in the LICENSE file which can be found at the top level of
 * the distribution.
 *
 */

#ifndef SRSRAN_PDCCH_CFG_NR_H
#define SRSRAN_PDCCH_CFG_NR_H

#include "dci_nr.h"

/**
 * Maximum number of CORESET
 * @remark Defined in TS 38.331 by maxNrofControlResourceSets-1
 */
#define SRSRAN_UE_DL_NR_MAX_NOF_CORESET 12

/**
 * Maximum number of Search spaces
 * @remark Defined in TS 38.331 by maxNrofSearchSpaces-1
 */
#define SRSRAN_UE_DL_NR_MAX_NOF_SEARCH_SPACE 40

/**
 * @brief PDCCH configuration provided by upper layers
 */
typedef struct SRSRAN_API {
  srsran_coreset_t coreset[SRSRAN_UE_DL_NR_MAX_NOF_CORESET];        ///< PDCCH Control resource sets (CORESET) collection
  bool             coreset_present[SRSRAN_UE_DL_NR_MAX_NOF_CORESET]; ///< CORESET present flags

  srsran_search_space_t search_space[SRSRAN_UE_DL_NR_MAX_NOF_SEARCH_SPACE];
  bool                  search_space_present[SRSRAN_UE_DL_NR_MAX_NOF_SEARCH_SPACE];

  srsran_search_space_t ra_search_space;
  bool                  ra_search_space_present;
} srsran_pdcch_cfg_nr_t;

#endif // SRSRAN_PDCCH_CFG_NR_H
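A minimal sketch of how the presence flags pair with the fixed-size CORESET and search-space arrays; the header path mirrors the dci_nr.h include seen later in this diff, and only fields declared in the struct above are touched (populating srsran_coreset_t / srsran_search_space_t themselves is out of scope here).

#include "srsran/phy/phch/pdcch_cfg_nr.h"

static srsran_pdcch_cfg_nr_t make_minimal_pdcch_cfg()
{
  srsran_pdcch_cfg_nr_t cfg = {};

  // Each CORESET/search-space slot is only considered configured when its
  // *_present flag is raised.
  cfg.coreset_present[0]      = true; // e.g. a single configured CORESET
  cfg.search_space_present[0] = true; // e.g. one configured search space

  // The RA search space has its own dedicated member and flag.
  cfg.ra_search_space_present = true;

  return cfg;
}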
@@ -0,0 +1,27 @@
/**
 *
 * \section COPYRIGHT
 *
 * Copyright 2013-2021 Software Radio Systems Limited
 *
 * By using this file, you agree to the terms and conditions set
 * forth in the LICENSE file which can be found at the top level of
 * the distribution.
 *
 */

#ifndef SRSRAN_BEARER_MEM_POOL_H
#define SRSRAN_BEARER_MEM_POOL_H

#include <cstddef>

namespace srsran {

// Allocation of objects in rnti-dedicated memory pool
void  reserve_rlc_memblocks(size_t nof_blocks);
void* allocate_rlc_bearer(std::size_t size);
void  deallocate_rlc_bearer(void* p);

} // namespace srsran

#endif // SRSRAN_BEARER_MEM_POOL_H
(File diff suppressed because it is too large.)
@@ -0,0 +1,53 @@
/**
 *
 * \section COPYRIGHT
 *
 * Copyright 2013-2021 Software Radio Systems Limited
 *
 * By using this file, you agree to the terms and conditions set
 * forth in the LICENSE file which can be found at the top level of
 * the distribution.
 *
 */

#include "srsran/common/test_common.h"
#include "srsran/phy/phch/dci_nr.h"

static int test_52prb()
{
  // Default configuration with all options disabled
  srsran_dci_cfg_nr_t cfg = {};

  // Set bandwidths
  cfg.coreset0_bw       = 0;
  cfg.bwp_dl_initial_bw = 52;
  cfg.bwp_dl_active_bw  = 52;
  cfg.bwp_ul_initial_bw = 52;
  cfg.bwp_ul_active_bw  = 52;

  // Enable all monitoring
  cfg.monitor_common_0_0  = true;
  cfg.monitor_0_0_and_1_0 = true;
  cfg.monitor_0_1_and_1_1 = true;

  // Configure DCI
  srsran_dci_nr_t dci = {};
  TESTASSERT(srsran_dci_nr_set_cfg(&dci, &cfg) == SRSRAN_SUCCESS);

  // Check DCI sizes
  TESTASSERT(srsran_dci_nr_size(&dci, srsran_search_space_type_common_3, srsran_dci_format_nr_0_0) == 39);
  TESTASSERT(srsran_dci_nr_size(&dci, srsran_search_space_type_common_3, srsran_dci_format_nr_1_0) == 39);
  TESTASSERT(srsran_dci_nr_size(&dci, srsran_search_space_type_ue, srsran_dci_format_nr_0_0) == 39);
  TESTASSERT(srsran_dci_nr_size(&dci, srsran_search_space_type_ue, srsran_dci_format_nr_1_0) == 39);
  TESTASSERT(srsran_dci_nr_size(&dci, srsran_search_space_type_ue, srsran_dci_format_nr_0_1) == 28);
  TESTASSERT(srsran_dci_nr_size(&dci, srsran_search_space_type_ue, srsran_dci_format_nr_1_1) == 26);

  return SRSRAN_SUCCESS;
}

int main(int argc, char** argv)
{
  TESTASSERT(test_52prb() == SRSRAN_SUCCESS);

  return SRSRAN_SUCCESS;
}
@@ -0,0 +1,150 @@
/**
 *
 * \section COPYRIGHT
 *
 * Copyright 2013-2021 Software Radio Systems Limited
 *
 * By using this file, you agree to the terms and conditions set
 * forth in the LICENSE file which can be found at the top level of
 * the distribution.
 *
 */

#include "srsran/common/test_common.h"
#include "srsran/phy/ue/ue_dl_nr.h"
#include <getopt.h>

static int test_case_1()
{
  // Set configuration
  srsran_ue_dl_nr_harq_ack_cfg_t cfg = {};
  cfg.harq_ack_codebook              = srsran_pdsch_harq_ack_codebook_dynamic;

  // Generate ACK information
  srsran_pdsch_ack_nr_t ack_info = {};
  ack_info.nof_cc                = 1;
  ack_info.use_pusch             = true;

  srsran_pdsch_ack_m_nr_t m = {};
  m.value[0]                = 1;
  m.present                 = true;

  m.resource.k1       = 8;
  m.resource.v_dai_dl = 0;
  m.value[0]          = 1;
  m.present           = true;
  TESTASSERT(srsran_ue_dl_nr_ack_insert_m(&ack_info, &m) == SRSRAN_SUCCESS);

  m.resource.k1       = 5;
  m.resource.v_dai_dl = 2;
  TESTASSERT(srsran_ue_dl_nr_ack_insert_m(&ack_info, &m) == SRSRAN_SUCCESS);

  m.resource.k1       = 6;
  m.resource.v_dai_dl = 1;
  TESTASSERT(srsran_ue_dl_nr_ack_insert_m(&ack_info, &m) == SRSRAN_SUCCESS);

  m.resource.k1       = 4;
  m.resource.v_dai_dl = 3;
  TESTASSERT(srsran_ue_dl_nr_ack_insert_m(&ack_info, &m) == SRSRAN_SUCCESS);

  m.resource.k1       = 3;
  m.resource.v_dai_dl = 0;
  TESTASSERT(srsran_ue_dl_nr_ack_insert_m(&ack_info, &m) == SRSRAN_SUCCESS);

  // Print trace
  char str[512] = {};
  TESTASSERT(srsran_ue_dl_nr_ack_info(&ack_info, str, (uint32_t)sizeof(str)) > SRSRAN_SUCCESS);
  INFO("%s", str);

  // Generate UCI data
  srsran_uci_data_nr_t uci_data = {};
  TESTASSERT(srsran_ue_dl_nr_gen_ack(&cfg, &ack_info, &uci_data) == SRSRAN_SUCCESS);

  // Assert UCI data
  TESTASSERT(uci_data.cfg.o_ack == 5);

  return SRSRAN_SUCCESS;
}

static int test_case_2()
{
  // Set configuration
  srsran_ue_dl_nr_harq_ack_cfg_t cfg = {};
  cfg.harq_ack_codebook              = srsran_pdsch_harq_ack_codebook_dynamic;

  // Generate ACK information
  srsran_pdsch_ack_nr_t ack_info = {};
  ack_info.nof_cc                = 1;
  ack_info.use_pusch             = true;

  srsran_pdsch_ack_m_nr_t m = {};
  m.value[0]                = 1;
  m.present                 = true;

  m.resource.k1       = 7;
  m.resource.v_dai_dl = 1;
  TESTASSERT(srsran_ue_dl_nr_ack_insert_m(&ack_info, &m) == SRSRAN_SUCCESS);

  m.resource.k1       = 6;
  m.resource.v_dai_dl = 2;
  TESTASSERT(srsran_ue_dl_nr_ack_insert_m(&ack_info, &m) == SRSRAN_SUCCESS);

  m.resource.k1       = 8;
  m.resource.v_dai_dl = 0;
  TESTASSERT(srsran_ue_dl_nr_ack_insert_m(&ack_info, &m) == SRSRAN_SUCCESS);

  m.resource.k1       = 5;
  m.resource.v_dai_dl = 3;
  TESTASSERT(srsran_ue_dl_nr_ack_insert_m(&ack_info, &m) == SRSRAN_SUCCESS);

  m.resource.k1       = 4;
  m.resource.v_dai_dl = 0;
  TESTASSERT(srsran_ue_dl_nr_ack_insert_m(&ack_info, &m) == SRSRAN_SUCCESS);

  // Print trace
  char str[512] = {};
  TESTASSERT(srsran_ue_dl_nr_ack_info(&ack_info, str, (uint32_t)sizeof(str)) > SRSRAN_SUCCESS);
  INFO("%s", str);

  // Generate UCI data
  srsran_uci_data_nr_t uci_data = {};
  TESTASSERT(srsran_ue_dl_nr_gen_ack(&cfg, &ack_info, &uci_data) == SRSRAN_SUCCESS);

  // Assert UCI data
  TESTASSERT(uci_data.cfg.o_ack == 5);

  return SRSRAN_SUCCESS;
}

static void usage(char* prog)
{
  printf("Usage: %s [v]\n", prog);
  printf("\t-v Increase srsran_verbose\n");
}

static void parse_args(int argc, char** argv)
{
  int opt;
  while ((opt = getopt(argc, argv, "v")) != -1) {
    switch (opt) {
      case 'v':
        srsran_verbose++;
        break;
      default:
        usage(argv[0]);
        exit(-1);
    }
  }
}

int main(int argc, char** argv)
{
  parse_args(argc, argv);

  // Test only until Format1B - CS
  TESTASSERT(test_case_1() == SRSRAN_SUCCESS);
  TESTASSERT(test_case_2() == SRSRAN_SUCCESS);

  printf("Ok\n");
  return SRSRAN_SUCCESS;
}
@@ -0,0 +1,44 @@
/**
 *
 * \section COPYRIGHT
 *
 * Copyright 2013-2021 Software Radio Systems Limited
 *
 * By using this file, you agree to the terms and conditions set
 * forth in the LICENSE file which can be found at the top level of
 * the distribution.
 *
 */

#include "srsran/upper/bearer_mem_pool.h"
#include "srsran/adt/pool/batch_mem_pool.h"
#include "srsran/upper/rlc_am_lte.h"
#include "srsran/upper/rlc_um_lte.h"
#include "srsran/upper/rlc_um_nr.h"

namespace srsran {

srsran::background_mem_pool* get_bearer_pool()
{
  static background_mem_pool pool(
      4, std::max(std::max(sizeof(rlc_am_lte), sizeof(rlc_um_lte)), sizeof(rlc_um_nr)), 8, 8);
  return &pool;
}

void reserve_rlc_memblocks(size_t nof_blocks)
{
  srsran::background_mem_pool* pool = get_bearer_pool();
  while (pool->cache_size() < nof_blocks) {
    pool->allocate_batch();
  }
}
void* allocate_rlc_bearer(std::size_t sz)
{
  return get_bearer_pool()->allocate_node(sz);
}
void deallocate_rlc_bearer(void* p)
{
  get_bearer_pool()->deallocate_node(p);
}

} // namespace srsran

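A sketch of how the bearer pool API above composes: pre-reserve blocks at startup, then hand out bearer-sized nodes without touching malloc. The rlc_dummy type is hypothetical and assumed to be smaller than the real RLC bearer classes that size the pool's nodes.

#include "srsran/upper/bearer_mem_pool.h"
#include <new>

struct rlc_dummy { // illustrative stand-in for an RLC bearer class
  char state[128];
};

void bearer_pool_example()
{
  // Grow the pool ahead of time so bearer creation stays off the malloc path.
  srsran::reserve_rlc_memblocks(16);

  void*      mem    = srsran::allocate_rlc_bearer(sizeof(rlc_dummy));
  rlc_dummy* bearer = new (mem) rlc_dummy();

  bearer->~rlc_dummy();
  srsran::deallocate_rlc_bearer(mem); // node returns to the shared bearer pool
}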
@@ -0,0 +1,48 @@
/**
 *
 * \section COPYRIGHT
 *
 * Copyright 2013-2021 Software Radio Systems Limited
 *
 * By using this file, you agree to the terms and conditions set
 * forth in the LICENSE file which can be found at the top level of
 * the distribution.
 *
 */

#ifndef SRSRAN_RNTI_POOL_H
#define SRSRAN_RNTI_POOL_H

#include "srsran/adt/pool/pool_interface.h"
#include "srsran/phy/common/phy_common.h"
#include <memory>

namespace srsenb {

// Allocation of objects in rnti-dedicated memory pool
void  reserve_rnti_memblocks(size_t nof_blocks);
void* allocate_rnti_dedicated_mem(uint16_t rnti, std::size_t size, std::size_t align);
void  deallocate_rnti_dedicated_mem(uint16_t rnti, void* p);

template <typename T>
using unique_rnti_ptr = srsran::unique_pool_ptr<T>;

template <typename T, typename... Args>
unique_rnti_ptr<T> make_rnti_obj(uint16_t rnti, Args&&... args)
{
  void* block = allocate_rnti_dedicated_mem(rnti, sizeof(T), alignof(T));
  if (block == nullptr) {
    // allocated with "new" as a fallback
    return unique_rnti_ptr<T>(new T(std::forward<Args>(args)...), std::default_delete<T>());
  }
  // allocation using rnti-dedicated memory pool was successful
  new (block) T(std::forward<Args>(args)...);
  return unique_rnti_ptr<T>(static_cast<T*>(block), [rnti](T* ptr) {
    ptr->~T();
    deallocate_rnti_dedicated_mem(rnti, ptr);
  });
}

} // namespace srsenb

#endif // SRSRAN_RNTI_POOL_H
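A usage sketch for make_rnti_obj, assuming the rnti_pool.h header above is on the include path (its exact location inside srsenb is not shown in this diff); ue_ctxt and the RNTI value are hypothetical.

#include <cstdint>
// #include "rnti_pool.h"   // the srsenb header defined above; path assumed

struct ue_ctxt { // hypothetical per-UE context, not an actual srsenb class
  explicit ue_ctxt(uint16_t rnti_) : rnti(rnti_) {}
  uint16_t rnti;
};

void rnti_pool_example()
{
  const uint16_t rnti = 0x46;

  // Grow the pool ahead of time so UE creation avoids malloc on the fast path.
  srsenb::reserve_rnti_memblocks(64);

  // The object lives in the memory segment dedicated to this RNTI; the deleter
  // runs the destructor and returns the block to that same segment. If the
  // pool cannot serve the request, make_rnti_obj silently falls back to "new".
  srsenb::unique_rnti_ptr<ue_ctxt> ue = srsenb::make_rnti_obj<ue_ctxt>(rnti, rnti);
  (void)ue;
}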
(Some files were not shown because too many files have changed in this diff.)