sched,nr: implementation of main classes for NR scheduler, and basic test for concurrency

master
Francisco 4 years ago committed by Francisco Paisana
parent e248e086ed
commit 4fa27f3841

@ -0,0 +1,65 @@
/**
*
* \section COPYRIGHT
*
* Copyright 2013-2021 Software Radio Systems Limited
*
* By using this file, you agree to the terms and conditions set
* forth in the LICENSE file which can be found at the top level of
* the distribution.
*
*/
#ifndef SRSRAN_SCHED_NR_H
#define SRSRAN_SCHED_NR_H
#include "sched_nr_common.h"
#include "sched_nr_interface.h"
#include "sched_nr_ue.h"
#include "sched_nr_worker.h"
#include "srsran/adt/pool/cached_alloc.h"
#include "srsran/common/tti_point.h"
#include <array>
#include <memory>
#include <mutex>
extern "C" {
#include "srsran/config.h"
}
namespace srsenb {
class ue_event_manager;
class sched_nr final : public sched_nr_interface
{
public:
sched_nr(const sched_nr_cfg& cfg);
~sched_nr() override;
void ue_cfg(uint16_t rnti, const sched_nr_ue_cfg& cfg) override;
void new_tti(tti_point tti_rx) override;
int generate_sched_result(tti_point tti_rx, uint32_t cc, sched_nr_res_t& result);
void dl_ack_info(tti_point tti_rx, uint16_t rnti, uint32_t cc, uint32_t tb_idx, bool ack) override;
void ul_sr_info(tti_point tti_rx, uint16_t rnti) override;
private:
void ue_cfg_impl(uint16_t rnti, const sched_nr_ue_cfg& cfg);
void run_tti(tti_point tti_rx, uint32_t cc);
sched_nr_cfg cfg;
using sched_worker_manager = sched_nr_impl::sched_worker_manager;
sched_worker_manager sched_workers;
std::array<std::array<sched_nr_res_t, SCHED_NR_MAX_CARRIERS>, SCHED_NR_NOF_SUBFRAMES> sched_results;
using ue_map_t = sched_nr_impl::ue_map_t;
std::mutex ue_db_mutex;
ue_map_t ue_db;
// management of PHY UE feedback
std::unique_ptr<ue_event_manager> pending_events;
};
} // namespace srsenb
#endif // SRSRAN_SCHED_NR_H
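
For orientation, here is a minimal usage sketch of this public API, mirroring the serialized test added at the end of this commit; the helper function name is illustrative and not part of the commit:

#include "srsenb/hdr/stack/mac/nr/sched_nr.h"

namespace srsenb {

// Illustrative helper: drive the scheduler for one TTI across all carriers.
void run_one_tti(sched_nr& sched, const sched_nr_cfg& cfg, tti_point tti_rx)
{
  sched.new_tti(tti_rx); // locks the subframe workers to tti_rx and processes pending UE events
  for (uint32_t cc = 0; cc < cfg.cells.size(); ++cc) {
    sched_nr_res_t res;
    sched.generate_sched_result(tti_rx, cc, res); // may be called from one thread per carrier
  }
}

} // namespace srsenb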

@ -0,0 +1,25 @@
/**
*
* \section COPYRIGHT
*
* Copyright 2013-2021 Software Radio Systems Limited
*
* By using this file, you agree to the terms and conditions set
* forth in the LICENSE file which can be found at the top level of
* the distribution.
*
*/
#ifndef SRSRAN_SCHED_NR_COMMON_H
#define SRSRAN_SCHED_NR_COMMON_H
#include "srsran/adt/circular_map.h"
namespace srsenb {
const static size_t SCHED_NR_MAX_USERS = 4;
const static size_t SCHED_NR_NOF_SUBFRAMES = 10;
} // namespace srsenb
#endif // SRSRAN_SCHED_NR_COMMON_H

@ -0,0 +1,61 @@
/**
*
* \section COPYRIGHT
*
* Copyright 2013-2021 Software Radio Systems Limited
*
* By using this file, you agree to the terms and conditions set
* forth in the LICENSE file which can be found at the top level of
* the distribution.
*
*/
#ifndef SRSRAN_SCHED_NR_HARQ_H
#define SRSRAN_SCHED_NR_HARQ_H
#include "srsran/common/tti_point.h"
#include <algorithm>
#include <array>
namespace srsenb {
namespace sched_nr_impl {
template <size_t NofTbs>
class harq
{
public:
harq() = default;
bool empty() const
{
return std::all_of(tb.begin(), tb.end(), [](const tb_t& t) { return not t.active; });
}
bool empty(uint32_t tb_idx) const { return not tb[tb_idx].active; }
private:
struct tb_t {
bool active = false;
bool ack_state = false;
bool ndi = false;
uint32_t n_rtx = 0;
uint32_t mcs = 0;
};
uint32_t id = 0;
tti_point tti_tx;
std::array<tb_t, NofTbs> tb;
};
class harq_entity
{
public:
void dl_ack_info(tti_point tti_rx, uint32_t tb_idx, bool ack) {}
private:
std::array<harq<1>, 16> dl_harqs;
std::array<harq<1>, 16> ul_harqs;
};
} // namespace sched_nr_impl
} // namespace srsenb
#endif // SRSRAN_SCHED_NR_HARQ_H
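
harq_entity::dl_ack_info() is left as a stub in this commit. A sketch of what per-TB feedback handling might eventually look like on the harq template, using only the tb_t fields defined above; the ack_info() name and its exact semantics are assumptions, not part of the commit:

// Hypothetical harq<NofTbs> member; illustrates the intended use of the tb_t fields.
bool ack_info(uint32_t tb_idx, bool ack)
{
  if (tb_idx >= tb.size() or not tb[tb_idx].active) {
    return false; // feedback for an inactive TB is ignored
  }
  tb[tb_idx].ack_state = ack;
  if (ack) {
    tb[tb_idx].active = false; // an ACK empties the TB and frees the HARQ process
  } else {
    tb[tb_idx].n_rtx++; // a NACK keeps the TB active for retransmission
  }
  return true;
}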

@ -0,0 +1,58 @@
/**
*
* \section COPYRIGHT
*
* Copyright 2013-2021 Software Radio Systems Limited
*
* By using this file, you agree to the terms and conditions set
* forth in the LICENSE file which can be found at the top level of
* the distribution.
*
*/
#ifndef SRSRAN_SCHED_NR_INTERFACE_H
#define SRSRAN_SCHED_NR_INTERFACE_H
#include "srsran/adt/bounded_vector.h"
#include "srsran/common/tti_point.h"
namespace srsenb {
const static size_t SCHED_NR_MAX_CARRIERS = 4;
const static uint16_t SCHED_NR_INVALID_RNTI = 0;
struct sched_nr_cell_cfg {};
struct sched_nr_cfg {
uint32_t nof_concurrent_subframes = 1;
srsran::bounded_vector<sched_nr_cell_cfg, SCHED_NR_MAX_CARRIERS> cells;
};
struct sched_nr_ue_cc_cfg {
bool active = false;
};
struct sched_nr_ue_cfg {
srsran::bounded_vector<sched_nr_ue_cc_cfg, SCHED_NR_MAX_CARRIERS> carriers;
};
struct sched_nr_res_t {
struct dl_result {};
struct ul_result {};
};
class sched_nr_interface
{
public:
virtual ~sched_nr_interface() = default;
virtual void ue_cfg(uint16_t rnti, const sched_nr_ue_cfg& ue_cfg) = 0;
virtual void new_tti(tti_point tti_rx) = 0;
virtual void dl_ack_info(tti_point tti_rx, uint16_t rnti, uint32_t cc, uint32_t tb_idx, bool ack) = 0;
virtual void ul_sr_info(tti_point tti_rx, uint16_t rnti) = 0;
};
} // namespace srsenb
#endif // SRSRAN_SCHED_NR_INTERFACE_H
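
Both config structs are plain aggregates; for instance, a two-carrier setup with two concurrent subframes (the shape exercised by the parallel tests below) is assembled by resizing the bounded vectors:

srsenb::sched_nr_cfg cfg;
cfg.nof_concurrent_subframes = 2; // allow two TTIs to be scheduled in parallel
cfg.cells.resize(2);              // two carriers/sectors

srsenb::sched_nr_ue_cfg uecfg;
uecfg.carriers.resize(cfg.cells.size());
uecfg.carriers[0].active = true;
uecfg.carriers[1].active = true;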

@ -0,0 +1,101 @@
/**
*
* \section COPYRIGHT
*
* Copyright 2013-2021 Software Radio Systems Limited
*
* By using this file, you agree to the terms and conditions set
* forth in the LICENSE file which can be found at the top level of
* the distribution.
*
*/
#ifndef SRSRAN_SCHED_NR_UE_H
#define SRSRAN_SCHED_NR_UE_H
#include "sched_nr_common.h"
#include "sched_nr_harq.h"
#include "sched_nr_interface.h"
#include "srsran/adt/move_callback.h"
#include "srsran/adt/pool/cached_alloc.h"
namespace srsenb {
namespace sched_nr_impl {
class ue_carrier;
class bwp_ue
{
public:
bwp_ue() = default;
explicit bwp_ue(ue_carrier& carrier_, tti_point tti_rx_);
~bwp_ue();
bwp_ue(bwp_ue&& other) noexcept :
tti_rx(other.tti_rx), cc(other.cc), cfg(other.cfg), pending_sr(other.pending_sr), carrier(other.carrier)
{
other.carrier = nullptr;
}
bwp_ue& operator=(bwp_ue&& other) noexcept
{
bwp_ue tmp(std::move(*this)); // tmp's destructor releases any reservation *this currently holds
tti_rx = other.tti_rx; cc = other.cc; cfg = other.cfg; pending_sr = other.pending_sr;
carrier = other.carrier;
other.carrier = nullptr;
return *this;
}
bool empty() const { return carrier == nullptr; }
tti_point tti_rx;
uint32_t cc = SCHED_NR_MAX_CARRIERS;
const sched_nr_ue_cfg* cfg = nullptr;
bool pending_sr = false;
private:
ue_carrier* carrier = nullptr;
};
class ue_carrier
{
public:
ue_carrier(uint16_t rnti, uint32_t cc, const sched_nr_ue_cfg& cfg);
bwp_ue try_reserve(tti_point tti_rx);
void push_feedback(srsran::move_callback<void(ue_carrier&)> callback);
const uint16_t rnti;
const uint32_t cc;
harq_entity harq_ent;
private:
friend class bwp_ue;
void release() { busy = false; }
const sched_nr_ue_cfg* cfg;
srsran::deque<srsran::move_callback<void(ue_carrier&)> > pending_feedback;
bool busy{false};
};
class ue
{
public:
ue(uint16_t rnti, const sched_nr_ue_cfg& cfg);
bwp_ue try_reserve(tti_point tti_rx, uint32_t cc);
void set_cfg(const sched_nr_ue_cfg& cfg);
void ul_sr_info(tti_point tti_rx) { pending_sr = true; }
std::array<std::unique_ptr<ue_carrier>, SCHED_NR_MAX_CARRIERS> carriers;
private:
bool pending_sr = false;
int current_idx = 0;
std::array<sched_nr_ue_cfg, 4> ue_cfgs;
};
using ue_map_t = srsran::static_circular_map<uint16_t, std::unique_ptr<ue>, SCHED_NR_MAX_USERS>;
} // namespace sched_nr_impl
} // namespace srsenb
#endif // SRSRAN_SCHED_NR_UE_H
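
The bwp_ue returned by try_reserve() acts as a move-only reservation token: while it is alive, ue_carrier::busy stays true and no other worker can claim the same {rnti, cc}; its destructor calls ue_carrier::release(). A sketch of the intended lifetime, assuming u is a sched_nr_impl::ue and tti_rx/cc are in scope:

{
  sched_nr_impl::bwp_ue token = u.try_reserve(tti_rx, cc);
  if (not token.empty()) {
    // exclusive access to this UE carrier for the {tti, cc} decision
  }
} // token destroyed -> ue_carrier::release() -> carrier can be reserved again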

@ -0,0 +1,79 @@
/**
*
* \section COPYRIGHT
*
* Copyright 2013-2021 Software Radio Systems Limited
*
* By using this file, you agree to the terms and conditions set
* forth in the LICENSE file which can be found at the top level of
* the distribution.
*
*/
#ifndef SRSRAN_SCHED_NR_WORKER_H
#define SRSRAN_SCHED_NR_WORKER_H
#include "sched_nr_common.h"
#include "sched_nr_ue.h"
#include "srsran/adt/circular_array.h"
#include "srsran/adt/optional.h"
#include "srsran/adt/pool/cached_alloc.h"
#include "srsran/adt/span.h"
#include <atomic>
#include <condition_variable>
#include <mutex>
#include <semaphore.h>
#include <vector>
namespace srsenb {
namespace sched_nr_impl {
class bwp_worker
{
public:
explicit bwp_worker(uint32_t cc_, ue_map_t& ue_db_) : cc(cc_), ue_db(ue_db_) {}
void start(tti_point tti_rx_);
void run();
void end_tti();
bool running() const { return tti_rx.is_valid(); }
private:
ue_map_t& ue_db;
tti_point tti_rx;
uint32_t cc;
srsran::circular_array<bwp_ue, SCHED_NR_MAX_USERS> bwp_ues;
};
class sched_worker_manager
{
public:
explicit sched_worker_manager(ue_map_t& ue_db_, const sched_nr_cfg& cfg_);
sched_worker_manager(const sched_worker_manager&) = delete;
sched_worker_manager(sched_worker_manager&&) = delete;
~sched_worker_manager();
void reserve_workers(tti_point tti_rx, srsran::span<sched_nr_res_t> sf_result_);
void start_tti(tti_point tti_rx);
bool run_tti(tti_point tti_rx, uint32_t cc);
void end_tti(tti_point tti_rx);
private:
const sched_nr_cfg& cfg;
struct sf_worker_ctxt {
sem_t sf_sem;
tti_point tti_rx;
srsran::span<sched_nr_res_t> sf_result;
std::atomic<int> worker_count{0}; // decremented concurrently from run_tti()
std::vector<bwp_worker> workers;
};
std::vector<std::unique_ptr<sf_worker_ctxt> > sf_ctxts;
sf_worker_ctxt& get_sf(tti_point tti_rx);
};
} // namespace sched_nr_impl
} // namespace srsenb
#endif // SRSRAN_SCHED_NR_WORKER_H
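
Each of the nof_concurrent_subframes contexts is guarded by its own POSIX semaphore, so up to that many TTIs can be in flight at once. The per-TTI call protocol, as driven from sched_nr (see sched_nr.cc below), is roughly this sketch, where mgr is a sched_worker_manager:

mgr.reserve_workers(tti_rx, sf_result); // sem_wait: blocks if this subframe slot is still busy
mgr.start_tti(tti_rx);                  // under ue_db_mutex: each bwp_worker reserves its UEs
bool last = mgr.run_tti(tti_rx, cc);    // unlocked: one call per carrier, possibly in parallel
if (last) {
  mgr.end_tti(tti_rx);                  // under ue_db_mutex: release UEs, sem_post the slot
}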

@ -14,5 +14,4 @@ set(SOURCES mac.cc ue.cc sched.cc sched_carrier.cc sched_grid.cc sched_ue_ctrl/s
 sched_helpers.cc)
 add_library(srsenb_mac STATIC ${SOURCES} $<TARGET_OBJECTS:mac_schedulers>)
-set(SOURCES mac_nr.cc)
-add_library(srsgnb_mac STATIC ${SOURCES})
+add_subdirectory(nr)

@ -0,0 +1,11 @@
#
# Copyright 2013-2021 Software Radio Systems Limited
#
# By using this file, you agree to the terms and conditions set
# forth in the LICENSE file which can be found at the top level of
# the distribution.
#
set(SOURCES mac_nr.cc sched_nr.cc sched_nr_ue.cc sched_nr_worker.cc)
add_library(srsgnb_mac STATIC ${SOURCES})

@ -0,0 +1,154 @@
/**
*
* \section COPYRIGHT
*
* Copyright 2013-2021 Software Radio Systems Limited
*
* By using this file, you agree to the terms and conditions set
* forth in the LICENSE file which can be found at the top level of
* the distribution.
*
*/
#include "srsenb/hdr/stack/mac/nr/sched_nr.h"
#include "srsran/common/thread_pool.h"
namespace srsenb {
using sched_nr_impl::bwp_worker;
using sched_nr_impl::sched_worker_manager;
using sched_nr_impl::ue;
using sched_nr_impl::ue_carrier;
using sched_nr_impl::ue_map_t;
class ue_event_manager
{
using callback_t = srsran::move_callback<void()>;
using callback_list = srsran::deque<callback_t>;
public:
explicit ue_event_manager(ue_map_t& ue_db_) : ue_db(ue_db_) {}
void push_event(srsran::move_callback<void()> event)
{
std::lock_guard<std::mutex> lock(common_mutex);
common_events.push_back(std::move(event));
}
void push_cc_feedback(uint16_t rnti, uint32_t cc, srsran::move_callback<void(ue_carrier&)> event)
{
std::lock_guard<std::mutex> lock(common_mutex);
feedback_list.emplace_back();
feedback_list.back().rnti = rnti;
feedback_list.back().cc = cc;
feedback_list.back().callback = std::move(event);
}
void new_tti()
{
{
std::lock_guard<std::mutex> lock(common_mutex);
common_events.swap(common_events_tmp); // reuse memory
feedback_list.swap(feedback_list_tmp);
}
while (not common_events_tmp.empty()) {
common_events_tmp.front()();
common_events_tmp.pop_front();
}
while (not feedback_list_tmp.empty()) {
auto& e = feedback_list_tmp.front();
if (ue_db.contains(e.rnti) and ue_db[e.rnti]->carriers[e.cc] != nullptr) {
ue_db[e.rnti]->carriers[e.cc]->push_feedback(std::move(e.callback));
}
feedback_list_tmp.pop_front(); // always consume the entry, even for unknown RNTIs
}
}
private:
ue_map_t& ue_db;
std::mutex common_mutex;
callback_list common_events;
struct ue_feedback {
uint16_t rnti = SCHED_NR_INVALID_RNTI;
uint32_t cc = SCHED_NR_MAX_CARRIERS;
srsran::move_callback<void(ue_carrier&)> callback;
};
srsran::deque<ue_feedback> feedback_list;
callback_list common_events_tmp;
srsran::deque<ue_feedback> feedback_list_tmp;
};
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
sched_nr::sched_nr(const sched_nr_cfg& cfg_) :
cfg(cfg_), sched_workers(ue_db, cfg), pending_events(new ue_event_manager(ue_db))
{}
sched_nr::~sched_nr() {}
void sched_nr::ue_cfg(uint16_t rnti, const sched_nr_ue_cfg& uecfg)
{
pending_events->push_event([this, rnti, uecfg]() { ue_cfg_impl(rnti, uecfg); });
}
void sched_nr::ue_cfg_impl(uint16_t rnti, const sched_nr_ue_cfg& uecfg)
{
if (not ue_db.contains(rnti)) {
ue_db.insert(rnti, std::unique_ptr<ue>(new ue{rnti, uecfg}));
} else {
ue_db[rnti]->set_cfg(uecfg);
}
}
void sched_nr::new_tti(tti_point tti_rx)
{
// Lock subframe workers to provided tti_rx
sched_workers.reserve_workers(tti_rx, sched_results[tti_rx.sf_idx()]);
{
// synchronize {tti,cc} state. e.g. reserve UE resources for {tti,cc} decision, process feedback
std::lock_guard<std::mutex> lock(ue_db_mutex);
// Process pending events
pending_events->new_tti();
sched_workers.start_tti(tti_rx);
}
}
int sched_nr::generate_sched_result(tti_point tti_rx, uint32_t cc, sched_nr_res_t& result)
{
// Generate {tti,cc} scheduling decision
run_tti(tti_rx, cc);
// copy scheduling decision result
result = sched_results[tti_rx.sf_idx()][cc];
return SRSRAN_SUCCESS;
}
void sched_nr::run_tti(tti_point tti_rx, uint32_t cc)
{
// unlocked, parallel region
bool all_workers_finished = sched_workers.run_tti(tti_rx, cc);
if (all_workers_finished) {
// once all workers of the same subframe finished, synchronize sched outcome with ue_db
std::lock_guard<std::mutex> lock(ue_db_mutex);
sched_workers.end_tti(tti_rx);
}
}
void sched_nr::dl_ack_info(tti_point tti_rx, uint16_t rnti, uint32_t cc, uint32_t tb_idx, bool ack)
{
pending_events->push_cc_feedback(
rnti, cc, [tti_rx, tb_idx, ack](ue_carrier& ue_cc) { ue_cc.harq_ent.dl_ack_info(tti_rx, tb_idx, ack); });
}
void sched_nr::ul_sr_info(tti_point tti_rx, uint16_t rnti)
{
pending_events->push_event([this, rnti, tti_rx]() {
if (ue_db.contains(rnti)) {
ue_db[rnti]->ul_sr_info(tti_rx);
}
});
}
} // namespace srsenb
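
ue_event_manager::new_tti() uses a swap-under-lock double buffer: PHY threads append events under the mutex, while the scheduler thread swaps the whole queue out in O(1) and runs the callbacks without holding the lock; the *_tmp deques also let the containers reuse their memory across TTIs. A minimal self-contained restatement of the pattern, with std::function standing in for srsran::move_callback and std::deque for srsran::deque:

#include <deque>
#include <functional>
#include <mutex>
#include <utility>

struct event_queue {
  void push(std::function<void()> ev)
  {
    std::lock_guard<std::mutex> lock(m);
    pending.push_back(std::move(ev)); // producers only ever touch 'pending'
  }
  void process_all()
  {
    {
      std::lock_guard<std::mutex> lock(m);
      pending.swap(scratch); // O(1) critical section; 'scratch' keeps its capacity
    }
    while (not scratch.empty()) {
      scratch.front()(); // callbacks run without holding the lock
      scratch.pop_front();
    }
  }
  std::mutex                        m;
  std::deque<std::function<void()>> pending;
  std::deque<std::function<void()>> scratch;
};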

@ -0,0 +1,80 @@
/**
*
* \section COPYRIGHT
*
* Copyright 2013-2021 Software Radio Systems Limited
*
* By using this file, you agree to the terms and conditions set
* forth in the LICENSE file which can be found at the top level of
* the distribution.
*
*/
#include "srsenb/hdr/stack/mac/nr/sched_nr_ue.h"
namespace srsenb {
namespace sched_nr_impl {
bwp_ue::bwp_ue(ue_carrier& carrier_, tti_point tti_rx_) : tti_rx(tti_rx_), cc(carrier_.cc), carrier(&carrier_) {}
bwp_ue::~bwp_ue()
{
if (carrier != nullptr) {
carrier->release();
}
}
ue_carrier::ue_carrier(uint16_t rnti_, uint32_t cc_, const sched_nr_ue_cfg& cfg_) : rnti(rnti_), cc(cc_), cfg(&cfg_) {}
void ue_carrier::push_feedback(srsran::move_callback<void(ue_carrier&)> callback)
{
pending_feedback.push_back(std::move(callback));
}
bwp_ue ue_carrier::try_reserve(tti_point tti_rx)
{
if (busy) {
return bwp_ue();
}
// successfully acquired
busy = true;
while (not pending_feedback.empty()) {
pending_feedback.front()(*this);
pending_feedback.pop_front();
}
return bwp_ue(*this, tti_rx);
}
ue::ue(uint16_t rnti, const sched_nr_ue_cfg& cfg)
{
for (uint32_t cc = 0; cc < cfg.carriers.size(); ++cc) {
if (cfg.carriers[cc].active) {
carriers[cc].reset(new ue_carrier(rnti, cc, cfg));
}
}
}
void ue::set_cfg(const sched_nr_ue_cfg& cfg)
{
current_idx = (current_idx + 1) % ue_cfgs.size();
ue_cfgs[current_idx] = cfg;
}
bwp_ue ue::try_reserve(tti_point tti_rx, uint32_t cc)
{
if (carriers[cc] == nullptr) {
return bwp_ue();
}
bwp_ue sfu = carriers[cc]->try_reserve(tti_rx);
if (sfu.empty()) {
return bwp_ue();
}
// set UE-common parameters
sfu.pending_sr = pending_sr;
sfu.cfg = &ue_cfgs[current_idx];
return sfu;
}
} // namespace sched_nr_impl
} // namespace srsenb
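
Note how ue_carrier::try_reserve() drains pending_feedback before handing out the token: PHY feedback queued via sched_nr::dl_ack_info() is therefore applied exactly once, at the moment the next worker claims the carrier. The DL ACK path effectively ends up executing:

// effective result of sched_nr::dl_ack_info(tti_rx, rnti, cc, tb_idx, ack),
// executed inside try_reserve() by the worker that next claims the carrier:
ue_carrier& ue_cc = *ue_db[rnti]->carriers[cc];
ue_cc.harq_ent.dl_ack_info(tti_rx, tb_idx, ack); // still a no-op stub in this commit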

@ -0,0 +1,139 @@
/**
*
* \section COPYRIGHT
*
* Copyright 2013-2021 Software Radio Systems Limited
*
* By using this file, you agree to the terms and conditions set
* forth in the LICENSE file which can be found at the top level of
* the distribution.
*
*/
#include "srsenb/hdr/stack/mac/nr/sched_nr_worker.h"
namespace srsenb {
namespace sched_nr_impl {
/// Called at the beginning of TTI in a locked context, to reserve available UE resources
void bwp_worker::start(tti_point tti_rx_)
{
srsran_assert(not running(), "scheduler worker::start() called for active worker");
// Try reserve UE cells for this worker
for (auto& ue_pair : ue_db) {
uint16_t rnti = ue_pair.first;
ue& u = *ue_pair.second;
bwp_ue sfu0 = u.try_reserve(tti_rx_, cc);
if (sfu0.empty()) {
// Failed to synchronize because UE is being used by another worker
continue;
}
// Synchronization of UE for this {tti, cc} was successful
bwp_ues[rnti] = std::move(sfu0);
}
tti_rx = tti_rx_;
}
void bwp_worker::run()
{
srsran_assert(running(), "scheduler worker::run() called for non-active worker");
}
void bwp_worker::end_tti()
{
srsran_assert(running(), "scheduler worker::end() called for non-active worker");
// releases UE resources
for (bwp_ue& u : bwp_ues) {
if (not u.empty()) {
u = {};
}
}
tti_rx = {};
}
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
sched_worker_manager::sched_worker_manager(ue_map_t& ue_db_, const sched_nr_cfg& cfg_) : cfg(cfg_)
{
// Note: For now, we only allow parallelism at the sector level
sf_ctxts.resize(cfg.nof_concurrent_subframes);
for (size_t i = 0; i < cfg.nof_concurrent_subframes; ++i) {
sf_ctxts[i].reset(new sf_worker_ctxt());
sem_init(&sf_ctxts[i]->sf_sem, 0, 1);
sf_ctxts[i]->workers.reserve(cfg.cells.size());
for (uint32_t cc = 0; cc < cfg.cells.size(); ++cc) {
sf_ctxts[i]->workers.emplace_back(cc, ue_db_);
}
}
}
sched_worker_manager::~sched_worker_manager()
{
for (uint32_t sf = 0; sf < sf_ctxts.size(); ++sf) {
sem_destroy(&sf_ctxts[sf]->sf_sem);
}
}
sched_worker_manager::sf_worker_ctxt& sched_worker_manager::get_sf(tti_point tti_rx)
{
return *sf_ctxts[tti_rx.to_uint() % sf_ctxts.size()];
}
void sched_worker_manager::reserve_workers(tti_point tti_rx_, srsran::span<sched_nr_res_t> sf_result_)
{
// lock if slot worker is already being used
auto& sf_worker_ctxt = get_sf(tti_rx_);
sem_wait(&sf_worker_ctxt.sf_sem);
sf_worker_ctxt.sf_result = sf_result_;
sf_worker_ctxt.tti_rx = tti_rx_;
sf_worker_ctxt.worker_count = static_cast<int>(sf_worker_ctxt.workers.size());
}
void sched_worker_manager::start_tti(tti_point tti_rx_)
{
auto& sf_worker_ctxt = get_sf(tti_rx_);
srsran_assert(sf_worker_ctxt.tti_rx == tti_rx_, "invalid run_tti(tti, cc) arguments");
for (uint32_t cc = 0; cc < sf_worker_ctxt.workers.size(); ++cc) {
sf_worker_ctxt.workers[cc].start(sf_worker_ctxt.tti_rx);
}
}
bool sched_worker_manager::run_tti(tti_point tti_rx_, uint32_t cc)
{
auto& sf_worker_ctxt = get_sf(tti_rx_);
srsran_assert(sf_worker_ctxt.tti_rx == tti_rx_, "invalid run_tti(tti, cc) arguments");
if (not sf_worker_ctxt.workers[cc].running()) {
// run for this tti and cc was already called
return false;
}
// Get {tti, cc} scheduling decision
sf_worker_ctxt.workers[cc].run();
// atomically decrement the number of active workers; only the caller that observes zero runs end_tti()
int rem = --sf_worker_ctxt.worker_count;
srsran_assert(rem >= 0, "invalid number of calls to run_tti(tti, cc)");
return rem == 0;
}
void sched_worker_manager::end_tti(tti_point tti_rx_)
{
auto& sf_worker_ctxt = get_sf(tti_rx_);
srsran_assert(sf_worker_ctxt.tti_rx == tti_rx_, "invalid run_tti(tti, cc) arguments");
srsran_assert(sf_worker_ctxt.worker_count == 0, "invalid number of calls to run_tti(tti, cc)");
// All the workers of the same TTI have finished. Synchronize scheduling decisions with UEs state
for (auto& worker : sf_worker_ctxt.workers) {
worker.end_tti();
}
sem_post(&sf_worker_ctxt.sf_sem);
}
} // namespace sched_nr_impl
} // namespace srsenb

@ -74,3 +74,5 @@ add_test(sched_cqi_test sched_cqi_test)
 add_executable(sched_phy_resource_test sched_phy_resource_test.cc)
 target_link_libraries(sched_phy_resource_test srsran_common srsenb_mac srsran_mac sched_test_common)
 add_test(sched_phy_resource_test sched_phy_resource_test)
+add_subdirectory(nr)

@ -0,0 +1,15 @@
#
# Copyright 2013-2021 Software Radio Systems Limited
#
# By using this file, you agree to the terms and conditions set
# forth in the LICENSE file which can be found at the top level of
# the distribution.
#
add_executable(sched_nr_test sched_nr_test.cc)
target_link_libraries(sched_nr_test
srsgnb_mac
srsran_common
${CMAKE_THREAD_LIBS_INIT}
${Boost_LIBRARIES})
add_test(sched_nr_test sched_nr_test)

@ -0,0 +1,120 @@
/**
*
* \section COPYRIGHT
*
* Copyright 2013-2021 Software Radio Systems Limited
*
* By using this file, you agree to the terms and conditions set
* forth in the LICENSE file which can be found at the top level of
* the distribution.
*
*/
#include "srsenb/hdr/stack/mac/nr/sched_nr.h"
#include "srsran/common/test_common.h"
#include "srsran/common/thread_pool.h"
namespace srsenb {
void sched_nr_cfg_serialized_test()
{
sched_nr_cfg cfg;
cfg.cells.resize(1);
sched_nr sched(cfg);
sched_nr_ue_cfg uecfg;
uecfg.carriers.resize(1);
uecfg.carriers[0].active = true;
sched.ue_cfg(0x46, uecfg);
for (uint32_t nof_ttis = 0; nof_ttis < 1000; ++nof_ttis) {
tti_point tti(nof_ttis % 10240);
sched.new_tti(tti);
for (uint32_t cc = 0; cc < cfg.cells.size(); ++cc) {
sched_nr_res_t res;
TESTASSERT(sched.generate_sched_result(tti, cc, res) == SRSRAN_SUCCESS);
}
}
}
void sched_nr_cfg_parallel_cc_test()
{
std::atomic<int> tasks{0};
sched_nr_cfg cfg;
cfg.cells.resize(4);
sched_nr sched(cfg);
sched_nr_ue_cfg uecfg;
uecfg.carriers.resize(cfg.cells.size());
for (uint32_t cc = 0; cc < cfg.cells.size(); ++cc) {
uecfg.carriers[cc].active = true;
}
sched.ue_cfg(0x46, uecfg);
for (uint32_t nof_ttis = 0; nof_ttis < 1000; ++nof_ttis) {
tti_point tti(nof_ttis % 10240);
sched.new_tti(tti);
++tasks;
srsran::get_background_workers().push_task([&cfg, &sched, tti, &tasks]() {
for (uint32_t cc = 0; cc < cfg.cells.size(); ++cc) {
sched_nr_res_t res;
TESTASSERT(sched.generate_sched_result(tti, cc, res) == SRSRAN_SUCCESS);
}
--tasks;
});
}
while (tasks > 0) {
usleep(100);
}
}
void sched_nr_cfg_parallel_sf_test()
{
uint32_t nof_sectors = 2;
std::atomic<int> tasks{0};
sched_nr_cfg cfg;
cfg.nof_concurrent_subframes = 2;
cfg.cells.resize(nof_sectors);
sched_nr sched(cfg);
sched_nr_ue_cfg uecfg;
uecfg.carriers.resize(cfg.cells.size());
for (uint32_t cc = 0; cc < cfg.cells.size(); ++cc) {
uecfg.carriers[cc].active = true;
}
sched.ue_cfg(0x46, uecfg);
for (uint32_t nof_ttis = 0; nof_ttis < 1000; ++nof_ttis) {
tti_point tti(nof_ttis % 10240);
sched.new_tti(tti);
++tasks;
srsran::get_background_workers().push_task([&cfg, &sched, tti, &tasks]() {
for (uint32_t cc = 0; cc < cfg.cells.size(); ++cc) {
sched_nr_res_t res;
TESTASSERT(sched.generate_sched_result(tti, cc, res) == SRSRAN_SUCCESS);
}
--tasks;
});
}
while (tasks > 0) {
usleep(100);
}
}
} // namespace srsenb
int main()
{
srsran::get_background_workers().set_nof_workers(4);
srsenb::sched_nr_cfg_serialized_test();
srsenb::sched_nr_cfg_parallel_cc_test();
srsenb::sched_nr_cfg_parallel_sf_test();
}