sched,nr: implementation of sched sim test class

master
Francisco 4 years ago committed by Francisco Paisana
parent f1b3cfc764
commit 75153c7c83

@ -36,11 +36,11 @@ class sched_nr final : public sched_nr_interface
public: public:
explicit sched_nr(const sched_cfg_t& sched_cfg); explicit sched_nr(const sched_cfg_t& sched_cfg);
~sched_nr() override; ~sched_nr() override;
int cell_cfg(const std::vector<cell_cfg_t>& cell_list); int cell_cfg(srsran::const_span<cell_cfg_t> cell_list) override;
void ue_cfg(uint16_t rnti, const ue_cfg_t& cfg) override; void ue_cfg(uint16_t rnti, const ue_cfg_t& cfg) override;
void slot_indication(tti_point tti_rx) override; void slot_indication(tti_point tti_rx) override;
int generate_sched_result(tti_point tti_rx, uint32_t cc, tti_request_t& tti_req); int generate_sched_result(tti_point tti_rx, uint32_t cc, tti_request_t& tti_req) override;
void dl_ack_info(uint16_t rnti, uint32_t cc, uint32_t pid, uint32_t tb_idx, bool ack) override; void dl_ack_info(uint16_t rnti, uint32_t cc, uint32_t pid, uint32_t tb_idx, bool ack) override;
void ul_sr_info(tti_point tti_rx, uint16_t rnti) override; void ul_sr_info(tti_point tti_rx, uint16_t rnti) override;

@ -27,9 +27,9 @@ public:
bool empty() const bool empty() const
{ {
return std::none_of(tb.begin(), tb.end(), [](const tb_t& t) { return t.active; }); return std::all_of(tb.begin(), tb.end(), [](const tb_t& t) { return not t.active; });
} }
bool empty(uint32_t tb_idx) const { return tb[tb_idx].active; } bool empty(uint32_t tb_idx) const { return not tb[tb_idx].active; }
bool has_pending_retx(tti_point tti_rx) const { return not empty() and not tb[0].ack_state and tti_ack <= tti_rx; } bool has_pending_retx(tti_point tti_rx) const { return not empty() and not tb[0].ack_state and tti_ack <= tti_rx; }
uint32_t nof_retx() const { return tb[0].n_rtx; } uint32_t nof_retx() const { return tb[0].n_rtx; }
uint32_t max_nof_retx() const { return max_retx; } uint32_t max_nof_retx() const { return max_retx; }
@ -67,7 +67,7 @@ private:
class harq_entity class harq_entity
{ {
public: public:
explicit harq_entity(uint32_t nof_harq_procs = 16); explicit harq_entity(uint32_t nof_harq_procs = SCHED_NR_MAX_HARQ);
void new_tti(tti_point tti_rx_); void new_tti(tti_point tti_rx_);
void dl_ack_info(uint32_t pid, uint32_t tb_idx, bool ack) { dl_harqs[pid].ack_info(tb_idx, ack); } void dl_ack_info(uint32_t pid, uint32_t tb_idx, bool ack) { dl_harqs[pid].ack_info(tb_idx, ack); }

@ -15,6 +15,7 @@
#include "srsran/adt/bounded_bitset.h" #include "srsran/adt/bounded_bitset.h"
#include "srsran/adt/bounded_vector.h" #include "srsran/adt/bounded_vector.h"
#include "srsran/adt/span.h"
#include "srsran/common/tti_point.h" #include "srsran/common/tti_point.h"
#include "srsran/phy/phch/dci_nr.h" #include "srsran/phy/phch/dci_nr.h"
@ -26,6 +27,7 @@ const static size_t SCHED_NR_MAX_PDSCH_DATA = 16;
const static size_t SCHED_NR_MAX_NOF_RBGS = 25; const static size_t SCHED_NR_MAX_NOF_RBGS = 25;
const static size_t SCHED_NR_MAX_UL_ALLOCS = 16; const static size_t SCHED_NR_MAX_UL_ALLOCS = 16;
const static size_t SCHED_NR_MAX_TB = 1; const static size_t SCHED_NR_MAX_TB = 1;
const static size_t SCHED_NR_MAX_HARQ = 16;
class sched_nr_interface class sched_nr_interface
{ {
@ -94,9 +96,11 @@ public:
ul_tti_request_t ul_res; ul_tti_request_t ul_res;
}; };
virtual ~sched_nr_interface() = default; virtual ~sched_nr_interface() = default;
virtual void ue_cfg(uint16_t rnti, const ue_cfg_t& ue_cfg) = 0; virtual int cell_cfg(srsran::const_span<sched_nr_interface::cell_cfg_t> ue_cfg) = 0;
virtual void slot_indication(tti_point tti_rx) = 0; virtual void ue_cfg(uint16_t rnti, const ue_cfg_t& ue_cfg) = 0;
virtual void slot_indication(tti_point tti_rx) = 0;
virtual int generate_sched_result(tti_point tti_rx, uint32_t cc, tti_request_t& result) = 0;
virtual void dl_ack_info(uint16_t rnti, uint32_t cc, uint32_t pid, uint32_t tb_idx, bool ack) = 0; virtual void dl_ack_info(uint16_t rnti, uint32_t cc, uint32_t pid, uint32_t tb_idx, bool ack) = 0;
virtual void ul_sr_info(tti_point, uint16_t rnti) = 0; virtual void ul_sr_info(tti_point, uint16_t rnti) = 0;

@ -29,12 +29,30 @@ struct pusch_t {};
struct pucch_t {}; struct pucch_t {};
struct phy_slot_grid { struct phy_slot_grid {
const sched_cell_params* cell_cfg = nullptr;
pdcchmask_t pdcch_tot_mask; pdcchmask_t pdcch_tot_mask;
rbgmask_t pdsch_tot_mask; rbgmask_t pdsch_tot_mask;
rbgmask_t ul_tot_mask; rbgmask_t ul_tot_mask;
pdsch_list pdsch_grants; pdsch_list pdsch_grants;
pusch_list pusch_grants; pusch_list pusch_grants;
srsran::bounded_vector<pucch_t, SCHED_NR_MAX_PDSCH_DATA> pucch_grants; srsran::bounded_vector<pucch_t, SCHED_NR_MAX_PDSCH_DATA> pucch_grants;
phy_slot_grid() = default;
explicit phy_slot_grid(const sched_cell_params& cell_cfg_) :
cell_cfg(&cell_cfg_),
pdcch_tot_mask(cell_cfg->cell_cfg.nof_rbg),
pdsch_tot_mask(cell_cfg->cell_cfg.nof_rbg),
ul_tot_mask(cell_cfg->cell_cfg.nof_rbg)
{}
void reset()
{
pdcch_tot_mask.reset();
pdsch_tot_mask.reset();
ul_tot_mask.reset();
pdsch_grants.clear();
pusch_grants.clear();
pucch_grants.clear();
}
}; };
using phy_cell_rb_grid = srsran::circular_array<phy_slot_grid, TTIMOD_SZ>; using phy_cell_rb_grid = srsran::circular_array<phy_slot_grid, TTIMOD_SZ>;
@ -80,7 +98,9 @@ class slot_sched
{ {
public: public:
explicit slot_sched(const sched_cell_params& cfg_, phy_cell_rb_grid& phy_grid_); explicit slot_sched(const sched_cell_params& cfg_, phy_cell_rb_grid& phy_grid_);
void new_tti(tti_point tti_rx_); void new_tti(tti_point tti_rx_);
void reset();
alloc_result alloc_pdsch(slot_ue& ue, const rbgmask_t& dl_mask); alloc_result alloc_pdsch(slot_ue& ue, const rbgmask_t& dl_mask);
alloc_result alloc_pusch(slot_ue& ue, const rbgmask_t& dl_mask); alloc_result alloc_pusch(slot_ue& ue, const rbgmask_t& dl_mask);
@ -90,7 +110,7 @@ public:
private: private:
srslog::basic_logger& logger; srslog::basic_logger& logger;
phy_cell_rb_grid phy_grid; phy_cell_rb_grid& phy_grid;
tti_point tti_rx; tti_point tti_rx;
}; };

@ -58,6 +58,7 @@ public:
if (ue_db.contains(e.rnti) and ue_db[e.rnti]->carriers[e.cc] != nullptr) { if (ue_db.contains(e.rnti) and ue_db[e.rnti]->carriers[e.cc] != nullptr) {
ue_db[e.rnti]->carriers[e.cc]->push_feedback(std::move(e.callback)); ue_db[e.rnti]->carriers[e.cc]->push_feedback(std::move(e.callback));
} }
feedback_list_tmp.pop_front();
} }
} }
@ -82,7 +83,7 @@ sched_nr::sched_nr(const sched_cfg_t& sched_cfg) : cfg(sched_cfg), pending_event
sched_nr::~sched_nr() {} sched_nr::~sched_nr() {}
int sched_nr::cell_cfg(const std::vector<cell_cfg_t>& cell_list) int sched_nr::cell_cfg(srsran::const_span<cell_cfg_t> cell_list)
{ {
cfg.cells.reserve(cell_list.size()); cfg.cells.reserve(cell_list.size());
for (uint32_t cc = 0; cc < cell_list.size(); ++cc) { for (uint32_t cc = 0; cc < cell_list.size(); ++cc) {
@ -137,7 +138,7 @@ int sched_nr::generate_sched_result(tti_point tti_rx, uint32_t cc, tti_request_t
return SRSRAN_SUCCESS; return SRSRAN_SUCCESS;
} }
void sched_nr::dl_ack_info(uint16_t rnti, uint32_t pid, uint32_t cc, uint32_t tb_idx, bool ack) void sched_nr::dl_ack_info(uint16_t rnti, uint32_t cc, uint32_t pid, uint32_t tb_idx, bool ack)
{ {
pending_events->push_cc_feedback( pending_events->push_cc_feedback(
rnti, cc, [pid, tb_idx, ack](ue_carrier& ue_cc) { ue_cc.harq_ent.dl_ack_info(pid, tb_idx, ack); }); rnti, cc, [pid, tb_idx, ack](ue_carrier& ue_cc) { ue_cc.harq_ent.dl_ack_info(pid, tb_idx, ack); });

@ -45,11 +45,13 @@ alloc_result slot_sched::alloc_pdsch(slot_ue& ue, const rbgmask_t& dl_mask)
int mcs = -1, tbs = -1; int mcs = -1, tbs = -1;
if (ue.h_dl->empty()) { if (ue.h_dl->empty()) {
mcs = 20; mcs = 20;
tbs = 100; tbs = 100;
srsran_assert(ue.h_dl->new_tx(ue.pdsch_tti, ue.uci_tti, dl_mask, mcs, tbs, 4), "Failed to allocate DL HARQ"); bool ret = ue.h_dl->new_tx(ue.pdsch_tti, ue.uci_tti, dl_mask, mcs, tbs, 4);
srsran_assert(ret, "Failed to allocate DL HARQ");
} else { } else {
srsran_assert(ue.h_dl->new_retx(ue.pdsch_tti, ue.uci_tti, dl_mask, &mcs, &tbs), "Failed to allocate DL HARQ retx"); bool ret = ue.h_dl->new_retx(ue.pdsch_tti, ue.uci_tti, dl_mask, &mcs, &tbs);
srsran_assert(ret, "Failed to allocate DL HARQ retx");
} }
// Allocation Successful // Allocation Successful

@ -19,6 +19,8 @@ namespace sched_nr_impl {
void slot_cc_worker::start(tti_point tti_rx_, ue_map_t& ue_db) void slot_cc_worker::start(tti_point tti_rx_, ue_map_t& ue_db)
{ {
srsran_assert(not running(), "scheduler worker::start() called for active worker"); srsran_assert(not running(), "scheduler worker::start() called for active worker");
tti_rx = tti_rx_;
// Try reserve UE cells for this worker // Try reserve UE cells for this worker
for (auto& ue_pair : ue_db) { for (auto& ue_pair : ue_db) {
uint16_t rnti = ue_pair.first; uint16_t rnti = ue_pair.first;
@ -97,6 +99,12 @@ void slot_cc_worker::alloc_ul_ues()
sched_worker_manager::sched_worker_manager(ue_map_t& ue_db_, const sched_params& cfg_) : cfg(cfg_), ue_db(ue_db_) sched_worker_manager::sched_worker_manager(ue_map_t& ue_db_, const sched_params& cfg_) : cfg(cfg_), ue_db(ue_db_)
{ {
for (uint32_t cc = 0; cc < cfg.cells.size(); ++cc) {
for (auto& slot_grid : phy_grid[cc]) {
slot_grid = phy_slot_grid(cfg.cells[cc]);
}
}
// Note: For now, we only allow parallelism at the sector level // Note: For now, we only allow parallelism at the sector level
slot_ctxts.resize(cfg.sched_cfg.nof_concurrent_subframes); slot_ctxts.resize(cfg.sched_cfg.nof_concurrent_subframes);
for (size_t i = 0; i < cfg.sched_cfg.nof_concurrent_subframes; ++i) { for (size_t i = 0; i < cfg.sched_cfg.nof_concurrent_subframes; ++i) {
@ -166,7 +174,7 @@ bool sched_worker_manager::run_tti(tti_point tti_rx_, uint32_t cc, slot_res_t& t
if (rem_workers == 0) { if (rem_workers == 0) {
// Clear one slot of PHY grid, so it can be reused in the next TTIs // Clear one slot of PHY grid, so it can be reused in the next TTIs
phy_grid[cc][sf_worker_ctxt.tti_rx.to_uint()] = {}; phy_grid[cc][sf_worker_ctxt.tti_rx.to_uint()].reset();
} }
return rem_workers == 0; return rem_workers == 0;
} }

@ -6,7 +6,7 @@
# the distribution. # the distribution.
# #
add_executable(sched_nr_test sched_nr_test.cc) add_executable(sched_nr_test sched_nr_test.cc sched_nr_sim_ue.cc)
target_link_libraries(sched_nr_test target_link_libraries(sched_nr_test
srsgnb_mac srsgnb_mac
srsran_common srsran_common

@ -0,0 +1,211 @@
/**
*
* \section COPYRIGHT
*
* Copyright 2013-2021 Software Radio Systems Limited
*
* By using this file, you agree to the terms and conditions set
* forth in the LICENSE file which can be found at the top level of
* the distribution.
*
*/
#include "sched_nr_sim_ue.h"
#include "lib/include/srsran/common/test_common.h"
namespace srsenb {
// Simulated NR UE: keeps an independent, tester-side copy of the UE state so
// that scheduler decisions can be cross-checked against it.
sched_nr_ue_sim::sched_nr_ue_sim(uint16_t rnti_,
const sched_nr_interface::ue_cfg_t& ue_cfg_,
tti_point prach_tti_rx,
uint32_t preamble_idx) :
logger(srslog::fetch_basic_logger("MAC"))
{
ctxt.rnti = rnti_;
ctxt.prach_tti_rx = prach_tti_rx;
ctxt.preamble_idx = preamble_idx;
ctxt.ue_cfg = ue_cfg_;
// One HARQ context set per configured carrier; pre-assign each entry its own
// HARQ process id so later lookups by pid are self-describing.
ctxt.cc_list.resize(ue_cfg_.carriers.size());
for (auto& cc : ctxt.cc_list) {
for (size_t pid = 0; pid < SCHED_NR_MAX_HARQ; ++pid) {
cc.ul_harqs[pid].pid = pid;
cc.dl_harqs[pid].pid = pid;
}
}
}
// Apply one carrier's scheduler output to this UE's simulated state.
// Currently only DL HARQs are tracked. Returns SRSRAN_SUCCESS.
int sched_nr_ue_sim::update(const sched_nr_cc_output_res_t& cc_out)
{
update_dl_harqs(cc_out);
return SRSRAN_SUCCESS;
}
// Mirror DL HARQ state from the scheduler output: for every PDSCH grant that
// targets this UE's RNTI, update the corresponding simulated HARQ process.
void sched_nr_ue_sim::update_dl_harqs(const sched_nr_cc_output_res_t& cc_out)
{
uint32_t cc = cc_out.cc;
for (uint32_t i = 0; i < cc_out.dl_cc_result->pdsch.size(); ++i) {
const auto& data = cc_out.dl_cc_result->pdsch[i];
if (data.dci.ctx.rnti != ctxt.rnti) {
continue;
}
auto& h = ctxt.cc_list[cc].dl_harqs[data.dci.pid];
// A never-used HARQ (nof_txs == 0) or an NDI toggle marks a new transmission;
// otherwise the grant is a retransmission of the same transport block.
if (h.nof_txs == 0 or h.ndi != data.dci.ndi) {
// It is newtx
h.nof_retxs = 0;
h.ndi = data.dci.ndi;
h.first_tti_rx = cc_out.tti_rx;
h.dci_loc = data.dci.ctx.location;
h.tbs = 100; // TODO
} else {
// it is retx
h.nof_retxs++;
}
h.active = true;
h.last_tti_rx = cc_out.tti_rx;
h.nof_txs++;
}
}
// Test harness base: owns the scheduler under test plus the simulated UE
// database, and configures the scheduler cells from the given cell list.
sched_nr_sim_base::sched_nr_sim_base(const sched_nr_interface::sched_cfg_t& sched_args,
const std::vector<sched_nr_interface::cell_cfg_t>& cell_cfg_list,
std::string test_name_) :
logger(srslog::fetch_basic_logger("TEST")),
mac_logger(srslog::fetch_basic_logger("MAC")),
sched_ptr(new sched_nr(sched_args)),
test_name(std::move(test_name_))
{
logger.info("\n=========== Start %s ===========", test_name.c_str());
// Build the tester-local cell parameter copies before configuring the sched.
cell_params.reserve(cell_cfg_list.size());
for (uint32_t cc = 0; cc < cell_cfg_list.size(); ++cc) {
cell_params.emplace_back(cc, cell_cfg_list[cc], sched_args);
}
// NOTE(review): return value of cell_cfg() is ignored — consider checking it.
sched_ptr->cell_cfg(cell_cfg_list); // call parent cfg
TESTASSERT(cell_params.size() > 0);
}
// Log the end-of-test banner; all owned resources are released via RAII.
sched_nr_sim_base::~sched_nr_sim_base()
{
logger.info("=========== End %s ==========\n", test_name.c_str());
}
// Create a simulated UE for the given RNTI (must not already exist) and
// register the same configuration with the scheduler under test.
int sched_nr_sim_base::add_user(uint16_t rnti, const sched_nr_interface::ue_cfg_t& ue_cfg_, uint32_t preamble_idx)
{
TESTASSERT(ue_db.count(rnti) == 0);
ue_db.insert(std::make_pair(rnti, sched_nr_ue_sim(rnti, ue_cfg_, current_tti_rx, preamble_idx)));
sched_ptr->ue_cfg(rnti, ue_cfg_);
return SRSRAN_SUCCESS;
}
// Advance the simulator one slot: under the lock, generate and apply the
// per-UE feedback events (default + test-specific); then forward the slot
// indication to the scheduler outside the lock, so scheduler work does not
// run while the tester mutex is held.
void sched_nr_sim_base::slot_indication(srsran::tti_point tti_rx)
{
{
std::unique_lock<std::mutex> lock(mutex);
logger.set_context(tti_rx.to_uint());
mac_logger.set_context(tti_rx.to_uint());
current_tti_rx = tti_rx;
logger.info("---------------- TTI=%d ---------------", tti_rx.to_uint());
for (auto& ue : ue_db) {
ue_tti_events events;
set_default_tti_events(ue.second.get_ctxt(), events);
set_external_tti_events(ue.second.get_ctxt(), events); // hook for subclasses
apply_tti_events(ue.second.get_ctxt(), events);
}
}
sched_ptr->slot_indication(tti_rx);
}
/// Propagate one carrier's scheduling output to every simulated UE context.
/// Thread-safe: serialized against slot_indication() via the tester mutex.
void sched_nr_sim_base::update(sched_nr_cc_output_res_t& cc_out)
{
std::lock_guard<std::mutex> lock(mutex);
for (auto& rnti_ue : ue_db) {
rnti_ue.second.update(cc_out);
}
}
// Fill `pending_events` with the default per-carrier feedback for this slot:
// ACK every DL/UL HARQ whose feedback is due now. Subclasses may override
// these defaults afterwards via set_external_tti_events().
int sched_nr_sim_base::set_default_tti_events(const sim_nr_ue_ctxt_t& ue_ctxt, ue_tti_events& pending_events)
{
pending_events.cc_list.clear();
pending_events.cc_list.resize(cell_params.size());
pending_events.tti_rx = current_tti_rx;
for (uint32_t enb_cc_idx = 0; enb_cc_idx < pending_events.cc_list.size(); ++enb_cc_idx) {
auto& cc_feedback = pending_events.cc_list[enb_cc_idx];
cc_feedback.configured = true;
// NOTE(review): assumes ue_cc_idx == enb_cc_idx (identity carrier mapping) —
// confirm this holds once cross-carrier configurations are tested.
cc_feedback.ue_cc_idx = enb_cc_idx;
for (uint32_t pid = 0; pid < SCHED_NR_MAX_HARQ; ++pid) {
auto& dl_h = ue_ctxt.cc_list[cc_feedback.ue_cc_idx].dl_harqs[pid];
auto& ul_h = ue_ctxt.cc_list[cc_feedback.ue_cc_idx].ul_harqs[pid];
// Set default DL ACK
// The "+ 8" assumes a fixed 8-slot HARQ feedback delay — TODO confirm.
// NOTE(review): if several pids are due in the same slot, only the last
// one scanned is kept, since cc_feedback holds a single dl_pid/ul_pid.
if (dl_h.active and (dl_h.last_tti_rx + 8) == current_tti_rx) {
cc_feedback.dl_pid = pid;
cc_feedback.dl_ack = true; // default is ACK
}
// Set default UL ACK
if (ul_h.active and (ul_h.last_tti_rx + 8) == current_tti_rx) {
cc_feedback.ul_pid = pid;
cc_feedback.ul_ack = true;
}
// TODO: other CSI
}
}
return SRSRAN_SUCCESS;
}
// Apply the slot's feedback events: forward DL ACK/NACKs to the scheduler and
// keep the simulated UE HARQ contexts consistent with what was reported.
// UL CRC forwarding is not implemented yet (kept commented out below).
int sched_nr_sim_base::apply_tti_events(sim_nr_ue_ctxt_t& ue_ctxt, const ue_tti_events& events)
{
for (uint32_t enb_cc_idx = 0; enb_cc_idx < events.cc_list.size(); ++enb_cc_idx) {
const auto& cc_feedback = events.cc_list[enb_cc_idx];
if (not cc_feedback.configured) {
continue;
}
// dl_pid < 0 means no DL feedback is due on this carrier this slot.
if (cc_feedback.dl_pid >= 0) {
auto& h = ue_ctxt.cc_list[cc_feedback.ue_cc_idx].dl_harqs[cc_feedback.dl_pid];
if (cc_feedback.dl_ack) {
logger.info("DL ACK rnti=0x%x tti_dl_tx=%u cc=%d pid=%d",
ue_ctxt.rnti,
to_tx_dl(h.last_tti_rx).to_uint(),
enb_cc_idx,
cc_feedback.dl_pid);
}
// update scheduler
sched_ptr->dl_ack_info(ue_ctxt.rnti, enb_cc_idx, cc_feedback.dl_pid, cc_feedback.tb, cc_feedback.dl_ack);
// update UE sim context
// The HARQ is emptied on ACK, or when the max number of retx was reached.
if (cc_feedback.dl_ack or ue_ctxt.is_last_dl_retx(cc_feedback.ue_cc_idx, cc_feedback.dl_pid)) {
h.active = false;
}
}
if (cc_feedback.ul_pid >= 0) {
auto& h = ue_ctxt.cc_list[cc_feedback.ue_cc_idx].ul_harqs[cc_feedback.ul_pid];
if (cc_feedback.ul_ack) {
logger.info("UL ACK rnti=0x%x, tti_ul_tx=%u, cc=%d pid=%d",
ue_ctxt.rnti,
to_tx_ul(h.last_tti_rx).to_uint(),
enb_cc_idx,
cc_feedback.ul_pid);
}
// // update scheduler
// if (sched_ptr->ul_crc_info(events.tti_rx.to_uint(), ue_ctxt.rnti, enb_cc_idx, cc_feedback.ul_ack) < 0) {
// logger.error("The ACKed UL Harq pid=%d does not exist.", cc_feedback.ul_pid);
// error_counter++;
// }
}
}
return SRSRAN_SUCCESS;
}
} // namespace srsenb

@ -0,0 +1,128 @@
/**
*
* \section COPYRIGHT
*
* Copyright 2013-2021 Software Radio Systems Limited
*
* By using this file, you agree to the terms and conditions set
* forth in the LICENSE file which can be found at the top level of
* the distribution.
*
*/
#ifndef SRSRAN_SCHED_NR_SIM_UE_H
#define SRSRAN_SCHED_NR_SIM_UE_H
#include "../sched_sim_ue.h"
#include "srsenb/hdr/stack/mac/nr/sched_nr.h"
#include <condition_variable>
namespace srsenb {
// Non-owning view of the scheduler output for one carrier in one slot.
// The pointed-to DL/UL results must outlive this struct.
struct sched_nr_cc_output_res_t {
tti_point tti_rx;
uint32_t cc;
sched_nr_interface::dl_tti_request_t* dl_cc_result; // not owned
sched_nr_interface::ul_tti_request_t* ul_cc_result; // not owned
};
// Tester-side snapshot of one UE: identity, configuration, and per-carrier
// HARQ contexts, tracked independently from the scheduler under test.
struct sim_nr_ue_ctxt_t {
uint16_t rnti;
uint32_t preamble_idx;
srsran::tti_point prach_tti_rx;
sched_nr_interface::ue_cfg_t ue_cfg;
std::vector<ue_cc_ctxt_t> cc_list; // one entry per configured carrier
// True if the given DL HARQ has used up its configured transmission budget,
// i.e. after the current tx the process must be emptied regardless of ACK.
bool is_last_dl_retx(uint32_t ue_cc_idx, uint32_t pid) const
{
auto& h = cc_list.at(ue_cc_idx).dl_harqs[pid];
return h.nof_retxs + 1 >= ue_cfg.maxharq_tx;
}
};
// Simulated NR UE: consumes per-carrier scheduler output and maintains its
// own view of the UE state (see sim_nr_ue_ctxt_t) for cross-checking.
class sched_nr_ue_sim
{
public:
sched_nr_ue_sim(uint16_t rnti_,
const sched_nr_interface::ue_cfg_t& ue_cfg_,
tti_point prach_tti_rx,
uint32_t preamble_idx);
// Apply one carrier's scheduler output to the simulated state.
int update(const sched_nr_cc_output_res_t& cc_out);
const sim_nr_ue_ctxt_t& get_ctxt() const { return ctxt; }
sim_nr_ue_ctxt_t& get_ctxt() { return ctxt; }
private:
// Update DL HARQ contexts from the PDSCH grants addressed to this RNTI.
void update_dl_harqs(const sched_nr_cc_output_res_t& sf_out);
srslog::basic_logger& logger;
sim_nr_ue_ctxt_t ctxt;
};
// Base class for NR scheduler tests: owns the scheduler under test, the
// tester-local cell parameters, and the database of simulated UEs. Public
// methods that touch shared state are serialized with an internal mutex.
class sched_nr_sim_base
{
public:
sched_nr_sim_base(const sched_nr_interface::sched_cfg_t& sched_args,
const std::vector<sched_nr_interface::cell_cfg_t>& cell_params_,
std::string test_name);
virtual ~sched_nr_sim_base();
// Registers a new simulated UE and configures it in the scheduler.
int add_user(uint16_t rnti, const sched_nr_interface::ue_cfg_t& ue_cfg_, uint32_t preamble_idx);
// Advances the simulator and the scheduler to a new slot.
void slot_indication(srsran::tti_point tti_rx);
// Feeds one carrier's scheduler output back into the simulated UEs.
void update(sched_nr_cc_output_res_t& cc_out);
sched_nr_ue_sim& at(uint16_t rnti) { return ue_db.at(rnti); }
const sched_nr_ue_sim& at(uint16_t rnti) const { return ue_db.at(rnti); }
// Returns nullptr if the RNTI is not registered.
sched_nr_ue_sim* find_rnti(uint16_t rnti)
{
auto it = ue_db.find(rnti);
return it != ue_db.end() ? &it->second : nullptr;
}
const sched_nr_ue_sim* find_rnti(uint16_t rnti) const
{
auto it = ue_db.find(rnti);
return it != ue_db.end() ? &it->second : nullptr;
}
bool user_exists(uint16_t rnti) const { return ue_db.count(rnti) > 0; }
const sched_nr_interface::ue_cfg_t* get_user_cfg(uint16_t rnti) const
{
const sched_nr_ue_sim* ret = find_rnti(rnti);
return ret == nullptr ? nullptr : &ret->get_ctxt().ue_cfg;
}
sched_nr* get_sched() { return sched_ptr.get(); }
srsran::const_span<sched_nr_impl::sched_cell_params> get_cell_params() { return cell_params; }
tti_point get_tti_rx() const
{
std::lock_guard<std::mutex> lock(mutex);
return current_tti_rx;
}
std::map<uint16_t, sched_nr_ue_sim>::iterator begin() { return ue_db.begin(); }
std::map<uint16_t, sched_nr_ue_sim>::iterator end() { return ue_db.end(); }
// configurable by simulator concrete implementation
virtual void set_external_tti_events(const sim_nr_ue_ctxt_t& ue_ctxt, ue_tti_events& pending_events) {}
private:
// Builds the default ACK/NACK feedback due in the current slot.
int set_default_tti_events(const sim_nr_ue_ctxt_t& ue_ctxt, ue_tti_events& pending_events);
// Forwards the feedback to the scheduler and updates the UE sim contexts.
int apply_tti_events(sim_nr_ue_ctxt_t& ue_ctxt, const ue_tti_events& events);
std::string test_name;
srslog::basic_logger& logger;
srslog::basic_logger& mac_logger;
std::unique_ptr<sched_nr> sched_ptr;
std::vector<sched_nr_impl::sched_cell_params> cell_params;
srsran::tti_point current_tti_rx;
std::map<uint16_t, sched_nr_ue_sim> ue_db;
int error_counter = 0;
mutable std::mutex mutex;
// NOTE(review): cond_var appears unused in the visible code — confirm before removal.
std::condition_variable cond_var;
};
} // namespace srsenb
#endif // SRSRAN_SCHED_NR_SIM_UE_H

@ -10,6 +10,7 @@
* *
*/ */
#include "sched_nr_sim_ue.h"
#include "srsenb/hdr/stack/mac/nr/sched_nr.h" #include "srsenb/hdr/stack/mac/nr/sched_nr.h"
#include "srsran/common/test_common.h" #include "srsran/common/test_common.h"
#include "srsran/common/thread_pool.h" #include "srsran/common/thread_pool.h"
@ -23,6 +24,7 @@ struct task_job_manager {
int res_count = 0; int res_count = 0;
int pdsch_count = 0; int pdsch_count = 0;
int max_tasks = std::numeric_limits<int>::max() / 2; int max_tasks = std::numeric_limits<int>::max() / 2;
srslog::basic_logger& test_logger = srslog::fetch_basic_logger("TEST");
void start_task() void start_task()
{ {
@ -49,66 +51,79 @@ struct task_job_manager {
cond_var.wait(lock); cond_var.wait(lock);
} }
} }
void print_results() const { printf("TESTER: %f PDSCH/{slot,cc} were allocated\n", pdsch_count / (double)res_count); } void print_results() const
{
test_logger.info("TESTER: %f PDSCH/{slot,cc} were allocated", pdsch_count / (double)res_count);
srslog::flush();
}
}; };
void sched_nr_cfg_serialized_test() void sched_nr_cfg_serialized_test()
{ {
uint32_t max_nof_ttis = 1000; auto& mac_logger = srslog::fetch_basic_logger("MAC");
uint32_t max_nof_ttis = 1000, nof_sectors = 2;
task_job_manager tasks; task_job_manager tasks;
sched_nr_interface::sched_cfg_t cfg; sched_nr_interface::sched_cfg_t cfg;
std::vector<sched_nr_interface::cell_cfg_t> cells_cfg(2); std::vector<sched_nr_interface::cell_cfg_t> cells_cfg(nof_sectors);
sched_nr sched(cfg); sched_nr_sim_base sched_tester(cfg, cells_cfg, "Serialized Test");
sched.cell_cfg(cells_cfg);
sched_nr_interface::ue_cfg_t uecfg; sched_nr_interface::ue_cfg_t uecfg;
uecfg.carriers.resize(2); uecfg.carriers.resize(nof_sectors);
uecfg.carriers[0].active = true; uecfg.carriers[0].active = true;
uecfg.carriers[1].active = true; uecfg.carriers[1].active = true;
sched.ue_cfg(0x46, uecfg);
sched_tester.add_user(0x46, uecfg, 0);
for (uint32_t nof_ttis = 0; nof_ttis < max_nof_ttis; ++nof_ttis) { for (uint32_t nof_ttis = 0; nof_ttis < max_nof_ttis; ++nof_ttis) {
tti_point tti(nof_ttis % 10240); tti_point tti(nof_ttis % 10240);
sched.slot_indication(tti); sched_tester.slot_indication(tti);
for (uint32_t cc = 0; cc < cells_cfg.size(); ++cc) { for (uint32_t cc = 0; cc < cells_cfg.size(); ++cc) {
tasks.start_task(); tasks.start_task();
sched_nr_interface::tti_request_t res; sched_nr_interface::tti_request_t res;
TESTASSERT(sched.generate_sched_result(tti, cc, res) == SRSRAN_SUCCESS); TESTASSERT(sched_tester.get_sched()->generate_sched_result(tti, cc, res) == SRSRAN_SUCCESS);
sched_nr_cc_output_res_t out{tti, cc, &res.dl_res, &res.ul_res};
sched_tester.update(out);
tasks.finish_task(res); tasks.finish_task(res);
TESTASSERT(res.dl_res.pdsch.size() == 1);
} }
} }
tasks.print_results(); tasks.print_results();
TESTASSERT(tasks.pdsch_count == (int)(max_nof_ttis * nof_sectors));
} }
void sched_nr_cfg_parallel_cc_test() void sched_nr_cfg_parallel_cc_test()
{ {
auto& mac_logger = srslog::fetch_basic_logger("MAC");
uint32_t max_nof_ttis = 1000; uint32_t max_nof_ttis = 1000;
task_job_manager tasks; task_job_manager tasks;
sched_nr_interface::sched_cfg_t cfg; sched_nr_interface::sched_cfg_t cfg;
std::vector<sched_nr_interface::cell_cfg_t> cells_cfg(4); std::vector<sched_nr_interface::cell_cfg_t> cells_cfg(4);
sched_nr sched(cfg); sched_nr_sim_base sched_tester(cfg, cells_cfg, "Parallel CC Test");
sched.cell_cfg(cells_cfg);
sched_nr_interface::ue_cfg_t uecfg; sched_nr_interface::ue_cfg_t uecfg;
uecfg.carriers.resize(cells_cfg.size()); uecfg.carriers.resize(cells_cfg.size());
for (uint32_t cc = 0; cc < cells_cfg.size(); ++cc) { for (uint32_t cc = 0; cc < cells_cfg.size(); ++cc) {
uecfg.carriers[cc].active = true; uecfg.carriers[cc].active = true;
} }
sched.ue_cfg(0x46, uecfg); sched_tester.add_user(0x46, uecfg, 0);
for (uint32_t nof_ttis = 0; nof_ttis < max_nof_ttis; ++nof_ttis) { for (uint32_t nof_ttis = 0; nof_ttis < max_nof_ttis; ++nof_ttis) {
tti_point tti(nof_ttis % 10240); tti_point tti(nof_ttis % 10240);
sched.slot_indication(tti); sched_tester.slot_indication(tti);
for (uint32_t cc = 0; cc < cells_cfg.size(); ++cc) { for (uint32_t cc = 0; cc < cells_cfg.size(); ++cc) {
tasks.start_task(); tasks.start_task();
srsran::get_background_workers().push_task([cc, &sched, tti, &tasks]() { srsran::get_background_workers().push_task([cc, tti, &tasks, &sched_tester]() {
sched_nr_interface::tti_request_t res; sched_nr_interface::tti_request_t res;
TESTASSERT(sched.generate_sched_result(tti, cc, res) == SRSRAN_SUCCESS); TESTASSERT(sched_tester.get_sched()->generate_sched_result(tti, cc, res) == SRSRAN_SUCCESS);
sched_nr_cc_output_res_t out{tti, cc, &res.dl_res, &res.ul_res};
sched_tester.update(out);
tasks.finish_task(res); tasks.finish_task(res);
}); });
} }
@ -162,11 +177,16 @@ void sched_nr_cfg_parallel_sf_test()
int main() int main()
{ {
auto& test_logger = srslog::fetch_basic_logger("TEST");
test_logger.set_level(srslog::basic_levels::debug);
auto& mac_logger = srslog::fetch_basic_logger("MAC"); auto& mac_logger = srslog::fetch_basic_logger("MAC");
mac_logger.set_level(srslog::basic_levels::debug); mac_logger.set_level(srslog::basic_levels::debug);
auto& pool_logger = srslog::fetch_basic_logger("POOL"); auto& pool_logger = srslog::fetch_basic_logger("POOL");
pool_logger.set_level(srslog::basic_levels::debug); pool_logger.set_level(srslog::basic_levels::debug);
// Start the log backend.
srslog::init();
srsran::get_background_workers().set_nof_workers(8); srsran::get_background_workers().set_nof_workers(8);
srsenb::sched_nr_cfg_serialized_test(); srsenb::sched_nr_cfg_serialized_test();

@ -347,7 +347,7 @@ int main()
} }
auto& mac_log = srslog::fetch_basic_logger("MAC"); auto& mac_log = srslog::fetch_basic_logger("MAC");
mac_log.set_level(srslog::basic_levels::info); mac_log.set_level(srslog::basic_levels::debug);
auto& test_log = srslog::fetch_basic_logger("TEST", *spy, false); auto& test_log = srslog::fetch_basic_logger("TEST", *spy, false);
test_log.set_level(srslog::basic_levels::info); test_log.set_level(srslog::basic_levels::info);

Loading…
Cancel
Save