sched,nr: extend sched nr testing suite. restrict parallelization for correct DAI generation

master
Francisco Paisana 3 years ago
parent 323703c2f3
commit 1535e6b205

@ -163,7 +163,7 @@ void phy_cfg_nr_default_t::make_harq_auto(srsran_harq_ack_cfg_hl_t& harq,
const srsran_tdd_config_nr_t& tdd_cfg)
{
// Generate as many entries as DL slots
harq.nof_dl_data_to_ul_ack = SRSRAN_MAX(tdd_cfg.pattern1.nof_dl_slots, SRSRAN_MAX_NOF_DL_DATA_TO_UL);
harq.nof_dl_data_to_ul_ack = SRSRAN_MIN(tdd_cfg.pattern1.nof_dl_slots, SRSRAN_MAX_NOF_DL_DATA_TO_UL);
// Set PDSCH to ACK timing delay to 4 or more
for (uint32_t n = 0; n < harq.nof_dl_data_to_ul_ack; n++) {

@ -10,8 +10,8 @@
*
*/
#ifndef SRSRAN_SCHED_NR_PHY_H
#define SRSRAN_SCHED_NR_PHY_H
#ifndef SRSRAN_SCHED_NR_HELPERS_H
#define SRSRAN_SCHED_NR_HELPERS_H
#include "sched_nr_cfg.h"
@ -41,4 +41,4 @@ void fill_ul_dci_ue_fields(const slot_ue& ue,
} // namespace sched_nr_impl
} // namespace srsenb
#endif // SRSRAN_SCHED_NR_PHY_H
#endif // SRSRAN_SCHED_NR_HELPERS_H

@ -69,8 +69,6 @@ public:
struct ue_cc_cfg_t {
bool active = false;
pdsch_td_res_alloc_list pdsch_res_list{1};
pusch_td_res_alloc_list pusch_res_list{1};
};
struct ue_cfg_t {

@ -15,9 +15,9 @@
#include "../sched_common.h"
#include "lib/include/srsran/adt/circular_array.h"
#include "sched_nr_helpers.h"
#include "sched_nr_interface.h"
#include "sched_nr_pdcch.h"
#include "sched_nr_phy.h"
#include "sched_nr_ue.h"
namespace srsenb {
@ -33,6 +33,12 @@ using slot_coreset_list = std::array<srsran::optional<coreset_re
using pdsch_t = mac_interface_phy_nr::pdsch_t;
using pdsch_list_t = srsran::bounded_vector<pdsch_t, MAX_GRANTS>;
// Pending HARQ-ACK of a scheduled PDSCH, stored until the UCI slot is generated.
struct harq_ack_t {
  const srsran::phy_cfg_nr_t* phy_cfg; // UE PHY config used later to derive the PUCCH/UCI resource
  srsran_harq_ack_resource_t  res;     // ACK resource obtained via get_pdsch_ack_resource()
};
using harq_ack_list_t = srsran::bounded_vector<harq_ack_t, MAX_GRANTS>;
struct bwp_slot_grid {
uint32_t slot_idx;
const bwp_params* cfg;
@ -44,8 +50,8 @@ struct bwp_slot_grid {
pdcch_ul_list_t ul_pdcchs;
pdsch_list_t pdschs;
slot_coreset_list coresets;
pucch_list_t pucchs;
pusch_list_t puschs;
harq_ack_list_t pending_acks;
bwp_slot_grid() = default;
explicit bwp_slot_grid(const bwp_params& bwp_params, uint32_t slot_idx_);

@ -43,9 +43,11 @@ public:
private:
void alloc_dl_ues();
void alloc_ul_ues();
void log_result() const;
const sched_cell_params& cfg;
serv_cell_ctxt& cell;
srslog::basic_logger& logger;
tti_point tti_rx;
bwp_slot_allocator bwp_alloc;
@ -73,11 +75,13 @@ public:
void start_slot(tti_point tti_rx, srsran::move_callback<void()> process_feedback);
bool run_slot(tti_point tti_rx, uint32_t cc);
void release_slot(tti_point tti_rx);
bool get_sched_result(tti_point pdcch_tti, uint32_t cc, dl_sched_t& dl_res, ul_sched_t& ul_res);
bool save_sched_result(tti_point pdcch_tti, uint32_t cc, dl_sched_t& dl_res, ul_sched_t& ul_res);
private:
const sched_params& cfg;
ue_map_t& ue_db;
srslog::basic_logger& logger;
std::mutex ue_db_mutex;
std::vector<std::unique_ptr<slot_worker_ctxt> > slot_worker_ctxts;

@ -7,6 +7,6 @@
#
set(SOURCES mac_nr.cc sched_nr.cc sched_nr_ue.cc sched_nr_worker.cc sched_nr_rb_grid.cc sched_nr_harq.cc
sched_nr_pdcch.cc sched_nr_cfg.cc sched_nr_phy.cc sched_nr_bwp.cc sched_nr_rb.cc)
sched_nr_pdcch.cc sched_nr_cfg.cc sched_nr_helpers.cc sched_nr_bwp.cc sched_nr_rb.cc)
add_library(srsgnb_mac STATIC ${SOURCES})

@ -197,7 +197,7 @@ int sched_nr::generate_slot_result(tti_point pdcch_tti, uint32_t cc)
// Copy results to intermediate buffer
dl_sched_t& dl_res = pending_results->add_dl_result(pdcch_tti, cc);
ul_sched_t& ul_res = pending_results->add_ul_result(pdcch_tti, cc);
sched_workers->get_sched_result(pdcch_tti, cc, dl_res, ul_res);
sched_workers->save_sched_result(pdcch_tti, cc, dl_res, ul_res);
return SRSRAN_SUCCESS;
}

@ -11,7 +11,7 @@
*/
#include "srsenb/hdr/stack/mac/nr/sched_nr_cfg.h"
#include "srsenb/hdr/stack/mac/nr/sched_nr_phy.h"
#include "srsenb/hdr/stack/mac/nr/sched_nr_helpers.h"
namespace srsenb {
namespace sched_nr_impl {

@ -10,7 +10,7 @@
*
*/
#include "srsenb/hdr/stack/mac/nr/sched_nr_phy.h"
#include "srsenb/hdr/stack/mac/nr/sched_nr_helpers.h"
#include "srsenb/hdr/stack/mac/nr/sched_nr_harq.h"
#include "srsenb/hdr/stack/mac/nr/sched_nr_ue.h"

@ -11,7 +11,7 @@
*/
#include "srsenb/hdr/stack/mac/nr/sched_nr_rb_grid.h"
#include "srsenb/hdr/stack/mac/nr/sched_nr_phy.h"
#include "srsenb/hdr/stack/mac/nr/sched_nr_helpers.h"
namespace srsenb {
namespace sched_nr_impl {
@ -45,7 +45,7 @@ void bwp_slot_grid::reset()
ul_prbs.reset();
dl_pdcchs.clear();
ul_pdcchs.clear();
pucchs.clear();
pending_acks.clear();
}
bwp_res_grid::bwp_res_grid(const bwp_params& bwp_cfg_) : cfg(&bwp_cfg_)
@ -167,14 +167,16 @@ alloc_result bwp_slot_allocator::alloc_pdsch(slot_ue& ue, const prb_grant& dl_gr
pdcch_dl_t& pdcch = bwp_pdcch_slot.dl_pdcchs.back();
fill_dl_dci_ue_fields(ue, *bwp_grid.cfg, ss_id, pdcch.dci.ctx.location, pdcch.dci);
pdcch.dci.pucch_resource = 0;
pdcch.dci.dai = std::count_if(bwp_uci_slot.pucchs.begin(), bwp_uci_slot.pucchs.end(), [&ue](const pucch_t& p) {
return p.uci_cfg.pucch.rnti == ue.rnti;
});
pdcch.dci.dai = std::count_if(bwp_uci_slot.pending_acks.begin(),
bwp_uci_slot.pending_acks.end(),
[&ue](const harq_ack_t& p) { return p.res.rnti == ue.rnti; });
pdcch.dci.dai %= 4;
// Generate PUCCH
bwp_uci_slot.pucchs.emplace_back();
pucch_t& pucch = bwp_uci_slot.pucchs.back();
pucch.uci_cfg.pucch.rnti = ue.rnti;
bwp_uci_slot.pending_acks.emplace_back();
bwp_uci_slot.pending_acks.back().phy_cfg = &ue.cfg->phy();
srsran_assert(ue.cfg->phy().get_pdsch_ack_resource(pdcch.dci, bwp_uci_slot.pending_acks.back().res),
"Error getting ack resource");
// Generate PDSCH
bwp_pdsch_slot.dl_prbs |= dl_grant;

@ -60,9 +60,13 @@ slot_ue ue_carrier::try_reserve(tti_point tti_rx, const ue_cfg_t& uecfg_)
// copy cc-specific parameters and find available HARQs
sfu.cc_cfg = &uecfg_.carriers[cc];
sfu.pdcch_tti = tti_rx + TX_ENB_DELAY;
sfu.pdsch_tti = sfu.pdcch_tti + sfu.cc_cfg->pdsch_res_list[0].k0;
sfu.pusch_tti = sfu.pdcch_tti + sfu.cc_cfg->pusch_res_list[0].k2;
sfu.uci_tti = sfu.pdsch_tti + sfu.cc_cfg->pdsch_res_list[0].k1;
const uint32_t k0 = 0;
sfu.pdsch_tti = sfu.pdcch_tti + k0;
uint32_t k1 =
sfu.cfg->phy().harq_ack.dl_data_to_ul_ack[sfu.pdsch_tti.sf_idx() % sfu.cfg->phy().harq_ack.nof_dl_data_to_ul_ack];
sfu.uci_tti = sfu.pdsch_tti + k1;
uint32_t k2 = k1;
sfu.pusch_tti = sfu.pdcch_tti + k2;
sfu.dl_cqi = dl_cqi;
sfu.ul_cqi = ul_cqi;

@ -11,12 +11,13 @@
*/
#include "srsenb/hdr/stack/mac/nr/sched_nr_worker.h"
#include "srsran/common/string_helpers.h"
namespace srsenb {
namespace sched_nr_impl {
/// Per-carrier slot worker. Caches the serving cell context/config, binds the BWP
/// allocator to the first BWP grid, and fetches the MAC logger for result tracing.
slot_cc_worker::slot_cc_worker(serv_cell_ctxt& cc_sched) :
  cell(cc_sched), cfg(*cc_sched.cfg), bwp_alloc(cc_sched.bwps[0].grid), logger(srslog::fetch_basic_logger("MAC"))
{}
/// Called at the beginning of TTI in a locked context, to reserve available UE resources
@ -52,6 +53,9 @@ void slot_cc_worker::run()
// TODO: Prioritize PDCCH scheduling for DL and UL data in a Round-Robin fashion
alloc_dl_ues();
alloc_ul_ues();
// Log CC scheduler result
log_result();
}
void slot_cc_worker::end_tti()
@ -94,9 +98,37 @@ void slot_cc_worker::alloc_ul_ues()
bwp_alloc.alloc_pusch(ue, ulmask);
}
/// Log the DL scheduling decisions taken for the PDCCH slot of BWP 0 in this TTI.
void slot_cc_worker::log_result() const
{
  const bwp_slot_grid& slot_res = cell.bwps[0].grid[tti_rx + TX_ENB_DELAY];
  for (const pdcch_dl_t& dl_pdcch : slot_res.dl_pdcchs) {
    fmt::memory_buffer buf;
    switch (dl_pdcch.dci.ctx.rnti_type) {
      case srsran_rnti_type_c: {
        // C-RNTI: dedicated UE data grant
        const slot_ue& ue = slot_ues[dl_pdcch.dci.ctx.rnti];
        fmt::format_to(buf,
                       "SCHED: DL {}, cc={}, rnti=0x{:x}, pid={}, nrtx={}, dai={}, tti_pdsch={}, tti_ack={}",
                       ue.h_dl->nof_retx() == 0 ? "tx" : "retx",
                       cell.cfg->cc,
                       ue.rnti,
                       ue.h_dl->pid,
                       ue.h_dl->nof_retx(),
                       dl_pdcch.dci.dai,
                       ue.pdsch_tti,
                       ue.uci_tti);
      } break;
      case srsran_rnti_type_ra:
        // RA-RNTI: Random Access Response grant
        fmt::format_to(buf, "SCHED: DL RAR, cc={}", cell.cfg->cc);
        break;
      default:
        fmt::format_to(buf, "SCHED: unknown format");
        break;
    }
    logger.info("%s", srsran::to_c_str(buf));
  }
}
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
sched_worker_manager::sched_worker_manager(ue_map_t& ue_db_, const sched_params& cfg_) : cfg(cfg_), ue_db(ue_db_)
sched_worker_manager::sched_worker_manager(ue_map_t& ue_db_, const sched_params& cfg_) :
cfg(cfg_), ue_db(ue_db_), logger(srslog::fetch_basic_logger("MAC"))
{
for (uint32_t cc = 0; cc < cfg.cells.size(); ++cc) {
cell_grid_list.emplace_back(cfg.cells[cc]);
@ -186,22 +218,44 @@ void sched_worker_manager::release_slot(tti_point tti_rx_)
std::unique_lock<std::mutex> lock(sf_worker_ctxt.slot_mutex);
sf_worker_ctxt.tti_rx = {};
if (sf_worker_ctxt.nof_workers_waiting > 0) {
lock.unlock();
sf_worker_ctxt.cvar.notify_one();
}
}
/// Copies the scheduling decision of slot "pdcch_tti"/carrier "cc" into the PHY-facing result
/// structs, multiplexes all pending HARQ-ACKs of the slot into a single PUCCH UCI resource, and
/// resets the BWP slot grid so it can be reused for a future slot.
/// Note: the diff residue that kept the old get_sched_result()/pdcch_bwp_slot lines alongside the
/// new ones has been removed; only the new implementation remains.
/// @return true (result is always saved; PUCCH derivation failure is only logged)
bool sched_worker_manager::save_sched_result(tti_point pdcch_tti, uint32_t cc, dl_sched_t& dl_res, ul_sched_t& ul_res)
{
  auto& bwp_slot = cell_grid_list[cc].bwps[0].grid[pdcch_tti];

  dl_res.pdcch_dl = bwp_slot.dl_pdcchs;
  dl_res.pdcch_ul = bwp_slot.ul_pdcchs;
  dl_res.pdsch    = bwp_slot.pdschs;
  ul_res.pusch    = bwp_slot.puschs;

  // Generate PUCCH: fold every pending HARQ-ACK of this slot into one ACK codebook
  srsran_pdsch_ack_nr_t ack = {};
  ack.nof_cc                = not bwp_slot.pending_acks.empty();
  const srsran::phy_cfg_nr_t* phy_cfg = nullptr;
  for (const harq_ack_t& pending_ack : bwp_slot.pending_acks) {
    srsran_harq_ack_m_t ack_m = {};
    ack_m.resource            = pending_ack.res;
    ack_m.present             = true;
    srsran_harq_ack_insert_m(&ack, &ack_m);
    // NOTE(review): keeps only the last UE's PHY config — assumes all pending ACKs of the
    // slot share a compatible config; confirm for multi-UE scenarios
    phy_cfg = pending_ack.phy_cfg;
  }
  if (phy_cfg != nullptr) {
    srsran_slot_cfg_t slot_cfg{};
    slot_cfg.idx = pdcch_tti.sf_idx();
    ul_res.pucch.emplace_back();
    pucch_t& pucch = ul_res.pucch.back();
    if (not phy_cfg->get_pucch(slot_cfg, ack, pucch.pucch_cfg, pucch.uci_cfg, pucch.resource)) {
      logger.error("Error getting UCI CFG");
    }
  }

  // clear up BWP slot
  bwp_slot.reset();

  return true;
}

@ -6,7 +6,7 @@
# the distribution.
#
add_executable(sched_nr_test sched_nr_test.cc sched_nr_sim_ue.cc)
add_executable(sched_nr_test sched_nr_test.cc sched_nr_sim_ue.cc sched_nr_ue_ded_test_suite.cc)
target_link_libraries(sched_nr_test
srsgnb_mac
srsran_common

@ -11,6 +11,7 @@
*/
#include "sched_nr_sim_ue.h"
#include "sched_nr_ue_ded_test_suite.h"
#include "srsran/common/test_common.h"
namespace srsenb {
@ -38,6 +39,23 @@ sched_nr_ue_sim::sched_nr_ue_sim(uint16_t rnti_,
/// Updates the simulated UE state with one carrier's scheduler output: refreshes the DL HARQ
/// mirrors and accounts the HARQ-ACKs this UE will have to send in the corresponding UCI slot.
/// @param cc_out scheduler DL/UL result for one (tti, cc) pair
/// @return SRSRAN_SUCCESS always
int sched_nr_ue_sim::update(const sched_nr_cc_output_res_t& cc_out)
{
  update_dl_harqs(cc_out);

  // Count one pending ACK per DL PDCCH addressed to this UE, indexed by the UCI slot
  for (uint32_t i = 0; i < cc_out.dl_cc_result->pdcch_dl.size(); ++i) {
    const auto& data = cc_out.dl_cc_result->pdcch_dl[i];
    if (data.dci.ctx.rnti != ctxt.rnti) {
      continue;
    }
    tti_point pdcch_tti = cc_out.tti;
    // k1 = PDSCH-to-ACK delay, taken from the UE harq-ack config (wraps over the dl_data_to_ul_ack list)
    uint32_t k1 = ctxt.ue_cfg.phy_cfg.harq_ack
                      .dl_data_to_ul_ack[pdcch_tti.sf_idx() % ctxt.ue_cfg.phy_cfg.harq_ack.nof_dl_data_to_ul_ack];
    tti_point uci_tti = pdcch_tti + k1;
    ctxt.cc_list[cc_out.cc].pending_acks[uci_tti.to_uint()]++;
  }

  // clear up old slots (pending_acks is a circular array, so stale entries must be zeroed)
  ctxt.cc_list[cc_out.cc].pending_acks[(cc_out.tti - 1).to_uint()] = 0;

  return SRSRAN_SUCCESS;
}
@ -54,7 +72,7 @@ void sched_nr_ue_sim::update_dl_harqs(const sched_nr_cc_output_res_t& cc_out)
// It is newtx
h.nof_retxs = 0;
h.ndi = data.dci.ndi;
h.first_tti_rx = cc_out.tti_rx;
h.first_tti_tx = cc_out.tti;
h.dci_loc = data.dci.ctx.location;
h.tbs = 100; // TODO
} else {
@ -62,7 +80,11 @@ void sched_nr_ue_sim::update_dl_harqs(const sched_nr_cc_output_res_t& cc_out)
h.nof_retxs++;
}
h.active = true;
h.last_tti_rx = cc_out.tti_rx;
h.last_tti_tx = cc_out.tti;
h.last_tti_ack =
h.last_tti_tx +
ctxt.ue_cfg.phy_cfg.harq_ack
.dl_data_to_ul_ack[h.last_tti_tx.sf_idx() % ctxt.ue_cfg.phy_cfg.harq_ack.nof_dl_data_to_ul_ack];
h.nof_txs++;
}
}
@ -81,6 +103,7 @@ sched_nr_sim_base::sched_nr_sim_base(const sched_nr_interface::sched_cfg_t&
cell_params.emplace_back(cc, cell_cfg_list[cc], sched_args);
}
sched_ptr->cell_cfg(cell_cfg_list); // call parent cfg
TESTASSERT(cell_params.size() > 0);
}
@ -94,60 +117,68 @@ int sched_nr_sim_base::add_user(uint16_t rnti, const sched_nr_interface::ue_cfg_
TESTASSERT(ue_db.count(rnti) == 0);
sched_ptr->ue_cfg(rnti, ue_cfg_);
ue_db.insert(std::make_pair(rnti, sched_nr_ue_sim(rnti, ue_cfg_, current_tti_rx, preamble_idx)));
ue_db.insert(std::make_pair(rnti, sched_nr_ue_sim(rnti, ue_cfg_, current_tti_tx, preamble_idx)));
return SRSRAN_SUCCESS;
}
/// Starts a new simulated slot: waits until every carrier of the previous slot has reported its
/// result via update(), advances the simulator TTI, and applies the per-UE feedback events
/// (default + test-specific) for this slot.
/// Note: removed the diff residue that kept the superseded tti_rx-based body next to the new one.
void sched_nr_sim_base::new_slot(srsran::tti_point tti_tx)
{
  {
    std::unique_lock<std::mutex> lock(mutex);
    // Block until all CC workers of the previous slot decremented cc_finished
    while (cc_finished > 0) {
      cvar.wait(lock);
    }
    logger.set_context(tti_tx.to_uint());
    mac_logger.set_context(tti_tx.to_uint());
    logger.info("---------------- TTI=%d ---------------", tti_tx.to_uint());
    current_tti_tx = tti_tx;
    cc_finished    = cell_params.size();
    for (auto& ue : ue_db) {
      ue_nr_tti_events events;
      set_default_tti_events(ue.second.get_ctxt(), events);
      set_external_tti_events(ue.second.get_ctxt(), events);
      apply_tti_events(ue.second.get_ctxt(), events);
    }
  }
}
/// Consumes one carrier's scheduler output: runs the dedicated-UE test suite on the DL result,
/// propagates the result to every simulated UE, and signals new_slot() when this was the last
/// carrier of the slot. Removed diff residue that kept the superseded ue_pair loop.
void sched_nr_sim_base::update(sched_nr_cc_output_res_t& cc_out)
{
  std::unique_lock<std::mutex> lock(mutex);

  // Validate the scheduler decision against the current eNB/UE simulation context
  sim_nr_enb_ctxt_t ctxt;
  ctxt = get_enb_ctxt();
  test_dl_sched_result(ctxt, cc_out);

  for (auto& u : ue_db) {
    u.second.update(cc_out);
  }

  // Last carrier of the slot -> unblock new_slot()
  if (--cc_finished <= 0) {
    cvar.notify_one();
  }
}
int sched_nr_sim_base::set_default_tti_events(const sim_nr_ue_ctxt_t& ue_ctxt, ue_tti_events& pending_events)
int sched_nr_sim_base::set_default_tti_events(const sim_nr_ue_ctxt_t& ue_ctxt, ue_nr_tti_events& pending_events)
{
pending_events.cc_list.clear();
pending_events.cc_list.resize(cell_params.size());
pending_events.tti_rx = current_tti_rx;
pending_events.tti_rx = current_tti_tx;
for (uint32_t enb_cc_idx = 0; enb_cc_idx < pending_events.cc_list.size(); ++enb_cc_idx) {
auto& cc_feedback = pending_events.cc_list[enb_cc_idx];
cc_feedback.configured = true;
cc_feedback.ue_cc_idx = enb_cc_idx;
for (uint32_t pid = 0; pid < SCHED_NR_MAX_HARQ; ++pid) {
auto& dl_h = ue_ctxt.cc_list[cc_feedback.ue_cc_idx].dl_harqs[pid];
auto& ul_h = ue_ctxt.cc_list[cc_feedback.ue_cc_idx].ul_harqs[pid];
auto& dl_h = ue_ctxt.cc_list[enb_cc_idx].dl_harqs[pid];
auto& ul_h = ue_ctxt.cc_list[enb_cc_idx].ul_harqs[pid];
// Set default DL ACK
if (dl_h.active and (dl_h.last_tti_rx + 8) == current_tti_rx) {
cc_feedback.dl_pid = pid;
cc_feedback.dl_ack = true; // default is ACK
if (dl_h.active and (dl_h.last_tti_ack) == current_tti_tx) {
cc_feedback.dl_acks.push_back(ue_nr_tti_events::ack_t{pid, true});
}
// Set default UL ACK
if (ul_h.active and (ul_h.last_tti_rx + 8) == current_tti_rx) {
cc_feedback.ul_pid = pid;
cc_feedback.ul_ack = true;
if (ul_h.active and (ul_h.last_tti_tx + 8) == current_tti_tx) {
cc_feedback.ul_acks.emplace_back(ue_nr_tti_events::ack_t{pid, true});
}
// TODO: other CSI
@ -157,7 +188,7 @@ int sched_nr_sim_base::set_default_tti_events(const sim_nr_ue_ctxt_t& ue_ctxt, u
return SRSRAN_SUCCESS;
}
int sched_nr_sim_base::apply_tti_events(sim_nr_ue_ctxt_t& ue_ctxt, const ue_tti_events& events)
int sched_nr_sim_base::apply_tti_events(sim_nr_ue_ctxt_t& ue_ctxt, const ue_nr_tti_events& events)
{
for (uint32_t enb_cc_idx = 0; enb_cc_idx < events.cc_list.size(); ++enb_cc_idx) {
const auto& cc_feedback = events.cc_list[enb_cc_idx];
@ -165,35 +196,29 @@ int sched_nr_sim_base::apply_tti_events(sim_nr_ue_ctxt_t& ue_ctxt, const ue_tti_
continue;
}
if (cc_feedback.dl_pid >= 0) {
auto& h = ue_ctxt.cc_list[cc_feedback.ue_cc_idx].dl_harqs[cc_feedback.dl_pid];
for (auto& ack : cc_feedback.dl_acks) {
auto& h = ue_ctxt.cc_list[enb_cc_idx].dl_harqs[ack.pid];
if (cc_feedback.dl_ack) {
logger.info("DL ACK rnti=0x%x tti_dl_tx=%u cc=%d pid=%d",
ue_ctxt.rnti,
to_tx_dl(h.last_tti_rx).to_uint(),
enb_cc_idx,
cc_feedback.dl_pid);
if (ack.ack) {
logger.info(
"DL ACK rnti=0x%x tti_dl_tx=%u cc=%d pid=%d", ue_ctxt.rnti, h.last_tti_tx.to_uint(), enb_cc_idx, ack.pid);
}
// update scheduler
sched_ptr->dl_ack_info(ue_ctxt.rnti, enb_cc_idx, cc_feedback.dl_pid, cc_feedback.tb, cc_feedback.dl_ack);
sched_ptr->dl_ack_info(ue_ctxt.rnti, enb_cc_idx, h.pid, 0, ack.ack);
// update UE sim context
if (cc_feedback.dl_ack or ue_ctxt.is_last_dl_retx(cc_feedback.ue_cc_idx, cc_feedback.dl_pid)) {
if (ack.ack or ue_ctxt.is_last_dl_retx(enb_cc_idx, h.pid)) {
h.active = false;
}
}
if (cc_feedback.ul_pid >= 0) {
auto& h = ue_ctxt.cc_list[cc_feedback.ue_cc_idx].ul_harqs[cc_feedback.ul_pid];
for (auto& ack : cc_feedback.ul_acks) {
auto& h = ue_ctxt.cc_list[enb_cc_idx].ul_harqs[ack.pid];
if (cc_feedback.ul_ack) {
logger.info("UL ACK rnti=0x%x, tti_ul_tx=%u, cc=%d pid=%d",
ue_ctxt.rnti,
to_tx_ul(h.last_tti_rx).to_uint(),
enb_cc_idx,
cc_feedback.ul_pid);
if (ack.ack) {
logger.info(
"UL ACK rnti=0x%x, tti_ul_tx=%u, cc=%d pid=%d", ue_ctxt.rnti, h.last_tti_tx.to_uint(), enb_cc_idx, h.pid);
}
// // update scheduler
@ -207,4 +232,16 @@ int sched_nr_sim_base::apply_tti_events(sim_nr_ue_ctxt_t& ue_ctxt, const ue_tti_
return SRSRAN_SUCCESS;
}
/// Builds a snapshot of the eNB simulation context: the cell parameter list plus a
/// rnti -> UE-context pointer map for every simulated UE.
sim_nr_enb_ctxt_t sched_nr_sim_base::get_enb_ctxt() const
{
  sim_nr_enb_ctxt_t enb_ctxt;
  enb_ctxt.cell_params = cell_params;
  for (const auto& rnti_ue : ue_db) {
    enb_ctxt.ue_db.emplace(rnti_ue.first, &rnti_ue.second.get_ctxt());
  }
  return enb_ctxt;
}
} // namespace srsenb

@ -15,20 +15,49 @@
#include "../sched_sim_ue.h"
#include "srsenb/hdr/stack/mac/nr/sched_nr.h"
#include "srsran/adt/circular_array.h"
#include <condition_variable>
namespace srsenb {
const static uint32_t MAX_GRANTS = mac_interface_phy_nr::MAX_GRANTS;
// Test-side mirror of one NR HARQ process, kept in sync with the scheduler's DL/UL results.
struct ue_nr_harq_ctxt_t {
  bool                  active    = false; // true while a TB is in flight (not yet ACKed/dropped)
  bool                  ndi       = false; // last seen new-data indicator
  uint32_t              pid       = 0;     // HARQ process id
  uint32_t              nof_txs   = 0;     // total transmissions (newtx + retx)
  uint32_t              nof_retxs = std::numeric_limits<uint32_t>::max(); // retx count; max() = never transmitted
  uint32_t              riv       = 0;
  srsran_dci_location_t dci_loc   = {};
  uint32_t              tbs       = 0;
  // last/first PDCCH tx slots and the slot where the ACK is expected
  tti_point last_tti_tx, first_tti_tx, last_tti_ack;
};
/// Scheduler output of one (tti, cc) pair, handed to the test suite for validation.
/// Removed diff residue: the superseded tti_rx field and non-const result pointers were
/// left next to their replacements.
struct sched_nr_cc_output_res_t {
  tti_point                             tti;          // PDCCH transmission slot of this result
  uint32_t                              cc;           // carrier index
  const sched_nr_interface::dl_sched_t* dl_cc_result; // DL decision (read-only for the tester)
  const sched_nr_interface::ul_sched_t* ul_cc_result; // UL decision (read-only for the tester)
};
/// Per-carrier simulated UE state. Removed diff residue: the superseded ue_harq_ctxt_t
/// arrays were left next to their ue_nr_harq_ctxt_t replacements.
struct ue_nr_cc_ctxt_t {
  std::array<ue_nr_harq_ctxt_t, SCHED_NR_MAX_HARQ> dl_harqs; // DL HARQ process mirrors
  std::array<ue_nr_harq_ctxt_t, SCHED_NR_MAX_HARQ> ul_harqs; // UL HARQ process mirrors
  // Number of HARQ-ACKs this UE must send per UCI slot, indexed circularly by tti
  srsran::circular_array<uint32_t, TTIMOD_SZ> pending_acks;
};
// Per-slot feedback events (HARQ ACK/NACKs) that the simulator feeds back into the scheduler.
struct ue_nr_tti_events {
  struct ack_t {
    uint32_t pid; // HARQ process id being acknowledged
    bool     ack; // true = ACK, false = NACK
  };
  struct cc_data {
    bool                                     configured = false; // carrier active for this UE
    srsran::bounded_vector<ack_t, MAX_GRANTS> dl_acks;           // DL HARQ feedback for this slot
    srsran::bounded_vector<ack_t, MAX_GRANTS> ul_acks;           // UL HARQ feedback for this slot
  };
  srsran::tti_point    tti_rx;  // slot the events refer to
  std::vector<cc_data> cc_list; // one entry per carrier
};
struct sim_nr_ue_ctxt_t {
@ -44,6 +73,10 @@ struct sim_nr_ue_ctxt_t {
return h.nof_retxs + 1 >= ue_cfg.maxharq_tx;
}
};
// Read-only snapshot of the whole eNB simulation state used by the test suite.
struct sim_nr_enb_ctxt_t {
  srsran::span<const sched_nr_impl::sched_cell_params> cell_params; // configured cells
  std::map<uint16_t, const sim_nr_ue_ctxt_t*>          ue_db;       // rnti -> UE sim context
};
class sched_nr_ue_sim
{
@ -75,7 +108,7 @@ public:
int add_user(uint16_t rnti, const sched_nr_interface::ue_cfg_t& ue_cfg_, uint32_t preamble_idx);
void new_slot(srsran::tti_point tti_rx);
void new_slot(srsran::tti_point tti_tx);
void update(sched_nr_cc_output_res_t& cc_out);
sched_nr_ue_sim& at(uint16_t rnti) { return ue_db.at(rnti); }
@ -101,18 +134,20 @@ public:
tti_point get_tti_rx() const
{
std::lock_guard<std::mutex> lock(mutex);
return current_tti_rx;
return current_tti_tx;
}
sim_nr_enb_ctxt_t get_enb_ctxt() const;
std::map<uint16_t, sched_nr_ue_sim>::iterator begin() { return ue_db.begin(); }
std::map<uint16_t, sched_nr_ue_sim>::iterator end() { return ue_db.end(); }
// configurable by simulator concrete implementation
virtual void set_external_tti_events(const sim_nr_ue_ctxt_t& ue_ctxt, ue_tti_events& pending_events) {}
virtual void set_external_tti_events(const sim_nr_ue_ctxt_t& ue_ctxt, ue_nr_tti_events& pending_events) {}
private:
int set_default_tti_events(const sim_nr_ue_ctxt_t& ue_ctxt, ue_tti_events& pending_events);
int apply_tti_events(sim_nr_ue_ctxt_t& ue_ctxt, const ue_tti_events& events);
int set_default_tti_events(const sim_nr_ue_ctxt_t& ue_ctxt, ue_nr_tti_events& pending_events);
int apply_tti_events(sim_nr_ue_ctxt_t& ue_ctxt, const ue_nr_tti_events& events);
std::string test_name;
srslog::basic_logger& logger;
@ -120,11 +155,13 @@ private:
std::unique_ptr<sched_nr> sched_ptr;
std::vector<sched_nr_impl::sched_cell_params> cell_params;
srsran::tti_point current_tti_rx;
srsran::tti_point current_tti_tx;
int cc_finished = 0;
std::map<uint16_t, sched_nr_ue_sim> ue_db;
mutable std::mutex mutex;
std::condition_variable cond_var;
std::condition_variable cvar;
};
} // namespace srsenb

@ -154,7 +154,7 @@ void sched_nr_cfg_serialized_test()
sched_tester.add_user(0x46, uecfg, 0);
auto tp1 = std::chrono::steady_clock::now();
std::vector<long> count_per_cc(nof_sectors, 0);
for (uint32_t nof_ttis = 0; nof_ttis < max_nof_ttis; ++nof_ttis) {
tti_point tti_rx(nof_ttis % 10240);
tti_point tti_tx = tti_rx + TX_ENB_DELAY;
@ -163,26 +163,32 @@ void sched_nr_cfg_serialized_test()
for (uint32_t cc = 0; cc < cells_cfg.size(); ++cc) {
sched_nr_interface::dl_sched_t dl_res;
sched_nr_interface::ul_sched_t ul_res;
auto tp1 = std::chrono::steady_clock::now();
TESTASSERT(sched_tester.get_sched()->get_dl_sched(tti_tx, cc, dl_res) == SRSRAN_SUCCESS);
TESTASSERT(sched_tester.get_sched()->get_ul_sched(tti_tx, cc, ul_res) == SRSRAN_SUCCESS);
auto tp2 = std::chrono::steady_clock::now();
count_per_cc[cc] += std::chrono::duration_cast<std::chrono::nanoseconds>(tp2 - tp1).count();
sched_nr_cc_output_res_t out{tti_tx, cc, &dl_res, &ul_res};
sched_tester.update(out);
tasks.finish_cc(tti_rx, dl_res, ul_res);
TESTASSERT(not srsran_tdd_nr_is_dl(&cells_cfg[cc].tdd, 0, (tti_tx).sf_idx()) or dl_res.pdcch_dl.size() == 1);
}
}
auto tp2 = std::chrono::steady_clock::now();
tasks.print_results();
TESTASSERT(tasks.pdsch_count == (int)(max_nof_ttis * nof_sectors * 0.6));
uint32_t microsecs = std::chrono::duration_cast<std::chrono::microseconds>(tp2 - tp1).count();
printf("Total time taken per slot: %f\n", microsecs / (float)max_nof_ttis);
double final_avg_usec = 0;
for (uint32_t cc = 0; cc < cells_cfg.size(); ++cc) {
final_avg_usec += count_per_cc[cc];
}
final_avg_usec = final_avg_usec / 1000.0 / max_nof_ttis;
printf("Total time taken per slot: %f usec\n", final_avg_usec);
}
void sched_nr_cfg_parallel_cc_test()
{
uint32_t nof_sectors = 4;
uint32_t nof_sectors = 2;
uint32_t max_nof_ttis = 1000;
task_job_manager tasks;
@ -194,33 +200,40 @@ void sched_nr_cfg_parallel_cc_test()
sched_nr_interface::ue_cfg_t uecfg = get_default_ue_cfg(cells_cfg.size());
sched_tester.add_user(0x46, uecfg, 0);
auto tp1 = std::chrono::steady_clock::now();
std::vector<std::atomic<long> > nano_count(nof_sectors);
for (uint32_t nof_ttis = 0; nof_ttis < max_nof_ttis; ++nof_ttis) {
tti_point tti_rx(nof_ttis % 10240);
tti_point tti_tx = tti_rx + TX_ENB_DELAY;
tasks.start_slot(tti_tx, nof_sectors);
sched_tester.new_slot(tti_tx);
for (uint32_t cc = 0; cc < cells_cfg.size(); ++cc) {
srsran::get_background_workers().push_task([cc, tti_tx, &tasks, &sched_tester]() {
srsran::get_background_workers().push_task([cc, tti_tx, &tasks, &sched_tester, &nano_count]() {
sched_nr_interface::dl_sched_t dl_res;
sched_nr_interface::ul_sched_t ul_res;
auto tp1 = std::chrono::steady_clock::now();
TESTASSERT(sched_tester.get_sched()->get_dl_sched(tti_tx, cc, dl_res) == SRSRAN_SUCCESS);
TESTASSERT(sched_tester.get_sched()->get_ul_sched(tti_tx, cc, ul_res) == SRSRAN_SUCCESS);
auto tp2 = std::chrono::steady_clock::now();
nano_count[cc].fetch_add(std::chrono::duration_cast<std::chrono::nanoseconds>(tp2 - tp1).count(),
std::memory_order_relaxed);
sched_nr_cc_output_res_t out{tti_tx, cc, &dl_res, &ul_res};
sched_tester.update(out);
tasks.finish_cc(tti_tx, dl_res, ul_res);
});
}
}
auto tp2 = std::chrono::steady_clock::now();
tasks.wait_task_finish();
tasks.print_results();
TESTASSERT(tasks.pdsch_count == (int)(max_nof_ttis * nof_sectors * 0.6));
uint32_t microsecs = std::chrono::duration_cast<std::chrono::microseconds>(tp2 - tp1).count();
printf("Total time taken per slot [usec]: %f\n", microsecs / (float)max_nof_ttis);
double final_avg_usec = 0;
for (uint32_t i = 0; i < nano_count.size(); ++i) {
final_avg_usec += nano_count[i];
}
final_avg_usec = final_avg_usec / 1000.0 / max_nof_ttis / nof_sectors;
printf("Total time taken per slot [usec]: %f\n", final_avg_usec);
}
void sched_nr_cfg_parallel_sf_test()
@ -233,34 +246,44 @@ void sched_nr_cfg_parallel_sf_test()
cfg.nof_concurrent_subframes = 2;
std::vector<sched_nr_interface::cell_cfg_t> cells_cfg = get_default_cells_cfg(nof_sectors);
sched_nr sched(cfg);
sched.cell_cfg(cells_cfg);
sched_nr_sim_base sched_tester(cfg, cells_cfg, "Parallel SF Test");
sched_nr_interface::ue_cfg_t uecfg = get_default_ue_cfg(cells_cfg.size());
sched.ue_cfg(0x46, uecfg);
sched_tester.add_user(0x46, uecfg, 0);
auto tp1 = std::chrono::steady_clock::now();
std::vector<std::atomic<long> > nano_count(nof_sectors);
for (uint32_t nof_ttis = 0; nof_ttis < max_nof_ttis; ++nof_ttis) {
tti_point tti(nof_ttis % 10240);
tasks.start_slot(tti, nof_sectors);
tti_point tti_rx(nof_ttis % 10240);
tti_point tti_tx = tti_rx + TX_ENB_DELAY;
tasks.start_slot(tti_tx, nof_sectors);
sched_tester.new_slot(tti_tx);
for (uint32_t cc = 0; cc < cells_cfg.size(); ++cc) {
srsran::get_background_workers().push_task([cc, &sched, tti, &tasks]() {
srsran::get_background_workers().push_task([cc, tti_tx, &sched_tester, &tasks, &nano_count]() {
sched_nr_interface::dl_sched_t dl_res;
sched_nr_interface::ul_sched_t ul_res;
TESTASSERT(sched.get_dl_sched(tti, cc, dl_res) == SRSRAN_SUCCESS);
TESTASSERT(sched.get_ul_sched(tti, cc, ul_res) == SRSRAN_SUCCESS);
tasks.finish_cc(tti, dl_res, ul_res);
auto tp1 = std::chrono::steady_clock::now();
TESTASSERT(sched_tester.get_sched()->get_dl_sched(tti_tx, cc, dl_res) == SRSRAN_SUCCESS);
TESTASSERT(sched_tester.get_sched()->get_ul_sched(tti_tx, cc, ul_res) == SRSRAN_SUCCESS);
auto tp2 = std::chrono::steady_clock::now();
nano_count[cc].fetch_add(std::chrono::duration_cast<std::chrono::nanoseconds>(tp2 - tp1).count(),
std::memory_order_relaxed);
sched_nr_cc_output_res_t out{tti_tx, cc, &dl_res, &ul_res};
sched_tester.update(out);
tasks.finish_cc(tti_tx, dl_res, ul_res);
});
}
}
auto tp2 = std::chrono::steady_clock::now();
tasks.wait_task_finish();
tasks.print_results();
uint32_t microsecs = std::chrono::duration_cast<std::chrono::microseconds>(tp2 - tp1).count();
printf("Total time taken per slot [usec]: %f\n", microsecs / (float)max_nof_ttis);
double final_avg_usec = 0;
for (uint32_t i = 0; i < nano_count.size(); ++i) {
final_avg_usec += nano_count[i];
}
final_avg_usec = final_avg_usec / 1000.0 / max_nof_ttis / nof_sectors;
printf("Total time taken per slot [usec]: %f\n", final_avg_usec);
}
} // namespace srsenb
@ -268,16 +291,16 @@ void sched_nr_cfg_parallel_sf_test()
int main()
{
auto& test_logger = srslog::fetch_basic_logger("TEST");
test_logger.set_level(srslog::basic_levels::debug);
test_logger.set_level(srslog::basic_levels::info);
auto& mac_logger = srslog::fetch_basic_logger("MAC");
mac_logger.set_level(srslog::basic_levels::debug);
mac_logger.set_level(srslog::basic_levels::info);
auto& pool_logger = srslog::fetch_basic_logger("POOL");
pool_logger.set_level(srslog::basic_levels::debug);
pool_logger.set_level(srslog::basic_levels::info);
// Start the log backend.
srslog::init();
srsran::get_background_workers().set_nof_workers(8);
srsran::get_background_workers().set_nof_workers(6);
srsenb::sched_nr_cfg_serialized_test();
srsenb::sched_nr_cfg_parallel_cc_test();

@ -0,0 +1,54 @@
/**
*
* \section COPYRIGHT
*
* Copyright 2013-2021 Software Radio Systems Limited
*
* By using this file, you agree to the terms and conditions set
* forth in the LICENSE file which can be found at the top level of
* the distribution.
*
*/
#include "sched_nr_ue_ded_test_suite.h"
#include "srsenb/hdr/stack/mac/nr/sched_nr_pdcch.h"
#include "srsran/common/test_common.h"
namespace srsenb {
using namespace srsenb::sched_nr_impl;
/// Validates the DL scheduler result of one (tti, cc) against the simulated eNB/UE state:
/// carrier activation, coreset/DCI consistency, harq_feedback (k1) encoding and DAI counting.
/// TESTASSERT failures abort the enclosing test run.
void test_dl_sched_result(const sim_nr_enb_ctxt_t& enb_ctxt, const sched_nr_cc_output_res_t& cc_out)
{
  tti_point              pdcch_tti = cc_out.tti;
  const pdcch_dl_list_t& pdcchs    = cc_out.dl_cc_result->pdcch_dl;

  // Iterate over UE PDCCH allocations (only C-RNTI grants are checked here)
  for (const pdcch_dl_t& pdcch : pdcchs) {
    if (pdcch.dci.ctx.rnti_type != srsran_rnti_type_c) {
      continue;
    }
    const sim_nr_ue_ctxt_t& ue = *enb_ctxt.ue_db.at(pdcch.dci.ctx.rnti);
    // Expected PDSCH-to-ACK delay from the UE harq-ack config (same formula the scheduler uses)
    uint32_t k1 = ue.ue_cfg.phy_cfg.harq_ack
                      .dl_data_to_ul_ack[pdcch_tti.sf_idx() % ue.ue_cfg.phy_cfg.harq_ack.nof_dl_data_to_ul_ack];

    // CHECK: Carrier activation
    TESTASSERT(ue.ue_cfg.carriers[cc_out.cc].active);

    // CHECK: Coreset chosen/DCI content
    TESTASSERT(ue.ue_cfg.phy_cfg.pdcch.coreset_present[pdcch.dci.ctx.coreset_id]);
    const auto& coreset = ue.ue_cfg.phy_cfg.pdcch.coreset[pdcch.dci.ctx.coreset_id];
    TESTASSERT(coreset.id == pdcch.dci.ctx.coreset_id);
    TESTASSERT(pdcch.dci.ctx.format == srsran_dci_format_nr_1_0 or pdcch.dci.ctx.format == srsran_dci_format_nr_1_1);

    // CHECK: UCI — format 1_0 encodes k1-1 in harq_feedback; other formats carry the slot index
    if (pdcch.dci.ctx.format == srsran_dci_format_nr_1_0) {
      TESTASSERT(pdcch.dci.harq_feedback == k1 - 1);
    } else {
      TESTASSERT(pdcch.dci.harq_feedback == pdcch_tti.sf_idx());
    }
    // CHECK: DAI equals the number of ACKs already pending in the UCI slot, modulo 4
    TESTASSERT(ue.cc_list[cc_out.cc].pending_acks[(pdcch_tti + k1).to_uint()] % 4 == pdcch.dci.dai);
  }
}
} // namespace srsenb

@ -0,0 +1,24 @@
/**
*
* \section COPYRIGHT
*
* Copyright 2013-2021 Software Radio Systems Limited
*
* By using this file, you agree to the terms and conditions set
* forth in the LICENSE file which can be found at the top level of
* the distribution.
*
*/
#ifndef SRSRAN_SCHED_NR_UE_DED_TEST_SUITE_H
#define SRSRAN_SCHED_NR_UE_DED_TEST_SUITE_H
#include "sched_nr_sim_ue.h"
namespace srsenb {
void test_dl_sched_result(const sim_nr_enb_ctxt_t& enb_ctxt, const sched_nr_cc_output_res_t& cc_out);
}
#endif // SRSRAN_SCHED_NR_UE_DED_TEST_SUITE_H
Loading…
Cancel
Save