sched,nr: integrate NR scheduler in nr_phy_test

The scheduler can be activated via a command line flag
Branch: master
Author: Francisco Paisana
parent 660b66dbe3
commit c9a5180a09
@@ -45,7 +45,7 @@ public:
void ul_sr_info(tti_point tti_rx, uint16_t rnti) override;
int get_dl_sched(tti_point pdsch_tti, uint32_t cc, dl_sched_t& result) override;
- int get_ul_sched(tti_point pdcch_tti, uint32_t cc, ul_sched_t& result) override;
+ int get_ul_sched(tti_point pusch_tti, uint32_t cc, ul_sched_t& result) override;
private:
int generate_slot_result(tti_point pdcch_tti, uint32_t cc);

@@ -80,6 +80,12 @@ public:
tx_harq_softbuffer& get_softbuffer() { return *softbuffer; }
+ bool set_tbs(uint32_t tbs)
+ {
+   softbuffer->reset();
+   return harq_proc::set_tbs(tbs);
+ }
private:
srsran::unique_pool_ptr<tx_harq_softbuffer> softbuffer;
};
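The set_tbs override added above resets the HARQ softbuffer whenever a new transport block size is set, so soft bits from a previous transport block cannot leak into a new transmission; retransmissions skip set_tbs and keep the accumulated soft bits. A sketch of the intended call pattern, taken from the alloc_pdsch hunk further down (tbs stands for the TBS computed for the grant):

    // Newtx vs retx handling around set_tbs(); names as in alloc_pdsch below.
    if (ue.h_dl->nof_retx() == 0) {
      ue.h_dl->set_tbs(tbs); // new tx: resets the softbuffer before use
    } else {
      // retx: the TBS must stay constant and the softbuffer is left untouched
      srsran_assert(tbs == (int)ue.h_dl->tbs(), "The TBS did not remain constant in retx");
    }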
@@ -115,17 +121,17 @@ public:
{
return find_dl([this](const dl_harq_proc& h) { return h.has_pending_retx(tti_rx); });
}
- harq_proc* find_pending_ul_retx()
+ ul_harq_proc* find_pending_ul_retx()
{
-   return find_ul([this](const harq_proc& h) { return h.has_pending_retx(tti_rx); });
+   return find_ul([this](const ul_harq_proc& h) { return h.has_pending_retx(tti_rx); });
}
dl_harq_proc* find_empty_dl_harq()
{
-   return find_dl([](const harq_proc& h) { return h.empty(); });
+   return find_dl([](const dl_harq_proc& h) { return h.empty(); });
}
- harq_proc* find_empty_ul_harq()
+ ul_harq_proc* find_empty_ul_harq()
{
-   return find_ul([](const harq_proc& h) { return h.empty(); });
+   return find_ul([](const ul_harq_proc& h) { return h.empty(); });
}
private:
@@ -136,7 +142,7 @@ private:
return (it == dl_harqs.end()) ? nullptr : &(*it);
}
template <typename Predicate>
- harq_proc* find_ul(Predicate p)
+ ul_harq_proc* find_ul(Predicate p)
{
auto it = std::find_if(ul_harqs.begin(), ul_harqs.end(), p);
return (it == ul_harqs.end()) ? nullptr : &(*it);

@@ -72,6 +72,10 @@ private:
srsran::bounded_vector<bwp_slot_grid, TTIMOD_SZ> slots;
};
+ /**
+  * Class responsible for jointly filling the DL/UL sched result fields and allocating RB/PDCCH resources in the RB
+  * grid to avoid potential RB/PDCCH collisions
+  */
class bwp_slot_allocator
{
public:
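To make the new class comment concrete, a self-contained illustration of bitmap-based RB collision avoidance (the enum value and bitset width are illustrative; the real grid uses srsRAN types such as the rbg_bitmap seen in alloc_pusch below):

    #include <bitset>

    enum class alloc_result { success, sch_collision }; // illustrative values
    using rbg_bitmap = std::bitset<18>;                 // stand-in for the real RBG bitmap

    // Reject a grant whose RBGs overlap already-allocated ones; otherwise
    // commit it, analogous to `bwp_pusch_slot.ul_prbs |= ul_mask` below.
    alloc_result try_alloc(rbg_bitmap& used, const rbg_bitmap& grant)
    {
      if ((used & grant).any()) {
        return alloc_result::sch_collision;
      }
      used |= grant;
      return alloc_result::success;
    }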

@@ -52,7 +52,7 @@ public:
uint32_t dl_cqi;
uint32_t ul_cqi;
dl_harq_proc* h_dl = nullptr;
- harq_proc* h_ul = nullptr;
+ ul_harq_proc* h_ul = nullptr;
};
class ue_carrier

@@ -90,8 +90,7 @@ public:
sched_worker_manager(sched_worker_manager&&) = delete;
~sched_worker_manager();
- void run_slot(tti_point tti_tx, uint32_t cc);
- bool save_sched_result(tti_point pdcch_tti, uint32_t cc, dl_sched_t& dl_res, ul_sched_t& ul_res);
+ void run_slot(tti_point tti_tx, uint32_t cc, dl_sched_t& dl_res, ul_sched_t& ul_res);
void enqueue_event(uint16_t rnti, srsran::move_callback<void()> ev);
void enqueue_cc_feedback(uint16_t rnti, uint32_t cc, slot_cc_worker::feedback_callback_t fdbk)
@@ -100,6 +99,8 @@ public:
}
private:
+ bool save_sched_result(tti_point pdcch_tti, uint32_t cc, dl_sched_t& dl_res, ul_sched_t& ul_res);
const sched_params& cfg;
ue_map_t& ue_db;
srsran::span<std::unique_ptr<serv_cell_manager> > cells;

@@ -128,13 +128,12 @@ void sched_nr::ue_cfg_impl(uint16_t rnti, const ue_cfg_t& uecfg)
/// Generate {tti,cc} scheduling decision
int sched_nr::generate_slot_result(tti_point pdcch_tti, uint32_t cc)
{
- // Generate {slot_idx,cc} result
- sched_workers->run_slot(pdcch_tti, cc);
// Copy results to intermediate buffer
dl_sched_t& dl_res = pending_results->add_dl_result(pdcch_tti, cc);
ul_sched_t& ul_res = pending_results->add_ul_result(pdcch_tti, cc);
- sched_workers->save_sched_result(pdcch_tti, cc, dl_res, ul_res);
+ // Generate {slot_idx,cc} result
+ sched_workers->run_slot(pdcch_tti, cc, dl_res, ul_res);
return SRSRAN_SUCCESS;
}
@@ -148,14 +147,14 @@ int sched_nr::get_dl_sched(tti_point tti_tx, uint32_t cc, dl_sched_t& result)
result = pending_results->pop_dl_result(tti_tx, cc);
return SRSRAN_SUCCESS;
}
- int sched_nr::get_ul_sched(tti_point tti_rx, uint32_t cc, ul_sched_t& result)
+ int sched_nr::get_ul_sched(tti_point pusch_tti, uint32_t cc, ul_sched_t& result)
{
-   if (not pending_results->has_ul_result(tti_rx, cc)) {
+   if (not pending_results->has_ul_result(pusch_tti, cc)) {
// sched result hasn't been generated
return SRSRAN_SUCCESS;
}
-   result = pending_results->pop_ul_result(tti_rx, cc);
+   result = pending_results->pop_ul_result(pusch_tti, cc);
return SRSRAN_SUCCESS;
}
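get_dl_sched/get_ul_sched follow a has/pop pattern against an intermediate buffer that generate_slot_result fills per {tti, cc}. A hedged sketch of that buffer's shape, assuming a small ring indexed by TTI modulo its size (the real pending_results container is not shown in this diff):

    #include <array>
    #include <cstddef>
    #include <cstdint>

    // Minimal has/pop result ring; Result stands in for dl_sched_t/ul_sched_t.
    template <typename Result, std::size_t N>
    class pending_result_ring
    {
    public:
      Result& add(uint32_t tti)       { present[tti % N] = true; return slots[tti % N]; }
      bool    has(uint32_t tti) const { return present[tti % N]; }
      Result  pop(uint32_t tti)       { present[tti % N] = false; return slots[tti % N]; }

    private:
      std::array<Result, N> slots{};
      std::array<bool, N>   present{};
    };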

@@ -34,7 +34,8 @@ void fill_dci_common(const slot_ue& ue, const bwp_params& bwp_cfg, DciDlOrUl& dc
dci.cc_id = ue.cc;
dci.tpc = 1;
// harq
- harq_proc* h = std::is_same<DciDlOrUl, srsran_dci_dl_nr_t>::value ? ue.h_dl : ue.h_ul;
+ harq_proc* h = std::is_same<DciDlOrUl, srsran_dci_dl_nr_t>::value ? static_cast<harq_proc*>(ue.h_dl)
+                                                                   : static_cast<harq_proc*>(ue.h_ul);
dci.pid = h->pid;
dci.ndi = h->ndi();
dci.mcs = h->mcs();
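The static_cast pair above is not cosmetic: after this commit h_dl and h_ul have distinct types (dl_harq_proc* vs ul_harq_proc*), and the C++ conditional operator requires both branches to convert to a common type, which two sibling derived-class pointers do not do implicitly. A self-contained illustration:

    struct base {};
    struct derived_a : base {};
    struct derived_b : base {};

    base* pick(bool cond, derived_a* a, derived_b* b)
    {
      // return cond ? a : b;  // error: operands have no common type
      return cond ? static_cast<base*>(a) : static_cast<base*>(b); // OK
    }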

@@ -195,12 +195,12 @@ alloc_result bwp_slot_allocator::alloc_pdsch(slot_ue& ue, const prb_grant& dl_gr
slot_cfg.idx = ue.pdsch_tti.sf_idx();
bool ret = ue.cfg->phy().get_pdsch_cfg(slot_cfg, pdcch.dci, pdsch.sch);
srsran_assert(ret, "Error converting DCI to grant");
- pdsch.sch.grant.tb[0].softbuffer.tx = ue.h_dl->get_softbuffer().get();
if (ue.h_dl->nof_retx() == 0) {
ue.h_dl->set_tbs(pdsch.sch.grant.tb[0].tbs); // update HARQ with correct TBS
} else {
srsran_assert(pdsch.sch.grant.tb[0].tbs == (int)ue.h_dl->tbs(), "The TBS did not remain constant in retx");
}
+ pdsch.sch.grant.tb[0].softbuffer.tx = ue.h_dl->get_softbuffer().get();
return alloc_result::success;
}
@@ -249,7 +249,14 @@ alloc_result bwp_slot_allocator::alloc_pusch(slot_ue& ue, const rbg_bitmap& ul_m
fill_ul_dci_ue_fields(ue, *bwp_grid.cfg, ss_id, pdcch.dci.ctx.location, pdcch.dci);
pdcch.dci_cfg = ue.cfg->phy().get_dci_cfg();
// Generate PUSCH
- bwp_pusch_slot.ul_prbs.add(ul_mask);
+ bwp_pusch_slot.ul_prbs |= ul_mask;
+ bwp_pusch_slot.puschs.emplace_back();
+ pusch_t& pusch = bwp_pusch_slot.puschs.back();
+ srsran_slot_cfg_t slot_cfg;
+ slot_cfg.idx = ue.pusch_tti.sf_idx();
+ bool ret = ue.cfg->phy().get_pusch_cfg(slot_cfg, pdcch.dci, pusch.sch);
+ srsran_assert(ret, "Error converting DCI to PUSCH grant");
+ pusch.sch.grant.tb[0].softbuffer.rx = ue.h_ul->get_softbuffer().get();
return alloc_result::success;
}

@@ -161,15 +161,14 @@ void slot_cc_worker::log_result() const
if (pdcch.dci.ctx.rnti_type == srsran_rnti_type_c) {
const slot_ue& ue = slot_ues[pdcch.dci.ctx.rnti];
fmt::format_to(fmtbuf,
"SCHED: UL {}, cc={}, rnti=0x{:x}, pid={}, nrtx={}, f={}, tti_pusch={}, tti_ack={}",
"SCHED: UL {}, cc={}, rnti=0x{:x}, pid={}, nrtx={}, f={}, tti_pusch={}",
ue.h_dl->nof_retx() == 0 ? "tx" : "retx",
cell.cfg.cc,
ue.rnti,
pdcch.dci.pid,
ue.h_dl->nof_retx(),
srsran_dci_format_nr_string(pdcch.dci.ctx.format),
- ue.pusch_tti,
- ue.uci_tti);
+ ue.pusch_tti);
} else {
fmt::format_to(fmtbuf, "SCHED: unknown rnti format");
}
@@ -199,7 +198,7 @@ void sched_worker_manager::enqueue_event(uint16_t rnti, srsran::move_callback<vo
next_slot_events.push_back(ue_event_t{rnti, std::move(ev)});
}
- void sched_worker_manager::run_slot(tti_point tti_tx, uint32_t cc)
+ void sched_worker_manager::run_slot(tti_point tti_tx, uint32_t cc, dl_sched_t& dl_res, ul_sched_t& ul_res)
{
srsran::bounded_vector<std::condition_variable*, SRSRAN_MAX_CARRIERS> waiting_cvars;
{
@@ -281,10 +280,14 @@ void sched_worker_manager::run_slot(tti_point tti_tx, uint32_t cc)
c->notify_one();
}
}
+ // Copy results to intermediate buffer
+ save_sched_result(tti_tx, cc, dl_res, ul_res);
}
bool sched_worker_manager::save_sched_result(tti_point pdcch_tti, uint32_t cc, dl_sched_t& dl_res, ul_sched_t& ul_res)
{
// NOTE: Unlocked region
auto& bwp_slot = cells[cc]->bwps[0].grid[pdcch_tti];
dl_res.pdcch_dl = bwp_slot.dl_pdcchs;
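Calling save_sched_result after the condition-variable notifications puts the result copy in an unlocked region (as the NOTE comment above marks), keeping the critical section short. A generic sketch of that shape; the types and member names here are illustrative, not the real sched_worker_manager internals:

    #include <condition_variable>
    #include <mutex>
    #include <vector>

    struct worker_manager {
      std::mutex mtx;
      std::condition_variable cvar;
      std::vector<int> slot_grid = std::vector<int>(8, 0);

      void run_slot(int slot, int& result_out)
      {
        {
          std::lock_guard<std::mutex> lock(mtx);
          slot_grid[slot % 8] = slot * 2; // generate the slot result under the lock
          cvar.notify_all();              // wake workers waiting on this slot
        }
        // Unlocked region: this slot's entry is no longer written by other
        // threads, so it can be copied out without holding the lock.
        result_out = slot_grid[slot % 8];
      }
    };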
@@ -313,15 +316,15 @@ bool sched_worker_manager::save_sched_result(tti_point pdcch_tti, uint32_t cc, d
if (uci_cfg.ack.count > 0 || uci_cfg.nof_csi > 0 || uci_cfg.o_sr > 0) {
if (not ul_res.pusch.empty()) {
// Put UCI configuration in PUSCH config
- srsran_assert(phy_cfg->get_pusch_uci_cfg(slot_cfg, uci_cfg, ul_res.pusch[0].sch),
-               "Error setting UCI configuration in PUSCH");
+ bool ret = phy_cfg->get_pusch_uci_cfg(slot_cfg, uci_cfg, ul_res.pusch[0].sch);
+ srsran_assert(ret, "Error setting UCI configuration in PUSCH");
} else {
// Put UCI configuration in PUCCH config
ul_res.pucch.emplace_back();
pucch_t& pucch = ul_res.pucch.back();
pucch.uci_cfg = uci_cfg;
- srsran_assert(phy_cfg->get_pucch_uci_cfg(slot_cfg, pucch.uci_cfg, pucch.pucch_cfg, pucch.resource),
-               "Error getting PUCCH UCI cfg");
+ bool ret = phy_cfg->get_pucch_uci_cfg(slot_cfg, pucch.uci_cfg, pucch.pucch_cfg, pucch.resource);
+ srsran_assert(ret, "Error getting PUCCH UCI cfg");
}
}
}
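The bool ret = ...; srsran_assert(ret, ...) rewrite in this hunk separates the side effect from the check. Whether srsran_assert is compiled out in release builds is not shown here, but the hoisted form makes the call unconditional either way and is the standard defence against the classic assert pitfall, shown below with the standard assert:

    #include <cassert>

    bool configure(int value) { return value >= 0; } // imagine a required side effect

    void bad(int v)
    {
      assert(configure(v)); // with NDEBUG defined, configure() is never called
    }

    void good(int v)
    {
      bool ret = configure(v); // the side effect always happens
      assert(ret);             // only the check disappears under NDEBUG
      (void)ret;               // silence unused-variable warnings in NDEBUG builds
    }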

@@ -277,7 +277,7 @@ private:
public:
struct args_t {
srsran::phy_cfg_nr_t phy_cfg; ///< Physical layer configuration
- bool use_dummy_sched = false; ///< Use dummy or real NR scheduler
+ bool use_dummy_sched = true;  ///< Use dummy or real NR scheduler
uint16_t rnti = 0x1234; ///< C-RNTI
uint32_t ss_id = 1; ///< Search Space identifier
uint32_t pdcch_aggregation_level = 0; ///< PDCCH aggregation level
@@ -293,7 +293,11 @@ public:
};
gnb_dummy_stack(const args_t& args) :
- rnti(args.rnti), phy_cfg(args.phy_cfg), ss_id(args.ss_id), sched(srsenb::sched_nr_interface::sched_cfg_t{})
+ rnti(args.rnti),
+ phy_cfg(args.phy_cfg),
+ ss_id(args.ss_id),
+ sched(srsenb::sched_nr_interface::sched_cfg_t{}),
+ use_dummy_sched(args.use_dummy_sched)
{
logger.set_level(srslog::str_to_basic_level(args.log_level));
@@ -382,6 +386,7 @@ public:
// Set TBS
// Select grant and set data
pdsch.data[0] = tx_harq_proc[slot_cfg.idx].get_tb(pdsch.sch.grant.tb[0].tbs).data();
+ pdsch.data[1] = nullptr;
// Generate random data
srsran_random_byte_vector(random_gen, pdsch.data[0], pdsch.sch.grant.tb[0].tbs / 8);
@@ -423,7 +428,15 @@ public:
}
if (not use_dummy_sched) {
- return sched.get_ul_sched(pusch_tti, 0, ul_sched);
+ int ret = sched.get_ul_sched(pusch_tti, 0, ul_sched);
+ for (pusch_t& pusch : ul_sched.pusch) {
+   pusch.data[0] = rx_harq_proc[pusch.pid].get_tb(pusch.sch.grant.tb[0].tbs).data();
+   pusch.data[1] = nullptr;
+   srsran_random_byte_vector(random_gen, pusch.data[0], pusch.sch.grant.tb[0].tbs / 8);
+ }
+ return ret;
}
// Get ACK information
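The new loop gives every scheduled PUSCH a backing data buffer and fills it with random bytes, mirroring the PDSCH path above (including zeroing the unused second transport-block pointer). A hypothetical sketch of the get_tb() helper the dummy stack relies on; the real rx_harq_proc type is not part of this diff:

    #include <cstdint>
    #include <vector>

    // Hypothetical test-side HARQ buffer: returns storage large enough for a
    // transport block of `tbs_bits` bits (hence the /8), reused across slots.
    class test_harq_buffer
    {
    public:
      std::vector<uint8_t>& get_tb(uint32_t tbs_bits)
      {
        if (data.size() < tbs_bits / 8) {
          data.resize(tbs_bits / 8);
        }
        return data;
      }

    private:
      std::vector<uint8_t> data;
    };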
