nr,gnb,sched: cleanup of checks in sched_nr_allocator and removal of unused classes

master
Francisco 3 years ago committed by Francisco Paisana
parent 7b989d9976
commit 6fb5257609

@ -161,37 +161,6 @@ private:
std::array<uint32_t, SRSRAN_UE_DL_NR_MAX_NOF_SEARCH_SPACE> ss_id_to_cce_idx;
};
/// Extended UE configuration that pre-computes, per carrier and BWP, the PDCCH
/// search-space and coreset lookup structures derived from the base ue_cfg_t.
/// NOTE(review): this commit removes the class as unused.
class ue_cfg_extended : public ue_cfg_t
{
public:
// A search space config together with the CCE candidate positions pre-computed for this RNTI.
struct search_space_params {
const srsran_search_space_t* cfg;
bwp_cce_pos_list cce_positions;
};
// A coreset config together with the ids of the search spaces attached to it.
struct coreset_params {
srsran_coreset_t* cfg = nullptr;
std::vector<uint32_t> ss_list;
};
// Per-BWP containers: search spaces indexed by search-space id, plus the coreset list.
struct bwp_params {
std::array<srsran::optional<search_space_params>, SRSRAN_UE_DL_NR_MAX_NOF_SEARCH_SPACE> ss_list;
std::vector<coreset_params> coresets;
};
// Per-component-carrier container of BWP parameters.
struct cc_params {
srsran::bounded_vector<bwp_params, SCHED_NR_MAX_BWP_PER_CELL> bwps;
};
ue_cfg_extended() = default;
explicit ue_cfg_extended(uint16_t rnti, const ue_cfg_t& uecfg);
// Returns the pre-computed DCI CCE positions for the given cc/bwp/search-space.
// Precondition: the search space must exist (the optional is dereferenced unchecked).
const bwp_cce_pos_list& get_dci_pos_list(uint32_t cc, uint32_t bwp_id, uint32_t search_space_id) const
{
return cc_params[cc].bwps[bwp_id].ss_list[search_space_id]->cce_positions;
}
uint16_t rnti;
std::vector<cc_params> cc_params;
};
} // namespace sched_nr_impl
} // namespace srsenb

@ -111,8 +111,10 @@ public:
const bwp_params_t& cfg;
private:
alloc_result verify_pdsch_space(bwp_slot_grid& pdsch_grid, bwp_slot_grid& pdcch_grid) const;
alloc_result
verify_pdsch_space(bwp_slot_grid& pdsch_grid, bwp_slot_grid& pdcch_grid, bwp_slot_grid* uci_grid = nullptr) const;
alloc_result verify_pusch_space(bwp_slot_grid& pusch_grid, bwp_slot_grid* pdcch_grid = nullptr) const;
alloc_result verify_ue_cfg(const ue_carrier_params_t& ue_cfg, harq_proc* harq) const;
srslog::basic_logger& logger;
bwp_res_grid& bwp_grid;

@ -29,8 +29,6 @@ enum class pdcch_grant_type_t { sib, rar, dl_data, ul_data };
class slot_ue;
using bwp_cfg_t = sched_nr_interface::bwp_cfg_t;
class coreset_region
{
public:
@ -48,7 +46,10 @@ public:
* @param user UE object or null in case of broadcast/RAR/paging allocation
* @return if the allocation was successful
*/
bool alloc_dci(pdcch_grant_type_t alloc_type, uint32_t aggr_idx, uint32_t search_space_id, slot_ue* user = nullptr);
bool alloc_dci(pdcch_grant_type_t alloc_type,
uint32_t aggr_idx,
uint32_t search_space_id,
const ue_carrier_params_t* user = nullptr);
void rem_last_dci();
@ -66,11 +67,11 @@ private:
// List of PDCCH grants
// Bookkeeping entry for one PDCCH grant allocated in this coreset.
// NOTE: the diff view had duplicated both the pre-change (slot_ue* ue) and
// post-change member lists, which is invalid C++; only the post-change
// members are kept here.
struct alloc_record {
  uint32_t aggr_idx;             // DCI aggregation level index (L = 1 << aggr_idx)
  uint32_t ss_id;                // search space id used for this allocation
  uint32_t idx;                  // record index — presumably position within the slot's grant list; TODO confirm
  pdcch_grant_type_t alloc_type; // sib / rar / dl_data / ul_data
  const ue_carrier_params_t* ue; // allocating UE parameters, or nullptr for broadcast/RAR
};
srsran::bounded_vector<alloc_record, 2 * MAX_GRANTS> dci_list;
pdcch_dl_list_t& pdcch_dl_list;

@ -39,8 +39,9 @@ public:
int dl_ack_info(uint32_t pid, uint32_t tb_idx, bool ack);
int ul_crc_info(uint32_t pid, bool crc);
const uint16_t rnti;
const uint32_t cc;
const uint16_t rnti;
const uint32_t cc;
const cell_params_t& cell_params;
// Channel state
uint32_t dl_cqi = 1;
@ -56,7 +57,6 @@ private:
srslog::basic_logger& logger;
ue_carrier_params_t bwp_cfg;
const cell_params_t& cell_params;
};
class ue
@ -66,11 +66,12 @@ public:
void new_slot(slot_point pdcch_slot);
slot_ue try_reserve(slot_point pdcch_slot, uint32_t cc);
slot_ue make_slot_ue(slot_point pdcch_slot, uint32_t cc);
void set_cfg(const ue_cfg_t& cfg);
const ue_cfg_t& cfg() const { return ue_cfg; }
/// UE state feedback
void rlc_buffer_state(uint32_t lcid, uint32_t newtx, uint32_t retx) { buffers.dl_buffer_state(lcid, newtx, retx); }
void ul_bsr(uint32_t lcg, uint32_t bsr_val) { buffers.ul_bsr(lcg, bsr_val); }
void ul_sr_info() { last_sr_slot = last_pdcch_slot - TX_ENB_DELAY; }
@ -108,12 +109,13 @@ public:
bool empty() const { return ue == nullptr; }
void release() { ue = nullptr; }
const ue_carrier_params_t& cfg() const { return ue->bwp_cfg; }
const ue_carrier_params_t& operator*() const { return ue->bwp_cfg; }
const ue_carrier_params_t* operator->() const { return &ue->bwp_cfg; }
// mutable interface to ue_carrier state
dl_harq_proc* find_empty_dl_harq();
ul_harq_proc* find_empty_ul_harq();
dl_harq_proc* find_empty_dl_harq() { return ue->harq_ent.find_empty_dl_harq(); }
ul_harq_proc* find_empty_ul_harq() { return ue->harq_ent.find_empty_ul_harq(); }
// UE parameters common to all sectors
uint32_t dl_pending_bytes = 0, ul_pending_bytes = 0;

@ -127,31 +127,5 @@ ue_carrier_params_t::ue_carrier_params_t(uint16_t rnti_, const bwp_params_t& bwp
}
}
/// Builds the extended UE config: for each carrier, pre-computes the per-BWP
/// search-space CCE candidate positions for this RNTI and groups search spaces
/// under their respective coresets.
ue_cfg_extended::ue_cfg_extended(uint16_t rnti_, const ue_cfg_t& uecfg) : ue_cfg_t(uecfg), rnti(rnti_)
{
// Views over only the PHY-config entries flagged as present.
auto ss_view = srsran::make_optional_span(phy_cfg.pdcch.search_space, phy_cfg.pdcch.search_space_present);
auto coreset_view = srsran::make_optional_span(phy_cfg.pdcch.coreset, phy_cfg.pdcch.coreset_present);
cc_params.resize(carriers.size());
for (uint32_t cc = 0; cc < cc_params.size(); ++cc) {
// NOTE(review): only a single BWP per carrier is populated here — confirm multi-BWP is out of scope.
cc_params[cc].bwps.resize(1);
auto& bwp = cc_params[cc].bwps[0];
for (auto& ss : ss_view) {
// Store the search space config and pre-compute its DCI CCE locations for this RNTI.
bwp.ss_list[ss.id].emplace();
bwp.ss_list[ss.id]->cfg = &ss;
get_dci_locs(phy_cfg.pdcch.coreset[ss.coreset_id], ss, rnti, bwp.ss_list[ss.id]->cce_positions);
}
for (auto& coreset_cfg : coreset_view) {
bwp.coresets.emplace_back();
auto& coreset = bwp.coresets.back();
// The stored pointer aliases this object's own phy_cfg storage.
coreset.cfg = &coreset_cfg;
// Attach to this coreset every present search space that references its id.
for (auto& ss : bwp.ss_list) {
if (ss.has_value() and ss->cfg->coreset_id == coreset.cfg->id) {
coreset.ss_list.push_back(ss->cfg->id);
}
}
}
}
}
} // namespace sched_nr_impl
} // namespace srsenb

@ -89,20 +89,14 @@ alloc_result bwp_slot_allocator::alloc_si(uint32_t aggr_idx, uint32_t si_idx, ui
alloc_result bwp_slot_allocator::alloc_rar_and_msg3(uint16_t ra_rnti,
uint32_t aggr_idx,
prb_interval interv,
srsran::const_span<dl_sched_rar_info_t> pending_rars)
srsran::const_span<dl_sched_rar_info_t> pending_rachs)
{
static const uint32_t msg3_nof_prbs = 3, m = 0;
bwp_slot_grid& bwp_pdcch_slot = bwp_grid[pdcch_slot];
if (not bwp_pdcch_slot.ssb.empty()) {
// TODO: support concurrent PDSCH and SSB
logger.info("SCHED: skipping ra-rnti=0x%x RAR allocation. Cause: concurrent PDSCH and SSB not yet supported",
ra_rnti);
return alloc_result::no_sch_space;
}
slot_point msg3_slot = pdcch_slot + cfg.pusch_ra_list[m].msg3_delay;
bwp_slot_grid& bwp_msg3_slot = bwp_grid[msg3_slot];
alloc_result ret = verify_pusch_space(bwp_msg3_slot, nullptr);
slot_point msg3_slot = pdcch_slot + cfg.pusch_ra_list[m].msg3_delay;
bwp_slot_grid& bwp_msg3_slot = bwp_grid[msg3_slot];
alloc_result ret = verify_pusch_space(bwp_msg3_slot, nullptr);
if (ret != alloc_result::success) {
return ret;
}
@ -110,17 +104,15 @@ alloc_result bwp_slot_allocator::alloc_rar_and_msg3(uint16_t
if (ret != alloc_result::success) {
return ret;
}
if (bwp_pdcch_slot.rar.full()) {
return alloc_result::no_grant_space;
}
if (pending_rars.size() > MAX_GRANTS) {
logger.error("SCHED: Trying to allocate too many Msg3 grants in a single slot (%zd)", pending_rars.size());
if (pending_rachs.size() > bwp_pdcch_slot.rar.capacity() - bwp_pdcch_slot.rar.size()) {
logger.error("SCHED: Trying to allocate too many Msg3 grants in a single slot (%zd)", pending_rachs.size());
return alloc_result::invalid_grant_params;
}
for (auto& rar : pending_rars) {
if (not slot_ues.contains(rar.temp_crnti)) {
for (auto& rach : pending_rachs) {
auto ue_it = slot_ues.find(rach.temp_crnti);
if (ue_it == slot_ues.end()) {
logger.info("SCHED: Postponing rnti=0x%x RAR allocation. Cause: The ue object not yet fully created",
rar.temp_crnti);
rach.temp_crnti);
return alloc_result::no_rnti_opportunity;
}
}
@ -132,7 +124,7 @@ alloc_result bwp_slot_allocator::alloc_rar_and_msg3(uint16_t
}
// Check Msg3 RB collision
uint32_t total_ul_nof_prbs = msg3_nof_prbs * pending_rars.size();
uint32_t total_ul_nof_prbs = msg3_nof_prbs * pending_rachs.size();
uint32_t total_ul_nof_rbgs = srsran::ceil_div(total_ul_nof_prbs, get_P(bwp_grid.nof_prbs(), false));
prb_interval msg3_rbs = find_empty_interval_of_length(bwp_msg3_slot.ul_prbs.prbs(), total_ul_nof_rbgs);
if (msg3_rbs.length() < total_ul_nof_rbgs) {
@ -158,7 +150,7 @@ alloc_result bwp_slot_allocator::alloc_rar_and_msg3(uint16_t
bwp_pdcch_slot.coresets[coreset_id]->rem_last_dci();
return alloc_result::invalid_coderate;
}
auto& phy_cfg = slot_ues[pending_rars[0].temp_crnti]->phy();
auto& phy_cfg = slot_ues[pending_rachs[0].temp_crnti]->phy();
pdcch.dci_cfg = phy_cfg.get_dci_cfg();
// Generate RAR PDSCH
// TODO: Properly fill Msg3 grants
@ -176,7 +168,7 @@ alloc_result bwp_slot_allocator::alloc_rar_and_msg3(uint16_t
slot_cfg.idx = msg3_slot.to_uint();
bwp_pdcch_slot.rar.emplace_back();
sched_nr_interface::rar_t& rar_out = bwp_pdcch_slot.rar.back();
for (const dl_sched_rar_info_t& grant : pending_rars) {
for (const dl_sched_rar_info_t& grant : pending_rachs) {
slot_ue& ue = slot_ues[grant.temp_crnti];
// Generate RAR grant
@ -207,35 +199,20 @@ alloc_result bwp_slot_allocator::alloc_rar_and_msg3(uint16_t
// func computes the grant allocation for this UE
alloc_result bwp_slot_allocator::alloc_pdsch(slot_ue& ue, const prb_grant& dl_grant)
{
if (ue->active_bwp().bwp_id != bwp_grid.cfg->bwp_id) {
logger.warning(
"SCHED: Trying to allocate PDSCH for rnti=0x%x in inactive BWP id=%d", ue->rnti, ue->active_bwp().bwp_id);
return alloc_result::no_rnti_opportunity;
}
if (ue.h_dl == nullptr) {
logger.warning("SCHED: Trying to allocate PDSCH for rnti=0x%x with no available HARQs", ue->rnti);
return alloc_result::no_rnti_opportunity;
}
bwp_slot_grid& bwp_pdcch_slot = bwp_grid[ue.pdcch_slot];
bwp_slot_grid& bwp_pdsch_slot = bwp_grid[ue.pdsch_slot];
bwp_slot_grid& bwp_uci_slot = bwp_grid[ue.uci_slot]; // UCI : UL control info
alloc_result result = verify_pdsch_space(bwp_pdsch_slot, bwp_pdcch_slot);
alloc_result result = verify_pdsch_space(bwp_pdsch_slot, bwp_pdcch_slot, &bwp_uci_slot);
if (result != alloc_result::success) {
return result;
}
if (bwp_uci_slot.pending_acks.full()) {
logger.warning("SCHED: PDSCH allocation for rnti=0x%x failed due to lack of space for respective ACK", ue->rnti);
return alloc_result::no_grant_space;
result = verify_ue_cfg(ue.cfg(), ue.h_dl);
if (result != alloc_result::success) {
return result;
}
if (bwp_pdsch_slot.dl_prbs.collides(dl_grant)) {
return alloc_result::sch_collision;
}
if (not bwp_pdcch_slot.ssb.empty()) {
// TODO: support concurrent PDSCH and SSB
logger.info("SCHED: skipping rnti=0x%x PDSCH allocation. Cause: concurrent PDSCH and SSB not yet supported",
ue->rnti);
return alloc_result::no_sch_space;
}
// Find space in PUCCH
// TODO
@ -252,7 +229,7 @@ alloc_result bwp_slot_allocator::alloc_pdsch(slot_ue& ue, const prb_grant& dl_gr
}
}
uint32_t coreset_id = ue->phy().pdcch.search_space[ss_id].coreset_id;
if (not bwp_pdcch_slot.coresets[coreset_id]->alloc_dci(pdcch_grant_type_t::dl_data, aggr_idx, ss_id, &ue)) {
if (not bwp_pdcch_slot.coresets[coreset_id]->alloc_dci(pdcch_grant_type_t::dl_data, aggr_idx, ss_id, &ue.cfg())) {
// Could not find space in PDCCH
return alloc_result::no_cch_space;
}
@ -328,10 +305,9 @@ alloc_result bwp_slot_allocator::alloc_pusch(slot_ue& ue, const prb_grant& ul_pr
if (ret != alloc_result::success) {
return ret;
}
if (ue.h_ul == nullptr) {
logger.warning("SCHED: Trying to allocate PUSCH for rnti=0x%x with no available HARQs", ue->rnti);
return alloc_result::no_rnti_opportunity;
ret = verify_ue_cfg(ue.cfg(), ue.h_ul);
if (ret != alloc_result::success) {
return ret;
}
pdcch_ul_list_t& pdcchs = bwp_pdcch_slot.ul_pdcchs;
if (bwp_pusch_slot.ul_prbs.collides(ul_prbs)) {
@ -348,14 +324,16 @@ alloc_result bwp_slot_allocator::alloc_pusch(slot_ue& ue, const prb_grant& ul_pr
}
}
uint32_t coreset_id = ue->phy().pdcch.search_space[ss_id].coreset_id;
if (not bwp_pdcch_slot.coresets[coreset_id].value().alloc_dci(pdcch_grant_type_t::ul_data, aggr_idx, ss_id, &ue)) {
if (not bwp_pdcch_slot.coresets[coreset_id].value().alloc_dci(
pdcch_grant_type_t::ul_data, aggr_idx, ss_id, &ue.cfg())) {
// Could not find space in PDCCH
return alloc_result::no_cch_space;
}
// Allocation Successful
if (ue.h_ul->empty()) {
int mcs = ue->fixed_pusch_mcs();
int tbs = 100;
bool success = ue.h_ul->new_tx(ue.pusch_slot, ue.pusch_slot, ul_prbs, mcs, ue->ue_cfg().maxharq_tx);
srsran_assert(success, "Failed to allocate UL HARQ");
} else {
@ -363,7 +341,6 @@ alloc_result bwp_slot_allocator::alloc_pusch(slot_ue& ue, const prb_grant& ul_pr
srsran_assert(success, "Failed to allocate UL HARQ retx");
}
// Allocation Successful
// Generate PDCCH
pdcch_ul_t& pdcch = pdcchs.back();
fill_ul_dci_ue_fields(ue, *bwp_grid.cfg, ss_id, pdcch.dci.ctx.location, pdcch.dci);
@ -387,20 +364,33 @@ alloc_result bwp_slot_allocator::alloc_pusch(slot_ue& ue, const prb_grant& ul_pr
return alloc_result::success;
}
/// Checks whether a PDSCH allocation is feasible given the PDCCH, PDSCH and
/// (optionally) UCI slot grids.
/// NOTE: the diff view had melded the pre-change signature and condition lines
/// with the post-change body (two signatures, duplicated ifs); only the clean
/// post-change function is kept here.
/// @param pdsch_grid slot grid that would carry the PDSCH
/// @param pdcch_grid slot grid that would carry the scheduling PDCCH
/// @param uci_grid   slot grid that would carry the HARQ-ACK feedback, or
///                   nullptr to skip the ACK-space check (e.g. broadcast)
/// @return alloc_result::success if the allocation can proceed
alloc_result bwp_slot_allocator::verify_pdsch_space(bwp_slot_grid& pdsch_grid,
                                                    bwp_slot_grid& pdcch_grid,
                                                    bwp_slot_grid* uci_grid) const
{
  // PDSCH and its scheduling PDCCH can only be placed in DL slots (TDD pattern).
  if (not pdsch_grid.is_dl() or not pdcch_grid.is_dl()) {
    logger.warning("SCHED: Trying to allocate PDSCH in TDD non-DL slot index=%d", pdsch_grid.slot_idx);
    return alloc_result::no_sch_space;
  }
  if (pdcch_grid.dl_pdcchs.full()) {
    logger.warning("SCHED: Maximum number of DL PDCCH allocations reached");
    return alloc_result::no_cch_space;
  }
  if (pdsch_grid.pdschs.full()) {
    logger.warning("SCHED: Maximum number of DL PDSCH grants reached");
    return alloc_result::no_sch_space;
  }
  // The slot carrying the UCI must still have room for the corresponding ACK.
  if (uci_grid != nullptr) {
    if (uci_grid->pending_acks.full()) {
      logger.warning("SCHED: No space for ACK.");
      return alloc_result::no_grant_space;
    }
  }
  if (not pdsch_grid.ssb.empty()) {
    // TODO: support concurrent PDSCH and SSB
    logger.debug("SCHED: skipping PDSCH allocation. Cause: concurrent PDSCH and SSB not yet supported");
    return alloc_result::no_sch_space;
  }
  return alloc_result::success;
}
@ -428,5 +418,19 @@ alloc_result bwp_slot_allocator::verify_pusch_space(bwp_slot_grid& pusch_grid, b
return alloc_result::success;
}
/// Validates that a UE is schedulable by this allocator: its active BWP must be
/// the one this allocator manages, and a free HARQ process must be available.
/// @param ue_cfg per-carrier UE parameters
/// @param harq   HARQ process reserved for the allocation, or nullptr if none was found
/// @return alloc_result::success when both checks pass
alloc_result bwp_slot_allocator::verify_ue_cfg(const ue_carrier_params_t& ue_cfg, harq_proc* harq) const
{
  // Guard 1: the UE's currently active BWP must match this allocator's BWP.
  const uint32_t active_bwp_id = ue_cfg.active_bwp().bwp_id;
  if (active_bwp_id != cfg.bwp_id) {
    logger.warning(
        "SCHED: Trying to allocate rnti=0x%x in inactive BWP id=%d", ue_cfg.rnti, active_bwp_id);
    return alloc_result::no_rnti_opportunity;
  }
  // Guard 2: an empty HARQ process must have been found beforehand.
  if (harq == nullptr) {
    logger.warning("SCHED: Trying to allocate rnti=0x%x with no available HARQs", ue_cfg.rnti);
    return alloc_result::no_rnti_opportunity;
  }
  return alloc_result::success;
}
} // namespace sched_nr_impl
} // namespace srsenb

@ -11,7 +11,6 @@
*/
#include "srsenb/hdr/stack/mac/nr/sched_nr_pdcch.h"
#include "srsenb/hdr/stack/mac/nr/sched_nr_ue.h"
namespace srsenb {
namespace sched_nr_impl {
@ -47,10 +46,10 @@ void coreset_region::reset()
pdcch_ul_list.clear();
}
bool coreset_region::alloc_dci(pdcch_grant_type_t alloc_type,
uint32_t aggr_idx,
uint32_t search_space_id,
slot_ue* user)
bool coreset_region::alloc_dci(pdcch_grant_type_t alloc_type,
uint32_t aggr_idx,
uint32_t search_space_id,
const ue_carrier_params_t* user)
{
srsran_assert(aggr_idx <= 4, "Invalid DCI aggregation level=%d", 1U << aggr_idx);
srsran_assert((user == nullptr) xor
@ -141,7 +140,7 @@ bool coreset_region::alloc_dfs_node(const alloc_record& record, uint32_t start_d
tree_node node;
node.dci_pos_idx = start_dci_idx;
node.dci_pos.L = record.aggr_idx;
node.rnti = record.ue != nullptr ? (*record.ue)->rnti : SRSRAN_INVALID_RNTI;
node.rnti = record.ue != nullptr ? record.ue->rnti : SRSRAN_INVALID_RNTI;
node.current_mask.resize(nof_cces());
// get cumulative pdcch bitmap
if (not alloc_dfs.empty()) {
@ -181,7 +180,7 @@ srsran::span<const uint32_t> coreset_region::get_cce_loc_table(const alloc_recor
switch (record.alloc_type) {
case pdcch_grant_type_t::dl_data:
case pdcch_grant_type_t::ul_data:
return (*record.ue)->cce_pos_list(record.ss_id, slot_idx, record.aggr_idx);
return record.ue->cce_pos_list(record.ss_id, slot_idx, record.aggr_idx);
case pdcch_grant_type_t::rar:
return rar_cce_list[slot_idx][record.aggr_idx];
default:

@ -47,15 +47,6 @@ slot_ue::slot_ue(ue_carrier& ue_, slot_point slot_tx_, uint32_t dl_bytes, uint32
}
}
// Returns a free DL HARQ process for this UE, or nullptr when DL is not
// active in this slot.
dl_harq_proc* slot_ue::find_empty_dl_harq()
{
  if (not dl_active) {
    return nullptr;
  }
  return ue->harq_ent.find_empty_dl_harq();
}
// Returns a free UL HARQ process for this UE, or nullptr when UL is not
// active in this slot.
ul_harq_proc* slot_ue::find_empty_ul_harq()
{
  if (not ul_active) {
    return nullptr;
  }
  return ue->harq_ent.find_empty_ul_harq();
}
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
ue_carrier::ue_carrier(uint16_t rnti_, const ue_cfg_t& uecfg_, const cell_params_t& cell_params_) :
@ -160,9 +151,9 @@ void ue::new_slot(slot_point pdcch_slot)
}
}
/// Creates the slot_ue handle through which the scheduler accesses this UE's
/// state for carrier `cc` during slot `pdcch_slot`.
/// NOTE: the diff view left the stale pre-rename signature (`ue::try_reserve`)
/// directly above the renamed one, which is invalid C++; only the renamed
/// post-change function is kept here.
slot_ue ue::make_slot_ue(slot_point pdcch_slot, uint32_t cc)
{
  srsran_assert(carriers[cc] != nullptr, "make_slot_ue() called for inexistent rnti=0x%x,cc=%d", rnti, cc);
  return slot_ue(*carriers[cc], pdcch_slot, dl_pending_bytes, ul_pending_bytes);
}

@ -65,7 +65,7 @@ void cc_worker::run_slot(slot_point pdcch_slot, ue_map_t& ue_db, dl_sched_res_t&
}
// info for a given UE on a slot to be process
slot_ues.insert(rnti, u.try_reserve(pdcch_slot, cfg.cc));
slot_ues.insert(rnti, u.make_slot_ue(pdcch_slot, cfg.cc));
if (slot_ues[rnti].empty()) {
// Failed to generate slot UE because UE has no conditions for DL/UL tx
slot_ues.erase(rnti);

@ -57,7 +57,7 @@ void test_single_prach()
mac_logger.set_context(pdcch_slot.to_uint());
u.new_slot(pdcch_slot);
slot_ues.clear();
slot_ue sfu = u.try_reserve(pdcch_slot, 0);
slot_ue sfu = u.make_slot_ue(pdcch_slot, 0);
if (not sfu.empty()) {
slot_ues.insert(rnti, std::move(sfu));
}

Loading…
Cancel
Save