sched,nr: utilization of common bwp params structure across multiple sched objects

master
Francisco Paisana 4 years ago
parent 2db8b8d876
commit dd3c9b4d7e

@ -27,7 +27,7 @@ namespace srsenb {
namespace sched_nr_impl {
class sched_worker_manager;
class cell_sched;
class serv_cell_ctxt;
} // namespace sched_nr_impl
class ue_event_manager;
@ -69,7 +69,7 @@ private:
std::unique_ptr<sched_result_manager> pending_results;
// management of cell resources
std::vector<std::unique_ptr<sched_nr_impl::cell_sched> > cells;
std::vector<std::unique_ptr<sched_nr_impl::serv_cell_ctxt> > cells;
};
} // namespace srsenb

@ -32,7 +32,7 @@ struct pending_rar_t {
class ra_sched
{
public:
explicit ra_sched(const sched_cell_params& cell_cfg_);
explicit ra_sched(const bwp_params& bwp_cfg_);
int dl_rach_info(const dl_sched_rar_info_t& rar_info);
void run_slot(bwp_slot_allocator& slot_grid);
@ -42,33 +42,32 @@ private:
alloc_result
allocate_pending_rar(bwp_slot_allocator& slot_grid, const pending_rar_t& rar, uint32_t& nof_grants_alloc);
const sched_cell_params* cell_cfg = nullptr;
const bwp_params* bwp_cfg = nullptr;
srslog::basic_logger& logger;
srsran::deque<pending_rar_t> pending_rars;
};
class bwp_sched
class bwp_ctxt
{
public:
explicit bwp_sched(const sched_cell_params& cell_cfg_, uint32_t bwp_id_);
explicit bwp_ctxt(const bwp_params& bwp_cfg);
const sched_cell_params* cell_cfg;
const uint32_t bwp_id;
const bwp_params* cfg;
// channel-specific schedulers
ra_sched ra;
// Pending allocations
// Stores pending allocations and PRB bitmaps
bwp_res_grid grid;
};
/// Scheduling context of a serving cell: one bwp_ctxt per configured BWP.
class serv_cell_ctxt
{
public:
  srsran::bounded_vector<bwp_ctxt, SCHED_NR_MAX_BWP_PER_CELL> bwps;

  explicit serv_cell_ctxt(const sched_cell_params& cell_cfg_);

  const sched_cell_params* cfg; // non-owning pointer to the cell parameters
};

@ -35,10 +35,25 @@ using sched_cfg_t = sched_nr_interface::sched_cfg_t;
using cell_cfg_t = sched_nr_interface::cell_cfg_t;
using bwp_cfg_t = sched_nr_interface::bwp_cfg_t;
/// Parameters of a single BWP, shared across the scheduler objects that operate
/// on that BWP (RA scheduler, resource grid, slot allocator).
struct bwp_params {
const uint32_t bwp_id; // BWP index within the cell configuration (used to pick cell_cfg.bwps[bwp_id])
const uint32_t cc; // carrier index
const bwp_cfg_t& cfg; // BWP configuration (non-owning reference into cell_cfg.bwps)
const cell_cfg_t& cell_cfg; // enclosing cell configuration (non-owning reference)
const sched_cfg_t& sched_cfg; // scheduler-wide configuration (non-owning reference)
// derived params
uint32_t P; // nominal RBG size, derived via get_P() (TS 38.214, Table 6.1.2.2.1-1)
uint32_t N_rbg; // number of RBGs in the BWP, derived via get_nof_rbgs()
bwp_params(const cell_cfg_t& cell, const sched_cfg_t& sched_cfg_, uint32_t cc, uint32_t bwp_id);
};
/// Parameters of a carrier/cell, aggregating one bwp_params per configured BWP.
struct sched_cell_params {
const uint32_t cc; // carrier index
const cell_cfg_t cell_cfg; // cell configuration (owned copy)
const sched_cfg_t& sched_cfg; // scheduler-wide configuration (non-owning reference)
std::vector<bwp_params> bwps; // one entry per cell_cfg.bwps entry (idx0 for BWP-common)
sched_cell_params(uint32_t cc_, const cell_cfg_t& cell, const sched_cfg_t& sched_cfg_);
};

@ -34,9 +34,6 @@ const static size_t SCHED_NR_MAX_BWP_PER_CELL = 2;
class sched_nr_interface
{
public:
using pdcch_bitmap = srsran::bounded_bitset<SCHED_NR_MAX_NOF_RBGS, true>;
using rbg_bitmap = srsran::bounded_bitset<SCHED_NR_MAX_NOF_RBGS, true>;
static const size_t MAX_GRANTS = mac_interface_phy_nr::MAX_GRANTS;
///// Configuration /////
@ -52,21 +49,18 @@ public:
using pusch_td_res_alloc_list = srsran::bounded_vector<pusch_td_res_alloc, MAX_GRANTS>;
struct bwp_cfg_t {
uint32_t bwp_id = 1;
uint32_t start_rb = 0;
uint32_t rb_width = 100;
srsran_sch_hl_cfg_nr_t pdsch = {};
srsran_sch_hl_cfg_nr_t pusch = {};
uint32_t rar_window_size = 3;
std::array<srsran::optional<srsran_coreset_t>, SRSRAN_UE_DL_NR_MAX_NOF_CORESET> coresets;
};
struct cell_cfg_t {
uint32_t rar_window_size;
uint32_t nof_prb = 100;
uint32_t nof_rbg = 25;
srsran_tdd_config_nr_t tdd = {};
srsran::bounded_vector<bwp_cfg_t, SCHED_NR_MAX_BWP_PER_CELL> bwps{1};
srsran::bounded_vector<bwp_cfg_t, SCHED_NR_MAX_BWP_PER_CELL> bwps{1}; // idx0 for BWP-common
};
struct sched_cfg_t {

@ -38,7 +38,7 @@ using pdcch_ul_list_t = srsran::bounded_vector<pdcch_ul_t, MAX_GRANTS>;
class coreset_region
{
public:
coreset_region(const bwp_cfg_t& bwp_cfg_,
coreset_region(const bwp_params& bwp_cfg_,
uint32_t coreset_id_,
uint32_t slot_idx,
pdcch_dl_list_t& pdcch_dl_list,
@ -62,7 +62,7 @@ public:
size_t nof_allocs() const { return dfs_tree.size(); }
private:
const bwp_cfg_t* bwp_cfg;
const bwp_params* bwp_cfg;
const srsran_coreset_t* coreset_cfg;
uint32_t coreset_id;
uint32_t slot_idx;

@ -29,25 +29,13 @@ rbg_interval find_empty_rbg_interval(const pdsch_bitmap& bitmap, uint32_t max_no
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
bool fill_dci_rar(rbg_interval rbginterv, const sched_cell_params& cell, srsran_dci_dl_nr_t& dci);
bool fill_dci_rar(rbg_interval rbginterv, const bwp_params& bwp_cfg, srsran_dci_dl_nr_t& dci);
class slot_ue;
void fill_dci_ue_cfg(const slot_ue& ue,
const rbgmask_t& rbgmask,
const sched_cell_params& cc_cfg,
srsran_dci_dl_nr_t& dci);
void fill_dci_ue_cfg(const slot_ue& ue,
const rbgmask_t& rbgmask,
const sched_cell_params& cc_cfg,
srsran_dci_ul_nr_t& dci);
void fill_pdsch_ue(const slot_ue& ue,
const rbgmask_t& rbgmask,
const sched_cell_params& cc_cfg,
srsran_sch_cfg_nr_t& sch);
void fill_pusch_ue(const slot_ue& ue,
const rbgmask_t& rbgmask,
const sched_cell_params& cc_cfg,
srsran_sch_cfg_nr_t& sch);
void fill_dci_ue_cfg(const slot_ue& ue, const rbgmask_t& rbgmask, const bwp_params& bwp_cfg, srsran_dci_dl_nr_t& dci);
void fill_dci_ue_cfg(const slot_ue& ue, const rbgmask_t& rbgmask, const bwp_params& bwp_cfg, srsran_dci_ul_nr_t& dci);
void fill_pdsch_ue(const slot_ue& ue, const rbgmask_t& rbgmask, const bwp_params& bwp_cfg, srsran_sch_cfg_nr_t& sch);
void fill_pusch_ue(const slot_ue& ue, const rbgmask_t& rbgmask, const bwp_params& bwp_cfg, srsran_sch_cfg_nr_t& sch);
pucch_resource_grant find_pucch_resource(const slot_ue& ue, const rbgmask_t& rbgs, uint32_t tbs);

@ -31,8 +31,9 @@ const static size_t MAX_CORESET_PER_BWP = 3;
using slot_coreset_list = srsran::bounded_vector<coreset_region, MAX_CORESET_PER_BWP>;
struct bwp_slot_grid {
uint32_t bwp_id;
uint32_t slot_idx;
const bwp_params* cfg;
bool is_dl, is_ul;
pdsch_bitmap dl_rbgs;
pusch_bitmap ul_rbgs;
@ -42,25 +43,21 @@ struct bwp_slot_grid {
pucch_list_t pucchs;
bwp_slot_grid() = default;
explicit bwp_slot_grid(const sched_cell_params& cell_params, uint32_t bwp_id_, uint32_t slot_idx_);
explicit bwp_slot_grid(const bwp_params& bwp_params, uint32_t slot_idx_);
void reset();
};
/// Ring buffer of per-slot resource grids for one BWP.
struct bwp_res_grid {
  bwp_res_grid(const bwp_params& bwp_cfg_);

  // Slot lookup wraps around the ring-buffer capacity.
  bwp_slot_grid&       operator[](tti_point tti) { return slots[tti.to_uint() % slots.capacity()]; };
  const bwp_slot_grid& operator[](tti_point tti) const { return slots[tti.to_uint() % slots.capacity()]; };
  uint32_t             id() const { return cfg->bwp_id; }
  uint32_t             nof_prbs() const { return cfg->cfg.rb_width; }

  const bwp_params* cfg = nullptr; // non-owning pointer to the shared BWP parameters

private:
  srsran::bounded_vector<bwp_slot_grid, TTIMOD_SZ> slots;
};
@ -78,7 +75,7 @@ public:
tti_point get_pdcch_tti() const { return pdcch_tti; }
const bwp_res_grid& res_grid() const { return bwp_grid; }
const sched_cell_params& cfg;
const bwp_params& cfg;
private:
srslog::basic_logger& logger;

@ -33,7 +33,7 @@ using ul_sched_t = sched_nr_interface::ul_sched_t;
class slot_cc_worker
{
public:
explicit slot_cc_worker(cell_sched& sched);
explicit slot_cc_worker(serv_cell_ctxt& sched);
void start(tti_point tti_rx_, ue_map_t& ue_db_);
void run();
@ -45,7 +45,7 @@ private:
void alloc_ul_ues();
const sched_cell_params& cfg;
cell_sched& cell;
serv_cell_ctxt& cell;
tti_point tti_rx;
bwp_slot_allocator bwp_alloc;
@ -82,7 +82,7 @@ private:
std::vector<std::unique_ptr<slot_worker_ctxt> > slot_worker_ctxts;
srsran::bounded_vector<cell_sched, SCHED_NR_MAX_CARRIERS> cell_grid_list;
srsran::bounded_vector<serv_cell_ctxt, SCHED_NR_MAX_CARRIERS> cell_grid_list;
slot_worker_ctxt& get_sf(tti_point tti_rx);
};

@ -17,8 +17,7 @@
namespace srsenb {
namespace sched_nr_impl {
/// Constructs the RA scheduler for one BWP, caching a pointer to its shared
/// parameters and fetching the MAC logger.
ra_sched::ra_sched(const bwp_params& bwp_cfg_) : bwp_cfg(&bwp_cfg_), logger(srslog::fetch_basic_logger("MAC")) {}
alloc_result
ra_sched::allocate_pending_rar(bwp_slot_allocator& slot_grid, const pending_rar_t& rar, uint32_t& nof_grants_alloc)
@ -29,7 +28,7 @@ ra_sched::allocate_pending_rar(bwp_slot_allocator& slot_grid, const pending_rar_
alloc_result ret = alloc_result::other_cause;
for (nof_grants_alloc = rar.msg3_grant.size(); nof_grants_alloc > 0; nof_grants_alloc--) {
ret = alloc_result::invalid_coderate;
for (uint32_t nrbg = 1; nrbg < cell_cfg->cell_cfg.nof_rbg and ret == alloc_result::invalid_coderate; ++nrbg) {
for (uint32_t nrbg = 1; nrbg < bwp_cfg->N_rbg and ret == alloc_result::invalid_coderate; ++nrbg) {
rbg_interval rbg_interv = find_empty_rbg_interval(pdsch_bitmap, nrbg);
if (rbg_interv.length() == nrbg) {
ret = slot_grid.alloc_rar(rar_aggr_level, rar, rbg_interv, nof_grants_alloc);
@ -61,7 +60,7 @@ void ra_sched::run_slot(bwp_slot_allocator& slot_grid)
// - if window has passed, discard RAR
// - if window hasn't started, stop loop, as RARs are ordered by TTI
tti_interval rar_window{rar.prach_tti + PRACH_RAR_OFFSET,
rar.prach_tti + PRACH_RAR_OFFSET + cell_cfg->cell_cfg.rar_window_size};
rar.prach_tti + PRACH_RAR_OFFSET + bwp_cfg->cfg.rar_window_size};
if (not rar_window.contains(pdcch_tti)) {
if (pdcch_tti >= rar_window.stop()) {
fmt::memory_buffer str_buffer;
@ -142,14 +141,12 @@ int ra_sched::dl_rach_info(const dl_sched_rar_info_t& rar_info)
return SRSRAN_SUCCESS;
}
/// Constructs the BWP context, wiring the same shared BWP parameters into the
/// RA scheduler and the BWP resource grid.
bwp_ctxt::bwp_ctxt(const bwp_params& bwp_cfg) : cfg(&bwp_cfg), ra(bwp_cfg), grid(bwp_cfg) {}
cell_sched::cell_sched(const sched_cell_params& cell_cfg_) : cfg(&cell_cfg_)
serv_cell_ctxt::serv_cell_ctxt(const sched_cell_params& cell_cfg_) : cfg(&cell_cfg_)
{
for (uint32_t bwp_id = 0; bwp_id < cfg->cell_cfg.bwps.size(); ++bwp_id) {
bwps.emplace_back(cell_cfg_, bwp_id);
bwps.emplace_back(cell_cfg_.bwps[bwp_id]);
}
}

@ -16,9 +16,23 @@
namespace srsenb {
namespace sched_nr_impl {
// Builds the per-BWP parameter set: stores references to the cell/scheduler
// configuration and derives the RBG-related quantities from the BWP config.
bwp_params::bwp_params(const cell_cfg_t& cell, const sched_cfg_t& sched_cfg_, uint32_t cc_, uint32_t bwp_id_) :
cell_cfg(cell), sched_cfg(sched_cfg_), cc(cc_), bwp_id(bwp_id_), cfg(cell.bwps[bwp_id_])
{
// Nominal RBG size P (TS 38.214, Table 6.1.2.2.1-1) and resulting RBG count.
P = get_P(cfg.rb_width, cfg.pdsch.rbg_size_cfg_1);
N_rbg = get_nof_rbgs(cfg.rb_width, cfg.start_rb, cfg.pdsch.rbg_size_cfg_1);
// coresets[0] must be set: bwp_slot_grid unconditionally instantiates a coreset region.
srsran_assert(cfg.coresets[0].has_value(), "At least one coreset has to be active per BWP");
}
/// Builds the per-cell parameter set and instantiates one bwp_params per
/// BWP configured in the cell.
sched_cell_params::sched_cell_params(uint32_t cc_, const cell_cfg_t& cell, const sched_cfg_t& sched_cfg_) :
  cc(cc_), cell_cfg(cell), sched_cfg(sched_cfg_)
{
  bwps.reserve(cell.bwps.size()); // avoid reallocation: bwp_params holds references
  for (uint32_t i = 0; i < cell.bwps.size(); ++i) {
    bwps.emplace_back(cell, sched_cfg_, cc, i);
  }
  srsran_assert(not bwps.empty(), "No BWPs were configured");
}
// Top-level scheduler parameters: keeps a reference to the scheduler configuration.
sched_params::sched_params(const sched_cfg_t& sched_cfg_) : sched_cfg(sched_cfg_) {}

@ -16,13 +16,13 @@
namespace srsenb {
namespace sched_nr_impl {
coreset_region::coreset_region(const bwp_cfg_t& bwp_cfg_,
coreset_region::coreset_region(const bwp_params& bwp_cfg_,
uint32_t coreset_id_,
uint32_t slot_idx_,
pdcch_dl_list_t& dl_list_,
pdcch_ul_list_t& ul_list_) :
bwp_cfg(&bwp_cfg_),
coreset_cfg(&bwp_cfg_.coresets[coreset_id_ - 1].value()),
coreset_cfg(&bwp_cfg_.cfg.coresets[coreset_id_].value()),
coreset_id(coreset_id_),
slot_idx(slot_idx_),
pdcch_dl_list(dl_list_),
@ -173,8 +173,8 @@ srsran::span<const uint32_t> coreset_region::get_cce_loc_table(const alloc_recor
switch (record.alloc_type) {
case pdcch_grant_type_t::dl_data:
return record.ue->cfg->cc_params[record.ue->cc]
.bwps[bwp_cfg->bwp_id - 1]
.coresets[coreset_id - 1]
.bwps[bwp_cfg->bwp_id]
.coresets[coreset_id]
.cce_positions[slot_idx][record.aggr_idx];
default:
break;

@ -17,7 +17,7 @@
namespace srsenb {
namespace sched_nr_impl {
/// TS 38.214, Table 6.1.2.2.1-1 - Nominal RBG size P
uint32_t get_P(uint32_t bwp_nof_prb, bool config_1_or_2)
{
srsran_assert(bwp_nof_prb > 0 and bwp_nof_prb <= 275, "Invalid BWP size");
@ -33,6 +33,7 @@ uint32_t get_P(uint32_t bwp_nof_prb, bool config_1_or_2)
return 16;
}
/// TS 38.214 - total number of RBGs for an uplink bandwidth part of size "bwp_nof_prb" PRBs
uint32_t get_nof_rbgs(uint32_t bwp_nof_prb, uint32_t bwp_start, bool config1_or_2)
{
uint32_t P = get_P(bwp_nof_prb, config1_or_2);
@ -108,20 +109,20 @@ rbg_interval find_empty_rbg_interval(const pdsch_bitmap& in_mask, uint32_t max_s
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/// Fills the DCI content of a RAR grant.
/// NOTE(review): placeholder implementation — only the MCS is set so far; the
/// rbginterv/bwp_cfg arguments are not yet consumed.
bool fill_dci_rar(rbg_interval rbginterv, const bwp_params& bwp_cfg, srsran_dci_dl_nr_t& dci)
{
  dci.mcs = 5; // TODO: derive MCS/frequency assignment from rbginterv and bwp_cfg
  return true;
}
template <typename DciDlOrUl>
void fill_dci_common(const slot_ue& ue, const rbgmask_t& bitmap, const sched_cell_params& cc_cfg, DciDlOrUl& dci)
void fill_dci_common(const slot_ue& ue, const rbgmask_t& bitmap, const bwp_params& bwp_cfg, DciDlOrUl& dci)
{
const static uint32_t rv_idx[4] = {0, 2, 3, 1};
// Note: PDCCH DCI position already filled at this point
dci.bwp_id = ue.bwp_id;
dci.cc_id = ue.cc;
dci.freq_domain_assigment = bitmap_to_riv(bitmap, cc_cfg.cell_cfg.nof_prb);
dci.freq_domain_assigment = bitmap_to_riv(bitmap, bwp_cfg.cfg.rb_width);
dci.ctx.rnti = ue.rnti;
dci.ctx.rnti_type = srsran_rnti_type_c;
dci.tpc = 1;
@ -133,47 +134,35 @@ void fill_dci_common(const slot_ue& ue, const rbgmask_t& bitmap, const sched_cel
dci.rv = rv_idx[h->nof_retx() % 4];
}
void fill_dci_ue_cfg(const slot_ue& ue,
const rbgmask_t& rbgmask,
const sched_cell_params& cc_cfg,
srsran_dci_dl_nr_t& dci)
void fill_dci_ue_cfg(const slot_ue& ue, const rbgmask_t& rbgmask, const bwp_params& bwp_cfg, srsran_dci_dl_nr_t& dci)
{
fill_dci_common(ue, rbgmask, cc_cfg, dci);
fill_dci_common(ue, rbgmask, bwp_cfg, dci);
}
void fill_dci_ue_cfg(const slot_ue& ue,
const rbgmask_t& rbgmask,
const sched_cell_params& cc_cfg,
srsran_dci_ul_nr_t& dci)
void fill_dci_ue_cfg(const slot_ue& ue, const rbgmask_t& rbgmask, const bwp_params& bwp_cfg, srsran_dci_ul_nr_t& dci)
{
fill_dci_common(ue, rbgmask, cc_cfg, dci);
fill_dci_common(ue, rbgmask, bwp_cfg, dci);
}
/// Fills the SCH grant fields shared by PDSCH and PUSCH configurations.
/// NOTE(review): rbgmask is currently unused; nof_prb is taken from the BWP width.
void fill_sch_ue_common(const slot_ue&   ue,
                        const rbgmask_t& rbgmask,
                        const bwp_params& bwp_cfg,
                        srsran_sch_cfg_nr_t& sch)
{
  sch.grant.rnti_type  = srsran_rnti_type_c;
  sch.grant.rnti       = ue.rnti;
  sch.grant.nof_layers = 1;
  sch.grant.nof_prb    = bwp_cfg.cfg.rb_width;
}
void fill_pdsch_ue(const slot_ue& ue,
const rbgmask_t& rbgmask,
const sched_cell_params& cc_cfg,
srsran_sch_cfg_nr_t& sch)
void fill_pdsch_ue(const slot_ue& ue, const rbgmask_t& rbgmask, const bwp_params& cc_cfg, srsran_sch_cfg_nr_t& sch)
{
fill_sch_ue_common(ue, rbgmask, cc_cfg, sch);
sch.grant.k = ue.cc_cfg->pdsch_res_list[0].k0;
sch.grant.dci_format = srsran_dci_format_nr_1_0;
}
void fill_pusch_ue(const slot_ue& ue,
const rbgmask_t& rbgmask,
const sched_cell_params& cc_cfg,
srsran_sch_cfg_nr_t& sch)
void fill_pusch_ue(const slot_ue& ue, const rbgmask_t& rbgmask, const bwp_params& cc_cfg, srsran_sch_cfg_nr_t& sch)
{
fill_sch_ue_common(ue, rbgmask, cc_cfg, sch);
sch.grant.k = ue.cc_cfg->pusch_res_list[0].k2;

@ -18,16 +18,16 @@ namespace sched_nr_impl {
#define NUMEROLOGY_IDX 0
/// Builds the resource state of one slot of a BWP: RBG bitmaps sized to the
/// BWP's RBG count, DL/UL direction derived from the cell TDD pattern, and the
/// coreset region for PDCCH allocations.
bwp_slot_grid::bwp_slot_grid(const bwp_params& bwp_cfg_, uint32_t slot_idx_) :
  dl_rbgs(bwp_cfg_.N_rbg),
  ul_rbgs(bwp_cfg_.N_rbg),
  slot_idx(slot_idx_),
  cfg(&bwp_cfg_),
  is_dl(srsran_tdd_nr_is_dl(&bwp_cfg_.cell_cfg.tdd, NUMEROLOGY_IDX, slot_idx_)),
  is_ul(srsran_tdd_nr_is_ul(&bwp_cfg_.cell_cfg.tdd, NUMEROLOGY_IDX, slot_idx_))
{
  const uint32_t coreset_id = 0; // Note: for now only one coreset per BWP supported
  coresets.emplace_back(*cfg, coreset_id, slot_idx_, dl_pdcchs, ul_pdcchs);
}
void bwp_slot_grid::reset()
@ -42,17 +42,17 @@ void bwp_slot_grid::reset()
pucchs.clear();
}
/// Builds the per-BWP slot ring buffer; the slot index wraps at the number of
/// slots per frame.
bwp_res_grid::bwp_res_grid(const bwp_params& bwp_cfg_) : cfg(&bwp_cfg_)
{
  for (uint32_t sl = 0; sl < slots.capacity(); ++sl) {
    slots.emplace_back(*cfg, sl % static_cast<uint32_t>(SRSRAN_NSLOTS_PER_FRAME_NR(0u)));
  }
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/// Constructs the slot allocator for a BWP grid, caching the grid's shared
/// BWP parameters and the MAC logger.
bwp_slot_allocator::bwp_slot_allocator(bwp_res_grid& bwp_grid_) :
  logger(srslog::fetch_basic_logger("MAC")), cfg(*bwp_grid_.cfg), bwp_grid(bwp_grid_)
{}
alloc_result bwp_slot_allocator::alloc_rar(uint32_t aggr_idx,
@ -81,7 +81,7 @@ alloc_result bwp_slot_allocator::alloc_rar(uint32_t
// Check Msg3 RB collision
uint32_t total_ul_nof_prbs = msg3_nof_prbs * nof_grants;
uint32_t total_ul_nof_rbgs = srsran::ceil_div(total_ul_nof_prbs, get_P(bwp_grid.bwp_cfg().rb_width, false));
uint32_t total_ul_nof_rbgs = srsran::ceil_div(total_ul_nof_prbs, get_P(bwp_grid.nof_prbs(), false));
rbg_interval msg3_rbgs = find_empty_rbg_interval(bwp_msg3_slot.ul_rbgs, total_ul_nof_rbgs);
if (msg3_rbgs.length() < total_ul_nof_rbgs) {
logger.debug("SCHED: No space in PUSCH for Msg3.");
@ -98,7 +98,7 @@ alloc_result bwp_slot_allocator::alloc_rar(uint32_t
// Generate DCI for RAR
pdcch_dl_t& pdcch = bwp_pdcch_slot.dl_pdcchs.back();
if (not fill_dci_rar(interv, bwp_grid.cell_params(), pdcch.dci)) {
if (not fill_dci_rar(interv, *bwp_grid.cfg, pdcch.dci)) {
// Cancel on-going PDCCH allocation
bwp_pdcch_slot.coresets[coreset_id].rem_last_dci();
return alloc_result::invalid_coderate;
@ -158,7 +158,7 @@ alloc_result bwp_slot_allocator::alloc_pdsch(slot_ue& ue, const rbgmask_t& dl_ma
// Allocation Successful
pdcch_dl_t& pdcch = bwp_pdcch_slot.dl_pdcchs.back();
fill_dci_ue_cfg(ue, dl_mask, bwp_grid.cell_params(), pdcch.dci);
fill_dci_ue_cfg(ue, dl_mask, *bwp_grid.cfg, pdcch.dci);
pdsch_mask |= dl_mask;
bwp_uci_slot.pucchs.emplace_back();
pucch_grant& pucch = bwp_uci_slot.pucchs.back();
@ -209,7 +209,7 @@ alloc_result bwp_slot_allocator::alloc_pusch(slot_ue& ue, const rbgmask_t& ul_ma
// Allocation Successful
pdcch_ul_t& pdcch = pdcchs.back();
fill_dci_ue_cfg(ue, ul_mask, bwp_grid.cell_params(), pdcch.dci);
fill_dci_ue_cfg(ue, ul_mask, *bwp_grid.cfg, pdcch.dci);
pusch_mask |= ul_mask;
return alloc_result::success;

@ -15,7 +15,7 @@
namespace srsenb {
namespace sched_nr_impl {
/// Constructs the per-carrier slot worker.
/// NOTE(review): the allocator is bound to bwps[0] only — a single BWP is
/// currently supported by the worker.
slot_cc_worker::slot_cc_worker(serv_cell_ctxt& cc_sched) :
  cell(cc_sched), cfg(*cc_sched.cfg), bwp_alloc(cc_sched.bwps[0].grid)
{}
@ -79,7 +79,7 @@ void slot_cc_worker::alloc_dl_ues()
return;
}
rbgmask_t dlmask(cfg.cell_cfg.nof_rbg);
rbgmask_t dlmask(cfg.bwps[0].N_rbg);
dlmask.fill(0, dlmask.size(), true);
bwp_alloc.alloc_pdsch(ue, dlmask);
}
@ -94,7 +94,7 @@ void slot_cc_worker::alloc_ul_ues()
return;
}
rbgmask_t ulmask(cfg.cell_cfg.nof_rbg);
rbgmask_t ulmask(cfg.bwps[0].N_rbg);
ulmask.fill(0, ulmask.size(), true);
bwp_alloc.alloc_pusch(ue, ulmask);
}

@ -44,10 +44,8 @@ sched_nr_interface::cell_cfg_t get_default_cell_cfg()
// Disable pattern 2
cell_cfg.tdd.pattern2.period_ms = 0;
cell_cfg.bwps.resize(2);
cell_cfg.bwps.resize(1);
cell_cfg.bwps[0].coresets[0].emplace(get_default_coreset());
cell_cfg.bwps[0].coresets[1].emplace(get_default_coreset());
cell_cfg.bwps[0].coresets[1].value().id = 1;
return cell_cfg;
}
std::vector<sched_nr_interface::cell_cfg_t> get_default_cells_cfg(uint32_t nof_sectors)

Loading…
Cancel
Save