sched,nr: avoid extra copy of DL result and other small fixes

master
Francisco Paisana 3 years ago
parent 9e1669c403
commit 2fb2598f8c

@@ -31,7 +31,7 @@ class serv_cell_manager;
} // namespace sched_nr_impl
class ue_event_manager;
-class sched_result_manager;
+class ul_sched_result_buffer;
class sched_nr final : public sched_nr_interface
{
@@ -51,7 +51,6 @@ public:
int get_ul_sched(slot_point pusch_tti, uint32_t cc, ul_sched_t& result) override;
private:
-int generate_slot_result(slot_point pdcch_tti, uint32_t cc);
void ue_cfg_impl(uint16_t rnti, const ue_cfg_t& cfg);
// args
@@ -66,7 +65,7 @@ private:
ue_map_t ue_db;
// management of Sched Result buffering
-std::unique_ptr<sched_result_manager> pending_results;
+std::unique_ptr<ul_sched_result_buffer> pending_results;
// management of cell resources
std::vector<std::unique_ptr<sched_nr_impl::serv_cell_manager> > cells;

@@ -57,7 +57,7 @@ public:
int dl_rach_info(const dl_sched_rar_info_t& rar_info);
/// Allocate pending RARs
-void run_slot(bwp_slot_allocator& slot_grid);
+void run_slot(bwp_slot_allocator& slot_alloc);
/// Check if there are pending RARs
bool empty() const { return pending_rars.empty(); }

@@ -53,6 +53,7 @@ struct bwp_params {
const sched_cfg_t& sched_cfg;
// derived params
+srslog::basic_logger& logger;
uint32_t P;
uint32_t N_rbg;
@@ -70,9 +71,9 @@
};
std::vector<pusch_ra_time_cfg> pusch_ra_list;
-bwp_params(const cell_cfg_t& cell, const sched_cfg_t& sched_cfg_, uint32_t cc, uint32_t bwp_id);
+bwp_cce_pos_list rar_cce_list;
+bwp_params(const cell_cfg_t& cell, const sched_cfg_t& sched_cfg_, uint32_t cc, uint32_t bwp_id);
};
struct sched_cell_params {

@@ -32,7 +32,7 @@ using slot_coreset_list = std::array<srsran::optional<coreset_re
using pdsch_t = mac_interface_phy_nr::pdsch_t;
using pdsch_list_t = srsran::bounded_vector<pdsch_t, MAX_GRANTS>;
-using rar_list_t = sched_nr_interface::sched_rar_list_t;
+using sched_rar_list_t = sched_nr_interface::sched_rar_list_t;
struct harq_ack_t {
const srsran::phy_cfg_nr_t* phy_cfg;
@@ -41,15 +41,15 @@ struct harq_ack_t {
using harq_ack_list_t = srsran::bounded_vector<harq_ack_t, MAX_GRANTS>;
struct bwp_slot_grid {
-uint32_t slot_idx;
-const bwp_params* cfg;
+uint32_t slot_idx = 0;
+const bwp_params* cfg = nullptr;
bwp_rb_bitmap dl_prbs;
bwp_rb_bitmap ul_prbs;
pdcch_dl_list_t dl_pdcchs;
pdcch_ul_list_t ul_pdcchs;
pdsch_list_t pdschs;
-rar_list_t rar;
+sched_rar_list_t rar;
slot_coreset_list coresets;
pusch_list_t puschs;
harq_ack_list_t pending_acks;
@@ -65,7 +65,7 @@ struct bwp_slot_grid {
};
struct bwp_res_grid {
-bwp_res_grid(const bwp_params& bwp_cfg_);
+explicit bwp_res_grid(const bwp_params& bwp_cfg_);
bwp_slot_grid& operator[](slot_point tti) { return slots[tti.to_uint() % slots.capacity()]; };
const bwp_slot_grid& operator[](slot_point tti) const { return slots[tti.to_uint() % slots.capacity()]; };
@@ -78,6 +78,8 @@ private:
srsran::bounded_vector<bwp_slot_grid, TTIMOD_SZ> slots;
};
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/**
* Class responsible for jointly filling the DL/UL sched result fields and allocate RB/PDCCH resources in the RB grid
* to avoid potential RB/PDCCH collisions
@@ -104,11 +106,11 @@ public:
slot_point get_pdcch_tti() const { return pdcch_slot; }
slot_point get_tti_rx() const { return pdcch_slot - TX_ENB_DELAY; }
const bwp_res_grid& res_grid() const { return bwp_grid; }
-void log_bwp_sched_result();
const bwp_params& cfg;
private:
+alloc_result verify_pdsch_space(bwp_slot_grid& pdsch_grid, bwp_slot_grid& pdcch_grid) const;
alloc_result verify_pusch_space(bwp_slot_grid& pusch_grid, bwp_slot_grid* pdcch_grid = nullptr) const;
srslog::basic_logger& logger;

@@ -43,6 +43,7 @@ void fill_ul_dci_ue_fields(const slot_ue& ue,
srsran_dci_location_t dci_pos,
srsran_dci_ul_nr_t& dci);
+/// Log Scheduling Result for a given BWP and slot
void log_sched_bwp_result(srslog::basic_logger& logger,
slot_point pdcch_slot,
const bwp_res_grid& res_grid,

@@ -68,6 +68,7 @@ public:
struct sched_cfg_t {
bool pdsch_enabled = true;
bool pusch_enabled = true;
+std::string logger_name = "MAC";
};
struct ue_cc_cfg_t {

@@ -38,8 +38,7 @@ public:
explicit slot_cc_worker(serv_cell_manager& sched);
-void start(slot_point pdcch_slot, ue_map_t& ue_db_);
-void run();
+void run(slot_point pdcch_slot, ue_map_t& ue_db_);
void finish();
bool running() const { return slot_rx.valid(); }

@@ -24,24 +24,16 @@ static int assert_ue_cfg_valid(uint16_t rnti, const sched_nr_interface::ue_cfg_t
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
-class sched_result_manager
+class ul_sched_result_buffer
{
public:
-explicit sched_result_manager(uint32_t nof_cc_)
+explicit ul_sched_result_buffer(uint32_t nof_cc_)
{
for (auto& v : results) {
v.resize(nof_cc_);
}
}
-dl_sched_res_t& add_dl_result(slot_point tti, uint32_t cc)
-{
-if (not has_dl_result(tti, cc)) {
-results[tti.to_uint()][cc].slot_dl = tti;
-results[tti.to_uint()][cc].dl_res = {};
-}
-return results[tti.to_uint()][cc].dl_res;
-}
ul_sched_t& add_ul_result(slot_point tti, uint32_t cc)
{
if (not has_ul_result(tti, cc)) {
@@ -51,19 +43,8 @@ public:
return results[tti.to_uint()][cc].ul_res;
}
-bool has_dl_result(slot_point tti, uint32_t cc) const { return results[tti.to_uint()][cc].slot_dl == tti; }
bool has_ul_result(slot_point tti, uint32_t cc) const { return results[tti.to_uint()][cc].slot_ul == tti; }
-dl_sched_res_t pop_dl_result(slot_point tti, uint32_t cc)
-{
-if (has_dl_result(tti, cc)) {
-results[tti.to_uint()][cc].slot_dl.clear();
-return results[tti.to_uint()][cc].dl_res;
-}
-return {};
-}
ul_sched_t pop_ul_result(slot_point tti, uint32_t cc)
{
if (has_ul_result(tti, cc)) {
@@ -75,9 +56,7 @@ public:
private:
struct slot_result_t {
-slot_point slot_dl;
slot_point slot_ul;
-dl_sched_res_t dl_res;
ul_sched_t ul_res;
};
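Side note: the buffering pattern that remains after this change (UL results only) is a slot-indexed ring, where an entry is valid only while its stored slot tag matches the slot being queried, so stale entries are overwritten lazily. Below is a minimal standalone sketch of that pattern; mock_ul_result_ring, the depth of 8, and the int payload are illustrative stand-ins, not the real class, which keys entries on slot_point and stores ul_sched_t.

#include <array>
#include <cstdint>
#include <vector>

// Illustrative stand-in for ul_sched_result_buffer (not the real class):
// results live in a fixed-size ring indexed by slot count, and an entry is
// valid only while its stored slot tag matches the slot being queried.
struct mock_slot_result {
  uint32_t slot_tag = UINT32_MAX; // plays the role of "slot_point slot_ul"
  int      ul_res   = 0;          // plays the role of "ul_sched_t ul_res"
};

class mock_ul_result_ring
{
public:
  explicit mock_ul_result_ring(uint32_t nof_cc)
  {
    for (auto& v : results) {
      v.resize(nof_cc);
    }
  }
  // Returns a writable entry, resetting it if it still holds an older slot
  int& add_ul_result(uint32_t slot, uint32_t cc)
  {
    mock_slot_result& e = results[slot % results.size()][cc];
    if (e.slot_tag != slot) {
      e          = {};
      e.slot_tag = slot;
    }
    return e.ul_res;
  }
  bool has_ul_result(uint32_t slot, uint32_t cc) const
  {
    return results[slot % results.size()][cc].slot_tag == slot;
  }

private:
  std::array<std::vector<mock_slot_result>, 8> results; // 8 stands in for TTIMOD_SZ
};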
@@ -104,7 +83,7 @@ int sched_nr::cell_cfg(srsran::const_span<cell_cfg_t> cell_list)
cells.emplace_back(new serv_cell_manager{cfg.cells[cc]});
}
-pending_results.reset(new sched_result_manager(cell_list.size()));
+pending_results.reset(new ul_sched_result_buffer(cell_list.size()));
sched_workers.reset(new sched_nr_impl::sched_worker_manager(ue_db, cfg, cells));
return SRSRAN_SUCCESS;
@@ -125,36 +104,28 @@ void sched_nr::ue_cfg_impl(uint16_t rnti, const ue_cfg_t& uecfg)
}
}
-/// Generate {tti,cc} scheduling decision
-int sched_nr::generate_slot_result(slot_point pdcch_tti, uint32_t cc)
+/// Generate {pdcch_slot,cc} scheduling decision
+int sched_nr::get_dl_sched(slot_point slot_dl, uint32_t cc, dl_sched_res_t& result)
{
-// Copy results to intermediate buffer
-dl_sched_res_t& dl_res = pending_results->add_dl_result(pdcch_tti, cc);
-ul_sched_t& ul_res = pending_results->add_ul_result(pdcch_tti, cc);
+// Copy UL results to intermediate buffer
+ul_sched_t& ul_res = pending_results->add_ul_result(slot_dl, cc);
// Generate {slot_idx,cc} result
-sched_workers->run_slot(pdcch_tti, cc, dl_res, ul_res);
+sched_workers->run_slot(slot_dl, cc, result, ul_res);
return SRSRAN_SUCCESS;
}
-int sched_nr::get_dl_sched(slot_point slot_tx, uint32_t cc, dl_sched_res_t& result)
-{
-if (not pending_results->has_dl_result(slot_tx, cc)) {
-generate_slot_result(slot_tx, cc);
-}
-result = pending_results->pop_dl_result(slot_tx, cc);
-return SRSRAN_SUCCESS;
-}
-int sched_nr::get_ul_sched(slot_point pusch_tti, uint32_t cc, ul_sched_t& result)
+/// Fetch {ul_slot,cc} UL scheduling decision
+int sched_nr::get_ul_sched(slot_point slot_ul, uint32_t cc, ul_sched_t& result)
{
-if (not pending_results->has_ul_result(pusch_tti, cc)) {
+if (not pending_results->has_ul_result(slot_ul, cc)) {
+// sched result hasn't been generated
+result = {};
+return SRSRAN_SUCCESS;
}
-result = pending_results->pop_ul_result(pusch_tti, cc);
+result = pending_results->pop_ul_result(slot_ul, cc);
return SRSRAN_SUCCESS;
}
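Taken together, the new contract is: get_dl_sched() generates the {slot, cc} decision on demand, writing the DL part directly into the caller's buffer (the add_dl_result/pop_dl_result copy is gone) and stashing only the UL part in the buffer above; get_ul_sched() merely pops that stashed result and hands back an empty one when nothing was generated. A hypothetical caller-side sketch follows; the include path and the nesting of dl_sched_res_t/ul_sched_t inside sched_nr_interface are assumptions inferred from this diff.

// Hypothetical caller-side sequence; type and header names are taken from
// this diff and should be treated as assumptions.
#include "srsenb/hdr/stack/mac/nr/sched_nr.h"

void run_one_slot(srsenb::sched_nr& sched, srsran::slot_point slot_tx, uint32_t cc)
{
  // DL: generated on demand, written straight into the caller's buffer.
  srsenb::sched_nr_interface::dl_sched_res_t dl_res;
  sched.get_dl_sched(slot_tx, cc, dl_res);

  // UL: pops the result buffered while the matching DL slot was generated;
  // if get_dl_sched() was not called first for this {slot, cc}, an empty
  // ul_sched_t is returned instead.
  srsenb::sched_nr_interface::ul_sched_t ul_res;
  sched.get_ul_sched(slot_tx, cc, ul_res);
}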

@@ -110,9 +110,9 @@ ra_sched::allocate_pending_rar(bwp_slot_allocator& slot_grid, const pending_rar_
return ret;
}
-void ra_sched::run_slot(bwp_slot_allocator& slot_grid)
+void ra_sched::run_slot(bwp_slot_allocator& slot_alloc)
{
-slot_point pdcch_slot = slot_grid.get_pdcch_tti();
+slot_point pdcch_slot = slot_alloc.get_pdcch_tti();
slot_point msg3_slot = pdcch_slot + bwp_cfg->pusch_ra_list[0].msg3_delay;
if (not bwp_cfg->slots[pdcch_slot.slot_idx()].is_dl or not bwp_cfg->slots[msg3_slot.slot_idx()].is_ul) {
// RAR only allowed if PDCCH is available and respective Msg3 slot is available for UL
@@ -143,7 +143,7 @@ void ra_sched::run_slot(bwp_slot_allocator& slot_grid)
// Try to schedule DCIs + RBGs for RAR Grants
uint32_t nof_rar_allocs = 0;
-alloc_result ret = allocate_pending_rar(slot_grid, rar, nof_rar_allocs);
+alloc_result ret = allocate_pending_rar(slot_alloc, rar, nof_rar_allocs);
if (ret == alloc_result::success) {
// If RAR allocation was successful:

@@ -21,7 +21,12 @@ namespace srsenb {
namespace sched_nr_impl {
bwp_params::bwp_params(const cell_cfg_t& cell, const sched_cfg_t& sched_cfg_, uint32_t cc_, uint32_t bwp_id_) :
-cell_cfg(cell), sched_cfg(sched_cfg_), cc(cc_), bwp_id(bwp_id_), cfg(cell.bwps[bwp_id_])
+cell_cfg(cell),
+sched_cfg(sched_cfg_),
+cc(cc_),
+bwp_id(bwp_id_),
+cfg(cell.bwps[bwp_id_]),
+logger(srslog::fetch_basic_logger(sched_cfg_.logger_name))
{
srsran_assert(bwp_id != 0 or cfg.pdcch.coreset_present[0], "CORESET#0 has to be active for initial BWP");
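Since the allocator loggers are now fetched by name rather than hard-coded to "MAC" (see the bwp_slot_allocator constructor in the next hunk), the log sink becomes configurable through sched_cfg_t. A minimal sketch of that knob, assuming sched_cfg_t is nested in sched_nr_interface, that the header paths below are right, and using "NRMAC" as an arbitrary example name:

// Minimal sketch: route NR scheduler logs to a dedicated srslog logger via
// the new sched_cfg_t::logger_name field. Header paths are assumptions.
#include "srsenb/hdr/stack/mac/nr/sched_nr_interface.h"
#include "srsran/srslog/srslog.h"

srsenb::sched_nr_interface::sched_cfg_t make_sched_cfg()
{
  srsenb::sched_nr_interface::sched_cfg_t cfg;
  cfg.logger_name = "NRMAC"; // bwp_params will fetch_basic_logger() this name
  srslog::fetch_basic_logger("NRMAC").set_level(srslog::basic_levels::debug);
  return cfg;
}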

@@ -58,7 +58,7 @@ bwp_res_grid::bwp_res_grid(const bwp_params& bwp_cfg_) : cfg(&bwp_cfg_)
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
bwp_slot_allocator::bwp_slot_allocator(bwp_res_grid& bwp_grid_) :
logger(srslog::fetch_basic_logger("MAC")), cfg(*bwp_grid_.cfg), bwp_grid(bwp_grid_)
logger(bwp_grid_.cfg->logger), cfg(*bwp_grid_.cfg), bwp_grid(bwp_grid_)
{}
alloc_result bwp_slot_allocator::alloc_si(uint32_t aggr_idx, uint32_t si_idx, uint32_t si_ntx, const prb_interval& prbs)
@@ -96,17 +96,13 @@ alloc_result bwp_slot_allocator::alloc_rar_and_msg3(uint16_t
if (ret != alloc_result::success) {
return ret;
}
-if (bwp_pdcch_slot.dl_pdcchs.full()) {
-logger.warning("SCHED: Maximum number of DL allocations reached");
-return alloc_result::no_grant_space;
+ret = verify_pdsch_space(bwp_pdcch_slot, bwp_pdcch_slot);
+if (ret != alloc_result::success) {
+return ret;
}
// Check DL RB collision
-const prb_bitmap& pdsch_mask = bwp_pdcch_slot.dl_prbs.prbs();
-prb_bitmap dl_mask(pdsch_mask.size());
-dl_mask.fill(interv.start(), interv.stop());
-if ((pdsch_mask & dl_mask).any()) {
+if (bwp_pdcch_slot.dl_prbs.collides(interv)) {
logger.debug("SCHED: Provided RBG mask collides with allocation previously made.");
return alloc_result::sch_collision;
}
@@ -204,14 +200,9 @@ alloc_result bwp_slot_allocator::alloc_pdsch(slot_ue& ue, const prb_grant& dl_gr
bwp_slot_grid& bwp_pdcch_slot = bwp_grid[ue.pdcch_slot];
bwp_slot_grid& bwp_pdsch_slot = bwp_grid[ue.pdsch_slot];
bwp_slot_grid& bwp_uci_slot = bwp_grid[ue.uci_slot];
-if (not bwp_pdsch_slot.is_dl()) {
-logger.warning("SCHED: Trying to allocate PDSCH in TDD non-DL slot index=%d", bwp_pdsch_slot.slot_idx);
-return alloc_result::no_sch_space;
-}
-pdcch_dl_list_t& pdsch_grants = bwp_pdsch_slot.dl_pdcchs;
-if (pdsch_grants.full()) {
-logger.warning("SCHED: Maximum number of DL allocations reached");
-return alloc_result::no_grant_space;
+alloc_result result = verify_pdsch_space(bwp_pdsch_slot, bwp_pdcch_slot);
+if (result != alloc_result::success) {
+return result;
}
if (bwp_pdcch_slot.dl_prbs.collides(dl_grant)) {
return alloc_result::sch_collision;
@@ -335,6 +326,23 @@ alloc_result bwp_slot_allocator::alloc_pusch(slot_ue& ue, const prb_grant& ul_pr
return alloc_result::success;
}
+alloc_result bwp_slot_allocator::verify_pdsch_space(bwp_slot_grid& bwp_pdsch, bwp_slot_grid& bwp_pdcch) const
+{
+if (not bwp_pdsch.is_dl() or not bwp_pdcch.is_dl()) {
+logger.warning("SCHED: Trying to allocate PDSCH in TDD non-DL slot index=%d", bwp_pdsch.slot_idx);
+return alloc_result::no_sch_space;
+}
+if (bwp_pdcch.dl_pdcchs.full()) {
+logger.warning("SCHED: Maximum number of DL PDCCH allocations reached");
+return alloc_result::no_cch_space;
+}
+if (bwp_pdsch.pdschs.full()) {
+logger.warning("SCHED: Maximum number of DL PDSCH grants reached");
+return alloc_result::no_sch_space;
+}
+return alloc_result::success;
+}
alloc_result bwp_slot_allocator::verify_pusch_space(bwp_slot_grid& pusch_grid, bwp_slot_grid* pdcch_grid) const
{
if (not pusch_grid.is_ul()) {
@@ -359,10 +367,5 @@ alloc_result bwp_slot_allocator::verify_pusch_space(bwp_slot_grid& pusch_grid, b
return alloc_result::success;
}
-void bwp_slot_allocator::log_bwp_sched_result()
-{
-log_sched_bwp_result(logger, get_pdcch_tti(), bwp_grid, *slot_ues);
-}
} // namespace sched_nr_impl
} // namespace srsenb

@@ -58,8 +58,8 @@ void slot_cc_worker::run_feedback(ue_map_t& ue_db)
tmp_feedback_to_run.clear();
}
-/// Called at the beginning of TTI in a locked context, to reserve available UE resources
-void slot_cc_worker::start(slot_point pdcch_slot, ue_map_t& ue_db)
+/// Called within a locked context, to generate {slot, cc} scheduling decision
+void slot_cc_worker::run(slot_point pdcch_slot, ue_map_t& ue_db)
{
srsran_assert(not running(), "scheduler worker::start() called for active worker");
slot_rx = pdcch_slot - TX_ENB_DELAY;
@@ -85,12 +85,8 @@ void slot_cc_worker::start(slot_point pdcch_slot, ue_map_t& ue_db)
}
// UE acquired successfully for scheduling in this {slot, cc}
}
-}
-void slot_cc_worker::run()
-{
-srsran_assert(running(), "scheduler worker::run() called for non-active worker");
// Create an BWP allocator object that will passed along to RA, SI, Data schedulers
bwp_alloc.new_slot(slot_rx + TX_ENB_DELAY, slot_ues);
// Allocate pending RARs
@@ -101,7 +97,7 @@ void slot_cc_worker::run()
alloc_ul_ues();
// Log CC scheduler result
-bwp_alloc.log_bwp_sched_result();
+log_sched_bwp_result(logger, bwp_alloc.get_pdcch_tti(), cell.bwps[0].grid, slot_ues);
// releases UE resources
slot_ues.clear();
@@ -207,11 +203,8 @@ void sched_worker_manager::run_slot(slot_point slot_tx, uint32_t cc, dl_sched_re
}
}
-// process pending feedback and pre-cache UE state for slot decision
-cc_worker_list[cc]->worker.start(slot_tx, ue_db);
-// Get {slot, cc} scheduling decision
-cc_worker_list[cc]->worker.run();
+// process pending feedback, generate {slot, cc} scheduling decision
+cc_worker_list[cc]->worker.run(slot_tx, ue_db);
// decrement the number of active workers
int rem_workers = worker_count.fetch_sub(1, std::memory_order_release) - 1;

@@ -58,7 +58,7 @@ void test_single_prach()
alloc.new_slot(pdcch_slot, slot_ues);
rasched.run_slot(alloc);
-alloc.log_bwp_sched_result();
+log_sched_bwp_result(mac_logger, alloc.get_pdcch_tti(), alloc.res_grid(), slot_ues);
const bwp_slot_grid* result = &alloc.res_grid()[alloc.get_pdcch_tti()];
test_dl_pdcch_consistency(result->dl_pdcchs);
++pdcch_slot;
