sched,nr: added logic to update UE DL and UL buffer status

master
Francisco Paisana 3 years ago
parent 020bec025e
commit 510a87e4b2

@ -70,6 +70,7 @@ public:
// UL BSR methods
bool is_lcg_active(uint32_t lcg) const;
int get_bsr(uint32_t lcg) const;
int get_bsr() const;
const std::array<int, MAX_NOF_LCGS>& get_bsr_state() const { return lcg_bsr; }
static bool is_lcid_valid(uint32_t lcid) { return lcid <= MAX_LC_ID; }

@ -46,6 +46,8 @@ public:
void dl_ack_info(uint16_t rnti, uint32_t cc, uint32_t pid, uint32_t tb_idx, bool ack) override;
void ul_crc_info(uint16_t rnti, uint32_t cc, uint32_t pid, bool crc) override;
void ul_sr_info(slot_point slot_rx, uint16_t rnti) override;
void ul_bsr(uint16_t rnti, uint32_t lcg_id, uint32_t bsr);
void dl_buffer_state(uint16_t rnti, uint32_t lcid, uint32_t newtx, uint32_t retx);
int get_dl_sched(slot_point pdsch_tti, uint32_t cc, dl_sched_res_t& result) override;
int get_ul_sched(slot_point pusch_tti, uint32_t cc, ul_sched_t& result) override;

@ -27,6 +27,10 @@ namespace sched_nr_impl {
const static size_t MAX_GRANTS = sched_nr_interface::MAX_GRANTS;
using pdcch_dl_t = mac_interface_phy_nr::pdcch_dl_t;
using pdcch_ul_t = mac_interface_phy_nr::pdcch_ul_t;
using pdcch_dl_list_t = srsran::bounded_vector<pdcch_dl_t, MAX_GRANTS>;
using pdcch_ul_list_t = srsran::bounded_vector<pdcch_ul_t, MAX_GRANTS>;
using pucch_t = mac_interface_phy_nr::pucch_t;
using pucch_list_t = srsran::bounded_vector<pucch_t, MAX_GRANTS>;
using pusch_t = mac_interface_phy_nr::pusch_t;
@ -159,48 +163,6 @@ public:
std::vector<cc_params> cc_params;
};
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// RAII-style guard over a single boolean "busy" flag. A resource_guard owns the
// flag; a resource_guard::token is a movable handle that, while alive, marks the
// resource as busy and automatically clears the flag on destruction/release.
// The guard itself is pinned in memory (copies and moves deleted) because tokens
// keep a raw pointer to its internal flag.
struct resource_guard {
public:
resource_guard() = default;
resource_guard(const resource_guard& other) = delete;
resource_guard(resource_guard&& other) = delete;
resource_guard& operator=(const resource_guard& other) = delete;
resource_guard& operator=(resource_guard&& other) = delete;
// True while some token currently holds the resource.
bool busy() const { return flag; }
// Move-only ownership handle. Acquires the parent's flag on construction if it
// is free; otherwise the token is created empty (acquisition failed).
struct token {
token() = default;
// Try-acquire: points at the parent's flag only when the parent is not busy.
token(resource_guard& parent) : flag(parent.busy() ? nullptr : &parent.flag)
{
if (flag != nullptr) {
*flag = true;
}
}
token(token&&) noexcept = default;
token& operator=(token&&) noexcept = default;
// Explicitly give the resource back (the deleter clears the parent's flag).
void release() { flag.reset(); }
bool owns_token() const { return flag != nullptr; }
bool empty() const { return flag == nullptr; }
private:
// Custom deleter: instead of delete-ing, clears the parent's busy flag.
// Asserts the flag is still set, catching double-release/state corruption.
struct release_deleter {
void operator()(bool* ptr)
{
if (ptr != nullptr) {
srsran_assert(*ptr == true, "resource token: detected inconsistency token state");
*ptr = false;
}
}
};
// Non-owning in the heap sense: unique_ptr is used purely for its
// move-only + run-deleter-on-reset semantics over the parent's flag.
std::unique_ptr<bool, release_deleter> flag;
};
private:
bool flag = false;
};
} // namespace sched_nr_impl
} // namespace srsenb

@ -41,6 +41,7 @@ public:
uint32_t ndi() const { return tb[0].ndi; }
uint32_t mcs() const { return tb[0].mcs; }
const prb_grant& prbs() const { return prbs_; }
slot_point harq_slot_tx() const { return slot_tx; }
slot_point harq_slot_ack() const { return slot_ack; }
bool ack_info(uint32_t tb_idx, bool ack);
@ -126,6 +127,11 @@ public:
void dl_ack_info(uint32_t pid, uint32_t tb_idx, bool ack) { dl_harqs[pid].ack_info(tb_idx, ack); }
void ul_crc_info(uint32_t pid, bool ack) { ul_harqs[pid].ack_info(0, ack); }
uint32_t nof_dl_harqs() const { return dl_harqs.size(); }
uint32_t nof_ul_harqs() const { return ul_harqs.size(); }
const dl_harq_proc& dl_harq(uint32_t pid) const { return dl_harqs[pid]; }
const ul_harq_proc& ul_harq(uint32_t pid) const { return ul_harqs[pid]; }
dl_harq_proc* find_pending_dl_retx()
{
return find_dl([this](const dl_harq_proc& h) { return h.has_pending_retx(slot_rx); });

@ -68,6 +68,7 @@ public:
// Global scheduler configuration knobs.
struct sched_cfg_t {
// Enable/disable scheduling of DL data (PDSCH) grants.
bool pdsch_enabled = true;
// Enable/disable scheduling of UL data (PUSCH) grants.
bool pusch_enabled = true;
// When true, UE DL/UL buffers are treated as always full (large constant
// pending-bytes), bypassing real RLC/BSR state — used by tests/benchmarks.
bool auto_refill_buffer = true;
// Name of the srslog logger channel used by the scheduler.
std::string logger_name = "MAC";
};

@ -30,10 +30,6 @@ enum class pdcch_grant_type_t { sib, rar, dl_data, ul_data };
class slot_ue;
using bwp_cfg_t = sched_nr_interface::bwp_cfg_t;
using pdcch_dl_t = mac_interface_phy_nr::pdcch_dl_t;
using pdcch_ul_t = mac_interface_phy_nr::pdcch_ul_t;
using pdcch_dl_list_t = srsran::bounded_vector<pdcch_dl_t, MAX_GRANTS>;
using pdcch_ul_list_t = srsran::bounded_vector<pdcch_ul_t, MAX_GRANTS>;
class coreset_region
{

@ -42,7 +42,7 @@ public:
uint32_t cc = SCHED_NR_MAX_CARRIERS;
// UE parameters common to all sectors
bool pending_sr = false;
int dl_pending_bytes = 0, ul_pending_bytes = 0;
// UE parameters that are sector specific
const bwp_ue_cfg* cfg = nullptr;
@ -61,8 +61,7 @@ class ue_carrier
{
public:
ue_carrier(uint16_t rnti, const ue_cfg_t& cfg, const sched_cell_params& cell_params_);
void new_slot(slot_point pdcch_slot, const ue_cfg_t& uecfg_);
slot_ue try_reserve(slot_point pdcch_slot);
slot_ue try_reserve(slot_point pdcch_slot, const ue_cfg_t& uecfg_, uint32_t dl_harq_bytes, uint32_t ul_harq_bytes);
const uint16_t rnti;
const uint32_t cc;
@ -83,25 +82,34 @@ class ue
public:
ue(uint16_t rnti, const ue_cfg_t& cfg, const sched_params& sched_cfg_);
void new_slot(slot_point pdcch_slot);
slot_ue try_reserve(slot_point pdcch_slot, uint32_t cc);
void set_cfg(const ue_cfg_t& cfg);
const ue_cfg_t& cfg() const { return ue_cfg; }
void ul_sr_info(slot_point slot_rx) { pending_sr = true; }
void rlc_buffer_state(uint32_t lcid, uint32_t newtx, uint32_t retx) { buffers.dl_buffer_state(lcid, newtx, retx); }
void ul_bsr(uint32_t lcg, uint32_t bsr_val) { buffers.ul_bsr(lcg, bsr_val); }
bool has_ca() const { return ue_cfg.carriers.size() > 1; }
void ul_sr_info(slot_point slot_rx) { last_sr_slot = slot_rx; }
bool has_ca() const
{
return ue_cfg.carriers.size() > 1 and std::count_if(ue_cfg.carriers.begin() + 1,
ue_cfg.carriers.end(),
[](const ue_cc_cfg_t& cc) { return cc.active; }) > 0;
}
uint32_t pcell_cc() const { return ue_cfg.carriers[0].cc; }
ue_buffer_manager<true> buffers;
std::array<std::unique_ptr<ue_carrier>, SCHED_NR_MAX_CARRIERS> carriers;
private:
const uint16_t rnti;
const sched_params& sched_cfg;
bool pending_sr = false;
ue_buffer_manager<true> buffers;
slot_point last_sr_slot;
int ul_pending_bytes = 0, dl_pending_bytes = 0;
ue_cfg_t ue_cfg;
};

@ -39,7 +39,6 @@ public:
explicit slot_cc_worker(serv_cell_manager& sched);
void run(slot_point pdcch_slot, ue_map_t& ue_db_);
void finish();
bool running() const { return slot_rx.valid(); }
void enqueue_cc_event(srsran::move_callback<void()> ev);
@ -102,6 +101,8 @@ public:
}
private:
void update_ue_db(slot_point slot_tx, bool update_ca_users);
bool save_sched_result(slot_point pdcch_slot, uint32_t cc, dl_sched_res_t& dl_res, ul_sched_t& ul_res);
const sched_params& cfg;

@ -76,6 +76,18 @@ int ue_buffer_manager<isNR>::get_bsr(uint32_t lcg) const
return is_lcg_active(lcg) ? lcg_bsr[lcg] : 0;
}
template <bool isNR>
/// Total pending UL bytes reported via BSR, summed over all active LCGs.
/// @return aggregate of lcg_bsr[] for active LCGs (0 if none active).
int ue_buffer_manager<isNR>::get_bsr() const
{
  // Accumulate in an int to match both the element type of lcg_bsr and the
  // return type, avoiding the implicit unsigned->signed conversion the
  // previous uint32_t accumulator incurred on return.
  int total_bytes = 0;
  for (uint32_t lcg = 0; is_lcg_valid(lcg); ++lcg) {
    if (is_lcg_active(lcg)) {
      total_bytes += lcg_bsr[lcg];
    }
  }
  return total_bytes;
}
template <bool isNR>
void ue_buffer_manager<isNR>::ul_bsr(uint32_t lcg_id, uint32_t val)
{

@ -151,6 +151,17 @@ void sched_nr::ul_sr_info(slot_point slot_rx, uint16_t rnti)
sched_workers->enqueue_event(rnti, [this, rnti, slot_rx]() { ue_db[rnti]->ul_sr_info(slot_rx); });
}
/// Forward a UE Buffer Status Report to the scheduler.
/// The update is not applied inline; it is deferred as an event to the
/// worker that owns this RNTI, keeping UE state mutation single-threaded.
void sched_nr::ul_bsr(uint16_t rnti, uint32_t lcg_id, uint32_t bsr)
{
  auto apply_bsr = [this, rnti, lcg_id, bsr]() { ue_db[rnti]->ul_bsr(lcg_id, bsr); };
  sched_workers->enqueue_event(rnti, std::move(apply_bsr));
}
/// Forward an RLC DL buffer-state update (new tx + retx bytes for an LCID)
/// to the scheduler. As with other UE feedback, the update is enqueued as an
/// event and executed by the worker that owns this RNTI.
void sched_nr::dl_buffer_state(uint16_t rnti, uint32_t lcid, uint32_t newtx, uint32_t retx)
{
  auto apply_buf_st = [this, rnti, lcid, newtx, retx]() { ue_db[rnti]->rlc_buffer_state(lcid, newtx, retx); };
  sched_workers->enqueue_event(rnti, std::move(apply_buf_st));
}
#define VERIFY_INPUT(cond, msg, ...) \
do { \
if (not(cond)) { \

@ -29,23 +29,23 @@ ue_carrier::ue_carrier(uint16_t rnti_, const ue_cfg_t& uecfg_, const sched_cell_
harq_ent(cell_params_.nof_prb())
{}
void ue_carrier::new_slot(slot_point pdcch_slot, const ue_cfg_t& uecfg_)
slot_ue ue_carrier::try_reserve(slot_point pdcch_slot,
const ue_cfg_t& uecfg_,
uint32_t dl_pending_bytes,
uint32_t ul_pending_bytes)
{
slot_point slot_rx = pdcch_slot - TX_ENB_DELAY;
// update CC/BWP config if there were changes
if (bwp_cfg.ue_cfg() != &uecfg_) {
bwp_cfg = bwp_ue_cfg(rnti, cell_params.bwps[0], uecfg_);
}
harq_ent.new_slot(pdcch_slot - TX_ENB_DELAY);
}
slot_ue ue_carrier::try_reserve(slot_point pdcch_slot)
{
slot_point slot_rx = pdcch_slot - TX_ENB_DELAY;
// copy cc-specific parameters and find available HARQs
slot_ue sfu(rnti, slot_rx, cc);
sfu.cfg = &bwp_cfg;
sfu.harq_ent = &harq_ent;
sfu.pdcch_slot = pdcch_slot;
sfu.harq_ent = &harq_ent;
const uint32_t k0 = 0;
sfu.pdsch_slot = sfu.pdcch_slot + k0;
uint32_t k1 =
@ -57,45 +57,81 @@ slot_ue ue_carrier::try_reserve(slot_point pdcch_slot)
sfu.dl_cqi = dl_cqi;
sfu.ul_cqi = ul_cqi;
// set UE-common parameters
sfu.dl_pending_bytes = dl_pending_bytes;
sfu.ul_pending_bytes = ul_pending_bytes;
const srsran_tdd_config_nr_t& tdd_cfg = cell_params.cell_cfg.tdd;
if (srsran_tdd_nr_is_dl(&tdd_cfg, 0, sfu.pdsch_slot.slot_idx())) {
// If DL enabled
sfu.h_dl = harq_ent.find_pending_dl_retx();
if (sfu.h_dl == nullptr) {
if (sfu.h_dl == nullptr and sfu.dl_pending_bytes > 0) {
sfu.h_dl = harq_ent.find_empty_dl_harq();
}
}
if (srsran_tdd_nr_is_ul(&tdd_cfg, 0, sfu.pusch_slot.slot_idx())) {
// If UL enabled
sfu.h_ul = harq_ent.find_pending_ul_retx();
if (sfu.h_ul == nullptr) {
if (sfu.h_ul == nullptr and sfu.ul_pending_bytes > 0) {
sfu.h_ul = harq_ent.find_empty_ul_harq();
}
}
if (sfu.h_dl == nullptr and sfu.h_ul == nullptr) {
// there needs to be at least one available HARQ for newtx/retx
sfu.release();
return sfu;
}
return sfu;
}
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
ue::ue(uint16_t rnti_, const ue_cfg_t& cfg, const sched_params& sched_cfg_) :
rnti(rnti_), sched_cfg(sched_cfg_), ue_cfg(cfg), buffers(srslog::fetch_basic_logger(sched_cfg_.sched_cfg.logger_name))
rnti(rnti_), sched_cfg(sched_cfg_), buffers(srslog::fetch_basic_logger(sched_cfg_.sched_cfg.logger_name))
{
for (uint32_t cc = 0; cc < cfg.carriers.size(); ++cc) {
if (cfg.carriers[cc].active) {
carriers[cc].reset(new ue_carrier(rnti, cfg, sched_cfg.cells[cc]));
}
}
set_cfg(cfg);
}
/// Apply a new UE configuration and lazily create carrier contexts for any
/// carrier that became active. Existing ue_carrier objects are left untouched
/// (this function never deactivates/destroys a carrier).
void ue::set_cfg(const ue_cfg_t& cfg)
{
  ue_cfg = cfg;
  for (const auto& cc_cfg : cfg.carriers) {
    if (not cc_cfg.active) {
      continue;
    }
    // First time this CC is activated for the UE -> instantiate its context.
    if (carriers[cc_cfg.cc] == nullptr) {
      carriers[cc_cfg.cc].reset(new ue_carrier(rnti, cfg, sched_cfg.cells[cc_cfg.cc]));
    }
  }
}
/// Prepare the UE's non-CC-specific state for a new slot:
/// 1) advance the HARQ entities of all active carriers, and
/// 2) recompute the UE's total pending DL/UL bytes for {rnti, pdcch_slot}.
void ue::new_slot(slot_point pdcch_slot)
{
  for (auto& ue_cc_cfg : ue_cfg.carriers) {
    auto& cc = carriers[ue_cc_cfg.cc];
    if (cc != nullptr) {
      // Update CC HARQ state
      cc->harq_ent.new_slot(pdcch_slot - TX_ENB_DELAY);
    }
  }

  // Compute pending DL/UL bytes for {rnti, pdcch_slot}
  if (sched_cfg.sched_cfg.auto_refill_buffer) {
    // Test/benchmark mode: pretend buffers are always full.
    dl_pending_bytes = 1000000;
    ul_pending_bytes = 1000000;
  } else {
    dl_pending_bytes = buffers.get_dl_tx_total();
    ul_pending_bytes = buffers.get_bsr();
    for (auto& ue_cc_cfg : ue_cfg.carriers) {
      auto& cc = carriers[ue_cc_cfg.cc];
      if (cc != nullptr) {
        // Discount UL HARQ pending bytes to BSR.
        // FIX: iterate over the UL HARQ pool (nof_ul_harqs), not the DL one —
        // the loop indexes ul_harq(pid), and the two pools need not be the
        // same size.
        for (uint32_t pid = 0; pid < cc->harq_ent.nof_ul_harqs(); ++pid) {
          ul_pending_bytes -= cc->harq_ent.ul_harq(pid).tbs();
          // An UL grant transmitted after the last SR is considered to have
          // answered that SR.
          if (last_sr_slot.valid() and cc->harq_ent.ul_harq(pid).harq_slot_tx() > last_sr_slot) {
            last_sr_slot.clear();
          }
        }
      }
    }
    // In-flight HARQ bytes may exceed the (stale) BSR; clamp at zero so the
    // pending-SR fallback below still triggers.
    if (ul_pending_bytes < 0) {
      ul_pending_bytes = 0;
    }
    if (ul_pending_bytes == 0 and last_sr_slot.valid()) {
      // If unanswered SR is pending, grant a small UL allocation so the UE
      // can at least send a BSR.
      ul_pending_bytes = 512;
    }
  }
}
slot_ue ue::try_reserve(slot_point pdcch_slot, uint32_t cc)
@ -103,14 +139,12 @@ slot_ue ue::try_reserve(slot_point pdcch_slot, uint32_t cc)
if (carriers[cc] == nullptr) {
return slot_ue();
}
slot_ue sfu = carriers[cc]->try_reserve(pdcch_slot);
slot_ue sfu = carriers[cc]->try_reserve(pdcch_slot, cfg(), dl_pending_bytes, ul_pending_bytes);
if (sfu.empty()) {
return slot_ue();
}
// set UE-common parameters
sfu.pending_sr = pending_sr;
return sfu;
}

@ -75,8 +75,6 @@ void slot_cc_worker::run(slot_point pdcch_slot, ue_map_t& ue_db)
continue;
}
u.carriers[cfg.cc]->new_slot(pdcch_slot, u.cfg());
slot_ues.insert(rnti, u.try_reserve(pdcch_slot, cfg.cc));
if (slot_ues[rnti].empty()) {
// Failed to generate slot UE because UE has no conditions for DL/UL tx
@ -104,11 +102,6 @@ void slot_cc_worker::run(slot_point pdcch_slot, ue_map_t& ue_db)
slot_rx = {};
}
void slot_cc_worker::finish()
{
// synchronize results
}
void slot_cc_worker::alloc_dl_ues()
{
if (not cfg.sched_cfg.pdsch_enabled) {
@ -151,6 +144,28 @@ void sched_worker_manager::enqueue_cc_event(uint32_t cc, srsran::move_callback<v
cc_worker_list[cc]->worker.enqueue_cc_event(std::move(ev));
}
/**
 * Update UEs state that is non-CC specific (e.g. SRs, buffer status, UE configuration)
 * @param slot_tx slot for which UE state is being prepared
 * @param update_ca_users when true only CA-enabled UEs are updated, when false only non-CA UEs
 */
void sched_worker_manager::update_ue_db(slot_point slot_tx, bool update_ca_users)
{
  // Run pending non-cc specific feedback events (e.g. SRs, buffer updates, UE config).
  // Events whose RNTI is not (yet) in ue_db are executed unconditionally —
  // presumably so events that create the UE can run; confirm against callers.
  for (ue_event_t& ev : slot_events) {
    const bool rnti_known = ue_db.contains(ev.rnti);
    if (not rnti_known or ue_db[ev.rnti]->has_ca() == update_ca_users) {
      ev.callback();
    }
  }

  // Prepare internal state of the selected UE subset for the new slot.
  for (auto& u : ue_db) {
    auto& the_ue = *u.second;
    if (the_ue.has_ca() == update_ca_users) {
      the_ue.new_slot(slot_tx);
    }
  }
}
void sched_worker_manager::run_slot(slot_point slot_tx, uint32_t cc, dl_sched_res_t& dl_res, ul_sched_t& ul_res)
{
srsran::bounded_vector<std::condition_variable*, SRSRAN_MAX_CARRIERS> waiting_cvars;
@ -165,18 +180,14 @@ void sched_worker_manager::run_slot(slot_point slot_tx, uint32_t cc, dl_sched_re
if (not current_slot.valid()) {
/* First Worker to start slot */
// process non-cc specific feedback if pending (e.g. SRs, buffer updates, UE config) for UEs with CA
// process non-cc specific feedback if pending for UEs with CA
// NOTE: there is no parallelism in these operations
slot_events.clear();
{
std::lock_guard<std::mutex> ev_lock(event_mutex);
next_slot_events.swap(slot_events);
}
for (ue_event_t& ev : slot_events) {
if (not ue_db.contains(ev.rnti) or ue_db[ev.rnti]->has_ca()) {
ev.callback();
}
}
update_ue_db(slot_tx, true);
// mark the start of slot. awake remaining workers if locking on the mutex
current_slot = slot_tx;
@ -197,11 +208,7 @@ void sched_worker_manager::run_slot(slot_point slot_tx, uint32_t cc, dl_sched_re
/* Parallel Region */
// process non-cc specific feedback if pending (e.g. SRs, buffer updates, UE config) for UEs without CA
for (ue_event_t& ev : slot_events) {
if (ue_db.contains(ev.rnti) and not ue_db[ev.rnti]->has_ca() and ue_db[ev.rnti]->pcell_cc() == cc) {
ev.callback();
}
}
update_ue_db(slot_tx, false);
// process pending feedback, generate {slot, cc} scheduling decision
cc_worker_list[cc]->worker.run(slot_tx, ue_db);
@ -218,7 +225,6 @@ void sched_worker_manager::run_slot(slot_point slot_tx, uint32_t cc, dl_sched_re
// All the workers of the same slot have finished. Synchronize scheduling decisions with UEs state
for (auto& c : cc_worker_list) {
c->worker.finish();
if (c->waiting > 0) {
waiting_cvars.push_back(&c->cvar);
}

@ -29,7 +29,11 @@ void test_single_prach()
std::default_random_engine rand_gen(seed);
std::default_random_engine rgen(rand_gen());
// Set scheduler configuration
sched_nr_interface::sched_cfg_t sched_cfg{};
sched_cfg.auto_refill_buffer = std::uniform_int_distribution<uint32_t>{0, 1}(rgen) > 0;
// Set cells configuration
std::vector<sched_nr_interface::cell_cfg_t> cells_cfg = get_default_cells_cfg(1);
sched_params schedparams{sched_cfg};
schedparams.cells.emplace_back(0, cells_cfg[0], sched_cfg);
@ -52,10 +56,14 @@ void test_single_prach()
const bwp_slot_grid* result = nullptr;
auto run_slot = [&alloc, &rasched, &pdcch_slot, &slot_ues, &u]() -> const bwp_slot_grid* {
mac_logger.set_context(pdcch_slot.to_uint());
u.carriers[0]->new_slot(pdcch_slot, u.cfg());
u.new_slot(pdcch_slot);
slot_ues.clear();
slot_ues.insert(rnti, u.try_reserve(pdcch_slot, 0));
slot_ue sfu = u.try_reserve(pdcch_slot, 0);
if (not sfu.empty()) {
slot_ues.insert(rnti, std::move(sfu));
}
alloc.new_slot(pdcch_slot, slot_ues);
rasched.run_slot(alloc);
log_sched_bwp_result(mac_logger, alloc.get_pdcch_tti(), alloc.res_grid(), slot_ues);

Loading…
Cancel
Save