diff --git a/lib/include/srslte/interfaces/sched_interface.h b/lib/include/srslte/interfaces/sched_interface.h
index 0ef50bc7b..ebef4ceca 100644
--- a/lib/include/srslte/interfaces/sched_interface.h
+++ b/lib/include/srslte/interfaces/sched_interface.h
@@ -46,6 +46,7 @@ public:
   struct sched_args_t {
     std::string sched_policy      = "time_pf";
+    std::string sched_policy_args = "2";
     int         pdsch_mcs         = -1;
     int         pdsch_max_mcs     = 28;
     int         pusch_mcs         = -1;
diff --git a/srsenb/hdr/stack/mac/schedulers/sched_time_pf.h b/srsenb/hdr/stack/mac/schedulers/sched_time_pf.h
index 9166227ac..967bdf577 100644
--- a/srsenb/hdr/stack/mac/schedulers/sched_time_pf.h
+++ b/srsenb/hdr/stack/mac/schedulers/sched_time_pf.h
@@ -23,19 +23,20 @@ class sched_time_pf final : public sched_base
   using ue_cit_t = std::map<uint16_t, sched_ue>::const_iterator;
 
 public:
-  sched_time_pf(const sched_cell_params_t& cell_params_);
+  sched_time_pf(const sched_cell_params_t& cell_params_, const sched_interface::sched_args_t& sched_args);
   void sched_dl_users(std::map<uint16_t, sched_ue>& ue_db, sf_sched* tti_sched) override;
   void sched_ul_users(std::map<uint16_t, sched_ue>& ue_db, sf_sched* tti_sched) override;
 
 private:
   void new_tti(std::map<uint16_t, sched_ue>& ue_db, sf_sched* tti_sched);
 
-  const sched_cell_params_t* cc_cfg = nullptr;
+  const sched_cell_params_t* cc_cfg         = nullptr;
+  float                      fairness_coeff = 1;
 
   srslte::tti_point current_tti_rx;
 
   struct ue_ctxt {
-    ue_ctxt(uint16_t rnti_) : rnti(rnti_) {}
+    ue_ctxt(uint16_t rnti_, float fairness_coeff_) : rnti(rnti_), fairness_coeff(fairness_coeff_) {}
     float    dl_avg_rate() const { return dl_nof_samples == 0 ? 0 : dl_avg_rate_; }
     float    ul_avg_rate() const { return ul_nof_samples == 0 ? 0 : ul_avg_rate_; }
     uint32_t dl_count() const { return dl_nof_samples; }
@@ -45,6 +46,7 @@ private:
     void save_ul_alloc(uint32_t alloc_bytes, float alpha);
 
     const uint16_t rnti;
+    const float    fairness_coeff;
 
     int   ue_cc_idx = 0;
     float dl_prio   = 0;
diff --git a/srsenb/hdr/stack/mac/schedulers/sched_time_rr.h b/srsenb/hdr/stack/mac/schedulers/sched_time_rr.h
index 5cd96bbc1..29e96b99f 100644
--- a/srsenb/hdr/stack/mac/schedulers/sched_time_rr.h
+++ b/srsenb/hdr/stack/mac/schedulers/sched_time_rr.h
@@ -22,7 +22,7 @@ class sched_time_rr final : public sched_base
   const static int MAX_RBG = 25;
 
 public:
-  sched_time_rr(const sched_cell_params_t& cell_params_);
+  sched_time_rr(const sched_cell_params_t& cell_params_, const sched_interface::sched_args_t& sched_args);
   void sched_dl_users(std::map<uint16_t, sched_ue>& ue_db, sf_sched* tti_sched) override;
   void sched_ul_users(std::map<uint16_t, sched_ue>& ue_db, sf_sched* tti_sched) override;
 
diff --git a/srsenb/src/main.cc b/srsenb/src/main.cc
index 9353009c4..ceacca5e2 100644
--- a/srsenb/src/main.cc
+++ b/srsenb/src/main.cc
@@ -133,6 +133,7 @@ void parse_args(all_args_t* args, int argc, char* argv[])
 
     /* Scheduling section */
     ("scheduler.policy", bpo::value<string>(&args->stack.mac.sched.sched_policy)->default_value("time_pf"), "DL and UL data scheduling policy (E.g. time_rr, time_pf)")
+    ("scheduler.policy_args", bpo::value<string>(&args->stack.mac.sched.sched_policy_args)->default_value("2"), "Scheduler policy-specific arguments")
     ("scheduler.pdsch_mcs", bpo::value<int>(&args->stack.mac.sched.pdsch_mcs)->default_value(-1), "Optional fixed PDSCH MCS (ignores reported CQIs if specified)")
     ("scheduler.pdsch_max_mcs", bpo::value<int>(&args->stack.mac.sched.pdsch_max_mcs)->default_value(-1), "Optional PDSCH MCS limit")
     ("scheduler.pusch_mcs", bpo::value<int>(&args->stack.mac.sched.pusch_mcs)->default_value(-1), "Optional fixed PUSCH MCS (ignores reported CQIs if specified)")
diff --git a/srsenb/src/stack/mac/sched_carrier.cc b/srsenb/src/stack/mac/sched_carrier.cc
index 27cbdc432..2e1e5847b 100644
--- a/srsenb/src/stack/mac/sched_carrier.cc
+++ b/srsenb/src/stack/mac/sched_carrier.cc
@@ -289,10 +289,10 @@ void sched::carrier_sched::carrier_cfg(const sched_cell_params_t& cell_params_)
 
   // Setup data scheduling algorithms
   if (cell_params_.sched_cfg->sched_policy == "time_rr") {
-    sched_algo.reset(new sched_time_rr{*cc_cfg});
+    sched_algo.reset(new sched_time_rr{*cc_cfg, *cell_params_.sched_cfg});
     log_h->info("Using time-domain RR scheduling policy for cc=%d\n", cc_cfg->enb_cc_idx);
   } else {
-    sched_algo.reset(new sched_time_pf{*cc_cfg});
+    sched_algo.reset(new sched_time_pf{*cc_cfg, *cell_params_.sched_cfg});
     log_h->info("Using time-domain PF scheduling policy for cc=%d\n", cc_cfg->enb_cc_idx);
   }
 
diff --git a/srsenb/src/stack/mac/schedulers/sched_time_pf.cc b/srsenb/src/stack/mac/schedulers/sched_time_pf.cc
index df4c912fc..4ed794185 100644
--- a/srsenb/src/stack/mac/schedulers/sched_time_pf.cc
+++ b/srsenb/src/stack/mac/schedulers/sched_time_pf.cc
@@ -16,9 +16,12 @@ namespace srsenb {
 
 using srslte::tti_point;
 
-sched_time_pf::sched_time_pf(const sched_cell_params_t& cell_params_)
+sched_time_pf::sched_time_pf(const sched_cell_params_t& cell_params_, const sched_interface::sched_args_t& sched_args)
 {
   cc_cfg = &cell_params_;
+  if (not sched_args.sched_policy_args.empty()) {
+    fairness_coeff = std::stof(sched_args.sched_policy_args);
+  }
 }
 
 void sched_time_pf::new_tti(std::map<uint16_t, sched_ue>& ue_db, sf_sched* tti_sched)
@@ -36,7 +39,7 @@ void sched_time_pf::new_tti(std::map<uint16_t, sched_ue>& ue_db, sf_sched* tti_s
   for (auto& u : ue_db) {
     auto it = ue_history_db.find(u.first);
     if (it == ue_history_db.end()) {
-      it = ue_history_db.insert(std::make_pair(u.first, ue_ctxt{u.first})).first;
+      it = ue_history_db.insert(std::make_pair(u.first, ue_ctxt{u.first, fairness_coeff})).first;
     }
     it->second.new_tti(*cc_cfg, u.second, tti_sched);
     if (it->second.dl_newtx_h != nullptr or it->second.dl_retx_h != nullptr) {
@@ -162,7 +165,7 @@ void sched_time_pf::ue_ctxt::new_tti(const sched_cell_params_t& cell, sched_ue&
     // calculate DL PF priority
     float r = ue.get_expected_dl_bitrate(ue_cc_idx) / 8;
     float R = dl_avg_rate();
-    dl_prio = (R != 0) ? r / R : (r == 0 ? 0 : std::numeric_limits<float>::max());
+    dl_prio = (R != 0) ? pow(r, fairness_coeff) / R : (r == 0 ? 0 : std::numeric_limits<float>::max());
   }
 
   // Calculate UL priority
@@ -177,24 +180,24 @@ void sched_time_pf::ue_ctxt::new_tti(const sched_cell_params_t& cell, sched_ue&
   }
 }
 
-void sched_time_pf::ue_ctxt::save_dl_alloc(uint32_t alloc_bytes, float alpha)
+void sched_time_pf::ue_ctxt::save_dl_alloc(uint32_t alloc_bytes, float exp_avg_alpha)
 {
-  if (dl_nof_samples < 1 / alpha) {
+  if (dl_nof_samples < 1 / exp_avg_alpha) {
     // fast start
     dl_avg_rate_ = dl_avg_rate_ + (alloc_bytes - dl_avg_rate_) / (dl_nof_samples + 1);
   } else {
-    dl_avg_rate_ = (1 - alpha) * dl_avg_rate_ + (alpha)*alloc_bytes;
+    dl_avg_rate_ = (1 - exp_avg_alpha) * dl_avg_rate_ + (exp_avg_alpha)*alloc_bytes;
   }
   dl_nof_samples++;
 }
 
-void sched_time_pf::ue_ctxt::save_ul_alloc(uint32_t alloc_bytes, float alpha)
+void sched_time_pf::ue_ctxt::save_ul_alloc(uint32_t alloc_bytes, float exp_avg_alpha)
 {
-  if (ul_nof_samples < 1 / alpha) {
+  if (ul_nof_samples < 1 / exp_avg_alpha) {
     // fast start
     ul_avg_rate_ = ul_avg_rate_ + (alloc_bytes - ul_avg_rate_) / (ul_nof_samples + 1);
   } else {
-    ul_avg_rate_ = (1 - alpha) * ul_avg_rate_ + (alpha)*alloc_bytes;
+    ul_avg_rate_ = (1 - exp_avg_alpha) * ul_avg_rate_ + (exp_avg_alpha)*alloc_bytes;
   }
   ul_nof_samples++;
 }
diff --git a/srsenb/src/stack/mac/schedulers/sched_time_rr.cc b/srsenb/src/stack/mac/schedulers/sched_time_rr.cc
index baa7a5de7..b7b7660fc 100644
--- a/srsenb/src/stack/mac/schedulers/sched_time_rr.cc
+++ b/srsenb/src/stack/mac/schedulers/sched_time_rr.cc
@@ -15,7 +15,7 @@ namespace srsenb {
 
-sched_time_rr::sched_time_rr(const sched_cell_params_t& cell_params_)
+sched_time_rr::sched_time_rr(const sched_cell_params_t& cell_params_, const sched_interface::sched_args_t& sched_args)
 {
   cc_cfg = &cell_params_;
 }
 
@@ -65,9 +65,13 @@ void sched_time_rr::sched_dl_newtxs(std::map<uint16_t, sched_ue>& ue_db, sf_sche
     if (iter == ue_db.end()) {
       iter = ue_db.begin(); // wrap around
     }
-    sched_ue& user = iter->second;
+    sched_ue& user      = iter->second;
+    int       ue_cc_idx = user.enb_to_ue_cc_idx(cc_cfg->enb_cc_idx);
+    if (ue_cc_idx < 0) {
+      continue;
+    }
     const dl_harq_proc* h        = get_dl_newtx_harq(user, tti_sched);
-    rbg_interval        req_rbgs = user.get_required_dl_rbgs(user.enb_to_ue_cc_idx(cc_cfg->enb_cc_idx));
+    rbg_interval        req_rbgs = user.get_required_dl_rbgs(ue_cc_idx);
     // Check if there is an empty harq for the newtx
     if (h == nullptr or req_rbgs.stop() == 0) {
       continue;
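
In this patch the fairness coefficient only enters the DL metric: the priority becomes r^fairness_coeff / R, where r is the UE's expected instantaneous rate and R its exponentially averaged served rate (with an arithmetic-mean "fast start" until 1/alpha samples have been collected). A coefficient of 1 reproduces classic proportional fair; larger values weight the instantaneous rate more heavily, trading fairness for throughput, which is what the default sched_policy_args = "2" selects. Below is a minimal standalone sketch of that metric and averaging logic; it is illustrative only, and the UeRateTracker type and the example numbers are hypothetical, not srsRAN code.

```cpp
#include <cmath>
#include <cstdint>
#include <cstdio>
#include <initializer_list>
#include <limits>

// Hypothetical standalone mirror of the logic in sched_time_pf.cc (not srsRAN code).
struct UeRateTracker {
  float    fairness_coeff = 2.0f; // exponent applied to the instantaneous rate r
  float    avg_rate       = 0;    // exponentially averaged past allocation (R), bytes per TTI
  uint32_t nof_samples    = 0;

  // Same shape as ue_ctxt::save_dl_alloc(): arithmetic mean while few samples
  // exist ("fast start"), then an exponential moving average with weight alpha.
  void save_alloc(uint32_t alloc_bytes, float exp_avg_alpha)
  {
    if (nof_samples < 1 / exp_avg_alpha) {
      avg_rate += (alloc_bytes - avg_rate) / (nof_samples + 1);
    } else {
      avg_rate = (1 - exp_avg_alpha) * avg_rate + exp_avg_alpha * alloc_bytes;
    }
    nof_samples++;
  }

  // PF priority r^fairness_coeff / R, with the same zero handling as the patch.
  float priority(float r) const
  {
    float R = (nof_samples == 0) ? 0 : avg_rate;
    if (R != 0) {
      return std::pow(r, fairness_coeff) / R;
    }
    return (r == 0) ? 0 : std::numeric_limits<float>::max();
  }
};

int main()
{
  // Two hypothetical UEs: one near the cell centre (high rate), one at the edge.
  for (float coeff : {1.0f, 2.0f}) {
    UeRateTracker centre;
    UeRateTracker edge;
    centre.fairness_coeff = coeff;
    edge.fairness_coeff   = coeff;

    // Warm up the averages with 50 TTIs of past allocations, alpha = 0.05.
    for (int i = 0; i < 50; ++i) {
      centre.save_alloc(8000, 0.05f);
      edge.save_alloc(1000, 0.05f);
    }

    // With coeff = 1 both UEs end up with the same priority (classic PF);
    // with coeff = 2 the better-channel UE is favoured roughly by its rate ratio.
    std::printf("coeff=%.0f: centre prio=%.1f, edge prio=%.1f\n",
                coeff, centre.priority(8000), edge.priority(1000));
  }
  return 0;
}
```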