Added the ability to choose scheduling policy from enb.conf

- The two options provided at the moment are time-domain RR and PF
master
Francisco authored 4 years ago; committed by Andre Puschmann
parent b71e8075f4
commit 8fb3ea6922

@@ -44,6 +44,7 @@ public:
} cell_cfg_sib_t;
struct sched_args_t {
std::string sched_policy = "time_pf";
int pdsch_mcs = -1;
int pdsch_max_mcs = 28;
int pusch_mcs = -1;
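
The struct gains a sched_policy string that defaults to "time_pf". A minimal validation sketch for that string before it is handed to the carrier scheduler (hypothetical helper, not part of this commit):

    #include <string>

    // Hypothetical helper: accept only the two policy names this change introduces.
    inline bool is_supported_sched_policy(const std::string& policy)
    {
      return policy == "time_rr" || policy == "time_pf";
    }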

@@ -143,6 +143,7 @@ enable = false
#####################################################################
# Scheduler configuration options
#
# sched_policy: User MAC scheduling policy (E.g. time_rr, time_pf)
# max_aggr_level: Optional maximum aggregation level index (l=log2(L) can be 0, 1, 2 or 3)
# pdsch_mcs: Optional fixed PDSCH MCS (ignores reported CQIs if specified)
# pdsch_max_mcs: Optional PDSCH MCS limit
@@ -153,6 +154,7 @@ enable = false
#
#####################################################################
[scheduler]
#sched_policy = time_pf
#max_aggr_level = -1
#pdsch_mcs = -1
#pdsch_max_mcs = -1
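
To switch a deployment to round-robin, the example section above suggests uncommenting the key and setting it to time_rr (illustrative snippet; with the line left commented out, the time_pf default applies):

    [scheduler]
    sched_policy = time_rr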

@@ -51,30 +51,6 @@ inline bool is_in_tti_interval(uint32_t tti, uint32_t tti1, uint32_t tti2)
class sched : public sched_interface
{
public:
/*************************************************************
*
* Scheduling metric interface definition
*
************************************************************/
class metric_dl
{
public:
virtual ~metric_dl() = default;
/* Virtual methods for user metric calculation */
virtual void set_params(const sched_cell_params_t& cell_params_) = 0;
virtual void sched_users(std::map<uint16_t, sched_ue>& ue_db, dl_sf_sched_itf* tti_sched) = 0;
};
class metric_ul
{
public:
virtual ~metric_ul() = default;
/* Virtual methods for user metric calculation */
virtual void set_params(const sched_cell_params_t& cell_params_) = 0;
virtual void sched_users(std::map<uint16_t, sched_ue>& ue_db, ul_sf_sched_itf* tti_sched) = 0;
};
/*************************************************************
*
* FAPI-like Interface
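
The removed metric_dl/metric_ul pair is superseded by per-policy classes (sched_time_rr, sched_time_pf) that carrier_cfg() instantiates further down. A hypothetical sketch of the combined contract such a policy fulfils, with simplified stand-in types (the real base class is not part of this diff):

    #include <cstdint>
    #include <map>

    struct sched_ue_stub {}; // stand-in for srsenb::sched_ue, illustration only
    class sf_sched;          // the subframe scheduler the policy allocates through

    // Hypothetical combined DL+UL policy interface replacing metric_dl/metric_ul.
    class sched_policy_itf
    {
    public:
      virtual ~sched_policy_itf() = default;
      virtual void sched_dl_users(std::map<uint16_t, sched_ue_stub>& ue_db, sf_sched* tti_sched) = 0;
      virtual void sched_ul_users(std::map<uint16_t, sched_ue_stub>& ue_db, sf_sched* tti_sched) = 0;
    };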

@@ -191,32 +191,11 @@ private:
prbmask_t ul_mask = {};
};
//! generic interface used by DL scheduler algorithm
class dl_sf_sched_itf
{
public:
virtual alloc_outcome_t alloc_dl_user(sched_ue* user, const rbgmask_t& user_mask, uint32_t pid) = 0;
virtual const rbgmask_t& get_dl_mask() const = 0;
virtual uint32_t get_tti_tx_dl() const = 0;
virtual uint32_t get_nof_ctrl_symbols() const = 0;
virtual bool is_dl_alloc(uint16_t rnti) const = 0;
};
//! generic interface used by UL scheduler algorithm
class ul_sf_sched_itf
{
public:
virtual alloc_outcome_t alloc_ul_user(sched_ue* user, prb_interval alloc) = 0;
virtual const prbmask_t& get_ul_mask() const = 0;
virtual uint32_t get_tti_tx_ul() const = 0;
virtual bool is_ul_alloc(uint16_t rnti) const = 0;
};
/** Description: Stores the RAR, broadcast, paging, DL data, UL data allocations for the given subframe
* Converts the stored allocations' metadata to the scheduler DL/UL result
* Handles the generation of DCI formats
*/
class sf_sched : public dl_sf_sched_itf, public ul_sf_sched_itf
class sf_sched
{
public:
struct ctrl_alloc_t {
@@ -290,21 +269,19 @@ public:
// compute DCIs and generate dl_sched_result/ul_sched_result for a given TTI
void generate_sched_results(sched_ue_list& ue_db);
// dl_tti_sched itf
alloc_outcome_t alloc_dl_user(sched_ue* user, const rbgmask_t& user_mask, uint32_t pid) final;
uint32_t get_tti_tx_dl() const final { return tti_params.tti_tx_dl; }
uint32_t get_nof_ctrl_symbols() const final;
const rbgmask_t& get_dl_mask() const final { return tti_alloc.get_dl_mask(); }
// ul_tti_sched itf
alloc_outcome_t alloc_ul_user(sched_ue* user, prb_interval alloc) final;
const prbmask_t& get_ul_mask() const final { return tti_alloc.get_ul_mask(); }
uint32_t get_tti_tx_ul() const final { return tti_params.tti_tx_ul; }
alloc_outcome_t alloc_dl_user(sched_ue* user, const rbgmask_t& user_mask, uint32_t pid);
uint32_t get_tti_tx_dl() const { return tti_params.tti_tx_dl; }
uint32_t get_nof_ctrl_symbols() const;
const rbgmask_t& get_dl_mask() const { return tti_alloc.get_dl_mask(); }
alloc_outcome_t alloc_ul_user(sched_ue* user, prb_interval alloc);
const prbmask_t& get_ul_mask() const { return tti_alloc.get_ul_mask(); }
uint32_t get_tti_tx_ul() const { return tti_params.tti_tx_ul; }
// getters
uint32_t get_tti_rx() const { return tti_params.tti_rx; }
const tti_params_t& get_tti_params() const { return tti_params; }
bool is_dl_alloc(uint16_t rnti) const final;
bool is_ul_alloc(uint16_t rnti) const final;
bool is_dl_alloc(uint16_t rnti) const;
bool is_ul_alloc(uint16_t rnti) const;
uint32_t get_enb_cc_idx() const { return cc_cfg->enb_cc_idx; }
private:
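
With dl_sf_sched_itf/ul_sf_sched_itf gone, a policy now calls the allocator directly on sf_sched. A hedged sketch of what a DL pass over the UE map could look like, written as a template so the real types stay out of the example (UeMap stands for std::map<uint16_t, sched_ue>, Sched for sf_sched):

    #include <cstdint>

    // Illustrative only: try to allocate each not-yet-served user in this TTI.
    // A real policy would compute user_mask from its metric (RR turn, PF ratio)
    // instead of handing every user the same mask.
    template <typename UeMap, typename Sched, typename Mask>
    void sched_dl_pass(UeMap& ue_db, Sched* tti_sched, const Mask& user_mask, uint32_t pid)
    {
      for (auto& it : ue_db) {
        uint16_t rnti = it.first;
        if (tti_sched->is_dl_alloc(rnti)) {
          continue; // at most one DL grant per user per TTI
        }
        tti_sched->alloc_dl_user(&it.second, user_mask, pid);
      }
    }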

@@ -13,8 +13,8 @@
#ifndef SRSLTE_SCHED_INTERFACE_HELPERS_H
#define SRSLTE_SCHED_INTERFACE_HELPERS_H
#include "srslte/interfaces/sched_interface.h"
#include "srslte/common/logmap.h"
#include "srslte/interfaces/sched_interface.h"
namespace srsenb {

@@ -129,7 +129,8 @@ void parse_args(all_args_t* args, int argc, char* argv[])
("pcap.s1ap_enable", bpo::value<bool>(&args->stack.s1ap_pcap.enable)->default_value(false), "Enable S1AP packet captures for wireshark")
("pcap.s1ap_filename", bpo::value<string>(&args->stack.s1ap_pcap.filename)->default_value("enb_s1ap.pcap"), "S1AP layer capture filename")
/* MCS section */
/* Scheduling section */
("scheduler.policy", bpo::value<string>(&args->stack.mac.sched.sched_policy)->default_value("time_pf"), "DL and UL data scheduling policy (E.g. time_rr, time_pf)")
("scheduler.pdsch_mcs", bpo::value<int>(&args->stack.mac.sched.pdsch_mcs)->default_value(-1), "Optional fixed PDSCH MCS (ignores reported CQIs if specified)")
("scheduler.pdsch_max_mcs", bpo::value<int>(&args->stack.mac.sched.pdsch_max_mcs)->default_value(-1), "Optional PDSCH MCS limit")
("scheduler.pusch_mcs", bpo::value<int>(&args->stack.mac.sched.pusch_mcs)->default_value(-1), "Optional fixed PUSCH MCS (ignores reported CQIs if specified)")

@@ -247,7 +247,8 @@ int sched::bearer_ue_rem(uint16_t rnti, uint32_t lc_id)
uint32_t sched::get_dl_buffer(uint16_t rnti)
{
uint32_t ret = SRSLTE_ERROR;
ue_db_access(rnti, [&ret](sched_ue& ue) { ret = ue.get_pending_dl_rlc_data(); }, __PRETTY_FUNCTION__);
ue_db_access(
rnti, [&ret](sched_ue& ue) { ret = ue.get_pending_dl_rlc_data(); }, __PRETTY_FUNCTION__);
return ret;
}
@@ -255,7 +256,8 @@ uint32_t sched::get_ul_buffer(uint16_t rnti)
{
// TODO: Check if correct use of last_tti
uint32_t ret = SRSLTE_ERROR;
ue_db_access(rnti,
ue_db_access(
rnti,
[this, &ret](sched_ue& ue) { ret = ue.get_pending_ul_new_data(last_tti.to_uint(), -1); },
__PRETTY_FUNCTION__);
return ret;
@@ -274,7 +276,8 @@ int sched::dl_mac_buffer_state(uint16_t rnti, uint32_t ce_code, uint32_t nof_cmd
int sched::dl_ack_info(uint32_t tti, uint16_t rnti, uint32_t enb_cc_idx, uint32_t tb_idx, bool ack)
{
int ret = -1;
ue_db_access(rnti, [&](sched_ue& ue) { ret = ue.set_ack_info(tti, enb_cc_idx, tb_idx, ack); }, __PRETTY_FUNCTION__);
ue_db_access(
rnti, [&](sched_ue& ue) { ret = ue.set_ack_info(tti, enb_cc_idx, tb_idx, ack); }, __PRETTY_FUNCTION__);
return ret;
}
@@ -322,12 +325,14 @@ int sched::ul_buffer_add(uint16_t rnti, uint32_t lcid, uint32_t bytes)
int sched::ul_phr(uint16_t rnti, int phr)
{
return ue_db_access(rnti, [phr](sched_ue& ue) { ue.ul_phr(phr); }, __PRETTY_FUNCTION__);
return ue_db_access(
rnti, [phr](sched_ue& ue) { ue.ul_phr(phr); }, __PRETTY_FUNCTION__);
}
int sched::ul_sr_info(uint32_t tti, uint16_t rnti)
{
return ue_db_access(rnti, [](sched_ue& ue) { ue.set_sr(); }, __PRETTY_FUNCTION__);
return ue_db_access(
rnti, [](sched_ue& ue) { ue.set_sr(); }, __PRETTY_FUNCTION__);
}
void sched::set_dl_tti_mask(uint8_t* tti_mask, uint32_t nof_sfs)
@@ -338,19 +343,22 @@ void sched::set_dl_tti_mask(uint8_t* tti_mask, uint32_t nof_sfs)
void sched::tpc_inc(uint16_t rnti)
{
ue_db_access(rnti, [](sched_ue& ue) { ue.tpc_inc(); }, __PRETTY_FUNCTION__);
ue_db_access(
rnti, [](sched_ue& ue) { ue.tpc_inc(); }, __PRETTY_FUNCTION__);
}
void sched::tpc_dec(uint16_t rnti)
{
ue_db_access(rnti, [](sched_ue& ue) { ue.tpc_dec(); }, __PRETTY_FUNCTION__);
ue_db_access(
rnti, [](sched_ue& ue) { ue.tpc_dec(); }, __PRETTY_FUNCTION__);
}
std::array<int, SRSLTE_MAX_CARRIERS> sched::get_enb_ue_cc_map(uint16_t rnti)
{
std::array<int, SRSLTE_MAX_CARRIERS> ret{};
ret.fill(-1); // -1 for inactive & non-existent carriers
ue_db_access(rnti,
ue_db_access(
rnti,
[this, &ret](sched_ue& ue) {
for (size_t enb_cc_idx = 0; enb_cc_idx < carrier_schedulers.size(); ++enb_cc_idx) {
const cc_sched_ue* cc_ue = ue.find_ue_carrier(enb_cc_idx);
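
The hunks above are formatting-only churn around the ue_db_access() helper, which looks a UE up by RNTI and runs a lambda on it. A reduced standalone sketch of that access pattern (simplified types; not the srsenb implementation):

    #include <cstdint>
    #include <cstdio>
    #include <map>

    struct ue_stub {}; // stand-in for sched_ue, illustration only

    // Sketch of the ue_db_access() pattern: find the RNTI, apply the callable,
    // and log the calling function (__PRETTY_FUNCTION__ in the real calls) on a miss.
    template <typename Func>
    int ue_db_access_sketch(std::map<uint16_t, ue_stub>& ue_db, uint16_t rnti, Func f, const char* caller)
    {
      auto it = ue_db.find(rnti);
      if (it == ue_db.end()) {
        std::printf("SCHED: user rnti=0x%x not found (%s)\n", rnti, caller);
        return -1;
      }
      f(it->second);
      return 0;
    }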

@@ -288,8 +288,13 @@ void sched::carrier_sched::carrier_cfg(const sched_cell_params_t& cell_params_)
ra_sched_ptr.reset(new ra_sched{*cc_cfg, *ue_db});
// Setup data scheduling algorithms
if (cell_params_.sched_cfg->sched_policy == "time_rr") {
sched_algo.reset(new sched_time_rr{*cc_cfg});
log_h->info("Using time-domain RR scheduling policy for cc=%d\n", cc_cfg->enb_cc_idx);
} else {
sched_algo.reset(new sched_time_pf{*cc_cfg});
// sched_algo.reset(new sched_time_rr{*cc_cfg});
log_h->info("Using time-domain PF scheduling policy for cc=%d\n", cc_cfg->enb_cc_idx);
}
// Initiate the tti_scheduler for each TTI
for (sf_sched& tti_sched : sf_scheds) {
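
carrier_cfg() maps the configured string onto a concrete policy object and falls back to PF for any other value. A self-contained sketch of the same name-to-factory dispatch, which would keep the selection readable if more policies were added later (hypothetical types; not srsenb code):

    #include <functional>
    #include <map>
    #include <memory>
    #include <string>

    struct policy_base { virtual ~policy_base() = default; }; // stand-ins, illustration only
    struct policy_time_rr : policy_base {};
    struct policy_time_pf : policy_base {};

    // Unknown names fall back to the PF default, mirroring the if/else above.
    std::unique_ptr<policy_base> make_policy(const std::string& name)
    {
      static const std::map<std::string, std::function<std::unique_ptr<policy_base>()>> factories = {
          {"time_rr", [] { return std::unique_ptr<policy_base>(new policy_time_rr{}); }},
          {"time_pf", [] { return std::unique_ptr<policy_base>(new policy_time_pf{}); }},
      };
      auto it = factories.find(name);
      return it != factories.end() ? it->second() : std::unique_ptr<policy_base>(new policy_time_pf{});
    }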

@@ -51,10 +51,7 @@ ue_ctxt_test::ue_ctxt_test(uint16_t rnti_,
srslte::tti_point prach_tti_,
const ue_ctxt_test_cfg& cfg_,
ue_sim& ue_ctxt_) :
sim_cfg(cfg_),
rnti(rnti_),
current_tti_rx(prach_tti_),
ue_ctxt(&ue_ctxt_)
sim_cfg(cfg_), rnti(rnti_), current_tti_rx(prach_tti_), ue_ctxt(&ue_ctxt_)
{
set_cfg(cfg_.ue_cfg);
}
