implemented logical channel prioritization in DL scheduler

master
Francisco Paisana 4 years ago
parent ca6566ad46
commit 9b10acff06

@ -105,10 +105,10 @@ public:
};
struct ue_bearer_cfg_t {
int priority = 0;
int bsd = 0;
int pbr = 0;
int group = 0;
int priority = 1;
uint32_t bsd = 1000; // msec
uint32_t pbr = -1;
int group = 0;
enum direction_t { IDLE = 0, UL, DL, BOTH } direction = IDLE;
};

@ -22,12 +22,22 @@
#ifndef SRSLTE_SCHEDULER_COMMON_H
#define SRSLTE_SCHEDULER_COMMON_H
#include "srslte/adt/interval.h"
#include "srslte/adt/bounded_bitset.h"
#include "srslte/adt/interval.h"
#include "srslte/interfaces/sched_interface.h"
namespace srsenb {
/***********************
* Constants
**********************/
constexpr uint32_t tti_duration_ms = 1000;
/***********************
* Helper Types
**********************/
//! Struct used to store possible CCE locations.
struct sched_dci_cce_t {
uint32_t cce_start[4][6]; ///< Stores starting CCE for each aggr level index and CCE location index

@ -99,8 +99,13 @@ const char* to_string(sched_interface::ue_bearer_cfg_t::direction_t dir);
class lch_manager
{
constexpr static uint32_t pbr_infinity = -1;
constexpr static uint32_t MAX_LC = sched_interface::MAX_LC;
public:
void set_cfg(const sched_interface::ue_cfg_t& cfg_);
void new_tti();
void config_lcid(uint32_t lcg_id, const sched_interface::ue_bearer_cfg_t& bearer_cfg);
void ul_bsr(uint8_t lcg_id, uint32_t bsr);
void ul_buffer_add(uint8_t lcid, uint32_t bytes);
@ -112,22 +117,27 @@ public:
bool is_bearer_ul(uint32_t lcid) const;
bool is_bearer_dl(uint32_t lcid) const;
int get_dl_tx_total(uint32_t lcid) const { return get_dl_tx(lcid) + get_dl_retx(lcid); }
int get_dl_tx(uint32_t lcid) const;
int get_dl_retx(uint32_t lcid) const;
int get_bsr(uint32_t lcid) const;
int get_max_prio_lcid() const;
std::string get_bsr_text() const;
private:
struct ue_bearer_t {
sched_interface::ue_bearer_cfg_t cfg = {};
int buf_tx = 0;
int buf_retx = 0;
sched_interface::ue_bearer_cfg_t cfg = {};
int bucket_size = 0;
int buf_tx = 0;
int buf_retx = 0;
int Bj = 0;
};
int alloc_retx_bytes(uint8_t lcid, uint32_t rem_bytes);
int alloc_tx_bytes(uint8_t lcid, uint32_t rem_bytes);
size_t prio_idx = 0;
srslte::log_ref log_h{"MAC"};
std::array<ue_bearer_t, sched_interface::MAX_LC> lch = {};
std::array<int, 4> lcg_bsr = {};
@ -139,15 +149,17 @@ private:
class sched_ue
{
public:
sched_ue();
void reset();
void init(uint16_t rnti, const std::vector<sched_cell_params_t>& cell_list_params_);
void new_tti(srslte::tti_point new_tti);
/*************************************************************
*
* FAPI-like Interface
*
************************************************************/
sched_ue();
void reset();
void phy_config_enabled(uint32_t tti, bool enabled);
void init(uint16_t rnti, const std::vector<sched_cell_params_t>& cell_list_params_);
void set_cfg(const sched_interface::ue_cfg_t& cfg);
void set_bearer_cfg(uint32_t lc_id, srsenb::sched_interface::ue_bearer_cfg_t* cfg);
@ -231,7 +243,7 @@ public:
bool needs_cqi(uint32_t tti, uint32_t cc_idx, bool will_send = false);
uint32_t get_max_retx();
bool pucch_sr_collision(uint32_t current_tti, uint32_t n_cce);
bool pucch_sr_collision(uint32_t tti, uint32_t n_cce);
private:
bool is_sr_triggered();
@ -298,7 +310,7 @@ private:
bool phy_config_dedicated_enabled = false;
srslte::tti_point last_tti;
srslte::tti_point current_tti;
std::vector<cc_sched_ue> carriers; ///< map of UE CellIndex to carrier configuration
// Control Element Command queue

@ -432,6 +432,12 @@ void sched::new_tti(tti_point tti_rx)
// Generate sched results for all CCs, if not yet generated
for (size_t cc_idx = 0; cc_idx < carrier_schedulers.size(); ++cc_idx) {
if (not is_generated(tti_rx, cc_idx)) {
// Setup tti-specific vars of the UE
for (auto& user : ue_db) {
user.second.new_tti(tti_rx);
}
// Generate carrier scheduling result
carrier_schedulers[cc_idx]->generate_tti_result(tti_rx);
}
}

@ -136,10 +136,10 @@ void sched_ue::set_cfg(const sched_interface::ue_cfg_t& cfg_)
if (ue_idx >= prev_supported_cc_list.size()) {
// New carrier needs to be added
carriers.emplace_back(cfg, (*cell_params_list)[cc_cfg.enb_cc_idx], rnti, ue_idx, last_tti);
carriers.emplace_back(cfg, (*cell_params_list)[cc_cfg.enb_cc_idx], rnti, ue_idx, current_tti);
} else if (cc_cfg.enb_cc_idx != prev_supported_cc_list[ue_idx].enb_cc_idx) {
// One carrier was added in the place of another
carriers[ue_idx] = cc_sched_ue{cfg, (*cell_params_list)[cc_cfg.enb_cc_idx], rnti, ue_idx, last_tti};
carriers[ue_idx] = cc_sched_ue{cfg, (*cell_params_list)[cc_cfg.enb_cc_idx], rnti, ue_idx, current_tti};
if (ue_idx == 0) {
log_h->info("SCHED: rnti=0x%x PCell is now enb_cc_idx=%d.\n", rnti, cc_cfg.enb_cc_idx);
}
@ -172,6 +172,13 @@ void sched_ue::reset()
}
}
/// Per-TTI entry point for UE-specific bookkeeping, invoked by the scheduler
/// before generating the TTI result for each carrier.
void sched_ue::new_tti(srslte::tti_point new_tti)
{
  // Refill the logical-channel token buckets (Bj) before any DL allocation
  // happens in this TTI. Independent of the current_tti update below.
  lch_handler.new_tti();
  current_tti = new_tti;
}
/*******************************************************
*
* FAPI-like main scheduler interface.
@ -241,12 +248,12 @@ void sched_ue::unset_sr()
sr = false;
}
bool sched_ue::pucch_sr_collision(uint32_t current_tti, uint32_t n_cce)
bool sched_ue::pucch_sr_collision(uint32_t tti, uint32_t n_cce)
{
if (!phy_config_dedicated_enabled) {
return false;
}
if (cfg.pucch_cfg.sr_configured && srslte_ue_ul_sr_send_tti(&cfg.pucch_cfg, current_tti)) {
if (cfg.pucch_cfg.sr_configured && srslte_ue_ul_sr_send_tti(&cfg.pucch_cfg, tti)) {
return (n_cce + cfg.pucch_cfg.N_pucch_1) == cfg.pucch_cfg.n_pucch_sr;
}
return false;
@ -1110,12 +1117,11 @@ uint32_t sched_ue::get_aggr_level(uint32_t ue_cc_idx, uint32_t nof_bits)
void sched_ue::finish_tti(const tti_params_t& tti_params, uint32_t enb_cc_idx)
{
last_tti = tti_point{tti_params.tti_rx};
cc_sched_ue* c = find_ue_carrier(enb_cc_idx);
if (c != nullptr) {
// Check that scell state needs to change
c->finish_tti(last_tti);
c->finish_tti(current_tti);
}
}
@ -1481,6 +1487,18 @@ void lch_manager::set_cfg(const sched_interface::ue_cfg_t& cfg)
}
}
void lch_manager::new_tti()
{
prio_idx++;
for (uint32_t lcid = 0; lcid < sched_interface::MAX_LC; ++lcid) {
if (is_bearer_active(lcid)) {
if (lch[lcid].cfg.pbr != pbr_infinity) {
lch[lcid].Bj = std::min(lch[lcid].Bj + (int)(lch[lcid].cfg.pbr * tti_duration_ms), lch[lcid].bucket_size);
}
}
}
}
void lch_manager::config_lcid(uint32_t lc_id, const sched_interface::ue_bearer_cfg_t& bearer_cfg)
{
if (lc_id >= sched_interface::MAX_LC) {
@ -1497,6 +1515,13 @@ void lch_manager::config_lcid(uint32_t lc_id, const sched_interface::ue_bearer_c
if (not is_equal) {
lch[lc_id].cfg = bearer_cfg;
if (lch[lc_id].cfg.pbr == pbr_infinity) {
lch[lc_id].bucket_size = std::numeric_limits<int>::max();
lch[lc_id].Bj = std::numeric_limits<int>::max();
} else {
lch[lc_id].bucket_size = lch[lc_id].cfg.bsd * lch[lc_id].cfg.pbr;
lch[lc_id].Bj = 0;
}
Info("SCHED: bearer configured: lc_id=%d, mode=%s, prio=%d\n",
lc_id,
to_string(lch[lc_id].cfg.direction),
@ -1535,21 +1560,65 @@ void lch_manager::dl_buffer_state(uint8_t lcid, uint32_t tx_queue, uint32_t retx
Debug("SCHED: DL lcid=%d buffer_state=%d,%d\n", lcid, tx_queue, retx_queue);
}
/* Allocates first available RLC PDU */
/// Selects the logical channel to serve next in DL, per LCP rules.
/// Returns the chosen lcid, or -1 if no bearer has pending DL data.
int lch_manager::get_max_prio_lcid() const
{
  int min_prio_val = std::numeric_limits<int>::max(), prio_lcid = -1;

  // Pass 1: among bearers with pending DL data AND remaining bucket budget
  // (Bj > 0), pick the lowest priority value (lower value == higher priority).
  // Strict '<' means ties go to the lowest lcid.
  for (uint32_t lcid = 0; lcid < MAX_LC; ++lcid) {
    if (get_dl_tx_total(lcid) > 0 and lch[lcid].Bj > 0 and lch[lcid].cfg.priority < min_prio_val) {
      min_prio_val = lch[lcid].cfg.priority;
      prio_lcid = lcid;
    }
  }
  if (prio_lcid < 0) {
    // disregard Bj value in selection of lcid
    // Pass 2: no bearer had budget left. Collect every bearer with pending
    // data that shares the minimum priority value.
    size_t nof_lcids = 0;
    std::array<uint32_t, MAX_LC> chosen_lcids = {};
    for (uint32_t lcid = 0; lcid < MAX_LC; ++lcid) {
      if (get_dl_tx_total(lcid) > 0) {
        if (lch[lcid].cfg.priority < min_prio_val) {
          min_prio_val = lch[lcid].cfg.priority;
          chosen_lcids[0] = lcid;
          nof_lcids = 1;
        } else if (lch[lcid].cfg.priority == min_prio_val) {
          chosen_lcids[nof_lcids++] = lcid;
        }
      }
    }
    // logical channels with equal priority should be served equally
    // (round-robin driven by prio_idx, which new_tti() advances every TTI)
    if (nof_lcids > 0) {
      prio_lcid = chosen_lcids[prio_idx % nof_lcids];
    }
  }
  return prio_lcid;
}
/// Allocates first available RLC PDU
int lch_manager::alloc_rlc_pdu(sched_interface::dl_sched_pdu_t* rlc_pdu, int rem_bytes)
{
// TODO: Implement lcid priority (now lowest index is lowest priority)
int alloc_bytes = 0;
int i = 0;
for (i = 0; i < sched_interface::MAX_LC and alloc_bytes == 0; i++) {
alloc_bytes = alloc_retx_bytes(i, rem_bytes);
if (alloc_bytes == 0) {
alloc_bytes = alloc_tx_bytes(i, rem_bytes);
}
int lcid = get_max_prio_lcid();
if (lcid < 0) {
return alloc_bytes;
}
// try first to allocate retxs
alloc_bytes = alloc_retx_bytes(lcid, rem_bytes);
// if no retx alloc, try newtx
if (alloc_bytes == 0) {
alloc_bytes = alloc_tx_bytes(lcid, rem_bytes);
}
if (alloc_bytes > 0) {
// Update Bj
if (lch[lcid].cfg.pbr != pbr_infinity) {
lch[lcid].Bj -= alloc_bytes;
}
rlc_pdu->nbytes = alloc_bytes;
rlc_pdu->lcid = i - 1;
rlc_pdu->lcid = lcid;
Debug("SCHED: Allocated lcid=%d, nbytes=%d, tbs_bytes=%d\n", rlc_pdu->lcid, rlc_pdu->nbytes, rem_bytes);
}
return alloc_bytes;

@ -27,6 +27,35 @@ namespace srsenb {
using namespace asn1::rrc;
// TS 36.331 9.1.1.2 - CCCH configuration
/// Default scheduler bearer config for CCCH (SRB0), per TS 36.331 9.1.1.2:
/// priority 1, PBR and BSD "infinity".
sched_interface::ue_bearer_cfg_t get_bearer_default_ccch_config()
{
  sched_interface::ue_bearer_cfg_t bearer = {};
  bearer.priority                         = 1;
  // pbr/bsd are uint32_t; the scheduler treats the all-ones value as the
  // "infinity" sentinel (see lch_manager::pbr_infinity). Make the wrap-around
  // explicit instead of relying on implicit signed->unsigned conversion of -1.
  bearer.pbr   = static_cast<uint32_t>(-1);
  bearer.bsd   = static_cast<uint32_t>(-1);
  bearer.group = 0;
  return bearer;
}
// TS 36.331 9.2.1.1 - SRB1
/// Default scheduler bearer config for SRB1 (TS 36.331 9.2.1.1); identical to
/// the CCCH defaults (priority 1, infinite PBR/BSD).
sched_interface::ue_bearer_cfg_t get_bearer_default_srb1_config()
{
  sched_interface::ue_bearer_cfg_t srb1_cfg = get_bearer_default_ccch_config();
  return srb1_cfg;
}
// TS 36.331 9.2.1.2 - SRB2
/// Default scheduler bearer config for SRB2 (TS 36.331 9.2.1.2): same as the
/// SRB1 defaults but with scheduling priority lowered to 3
/// (lower value == higher priority).
sched_interface::ue_bearer_cfg_t get_bearer_default_srb2_config()
{
  auto cfg     = get_bearer_default_srb1_config();
  cfg.priority = 3;
  return cfg;
}
/***************************
* MAC Controller class
**************************/
rrc::ue::mac_controller::mac_controller(rrc::ue* rrc_ue_, const sched_interface::ue_cfg_t& sched_ue_cfg) :
log_h("RRC"),
rrc_ue(rrc_ue_),
@ -202,24 +231,55 @@ void rrc::ue::mac_controller::handle_con_reconf_with_mobility()
void rrc::ue::mac_controller::apply_current_bearers_cfg()
{
srsenb::sched_interface::ue_bearer_cfg_t bearer_cfg = {};
current_sched_ue_cfg.ue_bearers = {};
current_sched_ue_cfg.ue_bearers[0].direction = sched_interface::ue_bearer_cfg_t::BOTH;
current_sched_ue_cfg.ue_bearers = {};
current_sched_ue_cfg.ue_bearers[0].direction = sched_interface::ue_bearer_cfg_t::BOTH;
// Configure SRBs
const srb_to_add_mod_list_l& srbs = rrc_ue->bearer_list.get_established_srbs();
for (const srb_to_add_mod_s& srb : srbs) {
bearer_cfg.direction = srsenb::sched_interface::ue_bearer_cfg_t::BOTH;
bearer_cfg.group = 0;
current_sched_ue_cfg.ue_bearers[srb.srb_id] = bearer_cfg;
auto& bcfg = current_sched_ue_cfg.ue_bearers[srb.srb_id];
switch (srb.srb_id) {
case 0:
bcfg = get_bearer_default_ccch_config();
break;
case 1:
bcfg = get_bearer_default_srb1_config();
break;
case 2:
bcfg = get_bearer_default_srb2_config();
break;
default:
bcfg = {};
}
bcfg.direction = srsenb::sched_interface::ue_bearer_cfg_t::BOTH;
if (srb.lc_ch_cfg_present and
srb.lc_ch_cfg.type().value == srb_to_add_mod_s::lc_ch_cfg_c_::types_opts::explicit_value and
srb.lc_ch_cfg.explicit_value().ul_specific_params_present) {
// NOTE: Use UL values for DL prioritization as well
auto& ul_params = srb.lc_ch_cfg.explicit_value().ul_specific_params;
bcfg.pbr = ul_params.prioritised_bit_rate.to_number();
bcfg.priority = ul_params.prio;
bcfg.bsd = ul_params.bucket_size_dur.to_number();
if (ul_params.lc_ch_group_present) {
bcfg.group = ul_params.lc_ch_group;
}
}
}
// Configure DRBs
const drb_to_add_mod_list_l& drbs = rrc_ue->bearer_list.get_established_drbs();
for (const drb_to_add_mod_s& drb : drbs) {
bearer_cfg.direction = sched_interface::ue_bearer_cfg_t::BOTH;
bearer_cfg.group = drb.lc_ch_cfg.ul_specific_params.lc_ch_group;
current_sched_ue_cfg.ue_bearers[drb.lc_ch_id] = bearer_cfg;
auto& bcfg = current_sched_ue_cfg.ue_bearers[drb.lc_ch_id];
bcfg = {};
bcfg.direction = sched_interface::ue_bearer_cfg_t::BOTH;
bcfg.group = 1;
bcfg.priority = 4;
if (drb.lc_ch_cfg_present and drb.lc_ch_cfg.ul_specific_params_present) {
bcfg.group = drb.lc_ch_cfg.ul_specific_params.lc_ch_group;
bcfg.pbr = drb.lc_ch_cfg.ul_specific_params.prioritised_bit_rate.to_number();
bcfg.priority = drb.lc_ch_cfg.ul_specific_params.prio;
bcfg.bsd = drb.lc_ch_cfg.ul_specific_params.bucket_size_dur;
}
}
}

Loading…
Cancel
Save