Integrated new sched_dl_cqi class in sched_ue_cell

- extend sched_dl_cqi interface to allow getting the TTI when the CQI was last updated
- extend sched_dl_cqi to quickly get the average CQI across the whole bandwidth
Francisco 4 years ago committed by Francisco Paisana
parent 16d05ff042
commit 84ad6dd8ee
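In short: the loose per-cell DL CQI fields in sched_ue_cell (dl_cqi, dl_cqi_tti_rx, dl_cqi_rx) move into a sched_dl_cqi context that also tracks subband reports. A minimal usage sketch of the extended interface (toy TTI/CQI values; a 50-PRB cell with K=4 subband reporting is assumed):

sched_dl_cqi cqi_ctxt(/* cell_nof_prb_ = */ 50, /* K_ = */ 4, /* init_dl_cqi = */ 1);

cqi_ctxt.cqi_wb_info(tti_point{100}, 7);    // wideband CQI=7 received at TTI 100
cqi_ctxt.cqi_sb_info(tti_point{104}, 0, 9); // subband 0 reports CQI=9 at TTI 104

if (cqi_ctxt.is_cqi_info_received()) {             // any positive CQI seen yet?
  tti_point last = cqi_ctxt.last_cqi_info_tti();   // TTI of last positive CQI
  int       avg  = cqi_ctxt.get_avg_cqi();         // average across the whole bandwidth
}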

@@ -130,21 +130,22 @@ struct formatter<srsran::tti_point> {
 namespace srsenb {
-using tti_point = srsran::tti_point;
+using tti_point    = srsran::tti_point;
+using tti_interval = srsran::tti_interval;
-inline srsran::tti_point to_tx_dl(srsran::tti_point t)
+inline tti_point to_tx_dl(tti_point t)
 {
   return t + TX_ENB_DELAY;
 }
-inline srsran::tti_point to_tx_ul(srsran::tti_point t)
+inline tti_point to_tx_ul(tti_point t)
 {
   return t + (TX_ENB_DELAY + FDD_HARQ_DELAY_DL_MS);
 }
-inline srsran::tti_point to_tx_dl_ack(srsran::tti_point t)
+inline tti_point to_tx_dl_ack(tti_point t)
 {
   return to_tx_ul(t);
 }
-inline srsran::tti_point to_tx_ul_ack(srsran::tti_point t)
+inline tti_point to_tx_ul_ack(tti_point t)
 {
   return to_tx_ul(t) + TX_ENB_DELAY;
 }
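For reference, these helpers shift a TTI at which something was received (tti_rx) to the TTI at which the corresponding transmission or ACK happens. A toy walk-through, assuming the usual srsRAN FDD constants TX_ENB_DELAY = 4 and FDD_HARQ_DELAY_DL_MS = 4:

tti_point t{100};  // TTI at which feedback/data was received at the eNB
to_tx_dl(t);       // TTI 104: earliest DL TX this event can influence
to_tx_ul(t);       // TTI 108: corresponding UL TX opportunity
to_tx_dl_ack(t);   // TTI 108: ACK timing for that DL TX (same as to_tx_ul)
to_tx_ul_ack(t);   // TTI 112: ACK for the UL TX, one eNB delay later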

@@ -95,6 +95,7 @@ struct prb_interval;
 struct rbg_interval : public srsran::interval<uint32_t> {
   using interval::interval;
   static rbg_interval rbgmask_to_rbgs(const rbgmask_t& mask);
+  static rbg_interval prbs_to_rbgs(const prb_interval& prbs, uint32_t cell_nof_prbs);
 };
 /// Struct to express a {min,...,max} range of PRBs

@@ -30,12 +30,12 @@ namespace srsenb {
 class sched_dl_cqi
 {
 public:
-  sched_dl_cqi(uint32_t cell_nof_prb_, uint32_t K_, float alpha = 0.1) :
+  sched_dl_cqi(uint32_t cell_nof_prb_, uint32_t K_, uint32_t init_dl_cqi = 1) :
     cell_nof_prb(cell_nof_prb_),
     cell_nof_rbg(cell_nof_prb_to_rbg(cell_nof_prb_)),
     K(K_),
-    wb_cqi_avg(alpha),
-    bp_list(nof_bandwidth_parts(cell_nof_prb_), bandwidth_part_context(alpha)),
+    wb_cqi_avg(init_dl_cqi),
+    bp_list(nof_bandwidth_parts(cell_nof_prb_), bandwidth_part_context(init_dl_cqi)),
     subband_cqi(srsran_cqi_hl_get_no_subbands(cell_nof_prb), 0)
   {
     srsran_assert(K <= 4, "K=%d outside of {0, 4}", K);
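The constructor change above replaces the exponential-averaging factor alpha with an explicit initial CQI used to seed the wideband average and every bandwidth part. A hedged instantiation sketch (argument names mirror the signature; values are illustrative):

sched_dl_cqi wb_only(/* cell_nof_prb_ = */ 50, /* K_ = */ 0);                        // wideband-only, init CQI 1
sched_dl_cqi with_sb(/* cell_nof_prb_ = */ 50, /* K_ = */ 4, /* init_dl_cqi = */ 5); // subband mode, optimistic start

K_ = 0 disables subband tracking entirely (see subband_cqi_enabled() below), which is exactly how sched_ue_cell constructs its context further down.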
@@ -43,12 +43,20 @@ public:
   void cqi_wb_info(tti_point tti, uint32_t cqi_value)
   {
+    if (cqi_value > 0) {
+      last_pos_cqi_tti = tti;
+    }
+
     last_wb_tti = tti;
     wb_cqi_avg = static_cast<float>(cqi_value);
   }

   void cqi_sb_info(tti_point tti, uint32_t sb_index, uint32_t cqi_value)
   {
+    if (cqi_value > 0) {
+      last_pos_cqi_tti = tti;
+    }
+
     uint32_t bp_idx = get_bp_index(sb_index);
     bp_list[bp_idx].last_feedback_tti = tti;
     bp_list[bp_idx].last_cqi_subband_idx = sb_index;
@@ -61,21 +69,42 @@ public:
     }
   }

+  /// Resets CQI to provided value
+  void reset_cqi(uint32_t dl_cqi)
+  {
+    last_pos_cqi_tti = {};
+    last_wb_tti = {};
+    wb_cqi_avg = dl_cqi;
+    for (auto& bp : bp_list) {
+      bp.cqi_val = dl_cqi;
+      bp.last_feedback_tti = {};
+    }
+  }
+
+  int get_avg_cqi() const { return get_grant_avg_cqi(rbg_interval(0, cell_nof_rbg)); }
+
   /// Get average CQI in given RBG interval
-  int get_rbg_grant_avg_cqi(rbg_interval interv) const
+  int get_grant_avg_cqi(rbg_interval interv) const
   {
     if (not subband_cqi_enabled()) {
       return static_cast<int>(wb_cqi_avg);
     }
-    float cqi = 0;
-    for (uint32_t rbg = interv.start(); rbg < interv.stop(); ++rbg) {
-      cqi += subband_cqi[rbg_to_sb_index(rbg)];
+    float    cqi     = 0;
+    uint32_t sbstart = rbg_to_sb_index(interv.start()), sbend = rbg_to_sb_index(interv.stop() - 1) + 1;
+    for (uint32_t sb = sbstart; sb < sbend; ++sb) {
+      cqi += subband_cqi[sb];
     }
-    return static_cast<int>(cqi / interv.length());
+    return static_cast<int>(cqi / (sbend - sbstart));
   }
+
+  /// Get average CQI in given PRB interval
+  int get_grant_avg_cqi(prb_interval prb_interv) const
+  {
+    return get_grant_avg_cqi(rbg_interval::prbs_to_rbgs(prb_interv, cell_nof_prb));
+  }
+
   /// Get average CQI in given RBG mask
-  int get_rbg_grant_avg_cqi(const rbgmask_t& mask) const
+  int get_grant_avg_cqi(const rbgmask_t& mask) const
   {
     if (not subband_cqi_enabled()) {
       return static_cast<int>(wb_cqi_avg);
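Worth noting: the rewritten interval overload averages per spanned subband rather than per RBG. The old loop added one term per RBG, so a subband covering several RBGs was weighted by how many of its RBGs the grant touched; the new loop visits each spanned subband exactly once. A toy comparison, assuming rbg_to_sb_index maps RBGs {0,1,2} to subbands {0,0,1} and subband_cqi = {6, 10}:

// grant = rbg_interval(0, 3)
// old: (6 + 6 + 10) / 3 = 7   (subband 0 counted twice)
// new: (6 + 10) / 2     = 8   (each spanned subband counted once)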
@@ -112,6 +141,10 @@ public:
   bool subband_cqi_enabled() const { return K > 0; }

+  bool is_cqi_info_received() const { return last_pos_cqi_tti.is_valid(); }
+
+  tti_point last_cqi_info_tti() const { return last_pos_cqi_tti; }

 private:
   static const uint32_t max_subband_size = 8;
   static const uint32_t max_nof_subbands = 13;
@@ -151,6 +184,8 @@ private:
     explicit bandwidth_part_context(float alpha) : cqi_val(alpha), last_cqi_subband_idx(max_nof_subbands) {}
   };

+  tti_point last_pos_cqi_tti;
+
   tti_point last_wb_tti;
   float wb_cqi_avg;

@@ -33,7 +33,7 @@ struct sched_ue_cell {
   void clear_feedback();
   void finish_tti(tti_point tti_rx);

-  void set_dl_cqi(tti_point tti_rx, uint32_t dl_cqi_);
+  void set_dl_wb_cqi(tti_point tti_rx, uint32_t dl_cqi_);

   bool configured() const { return ue_cc_idx >= 0; }
   int get_ue_cc_idx() const { return ue_cc_idx; }
@@ -58,23 +58,21 @@ struct sched_ue_cell {
   tpc tpc_fsm;

   /// UCI Feedback
-  uint32_t dl_ri = 0;
-  tti_point dl_ri_tti_rx{};
-  uint32_t dl_pmi = 0;
-  tti_point dl_pmi_tti_rx{};
-  uint32_t dl_cqi = 1;
-  tti_point dl_cqi_tti_rx{0};
-  uint32_t ul_cqi = 1;
-  tti_point ul_cqi_tti_rx{};
-  bool dl_cqi_rx = false;
+  const sched_dl_cqi& dl_cqi() const { return dl_cqi_ctxt; }
+  uint32_t dl_ri = 0;
+  tti_point dl_ri_tti_rx{};
+  uint32_t dl_pmi = 0;
+  tti_point dl_pmi_tti_rx{};
+  uint32_t ul_cqi = 1;
+  tti_point ul_cqi_tti_rx{};
   uint32_t max_mcs_dl = 28, max_mcs_ul = 28;
   uint32_t max_aggr_level = 3;
   int fixed_mcs_ul = 0, fixed_mcs_dl = 0;

 private:
-  srslog::basic_logger& logger;
   // args
+  srslog::basic_logger& logger;
   const sched_interface::ue_cfg_t* ue_cfg = nullptr;
   tti_point cfg_tti;
   int ue_cc_idx = -1;
@@ -82,6 +80,8 @@ private:
   // state
   tti_point current_tti;
   cc_st cc_state_ = cc_st::idle;
+
+  sched_dl_cqi dl_cqi_ctxt;
 };

/*************************************************************

@@ -152,6 +152,12 @@ void log_phich_cc_results(srslog::basic_logger& logger,
   }
 }

+rbg_interval rbg_interval::prbs_to_rbgs(const prb_interval& prbs, uint32_t cell_nof_prb)
+{
+  uint32_t P = srsran_ra_type0_P(cell_nof_prb);
+  return rbg_interval{prbs.start() / P, (prbs.stop() + P - 1) / P};
+}
+
 prb_interval prb_interval::rbgs_to_prbs(const rbg_interval& rbgs, uint32_t cell_nof_prb)
 {
   uint32_t P = srsran_ra_type0_P(cell_nof_prb);
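The start is rounded down and the stop rounded up, so the resulting RBG range always covers the full PRB range. A worked example for a 50-PRB cell, where the RBG size P is 3 (TS 36.213 Table 7.1.6.1-1):

// prbs = {7, 14}  ->  rbg_interval{7 / 3, (14 + 3 - 1) / 3} = {2, 5}
// RBGs 2..4 span PRBs 6..14, a superset of the requested PRBs 7..13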

@@ -18,8 +18,6 @@
 #include "srsran/common/string_helpers.h"
 #include "srsran/srslog/bundled/fmt/ranges.h"

-using srsran::tti_interval;
-
 namespace srsenb {

/******************************************************
@@ -35,9 +33,8 @@ namespace srsenb {
  *******************************************************/

 sched_ue::sched_ue(uint16_t rnti_, const std::vector<sched_cell_params_t>& cell_list_params_, const ue_cfg_t& cfg_) :
-  logger(srslog::fetch_basic_logger("MAC"))
+  logger(srslog::fetch_basic_logger("MAC")), rnti(rnti_)
 {
-  rnti = rnti_;
   cells.reserve(cell_list_params_.size());
   for (auto& c : cell_list_params_) {
     cells.emplace_back(rnti_, c, current_tti);
@@ -127,11 +124,6 @@ void sched_ue::rem_bearer(uint32_t lc_id)

 void sched_ue::phy_config_enabled(tti_point tti_rx, bool enabled)
 {
-  for (sched_ue_cell& c : cells) {
-    if (c.configured()) {
-      c.dl_cqi_tti_rx = tti_rx;
-    }
-  }
   phy_config_dedicated_enabled = enabled;
 }
@@ -292,7 +284,7 @@ void sched_ue::set_dl_pmi(tti_point tti_rx, uint32_t enb_cc_idx, uint32_t pmi)
 void sched_ue::set_dl_cqi(tti_point tti_rx, uint32_t enb_cc_idx, uint32_t cqi)
 {
   if (cells[enb_cc_idx].cc_state() != cc_st::idle) {
-    cells[enb_cc_idx].set_dl_cqi(tti_rx, cqi);
+    cells[enb_cc_idx].set_dl_wb_cqi(tti_rx, cqi);
   } else {
     logger.warning("Received DL CQI for invalid enb cell index %d", enb_cc_idx);
   }
@@ -724,8 +716,8 @@ bool sched_ue::needs_cqi(uint32_t tti, uint32_t enb_cc_idx, bool will_send)
   bool ret = false;
   if (phy_config_dedicated_enabled && cfg.supported_cc_list[0].aperiodic_cqi_period &&
       lch_handler.has_pending_dl_txs()) {
-    uint32_t interval = srsran_tti_interval(tti, cells[enb_cc_idx].dl_cqi_tti_rx.to_uint());
-    bool needscqi = interval >= cfg.supported_cc_list[0].aperiodic_cqi_period;
+    bool needscqi = tti_point(tti) >=
+                    cells[enb_cc_idx].dl_cqi().last_cqi_info_tti() - cfg.supported_cc_list[0].aperiodic_cqi_period;
     if (needscqi) {
       uint32_t interval_sent = srsran_tti_interval(tti, cqi_request_tti);
       if (interval_sent >= 16) {
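tti_point supports arithmetic and comparison directly, which is what lets the rewritten check drop the explicit srsran_tti_interval call against a raw uint. A minimal sketch with toy values, not taken from the code:

tti_point last_cqi{100};
uint32_t  period = 40;                                  // aperiodic CQI period, in TTIs
bool      stale  = tti_point{150} >= last_cqi + period; // true: 150 >= 140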
@@ -756,9 +748,8 @@ rbg_interval sched_ue::get_required_dl_rbgs(uint32_t enb_cc_idx)
   int pending_prbs = get_required_prb_dl(cells[enb_cc_idx], to_tx_dl(current_tti), get_dci_format(), req_bytes.start());
   if (pending_prbs < 0) {
     // Cannot fit allocation in given PRBs
-    logger.error("SCHED: DL CQI=%d does not allow fitting %d non-segmentable DL tx bytes into the cell bandwidth. "
+    logger.error("SCHED: DL CQI does not allow fitting %d non-segmentable DL tx bytes into the cell bandwidth. "
                  "Consider increasing initial CQI value.",
-                 cells[enb_cc_idx].dl_cqi,
                  req_bytes.start());
     return {cellparams->nof_prb(), cellparams->nof_prb()};
   }
@@ -846,7 +837,7 @@ uint32_t sched_ue::get_expected_dl_bitrate(uint32_t enb_cc_idx, int nof_rbgs) const
   auto& cc = cells[enb_cc_idx];
   uint32_t nof_re =
       cc.cell_cfg->get_dl_lb_nof_re(to_tx_dl(current_tti), count_prb_per_tb_approx(nof_rbgs, cc.cell_cfg->nof_prb()));
-  float max_coderate = srsran_cqi_to_coderate(std::min(cc.dl_cqi + 1u, 15u), cfg.use_tbs_index_alt);
+  float max_coderate = srsran_cqi_to_coderate(std::min(cc.dl_cqi().get_avg_cqi() + 1u, 15u), cfg.use_tbs_index_alt);

   // Inverse of srsran_coderate(tbs, nof_re)
   uint32_t tbs = max_coderate * nof_re - 24;
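As the in-code comment suggests, the last line inverts the relation coderate = (tbs + 24) / nof_re, where 24 is the transport-block CRC length in bits and the CQI-derived value already folds in the modulation efficiency (bits per RE). Toy numbers, assuming a CQI giving an efficiency of 1.48 over 1000 REs:

// tbs = 1.48 * 1000 - 24 = 1456 bits available for the transport block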
@@ -915,7 +906,7 @@ uint32_t sched_ue::get_pending_ul_data_total(tti_point tti_tx_ul, int this_enb_cc_idx)
   uint32_t max_cqi = 0, max_cc_idx = 0;
   for (uint32_t cc = 0; cc < cells.size(); ++cc) {
     if (cells[cc].configured()) {
-      uint32_t sum_cqi = cells[cc].dl_cqi + cells[cc].ul_cqi;
+      uint32_t sum_cqi = cells[cc].dl_cqi().get_avg_cqi() + cells[cc].ul_cqi;
       if (cells[cc].cc_state() == cc_st::active and sum_cqi > max_cqi) {
         max_cqi = sum_cqi;
         max_cc_idx = cc;
@@ -1008,7 +999,8 @@ std::pair<bool, uint32_t> sched_ue::get_active_cell_index(uint32_t enb_cc_idx) const
 uint32_t sched_ue::get_aggr_level(uint32_t enb_cc_idx, uint32_t nof_bits)
 {
   const auto& cc = cells[enb_cc_idx];
-  return srsenb::get_aggr_level(nof_bits, cc.dl_cqi, cc.max_aggr_level, cc.cell_cfg->nof_prb(), cfg.use_tbs_index_alt);
+  return srsenb::get_aggr_level(
+      nof_bits, cc.dl_cqi().get_avg_cqi(), cc.max_aggr_level, cc.cell_cfg->nof_prb(), cfg.use_tbs_index_alt);
 }

 void sched_ue::finish_tti(tti_point tti_rx, uint32_t enb_cc_idx)

@@ -34,7 +34,8 @@ sched_ue_cell::sched_ue_cell(uint16_t rnti_, const sched_cell_params_t& cell_cfg
   fixed_mcs_dl(cell_cfg_.sched_cfg->pdsch_mcs),
   fixed_mcs_ul(cell_cfg_.sched_cfg->pusch_mcs),
   current_tti(current_tti_),
-  max_aggr_level(cell_cfg_.sched_cfg->max_aggr_level >= 0 ? cell_cfg_.sched_cfg->max_aggr_level : 3)
+  max_aggr_level(cell_cfg_.sched_cfg->max_aggr_level >= 0 ? cell_cfg_.sched_cfg->max_aggr_level : 3),
+  dl_cqi_ctxt(cell_cfg_.nof_prb(), 0, 1)
 {
   clear_feedback();
 }
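The new member is initialized in wideband-only mode; annotated (argument meanings follow the sched_dl_cqi constructor above):

// dl_cqi_ctxt(cell_cfg_.nof_prb(), /* K = */ 0, /* init_dl_cqi = */ 1)
//   nof_prb()    -> cell bandwidth, used for subband/bandwidth-part sizing
//   K = 0        -> subband_cqi_enabled() == false; only wideband CQI is tracked
//   init CQI = 1 -> conservative default until real feedback arrives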
@@ -83,15 +84,17 @@ void sched_ue_cell::set_ue_cfg(const sched_interface::ue_cfg_t& ue_cfg_)
     case cc_st::active:
       if (ue_cc_idx < 0 or not ue_cfg->supported_cc_list[ue_cc_idx].active) {
         cc_state_ = cc_st::deactivating;
-        logger.info("SCHED: Deactivating rnti=0x%x, SCellIndex=%d...", rnti, ue_cc_idx);
+        logger.info(
+            "SCHED: Deactivating SCell, rnti=0x%x, cc=%d, SCellIndex=%d...", rnti, cell_cfg->enb_cc_idx, ue_cc_idx);
       }
       break;
     case cc_st::deactivating:
     case cc_st::idle:
       if (ue_cc_idx > 0 and ue_cfg->supported_cc_list[ue_cc_idx].active) {
         cc_state_ = cc_st::activating;
-        dl_cqi = 0;
-        logger.info("SCHED: Activating rnti=0x%x, SCellIndex=%d...", rnti, ue_cc_idx);
+        dl_cqi_ctxt.reset_cqi(0);
+        logger.info(
+            "SCHED: Activating SCell, rnti=0x%x, cc=%d, SCellIndex=%d...", rnti, cell_cfg->enb_cc_idx, ue_cc_idx);
       }
       break;
     default:
@@ -121,9 +124,7 @@ void sched_ue_cell::clear_feedback()
   dl_ri_tti_rx = tti_point{};
   dl_pmi = 0;
   dl_pmi_tti_rx = tti_point{};
-  dl_cqi = ue_cc_idx == 0 ? cell_cfg->cfg.initial_dl_cqi : 1;
-  dl_cqi_tti_rx = tti_point{};
-  dl_cqi_rx = false;
+  dl_cqi_ctxt.reset_cqi(ue_cc_idx == 0 ? cell_cfg->cfg.initial_dl_cqi : 1);
   ul_cqi = 1;
   ul_cqi_tti_rx = tti_point{};
 }
@@ -134,12 +135,10 @@ void sched_ue_cell::finish_tti(tti_point tti_rx)
   harq_ent.reset_pending_data(tti_rx);
 }

-void sched_ue_cell::set_dl_cqi(tti_point tti_rx, uint32_t dl_cqi_)
+void sched_ue_cell::set_dl_wb_cqi(tti_point tti_rx, uint32_t dl_cqi_)
 {
-  dl_cqi = dl_cqi_;
-  dl_cqi_tti_rx = tti_rx;
-  dl_cqi_rx = dl_cqi_rx or dl_cqi > 0;
-  if (ue_cc_idx > 0 and cc_state_ == cc_st::activating and dl_cqi_rx) {
+  dl_cqi_ctxt.cqi_wb_info(tti_rx, dl_cqi_);
+  if (ue_cc_idx > 0 and cc_state_ == cc_st::activating and dl_cqi_ > 0) {
     // Wait for SCell to receive a positive CQI before activating it
     cc_state_ = cc_st::active;
     logger.info("SCHED: SCell index=%d is now active", ue_cc_idx);
@@ -232,10 +231,12 @@ tbs_info cqi_to_tbs_dl(const sched_ue_cell& cell,
   bool use_tbs_index_alt = cell.get_ue_cfg()->use_tbs_index_alt and dci_format != SRSRAN_DCI_FORMAT1A;

   tbs_info ret;
-  if (cell.fixed_mcs_dl < 0 or not cell.dl_cqi_rx) {
-    // Dynamic MCS
+  if (cell.fixed_mcs_dl < 0 or not cell.dl_cqi().is_cqi_info_received()) {
+    // Dynamic MCS configured or first Tx
+    uint32_t dl_cqi_avg = cell.dl_cqi().get_grant_avg_cqi(prb_interval(0, nof_prb));
     ret = compute_min_mcs_and_tbs_from_required_bytes(
-        nof_prb, nof_re, cell.dl_cqi, cell.max_mcs_dl, req_bytes, false, false, use_tbs_index_alt);
+        nof_prb, nof_re, dl_cqi_avg, cell.max_mcs_dl, req_bytes, false, false, use_tbs_index_alt);

     // If coderate > SRSRAN_MIN(max_coderate, 0.932 * Qm) we should set TBS=0. We don't because it's not correctly
     // handled by the scheduler, but we might be scheduling undecodable codewords at very low SNR
@@ -244,7 +245,7 @@
       ret.tbs_bytes = get_tbs_bytes((uint32_t)ret.mcs, nof_prb, use_tbs_index_alt, false);
     }
   } else {
-    // Fixed MCS
+    // Fixed MCS configured
     ret.mcs = cell.fixed_mcs_dl;
     ret.tbs_bytes = get_tbs_bytes((uint32_t)cell.fixed_mcs_dl, nof_prb, use_tbs_index_alt, false);
   }
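Summarizing the two branches of cqi_to_tbs_dl after this change:

// fixed_mcs_dl < 0 OR no positive CQI received yet:
//     MCS/TBS derived from the grant-average CQI over PRBs [0, nof_prb)
// otherwise:
//     the configured fixed MCS is used verbatim and only the TBS is computed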

@@ -28,21 +28,21 @@ void test_sched_cqi_one_subband_cqi()
   ue_cqi.cqi_sb_info(tti_point(0), 0, 5);

   // TEST: updated part has positive cqi. Non-updated cqi didn't change
-  TESTASSERT(ue_cqi.get_rbg_grant_avg_cqi(rbg_interval(0, 1)) == 5);
+  TESTASSERT(ue_cqi.get_grant_avg_cqi(rbg_interval(0, 1)) == 5);
   for (uint32_t i = 1; i < 5; ++i) {
-    TESTASSERT(ue_cqi.get_rbg_grant_avg_cqi(rbg_interval(i, i + 1)) > 0);
+    TESTASSERT(ue_cqi.get_grant_avg_cqi(rbg_interval(i, i + 1)) > 0);
   }
-  TESTASSERT(ue_cqi.get_rbg_grant_avg_cqi(rbg_interval(6, cell_nof_prb_to_rbg(50))) == 0);
+  TESTASSERT(ue_cqi.get_grant_avg_cqi(rbg_interval(6, cell_nof_prb_to_rbg(50))) == 0);

   // TEST: Check average cqi over a mask of RBGs
   rbgmask_t mask(cell_nof_prb_to_rbg(50));
   mask.fill(10, mask.size());
-  TESTASSERT(ue_cqi.get_rbg_grant_avg_cqi(mask) == 0);
+  TESTASSERT(ue_cqi.get_grant_avg_cqi(mask) == 0);
   mask.reset();
   mask.set(1);
-  TESTASSERT(ue_cqi.get_rbg_grant_avg_cqi(mask) == 5);
+  TESTASSERT(ue_cqi.get_grant_avg_cqi(mask) == 5);
   mask.fill(0, mask.size());
-  TESTASSERT(ue_cqi.get_rbg_grant_avg_cqi(mask) > 0 and ue_cqi.get_rbg_grant_avg_cqi(mask) < 5);
+  TESTASSERT(ue_cqi.get_grant_avg_cqi(mask) > 0 and ue_cqi.get_grant_avg_cqi(mask) < 5);

   // TEST: Get optimal RBG mask in terms of CQI
   mask = ue_cqi.get_optim_rbg_mask(5);
@@ -63,14 +63,14 @@ void test_sched_cqi_wideband_cqi()
   // TEST: all bandwidth has positive cqi.
   for (uint32_t i = 0; i < nof_rbgs; ++i) {
-    TESTASSERT(ue_cqi.get_rbg_grant_avg_cqi(rbg_interval(i, i + 1)) == 5);
+    TESTASSERT(ue_cqi.get_grant_avg_cqi(rbg_interval(i, i + 1)) == 5);
   }
-  TESTASSERT(ue_cqi.get_rbg_grant_avg_cqi(rbg_interval(0, nof_rbgs)) == 5);
+  TESTASSERT(ue_cqi.get_grant_avg_cqi(rbg_interval(0, nof_rbgs)) == 5);

   // TEST: Check average cqi over a mask of RBGs
   rbgmask_t mask(cell_nof_prb_to_rbg(50));
   mask.fill(10, mask.size());
-  TESTASSERT(ue_cqi.get_rbg_grant_avg_cqi(mask) == 5);
+  TESTASSERT(ue_cqi.get_grant_avg_cqi(mask) == 5);

   // TEST: Get optimal RBG mask in terms of CQI
   mask = ue_cqi.get_optim_rbg_mask(5);
