fix check of pending UL bytes

master
Authored by Francisco 4 years ago; committed by Andre Puschmann
parent bb96625129
commit 3067e81e67

@@ -56,6 +56,8 @@ public:
  int  get_dl_tx_with_overhead(uint32_t lcid) const;
  int  get_dl_retx(uint32_t lcid) const;
  int  get_dl_retx_with_overhead(uint32_t lcid) const;
  bool is_lcg_active(uint32_t lcg) const;
  int  get_bsr(uint32_t lcid) const;
  int  get_bsr_with_overhead(uint32_t lcid) const;
  int  get_max_prio_lcid() const;

@@ -43,18 +43,19 @@ uint32_t get_mac_sdu_and_subheader_size(uint32_t sdu_bytes)
{
  return sdu_bytes + get_mac_subheader_size(sdu_bytes);
}
uint32_t get_rlc_header_size_est(uint32_t lcid, uint32_t rlc_pdu_bytes)
{
  return (lcid == 0 or rlc_pdu_bytes == 0) ? 0 : RLC_MAX_HEADER_SIZE_NO_LI;
}
uint32_t get_rlc_mac_overhead(uint32_t lcid, uint32_t rlc_pdu_bytes)
uint32_t get_dl_mac_sdu_size_with_overhead(uint32_t lcid, uint32_t rlc_pdu_bytes)
{
  return get_mac_subheader_size(rlc_pdu_bytes) + get_rlc_header_size_est(lcid, rlc_pdu_bytes);
  uint32_t overhead = (lcid == 0 or rlc_pdu_bytes == 0) ? 0 : RLC_MAX_HEADER_SIZE_NO_LI;
  overhead += get_mac_subheader_size(overhead + rlc_pdu_bytes);
  return overhead + rlc_pdu_bytes;
}
uint32_t get_mac_sdu_size_with_overhead(uint32_t lcid, uint32_t rlc_pdu_bytes)
uint32_t get_ul_mac_sdu_size_with_overhead(uint32_t rlc_pdu_bytes)
{
  return rlc_pdu_bytes + get_rlc_mac_overhead(lcid, rlc_pdu_bytes);
  if (rlc_pdu_bytes == 0) {
    return 0;
  }
  uint32_t overhead = get_mac_subheader_size(rlc_pdu_bytes + RLC_MAX_HEADER_SIZE_NO_LI);
  return overhead + rlc_pdu_bytes;
}
/*******************************************************
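
For reference, a minimal standalone sketch of what the two new helpers compute. The constants are illustrative assumptions: RLC_MAX_HEADER_SIZE_NO_LI is taken as 3 bytes and mac_subheader_size() is an assumed stand-in for the scheduler's get_mac_subheader_size() (2-byte MAC subheader while the 7-bit L field suffices, 3 bytes otherwise); the real values come from the srsLTE headers.

#include <cstdint>
#include <cstdio>

// Illustrative constants/stand-ins; the real ones live in the srsLTE scheduler code.
static const uint32_t RLC_MAX_HEADER_SIZE_NO_LI = 3; // assumed RLC header estimate without LI

// Assumed model of get_mac_subheader_size(): 0 for an empty SDU, 2 bytes while the
// 7-bit L field suffices, 3 bytes otherwise.
uint32_t mac_subheader_size(uint32_t sdu_bytes)
{
  if (sdu_bytes == 0) {
    return 0;
  }
  return sdu_bytes < 128 ? 2 : 3;
}

// DL: the RLC buffer state excludes the RLC header, so an RLC header estimate is added
// for every bearer except LCID 0, plus the MAC subheader for the resulting payload.
uint32_t dl_mac_sdu_size_with_overhead(uint32_t lcid, uint32_t rlc_pdu_bytes)
{
  uint32_t overhead = (lcid == 0 or rlc_pdu_bytes == 0) ? 0 : RLC_MAX_HEADER_SIZE_NO_LI;
  overhead += mac_subheader_size(overhead + rlc_pdu_bytes);
  return overhead + rlc_pdu_bytes;
}

// UL: only the MAC subheader is added on top of the reported bytes; the RLC header
// estimate is used solely to size that subheader conservatively.
uint32_t ul_mac_sdu_size_with_overhead(uint32_t rlc_pdu_bytes)
{
  if (rlc_pdu_bytes == 0) {
    return 0;
  }
  uint32_t overhead = mac_subheader_size(rlc_pdu_bytes + RLC_MAX_HEADER_SIZE_NO_LI);
  return overhead + rlc_pdu_bytes;
}

int main()
{
  std::printf("DL lcid=3, 100 B -> %u B\n", dl_mac_sdu_size_with_overhead(3, 100)); // 105
  std::printf("UL 100 B BSR     -> %u B\n", ul_mac_sdu_size_with_overhead(100));    // 102
}
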
@@ -313,7 +314,7 @@ int lch_ue_manager::get_dl_tx(uint32_t lcid) const
}
int lch_ue_manager::get_dl_tx_with_overhead(uint32_t lcid) const
{
  return get_mac_sdu_size_with_overhead(lcid, get_dl_tx(lcid));
  return get_dl_mac_sdu_size_with_overhead(lcid, get_dl_tx(lcid));
}
int lch_ue_manager::get_dl_retx(uint32_t lcid) const
@@ -322,17 +323,28 @@ int lch_ue_manager::get_dl_retx(uint32_t lcid) const
}
int lch_ue_manager::get_dl_retx_with_overhead(uint32_t lcid) const
{
  return get_mac_sdu_size_with_overhead(lcid, get_dl_retx(lcid));
  return get_dl_mac_sdu_size_with_overhead(lcid, get_dl_retx(lcid));
}
int lch_ue_manager::get_bsr(uint32_t lcid) const
bool lch_ue_manager::is_lcg_active(uint32_t lcg) const
{
  if (lcg == 0) {
    return true;
  }
  for (uint32_t lcid = 0; lcid < MAX_LC; ++lcid) {
    if (is_bearer_ul(lcid) and lch[lcid].cfg.group == (int)lcg) {
      return true;
    }
  }
  return false;
}
int lch_ue_manager::get_bsr(uint32_t lcg) const
{
  return is_bearer_ul(lcid) ? lcg_bsr[lch[lcid].cfg.group] : 0;
  return is_lcg_active(lcg) ? lcg_bsr[lcg] : 0;
}
int lch_ue_manager::get_bsr_with_overhead(uint32_t lcid) const
int lch_ue_manager::get_bsr_with_overhead(uint32_t lcg) const
{
  int bsr = get_bsr(lcid);
  return bsr + (bsr == 0 ? 0 : ((lcid == 0) ? 0 : RLC_MAX_HEADER_SIZE_NO_LI));
  return get_ul_mac_sdu_size_with_overhead(get_bsr(lcg));
}
std::string lch_ue_manager::get_bsr_text() const
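
The behavioural change above is that a buffer status report for a logical channel group is only honoured if some UL bearer is actually mapped to that group, with LCG 0 (the SRBs) always treated as active. Below is a minimal sketch of that gating using simplified stand-ins for the srsLTE structures; MAX_LC, the field names and the setup in main() are assumptions for illustration.

#include <array>
#include <cstdint>
#include <cstdio>

// Simplified stand-ins for the scheduler's structures; names and MAX_LC are assumptions.
constexpr uint32_t MAX_LC       = 11;
constexpr uint32_t MAX_LC_GROUP = 4; // LTE defines four logical channel groups

struct lc_cfg_t {
  bool ul_configured = false; // plays the role of is_bearer_ul(lcid)
  int  group         = -1;    // LCG this LCID maps to
};

struct lcg_state {
  std::array<lc_cfg_t, MAX_LC>       lch{};
  std::array<uint32_t, MAX_LC_GROUP> lcg_bsr{};

  bool is_lcg_active(uint32_t lcg) const
  {
    if (lcg == 0) {
      return true; // LCG 0 (SRBs) is always considered active
    }
    for (uint32_t lcid = 0; lcid < MAX_LC; ++lcid) {
      if (lch[lcid].ul_configured and lch[lcid].group == static_cast<int>(lcg)) {
        return true;
      }
    }
    return false;
  }

  // BSR values for inactive LCGs are ignored instead of being indexed via an LCID.
  uint32_t get_bsr(uint32_t lcg) const { return is_lcg_active(lcg) ? lcg_bsr[lcg] : 0; }
};

int main()
{
  lcg_state st{};
  st.lcg_bsr[2] = 500; // BSR received for LCG 2 before any bearer maps to it
  std::printf("before DRB setup: %u\n", st.get_bsr(2)); // 0 -> ignored
  st.lch[3]     = {true, 2};                            // LCID 3 (a DRB) mapped to LCG 2
  std::printf("after DRB setup:  %u\n", st.get_bsr(2)); // 500
}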

@@ -861,7 +861,7 @@ bool sched_ue::needs_cqi_unlocked(uint32_t tti, uint32_t cc_idx, bool will_be_se
{
  bool ret = false;
  if (phy_config_dedicated_enabled && cfg.supported_cc_list[0].aperiodic_cqi_period &&
      lch_handler.has_pending_dl_txs() && scell_activation_mask().any()) {
      lch_handler.has_pending_dl_txs()) {
    uint32_t interval = srslte_tti_interval(tti, carriers[cc_idx].dl_cqi_tti);
    bool needscqi = interval >= cfg.supported_cc_list[0].aperiodic_cqi_period;
    if (needscqi) {
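
After this change the aperiodic CQI request depends only on the UE having pending DL transmissions; the SCell-activation condition is dropped. A rough sketch of the resulting decision, with simplified names that are not the srsLTE API; the TTI interval is modelled as the distance modulo the 10240-TTI wrap-around, as srslte_tti_interval() does.

#include <cstdint>
#include <cstdio>

// Simplified stand-ins; field and function names are assumptions.
inline uint32_t tti_interval(uint32_t tti, uint32_t last_tti)
{
  return (tti + 10240 - last_tti) % 10240; // 1024 frames x 10 subframes
}

struct cqi_cfg_t {
  bool     aperiodic_cqi_configured = false;
  uint32_t aperiodic_cqi_period     = 0; // in TTIs
};

bool needs_aperiodic_cqi(const cqi_cfg_t& cfg, uint32_t tti, uint32_t last_dl_cqi_tti, bool has_pending_dl_txs)
{
  // The SCell-activation check was removed: pending DL data alone triggers the request.
  if (not cfg.aperiodic_cqi_configured or not has_pending_dl_txs) {
    return false;
  }
  return tti_interval(tti, last_dl_cqi_tti) >= cfg.aperiodic_cqi_period;
}

int main()
{
  cqi_cfg_t cfg{true, 40};
  std::printf("need CQI: %d\n", needs_aperiodic_cqi(cfg, 100, 50, true)); // interval 50 >= 40 -> 1
}
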
@@ -1012,12 +1012,12 @@ uint32_t sched_ue::get_pending_ul_data_total(uint32_t tti, int this_ue_cc_idx)
  // Note: If there are no active bearers, scheduling requests are also ignored.
  uint32_t pending_data = 0;
  uint32_t active_lcgs = 0, pending_lcgs = 0;
  for (int i = 0; i < sched_interface::MAX_LC_GROUP; i++) {
    if (lch_handler.is_bearer_ul(i)) {
      int bsr = lch_handler.get_bsr_with_overhead(i);
      pending_data += bsr == 0 ? 0 : 1;
      active_lcgs++;
  uint32_t pending_lcgs = 0;
  for (int lcg = 0; lcg < sched_interface::MAX_LC_GROUP; lcg++) {
    uint32_t bsr = lch_handler.get_bsr_with_overhead(lcg);
    if (bsr > 0) {
      pending_data += bsr;
      pending_lcgs++;
    }
  }
  if (pending_data > 0) {
@@ -1026,7 +1026,7 @@ uint32_t sched_ue::get_pending_ul_data_total(uint32_t tti, int this_ue_cc_idx)
  // may be fully occupied by a BSR, and the RRC message transmission needs to be postponed.
    pending_data += (pending_lcgs <= 1) ? sbsr_size : lbsr_size;
  } else {
    if (is_sr_triggered() and active_lcgs > 0 and this_ue_cc_idx >= 0) {
    if (is_sr_triggered() and this_ue_cc_idx >= 0) {
      // Check if this_cc_idx is the carrier with highest CQI
      uint32_t max_cqi = 0, max_cc_idx = 0;
      for (uint32_t cc_idx = 0; cc_idx < carriers.size(); ++cc_idx) {
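
In the fixed loop, the pending UL data is the sum of the per-LCG BSRs (with MAC overhead) rather than a count of non-empty LCGs, plus room for the BSR MAC CE itself. A compact sketch of that computation under assumed CE sizes: sbsr_size and lbsr_size here are illustrative values for a Short/Long BSR CE plus its subheader, not the scheduler's real constants, and the SR-triggered branch shown above is not covered.

#include <array>
#include <cstdint>
#include <cstdio>

// Assumed MAC CE sizes for illustration: Short BSR CE + subheader = 2 B, Long BSR CE + subheader = 4 B.
constexpr uint32_t MAX_LC_GROUP = 4;
constexpr uint32_t sbsr_size    = 2;
constexpr uint32_t lbsr_size    = 4;

// bsr_with_overhead[lcg] plays the role of lch_handler.get_bsr_with_overhead(lcg):
// zero for inactive or empty LCGs, otherwise BSR bytes plus estimated MAC overhead.
uint32_t pending_ul_data_total(const std::array<uint32_t, MAX_LC_GROUP>& bsr_with_overhead)
{
  uint32_t pending_data = 0;
  uint32_t pending_lcgs = 0;
  for (uint32_t lcg = 0; lcg < MAX_LC_GROUP; ++lcg) {
    uint32_t bsr = bsr_with_overhead[lcg];
    if (bsr > 0) {
      pending_data += bsr; // the fix: accumulate reported bytes instead of a 0/1 flag
      pending_lcgs++;
    }
  }
  if (pending_data > 0) {
    // Reserve room for the BSR MAC CE that accompanies the data:
    // a Short BSR if a single LCG has data, a Long BSR otherwise.
    pending_data += (pending_lcgs <= 1) ? sbsr_size : lbsr_size;
  }
  return pending_data;
}

int main()
{
  std::printf("one LCG:  %u B\n", pending_ul_data_total({{0, 120, 0, 0}}));  // 120 + 2
  std::printf("two LCGs: %u B\n", pending_ul_data_total({{0, 120, 0, 80}})); // 200 + 4
}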
