Several fixes in UL scheduler

- Limit minimum UL grant size to accommodate both BSR and RLC headers
- Limit UL CQI update to PUSCH SNR reports
- Avoid TPC commands when target PUSCH and PUCCH SNR are not specified
master
Francisco 4 years ago
parent ad03a147d3
commit b94b0e77b3

@ -90,8 +90,10 @@ public:
uint32_t get_required_prb_ul(uint32_t enb_cc_idx, uint32_t req_bytes); uint32_t get_required_prb_ul(uint32_t enb_cc_idx, uint32_t req_bytes);
/// Get total pending bytes to be transmitted in DL.
/// The amount of CEs to transmit depends on whether enb_cc_idx is UE's PCell
uint32_t get_pending_dl_bytes(uint32_t enb_cc_idx);
rbg_interval get_required_dl_rbgs(uint32_t enb_cc_idx); rbg_interval get_required_dl_rbgs(uint32_t enb_cc_idx);
srsran::interval<uint32_t> get_requested_dl_bytes(uint32_t enb_cc_idx);
uint32_t get_pending_dl_rlc_data() const; uint32_t get_pending_dl_rlc_data() const;
uint32_t get_expected_dl_bitrate(uint32_t enb_cc_idx, int nof_rbgs = -1) const; uint32_t get_expected_dl_bitrate(uint32_t enb_cc_idx, int nof_rbgs = -1) const;
@ -147,6 +149,8 @@ public:
bool pusch_enabled(tti_point tti_rx, uint32_t enb_cc_idx, bool needs_pdcch) const; bool pusch_enabled(tti_point tti_rx, uint32_t enb_cc_idx, bool needs_pdcch) const;
private: private:
srsran::interval<uint32_t> get_requested_dl_bytes(uint32_t enb_cc_idx);
bool is_sr_triggered(); bool is_sr_triggered();
tbs_info allocate_new_dl_mac_pdu(sched_interface::dl_sched_data_t* data, tbs_info allocate_new_dl_mac_pdu(sched_interface::dl_sched_data_t* data,

@ -119,14 +119,14 @@ public:
* @remark See TS 36.213 Section 5.1.1 * @remark See TS 36.213 Section 5.1.1
* @return accumulated TPC value {-1, 0, 1, 3} * @return accumulated TPC value {-1, 0, 1, 3}
*/ */
uint8_t encode_pusch_tpc() { return enconde_tpc(PUSCH_CODE); } uint8_t encode_pusch_tpc() { return encode_tpc(PUSCH_CODE); }
/** /**
* Called during DCI format1/2A/A encoding to set PUCCH TPC command * Called during DCI format1/2A/A encoding to set PUCCH TPC command
* @remark See TS 36.213 Section 5.1.2 * @remark See TS 36.213 Section 5.1.2
* @return accumulated TPC value {-1, 0, 1, 3} * @return accumulated TPC value {-1, 0, 1, 3}
*/ */
uint8_t encode_pucch_tpc() { return enconde_tpc(PUCCH_CODE); } uint8_t encode_pucch_tpc() { return encode_tpc(PUCCH_CODE); }
uint32_t max_ul_prbs() const { return max_prbs_cached; } uint32_t max_ul_prbs() const { return max_prbs_cached; }
@ -147,18 +147,14 @@ private:
return 1; return 1;
} }
} }
uint8_t enconde_tpc(uint32_t cc) uint8_t encode_tpc(uint32_t cc)
{ {
float target_snr_dB = cc == PUSCH_CODE ? target_pusch_snr_dB : target_pucch_snr_dB; float target_snr_dB = cc == PUSCH_CODE ? target_pusch_snr_dB : target_pucch_snr_dB;
auto& ch_snr = snr_estim_list[cc]; auto& ch_snr = snr_estim_list[cc];
assert(ch_snr.pending_delta == 0); // ensure called once per {cc,tti} assert(ch_snr.pending_delta == 0); // ensure called once per {cc,tti}
if (target_snr_dB < 0) { if (target_snr_dB < 0) {
// undefined target SINR case. Increase Tx power once per PHR, considering the number of allocable PRBs remains // undefined target sinr case.
// unchanged ch_snr.pending_delta = 0;
if (not ch_snr.phr_flag) {
ch_snr.pending_delta = (max_prbs_cached == nof_prb) ? 1 : (last_phr < 0 ? -1 : 0);
ch_snr.phr_flag = true;
}
} else { } else {
// target SINR is finite and there is power headroom // target SINR is finite and there is power headroom
float diff = target_snr_dB - ch_snr.snr_avg.value(); float diff = target_snr_dB - ch_snr.snr_avg.value();

@ -671,7 +671,7 @@ void sf_sched::set_dl_data_sched_result(const sf_cch_allocator::alloc_result_t&
continue; continue;
} }
sched_ue* user = ue_it->second.get(); sched_ue* user = ue_it->second.get();
uint32_t data_before = user->get_requested_dl_bytes(cc_cfg->enb_cc_idx).stop(); uint32_t data_before = user->get_pending_dl_bytes(cc_cfg->enb_cc_idx);
const dl_harq_proc& dl_harq = user->get_dl_harq(data_alloc.pid, cc_cfg->enb_cc_idx); const dl_harq_proc& dl_harq = user->get_dl_harq(data_alloc.pid, cc_cfg->enb_cc_idx);
bool is_newtx = dl_harq.is_empty(); bool is_newtx = dl_harq.is_empty();
@ -687,7 +687,7 @@ void sf_sched::set_dl_data_sched_result(const sf_cch_allocator::alloc_result_t&
data_alloc.pid, data_alloc.pid,
data_alloc.user_mask, data_alloc.user_mask,
tbs, tbs,
user->get_requested_dl_bytes(cc_cfg->enb_cc_idx).stop()); user->get_pending_dl_bytes(cc_cfg->enb_cc_idx));
logger.warning("%s", srsran::to_c_str(str_buffer)); logger.warning("%s", srsran::to_c_str(str_buffer));
continue; continue;
} }
@ -707,7 +707,7 @@ void sf_sched::set_dl_data_sched_result(const sf_cch_allocator::alloc_result_t&
dl_harq.nof_retx(0) + dl_harq.nof_retx(1), dl_harq.nof_retx(0) + dl_harq.nof_retx(1),
tbs, tbs,
data_before, data_before,
user->get_requested_dl_bytes(cc_cfg->enb_cc_idx).stop(), user->get_pending_dl_bytes(cc_cfg->enb_cc_idx),
get_tti_tx_dl()); get_tti_tx_dl());
logger.info("%s", srsran::to_c_str(str_buffer)); logger.info("%s", srsran::to_c_str(str_buffer));
} }

@ -311,8 +311,10 @@ void sched_ue::set_ul_snr(tti_point tti_rx, uint32_t enb_cc_idx, float snr, uint
{ {
if (cells[enb_cc_idx].cc_state() != cc_st::idle) { if (cells[enb_cc_idx].cc_state() != cc_st::idle) {
cells[enb_cc_idx].tpc_fsm.set_snr(snr, ul_ch_code); cells[enb_cc_idx].tpc_fsm.set_snr(snr, ul_ch_code);
if (ul_ch_code == tpc::PUSCH_CODE) {
cells[enb_cc_idx].ul_cqi = srsran_cqi_from_snr(snr); cells[enb_cc_idx].ul_cqi = srsran_cqi_from_snr(snr);
cells[enb_cc_idx].ul_cqi_tti_rx = tti_rx; cells[enb_cc_idx].ul_cqi_tti_rx = tti_rx;
}
} else { } else {
logger.warning("Received SNR info for invalid cell index %d", enb_cc_idx); logger.warning("Received SNR info for invalid cell index %d", enb_cc_idx);
} }
@ -776,6 +778,11 @@ rbg_interval sched_ue::get_required_dl_rbgs(uint32_t enb_cc_idx)
return {min_pending_rbg, max_pending_rbg}; return {min_pending_rbg, max_pending_rbg};
} }
/// Total number of DL bytes pending transmission for the given carrier.
/// This is the upper bound of the (min,max) requested-bytes interval; the
/// CE contribution depends on whether enb_cc_idx is the UE's PCell.
uint32_t sched_ue::get_pending_dl_bytes(uint32_t enb_cc_idx)
{
  const srsran::interval<uint32_t> req_bytes = get_requested_dl_bytes(enb_cc_idx);
  return req_bytes.stop();
}
/** /**
* Returns the range (min,max) of possible MAC PDU sizes. * Returns the range (min,max) of possible MAC PDU sizes.
* - the lower boundary value is set based on the following conditions: * - the lower boundary value is set based on the following conditions:

@ -307,6 +307,7 @@ int get_required_prb_dl(const sched_ue_cell& cell,
uint32_t get_required_prb_ul(const sched_ue_cell& cell, uint32_t req_bytes) uint32_t get_required_prb_ul(const sched_ue_cell& cell, uint32_t req_bytes)
{ {
const static int MIN_ALLOC_BYTES = 10;
if (req_bytes == 0) { if (req_bytes == 0) {
return 0; return 0;
} }
@ -317,11 +318,20 @@ uint32_t get_required_prb_ul(const sched_ue_cell& cell, uint32_t req_bytes)
}; };
// find nof prbs that lead to a tbs just above req_bytes // find nof prbs that lead to a tbs just above req_bytes
int target_tbs = static_cast<int>(req_bytes) + 4; int target_tbs = std::max(static_cast<int>(req_bytes) + 4, MIN_ALLOC_BYTES);
uint32_t max_prbs = std::min(cell.tpc_fsm.max_ul_prbs(), cell.cell_cfg->nof_prb()); uint32_t max_prbs = std::min(cell.tpc_fsm.max_ul_prbs(), cell.cell_cfg->nof_prb());
std::tuple<uint32_t, int, uint32_t, int> ret = std::tuple<uint32_t, int, uint32_t, int> ret =
false_position_method(1U, max_prbs, target_tbs, compute_tbs_approx, [](int y) { return y == SRSRAN_ERROR; }); false_position_method(1U, max_prbs, target_tbs, compute_tbs_approx, [](int y) { return y == SRSRAN_ERROR; });
uint32_t req_prbs = std::get<2>(ret); uint32_t req_prbs = std::get<2>(ret);
uint32_t final_tbs = std::get<3>(ret);
while (final_tbs < MIN_ALLOC_BYTES and req_prbs < cell.cell_cfg->nof_prb()) {
// Note: If PHR<0 is limiting the max nof PRBs per UL grant, the UL grant may become too small to fit any
// data other than headers + BSR. Besides forcing unnecessary segmentation, it may additionally
// prevent the UE from fitting small RRC messages (e.g. RRCReconfComplete) in the UL grants.
// To avoid TBS<10, we force an increase in the nof of required PRBs.
req_prbs++;
final_tbs = compute_tbs_approx(req_prbs);
}
while (!srsran_dft_precoding_valid_prb(req_prbs) && req_prbs < cell.cell_cfg->nof_prb()) { while (!srsran_dft_precoding_valid_prb(req_prbs) && req_prbs < cell.cell_cfg->nof_prb()) {
req_prbs++; req_prbs++;
} }

@ -109,7 +109,7 @@ int test_undefined_target_snr()
TESTASSERT(sum_pusch == 0); TESTASSERT(sum_pusch == 0);
TESTASSERT(sum_pucch == 0); TESTASSERT(sum_pucch == 0);
// TEST: If the PHR allows full utilization of available PRBs, the TPC slightly increments UL Tx power // TEST: Check that high PHR allows full utilization of available PRBs, TPC remains at zero (no target SINR)
int phr = 30; int phr = 30;
tpcfsm.set_phr(phr); tpcfsm.set_phr(phr);
TESTASSERT(tpcfsm.max_ul_prbs() == 50); TESTASSERT(tpcfsm.max_ul_prbs() == 50);
@ -120,8 +120,7 @@ int test_undefined_target_snr()
sum_pusch += decode_tpc(tpcfsm.encode_pusch_tpc()); sum_pusch += decode_tpc(tpcfsm.encode_pusch_tpc());
sum_pucch += decode_tpc(tpcfsm.encode_pucch_tpc()); sum_pucch += decode_tpc(tpcfsm.encode_pucch_tpc());
} }
TESTASSERT(sum_pusch > 0 and sum_pusch <= 3); TESTASSERT(sum_pusch == 0 and sum_pucch == 0);
TESTASSERT(sum_pucch > 0 and sum_pucch <= 3);
// TEST: PHR is too low to allow all PRBs to be allocated. This event should not affect TPC commands // TEST: PHR is too low to allow all PRBs to be allocated. This event should not affect TPC commands
phr = 5; phr = 5;
@ -144,8 +143,8 @@ int test_undefined_target_snr()
sum_pusch += decode_tpc(tpcfsm.encode_pusch_tpc()); sum_pusch += decode_tpc(tpcfsm.encode_pusch_tpc());
sum_pucch += decode_tpc(tpcfsm.encode_pucch_tpc()); sum_pucch += decode_tpc(tpcfsm.encode_pucch_tpc());
} }
TESTASSERT(sum_pusch <= 0 and sum_pusch >= -1); TESTASSERT(sum_pusch == 0);
TESTASSERT(sum_pucch <= 0 and sum_pucch >= -1); TESTASSERT(sum_pucch == 0);
return SRSRAN_SUCCESS; return SRSRAN_SUCCESS;
} }

Loading…
Cancel
Save