rlc_am_lte: fix counting of retx of entire PDUs and PDU segments

this patch fixes a bug discovered in a real network where the DL CQI of a
user degraded rapidly in a very short time. A relatively big RLC PDU that
was still sent with the good CQI in a big grant now needs to be split
across many tiny segments because the CQI degraded so much.

The retx counting for each transmitted segment caused the retx counter to
reach maxRetx quickly.

With this patch we do not increment the retx counter for each transmitted
PDU or segment of a PDU but instead only increment the counter when
a given SN is added to the retx queue. This can happen either:
a) if the SN is negatively acknowledged and was not already on the retx queue, or
b) if no new data is available for tx and an SN is selected for retx.

This is in accordance with TS 36.322 which handles retx counting in section
5.2.1 according to the above description.
master
Andre Puschmann 3 years ago
parent cba6df3722
commit 7726acad41

@ -608,6 +608,11 @@ void rlc_am_lte::rlc_am_lte_tx::retransmit_pdu(uint32_t sn)
// select first PDU in tx window for retransmission // select first PDU in tx window for retransmission
rlc_amd_tx_pdu& pdu = tx_window[sn]; rlc_amd_tx_pdu& pdu = tx_window[sn];
// increment retx counter and inform upper layers
pdu.retx_count++;
check_sn_reached_max_retx(sn);
logger.info("%s Schedule SN=%d for reTx", RB_NAME, pdu.rlc_sn); logger.info("%s Schedule SN=%d for reTx", RB_NAME, pdu.rlc_sn);
rlc_amd_retx_t& retx = retx_queue.push(); rlc_amd_retx_t& retx = retx_queue.push();
retx.is_segment = false; retx.is_segment = false;
@ -751,8 +756,6 @@ int rlc_am_lte::rlc_am_lte_tx::build_retx_pdu(uint8_t* payload, uint32_t nof_byt
memcpy(ptr, tx_window[retx.sn].buf->msg, tx_window[retx.sn].buf->N_bytes); memcpy(ptr, tx_window[retx.sn].buf->msg, tx_window[retx.sn].buf->N_bytes);
retx_queue.pop(); retx_queue.pop();
tx_window[retx.sn].retx_count++;
check_sn_reached_max_retx(retx.sn);
logger.info(payload, logger.info(payload,
tx_window[retx.sn].buf->N_bytes, tx_window[retx.sn].buf->N_bytes,
@ -911,9 +914,6 @@ int rlc_am_lte::rlc_am_lte_tx::build_segment(uint8_t* payload, uint32_t nof_byte
retx_queue.front().so_start = retx.so_end; retx_queue.front().so_start = retx.so_end;
} }
tx_window[retx.sn].retx_count++;
check_sn_reached_max_retx(retx.sn);
// Write header and pdu // Write header and pdu
uint8_t* ptr = payload; uint8_t* ptr = payload;
rlc_am_write_data_pdu_header(&new_header, &ptr); rlc_am_write_data_pdu_header(&new_header, &ptr);
@ -1219,7 +1219,13 @@ void rlc_am_lte::rlc_am_lte_tx::handle_control_pdu(uint8_t* payload, uint32_t no
std::lock_guard<std::mutex> lock(mutex); std::lock_guard<std::mutex> lock(mutex);
if (tx_window.has_sn(i)) { if (tx_window.has_sn(i)) {
auto& pdu = tx_window[i]; auto& pdu = tx_window[i];
// add to retx queue if it's not already there
if (not retx_queue.has_sn(i)) { if (not retx_queue.has_sn(i)) {
// increment Retx counter and inform upper layers if needed
pdu.retx_count++;
check_sn_reached_max_retx(i);
rlc_amd_retx_t& retx = retx_queue.push(); rlc_amd_retx_t& retx = retx_queue.push();
srsran_expect(tx_window[i].rlc_sn == i, "Incorrect RLC SN=%d!=%d being accessed", tx_window[i].rlc_sn, i); srsran_expect(tx_window[i].rlc_sn == i, "Incorrect RLC SN=%d!=%d being accessed", tx_window[i].rlc_sn, i);
retx.sn = i; retx.sn = i;

Loading…
Cancel
Save