Merge branch 'next' into agpl_next

commit 81061fd545

@ -79,11 +79,9 @@ void copy_if_present_helper(type_storage<T, MinSize, AlignSize>& lhs,
{
if (lhs_present and rhs_present) {
lhs.get() = rhs.get();
}
if (lhs_present) {
} else if (lhs_present) {
lhs.destroy();
}
if (rhs_present) {
} else if (rhs_present) {
lhs.copy_ctor(rhs);
}
}
@ -96,11 +94,9 @@ void move_if_present_helper(type_storage<T, MinSize, AlignSize>& lhs,
{
if (lhs_present and rhs_present) {
lhs.move_assign(std::move(rhs));
}
if (lhs_present) {
} else if (lhs_present) {
lhs.destroy();
}
if (rhs_present) {
} else if (rhs_present) {
lhs.move_ctor(std::move(rhs));
}
}

@ -116,6 +116,22 @@ private:
srslog::basic_logger& logger;
};
template <typename T, size_t N, typename... Args>
unique_pool_ptr<T> make_pool_obj_with_fallback(circular_stack_pool<N>& pool, size_t key, Args&&... args)
{
void* block = pool.allocate(key, sizeof(T), alignof(T));
if (block == nullptr) {
// allocated with "new" as a fallback
return unique_pool_ptr<T>(new T(std::forward<Args>(args)...), std::default_delete<T>());
}
// allocation using memory pool was successful
new (block) T(std::forward<Args>(args)...);
return unique_pool_ptr<T>(static_cast<T*>(block), [key, &pool](T* ptr) {
ptr->~T();
pool.deallocate(key, ptr);
});
}
} // namespace srsran
#endif // SRSRAN_CIRCULAR_MAP_STACK_POOL_H
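
A minimal usage sketch for the helper above (not part of the patch; `my_obj` and `ue_index` are hypothetical, the pool size is illustrative):

// Sketch only: `my_obj` is a hypothetical type and `ue_index` a caller-chosen key.
struct my_obj {
  explicit my_obj(int x_) : x(x_) {}
  int x;
};

void example(circular_stack_pool<16>& pool, size_t ue_index)
{
  // Allocates from the pool slot identified by `ue_index`; falls back to the heap if the pool is exhausted.
  unique_pool_ptr<my_obj> obj = make_pool_obj_with_fallback<my_obj>(pool, ue_index, 42);
  // When `obj` goes out of scope, the deleter either returns the block to the pool or calls delete.
}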

@ -314,6 +314,7 @@ struct rlc_amd_retx_t {
bool is_segment;
uint32_t so_start;
uint32_t so_end;
uint32_t current_so;
};
template <std::size_t WINDOW_SIZE>

@ -83,7 +83,7 @@ struct rlc_amd_tx_pdu_nr {
const uint32_t rlc_sn = INVALID_RLC_SN;
const uint32_t pdcp_sn = INVALID_RLC_SN;
rlc_am_nr_pdu_header_t header = {};
unique_byte_buffer_t buf = nullptr;
unique_byte_buffer_t sdu_buf = nullptr;
uint32_t retx_count = 0;
struct pdu_segment {
uint32_t so = 0;
@ -113,12 +113,14 @@ public:
void empty_queue() final;
// Data PDU helpers
int build_new_sdu_segment(unique_byte_buffer_t tx_sdu,
rlc_amd_tx_pdu_nr& tx_pdu,
uint8_t* payload,
uint32_t nof_bytes);
int build_continuation_sdu_segment(rlc_amd_tx_pdu_nr& tx_pdu, uint8_t* payload, uint32_t nof_bytes);
int build_retx_pdu(unique_byte_buffer_t& tx_pdu, uint32_t nof_bytes);
uint32_t build_new_pdu(uint8_t* payload, uint32_t nof_bytes);
uint32_t build_new_sdu_segment(rlc_amd_tx_pdu_nr& tx_pdu, uint8_t* payload, uint32_t nof_bytes);
uint32_t build_continuation_sdu_segment(rlc_amd_tx_pdu_nr& tx_pdu, uint8_t* payload, uint32_t nof_bytes);
uint32_t build_retx_pdu(uint8_t* payload, uint32_t nof_bytes);
uint32_t build_retx_pdu_without_segmentation(rlc_amd_retx_t& retx, uint8_t* payload, uint32_t nof_bytes);
uint32_t build_retx_pdu_with_segmentation(rlc_amd_retx_t& retx, uint8_t* payload, uint32_t nof_bytes);
bool is_retx_segmentation_required(const rlc_amd_retx_t& retx, uint32_t nof_bytes);
uint32_t get_retx_expected_hdr_len(const rlc_amd_retx_t& retx);
// Buffer State
bool has_data() final;
@ -134,7 +136,7 @@ public:
void stop() final;
bool inside_tx_window(uint32_t sn);
bool inside_tx_window(uint32_t sn) const;
private:
rlc_am* parent = nullptr;
@ -158,7 +160,7 @@ private:
// Queues and buffers
pdu_retx_queue<RLC_AM_WINDOW_SIZE> retx_queue;
rlc_amd_tx_sdu_nr_t sdu_under_segmentation;
uint32_t sdu_under_segmentation_sn = INVALID_RLC_SN; // SN of the SDU currently being segmented.
// Helper constants
uint32_t min_hdr_size = 2;
@ -170,6 +172,9 @@ public:
void set_tx_state(const rlc_am_nr_tx_state_t& st_) { st = st_; } // This should only be used for testing.
rlc_am_nr_tx_state_t get_tx_state() { return st; } // This should only be used for testing.
uint32_t get_tx_window_size() { return tx_window.size(); } // This should only be used for testing.
// Debug Helper
void debug_state() const;
};
/****************************************************************************
@ -225,7 +230,8 @@ public:
int handle_segment_data_sdu(const rlc_am_nr_pdu_header_t& header, const uint8_t* payload, uint32_t nof_bytes);
bool inside_rx_window(uint32_t sn);
void write_to_upper_layers(uint32_t lcid, unique_byte_buffer_t sdu);
bool have_all_segments_been_received(const std::list<rlc_amd_rx_pdu_nr>& segment_list);
void insert_received_segment(rlc_amd_rx_pdu_nr segment, rlc_amd_rx_sdu_nr_t::segment_list_t& segment_list) const;
bool have_all_segments_been_received(const rlc_amd_rx_sdu_nr_t::segment_list_t& segment_list) const;
// Metrics
uint32_t get_sdu_rx_latency_ms() final;
@ -235,7 +241,7 @@ public:
void timer_expired(uint32_t timeout_id);
// Helpers
void debug_state();
void debug_state() const;
private:
rlc_am* parent = nullptr;

@ -24,6 +24,7 @@
#include "srsran/common/string_helpers.h"
#include "srsran/rlc/rlc_am_base.h"
#include <set>
namespace srsran {
@ -61,11 +62,16 @@ struct rlc_amd_rx_pdu_nr {
explicit rlc_amd_rx_pdu_nr(uint32_t rlc_sn_) : rlc_sn(rlc_sn_) {}
};
struct rlc_amd_rx_pdu_nr_cmp {
bool operator()(const rlc_amd_rx_pdu_nr& a, const rlc_amd_rx_pdu_nr& b) const { return a.header.so < b.header.so; }
};
struct rlc_amd_rx_sdu_nr_t {
uint32_t rlc_sn = 0;
bool fully_received = false;
unique_byte_buffer_t buf;
std::list<rlc_amd_rx_pdu_nr> segments;
using segment_list_t = std::set<rlc_amd_rx_pdu_nr, rlc_amd_rx_pdu_nr_cmp>;
segment_list_t segments;
rlc_amd_rx_sdu_nr_t() = default;
explicit rlc_amd_rx_sdu_nr_t(uint32_t rlc_sn_) : rlc_sn(rlc_sn_) {}

@ -33,6 +33,8 @@
namespace srsran {
const static uint32_t max_tx_queue_size = 256;
const static uint32_t so_end_of_sdu = 0xFFFF;
/****************************************************************************
* RLC AM NR entity
***************************************************************************/
@ -76,6 +78,18 @@ bool rlc_am_nr_tx::has_data()
tx_sdu_queue.get_n_sdus() != 1; // or if there is a SDU queued up for transmission
}
/**
* Builds the RLC PDU.
*
* Called by the MAC, through one of the PHY worker threads.
*
* \param [payload] is a pointer to the buffer that will hold the PDU.
* \param [nof_bytes] is the number of bytes the RLC is allowed to fill.
*
* \returns the number of bytes written to the payload buffer.
* \remark: This will be called multiple times from the MAC,
* while there is something to TX and enough space in the TB.
*/
uint32_t rlc_am_nr_tx::read_pdu(uint8_t* payload, uint32_t nof_bytes)
{
std::lock_guard<std::mutex> lock(mutex);
@ -90,79 +104,90 @@ uint32_t rlc_am_nr_tx::read_pdu(uint8_t* payload, uint32_t nof_bytes)
if (do_status()) {
unique_byte_buffer_t tx_pdu = srsran::make_byte_buffer();
if (tx_pdu == nullptr) {
RlcError("couldn't allocate PDU in %s().", __FUNCTION__);
RlcError("Couldn't allocate PDU in %s().", __FUNCTION__);
return 0;
}
build_status_pdu(tx_pdu.get(), nof_bytes);
memcpy(payload, tx_pdu->msg, tx_pdu->N_bytes);
RlcDebug("status PDU built - %d bytes", tx_pdu->N_bytes);
RlcDebug("Status PDU built - %d bytes", tx_pdu->N_bytes);
return tx_pdu->N_bytes;
}
// Retransmit if required
if (not retx_queue.empty()) {
RlcInfo("re-transmission required. Retransmission queue size: %d", retx_queue.size());
unique_byte_buffer_t tx_pdu = srsran::make_byte_buffer();
if (tx_pdu == nullptr) {
RlcError("couldn't allocate PDU in %s().", __FUNCTION__);
return 0;
}
int retx_err = build_retx_pdu(tx_pdu, nof_bytes);
if (retx_err >= 0 && tx_pdu->N_bytes <= nof_bytes) {
memcpy(payload, tx_pdu->msg, tx_pdu->N_bytes);
return tx_pdu->N_bytes;
}
RlcInfo("Re-transmission required. Retransmission queue size: %d", retx_queue.size());
return build_retx_pdu(payload, nof_bytes);
}
// Send remaining segment, if it exists
if (sdu_under_segmentation.rlc_sn != INVALID_RLC_SN) {
if (not tx_window.has_sn(sdu_under_segmentation.rlc_sn)) {
sdu_under_segmentation.rlc_sn = INVALID_RLC_SN;
if (sdu_under_segmentation_sn != INVALID_RLC_SN) {
if (not tx_window.has_sn(sdu_under_segmentation_sn)) {
sdu_under_segmentation_sn = INVALID_RLC_SN;
RlcError("SDU currently being segmented does not exist in tx_window. Aborting segmentation SN=%d",
sdu_under_segmentation.rlc_sn);
sdu_under_segmentation_sn);
return 0;
}
return build_continuation_sdu_segment(tx_window[sdu_under_segmentation.rlc_sn], payload, nof_bytes);
return build_continuation_sdu_segment(tx_window[sdu_under_segmentation_sn], payload, nof_bytes);
}
// Check whether there is something to TX
if (tx_sdu_queue.is_empty()) {
RlcInfo("no data available to be sent");
RlcInfo("No data available to be sent");
return 0;
}
return build_new_pdu(payload, nof_bytes);
}
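// Illustrative calling pattern (sketch only; the MAC-side names below are hypothetical): the MAC fills
// one transport block by calling read_pdu() repeatedly until it returns 0 or the TB is full.
//
//   uint8_t  tb[256];
//   uint32_t offset = 0;
//   while (offset < sizeof(tb)) {
//     uint32_t n = rlc_tx.read_pdu(tb + offset, sizeof(tb) - offset);
//     if (n == 0) {
//       break; // nothing left to transmit
//     }
//     offset += n;
//   }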
/**
* Builds a new RLC PDU.
*
* This will be called after checking whether control, retransmission,
* or segment PDUs need to be transmitted first.
*
* This will read an SDU from the SDU queue, build a new PDU, and add it to the tx_window.
* SDU segmentation will be done if necessary.
*
* \param [payload] is a pointer to the buffer that will hold the PDU.
* \param [nof_bytes] is the number of bytes the RLC is allowed to fill.
*
* \returns the number of bytes written to the payload buffer.
*/
uint32_t rlc_am_nr_tx::build_new_pdu(uint8_t* payload, uint32_t nof_bytes)
{
// Read new SDU from TX queue
unique_byte_buffer_t tx_sdu;
RlcDebug("reading from RLC SDU queue. Queue size %d", tx_sdu_queue.size());
RlcDebug("Reading from RLC SDU queue. Queue size %d", tx_sdu_queue.size());
do {
tx_sdu = tx_sdu_queue.read();
} while (tx_sdu == nullptr && tx_sdu_queue.size() != 0);
if (tx_sdu != nullptr) {
RlcDebug("read RLC SDU - %d bytes", tx_sdu->N_bytes);
RlcDebug("Read RLC SDU - %d bytes", tx_sdu->N_bytes);
} else {
RlcDebug("no SDUs left in the tx queue.");
RlcDebug("No SDUs left in the tx queue.");
return 0;
}
// insert newly assigned SN into window and use reference for in-place operations
// NOTE: from now on, we must not return from this function before incrementing tx_next
rlc_amd_tx_pdu_nr& tx_pdu = tx_window.add_pdu(st.tx_next);
tx_pdu.buf = srsran::make_byte_buffer();
if (tx_pdu.buf == nullptr) {
RlcError("couldn't allocate PDU in %s().", __FUNCTION__);
tx_pdu.sdu_buf = srsran::make_byte_buffer();
if (tx_pdu.sdu_buf == nullptr) {
RlcError("Couldn't allocate PDU in %s().", __FUNCTION__);
return 0;
}
// Copy SDU into TX window SDU info
memcpy(tx_pdu.sdu_buf->msg, tx_sdu->msg, tx_sdu->N_bytes);
tx_pdu.sdu_buf->N_bytes = tx_sdu->N_bytes;
// Segment new SDU if necessary
if (tx_sdu->N_bytes + min_hdr_size > nof_bytes) {
RlcInfo("trying to build PDU segment from SDU.");
return build_new_sdu_segment(std::move(tx_sdu), tx_pdu, payload, nof_bytes);
return build_new_sdu_segment(tx_pdu, payload, nof_bytes);
}
memcpy(tx_pdu.buf->msg, tx_sdu->msg, tx_sdu->N_bytes);
tx_pdu.buf->N_bytes = tx_sdu->N_bytes;
// Prepare header
rlc_am_nr_pdu_header_t hdr = {};
hdr.dc = RLC_DC_FIELD_DATA_PDU;
@ -188,18 +213,25 @@ uint32_t rlc_am_nr_tx::read_pdu(uint8_t* payload, uint32_t nof_bytes)
return tx_sdu->N_bytes;
}
int rlc_am_nr_tx::build_new_sdu_segment(unique_byte_buffer_t tx_sdu,
rlc_amd_tx_pdu_nr& tx_pdu,
uint8_t* payload,
uint32_t nof_bytes)
/**
* Builds a new RLC PDU segment from an RLC SDU.
*
* \param [tx_pdu] is the tx_pdu info contained in the tx_window.
* \param [payload] is a pointer to the MAC buffer that will hold the PDU segment.
* \param [nof_bytes] is the number of bytes the RLC is allowed to fill.
*
* \returns the number of bytes written to the payload buffer.
* \remark: This function assumes that the SDU has already been copied to tx_pdu.sdu_buf.
*/
uint32_t rlc_am_nr_tx::build_new_sdu_segment(rlc_amd_tx_pdu_nr& tx_pdu, uint8_t* payload, uint32_t nof_bytes)
{
RlcInfo("creating new SDU segment. Tx SDU (%d B), nof_bytes=%d B ", tx_sdu->N_bytes, nof_bytes);
RlcInfo("creating new SDU segment. Tx SDU (%d B), nof_bytes=%d B ", tx_pdu.sdu_buf->N_bytes, nof_bytes);
// Sanity check: can this SDU be sent in a single PDU?
if ((tx_sdu->N_bytes + min_hdr_size) < nof_bytes) {
if ((tx_pdu.sdu_buf->N_bytes + min_hdr_size) < nof_bytes) {
RlcError("calling build_new_sdu_segment(), but there are enough bytes to tx in a single PDU. Tx SDU (%d B), "
"nof_bytes=%d B ",
tx_sdu->N_bytes,
tx_pdu.sdu_buf->N_bytes,
nof_bytes);
return 0;
}
@ -232,11 +264,10 @@ int rlc_am_nr_tx::build_new_sdu_segment(unique_byte_buffer_t tx_sdu,
// Copy PDU to payload
uint32_t segment_payload_len = nof_bytes - hdr_len;
srsran_assert((hdr_len + segment_payload_len) <= nof_bytes, "Error calculating hdr_len and segment_payload_len");
memcpy(&payload[hdr_len], tx_pdu.buf->msg, segment_payload_len);
memcpy(&payload[hdr_len], tx_pdu.sdu_buf->msg, segment_payload_len);
// Save SDU currently being segmented
sdu_under_segmentation.rlc_sn = st.tx_next;
sdu_under_segmentation.buf = std::move(tx_sdu);
sdu_under_segmentation_sn = st.tx_next;
// Store Segment Info
rlc_amd_tx_pdu_nr::pdu_segment segment_info;
@ -245,22 +276,31 @@ int rlc_am_nr_tx::build_new_sdu_segment(unique_byte_buffer_t tx_sdu,
return hdr_len + segment_payload_len;
}
int rlc_am_nr_tx::build_continuation_sdu_segment(rlc_amd_tx_pdu_nr& tx_pdu, uint8_t* payload, uint32_t nof_bytes)
/**
* Builds a PDU segment for an RLC SDU whose segmentation is already ongoing.
*
* \param [tx_pdu] is the tx_pdu info contained in the tx_window.
* \param [payload] is a pointer to the MAC buffer that will hold the PDU segment.
* \param [nof_bytes] is the number of bytes the RLC is allowed to fill.
*
* \returns the number of bytes written to the payload buffer.
* \remark: This function assumes that the SDU has already been copied to tx_pdu.sdu_buf.
*/
uint32_t rlc_am_nr_tx::build_continuation_sdu_segment(rlc_amd_tx_pdu_nr& tx_pdu, uint8_t* payload, uint32_t nof_bytes)
{
RlcInfo("continuing SDU segment. SN=%d, Tx SDU (%d B), nof_bytes=%d B ",
sdu_under_segmentation.rlc_sn,
sdu_under_segmentation.buf->N_bytes,
sdu_under_segmentation_sn,
tx_pdu.sdu_buf->N_bytes,
nof_bytes);
// Sanity check: is there an initial SDU segment?
if (tx_pdu.segment_list.empty()) {
RlcError("build_continuation_sdu_segment was called, but there was no initial segment. SN=%d, Tx SDU (%d B), "
"nof_bytes=%d B ",
sdu_under_segmentation.rlc_sn,
sdu_under_segmentation.buf->N_bytes,
sdu_under_segmentation_sn,
tx_pdu.sdu_buf->N_bytes,
nof_bytes);
sdu_under_segmentation.rlc_sn = INVALID_RLC_SN;
sdu_under_segmentation.buf = nullptr;
sdu_under_segmentation_sn = INVALID_RLC_SN;
return 0;
}
@ -276,14 +316,15 @@ int rlc_am_nr_tx::build_continuation_sdu_segment(rlc_amd_tx_pdu_nr& tx_pdu, uint
uint32_t last_byte = seg.so + seg.payload_len;
RlcDebug("continuing SDU segment. SN=%d, last byte transmitted %d", tx_pdu.rlc_sn, last_byte);
// Sanity check: last byte must be smaller than SDU
if (sdu_under_segmentation.buf->N_bytes < last_byte) {
RlcError("last byte transmitted larger than SDU len. SDU len=%d B, last_byte=%d B", tx_pdu.buf->N_bytes, last_byte);
// Sanity check: last byte must be smaller than SDU size
if (last_byte > tx_pdu.sdu_buf->N_bytes) {
RlcError(
"last byte transmitted larger than SDU len. SDU len=%d B, last_byte=%d B", tx_pdu.sdu_buf->N_bytes, last_byte);
return 0;
}
uint32_t segment_payload_full_len = sdu_under_segmentation.buf->N_bytes - last_byte + max_hdr_size; // SO is included
uint32_t segment_payload_len = sdu_under_segmentation.buf->N_bytes - last_byte;
uint32_t segment_payload_full_len = tx_pdu.sdu_buf->N_bytes - last_byte + max_hdr_size; // SO is included
uint32_t segment_payload_len = tx_pdu.sdu_buf->N_bytes - last_byte;
rlc_nr_si_field_t si = {};
if (segment_payload_full_len > nof_bytes) {
@ -322,7 +363,7 @@ int rlc_am_nr_tx::build_continuation_sdu_segment(rlc_amd_tx_pdu_nr& tx_pdu, uint
// Copy PDU to payload
srsran_assert((hdr_len + segment_payload_len) <= nof_bytes, "Error calculating hdr_len and segment_payload_len");
memcpy(&payload[hdr_len], &tx_pdu.buf->msg[last_byte], segment_payload_len);
memcpy(&payload[hdr_len], &tx_pdu.sdu_buf->msg[last_byte], segment_payload_len);
// Store PDU segment info into tx_window
rlc_amd_tx_pdu_nr::pdu_segment segment_info = {};
@ -336,22 +377,37 @@ int rlc_am_nr_tx::build_continuation_sdu_segment(rlc_amd_tx_pdu_nr& tx_pdu, uint
} else {
RlcInfo("grant is large enough for full SDU."
"Removing current SDU info");
sdu_under_segmentation.rlc_sn = INVALID_RLC_SN;
sdu_under_segmentation.buf = nullptr;
sdu_under_segmentation_sn = INVALID_RLC_SN;
// SDU is fully TX'ed. Increment TX_Next
st.tx_next++;
}
return hdr_len + segment_payload_len;
}
int rlc_am_nr_tx::build_retx_pdu(unique_byte_buffer_t& tx_pdu, uint32_t nof_bytes)
/**
* Builds a retx RLC PDU.
*
* This will use the retx_queue to get information about the RLC PDU
* being retx'ed. The retx may have been previously transmitted as
* a full SDU or an SDU segment.
*
* \param [payload] is a pointer to the MAC buffer that will hold the PDU segment.
* \param [nof_bytes] is the number of bytes the RLC is allowed to fill.
*
* \returns the number of bytes written to the payload buffer.
* \remark: This function assumes that the SDU has already been copied to tx_pdu.sdu_buf.
*/
uint32_t rlc_am_nr_tx::build_retx_pdu(uint8_t* payload, uint32_t nof_bytes)
{
// Check there is at least 1 element before calling front()
if (retx_queue.empty()) {
RlcError("in build_retx_pdu(): retx_queue is empty");
return SRSRAN_ERROR;
return 0;
}
rlc_amd_retx_t retx = retx_queue.front();
rlc_amd_retx_t& retx = retx_queue.front();
// Sanity check - drop any retx SNs not present in tx_window
while (not tx_window.has_sn(retx.sn)) {
@ -361,38 +417,252 @@ int rlc_am_nr_tx::build_retx_pdu(unique_byte_buffer_t& tx_pdu, uint32_t nof_byte
retx = retx_queue.front();
} else {
RlcWarning("empty retx queue, cannot provide retx PDU");
return SRSRAN_ERROR;
return 0;
}
}
// Update & write header
rlc_am_nr_pdu_header_t new_header = tx_window[retx.sn].header;
new_header.p = 0;
uint32_t hdr_len = rlc_am_nr_write_data_pdu_header(new_header, tx_pdu.get());
// Check if we exceed allocated number of bytes
if (hdr_len + tx_window[retx.sn].buf->N_bytes > nof_bytes) {
RlcWarning("segmentation not supported yet. Cannot provide retx PDU");
return SRSRAN_ERROR;
RlcDebug("RETX - SN=%d, is_segment=%s, current_so=%d, so_start=%d, so_end=%d",
retx.sn,
retx.is_segment ? "true" : "false",
retx.current_so,
retx.so_start,
retx.so_end);
// Is segmentation/re-segmentation required?
bool segmentation_required = is_retx_segmentation_required(retx, nof_bytes);
if (segmentation_required) {
return build_retx_pdu_with_segmentation(retx, payload, nof_bytes);
}
// TODO Consider re-segmentation
return build_retx_pdu_without_segmentation(retx, payload, nof_bytes);
}
memcpy(&tx_pdu->msg[hdr_len], tx_window[retx.sn].buf->msg, tx_window[retx.sn].buf->N_bytes);
tx_pdu->N_bytes += tx_window[retx.sn].buf->N_bytes;
/**
* Builds a retx RLC PDU, without requiring (re-)segmentation.
*
* The RETX PDU may be transporting a full SDU or an SDU segment.
*
* \param [retx] is the retx info contained in the retx_queue.
* \param [payload] is a pointer to the MAC buffer that will hold the PDU segment.
* \param [nof_bytes] is the number of bytes the RLC is allowed to fill.
*
* \returns the number of bytes written to the payload buffer.
* \remark this function will not update the SI. This means that if the retx is of the last
* SDU segment, the SI should already be of the `last_segment` type.
*/
uint32_t rlc_am_nr_tx::build_retx_pdu_without_segmentation(rlc_amd_retx_t& retx, uint8_t* payload, uint32_t nof_bytes)
{
srsran_assert(tx_window.has_sn(retx.sn), "Called %s without checking retx SN", __FUNCTION__);
srsran_assert(not is_retx_segmentation_required(retx, nof_bytes),
"Called %s without checking if segmentation was required",
__FUNCTION__);
// Get tx_pdu info from tx_window
rlc_amd_tx_pdu_nr& tx_pdu = tx_window[retx.sn];
// Get expected header and payload len
uint32_t expected_hdr_len = get_retx_expected_hdr_len(retx);
uint32_t retx_payload_len = retx.is_segment ? (retx.so_end - retx.current_so) : tx_window[retx.sn].sdu_buf->N_bytes;
srsran_assert(nof_bytes >= (expected_hdr_len + retx_payload_len),
"Called %s but segmentation is required. nof_bytes=%d, expeced_hdr_len=%d, retx_payload_len=%d",
__FUNCTION__,
nof_bytes,
expected_hdr_len,
retx_payload_len);
// Log RETX info
RlcDebug("SDU%scan be fully re-transmitted. SN=%d, nof_bytes=%d, expected_hdr_len=%d, "
"current_so=%d, so_start=%d, so_end=%d",
retx.is_segment ? " segment " : " ",
retx.sn,
nof_bytes,
expected_hdr_len,
retx.current_so,
retx.so_start,
retx.so_end);
retx_queue.pop();
// Update & write header
uint32_t current_so = 0;
rlc_nr_si_field_t si = rlc_nr_si_field_t::full_sdu;
if (retx.is_segment) {
if (retx.current_so == 0) {
si = rlc_nr_si_field_t::first_segment;
} else if ((retx.current_so + retx_payload_len) < tx_pdu.sdu_buf->N_bytes) {
si = rlc_nr_si_field_t::neither_first_nor_last_segment;
} else {
si = rlc_nr_si_field_t::last_segment;
}
current_so = retx.current_so;
}
rlc_am_nr_pdu_header_t new_header = tx_pdu.header;
new_header.p = 0;
new_header.si = si;
new_header.so = current_so;
uint32_t hdr_len = rlc_am_nr_write_data_pdu_header(new_header, payload);
// Write payload into PDU
uint32_t pdu_bytes = 0;
uint32_t retx_pdu_payload_size = 0;
if (not retx.is_segment) {
// RETX full SDU
retx_pdu_payload_size = tx_window[retx.sn].sdu_buf->N_bytes;
pdu_bytes = hdr_len + tx_window[retx.sn].sdu_buf->N_bytes;
} else {
// RETX SDU segment
retx_pdu_payload_size = (retx.so_end - retx.current_so);
pdu_bytes = hdr_len + retx_pdu_payload_size;
}
srsran_assert(pdu_bytes <= nof_bytes, "Error calculating hdr_len and pdu_payload_len");
memcpy(&payload[hdr_len], &tx_pdu.sdu_buf->msg[retx.current_so], retx_pdu_payload_size);
RlcHexInfo(tx_window[retx.sn].buf->msg,
tx_window[retx.sn].buf->N_bytes,
// Update RETX queue and log
retx_queue.pop();
RlcHexInfo(tx_window[retx.sn].sdu_buf->msg,
tx_window[retx.sn].sdu_buf->N_bytes,
"Original SDU SN=%d (%d B) (attempt %d/%d)",
retx.sn,
tx_window[retx.sn].buf->N_bytes,
tx_window[retx.sn].sdu_buf->N_bytes,
tx_window[retx.sn].retx_count + 1,
cfg.max_retx_thresh);
RlcHexInfo(tx_pdu->msg, tx_pdu->N_bytes, "retx PDU SN=%d (%d B)", retx.sn, tx_pdu->N_bytes);
RlcHexInfo(payload, nof_bytes, "retx PDU SN=%d (%d B)", retx.sn, nof_bytes);
log_rlc_am_nr_pdu_header_to_string(logger.debug, new_header);
// debug_state();
return SRSRAN_SUCCESS;
debug_state();
return pdu_bytes;
}
/**
* Builds a retx RLC PDU that requires (re-)segmentation.
*
* \param [retx] is the retx info contained in the retx_queue.
* \param [payload] is a pointer to the MAC buffer that will hold the PDU segment.
* \param [nof_bytes] is the number of bytes the RLC is allowed to fill.
*
* \returns the number of bytes written to the payload buffer.
* \remark: This function assumes that the SDU has already been copied to tx_pdu.sdu_buf.
*/
uint32_t rlc_am_nr_tx::build_retx_pdu_with_segmentation(rlc_amd_retx_t& retx, uint8_t* payload, uint32_t nof_bytes)
{
// Get tx_pdu info from tx_window
srsran_assert(tx_window.has_sn(retx.sn), "Called %s without checking retx SN", __FUNCTION__);
srsran_assert(is_retx_segmentation_required(retx, nof_bytes),
"Called %s without checking if segmentation was not required",
__FUNCTION__);
rlc_amd_tx_pdu_nr& tx_pdu = tx_window[retx.sn];
// Is this an SDU segment or a full SDU?
if (not retx.is_segment) {
RlcDebug("Creating SDU segment from full SDU. SN=%d Tx SDU (%d B), nof_bytes=%d B ",
retx.sn,
tx_pdu.sdu_buf->N_bytes,
nof_bytes);
} else {
RlcDebug("Creating SDU segment from SDU segment. SN=%d, current_so=%d, so_start=%d, so_end=%d",
retx.sn,
retx.current_so,
retx.so_start,
retx.so_end);
}
uint32_t expected_hdr_len = min_hdr_size;
rlc_nr_si_field_t si = rlc_nr_si_field_t::first_segment;
if (retx.current_so != 0) {
si = rlc_nr_si_field_t::neither_first_nor_last_segment;
expected_hdr_len = max_hdr_size;
}
// Sanity check: are there enough bytes for header plus data?
if (nof_bytes <= expected_hdr_len) {
RlcError("called %s, but there are not enough bytes for data plus header. SN=%d", __FUNCTION__, retx.sn);
return 0;
}
// Sanity check: could this have been transmitted without segmentation?
if (nof_bytes > (tx_pdu.sdu_buf->N_bytes + expected_hdr_len)) {
RlcError("called %s, but there are enough bytes to avoid segmentation. SN=%d", __FUNCTION__, retx.sn);
return 0;
}
// Number of SDU bytes that fit in the grant after the expected header
uint32_t retx_pdu_payload_size = nof_bytes - expected_hdr_len;
// Write header
rlc_am_nr_pdu_header_t hdr = tx_pdu.header;
hdr.so = retx.current_so;
hdr.si = si;
uint32_t hdr_len = rlc_am_nr_write_data_pdu_header(hdr, payload);
if (hdr_len >= nof_bytes || hdr_len != expected_hdr_len) {
log_rlc_am_nr_pdu_header_to_string(logger.error, hdr);
RlcError("Error writing AMD PDU header. nof_bytes=%d, hdr_len=%d", nof_bytes, hdr_len);
return 0;
}
log_rlc_am_nr_pdu_header_to_string(logger.info, hdr);
// Copy SDU segment into payload
srsran_assert((hdr_len + retx_pdu_payload_size) <= nof_bytes, "Error calculating hdr_len and segment_payload_len");
memcpy(&payload[hdr_len], tx_pdu.sdu_buf->msg, retx_pdu_payload_size);
// Update retx queue
retx.is_segment = true;
retx.current_so = retx.current_so + retx_pdu_payload_size;
RlcDebug("Updated RETX info. is_segment=%s, current_so=%d, so_start=%d, so_end=%d",
retx.is_segment ? "true" : "false",
retx.current_so,
retx.so_start,
retx.so_end);
if (retx.current_so >= tx_pdu.sdu_buf->N_bytes) {
RlcError("Current SO larger or equal to SDU size when creating SDU segment. SN=%d, current SO=%d, SO_start=%d, "
"SO_end=%d",
retx.sn,
retx.current_so,
retx.so_start,
retx.so_end);
return 0;
}
if (retx.current_so >= retx.so_end) {
RlcError("Current SO larger than SO end. SN=%d, current SO=%d, SO_start=%d, SO_end=%s",
retx.sn,
retx.current_so,
retx.so_start,
retx.so_end);
return 0;
}
// Update SDU segment info
// TODO
return hdr_len + retx_pdu_payload_size;
}
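// Worked example of the retx segmentation above (illustrative values; assumes min_hdr_size=2 and
// max_hdr_size=4, i.e. 12-bit SNs with a 2-byte SO field). RETX of a full 10 B SDU (so_start=0,
// so_end=10) with repeated 6 B grants:
//   call 1: segmentation required (6 < 10 + 2)       -> first_segment, 2 B header + 4 B payload, current_so=4
//   call 2: still required (6 < (10 - 4) + 4)        -> middle segment, 4 B header + 2 B payload, current_so=6
//   call 3: still required (6 < (10 - 6) + 4)        -> middle segment, 4 B header + 2 B payload, current_so=8
//   call 4: no longer required (6 >= (10 - 8) + 4)   -> last_segment via build_retx_pdu_without_segmentation(),
//           4 B header + 2 B payload, retx popped from the queue.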
bool rlc_am_nr_tx::is_retx_segmentation_required(const rlc_amd_retx_t& retx, uint32_t nof_bytes)
{
bool segmentation_required = false;
if (retx.is_segment) {
uint32_t expected_hdr_size = retx.current_so == 0 ? min_hdr_size : max_hdr_size;
if (nof_bytes < ((retx.so_end - retx.current_so) + expected_hdr_size)) {
RlcInfo("Re-segmentation required for RETX. SN=%d", retx.sn);
segmentation_required = true;
}
} else {
if (nof_bytes < (tx_window[retx.sn].sdu_buf->N_bytes + min_hdr_size)) {
RlcInfo("Segmentation required for RETX. SN=%d", retx.sn);
segmentation_required = true;
}
}
return segmentation_required;
}
uint32_t rlc_am_nr_tx::get_retx_expected_hdr_len(const rlc_amd_retx_t& retx)
{
uint32_t expected_hdr_len = min_hdr_size;
if (retx.is_segment && retx.current_so != 0) {
expected_hdr_len = max_hdr_size;
}
return expected_hdr_len;
}
uint32_t rlc_am_nr_tx::build_status_pdu(byte_buffer_t* payload, uint32_t nof_bytes)
@ -435,7 +705,7 @@ void rlc_am_nr_tx::handle_control_pdu(uint8_t* payload, uint32_t nof_bytes)
// Process ACKs
uint32_t stop_sn = status.N_nack == 0
? status.ack_sn
: status.nacks[0].nack_sn - 1; // Stop processing ACKs at the first NACK, if it exists.
: status.nacks[0].nack_sn; // Stop processing ACKs at the first NACK, if it exists.
if (stop_sn > st.tx_next) {
RlcError("Received ACK or NACK larger than TX_NEXT. Ignoring status report");
return;
@ -454,25 +724,44 @@ void rlc_am_nr_tx::handle_control_pdu(uint8_t* payload, uint32_t nof_bytes)
// Process N_acks
for (uint32_t nack_idx = 0; nack_idx < status.N_nack; nack_idx++) {
if (st.tx_next_ack <= status.nacks[nack_idx].nack_sn && status.nacks[nack_idx].nack_sn <= st.tx_next) {
uint32_t nack_sn = status.nacks[nack_idx].nack_sn;
auto nack = status.nacks[nack_idx];
uint32_t nack_sn = nack.nack_sn;
if (tx_window.has_sn(nack_sn)) {
auto& pdu = tx_window[nack_sn];
if (nack.has_so) {
// NACK'ing missing bytes in SDU segment.
// Retransmit all SDU segments within those missing bytes.
if (pdu.segment_list.empty()) {
RlcError("Received NACK with SO, but there is no segment information");
}
for (std::list<rlc_amd_tx_pdu_nr::pdu_segment>::iterator segm = pdu.segment_list.begin();
segm != pdu.segment_list.end();
segm++) {
if (segm->so >= nack.so_start && segm->so < nack.so_end) {
rlc_amd_retx_t& retx = retx_queue.push();
retx.sn = nack_sn;
retx.is_segment = true;
retx.so_start = segm->so;
retx.current_so = segm->so;
retx.so_end = segm->so + segm->payload_len;
}
}
} else {
// NACK'ing full SDU.
// add to retx queue if it's not already there
if (not retx_queue.has_sn(nack_sn)) {
// increment Retx counter and inform upper layers if needed
pdu.retx_count++;
// check_sn_reached_max_retx(nack_sn);
// check_sn_reached_max_retx(nack_sn);
rlc_amd_retx_t& retx = retx_queue.push();
srsran_expect(tx_window[nack_sn].rlc_sn == nack_sn,
"Incorrect RLC SN=%d!=%d being accessed",
tx_window[nack_sn].rlc_sn,
nack_sn);
retx.sn = nack_sn;
retx.is_segment = false;
retx.so_start = 0;
retx.so_end = pdu.buf->N_bytes;
retx.current_so = 0;
retx.so_end = pdu.sdu_buf->N_bytes;
}
}
}
}
@ -575,6 +864,7 @@ bool rlc_am_nr_tx::sdu_queue_is_full()
void rlc_am_nr_tx::empty_queue() {}
void rlc_am_nr_tx::stop() {}
/*
* Window helpers
*/
@ -583,12 +873,24 @@ uint32_t rlc_am_nr_tx::tx_mod_base_nr(uint32_t sn) const
return (sn - st.tx_next_ack) % mod_nr;
}
bool rlc_am_nr_tx::inside_tx_window(uint32_t sn)
bool rlc_am_nr_tx::inside_tx_window(uint32_t sn) const
{
// TX_Next_Ack <= SN < TX_Next_Ack + AM_Window_Size
return tx_mod_base_nr(sn) < RLC_AM_NR_WINDOW_SIZE;
}
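// Worked example (illustrative values, assuming 12-bit SNs: mod_nr = 4096, RLC_AM_NR_WINDOW_SIZE = 2048):
//   st.tx_next_ack = 4090, sn = 5    -> tx_mod_base_nr(5)    = (5 - 4090)    mod 4096 = 11   -> inside
//   st.tx_next_ack = 4090, sn = 2100 -> tx_mod_base_nr(2100) = (2100 - 4090) mod 4096 = 2106 -> outside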
/*
* Debug Helpers
*/
void rlc_am_nr_tx::debug_state() const
{
RlcDebug("TX entity state: Tx_Next %d, Rx_Next_Ack %d, POLL_SN %d, PDU_WITHOUT_POLL %d, BYTE_WITHOUT_POLL %d",
st.tx_next,
st.tx_next_ack,
st.poll_sn,
st.pdu_without_poll,
st.byte_without_poll);
}
/****************************************************************************
* Rx subclass implementation
***************************************************************************/
@ -823,8 +1125,8 @@ int rlc_am_nr_rx::handle_segment_data_sdu(const rlc_am_nr_pdu_header_t& header,
memcpy(pdu_segment.buf->msg, payload + hdr_len, nof_bytes - hdr_len); // Don't copy header
pdu_segment.buf->N_bytes = nof_bytes - hdr_len;
// Store SDU segment. TODO sort by SO and check for duplicate bytes.
rx_sdu.segments.push_back(std::move(pdu_segment));
// Store SDU segment. Sort by SO and check for duplicate bytes.
insert_received_segment(std::move(pdu_segment), rx_sdu.segments);
// Check whether all segments have been received
rx_sdu.fully_received = have_all_segments_been_received(rx_sdu.segments);
@ -861,13 +1163,42 @@ uint32_t rlc_am_nr_rx::get_status_pdu(rlc_am_nr_status_pdu_t* status, uint32_t m
uint32_t i = status->ack_sn;
while (rx_mod_base_nr(i) <= rx_mod_base_nr(st.rx_highest_status)) {
if (rx_window.has_sn(i) || i == st.rx_highest_status) {
// only update ACK_SN if this SN has been received, or if we reached the maximum possible SN
if ((rx_window.has_sn(i) && rx_window[i].fully_received) || i == st.rx_highest_status) {
// only update ACK_SN if this SN has been fully received, or if we reached the maximum possible SN
status->ack_sn = i;
} else {
if (not rx_window.has_sn(i)) {
// No segment received, NACK the whole SDU
status->nacks[status->N_nack].nack_sn = i;
status->N_nack++;
} else if (not rx_window[i].fully_received) {
// Some segments were received, but not all.
// NACK non consecutive missing bytes
uint32_t last_so = 0;
bool last_segment_rx = false;
for (auto segm = rx_window[i].segments.begin(); segm != rx_window[i].segments.end(); segm++) {
if (segm->header.so != last_so) {
// Some bytes were not received
status->nacks[status->N_nack].nack_sn = i;
status->nacks[status->N_nack].has_so = true;
status->nacks[status->N_nack].so_start = last_so;
status->nacks[status->N_nack].so_end = segm->header.so;
status->N_nack++;
}
if (segm->header.si == rlc_nr_si_field_t::last_segment) {
last_segment_rx = true;
}
last_so = segm->header.so + segm->buf->N_bytes;
}
if (not last_segment_rx) {
status->nacks[status->N_nack].nack_sn = i;
status->nacks[status->N_nack].has_so = true;
status->nacks[status->N_nack].so_start = last_so;
status->nacks[status->N_nack].so_end = so_end_of_sdu;
status->N_nack++;
}
}
}
// make sure we don't exceed grant size (FIXME)
rlc_am_nr_write_status_pdu(*status, rlc_am_nr_sn_size_t::size12bits, &tmp_buf);
@ -920,7 +1251,7 @@ void rlc_am_nr_rx::timer_expired(uint32_t timeout_id)
*/
for (uint32_t tmp_sn = st.rx_next_status_trigger; tmp_sn < st.rx_next_status_trigger + RLC_AM_WINDOW_SIZE;
tmp_sn++) {
if (not rx_window.has_sn(tmp_sn) || not rx_window[tmp_sn].fully_received) {
if (not rx_window.has_sn(tmp_sn)) {
st.rx_highest_status = tmp_sn;
break;
}
@ -983,14 +1314,21 @@ uint32_t rlc_am_nr_rx::get_rx_buffered_bytes()
return 0;
}
bool rlc_am_nr_rx::have_all_segments_been_received(const std::list<rlc_amd_rx_pdu_nr>& segment_list)
void rlc_am_nr_rx::insert_received_segment(rlc_amd_rx_pdu_nr segment,
std::set<rlc_amd_rx_pdu_nr, rlc_amd_rx_pdu_nr_cmp>& segment_list) const
{
segment_list.insert(std::move(segment));
}
bool rlc_am_nr_rx::have_all_segments_been_received(
const std::set<rlc_amd_rx_pdu_nr, rlc_amd_rx_pdu_nr_cmp>& segment_list) const
{
if (segment_list.empty()) {
return false;
}
// Check if we have received the last segment
if ((--segment_list.end())->header.si != rlc_nr_si_field_t::last_segment) {
if (segment_list.rbegin()->header.si != rlc_nr_si_field_t::last_segment) {
return false;
}
@ -1008,7 +1346,7 @@ bool rlc_am_nr_rx::have_all_segments_been_received(const std::list<rlc_amd_rx_pd
/*
* Debug Helpers
*/
void rlc_am_nr_rx::debug_state()
void rlc_am_nr_rx::debug_state() const
{
RlcDebug("RX entity state: Rx_Next %d, Rx_Next_Status_Trigger %d, Rx_Highest_Status %d, Rx_Next_Highest",
st.rx_next,

@ -194,16 +194,36 @@ uint32_t rlc_am_nr_read_status_pdu(const uint8_t* payload,
// reset number of acks
status->N_nack = 0;
if (e1) {
while (e1 != 0) {
// E1 flag set, read a NACK_SN
rlc_status_nack_t nack = {};
nack.nack_sn = (*ptr & 0xff) << 4;
ptr++;
e1 = *ptr & 0x08;
uint8_t e2 = *ptr & 0x04;
// uint8_t len2 = (*ptr & 0xF0) >> 4;
nack.nack_sn |= (*ptr & 0xF0) >> 4;
status->nacks[status->N_nack] = nack;
ptr++;
if (e2 != 0) {
status->nacks[status->N_nack].has_so = true;
status->nacks[status->N_nack].so_start = (*ptr) << 8;
ptr++;
status->nacks[status->N_nack].so_start |= (*ptr);
ptr++;
status->nacks[status->N_nack].so_end = (*ptr) << 8;
ptr++;
status->nacks[status->N_nack].so_end |= (*ptr);
ptr++;
}
status->N_nack++;
if ((ptr - payload) > nof_bytes) {
fprintf(stderr, "Malformed PDU, trying to read more bytes than it is available\n");
return 0;
}
}
}
@ -233,17 +253,41 @@ int32_t rlc_am_nr_write_status_pdu(const rlc_am_nr_status_pdu_t& status_pdu,
ptr++;
// write E1 flag in octet 3
*ptr = (status_pdu.N_nack > 0) ? 0x80 : 0x00;
if (status_pdu.N_nack > 0) {
*ptr = 0x80;
} else {
*ptr = 0x00;
}
ptr++;
if (status_pdu.N_nack > 0) {
for (uint32_t i = 0; i < status_pdu.N_nack; i++) {
// write first 8 bit of NACK_SN
*ptr = (status_pdu.nacks[0].nack_sn >> 4) & 0xff;
*ptr = (status_pdu.nacks[i].nack_sn >> 4) & 0xff;
ptr++;
// write remaining 4 bits of NACK_SN
*ptr = (status_pdu.nacks[0].nack_sn & 0x0f) << 4;
*ptr = (status_pdu.nacks[i].nack_sn & 0x0f) << 4;
// Set E1 if necessary
if (i < (uint32_t)(status_pdu.N_nack - 1)) {
*ptr |= 0x08;
}
if (status_pdu.nacks[i].has_so) {
// Set E2
*ptr |= 0x04;
ptr++;
(*ptr) = status_pdu.nacks[i].so_start >> 8;
ptr++;
(*ptr) = status_pdu.nacks[i].so_start;
ptr++;
(*ptr) = status_pdu.nacks[i].so_end >> 8;
ptr++;
(*ptr) = status_pdu.nacks[i].so_end;
}
ptr++;
}
}
} else {
// 18bit SN

@ -38,7 +38,26 @@ void test_optional_int()
TESTASSERT(opt == opt2);
}
struct C {
std::unique_ptr<int> val;
C(int val = 0) : val(std::make_unique<int>(val)) {}
};
void test_optional_move_only()
{
optional<C> a, b;
a.emplace(C{});
TESTASSERT(a.has_value());
TESTASSERT_EQ(0, *a.value().val);
TESTASSERT(not b.has_value());
b.emplace(C{5});
a = std::move(b);
TESTASSERT_EQ(5, *a.value().val);
}
int main()
{
test_optional_int();
test_optional_move_only();
}

@ -37,8 +37,8 @@ target_link_libraries(rlc_am_nr_test srsran_rlc srsran_phy srsran_common)
add_nr_test(rlc_am_nr_test rlc_am_nr_test)
add_executable(rlc_am_nr_pdu_test rlc_am_nr_pdu_test.cc)
target_link_libraries(rlc_am_nr_pdu_test srsran_rlc srsran_phy)
add_nr_test(rlc_am_nr_pdu_test rlc_am_nr_pdu_test)
target_link_libraries(rlc_am_nr_pdu_test srsran_rlc srsran_phy srsran_mac srsran_common )
add_nr_test(rlc_am_nr_pdu_test rlc_am_nr_pdu_test )
add_executable(rlc_stress_test rlc_stress_test.cc)
target_link_libraries(rlc_stress_test srsran_rlc srsran_mac srsran_phy srsran_common ${Boost_LIBRARIES} ${ATOMIC_LIBS})
@ -48,6 +48,7 @@ add_lte_test(rlc_tm_stress_test rlc_stress_test --mode=TM --loglevel 1 --random_
add_nr_test(rlc_um6_nr_stress_test rlc_stress_test --rat NR --mode=UM6 --loglevel 1)
add_nr_test(rlc_um12_nr_stress_test rlc_stress_test --rat NR --mode=UM12 --loglevel 1)
#add_nr_test(rlc_am12_nr_stress_test rlc_stress_test --rat NR --mode=AM12 --loglevel 1)
add_executable(rlc_um_data_test rlc_um_data_test.cc)
target_link_libraries(rlc_um_data_test srsran_rlc srsran_phy srsran_common)

@ -19,48 +19,38 @@
*
*/
#include "srsran/common/test_common.h"
#include "srsran/config.h"
#include "srsran/rlc/rlc.h"
#include "srsran/rlc/rlc_am_nr_packing.h"
#include <array>
#include <getopt.h>
#include <iostream>
#include <memory>
#include <vector>
#define TESTASSERT(cond) \
{ \
if (!(cond)) { \
std::cout << "[" << __FUNCTION__ << "][Line " << __LINE__ << "]: FAIL at " << (#cond) << std::endl; \
return -1; \
} \
}
#define PCAP 0
#define PCAP_CRNTI (0x1001)
#define PCAP_TTI (666)
using namespace srsran;
#if PCAP
#include "srsran/common/mac_nr_pcap.h"
#include "srsran/mac/mac_nr_pdu.h"
static std::unique_ptr<srsran::mac_nr_pcap> pcap_handle = nullptr;
#endif
#include "srsran/common/mac_pcap.h"
#include "srsran/mac/mac_rar_pdu_nr.h"
#include "srsran/mac/mac_sch_pdu_nr.h"
static std::unique_ptr<srsran::mac_pcap> pcap_handle = nullptr;
int write_pdu_to_pcap(const uint32_t lcid, const uint8_t* payload, const uint32_t len)
{
#if PCAP
if (pcap_handle) {
byte_buffer_t tx_buffer;
srsran::mac_nr_sch_pdu tx_pdu;
srsran::mac_sch_pdu_nr tx_pdu;
tx_pdu.init_tx(&tx_buffer, len + 10);
tx_pdu.add_sdu(lcid, payload, len);
tx_pdu.pack();
pcap_handle->write_dl_crnti(tx_buffer.msg, tx_buffer.N_bytes, PCAP_CRNTI, true, PCAP_TTI);
pcap_handle->write_dl_crnti_nr(tx_buffer.msg, tx_buffer.N_bytes, PCAP_CRNTI, true, PCAP_TTI);
return SRSRAN_SUCCESS;
}
#endif
return SRSRAN_ERROR;
}
@ -87,6 +77,7 @@ void corrupt_pdu_header(srsran::byte_buffer_t& pdu, const uint32_t header_len, c
// RLC AM PDU 12bit with complete SDU
int rlc_am_nr_pdu_test1()
{
test_delimit_logger delimiter("PDU test 1");
const int header_len = 2, payload_len = 4;
std::array<uint8_t, header_len + payload_len> tv = {0x80, 0x00, 0x11, 0x22, 0x33, 0x44};
srsran::byte_buffer_t pdu = make_pdu_and_log(tv);
@ -110,6 +101,7 @@ int rlc_am_nr_pdu_test1()
// RLC AM PDU 12bit first segment of SDU with P flag and SN 511
int rlc_am_nr_pdu_test2()
{
test_delimit_logger delimiter("PDU test 2");
const int header_len = 2, payload_len = 4;
std::array<uint8_t, header_len + payload_len> tv = {0xd1, 0xff, 0x11, 0x22, 0x33, 0x44};
srsran::byte_buffer_t pdu = make_pdu_and_log(tv);
@ -138,6 +130,7 @@ int rlc_am_nr_pdu_test2()
// RLC AM PDU 12bit last segment of SDU without P flag and SN 0x0404 and SO 0x0404 (1028)
int rlc_am_nr_pdu_test3()
{
test_delimit_logger delimiter("PDU test 3");
const int header_len = 4, payload_len = 4;
std::array<uint8_t, header_len + payload_len> tv = {0xa4, 0x04, 0x04, 0x04, 0x11, 0x22, 0x33, 0x44};
srsran::byte_buffer_t pdu = make_pdu_and_log(tv);
@ -166,6 +159,7 @@ int rlc_am_nr_pdu_test3()
// RLC AM PDU 18bit full SDU with P flag and SN 0x100000001000000010 (131586)
int rlc_am_nr_pdu_test4()
{
test_delimit_logger delimiter("PDU test 4");
const int header_len = 3, payload_len = 4;
std::array<uint8_t, header_len + payload_len> tv = {0xc2, 0x02, 0x02, 0x11, 0x22, 0x33, 0x44};
srsran::byte_buffer_t pdu = make_pdu_and_log(tv);
@ -194,6 +188,7 @@ int rlc_am_nr_pdu_test4()
// RLC AM PDU 18bit middle part of SDU (SO 514) without P flag and SN 131327
int rlc_am_nr_pdu_test5()
{
test_delimit_logger delimiter("PDU test 5");
const int header_len = 5, payload_len = 4;
std::array<uint8_t, header_len + payload_len> tv = {0xb2, 0x00, 0xff, 0x02, 0x02, 0x11, 0x22, 0x33, 0x44};
srsran::byte_buffer_t pdu = make_pdu_and_log(tv);
@ -222,6 +217,7 @@ int rlc_am_nr_pdu_test5()
// Malformed RLC AM PDU 18bit with reserved bits set
int rlc_am_nr_pdu_test6()
{
test_delimit_logger delimiter("PDU test 6");
const int header_len = 5, payload_len = 4;
std::array<uint8_t, header_len + payload_len> tv = {0xb7, 0x00, 0xff, 0x02, 0x02, 0x11, 0x22, 0x33, 0x44};
srsran::byte_buffer_t pdu = make_pdu_and_log(tv);
@ -238,6 +234,7 @@ int rlc_am_nr_pdu_test6()
// Status PDU for 12bit SN with ACK_SN=2065 and no further NACK_SN (E1 bit not set)
int rlc_am_nr_control_pdu_test1()
{
test_delimit_logger delimiter("Control PDU test 1");
const int len = 3;
std::array<uint8_t, len> tv = {0x08, 0x11, 0x00};
srsran::byte_buffer_t pdu = make_pdu_and_log(tv);
@ -267,6 +264,7 @@ int rlc_am_nr_control_pdu_test1()
// Status PDU for 12bit SN with ACK_SN=2065 and NACK_SN=273 (E1 bit set)
int rlc_am_nr_control_pdu_test2()
{
test_delimit_logger delimiter("Control PDU test 2");
const int len = 5;
std::array<uint8_t, len> tv = {0x08, 0x11, 0x80, 0x11, 0x10};
srsran::byte_buffer_t pdu = make_pdu_and_log(tv);
@ -294,12 +292,122 @@ int rlc_am_nr_control_pdu_test2()
return SRSRAN_SUCCESS;
}
// Status PDU for 12bit SN with ACK_SN=2065, NACK_SN=273, SO_START=2, SO_END=5, NACK_SN=275, SO_START=5, SO_END=0xFFFF
// E1 and E2 bit set on first NACK, only E2 on second.
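// Byte-by-byte reading of the test vector below (12-bit SN status PDU layout):
//   0x08 0x11           -> D/C=0, CPT=0, ACK_SN=0x811 (2065)
//   0x80                -> E1=1 (a NACK follows)
//   0x11 0x1c           -> NACK_SN=0x111 (273), E1=1, E2=1
//   0x00 0x02 0x00 0x05 -> SO_START=2, SO_END=5
//   0x11 0x34           -> NACK_SN=0x113 (275), E1=0, E2=1
//   0x00 0x05 0xFF 0xFF -> SO_START=5, SO_END=0xFFFF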
int rlc_am_nr_control_pdu_test3()
{
test_delimit_logger delimiter("Control PDU test 3");
const int len = 15;
std::array<uint8_t, len> tv = {
0x08, 0x11, 0x80, 0x11, 0x1c, 0x00, 0x02, 0x00, 0x05, 0x11, 0x34, 0x00, 0x05, 0xFF, 0xFF};
srsran::byte_buffer_t pdu = make_pdu_and_log(tv);
TESTASSERT(rlc_am_is_control_pdu(pdu.msg) == true);
// unpack PDU
rlc_am_nr_status_pdu_t status_pdu = {};
TESTASSERT(rlc_am_nr_read_status_pdu(&pdu, srsran::rlc_am_nr_sn_size_t::size12bits, &status_pdu) == SRSRAN_SUCCESS);
TESTASSERT(status_pdu.ack_sn == 2065);
TESTASSERT(status_pdu.N_nack == 2);
TESTASSERT(status_pdu.nacks[0].nack_sn == 273);
TESTASSERT(status_pdu.nacks[0].so_start == 2);
TESTASSERT(status_pdu.nacks[0].so_end == 5);
TESTASSERT(status_pdu.nacks[1].nack_sn == 275);
TESTASSERT(status_pdu.nacks[1].so_start == 5);
TESTASSERT(status_pdu.nacks[1].so_end == 0xFFFF);
// reset status PDU
pdu.clear();
// pack again
TESTASSERT(rlc_am_nr_write_status_pdu(status_pdu, srsran::rlc_am_nr_sn_size_t::size12bits, &pdu) == SRSRAN_SUCCESS);
TESTASSERT(pdu.N_bytes == tv.size());
write_pdu_to_pcap(4, pdu.msg, pdu.N_bytes);
TESTASSERT(memcmp(pdu.msg, tv.data(), pdu.N_bytes) == 0);
return SRSRAN_SUCCESS;
}
// Status PDU for 12bit SN with ACK_SN=2065, NACK_SN=273, SO_START=2, SO_END=5, NACK_SN=275
// E1 and E2 bit set on first NACK, neither E1 nor E2 on the second.
int rlc_am_nr_control_pdu_test4()
{
test_delimit_logger delimiter("Control PDU test 4");
const int len = 11;
std::array<uint8_t, len> tv = {0x08, 0x11, 0x80, 0x11, 0x1c, 0x00, 0x02, 0x00, 0x05, 0x11, 0x30};
srsran::byte_buffer_t pdu = make_pdu_and_log(tv);
TESTASSERT(rlc_am_is_control_pdu(pdu.msg) == true);
// unpack PDU
rlc_am_nr_status_pdu_t status_pdu = {};
TESTASSERT(rlc_am_nr_read_status_pdu(&pdu, srsran::rlc_am_nr_sn_size_t::size12bits, &status_pdu) == SRSRAN_SUCCESS);
TESTASSERT(status_pdu.ack_sn == 2065);
TESTASSERT(status_pdu.N_nack == 2);
TESTASSERT(status_pdu.nacks[0].nack_sn == 273);
TESTASSERT(status_pdu.nacks[0].has_so == true);
TESTASSERT(status_pdu.nacks[0].so_start == 2);
TESTASSERT(status_pdu.nacks[0].so_end == 5);
TESTASSERT(status_pdu.nacks[1].nack_sn == 275);
TESTASSERT(status_pdu.nacks[1].has_so == false);
// reset status PDU
pdu.clear();
// pack again
TESTASSERT(rlc_am_nr_write_status_pdu(status_pdu, srsran::rlc_am_nr_sn_size_t::size12bits, &pdu) == SRSRAN_SUCCESS);
TESTASSERT(pdu.N_bytes == tv.size());
write_pdu_to_pcap(4, pdu.msg, pdu.N_bytes);
TESTASSERT(memcmp(pdu.msg, tv.data(), pdu.N_bytes) == 0);
return SRSRAN_SUCCESS;
}
// Malformed Status PDU, with E1 still set at the end of the PDU
// 12bit SN with ACK_SN=2065, NACK_SN=273, SO_START=2, SO_END=5, NACK_SN=275, SO_START=5, SO_END=0xFFFF
// E1 and E2 bit set on first NACK, only E2 on second.
int rlc_am_nr_control_pdu_test5()
{
test_delimit_logger delimiter("Control PDU test 5");
const int len = 15;
std::array<uint8_t, len> tv = {
0x08, 0x11, 0x80, 0x11, 0x1c, 0x00, 0x02, 0x00, 0x05, 0x11, 0x3c, 0x00, 0x05, 0xFF, 0xFF};
srsran::byte_buffer_t pdu = make_pdu_and_log(tv);
TESTASSERT(rlc_am_is_control_pdu(pdu.msg) == true);
// unpack PDU
rlc_am_nr_status_pdu_t status_pdu = {};
TESTASSERT(rlc_am_nr_read_status_pdu(&pdu, srsran::rlc_am_nr_sn_size_t::size12bits, &status_pdu) == 0);
return SRSRAN_SUCCESS;
}
int main(int argc, char** argv)
{
#if PCAP
pcap_handle = std::unique_ptr<srsran::mac_nr_pcap>(new srsran::mac_nr_pcap());
static const struct option long_options[] = {{"pcap", no_argument, nullptr, 'p'}, {nullptr, 0, nullptr, 0}};
// Parse arguments
while (true) {
int option_index = 0;
int c = getopt_long(argc, argv, "p", long_options, &option_index);
if (c == -1) {
break;
}
switch (c) {
case 'p':
printf("Setting up PCAP\n");
pcap_handle = std::unique_ptr<srsran::mac_pcap>(new srsran::mac_pcap());
pcap_handle->open("rlc_am_nr_pdu_test.pcap");
#endif
break;
default:
fprintf(stderr, "error parsing arguments\n");
return SRSRAN_ERROR;
}
}
srslog::init();
@ -343,5 +451,20 @@ int main(int argc, char** argv)
return SRSRAN_ERROR;
}
if (rlc_am_nr_control_pdu_test3()) {
fprintf(stderr, "rlc_am_nr_control_pdu_test3() failed.\n");
return SRSRAN_ERROR;
}
if (rlc_am_nr_control_pdu_test4()) {
fprintf(stderr, "rlc_am_nr_control_pdu_test4() failed.\n");
return SRSRAN_ERROR;
}
if (rlc_am_nr_control_pdu_test5()) {
fprintf(stderr, "rlc_am_nr_control_pdu_test5() failed.\n");
return SRSRAN_ERROR;
}
return SRSRAN_SUCCESS;
}

@ -47,7 +47,7 @@ int basic_test_tx(rlc_am* rlc, byte_buffer_t pdu_bufs[NBUFS])
rlc->write_sdu(std::move(sdu_bufs[i]));
}
TESTASSERT(15 == rlc->get_buffer_state()); // 2 Bytes * NBUFFS (header size) + NBUFFS (data) = 15
TESTASSERT_EQ(15, rlc->get_buffer_state()); // 2 Bytes * NBUFFS (header size) + NBUFFS (data) = 15
// Read 5 PDUs from RLC1 (1 byte each)
for (int i = 0; i < NBUFS; i++) {
@ -56,14 +56,13 @@ int basic_test_tx(rlc_am* rlc, byte_buffer_t pdu_bufs[NBUFS])
TESTASSERT_EQ(3, len);
}
TESTASSERT(0 == rlc->get_buffer_state());
TESTASSERT_EQ(0, rlc->get_buffer_state());
return SRSRAN_SUCCESS;
}
/*
* Test the limits of the TX/RX window checkers
*
* This will test
*/
int window_checker_test()
{
@ -123,6 +122,88 @@ int window_checker_test()
return SRSRAN_SUCCESS;
}
/*
* Test the is_retx_segmentation_required() helper
*
*/
int retx_segmentation_required_checker_test()
{
rlc_am_tester tester;
timer_handler timers(8);
auto& test_logger = srslog::fetch_basic_logger("TESTER ");
test_delimit_logger delimiter("retx segmentation required checkers");
rlc_am rlc1(srsran_rat_t::nr, srslog::fetch_basic_logger("RLC_AM_1"), 1, &tester, &tester, &timers);
rlc_am_nr_tx* tx = dynamic_cast<rlc_am_nr_tx*>(rlc1.get_tx());
rlc_am_nr_rx* rx = dynamic_cast<rlc_am_nr_rx*>(rlc1.get_rx());
if (not rlc1.configure(rlc_config_t::default_rlc_am_nr_config())) {
return SRSRAN_ERROR;
}
unique_byte_buffer_t sdu_bufs[NBUFS];
unique_byte_buffer_t pdu_bufs[NBUFS];
for (int i = 0; i < NBUFS; i++) {
sdu_bufs[i] = srsran::make_byte_buffer();
pdu_bufs[i] = srsran::make_byte_buffer();
sdu_bufs[i]->msg[0] = i; // Write the index into the buffer
sdu_bufs[i]->N_bytes = 5; // Give each buffer a size of 5 bytes
sdu_bufs[i]->md.pdcp_sn = i; // PDCP SN for notifications
rlc1.write_sdu(std::move(sdu_bufs[i]));
rlc1.read_pdu(pdu_bufs[i]->msg, 8);
}
// Test full SDU retx
{
uint32_t nof_bytes = 8;
rlc_amd_retx_t retx = {};
retx.sn = 0;
retx.is_segment = false;
tx->is_retx_segmentation_required(retx, nof_bytes);
TESTASSERT_EQ(false, tx->is_retx_segmentation_required(retx, nof_bytes));
}
// Test SDU retx segmentation required
{
uint32_t nof_bytes = 4;
rlc_amd_retx_t retx;
retx.sn = 0;
retx.is_segment = false;
tx->is_retx_segmentation_required(retx, nof_bytes);
TESTASSERT_EQ(true, tx->is_retx_segmentation_required(retx, nof_bytes));
}
// Test full SDU segment retx
{
uint32_t nof_bytes = 40;
rlc_amd_retx_t retx = {};
retx.sn = 0;
retx.is_segment = true;
retx.so_start = 4;
retx.so_end = 6;
tx->is_retx_segmentation_required(retx, nof_bytes);
TESTASSERT_EQ(false, tx->is_retx_segmentation_required(retx, nof_bytes));
}
// Test SDU segment retx segmentation required
{
uint32_t nof_bytes = 4;
rlc_amd_retx_t retx = {};
retx.sn = 0;
retx.is_segment = true;
retx.so_start = 4;
retx.so_end = 6;
tx->is_retx_segmentation_required(retx, nof_bytes);
TESTASSERT_EQ(true, tx->is_retx_segmentation_required(retx, nof_bytes));
}
return SRSRAN_SUCCESS;
}
/*
* Test the transmission and acknowledgement of 5 SDUs.
*
@ -148,7 +229,7 @@ int basic_test()
rlc_am_nr_rx* rx2 = dynamic_cast<rlc_am_nr_rx*>(rlc2.get_rx());
// before configuring entity
TESTASSERT(0 == rlc1.get_buffer_state());
TESTASSERT_EQ(0, rlc1.get_buffer_state());
if (not rlc1.configure(rlc_config_t::default_rlc_am_nr_config())) {
return -1;
@ -165,18 +246,18 @@ int basic_test()
rlc2.write_pdu(pdu_bufs[i].msg, pdu_bufs[i].N_bytes);
}
TESTASSERT(3 == rlc2.get_buffer_state());
TESTASSERT_EQ(3, rlc2.get_buffer_state());
// Read status PDU from RLC2
byte_buffer_t status_buf;
int len = rlc2.read_pdu(status_buf.msg, 3);
status_buf.N_bytes = len;
TESTASSERT(0 == rlc2.get_buffer_state());
TESTASSERT_EQ(0, rlc2.get_buffer_state());
// Assert status is correct
rlc_am_nr_status_pdu_t status_check = {};
rlc_am_nr_read_status_pdu(&status_buf, rlc_am_nr_sn_size_t::size12bits, &status_check);
TESTASSERT(status_check.ack_sn == 5); // 5 is the last SN that was not received.
TESTASSERT_EQ(5, status_check.ack_sn); // 5 is the last SN that was not received.
// Write status PDU to RLC1
rlc1.write_pdu(status_buf.msg, status_buf.N_bytes);
@ -255,7 +336,7 @@ int lost_pdu_test()
}
// Only after t-reassembly has expired, will the status report include NACKs.
TESTASSERT(3 == rlc2.get_buffer_state());
TESTASSERT_EQ(3, rlc2.get_buffer_state());
{
// Read status PDU from RLC2
byte_buffer_t status_buf;
@ -267,7 +348,7 @@ int lost_pdu_test()
// Assert status is correct
rlc_am_nr_status_pdu_t status_check = {};
rlc_am_nr_read_status_pdu(&status_buf, rlc_am_nr_sn_size_t::size12bits, &status_check);
TESTASSERT(status_check.ack_sn == 3); // 3 is the next expected SN (i.e. the lost packet.)
TESTASSERT_EQ(3, status_check.ack_sn); // 3 is the next expected SN (i.e. the lost packet.)
// Write status PDU to RLC1
rlc1.write_pdu(status_buf.msg, status_buf.N_bytes);
@ -279,27 +360,27 @@ int lost_pdu_test()
}
// t-reassembly has expired. There should be a NACK in the status report.
TESTASSERT(5 == rlc2.get_buffer_state());
TESTASSERT_EQ(5, rlc2.get_buffer_state());
{
// Read status PDU from RLC2
byte_buffer_t status_buf;
int len = rlc2.read_pdu(status_buf.msg, 5);
status_buf.N_bytes = len;
TESTASSERT(0 == rlc2.get_buffer_state());
TESTASSERT_EQ(0, rlc2.get_buffer_state());
// Assert status is correct
rlc_am_nr_status_pdu_t status_check = {};
rlc_am_nr_read_status_pdu(&status_buf, rlc_am_nr_sn_size_t::size12bits, &status_check);
TESTASSERT(status_check.ack_sn == 5); // 5 is the next expected SN.
TESTASSERT(status_check.N_nack == 1); // We lost one PDU.
TESTASSERT(status_check.nacks[0].nack_sn == 3); // Lost PDU SN=3.
TESTASSERT_EQ(5, status_check.ack_sn); // 5 is the next expected SN.
TESTASSERT_EQ(1, status_check.N_nack); // We lost one PDU.
TESTASSERT_EQ(3, status_check.nacks[0].nack_sn); // Lost PDU SN=3.
// Write status PDU to RLC1
rlc1.write_pdu(status_buf.msg, status_buf.N_bytes);
// Check there is a retx of SN=3
TESTASSERT(3 == rlc1.get_buffer_state());
TESTASSERT_EQ(3, rlc1.get_buffer_state());
}
{
@ -307,11 +388,11 @@ int lost_pdu_test()
byte_buffer_t retx_buf;
int len = rlc1.read_pdu(retx_buf.msg, 3);
retx_buf.N_bytes = len;
TESTASSERT(3 == len);
TESTASSERT_EQ(3, len);
rlc2.write_pdu(retx_buf.msg, retx_buf.N_bytes);
TESTASSERT(0 == rlc2.get_buffer_state());
TESTASSERT_EQ(0, rlc2.get_buffer_state());
}
// Check statistics
@ -359,8 +440,13 @@ int basic_segmentation_test()
rlc_am rlc1(srsran_rat_t::nr, srslog::fetch_basic_logger("RLC_AM_1"), 1, &tester, &tester, &timers);
rlc_am rlc2(srsran_rat_t::nr, srslog::fetch_basic_logger("RLC_AM_2"), 1, &tester, &tester, &timers);
rlc_am_nr_tx* tx1 = dynamic_cast<rlc_am_nr_tx*>(rlc1.get_tx());
rlc_am_nr_rx* rx1 = dynamic_cast<rlc_am_nr_rx*>(rlc1.get_rx());
rlc_am_nr_tx* tx2 = dynamic_cast<rlc_am_nr_tx*>(rlc2.get_tx());
rlc_am_nr_rx* rx2 = dynamic_cast<rlc_am_nr_rx*>(rlc2.get_rx());
// before configuring entity
TESTASSERT(0 == rlc1.get_buffer_state());
TESTASSERT_EQ(0, rlc1.get_buffer_state());
if (not rlc1.configure(rlc_config_t::default_rlc_am_nr_config())) {
return -1;
@ -416,10 +502,389 @@ int basic_segmentation_test()
TESTASSERT_EQ(13, metrics2.num_rx_pdu_bytes); // 1 PDU (No SO) + 2 PDUs (with SO) = 3 + 2*5
TESTASSERT_EQ(0, metrics2.num_lost_sdus); // No lost SDUs
// Check state
rlc_am_nr_tx_state_t state1_tx = tx1->get_tx_state();
TESTASSERT_EQ(1, state1_tx.tx_next);
return SRSRAN_SUCCESS;
}
int main(int argc, char** argv)
int segment_retx_test()
{
rlc_am_tester tester;
timer_handler timers(8);
byte_buffer_t pdu_bufs[NBUFS];
auto& test_logger = srslog::fetch_basic_logger("TESTER ");
rlc_am rlc1(srsran_rat_t::nr, srslog::fetch_basic_logger("RLC_AM_1"), 1, &tester, &tester, &timers);
rlc_am rlc2(srsran_rat_t::nr, srslog::fetch_basic_logger("RLC_AM_2"), 1, &tester, &tester, &timers);
test_delimit_logger delimiter("segment retx PDU");
rlc_am_nr_tx* tx1 = dynamic_cast<rlc_am_nr_tx*>(rlc1.get_tx());
rlc_am_nr_rx* rx1 = dynamic_cast<rlc_am_nr_rx*>(rlc1.get_rx());
rlc_am_nr_tx* tx2 = dynamic_cast<rlc_am_nr_tx*>(rlc2.get_tx());
rlc_am_nr_rx* rx2 = dynamic_cast<rlc_am_nr_rx*>(rlc2.get_rx());
// before configuring entity
TESTASSERT_EQ(0, rlc1.get_buffer_state());
if (not rlc1.configure(rlc_config_t::default_rlc_am_nr_config())) {
return -1;
}
if (not rlc2.configure(rlc_config_t::default_rlc_am_nr_config())) {
return -1;
}
// Push 5 SDUs into RLC1
unique_byte_buffer_t sdu_bufs[NBUFS];
for (int i = 0; i < NBUFS; i++) {
sdu_bufs[i] = srsran::make_byte_buffer();
sdu_bufs[i]->msg[0] = i; // Write the index into the buffer
sdu_bufs[i]->N_bytes = 3; // Give each buffer a size of 3 bytes
sdu_bufs[i]->md.pdcp_sn = i; // PDCP SN for notifications
rlc1.write_sdu(std::move(sdu_bufs[i]));
}
TESTASSERT_EQ(25, rlc1.get_buffer_state()); // 2 Bytes * NBUFFS (header size) + NBUFFS * 3 (data) = 25
// Read 5 PDUs from RLC1 (1 byte each)
for (int i = 0; i < NBUFS; i++) {
uint32_t len = rlc1.read_pdu(pdu_bufs[i].msg, 5); // 2 bytes for header + 3 byte payload
pdu_bufs[i].N_bytes = len;
TESTASSERT_EQ(5, len);
}
TESTASSERT_EQ(0, rlc1.get_buffer_state());
// Write 5 PDUs into RLC2
for (int i = 0; i < NBUFS; i++) {
if (i != 3) {
rlc2.write_pdu(pdu_bufs[i].msg, pdu_bufs[i].N_bytes); // Don't write RLC_SN=3.
}
}
// Only after t-reassembly has expired, will the status report include NACKs.
TESTASSERT_EQ(3, rlc2.get_buffer_state());
{
// Read status PDU from RLC2
byte_buffer_t status_buf;
int len = rlc2.read_pdu(status_buf.msg, 5);
status_buf.N_bytes = len;
TESTASSERT_EQ(0, rlc2.get_buffer_state());
// Assert status is correct
rlc_am_nr_status_pdu_t status_check = {};
rlc_am_nr_read_status_pdu(&status_buf, rlc_am_nr_sn_size_t::size12bits, &status_check);
TESTASSERT_EQ(3, status_check.ack_sn); // 3 is the next expected SN (i.e. the lost packet.)
// Write status PDU to RLC1
rlc1.write_pdu(status_buf.msg, status_buf.N_bytes);
}
// Step timers until reassembly timeout expires
for (int cnt = 0; cnt < 35; cnt++) {
timers.step_all();
}
// t-reassembly has expired. There should be a NACK in the status report.
TESTASSERT_EQ(5, rlc2.get_buffer_state());
{
// Read status PDU from RLC2
byte_buffer_t status_buf;
int len = rlc2.read_pdu(status_buf.msg, 5);
status_buf.N_bytes = len;
TESTASSERT_EQ(0, rlc2.get_buffer_state());
// Assert status is correct
rlc_am_nr_status_pdu_t status_check = {};
rlc_am_nr_read_status_pdu(&status_buf, rlc_am_nr_sn_size_t::size12bits, &status_check);
TESTASSERT_EQ(5, status_check.ack_sn); // 5 is the next expected SN.
TESTASSERT_EQ(1, status_check.N_nack); // We lost one PDU.
TESTASSERT_EQ(3, status_check.nacks[0].nack_sn); // Lost PDU SN=3.
// Write status PDU to RLC1
rlc1.write_pdu(status_buf.msg, status_buf.N_bytes);
// Check there is a retx of SN=3
TESTASSERT_EQ(5, rlc1.get_buffer_state());
}
{
// Re-transmit PDU in 3 segments
for (int i = 0; i < 3; i++) {
byte_buffer_t retx_buf;
uint32_t len = 0;
if (i == 0) {
len = rlc1.read_pdu(retx_buf.msg, 3);
TESTASSERT_EQ(3, len);
} else {
len = rlc1.read_pdu(retx_buf.msg, 5);
TESTASSERT_EQ(5, len);
}
retx_buf.N_bytes = len;
rlc_am_nr_pdu_header_t header_check = {};
uint32_t hdr_len = rlc_am_nr_read_data_pdu_header(&retx_buf, rlc_am_nr_sn_size_t::size12bits, &header_check);
// Double check header.
TESTASSERT_EQ(3, header_check.sn); // Double check RETX SN
if (i == 0) {
TESTASSERT_EQ(rlc_nr_si_field_t::first_segment, header_check.si);
} else if (i == 1) {
TESTASSERT_EQ(rlc_nr_si_field_t::neither_first_nor_last_segment, header_check.si);
} else {
TESTASSERT_EQ(rlc_nr_si_field_t::last_segment, header_check.si);
}
rlc2.write_pdu(retx_buf.msg, retx_buf.N_bytes);
}
TESTASSERT(0 == rlc1.get_buffer_state());
}
// Check statistics
rlc_bearer_metrics_t metrics1 = rlc1.get_metrics();
rlc_bearer_metrics_t metrics2 = rlc2.get_metrics();
// SDU metrics
TESTASSERT_EQ(5, metrics1.num_tx_sdus);
TESTASSERT_EQ(0, metrics1.num_rx_sdus);
TESTASSERT_EQ(15, metrics1.num_tx_sdu_bytes);
TESTASSERT_EQ(0, metrics1.num_rx_sdu_bytes);
TESTASSERT_EQ(0, metrics1.num_lost_sdus);
// PDU metrics
TESTASSERT_EQ(5 + 3, metrics1.num_tx_pdus); // 3 re-transmissions
TESTASSERT_EQ(2, metrics1.num_rx_pdus); // Two status PDUs
TESTASSERT_EQ(38, metrics1.num_tx_pdu_bytes); // 2 Bytes * NBUFS (header size) + NBUFS * 3 (data) +
// 3 (1 ReTx without SO) + 2 * 5 (2 ReTx with SO) = 38
TESTASSERT_EQ(3 + 5, metrics1.num_rx_pdu_bytes); // Two status PDUs (one with a NACK)
TESTASSERT_EQ(0, metrics1.num_lost_sdus); // No lost SDUs
// SDU metrics
TESTASSERT_EQ(0, metrics2.num_tx_sdus);
TESTASSERT_EQ(5, metrics2.num_rx_sdus);
TESTASSERT_EQ(0, metrics2.num_tx_sdu_bytes);
TESTASSERT_EQ(15, metrics2.num_rx_sdu_bytes); // 5 SDUs, 3 bytes each
TESTASSERT_EQ(0, metrics2.num_lost_sdus);
// PDU metrics
TESTASSERT_EQ(2, metrics2.num_tx_pdus); // Two status PDUs
TESTASSERT_EQ(7, metrics2.num_rx_pdus); // 7 PDUs (8 tx'ed, but one was lost)
TESTASSERT_EQ(5 + 3, metrics2.num_tx_pdu_bytes); // Two status PDUs (one with a NACK)
TESTASSERT_EQ(33, metrics2.num_rx_pdu_bytes); // 2 Bytes * (NBUFS-1) (header size) + (NBUFS-1) * 3 (data) +
// 3 (1 ReTx without SO) + 2 * 5 (2 ReTx with SO) = 33
TESTASSERT_EQ(0, metrics2.num_lost_sdus); // No lost SDUs
// Check state
rlc_am_nr_rx_state_t state2_rx = rx2->get_rx_state();
TESTASSERT_EQ(5, state2_rx.rx_next);
return SRSRAN_SUCCESS;
}
int retx_segment_test()
{
rlc_am_tester tester;
timer_handler timers(8);
auto& test_logger = srslog::fetch_basic_logger("TESTER ");
rlc_am rlc1(srsran_rat_t::nr, srslog::fetch_basic_logger("RLC_AM_1"), 1, &tester, &tester, &timers);
rlc_am rlc2(srsran_rat_t::nr, srslog::fetch_basic_logger("RLC_AM_2"), 1, &tester, &tester, &timers);
test_delimit_logger delimiter("retx segment PDU");
rlc_am_nr_tx* tx1 = dynamic_cast<rlc_am_nr_tx*>(rlc1.get_tx());
rlc_am_nr_rx* rx1 = dynamic_cast<rlc_am_nr_rx*>(rlc1.get_rx());
rlc_am_nr_tx* tx2 = dynamic_cast<rlc_am_nr_tx*>(rlc2.get_tx());
rlc_am_nr_rx* rx2 = dynamic_cast<rlc_am_nr_rx*>(rlc2.get_rx());
// before configuring entity
TESTASSERT(0 == rlc1.get_buffer_state());
if (not rlc1.configure(rlc_config_t::default_rlc_am_nr_config())) {
return -1;
}
if (not rlc2.configure(rlc_config_t::default_rlc_am_nr_config())) {
return -1;
}
int n_sdu_bufs = 5;
int n_pdu_bufs = 15;
// Push 5 SDUs into RLC1
std::vector<unique_byte_buffer_t> sdu_bufs(n_sdu_bufs);
for (int i = 0; i < n_sdu_bufs; i++) {
sdu_bufs[i] = srsran::make_byte_buffer();
sdu_bufs[i]->msg[0] = i; // Write the index into the buffer
sdu_bufs[i]->N_bytes = 3; // Give each buffer a size of 3 bytes
sdu_bufs[i]->md.pdcp_sn = i; // PDCP SN for notifications
rlc1.write_sdu(std::move(sdu_bufs[i]));
}
TESTASSERT(25 == rlc1.get_buffer_state()); // 2 Bytes * n_sdu_bufs (header size) + n_sdu_bufs * 3 (data) = 25
// Read 15 PDUs from RLC1
std::vector<unique_byte_buffer_t> pdu_bufs(n_pdu_bufs);
for (int i = 0; i < n_pdu_bufs; i++) {
pdu_bufs[i] = srsran::make_byte_buffer();
if (i == 0 || i == 3 || i == 6 || i == 9 || i == 12) {
// First segment, no SO
uint32_t len = rlc1.read_pdu(pdu_bufs[i]->msg, 3); // 2 bytes for header + 1 byte payload
pdu_bufs[i]->N_bytes = len;
TESTASSERT_EQ(3, len);
} else {
// Middle or last segment, SO present
uint32_t len = rlc1.read_pdu(pdu_bufs[i]->msg, 5); // 4 bytes for header + 1 byte payload
pdu_bufs[i]->N_bytes = len;
TESTASSERT_EQ(5, len);
}
}
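// Note: only the first segment of an SDU omits the SO field; middle and last segments carry a 2-byte SO,
// which is why their headers are assumed to be 4 bytes instead of 2 with 12-bit SNs.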
TESTASSERT_EQ(0, rlc1.get_buffer_state());
// Write 12 of the 15 PDUs into RLC2
for (int i = 0; i < n_pdu_bufs; i++) {
if (i != 3 && i != 7 && i != 11) {
rlc2.write_pdu(pdu_bufs[i]->msg, pdu_bufs[i]->N_bytes); // Lose the first segment of SN=1, the middle segment of SN=2 and the last segment of SN=3.
}
}
// Only after t-reassembly has expired will the status report include NACKs.
TESTASSERT_EQ(3, rlc2.get_buffer_state());
{
// Read status PDU from RLC2
byte_buffer_t status_buf;
int len = rlc2.read_pdu(status_buf.msg, 5);
status_buf.N_bytes = len;
TESTASSERT_EQ(0, rlc2.get_buffer_state());
// Assert status is correct
rlc_am_nr_status_pdu_t status_check = {};
rlc_am_nr_read_status_pdu(&status_buf, rlc_am_nr_sn_size_t::size12bits, &status_check);
TESTASSERT_EQ(1, status_check.ack_sn); // 1 is the next expected SN (i.e. the first SN with a missing segment).
// Write status PDU to RLC1
rlc1.write_pdu(status_buf.msg, status_buf.N_bytes);
}
// Step timers until reassembly timeout expires
for (int cnt = 0; cnt < 35; cnt++) {
timers.step_all();
}
// t-reassembly has expired. There should be a NACK in the status report.
// There should be 3 NACKs with SO_start and SO_end
TESTASSERT_EQ(21, rlc2.get_buffer_state()); // 3 bytes for fixed header (ACK+E1) + 3 * 6 for NACK with SO = 21.
{
// Read status PDU from RLC2
byte_buffer_t status_buf;
int len = rlc2.read_pdu(status_buf.msg, 21);
status_buf.N_bytes = len;
TESTASSERT_EQ(0, rlc2.get_buffer_state());
// Assert status is correct
rlc_am_nr_status_pdu_t status_check = {};
rlc_am_nr_read_status_pdu(&status_buf, rlc_am_nr_sn_size_t::size12bits, &status_check);
TESTASSERT_EQ(5, status_check.ack_sn); // 5 is the next expected SN.
TESTASSERT_EQ(3, status_check.N_nack); // We lost three segments.
TESTASSERT_EQ(1, status_check.nacks[0].nack_sn); // Lost segment of SN=1.
TESTASSERT_EQ(true, status_check.nacks[0].has_so); // NACK includes segment offsets.
TESTASSERT_EQ(0, status_check.nacks[0].so_start); // SO_start of the missing bytes of SN=1.
TESTASSERT_EQ(1, status_check.nacks[0].so_end); // SO_end of the missing bytes of SN=1.
TESTASSERT_EQ(2, status_check.nacks[1].nack_sn); // Lost segment of SN=2.
TESTASSERT_EQ(true, status_check.nacks[1].has_so); // NACK includes segment offsets.
TESTASSERT_EQ(1, status_check.nacks[1].so_start); // SO_start of the missing bytes of SN=2.
TESTASSERT_EQ(2, status_check.nacks[1].so_end); // SO_end of the missing bytes of SN=2.
TESTASSERT_EQ(3, status_check.nacks[2].nack_sn); // Lost segment of SN=3.
TESTASSERT_EQ(true, status_check.nacks[2].has_so); // NACK includes segment offsets.
TESTASSERT_EQ(2, status_check.nacks[2].so_start); // SO_start of the missing bytes of SN=3.
TESTASSERT_EQ(0xFFFF, status_check.nacks[2].so_end); // SO_end of the missing bytes of SN=3.
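// An SO_end of 0xFFFF is a special value indicating that the missing portion extends to the last byte of the SDU (cf. TS 38.322).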
// Write status PDU to RLC1
rlc1.write_pdu(status_buf.msg, status_buf.N_bytes);
// Check there are ReTx queued for the lost segments
TESTASSERT_EQ(5, rlc1.get_buffer_state());
}
{
// Re-transmit the 3 lost segments
for (int i = 0; i < 3; i++) {
byte_buffer_t retx_buf;
uint32_t len = 0;
if (i == 0) {
len = rlc1.read_pdu(retx_buf.msg, 3);
TESTASSERT_EQ(3, len);
} else {
len = rlc1.read_pdu(retx_buf.msg, 5);
TESTASSERT_EQ(5, len);
}
retx_buf.N_bytes = len;
rlc_am_nr_pdu_header_t header_check = {};
uint32_t hdr_len = rlc_am_nr_read_data_pdu_header(&retx_buf, rlc_am_nr_sn_size_t::size12bits, &header_check);
// Double check header.
if (i == 0) {
TESTASSERT_EQ(1, header_check.sn); // Double check RETX SN
TESTASSERT_EQ(rlc_nr_si_field_t::first_segment, header_check.si);
} else if (i == 1) {
TESTASSERT_EQ(2, header_check.sn); // Double check RETX SN
TESTASSERT_EQ(rlc_nr_si_field_t::neither_first_nor_last_segment, header_check.si);
} else {
TESTASSERT_EQ(3, header_check.sn); // Double check RETX SN
TESTASSERT_EQ(rlc_nr_si_field_t::last_segment, header_check.si);
}
rlc2.write_pdu(retx_buf.msg, retx_buf.N_bytes);
}
TESTASSERT_EQ(0, rlc1.get_buffer_state());
}
// Check statistics
rlc_bearer_metrics_t metrics1 = rlc1.get_metrics();
rlc_bearer_metrics_t metrics2 = rlc2.get_metrics();
// SDU metrics
TESTASSERT_EQ(5, metrics1.num_tx_sdus);
TESTASSERT_EQ(0, metrics1.num_rx_sdus);
TESTASSERT_EQ(15, metrics1.num_tx_sdu_bytes);
TESTASSERT_EQ(0, metrics1.num_rx_sdu_bytes);
TESTASSERT_EQ(0, metrics1.num_lost_sdus);
// PDU metrics
TESTASSERT_EQ(15 + 3, metrics1.num_tx_pdus); // 15 PDUs + 3 re-transmissions
TESTASSERT_EQ(2, metrics1.num_rx_pdus); // Two status PDUs
TESTASSERT_EQ(78, metrics1.num_tx_pdu_bytes); // 3 Bytes * 5 (5 PDUs without SO) + 5 Bytes * 10 (10 PDUs with SO) +
// 3 (1 ReTx without SO) + 2 * 5 (2 ReTx with SO) = 78
TESTASSERT_EQ(24, metrics1.num_rx_pdu_bytes); // Two status PDUs: one with just an ACK (3 bytes),
// the other with 3 NACKs, all with SO (3 + 3 * 6 bytes)
TESTASSERT_EQ(0, metrics1.num_lost_sdus); // No lost SDUs
// SDU metrics
TESTASSERT_EQ(0, metrics2.num_tx_sdus);
TESTASSERT_EQ(5, metrics2.num_rx_sdus);
TESTASSERT_EQ(0, metrics2.num_tx_sdu_bytes);
TESTASSERT_EQ(15, metrics2.num_rx_sdu_bytes); // 5 SDUs, 3 bytes each
TESTASSERT_EQ(0, metrics2.num_lost_sdus);
// PDU metrics
TESTASSERT_EQ(2, metrics2.num_tx_pdus); // Two status PDUs
TESTASSERT_EQ(15, metrics2.num_rx_pdus); // 15 PDUs (18 tx'ed, but three were lost)
TESTASSERT_EQ(24, metrics2.num_tx_pdu_bytes); // Two status PDUs: one with just an ACK (3 bytes),
// the other with 3 NACKs, all with SO (3 + 3 * 6 bytes)
TESTASSERT_EQ(65, metrics2.num_rx_pdu_bytes); // 78 bytes tx'ed by RLC1 (65 original + 13 ReTx)
// minus the 13 bytes that were lost (3 + 2 * 5) = 65 bytes
TESTASSERT_EQ(0, metrics2.num_lost_sdus); // No lost SDUs
// Check state
rlc_am_nr_rx_state_t state2_rx = rx2->get_rx_state();
TESTASSERT_EQ(5, state2_rx.rx_next);
return SRSRAN_SUCCESS;
}
int main()
{
// Setup the log message spy to intercept error and warning log entries from RLC
if (!srslog::install_custom_sink(srsran::log_sink_message_spy::name(),
@ -443,11 +908,12 @@ int main(int argc, char** argv)
// start log back-end
srslog::init();
TESTASSERT(window_checker_test() == SRSRAN_SUCCESS);
TESTASSERT(retx_segmentation_required_checker_test() == SRSRAN_SUCCESS);
TESTASSERT(basic_test() == SRSRAN_SUCCESS);
TESTASSERT(lost_pdu_test() == SRSRAN_SUCCESS);
TESTASSERT(basic_segmentation_test() == SRSRAN_SUCCESS);
TESTASSERT(segment_retx_test() == SRSRAN_SUCCESS);
TESTASSERT(retx_segment_test() == SRSRAN_SUCCESS);
return SRSRAN_SUCCESS;
}

@ -497,6 +497,8 @@ void stress_test(stress_test_args_t args)
cnfg_ = rlc_config_t::default_rlc_um_nr_config(6);
} else if (args.mode == "UM12") {
cnfg_ = rlc_config_t::default_rlc_um_nr_config(12);
} else if (args.mode == "AM12") {
cnfg_ = rlc_config_t::default_rlc_am_nr_config();
} else {
cout << "Unsupported RLC mode " << args.mode << ", exiting." << endl;
exit(-1);

@ -93,6 +93,7 @@ bool slot_worker::init(const args_t& args)
ul_args.pusch.measure_time = true;
ul_args.pusch.measure_evm = true;
ul_args.pusch.max_layers = args.nof_rx_ports;
ul_args.pusch.sch.max_nof_iter = args.pusch_max_its;
ul_args.pusch.max_prb = args.nof_max_prb;
ul_args.nof_max_prb = args.nof_max_prb;
ul_args.pusch_min_snr_dB = args.pusch_min_snr_dB;

@ -793,7 +793,7 @@ int phy_ue_db::set_ul_grant_available(uint32_t tti, const stack_interface_phy_lt
// Check that eNb Cell/Carrier is active for the given RNTI
if (_assert_active_enb_cc(rnti, enb_cc_idx) != SRSRAN_SUCCESS) {
ret = SRSRAN_ERROR;
srslog::fetch_basic_logger("PHY").error("Error setting grant for rnti=0x%x, cc=%d\n", rnti, enb_cc_idx);
srslog::fetch_basic_logger("PHY").info("Error setting grant for rnti=0x%x, cc=%d", rnti, enb_cc_idx);
continue;
}
// Raise Grant available flag

@ -26,6 +26,7 @@
#include "sched_nr_interface.h"
#include "sched_nr_ue.h"
#include "srsran/adt/pool/cached_alloc.h"
#include "srsran/adt/pool/circular_stack_pool.h"
#include "srsran/common/slot_point.h"
#include <array>
extern "C" {
@ -70,7 +71,7 @@ public:
private:
int ue_cfg_impl(uint16_t rnti, const ue_cfg_t& cfg);
int add_ue_impl(uint16_t rnti, std::unique_ptr<sched_nr_impl::ue> u);
int add_ue_impl(uint16_t rnti, sched_nr_impl::unique_ue_ptr u);
// args
sched_nr_impl::sched_params_t cfg;
@ -83,6 +84,8 @@ private:
using slot_cc_worker = sched_nr_impl::cc_worker;
std::vector<std::unique_ptr<sched_nr_impl::cc_worker> > cc_workers;
// UE Database
std::unique_ptr<srsran::circular_stack_pool<SRSENB_MAX_UES> > ue_pool;
using ue_map_t = sched_nr_impl::ue_map_t;
ue_map_t ue_db;

@ -31,6 +31,7 @@
#include "srsran/adt/circular_map.h"
#include "srsran/adt/move_callback.h"
#include "srsran/adt/pool/cached_alloc.h"
#include "srsran/adt/pool/pool_interface.h"
namespace srsenb {
@ -214,7 +215,8 @@ private:
ue_carrier* ue = nullptr;
};
using ue_map_t = rnti_map_t<std::unique_ptr<ue> >;
using unique_ue_ptr = srsran::unique_pool_ptr<ue>;
using ue_map_t = rnti_map_t<unique_ue_ptr>;
using slot_ue_map_t = rnti_map_t<slot_ue>;
} // namespace sched_nr_impl

@ -80,7 +80,7 @@ public:
int read_pdu_bcch_bch(const uint32_t tti, srsran::byte_buffer_t& buffer) final;
int read_pdu_bcch_dlsch(uint32_t sib_index, srsran::byte_buffer_t& buffer) final;
/// User manegement
/// User management
int add_user(uint16_t rnti, uint32_t pcell_cc_idx) final;
void rem_user(uint16_t rnti);
int update_user(uint16_t new_rnti, uint16_t old_rnti) final;

@ -304,6 +304,9 @@ int sched_nr::config(const sched_args_t& sched_cfg, srsran::const_span<sched_nr_
cfg = sched_params_t{sched_cfg};
logger = &srslog::fetch_basic_logger(sched_cfg.logger_name);
// Initiate UE memory pool
ue_pool.reset(new srsran::circular_stack_pool<SRSENB_MAX_UES>(8, sizeof(ue), 4));
// Initiate Common Sched Configuration
cfg.cells.reserve(cell_list.size());
for (uint32_t cc = 0; cc < cell_list.size(); ++cc) {
@ -342,7 +345,7 @@ void sched_nr::ue_rem(uint16_t rnti)
});
}
int sched_nr::add_ue_impl(uint16_t rnti, std::unique_ptr<sched_nr_impl::ue> u)
int sched_nr::add_ue_impl(uint16_t rnti, sched_nr_impl::unique_ue_ptr u)
{
logger->info("SCHED: New user rnti=0x%x, cc=%d", rnti, cfg.cells[0].cc);
return ue_db.insert(rnti, std::move(u)).has_value() ? SRSRAN_SUCCESS : SRSRAN_ERROR;
@ -351,6 +354,8 @@ int sched_nr::add_ue_impl(uint16_t rnti, std::unique_ptr<sched_nr_impl::ue> u)
int sched_nr::ue_cfg_impl(uint16_t rnti, const ue_cfg_t& uecfg)
{
if (not ue_db.contains(rnti)) {
// create user object
unique_ue_ptr u = srsran::make_pool_obj_with_fallback<ue>(*ue_pool, rnti, rnti, uecfg, cfg);
return add_ue_impl(rnti, std::make_unique<ue>(rnti, uecfg, cfg));
}
ue_db[rnti]->set_cfg(uecfg);
@ -425,7 +430,8 @@ void sched_nr::get_metrics(mac_metrics_t& metrics)
int sched_nr::dl_rach_info(const rar_info_t& rar_info)
{
// create user object outside of sched main thread
std::unique_ptr<ue> u = std::make_unique<ue>(rar_info.temp_crnti, rar_info.cc, cfg);
unique_ue_ptr u =
srsran::make_pool_obj_with_fallback<ue>(*ue_pool, rar_info.temp_crnti, rar_info.temp_crnti, rar_info.cc, cfg);
// enqueue UE creation event + RACH handling
auto add_ue = [this, rar_info, u = std::move(u)](event_manager::logger& ev_logger) mutable {
