Integrated PUSCH decoder in gNb

Branch: master
Xavier Arteaga authored 3 years ago, committed by Xavier Arteaga
parent f07d925be1
commit ecc0b92142

@ -44,7 +44,7 @@ public:
* @brief Carrier reference configuration for 10MHz serving cell bandwidth
* - CORESET: all channel, 1 symbol
* - Single common Search Space
* - 1 possible candidate per aggregation level
* - 2 possible candidates per aggregation level to allow DL and UL grants simultaneously
*/
R_PDCCH_CUSTOM_COMMON_SS = 0,
} pdcch = R_PDCCH_CUSTOM_COMMON_SS;

@ -242,9 +242,9 @@ public:
};
struct pusch_t {
srsran_sch_cfg_nr_t sch = {}; ///< PUSCH configuration
std::array<uint8_t*, SRSRAN_MAX_TB> data = {}; ///< Data pointer
std::array<srsran_softbuffer_tx_t*, SRSRAN_MAX_TB> softbuffer_tx = {}; ///< Tx Softbuffer
uint32_t pid = 0; ///< HARQ process ID
srsran_sch_cfg_nr_t sch = {}; ///< PUSCH configuration
std::array<uint8_t*, SRSRAN_MAX_TB> data = {}; ///< Data pointer
};
struct pucch_t {
@ -265,7 +265,9 @@ public:
struct pusch_info_t {
uint16_t rnti;
uint32_t pid = 0; ///< HARQ process ID
srsran_pusch_res_nr_t pusch_data;
srsran_uci_cfg_nr_t uci_cfg; ///< Provides UCI configuration, so stack does not need to keep the pending state
// ... add signal measurements here
};

@ -65,4 +65,10 @@ SRSRAN_API uint32_t srsran_gnb_ul_pucch_info(srsran_gnb_ul_t* q
char* str,
uint32_t str_len);
SRSRAN_API uint32_t srsran_gnb_ul_pusch_info(srsran_gnb_ul_t* q,
const srsran_sch_cfg_nr_t* cfg,
const srsran_pusch_res_nr_t* res,
char* str,
uint32_t str_len);
#endif // SRSRAN_GNB_UL_H

@ -65,7 +65,7 @@ void phy_cfg_nr_default_t::make_pdcch_custom_common_ss(srsran_pdcch_cfg_nr_t& pd
// Generate up to 2 candidates for each aggregation level if possible
for (uint32_t L = 0; L < SRSRAN_SEARCH_SPACE_NOF_AGGREGATION_LEVELS_NR; L++) {
pdcch.search_space[1].nof_candidates[L] =
SRSRAN_MIN(1, srsran_pdcch_nr_max_candidates_coreset(&pdcch.coreset[1], L));
SRSRAN_MIN(2, srsran_pdcch_nr_max_candidates_coreset(&pdcch.coreset[1], L));
}
}
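Raising the cap from 1 to 2 candidates lets a DL and an UL DCI for the same RNTI coexist in the same slot (the test below places the UL DCI in candidate index 1). The count is still bounded by what fits in the CORESET; a rough worked example follows, assuming the default 1-symbol CORESET spans 48 PRB of the 10 MHz carrier, i.e. 8 CCEs (the exact figure depends on the carrier configuration), and approximating the CORESET capacity as floor(CCEs / aggregation level):

#include <algorithm>
#include <cstdio>

// Illustrative sketch only, not srsRAN code: reproduces the capping logic of
// the patch for a hypothetical 1-symbol, 48-PRB CORESET (48 REGs = 8 CCEs).
int main()
{
  const unsigned nof_cce             = 8;
  const unsigned aggregation_level[] = {1, 2, 4, 8, 16};
  for (unsigned L = 0; L < 5; L++) {
    unsigned max_candidates = nof_cce / aggregation_level[L]; // what fits in the CORESET
    unsigned nof_candidates = std::min(2u, max_candidates);   // cap applied by the patch
    std::printf("AL%-2u: %u candidate(s)\n", aggregation_level[L], nof_candidates);
  }
  return 0;
}

With these assumptions the output is 2 candidates for AL 1/2/4, a single candidate at AL 8 and none at AL 16, so simultaneous DL and UL grants remain impossible at the highest aggregation levels.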

@ -287,5 +287,24 @@ uint32_t srsran_gnb_ul_pucch_info(srsran_gnb_ul_t* q,
len = srsran_print_check(
str, str_len, len, "snr=%+.1f valid=%c", q->chest_pucch.snr_db, uci_data->value.valid ? 'y' : 'n');
return len;
}
uint32_t srsran_gnb_ul_pusch_info(srsran_gnb_ul_t* q,
const srsran_sch_cfg_nr_t* cfg,
const srsran_pusch_res_nr_t* res,
char* str,
uint32_t str_len)
{
if (q == NULL || cfg == NULL || res == NULL) {
return 0;
}
uint32_t len = 0;
len += srsran_pusch_nr_rx_info(&q->pusch, cfg, &cfg->grant, res, str, str_len - len);
len = srsran_print_check(str, str_len, len, "snr=%+.1f", q->chest_pusch.snr_db);
return len;
}

@ -190,7 +190,33 @@ bool slot_worker::work_ul()
// Decode PUSCH
for (stack_interface_phy_nr::pusch_t& pusch : ul_sched.pusch) {
// ...
// Get payload PDU
stack_interface_phy_nr::pusch_info_t pusch_info = {};
pusch_info.uci_cfg = pusch.sch.uci;
pusch_info.pid = pusch.pid;
pusch_info.pusch_data.tb[0].payload = pusch.data[0];
pusch_info.pusch_data.tb[1].payload = pusch.data[1];
// Decode PUSCH
if (srsran_gnb_ul_get_pusch(&gnb_ul, &ul_slot_cfg, &pusch.sch, &pusch.sch.grant, &pusch_info.pusch_data) <
SRSRAN_SUCCESS) {
logger.error("Error getting PUSCH");
return false;
}
// Inform stack
if (stack.pusch_info(ul_slot_cfg, pusch_info) < SRSRAN_SUCCESS) {
logger.error("Error pushing PUSCH information to stack");
return false;
}
// Log PUSCH decoding
if (logger.info.enabled()) {
std::array<char, 512> str;
srsran_gnb_ul_pusch_info(&gnb_ul, &pusch.sch, &pusch_info.pusch_data, str.data(), (uint32_t)str.size());
logger.info("PUSCH: %s", str.data());
}
}
return true;
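A note on buffer ownership implied by this loop (my reading of the patch, not stated explicitly): the payload pointers in pusch.data are provided by the stack when it builds ul_sched, so the decoder writes the transport block directly into the stack's HARQ buffer and pusch_info() only carries metadata (CRC, UCI, HARQ pid) back. A commented trace of that path, using the dummy stack introduced later in this commit:

// Data-path sketch, illustrative only:
//   stack (get_ul_sched):      pusch.data[0] = rx_harq_proc[pid].get_tb(tbs).data();
//   PHY (work_ul):             pusch_info.pusch_data.tb[0].payload = pusch.data[0];
//   PHY (srsran_gnb_ul_get_pusch): decodes the transport block into that same buffer
//   stack (pusch_info):        reads CRC and UCI from pusch_info; the payload already
//                              sits in its own HARQ buffer, so no copy is needed.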

@ -88,21 +88,24 @@ private:
std::mutex mutex;
srsran_sch_cfg_nr_t pusch = {};
bool valid = false;
uint32_t pid = 0;
public:
pending_pusch_t() = default;
void push(const srsran_sch_cfg_nr_t& pusch_)
void push(const uint32_t& pid_, const srsran_sch_cfg_nr_t& pusch_)
{
std::unique_lock<std::mutex> lock(mutex);
pusch = pusch_;
pid = pid_;
valid = true;
}
bool pop(srsran_sch_cfg_nr_t& pusch_)
bool pop(uint32_t& pid_, srsran_sch_cfg_nr_t& pusch_)
{
std::unique_lock<std::mutex> lock(mutex);
bool ret = valid;
pusch_ = pusch;
pid_ = pid;
valid = false;
return ret;
}
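A usage sketch of the extended push/pop interface, following the surrounding test code (the ring indexing and helper names mirror the patch, but the fragment is illustrative rather than a verbatim excerpt):

// At UL DCI time (slot n): remember which HARQ process and PUSCH configuration
// the UE is expected to use k slots later.
pending_pusch[TTI_TX(slot_cfg.idx) % pending_pusch.size()].push(dci.pid, pusch_cfg);

// At UL reception time (slot n + k): recover both, so the PHY can be handed the
// right softbuffer and payload buffer for that HARQ process.
mac_interface_phy_nr::pusch_t pusch = {};
if (pending_pusch[slot_cfg.idx % pending_pusch.size()].pop(pusch.pid, pusch.sch)) {
  pusch.data[0] = rx_harq_proc[pusch.pid].get_tb(pusch.sch.grant.tb[0].tbs).data();
  ul_sched.pusch.push_back(pusch);
}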
@ -113,16 +116,12 @@ private:
srsran::circular_array<dummy_rx_harq_proc, SRSRAN_MAX_HARQ_PROC_DL_NR> rx_harq_proc;
private:
bool schedule_pdsch(const srsran_slot_cfg_t& slot_cfg, dl_sched_t& dl_sched)
{
// Instantiate PDCCH and PDSCH
pdcch_dl_t pdcch = {};
pdsch_t pdsch = {};
// Select grant and set data
pdsch.data[0] = tx_harq_proc[slot_cfg.idx].data.data();
// Second TB is not used
pdsch.data[1] = nullptr;
@ -161,14 +160,15 @@ private:
return false;
}
// Set TBS
// Select grant and set data
pdsch.data[0] = tx_harq_proc[slot_cfg.idx].get_tb(pdsch.sch.grant.tb[0].tbs).data();
// Generate random data
srsran_random_byte_vector(random_gen, pdsch.data[0], pdsch.sch.grant.tb[0].tbs / 8);
// Set TBS
tx_harq_proc[slot_cfg.idx].tbs = pdsch.sch.grant.tb[0].tbs;
// Set softbuffer
pdsch.sch.grant.tb[0].softbuffer.tx = &tx_harq_proc[slot_cfg.idx].softbuffer;
pdsch.sch.grant.tb[0].softbuffer.tx = &tx_harq_proc[slot_cfg.idx].get_softbuffer(dci.ndi);
// Reset Tx softbuffer always
srsran_softbuffer_tx_reset(pdsch.sch.grant.tb[0].softbuffer.tx);
@ -222,20 +222,33 @@ private:
return false;
}
// Set TBS
rx_harq_proc[slot_cfg.idx].tbs = pusch_cfg.grant.tb[0].tbs;
// Set softbuffer
pusch_cfg.grant.tb[0].softbuffer.rx = &rx_harq_proc[slot_cfg.idx].softbuffer;
// Reset Tx softbuffer always
srsran_softbuffer_rx_reset(pusch_cfg.grant.tb[0].softbuffer.rx);
pusch_cfg.grant.tb[0].softbuffer.rx = &rx_harq_proc[slot_cfg.idx].get_softbuffer(dci.ndi);
// Push scheduling results
dl_sched.pdcch_ul.push_back(pdcch);
// Set pending PUSCH
pending_pusch[TTI_TX(slot_cfg.idx) % pending_pusch.size()].push(pusch_cfg);
pending_pusch[TTI_TX(slot_cfg.idx) % pending_pusch.size()].push(dci.pid, pusch_cfg);
return true;
}
bool handle_uci_data(const srsran_uci_cfg_nr_t& cfg, const srsran_uci_value_nr_t& value)
{
std::unique_lock<std::mutex> lock(mac_metrics_mutex);
for (uint32_t i = 0; i < cfg.ack.count; i++) {
const srsran_harq_ack_bit_t* ack_bit = &cfg.ack.bits[i];
bool is_ok = (value.ack[i] == 1) and value.valid;
uint32_t tb_count = (ack_bit->tb0 ? 1 : 0) + (ack_bit->tb1 ? 1 : 0);
mac_metrics.tx_brate += tx_harq_proc[ack_bit->pid].get_tbs();
mac_metrics.tx_pkts += tb_count;
if (not is_ok) {
mac_metrics.tx_errors += tb_count;
logger.debug("NACK received!");
}
}
return true;
}
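For clarity, a hypothetical walk-through of the accounting in handle_uci_data(): one HARQ-ACK bit covering both transport blocks of a two-TB PDSCH on HARQ pid 3, reported as NACK inside a valid UCI payload.

// Hypothetical values, illustrative only:
//   ack_bit->tb0 = true, ack_bit->tb1 = true   -> tb_count = 2
//   value.valid  = true, value.ack[i] = 0      -> is_ok = false
// Resulting metric updates:
//   mac_metrics.tx_brate  += tx_harq_proc[3].get_tbs();
//   mac_metrics.tx_pkts   += 2;
//   mac_metrics.tx_errors += 2; // a NACK counts both TBs of the grant as errored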
@ -248,7 +261,7 @@ public:
uint32_t ss_id = 1; ///< Search Space identifier
uint32_t pdcch_aggregation_level = 0; ///< PDCCH aggregation level
uint32_t pdcch_dl_candidate_index = 0; ///< PDCCH DL DCI candidate index
uint32_t pdcch_ul_candidate_index = 0; ///< PDCCH UL DCI candidate index
uint32_t pdcch_ul_candidate_index = 1; ///< PDCCH UL DCI candidate index
uint32_t dl_start_rb = 0; ///< Start resource block
uint32_t dl_length_rb = 0; ///< Number of resource blocks
uint32_t ul_start_rb = 0; ///< Start resource block
@ -258,11 +271,7 @@ public:
};
gnb_dummy_stack(args_t args) :
mcs(args.mcs),
rnti(args.rnti),
dl_time_res(args.dl_time_res),
phy_cfg(args.phy_cfg),
ss_id(args.ss_id)
mcs(args.mcs), rnti(args.rnti), dl_time_res(args.dl_time_res), phy_cfg(args.phy_cfg), ss_id(args.ss_id)
{
random_gen = srsran_random_init(0x1234);
logger.set_level(srslog::str_to_basic_level(args.log_level));
@ -363,8 +372,8 @@ public:
}
}
}
mac_interface_phy_nr::pusch_t pusch = {};
bool has_pusch = pending_pusch[slot_cfg.idx % pending_pusch.size()].pop(pusch.sch);
mac_interface_phy_nr::pusch_t pusch = {};
bool has_pusch = pending_pusch[slot_cfg.idx % pending_pusch.size()].pop(pusch.pid, pusch.sch);
srsran_uci_cfg_nr_t uci_cfg = {};
if (not phy_cfg.get_uci_cfg(slot_cfg, ack, uci_cfg)) {
@ -372,7 +381,14 @@ public:
return SRSRAN_ERROR;
}
// Schedule PUSCH
if (has_pusch) {
// Generate data
pusch.data[0] = rx_harq_proc[pusch.pid].get_tb(pusch.sch.grant.tb[0].tbs).data();
pusch.data[1] = nullptr;
srsran_random_byte_vector(random_gen, pusch.data[0], pusch.sch.grant.tb[0].tbs / 8);
// Put UCI configuration in PUSCH config
if (not phy_cfg.get_pusch_uci_cfg(slot_cfg, uci_cfg, pusch.sch)) {
logger.error("Error setting UCI configuration in PUSCH");
return SRSRAN_ERROR;
@ -380,7 +396,10 @@ public:
ul_sched.pusch.push_back(pusch);
return SRSRAN_SUCCESS;
} else if (has_ack) {
}
// If any UCI information is triggered, schedule PUCCH
if (uci_cfg.ack.count > 0 || uci_cfg.nof_csi > 0 || uci_cfg.o_sr > 0) {
mac_interface_phy_nr::pucch_t pucch = {};
pucch.uci_cfg = uci_cfg;
if (not phy_cfg.get_pucch_uci_cfg(slot_cfg, uci_cfg, pucch.pucch_cfg, pucch.resource)) {
@ -389,33 +408,42 @@ public:
}
ul_sched.pucch.push_back(pucch);
return SRSRAN_SUCCESS;
}
return 0;
// Otherwise no UL scheduling
return SRSRAN_SUCCESS;
}
int pucch_info(const srsran_slot_cfg_t& slot_cfg, const pucch_info_t& pucch_info) override
{
std::unique_lock<std::mutex> lock(mac_metrics_mutex);
for (uint32_t i = 0; i < pucch_info.uci_data.cfg.ack.count; i++) {
const srsran_harq_ack_bit_t* ack_bit = &pucch_info.uci_data.cfg.ack.bits[i];
bool is_ok = (pucch_info.uci_data.value.ack[i] == 1) and pucch_info.uci_data.value.valid;
uint32_t tb_count = (ack_bit->tb0 ? 1 : 0) + (ack_bit->tb1 ? 1 : 0);
mac_metrics.tx_brate += tx_harq_proc[ack_bit->pid].tbs;
mac_metrics.tx_pkts += tb_count;
if (not is_ok) {
mac_metrics.tx_errors += tb_count;
logger.debug("NACK received!");
}
// Handle UCI data
if (not handle_uci_data(pucch_info.uci_data.cfg, pucch_info.uci_data.value)) {
logger.error("Error handling UCI data from PUCCH reception");
return SRSRAN_ERROR;
}
// Handle PHY metrics
// ...
return SRSRAN_SUCCESS;
}
int pusch_info(const srsran_slot_cfg_t& slot_cfg, const pusch_info_t& pusch_info) override
{
// ... Not implemented
return SRSRAN_ERROR;
// Handle UCI data
if (not handle_uci_data(pusch_info.uci_cfg, pusch_info.pusch_data.uci)) {
logger.error("Error handling UCI data from PUCCH reception");
return SRSRAN_ERROR;
}
if (not pusch_info.pusch_data.tb[0].crc) {
mac_metrics.rx_errors++;
}
mac_metrics.rx_brate += rx_harq_proc[pusch_info.pid].get_tbs();
mac_metrics.rx_pkts++;
return SRSRAN_SUCCESS;
}
srsenb::mac_ue_metrics_t get_metrics()

@ -20,12 +20,16 @@
#include <srsran/common/phy_cfg_nr.h>
#include <srsran/common/standard_streams.h>
struct dummy_rx_harq_proc {
static const uint32_t MAX_TB_SZ = SRSRAN_LDPC_MAX_LEN_CB * SRSRAN_SCH_NR_MAX_NOF_CB_LDPC;
class dummy_rx_harq_proc
{
private:
srsran::byte_buffer_t data;
srsran_softbuffer_rx_t softbuffer = {};
std::atomic<uint32_t> tbs = {0};
uint32_t tbs = {0};
bool first = true;
uint32_t ndi = 0;
public:
dummy_rx_harq_proc() : data(0)
{
// Initialise softbuffer
@ -36,6 +40,25 @@ struct dummy_rx_harq_proc {
}
~dummy_rx_harq_proc() { srsran_softbuffer_rx_free(&softbuffer); }
srsran::byte_buffer_t& get_tb(uint32_t tbs_)
{
tbs = tbs_;
return data;
}
srsran_softbuffer_rx_t& get_softbuffer(uint32_t ndi_)
{
if (ndi != ndi_ || first) {
srsran_softbuffer_rx_reset(&softbuffer);
ndi = ndi_;
first = false;
}
return softbuffer;
}
uint32_t get_tbs() const { return tbs; }
};
#endif // SRSRAN_DUMMY_RX_HARQ_PROC_H
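The NDI handling above is what enables HARQ soft combining in the test: the softbuffer is cleared only when the new-data indicator toggles (a new transmission), while a retransmission with the same NDI reuses the accumulated soft bits. A minimal behavioural sketch, assuming the class as defined above:

dummy_rx_harq_proc proc;
srsran_softbuffer_rx_t& a = proc.get_softbuffer(0); // first call: buffer reset (new TX)
srsran_softbuffer_rx_t& b = proc.get_softbuffer(0); // same NDI: no reset, soft bits kept (reTX)
srsran_softbuffer_rx_t& c = proc.get_softbuffer(1); // NDI toggled: buffer reset again (new TX)

The Tx counterpart below follows the same rule for the transmit softbuffer.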

@ -20,12 +20,16 @@
#include <srsran/common/phy_cfg_nr.h>
#include <srsran/common/standard_streams.h>
struct dummy_tx_harq_proc {
static const uint32_t MAX_TB_SZ = SRSRAN_LDPC_MAX_LEN_CB * SRSRAN_SCH_NR_MAX_NOF_CB_LDPC;
class dummy_tx_harq_proc
{
private:
srsran::byte_buffer_t data;
srsran_softbuffer_tx_t softbuffer = {};
std::atomic<uint32_t> tbs = {0};
bool first = true;
uint32_t ndi = 0;
public:
dummy_tx_harq_proc()
{
// Initialise softbuffer
@ -36,6 +40,25 @@ struct dummy_tx_harq_proc {
}
~dummy_tx_harq_proc() { srsran_softbuffer_tx_free(&softbuffer); }
srsran::byte_buffer_t& get_tb(uint32_t tbs_)
{
tbs = tbs_;
return data;
}
srsran_softbuffer_tx_t& get_softbuffer(uint32_t ndi_)
{
if (ndi_ != ndi || first) {
srsran_softbuffer_tx_reset(&softbuffer);
ndi = ndi_;
first = false;
}
return softbuffer;
}
uint32_t get_tbs() const { return tbs; }
};
#endif // SRSRAN_TX_DUMMY_HARQ_PROC_H

@ -0,0 +1,61 @@
/**
*
* \section COPYRIGHT
*
* Copyright 2013-2021 Software Radio Systems Limited
*
* By using this file, you agree to the terms and conditions set
* forth in the LICENSE file which can be found at the top level of
* the distribution.
*
*/
#ifndef SRSRAN_DUMMY_UE_STACK_H
#define SRSRAN_DUMMY_UE_STACK_H
#include <srsran/interfaces/ue_nr_interfaces.h>
class ue_dummy_stack : public srsue::stack_interface_phy_nr
{
private:
srsran_random_t random_gen = srsran_random_init(0x4567);
uint16_t rnti = 0;
bool valid = false;
srsran::circular_array<dummy_tx_harq_proc, SRSRAN_MAX_HARQ_PROC_DL_NR> tx_harq_proc;
srsran::circular_array<dummy_rx_harq_proc, SRSRAN_MAX_HARQ_PROC_DL_NR> rx_harq_proc;
public:
struct args_t {
uint16_t rnti = 0x1234;
};
ue_dummy_stack(const args_t& args) : rnti(args.rnti) { valid = true; }
~ue_dummy_stack() { srsran_random_free(random_gen); }
void in_sync() override {}
void out_of_sync() override {}
void run_tti(const uint32_t tti) override {}
int sf_indication(const uint32_t tti) override { return 0; }
sched_rnti_t get_dl_sched_rnti_nr(const uint32_t tti) override { return {rnti, srsran_rnti_type_c}; }
sched_rnti_t get_ul_sched_rnti_nr(const uint32_t tti) override { return {rnti, srsran_rnti_type_c}; }
void new_grant_dl(const uint32_t cc_idx, const mac_nr_grant_dl_t& grant, tb_action_dl_t* action) override
{
action->tb.enabled = true;
action->tb.softbuffer = &rx_harq_proc[grant.pid].get_softbuffer(grant.ndi);
}
void tb_decoded(const uint32_t cc_idx, const mac_nr_grant_dl_t& grant, tb_action_dl_result_t result) override {}
void new_grant_ul(const uint32_t cc_idx, const mac_nr_grant_ul_t& grant, tb_action_ul_t* action) override
{
if (action == nullptr) {
return;
}
action->tb.enabled = true;
action->tb.payload = &tx_harq_proc[grant.pid].get_tb(grant.tbs);
action->tb.softbuffer = &tx_harq_proc[grant.pid].get_softbuffer(grant.ndi);
srsran_random_byte_vector(random_gen, action->tb.payload->msg, grant.tbs / 8);
}
void prach_sent(uint32_t tti, uint32_t s_id, uint32_t t_id, uint32_t f_id, uint32_t ul_carrier_id) override {}
bool sr_opportunity(uint32_t tti, uint32_t sr_id, bool meas_gap, bool ul_sch_tx) override { return false; }
bool is_valid() const { return valid; }
};
#endif // SRSRAN_DUMMY_UE_STACK_H

@ -11,6 +11,7 @@
*/
#include "dummy_gnb_stack.h"
#include "dummy_ue_stack.h"
#include "srsran/common/phy_cfg_nr_default.h"
#include "srsran/common/test_common.h"
#include "test_bench.h"
@ -31,45 +32,6 @@ test_bench::args_t::args_t(int argc, char** argv)
cell_list[0].pdcch = phy_cfg.pdcch;
}
class ue_dummy_stack : public srsue::stack_interface_phy_nr
{
private:
uint16_t rnti = 0;
bool valid = false;
srsran::circular_array<dummy_tx_harq_proc, SRSRAN_MAX_HARQ_PROC_DL_NR> tx_harq_proc;
srsran::circular_array<dummy_rx_harq_proc, SRSRAN_MAX_HARQ_PROC_DL_NR> rx_harq_proc;
public:
struct args_t {
uint16_t rnti = 0x1234;
};
ue_dummy_stack(const args_t& args) : rnti(args.rnti) { valid = true; }
void in_sync() override {}
void out_of_sync() override {}
void run_tti(const uint32_t tti) override {}
int sf_indication(const uint32_t tti) override { return 0; }
sched_rnti_t get_dl_sched_rnti_nr(const uint32_t tti) override { return {rnti, srsran_rnti_type_c}; }
sched_rnti_t get_ul_sched_rnti_nr(const uint32_t tti) override { return {rnti, srsran_rnti_type_c}; }
void new_grant_dl(const uint32_t cc_idx, const mac_nr_grant_dl_t& grant, tb_action_dl_t* action) override
{
action->tb.enabled = true;
action->tb.softbuffer = &rx_harq_proc[grant.pid].softbuffer;
}
void tb_decoded(const uint32_t cc_idx, const mac_nr_grant_dl_t& grant, tb_action_dl_result_t result) override {}
void new_grant_ul(const uint32_t cc_idx, const mac_nr_grant_ul_t& grant, tb_action_ul_t* action) override
{
if (action == nullptr) {
return;
}
action->tb.enabled = true;
action->tb.payload = &rx_harq_proc[grant.pid].data;
}
void prach_sent(uint32_t tti, uint32_t s_id, uint32_t t_id, uint32_t f_id, uint32_t ul_carrier_id) override {}
bool sr_opportunity(uint32_t tti, uint32_t sr_id, bool meas_gap, bool ul_sch_tx) override { return false; }
bool is_valid() const { return valid; }
};
int main(int argc, char** argv)
{
srslog::init();
@ -81,7 +43,7 @@ int main(int argc, char** argv)
args.gnb_args.nof_phy_threads = 1;
args.ue_args.log.id_preamble = " UE/";
args.ue_args.log.phy_level = "info";
args.ue_args.log.phy_hex_limit = 0;
args.ue_args.log.phy_hex_limit = 1;
args.ue_args.nof_phy_threads = 1;
// Parse arguments
@ -134,18 +96,37 @@ int main(int argc, char** argv)
if (mac_metrics.tx_pkts != 0) {
pdsch_bler = (float)mac_metrics.tx_errors / (float)mac_metrics.tx_pkts;
}
float pdsch_rate = 0.0f;
float pusch_bler = 0.0f;
if (mac_metrics.rx_pkts != 0) {
pusch_bler = (float)mac_metrics.rx_errors / (float)mac_metrics.rx_pkts;
}
float pdsch_shed_rate = 0.0f;
if (mac_metrics.tx_pkts != 0) {
pdsch_rate = (float)mac_metrics.tx_brate / (float)mac_metrics.tx_pkts / 1000.0f;
pdsch_shed_rate = (float)mac_metrics.tx_brate / (float)mac_metrics.tx_pkts / 1000.0f;
}
float pusch_shed_rate = 0.0f;
if (mac_metrics.rx_pkts != 0) {
pusch_shed_rate = (float)mac_metrics.rx_brate / (float)mac_metrics.rx_pkts / 1000.0f;
}
srsran::console("PDSCH:\n");
srsran::console(" Count: %d\n", mac_metrics.tx_pkts);
srsran::console(" BLER: %f\n", pdsch_bler);
srsran::console(" Rate: %f Mbps\n", pdsch_rate);
srsran::console(" Count: %d\n", mac_metrics.tx_pkts);
srsran::console(" BLER: %f\n", pdsch_bler);
srsran::console(" Sched Rate: %f Mbps\n", pdsch_shed_rate);
srsran::console(" Net Rate: %f Mbps\n", (1.0f - pdsch_bler) * pdsch_shed_rate);
srsran::console(" Retx Rate: %f Mbps\n", pdsch_bler * pdsch_shed_rate);
srsran::console("\n");
srsran::console("PUSCH:\n");
srsran::console(" Count: %d\n", mac_metrics.rx_pkts);
srsran::console(" BLER: %f\n", pusch_bler);
srsran::console(" Sched Rate: %f Mbps\n", pusch_shed_rate);
srsran::console(" Net Rate: %f Mbps\n", (1.0f - pusch_bler) * pusch_shed_rate);
srsran::console(" Retx Rate: %f Mbps\n", pusch_bler * pusch_shed_rate);
// Assert metrics
TESTASSERT(mac_metrics.tx_errors == 0);
TESTASSERT(mac_metrics.rx_errors == 0);
// If reached here, the test is successful
return SRSRAN_SUCCESS;
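The printed figures follow directly from the accumulated counters. A worked example with made-up numbers, assuming one transport block per 1 ms slot (which is what turns the average TBS per packet into Mbps after the division by 1000):

// Hypothetical PUSCH counters:
//   rx_pkts = 100, rx_errors = 10, rx_brate = 2,000,000 bits
// Derived figures:
//   pusch_bler      = 10 / 100               = 0.10
//   pusch_shed_rate = 2,000,000 / 100 / 1000 = 20.0 Mbps (scheduled rate)
//   net rate        = (1 - 0.10) * 20.0      = 18.0 Mbps
//   retx rate       = 0.10 * 20.0            =  2.0 Mbps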
