Added extra TBS and DCI aggregation level checks to sched test suite

master
Francisco Paisana authored 4 years ago, committed by Andre Puschmann
parent 5467ee9f83
commit e8ac98d06f

@@ -124,6 +124,8 @@ void ue_sim::update_dl_harqs(const sf_output_res_t& sf_out)
       h.nof_retxs    = 0;
       h.ndi          = data.dci.tb[0].ndi;
       h.first_tti_rx = sf_out.tti_rx;
+      h.dci_loc      = data.dci.location;
+      h.tbs          = data.tbs[0];
     } else {
       // it is retx
       h.nof_retxs++;
@@ -174,6 +176,7 @@ void ue_sim::update_ul_harqs(const sf_output_res_t& sf_out)
       h.nof_retxs    = 0;
       h.ndi          = data.dci.tb.ndi;
       h.first_tti_rx = sf_out.tti_rx;
+      h.tbs          = data.tbs;
     } else {
       h.nof_retxs++;
     }

@@ -29,13 +29,15 @@
 namespace srsenb {

 struct ue_harq_ctxt_t {
   bool                  active    = false;
   bool                  ndi       = false;
   uint32_t              pid       = 0;
   uint32_t              nof_txs   = 0;
   uint32_t              nof_retxs = 0;
   uint32_t              riv       = 0;
+  srslte_dci_location_t dci_loc   = {};
+  uint32_t              tbs       = 0;
   srslte::tti_point     last_tti_rx, first_tti_rx;
 };
 struct ue_cc_ctxt_t {
   std::array<ue_harq_ctxt_t, SRSLTE_FDD_NOF_HARQ> dl_harqs;
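Note: the two new fields snapshot the grant parameters observed on the first transmission of a HARQ process, so that later retransmissions can be validated against them. A minimal sketch of the invariant this enables, assuming the srsenb test headers above are available (the helper name is illustrative, not part of the commit):

// Illustrative helper (not in the commit): a DL retx grant must keep the
// TBS and the PDCCH aggregation level recorded at the first transmission.
bool dl_retx_is_consistent(const ue_harq_ctxt_t&                   h,
                           const sched_interface::dl_sched_data_t& pdsch)
{
  // DCI aggregation level (L) must not change across retxs of the same HARQ.
  if (h.dci_loc.L != pdsch.dci.location.L) {
    return false;
  }
  // The transport block size must also stay fixed, otherwise soft-combining breaks.
  return h.tbs == pdsch.tbs[0];
}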

@@ -75,11 +75,26 @@ int test_pdsch_grant(const sim_ue_ctxt_t& ue_ctxt,
               "The number of retx=%d exceeded its max=%d\n",
               h.nof_retxs + 1,
               ue_ctxt.ue_cfg.maxharq_tx);
+    CONDERROR(h.dci_loc.L != pdsch.dci.location.L, "Harq DCI aggregation level changed.\n");
+    CONDERROR(h.tbs != pdsch.tbs[0], "TBS changed during HARQ retx\n");
   }

   return SRSLTE_SUCCESS;
 }

+int test_dl_sched_result(const sim_enb_ctxt_t& enb_ctxt, const sf_output_res_t& sf_out)
+{
+  for (uint32_t cc = 0; cc < enb_ctxt.cell_params->size(); ++cc) {
+    for (uint32_t i = 0; i < sf_out.dl_cc_result[cc].nof_data_elems; ++i) {
+      const sched_interface::dl_sched_data_t& data = sf_out.dl_cc_result[cc].data[i];
+      CONDERROR(
+          enb_ctxt.ue_db.count(data.dci.rnti) == 0, "Allocated DL grant for non-existent rnti=0x%x\n", data.dci.rnti);
+      TESTASSERT(test_pdsch_grant(*enb_ctxt.ue_db.at(data.dci.rnti), sf_out.tti_rx, cc, data) == SRSLTE_SUCCESS);
+    }
+  }
+  return SRSLTE_SUCCESS;
+}
+
 int test_ul_sched_result(const sim_enb_ctxt_t& enb_ctxt, const sf_output_res_t& sf_out)
 {
   uint32_t pid = to_tx_ul(sf_out.tti_rx).to_uint() % (FDD_HARQ_DELAY_UL_MS + FDD_HARQ_DELAY_DL_MS);
@@ -133,7 +148,7 @@ int test_ul_sched_result(const sim_enb_ctxt_t& enb_ctxt, const sf_output_res_t&
               pid);

     // TEST: absent PUSCH grants for active DL HARQs must be either ACKs, last retx, or interrupted HARQs
-    if (phich_ptr != nullptr and pusch_ptr == nullptr) {
+    if ((phich_ptr != nullptr) and (pusch_ptr == nullptr)) {
       CONDERROR(not h_inactive, "PHICH NACK received for rnti=0x%x but no PUSCH retx reallocated\n", rnti);
     }
@@ -156,14 +171,13 @@ int test_ul_sched_result(const sim_enb_ctxt_t& enb_ctxt, const sf_output_res_t&
       }
       if (pusch_ptr->needs_pdcch) {
         // adaptive retx
+        CONDERROR(h.tbs != pusch_ptr->tbs, "TBS changed during HARQ retx\n");
+        CONDERROR(sched_utils::get_rvidx(h.nof_retxs + 1) != (uint32_t)pusch_ptr->dci.tb.rv,
+                  "Invalid rv index for retx\n");
       } else {
         // non-adaptive retx
         CONDERROR(pusch_ptr->dci.type2_alloc.riv != h.riv, "Non-adaptive retx must keep the same riv\n");
       }
-      if (pusch_ptr->tbs > 0) {
-        CONDERROR(sched_utils::get_rvidx(h.nof_retxs + 1) != (uint32_t)pusch_ptr->dci.tb.rv,
-                  "Invalid rv index for retx\n");
-      }
       CONDERROR(to_tx_ul(h.last_tti_rx) > sf_out.tti_rx, "UL harq pid=%d was reused too soon\n", h.pid);
     }
   }
@@ -304,14 +318,7 @@ int test_ra(const sim_enb_ctxt_t& enb_ctxt, const sf_output_res_t& sf_out)
 int test_all_ues(const sim_enb_ctxt_t& enb_ctxt, const sf_output_res_t& sf_out)
 {
-  for (uint32_t cc = 0; cc < enb_ctxt.cell_params->size(); ++cc) {
-    for (uint32_t i = 0; i < sf_out.dl_cc_result[cc].nof_data_elems; ++i) {
-      const sched_interface::dl_sched_data_t& data = sf_out.dl_cc_result[cc].data[i];
-      CONDERROR(
-          enb_ctxt.ue_db.count(data.dci.rnti) == 0, "Allocated DL grant for non-existent rnti=0x%x\n", data.dci.rnti);
-      TESTASSERT(test_pdsch_grant(*enb_ctxt.ue_db.at(data.dci.rnti), sf_out.tti_rx, cc, data) == SRSLTE_SUCCESS);
-    }
-  }
+  TESTASSERT(test_dl_sched_result(enb_ctxt, sf_out) == SRSLTE_SUCCESS);
   TESTASSERT(test_ul_sched_result(enb_ctxt, sf_out) == SRSLTE_SUCCESS);
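Note: the rv index check in the UL test relies on sched_utils::get_rvidx() mapping the number of transmissions already made to the next expected redundancy version. A minimal sketch of that mapping, assuming the standard LTE RV cycling order 0, 2, 3, 1 (the helper body below is illustrative, not copied from sched_utils):

#include <array>
#include <cstdint>

// Illustrative sketch of the assumed RV progression: the n-th (re)transmission
// of a transport block is expected to carry redundancy version {0, 2, 3, 1}[n % 4].
inline uint32_t get_rvidx_sketch(uint32_t retx_idx)
{
  static const std::array<uint32_t, 4> rv_order{{0, 2, 3, 1}};
  return rv_order[retx_idx % 4];
}

// Hence a PUSCH adaptive retx for a HARQ with h.nof_retxs completed retxs is
// expected to signal rv_order[(h.nof_retxs + 1) % 4] in its DCI.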

@@ -32,16 +32,11 @@ namespace srsenb {
  * - The DCI rv matches the nof DL harq retxs observed from the UE perspective
  * - The number of retxs per DL harq does not exceed its maximum set in the ue cfg
  * - HARQ pids are not reused too early (ACK hasn't arrive to the eNB yet)
- * @param ue_ctxt current simulation UE context
- * @param tti_rx TTI when scheduling decision was made
- * @param enb_cc_idx eNB carrier index
- * @param pdsch PDSCH grant data
+ * @param enb_ctxt current eNB state, including list of UEs
+ * @param sf_out result of a subframe sched result
  * @return error code
  */
-int test_pdsch_grant(const sim_ue_ctxt_t& ue_ctxt,
-                     srslte::tti_point tti_rx,
-                     uint32_t enb_cc_idx,
-                     const sched_interface::dl_sched_data_t& pdsch);
+int test_dl_sched_result(const sim_enb_ctxt_t& enb_ctxt, const sf_output_res_t& sf_out);

 /**
  * Checks PHICH & PUSCH grant content and whether it is consistent with the current UE HARQ state. Current checks:
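Note: with this refactor, per-subframe validation is funneled through the two entry points declared here, mirroring what test_all_ues() now does in the source file above. A minimal sketch of the expected call pattern, assuming the harness has already filled enb_ctxt and sf_out (the wrapper name is illustrative):

// Illustrative per-subframe driver: run the common DL and UL consistency
// checks after every scheduler decision.
int run_common_checks(const sim_enb_ctxt_t& enb_ctxt, const sf_output_res_t& sf_out)
{
  TESTASSERT(test_dl_sched_result(enb_ctxt, sf_out) == SRSLTE_SUCCESS);
  TESTASSERT(test_ul_sched_result(enb_ctxt, sf_out) == SRSLTE_SUCCESS);
  return SRSLTE_SUCCESS;
}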

@@ -65,8 +65,7 @@ using srslte::tti_point;
  * - DL adaptive retx/new tx <=> PDCCH alloc
  *******************************************************/

 uint32_t const seed = std::chrono::system_clock::now().time_since_epoch().count();
-bool           check_old_pids = false;

 struct ue_stats_t {
   uint64_t nof_dl_rbs = 0;
@@ -233,15 +232,6 @@ int sched_tester::assert_no_empty_allocs()
     CONDERROR(tti_data.total_ues.ul_retx_got_delayed, "There was a retx that was erased for user rnti=0x%x\n", rnti);
   }
-  // There must be allocations if there is pending data/retxs.
-  // bool no_dl_allocs = true;
-  // for (auto& it : tti_data.ue_data) {
-  //   if (it.second.dl_sched != nullptr) {
-  //     no_dl_allocs = false;
-  //   }
-  // }
-  // CONDERROR(tti_data.total_ues.has_dl_tx and no_dl_allocs, "There was pending DL data but no user got allocated\n");
-  // TODO: You have to verify if there is space for the retx since it is non-adaptive
   return SRSLTE_SUCCESS;
 }
@@ -253,7 +243,6 @@ int sched_tester::test_harqs()
     uint32_t h_id = data.dci.pid;
     uint16_t rnti = data.dci.rnti;
     const srsenb::dl_harq_proc& h = ue_db[rnti].get_dl_harq(h_id, CARRIER_IDX);
-    CONDERROR(h.is_empty(), "Cannot schedule an empty harq proc\n");
     CONDERROR(h.get_tti() != tti_point{tti_info.tti_params.tti_tx_dl},
               "The scheduled DL harq pid=%d does not a valid tti=%u\n",
               h_id,
@@ -287,7 +276,6 @@ int sched_tester::test_harqs()
   /* Check PHICH allocations */
   for (uint32_t i = 0; i < tti_info.ul_sched_result[CARRIER_IDX].nof_phich_elems; ++i) {
     const auto& phich = tti_info.ul_sched_result[CARRIER_IDX].phich[i];
-    CONDERROR(tti_data.ue_data.count(phich.rnti) == 0, "Allocated PHICH rnti no longer exists\n");
     const auto& hprev = tti_data.ue_data[phich.rnti].ul_harq;
     const auto* h = ue_db[phich.rnti].get_ul_harq(tti_info.tti_params.tti_tx_ul, CARRIER_IDX);
     CONDERROR(not hprev.has_pending_ack(), "Alloc PHICH did not have any pending ack\n");
@@ -306,35 +294,6 @@ int sched_tester::test_harqs()
       CONDERROR(h->get_pending_data() == 0 and !maxretx_flag, "NACKed harq has no pending data\n");
     }
   }
-  for (const auto& ue : ue_db) {
-    const auto& hprev = tti_data.ue_data[ue.first].ul_harq;
-    if (not hprev.has_pending_ack()) {
-      continue;
-    }
-    uint32_t i = 0;
-    for (; i < tti_info.ul_sched_result[CARRIER_IDX].nof_phich_elems; ++i) {
-      const auto& phich = tti_info.ul_sched_result[CARRIER_IDX].phich[i];
-      if (phich.rnti == ue.first) {
-        break;
-      }
-    }
-    CONDERROR(i == tti_info.ul_sched_result[CARRIER_IDX].nof_phich_elems,
-              "harq had pending ack but no phich was allocked\n");
-  }
-
-  // Check whether some pids got old
-  if (check_old_pids) {
-    for (auto& user : ue_db) {
-      for (int i = 0; i < srsenb::cc_sched_ue::SCHED_MAX_HARQ_PROC; i++) {
-        if (not user.second.get_dl_harq(i, CARRIER_IDX).is_empty(0)) {
-          if (tti_point{tti_info.tti_params.tti_tx_dl} > user.second.get_dl_harq(i, CARRIER_IDX).get_tti() + 49) {
-            TESTERROR(
-                "The pid=%d for rnti=0x%x got old.\n", user.second.get_dl_harq(i, CARRIER_IDX).get_id(), user.first);
-          }
-        }
-      }
-    }
-  }
   return SRSLTE_SUCCESS;
 }
