sched, feature enhancement, bugfix - allow RAR DL grants with variable PRB size

Some bugs had to be fixed along the way:
- the CFI can no longer change dynamically once a SIB/Paging/RAR allocation has been made. This avoids the effective coderate exceeding its maximum, since these DCIs are sized against the PDSCH resources implied by the CFI at allocation time.
- the previous bugfix required adding the ability to cancel the last PDCCH+PDSCH allocation.
A simplified sketch of the new variable-size allocation loop is included right after the commit metadata below.
Branch: master
Author: Francisco, committed by Francisco Paisana
parent 47f1175502
commit 1f35c4dc8b
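
The core idea is easiest to see in isolation. Below is a minimal, hypothetical C++ sketch of the retry pattern this commit generalises to RAR grants (and keeps for SIB/Paging): grow the RBG footprint of the grant until the DCI fits under the maximum effective coderate, and report a collision if no contiguous free interval of the requested size exists. The helper try_alloc(), the "3 RBG" threshold and the cell size are illustrative stand-ins only; the real logic lives in bc_sched/ra_sched and sf_sched in the diff below.

// Hypothetical stand-ins for the scheduler types (not the srsRAN API).
#include <cstdint>
#include <cstdio>

enum class outcome { SUCCESS, CODERATE_TOO_HIGH, RB_COLLISION };

// Toy allocator: pretend any grant smaller than 3 RBGs would exceed the
// maximum effective coderate for the payload at hand.
static outcome try_alloc(uint32_t nof_rbgs, uint32_t nof_free_rbgs)
{
  if (nof_rbgs > nof_free_rbgs) {
    return outcome::RB_COLLISION; // no contiguous free interval of this size
  }
  return (nof_rbgs >= 3) ? outcome::SUCCESS : outcome::CODERATE_TOO_HIGH;
}

int main()
{
  const uint32_t cell_nof_rbgs = 13; // e.g. a 5 MHz cell (25 PRBs, P=2)
  const uint32_t nof_free_rbgs = 13;

  // Grow the grant one RBG at a time while the coderate is still too high.
  outcome ret = outcome::CODERATE_TOO_HIGH;
  for (uint32_t nrbgs = 1; nrbgs < cell_nof_rbgs and ret == outcome::CODERATE_TOO_HIGH; ++nrbgs) {
    ret = try_alloc(nrbgs, nof_free_rbgs);
  }
  std::printf("allocation %s\n", ret == outcome::SUCCESS ? "succeeded" : "failed");
  return 0;
}
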

@@ -115,6 +115,8 @@ public:
   void reset();

 private:
+  alloc_outcome_t allocate_pending_rar(sf_sched* tti_sched, const pending_rar_t& rar, uint32_t& nof_grants_alloc);
+
   // args
   srslog::basic_logger&      logger;
   const sched_cell_params_t* cc_cfg = nullptr;

@@ -106,6 +106,10 @@ struct prb_interval : public srslte::interval<uint32_t> {

 /// Type of Allocation stored in PDSCH/PUSCH
 enum class alloc_type_t { DL_BC, DL_PCCH, DL_RAR, DL_DATA, UL_DATA };
+inline bool is_dl_ctrl_alloc(alloc_type_t a)
+{
+  return a == alloc_type_t::DL_BC or a == alloc_type_t::DL_PCCH or a == alloc_type_t::DL_RAR;
+}

 } // namespace srsenb

@@ -37,7 +37,8 @@ struct alloc_outcome_t {
     NO_DATA,
     INVALID_PRBMASK,
     INVALID_CARRIER,
-    INVALID_CODERATE
+    CODERATE_TOO_HIGH,
+    NOF_ALLOCS_LIMIT
   };
   result_enum result = ERROR;
   alloc_outcome_t()  = default;
@@ -101,10 +102,11 @@ public:
   void init(const sched_cell_params_t& cell_params_);
   void new_tti(tti_point tti_rx);
-  dl_ctrl_alloc_t alloc_dl_ctrl(uint32_t aggr_lvl, alloc_type_t alloc_type);
   alloc_outcome_t alloc_dl_ctrl(uint32_t aggr_lvl, rbg_interval rbg_range, alloc_type_t alloc_type);
   alloc_outcome_t alloc_dl_data(sched_ue* user, const rbgmask_t& user_mask, bool has_pusch_grant);
   bool            reserve_dl_rbgs(uint32_t start_rbg, uint32_t end_rbg);
+  void            rem_last_alloc_dl(rbg_interval rbgs);
   alloc_outcome_t alloc_ul_data(sched_ue* user, prb_interval alloc, bool needs_pdcch, bool strict = true);
   alloc_outcome_t reserve_ul_prbs(const prbmask_t& prbmask, bool strict);
   alloc_outcome_t reserve_ul_prbs(prb_interval alloc, bool strict);
@@ -128,7 +130,6 @@ private:
   const sched_cell_params_t* cc_cfg = nullptr;
   srslog::basic_logger&      logger;
   uint32_t                   nof_rbgs = 0;
-  uint32_t                   si_n_rbg = 0, rar_n_rbg = 0;
   uint32_t                   pucch_nrb = 0;
   prbmask_t                  pucch_mask;
@@ -137,7 +138,6 @@ private:
   // internal state
   tti_point tti_rx;
-  uint32_t  avail_rbg = 0;
   rbgmask_t dl_mask = {};
   prbmask_t ul_mask = {};
 };
@@ -157,8 +157,6 @@ public:
   struct rar_alloc_t {
     sf_sched::ctrl_alloc_t          alloc_data;
     sched_interface::dl_sched_rar_t rar_grant;
-    rar_alloc_t(const sf_sched::ctrl_alloc_t& c, const sched_interface::dl_sched_rar_t& r) : alloc_data(c), rar_grant(r)
-    {}
   };
   struct bc_alloc_t : public ctrl_alloc_t {
     sched_interface::dl_sched_bc_t bc_grant;
@@ -198,7 +196,7 @@ public:
   // DL alloc methods
   alloc_outcome_t alloc_sib(uint32_t aggr_lvl, uint32_t sib_idx, uint32_t sib_ntx, rbg_interval rbgs);
   alloc_outcome_t alloc_paging(uint32_t aggr_lvl, uint32_t paging_payload, rbg_interval rbgs);
-  std::pair<alloc_outcome_t, uint32_t> alloc_rar(uint32_t aggr_lvl, const pending_rar_t& rar_grant);
+  alloc_outcome_t alloc_rar(uint32_t aggr_lvl, const pending_rar_t& rar_grant, rbg_interval rbgs, uint32_t nof_grants);
   bool reserve_dl_rbgs(uint32_t rbg_start, uint32_t rbg_end) { return tti_alloc.reserve_dl_rbgs(rbg_start, rbg_end); }
   const std::vector<rar_alloc_t>& get_allocated_rars() const { return rar_allocs; }
@@ -228,7 +226,6 @@ public:
   const sched_cell_params_t* get_cc_cfg() const { return cc_cfg; }

 private:
-  ctrl_code_t alloc_dl_ctrl(uint32_t aggr_lvl, uint32_t tbs_bytes, uint16_t rnti);
   void set_bc_sched_result(const sf_cch_allocator::alloc_result_t& dci_result,
                            sched_interface::dl_sched_res_t*        dl_result);
   void set_rar_sched_result(const sf_cch_allocator::alloc_result_t& dci_result,

@@ -48,6 +48,8 @@ public:
    */
   bool alloc_dci(alloc_type_t alloc_type, uint32_t aggr_idx, sched_ue* user = nullptr, bool has_pusch_grant = false);

+  void rem_last_dci();
+
   // getters
   uint32_t get_cfi() const { return current_cfix + 1; }
   void     get_allocs(alloc_result_t* vec = nullptr, pdcch_mask_t* tot_mask = nullptr, size_t idx = 0) const;
@@ -107,6 +109,7 @@ private:
   // tti vars
   tti_point tti_rx;
   uint32_t  current_cfix     = 0;
+  uint32_t  current_max_cfix = 0;
   std::vector<alloc_tree_t>   alloc_trees;     ///< List of PDCCH alloc trees, where index is the cfi index
   std::vector<alloc_record_t> dci_record_list; ///< Keeps a record of all the PDCCH allocations done so far
 };

@@ -88,17 +88,19 @@ void bc_sched::update_si_windows(sf_sched* tti_sched)
 void bc_sched::alloc_sibs(sf_sched* tti_sched)
 {
-  const uint32_t max_nof_prbs_sib = 4;
   uint32_t current_sf_idx = tti_sched->get_tti_tx_dl().sf_idx();
   uint32_t current_sfn    = tti_sched->get_tti_tx_dl().sfn();
   for (uint32_t sib_idx = 0; sib_idx < pending_sibs.size(); sib_idx++) {
     sched_sib_t& pending_sib = pending_sibs[sib_idx];
-    if (cc_cfg->cfg.sibs[sib_idx].len > 0 and pending_sib.is_in_window and pending_sib.n_tx < 4) {
+    // Check if SIB is configured and within window
+    if (cc_cfg->cfg.sibs[sib_idx].len == 0 or not pending_sib.is_in_window or pending_sib.n_tx >= 4) {
+      continue;
+    }
+    // Check if subframe index is the correct one for SIB transmission
     uint32_t nof_tx = (sib_idx > 0) ? SRSLTE_MIN(srslte::ceil_div(cc_cfg->cfg.si_window_ms, 10), 4) : 4;
     uint32_t n_sf   = (tti_sched->get_tti_tx_dl() - pending_sibs[sib_idx].window_start);
-    // Check if there is any SIB to tx
     bool sib1_flag       = (sib_idx == 0) and (current_sfn % 2) == 0 and current_sf_idx == 5;
     bool other_sibs_flag = (sib_idx > 0) and
                            (n_sf >= (cc_cfg->cfg.si_window_ms / nof_tx) * pending_sibs[sib_idx].n_tx) and
@@ -107,27 +109,25 @@ void bc_sched::alloc_sibs(sf_sched* tti_sched)
       continue;
     }
-      // Attempt different number of RBGs
-      bool success = false;
-      for (uint32_t nrbgs = 2; nrbgs < 5; ++nrbgs) {
-        rbg_interval rbg_interv = find_empty_rbg_interval(nrbgs, tti_sched->get_dl_mask());
-        if (rbg_interv.empty()) {
-          break;
-        }
-        alloc_outcome_t ret = tti_sched->alloc_sib(bc_aggr_level, sib_idx, pending_sibs[sib_idx].n_tx, rbg_interv);
-        if (ret != alloc_outcome_t::INVALID_CODERATE) {
-          if (ret == alloc_outcome_t::SUCCESS) {
-            // SIB scheduled successfully
-            success = true;
-            pending_sibs[sib_idx].n_tx++;
-          }
-          break;
-        }
-        // Attempt again, but with more RBGs
-      }
-      if (not success) {
-        logger.warning("SCHED: Could not allocate SIB=%d, len=%d", sib_idx + 1, cc_cfg->cfg.sibs[sib_idx].len);
-      }
-    }
+    // Attempt PDSCH grants with increasing number of RBGs
+    alloc_outcome_t ret = alloc_outcome_t::CODERATE_TOO_HIGH;
+    for (uint32_t nrbgs = 1; nrbgs < cc_cfg->nof_rbgs and ret == alloc_outcome_t::CODERATE_TOO_HIGH; ++nrbgs) {
+      rbg_interval rbg_interv = find_empty_rbg_interval(nrbgs, tti_sched->get_dl_mask());
+      if (rbg_interv.length() != nrbgs) {
+        ret = alloc_outcome_t::RB_COLLISION;
+        break;
+      }
+      ret = tti_sched->alloc_sib(bc_aggr_level, sib_idx, pending_sibs[sib_idx].n_tx, rbg_interv);
+      if (ret == alloc_outcome_t::SUCCESS) {
+        // SIB scheduled successfully
+        pending_sibs[sib_idx].n_tx++;
+      }
+    }
+    if (ret != alloc_outcome_t::SUCCESS) {
+      logger.warning("SCHED: Could not allocate SIB=%d, len=%d. Cause: %s",
+                     sib_idx + 1,
+                     cc_cfg->cfg.sibs[sib_idx].len,
+                     ret.to_string());
+    }
   }
 }
@@ -135,22 +135,28 @@ void bc_sched::alloc_sibs(sf_sched* tti_sched)
 void bc_sched::alloc_paging(sf_sched* tti_sched)
 {
   uint32_t paging_payload = 0;
-  if (rrc->is_paging_opportunity(current_tti.to_uint(), &paging_payload) and paging_payload > 0) {
-    alloc_outcome_t ret = alloc_outcome_t::ERROR;
-    for (uint32_t nrbgs = 2; nrbgs < 5; ++nrbgs) {
-      rbg_interval rbg_interv = find_empty_rbg_interval(nrbgs, tti_sched->get_dl_mask());
-      ret = tti_sched->alloc_paging(bc_aggr_level, paging_payload, rbg_interv);
-      if (ret == alloc_outcome_t::SUCCESS or ret == alloc_outcome_t::RB_COLLISION) {
-        break;
-      }
-    }
-    if (ret != alloc_outcome_t::SUCCESS) {
-      logger.warning(
-          "SCHED: Could not allocate Paging with payload length=%d, cause=%s", paging_payload, ret.to_string());
-    }
+  // Check if pending Paging message
+  if (not rrc->is_paging_opportunity(tti_sched->get_tti_tx_dl().to_uint(), &paging_payload) or paging_payload == 0) {
+    return;
+  }
+  alloc_outcome_t ret = alloc_outcome_t::CODERATE_TOO_HIGH;
+  for (uint32_t nrbgs = 1; nrbgs < cc_cfg->nof_rbgs and ret == alloc_outcome_t::CODERATE_TOO_HIGH; ++nrbgs) {
+    rbg_interval rbg_interv = find_empty_rbg_interval(nrbgs, tti_sched->get_dl_mask());
+    if (rbg_interv.length() != nrbgs) {
+      ret = alloc_outcome_t::RB_COLLISION;
+      break;
+    }
+    ret = tti_sched->alloc_paging(bc_aggr_level, paging_payload, rbg_interv);
+  }
+  if (ret != alloc_outcome_t::SUCCESS) {
+    logger.warning(
+        "SCHED: Could not allocate Paging with payload length=%d, cause=%s", paging_payload, ret.to_string());
   }
 }

 void bc_sched::reset()
 {
@@ -167,6 +173,32 @@ ra_sched::ra_sched(const sched_cell_params_t& cfg_, sched_ue_list& ue_db_) :
   cc_cfg(&cfg_), logger(srslog::fetch_basic_logger("MAC")), ue_db(&ue_db_)
 {}

+alloc_outcome_t
+ra_sched::allocate_pending_rar(sf_sched* tti_sched, const pending_rar_t& rar, uint32_t& nof_grants_alloc)
+{
+  alloc_outcome_t ret = alloc_outcome_t::ERROR;
+  for (nof_grants_alloc = rar.msg3_grant.size(); nof_grants_alloc > 0; nof_grants_alloc--) {
+    ret = alloc_outcome_t::CODERATE_TOO_HIGH;
+    for (uint32_t nrbg = 1; nrbg < cc_cfg->nof_rbgs and ret == alloc_outcome_t::CODERATE_TOO_HIGH; ++nrbg) {
+      rbg_interval rbg_interv = find_empty_rbg_interval(nrbg, tti_sched->get_dl_mask());
+      if (rbg_interv.length() == nrbg) {
+        ret = tti_sched->alloc_rar(rar_aggr_level, rar, rbg_interv, nof_grants_alloc);
+      } else {
+        ret = alloc_outcome_t::RB_COLLISION;
+      }
+    }
+
+    // If allocation was not successful because there were not enough RBGs, try allocating fewer Msg3 grants
+    if (ret != alloc_outcome_t::CODERATE_TOO_HIGH and ret != alloc_outcome_t::RB_COLLISION) {
+      break;
+    }
+  }
+  if (ret != alloc_outcome_t::SUCCESS) {
+    logger.info("SCHED: RAR allocation for L=%d was postponed. Cause=%s", rar_aggr_level, ret.to_string());
+  }
+  return ret;
+}
+
 // Schedules RAR
 // On every call to this function, we schedule the oldest RAR which is still within the window. If outside the window we
 // discard it.
@@ -192,7 +224,7 @@ void ra_sched::dl_sched(sf_sched* tti_sched)
                        rar_window,
                        tti_tx_dl);
       srslte::console("%s\n", srslte::to_c_str(str_buffer));
-      logger.error("%s", srslte::to_c_str(str_buffer));
+      logger.warning("%s", srslte::to_c_str(str_buffer));
       it = pending_rars.erase(it);
       continue;
     }
@@ -200,35 +232,32 @@ void ra_sched::dl_sched(sf_sched* tti_sched)
     }

     // Try to schedule DCI + RBGs for RAR Grant
-    std::pair<alloc_outcome_t, uint32_t> ret = tti_sched->alloc_rar(rar_aggr_level, rar);
-
-    // If RAR allocation was successful:
-    // - in case all Msg3 grants were allocated, remove pending RAR
-    // - otherwise, erase only Msg3 grants that were allocated.
-    if (ret.first == alloc_outcome_t::SUCCESS) {
-      uint32_t nof_rar_allocs = ret.second;
-      if (nof_rar_allocs == rar.msg3_grant.size()) {
-        pending_rars.erase(it);
-      } else {
-        std::copy(rar.msg3_grant.begin() + nof_rar_allocs, rar.msg3_grant.end(), rar.msg3_grant.begin());
-        rar.msg3_grant.resize(rar.msg3_grant.size() - nof_rar_allocs);
-      }
-      break;
-    } else {
-      // If RAR allocation was not successful:
-      // - in case of unavailable RBGs, stop loop
-      // - otherwise, attempt to schedule next pending RAR
-      logger.info("SCHED: Could not allocate RAR for L=%d, cause=%s", rar_aggr_level, ret.first.to_string());
-      if (ret.first == alloc_outcome_t::RB_COLLISION) {
-        // there are not enough RBs for RAR or Msg3 allocation. We can skip this TTI
-        return;
-      }
-      // For any other type of error, continue with next pending RAR
-      ++it;
-    }
+    uint32_t        nof_rar_allocs = 0;
+    alloc_outcome_t ret            = allocate_pending_rar(tti_sched, rar, nof_rar_allocs);
+    if (ret == alloc_outcome_t::SUCCESS) {
+      // If RAR allocation was successful:
+      // - in case all Msg3 grants were allocated, remove pending RAR, and continue with following RAR
+      // - otherwise, erase only Msg3 grants that were allocated, and stop iteration
+      if (nof_rar_allocs == rar.msg3_grant.size()) {
+        it = pending_rars.erase(it);
+      } else {
+        std::copy(rar.msg3_grant.begin() + nof_rar_allocs, rar.msg3_grant.end(), rar.msg3_grant.begin());
+        rar.msg3_grant.resize(rar.msg3_grant.size() - nof_rar_allocs);
+        break;
+      }
+    } else {
+      // If RAR allocation was not successful:
+      // - in case of unavailable PDCCH space, try next pending RAR allocation
+      // - otherwise, stop iteration
+      if (ret != alloc_outcome_t::DCI_COLLISION) {
+        break;
+      }
+      ++it;
+    }
   }
 }

 int ra_sched::dl_rach_info(dl_sched_rar_info_t rar_info)
 {

@@ -22,7 +22,7 @@ const char* alloc_outcome_t::to_string() const
     case SUCCESS:
      return "success";
     case DCI_COLLISION:
-      return "dci_collision";
+      return "PDCCH position not available";
     case RB_COLLISION:
       return "rb_collision";
     case ERROR:
@@ -41,6 +41,10 @@ const char* alloc_outcome_t::to_string() const
       return "invalid rbg mask";
     case INVALID_CARRIER:
       return "invalid eNB carrier";
+    case CODERATE_TOO_HIGH:
+      return "Effective coderate is too high";
+    case NOF_ALLOCS_LIMIT:
+      return "Max number of allocations reached";
     default:
       break;
   }
@@ -118,8 +122,6 @@ void sf_grid_t::init(const sched_cell_params_t& cell_params_)
 {
   cc_cfg   = &cell_params_;
   nof_rbgs = cc_cfg->nof_rbgs;
-  si_n_rbg  = srslte::ceil_div(4, cc_cfg->P);
-  rar_n_rbg = srslte::ceil_div(3, cc_cfg->P);
   dl_mask.resize(nof_rbgs);
   ul_mask.resize(cc_cfg->nof_prb());
@@ -144,7 +146,6 @@ void sf_grid_t::new_tti(tti_point tti_rx_)
   dl_mask.reset();
   ul_mask.reset();
-  avail_rbg = nof_rbgs;

   // Reserve PRBs for PUCCH
   ul_mask |= pucch_mask;
@@ -181,7 +182,7 @@ alloc_outcome_t sf_grid_t::alloc_dl(uint32_t aggr_idx,
   if (not pdcch_alloc.alloc_dci(alloc_type, aggr_idx, user, has_pusch_grant)) {
     if (user != nullptr) {
       if (logger.debug.enabled()) {
-        logger.debug("No space in PDCCH for rnti=0x%x DL tx. Current PDCCH allocation: %s",
+        logger.debug("No space in PDCCH for rnti=0x%x DL tx. Current PDCCH allocation:\n%s",
                      user->get_rnti(),
                      pdcch_alloc.result_to_string(true).c_str());
       }
@@ -191,20 +192,11 @@ alloc_outcome_t sf_grid_t::alloc_dl(uint32_t aggr_idx,
   // Allocate RBGs
   dl_mask |= alloc_mask;
-  avail_rbg -= alloc_mask.count();

   return alloc_outcome_t::SUCCESS;
 }

-//! Allocates CCEs and RBs for control allocs. It allocates RBs in a contiguous manner.
-sf_grid_t::dl_ctrl_alloc_t sf_grid_t::alloc_dl_ctrl(uint32_t aggr_idx, alloc_type_t alloc_type)
-{
-  rbg_interval range{nof_rbgs - avail_rbg,
-                     nof_rbgs - avail_rbg + ((alloc_type == alloc_type_t::DL_RAR) ? rar_n_rbg : si_n_rbg)};
-  return {alloc_dl_ctrl(aggr_idx, range, alloc_type), range};
-}
-
+/// Allocates CCEs and RBs for control allocs. It allocates RBs in a contiguous manner.
 alloc_outcome_t sf_grid_t::alloc_dl_ctrl(uint32_t aggr_idx, rbg_interval rbg_range, alloc_type_t alloc_type)
 {
   if (alloc_type != alloc_type_t::DL_RAR and alloc_type != alloc_type_t::DL_BC and
@@ -252,7 +244,7 @@ alloc_outcome_t sf_grid_t::alloc_ul_data(sched_ue* user, prb_interval alloc, boo
   uint32_t aggr_idx = user->get_aggr_level(cc_cfg->enb_cc_idx, nof_bits);
   if (not pdcch_alloc.alloc_dci(alloc_type_t::UL_DATA, aggr_idx, user)) {
     if (logger.debug.enabled()) {
-      logger.debug("No space in PDCCH for rnti=0x%x UL tx. Current PDCCH allocation: %s",
+      logger.debug("No space in PDCCH for rnti=0x%x UL tx. Current PDCCH allocation:\n%s",
                    user->get_rnti(),
                    pdcch_alloc.result_to_string(true).c_str());
     }
@@ -271,6 +263,19 @@ bool sf_grid_t::reserve_dl_rbgs(uint32_t start_rbg, uint32_t end_rbg)
   return true;
 }

+void sf_grid_t::rem_last_alloc_dl(rbg_interval rbgs)
+{
+  if (pdcch_alloc.nof_allocs() == 0) {
+    logger.error("Remove DL alloc called for empty Subframe RB grid");
+    return;
+  }
+  pdcch_alloc.rem_last_dci();
+
+  rbgmask_t rbgmask(dl_mask.size());
+  rbgmask.fill(rbgs.start(), rbgs.stop());
+  dl_mask &= ~rbgmask;
+}
+
 alloc_outcome_t sf_grid_t::reserve_ul_prbs(prb_interval alloc, bool strict)
 {
   if (alloc.stop() > ul_mask.size()) {
@@ -365,69 +370,36 @@ void sf_sched::new_tti(tti_point tti_rx_, sf_sched_result* cc_results_)
 bool sf_sched::is_dl_alloc(uint16_t rnti) const
 {
-  for (const auto& a : data_allocs) {
-    if (a.rnti == rnti) {
-      return true;
-    }
-  }
-  return false;
+  return std::any_of(data_allocs.begin(), data_allocs.end(), [rnti](const dl_alloc_t& u) { return u.rnti == rnti; });
 }

 bool sf_sched::is_ul_alloc(uint16_t rnti) const
 {
-  for (const auto& a : ul_data_allocs) {
-    if (a.rnti == rnti) {
-      return true;
-    }
-  }
-  return false;
-}
-
-sf_sched::ctrl_code_t sf_sched::alloc_dl_ctrl(uint32_t aggr_lvl, uint32_t tbs_bytes, uint16_t rnti)
-{
-  ctrl_alloc_t ctrl_alloc{};
-  // based on rnti, check which type of alloc
-  alloc_type_t alloc_type = alloc_type_t::DL_RAR;
-  if (rnti == SRSLTE_SIRNTI) {
-    alloc_type = alloc_type_t::DL_BC;
-  } else if (rnti == SRSLTE_PRNTI) {
-    alloc_type = alloc_type_t::DL_PCCH;
-  }
-  /* Allocate space in the DL RBG and PDCCH grids */
-  sf_grid_t::dl_ctrl_alloc_t ret = tti_alloc.alloc_dl_ctrl(aggr_lvl, alloc_type);
-  if (not ret.outcome) {
-    return {ret.outcome, ctrl_alloc};
-  }
-  // Allocation Successful
-  ctrl_alloc.dci_idx   = tti_alloc.get_pdcch_grid().nof_allocs() - 1;
-  ctrl_alloc.rbg_range = ret.rbg_range;
-  ctrl_alloc.req_bytes = tbs_bytes;
-  return {ret.outcome, ctrl_alloc};
+  return std::any_of(
+      ul_data_allocs.begin(), ul_data_allocs.end(), [rnti](const ul_alloc_t& u) { return u.rnti == rnti; });
 }

 alloc_outcome_t sf_sched::alloc_sib(uint32_t aggr_lvl, uint32_t sib_idx, uint32_t sib_ntx, rbg_interval rbgs)
 {
   if (bc_allocs.size() >= sched_interface::MAX_BC_LIST) {
     logger.warning("SCHED: Maximum number of Broadcast allocations reached");
-    return alloc_outcome_t::ERROR;
+    return alloc_outcome_t::NOF_ALLOCS_LIMIT;
   }
   bc_alloc_t bc_alloc;

-  // Generate DCI for SIB
-  if (not generate_sib_dci(bc_alloc.bc_grant, get_tti_tx_dl(), sib_idx, sib_ntx, rbgs, *cc_cfg, tti_alloc.get_cfi())) {
-    return alloc_outcome_t::INVALID_CODERATE;
-  }
-
   // Allocate SIB RBGs and PDCCH
   alloc_outcome_t ret = tti_alloc.alloc_dl_ctrl(aggr_lvl, rbgs, alloc_type_t::DL_BC);
   if (ret != alloc_outcome_t::SUCCESS) {
     return ret;
   }

+  // Generate DCI for SIB
+  if (not generate_sib_dci(bc_alloc.bc_grant, get_tti_tx_dl(), sib_idx, sib_ntx, rbgs, *cc_cfg, tti_alloc.get_cfi())) {
+    // Cancel on-going allocation
+    tti_alloc.rem_last_alloc_dl(rbgs);
+    return alloc_outcome_t::CODERATE_TOO_HIGH;
+  }
+
   // Allocation Successful
   bc_alloc.dci_idx   = tti_alloc.get_pdcch_grid().nof_allocs() - 1;
   bc_alloc.rbg_range = rbgs;
@@ -441,21 +413,23 @@ alloc_outcome_t sf_sched::alloc_paging(uint32_t aggr_lvl, uint32_t paging_payloa
 {
   if (bc_allocs.size() >= sched_interface::MAX_BC_LIST) {
     logger.warning("SCHED: Maximum number of Broadcast allocations reached");
-    return alloc_outcome_t::ERROR;
+    return alloc_outcome_t::NOF_ALLOCS_LIMIT;
   }
   bc_alloc_t bc_alloc;

-  // Generate DCI for Paging message
-  if (not generate_paging_dci(bc_alloc.bc_grant, get_tti_tx_dl(), paging_payload, rbgs, *cc_cfg, tti_alloc.get_cfi())) {
-    return alloc_outcome_t::INVALID_CODERATE;
-  }
-
   // Allocate Paging RBGs and PDCCH
   alloc_outcome_t ret = tti_alloc.alloc_dl_ctrl(aggr_lvl, rbgs, alloc_type_t::DL_PCCH);
   if (ret != alloc_outcome_t::SUCCESS) {
     return ret;
   }

+  // Generate DCI for Paging message
+  if (not generate_paging_dci(bc_alloc.bc_grant, get_tti_tx_dl(), paging_payload, rbgs, *cc_cfg, tti_alloc.get_cfi())) {
+    // Cancel on-going allocation
+    tti_alloc.rem_last_alloc_dl(rbgs);
+    return alloc_outcome_t::CODERATE_TOO_HIGH;
+  }
+
   // Allocation Successful
   bc_alloc.dci_idx   = tti_alloc.get_pdcch_grid().nof_allocs() - 1;
   bc_alloc.rbg_range = rbgs;
@@ -465,53 +439,44 @@ alloc_outcome_t sf_sched::alloc_paging(uint32_t aggr_lvl, uint32_t paging_payloa
   return alloc_outcome_t::SUCCESS;
 }

-std::pair<alloc_outcome_t, uint32_t> sf_sched::alloc_rar(uint32_t aggr_lvl, const pending_rar_t& rar)
+alloc_outcome_t sf_sched::alloc_rar(uint32_t aggr_lvl, const pending_rar_t& rar, rbg_interval rbgs, uint32_t nof_grants)
 {
-  const uint32_t                       msg3_grant_size = 3;
-  std::pair<alloc_outcome_t, uint32_t> ret             = {alloc_outcome_t::ERROR, 0};
+  static const uint32_t msg3_nof_prbs = 3;

   if (rar_allocs.size() >= sched_interface::MAX_RAR_LIST) {
-    logger.warning("SCHED: Maximum number of RAR allocations per TTI reached.");
-    return ret;
+    logger.info("SCHED: Maximum number of RAR allocations per TTI reached.");
+    return alloc_outcome_t::NOF_ALLOCS_LIMIT;
   }

-  for (uint32_t nof_grants = rar.msg3_grant.size(); nof_grants > 0; nof_grants--) {
-    uint32_t buf_rar         = 7 * nof_grants + 1; // 1+6 bytes per RAR subheader+body and 1 byte for Backoff
-    uint32_t total_msg3_size = msg3_grant_size * nof_grants;
-    // check if there is enough space for Msg3, try again with a lower number of grants
-    if (last_msg3_prb + total_msg3_size > max_msg3_prb) {
-      ret.first = alloc_outcome_t::RB_COLLISION;
-      continue;
-    }
-    // allocate RBs and PDCCH
-    sf_sched::ctrl_code_t ret2 = alloc_dl_ctrl(aggr_lvl, buf_rar, rar.ra_rnti);
-    ret.first                  = ret2.first.result;
-    ret.second                 = nof_grants;
-    if (ret.first == alloc_outcome_t::SUCCESS) {
-      sched_interface::dl_sched_rar_t rar_grant;
-      if (generate_rar_dci(rar_grant,
-                           get_tti_tx_dl(),
-                           rar,
-                           ret2.second.rbg_range,
-                           nof_grants,
-                           last_msg3_prb,
-                           *cc_cfg,
-                           tti_alloc.get_cfi())) {
-        // RAR allocation successful
-        rar_allocs.emplace_back(ret2.second, rar_grant);
-        last_msg3_prb += msg3_grant_size * nof_grants;
-        return ret;
-      }
-    } else if (ret.first != alloc_outcome_t::RB_COLLISION) {
-      return ret;
-    }
-    // if there was no space for the RAR, try again with a lower number of grants
-  }
-  logger.info("SCHED: RAR allocation postponed due to lack of RBs");
+  uint32_t buf_rar           = 7 * nof_grants + 1; // 1+6 bytes per RAR subheader+body and 1 byte for Backoff
+  uint32_t total_ul_nof_prbs = msg3_nof_prbs * nof_grants;

+  // check if there is enough space for Msg3
+  if (last_msg3_prb + total_ul_nof_prbs > max_msg3_prb) {
+    return alloc_outcome_t::RB_COLLISION;
+  }
+
+  // allocate RBGs and PDCCH
+  alloc_outcome_t ret = tti_alloc.alloc_dl_ctrl(aggr_lvl, rbgs, alloc_type_t::DL_RAR);
+  if (ret != alloc_outcome_t::SUCCESS) {
+    return ret;
+  }
+
+  // Generate DCI for RAR
+  rar_alloc_t rar_alloc;
+  if (not generate_rar_dci(
+          rar_alloc.rar_grant, get_tti_tx_dl(), rar, rbgs, nof_grants, last_msg3_prb, *cc_cfg, tti_alloc.get_cfi())) {
+    // Cancel on-going allocation
+    tti_alloc.rem_last_alloc_dl(rbgs);
+    return alloc_outcome_t::CODERATE_TOO_HIGH;
+  }
+
+  // RAR allocation successful
+  rar_alloc.alloc_data.dci_idx   = tti_alloc.get_pdcch_grid().nof_allocs() - 1;
+  rar_alloc.alloc_data.rbg_range = rbgs;
+  rar_alloc.alloc_data.req_bytes = buf_rar;
+  rar_allocs.push_back(rar_alloc);
+  last_msg3_prb += total_ul_nof_prbs * nof_grants;

   return ret;
 }
@@ -531,7 +496,7 @@ alloc_outcome_t sf_sched::alloc_dl_user(sched_ue* user, const rbgmask_t& user_ma
 {
   if (data_allocs.size() >= sched_interface::MAX_DATA_LIST) {
     logger.warning("SCHED: Maximum number of DL allocations reached");
-    return alloc_outcome_t::ERROR;
+    return alloc_outcome_t::NOF_ALLOCS_LIMIT;
   }

   if (is_dl_alloc(user->get_rnti())) {
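
For readers not familiar with the scheduler internals, the following self-contained sketch (hypothetical types and names, not the srsRAN API) illustrates the allocate-then-roll-back pattern that alloc_sib/alloc_paging/alloc_rar now follow above: reserve RBGs and a PDCCH candidate first, then try to build the DCI, and undo the reservation if the resulting coderate would be too high.

#include <cstdint>
#include <cstdio>
#include <vector>

struct rbg_interval { uint32_t start; uint32_t stop; };

enum class outcome { SUCCESS, RB_COLLISION, CODERATE_TOO_HIGH };

struct grid {
  std::vector<bool> dl_mask;      // one flag per RBG
  uint32_t          nof_dcis = 0; // stand-in for the PDCCH allocation record

  bool alloc_ctrl(rbg_interval rbgs)
  {
    for (uint32_t i = rbgs.start; i < rbgs.stop; ++i) {
      if (dl_mask[i]) {
        return false; // collision with an earlier allocation
      }
    }
    for (uint32_t i = rbgs.start; i < rbgs.stop; ++i) {
      dl_mask[i] = true;
    }
    nof_dcis++;
    return true;
  }

  // Undo the last control allocation: clear its RBGs and drop the last DCI.
  void rem_last_alloc(rbg_interval rbgs)
  {
    for (uint32_t i = rbgs.start; i < rbgs.stop; ++i) {
      dl_mask[i] = false;
    }
    nof_dcis--;
  }
};

outcome alloc_ctrl_grant(grid& g, rbg_interval rbgs, bool dci_fits_coderate)
{
  if (not g.alloc_ctrl(rbgs)) {
    return outcome::RB_COLLISION;
  }
  if (not dci_fits_coderate) { // e.g. generate_sib_dci()/generate_rar_dci() failed
    g.rem_last_alloc(rbgs);    // cancel the on-going allocation
    return outcome::CODERATE_TOO_HIGH;
  }
  return outcome::SUCCESS;
}

int main()
{
  grid    g{std::vector<bool>(13, false)};
  outcome ret = alloc_ctrl_grant(g, {0, 2}, /*dci_fits_coderate=*/false);
  std::printf("result=%d, pending DCIs=%u\n", static_cast<int>(ret), g.nof_dcis);
  return 0;
}

As a side note on the RAR sizing used by sf_sched::alloc_rar above: per the code comments, a RAR aggregating n Msg3 grants occupies 7*n + 1 bytes of PDSCH payload (a 1-byte subheader plus 6-byte body per grant, plus one Backoff byte), and each Msg3 reserves 3 uplink PRBs, so for example n = 2 means a 15-byte RAR and 6 UL PRBs counted against max_msg3_prb.
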

@@ -319,8 +319,9 @@ void log_broadcast_allocation(const sched_interface::dl_sched_bc_t& bc,
   fmt::format_to(str_buffer, "{}", rbg_range);

   if (bc.type == sched_interface::dl_sched_bc_t::bc_type::BCCH) {
-    logger.debug("SCHED: SIB%d, rbgs=(%d,%d), dci=(%d,%d), rv=%d, len=%d, period=%d, mcs=%d",
+    logger.debug("SCHED: SIB%d, cc=%d, rbgs=(%d,%d), dci=(%d,%d), rv=%d, len=%d, period=%d, mcs=%d",
                  bc.index + 1,
+                 cell_params.enb_cc_idx,
                  rbg_range.start(),
                  rbg_range.stop(),
                  bc.dci.location.L,
@@ -330,8 +331,9 @@ void log_broadcast_allocation(const sched_interface::dl_sched_bc_t& bc,
                  cell_params.cfg.sibs[bc.index].period_rf,
                  bc.dci.tb[0].mcs_idx);
   } else {
-    logger.info("SCHED: PCH, rbgs=%s, dci=(%d,%d), tbs=%d, mcs=%d",
+    logger.info("SCHED: PCH, cc=%d, rbgs=%s, dci=(%d,%d), tbs=%d, mcs=%d",
+                cell_params.enb_cc_idx,
                 srslte::to_c_str(str_buffer),
                 bc.dci.location.L,
                 bc.dci.location.ncce,
                 bc.tbs,

@@ -38,6 +38,7 @@ void sf_cch_allocator::new_tti(tti_point tti_rx_)
   }

   dci_record_list.clear();
   current_cfix     = cc_cfg->sched_cfg->min_nof_ctrl_symbols - 1;
+  current_max_cfix = cc_cfg->sched_cfg->max_nof_ctrl_symbols - 1;
 }

 const cce_cfi_position_table*
@@ -70,7 +71,7 @@ bool sf_cch_allocator::alloc_dci(alloc_type_t alloc_type, uint32_t aggr_idx, sch
   bool success;
   do {
     success = alloc_dci_record(record, get_cfi() - 1);
-  } while (not success and get_cfi() < cc_cfg->sched_cfg->max_nof_ctrl_symbols and set_cfi(get_cfi() + 1));
+  } while (not success and current_cfix < current_max_cfix and set_cfi(get_cfi() + 1));

   if (not success) {
     // DCI allocation failed. go back to original CFI
@@ -82,9 +83,45 @@ bool sf_cch_allocator::alloc_dci(alloc_type_t alloc_type, uint32_t aggr_idx, sch
   // DCI record allocation successful
   dci_record_list.push_back(record);

+  if (is_dl_ctrl_alloc(alloc_type)) {
+    // Dynamic CFI not yet supported for DL control allocations, as coderate can be exceeded
+    current_max_cfix = current_cfix;
+  }
+
   return true;
 }

+void sf_cch_allocator::rem_last_dci()
+{
+  assert(not dci_record_list.empty());
+
+  // Remove DCI record
+  dci_record_list.pop_back();
+
+  // Remove leaves of PDCCH position decisions
+  auto& tree    = alloc_trees[current_cfix];
+  tree.prev_end = tree.prev_start;
+  if (dci_record_list.empty()) {
+    tree.prev_start = 0;
+  } else {
+    tree.prev_start = tree.dci_alloc_tree[tree.prev_start].parent_idx;
+    // Discover other tree nodes with same level
+    while (tree.prev_start > 0) {
+      uint32_t count = 0;
+      while (tree.dci_alloc_tree[tree.prev_start - 1].parent_idx >= 0) {
+        count++;
+      }
+      if (count == dci_record_list.size()) {
+        tree.prev_start--;
+      } else {
+        break;
+      }
+    }
+  }
+  tree.dci_alloc_tree.erase(tree.dci_alloc_tree.begin() + tree.prev_end, tree.dci_alloc_tree.end());
+}
+
 bool sf_cch_allocator::alloc_dci_record(const alloc_record_t& record, uint32_t cfix)
 {
   bool ret = false;

@@ -145,7 +145,7 @@ uint32_t sched_time_pf::try_ul_alloc(ue_ctxt& ue_ctxt, sched_ue& ue, sf_sched* t
                     : 0;
   }
   if (code == alloc_outcome_t::DCI_COLLISION) {
-    logger.info("SCHED: Couldn't find space in PDCCH for UL retx of rnti=0x%x", ue.get_rnti());
+    logger.info("SCHED: rnti=0x%x, cc=%d, Couldn't find space in PDCCH for UL tx", ue.get_rnti(), cc_cfg->enb_cc_idx);
   }
   return estim_tbs_bytes;
 }

@@ -142,7 +142,8 @@ void sched_time_rr::sched_ul_newtxs(sched_ue_list& ue_db, sf_sched* tti_sched, s
     }
     alloc_outcome_t ret = tti_sched->alloc_ul_user(&user, alloc);
     if (ret == alloc_outcome_t::DCI_COLLISION) {
-      logger.info("SCHED: Couldn't find space in PDCCH for UL tx of rnti=0x%x", user.get_rnti());
+      logger.info(
+          "SCHED: rnti=0x%x, cc=%d, Couldn't find space in PDCCH for UL tx", user.get_rnti(), cc_cfg->enb_cc_idx);
     }
   }
 }

@@ -17,7 +17,7 @@
 using namespace srsenb;

-uint32_t const seed = std::chrono::system_clock::now().time_since_epoch().count();
+uint32_t seed = std::chrono::system_clock::now().time_since_epoch().count();

 /*******************
  *     Logging     *
@@ -157,7 +157,7 @@ int test_scell_activation(uint32_t sim_number, test_scell_activation_params para
     }
   };
   generate_data(20, 1.0, P_ul_sr, randf());
-  tester.test_next_ttis(generator.tti_events);
+  TESTASSERT(tester.test_next_ttis(generator.tti_events) == SRSLTE_SUCCESS);

   // Event: Reconf Complete. Activate SCells. Check if CE correctly transmitted
   generator.step_tti();
@@ -169,7 +169,7 @@ int test_scell_activation(uint32_t sim_number, test_scell_activation_params para
     user->ue_sim_cfg->ue_cfg.supported_cc_list[i].active     = true;
     user->ue_sim_cfg->ue_cfg.supported_cc_list[i].enb_cc_idx = cc_idxs[i];
   }
-  tester.test_next_ttis(generator.tti_events);
+  TESTASSERT(tester.test_next_ttis(generator.tti_events) == SRSLTE_SUCCESS);
   auto activ_list = tester.get_enb_ue_cc_map(rnti1);
   for (uint32_t i = 0; i < cc_idxs.size(); ++i) {
     TESTASSERT(activ_list[i] >= 0);
@@ -187,7 +187,7 @@ int test_scell_activation(uint32_t sim_number, test_scell_activation_params para
       }
     }
     generator.step_tti();
-    tester.test_next_ttis(generator.tti_events);
+    TESTASSERT(tester.test_next_ttis(generator.tti_events) == SRSLTE_SUCCESS);
   }

   // Event: Wait for UE to receive and ack CE. Send cqi==0, which should not activate the SCell
@@ -198,12 +198,12 @@ int test_scell_activation(uint32_t sim_number, test_scell_activation_params para
       generator.step_tti();
     }
   }
-  tester.test_next_ttis(generator.tti_events);
+  TESTASSERT(tester.test_next_ttis(generator.tti_events) == SRSLTE_SUCCESS);

   // The UE should now have received the CE

   // Event: Generate a bit more data, it should *not* go through SCells until we send a CQI
   generate_data(5, P_dl, P_ul_sr, randf());
-  tester.test_next_ttis(generator.tti_events);
+  TESTASSERT(tester.test_next_ttis(generator.tti_events) == SRSLTE_SUCCESS);
   TESTASSERT(tester.sched_stats->users[rnti1].tot_dl_sched_data[params.pcell_idx] > 0);
   TESTASSERT(tester.sched_stats->users[rnti1].tot_ul_sched_data[params.pcell_idx] > 0);
   for (uint32_t i = 1; i < cc_idxs.size(); ++i) {
@@ -217,7 +217,7 @@ int test_scell_activation(uint32_t sim_number, test_scell_activation_params para
     tester.dl_cqi_info(tester.tti_rx.to_uint(), rnti1, cc_idxs[i], cqi);
   }
   generate_data(10, 1.0, 1.0, 1.0);
-  tester.test_next_ttis(generator.tti_events);
+  TESTASSERT(tester.test_next_ttis(generator.tti_events) == SRSLTE_SUCCESS);
   uint64_t tot_dl_sched_data = 0;
   uint64_t tot_ul_sched_data = 0;
   for (const auto& c : cc_idxs) {

@@ -316,7 +316,8 @@ int test_dci_content_common(const sf_output_res_t& sf_out, uint32_t enb_cc_idx)
     uint32_t       nof_re   = srslte_ra_dl_grant_nof_re(&cell_params.cfg.cell, &dl_sf, &grant);
     float          coderate = srslte_coderate(tbs * 8, nof_re);
     const uint32_t Qm       = 2;
-    CONDERROR(coderate > 0.930f * Qm, "Max coderate was exceeded from broadcast DCI");
+    CONDERROR(
+        coderate > 0.930f * Qm, "Max coderate was exceeded from %s DCI", dci.rnti == SRSLTE_SIRNTI ? "SIB" : "RAR");

     return SRSLTE_SUCCESS;
   };
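
For context on the check above: srslte_coderate(tbs * 8, nof_re) is roughly the number of transport-block bits (plus CRC) per allocated resource element, and the test enforces coderate <= 0.93 * Qm, with Qm = 2 for the QPSK used by broadcast/RAR transmissions, i.e. at most about 1.86 bits per RE. As a rough, illustrative example that ignores reference-signal and control-region overhead: a 4-PRB grant spanning 11 OFDM symbols carries about 4 * 12 * 11 = 528 REs, so any TBS above roughly 0.93 * 2 * 528 / 8 ≈ 122 bytes would trip this check, which is exactly why the scheduler changes above keep widening the grant until the coderate fits.
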

@@ -78,7 +78,7 @@ int test_pdsch_grant(const sim_enb_ctxt_t& enb_ctxt,
   uint32_t nof_retx = get_nof_retx(pdsch.dci.tb[0].rv); // 0..3

   if (h.nof_txs == 0 or h.ndi != pdsch.dci.tb[0].ndi) {
     // It is newtx
-    CONDERROR(nof_retx != 0, "Invalid rv index for new tx");
+    CONDERROR(nof_retx != 0, "Invalid rv index for new DL tx");
     CONDERROR(h.active, "DL newtx for already active DL harq pid=%d", h.pid);
   } else {
     // it is retx
@@ -196,7 +196,7 @@ int test_ul_sched_result(const sim_enb_ctxt_t& enb_ctxt, const sf_output_res_t&
   if (h.nof_txs == 0 or h.ndi != pusch_ptr->dci.tb.ndi) {
     // newtx
-    CONDERROR(nof_retx != 0, "Invalid rv index for new tx");
+    CONDERROR(nof_retx != 0, "Invalid rv index for new UL tx");
     CONDERROR(pusch_ptr->current_tx_nb != 0, "UL HARQ retxs need to have been previously transmitted");
     CONDERROR(not h_inactive, "New tx for already active UL HARQ");
     CONDERROR(not pusch_ptr->needs_pdcch and ue.msg3_tti_rx.is_valid() and sf_out.tti_rx > ue.msg3_tti_rx,
