sched refactor - use a simple enum to represent the allocation result, and refactor the logging messages emitted when an allocation fails.

master
Francisco 4 years ago committed by Francisco Paisana
parent 2cfc657fbb
commit 137a21d6b2

@ -115,7 +115,7 @@ public:
void reset();
private:
alloc_outcome_t allocate_pending_rar(sf_sched* tti_sched, const pending_rar_t& rar, uint32_t& nof_grants_alloc);
alloc_result allocate_pending_rar(sf_sched* tti_sched, const pending_rar_t& rar, uint32_t& nof_grants_alloc);
// args
srslog::basic_logger& logger;

@ -25,29 +25,18 @@
namespace srsenb {
/// Error code of alloc attempt
struct alloc_outcome_t {
enum result_enum {
SUCCESS,
DCI_COLLISION,
RB_COLLISION,
ERROR,
NOF_RB_INVALID,
PUCCH_COLLISION,
MEASGAP_COLLISION,
ALREADY_ALLOC,
NO_DATA,
INVALID_PRBMASK,
INVALID_CARRIER,
CODERATE_TOO_HIGH,
NOF_ALLOCS_LIMIT
};
result_enum result = ERROR;
alloc_outcome_t() = default;
alloc_outcome_t(result_enum e) : result(e) {}
operator result_enum() { return result; }
operator bool() { return result == SUCCESS; }
const char* to_string() const;
enum class alloc_result {
success,
sch_collision,
no_cch_space,
no_sch_space,
rnti_inactive,
invalid_grant_params,
invalid_coderate,
no_grant_space,
other_cause
};
const char* to_string(alloc_result res);
//! Result of a Subframe sched computation
struct cc_sched_result {
@ -113,23 +102,23 @@ class sf_grid_t
{
public:
struct dl_ctrl_alloc_t {
alloc_outcome_t outcome;
rbg_interval rbg_range;
alloc_result outcome;
rbg_interval rbg_range;
};
sf_grid_t() : logger(srslog::fetch_basic_logger("MAC")) {}
void init(const sched_cell_params_t& cell_params_);
void new_tti(tti_point tti_rx);
alloc_outcome_t alloc_dl_ctrl(uint32_t aggr_lvl, rbg_interval rbg_range, alloc_type_t alloc_type);
alloc_outcome_t alloc_dl_data(sched_ue* user, const rbgmask_t& user_mask, bool has_pusch_grant);
bool reserve_dl_rbgs(uint32_t start_rbg, uint32_t end_rbg);
void rem_last_alloc_dl(rbg_interval rbgs);
void init(const sched_cell_params_t& cell_params_);
void new_tti(tti_point tti_rx);
alloc_result alloc_dl_ctrl(uint32_t aggr_lvl, rbg_interval rbg_range, alloc_type_t alloc_type);
alloc_result alloc_dl_data(sched_ue* user, const rbgmask_t& user_mask, bool has_pusch_grant);
bool reserve_dl_rbgs(uint32_t start_rbg, uint32_t end_rbg);
void rem_last_alloc_dl(rbg_interval rbgs);
alloc_outcome_t alloc_ul_data(sched_ue* user, prb_interval alloc, bool needs_pdcch, bool strict = true);
alloc_outcome_t reserve_ul_prbs(const prbmask_t& prbmask, bool strict);
alloc_outcome_t reserve_ul_prbs(prb_interval alloc, bool strict);
bool find_ul_alloc(uint32_t L, prb_interval* alloc) const;
alloc_result alloc_ul_data(sched_ue* user, prb_interval alloc, bool needs_pdcch, bool strict = true);
alloc_result reserve_ul_prbs(const prbmask_t& prbmask, bool strict);
alloc_result reserve_ul_prbs(prb_interval alloc, bool strict);
bool find_ul_alloc(uint32_t L, prb_interval* alloc) const;
// getters
const rbgmask_t& get_dl_mask() const { return dl_mask; }
@ -139,11 +128,11 @@ public:
uint32_t get_pucch_width() const { return pucch_nrb; }
private:
alloc_outcome_t alloc_dl(uint32_t aggr_lvl,
alloc_type_t alloc_type,
rbgmask_t alloc_mask,
sched_ue* user = nullptr,
bool has_pusch_grant = false);
alloc_result alloc_dl(uint32_t aggr_lvl,
alloc_type_t alloc_type,
rbgmask_t alloc_mask,
sched_ue* user = nullptr,
bool has_pusch_grant = false);
// consts
const sched_cell_params_t* cc_cfg = nullptr;
@ -205,7 +194,7 @@ public:
uint32_t n_prb = 0;
uint32_t mcs = 0;
};
typedef std::pair<alloc_outcome_t, const ctrl_alloc_t> ctrl_code_t;
typedef std::pair<alloc_result, const ctrl_alloc_t> ctrl_code_t;
// Control/Configuration Methods
sf_sched();
@ -213,27 +202,30 @@ public:
void new_tti(srslte::tti_point tti_rx_, sf_sched_result* cc_results);
// DL alloc methods
alloc_outcome_t alloc_sib(uint32_t aggr_lvl, uint32_t sib_idx, uint32_t sib_ntx, rbg_interval rbgs);
alloc_outcome_t alloc_paging(uint32_t aggr_lvl, uint32_t paging_payload, rbg_interval rbgs);
alloc_outcome_t alloc_rar(uint32_t aggr_lvl, const pending_rar_t& rar_grant, rbg_interval rbgs, uint32_t nof_grants);
alloc_result alloc_sib(uint32_t aggr_lvl, uint32_t sib_idx, uint32_t sib_ntx, rbg_interval rbgs);
alloc_result alloc_paging(uint32_t aggr_lvl, uint32_t paging_payload, rbg_interval rbgs);
alloc_result alloc_rar(uint32_t aggr_lvl, const pending_rar_t& rar_grant, rbg_interval rbgs, uint32_t nof_grants);
bool reserve_dl_rbgs(uint32_t rbg_start, uint32_t rbg_end) { return tti_alloc.reserve_dl_rbgs(rbg_start, rbg_end); }
const std::vector<rar_alloc_t>& get_allocated_rars() const { return rar_allocs; }
// UL alloc methods
alloc_outcome_t alloc_msg3(sched_ue* user, const sched_interface::dl_sched_rar_grant_t& rargrant);
alloc_outcome_t
alloc_result alloc_msg3(sched_ue* user, const sched_interface::dl_sched_rar_grant_t& rargrant);
alloc_result
alloc_ul(sched_ue* user, prb_interval alloc, ul_alloc_t::type_t alloc_type, bool is_msg3 = false, int msg3_mcs = -1);
bool reserve_ul_prbs(const prbmask_t& ulmask, bool strict) { return tti_alloc.reserve_ul_prbs(ulmask, strict); }
bool reserve_ul_prbs(const prbmask_t& ulmask, bool strict)
{
return tti_alloc.reserve_ul_prbs(ulmask, strict) == alloc_result::success;
}
bool alloc_phich(sched_ue* user);
// compute DCIs and generate dl_sched_result/ul_sched_result for a given TTI
void generate_sched_results(sched_ue_list& ue_db);
alloc_outcome_t alloc_dl_user(sched_ue* user, const rbgmask_t& user_mask, uint32_t pid);
alloc_result alloc_dl_user(sched_ue* user, const rbgmask_t& user_mask, uint32_t pid);
tti_point get_tti_tx_dl() const { return to_tx_dl(tti_rx); }
uint32_t get_nof_ctrl_symbols() const;
const rbgmask_t& get_dl_mask() const { return tti_alloc.get_dl_mask(); }
alloc_outcome_t alloc_ul_user(sched_ue* user, prb_interval alloc);
alloc_result alloc_ul_user(sched_ue* user, prb_interval alloc);
const prbmask_t& get_ul_mask() const { return tti_alloc.get_ul_mask(); }
tti_point get_tti_tx_ul() const { return to_tx_ul(tti_rx); }
@ -264,11 +256,11 @@ private:
// internal state
sf_grid_t tti_alloc;
srslte::bounded_vector<bc_alloc_t, sched_interface::MAX_BC_LIST> bc_allocs;
std::vector<rar_alloc_t> rar_allocs;
std::vector<dl_alloc_t> data_allocs;
std::vector<ul_alloc_t> ul_data_allocs;
uint32_t last_msg3_prb = 0, max_msg3_prb = 0;
srslte::bounded_vector<bc_alloc_t, sched_interface::MAX_BC_LIST> bc_allocs;
std::vector<rar_alloc_t> rar_allocs;
std::vector<dl_alloc_t> data_allocs;
srslte::bounded_vector<ul_alloc_t, sched_interface::MAX_DATA_LIST> ul_data_allocs;
uint32_t last_msg3_prb = 0, max_msg3_prb = 0;
// Next TTI state
tti_point tti_rx;

@ -59,10 +59,10 @@ const ul_harq_proc* get_ul_retx_harq(sched_ue& user, sf_sched* tti_sched);
const ul_harq_proc* get_ul_newtx_harq(sched_ue& user, sf_sched* tti_sched);
/// Helper methods to allocate resources in subframe
alloc_outcome_t try_dl_retx_alloc(sf_sched& tti_sched, sched_ue& ue, const dl_harq_proc& h);
alloc_outcome_t
try_dl_newtx_alloc_greedy(sf_sched& tti_sched, sched_ue& ue, const dl_harq_proc& h, rbgmask_t* result_mask = nullptr);
alloc_outcome_t try_ul_retx_alloc(sf_sched& tti_sched, sched_ue& ue, const ul_harq_proc& h);
alloc_result try_dl_retx_alloc(sf_sched& tti_sched, sched_ue& ue, const dl_harq_proc& h);
alloc_result
try_dl_newtx_alloc_greedy(sf_sched& tti_sched, sched_ue& ue, const dl_harq_proc& h, rbgmask_t* result_mask = nullptr);
alloc_result try_ul_retx_alloc(sf_sched& tti_sched, sched_ue& ue, const ul_harq_proc& h);
} // namespace srsenb

@ -110,24 +110,24 @@ void bc_sched::alloc_sibs(sf_sched* tti_sched)
}
// Attempt PDSCH grants with increasing number of RBGs
alloc_outcome_t ret = alloc_outcome_t::CODERATE_TOO_HIGH;
for (uint32_t nrbgs = 1; nrbgs < cc_cfg->nof_rbgs and ret == alloc_outcome_t::CODERATE_TOO_HIGH; ++nrbgs) {
alloc_result ret = alloc_result::invalid_coderate;
for (uint32_t nrbgs = 1; nrbgs < cc_cfg->nof_rbgs and ret == alloc_result::invalid_coderate; ++nrbgs) {
rbg_interval rbg_interv = find_empty_rbg_interval(nrbgs, tti_sched->get_dl_mask());
if (rbg_interv.length() != nrbgs) {
ret = alloc_outcome_t::RB_COLLISION;
ret = alloc_result::no_sch_space;
break;
}
ret = tti_sched->alloc_sib(bc_aggr_level, sib_idx, pending_sibs[sib_idx].n_tx, rbg_interv);
if (ret == alloc_outcome_t::SUCCESS) {
if (ret == alloc_result::success) {
// SIB scheduled successfully
pending_sibs[sib_idx].n_tx++;
}
}
if (ret != alloc_outcome_t::SUCCESS) {
if (ret != alloc_result::success) {
logger.warning("SCHED: Could not allocate SIB=%d, len=%d. Cause: %s",
sib_idx + 1,
cc_cfg->cfg.sibs[sib_idx].len,
ret.to_string());
to_string(ret));
}
}
}
@ -141,20 +141,19 @@ void bc_sched::alloc_paging(sf_sched* tti_sched)
return;
}
alloc_outcome_t ret = alloc_outcome_t::CODERATE_TOO_HIGH;
for (uint32_t nrbgs = 1; nrbgs < cc_cfg->nof_rbgs and ret == alloc_outcome_t::CODERATE_TOO_HIGH; ++nrbgs) {
alloc_result ret = alloc_result::invalid_coderate;
for (uint32_t nrbgs = 1; nrbgs < cc_cfg->nof_rbgs and ret == alloc_result::invalid_coderate; ++nrbgs) {
rbg_interval rbg_interv = find_empty_rbg_interval(nrbgs, tti_sched->get_dl_mask());
if (rbg_interv.length() != nrbgs) {
ret = alloc_outcome_t::RB_COLLISION;
ret = alloc_result::no_sch_space;
break;
}
ret = tti_sched->alloc_paging(bc_aggr_level, paging_payload, rbg_interv);
}
if (ret != alloc_outcome_t::SUCCESS) {
logger.warning(
"SCHED: Could not allocate Paging with payload length=%d, cause=%s", paging_payload, ret.to_string());
if (ret != alloc_result::success) {
logger.warning("SCHED: Could not allocate Paging with payload length=%d, cause=%s", paging_payload, to_string(ret));
}
}
@ -173,28 +172,27 @@ ra_sched::ra_sched(const sched_cell_params_t& cfg_, sched_ue_list& ue_db_) :
cc_cfg(&cfg_), logger(srslog::fetch_basic_logger("MAC")), ue_db(&ue_db_)
{}
alloc_outcome_t
ra_sched::allocate_pending_rar(sf_sched* tti_sched, const pending_rar_t& rar, uint32_t& nof_grants_alloc)
alloc_result ra_sched::allocate_pending_rar(sf_sched* tti_sched, const pending_rar_t& rar, uint32_t& nof_grants_alloc)
{
alloc_outcome_t ret = alloc_outcome_t::ERROR;
alloc_result ret = alloc_result::other_cause;
for (nof_grants_alloc = rar.msg3_grant.size(); nof_grants_alloc > 0; nof_grants_alloc--) {
ret = alloc_outcome_t::CODERATE_TOO_HIGH;
for (uint32_t nrbg = 1; nrbg < cc_cfg->nof_rbgs and ret == alloc_outcome_t::CODERATE_TOO_HIGH; ++nrbg) {
ret = alloc_result::invalid_coderate;
for (uint32_t nrbg = 1; nrbg < cc_cfg->nof_rbgs and ret == alloc_result::invalid_coderate; ++nrbg) {
rbg_interval rbg_interv = find_empty_rbg_interval(nrbg, tti_sched->get_dl_mask());
if (rbg_interv.length() == nrbg) {
ret = tti_sched->alloc_rar(rar_aggr_level, rar, rbg_interv, nof_grants_alloc);
} else {
ret = alloc_outcome_t::RB_COLLISION;
ret = alloc_result::no_sch_space;
}
}
// If allocation was not successful because there were not enough RBGs, try allocating fewer Msg3 grants
if (ret != alloc_outcome_t::CODERATE_TOO_HIGH and ret != alloc_outcome_t::RB_COLLISION) {
if (ret != alloc_result::invalid_coderate and ret != alloc_result::no_sch_space) {
break;
}
}
if (ret != alloc_outcome_t::SUCCESS) {
logger.info("SCHED: RAR allocation for L=%d was postponed. Cause=%s", rar_aggr_level, ret.to_string());
if (ret != alloc_result::success) {
logger.info("SCHED: RAR allocation for L=%d was postponed. Cause=%s", rar_aggr_level, to_string(ret));
}
return ret;
}
@ -232,10 +230,10 @@ void ra_sched::dl_sched(sf_sched* tti_sched)
}
// Try to schedule DCI + RBGs for RAR Grant
uint32_t nof_rar_allocs = 0;
alloc_outcome_t ret = allocate_pending_rar(tti_sched, rar, nof_rar_allocs);
uint32_t nof_rar_allocs = 0;
alloc_result ret = allocate_pending_rar(tti_sched, rar, nof_rar_allocs);
if (ret == alloc_outcome_t::SUCCESS) {
if (ret == alloc_result::success) {
// If RAR allocation was successful:
// - in case all Msg3 grants were allocated, remove pending RAR, and continue with following RAR
// - otherwise, erase only Msg3 grants that were allocated, and stop iteration
@ -251,7 +249,7 @@ void ra_sched::dl_sched(sf_sched* tti_sched)
// If RAR allocation was not successful:
// - in case of unavailable PDCCH space, try next pending RAR allocation
// - otherwise, stop iteration
if (ret != alloc_outcome_t::DCI_COLLISION) {
if (ret != alloc_result::no_cch_space) {
break;
}
++it;
@ -303,7 +301,8 @@ void ra_sched::ul_sched(sf_sched* sf_dl_sched, sf_sched* sf_msg3_sched)
for (const auto& msg3grant : rar.rar_grant.msg3_grant) {
uint16_t crnti = msg3grant.data.temp_crnti;
auto user_it = ue_db->find(crnti);
if (user_it != ue_db->end() and sf_msg3_sched->alloc_msg3(user_it->second.get(), msg3grant)) {
if (user_it != ue_db->end() and
sf_msg3_sched->alloc_msg3(user_it->second.get(), msg3grant) == alloc_result::success) {
logger.debug("SCHED: Queueing Msg3 for rnti=0x%x at tti=%d", crnti, sf_msg3_sched->get_tti_tx_ul().to_uint());
} else {
logger.error(

@ -16,34 +16,26 @@
namespace srsenb {
const char* alloc_outcome_t::to_string() const
const char* to_string(alloc_result result)
{
switch (result) {
case SUCCESS:
case alloc_result::success:
return "success";
case DCI_COLLISION:
return "PDCCH position not available";
case RB_COLLISION:
return "rb_collision";
case ERROR:
case alloc_result::sch_collision:
return "Collision with existing SCH allocations";
case alloc_result::other_cause:
return "error";
case NOF_RB_INVALID:
return "invalid nof prbs";
case PUCCH_COLLISION:
return "pucch_collision";
case MEASGAP_COLLISION:
return "measgap_collision";
case ALREADY_ALLOC:
return "already allocated";
case NO_DATA:
return "no pending data to allocate";
case INVALID_PRBMASK:
return "invalid rbg mask";
case INVALID_CARRIER:
return "invalid eNB carrier";
case CODERATE_TOO_HIGH:
return "Effective coderate is too high";
case NOF_ALLOCS_LIMIT:
case alloc_result::no_cch_space:
return "No space available in PUCCH or PDCCH";
case alloc_result::no_sch_space:
return "Requested number of PRBs not available";
case alloc_result::rnti_inactive:
return "rnti cannot be allocated (e.g. already allocated, no data, meas gap collision, carrier inactive, etc.)";
case alloc_result::invalid_grant_params:
return "invalid grant arguments (e.g. invalid prb mask)";
case alloc_result::invalid_coderate:
return "Effective coderate exceeds threshold";
case alloc_result::no_grant_space:
return "Max number of allocations reached";
default:
break;
@ -150,46 +142,50 @@ void sf_grid_t::new_tti(tti_point tti_rx_)
}
//! Allocates CCEs and RBs for the given mask and allocation type (e.g. data, BC, RAR, paging)
alloc_outcome_t sf_grid_t::alloc_dl(uint32_t aggr_idx,
alloc_type_t alloc_type,
rbgmask_t alloc_mask,
sched_ue* user,
bool has_pusch_grant)
alloc_result sf_grid_t::alloc_dl(uint32_t aggr_idx,
alloc_type_t alloc_type,
rbgmask_t alloc_mask,
sched_ue* user,
bool has_pusch_grant)
{
// Check RBG collision
if ((dl_mask & alloc_mask).any()) {
return alloc_outcome_t::RB_COLLISION;
logger.debug("SCHED: Provided RBG mask collides with allocation previously made.\n");
return alloc_result::sch_collision;
}
// Allocate DCI in PDCCH
if (not pdcch_alloc.alloc_dci(alloc_type, aggr_idx, user, has_pusch_grant)) {
if (user != nullptr) {
if (logger.debug.enabled()) {
logger.debug("No space in PDCCH for rnti=0x%x DL tx. Current PDCCH allocation:\n%s",
if (logger.debug.enabled()) {
if (user != nullptr) {
logger.debug("SCHED: No space in PDCCH for rnti=0x%x DL tx. Current PDCCH allocation:\n%s",
user->get_rnti(),
pdcch_alloc.result_to_string(true).c_str());
} else {
logger.debug("SCHED: No space in PDCCH for DL tx. Current PDCCH allocation:\n%s",
pdcch_alloc.result_to_string(true).c_str());
}
}
return alloc_outcome_t::DCI_COLLISION;
return alloc_result::no_cch_space;
}
// Allocate RBGs
dl_mask |= alloc_mask;
return alloc_outcome_t::SUCCESS;
return alloc_result::success;
}
/// Allocates CCEs and RBs for control allocs. It allocates RBs in a contiguous manner.
alloc_outcome_t sf_grid_t::alloc_dl_ctrl(uint32_t aggr_idx, rbg_interval rbg_range, alloc_type_t alloc_type)
alloc_result sf_grid_t::alloc_dl_ctrl(uint32_t aggr_idx, rbg_interval rbg_range, alloc_type_t alloc_type)
{
if (alloc_type != alloc_type_t::DL_RAR and alloc_type != alloc_type_t::DL_BC and
alloc_type != alloc_type_t::DL_PCCH) {
logger.error("SCHED: DL control allocations must be RAR/BC/PDCCH");
return alloc_outcome_t::ERROR;
return alloc_result::other_cause;
}
// Setup rbg_range starting from left
if (rbg_range.stop() > nof_rbgs) {
return alloc_outcome_t::RB_COLLISION;
return alloc_result::sch_collision;
}
// allocate DCI and RBGs
@ -199,26 +195,27 @@ alloc_outcome_t sf_grid_t::alloc_dl_ctrl(uint32_t aggr_idx, rbg_interval rbg_ran
}
//! Allocates CCEs and RBs for a user DL data alloc.
alloc_outcome_t sf_grid_t::alloc_dl_data(sched_ue* user, const rbgmask_t& user_mask, bool has_pusch_grant)
alloc_result sf_grid_t::alloc_dl_data(sched_ue* user, const rbgmask_t& user_mask, bool has_pusch_grant)
{
srslte_dci_format_t dci_format = user->get_dci_format();
uint32_t nof_bits = srslte_dci_format_sizeof(&cc_cfg->cfg.cell, nullptr, nullptr, dci_format);
uint32_t aggr_idx = user->get_aggr_level(cc_cfg->enb_cc_idx, nof_bits);
alloc_outcome_t ret = alloc_dl(aggr_idx, alloc_type_t::DL_DATA, user_mask, user, has_pusch_grant);
alloc_result ret = alloc_dl(aggr_idx, alloc_type_t::DL_DATA, user_mask, user, has_pusch_grant);
return ret;
}
alloc_outcome_t sf_grid_t::alloc_ul_data(sched_ue* user, prb_interval alloc, bool needs_pdcch, bool strict)
alloc_result sf_grid_t::alloc_ul_data(sched_ue* user, prb_interval alloc, bool needs_pdcch, bool strict)
{
if (alloc.stop() > ul_mask.size()) {
return alloc_outcome_t::ERROR;
return alloc_result::no_sch_space;
}
prbmask_t newmask(ul_mask.size());
newmask.fill(alloc.start(), alloc.stop());
if (strict and (ul_mask & newmask).any()) {
return alloc_outcome_t::RB_COLLISION;
logger.debug("SCHED: Failed UL allocation. Cause: %s", to_string(alloc_result::sch_collision));
return alloc_result::sch_collision;
}
// Generate PDCCH except for RAR and non-adaptive retx
@ -231,13 +228,13 @@ alloc_outcome_t sf_grid_t::alloc_ul_data(sched_ue* user, prb_interval alloc, boo
user->get_rnti(),
pdcch_alloc.result_to_string(true).c_str());
}
return alloc_outcome_t::DCI_COLLISION;
return alloc_result::no_cch_space;
}
}
ul_mask |= newmask;
return alloc_outcome_t::SUCCESS;
return alloc_result::success;
}
bool sf_grid_t::reserve_dl_rbgs(uint32_t start_rbg, uint32_t end_rbg)
@ -259,10 +256,10 @@ void sf_grid_t::rem_last_alloc_dl(rbg_interval rbgs)
dl_mask &= ~rbgmask;
}
alloc_outcome_t sf_grid_t::reserve_ul_prbs(prb_interval alloc, bool strict)
alloc_result sf_grid_t::reserve_ul_prbs(prb_interval alloc, bool strict)
{
if (alloc.stop() > ul_mask.size()) {
return alloc_outcome_t::ERROR;
return alloc_result::no_sch_space;
}
prbmask_t newmask(ul_mask.size());
@ -270,14 +267,14 @@ alloc_outcome_t sf_grid_t::reserve_ul_prbs(prb_interval alloc, bool strict)
return reserve_ul_prbs(newmask, strict);
}
alloc_outcome_t sf_grid_t::reserve_ul_prbs(const prbmask_t& prbmask, bool strict)
alloc_result sf_grid_t::reserve_ul_prbs(const prbmask_t& prbmask, bool strict)
{
alloc_outcome_t ret = alloc_outcome_t::SUCCESS;
alloc_result ret = alloc_result::success;
if (strict and (ul_mask & prbmask).any()) {
fmt::memory_buffer tmp_buffer;
fmt::format_to(tmp_buffer, "There was a collision in the UL. Current mask={:x}, new mask={:x}", ul_mask, prbmask);
logger.error("%s", srslte::to_c_str(tmp_buffer));
ret = alloc_outcome_t::ERROR;
ret = alloc_result::sch_collision;
}
ul_mask |= prbmask;
return ret;
@ -362,17 +359,17 @@ bool sf_sched::is_ul_alloc(uint16_t rnti) const
ul_data_allocs.begin(), ul_data_allocs.end(), [rnti](const ul_alloc_t& u) { return u.rnti == rnti; });
}
alloc_outcome_t sf_sched::alloc_sib(uint32_t aggr_lvl, uint32_t sib_idx, uint32_t sib_ntx, rbg_interval rbgs)
alloc_result sf_sched::alloc_sib(uint32_t aggr_lvl, uint32_t sib_idx, uint32_t sib_ntx, rbg_interval rbgs)
{
if (bc_allocs.size() >= sched_interface::MAX_BC_LIST) {
logger.warning("SCHED: Maximum number of Broadcast allocations reached");
return alloc_outcome_t::NOF_ALLOCS_LIMIT;
return alloc_result::no_grant_space;
}
bc_alloc_t bc_alloc;
// Allocate SIB RBGs and PDCCH
alloc_outcome_t ret = tti_alloc.alloc_dl_ctrl(aggr_lvl, rbgs, alloc_type_t::DL_BC);
if (ret != alloc_outcome_t::SUCCESS) {
alloc_result ret = tti_alloc.alloc_dl_ctrl(aggr_lvl, rbgs, alloc_type_t::DL_BC);
if (ret != alloc_result::success) {
return ret;
}
@ -380,7 +377,7 @@ alloc_outcome_t sf_sched::alloc_sib(uint32_t aggr_lvl, uint32_t sib_idx, uint32_
if (not generate_sib_dci(bc_alloc.bc_grant, get_tti_tx_dl(), sib_idx, sib_ntx, rbgs, *cc_cfg, tti_alloc.get_cfi())) {
// Cancel on-going allocation
tti_alloc.rem_last_alloc_dl(rbgs);
return alloc_outcome_t::CODERATE_TOO_HIGH;
return alloc_result::invalid_coderate;
}
// Allocation Successful
@ -389,20 +386,20 @@ alloc_outcome_t sf_sched::alloc_sib(uint32_t aggr_lvl, uint32_t sib_idx, uint32_
bc_alloc.req_bytes = cc_cfg->cfg.sibs[sib_idx].len;
bc_allocs.push_back(bc_alloc);
return alloc_outcome_t::SUCCESS;
return alloc_result::success;
}
alloc_outcome_t sf_sched::alloc_paging(uint32_t aggr_lvl, uint32_t paging_payload, rbg_interval rbgs)
alloc_result sf_sched::alloc_paging(uint32_t aggr_lvl, uint32_t paging_payload, rbg_interval rbgs)
{
if (bc_allocs.size() >= sched_interface::MAX_BC_LIST) {
logger.warning("SCHED: Maximum number of Broadcast allocations reached");
return alloc_outcome_t::NOF_ALLOCS_LIMIT;
return alloc_result::no_grant_space;
}
bc_alloc_t bc_alloc;
// Allocate Paging RBGs and PDCCH
alloc_outcome_t ret = tti_alloc.alloc_dl_ctrl(aggr_lvl, rbgs, alloc_type_t::DL_PCCH);
if (ret != alloc_outcome_t::SUCCESS) {
alloc_result ret = tti_alloc.alloc_dl_ctrl(aggr_lvl, rbgs, alloc_type_t::DL_PCCH);
if (ret != alloc_result::success) {
return ret;
}
@ -410,7 +407,7 @@ alloc_outcome_t sf_sched::alloc_paging(uint32_t aggr_lvl, uint32_t paging_payloa
if (not generate_paging_dci(bc_alloc.bc_grant, get_tti_tx_dl(), paging_payload, rbgs, *cc_cfg, tti_alloc.get_cfi())) {
// Cancel on-going allocation
tti_alloc.rem_last_alloc_dl(rbgs);
return alloc_outcome_t::CODERATE_TOO_HIGH;
return alloc_result::invalid_coderate;
}
// Allocation Successful
@ -419,15 +416,15 @@ alloc_outcome_t sf_sched::alloc_paging(uint32_t aggr_lvl, uint32_t paging_payloa
bc_alloc.req_bytes = paging_payload;
bc_allocs.push_back(bc_alloc);
return alloc_outcome_t::SUCCESS;
return alloc_result::success;
}
alloc_outcome_t sf_sched::alloc_rar(uint32_t aggr_lvl, const pending_rar_t& rar, rbg_interval rbgs, uint32_t nof_grants)
alloc_result sf_sched::alloc_rar(uint32_t aggr_lvl, const pending_rar_t& rar, rbg_interval rbgs, uint32_t nof_grants)
{
static const uint32_t msg3_nof_prbs = 3;
if (rar_allocs.size() >= sched_interface::MAX_RAR_LIST) {
logger.info("SCHED: Maximum number of RAR allocations per TTI reached.");
return alloc_outcome_t::NOF_ALLOCS_LIMIT;
return alloc_result::no_grant_space;
}
uint32_t buf_rar = 7 * nof_grants + 1; // 1+6 bytes per RAR subheader+body and 1 byte for Backoff
@ -435,12 +432,12 @@ alloc_outcome_t sf_sched::alloc_rar(uint32_t aggr_lvl, const pending_rar_t& rar,
// check if there is enough space for Msg3
if (last_msg3_prb + total_ul_nof_prbs > max_msg3_prb) {
return alloc_outcome_t::RB_COLLISION;
return alloc_result::sch_collision;
}
// allocate RBGs and PDCCH
alloc_outcome_t ret = tti_alloc.alloc_dl_ctrl(aggr_lvl, rbgs, alloc_type_t::DL_RAR);
if (ret != alloc_outcome_t::SUCCESS) {
alloc_result ret = tti_alloc.alloc_dl_ctrl(aggr_lvl, rbgs, alloc_type_t::DL_RAR);
if (ret != alloc_result::success) {
return ret;
}
@ -450,7 +447,7 @@ alloc_outcome_t sf_sched::alloc_rar(uint32_t aggr_lvl, const pending_rar_t& rar,
rar_alloc.rar_grant, get_tti_tx_dl(), rar, rbgs, nof_grants, last_msg3_prb, *cc_cfg, tti_alloc.get_cfi())) {
// Cancel on-going allocation
tti_alloc.rem_last_alloc_dl(rbgs);
return alloc_outcome_t::CODERATE_TOO_HIGH;
return alloc_result::invalid_coderate;
}
// RAR allocation successful
@ -475,24 +472,24 @@ bool is_periodic_cqi_expected(const sched_interface::ue_cfg_t& ue_cfg, tti_point
return false;
}
alloc_outcome_t sf_sched::alloc_dl_user(sched_ue* user, const rbgmask_t& user_mask, uint32_t pid)
alloc_result sf_sched::alloc_dl_user(sched_ue* user, const rbgmask_t& user_mask, uint32_t pid)
{
if (data_allocs.size() >= sched_interface::MAX_DATA_LIST) {
logger.warning("SCHED: Maximum number of DL allocations reached");
return alloc_outcome_t::NOF_ALLOCS_LIMIT;
return alloc_result::no_grant_space;
}
if (is_dl_alloc(user->get_rnti())) {
logger.warning("SCHED: Attempt to assign multiple harq pids to the same user rnti=0x%x", user->get_rnti());
return alloc_outcome_t::ALREADY_ALLOC;
return alloc_result::rnti_inactive;
}
auto* cc = user->find_ue_carrier(cc_cfg->enb_cc_idx);
if (cc == nullptr or cc->cc_state() != cc_st::active) {
return alloc_outcome_t::INVALID_CARRIER;
return alloc_result::rnti_inactive;
}
if (not user->pdsch_enabled(srslte::tti_point{get_tti_rx()}, cc_cfg->enb_cc_idx)) {
return alloc_outcome_t::MEASGAP_COLLISION;
return alloc_result::rnti_inactive;
}
// Check if allocation would cause segmentation
@ -502,14 +499,14 @@ alloc_outcome_t sf_sched::alloc_dl_user(sched_ue* user, const rbgmask_t& user_ma
rbg_interval r = user->get_required_dl_rbgs(cc_cfg->enb_cc_idx);
if (r.start() > user_mask.count()) {
logger.warning("SCHED: The number of RBGs allocated to rnti=0x%x will force segmentation", user->get_rnti());
return alloc_outcome_t::NOF_RB_INVALID;
return alloc_result::invalid_grant_params;
}
}
srslte_dci_format_t dci_format = user->get_dci_format();
if (dci_format == SRSLTE_DCI_FORMAT1A and not is_contiguous(user_mask)) {
logger.warning("SCHED: Can't use distributed RBGs for DCI format 1A");
return alloc_outcome_t::INVALID_PRBMASK;
return alloc_result::invalid_grant_params;
}
bool has_pusch_grant = is_ul_alloc(user->get_rnti()) or cc_results->is_ul_alloc(user->get_rnti());
@ -525,28 +522,28 @@ alloc_outcome_t sf_sched::alloc_dl_user(sched_ue* user, const rbgmask_t& user_ma
prb_interval alloc = {};
uint32_t L = user->get_required_prb_ul(cc_cfg->enb_cc_idx, srslte::ceil_div(SRSLTE_UCI_CQI_CODED_PUCCH_B + 2, 8));
tti_alloc.find_ul_alloc(L, &alloc);
has_pusch_grant = alloc.length() > 0 and alloc_ul_user(user, alloc);
has_pusch_grant = alloc.length() > 0 and alloc_ul_user(user, alloc) == alloc_result::success;
if (ue_cc_idx != 0 and not has_pusch_grant) {
// For SCells, if we can't allocate small PUSCH grant, abort DL allocation
return alloc_outcome_t::PUCCH_COLLISION;
return alloc_result::no_cch_space;
}
}
// Try to allocate RBGs, PDCCH, and PUCCH
alloc_outcome_t ret = tti_alloc.alloc_dl_data(user, user_mask, has_pusch_grant);
alloc_result ret = tti_alloc.alloc_dl_data(user, user_mask, has_pusch_grant);
if (ret == alloc_outcome_t::DCI_COLLISION and not has_pusch_grant and not data_allocs.empty() and
if (ret == alloc_result::no_cch_space and not has_pusch_grant and not data_allocs.empty() and
user->get_ul_harq(get_tti_tx_ul(), get_enb_cc_idx())->is_empty()) {
// PUCCH may be too full. Attempt small UL grant allocation for UCI-PUSCH
uint32_t L = user->get_required_prb_ul(cc_cfg->enb_cc_idx, srslte::ceil_div(SRSLTE_UCI_CQI_CODED_PUCCH_B + 2, 8));
prb_interval alloc = {};
tti_alloc.find_ul_alloc(L, &alloc);
has_pusch_grant = alloc.length() > 0 and alloc_ul_user(user, alloc);
has_pusch_grant = alloc.length() > 0 and alloc_ul_user(user, alloc) == alloc_result::success;
if (has_pusch_grant) {
ret = tti_alloc.alloc_dl_data(user, user_mask, has_pusch_grant);
}
}
if (ret != alloc_outcome_t::SUCCESS) {
if (ret != alloc_result::success) {
return ret;
}
@ -558,49 +555,49 @@ alloc_outcome_t sf_sched::alloc_dl_user(sched_ue* user, const rbgmask_t& user_ma
alloc.pid = pid;
data_allocs.push_back(alloc);
return alloc_outcome_t::SUCCESS;
return alloc_result::success;
}
alloc_outcome_t
alloc_result
sf_sched::alloc_ul(sched_ue* user, prb_interval alloc, ul_alloc_t::type_t alloc_type, bool is_msg3, int msg3_mcs)
{
if (ul_data_allocs.size() >= sched_interface::MAX_DATA_LIST) {
logger.warning("SCHED: Maximum number of UL allocations reached");
return alloc_outcome_t::ERROR;
if (ul_data_allocs.full()) {
logger.debug("SCHED: Maximum number of UL allocations=%zd reached", ul_data_allocs.size());
return alloc_result::no_grant_space;
}
// Check whether user was already allocated
if (is_ul_alloc(user->get_rnti())) {
logger.warning("SCHED: Attempt to assign multiple ul_harq_proc to the same user rnti=0x%x", user->get_rnti());
return alloc_outcome_t::ALREADY_ALLOC;
logger.warning("SCHED: Attempt to assign multiple UL grants to the same user rnti=0x%x", user->get_rnti());
return alloc_result::rnti_inactive;
}
// Check if there is no collision with measGap
bool needs_pdcch = alloc_type == ul_alloc_t::ADAPT_RETX or (alloc_type == ul_alloc_t::NEWTX and not is_msg3);
if (not user->pusch_enabled(srslte::tti_point{get_tti_rx()}, cc_cfg->enb_cc_idx, needs_pdcch)) {
return alloc_outcome_t::MEASGAP_COLLISION;
if (not user->pusch_enabled(get_tti_rx(), cc_cfg->enb_cc_idx, needs_pdcch)) {
logger.debug("SCHED: PDCCH would collide with rnti=0x%x Measurement Gap", user->get_rnti());
return alloc_result::rnti_inactive;
}
// Allocate RBGs and DCI space
bool allow_pucch_collision = cc_cfg->nof_prb() == 6 and is_msg3;
alloc_outcome_t ret = tti_alloc.alloc_ul_data(user, alloc, needs_pdcch, not allow_pucch_collision);
if (ret != alloc_outcome_t::SUCCESS) {
bool allow_pucch_collision = cc_cfg->nof_prb() == 6 and is_msg3;
alloc_result ret = tti_alloc.alloc_ul_data(user, alloc, needs_pdcch, not allow_pucch_collision);
if (ret != alloc_result::success) {
return ret;
}
ul_alloc_t ul_alloc = {};
ul_alloc.type = alloc_type;
ul_alloc.is_msg3 = is_msg3;
ul_alloc.dci_idx = tti_alloc.get_pdcch_grid().nof_allocs() - 1;
ul_alloc.rnti = user->get_rnti();
ul_alloc.alloc = alloc;
ul_alloc.msg3_mcs = msg3_mcs;
ul_data_allocs.push_back(ul_alloc);
ul_data_allocs.emplace_back();
ul_alloc_t& ul_alloc = ul_data_allocs.back();
ul_alloc.type = alloc_type;
ul_alloc.is_msg3 = is_msg3;
ul_alloc.dci_idx = tti_alloc.get_pdcch_grid().nof_allocs() - 1;
ul_alloc.rnti = user->get_rnti();
ul_alloc.alloc = alloc;
ul_alloc.msg3_mcs = msg3_mcs;
return alloc_outcome_t::SUCCESS;
return alloc_result::success;
}
alloc_outcome_t sf_sched::alloc_ul_user(sched_ue* user, prb_interval alloc)
alloc_result sf_sched::alloc_ul_user(sched_ue* user, prb_interval alloc)
{
// check whether adaptive/non-adaptive retx/newtx
ul_alloc_t::type_t alloc_type;
@ -887,16 +884,16 @@ void sf_sched::set_ul_sched_result(const sf_cch_allocator::alloc_result_t& dci_r
}
}
alloc_outcome_t sf_sched::alloc_msg3(sched_ue* user, const sched_interface::dl_sched_rar_grant_t& rargrant)
alloc_result sf_sched::alloc_msg3(sched_ue* user, const sched_interface::dl_sched_rar_grant_t& rargrant)
{
// Derive PRBs from allocated RAR grants
prb_interval msg3_alloc = prb_interval::riv_to_prbs(rargrant.grant.rba, cc_cfg->nof_prb());
alloc_outcome_t ret = alloc_ul(user, msg3_alloc, sf_sched::ul_alloc_t::NEWTX, true, rargrant.grant.trunc_mcs);
if (not ret) {
alloc_result ret = alloc_ul(user, msg3_alloc, sf_sched::ul_alloc_t::NEWTX, true, rargrant.grant.trunc_mcs);
if (ret != alloc_result::success) {
fmt::memory_buffer str_buffer;
fmt::format_to(str_buffer, "{}", msg3_alloc);
logger.warning("SCHED: Could not allocate msg3 within %s", srslte::to_c_str(str_buffer));
logger.warning("SCHED: Could not allocate msg3 within %s.", srslte::to_c_str(str_buffer));
}
return ret;
}

@ -112,12 +112,12 @@ const dl_harq_proc* get_dl_newtx_harq(sched_ue& user, sf_sched* tti_sched)
return user.get_empty_dl_harq(tti_sched->get_tti_tx_dl(), tti_sched->get_enb_cc_idx());
}
alloc_outcome_t try_dl_retx_alloc(sf_sched& tti_sched, sched_ue& ue, const dl_harq_proc& h)
alloc_result try_dl_retx_alloc(sf_sched& tti_sched, sched_ue& ue, const dl_harq_proc& h)
{
// Try to reuse the same mask
rbgmask_t retx_mask = h.get_rbgmask();
alloc_outcome_t code = tti_sched.alloc_dl_user(&ue, retx_mask, h.get_id());
if (code == alloc_outcome_t::SUCCESS or code == alloc_outcome_t::DCI_COLLISION) {
rbgmask_t retx_mask = h.get_rbgmask();
alloc_result code = tti_sched.alloc_dl_user(&ue, retx_mask, h.get_id());
if (code != alloc_result::sch_collision) {
return code;
}
@ -128,11 +128,10 @@ alloc_outcome_t try_dl_retx_alloc(sf_sched& tti_sched, sched_ue& ue, const dl_ha
if (retx_mask.count() == nof_rbg) {
return tti_sched.alloc_dl_user(&ue, retx_mask, h.get_id());
}
return alloc_outcome_t::RB_COLLISION;
return alloc_result::sch_collision;
}
alloc_outcome_t
try_dl_newtx_alloc_greedy(sf_sched& tti_sched, sched_ue& ue, const dl_harq_proc& h, rbgmask_t* result_mask)
alloc_result try_dl_newtx_alloc_greedy(sf_sched& tti_sched, sched_ue& ue, const dl_harq_proc& h, rbgmask_t* result_mask)
{
if (result_mask != nullptr) {
*result_mask = {};
@ -141,25 +140,25 @@ try_dl_newtx_alloc_greedy(sf_sched& tti_sched, sched_ue& ue, const dl_harq_proc&
// If all RBGs are occupied, the next steps can be shortcut
const rbgmask_t& current_mask = tti_sched.get_dl_mask();
if (current_mask.all()) {
return alloc_outcome_t::RB_COLLISION;
return alloc_result::no_sch_space;
}
// If there is no data to transmit, no need to allocate
rbg_interval req_rbgs = ue.get_required_dl_rbgs(tti_sched.get_enb_cc_idx());
if (req_rbgs.stop() == 0) {
return alloc_outcome_t::NO_DATA;
return alloc_result::rnti_inactive;
}
// Find RBG mask that accommodates pending data
bool is_contiguous_alloc = ue.get_dci_format() == SRSLTE_DCI_FORMAT1A;
rbgmask_t newtxmask = compute_rbgmask_greedy(req_rbgs.stop(), is_contiguous_alloc, current_mask);
if (newtxmask.none() or newtxmask.count() < req_rbgs.start()) {
return alloc_outcome_t::RB_COLLISION;
return alloc_result::no_sch_space;
}
// empty RBGs were found. Attempt allocation
alloc_outcome_t ret = tti_sched.alloc_dl_user(&ue, newtxmask, h.get_id());
if (ret == alloc_outcome_t::SUCCESS and result_mask != nullptr) {
alloc_result ret = tti_sched.alloc_dl_user(&ue, newtxmask, h.get_id());
if (ret == alloc_result::success and result_mask != nullptr) {
*result_mask = newtxmask;
}
return ret;
@ -228,7 +227,7 @@ const ul_harq_proc* get_ul_newtx_harq(sched_ue& user, sf_sched* tti_sched)
return h->is_empty() ? h : nullptr;
}
alloc_outcome_t try_ul_retx_alloc(sf_sched& tti_sched, sched_ue& ue, const ul_harq_proc& h)
alloc_result try_ul_retx_alloc(sf_sched& tti_sched, sched_ue& ue, const ul_harq_proc& h)
{
prb_interval alloc = h.get_alloc();
if (tti_sched.get_cc_cfg()->nof_prb() == 6 and h.is_msg3()) {
@ -238,20 +237,20 @@ alloc_outcome_t try_ul_retx_alloc(sf_sched& tti_sched, sched_ue& ue, const ul_ha
// If can schedule the same mask as in earlier tx, do it
if (not tti_sched.get_ul_mask().any(alloc.start(), alloc.stop())) {
alloc_outcome_t ret = tti_sched.alloc_ul_user(&ue, alloc);
if (ret == alloc_outcome_t::SUCCESS or ret == alloc_outcome_t::DCI_COLLISION) {
alloc_result ret = tti_sched.alloc_ul_user(&ue, alloc);
if (ret != alloc_result::sch_collision) {
return ret;
}
}
// Avoid measGaps accounting for PDCCH
if (not ue.pusch_enabled(tti_sched.get_tti_rx(), tti_sched.get_enb_cc_idx(), true)) {
return alloc_outcome_t::MEASGAP_COLLISION;
return alloc_result::rnti_inactive;
}
uint32_t nof_prbs = alloc.length();
alloc = find_contiguous_ul_prbs(nof_prbs, tti_sched.get_ul_mask());
if (alloc.length() != nof_prbs) {
return alloc_outcome_t::RB_COLLISION;
return alloc_result::no_sch_space;
}
return tti_sched.alloc_ul_user(&ue, alloc);
}

@ -71,25 +71,22 @@ void sched_time_pf::sched_dl_users(sched_ue_list& ue_db, sf_sched* tti_sched)
uint32_t sched_time_pf::try_dl_alloc(ue_ctxt& ue_ctxt, sched_ue& ue, sf_sched* tti_sched)
{
alloc_outcome_t code = alloc_outcome_t::ERROR;
alloc_result code = alloc_result::other_cause;
if (ue_ctxt.dl_retx_h != nullptr) {
code = try_dl_retx_alloc(*tti_sched, ue, *ue_ctxt.dl_retx_h);
if (code == alloc_outcome_t::SUCCESS) {
if (code == alloc_result::success) {
return ue_ctxt.dl_retx_h->get_tbs(0) + ue_ctxt.dl_retx_h->get_tbs(1);
}
}
// There is space in PDCCH and an available DL HARQ
if (code != alloc_outcome_t::DCI_COLLISION and ue_ctxt.dl_newtx_h != nullptr) {
if (code != alloc_result::no_cch_space and ue_ctxt.dl_newtx_h != nullptr) {
rbgmask_t alloc_mask;
code = try_dl_newtx_alloc_greedy(*tti_sched, ue, *ue_ctxt.dl_newtx_h, &alloc_mask);
if (code == alloc_outcome_t::SUCCESS) {
if (code == alloc_result::success) {
return ue.get_expected_dl_bitrate(cc_cfg->enb_cc_idx, alloc_mask.count()) * tti_duration_ms / 8;
}
}
if (code == alloc_outcome_t::DCI_COLLISION) {
logger.info("SCHED: Couldn't find space in PDCCH/PUCCH for DL tx for rnti=0x%x", ue.get_rnti());
}
return 0;
}
@ -122,11 +119,11 @@ uint32_t sched_time_pf::try_ul_alloc(ue_ctxt& ue_ctxt, sched_ue& ue, sf_sched* t
return ue_ctxt.ul_h->get_pending_data();
}
alloc_outcome_t code;
uint32_t estim_tbs_bytes = 0;
alloc_result code;
uint32_t estim_tbs_bytes = 0;
if (ue_ctxt.ul_h->has_pending_retx()) {
code = try_ul_retx_alloc(*tti_sched, ue, *ue_ctxt.ul_h);
estim_tbs_bytes = code == alloc_outcome_t::SUCCESS ? ue_ctxt.ul_h->get_pending_data() : 0;
estim_tbs_bytes = code == alloc_result::success ? ue_ctxt.ul_h->get_pending_data() : 0;
} else {
// Note: h->is_empty check is required, in case CA allocated a small UL grant for UCI
uint32_t pending_data = ue.get_pending_ul_new_data(tti_sched->get_tti_tx_ul(), cc_cfg->enb_cc_idx);
@ -140,13 +137,10 @@ uint32_t sched_time_pf::try_ul_alloc(ue_ctxt& ue_ctxt, sched_ue& ue, sf_sched* t
return 0;
}
code = tti_sched->alloc_ul_user(&ue, alloc);
estim_tbs_bytes = code == alloc_outcome_t::SUCCESS
estim_tbs_bytes = code == alloc_result::success
? ue.get_expected_ul_bitrate(cc_cfg->enb_cc_idx, alloc.length()) * tti_duration_ms / 8
: 0;
}
if (code == alloc_outcome_t::DCI_COLLISION) {
logger.info("SCHED: rnti=0x%x, cc=%d, Couldn't find space in PDCCH for UL tx", ue.get_rnti(), cc_cfg->enb_cc_idx);
}
return estim_tbs_bytes;
}

@ -11,7 +11,6 @@
*/
#include "srsenb/hdr/stack/mac/schedulers/sched_time_rr.h"
#include <string.h>
namespace srsenb {
@ -50,10 +49,7 @@ void sched_time_rr::sched_dl_retxs(sched_ue_list& ue_db, sf_sched* tti_sched, si
if (h == nullptr) {
continue;
}
alloc_outcome_t code = try_dl_retx_alloc(*tti_sched, user, *h);
if (code == alloc_outcome_t::DCI_COLLISION) {
logger.info("SCHED: Couldn't find space in PDCCH/PUCCH for DL retx for rnti=0x%x", user.get_rnti());
}
try_dl_retx_alloc(*tti_sched, user, *h);
}
}
@ -74,7 +70,7 @@ void sched_time_rr::sched_dl_newtxs(sched_ue_list& ue_db, sf_sched* tti_sched, s
if (h == nullptr) {
continue;
}
if (try_dl_newtx_alloc_greedy(*tti_sched, user, *h) == alloc_outcome_t::DCI_COLLISION) {
if (try_dl_newtx_alloc_greedy(*tti_sched, user, *h) == alloc_result::no_cch_space) {
logger.info("SCHED: Couldn't find space in PDCCH/PUCCH for DL tx for rnti=0x%x", user.get_rnti());
}
}
@ -109,9 +105,9 @@ void sched_time_rr::sched_ul_retxs(sched_ue_list& ue_db, sf_sched* tti_sched, si
if (h == nullptr) {
continue;
}
alloc_outcome_t code = try_ul_retx_alloc(*tti_sched, user, *h);
if (code == alloc_outcome_t::DCI_COLLISION) {
logger.info("SCHED: Couldn't find space in PDCCH for UL retx of rnti=0x%x", user.get_rnti());
alloc_result code = try_ul_retx_alloc(*tti_sched, user, *h);
if (code == alloc_result::no_cch_space) {
logger.debug("SCHED: Couldn't find space in PDCCH for UL retx of rnti=0x%x", user.get_rnti());
}
}
}
@ -140,8 +136,8 @@ void sched_time_rr::sched_ul_newtxs(sched_ue_list& ue_db, sf_sched* tti_sched, s
if (alloc.empty()) {
continue;
}
alloc_outcome_t ret = tti_sched->alloc_ul_user(&user, alloc);
if (ret == alloc_outcome_t::DCI_COLLISION) {
alloc_result ret = tti_sched->alloc_ul_user(&user, alloc);
if (ret == alloc_result::no_cch_space) {
logger.info(
"SCHED: rnti=0x%x, cc=%d, Couldn't find space in PDCCH for UL tx", user.get_rnti(), cc_cfg->enb_cc_idx);
}

Loading…
Cancel
Save