Moved tti_sched_result out of carrier_sched, removed the interdependencies between the two classes, and simplified ra_sched and bc_sched initialization.

master
Francisco Paisana 5 years ago
parent 7990e2f563
commit 6d4f746a61

@ -33,113 +33,6 @@ class sched::carrier_sched
{
public:
explicit carrier_sched(sched* sched_);
class tti_sched_result_t : public dl_tti_sched_t, public ul_tti_sched_t
{
public:
struct ctrl_alloc_t {
size_t dci_idx;
rbg_range_t rbg_range;
uint16_t rnti;
uint32_t req_bytes;
alloc_type_t alloc_type;
};
struct rar_alloc_t : public ctrl_alloc_t {
dl_sched_rar_t rar_grant;
rar_alloc_t() = default;
explicit rar_alloc_t(const ctrl_alloc_t& c) : ctrl_alloc_t(c) {}
};
struct bc_alloc_t : public ctrl_alloc_t {
uint32_t rv = 0;
uint32_t sib_idx = 0;
bc_alloc_t() = default;
explicit bc_alloc_t(const ctrl_alloc_t& c) : ctrl_alloc_t(c) {}
};
struct dl_alloc_t {
size_t dci_idx;
sched_ue* user_ptr;
rbgmask_t user_mask;
uint32_t pid;
};
struct ul_alloc_t {
enum type_t { NEWTX, NOADAPT_RETX, ADAPT_RETX, MSG3 };
size_t dci_idx;
type_t type;
sched_ue* user_ptr;
ul_harq_proc::ul_alloc_t alloc;
uint32_t mcs = 0;
bool is_retx() const { return type == NOADAPT_RETX or type == ADAPT_RETX; }
bool is_msg3() const { return type == MSG3; }
bool needs_pdcch() const { return type == NEWTX or type == ADAPT_RETX; }
};
typedef std::pair<alloc_outcome_t, const rar_alloc_t*> rar_code_t;
typedef std::pair<alloc_outcome_t, const ctrl_alloc_t> ctrl_code_t;
// TTI scheduler result
pdcch_mask_t pdcch_mask;
sched_interface::dl_sched_res_t dl_sched_result;
sched_interface::ul_sched_res_t ul_sched_result;
void init(carrier_sched* carrier_);
void new_tti(uint32_t tti_rx_, uint32_t start_cfi);
alloc_outcome_t alloc_bc(uint32_t aggr_lvl, uint32_t sib_idx, uint32_t sib_ntx);
alloc_outcome_t alloc_paging(uint32_t aggr_lvl, uint32_t paging_payload);
rar_code_t alloc_rar(uint32_t aggr_lvl, const dl_sched_rar_t& rar_grant, uint32_t rar_tti, uint32_t buf_rar);
void generate_dcis();
// dl_tti_sched itf
alloc_outcome_t alloc_dl_user(sched_ue* user, const rbgmask_t& user_mask, uint32_t pid) final;
uint32_t get_tti_tx_dl() const final { return tti_params.tti_tx_dl; }
uint32_t get_nof_ctrl_symbols() const final;
const rbgmask_t& get_dl_mask() const final { return tti_alloc.get_dl_mask(); }
// ul_tti_sched itf
alloc_outcome_t alloc_ul_user(sched_ue* user, ul_harq_proc::ul_alloc_t alloc) final;
alloc_outcome_t alloc_ul_msg3(sched_ue* user, ul_harq_proc::ul_alloc_t alloc, uint32_t mcs);
const prbmask_t& get_ul_mask() const final { return tti_alloc.get_ul_mask(); }
uint32_t get_tti_tx_ul() const final { return tti_params.tti_tx_ul; }
// getters
const pdcch_mask_t& get_pdcch_mask() const { return pdcch_mask; }
rbgmask_t& get_dl_mask() { return tti_alloc.get_dl_mask(); }
prbmask_t& get_ul_mask() { return tti_alloc.get_ul_mask(); }
const std::vector<ul_alloc_t>& get_ul_allocs() const { return ul_data_allocs; }
uint32_t get_cfi() const { return tti_alloc.get_cfi(); }
uint32_t get_tti_rx() const { return tti_params.tti_rx; }
uint32_t get_sfn() const { return tti_params.sfn; }
uint32_t get_sf_idx() const { return tti_params.sf_idx; }
private:
bool is_dl_alloc(sched_ue* user) const final;
bool is_ul_alloc(sched_ue* user) const final;
ctrl_code_t alloc_dl_ctrl(uint32_t aggr_lvl, uint32_t tbs_bytes, uint16_t rnti);
alloc_outcome_t alloc_ul(sched_ue* user,
ul_harq_proc::ul_alloc_t alloc,
tti_sched_result_t::ul_alloc_t::type_t alloc_type,
uint32_t msg3 = 0);
int generate_format1a(uint32_t rb_start,
uint32_t l_crb,
uint32_t tbs,
uint32_t rv,
uint16_t rnti,
srslte_dci_dl_t* dci);
void set_bc_sched_result(const pdcch_grid_t::alloc_result_t& dci_result);
void set_rar_sched_result(const pdcch_grid_t::alloc_result_t& dci_result);
void set_dl_data_sched_result(const pdcch_grid_t::alloc_result_t& dci_result);
void set_ul_sched_result(const pdcch_grid_t::alloc_result_t& dci_result);
// consts
carrier_sched* parent_carrier = nullptr;
sched_params_t* sched_params = nullptr;
srslte::log* log_h = nullptr;
// internal state
tti_params_t tti_params{10241};
tti_grid_t tti_alloc;
std::vector<rar_alloc_t> rar_allocs;
std::vector<bc_alloc_t> bc_allocs;
std::vector<dl_alloc_t> data_allocs;
std::vector<ul_alloc_t> ul_data_allocs;
};
void reset();
void carrier_cfg();
void set_metric(sched::metric_dl* dl_metric_, sched::metric_ul* ul_metric_);
@ -147,7 +40,14 @@ public:
tti_sched_result_t* generate_tti_result(uint32_t tti_rx);
int dl_rach_info(dl_sched_rar_info_t rar_info);
// private:
// getters
const ra_sched* get_ra_sched() const { return ra_sched_ptr.get(); }
const tti_sched_result_t* get_tti_sched_view(uint32_t tti_rx) const
{
return &tti_scheds[tti_rx % tti_scheds.size()];
}
private:
void generate_phich(tti_sched_result_t* tti_sched);
//! Compute DL scheduler result for given TTI
void alloc_dl_users(tti_sched_result_t* tti_result);
@ -155,11 +55,11 @@ public:
int alloc_ul_users(tti_sched_result_t* tti_sched);
// args
sched* sched_ptr = nullptr;
srslte::log* log_h = nullptr;
cell_cfg_t* cfg = nullptr;
metric_dl* dl_metric = nullptr;
metric_ul* ul_metric = nullptr;
sched* sched_ptr = nullptr;
const sched_params_t* sched_params = nullptr;
srslte::log* log_h = nullptr;
metric_dl* dl_metric = nullptr;
metric_ul* ul_metric = nullptr;
// derived from args
prbmask_t prach_mask;
@ -174,17 +74,15 @@ public:
std::unique_ptr<ra_sched> ra_sched_ptr;
// protects access to bc/ra schedulers and harqs
std::mutex sched_mutex;
std::mutex carrier_mutex;
};
//! Broadcast (SIB + paging) scheduler
class bc_sched
{
public:
explicit bc_sched(sched::cell_cfg_t* cfg_);
void init(rrc_interface_mac* rrc_);
void dl_sched(sched::carrier_sched::tti_sched_result_t* tti_sched);
explicit bc_sched(const sched::cell_cfg_t& cfg_, rrc_interface_mac* rrc_);
void dl_sched(tti_sched_result_t* tti_sched);
void reset();
private:
@ -194,13 +92,13 @@ private:
uint32_t n_tx = 0;
};
void update_si_windows(sched::carrier_sched::tti_sched_result_t* tti_sched);
void alloc_sibs(sched::carrier_sched::tti_sched_result_t* tti_sched);
void alloc_paging(sched::carrier_sched::tti_sched_result_t* tti_sched);
void update_si_windows(tti_sched_result_t* tti_sched);
void alloc_sibs(tti_sched_result_t* tti_sched);
void alloc_paging(tti_sched_result_t* tti_sched);
// args
sched::cell_cfg_t* cfg;
rrc_interface_mac* rrc = nullptr;
const sched::cell_cfg_t* cfg;
rrc_interface_mac* rrc = nullptr;
std::array<sched_sib_t, sched_interface::MAX_SIBS> pending_sibs;
@ -225,18 +123,17 @@ public:
uint32_t mcs = 0;
};
explicit ra_sched(sched::cell_cfg_t* cfg_);
void init(srslte::log* log_, std::map<uint16_t, sched_ue>& ue_db_);
void dl_sched(sched::carrier_sched::tti_sched_result_t* tti_sched);
void ul_sched(sched::carrier_sched::tti_sched_result_t* tti_sched);
explicit ra_sched(const sched::cell_cfg_t& cfg_, srslte::log* log_, std::map<uint16_t, sched_ue>& ue_db_);
void dl_sched(tti_sched_result_t* tti_sched);
void ul_sched(tti_sched_result_t* tti_sched);
int dl_rach_info(dl_sched_rar_info_t rar_info);
void reset();
const pending_msg3_t& find_pending_msg3(uint32_t tti);
const pending_msg3_t& find_pending_msg3(uint32_t tti) const;
private:
// args
srslte::log* log_h = nullptr;
sched::cell_cfg_t* cfg;
const sched::cell_cfg_t* cfg = nullptr;
std::map<uint16_t, sched_ue>* ue_db = nullptr;
std::queue<dl_sched_rar_info_t> pending_rars;

@ -168,6 +168,116 @@ public:
virtual bool is_ul_alloc(sched_ue* user) const = 0;
};
/** Description: Stores the RAR, broadcast, paging, DL data, UL data allocations for the given TTI
* Converts the stored allocations' metadata to the scheduler UL/DL result
* Handles the generation of DCI formats
*/
class tti_sched_result_t : public dl_tti_sched_t, public ul_tti_sched_t
{
public:
  // Metadata shared by all DL control-channel allocations (SIB/Paging/RAR)
  struct ctrl_alloc_t {
    size_t       dci_idx;   // index of this allocation inside the PDCCH grid
    rbg_range_t  rbg_range; // allocated RBG interval
    uint16_t     rnti;
    uint32_t     req_bytes; // requested payload size in bytes
    alloc_type_t alloc_type;
  };
  // RAR allocation: control allocation metadata + the RAR grant content
  struct rar_alloc_t : public ctrl_alloc_t {
    sched_interface::dl_sched_rar_t rar_grant;
    rar_alloc_t() = default;
    explicit rar_alloc_t(const ctrl_alloc_t& c) : ctrl_alloc_t(c) {}
  };
  // SIB/Paging allocation: control allocation metadata + RV and SIB index
  struct bc_alloc_t : public ctrl_alloc_t {
    uint32_t rv      = 0;
    uint32_t sib_idx = 0;
    bc_alloc_t() = default;
    explicit bc_alloc_t(const ctrl_alloc_t& c) : ctrl_alloc_t(c) {}
  };
  // DL data allocation for a connected UE
  struct dl_alloc_t {
    size_t    dci_idx;
    sched_ue* user_ptr;
    rbgmask_t user_mask; // RBGs assigned to the UE
    uint32_t  pid;       // DL HARQ process id
  };
  // UL data allocation (newtx, adaptive/non-adaptive retx, or Msg3)
  struct ul_alloc_t {
    enum type_t { NEWTX, NOADAPT_RETX, ADAPT_RETX, MSG3 };
    size_t                   dci_idx;
    type_t                   type;
    sched_ue*                user_ptr;
    ul_harq_proc::ul_alloc_t alloc;
    uint32_t                 mcs = 0;
    bool                     is_retx() const { return type == NOADAPT_RETX or type == ADAPT_RETX; }
    bool                     is_msg3() const { return type == MSG3; }
    // non-adaptive retx and Msg3 reuse a previously signalled grant, so no new PDCCH is needed
    bool needs_pdcch() const { return type == NEWTX or type == ADAPT_RETX; }
  };
  typedef std::pair<alloc_outcome_t, const rar_alloc_t*> rar_code_t;
  typedef std::pair<alloc_outcome_t, const ctrl_alloc_t> ctrl_code_t;

  // TTI scheduler result, exported to the MAC/PHY interface
  pdcch_mask_t                    pdcch_mask;
  sched_interface::dl_sched_res_t dl_sched_result;
  sched_interface::ul_sched_res_t ul_sched_result;

  void            init(const sched_params_t& sched_params_);
  void            new_tti(uint32_t tti_rx_, uint32_t start_cfi);
  alloc_outcome_t alloc_bc(uint32_t aggr_lvl, uint32_t sib_idx, uint32_t sib_ntx);
  alloc_outcome_t alloc_paging(uint32_t aggr_lvl, uint32_t paging_payload);
  rar_code_t
       alloc_rar(uint32_t aggr_lvl, const sched_interface::dl_sched_rar_t& rar_grant, uint32_t rar_tti, uint32_t buf_rar);
  void generate_dcis();
  // dl_tti_sched itf
  alloc_outcome_t  alloc_dl_user(sched_ue* user, const rbgmask_t& user_mask, uint32_t pid) final;
  uint32_t         get_tti_tx_dl() const final { return tti_params.tti_tx_dl; }
  uint32_t         get_nof_ctrl_symbols() const final;
  const rbgmask_t& get_dl_mask() const final { return tti_alloc.get_dl_mask(); }
  // ul_tti_sched itf
  alloc_outcome_t  alloc_ul_user(sched_ue* user, ul_harq_proc::ul_alloc_t alloc) final;
  alloc_outcome_t  alloc_ul_msg3(sched_ue* user, ul_harq_proc::ul_alloc_t alloc, uint32_t mcs);
  const prbmask_t& get_ul_mask() const final { return tti_alloc.get_ul_mask(); }
  uint32_t         get_tti_tx_ul() const final { return tti_params.tti_tx_ul; }
  // getters
  const pdcch_mask_t&            get_pdcch_mask() const { return pdcch_mask; }
  rbgmask_t&                     get_dl_mask() { return tti_alloc.get_dl_mask(); }
  prbmask_t&                     get_ul_mask() { return tti_alloc.get_ul_mask(); }
  const std::vector<ul_alloc_t>& get_ul_allocs() const { return ul_data_allocs; }
  uint32_t                       get_cfi() const { return tti_alloc.get_cfi(); }
  uint32_t                       get_tti_rx() const { return tti_params.tti_rx; }
  uint32_t                       get_sfn() const { return tti_params.sfn; }
  uint32_t                       get_sf_idx() const { return tti_params.sf_idx; }

private:
  bool            is_dl_alloc(sched_ue* user) const final;
  bool            is_ul_alloc(sched_ue* user) const final;
  ctrl_code_t     alloc_dl_ctrl(uint32_t aggr_lvl, uint32_t tbs_bytes, uint16_t rnti);
  alloc_outcome_t alloc_ul(sched_ue* user,
                           ul_harq_proc::ul_alloc_t alloc,
                           tti_sched_result_t::ul_alloc_t::type_t alloc_type,
                           uint32_t msg3 = 0);
  int             generate_format1a(uint32_t rb_start,
                                    uint32_t l_crb,
                                    uint32_t tbs,
                                    uint32_t rv,
                                    uint16_t rnti,
                                    srslte_dci_dl_t* dci);
  void set_bc_sched_result(const pdcch_grid_t::alloc_result_t& dci_result);
  void set_rar_sched_result(const pdcch_grid_t::alloc_result_t& dci_result);
  void set_dl_data_sched_result(const pdcch_grid_t::alloc_result_t& dci_result);
  void set_ul_sched_result(const pdcch_grid_t::alloc_result_t& dci_result);

  // consts
  const sched_params_t* sched_params = nullptr;
  srslte::log*          log_h        = nullptr;

  // internal state
  // NOTE(review): 10241 is outside the 0..10239 TTI range — presumably an "uninitialized" sentinel; confirm
  tti_params_t             tti_params{10241};
  tti_grid_t               tti_alloc;
  std::vector<rar_alloc_t> rar_allocs;
  std::vector<bc_alloc_t>  bc_allocs;
  std::vector<dl_alloc_t>  data_allocs;
  std::vector<ul_alloc_t>  ul_data_allocs;
};
} // namespace srsenb
#endif // SRSLTE_SCHEDULER_GRID_H

@ -388,9 +388,11 @@ int sched::dl_sched(uint32_t tti, sched_interface::dl_sched_res_t* sched_result)
current_tti = sched_utils::max_tti(current_tti, tti_rx);
// Compute scheduling Result for tti_rx
carrier_sched::tti_sched_result_t* tti_sched = carrier_schedulers[0]->generate_tti_result(tti_rx);
pthread_rwlock_rdlock(&rwlock);
tti_sched_result_t* tti_sched = carrier_schedulers[0]->generate_tti_result(tti_rx);
pthread_rwlock_unlock(&rwlock);
// copy result
// Copy result
*sched_result = tti_sched->dl_sched_result;
return 0;
@ -404,10 +406,12 @@ int sched::ul_sched(uint32_t tti, srsenb::sched_interface::ul_sched_res_t* sched
}
// Compute scheduling Result for tti_rx
uint32_t tti_rx = sched_utils::tti_subtract(tti, 2 * FDD_HARQ_DELAY_MS);
carrier_sched::tti_sched_result_t* tti_sched = carrier_schedulers[0]->generate_tti_result(tti_rx);
uint32_t tti_rx = sched_utils::tti_subtract(tti, 2 * FDD_HARQ_DELAY_MS);
pthread_rwlock_rdlock(&rwlock);
tti_sched_result_t* tti_sched = carrier_schedulers[0]->generate_tti_result(tti_rx);
pthread_rwlock_unlock(&rwlock);
// Copy results
// Copy result
*sched_result = tti_sched->ul_sched_result;
return SRSLTE_SUCCESS;

@ -28,554 +28,13 @@
namespace srsenb {
/*******************************************************
* TTI resource Scheduling Methods
*******************************************************/
// Links this TTI result to its parent carrier scheduler and caches the shared
// scheduler parameters and logger, then initializes the TTI resource grid.
void sched::carrier_sched::tti_sched_result_t::init(carrier_sched* carrier_)
{
  parent_carrier = carrier_;
  // sched_params lives in the top-level sched object owned by the carrier
  sched_params = &carrier_->sched_ptr->sched_params;
  log_h        = sched_params->log_h;
  tti_alloc.init(*sched_params, 0);
}
// Resets all per-TTI state so this object can be reused for a new TTI:
// re-derives the TTI counters, resets the resource grid, clears the stored
// allocations, and zeroes the DL/UL result structs handed to the stack.
void sched::carrier_sched::tti_sched_result_t::new_tti(uint32_t tti_rx_, uint32_t start_cfi)
{
  tti_params = tti_params_t{tti_rx_};
  tti_alloc.new_tti(tti_params, start_cfi);
  // internal state
  rar_allocs.clear();
  bc_allocs.clear();
  data_allocs.clear();
  ul_data_allocs.clear();
  // TTI result
  // the PDCCH mask is sized to the number of CCEs available under the current CFI
  pdcch_mask.reset();
  pdcch_mask.resize(tti_alloc.get_pdcch_grid().nof_cces());
  // NOTE(review): bzero assumes the result structs are trivially copyable — confirm
  bzero(&dl_sched_result, sizeof(dl_sched_result));
  bzero(&ul_sched_result, sizeof(ul_sched_result));
}
// Returns true if the given user already has a DL data allocation in this TTI.
bool sched::carrier_sched::tti_sched_result_t::is_dl_alloc(sched_ue* user) const
{
  for (size_t idx = 0; idx < data_allocs.size(); ++idx) {
    if (data_allocs[idx].user_ptr == user) {
      return true;
    }
  }
  return false;
}
// Returns true if the given user already has a UL data allocation in this TTI.
bool sched::carrier_sched::tti_sched_result_t::is_ul_alloc(sched_ue* user) const
{
  for (size_t idx = 0; idx < ul_data_allocs.size(); ++idx) {
    if (ul_data_allocs[idx].user_ptr == user) {
      return true;
    }
  }
  return false;
}
// Allocates PDCCH + RBG space for a DL control-channel transmission
// (SIB, Paging, or RAR). The allocation type is inferred from the RNTI:
// SI-RNTI -> BC, P-RNTI -> PCCH, anything else -> RAR.
// Returns the allocation outcome together with the filled ctrl_alloc_t
// metadata (only meaningful when the outcome is successful).
sched::carrier_sched::tti_sched_result_t::ctrl_code_t
sched::carrier_sched::tti_sched_result_t::alloc_dl_ctrl(uint32_t aggr_lvl, uint32_t tbs_bytes, uint16_t rnti)
{
  ctrl_alloc_t ctrl_alloc{};
  // based on rnti, check which type of alloc
  alloc_type_t alloc_type = alloc_type_t::DL_RAR;
  if (rnti == SRSLTE_SIRNTI) {
    alloc_type = alloc_type_t::DL_BC;
  } else if (rnti == SRSLTE_PRNTI) {
    alloc_type = alloc_type_t::DL_PCCH;
  }
  /* Allocate space in the DL RBG and PDCCH grids */
  tti_grid_t::dl_ctrl_alloc_t ret = tti_alloc.alloc_dl_ctrl(aggr_lvl, alloc_type);
  if (not ret.outcome) {
    return {ret.outcome, ctrl_alloc};
  }
  // Allocation Successful
  // the PDCCH grid just registered this allocation, so its DCI index is the last one
  ctrl_alloc.dci_idx    = tti_alloc.get_pdcch_grid().nof_allocs() - 1;
  ctrl_alloc.rbg_range  = ret.rbg_range;
  ctrl_alloc.rnti       = rnti;
  ctrl_alloc.req_bytes  = tbs_bytes;
  ctrl_alloc.alloc_type = alloc_type;
  return {ret.outcome, ctrl_alloc};
}
// Tries to allocate a SIB transmission (DCI + RBGs).
// @param aggr_lvl PDCCH aggregation level
// @param sib_idx  zero-based SIB index (logged as SIB<idx+1>)
// @param sib_ntx  number of previous transmissions of this SIB, used to derive the RV
alloc_outcome_t
sched::carrier_sched::tti_sched_result_t::alloc_bc(uint32_t aggr_lvl, uint32_t sib_idx, uint32_t sib_ntx)
{
  uint32_t    sib_len = sched_params->cfg->sibs[sib_idx].len;
  uint32_t    rv      = get_rvidx(sib_ntx);
  ctrl_code_t ret     = alloc_dl_ctrl(aggr_lvl, sib_len, SRSLTE_SIRNTI);
  if (not ret.first) {
    Warning("SCHED: Could not allocate SIB=%d, L=%d, len=%d, cause=%s\n",
            sib_idx + 1,
            aggr_lvl,
            sib_len,
            ret.first.to_string());
    return ret.first;
  }
  // BC allocation successful: record metadata for later DCI generation
  bc_alloc_t bc_alloc(ret.second);
  bc_alloc.rv      = rv;
  bc_alloc.sib_idx = sib_idx;
  bc_allocs.push_back(bc_alloc);
  return ret.first;
}
// Tries to allocate a Paging transmission (DCI + RBGs) addressed to P-RNTI.
// @param aggr_lvl       PDCCH aggregation level
// @param paging_payload paging message length in bytes
alloc_outcome_t sched::carrier_sched::tti_sched_result_t::alloc_paging(uint32_t aggr_lvl, uint32_t paging_payload)
{
  ctrl_code_t ret = alloc_dl_ctrl(aggr_lvl, paging_payload, SRSLTE_PRNTI);
  if (not ret.first) {
    Warning(
        "SCHED: Could not allocate Paging with payload length=%d, cause=%s\n", paging_payload, ret.first.to_string());
    return ret.first;
  }
  // Paging allocation successful: record metadata for later DCI generation
  bc_alloc_t bc_alloc(ret.second);
  bc_allocs.push_back(bc_alloc);
  return ret.first;
}
// Tries to allocate a RAR (DCI + RBGs) answering the PRACH received at prach_tti.
// Returns {outcome, pointer to the stored RAR allocation}; the pointer is
// nullptr when the allocation failed.
sched::carrier_sched::tti_sched_result_t::rar_code_t
sched::carrier_sched::tti_sched_result_t::alloc_rar(uint32_t aggr_lvl,
                                                    const dl_sched_rar_t& rar_grant,
                                                    uint32_t prach_tti,
                                                    uint32_t buf_rar)
{
  // RA-RNTI = 1 + t_id + f_id
  // t_id = index of first subframe specified by PRACH (0<=t_id<10)
  // f_id = index of the PRACH within subframe, in ascending order of freq domain (0<=f_id<6) (for FDD, f_id=0)
  uint16_t ra_rnti = 1 + (uint16_t)(prach_tti % 10);
  ctrl_code_t ret = alloc_dl_ctrl(aggr_lvl, buf_rar, ra_rnti);
  if (not ret.first) {
    Warning("SCHED: Could not allocate RAR for L=%d, cause=%s\n", aggr_lvl, ret.first.to_string());
    return {ret.first, nullptr};
  }
  // Allocation successful: store the grant so the DCI can be generated later
  rar_alloc_t rar_alloc(ret.second);
  rar_alloc.rar_grant = rar_grant;
  rar_allocs.push_back(rar_alloc);
  return {ret.first, &rar_allocs.back()};
}
// Allocates DL data resources (RBGs + DCI) for a connected UE and HARQ pid.
// Fails if the user already has a DL allocation in this TTI or if the grid
// cannot fit the requested RBG mask.
alloc_outcome_t
sched::carrier_sched::tti_sched_result_t::alloc_dl_user(sched_ue* user, const rbgmask_t& user_mask, uint32_t pid)
{
  if (is_dl_alloc(user)) {
    log_h->warning("SCHED: Attempt to assign multiple harq pids to the same user rnti=0x%x\n", user->get_rnti());
    return alloc_outcome_t::ERROR;
  }
  // Try to allocate RBGs and DCI
  alloc_outcome_t ret = tti_alloc.alloc_dl_data(user, user_mask);
  if (ret != alloc_outcome_t::SUCCESS) {
    return ret;
  }
  // Allocation Successful: record metadata for later DCI generation
  dl_alloc_t alloc;
  alloc.dci_idx   = tti_alloc.get_pdcch_grid().nof_allocs() - 1;
  alloc.user_ptr  = user;
  alloc.user_mask = user_mask;
  alloc.pid       = pid;
  data_allocs.push_back(alloc);
  return alloc_outcome_t::SUCCESS;
}
// Common UL allocation routine used for newtx, adaptive/non-adaptive retx and Msg3.
// Verifies the user has no other UL allocation in this TTI, reserves PRBs
// (plus PDCCH space when the allocation type requires a new grant), and records
// the allocation metadata for later DCI generation.
// @param mcs fixed MCS; only used by Msg3 allocations (0 otherwise)
alloc_outcome_t sched::carrier_sched::tti_sched_result_t::alloc_ul(sched_ue* user,
                                                                   ul_harq_proc::ul_alloc_t alloc,
                                                                   tti_sched_result_t::ul_alloc_t::type_t alloc_type,
                                                                   uint32_t mcs)
{
  // Check whether user was already allocated
  if (is_ul_alloc(user)) {
    log_h->warning("SCHED: Attempt to assign multiple ul_harq_proc to the same user rnti=0x%x\n", user->get_rnti());
    return alloc_outcome_t::ERROR;
  }
  // Allocate RBGs and DCI space
  // only newtx and adaptive retx signal a fresh grant on PDCCH
  bool            needs_pdcch = alloc_type == ul_alloc_t::ADAPT_RETX or alloc_type == ul_alloc_t::NEWTX;
  alloc_outcome_t ret         = tti_alloc.alloc_ul_data(user, alloc, needs_pdcch);
  if (ret != alloc_outcome_t::SUCCESS) {
    return ret;
  }
  ul_alloc_t ul_alloc = {};
  ul_alloc.type       = alloc_type;
  ul_alloc.dci_idx    = tti_alloc.get_pdcch_grid().nof_allocs() - 1;
  ul_alloc.user_ptr   = user;
  ul_alloc.alloc      = alloc;
  ul_alloc.mcs        = mcs;
  ul_data_allocs.push_back(ul_alloc);
  return alloc_outcome_t::SUCCESS;
}
// Allocates UL resources for a user, classifying the allocation as a new
// transmission, a non-adaptive retx (same PRBs as before, no new PDCCH grant),
// or an adaptive retx (different PRBs, new grant required).
alloc_outcome_t sched::carrier_sched::tti_sched_result_t::alloc_ul_user(sched_ue* user, ul_harq_proc::ul_alloc_t alloc)
{
  // check whether adaptive/non-adaptive retx/newtx
  tti_sched_result_t::ul_alloc_t::type_t alloc_type;
  ul_harq_proc*                          h        = user->get_ul_harq(get_tti_tx_ul());
  bool                                   has_retx = h->has_pending_retx();
  if (has_retx) {
    ul_harq_proc::ul_alloc_t prev_alloc = h->get_alloc();
    // BUGFIX: a retx is non-adaptive only when it reuses exactly the previous
    // PRB allocation. The original condition compared prev_alloc.RB_start
    // against prev_alloc.L (i.e. against its own length), misclassifying most
    // non-adaptive retxs as adaptive.
    if (prev_alloc.L == alloc.L and prev_alloc.RB_start == alloc.RB_start) {
      alloc_type = ul_alloc_t::NOADAPT_RETX;
    } else {
      alloc_type = ul_alloc_t::ADAPT_RETX;
    }
  } else {
    alloc_type = ul_alloc_t::NEWTX;
  }
  return alloc_ul(user, alloc, alloc_type);
}
// Allocates UL resources for a Msg3 transmission using the given fixed MCS.
alloc_outcome_t
sched::carrier_sched::tti_sched_result_t::alloc_ul_msg3(sched_ue* user, ul_harq_proc::ul_alloc_t alloc, uint32_t mcs)
{
  const ul_alloc_t::type_t msg3_type = ul_alloc_t::MSG3;
  return alloc_ul(user, alloc, msg3_type, mcs);
}
// Converts the stored SIB/Paging allocations into entries of dl_sched_result.bc:
// assigns the chosen PDCCH position, generates the Format 1A DCI, and fills the
// BCCH/PCCH process fields. Allocations whose DCI generation fails are skipped
// (logged but not exported).
void sched::carrier_sched::tti_sched_result_t::set_bc_sched_result(const pdcch_grid_t::alloc_result_t& dci_result)
{
  for (const auto& bc_alloc : bc_allocs) {
    sched_interface::dl_sched_bc_t* bc = &dl_sched_result.bc[dl_sched_result.nof_bc_elems];
    // assign NCCE/L
    bc->dci.location = dci_result[bc_alloc.dci_idx]->dci_pos;
    /* Generate DCI format1A */
    prb_range_t prb_range = prb_range_t(bc_alloc.rbg_range, sched_params->P);
    int         tbs       = generate_format1a(
        prb_range.prb_start, prb_range.length(), bc_alloc.req_bytes, bc_alloc.rv, bc_alloc.rnti, &bc->dci);
    // Setup BC/Paging processes
    if (bc_alloc.alloc_type == alloc_type_t::DL_BC) {
      // NOTE(review): treats tbs <= req_bytes as failure, i.e. the TBS must strictly exceed the SIB length — confirm
      if (tbs <= (int)bc_alloc.req_bytes) {
        log_h->warning("SCHED: Error SIB%d, rbgs=(%d,%d), dci=(%d,%d), len=%d\n",
                       bc_alloc.sib_idx + 1,
                       bc_alloc.rbg_range.rbg_start,
                       bc_alloc.rbg_range.rbg_end,
                       bc->dci.location.L,
                       bc->dci.location.ncce,
                       bc_alloc.req_bytes);
        continue;
      }
      // Setup BC process
      bc->index = bc_alloc.sib_idx;
      bc->type  = sched_interface::dl_sched_bc_t::BCCH;
      bc->tbs   = (uint32_t)bc_alloc.req_bytes;
      log_h->info("SCHED: SIB%d, rbgs=(%d,%d), dci=(%d,%d), rv=%d, len=%d, period=%d, mcs=%d\n",
                  bc_alloc.sib_idx + 1,
                  bc_alloc.rbg_range.rbg_start,
                  bc_alloc.rbg_range.rbg_end,
                  bc->dci.location.L,
                  bc->dci.location.ncce,
                  bc_alloc.rv,
                  bc_alloc.req_bytes,
                  sched_params->cfg->sibs[bc_alloc.sib_idx].period_rf,
                  bc->dci.tb[0].mcs_idx);
    } else {
      // Paging
      if (tbs <= 0) {
        log_h->warning("SCHED: Error Paging, rbgs=(%d,%d), dci=(%d,%d)\n",
                       bc_alloc.rbg_range.rbg_start,
                       bc_alloc.rbg_range.rbg_end,
                       bc->dci.location.L,
                       bc->dci.location.ncce);
        continue;
      }
      // Setup Paging process
      bc->type = sched_interface::dl_sched_bc_t::PCCH;
      bc->tbs  = (uint32_t)tbs;
      log_h->info("SCHED: PCH, rbgs=(%d,%d), dci=(%d,%d), tbs=%d, mcs=%d\n",
                  bc_alloc.rbg_range.rbg_start,
                  bc_alloc.rbg_range.rbg_end,
                  bc->dci.location.L,
                  bc->dci.location.ncce,
                  tbs,
                  bc->dci.tb[0].mcs_idx);
    }
    dl_sched_result.nof_bc_elems++;
  }
}
// Converts the stored RAR allocations into entries of dl_sched_result.rar:
// assigns the chosen PDCCH position, generates the Format 1A DCI, and copies
// the Msg3 grants. Allocations whose DCI generation fails are skipped.
void sched::carrier_sched::tti_sched_result_t::set_rar_sched_result(const pdcch_grid_t::alloc_result_t& dci_result)
{
  for (const auto& rar_alloc : rar_allocs) {
    sched_interface::dl_sched_rar_t* rar = &dl_sched_result.rar[dl_sched_result.nof_rar_elems];
    // Assign NCCE/L
    rar->dci.location = dci_result[rar_alloc.dci_idx]->dci_pos;
    /* Generate DCI format1A */
    prb_range_t prb_range = prb_range_t(rar_alloc.rbg_range, sched_params->P);
    int         tbs =
        generate_format1a(prb_range.prb_start, prb_range.length(), rar_alloc.req_bytes, 0, rar_alloc.rnti, &rar->dci);
    if (tbs <= 0) {
      log_h->warning("SCHED: Error RAR, ra_rnti_idx=%d, rbgs=(%d,%d), dci=(%d,%d)\n",
                     rar_alloc.rnti,
                     rar_alloc.rbg_range.rbg_start,
                     rar_alloc.rbg_range.rbg_end,
                     rar->dci.location.L,
                     rar->dci.location.ncce);
      continue;
    }
    // Setup RAR process
    rar->tbs        = rar_alloc.req_bytes;
    rar->nof_grants = rar_alloc.rar_grant.nof_grants;
    memcpy(rar->msg3_grant, rar_alloc.rar_grant.msg3_grant, sizeof(dl_sched_rar_grant_t) * rar->nof_grants);
    // Print RAR allocation result
    for (uint32_t i = 0; i < rar->nof_grants; ++i) {
      const auto& msg3_grant = rar->msg3_grant[i];
      // the corresponding Msg3 is expected MSG3_DELAY_MS + TX_DELAY TTIs after the RAR tx TTI
      uint16_t expected_rnti =
          parent_carrier->ra_sched_ptr->find_pending_msg3(get_tti_tx_dl() + MSG3_DELAY_MS + TX_DELAY).rnti;
      log_h->info("SCHED: RAR, temp_crnti=0x%x, ra-rnti=%d, rbgs=(%d,%d), dci=(%d,%d), rar_grant_rba=%d, "
                  "rar_grant_mcs=%d\n",
                  expected_rnti,
                  rar_alloc.rnti,
                  rar_alloc.rbg_range.rbg_start,
                  rar_alloc.rbg_range.rbg_end,
                  rar->dci.location.L,
                  rar->dci.location.ncce,
                  msg3_grant.grant.rba,
                  msg3_grant.grant.trunc_mcs);
    }
    dl_sched_result.nof_rar_elems++;
  }
}
// Converts the stored DL data allocations into entries of dl_sched_result.data:
// assigns the chosen PDCCH position and lets each UE generate its DCI
// (Format 1/2/2A depending on its transmission mode). Allocations whose DCI
// generation fails (tbs <= 0) are skipped.
void sched::carrier_sched::tti_sched_result_t::set_dl_data_sched_result(const pdcch_grid_t::alloc_result_t& dci_result)
{
  for (const auto& data_alloc : data_allocs) {
    sched_interface::dl_sched_data_t* data = &dl_sched_result.data[dl_sched_result.nof_data_elems];
    // Assign NCCE/L
    data->dci.location = dci_result[data_alloc.dci_idx]->dci_pos;
    // Generate DCI Format1/2/2A
    sched_ue*           user        = data_alloc.user_ptr;
    dl_harq_proc*       h           = user->get_dl_harq(data_alloc.pid);
    // snapshot pending data before DCI generation, for logging the before/after buffer state
    uint32_t            data_before = user->get_pending_dl_new_data(get_tti_tx_dl());
    srslte_dci_format_t dci_format  = user->get_dci_format();
    bool                is_newtx    = h->is_empty();
    int tbs = 0;
    switch (dci_format) {
      case SRSLTE_DCI_FORMAT1:
        tbs = user->generate_format1(h, data, get_tti_tx_dl(), get_cfi(), data_alloc.user_mask);
        break;
      case SRSLTE_DCI_FORMAT2:
        tbs = user->generate_format2(h, data, get_tti_tx_dl(), get_cfi(), data_alloc.user_mask);
        break;
      case SRSLTE_DCI_FORMAT2A:
        tbs = user->generate_format2a(h, data, get_tti_tx_dl(), get_cfi(), data_alloc.user_mask);
        break;
      default:
        Error("DCI format (%d) not implemented\n", dci_format);
    }
    if (tbs <= 0) {
      log_h->warning("SCHED: Error DL %s rnti=0x%x, pid=%d, mask=%s, tbs=%d, buffer=%d\n",
                     is_newtx ? "tx" : "retx",
                     user->get_rnti(),
                     h->get_id(),
                     data_alloc.user_mask.to_hex().c_str(),
                     tbs,
                     user->get_pending_dl_new_data(get_tti_tx_dl()));
      continue;
    }
    // Print Resulting DL Allocation
    log_h->info("SCHED: DL %s rnti=0x%x, pid=%d, mask=0x%s, dci=(%d,%d), n_rtx=%d, tbs=%d, buffer=%d/%d\n",
                !is_newtx ? "retx" : "tx",
                user->get_rnti(),
                h->get_id(),
                data_alloc.user_mask.to_hex().c_str(),
                data->dci.location.L,
                data->dci.location.ncce,
                h->nof_retx(0) + h->nof_retx(1),
                tbs,
                data_before,
                user->get_pending_dl_new_data(get_tti_tx_dl()));
    dl_sched_result.nof_data_elems++;
  }
}
// Converts the stored UL data allocations into entries of ul_sched_result.pusch:
// assigns the PDCCH position (only for allocations that need a new grant),
// generates the Format 0 DCI via the UE, and un-triggers the SR on successful
// new transmissions. Allocations whose DCI generation fails are skipped.
void sched::carrier_sched::tti_sched_result_t::set_ul_sched_result(const pdcch_grid_t::alloc_result_t& dci_result)
{
  /* Set UL data DCI locs and format */
  for (const auto& ul_alloc : ul_data_allocs) {
    sched_interface::ul_sched_data_t* pusch = &ul_sched_result.pusch[ul_sched_result.nof_dci_elems];
    sched_ue* user = ul_alloc.user_ptr;
    // non-adaptive retx and Msg3 carry no PDCCH grant, so the location stays zeroed
    srslte_dci_location_t cce_range = {0, 0};
    if (ul_alloc.needs_pdcch()) {
      cce_range = dci_result[ul_alloc.dci_idx]->dci_pos;
    }
    /* Set fixed mcs if specified */
    int fixed_mcs = (ul_alloc.type == ul_alloc_t::MSG3) ? ul_alloc.mcs : -1;
    /* Generate DCI Format1A */
    uint32_t pending_data_before = user->get_pending_ul_new_data(get_tti_tx_ul());
    int      tbs =
        user->generate_format0(pusch, get_tti_tx_ul(), ul_alloc.alloc, ul_alloc.needs_pdcch(), cce_range, fixed_mcs);
    ul_harq_proc* h = user->get_ul_harq(get_tti_tx_ul());
    if (tbs <= 0) {
      log_h->warning("SCHED: Error %s %s rnti=0x%x, pid=%d, dci=(%d,%d), prb=(%d,%d), tbs=%d, bsr=%d\n",
                     ul_alloc.type == ul_alloc_t::MSG3 ? "Msg3" : "UL",
                     ul_alloc.is_retx() ? "retx" : "tx",
                     user->get_rnti(),
                     h->get_id(),
                     pusch->dci.location.L,
                     pusch->dci.location.ncce,
                     ul_alloc.alloc.RB_start,
                     ul_alloc.alloc.RB_start + ul_alloc.alloc.L,
                     tbs,
                     user->get_pending_ul_new_data(get_tti_tx_ul()));
      continue;
    }
    // Allocation was successful
    if (ul_alloc.type == ul_alloc_t::NEWTX) {
      // Un-trigger SR
      user->unset_sr();
    }
    // Print Resulting UL Allocation
    log_h->info("SCHED: %s %s rnti=0x%x, pid=%d, dci=(%d,%d), prb=(%d,%d), n_rtx=%d, tbs=%d, bsr=%d (%d-%d)\n",
                ul_alloc.is_msg3() ? "Msg3" : "UL",
                ul_alloc.is_retx() ? "retx" : "tx",
                user->get_rnti(),
                h->get_id(),
                pusch->dci.location.L,
                pusch->dci.location.ncce,
                ul_alloc.alloc.RB_start,
                ul_alloc.alloc.RB_start + ul_alloc.alloc.L,
                h->nof_retx(0),
                tbs,
                user->get_pending_ul_new_data(get_tti_tx_ul()),
                pending_data_before,
                user->get_pending_ul_old_data());
    ul_sched_result.nof_dci_elems++;
  }
}
// Finalizes the TTI result: picks one of the feasible PDCCH allocation
// combinations, registers the resulting CFI, and converts all stored
// BC/RAR/DL/UL allocations into DCIs in the result structs.
void sched::carrier_sched::tti_sched_result_t::generate_dcis()
{
  /* Pick one of the possible DCI masks */
  pdcch_grid_t::alloc_result_t dci_result;
  // tti_alloc.get_pdcch_grid().result_to_string();
  tti_alloc.get_pdcch_grid().get_allocs(&dci_result, &pdcch_mask);
  /* Register final CFI */
  dl_sched_result.cfi = tti_alloc.get_pdcch_grid().get_cfi();
  /* Generate DCI formats and fill sched_result structs */
  set_bc_sched_result(dci_result);
  set_rar_sched_result(dci_result);
  set_dl_data_sched_result(dci_result);
  set_ul_sched_result(dci_result);
}
// Number of OFDM control symbols in this TTI: the current CFI, plus one extra
// symbol for narrow bandwidths (<= 10 PRB).
uint32_t sched::carrier_sched::tti_sched_result_t::get_nof_ctrl_symbols() const
{
  uint32_t extra_sym = 0;
  if (sched_params->cfg->cell.nof_prb <= 10) {
    extra_sym = 1;
  }
  return tti_alloc.get_cfi() + extra_sym;
}
// Fills a DCI Format 1A (localized type-2 allocation) for the given PRB range
// and required payload, picking the smallest I_TBS whose TBS (for 2 or 3 PRBs)
// fits tbs_bytes.
// @return the selected TBS in bits on success, -1 if no MCS fits the payload
int sched::carrier_sched::tti_sched_result_t::generate_format1a(uint32_t rb_start,
                                                                uint32_t l_crb,
                                                                uint32_t tbs_bytes,
                                                                uint32_t rv,
                                                                uint16_t rnti,
                                                                srslte_dci_dl_t* dci)
{
  /* Calculate I_tbs for this TBS */
  int tbs = tbs_bytes * 8;
  int i;
  int mcs = -1;
  for (i = 0; i < 27; i++) {
    if (srslte_ra_tbs_from_idx(i, 2) >= tbs) {
      dci->type2_alloc.n_prb1a = srslte_ra_type2_t::SRSLTE_RA_TYPE2_NPRB1A_2;
      mcs                      = i;
      tbs                      = srslte_ra_tbs_from_idx(i, 2);
      break;
    }
    if (srslte_ra_tbs_from_idx(i, 3) >= tbs) {
      dci->type2_alloc.n_prb1a = srslte_ra_type2_t::SRSLTE_RA_TYPE2_NPRB1A_3;
      mcs                      = i;
      tbs                      = srslte_ra_tbs_from_idx(i, 3);
      break;
    }
  }
  // BUGFIX: the loop runs i in [0,27), so on failure i == 27 and the previous
  // check `if (i == 28)` could never fire, letting mcs == -1 leak into the DCI.
  // Detect the "no fitting MCS" case directly instead.
  if (mcs < 0) {
    Error("Can't allocate Format 1A for TBS=%d\n", tbs);
    return -1;
  }
  Debug("ra_tbs=%d/%d, tbs_bytes=%d, tbs=%d, mcs=%d\n",
        srslte_ra_tbs_from_idx(mcs, 2),
        srslte_ra_tbs_from_idx(mcs, 3),
        tbs_bytes,
        tbs,
        mcs);
  dci->alloc_type       = SRSLTE_RA_ALLOC_TYPE2;
  dci->type2_alloc.mode = srslte_ra_type2_t::SRSLTE_RA_TYPE2_LOC;
  dci->type2_alloc.riv  = srslte_ra_type2_to_riv(l_crb, rb_start, sched_params->cfg->cell.nof_prb);
  dci->pid              = 0;
  dci->tb[0].mcs_idx    = mcs;
  dci->tb[0].rv         = rv;
  dci->format           = SRSLTE_DCI_FORMAT1A;
  dci->rnti             = rnti;
  return tbs;
}
/*******************************************************
* Broadcast (SIB+Paging) scheduling
*******************************************************/
bc_sched::bc_sched(sched::cell_cfg_t* cfg_) : cfg(cfg_) {}
bc_sched::bc_sched(const sched::cell_cfg_t& cfg_, srsenb::rrc_interface_mac* rrc_) : cfg(&cfg_), rrc(rrc_) {}
void bc_sched::init(srsenb::rrc_interface_mac* rrc_)
{
rrc = rrc_;
}
void bc_sched::dl_sched(sched::carrier_sched::tti_sched_result_t* tti_sched)
void bc_sched::dl_sched(tti_sched_result_t* tti_sched)
{
current_sf_idx = tti_sched->get_sf_idx();
current_sfn = tti_sched->get_sfn();
@ -593,7 +52,7 @@ void bc_sched::dl_sched(sched::carrier_sched::tti_sched_result_t* tti_sched)
alloc_paging(tti_sched);
}
void bc_sched::update_si_windows(sched::carrier_sched::tti_sched_result_t* tti_sched)
void bc_sched::update_si_windows(tti_sched_result_t* tti_sched)
{
uint32_t tti_tx_dl = tti_sched->get_tti_tx_dl();
@ -631,7 +90,7 @@ void bc_sched::update_si_windows(sched::carrier_sched::tti_sched_result_t* tti_s
}
}
void bc_sched::alloc_sibs(sched::carrier_sched::tti_sched_result_t* tti_sched)
void bc_sched::alloc_sibs(tti_sched_result_t* tti_sched)
{
for (uint32_t i = 0; i < pending_sibs.size(); i++) {
if (cfg->sibs[i].len > 0 and pending_sibs[i].is_in_window and pending_sibs[i].n_tx < 4) {
@ -653,7 +112,7 @@ void bc_sched::alloc_sibs(sched::carrier_sched::tti_sched_result_t* tti_sched)
}
}
void bc_sched::alloc_paging(sched::carrier_sched::tti_sched_result_t* tti_sched)
void bc_sched::alloc_paging(tti_sched_result_t* tti_sched)
{
/* Allocate DCIs and RBGs for paging */
if (rrc != nullptr) {
@ -675,18 +134,17 @@ void bc_sched::reset()
* RAR scheduling
*******************************************************/
ra_sched::ra_sched(sched::cell_cfg_t* cfg_) : cfg(cfg_) {}
void ra_sched::init(srslte::log* log_, std::map<uint16_t, sched_ue>& ue_db_)
ra_sched::ra_sched(const sched::cell_cfg_t& cfg_, srslte::log* log_, std::map<uint16_t, sched_ue>& ue_db_) :
cfg(&cfg_),
log_h(log_),
ue_db(&ue_db_)
{
log_h = log_;
ue_db = &ue_db_;
}
// Schedules RAR
// On every call to this function, we schedule the oldest RAR which is still within the window. If outside the window we
// discard it.
void ra_sched::dl_sched(srsenb::sched::carrier_sched::tti_sched_result_t* tti_sched)
void ra_sched::dl_sched(srsenb::tti_sched_result_t* tti_sched)
{
tti_tx_dl = tti_sched->get_tti_tx_dl();
rar_aggr_level = 2;
@ -730,11 +188,10 @@ void ra_sched::dl_sched(srsenb::sched::carrier_sched::tti_sched_result_t* tti_sc
rar_grant.nof_grants++;
// Try to schedule DCI + RBGs for RAR Grant
sched::carrier_sched::tti_sched_result_t::rar_code_t ret =
tti_sched->alloc_rar(rar_aggr_level,
rar_grant,
rar.prach_tti,
7 * rar_grant.nof_grants); // fixme: check RAR size
tti_sched_result_t::rar_code_t ret = tti_sched->alloc_rar(rar_aggr_level,
rar_grant,
rar.prach_tti,
7 * rar_grant.nof_grants); // fixme: check RAR size
// If we can allocate, schedule Msg3 and remove from pending
if (!ret.first) {
@ -760,7 +217,7 @@ void ra_sched::dl_sched(srsenb::sched::carrier_sched::tti_sched_result_t* tti_sc
}
// Schedules Msg3
void ra_sched::ul_sched(sched::carrier_sched::tti_sched_result_t* tti_sched)
void ra_sched::ul_sched(tti_sched_result_t* tti_sched)
{
uint32_t pending_tti = tti_sched->get_tti_tx_ul() % TTIMOD_SZ;
@ -808,7 +265,7 @@ void ra_sched::reset()
}
}
const ra_sched::pending_msg3_t& ra_sched::find_pending_msg3(uint32_t tti)
const ra_sched::pending_msg3_t& ra_sched::find_pending_msg3(uint32_t tti) const
{
uint32_t pending_tti = tti % TTIMOD_SZ;
return pending_msg3[pending_tti];
@ -818,48 +275,46 @@ const ra_sched::pending_msg3_t& ra_sched::find_pending_msg3(uint32_t tti)
* Carrier scheduling
*******************************************************/
sched::carrier_sched::carrier_sched(sched* sched_) : sched_ptr(sched_), cfg(&sched_->cfg)
sched::carrier_sched::carrier_sched(sched* sched_) : sched_ptr(sched_)
{
bc_sched_ptr.reset(new bc_sched{cfg});
ra_sched_ptr.reset(new ra_sched{cfg});
tti_dl_mask.resize(1, 0);
}
void sched::carrier_sched::reset()
{
std::lock_guard<std::mutex> lock(sched_mutex);
if (ra_sched_ptr != nullptr) {
ra_sched_ptr->reset();
}
if (bc_sched_ptr != nullptr) {
bc_sched_ptr->reset();
}
std::lock_guard<std::mutex> lock(carrier_mutex);
ra_sched_ptr.reset();
bc_sched_ptr.reset();
}
void sched::carrier_sched::carrier_cfg()
{
// sched::cfg is now fully set
log_h = sched_ptr->log_h;
tti_dl_mask.resize(10, 0);
sched_params = &sched_ptr->sched_params;
log_h = sched_params->log_h;
const cell_cfg_t* cfg_ = sched_params->cfg;
std::lock_guard<std::mutex> lock(carrier_mutex);
// init Broadcast/RA schedulers
bc_sched_ptr->init(sched_ptr->rrc);
ra_sched_ptr->init(log_h, sched_ptr->ue_db);
bc_sched_ptr.reset(new bc_sched{*sched_params->cfg, sched_ptr->rrc});
ra_sched_ptr.reset(new ra_sched{*sched_params->cfg, log_h, sched_ptr->ue_db});
dl_metric->set_log(log_h);
ul_metric->set_log(log_h);
// Setup constant PUCCH/PRACH mask
pucch_mask.resize(cfg->cell.nof_prb);
if (cfg->nrb_pucch > 0) {
pucch_mask.fill(0, (uint32_t)cfg->nrb_pucch);
pucch_mask.fill(cfg->cell.nof_prb - cfg->nrb_pucch, cfg->cell.nof_prb);
pucch_mask.resize(cfg_->cell.nof_prb);
if (cfg_->nrb_pucch > 0) {
pucch_mask.fill(0, (uint32_t)cfg_->nrb_pucch);
pucch_mask.fill(cfg_->cell.nof_prb - cfg_->nrb_pucch, cfg_->cell.nof_prb);
}
prach_mask.resize(cfg->cell.nof_prb);
prach_mask.fill(cfg->prach_freq_offset, cfg->prach_freq_offset + 6);
prach_mask.resize(cfg_->cell.nof_prb);
prach_mask.fill(cfg_->prach_freq_offset, cfg_->prach_freq_offset + 6);
// Initiate the tti_scheduler for each TTI
for (tti_sched_result_t& tti_sched : tti_scheds) {
tti_sched.init(this);
tti_sched.init(*sched_params);
}
}
@ -874,18 +329,17 @@ void sched::carrier_sched::set_dl_tti_mask(uint8_t* tti_mask, uint32_t nof_sfs)
tti_dl_mask.assign(tti_mask, tti_mask + nof_sfs);
}
sched::carrier_sched::tti_sched_result_t* sched::carrier_sched::generate_tti_result(uint32_t tti_rx)
tti_sched_result_t* sched::carrier_sched::generate_tti_result(uint32_t tti_rx)
{
tti_sched_result_t* tti_sched = get_tti_sched(tti_rx);
// if it is the first time tti is run, reset vars
if (tti_rx != tti_sched->get_tti_rx()) {
uint32_t start_cfi = sched_ptr->sched_params.sched_cfg.nof_ctrl_symbols;
uint32_t start_cfi = sched_params->sched_cfg.nof_ctrl_symbols;
tti_sched->new_tti(tti_rx, start_cfi);
// Protects access to pending_rar[], pending_msg3[], pending_sibs[], rlc buffers
std::lock_guard<std::mutex> lock(sched_mutex);
pthread_rwlock_rdlock(&sched_ptr->rwlock);
// Protects access to pending_rar[], pending_msg3[], ra_sched, bc_sched, rlc buffers
std::lock_guard<std::mutex> lock(carrier_mutex);
/* Schedule PHICH */
generate_phich(tti_sched);
@ -893,9 +347,7 @@ sched::carrier_sched::tti_sched_result_t* sched::carrier_sched::generate_tti_res
/* Schedule DL control data */
if (tti_dl_mask[tti_sched->get_tti_tx_dl() % tti_dl_mask.size()] == 0) {
/* Schedule Broadcast data (SIB and paging) */
if (bc_sched_ptr != nullptr) {
bc_sched_ptr->dl_sched(tti_sched);
}
bc_sched_ptr->dl_sched(tti_sched);
/* Schedule RAR */
ra_sched_ptr->dl_sched(tti_sched);
@ -920,8 +372,6 @@ sched::carrier_sched::tti_sched_result_t* sched::carrier_sched::generate_tti_res
for (auto& user : sched_ptr->ue_db) {
user.second.reset_pending_pids(tti_rx);
}
pthread_rwlock_unlock(&sched_ptr->rwlock);
}
return tti_sched;
@ -953,20 +403,20 @@ void sched::carrier_sched::generate_phich(tti_sched_result_t* tti_sched)
tti_sched->ul_sched_result.nof_phich_elems = nof_phich_elems;
}
void sched::carrier_sched::alloc_dl_users(sched::carrier_sched::tti_sched_result_t* tti_result)
void sched::carrier_sched::alloc_dl_users(tti_sched_result_t* tti_result)
{
if (tti_dl_mask[tti_result->get_tti_tx_dl() % tti_dl_mask.size()] != 0) {
return;
}
// NOTE: In case of 6 PRBs, do not transmit if there is going to be a PRACH in the UL to avoid collisions
if (cfg->cell.nof_prb == 6) {
if (sched_params->cfg->cell.nof_prb == 6) {
uint32_t tti_rx_ack = TTI_RX_ACK(tti_result->get_tti_rx());
bool msg3_enabled = false;
if (ra_sched_ptr != nullptr and ra_sched_ptr->find_pending_msg3(tti_rx_ack).enabled) {
msg3_enabled = true;
}
if (srslte_prach_tti_opportunity_config_fdd(cfg->prach_config, tti_rx_ack, -1) or msg3_enabled) {
if (srslte_prach_tti_opportunity_config_fdd(sched_params->cfg->prach_config, tti_rx_ack, -1) or msg3_enabled) {
tti_result->get_dl_mask().fill(0, tti_result->get_dl_mask().size());
}
}
@ -975,13 +425,13 @@ void sched::carrier_sched::alloc_dl_users(sched::carrier_sched::tti_sched_result
dl_metric->sched_users(sched_ptr->ue_db, tti_result);
}
int sched::carrier_sched::alloc_ul_users(sched::carrier_sched::tti_sched_result_t* tti_sched)
int sched::carrier_sched::alloc_ul_users(tti_sched_result_t* tti_sched)
{
uint32_t tti_tx_ul = tti_sched->get_tti_tx_ul();
prbmask_t& ul_mask = tti_sched->get_ul_mask();
/* reserve PRBs for PRACH */
if (srslte_prach_tti_opportunity_config_fdd(cfg->prach_config, tti_tx_ul, -1)) {
if (srslte_prach_tti_opportunity_config_fdd(sched_params->cfg->prach_config, tti_tx_ul, -1)) {
ul_mask = prach_mask;
log_h->debug("SCHED: Allocated PRACH RBs. Mask: 0x%s\n", prach_mask.to_hex().c_str());
}
@ -990,7 +440,7 @@ int sched::carrier_sched::alloc_ul_users(sched::carrier_sched::tti_sched_result_
ra_sched_ptr->ul_sched(tti_sched);
/* reserve PRBs for PUCCH */
if (cfg->cell.nof_prb != 6 and (ul_mask & pucch_mask).any()) {
if (sched_params->cfg->cell.nof_prb != 6 and (ul_mask & pucch_mask).any()) {
log_h->error("There was a collision with the PUCCH. current mask=0x%s, pucch_mask=0x%s\n",
ul_mask.to_hex().c_str(),
pucch_mask.to_hex().c_str());
@ -1010,7 +460,7 @@ int sched::carrier_sched::alloc_ul_users(sched::carrier_sched::tti_sched_result_
int sched::carrier_sched::dl_rach_info(dl_sched_rar_info_t rar_info)
{
std::lock_guard<std::mutex> lock(sched_mutex);
std::lock_guard<std::mutex> lock(carrier_mutex);
return ra_sched_ptr->dl_rach_info(rar_info);
}

@ -23,6 +23,11 @@
#include "srsenb/hdr/stack/mac/scheduler.h"
#include <srslte/interfaces/sched_interface.h>
#define Error(fmt, ...) log_h->error(fmt, ##__VA_ARGS__)
#define Warning(fmt, ...) log_h->warning(fmt, ##__VA_ARGS__)
#define Info(fmt, ...) log_h->info(fmt, ##__VA_ARGS__)
#define Debug(fmt, ...) log_h->debug(fmt, ##__VA_ARGS__)
namespace srsenb {
const char* alloc_outcome_t::to_string() const
@ -373,4 +378,533 @@ alloc_outcome_t tti_grid_t::alloc_ul_data(sched_ue* user, ul_harq_proc::ul_alloc
return alloc_outcome_t::SUCCESS;
}
/*******************************************************
* TTI resource Scheduling Methods
*******************************************************/
void tti_sched_result_t::init(const sched_params_t& sched_params_)
{
sched_params = &sched_params_;
log_h = sched_params->log_h;
tti_alloc.init(*sched_params, 0);
}
/// Resets this object for a new TTI.
/// Clears all allocations recorded during the previous use of this (recycled)
/// result object, re-dimensions the PDCCH mask to the CCE count of the current
/// grid, and zeroes the DL/UL result structs handed to the PHY.
/// @param tti_rx_    TTI index in which the scheduling request was received
/// @param start_cfi  initial number of control symbols for the PDCCH grid
void tti_sched_result_t::new_tti(uint32_t tti_rx_, uint32_t start_cfi)
{
  tti_params = tti_params_t{tti_rx_};
  tti_alloc.new_tti(tti_params, start_cfi);

  // internal state
  rar_allocs.clear();
  bc_allocs.clear();
  data_allocs.clear();
  ul_data_allocs.clear();

  // TTI result
  pdcch_mask.reset();
  pdcch_mask.resize(tti_alloc.get_pdcch_grid().nof_cces());
  // Value-initialization zeroes the C-style result structs. This replaces the
  // legacy bzero() calls: bzero was removed from POSIX.1-2008 and needed the
  // non-standard <strings.h>; "= {}" is standard C++ and needs no header.
  dl_sched_result = {};
  ul_sched_result = {};
}
/// Returns true if the given user already holds a DL data allocation in this TTI.
bool tti_sched_result_t::is_dl_alloc(sched_ue* user) const
{
  for (auto it = data_allocs.begin(); it != data_allocs.end(); ++it) {
    if (it->user_ptr == user) {
      return true;
    }
  }
  return false;
}
/// Returns true if the given user already holds a UL allocation in this TTI.
bool tti_sched_result_t::is_ul_alloc(sched_ue* user) const
{
  for (auto it = ul_data_allocs.begin(); it != ul_data_allocs.end(); ++it) {
    if (it->user_ptr == user) {
      return true;
    }
  }
  return false;
}
/// Common DL control-channel allocation routine used by SIB/Paging/RAR.
/// Derives the allocation type from the RNTI, reserves RBGs + a PDCCH
/// position in the grid, and on success fills in the allocation metadata.
/// @return pair of {grid outcome, filled ctrl_alloc_t (valid only on success)}
tti_sched_result_t::ctrl_code_t tti_sched_result_t::alloc_dl_ctrl(uint32_t aggr_lvl, uint32_t tbs_bytes, uint16_t rnti)
{
  // SI-RNTI -> broadcast, P-RNTI -> paging, anything else -> RAR
  alloc_type_t alloc_type;
  if (rnti == SRSLTE_SIRNTI) {
    alloc_type = alloc_type_t::DL_BC;
  } else if (rnti == SRSLTE_PRNTI) {
    alloc_type = alloc_type_t::DL_PCCH;
  } else {
    alloc_type = alloc_type_t::DL_RAR;
  }

  /* Allocate space in the DL RBG and PDCCH grids */
  ctrl_alloc_t               ctrl_alloc{};
  tti_grid_t::dl_ctrl_alloc_t grid_ret = tti_alloc.alloc_dl_ctrl(aggr_lvl, alloc_type);
  if (not grid_ret.outcome) {
    return {grid_ret.outcome, ctrl_alloc};
  }

  // Allocation successful: the DCI index is the last PDCCH grid entry
  ctrl_alloc.dci_idx    = tti_alloc.get_pdcch_grid().nof_allocs() - 1;
  ctrl_alloc.rbg_range  = grid_ret.rbg_range;
  ctrl_alloc.rnti       = rnti;
  ctrl_alloc.req_bytes  = tbs_bytes;
  ctrl_alloc.alloc_type = alloc_type;
  return {grid_ret.outcome, ctrl_alloc};
}
/// Allocates DCI + RBGs for one SIB transmission.
/// @param aggr_lvl PDCCH aggregation level
/// @param sib_idx  index of the SIB in the cell configuration
/// @param sib_ntx  transmission counter, mapped to a redundancy version
alloc_outcome_t tti_sched_result_t::alloc_bc(uint32_t aggr_lvl, uint32_t sib_idx, uint32_t sib_ntx)
{
  const uint32_t sib_len = sched_params->cfg->sibs[sib_idx].len;
  const uint32_t rv      = sched::get_rvidx(sib_ntx);

  // Reserve PDCCH + RBG space for the SIB
  ctrl_code_t ctrl_ret = alloc_dl_ctrl(aggr_lvl, sib_len, SRSLTE_SIRNTI);
  if (not ctrl_ret.first) {
    Warning("SCHED: Could not allocate SIB=%d, L=%d, len=%d, cause=%s\n",
            sib_idx + 1,
            aggr_lvl,
            sib_len,
            ctrl_ret.first.to_string());
    return ctrl_ret.first;
  }

  // Record the successful BC allocation
  bc_alloc_t new_alloc{ctrl_ret.second};
  new_alloc.rv      = rv;
  new_alloc.sib_idx = sib_idx;
  bc_allocs.push_back(new_alloc);

  return ctrl_ret.first;
}
/// Allocates DCI + RBGs for a Paging message of the given payload size.
alloc_outcome_t tti_sched_result_t::alloc_paging(uint32_t aggr_lvl, uint32_t paging_payload)
{
  // Reserve PDCCH + RBG space for the paging record
  ctrl_code_t ctrl_ret = alloc_dl_ctrl(aggr_lvl, paging_payload, SRSLTE_PRNTI);
  if (not ctrl_ret.first) {
    Warning(
        "SCHED: Could not allocate Paging with payload length=%d, cause=%s\n", paging_payload, ctrl_ret.first.to_string());
    return ctrl_ret.first;
  }

  // Record the successful paging allocation
  bc_allocs.push_back(bc_alloc_t{ctrl_ret.second});

  return ctrl_ret.first;
}
/// Allocates DCI + RBGs for a Random Access Response.
/// @return pair of {outcome, pointer to the stored RAR allocation (nullptr on failure)}
tti_sched_result_t::rar_code_t tti_sched_result_t::alloc_rar(uint32_t aggr_lvl,
                                                             const sched_interface::dl_sched_rar_t& rar_grant,
                                                             uint32_t prach_tti,
                                                             uint32_t buf_rar)
{
  // RA-RNTI = 1 + t_id + f_id
  // t_id = index of first subframe specified by PRACH (0<=t_id<10)
  // f_id = index of the PRACH within subframe, in ascending order of freq domain (0<=f_id<6) (for FDD, f_id=0)
  const uint16_t ra_rnti = 1 + (uint16_t)(prach_tti % 10);

  ctrl_code_t ctrl_ret = alloc_dl_ctrl(aggr_lvl, buf_rar, ra_rnti);
  if (not ctrl_ret.first) {
    Warning("SCHED: Could not allocate RAR for L=%d, cause=%s\n", aggr_lvl, ctrl_ret.first.to_string());
    return {ctrl_ret.first, nullptr};
  }

  // Store the RAR allocation together with its Msg3 grants; the returned
  // pointer refers to the element just appended to rar_allocs.
  rar_allocs.emplace_back(ctrl_ret.second);
  rar_allocs.back().rar_grant = rar_grant;
  return {ctrl_ret.first, &rar_allocs.back()};
}
/// Allocates RBGs + DCI for a DL data transmission of one user.
/// @param user      target UE
/// @param user_mask RBG mask requested for the transmission
/// @param pid       DL HARQ process id
alloc_outcome_t tti_sched_result_t::alloc_dl_user(sched_ue* user, const rbgmask_t& user_mask, uint32_t pid)
{
  // A UE may carry at most one DL HARQ allocation per TTI
  if (is_dl_alloc(user)) {
    log_h->warning("SCHED: Attempt to assign multiple harq pids to the same user rnti=0x%x\n", user->get_rnti());
    return alloc_outcome_t::ERROR;
  }

  // Try to allocate RBGs and DCI space in the grid
  alloc_outcome_t grid_ret = tti_alloc.alloc_dl_data(user, user_mask);
  if (grid_ret != alloc_outcome_t::SUCCESS) {
    return grid_ret;
  }

  // Record the successful DL data allocation
  dl_alloc_t new_alloc;
  new_alloc.dci_idx   = tti_alloc.get_pdcch_grid().nof_allocs() - 1;
  new_alloc.user_ptr  = user;
  new_alloc.user_mask = user_mask;
  new_alloc.pid       = pid;
  data_allocs.push_back(new_alloc);

  return alloc_outcome_t::SUCCESS;
}
/// Allocates PRBs (and, if needed, DCI space) for a UL transmission.
/// @param user       target UE
/// @param alloc      contiguous PRB allocation
/// @param alloc_type NEWTX / NOADAPT_RETX / ADAPT_RETX / MSG3
/// @param mcs        fixed MCS (only meaningful for Msg3)
alloc_outcome_t tti_sched_result_t::alloc_ul(sched_ue* user,
                                             ul_harq_proc::ul_alloc_t alloc,
                                             tti_sched_result_t::ul_alloc_t::type_t alloc_type,
                                             uint32_t mcs)
{
  // A UE may carry at most one UL HARQ allocation per TTI
  if (is_ul_alloc(user)) {
    log_h->warning("SCHED: Attempt to assign multiple ul_harq_proc to the same user rnti=0x%x\n", user->get_rnti());
    return alloc_outcome_t::ERROR;
  }

  // Only new transmissions and adaptive retxs consume PDCCH space
  const bool requires_pdcch = (alloc_type == ul_alloc_t::NEWTX) or (alloc_type == ul_alloc_t::ADAPT_RETX);

  // Reserve PRBs (and DCI space if needed) in the grid
  alloc_outcome_t grid_ret = tti_alloc.alloc_ul_data(user, alloc, requires_pdcch);
  if (grid_ret != alloc_outcome_t::SUCCESS) {
    return grid_ret;
  }

  // Record the successful UL allocation
  ul_alloc_t new_alloc = {};
  new_alloc.type       = alloc_type;
  new_alloc.dci_idx    = tti_alloc.get_pdcch_grid().nof_allocs() - 1;
  new_alloc.user_ptr   = user;
  new_alloc.alloc      = alloc;
  new_alloc.mcs        = mcs;
  ul_data_allocs.push_back(new_alloc);

  return alloc_outcome_t::SUCCESS;
}
/// Allocates UL resources for a user transmission, classifying it first as
/// new tx, non-adaptive retx (same PRBs as the previous tx, no PDCCH needed)
/// or adaptive retx (different PRBs, requires a new PDCCH grant).
alloc_outcome_t tti_sched_result_t::alloc_ul_user(sched_ue* user, ul_harq_proc::ul_alloc_t alloc)
{
  // check whether adaptive/non-adaptive retx/newtx
  tti_sched_result_t::ul_alloc_t::type_t alloc_type;
  ul_harq_proc*                          h        = user->get_ul_harq(get_tti_tx_ul());
  bool                                   has_retx = h->has_pending_retx();
  if (has_retx) {
    ul_harq_proc::ul_alloc_t prev_alloc = h->get_alloc();
    // A retx is non-adaptive only when it reuses exactly the same PRB
    // allocation as the previous transmission.
    // BUGFIX: this used to test "prev_alloc.RB_start == prev_alloc.L",
    // comparing the previous allocation against itself, which mis-classified
    // non-adaptive retxs as adaptive (forcing an unnecessary PDCCH grant).
    if (prev_alloc.L == alloc.L and prev_alloc.RB_start == alloc.RB_start) {
      alloc_type = ul_alloc_t::NOADAPT_RETX;
    } else {
      alloc_type = ul_alloc_t::ADAPT_RETX;
    }
  } else {
    alloc_type = ul_alloc_t::NEWTX;
  }

  return alloc_ul(user, alloc, alloc_type);
}
/// Allocates UL resources for a Msg3 transmission with a fixed MCS.
/// Msg3 allocations are typed MSG3, which needs_pdcch() reports as false
/// (the UL grant was already conveyed inside the RAR).
alloc_outcome_t tti_sched_result_t::alloc_ul_msg3(sched_ue* user, ul_harq_proc::ul_alloc_t alloc, uint32_t mcs)
{
  const ul_alloc_t::type_t msg3_type = ul_alloc_t::MSG3;
  return alloc_ul(user, alloc, msg3_type, mcs);
}
/// Fills dl_sched_result.bc[] from the BC/Paging allocations made this TTI.
/// For each allocation: assigns the DCI position chosen by the PDCCH grid,
/// generates a Format1A DCI, and fills the BCCH- or PCCH-specific fields.
/// Allocations whose DCI generation fails are logged and skipped (they do not
/// appear in the result).
/// @param dci_result per-allocation DCI positions, indexed by ctrl_alloc_t::dci_idx
void tti_sched_result_t::set_bc_sched_result(const pdcch_grid_t::alloc_result_t& dci_result)
{
  for (const auto& bc_alloc : bc_allocs) {
    sched_interface::dl_sched_bc_t* bc = &dl_sched_result.bc[dl_sched_result.nof_bc_elems];

    // assign NCCE/L
    bc->dci.location = dci_result[bc_alloc.dci_idx]->dci_pos;

    /* Generate DCI format1A */
    prb_range_t prb_range = prb_range_t(bc_alloc.rbg_range, sched_params->P);
    int         tbs       = generate_format1a(
        prb_range.prb_start, prb_range.length(), bc_alloc.req_bytes, bc_alloc.rv, bc_alloc.rnti, &bc->dci);

    // Setup BC/Paging processes
    if (bc_alloc.alloc_type == alloc_type_t::DL_BC) {
      // NOTE(review): generate_format1a() returns a TBS taken from
      // srslte_ra_tbs_from_idx() (bits), while req_bytes is a byte count, so
      // this bits<=bytes comparison looks like a unit mismatch -- confirm the
      // intended check before changing it.
      if (tbs <= (int)bc_alloc.req_bytes) {
        log_h->warning("SCHED: Error SIB%d, rbgs=(%d,%d), dci=(%d,%d), len=%d\n",
                       bc_alloc.sib_idx + 1,
                       bc_alloc.rbg_range.rbg_start,
                       bc_alloc.rbg_range.rbg_end,
                       bc->dci.location.L,
                       bc->dci.location.ncce,
                       bc_alloc.req_bytes);
        continue;
      }

      // Setup BC process: index identifies which SIB this entry carries
      bc->index = bc_alloc.sib_idx;
      bc->type  = sched_interface::dl_sched_bc_t::BCCH;
      bc->tbs   = (uint32_t)bc_alloc.req_bytes;

      log_h->info("SCHED: SIB%d, rbgs=(%d,%d), dci=(%d,%d), rv=%d, len=%d, period=%d, mcs=%d\n",
                  bc_alloc.sib_idx + 1,
                  bc_alloc.rbg_range.rbg_start,
                  bc_alloc.rbg_range.rbg_end,
                  bc->dci.location.L,
                  bc->dci.location.ncce,
                  bc_alloc.rv,
                  bc_alloc.req_bytes,
                  sched_params->cfg->sibs[bc_alloc.sib_idx].period_rf,
                  bc->dci.tb[0].mcs_idx);
    } else {
      // Paging
      if (tbs <= 0) {
        log_h->warning("SCHED: Error Paging, rbgs=(%d,%d), dci=(%d,%d)\n",
                       bc_alloc.rbg_range.rbg_start,
                       bc_alloc.rbg_range.rbg_end,
                       bc->dci.location.L,
                       bc->dci.location.ncce);
        continue;
      }

      // Setup Paging process: unlike the BCCH branch, tbs here is the
      // generated TBS, not the requested byte count
      bc->type = sched_interface::dl_sched_bc_t::PCCH;
      bc->tbs  = (uint32_t)tbs;

      log_h->info("SCHED: PCH, rbgs=(%d,%d), dci=(%d,%d), tbs=%d, mcs=%d\n",
                  bc_alloc.rbg_range.rbg_start,
                  bc_alloc.rbg_range.rbg_end,
                  bc->dci.location.L,
                  bc->dci.location.ncce,
                  tbs,
                  bc->dci.tb[0].mcs_idx);
    }

    // only counted when the entry was actually filled (failed DCIs are skipped)
    dl_sched_result.nof_bc_elems++;
  }
}
/// Fills dl_sched_result.rar[] from the RAR allocations made this TTI.
/// For each RAR: assigns the PDCCH position, generates the Format1A DCI
/// (rv fixed to 0), copies the Msg3 grants, and logs each grant.
/// RARs whose DCI generation fails are logged and skipped.
/// @param dci_result per-allocation DCI positions, indexed by ctrl_alloc_t::dci_idx
void tti_sched_result_t::set_rar_sched_result(const pdcch_grid_t::alloc_result_t& dci_result)
{
  for (const auto& rar_alloc : rar_allocs) {
    sched_interface::dl_sched_rar_t* rar = &dl_sched_result.rar[dl_sched_result.nof_rar_elems];

    // Assign NCCE/L
    rar->dci.location = dci_result[rar_alloc.dci_idx]->dci_pos;

    /* Generate DCI format1A */
    prb_range_t prb_range = prb_range_t(rar_alloc.rbg_range, sched_params->P);
    int         tbs =
        generate_format1a(prb_range.prb_start, prb_range.length(), rar_alloc.req_bytes, 0, rar_alloc.rnti, &rar->dci);
    if (tbs <= 0) {
      log_h->warning("SCHED: Error RAR, ra_rnti_idx=%d, rbgs=(%d,%d), dci=(%d,%d)\n",
                     rar_alloc.rnti,
                     rar_alloc.rbg_range.rbg_start,
                     rar_alloc.rbg_range.rbg_end,
                     rar->dci.location.L,
                     rar->dci.location.ncce);
      continue;
    }

    // Setup RAR process: copy the Msg3 grants stored at allocation time
    rar->tbs        = rar_alloc.req_bytes;
    rar->nof_grants = rar_alloc.rar_grant.nof_grants;
    std::copy(&rar_alloc.rar_grant.msg3_grant[0], &rar_alloc.rar_grant.msg3_grant[rar->nof_grants], rar->msg3_grant);

    // Print RAR allocation result (one line per Msg3 grant)
    for (uint32_t i = 0; i < rar->nof_grants; ++i) {
      const auto& msg3_grant    = rar->msg3_grant[i];
      uint16_t    expected_rnti = msg3_grant.data.temp_crnti;
      log_h->info("SCHED: RAR, temp_crnti=0x%x, ra-rnti=%d, rbgs=(%d,%d), dci=(%d,%d), rar_grant_rba=%d, "
                  "rar_grant_mcs=%d\n",
                  expected_rnti,
                  rar_alloc.rnti,
                  rar_alloc.rbg_range.rbg_start,
                  rar_alloc.rbg_range.rbg_end,
                  rar->dci.location.L,
                  rar->dci.location.ncce,
                  msg3_grant.grant.rba,
                  msg3_grant.grant.trunc_mcs);
    }

    dl_sched_result.nof_rar_elems++;
  }
}
/// Fills dl_sched_result.data[] from the DL user-data allocations of this TTI.
/// For each allocation: assigns the PDCCH position and asks the UE object to
/// generate the DCI in its configured format (1 / 2 / 2A). Allocations whose
/// DCI generation fails (tbs <= 0) are logged and skipped.
/// @param dci_result per-allocation DCI positions, indexed by dl_alloc_t::dci_idx
void tti_sched_result_t::set_dl_data_sched_result(const pdcch_grid_t::alloc_result_t& dci_result)
{
  for (const auto& data_alloc : data_allocs) {
    sched_interface::dl_sched_data_t* data = &dl_sched_result.data[dl_sched_result.nof_data_elems];

    // Assign NCCE/L
    data->dci.location = dci_result[data_alloc.dci_idx]->dci_pos;

    // Generate DCI Format1/2/2A
    sched_ue*           user        = data_alloc.user_ptr;
    dl_harq_proc*       h           = user->get_dl_harq(data_alloc.pid);
    // snapshot of the pending-data counter before DCI generation, for logging
    uint32_t            data_before = user->get_pending_dl_new_data(get_tti_tx_dl());
    srslte_dci_format_t dci_format  = user->get_dci_format();
    bool                is_newtx    = h->is_empty();

    int tbs = 0;
    switch (dci_format) {
      case SRSLTE_DCI_FORMAT1:
        tbs = user->generate_format1(h, data, get_tti_tx_dl(), get_cfi(), data_alloc.user_mask);
        break;
      case SRSLTE_DCI_FORMAT2:
        tbs = user->generate_format2(h, data, get_tti_tx_dl(), get_cfi(), data_alloc.user_mask);
        break;
      case SRSLTE_DCI_FORMAT2A:
        tbs = user->generate_format2a(h, data, get_tti_tx_dl(), get_cfi(), data_alloc.user_mask);
        break;
      default:
        // tbs stays 0, so the tbs <= 0 path below drops the allocation
        Error("DCI format (%d) not implemented\n", dci_format);
    }

    if (tbs <= 0) {
      log_h->warning("SCHED: Error DL %s rnti=0x%x, pid=%d, mask=%s, tbs=%d, buffer=%d\n",
                     is_newtx ? "tx" : "retx",
                     user->get_rnti(),
                     h->get_id(),
                     data_alloc.user_mask.to_hex().c_str(),
                     tbs,
                     user->get_pending_dl_new_data(get_tti_tx_dl()));
      continue;
    }

    // Print Resulting DL Allocation
    log_h->info("SCHED: DL %s rnti=0x%x, pid=%d, mask=0x%s, dci=(%d,%d), n_rtx=%d, tbs=%d, buffer=%d/%d\n",
                !is_newtx ? "retx" : "tx",
                user->get_rnti(),
                h->get_id(),
                data_alloc.user_mask.to_hex().c_str(),
                data->dci.location.L,
                data->dci.location.ncce,
                h->nof_retx(0) + h->nof_retx(1),
                tbs,
                data_before,
                user->get_pending_dl_new_data(get_tti_tx_dl()));

    dl_sched_result.nof_data_elems++;
  }
}
/// Fills ul_sched_result.pusch[] from the UL allocations of this TTI.
/// Generates a Format0 DCI per allocation; Msg3 allocations use their fixed
/// MCS, and only allocations whose type needs a PDCCH receive a CCE position.
/// Failed generations (tbs <= 0) are logged and skipped; successful new
/// transmissions clear the user's pending Scheduling Request.
/// @param dci_result per-allocation DCI positions, indexed by ul_alloc_t::dci_idx
void tti_sched_result_t::set_ul_sched_result(const pdcch_grid_t::alloc_result_t& dci_result)
{
  /* Set UL data DCI locs and format */
  for (const auto& ul_alloc : ul_data_allocs) {
    sched_interface::ul_sched_data_t* pusch = &ul_sched_result.pusch[ul_sched_result.nof_dci_elems];

    sched_ue* user = ul_alloc.user_ptr;

    // Only NEWTX/ADAPT_RETX carry a PDCCH grant; otherwise the location stays {0,0}
    srslte_dci_location_t cce_range = {0, 0};
    if (ul_alloc.needs_pdcch()) {
      cce_range = dci_result[ul_alloc.dci_idx]->dci_pos;
    }

    /* Set fixed mcs if specified */
    int fixed_mcs = (ul_alloc.type == ul_alloc_t::MSG3) ? ul_alloc.mcs : -1;

    /* Generate DCI Format1A */
    // snapshot of the pending-data counter before DCI generation, for logging
    uint32_t pending_data_before = user->get_pending_ul_new_data(get_tti_tx_ul());
    int      tbs =
        user->generate_format0(pusch, get_tti_tx_ul(), ul_alloc.alloc, ul_alloc.needs_pdcch(), cce_range, fixed_mcs);

    ul_harq_proc* h = user->get_ul_harq(get_tti_tx_ul());
    if (tbs <= 0) {
      log_h->warning("SCHED: Error %s %s rnti=0x%x, pid=%d, dci=(%d,%d), prb=(%d,%d), tbs=%d, bsr=%d\n",
                     ul_alloc.type == ul_alloc_t::MSG3 ? "Msg3" : "UL",
                     ul_alloc.is_retx() ? "retx" : "tx",
                     user->get_rnti(),
                     h->get_id(),
                     pusch->dci.location.L,
                     pusch->dci.location.ncce,
                     ul_alloc.alloc.RB_start,
                     ul_alloc.alloc.RB_start + ul_alloc.alloc.L,
                     tbs,
                     user->get_pending_ul_new_data(get_tti_tx_ul()));
      continue;
    }

    // Allocation was successful
    if (ul_alloc.type == ul_alloc_t::NEWTX) {
      // Un-trigger SR
      user->unset_sr();
    }

    // Print Resulting UL Allocation
    log_h->info("SCHED: %s %s rnti=0x%x, pid=%d, dci=(%d,%d), prb=(%d,%d), n_rtx=%d, tbs=%d, bsr=%d (%d-%d)\n",
                ul_alloc.is_msg3() ? "Msg3" : "UL",
                ul_alloc.is_retx() ? "retx" : "tx",
                user->get_rnti(),
                h->get_id(),
                pusch->dci.location.L,
                pusch->dci.location.ncce,
                ul_alloc.alloc.RB_start,
                ul_alloc.alloc.RB_start + ul_alloc.alloc.L,
                h->nof_retx(0),
                tbs,
                user->get_pending_ul_new_data(get_tti_tx_ul()),
                pending_data_before,
                user->get_pending_ul_old_data());

    ul_sched_result.nof_dci_elems++;
  }
}
void tti_sched_result_t::generate_dcis()
{
/* Pick one of the possible DCI masks */
pdcch_grid_t::alloc_result_t dci_result;
// tti_alloc.get_pdcch_grid().result_to_string();
tti_alloc.get_pdcch_grid().get_allocs(&dci_result, &pdcch_mask);
/* Register final CFI */
dl_sched_result.cfi = tti_alloc.get_pdcch_grid().get_cfi();
/* Generate DCI formats and fill sched_result structs */
set_bc_sched_result(dci_result);
set_rar_sched_result(dci_result);
set_dl_data_sched_result(dci_result);
set_ul_sched_result(dci_result);
}
/// Number of control symbols for this TTI: the grid's CFI, plus one extra
/// symbol for narrow bandwidths (<= 10 PRBs).
uint32_t tti_sched_result_t::get_nof_ctrl_symbols() const
{
  const uint32_t extra_sym = (sched_params->cfg->cell.nof_prb <= 10) ? 1 : 0;
  return tti_alloc.get_cfi() + extra_sym;
}
/// Builds a DCI Format1A (localized type-2 allocation) for BC/Paging/RAR.
/// Searches the smallest I_tbs whose TBS (with N_prb1a = 2, then 3) fits the
/// requested payload.
/// @param rb_start  first PRB of the allocation
/// @param l_crb     number of contiguous PRBs
/// @param tbs_bytes payload size in bytes
/// @param rv        redundancy version to signal
/// @param rnti      destination RNTI
/// @param dci       output DCI struct
/// @return selected TBS in bits, or -1 if no MCS can carry the payload
int tti_sched_result_t::generate_format1a(uint32_t         rb_start,
                                          uint32_t         l_crb,
                                          uint32_t         tbs_bytes,
                                          uint32_t         rv,
                                          uint16_t         rnti,
                                          srslte_dci_dl_t* dci)
{
  /* Calculate I_tbs for this TBS */
  int tbs = tbs_bytes * 8;
  int mcs = -1;
  for (int i = 0; i < 27; i++) {
    if (srslte_ra_tbs_from_idx(i, 2) >= tbs) {
      dci->type2_alloc.n_prb1a = srslte_ra_type2_t::SRSLTE_RA_TYPE2_NPRB1A_2;
      mcs                      = i;
      tbs                      = srslte_ra_tbs_from_idx(i, 2);
      break;
    }
    if (srslte_ra_tbs_from_idx(i, 3) >= tbs) {
      dci->type2_alloc.n_prb1a = srslte_ra_type2_t::SRSLTE_RA_TYPE2_NPRB1A_3;
      mcs                      = i;
      tbs                      = srslte_ra_tbs_from_idx(i, 3);
      break;
    }
  }
  // BUGFIX: the failure branch used to test "i == 28", which the "i < 27"
  // loop can never produce; an oversized TBS then fell through with mcs == -1
  // and a garbage MCS was written into the DCI. Testing mcs directly is
  // robust regardless of the loop bound.
  if (mcs < 0) {
    Error("Can't allocate Format 1A for TBS=%d\n", tbs);
    return -1;
  }

  Debug("ra_tbs=%d/%d, tbs_bytes=%d, tbs=%d, mcs=%d\n",
        srslte_ra_tbs_from_idx(mcs, 2),
        srslte_ra_tbs_from_idx(mcs, 3),
        tbs_bytes,
        tbs,
        mcs);

  dci->alloc_type       = SRSLTE_RA_ALLOC_TYPE2;
  dci->type2_alloc.mode = srslte_ra_type2_t::SRSLTE_RA_TYPE2_LOC;
  dci->type2_alloc.riv  = srslte_ra_type2_to_riv(l_crb, rb_start, sched_params->cfg->cell.nof_prb);
  dci->pid              = 0;
  dci->tb[0].mcs_idx    = mcs;
  dci->tb[0].rv         = rv;
  dci->format           = SRSLTE_DCI_FORMAT1A;
  dci->rnti             = rnti;

  return tbs;
}
} // namespace srsenb

@ -274,7 +274,7 @@ void sched_tester::new_test_tti(uint32_t tti_)
} else {
tti_data.ul_sf_idx = (tti_data.tti_tx_ul + 10240 - FDD_HARQ_DELAY_MS) % 10;
}
tti_data.ul_pending_msg3 = carrier_schedulers[0]->ra_sched_ptr->find_pending_msg3(tti_data.tti_tx_ul);
tti_data.ul_pending_msg3 = carrier_schedulers[0]->get_ra_sched()->find_pending_msg3(tti_data.tti_tx_ul);
tti_data.current_cfi = sched_params.sched_cfg.nof_ctrl_symbols;
tti_data.used_cce.resize(srslte_regs_pdcch_ncce(&regs, tti_data.current_cfi));
tti_data.used_cce.reset();
@ -494,7 +494,7 @@ int sched_tester::assert_no_empty_allocs()
*/
int sched_tester::test_tti_result()
{
carrier_sched::tti_sched_result_t* tti_sched = carrier_schedulers[0]->get_tti_sched(tti_data.tti_rx);
const srsenb::tti_sched_result_t* tti_sched = carrier_schedulers[0]->get_tti_sched_view(tti_data.tti_rx);
// Helper Function: checks if there is any collision. If not, fills the mask
auto try_cce_fill = [&](const srslte_dci_location_t& dci_loc, const char* ch) {
@ -546,8 +546,8 @@ int sched_tester::test_tti_result()
CONDERROR(rar.tbs == 0, "Allocated RAR process with invalid TBS=%d\n", rar.tbs);
for (uint32_t j = 0; j < rar.nof_grants; ++j) {
const auto& msg3_grant = rar.msg3_grant[j];
const srsenb::ra_sched::pending_msg3_t& p =
carrier_schedulers[0]->ra_sched_ptr->find_pending_msg3(tti_sched->get_tti_tx_dl() + MSG3_DELAY_MS + TX_DELAY);
const srsenb::ra_sched::pending_msg3_t& p = carrier_schedulers[0]->get_ra_sched()->find_pending_msg3(
tti_sched->get_tti_tx_dl() + MSG3_DELAY_MS + TX_DELAY);
CONDERROR(not p.enabled, "Pending Msg3 should have been set\n");
uint32_t rba = srslte_ra_type2_to_riv(p.L, p.n_prb, cfg.cell.nof_prb);
CONDERROR(msg3_grant.grant.rba != rba, "Pending Msg3 RBA is not valid\n");
@ -555,7 +555,7 @@ int sched_tester::test_tti_result()
}
/* verify if sched_result "used_cce" coincide with sched "used_cce" */
auto* tti_alloc = carrier_schedulers[0]->get_tti_sched(tti_data.tti_rx);
auto* tti_alloc = carrier_schedulers[0]->get_tti_sched_view(tti_data.tti_rx);
if (tti_data.used_cce != tti_alloc->get_pdcch_mask()) {
std::string mask_str = tti_alloc->get_pdcch_mask().to_string();
TESTERROR("[TESTER] The used_cce do not match: (%s!=%s)\n", mask_str.c_str(), tti_data.used_cce.to_hex().c_str());
@ -757,7 +757,7 @@ int sched_tester::test_sibs()
int sched_tester::test_collisions()
{
carrier_sched::tti_sched_result_t* tti_sched = carrier_schedulers[0]->get_tti_sched(tti_data.tti_rx);
const srsenb::tti_sched_result_t* tti_sched = carrier_schedulers[0]->get_tti_sched_view(tti_data.tti_rx);
srsenb::prbmask_t ul_allocs(cfg.cell.nof_prb);
@ -901,7 +901,7 @@ int sched_tester::test_collisions()
rbgmask.reset(i);
}
}
if (rbgmask != carrier_schedulers[0]->get_tti_sched(tti_data.tti_rx)->get_dl_mask()) {
if (rbgmask != carrier_schedulers[0]->get_tti_sched_view(tti_data.tti_rx)->get_dl_mask()) {
TESTERROR("[TESTER] The UL PRB mask and the scheduler result UL mask are not consistent\n");
}
return SRSLTE_SUCCESS;

Loading…
Cancel
Save