added interface to configure multiple cells from the RRC

master
Francisco Paisana 5 years ago
parent c7343cf6d8
commit 2dc31ea440

@ -134,8 +134,8 @@ class mac_interface_rrc
{
public:
/* Provides cell configuration including SIB periodicity, etc. */
virtual int cell_cfg(sched_interface::cell_cfg_t* cell_cfg) = 0;
virtual void reset() = 0;
virtual int cell_cfg(const std::vector<sched_interface::cell_cfg_t>& cell_cfg) = 0;
virtual void reset() = 0;
/* Manages UE configuration context */
virtual int ue_cfg(uint16_t rnti, sched_interface::ue_cfg_t* cfg) = 0;

@ -21,6 +21,7 @@
#include "srslte/common/common.h"
#include "srslte/srslte.h"
#include <vector>
#ifndef SRSLTE_SCHED_INTERFACE_H
#define SRSLTE_SCHED_INTERFACE_H
@ -30,7 +31,7 @@ namespace srsenb {
class sched_interface
{
public:
virtual ~sched_interface(){};
virtual ~sched_interface() {}
const static uint32_t max_cce = 128;
const static uint32_t max_prb = 100;
@ -59,7 +60,7 @@ public:
int max_aggr_level;
} sched_args_t;
typedef struct {
struct cell_cfg_t {
// Main cell configuration (used to calculate DCI locations in scheduler)
srslte_cell_t cell;
@ -92,7 +93,13 @@ public:
uint32_t srs_subframe_offset;
uint32_t srs_bw_config;
} cell_cfg_t;
struct scell_cfg_t {
uint32_t enb_cc_idx = 0;
bool cross_carrier_scheduling = false;
bool ul_allowed = false;
};
std::vector<scell_cfg_t> scell_list;
};
typedef struct {
int priority;
@ -205,8 +212,8 @@ public:
/******************* Scheduler Control ****************************/
/* Provides cell configuration including SIB periodicity, etc. */
virtual int cell_cfg(cell_cfg_t* cell_cfg) = 0;
virtual int reset() = 0;
virtual int cell_cfg(const std::vector<cell_cfg_t>& cell_cfg) = 0;
virtual int reset() = 0;
/* Manages UE scheduling context */
virtual int ue_cfg(uint16_t rnti, ue_cfg_t* cfg) = 0;

@ -36,7 +36,7 @@
namespace srsenb {
class mac : public mac_interface_phy_lte, public mac_interface_rlc, public mac_interface_rrc
class mac final : public mac_interface_phy_lte, public mac_interface_rlc, public mac_interface_rrc
{
public:
mac();
@ -78,8 +78,8 @@ public:
/******** Interface from RRC (RRC -> MAC) ****************/
/* Provides cell configuration including SIB periodicity, etc. */
int cell_cfg(sched_interface::cell_cfg_t* cell_cfg);
void reset();
int cell_cfg(const std::vector<sched_interface::cell_cfg_t>& cell_cfg) override;
void reset() override;
/* Manages UE scheduling context */
int ue_cfg(uint16_t rnti, sched_interface::ue_cfg_t* cfg);
@ -123,10 +123,8 @@ private:
bool started = false;
/* Scheduler unit */
sched scheduler;
dl_metric_rr sched_metric_dl_rr;
ul_metric_rr sched_metric_ul_rr;
sched_interface::cell_cfg_t cell_config;
sched scheduler;
std::vector<sched_interface::cell_cfg_t> cell_config;
sched_interface::dl_pdu_mch_t mch;

@ -50,22 +50,29 @@ inline bool is_in_tti_interval(uint32_t tti, uint32_t tti1, uint32_t tti2)
} // namespace sched_utils
//! struct to bundle together all the sched arguments, and share them with all the sched sub-components
//! structs to bundle together all the sched arguments, and share them with all the sched sub-components
struct sched_cell_params_t {
sched_interface::cell_cfg_t* cfg = nullptr;
uint32_t P = 0;
uint32_t nof_rbgs = 0;
// convenience getters
uint32_t prb_to_rbg(uint32_t nof_prbs) const { return (nof_prbs + (P - 1)) / P; }
uint32_t nof_prb() const { return cfg->cell.nof_prb; }
};
class sched_params_t
{
public:
srslte::log* log_h = nullptr;
sched_interface::cell_cfg_t* cfg = nullptr;
srslte::log* log_h = nullptr;
std::vector<sched_cell_params_t> cell_cfg;
sched_interface::sched_args_t sched_cfg = {};
srslte_regs_t* regs = nullptr;
std::array<sched_ue::sched_dci_cce_t, 3> common_locations = {};
std::array<std::array<sched_ue::sched_dci_cce_t, 10>, 3> rar_locations = {};
std::array<uint32_t, 3> nof_cce_table = {}; ///< map cfix -> nof cces in PDCCH
uint32_t P = 0;
uint32_t nof_rbgs = 0;
sched_params_t();
bool set_cfg(srslte::log* log_, sched_interface::cell_cfg_t* cfg_, srslte_regs_t* regs_);
bool set_cfg(srslte::log* log_, std::vector<sched_interface::cell_cfg_t>* cfg_, srslte_regs_t* regs_);
};
/* Caution: User addition (ue_cfg) and removal (ue_rem) are not thread-safe
@ -88,16 +95,16 @@ public:
{
public:
/* Virtual methods for user metric calculation */
virtual void set_params(const sched_params_t& sched_params_) = 0;
virtual void sched_users(std::map<uint16_t, sched_ue>& ue_db, dl_sf_sched_itf* tti_sched, uint32_t cc_idx) = 0;
virtual void set_params(const sched_params_t& sched_params_, uint32_t enb_cc_idx_) = 0;
virtual void sched_users(std::map<uint16_t, sched_ue>& ue_db, dl_sf_sched_itf* tti_sched) = 0;
};
class metric_ul
{
public:
/* Virtual methods for user metric calculation */
virtual void set_params(const sched_params_t& sched_params_) = 0;
virtual void sched_users(std::map<uint16_t, sched_ue>& ue_db, ul_sf_sched_itf* tti_sched, uint32_t cc_idx) = 0;
virtual void set_params(const sched_params_t& sched_params_, uint32_t enb_cc_idx_) = 0;
virtual void sched_users(std::map<uint16_t, sched_ue>& ue_db, ul_sf_sched_itf* tti_sched) = 0;
};
/*************************************************************
@ -110,8 +117,7 @@ public:
~sched();
void init(rrc_interface_mac* rrc, srslte::log* log);
void set_metric(metric_dl* dl_metric, metric_ul* ul_metric);
int cell_cfg(cell_cfg_t* cell_cfg) override;
int cell_cfg(const std::vector<cell_cfg_t>& cell_cfg) override;
void set_sched_cfg(sched_args_t* sched_cfg);
int reset() final;
@ -176,7 +182,7 @@ protected:
pthread_rwlock_t rwlock;
cell_cfg_t cfg;
std::vector<cell_cfg_t> cfg;
// This is for computing DCI locations
srslte_regs_t regs;

@ -35,7 +35,6 @@ public:
explicit carrier_sched(rrc_interface_mac* rrc_, std::map<uint16_t, sched_ue>* ue_db_, uint32_t enb_cc_idx_);
void reset();
void carrier_cfg(const sched_params_t& sched_params_);
void set_metric(sched::metric_dl* dl_metric_, sched::metric_ul* ul_metric_);
void set_dl_tti_mask(uint8_t* tti_mask, uint32_t nof_sfs);
sf_sched* generate_tti_result(uint32_t tti_rx);
int dl_rach_info(dl_sched_rar_info_t rar_info);
@ -53,11 +52,12 @@ private:
// args
const sched_params_t* sched_params = nullptr;
const cell_cfg_t* cc_cfg = nullptr;
srslte::log* log_h = nullptr;
rrc_interface_mac* rrc = nullptr;
std::map<uint16_t, sched_ue>* ue_db = nullptr;
metric_dl* dl_metric = nullptr;
metric_ul* ul_metric = nullptr;
std::unique_ptr<metric_dl> dl_metric;
std::unique_ptr<metric_ul> ul_metric;
const uint32_t enb_cc_idx;
// derived from args

@ -113,7 +113,7 @@ public:
rbg_range_t rbg_range;
};
void init(const sched_params_t& sched_params_, uint32_t cc_idx_);
void init(const sched_params_t& sched_params_, uint32_t enb_cc_idx_);
void new_tti(const tti_params_t& tti_params_, uint32_t start_cfi);
dl_ctrl_alloc_t alloc_dl_ctrl(uint32_t aggr_lvl, alloc_type_t alloc_type);
alloc_outcome_t alloc_dl_data(sched_ue* user, const rbgmask_t& user_mask);
@ -131,11 +131,12 @@ private:
alloc_outcome_t alloc_dl(uint32_t aggr_lvl, alloc_type_t alloc_type, rbgmask_t alloc_mask, sched_ue* user = nullptr);
// consts
const sched_params_t* sched_params = nullptr;
srslte::log* log_h = nullptr;
uint32_t nof_rbgs = 0;
uint32_t si_n_rbg = 0, rar_n_rbg = 0;
uint32_t cc_idx = 0;
const sched_params_t* sched_params = nullptr;
const sched_interface::cell_cfg_t* cell_cfg = nullptr;
srslte::log* log_h = nullptr;
uint32_t nof_rbgs = 0;
uint32_t si_n_rbg = 0, rar_n_rbg = 0;
uint32_t enb_cc_idx = 0;
// tti const
const tti_params_t* tti_params = nullptr;
@ -286,9 +287,10 @@ private:
void set_ul_sched_result(const pdcch_grid_t::alloc_result_t& dci_result);
// consts
const sched_params_t* sched_params = nullptr;
srslte::log* log_h = nullptr;
uint32_t enb_cc_idx = 0;
const sched_params_t* sched_params = nullptr;
const sched_cell_params_t* cell_cfg = nullptr;
srslte::log* log_h = nullptr;
uint32_t enb_cc_idx = 0;
// internal state
tti_params_t tti_params{10241};

@ -31,31 +31,37 @@ class dl_metric_rr : public sched::metric_dl
const static int MAX_RBG = 25;
public:
void set_params(const sched_params_t& sched_params_) final;
void sched_users(std::map<uint16_t, sched_ue>& ue_db, dl_sf_sched_itf* tti_sched, uint32_t cc_idx) final;
void set_params(const sched_params_t& sched_params_, uint32_t enb_cc_idx) final;
void sched_users(std::map<uint16_t, sched_ue>& ue_db, dl_sf_sched_itf* tti_sched) final;
private:
bool find_allocation(uint32_t nof_rbg, rbgmask_t* rbgmask);
dl_harq_proc* allocate_user(sched_ue* user, uint32_t cc_idx);
dl_harq_proc* allocate_user(sched_ue* user);
srslte::log* log_h = nullptr;
const sched_params_t* sched_params = nullptr;
const sched_cell_params_t* cell_params = nullptr;
srslte::log* log_h = nullptr;
uint32_t enb_cc_idx = 0;
dl_sf_sched_itf* tti_alloc = nullptr;
};
class ul_metric_rr : public sched::metric_ul
{
public:
void set_params(const sched_params_t& sched_params_) final;
void sched_users(std::map<uint16_t, sched_ue>& ue_db, ul_sf_sched_itf* tti_sched, uint32_t cc_idx) final;
void set_params(const sched_params_t& sched_params_, uint32_t enb_cc_idx) final;
void sched_users(std::map<uint16_t, sched_ue>& ue_db, ul_sf_sched_itf* tti_sched) final;
private:
bool find_allocation(uint32_t L, ul_harq_proc::ul_alloc_t* alloc);
ul_harq_proc* allocate_user_newtx_prbs(sched_ue* user, uint32_t cc_idx);
ul_harq_proc* allocate_user_retx_prbs(sched_ue* user, uint32_t cc_idx);
ul_harq_proc* allocate_user_newtx_prbs(sched_ue* user);
ul_harq_proc* allocate_user_retx_prbs(sched_ue* user);
srslte::log* log_h = nullptr;
ul_sf_sched_itf* tti_alloc = nullptr;
uint32_t current_tti;
const sched_params_t* sched_params = nullptr;
const sched_cell_params_t* cell_params = nullptr;
srslte::log* log_h = nullptr;
ul_sf_sched_itf* tti_alloc = nullptr;
uint32_t current_tti = 0;
uint32_t enb_cc_idx = 0;
};
} // namespace srsenb

@ -34,13 +34,14 @@
namespace srsenb {
class sched_params_t;
struct sched_cell_params_t;
struct tti_params_t;
struct sched_ue_carrier {
const static int SCHED_MAX_HARQ_PROC = SRSLTE_FDD_NOF_HARQ;
sched_ue_carrier(sched_interface::ue_cfg_t* cfg_,
srslte_cell_t* cell_cfg_,
const sched_cell_params_t* cell_cfg_,
uint16_t rnti_,
uint32_t cc_idx_,
srslte::log* log_);
@ -54,11 +55,12 @@ struct sched_ue_carrier {
ul_harq_proc* get_ul_harq(uint32_t tti);
uint32_t get_pending_ul_old_data();
uint32_t get_aggr_level(uint32_t nof_bits);
int alloc_tbs(uint32_t nof_prb, uint32_t nof_re, uint32_t req_bytes, bool is_ul, int* mcs);
int alloc_tbs_dl(uint32_t nof_prb, uint32_t nof_re, uint32_t req_bytes, int* mcs);
int alloc_tbs_ul(uint32_t nof_prb, uint32_t nof_re, uint32_t req_bytes, int* mcs);
uint32_t get_required_prb_ul(uint32_t req_bytes);
uint32_t get_aggr_level(uint32_t nof_bits);
int alloc_tbs(uint32_t nof_prb, uint32_t nof_re, uint32_t req_bytes, bool is_ul, int* mcs);
int alloc_tbs_dl(uint32_t nof_prb, uint32_t nof_re, uint32_t req_bytes, int* mcs);
int alloc_tbs_ul(uint32_t nof_prb, uint32_t nof_re, uint32_t req_bytes, int* mcs);
uint32_t get_required_prb_ul(uint32_t req_bytes);
const sched_cell_params_t* get_cell_cfg() const { return cell_params; }
std::array<dl_harq_proc, SCHED_MAX_HARQ_PROC> dl_harq = {};
std::array<ul_harq_proc, SCHED_MAX_HARQ_PROC> ul_harq = {};
@ -77,9 +79,9 @@ struct sched_ue_carrier {
int fixed_mcs_ul = 0, fixed_mcs_dl = 0;
private:
srslte::log* log_h = nullptr;
sched_interface::ue_cfg_t* cfg = nullptr;
srslte_cell_t* cell = nullptr;
srslte::log* log_h = nullptr;
sched_interface::ue_cfg_t* cfg = nullptr;
const sched_cell_params_t* cell_params = nullptr;
uint32_t cc_idx;
uint16_t rnti;
};
@ -148,8 +150,6 @@ public:
uint32_t get_required_prb_dl(uint32_t cc_idx, uint32_t req_bytes, uint32_t nof_ctrl_symbols);
uint32_t get_required_prb_ul(uint32_t cc_idx, uint32_t req_bytes);
uint32_t prb_to_rbg(uint32_t nof_prb);
uint32_t rgb_to_prb(uint32_t nof_rbg);
uint32_t get_pending_dl_new_data();
uint32_t get_pending_ul_new_data(uint32_t tti);
@ -204,7 +204,7 @@ public:
srslte_dci_format_t get_dci_format();
sched_dci_cce_t* get_locations(uint32_t current_cfi, uint32_t sf_idx);
sched_ue_carrier* get_ue_carrier(uint32_t cc_idx) { return &carriers[cc_idx]; }
sched_ue_carrier* get_ue_carrier(uint32_t enb_cc_idx);
bool needs_cqi(uint32_t tti, uint32_t cc_idx, bool will_send = false);
uint32_t get_max_retx();
@ -231,7 +231,7 @@ private:
bool is_sr_triggered();
int alloc_pdu(int tbs, sched_interface::dl_sched_pdu_t* pdu);
uint32_t format1_count_prb(const rbgmask_t& bitmask);
uint32_t format1_count_prb(const rbgmask_t& bitmask, uint32_t cc_idx);
static bool bearer_is_ul(ue_bearer_t* lch);
static bool bearer_is_dl(const ue_bearer_t* lch);

@ -76,7 +76,7 @@ struct meas_cell_cfg_t {
struct scell_cfg_t {
uint32_t cell_id;
bool cross_carrier_sched;
bool cross_carrier_sched = false;
uint32_t sched_cell_id;
bool ul_allowed;
};

@ -76,8 +76,6 @@ bool mac::init(const mac_args_t& args_,
cell = *cell_;
scheduler.init(rrc, log_h);
// Set default scheduler (RR)
scheduler.set_metric(&sched_metric_dl_rr, &sched_metric_ul_rr);
// Set default scheduler configuration
scheduler.set_sched_cfg(&args.sched);
@ -256,10 +254,10 @@ int mac::ue_rem(uint16_t rnti)
return 0;
}
int mac::cell_cfg(sched_interface::cell_cfg_t* cell_cfg)
int mac::cell_cfg(const std::vector<sched_interface::cell_cfg_t>& cell_cfg_)
{
this->cell_config = *cell_cfg;
return scheduler.cell_cfg(cell_cfg);
cell_config = cell_cfg_;
return scheduler.cell_cfg(cell_config);
}
void mac::get_metrics(mac_metrics_t metrics[ENB_METRICS_MAX_USERS])
@ -672,8 +670,8 @@ int mac::get_mch_sched(uint32_t tti, bool is_mcch, dl_sched_list_t& dl_sched_res
srslte_ra_tb_t mcs_data = {};
mcs.mcs_idx = this->sib13.mbsfn_area_info_list_r9[0].mcch_cfg_r9.sig_mcs_r9.to_number();
mcs_data.mcs_idx = this->mcch.msg.c1().mbsfn_area_cfg_r9().pmch_info_list_r9[0].pmch_cfg_r9.data_mcs_r9;
srslte_dl_fill_ra_mcs(&mcs, 0, this->cell_config.cell.nof_prb, false);
srslte_dl_fill_ra_mcs(&mcs_data, 0, this->cell_config.cell.nof_prb, false);
srslte_dl_fill_ra_mcs(&mcs, 0, cell_config[0].cell.nof_prb, false);
srslte_dl_fill_ra_mcs(&mcs_data, 0, cell_config[0].cell.nof_prb, false);
if (is_mcch) {
build_mch_sched(mcs_data.tbs);
mch.mcch_payload = mcch_payload_buffer;

@ -62,16 +62,38 @@ sched_params_t::sched_params_t()
sched_cfg.max_aggr_level = 3;
}
bool sched_params_t::set_cfg(srslte::log* log_, sched_interface::cell_cfg_t* cfg_, srslte_regs_t* regs_)
bool sched_params_t::set_cfg(srslte::log* log_, std::vector<sched_interface::cell_cfg_t>* cfg_, srslte_regs_t* regs_)
{
log_h = log_;
cfg = cfg_;
regs = regs_;
// Basic cell config checks
if (cfg->si_window_ms == 0) {
Error("SCHED: Invalid si-window length 0 ms\n");
return false;
// copy cell cfgs
cell_cfg.resize(cfg_->size());
for (uint32_t i = 0; i < cfg_->size(); ++i) {
sched_cell_params_t& item = cell_cfg[i];
item.cfg = &(*cfg_)[i];
// Basic cell config checks
if (item.cfg->si_window_ms == 0) {
Error("SCHED: Invalid si-window length 0 ms\n");
return false;
}
item.P = srslte_ra_type0_P(item.cfg->cell.nof_prb);
item.nof_rbgs = srslte::ceil_div(item.cfg->cell.nof_prb, item.P);
// PRACH has to fit within the PUSCH space
bool invalid_prach = item.cfg->cell.nof_prb == 6 and (item.cfg->prach_freq_offset + 6 > item.cfg->cell.nof_prb);
invalid_prach |= item.cfg->cell.nof_prb > 6 and
((item.cfg->prach_freq_offset + 6) > (item.cfg->cell.nof_prb - item.cfg->nrb_pucch) or
(int) item.cfg->prach_freq_offset < item.cfg->nrb_pucch);
if (invalid_prach) {
log_h->error("Invalid PRACH configuration: frequency offset=%d outside bandwidth limits\n",
item.cfg->prach_freq_offset);
log_h->console("Invalid PRACH configuration: frequency offset=%d outside bandwidth limits\n",
item.cfg->prach_freq_offset);
return false;
}
}
// Compute Common locations for DCI for each CFI
@ -86,9 +108,6 @@ bool sched_params_t::set_cfg(srslte::log* log_, sched_interface::cell_cfg_t* cfg
}
}
P = srslte_ra_type0_P(cfg->cell.nof_prb);
nof_rbgs = srslte::ceil_div(cfg->cell.nof_prb, P);
// precompute nof cces in PDCCH for each CFI
for (uint32_t cfix = 0; cfix < nof_cce_table.size(); ++cfix) {
int ret = srslte_regs_pdcch_ncce(regs, cfix + 1);
@ -108,17 +127,6 @@ bool sched_params_t::set_cfg(srslte::log* log_, sched_interface::cell_cfg_t* cfg
return false;
}
// PRACH has to fit within the PUSCH space
bool invalid_prach = cfg->cell.nof_prb == 6 and (cfg->prach_freq_offset + 6 > cfg->cell.nof_prb);
invalid_prach |= cfg->cell.nof_prb > 6 and ((cfg->prach_freq_offset + 6) > (cfg->cell.nof_prb - cfg->nrb_pucch) or
(int) cfg->prach_freq_offset < cfg->nrb_pucch);
if (invalid_prach) {
log_h->error("Invalid PRACH configuration: frequency offset=%d outside bandwidth limits\n", cfg->prach_freq_offset);
log_h->console("Invalid PRACH configuration: frequency offset=%d outside bandwidth limits\n",
cfg->prach_freq_offset);
return false;
}
return true;
}
@ -153,7 +161,7 @@ void sched::init(rrc_interface_mac* rrc_, srslte::log* log)
log_h = log;
rrc = rrc_;
// Initialize Independent carrier schedulers
// Initialize first carrier scheduler
carrier_schedulers.emplace_back(new carrier_sched{rrc, &ue_db, 0});
reset();
@ -178,19 +186,12 @@ void sched::set_sched_cfg(sched_interface::sched_args_t* sched_cfg_)
}
}
void sched::set_metric(sched::metric_dl* dl_metric_, sched::metric_ul* ul_metric_)
int sched::cell_cfg(const std::vector<sched_interface::cell_cfg_t>& cell_cfg)
{
for (std::unique_ptr<carrier_sched>& c : carrier_schedulers) {
c->set_metric(dl_metric_, ul_metric_);
}
}
int sched::cell_cfg(sched_interface::cell_cfg_t* cell_cfg)
{
cfg = *cell_cfg;
cfg = cell_cfg;
// Get DCI locations
if (srslte_regs_init(&regs, cfg.cell) != LIBLTE_SUCCESS) {
if (srslte_regs_init(&regs, cfg[0].cell) != LIBLTE_SUCCESS) {
Error("Getting DCI locations\n");
return SRSLTE_ERROR;
}
@ -200,7 +201,14 @@ int sched::cell_cfg(sched_interface::cell_cfg_t* cell_cfg)
return -1;
}
// Initiate the tti_scheduler for each TTI
// Create remaining cells, if not created yet
uint32_t prev_size = carrier_schedulers.size();
carrier_schedulers.resize(sched_params.cell_cfg.size());
for (uint32_t i = prev_size; i < sched_params.cell_cfg.size(); ++i) {
carrier_schedulers[i].reset(new carrier_sched{rrc, &ue_db, i});
}
// Setup the ra/bc/tti_scheduler for each TTI
for (std::unique_ptr<carrier_sched>& c : carrier_schedulers) {
c->carrier_cfg(sched_params);
}

@ -20,6 +20,7 @@
*/
#include "srsenb/hdr/stack/mac/scheduler_carrier.h"
#include "srsenb/hdr/stack/mac/scheduler_metric.h"
#define Error(fmt, ...) log_h->error(fmt, ##__VA_ARGS__)
#define Warning(fmt, ...) log_h->warning(fmt, ##__VA_ARGS__)
@ -304,25 +305,28 @@ void sched::carrier_sched::carrier_cfg(const sched_params_t& sched_params_)
// sched::cfg is now fully set
sched_params = &sched_params_;
log_h = sched_params->log_h;
cc_cfg = sched_params->cell_cfg[enb_cc_idx].cfg;
const cell_cfg_t* cfg_ = sched_params->cfg;
std::lock_guard<std::mutex> lock(carrier_mutex);
// init Broadcast/RA schedulers
bc_sched_ptr.reset(new bc_sched{*sched_params->cfg, rrc});
ra_sched_ptr.reset(new ra_sched{*sched_params->cfg, log_h, *ue_db});
bc_sched_ptr.reset(new bc_sched{*cc_cfg, rrc});
ra_sched_ptr.reset(new ra_sched{*cc_cfg, log_h, *ue_db});
dl_metric->set_params(*sched_params);
ul_metric->set_params(*sched_params);
// Setup data scheduling algorithms
dl_metric.reset(new srsenb::dl_metric_rr{});
dl_metric->set_params(*sched_params, enb_cc_idx);
ul_metric.reset(new srsenb::ul_metric_rr{});
ul_metric->set_params(*sched_params, enb_cc_idx);
// Setup constant PUCCH/PRACH mask
pucch_mask.resize(cfg_->cell.nof_prb);
if (cfg_->nrb_pucch > 0) {
pucch_mask.fill(0, (uint32_t)cfg_->nrb_pucch);
pucch_mask.fill(cfg_->cell.nof_prb - cfg_->nrb_pucch, cfg_->cell.nof_prb);
pucch_mask.resize(cc_cfg->cell.nof_prb);
if (cc_cfg->nrb_pucch > 0) {
pucch_mask.fill(0, (uint32_t)cc_cfg->nrb_pucch);
pucch_mask.fill(cc_cfg->cell.nof_prb - cc_cfg->nrb_pucch, cc_cfg->cell.nof_prb);
}
prach_mask.resize(cfg_->cell.nof_prb);
prach_mask.fill(cfg_->prach_freq_offset, cfg_->prach_freq_offset + 6);
prach_mask.resize(cc_cfg->cell.nof_prb);
prach_mask.fill(cc_cfg->prach_freq_offset, cc_cfg->prach_freq_offset + 6);
// Initiate the tti_scheduler for each TTI
for (sf_sched& tti_sched : sf_scheds) {
@ -330,12 +334,6 @@ void sched::carrier_sched::carrier_cfg(const sched_params_t& sched_params_)
}
}
void sched::carrier_sched::set_metric(sched::metric_dl* dl_metric_, sched::metric_ul* ul_metric_)
{
dl_metric = dl_metric_;
ul_metric = ul_metric_;
}
void sched::carrier_sched::set_dl_tti_mask(uint8_t* tti_mask, uint32_t nof_sfs)
{
sf_dl_mask.assign(tti_mask, tti_mask + nof_sfs);
@ -435,15 +433,15 @@ void sched::carrier_sched::alloc_dl_users(sf_sched* tti_result)
}
// NOTE: In case of 6 PRBs, do not transmit if there is going to be a PRACH in the UL to avoid collisions
if (sched_params->cfg->cell.nof_prb == 6) {
uint32_t tti_rx_ack = TTI_RX_ACK(tti_result->get_tti_rx());
if (srslte_prach_tti_opportunity_config_fdd(sched_params->cfg->prach_config, tti_rx_ack, -1)) {
if (cc_cfg->cell.nof_prb == 6) {
uint32_t tti_rx_ack = TTI_RX_ACK(tti_result->get_tti_rx());
if (srslte_prach_tti_opportunity_config_fdd(cc_cfg->prach_config, tti_rx_ack, -1)) {
tti_result->get_dl_mask().fill(0, tti_result->get_dl_mask().size());
}
}
// call DL scheduler metric to fill RB grid
dl_metric->sched_users(*ue_db, tti_result, enb_cc_idx);
dl_metric->sched_users(*ue_db, tti_result);
}
int sched::carrier_sched::alloc_ul_users(sf_sched* tti_sched)
@ -452,7 +450,7 @@ int sched::carrier_sched::alloc_ul_users(sf_sched* tti_sched)
prbmask_t& ul_mask = tti_sched->get_ul_mask();
/* reserve PRBs for PRACH */
if (srslte_prach_tti_opportunity_config_fdd(sched_params->cfg->prach_config, tti_tx_ul, -1)) {
if (srslte_prach_tti_opportunity_config_fdd(cc_cfg->prach_config, tti_tx_ul, -1)) {
ul_mask = prach_mask;
log_h->debug("SCHED: Allocated PRACH RBs. Mask: 0x%s\n", prach_mask.to_hex().c_str());
}
@ -461,7 +459,7 @@ int sched::carrier_sched::alloc_ul_users(sf_sched* tti_sched)
ra_sched_ptr->ul_sched(tti_sched);
/* reserve PRBs for PUCCH */
if (sched_params->cfg->cell.nof_prb != 6 and (ul_mask & pucch_mask).any()) {
if (cc_cfg->cell.nof_prb != 6 and (ul_mask & pucch_mask).any()) {
log_h->error("There was a collision with the PUCCH. current mask=0x%s, pucch_mask=0x%s\n",
ul_mask.to_hex().c_str(),
pucch_mask.to_hex().c_str());
@ -469,7 +467,7 @@ int sched::carrier_sched::alloc_ul_users(sf_sched* tti_sched)
ul_mask |= pucch_mask;
/* Call scheduler for UL data */
ul_metric->sched_users(*ue_db, tti_sched, enb_cc_idx);
ul_metric->sched_users(*ue_db, tti_sched);
return SRSLTE_SUCCESS;
}

@ -263,14 +263,15 @@ std::string pdcch_grid_t::result_to_string(bool verbose) const
* TTI resource Scheduling Methods
*******************************************************/
void sf_grid_t::init(const sched_params_t& sched_params_, uint32_t cc_idx_)
void sf_grid_t::init(const sched_params_t& sched_params_, uint32_t enb_cc_idx_)
{
sched_params = &sched_params_;
enb_cc_idx = enb_cc_idx_;
log_h = sched_params->log_h;
nof_rbgs = sched_params->nof_rbgs;
si_n_rbg = srslte::ceil_div(4, sched_params->P);
rar_n_rbg = srslte::ceil_div(3, sched_params->P);
cc_idx = cc_idx_;
cell_cfg = sched_params->cell_cfg[enb_cc_idx].cfg;
nof_rbgs = sched_params->cell_cfg[enb_cc_idx].nof_rbgs;
si_n_rbg = srslte::ceil_div(4, sched_params->cell_cfg[enb_cc_idx].P);
rar_n_rbg = srslte::ceil_div(3, sched_params->cell_cfg[enb_cc_idx].P);
pdcch_alloc.init(*sched_params);
}
@ -284,7 +285,7 @@ void sf_grid_t::new_tti(const tti_params_t& tti_params_, uint32_t start_cfi)
dl_mask.reset();
dl_mask.resize(nof_rbgs);
ul_mask.reset();
ul_mask.resize(sched_params->cfg->cell.nof_prb);
ul_mask.resize(cell_cfg->cell.nof_prb);
pdcch_alloc.new_tti(*tti_params, start_cfi);
}
@ -342,8 +343,9 @@ sf_grid_t::dl_ctrl_alloc_t sf_grid_t::alloc_dl_ctrl(uint32_t aggr_lvl, alloc_typ
alloc_outcome_t sf_grid_t::alloc_dl_data(sched_ue* user, const rbgmask_t& user_mask)
{
srslte_dci_format_t dci_format = user->get_dci_format();
uint32_t nof_bits = srslte_dci_format_sizeof(&sched_params->cfg->cell, nullptr, nullptr, dci_format);
uint32_t aggr_level = user->get_ue_carrier(cc_idx)->get_aggr_level(nof_bits);
uint32_t nof_bits =
srslte_dci_format_sizeof(const_cast<srslte_cell_t*>(&cell_cfg->cell), nullptr, nullptr, dci_format);
uint32_t aggr_level = user->get_ue_carrier(enb_cc_idx)->get_aggr_level(nof_bits);
return alloc_dl(aggr_level, alloc_type_t::DL_DATA, user_mask, user);
}
@ -361,8 +363,9 @@ alloc_outcome_t sf_grid_t::alloc_ul_data(sched_ue* user, ul_harq_proc::ul_alloc_
// Generate PDCCH except for RAR and non-adaptive retx
if (needs_pdcch) {
uint32_t nof_bits = srslte_dci_format_sizeof(&sched_params->cfg->cell, nullptr, nullptr, SRSLTE_DCI_FORMAT0);
uint32_t aggr_idx = user->get_ue_carrier(cc_idx)->get_aggr_level(nof_bits);
uint32_t nof_bits =
srslte_dci_format_sizeof(const_cast<srslte_cell_t*>(&cell_cfg->cell), nullptr, nullptr, SRSLTE_DCI_FORMAT0);
uint32_t aggr_idx = user->get_ue_carrier(enb_cc_idx)->get_aggr_level(nof_bits);
if (not pdcch_alloc.alloc_dci(alloc_type_t::UL_DATA, aggr_idx, user)) {
if (log_h->get_level() == srslte::LOG_LEVEL_DEBUG) {
log_h->debug("No space in PDCCH for rnti=0x%x UL tx. Current PDCCH allocation: %s\n",
@ -386,9 +389,10 @@ void sf_sched::init(const sched_params_t& sched_params_, uint32_t enb_cc_idx_)
{
sched_params = &sched_params_;
enb_cc_idx = enb_cc_idx_;
cell_cfg = &sched_params->cell_cfg[enb_cc_idx];
log_h = sched_params->log_h;
tti_alloc.init(*sched_params, 0);
max_msg3_prb = std::max(6u, sched_params->cfg->cell.nof_prb - (uint32_t)sched_params->cfg->nrb_pucch);
max_msg3_prb = std::max(6u, cell_cfg->cfg->cell.nof_prb - (uint32_t)cell_cfg->cfg->nrb_pucch);
}
void sf_sched::new_tti(uint32_t tti_rx_, uint32_t start_cfi)
@ -409,10 +413,10 @@ void sf_sched::new_tti(uint32_t tti_rx_, uint32_t start_cfi)
ul_data_allocs.clear();
// setup first prb to be used for msg3 alloc
last_msg3_prb = sched_params->cfg->nrb_pucch;
last_msg3_prb = cell_cfg->cfg->nrb_pucch;
uint32_t tti_msg3_alloc = TTI_ADD(tti_params.tti_tx_ul, MSG3_DELAY_MS);
if (srslte_prach_tti_opportunity_config_fdd(sched_params->cfg->prach_config, tti_msg3_alloc, -1)) {
last_msg3_prb = std::max(last_msg3_prb, sched_params->cfg->prach_freq_offset + 6);
if (srslte_prach_tti_opportunity_config_fdd(cell_cfg->cfg->prach_config, tti_msg3_alloc, -1)) {
last_msg3_prb = std::max(last_msg3_prb, cell_cfg->cfg->prach_freq_offset + 6);
}
}
@ -466,7 +470,7 @@ sf_sched::ctrl_code_t sf_sched::alloc_dl_ctrl(uint32_t aggr_lvl, uint32_t tbs_by
alloc_outcome_t sf_sched::alloc_bc(uint32_t aggr_lvl, uint32_t sib_idx, uint32_t sib_ntx)
{
uint32_t sib_len = sched_params->cfg->sibs[sib_idx].len;
uint32_t sib_len = cell_cfg->cfg->sibs[sib_idx].len;
uint32_t rv = sched::get_rvidx(sib_ntx);
ctrl_code_t ret = alloc_dl_ctrl(aggr_lvl, sib_len, SRSLTE_SIRNTI);
if (not ret.first) {
@ -540,7 +544,7 @@ std::pair<alloc_outcome_t, uint32_t> sf_sched::alloc_rar(uint32_t aggr_lvl, cons
rar_grant.msg3_grant[i].data = rar.msg3_grant[i];
rar_grant.msg3_grant[i].grant.tpc_pusch = 3;
rar_grant.msg3_grant[i].grant.trunc_mcs = 0;
uint32_t rba = srslte_ra_type2_to_riv(msg3_grant_size, last_msg3_prb, sched_params->cfg->cell.nof_prb);
uint32_t rba = srslte_ra_type2_to_riv(msg3_grant_size, last_msg3_prb, cell_cfg->cfg->cell.nof_prb);
rar_grant.msg3_grant[i].grant.rba = rba;
last_msg3_prb += msg3_grant_size;
@ -637,7 +641,7 @@ void sf_sched::set_bc_sched_result(const pdcch_grid_t::alloc_result_t& dci_resul
bc->dci.location = dci_result[bc_alloc.dci_idx]->dci_pos;
/* Generate DCI format1A */
prb_range_t prb_range = prb_range_t(bc_alloc.rbg_range, sched_params->P);
prb_range_t prb_range = prb_range_t(bc_alloc.rbg_range, cell_cfg->P);
int tbs = generate_format1a(
prb_range.prb_start, prb_range.length(), bc_alloc.req_bytes, bc_alloc.rv, bc_alloc.rnti, &bc->dci);
@ -667,7 +671,7 @@ void sf_sched::set_bc_sched_result(const pdcch_grid_t::alloc_result_t& dci_resul
bc->dci.location.ncce,
bc_alloc.rv,
bc_alloc.req_bytes,
sched_params->cfg->sibs[bc_alloc.sib_idx].period_rf,
cell_cfg->cfg->sibs[bc_alloc.sib_idx].period_rf,
bc->dci.tb[0].mcs_idx);
} else {
// Paging
@ -706,7 +710,7 @@ void sf_sched::set_rar_sched_result(const pdcch_grid_t::alloc_result_t& dci_resu
rar->dci.location = dci_result[rar_alloc.alloc_data.dci_idx]->dci_pos;
/* Generate DCI format1A */
prb_range_t prb_range = prb_range_t(rar_alloc.alloc_data.rbg_range, sched_params->P);
prb_range_t prb_range = prb_range_t(rar_alloc.alloc_data.rbg_range, cell_cfg->P);
int tbs = generate_format1a(prb_range.prb_start,
prb_range.length(),
rar_alloc.alloc_data.req_bytes,
@ -899,7 +903,7 @@ void sf_sched::generate_dcis()
uint32_t sf_sched::get_nof_ctrl_symbols() const
{
return tti_alloc.get_cfi() + ((sched_params->cfg->cell.nof_prb <= 10) ? 1 : 0);
return tti_alloc.get_cfi() + ((cell_cfg->cfg->cell.nof_prb <= 10) ? 1 : 0);
}
int sf_sched::generate_format1a(uint32_t rb_start,
@ -941,7 +945,7 @@ int sf_sched::generate_format1a(uint32_t rb_start,
dci->alloc_type = SRSLTE_RA_ALLOC_TYPE2;
dci->type2_alloc.mode = srslte_ra_type2_t::SRSLTE_RA_TYPE2_LOC;
dci->type2_alloc.riv = srslte_ra_type2_to_riv(l_crb, rb_start, sched_params->cfg->cell.nof_prb);
dci->type2_alloc.riv = srslte_ra_type2_to_riv(l_crb, rb_start, cell_cfg->cfg->cell.nof_prb);
dci->pid = 0;
dci->tb[0].mcs_idx = mcs;
dci->tb[0].rv = rv;

@ -36,12 +36,15 @@ namespace srsenb {
*
*****************************************************************/
void dl_metric_rr::set_params(const sched_params_t& sched_params_)
void dl_metric_rr::set_params(const sched_params_t& sched_params_, uint32_t enb_cc_idx_)
{
log_h = sched_params_.log_h;
sched_params = &sched_params_;
enb_cc_idx = enb_cc_idx_;
cell_params = &sched_params->cell_cfg[enb_cc_idx];
log_h = sched_params_.log_h;
}
void dl_metric_rr::sched_users(std::map<uint16_t, sched_ue>& ue_db, dl_sf_sched_itf* tti_sched, uint32_t enb_cc_idx)
void dl_metric_rr::sched_users(std::map<uint16_t, sched_ue>& ue_db, dl_sf_sched_itf* tti_sched)
{
tti_alloc = tti_sched;
@ -58,7 +61,7 @@ void dl_metric_rr::sched_users(std::map<uint16_t, sched_ue>& ue_db, dl_sf_sched_
iter = ue_db.begin(); // wrap around
}
sched_ue* user = &iter->second;
allocate_user(user, enb_cc_idx);
allocate_user(user);
}
}
@ -77,7 +80,7 @@ bool dl_metric_rr::find_allocation(uint32_t nof_rbg, rbgmask_t* rbgmask)
return nof_rbg == 0;
}
dl_harq_proc* dl_metric_rr::allocate_user(sched_ue* user, uint32_t enb_cc_idx)
dl_harq_proc* dl_metric_rr::allocate_user(sched_ue* user)
{
if (tti_alloc->is_dl_alloc(user)) {
return nullptr;
@ -133,8 +136,8 @@ dl_harq_proc* dl_metric_rr::allocate_user(sched_ue* user, uint32_t enb_cc_idx)
#endif
// Allocate resources based on pending data
if (req_bytes > 0) {
uint32_t pending_rbg =
user->prb_to_rbg(user->get_required_prb_dl(cell_idx, req_bytes, tti_alloc->get_nof_ctrl_symbols()));
uint32_t pending_prbs = user->get_required_prb_dl(cell_idx, req_bytes, tti_alloc->get_nof_ctrl_symbols());
uint32_t pending_rbg = cell_params->prb_to_rbg(pending_prbs);
rbgmask_t newtx_mask(tti_alloc->get_dl_mask().size());
find_allocation(pending_rbg, &newtx_mask);
if (newtx_mask.any()) { // some empty spaces were found
@ -155,12 +158,15 @@ dl_harq_proc* dl_metric_rr::allocate_user(sched_ue* user, uint32_t enb_cc_idx)
*
*****************************************************************/
void ul_metric_rr::set_params(const sched_params_t& sched_params_)
void ul_metric_rr::set_params(const sched_params_t& sched_params_, uint32_t enb_cc_idx_)
{
log_h = sched_params_.log_h;
sched_params = &sched_params_;
enb_cc_idx = enb_cc_idx_;
cell_params = &sched_params->cell_cfg[enb_cc_idx];
log_h = sched_params_.log_h;
}
void ul_metric_rr::sched_users(std::map<uint16_t, sched_ue>& ue_db, ul_sf_sched_itf* tti_sched, uint32_t enb_cc_idx)
void ul_metric_rr::sched_users(std::map<uint16_t, sched_ue>& ue_db, ul_sf_sched_itf* tti_sched)
{
tti_alloc = tti_sched;
current_tti = tti_alloc->get_tti_tx_ul();
@ -181,7 +187,7 @@ void ul_metric_rr::sched_users(std::map<uint16_t, sched_ue>& ue_db, ul_sf_sched_
iter = ue_db.begin(); // wrap around
}
sched_ue* user = &iter->second;
allocate_user_retx_prbs(user, enb_cc_idx);
allocate_user_retx_prbs(user);
}
// give priority in a time-domain RR basis
@ -192,7 +198,7 @@ void ul_metric_rr::sched_users(std::map<uint16_t, sched_ue>& ue_db, ul_sf_sched_
iter = ue_db.begin(); // wrap around
}
sched_ue* user = &iter->second;
allocate_user_newtx_prbs(user, enb_cc_idx);
allocate_user_newtx_prbs(user);
}
}
@ -233,7 +239,7 @@ bool ul_metric_rr::find_allocation(uint32_t L, ul_harq_proc::ul_alloc_t* alloc)
return alloc->L == L;
}
ul_harq_proc* ul_metric_rr::allocate_user_retx_prbs(sched_ue* user, uint32_t enb_cc_idx)
ul_harq_proc* ul_metric_rr::allocate_user_retx_prbs(sched_ue* user)
{
if (tti_alloc->is_ul_alloc(user)) {
return nullptr;
@ -275,7 +281,7 @@ ul_harq_proc* ul_metric_rr::allocate_user_retx_prbs(sched_ue* user, uint32_t enb
return nullptr;
}
ul_harq_proc* ul_metric_rr::allocate_user_newtx_prbs(sched_ue* user, uint32_t enb_cc_idx)
ul_harq_proc* ul_metric_rr::allocate_user_newtx_prbs(sched_ue* user)
{
if (tti_alloc->is_ul_alloc(user)) {
return nullptr;

@ -69,9 +69,9 @@ void sched_ue::set_cfg(uint16_t rnti_, const sched_params_t& sched_params_, sche
rnti = rnti_;
sched_params = &sched_params_;
log_h = sched_params->log_h;
cell = sched_params->cfg->cell;
cell = sched_params->cell_cfg[0].cfg->cell;
max_msg3retx = sched_params->cfg->maxharq_msg3tx;
max_msg3retx = sched_params->cell_cfg[0].cfg->maxharq_msg3tx;
cfg = *cfg_;
@ -82,7 +82,7 @@ void sched_ue::set_cfg(uint16_t rnti_, const sched_params_t& sched_params_, sche
// Init sched_ue carriers
// TODO: check config for number of carriers
carriers.emplace_back(&cfg, &cell, rnti, 0, log_h);
carriers.emplace_back(&cfg, &sched_params->cell_cfg[0], rnti, 0, log_h);
enb_ue_cellindex_map.insert(std::make_pair(0, 0)); // TODO: use real values
// Generate allowed CCE locations
@ -405,7 +405,7 @@ int sched_ue::generate_format1(dl_harq_proc* h,
uint32_t req_bytes = get_pending_dl_new_data_total_unlocked();
bool need_conres_ce = is_conres_ce_pending();
uint32_t nof_prb = format1_count_prb(user_mask);
uint32_t nof_prb = format1_count_prb(user_mask, cc_idx);
// Calculate exact number of RE for this PRB allocation
srslte_pdsch_grant_t grant = {};
@ -510,7 +510,7 @@ int sched_ue::generate_format2a_unlocked(dl_harq_proc* h,
dci->alloc_type = SRSLTE_RA_ALLOC_TYPE0;
dci->type0_alloc.rbg_bitmask = (uint32_t)user_mask.to_uint64();
uint32_t nof_prb = format1_count_prb(user_mask); // TODO: format1???
uint32_t nof_prb = format1_count_prb(user_mask, cc_idx); // TODO: format1???
// Calculate exact number of RE for this PRB allocation
srslte_pdsch_grant_t grant = {};
@ -863,16 +863,6 @@ uint32_t sched_ue::get_pending_ul_old_data_unlocked(uint32_t cc_idx)
return carriers[cc_idx].get_pending_ul_old_data();
}
/// Converts a PRB count into the number of resource block groups (RBGs)
/// needed to cover it, rounding up to a whole RBG.
/// sched_params->P is the RBG size in PRBs for the configured bandwidth.
uint32_t sched_ue::prb_to_rbg(uint32_t nof_prb)
{
  const float rbgs_needed = static_cast<float>(nof_prb) / sched_params->P;
  return static_cast<uint32_t>(ceil(rbgs_needed));
}
/// Converts an RBG count back into PRBs (each RBG spans sched_params->P PRBs).
/// NOTE(review): "rgb" looks like a typo for "rbg"; the name is kept unchanged
/// to preserve the existing interface for callers.
uint32_t sched_ue::rgb_to_prb(uint32_t nof_rbg)
{
  const uint32_t prbs = nof_rbg * sched_params->P;
  return prbs;
}
uint32_t sched_ue::get_required_prb_dl(uint32_t cc_idx, uint32_t req_bytes, uint32_t nof_ctrl_symbols)
{
std::lock_guard<std::mutex> lock(mutex);
@ -1015,6 +1005,11 @@ sched_ue::sched_dci_cce_t* sched_ue::get_locations(uint32_t cfi, uint32_t sf_idx
}
}
/// Returns the UE carrier object associated with the given eNB carrier index.
///
/// NOTE(review): enb_ue_cellindex_map is indexed with operator[], which
/// default-inserts 0 for an unknown enb_cc_idx and thus silently returns the
/// first carrier — confirm whether callers guarantee the key exists.
sched_ue_carrier* sched_ue::get_ue_carrier(uint32_t enb_cc_idx)
{
  // Translate eNB carrier index -> UE carrier index, then look up the carrier.
  uint32_t ue_cc_idx = enb_ue_cellindex_map[enb_cc_idx];
  return &carriers[ue_cc_idx];
}
/* Allocates first available RLC PDU */
int sched_ue::alloc_pdu(int tbs_bytes, sched_interface::dl_sched_pdu_t* pdu)
{
@ -1038,12 +1033,13 @@ int sched_ue::alloc_pdu(int tbs_bytes, sched_interface::dl_sched_pdu_t* pdu)
return x;
}
uint32_t sched_ue::format1_count_prb(const rbgmask_t& bitmask)
uint32_t sched_ue::format1_count_prb(const rbgmask_t& bitmask, uint32_t cc_idx)
{
uint32_t nof_prb = 0;
const sched_cell_params_t* cell_cfg = carriers[cc_idx].get_cell_cfg();
uint32_t nof_prb = 0;
for (uint32_t i = 0; i < bitmask.size(); i++) {
if (bitmask.test(i)) {
nof_prb += std::min(sched_params->cfg->cell.nof_prb - (i * sched_params->P), sched_params->P);
nof_prb += std::min(cell_cfg->cfg->cell.nof_prb - (i * cell_cfg->P), cell_cfg->P);
}
}
return nof_prb;
@ -1084,12 +1080,12 @@ int sched_ue::cqi_to_tbs(uint32_t cqi,
***********************************************************************************************/
sched_ue_carrier::sched_ue_carrier(sched_interface::ue_cfg_t* cfg_,
srslte_cell_t* cell_cfg_,
const sched_cell_params_t* cell_cfg_,
uint16_t rnti_,
uint32_t cc_idx_,
srslte::log* log_) :
cfg(cfg_),
cell(cell_cfg_),
cell_params(cell_cfg_),
rnti(rnti_),
cc_idx(cc_idx_),
log_h(log_)
@ -1210,7 +1206,7 @@ uint32_t sched_ue_carrier::get_aggr_level(uint32_t nof_bits)
float coderate = 99;
float factor = 1.5;
uint32_t l_max = 3;
if (cell->nof_prb == 6) {
if (cell_params->nof_prb() == 6) {
factor = 1.0;
l_max = 2;
}
@ -1287,8 +1283,8 @@ uint32_t sched_ue_carrier::get_required_prb_ul(uint32_t req_bytes)
return 0;
}
for (n = 1; n < cell->nof_prb && nbytes < req_bytes + 4; n++) {
uint32_t nof_re = (2 * (SRSLTE_CP_NSYMB(cell->cp) - 1) - N_srs) * n * SRSLTE_NRE;
for (n = 1; n < cell_params->nof_prb() && nbytes < req_bytes + 4; n++) {
uint32_t nof_re = (2 * (SRSLTE_CP_NSYMB(cell_params->cfg->cell.cp) - 1) - N_srs) * n * SRSLTE_NRE;
int tbs = 0;
if (fixed_mcs_ul < 0) {
tbs = alloc_tbs_ul(n, nof_re, 0, &mcs);
@ -1300,7 +1296,7 @@ uint32_t sched_ue_carrier::get_required_prb_ul(uint32_t req_bytes)
}
}
while (!srslte_dft_precoding_valid_prb(n) && n <= cell->nof_prb) {
while (!srslte_dft_precoding_valid_prb(n) && n <= cell_params->nof_prb()) {
n++;
}

@ -766,34 +766,58 @@ void rrc::rem_user(uint16_t rnti)
void rrc::config_mac()
{
using sched_cell_t = sched_interface::cell_cfg_t;
// Fill MAC scheduler configuration for SIBs
sched_interface::cell_cfg_t sched_cfg;
bzero(&sched_cfg, sizeof(sched_interface::cell_cfg_t));
for (uint32_t i = 0; i < nof_si_messages; i++) {
sched_cfg.sibs[i].len = sib_buffer[i]->N_bytes;
if (i == 0) {
sched_cfg.sibs[i].period_rf = 8; // SIB1 is always 8 rf
} else {
sched_cfg.sibs[i].period_rf = cfg.sib1.sched_info_list[i - 1].si_periodicity.to_number();
}
}
sched_cfg.prach_config = cfg.sibs[1].sib2().rr_cfg_common.prach_cfg.prach_cfg_info.prach_cfg_idx;
sched_cfg.prach_nof_preambles =
cfg.sibs[1].sib2().rr_cfg_common.rach_cfg_common.preamb_info.nof_ra_preambs.to_number();
sched_cfg.si_window_ms = cfg.sib1.si_win_len.to_number();
sched_cfg.prach_rar_window =
cfg.sibs[1].sib2().rr_cfg_common.rach_cfg_common.ra_supervision_info.ra_resp_win_size.to_number();
sched_cfg.prach_freq_offset = cfg.sibs[1].sib2().rr_cfg_common.prach_cfg.prach_cfg_info.prach_freq_offset;
sched_cfg.maxharq_msg3tx = cfg.sibs[1].sib2().rr_cfg_common.rach_cfg_common.max_harq_msg3_tx;
std::vector<sched_cell_t> sched_cfg;
sched_cfg.resize(cfg.cell_list.size());
sched_cfg.nrb_pucch = SRSLTE_MAX(cfg.sr_cfg.nof_prb, cfg.cqi_cfg.nof_prb);
rrc_log->info("Allocating %d PRBs for PUCCH\n", sched_cfg.nrb_pucch);
for (uint32_t ccidx = 0; ccidx < cfg.cell_list.size(); ++ccidx) {
sched_interface::cell_cfg_t& item = sched_cfg[ccidx];
// Copy Cell configuration
memcpy(&sched_cfg.cell, &cfg.cell, sizeof(srslte_cell_t));
// set sib/prach cfg
for (uint32_t i = 0; i < nof_si_messages; i++) {
item.sibs[i].len = sib_buffer[i]->N_bytes;
if (i == 0) {
item.sibs[i].period_rf = 8; // SIB1 is always 8 rf
} else {
item.sibs[i].period_rf = cfg.sib1.sched_info_list[i - 1].si_periodicity.to_number();
}
}
item.prach_config = cfg.sibs[1].sib2().rr_cfg_common.prach_cfg.prach_cfg_info.prach_cfg_idx;
item.prach_nof_preambles = cfg.sibs[1].sib2().rr_cfg_common.rach_cfg_common.preamb_info.nof_ra_preambs.to_number();
item.si_window_ms = cfg.sib1.si_win_len.to_number();
item.prach_rar_window =
cfg.sibs[1].sib2().rr_cfg_common.rach_cfg_common.ra_supervision_info.ra_resp_win_size.to_number();
item.prach_freq_offset = cfg.sibs[1].sib2().rr_cfg_common.prach_cfg.prach_cfg_info.prach_freq_offset;
item.maxharq_msg3tx = cfg.sibs[1].sib2().rr_cfg_common.rach_cfg_common.max_harq_msg3_tx;
item.nrb_pucch = SRSLTE_MAX(cfg.sr_cfg.nof_prb, cfg.cqi_cfg.nof_prb);
rrc_log->info("Allocating %d PRBs for PUCCH\n", item.nrb_pucch);
// Copy Cell configuration
item.cell = cfg.cell;
// copy secondary cell list info
sched_cfg[ccidx].scell_list.resize(cfg.cell_list[ccidx].scell_list.size());
for (uint32_t scidx = 0; scidx < cfg.cell_list[ccidx].scell_list.size(); ++scidx) {
const auto& scellitem = cfg.cell_list[ccidx].scell_list[scidx];
// search enb_cc_idx specific to cell_id
auto it = std::find_if(cfg.cell_list.begin(), cfg.cell_list.end(), [&scellitem](const cell_cfg_t& e) {
return e.cell_id == scellitem.cell_id;
});
if (it == cfg.cell_list.end()) {
rrc_log->warning("Secondary cell 0x%x not configured\n", scellitem.cell_id);
}
uint32_t scell_enb_idx = it - cfg.cell_list.begin();
sched_cfg[ccidx].scell_list[scidx].enb_cc_idx = scell_enb_idx;
sched_cfg[ccidx].scell_list[scidx].ul_allowed = scellitem.ul_allowed;
sched_cfg[ccidx].scell_list[scidx].cross_carrier_scheduling = scellitem.cross_carrier_sched;
}
}
// Configure MAC scheduler
mac->cell_cfg(&sched_cfg);
mac->cell_cfg(sched_cfg);
}
uint32_t rrc::generate_sibs()

@ -29,7 +29,7 @@ namespace srsenb {
class mac_dummy : public mac_interface_rrc
{
public:
int cell_cfg(sched_interface::cell_cfg_t* cell_cfg) override { return 0; }
int cell_cfg(const std::vector<sched_interface::cell_cfg_t>& cell_cfg) override { return 0; }
void reset() override {}
int ue_cfg(uint16_t rnti, sched_interface::ue_cfg_t* cfg) override { return 0; }
int ue_rem(uint16_t rnti) override { return 0; }

@ -90,20 +90,19 @@ int main(int argc, char* argv[])
cell_cfg_phy.phich_length = SRSLTE_PHICH_NORM;
cell_cfg_phy.phich_resources = SRSLTE_PHICH_R_1;
srsenb::sched_interface::cell_cfg_t cell_cfg;
std::vector<srsenb::sched_interface::cell_cfg_t> cell_cfg(1);
/* Set MAC cell configuration */
bzero(&cell_cfg, sizeof(srsenb::sched_interface::cell_cfg_t));
memcpy(&cell_cfg.cell, &cell_cfg_phy, sizeof(srslte_cell_t));
cell_cfg.sibs[0].len = 18;
cell_cfg.sibs[0].period_rf = 8;
cell_cfg.sibs[1].len = 41;
cell_cfg.sibs[1].period_rf = 16;
cell_cfg.si_window_ms = 40;
cell_cfg[0] = {};
cell_cfg[0].cell = cell_cfg_phy;
cell_cfg[0].sibs[0].len = 18;
cell_cfg[0].sibs[0].period_rf = 8;
cell_cfg[0].sibs[1].len = 41;
cell_cfg[0].sibs[1].period_rf = 16;
cell_cfg[0].si_window_ms = 40;
my_sched.init(nullptr, &log_out);
my_sched.set_metric(&dl_metric, &ul_metric);
my_sched.cell_cfg(&cell_cfg);
my_sched.cell_cfg(cell_cfg);
srsenb::sched_interface::dl_sched_res_t sched_result_dl;
srsenb::sched_interface::ul_sched_res_t sched_result_ul;

@ -26,11 +26,13 @@
using namespace srsenb;
#define CARRIER_IDX 0
int output_sched_tester::test_pusch_collisions(const tti_params_t& tti_params,
const sched_interface::ul_sched_res_t& ul_result,
prbmask_t& ul_allocs) const
{
uint32_t nof_prb = params.cfg->cell.nof_prb;
uint32_t nof_prb = params.cell_cfg[CARRIER_IDX].cfg->cell.nof_prb;
ul_allocs.resize(nof_prb);
ul_allocs.reset();
@ -52,15 +54,15 @@ int output_sched_tester::test_pusch_collisions(const tti_params_t&
};
/* TEST: Check if there is space for PRACH */
bool is_prach_tti_tx_ul = srslte_prach_tti_opportunity_config_fdd(params.cfg->prach_config, tti_params.tti_tx_ul, -1);
bool is_prach_tti_tx_ul = srslte_prach_tti_opportunity_config_fdd(params.cell_cfg[CARRIER_IDX].cfg->prach_config, tti_params.tti_tx_ul, -1);
if (is_prach_tti_tx_ul) {
try_ul_fill({params.cfg->prach_freq_offset, 6}, "PRACH");
try_ul_fill({params.cell_cfg[CARRIER_IDX].cfg->prach_freq_offset, 6}, "PRACH");
}
/* TEST: check collisions in PUCCH */
bool strict = nof_prb != 6 or (not is_prach_tti_tx_ul); // and not tti_data.ul_pending_msg3_present);
try_ul_fill({0, (uint32_t)params.cfg->nrb_pucch}, "PUCCH", strict);
try_ul_fill({params.cfg->cell.nof_prb - params.cfg->nrb_pucch, (uint32_t)params.cfg->nrb_pucch}, "PUCCH", strict);
try_ul_fill({0, (uint32_t)params.cell_cfg[CARRIER_IDX].cfg->nrb_pucch}, "PUCCH", strict);
try_ul_fill({params.cell_cfg[CARRIER_IDX].cfg->cell.nof_prb - params.cell_cfg[CARRIER_IDX].cfg->nrb_pucch, (uint32_t)params.cell_cfg[CARRIER_IDX].cfg->nrb_pucch}, "PUCCH", strict);
/* TEST: check collisions in the UL PUSCH */
for (uint32_t i = 0; i < ul_result.nof_dci_elems; ++i) {
@ -78,10 +80,10 @@ int output_sched_tester::test_pdsch_collisions(const tti_params_t&
const sched_interface::dl_sched_res_t& dl_result,
rbgmask_t& rbgmask) const
{
srslte::bounded_bitset<100, true> dl_allocs(params.cfg->cell.nof_prb), alloc_mask(params.cfg->cell.nof_prb);
srslte::bounded_bitset<100, true> dl_allocs(params.cell_cfg[CARRIER_IDX].cfg->cell.nof_prb), alloc_mask(params.cell_cfg[CARRIER_IDX].cfg->cell.nof_prb);
auto try_dl_mask_fill = [&](const srslte_dci_dl_t& dci, const char* channel) {
if (extract_dl_prbmask(params.cfg->cell, dci, &alloc_mask) != SRSLTE_SUCCESS) {
if (extract_dl_prbmask(params.cell_cfg[CARRIER_IDX].cfg->cell, dci, &alloc_mask) != SRSLTE_SUCCESS) {
return SRSLTE_ERROR;
}
if ((dl_allocs & alloc_mask).any()) {
@ -105,9 +107,9 @@ int output_sched_tester::test_pdsch_collisions(const tti_params_t&
}
// forbid Data in DL if it conflicts with PRACH for PRB==6
if (params.cfg->cell.nof_prb == 6) {
if (params.cell_cfg[CARRIER_IDX].cfg->cell.nof_prb == 6) {
uint32_t tti_rx_ack = TTI_RX_ACK(tti_params.tti_rx);
if (srslte_prach_tti_opportunity_config_fdd(params.cfg->prach_config, tti_rx_ack, -1)) {
if (srslte_prach_tti_opportunity_config_fdd(params.cell_cfg[CARRIER_IDX].cfg->prach_config, tti_rx_ack, -1)) {
dl_allocs.fill(0, dl_allocs.size());
}
}
@ -118,13 +120,13 @@ int output_sched_tester::test_pdsch_collisions(const tti_params_t&
}
// TEST: check for holes in the PRB mask (RBGs not fully filled)
rbgmask.resize(params.nof_rbgs);
rbgmask.resize(params.cell_cfg[CARRIER_IDX].nof_rbgs);
rbgmask.reset();
srslte::bounded_bitset<100, true> rev_alloc = ~dl_allocs;
for (uint32_t i = 0; i < params.nof_rbgs; ++i) {
uint32_t lim = SRSLTE_MIN((i + 1) * params.P, dl_allocs.size());
bool val = dl_allocs.any(i * params.P, lim);
CONDERROR(rev_alloc.any(i * params.P, lim) and val, "[TESTER] No holes can be left in an RBG\n");
for (uint32_t i = 0; i < params.cell_cfg[CARRIER_IDX].nof_rbgs; ++i) {
uint32_t lim = SRSLTE_MIN((i + 1) * params.cell_cfg[CARRIER_IDX].P, dl_allocs.size());
bool val = dl_allocs.any(i * params.cell_cfg[CARRIER_IDX].P, lim);
CONDERROR(rev_alloc.any(i * params.cell_cfg[CARRIER_IDX].P, lim) and val, "[TESTER] No holes can be left in an RBG\n");
if (val) {
rbgmask.set(i);
}
@ -156,18 +158,18 @@ int output_sched_tester::test_sib_scheduling(const tti_params_t&
continue;
}
CONDERROR(bc->index >= sched_interface::MAX_SIBS, "Invalid SIB idx=%d\n", bc->index + 1);
CONDERROR(bc->tbs < params.cfg->sibs[bc->index].len,
CONDERROR(bc->tbs < params.cell_cfg[CARRIER_IDX].cfg->sibs[bc->index].len,
"Allocated BC process with TBS=%d < sib_len=%d\n",
bc->tbs,
params.cfg->sibs[bc->index].len);
uint32_t x = (bc->index - 1) * params.cfg->si_window_ms;
params.cell_cfg[CARRIER_IDX].cfg->sibs[bc->index].len);
uint32_t x = (bc->index - 1) * params.cell_cfg[CARRIER_IDX].cfg->si_window_ms;
uint32_t sf = x % 10;
uint32_t sfn_start = sfn;
while ((sfn_start % params.cfg->sibs[bc->index].period_rf) != x / 10) {
while ((sfn_start % params.cell_cfg[CARRIER_IDX].cfg->sibs[bc->index].period_rf) != x / 10) {
sfn_start--;
}
uint32_t win_start = sfn_start * 10 + sf;
uint32_t win_end = win_start + params.cfg->si_window_ms;
uint32_t win_end = win_start + params.cell_cfg[CARRIER_IDX].cfg->si_window_ms;
CONDERROR(tti_params.tti_tx_dl < win_start or tti_params.tti_tx_dl > win_end,
"Scheduled SIB is outside of its SIB window\n");
}
@ -235,10 +237,10 @@ int output_sched_tester::test_dci_values_consistency(const sched_interface::dl_s
for (uint32_t i = 0; i < dl_result.nof_bc_elems; ++i) {
auto& bc = dl_result.bc[i];
if (bc.type == sched_interface::dl_sched_bc_t::BCCH) {
CONDERROR(bc.tbs < params.cfg->sibs[bc.index].len,
CONDERROR(bc.tbs < params.cell_cfg[CARRIER_IDX].cfg->sibs[bc.index].len,
"Allocated BC process with TBS=%d < sib_len=%d\n",
bc.tbs,
params.cfg->sibs[bc.index].len);
params.cell_cfg[CARRIER_IDX].cfg->sibs[bc.index].len);
} else if (bc.type == sched_interface::dl_sched_bc_t::PCCH) {
CONDERROR(bc.tbs == 0, "Allocated paging process with invalid TBS=%d\n", bc.tbs);
} else {

@ -214,7 +214,7 @@ struct sched_tester : public srsenb::sched {
sched_tti_data tti_data;
srsenb::tti_params_t tti_params{10241};
int cell_cfg(sched_interface::cell_cfg_t* cell_cfg) final;
int cell_cfg(const std::vector<sched_interface::cell_cfg_t>& cell_cfg) final;
int add_user(uint16_t rnti,
srsenb::sched_interface::ue_bearer_cfg_t bearer_cfg,
srsenb::sched_interface::ue_cfg_t ue_cfg_);
@ -237,7 +237,7 @@ private:
std::unique_ptr<srsenb::output_sched_tester> output_tester;
};
int sched_tester::cell_cfg(sched_interface::cell_cfg_t* cell_cfg)
int sched_tester::cell_cfg(const std::vector<sched_interface::cell_cfg_t>& cell_cfg)
{
sched::cell_cfg(cell_cfg);
output_tester.reset(new srsenb::output_sched_tester{sched_params});
@ -303,7 +303,7 @@ int sched_tester::process_tti_args()
{
// may add a new user
if (sim_args.tti_events[tti_data.tti_rx].new_user) {
CONDERROR(!srslte_prach_tti_opportunity_config_fdd(cfg.prach_config, tti_data.tti_rx, -1),
CONDERROR(!srslte_prach_tti_opportunity_config_fdd(cfg[CARRIER_IDX].prach_config, tti_data.tti_rx, -1),
"[TESTER] New user added in a non-PRACH TTI\n");
uint16_t rnti = sim_args.tti_events[tti_data.tti_rx].new_rnti;
add_user(rnti, sim_args.bearer_cfg, sim_args.ue_cfg);
@ -448,7 +448,7 @@ int sched_tester::test_ra()
// continue;
// }
uint32_t window[2] = {(uint32_t)prach_tti + 3, prach_tti + 3 + cfg.prach_rar_window};
uint32_t window[2] = {(uint32_t)prach_tti + 3, prach_tti + 3 + cfg[CARRIER_IDX].prach_rar_window};
if (prach_tti >= userinfo.rar_tti) { // RAR not yet sent
CONDERROR(tti_data.tti_tx_dl > window[1], "[TESTER] There was no RAR scheduled within the RAR Window\n");
if (tti_data.tti_tx_dl >= window[0]) {
@ -576,7 +576,7 @@ int sched_tester::test_tti_result()
carrier_schedulers[0]->get_sf_sched_ptr(tti_sched->get_tti_rx() + MSG3_DELAY_MS)->get_pending_msg3();
const auto& p = msg3_list.front();
CONDERROR(msg3_list.empty(), "Pending Msg3 should have been set\n");
uint32_t rba = srslte_ra_type2_to_riv(p.L, p.n_prb, cfg.cell.nof_prb);
uint32_t rba = srslte_ra_type2_to_riv(p.L, p.n_prb, cfg[CARRIER_IDX].cell.nof_prb);
CONDERROR(msg3_grant.grant.rba != rba, "Pending Msg3 RBA is not valid\n");
}
}
@ -758,7 +758,7 @@ int sched_tester::test_sibs()
int sched_tester::test_collisions()
{
const srsenb::sf_sched* tti_sched = carrier_schedulers[0]->get_sf_sched_ptr(tti_data.tti_rx);
srsenb::prbmask_t ul_allocs(cfg.cell.nof_prb);
srsenb::prbmask_t ul_allocs(cfg[CARRIER_IDX].cell.nof_prb);
/* TEST: any collision in PUCCH and PUSCH */
TESTASSERT(output_tester->test_pusch_collisions(tti_params, tti_data.sched_result_ul, ul_allocs) == SRSLTE_SUCCESS);
@ -776,8 +776,11 @@ int sched_tester::test_collisions()
CONDERROR(passed, "[TESTER] There can only be one msg3 allocation per UE\n");
CONDERROR(tti_data.sched_result_ul.pusch[i].needs_pdcch, "[TESTER] Msg3 allocations do not need PDCCH DCI\n");
uint32_t L, RBstart;
srslte_ra_type2_from_riv(
tti_data.sched_result_ul.pusch[i].dci.type2_alloc.riv, &L, &RBstart, cfg.cell.nof_prb, cfg.cell.nof_prb);
srslte_ra_type2_from_riv(tti_data.sched_result_ul.pusch[i].dci.type2_alloc.riv,
&L,
&RBstart,
cfg[CARRIER_IDX].cell.nof_prb,
cfg[CARRIER_IDX].cell.nof_prb);
if (RBstart != tti_data.ul_pending_msg3.n_prb or L != tti_data.ul_pending_msg3.L) {
TESTERROR("[TESTER] The Msg3 allocation does not coincide with the expected.\n");
}
@ -790,19 +793,22 @@ int sched_tester::test_collisions()
// update ue stats with number of allocated UL PRBs
for (uint32_t i = 0; i < tti_data.sched_result_ul.nof_dci_elems; ++i) {
uint32_t L, RBstart;
srslte_ra_type2_from_riv(
tti_data.sched_result_ul.pusch[i].dci.type2_alloc.riv, &L, &RBstart, cfg.cell.nof_prb, cfg.cell.nof_prb);
srslte_ra_type2_from_riv(tti_data.sched_result_ul.pusch[i].dci.type2_alloc.riv,
&L,
&RBstart,
cfg[CARRIER_IDX].cell.nof_prb,
cfg[CARRIER_IDX].cell.nof_prb);
ue_stats[tti_data.sched_result_ul.pusch[i].dci.rnti].nof_ul_rbs += L;
}
/* TEST: check any collision in PDSCH */
srsenb::rbgmask_t rbgmask(cfg.cell.nof_prb);
srsenb::rbgmask_t rbgmask(cfg[CARRIER_IDX].cell.nof_prb);
TESTASSERT(output_tester->test_pdsch_collisions(tti_params, tti_data.sched_result_dl, rbgmask) == SRSLTE_SUCCESS);
// update ue stats with number of DL RB allocations
srslte::bounded_bitset<100, true> alloc_mask(cfg.cell.nof_prb);
srslte::bounded_bitset<100, true> alloc_mask(cfg[CARRIER_IDX].cell.nof_prb);
for (uint32_t i = 0; i < tti_data.sched_result_dl.nof_data_elems; ++i) {
TESTASSERT(srsenb::extract_dl_prbmask(cfg.cell, tti_data.sched_result_dl.data[i].dci, &alloc_mask) ==
TESTASSERT(srsenb::extract_dl_prbmask(cfg[CARRIER_IDX].cell, tti_data.sched_result_dl.data[i].dci, &alloc_mask) ==
SRSLTE_SUCCESS);
ue_stats[tti_data.sched_result_dl.data[i].dci.rnti].nof_dl_rbs += alloc_mask.count();
}
@ -927,13 +933,11 @@ srsenb::sched_interface::cell_cfg_t generate_cell_cfg()
return cell_cfg;
}
void test_scheduler_rand(srsenb::sched_interface::cell_cfg_t cell_cfg, const sched_sim_args& args)
void test_scheduler_rand(std::vector<srsenb::sched_interface::cell_cfg_t> cell_cfg, const sched_sim_args& args)
{
// Create classes
sched_tester tester;
srsenb::sched my_sched;
srsenb::dl_metric_rr dl_metric;
srsenb::ul_metric_rr ul_metric;
sched_tester tester;
srsenb::sched my_sched;
log_global->set_level(srslte::LOG_LEVEL_INFO);
@ -943,8 +947,7 @@ void test_scheduler_rand(srsenb::sched_interface::cell_cfg_t cell_cfg, const sch
// srsenb::sched_interface::ul_sched_res_t& sched_result_ul = tester.tti_data.sched_result_ul;
tester.init(nullptr, log_global.get());
tester.set_metric(&dl_metric, &ul_metric);
tester.cell_cfg(&cell_cfg);
tester.cell_cfg(cell_cfg);
bool running = true;
uint32_t tti = 0;
@ -1035,8 +1038,8 @@ int main()
for (uint32_t n = 0; n < N_runs; ++n) {
printf("Sim run number: %u\n", n + 1);
srsenb::sched_interface::cell_cfg_t cell_cfg = generate_cell_cfg();
sched_sim_args sim_args = rand_sim_params(cell_cfg, nof_ttis);
std::vector<srsenb::sched_interface::cell_cfg_t> cell_cfg = {generate_cell_cfg()};
sched_sim_args sim_args = rand_sim_params(cell_cfg[0], nof_ttis);
test_scheduler_rand(cell_cfg, sim_args);
}

Loading…
Cancel
Save