diff --git a/lib/include/srslte/common/bounded_bitset.h b/lib/include/srslte/common/bounded_bitset.h index faeec15a5..939ba6efa 100644 --- a/lib/include/srslte/common/bounded_bitset.h +++ b/lib/include/srslte/common/bounded_bitset.h @@ -22,12 +22,11 @@ #ifndef SRSLTE_DYN_BITSET_H #define SRSLTE_DYN_BITSET_H +#include "srslte/common/logmap.h" #include #include #include -#define CEILFRAC(x, y) (((x) > 0) ? ((((x)-1) / (y)) + 1) : 0) - namespace srslte { constexpr uint32_t ceil_div(uint32_t x, uint32_t y) @@ -53,7 +52,7 @@ public: void resize(size_t new_size) noexcept { if (new_size > max_size()) { - printf("ERROR: bitset resize out of bounds: %zd>=%zd\n", max_size(), new_size); + srslte::logmap::get("COMM")->error("ERROR: bitset resize out of bounds: %zd>=%zd\n", max_size(), new_size); return; } else if (new_size == cur_size) { return; @@ -68,7 +67,7 @@ public: void set(size_t pos) noexcept { if (pos >= size()) { - printf("ERROR: bitset out of bounds: %zd>=%zd\n", pos, size()); + srslte::logmap::get("COMM")->error("ERROR: bitset out of bounds: %zd>=%zd\n", pos, size()); return; } set_(pos); diff --git a/lib/include/srslte/common/logmap.h b/lib/include/srslte/common/logmap.h index 0d3a87206..06a861789 100644 --- a/lib/include/srslte/common/logmap.h +++ b/lib/include/srslte/common/logmap.h @@ -19,7 +19,8 @@ * */ -#include "logger.h" +#include "log_filter.h" +#include "logger_stdout.h" #include "singleton.h" #include "srslte/common/log_filter.h" #include "srslte/common/logger_stdout.h" diff --git a/lib/include/srslte/interfaces/sched_interface.h b/lib/include/srslte/interfaces/sched_interface.h index 96de66b10..907fa14d8 100644 --- a/lib/include/srslte/interfaces/sched_interface.h +++ b/lib/include/srslte/interfaces/sched_interface.h @@ -51,14 +51,14 @@ public: uint32_t period_rf; } cell_cfg_sib_t; - typedef struct { - int pdsch_mcs; - int pdsch_max_mcs; - int pusch_mcs; - int pusch_max_mcs; - int nof_ctrl_symbols; - int max_aggr_level; - } sched_args_t; + struct sched_args_t { + int pdsch_mcs = -1; + int pdsch_max_mcs = 28; + int pusch_mcs = -1; + int pusch_max_mcs = 28; + int nof_ctrl_symbols = 3; + int max_aggr_level = 3; + }; struct cell_cfg_t { @@ -216,9 +216,9 @@ public: virtual int reset() = 0; /* Manages UE scheduling context */ - virtual int ue_cfg(uint16_t rnti, ue_cfg_t* cfg) = 0; - virtual int ue_rem(uint16_t rnti) = 0; - virtual bool ue_exists(uint16_t rnti) = 0; + virtual int ue_cfg(uint16_t rnti, uint32_t enb_cc_idx, ue_cfg_t* cfg) = 0; + virtual int ue_rem(uint16_t rnti) = 0; + virtual bool ue_exists(uint16_t rnti) = 0; /* Manages UE bearers and associated configuration */ virtual int bearer_ue_cfg(uint16_t rnti, uint32_t lc_id, ue_bearer_cfg_t* cfg) = 0; diff --git a/srsenb/hdr/stack/mac/scheduler.h b/srsenb/hdr/stack/mac/scheduler.h index b8c4c9b87..d12a68947 100644 --- a/srsenb/hdr/stack/mac/scheduler.h +++ b/srsenb/hdr/stack/mac/scheduler.h @@ -51,28 +51,34 @@ inline bool is_in_tti_interval(uint32_t tti, uint32_t tti1, uint32_t tti2) } // namespace sched_utils //! 
structs to bundle together all the sched arguments, and share them with all the sched sub-components -struct sched_cell_params_t { - sched_interface::cell_cfg_t* cfg = nullptr; - uint32_t P = 0; - uint32_t nof_rbgs = 0; +class sched_cell_params_t +{ + struct regs_deleter { + void operator()(srslte_regs_t* p) + { + if (p != nullptr) { + srslte_regs_free(p); + } + } + }; +public: + bool set_cfg(uint32_t enb_cc_idx_, + const sched_interface::cell_cfg_t& cfg_, + const sched_interface::sched_args_t& sched_args); // convenience getters uint32_t prb_to_rbg(uint32_t nof_prbs) const { return (nof_prbs + (P - 1)) / P; } - uint32_t nof_prb() const { return cfg->cell.nof_prb; } -}; -class sched_params_t -{ -public: - srslte::log* log_h = nullptr; - std::vector<sched_cell_params_t> cell_cfg; - sched_interface::sched_args_t sched_cfg = {}; - srslte_regs_t* regs = nullptr; - std::array<sched_ue::sched_dci_cce_t, 3> common_locations = {}; - std::array<std::array<sched_ue::sched_dci_cce_t, 10>, 3> rar_locations = {}; - std::array<uint32_t, 3> nof_cce_table = {}; ///< map cfix -> nof cces in PDCCH - - sched_params_t(); - bool set_cfg(srslte::log* log_, std::vector<sched_interface::cell_cfg_t>* cfg_, srslte_regs_t* regs_); + uint32_t nof_prb() const { return cfg.cell.nof_prb; } + + uint32_t enb_cc_idx = 0; + sched_interface::cell_cfg_t cfg = {}; + const sched_interface::sched_args_t* sched_cfg = nullptr; + std::unique_ptr<srslte_regs_t, regs_deleter> regs; + std::array<sched_dci_cce_t, 3> common_locations = {}; + std::array<std::array<sched_dci_cce_t, 10>, 3> rar_locations = {}; + std::array<uint32_t, 3> nof_cce_table = {}; ///< map cfix -> nof cces in PDCCH + uint32_t P = 0; + uint32_t nof_rbgs = 0; }; /* Caution: User addition (ue_cfg) and removal (ue_rem) are not thread-safe @@ -95,7 +101,7 @@ public: { public: /* Virtual methods for user metric calculation */ - virtual void set_params(const sched_params_t& sched_params_, uint32_t enb_cc_idx_) = 0; + virtual void set_params(const sched_cell_params_t& cell_params_) = 0; virtual void sched_users(std::map<uint16_t, sched_ue>& ue_db, dl_sf_sched_itf* tti_sched) = 0; }; @@ -103,7 +109,7 @@ public: { public: /* Virtual methods for user metric calculation */ - virtual void set_params(const sched_params_t& sched_params_, uint32_t enb_cc_idx_) = 0; + virtual void set_params(const sched_cell_params_t& cell_params_) = 0; virtual void sched_users(std::map<uint16_t, sched_ue>& ue_db, ul_sf_sched_itf* tti_sched) = 0; }; @@ -116,12 +122,12 @@ public: sched(); ~sched(); - void init(rrc_interface_mac* rrc, srslte::log* log); + void init(rrc_interface_mac* rrc); int cell_cfg(const std::vector<cell_cfg_t>& cell_cfg) override; void set_sched_cfg(sched_args_t* sched_cfg); int reset() final; - int ue_cfg(uint16_t rnti, ue_cfg_t* ue_cfg) final; + int ue_cfg(uint16_t rnti, uint32_t enb_cc_idx, ue_cfg_t* ue_cfg) final; int ue_rem(uint16_t rnti) final; bool ue_exists(uint16_t rnti) final; void ue_needs_ta_cmd(uint16_t rnti, uint32_t nof_ta_cmd); @@ -166,27 +172,23 @@ public: const static int rv_idx[4] = {0, 2, 3, 1}; return rv_idx[retx_idx % 4]; } - static void generate_cce_location(srslte_regs_t* regs, - sched_ue::sched_dci_cce_t* location, - uint32_t cfi, - uint32_t sf_idx = 0, - uint16_t rnti = 0); + static void generate_cce_location(srslte_regs_t* regs, + sched_dci_cce_t* location, + uint32_t cfi, + uint32_t sf_idx = 0, + uint16_t rnti = 0); static uint32_t aggr_level(uint32_t aggr_idx) { return 1u << aggr_idx; } class carrier_sched; protected: - srslte::log* log_h; - rrc_interface_mac* rrc; - sched_params_t sched_params; + srslte::log* log_h = nullptr; + rrc_interface_mac* rrc = nullptr; + sched_args_t sched_cfg = {}; + std::vector<sched_cell_params_t> sched_cell_params; pthread_rwlock_t rwlock; - std::vector<cell_cfg_t> cfg; - - // This is for computing DCI locations - srslte_regs_t
regs; - // Helper methods template int ue_db_access(uint16_t rnti, Func); @@ -196,10 +198,10 @@ protected: // independent schedulers for each carrier std::vector > carrier_schedulers; - std::array pdsch_re; - uint32_t current_tti; + std::array pdsch_re = {}; + uint32_t current_tti = 0; - bool configured; + bool configured = false; }; } // namespace srsenb diff --git a/srsenb/hdr/stack/mac/scheduler_carrier.h b/srsenb/hdr/stack/mac/scheduler_carrier.h index b87cc3298..8ffd7df2c 100644 --- a/srsenb/hdr/stack/mac/scheduler_carrier.h +++ b/srsenb/hdr/stack/mac/scheduler_carrier.h @@ -34,7 +34,7 @@ class sched::carrier_sched public: explicit carrier_sched(rrc_interface_mac* rrc_, std::map* ue_db_, uint32_t enb_cc_idx_); void reset(); - void carrier_cfg(const sched_params_t& sched_params_); + void carrier_cfg(const sched_cell_params_t& sched_params_); void set_dl_tti_mask(uint8_t* tti_mask, uint32_t nof_sfs); sf_sched* generate_tti_result(uint32_t tti_rx); int dl_rach_info(dl_sched_rar_info_t rar_info); @@ -51,11 +51,10 @@ private: int alloc_ul_users(sf_sched* tti_sched); // args - const sched_params_t* sched_params = nullptr; - const cell_cfg_t* cc_cfg = nullptr; - srslte::log* log_h = nullptr; - rrc_interface_mac* rrc = nullptr; - std::map* ue_db = nullptr; + const sched_cell_params_t* cc_cfg = nullptr; + srslte::log* log_h = nullptr; + rrc_interface_mac* rrc = nullptr; + std::map* ue_db = nullptr; std::unique_ptr dl_metric; std::unique_ptr ul_metric; const uint32_t enb_cc_idx; @@ -80,7 +79,7 @@ private: class bc_sched { public: - explicit bc_sched(const sched::cell_cfg_t& cfg_, rrc_interface_mac* rrc_); + explicit bc_sched(const sched_cell_params_t& cfg_, rrc_interface_mac* rrc_); void dl_sched(sf_sched* tti_sched); void reset(); @@ -96,8 +95,8 @@ private: void alloc_paging(sf_sched* tti_sched); // args - const sched::cell_cfg_t* cfg; - rrc_interface_mac* rrc = nullptr; + const sched_cell_params_t* cc_cfg = nullptr; + rrc_interface_mac* rrc = nullptr; std::array pending_sibs; @@ -115,7 +114,7 @@ public: using dl_sched_rar_t = sched_interface::dl_sched_rar_t; using dl_sched_rar_grant_t = sched_interface::dl_sched_rar_grant_t; - explicit ra_sched(const sched::cell_cfg_t& cfg_, srslte::log* log_, std::map& ue_db_); + explicit ra_sched(const sched_cell_params_t& cfg_, srslte::log* log_, std::map& ue_db_); void dl_sched(sf_sched* tti_sched); void ul_sched(sf_sched* tti_sched); int dl_rach_info(dl_sched_rar_info_t rar_info); @@ -124,9 +123,9 @@ public: private: // args - srslte::log* log_h = nullptr; - const sched::cell_cfg_t* cfg = nullptr; - std::map* ue_db = nullptr; + srslte::log* log_h = nullptr; + const sched_cell_params_t* cc_cfg = nullptr; + std::map* ue_db = nullptr; std::deque pending_rars; uint32_t rar_aggr_level = 2; diff --git a/srsenb/hdr/stack/mac/scheduler_grid.h b/srsenb/hdr/stack/mac/scheduler_grid.h index 3b3a81ef9..1973a8cfc 100644 --- a/srsenb/hdr/stack/mac/scheduler_grid.h +++ b/srsenb/hdr/stack/mac/scheduler_grid.h @@ -67,7 +67,7 @@ public: }; using alloc_result_t = std::vector; - void init(const sched_params_t& sched_params); + void init(const sched_cell_params_t& cell_params_); void new_tti(const tti_params_t& tti_params_, uint32_t start_cfi); bool alloc_dci(alloc_type_t alloc_type, uint32_t aggr_idx, sched_ue* user = nullptr); bool set_cfi(uint32_t cfi); @@ -84,17 +84,17 @@ private: const static uint32_t nof_cfis = 3; using tree_node_t = std::pair; ///< First represents the parent node idx, and second the alloc tree node - void reset(); - const sched_ue::sched_dci_cce_t* 
get_cce_loc_table(alloc_type_t alloc_type, sched_ue* user) const; - void update_alloc_tree(int node_idx, - uint32_t aggr_idx, - sched_ue* user, - alloc_type_t alloc_type, - const sched_ue::sched_dci_cce_t* dci_locs); + void reset(); + const sched_dci_cce_t* get_cce_loc_table(alloc_type_t alloc_type, sched_ue* user) const; + void update_alloc_tree(int node_idx, + uint32_t aggr_idx, + sched_ue* user, + alloc_type_t alloc_type, + const sched_dci_cce_t* dci_locs); // consts - const sched_params_t* sched_params = nullptr; - srslte::log* log_h = nullptr; + const sched_cell_params_t* cc_cfg = nullptr; + srslte::log* log_h = nullptr; // tti vars const tti_params_t* tti_params = nullptr; @@ -113,7 +113,7 @@ public: rbg_range_t rbg_range; }; - void init(const sched_params_t& sched_params_, uint32_t enb_cc_idx_); + void init(const sched_cell_params_t& cell_params_, uint32_t enb_cc_idx_); void new_tti(const tti_params_t& tti_params_, uint32_t start_cfi); dl_ctrl_alloc_t alloc_dl_ctrl(uint32_t aggr_lvl, alloc_type_t alloc_type); alloc_outcome_t alloc_dl_data(sched_ue* user, const rbgmask_t& user_mask); @@ -131,12 +131,10 @@ private: alloc_outcome_t alloc_dl(uint32_t aggr_lvl, alloc_type_t alloc_type, rbgmask_t alloc_mask, sched_ue* user = nullptr); // consts - const sched_params_t* sched_params = nullptr; - const sched_interface::cell_cfg_t* cell_cfg = nullptr; - srslte::log* log_h = nullptr; - uint32_t nof_rbgs = 0; - uint32_t si_n_rbg = 0, rar_n_rbg = 0; - uint32_t enb_cc_idx = 0; + const sched_cell_params_t* cc_cfg = nullptr; + srslte::log* log_h = nullptr; + uint32_t nof_rbgs = 0; + uint32_t si_n_rbg = 0, rar_n_rbg = 0; // tti const const tti_params_t* tti_params = nullptr; @@ -233,7 +231,7 @@ public: sched_interface::dl_sched_res_t dl_sched_result; sched_interface::ul_sched_res_t ul_sched_result; - void init(const sched_params_t& sched_params_, uint32_t enb_cc_idx_); + void init(const sched_cell_params_t& cell_params_); void new_tti(uint32_t tti_rx_, uint32_t start_cfi); // DL alloc @@ -287,10 +285,8 @@ private: void set_ul_sched_result(const pdcch_grid_t::alloc_result_t& dci_result); // consts - const sched_params_t* sched_params = nullptr; - const sched_cell_params_t* cell_cfg = nullptr; - srslte::log* log_h = nullptr; - uint32_t enb_cc_idx = 0; + const sched_cell_params_t* cc_cfg = nullptr; + srslte::log* log_h = nullptr; // internal state tti_params_t tti_params{10241}; diff --git a/srsenb/hdr/stack/mac/scheduler_metric.h b/srsenb/hdr/stack/mac/scheduler_metric.h index 7feec2cb5..0b2fb2c95 100644 --- a/srsenb/hdr/stack/mac/scheduler_metric.h +++ b/srsenb/hdr/stack/mac/scheduler_metric.h @@ -31,24 +31,22 @@ class dl_metric_rr : public sched::metric_dl const static int MAX_RBG = 25; public: - void set_params(const sched_params_t& sched_params_, uint32_t enb_cc_idx) final; + void set_params(const sched_cell_params_t& cell_params_) final; void sched_users(std::map& ue_db, dl_sf_sched_itf* tti_sched) final; private: bool find_allocation(uint32_t nof_rbg, rbgmask_t* rbgmask); dl_harq_proc* allocate_user(sched_ue* user); - const sched_params_t* sched_params = nullptr; - const sched_cell_params_t* cell_params = nullptr; - srslte::log* log_h = nullptr; - uint32_t enb_cc_idx = 0; - dl_sf_sched_itf* tti_alloc = nullptr; + const sched_cell_params_t* cc_cfg = nullptr; + srslte::log* log_h = nullptr; + dl_sf_sched_itf* tti_alloc = nullptr; }; class ul_metric_rr : public sched::metric_ul { public: - void set_params(const sched_params_t& sched_params_, uint32_t enb_cc_idx) final; + void set_params(const 
sched_cell_params_t& cell_params_) final; void sched_users(std::map& ue_db, ul_sf_sched_itf* tti_sched) final; private: @@ -56,12 +54,10 @@ private: ul_harq_proc* allocate_user_newtx_prbs(sched_ue* user); ul_harq_proc* allocate_user_retx_prbs(sched_ue* user); - const sched_params_t* sched_params = nullptr; - const sched_cell_params_t* cell_params = nullptr; - srslte::log* log_h = nullptr; - ul_sf_sched_itf* tti_alloc = nullptr; - uint32_t current_tti = 0; - uint32_t enb_cc_idx = 0; + const sched_cell_params_t* cc_cfg = nullptr; + srslte::log* log_h = nullptr; + ul_sf_sched_itf* tti_alloc = nullptr; + uint32_t current_tti = 0; }; } // namespace srsenb diff --git a/srsenb/hdr/stack/mac/scheduler_ue.h b/srsenb/hdr/stack/mac/scheduler_ue.h index b843d9d87..a0b1a8bce 100644 --- a/srsenb/hdr/stack/mac/scheduler_ue.h +++ b/srsenb/hdr/stack/mac/scheduler_ue.h @@ -33,18 +33,18 @@ namespace srsenb { -class sched_params_t; -struct sched_cell_params_t; +class sched_cell_params_t; struct tti_params_t; +struct sched_dci_cce_t { + uint32_t cce_start[4][6]; + uint32_t nof_loc[4]; +}; + struct sched_ue_carrier { const static int SCHED_MAX_HARQ_PROC = SRSLTE_FDD_NOF_HARQ; - sched_ue_carrier(sched_interface::ue_cfg_t* cfg_, - const sched_cell_params_t* cell_cfg_, - uint16_t rnti_, - uint32_t cc_idx_, - srslte::log* log_); + sched_ue_carrier(sched_interface::ue_cfg_t* cfg_, const sched_cell_params_t* cell_cfg_, uint16_t rnti_); void reset(); // Harq access @@ -78,11 +78,13 @@ struct sched_ue_carrier { uint32_t max_aggr_level = 3; int fixed_mcs_ul = 0, fixed_mcs_dl = 0; + // Allowed DCI locations per per CFI and per subframe + std::array, 3> dci_locations = {}; + private: srslte::log* log_h = nullptr; sched_interface::ue_cfg_t* cfg = nullptr; const sched_cell_params_t* cell_params = nullptr; - uint32_t cc_idx; uint16_t rnti; }; @@ -93,16 +95,7 @@ private: */ class sched_ue { - public: - // used by sched_metric to store the pdsch/pusch allocations - bool has_pucch = false; - - typedef struct { - uint32_t cce_start[4][6]; - uint32_t nof_loc[4]; - } sched_dci_cce_t; - /************************************************************* * * FAPI-like Interface @@ -111,7 +104,10 @@ public: sched_ue(); void reset(); void phy_config_enabled(uint32_t tti, bool enabled); - void set_cfg(uint16_t rnti, const sched_params_t& sched_params_, sched_interface::ue_cfg_t* cfg); + void set_cfg(uint16_t rnti, + const std::vector& cell_list_params_, + sched_interface::ue_cfg_t* cfg, + uint32_t primary_cc_idx_); void set_bearer_cfg(uint32_t lc_id, srsenb::sched_interface::ue_bearer_cfg_t* cfg); void rem_bearer(uint32_t lc_id); @@ -203,7 +199,7 @@ public: int explicit_mcs = -1); srslte_dci_format_t get_dci_format(); - sched_dci_cce_t* get_locations(uint32_t current_cfi, uint32_t sf_idx); + sched_dci_cce_t* get_locations(uint32_t enb_cc_idx, uint32_t current_cfi, uint32_t sf_idx); sched_ue_carrier* get_ue_carrier(uint32_t enb_cc_idx); bool needs_cqi(uint32_t tti, uint32_t cc_idx, bool will_send = false); @@ -254,10 +250,11 @@ private: bool is_first_dl_tx(); /* Args */ - sched_interface::ue_cfg_t cfg = {}; - srslte_cell_t cell = {}; - srslte::log* log_h = nullptr; - const sched_params_t* sched_params = nullptr; + sched_interface::ue_cfg_t cfg = {}; + srslte_cell_t cell = {}; + srslte::log* log_h = nullptr; + const std::vector* cell_params_list = nullptr; + const sched_cell_params_t* main_cc_params = nullptr; std::mutex mutex; @@ -280,14 +277,11 @@ private: int next_tpc_pusch = 0; int next_tpc_pucch = 0; - // Allowed DCI locations per CFI 
and per subframe - std::array, 3> dci_locations = {}; - bool phy_config_dedicated_enabled = false; asn1::rrc::phys_cfg_ded_s::ant_info_c_ dl_ant_info; - std::vector carriers; ///< map of UE CellIndex to carrier configuration - std::map enb_ue_cellindex_map; + std::vector carriers; ///< map of UE CellIndex to carrier configuration + std::map enb_ue_cellindex_map; ///< map cc idx eNB -> UE }; } // namespace srsenb diff --git a/srsenb/src/stack/mac/mac.cc b/srsenb/src/stack/mac/mac.cc index 76949c7a2..fa705449b 100644 --- a/srsenb/src/stack/mac/mac.cc +++ b/srsenb/src/stack/mac/mac.cc @@ -75,7 +75,7 @@ bool mac::init(const mac_args_t& args_, args = args_; cell = *cell_; - scheduler.init(rrc, log_h); + scheduler.init(rrc); // Set default scheduler configuration scheduler.set_sched_cfg(&args.sched); @@ -214,7 +214,7 @@ int mac::ue_cfg(uint16_t rnti, sched_interface::ue_cfg_t* cfg) } // Update Scheduler configuration - if ((cfg != nullptr) ? scheduler.ue_cfg(rnti, cfg) : false) { + if ((cfg != nullptr) ? scheduler.ue_cfg(rnti, 0, cfg) : false) { // TODO: provide enb_cc_idx Error("Registering new UE rnti=0x%x to SCHED\n", rnti); } else { ret = 0; @@ -486,7 +486,7 @@ int mac::rach_detected(uint32_t tti, uint32_t enb_cc_idx, uint32_t preamble_idx, sched_interface::ue_cfg_t uecfg; bzero(&uecfg, sizeof(sched_interface::ue_cfg_t)); uecfg.ue_bearers[0].direction = srsenb::sched_interface::ue_bearer_cfg_t::BOTH; - if (scheduler.ue_cfg(rnti, &uecfg)) { + if (scheduler.ue_cfg(rnti, enb_cc_idx, &uecfg)) { Error("Registering new user rnti=0x%x to SCHED\n", rnti); return -1; } diff --git a/srsenb/src/stack/mac/scheduler.cc b/srsenb/src/stack/mac/scheduler.cc index c1a0d46f9..ef173284a 100644 --- a/srsenb/src/stack/mac/scheduler.cc +++ b/srsenb/src/stack/mac/scheduler.cc @@ -24,10 +24,12 @@ #include "srsenb/hdr/stack/mac/scheduler.h" #include "srsenb/hdr/stack/mac/scheduler_carrier.h" +#include "srslte/common/logmap.h" #include "srslte/common/pdu.h" #include "srslte/srslte.h" -#define Error(fmt, ...) log_h->error(fmt, ##__VA_ARGS__) +#define Console(fmt, ...) srslte::logmap::get("MAC ")->console(fmt, ##__VA_ARGS__) +#define Error(fmt, ...) srslte::logmap::get("MAC ")->error(fmt, ##__VA_ARGS__) #define Warning(fmt, ...) log_h->warning(fmt, ##__VA_ARGS__) #define Info(fmt, ...) log_h->info(fmt, ##__VA_ARGS__) #define Debug(fmt, ...) 
log_h->debug(fmt, ##__VA_ARGS__) @@ -52,80 +54,70 @@ uint32_t max_tti(uint32_t tti1, uint32_t tti2) * Sched Params *******************************************************/ -sched_params_t::sched_params_t() +bool sched_cell_params_t::set_cfg(uint32_t enb_cc_idx_, + const sched_interface::cell_cfg_t& cfg_, + const sched_interface::sched_args_t& sched_args) { - sched_cfg.pdsch_max_mcs = 28; - sched_cfg.pdsch_mcs = -1; - sched_cfg.pusch_max_mcs = 28; - sched_cfg.pusch_mcs = -1; - sched_cfg.nof_ctrl_symbols = 3; - sched_cfg.max_aggr_level = 3; -} + enb_cc_idx = enb_cc_idx_; + cfg = cfg_; + sched_cfg = &sched_args; -bool sched_params_t::set_cfg(srslte::log* log_, std::vector* cfg_, srslte_regs_t* regs_) -{ - log_h = log_; - regs = regs_; + // Basic cell config checks + if (cfg.si_window_ms == 0) { + Error("SCHED: Invalid si-window length 0 ms\n"); + return false; + } - // copy cell cfgs - cell_cfg.resize(cfg_->size()); - for (uint32_t i = 0; i < cfg_->size(); ++i) { - sched_cell_params_t& item = cell_cfg[i]; - item.cfg = &(*cfg_)[i]; + // PRACH has to fit within the PUSCH space + bool invalid_prach = cfg.cell.nof_prb == 6 and (cfg.prach_freq_offset + 6 > cfg.cell.nof_prb); + invalid_prach |= cfg.cell.nof_prb > 6 and ((cfg.prach_freq_offset + 6) > (cfg.cell.nof_prb - cfg.nrb_pucch) or + (int) cfg.prach_freq_offset < cfg.nrb_pucch); + if (invalid_prach) { + Error("Invalid PRACH configuration: frequency offset=%d outside bandwidth limits\n", cfg.prach_freq_offset); + Console("Invalid PRACH configuration: frequency offset=%d outside bandwidth limits\n", cfg.prach_freq_offset); + return false; + } - // Basic cell config checks - if (item.cfg->si_window_ms == 0) { - Error("SCHED: Invalid si-window length 0 ms\n"); - return false; - } + // Set derived sched parameters - item.P = srslte_ra_type0_P(item.cfg->cell.nof_prb); - item.nof_rbgs = srslte::ceil_div(item.cfg->cell.nof_prb, item.P); - - // PRACH has to fit within the PUSCH space - bool invalid_prach = item.cfg->cell.nof_prb == 6 and (item.cfg->prach_freq_offset + 6 > item.cfg->cell.nof_prb); - invalid_prach |= item.cfg->cell.nof_prb > 6 and - ((item.cfg->prach_freq_offset + 6) > (item.cfg->cell.nof_prb - item.cfg->nrb_pucch) or - (int) item.cfg->prach_freq_offset < item.cfg->nrb_pucch); - if (invalid_prach) { - log_h->error("Invalid PRACH configuration: frequency offset=%d outside bandwidth limits\n", - item.cfg->prach_freq_offset); - log_h->console("Invalid PRACH configuration: frequency offset=%d outside bandwidth limits\n", - item.cfg->prach_freq_offset); - return false; - } + // init regs + regs.reset(new srslte_regs_t{}); + if (srslte_regs_init(regs.get(), cfg.cell) != LIBLTE_SUCCESS) { + Error("Getting DCI locations\n"); + return false; } // Compute Common locations for DCI for each CFI for (uint32_t cfi = 0; cfi < 3; cfi++) { - sched::generate_cce_location(regs, &common_locations[cfi], cfi + 1); + sched::generate_cce_location(regs.get(), &common_locations[cfi], cfi + 1); + } + if (common_locations[sched_cfg->nof_ctrl_symbols - 1].nof_loc[2] == 0) { + Error("SCHED: Current cfi=%d is not valid for broadcast (check scheduler.nof_ctrl_symbols in conf file).\n", + sched_cfg->nof_ctrl_symbols); + Console("SCHED: Current cfi=%d is not valid for broadcast (check scheduler.nof_ctrl_symbols in conf file).\n", + sched_cfg->nof_ctrl_symbols); + return false; } // Compute UE locations for RA-RNTI for (uint32_t cfi = 0; cfi < 3; cfi++) { for (uint32_t sf_idx = 0; sf_idx < 10; sf_idx++) { - sched::generate_cce_location(regs, &rar_locations[cfi][sf_idx], 
cfi + 1, sf_idx); + sched::generate_cce_location(regs.get(), &rar_locations[cfi][sf_idx], cfi + 1, sf_idx); } } // precompute nof cces in PDCCH for each CFI for (uint32_t cfix = 0; cfix < nof_cce_table.size(); ++cfix) { - int ret = srslte_regs_pdcch_ncce(regs, cfix + 1); + int ret = srslte_regs_pdcch_ncce(regs.get(), cfix + 1); if (ret < 0) { - log_h->error("SCHED: Failed to calculate the number of CCEs in the PDCCH\n"); + Error("SCHED: Failed to calculate the number of CCEs in the PDCCH\n"); return false; } nof_cce_table[cfix] = (uint32_t)ret; } - if (common_locations[sched_cfg.nof_ctrl_symbols - 1].nof_loc[2] == 0) { - log_h->error("SCHED: Current cfi=%d is not valid for broadcast (check scheduler.nof_ctrl_symbols in conf file).\n", - sched_cfg.nof_ctrl_symbols); - log_h->console( - "SCHED: Current cfi=%d is not valid for broadcast (check scheduler.nof_ctrl_symbols in conf file).\n", - sched_cfg.nof_ctrl_symbols); - return false; - } + P = srslte_ra_type0_P(cfg.cell.nof_prb); + nof_rbgs = srslte::ceil_div(cfg.cell.nof_prb, P); return true; } @@ -137,28 +129,17 @@ bool sched_params_t::set_cfg(srslte::log* log_, std::vector& cell_cfg) { - cfg = cell_cfg; - - // Get DCI locations - if (srslte_regs_init(®s, cfg[0].cell) != LIBLTE_SUCCESS) { - Error("Getting DCI locations\n"); - return SRSLTE_ERROR; - } - - // Setup common sched_params - if (not sched_params.set_cfg(log_h, &cfg, ®s)) { - return -1; + // Setup derived config params + sched_cell_params.resize(cell_cfg.size()); + for (uint32_t cc_idx = 0; cc_idx < cell_cfg.size(); ++cc_idx) { + if (not sched_cell_params[cc_idx].set_cfg(cc_idx, cell_cfg[cc_idx], sched_cfg)) { + return SRSLTE_ERROR; + } } // Create remaining cells, if not created yet uint32_t prev_size = carrier_schedulers.size(); - carrier_schedulers.resize(sched_params.cell_cfg.size()); - for (uint32_t i = prev_size; i < sched_params.cell_cfg.size(); ++i) { + carrier_schedulers.resize(sched_cell_params.size()); + for (uint32_t i = prev_size; i < sched_cell_params.size(); ++i) { carrier_schedulers[i].reset(new carrier_sched{rrc, &ue_db, i}); } - // Setup the ra/bc/tti_scheduler for each TTI - for (std::unique_ptr& c : carrier_schedulers) { - c->carrier_cfg(sched_params); + // setup all carriers cfg params + for (uint32_t i = 0; i < sched_cell_params.size(); ++i) { + carrier_schedulers[i]->carrier_cfg(sched_cell_params[i]); } + configured = true; return 0; @@ -223,11 +200,11 @@ int sched::cell_cfg(const std::vector& cell_cfg) * *******************************************************/ -int sched::ue_cfg(uint16_t rnti, sched_interface::ue_cfg_t* ue_cfg) +int sched::ue_cfg(uint16_t rnti, uint32_t enb_cc_idx, sched_interface::ue_cfg_t* ue_cfg) { // Add or config user pthread_rwlock_wrlock(&rwlock); - ue_db[rnti].set_cfg(rnti, sched_params, ue_cfg); + ue_db[rnti].set_cfg(rnti, sched_cell_params, ue_cfg, enb_cc_idx); pthread_rwlock_unlock(&rwlock); return 0; @@ -442,13 +419,13 @@ int sched::ul_sched(uint32_t tti, uint32_t cc_idx, srsenb::sched_interface::ul_s * *******************************************************/ -void sched::generate_cce_location(srslte_regs_t* regs_, - sched_ue::sched_dci_cce_t* location, - uint32_t cfi, - uint32_t sf_idx, - uint16_t rnti) +void sched::generate_cce_location(srslte_regs_t* regs_, + sched_dci_cce_t* location, + uint32_t cfi, + uint32_t sf_idx, + uint16_t rnti) { - bzero(location, sizeof(sched_ue::sched_dci_cce_t)); + *location = {}; srslte_dci_location_t loc[64]; uint32_t nloc = 0; diff --git a/srsenb/src/stack/mac/scheduler_carrier.cc 
b/srsenb/src/stack/mac/scheduler_carrier.cc index 16c2ddf4e..68563e6c5 100644 --- a/srsenb/src/stack/mac/scheduler_carrier.cc +++ b/srsenb/src/stack/mac/scheduler_carrier.cc @@ -21,6 +21,7 @@ #include "srsenb/hdr/stack/mac/scheduler_carrier.h" #include "srsenb/hdr/stack/mac/scheduler_metric.h" +#include "srslte/common/logmap.h" #define Error(fmt, ...) log_h->error(fmt, ##__VA_ARGS__) #define Warning(fmt, ...) log_h->warning(fmt, ##__VA_ARGS__) @@ -33,7 +34,7 @@ namespace srsenb { * Broadcast (SIB+Paging) scheduling *******************************************************/ -bc_sched::bc_sched(const sched::cell_cfg_t& cfg_, srsenb::rrc_interface_mac* rrc_) : cfg(&cfg_), rrc(rrc_) {} +bc_sched::bc_sched(const sched_cell_params_t& cfg_, srsenb::rrc_interface_mac* rrc_) : cc_cfg(&cfg_), rrc(rrc_) {} void bc_sched::dl_sched(sf_sched* tti_sched) { @@ -59,7 +60,7 @@ void bc_sched::update_si_windows(sf_sched* tti_sched) for (uint32_t i = 0; i < pending_sibs.size(); ++i) { // There is SIB data - if (cfg->sibs[i].len == 0) { + if (cc_cfg->cfg.sibs[i].len == 0) { continue; } @@ -67,17 +68,17 @@ void bc_sched::update_si_windows(sf_sched* tti_sched) uint32_t sf = 5; uint32_t x = 0; if (i > 0) { - x = (i - 1) * cfg->si_window_ms; + x = (i - 1) * cc_cfg->cfg.si_window_ms; sf = x % 10; } - if ((current_sfn % (cfg->sibs[i].period_rf)) == x / 10 && current_sf_idx == sf) { + if ((current_sfn % (cc_cfg->cfg.sibs[i].period_rf)) == x / 10 && current_sf_idx == sf) { pending_sibs[i].is_in_window = true; pending_sibs[i].window_start = tti_tx_dl; pending_sibs[i].n_tx = 0; } } else { if (i > 0) { - if (srslte_tti_interval(tti_tx_dl, pending_sibs[i].window_start) > cfg->si_window_ms) { + if (srslte_tti_interval(tti_tx_dl, pending_sibs[i].window_start) > cc_cfg->cfg.si_window_ms) { // the si window has passed pending_sibs[i] = {}; } @@ -94,14 +95,14 @@ void bc_sched::update_si_windows(sf_sched* tti_sched) void bc_sched::alloc_sibs(sf_sched* tti_sched) { for (uint32_t i = 0; i < pending_sibs.size(); i++) { - if (cfg->sibs[i].len > 0 and pending_sibs[i].is_in_window and pending_sibs[i].n_tx < 4) { - uint32_t nof_tx = (i > 0) ? SRSLTE_MIN(srslte::ceil_div(cfg->si_window_ms, 10), 4) : 4; + if (cc_cfg->cfg.sibs[i].len > 0 and pending_sibs[i].is_in_window and pending_sibs[i].n_tx < 4) { + uint32_t nof_tx = (i > 0) ? SRSLTE_MIN(srslte::ceil_div(cc_cfg->cfg.si_window_ms, 10), 4) : 4; uint32_t n_sf = (tti_sched->get_tti_tx_dl() - pending_sibs[i].window_start); // Check if there is any SIB to tx bool sib1_flag = (i == 0) and (current_sfn % 2) == 0 and current_sf_idx == 5; bool other_sibs_flag = - (i > 0) and (n_sf >= (cfg->si_window_ms / nof_tx) * pending_sibs[i].n_tx) and current_sf_idx == 9; + (i > 0) and (n_sf >= (cc_cfg->cfg.si_window_ms / nof_tx) * pending_sibs[i].n_tx) and current_sf_idx == 9; if (not sib1_flag and not other_sibs_flag) { continue; } @@ -135,8 +136,8 @@ void bc_sched::reset() * RAR scheduling *******************************************************/ -ra_sched::ra_sched(const sched::cell_cfg_t& cfg_, srslte::log* log_, std::map& ue_db_) : - cfg(&cfg_), +ra_sched::ra_sched(const sched_cell_params_t& cfg_, srslte::log* log_, std::map& ue_db_) : + cc_cfg(&cfg_), log_h(log_), ue_db(&ue_db_) { @@ -155,15 +156,15 @@ void ra_sched::dl_sched(srsenb::sf_sched* tti_sched) uint32_t prach_tti = rar.prach_tti; // Discard all RARs out of the window. 
The first one inside the window is scheduled, if we can't we exit - if (not sched_utils::is_in_tti_interval(tti_tx_dl, prach_tti + 3, prach_tti + 3 + cfg->prach_rar_window)) { - if (tti_tx_dl >= prach_tti + 3 + cfg->prach_rar_window) { + if (not sched_utils::is_in_tti_interval(tti_tx_dl, prach_tti + 3, prach_tti + 3 + cc_cfg->cfg.prach_rar_window)) { + if (tti_tx_dl >= prach_tti + 3 + cc_cfg->cfg.prach_rar_window) { log_h->console("SCHED: Could not transmit RAR within the window (RA TTI=%d, Window=%d, Now=%d)\n", prach_tti, - cfg->prach_rar_window, + cc_cfg->cfg.prach_rar_window, tti_tx_dl); log_h->error("SCHED: Could not transmit RAR within the window (RA TTI=%d, Window=%d, Now=%d)\n", prach_tti, - cfg->prach_rar_window, + cc_cfg->cfg.prach_rar_window, tti_tx_dl); // Remove from pending queue and get next one if window has passed already pending_rars.pop_front(); @@ -265,7 +266,7 @@ void ra_sched::sched_msg3(sf_sched* sf_msg3_sched, const sched_interface::dl_sch auto& grant = dl_sched_result.rar[i].msg3_grant[j]; sf_sched::pending_msg3_t msg3; - srslte_ra_type2_from_riv(grant.grant.rba, &msg3.L, &msg3.n_prb, cfg->cell.nof_prb, cfg->cell.nof_prb); + srslte_ra_type2_from_riv(grant.grant.rba, &msg3.L, &msg3.n_prb, cc_cfg->nof_prb(), cc_cfg->nof_prb()); msg3.mcs = grant.grant.trunc_mcs; msg3.rnti = grant.data.temp_crnti; @@ -288,6 +289,7 @@ sched::carrier_sched::carrier_sched(rrc_interface_mac* rrc_, uint32_t enb_cc_idx_) : rrc(rrc_), ue_db(ue_db_), + log_h(srslte::logmap::get("MAC ")), enb_cc_idx(enb_cc_idx_) { sf_dl_mask.resize(1, 0); @@ -300,12 +302,10 @@ void sched::carrier_sched::reset() bc_sched_ptr.reset(); } -void sched::carrier_sched::carrier_cfg(const sched_params_t& sched_params_) +void sched::carrier_sched::carrier_cfg(const sched_cell_params_t& cell_params_) { - // sched::cfg is now fully set - sched_params = &sched_params_; - log_h = sched_params->log_h; - cc_cfg = sched_params->cell_cfg[enb_cc_idx].cfg; + // carrier_sched is now fully set + cc_cfg = &cell_params_; std::lock_guard lock(carrier_mutex); @@ -315,22 +315,22 @@ void sched::carrier_sched::carrier_cfg(const sched_params_t& sched_params_) // Setup data scheduling algorithms dl_metric.reset(new srsenb::dl_metric_rr{}); - dl_metric->set_params(*sched_params, enb_cc_idx); + dl_metric->set_params(*cc_cfg); ul_metric.reset(new srsenb::ul_metric_rr{}); - ul_metric->set_params(*sched_params, enb_cc_idx); + ul_metric->set_params(*cc_cfg); // Setup constant PUCCH/PRACH mask - pucch_mask.resize(cc_cfg->cell.nof_prb); - if (cc_cfg->nrb_pucch > 0) { - pucch_mask.fill(0, (uint32_t)cc_cfg->nrb_pucch); - pucch_mask.fill(cc_cfg->cell.nof_prb - cc_cfg->nrb_pucch, cc_cfg->cell.nof_prb); + pucch_mask.resize(cc_cfg->nof_prb()); + if (cc_cfg->cfg.nrb_pucch > 0) { + pucch_mask.fill(0, (uint32_t)cc_cfg->cfg.nrb_pucch); + pucch_mask.fill(cc_cfg->nof_prb() - cc_cfg->cfg.nrb_pucch, cc_cfg->nof_prb()); } - prach_mask.resize(cc_cfg->cell.nof_prb); - prach_mask.fill(cc_cfg->prach_freq_offset, cc_cfg->prach_freq_offset + 6); + prach_mask.resize(cc_cfg->nof_prb()); + prach_mask.fill(cc_cfg->cfg.prach_freq_offset, cc_cfg->cfg.prach_freq_offset + 6); // Initiate the tti_scheduler for each TTI for (sf_sched& tti_sched : sf_scheds) { - tti_sched.init(*sched_params, enb_cc_idx); + tti_sched.init(*cc_cfg); } } @@ -345,7 +345,7 @@ sf_sched* sched::carrier_sched::generate_tti_result(uint32_t tti_rx) // if it is the first time tti is run, reset vars if (tti_rx != tti_sched->get_tti_rx()) { - uint32_t start_cfi = sched_params->sched_cfg.nof_ctrl_symbols; + 
uint32_t start_cfi = cc_cfg->sched_cfg->nof_ctrl_symbols; bool dl_active = sf_dl_mask[tti_sched->get_tti_tx_dl() % sf_dl_mask.size()] == 0; tti_sched->new_tti(tti_rx, start_cfi); @@ -433,9 +433,9 @@ void sched::carrier_sched::alloc_dl_users(sf_sched* tti_result) } // NOTE: In case of 6 PRBs, do not transmit if there is going to be a PRACH in the UL to avoid collisions - if (cc_cfg->cell.nof_prb == 6) { - uint32_t tti_rx_ack = TTI_RX_ACK(tti_result->get_tti_rx()); - if (srslte_prach_tti_opportunity_config_fdd(cc_cfg->prach_config, tti_rx_ack, -1)) { + if (cc_cfg->nof_prb() == 6) { + uint32_t tti_rx_ack = TTI_RX_ACK(tti_result->get_tti_rx()); + if (srslte_prach_tti_opportunity_config_fdd(cc_cfg->cfg.prach_config, tti_rx_ack, -1)) { tti_result->get_dl_mask().fill(0, tti_result->get_dl_mask().size()); } } @@ -450,7 +450,7 @@ int sched::carrier_sched::alloc_ul_users(sf_sched* tti_sched) prbmask_t& ul_mask = tti_sched->get_ul_mask(); /* reserve PRBs for PRACH */ - if (srslte_prach_tti_opportunity_config_fdd(cc_cfg->prach_config, tti_tx_ul, -1)) { + if (srslte_prach_tti_opportunity_config_fdd(cc_cfg->cfg.prach_config, tti_tx_ul, -1)) { ul_mask = prach_mask; log_h->debug("SCHED: Allocated PRACH RBs. Mask: 0x%s\n", prach_mask.to_hex().c_str()); } @@ -459,7 +459,7 @@ int sched::carrier_sched::alloc_ul_users(sf_sched* tti_sched) ra_sched_ptr->ul_sched(tti_sched); /* reserve PRBs for PUCCH */ - if (cc_cfg->cell.nof_prb != 6 and (ul_mask & pucch_mask).any()) { + if (cc_cfg->nof_prb() != 6 and (ul_mask & pucch_mask).any()) { log_h->error("There was a collision with the PUCCH. current mask=0x%s, pucch_mask=0x%s\n", ul_mask.to_hex().c_str(), pucch_mask.to_hex().c_str()); diff --git a/srsenb/src/stack/mac/scheduler_grid.cc b/srsenb/src/stack/mac/scheduler_grid.cc index c732ff62a..589a00f06 100644 --- a/srsenb/src/stack/mac/scheduler_grid.cc +++ b/srsenb/src/stack/mac/scheduler_grid.cc @@ -21,6 +21,7 @@ #include "srsenb/hdr/stack/mac/scheduler_grid.h" #include "srsenb/hdr/stack/mac/scheduler.h" +#include "srslte/common/logmap.h" #include #define Error(fmt, ...) 
log_h->error(fmt, ##__VA_ARGS__) @@ -58,11 +59,10 @@ tti_params_t::tti_params_t(uint32_t tti_rx_) : * PDCCH Allocation Methods *******************************************************/ -void pdcch_grid_t::init(const sched_params_t& sched_params_) +void pdcch_grid_t::init(const sched_cell_params_t& cell_params_) { - sched_params = &sched_params_; - log_h = sched_params_.log_h; - + cc_cfg = &cell_params_; + log_h = srslte::logmap::get("MAC "); reset(); } @@ -73,19 +73,21 @@ void pdcch_grid_t::new_tti(const tti_params_t& tti_params_, uint32_t start_cfi) reset(); } -const sched_ue::sched_dci_cce_t* pdcch_grid_t::get_cce_loc_table(alloc_type_t alloc_type, sched_ue* user) const +const sched_dci_cce_t* pdcch_grid_t::get_cce_loc_table(alloc_type_t alloc_type, sched_ue* user) const { switch (alloc_type) { case alloc_type_t::DL_BC: - return &sched_params->common_locations[current_cfix]; + return &cc_cfg->common_locations[current_cfix]; case alloc_type_t::DL_PCCH: - return &sched_params->common_locations[current_cfix]; + return &cc_cfg->common_locations[current_cfix]; case alloc_type_t::DL_RAR: - return &sched_params->rar_locations[current_cfix][tti_params->sf_idx]; + return &cc_cfg->rar_locations[current_cfix][tti_params->sf_idx]; case alloc_type_t::DL_DATA: - return user->get_locations(current_cfix + 1, tti_params->sf_idx); + return user->get_locations(cc_cfg->enb_cc_idx, current_cfix + 1, tti_params->sf_idx); case alloc_type_t::UL_DATA: - return user->get_locations(current_cfix + 1, tti_params->sf_idx); + return user->get_locations(cc_cfg->enb_cc_idx, current_cfix + 1, tti_params->sf_idx); + default: + break; } return nullptr; } @@ -95,7 +97,7 @@ bool pdcch_grid_t::alloc_dci(alloc_type_t alloc_type, uint32_t aggr_idx, sched_u // TODO: Make the alloc tree update lazy /* Get DCI Location Table */ - const sched_ue::sched_dci_cce_t* dci_locs = get_cce_loc_table(alloc_type, user); + const sched_dci_cce_t* dci_locs = get_cce_loc_table(alloc_type, user); if (dci_locs == nullptr) { return false; } @@ -122,11 +124,11 @@ bool pdcch_grid_t::alloc_dci(alloc_type_t alloc_type, uint32_t aggr_idx, sched_u return true; } -void pdcch_grid_t::update_alloc_tree(int parent_node_idx, - uint32_t aggr_idx, - sched_ue* user, - alloc_type_t alloc_type, - const sched_ue::sched_dci_cce_t* dci_locs) +void pdcch_grid_t::update_alloc_tree(int parent_node_idx, + uint32_t aggr_idx, + sched_ue* user, + alloc_type_t alloc_type, + const sched_dci_cce_t* dci_locs) { alloc_t alloc; alloc.rnti = (user != nullptr) ? 
user->get_rnti() : (uint16_t)0u; @@ -187,7 +189,7 @@ bool pdcch_grid_t::set_cfi(uint32_t cfi) uint32_t pdcch_grid_t::nof_cces() const { - return sched_params->nof_cce_table[current_cfix]; + return cc_cfg->nof_cce_table[current_cfix]; } void pdcch_grid_t::reset() @@ -263,17 +265,15 @@ std::string pdcch_grid_t::result_to_string(bool verbose) const * TTI resource Scheduling Methods *******************************************************/ -void sf_grid_t::init(const sched_params_t& sched_params_, uint32_t enb_cc_idx_) +void sf_grid_t::init(const sched_cell_params_t& cell_params_, uint32_t enb_cc_idx_) { - sched_params = &sched_params_; - enb_cc_idx = enb_cc_idx_; - log_h = sched_params->log_h; - cell_cfg = sched_params->cell_cfg[enb_cc_idx].cfg; - nof_rbgs = sched_params->cell_cfg[enb_cc_idx].nof_rbgs; - si_n_rbg = srslte::ceil_div(4, sched_params->cell_cfg[enb_cc_idx].P); - rar_n_rbg = srslte::ceil_div(3, sched_params->cell_cfg[enb_cc_idx].P); + cc_cfg = &cell_params_; + log_h = srslte::logmap::get("MAC "); + nof_rbgs = cc_cfg->nof_rbgs; + si_n_rbg = srslte::ceil_div(4, cc_cfg->P); + rar_n_rbg = srslte::ceil_div(3, cc_cfg->P); - pdcch_alloc.init(*sched_params); + pdcch_alloc.init(*cc_cfg); } void sf_grid_t::new_tti(const tti_params_t& tti_params_, uint32_t start_cfi) @@ -285,7 +285,7 @@ void sf_grid_t::new_tti(const tti_params_t& tti_params_, uint32_t start_cfi) dl_mask.reset(); dl_mask.resize(nof_rbgs); ul_mask.reset(); - ul_mask.resize(cell_cfg->cell.nof_prb); + ul_mask.resize(cc_cfg->nof_prb()); pdcch_alloc.new_tti(*tti_params, start_cfi); } @@ -344,8 +344,8 @@ alloc_outcome_t sf_grid_t::alloc_dl_data(sched_ue* user, const rbgmask_t& user_m { srslte_dci_format_t dci_format = user->get_dci_format(); uint32_t nof_bits = - srslte_dci_format_sizeof(const_cast(&cell_cfg->cell), nullptr, nullptr, dci_format); - uint32_t aggr_level = user->get_ue_carrier(enb_cc_idx)->get_aggr_level(nof_bits); + srslte_dci_format_sizeof(const_cast(&cc_cfg->cfg.cell), nullptr, nullptr, dci_format); + uint32_t aggr_level = user->get_ue_carrier(cc_cfg->enb_cc_idx)->get_aggr_level(nof_bits); return alloc_dl(aggr_level, alloc_type_t::DL_DATA, user_mask, user); } @@ -364,8 +364,8 @@ alloc_outcome_t sf_grid_t::alloc_ul_data(sched_ue* user, ul_harq_proc::ul_alloc_ // Generate PDCCH except for RAR and non-adaptive retx if (needs_pdcch) { uint32_t nof_bits = - srslte_dci_format_sizeof(const_cast(&cell_cfg->cell), nullptr, nullptr, SRSLTE_DCI_FORMAT0); - uint32_t aggr_idx = user->get_ue_carrier(enb_cc_idx)->get_aggr_level(nof_bits); + srslte_dci_format_sizeof(const_cast(&cc_cfg->cfg.cell), nullptr, nullptr, SRSLTE_DCI_FORMAT0); + uint32_t aggr_idx = user->get_ue_carrier(cc_cfg->enb_cc_idx)->get_aggr_level(nof_bits); if (not pdcch_alloc.alloc_dci(alloc_type_t::UL_DATA, aggr_idx, user)) { if (log_h->get_level() == srslte::LOG_LEVEL_DEBUG) { log_h->debug("No space in PDCCH for rnti=0x%x UL tx. 
Current PDCCH allocation: %s\n", @@ -385,14 +385,12 @@ alloc_outcome_t sf_grid_t::alloc_ul_data(sched_ue* user, ul_harq_proc::ul_alloc_ * TTI resource Scheduling Methods *******************************************************/ -void sf_sched::init(const sched_params_t& sched_params_, uint32_t enb_cc_idx_) +void sf_sched::init(const sched_cell_params_t& cell_params_) { - sched_params = &sched_params_; - enb_cc_idx = enb_cc_idx_; - cell_cfg = &sched_params->cell_cfg[enb_cc_idx]; - log_h = sched_params->log_h; - tti_alloc.init(*sched_params, 0); - max_msg3_prb = std::max(6u, cell_cfg->cfg->cell.nof_prb - (uint32_t)cell_cfg->cfg->nrb_pucch); + cc_cfg = &cell_params_; + log_h = srslte::logmap::get("MAC "); + tti_alloc.init(*cc_cfg, 0); + max_msg3_prb = std::max(6u, cc_cfg->cfg.cell.nof_prb - (uint32_t)cc_cfg->cfg.nrb_pucch); } void sf_sched::new_tti(uint32_t tti_rx_, uint32_t start_cfi) @@ -413,10 +411,10 @@ void sf_sched::new_tti(uint32_t tti_rx_, uint32_t start_cfi) ul_data_allocs.clear(); // setup first prb to be used for msg3 alloc - last_msg3_prb = cell_cfg->cfg->nrb_pucch; + last_msg3_prb = cc_cfg->cfg.nrb_pucch; uint32_t tti_msg3_alloc = TTI_ADD(tti_params.tti_tx_ul, MSG3_DELAY_MS); - if (srslte_prach_tti_opportunity_config_fdd(cell_cfg->cfg->prach_config, tti_msg3_alloc, -1)) { - last_msg3_prb = std::max(last_msg3_prb, cell_cfg->cfg->prach_freq_offset + 6); + if (srslte_prach_tti_opportunity_config_fdd(cc_cfg->cfg.prach_config, tti_msg3_alloc, -1)) { + last_msg3_prb = std::max(last_msg3_prb, cc_cfg->cfg.prach_freq_offset + 6); } } @@ -470,7 +468,7 @@ sf_sched::ctrl_code_t sf_sched::alloc_dl_ctrl(uint32_t aggr_lvl, uint32_t tbs_by alloc_outcome_t sf_sched::alloc_bc(uint32_t aggr_lvl, uint32_t sib_idx, uint32_t sib_ntx) { - uint32_t sib_len = cell_cfg->cfg->sibs[sib_idx].len; + uint32_t sib_len = cc_cfg->cfg.sibs[sib_idx].len; uint32_t rv = sched::get_rvidx(sib_ntx); ctrl_code_t ret = alloc_dl_ctrl(aggr_lvl, sib_len, SRSLTE_SIRNTI); if (not ret.first) { @@ -544,7 +542,7 @@ std::pair sf_sched::alloc_rar(uint32_t aggr_lvl, cons rar_grant.msg3_grant[i].data = rar.msg3_grant[i]; rar_grant.msg3_grant[i].grant.tpc_pusch = 3; rar_grant.msg3_grant[i].grant.trunc_mcs = 0; - uint32_t rba = srslte_ra_type2_to_riv(msg3_grant_size, last_msg3_prb, cell_cfg->cfg->cell.nof_prb); + uint32_t rba = srslte_ra_type2_to_riv(msg3_grant_size, last_msg3_prb, cc_cfg->cfg.cell.nof_prb); rar_grant.msg3_grant[i].grant.rba = rba; last_msg3_prb += msg3_grant_size; @@ -616,7 +614,7 @@ alloc_outcome_t sf_sched::alloc_ul_user(sched_ue* user, ul_harq_proc::ul_alloc_t { // check whether adaptive/non-adaptive retx/newtx sf_sched::ul_alloc_t::type_t alloc_type; - ul_harq_proc* h = user->get_ul_harq(get_tti_tx_ul(), user->get_cell_index(enb_cc_idx).second); + ul_harq_proc* h = user->get_ul_harq(get_tti_tx_ul(), user->get_cell_index(cc_cfg->enb_cc_idx).second); bool has_retx = h->has_pending_retx(); if (has_retx) { ul_harq_proc::ul_alloc_t prev_alloc = h->get_alloc(); @@ -641,7 +639,7 @@ void sf_sched::set_bc_sched_result(const pdcch_grid_t::alloc_result_t& dci_resul bc->dci.location = dci_result[bc_alloc.dci_idx]->dci_pos; /* Generate DCI format1A */ - prb_range_t prb_range = prb_range_t(bc_alloc.rbg_range, cell_cfg->P); + prb_range_t prb_range = prb_range_t(bc_alloc.rbg_range, cc_cfg->P); int tbs = generate_format1a( prb_range.prb_start, prb_range.length(), bc_alloc.req_bytes, bc_alloc.rv, bc_alloc.rnti, &bc->dci); @@ -671,7 +669,7 @@ void sf_sched::set_bc_sched_result(const pdcch_grid_t::alloc_result_t& dci_resul 
bc->dci.location.ncce, bc_alloc.rv, bc_alloc.req_bytes, - cell_cfg->cfg->sibs[bc_alloc.sib_idx].period_rf, + cc_cfg->cfg.sibs[bc_alloc.sib_idx].period_rf, bc->dci.tb[0].mcs_idx); } else { // Paging @@ -710,7 +708,7 @@ void sf_sched::set_rar_sched_result(const pdcch_grid_t::alloc_result_t& dci_resu rar->dci.location = dci_result[rar_alloc.alloc_data.dci_idx]->dci_pos; /* Generate DCI format1A */ - prb_range_t prb_range = prb_range_t(rar_alloc.alloc_data.rbg_range, cell_cfg->P); + prb_range_t prb_range = prb_range_t(rar_alloc.alloc_data.rbg_range, cc_cfg->P); int tbs = generate_format1a(prb_range.prb_start, prb_range.length(), rar_alloc.alloc_data.req_bytes, @@ -762,7 +760,7 @@ void sf_sched::set_dl_data_sched_result(const pdcch_grid_t::alloc_result_t& dci_ // Generate DCI Format1/2/2A sched_ue* user = data_alloc.user_ptr; - uint32_t cell_index = user->get_cell_index(enb_cc_idx).second; + uint32_t cell_index = user->get_cell_index(cc_cfg->enb_cc_idx).second; dl_harq_proc* h = user->get_dl_harq(data_alloc.pid, cell_index); uint32_t data_before = user->get_pending_dl_new_data(); srslte_dci_format_t dci_format = user->get_dci_format(); @@ -818,7 +816,7 @@ void sf_sched::set_ul_sched_result(const pdcch_grid_t::alloc_result_t& dci_resul sched_interface::ul_sched_data_t* pusch = &ul_sched_result.pusch[ul_sched_result.nof_dci_elems]; sched_ue* user = ul_alloc.user_ptr; - uint32_t cell_index = user->get_cell_index(enb_cc_idx).second; + uint32_t cell_index = user->get_cell_index(cc_cfg->enb_cc_idx).second; srslte_dci_location_t cce_range = {0, 0}; if (ul_alloc.needs_pdcch()) { @@ -903,7 +901,7 @@ void sf_sched::generate_dcis() uint32_t sf_sched::get_nof_ctrl_symbols() const { - return tti_alloc.get_cfi() + ((cell_cfg->cfg->cell.nof_prb <= 10) ? 1 : 0); + return tti_alloc.get_cfi() + ((cc_cfg->cfg.cell.nof_prb <= 10) ? 1 : 0); } int sf_sched::generate_format1a(uint32_t rb_start, @@ -945,7 +943,7 @@ int sf_sched::generate_format1a(uint32_t rb_start, dci->alloc_type = SRSLTE_RA_ALLOC_TYPE2; dci->type2_alloc.mode = srslte_ra_type2_t::SRSLTE_RA_TYPE2_LOC; - dci->type2_alloc.riv = srslte_ra_type2_to_riv(l_crb, rb_start, cell_cfg->cfg->cell.nof_prb); + dci->type2_alloc.riv = srslte_ra_type2_to_riv(l_crb, rb_start, cc_cfg->cfg.cell.nof_prb); dci->pid = 0; dci->tb[0].mcs_idx = mcs; dci->tb[0].rv = rv; diff --git a/srsenb/src/stack/mac/scheduler_metric.cc b/srsenb/src/stack/mac/scheduler_metric.cc index 4141280b0..78a023516 100644 --- a/srsenb/src/stack/mac/scheduler_metric.cc +++ b/srsenb/src/stack/mac/scheduler_metric.cc @@ -21,6 +21,7 @@ #include "srsenb/hdr/stack/mac/scheduler_metric.h" #include "srsenb/hdr/stack/mac/scheduler_harq.h" +#include "srslte/common/logmap.h" #include #define Error(fmt, ...) 
log_h->error(fmt, ##__VA_ARGS__) @@ -36,12 +37,10 @@ namespace srsenb { * *****************************************************************/ -void dl_metric_rr::set_params(const sched_params_t& sched_params_, uint32_t enb_cc_idx_) +void dl_metric_rr::set_params(const sched_cell_params_t& cell_params_) { - sched_params = &sched_params_; - enb_cc_idx = enb_cc_idx_; - cell_params = &sched_params->cell_cfg[enb_cc_idx]; - log_h = sched_params_.log_h; + cc_cfg = &cell_params_; + log_h = srslte::logmap::get("MAC "); } void dl_metric_rr::sched_users(std::map& ue_db, dl_sf_sched_itf* tti_sched) @@ -85,7 +84,7 @@ dl_harq_proc* dl_metric_rr::allocate_user(sched_ue* user) if (tti_alloc->is_dl_alloc(user)) { return nullptr; } - auto p = user->get_cell_index(enb_cc_idx); + auto p = user->get_cell_index(cc_cfg->enb_cc_idx); if (not p.first) { return nullptr; } @@ -137,7 +136,7 @@ dl_harq_proc* dl_metric_rr::allocate_user(sched_ue* user) // Allocate resources based on pending data if (req_bytes > 0) { uint32_t pending_prbs = user->get_required_prb_dl(cell_idx, req_bytes, tti_alloc->get_nof_ctrl_symbols()); - uint32_t pending_rbg = cell_params->prb_to_rbg(pending_prbs); + uint32_t pending_rbg = cc_cfg->prb_to_rbg(pending_prbs); rbgmask_t newtx_mask(tti_alloc->get_dl_mask().size()); find_allocation(pending_rbg, &newtx_mask); if (newtx_mask.any()) { // some empty spaces were found @@ -158,12 +157,10 @@ dl_harq_proc* dl_metric_rr::allocate_user(sched_ue* user) * *****************************************************************/ -void ul_metric_rr::set_params(const sched_params_t& sched_params_, uint32_t enb_cc_idx_) +void ul_metric_rr::set_params(const sched_cell_params_t& cell_params_) { - sched_params = &sched_params_; - enb_cc_idx = enb_cc_idx_; - cell_params = &sched_params->cell_cfg[enb_cc_idx]; - log_h = sched_params_.log_h; + cc_cfg = &cell_params_; + log_h = srslte::logmap::get("MAC "); } void ul_metric_rr::sched_users(std::map& ue_db, ul_sf_sched_itf* tti_sched) @@ -244,7 +241,7 @@ ul_harq_proc* ul_metric_rr::allocate_user_retx_prbs(sched_ue* user) if (tti_alloc->is_ul_alloc(user)) { return nullptr; } - auto p = user->get_cell_index(enb_cc_idx); + auto p = user->get_cell_index(cc_cfg->enb_cc_idx); if (not p.first) { // this cc is not activated for this user return nullptr; @@ -286,7 +283,7 @@ ul_harq_proc* ul_metric_rr::allocate_user_newtx_prbs(sched_ue* user) if (tti_alloc->is_ul_alloc(user)) { return nullptr; } - auto p = user->get_cell_index(enb_cc_idx); + auto p = user->get_cell_index(cc_cfg->enb_cc_idx); if (not p.first) { // this cc is not activated for this user return nullptr; diff --git a/srsenb/src/stack/mac/scheduler_ue.cc b/srsenb/src/stack/mac/scheduler_ue.cc index 7081fd0e8..d4a73afdf 100644 --- a/srsenb/src/stack/mac/scheduler_ue.cc +++ b/srsenb/src/stack/mac/scheduler_ue.cc @@ -23,6 +23,7 @@ #include "srsenb/hdr/stack/mac/scheduler.h" #include "srsenb/hdr/stack/mac/scheduler_ue.h" +#include "srslte/common/logmap.h" #include "srslte/common/pdu.h" #include "srslte/srslte.h" @@ -50,28 +51,30 @@ constexpr uint32_t conres_ce_size = 6; sched_ue::sched_ue() { - log_h = nullptr; + log_h = srslte::logmap::get("MAC "); bzero(&cell, sizeof(cell)); bzero(&lch, sizeof(lch)); - bzero(&dci_locations, sizeof(dci_locations)); bzero(&dl_ant_info, sizeof(dl_ant_info)); reset(); } -void sched_ue::set_cfg(uint16_t rnti_, const sched_params_t& sched_params_, sched_interface::ue_cfg_t* cfg_) +void sched_ue::set_cfg(uint16_t rnti_, + const std::vector& cell_list_params_, + sched_interface::ue_cfg_t* cfg_, + 
uint32_t primary_cc_idx_) { reset(); { std::lock_guard lock(mutex); - rnti = rnti_; - sched_params = &sched_params_; - log_h = sched_params->log_h; - cell = sched_params->cell_cfg[0].cfg->cell; + rnti = rnti_; + cell_params_list = &cell_list_params_; + main_cc_params = &(*cell_params_list)[primary_cc_idx_]; + cell = main_cc_params->cfg.cell; - max_msg3retx = sched_params->cell_cfg[0].cfg->maxharq_msg3tx; + max_msg3retx = main_cc_params->cfg.maxharq_msg3tx; cfg = *cfg_; @@ -81,26 +84,18 @@ void sched_ue::set_cfg(uint16_t rnti_, const sched_params_t& sched_params_, sche Info("SCHED: Added user rnti=0x%x\n", rnti); // Init sched_ue carriers - // TODO: check config for number of carriers - carriers.emplace_back(&cfg, &sched_params->cell_cfg[0], rnti, 0, log_h); - enb_ue_cellindex_map.insert(std::make_pair(0, 0)); // TODO: use real values - - // Generate allowed CCE locations - for (int cfi = 0; cfi < 3; cfi++) { - for (int sf_idx = 0; sf_idx < 10; sf_idx++) { - sched::generate_cce_location(sched_params->regs, &dci_locations[cfi][sf_idx], cfi + 1, sf_idx, rnti); - } - } + carriers.emplace_back(&cfg, main_cc_params, rnti); + enb_ue_cellindex_map[primary_cc_idx_] = 0; } for (int i = 0; i < sched_interface::MAX_LC; i++) { set_bearer_cfg(i, &cfg.ue_bearers[i]); } - set_max_mcs(sched_params->sched_cfg.pusch_max_mcs, - sched_params->sched_cfg.pdsch_max_mcs, - sched_params->sched_cfg.max_aggr_level); - set_fixed_mcs(sched_params->sched_cfg.pusch_mcs, sched_params->sched_cfg.pdsch_mcs); + set_max_mcs(main_cc_params->sched_cfg->pusch_max_mcs, + main_cc_params->sched_cfg->pdsch_max_mcs, + main_cc_params->sched_cfg->max_aggr_level); + set_fixed_mcs(main_cc_params->sched_cfg->pusch_mcs, main_cc_params->sched_cfg->pdsch_mcs); } void sched_ue::reset() @@ -995,13 +990,13 @@ srslte_dci_format_t sched_ue::get_dci_format() return ret; } -sched_ue::sched_dci_cce_t* sched_ue::get_locations(uint32_t cfi, uint32_t sf_idx) +sched_dci_cce_t* sched_ue::get_locations(uint32_t enb_cc_idx, uint32_t cfi, uint32_t sf_idx) { if (cfi > 0 && cfi <= 3) { - return &dci_locations[cfi - 1][sf_idx]; + return &carriers[enb_cc_idx].dci_locations[cfi - 1][sf_idx]; } else { Error("SCHED: Invalid CFI=%d\n", cfi); - return &dci_locations[0][sf_idx]; + return &carriers[enb_cc_idx].dci_locations[0][sf_idx]; } } @@ -1039,7 +1034,7 @@ uint32_t sched_ue::format1_count_prb(const rbgmask_t& bitmask, uint32_t cc_idx) uint32_t nof_prb = 0; for (uint32_t i = 0; i < bitmask.size(); i++) { if (bitmask.test(i)) { - nof_prb += std::min(cell_cfg->cfg->cell.nof_prb - (i * cell_cfg->P), cell_cfg->P); + nof_prb += std::min(cell_cfg->cfg.cell.nof_prb - (i * cell_cfg->P), cell_cfg->P); } } return nof_prb; @@ -1081,20 +1076,24 @@ int sched_ue::cqi_to_tbs(uint32_t cqi, sched_ue_carrier::sched_ue_carrier(sched_interface::ue_cfg_t* cfg_, const sched_cell_params_t* cell_cfg_, - uint16_t rnti_, - uint32_t cc_idx_, - srslte::log* log_) : + uint16_t rnti_) : cfg(cfg_), cell_params(cell_cfg_), rnti(rnti_), - cc_idx(cc_idx_), - log_h(log_) + log_h(srslte::logmap::get("MAC ")) { // Config HARQ processes for (uint32_t i = 0; i < dl_harq.size(); ++i) { dl_harq[i].config(i, cfg->maxharq_tx, log_h); ul_harq[i].config(i, cfg->maxharq_tx, log_h); } + + // Generate allowed CCE locations + for (int cfi = 0; cfi < 3; cfi++) { + for (int sf_idx = 0; sf_idx < 10; sf_idx++) { + sched::generate_cce_location(cell_params->regs.get(), &dci_locations[cfi][sf_idx], cfi + 1, sf_idx, rnti); + } + } } void sched_ue_carrier::reset() @@ -1284,7 +1283,7 @@ uint32_t 
sched_ue_carrier::get_required_prb_ul(uint32_t req_bytes) } for (n = 1; n < cell_params->nof_prb() && nbytes < req_bytes + 4; n++) { - uint32_t nof_re = (2 * (SRSLTE_CP_NSYMB(cell_params->cfg->cell.cp) - 1) - N_srs) * n * SRSLTE_NRE; + uint32_t nof_re = (2 * (SRSLTE_CP_NSYMB(cell_params->cfg.cell.cp) - 1) - N_srs) * n * SRSLTE_NRE; int tbs = 0; if (fixed_mcs_ul < 0) { tbs = alloc_tbs_ul(n, nof_re, 0, &mcs); diff --git a/srsenb/test/mac/scheduler_test.cc b/srsenb/test/mac/scheduler_test.cc index e6b4b96f8..aa9601e22 100644 --- a/srsenb/test/mac/scheduler_test.cc +++ b/srsenb/test/mac/scheduler_test.cc @@ -101,7 +101,7 @@ int main(int argc, char* argv[]) cell_cfg[0].sibs[1].period_rf = 16; cell_cfg[0].si_window_ms = 40; - my_sched.init(nullptr, &log_out); + my_sched.init(nullptr); my_sched.cell_cfg(cell_cfg); srsenb::sched_interface::dl_sched_res_t sched_result_dl; @@ -118,7 +118,7 @@ int main(int argc, char* argv[]) bzero(&bearer_cfg, sizeof(srsenb::sched_interface::ue_bearer_cfg_t)); bearer_cfg.direction = srsenb::sched_interface::ue_bearer_cfg_t::BOTH; - my_sched.ue_cfg(rnti, &ue_cfg); + my_sched.ue_cfg(rnti, 0, &ue_cfg); my_sched.bearer_ue_cfg(rnti, 0, &bearer_cfg); // my_sched.dl_rlc_buffer_state(rnti, 0, 1e6, 0); my_sched.ul_bsr(rnti, 0, 1e6f, true); diff --git a/srsenb/test/mac/scheduler_test_common.cc b/srsenb/test/mac/scheduler_test_common.cc index 175a0b93b..77cd63582 100644 --- a/srsenb/test/mac/scheduler_test_common.cc +++ b/srsenb/test/mac/scheduler_test_common.cc @@ -26,13 +26,11 @@ using namespace srsenb; -#define CARRIER_IDX 0 - int output_sched_tester::test_pusch_collisions(const tti_params_t& tti_params, const sched_interface::ul_sched_res_t& ul_result, prbmask_t& ul_allocs) const { - uint32_t nof_prb = params.cell_cfg[CARRIER_IDX].cfg->cell.nof_prb; + uint32_t nof_prb = cell_params.nof_prb(); ul_allocs.resize(nof_prb); ul_allocs.reset(); @@ -54,15 +52,17 @@ int output_sched_tester::test_pusch_collisions(const tti_params_t& }; /* TEST: Check if there is space for PRACH */ - bool is_prach_tti_tx_ul = srslte_prach_tti_opportunity_config_fdd(params.cell_cfg[CARRIER_IDX].cfg->prach_config, tti_params.tti_tx_ul, -1); + bool is_prach_tti_tx_ul = + srslte_prach_tti_opportunity_config_fdd(cell_params.cfg.prach_config, tti_params.tti_tx_ul, -1); if (is_prach_tti_tx_ul) { - try_ul_fill({params.cell_cfg[CARRIER_IDX].cfg->prach_freq_offset, 6}, "PRACH"); + try_ul_fill({cell_params.cfg.prach_freq_offset, 6}, "PRACH"); } /* TEST: check collisions in PUCCH */ bool strict = nof_prb != 6 or (not is_prach_tti_tx_ul); // and not tti_data.ul_pending_msg3_present); - try_ul_fill({0, (uint32_t)params.cell_cfg[CARRIER_IDX].cfg->nrb_pucch}, "PUCCH", strict); - try_ul_fill({params.cell_cfg[CARRIER_IDX].cfg->cell.nof_prb - params.cell_cfg[CARRIER_IDX].cfg->nrb_pucch, (uint32_t)params.cell_cfg[CARRIER_IDX].cfg->nrb_pucch}, "PUCCH", strict); + try_ul_fill({0, (uint32_t)cell_params.cfg.nrb_pucch}, "PUCCH", strict); + try_ul_fill( + {cell_params.cfg.cell.nof_prb - cell_params.cfg.nrb_pucch, (uint32_t)cell_params.cfg.nrb_pucch}, "PUCCH", strict); /* TEST: check collisions in the UL PUSCH */ for (uint32_t i = 0; i < ul_result.nof_dci_elems; ++i) { @@ -80,10 +80,10 @@ int output_sched_tester::test_pdsch_collisions(const tti_params_t& const sched_interface::dl_sched_res_t& dl_result, rbgmask_t& rbgmask) const { - srslte::bounded_bitset<100, true> dl_allocs(params.cell_cfg[CARRIER_IDX].cfg->cell.nof_prb), alloc_mask(params.cell_cfg[CARRIER_IDX].cfg->cell.nof_prb); + srslte::bounded_bitset<100, true> 
+  srslte::bounded_bitset<100, true> dl_allocs(cell_params.cfg.cell.nof_prb), alloc_mask(cell_params.cfg.cell.nof_prb);

   auto try_dl_mask_fill = [&](const srslte_dci_dl_t& dci, const char* channel) {
-    if (extract_dl_prbmask(params.cell_cfg[CARRIER_IDX].cfg->cell, dci, &alloc_mask) != SRSLTE_SUCCESS) {
+    if (extract_dl_prbmask(cell_params.cfg.cell, dci, &alloc_mask) != SRSLTE_SUCCESS) {
       return SRSLTE_ERROR;
     }
     if ((dl_allocs & alloc_mask).any()) {
@@ -107,9 +107,9 @@ int output_sched_tester::test_pdsch_collisions(const tti_params_t&
   }

   // forbid Data in DL if it conflicts with PRACH for PRB==6
-  if (params.cell_cfg[CARRIER_IDX].cfg->cell.nof_prb == 6) {
+  if (cell_params.cfg.cell.nof_prb == 6) {
     uint32_t tti_rx_ack = TTI_RX_ACK(tti_params.tti_rx);
-    if (srslte_prach_tti_opportunity_config_fdd(params.cell_cfg[CARRIER_IDX].cfg->prach_config, tti_rx_ack, -1)) {
+    if (srslte_prach_tti_opportunity_config_fdd(cell_params.cfg.prach_config, tti_rx_ack, -1)) {
       dl_allocs.fill(0, dl_allocs.size());
     }
   }
@@ -120,13 +120,13 @@ int output_sched_tester::test_pdsch_collisions(const tti_params_t&
   }

   // TEST: check for holes in the PRB mask (RBGs not fully filled)
-  rbgmask.resize(params.cell_cfg[CARRIER_IDX].nof_rbgs);
+  rbgmask.resize(cell_params.nof_rbgs);
   rbgmask.reset();
   srslte::bounded_bitset<100, true> rev_alloc = ~dl_allocs;
-  for (uint32_t i = 0; i < params.cell_cfg[CARRIER_IDX].nof_rbgs; ++i) {
-    uint32_t lim = SRSLTE_MIN((i + 1) * params.cell_cfg[CARRIER_IDX].P, dl_allocs.size());
-    bool     val = dl_allocs.any(i * params.cell_cfg[CARRIER_IDX].P, lim);
-    CONDERROR(rev_alloc.any(i * params.cell_cfg[CARRIER_IDX].P, lim) and val, "[TESTER] No holes can be left in an RBG\n");
+  for (uint32_t i = 0; i < cell_params.nof_rbgs; ++i) {
+    uint32_t lim = SRSLTE_MIN((i + 1) * cell_params.P, dl_allocs.size());
+    bool     val = dl_allocs.any(i * cell_params.P, lim);
+    CONDERROR(rev_alloc.any(i * cell_params.P, lim) and val, "[TESTER] No holes can be left in an RBG\n");
     if (val) {
       rbgmask.set(i);
     }
@@ -158,18 +158,18 @@ int output_sched_tester::test_sib_scheduling(const tti_params_t&
       continue;
     }
     CONDERROR(bc->index >= sched_interface::MAX_SIBS, "Invalid SIB idx=%d\n", bc->index + 1);
-    CONDERROR(bc->tbs < params.cell_cfg[CARRIER_IDX].cfg->sibs[bc->index].len,
+    CONDERROR(bc->tbs < cell_params.cfg.sibs[bc->index].len,
              "Allocated BC process with TBS=%d < sib_len=%d\n",
              bc->tbs,
-             params.cell_cfg[CARRIER_IDX].cfg->sibs[bc->index].len);
-    uint32_t x = (bc->index - 1) * params.cell_cfg[CARRIER_IDX].cfg->si_window_ms;
+             cell_params.cfg.sibs[bc->index].len);
+    uint32_t x = (bc->index - 1) * cell_params.cfg.si_window_ms;
     uint32_t sf        = x % 10;
     uint32_t sfn_start = sfn;
-    while ((sfn_start % params.cell_cfg[CARRIER_IDX].cfg->sibs[bc->index].period_rf) != x / 10) {
+    while ((sfn_start % cell_params.cfg.sibs[bc->index].period_rf) != x / 10) {
       sfn_start--;
     }
     uint32_t win_start = sfn_start * 10 + sf;
-    uint32_t win_end   = win_start + params.cell_cfg[CARRIER_IDX].cfg->si_window_ms;
+    uint32_t win_end   = win_start + cell_params.cfg.si_window_ms;
     CONDERROR(tti_params.tti_tx_dl < win_start or tti_params.tti_tx_dl > win_end,
               "Scheduled SIB is outside of its SIB window\n");
   }
@@ -180,7 +180,7 @@ int output_sched_tester::test_pdcch_collisions(const sched_interface::dl_sched_r
                                                const sched_interface::ul_sched_res_t& ul_result,
                                                srslte::bounded_bitset<128, true>*     used_cce) const
 {
-  used_cce->resize(srslte_regs_pdcch_ncce(params.regs, dl_result.cfi));
+  used_cce->resize(srslte_regs_pdcch_ncce(cell_params.regs.get(), dl_result.cfi));
   used_cce->reset();
   // Helper Function: checks if there is any collision. If not, fills the PDCCH mask
@@ -237,10 +237,10 @@ int output_sched_tester::test_dci_values_consistency(const sched_interface::dl_s
   for (uint32_t i = 0; i < dl_result.nof_bc_elems; ++i) {
     auto& bc = dl_result.bc[i];
     if (bc.type == sched_interface::dl_sched_bc_t::BCCH) {
-      CONDERROR(bc.tbs < params.cell_cfg[CARRIER_IDX].cfg->sibs[bc.index].len,
+      CONDERROR(bc.tbs < cell_params.cfg.sibs[bc.index].len,
                "Allocated BC process with TBS=%d < sib_len=%d\n",
                bc.tbs,
-               params.cell_cfg[CARRIER_IDX].cfg->sibs[bc.index].len);
+               cell_params.cfg.sibs[bc.index].len);
     } else if (bc.type == sched_interface::dl_sched_bc_t::PCCH) {
       CONDERROR(bc.tbs == 0, "Allocated paging process with invalid TBS=%d\n", bc.tbs);
     } else {
diff --git a/srsenb/test/mac/scheduler_test_common.h b/srsenb/test/mac/scheduler_test_common.h
index 3f3d7e627..b841bfe7d 100644
--- a/srsenb/test/mac/scheduler_test_common.h
+++ b/srsenb/test/mac/scheduler_test_common.h
@@ -34,7 +34,7 @@ int extract_dl_prbmask(const srslte_cell_t& cell,
 class output_sched_tester
 {
 public:
-  explicit output_sched_tester(const sched_params_t& params_) : params(params_) {}
+  explicit output_sched_tester(const sched_cell_params_t& cell_params_) : cell_params(cell_params_) {}

   /* Check for collisions between RB allocations in the PUSCH and PUCCH */
   int test_pusch_collisions(const tti_params_t& tti_params,
@@ -59,7 +59,7 @@ public:
                             const sched_interface::ul_sched_res_t& ul_result) const;

 private:
-  const sched_params_t& params;
+  const sched_cell_params_t& cell_params;
 };

 } // namespace srsenb
diff --git a/srsenb/test/mac/scheduler_test_rand.cc b/srsenb/test/mac/scheduler_test_rand.cc
index 2c415786f..5986ada07 100644
--- a/srsenb/test/mac/scheduler_test_rand.cc
+++ b/srsenb/test/mac/scheduler_test_rand.cc
@@ -71,7 +71,7 @@
 * Setup Random generators
 **************************/
 uint32_t const seed = time(nullptr);
-// uint32_t const seed = 2452071795;
+// uint32_t const seed = 2452071795; // [{3054, 1656}, {5970, 1595}, ... {204, 3}]
 std::default_random_engine rand_gen(seed);
 std::uniform_real_distribution unif_dist(0, 1.0);
 bool check_old_pids = false;
@@ -240,7 +240,7 @@ private:
 int sched_tester::cell_cfg(const std::vector& cell_cfg)
 {
   sched::cell_cfg(cell_cfg);
-  output_tester.reset(new srsenb::output_sched_tester{sched_params});
+  output_tester.reset(new srsenb::output_sched_tester{sched_cell_params[CARRIER_IDX]});
   return SRSLTE_SUCCESS;
 }
@@ -255,7 +255,7 @@ int sched_tester::add_user(uint16_t rnti,
   info.preamble_idx = tti_data.nof_prachs++;
   tester_ues.insert(std::make_pair(rnti, info));

-  if (ue_cfg(rnti, &ue_cfg_) != SRSLTE_SUCCESS) {
+  if (ue_cfg(rnti, CARRIER_IDX, &ue_cfg_) != SRSLTE_SUCCESS) {
     TESTERROR("[TESTER] Registering new user rnti=0x%x to SCHED\n", rnti);
   }
   dl_sched_rar_info_t rar_info = {};
@@ -291,8 +291,8 @@ void sched_tester::new_test_tti(uint32_t tti_)
     tti_data.ul_pending_msg3_present = true;
     tti_data.ul_pending_msg3         = pending_msg3s.front();
   }
-  tti_data.current_cfi = sched_params.sched_cfg.nof_ctrl_symbols;
-  tti_data.used_cce.resize(srslte_regs_pdcch_ncce(&regs, tti_data.current_cfi));
+  tti_data.current_cfi = sched_cfg.nof_ctrl_symbols;
+  tti_data.used_cce.resize(srslte_regs_pdcch_ncce(sched_cell_params[CARRIER_IDX].regs.get(), tti_data.current_cfi));
   tti_data.used_cce.reset();
   tti_data.ue_data.clear();
   tti_data.total_ues = tester_user_results();
@@ -303,8 +303,9 @@ int sched_tester::process_tti_args()
 {
   // may add a new user
   if (sim_args.tti_events[tti_data.tti_rx].new_user) {
-    CONDERROR(!srslte_prach_tti_opportunity_config_fdd(cfg[CARRIER_IDX].prach_config, tti_data.tti_rx, -1),
-              "[TESTER] New user added in a non-PRACH TTI\n");
+    CONDERROR(
+        !srslte_prach_tti_opportunity_config_fdd(sched_cell_params[CARRIER_IDX].cfg.prach_config, tti_data.tti_rx, -1),
+        "[TESTER] New user added in a non-PRACH TTI\n");
     uint16_t rnti = sim_args.tti_events[tti_data.tti_rx].new_rnti;
     add_user(rnti, sim_args.bearer_cfg, sim_args.ue_cfg);
   }
@@ -448,7 +449,7 @@ int sched_tester::test_ra()
      //        continue;
      //      }

-      uint32_t window[2] = {(uint32_t)prach_tti + 3, prach_tti + 3 + cfg[CARRIER_IDX].prach_rar_window};
+      uint32_t window[2] = {(uint32_t)prach_tti + 3, prach_tti + 3 + sched_cell_params[CARRIER_IDX].cfg.prach_rar_window};
      if (prach_tti >= userinfo.rar_tti) { // RAR not yet sent
        CONDERROR(tti_data.tti_tx_dl > window[1], "[TESTER] There was no RAR scheduled within the RAR Window\n");
        if (tti_data.tti_tx_dl >= window[0]) {
@@ -576,7 +577,7 @@ int sched_tester::test_tti_result()
          carrier_schedulers[0]->get_sf_sched_ptr(tti_sched->get_tti_rx() + MSG3_DELAY_MS)->get_pending_msg3();
      const auto& p = msg3_list.front();
      CONDERROR(msg3_list.empty(), "Pending Msg3 should have been set\n");
-      uint32_t rba = srslte_ra_type2_to_riv(p.L, p.n_prb, cfg[CARRIER_IDX].cell.nof_prb);
+      uint32_t rba = srslte_ra_type2_to_riv(p.L, p.n_prb, sched_cell_params[CARRIER_IDX].cfg.cell.nof_prb);
      CONDERROR(msg3_grant.grant.rba != rba, "Pending Msg3 RBA is not valid\n");
    }
  }
@@ -758,7 +759,7 @@ int sched_tester::test_sibs()
 int sched_tester::test_collisions()
 {
   const srsenb::sf_sched* tti_sched = carrier_schedulers[0]->get_sf_sched_ptr(tti_data.tti_rx);
-  srsenb::prbmask_t       ul_allocs(cfg[CARRIER_IDX].cell.nof_prb);
+  srsenb::prbmask_t       ul_allocs(sched_cell_params[CARRIER_IDX].cfg.cell.nof_prb);

   /* TEST: any collision in PUCCH and PUSCH */
   TESTASSERT(output_tester->test_pusch_collisions(tti_params, tti_data.sched_result_ul, ul_allocs) == SRSLTE_SUCCESS);
@@ -779,8 +780,8 @@ int sched_tester::test_collisions()
       srslte_ra_type2_from_riv(tti_data.sched_result_ul.pusch[i].dci.type2_alloc.riv,
                                &L,
                                &RBstart,
-                               cfg[CARRIER_IDX].cell.nof_prb,
-                               cfg[CARRIER_IDX].cell.nof_prb);
+                               sched_cell_params[CARRIER_IDX].cfg.cell.nof_prb,
+                               sched_cell_params[CARRIER_IDX].cfg.cell.nof_prb);
       if (RBstart != tti_data.ul_pending_msg3.n_prb or L != tti_data.ul_pending_msg3.L) {
         TESTERROR("[TESTER] The Msg3 allocation does not coincide with the expected.\n");
       }
@@ -796,20 +797,21 @@ int sched_tester::test_collisions()
     srslte_ra_type2_from_riv(tti_data.sched_result_ul.pusch[i].dci.type2_alloc.riv,
                              &L,
                              &RBstart,
-                             cfg[CARRIER_IDX].cell.nof_prb,
-                             cfg[CARRIER_IDX].cell.nof_prb);
+                             sched_cell_params[CARRIER_IDX].cfg.cell.nof_prb,
+                             sched_cell_params[CARRIER_IDX].cfg.cell.nof_prb);
     ue_stats[tti_data.sched_result_ul.pusch[i].dci.rnti].nof_ul_rbs += L;
   }

   /* TEST: check any collision in PDSCH */
-  srsenb::rbgmask_t rbgmask(cfg[CARRIER_IDX].cell.nof_prb);
+  srsenb::rbgmask_t rbgmask(sched_cell_params[CARRIER_IDX].cfg.cell.nof_prb);
   TESTASSERT(output_tester->test_pdsch_collisions(tti_params, tti_data.sched_result_dl, rbgmask) == SRSLTE_SUCCESS);

   // update ue stats with number of DL RB allocations
-  srslte::bounded_bitset<100, true> alloc_mask(cfg[CARRIER_IDX].cell.nof_prb);
+  srslte::bounded_bitset<100, true> alloc_mask(sched_cell_params[CARRIER_IDX].cfg.cell.nof_prb);
   for (uint32_t i = 0; i < tti_data.sched_result_dl.nof_data_elems; ++i) {
-    TESTASSERT(srsenb::extract_dl_prbmask(cfg[CARRIER_IDX].cell, tti_data.sched_result_dl.data[i].dci, &alloc_mask) ==
-               SRSLTE_SUCCESS);
+    TESTASSERT(srsenb::extract_dl_prbmask(sched_cell_params[CARRIER_IDX].cfg.cell,
+                                          tti_data.sched_result_dl.data[i].dci,
+                                          &alloc_mask) == SRSLTE_SUCCESS);
     ue_stats[tti_data.sched_result_dl.data[i].dci.rnti].nof_dl_rbs += alloc_mask.count();
   }

@@ -946,7 +948,7 @@ void test_scheduler_rand(std::vector cell_c
   //  srsenb::sched_interface::dl_sched_res_t& sched_result_dl = tester.tti_data.sched_result_dl;
   //  srsenb::sched_interface::ul_sched_res_t& sched_result_ul = tester.tti_data.sched_result_ul;

-  tester.init(nullptr, log_global.get());
+  tester.init(nullptr);
   tester.cell_cfg(cell_cfg);

   bool running = true;
@@ -1032,6 +1034,7 @@ sched_sim_args rand_sim_params(const srsenb::sched_interface::cell_cfg_t& cell_c

 int main()
 {
+  srslte::logmap::get_instance()->set_default_log_level(srslte::LOG_LEVEL_INFO);
   printf("[TESTER] This is the chosen seed: %u\n", seed);
   /* initialize random seed: */
   uint32_t N_runs = 1, nof_ttis = 10240 + 10;
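Usage sketch (illustration, not part of the patch): the hunks above change the scheduler's public API from init(rrc, log)/ue_cfg(rnti, cfg) to init(rrc)/ue_cfg(rnti, enb_cc_idx, cfg), and route logging through srslte::logmap. The snippet below mirrors the call sequence of the updated scheduler_test.cc; the include path, the function name run_minimal_sched_example, the rnti value and the omitted cell/SIB field values are assumptions for illustration only.

// Illustrative driver for the refactored API (assumptions: include path, omitted cell/PHY config).
#include "srsenb/hdr/stack/mac/scheduler.h" // assumed include path to srsenb::sched
#include "srslte/common/logmap.h"

#include <cstdint>
#include <vector>

int run_minimal_sched_example() // hypothetical helper, not from the patch
{
  // Logging is now taken from the logmap singleton instead of being injected into init()
  srslte::logmap::get_instance()->set_default_log_level(srslte::LOG_LEVEL_INFO);

  srsenb::sched my_sched;
  my_sched.init(nullptr); // no log argument anymore; RRC hook omitted here

  // One carrier; a real setup must fill cell_cfg[0].cell, sibs[], prach/pucch fields, etc.
  std::vector<srsenb::sched_interface::cell_cfg_t> cell_cfg(1);
  my_sched.cell_cfg(cell_cfg);

  // Add a user on carrier enb_cc_idx = 0 via the new three-argument ue_cfg()
  uint16_t                          rnti   = 0x46; // arbitrary example RNTI
  srsenb::sched_interface::ue_cfg_t ue_cfg = {};
  ue_cfg.maxharq_tx                        = 5;
  my_sched.ue_cfg(rnti, 0, &ue_cfg);

  // Bearer configuration and a UL buffer-status report, as in the updated test
  srsenb::sched_interface::ue_bearer_cfg_t bearer_cfg = {};
  bearer_cfg.direction = srsenb::sched_interface::ue_bearer_cfg_t::BOTH;
  my_sched.bearer_ue_cfg(rnti, 0, &bearer_cfg);
  my_sched.ul_bsr(rnti, 0, 1e6f, true);

  // ... drive TTIs and collect dl_sched_res_t / ul_sched_res_t as in the test's main loop ...
  return 0;
}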