sched,nr: implement NR PDCCH allocation algorithm

master
Francisco 4 years ago committed by Andre Puschmann
parent a70ad58440
commit 5b3c5ded63

@@ -21,6 +21,7 @@ namespace srsenb {
const static size_t SCHED_NR_MAX_USERS = 4;
const static size_t SCHED_NR_NOF_SUBFRAMES = 10;
const static size_t SCHED_NR_NOF_HARQS = 16;
+static const size_t MAX_NOF_AGGR_LEVELS = 5;
namespace sched_nr_impl {
@@ -32,16 +33,14 @@ struct sched_cell_params {
const cell_cfg_t cell_cfg;
const sched_cfg_t& sched_cfg;
-sched_cell_params(uint32_t cc_, const cell_cfg_t& cell, const sched_cfg_t& sched_cfg_) :
-cc(cc_), cell_cfg(cell), sched_cfg(sched_cfg_)
-{}
+sched_cell_params(uint32_t cc_, const cell_cfg_t& cell, const sched_cfg_t& sched_cfg_);
};
struct sched_params {
const sched_cfg_t sched_cfg;
std::vector<sched_cell_params> cells;
-explicit sched_params(const sched_cfg_t& sched_cfg_) : sched_cfg(sched_cfg_) {}
+explicit sched_params(const sched_cfg_t& sched_cfg_);
};
using pdcchmask_t = srsran::bounded_bitset<SCHED_NR_MAX_NOF_RBGS, true>;
@@ -87,6 +86,13 @@ private:
bool flag = false;
};
+using pdcch_cce_pos_list = srsran::bounded_vector<uint32_t, SRSRAN_SEARCH_SPACE_MAX_NOF_CANDIDATES_NR>;
+using bwp_cce_pos_list = std::array<std::array<pdcch_cce_pos_list, MAX_NOF_AGGR_LEVELS>, SRSRAN_NOF_SF_X_FRAME>;
+void get_dci_locs(const srsran_coreset_t& coreset,
+                  const srsran_search_space_t& search_space,
+                  uint16_t rnti,
+                  bwp_cce_pos_list& cce_locs);
} // namespace sched_nr_impl
} // namespace srsenb

@@ -17,6 +17,7 @@
#include "srsran/adt/bounded_vector.h"
#include "srsran/adt/span.h"
#include "srsran/common/tti_point.h"
+#include "srsran/interfaces/rrc_nr_interface_types.h"
#include "srsran/phy/phch/dci_nr.h"
namespace srsenb {
@@ -28,6 +29,7 @@ const static size_t SCHED_NR_MAX_NOF_RBGS = 25;
const static size_t SCHED_NR_MAX_UL_ALLOCS = 16;
const static size_t SCHED_NR_MAX_TB = 1;
const static size_t SCHED_NR_MAX_HARQ = 16;
+const static size_t SCHED_NR_MAX_BWP_PER_CELL = 1;
class sched_nr_interface
{
@@ -47,9 +49,15 @@ public:
};
using pusch_td_res_alloc_list = srsran::bounded_vector<pusch_td_res_alloc, SCHED_NR_MAX_UL_ALLOCS>;
+struct bwp_cfg_t {
+  uint32_t start_rb = 0;
+  uint32_t rb_width = 100;
+};
struct cell_cfg_t {
uint32_t nof_prb = 100;
uint32_t nof_rbg = 25;
+srsran::bounded_vector<bwp_cfg_t, SCHED_NR_MAX_BWP_PER_CELL> bwps{1};
};
struct sched_cfg_t {
@@ -65,6 +73,7 @@ public:
struct ue_cfg_t {
uint32_t maxharq_tx = 4;
srsran::bounded_vector<ue_cc_cfg_t, SCHED_NR_MAX_CARRIERS> carriers;
+srsran::phy_cfg_nr_t phy_cfg = {};
};
///// Sched Result /////

@@ -0,0 +1,103 @@
/**
*
* \section COPYRIGHT
*
* Copyright 2013-2021 Software Radio Systems Limited
*
* By using this file, you agree to the terms and conditions set
* forth in the LICENSE file which can be found at the top level of
* the distribution.
*
*/
#ifndef SRSRAN_SCHED_NR_PDCCH_H
#define SRSRAN_SCHED_NR_PDCCH_H
#include "srsenb/hdr/stack/mac/nr/sched_nr_common.h"
#include "srsran/adt/bounded_bitset.h"
#include "srsran/adt/bounded_vector.h"
#include "srsran/phy/common/phy_common_nr.h"
#include "srsran/phy/phch/dci.h"
namespace srsenb {
namespace sched_nr_impl {
using coreset_bitmap = srsran::bounded_bitset<SRSRAN_CORESET_FREQ_DOMAIN_RES_SIZE * SRSRAN_CORESET_DURATION_MAX, true>;
enum class pdcch_grant_type_t { sib, dl_data, ul_data };
class slot_ue;
struct pdcch_dl_t {
srsran_dci_cfg_nr_t dci_cfg = {};
srsran_dci_dl_nr_t dci = {};
};
static const size_t MAX_NOF_PDCCH_DL_GRANTS = 16;
using pdcch_dl_list_t = srsran::bounded_vector<pdcch_dl_t, MAX_NOF_PDCCH_DL_GRANTS>;
class coreset_region
{
public:
coreset_region(uint32_t bwp_id_,
uint32_t slot_idx,
uint32_t nof_td_symbols,
uint32_t nof_freq_resources,
pdcch_dl_list_t& pdcch_list);
void reset();
/**
* Allocates DCI space in PDCCH, avoiding in the process collisions with other users
* @param alloc_type allocation type (e.g. DL data, UL data, SIB)
* @param aggr_idx Aggregation level index (0..4)
* @param user UE object or null in case of broadcast/RAR/paging allocation
* @return if the allocation was successful
*/
bool alloc_dci(pdcch_grant_type_t alloc_type, uint32_t aggr_idx, uint32_t coreset_id, slot_ue* user = nullptr);
void rem_last_dci();
uint32_t get_td_symbols() const { return nof_symbols; }
uint32_t get_freq_resources() const { return nof_freq_res; }
uint32_t nof_cces() const { return nof_freq_res * nof_symbols; }
size_t nof_allocs() const { return dfs_tree.size(); }
private:
uint32_t bwp_id;
uint32_t slot_idx;
uint32_t nof_symbols;
uint32_t nof_freq_res;
// List of PDCCH grants
struct alloc_record {
uint32_t coreset_id;
uint32_t aggr_idx;
uint32_t idx;
pdcch_grant_type_t alloc_type;
slot_ue* ue;
};
srsran::bounded_vector<alloc_record, MAX_NOF_PDCCH_DL_GRANTS> dci_list;
pdcch_dl_list_t& pdcch_dl_list;
// DFS decision tree of PDCCH grants
struct tree_node {
uint16_t rnti = SRSRAN_INVALID_RNTI;
uint32_t record_idx = 0;
uint32_t dci_pos_idx = 0;
srsran_dci_location_t dci_pos = {0, 0};
/// Accumulation of all PDCCH masks for the current solution (DFS path)
coreset_bitmap total_mask, current_mask;
};
using alloc_tree_dfs_t = srsran::bounded_vector<tree_node, MAX_NOF_PDCCH_DL_GRANTS>;
alloc_tree_dfs_t dfs_tree, saved_dfs_tree;
srsran::span<const uint32_t> get_cce_loc_table(const alloc_record& record) const;
bool alloc_dfs_node(const alloc_record& record, uint32_t dci_idx);
bool get_next_dfs();
};
} // namespace sched_nr_impl
} // namespace srsenb
#endif // SRSRAN_SCHED_NR_PDCCH_H
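
Note: the masks handled by the allocator above can be pictured with a small standalone sketch. The snippet below is only an illustration (plain std::bitset and a made-up CORESET size instead of the srsran bounded_bitset types): a candidate at aggregation-level index L occupies 2^L contiguous CCEs, and a candidate is rejected when its mask overlaps the CCEs already accumulated (the role of total_mask in the class above).

// Standalone illustration (not srsRAN code): CCE occupancy check inside one CORESET.
#include <bitset>
#include <cstdint>
#include <iostream>

constexpr std::size_t kMaxCce = 48; // hypothetical CORESET size in CCEs

// Build the mask of 2^aggr_idx CCEs starting at 'ncce'.
std::bitset<kMaxCce> cce_mask(uint32_t ncce, uint32_t aggr_idx)
{
  std::bitset<kMaxCce> m;
  for (uint32_t i = 0; i < (1u << aggr_idx); ++i) {
    m.set(ncce + i);
  }
  return m;
}

int main()
{
  std::bitset<kMaxCce> total; // CCEs already used by earlier PDCCH grants

  // First grant: aggregation-level index 2 (4 CCEs) at CCE 0 -> fits.
  total |= cce_mask(0, 2);

  // Second grant: 4 CCEs starting at CCE 2 -> overlaps CCEs 2..3, must be rejected.
  bool collision = (total & cce_mask(2, 2)).any();
  std::cout << "candidate at CCE 2 collides: " << std::boolalpha << collision << '\n';

  // The next candidate position at CCE 4 is collision-free.
  std::cout << "candidate at CCE 4 collides: " << (total & cce_mask(4, 2)).any() << '\n';
  return 0;
}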

@@ -13,104 +13,72 @@
#ifndef SRSRAN_SCHED_NR_RB_GRID_H
#define SRSRAN_SCHED_NR_RB_GRID_H
+#include "../sched_common.h"
#include "lib/include/srsran/adt/circular_array.h"
#include "sched_nr_interface.h"
+#include "sched_nr_pdcch.h"
#include "sched_nr_ue.h"
namespace srsenb {
namespace sched_nr_impl {
+using pdsch_bitmap = srsran::bounded_bitset<25, true>;
+using pusch_bitmap = srsran::bounded_bitset<25, true>;
using pdsch_list = sched_nr_interface::pdsch_list;
using pusch_list = sched_nr_interface::pusch_list;
-struct pdcch_t {};
-struct pdsch_t {};
-struct pusch_t {};
struct pucch_t {};
-struct phy_slot_grid {
+const static size_t MAX_CORESET_PER_BWP = 3;
-const sched_cell_params* cell_cfg = nullptr;
+using slot_coreset_list = srsran::bounded_vector<coreset_region, MAX_CORESET_PER_BWP>;
-pdcchmask_t pdcch_tot_mask;
-rbgmask_t pdsch_tot_mask;
+struct bwp_slot_grid {
-rbgmask_t ul_tot_mask;
+pdcch_dl_list_t pdcch_dl_list;
+slot_coreset_list coresets;
+pdsch_bitmap dl_rbgs;
pdsch_list pdsch_grants;
+pusch_bitmap ul_rbgs;
pusch_list pusch_grants;
srsran::bounded_vector<pucch_t, SCHED_NR_MAX_PDSCH_DATA> pucch_grants;
-phy_slot_grid() = default;
+bwp_slot_grid() = default;
-explicit phy_slot_grid(const sched_cell_params& cell_cfg_) :
+explicit bwp_slot_grid(const sched_cell_params& cell_params, uint32_t bwp_id_, uint32_t slot_idx_);
-cell_cfg(&cell_cfg_),
+void reset();
-pdcch_tot_mask(cell_cfg->cell_cfg.nof_rbg),
-pdsch_tot_mask(cell_cfg->cell_cfg.nof_rbg),
-ul_tot_mask(cell_cfg->cell_cfg.nof_rbg)
-{}
-void reset()
-{
-pdcch_tot_mask.reset();
-pdsch_tot_mask.reset();
-ul_tot_mask.reset();
-pdsch_grants.clear();
-pusch_grants.clear();
-pucch_grants.clear();
-}
-};
-using phy_cell_rb_grid = srsran::circular_array<phy_slot_grid, TTIMOD_SZ>;
-struct slot_ue_grid {
-phy_slot_grid* pdcch_slot;
-phy_slot_grid* pdsch_slot;
-phy_slot_grid* pusch_slot;
-phy_slot_grid* pucch_slot;
-pdcch_t* pdcch_alloc = nullptr;
-pdsch_t* pdsch_alloc = nullptr;
-pusch_t* pusch_alloc = nullptr;
-pucch_t* pucch_alloc = nullptr;
-slot_ue_grid(phy_slot_grid& pdcch_sl, phy_slot_grid& pdsch_sl, phy_slot_grid& pusch_sl, phy_slot_grid& pucch_sl) :
-pdcch_slot(&pdcch_sl), pdsch_slot(&pdsch_sl), pusch_slot(&pusch_sl), pucch_slot(&pucch_sl)
-{}
};
-class rb_alloc_grid
+struct bwp_res_grid {
-{
+bwp_res_grid(const sched_cell_params& cell_cfg_, uint32_t bwp_id_);
-public:
-slot_ue_grid get_slot_ue_grid(tti_point pdcch_tti, uint8_t K0, uint8_t K1, uint8_t K2)
+bwp_slot_grid& operator[](tti_point tti) { return slots[tti.sf_idx()]; };
-{
+const bwp_slot_grid& operator[](tti_point tti) const { return slots[tti.sf_idx()]; };
-phy_slot_grid& pdcch_slot = phy_grid[pdcch_tti.to_uint()];
+uint32_t id() const { return bwp_id; }
-phy_slot_grid& pdsch_slot = phy_grid[(pdcch_tti + K0).to_uint()];
-phy_slot_grid& pucch_slot = phy_grid[(pdcch_tti + K0 + K1).to_uint()];
-phy_slot_grid& pusch_slot = phy_grid[(pdcch_tti + K2).to_uint()];
-return slot_ue_grid{pdcch_slot, pdsch_slot, pusch_slot, pucch_slot};
-}
private:
-phy_cell_rb_grid phy_grid;
+uint32_t bwp_id;
+srsran::bounded_vector<bwp_slot_grid, TTIMOD_SZ> slots;
};
-/// Error code of alloc attempt
+struct cell_res_grid {
-enum class alloc_result { success, sch_collision, no_grant_space, no_rnti_opportunity };
+const sched_cell_params* cell_cfg = nullptr;
-inline const char* to_string(alloc_result res)
+srsran::bounded_vector<bwp_res_grid, SCHED_NR_MAX_BWP_PER_CELL> bwps;
-{
-return "";
-}
-class slot_sched
+explicit cell_res_grid(const sched_cell_params& cell_cfg);
+};
+class slot_bwp_sched
{
public:
-explicit slot_sched(const sched_cell_params& cfg_, phy_cell_rb_grid& phy_grid_);
+explicit slot_bwp_sched(uint32_t bwp_id, cell_res_grid& phy_grid_);
-void new_tti(tti_point tti_rx_);
-void reset();
alloc_result alloc_pdsch(slot_ue& ue, const rbgmask_t& dl_mask);
alloc_result alloc_pusch(slot_ue& ue, const rbgmask_t& dl_mask);
-void generate_dcis();
const sched_cell_params& cfg;
private:
srslog::basic_logger& logger;
-phy_cell_rb_grid& phy_grid;
+bwp_res_grid& bwp_grid;
tti_point tti_rx;
};

@@ -27,6 +27,32 @@ namespace sched_nr_impl {
using ue_cfg_t = sched_nr_interface::ue_cfg_t;
using ue_cc_cfg_t = sched_nr_interface::ue_cc_cfg_t;
+class ue_cfg_extended : public ue_cfg_t
+{
+public:
+  struct search_space_params {
+    srsran_search_space_t* cfg = nullptr;
+  };
+  struct coreset_params {
+    srsran_coreset_t* cfg = nullptr;
+    std::vector<search_space_params*> ss_list;
+    bwp_cce_pos_list cce_positions;
+  };
+  struct bwp_params {
+    std::vector<search_space_params> search_spaces;
+    std::vector<coreset_params> coresets;
+  };
+  struct cc_params {
+    srsran::bounded_vector<bwp_params, SCHED_NR_MAX_BWP_PER_CELL> bwps;
+  };
+  uint16_t rnti;
+  std::vector<cc_params> cc_params;
+  ue_cfg_extended() = default;
+  explicit ue_cfg_extended(uint16_t rnti, const ue_cfg_t& uecfg);
+};
class ue_carrier;
class slot_ue
@@ -44,11 +70,13 @@ public:
uint32_t cc = SCHED_NR_MAX_CARRIERS;
// UE parameters common to all sectors
-const ue_cfg_t* cfg = nullptr;
+const ue_cfg_extended* cfg = nullptr;
bool pending_sr;
// UE parameters that are sector specific
const ue_cc_cfg_t* cc_cfg = nullptr;
+uint32_t bwp_id;
+tti_point pdcch_tti;
tti_point pdsch_tti;
tti_point pusch_tti;
tti_point uci_tti;
@@ -65,9 +93,8 @@ class ue_carrier
{
public:
ue_carrier(uint16_t rnti, uint32_t cc, const ue_cfg_t& cfg);
-slot_ue try_reserve(tti_point pdcch_tti, const ue_cfg_t& cfg);
+slot_ue try_reserve(tti_point pdcch_tti, const ue_cfg_extended& cfg);
void push_feedback(srsran::move_callback<void(ue_carrier&)> callback);
-void set_cfg(const ue_cfg_t& uecfg);
const uint16_t rnti;
const uint32_t cc;
@@ -101,10 +128,12 @@ public:
std::array<std::unique_ptr<ue_carrier>, SCHED_NR_MAX_CARRIERS> carriers;
private:
+const uint16_t rnti;
bool pending_sr = false;
int current_idx = 0;
-std::array<ue_cfg_t, 4> ue_cfgs;
+std::array<ue_cfg_extended, 4> ue_cfgs;
};
using ue_map_t = srsran::static_circular_map<uint16_t, std::unique_ptr<ue>, SCHED_NR_MAX_USERS>;

@@ -32,8 +32,8 @@ using slot_res_t = sched_nr_interface::tti_request_t;
class slot_cc_worker
{
public:
-explicit slot_cc_worker(const sched_cell_params& cell_params, phy_cell_rb_grid& phy_grid) :
+explicit slot_cc_worker(const sched_cell_params& cell_params, cell_res_grid& phy_grid) :
-cfg(cell_params), res_grid(cfg, phy_grid)
+cfg(cell_params), res_grid(0, phy_grid)
{}
void start(tti_point tti_rx_, ue_map_t& ue_db_);
@@ -48,7 +48,7 @@ private:
const sched_cell_params& cfg;
tti_point tti_rx;
-slot_sched res_grid;
+slot_bwp_sched res_grid;
srsran::static_circular_map<uint16_t, slot_ue, SCHED_NR_MAX_USERS> slot_ues;
};
@@ -78,7 +78,7 @@ private:
};
std::vector<std::unique_ptr<slot_worker_ctxt> > slot_ctxts;
-std::array<phy_cell_rb_grid, SCHED_NR_MAX_CARRIERS> phy_grid;
+srsran::bounded_vector<cell_res_grid, SCHED_NR_MAX_CARRIERS> cell_grid_list;
slot_worker_ctxt& get_sf(tti_point tti_rx);
};

@@ -13,79 +13,49 @@
#ifndef SRSRAN_SCHED_COMMON_H
#define SRSRAN_SCHED_COMMON_H
-#include "srsran/adt/bounded_bitset.h"
-#include "srsran/common/tti_point.h"
-#include "srsran/interfaces/sched_interface.h"
+/**
+ * File used for all functions and types common to the LTE and NR schedulers
+ */
namespace srsenb {
-/***********************
- * Constants
- **********************/
-constexpr float tti_duration_ms = 1;
-constexpr uint32_t NOF_AGGR_LEVEL = 4;
-/***********************
- * Helper Types
- **********************/
-/// List of CCE start positions in PDCCH
-using cce_position_list = srsran::bounded_vector<uint32_t, 6>;
-/// Map {L} -> list of CCE positions
-using cce_cfi_position_table = std::array<cce_position_list, NOF_AGGR_LEVEL>;
-/// Map {cfi, L} -> list of CCE positions
-using cce_sf_position_table = std::array<std::array<cce_position_list, NOF_AGGR_LEVEL>, SRSRAN_NOF_CFI>;
-/// Map {sf, cfi, L} -> list of CCE positions
-using cce_frame_position_table = std::array<cce_sf_position_table, SRSRAN_NOF_SF_X_FRAME>;
-/// structs to bundle together all the sched arguments, and share them with all the sched sub-components
-class sched_cell_params_t
-{
-struct regs_deleter {
-void operator()(srsran_regs_t* p);
-};
-public:
-bool set_cfg(uint32_t enb_cc_idx_,
-const sched_interface::cell_cfg_t& cfg_,
-const sched_interface::sched_args_t& sched_args);
-// convenience getters
-uint32_t nof_prbs_to_rbgs(uint32_t nof_prbs) const { return srsran::ceil_div(nof_prbs, P); }
-uint32_t nof_prb() const { return cfg.cell.nof_prb; }
-uint32_t get_dl_lb_nof_re(tti_point tti_tx_dl, uint32_t nof_prbs_alloc) const;
-uint32_t get_dl_nof_res(srsran::tti_point tti_tx_dl, const srsran_dci_dl_t& dci, uint32_t cfi) const;
-uint32_t enb_cc_idx = 0;
-sched_interface::cell_cfg_t cfg = {};
-srsran_pucch_cfg_t pucch_cfg_common = {};
-const sched_interface::sched_args_t* sched_cfg = nullptr;
-std::unique_ptr<srsran_regs_t, regs_deleter> regs;
-cce_sf_position_table common_locations = {};
-cce_frame_position_table rar_locations = {};
-std::array<uint32_t, SRSRAN_NOF_CFI> nof_cce_table = {}; ///< map cfix -> nof cces in PDCCH
-uint32_t P = 0;
-uint32_t nof_rbgs = 0;
-using dl_nof_re_table = srsran::bounded_vector<
-std::array<std::array<std::array<uint32_t, SRSRAN_NOF_CFI>, SRSRAN_NOF_SLOTS_PER_SF>, SRSRAN_NOF_SF_X_FRAME>,
-SRSRAN_MAX_PRB>;
-using dl_lb_nof_re_table = std::array<srsran::bounded_vector<uint32_t, SRSRAN_MAX_PRB>, SRSRAN_NOF_SF_X_FRAME>;
-/// Table of nof REs
-dl_nof_re_table nof_re_table;
-/// Cached computation of Lower bound of nof REs
-dl_lb_nof_re_table nof_re_lb_table;
-};
-/// Type of Allocation stored in PDSCH/PUSCH
-enum class alloc_type_t { DL_BC, DL_PCCH, DL_RAR, DL_DATA, UL_DATA };
-inline bool is_dl_ctrl_alloc(alloc_type_t a)
-{
-return a == alloc_type_t::DL_BC or a == alloc_type_t::DL_PCCH or a == alloc_type_t::DL_RAR;
-}
+/// Error code of alloc attempt
+enum class alloc_result {
+  success,
+  sch_collision,
+  no_cch_space,
+  no_sch_space,
+  no_rnti_opportunity,
+  invalid_grant_params,
+  invalid_coderate,
+  no_grant_space,
+  other_cause
+};
+inline const char* to_string(alloc_result result)
+{
+  switch (result) {
+    case alloc_result::success:
+      return "success";
+    case alloc_result::sch_collision:
+      return "Collision with existing SCH allocations";
+    case alloc_result::other_cause:
+      return "error";
+    case alloc_result::no_cch_space:
+      return "No space available in PUCCH or PDCCH";
+    case alloc_result::no_sch_space:
+      return "Requested number of PRBs not available";
+    case alloc_result::no_rnti_opportunity:
+      return "rnti cannot be allocated (e.g. already allocated, no data, meas gap collision, carrier inactive, etc.)";
+    case alloc_result::invalid_grant_params:
+      return "invalid grant arguments (e.g. invalid prb mask)";
+    case alloc_result::invalid_coderate:
+      return "Effective coderate exceeds threshold";
+    case alloc_result::no_grant_space:
+      return "Max number of allocations reached";
+    default:
+      break;
+  }
+  return "unknown error";
+}
} // namespace srsenb

@@ -14,6 +14,7 @@
#define SRSRAN_SCHED_GRID_H
#include "lib/include/srsran/interfaces/sched_interface.h"
+#include "sched_common.h"
#include "sched_phy_ch/sched_result.h"
#include "sched_phy_ch/sf_cch_allocator.h"
#include "sched_ue.h"
@@ -24,20 +25,6 @@
namespace srsenb {
-/// Error code of alloc attempt
-enum class alloc_result {
-success,
-sch_collision,
-no_cch_space,
-no_sch_space,
-no_rnti_opportunity,
-invalid_grant_params,
-invalid_coderate,
-no_grant_space,
-other_cause
-};
-const char* to_string(alloc_result res);
struct sf_sched_result {
tti_point tti_rx;
std::vector<cc_sched_result> enb_cc_list;

@@ -13,7 +13,7 @@
#ifndef SRSRAN_SCHED_HELPERS_H
#define SRSRAN_SCHED_HELPERS_H
-#include "srsenb/hdr/stack/mac/sched_common.h"
+#include "srsenb/hdr/stack/mac/sched_lte_common.h"
#include "srsran/interfaces/sched_interface.h"
#include "srsran/srslog/srslog.h"

@@ -0,0 +1,93 @@
/**
*
* \section COPYRIGHT
*
* Copyright 2013-2021 Software Radio Systems Limited
*
* By using this file, you agree to the terms and conditions set
* forth in the LICENSE file which can be found at the top level of
* the distribution.
*
*/
#ifndef SRSRAN_SCHED_LTE_COMMON_H
#define SRSRAN_SCHED_LTE_COMMON_H
#include "srsran/adt/bounded_bitset.h"
#include "srsran/common/tti_point.h"
#include "srsran/interfaces/sched_interface.h"
namespace srsenb {
/***********************
* Constants
**********************/
constexpr float tti_duration_ms = 1;
constexpr uint32_t NOF_AGGR_LEVEL = 4;
/***********************
* Helper Types
**********************/
/// List of CCE start positions in PDCCH
using cce_position_list = srsran::bounded_vector<uint32_t, 6>;
/// Map {L} -> list of CCE positions
using cce_cfi_position_table = std::array<cce_position_list, NOF_AGGR_LEVEL>;
/// Map {cfi, L} -> list of CCE positions
using cce_sf_position_table = std::array<std::array<cce_position_list, NOF_AGGR_LEVEL>, SRSRAN_NOF_CFI>;
/// Map {sf, cfi, L} -> list of CCE positions
using cce_frame_position_table = std::array<cce_sf_position_table, SRSRAN_NOF_SF_X_FRAME>;
/// structs to bundle together all the sched arguments, and share them with all the sched sub-components
class sched_cell_params_t
{
struct regs_deleter {
void operator()(srsran_regs_t* p);
};
public:
bool set_cfg(uint32_t enb_cc_idx_,
const sched_interface::cell_cfg_t& cfg_,
const sched_interface::sched_args_t& sched_args);
// convenience getters
uint32_t nof_prbs_to_rbgs(uint32_t nof_prbs) const { return srsran::ceil_div(nof_prbs, P); }
uint32_t nof_prb() const { return cfg.cell.nof_prb; }
uint32_t get_dl_lb_nof_re(tti_point tti_tx_dl, uint32_t nof_prbs_alloc) const;
uint32_t get_dl_nof_res(srsran::tti_point tti_tx_dl, const srsran_dci_dl_t& dci, uint32_t cfi) const;
uint32_t enb_cc_idx = 0;
sched_interface::cell_cfg_t cfg = {};
srsran_pucch_cfg_t pucch_cfg_common = {};
const sched_interface::sched_args_t* sched_cfg = nullptr;
std::unique_ptr<srsran_regs_t, regs_deleter> regs;
cce_sf_position_table common_locations = {};
cce_frame_position_table rar_locations = {};
std::array<uint32_t, SRSRAN_NOF_CFI> nof_cce_table = {}; ///< map cfix -> nof cces in PDCCH
uint32_t P = 0;
uint32_t nof_rbgs = 0;
using dl_nof_re_table = srsran::bounded_vector<
std::array<std::array<std::array<uint32_t, SRSRAN_NOF_CFI>, SRSRAN_NOF_SLOTS_PER_SF>, SRSRAN_NOF_SF_X_FRAME>,
SRSRAN_MAX_PRB>;
using dl_lb_nof_re_table = std::array<srsran::bounded_vector<uint32_t, SRSRAN_MAX_PRB>, SRSRAN_NOF_SF_X_FRAME>;
/// Table of nof REs
dl_nof_re_table nof_re_table;
/// Cached computation of Lower bound of nof REs
dl_lb_nof_re_table nof_re_lb_table;
};
/// Type of Allocation stored in PDSCH/PUSCH
enum class alloc_type_t { DL_BC, DL_PCCH, DL_RAR, DL_DATA, UL_DATA };
inline bool is_dl_ctrl_alloc(alloc_type_t a)
{
return a == alloc_type_t::DL_BC or a == alloc_type_t::DL_PCCH or a == alloc_type_t::DL_RAR;
}
} // namespace srsenb
#endif // SRSRAN_SCHED_LTE_COMMON_H

@@ -13,7 +13,7 @@
#ifndef SRSRAN_SCHED_DCI_H
#define SRSRAN_SCHED_DCI_H
-#include "../sched_common.h"
+#include "../sched_lte_common.h"
#include "srsenb/hdr/stack/mac/sched_phy_ch/sched_phy_resource.h"
#include "srsran/adt/bounded_vector.h"

@@ -13,7 +13,7 @@
#ifndef SRSRAN_SCHED_RESULT_H
#define SRSRAN_SCHED_RESULT_H
-#include "../sched_common.h"
+#include "../sched_lte_common.h"
#include "srsenb/hdr/stack/mac/sched_phy_ch/sched_phy_resource.h"
namespace srsenb {

@@ -10,7 +10,7 @@
*
*/
-#include "../sched_common.h"
+#include "../sched_lte_common.h"
#include "sched_result.h"
#ifndef SRSRAN_PDCCH_SCHED_H

@@ -13,7 +13,7 @@
#ifndef SRSENB_SCHEDULER_UE_H
#define SRSENB_SCHEDULER_UE_H
-#include "sched_common.h"
+#include "sched_lte_common.h"
#include "sched_ue_ctrl/sched_lch.h"
#include "sched_ue_ctrl/sched_ue_cell.h"
#include "sched_ue_ctrl/tpc.h"

@@ -13,8 +13,8 @@
#ifndef SRSRAN_SCHED_DL_CQI_H
#define SRSRAN_SCHED_DL_CQI_H
-#include "srsenb/hdr/stack/mac/sched_common.h"
#include "srsenb/hdr/stack/mac/sched_helpers.h"
+#include "srsenb/hdr/stack/mac/sched_lte_common.h"
#include "srsenb/hdr/stack/mac/sched_phy_ch/sched_phy_resource.h"
#include "srsran/adt/accumulators.h"
#include "srsran/common/common_lte.h"

@@ -13,7 +13,7 @@
#ifndef SRSRAN_SCHED_UE_CELL_H
#define SRSRAN_SCHED_UE_CELL_H
-#include "../sched_common.h"
+#include "../sched_lte_common.h"
#include "sched_dl_cqi.h"
#include "sched_harq.h"
#include "srsenb/hdr/stack/mac/sched_phy_ch/sched_dci.h"

@@ -6,6 +6,6 @@
# the distribution.
#
-set(SOURCES mac_nr.cc sched_nr.cc sched_nr_ue.cc sched_nr_worker.cc sched_nr_rb_grid.cc sched_nr_harq.cc)
+set(SOURCES mac_nr.cc sched_nr.cc sched_nr_ue.cc sched_nr_worker.cc sched_nr_rb_grid.cc sched_nr_harq.cc sched_nr_pdcch.cc sched_nr_common.cc)
add_library(srsgnb_mac STATIC ${SOURCES})

@@ -21,6 +21,8 @@ using sched_nr_impl::ue;
using sched_nr_impl::ue_carrier;
using sched_nr_impl::ue_map_t;
+static int assert_ue_cfg_valid(uint16_t rnti, const sched_nr_interface::ue_cfg_t& uecfg);
class ue_event_manager
{
using callback_t = srsran::move_callback<void()>;
@@ -96,6 +98,7 @@ int sched_nr::cell_cfg(srsran::const_span<cell_cfg_t> cell_list)
void sched_nr::ue_cfg(uint16_t rnti, const ue_cfg_t& uecfg)
{
+  srsran_assert(assert_ue_cfg_valid(rnti, uecfg) == SRSRAN_SUCCESS, "Invalid UE configuration");
pending_events->push_event([this, rnti, uecfg]() { ue_cfg_impl(rnti, uecfg); });
}
@@ -153,4 +156,16 @@ void sched_nr::ul_sr_info(tti_point tti_rx, uint16_t rnti)
});
}
+int assert_ue_cfg_valid(uint16_t rnti, const sched_nr_interface::ue_cfg_t& uecfg)
+{
+  const srslog::basic_logger& logger = srslog::fetch_basic_logger("MAC");
+  if (std::count(&uecfg.phy_cfg.pdcch.coreset_present[0],
+                 &uecfg.phy_cfg.pdcch.coreset_present[SRSRAN_UE_DL_NR_MAX_NOF_CORESET],
+                 true) == 0) {
+    logger.warning("Provided rnti=0x%x configuration does not contain any coreset", rnti);
+    return SRSRAN_ERROR;
+  }
+  return SRSRAN_SUCCESS;
+}
} // namespace srsenb

@@ -0,0 +1,43 @@
/**
*
* \section COPYRIGHT
*
* Copyright 2013-2021 Software Radio Systems Limited
*
* By using this file, you agree to the terms and conditions set
* forth in the LICENSE file which can be found at the top level of
* the distribution.
*
*/
#include "srsenb/hdr/stack/mac/nr/sched_nr_common.h"
namespace srsenb {
namespace sched_nr_impl {
sched_cell_params::sched_cell_params(uint32_t cc_, const cell_cfg_t& cell, const sched_cfg_t& sched_cfg_) :
cc(cc_), cell_cfg(cell), sched_cfg(sched_cfg_)
{}
sched_params::sched_params(const sched_cfg_t& sched_cfg_) : sched_cfg(sched_cfg_) {}
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
void get_dci_locs(const srsran_coreset_t& coreset,
const srsran_search_space_t& search_space,
uint16_t rnti,
bwp_cce_pos_list& cce_locs)
{
for (uint32_t sl = 0; sl < SRSRAN_NOF_SF_X_FRAME; ++sl) {
for (uint32_t agg_idx = 0; agg_idx < MAX_NOF_AGGR_LEVELS; ++agg_idx) {
pdcch_cce_pos_list pdcch_locs;
cce_locs[sl][agg_idx].resize(pdcch_locs.capacity());
uint32_t n =
srsran_pdcch_nr_locations_coreset(&coreset, &search_space, rnti, agg_idx, sl, cce_locs[sl][agg_idx].data());
cce_locs[sl][agg_idx].resize(n);
}
}
}
} // namespace sched_nr_impl
} // namespace srsenb
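
Note: the table filled by get_dci_locs() above is indexed first by slot index and then by aggregation-level index, with one list of candidate CCE start positions per entry. The toy model below (plain std containers and made-up candidate values, not the srsran types) only mirrors that lookup pattern.

// Toy model (not srsRAN code) of a per-slot, per-aggregation-level CCE candidate table.
#include <array>
#include <cstdint>
#include <iostream>
#include <vector>

constexpr std::size_t kSlotsPerFrame = 10; // mirrors SRSRAN_NOF_SF_X_FRAME for 15 kHz SCS
constexpr std::size_t kNofAggrLevels = 5;  // aggregation levels 1, 2, 4, 8, 16

using cce_candidates = std::vector<uint32_t>; // CCE start positions for one (slot, level)
using cce_table = std::array<std::array<cce_candidates, kNofAggrLevels>, kSlotsPerFrame>;

int main()
{
  cce_table table;
  // Fill with made-up candidates: at level index L, candidates are spaced 2^L CCEs apart.
  for (std::size_t slot = 0; slot < kSlotsPerFrame; ++slot) {
    for (std::size_t l = 0; l < kNofAggrLevels; ++l) {
      for (uint32_t ncce = 0; ncce < 24; ncce += (1u << l)) {
        table[slot][l].push_back(ncce);
      }
    }
  }
  // Lookup pattern: candidate CCE positions for slot 3, aggregation-level index 2 (4 CCEs).
  for (uint32_t ncce : table[3][2]) {
    std::cout << ncce << ' ';
  }
  std::cout << '\n';
  return 0;
}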

@@ -0,0 +1,167 @@
/**
*
* \section COPYRIGHT
*
* Copyright 2013-2021 Software Radio Systems Limited
*
* By using this file, you agree to the terms and conditions set
* forth in the LICENSE file which can be found at the top level of
* the distribution.
*
*/
#include "srsenb/hdr/stack/mac/nr/sched_nr_pdcch.h"
#include "srsenb/hdr/stack/mac/nr/sched_nr_ue.h"
namespace srsenb {
namespace sched_nr_impl {
coreset_region::coreset_region(uint32_t bwp_id_,
uint32_t slot_idx_,
uint32_t nof_td_symbols,
uint32_t nof_freq_resources,
pdcch_dl_list_t& pdcch_list_) :
bwp_id(bwp_id_),
slot_idx(slot_idx_),
nof_symbols(nof_td_symbols),
nof_freq_res(nof_freq_resources),
pdcch_dl_list(pdcch_list_)
{
srsran_assert(nof_td_symbols <= SRSRAN_CORESET_DURATION_MAX,
"Possible number of time-domain OFDM symbols in CORESET must be within {1,2,3}");
srsran_assert(nof_freq_resources <= SRSRAN_CORESET_FREQ_DOMAIN_RES_SIZE,
"Provided number of CORESET freq domain resources=%d is too high",
nof_freq_resources);
}
void coreset_region::reset()
{
dfs_tree.clear();
saved_dfs_tree.clear();
dci_list.clear();
pdcch_dl_list.clear();
}
bool coreset_region::alloc_dci(pdcch_grant_type_t alloc_type, uint32_t aggr_idx, uint32_t coreset_id, slot_ue* user)
{
srsran_assert(aggr_idx <= 4, "Invalid DCI aggregation level=%d", 1U << aggr_idx);
srsran_assert((user == nullptr) xor
(alloc_type == pdcch_grant_type_t::dl_data or alloc_type == pdcch_grant_type_t::ul_data),
"UE should be only provided for DL or UL data allocations");
saved_dfs_tree.clear();
alloc_record record;
record.ue = user;
record.aggr_idx = aggr_idx;
record.alloc_type = alloc_type;
record.idx = pdcch_dl_list.size();
record.coreset_id = coreset_id;
pdcch_dl_list.emplace_back();
// Try to allocate grant. If it fails, attempt the same grant, but using a different permutation of past grant DCI
// positions
do {
bool success = alloc_dfs_node(record, 0);
if (success) {
// DCI record allocation successful
dci_list.push_back(record);
return true;
}
if (dfs_tree.empty()) {
saved_dfs_tree = dfs_tree;
}
} while (get_next_dfs());
// Revert steps to initial state, before dci record allocation was attempted
dfs_tree = saved_dfs_tree;
pdcch_dl_list.pop_back();
return false;
}
void coreset_region::rem_last_dci()
{
srsran_assert(not dci_list.empty(), "%s called when no PDCCH have yet been allocated", __FUNCTION__);
// Remove DCI record
dfs_tree.pop_back();
dci_list.pop_back();
pdcch_dl_list.pop_back();
}
bool coreset_region::get_next_dfs()
{
do {
if (dfs_tree.empty()) {
// If we reach root, the allocation failed
return false;
}
// Attempt to re-add last tree node, but with a higher node child index
uint32_t start_child_idx = dfs_tree.back().dci_pos_idx + 1;
dfs_tree.pop_back();
while (dfs_tree.size() < dci_list.size() and alloc_dfs_node(dci_list[dfs_tree.size()], start_child_idx)) {
start_child_idx = 0;
}
} while (dfs_tree.size() < dci_list.size());
// Finished computation of next DFS node
return true;
}
bool coreset_region::alloc_dfs_node(const alloc_record& record, uint32_t start_dci_idx)
{
alloc_tree_dfs_t& alloc_dfs = dfs_tree;
// Get DCI Location Table
auto cce_locs = get_cce_loc_table(record);
if (start_dci_idx >= cce_locs.size()) {
return false;
}
tree_node node;
node.dci_pos_idx = start_dci_idx;
node.dci_pos.L = record.aggr_idx;
node.rnti = record.ue != nullptr ? record.ue->rnti : SRSRAN_INVALID_RNTI;
node.current_mask.resize(nof_cces());
// get cumulative pdcch bitmap
if (not alloc_dfs.empty()) {
node.total_mask = alloc_dfs.back().total_mask;
} else {
node.total_mask.resize(nof_cces());
}
for (; node.dci_pos_idx < cce_locs.size(); ++node.dci_pos_idx) {
node.dci_pos.ncce = cce_locs[node.dci_pos_idx];
node.current_mask.reset();
node.current_mask.fill(node.dci_pos.ncce, node.dci_pos.ncce + (1U << record.aggr_idx));
if ((node.total_mask & node.current_mask).any()) {
// there is a PDCCH collision. Try another CCE position
continue;
}
// Allocation successful
node.total_mask |= node.current_mask;
alloc_dfs.push_back(node);
pdcch_dl_t& pdcch_dl = pdcch_dl_list[record.idx];
pdcch_dl.dci.ctx.location = node.dci_pos;
return true;
}
return false;
}
srsran::span<const uint32_t> coreset_region::get_cce_loc_table(const alloc_record& record) const
{
switch (record.alloc_type) {
case pdcch_grant_type_t::dl_data:
return record.ue->cfg->cc_params[record.ue->cc]
.bwps[bwp_id]
.coresets[record.coreset_id]
.cce_positions[slot_idx][record.aggr_idx];
default:
break;
}
return {};
}
} // namespace sched_nr_impl
} // namespace srsenb
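
Note: alloc_dci() together with get_next_dfs() above performs a depth-first search over the candidate CCE positions of the grants allocated so far: when the newest grant cannot be placed without a collision, earlier grants are moved to their next candidate position and the attempt is repeated; if no combination works, the previous state is restored. The standalone sketch below expresses the same idea with a recursive formulation over plain std types; sizes and candidate lists are made up for illustration and this is not the srsRAN code itself.

// Standalone sketch (not srsRAN code) of backtracking PDCCH placement over CCE candidates.
#include <bitset>
#include <cstdint>
#include <iostream>
#include <vector>

constexpr std::size_t kMaxCce = 24; // hypothetical CORESET size in CCEs

struct grant {
  uint32_t              aggr_idx;   // allocation occupies 2^aggr_idx CCEs
  std::vector<uint32_t> candidates; // candidate CCE start positions for this grant
};

static std::bitset<kMaxCce> make_mask(uint32_t ncce, uint32_t aggr_idx)
{
  std::bitset<kMaxCce> m;
  for (uint32_t i = 0; i < (1u << aggr_idx); ++i) {
    m.set(ncce + i);
  }
  return m;
}

// Depth-first search: place grants[i..] on top of the CCEs already marked in 'used'.
static bool place(const std::vector<grant>& grants,
                  std::size_t               i,
                  std::bitset<kMaxCce>      used,
                  std::vector<uint32_t>&    chosen)
{
  if (i == grants.size()) {
    return true; // all grants placed without collisions
  }
  for (uint32_t ncce : grants[i].candidates) {
    auto mask = make_mask(ncce, grants[i].aggr_idx);
    if ((used & mask).any()) {
      continue; // collision, try this grant's next candidate
    }
    chosen[i] = ncce;
    if (place(grants, i + 1, used | mask, chosen)) {
      return true;
    }
    // backtrack: a later grant could not be placed, try this grant's next candidate
  }
  return false;
}

int main()
{
  // Two grants at aggregation-level index 3 (8 CCEs each). The second one only has
  // candidate 0, so the search must backtrack and move the first grant to CCE 16.
  std::vector<grant>    grants = {{3, {0, 16}}, {3, {0}}};
  std::vector<uint32_t> chosen(grants.size());
  if (place(grants, 0, {}, chosen)) {
    for (std::size_t i = 0; i < chosen.size(); ++i) {
      std::cout << "grant " << i << " -> CCE " << chosen[i] << '\n';
    }
  } else {
    std::cout << "no collision-free PDCCH placement found\n";
  }
  return 0;
}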

@@ -18,30 +18,66 @@ namespace sched_nr_impl {
using pdsch_grant = sched_nr_interface::pdsch_grant;
using pusch_grant = sched_nr_interface::pusch_grant;
-slot_sched::slot_sched(const sched_cell_params& cfg_, phy_cell_rb_grid& phy_grid_) :
-logger(srslog::fetch_basic_logger("MAC")), cfg(cfg_), phy_grid(phy_grid_)
-{}
-void slot_sched::new_tti(tti_point tti_rx_)
-{
-tti_rx = tti_rx_;
-}
+bwp_slot_grid::bwp_slot_grid(const sched_cell_params& cell_params, uint32_t bwp_id_, uint32_t slot_idx_) :
+  dl_rbgs(cell_params.cell_cfg.nof_rbg), ul_rbgs(cell_params.cell_cfg.nof_rbg)
+{
+  coresets.emplace_back(bwp_id_, slot_idx_, 1, cell_params.cell_cfg.bwps[bwp_id_].rb_width / 6, pdcch_dl_list);
+}
+void bwp_slot_grid::reset()
+{
+  for (auto& coreset : coresets) {
+    coreset.reset();
+  }
+  dl_rbgs.reset();
+  ul_rbgs.reset();
+  pdsch_grants.clear();
+  pdcch_dl_list.clear();
+  pusch_grants.clear();
+  pucch_grants.clear();
+}
+bwp_res_grid::bwp_res_grid(const sched_cell_params& cell_cfg_, uint32_t bwp_id_) : bwp_id(bwp_id_)
+{
+  for (uint32_t sl = 0; sl < SCHED_NR_NOF_SUBFRAMES; ++sl) {
+    slots.emplace_back(cell_cfg_, bwp_id, sl);
+  }
+}
+cell_res_grid::cell_res_grid(const sched_cell_params& cell_cfg_) : cell_cfg(&cell_cfg_)
+{
+  for (uint32_t bwp_id = 0; bwp_id < cell_cfg->cell_cfg.bwps.size(); ++bwp_id) {
+    bwps.emplace_back(cell_cfg_, bwp_id);
+  }
+}
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+slot_bwp_sched::slot_bwp_sched(uint32_t bwp_id, cell_res_grid& phy_grid_) :
+  logger(srslog::fetch_basic_logger("MAC")), cfg(*phy_grid_.cell_cfg), bwp_grid(phy_grid_.bwps[bwp_id])
+{}
-alloc_result slot_sched::alloc_pdsch(slot_ue& ue, const rbgmask_t& dl_mask)
+alloc_result slot_bwp_sched::alloc_pdsch(slot_ue& ue, const rbgmask_t& dl_mask)
{
if (ue.h_dl == nullptr) {
logger.warning("SCHED: Trying to allocate PDSCH for rnti=0x%x with no available HARQs", ue.rnti);
return alloc_result::no_rnti_opportunity;
}
-pdsch_list& pdsch_grants = phy_grid[ue.pdsch_tti.to_uint()].pdsch_grants;
+pdsch_list& pdsch_grants = bwp_grid[ue.pdsch_tti].pdsch_grants;
if (pdsch_grants.full()) {
logger.warning("SCHED: Maximum number of DL allocations reached");
return alloc_result::no_grant_space;
}
-rbgmask_t& pdsch_mask = phy_grid[ue.pdsch_tti.to_uint()].pdsch_tot_mask;
+rbgmask_t& pdsch_mask = bwp_grid[ue.pdsch_tti].dl_rbgs;
if ((pdsch_mask & dl_mask).any()) {
return alloc_result::sch_collision;
}
+const uint32_t aggr_idx = 3, coreset_id = 0;
+if (not bwp_grid[ue.pdcch_tti].coresets[coreset_id].alloc_dci(
+        pdcch_grant_type_t::dl_data, aggr_idx, coreset_id, &ue)) {
+  // Could not find space in PDCCH
+  return alloc_result::no_cch_space;
+}
int mcs = -1, tbs = -1;
if (ue.h_dl->empty()) {
@@ -65,18 +101,18 @@ alloc_result slot_sched::alloc_pdsch(slot_ue& ue, const rbgmask_t& dl_mask)
return alloc_result::success;
}
-alloc_result slot_sched::alloc_pusch(slot_ue& ue, const rbgmask_t& ul_mask)
+alloc_result slot_bwp_sched::alloc_pusch(slot_ue& ue, const rbgmask_t& ul_mask)
{
if (ue.h_ul == nullptr) {
logger.warning("SCHED: Trying to allocate PUSCH for rnti=0x%x with no available HARQs", ue.rnti);
return alloc_result::no_rnti_opportunity;
}
-pusch_list& pusch_grants = phy_grid[ue.pusch_tti.to_uint()].pusch_grants;
+pusch_list& pusch_grants = bwp_grid[ue.pusch_tti].pusch_grants;
if (pusch_grants.full()) {
logger.warning("SCHED: Maximum number of UL allocations reached");
return alloc_result::no_grant_space;
}
-rbgmask_t& pusch_mask = phy_grid[ue.pusch_tti.to_uint()].ul_tot_mask;
+rbgmask_t& pusch_mask = bwp_grid[ue.pusch_tti].ul_rbgs;
if ((pusch_mask & ul_mask).any()) {
return alloc_result::sch_collision;
}
@@ -104,7 +140,5 @@ alloc_result slot_sched::alloc_pusch(slot_ue& ue, const rbgmask_t& ul_mask)
return alloc_result::success;
}
-void slot_sched::generate_dcis() {}
} // namespace sched_nr_impl
} // namespace srsenb

@@ -11,10 +11,41 @@
*/
#include "srsenb/hdr/stack/mac/nr/sched_nr_ue.h"
+#include "srsenb/hdr/stack/mac/nr/sched_nr_pdcch.h"
namespace srsenb {
namespace sched_nr_impl {
+ue_cfg_extended::ue_cfg_extended(uint16_t rnti_, const ue_cfg_t& uecfg) : ue_cfg_t(uecfg), rnti(rnti_)
+{
+  cc_params.resize(carriers.size());
+  for (uint32_t cc = 0; cc < cc_params.size(); ++cc) {
+    cc_params[cc].bwps.resize(1);
+    auto& bwp = cc_params[cc].bwps[0];
+    for (uint32_t ssid = 0; ssid < SRSRAN_UE_DL_NR_MAX_NOF_SEARCH_SPACE; ++ssid) {
+      if (phy_cfg.pdcch.search_space_present[ssid]) {
+        bwp.search_spaces.emplace_back();
+        bwp.search_spaces.back().cfg = &phy_cfg.pdcch.search_space[ssid];
+      }
+    }
+    for (uint32_t csid = 0; csid < SRSRAN_UE_DL_NR_MAX_NOF_CORESET; ++csid) {
+      if (phy_cfg.pdcch.coreset_present[csid]) {
+        bwp.coresets.emplace_back();
+        auto& coreset = bwp.coresets.back();
+        coreset.cfg = &phy_cfg.pdcch.coreset[csid];
+        for (auto& ss : bwp.search_spaces) {
+          if (ss.cfg->coreset_id == csid) {
+            coreset.ss_list.push_back(&ss);
+            get_dci_locs(*coreset.cfg, *coreset.ss_list.back()->cfg, rnti, coreset.cce_positions);
+          }
+        }
+      }
+    }
+  }
+}
+///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
slot_ue::slot_ue(resource_guard::token ue_token_, uint16_t rnti_, tti_point tti_rx_, uint32_t cc_) :
ue_token(std::move(ue_token_)), rnti(rnti_), tti_rx(tti_rx_), cc(cc_)
{}
@@ -23,26 +54,19 @@ slot_ue::slot_ue(resource_guard::token ue_token_, uint16_t rnti_, tti_point tti_
ue_carrier::ue_carrier(uint16_t rnti_, uint32_t cc_, const ue_cfg_t& uecfg_) : rnti(rnti_), cc(cc_), cfg(&uecfg_) {}
-void ue_carrier::set_cfg(const ue_cfg_t& uecfg)
-{
-cfg = &uecfg;
-}
void ue_carrier::push_feedback(srsran::move_callback<void(ue_carrier&)> callback)
{
pending_feedback.push_back(std::move(callback));
}
-slot_ue ue_carrier::try_reserve(tti_point tti_rx, const ue_cfg_t& uecfg_)
+slot_ue ue_carrier::try_reserve(tti_point tti_rx, const ue_cfg_extended& uecfg_)
{
slot_ue sfu(busy, rnti, tti_rx, cc);
if (sfu.empty()) {
return sfu;
}
// successfully acquired. Process any CC-specific pending feedback
-if (cfg != &uecfg_) {
-set_cfg(uecfg_);
-}
+cfg = &uecfg_;
while (not pending_feedback.empty()) {
pending_feedback.front()(*this);
pending_feedback.pop_front();
@@ -61,8 +85,9 @@ slot_ue ue_carrier::try_reserve(tti_point tti_rx, const ue_cfg_t& uecfg_)
// copy cc-specific parameters and find available HARQs
sfu.cc_cfg = &uecfg_.carriers[cc];
-sfu.pdsch_tti = tti_rx + TX_ENB_DELAY + sfu.cc_cfg->pdsch_res_list[0].k0;
-sfu.pusch_tti = tti_rx + TX_ENB_DELAY + sfu.cc_cfg->pusch_res_list[0].k2;
+sfu.pdcch_tti = tti_rx + TX_ENB_DELAY;
+sfu.pdsch_tti = sfu.pdcch_tti + sfu.cc_cfg->pdsch_res_list[0].k0;
+sfu.pusch_tti = sfu.pdcch_tti + sfu.cc_cfg->pusch_res_list[0].k2;
sfu.uci_tti = sfu.pdsch_tti + sfu.cc_cfg->pdsch_res_list[0].k1;
sfu.dl_cqi = dl_cqi;
sfu.ul_cqi = ul_cqi;
@@ -85,9 +110,9 @@ slot_ue ue_carrier::try_reserve(tti_point tti_rx, const ue_cfg_t& uecfg_)
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
-ue::ue(uint16_t rnti, const ue_cfg_t& cfg)
+ue::ue(uint16_t rnti_, const ue_cfg_t& cfg) : rnti(rnti_)
{
-ue_cfgs[0] = cfg;
+ue_cfgs[0] = ue_cfg_extended(rnti, cfg);
for (uint32_t cc = 0; cc < cfg.carriers.size(); ++cc) {
if (cfg.carriers[cc].active) {
carriers[cc].reset(new ue_carrier(rnti, cc, cfg));
@@ -97,8 +122,8 @@ ue::ue(uint16_t rnti, const ue_cfg_t& cfg)
void ue::set_cfg(const ue_cfg_t& cfg)
{
-current_idx = (current_idx + 1) % ue_cfgs.size();
+current_idx = (current_idx + 1U) % ue_cfgs.size();
-ue_cfgs[current_idx] = cfg;
+ue_cfgs[current_idx] = ue_cfg_extended(rnti, cfg);
}
slot_ue ue::try_reserve(tti_point tti_rx, uint32_t cc)

@@ -35,7 +35,6 @@ void slot_cc_worker::start(tti_point tti_rx_, ue_map_t& ue_db)
// UE acquired successfully for scheduling in this {tti, cc}
}
-res_grid.new_tti(tti_rx_);
tti_rx = tti_rx_;
}
@@ -51,9 +50,6 @@ void slot_cc_worker::run()
alloc_ul_ues();
alloc_dl_ues();
}
-// Select the winner PDCCH allocation combination, store all the scheduling results
-res_grid.generate_dcis();
}
void slot_cc_worker::end_tti()
@@ -100,9 +96,7 @@ void slot_cc_worker::alloc_ul_ues()
sched_worker_manager::sched_worker_manager(ue_map_t& ue_db_, const sched_params& cfg_) : cfg(cfg_), ue_db(ue_db_)
{
for (uint32_t cc = 0; cc < cfg.cells.size(); ++cc) {
-for (auto& slot_grid : phy_grid[cc]) {
-slot_grid = phy_slot_grid(cfg.cells[cc]);
-}
+cell_grid_list.emplace_back(cfg.cells[cc]);
}
// Note: For now, we only allow parallelism at the sector level
@@ -112,7 +106,7 @@ sched_worker_manager::sched_worker_manager(ue_map_t& ue_db_, const sched_params&
sem_init(&slot_ctxts[i]->sf_sem, 0, 1);
slot_ctxts[i]->workers.reserve(cfg.cells.size());
for (uint32_t cc = 0; cc < cfg.cells.size(); ++cc) {
-slot_ctxts[i]->workers.emplace_back(cfg.cells[cc], phy_grid[cc]);
+slot_ctxts[i]->workers.emplace_back(cfg.cells[cc], cell_grid_list[cc]);
}
}
}
@@ -164,18 +158,16 @@ bool sched_worker_manager::run_tti(tti_point tti_rx_, uint32_t cc, slot_res_t& t
// Copy requested TTI DL and UL sched result
tti_req.dl_res.pdsch_tti = tti_rx_ + TX_ENB_DELAY;
-tti_req.dl_res.pdsch = phy_grid[cc][tti_req.dl_res.pdsch_tti.to_uint()].pdsch_grants;
+tti_req.dl_res.pdsch = cell_grid_list[cc].bwps[0][tti_req.dl_res.pdsch_tti].pdsch_grants;
+cell_grid_list[cc].bwps[0][tti_req.dl_res.pdsch_tti].reset();
tti_req.ul_res.pusch_tti = tti_rx_ + TX_ENB_DELAY;
-tti_req.ul_res.pusch = phy_grid[cc][tti_req.ul_res.pusch_tti.to_uint()].pusch_grants;
+tti_req.ul_res.pusch = cell_grid_list[cc].bwps[0][tti_req.ul_res.pusch_tti].pusch_grants;
+cell_grid_list[cc].bwps[0][tti_req.ul_res.pusch_tti].reset();
// decrement the number of active workers
int rem_workers = sf_worker_ctxt.worker_count.fetch_sub(1, std::memory_order_release) - 1;
srsran_assert(rem_workers >= 0, "invalid number of calls to run_tti(tti, cc)");
-if (rem_workers == 0) {
-// Clear one slot of PHY grid, so it can be reused in the next TTIs
-phy_grid[cc][sf_worker_ctxt.tti_rx.to_uint()].reset();
-}
return rem_workers == 0;
}

@@ -16,33 +16,6 @@
namespace srsenb {
-const char* to_string(alloc_result result)
-{
-switch (result) {
-case alloc_result::success:
-return "success";
-case alloc_result::sch_collision:
-return "Collision with existing SCH allocations";
-case alloc_result::other_cause:
-return "error";
-case alloc_result::no_cch_space:
-return "No space available in PUCCH or PDCCH";
-case alloc_result::no_sch_space:
-return "Requested number of PRBs not available";
-case alloc_result::no_rnti_opportunity:
-return "rnti cannot be allocated (e.g. already allocated, no data, meas gap collision, carrier inactive, etc.)";
-case alloc_result::invalid_grant_params:
-return "invalid grant arguments (e.g. invalid prb mask)";
-case alloc_result::invalid_coderate:
-return "Effective coderate exceeds threshold";
-case alloc_result::no_grant_space:
-return "Max number of allocations reached";
-default:
-break;
-}
-return "unknown error";
-}
void sf_sched_result::new_tti(tti_point tti_rx_)
{
assert(tti_rx != tti_rx_);

@@ -11,8 +11,8 @@
*/
#include "srsenb/hdr/stack/mac/sched_phy_ch/sched_dci.h"
-#include "srsenb/hdr/stack/mac/sched_common.h"
#include "srsenb/hdr/stack/mac/sched_helpers.h"
+#include "srsenb/hdr/stack/mac/sched_lte_common.h"
#include "srsran/common/string_helpers.h"
#include <cmath>

@@ -93,8 +93,8 @@ int sched_nr_sim_base::add_user(uint16_t rnti, const sched_nr_interface::ue_cfg_
{
TESTASSERT(ue_db.count(rnti) == 0);
-ue_db.insert(std::make_pair(rnti, sched_nr_ue_sim(rnti, ue_cfg_, current_tti_rx, preamble_idx)));
sched_ptr->ue_cfg(rnti, ue_cfg_);
+ue_db.insert(std::make_pair(rnti, sched_nr_ue_sim(rnti, ue_cfg_, current_tti_rx, preamble_idx)));
return SRSRAN_SUCCESS;
}

@@ -17,6 +17,33 @@
namespace srsenb {
+sched_nr_interface::ue_cfg_t get_default_ue_cfg(uint32_t nof_cc)
+{
+  sched_nr_interface::ue_cfg_t uecfg{};
+  uecfg.carriers.resize(nof_cc);
+  for (uint32_t cc = 0; cc < nof_cc; ++cc) {
+    uecfg.carriers[cc].active = true;
+  }
+  uecfg.phy_cfg.pdcch.coreset_present[0] = true;
+  uecfg.phy_cfg.pdcch.coreset[0].id = 0;
+  for (uint32_t i = 0; i < 100 / 6; ++i) {
+    uecfg.phy_cfg.pdcch.coreset[0].freq_resources[i] = true;
+  }
+  uecfg.phy_cfg.pdcch.coreset[0].duration = 1;
+  uecfg.phy_cfg.pdcch.search_space_present[0] = true;
+  uecfg.phy_cfg.pdcch.search_space[0].id = 0;
+  uecfg.phy_cfg.pdcch.search_space[0].coreset_id = 0;
+  uecfg.phy_cfg.pdcch.search_space[0].duration = 1;
+  uecfg.phy_cfg.pdcch.search_space[0].type = srsran_search_space_type_common_0;
+  uecfg.phy_cfg.pdcch.search_space[0].nof_candidates[0] = 1;
+  uecfg.phy_cfg.pdcch.search_space[0].nof_candidates[1] = 1;
+  uecfg.phy_cfg.pdcch.search_space[0].nof_candidates[2] = 1;
+  uecfg.phy_cfg.pdcch.search_space[0].nof_candidates[3] = 1;
+  uecfg.phy_cfg.pdcch.search_space[0].nof_formats = 1;
+  uecfg.phy_cfg.pdcch.search_space[0].formats[0] = srsran_dci_format_nr_0_0;
+  return uecfg;
+}
struct task_job_manager {
std::mutex mutex;
std::condition_variable cond_var;
@@ -70,10 +97,7 @@ void sched_nr_cfg_serialized_test()
sched_nr_sim_base sched_tester(cfg, cells_cfg, "Serialized Test");
-sched_nr_interface::ue_cfg_t uecfg;
-uecfg.carriers.resize(nof_sectors);
-uecfg.carriers[0].active = true;
-uecfg.carriers[1].active = true;
+sched_nr_interface::ue_cfg_t uecfg = get_default_ue_cfg(2);
sched_tester.add_user(0x46, uecfg, 0);
@@ -107,11 +131,7 @@ void sched_nr_cfg_parallel_cc_test()
sched_nr_sim_base sched_tester(cfg, cells_cfg, "Parallel CC Test");
-sched_nr_interface::ue_cfg_t uecfg;
-uecfg.carriers.resize(cells_cfg.size());
-for (uint32_t cc = 0; cc < cells_cfg.size(); ++cc) {
-uecfg.carriers[cc].active = true;
-}
+sched_nr_interface::ue_cfg_t uecfg = get_default_ue_cfg(cells_cfg.size());
sched_tester.add_user(0x46, uecfg, 0);
for (uint32_t nof_ttis = 0; nof_ttis < max_nof_ttis; ++nof_ttis) {
@@ -148,11 +168,7 @@ void sched_nr_cfg_parallel_sf_test()
sched_nr sched(cfg);
sched.cell_cfg(cells_cfg);
-sched_nr_interface::ue_cfg_t uecfg;
-uecfg.carriers.resize(cells_cfg.size());
-for (uint32_t cc = 0; cc < cells_cfg.size(); ++cc) {
-uecfg.carriers[cc].active = true;
-}
+sched_nr_interface::ue_cfg_t uecfg = get_default_ue_cfg(cells_cfg.size());
sched.ue_cfg(0x46, uecfg);
for (uint32_t nof_ttis = 0; nof_ttis < max_nof_ttis; ++nof_ttis) {

@@ -13,7 +13,7 @@
#ifndef SRSRAN_SCHED_COMMON_TEST_SUITE_H
#define SRSRAN_SCHED_COMMON_TEST_SUITE_H
-#include "srsenb/hdr/stack/mac/sched_common.h"
+#include "srsenb/hdr/stack/mac/sched_lte_common.h"
#include "srsenb/hdr/stack/mac/sched_phy_ch/sched_phy_resource.h"
#include "srsran/adt/span.h"
#include "srsran/common/tti_point.h"

@@ -11,7 +11,7 @@
*/
#include "sched_test_utils.h"
-#include "srsenb/hdr/stack/mac/sched_common.h"
+#include "srsenb/hdr/stack/mac/sched_lte_common.h"
#include "srsenb/hdr/stack/mac/sched_phy_ch/sched_dci.h"
#include "srsran/common/common_lte.h"
#include "srsran/common/test_common.h"
