added dynamic CFI capability to the eNB scheduler
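This commit replaces the fixed scheduler.nof_ctrl_symbols parameter with a [min, max] CFI range. The PDCCH grid now keeps one allocation tree per CFI, starts every TTI at the minimum CFI, and escalates to a larger CFI (replaying the DCI allocations recorded so far) whenever a new DCI does not fit. The DL data allocator additionally lowers the CFI upper bound when a larger control region would shrink PDSCH enough to force segmentation. A new unit test, sched_grid_test, exercises PDCCH allocation for a single user.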

branch: master
author: Francisco Paisana (committed by Francisco Paisana)
parent 12dfe1cc58
commit ede09cb9b2

@@ -56,7 +56,8 @@ public:
int pdsch_max_mcs = 28;
int pusch_mcs = -1;
int pusch_max_mcs = 28;
int nof_ctrl_symbols = 3;
uint32_t min_nof_ctrl_symbols = 1;
uint32_t max_nof_ctrl_symbols = 3;
int max_aggr_level = 3;
};
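For reference, a minimal sketch of the resulting configuration surface. The two CFI fields are from this diff; the enclosing struct is assumed to be sched_interface::sched_args_t, as used by the new sched_grid_test.cc further down:

// Hedged sketch: constrain the dynamic CFI search range of the scheduler.
srsenb::sched_interface::sched_args_t sched_args = {};
sched_args.min_nof_ctrl_symbols = 1; // each TTI starts with a CFI of 1
sched_args.max_nof_ctrl_symbols = 3; // PDCCH may grow up to CFI=3 under congestion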

@@ -36,7 +36,7 @@ enum class alloc_type_t { DL_BC, DL_PCCH, DL_RAR, DL_DATA, UL_DATA };
//! Result of alloc attempt
struct alloc_outcome_t {
enum result_enum { SUCCESS, DCI_COLLISION, RB_COLLISION, ERROR };
enum result_enum { SUCCESS, DCI_COLLISION, RB_COLLISION, ERROR, NOF_RB_INVALID };
result_enum result = ERROR;
alloc_outcome_t() = default;
alloc_outcome_t(result_enum e) : result(e) {}
@@ -59,6 +59,7 @@ struct sf_sched_result {
class pdcch_grid_t
{
public:
const static uint32_t MAX_CFI = 3;
struct alloc_t {
uint16_t rnti = 0;
srslte_dci_location_t dci_pos = {0, 0};
@@ -68,28 +69,51 @@ public:
using alloc_result_t = std::vector<const alloc_t*>;
void init(const sched_cell_params_t& cell_params_);
void new_tti(const tti_params_t& tti_params_, uint32_t start_cfi);
void new_tti(const tti_params_t& tti_params_);
bool alloc_dci(alloc_type_t alloc_type, uint32_t aggr_idx, sched_ue* user = nullptr);
bool set_cfi(uint32_t cfi);
void set_max_cfi(uint32_t cfi) { max_user_cfi = cfi; }
// getters
uint32_t get_cfi() const { return current_cfix + 1; }
void get_allocs(alloc_result_t* vec = nullptr, pdcch_mask_t* tot_mask = nullptr, size_t idx = 0) const;
uint32_t nof_cces() const;
size_t nof_allocs() const { return nof_dci_allocs; }
size_t nof_alloc_combinations() const { return prev_end - prev_start; }
uint32_t nof_cces() const { return cc_cfg->nof_cce_table[current_cfix]; }
size_t nof_allocs() const { return dci_record_list.size(); }
size_t nof_alloc_combinations() const { return get_alloc_tree().nof_leaves(); }
std::string result_to_string(bool verbose = false) const;
private:
const static uint32_t nof_cfis = 3;
using tree_node_t = std::pair<int, alloc_t>; ///< First represents the parent node idx, and second the alloc tree node
struct alloc_tree_t {
struct node_t {
int parent_idx;
alloc_t node;
node_t(int i, const alloc_t& a) : parent_idx(i), node(a) {}
};
// state
size_t nof_cces;
std::vector<node_t> dci_alloc_tree;
size_t prev_start = 0, prev_end = 0;
explicit alloc_tree_t(size_t nof_cces_) : nof_cces(nof_cces_) {}
size_t nof_leaves() const { return prev_end - prev_start; }
void reset();
};
struct alloc_record_t {
sched_ue* user;
uint32_t aggr_idx;
alloc_type_t alloc_type;
};
const sched_dci_cce_t* get_cce_loc_table(alloc_type_t alloc_type, sched_ue* user) const;
void update_alloc_tree(int node_idx,
uint32_t aggr_idx,
sched_ue* user,
alloc_type_t alloc_type,
const sched_dci_cce_t* dci_locs);
const alloc_tree_t& get_alloc_tree() const { return alloc_trees[current_cfix]; }
const sched_dci_cce_t* get_cce_loc_table(alloc_type_t alloc_type, sched_ue* user, uint32_t cfix) const;
// PDCCH allocation algorithm
bool alloc_dci_record(const alloc_record_t& record, uint32_t cfix);
static bool add_tree_node_leaves(alloc_tree_t& tree,
int node_idx,
const alloc_record_t& dci_record,
const sched_dci_cce_t& dci_locs,
uint32_t tti_tx_dl);
// consts
const sched_cell_params_t* cc_cfg = nullptr;
@@ -97,10 +121,9 @@ private:
// tti vars
const tti_params_t* tti_params = nullptr;
uint32_t current_cfix = 0;
size_t prev_start = 0, prev_end = 0;
std::vector<tree_node_t> dci_alloc_tree;
size_t nof_dci_allocs = 0;
uint32_t current_cfix = 0, max_user_cfi = MAX_CFI;
std::vector<alloc_tree_t> alloc_trees; ///< List of PDCCH alloc trees, where index is the cfi index
std::vector<alloc_record_t> dci_record_list; ///< Keeps a record of all the PDCCH allocations done so far
};
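The members above encode every feasible combination of DCI placements as a forest: each alloc_dci() call expands all current leaves (the [prev_start, prev_end) window) with every non-colliding CCE start position, so walking from any leaf back to the root yields one complete, collision-free PDCCH assignment. A simplified sketch of that walk, as it would look inside pdcch_grid_t, with tree and idx as illustrative locals (the full version is pdcch_grid_t::get_allocs() further down):

// Sketch: recover the DCI combination that ends at leaf prev_start + idx.
pdcch_grid_t::alloc_result_t combo;
for (int i = static_cast<int>(tree.prev_start + idx); i >= 0; i = tree.dci_alloc_tree[i].parent_idx) {
  combo.push_back(&tree.dci_alloc_tree[i].node);
}
std::reverse(combo.begin(), combo.end()); // root-to-leaf order == allocation order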
//! manages a subframe grid resources, namely CCE and DL/UL RB allocations
@@ -113,7 +136,7 @@ public:
};
void init(const sched_cell_params_t& cell_params_);
void new_tti(const tti_params_t& tti_params_, uint32_t start_cfi);
void new_tti(const tti_params_t& tti_params_);
dl_ctrl_alloc_t alloc_dl_ctrl(uint32_t aggr_lvl, alloc_type_t alloc_type);
alloc_outcome_t alloc_dl_data(sched_ue* user, const rbgmask_t& user_mask);
bool reserve_dl_rbgs(uint32_t start_rbg, uint32_t end_rbg);
@@ -228,7 +251,7 @@ public:
// Control/Configuration Methods
sf_sched();
void init(const sched_cell_params_t& cell_params_);
void new_tti(uint32_t tti_rx_, uint32_t start_cfi);
void new_tti(uint32_t tti_rx_);
// DL alloc methods
alloc_outcome_t alloc_bc(uint32_t aggr_lvl, uint32_t sib_idx, uint32_t sib_ntx);

@@ -125,6 +125,7 @@ public:
uint16_t get_rnti() const { return rnti; }
std::pair<bool, uint32_t> get_cell_index(uint32_t enb_cc_idx) const;
const sched_interface::ue_cfg_t& get_ue_cfg() const { return cfg; }
uint32_t get_aggr_level(uint32_t ue_cc_idx, uint32_t nof_bits);
/*******************************************************
* Functions used by scheduler metric objects

@@ -937,10 +937,10 @@ int set_derived_args(all_args_t* args_, rrc_cfg_t* rrc_cfg_, phy_cfg_t* phy_cfg_
rrc_cfg_->enable_mbsfn = args_->stack.embms.enable;
// Check number of control symbols
if (cell_cfg_.nof_prb < 50 && args_->stack.mac.sched.nof_ctrl_symbols != 3) {
args_->stack.mac.sched.nof_ctrl_symbols = 3;
if (cell_cfg_.nof_prb < 50 && args_->stack.mac.sched.max_nof_ctrl_symbols != 3) {
args_->stack.mac.sched.max_nof_ctrl_symbols = 3;
INFO("Setting number of control symbols to %d for %d PRB cell.\n",
args_->stack.mac.sched.nof_ctrl_symbols,
args_->stack.mac.sched.max_nof_ctrl_symbols,
cell_cfg_.nof_prb);
}

@@ -129,7 +129,7 @@ void parse_args(all_args_t* args, int argc, char* argv[])
("scheduler.pusch_mcs", bpo::value<int>(&args->stack.mac.sched.pusch_mcs)->default_value(-1), "Optional fixed PUSCH MCS (ignores reported CQIs if specified)")
("scheduler.pusch_max_mcs", bpo::value<int>(&args->stack.mac.sched.pusch_max_mcs)->default_value(-1), "Optional PUSCH MCS limit")
("scheduler.max_aggr_level", bpo::value<int>(&args->stack.mac.sched.max_aggr_level)->default_value(-1), "Optional maximum aggregation level index (l=log2(L)) ")
("scheduler.nof_ctrl_symbols", bpo::value<int>(&args->stack.mac.sched.nof_ctrl_symbols)->default_value(3), "Number of control symbols")
("scheduler.nof_ctrl_symbols", bpo::value<uint32_t>(&args->stack.mac.sched.max_nof_ctrl_symbols)->default_value(3), "Number of control symbols")
/* Downlink Channel emulator section */
("channel.dl.enable", bpo::value<bool>(&args->phy.dl_channel_args.enable)->default_value(false), "Enable/Disable internal Downlink channel emulator")
@@ -259,11 +259,11 @@ void parse_args(all_args_t* args, int argc, char* argv[])
}
if (args->stack.embms.enable) {
if (args->stack.mac.sched.nof_ctrl_symbols == 3) {
if (args->stack.mac.sched.max_nof_ctrl_symbols == 3) {
fprintf(stderr,
"nof_ctrl_symbols = %d, While using MBMS, please set number of control symbols to either 1 or 2, "
"depending on the length of the non-mbsfn region\n",
args->stack.mac.sched.nof_ctrl_symbols);
args->stack.mac.sched.max_nof_ctrl_symbols);
exit(1);
}
}

@@ -88,14 +88,14 @@ bool sched_cell_params_t::set_cfg(uint32_t enb_cc_id
}
// Compute Common locations for DCI for each CFI
for (uint32_t cfi = 0; cfi < 3; cfi++) {
sched_utils::generate_cce_location(regs.get(), &common_locations[cfi], cfi + 1);
}
if (common_locations[sched_cfg->nof_ctrl_symbols - 1].nof_loc[2] == 0) {
Error("SCHED: Current cfi=%d is not valid for broadcast (check scheduler.nof_ctrl_symbols in conf file).\n",
sched_cfg->nof_ctrl_symbols);
Console("SCHED: Current cfi=%d is not valid for broadcast (check scheduler.nof_ctrl_symbols in conf file).\n",
sched_cfg->nof_ctrl_symbols);
for (uint32_t cfix = 0; cfix < pdcch_grid_t::MAX_CFI; cfix++) {
sched_utils::generate_cce_location(regs.get(), &common_locations[cfix], cfix + 1);
}
if (common_locations[sched_cfg->max_nof_ctrl_symbols - 1].nof_loc[2] == 0) {
Error("SCHED: Current cfi=%d is not valid for broadcast (check scheduler.max_nof_ctrl_symbols in conf file).\n",
sched_cfg->max_nof_ctrl_symbols);
Console("SCHED: Current cfi=%d is not valid for broadcast (check scheduler.max_nof_ctrl_symbols in conf file).\n",
sched_cfg->max_nof_ctrl_symbols);
return false;
}
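For context: nof_loc[2] is the number of common search space candidates at aggregation level L=4 (index l = log2(L) = 2), which the scheduler requires for broadcast DCIs; the check now runs against the configured maximum CFI, the largest control region the dynamic-CFI scheduler may select.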

@@ -409,7 +409,7 @@ sf_sched* sched::carrier_sched::get_sf_sched(uint32_t tti_rx)
sf_sched* ret = &sf_scheds[tti_rx % sf_scheds.size()];
if (ret->get_tti_rx() != tti_rx) {
// start new TTI. Bind the struct where the result is going to be stored
ret->new_tti(tti_rx, cc_cfg->sched_cfg->nof_ctrl_symbols);
ret->new_tti(tti_rx);
}
return ret;
}

@@ -42,6 +42,8 @@ const char* alloc_outcome_t::to_string() const
return "rb_collision";
case ERROR:
return "error";
case NOF_RB_INVALID:
return "invalid nof prbs";
}
return "unknown error";
}
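A minimal usage sketch of the extended outcome type; result and to_string() are as in this diff, while the tti_grid name, alloc call, and logging are illustrative:

// Sketch: call sites can branch on the new NOF_RB_INVALID outcome and log it.
alloc_outcome_t ret = tti_grid.alloc_dl_data(user, user_mask);
if (ret.result != alloc_outcome_t::SUCCESS) {
  printf("SCHED: DL alloc failed for rnti=0x%x: %s\n", user->get_rnti(), ret.to_string());
}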
@@ -59,37 +61,51 @@ tti_params_t::tti_params_t(uint32_t tti_rx_) :
* PDCCH Allocation Methods
*******************************************************/
void pdcch_grid_t::alloc_tree_t::reset()
{
prev_start = 0;
prev_end = 0;
dci_alloc_tree.clear();
}
void pdcch_grid_t::init(const sched_cell_params_t& cell_params_)
{
cc_cfg = &cell_params_;
log_h = srslte::logmap::get("MAC ");
current_cfix = cc_cfg->sched_cfg->nof_ctrl_symbols - 1;
// init alloc trees
alloc_trees.reserve(cc_cfg->sched_cfg->max_nof_ctrl_symbols);
for (uint32_t i = 0; i < cc_cfg->sched_cfg->max_nof_ctrl_symbols; ++i) {
alloc_trees.emplace_back(cc_cfg->nof_cce_table[i]);
}
}
void pdcch_grid_t::new_tti(const tti_params_t& tti_params_, uint32_t start_cfi)
void pdcch_grid_t::new_tti(const tti_params_t& tti_params_)
{
tti_params = &tti_params_;
prev_start = 0;
prev_end = 0;
dci_alloc_tree.clear();
nof_dci_allocs = 0;
set_cfi(start_cfi);
// Reset back all CFIs
for (auto& t : alloc_trees) {
t.reset();
}
dci_record_list.clear();
current_cfix = cc_cfg->sched_cfg->min_nof_ctrl_symbols - 1;
max_user_cfi = cc_cfg->sched_cfg->max_nof_ctrl_symbols;
}
const sched_dci_cce_t* pdcch_grid_t::get_cce_loc_table(alloc_type_t alloc_type, sched_ue* user) const
const sched_dci_cce_t* pdcch_grid_t::get_cce_loc_table(alloc_type_t alloc_type, sched_ue* user, uint32_t cfix) const
{
switch (alloc_type) {
case alloc_type_t::DL_BC:
return &cc_cfg->common_locations[current_cfix];
return &cc_cfg->common_locations[cfix];
case alloc_type_t::DL_PCCH:
return &cc_cfg->common_locations[current_cfix];
return &cc_cfg->common_locations[cfix];
case alloc_type_t::DL_RAR:
return &cc_cfg->rar_locations[current_cfix][tti_params->sf_idx_tx_dl];
return &cc_cfg->rar_locations[cfix][tti_params->sf_idx_tx_dl];
case alloc_type_t::DL_DATA:
return user->get_locations(cc_cfg->enb_cc_idx, current_cfix + 1, tti_params->sf_idx_tx_dl);
return user->get_locations(cc_cfg->enb_cc_idx, cfix + 1, tti_params->sf_idx_tx_dl);
case alloc_type_t::UL_DATA:
return user->get_locations(cc_cfg->enb_cc_idx, current_cfix + 1, tti_params->sf_idx_tx_dl);
return user->get_locations(cc_cfg->enb_cc_idx, cfix + 1, tti_params->sf_idx_tx_dl);
default:
break;
}
@@ -99,64 +115,85 @@ const sched_dci_cce_t* pdcch_grid_t::get_cce_loc_table(alloc_type_t alloc_type,
bool pdcch_grid_t::alloc_dci(alloc_type_t alloc_type, uint32_t aggr_idx, sched_ue* user)
{
// TODO: Make the alloc tree update lazy
/* Get DCI Location Table */
const sched_dci_cce_t* dci_locs = get_cce_loc_table(alloc_type, user);
if (dci_locs == nullptr) {
alloc_record_t record{.user = user, .aggr_idx = aggr_idx, .alloc_type = alloc_type};
// Try to allocate user in PDCCH for given CFI. If it fails, increment CFI
uint32_t first_cfi = get_cfi();
bool success = alloc_dci_record(record, get_cfi() - 1);
while (not success and get_cfi() < max_user_cfi) {
set_cfi(get_cfi() + 1);
success = alloc_dci_record(record, get_cfi() - 1);
}
if (not success) {
// DCI allocation failed. Go back to the original CFI
set_cfi(first_cfi);
return false;
}
/* Search for potential DCI positions */
if (prev_end > 0) {
for (size_t j = prev_start; j < prev_end; ++j) {
update_alloc_tree((int)j, aggr_idx, user, alloc_type, dci_locs);
}
} else {
update_alloc_tree(-1, aggr_idx, user, alloc_type, dci_locs);
// DCI record allocation successful
dci_record_list.push_back(record);
return true;
}
// if no pdcch space was available
if (dci_alloc_tree.size() == prev_end) {
return false;
bool pdcch_grid_t::alloc_dci_record(const alloc_record_t& record, uint32_t cfix)
{
bool ret = false;
auto& tree = alloc_trees[cfix];
// Get DCI Location Table
const sched_dci_cce_t* dci_locs = get_cce_loc_table(record.alloc_type, record.user, cfix);
if (dci_locs == nullptr or dci_locs->nof_loc[record.aggr_idx] == 0) {
return ret;
}
prev_start = prev_end;
prev_end = dci_alloc_tree.size();
if (tree.prev_end > 0) {
for (size_t j = tree.prev_start; j < tree.prev_end; ++j) {
ret |= add_tree_node_leaves(tree, (int)j, record, *dci_locs, tti_params->tti_tx_dl);
}
} else {
ret = add_tree_node_leaves(tree, -1, record, *dci_locs, tti_params->tti_tx_dl);
}
nof_dci_allocs++;
if (ret) {
tree.prev_start = tree.prev_end;
tree.prev_end = tree.dci_alloc_tree.size();
}
return true;
return ret;
}
void pdcch_grid_t::update_alloc_tree(int parent_node_idx,
uint32_t aggr_idx,
sched_ue* user,
alloc_type_t alloc_type,
const sched_dci_cce_t* dci_locs)
//! Algorithm to compute a valid PDCCH allocation
bool pdcch_grid_t::add_tree_node_leaves(alloc_tree_t& tree,
int parent_node_idx,
const alloc_record_t& dci_record,
const sched_dci_cce_t& dci_locs,
uint32_t tti_tx_dl)
{
bool ret = false;
alloc_t alloc;
alloc.rnti = (user != nullptr) ? user->get_rnti() : (uint16_t)0u;
alloc.dci_pos.L = aggr_idx;
alloc.rnti = (dci_record.user != nullptr) ? dci_record.user->get_rnti() : (uint16_t)0u;
alloc.dci_pos.L = dci_record.aggr_idx;
// get cumulative pdcch mask
pdcch_mask_t cum_mask;
if (parent_node_idx >= 0) {
cum_mask = dci_alloc_tree[parent_node_idx].second.total_mask;
cum_mask = tree.dci_alloc_tree[parent_node_idx].node.total_mask;
} else {
cum_mask.resize(nof_cces());
cum_mask.resize(tree.nof_cces);
}
uint32_t nof_locs = dci_locs->nof_loc[aggr_idx];
uint32_t nof_locs = dci_locs.nof_loc[dci_record.aggr_idx];
for (uint32_t i = 0; i < nof_locs; ++i) {
uint32_t startpos = dci_locs->cce_start[aggr_idx][i];
uint32_t startpos = dci_locs.cce_start[dci_record.aggr_idx][i];
if (alloc_type == alloc_type_t::DL_DATA and user->pucch_sr_collision(tti_params->tti_tx_dl, startpos)) {
if (dci_record.alloc_type == alloc_type_t::DL_DATA and dci_record.user->pucch_sr_collision(tti_tx_dl, startpos)) {
// will cause a collision in the PUCCH
continue;
}
pdcch_mask_t alloc_mask(nof_cces());
alloc_mask.fill(startpos, startpos + (1u << aggr_idx));
pdcch_mask_t alloc_mask(tree.nof_cces);
alloc_mask.fill(startpos, startpos + (1u << dci_record.aggr_idx));
if ((cum_mask & alloc_mask).any()) {
// there is collision. Try another mask
continue;
@@ -168,38 +205,65 @@ void pdcch_grid_t::update_alloc_tree(int parent_node_idx,
alloc.dci_pos.ncce = startpos;
// Prune if repetition
uint32_t j = prev_end;
for (; j < dci_alloc_tree.size(); ++j) {
if (dci_alloc_tree[j].second.total_mask == alloc.total_mask) {
uint32_t j = tree.prev_end;
for (; j < tree.dci_alloc_tree.size(); ++j) {
if (tree.dci_alloc_tree[j].node.total_mask == alloc.total_mask) {
break;
}
}
if (j < dci_alloc_tree.size()) {
if (j < tree.dci_alloc_tree.size()) {
continue;
}
// Register allocation
dci_alloc_tree.emplace_back(parent_node_idx, alloc);
tree.dci_alloc_tree.emplace_back(parent_node_idx, alloc);
ret = true;
}
return ret;
}
bool pdcch_grid_t::set_cfi(uint32_t cfi)
{
current_cfix = cfi - 1;
// TODO: use this function for dynamic cfi
// TODO: The estimation of the number of required prbs in metric depends on CFI. Analyse the consequences
if (cfi < cc_cfg->sched_cfg->min_nof_ctrl_symbols or cfi > cc_cfg->sched_cfg->max_nof_ctrl_symbols) {
srslte::logmap::get("MAC")->error("Invalid CFI value. Defaulting to current CFI.\n");
return true;
}
uint32_t pdcch_grid_t::nof_cces() const
{
return cc_cfg->nof_cce_table[current_cfix];
uint32_t new_cfix = cfi - 1;
if (new_cfix == current_cfix) {
return true;
}
// setup new PDCCH alloc tree
auto& new_tree = alloc_trees[new_cfix];
new_tree.reset();
if (not dci_record_list.empty()) {
// there are already PDCCH allocations
// Rebuild Allocation Tree
bool ret = true;
for (const auto& old_record : dci_record_list) {
ret &= alloc_dci_record(old_record, new_cfix);
}
if (not ret) {
// Failed to rebuild the allocation tree. Keep the previous CFI
return false;
}
}
current_cfix = new_cfix;
// TODO: The estimation of the number of required prbs in metric depends on CFI. Analyse the consequences
return true;
}
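Because set_cfi() replays every recorded DCI at the target CFI and only commits current_cfix once the whole record list fits, a failed call leaves the grid untouched; this is what lets alloc_dci() probe successively larger CFIs and fall back safely. Usage sketch (pdcch being a pdcch_grid_t):

// Grow the control region only if all previously allocated DCIs still fit.
if (not pdcch.set_cfi(pdcch.get_cfi() + 1)) {
  // rebuild failed: the previous CFI and its allocation tree remain active
}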
void pdcch_grid_t::get_allocs(alloc_result_t* vec, pdcch_mask_t* tot_mask, size_t idx) const
{
auto& tree = alloc_trees[current_cfix];
// if alloc tree is empty
if (prev_start == prev_end) {
if (tree.prev_start == tree.prev_end) {
if (vec != nullptr) {
vec->clear();
}
@@ -213,32 +277,33 @@ void pdcch_grid_t::get_allocs(alloc_result_t* vec, pdcch_mask_t* tot_mask, size_
// set vector of allocations
if (vec != nullptr) {
vec->clear();
size_t i = prev_start + idx;
while (dci_alloc_tree[i].first >= 0) {
vec->push_back(&dci_alloc_tree[i].second);
i = (size_t)dci_alloc_tree[i].first;
size_t i = tree.prev_start + idx;
while (tree.dci_alloc_tree[i].parent_idx >= 0) {
vec->push_back(&tree.dci_alloc_tree[i].node);
i = (size_t)tree.dci_alloc_tree[i].parent_idx;
}
vec->push_back(&dci_alloc_tree[i].second);
vec->push_back(&tree.dci_alloc_tree[i].node);
std::reverse(vec->begin(), vec->end());
}
// set final cce mask
if (tot_mask != nullptr) {
*tot_mask = dci_alloc_tree[prev_start + idx].second.total_mask;
*tot_mask = tree.dci_alloc_tree[tree.prev_start + idx].node.total_mask;
}
}
std::string pdcch_grid_t::result_to_string(bool verbose) const
{
auto& tree = alloc_trees[current_cfix];
std::stringstream ss;
ss << "cfi=" << get_cfi() << ", mask_size=" << nof_cces() << ", " << prev_end - prev_start
ss << "cfi=" << get_cfi() << ", mask_size=" << nof_cces() << ", " << tree.prev_end - tree.prev_start
<< " DCI allocation combinations:\n";
// get all the possible combinations of DCI allocations
uint32_t count = 0;
for (size_t i = prev_start; i < prev_end; ++i) {
for (size_t i = tree.prev_start; i < tree.prev_end; ++i) {
alloc_result_t vec;
pdcch_mask_t tot_mask;
get_allocs(&vec, &tot_mask, i - prev_start);
get_allocs(&vec, &tot_mask, i - tree.prev_start);
ss << " combination " << count << ": mask=0x" << tot_mask.to_hex().c_str();
if (verbose) {
@@ -276,7 +341,7 @@ void sf_grid_t::init(const sched_cell_params_t& cell_params_)
pdcch_alloc.init(*cc_cfg);
}
void sf_grid_t::new_tti(const tti_params_t& tti_params_, uint32_t start_cfi)
void sf_grid_t::new_tti(const tti_params_t& tti_params_)
{
tti_params = &tti_params_;
@@ -285,11 +350,11 @@ void sf_grid_t::new_tti(const tti_params_t& tti_params_, uint32_t start_cfi)
avail_rbg = nof_rbgs;
// internal state
pdcch_alloc.new_tti(*tti_params, start_cfi);
pdcch_alloc.new_tti(*tti_params);
}
//! Allocates CCEs and RBs for the given mask and allocation type (e.g. data, BC, RAR, paging)
alloc_outcome_t sf_grid_t::alloc_dl(uint32_t aggr_lvl, alloc_type_t alloc_type, rbgmask_t alloc_mask, sched_ue* user)
alloc_outcome_t sf_grid_t::alloc_dl(uint32_t aggr_idx, alloc_type_t alloc_type, rbgmask_t alloc_mask, sched_ue* user)
{
// Check RBG collision
if ((dl_mask & alloc_mask).any()) {
@@ -297,7 +362,7 @@ alloc_outcome_t sf_grid_t::alloc_dl(uint32_t aggr_lvl, alloc_type_t alloc_type,
}
// Allocate DCI in PDCCH
if (not pdcch_alloc.alloc_dci(alloc_type, aggr_lvl, user)) {
if (not pdcch_alloc.alloc_dci(alloc_type, aggr_idx, user)) {
if (user != nullptr) {
if (log_h->get_level() == srslte::LOG_LEVEL_DEBUG) {
log_h->debug("No space in PDCCH for rnti=0x%x DL tx. Current PDCCH allocation: %s\n",
@@ -316,7 +381,7 @@ alloc_outcome_t sf_grid_t::alloc_dl(uint32_t aggr_lvl, alloc_type_t alloc_type,
}
//! Allocates CCEs and RBs for control allocs. It allocates RBs in a contiguous manner.
sf_grid_t::dl_ctrl_alloc_t sf_grid_t::alloc_dl_ctrl(uint32_t aggr_lvl, alloc_type_t alloc_type)
sf_grid_t::dl_ctrl_alloc_t sf_grid_t::alloc_dl_ctrl(uint32_t aggr_idx, alloc_type_t alloc_type)
{
rbg_range_t range;
range.rbg_min = nof_rbgs - avail_rbg;
@@ -335,16 +400,37 @@ sf_grid_t::dl_ctrl_alloc_t sf_grid_t::alloc_dl_ctrl(uint32_t aggr_lvl, alloc_typ
// allocate DCI and RBGs
rbgmask_t new_mask(dl_mask.size());
new_mask.fill(range.rbg_min, range.rbg_max);
return {alloc_dl(aggr_lvl, alloc_type, new_mask), range};
return {alloc_dl(aggr_idx, alloc_type, new_mask), range};
}
//! Allocates CCEs and RBs for a user DL data alloc.
alloc_outcome_t sf_grid_t::alloc_dl_data(sched_ue* user, const rbgmask_t& user_mask)
{
// Check if allocation would cause segmentation
uint32_t ue_cc_idx = user->get_cell_index(cc_cfg->enb_cc_idx).second;
rbg_range_t r = user->get_required_dl_rbgs(ue_cc_idx, pdcch_alloc.get_cfi());
if (r.rbg_min > user_mask.count()) {
log_h->error("The number of RBGs allocated will force segmentation\n");
return alloc_outcome_t::NOF_RB_INVALID;
}
// Place an upper bound in CFI if necessary, to avoid segmentation
if (pdcch_alloc.get_cfi() < cc_cfg->sched_cfg->max_nof_ctrl_symbols) {
for (uint32_t cfi = cc_cfg->sched_cfg->max_nof_ctrl_symbols; cfi >= pdcch_alloc.get_cfi() + 1; --cfi) {
r = user->get_required_dl_rbgs(ue_cc_idx, cfi);
if (r.rbg_min <= user_mask.count()) {
break;
}
// decrease max CFI
pdcch_alloc.set_max_cfi(cfi);
}
}
srslte_dci_format_t dci_format = user->get_dci_format();
uint32_t nof_bits = srslte_dci_format_sizeof(&cc_cfg->cfg.cell, nullptr, nullptr, dci_format);
uint32_t aggr_level = user->get_ue_carrier(cc_cfg->enb_cc_idx)->get_aggr_level(nof_bits);
return alloc_dl(aggr_level, alloc_type_t::DL_DATA, user_mask, user);
uint32_t aggr_idx = user->get_ue_carrier(cc_cfg->enb_cc_idx)->get_aggr_level(nof_bits);
alloc_outcome_t ret = alloc_dl(aggr_idx, alloc_type_t::DL_DATA, user_mask, user);
return ret;
}
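Note the interplay handled here: a larger CFI leaves fewer OFDM symbols for PDSCH, so get_required_dl_rbgs() reports a higher minimum RBG count for the same pending data. Capping max_user_cfi at the largest CFI for which user_mask still meets that minimum guarantees that a later CFI escalation, triggered by PDCCH congestion, cannot retroactively turn this RBG allocation into one that forces segmentation.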
alloc_outcome_t sf_grid_t::alloc_ul_data(sched_ue* user, ul_harq_proc::ul_alloc_t alloc, bool needs_pdcch)
@@ -410,7 +496,7 @@ void sf_sched::init(const sched_cell_params_t& cell_params_)
max_msg3_prb = std::max(6u, cc_cfg->cfg.cell.nof_prb - (uint32_t)cc_cfg->cfg.nrb_pucch);
}
void sf_sched::new_tti(uint32_t tti_rx_, uint32_t start_cfi)
void sf_sched::new_tti(uint32_t tti_rx_)
{
// reset internal state
bc_allocs.clear();
@@ -419,7 +505,7 @@ void sf_sched::new_tti(uint32_t tti_rx_, uint32_t start_cfi)
ul_data_allocs.clear();
tti_params = tti_params_t{tti_rx_};
tti_alloc.new_tti(tti_params, start_cfi);
tti_alloc.new_tti(tti_params);
// setup first prb to be used for msg3 alloc. Account for potential PRACH alloc
last_msg3_prb = cc_cfg->cfg.nrb_pucch;

@@ -1059,6 +1059,11 @@ std::pair<bool, uint32_t> sched_ue::get_cell_index(uint32_t enb_cc_idx) const
return {false, std::numeric_limits<uint32_t>::max()};
}
uint32_t sched_ue::get_aggr_level(uint32_t ue_cc_idx, uint32_t nof_bits)
{
return carriers[ue_cc_idx].get_aggr_level(nof_bits);
}
void sched_ue::finish_tti(const tti_params_t& tti_params, uint32_t enb_cc_idx)
{
auto p = get_cell_index(enb_cc_idx);

@@ -18,11 +18,24 @@
# and at http://www.gnu.org/licenses/.
#
add_library(scheduler_test_common STATIC scheduler_test_common.cc)
# Scheduler subcomponent testing
add_executable(sched_grid_test sched_grid_test.cc)
target_link_libraries(sched_grid_test srsenb_mac
srsenb_phy
srslte_common
scheduler_test_common
${CMAKE_THREAD_LIBS_INIT}
${Boost_LIBRARIES})
add_test(sched_grid_test sched_grid_test)
# Scheduler test random
add_executable(scheduler_test_rand scheduler_test_rand.cc scheduler_test_common.cc)
add_executable(scheduler_test_rand scheduler_test_rand.cc)
target_link_libraries(scheduler_test_rand srsenb_mac
srsenb_phy
srslte_common
scheduler_test_common
srslte_phy
rrc_asn1
${CMAKE_THREAD_LIBS_INIT}
@@ -35,6 +48,7 @@ target_link_libraries(scheduler_ca_test srsenb_mac
srsenb_phy
srslte_common
srslte_phy
scheduler_test_common
rrc_asn1
${CMAKE_THREAD_LIBS_INIT}
${Boost_LIBRARIES})

@@ -0,0 +1,151 @@
/*
* Copyright 2013-2020 Software Radio Systems Limited
*
* This file is part of srsLTE.
*
* srsLTE is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as
* published by the Free Software Foundation, either version 3 of
* the License, or (at your option) any later version.
*
* srsLTE is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* A copy of the GNU Affero General Public License can be found in
* the LICENSE file in the top-level directory of this distribution
* and at http://www.gnu.org/licenses/.
*
*/
#include "scheduler_test_common.h"
#include "srsenb/hdr/stack/mac/scheduler_grid.h"
#include "srslte/common/test_common.h"
using namespace srsenb;
// const uint32_t seed = std::chrono::system_clock::now().time_since_epoch().count();
const uint32_t seed = 3930373626;
const uint32_t PCell_IDX = 0;
const std::array<uint32_t, 6> prb_list = {6, 15, 25, 50, 75, 100};
uint32_t get_aggr_level(sched_ue& sched_ue, uint32_t ue_cc_idx, const std::vector<sched_cell_params_t>& cell_params)
{
srslte_dci_format_t dci_format = sched_ue.get_dci_format();
uint32_t nof_dci_bits = srslte_dci_format_sizeof(&cell_params[ue_cc_idx].cfg.cell, nullptr, nullptr, dci_format);
uint32_t aggr_level = sched_ue.get_aggr_level(ue_cc_idx, nof_dci_bits);
return aggr_level;
}
int test_pdcch_one_ue()
{
using rand_uint = std::uniform_int_distribution<uint32_t>;
const uint32_t ENB_CC_IDX = 0;
// Params
uint32_t nof_prb = prb_list[rand_uint{0, 5}(get_rand_gen())];
uint16_t rnti = rand_uint{70, 120}(get_rand_gen());
srslte::tti_point start_tti{rand_uint{0, 10240}(get_rand_gen())};
uint32_t nof_ttis = 100;
// Derived
std::vector<sched_cell_params_t> cell_params(1);
sched_interface::ue_cfg_t ue_cfg = generate_default_ue_cfg();
sched_interface::cell_cfg_t cell_cfg = generate_default_cell_cfg(nof_prb);
sched_interface::sched_args_t sched_args{};
cell_params[ENB_CC_IDX].set_cfg(ENB_CC_IDX, cell_cfg, sched_args);
pdcch_grid_t pdcch;
sched_ue sched_ue{};
sched_ue.init(rnti, cell_params);
sched_ue.set_cfg(ue_cfg);
pdcch.init(cell_params[PCell_IDX]);
TESTASSERT(pdcch.nof_alloc_combinations() == 0);
TESTASSERT(pdcch.nof_allocs() == 0);
uint32_t tti_counter = 0;
for (; tti_counter < nof_ttis; ++tti_counter) {
tti_params_t tti_params{(start_tti + tti_counter).to_uint()};
pdcch.new_tti(tti_params);
TESTASSERT(pdcch.nof_cces() == cell_params[ENB_CC_IDX].nof_cce_table[0]);
TESTASSERT(pdcch.get_cfi() == 1); // Start at CFI=1
// Set DL CQI - it should affect aggregation level
uint32_t dl_cqi = std::uniform_int_distribution<uint32_t>{1, 25}(srsenb::get_rand_gen());
sched_ue.set_dl_cqi(tti_params.tti_tx_dl, ENB_CC_IDX, dl_cqi);
uint32_t aggr_idx = get_aggr_level(sched_ue, PCell_IDX, cell_params);
uint32_t max_nof_cce_locs =
sched_ue.get_locations(ENB_CC_IDX, pdcch_grid_t::MAX_CFI, tti_params.sf_idx_tx_dl)->nof_loc[aggr_idx];
// allocate DL user
uint32_t prev_cfi = pdcch.get_cfi();
srsenb::sched_dci_cce_t* dci_cce = sched_ue.get_locations(ENB_CC_IDX, prev_cfi, tti_params.sf_idx_tx_dl);
uint32_t prev_nof_cce_locs = dci_cce->nof_loc[aggr_idx];
TESTASSERT(pdcch.alloc_dci(alloc_type_t::DL_DATA, aggr_idx, &sched_ue));
TESTASSERT(pdcch.nof_allocs() == 1);
if (prev_nof_cce_locs == pdcch.nof_allocs() - 1) {
// CFI must be increased
TESTASSERT(pdcch.get_cfi() > prev_cfi);
} else {
// Previous CFI should be fine
TESTASSERT(pdcch.get_cfi() == prev_cfi);
}
dci_cce = sched_ue.get_locations(ENB_CC_IDX, pdcch.get_cfi(), tti_params.sf_idx_tx_dl);
uint32_t nof_dci_locs = dci_cce->nof_loc[aggr_idx];
const uint32_t* dci_locs = dci_cce->cce_start[aggr_idx];
pdcch_grid_t::alloc_result_t pdcch_result;
pdcch_mask_t pdcch_mask;
pdcch.get_allocs(&pdcch_result, &pdcch_mask, 0);
TESTASSERT(pdcch_result.size() == 1);
TESTASSERT(pdcch_result[0]->rnti == sched_ue.get_rnti());
TESTASSERT(pdcch_result[0]->total_mask.size() == cell_params[ENB_CC_IDX].nof_cce_table[pdcch.get_cfi() - 1]);
TESTASSERT(pdcch_result[0]->current_mask == pdcch_result[0]->total_mask);
TESTASSERT(pdcch_result[0]->current_mask.count() == 1u << aggr_idx);
TESTASSERT(std::count(dci_locs, dci_locs + nof_dci_locs, pdcch_result[0]->dci_pos.ncce) > 0);
// allocate UL user
if (max_nof_cce_locs == pdcch.nof_allocs()) {
// no more space
continue;
}
prev_nof_cce_locs = nof_dci_locs;
prev_cfi = pdcch.get_cfi();
TESTASSERT(pdcch.alloc_dci(alloc_type_t::UL_DATA, aggr_idx, &sched_ue));
TESTASSERT(pdcch.nof_allocs() == 2);
if (prev_nof_cce_locs == pdcch.nof_allocs() - 1) {
// CFI must be increased
TESTASSERT(pdcch.get_cfi() > prev_cfi);
} else {
// Previous CFI should be fine
TESTASSERT(pdcch.get_cfi() == prev_cfi);
}
dci_cce = sched_ue.get_locations(ENB_CC_IDX, pdcch.get_cfi(), tti_params.sf_idx_tx_dl);
nof_dci_locs = dci_cce->nof_loc[aggr_idx];
dci_locs = dci_cce->cce_start[aggr_idx];
pdcch.get_allocs(&pdcch_result, &pdcch_mask, 0);
TESTASSERT(pdcch_result.size() == pdcch.nof_allocs());
TESTASSERT(pdcch_result[1]->rnti == sched_ue.get_rnti());
TESTASSERT(pdcch_result[1]->total_mask.size() == cell_params[ENB_CC_IDX].nof_cce_table[pdcch.get_cfi() - 1]);
TESTASSERT((pdcch_result[1]->current_mask & pdcch_result[0]->current_mask).none());
TESTASSERT(pdcch_result[1]->current_mask.count() == 1u << aggr_idx);
TESTASSERT(pdcch_result[1]->total_mask == (pdcch_result[0]->current_mask | pdcch_result[1]->current_mask));
TESTASSERT(std::count(dci_locs, dci_locs + nof_dci_locs, pdcch_result[1]->dci_pos.ncce) > 0);
}
TESTASSERT(tti_counter == nof_ttis);
return SRSLTE_SUCCESS;
}
int main()
{
srsenb::set_randseed(seed);
printf("This is the chosen seed: %u\n", seed);
TESTASSERT(test_pdcch_one_ue() == SRSLTE_SUCCESS);
}
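With the CMakeLists.txt changes above, the binary is registered through add_test(sched_grid_test sched_grid_test) and runs under ctest alongside the other scheduler tests; the seed printed at startup (fixed here to 3930373626) makes failures reproducible.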

@@ -244,7 +244,7 @@ int sched_tester::assert_no_empty_allocs()
int sched_tester::test_pdcch_collisions()
{
srslte::bounded_bitset<128, true> used_cce;
used_cce.resize(srslte_regs_pdcch_ncce(sched_cell_params[CARRIER_IDX].regs.get(), sched_cfg.nof_ctrl_symbols));
used_cce.resize(srslte_regs_pdcch_ncce(sched_cell_params[CARRIER_IDX].regs.get(), sched_cfg.max_nof_ctrl_symbols));
/* TEST: Check if there are collisions in the PDCCH */
TESTASSERT(output_tester[CARRIER_IDX].test_pdcch_collisions(tti_info.dl_sched_result[CARRIER_IDX],
