@@ -16,6 +16,231 @@

namespace srsenb {
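
// Returns true if the given n(1)_pucch resource would collide with the UE's SR PUCCH resource in the TTI where
// the HARQ-ACK is transmitted.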
bool is_pucch_sr_collision(const srslte_pucch_cfg_t& ue_pucch_cfg, tti_point tti_tx_dl_ack, uint32_t n1_pucch)
{
  if (ue_pucch_cfg.sr_configured && srslte_ue_ul_sr_send_tti(&ue_pucch_cfg, tti_tx_dl_ack.to_uint())) {
    return n1_pucch == ue_pucch_cfg.n_pucch_sr;
  }
  return false;
}

void sf_cch_allocator2::init(const sched_cell_params_t& cell_params_)
{
  cc_cfg = &cell_params_;
  pucch_cfg_common = cc_cfg->pucch_cfg_common;
}

void sf_cch_allocator2::new_tti(tti_point tti_rx_)
{
  tti_rx = tti_rx_;

  dci_record_list.clear();
  last_dci_dfs.clear();
  current_cfix = cc_cfg->sched_cfg->min_nof_ctrl_symbols - 1;
  current_max_cfix = cc_cfg->sched_cfg->max_nof_ctrl_symbols - 1;
}

const cce_cfi_position_table*
sf_cch_allocator2::get_cce_loc_table(alloc_type_t alloc_type, sched_ue* user, uint32_t cfix) const
{
  switch (alloc_type) {
    case alloc_type_t::DL_BC:
      return &cc_cfg->common_locations[cfix];
    case alloc_type_t::DL_PCCH:
      return &cc_cfg->common_locations[cfix];
    case alloc_type_t::DL_RAR:
      return &cc_cfg->rar_locations[to_tx_dl(tti_rx).sf_idx()][cfix];
    case alloc_type_t::DL_DATA:
      return user->get_locations(cc_cfg->enb_cc_idx, cfix + 1, to_tx_dl(tti_rx).sf_idx());
    case alloc_type_t::UL_DATA:
      return user->get_locations(cc_cfg->enb_cc_idx, cfix + 1, to_tx_dl(tti_rx).sf_idx());
    default:
      break;
  }
  return nullptr;
}
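
// Registers a new DCI allocation record and searches for PDCCH positions where it fits together with the DCIs
// already allocated in this subframe, backtracking over their positions (and the CFI) when needed.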
bool sf_cch_allocator2::alloc_dci(alloc_type_t alloc_type, uint32_t aggr_idx, sched_ue* user, bool has_pusch_grant)
{
  temp_dci_dfs.clear();
  uint32_t start_cfix = current_cfix;

  alloc_record record;
  record.user = user;
  record.aggr_idx = aggr_idx;
  record.alloc_type = alloc_type;
  record.pusch_uci = has_pusch_grant;

  // Try to allocate grant. If it fails, attempt the same grant, but using a different permutation of past grant DCI
  // positions
  do {
    bool success = alloc_dfs_node(record, 0);
    if (success) {
      // DCI record allocation successful
      dci_record_list.push_back(record);
      return true;
    }
    if (temp_dci_dfs.empty()) {
      temp_dci_dfs = last_dci_dfs;
    }
  } while (get_next_dfs());

  // Revert steps to initial state, before dci record allocation was attempted
  last_dci_dfs.swap(temp_dci_dfs);
  current_cfix = start_cfix;
  return false;
}

// bool sf_cch_allocator2::get_next_dfs()
//{
//  if (last_dci_dfs.empty()) {
//    // If we reach root, increase CFI
//    if (current_cfix < cc_cfg->sched_cfg->max_nof_ctrl_symbols - 1) {
//      current_cfix++;
//      return true;
//    }
//    return false;
//  }
//
//  uint32_t dfs_level = last_dci_dfs.size() - 1;
//  uint32_t start_child_idx = last_dci_dfs.back().dci_pos_idx + 1;
//  last_dci_dfs.pop_back();
//  while (not alloc_dfs_node(dci_record_list[dfs_level], start_child_idx)) {
//    start_child_idx = 0;
//    // If failed to allocate record, go one level lower in DFS
//    if (not get_next_dfs()) {
//      // If no more options in DFS, return false
//      return false;
//    }
//  }
//}
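
// Advances the DFS to the next feasible combination of DCI positions: at the root it increases the CFI; otherwise
// it retries the last allocated DCI with its next candidate position and re-allocates the remaining DCI records.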
bool sf_cch_allocator2::get_next_dfs()
{
  do {
    uint32_t start_child_idx = 0;
    if (last_dci_dfs.empty()) {
      // If we reach root, increase CFI
      current_cfix++;
      if (current_cfix > cc_cfg->sched_cfg->max_nof_ctrl_symbols - 1) {
        return false;
      }
    } else {
      // Attempt to re-add last tree node, but with a higher node child index
      start_child_idx = last_dci_dfs.back().dci_pos_idx + 1;
      last_dci_dfs.pop_back();
    }
    while (last_dci_dfs.size() < dci_record_list.size() and
           alloc_dfs_node(dci_record_list[last_dci_dfs.size()], start_child_idx)) {
      start_child_idx = 0;
    }
  } while (last_dci_dfs.size() < dci_record_list.size());

  // Finished computation of next DFS node
  return true;
}
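
// Tries to place the given DCI record at one of its allowed CCE positions for the current CFI, starting at
// start_dci_idx, avoiding PDCCH CCE collisions and PUCCH HARQ-ACK collisions with previously allocated DCIs.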
bool sf_cch_allocator2::alloc_dfs_node(const alloc_record& record, uint32_t start_dci_idx)
{
  // Get DCI Location Table
  const cce_cfi_position_table* dci_locs = get_cce_loc_table(record.alloc_type, record.user, current_cfix);
  if (dci_locs == nullptr or (*dci_locs)[record.aggr_idx].empty()) {
    return false;
  }
  const cce_position_list& dci_pos_list = (*dci_locs)[record.aggr_idx];
  if (start_dci_idx >= dci_pos_list.size()) {
    return false;
  }

  tree_node node;
  node.dci_pos_idx = start_dci_idx;
  node.dci_pos.L = record.aggr_idx;
  node.rnti = record.user != nullptr ? record.user->get_rnti() : SRSLTE_INVALID_RNTI;
  node.current_mask.resize(nof_cces());
  // get cumulative pdcch & pucch masks
  if (not last_dci_dfs.empty()) {
    node.total_mask = last_dci_dfs.back().total_mask;
    node.total_pucch_mask = last_dci_dfs.back().total_pucch_mask;
  } else {
    node.total_mask.resize(nof_cces());
    node.total_pucch_mask.resize(cc_cfg->nof_prb());
  }

  for (; node.dci_pos_idx < dci_pos_list.size(); ++node.dci_pos_idx) {
    node.dci_pos.ncce = dci_pos_list[node.dci_pos_idx];

    if (record.alloc_type == alloc_type_t::DL_DATA and not record.pusch_uci) {
      // The UE needs to allocate space in PUCCH for HARQ-ACK
      pucch_cfg_common.n_pucch = node.dci_pos.ncce + pucch_cfg_common.N_pucch_1;

      if (is_pucch_sr_collision(record.user->get_ue_cfg().pucch_cfg, to_tx_dl_ack(tti_rx), pucch_cfg_common.n_pucch)) {
        // avoid collision of HARQ-ACK with own SR n(1)_pucch
        continue;
      }

      node.pucch_n_prb = srslte_pucch_n_prb(&cc_cfg->cfg.cell, &pucch_cfg_common, 0);
      if (not cc_cfg->sched_cfg->pucch_mux_enabled and node.total_pucch_mask.test(node.pucch_n_prb)) {
        // PUCCH allocation would collide with other PUCCH/PUSCH grants. Try another CCE position
        continue;
      }
    }

    node.current_mask.reset();
    node.current_mask.fill(node.dci_pos.ncce, node.dci_pos.ncce + (1U << record.aggr_idx));
    if ((node.total_mask & node.current_mask).any()) {
      // there is a PDCCH collision. Try another CCE position
      continue;
    }

    // Allocation successful
    node.total_mask |= node.current_mask;
    if (node.pucch_n_prb >= 0) {
      node.total_pucch_mask.set(node.pucch_n_prb);
    }
    last_dci_dfs.push_back(node);
    return true;
  }

  return false;
}

void sf_cch_allocator2::rem_last_dci()
{
  assert(not dci_record_list.empty());

  // Remove DCI record
  last_dci_dfs.pop_back();
  dci_record_list.pop_back();
}

void sf_cch_allocator2::get_allocs(alloc_result_t* vec, pdcch_mask_t* tot_mask, size_t idx) const
{
  if (vec != nullptr) {
    vec->clear();

    vec->resize(last_dci_dfs.size());
    for (uint32_t i = 0; i < last_dci_dfs.size(); ++i) {
      (*vec)[i] = &last_dci_dfs[i];
    }
  }

  if (tot_mask != nullptr) {
    if (last_dci_dfs.empty()) {
      tot_mask->resize(nof_cces());
      tot_mask->reset();
    } else {
      *tot_mask = last_dci_dfs.back().total_mask;
    }
  }
}

std::string sf_cch_allocator2::result_to_string(bool verbose) const
{
  return "";
}

/////////////////////////

void sf_cch_allocator::init(const sched_cell_params_t& cell_params_)
{
  cc_cfg = &cell_params_;
@@ -226,14 +451,6 @@ void sf_cch_allocator::alloc_tree_t::reset()
  dci_alloc_tree.clear();
}

bool is_pucch_sr_collision(const srslte_pucch_cfg_t& ue_pucch_cfg, tti_point tti_tx_dl_ack, uint32_t n1_pucch)
{
  if (ue_pucch_cfg.sr_configured && srslte_ue_ul_sr_send_tti(&ue_pucch_cfg, tti_tx_dl_ack.to_uint())) {
    return n1_pucch == ue_pucch_cfg.n_pucch_sr;
  }
  return false;
}

/// Algorithm to compute a valid PDCCH allocation
bool sf_cch_allocator::alloc_tree_t::add_tree_node_leaves(int parent_node_idx,
                                                          const alloc_record_t& dci_record,