Make PUCCH HARQ region a configurable parameter. Skip grants when ACK falls outside region

master
Ismael Gomez 3 years ago
parent 28b956d128
commit 3eaf5c50cb

@@ -171,6 +171,10 @@ enable = false
# pusch_max_mcs: Optional PUSCH MCS limit
# min_nof_ctrl_symbols: Minimum number of control symbols
# max_nof_ctrl_symbols: Maximum number of control symbols
# pucch_multiplex_enable: Allow PUCCH HARQ to collide with PUSCH and other PUCCH
# pucch_harq_max_rb: Maximum number of RBs to be used for PUCCH on the edges of the grid.
#                     If defined and greater than 0, the scheduler will avoid DL PDCCH allocations
#                     whenever the resulting PUCCH HARQ resource would fall outside this region.
# target_bler: Target BLER (in decimal) to achieve via adaptive link
# max_delta_dl_cqi: Maximum shift in CQI for adaptive DL link
# max_delta_ul_snr: Maximum shift in UL SNR for adaptive UL link
@@ -196,6 +200,7 @@ enable = false
#min_nof_ctrl_symbols = 1
#max_nof_ctrl_symbols = 3
#pucch_multiplex_enable = false
#pucch_harq_max_rb = 1
#target_bler = 0.05
#max_delta_dl_cqi = 5
#max_delta_ul_snr = 5

@@ -58,6 +58,7 @@ public:
int max_aggr_level = 3;
bool adaptive_aggr_level = false;
bool pucch_mux_enabled = false;
int pucch_harq_max_rb = 0;
float target_bler = 0.05;
float max_delta_dl_cqi = 5;
float max_delta_ul_snr = 5;

@@ -152,6 +152,7 @@ void parse_args(all_args_t* args, int argc, char* argv[])
("scheduler.max_nof_ctrl_symbols", bpo::value<uint32_t>(&args->stack.mac.sched.max_nof_ctrl_symbols)->default_value(3), "Number of control symbols")
("scheduler.min_nof_ctrl_symbols", bpo::value<uint32_t>(&args->stack.mac.sched.min_nof_ctrl_symbols)->default_value(1), "Minimum number of control symbols")
("scheduler.pucch_multiplex_enable", bpo::value<bool>(&args->stack.mac.sched.pucch_mux_enabled)->default_value(false), "Enable PUCCH multiplexing")
("scheduler.pucch_harq_max_rb", bpo::value<int>(&args->stack.mac.sched.pucch_harq_max_rb)->default_value(0), "Maximum number of RB to be used for PUCCH on the edges of the grid")
("scheduler.target_bler", bpo::value<float>(&args->stack.mac.sched.target_bler)->default_value(0.05), "Target BLER (in decimal) to achieve via adaptive link")
("scheduler.max_delta_dl_cqi", bpo::value<float>(&args->stack.mac.sched.max_delta_dl_cqi)->default_value(5.0), "Maximum shift in CQI for adaptive DL link")
("scheduler.max_delta_ul_snr", bpo::value<float>(&args->stack.mac.sched.max_delta_ul_snr)->default_value(5.0), "Maximum shift in UL SNR for adaptive UL link")

@@ -80,9 +80,15 @@ void sf_grid_t::init(const sched_cell_params_t& cell_params_)
pucch_mask.resize(cc_cfg->nof_prb());
pucch_nrb = (cc_cfg->cfg.nrb_pucch > 0) ? (uint32_t)cc_cfg->cfg.nrb_pucch : 0;
srsran_pucch_cfg_t pucch_cfg = cell_params_.pucch_cfg_common;
pucch_cfg.n_pucch =
cc_cfg->nof_cce_table[cell_params_.sched_cfg->max_nof_ctrl_symbols - 1] - 1 + cc_cfg->cfg.n1pucch_an;
pucch_nrb = std::max(pucch_nrb, srsran_pucch_m(&pucch_cfg, cc_cfg->cfg.cell.cp) / 2 + 1);
uint32_t harq_pucch = 0;
if (cc_cfg->sched_cfg->pucch_harq_max_rb > 0) {
harq_pucch = cc_cfg->sched_cfg->pucch_harq_max_rb;
} else {
pucch_cfg.n_pucch =
cc_cfg->nof_cce_table[cell_params_.sched_cfg->max_nof_ctrl_symbols - 1] - 1 + cc_cfg->cfg.n1pucch_an;
harq_pucch = srsran_pucch_m(&pucch_cfg, cc_cfg->cfg.cell.cp) / 2 + 1;
}
pucch_nrb = std::max(pucch_nrb, harq_pucch);
if (pucch_nrb > 0) {
pucch_mask.fill(0, pucch_nrb);
pucch_mask.fill(cc_cfg->nof_prb() - pucch_nrb, cc_cfg->nof_prb());

@@ -181,6 +181,15 @@ bool sf_cch_allocator::alloc_dfs_node(const alloc_record& record, uint32_t start
// PUCCH allocation would collide with other PUCCH/PUSCH grants. Try another CCE position
continue;
}
int low_rb = node.pucch_n_prb < (int)cc_cfg->cfg.cell.nof_prb / 2
? node.pucch_n_prb
: cc_cfg->cfg.cell.nof_prb - node.pucch_n_prb - 1;
if (cc_cfg->sched_cfg->pucch_harq_max_rb > 0 && low_rb >= cc_cfg->sched_cfg->pucch_harq_max_rb) {
// PUCCH allocation would fall outside the maximum allowed PUCCH HARQ region. Try another CCE position
logger.info("Skipping PDCCH allocation for CCE=%d due to PUCCH HARQ falling outside region\n",
node.dci_pos.ncce);
continue;
}
}
node.current_mask.reset();

Loading…
Cancel
Save