sched refactor, use enb_cc_idx rather than ue_cc_idx primarily to avoid unneeded conversions

Branch: master
Author: Francisco Paisana, 4 years ago
parent 30439c12e0
commit 3892194d98
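For context: the scheduler keeps two carrier indexings. enb_cc_idx identifies a component carrier eNB-wide, while ue_cc_idx is the UE-local index into that UE's list of configured carriers; before this commit, callers converted between the two at nearly every call site. A minimal standalone sketch of the mapping this diff centralizes (hypothetical types, not the actual srsLTE classes):

#include <cstdint>
#include <vector>

// Hypothetical stand-in for sched_ue's per-cell state: each eNB carrier
// slot stores the UE-local carrier index, or -1 if the UE does not use
// that carrier.
struct cell_info {
  int ue_cc_idx;
  int get_ue_cc_idx() const { return ue_cc_idx; }
};

struct sched_ue_sketch {
  std::vector<cell_info> cells; // indexed by enb_cc_idx

  // Mirrors the bounds-checked conversion this commit gives
  // sched_ue::enb_to_ue_cc_idx(): out-of-range or unconfigured
  // carriers map to -1.
  int enb_to_ue_cc_idx(uint32_t enb_cc_idx) const
  {
    return enb_cc_idx < cells.size() ? cells[enb_cc_idx].get_ue_cc_idx() : -1;
  }
};

int main()
{
  sched_ue_sketch ue;
  ue.cells = {{0}, {-1}, {1}}; // UE active on eNB carriers 0 and 2 only
  // Callers can pass enb_cc_idx around freely and convert only where
  // UE-local state (HARQs, CQI, etc.) is actually touched.
  return ue.enb_to_ue_cc_idx(2) == 1 ? 0 : 1;
}

With the conversion in one place, the API below can take enb_cc_idx everywhere and map to UE-local state only where needed.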

@@ -132,33 +132,33 @@ public:
   * Custom functions
   *******************************************************/
-  const dl_harq_proc& get_dl_harq(uint32_t idx, uint32_t cc_idx) const;
+  const dl_harq_proc& get_dl_harq(uint32_t idx, uint32_t enb_cc_idx) const;
   uint16_t get_rnti() const { return rnti; }
   std::pair<bool, uint32_t> get_active_cell_index(uint32_t enb_cc_idx) const;
   const ue_cfg_t& get_ue_cfg() const { return cfg; }
-  uint32_t get_aggr_level(uint32_t ue_cc_idx, uint32_t nof_bits);
+  uint32_t get_aggr_level(uint32_t enb_cc_idx, uint32_t nof_bits);
   void ul_buffer_add(uint8_t lcid, uint32_t bytes);

   /*******************************************************
   * Functions used by scheduler metric objects
   *******************************************************/
-  uint32_t get_required_prb_ul(uint32_t cc_idx, uint32_t req_bytes);
-  rbg_interval get_required_dl_rbgs(uint32_t ue_cc_idx);
-  srslte::interval<uint32_t> get_requested_dl_bytes(uint32_t ue_cc_idx);
+  uint32_t get_required_prb_ul(uint32_t enb_cc_idx, uint32_t req_bytes);
+  rbg_interval get_required_dl_rbgs(uint32_t enb_cc_idx);
+  srslte::interval<uint32_t> get_requested_dl_bytes(uint32_t enb_cc_idx);
   uint32_t get_pending_dl_rlc_data() const;
-  uint32_t get_expected_dl_bitrate(uint32_t ue_cc_idx, int nof_rbgs = -1) const;
-  uint32_t get_pending_ul_data_total(tti_point tti_tx_ul, int this_ue_cc_idx);
-  uint32_t get_pending_ul_new_data(tti_point tti_tx_ul, int this_ue_cc_idx);
+  uint32_t get_expected_dl_bitrate(uint32_t enb_cc_idx, int nof_rbgs = -1) const;
+  uint32_t get_pending_ul_data_total(tti_point tti_tx_ul, int this_enb_cc_idx);
+  uint32_t get_pending_ul_new_data(tti_point tti_tx_ul, int this_enb_cc_idx);
   uint32_t get_pending_ul_old_data();
-  uint32_t get_pending_ul_old_data(uint32_t cc_idx);
-  uint32_t get_expected_ul_bitrate(uint32_t ue_cc_idx, int nof_prbs = -1) const;
-  dl_harq_proc* get_pending_dl_harq(tti_point tti_tx_dl, uint32_t cc_idx);
-  dl_harq_proc* get_empty_dl_harq(tti_point tti_tx_dl, uint32_t cc_idx);
-  ul_harq_proc* get_ul_harq(tti_point tti_tx_ul, uint32_t ue_cc_idx);
+  uint32_t get_pending_ul_old_data(uint32_t enb_cc_idx);
+  uint32_t get_expected_ul_bitrate(uint32_t enb_cc_idx, int nof_prbs = -1) const;
+  dl_harq_proc* get_pending_dl_harq(tti_point tti_tx_dl, uint32_t enb_cc_idx);
+  dl_harq_proc* get_empty_dl_harq(tti_point tti_tx_dl, uint32_t enb_cc_idx);
+  ul_harq_proc* get_ul_harq(tti_point tti_tx_ul, uint32_t enb_cc_idx);

   /*******************************************************
   * Functions used by the scheduler carrier object
@@ -176,12 +176,12 @@ public:
   int generate_dl_dci_format(uint32_t pid,
                              sched_interface::dl_sched_data_t* data,
                              tti_point tti_tx_dl,
-                             uint32_t ue_cc_idx,
+                             uint32_t enb_cc_idx,
                              uint32_t cfi,
                              const rbgmask_t& user_mask);
   int generate_format0(sched_interface::ul_sched_data_t* data,
                        tti_point tti_tx_ul,
-                       uint32_t cc_idx,
+                       uint32_t enb_cc_idx,
                        prb_interval alloc,
                        bool needs_pdcch,
                        srslte_dci_location_t cce_range,
@@ -209,34 +209,34 @@ private:
                                    dl_harq_proc* h,
                                    const rbgmask_t& user_mask,
                                    tti_point tti_tx_dl,
-                                   uint32_t ue_cc_idx,
+                                   uint32_t enb_cc_idx,
                                    uint32_t cfi,
                                    uint32_t tb);
-  tbs_info compute_mcs_and_tbs(uint32_t ue_cc_idx,
+  tbs_info compute_mcs_and_tbs(uint32_t enb_cc_idx,
                                tti_point tti_tx_dl,
                                uint32_t nof_alloc_prbs,
                                uint32_t cfi,
                                const srslte_dci_dl_t& dci);
-  bool needs_cqi(uint32_t tti, uint32_t cc_idx, bool will_send = false);
+  bool needs_cqi(uint32_t tti, uint32_t enb_cc_idx, bool will_send = false);
   int generate_format1(uint32_t pid,
                        sched_interface::dl_sched_data_t* data,
                        tti_point tti_tx_dl,
-                       uint32_t ue_cc_idx,
+                       uint32_t enb_cc_idx,
                        uint32_t cfi,
                        const rbgmask_t& user_mask);
   int generate_format2a(uint32_t pid,
                         sched_interface::dl_sched_data_t* data,
                         tti_point tti_tx_dl,
-                        uint32_t cc_idx,
+                        uint32_t enb_cc_idx,
                         uint32_t cfi,
                         const rbgmask_t& user_mask);
   int generate_format2(uint32_t pid,
                        sched_interface::dl_sched_data_t* data,
                        tti_point tti_tx_dl,
-                       uint32_t cc_idx,
+                       uint32_t enb_cc_idx,
                        uint32_t cfi,
                        const rbgmask_t& user_mask);

@@ -95,10 +95,7 @@ uint32_t allocate_mac_sdus(sched_interface::dl_sched_data_t* data,
  * @param total_tbs available space in bytes for allocations
  * @return number of bytes allocated
  */
-uint32_t allocate_mac_ces(sched_interface::dl_sched_data_t* data,
-                          lch_ue_manager& lch_handler,
-                          uint32_t total_tbs,
-                          uint32_t ue_cc_idx);
+uint32_t allocate_mac_ces(sched_interface::dl_sched_data_t* data, lch_ue_manager& lch_handler, uint32_t total_tbs);

 } // namespace srsenb

@@ -780,10 +780,10 @@ alloc_outcome_t sf_sched::alloc_dl_user(sched_ue* user, const rbgmask_t& user_ma
   }

   // Check if allocation would cause segmentation
-  const dl_harq_proc& h = user->get_dl_harq(pid, ue_cc_idx);
+  const dl_harq_proc& h = user->get_dl_harq(pid, cc_cfg->enb_cc_idx);
   if (h.is_empty()) {
     // It is newTx
-    rbg_interval r = user->get_required_dl_rbgs(ue_cc_idx);
+    rbg_interval r = user->get_required_dl_rbgs(cc_cfg->enb_cc_idx);
     if (r.start() > user_mask.count()) {
       log_h->warning("The number of RBGs allocated to rnti=0x%x will force segmentation\n", user->get_rnti());
       return alloc_outcome_t::NOF_RB_INVALID;
@@ -799,7 +799,7 @@ alloc_outcome_t sf_sched::alloc_dl_user(sched_ue* user, const rbgmask_t& user_ma
     if (not has_pusch_grant) {
       // Try to allocate small PUSCH grant, if there are no allocated PUSCH grants for this TTI yet
       prb_interval alloc = {};
-      uint32_t L = user->get_required_prb_ul(ue_cc_idx, srslte::ceil_div(SRSLTE_UCI_CQI_CODED_PUCCH_B + 2, 8));
+      uint32_t L = user->get_required_prb_ul(cc_cfg->enb_cc_idx, srslte::ceil_div(SRSLTE_UCI_CQI_CODED_PUCCH_B + 2, 8));
       tti_alloc.find_ul_alloc(L, &alloc);
       bool ul_alloc_success = alloc.length() > 0 and alloc_ul_user(user, alloc);
       if (ue_cc_idx != 0 and not ul_alloc_success) {
@@ -866,7 +866,7 @@ alloc_outcome_t sf_sched::alloc_ul_user(sched_ue* user, prb_interval alloc)
 {
   // check whether adaptive/non-adaptive retx/newtx
   ul_alloc_t::type_t alloc_type;
-  ul_harq_proc* h = user->get_ul_harq(get_tti_tx_ul(), user->get_active_cell_index(cc_cfg->enb_cc_idx).second);
+  ul_harq_proc* h = user->get_ul_harq(get_tti_tx_ul(), cc_cfg->enb_cc_idx);
   bool has_retx = h->has_pending_retx();
   if (has_retx) {
     if (h->retx_requires_pdcch(tti_point{get_tti_tx_ul()}, alloc)) {
@@ -895,9 +895,8 @@ bool sf_sched::alloc_phich(sched_ue* user, sched_interface::ul_sched_res_t* ul_s
     // user does not support this carrier
     return false;
   }
-  uint32_t cell_index = p.second;

-  ul_harq_proc* h = user->get_ul_harq(get_tti_tx_ul(), cell_index);
+  ul_harq_proc* h = user->get_ul_harq(get_tti_tx_ul(), cc_cfg->enb_cc_idx);

   /* Indicate PHICH acknowledgment if needed */
   if (h->has_pending_phich()) {
@@ -1036,13 +1035,12 @@ void sf_sched::set_dl_data_sched_result(const pdcch_grid_t::alloc_result_t& dci_
       continue;
     }
     sched_ue* user = &ue_it->second;
-    uint32_t cell_index = user->get_active_cell_index(cc_cfg->enb_cc_idx).second;
-    uint32_t data_before = user->get_requested_dl_bytes(cell_index).stop();
-    const dl_harq_proc& dl_harq = user->get_dl_harq(data_alloc.pid, cell_index);
+    uint32_t data_before = user->get_requested_dl_bytes(cc_cfg->enb_cc_idx).stop();
+    const dl_harq_proc& dl_harq = user->get_dl_harq(data_alloc.pid, cc_cfg->enb_cc_idx);
     bool is_newtx = dl_harq.is_empty();

     int tbs = user->generate_dl_dci_format(
-        data_alloc.pid, data, get_tti_tx_dl(), cell_index, tti_alloc.get_cfi(), data_alloc.user_mask);
+        data_alloc.pid, data, get_tti_tx_dl(), cc_cfg->enb_cc_idx, tti_alloc.get_cfi(), data_alloc.user_mask);
     if (tbs <= 0) {
       log_h->warning("SCHED: DL %s failed rnti=0x%x, pid=%d, mask=%s, tbs=%d, buffer=%d\n",
@@ -1051,7 +1049,7 @@ void sf_sched::set_dl_data_sched_result(const pdcch_grid_t::alloc_result_t& dci_
                      data_alloc.pid,
                      data_alloc.user_mask.to_hex().c_str(),
                      tbs,
-                     user->get_requested_dl_bytes(cell_index).stop());
+                     user->get_requested_dl_bytes(cc_cfg->enb_cc_idx).stop());
       continue;
     }
@@ -1067,7 +1065,7 @@ void sf_sched::set_dl_data_sched_result(const pdcch_grid_t::alloc_result_t& dci_
                  dl_harq.nof_retx(0) + dl_harq.nof_retx(1),
                  tbs,
                  data_before,
-                 user->get_requested_dl_bytes(cell_index).stop());
+                 user->get_requested_dl_bytes(cc_cfg->enb_cc_idx).stop());
     dl_result->nof_data_elems++;
   }
@@ -1171,8 +1169,7 @@ void sf_sched::set_ul_sched_result(const pdcch_grid_t::alloc_result_t& dci_resul
     if (ue_it == ue_list.end()) {
       continue;
     }
     sched_ue* user = &ue_it->second;
-    uint32_t cell_index = user->get_active_cell_index(cc_cfg->enb_cc_idx).second;

     srslte_dci_location_t cce_range = {0, 0};
     if (ul_alloc.needs_pdcch()) {
@@ -1183,18 +1180,18 @@ void sf_sched::set_ul_sched_result(const pdcch_grid_t::alloc_result_t& dci_resul
     uci_pusch_t uci_type = is_uci_included(this, *cc_results, user, cc_cfg->enb_cc_idx);

     /* Generate DCI Format1A */
-    uint32_t total_data_before = user->get_pending_ul_data_total(get_tti_tx_ul(), cell_index);
+    uint32_t total_data_before = user->get_pending_ul_data_total(get_tti_tx_ul(), cc_cfg->enb_cc_idx);
     int tbs = user->generate_format0(pusch,
                                      get_tti_tx_ul(),
-                                     cell_index,
+                                     cc_cfg->enb_cc_idx,
                                      ul_alloc.alloc,
                                      ul_alloc.needs_pdcch(),
                                      cce_range,
                                      ul_alloc.msg3_mcs,
                                      uci_type);

-    ul_harq_proc* h = user->get_ul_harq(get_tti_tx_ul(), cell_index);
-    uint32_t new_pending_bytes = user->get_pending_ul_new_data(get_tti_tx_ul(), cell_index);
+    ul_harq_proc* h = user->get_ul_harq(get_tti_tx_ul(), cc_cfg->enb_cc_idx);
+    uint32_t new_pending_bytes = user->get_pending_ul_new_data(get_tti_tx_ul(), cc_cfg->enb_cc_idx);
     // Allow TBS=0 in case of UCI-only PUSCH
     if (tbs < 0 || (tbs == 0 && pusch->dci.tb.mcs_idx != 29)) {
       log_h->warning("SCHED: Error %s %s rnti=0x%x, pid=%d, dci=(%d,%d), prb=%s, bsr=%d\n",
@@ -1254,9 +1251,8 @@ void sf_sched::generate_sched_results(sched_ue_list& ue_db)
   for (uint32_t i = 0; i < cc_result->ul_sched_result.nof_phich_elems; ++i) {
     auto& phich = phich_list[i];
     if (phich.phich == phich_t::NACK) {
       auto& ue = ue_db[phich.rnti];
-      int ue_cc_idx = ue.enb_to_ue_cc_idx(cc_cfg->enb_cc_idx);
-      ul_harq_proc* h = (ue_cc_idx >= 0) ? ue.get_ul_harq(get_tti_tx_ul(), ue_cc_idx) : nullptr;
+      ul_harq_proc* h = ue.get_ul_harq(get_tti_tx_ul(), cc_cfg->enb_cc_idx);
       if (not is_ul_alloc(ue.get_rnti()) and h != nullptr and not h->is_empty()) {
         // There was a missed UL harq retx. Halt+Resume the HARQ
         phich.phich = phich_t::ACK;

@@ -425,26 +425,25 @@ void sched_ue::set_ul_snr(tti_point tti_rx, uint32_t enb_cc_idx, float snr, uint
 /**
  * Allocate MAC PDU for a UE HARQ pid
- * @param data
- * @param total_tbs
- * @param ue_cc_idx
  * @return pair with allocated tbs and mcs
  */
 tbs_info sched_ue::allocate_new_dl_mac_pdu(sched::dl_sched_data_t* data,
                                            dl_harq_proc* h,
                                            const rbgmask_t& user_mask,
                                            tti_point tti_tx_dl,
-                                           uint32_t ue_cc_idx,
+                                           uint32_t enb_cc_idx,
                                            uint32_t cfi,
                                            uint32_t tb)
 {
   srslte_dci_dl_t* dci = &data->dci;
   uint32_t nof_prb = count_prb_per_tb(user_mask);
-  tbs_info tb_info = compute_mcs_and_tbs(ue_cc_idx, tti_tx_dl, nof_prb, cfi, *dci);
+  tbs_info tb_info = compute_mcs_and_tbs(enb_cc_idx, tti_tx_dl, nof_prb, cfi, *dci);

   // Allocate MAC PDU (subheaders, CEs, and SDUS)
   int rem_tbs = tb_info.tbs_bytes;
-  rem_tbs -= allocate_mac_ces(data, lch_handler, rem_tbs, ue_cc_idx);
+  if (cells[enb_cc_idx].get_ue_cc_idx() == 0) {
+    rem_tbs -= allocate_mac_ces(data, lch_handler, rem_tbs);
+  }
   rem_tbs -= allocate_mac_sdus(data, lch_handler, rem_tbs, tb);

   // Allocate DL UE Harq
@@ -474,7 +473,7 @@ tbs_info sched_ue::allocate_new_dl_mac_pdu(sched::dl_sched_data_t* data,
 int sched_ue::generate_dl_dci_format(uint32_t pid,
                                      sched_interface::dl_sched_data_t* data,
                                      tti_point tti_tx_dl,
-                                     uint32_t ue_cc_idx,
+                                     uint32_t enb_cc_idx,
                                      uint32_t cfi,
                                      const rbgmask_t& user_mask)
 {
@@ -483,13 +482,13 @@ int sched_ue::generate_dl_dci_format(uint32_t pid,
   switch (dci_format) {
     case SRSLTE_DCI_FORMAT1:
-      tbs = generate_format1(pid, data, tti_tx_dl, ue_cc_idx, cfi, user_mask);
+      tbs = generate_format1(pid, data, tti_tx_dl, enb_cc_idx, cfi, user_mask);
       break;
     case SRSLTE_DCI_FORMAT2:
-      tbs = generate_format2(pid, data, tti_tx_dl, ue_cc_idx, cfi, user_mask);
+      tbs = generate_format2(pid, data, tti_tx_dl, enb_cc_idx, cfi, user_mask);
       break;
     case SRSLTE_DCI_FORMAT2A:
-      tbs = generate_format2a(pid, data, tti_tx_dl, ue_cc_idx, cfi, user_mask);
+      tbs = generate_format2a(pid, data, tti_tx_dl, enb_cc_idx, cfi, user_mask);
       break;
     default:
       Error("DCI format (%d) not implemented\n", dci_format);
@@ -502,12 +501,13 @@ int sched_ue::generate_dl_dci_format(uint32_t pid,
 int sched_ue::generate_format1(uint32_t pid,
                                sched_interface::dl_sched_data_t* data,
                                tti_point tti_tx_dl,
-                               uint32_t ue_cc_idx,
+                               uint32_t enb_cc_idx,
                                uint32_t cfi,
                                const rbgmask_t& user_mask)
 {
-  dl_harq_proc* h = &carriers[ue_cc_idx].harq_ent.dl_harq_procs()[pid];
-  srslte_dci_dl_t* dci = &data->dci;
+  uint32_t ue_cc_idx = cells[enb_cc_idx].get_ue_cc_idx();
+  dl_harq_proc* h = &carriers[ue_cc_idx].harq_ent.dl_harq_procs()[pid];
+  srslte_dci_dl_t* dci = &data->dci;

   // If the size of Format1 and Format1A is ambiguous in the common SS, use Format1A since the UE assumes
   // Common SS when spaces collide
@@ -532,7 +532,7 @@ int sched_ue::generate_format1(uint32_t pid,
   tbs_info tbinfo;
   if (h->is_empty(0)) {
-    tbinfo = allocate_new_dl_mac_pdu(data, h, user_mask, tti_tx_dl, ue_cc_idx, cfi, 0);
+    tbinfo = allocate_new_dl_mac_pdu(data, h, user_mask, tti_tx_dl, enb_cc_idx, cfi, 0);
   } else {
     h->new_retx(user_mask, 0, tti_tx_dl, &tbinfo.mcs, &tbinfo.tbs_bytes, data->dci.location.ncce);
     Debug("SCHED: Alloc format1 previous mcs=%d, tbs=%d\n", tbinfo.mcs, tbinfo.tbs_bytes);
@@ -555,23 +555,25 @@ int sched_ue::generate_format1(uint32_t pid,
 /**
  * Based on the amount of tx data, allocated PRBs, DCI params, etc. compute a valid MCS and resulting TBS
- * @param ue_cc_idx user carrier index
+ * @param enb_cc_idx user carrier index
  * @param tti_tx_dl tti when the tx will occur
  * @param nof_alloc_prbs number of PRBs that were allocated
  * @param cfi Number of control symbols in Subframe
  * @param dci contains the RBG mask, and alloc type
  * @return pair with MCS and TBS (in bytes)
  */
-tbs_info sched_ue::compute_mcs_and_tbs(uint32_t ue_cc_idx,
+tbs_info sched_ue::compute_mcs_and_tbs(uint32_t enb_cc_idx,
                                        tti_point tti_tx_dl,
                                        uint32_t nof_alloc_prbs,
                                        uint32_t cfi,
                                        const srslte_dci_dl_t& dci)
 {
-  srslte::interval<uint32_t> req_bytes = get_requested_dl_bytes(ue_cc_idx);
+  uint32_t ue_cc_idx = cells[enb_cc_idx].get_ue_cc_idx();
+  srslte::interval<uint32_t> req_bytes = get_requested_dl_bytes(enb_cc_idx);

   // Calculate exact number of RE for this PRB allocation
-  uint32_t nof_re = carriers[ue_cc_idx].get_cell_cfg()->get_dl_nof_res(tti_tx_dl, dci, cfi);
+  uint32_t nof_re = cells[enb_cc_idx].cell_cfg->get_dl_nof_res(tti_tx_dl, dci, cfi);

   // Compute MCS+TBS
   tbs_info tb = carriers[ue_cc_idx].alloc_tbs_dl(nof_alloc_prbs, nof_re, req_bytes.stop());
@@ -588,10 +590,11 @@ tbs_info sched_ue::compute_mcs_and_tbs(uint32_t ue_cc_idx,
 int sched_ue::generate_format2a(uint32_t pid,
                                 sched_interface::dl_sched_data_t* data,
                                 tti_point tti_tx_dl,
-                                uint32_t ue_cc_idx,
+                                uint32_t enb_cc_idx,
                                 uint32_t cfi,
                                 const rbgmask_t& user_mask)
 {
+  uint32_t ue_cc_idx = cells[enb_cc_idx].get_ue_cc_idx();
   dl_harq_proc* h = &carriers[ue_cc_idx].harq_ent.dl_harq_procs()[pid];
   bool tb_en[SRSLTE_MAX_TB] = {false};
@@ -630,7 +633,7 @@ int sched_ue::generate_format2a(uint32_t pid,
     if (!h->is_empty(tb)) {
       h->new_retx(user_mask, tb, tti_tx_dl, &tbinfo.mcs, &tbinfo.tbs_bytes, data->dci.location.ncce);
     } else if (tb_en[tb] && no_retx) {
-      tbinfo = allocate_new_dl_mac_pdu(data, h, user_mask, tti_tx_dl, ue_cc_idx, cfi, tb);
+      tbinfo = allocate_new_dl_mac_pdu(data, h, user_mask, tti_tx_dl, enb_cc_idx, cfi, tb);
     }

     /* Fill DCI TB dedicated fields */
@@ -664,19 +667,20 @@ int sched_ue::generate_format2a(uint32_t pid,
 int sched_ue::generate_format2(uint32_t pid,
                                sched_interface::dl_sched_data_t* data,
                                tti_point tti_tx_dl,
-                               uint32_t cc_idx,
+                               uint32_t enb_cc_idx,
                                uint32_t cfi,
                                const rbgmask_t& user_mask)
 {
   /* Call Format 2a (common) */
-  int ret = generate_format2a(pid, data, tti_tx_dl, cc_idx, cfi, user_mask);
+  int ret = generate_format2a(pid, data, tti_tx_dl, enb_cc_idx, cfi, user_mask);

   /* Compute precoding information */
+  uint32_t ue_cc_idx = enb_to_ue_cc_idx(enb_cc_idx);
   data->dci.format = SRSLTE_DCI_FORMAT2;
   if ((SRSLTE_DCI_IS_TB_EN(data->dci.tb[0]) + SRSLTE_DCI_IS_TB_EN(data->dci.tb[1])) == 1) {
-    data->dci.pinfo = (uint8_t)(carriers[cc_idx].dl_pmi + 1) % (uint8_t)5;
+    data->dci.pinfo = (uint8_t)(carriers[ue_cc_idx].dl_pmi + 1) % (uint8_t)5;
   } else {
-    data->dci.pinfo = (uint8_t)(carriers[cc_idx].dl_pmi & 1u);
+    data->dci.pinfo = (uint8_t)(carriers[ue_cc_idx].dl_pmi & 1u);
   }

   return ret;
@@ -684,17 +688,18 @@ int sched_ue::generate_format2(uint32_t pid,
 int sched_ue::generate_format0(sched_interface::ul_sched_data_t* data,
                                tti_point tti_tx_ul,
-                               uint32_t ue_cc_idx,
+                               uint32_t enb_cc_idx,
                                prb_interval alloc,
                                bool needs_pdcch,
                                srslte_dci_location_t dci_pos,
                                int explicit_mcs,
                                uci_pusch_t uci_type)
 {
-  ul_harq_proc* h = get_ul_harq(tti_tx_ul, ue_cc_idx);
-  srslte_dci_ul_t* dci = &data->dci;
+  uint32_t ue_cc_idx = cells[enb_cc_idx].get_ue_cc_idx();
+  ul_harq_proc* h = get_ul_harq(tti_tx_ul, enb_cc_idx);
+  srslte_dci_ul_t* dci = &data->dci;

-  bool cqi_request = needs_cqi(tti_tx_ul.to_uint(), true);
+  bool cqi_request = needs_cqi(tti_tx_ul.to_uint(), enb_cc_idx, true);

   // Set DCI position
   data->needs_pdcch = needs_pdcch;
@@ -715,7 +720,7 @@ int sched_ue::generate_format0(sched_interface::ul_sched_data_t* data,
     tbinfo.tbs_bytes = get_tbs_bytes(tbinfo.mcs, alloc.length(), false, true);
   } else {
     // dynamic mcs
-    uint32_t req_bytes = get_pending_ul_new_data(tti_tx_ul, ue_cc_idx);
+    uint32_t req_bytes = get_pending_ul_new_data(tti_tx_ul, enb_cc_idx);
     uint32_t N_srs = 0;
     uint32_t nof_symb = 2 * (SRSLTE_CP_NSYMB(cell.cp) - 1) - N_srs;
     uint32_t nof_re = nof_symb * alloc.length() * SRSLTE_NRE;
@@ -807,12 +812,12 @@ uint32_t sched_ue::get_max_retx()
   return cfg.maxharq_tx;
 }

-bool sched_ue::needs_cqi(uint32_t tti, uint32_t cc_idx, bool will_send)
+bool sched_ue::needs_cqi(uint32_t tti, uint32_t enb_cc_idx, bool will_send)
 {
   bool ret = false;
   if (phy_config_dedicated_enabled && cfg.supported_cc_list[0].aperiodic_cqi_period &&
       lch_handler.has_pending_dl_txs()) {
-    uint32_t interval = srslte_tti_interval(tti, carriers[cc_idx].dl_cqi_tti_rx.to_uint());
+    uint32_t interval = srslte_tti_interval(tti, carriers[enb_to_ue_cc_idx(enb_cc_idx)].dl_cqi_tti_rx.to_uint());
     bool needscqi = interval >= cfg.supported_cc_list[0].aperiodic_cqi_period;
     if (needscqi) {
       uint32_t interval_sent = srslte_tti_interval(tti, cqi_request_tti);
@@ -830,17 +835,19 @@ bool sched_ue::needs_cqi(uint32_t tti, uint32_t cc_idx, bool will_send)
 /**
  * Compute the range of RBGs that avoids segmentation of TM and MAC subheader data. Always computed for highest CFI
- * @param ue_cc_idx carrier of the UE
+ * @param enb_cc_idx carrier of the UE
  * @return range of number of RBGs that a UE can allocate in a given subframe
  */
-rbg_interval sched_ue::get_required_dl_rbgs(uint32_t ue_cc_idx)
+rbg_interval sched_ue::get_required_dl_rbgs(uint32_t enb_cc_idx)
 {
-  srslte::interval<uint32_t> req_bytes = get_requested_dl_bytes(ue_cc_idx);
+  assert(cells[enb_cc_idx].get_ue_cc_idx() >= 0);
+  const auto* cellparams = cells[enb_cc_idx].cell_cfg;
+  uint32_t ue_cc_idx = cells[enb_cc_idx].get_ue_cc_idx();
+  srslte::interval<uint32_t> req_bytes = get_requested_dl_bytes(enb_cc_idx);
   if (req_bytes == srslte::interval<uint32_t>{0, 0}) {
     return {0, 0};
   }
-  const auto* cellparams = carriers[ue_cc_idx].get_cell_cfg();
   int pending_prbs = carriers[ue_cc_idx].get_required_prb_dl(to_tx_dl(current_tti), req_bytes.start());
   if (pending_prbs < 0) {
     // Cannot fit allocation in given PRBs
     log_h->error("SCHED: DL CQI=%d does now allow fitting %d non-segmentable DL tx bytes into the cell bandwidth. "
@@ -868,11 +875,14 @@ rbg_interval sched_ue::get_required_dl_rbgs(uint32_t ue_cc_idx)
  * - the upper boundary is set as a sum of:
  *   - total data in all SRBs and DRBs including the MAC subheaders
  *   - All CEs (ConRes and others) including respective MAC subheaders
- * @ue_cc_idx carrier where allocation is being made
+ * @enb_cc_idx carrier where allocation is being made
  * @return
  */
-srslte::interval<uint32_t> sched_ue::get_requested_dl_bytes(uint32_t ue_cc_idx)
+srslte::interval<uint32_t> sched_ue::get_requested_dl_bytes(uint32_t enb_cc_idx)
 {
+  assert(cells.at(enb_cc_idx).get_ue_cc_idx() >= 0);
+  uint32_t ue_cc_idx = cells[enb_cc_idx].get_ue_cc_idx();
+
   /* Set Maximum boundary */
   // Ensure there is space for ConRes and RRC Setup
   // SRB0 is a special case due to being RLC TM (no segmentation possible)
@@ -927,9 +937,9 @@ uint32_t sched_ue::get_pending_dl_rlc_data() const
   return lch_handler.get_dl_tx_total();
 }

-uint32_t sched_ue::get_expected_dl_bitrate(uint32_t ue_cc_idx, int nof_rbgs) const
+uint32_t sched_ue::get_expected_dl_bitrate(uint32_t enb_cc_idx, int nof_rbgs) const
 {
-  const cc_sched_ue* cc = &carriers[ue_cc_idx];
+  const cc_sched_ue* cc = &carriers[cells.at(enb_cc_idx).get_ue_cc_idx()];
   uint32_t nof_re = cc->get_cell_cfg()->get_dl_lb_nof_re(
       to_tx_dl(current_tti), count_prb_per_tb_approx(nof_rbgs, cc->get_cell_cfg()->nof_prb()));
   float max_coderate = srslte_cqi_to_coderate(std::min(cc->dl_cqi + 1u, 15u), cfg.use_tbs_index_alt);
@@ -939,9 +949,9 @@ uint32_t sched_ue::get_expected_dl_bitrate(uint32_t ue_cc_idx, int nof_rbgs) con
   return tbs / tti_duration_ms;
 }

-uint32_t sched_ue::get_expected_ul_bitrate(uint32_t ue_cc_idx, int nof_prbs) const
+uint32_t sched_ue::get_expected_ul_bitrate(uint32_t enb_cc_idx, int nof_prbs) const
 {
-  const cc_sched_ue* cc = &carriers[ue_cc_idx];
+  const cc_sched_ue* cc = &carriers[cells.at(enb_cc_idx).get_ue_cc_idx()];
   uint32_t nof_prbs_alloc = nof_prbs < 0 ? cell.nof_prb : nof_prbs;

   uint32_t N_srs = 0;
@@ -956,10 +966,10 @@ uint32_t sched_ue::get_expected_ul_bitrate(uint32_t ue_cc_idx, int nof_prbs) con
 /// Returns nof bytes allocated to active UL HARQs in the carrier cc_idx.
 /// NOTE: The returned value accounts for the MAC header and payload (RLC headers and actual data)
-uint32_t sched_ue::get_pending_ul_old_data(uint32_t ue_cc_idx)
+uint32_t sched_ue::get_pending_ul_old_data(uint32_t enb_cc_idx)
 {
   uint32_t pending_data = 0;
-  for (auto& h : carriers[ue_cc_idx].harq_ent.ul_harq_procs()) {
+  for (auto& h : carriers[enb_to_ue_cc_idx(enb_cc_idx)].harq_ent.ul_harq_procs()) {
     pending_data += h.get_pending_data();
   }
   return pending_data;
@@ -970,14 +980,15 @@ uint32_t sched_ue::get_pending_ul_old_data()
 {
   uint32_t pending_ul_data = 0;
   for (uint32_t cc_idx = 0; cc_idx < carriers.size(); ++cc_idx) {
-    pending_ul_data += get_pending_ul_old_data(cc_idx);
+    pending_ul_data += get_pending_ul_old_data(carriers[cc_idx].get_cell_cfg()->enb_cc_idx);
   }
   return pending_ul_data;
 }

-uint32_t sched_ue::get_pending_ul_data_total(tti_point tti_tx_ul, int this_ue_cc_idx)
+uint32_t sched_ue::get_pending_ul_data_total(tti_point tti_tx_ul, int this_enb_cc_idx)
 {
   static constexpr uint32_t lbsr_size = 4, sbsr_size = 2;
+  uint32_t this_ue_cc_idx = enb_to_ue_cc_idx(this_enb_cc_idx);

   // Note: If there are no active bearers, scheduling requests are also ignored.
   uint32_t pending_data = 0;
@@ -995,7 +1006,7 @@ uint32_t sched_ue::get_pending_ul_data_total(tti_point tti_tx_ul, int this_ue_cc
     // may be fully occupied by a BSR, and RRC the message transmission needs to be postponed.
     pending_data += (pending_lcgs <= 1) ? sbsr_size : lbsr_size;
   } else {
-    if (is_sr_triggered() and this_ue_cc_idx >= 0) {
+    if (is_sr_triggered() and this_enb_cc_idx >= 0) {
       // Check if this_cc_idx is the carrier with highest CQI
       uint32_t max_cqi = 0, max_cc_idx = 0;
       for (uint32_t cc_idx = 0; cc_idx < carriers.size(); ++cc_idx) {
@@ -1005,12 +1016,12 @@ uint32_t sched_ue::get_pending_ul_data_total(tti_point tti_tx_ul, int this_ue_cc
           max_cc_idx = cc_idx;
         }
       }
-      if ((int)max_cc_idx == this_ue_cc_idx) {
+      if (max_cc_idx == this_ue_cc_idx) {
        return 512;
       }
     }
     for (uint32_t cc_idx = 0; cc_idx < carriers.size(); ++cc_idx) {
-      if (needs_cqi(tti_tx_ul.to_uint(), cc_idx)) {
+      if (needs_cqi(tti_tx_ul.to_uint(), carriers[cc_idx].get_cell_cfg()->enb_cc_idx)) {
         return 128;
       }
     }
@@ -1019,9 +1030,9 @@ uint32_t sched_ue::get_pending_ul_data_total(tti_point tti_tx_ul, int this_ue_cc
   return pending_data;
 }

-uint32_t sched_ue::get_pending_ul_new_data(tti_point tti_tx_ul, int this_ue_cc_idx)
+uint32_t sched_ue::get_pending_ul_new_data(tti_point tti_tx_ul, int this_enb_cc_idx)
 {
-  uint32_t pending_data = get_pending_ul_data_total(tti_tx_ul, this_ue_cc_idx);
+  uint32_t pending_data = get_pending_ul_data_total(tti_tx_ul, this_enb_cc_idx);

   // Subtract all the UL data already allocated in the UL harqs
   uint32_t pending_ul_data = get_pending_ul_old_data();
@@ -1036,9 +1047,9 @@ uint32_t sched_ue::get_pending_ul_new_data(tti_point tti_tx_ul, int this_ue_cc_i
   return pending_data;
 }

-uint32_t sched_ue::get_required_prb_ul(uint32_t cc_idx, uint32_t req_bytes)
+uint32_t sched_ue::get_required_prb_ul(uint32_t enb_cc_idx, uint32_t req_bytes)
 {
-  return carriers[cc_idx].get_required_prb_ul(req_bytes);
+  return carriers[enb_to_ue_cc_idx(enb_cc_idx)].get_required_prb_ul(req_bytes);
 }

 bool sched_ue::is_sr_triggered()
@@ -1047,32 +1058,36 @@ bool sched_ue::is_sr_triggered()
 }

 /* Gets HARQ process with oldest pending retx */
-dl_harq_proc* sched_ue::get_pending_dl_harq(tti_point tti_tx_dl, uint32_t ue_cc_idx)
+dl_harq_proc* sched_ue::get_pending_dl_harq(tti_point tti_tx_dl, uint32_t enb_cc_idx)
 {
+  uint32_t ue_cc_idx = enb_to_ue_cc_idx(enb_cc_idx);
   if (ue_cc_idx < carriers.size() and carriers[ue_cc_idx].cc_state() == cc_st::active) {
     return carriers[ue_cc_idx].harq_ent.get_pending_dl_harq(tti_tx_dl);
   }
   return nullptr;
 }

-dl_harq_proc* sched_ue::get_empty_dl_harq(tti_point tti_tx_dl, uint32_t ue_cc_idx)
+dl_harq_proc* sched_ue::get_empty_dl_harq(tti_point tti_tx_dl, uint32_t enb_cc_idx)
 {
+  uint32_t ue_cc_idx = enb_to_ue_cc_idx(enb_cc_idx);
   if (ue_cc_idx < carriers.size() and carriers[ue_cc_idx].cc_state() == cc_st::active) {
     return carriers[ue_cc_idx].harq_ent.get_empty_dl_harq(tti_tx_dl);
   }
   return nullptr;
 }

-ul_harq_proc* sched_ue::get_ul_harq(tti_point tti_tx_ul, uint32_t ue_cc_idx)
+ul_harq_proc* sched_ue::get_ul_harq(tti_point tti_tx_ul, uint32_t enb_cc_idx)
 {
+  uint32_t ue_cc_idx = enb_to_ue_cc_idx(enb_cc_idx);
   if (ue_cc_idx < carriers.size() and carriers[ue_cc_idx].cc_state() == cc_st::active) {
     return carriers[ue_cc_idx].harq_ent.get_ul_harq(tti_tx_ul);
   }
   return nullptr;
 }

-const dl_harq_proc& sched_ue::get_dl_harq(uint32_t idx, uint32_t ue_cc_idx) const
+const dl_harq_proc& sched_ue::get_dl_harq(uint32_t idx, uint32_t enb_cc_idx) const
 {
+  uint32_t ue_cc_idx = enb_to_ue_cc_idx(enb_cc_idx);
   return carriers[ue_cc_idx].harq_ent.dl_harq_procs()[idx];
 }
@@ -1089,8 +1104,9 @@ std::pair<bool, uint32_t> sched_ue::get_active_cell_index(uint32_t enb_cc_idx) c
   return {false, std::numeric_limits<uint32_t>::max()};
 }

-uint32_t sched_ue::get_aggr_level(uint32_t ue_cc_idx, uint32_t nof_bits)
+uint32_t sched_ue::get_aggr_level(uint32_t enb_cc_idx, uint32_t nof_bits)
 {
+  uint32_t ue_cc_idx = enb_to_ue_cc_idx(enb_cc_idx);
   return carriers[ue_cc_idx].get_aggr_level(nof_bits);
 }
@@ -1162,7 +1178,7 @@ std::bitset<SRSLTE_MAX_CARRIERS> sched_ue::scell_activation_mask() const
 int sched_ue::enb_to_ue_cc_idx(uint32_t enb_cc_idx) const
 {
-  return cells.at(enb_cc_idx).get_ue_cc_idx();
+  return enb_cc_idx < cells.size() ? cells[enb_cc_idx].get_ue_cc_idx() : -1;
 }

 float diff_coderate_maxcoderate(int mcs,
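The pattern repeated throughout sched_ue.cc above: each method now accepts enb_cc_idx, derives the UE-local index once at the top, and asserts the carrier is configured. A hedged sketch of that shape, with toy types and a toy PRB formula in place of the real CQI-based computation:

#include <cassert>
#include <cstdint>
#include <vector>

// Toy per-carrier state; required_prb_dl() stands in for the real
// CQI-driven computation in cc_sched_ue.
struct carrier_sketch {
  int required_prb_dl(uint32_t req_bytes) const { return static_cast<int>(req_bytes / 128) + 1; }
};

struct sched_ue_sketch {
  std::vector<int>            ue_idx_of_enb_cc; // -1 => carrier not configured for this UE
  std::vector<carrier_sketch> carriers;         // indexed by ue_cc_idx

  int get_required_prb_dl(uint32_t enb_cc_idx, uint32_t req_bytes) const
  {
    // Convert once at the entry point, as the refactored
    // get_required_dl_rbgs()/get_requested_dl_bytes() do above.
    assert(ue_idx_of_enb_cc.at(enb_cc_idx) >= 0);
    uint32_t ue_cc_idx = ue_idx_of_enb_cc[enb_cc_idx];
    return carriers[ue_cc_idx].required_prb_dl(req_bytes);
  }
};

int main()
{
  sched_ue_sketch ue{{0, -1, 1}, {carrier_sketch{}, carrier_sketch{}}};
  return ue.get_required_prb_dl(2, 256) == 3 ? 0 : 1;
}

The design choice is to pay the conversion cost once per call instead of at every call site, which is what the commit message means by avoiding unneeded conversions.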

@@ -350,15 +350,8 @@ uint32_t allocate_mac_sdus(sched_interface::dl_sched_data_t* data,
   return total_tbs - rem_tbs;
 }

-uint32_t allocate_mac_ces(sched_interface::dl_sched_data_t* data,
-                          lch_ue_manager& lch_handler,
-                          uint32_t total_tbs,
-                          uint32_t ue_cc_idx)
+uint32_t allocate_mac_ces(sched_interface::dl_sched_data_t* data, lch_ue_manager& lch_handler, uint32_t total_tbs)
 {
-  if (ue_cc_idx != 0) {
-    return 0;
-  }
   int rem_tbs = total_tbs;
   while (not lch_handler.pending_ces.empty() and data->nof_pdu_elems[0] < sched_interface::MAX_RLC_PDU_LIST) {
     int toalloc = srslte::ce_total_size(lch_handler.pending_ces.front());
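allocate_mac_ces() loses its ue_cc_idx parameter because the PCell-only rule moved to the caller: allocate_new_dl_mac_pdu() now skips CE allocation unless cells[enb_cc_idx].get_ue_cc_idx() == 0. Roughly, with a toy queue standing in for lch_ue_manager:

#include <cstdint>
#include <queue>

// Toy stand-in for the pending-CE queue kept by lch_ue_manager.
struct lch_manager_sketch {
  std::queue<uint32_t> pending_ce_sizes; // CE + subheader sizes in bytes
};

// After the commit: pure packing, no carrier-index parameter. Returns
// the number of bytes consumed, like allocate_mac_ces().
uint32_t allocate_mac_ces_sketch(lch_manager_sketch& lch, uint32_t total_tbs)
{
  uint32_t rem = total_tbs;
  while (not lch.pending_ce_sizes.empty() and lch.pending_ce_sizes.front() <= rem) {
    rem -= lch.pending_ce_sizes.front();
    lch.pending_ce_sizes.pop();
  }
  return total_tbs - rem;
}

// The caller applies the PCell-only rule instead, mirroring the new
// allocate_new_dl_mac_pdu(): CEs go out only on the UE's primary
// carrier (ue_cc_idx == 0).
uint32_t build_pdu_sketch(lch_manager_sketch& lch, int ue_cc_idx, uint32_t tbs)
{
  uint32_t rem_tbs = tbs;
  if (ue_cc_idx == 0) {
    rem_tbs -= allocate_mac_ces_sketch(lch, rem_tbs);
  }
  return rem_tbs; // space left over for MAC SDUs
}

int main()
{
  lch_manager_sketch lch;
  lch.pending_ce_sizes.push(2);
  return build_pdu_sketch(lch, 0, 100) == 98 ? 0 : 1;
}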

@@ -90,20 +90,18 @@ int get_ue_cc_idx_if_pdsch_enabled(const sched_ue& user, sf_sched* tti_sched)
 }

 const dl_harq_proc* get_dl_retx_harq(sched_ue& user, sf_sched* tti_sched)
 {
-  int ue_cc_idx = get_ue_cc_idx_if_pdsch_enabled(user, tti_sched);
-  if (ue_cc_idx < 0) {
+  if (get_ue_cc_idx_if_pdsch_enabled(user, tti_sched) < 0) {
     return nullptr;
   }
-  dl_harq_proc* h = user.get_pending_dl_harq(tti_sched->get_tti_tx_dl(), ue_cc_idx);
+  dl_harq_proc* h = user.get_pending_dl_harq(tti_sched->get_tti_tx_dl(), tti_sched->get_enb_cc_idx());
   return h;
 }

 const dl_harq_proc* get_dl_newtx_harq(sched_ue& user, sf_sched* tti_sched)
 {
-  int ue_cc_idx = get_ue_cc_idx_if_pdsch_enabled(user, tti_sched);
-  if (ue_cc_idx < 0) {
+  if (get_ue_cc_idx_if_pdsch_enabled(user, tti_sched) < 0) {
     return nullptr;
   }
-  return user.get_empty_dl_harq(tti_sched->get_tti_tx_dl(), ue_cc_idx);
+  return user.get_empty_dl_harq(tti_sched->get_tti_tx_dl(), tti_sched->get_enb_cc_idx());
 }

 int get_ue_cc_idx_if_pusch_enabled(const sched_ue& user, sf_sched* tti_sched, bool needs_pdcch)
@@ -126,20 +124,18 @@ int get_ue_cc_idx_if_pusch_enabled(const sched_ue& user, sf_sched* tti_sched, bo
 }

 const ul_harq_proc* get_ul_retx_harq(sched_ue& user, sf_sched* tti_sched)
 {
-  int ue_cc_idx = get_ue_cc_idx_if_pusch_enabled(user, tti_sched, false);
-  if (ue_cc_idx < 0) {
+  if (get_ue_cc_idx_if_pusch_enabled(user, tti_sched, false) < 0) {
     return nullptr;
   }
-  const ul_harq_proc* h = user.get_ul_harq(tti_sched->get_tti_tx_ul(), ue_cc_idx);
+  const ul_harq_proc* h = user.get_ul_harq(tti_sched->get_tti_tx_ul(), tti_sched->get_enb_cc_idx());
   return h->has_pending_retx() ? h : nullptr;
 }

 const ul_harq_proc* get_ul_newtx_harq(sched_ue& user, sf_sched* tti_sched)
 {
-  int ue_cc_idx = get_ue_cc_idx_if_pusch_enabled(user, tti_sched, true);
-  if (ue_cc_idx < 0) {
+  if (get_ue_cc_idx_if_pusch_enabled(user, tti_sched, true) < 0) {
     return nullptr;
   }
-  const ul_harq_proc* h = user.get_ul_harq(tti_sched->get_tti_tx_ul(), ue_cc_idx);
+  const ul_harq_proc* h = user.get_ul_harq(tti_sched->get_tti_tx_ul(), tti_sched->get_enb_cc_idx());
   return h->is_empty() ? h : nullptr;
 }
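These helper getters can drop the intermediate ue_cc_idx because, after this commit, sched_ue::get_ul_harq() and the other HARQ getters convert internally and return nullptr for carriers the UE does not have active; the PHICH change in sf_sched.cc relies on the same contract. A condensed sketch of that contract (hypothetical names):

#include <cstddef>
#include <cstdint>
#include <vector>

struct harq_sketch {
  bool pending_retx;
};

struct ue_harqs_sketch {
  std::vector<int>         ue_idx_of_enb_cc; // -1 when the UE has no such carrier
  std::vector<harq_sketch> harqs;            // one per active UE carrier (toy)

  // Mirrors the refactored sched_ue::get_ul_harq(): the conversion
  // happens inside, and an unknown or inactive carrier yields nullptr.
  harq_sketch* get_ul_harq(uint32_t enb_cc_idx)
  {
    int ue_cc_idx = enb_cc_idx < ue_idx_of_enb_cc.size() ? ue_idx_of_enb_cc[enb_cc_idx] : -1;
    if (ue_cc_idx >= 0 and static_cast<std::size_t>(ue_cc_idx) < harqs.size()) {
      return &harqs[ue_cc_idx];
    }
    return nullptr;
  }
};

int main()
{
  ue_harqs_sketch ue{{0, -1}, {harq_sketch{false}}};
  return ue.get_ul_harq(1) == nullptr ? 0 : 1; // eNB carrier 1 not configured
}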

@@ -79,7 +79,7 @@ uint32_t sched_time_pf::try_dl_alloc(ue_ctxt& ue_ctxt, sched_ue& ue, sf_sched* t
     }
   }
   if (code != alloc_outcome_t::DCI_COLLISION and ue_ctxt.dl_newtx_h != nullptr) {
-    rbg_interval req_rbgs = ue.get_required_dl_rbgs(ue_ctxt.ue_cc_idx);
+    rbg_interval req_rbgs = ue.get_required_dl_rbgs(cc_cfg->enb_cc_idx);
     // Check if there is an empty harq for the newtx
     if (req_rbgs.stop() == 0) {
       return 0;
@@ -90,7 +90,7 @@ uint32_t sched_time_pf::try_dl_alloc(ue_ctxt& ue_ctxt, sched_ue& ue, sf_sched* t
       // empty RBGs were found
       code = tti_sched->alloc_dl_user(&ue, newtx_mask, ue_ctxt.dl_newtx_h->get_id());
       if (code == alloc_outcome_t::SUCCESS) {
-        return ue.get_expected_dl_bitrate(ue_ctxt.ue_cc_idx, newtx_mask.count()) * tti_duration_ms / 8;
+        return ue.get_expected_dl_bitrate(cc_cfg->enb_cc_idx, newtx_mask.count()) * tti_duration_ms / 8;
       }
     }
   }
@@ -136,19 +136,19 @@ uint32_t sched_time_pf::try_ul_alloc(ue_ctxt& ue_ctxt, sched_ue& ue, sf_sched* t
     estim_tbs_bytes = code == alloc_outcome_t::SUCCESS ? ue_ctxt.ul_h->get_pending_data() : 0;
   } else {
     // Note: h->is_empty check is required, in case CA allocated a small UL grant for UCI
-    uint32_t pending_data = ue.get_pending_ul_new_data(tti_sched->get_tti_tx_ul(), ue_ctxt.ue_cc_idx);
+    uint32_t pending_data = ue.get_pending_ul_new_data(tti_sched->get_tti_tx_ul(), cc_cfg->enb_cc_idx);
     // Check if there is a empty harq, and data to transmit
     if (pending_data == 0) {
       return 0;
     }
-    uint32_t pending_rb = ue.get_required_prb_ul(ue_ctxt.ue_cc_idx, pending_data);
+    uint32_t pending_rb = ue.get_required_prb_ul(cc_cfg->enb_cc_idx, pending_data);
     prb_interval alloc = find_contiguous_ul_prbs(pending_rb, tti_sched->get_ul_mask());
     if (alloc.empty()) {
       return 0;
     }
     code = tti_sched->alloc_ul_user(&ue, alloc);
     estim_tbs_bytes = code == alloc_outcome_t::SUCCESS
-                          ? ue.get_expected_ul_bitrate(ue_ctxt.ue_cc_idx, alloc.length()) * tti_duration_ms / 8
+                          ? ue.get_expected_ul_bitrate(cc_cfg->enb_cc_idx, alloc.length()) * tti_duration_ms / 8
                           : 0;
   }
   if (code == alloc_outcome_t::DCI_COLLISION) {
@@ -178,7 +178,7 @@ void sched_time_pf::ue_ctxt::new_tti(const sched_cell_params_t& cell, sched_ue&
   dl_newtx_h = get_dl_newtx_harq(ue, tti_sched);
   if (dl_retx_h != nullptr or dl_newtx_h != nullptr) {
     // calculate DL PF priority
-    float r = ue.get_expected_dl_bitrate(ue_cc_idx) / 8;
+    float r = ue.get_expected_dl_bitrate(cell.enb_cc_idx) / 8;
     float R = dl_avg_rate();
     dl_prio = (R != 0) ? r / pow(R, fairness_coeff) : (r == 0 ? 0 : std::numeric_limits<float>::max());
   }
@@ -189,7 +189,7 @@ void sched_time_pf::ue_ctxt::new_tti(const sched_cell_params_t& cell, sched_ue&
     ul_h = get_ul_newtx_harq(ue, tti_sched);
   }
   if (ul_h != nullptr) {
-    float r = ue.get_expected_ul_bitrate(ue_cc_idx) / 8;
+    float r = ue.get_expected_ul_bitrate(cell.enb_cc_idx) / 8;
     float R = ul_avg_rate();
     ul_prio = (R != 0) ? r / pow(R, fairness_coeff) : (r == 0 ? 0 : std::numeric_limits<float>::max());
   }

@@ -65,13 +65,12 @@ void sched_time_rr::sched_dl_newtxs(std::map<uint16_t, sched_ue>& ue_db, sf_sche
     if (iter == ue_db.end()) {
       iter = ue_db.begin(); // wrap around
     }
     sched_ue& user = iter->second;
-    int ue_cc_idx = user.enb_to_ue_cc_idx(cc_cfg->enb_cc_idx);
-    if (ue_cc_idx < 0) {
+    if (user.enb_to_ue_cc_idx(cc_cfg->enb_cc_idx) < 0) {
       continue;
     }
     const dl_harq_proc* h = get_dl_newtx_harq(user, tti_sched);
-    rbg_interval req_rbgs = user.get_required_dl_rbgs(ue_cc_idx);
+    rbg_interval req_rbgs = user.get_required_dl_rbgs(cc_cfg->enb_cc_idx);
     // Check if there is an empty harq for the newtx
     if (h == nullptr or req_rbgs.stop() == 0) {
       continue;
@@ -138,13 +137,12 @@ void sched_time_rr::sched_ul_newtxs(std::map<uint16_t, sched_ue>& ue_db, sf_sche
     if (h == nullptr) {
       continue;
     }
-    uint32_t ue_cc_idx = user.enb_to_ue_cc_idx(cc_cfg->enb_cc_idx);
-    uint32_t pending_data = user.get_pending_ul_new_data(tti_sched->get_tti_tx_ul(), ue_cc_idx);
+    uint32_t pending_data = user.get_pending_ul_new_data(tti_sched->get_tti_tx_ul(), cc_cfg->enb_cc_idx);
     // Check if there is a empty harq, and data to transmit
     if (pending_data == 0) {
       continue;
     }
-    uint32_t pending_rb = user.get_required_prb_ul(ue_cc_idx, pending_data);
+    uint32_t pending_rb = user.get_required_prb_ul(cc_cfg->enb_cc_idx, pending_data);
     prb_interval alloc = find_contiguous_ul_prbs(pending_rb, tti_sched->get_ul_mask());
     if (alloc.empty()) {
       continue;

@@ -20,11 +20,11 @@ const uint32_t seed = std::chrono::system_clock::now().time_since_epoch().count(
 const uint32_t PCell_IDX = 0;
 const std::array<uint32_t, 6> prb_list = {6, 15, 25, 50, 75, 100};

-uint32_t get_aggr_level(sched_ue& sched_ue, uint32_t ue_cc_idx, const std::vector<sched_cell_params_t>& cell_params)
+uint32_t get_aggr_level(sched_ue& sched_ue, uint32_t enb_cc_idx, const std::vector<sched_cell_params_t>& cell_params)
 {
   srslte_dci_format_t dci_format = sched_ue.get_dci_format();
-  uint32_t nof_dci_bits = srslte_dci_format_sizeof(&cell_params[ue_cc_idx].cfg.cell, nullptr, nullptr, dci_format);
-  uint32_t aggr_level = sched_ue.get_aggr_level(ue_cc_idx, nof_dci_bits);
+  uint32_t nof_dci_bits = srslte_dci_format_sizeof(&cell_params[enb_cc_idx].cfg.cell, nullptr, nullptr, dci_format);
+  uint32_t aggr_level = sched_ue.get_aggr_level(enb_cc_idx, nof_dci_bits);
   return aggr_level;
 }
