From c689343d814767868e850b8c42d4526455c5a6e1 Mon Sep 17 00:00:00 2001
From: Francisco Paisana
Date: Mon, 21 May 2018 15:40:11 +0100
Subject: [PATCH] Scheduler fix (#200)

* scheduler still not working with RBGs. The reservation of RBGs is not sufficient for the RAR allocation.
* now in the scheduler, we allocate space not only for pending data but also for headers and CEs
* can't connect. going to check if it is an issue of the next branch
* cleaned up the interface
* removed obsolete function
* minor fix
* function name change
---
 srsenb/hdr/mac/scheduler_metric.h  | 10 ++---
 srsenb/hdr/mac/scheduler_ue.h      | 18 +++++----
 srsenb/src/mac/scheduler.cc        |  6 +--
 srsenb/src/mac/scheduler_metric.cc | 48 ++++++++++++------------
 srsenb/src/mac/scheduler_ue.cc     | 60 +++++++++++++++++++++---------
 5 files changed, 85 insertions(+), 57 deletions(-)

diff --git a/srsenb/hdr/mac/scheduler_metric.h b/srsenb/hdr/mac/scheduler_metric.h
index 4685f932a..9c40eda5c 100644
--- a/srsenb/hdr/mac/scheduler_metric.h
+++ b/srsenb/hdr/mac/scheduler_metric.h
@@ -34,7 +34,7 @@ namespace srsenb {
 class dl_metric_rr : public sched::metric_dl
 {
 public:
-  void new_tti(std::map<uint16_t,sched_ue> &ue_db, uint32_t start_rb, uint32_t nof_rb, uint32_t nof_ctrl_symbols, uint32_t tti);
+  void new_tti(std::map<uint16_t,sched_ue> &ue_db, uint32_t start_rbg, uint32_t nof_rbg, uint32_t nof_ctrl_symbols, uint32_t tti);
   dl_harq_proc* get_user_allocation(sched_ue *user);
 
 private:
@@ -49,14 +49,14 @@ private:
   uint32_t count_rbg(uint32_t mask);
   uint32_t calc_rbg_mask(bool mask[25]);
 
-  bool     used_rb[MAX_RBG];
+  bool     used_rbg[MAX_RBG];
   uint32_t current_tti;
-  uint32_t total_rb;
-  uint32_t used_rb_mask;
+  uint32_t total_rbg;
+  uint32_t used_rbg_mask;
   uint32_t nof_ctrl_symbols;
-  uint32_t available_rb;
+  uint32_t available_rbg;
 };
 
 class ul_metric_rr : public sched::metric_ul
diff --git a/srsenb/hdr/mac/scheduler_ue.h b/srsenb/hdr/mac/scheduler_ue.h
index aaa4b49b7..aafad89a6 100644
--- a/srsenb/hdr/mac/scheduler_ue.h
+++ b/srsenb/hdr/mac/scheduler_ue.h
@@ -93,11 +93,15 @@ public:
    * Functions used by scheduler metric objects
    *******************************************************/
 
-  uint32_t get_required_prb_dl(uint32_t req_bytes, uint32_t nof_ctrl_symbols);
-  uint32_t get_required_prb_ul(uint32_t req_bytes);
+  uint32_t get_required_prb_dl(uint32_t req_bytes, uint32_t nof_ctrl_symbols);
+  uint32_t get_required_prb_ul(uint32_t req_bytes);
+  uint32_t prb_to_rbg(uint32_t nof_prb);
+  uint32_t rgb_to_prb(uint32_t nof_rbg);
+
   uint32_t get_pending_dl_new_data(uint32_t tti);
   uint32_t get_pending_ul_new_data(uint32_t tti);
+  uint32_t get_pending_dl_new_data_total(uint32_t tti);
 
   dl_harq_proc *get_pending_dl_harq(uint32_t tti);
   dl_harq_proc *get_empty_dl_harq();
@@ -147,11 +151,10 @@ private:
 
   static bool bearer_is_ul(ue_bearer_t *lch);
   static bool bearer_is_dl(ue_bearer_t *lch);
-  
+
   bool is_first_dl_tx();
-  
-  
-  sched_interface::ue_cfg_t cfg;
+
+  sched_interface::ue_cfg_t cfg;
   srslte_cell_t cell;
 
   srslte::log* log_h;
@@ -175,7 +178,8 @@ private:
   uint32_t max_mcs_dl;
   uint32_t max_mcs_ul;
   int fixed_mcs_ul;
-  int fixed_mcs_dl;
+  int fixed_mcs_dl;
+  uint32_t P;
 
   int next_tpc_pusch;
   int next_tpc_pucch;
diff --git a/srsenb/src/mac/scheduler.cc b/srsenb/src/mac/scheduler.cc
index 53b3cd939..75d4cbaa3 100644
--- a/srsenb/src/mac/scheduler.cc
+++ b/srsenb/src/mac/scheduler.cc
@@ -610,7 +610,7 @@ int sched::dl_sched_rar(dl_sched_rar_t rar[MAX_RAR_LIST])
 
   int nof_rar_elems = 0;
   for (uint32_t i=0;i<SCHED_MAX_PENDING_RAR;i++) {
-    if (pending_rar[i].buf_rar > 0 && avail_rbg >= rar_n_rb)
+    if (pending_rar[i].buf_rar > 0 && avail_rbg >= (uint32_t)ceil((float)rar_n_rb/P))
     {
      /* Check if we are still within the RAR window, otherwise discard it */
      if (current_tti <= (pending_rar[i].rar_tti + cfg.prach_rar_window + 3)%10240 && current_tti >= pending_rar[i].rar_tti + 3)
@@ -664,8 +664,8 @@ int sched::dl_sched_rar(dl_sched_rar_t rar[MAX_RAR_LIST])
       if (generate_format1a(start_rbg*P, rar_n_rb, buf_rar, 0, &rar[nof_rar_elems].dci) >= 0) {
         rar[nof_rar_elems].tbs = buf_rar;
         nof_rar_elems++;
-        avail_rbg -= rar_n_rb;
-        start_rbg += rar_n_rb;
+        avail_rbg -= (uint32_t)ceil((float)rar_n_rb/P);
+        start_rbg += (uint32_t)ceil((float)rar_n_rb/P);
       } else {
         Error("SCHED: Allocating Format1A grant\n");
       }
diff --git a/srsenb/src/mac/scheduler_metric.cc b/srsenb/src/mac/scheduler_metric.cc
index f6ec18555..ff12d6641 100644
--- a/srsenb/src/mac/scheduler_metric.cc
+++ b/srsenb/src/mac/scheduler_metric.cc
@@ -47,9 +47,9 @@ uint32_t dl_metric_rr::calc_rbg_mask(bool mask[MAX_RBG])
 {
   // Build RBG bitmask
   uint32_t rbg_bitmask = 0;
-  for (uint32_t n=0;n<total_rb;n++) {
+  for (uint32_t n=0;n<total_rbg;n++) {
     if (mask[n]) {
-      rbg_bitmask |= (1<<(total_rb-1-n));
+      rbg_bitmask |= (1<<(total_rbg-1-n));
     }
   }
   return rbg_bitmask;
@@ ... @@
     return count_rbg(h->get_rbgmask());
   }
   uint32_t pending_data = user->get_pending_dl_new_data(current_tti);
-  return user->get_required_prb_dl(pending_data, nof_ctrl_symbols);
+  return user->prb_to_rbg(user->get_required_prb_dl(pending_data, nof_ctrl_symbols));
 }
 
-void dl_metric_rr::new_tti(std::map<uint16_t,sched_ue> &ue_db, uint32_t start_rb, uint32_t nof_rb, uint32_t nof_ctrl_symbols_, uint32_t tti)
+void dl_metric_rr::new_tti(std::map<uint16_t,sched_ue> &ue_db, uint32_t start_rbg, uint32_t nof_rbg, uint32_t nof_ctrl_symbols_, uint32_t tti)
 {
-  total_rb = start_rb+nof_rb;
-  for (uint32_t i=0;i<total_rb;i++) {
+  total_rbg = start_rbg+nof_rbg;
+  for (uint32_t i=0;i<total_rbg;i++) {
@@ ... @@ bool dl_metric_rr::new_allocation(uint32_t nof_rbg, uint32_t *rbgmask) {
-  for (uint32_t i=0;i<total_rb && nof_rbg > 0;i++) {
-    if (used_rb[i]) {
+  for (uint32_t i=0;i<total_rbg && nof_rbg > 0;i++) {
+    if (used_rbg[i]) {
       mask_bit[i] = false;
     } else {
       mask_bit[i] = true;
@@ -126,24 +126,24 @@ bool dl_metric_rr::new_allocation(uint32_t nof_rbg, uint32_t *rbgmask) {
 }
 
 void dl_metric_rr::update_allocation(uint32_t new_mask) {
-  used_rb_mask |= new_mask;
-  for (uint32_t n=0;n<total_rb;n++) {
+  used_rbg_mask |= new_mask;
+  for (uint32_t n=0;n<total_rbg;n++) {
@@ ... @@ dl_harq_proc* dl_metric_rr::apply_user_allocation(sched_ue *user) {
-  uint32_t pending_data = user->get_pending_dl_new_data(current_tti);
   dl_harq_proc *h = user->get_pending_dl_harq(current_tti);
+  uint32_t req_bytes = user->get_pending_dl_new_data_total(current_tti);
 
   // Schedule retx if we have space
 #if ASYNC_DL_SCHED
@@ -160,7 +160,7 @@ dl_harq_proc* dl_metric_rr::apply_user_allocation(sched_ue *user) {
       // If not, try to find another mask in the current tti
       uint32_t nof_rbg = count_rbg(retx_mask);
-      if (nof_rbg < available_rb) {
+      if (nof_rbg < available_rbg) {
        if (new_allocation(nof_rbg, &retx_mask)) {
          update_allocation(retx_mask);
          h->set_rbgmask(retx_mask);
@@ -176,10 +176,10 @@ dl_harq_proc* dl_metric_rr::apply_user_allocation(sched_ue *user) {
   if (h && h->is_empty()) {
 #endif
     // Allocate resources based on pending data
-    if (pending_data) {
-      uint32_t pending_rb = user->get_required_prb_dl(pending_data, nof_ctrl_symbols);
+    if (req_bytes) {
+      uint32_t pending_rbg = user->prb_to_rbg(user->get_required_prb_dl(req_bytes, nof_ctrl_symbols));
       uint32_t newtx_mask = 0;
-      new_allocation(pending_rb, &newtx_mask);
+      new_allocation(pending_rbg, &newtx_mask);
       if (newtx_mask) {
         update_allocation(newtx_mask);
         h->set_rbgmask(newtx_mask);
diff --git a/srsenb/src/mac/scheduler_ue.cc b/srsenb/src/mac/scheduler_ue.cc
index f9245ac9b..2eacf06f0 100644
--- a/srsenb/src/mac/scheduler_ue.cc
+++ b/srsenb/src/mac/scheduler_ue.cc
@@ -71,6 +71,7 @@ void sched_ue::set_cfg(uint16_t rnti_, sched_interface::ue_cfg_t *cfg_, sched_in
   rnti  = rnti_;
   log_h = log_h_;
   memcpy(&cell, &cell_cfg->cell, sizeof(srslte_cell_t));
+  P = srslte_ra_type0_P(cell.nof_prb);
 
   max_mcs_dl = 28;
   max_mcs_ul = 28;
@@ -710,6 +711,22 @@ uint32_t sched_ue::get_pending_dl_new_data(uint32_t tti)
   return pending_data;
 }
 
+/// Use this function in the dl-metric to get the bytes to be scheduled. It accounts for the UE data,
+/// the RAR resources, and headers
+/// \param tti
+/// \return number of bytes to be allocated
+uint32_t sched_ue::get_pending_dl_new_data_total(uint32_t tti)
+{
+  uint32_t req_bytes = get_pending_dl_new_data(tti);
+  if(req_bytes>0) {
+    req_bytes += (req_bytes < 128) ? 2 : 3; // consider the header
+    if(is_first_dl_tx()) {
+      req_bytes += 6; // count for RAR
+    }
+  }
+  return req_bytes;
+}
+
 uint32_t sched_ue::get_pending_ul_new_data(uint32_t tti)
 {
   uint32_t pending_data = 0;
@@ -746,32 +763,39 @@ uint32_t sched_ue::get_pending_ul_old_data()
   return pending_data;
 }
 
+uint32_t sched_ue::prb_to_rbg(uint32_t nof_prb)
+{
+  return (uint32_t) ceil((float) nof_prb / P);
+}
 
-uint32_t sched_ue::get_required_prb_dl(uint32_t req_bytes, uint32_t nof_ctrl_symbols)
+uint32_t sched_ue::rgb_to_prb(uint32_t nof_rbg)
 {
-  int mcs = 0;
-  uint32_t nbytes = 0;
-  uint32_t n = 0;
-  if (req_bytes == 0) {
-    return 0;
-  }
-
-  uint32_t nof_re = 0;
-  int tbs = 0;
-  for (n=1;n<=cell.nof_prb && nbytes < req_bytes;n++) {
-    nof_re = srslte_ra_dl_approx_nof_re(cell, n, nof_ctrl_symbols);
-    if (fixed_mcs_dl < 0) {
-      tbs = alloc_tbs_dl(n, nof_re, 0, &mcs);
+  return P*nof_rbg;
+}
+
+uint32_t sched_ue::get_required_prb_dl(uint32_t req_bytes, uint32_t nof_ctrl_symbols)
+{
+  int mcs = 0;
+  uint32_t nof_re = 0;
+  int tbs = 0;
+
+  uint32_t nbytes = 0;
+  uint32_t n;
+  for (n=0; n < cell.nof_prb && nbytes < req_bytes; ++n) {
+    nof_re = srslte_ra_dl_approx_nof_re(cell, n+1, nof_ctrl_symbols);
+    if(fixed_mcs_dl < 0) {
+      tbs = alloc_tbs_dl(n+1, nof_re, 0, &mcs);
     } else {
-      tbs = srslte_ra_tbs_from_idx(srslte_ra_tbs_idx_from_mcs(fixed_mcs_dl), n)/8;
+      tbs = srslte_ra_tbs_from_idx(srslte_ra_tbs_idx_from_mcs(fixed_mcs_dl), n+1)/8;
     }
     if (tbs > 0) {
-      nbytes = tbs;
+      nbytes = tbs;
     } else if (tbs < 0) {
-      return 0;
+      return 0;
    }
   }
-  return n;
+
+  return n;
 }
 
 uint32_t sched_ue::get_required_prb_ul(uint32_t req_bytes)
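
Note on the conversions this patch relies on: RAR grants and UE allocations are now sized in RBGs rather than PRBs, and the pending-data estimate is padded with MAC overhead before asking for PRBs. The following standalone C++ sketch (not part of the patch) illustrates that arithmetic; rbg_size_P() is a local stand-in for srslte_ra_type0_P() using the RBG sizes of TS 36.213 Table 7.1.6.1-1, and padded_req_bytes() mirrors the logic of sched_ue::get_pending_dl_new_data_total().

#include <cmath>
#include <cstdint>
#include <cstdio>

// RBG size P for a given downlink bandwidth (TS 36.213 Table 7.1.6.1-1).
// Inside srsLTE this value comes from srslte_ra_type0_P(); this is a local stand-in.
static uint32_t rbg_size_P(uint32_t nof_prb)
{
  if (nof_prb <= 10) { return 1; }
  if (nof_prb <= 26) { return 2; }
  if (nof_prb <= 63) { return 3; }
  return 4; // 64..110 PRB
}

// Number of RBGs needed to carry nof_prb PRBs: ceil(nof_prb / P),
// the same rounding the patch applies to rar_n_rb and to the get_required_prb_dl() result.
static uint32_t prb_to_rbg(uint32_t nof_prb, uint32_t P)
{
  return (uint32_t)std::ceil((float)nof_prb / (float)P);
}

// Padded byte estimate mirroring sched_ue::get_pending_dl_new_data_total():
// 2 or 3 bytes of MAC subheader, plus 6 extra bytes on the first DL transmission
// (the patch comments this as "count for RAR").
static uint32_t padded_req_bytes(uint32_t pending_bytes, bool first_dl_tx)
{
  uint32_t req_bytes = pending_bytes;
  if (req_bytes > 0) {
    req_bytes += (req_bytes < 128) ? 2 : 3;
    if (first_dl_tx) {
      req_bytes += 6;
    }
  }
  return req_bytes;
}

int main()
{
  uint32_t nof_prb = 50;                  // 10 MHz cell
  uint32_t P       = rbg_size_P(nof_prb); // P = 3 for 50 PRB
  // A 3-PRB RAR grant consumes ceil(3/3) = 1 RBG of the scheduler's RBG map,
  // which is what the fixed dl_sched_rar() now subtracts from avail_rbg.
  std::printf("RAR: 3 PRB -> %u RBG\n", prb_to_rbg(3, P));
  // 100 pending bytes on a first DL transmission are scheduled as 100 + 2 + 6 = 108 bytes.
  std::printf("req_bytes: %u\n", padded_req_bytes(100, true));
  return 0;
}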