diff --git a/lib/include/srslte/common/bounded_bitset.h b/lib/include/srslte/common/bounded_bitset.h index d571e6ea9..407544068 100644 --- a/lib/include/srslte/common/bounded_bitset.h +++ b/lib/include/srslte/common/bounded_bitset.h @@ -43,7 +43,7 @@ class bounded_bitset public: constexpr bounded_bitset() : buffer(), cur_size(0) {} - constexpr bounded_bitset(size_t cur_size_) : buffer(), cur_size(cur_size_) {} + constexpr explicit bounded_bitset(size_t cur_size_) : buffer(), cur_size(cur_size_) {} constexpr size_t max_size() const noexcept { return N; } diff --git a/lib/include/srslte/interfaces/sched_interface.h b/lib/include/srslte/interfaces/sched_interface.h index ba9e5480e..3f33f125d 100644 --- a/lib/include/srslte/interfaces/sched_interface.h +++ b/lib/include/srslte/interfaces/sched_interface.h @@ -29,8 +29,11 @@ namespace srsenb { class sched_interface { -public: - +public: + const static uint32_t max_cce = 128; + const static uint32_t max_prb = 100; + const static uint32_t max_rbg = 25; + const static int MAX_SIB_PAYLOAD_LEN = 2048; const static int MAX_SIBS = 16; const static int MAX_LC = 6; diff --git a/lib/src/upper/rlc_am.cc b/lib/src/upper/rlc_am.cc index 2c2810ab2..ac778d8bd 100644 --- a/lib/src/upper/rlc_am.cc +++ b/lib/src/upper/rlc_am.cc @@ -1297,7 +1297,7 @@ void rlc_am::rlc_am_rx::handle_data_pdu(uint8_t *payload, uint32_t nof_bytes, rl } memcpy(pdu.buf->msg, payload, nof_bytes); pdu.buf->N_bytes = nof_bytes; - pdu.header = header; + pdu.header = header; rx_window[header.sn] = pdu; @@ -1382,7 +1382,7 @@ void rlc_am::rlc_am_rx::handle_data_pdu_segment(uint8_t *payload, uint32_t nof_b memcpy(segment.buf->msg, payload, nof_bytes); segment.buf->N_bytes = nof_bytes; - segment.header = header; + segment.header = header; // Check if we already have a segment from the same PDU it = rx_segments.find(header.sn); diff --git a/lib/test/upper/rlc_am_data_test.cc b/lib/test/upper/rlc_am_data_test.cc index 282770be7..98556f4eb 100644 --- a/lib/test/upper/rlc_am_data_test.cc +++ b/lib/test/upper/rlc_am_data_test.cc @@ -146,7 +146,8 @@ void test4() assert(b2.msg[i] == b1.msg[i]); } -int main(int argc, char **argv) { +int main(int argc, char** argv) +{ test1(); test2(); test3(); diff --git a/srsenb/hdr/mac/scheduler.h b/srsenb/hdr/mac/scheduler.h index 65b737b9a..4bc61e1bd 100644 --- a/srsenb/hdr/mac/scheduler.h +++ b/srsenb/hdr/mac/scheduler.h @@ -24,7 +24,7 @@ #include "scheduler_harq.h" #include "scheduler_ue.h" -#include "srslte/common/bounded_bitset.h" +#include "srsenb/hdr/mac/scheduler_grid.h" #include "srslte/common/log.h" #include "srslte/interfaces/enb_interfaces.h" #include "srslte/interfaces/sched_interface.h" @@ -42,39 +42,53 @@ namespace srsenb { */ class sched : public sched_interface -{ - - -public: +{ +public: + // handle for DL metric + class dl_tti_sched_t + { + public: + virtual alloc_outcome_t alloc_dl_user(sched_ue* user, const rbgmask_t& user_mask, uint32_t pid) = 0; + virtual const rbgmask_t& get_dl_mask() const = 0; + virtual uint32_t get_tti_tx_dl() const = 0; + virtual uint32_t get_nof_ctrl_symbols() const = 0; + virtual bool is_dl_alloc(sched_ue* user) const = 0; + }; + + // handle for UL metric + class ul_tti_sched_t + { + public: + virtual alloc_outcome_t alloc_ul_user(sched_ue* user, ul_harq_proc::ul_alloc_t alloc) = 0; + virtual const prbmask_t& get_ul_mask() const = 0; + virtual uint32_t get_tti_tx_ul() const = 0; + virtual bool is_ul_alloc(sched_ue* user) const = 0; + }; - /************************************************************* * * Scheduling 
metric interface definition * ************************************************************/ - + class metric_dl { public: /* Virtual methods for user metric calculation */ - virtual void - sched_users(std::map<uint16_t, sched_ue>& ue_db, rbgmask_t* dl_mask, uint32_t nof_ctrl_symbols, uint32_t tti) = 0; + virtual void set_log(srslte::log* log_) = 0; + virtual void sched_users(std::map<uint16_t, sched_ue>& ue_db, dl_tti_sched_t* tti_sched) = 0; }; class metric_ul { public: - /* Virtual methods for user metric calculation */ - virtual void sched_users(std::map<uint16_t, sched_ue>& ue_db, ul_mask_t* ul_mask, uint32_t tti) = 0; - virtual bool update_allocation(ul_harq_proc::ul_alloc_t alloc) = 0; + virtual void set_log(srslte::log* log_) = 0; + virtual void sched_users(std::map<uint16_t, sched_ue>& ue_db, ul_tti_sched_t* tti_sched) = 0; };
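To make the new contract concrete: a metric implementation now receives a TTI handle and performs every allocation through it, instead of manipulating RBG/PRB masks directly. A minimal sketch against these interfaces (the class toy_metric_dl is hypothetical and not part of this patch; it only uses methods declared in this diff):

// Hypothetical example (not part of the patch): a trivial DL metric that gives
// each user with an empty harq proc the two lowest RBGs and lets the TTI
// scheduler check RBG and PDCCH collisions.
class toy_metric_dl : public sched::metric_dl
{
public:
  void set_log(srslte::log* log_) final { log_h = log_; }
  void sched_users(std::map<uint16_t, sched_ue>& ue_db, sched::dl_tti_sched_t* tti_sched) final
  {
    for (auto& it : ue_db) {
      sched_ue*     user = &it.second;
      dl_harq_proc* h    = user->get_empty_dl_harq();
      if (h == nullptr) {
        continue; // no empty DL harq proc for this user
      }
      rbgmask_t mask(tti_sched->get_dl_mask().size());
      mask.fill(0, 2);
      // fails with RB_COLLISION/DCI_COLLISION if the mask or CCEs clash
      if (not tti_sched->alloc_dl_user(user, mask, h->get_id())) {
        break;
      }
    }
  }

private:
  srslte::log* log_h = nullptr;
};

 - - /************************************************************* * * FAPI-like Interface * ************************************************************/ @@ -117,25 +131,24 @@ public: int ul_bsr(uint16_t rnti, uint32_t lcid, uint32_t bsr, bool set_value = true); int ul_recv_len(uint16_t rnti, uint32_t lcid, uint32_t len); int ul_phr(uint16_t rnti, int phr); - int ul_cqi_info(uint32_t tti, uint16_t rnti, uint32_t cqi, uint32_t ul_ch_code); - - int dl_sched(uint32_t tti, dl_sched_res_t *sched_result); - int ul_sched(uint32_t tti, ul_sched_res_t *sched_result); + int ul_cqi_info(uint32_t tti, uint16_t rnti, uint32_t cqi, uint32_t ul_ch_code); + + int dl_sched(uint32_t tti, dl_sched_res_t* sched_result) final; + int ul_sched(uint32_t tti, ul_sched_res_t* sched_result) final; /* Custom TPC functions */ void tpc_inc(uint16_t rnti); void tpc_dec(uint16_t rnti); - - + // Static Methods static uint32_t get_rvidx(uint32_t retx_idx) { const static int rv_idx[4] = {0, 2, 3, 1}; return rv_idx[retx_idx%4]; } - static void generate_cce_location( srslte_regs_t* regs, sched_ue::sched_dci_cce_t* location, uint32_t cfi, uint32_t sf_idx = 0, uint16_t rnti = 0); + static uint32_t aggr_level(uint32_t aggr_idx) { return 1u << aggr_idx; } protected: metric_dl *dl_metric; @@ -149,30 +162,8 @@ protected: cell_cfg_t cfg; sched_args_t sched_cfg; - const static int MAX_PRB = 100; - const static int MAX_RBG = 25; - const static int MAX_CCE = 128; - // This is for computing DCI locations srslte_regs_t regs; - class sched_vars - { - public: - struct tti_vars_t { - srslte::bounded_bitset<MAX_CCE> used_cce; - uint32_t tti_rx = 0; - tti_vars_t() : used_cce(MAX_CCE) {} - }; - void init(sched* parent_); - tti_vars_t& new_tti(uint32_t tti_rx); - tti_vars_t& tti_vars(uint32_t tti_rx); - - private: - static const uint32_t tti_array_size = 16; - sched* parent = NULL; - tti_vars_t tti_vars_[tti_array_size]; - }; - sched_vars sched_vars; typedef struct { int buf_rar; @@ -187,30 +178,124 @@ protected: uint32_t n_tx; } sched_sib_t; - int dl_sched_bc(dl_sched_bc_t bc[MAX_BC_LIST]); - int dl_sched_rar(dl_sched_rar_t rar[MAX_RAR_LIST]); - int dl_sched_data(dl_sched_data_t data[MAX_DATA_LIST]); + class tti_sched_t : public dl_tti_sched_t, public ul_tti_sched_t + { + public: + struct ctrl_alloc_t { + size_t dci_idx; + rbg_range_t rbg_range; + uint16_t rnti; + uint32_t req_bytes; + alloc_type_t alloc_type; + }; + struct rar_alloc_t : public ctrl_alloc_t { + dl_sched_rar_t rar_grant; + rar_alloc_t() = default; + explicit rar_alloc_t(const ctrl_alloc_t& c) : ctrl_alloc_t(c) {} + }; + struct bc_alloc_t : public ctrl_alloc_t { + uint32_t rv = 0; + uint32_t sib_idx = 0; + bc_alloc_t() = default; + explicit bc_alloc_t(const ctrl_alloc_t& c) : ctrl_alloc_t(c) {} + }; + struct dl_alloc_t { + size_t dci_idx; + sched_ue* user_ptr; + rbgmask_t user_mask; + uint32_t 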
pid; + }; + struct ul_alloc_t { + enum type_t { NEWTX, NOADAPT_RETX, ADAPT_RETX, MSG3 }; + size_t dci_idx; + type_t type; + sched_ue* user_ptr; + ul_harq_proc::ul_alloc_t alloc; + uint32_t mcs = 0; + bool is_retx() const { return type == NOADAPT_RETX or type == ADAPT_RETX; } + bool is_msg3() const { return type == MSG3; } + bool needs_pdcch() const { return type == NEWTX or type == ADAPT_RETX; } + }; + typedef std::pair<alloc_outcome_t, rar_alloc_t*> rar_code_t; + typedef std::pair<alloc_outcome_t, ctrl_alloc_t> ctrl_code_t; + + // TTI scheduler result + pdcch_mask_t pdcch_mask; + sched_interface::dl_sched_res_t dl_sched_result; + sched_interface::ul_sched_res_t ul_sched_result; + + void init(sched* parent_); + void new_tti(uint32_t tti_rx_, uint32_t start_cfi); + alloc_outcome_t alloc_bc(uint32_t aggr_lvl, uint32_t sib_idx, uint32_t sib_ntx); + alloc_outcome_t alloc_paging(uint32_t aggr_lvl, uint32_t paging_payload); + rar_code_t alloc_rar(uint32_t aggr_lvl, const dl_sched_rar_t& rar_grant, uint32_t rar_tti, uint32_t buf_rar); + void generate_dcis(); + // dl_tti_sched itf + alloc_outcome_t alloc_dl_user(sched_ue* user, const rbgmask_t& user_mask, uint32_t pid) final; + uint32_t get_tti_tx_dl() const final { return tti_alloc.get_tti_tx_dl(); } + uint32_t get_nof_ctrl_symbols() const final; + const rbgmask_t& get_dl_mask() const final { return tti_alloc.get_dl_mask(); } + // ul_tti_sched itf + alloc_outcome_t alloc_ul_user(sched_ue* user, ul_harq_proc::ul_alloc_t alloc) final; + alloc_outcome_t alloc_ul_msg3(sched_ue* user, ul_harq_proc::ul_alloc_t alloc, uint32_t mcs); + const prbmask_t& get_ul_mask() const final { return tti_alloc.get_ul_mask(); } + uint32_t get_tti_tx_ul() const final { return tti_alloc.get_tti_tx_ul(); } + + // getters + const pdcch_mask_t& get_pdcch_mask() const { return pdcch_mask; } + rbgmask_t& get_dl_mask() { return tti_alloc.get_dl_mask(); } + prbmask_t& get_ul_mask() { return tti_alloc.get_ul_mask(); } + const std::vector<ul_alloc_t>& get_ul_allocs() const { return ul_data_allocs; } + uint32_t get_cfi() const { return tti_alloc.get_cfi(); } + uint32_t get_tti_rx() const { return tti_alloc.get_tti_rx(); } + uint32_t get_sfn() const { return tti_alloc.get_sfn(); } + uint32_t get_sf_idx() const { return tti_alloc.get_sf_idx(); } - void ul_sched_msg3(); + private: + bool is_dl_alloc(sched_ue* user) const final; + bool is_ul_alloc(sched_ue* user) const final; + ctrl_code_t alloc_dl_ctrl(uint32_t aggr_lvl, uint32_t tbs_bytes, uint16_t rnti); + alloc_outcome_t alloc_ul(sched_ue* user, + ul_harq_proc::ul_alloc_t alloc, + tti_sched_t::ul_alloc_t::type_t alloc_type, + uint32_t msg3 = 0); + int generate_format1a( + uint32_t rb_start, uint32_t l_crb, uint32_t tbs, uint32_t rv, uint16_t rnti, srslte_dci_dl_t* dci); + void set_bc_sched_result(const pdcch_grid_t::alloc_result_t& dci_result); + void set_rar_sched_result(const pdcch_grid_t::alloc_result_t& dci_result); + void set_dl_data_sched_result(const pdcch_grid_t::alloc_result_t& dci_result); + void set_ul_sched_result(const pdcch_grid_t::alloc_result_t& dci_result); + + // consts + sched* parent = NULL; + srslte::log* log_h = NULL; + uint32_t P; + cell_cfg_sib_t* sibs_cfg = NULL; + + // internal state + tti_grid_t tti_alloc; + std::vector<rar_alloc_t> rar_allocs; + std::vector<bc_alloc_t> bc_allocs; + std::vector<dl_alloc_t> data_allocs; + std::vector<ul_alloc_t> ul_data_allocs; + }; - int generate_format1a( - uint32_t rb_start, uint32_t l_crb, uint32_t tbs, uint32_t rv, uint16_t rnti, srslte_dci_dl_t* dci); - int find_empty_dci(sched_ue::sched_dci_cce_t* locations, - uint32_t aggr_level, - sched_vars::tti_vars_t* tti_vars, - sched_ue* user = 
NULL); - bool generate_dci(srslte_dci_location_t* sched_location, - sched_ue::sched_dci_cce_t* locations, - uint32_t aggr_level, - sched_vars::tti_vars_t* tti_vars, - sched_ue* user = NULL); + const static uint32_t nof_sched_ttis = 10; + tti_sched_t tti_scheds[nof_sched_ttis]; + tti_sched_t* get_tti_sched(uint32_t tti_rx) { return &tti_scheds[tti_rx % nof_sched_ttis]; } - std::map<uint16_t, sched_ue> ue_db; - typedef std::map<uint16_t, sched_ue>::iterator ue_db_it_t; + tti_sched_t* new_tti(uint32_t tti_rx); + void generate_phich(tti_sched_t* tti_sched); + int generate_dl_sched(tti_sched_t* tti_sched); + int generate_ul_sched(tti_sched_t* tti_sched); + void dl_sched_bc(tti_sched_t* tti_sched); + void dl_sched_rar(tti_sched_t* tti_sched); + void dl_sched_data(tti_sched_t* tti_sched); + void ul_sched_msg3(tti_sched_t* tti_sched); + std::map<uint16_t, sched_ue> ue_db; sched_sib_t pending_sibs[MAX_SIBS]; - - + typedef struct { bool enabled; uint16_t rnti; @@ -224,36 +309,27 @@ protected: pending_msg3_t pending_msg3[10]; // Allowed DCI locations for SIB and RAR per CFI - sched_ue::sched_dci_cce_t common_locations[3]; - sched_ue::sched_dci_cce_t rar_locations[3][10]; + sched_ue::sched_dci_cce_t common_locations[3]; + sched_ue::sched_dci_cce_t rar_locations[3][10]; + + // derived from args + uint32_t P; + uint32_t si_n_rbg; + uint32_t rar_n_rbg; + uint32_t nof_rbg; + prbmask_t prach_mask; + prbmask_t pucch_mask; uint32_t bc_aggr_level; uint32_t rar_aggr_level; uint32_t pdsch_re[10]; - uint32_t avail_rbg; - uint32_t P; - uint32_t start_rbg; - uint32_t si_n_rbg; - uint32_t rar_n_rbg; - uint32_t nof_rbg; - uint32_t sf_idx; - uint32_t sfn; uint32_t current_tti; - uint32_t current_cfi; - - ul_mask_t ul_mask; - rbgmask_t dl_mask; - bool fail_dci_alloc = false; bool configured; }; - - - - } #endif // SRSENB_SCHEDULER_H diff --git a/srsenb/hdr/mac/scheduler_grid.h b/srsenb/hdr/mac/scheduler_grid.h new file mode 100644 index 000000000..cbf3b5eb4 --- /dev/null +++ b/srsenb/hdr/mac/scheduler_grid.h @@ -0,0 +1,152 @@ +/* + * Copyright 2013-2019 Software Radio Systems Limited + * + * This file is part of srsLTE. + * + * srsLTE is free software: you can redistribute it and/or modify + * it under the terms of the GNU Affero General Public License as + * published by the Free Software Foundation, either version 3 of + * the License, or (at your option) any later version. + * + * srsLTE is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Affero General Public License for more details. + * + * A copy of the GNU Affero General Public License can be found in + * the LICENSE file in the top-level directory of this distribution + * and at http://www.gnu.org/licenses/. 
+ * + */ + +#ifndef SRSLTE_SCHEDULER_GRID_H +#define SRSLTE_SCHEDULER_GRID_H + +#include "lib/include/srslte/interfaces/sched_interface.h" +#include "srsenb/hdr/mac/scheduler_ue.h" +#include "srslte/common/bounded_bitset.h" +#include "srslte/common/log.h" +#include <vector> + +namespace srsenb { + +// Type of Allocation +enum class alloc_type_t { DL_BC, DL_PCCH, DL_RAR, DL_DATA, UL_DATA }; + +// Result of alloc attempt +struct alloc_outcome_t { + enum result_enum { SUCCESS, DCI_COLLISION, RB_COLLISION, ERROR }; + result_enum result = ERROR; + alloc_outcome_t() = default; + alloc_outcome_t(result_enum e) : result(e) {} + operator result_enum() { return result; } + operator bool() { return result == SUCCESS; } + const char* to_string() const; +}; + +class pdcch_grid_t +{ +public: + struct alloc_t { + uint16_t rnti; + srslte_dci_location_t dci_pos = {0, 0}; + pdcch_mask_t current_mask; + pdcch_mask_t total_mask; + }; + typedef std::vector<const alloc_t*> alloc_result_t; + + void init(srslte::log* log_, + srslte_regs_t* regs, + sched_ue::sched_dci_cce_t* common_locs, + sched_ue::sched_dci_cce_t (*rar_locs)[10]); + void new_tti(uint32_t tti_rx_, uint32_t start_cfi); + bool alloc_dci(alloc_type_t alloc_type, uint32_t aggr_idx, sched_ue* user = NULL); + bool set_cfi(uint32_t cfi); + + // getters + uint32_t get_cfi() const { return current_cfix + 1; } + void get_allocs(alloc_result_t* vec = nullptr, pdcch_mask_t* tot_mask = nullptr, size_t idx = 0) const; + uint32_t nof_cces() const { return cce_size_array[current_cfix]; } + size_t nof_allocs() const { return nof_dci_allocs; } + size_t nof_alloc_combinations() const { return prev_end - prev_start; } + void print_result(bool verbose = false) const; + uint32_t get_sf_idx() const { return sf_idx; } + +private: + const static uint32_t nof_cfis = 3; + typedef std::pair<int, alloc_t> tree_node_t; + + void reset(); + const sched_ue::sched_dci_cce_t* get_cce_loc_table(alloc_type_t alloc_type, sched_ue* user) const; + void update_alloc_tree(int node_idx, + uint32_t aggr_idx, + sched_ue* user, + alloc_type_t alloc_type, + const sched_ue::sched_dci_cce_t* dci_locs); + + // consts + srslte::log* log_h = nullptr; + sched_ue::sched_dci_cce_t* common_locations = nullptr; + sched_ue::sched_dci_cce_t* rar_locations[10]; + uint32_t cce_size_array[nof_cfis]; + + // tti vars + uint32_t tti_rx; + uint32_t sf_idx; + uint32_t current_cfix; + size_t prev_start, prev_end; + std::vector<tree_node_t> dci_alloc_tree; + size_t nof_dci_allocs; +}; +
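A rough usage sketch of the intended call flow (illustrative only; the helper name example_pdcch_usage is hypothetical): reserve CCE space for each DCI first, then commit to one concrete placement once all allocations have succeeded. The grid keeps a tree of mutually compatible CCE allocations per CFI and only picks one combination in get_allocs().

// Hypothetical usage of pdcch_grid_t within one TTI (not part of the patch)
inline void example_pdcch_usage(pdcch_grid_t& pdcch, sched_ue* user, uint32_t tti_rx)
{
  pdcch.new_tti(tti_rx, 1); // start searching at CFI=1

  // Reserve CCE space for one SIB DCI (common search space, aggregation level
  // 1 << 2 = 4) and one user DCI (UE-specific search space). A caller can also
  // grow the control region explicitly via set_cfi().
  bool ok = pdcch.alloc_dci(alloc_type_t::DL_BC, 2);
  ok      = ok and pdcch.alloc_dci(alloc_type_t::DL_DATA, 2, user);

  if (ok) {
    pdcch_grid_t::alloc_result_t dci_result;
    pdcch_mask_t                 total_mask;
    // Commit to one consistent combination of DCI positions
    pdcch.get_allocs(&dci_result, &total_mask);
  }
}

+class tti_grid_t +{ +public: + typedef std::pair<alloc_outcome_t, rbg_range_t> ctrl_alloc_t; + + void init(srslte::log* log_, sched_interface::cell_cfg_t* cell_, const pdcch_grid_t& pdcch_grid); + void new_tti(uint32_t tti_rx_, uint32_t start_cfi); + ctrl_alloc_t alloc_dl_ctrl(uint32_t aggr_lvl, alloc_type_t alloc_type); + alloc_outcome_t alloc_dl_data(sched_ue* user, const rbgmask_t& user_mask); + alloc_outcome_t alloc_ul_data(sched_ue* user, ul_harq_proc::ul_alloc_t alloc, bool needs_pdcch); + + // getters + uint32_t get_avail_rbgs() const { return avail_rbg; } + rbgmask_t& get_dl_mask() { return dl_mask; } + const rbgmask_t& get_dl_mask() const { return dl_mask; } + prbmask_t& get_ul_mask() { return ul_mask; } + const prbmask_t& get_ul_mask() const { return ul_mask; } + uint32_t get_cfi() const { return pdcch_alloc.get_cfi(); } + const pdcch_grid_t& get_pdcch_grid() const { return pdcch_alloc; } + uint32_t get_tti_rx() const { return tti_rx; } + uint32_t get_tti_tx_dl() const { return tti_tx_dl; } + uint32_t get_tti_tx_ul() const { return tti_tx_ul; } + uint32_t get_sfn() const { return sfn; } + uint32_t 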
get_sf_idx() const { return pdcch_alloc.get_sf_idx(); } + +private: + alloc_outcome_t alloc_dl(uint32_t aggr_lvl, alloc_type_t alloc_type, rbgmask_t alloc_mask, sched_ue* user = NULL); + + // consts + srslte::log* log_h = nullptr; + sched_interface::cell_cfg_t* cell_cfg = nullptr; + uint32_t nof_prbs; + uint32_t nof_rbgs; + uint32_t si_n_rbg, rar_n_rbg; + + // tti const + uint32_t tti_rx = 10241; + // derived + uint32_t tti_tx_dl, tti_tx_ul; + uint32_t sfn; + pdcch_grid_t pdcch_alloc; + + // internal state + uint32_t avail_rbg = 0; + rbgmask_t dl_mask; + prbmask_t ul_mask; +}; + +} // namespace srsenb + +#endif // SRSLTE_SCHEDULER_GRID_H diff --git a/srsenb/hdr/mac/scheduler_harq.h b/srsenb/hdr/mac/scheduler_harq.h index e9957a6d3..230c313a2 100644 --- a/srsenb/hdr/mac/scheduler_harq.h +++ b/srsenb/hdr/mac/scheduler_harq.h @@ -29,31 +29,55 @@ namespace srsenb { +// MASK used for CCE allocations +typedef srslte::bounded_bitset<sched_interface::max_cce, true> pdcch_mask_t; + +// Range of RBGs +class prb_range_t; +struct rbg_range_t { + uint32_t rbg_start = 0, rbg_end = 0; + rbg_range_t() = default; + rbg_range_t(uint32_t s, uint32_t e) : rbg_start(s), rbg_end(e) {} + rbg_range_t(const prb_range_t& rbgs, uint32_t P); + uint32_t length() const { return rbg_end - rbg_start; } +}; + +// Range of PRBs +struct prb_range_t { + uint32_t prb_start = 0, prb_end = 0; + prb_range_t() = default; + prb_range_t(uint32_t s, uint32_t e) : prb_start(s), prb_end(e) {} + prb_range_t(const rbg_range_t& rbgs, uint32_t P); + uint32_t length() { return prb_end - prb_start; } + static prb_range_t riv_to_prbs(uint32_t riv, uint32_t nof_prbs, int nof_vrbs = -1); +}; +
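For intuition: P is the RBG size derived from the cell bandwidth (srslte_ra_type0_P(); e.g. P=2 for a 25-PRB cell), so rbg_range_t{1,3} covers prb_range_t{2,6}. A plausible sketch of the two converting constructors declared above (illustrative; the actual definitions live in the corresponding .cc file):

// Illustrative sketch of the converting constructors. The RBG end is rounded
// up so that a partial last group still covers all of its PRBs.
rbg_range_t::rbg_range_t(const prb_range_t& prbs, uint32_t P) :
  rbg_range_t(prbs.prb_start / P, (prbs.prb_end + P - 1) / P)
{
}

prb_range_t::prb_range_t(const rbg_range_t& rbgs, uint32_t P) :
  prb_range_t(rbgs.rbg_start * P, rbgs.rbg_end * P)
{
}

 class harq_proc { public: void config(uint32_t id, uint32_t max_retx, srslte::log* log_h); - void set_max_retx(uint32_t max_retx); void reset(uint32_t tb_idx); uint32_t get_id() const; bool is_empty() const; bool is_empty(uint32_t tb_idx) const; - bool get_ack(uint32_t tb_idx) const; - void set_ack(uint32_t tb_idx, bool ack); - uint32_t nof_tx(uint32_t tb_idx) const; uint32_t nof_retx(uint32_t tb_idx) const; uint32_t get_tti() const; bool get_ndi(uint32_t tb_idx) const; + uint32_t max_nof_retx() const; protected: void new_tx_common(uint32_t tb_idx, uint32_t tti, int mcs, int tbs); void new_retx_common(uint32_t tb_idx, uint32_t tti, int* mcs, int* tbs); bool has_pending_retx_common(uint32_t tb_idx) const; + void set_ack_common(uint32_t tb_idx, bool ack); + void reset_pending_data_common(); + + enum ack_t { NULL_ACK, NACK, ACK }; - bool ack[SRSLTE_MAX_TB]; + ack_t ack_state[SRSLTE_MAX_TB]; bool active[SRSLTE_MAX_TB]; bool ndi[SRSLTE_MAX_TB]; uint32_t id; @@ -63,11 +87,8 @@ protected: int tti; int last_mcs[SRSLTE_MAX_TB]; int last_tbs[SRSLTE_MAX_TB]; - - srslte::log* log_h; - private: - bool ack_received[SRSLTE_MAX_TB]; + srslte::log* log_h; }; typedef srslte::bounded_bitset<25, true> rbgmask_t; @@ -76,13 +97,15 @@ class dl_harq_proc : public harq_proc { public: dl_harq_proc(); - void new_tx(uint32_t tb_idx, uint32_t tti, int mcs, int tbs, uint32_t n_cce); - void new_retx(uint32_t tb_idx, uint32_t tti_, int* mcs, int* tbs); - rbgmask_t get_rbgmask(); - void set_rbgmask(rbgmask_t new_mask); + void new_tx(const rbgmask_t& new_mask, uint32_t tb_idx, uint32_t tti, int mcs, int tbs, uint32_t n_cce_); + void new_retx(const rbgmask_t& new_mask, uint32_t tb_idx, uint32_t tti_, int* mcs, int* tbs, uint32_t n_cce_); + void set_ack(uint32_t tb_idx, bool ack); + rbgmask_t get_rbgmask() const; bool has_pending_retx(uint32_t tb_idx, uint32_t tti) const; int get_tbs(uint32_t tb_idx) const; - uint32_t 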
get_n_cce(); + uint32_t get_n_cce() const; + void reset_pending_data(); + private: rbgmask_t rbgmask; uint32_t n_cce; @@ -100,46 +123,31 @@ public: RB_start = start; L = len; } + uint32_t RB_end() const { return RB_start + L; } }; - void new_tx(uint32_t tti, int mcs, int tbs); - void new_retx(uint32_t tb_idx, uint32_t tti_, int* mcs, int* tbs); + void new_tx(uint32_t tti, int mcs, int tbs, ul_alloc_t alloc, uint32_t max_retx_); + void new_retx(uint32_t tb_idx, uint32_t tti_, int* mcs, int* tbs, ul_alloc_t alloc); + void set_ack(uint32_t tb_idx, bool ack); - ul_alloc_t get_alloc(); - void set_alloc(ul_alloc_t alloc); - void set_realloc(ul_alloc_t alloc); - bool has_pending_retx(); - bool is_adaptive_retx(); - bool is_rar_tx(); - bool is_new_tx(); + ul_alloc_t get_alloc() const; + bool has_pending_retx() const; + bool is_adaptive_retx() const; void reset_pending_data(); - bool has_pending_ack(); - uint32_t get_pending_data(); - - void set_rar_mcs(uint32_t mcs); - bool get_rar_mcs(int* mcs); + bool has_pending_ack() const; + bool get_pending_ack() const; + uint32_t get_pending_data() const; private: ul_alloc_t allocation; - bool need_ack; int pending_data; - uint32_t rar_mcs; - bool has_rar_mcs; bool is_adaptive; - bool is_rar; + ack_t pending_ack; }; -class ul_mask_t : public srslte::bounded_bitset<100> -{ - typedef srslte::bounded_bitset<100> base_type; +typedef srslte::bounded_bitset<100, true> prbmask_t; -public: - using srslte::bounded_bitset<100>::any; - using srslte::bounded_bitset<100>::fill; - bool any(ul_harq_proc::ul_alloc_t alloc) const noexcept; - void fill(ul_harq_proc::ul_alloc_t alloc) noexcept; -}; } // namespace srsenb #endif // SRSENB_SCHEDULER_HARQ_H diff --git a/srsenb/hdr/mac/scheduler_metric.h b/srsenb/hdr/mac/scheduler_metric.h index b7f29f011..29332c8a3 100644 --- a/srsenb/hdr/mac/scheduler_metric.h +++ b/srsenb/hdr/mac/scheduler_metric.h @@ -25,40 +25,36 @@ #include "scheduler.h" namespace srsenb { - + class dl_metric_rr : public sched::metric_dl { const static int MAX_RBG = 25; public: - void sched_users(std::map<uint16_t, sched_ue>& ue_db, rbgmask_t* dl_mask, uint32_t nof_ctrl_symbols, uint32_t tti); + void set_log(srslte::log* log_) final; + void sched_users(std::map<uint16_t, sched_ue>& ue_db, sched::dl_tti_sched_t* tti_sched) final; private: bool find_allocation(uint32_t nof_rbg, rbgmask_t* rbgmask); - void update_allocation(rbgmask_t new_mask); - bool allocation_is_valid(rbgmask_t mask); dl_harq_proc* allocate_user(sched_ue* user); - uint32_t current_tti; - // rbgmask_t used_rbg_mask; - rbgmask_t* used_rbg; - uint32_t nof_ctrl_symbols; - uint32_t available_rbg; + srslte::log* log_h = nullptr; + sched::dl_tti_sched_t* tti_alloc = nullptr; }; class ul_metric_rr : public sched::metric_ul { public: - void sched_users(std::map<uint16_t, sched_ue>& ue_db, ul_mask_t* start_mask, uint32_t tti); + void set_log(srslte::log* log_) final; + void sched_users(std::map<uint16_t, sched_ue>& ue_db, sched::ul_tti_sched_t* tti_sched) final; private: bool find_allocation(uint32_t L, ul_harq_proc::ul_alloc_t* alloc); - bool allocation_is_valid(ul_harq_proc::ul_alloc_t alloc); - bool update_allocation(ul_harq_proc::ul_alloc_t alloc); ul_harq_proc* allocate_user_newtx_prbs(sched_ue* user); ul_harq_proc* allocate_user_retx_prbs(sched_ue *user); - ul_mask_t* used_rb; + srslte::log* log_h = nullptr; + sched::ul_tti_sched_t* tti_alloc = nullptr; uint32_t current_tti; }; diff --git a/srsenb/hdr/mac/scheduler_ue.h b/srsenb/hdr/mac/scheduler_ue.h index 97368c619..7c2aef74b 100644 --- a/srsenb/hdr/mac/scheduler_ue.h +++ b/srsenb/hdr/mac/scheduler_ue.h @@ -86,12 +86,9 @@ 
public: void set_max_mcs(int mcs_ul, int mcs_dl); void set_fixed_mcs(int mcs_ul, int mcs_dl); - void set_dl_alloc(dl_harq_proc* alloc); - dl_harq_proc* get_dl_alloc(); - void set_ul_alloc(ul_harq_proc* alloc); - ul_harq_proc* get_ul_alloc(); - dl_harq_proc* find_dl_harq(uint32_t tti); - const dl_harq_proc* get_dl_harq(uint32_t idx) const; + dl_harq_proc* find_dl_harq(uint32_t tti); + dl_harq_proc* get_dl_harq(uint32_t idx); + uint16_t get_rnti() const { return rnti; } /******************************************************* * Functions used by scheduler metric objects @@ -108,7 +105,7 @@ public: uint32_t get_pending_ul_old_data(); uint32_t get_pending_dl_new_data_total(uint32_t tti); - void reset_timeout_dl_harq(uint32_t tti); + void reset_pending_pids(uint32_t tti_rx); dl_harq_proc *get_pending_dl_harq(uint32_t tti); dl_harq_proc* get_empty_dl_harq(); ul_harq_proc* get_ul_harq(uint32_t tti); @@ -120,10 +117,18 @@ public: void set_sr(); void unset_sr(); - int generate_format1(dl_harq_proc *h, sched_interface::dl_sched_data_t *data, uint32_t tti, uint32_t cfi); - int generate_format2a(dl_harq_proc *h, sched_interface::dl_sched_data_t *data, uint32_t tti, uint32_t cfi); - int generate_format2(dl_harq_proc *h, sched_interface::dl_sched_data_t *data, uint32_t tti, uint32_t cfi); - int generate_format0(ul_harq_proc *h, sched_interface::ul_sched_data_t *data, uint32_t tti, bool cqi_request); + int generate_format1( + dl_harq_proc* h, sched_interface::dl_sched_data_t* data, uint32_t tti, uint32_t cfi, const rbgmask_t& user_mask); + int generate_format2a( + dl_harq_proc* h, sched_interface::dl_sched_data_t* data, uint32_t tti, uint32_t cfi, const rbgmask_t& user_mask); + int generate_format2( + dl_harq_proc* h, sched_interface::dl_sched_data_t* data, uint32_t tti, uint32_t cfi, const rbgmask_t& user_mask); + int generate_format0(sched_interface::ul_sched_data_t* data, + uint32_t tti, + ul_harq_proc::ul_alloc_t alloc, + bool needs_pdcch, + srslte_dci_location_t cce_range, + int explicit_mcs = -1); srslte_dci_format_t get_dci_format(); uint32_t get_aggr_level(uint32_t nof_bits); @@ -164,7 +169,8 @@ private: bool needs_cqi_unlocked(uint32_t tti, bool will_send = false); - int generate_format2a_unlocked(dl_harq_proc *h, sched_interface::dl_sched_data_t *data, uint32_t tti, uint32_t cfi); + int generate_format2a_unlocked( + dl_harq_proc* h, sched_interface::dl_sched_data_t* data, uint32_t tti, uint32_t cfi, const rbgmask_t& user_mask); bool is_first_dl_tx(); @@ -191,14 +197,15 @@ private: uint32_t ul_cqi; uint32_t ul_cqi_tti; uint16_t rnti; - uint32_t max_mcs_dl; - uint32_t max_mcs_ul; + uint32_t max_mcs_dl; + uint32_t max_mcs_ul; + uint32_t max_msg3retx; int fixed_mcs_ul; int fixed_mcs_dl; uint32_t P; int next_tpc_pusch; - int next_tpc_pucch; + int next_tpc_pucch; // Allowed DCI locations per CFI and per subframe sched_dci_cce_t dci_locations[3][10]; @@ -210,8 +217,6 @@ private: bool phy_config_dedicated_enabled; asn1::rrc::phys_cfg_ded_s::ant_info_c_ dl_ant_info; - dl_harq_proc* next_dl_harq_proc; - ul_harq_proc* next_ul_harq_proc; }; } diff --git a/srsenb/hdr/mac/ue.h b/srsenb/hdr/mac/ue.h index b02e4f651..035a14eb9 100644 --- a/srsenb/hdr/mac/ue.h +++ b/srsenb/hdr/mac/ue.h @@ -52,7 +52,7 @@ public: srslte_softbuffer_tx_t* get_tx_softbuffer(uint32_t harq_process, uint32_t tb_idx); srslte_softbuffer_rx_t* get_rx_softbuffer(uint32_t tti); - + bool process_pdus(); uint8_t* request_buffer(uint32_t tti, uint32_t len); void process_pdu(uint8_t* pdu, uint32_t nof_bytes, srslte::pdu_queue::channel_t 
channel); diff --git a/srsenb/src/mac/scheduler.cc b/srsenb/src/mac/scheduler.cc index ffe748043..7d9197167 100644 --- a/srsenb/src/mac/scheduler.cc +++ b/srsenb/src/mac/scheduler.cc @@ -19,6 +19,7 @@ * */ +#include #include #include "srslte/srslte.h" @@ -32,14 +33,538 @@ namespace srsenb { - +/******************************************************* + * TTI resource Scheduling Methods + *******************************************************/ + +void sched::tti_sched_t::init(sched* parent_) +{ + parent = parent_; + log_h = parent->log_h; + P = parent->P; + sibs_cfg = parent->cfg.sibs; + + pdcch_grid_t pdcch_alloc; + pdcch_alloc.init(log_h, &parent->regs, parent->common_locations, parent->rar_locations); + tti_alloc.init(log_h, &parent->cfg, pdcch_alloc); +} + +void sched::tti_sched_t::new_tti(uint32_t tti_rx_, uint32_t start_cfi) +{ + tti_alloc.new_tti(tti_rx_, start_cfi); + + // internal state + rar_allocs.clear(); + bc_allocs.clear(); + data_allocs.clear(); + ul_data_allocs.clear(); + + // TTI result + pdcch_mask.reset(); + pdcch_mask.resize(tti_alloc.get_pdcch_grid().nof_cces()); + bzero(&dl_sched_result, sizeof(dl_sched_result)); + bzero(&ul_sched_result, sizeof(ul_sched_result)); +} + +bool sched::tti_sched_t::is_dl_alloc(sched_ue* user) const +{ + for (const auto& a : data_allocs) { + if (a.user_ptr == user) { + return true; + } + } + return false; +} + +bool sched::tti_sched_t::is_ul_alloc(sched_ue* user) const +{ + for (const auto& a : ul_data_allocs) { + if (a.user_ptr == user) { + return true; + } + } + return false; +} + +sched::tti_sched_t::ctrl_code_t sched::tti_sched_t::alloc_dl_ctrl(uint32_t aggr_lvl, uint32_t tbs_bytes, uint16_t rnti) +{ + ctrl_alloc_t ctrl_alloc{}; + + // based on rnti, check which type of alloc + alloc_type_t alloc_type = alloc_type_t::DL_RAR; + if (rnti == SRSLTE_SIRNTI) { + alloc_type = alloc_type_t::DL_BC; + } else if (rnti == SRSLTE_PRNTI) { + alloc_type = alloc_type_t::DL_PCCH; + } + + /* Allocate space in the DL RBG and PDCCH grids */ + tti_grid_t::ctrl_alloc_t ret = tti_alloc.alloc_dl_ctrl(aggr_lvl, alloc_type); + if (not ret.first) { + return {ret.first, ctrl_alloc}; + } + + // Allocation Successful + ctrl_alloc.dci_idx = tti_alloc.get_pdcch_grid().nof_allocs() - 1; + ctrl_alloc.rbg_range = ret.second; + ctrl_alloc.rnti = rnti; + ctrl_alloc.req_bytes = tbs_bytes; + ctrl_alloc.alloc_type = alloc_type; + + return {ret.first, ctrl_alloc}; +} + +alloc_outcome_t sched::tti_sched_t::alloc_bc(uint32_t aggr_lvl, uint32_t sib_idx, uint32_t sib_ntx) +{ + uint32_t sib_len = sibs_cfg[sib_idx].len; + uint32_t rv = get_rvidx(sib_ntx); + ctrl_code_t ret = alloc_dl_ctrl(aggr_lvl, sib_len, SRSLTE_SIRNTI); + if (not ret.first) { + Warning("SCHED: Could not allocate SIB=%d, L=%d, len=%d, cause=%s\n", + sib_idx + 1, + aggr_lvl, + sib_len, + ret.first.to_string()); + return ret.first; + } + + // BC allocation successful + bc_alloc_t bc_alloc(ret.second); + bc_alloc.rv = rv; + bc_alloc.sib_idx = sib_idx; + bc_allocs.push_back(bc_alloc); + + return ret.first; +} + +alloc_outcome_t sched::tti_sched_t::alloc_paging(uint32_t aggr_lvl, uint32_t paging_payload) +{ + ctrl_code_t ret = alloc_dl_ctrl(aggr_lvl, paging_payload, SRSLTE_PRNTI); + if (not ret.first) { + Warning( + "SCHED: Could not allocate Paging with payload length=%d, cause=%s\n", paging_payload, ret.first.to_string()); + return ret.first; + } + + // Paging allocation successful + bc_alloc_t bc_alloc(ret.second); + bc_allocs.push_back(bc_alloc); + + return ret.first; +} + +sched::tti_sched_t::rar_code_t 
+sched::tti_sched_t::alloc_rar(uint32_t aggr_lvl, const dl_sched_rar_t& rar_grant, uint32_t rar_tti, uint32_t buf_rar) +{ + uint16_t rar_sfidx = (uint16_t)((rar_tti + 1) % 10); + + ctrl_code_t ret = alloc_dl_ctrl(aggr_lvl, buf_rar, rar_sfidx); + if (not ret.first) { + Warning("SCHED: Could not allocate RAR for L=%d, cause=%s\n", aggr_lvl, ret.first.to_string()); + return {ret.first, NULL}; + } + + // Allocation successful + rar_alloc_t rar_alloc(ret.second); + rar_alloc.rar_grant = rar_grant; + rar_allocs.push_back(rar_alloc); + + return {ret.first, &rar_allocs.back()}; +} + +alloc_outcome_t sched::tti_sched_t::alloc_dl_user(sched_ue* user, const rbgmask_t& user_mask, uint32_t pid) +{ + if (is_dl_alloc(user)) { + log_h->warning("SCHED: Attempt to assign multiple harq pids to the same user rnti=0x%x\n", user->get_rnti()); + return alloc_outcome_t::ERROR; + } + + // Try to allocate RBGs and DCI + alloc_outcome_t ret = tti_alloc.alloc_dl_data(user, user_mask); + if (not ret) { + return ret; + } + + // Allocation Successful + dl_alloc_t alloc; + alloc.dci_idx = tti_alloc.get_pdcch_grid().nof_allocs() - 1; + alloc.user_ptr = user; + alloc.user_mask = user_mask; + alloc.pid = pid; + data_allocs.push_back(alloc); + + return alloc_outcome_t::SUCCESS; +} + +alloc_outcome_t sched::tti_sched_t::alloc_ul(sched_ue* user, + ul_harq_proc::ul_alloc_t alloc, + tti_sched_t::ul_alloc_t::type_t alloc_type, + uint32_t mcs) +{ + // Check whether user was already allocated + if (is_ul_alloc(user)) { + log_h->warning("SCHED: Attempt to assign multiple ul_harq_proc to the same user rnti=0x%x\n", user->get_rnti()); + return alloc_outcome_t::ERROR; + } + + // Allocate RBGs and DCI space + bool needs_pdcch = alloc_type == ul_alloc_t::ADAPT_RETX or alloc_type == ul_alloc_t::NEWTX; + alloc_outcome_t ret = tti_alloc.alloc_ul_data(user, alloc, needs_pdcch); + if (not ret) { + return ret; + } + + ul_alloc_t ul_alloc; + ul_alloc.type = alloc_type; + ul_alloc.dci_idx = tti_alloc.get_pdcch_grid().nof_allocs() - 1; + ul_alloc.user_ptr = user; + ul_alloc.alloc = alloc; + ul_alloc.mcs = mcs; + ul_data_allocs.push_back(ul_alloc); + + return alloc_outcome_t::SUCCESS; +} + +alloc_outcome_t sched::tti_sched_t::alloc_ul_user(sched_ue* user, ul_harq_proc::ul_alloc_t alloc) +{ + // check whether adaptive/non-adaptive retx/newtx + tti_sched_t::ul_alloc_t::type_t alloc_type; + ul_harq_proc* h = user->get_ul_harq(get_tti_tx_ul()); + bool has_retx = h->has_pending_retx(); + if (has_retx) { + ul_harq_proc::ul_alloc_t prev_alloc = h->get_alloc(); + if (prev_alloc.L == alloc.L and prev_alloc.RB_start == alloc.RB_start) { + alloc_type = ul_alloc_t::NOADAPT_RETX; + } else { + alloc_type = ul_alloc_t::ADAPT_RETX; + } + } else { + alloc_type = ul_alloc_t::NEWTX; + } + + return alloc_ul(user, alloc, alloc_type); +} + +alloc_outcome_t sched::tti_sched_t::alloc_ul_msg3(sched_ue* user, ul_harq_proc::ul_alloc_t alloc, uint32_t mcs) +{ + return alloc_ul(user, alloc, ul_alloc_t::MSG3, mcs); +} +
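The retx classification used in alloc_ul_user() above, restated standalone for clarity (illustrative helper, not part of the patch):

// A UL retx is non-adaptive only if it reuses exactly the previous PRB
// allocation; moving or resizing the grant forces an adaptive retx, which
// needs a new DCI (see ul_alloc_t::needs_pdcch()).
static bool is_noadapt_retx(const ul_harq_proc::ul_alloc_t& prev, const ul_harq_proc::ul_alloc_t& cur)
{
  return prev.RB_start == cur.RB_start and prev.L == cur.L;
}

+void sched::tti_sched_t::set_bc_sched_result(const pdcch_grid_t::alloc_result_t& dci_result) +{ + for (const auto& bc_alloc : bc_allocs) { + sched_interface::dl_sched_bc_t* bc = &dl_sched_result.bc[dl_sched_result.nof_bc_elems]; + + // assign NCCE/L + bc->dci.location = dci_result[bc_alloc.dci_idx]->dci_pos; + + /* Generate DCI format1A */ + prb_range_t prb_range = prb_range_t(bc_alloc.rbg_range, P); + int tbs = generate_format1a( + prb_range.prb_start, prb_range.length(), bc_alloc.req_bytes, bc_alloc.rv, bc_alloc.rnti, &bc->dci); + + // Setup BC/Paging 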
processes + if (bc_alloc.alloc_type == alloc_type_t::DL_BC) { + if (tbs <= (int)bc_alloc.req_bytes) { + log_h->warning("SCHED: Error SIB%d, rbgs=(%d,%d), dci=(%d,%d), len=%d\n", + bc_alloc.sib_idx + 1, + bc_alloc.rbg_range.rbg_start, + bc_alloc.rbg_range.rbg_end, + bc->dci.location.L, + bc->dci.location.ncce, + bc_alloc.req_bytes); + continue; + } + + // Setup BC process + bc->index = bc_alloc.sib_idx; + bc->type = sched_interface::dl_sched_bc_t::BCCH; + bc->tbs = (uint32_t)bc_alloc.req_bytes; + + log_h->info("SCHED: SIB%d, rbgs=(%d,%d), dci=(%d,%d), rv=%d, len=%d, period=%d, mcs=%d\n", + bc_alloc.sib_idx + 1, + bc_alloc.rbg_range.rbg_start, + bc_alloc.rbg_range.rbg_end, + bc->dci.location.L, + bc->dci.location.ncce, + bc_alloc.rv, + bc_alloc.req_bytes, + sibs_cfg[bc_alloc.sib_idx].period_rf, + bc->dci.tb[0].mcs_idx); + } else { + // Paging + if (tbs <= 0) { + log_h->warning("SCHED: Error Paging, rbgs=(%d,%d), dci=(%d,%d)\n", + bc_alloc.rbg_range.rbg_start, + bc_alloc.rbg_range.rbg_end, + bc->dci.location.L, + bc->dci.location.ncce); + continue; + } + + // Setup Paging process + bc->type = sched_interface::dl_sched_bc_t::PCCH; + bc->tbs = (uint32_t)tbs; + + log_h->info("SCHED: PCH, rbgs=(%d,%d), dci=(%d,%d), tbs=%d, mcs=%d\n", + bc_alloc.rbg_range.rbg_start, + bc_alloc.rbg_range.rbg_end, + bc->dci.location.L, + bc->dci.location.ncce, + tbs, + bc->dci.tb[0].mcs_idx); + } + + dl_sched_result.nof_bc_elems++; + } +} + +void sched::tti_sched_t::set_rar_sched_result(const pdcch_grid_t::alloc_result_t& dci_result) +{ + for (const auto& rar_alloc : rar_allocs) { + sched_interface::dl_sched_rar_t* rar = &dl_sched_result.rar[dl_sched_result.nof_rar_elems]; + + // Assign NCCE/L + rar->dci.location = dci_result[rar_alloc.dci_idx]->dci_pos; + + /* Generate DCI format1A */ + prb_range_t prb_range = prb_range_t(rar_alloc.rbg_range, P); + int tbs = + generate_format1a(prb_range.prb_start, prb_range.length(), rar_alloc.req_bytes, 0, rar_alloc.rnti, &rar->dci); + if (tbs <= 0) { + log_h->warning("SCHED: Error RAR, ra_rnti_idx=%d, rbgs=(%d,%d), dci=(%d,%d)\n", + rar_alloc.rnti, + rar_alloc.rbg_range.rbg_start, + rar_alloc.rbg_range.rbg_end, + rar->dci.location.L, + rar->dci.location.ncce); + continue; + } + + // Setup RAR process + rar->tbs = rar_alloc.req_bytes; + rar->nof_grants = rar_alloc.rar_grant.nof_grants; + memcpy(rar->msg3_grant, rar_alloc.rar_grant.msg3_grant, sizeof(dl_sched_rar_grant_t) * rar->nof_grants); + + // Print RAR allocation result + for (uint32_t i = 0; i < rar->nof_grants; ++i) { + const auto& msg3_grant = rar->msg3_grant[i]; + uint32_t pending_tti = (get_tti_tx_dl() + MSG3_DELAY_MS + TX_DELAY) % 10; + uint16_t expected_rnti = parent->pending_msg3[pending_tti].rnti; // FIXME + log_h->info("SCHED: RAR, ra_id=%d, rnti=0x%x, rarnti_idx=%d, rbgs=(%d,%d), dci=(%d,%d), rar_grant_rba=%d, " + "rar_grant_mcs=%d\n", + msg3_grant.ra_id, + expected_rnti, + rar_alloc.rnti, + rar_alloc.rbg_range.rbg_start, + rar_alloc.rbg_range.rbg_end, + rar->dci.location.L, + rar->dci.location.ncce, + msg3_grant.grant.rba, + msg3_grant.grant.trunc_mcs); + } + + dl_sched_result.nof_rar_elems++; + } +} + +void sched::tti_sched_t::set_dl_data_sched_result(const pdcch_grid_t::alloc_result_t& dci_result) +{ + for (const auto& data_alloc : data_allocs) { + sched_interface::dl_sched_data_t* data = &dl_sched_result.data[dl_sched_result.nof_data_elems]; + + // Assign NCCE/L + data->dci.location = dci_result[data_alloc.dci_idx]->dci_pos; + + // Generate DCI Format1/2/2A + sched_ue* user = data_alloc.user_ptr; + 
dl_harq_proc* h = user->get_dl_harq(data_alloc.pid); + uint32_t data_before = user->get_pending_dl_new_data(get_tti_tx_dl()); + srslte_dci_format_t dci_format = user->get_dci_format(); + bool is_newtx = h->is_empty(); + + int tbs = 0; + switch (dci_format) { + case SRSLTE_DCI_FORMAT1: + tbs = user->generate_format1(h, data, get_tti_tx_dl(), get_cfi(), data_alloc.user_mask); + break; + case SRSLTE_DCI_FORMAT2: + tbs = user->generate_format2(h, data, get_tti_tx_dl(), get_cfi(), data_alloc.user_mask); + break; + case SRSLTE_DCI_FORMAT2A: + tbs = user->generate_format2a(h, data, get_tti_tx_dl(), get_cfi(), data_alloc.user_mask); + break; + default: + Error("DCI format (%d) not implemented\n", dci_format); + } + + if (tbs <= 0) { + log_h->warning("SCHED: Error DL %s rnti=0x%x, pid=%d, mask=%s, tbs=%d, buffer=%d\n", + is_newtx ? "tx" : "retx", + user->get_rnti(), + h->get_id(), + data_alloc.user_mask.to_hex().c_str(), + tbs, + user->get_pending_dl_new_data(get_tti_tx_dl())); + continue; + } + + // Print Resulting DL Allocation + log_h->info("SCHED: DL %s rnti=0x%x, pid=%d, mask=0x%s, dci=(%d,%d), n_rtx=%d, tbs=%d, buffer=%d/%d\n", + !is_newtx ? "retx" : "tx", + user->get_rnti(), + h->get_id(), + data_alloc.user_mask.to_hex().c_str(), + data->dci.location.L, + data->dci.location.ncce, + h->nof_retx(0) + h->nof_retx(1), + tbs, + data_before, + user->get_pending_dl_new_data(get_tti_tx_dl())); + + dl_sched_result.nof_data_elems++; + } +} + +void sched::tti_sched_t::set_ul_sched_result(const pdcch_grid_t::alloc_result_t& dci_result) +{ + /* Set UL data DCI locs and format */ + for (const auto& ul_alloc : ul_data_allocs) { + sched_interface::ul_sched_data_t* pusch = &ul_sched_result.pusch[ul_sched_result.nof_dci_elems]; + + sched_ue* user = ul_alloc.user_ptr; + + srslte_dci_location_t cce_range = {0, 0}; + if (ul_alloc.needs_pdcch()) { + cce_range = dci_result[ul_alloc.dci_idx]->dci_pos; + } + + /* Set fixed mcs if specified */ + int fixed_mcs = (ul_alloc.type == ul_alloc_t::MSG3) ? ul_alloc.mcs : -1; + + /* Generate DCI Format1A */ + uint32_t pending_data_before = user->get_pending_ul_new_data(get_tti_tx_ul()); + int tbs = + user->generate_format0(pusch, get_tti_tx_ul(), ul_alloc.alloc, ul_alloc.needs_pdcch(), cce_range, fixed_mcs); + + ul_harq_proc* h = user->get_ul_harq(get_tti_tx_ul()); + if (tbs <= 0) { + log_h->warning("SCHED: Error %s %s rnti=0x%x, pid=%d, dci=(%d,%d), prb=(%d,%d), tbs=%d, bsr=%d\n", + ul_alloc.type == ul_alloc_t::MSG3 ? "Msg3" : "UL", + ul_alloc.is_retx() ? "retx" : "tx", + user->get_rnti(), + h->get_id(), + pusch->dci.location.L, + pusch->dci.location.ncce, + ul_alloc.alloc.RB_start, + ul_alloc.alloc.RB_start + ul_alloc.alloc.L, + tbs, + user->get_pending_ul_new_data(get_tti_tx_ul())); + continue; + } + + // Allocation was successful + if (ul_alloc.type == ul_alloc_t::NEWTX) { + // Un-trigger SR + user->unset_sr(); + } + + // Print Resulting UL Allocation + log_h->info("SCHED: %s %s rnti=0x%x, pid=%d, dci=(%d,%d), prb=(%d,%d), n_rtx=%d, tbs=%d, bsr=%d (%d-%d)\n", + ul_alloc.is_msg3() ? "Msg3" : "UL", + ul_alloc.is_retx() ? 
"retx" : "tx", + user->get_rnti(), + h->get_id(), + pusch->dci.location.L, + pusch->dci.location.ncce, + ul_alloc.alloc.RB_start, + ul_alloc.alloc.RB_start + ul_alloc.alloc.L, + h->nof_retx(0), + tbs, + user->get_pending_ul_new_data(get_tti_tx_ul()), + pending_data_before, + user->get_pending_ul_old_data()); + + ul_sched_result.nof_dci_elems++; + } +} + +void sched::tti_sched_t::generate_dcis() +{ + /* Pick one of the possible DCI masks */ + pdcch_grid_t::alloc_result_t dci_result; + // tti_alloc.get_pdcch_grid().print_result(); + tti_alloc.get_pdcch_grid().get_allocs(&dci_result, &pdcch_mask); + + /* Register final CFI */ + dl_sched_result.cfi = tti_alloc.get_pdcch_grid().get_cfi(); + + /* Generate DCI formats and fill sched_result structs */ + set_bc_sched_result(dci_result); + + set_rar_sched_result(dci_result); + + set_dl_data_sched_result(dci_result); + + set_ul_sched_result(dci_result); +} + +uint32_t sched::tti_sched_t::get_nof_ctrl_symbols() const +{ + return tti_alloc.get_cfi() + (parent->cfg.cell.nof_prb <= 10) ? 1 : 0; +} + +int sched::tti_sched_t::generate_format1a( + uint32_t rb_start, uint32_t l_crb, uint32_t tbs_bytes, uint32_t rv, uint16_t rnti, srslte_dci_dl_t* dci) +{ + /* Calculate I_tbs for this TBS */ + int tbs = tbs_bytes * 8; + int i; + int mcs = -1; + for (i = 0; i < 27; i++) { + if (srslte_ra_tbs_from_idx(i, 2) >= tbs) { + dci->type2_alloc.n_prb1a = srslte_ra_type2_t::SRSLTE_RA_TYPE2_NPRB1A_2; + mcs = i; + tbs = srslte_ra_tbs_from_idx(i, 2); + break; + } else if (srslte_ra_tbs_from_idx(i, 3) >= tbs) { + dci->type2_alloc.n_prb1a = srslte_ra_type2_t::SRSLTE_RA_TYPE2_NPRB1A_3; + mcs = i; + tbs = srslte_ra_tbs_from_idx(i, 3); + break; + } + } + if (i == 28) { + Error("Can't allocate Format 1A for TBS=%d\n", tbs); + return -1; + } + + Debug("ra_tbs=%d/%d, tbs_bytes=%d, tbs=%d, mcs=%d\n", + srslte_ra_tbs_from_idx(mcs, 2), + srslte_ra_tbs_from_idx(mcs, 3), + tbs_bytes, + tbs, + mcs); + + dci->alloc_type = SRSLTE_RA_ALLOC_TYPE2; + dci->type2_alloc.mode = srslte_ra_type2_t::SRSLTE_RA_TYPE2_LOC; + dci->type2_alloc.riv = srslte_ra_type2_to_riv(l_crb, rb_start, parent->cfg.cell.nof_prb); + dci->pid = 0; + dci->tb[0].mcs_idx = mcs; + dci->tb[0].rv = rv; + dci->format = SRSLTE_DCI_FORMAT1A; + dci->rnti = rnti; + + return tbs; +} + /******************************************************* * * Initialization and sched configuration functions * *******************************************************/ -sched::sched() : bc_aggr_level(0), rar_aggr_level(0), avail_rbg(0), P(0), start_rbg(0), si_n_rbg(0), rar_n_rbg(0), - nof_rbg(0), sf_idx(0), sfn(0), current_cfi(0) { +sched::sched() : bc_aggr_level(0), rar_aggr_level(0), P(0), si_n_rbg(0), rar_n_rbg(0), nof_rbg(0) +{ current_tti = 0; log_h = NULL; dl_metric = NULL; @@ -71,12 +596,12 @@ sched::~sched() pthread_mutex_destroy(&sched_mutex); } -void sched::init(rrc_interface_mac *rrc_, srslte::log* log) +void sched::init(rrc_interface_mac* rrc_, srslte::log* log) { - sched_cfg.pdsch_max_mcs = 28; - sched_cfg.pdsch_mcs = -1; - sched_cfg.pusch_max_mcs = 28; - sched_cfg.pusch_mcs = -1; + sched_cfg.pdsch_max_mcs = 28; + sched_cfg.pdsch_mcs = -1; + sched_cfg.pusch_max_mcs = 28; + sched_cfg.pusch_mcs = -1; sched_cfg.nof_ctrl_symbols = 3; log_h = log; rrc = rrc_; @@ -85,8 +610,8 @@ void sched::init(rrc_interface_mac *rrc_, srslte::log* log) int sched::reset() { - bzero(pending_msg3, sizeof(pending_msg3_t)*10); - bzero(pending_rar, sizeof(sched_rar_t)*SCHED_MAX_PENDING_RAR); + bzero(pending_msg3, sizeof(pending_msg3_t) * 10); + bzero(pending_rar, 
sizeof(sched_rar_t) * SCHED_MAX_PENDING_RAR); bzero(pending_sibs, sizeof(sched_sib_t) * MAX_SIBS); configured = false; pthread_rwlock_wrlock(&rwlock); @@ -104,8 +629,10 @@ void sched::set_sched_cfg(sched_interface::sched_args_t* sched_cfg_) void sched::set_metric(sched::metric_dl* dl_metric_, sched::metric_ul* ul_metric_) { - dl_metric = dl_metric_; - ul_metric = ul_metric_; + dl_metric = dl_metric_; + ul_metric = ul_metric_; + dl_metric->set_log(log_h); + ul_metric->set_log(log_h); } int sched::cell_cfg(sched_interface::cell_cfg_t* cell_cfg) @@ -118,7 +645,7 @@ int sched::cell_cfg(sched_interface::cell_cfg_t* cell_cfg) cfg = *cell_cfg; - // Get DCI locations + // Get DCI locations if (srslte_regs_init(®s, cfg.cell)) { Error("Getting DCI locations\n"); return SRSLTE_ERROR; @@ -128,22 +655,45 @@ int sched::cell_cfg(sched_interface::cell_cfg_t* cell_cfg) si_n_rbg = srslte::ceil_div(4, P); rar_n_rbg = srslte::ceil_div(3, P); nof_rbg = srslte::ceil_div(cfg.cell.nof_prb, P); - sched_vars.init(this); + pucch_mask.resize(cfg.cell.nof_prb); + if (cfg.nrb_pucch > 0) { + pucch_mask.fill(0, (uint32_t)cfg.nrb_pucch); + pucch_mask.fill(cfg.cell.nof_prb - cfg.nrb_pucch, cfg.cell.nof_prb); + } + prach_mask.resize(cfg.cell.nof_prb); + prach_mask.fill(cfg.prach_freq_offset, cfg.prach_freq_offset + 6); // Compute Common locations for DCI for each CFI - for (uint32_t cfi=0;cfi<3;cfi++) { - generate_cce_location(®s, &common_locations[cfi], cfi+1); + for (uint32_t cfi = 0; cfi < 3; cfi++) { + generate_cce_location(®s, &common_locations[cfi], cfi + 1); } - // Compute UE locations for RA-RNTI - for (int cfi=0;cfi<3;cfi++) { - for (int sf_idx=0;sf_idx<10;sf_idx++) { - uint16_t ra_rnti = 1+sf_idx; - generate_cce_location(®s, &rar_locations[cfi][sf_idx], cfi+1, sf_idx); + // Compute UE locations for RA-RNTI + for (uint32_t cfi = 0; cfi < 3; cfi++) { + for (uint32_t sf_idx = 0; sf_idx < 10; sf_idx++) { + generate_cce_location(®s, &rar_locations[cfi][sf_idx], cfi + 1, sf_idx); } - } + } + + // Initiate the tti_scheduler for each TTI + for (uint32_t i = 0; i < nof_sched_ttis; ++i) { + pdcch_grid_t pdcch_alloc; + pdcch_alloc.init(log_h, ®s, common_locations, rar_locations); + tti_scheds[i].init(this); + } configured = true; + // PRACH has to fit within the PUSCH space + bool invalid_prach = cfg.cell.nof_prb == 6 and (cfg.prach_freq_offset + 6 > cfg.cell.nof_prb); + invalid_prach |= cfg.cell.nof_prb > 6 and ((cfg.prach_freq_offset + 6) > (cfg.cell.nof_prb - cfg.nrb_pucch) or + (int) cfg.prach_freq_offset < cfg.nrb_pucch); + if (invalid_prach) { + log_h->error("Invalid PRACH configuration: frequency offset=%d outside bandwidth limits\n", cfg.prach_freq_offset); + log_h->console("Invalid PRACH configuration: frequency offset=%d outside bandwidth limits\n", + cfg.prach_freq_offset); + return -1; + } + if (common_locations[sched_cfg.nof_ctrl_symbols - 1].nof_loc[2] == 0) { Error("SCHED: Current cfi=%d is not valid for broadcast (check scheduler.nof_ctrl_symbols in conf file).\n", sched_cfg.nof_ctrl_symbols); @@ -162,9 +712,9 @@ int sched::cell_cfg(sched_interface::cell_cfg_t* cell_cfg) * *******************************************************/ -int sched::ue_cfg(uint16_t rnti, sched_interface::ue_cfg_t *ue_cfg) +int sched::ue_cfg(uint16_t rnti, sched_interface::ue_cfg_t* ue_cfg) { - // Add or config user + // Add or config user pthread_rwlock_rdlock(&rwlock); ue_db[rnti].set_cfg(rnti, ue_cfg, &cfg, ®s, log_h); ue_db[rnti].set_max_mcs(sched_cfg.pusch_max_mcs, sched_cfg.pdsch_max_mcs); @@ -188,7 +738,7 @@ int 
sched::ue_rem(uint16_t rnti) return ret; } -bool sched::ue_exists(uint16_t rnti) +bool sched::ue_exists(uint16_t rnti) { pthread_rwlock_rdlock(&rwlock); bool ret = (ue_db.count(rnti) == 1); @@ -207,7 +757,7 @@ void sched::phy_config_enabled(uint16_t rnti, bool enabled) pthread_rwlock_unlock(&rwlock); } -int sched::bearer_ue_cfg(uint16_t rnti, uint32_t lc_id, sched_interface::ue_bearer_cfg_t *cfg) +int sched::bearer_ue_cfg(uint16_t rnti, uint32_t lc_id, sched_interface::ue_bearer_cfg_t* cfg) { int ret = 0; pthread_rwlock_rdlock(&rwlock); @@ -375,18 +925,17 @@ int sched::dl_cqi_info(uint32_t tti, uint16_t rnti, uint32_t cqi_value) int sched::dl_rach_info(uint32_t tti, uint32_t ra_id, uint16_t rnti, uint32_t estimated_size) { - for (int i=0;iget_tti_rx()) { + uint32_t start_cfi = sched_cfg.nof_ctrl_symbols - ((cfg.cell.nof_prb >= 10) ? 0 : 1); + tti_sched->new_tti(tti_rx, start_cfi); + + pthread_mutex_lock(&sched_mutex); // Protects access to pending_rar[], pending_msg3[], pending_sibs[], rlc buffers + pthread_rwlock_rdlock(&rwlock); + + /* Schedule PHICH */ + generate_phich(tti_sched); + + /* Schedule DL */ + generate_dl_sched(tti_sched); + + /* Schedule UL */ + generate_ul_sched(tti_sched); + + /* Generate DCI */ + tti_sched->generate_dcis(); + + /* reset PIDs with pending data or blocked */ + for (auto& user : ue_db) { + user.second.reset_pending_pids(tti_rx); + } + + pthread_rwlock_unlock(&rwlock); + pthread_mutex_unlock(&sched_mutex); + } + + return tti_sched; +} + // Schedules Broadcast messages (SIB) -int sched::dl_sched_bc(dl_sched_bc_t bc[MAX_BC_LIST]) +void sched::dl_sched_bc(tti_sched_t* tti_sched) { - int nof_bc_elems = 0; - for (int i=0;i 0) { - x = (i-1)*cfg.si_window_ms; - sf = x%10; - } - if ((sfn%(cfg.sibs[i].period_rf)) == x/10 && sf_idx == sf) { - pending_sibs[i].is_in_window = true; - pending_sibs[i].window_start = current_tti; - pending_sibs[i].n_tx = 0; + /* Activate/Deactivate SI windows */ + for (int i = 0; i < MAX_SIBS; i++) { + // There is SIB data + if (cfg.sibs[i].len == 0) { + continue; + } + + if (!pending_sibs[i].is_in_window) { + uint32_t sf = 5; + uint32_t x = 0; + if (i > 0) { + x = (i - 1) * cfg.si_window_ms; + sf = x % 10; + } + if ((tti_sched->get_sfn() % (cfg.sibs[i].period_rf)) == x / 10 && tti_sched->get_sf_idx() == sf) { + pending_sibs[i].is_in_window = true; + pending_sibs[i].window_start = tti_sched->get_tti_tx_dl(); + pending_sibs[i].n_tx = 0; + } + } else { + if (i > 0) { + if (srslte_tti_interval(tti_sched->get_tti_tx_dl(), pending_sibs[i].window_start) > cfg.si_window_ms) { + // the si window has passed + pending_sibs[i].is_in_window = false; + pending_sibs[i].window_start = 0; } } else { - if (i > 0) { - if (srslte_tti_interval(current_tti, pending_sibs[i].window_start) > cfg.si_window_ms) { - pending_sibs[i].is_in_window = false; - pending_sibs[i].window_start = 0; - } - } else { - // SIB1 is always in window - if (pending_sibs[0].n_tx == 4) { - pending_sibs[0].n_tx = 0; - } + // SIB1 is always in window + if (pending_sibs[0].n_tx == 4) { + pending_sibs[0].n_tx = 0; } } } } - uint32_t tti_rx = (current_tti + 10240 - TX_DELAY) % 10240; - sched_vars::tti_vars_t* tti_vars = &sched_vars.tti_vars(tti_rx); - for (int i=0;i si_n_rbg) - { - uint32_t nof_tx = 4; - if (i > 0) { - if (cfg.si_window_ms <= 10) { - nof_tx = 1; - } else if (cfg.si_window_ms <= 20) { - nof_tx = 2; - } else if (cfg.si_window_ms <= 30) { - nof_tx = 3; - } else { - nof_tx = 4; - } - } - uint32_t n_sf = (current_tti-pending_sibs[i].window_start); - if ((i == 0 && (sfn%2) == 0 && 
sf_idx == 5) || - (i > 0 && n_sf >= (cfg.si_window_ms/nof_tx)*pending_sibs[i].n_tx && sf_idx==9)) - { - uint32_t rv = get_rvidx(pending_sibs[i].n_tx); - - // Try to allocate DCI first - if (generate_dci(&bc[nof_bc_elems].dci.location, &common_locations[current_cfi - 1], bc_aggr_level, tti_vars)) { - int tbs = - generate_format1a(start_rbg * P, si_n_rbg * P, cfg.sibs[i].len, rv, SRSLTE_SIRNTI, &bc[nof_bc_elems].dci); - if (tbs >= (int) cfg.sibs[i].len) { - bc[nof_bc_elems].index = i; - bc[nof_bc_elems].type = sched_interface::dl_sched_bc_t::BCCH; - bc[nof_bc_elems].tbs = tbs; - - Debug("SCHED: SIB%d, start_rb=%d, n_rb=%d, rv=%d, len=%d, period=%d, mcs=%d\n", - i + 1, - start_rbg * P, - si_n_rbg * P, - rv, - cfg.sibs[i].len, - cfg.sibs[i].period_rf, - bc[nof_bc_elems].dci.tb[0].mcs_idx); - - pending_sibs[i].n_tx++; - - nof_bc_elems++; - avail_rbg -= si_n_rbg; - start_rbg += si_n_rbg; - } else { - Error("Could not allocate DCI Format1A for SIB%d, len=%d\n", i+1, cfg.sibs[i].len); - } - } else { - Warning("SCHED: Could not schedule DCI for SIB=%d, L=%d\n", i+1, bc_aggr_level); - } + /* Allocate DCIs and RBGs for each SIB */ + for (int i = 0; i < MAX_SIBS; i++) { + if (cfg.sibs[i].len && pending_sibs[i].is_in_window && pending_sibs[i].n_tx < 4) { + uint32_t nof_tx = (i > 0) ? SRSLTE_MIN(CEILFRAC(cfg.si_window_ms, 10), 4) : 4; + uint32_t n_sf = (tti_sched->get_tti_tx_dl() - pending_sibs[i].window_start); + + // Check if there is any SIB to tx + bool sib1_flag = i == 0 and (tti_sched->get_sfn() % 2) == 0 and tti_sched->get_sf_idx() == 5; + bool other_sibs_flag = + i > 0 and n_sf >= (cfg.si_window_ms / nof_tx) * pending_sibs[i].n_tx and tti_sched->get_sf_idx() == 9; + if (!sib1_flag and !other_sibs_flag) { + continue; } - } + + // Schedule SIB + tti_sched->alloc_bc(bc_aggr_level, i, pending_sibs[i].n_tx); + pending_sibs[i].n_tx++; + } } - // Schedule Paging + /* Allocate DCIs and RBGs for paging */ if (rrc) { uint32_t paging_payload = 0; - if (rrc->is_paging_opportunity(current_tti, &paging_payload)) { - if (avail_rbg > si_n_rbg && paging_payload) - { - if (generate_dci(&bc[nof_bc_elems].dci.location, &common_locations[current_cfi - 1], bc_aggr_level, tti_vars)) { - int tbs = - generate_format1a(start_rbg * P, si_n_rbg * P, paging_payload, 0, SRSLTE_PRNTI, &bc[nof_bc_elems].dci); - if (tbs > 0) { - - bc[nof_bc_elems].type = sched_interface::dl_sched_bc_t::PCCH; - bc[nof_bc_elems].tbs = tbs; - nof_bc_elems++; - - Info("SCHED: PCH start_rb=%d, tbs=%d, mcs=%d\n", start_rbg, tbs, bc[nof_bc_elems].dci.tb[0].mcs_idx); - - avail_rbg -= si_n_rbg; - start_rbg += si_n_rbg; - - } - } - } + if (rrc->is_paging_opportunity(current_tti, &paging_payload) and paging_payload) { + tti_sched->alloc_paging(bc_aggr_level, paging_payload); } } - - return nof_bc_elems; } bool is_in_tti_interval(uint32_t tti, uint32_t tti1, uint32_t tti2) @@ -621,480 +1151,230 @@ bool is_in_tti_interval(uint32_t tti, uint32_t tti1, uint32_t tti2) } // Schedules RAR -int sched::dl_sched_rar(dl_sched_rar_t rar[MAX_RAR_LIST]) +void sched::dl_sched_rar(tti_sched_t* tti_sched) { - - int nof_rar_elems = 0; for (uint32_t i = 0; i < SCHED_MAX_PENDING_RAR; i++) { - if (pending_rar[i].buf_rar > 0 && avail_rbg >= rar_n_rbg) { - /* Check if we are still within the RAR window, otherwise discard it */ - if (is_in_tti_interval( - current_tti, pending_rar[i].rar_tti + 3, pending_rar[i].rar_tti + 3 + cfg.prach_rar_window)) { - // Try to schedule DCI for this RAR - uint32_t tti_rx = (current_tti + 10240 - TX_DELAY) % 10240; - sched_vars::tti_vars_t* 
tti_vars = &sched_vars.tti_vars(tti_rx); - if (generate_dci( - &rar[nof_rar_elems].dci.location, &rar_locations[current_cfi - 1][sf_idx], rar_aggr_level, tti_vars)) { - - /* Find all pending RARs with same transmission TTI */ - uint32_t tti = pending_rar[i].rar_tti; - uint32_t rar_sfidx = (tti+1)%10; - uint32_t buf_rar = 0; - uint32_t nof_grants = 0; - for (int j = 0; j < SCHED_MAX_PENDING_RAR; j++) { - if (pending_rar[j].rar_tti == tti) { - - uint32_t L_prb = 3; - uint32_t n_prb = 2; - - if (nof_grants == 0) { - bzero(&rar[nof_rar_elems].msg3_grant[nof_grants], sizeof(srslte_dci_rar_grant_t)); - rar[nof_rar_elems].msg3_grant[nof_grants].grant.tpc_pusch = 3; - rar[nof_rar_elems].msg3_grant[nof_grants].grant.trunc_mcs = 0; - rar[nof_rar_elems].msg3_grant[nof_grants].grant.rba = - srslte_ra_type2_to_riv(L_prb, n_prb, cfg.cell.nof_prb); - rar[nof_rar_elems].msg3_grant[nof_grants].ra_id = pending_rar[j].ra_id; - buf_rar += pending_rar[i].buf_rar; - pending_rar[j].buf_rar = 0; - pending_rar[j].rar_tti = 0; - - // Save UL resources - uint32_t pending_tti = (current_tti + MSG3_DELAY_MS + TX_DELAY) % 10; - pending_msg3[pending_tti].enabled = true; - pending_msg3[pending_tti].rnti = pending_rar[j].rnti; - pending_msg3[pending_tti].L = L_prb; - pending_msg3[pending_tti].n_prb = n_prb; - pending_msg3[pending_tti].mcs = rar[nof_rar_elems].msg3_grant[nof_grants].grant.trunc_mcs; - - log_h->info("SCHED: RAR, ra_id=%d, rnti=0x%x, rarnti_idx=%d, start_rb=%d, n_rb=%d, rar_grant_rba=%d, " - "rar_grant_mcs=%d\n", - pending_rar[j].ra_id, - pending_rar[j].rnti, - rar_sfidx, - start_rbg * P, - rar_n_rbg * P, - rar[nof_rar_elems].msg3_grant[nof_grants].grant.rba, - rar[nof_rar_elems].msg3_grant[nof_grants].grant.trunc_mcs); - } else { - log_h->warning("Only 1 RA is responded at a time. 
Found %d for TTI=%d\n", nof_grants + 1, tti);
-          }
-          nof_grants++;
-        }
-      }
-
-      rar[nof_rar_elems].nof_grants = nof_grants;
-
-      if (generate_format1a(start_rbg * P, rar_n_rbg * P, buf_rar, 0, rar_sfidx, &rar[nof_rar_elems].dci) >= 0) {
-        rar[nof_rar_elems].tbs = buf_rar;
-        nof_rar_elems++;
-        avail_rbg -= rar_n_rbg;
-        start_rbg += rar_n_rbg;
-      } else {
-        Error("SCHED: Allocating Format1A dci\n");
-      }
-
-    } else {
-      log_h->warning("SCHED: Could not schedule DCI for RAR tti=%d, L=%d\n", pending_rar[i].rar_tti, rar_aggr_level);
-    }
-  } else {
-    log_h->console("SCHED: Could not transmit RAR within the window (RA TTI=%d, Window=%d, Now=%d)\n",
-                   pending_rar[i].rar_tti,
-                   cfg.prach_rar_window,
-                   current_tti);
-    log_h->error("SCHED: Could not transmit RAR within the window (RA TTI=%d, Window=%d, Now=%d)\n",
+    // check if the RAR is inactive or was already scheduled
+    if (pending_rar[i].buf_rar == 0) {
+      continue;
+    }
+    // Check if we are still within the RAR window, otherwise discard it
+    if (not is_in_tti_interval(tti_sched->get_tti_tx_dl(),
+                               pending_rar[i].rar_tti + 3,
+                               pending_rar[i].rar_tti + 3 + cfg.prach_rar_window)) {
+      log_h->console("SCHED: Could not transmit RAR within the window (RA TTI=%d, Window=%d, Now=%d)\n",
                      pending_rar[i].rar_tti,
                      cfg.prach_rar_window,
                      current_tti);
-    pending_rar[i].buf_rar = 0;
-    pending_rar[i].rar_tti = 0;
+      log_h->error("SCHED: Could not transmit RAR within the window (RA TTI=%d, Window=%d, Now=%d)\n",
+                   pending_rar[i].rar_tti,
+                   cfg.prach_rar_window,
+                   current_tti);
+      pending_rar[i].buf_rar = 0;
+      pending_rar[i].rar_tti = 0;
+      continue;
+    }
+
+    /* Group pending RARs with same transmission TTI */
+    uint32_t       tti     = pending_rar[i].rar_tti;
+    uint32_t       buf_rar = 0;
+    dl_sched_rar_t rar_grant;
+    uint32_t       L_prb = 3;
+    uint32_t       n_prb = 2;
+    bzero(&rar_grant, sizeof(rar_grant));
+    uint32_t rba = srslte_ra_type2_to_riv(L_prb, n_prb, cfg.cell.nof_prb);
+    for (uint32_t j = i; j < SCHED_MAX_PENDING_RAR; ++j) {
+      if (pending_rar[j].rar_tti != pending_rar[i].rar_tti) {
+        continue;
+      }
+      if (rar_grant.nof_grants > 0) {
+        log_h->warning("Only one RA is responded to at a time. 
Found %d for TTI=%d\n", rar_grant.nof_grants + 1, tti); + continue; + } + + dl_sched_rar_grant_t* grant = &rar_grant.msg3_grant[rar_grant.nof_grants]; + grant->grant.tpc_pusch = 3; + grant->grant.trunc_mcs = 0; + grant->grant.rba = rba; + grant->ra_id = pending_rar[j].ra_id; + buf_rar += pending_rar[j].buf_rar; + rar_grant.nof_grants++; + } + + // Try to schedule DCI + RBGs for RAR Grant + tti_sched_t::rar_code_t ret = tti_sched->alloc_rar(rar_aggr_level, rar_grant, pending_rar[i].rar_tti, buf_rar); + if (not ret.first) { + continue; + } + + // Schedule Msg3 + uint32_t pending_tti = (tti_sched->get_tti_tx_dl() + MSG3_DELAY_MS + TX_DELAY) % 10; + pending_msg3[pending_tti].enabled = true; + pending_msg3[pending_tti].rnti = pending_rar[i].rnti; // FIXME + pending_msg3[pending_tti].L = L_prb; + pending_msg3[pending_tti].n_prb = n_prb; + dl_sched_rar_grant_t* last_msg3 = &rar_grant.msg3_grant[rar_grant.nof_grants - 1]; + pending_msg3[pending_tti].mcs = last_msg3->grant.trunc_mcs; + + // Reset allocated RARs + for (uint32_t j = i; j < SCHED_MAX_PENDING_RAR; ++j) { + if (pending_rar[j].rar_tti == pending_rar[i].rar_tti) { + pending_rar[j].buf_rar = 0; + pending_rar[j].rar_tti = 0; } } } - return nof_rar_elems; } -// Schedules data to users -int sched::dl_sched_data(dl_sched_data_t data[MAX_DATA_LIST]) +void sched::dl_sched_data(tti_sched_t* tti_sched) { - // reset global RBG mask - dl_mask.resize(nof_rbg); - dl_mask.reset(); - dl_mask.fill(0, start_rbg); - fail_dci_alloc = false; - // dl_mask.fill(dl_mask.size()-start_rbg, dl_mask.size()); - // NOTE: In case of 6 PRBs, do not transmit if there is going to be a PRACH in the UL to avoid collisions - uint32_t tti_rx_ack = TTI_RX_ACK(TTI_SUB(current_tti, TX_DELAY)); + uint32_t tti_rx_ack = TTI_RX_ACK(tti_sched->get_tti_rx()); uint32_t pending_tti = tti_rx_ack % 10; if (cfg.cell.nof_prb == 6 and (srslte_prach_tti_opportunity_config_fdd(cfg.prach_config, tti_rx_ack, -1) or pending_msg3[pending_tti].enabled)) { - start_rbg = nof_rbg; - avail_rbg = 0; - log_h->debug("SCHED: Skip tti=%d, as it would cause a conflict between PUCCH and PRACH\n", current_tti); - } - - typedef std::map::iterator it_t; - uint32_t nof_ctrl_symbols = SRSLTE_NOF_CTRL_SYMBOLS(cfg.cell, current_cfi); - - // clear previous user allocations - for (it_t it = ue_db.begin(); it != ue_db.end(); ++it) { - it->second.set_dl_alloc(NULL); + tti_sched->get_dl_mask().fill(0, tti_sched->get_dl_mask().size()); } // call scheduler metric to fill RB grid - dl_metric->sched_users(ue_db, &dl_mask, nof_ctrl_symbols, current_tti); - - int nof_data_elems = 0; - for (it_t iter = ue_db.begin(); iter != ue_db.end(); ++iter) { - sched_ue* user = (sched_ue*)&iter->second; - uint16_t rnti = (uint16_t) iter->first; - - dl_harq_proc* h = user->get_dl_alloc(); - srslte_dci_format_t dci_format = user->get_dci_format(); - - if (h) { - uint32_t data_before = user->get_pending_dl_new_data(current_tti); - uint32_t aggr_level = user->get_aggr_level(srslte_dci_format_sizeof(&cfg.cell, NULL, NULL, dci_format)); - // Try to schedule DCI first - uint32_t tti_rx = (current_tti + 10240 - TX_DELAY) % 10240; - sched_vars::tti_vars_t* tti_vars = &sched_vars.tti_vars(tti_rx); - if (generate_dci(&data[nof_data_elems].dci.location, - user->get_locations(current_cfi, sf_idx), - aggr_level, - tti_vars, - user)) { - bool is_newtx = h->is_empty(0) && h->is_empty(1); - int tbs = 0; - switch(dci_format) { - case SRSLTE_DCI_FORMAT1: - tbs = user->generate_format1(h, &data[nof_data_elems], current_tti, current_cfi); - break; - case 
SRSLTE_DCI_FORMAT2: - tbs = user->generate_format2(h, &data[nof_data_elems], current_tti, current_cfi); - break; - case SRSLTE_DCI_FORMAT2A: - tbs = user->generate_format2a(h, &data[nof_data_elems], current_tti, current_cfi); - break; - default: - Error("DCI format (%d) not implemented\n", dci_format); - } - if (tbs > 0) { - - log_h->info("SCHED: DL %s rnti=0x%x, pid=%d, mask=%s, dci=(%d,%d), n_rtx=%d, tbs=%d, buffer=%d/%d\n", - !is_newtx ? "retx" : "tx", - rnti, - h->get_id(), - h->get_rbgmask().to_hex().c_str(), - data[nof_data_elems].dci.location.L, - data[nof_data_elems].dci.location.ncce, - h->nof_retx(0) + h->nof_retx(1), - tbs, - data_before, - user->get_pending_dl_new_data(current_tti)); - nof_data_elems++; - } else { - log_h->warning("SCHED: Error DL %s rnti=0x%x, pid=%d, mask=%s, dci=%d,%d, tbs=%d, buffer=%d\n", - !is_newtx ? "retx" : "tx", - rnti, - h->get_id(), - h->get_rbgmask().to_hex().c_str(), - data[nof_data_elems].dci.location.L, - data[nof_data_elems].dci.location.ncce, - tbs, - user->get_pending_dl_new_data(current_tti)); - } - } else { - for (uint32_t tb = 0; tb < SRSLTE_MAX_TB; tb++) { - h->reset(tb); - } - uint32_t tti_rx = TTI_SUB(current_tti, TX_DELAY); - Warning("SCHED: Could not schedule DL DCI for rnti=0x%x, pid=%d, cfi=%d, used_cce=%s\n", - rnti, - h->get_id(), - current_cfi, - sched_vars.tti_vars(tti_rx).used_cce.to_string().c_str()); - fail_dci_alloc = true; - } - } - - // Reset blocked PIDs - user->reset_timeout_dl_harq(current_tti); - - } - - return nof_data_elems; -} + dl_metric->sched_users(ue_db, tti_sched); +} -// Downlink Scheduler -int sched::dl_sched(uint32_t tti, sched_interface::dl_sched_res_t* sched_result) +// Compute DL scheduler result +int sched::generate_dl_sched(tti_sched_t* tti_sched) { - if (!configured) { - return 0; - } - - /* If ul_sched() not yet called this tti, reset CCE state */ - uint32_t tti_rx = (tti + 10240 - TX_DELAY) % 10240; - sched_vars::tti_vars_t* tti_vars = &sched_vars.new_tti(tti_rx); - - bzero(sched_result, sizeof(sched_interface::dl_sched_res_t)); - - pthread_mutex_lock(&sched_mutex); - pthread_rwlock_rdlock(&rwlock); - /* Initialize variables */ - current_tti = tti; - sfn = tti / 10; - sf_idx = tti % 10; - avail_rbg = nof_rbg; - start_rbg = 0; - current_cfi = sched_cfg.nof_ctrl_symbols; + current_tti = tti_sched->get_tti_tx_dl(); bc_aggr_level = 2; rar_aggr_level = 2; /* Schedule Broadcast data */ - sched_result->nof_bc_elems += dl_sched_bc(sched_result->bc); + dl_sched_bc(tti_sched); /* Schedule RAR */ - sched_result->nof_rar_elems += dl_sched_rar(sched_result->rar); + dl_sched_rar(tti_sched); /* Schedule pending RLC data */ - sched_result->nof_data_elems += dl_sched_data(sched_result->data); + dl_sched_data(tti_sched); - pthread_rwlock_unlock(&rwlock); - pthread_mutex_unlock(&sched_mutex); - - /* Set CFI */ - sched_result->cfi = current_cfi; - return 0; } -// Uplink sched - -void sched::ul_sched_msg3() +void sched::generate_phich(tti_sched_t* tti_sched) { - uint32_t pending_tti = current_tti % 10; - if (pending_msg3[pending_tti].enabled) { - ul_harq_proc::ul_alloc_t msg3 = {pending_msg3[pending_tti].n_prb, pending_msg3[pending_tti].L}; - if (ul_mask.any(msg3)) { - log_h->warning("SCHED: Could not allocate msg3 within (%d,%d)\n", msg3.RB_start, msg3.RB_start + msg3.L); - } - ul_mask.fill(msg3); - uint16_t rnti = pending_msg3[pending_tti].rnti; - if (ue_db.count(rnti)) { // TODO: is this needed? 
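// [Editor's note] Answering the TODO above: yes, the check is needed. The UE can be removed
// between the RAR grant and this Msg3 opportunity; the rewritten ul_sched_msg3() below keeps
// the rnti lookup and warns that the "Msg3 allocated for user ... no longer exists" when it fails.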
- sched_ue* user = &ue_db[rnti]; - ul_harq_proc* h = user->get_ul_harq(current_tti); - h->set_alloc(msg3); - h->set_rar_mcs(pending_msg3[pending_tti].mcs); - pending_msg3[pending_tti].enabled = false; - user->set_ul_alloc(h); - } else { - log_h->warning("SCHED: Msg3 allocated for user rnti=0x%x that no longer exists\n", rnti); + // Allocate user PHICHs + uint32_t nof_phich_elems = 0; + for (auto& ue_pair : ue_db) { + sched_ue& user = ue_pair.second; + uint16_t rnti = ue_pair.first; + + // user.has_pucch = false; // FIXME: What is this for? + + ul_harq_proc* h = user.get_ul_harq(tti_sched->get_tti_rx()); + + /* Indicate PHICH acknowledgment if needed */ + if (h->has_pending_ack()) { + tti_sched->ul_sched_result.phich[nof_phich_elems].phich = + h->get_pending_ack() ? ul_sched_phich_t::ACK : ul_sched_phich_t::NACK; + tti_sched->ul_sched_result.phich[nof_phich_elems].rnti = rnti; + log_h->debug("SCHED: Allocated PHICH for rnti=0x%x, value=%d\n", + rnti, + tti_sched->ul_sched_result.phich[nof_phich_elems].phich); + nof_phich_elems++; } } + tti_sched->ul_sched_result.nof_phich_elems = nof_phich_elems; } -int sched::ul_sched(uint32_t tti, srsenb::sched_interface::ul_sched_res_t* sched_result) +void sched::ul_sched_msg3(tti_sched_t* tti_sched) { - typedef std::map::iterator it_t; - - if (!configured) { - return 0; + uint32_t pending_tti = tti_sched->get_tti_tx_ul() % 10; + if (not pending_msg3[pending_tti].enabled) { + return; } - /* If dl_sched() not yet called this tti (this tti is +4ms advanced), reset CCE state */ - uint32_t tti_rx = (tti + 10240 - 2 * FDD_HARQ_DELAY_MS) % 10240; - sched_vars::tti_vars_t* tti_vars = &sched_vars.new_tti(tti_rx); - - // current_cfi is set in dl_sched() - bzero(sched_result, sizeof(sched_interface::ul_sched_res_t)); + uint16_t rnti = pending_msg3[pending_tti].rnti; + if (not ue_db.count(rnti)) { + log_h->warning("SCHED: Msg3 allocated for user rnti=0x%x that no longer exists\n", rnti); + return; + } + sched_ue* user = &ue_db[rnti]; - pthread_mutex_lock(&sched_mutex); - pthread_rwlock_rdlock(&rwlock); + /* Allocate RBGs and HARQ for Msg3 */ + ul_harq_proc::ul_alloc_t msg3 = {pending_msg3[pending_tti].n_prb, pending_msg3[pending_tti].L}; + if (not tti_sched->alloc_ul_msg3(user, msg3, pending_msg3[pending_tti].mcs)) { + log_h->warning("SCHED: Could not allocate msg3 within (%d,%d)\n", msg3.RB_start, msg3.RB_start + msg3.L); + return; + } + pending_msg3[pending_tti].enabled = false; +} +// Compute UL scheduler result +int sched::generate_ul_sched(tti_sched_t* tti_sched) +{ /* Initialize variables */ - current_tti = tti; - sfn = tti/10; - if (tti > TX_DELAY) { - sf_idx = (tti - TX_DELAY) % 10; - } else { - sf_idx = (tti + 10240 - TX_DELAY) % 10; - } - int nof_dci_elems = 0; - int nof_phich_elems = 0; + current_tti = tti_sched->get_tti_tx_ul(); + prbmask_t& ul_mask = tti_sched->get_ul_mask(); - // clear previous UL allocations - for (it_t it = ue_db.begin(); it != ue_db.end(); ++it) { - it->second.set_ul_alloc(NULL); + // reserve PRBs for PRACH + if (srslte_prach_tti_opportunity_config_fdd(cfg.prach_config, tti_sched->get_tti_tx_ul(), -1)) { + ul_mask = prach_mask; + log_h->debug("SCHED: Allocated PRACH RBs. 
Mask: 0x%s\n", prach_mask.to_hex().c_str()); } - ul_mask.resize(cfg.cell.nof_prb); - ul_mask.reset(); - // Get HARQ process for this TTI - for(it_t iter=ue_db.begin(); iter!=ue_db.end(); ++iter) { - sched_ue* user = (sched_ue*)&iter->second; - uint16_t rnti = (uint16_t)iter->first; + // Update available allocation if there's a pending RAR + ul_sched_msg3(tti_sched); - user->has_pucch = false; + // reserve PRBs for PUCCH + if (cfg.cell.nof_prb != 6 and (ul_mask & pucch_mask).any()) { + log_h->error("There was a collision with the PUCCH. current mask=0x%s, pucch_mask=0x%s\n", + ul_mask.to_hex().c_str(), + pucch_mask.to_hex().c_str()); + } + ul_mask |= pucch_mask; - ul_harq_proc* h = user->get_ul_harq(current_tti); + // Call scheduler for UL data + ul_metric->sched_users(ue_db, tti_sched); - /* Indicate PHICH acknowledgment if needed */ - if (h->has_pending_ack()) { - sched_result->phich[nof_phich_elems].phich = h->get_ack(0)?ul_sched_phich_t::ACK:ul_sched_phich_t::NACK; - sched_result->phich[nof_phich_elems].rnti = rnti; - log_h->debug( - "SCHED: Allocated PHICH for rnti=0x%x, value=%d\n", rnti, sched_result->phich[nof_phich_elems].phich); - nof_phich_elems++; - } + // Update pending data counters after this TTI + for (auto& user : ue_db) { + user.second.get_ul_harq(tti_sched->get_tti_tx_ul())->reset_pending_data(); } - // reserve PRBs for PRACH - if (srslte_prach_tti_opportunity_config_fdd(cfg.prach_config, tti, -1)) { - ul_harq_proc::ul_alloc_t prach = {cfg.prach_freq_offset, 6}; - if (ul_mask.any(prach)) { - log_h->warning("SCHED: Failed to allocate PRACH RBs within (%d,%d)\n", prach.RB_start, prach.RB_start + prach.L); - } - ul_mask.fill(prach); - log_h->debug("SCHED: Allocated PRACH RBs within (%d,%d)\n", prach.RB_start, prach.RB_start + prach.L); - } + return SRSLTE_SUCCESS; +} - // Update available allocation if there's a pending RAR - ul_sched_msg3(); - - // Allocate PUCCH resources - if (cfg.nrb_pucch >= 0) { - ul_harq_proc::ul_alloc_t pucch = {0, (uint32_t)cfg.nrb_pucch}; - if (ul_mask.any(pucch) and cfg.cell.nof_prb != 6) { - log_h->warning( - "SCHED: There was a collision with the PUCCH (%d, %d)\n", pucch.RB_start, pucch.RB_start + pucch.L); - } else { - ul_mask.fill(pucch); - } - pucch.RB_start = cfg.cell.nof_prb - cfg.nrb_pucch; - pucch.L = (uint32_t)cfg.nrb_pucch; - if (ul_mask.any(pucch) and cfg.cell.nof_prb != 6) { - log_h->warning("SCHED: There was a collision with the PUCCH (%d, %d)\n", pucch.RB_start, pucch.RB_start+pucch.L); - } else { - ul_mask.fill(pucch); - } - } else { - for(it_t iter=ue_db.begin(); iter!=ue_db.end(); ++iter) { - sched_ue *user = (sched_ue*) &iter->second; - uint32_t prb_idx[2] = {0, 0}; - if (user->get_pucch_sched(current_tti, prb_idx)) { - user->has_pucch = true; - // allocate PUCCH - for (int i = 0; i < 2; i++) { - ul_harq_proc::ul_alloc_t pucch = {prb_idx[i], 1}; - ul_mask.fill(pucch); - } - } - } +// Downlink Scheduler API +int sched::dl_sched(uint32_t tti, sched_interface::dl_sched_res_t* sched_result) +{ + if (!configured) { + return 0; } - // Call scheduler for normal data - ul_metric->sched_users(ue_db, &ul_mask, current_tti); - - // Now allocate PUSCH - for(it_t iter=ue_db.begin(); iter!=ue_db.end(); ++iter) { - sched_ue *user = (sched_ue*) &iter->second; - uint16_t rnti = (uint16_t)iter->first; - ul_harq_proc* h = NULL; - - h = user->get_ul_alloc(); - if (h) { - ul_harq_proc::ul_alloc_t ul_alloc = h->get_alloc(); - bool is_rar = h->is_rar_tx(); - bool is_newtx = h->is_empty(0); - bool needs_pdcch = not is_rar and (is_newtx or 
h->is_adaptive_retx()); - - // Set number of retx - if (is_newtx) { - if (is_rar) { - h->set_max_retx(cfg.maxharq_msg3tx); - } else { - h->set_max_retx(user->get_max_retx()); - } - } + uint32_t tti_rx = TTI_SUB(tti, TX_DELAY); - // Generate PDCCH except for RAR and non-adaptive retx - if (needs_pdcch) { - uint32_t aggr_level = user->get_aggr_level(srslte_dci_format_sizeof(&cfg.cell, NULL, NULL, SRSLTE_DCI_FORMAT0)); - if (!generate_dci(&sched_result->pusch[nof_dci_elems].dci.location, - user->get_locations(current_cfi, sf_idx), - aggr_level, - tti_vars)) { - h->reset(0); - log_h->warning("SCHED: Could not schedule UL DCI rnti=0x%x, pid=%d, L=%d, sf_idx=%d\n", - rnti, - h->get_id(), - aggr_level, - sf_idx); - - sched_result->pusch[nof_dci_elems].needs_pdcch = false; - } else { - sched_result->pusch[nof_dci_elems].needs_pdcch = true; - } - } else { - sched_result->pusch[nof_dci_elems].needs_pdcch = false; - } + // Compute scheduling Result for tti_rx + tti_sched_t* tti_sched = new_tti(tti_rx); - // Generate dci unless DCI could not be generated and was required - if (sched_result->pusch[nof_dci_elems].needs_pdcch == needs_pdcch) { - uint32_t pending_data_before = user->get_pending_ul_new_data(current_tti); - if (user->generate_format0(h, &sched_result->pusch[nof_dci_elems], current_tti, user->needs_cqi(tti, true)) > 0) - { - - if (is_newtx) { - // Un-trigger SR - user->unset_sr(); - } - - log_h->info("SCHED: %s %s rnti=0x%x, pid=%d, dci=(%d,%d), prb=(%d,%d), n_rtx=%d, tbs=%d, bsr=%d (%d-%d)\n", - is_rar ? "Msg3" : "UL", - is_newtx ? "tx" : "retx", - rnti, - h->get_id(), - sched_result->pusch[nof_dci_elems].dci.location.L, - sched_result->pusch[nof_dci_elems].dci.location.ncce, - ul_alloc.RB_start, - ul_alloc.RB_start + ul_alloc.L, - h->nof_retx(0), - sched_result->pusch[nof_dci_elems].tbs, - user->get_pending_ul_new_data(current_tti), - pending_data_before, - user->get_pending_ul_old_data()); - - nof_dci_elems++; - } else { - log_h->warning("SCHED: Error %s %s rnti=0x%x, pid=%d, dci=(%d,%d), prb=(%d,%d), tbs=%d, bsr=%d\n", - is_rar ? "Msg3" : "UL", - is_newtx ? 
"tx" : "retx", - rnti, - h->get_id(), - sched_result->pusch[nof_dci_elems].dci.location.L, - sched_result->pusch[nof_dci_elems].dci.location.ncce, - ul_alloc.RB_start, - ul_alloc.RB_start + ul_alloc.L, - sched_result->pusch[nof_dci_elems].tbs, - user->get_pending_ul_new_data(current_tti)); - } - } - } - } + // copy result + *sched_result = tti_sched->dl_sched_result; - // Update pending data counters after this TTI - for(it_t iter=ue_db.begin(); iter!=ue_db.end(); ++iter) { - sched_ue *user = (sched_ue *) &iter->second; - user->get_ul_harq(current_tti)->reset_pending_data(); + return 0; +} + +// Uplink Scheduler API +int sched::ul_sched(uint32_t tti, srsenb::sched_interface::ul_sched_res_t* sched_result) +{ + if (!configured) { + return 0; } - pthread_rwlock_unlock(&rwlock); - pthread_mutex_unlock(&sched_mutex); + // Compute scheduling Result for tti_rx + uint32_t tti_rx = (tti + 10240 - 2 * FDD_HARQ_DELAY_MS) % 10240; + tti_sched_t* tti_sched = new_tti(tti_rx); - sched_result->nof_dci_elems = nof_dci_elems; - sched_result->nof_phich_elems = nof_phich_elems; + // Copy results + *sched_result = tti_sched->ul_sched_result; return SRSLTE_SUCCESS; } @@ -1126,125 +1406,6 @@ void sched::generate_cce_location( } } -#define NCCE(L) (1 << L) - -int sched::find_empty_dci(sched_ue::sched_dci_cce_t* locations, - uint32_t aggr_level, - sched_vars::tti_vars_t* tti_vars, - sched_ue* user) -{ - if (!locations->nof_loc[aggr_level]) { - Error("In generate_dci(): No locations for aggr_level=%d\n", aggr_level); - return -1; - } - uint32_t nof_cand = 0; - uint32_t test_cand = rand()%locations->nof_loc[aggr_level]; - - while (nof_cand < locations->nof_loc[aggr_level]) { - uint32_t ncce = locations->cce_start[aggr_level][test_cand]; - bool used = false; - if (user) { - used = user->pucch_sr_collision(current_tti, ncce); - } - used |= tti_vars->used_cce.any(ncce, ncce + NCCE(aggr_level)); - if (used) { - test_cand++; - if (test_cand == locations->nof_loc[aggr_level]) { - test_cand = 0; - } - nof_cand++; - } else { - return ncce; - } - } - return -1; -} - -bool sched::generate_dci(srslte_dci_location_t* sched_location, - sched_ue::sched_dci_cce_t* locations, - uint32_t aggr_level, - sched_vars::tti_vars_t* tti_vars, - sched_ue* user) -{ - int ncce = find_empty_dci(locations, aggr_level, tti_vars, user); - if (ncce < 0) { - return false; - } - tti_vars->used_cce.fill(ncce, ncce + NCCE(aggr_level), true); - Debug("SCHED: Allocated DCI L=%d, ncce=%d\n", aggr_level, ncce); - - if (sched_location) { - sched_location->L = aggr_level; - sched_location->ncce = (uint32_t)ncce; - } - return true; -} - -int sched::generate_format1a( - uint32_t rb_start, uint32_t l_crb, uint32_t tbs_bytes, uint32_t rv, uint16_t rnti, srslte_dci_dl_t* dci) -{ - /* Calculate I_tbs for this TBS */ - int tbs = tbs_bytes * 8; - int i; - int mcs = -1; - for (i = 0; i < 27; i++) { - if (srslte_ra_tbs_from_idx(i, 2) >= tbs) { - dci->type2_alloc.n_prb1a = srslte_ra_type2_t::SRSLTE_RA_TYPE2_NPRB1A_2; - mcs = i; - tbs = srslte_ra_tbs_from_idx(i, 2); - break; - } else if (srslte_ra_tbs_from_idx(i, 3) >= tbs) { - dci->type2_alloc.n_prb1a = srslte_ra_type2_t::SRSLTE_RA_TYPE2_NPRB1A_3; - mcs = i; - tbs = srslte_ra_tbs_from_idx(i, 3); - break; - } - } - if (i == 28) { - Error("Can't allocate Format 1A for TBS=%d\n", tbs); - return -1; - } - - Debug("ra_tbs=%d/%d, tbs_bytes=%d, tbs=%d, mcs=%d\n", - srslte_ra_tbs_from_idx(mcs, 2),srslte_ra_tbs_from_idx(mcs, 3),tbs_bytes,tbs,mcs); - - dci->alloc_type = SRSLTE_RA_ALLOC_TYPE2; - dci->type2_alloc.mode = 
srslte_ra_type2_t::SRSLTE_RA_TYPE2_LOC; - dci->type2_alloc.riv = srslte_ra_type2_to_riv(l_crb, rb_start, cfg.cell.nof_prb); - dci->pid = 0; - dci->tb[0].mcs_idx = mcs; - dci->tb[0].rv = rv; - dci->format = SRSLTE_DCI_FORMAT1A; - dci->rnti = rnti; - - return tbs; -} - -void sched::sched_vars::init(sched* parent_) -{ - parent = parent_; - for (uint32_t i = 0; i < tti_array_size; ++i) { - tti_vars_[i].used_cce.resize(srslte_regs_pdcch_ncce(&parent->regs, parent->sched_cfg.nof_ctrl_symbols)); - tti_vars_[i].used_cce.reset(); - } -} - -sched::sched_vars::tti_vars_t& sched::sched_vars::new_tti(uint32_t tti_rx) -{ - tti_vars_t& ret = tti_vars_[tti_rx % 16]; - // if it is the first time tti is run, reset vars - if (ret.tti_rx != tti_rx) { - ret.used_cce.resize(srslte_regs_pdcch_ncce(&parent->regs, parent->sched_cfg.nof_ctrl_symbols)); - ret.used_cce.reset(); - ret.tti_rx = tti_rx; - } - return ret; -} - -sched::sched_vars::tti_vars_t& sched::sched_vars::tti_vars(uint32_t tti_rx) -{ - return tti_vars_[tti_rx % 16]; -} } diff --git a/srsenb/src/mac/scheduler_grid.cc b/srsenb/src/mac/scheduler_grid.cc new file mode 100644 index 000000000..2761e74ac --- /dev/null +++ b/srsenb/src/mac/scheduler_grid.cc @@ -0,0 +1,369 @@ +/* + * Copyright 2013-2019 Software Radio Systems Limited + * + * This file is part of srsLTE. + * + * srsLTE is free software: you can redistribute it and/or modify + * it under the terms of the GNU Affero General Public License as + * published by the Free Software Foundation, either version 3 of + * the License, or (at your option) any later version. + * + * srsLTE is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Affero General Public License for more details. + * + * A copy of the GNU Affero General Public License can be found in + * the LICENSE file in the top-level directory of this distribution + * and at http://www.gnu.org/licenses/. 
+ * + */ + +#include "srsenb/hdr/mac/scheduler_grid.h" +#include "srsenb/hdr/mac/scheduler.h" +#include + +namespace srsenb { + +const char* alloc_outcome_t::to_string() const +{ + switch (result) { + case SUCCESS: + return "success"; + case DCI_COLLISION: + return "dci_collision"; + case RB_COLLISION: + return "rb_collision"; + case ERROR: + return "error"; + } + return "unknown error"; +} + +/******************************************************* + * PDCCH Allocation Methods + *******************************************************/ + +void pdcch_grid_t::init(srslte::log* log_, + srslte_regs_t* regs, + sched_ue::sched_dci_cce_t* common_locs, + sched_ue::sched_dci_cce_t (*rar_locs)[10]) +{ + log_h = log_; + common_locations = common_locs; + for (uint32_t cfix = 0; cfix < 3; ++cfix) { + rar_locations[cfix] = rar_locs[cfix]; + } + + // precompute nof_cces + for (uint32_t cfix = 0; cfix < nof_cfis; ++cfix) { + int ret = srslte_regs_pdcch_ncce(regs, cfix + 1); + if (ret < 0) { + log_h->error("SCHED: Failed to calculate the number of CCEs in the PDCCH\n"); + } + cce_size_array[cfix] = (uint32_t)ret; + } + + reset(); +} + +void pdcch_grid_t::new_tti(uint32_t tti_rx_, uint32_t start_cfi) +{ + tti_rx = tti_rx_; + sf_idx = TTI_TX(tti_rx) % 10; + current_cfix = start_cfi - 1; + reset(); +} + +const sched_ue::sched_dci_cce_t* pdcch_grid_t::get_cce_loc_table(alloc_type_t alloc_type, sched_ue* user) const +{ + switch (alloc_type) { + case alloc_type_t::DL_BC: + return &common_locations[current_cfix]; + case alloc_type_t::DL_PCCH: + return &common_locations[current_cfix]; + case alloc_type_t::DL_RAR: + return &rar_locations[current_cfix][sf_idx]; + case alloc_type_t::DL_DATA: + return user->get_locations(current_cfix + 1, sf_idx); + case alloc_type_t::UL_DATA: + return user->get_locations(current_cfix + 1, sf_idx); + } + return NULL; +} + +bool pdcch_grid_t::alloc_dci(alloc_type_t alloc_type, uint32_t aggr_idx, sched_ue* user) +{ + // FIXME: Make the alloc tree update lazy + + /* Get DCI Location Table */ + const sched_ue::sched_dci_cce_t* dci_locs = get_cce_loc_table(alloc_type, user); + if (!dci_locs) { + return false; + } + + /* Search for potential DCI positions */ + if (prev_end > 0) { + for (size_t j = prev_start; j < prev_end; ++j) { + update_alloc_tree((int)j, aggr_idx, user, alloc_type, dci_locs); + } + } else { + update_alloc_tree(-1, aggr_idx, user, alloc_type, dci_locs); + } + + // if no pdcch space was available + if (dci_alloc_tree.size() == prev_end) { + return false; + } + + prev_start = prev_end; + prev_end = dci_alloc_tree.size(); + + nof_dci_allocs++; + + return true; +} + +void pdcch_grid_t::update_alloc_tree(int parent_node_idx, + uint32_t aggr_idx, + sched_ue* user, + alloc_type_t alloc_type, + const sched_ue::sched_dci_cce_t* dci_locs) +{ + alloc_t alloc; + alloc.rnti = (user != nullptr) ? 
user->get_rnti() : (uint16_t)0u; + alloc.dci_pos.L = aggr_idx; + + // get cumulative pdcch mask + pdcch_mask_t cum_mask; + if (parent_node_idx >= 0) { + cum_mask = dci_alloc_tree[parent_node_idx].second.total_mask; + } else { + cum_mask.resize(nof_cces()); + } + + uint32_t nof_locs = dci_locs->nof_loc[aggr_idx]; + for (uint32_t i = 0; i < nof_locs; ++i) { + uint32_t startpos = dci_locs->cce_start[aggr_idx][i]; + + if (alloc_type == alloc_type_t::DL_DATA and user->pucch_sr_collision(TTI_TX(tti_rx), startpos)) { + // will cause a collision in the PUCCH + continue; + } + + pdcch_mask_t alloc_mask(nof_cces()); + alloc_mask.fill(startpos, startpos + (1u << aggr_idx)); + if ((cum_mask & alloc_mask).any()) { + // there is collision. Try another mask + continue; + } + + // Allocation successful + alloc.current_mask = alloc_mask; + alloc.total_mask = cum_mask | alloc_mask; + alloc.dci_pos.ncce = startpos; + + // Prune if repetition + uint32_t j = prev_end; + for (; j < dci_alloc_tree.size(); ++j) { + if (dci_alloc_tree[j].second.total_mask == alloc.total_mask) { + break; + } + } + if (j < dci_alloc_tree.size()) { + continue; + } + + // Register allocation + dci_alloc_tree.emplace_back(parent_node_idx, alloc); + } +} + +bool pdcch_grid_t::set_cfi(uint32_t cfi) +{ + current_cfix = cfi - 1; + // FIXME: use this function for dynamic cfi + // FIXME: The estimation of the number of required prbs in metric depends on CFI. Analyse the consequences + return true; +} + +void pdcch_grid_t::reset() +{ + prev_start = 0; + prev_end = 0; + dci_alloc_tree.clear(); + nof_dci_allocs = 0; +} + +void pdcch_grid_t::get_allocs(alloc_result_t* vec, pdcch_mask_t* tot_mask, size_t idx) const +{ + // if alloc tree is empty + if (prev_start == prev_end) { + if (vec) + vec->clear(); + if (tot_mask) { + tot_mask->reset(); + } + return; + } + + // set vector of allocations + if (vec) { + vec->clear(); + size_t i = prev_start + idx; + while (dci_alloc_tree[i].first >= 0) { + vec->push_back(&dci_alloc_tree[i].second); + i = (size_t)dci_alloc_tree[i].first; + } + vec->push_back(&dci_alloc_tree[i].second); + std::reverse(vec->begin(), vec->end()); + } + + // set final cce mask + if (tot_mask) { + *tot_mask = dci_alloc_tree[prev_start + idx].second.total_mask; + } +} + +void pdcch_grid_t::print_result(bool verbose) const +{ + if (prev_start == prev_end) { + log_h->info("SCHED: No DCI allocations\n"); + } + + std::stringstream ss; + ss << "SCHED: cfi=" << get_cfi() << ", " << prev_end - prev_start << " DCI allocation combinations:\n"; + // get all the possible combinations of DCI allocations + uint32_t count = 0; + for (size_t i = prev_start; i < prev_end; ++i) { + alloc_result_t vec; + pdcch_mask_t tot_mask; + get_allocs(&vec, &tot_mask, i - prev_start); + + ss << " combination " << count << ": mask=0x" << tot_mask.to_hex().c_str(); + if (verbose) { + ss << ", DCI allocs:\n"; + for (const auto& dci_alloc : vec) { + char hex[5]; + sprintf(hex, "%x", dci_alloc->rnti); + ss << " > rnti=0x" << hex << ": " << dci_alloc->current_mask.to_hex().c_str() << " / " + << dci_alloc->total_mask.to_hex().c_str() << "\n"; + } + } else { + ss << "\n"; + } + count++; + } + + log_h->info("%s", ss.str().c_str()); +} + +/******************************************************* + * TTI resource Scheduling Methods + *******************************************************/ + +void tti_grid_t::init(srslte::log* log_, sched_interface::cell_cfg_t* cell_, const pdcch_grid_t& pdcch_grid) +{ + log_h = log_; + cell_cfg = cell_; + nof_prbs = cell_cfg->cell.nof_prb; 
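+  // [Editor's note] Worked example, assuming srslte_ra_type0_P() follows TS 36.213
+  // Table 7.1.6.1-1: for nof_prb=50 we get P=3, hence nof_rbgs=ceil(50/3)=17, SIBs span
+  // si_n_rbg=ceil(4/3)=2 RBGs and RARs rar_n_rbg=ceil(3/3)=1 RBG.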
+ uint32_t P = srslte_ra_type0_P(cell_cfg->cell.nof_prb); + nof_rbgs = srslte::ceil_div(cell_cfg->cell.nof_prb, P); + si_n_rbg = srslte::ceil_div(4, P); + rar_n_rbg = srslte::ceil_div(3, P); + + pdcch_alloc = pdcch_grid; +} + +void tti_grid_t::new_tti(uint32_t tti_rx_, uint32_t start_cfi) +{ + tti_rx = tti_rx_; + + // derived + tti_tx_dl = TTI_TX(tti_rx); + tti_tx_ul = TTI_RX_ACK(tti_rx); + sfn = tti_tx_dl / 10; + + // internal state + avail_rbg = nof_rbgs; + dl_mask.reset(); + dl_mask.resize(nof_rbgs); + ul_mask.reset(); + ul_mask.resize(nof_prbs); + pdcch_alloc.new_tti(tti_rx, start_cfi); +} + +alloc_outcome_t tti_grid_t::alloc_dl(uint32_t aggr_lvl, alloc_type_t alloc_type, rbgmask_t alloc_mask, sched_ue* user) +{ + // Check RBG collision + if ((dl_mask & alloc_mask).any()) { + return alloc_outcome_t::RB_COLLISION; + } + + // Allocate DCI in PDCCH + if (not pdcch_alloc.alloc_dci(alloc_type, aggr_lvl, user)) { + return alloc_outcome_t::DCI_COLLISION; + } + + // Allocate RBGs + dl_mask |= alloc_mask; + avail_rbg -= alloc_mask.count(); + + return alloc_outcome_t::SUCCESS; +} + +tti_grid_t::ctrl_alloc_t tti_grid_t::alloc_dl_ctrl(uint32_t aggr_lvl, alloc_type_t alloc_type) +{ + rbg_range_t range; + range.rbg_start = nof_rbgs - avail_rbg; + range.rbg_end = range.rbg_start + ((alloc_type == alloc_type_t::DL_RAR) ? rar_n_rbg : si_n_rbg); + + if (alloc_type != alloc_type_t::DL_RAR and alloc_type != alloc_type_t::DL_BC and + alloc_type != alloc_type_t::DL_PCCH) { + log_h->error("SCHED: DL control allocations must be RAR/BC/PDCCH\n"); + return {alloc_outcome_t::ERROR, range}; + } + // Setup range starting from left + if (range.rbg_end > nof_rbgs) { + return {alloc_outcome_t::RB_COLLISION, range}; + } + + // allocate DCI and RBGs + rbgmask_t new_mask(dl_mask.size()); + new_mask.fill(range.rbg_start, range.rbg_end); + return {alloc_dl(aggr_lvl, alloc_type, new_mask), range}; +} + +alloc_outcome_t tti_grid_t::alloc_dl_data(sched_ue* user, const rbgmask_t& user_mask) +{ + srslte_dci_format_t dci_format = user->get_dci_format(); + uint32_t aggr_level = user->get_aggr_level(srslte_dci_format_sizeof(&cell_cfg->cell, NULL, NULL, dci_format)); + return alloc_dl(aggr_level, alloc_type_t::DL_DATA, user_mask, user); +} + +alloc_outcome_t tti_grid_t::alloc_ul_data(sched_ue* user, ul_harq_proc::ul_alloc_t alloc, bool needs_pdcch) +{ + if (alloc.RB_start + alloc.L > ul_mask.size()) { + return alloc_outcome_t::ERROR; + } + + prbmask_t newmask(ul_mask.size()); + newmask.fill(alloc.RB_start, alloc.RB_start + alloc.L); + if ((ul_mask & newmask).any()) { + return alloc_outcome_t::RB_COLLISION; + } + + // Generate PDCCH except for RAR and non-adaptive retx + if (needs_pdcch) { + uint32_t aggr_idx = user->get_aggr_level(srslte_dci_format_sizeof(&cell_cfg->cell, NULL, NULL, SRSLTE_DCI_FORMAT0)); + if (not pdcch_alloc.alloc_dci(alloc_type_t::UL_DATA, aggr_idx, user)) { + return alloc_outcome_t::DCI_COLLISION; + } + } + + ul_mask |= newmask; + + return alloc_outcome_t::SUCCESS; +} + +} // namespace srsenb \ No newline at end of file diff --git a/srsenb/src/mac/scheduler_harq.cc b/srsenb/src/mac/scheduler_harq.cc index 18527454d..4992acde3 100644 --- a/srsenb/src/mac/scheduler_harq.cc +++ b/srsenb/src/mac/scheduler_harq.cc @@ -32,6 +32,24 @@ namespace srsenb { +rbg_range_t::rbg_range_t(const prb_range_t& rbgs, uint32_t P) : + rbg_range_t(srslte::ceil_div(rbgs.prb_start, P), srslte::ceil_div(rbgs.prb_end, P)) +{ +} + +prb_range_t::prb_range_t(const rbg_range_t& rbgs, uint32_t P) : prb_range_t(rbgs.rbg_start * P, rbgs.rbg_end 
* P) {}
+
+prb_range_t prb_range_t::riv_to_prbs(uint32_t riv, uint32_t nof_prbs, int nof_vrbs)
+{
+  prb_range_t p;
+  if (nof_vrbs < 0) {
+    nof_vrbs = nof_prbs;
+  }
+  srslte_ra_type2_from_riv(riv, &p.prb_end, &p.prb_start, nof_prbs, (uint32_t)nof_vrbs);
+  p.prb_end += p.prb_start;
+  return p;
+}
+
 /******************************************************
  *
  * These classes manage the HARQ Processes.
@@ -49,16 +67,10 @@ void harq_proc::config(uint32_t id_, uint32_t max_retx_, srslte::log* log_h_)
   }
 }
 
-void harq_proc::set_max_retx(uint32_t max_retx_) {
-  log_h->debug("Set max_retx=%d pid=%d\n", max_retx_, id);
-  max_retx = max_retx_;
-}
-
 void harq_proc::reset(uint32_t tb_idx)
 {
+  ack_state[tb_idx] = NULL_ACK;
   active[tb_idx] = false;
-  ack[tb_idx] = true;
-  ack_received[tb_idx] = false;
   n_rtx[tb_idx] = 0;
   tti = 0;
   last_mcs[tb_idx] = -1;
@@ -83,12 +95,12 @@ bool harq_proc::is_empty() const
 
 bool harq_proc::is_empty(uint32_t tb_idx) const
 {
-  return !active[tb_idx] || (active[tb_idx] && ack[tb_idx] && ack_received[tb_idx]);
+  return !active[tb_idx];
 }
 
 bool harq_proc::has_pending_retx_common(uint32_t tb_idx) const
 {
-  return !ack[tb_idx] && n_rtx[tb_idx] < max_retx;
+  return !is_empty(tb_idx) && ack_state[tb_idx] == NACK;
 }
 
 uint32_t harq_proc::get_tti() const
@@ -96,19 +108,15 @@ uint32_t harq_proc::get_tti() const
   return (uint32_t) tti;
 }
 
-bool harq_proc::get_ack(uint32_t tb_idx) const
+void harq_proc::set_ack_common(uint32_t tb_idx, bool ack_)
 {
-  return ack[tb_idx];
-}
-
-void harq_proc::set_ack(uint32_t tb_idx, bool ack_)
-{
-  ack[tb_idx] = ack_;
-  ack_received[tb_idx] = true;
+  ack_state[tb_idx] = ack_ ? ACK : NACK;
   log_h->debug("ACK=%d received pid=%d, tb_idx=%d, n_rtx=%d, max_retx=%d\n", ack_, id, tb_idx, n_rtx[tb_idx], max_retx);
   if (!ack_ && (n_rtx[tb_idx] + 1 >= max_retx)) {
     Warning("SCHED: discarding TB %d pid=%d, tti=%d, maximum number of retx exceeded (%d)\n", tb_idx, id, tti, max_retx);
     active[tb_idx] = false;
+  } else if (ack_) {
+    active[tb_idx] = false;
   }
 }
 
@@ -121,17 +129,13 @@ void harq_proc::new_tx_common(uint32_t tb_idx, uint32_t tti_, int mcs, int tbs)
   last_mcs[tb_idx] = mcs;
   last_tbs[tb_idx] = tbs;
 
-  if (max_retx) {
-    active[tb_idx] = true;
-  } else {
-    active[tb_idx] = false; // Can reuse this process if no retx are allowed
-  }
+  active[tb_idx] = true;
}
 
 void harq_proc::new_retx_common(uint32_t tb_idx, uint32_t tti_, int* mcs, int* tbs)
 {
-  ack_received[tb_idx] = false;
-  tti = tti_;
+  ack_state[tb_idx] = NACK;
+  tti       = tti_;
   n_rtx[tb_idx]++;
   if (mcs) {
     *mcs = last_mcs[tb_idx];
@@ -141,6 +145,16 @@ void harq_proc::new_retx_common(uint32_t tb_idx, uint32_t tti_, int* mcs, int* t
   }
 }
 
+void harq_proc::reset_pending_data_common()
+{
+  // reuse HARQ processes for which no retxs are allowed
+  if (max_retx == 0 and !is_empty()) {
+    for (uint32_t tb = 0; tb < SRSLTE_MAX_TB; ++tb) {
+      active[tb] = false;
+    }
+  }
+}
+
 uint32_t harq_proc::nof_tx(uint32_t tb_idx) const
 {
   return tx_cnt[tb_idx];
@@ -156,47 +170,55 @@ bool harq_proc::get_ndi(uint32_t tb_idx) const
   return ndi[tb_idx];
 }
 
+uint32_t harq_proc::max_nof_retx() const
+{
+  return max_retx;
+}
+
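+// [Editor's note] Illustrative TB lifecycle under the new tri-state ack_state (a sketch based
+// only on the methods above; NULL_ACK/ACK/NACK are the states this patch introduces):
+//   new_tx_common()       -> active = true  (ack_state starts as NULL_ACK via reset())
+//   set_ack_common(false) -> ack_state = NACK; the TB stays active until max_retx is exceeded
+//   new_retx_common()     -> ack_state = NACK, n_rtx++
+//   set_ack_common(true)  -> ack_state = ACK, active = false (process becomes reusable)
+
 /******************************************************
- *                          UE::DL HARQ class                    *
+ *                          UE::DL HARQ class                *
 ******************************************************/
 
-dl_harq_proc::dl_harq_proc()
+dl_harq_proc::dl_harq_proc() : harq_proc()
 {
-  n_cce   = 0;
-  rbgmask = 0;
+  n_cce = 0;
 }
 
-void dl_harq_proc::new_tx(uint32_t tb_idx, uint32_t tti, int mcs, int tbs, uint32_t n_cce_)
+void dl_harq_proc::new_tx(const rbgmask_t& new_mask, uint32_t tb_idx, uint32_t tti, int mcs, int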
tbs, uint32_t n_cce_) { - n_cce = n_cce_; + n_cce = n_cce_; + rbgmask = new_mask; new_tx_common(tb_idx, tti, mcs, tbs); } -void dl_harq_proc::new_retx(uint32_t tb_idx, uint32_t tti_, int* mcs, int* tbs) +void dl_harq_proc::new_retx( + const rbgmask_t& new_mask, uint32_t tb_idx, uint32_t tti_, int* mcs, int* tbs, uint32_t n_cce_) { + n_cce = n_cce_; + rbgmask = new_mask; new_retx_common(tb_idx, tti_, mcs, tbs); } -uint32_t dl_harq_proc::get_n_cce() +void dl_harq_proc::set_ack(uint32_t tb_idx, bool ack) { - return n_cce; + set_ack_common(tb_idx, ack); } -rbgmask_t dl_harq_proc::get_rbgmask() +uint32_t dl_harq_proc::get_n_cce() const { - return rbgmask; + return n_cce; } -void dl_harq_proc::set_rbgmask(rbgmask_t new_mask) +rbgmask_t dl_harq_proc::get_rbgmask() const { - rbgmask = new_mask; + return rbgmask; } bool dl_harq_proc::has_pending_retx(uint32_t tb_idx, uint32_t current_tti) const { uint32_t tti_diff = srslte_tti_interval(current_tti, tti); // NOTE: tti may be ahead of current_tti due to thread flip - return (tti_diff < (10240 / 2)) and (tti_diff >= SRSLTE_FDD_NOF_HARQ) and !is_empty(tb_idx); + return (tti_diff < (10240 / 2)) and (tti_diff >= SRSLTE_FDD_NOF_HARQ) and has_pending_retx_common(tb_idx); } int dl_harq_proc::get_tbs(uint32_t tb_idx) const @@ -204,131 +226,77 @@ int dl_harq_proc::get_tbs(uint32_t tb_idx) const return last_tbs[tb_idx]; } -/****************************************************** - * UE::UL RB MASK * - ******************************************************/ - -bool ul_mask_t::any(ul_harq_proc::ul_alloc_t alloc) const noexcept -{ - return base_type::any(alloc.RB_start, alloc.RB_start + alloc.L); -} - -void ul_mask_t::fill(srsenb::ul_harq_proc::ul_alloc_t alloc) noexcept +void dl_harq_proc::reset_pending_data() { - base_type::fill(alloc.RB_start, alloc.RB_start + alloc.L, true); + reset_pending_data_common(); } /****************************************************** * UE::UL HARQ class * ******************************************************/ -ul_harq_proc::ul_alloc_t ul_harq_proc::get_alloc() +ul_harq_proc::ul_alloc_t ul_harq_proc::get_alloc() const { return allocation; } -void ul_harq_proc::set_alloc(ul_harq_proc::ul_alloc_t alloc) -{ - if (not is_empty(0)) { - log_h->error("Trying to overwrite an on-going harq procedure\n"); - return; - } - is_rar = false; // can be set to true through set_rar_mcs() - is_adaptive = false; - allocation = alloc; -} - -void ul_harq_proc::set_realloc(ul_harq_proc::ul_alloc_t alloc) -{ - if (is_empty(0)) { - log_h->error("Trying to reallocate an empty harq procedure\n"); - return; - } - if (alloc.L != allocation.L or alloc.RB_start != allocation.RB_start) { - is_adaptive = true; - } - allocation = alloc; -} - -bool ul_harq_proc::has_pending_retx() +bool ul_harq_proc::has_pending_retx() const { - return active[0] and has_pending_retx_common(0) and need_ack; + return has_pending_retx_common(0); } -bool ul_harq_proc::is_adaptive_retx() +bool ul_harq_proc::is_adaptive_retx() const { return is_adaptive and has_pending_retx(); } -bool ul_harq_proc::is_rar_tx() -{ - return is_rar; -} - -bool ul_harq_proc::is_new_tx() +void ul_harq_proc::new_tx(uint32_t tti_, int mcs, int tbs, ul_harq_proc::ul_alloc_t alloc, uint32_t max_retx_) { - return active[0] and not has_pending_retx(); -} - -void ul_harq_proc::new_tx(uint32_t tti_, int mcs, int tbs) -{ - need_ack = true; + max_retx = (uint32_t)max_retx_; + is_adaptive = false; + allocation = alloc; new_tx_common(0, tti_, mcs, tbs); pending_data = tbs; + pending_ack = NULL_ACK; } -void 
ul_harq_proc::new_retx(uint32_t tb_idx, uint32_t tti_, int* mcs, int* tbs) +void ul_harq_proc::new_retx(uint32_t tb_idx, uint32_t tti_, int* mcs, int* tbs, ul_harq_proc::ul_alloc_t alloc) { + if (alloc.L != allocation.L or alloc.RB_start != allocation.RB_start) { + is_adaptive = true; + } + allocation = alloc; new_retx_common(tb_idx, tti_, mcs, tbs); } -bool ul_harq_proc::has_pending_ack() +void ul_harq_proc::set_ack(uint32_t tb_idx, bool ack_) { - bool ret = need_ack; - - // Reset if already received a positive ACK - if (active[0] && ack[0]) { - active[0] = false; - } - if (!active[0]) { - need_ack = false; - } - return ret; + pending_ack = ack_ ? ACK : NACK; + set_ack_common(tb_idx, ack_); } - - -void ul_harq_proc::reset_pending_data() +bool ul_harq_proc::has_pending_ack() const { - if (!active[0]) { - pending_data = 0; - } + return pending_ack != NULL_ACK; } - -uint32_t ul_harq_proc::get_pending_data() +bool ul_harq_proc::get_pending_ack() const { - return (uint32_t) pending_data; + return pending_ack == ACK; } -void ul_harq_proc::set_rar_mcs(uint32_t mcs) +void ul_harq_proc::reset_pending_data() { - rar_mcs = mcs; - has_rar_mcs = true; - is_rar = true; + reset_pending_data_common(); + pending_ack = NULL_ACK; + if (is_empty(0)) { + pending_data = 0; + } } -bool ul_harq_proc::get_rar_mcs(int *mcs) +uint32_t ul_harq_proc::get_pending_data() const { - if (has_rar_mcs) { - if (mcs) { - *mcs = (int) rar_mcs; - } - has_rar_mcs = false; - is_rar = false; - return true; - } - return false; + return (uint32_t) pending_data; } } diff --git a/srsenb/src/mac/scheduler_metric.cc b/srsenb/src/mac/scheduler_metric.cc index 3252d9774..401665a13 100644 --- a/srsenb/src/mac/scheduler_metric.cc +++ b/srsenb/src/mac/scheduler_metric.cc @@ -36,40 +36,39 @@ namespace srsenb { * *****************************************************************/ -void dl_metric_rr::sched_users(std::map& ue_db, - rbgmask_t* dl_mask, - uint32_t nof_ctrl_symbols_, - uint32_t tti) +void dl_metric_rr::set_log(srslte::log* log_) +{ + log_h = log_; +} + +void dl_metric_rr::sched_users(std::map& ue_db, sched::dl_tti_sched_t* tti_sched) { typedef std::map::iterator it_t; - current_tti = tti; - nof_ctrl_symbols = nof_ctrl_symbols_; - used_rbg = dl_mask; - available_rbg = (uint32_t)(used_rbg->size() - used_rbg->count()); // nof_rbg; + tti_alloc = tti_sched; if (ue_db.empty()) return; // give priority in a time-domain RR basis - uint32_t priority_idx = current_tti % (uint32_t)ue_db.size(); + uint32_t priority_idx = tti_alloc->get_tti_tx_dl() % (uint32_t)ue_db.size(); it_t iter = ue_db.begin(); - std::advance(iter,priority_idx); + std::advance(iter, priority_idx); for(uint32_t ue_count = 0 ; ue_count < ue_db.size() ; ++iter, ++ue_count) { if(iter==ue_db.end()) { iter = ue_db.begin(); // wrap around } - sched_ue *user = (sched_ue*) &iter->second; - user->set_dl_alloc(allocate_user(user)); + sched_ue* user = &iter->second; + allocate_user(user); } } bool dl_metric_rr::find_allocation(uint32_t nof_rbg, rbgmask_t* rbgmask) { - *rbgmask = ~(*used_rbg); + *rbgmask = ~(tti_alloc->get_dl_mask()); uint32_t i = 0; - for (; i < used_rbg->size() and nof_rbg > 0; ++i) { + for (; i < rbgmask->size() and nof_rbg > 0; ++i) { if (rbgmask->test(i)) { nof_rbg--; } @@ -79,26 +78,16 @@ bool dl_metric_rr::find_allocation(uint32_t nof_rbg, rbgmask_t* rbgmask) return nof_rbg == 0; } -void dl_metric_rr::update_allocation(rbgmask_t new_rbgmask) -{ - *used_rbg |= new_rbgmask; -} - -/** - * Checks if a mask can fit in the current RBG grid - * @param mask - * @return 
Returns true if all the mask entries set to true are empty - */ -bool dl_metric_rr::allocation_is_valid(rbgmask_t mask) -{ - return (mask & (*used_rbg)).none(); -} - dl_harq_proc* dl_metric_rr::allocate_user(sched_ue* user) { + if (tti_alloc->is_dl_alloc(user)) { + return nullptr; + } // FIXME: First do reTxs for all users. Only then do the rest. - dl_harq_proc *h = user->get_pending_dl_harq(current_tti); - uint32_t req_bytes = user->get_pending_dl_new_data_total(current_tti); + alloc_outcome_t code; + uint32_t tti_dl = tti_alloc->get_tti_tx_dl(); + dl_harq_proc* h = user->get_pending_dl_harq(tti_dl); + uint32_t req_bytes = user->get_pending_dl_new_data_total(tti_dl); // Schedule retx if we have space #if ASYNC_DL_SCHED @@ -106,23 +95,28 @@ dl_harq_proc* dl_metric_rr::allocate_user(sched_ue* user) #else if (h && !h->is_empty()) { #endif + // Try to reuse the same mask rbgmask_t retx_mask = h->get_rbgmask(); - uint32_t nof_rbg = retx_mask.count(); - if (nof_rbg <= available_rbg) { - // Try to reuse the same mask - if (allocation_is_valid(retx_mask)) { - update_allocation(retx_mask); - return h; - } + code = tti_alloc->alloc_dl_user(user, retx_mask, h->get_id()); + if (code == alloc_outcome_t::SUCCESS) { + return h; + } else if (code == alloc_outcome_t::DCI_COLLISION) { + // No DCIs available for this user. Move to next + return NULL; + } - // If previous mask does not fit, find another with exact same number of rbgs - if (find_allocation(nof_rbg, &retx_mask)) { - update_allocation(retx_mask); - h->set_rbgmask(retx_mask); + // If previous mask does not fit, find another with exact same number of rbgs + size_t nof_rbg = retx_mask.count(); + if (find_allocation(nof_rbg, &retx_mask)) { + code = tti_alloc->alloc_dl_user(user, retx_mask, h->get_id()); + if (code == alloc_outcome_t::SUCCESS) { return h; + } else if (code == alloc_outcome_t::DCI_COLLISION) { + return NULL; } } } + // If could not schedule the reTx, or there wasn't any pending retx, find an empty PID #if ASYNC_DL_SCHED h = user->get_empty_dl_harq(); @@ -132,13 +126,14 @@ dl_harq_proc* dl_metric_rr::allocate_user(sched_ue* user) #endif // Allocate resources based on pending data if (req_bytes) { - uint32_t pending_rbg = user->prb_to_rbg(user->get_required_prb_dl(req_bytes, nof_ctrl_symbols)); - rbgmask_t newtx_mask(used_rbg->size()); + uint32_t pending_rbg = user->prb_to_rbg(user->get_required_prb_dl(req_bytes, tti_alloc->get_nof_ctrl_symbols())); + rbgmask_t newtx_mask(tti_alloc->get_dl_mask().size()); find_allocation(pending_rbg, &newtx_mask); if (newtx_mask.any()) { // some empty spaces were found - update_allocation(newtx_mask); - h->set_rbgmask(newtx_mask); - return h; + code = tti_alloc->alloc_dl_user(user, newtx_mask, h->get_id()); + if (code == alloc_outcome_t::SUCCESS) { + return h; + } } } } @@ -152,12 +147,17 @@ dl_harq_proc* dl_metric_rr::allocate_user(sched_ue* user) * *****************************************************************/ -void ul_metric_rr::sched_users(std::map& ue_db, ul_mask_t* start_mask, uint32_t tti) +void ul_metric_rr::set_log(srslte::log* log_) +{ + log_h = log_; +} + +void ul_metric_rr::sched_users(std::map& ue_db, sched::ul_tti_sched_t* tti_sched) { typedef std::map::iterator it_t; - current_tti = tti; - used_rb = start_mask; + tti_alloc = tti_sched; + current_tti = tti_alloc->get_tti_tx_ul(); if(ue_db.size()==0) return; @@ -172,10 +172,8 @@ void ul_metric_rr::sched_users(std::map& ue_db, ul_mask_t* s if(iter==ue_db.end()) { iter = ue_db.begin(); // wrap around } - sched_ue *user = (sched_ue *) 
&iter->second; - if (user->get_ul_alloc() == NULL) { // can already be allocated for msg3 - user->set_ul_alloc(allocate_user_retx_prbs(user)); - } + sched_ue* user = &iter->second; + allocate_user_retx_prbs(user); } // give priority in a time-domain RR basis @@ -185,21 +183,11 @@ void ul_metric_rr::sched_users(std::map& ue_db, ul_mask_t* s if(iter==ue_db.end()) { iter = ue_db.begin(); // wrap around } - sched_ue *user = (sched_ue*) &iter->second; - if (user->get_ul_alloc() == NULL) { - user->set_ul_alloc(allocate_user_newtx_prbs(user)); - } + sched_ue* user = &iter->second; + allocate_user_newtx_prbs(user); } } -bool ul_metric_rr::allocation_is_valid(ul_harq_proc::ul_alloc_t alloc) -{ - if (alloc.RB_start + alloc.L > used_rb->size()) { - return false; - } - return not used_rb->any(alloc); -} - /** * Finds a range of L contiguous PRBs that are empty * @param L Size of the requested UL allocation in PRBs @@ -208,6 +196,7 @@ bool ul_metric_rr::allocation_is_valid(ul_harq_proc::ul_alloc_t alloc) */ bool ul_metric_rr::find_allocation(uint32_t L, ul_harq_proc::ul_alloc_t* alloc) { + const prbmask_t* used_rb = &tti_alloc->get_ul_mask(); bzero(alloc, sizeof(ul_harq_proc::ul_alloc_t)); for (uint32_t n = 0; n < used_rb->size() && alloc->L < L; n++) { if (not used_rb->test(n) && alloc->L == 0) { @@ -218,8 +207,8 @@ bool ul_metric_rr::find_allocation(uint32_t L, ul_harq_proc::ul_alloc_t* alloc) } else if (alloc->L > 0) { // avoid edges if (n < 3) { - alloc->RB_start = 0; - alloc->L = 0; + alloc->RB_start = 0; + alloc->L = 0; } else { break; } @@ -236,38 +225,34 @@ bool ul_metric_rr::find_allocation(uint32_t L, ul_harq_proc::ul_alloc_t* alloc) return alloc->L == L; } -bool ul_metric_rr::update_allocation(ul_harq_proc::ul_alloc_t alloc) -{ - bool ret = false; - if(allocation_is_valid(alloc)) { - used_rb->fill(alloc); - return true; - } - return ret; -} - ul_harq_proc* ul_metric_rr::allocate_user_retx_prbs(sched_ue *user) { - ul_harq_proc *h = user->get_ul_harq(current_tti); + if (tti_alloc->is_ul_alloc(user)) { + return NULL; + } + alloc_outcome_t ret; + ul_harq_proc* h = user->get_ul_harq(current_tti); // if there are procedures and we have space if (h->has_pending_retx()) { ul_harq_proc::ul_alloc_t alloc = h->get_alloc(); // If can schedule the same mask, do it - if (update_allocation(alloc)) { - h->set_realloc(alloc); + ret = tti_alloc->alloc_ul_user(user, alloc); + if (ret == alloc_outcome_t::SUCCESS) { return h; + } else if (ret == alloc_outcome_t::DCI_COLLISION) { + log_h->warning("SCHED: Couldn't find space in PDCCH for UL tx of rnti=0x%x\n", user->get_rnti()); + return NULL; } - // If not, try to find another mask in the current tti with the same number of PRBs if (find_allocation(alloc.L, &alloc)) { - if(not update_allocation(alloc)) { - printf("ERROR: Scheduler failed to allocate user\n"); - return NULL; + ret = tti_alloc->alloc_ul_user(user, alloc); + if (ret == alloc_outcome_t::SUCCESS) { + return h; + } else if (ret == alloc_outcome_t::DCI_COLLISION) { + log_h->warning("SCHED: Couldn't find space in PDCCH for UL tx of rnti=0x%x\n", user->get_rnti()); } - h->set_realloc(alloc); - return h; } } return NULL; @@ -275,8 +260,11 @@ ul_harq_proc* ul_metric_rr::allocate_user_retx_prbs(sched_ue *user) ul_harq_proc* ul_metric_rr::allocate_user_newtx_prbs(sched_ue* user) { - uint32_t pending_data = user->get_pending_ul_new_data(current_tti); - ul_harq_proc *h = user->get_ul_harq(current_tti); + if (tti_alloc->is_ul_alloc(user)) { + return NULL; + } + uint32_t pending_data = 
user->get_pending_ul_new_data(current_tti); + ul_harq_proc* h = user->get_ul_harq(current_tti); // find an empty PID if (h->is_empty(0) and pending_data) { @@ -285,12 +273,12 @@ ul_harq_proc* ul_metric_rr::allocate_user_newtx_prbs(sched_ue* user) find_allocation(pending_rb, &alloc); if (alloc.L > 0) { // at least one PRB was scheduled - if (not update_allocation(alloc)) { - printf("ERROR: Scheduler failed to allocate user\n"); - return NULL; + alloc_outcome_t ret = tti_alloc->alloc_ul_user(user, alloc); + if (ret == alloc_outcome_t::SUCCESS) { + return h; + } else if (ret == alloc_outcome_t::DCI_COLLISION) { + log_h->warning("SCHED: Couldn't find space in PDCCH for UL tx of rnti=0x%x\n", user->get_rnti()); } - h->set_alloc(alloc); - return h; } } return NULL; diff --git a/srsenb/src/mac/scheduler_ue.cc b/srsenb/src/mac/scheduler_ue.cc index bde192f14..6b5cd57a3 100644 --- a/srsenb/src/mac/scheduler_ue.cc +++ b/srsenb/src/mac/scheduler_ue.cc @@ -48,8 +48,6 @@ namespace srsenb { *******************************************************/ sched_ue::sched_ue() : - next_dl_harq_proc(NULL), - next_ul_harq_proc(NULL), has_pucch(false), power_headroom(0), rnti(0), @@ -94,6 +92,7 @@ void sched_ue::set_cfg(uint16_t rnti_, max_mcs_dl = 28; max_mcs_ul = 28; + max_msg3retx = cell_cfg->maxharq_msg3tx; cfg = *cfg_; @@ -105,7 +104,6 @@ void sched_ue::set_cfg(uint16_t rnti_, for (int i = 0; i < SCHED_MAX_HARQ_PROC; i++) { dl_harq[i].config(i, cfg.maxharq_tx, log_h); ul_harq[i].config(i, cfg.maxharq_tx, log_h); - dl_harq[i].set_rbgmask(rbgmask_t((uint32_t)ceil((float)cell.nof_prb / P))); } // Generate allowed CCE locations @@ -437,7 +435,9 @@ void sched_ue::tpc_dec() { *******************************************************/ // Generates a Format1 dci -int sched_ue::generate_format1(dl_harq_proc* h, sched_interface::dl_sched_data_t* data, uint32_t tti, uint32_t cfi) +// > return 0 if TBSalloc_type = SRSLTE_RA_ALLOC_TYPE0; - dci->type0_alloc.rbg_bitmask = (uint32_t)h->get_rbgmask().to_uint64(); + dci->type0_alloc.rbg_bitmask = (uint32_t)user_mask.to_uint64(); // If this is the first transmission for this UE, make room for MAC Contention Resolution ID bool need_conres_ce = false; @@ -456,9 +456,11 @@ int sched_ue::generate_format1(dl_harq_proc* h, sched_interface::dl_sched_data_t } if (h->is_empty(0)) { + // Get total available data to transmit (includes MAC header) uint32_t req_bytes = get_pending_dl_new_data_total_unlocked(tti); - uint32_t nof_prb = format1_count_prb((uint32_t)h->get_rbgmask().to_uint64(), cell.nof_prb); + uint32_t nof_prb = format1_count_prb((uint32_t)user_mask.to_uint64(), cell.nof_prb); + // Calculate exact number of RE for this PRB allocation srslte_pdsch_grant_t grant = {}; srslte_dl_sf_cfg_t dl_sf = {}; @@ -467,14 +469,15 @@ int sched_ue::generate_format1(dl_harq_proc* h, sched_interface::dl_sched_data_t srslte_ra_dl_grant_to_grant_prb_allocation(dci, &grant, cell.nof_prb); uint32_t nof_re = srslte_ra_dl_grant_nof_re(&cell, &dl_sf, &grant); + int mcs0 = fixed_mcs_dl; if (need_conres_ce and cell.nof_prb < 10) { // SRB0 Tx. 
Use a higher MCS for the PRACH to fit in 6 PRBs - tbs = srslte_ra_tbs_from_idx(srslte_ra_tbs_idx_from_mcs(MCS_FIRST_DL, false), nof_prb) / 8; - mcs = MCS_FIRST_DL; - } else if (fixed_mcs_dl < 0) { + mcs0 = MCS_FIRST_DL; + } + if (mcs0 < 0) { // dynamic MCS tbs = alloc_tbs_dl(nof_prb, nof_re, req_bytes, &mcs); } else { - tbs = srslte_ra_tbs_from_idx(srslte_ra_tbs_idx_from_mcs(fixed_mcs_dl, false), nof_prb) / 8; - mcs = fixed_mcs_dl; + tbs = srslte_ra_tbs_from_idx(srslte_ra_tbs_idx_from_mcs(mcs0, false), nof_prb) / 8; + mcs = mcs0; } if (tbs < MIN_DATA_TBS) { @@ -482,7 +485,7 @@ int sched_ue::generate_format1(dl_harq_proc* h, sched_interface::dl_sched_data_t return 0; } - h->new_tx(0, tti, mcs, tbs, data->dci.location.ncce); + h->new_tx(user_mask, 0, tti, mcs, tbs, data->dci.location.ncce); // Allocate MAC ConRes CE if (need_conres_ce) { @@ -503,7 +506,7 @@ int sched_ue::generate_format1(dl_harq_proc* h, sched_interface::dl_sched_data_t Debug("SCHED: Alloc format1 new mcs=%d, tbs=%d, nof_prb=%d, req_bytes=%d\n", mcs, tbs, nof_prb, req_bytes); } else { - h->new_retx(0, tti, &mcs, &tbs); + h->new_retx(user_mask, 0, tti, &mcs, &tbs, data->dci.location.ncce); Debug("SCHED: Alloc format1 previous mcs=%d, tbs=%d\n", mcs, tbs); } @@ -526,31 +529,27 @@ int sched_ue::generate_format1(dl_harq_proc* h, sched_interface::dl_sched_data_t } // Generates a Format2a dci -int sched_ue::generate_format2a(dl_harq_proc *h, - sched_interface::dl_sched_data_t *data, - uint32_t tti, - uint32_t cfi) +int sched_ue::generate_format2a( + dl_harq_proc* h, sched_interface::dl_sched_data_t* data, uint32_t tti, uint32_t cfi, const rbgmask_t& user_mask) { pthread_mutex_lock(&mutex); - int ret = generate_format2a_unlocked(h, data, tti, cfi); + int ret = generate_format2a_unlocked(h, data, tti, cfi, user_mask); pthread_mutex_unlock(&mutex); return ret; } // Generates a Format2a dci -int sched_ue::generate_format2a_unlocked(dl_harq_proc* h, - sched_interface::dl_sched_data_t* data, - uint32_t tti, - uint32_t cfi) +int sched_ue::generate_format2a_unlocked( + dl_harq_proc* h, sched_interface::dl_sched_data_t* data, uint32_t tti, uint32_t cfi, const rbgmask_t& user_mask) { bool tb_en[SRSLTE_MAX_TB] = {false}; srslte_dci_dl_t* dci = &data->dci; dci->alloc_type = SRSLTE_RA_ALLOC_TYPE0; - dci->type0_alloc.rbg_bitmask = (uint32_t)h->get_rbgmask().to_uint64(); + dci->type0_alloc.rbg_bitmask = (uint32_t)user_mask.to_uint64(); - uint32_t nof_prb = format1_count_prb((uint32_t)h->get_rbgmask().to_uint64(), cell.nof_prb); + uint32_t nof_prb = format1_count_prb((uint32_t)user_mask.to_uint64(), cell.nof_prb); // FIXME: format1??? 
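  // [Editor's note] On the FIXME: format1_count_prb() just counts the PRBs selected by a Type0
  // RBG bitmask, so reusing it for Format2/2A grants is believed safe despite the name.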
// Calculate exact number of RE for this PRB allocation srslte_pdsch_grant_t grant = {}; @@ -591,7 +590,7 @@ int sched_ue::generate_format2a_unlocked(dl_harq_proc* h, int tbs = 0; if (!h->is_empty(tb)) { - h->new_retx(tb, tti, &mcs, &tbs); + h->new_retx(user_mask, tb, tti, &mcs, &tbs, data->dci.location.ncce); Debug("SCHED: Alloc format2/2a previous mcs=%d, tbs=%d\n", mcs, tbs); } else if (tb_en[tb] && req_bytes && no_retx) { if (fixed_mcs_dl < 0) { @@ -600,7 +599,7 @@ int sched_ue::generate_format2a_unlocked(dl_harq_proc* h, tbs = srslte_ra_tbs_from_idx((uint32_t)srslte_ra_tbs_idx_from_mcs((uint32_t)fixed_mcs_dl, false), nof_prb) / 8; mcs = fixed_mcs_dl; } - h->new_tx(tb, tti, mcs, tbs, data->dci.location.ncce); + h->new_tx(user_mask, tb, tti, mcs, tbs, data->dci.location.ncce); int rem_tbs = tbs; int x = 0; @@ -643,16 +642,14 @@ int sched_ue::generate_format2a_unlocked(dl_harq_proc* h, } // Generates a Format2 dci -int sched_ue::generate_format2(dl_harq_proc *h, - sched_interface::dl_sched_data_t *data, - uint32_t tti, - uint32_t cfi) +int sched_ue::generate_format2( + dl_harq_proc* h, sched_interface::dl_sched_data_t* data, uint32_t tti, uint32_t cfi, const rbgmask_t& user_mask) { pthread_mutex_lock(&mutex); /* Call Format 2a (common) */ - int ret = generate_format2a_unlocked(h, data, tti, cfi); + int ret = generate_format2a_unlocked(h, data, tti, cfi, user_mask); /* Compute precoding information */ data->dci.format = SRSLTE_DCI_FORMAT2; @@ -667,40 +664,49 @@ int sched_ue::generate_format2(dl_harq_proc *h, return ret; } -int sched_ue::generate_format0(ul_harq_proc* h, sched_interface::ul_sched_data_t* data, uint32_t tti, bool cqi_request) +int sched_ue::generate_format0(sched_interface::ul_sched_data_t* data, + uint32_t tti, + ul_harq_proc::ul_alloc_t alloc, + bool needs_pdcch, + srslte_dci_location_t dci_pos, + int explicit_mcs) { pthread_mutex_lock(&mutex); + ul_harq_proc* h = get_ul_harq(tti); srslte_dci_ul_t* dci = &data->dci; - int mcs = 0; + bool cqi_request = needs_cqi_unlocked(tti, true); + + // Set DCI position + data->needs_pdcch = needs_pdcch; + dci->location = dci_pos; + + int mcs = (explicit_mcs >= 0) ? explicit_mcs : fixed_mcs_ul; int tbs = 0; - ul_harq_proc::ul_alloc_t allocation = h->get_alloc(); - - bool is_newtx = true; - if (h->get_rar_mcs(&mcs)) { - tbs = srslte_ra_tbs_from_idx(srslte_ra_tbs_idx_from_mcs(mcs, true), allocation.L) / 8; - h->new_tx(tti, mcs, tbs); - } else if (h->is_empty(0)) { - - uint32_t req_bytes = get_pending_ul_new_data_unlocked(tti); - - uint32_t N_srs = 0; - uint32_t nof_re = (2*(SRSLTE_CP_NSYMB(cell.cp)-1) - N_srs)*allocation.L*SRSLTE_NRE; - if (fixed_mcs_ul < 0) { - tbs = alloc_tbs_ul(allocation.L, nof_re, req_bytes, &mcs); + bool is_newtx = h->is_empty(0); + if (is_newtx) { + uint32_t nof_retx; + + // If Msg3, set a different number of retx + nof_retx = (data->needs_pdcch) ? get_max_retx() : max_msg3retx;
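// Worked example for the RE budget computed in the dynamic-MCS branch below,
// nof_re = (2 * (SRSLTE_CP_NSYMB(cp) - 1) - N_srs) * alloc.L * SRSLTE_NRE:
// assuming normal CP (SRSLTE_CP_NSYMB = 7), SRSLTE_NRE = 12 and N_srs = 0, each PRB
// carries (2 * 6) * 12 = 144 REs, so an allocation of L = 4 PRBs yields 576 REs for
// the TBS/MCS derivation in alloc_tbs_ul().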
+ + if (mcs >= 0) { + tbs = srslte_ra_tbs_from_idx(srslte_ra_tbs_idx_from_mcs(mcs, true), alloc.L) / 8; } else { - tbs = srslte_ra_tbs_from_idx(srslte_ra_tbs_idx_from_mcs(fixed_mcs_ul, true), allocation.L) / 8; - mcs = fixed_mcs_ul; + // dynamic mcs + uint32_t req_bytes = get_pending_ul_new_data_unlocked(tti); + uint32_t N_srs = 0; + uint32_t nof_re = (2 * (SRSLTE_CP_NSYMB(cell.cp) - 1) - N_srs) * alloc.L * SRSLTE_NRE; + tbs = alloc_tbs_ul(alloc.L, nof_re, req_bytes, &mcs); } - - h->new_tx(tti, mcs, tbs); + h->new_tx(tti, mcs, tbs, alloc, nof_retx); } else { - h->new_retx(0, tti, &mcs, NULL); - is_newtx = false; - tbs = srslte_ra_tbs_from_idx(srslte_ra_tbs_idx_from_mcs(mcs, true), allocation.L) / 8; + // retx + h->new_retx(0, tti, &mcs, NULL, alloc); + tbs = srslte_ra_tbs_from_idx(srslte_ra_tbs_idx_from_mcs(mcs, true), alloc.L) / 8; } data->tbs = tbs; @@ -708,7 +714,7 @@ int sched_ue::generate_format0(ul_harq_proc* h, sched_interface::ul_sched_data_t if (tbs > 0) { dci->rnti = rnti; dci->format = SRSLTE_DCI_FORMAT0; - dci->type2_alloc.riv = srslte_ra_type2_to_riv(allocation.L, allocation.RB_start, cell.nof_prb); + dci->type2_alloc.riv = srslte_ra_type2_to_riv(alloc.L, alloc.RB_start, cell.nof_prb); dci->tb.rv = sched::get_rvidx(h->nof_retx(0)); if (!is_newtx && h->is_adaptive_retx()) { dci->tb.mcs_idx = 28 + dci->tb.rv; @@ -966,12 +972,20 @@ bool sched_ue::is_sr_triggered() return sr; } -void sched_ue::reset_timeout_dl_harq(uint32_t tti) { +void sched_ue::reset_pending_pids(uint32_t tti_rx) +{ + uint32_t tti_tx_dl = TTI_TX(tti_rx), tti_tx_ul = TTI_RX_ACK(tti_rx); + + // UL harqs + get_ul_harq(tti_tx_ul)->reset_pending_data(); + + // DL harqs for (int i = 0; i < SCHED_MAX_HARQ_PROC; i++) { ... if (tti_diff > 50 and tti_diff < 10240 / 2) { - log_h->info("SCHED: pid=%d is old. tti_pid=%d, now is %d, resetting\n", i, dl_harq[i].get_tti(), tti); + log_h->info("SCHED: pid=%d is old. tti_pid=%d, now is %d, resetting\n", i, dl_harq[i].get_tti(), tti_tx_dl); dl_harq[i].reset(0); dl_harq[i].reset(1); } @@ -1030,26 +1044,6 @@ ul_harq_proc* sched_ue::get_ul_harq(uint32_t tti) return &ul_harq[tti % SCHED_MAX_HARQ_PROC]; } -void sched_ue::set_dl_alloc(dl_harq_proc* alloc) -{ - next_dl_harq_proc = alloc; -} - -dl_harq_proc* sched_ue::get_dl_alloc() -{ - return next_dl_harq_proc; -} - -void sched_ue::set_ul_alloc(ul_harq_proc* alloc) -{ - next_ul_harq_proc = alloc; -} - -ul_harq_proc* sched_ue::get_ul_alloc() -{ - return next_ul_harq_proc; -} - dl_harq_proc* sched_ue::find_dl_harq(uint32_t tti) { for (uint32_t i = 0; i < SCHED_MAX_HARQ_PROC; ++i) { @@ -1060,7 +1054,7 @@ dl_harq_proc* sched_ue::find_dl_harq(uint32_t tti) return NULL; } -const dl_harq_proc* sched_ue::get_dl_harq(uint32_t idx) const +dl_harq_proc* sched_ue::get_dl_harq(uint32_t idx) { return &dl_harq[idx]; } diff --git a/srsenb/test/mac/CMakeLists.txt b/srsenb/test/mac/CMakeLists.txt index da42d75e9..c881bfaf5 100644 --- a/srsenb/test/mac/CMakeLists.txt +++ b/srsenb/test/mac/CMakeLists.txt @@ -10,7 +10,7 @@ # # srsLTE is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details.
# # A copy of the GNU Affero General Public License can be found in diff --git a/srsenb/test/mac/scheduler_test_rand.cc b/srsenb/test/mac/scheduler_test_rand.cc index 13a69d52a..ca9c86e1a 100644 --- a/srsenb/test/mac/scheduler_test_rand.cc +++ b/srsenb/test/mac/scheduler_test_rand.cc @@ -21,6 +21,7 @@ #include <...> #include <...> +#include <random> #include <...> #include <...> #include <...> @@ -37,8 +38,14 @@ #include "srslte/phy/utils/debug.h" #include "srslte/radio/radio.h" -// Create classes -long int seed = time(NULL); +// uint32_t const seed = std::random_device()(); +uint32_t const seed = 2452071795; // time(NULL); +std::default_random_engine rand_gen(seed); +std::uniform_real_distribution<float> unif_dist(0, 1.0); +float randf() +{ + return unif_dist(rand_gen); +} uint32_t err_counter = 0; uint32_t warn_counter = 0; struct ue_stats_t { @@ -47,6 +54,18 @@ }; std::map<uint16_t, ue_stats_t> ue_stats; +template <typename MapContainer, typename Predicate> +void erase_if(MapContainer& c, Predicate should_remove) +{ + for (auto it = c.begin(); it != c.end();) { + if (should_remove(*it)) { + it = c.erase(it); + } else { + ++it; + } + } +} + /******************* * Logging * *******************/ @@ -77,7 +96,7 @@ void log_on_exit() for (auto& e : ue_stats) { log_out.info("0x%x: {DL RBs: %lu, UL RBs: %lu}\n", e.first, e.second.nof_dl_rbs, e.second.nof_ul_rbs); } - log_out.info("[TESTER] This was the seed: %ld\n", seed); + log_out.info("[TESTER] This was the seed: %u\n", seed); } #define Warning(fmt, ...) \ @@ -97,10 +116,6 @@ void log_on_exit() /******************* * Dummies * *******************/ -float randf() -{ - return (float)((double)rand() / (RAND_MAX)); -} struct sched_sim_args { struct tti_event_t { @@ -127,22 +142,6 @@ struct sched_sim_args { // Designed for testing purposes struct sched_tester : public srsenb::sched { - struct dl_harq_params_t { - uint32_t pid; - uint32_t nof_retxs; - uint32_t tti; - bool is_empty = true; - bool pending_retx = false; - dl_harq_params_t() = default; - dl_harq_params_t(const srsenb::dl_harq_proc& h, uint32_t tti_tx_dl) - { - pid = h.get_id(); - nof_retxs = h.nof_retx(0); - tti = h.get_tti(); - is_empty = h.is_empty(); - pending_retx = h.has_pending_retx(0, tti_tx_dl); // or h.has_pending_retx(1, h.get_tti()); - } - }; struct tester_user_results { uint32_t dl_pending_data = 0; uint32_t ul_pending_data = 0; ///< data pending for UL @@ -154,7 +153,8 @@ bool ul_retx_got_delayed = false; srsenb::sched_interface::ul_sched_data_t* ul_sched = NULL; // fast lookup srsenb::sched_interface::dl_sched_data_t* dl_sched = NULL; // fast lookup - dl_harq_params_t dl_harqs[2 * FDD_HARQ_DELAY_MS]; + srsenb::dl_harq_proc dl_harqs[2 * FDD_HARQ_DELAY_MS]; + srsenb::ul_harq_proc ul_harq; }; struct sched_tti_data { bool is_prach_tti_tx_ul = false; @@ -186,16 +186,22 @@ struct sched_tester : public srsenb::sched { uint32_t tti; bool dl_ack; uint32_t retx_delay; - dl_harq_params_t dl_harq; + srsenb::dl_harq_proc dl_harq; ack_info_t() : dl_ack(false), retx_delay(0) {} }; + struct ul_ack_info_t { + uint16_t rnti; + uint32_t tti_ack, tti_tx_ul; + bool ack = false; + srsenb::ul_harq_proc ul_harq; + }; sched_sim_args sim_args; // tester control data - typedef std::map<uint16_t, ue_info>::iterator ue_it_t; std::map<uint16_t, ue_info> tester_ues; std::multimap<uint32_t, ack_info_t> to_ack; + std::multimap<uint32_t, ul_ack_info_t> to_ul_ack; typedef std::multimap<uint32_t, ack_info_t>::iterator ack_it_t; // sched results @@ -206,14 +212,14 @@ srsenb::sched_interface::ue_cfg_t ue_cfg_); void rem_user(uint16_t rnti); void test_ra(); - void test_dci_locations(); + void test_tti_result();
void assert_no_empty_allocs(); void test_collisions(); void test_harqs(); void run_tti(uint32_t tti_rx); private: - void new_tti(uint32_t tti_); + void new_test_tti(uint32_t tti_); void process_tti_args(); void before_sched(); void process_results(); @@ -249,7 +255,7 @@ void sched_tester::rem_user(uint16_t rnti) tti_data.ue_data.erase(rnti); } -void sched_tester::new_tti(uint32_t tti_) +void sched_tester::new_test_tti(uint32_t tti_) { // NOTE: make a local copy, since some of these variables may be cleared during scheduling tti_data.tti_rx = tti_; @@ -332,8 +338,10 @@ void sched_tester::before_sched() for (uint32_t i = 0; i < 2 * FDD_HARQ_DELAY_MS; ++i) { const srsenb::dl_harq_proc* h = user->get_dl_harq(i); - tti_data.ue_data[rnti].dl_harqs[i] = dl_harq_params_t(*h, tti_data.tti_tx_dl); + tti_data.ue_data[rnti].dl_harqs[i] = *h; } + // NOTE: ACK might have just cleared the harq for tti_data.tti_tx_ul + tti_data.ue_data[rnti].ul_harq = *user->get_ul_harq(tti_data.tti_tx_ul); } // TODO: Check whether pending pending_rar.rar_tti correspond to a prach_tti @@ -358,7 +366,7 @@ void sched_tester::process_results() rnti); } - test_dci_locations(); + test_tti_result(); test_ra(); test_collisions(); assert_no_empty_allocs(); @@ -367,7 +375,7 @@ void sched_tester::process_results() void sched_tester::run_tti(uint32_t tti_rx) { - new_tti(tti_rx); + new_test_tti(tti_rx); log_out.info("[TESTER] ---- tti=%u | nof_ues=%lu ----\n", tti_rx, ue_db.size()); process_tti_args(); @@ -395,37 +403,39 @@ void sched_tester::test_ra() // Check whether RA has completed correctly int prach_tti = userinfo.prach_tti; - if (userinfo.msg3_tti <= prach_tti) { // Msg3 not yet sent - bool rar_not_sent = prach_tti >= userinfo.rar_tti; - uint32_t window[2] = {(uint32_t)prach_tti + 3, prach_tti + 3 + cfg.prach_rar_window}; - if (rar_not_sent) { - CondError(tti_data.tti_tx_dl > window[1], "[TESTER] There was no RAR scheduled within the RAR Window\n"); - if (tti_data.tti_tx_dl >= window[0]) { - for (uint32_t i = 0; i < tti_data.sched_result_dl.nof_rar_elems; ++i) { - for (uint32_t j = 0; j < tti_data.sched_result_dl.rar[i].nof_grants; ++j) { - if (tti_data.sched_result_dl.rar[i].msg3_grant[j].ra_id == userinfo.ra_id) { - userinfo.rar_tti = tti_data.tti_tx_dl; - } + if (userinfo.msg3_tti > prach_tti) { // Msg3 already scheduled + continue; + } + + bool rar_not_sent = prach_tti >= userinfo.rar_tti; + uint32_t window[2] = {(uint32_t)prach_tti + 3, prach_tti + 3 + cfg.prach_rar_window}; + if (rar_not_sent) { + CondError(tti_data.tti_tx_dl > window[1], "[TESTER] There was no RAR scheduled within the RAR Window\n"); + if (tti_data.tti_tx_dl >= window[0]) { + for (uint32_t i = 0; i < tti_data.sched_result_dl.nof_rar_elems; ++i) { + for (uint32_t j = 0; j < tti_data.sched_result_dl.rar[i].nof_grants; ++j) { + if (tti_data.sched_result_dl.rar[i].msg3_grant[j].ra_id == userinfo.ra_id) { + userinfo.rar_tti = tti_data.tti_tx_dl; } } } - } else { // RAR completed, check for Msg3 - uint32_t msg3_tti = (uint32_t)(userinfo.rar_tti + FDD_HARQ_DELAY_MS + MSG3_DELAY_MS) % 10240; - if (msg3_tti == tti_data.tti_tx_ul) { - for (uint32_t i = 0; i < tti_data.sched_result_ul.nof_dci_elems; ++i) { - if (tti_data.sched_result_ul.pusch[i].dci.rnti == rnti) { - CondError(tti_data.sched_result_ul.pusch[i].needs_pdcch, - "[TESTER] Msg3 allocations do not require PDCCH\n"); - CondError(tti_data.ul_pending_msg3.rnti != rnti, "[TESTER] The UL pending msg3 RNTI did not match\n"); - CondError(not tti_data.ul_pending_msg3.enabled, "[TESTER] The UL pending msg3 RNTI 
did not match\n"); - userinfo.msg3_tti = tti_data.tti_tx_ul; - msg3_count++; - } + } + } else { // RAR completed, check for Msg3 + uint32_t msg3_tti = (uint32_t)(userinfo.rar_tti + FDD_HARQ_DELAY_MS + MSG3_DELAY_MS) % 10240; + if (msg3_tti == tti_data.tti_tx_ul) { + for (uint32_t i = 0; i < tti_data.sched_result_ul.nof_dci_elems; ++i) { + if (tti_data.sched_result_ul.pusch[i].dci.rnti == rnti) { + CondError(tti_data.sched_result_ul.pusch[i].needs_pdcch, + "[TESTER] Msg3 allocations do not require PDCCH\n"); + CondError(tti_data.ul_pending_msg3.rnti != rnti, "[TESTER] The UL pending msg3 RNTI did not match\n"); + CondError(not tti_data.ul_pending_msg3.enabled, "[TESTER] The UL pending msg3 was not enabled\n"); + userinfo.msg3_tti = tti_data.tti_tx_ul; + msg3_count++; } - CondError(msg3_count == 0, "[TESTER] No UL msg3 allocation was made\n"); - } else if (msg3_tti < tti_data.tti_tx_ul) { - TestError("[TESTER] No UL msg3 allocation was made\n"); } + CondError(msg3_count == 0, "[TESTER] No UL msg3 allocation was made\n"); + } else if (msg3_tti < tti_data.tti_tx_ul) { + TestError("[TESTER] No UL msg3 allocation was made\n"); } } } @@ -448,7 +458,7 @@ void sched_tester::assert_no_empty_allocs() TestError("[TESTER] There was a user without data that got allocated in UL\n"); } srsenb::ul_harq_proc* hul = user->get_ul_harq(tti_data.tti_tx_ul); - iter.second.ul_retx_got_delayed = iter.second.has_ul_retx and hul->is_new_tx(); + iter.second.ul_retx_got_delayed = iter.second.has_ul_retx and iter.second.ul_harq.is_empty(0); tti_data.total_ues.ul_retx_got_delayed |= iter.second.ul_retx_got_delayed; // Retxs cannot give space to newtx allocations CondError( @@ -470,10 +480,12 @@ void sched_tester::assert_no_empty_allocs() /** * Tests whether there were collisions in the DCI allocations */ -void sched_tester::test_dci_locations() +void sched_tester::test_tti_result() { - // checks if there is any collision. If not, fills the mask - auto try_fill = [&](const srslte_dci_location_t& dci_loc, const char* ch) { + tti_sched_t* tti_sched = get_tti_sched(tti_data.tti_rx); + + // Helper Function: checks if there is any collision.
If not, fills the mask + auto try_cce_fill = [&](const srslte_dci_location_t& dci_loc, const char* ch) { uint32_t cce_start = dci_loc.ncce, cce_stop = dci_loc.ncce + (1u << dci_loc.L); if (tti_data.used_cce.any(cce_start, cce_stop)) { TestError("[TESTER] %s DCI collision between CCE positions (%u, %u)\n", ch, cce_start, cce_stop); @@ -481,30 +493,59 @@ tti_data.used_cce.fill(cce_start, cce_stop); }; - // verify there are no dci collisions for UL, DL data, BC, RAR + /* verify there are no dci collisions for UL, DL data, BC, RAR */ for (uint32_t i = 0; i < tti_data.sched_result_ul.nof_dci_elems; ++i) { - if (not tti_data.sched_result_ul.pusch[i].needs_pdcch) { - // In case of adaptive retx or Msg3 + const auto& pusch = tti_data.sched_result_ul.pusch[i]; + CondError(pusch.tbs == 0, "Allocated PUSCH with invalid TBS=%d\n", pusch.tbs); + CondError(ue_db.count(pusch.dci.rnti) == 0, "The allocated rnti=0x%x does not exist\n", pusch.dci.rnti); + if (not pusch.needs_pdcch) { + // In case of non-adaptive retx or Msg3 continue; } - srslte_dci_location_t& dci_loc = tti_data.sched_result_ul.pusch[i].dci.location; - CondError(dci_loc.L == 0, "[TESTER] Invalid aggregation level %d\n", dci_loc.L); // TODO: Extend this test - try_fill(dci_loc, "UL"); + CondError(pusch.dci.location.L == 0, + "[TESTER] Invalid aggregation level %d\n", + pusch.dci.location.L); // TODO: Extend this test + try_cce_fill(pusch.dci.location, "UL"); } for (uint32_t i = 0; i < tti_data.sched_result_dl.nof_data_elems; ++i) { - try_fill(tti_data.sched_result_dl.data[i].dci.location, "DL data"); + auto& data = tti_data.sched_result_dl.data[i]; + try_cce_fill(data.dci.location, "DL data"); + CondError(ue_db.count(data.dci.rnti) == 0, "Allocated rnti=0x%x that does not exist\n", data.dci.rnti); } for (uint32_t i = 0; i < tti_data.sched_result_dl.nof_bc_elems; ++i) { - try_fill(tti_data.sched_result_dl.bc[i].dci.location, "DL BC"); + auto& bc = tti_data.sched_result_dl.bc[i]; + try_cce_fill(bc.dci.location, "DL BC"); + if (bc.type == sched_interface::dl_sched_bc_t::BCCH) { + CondError(bc.index >= MAX_SIBS, "Invalid SIB idx=%d\n", bc.index + 1); + CondError(bc.tbs < cfg.sibs[bc.index].len, + "Allocated BC process with TBS=%d < sib_len=%d\n", + bc.tbs, + cfg.sibs[bc.index].len); + } else if (bc.type == sched_interface::dl_sched_bc_t::PCCH) { + CondError(bc.tbs == 0, "Allocated paging process with invalid TBS=%d\n", bc.tbs); + } else { + TestError("Invalid broadcast process id=%d\n", (int)bc.type); + } } for (uint32_t i = 0; i < tti_data.sched_result_dl.nof_rar_elems; ++i) { - try_fill(tti_data.sched_result_dl.rar[i].dci.location, "DL RAR"); + const auto& rar = tti_data.sched_result_dl.rar[i]; + try_cce_fill(rar.dci.location, "DL RAR"); + CondError(rar.tbs == 0, "Allocated RAR process with invalid TBS=%d\n", rar.tbs); + for (uint32_t j = 0; j < rar.nof_grants; ++j) { + const auto& msg3_grant = rar.msg3_grant[j]; + uint32_t pending_tti = (tti_sched->get_tti_tx_dl() + MSG3_DELAY_MS + TX_DELAY) % 10; + CondError(not pending_msg3[pending_tti].enabled, "Pending Msg3 should have been set\n"); + uint32_t rba = + srslte_ra_type2_to_riv(pending_msg3[pending_tti].L, pending_msg3[pending_tti].n_prb, cfg.cell.nof_prb); + CondError(msg3_grant.grant.rba != rba, "Pending Msg3 RBA is not valid\n"); + } }
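// The try_cce_fill lambda above is the usual "test range, then fill range" bitset pattern.
// A self-contained sketch of the same idea (hypothetical, with std::vector<bool> standing
// in for srslte::bounded_bitset):
//
//   #include <cstdint>
//   #include <vector>
//
//   // returns false if any CCE in [start, stop) is taken, otherwise marks them used
//   bool try_fill_range(std::vector<bool>& mask, uint32_t start, uint32_t stop)
//   {
//     for (uint32_t i = start; i < stop; ++i) {
//       if (mask[i]) {
//         return false; // collision
//       }
//     }
//     for (uint32_t i = start; i < stop; ++i) {
//       mask[i] = true;
//     }
//     return true;
//   }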
- // verify if sched_result "used_cce" coincide with sched "used_cce" - if (tti_data.used_cce != sched_vars.tti_vars(tti_data.tti_rx).used_cce) { - TestError("[TESTER] The used_cce do not match: %s\n", - sched_vars.tti_vars(tti_data.tti_rx).used_cce.to_string().c_str()); + /* verify that sched_result "used_cce" coincides with sched "used_cce" */ + auto* tti_alloc = get_tti_sched(tti_data.tti_rx); + if (tti_data.used_cce != tti_alloc->get_pdcch_mask()) { + std::string mask_str = tti_alloc->get_pdcch_mask().to_string(); + TestError("[TESTER] The used_cce do not match: (%s!=%s)\n", mask_str.c_str(), tti_data.used_cce.to_hex().c_str()); } // FIXME: Check postponed retxs @@ -523,46 +564,103 @@ void sched_tester::test_harqs() { - // check consistency of harq procedures and allocations + /* check consistency of DL harq procedures and allocations */ for (uint32_t i = 0; i < tti_data.sched_result_dl.nof_data_elems; ++i) { - uint32_t h_id = tti_data.sched_result_dl.data[i].dci.pid; - uint16_t rnti = tti_data.sched_result_dl.data[i].dci.rnti; + const auto& data = tti_data.sched_result_dl.data[i]; + uint32_t h_id = data.dci.pid; + uint16_t rnti = data.dci.rnti; const srsenb::dl_harq_proc* h = ue_db[rnti].get_dl_harq(h_id); - CondError(h == NULL, "[TESTER] scheduled DL harq pid=%d does not exist\n", h_id); + CondError(h == nullptr, "[TESTER] scheduled DL harq pid=%d does not exist\n", h_id); CondError(h->is_empty(), "[TESTER] Cannot schedule an empty harq proc\n"); CondError(h->get_tti() != tti_data.tti_tx_dl, - "[TESTER] The scheduled DL harq pid=%d does not a valid tti=%u", + "[TESTER] The scheduled DL harq pid=%d does not have a valid tti=%u\n", h_id, tti_data.tti_tx_dl); + CondError(h->get_n_cce() != data.dci.location.ncce, "[TESTER] Harq DCI location does not match with result\n"); - if (tti_data.ue_data[rnti].dl_harqs[h_id].pending_retx) { // retx - CondError(tti_data.ue_data[rnti].dl_harqs[h_id].nof_retxs + 1 != h->nof_retx(0), + if (tti_data.ue_data[rnti].dl_harqs[h_id].has_pending_retx(0, tti_data.tti_tx_dl)) { // retx + CondError(tti_data.ue_data[rnti].dl_harqs[h_id].nof_retx(0) + 1 != h->nof_retx(0), "[TESTER] A dl harq of user rnti=0x%x was likely overwritten.\n", rnti); + CondError(h->nof_retx(0) >= sim_args.ue_cfg.maxharq_tx, + "[TESTER] The number of retx=%d exceeded its max=%d\n", + h->nof_retx(0), + sim_args.ue_cfg.maxharq_tx); } else { // newtx CondError(h->nof_retx(0) != 0, "[TESTER] A new harq was scheduled but with invalid number of retxs\n"); } } + for (uint32_t i = 0; i < tti_data.sched_result_ul.nof_dci_elems; ++i) { + const auto& pusch = tti_data.sched_result_ul.pusch[i]; + uint16_t rnti = pusch.dci.rnti; + const auto& ue_data = tti_data.ue_data[rnti]; + const srsenb::ul_harq_proc* h = ue_db[rnti].get_ul_harq(tti_data.tti_tx_ul); + CondError(h == nullptr or h->is_empty(), "[TESTER] scheduled UL harq does not exist or is empty\n"); + CondError(h->get_tti() != tti_data.tti_tx_ul, + "[TESTER] The scheduled UL harq does not have a valid tti=%u\n", + tti_data.tti_tx_ul); + CondError(h->has_pending_ack(), "[TESTER] At the end of the TTI, there shouldn't be any pending ACKs\n"); + + if (h->has_pending_retx()) { + // retx + CondError(ue_data.ul_harq.is_empty(0), "[TESTER] reTx in an UL harq that was empty\n"); + CondError(h->nof_retx(0) != ue_data.ul_harq.nof_retx(0) + 1, + "[TESTER] A retx UL harq was scheduled but with invalid number of retxs\n"); + CondError(h->is_adaptive_retx() and not pusch.needs_pdcch, "[TESTER] Adaptive retxs need PDCCH alloc\n"); + } else { + CondError(h->nof_retx(0) != 0, "[TESTER] A new harq was scheduled but with invalid number of retxs\n"); + CondError(not ue_data.ul_harq.is_empty(0), "[TESTER] UL new tx in a UL harq that was not empty\n"); + } + }
+ + /* Check PHICH allocations */ + for (uint32_t i = 0; i < tti_data.sched_result_ul.nof_phich_elems; ++i) { + const auto& phich = tti_data.sched_result_ul.phich[i]; + CondError(tti_data.ue_data.count(phich.rnti) == 0, "[TESTER] Allocated PHICH rnti no longer exists\n"); + const auto& hprev = tti_data.ue_data[phich.rnti].ul_harq; + const auto* h = ue_db[phich.rnti].get_ul_harq(tti_data.tti_tx_ul); + CondError(not hprev.has_pending_ack(), "[TESTER] Allocated PHICH did not have a pending ack\n"); + bool maxretx_flag = hprev.nof_retx(0) + 1 >= hprev.max_nof_retx(); + if (phich.phich == sched_interface::ul_sched_phich_t::ACK) { + CondError(!hprev.is_empty(), "[TESTER] ack phich for UL harq that is not empty\n"); + } else { + CondError(h->get_pending_data() == 0 and !maxretx_flag, "[TESTER] NACKed harq has no pending data\n"); + } + } + for (const auto& ue : ue_db) { + const auto& hprev = tti_data.ue_data[ue.first].ul_harq; + if (not hprev.has_pending_ack()) + continue; + uint32_t i = 0; + for (; i < tti_data.sched_result_ul.nof_phich_elems; ++i) { + const auto& phich = tti_data.sched_result_ul.phich[i]; + if (phich.rnti == ue.first) + break; + } + CondError(i == tti_data.sched_result_ul.nof_phich_elems, + "[TESTER] harq had pending ack but no phich was allocated\n"); + } + // schedule future acks for (uint32_t i = 0; i < tti_data.sched_result_dl.nof_data_elems; ++i) { ack_info_t ack_data; ack_data.rnti = tti_data.sched_result_dl.data[i].dci.rnti; ack_data.tti = FDD_HARQ_DELAY_MS + tti_data.tti_tx_dl; const srsenb::dl_harq_proc* dl_h = ue_db[ack_data.rnti].get_dl_harq(tti_data.sched_result_dl.data[i].dci.pid); - ack_data.dl_harq = dl_harq_params_t(*dl_h, tti_data.tti_tx_dl); - if (ack_data.dl_harq.nof_retxs == 0) { + ack_data.dl_harq = *dl_h; + if (ack_data.dl_harq.nof_retx(0) == 0) { ack_data.dl_ack = randf() > sim_args.P_retx; } else { // always ack after three retxs - ack_data.dl_ack = ack_data.dl_harq.nof_retxs == 3; + ack_data.dl_ack = ack_data.dl_harq.nof_retx(0) == 3; } // Remove harq from the ack list if there was a harq rewrite ack_it_t it = to_ack.begin(); while (it != to_ack.end() and it->first < ack_data.tti) { - if (it->second.rnti == ack_data.rnti and it->second.dl_harq.pid == ack_data.dl_harq.pid) { + if (it->second.rnti == ack_data.rnti and it->second.dl_harq.get_id() == ack_data.dl_harq.get_id()) { CondError(it->second.tti + 2 * FDD_HARQ_DELAY_MS > ack_data.tti, "[TESTER] The retx dl harq id=%d was transmitted too soon\n", - ack_data.dl_harq.pid); + ack_data.dl_harq.get_id()); ack_it_t toerase_it = it++; to_ack.erase(toerase_it); continue; @@ -574,72 +672,78 @@ void sched_tester::test_harqs() to_ack.insert(std::make_pair(ack_data.tti, ack_data)); } - // // Check whether some pids got old - // for (auto& e : ue_db) { - // for (int i = 0; i < 2 * FDD_HARQ_DELAY_MS; i++) { - // if (not(e.second.get_dl_harq(i)->is_empty(0) and e.second.get_dl_harq(1))) { - // if (srslte_tti_interval(tti_data.tti_tx_dl, e.second.get_dl_harq(i)->get_tti()) > 49) { - // TestError("[TESTER] The pid=%d for rnti=0x%x got old.\n", e.second.get_dl_harq(i)->get_id(), e.first); - // } - // } - // } - // } + /* Schedule UL ACKs */ + for (uint32_t i = 0; i < tti_data.sched_result_ul.nof_dci_elems; ++i) { + const auto& pusch = tti_data.sched_result_ul.pusch[i]; + ul_ack_info_t ack_data; + ack_data.rnti = pusch.dci.rnti; + ack_data.ul_harq = *ue_db[ack_data.rnti].get_ul_harq(tti_data.tti_tx_ul); + ack_data.tti_tx_ul = tti_data.tti_tx_ul; + ack_data.tti_ack = tti_data.tti_tx_ul +
FDD_HARQ_DELAY_MS; + if (ack_data.ul_harq.nof_retx(0) == 0) { + ack_data.ack = randf() > sim_args.P_retx; + } else { + ack_data.ack = ack_data.ul_harq.nof_retx(0) == 3; + } + to_ul_ack.insert(std::make_pair(ack_data.tti_tx_ul, ack_data)); + } + + // Check whether some pids got old + for (auto& user : ue_db) { + for (int i = 0; i < 2 * FDD_HARQ_DELAY_MS; i++) { + if (not(user.second.get_dl_harq(i)->is_empty(0) and user.second.get_dl_harq(i)->is_empty(1))) { + if (srslte_tti_interval(tti_data.tti_tx_dl, user.second.get_dl_harq(i)->get_tti()) > 49) { + TestError("[TESTER] The pid=%d for rnti=0x%x got old.\n", user.second.get_dl_harq(i)->get_id(), user.first); + } + } + } + } } void sched_tester::test_collisions() { - srsenb::ul_mask_t ul_allocs; - ul_allocs.resize(cfg.cell.nof_prb); + tti_sched_t* tti_sched = get_tti_sched(tti_data.tti_rx); + + srsenb::prbmask_t ul_allocs(cfg.cell.nof_prb); + + // Helper function to fill the PRB mask + auto try_ul_fill = [&](srsenb::ul_harq_proc::ul_alloc_t alloc, const char* ch_str, bool strict = true) { + CondError((alloc.RB_start + alloc.L) > cfg.cell.nof_prb, + "[TESTER] Allocated RBs (%d,%d) out of bounds\n", + alloc.RB_start, + alloc.RB_start + alloc.L); + CondError(alloc.L == 0, "[TESTER] Allocations must have at least one PRB\n"); + if (strict and ul_allocs.any(alloc.RB_start, alloc.RB_start + alloc.L)) { + TestError("[TESTER] There is a collision of %s alloc=(%d,%d) and cumulative_mask=%s\n", + ch_str, + alloc.RB_start, + alloc.RB_start + alloc.L, + ul_allocs.to_hex().c_str()); + } + ul_allocs.fill(alloc.RB_start, alloc.RB_start + alloc.L, true); }; - // TEST: Check if there is space for PRACH + /* TEST: Check if there is space for PRACH */ if (tti_data.is_prach_tti_tx_ul) { - srsenb::ul_harq_proc::ul_alloc_t prach_alloc = {cfg.prach_freq_offset, 6}; - if (ul_allocs.any(prach_alloc)) { - TestError("[TESTER] There is a collision with the PRACH\n"); - } - ul_allocs.fill(prach_alloc); + try_ul_fill({cfg.prach_freq_offset, 6}, "PRACH"); } - // TEST: check collisions in the UL PUSCH and PUCCH + /* TEST: check collisions in the UL PUSCH and PUCCH */ for (uint32_t i = 0; i < tti_data.sched_result_ul.nof_dci_elems; ++i) { uint32_t L, RBstart; srslte_ra_type2_from_riv( tti_data.sched_result_ul.pusch[i].dci.type2_alloc.riv, &L, &RBstart, cfg.cell.nof_prb, cfg.cell.nof_prb); - CondError((RBstart + L) > cfg.cell.nof_prb, - "[TESTER] Allocated RBs (%d,%d) out of bounds (0,%d)\n", - RBstart, - RBstart + L, - cfg.cell.nof_prb); + try_ul_fill({RBstart, L}, "PUSCH"); ue_stats[tti_data.sched_result_ul.pusch[i].dci.rnti].nof_ul_rbs += L; - - if (ul_allocs.any(RBstart, RBstart + L)) { - TestError("[TESTER] There is a collision for UE UL data alloc=(%d,%d) with joint mask=%s\n", - RBstart, - RBstart + L, - ul_allocs.to_hex().c_str()); - } - ul_allocs.fill(RBstart, RBstart + L, true); } - // Fill PUCCH - if (cfg.cell.nof_prb != 6 or (not tti_data.is_prach_tti_tx_ul and not tti_data.ul_pending_msg3.enabled)) { - if (ul_allocs.any(0, cfg.nrb_pucch) or ul_allocs.any(cfg.cell.nof_prb - cfg.nrb_pucch, cfg.cell.nof_prb)) { - TestError("[TESTER] There is a collision with the PUCCH\n"); - } - } - ul_allocs.fill(0, cfg.nrb_pucch); - ul_allocs.fill(cfg.cell.nof_prb - cfg.nrb_pucch, cfg.cell.nof_prb); + /* TEST: check collisions with PUCCH */ + bool strict = cfg.cell.nof_prb != 6 or (not tti_data.is_prach_tti_tx_ul and not tti_data.ul_pending_msg3.enabled); + try_ul_fill({0, (uint32_t)cfg.nrb_pucch}, "PUCCH", strict); + try_ul_fill({cfg.cell.nof_prb - cfg.nrb_pucch, (uint32_t)cfg.nrb_pucch}, "PUCCH", strict);
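// For reference, the Type2 RIV that the PUSCH check above decodes with
// srslte_ra_type2_from_riv() encodes a contiguous allocation (RB_start, L) as follows
// (sketch per TS 36.213, with N = nof_prb; assumes 1 <= L <= N - RB_start):
//
//   uint32_t type2_riv(uint32_t L, uint32_t RB_start, uint32_t N)
//   {
//     return (L - 1 <= N / 2) ? N * (L - 1) + RB_start
//                             : N * (N - L + 1) + (N - 1 - RB_start);
//   }
//
// e.g. N = 100, RB_start = 10, L = 4 gives RIV = 100 * 3 + 10 = 310.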
"PUCCH", strict); - // TEST: Check if there is a collision with Msg3 or Msg3 alloc data is not consistent + /* TEST: Check if there is a collision with Msg3 or Msg3 alloc data is not consistent */ if (tti_data.ul_pending_msg3.enabled) { - srsenb::ul_harq_proc::ul_alloc_t msg3_alloc = {tti_data.ul_pending_msg3.n_prb, tti_data.ul_pending_msg3.L}; - for (uint32_t i = msg3_alloc.RB_start; i < msg3_alloc.RB_start + msg3_alloc.L; ++i) { - if (not ul_allocs.test(i)) { - TestError( - "[TESTER] The RB %d was not allocated for the msg3 alloc=(%d,%d)\n", i, msg3_alloc.RB_start, msg3_alloc.L); - } - } - bool passed = false; for (uint32_t i = 0; i < tti_data.sched_result_ul.nof_dci_elems; ++i) { if (tti_data.ul_pending_msg3.rnti == tti_data.sched_result_ul.pusch[i].dci.rnti) { @@ -657,15 +761,12 @@ void sched_tester::test_collisions() CondError(not passed, "[TESTER] No Msg3 allocation was found in the sched_result\n"); } - // NOTE: Not possible until DCI conflict issue is resolved - // // TEST: final mask - // if(ul_allocs != ul_mask) { - // TestError("[TESTER] The UL PRB mask and the scheduler result UL mask are not consistent\n"); - // } + /* TEST: check whether cumulative UL PRB masks coincide */ + if (ul_allocs != tti_sched->get_ul_mask()) { + TestError("[TESTER] The UL PRB mask and the scheduler result UL mask are not consistent\n"); + } - srslte::bounded_bitset<100, true> dl_allocs, alloc_mask; - dl_allocs.resize(cfg.cell.nof_prb); - alloc_mask.resize(cfg.cell.nof_prb); + srslte::bounded_bitset<100, true> dl_allocs(cfg.cell.nof_prb), alloc_mask(cfg.cell.nof_prb); srslte_dl_sf_cfg_t dl_sf; ZERO_OBJECT(dl_sf); @@ -691,16 +792,14 @@ void sched_tester::test_collisions() ue_stats[tti_data.sched_result_dl.data[i].dci.rnti].nof_dl_rbs += alloc_mask.count(); } for (uint32_t i = 0; i < tti_data.sched_result_dl.nof_bc_elems; ++i) { - alloc_mask.reset(); srslte_pdsch_grant_t grant; CondError(srslte_ra_dl_dci_to_grant(&cfg.cell, &dl_sf, SRSLTE_TM1, &tti_data.sched_result_dl.bc[i].dci, &grant) == SRSLTE_ERROR, "Failed to decode PDSCH grant\n"); + alloc_mask.reset(); for (uint32_t i = 0; i < alloc_mask.size(); ++i) { if (grant.prb_idx[0][i]) { alloc_mask.set(i); - } else { - alloc_mask.reset(i); } } if ((dl_allocs & alloc_mask).any()) { @@ -744,47 +843,91 @@ void sched_tester::test_collisions() rbgmask.reset(i); } } - if (rbgmask != dl_mask and not fail_dci_alloc) { + if (rbgmask != get_tti_sched(tti_data.tti_rx)->get_dl_mask()) { TestError("[TESTER] The UL PRB mask and the scheduler result UL mask are not consistent\n"); } } void sched_tester::ack_txs() { - typedef std::map::iterator it_t; - - for (ack_it_t it = to_ack.begin(); it != to_ack.end() and it->first <= tti_data.tti_rx;) { - if (ue_db.count(it->second.rnti) == 0) { - ack_it_t erase_it = it++; - to_ack.erase(erase_it); + /* check if user was removed. 
If so, clean respective acks */ + erase_if(to_ack, + [this](std::pair<const uint32_t, ack_info_t>& elem) { return this->ue_db.count(elem.second.rnti) == 0; }); + erase_if(to_ul_ack, + [this](std::pair<const uint32_t, ul_ack_info_t>& elem) { return this->ue_db.count(elem.second.rnti) == 0; }); + + /* Ack DL HARQs */ + for (const auto& ack_it : to_ack) { + if (ack_it.second.tti != tti_data.tti_rx) { continue; } - if (it->second.tti == tti_data.tti_rx) { - bool ret = false; - for (uint32_t tb = 0; tb < SRSLTE_MAX_TB; ++tb) { - ret |= dl_ack_info(tti_data.tti_rx, it->second.rnti, tb, it->second.dl_ack) > 0; + srsenb::dl_harq_proc* h = ue_db[ack_it.second.rnti].get_dl_harq(ack_it.second.dl_harq.get_id()); + const srsenb::dl_harq_proc& hack = ack_it.second.dl_harq; + CondError(hack.is_empty(), "[TESTER] The acked DL harq was not active\n"); + + bool ret = false; + for (uint32_t tb = 0; tb < SRSLTE_MAX_TB; ++tb) { + if (ack_it.second.dl_harq.is_empty(tb)) { + continue; } - CondError(not ret, "[TESTER] The dl harq proc that was acked does not exist\n"); - if (it->second.dl_ack) - log_out.info( - "[TESTER] DL ACK tti=%u rnti=0x%x pid=%d\n", tti_data.tti_rx, it->second.rnti, it->second.dl_harq.pid); - ack_it_t erase_it = it++; - to_ack.erase(erase_it); - continue; + ret |= dl_ack_info(tti_data.tti_rx, ack_it.second.rnti, tb, ack_it.second.dl_ack) > 0; + } + CondError(not ret, "[TESTER] The dl harq proc that was acked does not exist\n"); + + if (ack_it.second.dl_ack) { + CondError(!h->is_empty(), "[TESTER] ACKed dl harq was not emptied\n"); + CondError(h->has_pending_retx(0, tti_data.tti_tx_dl), "[TESTER] ACKed dl harq still has pending retx\n"); + log_out.info("[TESTER] DL ACK tti=%u rnti=0x%x pid=%d\n", + tti_data.tti_rx, + ack_it.second.rnti, + ack_it.second.dl_harq.get_id()); + } else { + CondError(h->is_empty() and hack.nof_retx(0) + 1 < hack.max_nof_retx(), "[TESTER] NACKed DL harq got emptied\n"); } - ++it; } - bool ack = true; //(tti_data.tti_rx % 3) == 0; - if (tti_data.tti_rx >= FDD_HARQ_DELAY_MS) { - for (it_t it = ue_db.begin(); it != ue_db.end(); ++it) { - uint16_t rnti = it->first; - srsenb::ul_harq_proc* h = ue_db[rnti].get_ul_harq(tti_data.tti_rx); - if (h != NULL and not h->is_empty()) { - ul_crc_info(tti_data.tti_rx, rnti, ack); - } + /* Ack UL HARQs */ + for (const auto& ack_it : to_ul_ack) { + if (ack_it.first != tti_data.tti_rx) { + continue; + } + srsenb::ul_harq_proc* h = ue_db[ack_it.second.rnti].get_ul_harq(tti_data.tti_rx); + const srsenb::ul_harq_proc& hack = ack_it.second.ul_harq; + CondError(h == nullptr or h->get_tti() != hack.get_tti(), "[TESTER] UL Harq TTI does not match the ACK TTI\n"); + CondError(h->is_empty(0), "[TESTER] The acked UL harq is not active\n"); + CondError(hack.is_empty(0), "[TESTER] The acked UL harq was not active\n"); + + ul_crc_info(tti_data.tti_rx, ack_it.second.rnti, ack_it.second.ack); + + CondError(!h->get_pending_data(), "[TESTER] UL harq lost its pending data\n"); + CondError(!h->has_pending_ack(), "[TESTER] ACK/NACKed UL harq should have a pending ACK\n"); + + if (ack_it.second.ack) { + CondError(!h->is_empty(), "[TESTER] ACKed UL harq did not get emptied\n"); + CondError(h->has_pending_retx(), "[TESTER] ACKed UL harq still has pending retx\n"); + log_out.info("[TESTER] UL ACK tti=%u rnti=0x%x pid=%d\n", tti_data.tti_rx, ack_it.second.rnti, hack.get_id()); + } else { + // NACK + CondError(!h->is_empty() and !h->has_pending_retx(), "[TESTER] If NACKed, UL harq has to have pending retx\n"); + CondError(h->is_empty() and hack.nof_retx(0) + 1 < hack.max_nof_retx(), + "[TESTER] NACKed UL harq got emptied\n");
} } + + // erase processed acks + to_ack.erase(tti_data.tti_rx); + to_ul_ack.erase(tti_data.tti_rx); + + // bool ack = true; //(tti_data.tti_rx % 3) == 0; + // if (tti_data.tti_rx >= FDD_HARQ_DELAY_MS) { + // for (auto it = ue_db.begin(); it != ue_db.end(); ++it) { + // uint16_t rnti = it->first; + // srsenb::ul_harq_proc* h = ue_db[rnti].get_ul_harq(tti_data.tti_rx); + // if (h != NULL and not h->is_empty()) { + // ul_crc_info(tti_data.tti_rx, rnti, ack); + // } + // } + // } } srsenb::sched_interface::cell_cfg_t generate_cell_cfg() @@ -916,7 +1059,7 @@ sched_sim_args rand_sim_params(const srsenb::sched_interface::cell_cfg_t& cell_c int main(int argc, char* argv[]) { - printf("[TESTER] This is the chosen seed: %lu\n", seed); + printf("[TESTER] This is the chosen seed: %u\n", seed); /* initialize random seed: */ srand(seed); uint32_t N_runs = 1, nof_ttis = 10240 + 10; @@ -936,5 +1079,5 @@ printf("[TESTER] Number of assertion warnings: %u\n", warn_counter); printf("[TESTER] Number of assertion errors: %u\n", err_counter); - printf("[TESTER] This was the chosen seed: %lu\n", seed); + printf("[TESTER] This was the chosen seed: %u\n", seed); }
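A usage note on the erase_if helper added in this test: map and multimap erasure must go through the iterator returned by erase(), which is exactly what the helper wraps. A minimal standalone sketch of how the tester applies it to prune entries of removed users (hypothetical RNTIs, not from the patch):

#include <cstdint>
#include <map>

template <typename MapContainer, typename Predicate>
void erase_if(MapContainer& c, Predicate should_remove)
{
  for (auto it = c.begin(); it != c.end();) {
    if (should_remove(*it)) {
      it = c.erase(it); // erase returns the next valid iterator
    } else {
      ++it;
    }
  }
}

int main()
{
  std::multimap<uint32_t, uint16_t> pending_acks{{4, 0x46}, {6, 0x47}, {6, 0x48}};
  // drop all pending ACKs of rnti=0x47, mirroring the cleanup in ack_txs()
  erase_if(pending_acks,
           [](const std::pair<const uint32_t, uint16_t>& e) { return e.second == 0x47; });
  return 0;
}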