updated scheduler interfaces to accommodate multiple carriers

Branch: master
Author: Francisco Paisana, 5 years ago
parent 6d896ee453
commit cab9327b1a
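In short, this change makes the scheduler results and feedback entry points carrier-aware: the UL scheduling result becomes a per-carrier vector (ul_sched_list_t) and the DL/UL feedback methods gain a cc_idx argument, while the MAC for now still hard-codes carrier 0 (see the FIXMEs below). The following is a minimal, self-contained sketch of that calling shape; toy_sched and its members are illustrative stand-ins and not the real srsenb classes.

#include <cstdint>
#include <iostream>
#include <vector>

// Toy stand-ins for the srsenb types; all names here are illustrative only.
struct ul_sched_t {
  uint32_t nof_grants = 0;
  uint32_t nof_phich  = 0;
};
using ul_sched_list_t = std::vector<ul_sched_t>; // one entry per carrier

struct toy_sched {
  explicit toy_sched(uint32_t nof_carriers) : ul_results(nof_carriers) {}

  // Feedback calls now identify the component carrier they refer to.
  void dl_cqi_info(uint32_t tti, uint16_t rnti, uint32_t cc_idx, uint32_t cqi)
  {
    std::cout << "tti=" << tti << " rnti=0x" << std::hex << rnti << std::dec
              << " cc=" << cc_idx << " cqi=" << cqi << "\n";
  }

  ul_sched_list_t ul_results; // one UL result per carrier instead of a single struct
};

int main()
{
  toy_sched sched{2};
  uint32_t  cc_idx = 0; // the MAC in this commit still passes carrier 0 everywhere
  sched.dl_cqi_info(1234, 0x46, cc_idx, 12);
  sched.ul_results[cc_idx].nof_grants++;
  return 0;
}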

@@ -74,7 +74,9 @@ public:
 ul_sched_ack_t phich[MAX_GRANTS];
 uint32_t nof_grants;
 uint32_t nof_phich;
-} ul_sched_t;
+} ul_sched_t; // per carrier
+typedef std::vector<ul_sched_t> ul_sched_list_t;
 virtual int sr_detected(uint32_t tti, uint16_t rnti) = 0;
 virtual int rach_detected(uint32_t tti, uint32_t primary_cc_idx, uint32_t preamble_idx, uint32_t time_adv) = 0;

@@ -230,19 +230,19 @@ public:
 virtual int dl_mac_buffer_state(uint16_t rnti, uint32_t ce_code) = 0;
 /* DL information */
-virtual int dl_ack_info(uint32_t tti, uint16_t rnti, uint32_t tb_idx, bool ack) = 0;
+virtual int dl_ack_info(uint32_t tti, uint16_t rnti, uint32_t cc_idx, uint32_t tb_idx, bool ack) = 0;
 virtual int dl_rach_info(dl_sched_rar_info_t rar_info) = 0;
-virtual int dl_ri_info(uint32_t tti, uint16_t rnti, uint32_t ri_value) = 0;
+virtual int dl_ri_info(uint32_t tti, uint16_t rnti, uint32_t cc_idx, uint32_t ri_value) = 0;
-virtual int dl_pmi_info(uint32_t tti, uint16_t rnti, uint32_t pmi_value) = 0;
+virtual int dl_pmi_info(uint32_t tti, uint16_t rnti, uint32_t cc_idx, uint32_t pmi_value) = 0;
-virtual int dl_cqi_info(uint32_t tti, uint16_t rnti, uint32_t cqi_value) = 0;
+virtual int dl_cqi_info(uint32_t tti, uint16_t rnti, uint32_t cc_idx, uint32_t cqi_value) = 0;
 /* UL information */
-virtual int ul_crc_info(uint32_t tti, uint16_t rnti, bool crc) = 0;
+virtual int ul_crc_info(uint32_t tti, uint16_t rnti, uint32_t cc_idx, bool crc) = 0;
 virtual int ul_sr_info(uint32_t tti, uint16_t rnti) = 0;
 virtual int ul_bsr(uint16_t rnti, uint32_t lcid, uint32_t bsr, bool set_value = true) = 0;
 virtual int ul_recv_len(uint16_t rnti, uint32_t lcid, uint32_t len) = 0;
 virtual int ul_phr(uint16_t rnti, int phr) = 0;
-virtual int ul_cqi_info(uint32_t tti, uint16_t rnti, uint32_t cqi, uint32_t ul_ch_code) = 0;
+virtual int ul_cqi_info(uint32_t tti, uint16_t rnti, uint32_t cc_idx, uint32_t cqi, uint32_t ul_ch_code) = 0;
 /* Run Scheduler for this tti */
 virtual int dl_sched(uint32_t tti, dl_sched_res_t* sched_result) = 0;

@@ -88,7 +88,7 @@ public:
 public:
 /* Virtual methods for user metric calculation */
 virtual void set_log(srslte::log* log_) = 0;
-virtual void sched_users(std::map<uint16_t, sched_ue>& ue_db, dl_tti_sched_t* tti_sched) = 0;
+virtual void sched_users(std::map<uint16_t, sched_ue>& ue_db, dl_tti_sched_t* tti_sched, uint32_t cc_idx) = 0;
 };
 class metric_ul
@@ -96,7 +96,7 @@ public:
 public:
 /* Virtual methods for user metric calculation */
 virtual void set_log(srslte::log* log_) = 0;
-virtual void sched_users(std::map<uint16_t, sched_ue>& ue_db, ul_tti_sched_t* tti_sched) = 0;
+virtual void sched_users(std::map<uint16_t, sched_ue>& ue_db, ul_tti_sched_t* tti_sched, uint32_t cc_idx) = 0;
 };
 /*************************************************************
@@ -119,7 +119,7 @@ public:
 bool ue_exists(uint16_t rnti) final;
 void ue_needs_ta_cmd(uint16_t rnti, uint32_t nof_ta_cmd);
-void phy_config_enabled(uint16_t rnti, bool enabled);
+void phy_config_enabled(uint16_t rnti, uint32_t cc_idx, bool enabled);
 int bearer_ue_cfg(uint16_t rnti, uint32_t lc_id, ue_bearer_cfg_t* cfg) final;
 int bearer_ue_rem(uint16_t rnti, uint32_t lc_id) final;
@@ -131,18 +131,18 @@ public:
 int dl_mac_buffer_state(uint16_t rnti, uint32_t ce_code) final;
 int dl_ant_info(uint16_t rnti, asn1::rrc::phys_cfg_ded_s::ant_info_c_* dedicated);
-int dl_ack_info(uint32_t tti, uint16_t rnti, uint32_t tb_idx, bool ack) final;
+int dl_ack_info(uint32_t tti, uint16_t rnti, uint32_t cc_idx, uint32_t tb_idx, bool ack) final;
 int dl_rach_info(dl_sched_rar_info_t rar_info) final;
-int dl_ri_info(uint32_t tti, uint16_t rnti, uint32_t ri_value) final;
+int dl_ri_info(uint32_t tti, uint16_t rnti, uint32_t cc_idx, uint32_t ri_value) final;
-int dl_pmi_info(uint32_t tti, uint16_t rnti, uint32_t pmi_value) final;
+int dl_pmi_info(uint32_t tti, uint16_t rnti, uint32_t cc_idx, uint32_t pmi_value) final;
-int dl_cqi_info(uint32_t tti, uint16_t rnti, uint32_t cqi_value) final;
+int dl_cqi_info(uint32_t tti, uint16_t rnti, uint32_t cc_idx, uint32_t cqi_value) final;
-int ul_crc_info(uint32_t tti, uint16_t rnti, bool crc) final;
+int ul_crc_info(uint32_t tti, uint16_t rnti, uint32_t cc_idx, bool crc) final;
 int ul_sr_info(uint32_t tti, uint16_t rnti) override;
 int ul_bsr(uint16_t rnti, uint32_t lcid, uint32_t bsr, bool set_value = true) final;
 int ul_recv_len(uint16_t rnti, uint32_t lcid, uint32_t len) final;
 int ul_phr(uint16_t rnti, int phr) final;
-int ul_cqi_info(uint32_t tti, uint16_t rnti, uint32_t cqi, uint32_t ul_ch_code) final;
+int ul_cqi_info(uint32_t tti, uint16_t rnti, uint32_t cc_idx, uint32_t cqi, uint32_t ul_ch_code) final;
 int dl_sched(uint32_t tti, dl_sched_res_t* sched_result) final;
 int ul_sched(uint32_t tti, ul_sched_res_t* sched_result) final;

@@ -32,7 +32,7 @@ class ra_sched;
 class sched::carrier_sched
 {
 public:
-explicit carrier_sched(sched* sched_);
+explicit carrier_sched(sched* sched_, uint32_t cc_idx_);
 void reset();
 void carrier_cfg();
 void set_metric(sched::metric_dl* dl_metric_, sched::metric_ul* ul_metric_);
@@ -60,6 +60,7 @@ private:
 srslte::log* log_h = nullptr;
 metric_dl* dl_metric = nullptr;
 metric_ul* ul_metric = nullptr;
+const uint32_t cc_idx;
 // derived from args
 prbmask_t prach_mask;

@@ -218,7 +218,7 @@ public:
 sched_interface::dl_sched_res_t dl_sched_result;
 sched_interface::ul_sched_res_t ul_sched_result;
-void init(const sched_params_t& sched_params_);
+void init(const sched_params_t& sched_params_, uint32_t cc_idx_);
 void new_tti(uint32_t tti_rx_, uint32_t start_cfi);
 alloc_outcome_t alloc_bc(uint32_t aggr_lvl, uint32_t sib_idx, uint32_t sib_ntx);
 alloc_outcome_t alloc_paging(uint32_t aggr_lvl, uint32_t paging_payload);
@@ -268,6 +268,7 @@ private:
 // consts
 const sched_params_t* sched_params = nullptr;
 srslte::log* log_h = nullptr;
+uint32_t cc_idx = 0;
 // internal state
 tti_params_t tti_params{10241};

@@ -32,11 +32,11 @@ class dl_metric_rr : public sched::metric_dl
 public:
 void set_log(srslte::log* log_) final;
-void sched_users(std::map<uint16_t, sched_ue>& ue_db, dl_tti_sched_t* tti_sched) final;
+void sched_users(std::map<uint16_t, sched_ue>& ue_db, dl_tti_sched_t* tti_sched, uint32_t cc_idx) final;
 private:
 bool find_allocation(uint32_t nof_rbg, rbgmask_t* rbgmask);
-dl_harq_proc* allocate_user(sched_ue* user);
+dl_harq_proc* allocate_user(sched_ue* user, uint32_t cc_idx);
 srslte::log* log_h = nullptr;
 dl_tti_sched_t* tti_alloc = nullptr;
@@ -46,12 +46,12 @@ class ul_metric_rr : public sched::metric_ul
 {
 public:
 void set_log(srslte::log* log_) final;
-void sched_users(std::map<uint16_t, sched_ue>& ue_db, ul_tti_sched_t* tti_sched) final;
+void sched_users(std::map<uint16_t, sched_ue>& ue_db, ul_tti_sched_t* tti_sched, uint32_t cc_idx) final;
 private:
 bool find_allocation(uint32_t L, ul_harq_proc::ul_alloc_t* alloc);
-ul_harq_proc* allocate_user_newtx_prbs(sched_ue* user);
+ul_harq_proc* allocate_user_newtx_prbs(sched_ue* user, uint32_t cc_idx);
-ul_harq_proc* allocate_user_retx_prbs(sched_ue* user);
+ul_harq_proc* allocate_user_retx_prbs(sched_ue* user, uint32_t cc_idx);
 srslte::log* log_h = nullptr;
 ul_tti_sched_t* tti_alloc = nullptr;

@@ -25,6 +25,7 @@
 #include "srslte/common/log.h"
 #include "srslte/interfaces/sched_interface.h"
 #include <map>
+#include <vector>
 #include "scheduler_harq.h"
 #include "srslte/asn1/rrc_asn1.h"
@@ -34,6 +35,54 @@ namespace srsenb {
 class sched_params_t;
+struct sched_ue_carrier {
+const static int SCHED_MAX_HARQ_PROC = SRSLTE_FDD_NOF_HARQ;
+sched_ue_carrier(sched_interface::ue_cfg_t* cfg_,
+srslte_cell_t* cell_cfg_,
+uint16_t rnti_,
+uint32_t cc_idx_,
+srslte::log* log_);
+void reset();
+// Harq access
+void reset_old_pending_pids(uint32_t tti_rx);
+dl_harq_proc* get_pending_dl_harq(uint32_t tti_tx_dl);
+dl_harq_proc* get_empty_dl_harq();
+int set_ack_info(uint32_t tti_rx, uint32_t tb_idx, bool ack);
+ul_harq_proc* get_ul_harq(uint32_t tti);
+uint32_t get_pending_ul_old_data();
+uint32_t get_aggr_level(uint32_t nof_bits);
+int alloc_tbs(uint32_t nof_prb, uint32_t nof_re, uint32_t req_bytes, bool is_ul, int* mcs);
+int alloc_tbs_dl(uint32_t nof_prb, uint32_t nof_re, uint32_t req_bytes, int* mcs);
+int alloc_tbs_ul(uint32_t nof_prb, uint32_t nof_re, uint32_t req_bytes, int* mcs);
+uint32_t get_required_prb_ul(uint32_t req_bytes);
+std::array<dl_harq_proc, SCHED_MAX_HARQ_PROC> dl_harq = {};
+std::array<ul_harq_proc, SCHED_MAX_HARQ_PROC> ul_harq = {};
+uint32_t dl_ri = 0;
+uint32_t dl_ri_tti = 0;
+uint32_t dl_pmi = 0;
+uint32_t dl_pmi_tti = 0;
+uint32_t dl_cqi = 0;
+uint32_t dl_cqi_tti = 0;
+uint32_t ul_cqi = 0;
+uint32_t ul_cqi_tti = 0;
+int max_mcs_dl = 28, max_mcs_ul = 28;
+uint32_t max_aggr_level = 3;
+int fixed_mcs_ul = 0, fixed_mcs_dl = 0;
+private:
+srslte::log* log_h = nullptr;
+sched_interface::ue_cfg_t* cfg = nullptr;
+srslte_cell_t* cell = nullptr;
+uint32_t cc_idx;
+uint16_t rnti;
+};
 /** This class is designed to be thread-safe because it is called from workers through scheduler thread and from
 * higher layers and mac threads.
 *
@@ -58,7 +107,7 @@ public:
 ************************************************************/
 sched_ue();
 void reset();
-void phy_config_enabled(uint32_t tti, bool enabled);
+void phy_config_enabled(uint32_t tti, uint32_t cc_idx, bool enabled);
 void set_cfg(uint16_t rnti, const sched_params_t& sched_params_, sched_interface::ue_cfg_t* cfg);
 void set_bearer_cfg(uint32_t lc_id, srsenb::sched_interface::ue_bearer_cfg_t* cfg);
@@ -70,12 +119,12 @@ public:
 void mac_buffer_state(uint32_t ce_code);
 void ul_recv_len(uint32_t lcid, uint32_t len);
 void set_dl_ant_info(asn1::rrc::phys_cfg_ded_s::ant_info_c_* dedicated);
-void set_ul_cqi(uint32_t tti, uint32_t cqi, uint32_t ul_ch_code);
+void set_ul_cqi(uint32_t tti, uint32_t cc_idx, uint32_t cqi, uint32_t ul_ch_code);
-void set_dl_ri(uint32_t tti, uint32_t ri);
+void set_dl_ri(uint32_t tti, uint32_t cc_idx, uint32_t ri);
-void set_dl_pmi(uint32_t tti, uint32_t ri);
+void set_dl_pmi(uint32_t tti, uint32_t cc_idx, uint32_t ri);
-void set_dl_cqi(uint32_t tti, uint32_t cqi);
+void set_dl_cqi(uint32_t tti, uint32_t cc_idx, uint32_t cqi);
-int set_ack_info(uint32_t tti, uint32_t tb_idx, bool ack);
+int set_ack_info(uint32_t tti, uint32_t cc_idx, uint32_t tb_idx, bool ack);
-void set_ul_crc(uint32_t tti, bool crc_res);
+void set_ul_crc(uint32_t tti, uint32_t cc_idx, bool crc_res);
 /*******************************************************
 * Custom functions
@@ -87,28 +136,28 @@ public:
 void set_max_mcs(int mcs_ul, int mcs_dl, int max_aggr_level = -1);
 void set_fixed_mcs(int mcs_ul, int mcs_dl);
-dl_harq_proc* find_dl_harq(uint32_t tti);
+dl_harq_proc* find_dl_harq(uint32_t tti_rx, uint32_t cc_idx);
-dl_harq_proc* get_dl_harq(uint32_t idx);
+dl_harq_proc* get_dl_harq(uint32_t idx, uint32_t cc_idx);
 uint16_t get_rnti() const { return rnti; }
 /*******************************************************
 * Functions used by scheduler metric objects
 *******************************************************/
-uint32_t get_required_prb_dl(uint32_t req_bytes, uint32_t nof_ctrl_symbols);
+uint32_t get_required_prb_dl(uint32_t cc_idx, uint32_t req_bytes, uint32_t nof_ctrl_symbols);
-uint32_t get_required_prb_ul(uint32_t req_bytes);
+uint32_t get_required_prb_ul(uint32_t cc_idx, uint32_t req_bytes);
 uint32_t prb_to_rbg(uint32_t nof_prb);
 uint32_t rgb_to_prb(uint32_t nof_rbg);
-uint32_t get_pending_dl_new_data(uint32_t tti);
+uint32_t get_pending_dl_new_data();
 uint32_t get_pending_ul_new_data(uint32_t tti);
-uint32_t get_pending_ul_old_data();
+uint32_t get_pending_ul_old_data(uint32_t cc_idx);
-uint32_t get_pending_dl_new_data_total(uint32_t tti);
+uint32_t get_pending_dl_new_data_total();
-void reset_pending_pids(uint32_t tti_rx);
+void reset_pending_pids(uint32_t tti_rx, uint32_t cc_idx);
-dl_harq_proc* get_pending_dl_harq(uint32_t tti);
+dl_harq_proc* get_pending_dl_harq(uint32_t tti, uint32_t cc_idx);
-dl_harq_proc* get_empty_dl_harq();
+dl_harq_proc* get_empty_dl_harq(uint32_t cc_idx);
-ul_harq_proc* get_ul_harq(uint32_t tti);
+ul_harq_proc* get_ul_harq(uint32_t tti, uint32_t cc_idx);
 /*******************************************************
 * Functions used by the scheduler object
@@ -122,35 +171,47 @@ public:
 int generate_format1(dl_harq_proc* h,
 sched_interface::dl_sched_data_t* data,
 uint32_t tti,
+uint32_t cc_idx,
 uint32_t cfi,
 const rbgmask_t& user_mask);
 int generate_format2a(dl_harq_proc* h,
 sched_interface::dl_sched_data_t* data,
 uint32_t tti,
+uint32_t cc_idx,
 uint32_t cfi,
 const rbgmask_t& user_mask);
 int generate_format2(dl_harq_proc* h,
 sched_interface::dl_sched_data_t* data,
 uint32_t tti,
+uint32_t cc_idx,
 uint32_t cfi,
 const rbgmask_t& user_mask);
 int generate_format0(sched_interface::ul_sched_data_t* data,
 uint32_t tti,
+uint32_t cc_idx,
 ul_harq_proc::ul_alloc_t alloc,
 bool needs_pdcch,
 srslte_dci_location_t cce_range,
 int explicit_mcs = -1);
 srslte_dci_format_t get_dci_format();
-uint32_t get_aggr_level(uint32_t nof_bits);
 sched_dci_cce_t* get_locations(uint32_t current_cfi, uint32_t sf_idx);
+sched_ue_carrier* get_ue_carrier(uint32_t cc_idx) { return &carriers[cc_idx]; }
-bool needs_cqi(uint32_t tti, bool will_send = false);
+bool needs_cqi(uint32_t tti, uint32_t cc_idx, bool will_send = false);
 uint32_t get_max_retx();
-bool get_pucch_sched(uint32_t current_tti, uint32_t prb_idx[2]);
+bool get_pucch_sched(uint32_t current_tti, uint32_t cc_idx, uint32_t prb_idx[2]);
 bool pucch_sr_collision(uint32_t current_tti, uint32_t n_cce);
+static int cqi_to_tbs(uint32_t cqi,
+uint32_t nof_prb,
+uint32_t nof_re,
+uint32_t max_mcs,
+uint32_t max_Qm,
+bool is_ul,
+uint32_t* mcs);
 private:
 typedef struct {
 sched_interface::ue_bearer_cfg_t cfg;
@@ -163,34 +224,26 @@ private:
 int alloc_pdu(int tbs, sched_interface::dl_sched_pdu_t* pdu);
 static uint32_t format1_count_prb(uint32_t bitmask, uint32_t cell_nof_prb);
-static int cqi_to_tbs(uint32_t cqi,
-uint32_t nof_prb,
-uint32_t nof_re,
-uint32_t max_mcs,
-uint32_t max_Qm,
-bool is_ul,
-uint32_t* mcs);
-int alloc_tbs_dl(uint32_t nof_prb, uint32_t nof_re, uint32_t req_bytes, int* mcs);
-int alloc_tbs_ul(uint32_t nof_prb, uint32_t nof_re, uint32_t req_bytes, int* mcs);
-int alloc_tbs(uint32_t nof_prb, uint32_t nof_re, uint32_t req_bytes, bool is_ul, int* mcs);
 static bool bearer_is_ul(ue_bearer_t* lch);
 static bool bearer_is_dl(ue_bearer_t* lch);
-uint32_t get_pending_dl_new_data_unlocked(uint32_t tti);
+uint32_t get_pending_dl_new_data_unlocked();
-uint32_t get_pending_ul_old_data_unlocked();
+uint32_t get_pending_ul_old_data_unlocked(uint32_t cc_idx);
 uint32_t get_pending_ul_new_data_unlocked(uint32_t tti);
-uint32_t get_pending_dl_new_data_total_unlocked(uint32_t tti);
+uint32_t get_pending_dl_new_data_total_unlocked();
-bool needs_cqi_unlocked(uint32_t tti, bool will_send = false);
+bool needs_cqi_unlocked(uint32_t tti, uint32_t cc_idx, bool will_send = false);
 int generate_format2a_unlocked(dl_harq_proc* h,
 sched_interface::dl_sched_data_t* data,
 uint32_t tti,
+uint32_t cc_idx,
 uint32_t cfi,
 const rbgmask_t& user_mask);
 bool is_first_dl_tx();
+bool is_first_dl_tx(uint32_t cc_idx);
 sched_interface::ue_cfg_t cfg = {};
 srslte_cell_t cell = {};
@@ -206,22 +259,9 @@ private:
 std::array<ue_bearer_t, sched_interface::MAX_LC> lch = {};
 int power_headroom = 0;
-uint32_t dl_ri = 0;
-uint32_t dl_ri_tti = 0;
-uint32_t dl_pmi = 0;
-uint32_t dl_pmi_tti = 0;
-uint32_t dl_cqi = 0;
-uint32_t dl_cqi_tti = 0;
 uint32_t cqi_request_tti = 0;
-uint32_t ul_cqi = 0;
-uint32_t ul_cqi_tti = 0;
 uint16_t rnti = 0;
-uint32_t max_mcs_dl = 0;
-uint32_t max_aggr_level = 0;
-uint32_t max_mcs_ul = 0;
 uint32_t max_msg3retx = 0;
-int fixed_mcs_ul = 0;
-int fixed_mcs_dl = 0;
 uint32_t nof_ta_cmd = 0;
@@ -231,12 +271,10 @@ private:
 // Allowed DCI locations per CFI and per subframe
 std::array<std::array<sched_dci_cce_t, 10>, 3> dci_locations = {};
-const static int SCHED_MAX_HARQ_PROC = SRSLTE_FDD_NOF_HARQ;
-std::array<dl_harq_proc, SCHED_MAX_HARQ_PROC> dl_harq = {};
-std::array<ul_harq_proc, SCHED_MAX_HARQ_PROC> ul_harq = {};
 bool phy_config_dedicated_enabled = false;
 asn1::rrc::phys_cfg_ded_s::ant_info_c_ dl_ant_info;
+std::vector<sched_ue_carrier> carriers;
 };
 } // namespace srsenb
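All per-carrier UE state that used to live directly in sched_ue (the HARQ process arrays, the CQI/RI/PMI values and their TTIs, and the MCS/aggregation limits) now sits in the new sched_ue_carrier, while sched_ue keeps a std::vector of them and forwards by cc_idx. A self-contained sketch of that delegation pattern, using toy types rather than the real classes:

#include <cstdint>
#include <vector>

// Illustrative per-carrier state, loosely modelled on sched_ue_carrier.
struct carrier_state {
  uint32_t dl_cqi = 0, dl_cqi_tti = 0;
  int      fixed_mcs_dl = 0, fixed_mcs_ul = 0;
  void     reset() { dl_cqi = 0; dl_cqi_tti = 0; }
};

// Illustrative UE object owning one state entry per configured carrier.
class toy_ue
{
public:
  explicit toy_ue(uint32_t nof_carriers) : carriers(nof_carriers) {}

  // Feedback setters forward to the addressed carrier, as in the patch.
  void set_dl_cqi(uint32_t tti, uint32_t cc_idx, uint32_t cqi)
  {
    carriers[cc_idx].dl_cqi     = cqi;
    carriers[cc_idx].dl_cqi_tti = tti;
  }
  carrier_state* get_ue_carrier(uint32_t cc_idx) { return &carriers[cc_idx]; }
  void reset()
  {
    for (auto& c : carriers) {
      c.reset();
    }
  }

private:
  std::vector<carrier_state> carriers;
};

int main()
{
  toy_ue ue{1};               // this commit still creates a single carrier per UE
  ue.set_dl_cqi(1234, 0, 15); // cc_idx = 0
  return ue.get_ue_carrier(0)->dl_cqi == 15 ? 0 : 1;
}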

@@ -203,7 +203,9 @@ int mac::bearer_ue_rem(uint16_t rnti, uint32_t lc_id)
 void mac::phy_config_enabled(uint16_t rnti, bool enabled)
 {
-scheduler.phy_config_enabled(rnti, enabled);
+// FIXME: "cc_idx must be specified"
+uint32_t cc_idx = 0;
+scheduler.phy_config_enabled(rnti, cc_idx, enabled);
 }
 // Update UE configuration
@@ -320,9 +322,11 @@ void mac::rl_ok(uint16_t rnti)
 int mac::ack_info(uint32_t tti, uint16_t rnti, uint32_t tb_idx, bool ack)
 {
+// FIXME: add cc_idx to interface
+uint32_t cc_idx = 0;
 pthread_rwlock_rdlock(&rwlock);
 log_h->step(tti);
-uint32_t nof_bytes = scheduler.dl_ack_info(tti, rnti, tb_idx, ack);
+uint32_t nof_bytes = scheduler.dl_ack_info(tti, rnti, cc_idx, tb_idx, ack);
 ue_db[rnti]->metrics_tx(ack, nof_bytes);
 if (ack) {
@@ -337,6 +341,8 @@ int mac::ack_info(uint32_t tti, uint16_t rnti, uint32_t tb_idx, bool ack)
 int mac::crc_info(uint32_t tti, uint16_t rnti, uint32_t nof_bytes, bool crc)
 {
+// FIXME: add cc_idx to interface
+uint32_t cc_idx = 0;
 log_h->step(tti);
 int ret = -1;
 pthread_rwlock_rdlock(&rwlock);
@@ -354,7 +360,7 @@ int mac::crc_info(uint32_t tti, uint16_t rnti, uint32_t nof_bytes, bool crc)
 ue_db[rnti]->deallocate_pdu(tti);
 }
-ret = scheduler.ul_crc_info(tti, rnti, crc);
+ret = scheduler.ul_crc_info(tti, rnti, cc_idx, crc);
 } else {
 Error("User rnti=0x%x not found\n", rnti);
 }
@@ -378,11 +384,13 @@ int mac::set_dl_ant_info(uint16_t rnti, phys_cfg_ded_s::ant_info_c_* dl_ant_info
 int mac::ri_info(uint32_t tti, uint16_t rnti, uint32_t ri_value)
 {
+// FIXME: add cc_idx to interface
+uint32_t cc_idx = 0;
 log_h->step(tti);
 int ret = -1;
 pthread_rwlock_rdlock(&rwlock);
 if (ue_db.count(rnti)) {
-scheduler.dl_ri_info(tti, rnti, ri_value);
+scheduler.dl_ri_info(tti, rnti, cc_idx, ri_value);
 ue_db[rnti]->metrics_dl_ri(ri_value);
 ret = 0;
 } else {
@@ -394,11 +402,13 @@ int mac::ri_info(uint32_t tti, uint16_t rnti, uint32_t ri_value)
 int mac::pmi_info(uint32_t tti, uint16_t rnti, uint32_t pmi_value)
 {
+// FIXME: add cc_idx to interface
+uint32_t cc_idx = 0;
 log_h->step(tti);
 pthread_rwlock_rdlock(&rwlock);
 int ret = -1;
 if (ue_db.count(rnti)) {
-scheduler.dl_pmi_info(tti, rnti, pmi_value);
+scheduler.dl_pmi_info(tti, rnti, cc_idx, pmi_value);
 ue_db[rnti]->metrics_dl_pmi(pmi_value);
 ret = 0;
 } else {
@@ -410,12 +420,14 @@ int mac::pmi_info(uint32_t tti, uint16_t rnti, uint32_t pmi_value)
 int mac::cqi_info(uint32_t tti, uint16_t rnti, uint32_t cqi_value)
 {
+// FIXME: add cc_idx to interface
+uint32_t cc_idx = 0;
 log_h->step(tti);
 int ret = -1;
 pthread_rwlock_rdlock(&rwlock);
 if (ue_db.count(rnti)) {
-scheduler.dl_cqi_info(tti, rnti, cqi_value);
+scheduler.dl_cqi_info(tti, rnti, cc_idx, cqi_value);
 ue_db[rnti]->metrics_dl_cqi(cqi_value);
 ret = 0;
 } else {
@@ -427,12 +439,14 @@ int mac::cqi_info(uint32_t tti, uint16_t rnti, uint32_t cqi_value)
 int mac::snr_info(uint32_t tti, uint16_t rnti, float snr)
 {
+// FIXME: add cc_idx to interface
+uint32_t cc_idx = 0;
 log_h->step(tti);
 int ret = -1;
 pthread_rwlock_rdlock(&rwlock);
 if (ue_db.count(rnti)) {
 uint32_t cqi = srslte_cqi_from_snr(snr);
-scheduler.ul_cqi_info(tti, rnti, cqi, 0);
+scheduler.ul_cqi_info(tti, rnti, cc_idx, cqi, 0);
 ret = 0;
 } else {
 Error("User rnti=0x%x not found\n", rnti);

@@ -109,7 +109,7 @@ sched::sched()
 pthread_rwlock_init(&rwlock, nullptr);
 // Initialize Independent carrier schedulers
-carrier_schedulers.emplace_back(new carrier_sched{this});
+carrier_schedulers.emplace_back(new carrier_sched{this, 0});
 reset();
 }
@@ -243,7 +243,7 @@ bool sched::ue_exists(uint16_t rnti)
 void sched::ue_needs_ta_cmd(uint16_t rnti, uint32_t nof_ta_cmd)
 {
 pthread_rwlock_rdlock(&rwlock);
-if (ue_db.count(rnti)) {
+if (ue_db.count(rnti) > 0) {
 ue_db[rnti].set_needs_ta_cmd(nof_ta_cmd);
 } else {
 Error("User rnti=0x%x not found\n", rnti);
@@ -251,10 +251,10 @@ void sched::ue_needs_ta_cmd(uint16_t rnti, uint32_t nof_ta_cmd)
 pthread_rwlock_unlock(&rwlock);
 }
-void sched::phy_config_enabled(uint16_t rnti, bool enabled)
+void sched::phy_config_enabled(uint16_t rnti, uint32_t cc_idx, bool enabled)
 {
 // FIXME: Check if correct use of current_tti
-ue_db_access(rnti, [this, enabled](sched_ue& ue) { ue.phy_config_enabled(current_tti, enabled); });
+ue_db_access(rnti, [this, cc_idx, enabled](sched_ue& ue) { ue.phy_config_enabled(current_tti, cc_idx, enabled); });
 }
 int sched::bearer_ue_cfg(uint16_t rnti, uint32_t lc_id, sched_interface::ue_bearer_cfg_t* cfg_)
@@ -271,7 +271,7 @@ uint32_t sched::get_dl_buffer(uint16_t rnti)
 {
 // FIXME: Check if correct use of current_tti
 uint32_t ret = 0;
-ue_db_access(rnti, [this, &ret](sched_ue& ue) { ret = ue.get_pending_dl_new_data(current_tti); });
+ue_db_access(rnti, [&ret](sched_ue& ue) { ret = ue.get_pending_dl_new_data(); });
 return ret;
 }
@@ -299,31 +299,32 @@ int sched::dl_ant_info(uint16_t rnti, asn1::rrc::phys_cfg_ded_s::ant_info_c_* dl
 return ue_db_access(rnti, [dl_ant_info](sched_ue& ue) { ue.set_dl_ant_info(dl_ant_info); });
 }
-int sched::dl_ack_info(uint32_t tti, uint16_t rnti, uint32_t tb_idx, bool ack)
+int sched::dl_ack_info(uint32_t tti, uint16_t rnti, uint32_t cc_idx, uint32_t tb_idx, bool ack)
 {
 int ret = -1;
-ue_db_access(rnti, [tti, tb_idx, ack, &ret](sched_ue& ue) { ret = ue.set_ack_info(tti, tb_idx, ack); });
+ue_db_access(rnti,
+[tti, cc_idx, tb_idx, ack, &ret](sched_ue& ue) { ret = ue.set_ack_info(tti, cc_idx, tb_idx, ack); });
 return ret;
 }
-int sched::ul_crc_info(uint32_t tti, uint16_t rnti, bool crc)
+int sched::ul_crc_info(uint32_t tti, uint16_t rnti, uint32_t cc_idx, bool crc)
 {
-return ue_db_access(rnti, [tti, crc](sched_ue& ue) { ue.set_ul_crc(tti, crc); });
+return ue_db_access(rnti, [tti, cc_idx, crc](sched_ue& ue) { ue.set_ul_crc(tti, cc_idx, crc); });
 }
-int sched::dl_ri_info(uint32_t tti, uint16_t rnti, uint32_t ri_value)
+int sched::dl_ri_info(uint32_t tti, uint16_t rnti, uint32_t cc_idx, uint32_t ri_value)
 {
-return ue_db_access(rnti, [tti, ri_value](sched_ue& ue) { ue.set_dl_ri(tti, ri_value); });
+return ue_db_access(rnti, [tti, cc_idx, ri_value](sched_ue& ue) { ue.set_dl_ri(tti, cc_idx, ri_value); });
 }
-int sched::dl_pmi_info(uint32_t tti, uint16_t rnti, uint32_t pmi_value)
+int sched::dl_pmi_info(uint32_t tti, uint16_t rnti, uint32_t cc_idx, uint32_t pmi_value)
 {
-return ue_db_access(rnti, [tti, pmi_value](sched_ue& ue) { ue.set_dl_pmi(tti, pmi_value); });
+return ue_db_access(rnti, [tti, cc_idx, pmi_value](sched_ue& ue) { ue.set_dl_pmi(tti, cc_idx, pmi_value); });
 }
-int sched::dl_cqi_info(uint32_t tti, uint16_t rnti, uint32_t cqi_value)
+int sched::dl_cqi_info(uint32_t tti, uint16_t rnti, uint32_t cc_idx, uint32_t cqi_value)
 {
-return ue_db_access(rnti, [tti, cqi_value](sched_ue& ue) { ue.set_dl_cqi(tti, cqi_value); });
+return ue_db_access(rnti, [tti, cc_idx, cqi_value](sched_ue& ue) { ue.set_dl_cqi(tti, cc_idx, cqi_value); });
 }
 int sched::dl_rach_info(dl_sched_rar_info_t rar_info)
@@ -331,9 +332,10 @@ int sched::dl_rach_info(dl_sched_rar_info_t rar_info)
 return carrier_schedulers[0]->dl_rach_info(rar_info);
 }
-int sched::ul_cqi_info(uint32_t tti, uint16_t rnti, uint32_t cqi, uint32_t ul_ch_code)
+int sched::ul_cqi_info(uint32_t tti, uint16_t rnti, uint32_t cc_idx, uint32_t cqi, uint32_t ul_ch_code)
 {
-return ue_db_access(rnti, [tti, cqi, ul_ch_code](sched_ue& ue) { ue.set_ul_cqi(tti, cqi, ul_ch_code); });
+return ue_db_access(rnti,
+[tti, cc_idx, cqi, ul_ch_code](sched_ue& ue) { ue.set_ul_cqi(tti, cc_idx, cqi, ul_ch_code); });
 }
 int sched::ul_bsr(uint16_t rnti, uint32_t lcid, uint32_t bsr, bool set_value)

@@ -275,7 +275,7 @@ const ra_sched::pending_msg3_t& ra_sched::find_pending_msg3(uint32_t tti) const
 * Carrier scheduling
 *******************************************************/
-sched::carrier_sched::carrier_sched(sched* sched_) : sched_ptr(sched_)
+sched::carrier_sched::carrier_sched(sched* sched_, uint32_t cc_idx_) : sched_ptr(sched_), cc_idx(cc_idx_)
 {
 tti_dl_mask.resize(1, 0);
 }
@@ -314,7 +314,7 @@ void sched::carrier_sched::carrier_cfg()
 // Initiate the tti_scheduler for each TTI
 for (tti_sched_result_t& tti_sched : tti_scheds) {
-tti_sched.init(*sched_params);
+tti_sched.init(*sched_params, cc_idx);
 }
 }
@@ -370,7 +370,7 @@ tti_sched_result_t* sched::carrier_sched::generate_tti_result(uint32_t tti_rx)
 /* reset PIDs with pending data or blocked */
 for (auto& user : sched_ptr->ue_db) {
-user.second.reset_pending_pids(tti_rx);
+user.second.reset_pending_pids(tti_rx, cc_idx);
 }
 }
@@ -387,7 +387,7 @@ void sched::carrier_sched::generate_phich(tti_sched_result_t* tti_sched)
 // user.has_pucch = false; // FIXME: What is this for?
-ul_harq_proc* h = user.get_ul_harq(tti_sched->get_tti_rx());
+ul_harq_proc* h = user.get_ul_harq(tti_sched->get_tti_rx(), cc_idx);
 /* Indicate PHICH acknowledgment if needed */
 if (h->has_pending_ack()) {
@@ -422,7 +422,7 @@ void sched::carrier_sched::alloc_dl_users(tti_sched_result_t* tti_result)
 }
 // call DL scheduler metric to fill RB grid
-dl_metric->sched_users(sched_ptr->ue_db, tti_result);
+dl_metric->sched_users(sched_ptr->ue_db, tti_result, cc_idx);
 }
 int sched::carrier_sched::alloc_ul_users(tti_sched_result_t* tti_sched)
@@ -448,11 +448,11 @@ int sched::carrier_sched::alloc_ul_users(tti_sched_result_t* tti_sched)
 ul_mask |= pucch_mask;
 /* Call scheduler for UL data */
-ul_metric->sched_users(sched_ptr->ue_db, tti_sched);
+ul_metric->sched_users(sched_ptr->ue_db, tti_sched, cc_idx);
 /* Update pending data counters after this TTI */
 for (auto& user : sched_ptr->ue_db) {
-user.second.get_ul_harq(tti_tx_ul)->reset_pending_data();
+user.second.get_ul_harq(tti_tx_ul, cc_idx)->reset_pending_data();
 }
 return SRSLTE_SUCCESS;

@@ -342,8 +342,8 @@ tti_grid_t::dl_ctrl_alloc_t tti_grid_t::alloc_dl_ctrl(uint32_t aggr_lvl, alloc_t
 alloc_outcome_t tti_grid_t::alloc_dl_data(sched_ue* user, const rbgmask_t& user_mask)
 {
 srslte_dci_format_t dci_format = user->get_dci_format();
-uint32_t aggr_level =
-user->get_aggr_level(srslte_dci_format_sizeof(&sched_params->cfg->cell, nullptr, nullptr, dci_format));
+uint32_t nof_bits = srslte_dci_format_sizeof(&sched_params->cfg->cell, nullptr, nullptr, dci_format);
+uint32_t aggr_level = user->get_ue_carrier(cc_idx)->get_aggr_level(nof_bits);
 return alloc_dl(aggr_level, alloc_type_t::DL_DATA, user_mask, user);
 }
@@ -361,8 +361,8 @@ alloc_outcome_t tti_grid_t::alloc_ul_data(sched_ue* user, ul_harq_proc::ul_alloc
 // Generate PDCCH except for RAR and non-adaptive retx
 if (needs_pdcch) {
-uint32_t aggr_idx =
-user->get_aggr_level(srslte_dci_format_sizeof(&sched_params->cfg->cell, nullptr, nullptr, SRSLTE_DCI_FORMAT0));
+uint32_t nof_bits = srslte_dci_format_sizeof(&sched_params->cfg->cell, nullptr, nullptr, SRSLTE_DCI_FORMAT0);
+uint32_t aggr_idx = user->get_ue_carrier(cc_idx)->get_aggr_level(nof_bits);
 if (not pdcch_alloc.alloc_dci(alloc_type_t::UL_DATA, aggr_idx, user)) {
 if (log_h->get_level() == srslte::LOG_LEVEL_DEBUG) {
 log_h->debug("No space in PDCCH for rnti=0x%x UL tx. Current PDCCH allocation: %s\n",
@@ -382,9 +382,10 @@ alloc_outcome_t tti_grid_t::alloc_ul_data(sched_ue* user, ul_harq_proc::ul_alloc
 * TTI resource Scheduling Methods
 *******************************************************/
-void tti_sched_result_t::init(const sched_params_t& sched_params_)
+void tti_sched_result_t::init(const sched_params_t& sched_params_, uint32_t cc_idx_)
 {
 sched_params = &sched_params_;
+cc_idx = cc_idx_;
 log_h = sched_params->log_h;
 tti_alloc.init(*sched_params, 0);
 }
@@ -575,7 +576,7 @@ alloc_outcome_t tti_sched_result_t::alloc_ul_user(sched_ue* user, ul_harq_proc::
 {
 // check whether adaptive/non-adaptive retx/newtx
 tti_sched_result_t::ul_alloc_t::type_t alloc_type;
-ul_harq_proc* h = user->get_ul_harq(get_tti_tx_ul());
+ul_harq_proc* h = user->get_ul_harq(get_tti_tx_ul(), cc_idx);
 bool has_retx = h->has_pending_retx();
 if (has_retx) {
 ul_harq_proc::ul_alloc_t prev_alloc = h->get_alloc();
@@ -722,21 +723,21 @@ void tti_sched_result_t::set_dl_data_sched_result(const pdcch_grid_t::alloc_resu
 // Generate DCI Format1/2/2A
 sched_ue* user = data_alloc.user_ptr;
-dl_harq_proc* h = user->get_dl_harq(data_alloc.pid);
+dl_harq_proc* h = user->get_dl_harq(data_alloc.pid, cc_idx);
-uint32_t data_before = user->get_pending_dl_new_data(get_tti_tx_dl());
+uint32_t data_before = user->get_pending_dl_new_data();
 srslte_dci_format_t dci_format = user->get_dci_format();
 bool is_newtx = h->is_empty();
 int tbs = 0;
 switch (dci_format) {
 case SRSLTE_DCI_FORMAT1:
-tbs = user->generate_format1(h, data, get_tti_tx_dl(), get_cfi(), data_alloc.user_mask);
+tbs = user->generate_format1(h, data, get_tti_tx_dl(), cc_idx, get_cfi(), data_alloc.user_mask);
 break;
 case SRSLTE_DCI_FORMAT2:
-tbs = user->generate_format2(h, data, get_tti_tx_dl(), get_cfi(), data_alloc.user_mask);
+tbs = user->generate_format2(h, data, get_tti_tx_dl(), cc_idx, get_cfi(), data_alloc.user_mask);
 break;
 case SRSLTE_DCI_FORMAT2A:
-tbs = user->generate_format2a(h, data, get_tti_tx_dl(), get_cfi(), data_alloc.user_mask);
+tbs = user->generate_format2a(h, data, get_tti_tx_dl(), cc_idx, get_cfi(), data_alloc.user_mask);
 break;
 default:
 Error("DCI format (%d) not implemented\n", dci_format);
@@ -749,7 +750,7 @@ void tti_sched_result_t::set_dl_data_sched_result(const pdcch_grid_t::alloc_resu
 h->get_id(),
 data_alloc.user_mask.to_hex().c_str(),
 tbs,
-user->get_pending_dl_new_data(get_tti_tx_dl()));
+user->get_pending_dl_new_data());
 continue;
 }
@@ -764,7 +765,7 @@ void tti_sched_result_t::set_dl_data_sched_result(const pdcch_grid_t::alloc_resu
 h->nof_retx(0) + h->nof_retx(1),
 tbs,
 data_before,
-user->get_pending_dl_new_data(get_tti_tx_dl()));
+user->get_pending_dl_new_data());
 dl_sched_result.nof_data_elems++;
 }
@@ -788,10 +789,10 @@ void tti_sched_result_t::set_ul_sched_result(const pdcch_grid_t::alloc_result_t&
 /* Generate DCI Format1A */
 uint32_t pending_data_before = user->get_pending_ul_new_data(get_tti_tx_ul());
-int tbs =
-user->generate_format0(pusch, get_tti_tx_ul(), ul_alloc.alloc, ul_alloc.needs_pdcch(), cce_range, fixed_mcs);
+int tbs = user->generate_format0(
+pusch, get_tti_tx_ul(), cc_idx, ul_alloc.alloc, ul_alloc.needs_pdcch(), cce_range, fixed_mcs);
-ul_harq_proc* h = user->get_ul_harq(get_tti_tx_ul());
+ul_harq_proc* h = user->get_ul_harq(get_tti_tx_ul(), cc_idx);
 if (tbs <= 0) {
 log_h->warning("SCHED: Error %s %s rnti=0x%x, pid=%d, dci=(%d,%d), prb=(%d,%d), tbs=%d, bsr=%d\n",
 ul_alloc.type == ul_alloc_t::MSG3 ? "Msg3" : "UL",
@@ -827,7 +828,7 @@ void tti_sched_result_t::set_ul_sched_result(const pdcch_grid_t::alloc_result_t&
 tbs,
 user->get_pending_ul_new_data(get_tti_tx_ul()),
 pending_data_before,
-user->get_pending_ul_old_data());
+user->get_pending_ul_old_data(cc_idx));
 ul_sched_result.nof_dci_elems++;
 }
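The round-robin metrics below keep the same time-domain priority rule and only gain the cc_idx argument, which they forward into the per-user allocation helpers. For reference, a self-contained sketch of the wrap-around iteration used by sched_users, with a toy UE type and the allocation step passed in as a callback:

#include <cstdint>
#include <iostream>
#include <iterator>
#include <map>

struct toy_ue {};

// Visit every UE exactly once, starting at a TTI-dependent offset so the first
// served UE rotates over time, wrapping around the map at the end -- the same
// pattern used by dl_metric_rr::sched_users and ul_metric_rr::sched_users.
template <typename Alloc>
void sched_users_rr(std::map<uint16_t, toy_ue>& ue_db, uint32_t tti, uint32_t cc_idx, Alloc&& allocate_user)
{
  if (ue_db.empty()) {
    return;
  }
  uint32_t priority_idx = tti % static_cast<uint32_t>(ue_db.size());
  auto     iter         = ue_db.begin();
  std::advance(iter, priority_idx);
  for (uint32_t ue_count = 0; ue_count < ue_db.size(); ++iter, ++ue_count) {
    if (iter == ue_db.end()) {
      iter = ue_db.begin(); // wrap around
    }
    allocate_user(iter->first, iter->second, cc_idx);
  }
}

int main()
{
  std::map<uint16_t, toy_ue> ue_db{{0x46, {}}, {0x47, {}}, {0x48, {}}};
  sched_users_rr(ue_db, 1235, 0, [](uint16_t rnti, toy_ue&, uint32_t cc) {
    std::cout << "serving rnti=0x" << std::hex << rnti << std::dec << " on cc=" << cc << "\n";
  });
  return 0;
}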

@@ -41,25 +41,24 @@ void dl_metric_rr::set_log(srslte::log* log_)
 log_h = log_;
 }
-void dl_metric_rr::sched_users(std::map<uint16_t, sched_ue>& ue_db, dl_tti_sched_t* tti_sched)
+void dl_metric_rr::sched_users(std::map<uint16_t, sched_ue>& ue_db, dl_tti_sched_t* tti_sched, uint32_t cc_idx)
 {
-typedef std::map<uint16_t, sched_ue>::iterator it_t;
 tti_alloc = tti_sched;
-if (ue_db.empty())
+if (ue_db.empty()) {
 return;
+}
 // give priority in a time-domain RR basis
 uint32_t priority_idx = tti_alloc->get_tti_tx_dl() % (uint32_t)ue_db.size();
-it_t iter = ue_db.begin();
+auto iter = ue_db.begin();
 std::advance(iter, priority_idx);
 for (uint32_t ue_count = 0; ue_count < ue_db.size(); ++iter, ++ue_count) {
 if (iter == ue_db.end()) {
 iter = ue_db.begin(); // wrap around
 }
 sched_ue* user = &iter->second;
-allocate_user(user);
+allocate_user(user, cc_idx);
 }
 }
@@ -78,7 +77,7 @@ bool dl_metric_rr::find_allocation(uint32_t nof_rbg, rbgmask_t* rbgmask)
 return nof_rbg == 0;
 }
-dl_harq_proc* dl_metric_rr::allocate_user(sched_ue* user)
+dl_harq_proc* dl_metric_rr::allocate_user(sched_ue* user, uint32_t cc_idx)
 {
 if (tti_alloc->is_dl_alloc(user)) {
 return nullptr;
@@ -86,8 +85,8 @@ dl_harq_proc* dl_metric_rr::allocate_user(sched_ue* user)
 // FIXME: First do reTxs for all users. Only then do the rest.
 alloc_outcome_t code;
 uint32_t tti_dl = tti_alloc->get_tti_tx_dl();
-dl_harq_proc* h = user->get_pending_dl_harq(tti_dl);
+dl_harq_proc* h = user->get_pending_dl_harq(tti_dl, cc_idx);
-uint32_t req_bytes = user->get_pending_dl_new_data_total(tti_dl);
+uint32_t req_bytes = user->get_pending_dl_new_data_total();
 // Schedule retx if we have space
 #if ASYNC_DL_SCHED
@@ -100,7 +99,8 @@ dl_harq_proc* dl_metric_rr::allocate_user(sched_ue* user)
 code = tti_alloc->alloc_dl_user(user, retx_mask, h->get_id());
 if (code == alloc_outcome_t::SUCCESS) {
 return h;
-} else if (code == alloc_outcome_t::DCI_COLLISION) {
+}
+if (code == alloc_outcome_t::DCI_COLLISION) {
 // No DCIs available for this user. Move to next
 return nullptr;
 }
@@ -119,14 +119,15 @@ dl_harq_proc* dl_metric_rr::allocate_user(sched_ue* user)
 // If could not schedule the reTx, or there wasn't any pending retx, find an empty PID
 #if ASYNC_DL_SCHED
-h = user->get_empty_dl_harq();
+h = user->get_empty_dl_harq(cc_idx);
 if (h) {
 #else
 if (h && h->is_empty()) {
 #endif
 // Allocate resources based on pending data
 if (req_bytes) {
-uint32_t pending_rbg = user->prb_to_rbg(user->get_required_prb_dl(req_bytes, tti_alloc->get_nof_ctrl_symbols()));
+uint32_t pending_rbg =
+user->prb_to_rbg(user->get_required_prb_dl(cc_idx, req_bytes, tti_alloc->get_nof_ctrl_symbols()));
 rbgmask_t newtx_mask(tti_alloc->get_dl_mask().size());
 find_allocation(pending_rbg, &newtx_mask);
 if (newtx_mask.any()) { // some empty spaces were found
@@ -152,29 +153,28 @@ void ul_metric_rr::set_log(srslte::log* log_)
 log_h = log_;
 }
-void ul_metric_rr::sched_users(std::map<uint16_t, sched_ue>& ue_db, ul_tti_sched_t* tti_sched)
+void ul_metric_rr::sched_users(std::map<uint16_t, sched_ue>& ue_db, ul_tti_sched_t* tti_sched, uint32_t cc_idx)
 {
-typedef std::map<uint16_t, sched_ue>::iterator it_t;
 tti_alloc = tti_sched;
 current_tti = tti_alloc->get_tti_tx_ul();
-if (ue_db.size() == 0)
+if (ue_db.empty()) {
 return;
+}
 // give priority in a time-domain RR basis
 uint32_t priority_idx =
 (current_tti + (uint32_t)ue_db.size() / 2) % (uint32_t)ue_db.size(); // make DL and UL interleaved
 // allocate reTxs first
-it_t iter = ue_db.begin();
+auto iter = ue_db.begin();
 std::advance(iter, priority_idx);
 for (uint32_t ue_count = 0; ue_count < ue_db.size(); ++iter, ++ue_count) {
 if (iter == ue_db.end()) {
 iter = ue_db.begin(); // wrap around
 }
 sched_ue* user = &iter->second;
-allocate_user_retx_prbs(user);
+allocate_user_retx_prbs(user, cc_idx);
 }
 // give priority in a time-domain RR basis
@@ -185,7 +185,7 @@ void ul_metric_rr::sched_users(std::map<uint16_t, sched_ue>& ue_db, ul_tti_sched
 iter = ue_db.begin(); // wrap around
 }
 sched_ue* user = &iter->second;
-allocate_user_newtx_prbs(user);
+allocate_user_newtx_prbs(user, cc_idx);
 }
 }
@@ -226,13 +226,13 @@ bool ul_metric_rr::find_allocation(uint32_t L, ul_harq_proc::ul_alloc_t* alloc)
 return alloc->L == L;
 }
-ul_harq_proc* ul_metric_rr::allocate_user_retx_prbs(sched_ue* user)
+ul_harq_proc* ul_metric_rr::allocate_user_retx_prbs(sched_ue* user, uint32_t cc_idx)
 {
 if (tti_alloc->is_ul_alloc(user)) {
 return nullptr;
 }
 alloc_outcome_t ret;
-ul_harq_proc* h = user->get_ul_harq(current_tti);
+ul_harq_proc* h = user->get_ul_harq(current_tti, cc_idx);
 // if there are procedures and we have space
 if (h->has_pending_retx()) {
@@ -261,17 +261,17 @@ ul_harq_proc* ul_metric_rr::allocate_user_retx_prbs(sched_ue* user)
 return nullptr;
 }
-ul_harq_proc* ul_metric_rr::allocate_user_newtx_prbs(sched_ue* user)
+ul_harq_proc* ul_metric_rr::allocate_user_newtx_prbs(sched_ue* user, uint32_t cc_idx)
 {
 if (tti_alloc->is_ul_alloc(user)) {
 return nullptr;
 }
 uint32_t pending_data = user->get_pending_ul_new_data(current_tti);
-ul_harq_proc* h = user->get_ul_harq(current_tti);
+ul_harq_proc* h = user->get_ul_harq(current_tti, cc_idx);
 // find an empty PID
 if (h->is_empty(0) and pending_data > 0) {
-uint32_t pending_rb = user->get_required_prb_ul(pending_data);
+uint32_t pending_rb = user->get_required_prb_ul(cc_idx, pending_data);
 ul_harq_proc::ul_alloc_t alloc;
 find_allocation(pending_rb, &alloc);
@@ -279,7 +279,8 @@ ul_harq_proc* ul_metric_rr::allocate_user_newtx_prbs(sched_ue* user)
 alloc_outcome_t ret = tti_alloc->alloc_ul_user(user, alloc);
 if (ret == alloc_outcome_t::SUCCESS) {
 return h;
-} else if (ret == alloc_outcome_t::DCI_COLLISION) {
+}
+if (ret == alloc_outcome_t::DCI_COLLISION) {
 log_h->warning("SCHED: Couldn't find space in PDCCH for UL tx of rnti=0x%x\n", user->get_rnti());
 }
 }

@@ -53,8 +53,6 @@ sched_ue::sched_ue()
 bzero(&cell, sizeof(cell));
 bzero(&lch, sizeof(lch));
 bzero(&dci_locations, sizeof(dci_locations));
-bzero(&dl_harq, sizeof(dl_harq));
-bzero(&ul_harq, sizeof(ul_harq));
 bzero(&dl_ant_info, sizeof(dl_ant_info));
 reset();
@@ -71,9 +69,6 @@ void sched_ue::set_cfg(uint16_t rnti_, const sched_params_t& sched_params_, sche
 log_h = sched_params->log_h;
 cell = sched_params->cfg->cell;
-max_mcs_dl = 28;
-max_mcs_ul = 28;
-max_aggr_level = 3;
 max_msg3retx = sched_params->cfg->maxharq_msg3tx;
 cfg = *cfg_;
@@ -82,11 +77,10 @@ void sched_ue::set_cfg(uint16_t rnti_, const sched_params_t& sched_params_, sche
 cfg.dl_cfg.tm = SRSLTE_TM1;
 Info("SCHED: Added user rnti=0x%x\n", rnti);
-// Config HARQ processes
-for (int i = 0; i < SCHED_MAX_HARQ_PROC; i++) {
-dl_harq[i].config(i, cfg.maxharq_tx, log_h);
-ul_harq[i].config(i, cfg.maxharq_tx, log_h);
-}
+// Init sched_ue carriers
+// TODO: check config for number of carriers
+carriers.emplace_back(&cfg, &cell, rnti, 0, log_h);
 // Generate allowed CCE locations
 for (int cfi = 0; cfi < 3; cfi++) {
@@ -117,20 +111,9 @@ void sched_ue::reset()
 buf_mac = 0;
 buf_ul = 0;
 phy_config_dedicated_enabled = false;
-dl_cqi = 1;
-ul_cqi = 1;
-dl_cqi_tti = 0;
-ul_cqi_tti = 0;
-dl_ri = 0;
-dl_ri_tti = 0;
-dl_pmi = 0;
-dl_pmi_tti = 0;
 cqi_request_tti = 0;
-for (int i = 0; i < SCHED_MAX_HARQ_PROC; i++) {
-for (uint32_t tb = 0; tb < SRSLTE_MAX_TB; tb++) {
-dl_harq[i].reset(tb);
-ul_harq[i].reset(tb);
-}
+for (auto& c : carriers) {
+c.reset();
 }
 }
@@ -142,27 +125,22 @@ void sched_ue::reset()
 void sched_ue::set_fixed_mcs(int mcs_ul, int mcs_dl)
 {
 std::lock_guard<std::mutex> lock(mutex);
-fixed_mcs_ul = mcs_ul;
-fixed_mcs_dl = mcs_dl;
+for (auto& c : carriers) {
+c.fixed_mcs_dl = mcs_dl;
+c.fixed_mcs_ul = mcs_ul;
+}
 }
 void sched_ue::set_max_mcs(int mcs_ul, int mcs_dl, int max_aggr_level_)
 {
 std::lock_guard<std::mutex> lock(mutex);
-if (mcs_ul < 0) {
-max_mcs_ul = 28;
-} else {
-max_mcs_ul = mcs_ul;
-}
-if (mcs_dl < 0) {
-max_mcs_dl = 28;
-} else {
-max_mcs_dl = mcs_dl;
-}
-if (max_aggr_level_ < 0) {
-max_aggr_level = 3;
-} else {
-max_aggr_level = max_aggr_level_;
+uint32_t max_mcs_ul = mcs_ul >= 0 ? mcs_ul : 28;
+uint32_t max_mcs_dl = mcs_dl >= 0 ? mcs_dl : 28;
+uint32_t max_aggr_level = max_aggr_level_ >= 0 ? max_aggr_level_ : 3;
+for (auto& c : carriers) {
+c.max_mcs_dl = max_mcs_dl;
+c.max_mcs_ul = max_mcs_ul;
+c.max_aggr_level = max_aggr_level;
 }
 }
@@ -193,9 +171,10 @@ void sched_ue::rem_bearer(uint32_t lc_id)
 }
 }
-void sched_ue::phy_config_enabled(uint32_t tti, bool enabled)
+void sched_ue::phy_config_enabled(uint32_t tti, uint32_t cc_idx, bool enabled)
 {
-dl_cqi_tti = tti;
+carriers[cc_idx].dl_cqi_tti = tti;
+// FIXME: "why do we need this?"
 phy_config_dedicated_enabled = enabled;
 }
@@ -262,7 +241,7 @@ bool sched_ue::pucch_sr_collision(uint32_t current_tti, uint32_t n_cce)
 }
 }
-bool sched_ue::get_pucch_sched(uint32_t current_tti, uint32_t prb_idx[2])
+bool sched_ue::get_pucch_sched(uint32_t current_tti, uint32_t cc_idx, uint32_t prb_idx[2])
 {
 bool ret = false;
@@ -279,9 +258,9 @@ bool sched_ue::get_pucch_sched(uint32_t current_tti, uint32_t prb_idx[2])
 ret |= cfg.pucch_cfg.uci_cfg.is_scheduling_request_tti;
 // Pending ACKs
-for (int i = 0; i < SCHED_MAX_HARQ_PROC; i++) {
+for (auto& h : carriers[cc_idx].dl_harq) {
-if (TTI_TX(dl_harq[i].get_tti()) == current_tti) {
+if (TTI_TX(h.get_tti()) == current_tti) {
-cfg.pucch_cfg.uci_cfg.ack[0].ncce[0] = dl_harq[i].get_n_cce();
+cfg.pucch_cfg.uci_cfg.ack[0].ncce[0] = h.get_n_cce();
 cfg.pucch_cfg.uci_cfg.ack[0].nof_acks = 1;
 ret = true;
 }
@@ -310,24 +289,10 @@ bool sched_ue::get_pucch_sched(uint32_t current_tti, uint32_t prb_idx[2])
 return ret;
 }
-int sched_ue::set_ack_info(uint32_t tti, uint32_t tb_idx, bool ack)
+int sched_ue::set_ack_info(uint32_t tti, uint32_t cc_idx, uint32_t tb_idx, bool ack)
 {
 std::lock_guard<std::mutex> lock(mutex);
-int ret;
-for (int i = 0; i < SCHED_MAX_HARQ_PROC; i++) {
-if (TTI_TX(dl_harq[i].get_tti()) == tti) {
-Debug("SCHED: Set ACK=%d for rnti=0x%x, pid=%d, tb=%d, tti=%d\n", ack, rnti, i, tb_idx, tti);
-dl_harq[i].set_ack(tb_idx, ack);
-ret = dl_harq[i].get_tbs(tb_idx);
-goto unlock;
-}
-}
-Warning("SCHED: Received ACK info for unknown TTI=%d\n", tti);
-ret = -1;
-unlock:
-return ret;
+return carriers[cc_idx].set_ack_info(tti, tb_idx, ack);
 }
 void sched_ue::ul_recv_len(uint32_t lcid, uint32_t len)
@@ -350,31 +315,31 @@ void sched_ue::ul_recv_len(uint32_t lcid, uint32_t len)
 Debug("SCHED: recv_len=%d, lcid=%d, bsr={%d,%d,%d,%d}\n", len, lcid, lch[0].bsr, lch[1].bsr, lch[2].bsr, lch[3].bsr);
 }
-void sched_ue::set_ul_crc(uint32_t tti, bool crc_res)
+void sched_ue::set_ul_crc(uint32_t tti, uint32_t cc_idx, bool crc_res)
 {
 std::lock_guard<std::mutex> lock(mutex);
-get_ul_harq(tti)->set_ack(0, crc_res);
+get_ul_harq(tti, cc_idx)->set_ack(0, crc_res);
 }
-void sched_ue::set_dl_ri(uint32_t tti, uint32_t ri)
+void sched_ue::set_dl_ri(uint32_t tti, uint32_t cc_idx, uint32_t ri)
 {
 std::lock_guard<std::mutex> lock(mutex);
-dl_ri = ri;
+carriers[cc_idx].dl_ri = ri;
-dl_ri_tti = tti;
+carriers[cc_idx].dl_ri_tti = tti;
 }
-void sched_ue::set_dl_pmi(uint32_t tti, uint32_t pmi)
+void sched_ue::set_dl_pmi(uint32_t tti, uint32_t cc_idx, uint32_t pmi)
 {
 std::lock_guard<std::mutex> lock(mutex);
-dl_pmi = pmi;
+carriers[cc_idx].dl_pmi = pmi;
-dl_pmi_tti = tti;
+carriers[cc_idx].dl_pmi_tti = tti;
 }
-void sched_ue::set_dl_cqi(uint32_t tti, uint32_t cqi)
+void sched_ue::set_dl_cqi(uint32_t tti, uint32_t cc_idx, uint32_t cqi)
 {
 std::lock_guard<std::mutex> lock(mutex);
-dl_cqi = cqi;
+carriers[cc_idx].dl_cqi = cqi;
-dl_cqi_tti = tti;
+carriers[cc_idx].dl_cqi_tti = tti;
 }
 void sched_ue::set_dl_ant_info(asn1::rrc::phys_cfg_ded_s::ant_info_c_* d)
@@ -383,11 +348,11 @@ void sched_ue::set_dl_ant_info(asn1::rrc::phys_cfg_ded_s::ant_info_c_* d)
 dl_ant_info = *d;
 }
-void sched_ue::set_ul_cqi(uint32_t tti, uint32_t cqi, uint32_t ul_ch_code)
+void sched_ue::set_ul_cqi(uint32_t tti, uint32_t cc_idx, uint32_t cqi, uint32_t ul_ch_code)
 {
 std::lock_guard<std::mutex> lock(mutex);
-ul_cqi = cqi;
+carriers[cc_idx].ul_cqi = cqi;
-ul_cqi_tti = tti;
+carriers[cc_idx].ul_cqi_tti = tti;
 }
 void sched_ue::tpc_inc()
@@ -418,7 +383,8 @@ void sched_ue::tpc_dec()
 // > return 0 if TBS<MIN_DATA_TBS
 int sched_ue::generate_format1(dl_harq_proc* h,
 sched_interface::dl_sched_data_t* data,
-uint32_t tti,
+uint32_t tti_tx_dl,
+uint32_t cc_idx,
 uint32_t cfi,
 const rbgmask_t& user_mask)
 {
@@ -434,13 +400,13 @@ int sched_ue::generate_format1(dl_harq_proc* h,
 // If this is the first transmission for this UE, make room for MAC Contention Resolution ID
 bool need_conres_ce = false;
-if (is_first_dl_tx()) {
+if (is_first_dl_tx(cc_idx)) {
 need_conres_ce = true;
 }
 if (h->is_empty(0)) {
 // Get total available data to transmit (includes MAC header)
-uint32_t req_bytes = get_pending_dl_new_data_total_unlocked(tti);
+uint32_t req_bytes = get_pending_dl_new_data_total_unlocked();
 uint32_t nof_prb = format1_count_prb((uint32_t)user_mask.to_uint64(), cell.nof_prb);
@@ -448,16 +414,16 @@ int sched_ue::generate_format1(dl_harq_proc* h,
 srslte_pdsch_grant_t grant = {};
 srslte_dl_sf_cfg_t dl_sf = {};
 dl_sf.cfi = cfi;
-dl_sf.tti = tti;
+dl_sf.tti = tti_tx_dl;
 srslte_ra_dl_grant_to_grant_prb_allocation(dci, &grant, cell.nof_prb);
 uint32_t nof_re = srslte_ra_dl_grant_nof_re(&cell, &dl_sf, &grant);
-int mcs0 = fixed_mcs_dl;
+int mcs0 = carriers[cc_idx].fixed_mcs_dl;
 if (need_conres_ce and cell.nof_prb < 10) { // SRB0 Tx. Use a higher MCS for the PRACH to fit in 6 PRBs
 mcs0 = MCS_FIRST_DL;
 }
 if (mcs0 < 0) { // dynamic MCS
-tbs = alloc_tbs_dl(nof_prb, nof_re, req_bytes, &mcs);
+tbs = carriers[cc_idx].alloc_tbs_dl(nof_prb, nof_re, req_bytes, &mcs);
 } else {
 tbs = srslte_ra_tbs_from_idx(srslte_ra_tbs_idx_from_mcs(mcs0, false), nof_prb) / 8;
 mcs = mcs0;
@@ -468,7 +434,7 @@ int sched_ue::generate_format1(dl_harq_proc* h,
 return 0;
 }
-h->new_tx(user_mask, 0, tti, mcs, tbs, data->dci.location.ncce);
+h->new_tx(user_mask, 0, tti_tx_dl, mcs, tbs, data->dci.location.ncce);
 int rem_tbs = tbs;
 int x = 0;
@@ -500,7 +466,7 @@ int sched_ue::generate_format1(dl_harq_proc* h,
 Debug("SCHED: Alloc format1 new mcs=%d, tbs=%d, nof_prb=%d, req_bytes=%d\n", mcs, tbs, nof_prb, req_bytes);
 } else {
-h->new_retx(user_mask, 0, tti, &mcs, &tbs, data->dci.location.ncce);
+h->new_retx(user_mask, 0, tti_tx_dl, &mcs, &tbs, data->dci.location.ncce);
 Debug("SCHED: Alloc format1 previous mcs=%d, tbs=%d\n", mcs, tbs);
} }
@ -525,11 +491,12 @@ int sched_ue::generate_format1(dl_harq_proc* h,
int sched_ue::generate_format2a(dl_harq_proc* h, int sched_ue::generate_format2a(dl_harq_proc* h,
sched_interface::dl_sched_data_t* data, sched_interface::dl_sched_data_t* data,
uint32_t tti, uint32_t tti,
uint32_t cc_idx,
uint32_t cfi, uint32_t cfi,
const rbgmask_t& user_mask) const rbgmask_t& user_mask)
{ {
std::lock_guard<std::mutex> lock(mutex); std::lock_guard<std::mutex> lock(mutex);
int ret = generate_format2a_unlocked(h, data, tti, cfi, user_mask); int ret = generate_format2a_unlocked(h, data, tti, cc_idx, cfi, user_mask);
return ret; return ret;
} }
@ -537,6 +504,7 @@ int sched_ue::generate_format2a(dl_harq_proc* h,
int sched_ue::generate_format2a_unlocked(dl_harq_proc* h, int sched_ue::generate_format2a_unlocked(dl_harq_proc* h,
sched_interface::dl_sched_data_t* data, sched_interface::dl_sched_data_t* data,
uint32_t tti, uint32_t tti,
uint32_t cc_idx,
uint32_t cfi, uint32_t cfi,
const rbgmask_t& user_mask) const rbgmask_t& user_mask)
{ {
@ -559,7 +527,7 @@ int sched_ue::generate_format2a_unlocked(dl_harq_proc* h,
bool no_retx = true; bool no_retx = true;
if (dl_ri == 0) { if (carriers[cc_idx].dl_ri == 0) {
if (h->is_empty(1)) { if (h->is_empty(1)) {
/* One layer, tb1 buffer is empty, send tb0 only */ /* One layer, tb1 buffer is empty, send tb0 only */
tb_en[0] = true; tb_en[0] = true;
@ -583,7 +551,7 @@ int sched_ue::generate_format2a_unlocked(dl_harq_proc* h,
} }
for (uint32_t tb = 0; tb < SRSLTE_MAX_TB; tb++) { for (uint32_t tb = 0; tb < SRSLTE_MAX_TB; tb++) {
uint32_t req_bytes = get_pending_dl_new_data_total_unlocked(tti); uint32_t req_bytes = get_pending_dl_new_data_total_unlocked();
int mcs = 0; int mcs = 0;
int tbs = 0; int tbs = 0;
@ -591,11 +559,13 @@ int sched_ue::generate_format2a_unlocked(dl_harq_proc* h,
h->new_retx(user_mask, tb, tti, &mcs, &tbs, data->dci.location.ncce); h->new_retx(user_mask, tb, tti, &mcs, &tbs, data->dci.location.ncce);
Debug("SCHED: Alloc format2/2a previous mcs=%d, tbs=%d\n", mcs, tbs); Debug("SCHED: Alloc format2/2a previous mcs=%d, tbs=%d\n", mcs, tbs);
} else if (tb_en[tb] && req_bytes && no_retx) { } else if (tb_en[tb] && req_bytes && no_retx) {
if (fixed_mcs_dl < 0) { if (carriers[cc_idx].fixed_mcs_dl < 0) {
tbs = alloc_tbs_dl(nof_prb, nof_re, req_bytes, &mcs); tbs = carriers[cc_idx].alloc_tbs_dl(nof_prb, nof_re, req_bytes, &mcs);
} else { } else {
tbs = srslte_ra_tbs_from_idx((uint32_t)srslte_ra_tbs_idx_from_mcs((uint32_t)fixed_mcs_dl, false), nof_prb) / 8; tbs = srslte_ra_tbs_from_idx(
mcs = fixed_mcs_dl; (uint32_t)srslte_ra_tbs_idx_from_mcs((uint32_t)carriers[cc_idx].fixed_mcs_dl, false), nof_prb) /
8;
mcs = carriers[cc_idx].fixed_mcs_dl;
} }
h->new_tx(user_mask, tb, tti, mcs, tbs, data->dci.location.ncce); h->new_tx(user_mask, tb, tti, mcs, tbs, data->dci.location.ncce);
@ -643,6 +613,7 @@ int sched_ue::generate_format2a_unlocked(dl_harq_proc* h,
int sched_ue::generate_format2(dl_harq_proc* h, int sched_ue::generate_format2(dl_harq_proc* h,
sched_interface::dl_sched_data_t* data, sched_interface::dl_sched_data_t* data,
uint32_t tti, uint32_t tti,
uint32_t cc_idx,
uint32_t cfi, uint32_t cfi,
const rbgmask_t& user_mask) const rbgmask_t& user_mask)
{ {
@ -650,14 +621,14 @@ int sched_ue::generate_format2(dl_harq_proc* h,
std::lock_guard<std::mutex> lock(mutex); std::lock_guard<std::mutex> lock(mutex);
/* Call Format 2a (common) */ /* Call Format 2a (common) */
int ret = generate_format2a_unlocked(h, data, tti, cfi, user_mask); int ret = generate_format2a_unlocked(h, data, tti, cc_idx, cfi, user_mask);
/* Compute precoding information */ /* Compute precoding information */
data->dci.format = SRSLTE_DCI_FORMAT2; data->dci.format = SRSLTE_DCI_FORMAT2;
if ((SRSLTE_DCI_IS_TB_EN(data->dci.tb[0]) + SRSLTE_DCI_IS_TB_EN(data->dci.tb[1])) == 1) { if ((SRSLTE_DCI_IS_TB_EN(data->dci.tb[0]) + SRSLTE_DCI_IS_TB_EN(data->dci.tb[1])) == 1) {
data->dci.pinfo = (uint8_t)(dl_pmi + 1) % (uint8_t)5; data->dci.pinfo = (uint8_t)(carriers[cc_idx].dl_pmi + 1) % (uint8_t)5;
} else { } else {
data->dci.pinfo = (uint8_t)(dl_pmi & 1u); data->dci.pinfo = (uint8_t)(carriers[cc_idx].dl_pmi & 1u);
} }
return ret; return ret;
@ -665,6 +636,7 @@ int sched_ue::generate_format2(dl_harq_proc* h,
int sched_ue::generate_format0(sched_interface::ul_sched_data_t* data, int sched_ue::generate_format0(sched_interface::ul_sched_data_t* data,
uint32_t tti, uint32_t tti,
uint32_t cc_idx,
ul_harq_proc::ul_alloc_t alloc, ul_harq_proc::ul_alloc_t alloc,
bool needs_pdcch, bool needs_pdcch,
srslte_dci_location_t dci_pos, srslte_dci_location_t dci_pos,
@ -672,7 +644,7 @@ int sched_ue::generate_format0(sched_interface::ul_sched_data_t* data,
{ {
std::lock_guard<std::mutex> lock(mutex); std::lock_guard<std::mutex> lock(mutex);
ul_harq_proc* h = get_ul_harq(tti); ul_harq_proc* h = get_ul_harq(tti, cc_idx);
srslte_dci_ul_t* dci = &data->dci; srslte_dci_ul_t* dci = &data->dci;
bool cqi_request = needs_cqi_unlocked(tti, true); bool cqi_request = needs_cqi_unlocked(tti, true);
@ -681,7 +653,7 @@ int sched_ue::generate_format0(sched_interface::ul_sched_data_t* data,
data->needs_pdcch = needs_pdcch; data->needs_pdcch = needs_pdcch;
dci->location = dci_pos; dci->location = dci_pos;
int mcs = (explicit_mcs >= 0) ? explicit_mcs : fixed_mcs_ul; int mcs = (explicit_mcs >= 0) ? explicit_mcs : carriers[cc_idx].fixed_mcs_ul;
int tbs = 0; int tbs = 0;
bool is_newtx = h->is_empty(0); bool is_newtx = h->is_empty(0);
@ -698,7 +670,7 @@ int sched_ue::generate_format0(sched_interface::ul_sched_data_t* data,
uint32_t req_bytes = get_pending_ul_new_data_unlocked(tti); uint32_t req_bytes = get_pending_ul_new_data_unlocked(tti);
uint32_t N_srs = 0; uint32_t N_srs = 0;
uint32_t nof_re = (2 * (SRSLTE_CP_NSYMB(cell.cp) - 1) - N_srs) * alloc.L * SRSLTE_NRE; uint32_t nof_re = (2 * (SRSLTE_CP_NSYMB(cell.cp) - 1) - N_srs) * alloc.L * SRSLTE_NRE;
tbs = alloc_tbs_ul(alloc.L, nof_re, req_bytes, &mcs); tbs = carriers[cc_idx].alloc_tbs_ul(alloc.L, nof_re, req_bytes, &mcs);
} }
h->new_tx(tti, mcs, tbs, alloc, nof_retx); h->new_tx(tti, mcs, tbs, alloc, nof_retx);
@ -755,27 +727,37 @@ uint32_t sched_ue::get_max_retx()
bool sched_ue::is_first_dl_tx() bool sched_ue::is_first_dl_tx()
{ {
for (int i = 0; i < SCHED_MAX_HARQ_PROC; i++) { for (uint32_t i = 0; i < carriers.size(); ++i) {
if (dl_harq[i].nof_tx(0) > 0) { if (not is_first_dl_tx(i)) {
return false;
}
}
return true;
}
bool sched_ue::is_first_dl_tx(uint32_t cc_idx)
{
for (auto& h : carriers[cc_idx].dl_harq) {
if (h.nof_tx(0) > 0) {
return false; return false;
} }
} }
return true; return true;
} }
bool sched_ue::needs_cqi(uint32_t tti, bool will_be_sent) bool sched_ue::needs_cqi(uint32_t tti, uint32_t cc_idx, bool will_be_sent)
{ {
std::lock_guard<std::mutex> lock(mutex); std::lock_guard<std::mutex> lock(mutex);
bool ret = needs_cqi_unlocked(tti, will_be_sent); bool ret = needs_cqi_unlocked(tti, cc_idx, will_be_sent);
return ret; return ret;
} }
// Private lock-free implementation // Private lock-free implementation
bool sched_ue::needs_cqi_unlocked(uint32_t tti, bool will_be_sent) bool sched_ue::needs_cqi_unlocked(uint32_t tti, uint32_t cc_idx, bool will_be_sent)
{ {
bool ret = false; bool ret = false;
if (phy_config_dedicated_enabled && cfg.aperiodic_cqi_period && get_pending_dl_new_data_unlocked(tti) > 0) { if (phy_config_dedicated_enabled && cfg.aperiodic_cqi_period && get_pending_dl_new_data_unlocked() > 0) {
uint32_t interval = srslte_tti_interval(tti, dl_cqi_tti); uint32_t interval = srslte_tti_interval(tti, carriers[cc_idx].dl_cqi_tti);
bool needscqi = interval >= cfg.aperiodic_cqi_period; bool needscqi = interval >= cfg.aperiodic_cqi_period;
if (needscqi) { if (needscqi) {
uint32_t interval_sent = srslte_tti_interval(tti, cqi_request_tti); uint32_t interval_sent = srslte_tti_interval(tti, cqi_request_tti);
@ -791,27 +773,24 @@ bool sched_ue::needs_cqi_unlocked(uint32_t tti, bool will_be_sent)
return ret; return ret;
} }
uint32_t sched_ue::get_pending_dl_new_data(uint32_t tti) uint32_t sched_ue::get_pending_dl_new_data()
{ {
std::lock_guard<std::mutex> lock(mutex); std::lock_guard<std::mutex> lock(mutex);
uint32_t pending_data = get_pending_dl_new_data_unlocked(tti); return get_pending_dl_new_data_unlocked();
return pending_data;
} }
/// Use this function in the dl-metric to get the bytes to be scheduled. It accounts for the UE data, /// Use this function in the dl-metric to get the bytes to be scheduled. It accounts for the UE data,
/// the RAR resources, and headers /// the RAR resources, and headers
/// \param tti
/// \return number of bytes to be allocated /// \return number of bytes to be allocated
uint32_t sched_ue::get_pending_dl_new_data_total(uint32_t tti) uint32_t sched_ue::get_pending_dl_new_data_total()
{ {
std::lock_guard<std::mutex> lock(mutex); std::lock_guard<std::mutex> lock(mutex);
uint32_t req_bytes = get_pending_dl_new_data_total_unlocked(tti); return get_pending_dl_new_data_total_unlocked();
return req_bytes;
} }
uint32_t sched_ue::get_pending_dl_new_data_total_unlocked(uint32_t tti) uint32_t sched_ue::get_pending_dl_new_data_total_unlocked()
{ {
uint32_t req_bytes = get_pending_dl_new_data_unlocked(tti); uint32_t req_bytes = get_pending_dl_new_data_unlocked();
if (req_bytes > 0) { if (req_bytes > 0) {
req_bytes += (req_bytes < 128) ? 2 : 3; // consider the header req_bytes += (req_bytes < 128) ? 2 : 3; // consider the header
if (is_first_dl_tx()) { if (is_first_dl_tx()) {
@ -822,7 +801,7 @@ uint32_t sched_ue::get_pending_dl_new_data_total_unlocked(uint32_t tti)
} }
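As a rough worked example of the header accounting in get_pending_dl_new_data_total_unlocked(): a MAC subheader costs 2 bytes for payloads under 128 bytes and 3 bytes otherwise, and the first DL transmission additionally reserves room for the 6-byte Contention Resolution ID CE. The exact extra constant for the ConRes case is not visible in this hunk, so the 6 bytes below are an assumption for illustration only.

#include <cstdint>
#include <cstdio>

// Approximate "total bytes to schedule" given the pending RLC payload.
static uint32_t pending_with_overhead(uint32_t payload, bool first_dl_tx)
{
  if (payload == 0) {
    return 0;
  }
  uint32_t total = payload + ((payload < 128) ? 2 : 3); // MAC subheader
  if (first_dl_tx) {
    total += 6; // Contention Resolution ID CE (assumed size)
  }
  return total;
}

int main()
{
  printf("%u\n", pending_with_overhead(100, true));  // 100 + 2 + 6 = 108
  printf("%u\n", pending_with_overhead(500, false)); // 500 + 3     = 503
}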
// Private lock-free implementation // Private lock-free implementation
uint32_t sched_ue::get_pending_dl_new_data_unlocked(uint32_t tti) uint32_t sched_ue::get_pending_dl_new_data_unlocked()
{ {
uint32_t pending_data = 0; uint32_t pending_data = 0;
for (int i = 0; i < sched_interface::MAX_LC; i++) { for (int i = 0; i < sched_interface::MAX_LC; i++) {
@ -830,7 +809,7 @@ uint32_t sched_ue::get_pending_dl_new_data_unlocked(uint32_t tti)
pending_data += lch[i].buf_retx + lch[i].buf_tx; pending_data += lch[i].buf_retx + lch[i].buf_tx;
} }
} }
if (!is_first_dl_tx() && nof_ta_cmd) { if (not is_first_dl_tx() and nof_ta_cmd > 0) {
pending_data += nof_ta_cmd * 2; pending_data += nof_ta_cmd * 2;
} }
return pending_data; return pending_data;
@ -839,15 +818,13 @@ uint32_t sched_ue::get_pending_dl_new_data_unlocked(uint32_t tti)
uint32_t sched_ue::get_pending_ul_new_data(uint32_t tti) uint32_t sched_ue::get_pending_ul_new_data(uint32_t tti)
{ {
std::lock_guard<std::mutex> lock(mutex); std::lock_guard<std::mutex> lock(mutex);
uint32_t pending_data = get_pending_ul_new_data_unlocked(tti); return get_pending_ul_new_data_unlocked(tti);
return pending_data;
} }
uint32_t sched_ue::get_pending_ul_old_data() uint32_t sched_ue::get_pending_ul_old_data(uint32_t cc_idx)
{ {
std::lock_guard<std::mutex> lock(mutex); std::lock_guard<std::mutex> lock(mutex);
uint32_t pending_data = get_pending_ul_old_data_unlocked(); return get_pending_ul_old_data_unlocked(cc_idx);
return pending_data;
} }
// Private lock-free implementation // Private lock-free implementation
@ -859,19 +836,25 @@ uint32_t sched_ue::get_pending_ul_new_data_unlocked(uint32_t tti)
pending_data += lch[i].bsr; pending_data += lch[i].bsr;
} }
} }
if (!pending_data && is_sr_triggered()) { if (pending_data == 0) {
if (is_sr_triggered()) {
return 512; return 512;
} }
if (!pending_data && needs_cqi_unlocked(tti)) { for (uint32_t cc_idx = 0; cc_idx < carriers.size(); ++cc_idx) {
if (needs_cqi_unlocked(tti, cc_idx)) {
return 128; return 128;
} }
uint32_t pending_ul_data = get_pending_ul_old_data_unlocked();
if (pending_data > pending_ul_data) {
pending_data -= pending_ul_data;
} else {
pending_data = 0;
} }
if (pending_data) { }
// Subtract all the UL data already allocated in the UL harqs
uint32_t pending_ul_data = 0;
for (uint32_t cc_idx = 0; cc_idx < carriers.size(); ++cc_idx) {
pending_ul_data += get_pending_ul_old_data_unlocked(cc_idx);
}
pending_data = (pending_data > pending_ul_data) ? pending_data - pending_ul_data : 0;
if (pending_data > 0) {
Debug("SCHED: pending_data=%d, pending_ul_data=%d, bsr={%d,%d,%d,%d}\n", Debug("SCHED: pending_data=%d, pending_ul_data=%d, bsr={%d,%d,%d,%d}\n",
pending_data, pending_data,
pending_ul_data, pending_ul_data,
@ -884,13 +867,9 @@ uint32_t sched_ue::get_pending_ul_new_data_unlocked(uint32_t tti)
} }
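The reworked UL accounting sums the data already sitting in the UL HARQ buffers of every carrier and subtracts it from the BSR-reported total, clamping at zero. A self-contained sketch of that arithmetic, with toy types standing in for the srsLTE ones:

#include <cstdint>
#include <numeric>
#include <vector>

struct toy_ul_carrier {
  std::vector<uint32_t> harq_pending_bytes; // bytes already granted per UL HARQ

  uint32_t pending_in_harqs() const
  {
    return std::accumulate(harq_pending_bytes.begin(), harq_pending_bytes.end(), 0u);
  }
};

// BSR total minus data already allocated across all carriers, never negative.
static uint32_t pending_ul_new_data(uint32_t bsr_total, const std::vector<toy_ul_carrier>& carriers)
{
  uint32_t already_allocated = 0;
  for (const auto& c : carriers) {
    already_allocated += c.pending_in_harqs();
  }
  return (bsr_total > already_allocated) ? bsr_total - already_allocated : 0;
}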
// Private lock-free implementation // Private lock-free implementation
uint32_t sched_ue::get_pending_ul_old_data_unlocked() uint32_t sched_ue::get_pending_ul_old_data_unlocked(uint32_t cc_idx)
{ {
uint32_t pending_data = 0; return carriers[cc_idx].get_pending_ul_old_data();
for (int i = 0; i < SCHED_MAX_HARQ_PROC; i++) {
pending_data += ul_harq[i].get_pending_data();
}
return pending_data;
} }
uint32_t sched_ue::prb_to_rbg(uint32_t nof_prb) uint32_t sched_ue::prb_to_rbg(uint32_t nof_prb)
@ -903,7 +882,7 @@ uint32_t sched_ue::rgb_to_prb(uint32_t nof_rbg)
return sched_params->P * nof_rbg; return sched_params->P * nof_rbg;
} }
uint32_t sched_ue::get_required_prb_dl(uint32_t req_bytes, uint32_t nof_ctrl_symbols) uint32_t sched_ue::get_required_prb_dl(uint32_t cc_idx, uint32_t req_bytes, uint32_t nof_ctrl_symbols)
{ {
std::lock_guard<std::mutex> lock(mutex); std::lock_guard<std::mutex> lock(mutex);
@ -913,11 +892,11 @@ uint32_t sched_ue::get_required_prb_dl(uint32_t req_bytes, uint32_t nof_ctrl_sym
uint32_t nbytes = 0; uint32_t nbytes = 0;
uint32_t n; uint32_t n;
int mcs0 = (is_first_dl_tx() and cell.nof_prb == 6) ? MCS_FIRST_DL : fixed_mcs_dl; int mcs0 = (is_first_dl_tx(cc_idx) and cell.nof_prb == 6) ? MCS_FIRST_DL : carriers[cc_idx].fixed_mcs_dl;
for (n = 0; n < cell.nof_prb && nbytes < req_bytes; ++n) { for (n = 0; n < cell.nof_prb && nbytes < req_bytes; ++n) {
nof_re = srslte_ra_dl_approx_nof_re(&cell, n + 1, nof_ctrl_symbols); nof_re = srslte_ra_dl_approx_nof_re(&cell, n + 1, nof_ctrl_symbols);
if (mcs0 < 0) { if (mcs0 < 0) {
tbs = alloc_tbs_dl(n + 1, nof_re, 0, &mcs); tbs = carriers[cc_idx].alloc_tbs_dl(n + 1, nof_re, 0, &mcs);
} else { } else {
tbs = srslte_ra_tbs_from_idx(srslte_ra_tbs_idx_from_mcs(mcs0, false), n + 1) / 8; tbs = srslte_ra_tbs_from_idx(srslte_ra_tbs_idx_from_mcs(mcs0, false), n + 1) / 8;
} }
@ -931,37 +910,10 @@ uint32_t sched_ue::get_required_prb_dl(uint32_t req_bytes, uint32_t nof_ctrl_sym
return n; return n;
} }
uint32_t sched_ue::get_required_prb_ul(uint32_t req_bytes) uint32_t sched_ue::get_required_prb_ul(uint32_t cc_idx, uint32_t req_bytes)
{ {
int mcs = 0;
uint32_t nbytes = 0;
uint32_t N_srs = 0;
uint32_t n = 0;
if (req_bytes == 0) {
return 0;
}
std::lock_guard<std::mutex> lock(mutex); std::lock_guard<std::mutex> lock(mutex);
return carriers[cc_idx].get_required_prb_ul(req_bytes);
for (n = 1; n < cell.nof_prb && nbytes < req_bytes + 4; n++) {
uint32_t nof_re = (2 * (SRSLTE_CP_NSYMB(cell.cp) - 1) - N_srs) * n * SRSLTE_NRE;
int tbs = 0;
if (fixed_mcs_ul < 0) {
tbs = alloc_tbs_ul(n, nof_re, 0, &mcs);
} else {
tbs = srslte_ra_tbs_from_idx(srslte_ra_tbs_idx_from_mcs(fixed_mcs_ul, true), n) / 8;
}
if (tbs > 0) {
nbytes = tbs;
}
}
while (!srslte_dft_precoding_valid_prb(n) && n <= cell.nof_prb) {
n++;
}
return n;
} }
bool sched_ue::is_sr_triggered() bool sched_ue::is_sr_triggered()
@ -969,88 +921,42 @@ bool sched_ue::is_sr_triggered()
return sr; return sr;
} }
void sched_ue::reset_pending_pids(uint32_t tti_rx) void sched_ue::reset_pending_pids(uint32_t tti_rx, uint32_t cc_idx)
{ {
uint32_t tti_tx_dl = TTI_TX(tti_rx), tti_tx_ul = TTI_RX_ACK(tti_rx); carriers[cc_idx].reset_old_pending_pids(tti_rx);
// UL harqs
get_ul_harq(tti_tx_ul)->reset_pending_data();
// DL harqs
for (int i = 0; i < SCHED_MAX_HARQ_PROC; i++) {
dl_harq[i].reset_pending_data();
if (not dl_harq[i].is_empty()) {
uint32_t tti_diff = srslte_tti_interval(tti_tx_dl, dl_harq[i].get_tti());
if (tti_diff > 50 and tti_diff < 10240 / 2) {
log_h->info("SCHED: pid=%d is old. tti_pid=%d, now is %d, resetting\n", i, dl_harq[i].get_tti(), tti_tx_dl);
dl_harq[i].reset(0);
dl_harq[i].reset(1);
}
}
}
} }
/* Gets HARQ process with oldest pending retx */ /* Gets HARQ process with oldest pending retx */
dl_harq_proc* sched_ue::get_pending_dl_harq(uint32_t tti) dl_harq_proc* sched_ue::get_pending_dl_harq(uint32_t tti_tx_dl, uint32_t cc_idx)
{ {
#if ASYNC_DL_SCHED
std::lock_guard<std::mutex> lock(mutex); std::lock_guard<std::mutex> lock(mutex);
return carriers[cc_idx].get_pending_dl_harq(tti_tx_dl);
int oldest_idx = -1;
uint32_t oldest_tti = 0;
for (int i = 0; i < SCHED_MAX_HARQ_PROC; i++) {
if (dl_harq[i].has_pending_retx(0, tti) || dl_harq[i].has_pending_retx(1, tti)) {
uint32_t x = srslte_tti_interval(tti, dl_harq[i].get_tti());
if (x > oldest_tti) {
oldest_idx = i;
oldest_tti = x;
}
}
}
dl_harq_proc* h = nullptr;
if (oldest_idx >= 0) {
h = &dl_harq[oldest_idx];
}
return h;
#else
return &dl_harq[tti % SCHED_MAX_HARQ_PROC];
#endif
} }
dl_harq_proc* sched_ue::get_empty_dl_harq() dl_harq_proc* sched_ue::get_empty_dl_harq(uint32_t cc_idx)
{ {
std::lock_guard<std::mutex> lock(mutex); std::lock_guard<std::mutex> lock(mutex);
return carriers[cc_idx].get_empty_dl_harq();
dl_harq_proc* h = nullptr;
for (int i = 0; i < SCHED_MAX_HARQ_PROC && !h; i++) {
if (dl_harq[i].is_empty(0) && dl_harq[i].is_empty(1)) {
h = &dl_harq[i];
}
}
return h;
} }
ul_harq_proc* sched_ue::get_ul_harq(uint32_t tti) ul_harq_proc* sched_ue::get_ul_harq(uint32_t tti_tx_ul, uint32_t cc_idx)
{ {
return &ul_harq[tti % SCHED_MAX_HARQ_PROC]; return carriers[cc_idx].get_ul_harq(tti_tx_ul);
} }
dl_harq_proc* sched_ue::find_dl_harq(uint32_t tti) dl_harq_proc* sched_ue::find_dl_harq(uint32_t tti_rx, uint32_t cc_idx)
{ {
for (uint32_t i = 0; i < SCHED_MAX_HARQ_PROC; ++i) { for (auto& h : carriers[cc_idx].dl_harq) {
if (dl_harq[i].get_tti() == tti) { if (h.get_tti() == tti_rx) {
return &dl_harq[i]; return &h;
} }
} }
return nullptr; return nullptr;
} }
dl_harq_proc* sched_ue::get_dl_harq(uint32_t idx) dl_harq_proc* sched_ue::get_dl_harq(uint32_t idx, uint32_t cc_idx)
{ {
return &dl_harq[idx]; return &carriers[cc_idx].dl_harq[idx];
} }
srslte_dci_format_t sched_ue::get_dci_format() srslte_dci_format_t sched_ue::get_dci_format()
@ -1084,33 +990,6 @@ srslte_dci_format_t sched_ue::get_dci_format()
return ret; return ret;
} }
/* Find lowest DCI aggregation level supported by the UE spectral efficiency */
uint32_t sched_ue::get_aggr_level(uint32_t nof_bits)
{
std::lock_guard<std::mutex> lock(mutex);
uint32_t l = 0;
float max_coderate = srslte_cqi_to_coderate(dl_cqi);
float coderate = 99;
float factor = 1.5;
uint32_t l_max = 3;
if (cell.nof_prb == 6) {
factor = 1.0;
l_max = 2;
}
l_max = SRSLTE_MIN(max_aggr_level, l_max);
do {
coderate = srslte_pdcch_coderate(nof_bits, l);
l++;
} while (l < l_max && factor * coderate > max_coderate);
Debug("SCHED: CQI=%d, l=%d, nof_bits=%d, coderate=%.2f, max_coderate=%.2f\n",
dl_cqi,
l,
nof_bits,
coderate,
max_coderate);
return l;
}
sched_ue::sched_dci_cce_t* sched_ue::get_locations(uint32_t cfi, uint32_t sf_idx) sched_ue::sched_dci_cce_t* sched_ue::get_locations(uint32_t cfi, uint32_t sf_idx)
{ {
if (cfi > 0 && cfi <= 3) { if (cfi > 0 && cfi <= 3) {
@ -1192,20 +1071,159 @@ int sched_ue::cqi_to_tbs(uint32_t cqi,
return tbs; return tbs;
} }
int sched_ue::alloc_tbs_dl(uint32_t nof_prb, uint32_t nof_re, uint32_t req_bytes, int* mcs) /************************************************************************************************
* sched_ue::sched_ue_carrier
***********************************************************************************************/
sched_ue_carrier::sched_ue_carrier(sched_interface::ue_cfg_t* cfg_,
srslte_cell_t* cell_cfg_,
uint16_t rnti_,
uint32_t cc_idx_,
srslte::log* log_) :
cfg(cfg_),
cell(cell_cfg_),
rnti(rnti_),
cc_idx(cc_idx_),
log_h(log_)
{ {
return alloc_tbs(nof_prb, nof_re, req_bytes, false, mcs); // Config HARQ processes
for (uint32_t i = 0; i < dl_harq.size(); ++i) {
dl_harq[i].config(i, cfg->maxharq_tx, log_h);
ul_harq[i].config(i, cfg->maxharq_tx, log_h);
}
} }
int sched_ue::alloc_tbs_ul(uint32_t nof_prb, uint32_t nof_re, uint32_t req_bytes, int* mcs) void sched_ue_carrier::reset()
{ {
return alloc_tbs(nof_prb, nof_re, req_bytes, true, mcs); dl_ri = 0;
dl_ri_tti = 0;
dl_pmi = 0;
dl_pmi_tti = 0;
dl_cqi = 1;
dl_cqi_tti = 0;
ul_cqi = 1;
ul_cqi_tti = 0;
for (uint32_t i = 0; i < dl_harq.size(); ++i) {
for (uint32_t tb = 0; tb < SRSLTE_MAX_TB; tb++) {
dl_harq[i].reset(tb);
ul_harq[i].reset(tb);
}
}
}
void sched_ue_carrier::reset_old_pending_pids(uint32_t tti_rx)
{
uint32_t tti_tx_dl = TTI_TX(tti_rx), tti_tx_ul = TTI_RX_ACK(tti_rx);
// UL Harqs
get_ul_harq(tti_tx_ul)->reset_pending_data();
// DL harqs
for (auto& h : dl_harq) {
h.reset_pending_data();
if (not h.is_empty()) {
uint32_t tti_diff = srslte_tti_interval(tti_tx_dl, h.get_tti());
if (tti_diff > 50 and tti_diff < 10240 / 2) {
log_h->info("SCHED: pid=%d is old. tti_pid=%d, now is %d, resetting\n", h.get_id(), h.get_tti(), tti_tx_dl);
for (uint32_t tb = 0; tb < SRSLTE_MAX_TB; tb++) {
h.reset(tb);
}
}
}
}
}
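The stale-PID check in reset_old_pending_pids() relies on a wraparound-aware TTI distance; the 10240/2 upper bound avoids flagging processes whose apparent age is only an artifact of the counter wrapping. A sketch of that check, assuming the usual 10240-TTI counter (this mirrors how srslte_tti_interval is used here, not its exact implementation):

#include <cstdint>

constexpr uint32_t TTI_WRAP = 10240;

// Forward distance from 'older' to 'newer' on a circular TTI counter.
static uint32_t tti_distance(uint32_t newer, uint32_t older)
{
  return (newer + TTI_WRAP - older) % TTI_WRAP;
}

// A HARQ process is considered stale if it is clearly old (> 50 TTIs) but the
// measured distance is still unambiguous (< half the wrap range).
static bool is_stale(uint32_t tti_now, uint32_t tti_pid)
{
  uint32_t d = tti_distance(tti_now, tti_pid);
  return d > 50 && d < TTI_WRAP / 2;
}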
dl_harq_proc* sched_ue_carrier::get_pending_dl_harq(uint32_t tti_tx_dl)
{
#if ASYNC_DL_SCHED
int oldest_idx = -1;
uint32_t oldest_tti = 0;
for (auto& h : dl_harq) {
if (h.has_pending_retx(0, tti_tx_dl) or h.has_pending_retx(1, tti_tx_dl)) {
uint32_t x = srslte_tti_interval(tti_tx_dl, h.get_tti());
if (x > oldest_tti) {
oldest_idx = h.get_id();
oldest_tti = x;
}
}
}
dl_harq_proc* h = nullptr;
if (oldest_idx >= 0) {
h = &dl_harq[oldest_idx];
}
return h;
#else
return &dl_harq[tti % SCHED_MAX_HARQ_PROC];
#endif
}
dl_harq_proc* sched_ue_carrier::get_empty_dl_harq()
{
auto it =
std::find_if(dl_harq.begin(), dl_harq.end(), [](dl_harq_proc& h) { return h.is_empty(0) and h.is_empty(1); });
return it != dl_harq.end() ? &(*it) : nullptr;
}
int sched_ue_carrier::set_ack_info(uint32_t tti_rx, uint32_t tb_idx, bool ack)
{
for (auto& h : dl_harq) {
if (TTI_TX(h.get_tti()) == tti_rx) {
Debug("SCHED: Set ACK=%d for rnti=0x%x, pid=%d, tb=%d, tti=%d\n", ack, rnti, h.get_id(), tb_idx, tti_rx);
h.set_ack(tb_idx, ack);
return h.get_tbs(tb_idx);
}
}
Warning("SCHED: Received ACK info for unknown TTI=%d\n", tti_rx);
return -1;
}
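set_ack_info() matches a HARQ process by translating its DL transmission TTI to the TTI at which the ACK is expected. Under the usual FDD timing that offset is 4 subframes; the TTI_TX macro used above is assumed here to be a +4 shift modulo 10240, which is a sketch rather than the macro's actual definition:

#include <cstdint>

constexpr uint32_t TTI_WRAP       = 10240;
constexpr uint32_t FDD_HARQ_DELAY = 4; // assumed DL-to-ACK delay in subframes

// TTI at which the HARQ feedback for a DL transmission sent at tti_tx_dl arrives.
static uint32_t ack_tti_for(uint32_t tti_tx_dl)
{
  return (tti_tx_dl + FDD_HARQ_DELAY) % TTI_WRAP;
}

// set_ack_info(tti_rx, ...) then selects the process h satisfying
// ack_tti_for(h.get_tti()) == tti_rx.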
ul_harq_proc* sched_ue_carrier::get_ul_harq(uint32_t tti)
{
return &ul_harq[tti % SCHED_MAX_HARQ_PROC];
}
uint32_t sched_ue_carrier::get_pending_ul_old_data()
{
uint32_t pending_data = 0;
for (auto& h : ul_harq) {
pending_data += h.get_pending_data();
}
return pending_data;
}
/* Find lowest DCI aggregation level supported by the UE spectral efficiency */
uint32_t sched_ue_carrier::get_aggr_level(uint32_t nof_bits)
{
uint32_t l = 0;
float max_coderate = srslte_cqi_to_coderate(dl_cqi);
float coderate = 99;
float factor = 1.5;
uint32_t l_max = 3;
if (cell->nof_prb == 6) {
factor = 1.0;
l_max = 2;
}
l_max = SRSLTE_MIN(max_aggr_level, l_max);
do {
coderate = srslte_pdcch_coderate(nof_bits, l);
l++;
} while (l < l_max && factor * coderate > max_coderate);
Debug("SCHED: CQI=%d, l=%d, nof_bits=%d, coderate=%.2f, max_coderate=%.2f\n",
dl_cqi,
l,
nof_bits,
coderate,
max_coderate);
return l;
} }
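get_aggr_level() walks the candidate aggregation levels from smallest to largest and stops once the PDCCH code rate, scaled by a safety factor, drops below the rate the reported CQI can support. A toy version of that loop; the 72-bits-per-CCE figure and the CQI-to-code-rate mapping are simplifying assumptions, not the srslte_* implementations:

#include <algorithm>
#include <cstdint>

// Very rough stand-in for srslte_cqi_to_coderate(): higher CQI tolerates a
// higher code rate. Values are illustrative only.
static float cqi_to_coderate(uint32_t cqi)
{
  return 0.06f * static_cast<float>(std::min<uint32_t>(cqi, 15));
}

// Rough PDCCH code rate: payload bits over the channel bits of 2^l CCEs
// (assuming 36 QPSK REs, i.e. 72 bits, per CCE).
static float pdcch_coderate(uint32_t nof_bits, uint32_t l)
{
  return static_cast<float>(nof_bits) / (72.0f * static_cast<float>(1u << l));
}

// Pick the smallest aggregation level whose scaled code rate fits the CQI.
static uint32_t pick_aggr_level(uint32_t nof_bits, uint32_t cqi, uint32_t l_max = 3, float factor = 1.5f)
{
  float    max_coderate = cqi_to_coderate(cqi);
  uint32_t l            = 0;
  float    coderate;
  do {
    coderate = pdcch_coderate(nof_bits, l);
    l++;
  } while (l < l_max && factor * coderate > max_coderate);
  return l;
}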
/* In this scheduler we tend to use all the available bandwidth and select the MCS /* In this scheduler we tend to use all the available bandwidth and select the MCS
* that approximates the minimum between the capacity and the requested rate * that approximates the minimum between the capacity and the requested rate
*/ */
int sched_ue::alloc_tbs(uint32_t nof_prb, uint32_t nof_re, uint32_t req_bytes, bool is_ul, int* mcs) int sched_ue_carrier::alloc_tbs(uint32_t nof_prb, uint32_t nof_re, uint32_t req_bytes, bool is_ul, int* mcs)
{ {
uint32_t sel_mcs = 0; uint32_t sel_mcs = 0;
@ -1214,7 +1232,7 @@ int sched_ue::alloc_tbs(uint32_t nof_prb, uint32_t nof_re, uint32_t req_bytes, b
uint32_t max_Qm = is_ul ? 4 : 6; // Allow 16-QAM in PUSCH Only uint32_t max_Qm = is_ul ? 4 : 6; // Allow 16-QAM in PUSCH Only
// TODO: Compute real spectral efficiency based on PUSCH-UCI configuration // TODO: Compute real spectral efficiency based on PUSCH-UCI configuration
int tbs_bytes = cqi_to_tbs(cqi, nof_prb, nof_re, max_mcs, max_Qm, is_ul, &sel_mcs) / 8; int tbs_bytes = sched_ue::cqi_to_tbs(cqi, nof_prb, nof_re, max_mcs, max_Qm, is_ul, &sel_mcs) / 8;
/* If less bytes are requested, lower the MCS */ /* If less bytes are requested, lower the MCS */
if (tbs_bytes > (int)req_bytes && req_bytes > 0) { if (tbs_bytes > (int)req_bytes && req_bytes > 0) {
@ -1240,4 +1258,45 @@ int sched_ue::alloc_tbs(uint32_t nof_prb, uint32_t nof_re, uint32_t req_bytes, b
return tbs_bytes; return tbs_bytes;
} }
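alloc_tbs() first derives the largest TBS the reported CQI supports and then, if that exceeds what the UE actually has to send, lowers the MCS until the TBS just covers the request, approximating the minimum of capacity and demand described in the comment above. A compact sketch of that idea; the TBS table is a made-up monotone stand-in for the srslte_ra_tbs_* lookups:

#include <algorithm>
#include <cstdint>
#include <vector>

// Made-up monotone TBS-per-MCS table (bytes) for a fixed allocation size.
static const std::vector<uint32_t> tbs_by_mcs = {16, 24, 32, 56, 88, 120, 152, 208, 256, 328};

// Start from the highest MCS the channel supports and back off while a lower
// MCS still satisfies the requested number of bytes.
static uint32_t alloc_tbs_sketch(uint32_t max_mcs_from_cqi, uint32_t req_bytes, uint32_t* sel_mcs)
{
  uint32_t mcs = std::min<uint32_t>(max_mcs_from_cqi, tbs_by_mcs.size() - 1);
  while (mcs > 0 && req_bytes > 0 && tbs_by_mcs[mcs - 1] >= req_bytes) {
    mcs--; // a lower MCS still fits the request: prefer robustness
  }
  if (sel_mcs != nullptr) {
    *sel_mcs = mcs;
  }
  return tbs_by_mcs[mcs];
}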
int sched_ue_carrier::alloc_tbs_dl(uint32_t nof_prb, uint32_t nof_re, uint32_t req_bytes, int* mcs)
{
return alloc_tbs(nof_prb, nof_re, req_bytes, false, mcs);
}
int sched_ue_carrier::alloc_tbs_ul(uint32_t nof_prb, uint32_t nof_re, uint32_t req_bytes, int* mcs)
{
return alloc_tbs(nof_prb, nof_re, req_bytes, true, mcs);
}
uint32_t sched_ue_carrier::get_required_prb_ul(uint32_t req_bytes)
{
int mcs = 0;
uint32_t nbytes = 0;
uint32_t N_srs = 0;
uint32_t n = 0;
if (req_bytes == 0) {
return 0;
}
for (n = 1; n < cell->nof_prb && nbytes < req_bytes + 4; n++) {
uint32_t nof_re = (2 * (SRSLTE_CP_NSYMB(cell->cp) - 1) - N_srs) * n * SRSLTE_NRE;
int tbs = 0;
if (fixed_mcs_ul < 0) {
tbs = alloc_tbs_ul(n, nof_re, 0, &mcs);
} else {
tbs = srslte_ra_tbs_from_idx(srslte_ra_tbs_idx_from_mcs(fixed_mcs_ul, true), n) / 8;
}
if (tbs > 0) {
nbytes = tbs;
}
}
while (!srslte_dft_precoding_valid_prb(n) && n <= cell->nof_prb) {
n++;
}
return n;
}
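The final loop in get_required_prb_ul() rounds the PRB count up to an allocation size that single-carrier DFT precoding accepts: PUSCH allocations must factor into powers of 2, 3 and 5. A minimal check equivalent in spirit to srslte_dft_precoding_valid_prb, written from the 3GPP constraint rather than copied from the library:

#include <cstdint>

// True if nof_prb is positive and has no prime factors other than 2, 3 and 5,
// which is the constraint on PUSCH allocation sizes.
static bool valid_pusch_prb(uint32_t nof_prb)
{
  if (nof_prb == 0) {
    return false;
  }
  for (uint32_t p : {2u, 3u, 5u}) {
    while (nof_prb % p == 0) {
      nof_prb /= p;
    }
  }
  return nof_prb == 1;
}

// Example: 7 PRBs is not a valid size, so the allocator grows it to 8.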
} // namespace srsenb } // namespace srsenb

@ -110,7 +110,7 @@ int main(int argc, char *argv[])
cell_cfg.sibs[1].period_rf = 16; cell_cfg.sibs[1].period_rf = 16;
cell_cfg.si_window_ms = 40; cell_cfg.si_window_ms = 40;
my_sched.init(NULL, &log_out); my_sched.init(nullptr, &log_out);
my_sched.set_metric(&dl_metric, &ul_metric); my_sched.set_metric(&dl_metric, &ul_metric);
my_sched.cell_cfg(&cell_cfg); my_sched.cell_cfg(&cell_cfg);
@ -132,7 +132,7 @@ int main(int argc, char *argv[])
my_sched.ue_cfg(rnti, &ue_cfg); my_sched.ue_cfg(rnti, &ue_cfg);
my_sched.bearer_ue_cfg(rnti, 0, &bearer_cfg); my_sched.bearer_ue_cfg(rnti, 0, &bearer_cfg);
// my_sched.dl_rlc_buffer_state(rnti, 0, 1e6, 0); // my_sched.dl_rlc_buffer_state(rnti, 0, 1e6, 0);
my_sched.ul_bsr(rnti, 0, 1e6, true); my_sched.ul_bsr(rnti, 0, 1e6f, true);
bool running = true; bool running = true;
uint32_t tti = 0; uint32_t tti = 0;
@ -143,11 +143,9 @@ int main(int argc, char *argv[])
} }
my_sched.dl_sched(tti, &sched_result_dl); my_sched.dl_sched(tti, &sched_result_dl);
my_sched.ul_sched(tti, &sched_result_ul); my_sched.ul_sched(tti, &sched_result_ul);
tti = (tti+1)%10240; tti = (tti + 1) % 10240;
if (tti >= 4) { if (tti >= 4) {
my_sched.ul_crc_info(tti, rnti, tti%2); my_sched.ul_crc_info(tti, rnti, 0, tti % 2);
} }
} }
} }

@ -125,6 +125,8 @@ bool check_old_pids = true;
* Dummies * * Dummies *
*******************/ *******************/
constexpr uint32_t CARRIER_IDX = 0;
struct sched_sim_args { struct sched_sim_args {
struct tti_event_t { struct tti_event_t {
struct user_event_t { struct user_event_t {
@ -311,7 +313,7 @@ int sched_tester::process_tti_args()
if (e.second.dl_data > 0) { if (e.second.dl_data > 0) {
uint32_t lcid = 0; uint32_t lcid = 0;
// FIXME: Does it need TTI for checking pending data? // FIXME: Does it need TTI for checking pending data?
uint32_t tot_dl_data = ue_db[e.first].get_pending_dl_new_data(tti_data.tti_tx_dl) + e.second.dl_data; uint32_t tot_dl_data = ue_db[e.first].get_pending_dl_new_data() + e.second.dl_data;
dl_rlc_buffer_state(e.first, lcid, tot_dl_data, 0); dl_rlc_buffer_state(e.first, lcid, tot_dl_data, 0);
} }
} }
@ -326,15 +328,15 @@ void sched_tester::before_sched()
uint16_t rnti = it.first; uint16_t rnti = it.first;
srsenb::sched_ue* user = &it.second; srsenb::sched_ue* user = &it.second;
tester_user_results d; tester_user_results d;
srsenb::ul_harq_proc* hul = user->get_ul_harq(tti_data.tti_tx_ul); srsenb::ul_harq_proc* hul = user->get_ul_harq(tti_data.tti_tx_ul, CARRIER_IDX);
d.ul_pending_data = get_ul_buffer(rnti); d.ul_pending_data = get_ul_buffer(rnti);
// user->get_pending_ul_new_data(tti_data.tti_tx_ul) or hul->has_pending_retx(); // get_ul_buffer(rnti); // user->get_pending_ul_new_data(tti_data.tti_tx_ul) or hul->has_pending_retx(); // get_ul_buffer(rnti);
d.dl_pending_data = get_dl_buffer(rnti); d.dl_pending_data = get_dl_buffer(rnti);
d.has_ul_retx = hul->has_pending_retx(); d.has_ul_retx = hul->has_pending_retx();
d.has_ul_tx = d.has_ul_retx or d.ul_pending_data > 0; d.has_ul_tx = d.has_ul_retx or d.ul_pending_data > 0;
srsenb::dl_harq_proc* hdl = user->get_pending_dl_harq(tti_data.tti_tx_dl); srsenb::dl_harq_proc* hdl = user->get_pending_dl_harq(tti_data.tti_tx_dl, CARRIER_IDX);
d.has_dl_retx = (hdl != nullptr) and hdl->has_pending_retx(0, tti_data.tti_tx_dl); d.has_dl_retx = (hdl != nullptr) and hdl->has_pending_retx(0, tti_data.tti_tx_dl);
d.has_dl_tx = (hdl != nullptr) or (it.second.get_empty_dl_harq() != nullptr and d.dl_pending_data > 0); d.has_dl_tx = (hdl != nullptr) or (it.second.get_empty_dl_harq(CARRIER_IDX) != nullptr and d.dl_pending_data > 0);
d.has_ul_newtx = not d.has_ul_retx and d.ul_pending_data > 0; d.has_ul_newtx = not d.has_ul_retx and d.ul_pending_data > 0;
tti_data.ue_data.insert(std::make_pair(rnti, d)); tti_data.ue_data.insert(std::make_pair(rnti, d));
tti_data.total_ues.dl_pending_data += d.dl_pending_data; tti_data.total_ues.dl_pending_data += d.dl_pending_data;
@ -344,11 +346,11 @@ void sched_tester::before_sched()
tti_data.total_ues.has_ul_newtx |= d.has_ul_newtx; tti_data.total_ues.has_ul_newtx |= d.has_ul_newtx;
for (uint32_t i = 0; i < 2 * FDD_HARQ_DELAY_MS; ++i) { for (uint32_t i = 0; i < 2 * FDD_HARQ_DELAY_MS; ++i) {
const srsenb::dl_harq_proc* h = user->get_dl_harq(i); const srsenb::dl_harq_proc* h = user->get_dl_harq(i, CARRIER_IDX);
tti_data.ue_data[rnti].dl_harqs[i] = *h; tti_data.ue_data[rnti].dl_harqs[i] = *h;
} }
// NOTE: ACK might have just cleared the harq for tti_data.tti_tx_ul // NOTE: ACK might have just cleared the harq for tti_data.tti_tx_ul
tti_data.ue_data[rnti].ul_harq = *user->get_ul_harq(tti_data.tti_tx_ul); tti_data.ue_data[rnti].ul_harq = *user->get_ul_harq(tti_data.tti_tx_ul, CARRIER_IDX);
} }
// TODO: Check whether pending pending_rar.rar_tti correspond to a prach_tti // TODO: Check whether pending pending_rar.rar_tti correspond to a prach_tti
@ -583,7 +585,7 @@ int sched_tester::test_harqs()
const auto& data = tti_data.sched_result_dl.data[i]; const auto& data = tti_data.sched_result_dl.data[i];
uint32_t h_id = data.dci.pid; uint32_t h_id = data.dci.pid;
uint16_t rnti = data.dci.rnti; uint16_t rnti = data.dci.rnti;
const srsenb::dl_harq_proc* h = ue_db[rnti].get_dl_harq(h_id); const srsenb::dl_harq_proc* h = ue_db[rnti].get_dl_harq(h_id, CARRIER_IDX);
CONDERROR(h == nullptr, "[TESTER] scheduled DL harq pid=%d does not exist\n", h_id); CONDERROR(h == nullptr, "[TESTER] scheduled DL harq pid=%d does not exist\n", h_id);
CONDERROR(h->is_empty(), "[TESTER] Cannot schedule an empty harq proc\n"); CONDERROR(h->is_empty(), "[TESTER] Cannot schedule an empty harq proc\n");
CONDERROR(h->get_tti() != tti_data.tti_tx_dl, CONDERROR(h->get_tti() != tti_data.tti_tx_dl,
@ -608,7 +610,7 @@ int sched_tester::test_harqs()
const auto& pusch = tti_data.sched_result_ul.pusch[i]; const auto& pusch = tti_data.sched_result_ul.pusch[i];
uint16_t rnti = pusch.dci.rnti; uint16_t rnti = pusch.dci.rnti;
const auto& ue_data = tti_data.ue_data[rnti]; const auto& ue_data = tti_data.ue_data[rnti];
const srsenb::ul_harq_proc* h = ue_db[rnti].get_ul_harq(tti_data.tti_tx_ul); const srsenb::ul_harq_proc* h = ue_db[rnti].get_ul_harq(tti_data.tti_tx_ul, CARRIER_IDX);
CONDERROR(h == nullptr or h->is_empty(), "[TESTER] scheduled UL harq does not exist or is empty\n"); CONDERROR(h == nullptr or h->is_empty(), "[TESTER] scheduled UL harq does not exist or is empty\n");
CONDERROR(h->get_tti() != tti_data.tti_tx_ul, CONDERROR(h->get_tti() != tti_data.tti_tx_ul,
"[TESTER] The scheduled UL harq does not a valid tti=%u\n", "[TESTER] The scheduled UL harq does not a valid tti=%u\n",
@ -632,7 +634,7 @@ int sched_tester::test_harqs()
const auto& phich = tti_data.sched_result_ul.phich[i]; const auto& phich = tti_data.sched_result_ul.phich[i];
CONDERROR(tti_data.ue_data.count(phich.rnti) == 0, "[TESTER] Allocated PHICH rnti no longer exists\n"); CONDERROR(tti_data.ue_data.count(phich.rnti) == 0, "[TESTER] Allocated PHICH rnti no longer exists\n");
const auto& hprev = tti_data.ue_data[phich.rnti].ul_harq; const auto& hprev = tti_data.ue_data[phich.rnti].ul_harq;
const auto* h = ue_db[phich.rnti].get_ul_harq(tti_data.tti_tx_ul); const auto* h = ue_db[phich.rnti].get_ul_harq(tti_data.tti_tx_ul, CARRIER_IDX);
CONDERROR(not hprev.has_pending_ack(), "[TESTER] Alloc PHICH did not have any pending ack\n"); CONDERROR(not hprev.has_pending_ack(), "[TESTER] Alloc PHICH did not have any pending ack\n");
bool maxretx_flag = hprev.nof_retx(0) + 1 >= hprev.max_nof_retx(); bool maxretx_flag = hprev.nof_retx(0) + 1 >= hprev.max_nof_retx();
if (phich.phich == sched_interface::ul_sched_phich_t::ACK) { if (phich.phich == sched_interface::ul_sched_phich_t::ACK) {
@ -662,7 +664,8 @@ int sched_tester::test_harqs()
ack_info_t ack_data; ack_info_t ack_data;
ack_data.rnti = tti_data.sched_result_dl.data[i].dci.rnti; ack_data.rnti = tti_data.sched_result_dl.data[i].dci.rnti;
ack_data.tti = FDD_HARQ_DELAY_MS + tti_data.tti_tx_dl; ack_data.tti = FDD_HARQ_DELAY_MS + tti_data.tti_tx_dl;
const srsenb::dl_harq_proc* dl_h = ue_db[ack_data.rnti].get_dl_harq(tti_data.sched_result_dl.data[i].dci.pid); const srsenb::dl_harq_proc* dl_h =
ue_db[ack_data.rnti].get_dl_harq(tti_data.sched_result_dl.data[i].dci.pid, CARRIER_IDX);
ack_data.dl_harq = *dl_h; ack_data.dl_harq = *dl_h;
if (ack_data.dl_harq.nof_retx(0) == 0) { if (ack_data.dl_harq.nof_retx(0) == 0) {
ack_data.dl_ack = randf() > sim_args.P_retx; ack_data.dl_ack = randf() > sim_args.P_retx;
@ -693,7 +696,7 @@ int sched_tester::test_harqs()
const auto& pusch = tti_data.sched_result_ul.pusch[i]; const auto& pusch = tti_data.sched_result_ul.pusch[i];
ul_ack_info_t ack_data; ul_ack_info_t ack_data;
ack_data.rnti = pusch.dci.rnti; ack_data.rnti = pusch.dci.rnti;
ack_data.ul_harq = *ue_db[ack_data.rnti].get_ul_harq(tti_data.tti_tx_ul); ack_data.ul_harq = *ue_db[ack_data.rnti].get_ul_harq(tti_data.tti_tx_ul, CARRIER_IDX);
ack_data.tti_tx_ul = tti_data.tti_tx_ul; ack_data.tti_tx_ul = tti_data.tti_tx_ul;
ack_data.tti_ack = tti_data.tti_tx_ul + FDD_HARQ_DELAY_MS; ack_data.tti_ack = tti_data.tti_tx_ul + FDD_HARQ_DELAY_MS;
if (ack_data.ul_harq.nof_retx(0) == 0) { if (ack_data.ul_harq.nof_retx(0) == 0) {
@ -708,9 +711,11 @@ int sched_tester::test_harqs()
if (check_old_pids) { if (check_old_pids) {
for (auto& user : ue_db) { for (auto& user : ue_db) {
for (int i = 0; i < 2 * FDD_HARQ_DELAY_MS; i++) { for (int i = 0; i < 2 * FDD_HARQ_DELAY_MS; i++) {
if (not(user.second.get_dl_harq(i)->is_empty(0) and user.second.get_dl_harq(1))) { if (not(user.second.get_dl_harq(i, CARRIER_IDX)->is_empty(0) and user.second.get_dl_harq(1, CARRIER_IDX))) {
if (srslte_tti_interval(tti_data.tti_tx_dl, user.second.get_dl_harq(i)->get_tti()) > 49) { if (srslte_tti_interval(tti_data.tti_tx_dl, user.second.get_dl_harq(i, CARRIER_IDX)->get_tti()) > 49) {
TESTERROR("[TESTER] The pid=%d for rnti=0x%x got old.\n", user.second.get_dl_harq(i)->get_id(), user.first); TESTERROR("[TESTER] The pid=%d for rnti=0x%x got old.\n",
user.second.get_dl_harq(i, CARRIER_IDX)->get_id(),
user.first);
} }
} }
} }
@ -920,7 +925,7 @@ int sched_tester::ack_txs()
if (ack_it.second.tti != tti_data.tti_rx) { if (ack_it.second.tti != tti_data.tti_rx) {
continue; continue;
} }
srsenb::dl_harq_proc* h = ue_db[ack_it.second.rnti].get_dl_harq(ack_it.second.dl_harq.get_id()); srsenb::dl_harq_proc* h = ue_db[ack_it.second.rnti].get_dl_harq(ack_it.second.dl_harq.get_id(), CARRIER_IDX);
const srsenb::dl_harq_proc& hack = ack_it.second.dl_harq; const srsenb::dl_harq_proc& hack = ack_it.second.dl_harq;
CONDERROR(hack.is_empty(), "[TESTER] The acked DL harq was not active\n"); CONDERROR(hack.is_empty(), "[TESTER] The acked DL harq was not active\n");
@ -929,7 +934,7 @@ int sched_tester::ack_txs()
if (ack_it.second.dl_harq.is_empty(tb)) { if (ack_it.second.dl_harq.is_empty(tb)) {
continue; continue;
} }
ret |= dl_ack_info(tti_data.tti_rx, ack_it.second.rnti, tb, ack_it.second.dl_ack) > 0; ret |= dl_ack_info(tti_data.tti_rx, ack_it.second.rnti, CARRIER_IDX, tb, ack_it.second.dl_ack) > 0;
} }
CONDERROR(not ret, "[TESTER] The dl harq proc that was acked does not exist\n"); CONDERROR(not ret, "[TESTER] The dl harq proc that was acked does not exist\n");
@ -950,13 +955,13 @@ int sched_tester::ack_txs()
if (ack_it.first != tti_data.tti_rx) { if (ack_it.first != tti_data.tti_rx) {
continue; continue;
} }
srsenb::ul_harq_proc* h = ue_db[ack_it.second.rnti].get_ul_harq(tti_data.tti_rx); srsenb::ul_harq_proc* h = ue_db[ack_it.second.rnti].get_ul_harq(tti_data.tti_rx, CARRIER_IDX);
const srsenb::ul_harq_proc& hack = ack_it.second.ul_harq; const srsenb::ul_harq_proc& hack = ack_it.second.ul_harq;
CONDERROR(h == nullptr or h->get_tti() != hack.get_tti(), "[TESTER] UL Harq TTI does not match the ACK TTI\n"); CONDERROR(h == nullptr or h->get_tti() != hack.get_tti(), "[TESTER] UL Harq TTI does not match the ACK TTI\n");
CONDERROR(h->is_empty(0), "[TESTER] The acked UL harq is not active\n"); CONDERROR(h->is_empty(0), "[TESTER] The acked UL harq is not active\n");
CONDERROR(hack.is_empty(0), "[TESTER] The acked UL harq was not active\n"); CONDERROR(hack.is_empty(0), "[TESTER] The acked UL harq was not active\n");
ul_crc_info(tti_data.tti_rx, ack_it.second.rnti, ack_it.second.ack); ul_crc_info(tti_data.tti_rx, ack_it.second.rnti, CARRIER_IDX, ack_it.second.ack);
CONDERROR(!h->get_pending_data(), "[TESTER] UL harq lost its pending data\n"); CONDERROR(!h->get_pending_data(), "[TESTER] UL harq lost its pending data\n");
CONDERROR(!h->has_pending_ack(), "[TESTER] ACK/NACKed UL harq should have a pending ACK\n"); CONDERROR(!h->has_pending_ack(), "[TESTER] ACK/NACKed UL harq should have a pending ACK\n");
