updated scheduler interfaces to accommodate multiple carriers

Branch: master
Author: Francisco Paisana (5 years ago)
Parent: 6d896ee453
Commit: cab9327b1a

@@ -74,7 +74,9 @@ public:
ul_sched_ack_t phich[MAX_GRANTS];
uint32_t nof_grants;
uint32_t nof_phich;
} ul_sched_t;
} ul_sched_t; // per carrier
typedef std::vector<ul_sched_t> ul_sched_list_t;
virtual int sr_detected(uint32_t tti, uint16_t rnti) = 0;
virtual int rach_detected(uint32_t tti, uint32_t primary_cc_idx, uint32_t preamble_idx, uint32_t time_adv) = 0;

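The new `ul_sched_list_t` makes the UL scheduler output explicitly per-carrier: one `ul_sched_t` entry per component carrier. A minimal sketch of how a consumer might walk it (the helper name is illustrative, not from this commit):

    // Hypothetical consumer of the per-carrier UL results.
    static void process_ul_results(const ul_sched_list_t& ul_res)
    {
      for (uint32_t cc_idx = 0; cc_idx < ul_res.size(); ++cc_idx) {
        const ul_sched_t& cc_res = ul_res[cc_idx]; // result for carrier cc_idx
        for (uint32_t i = 0; i < cc_res.nof_phich; ++i) {
          // forward cc_res.phich[i] to the PHY of carrier cc_idx
        }
      }
    }
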
@@ -230,19 +230,19 @@ public:
virtual int dl_mac_buffer_state(uint16_t rnti, uint32_t ce_code) = 0;
/* DL information */
virtual int dl_ack_info(uint32_t tti, uint16_t rnti, uint32_t tb_idx, bool ack) = 0;
virtual int dl_rach_info(dl_sched_rar_info_t rar_info) = 0;
virtual int dl_ri_info(uint32_t tti, uint16_t rnti, uint32_t ri_value) = 0;
virtual int dl_pmi_info(uint32_t tti, uint16_t rnti, uint32_t pmi_value) = 0;
virtual int dl_cqi_info(uint32_t tti, uint16_t rnti, uint32_t cqi_value) = 0;
virtual int dl_ack_info(uint32_t tti, uint16_t rnti, uint32_t cc_idx, uint32_t tb_idx, bool ack) = 0;
virtual int dl_rach_info(dl_sched_rar_info_t rar_info) = 0;
virtual int dl_ri_info(uint32_t tti, uint16_t rnti, uint32_t cc_idx, uint32_t ri_value) = 0;
virtual int dl_pmi_info(uint32_t tti, uint16_t rnti, uint32_t cc_idx, uint32_t pmi_value) = 0;
virtual int dl_cqi_info(uint32_t tti, uint16_t rnti, uint32_t cc_idx, uint32_t cqi_value) = 0;
/* UL information */
virtual int ul_crc_info(uint32_t tti, uint16_t rnti, bool crc) = 0;
virtual int ul_sr_info(uint32_t tti, uint16_t rnti) = 0;
virtual int ul_bsr(uint16_t rnti, uint32_t lcid, uint32_t bsr, bool set_value = true) = 0;
virtual int ul_recv_len(uint16_t rnti, uint32_t lcid, uint32_t len) = 0;
virtual int ul_phr(uint16_t rnti, int phr) = 0;
virtual int ul_cqi_info(uint32_t tti, uint16_t rnti, uint32_t cqi, uint32_t ul_ch_code) = 0;
virtual int ul_crc_info(uint32_t tti, uint16_t rnti, uint32_t cc_idx, bool crc) = 0;
virtual int ul_sr_info(uint32_t tti, uint16_t rnti) = 0;
virtual int ul_bsr(uint16_t rnti, uint32_t lcid, uint32_t bsr, bool set_value = true) = 0;
virtual int ul_recv_len(uint16_t rnti, uint32_t lcid, uint32_t len) = 0;
virtual int ul_phr(uint16_t rnti, int phr) = 0;
virtual int ul_cqi_info(uint32_t tti, uint16_t rnti, uint32_t cc_idx, uint32_t cqi, uint32_t ul_ch_code) = 0;
/* Run Scheduler for this tti */
virtual int dl_sched(uint32_t tti, dl_sched_res_t* sched_result) = 0;

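Every feedback entry point above now identifies which component carrier the measurement came from. A sketch of the new call shapes from MAC/PHY glue code (`sched_itf` stands for a pointer to the scheduler interface above; the values are illustrative):

    uint32_t tti = 1234, cc_idx = 0; // feedback measured on carrier 0
    uint16_t rnti = 0x46;
    sched_itf->dl_cqi_info(tti, rnti, cc_idx, /* cqi_value */ 12);
    sched_itf->dl_ack_info(tti, rnti, cc_idx, /* tb_idx */ 0, /* ack */ true);
    sched_itf->ul_crc_info(tti, rnti, cc_idx, /* crc */ true);
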
@@ -87,16 +87,16 @@ public:
{
public:
/* Virtual methods for user metric calculation */
virtual void set_log(srslte::log* log_) = 0;
virtual void sched_users(std::map<uint16_t, sched_ue>& ue_db, dl_tti_sched_t* tti_sched) = 0;
virtual void set_log(srslte::log* log_) = 0;
virtual void sched_users(std::map<uint16_t, sched_ue>& ue_db, dl_tti_sched_t* tti_sched, uint32_t cc_idx) = 0;
};
class metric_ul
{
public:
/* Virtual methods for user metric calculation */
virtual void set_log(srslte::log* log_) = 0;
virtual void sched_users(std::map<uint16_t, sched_ue>& ue_db, ul_tti_sched_t* tti_sched) = 0;
virtual void set_log(srslte::log* log_) = 0;
virtual void sched_users(std::map<uint16_t, sched_ue>& ue_db, ul_tti_sched_t* tti_sched, uint32_t cc_idx) = 0;
};
/*************************************************************
@@ -119,7 +119,7 @@ public:
bool ue_exists(uint16_t rnti) final;
void ue_needs_ta_cmd(uint16_t rnti, uint32_t nof_ta_cmd);
void phy_config_enabled(uint16_t rnti, bool enabled);
void phy_config_enabled(uint16_t rnti, uint32_t cc_idx, bool enabled);
int bearer_ue_cfg(uint16_t rnti, uint32_t lc_id, ue_bearer_cfg_t* cfg) final;
int bearer_ue_rem(uint16_t rnti, uint32_t lc_id) final;
@@ -131,18 +131,18 @@ public:
int dl_mac_buffer_state(uint16_t rnti, uint32_t ce_code) final;
int dl_ant_info(uint16_t rnti, asn1::rrc::phys_cfg_ded_s::ant_info_c_* dedicated);
int dl_ack_info(uint32_t tti, uint16_t rnti, uint32_t tb_idx, bool ack) final;
int dl_ack_info(uint32_t tti, uint16_t rnti, uint32_t cc_idx, uint32_t tb_idx, bool ack) final;
int dl_rach_info(dl_sched_rar_info_t rar_info) final;
int dl_ri_info(uint32_t tti, uint16_t rnti, uint32_t ri_value) final;
int dl_pmi_info(uint32_t tti, uint16_t rnti, uint32_t pmi_value) final;
int dl_cqi_info(uint32_t tti, uint16_t rnti, uint32_t cqi_value) final;
int dl_ri_info(uint32_t tti, uint16_t rnti, uint32_t cc_idx, uint32_t ri_value) final;
int dl_pmi_info(uint32_t tti, uint16_t rnti, uint32_t cc_idx, uint32_t pmi_value) final;
int dl_cqi_info(uint32_t tti, uint16_t rnti, uint32_t cc_idx, uint32_t cqi_value) final;
int ul_crc_info(uint32_t tti, uint16_t rnti, bool crc) final;
int ul_crc_info(uint32_t tti, uint16_t rnti, uint32_t cc_idx, bool crc) final;
int ul_sr_info(uint32_t tti, uint16_t rnti) override;
int ul_bsr(uint16_t rnti, uint32_t lcid, uint32_t bsr, bool set_value = true) final;
int ul_recv_len(uint16_t rnti, uint32_t lcid, uint32_t len) final;
int ul_phr(uint16_t rnti, int phr) final;
int ul_cqi_info(uint32_t tti, uint16_t rnti, uint32_t cqi, uint32_t ul_ch_code) final;
int ul_cqi_info(uint32_t tti, uint16_t rnti, uint32_t cc_idx, uint32_t cqi, uint32_t ul_ch_code) final;
int dl_sched(uint32_t tti, dl_sched_res_t* sched_result) final;
int ul_sched(uint32_t tti, ul_sched_res_t* sched_result) final;

@@ -32,7 +32,7 @@ class ra_sched;
class sched::carrier_sched
{
public:
explicit carrier_sched(sched* sched_);
explicit carrier_sched(sched* sched_, uint32_t cc_idx_);
void reset();
void carrier_cfg();
void set_metric(sched::metric_dl* dl_metric_, sched::metric_ul* ul_metric_);
@@ -60,6 +60,7 @@ private:
srslte::log* log_h = nullptr;
metric_dl* dl_metric = nullptr;
metric_ul* ul_metric = nullptr;
const uint32_t cc_idx;
// derived from args
prbmask_t prach_mask;

@@ -218,7 +218,7 @@ public:
sched_interface::dl_sched_res_t dl_sched_result;
sched_interface::ul_sched_res_t ul_sched_result;
void init(const sched_params_t& sched_params_);
void init(const sched_params_t& sched_params_, uint32_t cc_idx_);
void new_tti(uint32_t tti_rx_, uint32_t start_cfi);
alloc_outcome_t alloc_bc(uint32_t aggr_lvl, uint32_t sib_idx, uint32_t sib_ntx);
alloc_outcome_t alloc_paging(uint32_t aggr_lvl, uint32_t paging_payload);
@@ -268,6 +268,7 @@ private:
// consts
const sched_params_t* sched_params = nullptr;
srslte::log* log_h = nullptr;
uint32_t cc_idx = 0;
// internal state
tti_params_t tti_params{10241};

@@ -32,11 +32,11 @@ class dl_metric_rr : public sched::metric_dl
public:
void set_log(srslte::log* log_) final;
void sched_users(std::map<uint16_t, sched_ue>& ue_db, dl_tti_sched_t* tti_sched) final;
void sched_users(std::map<uint16_t, sched_ue>& ue_db, dl_tti_sched_t* tti_sched, uint32_t cc_idx) final;
private:
bool find_allocation(uint32_t nof_rbg, rbgmask_t* rbgmask);
dl_harq_proc* allocate_user(sched_ue* user);
dl_harq_proc* allocate_user(sched_ue* user, uint32_t cc_idx);
srslte::log* log_h = nullptr;
dl_tti_sched_t* tti_alloc = nullptr;
@@ -46,12 +46,12 @@ class ul_metric_rr : public sched::metric_ul
{
public:
void set_log(srslte::log* log_) final;
void sched_users(std::map<uint16_t, sched_ue>& ue_db, ul_tti_sched_t* tti_sched) final;
void sched_users(std::map<uint16_t, sched_ue>& ue_db, ul_tti_sched_t* tti_sched, uint32_t cc_idx) final;
private:
bool find_allocation(uint32_t L, ul_harq_proc::ul_alloc_t* alloc);
ul_harq_proc* allocate_user_newtx_prbs(sched_ue* user);
ul_harq_proc* allocate_user_retx_prbs(sched_ue* user);
ul_harq_proc* allocate_user_newtx_prbs(sched_ue* user, uint32_t cc_idx);
ul_harq_proc* allocate_user_retx_prbs(sched_ue* user, uint32_t cc_idx);
srslte::log* log_h = nullptr;
ul_tti_sched_t* tti_alloc = nullptr;

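Since `sched_users()` now receives the carrier index, a metric implementation schedules one carrier per invocation. A bare-bones custom DL metric satisfying the new interface (purely illustrative, not part of this commit):

    class dummy_metric_dl : public sched::metric_dl
    {
    public:
      void set_log(srslte::log* log_) final { log_h = log_; }
      void sched_users(std::map<uint16_t, sched_ue>& ue_db, dl_tti_sched_t* tti_sched, uint32_t cc_idx) final
      {
        // allocate users on carrier cc_idx only, e.g. by iterating ue_db
      }

    private:
      srslte::log* log_h = nullptr;
    };
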
@@ -25,6 +25,7 @@
#include "srslte/common/log.h"
#include "srslte/interfaces/sched_interface.h"
#include <map>
#include <vector>
#include "scheduler_harq.h"
#include "srslte/asn1/rrc_asn1.h"
@@ -34,6 +35,54 @@ namespace srsenb {
class sched_params_t;
struct sched_ue_carrier {
const static int SCHED_MAX_HARQ_PROC = SRSLTE_FDD_NOF_HARQ;
sched_ue_carrier(sched_interface::ue_cfg_t* cfg_,
srslte_cell_t* cell_cfg_,
uint16_t rnti_,
uint32_t cc_idx_,
srslte::log* log_);
void reset();
// Harq access
void reset_old_pending_pids(uint32_t tti_rx);
dl_harq_proc* get_pending_dl_harq(uint32_t tti_tx_dl);
dl_harq_proc* get_empty_dl_harq();
int set_ack_info(uint32_t tti_rx, uint32_t tb_idx, bool ack);
ul_harq_proc* get_ul_harq(uint32_t tti);
uint32_t get_pending_ul_old_data();
uint32_t get_aggr_level(uint32_t nof_bits);
int alloc_tbs(uint32_t nof_prb, uint32_t nof_re, uint32_t req_bytes, bool is_ul, int* mcs);
int alloc_tbs_dl(uint32_t nof_prb, uint32_t nof_re, uint32_t req_bytes, int* mcs);
int alloc_tbs_ul(uint32_t nof_prb, uint32_t nof_re, uint32_t req_bytes, int* mcs);
uint32_t get_required_prb_ul(uint32_t req_bytes);
std::array<dl_harq_proc, SCHED_MAX_HARQ_PROC> dl_harq = {};
std::array<ul_harq_proc, SCHED_MAX_HARQ_PROC> ul_harq = {};
uint32_t dl_ri = 0;
uint32_t dl_ri_tti = 0;
uint32_t dl_pmi = 0;
uint32_t dl_pmi_tti = 0;
uint32_t dl_cqi = 0;
uint32_t dl_cqi_tti = 0;
uint32_t ul_cqi = 0;
uint32_t ul_cqi_tti = 0;
int max_mcs_dl = 28, max_mcs_ul = 28;
uint32_t max_aggr_level = 3;
int fixed_mcs_ul = 0, fixed_mcs_dl = 0;
private:
srslte::log* log_h = nullptr;
sched_interface::ue_cfg_t* cfg = nullptr;
srslte_cell_t* cell = nullptr;
uint32_t cc_idx;
uint16_t rnti;
};
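
`sched_ue_carrier` gathers everything that is intrinsically per-carrier: the HARQ entities, CQI/RI/PMI state, and the MCS/aggregation caps. `sched_ue` then owns one instance per configured carrier (see the `std::vector<sched_ue_carrier> carriers` member further down) and routes by `cc_idx`. Illustrative usage, mirroring what this commit does in `set_cfg()`:

    // One carrier object per configured cell; this commit only creates cc_idx = 0.
    carriers.emplace_back(&cfg, &cell, rnti, /* cc_idx */ 0, log_h);
    dl_harq_proc* h = carriers[0].get_empty_dl_harq(); // HARQ state now lives here
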
/** This class is designed to be thread-safe because it is called from workers through scheduler thread and from
* higher layers and mac threads.
*
@@ -58,7 +107,7 @@ public:
************************************************************/
sched_ue();
void reset();
void phy_config_enabled(uint32_t tti, bool enabled);
void phy_config_enabled(uint32_t tti, uint32_t cc_idx, bool enabled);
void set_cfg(uint16_t rnti, const sched_params_t& sched_params_, sched_interface::ue_cfg_t* cfg);
void set_bearer_cfg(uint32_t lc_id, srsenb::sched_interface::ue_bearer_cfg_t* cfg);
@@ -70,12 +119,12 @@ public:
void mac_buffer_state(uint32_t ce_code);
void ul_recv_len(uint32_t lcid, uint32_t len);
void set_dl_ant_info(asn1::rrc::phys_cfg_ded_s::ant_info_c_* dedicated);
void set_ul_cqi(uint32_t tti, uint32_t cqi, uint32_t ul_ch_code);
void set_dl_ri(uint32_t tti, uint32_t ri);
void set_dl_pmi(uint32_t tti, uint32_t ri);
void set_dl_cqi(uint32_t tti, uint32_t cqi);
int set_ack_info(uint32_t tti, uint32_t tb_idx, bool ack);
void set_ul_crc(uint32_t tti, bool crc_res);
void set_ul_cqi(uint32_t tti, uint32_t cc_idx, uint32_t cqi, uint32_t ul_ch_code);
void set_dl_ri(uint32_t tti, uint32_t cc_idx, uint32_t ri);
void set_dl_pmi(uint32_t tti, uint32_t cc_idx, uint32_t ri);
void set_dl_cqi(uint32_t tti, uint32_t cc_idx, uint32_t cqi);
int set_ack_info(uint32_t tti, uint32_t cc_idx, uint32_t tb_idx, bool ack);
void set_ul_crc(uint32_t tti, uint32_t cc_idx, bool crc_res);
/*******************************************************
* Custom functions
@@ -87,28 +136,28 @@ public:
void set_max_mcs(int mcs_ul, int mcs_dl, int max_aggr_level = -1);
void set_fixed_mcs(int mcs_ul, int mcs_dl);
dl_harq_proc* find_dl_harq(uint32_t tti);
dl_harq_proc* get_dl_harq(uint32_t idx);
dl_harq_proc* find_dl_harq(uint32_t tti_rx, uint32_t cc_idx);
dl_harq_proc* get_dl_harq(uint32_t idx, uint32_t cc_idx);
uint16_t get_rnti() const { return rnti; }
/*******************************************************
* Functions used by scheduler metric objects
*******************************************************/
uint32_t get_required_prb_dl(uint32_t req_bytes, uint32_t nof_ctrl_symbols);
uint32_t get_required_prb_ul(uint32_t req_bytes);
uint32_t get_required_prb_dl(uint32_t cc_idx, uint32_t req_bytes, uint32_t nof_ctrl_symbols);
uint32_t get_required_prb_ul(uint32_t cc_idx, uint32_t req_bytes);
uint32_t prb_to_rbg(uint32_t nof_prb);
uint32_t rgb_to_prb(uint32_t nof_rbg);
uint32_t get_pending_dl_new_data(uint32_t tti);
uint32_t get_pending_dl_new_data();
uint32_t get_pending_ul_new_data(uint32_t tti);
uint32_t get_pending_ul_old_data();
uint32_t get_pending_dl_new_data_total(uint32_t tti);
uint32_t get_pending_ul_old_data(uint32_t cc_idx);
uint32_t get_pending_dl_new_data_total();
void reset_pending_pids(uint32_t tti_rx);
dl_harq_proc* get_pending_dl_harq(uint32_t tti);
dl_harq_proc* get_empty_dl_harq();
ul_harq_proc* get_ul_harq(uint32_t tti);
void reset_pending_pids(uint32_t tti_rx, uint32_t cc_idx);
dl_harq_proc* get_pending_dl_harq(uint32_t tti, uint32_t cc_idx);
dl_harq_proc* get_empty_dl_harq(uint32_t cc_idx);
ul_harq_proc* get_ul_harq(uint32_t tti, uint32_t cc_idx);
/*******************************************************
* Functions used by the scheduler object
@@ -122,35 +171,47 @@ public:
int generate_format1(dl_harq_proc* h,
sched_interface::dl_sched_data_t* data,
uint32_t tti,
uint32_t cc_idx,
uint32_t cfi,
const rbgmask_t& user_mask);
int generate_format2a(dl_harq_proc* h,
sched_interface::dl_sched_data_t* data,
uint32_t tti,
uint32_t cc_idx,
uint32_t cfi,
const rbgmask_t& user_mask);
int generate_format2(dl_harq_proc* h,
sched_interface::dl_sched_data_t* data,
uint32_t tti,
uint32_t cc_idx,
uint32_t cfi,
const rbgmask_t& user_mask);
int generate_format0(sched_interface::ul_sched_data_t* data,
uint32_t tti,
uint32_t cc_idx,
ul_harq_proc::ul_alloc_t alloc,
bool needs_pdcch,
srslte_dci_location_t cce_range,
int explicit_mcs = -1);
srslte_dci_format_t get_dci_format();
uint32_t get_aggr_level(uint32_t nof_bits);
sched_dci_cce_t* get_locations(uint32_t current_cfi, uint32_t sf_idx);
sched_ue_carrier* get_ue_carrier(uint32_t cc_idx) { return &carriers[cc_idx]; }
bool needs_cqi(uint32_t tti, bool will_send = false);
bool needs_cqi(uint32_t tti, uint32_t cc_idx, bool will_send = false);
uint32_t get_max_retx();
bool get_pucch_sched(uint32_t current_tti, uint32_t prb_idx[2]);
bool get_pucch_sched(uint32_t current_tti, uint32_t cc_idx, uint32_t prb_idx[2]);
bool pucch_sr_collision(uint32_t current_tti, uint32_t n_cce);
static int cqi_to_tbs(uint32_t cqi,
uint32_t nof_prb,
uint32_t nof_re,
uint32_t max_mcs,
uint32_t max_Qm,
bool is_ul,
uint32_t* mcs);
private:
typedef struct {
sched_interface::ue_bearer_cfg_t cfg;
@@ -163,34 +224,26 @@ private:
int alloc_pdu(int tbs, sched_interface::dl_sched_pdu_t* pdu);
static uint32_t format1_count_prb(uint32_t bitmask, uint32_t cell_nof_prb);
static int cqi_to_tbs(uint32_t cqi,
uint32_t nof_prb,
uint32_t nof_re,
uint32_t max_mcs,
uint32_t max_Qm,
bool is_ul,
uint32_t* mcs);
int alloc_tbs_dl(uint32_t nof_prb, uint32_t nof_re, uint32_t req_bytes, int* mcs);
int alloc_tbs_ul(uint32_t nof_prb, uint32_t nof_re, uint32_t req_bytes, int* mcs);
int alloc_tbs(uint32_t nof_prb, uint32_t nof_re, uint32_t req_bytes, bool is_ul, int* mcs);
static bool bearer_is_ul(ue_bearer_t* lch);
static bool bearer_is_dl(ue_bearer_t* lch);
uint32_t get_pending_dl_new_data_unlocked(uint32_t tti);
uint32_t get_pending_ul_old_data_unlocked();
uint32_t get_pending_dl_new_data_unlocked();
uint32_t get_pending_ul_old_data_unlocked(uint32_t cc_idx);
uint32_t get_pending_ul_new_data_unlocked(uint32_t tti);
uint32_t get_pending_dl_new_data_total_unlocked(uint32_t tti);
uint32_t get_pending_dl_new_data_total_unlocked();
bool needs_cqi_unlocked(uint32_t tti, bool will_send = false);
bool needs_cqi_unlocked(uint32_t tti, uint32_t cc_idx, bool will_send = false);
int generate_format2a_unlocked(dl_harq_proc* h,
sched_interface::dl_sched_data_t* data,
uint32_t tti,
uint32_t cc_idx,
uint32_t cfi,
const rbgmask_t& user_mask);
bool is_first_dl_tx();
bool is_first_dl_tx(uint32_t cc_idx);
sched_interface::ue_cfg_t cfg = {};
srslte_cell_t cell = {};
@@ -206,22 +259,9 @@ private:
std::array<ue_bearer_t, sched_interface::MAX_LC> lch = {};
int power_headroom = 0;
uint32_t dl_ri = 0;
uint32_t dl_ri_tti = 0;
uint32_t dl_pmi = 0;
uint32_t dl_pmi_tti = 0;
uint32_t dl_cqi = 0;
uint32_t dl_cqi_tti = 0;
uint32_t cqi_request_tti = 0;
uint32_t ul_cqi = 0;
uint32_t ul_cqi_tti = 0;
uint16_t rnti = 0;
uint32_t max_mcs_dl = 0;
uint32_t max_aggr_level = 0;
uint32_t max_mcs_ul = 0;
uint32_t max_msg3retx = 0;
int fixed_mcs_ul = 0;
int fixed_mcs_dl = 0;
uint32_t nof_ta_cmd = 0;
@@ -231,12 +271,10 @@ private:
// Allowed DCI locations per CFI and per subframe
std::array<std::array<sched_dci_cce_t, 10>, 3> dci_locations = {};
const static int SCHED_MAX_HARQ_PROC = SRSLTE_FDD_NOF_HARQ;
std::array<dl_harq_proc, SCHED_MAX_HARQ_PROC> dl_harq = {};
std::array<ul_harq_proc, SCHED_MAX_HARQ_PROC> ul_harq = {};
bool phy_config_dedicated_enabled = false;
asn1::rrc::phys_cfg_ded_s::ant_info_c_ dl_ant_info;
std::vector<sched_ue_carrier> carriers;
};
} // namespace srsenb

@@ -203,7 +203,9 @@ int mac::bearer_ue_rem(uint16_t rnti, uint32_t lc_id)
void mac::phy_config_enabled(uint16_t rnti, bool enabled)
{
scheduler.phy_config_enabled(rnti, enabled);
// FIXME: "cc_idx must be specified"
uint32_t cc_idx = 0;
scheduler.phy_config_enabled(rnti, cc_idx, enabled);
}
// Update UE configuration
@@ -320,9 +322,11 @@ void mac::rl_ok(uint16_t rnti)
int mac::ack_info(uint32_t tti, uint16_t rnti, uint32_t tb_idx, bool ack)
{
// FIXME: add cc_idx to interface
uint32_t cc_idx = 0;
pthread_rwlock_rdlock(&rwlock);
log_h->step(tti);
uint32_t nof_bytes = scheduler.dl_ack_info(tti, rnti, tb_idx, ack);
uint32_t nof_bytes = scheduler.dl_ack_info(tti, rnti, cc_idx, tb_idx, ack);
ue_db[rnti]->metrics_tx(ack, nof_bytes);
if (ack) {
@@ -337,6 +341,8 @@ int mac::ack_info(uint32_t tti, uint16_t rnti, uint32_t tb_idx, bool ack)
int mac::crc_info(uint32_t tti, uint16_t rnti, uint32_t nof_bytes, bool crc)
{
// FIXME: add cc_idx to interface
uint32_t cc_idx = 0;
log_h->step(tti);
int ret = -1;
pthread_rwlock_rdlock(&rwlock);
@@ -354,7 +360,7 @@ int mac::crc_info(uint32_t tti, uint16_t rnti, uint32_t nof_bytes, bool crc)
ue_db[rnti]->deallocate_pdu(tti);
}
ret = scheduler.ul_crc_info(tti, rnti, crc);
ret = scheduler.ul_crc_info(tti, rnti, cc_idx, crc);
} else {
Error("User rnti=0x%x not found\n", rnti);
}
@@ -378,11 +384,13 @@ int mac::set_dl_ant_info(uint16_t rnti, phys_cfg_ded_s::ant_info_c_* dl_ant_info
int mac::ri_info(uint32_t tti, uint16_t rnti, uint32_t ri_value)
{
// FIXME: add cc_idx to interface
uint32_t cc_idx = 0;
log_h->step(tti);
int ret = -1;
pthread_rwlock_rdlock(&rwlock);
if (ue_db.count(rnti)) {
scheduler.dl_ri_info(tti, rnti, ri_value);
scheduler.dl_ri_info(tti, rnti, cc_idx, ri_value);
ue_db[rnti]->metrics_dl_ri(ri_value);
ret = 0;
} else {
@@ -394,11 +402,13 @@ int mac::ri_info(uint32_t tti, uint16_t rnti, uint32_t ri_value)
int mac::pmi_info(uint32_t tti, uint16_t rnti, uint32_t pmi_value)
{
// FIXME: add cc_idx to interface
uint32_t cc_idx = 0;
log_h->step(tti);
pthread_rwlock_rdlock(&rwlock);
int ret = -1;
if (ue_db.count(rnti)) {
scheduler.dl_pmi_info(tti, rnti, pmi_value);
scheduler.dl_pmi_info(tti, rnti, cc_idx, pmi_value);
ue_db[rnti]->metrics_dl_pmi(pmi_value);
ret = 0;
} else {
@@ -410,12 +420,14 @@ int mac::pmi_info(uint32_t tti, uint16_t rnti, uint32_t pmi_value)
int mac::cqi_info(uint32_t tti, uint16_t rnti, uint32_t cqi_value)
{
// FIXME: add cc_idx to interface
uint32_t cc_idx = 0;
log_h->step(tti);
int ret = -1;
pthread_rwlock_rdlock(&rwlock);
if (ue_db.count(rnti)) {
scheduler.dl_cqi_info(tti, rnti, cqi_value);
scheduler.dl_cqi_info(tti, rnti, cc_idx, cqi_value);
ue_db[rnti]->metrics_dl_cqi(cqi_value);
ret = 0;
} else {
@@ -427,12 +439,14 @@ int mac::cqi_info(uint32_t tti, uint16_t rnti, uint32_t cqi_value)
int mac::snr_info(uint32_t tti, uint16_t rnti, float snr)
{
// FIXME: add cc_idx to interface
uint32_t cc_idx = 0;
log_h->step(tti);
int ret = -1;
pthread_rwlock_rdlock(&rwlock);
if (ue_db.count(rnti)) {
uint32_t cqi = srslte_cqi_from_snr(snr);
scheduler.ul_cqi_info(tti, rnti, cqi, 0);
scheduler.ul_cqi_info(tti, rnti, cc_idx, cqi, 0);
ret = 0;
} else {
Error("User rnti=0x%x not found\n", rnti);

@@ -109,7 +109,7 @@ sched::sched()
pthread_rwlock_init(&rwlock, nullptr);
// Initialize Independent carrier schedulers
carrier_schedulers.emplace_back(new carrier_sched{this});
carrier_schedulers.emplace_back(new carrier_sched{this, 0});
reset();
}
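
The constructor still creates a single `carrier_sched` with `cc_idx = 0`; with the new constructor argument, a multi-carrier setup would presumably become a loop (sketch only; `nof_carriers` is a hypothetical config value, not in this commit):

    for (uint32_t cc_idx = 0; cc_idx < nof_carriers; ++cc_idx) {
      carrier_schedulers.emplace_back(new carrier_sched{this, cc_idx});
    }
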
@@ -243,7 +243,7 @@ bool sched::ue_exists(uint16_t rnti)
void sched::ue_needs_ta_cmd(uint16_t rnti, uint32_t nof_ta_cmd)
{
pthread_rwlock_rdlock(&rwlock);
if (ue_db.count(rnti)) {
if (ue_db.count(rnti) > 0) {
ue_db[rnti].set_needs_ta_cmd(nof_ta_cmd);
} else {
Error("User rnti=0x%x not found\n", rnti);
@@ -251,10 +251,10 @@ void sched::ue_needs_ta_cmd(uint16_t rnti, uint32_t nof_ta_cmd)
pthread_rwlock_unlock(&rwlock);
}
void sched::phy_config_enabled(uint16_t rnti, bool enabled)
void sched::phy_config_enabled(uint16_t rnti, uint32_t cc_idx, bool enabled)
{
// FIXME: Check if correct use of current_tti
ue_db_access(rnti, [this, enabled](sched_ue& ue) { ue.phy_config_enabled(current_tti, enabled); });
ue_db_access(rnti, [this, cc_idx, enabled](sched_ue& ue) { ue.phy_config_enabled(current_tti, cc_idx, enabled); });
}
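
`phy_config_enabled()` uses the `ue_db_access(rnti, lambda)` helper seen throughout this file: look the UE up under the read lock and apply the lambda to it. The helper's body is not part of this diff; a plausible shape (assumed, for illustration only) would be:

    // Sketch of the locking helper; not shown in this commit.
    template <typename Func>
    int sched::ue_db_access(uint16_t rnti, Func f)
    {
      int ret = 0;
      pthread_rwlock_rdlock(&rwlock);
      auto it = ue_db.find(rnti);
      if (it != ue_db.end()) {
        f(it->second); // run the caller's action on the sched_ue
      } else {
        Error("User rnti=0x%x not found\n", rnti);
        ret = -1;
      }
      pthread_rwlock_unlock(&rwlock);
      return ret;
    }
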
int sched::bearer_ue_cfg(uint16_t rnti, uint32_t lc_id, sched_interface::ue_bearer_cfg_t* cfg_)
@@ -271,7 +271,7 @@ uint32_t sched::get_dl_buffer(uint16_t rnti)
{
// FIXME: Check if correct use of current_tti
uint32_t ret = 0;
ue_db_access(rnti, [this, &ret](sched_ue& ue) { ret = ue.get_pending_dl_new_data(current_tti); });
ue_db_access(rnti, [&ret](sched_ue& ue) { ret = ue.get_pending_dl_new_data(); });
return ret;
}
@@ -299,31 +299,32 @@ int sched::dl_ant_info(uint16_t rnti, asn1::rrc::phys_cfg_ded_s::ant_info_c_* dl
return ue_db_access(rnti, [dl_ant_info](sched_ue& ue) { ue.set_dl_ant_info(dl_ant_info); });
}
int sched::dl_ack_info(uint32_t tti, uint16_t rnti, uint32_t tb_idx, bool ack)
int sched::dl_ack_info(uint32_t tti, uint16_t rnti, uint32_t cc_idx, uint32_t tb_idx, bool ack)
{
int ret = -1;
ue_db_access(rnti, [tti, tb_idx, ack, &ret](sched_ue& ue) { ret = ue.set_ack_info(tti, tb_idx, ack); });
ue_db_access(rnti,
[tti, cc_idx, tb_idx, ack, &ret](sched_ue& ue) { ret = ue.set_ack_info(tti, cc_idx, tb_idx, ack); });
return ret;
}
int sched::ul_crc_info(uint32_t tti, uint16_t rnti, bool crc)
int sched::ul_crc_info(uint32_t tti, uint16_t rnti, uint32_t cc_idx, bool crc)
{
return ue_db_access(rnti, [tti, crc](sched_ue& ue) { ue.set_ul_crc(tti, crc); });
return ue_db_access(rnti, [tti, cc_idx, crc](sched_ue& ue) { ue.set_ul_crc(tti, cc_idx, crc); });
}
int sched::dl_ri_info(uint32_t tti, uint16_t rnti, uint32_t ri_value)
int sched::dl_ri_info(uint32_t tti, uint16_t rnti, uint32_t cc_idx, uint32_t ri_value)
{
return ue_db_access(rnti, [tti, ri_value](sched_ue& ue) { ue.set_dl_ri(tti, ri_value); });
return ue_db_access(rnti, [tti, cc_idx, ri_value](sched_ue& ue) { ue.set_dl_ri(tti, cc_idx, ri_value); });
}
int sched::dl_pmi_info(uint32_t tti, uint16_t rnti, uint32_t pmi_value)
int sched::dl_pmi_info(uint32_t tti, uint16_t rnti, uint32_t cc_idx, uint32_t pmi_value)
{
return ue_db_access(rnti, [tti, pmi_value](sched_ue& ue) { ue.set_dl_pmi(tti, pmi_value); });
return ue_db_access(rnti, [tti, cc_idx, pmi_value](sched_ue& ue) { ue.set_dl_pmi(tti, cc_idx, pmi_value); });
}
int sched::dl_cqi_info(uint32_t tti, uint16_t rnti, uint32_t cqi_value)
int sched::dl_cqi_info(uint32_t tti, uint16_t rnti, uint32_t cc_idx, uint32_t cqi_value)
{
return ue_db_access(rnti, [tti, cqi_value](sched_ue& ue) { ue.set_dl_cqi(tti, cqi_value); });
return ue_db_access(rnti, [tti, cc_idx, cqi_value](sched_ue& ue) { ue.set_dl_cqi(tti, cc_idx, cqi_value); });
}
int sched::dl_rach_info(dl_sched_rar_info_t rar_info)
@@ -331,9 +332,10 @@ int sched::dl_rach_info(dl_sched_rar_info_t rar_info)
return carrier_schedulers[0]->dl_rach_info(rar_info);
}
int sched::ul_cqi_info(uint32_t tti, uint16_t rnti, uint32_t cqi, uint32_t ul_ch_code)
int sched::ul_cqi_info(uint32_t tti, uint16_t rnti, uint32_t cc_idx, uint32_t cqi, uint32_t ul_ch_code)
{
return ue_db_access(rnti, [tti, cqi, ul_ch_code](sched_ue& ue) { ue.set_ul_cqi(tti, cqi, ul_ch_code); });
return ue_db_access(rnti,
[tti, cc_idx, cqi, ul_ch_code](sched_ue& ue) { ue.set_ul_cqi(tti, cc_idx, cqi, ul_ch_code); });
}
int sched::ul_bsr(uint16_t rnti, uint32_t lcid, uint32_t bsr, bool set_value)

@@ -275,7 +275,7 @@ const ra_sched::pending_msg3_t& ra_sched::find_pending_msg3(uint32_t tti) const
* Carrier scheduling
*******************************************************/
sched::carrier_sched::carrier_sched(sched* sched_) : sched_ptr(sched_)
sched::carrier_sched::carrier_sched(sched* sched_, uint32_t cc_idx_) : sched_ptr(sched_), cc_idx(cc_idx_)
{
tti_dl_mask.resize(1, 0);
}
@@ -314,7 +314,7 @@ void sched::carrier_sched::carrier_cfg()
// Initiate the tti_scheduler for each TTI
for (tti_sched_result_t& tti_sched : tti_scheds) {
tti_sched.init(*sched_params);
tti_sched.init(*sched_params, cc_idx);
}
}
@@ -370,7 +370,7 @@ tti_sched_result_t* sched::carrier_sched::generate_tti_result(uint32_t tti_rx)
/* reset PIDs with pending data or blocked */
for (auto& user : sched_ptr->ue_db) {
user.second.reset_pending_pids(tti_rx);
user.second.reset_pending_pids(tti_rx, cc_idx);
}
}
@@ -387,7 +387,7 @@ void sched::carrier_sched::generate_phich(tti_sched_result_t* tti_sched)
// user.has_pucch = false; // FIXME: What is this for?
ul_harq_proc* h = user.get_ul_harq(tti_sched->get_tti_rx());
ul_harq_proc* h = user.get_ul_harq(tti_sched->get_tti_rx(), cc_idx);
/* Indicate PHICH acknowledgment if needed */
if (h->has_pending_ack()) {
@@ -422,7 +422,7 @@ void sched::carrier_sched::alloc_dl_users(tti_sched_result_t* tti_result)
}
// call DL scheduler metric to fill RB grid
dl_metric->sched_users(sched_ptr->ue_db, tti_result);
dl_metric->sched_users(sched_ptr->ue_db, tti_result, cc_idx);
}
int sched::carrier_sched::alloc_ul_users(tti_sched_result_t* tti_sched)
@@ -448,11 +448,11 @@ int sched::carrier_sched::alloc_ul_users(tti_sched_result_t* tti_sched)
ul_mask |= pucch_mask;
/* Call scheduler for UL data */
ul_metric->sched_users(sched_ptr->ue_db, tti_sched);
ul_metric->sched_users(sched_ptr->ue_db, tti_sched, cc_idx);
/* Update pending data counters after this TTI */
for (auto& user : sched_ptr->ue_db) {
user.second.get_ul_harq(tti_tx_ul)->reset_pending_data();
user.second.get_ul_harq(tti_tx_ul, cc_idx)->reset_pending_data();
}
return SRSLTE_SUCCESS;

@@ -342,8 +342,8 @@ tti_grid_t::dl_ctrl_alloc_t tti_grid_t::alloc_dl_ctrl(uint32_t aggr_lvl, alloc_t
alloc_outcome_t tti_grid_t::alloc_dl_data(sched_ue* user, const rbgmask_t& user_mask)
{
srslte_dci_format_t dci_format = user->get_dci_format();
uint32_t aggr_level =
user->get_aggr_level(srslte_dci_format_sizeof(&sched_params->cfg->cell, nullptr, nullptr, dci_format));
uint32_t nof_bits = srslte_dci_format_sizeof(&sched_params->cfg->cell, nullptr, nullptr, dci_format);
uint32_t aggr_level = user->get_ue_carrier(cc_idx)->get_aggr_level(nof_bits);
return alloc_dl(aggr_level, alloc_type_t::DL_DATA, user_mask, user);
}
@@ -361,8 +361,8 @@ alloc_outcome_t tti_grid_t::alloc_ul_data(sched_ue* user, ul_harq_proc::ul_alloc
// Generate PDCCH except for RAR and non-adaptive retx
if (needs_pdcch) {
uint32_t aggr_idx =
user->get_aggr_level(srslte_dci_format_sizeof(&sched_params->cfg->cell, nullptr, nullptr, SRSLTE_DCI_FORMAT0));
uint32_t nof_bits = srslte_dci_format_sizeof(&sched_params->cfg->cell, nullptr, nullptr, SRSLTE_DCI_FORMAT0);
uint32_t aggr_idx = user->get_ue_carrier(cc_idx)->get_aggr_level(nof_bits);
if (not pdcch_alloc.alloc_dci(alloc_type_t::UL_DATA, aggr_idx, user)) {
if (log_h->get_level() == srslte::LOG_LEVEL_DEBUG) {
log_h->debug("No space in PDCCH for rnti=0x%x UL tx. Current PDCCH allocation: %s\n",
@@ -382,9 +382,10 @@ alloc_outcome_t tti_grid_t::alloc_ul_data(sched_ue* user, ul_harq_proc::ul_alloc
* TTI resource Scheduling Methods
*******************************************************/
void tti_sched_result_t::init(const sched_params_t& sched_params_)
void tti_sched_result_t::init(const sched_params_t& sched_params_, uint32_t cc_idx_)
{
sched_params = &sched_params_;
cc_idx = cc_idx_;
log_h = sched_params->log_h;
tti_alloc.init(*sched_params, 0);
}
@@ -575,7 +576,7 @@ alloc_outcome_t tti_sched_result_t::alloc_ul_user(sched_ue* user, ul_harq_proc::
{
// check whether adaptive/non-adaptive retx/newtx
tti_sched_result_t::ul_alloc_t::type_t alloc_type;
ul_harq_proc* h = user->get_ul_harq(get_tti_tx_ul());
ul_harq_proc* h = user->get_ul_harq(get_tti_tx_ul(), cc_idx);
bool has_retx = h->has_pending_retx();
if (has_retx) {
ul_harq_proc::ul_alloc_t prev_alloc = h->get_alloc();
@@ -722,21 +723,21 @@ void tti_sched_result_t::set_dl_data_sched_result(const pdcch_grid_t::alloc_resu
// Generate DCI Format1/2/2A
sched_ue* user = data_alloc.user_ptr;
dl_harq_proc* h = user->get_dl_harq(data_alloc.pid);
uint32_t data_before = user->get_pending_dl_new_data(get_tti_tx_dl());
dl_harq_proc* h = user->get_dl_harq(data_alloc.pid, cc_idx);
uint32_t data_before = user->get_pending_dl_new_data();
srslte_dci_format_t dci_format = user->get_dci_format();
bool is_newtx = h->is_empty();
int tbs = 0;
switch (dci_format) {
case SRSLTE_DCI_FORMAT1:
tbs = user->generate_format1(h, data, get_tti_tx_dl(), get_cfi(), data_alloc.user_mask);
tbs = user->generate_format1(h, data, get_tti_tx_dl(), cc_idx, get_cfi(), data_alloc.user_mask);
break;
case SRSLTE_DCI_FORMAT2:
tbs = user->generate_format2(h, data, get_tti_tx_dl(), get_cfi(), data_alloc.user_mask);
tbs = user->generate_format2(h, data, get_tti_tx_dl(), cc_idx, get_cfi(), data_alloc.user_mask);
break;
case SRSLTE_DCI_FORMAT2A:
tbs = user->generate_format2a(h, data, get_tti_tx_dl(), get_cfi(), data_alloc.user_mask);
tbs = user->generate_format2a(h, data, get_tti_tx_dl(), cc_idx, get_cfi(), data_alloc.user_mask);
break;
default:
Error("DCI format (%d) not implemented\n", dci_format);
@@ -749,7 +750,7 @@ void tti_sched_result_t::set_dl_data_sched_result(const pdcch_grid_t::alloc_resu
h->get_id(),
data_alloc.user_mask.to_hex().c_str(),
tbs,
user->get_pending_dl_new_data(get_tti_tx_dl()));
user->get_pending_dl_new_data());
continue;
}
@@ -764,7 +765,7 @@ void tti_sched_result_t::set_dl_data_sched_result(const pdcch_grid_t::alloc_resu
h->nof_retx(0) + h->nof_retx(1),
tbs,
data_before,
user->get_pending_dl_new_data(get_tti_tx_dl()));
user->get_pending_dl_new_data());
dl_sched_result.nof_data_elems++;
}
@@ -788,10 +789,10 @@ void tti_sched_result_t::set_ul_sched_result(const pdcch_grid_t::alloc_result_t&
/* Generate DCI Format1A */
uint32_t pending_data_before = user->get_pending_ul_new_data(get_tti_tx_ul());
int tbs =
user->generate_format0(pusch, get_tti_tx_ul(), ul_alloc.alloc, ul_alloc.needs_pdcch(), cce_range, fixed_mcs);
int tbs = user->generate_format0(
pusch, get_tti_tx_ul(), cc_idx, ul_alloc.alloc, ul_alloc.needs_pdcch(), cce_range, fixed_mcs);
ul_harq_proc* h = user->get_ul_harq(get_tti_tx_ul());
ul_harq_proc* h = user->get_ul_harq(get_tti_tx_ul(), cc_idx);
if (tbs <= 0) {
log_h->warning("SCHED: Error %s %s rnti=0x%x, pid=%d, dci=(%d,%d), prb=(%d,%d), tbs=%d, bsr=%d\n",
ul_alloc.type == ul_alloc_t::MSG3 ? "Msg3" : "UL",
@@ -827,7 +828,7 @@ void tti_sched_result_t::set_ul_sched_result(const pdcch_grid_t::alloc_result_t&
tbs,
user->get_pending_ul_new_data(get_tti_tx_ul()),
pending_data_before,
user->get_pending_ul_old_data());
user->get_pending_ul_old_data(cc_idx));
ul_sched_result.nof_dci_elems++;
}

@@ -41,25 +41,24 @@ void dl_metric_rr::set_log(srslte::log* log_)
log_h = log_;
}
void dl_metric_rr::sched_users(std::map<uint16_t, sched_ue>& ue_db, dl_tti_sched_t* tti_sched)
void dl_metric_rr::sched_users(std::map<uint16_t, sched_ue>& ue_db, dl_tti_sched_t* tti_sched, uint32_t cc_idx)
{
typedef std::map<uint16_t, sched_ue>::iterator it_t;
tti_alloc = tti_sched;
if (ue_db.empty())
if (ue_db.empty()) {
return;
}
// give priority in a time-domain RR basis
uint32_t priority_idx = tti_alloc->get_tti_tx_dl() % (uint32_t)ue_db.size();
it_t iter = ue_db.begin();
auto iter = ue_db.begin();
std::advance(iter, priority_idx);
for (uint32_t ue_count = 0; ue_count < ue_db.size(); ++iter, ++ue_count) {
if (iter == ue_db.end()) {
iter = ue_db.begin(); // wrap around
}
sched_ue* user = &iter->second;
allocate_user(user);
allocate_user(user, cc_idx);
}
}
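
The time-domain round robin starts at `tti_tx_dl % nof_ues` and wraps once around the map, so the UE served first rotates every TTI. A self-contained illustration of the same iteration pattern (toy types, not scheduler code):

    #include <cstdint>
    #include <cstdio>
    #include <iterator>
    #include <map>

    int main()
    {
      std::map<uint16_t, int> ue_db = {{0x46, 0}, {0x47, 0}, {0x48, 0}};
      uint32_t tti_tx_dl    = 5;
      uint32_t priority_idx = tti_tx_dl % (uint32_t)ue_db.size(); // 5 % 3 == 2
      auto iter = ue_db.begin();
      std::advance(iter, priority_idx);
      for (uint32_t count = 0; count < ue_db.size(); ++iter, ++count) {
        if (iter == ue_db.end()) {
          iter = ue_db.begin(); // wrap around
        }
        printf("serving rnti=0x%x\n", iter->first); // 0x48, 0x46, 0x47
      }
      return 0;
    }
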
@@ -78,7 +77,7 @@ bool dl_metric_rr::find_allocation(uint32_t nof_rbg, rbgmask_t* rbgmask)
return nof_rbg == 0;
}
dl_harq_proc* dl_metric_rr::allocate_user(sched_ue* user)
dl_harq_proc* dl_metric_rr::allocate_user(sched_ue* user, uint32_t cc_idx)
{
if (tti_alloc->is_dl_alloc(user)) {
return nullptr;
@@ -86,8 +85,8 @@ dl_harq_proc* dl_metric_rr::allocate_user(sched_ue* user)
// FIXME: First do reTxs for all users. Only then do the rest.
alloc_outcome_t code;
uint32_t tti_dl = tti_alloc->get_tti_tx_dl();
dl_harq_proc* h = user->get_pending_dl_harq(tti_dl);
uint32_t req_bytes = user->get_pending_dl_new_data_total(tti_dl);
dl_harq_proc* h = user->get_pending_dl_harq(tti_dl, cc_idx);
uint32_t req_bytes = user->get_pending_dl_new_data_total();
// Schedule retx if we have space
#if ASYNC_DL_SCHED
@@ -100,7 +99,8 @@ dl_harq_proc* dl_metric_rr::allocate_user(sched_ue* user)
code = tti_alloc->alloc_dl_user(user, retx_mask, h->get_id());
if (code == alloc_outcome_t::SUCCESS) {
return h;
} else if (code == alloc_outcome_t::DCI_COLLISION) {
}
if (code == alloc_outcome_t::DCI_COLLISION) {
// No DCIs available for this user. Move to next
return nullptr;
}
@@ -119,14 +119,15 @@ dl_harq_proc* dl_metric_rr::allocate_user(sched_ue* user)
// If could not schedule the reTx, or there wasn't any pending retx, find an empty PID
#if ASYNC_DL_SCHED
h = user->get_empty_dl_harq();
h = user->get_empty_dl_harq(cc_idx);
if (h) {
#else
if (h && h->is_empty()) {
#endif
// Allocate resources based on pending data
if (req_bytes) {
uint32_t pending_rbg = user->prb_to_rbg(user->get_required_prb_dl(req_bytes, tti_alloc->get_nof_ctrl_symbols()));
uint32_t pending_rbg =
user->prb_to_rbg(user->get_required_prb_dl(cc_idx, req_bytes, tti_alloc->get_nof_ctrl_symbols()));
rbgmask_t newtx_mask(tti_alloc->get_dl_mask().size());
find_allocation(pending_rbg, &newtx_mask);
if (newtx_mask.any()) { // some empty spaces were found
@@ -152,29 +153,28 @@ void ul_metric_rr::set_log(srslte::log* log_)
log_h = log_;
}
void ul_metric_rr::sched_users(std::map<uint16_t, sched_ue>& ue_db, ul_tti_sched_t* tti_sched)
void ul_metric_rr::sched_users(std::map<uint16_t, sched_ue>& ue_db, ul_tti_sched_t* tti_sched, uint32_t cc_idx)
{
typedef std::map<uint16_t, sched_ue>::iterator it_t;
tti_alloc = tti_sched;
current_tti = tti_alloc->get_tti_tx_ul();
if (ue_db.size() == 0)
if (ue_db.empty()) {
return;
}
// give priority in a time-domain RR basis
uint32_t priority_idx =
(current_tti + (uint32_t)ue_db.size() / 2) % (uint32_t)ue_db.size(); // make DL and UL interleaved
// allocate reTxs first
it_t iter = ue_db.begin();
auto iter = ue_db.begin();
std::advance(iter, priority_idx);
for (uint32_t ue_count = 0; ue_count < ue_db.size(); ++iter, ++ue_count) {
if (iter == ue_db.end()) {
iter = ue_db.begin(); // wrap around
}
sched_ue* user = &iter->second;
allocate_user_retx_prbs(user);
allocate_user_retx_prbs(user, cc_idx);
}
// give priority in a time-domain RR basis
@@ -185,7 +185,7 @@ void ul_metric_rr::sched_users(std::map<uint16_t, sched_ue>& ue_db, ul_tti_sched
iter = ue_db.begin(); // wrap around
}
sched_ue* user = &iter->second;
allocate_user_newtx_prbs(user);
allocate_user_newtx_prbs(user, cc_idx);
}
}
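
Note the UL starting index is offset by half the UE count so that the DL and UL round-robin pointers land on different UEs in the same TTI: with 4 UEs at `current_tti = 6`, DL starts at `6 % 4 = 2` while UL starts at `(6 + 4/2) % 4 = 0`.
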
@@ -226,13 +226,13 @@ bool ul_metric_rr::find_allocation(uint32_t L, ul_harq_proc::ul_alloc_t* alloc)
return alloc->L == L;
}
ul_harq_proc* ul_metric_rr::allocate_user_retx_prbs(sched_ue* user)
ul_harq_proc* ul_metric_rr::allocate_user_retx_prbs(sched_ue* user, uint32_t cc_idx)
{
if (tti_alloc->is_ul_alloc(user)) {
return nullptr;
}
alloc_outcome_t ret;
ul_harq_proc* h = user->get_ul_harq(current_tti);
ul_harq_proc* h = user->get_ul_harq(current_tti, cc_idx);
// if there are procedures and we have space
if (h->has_pending_retx()) {
@@ -261,17 +261,17 @@ ul_harq_proc* ul_metric_rr::allocate_user_retx_prbs(sched_ue* user)
return nullptr;
}
ul_harq_proc* ul_metric_rr::allocate_user_newtx_prbs(sched_ue* user)
ul_harq_proc* ul_metric_rr::allocate_user_newtx_prbs(sched_ue* user, uint32_t cc_idx)
{
if (tti_alloc->is_ul_alloc(user)) {
return nullptr;
}
uint32_t pending_data = user->get_pending_ul_new_data(current_tti);
ul_harq_proc* h = user->get_ul_harq(current_tti);
ul_harq_proc* h = user->get_ul_harq(current_tti, cc_idx);
// find an empty PID
if (h->is_empty(0) and pending_data > 0) {
uint32_t pending_rb = user->get_required_prb_ul(pending_data);
uint32_t pending_rb = user->get_required_prb_ul(cc_idx, pending_data);
ul_harq_proc::ul_alloc_t alloc;
find_allocation(pending_rb, &alloc);
@@ -279,7 +279,8 @@ ul_harq_proc* ul_metric_rr::allocate_user_newtx_prbs(sched_ue* user)
alloc_outcome_t ret = tti_alloc->alloc_ul_user(user, alloc);
if (ret == alloc_outcome_t::SUCCESS) {
return h;
} else if (ret == alloc_outcome_t::DCI_COLLISION) {
}
if (ret == alloc_outcome_t::DCI_COLLISION) {
log_h->warning("SCHED: Couldn't find space in PDCCH for UL tx of rnti=0x%x\n", user->get_rnti());
}
}

@@ -53,8 +53,6 @@ sched_ue::sched_ue()
bzero(&cell, sizeof(cell));
bzero(&lch, sizeof(lch));
bzero(&dci_locations, sizeof(dci_locations));
bzero(&dl_harq, sizeof(dl_harq));
bzero(&ul_harq, sizeof(ul_harq));
bzero(&dl_ant_info, sizeof(dl_ant_info));
reset();
@@ -71,10 +69,7 @@ void sched_ue::set_cfg(uint16_t rnti_, const sched_params_t& sched_params_, sche
log_h = sched_params->log_h;
cell = sched_params->cfg->cell;
max_mcs_dl = 28;
max_mcs_ul = 28;
max_aggr_level = 3;
max_msg3retx = sched_params->cfg->maxharq_msg3tx;
max_msg3retx = sched_params->cfg->maxharq_msg3tx;
cfg = *cfg_;
@@ -82,11 +77,10 @@
cfg.dl_cfg.tm = SRSLTE_TM1;
Info("SCHED: Added user rnti=0x%x\n", rnti);
// Config HARQ processes
for (int i = 0; i < SCHED_MAX_HARQ_PROC; i++) {
dl_harq[i].config(i, cfg.maxharq_tx, log_h);
ul_harq[i].config(i, cfg.maxharq_tx, log_h);
}
// Init sched_ue carriers
// TODO: check config for number of carriers
carriers.emplace_back(&cfg, &cell, rnti, 0, log_h);
// Generate allowed CCE locations
for (int cfi = 0; cfi < 3; cfi++) {
@@ -117,20 +111,9 @@ void sched_ue::reset()
buf_mac = 0;
buf_ul = 0;
phy_config_dedicated_enabled = false;
dl_cqi = 1;
ul_cqi = 1;
dl_cqi_tti = 0;
ul_cqi_tti = 0;
dl_ri = 0;
dl_ri_tti = 0;
dl_pmi = 0;
dl_pmi_tti = 0;
cqi_request_tti = 0;
for (int i = 0; i < SCHED_MAX_HARQ_PROC; i++) {
for (uint32_t tb = 0; tb < SRSLTE_MAX_TB; tb++) {
dl_harq[i].reset(tb);
ul_harq[i].reset(tb);
}
for (auto& c : carriers) {
c.reset();
}
}
@@ -142,27 +125,22 @@ void sched_ue::reset()
void sched_ue::set_fixed_mcs(int mcs_ul, int mcs_dl)
{
std::lock_guard<std::mutex> lock(mutex);
fixed_mcs_ul = mcs_ul;
fixed_mcs_dl = mcs_dl;
for (auto& c : carriers) {
c.fixed_mcs_dl = mcs_dl;
c.fixed_mcs_ul = mcs_ul;
}
}
void sched_ue::set_max_mcs(int mcs_ul, int mcs_dl, int max_aggr_level_)
{
std::lock_guard<std::mutex> lock(mutex);
if (mcs_ul < 0) {
max_mcs_ul = 28;
} else {
max_mcs_ul = mcs_ul;
}
if (mcs_dl < 0) {
max_mcs_dl = 28;
} else {
max_mcs_dl = mcs_dl;
}
if (max_aggr_level_ < 0) {
max_aggr_level = 3;
} else {
max_aggr_level = max_aggr_level_;
uint32_t max_mcs_ul = mcs_ul >= 0 ? mcs_ul : 28;
uint32_t max_mcs_dl = mcs_dl >= 0 ? mcs_dl : 28;
uint32_t max_aggr_level = max_aggr_level_ >= 0 ? max_aggr_level_ : 3;
for (auto& c : carriers) {
c.max_mcs_dl = max_mcs_dl;
c.max_mcs_ul = max_mcs_ul;
c.max_aggr_level = max_aggr_level;
}
}
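
A negative argument now means "restore the default cap" (MCS 28, aggregation level 3), and the caps are applied uniformly to every carrier. Usage sketch (values illustrative):

    ue.set_max_mcs(/* mcs_ul */ -1, /* mcs_dl */ 16); // UL cap back to 28, DL capped at 16, on all carriers
    ue.set_fixed_mcs(10, 10);                         // pin UL and DL MCS to 10 on all carriers
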
@@ -193,9 +171,10 @@ void sched_ue::rem_bearer(uint32_t lc_id)
}
}
void sched_ue::phy_config_enabled(uint32_t tti, bool enabled)
void sched_ue::phy_config_enabled(uint32_t tti, uint32_t cc_idx, bool enabled)
{
dl_cqi_tti = tti;
carriers[cc_idx].dl_cqi_tti = tti;
// FIXME: "why do we need this?"
phy_config_dedicated_enabled = enabled;
}
@@ -262,7 +241,7 @@ bool sched_ue::pucch_sr_collision(uint32_t current_tti, uint32_t n_cce)
}
}
bool sched_ue::get_pucch_sched(uint32_t current_tti, uint32_t prb_idx[2])
bool sched_ue::get_pucch_sched(uint32_t current_tti, uint32_t cc_idx, uint32_t prb_idx[2])
{
bool ret = false;
@@ -279,9 +258,9 @@ bool sched_ue::get_pucch_sched(uint32_t current_tti, uint32_t prb_idx[2])
ret |= cfg.pucch_cfg.uci_cfg.is_scheduling_request_tti;
// Pending ACKs
for (int i = 0; i < SCHED_MAX_HARQ_PROC; i++) {
if (TTI_TX(dl_harq[i].get_tti()) == current_tti) {
cfg.pucch_cfg.uci_cfg.ack[0].ncce[0] = dl_harq[i].get_n_cce();
for (auto& h : carriers[cc_idx].dl_harq) {
if (TTI_TX(h.get_tti()) == current_tti) {
cfg.pucch_cfg.uci_cfg.ack[0].ncce[0] = h.get_n_cce();
cfg.pucch_cfg.uci_cfg.ack[0].nof_acks = 1;
ret = true;
}
@@ -310,24 +289,10 @@ bool sched_ue::get_pucch_sched(uint32_t current_tti, uint32_t prb_idx[2])
return ret;
}
int sched_ue::set_ack_info(uint32_t tti, uint32_t tb_idx, bool ack)
int sched_ue::set_ack_info(uint32_t tti, uint32_t cc_idx, uint32_t tb_idx, bool ack)
{
std::lock_guard<std::mutex> lock(mutex);
int ret;
for (int i = 0; i < SCHED_MAX_HARQ_PROC; i++) {
if (TTI_TX(dl_harq[i].get_tti()) == tti) {
Debug("SCHED: Set ACK=%d for rnti=0x%x, pid=%d, tb=%d, tti=%d\n", ack, rnti, i, tb_idx, tti);
dl_harq[i].set_ack(tb_idx, ack);
ret = dl_harq[i].get_tbs(tb_idx);
goto unlock;
}
}
Warning("SCHED: Received ACK info for unknown TTI=%d\n", tti);
ret = -1;
unlock:
return ret;
return carriers[cc_idx].set_ack_info(tti, tb_idx, ack);
}
void sched_ue::ul_recv_len(uint32_t lcid, uint32_t len)
@@ -350,31 +315,31 @@ void sched_ue::ul_recv_len(uint32_t lcid, uint32_t len)
Debug("SCHED: recv_len=%d, lcid=%d, bsr={%d,%d,%d,%d}\n", len, lcid, lch[0].bsr, lch[1].bsr, lch[2].bsr, lch[3].bsr);
}
void sched_ue::set_ul_crc(uint32_t tti, bool crc_res)
void sched_ue::set_ul_crc(uint32_t tti, uint32_t cc_idx, bool crc_res)
{
std::lock_guard<std::mutex> lock(mutex);
get_ul_harq(tti)->set_ack(0, crc_res);
get_ul_harq(tti, cc_idx)->set_ack(0, crc_res);
}
void sched_ue::set_dl_ri(uint32_t tti, uint32_t ri)
void sched_ue::set_dl_ri(uint32_t tti, uint32_t cc_idx, uint32_t ri)
{
std::lock_guard<std::mutex> lock(mutex);
dl_ri = ri;
dl_ri_tti = tti;
carriers[cc_idx].dl_ri = ri;
carriers[cc_idx].dl_ri_tti = tti;
}
void sched_ue::set_dl_pmi(uint32_t tti, uint32_t pmi)
void sched_ue::set_dl_pmi(uint32_t tti, uint32_t cc_idx, uint32_t pmi)
{
std::lock_guard<std::mutex> lock(mutex);
dl_pmi = pmi;
dl_pmi_tti = tti;
carriers[cc_idx].dl_pmi = pmi;
carriers[cc_idx].dl_pmi_tti = tti;
}
void sched_ue::set_dl_cqi(uint32_t tti, uint32_t cqi)
void sched_ue::set_dl_cqi(uint32_t tti, uint32_t cc_idx, uint32_t cqi)
{
std::lock_guard<std::mutex> lock(mutex);
dl_cqi = cqi;
dl_cqi_tti = tti;
carriers[cc_idx].dl_cqi = cqi;
carriers[cc_idx].dl_cqi_tti = tti;
}
void sched_ue::set_dl_ant_info(asn1::rrc::phys_cfg_ded_s::ant_info_c_* d)
@@ -383,11 +348,11 @@ void sched_ue::set_dl_ant_info(asn1::rrc::phys_cfg_ded_s::ant_info_c_* d)
dl_ant_info = *d;
}
void sched_ue::set_ul_cqi(uint32_t tti, uint32_t cqi, uint32_t ul_ch_code)
void sched_ue::set_ul_cqi(uint32_t tti, uint32_t cc_idx, uint32_t cqi, uint32_t ul_ch_code)
{
std::lock_guard<std::mutex> lock(mutex);
ul_cqi = cqi;
ul_cqi_tti = tti;
carriers[cc_idx].ul_cqi = cqi;
carriers[cc_idx].ul_cqi_tti = tti;
}
void sched_ue::tpc_inc()
@@ -418,7 +383,8 @@ void sched_ue::tpc_dec()
// > return 0 if TBS<MIN_DATA_TBS
int sched_ue::generate_format1(dl_harq_proc* h,
sched_interface::dl_sched_data_t* data,
uint32_t tti,
uint32_t tti_tx_dl,
uint32_t cc_idx,
uint32_t cfi,
const rbgmask_t& user_mask)
{
@@ -434,13 +400,13 @@ int sched_ue::generate_format1(dl_harq_proc* h,
// If this is the first transmission for this UE, make room for MAC Contention Resolution ID
bool need_conres_ce = false;
if (is_first_dl_tx()) {
if (is_first_dl_tx(cc_idx)) {
need_conres_ce = true;
}
if (h->is_empty(0)) {
// Get total available data to transmit (includes MAC header)
uint32_t req_bytes = get_pending_dl_new_data_total_unlocked(tti);
uint32_t req_bytes = get_pending_dl_new_data_total_unlocked();
uint32_t nof_prb = format1_count_prb((uint32_t)user_mask.to_uint64(), cell.nof_prb);
@@ -448,16 +414,16 @@ int sched_ue::generate_format1(dl_harq_proc* h,
srslte_pdsch_grant_t grant = {};
srslte_dl_sf_cfg_t dl_sf = {};
dl_sf.cfi = cfi;
dl_sf.tti = tti;
dl_sf.tti = tti_tx_dl;
srslte_ra_dl_grant_to_grant_prb_allocation(dci, &grant, cell.nof_prb);
uint32_t nof_re = srslte_ra_dl_grant_nof_re(&cell, &dl_sf, &grant);
int mcs0 = fixed_mcs_dl;
int mcs0 = carriers[cc_idx].fixed_mcs_dl;
if (need_conres_ce and cell.nof_prb < 10) { // SRB0 Tx. Use a higher MCS for the PRACH to fit in 6 PRBs
mcs0 = MCS_FIRST_DL;
}
if (mcs0 < 0) { // dynamic MCS
tbs = alloc_tbs_dl(nof_prb, nof_re, req_bytes, &mcs);
tbs = carriers[cc_idx].alloc_tbs_dl(nof_prb, nof_re, req_bytes, &mcs);
} else {
tbs = srslte_ra_tbs_from_idx(srslte_ra_tbs_idx_from_mcs(mcs0, false), nof_prb) / 8;
mcs = mcs0;
@@ -468,7 +434,7 @@ int sched_ue::generate_format1(dl_harq_proc* h,
return 0;
}
h->new_tx(user_mask, 0, tti, mcs, tbs, data->dci.location.ncce);
h->new_tx(user_mask, 0, tti_tx_dl, mcs, tbs, data->dci.location.ncce);
int rem_tbs = tbs;
int x = 0;
@@ -500,7 +466,7 @@ int sched_ue::generate_format1(dl_harq_proc* h,
Debug("SCHED: Alloc format1 new mcs=%d, tbs=%d, nof_prb=%d, req_bytes=%d\n", mcs, tbs, nof_prb, req_bytes);
} else {
h->new_retx(user_mask, 0, tti, &mcs, &tbs, data->dci.location.ncce);
h->new_retx(user_mask, 0, tti_tx_dl, &mcs, &tbs, data->dci.location.ncce);
Debug("SCHED: Alloc format1 previous mcs=%d, tbs=%d\n", mcs, tbs);
}
@@ -525,11 +491,12 @@ int sched_ue::generate_format1(dl_harq_proc* h,
int sched_ue::generate_format2a(dl_harq_proc* h,
sched_interface::dl_sched_data_t* data,
uint32_t tti,
uint32_t cc_idx,
uint32_t cfi,
const rbgmask_t& user_mask)
{
std::lock_guard<std::mutex> lock(mutex);
int ret = generate_format2a_unlocked(h, data, tti, cfi, user_mask);
int ret = generate_format2a_unlocked(h, data, tti, cc_idx, cfi, user_mask);
return ret;
}
@@ -537,6 +504,7 @@ int sched_ue::generate_format2a(dl_harq_proc* h,
int sched_ue::generate_format2a_unlocked(dl_harq_proc* h,
sched_interface::dl_sched_data_t* data,
uint32_t tti,
uint32_t cc_idx,
uint32_t cfi,
const rbgmask_t& user_mask)
{
@@ -559,7 +527,7 @@ int sched_ue::generate_format2a_unlocked(dl_harq_proc* h,
bool no_retx = true;
if (dl_ri == 0) {
if (carriers[cc_idx].dl_ri == 0) {
if (h->is_empty(1)) {
/* One layer, tb1 buffer is empty, send tb0 only */
tb_en[0] = true;
@@ -583,7 +551,7 @@ int sched_ue::generate_format2a_unlocked(dl_harq_proc* h,
}
for (uint32_t tb = 0; tb < SRSLTE_MAX_TB; tb++) {
uint32_t req_bytes = get_pending_dl_new_data_total_unlocked(tti);
uint32_t req_bytes = get_pending_dl_new_data_total_unlocked();
int mcs = 0;
int tbs = 0;
@@ -591,11 +559,13 @@ int sched_ue::generate_format2a_unlocked(dl_harq_proc* h,
h->new_retx(user_mask, tb, tti, &mcs, &tbs, data->dci.location.ncce);
Debug("SCHED: Alloc format2/2a previous mcs=%d, tbs=%d\n", mcs, tbs);
} else if (tb_en[tb] && req_bytes && no_retx) {
if (fixed_mcs_dl < 0) {
tbs = alloc_tbs_dl(nof_prb, nof_re, req_bytes, &mcs);
if (carriers[cc_idx].fixed_mcs_dl < 0) {
tbs = carriers[cc_idx].alloc_tbs_dl(nof_prb, nof_re, req_bytes, &mcs);
} else {
tbs = srslte_ra_tbs_from_idx((uint32_t)srslte_ra_tbs_idx_from_mcs((uint32_t)fixed_mcs_dl, false), nof_prb) / 8;
mcs = fixed_mcs_dl;
tbs = srslte_ra_tbs_from_idx(
(uint32_t)srslte_ra_tbs_idx_from_mcs((uint32_t)carriers[cc_idx].fixed_mcs_dl, false), nof_prb) /
8;
mcs = carriers[cc_idx].fixed_mcs_dl;
}
h->new_tx(user_mask, tb, tti, mcs, tbs, data->dci.location.ncce);
@@ -643,6 +613,7 @@ int sched_ue::generate_format2a_unlocked(dl_harq_proc* h,
int sched_ue::generate_format2(dl_harq_proc* h,
sched_interface::dl_sched_data_t* data,
uint32_t tti,
uint32_t cc_idx,
uint32_t cfi,
const rbgmask_t& user_mask)
{
@@ -650,14 +621,14 @@ int sched_ue::generate_format2(dl_harq_proc* h,
std::lock_guard<std::mutex> lock(mutex);
/* Call Format 2a (common) */
int ret = generate_format2a_unlocked(h, data, tti, cfi, user_mask);
int ret = generate_format2a_unlocked(h, data, tti, cc_idx, cfi, user_mask);
/* Compute precoding information */
data->dci.format = SRSLTE_DCI_FORMAT2;
if ((SRSLTE_DCI_IS_TB_EN(data->dci.tb[0]) + SRSLTE_DCI_IS_TB_EN(data->dci.tb[1])) == 1) {
data->dci.pinfo = (uint8_t)(dl_pmi + 1) % (uint8_t)5;
data->dci.pinfo = (uint8_t)(carriers[cc_idx].dl_pmi + 1) % (uint8_t)5;
} else {
data->dci.pinfo = (uint8_t)(dl_pmi & 1u);
data->dci.pinfo = (uint8_t)(carriers[cc_idx].dl_pmi & 1u);
}
return ret;
@@ -665,6 +636,7 @@ int sched_ue::generate_format2(dl_harq_proc* h,
int sched_ue::generate_format0(sched_interface::ul_sched_data_t* data,
uint32_t tti,
uint32_t cc_idx,
ul_harq_proc::ul_alloc_t alloc,
bool needs_pdcch,
srslte_dci_location_t dci_pos,
@@ -672,7 +644,7 @@ int sched_ue::generate_format0(sched_interface::ul_sched_data_t* data,
{
std::lock_guard<std::mutex> lock(mutex);
ul_harq_proc* h = get_ul_harq(tti);
ul_harq_proc* h = get_ul_harq(tti, cc_idx);
srslte_dci_ul_t* dci = &data->dci;
bool cqi_request = needs_cqi_unlocked(tti, true);
@@ -681,7 +653,7 @@ int sched_ue::generate_format0(sched_interface::ul_sched_data_t* data,
data->needs_pdcch = needs_pdcch;
dci->location = dci_pos;
int mcs = (explicit_mcs >= 0) ? explicit_mcs : fixed_mcs_ul;
int mcs = (explicit_mcs >= 0) ? explicit_mcs : carriers[cc_idx].fixed_mcs_ul;
int tbs = 0;
bool is_newtx = h->is_empty(0);
@@ -698,7 +670,7 @@ int sched_ue::generate_format0(sched_interface::ul_sched_data_t* data,
uint32_t req_bytes = get_pending_ul_new_data_unlocked(tti);
uint32_t N_srs = 0;
uint32_t nof_re = (2 * (SRSLTE_CP_NSYMB(cell.cp) - 1) - N_srs) * alloc.L * SRSLTE_NRE;
tbs = alloc_tbs_ul(alloc.L, nof_re, req_bytes, &mcs);
tbs = carriers[cc_idx].alloc_tbs_ul(alloc.L, nof_re, req_bytes, &mcs);
}
h->new_tx(tti, mcs, tbs, alloc, nof_retx);
@@ -755,27 +727,37 @@ uint32_t sched_ue::get_max_retx()
bool sched_ue::is_first_dl_tx()
{
for (int i = 0; i < SCHED_MAX_HARQ_PROC; i++) {
if (dl_harq[i].nof_tx(0) > 0) {
for (uint32_t i = 0; i < carriers.size(); ++i) {
if (not is_first_dl_tx(i)) {
return false;
}
}
return true;
}
bool sched_ue::needs_cqi(uint32_t tti, bool will_be_sent)
bool sched_ue::is_first_dl_tx(uint32_t cc_idx)
{
for (auto& h : carriers[cc_idx].dl_harq) {
if (h.nof_tx(0) > 0) {
return false;
}
}
return true;
}
bool sched_ue::needs_cqi(uint32_t tti, uint32_t cc_idx, bool will_be_sent)
{
std::lock_guard<std::mutex> lock(mutex);
bool ret = needs_cqi_unlocked(tti, will_be_sent);
bool ret = needs_cqi_unlocked(tti, cc_idx, will_be_sent);
return ret;
}
// Private lock-free implementation
bool sched_ue::needs_cqi_unlocked(uint32_t tti, bool will_be_sent)
bool sched_ue::needs_cqi_unlocked(uint32_t tti, uint32_t cc_idx, bool will_be_sent)
{
bool ret = false;
if (phy_config_dedicated_enabled && cfg.aperiodic_cqi_period && get_pending_dl_new_data_unlocked(tti) > 0) {
uint32_t interval = srslte_tti_interval(tti, dl_cqi_tti);
if (phy_config_dedicated_enabled && cfg.aperiodic_cqi_period && get_pending_dl_new_data_unlocked() > 0) {
uint32_t interval = srslte_tti_interval(tti, carriers[cc_idx].dl_cqi_tti);
bool needscqi = interval >= cfg.aperiodic_cqi_period;
if (needscqi) {
uint32_t interval_sent = srslte_tti_interval(tti, cqi_request_tti);
@@ -791,27 +773,24 @@ bool sched_ue::needs_cqi_unlocked(uint32_t tti, bool will_be_sent)
return ret;
}
uint32_t sched_ue::get_pending_dl_new_data(uint32_t tti)
uint32_t sched_ue::get_pending_dl_new_data()
{
std::lock_guard<std::mutex> lock(mutex);
uint32_t pending_data = get_pending_dl_new_data_unlocked(tti);
return pending_data;
return get_pending_dl_new_data_unlocked();
}
/// Use this function in the dl-metric to get the bytes to be scheduled. It accounts for the UE data,
/// the RAR resources, and headers
/// \param tti
/// \return number of bytes to be allocated
uint32_t sched_ue::get_pending_dl_new_data_total(uint32_t tti)
uint32_t sched_ue::get_pending_dl_new_data_total()
{
std::lock_guard<std::mutex> lock(mutex);
uint32_t req_bytes = get_pending_dl_new_data_total_unlocked(tti);
return req_bytes;
return get_pending_dl_new_data_total_unlocked();
}
uint32_t sched_ue::get_pending_dl_new_data_total_unlocked(uint32_t tti)
uint32_t sched_ue::get_pending_dl_new_data_total_unlocked()
{
uint32_t req_bytes = get_pending_dl_new_data_unlocked(tti);
uint32_t req_bytes = get_pending_dl_new_data_unlocked();
if (req_bytes > 0) {
req_bytes += (req_bytes < 128) ? 2 : 3; // consider the header
if (is_first_dl_tx()) {
@@ -822,7 +801,7 @@ uint32_t sched_ue::get_pending_dl_new_data_total_unlocked(uint32_t tti)
}
// Private lock-free implementation
uint32_t sched_ue::get_pending_dl_new_data_unlocked(uint32_t tti)
uint32_t sched_ue::get_pending_dl_new_data_unlocked()
{
uint32_t pending_data = 0;
for (int i = 0; i < sched_interface::MAX_LC; i++) {
@@ -830,7 +809,7 @@ uint32_t sched_ue::get_pending_dl_new_data_unlocked(uint32_t tti)
pending_data += lch[i].buf_retx + lch[i].buf_tx;
}
}
if (!is_first_dl_tx() && nof_ta_cmd) {
if (not is_first_dl_tx() and nof_ta_cmd > 0) {
pending_data += nof_ta_cmd * 2;
}
return pending_data;
@@ -839,15 +818,13 @@ uint32_t sched_ue::get_pending_dl_new_data_unlocked(uint32_t tti)
uint32_t sched_ue::get_pending_ul_new_data(uint32_t tti)
{
std::lock_guard<std::mutex> lock(mutex);
uint32_t pending_data = get_pending_ul_new_data_unlocked(tti);
return pending_data;
return get_pending_ul_new_data_unlocked(tti);
}
uint32_t sched_ue::get_pending_ul_old_data()
uint32_t sched_ue::get_pending_ul_old_data(uint32_t cc_idx)
{
std::lock_guard<std::mutex> lock(mutex);
uint32_t pending_data = get_pending_ul_old_data_unlocked();
return pending_data;
return get_pending_ul_old_data_unlocked(cc_idx);
}
// Private lock-free implementation
@@ -859,19 +836,25 @@ uint32_t sched_ue::get_pending_ul_new_data_unlocked(uint32_t tti)
pending_data += lch[i].bsr;
}
}
if (!pending_data && is_sr_triggered()) {
return 512;
}
if (!pending_data && needs_cqi_unlocked(tti)) {
return 128;
if (pending_data == 0) {
if (is_sr_triggered()) {
return 512;
}
for (uint32_t cc_idx = 0; cc_idx < carriers.size(); ++cc_idx) {
if (needs_cqi_unlocked(tti, cc_idx)) {
return 128;
}
}
}
uint32_t pending_ul_data = get_pending_ul_old_data_unlocked();
if (pending_data > pending_ul_data) {
pending_data -= pending_ul_data;
} else {
pending_data = 0;
// Subtract all the UL data already allocated in the UL harqs
uint32_t pending_ul_data = 0;
for (uint32_t cc_idx = 0; cc_idx < carriers.size(); ++cc_idx) {
pending_ul_data += get_pending_ul_old_data_unlocked(cc_idx);
}
if (pending_data) {
pending_data = (pending_data > pending_ul_data) ? pending_data - pending_ul_data : 0;
if (pending_data > 0) {
Debug("SCHED: pending_data=%d, pending_ul_data=%d, bsr={%d,%d,%d,%d}\n",
pending_data,
pending_ul_data,
@@ -884,13 +867,9 @@ uint32_t sched_ue::get_pending_ul_new_data_unlocked(uint32_t tti)
}
// Private lock-free implementation
uint32_t sched_ue::get_pending_ul_old_data_unlocked()
uint32_t sched_ue::get_pending_ul_old_data_unlocked(uint32_t cc_idx)
{
uint32_t pending_data = 0;
for (int i = 0; i < SCHED_MAX_HARQ_PROC; i++) {
pending_data += ul_harq[i].get_pending_data();
}
return pending_data;
return carriers[cc_idx].get_pending_ul_old_data();
}
uint32_t sched_ue::prb_to_rbg(uint32_t nof_prb)
@@ -903,7 +882,7 @@ uint32_t sched_ue::rgb_to_prb(uint32_t nof_rbg)
return sched_params->P * nof_rbg;
}
uint32_t sched_ue::get_required_prb_dl(uint32_t req_bytes, uint32_t nof_ctrl_symbols)
uint32_t sched_ue::get_required_prb_dl(uint32_t cc_idx, uint32_t req_bytes, uint32_t nof_ctrl_symbols)
{
std::lock_guard<std::mutex> lock(mutex);
@@ -913,11 +892,11 @@ uint32_t sched_ue::get_required_prb_dl(uint32_t req_bytes, uint32_t nof_ctrl_sym
uint32_t nbytes = 0;
uint32_t n;
int mcs0 = (is_first_dl_tx() and cell.nof_prb == 6) ? MCS_FIRST_DL : fixed_mcs_dl;
int mcs0 = (is_first_dl_tx(cc_idx) and cell.nof_prb == 6) ? MCS_FIRST_DL : carriers[cc_idx].fixed_mcs_dl;
for (n = 0; n < cell.nof_prb && nbytes < req_bytes; ++n) {
nof_re = srslte_ra_dl_approx_nof_re(&cell, n + 1, nof_ctrl_symbols);
if (mcs0 < 0) {
tbs = alloc_tbs_dl(n + 1, nof_re, 0, &mcs);
tbs = carriers[cc_idx].alloc_tbs_dl(n + 1, nof_re, 0, &mcs);
} else {
tbs = srslte_ra_tbs_from_idx(srslte_ra_tbs_idx_from_mcs(mcs0, false), n + 1) / 8;
}
@@ -931,37 +910,10 @@ uint32_t sched_ue::get_required_prb_dl(uint32_t req_bytes, uint32_t nof_ctrl_sym
return n;
}
uint32_t sched_ue::get_required_prb_ul(uint32_t req_bytes)
uint32_t sched_ue::get_required_prb_ul(uint32_t cc_idx, uint32_t req_bytes)
{
int mcs = 0;
uint32_t nbytes = 0;
uint32_t N_srs = 0;
uint32_t n = 0;
if (req_bytes == 0) {
return 0;
}
std::lock_guard<std::mutex> lock(mutex);
for (n = 1; n < cell.nof_prb && nbytes < req_bytes + 4; n++) {
uint32_t nof_re = (2 * (SRSLTE_CP_NSYMB(cell.cp) - 1) - N_srs) * n * SRSLTE_NRE;
int tbs = 0;
if (fixed_mcs_ul < 0) {
tbs = alloc_tbs_ul(n, nof_re, 0, &mcs);
} else {
tbs = srslte_ra_tbs_from_idx(srslte_ra_tbs_idx_from_mcs(fixed_mcs_ul, true), n) / 8;
}
if (tbs > 0) {
nbytes = tbs;
}
}
while (!srslte_dft_precoding_valid_prb(n) && n <= cell.nof_prb) {
n++;
}
return n;
return carriers[cc_idx].get_required_prb_ul(req_bytes);
}
bool sched_ue::is_sr_triggered()
@@ -969,88 +921,42 @@ bool sched_ue::is_sr_triggered()
return sr;
}
void sched_ue::reset_pending_pids(uint32_t tti_rx)
void sched_ue::reset_pending_pids(uint32_t tti_rx, uint32_t cc_idx)
{
uint32_t tti_tx_dl = TTI_TX(tti_rx), tti_tx_ul = TTI_RX_ACK(tti_rx);
// UL harqs
get_ul_harq(tti_tx_ul)->reset_pending_data();
// DL harqs
for (int i = 0; i < SCHED_MAX_HARQ_PROC; i++) {
dl_harq[i].reset_pending_data();
if (not dl_harq[i].is_empty()) {
uint32_t tti_diff = srslte_tti_interval(tti_tx_dl, dl_harq[i].get_tti());
if (tti_diff > 50 and tti_diff < 10240 / 2) {
log_h->info("SCHED: pid=%d is old. tti_pid=%d, now is %d, resetting\n", i, dl_harq[i].get_tti(), tti_tx_dl);
dl_harq[i].reset(0);
dl_harq[i].reset(1);
}
}
}
carriers[cc_idx].reset_old_pending_pids(tti_rx);
}
/* Gets HARQ process with oldest pending retx */
dl_harq_proc* sched_ue::get_pending_dl_harq(uint32_t tti)
dl_harq_proc* sched_ue::get_pending_dl_harq(uint32_t tti_tx_dl, uint32_t cc_idx)
{
#if ASYNC_DL_SCHED
std::lock_guard<std::mutex> lock(mutex);
int oldest_idx = -1;
uint32_t oldest_tti = 0;
for (int i = 0; i < SCHED_MAX_HARQ_PROC; i++) {
if (dl_harq[i].has_pending_retx(0, tti) || dl_harq[i].has_pending_retx(1, tti)) {
uint32_t x = srslte_tti_interval(tti, dl_harq[i].get_tti());
if (x > oldest_tti) {
oldest_idx = i;
oldest_tti = x;
}
}
}
dl_harq_proc* h = nullptr;
if (oldest_idx >= 0) {
h = &dl_harq[oldest_idx];
}
return h;
#else
return &dl_harq[tti % SCHED_MAX_HARQ_PROC];
#endif
return carriers[cc_idx].get_pending_dl_harq(tti_tx_dl);
}
dl_harq_proc* sched_ue::get_empty_dl_harq()
dl_harq_proc* sched_ue::get_empty_dl_harq(uint32_t cc_idx)
{
std::lock_guard<std::mutex> lock(mutex);
dl_harq_proc* h = nullptr;
for (int i = 0; i < SCHED_MAX_HARQ_PROC && !h; i++) {
if (dl_harq[i].is_empty(0) && dl_harq[i].is_empty(1)) {
h = &dl_harq[i];
}
}
return h;
return carriers[cc_idx].get_empty_dl_harq();
}
ul_harq_proc* sched_ue::get_ul_harq(uint32_t tti)
ul_harq_proc* sched_ue::get_ul_harq(uint32_t tti_tx_ul, uint32_t cc_idx)
{
return &ul_harq[tti % SCHED_MAX_HARQ_PROC];
return carriers[cc_idx].get_ul_harq(tti_tx_ul);
}
dl_harq_proc* sched_ue::find_dl_harq(uint32_t tti)
dl_harq_proc* sched_ue::find_dl_harq(uint32_t tti_rx, uint32_t cc_idx)
{
for (uint32_t i = 0; i < SCHED_MAX_HARQ_PROC; ++i) {
if (dl_harq[i].get_tti() == tti) {
return &dl_harq[i];
for (auto& h : carriers[cc_idx].dl_harq) {
if (h.get_tti() == tti_rx) {
return &h;
}
}
return nullptr;
}
dl_harq_proc* sched_ue::get_dl_harq(uint32_t idx)
dl_harq_proc* sched_ue::get_dl_harq(uint32_t idx, uint32_t cc_idx)
{
return &dl_harq[idx];
return &carriers[cc_idx].dl_harq[idx];
}
srslte_dci_format_t sched_ue::get_dci_format()
@ -1084,33 +990,6 @@ srslte_dci_format_t sched_ue::get_dci_format()
return ret;
}
/* Find lowest DCI aggregation level supported by the UE spectral efficiency */
uint32_t sched_ue::get_aggr_level(uint32_t nof_bits)
{
std::lock_guard<std::mutex> lock(mutex);
uint32_t l = 0;
float max_coderate = srslte_cqi_to_coderate(dl_cqi);
float coderate = 99;
float factor = 1.5;
uint32_t l_max = 3;
if (cell.nof_prb == 6) {
factor = 1.0;
l_max = 2;
}
l_max = SRSLTE_MIN(max_aggr_level, l_max);
do {
coderate = srslte_pdcch_coderate(nof_bits, l);
l++;
} while (l < l_max && factor * coderate > max_coderate);
Debug("SCHED: CQI=%d, l=%d, nof_bits=%d, coderate=%.2f, max_coderate=%.2f\n",
dl_cqi,
l,
nof_bits,
coderate,
max_coderate);
return l;
}
sched_ue::sched_dci_cce_t* sched_ue::get_locations(uint32_t cfi, uint32_t sf_idx)
{
if (cfi > 0 && cfi <= 3) {
@ -1192,20 +1071,159 @@ int sched_ue::cqi_to_tbs(uint32_t cqi,
return tbs;
}
int sched_ue::alloc_tbs_dl(uint32_t nof_prb, uint32_t nof_re, uint32_t req_bytes, int* mcs)
/************************************************************************************************
* sched_ue::sched_ue_carrier
***********************************************************************************************/
sched_ue_carrier::sched_ue_carrier(sched_interface::ue_cfg_t* cfg_,
srslte_cell_t* cell_cfg_,
uint16_t rnti_,
uint32_t cc_idx_,
srslte::log* log_) :
cfg(cfg_),
cell(cell_cfg_),
rnti(rnti_),
cc_idx(cc_idx_),
log_h(log_)
{
// Configure HARQ processes
for (uint32_t i = 0; i < dl_harq.size(); ++i) {
dl_harq[i].config(i, cfg->maxharq_tx, log_h);
ul_harq[i].config(i, cfg->maxharq_tx, log_h);
}
}
void sched_ue_carrier::reset()
{
return alloc_tbs(nof_prb, nof_re, req_bytes, false, mcs);
dl_ri = 0;
dl_ri_tti = 0;
dl_pmi = 0;
dl_pmi_tti = 0;
dl_cqi = 1;
dl_cqi_tti = 0;
ul_cqi = 1;
ul_cqi_tti = 0;
for (uint32_t i = 0; i < dl_harq.size(); ++i) {
for (uint32_t tb = 0; tb < SRSLTE_MAX_TB; tb++) {
dl_harq[i].reset(tb);
ul_harq[i].reset(tb);
}
}
}
int sched_ue::alloc_tbs_ul(uint32_t nof_prb, uint32_t nof_re, uint32_t req_bytes, int* mcs)
void sched_ue_carrier::reset_old_pending_pids(uint32_t tti_rx)
{
return alloc_tbs(nof_prb, nof_re, req_bytes, true, mcs);
uint32_t tti_tx_dl = TTI_TX(tti_rx), tti_tx_ul = TTI_RX_ACK(tti_rx);
// UL harqs
get_ul_harq(tti_tx_ul)->reset_pending_data();
// DL harqs
for (auto& h : dl_harq) {
h.reset_pending_data();
if (not h.is_empty()) {
uint32_t tti_diff = srslte_tti_interval(tti_tx_dl, h.get_tti());
if (tti_diff > 50 and tti_diff < 10240 / 2) {
log_h->info("SCHED: pid=%d is old. tti_pid=%d, now is %d, resetting\n", h.get_id(), h.get_tti(), tti_tx_dl);
for (uint32_t tb = 0; tb < SRSLTE_MAX_TB; tb++) {
h.reset(tb);
}
}
}
}
}
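// Worked sketch of the staleness rule above (assuming the standard 10240-TTI ring
// behind srslte_tti_interval): a non-empty HARQ is reset once its last TTI is more
// than 50 TTIs old, but less than half the ring to stay clear of wrap-around
// ambiguity. E.g. tti_tx_dl=4000 with harq_tti=3900 gives 100 -> reset, while
// harq_tti=3980 gives 20 -> keep.
#include <cstdint>
static uint32_t tti_interval_sketch(uint32_t tti1, uint32_t tti2)
{
  return (tti1 + 10240 - tti2) % 10240;
}
static bool harq_is_stale_sketch(uint32_t tti_tx_dl, uint32_t harq_tti)
{
  uint32_t diff = tti_interval_sketch(tti_tx_dl, harq_tti);
  return diff > 50 && diff < 10240 / 2;
}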
dl_harq_proc* sched_ue_carrier::get_pending_dl_harq(uint32_t tti_tx_dl)
{
#if ASYNC_DL_SCHED
int oldest_idx = -1;
uint32_t oldest_tti = 0;
for (auto& h : dl_harq) {
if (h.has_pending_retx(0, tti_tx_dl) or h.has_pending_retx(1, tti_tx_dl)) {
uint32_t x = srslte_tti_interval(tti_tx_dl, h.get_tti());
if (x > oldest_tti) {
oldest_idx = h.get_id();
oldest_tti = x;
}
}
}
dl_harq_proc* h = nullptr;
if (oldest_idx >= 0) {
h = &dl_harq[oldest_idx];
}
return h;
#else
return &dl_harq[tti_tx_dl % SCHED_MAX_HARQ_PROC];
#endif
}
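// Sketch of the ASYNC_DL_SCHED selection above: among HARQs with a pending
// retransmission, pick the one whose last TX lies furthest in the past.
// harq_view_t is a hypothetical reduced view of dl_harq_proc.
#include <cstdint>
#include <vector>
struct harq_view_t {
  bool     pending_retx; // any TB with a pending retx at this TTI
  uint32_t tti;          // TTI of the last transmission
};
static int oldest_pending_harq_sketch(const std::vector<harq_view_t>& harqs, uint32_t tti_tx_dl)
{
  int      oldest_idx = -1;
  uint32_t oldest_age = 0;
  for (uint32_t i = 0; i < harqs.size(); ++i) {
    uint32_t age = (tti_tx_dl + 10240 - harqs[i].tti) % 10240;
    if (harqs[i].pending_retx && age > oldest_age) {
      oldest_idx = (int)i;
      oldest_age = age;
    }
  }
  return oldest_idx; // -1 when no process needs a retx
}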
dl_harq_proc* sched_ue_carrier::get_empty_dl_harq()
{
auto it =
std::find_if(dl_harq.begin(), dl_harq.end(), [](dl_harq_proc& h) { return h.is_empty(0) and h.is_empty(1); });
return it != dl_harq.end() ? &(*it) : nullptr;
}
int sched_ue_carrier::set_ack_info(uint32_t tti_rx, uint32_t tb_idx, bool ack)
{
for (auto& h : dl_harq) {
if (TTI_TX(h.get_tti()) == tti_rx) {
Debug("SCHED: Set ACK=%d for rnti=0x%x, pid=%d, tb=%d, tti=%d\n", ack, rnti, h.get_id(), tb_idx, tti_rx);
h.set_ack(tb_idx, ack);
return h.get_tbs(tb_idx);
}
}
Warning("SCHED: Received ACK info for unknown TTI=%d\n", tti_rx);
return -1;
}
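// Timing note as a sketch (assumption: FDD with the usual 4 ms HARQ delay behind
// TTI_TX): an ACK decoded at tti_rx belongs to the DL HARQ transmitted 4 TTIs
// earlier, which is exactly the TTI_TX(h.get_tti()) == tti_rx match above.
#include <cstdint>
static constexpr uint32_t FDD_HARQ_DELAY_SKETCH = 4;
static uint32_t tti_tx_sketch(uint32_t tti_of_dl_tx)
{
  return (tti_of_dl_tx + FDD_HARQ_DELAY_SKETCH) % 10240;
}
// Usage: the ACK arriving at tti_rx matches the HARQ h for which
// tti_tx_sketch(h_tti) == tti_rx, i.e. h_tti == (tti_rx + 10240 - 4) % 10240.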
ul_harq_proc* sched_ue_carrier::get_ul_harq(uint32_t tti)
{
return &ul_harq[tti % SCHED_MAX_HARQ_PROC];
}
uint32_t sched_ue_carrier::get_pending_ul_old_data()
{
uint32_t pending_data = 0;
for (auto& h : ul_harq) {
pending_data += h.get_pending_data();
}
return pending_data;
}
/* Find lowest DCI aggregation level supported by the UE spectral efficiency */
uint32_t sched_ue_carrier::get_aggr_level(uint32_t nof_bits)
{
uint32_t l = 0;
float max_coderate = srslte_cqi_to_coderate(dl_cqi);
float coderate = 99;
float factor = 1.5;
uint32_t l_max = 3;
if (cell->nof_prb == 6) {
factor = 1.0;
l_max = 2;
}
l_max = SRSLTE_MIN(max_aggr_level, l_max);
do {
coderate = srslte_pdcch_coderate(nof_bits, l);
l++;
} while (l < l_max && factor * coderate > max_coderate);
Debug("SCHED: CQI=%d, l=%d, nof_bits=%d, coderate=%.2f, max_coderate=%.2f\n",
dl_cqi,
l,
nof_bits,
coderate,
max_coderate);
return l;
}
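// Hedged sketch of the search above: raise the aggregation level (1, 2, 4, 8 CCEs
// for l = 0..3) until the PDCCH coderate, inflated by the safety factor, fits under
// the coderate the reported CQI supports. coderate_for() is a hypothetical stand-in
// for srslte_pdcch_coderate(); the post-increment return mirrors the code above.
#include <cstdint>
#include <functional>
static uint32_t aggr_level_sketch(uint32_t nof_bits,
                                  float    max_coderate,
                                  float    factor,
                                  uint32_t l_max,
                                  const std::function<float(uint32_t, uint32_t)>& coderate_for)
{
  uint32_t l        = 0;
  float    coderate = 99;
  do {
    coderate = coderate_for(nof_bits, l);
    l++;
  } while (l < l_max && factor * coderate > max_coderate);
  return l;
}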
/* In this scheduler we tend to use all the available bandwidth and select the MCS
* that approximates the minimum of the link capacity and the requested rate
*/
int sched_ue::alloc_tbs(uint32_t nof_prb, uint32_t nof_re, uint32_t req_bytes, bool is_ul, int* mcs)
int sched_ue_carrier::alloc_tbs(uint32_t nof_prb, uint32_t nof_re, uint32_t req_bytes, bool is_ul, int* mcs)
{
uint32_t sel_mcs = 0;
@ -1214,7 +1232,7 @@ int sched_ue::alloc_tbs(uint32_t nof_prb, uint32_t nof_re, uint32_t req_bytes, b
uint32_t max_Qm = is_ul ? 4 : 6; // Cap PUSCH at 16-QAM; PDSCH may use up to 64-QAM
// TODO: Compute real spectral efficiency based on PUSCH-UCI configuration
int tbs_bytes = cqi_to_tbs(cqi, nof_prb, nof_re, max_mcs, max_Qm, is_ul, &sel_mcs) / 8;
int tbs_bytes = sched_ue::cqi_to_tbs(cqi, nof_prb, nof_re, max_mcs, max_Qm, is_ul, &sel_mcs) / 8;
/* If less bytes are requested, lower the MCS */
if (tbs_bytes > (int)req_bytes && req_bytes > 0) {
@ -1240,4 +1258,45 @@ int sched_ue::alloc_tbs(uint32_t nof_prb, uint32_t nof_re, uint32_t req_bytes, b
return tbs_bytes;
}
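// Sketch of the "min(capacity, requested rate)" idea from the comment above: start
// from the CQI-derived MCS and step it down while the resulting TBS still covers
// the requested bytes, trading unneeded capacity for robustness. tbs_for_mcs() is a
// hypothetical stand-in for the srslte_ra_tbs_* lookup chain.
#include <cstdint>
#include <functional>
static int pick_mcs_sketch(int cqi_mcs, uint32_t req_bytes, const std::function<int(int)>& tbs_for_mcs)
{
  if (req_bytes == 0) {
    return cqi_mcs; // no payload constraint: keep the capacity-driven MCS
  }
  int mcs = cqi_mcs;
  while (mcs > 0 && tbs_for_mcs(mcs - 1) >= (int)req_bytes) {
    mcs--; // the lower MCS still fits the payload
  }
  return mcs;
}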
int sched_ue_carrier::alloc_tbs_dl(uint32_t nof_prb, uint32_t nof_re, uint32_t req_bytes, int* mcs)
{
return alloc_tbs(nof_prb, nof_re, req_bytes, false, mcs);
}
int sched_ue_carrier::alloc_tbs_ul(uint32_t nof_prb, uint32_t nof_re, uint32_t req_bytes, int* mcs)
{
return alloc_tbs(nof_prb, nof_re, req_bytes, true, mcs);
}
uint32_t sched_ue_carrier::get_required_prb_ul(uint32_t req_bytes)
{
int mcs = 0;
uint32_t nbytes = 0;
uint32_t N_srs = 0;
uint32_t n = 0;
if (req_bytes == 0) {
return 0;
}
for (n = 1; n < cell->nof_prb && nbytes < req_bytes + 4; n++) {
uint32_t nof_re = (2 * (SRSLTE_CP_NSYMB(cell->cp) - 1) - N_srs) * n * SRSLTE_NRE;
int tbs = 0;
if (fixed_mcs_ul < 0) {
tbs = alloc_tbs_ul(n, nof_re, 0, &mcs);
} else {
tbs = srslte_ra_tbs_from_idx(srslte_ra_tbs_idx_from_mcs(fixed_mcs_ul, true), n) / 8;
}
if (tbs > 0) {
nbytes = tbs;
}
}
while (!srslte_dft_precoding_valid_prb(n) && n <= cell->nof_prb) {
n++;
}
return n;
}
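// Sketch of the final rounding step above: SC-FDMA/DFT precoding only admits PRB
// counts whose prime factors are 2, 3 and 5, so the search result is bumped up to
// the next valid size (e.g. 7 PRBs -> 8). This mirrors the constraint behind
// srslte_dft_precoding_valid_prb().
#include <cstdint>
#include <initializer_list>
static bool dft_valid_prb_sketch(uint32_t n)
{
  if (n == 0) {
    return false;
  }
  for (uint32_t f : {2u, 3u, 5u}) {
    while (n % f == 0) {
      n /= f;
    }
  }
  return n == 1; // only 2^a * 3^b * 5^c survive
}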
} // namespace srsenb

@ -106,11 +106,11 @@ int main(int argc, char *argv[])
memcpy(&cell_cfg.cell, &cell_cfg_phy, sizeof(srslte_cell_t));
cell_cfg.sibs[0].len = 18;
cell_cfg.sibs[0].period_rf = 8;
cell_cfg.sibs[1].len = 41;
cell_cfg.sibs[1].len = 41;
cell_cfg.sibs[1].period_rf = 16;
cell_cfg.si_window_ms = 40;
cell_cfg.si_window_ms = 40;
my_sched.init(NULL, &log_out);
my_sched.init(nullptr, &log_out);
my_sched.set_metric(&dl_metric, &ul_metric);
my_sched.cell_cfg(&cell_cfg);
@ -132,7 +132,7 @@ int main(int argc, char *argv[])
my_sched.ue_cfg(rnti, &ue_cfg);
my_sched.bearer_ue_cfg(rnti, 0, &bearer_cfg);
// my_sched.dl_rlc_buffer_state(rnti, 0, 1e6, 0);
my_sched.ul_bsr(rnti, 0, 1e6, true);
my_sched.ul_bsr(rnti, 0, 1e6f, true);
bool running = true;
uint32_t tti = 0;
@ -143,11 +143,9 @@ int main(int argc, char *argv[])
}
my_sched.dl_sched(tti, &sched_result_dl);
my_sched.ul_sched(tti, &sched_result_ul);
tti = (tti+1)%10240;
tti = (tti + 1) % 10240;
if (tti >= 4) {
my_sched.ul_crc_info(tti, rnti, tti%2);
my_sched.ul_crc_info(tti, rnti, 0, tti % 2);
}
}
}

@ -125,6 +125,8 @@ bool check_old_pids = true;
* Dummies *
*******************/
constexpr uint32_t CARRIER_IDX = 0;
struct sched_sim_args {
struct tti_event_t {
struct user_event_t {
@ -311,7 +313,7 @@ int sched_tester::process_tti_args()
if (e.second.dl_data > 0) {
uint32_t lcid = 0;
// FIXME: Does it need TTI for checking pending data?
uint32_t tot_dl_data = ue_db[e.first].get_pending_dl_new_data(tti_data.tti_tx_dl) + e.second.dl_data;
uint32_t tot_dl_data = ue_db[e.first].get_pending_dl_new_data() + e.second.dl_data;
dl_rlc_buffer_state(e.first, lcid, tot_dl_data, 0);
}
}
@ -326,15 +328,15 @@ void sched_tester::before_sched()
uint16_t rnti = it.first;
srsenb::sched_ue* user = &it.second;
tester_user_results d;
srsenb::ul_harq_proc* hul = user->get_ul_harq(tti_data.tti_tx_ul);
srsenb::ul_harq_proc* hul = user->get_ul_harq(tti_data.tti_tx_ul, CARRIER_IDX);
d.ul_pending_data = get_ul_buffer(rnti);
// user->get_pending_ul_new_data(tti_data.tti_tx_ul) or hul->has_pending_retx(); // get_ul_buffer(rnti);
d.dl_pending_data = get_dl_buffer(rnti);
d.has_ul_retx = hul->has_pending_retx();
d.has_ul_tx = d.has_ul_retx or d.ul_pending_data > 0;
srsenb::dl_harq_proc* hdl = user->get_pending_dl_harq(tti_data.tti_tx_dl);
srsenb::dl_harq_proc* hdl = user->get_pending_dl_harq(tti_data.tti_tx_dl, CARRIER_IDX);
d.has_dl_retx = (hdl != nullptr) and hdl->has_pending_retx(0, tti_data.tti_tx_dl);
d.has_dl_tx = (hdl != nullptr) or (it.second.get_empty_dl_harq() != nullptr and d.dl_pending_data > 0);
d.has_dl_tx = (hdl != nullptr) or (it.second.get_empty_dl_harq(CARRIER_IDX) != nullptr and d.dl_pending_data > 0);
d.has_ul_newtx = not d.has_ul_retx and d.ul_pending_data > 0;
tti_data.ue_data.insert(std::make_pair(rnti, d));
tti_data.total_ues.dl_pending_data += d.dl_pending_data;
@ -344,11 +346,11 @@ void sched_tester::before_sched()
tti_data.total_ues.has_ul_newtx |= d.has_ul_newtx;
for (uint32_t i = 0; i < 2 * FDD_HARQ_DELAY_MS; ++i) {
const srsenb::dl_harq_proc* h = user->get_dl_harq(i);
const srsenb::dl_harq_proc* h = user->get_dl_harq(i, CARRIER_IDX);
tti_data.ue_data[rnti].dl_harqs[i] = *h;
}
// NOTE: ACK might have just cleared the harq for tti_data.tti_tx_ul
tti_data.ue_data[rnti].ul_harq = *user->get_ul_harq(tti_data.tti_tx_ul);
tti_data.ue_data[rnti].ul_harq = *user->get_ul_harq(tti_data.tti_tx_ul, CARRIER_IDX);
}
// TODO: Check whether pending pending_rar.rar_tti correspond to a prach_tti
@ -583,7 +585,7 @@ int sched_tester::test_harqs()
const auto& data = tti_data.sched_result_dl.data[i];
uint32_t h_id = data.dci.pid;
uint16_t rnti = data.dci.rnti;
const srsenb::dl_harq_proc* h = ue_db[rnti].get_dl_harq(h_id);
const srsenb::dl_harq_proc* h = ue_db[rnti].get_dl_harq(h_id, CARRIER_IDX);
CONDERROR(h == nullptr, "[TESTER] scheduled DL harq pid=%d does not exist\n", h_id);
CONDERROR(h->is_empty(), "[TESTER] Cannot schedule an empty harq proc\n");
CONDERROR(h->get_tti() != tti_data.tti_tx_dl,
@ -608,7 +610,7 @@ int sched_tester::test_harqs()
const auto& pusch = tti_data.sched_result_ul.pusch[i];
uint16_t rnti = pusch.dci.rnti;
const auto& ue_data = tti_data.ue_data[rnti];
const srsenb::ul_harq_proc* h = ue_db[rnti].get_ul_harq(tti_data.tti_tx_ul);
const srsenb::ul_harq_proc* h = ue_db[rnti].get_ul_harq(tti_data.tti_tx_ul, CARRIER_IDX);
CONDERROR(h == nullptr or h->is_empty(), "[TESTER] scheduled UL harq does not exist or is empty\n");
CONDERROR(h->get_tti() != tti_data.tti_tx_ul,
"[TESTER] The scheduled UL harq does not a valid tti=%u\n",
@ -632,7 +634,7 @@ int sched_tester::test_harqs()
const auto& phich = tti_data.sched_result_ul.phich[i];
CONDERROR(tti_data.ue_data.count(phich.rnti) == 0, "[TESTER] Allocated PHICH rnti no longer exists\n");
const auto& hprev = tti_data.ue_data[phich.rnti].ul_harq;
const auto* h = ue_db[phich.rnti].get_ul_harq(tti_data.tti_tx_ul);
const auto* h = ue_db[phich.rnti].get_ul_harq(tti_data.tti_tx_ul, CARRIER_IDX);
CONDERROR(not hprev.has_pending_ack(), "[TESTER] Alloc PHICH did not have any pending ack\n");
bool maxretx_flag = hprev.nof_retx(0) + 1 >= hprev.max_nof_retx();
if (phich.phich == sched_interface::ul_sched_phich_t::ACK) {
@ -660,10 +662,11 @@ int sched_tester::test_harqs()
// schedule future acks
for (uint32_t i = 0; i < tti_data.sched_result_dl.nof_data_elems; ++i) {
ack_info_t ack_data;
ack_data.rnti = tti_data.sched_result_dl.data[i].dci.rnti;
ack_data.tti = FDD_HARQ_DELAY_MS + tti_data.tti_tx_dl;
const srsenb::dl_harq_proc* dl_h = ue_db[ack_data.rnti].get_dl_harq(tti_data.sched_result_dl.data[i].dci.pid);
ack_data.dl_harq = *dl_h;
ack_data.rnti = tti_data.sched_result_dl.data[i].dci.rnti;
ack_data.tti = FDD_HARQ_DELAY_MS + tti_data.tti_tx_dl;
const srsenb::dl_harq_proc* dl_h =
ue_db[ack_data.rnti].get_dl_harq(tti_data.sched_result_dl.data[i].dci.pid, CARRIER_IDX);
ack_data.dl_harq = *dl_h;
if (ack_data.dl_harq.nof_retx(0) == 0) {
ack_data.dl_ack = randf() > sim_args.P_retx;
} else { // always ack after three retxs
@ -693,7 +696,7 @@ int sched_tester::test_harqs()
const auto& pusch = tti_data.sched_result_ul.pusch[i];
ul_ack_info_t ack_data;
ack_data.rnti = pusch.dci.rnti;
ack_data.ul_harq = *ue_db[ack_data.rnti].get_ul_harq(tti_data.tti_tx_ul);
ack_data.ul_harq = *ue_db[ack_data.rnti].get_ul_harq(tti_data.tti_tx_ul, CARRIER_IDX);
ack_data.tti_tx_ul = tti_data.tti_tx_ul;
ack_data.tti_ack = tti_data.tti_tx_ul + FDD_HARQ_DELAY_MS;
if (ack_data.ul_harq.nof_retx(0) == 0) {
@ -708,9 +711,11 @@ int sched_tester::test_harqs()
if (check_old_pids) {
for (auto& user : ue_db) {
for (int i = 0; i < 2 * FDD_HARQ_DELAY_MS; i++) {
if (not(user.second.get_dl_harq(i)->is_empty(0) and user.second.get_dl_harq(1))) {
if (srslte_tti_interval(tti_data.tti_tx_dl, user.second.get_dl_harq(i)->get_tti()) > 49) {
TESTERROR("[TESTER] The pid=%d for rnti=0x%x got old.\n", user.second.get_dl_harq(i)->get_id(), user.first);
if (not(user.second.get_dl_harq(i, CARRIER_IDX)->is_empty(0) and user.second.get_dl_harq(i, CARRIER_IDX)->is_empty(1))) {
if (srslte_tti_interval(tti_data.tti_tx_dl, user.second.get_dl_harq(i, CARRIER_IDX)->get_tti()) > 49) {
TESTERROR("[TESTER] The pid=%d for rnti=0x%x got old.\n",
user.second.get_dl_harq(i, CARRIER_IDX)->get_id(),
user.first);
}
}
}
@ -920,7 +925,7 @@ int sched_tester::ack_txs()
if (ack_it.second.tti != tti_data.tti_rx) {
continue;
}
srsenb::dl_harq_proc* h = ue_db[ack_it.second.rnti].get_dl_harq(ack_it.second.dl_harq.get_id());
srsenb::dl_harq_proc* h = ue_db[ack_it.second.rnti].get_dl_harq(ack_it.second.dl_harq.get_id(), CARRIER_IDX);
const srsenb::dl_harq_proc& hack = ack_it.second.dl_harq;
CONDERROR(hack.is_empty(), "[TESTER] The acked DL harq was not active\n");
@ -929,7 +934,7 @@ int sched_tester::ack_txs()
if (ack_it.second.dl_harq.is_empty(tb)) {
continue;
}
ret |= dl_ack_info(tti_data.tti_rx, ack_it.second.rnti, tb, ack_it.second.dl_ack) > 0;
ret |= dl_ack_info(tti_data.tti_rx, ack_it.second.rnti, CARRIER_IDX, tb, ack_it.second.dl_ack) > 0;
}
CONDERROR(not ret, "[TESTER] The dl harq proc that was acked does not exist\n");
@ -950,13 +955,13 @@ int sched_tester::ack_txs()
if (ack_it.first != tti_data.tti_rx) {
continue;
}
srsenb::ul_harq_proc* h = ue_db[ack_it.second.rnti].get_ul_harq(tti_data.tti_rx);
srsenb::ul_harq_proc* h = ue_db[ack_it.second.rnti].get_ul_harq(tti_data.tti_rx, CARRIER_IDX);
const srsenb::ul_harq_proc& hack = ack_it.second.ul_harq;
CONDERROR(h == nullptr or h->get_tti() != hack.get_tti(), "[TESTER] UL Harq TTI does not match the ACK TTI\n");
CONDERROR(h->is_empty(0), "[TESTER] The acked UL harq is not active\n");
CONDERROR(hack.is_empty(0), "[TESTER] The acked UL harq was not active\n");
ul_crc_info(tti_data.tti_rx, ack_it.second.rnti, ack_it.second.ack);
ul_crc_info(tti_data.tti_rx, ack_it.second.rnti, CARRIER_IDX, ack_it.second.ack);
CONDERROR(!h->get_pending_data(), "[TESTER] UL harq lost its pending data\n");
CONDERROR(!h->has_pending_ack(), "[TESTER] ACK/NACKed UL harq should have a pending ACK\n");
