clang-format on all the scheduler files

master
Francisco Paisana 5 years ago
parent c1f9d8768c
commit 00d24872d9

@@ -41,7 +41,7 @@ struct alloc_outcome_t {
  alloc_outcome_t(result_enum e) : result(e) {}
  operator result_enum() { return result; }
  operator bool() { return result == SUCCESS; }
  const char* to_string() const;
};

//! Class responsible for managing a PDCCH CCE grid, namely cce allocs, and avoid collisions.

@@ -52,7 +52,7 @@ struct prb_range_t {
  static prb_range_t riv_to_prbs(uint32_t riv, uint32_t nof_prbs, int nof_vrbs = -1);
};

class harq_proc
{
public:
  void config(uint32_t id, uint32_t max_retx, srslte::log* log_h);
@@ -68,20 +68,19 @@ public:
  uint32_t max_nof_retx() const;

protected:
  void new_tx_common(uint32_t tb_idx, uint32_t tti, int mcs, int tbs);
  void new_retx_common(uint32_t tb_idx, uint32_t tti, int* mcs, int* tbs);
  bool has_pending_retx_common(uint32_t tb_idx) const;
  void set_ack_common(uint32_t tb_idx, bool ack);
  void reset_pending_data_common();

  enum ack_t { NULL_ACK, NACK, ACK };

  ack_t ack_state[SRSLTE_MAX_TB];
  bool active[SRSLTE_MAX_TB];
  bool ndi[SRSLTE_MAX_TB];
  uint32_t id;
  uint32_t max_retx;
  uint32_t n_rtx[SRSLTE_MAX_TB];
  uint32_t tx_cnt[SRSLTE_MAX_TB];
  int tti;
@@ -114,7 +113,6 @@ private:
class ul_harq_proc : public harq_proc
{
public:
  struct ul_alloc_t {
    uint32_t RB_start;
    uint32_t L;
@@ -134,15 +132,15 @@ public:
  bool has_pending_retx() const;
  bool is_adaptive_retx() const;
  void reset_pending_data();
  bool has_pending_ack() const;
  bool get_pending_ack() const;
  uint32_t get_pending_data() const;

private:
  ul_alloc_t allocation;
  int pending_data;
  bool is_adaptive;
  ack_t pending_ack;
};

@@ -22,9 +22,9 @@
#ifndef SRSENB_SCHEDULER_UE_H
#define SRSENB_SCHEDULER_UE_H

#include <map>
#include "srslte/common/log.h"
#include "srslte/interfaces/sched_interface.h"
#include "scheduler_harq.h"
#include "srslte/asn1/rrc_asn1.h"
@@ -32,40 +32,43 @@
namespace srsenb {

/** This class is designed to be thread-safe because it is called from workers through scheduler thread and from
 * higher layers and mac threads.
 *
 * 1 mutex is created for every user and only access to same user variables are mutexed
 */
class sched_ue
{
public:
  // used by sched_metric to store the pdsch/pusch allocations
  bool has_pucch;

  typedef struct {
    uint32_t cce_start[4][6];
    uint32_t nof_loc[4];
  } sched_dci_cce_t;

  /*************************************************************
   *
   * FAPI-like Interface
   *
   ************************************************************/
  sched_ue();
  void reset();
  void phy_config_enabled(uint32_t tti, bool enabled);
  void set_cfg(uint16_t rnti,
               sched_interface::ue_cfg_t* cfg,
               sched_interface::cell_cfg_t* cell_cfg,
               srslte_regs_t* regs,
               srslte::log* log_h);
  void set_bearer_cfg(uint32_t lc_id, srsenb::sched_interface::ue_bearer_cfg_t* cfg);
  void rem_bearer(uint32_t lc_id);
  void dl_buffer_state(uint8_t lc_id, uint32_t tx_queue, uint32_t retx_queue);
  void ul_buffer_state(uint8_t lc_id, uint32_t bsr, bool set_value = true);
  void ul_phr(int phr);
  void mac_buffer_state(uint32_t ce_code);
  void ul_recv_len(uint32_t lcid, uint32_t len);
  void set_dl_ant_info(asn1::rrc::phys_cfg_ded_s::ant_info_c_* dedicated);
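The per-user locking scheme described in the class comment above follows a simple convention that is visible throughout this file: every public entry point takes the UE's own mutex and then delegates to a *_unlocked helper (see the needs_cqi()/needs_cqi_unlocked() pair declared below), so internal calls never re-acquire the same lock. A minimal, hedged sketch of that pattern with hypothetical names, not the actual sched_ue class:

#include <cstdint>
#include <mutex>

// Hedged sketch of the per-UE locking convention, not srsLTE code.
class per_ue_state
{
public:
  uint32_t get_pending_dl_new_data(uint32_t tti)
  {
    std::lock_guard<std::mutex> lock(mutex); // serializes MAC/RRC/scheduler threads for this UE only
    return get_pending_dl_new_data_unlocked(tti);
  }

private:
  // May only be called while 'mutex' is already held.
  uint32_t get_pending_dl_new_data_unlocked(uint32_t /*tti*/) { return pending_bytes; }

  std::mutex mutex;             // one mutex per UE: different UEs never block each other
  uint32_t   pending_bytes = 0; // hypothetical buffer-state member
};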
@@ -76,11 +79,11 @@ public:
  int set_ack_info(uint32_t tti, uint32_t tb_idx, bool ack);
  void set_ul_crc(uint32_t tti, bool crc_res);

  /*******************************************************
   * Custom functions
   *******************************************************/
  void tpc_inc();
  void tpc_dec();
  void set_max_mcs(int mcs_ul, int mcs_dl, int max_aggr_level = -1);
@@ -94,19 +97,18 @@ public:
   * Functions used by scheduler metric objects
   *******************************************************/
  uint32_t get_required_prb_dl(uint32_t req_bytes, uint32_t nof_ctrl_symbols);
  uint32_t get_required_prb_ul(uint32_t req_bytes);
  uint32_t prb_to_rbg(uint32_t nof_prb);
  uint32_t rgb_to_prb(uint32_t nof_rbg);
  uint32_t get_pending_dl_new_data(uint32_t tti);
  uint32_t get_pending_ul_new_data(uint32_t tti);
  uint32_t get_pending_ul_old_data();
  uint32_t get_pending_dl_new_data_total(uint32_t tti);
  void reset_pending_pids(uint32_t tti_rx);
  dl_harq_proc* get_pending_dl_harq(uint32_t tti);
  dl_harq_proc* get_empty_dl_harq();
  ul_harq_proc* get_ul_harq(uint32_t tti);
@@ -114,8 +116,8 @@ public:
   * Functions used by the scheduler object
   *******************************************************/
  void set_sr();
  void unset_sr();
  void set_needs_ta_cmd(uint32_t nof_ta_cmd);
@@ -133,43 +135,42 @@ public:
                       int explicit_mcs = -1);
  srslte_dci_format_t get_dci_format();
  uint32_t get_aggr_level(uint32_t nof_bits);
  sched_dci_cce_t* get_locations(uint32_t current_cfi, uint32_t sf_idx);
  bool needs_cqi(uint32_t tti, bool will_send = false);
  uint32_t get_max_retx();
  bool get_pucch_sched(uint32_t current_tti, uint32_t prb_idx[2]);
  bool pucch_sr_collision(uint32_t current_tti, uint32_t n_cce);

private:
  typedef struct {
    sched_interface::ue_bearer_cfg_t cfg;
    int buf_tx;
    int buf_retx;
    int bsr;
  } ue_bearer_t;

  bool is_sr_triggered();
  int alloc_pdu(int tbs, sched_interface::dl_sched_pdu_t* pdu);
  static uint32_t format1_count_prb(uint32_t bitmask, uint32_t cell_nof_prb);
  static int cqi_to_tbs(
      uint32_t cqi, uint32_t nof_prb, uint32_t nof_re, uint32_t max_mcs, uint32_t max_Qm, bool is_ul, uint32_t* mcs);
  int alloc_tbs_dl(uint32_t nof_prb, uint32_t nof_re, uint32_t req_bytes, int* mcs);
  int alloc_tbs_ul(uint32_t nof_prb, uint32_t nof_re, uint32_t req_bytes, int* mcs);
  int alloc_tbs(uint32_t nof_prb, uint32_t nof_re, uint32_t req_bytes, bool is_ul, int* mcs);
  static bool bearer_is_ul(ue_bearer_t* lch);
  static bool bearer_is_dl(ue_bearer_t* lch);
  uint32_t get_pending_dl_new_data_unlocked(uint32_t tti);
  uint32_t get_pending_ul_old_data_unlocked();
  uint32_t get_pending_ul_new_data_unlocked(uint32_t tti);
  uint32_t get_pending_dl_new_data_total_unlocked(uint32_t tti);
  bool needs_cqi_unlocked(uint32_t tti, bool will_send = false);
  int generate_format2a_unlocked(
      dl_harq_proc* h, sched_interface::dl_sched_data_t* data, uint32_t tti, uint32_t cfi, const rbgmask_t& user_mask);
@@ -177,33 +178,33 @@ private:
  bool is_first_dl_tx();

  sched_interface::ue_cfg_t cfg;
  srslte_cell_t cell;
  srslte::log* log_h;

  std::mutex mutex;

  /* Buffer states */
  bool sr;
  int buf_mac;
  int buf_ul;
  ue_bearer_t lch[sched_interface::MAX_LC];

  int power_headroom;
  uint32_t dl_ri;
  uint32_t dl_ri_tti;
  uint32_t dl_pmi;
  uint32_t dl_pmi_tti;
  uint32_t dl_cqi;
  uint32_t dl_cqi_tti;
  uint32_t cqi_request_tti;
  uint32_t ul_cqi;
  uint32_t ul_cqi_tti;
  uint16_t rnti;
  uint32_t max_mcs_dl;
  uint32_t max_aggr_level;
  uint32_t max_mcs_ul;
  uint32_t max_msg3retx;
  int fixed_mcs_ul;
  int fixed_mcs_dl;
  uint32_t P;
@@ -216,14 +217,12 @@ private:
  sched_dci_cce_t dci_locations[3][10];

  const static int SCHED_MAX_HARQ_PROC = SRSLTE_FDD_NOF_HARQ;
  dl_harq_proc dl_harq[SCHED_MAX_HARQ_PROC];
  ul_harq_proc ul_harq[SCHED_MAX_HARQ_PROC];

  bool phy_config_dedicated_enabled;
  asn1::rrc::phys_cfg_ded_s::ant_info_c_ dl_ant_info;
};

} // namespace srsenb

#endif // SRSENB_SCHEDULER_UE_H

@@ -27,10 +27,10 @@
#include "srslte/common/pdu.h"
#include "srslte/srslte.h"

#define Error(fmt, ...) log_h->error(fmt, ##__VA_ARGS__)
#define Warning(fmt, ...) log_h->warning(fmt, ##__VA_ARGS__)
#define Info(fmt, ...) log_h->info(fmt, ##__VA_ARGS__)
#define Debug(fmt, ...) log_h->debug(fmt, ##__VA_ARGS__)

namespace srsenb {
@@ -118,9 +118,9 @@ sched::tti_sched_result_t::alloc_dl_ctrl(uint32_t aggr_lvl, uint32_t tbs_bytes,
alloc_outcome_t sched::tti_sched_result_t::alloc_bc(uint32_t aggr_lvl, uint32_t sib_idx, uint32_t sib_ntx)
{
  uint32_t sib_len = sibs_cfg[sib_idx].len;
  uint32_t rv = get_rvidx(sib_ntx);
  ctrl_code_t ret = alloc_dl_ctrl(aggr_lvl, sib_len, SRSLTE_SIRNTI);
  if (not ret.first) {
    Warning("SCHED: Could not allocate SIB=%d, L=%d, len=%d, cause=%s\n",
            sib_idx + 1,
@@ -132,7 +132,7 @@ alloc_outcome_t sched::tti_sched_result_t::alloc_bc(uint32_t aggr_lvl, uint32_t
  // BC allocation successful
  bc_alloc_t bc_alloc(ret.second);
  bc_alloc.rv = rv;
  bc_alloc.sib_idx = sib_idx;
  bc_allocs.push_back(bc_alloc);
@@ -222,11 +222,11 @@ alloc_outcome_t sched::tti_sched_result_t::alloc_ul(sched_ue*
  }

  ul_alloc_t ul_alloc = {};
  ul_alloc.type = alloc_type;
  ul_alloc.dci_idx = tti_alloc.get_pdcch_grid().nof_allocs() - 1;
  ul_alloc.user_ptr = user;
  ul_alloc.alloc = alloc;
  ul_alloc.mcs = mcs;
  ul_data_allocs.push_back(ul_alloc);

  return alloc_outcome_t::SUCCESS;
@@ -388,7 +388,7 @@ void sched::tti_sched_result_t::set_dl_data_sched_result(const pdcch_grid_t::all
    srslte_dci_format_t dci_format = user->get_dci_format();
    bool is_newtx = h->is_empty();
    int tbs = 0;
    switch (dci_format) {
      case SRSLTE_DCI_FORMAT1:
        tbs = user->generate_format1(h, data, get_tti_tx_dl(), get_cfi(), data_alloc.user_mask);
@@ -437,7 +437,7 @@ void sched::tti_sched_result_t::set_ul_sched_result(const pdcch_grid_t::alloc_re
  for (const auto& ul_alloc : ul_data_allocs) {
    sched_interface::ul_sched_data_t* pusch = &ul_sched_result.pusch[ul_sched_result.nof_dci_elems];
    sched_ue* user = ul_alloc.user_ptr;

    srslte_dci_location_t cce_range = {0, 0};
    if (ul_alloc.needs_pdcch()) {
@@ -712,11 +712,10 @@ int sched::cell_cfg(sched_interface::cell_cfg_t* cell_cfg)
  return 0;
}

/*******************************************************
 *
 * FAPI-like main sched interface. Wrappers to UE object
 *
 *******************************************************/

int sched::ue_cfg(uint16_t rnti, sched_interface::ue_cfg_t* ue_cfg)
@@ -984,8 +983,8 @@ void sched::generate_phich(tti_sched_result_t* tti_sched)
int sched::generate_ul_sched(tti_sched_result_t* tti_sched)
{
  /* Initialize variables */
  current_tti = tti_sched->get_tti_tx_ul();
  prbmask_t& ul_mask = tti_sched->get_ul_mask();

  // reserve PRBs for PRACH
  if (srslte_prach_tti_opportunity_config_fdd(cfg.prach_config, tti_sched->get_tti_tx_ul(), -1)) {

@@ -131,7 +131,7 @@ void pdcch_grid_t::update_alloc_tree(int parent_nod
                                     const sched_ue::sched_dci_cce_t* dci_locs)
{
  alloc_t alloc;
  alloc.rnti = (user != nullptr) ? user->get_rnti() : (uint16_t)0u;
  alloc.dci_pos.L = aggr_idx;

  // get cumulative pdcch mask

@@ -25,10 +25,10 @@
#include "srslte/common/pdu.h"
#include "srslte/srslte.h"

#define Error(fmt, ...) log_h->error(fmt, ##__VA_ARGS__)
#define Warning(fmt, ...) log_h->warning(fmt, ##__VA_ARGS__)
#define Info(fmt, ...) log_h->info(fmt, ##__VA_ARGS__)
#define Debug(fmt, ...) log_h->debug(fmt, ##__VA_ARGS__)

namespace srsenb {
@@ -50,18 +50,18 @@ prb_range_t prb_range_t::riv_to_prbs(uint32_t riv, uint32_t nof_prbs, int nof_vr
  return p;
}
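prb_range_t::riv_to_prbs() above unpacks a type-2 resource indication value (RIV) into a starting PRB and a contiguous length. The following is a hedged, self-contained sketch of the standard RIV encode/decode defined in TS 36.213; the helper names and the values in main() are illustrative and not taken from srsLTE's implementation:

#include <cstdint>
#include <cstdio>

// RIV for a contiguous allocation of l_crb PRBs starting at rb_start in an N-PRB cell.
static uint32_t prbs_to_riv(uint32_t rb_start, uint32_t l_crb, uint32_t N)
{
  if ((l_crb - 1) <= N / 2) {
    return N * (l_crb - 1) + rb_start;
  }
  return N * (N - l_crb + 1) + (N - 1 - rb_start);
}

// Inverse mapping: try the "short" form first, fall back to the "long" form when it does not fit.
static void riv_to_prbs(uint32_t riv, uint32_t N, uint32_t* rb_start, uint32_t* l_crb)
{
  uint32_t start = riv % N;
  uint32_t len   = riv / N + 1;
  if (start + len > N) {
    start = N - 1 - (riv % N);
    len   = N + 1 - riv / N;
  }
  *rb_start = start;
  *l_crb    = len;
}

int main()
{
  uint32_t riv = prbs_to_riv(3, 20, 25); // 20 PRBs starting at PRB 3 in a 25-PRB cell
  uint32_t start, len;
  riv_to_prbs(riv, 25, &start, &len);
  printf("riv=%u -> rb_start=%u, l_crb=%u\n", riv, start, len); // riv=171 -> rb_start=3, l_crb=20
}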
/******************************************************
 *
 * These classes manage the HARQ Processes.
 * There is a common class and two child classes for UL and DL.
 *
 ******************************************************/

void harq_proc::config(uint32_t id_, uint32_t max_retx_, srslte::log* log_h_)
{
  log_h = log_h_;
  id = id_;
  max_retx = max_retx_;
  for (int i = 0; i < SRSLTE_MAX_TB; i++) {
    ndi[i] = false;
  }
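To make the comment above concrete: the state harq_proc keeps per transport block amounts to a small lifecycle, where a new transmission toggles the NDI bit and clears the retransmission counter, a NACK keeps the process busy until max_retx is reached, and an ACK (or retransmission exhaustion) frees it. A hedged, stand-alone sketch of that lifecycle, not the actual class:

#include <cstdint>
#include <cstdio>

// Minimal sketch of one transport block's HARQ bookkeeping; illustrative only.
struct tb_state {
  bool     active   = false;
  bool     ndi      = false; // new-data indicator, toggled on every new transmission
  uint32_t n_rtx    = 0;
  uint32_t max_retx = 4;

  void new_tx() { ndi = !ndi; n_rtx = 0; active = true; }
  void new_retx() { n_rtx++; }
  void set_ack(bool ack)
  {
    if (ack || n_rtx + 1 >= max_retx) {
      active = false; // decoded correctly, or maximum number of retx exceeded: free the process
    }
  }
};

int main()
{
  tb_state tb;
  tb.new_tx();
  tb.set_ack(false); // NACK: the TB stays active and is eligible for a retransmission
  tb.new_retx();
  printf("active=%d ndi=%d n_rtx=%u\n", tb.active, tb.ndi, tb.n_rtx); // active=1 ndi=1 n_rtx=1
}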
@@ -70,12 +70,12 @@ void harq_proc::config(uint32_t id_, uint32_t max_retx_, srslte::log* log_h_)
void harq_proc::reset(uint32_t tb_idx)
{
  ack_state[tb_idx] = NULL_ACK;
  active[tb_idx] = false;
  n_rtx[tb_idx] = 0;
  tti = 0;
  last_mcs[tb_idx] = -1;
  last_tbs[tb_idx] = -1;
  tx_cnt[tb_idx] = 0;
}

uint32_t harq_proc::get_id() const
@@ -105,7 +105,7 @@ bool harq_proc::has_pending_retx_common(uint32_t tb_idx) const
uint32_t harq_proc::get_tti() const
{
  return (uint32_t)tti;
}

void harq_proc::set_ack_common(uint32_t tb_idx, bool ack_)
@@ -113,7 +113,8 @@ void harq_proc::set_ack_common(uint32_t tb_idx, bool ack_)
  ack_state[tb_idx] = ack_ ? ACK : NACK;
  log_h->debug("ACK=%d received pid=%d, tb_idx=%d, n_rtx=%d, max_retx=%d\n", ack_, id, tb_idx, n_rtx[tb_idx], max_retx);
  if (!ack_ && (n_rtx[tb_idx] + 1 >= max_retx)) {
    Warning(
        "SCHED: discarting TB %d pid=%d, tti=%d, maximum number of retx exceeded (%d)\n", tb_idx, id, tti, max_retx);
    active[tb_idx] = false;
  } else if (ack_) {
    active[tb_idx] = false;
@@ -121,10 +122,10 @@ void harq_proc::set_ack_common(uint32_t tb_idx, bool ack_)
}

void harq_proc::new_tx_common(uint32_t tb_idx, uint32_t tti_, int mcs, int tbs)
{
  reset(tb_idx);
  ndi[tb_idx] = !ndi[tb_idx];
  tti = tti_;
  tx_cnt[tb_idx]++;
  last_mcs[tb_idx] = mcs;
  last_tbs[tb_idx] = tbs;
@@ -206,7 +207,7 @@ void dl_harq_proc::set_ack(uint32_t tb_idx, bool ack)
uint32_t dl_harq_proc::get_n_cce() const
{
  return n_cce;
}

rbgmask_t dl_harq_proc::get_rbgmask() const
@@ -231,7 +232,7 @@ void dl_harq_proc::reset_pending_data()
  reset_pending_data_common();
}

/******************************************************
 *                 UE::UL HARQ class                  *
 ******************************************************/
@@ -296,7 +297,7 @@ void ul_harq_proc::reset_pending_data()
uint32_t ul_harq_proc::get_pending_data() const
{
  return (uint32_t)pending_data;
}

} // namespace srsenb

@@ -23,10 +23,10 @@
#include "srsenb/hdr/stack/mac/scheduler_harq.h"
#include <string.h>

#define Error(fmt, ...) log_h->error(fmt, ##__VA_ARGS__)
#define Warning(fmt, ...) log_h->warning(fmt, ##__VA_ARGS__)
#define Info(fmt, ...) log_h->info(fmt, ##__VA_ARGS__)
#define Debug(fmt, ...) log_h->debug(fmt, ##__VA_ARGS__)

namespace srsenb {
@@ -54,8 +54,8 @@ void dl_metric_rr::sched_users(std::map<uint16_t, sched_ue>& ue_db, dl_tti_sched
  uint32_t priority_idx = tti_alloc->get_tti_tx_dl() % (uint32_t)ue_db.size();
  it_t iter = ue_db.begin();
  std::advance(iter, priority_idx);
  for (uint32_t ue_count = 0; ue_count < ue_db.size(); ++iter, ++ue_count) {
    if (iter == ue_db.end()) {
      iter = ue_db.begin(); // wrap around
    }
    sched_ue* user = &iter->second;
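The round-robin priority used here (and in the UL metric further down, which additionally offsets the starting index by half the UE count so DL and UL picks interleave) can be sketched in isolation: start at a TTI-dependent UE in the map and walk it once, wrapping around at the end. The snippet below is illustrative and not the metric class itself:

#include <cstdint>
#include <cstdio>
#include <iterator>
#include <map>

int main()
{
  std::map<uint16_t, int> ue_db = {{0x46, 0}, {0x47, 0}, {0x48, 0}}; // rnti -> dummy state
  uint32_t tti          = 7;
  uint32_t priority_idx = tti % (uint32_t)ue_db.size(); // a UL metric could add ue_db.size()/2 here

  auto iter = ue_db.begin();
  std::advance(iter, priority_idx);
  for (uint32_t ue_count = 0; ue_count < ue_db.size(); ++iter, ++ue_count) {
    if (iter == ue_db.end()) {
      iter = ue_db.begin(); // wrap around
    }
    printf("serving rnti=0x%x\n", iter->first); // 0x47, 0x48, 0x46 for tti=7
  }
}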
@@ -122,7 +122,7 @@ dl_harq_proc* dl_metric_rr::allocate_user(sched_ue* user)
  h = user->get_empty_dl_harq();
  if (h) {
#else
  if (h && h->is_empty()) {
#endif
    // Allocate resources based on pending data
    if (req_bytes) {
@@ -159,17 +159,18 @@ void ul_metric_rr::sched_users(std::map<uint16_t, sched_ue>& ue_db, ul_tti_sched
  tti_alloc = tti_sched;
  current_tti = tti_alloc->get_tti_tx_ul();

  if (ue_db.size() == 0)
    return;

  // give priority in a time-domain RR basis
  uint32_t priority_idx =
      (current_tti + (uint32_t)ue_db.size() / 2) % (uint32_t)ue_db.size(); // make DL and UL interleaved

  // allocate reTxs first
  it_t iter = ue_db.begin();
  std::advance(iter, priority_idx);
  for (uint32_t ue_count = 0; ue_count < ue_db.size(); ++iter, ++ue_count) {
    if (iter == ue_db.end()) {
      iter = ue_db.begin(); // wrap around
    }
    sched_ue* user = &iter->second;
@@ -178,9 +179,9 @@ void ul_metric_rr::sched_users(std::map<uint16_t, sched_ue>& ue_db, ul_tti_sched
  // give priority in a time-domain RR basis
  iter = ue_db.begin();
  std::advance(iter, priority_idx);
  for (uint32_t ue_count = 0; ue_count < ue_db.size(); ++iter, ++ue_count) {
    if (iter == ue_db.end()) {
      iter = ue_db.begin(); // wrap around
    }
    sched_ue* user = &iter->second;
@@ -200,12 +201,12 @@ bool ul_metric_rr::find_allocation(uint32_t L, ul_harq_proc::ul_alloc_t* alloc)
  bzero(alloc, sizeof(ul_harq_proc::ul_alloc_t));
  for (uint32_t n = 0; n < used_rb->size() && alloc->L < L; n++) {
    if (not used_rb->test(n) && alloc->L == 0) {
      alloc->RB_start = n;
    }
    if (not used_rb->test(n)) {
      alloc->L++;
    } else if (alloc->L > 0) {
      // avoid edges
      if (n < 3) {
        alloc->RB_start = 0;
        alloc->L = 0;
@@ -214,15 +215,15 @@ bool ul_metric_rr::find_allocation(uint32_t L, ul_harq_proc::ul_alloc_t* alloc)
      }
    }
  }
  if (alloc->L == 0) {
    return false;
  }

  // Make sure L is allowed by SC-FDMA modulation
  while (!srslte_dft_precoding_valid_prb(alloc->L)) {
    alloc->L--;
  }
  return alloc->L == L;
}
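The "Make sure L is allowed by SC-FDMA modulation" loop above shrinks the allocation until srslte_dft_precoding_valid_prb() accepts it: for LTE PUSCH the DFT precoder only supports PRB counts whose prime factors are 2, 3 and 5. A hedged, stand-alone sketch of that constraint, using a local helper assumed to match the library check:

#include <cstdint>
#include <cstdio>

// True if n factors only into 2, 3 and 5 (the PRB sizes the PUSCH DFT precoder supports).
static bool valid_sc_fdma_prb(uint32_t n)
{
  if (n == 0) {
    return false;
  }
  const uint32_t factors[] = {2, 3, 5};
  for (uint32_t f : factors) {
    while (n % f == 0) {
      n /= f;
    }
  }
  return n == 1;
}

int main()
{
  uint32_t L = 23; // requested allocation length in PRBs
  while (!valid_sc_fdma_prb(L)) {
    L--; // same shrink loop as in ul_metric_rr::find_allocation
  }
  printf("largest valid L <= 23 is %u\n", L); // prints 20
}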
ul_harq_proc* ul_metric_rr::allocate_user_retx_prbs(sched_ue* user)

@@ -34,17 +34,16 @@
#define MCS_FIRST_DL 4
#define MIN_DATA_TBS 4

/******************************************************
 *                     UE class                       *
 ******************************************************/

namespace srsenb {

/*******************************************************
 *
 * Initialization and configuration functions
 *
 *******************************************************/

sched_ue::sched_ue() :
@@ -110,10 +109,9 @@ void sched_ue::set_cfg(uint16_t rnti_,
    }
  }

  for (int i = 0; i < sched_interface::MAX_LC; i++) {
    set_bearer_cfg(i, &cfg.ue_bearers[i]);
  }
}

void sched_ue::reset()
@@ -144,12 +142,13 @@ void sched_ue::reset()
    }
  }

  for (int i = 0; i < sched_interface::MAX_LC; i++) {
    rem_bearer(i);
  }
}

void sched_ue::set_fixed_mcs(int mcs_ul, int mcs_dl)
{
  std::lock_guard<std::mutex> lock(mutex);
  fixed_mcs_ul = mcs_ul;
  fixed_mcs_dl = mcs_dl;
@@ -158,14 +157,14 @@ void sched_ue::set_fixed_mcs(int mcs_ul, int mcs_dl) {
void sched_ue::set_max_mcs(int mcs_ul, int mcs_dl, int max_aggr_level_) {
  std::lock_guard<std::mutex> lock(mutex);
  if (mcs_ul < 0) {
    max_mcs_ul = 28;
  } else {
    max_mcs_ul = mcs_ul;
  }
  if (mcs_dl < 0) {
    max_mcs_dl = 28;
  } else {
    max_mcs_dl = mcs_dl;
  }
  if (max_aggr_level_ < 0) {
    max_aggr_level = 3;
@@ -174,11 +173,10 @@ void sched_ue::set_max_mcs(int mcs_ul, int mcs_dl, int max_aggr_level_) {
  }
}

/*******************************************************
 *
 * FAPI-like main scheduler interface.
 *
 *******************************************************/

void sched_ue::set_bearer_cfg(uint32_t lc_id, sched_interface::ue_bearer_cfg_t* cfg)
@@ -186,10 +184,10 @@ void sched_ue::set_bearer_cfg(uint32_t lc_id, sched_interface::ue_bearer_cfg_t*
  std::lock_guard<std::mutex> lock(mutex);
  if (lc_id < sched_interface::MAX_LC) {
    memcpy(&lch[lc_id].cfg, cfg, sizeof(sched_interface::ue_bearer_cfg_t));
    lch[lc_id].buf_tx = 0;
    lch[lc_id].buf_retx = 0;
    if (lch[lc_id].cfg.direction != sched_interface::ue_bearer_cfg_t::IDLE) {
      Info("SCHED: Set bearer config lc_id=%d, direction=%d\n", lc_id, (int)lch[lc_id].cfg.direction);
    }
  }
}
@@ -204,8 +202,8 @@ void sched_ue::rem_bearer(uint32_t lc_id)
void sched_ue::phy_config_enabled(uint32_t tti, bool enabled)
{
  dl_cqi_tti = tti;
  phy_config_dedicated_enabled = enabled;
}

void sched_ue::ul_buffer_state(uint8_t lc_id, uint32_t bsr, bool set_value)
@@ -218,12 +216,11 @@ void sched_ue::ul_buffer_state(uint8_t lc_id, uint32_t bsr, bool set_value)
      lch[lc_id].bsr += bsr;
    }
  }
  Debug("SCHED: bsr=%d, lcid=%d, bsr={%d,%d,%d,%d}\n", bsr, lc_id, lch[0].bsr, lch[1].bsr, lch[2].bsr, lch[3].bsr);
}

void sched_ue::ul_phr(int phr)
{
  power_headroom = phr;
}
@@ -245,12 +242,12 @@ void sched_ue::mac_buffer_state(uint32_t ce_code)
void sched_ue::set_sr()
{
  sr = true;
}

void sched_ue::unset_sr()
{
  sr = false;
}

void sched_ue::set_needs_ta_cmd(uint32_t nof_ta_cmd_) {
@@ -292,7 +289,7 @@ bool sched_ue::get_pucch_sched(uint32_t current_tti, uint32_t prb_idx[2])
    if (TTI_TX(dl_harq[i].get_tti()) == current_tti) {
      cfg.pucch_cfg.uci_cfg.ack[0].ncce[0] = dl_harq[i].get_n_cce();
      cfg.pucch_cfg.uci_cfg.ack[0].nof_acks = 1;
      ret = true;
    }
  }
  // Periodic CQI
@@ -322,8 +319,8 @@ bool sched_ue::get_pucch_sched(uint32_t current_tti, uint32_t prb_idx[2])
int sched_ue::set_ack_info(uint32_t tti, uint32_t tb_idx, bool ack)
{
  std::lock_guard<std::mutex> lock(mutex);
  int ret = -1;
  for (int i = 0; i < SCHED_MAX_HARQ_PROC; i++) {
    if (TTI_TX(dl_harq[i].get_tti()) == tti) {
      Debug("SCHED: Set ACK=%d for rnti=0x%x, pid=%d, tb=%d, tti=%d\n", ack, rnti, i, tb_idx, tti);
      dl_harq[i].set_ack(tb_idx, ack);
@@ -345,19 +342,18 @@ void sched_ue::ul_recv_len(uint32_t lcid, uint32_t len)
  // Remove PDCP header??
  if (len > 4) {
    len -= 4;
  }

  if (lcid < sched_interface::MAX_LC) {
    if (bearer_is_ul(&lch[lcid])) {
      if (lch[lcid].bsr > (int)len) {
        lch[lcid].bsr -= len;
      } else {
        lch[lcid].bsr = 0;
      }
    }
  }
  Debug("SCHED: recv_len=%d, lcid=%d, bsr={%d,%d,%d,%d}\n", len, lcid, lch[0].bsr, lch[1].bsr, lch[2].bsr, lch[3].bsr);
}

void sched_ue::set_ul_crc(uint32_t tti, bool crc_res)
@@ -400,16 +396,18 @@ void sched_ue::set_ul_cqi(uint32_t tti, uint32_t cqi, uint32_t ul_ch_code)
  ul_cqi_tti = tti;
}

void sched_ue::tpc_inc()
{
  std::lock_guard<std::mutex> lock(mutex);
  if (power_headroom > 0) {
    next_tpc_pusch = 3;
    next_tpc_pucch = 3;
  }
  log_h->info("SCHED: Set TCP=%d for rnti=0x%x\n", next_tpc_pucch, rnti);
}

void sched_ue::tpc_dec()
{
  std::lock_guard<std::mutex> lock(mutex);
  next_tpc_pusch = 0;
  next_tpc_pucch = 0;
@@ -417,9 +415,9 @@ void sched_ue::tpc_dec() {
}

/*******************************************************
 *
 * Functions used to generate DCI grants
 *
 *******************************************************/

// Generates a Format1 dci
@@ -531,7 +529,7 @@ int sched_ue::generate_format2a(
    dl_harq_proc* h, sched_interface::dl_sched_data_t* data, uint32_t tti, uint32_t cfi, const rbgmask_t& user_mask)
{
  std::lock_guard<std::mutex> lock(mutex);
  int ret = generate_format2a_unlocked(h, data, tti, cfi, user_mask);
  return ret;
}
@@ -571,7 +569,7 @@ int sched_ue::generate_format2a_unlocked(
  for (uint32_t tb = 0; tb < SRSLTE_MAX_TB; tb++) {
    if (!h->is_empty(tb)) {
      tb_en[tb] = true;
      no_retx = false;
    }
  }

  /* Two layers, no retransmissions... */
@@ -583,8 +581,8 @@ int sched_ue::generate_format2a_unlocked(
  for (uint32_t tb = 0; tb < SRSLTE_MAX_TB; tb++) {
    uint32_t req_bytes = get_pending_dl_new_data_total_unlocked(tti);
    int mcs = 0;
    int tbs = 0;

    if (!h->is_empty(tb)) {
      h->new_retx(user_mask, tb, tti, &mcs, &tbs, data->dci.location.ncce);
@@ -631,7 +629,7 @@ int sched_ue::generate_format2a_unlocked(
  dci->format = SRSLTE_DCI_FORMAT2A;
  dci->rnti = rnti;
  dci->pid = h->get_id();
  dci->tpc_pucch = (uint8_t)next_tpc_pucch;
  next_tpc_pucch = 1;

  int ret = data->tbs[0] + data->tbs[1];
@@ -651,9 +649,9 @@ int sched_ue::generate_format2(
  /* Compute precoding information */
  data->dci.format = SRSLTE_DCI_FORMAT2;
  if ((SRSLTE_DCI_IS_TB_EN(data->dci.tb[0]) + SRSLTE_DCI_IS_TB_EN(data->dci.tb[1])) == 1) {
    data->dci.pinfo = (uint8_t)(dl_pmi + 1) % (uint8_t)5;
  } else {
    data->dci.pinfo = (uint8_t)(dl_pmi & 1);
  }

  return ret;
@@ -701,7 +699,7 @@ int sched_ue::generate_format0(sched_interface::ul_sched_data_t* data,
  } else {
    // retx
    h->new_retx(0, tti, &mcs, NULL, alloc);
    tbs = srslte_ra_tbs_from_idx(srslte_ra_tbs_idx_from_mcs(mcs, true), alloc.L) / 8;
  }

  data->tbs = tbs;
@@ -719,45 +717,50 @@ int sched_ue::generate_format0(sched_interface::ul_sched_data_t* data,
    dci->tb.ndi = h->get_ndi(0);
    dci->cqi_request = cqi_request;
    dci->freq_hop_fl = srslte_dci_ul_t::SRSLTE_RA_PUSCH_HOP_DISABLED;
    dci->tpc_pusch = next_tpc_pusch;
    next_tpc_pusch = 1;
  }

  return tbs;
}

/*******************************************************
 *
 * Functions used by scheduler or scheduler metric objects
 *
 *******************************************************/

bool sched_ue::bearer_is_ul(ue_bearer_t* lch)
{
  return lch->cfg.direction == sched_interface::ue_bearer_cfg_t::UL ||
         lch->cfg.direction == sched_interface::ue_bearer_cfg_t::BOTH;
}

bool sched_ue::bearer_is_dl(ue_bearer_t* lch)
{
  return lch->cfg.direction == sched_interface::ue_bearer_cfg_t::DL ||
         lch->cfg.direction == sched_interface::ue_bearer_cfg_t::BOTH;
}

uint32_t sched_ue::get_max_retx()
{
  return cfg.maxharq_tx;
}

bool sched_ue::is_first_dl_tx()
{
  for (int i = 0; i < SCHED_MAX_HARQ_PROC; i++) {
    if (dl_harq[i].nof_tx(0) > 0) {
      return false;
    }
  }
  return true;
}

bool sched_ue::needs_cqi(uint32_t tti, bool will_be_sent)
{
  std::lock_guard<std::mutex> lock(mutex);
  bool ret = needs_cqi_unlocked(tti, will_be_sent);
  return ret;
}
@@ -765,12 +768,9 @@ bool sched_ue::needs_cqi(uint32_t tti, bool will_be_sent)
bool sched_ue::needs_cqi_unlocked(uint32_t tti, bool will_be_sent)
{
  bool ret = false;
  if (phy_config_dedicated_enabled && cfg.aperiodic_cqi_period && get_pending_dl_new_data_unlocked(tti) > 0) {
    uint32_t interval = srslte_tti_interval(tti, dl_cqi_tti);
    bool needscqi = interval >= cfg.aperiodic_cqi_period;
    if (needscqi) {
      uint32_t interval_sent = srslte_tti_interval(tti, cqi_request_tti);
      if (interval_sent >= 16) {
@@ -788,7 +788,7 @@ bool sched_ue::needs_cqi_unlocked(uint32_t tti, bool will_be_sent)
uint32_t sched_ue::get_pending_dl_new_data(uint32_t tti)
{
  std::lock_guard<std::mutex> lock(mutex);
  uint32_t pending_data = get_pending_dl_new_data_unlocked(tti);
  return pending_data;
}
@@ -799,16 +799,16 @@ uint32_t sched_ue::get_pending_dl_new_data(uint32_t tti)
uint32_t sched_ue::get_pending_dl_new_data_total(uint32_t tti)
{
  std::lock_guard<std::mutex> lock(mutex);
  uint32_t req_bytes = get_pending_dl_new_data_total_unlocked(tti);
  return req_bytes;
}

uint32_t sched_ue::get_pending_dl_new_data_total_unlocked(uint32_t tti)
{
  uint32_t req_bytes = get_pending_dl_new_data_unlocked(tti);
  if (req_bytes > 0) {
    req_bytes += (req_bytes < 128) ? 2 : 3; // consider the header
    if (is_first_dl_tx()) {
      req_bytes += 6; // count for RAR
    }
  }
@@ -819,7 +819,7 @@ uint32_t sched_ue::get_pending_dl_new_data_total_unlocked(uint32_t tti)
uint32_t sched_ue::get_pending_dl_new_data_unlocked(uint32_t tti)
{
  uint32_t pending_data = 0;
  for (int i = 0; i < sched_interface::MAX_LC; i++) {
    if (bearer_is_dl(&lch[i])) {
      pending_data += lch[i].buf_retx + lch[i].buf_tx;
    }
@@ -833,14 +833,14 @@ uint32_t sched_ue::get_pending_dl_new_data_unlocked(uint32_t tti)
uint32_t sched_ue::get_pending_ul_new_data(uint32_t tti)
{
  std::lock_guard<std::mutex> lock(mutex);
  uint32_t pending_data = get_pending_ul_new_data_unlocked(tti);
  return pending_data;
}

uint32_t sched_ue::get_pending_ul_old_data()
{
  std::lock_guard<std::mutex> lock(mutex);
  uint32_t pending_data = get_pending_ul_old_data_unlocked();
  return pending_data;
}
@@ -848,7 +848,7 @@ uint32_t sched_ue::get_pending_ul_old_data()
uint32_t sched_ue::get_pending_ul_new_data_unlocked(uint32_t tti)
{
  uint32_t pending_data = 0;
  for (int i = 0; i < sched_interface::MAX_LC; i++) {
    if (bearer_is_ul(&lch[i])) {
      pending_data += lch[i].bsr;
    }
@@ -866,8 +866,13 @@ uint32_t sched_ue::get_pending_ul_new_data_unlocked(uint32_t tti)
    pending_data = 0;
  }
  if (pending_data) {
    Debug("SCHED: pending_data=%d, pending_ul_data=%d, bsr={%d,%d,%d,%d}\n",
          pending_data,
          pending_ul_data,
          lch[0].bsr,
          lch[1].bsr,
          lch[2].bsr,
          lch[3].bsr);
  }
  return pending_data;
}
@@ -876,7 +881,7 @@ uint32_t sched_ue::get_pending_ul_new_data_unlocked(uint32_t tti)
uint32_t sched_ue::get_pending_ul_old_data_unlocked()
{
  uint32_t pending_data = 0;
  for (int i = 0; i < SCHED_MAX_HARQ_PROC; i++) {
    pending_data += ul_harq[i].get_pending_data();
  }
  return pending_data;
@@ -884,12 +889,12 @@ uint32_t sched_ue::get_pending_ul_old_data_unlocked()
uint32_t sched_ue::prb_to_rbg(uint32_t nof_prb)
{
  return (uint32_t)ceil((float)nof_prb / P);
}

uint32_t sched_ue::rgb_to_prb(uint32_t nof_rbg)
{
  return P * nof_rbg;
}

uint32_t sched_ue::get_required_prb_dl(uint32_t req_bytes, uint32_t nof_ctrl_symbols)
@@ -903,10 +908,10 @@ uint32_t sched_ue::get_required_prb_dl(uint32_t req_bytes, uint32_t nof_ctrl_sym
  uint32_t nbytes = 0;
  uint32_t n;
  int mcs0 = (is_first_dl_tx() and cell.nof_prb == 6) ? MCS_FIRST_DL : fixed_mcs_dl;
  for (n = 0; n < cell.nof_prb && nbytes < req_bytes; ++n) {
    nof_re = srslte_ra_dl_approx_nof_re(&cell, n + 1, nof_ctrl_symbols);
    if (mcs0 < 0) {
      tbs = alloc_tbs_dl(n + 1, nof_re, 0, &mcs);
    } else {
      tbs = srslte_ra_tbs_from_idx(srslte_ra_tbs_idx_from_mcs(mcs0, false), n + 1) / 8;
    }
@@ -920,33 +925,33 @@ uint32_t sched_ue::get_required_prb_dl(uint32_t req_bytes, uint32_t nof_ctrl_sym
  return n;
}

uint32_t sched_ue::get_required_prb_ul(uint32_t req_bytes)
{
  int mcs = 0;
  uint32_t nbytes = 0;
  uint32_t N_srs = 0;

  uint32_t n = 0;
  if (req_bytes == 0) {
    return 0;
  }

  std::lock_guard<std::mutex> lock(mutex);

  for (n = 1; n < cell.nof_prb && nbytes < req_bytes + 4; n++) {
    uint32_t nof_re = (2 * (SRSLTE_CP_NSYMB(cell.cp) - 1) - N_srs) * n * SRSLTE_NRE;
    int tbs = 0;
    if (fixed_mcs_ul < 0) {
      tbs = alloc_tbs_ul(n, nof_re, 0, &mcs);
    } else {
      tbs = srslte_ra_tbs_from_idx(srslte_ra_tbs_idx_from_mcs(fixed_mcs_ul, true), n) / 8;
    }
    if (tbs > 0) {
      nbytes = tbs;
    }
  }

  while (!srslte_dft_precoding_valid_prb(n) && n <= cell.nof_prb) {
    n++;
  }
@@ -955,7 +960,7 @@ uint32_t sched_ue::get_required_prb_ul(uint32_t req_bytes)
bool sched_ue::is_sr_triggered()
{
  return sr;
}

void sched_ue::reset_pending_pids(uint32_t tti_rx)
@@ -966,7 +971,7 @@ void sched_ue::reset_pending_pids(uint32_t tti_rx)
  get_ul_harq(tti_tx_ul)->reset_pending_data();

  // DL harqs
  for (int i = 0; i < SCHED_MAX_HARQ_PROC; i++) {
    dl_harq[i].reset_pending_data();
    if (not dl_harq[i].is_empty()) {
      uint32_t tti_diff = srslte_tti_interval(tti_tx_dl, dl_harq[i].get_tti());
@@ -986,18 +991,18 @@ dl_harq_proc* sched_ue::get_pending_dl_harq(uint32_t tti)
  std::lock_guard<std::mutex> lock(mutex);

  int oldest_idx = -1;
  uint32_t oldest_tti = 0;
  for (int i = 0; i < SCHED_MAX_HARQ_PROC; i++) {
    if (dl_harq[i].has_pending_retx(0, tti) || dl_harq[i].has_pending_retx(1, tti)) {
      uint32_t x = srslte_tti_interval(tti, dl_harq[i].get_tti());
      if (x > oldest_tti) {
        oldest_idx = i;
        oldest_tti = x;
      }
    }
  }
  dl_harq_proc* h = NULL;
  if (oldest_idx >= 0) {
    h = &dl_harq[oldest_idx];
  }
@@ -1005,7 +1010,7 @@ dl_harq_proc* sched_ue::get_pending_dl_harq(uint32_t tti)
  return h;
#else
  return &dl_harq[tti % SCHED_MAX_HARQ_PROC];
#endif
}
@@ -1013,8 +1018,8 @@ dl_harq_proc* sched_ue::get_empty_dl_harq()
{
  std::lock_guard<std::mutex> lock(mutex);

  dl_harq_proc* h = NULL;
  for (int i = 0; i < SCHED_MAX_HARQ_PROC && !h; i++) {
    if (dl_harq[i].is_empty(0) && dl_harq[i].is_empty(1)) {
      h = &dl_harq[i];
    }
@@ -1042,7 +1047,8 @@ dl_harq_proc* sched_ue::get_dl_harq(uint32_t idx)
  return &dl_harq[idx];
}

srslte_dci_format_t sched_ue::get_dci_format()
{
  srslte_dci_format_t ret = SRSLTE_DCI_FORMAT1;

  if (phy_config_dedicated_enabled) {
@@ -1072,16 +1078,15 @@ srslte_dci_format_t sched_ue::get_dci_format() {
  return ret;
}

/* Find lowest DCI aggregation level supported by the UE spectral efficiency */
uint32_t sched_ue::get_aggr_level(uint32_t nof_bits)
{
  std::lock_guard<std::mutex> lock(mutex);
  uint32_t l = 0;
  float max_coderate = srslte_cqi_to_coderate(dl_cqi);
  float coderate = 99;
  float factor = 1.5;
  uint32_t l_max = 3;
  if (cell.nof_prb == 6) {
    factor = 1.0;
    l_max = 2;
@@ -1090,53 +1095,59 @@ uint32_t sched_ue::get_aggr_level(uint32_t nof_bits)
  do {
    coderate = srslte_pdcch_coderate(nof_bits, l);
    l++;
  } while (l < l_max && factor * coderate > max_coderate);

  Debug("SCHED: CQI=%d, l=%d, nof_bits=%d, coderate=%.2f, max_coderate=%.2f\n",
        dl_cqi,
        l,
        nof_bits,
        coderate,
        max_coderate);
  return l;
}
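The aggregation-level search above keeps growing the PDCCH candidate size until the DCI code rate, scaled by a safety factor, fits under the code rate the reported CQI can sustain. A self-contained sketch of the same loop follows; the code-rate helper is an illustrative assumption (one CCE carries 72 coded QPSK bits), not srslte_pdcch_coderate itself:

#include <cstdint>
#include <cstdio>

// Approximate PDCCH code rate: (payload + 16-bit CRC) over 72 * 2^l coded bits (1, 2, 4, 8 CCEs).
static float pdcch_coderate(uint32_t nof_bits, uint32_t l)
{
  return (float)(nof_bits + 16) / (float)(72U << l);
}

int main()
{
  uint32_t nof_bits     = 43;   // roughly a Format1A-sized DCI payload
  float    max_coderate = 0.7f; // what the reported CQI can sustain (illustrative)
  float    factor       = 1.5f; // safety margin; 1.0 for 6-PRB cells in the code above
  uint32_t l = 0, l_max = 3;
  float    coderate = 99;
  do {
    coderate = pdcch_coderate(nof_bits, l);
    l++;
  } while (l < l_max && factor * coderate > max_coderate);
  printf("aggregation level index l=%u (coderate=%.2f)\n", l, coderate); // l=2, coderate=0.41
}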
sched_ue::sched_dci_cce_t* sched_ue::get_locations(uint32_t cfi, uint32_t sf_idx)
{
  if (cfi > 0 && cfi <= 3) {
    return &dci_locations[cfi - 1][sf_idx];
  } else {
    Error("SCHED: Invalid CFI=%d\n", cfi);
    return &dci_locations[0][sf_idx];
  }
}

/* Allocates first available RLC PDU */
int sched_ue::alloc_pdu(int tbs_bytes, sched_interface::dl_sched_pdu_t* pdu)
{
  // TODO: Implement lcid priority (now lowest index is lowest priority)
  int x = 0;
  int i = 0;
  for (i = 0; i < sched_interface::MAX_LC && !x; i++) {
    if (lch[i].buf_retx) {
      x = SRSLTE_MIN(lch[i].buf_retx, tbs_bytes);
      lch[i].buf_retx -= x;
    } else if (lch[i].buf_tx) {
      x = SRSLTE_MIN(lch[i].buf_tx, tbs_bytes);
      lch[i].buf_tx -= x;
    }
  }
  if (x) {
    pdu->lcid = i - 1;
    pdu->nbytes = x;
    Debug("SCHED: Allocated lcid=%d, nbytes=%d, tbs_bytes=%d\n", pdu->lcid, pdu->nbytes, tbs_bytes);
  }
  return x;
}

uint32_t sched_ue::format1_count_prb(uint32_t bitmask, uint32_t cell_nof_prb)
{
  uint32_t P = srslte_ra_type0_P(cell_nof_prb);
  uint32_t nb = (int)ceilf((float)cell_nof_prb / P);

  uint32_t nof_prb = 0;
  for (uint32_t i = 0; i < nb; i++) {
    if (bitmask & (1 << (nb - i - 1))) {
      for (uint32_t j = 0; j < P; j++) {
        if (i * P + j < cell_nof_prb) {
          nof_prb++;
        }
      }
@@ -1145,12 +1156,12 @@ uint32_t sched_ue::format1_count_prb(uint32_t bitmask, uint32_t cell_nof_prb) {
  return nof_prb;
}

int sched_ue::cqi_to_tbs(
    uint32_t cqi, uint32_t nof_prb, uint32_t nof_re, uint32_t max_mcs, uint32_t max_Qm, bool is_ul, uint32_t* mcs)
{
  float max_coderate = srslte_cqi_to_coderate(cqi);
  int sel_mcs = max_mcs + 1;
  float coderate = 99;
  float eff_coderate = 99;
  uint32_t Qm = 1;
  int tbs = 0;
@@ -1162,43 +1173,33 @@ int sched_ue::cqi_to_tbs(uint32_t cqi, uint32_t nof_prb, uint32_t nof_re, uint32
    coderate = srslte_coderate(tbs, nof_re);
    srslte_mod_t mod = (is_ul) ? srslte_ra_ul_mod_from_mcs(sel_mcs) : srslte_ra_dl_mod_from_mcs(sel_mcs);
    Qm = SRSLTE_MIN(max_Qm, srslte_mod_bits_x_symbol(mod));
    eff_coderate = coderate / Qm;
  } while ((sel_mcs > 0 && coderate > max_coderate) || eff_coderate > 0.930);
  if (mcs) {
    *mcs = (uint32_t)sel_mcs;
  }
  return tbs;
}

int sched_ue::alloc_tbs_dl(uint32_t nof_prb, uint32_t nof_re, uint32_t req_bytes, int* mcs)
{
  return alloc_tbs(nof_prb, nof_re, req_bytes, false, mcs);
}

int sched_ue::alloc_tbs_ul(uint32_t nof_prb, uint32_t nof_re, uint32_t req_bytes, int* mcs)
{
  return alloc_tbs(nof_prb, nof_re, req_bytes, true, mcs);
}

/* In this scheduler we tend to use all the available bandwidth and select the MCS
 * that approximates the minimum between the capacity and the requested rate
 */
int sched_ue::alloc_tbs(uint32_t nof_prb, uint32_t nof_re, uint32_t req_bytes, bool is_ul, int* mcs)
{
  uint32_t sel_mcs = 0;

  uint32_t cqi = is_ul ? ul_cqi : dl_cqi;
  uint32_t max_mcs = is_ul ? max_mcs_ul : max_mcs_dl;
  uint32_t max_Qm = is_ul ? 4 : 6; // Allow 16-QAM in PUSCH Only

  // TODO: Compute real spectral efficiency based on PUSCH-UCI configuration
@@ -1222,11 +1223,10 @@ int sched_ue::alloc_tbs(uint32_t nof_prb,
  }

  if (mcs && tbs_bytes >= 0) {
    *mcs = (int)sel_mcs;
  }

  return tbs_bytes;
}

} // namespace srsenb
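The cqi_to_tbs()/alloc_tbs() pair above implements the comment "use all the available bandwidth and select the MCS that approximates the minimum between the capacity and the requested rate": starting one step above the maximum allowed MCS, it decrements until the code rate fits the CQI budget and the effective rate per modulation bit stays below 0.93. A hedged, self-contained sketch of that loop with toy stand-ins for the TBS table and modulation order (not the srsLTE functions):

#include <cstdint>
#include <cstdio>

// Toy stand-ins for the 36.213 TBS table and the modulation order of an MCS index.
static int      tbs_bits(uint32_t mcs, uint32_t nof_prb) { return (int)((mcs + 1) * nof_prb * 16); }
static uint32_t bits_per_symbol(uint32_t mcs) { return mcs < 10 ? 2 : (mcs < 17 ? 4 : 6); } // QPSK/16QAM/64QAM

// Largest MCS whose code rate fits under the CQI-derived budget; mirrors the cqi_to_tbs() loop.
static int select_mcs(float max_coderate, uint32_t nof_prb, uint32_t nof_re, uint32_t max_mcs, uint32_t* mcs)
{
  int   sel_mcs      = (int)max_mcs + 1;
  float coderate     = 99;
  float eff_coderate = 99;
  int   tbs          = 0;
  do {
    sel_mcs--;
    tbs          = tbs_bits((uint32_t)sel_mcs, nof_prb);
    coderate     = (float)tbs / (float)nof_re;                            // information bits per resource element
    eff_coderate = coderate / (float)bits_per_symbol((uint32_t)sel_mcs);  // normalized by bits carried per symbol
  } while ((sel_mcs > 0 && coderate > max_coderate) || eff_coderate > 0.930);
  if (mcs) {
    *mcs = (uint32_t)sel_mcs;
  }
  return tbs / 8; // bytes
}

int main()
{
  uint32_t mcs = 0;
  int      tbs = select_mcs(2.5f /*from CQI*/, 25 /*PRBs*/, 25 * 12 * 11 /*REs*/, 28, &mcs);
  printf("selected mcs=%u, tbs=%d bytes\n", mcs, tbs); // mcs=19, tbs=1000
}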

@@ -181,10 +181,10 @@ struct sched_tester : public srsenb::sched {
  ra_sched_t::pending_msg3_t ul_pending_msg3;
  srslte::bounded_bitset<128, true> used_cce;
  //  std::vector<bool> used_cce;
  std::map<uint16_t, tester_user_results> ue_data; ///< stores buffer state of each user
  tester_user_results total_ues; ///< stores combined UL/DL buffer state
  srsenb::sched_interface::ul_sched_res_t sched_result_ul;
  srsenb::sched_interface::dl_sched_res_t sched_result_dl;
};

struct ue_info {
  int prach_tti = -1, rar_tti = -1, msg3_tti = -1;
