Created a separate class to manage bearer buffers/status reports in the scheduler

master
Francisco Paisana 5 years ago
parent 63f1ea1bec
commit cb7f620254

@ -40,6 +40,7 @@ public:
const static int MAX_SIB_PAYLOAD_LEN = 2048; const static int MAX_SIB_PAYLOAD_LEN = 2048;
const static int MAX_SIBS = 16; const static int MAX_SIBS = 16;
const static int MAX_LC = 6; const static int MAX_LC = 6;
const static int MAX_LC_GROUP = 4;
const static int MAX_DATA_LIST = 32; const static int MAX_DATA_LIST = 32;
const static int MAX_RAR_LIST = 8; const static int MAX_RAR_LIST = 8;
const static int MAX_BC_LIST = 8; const static int MAX_BC_LIST = 8;
@ -104,11 +105,11 @@ public:
}; };
struct ue_bearer_cfg_t { struct ue_bearer_cfg_t {
int priority = 0; int priority = 0;
int bsd = 0; int bsd = 0;
int pbr = 0; int pbr = 0;
int group = 0; int group = 0;
enum { IDLE = 0, UL, DL, BOTH } direction = IDLE; enum direction_t { IDLE = 0, UL, DL, BOTH } direction = IDLE;
}; };
struct ant_info_ded_t { struct ant_info_ded_t {
@ -286,8 +287,7 @@ public:
/* UL information */ /* UL information */
virtual int ul_crc_info(uint32_t tti, uint16_t rnti, uint32_t enb_cc_idx, bool crc) = 0; virtual int ul_crc_info(uint32_t tti, uint16_t rnti, uint32_t enb_cc_idx, bool crc) = 0;
virtual int ul_sr_info(uint32_t tti, uint16_t rnti) = 0; virtual int ul_sr_info(uint32_t tti, uint16_t rnti) = 0;
virtual int ul_bsr(uint16_t rnti, uint32_t lcid, uint32_t bsr, bool set_value = true) = 0; virtual int ul_bsr(uint16_t rnti, uint32_t lcg_id, uint32_t bsr) = 0;
virtual int ul_recv_len(uint16_t rnti, uint32_t lcid, uint32_t len) = 0;
virtual int ul_phr(uint16_t rnti, int phr) = 0; virtual int ul_phr(uint16_t rnti, int phr) = 0;
virtual int ul_cqi_info(uint32_t tti, uint16_t rnti, uint32_t enb_cc_idx, uint32_t cqi, uint32_t ul_ch_code) = 0; virtual int ul_cqi_info(uint32_t tti, uint16_t rnti, uint32_t enb_cc_idx, uint32_t cqi, uint32_t ul_ch_code) = 0;
@ -296,8 +296,9 @@ public:
virtual int ul_sched(uint32_t tti, uint32_t enb_cc_idx, ul_sched_res_t& sched_result) = 0; virtual int ul_sched(uint32_t tti, uint32_t enb_cc_idx, ul_sched_res_t& sched_result) = 0;
/* Custom */ /* Custom */
virtual void set_dl_tti_mask(uint8_t* tti_mask, uint32_t nof_sfs) = 0; virtual void set_dl_tti_mask(uint8_t* tti_mask, uint32_t nof_sfs) = 0;
virtual std::array<int, SRSLTE_MAX_CARRIERS> get_enb_ue_cc_map(uint16_t rnti) = 0; virtual std::array<int, SRSLTE_MAX_CARRIERS> get_enb_ue_cc_map(uint16_t rnti) = 0;
virtual int ul_buffer_add(uint16_t rnti, uint32_t lcid, uint32_t bytes) = 0;
}; };
} // namespace srsenb } // namespace srsenb

@ -120,8 +120,7 @@ public:
int dl_cqi_info(uint32_t tti, uint16_t rnti, uint32_t enb_cc_idx, uint32_t cqi_value) final; int dl_cqi_info(uint32_t tti, uint16_t rnti, uint32_t enb_cc_idx, uint32_t cqi_value) final;
int ul_crc_info(uint32_t tti, uint16_t rnti, uint32_t enb_cc_idx, bool crc) final; int ul_crc_info(uint32_t tti, uint16_t rnti, uint32_t enb_cc_idx, bool crc) final;
int ul_sr_info(uint32_t tti, uint16_t rnti) override; int ul_sr_info(uint32_t tti, uint16_t rnti) override;
int ul_bsr(uint16_t rnti, uint32_t lcid, uint32_t bsr, bool set_value = true) final; int ul_bsr(uint16_t rnti, uint32_t lcg_id, uint32_t bsr) final;
int ul_recv_len(uint16_t rnti, uint32_t lcid, uint32_t len) final;
int ul_phr(uint16_t rnti, int phr) final; int ul_phr(uint16_t rnti, int phr) final;
int ul_cqi_info(uint32_t tti, uint16_t rnti, uint32_t enb_cc_idx, uint32_t cqi, uint32_t ul_ch_code) final; int ul_cqi_info(uint32_t tti, uint16_t rnti, uint32_t enb_cc_idx, uint32_t cqi, uint32_t ul_ch_code) final;
@ -134,6 +133,7 @@ public:
void tpc_inc(uint16_t rnti); void tpc_inc(uint16_t rnti);
void tpc_dec(uint16_t rnti); void tpc_dec(uint16_t rnti);
std::array<int, SRSLTE_MAX_CARRIERS> get_enb_ue_cc_map(uint16_t rnti) final; std::array<int, SRSLTE_MAX_CARRIERS> get_enb_ue_cc_map(uint16_t rnti) final;
int ul_buffer_add(uint16_t rnti, uint32_t lcid, uint32_t bytes) final;
class carrier_sched; class carrier_sched;

@ -82,6 +82,45 @@ private:
bool active = false; bool active = false;
}; };
/// Human-readable name for a bearer direction (used in scheduler logs).
const char* to_string(sched_interface::ue_bearer_cfg_t::direction_t dir);

/// Manages the DL buffer state (per-LCID tx/retx queues) and the UL buffer
/// state (per logical-channel-group BSR) of a single UE in the scheduler.
class lch_manager
{
public:
  /// Apply the bearer configuration of every logical channel in the UE config.
  void set_cfg(const sched_interface::ue_cfg_t& cfg_);
  /// Configure/activate a single bearer. A config with IDLE direction deactivates it.
  /// NOTE: first parameter is a logical channel id (renamed from the misleading
  /// "lcg_id" to match the definition).
  void config_lcid(uint32_t lc_id, const sched_interface::ue_bearer_cfg_t& bearer_cfg);

  /// Overwrite the BSR of logical channel group lcg_id with the reported value.
  void ul_bsr(uint8_t lcg_id, uint32_t bsr);
  /// Increment the BSR of the group that lcid maps to (e.g. data received without a BSR).
  void ul_buffer_add(uint8_t lcid, uint32_t bytes);
  //  void ul_recv(uint8_t lcg_id, uint32_t len);
  /// Update DL RLC buffer occupancy (new-tx and retx queues) for one LCID.
  void dl_buffer_state(uint8_t lcid, uint32_t tx_queue, uint32_t retx_queue);

  /// Fill rlc_pdu with bytes of the first LCID that has pending DL data.
  /// Returns true if any bytes were allocated. (Parameter renamed from the
  /// misleading "lcid" — it is the output PDU, not a channel id.)
  bool alloc_rlc_pdu(sched_interface::dl_sched_pdu_t* rlc_pdu, int rem_bytes);

  bool is_bearer_active(uint32_t lcid) const;
  bool is_bearer_ul(uint32_t lcid) const;
  bool is_bearer_dl(uint32_t lcid) const;

  int get_dl_tx(uint32_t lcid) const;   ///< pending DL new-tx bytes (0 if bearer not DL)
  int get_dl_retx(uint32_t lcid) const; ///< pending DL retx bytes (0 if bearer not DL)
  int get_bsr(uint32_t lcid) const;     ///< BSR of the group lcid maps to (0 if bearer not UL)
  std::string get_bsr_text() const;

private:
  struct ue_bearer_t {
    sched_interface::ue_bearer_cfg_t cfg = {};
    int buf_tx = 0;
    int buf_retx = 0;
  };

  int alloc_retx_bytes(uint8_t lcid, uint32_t rem_bytes);
  int alloc_tx_bytes(uint8_t lcid, uint32_t rem_bytes);

  srslte::log_ref log_h{"MAC"};
  std::array<ue_bearer_t, sched_interface::MAX_LC> lch = {};
  // Use the named constant instead of a magic 4 (MAX_LC_GROUP == 4, so the type
  // is unchanged).
  std::array<int, sched_interface::MAX_LC_GROUP> lcg_bsr = {};
};
/** This class is designed to be thread-safe because it is called from workers through scheduler thread and from /** This class is designed to be thread-safe because it is called from workers through scheduler thread and from
* higher layers and mac threads. * higher layers and mac threads.
*/ */
@ -103,10 +142,9 @@ public:
void rem_bearer(uint32_t lc_id); void rem_bearer(uint32_t lc_id);
void dl_buffer_state(uint8_t lc_id, uint32_t tx_queue, uint32_t retx_queue); void dl_buffer_state(uint8_t lc_id, uint32_t tx_queue, uint32_t retx_queue);
void ul_buffer_state(uint8_t lc_id, uint32_t bsr, bool set_value = true); void ul_buffer_state(uint8_t lcg_id, uint32_t bsr);
void ul_phr(int phr); void ul_phr(int phr);
void mac_buffer_state(uint32_t ce_code, uint32_t nof_cmds); void mac_buffer_state(uint32_t ce_code, uint32_t nof_cmds);
void ul_recv_len(uint32_t lcid, uint32_t len);
void set_ul_cqi(uint32_t tti, uint32_t enb_cc_idx, uint32_t cqi, uint32_t ul_ch_code); void set_ul_cqi(uint32_t tti, uint32_t enb_cc_idx, uint32_t cqi, uint32_t ul_ch_code);
void set_dl_ri(uint32_t tti, uint32_t enb_cc_idx, uint32_t ri); void set_dl_ri(uint32_t tti, uint32_t enb_cc_idx, uint32_t ri);
@ -127,6 +165,7 @@ public:
std::pair<bool, uint32_t> get_cell_index(uint32_t enb_cc_idx) const; std::pair<bool, uint32_t> get_cell_index(uint32_t enb_cc_idx) const;
const sched_interface::ue_cfg_t& get_ue_cfg() const { return cfg; } const sched_interface::ue_cfg_t& get_ue_cfg() const { return cfg; }
uint32_t get_aggr_level(uint32_t ue_cc_idx, uint32_t nof_bits); uint32_t get_aggr_level(uint32_t ue_cc_idx, uint32_t nof_bits);
void ul_buffer_add(uint8_t lcid, uint32_t bytes);
/******************************************************* /*******************************************************
* Functions used by scheduler metric objects * Functions used by scheduler metric objects
@ -191,18 +230,8 @@ public:
uint32_t* mcs); uint32_t* mcs);
private: private:
struct ue_bearer_t {
sched_interface::ue_bearer_cfg_t cfg = {};
int buf_tx = 0;
int buf_retx = 0;
int bsr = 0;
};
void set_bearer_cfg_unlocked(uint32_t lc_id, const sched_interface::ue_bearer_cfg_t& cfg_);
bool is_sr_triggered(); bool is_sr_triggered();
int alloc_rlc_pdu(sched_interface::dl_sched_pdu_t* mac_sdu, int rem_tbs);
uint32_t allocate_mac_sdus(sched_interface::dl_sched_data_t* data, uint32_t total_tbs, uint32_t tbidx); uint32_t allocate_mac_sdus(sched_interface::dl_sched_data_t* data, uint32_t total_tbs, uint32_t tbidx);
uint32_t allocate_mac_ces(sched_interface::dl_sched_data_t* data, uint32_t total_tbs, uint32_t ue_cc_idx); uint32_t allocate_mac_ces(sched_interface::dl_sched_data_t* data, uint32_t total_tbs, uint32_t ue_cc_idx);
std::pair<int, int> allocate_new_dl_mac_pdu(sched_interface::dl_sched_data_t* data, std::pair<int, int> allocate_new_dl_mac_pdu(sched_interface::dl_sched_data_t* data,
@ -220,9 +249,6 @@ private:
uint32_t cfi, uint32_t cfi,
const srslte_dci_dl_t& dci); const srslte_dci_dl_t& dci);
static bool bearer_is_ul(const ue_bearer_t* lch);
static bool bearer_is_dl(const ue_bearer_t* lch);
uint32_t get_pending_ul_old_data_unlocked(uint32_t cc_idx); uint32_t get_pending_ul_old_data_unlocked(uint32_t cc_idx);
uint32_t get_pending_ul_new_data_unlocked(uint32_t tti); uint32_t get_pending_ul_new_data_unlocked(uint32_t tti);
@ -255,8 +281,8 @@ private:
const sched_cell_params_t* main_cc_params = nullptr; const sched_cell_params_t* main_cc_params = nullptr;
/* Buffer states */ /* Buffer states */
bool sr = false; bool sr = false;
std::array<ue_bearer_t, sched_interface::MAX_LC> lch = {}; lch_manager lch_handler;
int power_headroom = 0; int power_headroom = 0;
uint32_t cqi_request_tti = 0; uint32_t cqi_request_tti = 0;

@ -84,8 +84,6 @@ public:
void push_pdu(const uint32_t ue_cc_idx, const uint32_t tti, uint32_t len); void push_pdu(const uint32_t ue_cc_idx, const uint32_t tti, uint32_t len);
void deallocate_pdu(const uint32_t ue_cc_idx, const uint32_t tti); void deallocate_pdu(const uint32_t ue_cc_idx, const uint32_t tti);
void set_lcg(uint32_t lcid, uint32_t lcg);
void metrics_read(srsenb::mac_metrics_t* metrics); void metrics_read(srsenb::mac_metrics_t* metrics);
void metrics_rx(bool crc, uint32_t tbs); void metrics_rx(bool crc, uint32_t tbs);
void metrics_tx(bool crc, uint32_t tbs); void metrics_tx(bool crc, uint32_t tbs);
@ -105,8 +103,6 @@ private:
bool process_ce(srslte::sch_subh* subh); bool process_ce(srslte::sch_subh* subh);
void allocate_ce(srslte::sch_pdu* pdu, uint32_t lcid); void allocate_ce(srslte::sch_pdu* pdu, uint32_t lcid);
std::vector<uint32_t> lc_groups[4];
uint32_t phr_counter = 0; uint32_t phr_counter = 0;
uint32_t dl_cqi_counter = 0; uint32_t dl_cqi_counter = 0;
uint32_t dl_ri_counter = 0; uint32_t dl_ri_counter = 0;

@ -165,8 +165,6 @@ int mac::bearer_ue_cfg(uint16_t rnti, uint32_t lc_id, sched_interface::ue_bearer
int ret = -1; int ret = -1;
srslte::rwlock_read_guard lock(rwlock); srslte::rwlock_read_guard lock(rwlock);
if (ue_db.count(rnti)) { if (ue_db.count(rnti)) {
// configure BSR group in UE
ue_db[rnti]->set_lcg(lc_id, (uint32_t)cfg->group);
ret = scheduler.bearer_ue_cfg(rnti, lc_id, cfg); ret = scheduler.bearer_ue_cfg(rnti, lc_id, cfg);
} else { } else {
Error("User rnti=0x%x not found\n", rnti); Error("User rnti=0x%x not found\n", rnti);

@ -315,14 +315,14 @@ int sched::ul_cqi_info(uint32_t tti, uint16_t rnti, uint32_t enb_cc_idx, uint32_
return ue_db_access(rnti, [&](sched_ue& ue) { ue.set_ul_cqi(tti, enb_cc_idx, cqi, ul_ch_code); }); return ue_db_access(rnti, [&](sched_ue& ue) { ue.set_ul_cqi(tti, enb_cc_idx, cqi, ul_ch_code); });
} }
int sched::ul_bsr(uint16_t rnti, uint32_t lcid, uint32_t bsr, bool set_value) int sched::ul_bsr(uint16_t rnti, uint32_t lcg_id, uint32_t bsr)
{ {
return ue_db_access(rnti, [lcid, bsr, set_value](sched_ue& ue) { ue.ul_buffer_state(lcid, bsr, set_value); }); return ue_db_access(rnti, [lcg_id, bsr](sched_ue& ue) { ue.ul_buffer_state(lcg_id, bsr); });
} }
int sched::ul_recv_len(uint16_t rnti, uint32_t lcid, uint32_t len) int sched::ul_buffer_add(uint16_t rnti, uint32_t lcid, uint32_t bytes)
{ {
return ue_db_access(rnti, [lcid, len](sched_ue& ue) { ue.ul_recv_len(lcid, len); }, __PRETTY_FUNCTION__); return ue_db_access(rnti, [lcid, bytes](sched_ue& ue) { ue.ul_buffer_add(lcid, bytes); });
} }
int sched::ul_phr(uint16_t rnti, int phr) int sched::ul_phr(uint16_t rnti, int phr)

@ -124,9 +124,7 @@ void sched_ue::set_cfg(const sched_interface::ue_cfg_t& cfg_)
cfg = cfg_; cfg = cfg_;
// update bearer cfgs // update bearer cfgs
for (uint32_t i = 0; i < sched_interface::MAX_LC; ++i) { lch_handler.set_cfg(cfg_);
set_bearer_cfg_unlocked(i, cfg.ue_bearers[i]);
}
// either add a new carrier, or reconfigure existing one // either add a new carrier, or reconfigure existing one
bool scell_activation_state_changed = false; bool scell_activation_state_changed = false;
@ -166,7 +164,7 @@ void sched_ue::reset()
// erase all bearers // erase all bearers
for (uint32_t i = 0; i < cfg.ue_bearers.size(); ++i) { for (uint32_t i = 0; i < cfg.ue_bearers.size(); ++i) {
set_bearer_cfg_unlocked(i, {}); lch_handler.config_lcid(i, {});
} }
} }
@ -179,13 +177,13 @@ void sched_ue::reset()
void sched_ue::set_bearer_cfg(uint32_t lc_id, sched_interface::ue_bearer_cfg_t* cfg_) void sched_ue::set_bearer_cfg(uint32_t lc_id, sched_interface::ue_bearer_cfg_t* cfg_)
{ {
cfg.ue_bearers[lc_id] = *cfg_; cfg.ue_bearers[lc_id] = *cfg_;
set_bearer_cfg_unlocked(lc_id, *cfg_); lch_handler.config_lcid(lc_id, *cfg_);
} }
void sched_ue::rem_bearer(uint32_t lc_id) void sched_ue::rem_bearer(uint32_t lc_id)
{ {
cfg.ue_bearers[lc_id] = sched_interface::ue_bearer_cfg_t{}; cfg.ue_bearers[lc_id] = sched_interface::ue_bearer_cfg_t{};
set_bearer_cfg_unlocked(lc_id, sched_interface::ue_bearer_cfg_t{}); lch_handler.config_lcid(lc_id, sched_interface::ue_bearer_cfg_t{});
} }
void sched_ue::phy_config_enabled(uint32_t tti, bool enabled) void sched_ue::phy_config_enabled(uint32_t tti, bool enabled)
@ -196,16 +194,14 @@ void sched_ue::phy_config_enabled(uint32_t tti, bool enabled)
phy_config_dedicated_enabled = enabled; phy_config_dedicated_enabled = enabled;
} }
void sched_ue::ul_buffer_state(uint8_t lc_id, uint32_t bsr, bool set_value) void sched_ue::ul_buffer_state(uint8_t lcg_id, uint32_t bsr)
{ {
if (lc_id < sched_interface::MAX_LC) { lch_handler.ul_bsr(lcg_id, bsr);
if (set_value) { }
lch[lc_id].bsr = bsr;
} else { void sched_ue::ul_buffer_add(uint8_t lcid, uint32_t bytes)
lch[lc_id].bsr += bsr; {
} lch_handler.ul_buffer_add(lcid, bytes);
}
Debug("SCHED: bsr=%d, lcid=%d, bsr={%d,%d,%d,%d}\n", bsr, lc_id, lch[0].bsr, lch[1].bsr, lch[2].bsr, lch[3].bsr);
} }
void sched_ue::ul_phr(int phr) void sched_ue::ul_phr(int phr)
@ -215,11 +211,7 @@ void sched_ue::ul_phr(int phr)
void sched_ue::dl_buffer_state(uint8_t lc_id, uint32_t tx_queue, uint32_t retx_queue) void sched_ue::dl_buffer_state(uint8_t lc_id, uint32_t tx_queue, uint32_t retx_queue)
{ {
if (lc_id < sched_interface::MAX_LC) { lch_handler.dl_buffer_state(lc_id, tx_queue, retx_queue);
lch[lc_id].buf_retx = retx_queue;
lch[lc_id].buf_tx = tx_queue;
Debug("SCHED: DL lcid=%d buffer_state=%d,%d\n", lc_id, tx_queue, retx_queue);
}
} }
void sched_ue::mac_buffer_state(uint32_t ce_code, uint32_t nof_cmds) void sched_ue::mac_buffer_state(uint32_t ce_code, uint32_t nof_cmds)
@ -274,24 +266,6 @@ int sched_ue::set_ack_info(uint32_t tti_rx, uint32_t enb_cc_idx, uint32_t tb_idx
return tbs_acked; return tbs_acked;
} }
void sched_ue::ul_recv_len(uint32_t lcid, uint32_t len)
{
// Remove PDCP header??
if (len > 4) {
len -= 4;
}
if (lcid < sched_interface::MAX_LC) {
if (bearer_is_ul(&lch[lcid])) {
if (lch[lcid].bsr > (int)len) {
lch[lcid].bsr -= len;
} else {
lch[lcid].bsr = 0;
}
}
}
Debug("SCHED: recv_len=%d, lcid=%d, bsr={%d,%d,%d,%d}\n", len, lcid, lch[0].bsr, lch[1].bsr, lch[2].bsr, lch[3].bsr);
}
void sched_ue::set_ul_crc(srslte::tti_point tti_rx, uint32_t enb_cc_idx, bool crc_res) void sched_ue::set_ul_crc(srslte::tti_point tti_rx, uint32_t enb_cc_idx, bool crc_res)
{ {
auto p = get_cell_index(enb_cc_idx); auto p = get_cell_index(enb_cc_idx);
@ -388,7 +362,7 @@ uint32_t sched_ue::allocate_mac_sdus(sched_interface::dl_sched_data_t* data, uin
// if we do not have enough bytes to fit MAC subheader and RLC header, skip MAC SDU allocation // if we do not have enough bytes to fit MAC subheader and RLC header, skip MAC SDU allocation
while (rem_tbs >= min_mac_sdu_size) { while (rem_tbs >= min_mac_sdu_size) {
uint32_t max_sdu_bytes = rem_tbs - compute_subheader_size(rem_tbs - 2); uint32_t max_sdu_bytes = rem_tbs - compute_subheader_size(rem_tbs - 2);
uint32_t alloc_sdu_bytes = alloc_rlc_pdu(&data->pdu[tbidx][data->nof_pdu_elems[tbidx]], max_sdu_bytes); uint32_t alloc_sdu_bytes = lch_handler.alloc_rlc_pdu(&data->pdu[tbidx][data->nof_pdu_elems[tbidx]], max_sdu_bytes);
if (alloc_sdu_bytes == 0) { if (alloc_sdu_bytes == 0) {
break; break;
} }
@ -764,18 +738,6 @@ int sched_ue::generate_format0(sched_interface::ul_sched_data_t* data,
* *
*******************************************************/ *******************************************************/
bool sched_ue::bearer_is_ul(const ue_bearer_t* lch)
{
return lch->cfg.direction == sched_interface::ue_bearer_cfg_t::UL ||
lch->cfg.direction == sched_interface::ue_bearer_cfg_t::BOTH;
}
bool sched_ue::bearer_is_dl(const ue_bearer_t* lch)
{
return lch->cfg.direction == sched_interface::ue_bearer_cfg_t::DL ||
lch->cfg.direction == sched_interface::ue_bearer_cfg_t::BOTH;
}
uint32_t sched_ue::get_max_retx() uint32_t sched_ue::get_max_retx()
{ {
return cfg.maxharq_tx; return cfg.maxharq_tx;
@ -878,7 +840,7 @@ std::pair<uint32_t, uint32_t> sched_ue::get_requested_dl_bytes(uint32_t ue_cc_id
/* Set Maximum boundary */ /* Set Maximum boundary */
// Ensure there is space for ConRes and RRC Setup // Ensure there is space for ConRes and RRC Setup
// SRB0 is a special case due to being RLC TM (no segmentation possible) // SRB0 is a special case due to being RLC TM (no segmentation possible)
if (not bearer_is_dl(&lch[0])) { if (not lch_handler.is_bearer_dl(0)) {
log_h->error("SRB0 must always be activated for DL\n"); log_h->error("SRB0 must always be activated for DL\n");
return {0, 0}; return {0, 0};
} }
@ -889,9 +851,9 @@ std::pair<uint32_t, uint32_t> sched_ue::get_requested_dl_bytes(uint32_t ue_cc_id
uint32_t max_data = 0, min_data = 0; uint32_t max_data = 0, min_data = 0;
uint32_t srb0_data = 0, rb_data = 0, sum_ce_data = 0; uint32_t srb0_data = 0, rb_data = 0, sum_ce_data = 0;
bool is_dci_format1 = get_dci_format() == SRSLTE_DCI_FORMAT1; bool is_dci_format1 = get_dci_format() == SRSLTE_DCI_FORMAT1;
if (is_dci_format1 and (lch[0].buf_tx > 0 or lch[0].buf_retx > 0)) { if (is_dci_format1) {
srb0_data = compute_sdu_total_bytes(0, lch[0].buf_retx); srb0_data += compute_sdu_total_bytes(0, lch_handler.get_dl_retx(0));
srb0_data += compute_sdu_total_bytes(0, lch[0].buf_tx); srb0_data += compute_sdu_total_bytes(0, lch_handler.get_dl_tx(0));
} }
// Add pending CEs // Add pending CEs
if (ue_cc_idx == 0) { if (ue_cc_idx == 0) {
@ -905,9 +867,9 @@ std::pair<uint32_t, uint32_t> sched_ue::get_requested_dl_bytes(uint32_t ue_cc_id
} }
// Add pending data in remaining RLC buffers // Add pending data in remaining RLC buffers
for (int i = 1; i < sched_interface::MAX_LC; i++) { for (int i = 1; i < sched_interface::MAX_LC; i++) {
if (bearer_is_dl(&lch[i])) { if (lch_handler.is_bearer_dl(i)) {
rb_data += compute_sdu_total_bytes(i, lch[i].buf_retx); rb_data += compute_sdu_total_bytes(i, lch_handler.get_dl_retx(i));
rb_data += compute_sdu_total_bytes(i, lch[i].buf_tx); rb_data += compute_sdu_total_bytes(i, lch_handler.get_dl_tx(i));
} }
} }
max_data = srb0_data + sum_ce_data + rb_data; max_data = srb0_data + sum_ce_data + rb_data;
@ -940,8 +902,8 @@ uint32_t sched_ue::get_pending_dl_new_data()
uint32_t pending_data = 0; uint32_t pending_data = 0;
for (int i = 0; i < sched_interface::MAX_LC; i++) { for (int i = 0; i < sched_interface::MAX_LC; i++) {
if (bearer_is_dl(&lch[i])) { if (lch_handler.is_bearer_dl(i)) {
pending_data += lch[i].buf_retx + lch[i].buf_tx; pending_data += lch_handler.get_dl_tx(i) + lch_handler.get_dl_retx(i);
} }
} }
for (auto& ce : pending_ces) { for (auto& ce : pending_ces) {
@ -966,9 +928,9 @@ uint32_t sched_ue::get_pending_ul_new_data_unlocked(uint32_t tti)
// Note: If there are no active bearers, scheduling requests are also ignored. // Note: If there are no active bearers, scheduling requests are also ignored.
uint32_t pending_data = 0; uint32_t pending_data = 0;
bool ul_bearers_found = false; bool ul_bearers_found = false;
for (int i = 0; i < sched_interface::MAX_LC; i++) { for (int i = 0; i < sched_interface::MAX_LC_GROUP; i++) {
if (bearer_is_ul(&lch[i])) { if (lch_handler.is_bearer_ul(i)) {
pending_data += lch[i].bsr; pending_data += lch_handler.get_bsr(i);
ul_bearers_found = true; ul_bearers_found = true;
} }
} }
@ -991,13 +953,10 @@ uint32_t sched_ue::get_pending_ul_new_data_unlocked(uint32_t tti)
pending_data = (pending_data > pending_ul_data) ? pending_data - pending_ul_data : 0; pending_data = (pending_data > pending_ul_data) ? pending_data - pending_ul_data : 0;
if (pending_data > 0) { if (pending_data > 0) {
Debug("SCHED: pending_data=%d, pending_ul_data=%d, bsr={%d,%d,%d,%d}\n", Debug("SCHED: pending_data=%d, pending_ul_data=%d, bsr=%s\n",
pending_data, pending_data,
pending_ul_data, pending_ul_data,
lch[0].bsr, lch_handler.get_bsr_text().c_str());
lch[1].bsr,
lch[2].bsr,
lch[3].bsr);
} }
return pending_data; return pending_data;
} }
@ -1017,22 +976,6 @@ uint32_t sched_ue::get_required_prb_ul(uint32_t cc_idx, uint32_t req_bytes)
return carriers[cc_idx].get_required_prb_ul(req_bytes); return carriers[cc_idx].get_required_prb_ul(req_bytes);
} }
void sched_ue::set_bearer_cfg_unlocked(uint32_t lc_id, const sched_interface::ue_bearer_cfg_t& cfg_)
{
if (lc_id < sched_interface::MAX_LC) {
bool is_idle = lch[lc_id].cfg.direction == sched_interface::ue_bearer_cfg_t::IDLE;
bool is_equal = memcmp(&cfg_, &lch[lc_id].cfg, sizeof(cfg_)) == 0;
lch[lc_id].cfg = cfg_;
if (lch[lc_id].cfg.direction != sched_interface::ue_bearer_cfg_t::IDLE) {
if (not is_equal) {
Info("SCHED: Set bearer config lc_id=%d, direction=%d\n", lc_id, (int)lch[lc_id].cfg.direction);
}
} else if (not is_idle) {
Info("SCHED: Removed bearer config lc_id=%d, direction=%d\n", lc_id, (int)lch[lc_id].cfg.direction);
}
}
}
bool sched_ue::is_sr_triggered() bool sched_ue::is_sr_triggered()
{ {
return sr; return sr;
@ -1146,29 +1089,6 @@ cc_sched_ue* sched_ue::get_ue_carrier(uint32_t enb_cc_idx)
return &carriers[p.second]; return &carriers[p.second];
} }
/* Allocates first available RLC PDU */
int sched_ue::alloc_rlc_pdu(sched_interface::dl_sched_pdu_t* mac_sdu, int rem_tbs)
{
// TODO: Implement lcid priority (now lowest index is lowest priority)
int alloc_bytes = 0;
int i = 0;
for (i = 0; i < sched_interface::MAX_LC and alloc_bytes == 0; i++) {
if (lch[i].buf_retx > 0) {
alloc_bytes = SRSLTE_MIN(lch[i].buf_retx, rem_tbs);
lch[i].buf_retx -= alloc_bytes;
} else if (lch[i].buf_tx > 0) {
alloc_bytes = SRSLTE_MIN(lch[i].buf_tx, rem_tbs);
lch[i].buf_tx -= alloc_bytes;
}
}
if (alloc_bytes > 0) {
mac_sdu->lcid = i - 1;
mac_sdu->nbytes = alloc_bytes;
Debug("SCHED: Allocated lcid=%d, nbytes=%d, tbs_bytes=%d\n", mac_sdu->lcid, mac_sdu->nbytes, rem_tbs);
}
return alloc_bytes;
}
int sched_ue::cqi_to_tbs(uint32_t cqi, int sched_ue::cqi_to_tbs(uint32_t cqi,
uint32_t nof_prb, uint32_t nof_prb,
uint32_t nof_re, uint32_t nof_re,
@ -1411,4 +1331,156 @@ void cc_sched_ue::set_dl_cqi(uint32_t tti_tx_dl, uint32_t dl_cqi_)
} }
} }
/*******************************************************
*
* Logical Channel Management
*
*******************************************************/
/// Translate a bearer direction enum into a log-friendly string.
const char* to_string(sched_interface::ue_bearer_cfg_t::direction_t dir)
{
  using bearer_cfg_t = sched_interface::ue_bearer_cfg_t;
  if (dir == bearer_cfg_t::IDLE) {
    return "idle";
  }
  if (dir == bearer_cfg_t::BOTH) {
    return "bi-dir";
  }
  if (dir == bearer_cfg_t::DL) {
    return "DL";
  }
  if (dir == bearer_cfg_t::UL) {
    return "UL";
  }
  return "unrecognized direction";
}
/// Re-apply the bearer configuration of every logical channel of the UE.
void lch_manager::set_cfg(const sched_interface::ue_cfg_t& cfg)
{
  for (uint32_t lc_id = 0; lc_id < sched_interface::MAX_LC; ++lc_id) {
    config_lcid(lc_id, cfg.ue_bearers[lc_id]);
  }
}
/// Configure or update one bearer.
/// Rejects out-of-range lc_id / group values; logs only when the effective
/// configuration actually changes.
void lch_manager::config_lcid(uint32_t lc_id, const sched_interface::ue_bearer_cfg_t& bearer_cfg)
{
  if (lc_id >= sched_interface::MAX_LC) {
    Warning("Adding bearer with invalid logical channel id=%d\n", lc_id);
    return;
  }
  if (bearer_cfg.group >= sched_interface::MAX_LC_GROUP) {
    Warning("Adding bearer with invalid logical channel group id=%d\n", bearer_cfg.group);
    return;
  }

  // Compare field-by-field rather than with memcmp: memcmp over a struct also
  // compares padding bytes, which can report a change when none happened.
  bool is_equal = bearer_cfg.priority == lch[lc_id].cfg.priority and bearer_cfg.bsd == lch[lc_id].cfg.bsd and
                  bearer_cfg.pbr == lch[lc_id].cfg.pbr and bearer_cfg.group == lch[lc_id].cfg.group and
                  bearer_cfg.direction == lch[lc_id].cfg.direction;
  if (not is_equal) {
    lch[lc_id].cfg = bearer_cfg;
    Info("SCHED: bearer configured: lc_id=%d, mode=%s, prio=%d\n",
         lc_id,
         to_string(lch[lc_id].cfg.direction),
         lch[lc_id].cfg.priority);
  }
}
/// Overwrite the BSR of logical channel group lcg_id with the reported value.
void lch_manager::ul_bsr(uint8_t lcg_id, uint32_t bsr)
{
  if (lcg_id < sched_interface::MAX_LC_GROUP) {
    lcg_bsr[lcg_id] = bsr;
    Debug("SCHED: bsr=%d, lcg_id=%d, bsr=%s\n", bsr, lcg_id, get_bsr_text().c_str());
  } else {
    Warning("The provided logical channel group id=%d is not valid\n", lcg_id);
  }
}
/// Add bytes to the BSR of the group lcid belongs to (data seen without a BSR).
void lch_manager::ul_buffer_add(uint8_t lcid, uint32_t bytes)
{
  if (lcid >= sched_interface::MAX_LC) {
    Warning("The provided lcid=%d is not valid\n", lcid);
    return;
  }
  const int group = lch[lcid].cfg.group;
  lcg_bsr[group] += bytes;
  Debug("SCHED: UL buffer update=%d, lcg_id=%d, bsr=%s\n", bytes, group, get_bsr_text().c_str());
}
/// Update DL RLC buffer occupancy (new-tx and retx queues) for one LCID.
void lch_manager::dl_buffer_state(uint8_t lcid, uint32_t tx_queue, uint32_t retx_queue)
{
  if (lcid < sched_interface::MAX_LC) {
    lch[lcid].buf_tx   = tx_queue;
    lch[lcid].buf_retx = retx_queue;
    Debug("SCHED: DL lcid=%d buffer_state=%d,%d\n", lcid, tx_queue, retx_queue);
    return;
  }
  Warning("The provided lcid=%d is not valid\n", lcid);
}
/* Allocates first available RLC PDU */
bool lch_manager::alloc_rlc_pdu(sched_interface::dl_sched_pdu_t* rlc_pdu, int rem_bytes)
{
  // TODO: Implement lcid priority (now lowest index is lowest priority)
  int sdu_bytes = 0;
  int lcid      = 0;
  while (lcid < sched_interface::MAX_LC and sdu_bytes == 0) {
    // retx data of an lcid takes precedence over its new-tx data
    sdu_bytes = alloc_retx_bytes(lcid, rem_bytes);
    if (sdu_bytes == 0) {
      sdu_bytes = alloc_tx_bytes(lcid, rem_bytes);
    }
    ++lcid;
  }
  if (sdu_bytes == 0) {
    return false;
  }
  // loop incremented past the winning lcid, hence the -1
  rlc_pdu->lcid   = lcid - 1;
  rlc_pdu->nbytes = sdu_bytes;
  Debug("SCHED: Allocated lcid=%d, nbytes=%d, tbs_bytes=%d\n", rlc_pdu->lcid, rlc_pdu->nbytes, rem_bytes);
  return true;
}
/// Take up to rem_bytes from the lcid DL retx queue; returns the bytes taken.
int lch_manager::alloc_retx_bytes(uint8_t lcid, uint32_t rem_bytes)
{
  const int pending = get_dl_retx(lcid);
  const int taken   = pending < (int)rem_bytes ? pending : (int)rem_bytes;
  lch[lcid].buf_retx -= taken;
  return taken;
}
/// Take up to rem_bytes from the lcid DL new-tx queue; returns the bytes taken.
int lch_manager::alloc_tx_bytes(uint8_t lcid, uint32_t rem_bytes)
{
  const int pending = get_dl_tx(lcid);
  const int taken   = pending < (int)rem_bytes ? pending : (int)rem_bytes;
  lch[lcid].buf_tx -= taken;
  return taken;
}
/// A bearer is active when its configured direction is anything but IDLE.
bool lch_manager::is_bearer_active(uint32_t lcid) const
{
  const auto dir = lch[lcid].cfg.direction;
  return dir != sched_interface::ue_bearer_cfg_t::IDLE;
}
/// True when the bearer carries UL traffic (direction UL or BOTH).
bool lch_manager::is_bearer_ul(uint32_t lcid) const
{
  const auto dir = lch[lcid].cfg.direction;
  return dir == sched_interface::ue_bearer_cfg_t::UL or dir == sched_interface::ue_bearer_cfg_t::BOTH;
}
/// True when the bearer carries DL traffic (direction DL or BOTH).
bool lch_manager::is_bearer_dl(uint32_t lcid) const
{
  const auto dir = lch[lcid].cfg.direction;
  return dir == sched_interface::ue_bearer_cfg_t::DL or dir == sched_interface::ue_bearer_cfg_t::BOTH;
}
/// Pending DL new-tx bytes for lcid; 0 when the bearer is not DL-capable.
int lch_manager::get_dl_tx(uint32_t lcid) const
{
  if (not is_bearer_dl(lcid)) {
    return 0;
  }
  return lch[lcid].buf_tx;
}
/// Pending DL retx bytes for lcid; 0 when the bearer is not DL-capable.
int lch_manager::get_dl_retx(uint32_t lcid) const
{
  if (not is_bearer_dl(lcid)) {
    return 0;
  }
  return lch[lcid].buf_retx;
}
/// BSR of the logical channel group lcid maps to; 0 when the bearer is not UL-capable.
int lch_manager::get_bsr(uint32_t lcid) const
{
  if (is_bearer_ul(lcid)) {
    return lcg_bsr[lch[lcid].cfg.group];
  }
  return 0;
}
/// Render the four per-group BSR values as "{a, b, c, d}" for log output.
std::string lch_manager::get_bsr_text() const
{
  std::string text = "{";
  for (size_t i = 0; i < lcg_bsr.size(); ++i) {
    text += std::to_string(lcg_bsr[i]);
    text += (i + 1 < lcg_bsr.size()) ? ", " : "}";
  }
  return text;
}
} // namespace srsenb } // namespace srsenb

@ -69,10 +69,6 @@ ue::ue(uint16_t rnti_,
// Allocate buffer for PCell // Allocate buffer for PCell
allocate_cc_buffers(); allocate_cc_buffers();
// Set LCID group for SRB0 and SRB1
set_lcg(0, 0);
set_lcg(1, 0);
} }
ue::~ue() ue::~ue()
@ -158,15 +154,6 @@ void ue::start_pcap(srslte::mac_pcap* pcap_)
pcap = pcap_; pcap = pcap_;
} }
void ue::set_lcg(uint32_t lcid, uint32_t lcg)
{
// find and remove if already exists
for (int i = 0; i < 4; i++) {
lc_groups[lcg].erase(std::remove(lc_groups[lcg].begin(), lc_groups[lcg].end(), lcid), lc_groups[lcg].end());
}
lc_groups[lcg].push_back(lcid);
}
srslte_softbuffer_rx_t* ue::get_rx_softbuffer(const uint32_t ue_cc_idx, const uint32_t tti) srslte_softbuffer_rx_t* ue::get_rx_softbuffer(const uint32_t ue_cc_idx, const uint32_t tti)
{ {
return &softbuffer_rx.at(ue_cc_idx).at(tti % nof_rx_harq_proc); return &softbuffer_rx.at(ue_cc_idx).at(tti % nof_rx_harq_proc);
@ -315,7 +302,7 @@ void ue::process_pdu(uint8_t* pdu, uint32_t nof_bytes, srslte::pdu_queue::channe
// If BSR is not received means that new data has arrived and there is no space for BSR transmission // If BSR is not received means that new data has arrived and there is no space for BSR transmission
if (!bsr_received && lcid_most_data > 2) { if (!bsr_received && lcid_most_data > 2) {
// Add BSR to the LCID for which most data was received // Add BSR to the LCID for which most data was received
sched->ul_bsr(rnti, lcid_most_data, 256, false); // false adds BSR instead of setting sched->ul_buffer_add(rnti, lcid_most_data, 256);
Debug("BSR not received. Giving extra dci\n"); Debug("BSR not received. Giving extra dci\n");
} }
@ -374,10 +361,8 @@ bool ue::process_ce(srslte::sch_subh* subh)
Error("Invalid Index Passed to lc groups\n"); Error("Invalid Index Passed to lc groups\n");
break; break;
} }
for (uint32_t i = 0; i < lc_groups[idx].size(); i++) { // Indicate BSR to scheduler
// Indicate BSR to scheduler sched->ul_bsr(rnti, idx, buff_size[idx]);
sched->ul_bsr(rnti, lc_groups[idx][i], buff_size[idx]);
}
Info("CE: Received %s BSR rnti=0x%x, lcg=%d, value=%d\n", Info("CE: Received %s BSR rnti=0x%x, lcg=%d, value=%d\n",
subh->ul_sch_ce_type() == srslte::ul_sch_lcid::SHORT_BSR ? "Short" : "Trunc", subh->ul_sch_ce_type() == srslte::ul_sch_lcid::SHORT_BSR ? "Short" : "Trunc",
rnti, rnti,
@ -387,11 +372,7 @@ bool ue::process_ce(srslte::sch_subh* subh)
break; break;
case srslte::ul_sch_lcid::LONG_BSR: case srslte::ul_sch_lcid::LONG_BSR:
subh->get_bsr(buff_size); subh->get_bsr(buff_size);
for (idx = 0; idx < 4; idx++) { sched->ul_bsr(rnti, idx, buff_size[idx]);
for (uint32_t i = 0; i < lc_groups[idx].size(); i++) {
sched->ul_bsr(rnti, lc_groups[idx][i], buff_size[idx]);
}
}
is_bsr = true; is_bsr = true;
Info("CE: Received Long BSR rnti=0x%x, value=%d,%d,%d,%d\n", Info("CE: Received Long BSR rnti=0x%x, value=%d,%d,%d,%d\n",
rnti, rnti,

@ -21,6 +21,7 @@
#include "scheduler_test_common.h" #include "scheduler_test_common.h"
#include "srsenb/hdr/stack/mac/scheduler.h" #include "srsenb/hdr/stack/mac/scheduler.h"
#include "srsenb/hdr/stack/upper/common_enb.h"
#include "srslte/mac/pdu.h" #include "srslte/mac/pdu.h"
#include "srslte/common/test_common.h" #include "srslte/common/test_common.h"
@ -981,7 +982,7 @@ int common_sched_tester::process_tti_events(const tti_ev& tti_ev)
// Msg3 has been received but Msg4 has not been yet transmitted // Msg3 has been received but Msg4 has not been yet transmitted
uint32_t pending_dl_new_data = ue_db[ue_ev.rnti].get_pending_dl_new_data(); uint32_t pending_dl_new_data = ue_db[ue_ev.rnti].get_pending_dl_new_data();
if (pending_dl_new_data == 0) { if (pending_dl_new_data == 0) {
uint32_t lcid = 0; // Use SRB0 to schedule Msg4 uint32_t lcid = RB_ID_SRB0; // Use SRB0 to schedule Msg4
dl_rlc_buffer_state(ue_ev.rnti, lcid, 50, 0); dl_rlc_buffer_state(ue_ev.rnti, lcid, 50, 0);
dl_mac_buffer_state(ue_ev.rnti, (uint32_t)srslte::dl_sch_lcid::CON_RES_ID); dl_mac_buffer_state(ue_ev.rnti, (uint32_t)srslte::dl_sch_lcid::CON_RES_ID);
} else { } else {
@ -994,16 +995,17 @@ int common_sched_tester::process_tti_events(const tti_ev& tti_ev)
CONDERROR(user == nullptr, "TESTER ERROR: Trying to schedule data for user that does not exist\n"); CONDERROR(user == nullptr, "TESTER ERROR: Trying to schedule data for user that does not exist\n");
if (ue_ev.buffer_ev->dl_data > 0 and user->msg4_tti.is_valid()) { if (ue_ev.buffer_ev->dl_data > 0 and user->msg4_tti.is_valid()) {
// If Msg4 has already been tx and there DL data to transmit // If Msg4 has already been tx and there DL data to transmit
uint32_t lcid = 2; uint32_t lcid = RB_ID_DRB1;
uint32_t pending_dl_new_data = ue_db[ue_ev.rnti].get_pending_dl_new_data(); uint32_t pending_dl_new_data = ue_db[ue_ev.rnti].get_pending_dl_new_data();
if (user->drb_cfg_flag or pending_dl_new_data == 0) { if (user->drb_cfg_flag or pending_dl_new_data == 0) {
// If RRCSetup finished // If RRCSetup finished
if (not user->drb_cfg_flag) { if (not user->drb_cfg_flag) {
// setup lcid==2 bearer // setup lcid==drb1 bearer
sched::ue_bearer_cfg_t cfg = {}; sched::ue_bearer_cfg_t cfg = {};
cfg.direction = ue_bearer_cfg_t::BOTH; cfg.direction = ue_bearer_cfg_t::BOTH;
ue_tester->bearer_cfg(ue_ev.rnti, 2, cfg); cfg.group = 1;
bearer_ue_cfg(ue_ev.rnti, 2, &cfg); ue_tester->bearer_cfg(ue_ev.rnti, lcid, cfg);
bearer_ue_cfg(ue_ev.rnti, lcid, &cfg);
} }
// DRB is set. Update DL buffer // DRB is set. Update DL buffer
uint32_t tot_dl_data = pending_dl_new_data + ue_ev.buffer_ev->dl_data; // TODO: derive pending based on rx uint32_t tot_dl_data = pending_dl_new_data + ue_ev.buffer_ev->dl_data; // TODO: derive pending based on rx
@ -1016,8 +1018,8 @@ int common_sched_tester::process_tti_events(const tti_ev& tti_ev)
if (ue_ev.buffer_ev->sr_data > 0 and user->drb_cfg_flag) { if (ue_ev.buffer_ev->sr_data > 0 and user->drb_cfg_flag) {
uint32_t tot_ul_data = uint32_t tot_ul_data =
ue_db[ue_ev.rnti].get_pending_ul_new_data(tti_info.tti_params.tti_tx_ul) + ue_ev.buffer_ev->sr_data; ue_db[ue_ev.rnti].get_pending_ul_new_data(tti_info.tti_params.tti_tx_ul) + ue_ev.buffer_ev->sr_data;
uint32_t lcid = 2; uint32_t lcg = 1;
ul_bsr(ue_ev.rnti, lcid, tot_ul_data, true); ul_bsr(ue_ev.rnti, lcg, tot_ul_data);
} }
} }
} }

Loading…
Cancel
Save