Now the scheduler only activates an SCell after receiving a valid CQI for the SCell

master
Francisco Paisana 5 years ago
parent f571b7c16b
commit 0f3ef11f8b

@@ -54,7 +54,7 @@ struct sched_ue_carrier {
// Harq access
void reset_old_pending_pids(uint32_t tti_rx);
dl_harq_proc* get_pending_dl_harq(uint32_t tti_tx_dl);
dl_harq_proc* get_empty_dl_harq();
dl_harq_proc* get_empty_dl_harq(uint32_t tti_tx_dl);
int set_ack_info(uint32_t tti_rx, uint32_t tb_idx, bool ack);
ul_harq_proc* get_ul_harq(uint32_t tti);
uint32_t get_pending_ul_old_data();
@@ -66,7 +66,7 @@ struct sched_ue_carrier {
uint32_t get_required_prb_ul(uint32_t req_bytes);
const sched_cell_params_t* get_cell_cfg() const { return cell_params; }
bool is_active() const { return active; }
uint32_t get_ue_cc_idx() const { return ue_cc_idx; }
void update_cell_activity();
std::array<dl_harq_proc, SCHED_MAX_HARQ_PROC> dl_harq = {};
std::array<ul_harq_proc, SCHED_MAX_HARQ_PROC> ul_harq = {};
@@ -94,9 +94,7 @@ private:
const sched_cell_params_t* cell_params = nullptr;
uint16_t rnti;
uint32_t ue_cc_idx = 0;
// state
bool active = false;
};
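The net effect on this struct: the active flag is now private state and is no longer writable through set_cfg(). Only the PCell starts active; SCells flip the flag through update_cell_activity() once a valid CQI arrives. A minimal lifecycle sketch, assuming the constructor arguments and accessors declared above:

    // hedged sketch, not part of the commit
    sched_ue_carrier scell{ue_cfg, cell_params, rnti, /*ue_cc_idx=*/1};
    bool was_active = scell.is_active(); // false: any ue_cc_idx > 0 starts inactive
    // ... later, a valid DL CQI for this carrier is received ...
    scell.update_cell_activity();        // flips 'active' if the RRC cfg marks it active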
/** This class is designed to be thread-safe because it is called from workers through scheduler thread and from
@@ -160,8 +158,8 @@ public:
uint32_t get_pending_dl_new_data_total();
void reset_pending_pids(uint32_t tti_rx, uint32_t cc_idx);
dl_harq_proc* get_pending_dl_harq(uint32_t tti, uint32_t cc_idx);
dl_harq_proc* get_empty_dl_harq(uint32_t cc_idx);
dl_harq_proc* get_pending_dl_harq(uint32_t tti_tx_dl, uint32_t cc_idx);
dl_harq_proc* get_empty_dl_harq(uint32_t tti_tx_dl, uint32_t cc_idx);
ul_harq_proc* get_ul_harq(uint32_t tti, uint32_t cc_idx);
/*******************************************************

@@ -81,9 +81,11 @@ bool dl_metric_rr::find_allocation(uint32_t nof_rbg, rbgmask_t* rbgmask)
dl_harq_proc* dl_metric_rr::allocate_user(sched_ue* user)
{
// Do not allocate a user multiple times in the same tti
if (tti_alloc->is_dl_alloc(user)) {
return nullptr;
}
// Do not allocate a user to an inactive carrier
auto p = user->get_cell_index(cc_cfg->enb_cc_idx);
if (not p.first) {
return nullptr;
@@ -97,11 +99,7 @@ dl_harq_proc* dl_metric_rr::allocate_user(sched_ue* user)
uint32_t req_bytes = user->get_pending_dl_new_data_total();
// Schedule retx if we have space
#if ASYNC_DL_SCHED
if (h != nullptr) {
#else
if (h && !h->is_empty()) {
#endif
// Try to reuse the same mask
rbgmask_t retx_mask = h->get_rbgmask();
code = tti_alloc->alloc_dl_user(user, retx_mask, h->get_id());
@@ -127,12 +125,8 @@ dl_harq_proc* dl_metric_rr::allocate_user(sched_ue* user)
}
// If could not schedule the reTx, or there wasn't any pending retx, find an empty PID
#if ASYNC_DL_SCHED
h = user->get_empty_dl_harq(cell_idx);
h = user->get_empty_dl_harq(tti_dl, cell_idx);
if (h != nullptr) {
#else
if (h && h->is_empty()) {
#endif
// Allocate resources based on pending data
if (req_bytes > 0) {
uint32_t pending_prbs = user->get_required_prb_dl(cell_idx, req_bytes, tti_alloc->get_nof_ctrl_symbols());

@@ -117,19 +117,15 @@ void sched_ue::set_cfg(const sched_interface::ue_cfg_t& cfg_)
if (ue_idx >= prev_supported_cc_list.size()) {
// New carrier needs to be added
carriers.emplace_back(cfg, (*cell_params_list)[cc_cfg.enb_cc_idx], rnti, ue_idx);
scell_activation_state_changed |= ue_idx > 0 and carriers.back().is_active();
} else if (cc_cfg.enb_cc_idx != prev_supported_cc_list[ue_idx].enb_cc_idx) {
// TODO: Check if this will ever happen.
// One carrier was added in the place of another
carriers[ue_idx] = sched_ue_carrier{cfg, (*cell_params_list)[cc_cfg.enb_cc_idx], rnti, ue_idx};
scell_activation_state_changed |= ue_idx > 0 and carriers[ue_idx].is_active();
} else {
// The enb_cc_idx, ue_cc_idx match previous configuration.
// The SCell state may have changed. In such case we will schedule a SCell Activation CE
scell_activation_state_changed = carriers[ue_idx].is_active() != cc_cfg.active and ue_idx > 0;
// reconfiguration of carrier might be needed.
// The SCell internal configuration may have changed
carriers[ue_idx].set_cfg(cfg);
}
scell_activation_state_changed |= carriers[ue_idx].is_active() != cc_cfg.active and ue_idx > 0;
}
if (scell_activation_state_changed) {
pending_ces.emplace_back(srslte::sch_subh::SCELL_ACTIVATION);
@@ -254,7 +250,13 @@ bool sched_ue::pucch_sr_collision(uint32_t current_tti, uint32_t n_cce)
int sched_ue::set_ack_info(uint32_t tti, uint32_t cc_idx, uint32_t tb_idx, bool ack)
{
std::lock_guard<std::mutex> lock(mutex);
return carriers[cc_idx].set_ack_info(tti, tb_idx, ack);
int ret = -1;
if (cc_idx < carriers.size()) {
ret = carriers[cc_idx].set_ack_info(tti, tb_idx, ack);
} else {
log_h->warning("Received DL ACK for invalid cell index %d\n", cc_idx);
}
return ret;
}
void sched_ue::ul_recv_len(uint32_t lcid, uint32_t len)
@@ -293,6 +295,10 @@ void sched_ue::set_dl_ri(uint32_t tti, uint32_t cc_idx, uint32_t ri)
void sched_ue::set_dl_pmi(uint32_t tti, uint32_t cc_idx, uint32_t pmi)
{
std::lock_guard<std::mutex> lock(mutex);
if (cc_idx >= carriers.size()) {
log_h->warning("Received DL PMI for invalid cell index %d\n", cc_idx);
return;
}
carriers[cc_idx].dl_pmi = pmi;
carriers[cc_idx].dl_pmi_tti = tti;
}
@@ -300,8 +306,13 @@ void sched_ue::set_dl_pmi(uint32_t tti, uint32_t cc_idx, uint32_t pmi)
void sched_ue::set_dl_cqi(uint32_t tti, uint32_t cc_idx, uint32_t cqi)
{
std::lock_guard<std::mutex> lock(mutex);
if (cc_idx >= carriers.size()) {
log_h->warning("Received DL CQI for invalid cell index %d\n", cc_idx);
return;
}
carriers[cc_idx].dl_cqi = cqi;
carriers[cc_idx].dl_cqi_tti = tti;
carriers[cc_idx].update_cell_activity();
}
void sched_ue::set_ul_cqi(uint32_t tti, uint32_t cc_idx, uint32_t cqi, uint32_t ul_ch_code)
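set_dl_cqi() is now the single place where an SCell actually becomes active. The end-to-end flow, as a sketch (dl_cqi_info is the scheduler entry point used by the test further down; the parameter roles are assumed from that call):

    // 1) RRC marks the SCell active in ue_cfg -> set_cfg() queues a
    //    SCELL_ACTIVATION CE, but the carrier itself stays inactive.
    // 2) The UE applies the CE and starts reporting CQI on the SCell;
    //    the CQI indication reaches the scheduler:
    sched.dl_cqi_info(tti_rx, rnti, /*cc_idx=*/1, /*cqi=*/14);
    // 3) set_dl_cqi() stores cqi/tti and calls update_cell_activity(),
    //    which finally activates the carrier for DL/UL allocations.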
@@ -898,18 +909,27 @@ void sched_ue::reset_pending_pids(uint32_t tti_rx, uint32_t cc_idx)
dl_harq_proc* sched_ue::get_pending_dl_harq(uint32_t tti_tx_dl, uint32_t cc_idx)
{
std::lock_guard<std::mutex> lock(mutex);
return carriers[cc_idx].get_pending_dl_harq(tti_tx_dl);
if (cc_idx < carriers.size() and carriers[cc_idx].is_active()) {
return carriers[cc_idx].get_pending_dl_harq(tti_tx_dl);
}
return nullptr;
}
dl_harq_proc* sched_ue::get_empty_dl_harq(uint32_t cc_idx)
dl_harq_proc* sched_ue::get_empty_dl_harq(uint32_t tti_tx_dl, uint32_t ue_cc_idx)
{
std::lock_guard<std::mutex> lock(mutex);
return carriers[cc_idx].get_empty_dl_harq();
if (ue_cc_idx < carriers.size() and carriers[ue_cc_idx].is_active()) {
return carriers[ue_cc_idx].get_empty_dl_harq(tti_tx_dl);
}
return nullptr;
}
ul_harq_proc* sched_ue::get_ul_harq(uint32_t tti_tx_ul, uint32_t cc_idx)
ul_harq_proc* sched_ue::get_ul_harq(uint32_t tti_tx_ul, uint32_t ue_cc_idx)
{
return carriers[cc_idx].get_ul_harq(tti_tx_ul);
if (ue_cc_idx < carriers.size() and carriers[ue_cc_idx].is_active()) {
return carriers[ue_cc_idx].get_ul_harq(tti_tx_ul);
}
return nullptr;
}
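All three HARQ getters now fail soft: an out-of-range index or a not-yet-activated carrier yields nullptr instead of indexing carriers[] unchecked. Caller-side sketch:

    // hedged sketch of the intended caller pattern
    ul_harq_proc* hul = user.get_ul_harq(tti_tx_ul, /*ue_cc_idx=*/1);
    if (hul == nullptr) {
      // carrier not configured for this UE, or no SCell CQI received yet,
      // so the scheduler simply skips this user on this carrier
    }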
dl_harq_proc* sched_ue::find_dl_harq(uint32_t tti_rx, uint32_t cc_idx)
@@ -929,14 +949,15 @@ dl_harq_proc* sched_ue::get_dl_harq(uint32_t idx, uint32_t cc_idx)
std::pair<bool, uint32_t> sched_ue::get_cell_index(uint32_t enb_cc_idx) const
{
auto it =
std::find_if(cfg.supported_cc_list.begin(),
cfg.supported_cc_list.end(),
[enb_cc_idx](const sched_interface::ue_cfg_t::cc_cfg_t& u) { return u.enb_cc_idx == enb_cc_idx; });
if (it == cfg.supported_cc_list.end()) {
return std::make_pair(false, 0);
auto it = std::find_if(
cfg.supported_cc_list.begin(),
cfg.supported_cc_list.end(),
[enb_cc_idx](const sched_interface::ue_cfg_t::cc_cfg_t& u) { return u.enb_cc_idx == enb_cc_idx and u.active; });
if (it != cfg.supported_cc_list.end()) {
uint32_t ue_cc_idx = std::distance(cfg.supported_cc_list.begin(), it);
return {carriers[ue_cc_idx].is_active(), ue_cc_idx};
}
return std::make_pair(true, it->enb_cc_idx);
return {false, 0};
}
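The returned pair reads as (carrier usable, ue_cc_idx), matching how dl_metric_rr::allocate_user consumes it above; note the second element is the index into the UE's own carriers vector, not the eNB carrier index. Sketch of the call pattern:

    auto p = user->get_cell_index(enb_cc_idx);
    if (not p.first) {
      return nullptr; // carrier not configured, or SCell not activated yet
    }
    uint32_t ue_cc_idx = p.second; // safe to index this user's carriers[]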
void sched_ue::finish_tti(const tti_params_t& tti_params, uint32_t enb_cc_idx)
@@ -1080,6 +1101,9 @@ sched_ue_carrier::sched_ue_carrier(const sched_interface::ue_cfg_t& cfg_,
log_h(srslte::logmap::get("MAC ")),
ue_cc_idx(ue_cc_idx_)
{
// only PCell starts active. Remaining ones wait for valid CQI
active = ue_cc_idx == 0;
// Init HARQ processes
for (uint32_t i = 0; i < dl_harq.size(); ++i) {
dl_harq[i].init(i);
@@ -1125,7 +1149,7 @@ void sched_ue_carrier::reset()
void sched_ue_carrier::set_cfg(const sched_interface::ue_cfg_t& cfg_)
{
if (cfg != nullptr and cfg->maxharq_tx == cfg_.maxharq_tx and active == cfg->supported_cc_list[ue_cc_idx].active) {
if (cfg != nullptr and cfg->maxharq_tx == cfg_.maxharq_tx) {
// nothing changed
return;
}
@@ -1135,7 +1159,6 @@ void sched_ue_carrier::set_cfg(const sched_interface::ue_cfg_t& cfg_)
dl_harq[i].set_cfg(cfg->maxharq_tx);
ul_harq[i].set_cfg(cfg->maxharq_tx);
}
active = cfg->supported_cc_list[ue_cc_idx].active;
}
void sched_ue_carrier::reset_old_pending_pids(uint32_t tti_rx)
@@ -1162,7 +1185,10 @@ void sched_ue_carrier::reset_old_pending_pids(uint32_t tti_rx)
dl_harq_proc* sched_ue_carrier::get_pending_dl_harq(uint32_t tti_tx_dl)
{
#if ASYNC_DL_SCHED
if (not ASYNC_DL_SCHED) {
dl_harq_proc* h = &dl_harq[tti_tx_dl % SCHED_MAX_HARQ_PROC];
return h->is_empty() ? nullptr : h;
}
int oldest_idx = -1;
uint32_t oldest_tti = 0;
@@ -1180,16 +1206,16 @@ dl_harq_proc* sched_ue_carrier::get_pending_dl_harq(uint32_t tti_tx_dl)
h = &dl_harq[oldest_idx];
}
return h;
#else
return &dl_harq[tti % SCHED_MAX_HARQ_PROC];
#endif
}
dl_harq_proc* sched_ue_carrier::get_empty_dl_harq()
dl_harq_proc* sched_ue_carrier::get_empty_dl_harq(uint32_t tti_tx_dl)
{
auto it =
std::find_if(dl_harq.begin(), dl_harq.end(), [](dl_harq_proc& h) { return h.is_empty(0) and h.is_empty(1); });
if (not ASYNC_DL_SCHED) {
dl_harq_proc* h = &dl_harq[tti_tx_dl % SCHED_MAX_HARQ_PROC];
return h->is_empty() ? h : nullptr;
}
auto it = std::find_if(dl_harq.begin(), dl_harq.end(), [](dl_harq_proc& h) { return h.is_empty(); });
return it != dl_harq.end() ? &(*it) : nullptr;
}
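Both getters now branch at run time on ASYNC_DL_SCHED instead of compiling one of two bodies. With synchronous DL HARQ the process id is fully determined by the TTI, which is why get_empty_dl_harq() gained the tti_tx_dl argument. A sketch of the mapping:

    // synchronous DL HARQ: one process per TTI slot, reused every
    // SCHED_MAX_HARQ_PROC subframes (8 for FDD)
    uint32_t pid = tti_tx_dl % SCHED_MAX_HARQ_PROC;
    // the 'pending' getter returns dl_harq[pid] only if it still holds data,
    // the 'empty' getter only if it is free, so the two never alias a grant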
@@ -1202,7 +1228,6 @@ int sched_ue_carrier::set_ack_info(uint32_t tti_rx, uint32_t tb_idx, bool ack)
return h.get_tbs(tb_idx);
}
}
Warning("SCHED: Received ACK info for unknown TTI=%d\n", tti_rx);
return -1;
}
@@ -1326,6 +1351,14 @@ uint32_t sched_ue_carrier::get_required_prb_ul(uint32_t req_bytes)
return n;
}
void sched_ue_carrier::update_cell_activity()
{
if (ue_cc_idx > 0 and active != cfg->supported_cc_list[ue_cc_idx].active) {
active = cfg->supported_cc_list[ue_cc_idx].active;
log_h->info("SCell index=%d is now %s\n", ue_cc_idx, active ? "active" : "inactive");
}
}
/*******************************************************
* MAC CE Command
******************************************************/

@@ -110,13 +110,6 @@ int run_sim1()
uint32_t prach_tti = 1, msg4_tot_delay = 10; // TODO: check correct value
uint32_t msg4_size = 20; // TODO: Check
uint32_t duration = 1000;
// auto process_ttis = [&generator, &tti_start, &tester]() {
// for (; tester.tti_counter <= generator.tti_counter;) {
// uint32_t tti = (tti_start + tester.tti_count) % 10240;
// log_global->step(tti);
// tester.run_tti(generator.tti_events[tester.tti_count]);
// }
// };
/* Simulation */
@@ -161,7 +154,8 @@ int run_sim1()
user->ue_cfg->supported_cc_list[i].enb_cc_idx = i;
}
tester.test_next_ttis(generator.tti_events);
// When a new DL tx takes place, it should also encode the CE
// When a DL newtx takes place, it should also encode the CE
for (uint32_t i = 0; i < 100; ++i) {
TESTASSERT(tester.tti_info.dl_sched_result[pcell_idx].nof_data_elems > 0);
if (tester.tti_info.dl_sched_result[pcell_idx].data[0].nof_pdu_elems[0] > 0) {
@@ -172,11 +166,24 @@
}
generator.step_tti();
tester.test_next_ttis(generator.tti_events);
}
// now we have two CCs
for (uint32_t i = 0; i < TX_DELAY; ++i) {
generator.step_tti();
}
tester.test_next_ttis(generator.tti_events);
// The UE has now received the CE
// Event: Generate a bit more data, it should *not* go through SCells until we send a CQI
generate_data(5, P_dl, P_ul_sr);
tester.test_next_ttis(generator.tti_events);
TESTASSERT(tester.sched_stats->users[rnti1].tot_dl_sched_data[0] > 0);
TESTASSERT(tester.sched_stats->users[rnti1].tot_dl_sched_data[1] == 0);
TESTASSERT(tester.sched_stats->users[rnti1].tot_ul_sched_data[0] > 0);
TESTASSERT(tester.sched_stats->users[rnti1].tot_ul_sched_data[1] == 0);
// Event: Generate a bit more data, now it should go through both cells
// Event: Scheduler receives dl_cqi for SCell. Data should go through SCells
const uint32_t cqi = 14;
tester.dl_cqi_info(tester.tti_info.tti_params.tti_rx, rnti1, 1, cqi);
generate_data(10, 1.0, 1.0);
tester.test_next_ttis(generator.tti_events);
TESTASSERT(tester.sched_stats->users[rnti1].tot_dl_sched_data[0] > 0);
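The test pins down the new contract: after the SCell is configured and the activation CE goes out, traffic must stay on the PCell (index 0) until dl_cqi_info() is called for the SCell. A condensed sketch of the assertion pattern (tot_dl_sched_data indices as used above; the post-CQI assertion is an assumption):

    auto& stats = tester.sched_stats->users[rnti1]; // alias for brevity
    // before the SCell CQI: everything on the PCell
    TESTASSERT(stats.tot_dl_sched_data[0] > 0 and stats.tot_dl_sched_data[1] == 0);
    // after dl_cqi_info(tti, rnti, 1, cqi): the SCell starts carrying data
    TESTASSERT(stats.tot_dl_sched_data[1] > 0); // assumed follow-up assert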

@@ -164,7 +164,9 @@ void sched_tester::before_sched()
d.has_ul_retx = hul->has_pending_retx();
d.has_ul_tx = d.has_ul_retx or d.ul_pending_data > 0;
srsenb::dl_harq_proc* hdl = user->get_pending_dl_harq(tti_info.tti_params.tti_tx_dl, CARRIER_IDX);
d.has_dl_tx = (hdl != nullptr) or (it.second.get_empty_dl_harq(CARRIER_IDX) != nullptr and d.dl_pending_data > 0);
d.has_dl_tx =
(hdl != nullptr) or
(it.second.get_empty_dl_harq(tti_info.tti_params.tti_tx_dl, CARRIER_IDX) != nullptr and d.dl_pending_data > 0);
d.has_ul_newtx = not d.has_ul_retx and d.ul_pending_data > 0;
tti_data.ue_data.insert(std::make_pair(rnti, d));
tti_data.total_ues.dl_pending_data += d.dl_pending_data;
