favour carriers with best channel conditions for UL grants due to SRs

master
Francisco Paisana 4 years ago
parent 0ef8e1fdac
commit 688dda30a4

@@ -181,7 +181,7 @@ public:
rbg_interval get_required_dl_rbgs(uint32_t ue_cc_idx);
srslte::interval<uint32_t> get_requested_dl_bytes(uint32_t ue_cc_idx);
uint32_t get_pending_dl_new_data();
uint32_t get_pending_ul_new_data(uint32_t tti);
uint32_t get_pending_ul_new_data(uint32_t tti, int this_ue_cc_idx);
uint32_t get_pending_ul_old_data(uint32_t cc_idx);
uint32_t get_pending_dl_new_data_total();
@@ -247,7 +247,6 @@ private:
const srslte_dci_dl_t& dci);
uint32_t get_pending_ul_old_data_unlocked(uint32_t cc_idx);
uint32_t get_pending_ul_new_data_unlocked(uint32_t tti);
bool needs_cqi_unlocked(uint32_t tti, uint32_t cc_idx, bool will_send = false);

@@ -255,8 +255,7 @@ uint32_t sched::get_dl_buffer(uint16_t rnti)
{
// TODO: Check if correct use of last_tti
uint32_t ret = SRSLTE_ERROR;
ue_db_access(
rnti, [&ret](sched_ue& ue) { ret = ue.get_pending_dl_new_data(); }, __PRETTY_FUNCTION__);
ue_db_access(rnti, [&ret](sched_ue& ue) { ret = ue.get_pending_dl_new_data(); }, __PRETTY_FUNCTION__);
return ret;
}
@@ -265,7 +264,7 @@ uint32_t sched::get_ul_buffer(uint16_t rnti)
// TODO: Check if correct use of last_tti
uint32_t ret = SRSLTE_ERROR;
ue_db_access(
rnti, [this, &ret](sched_ue& ue) { ret = ue.get_pending_ul_new_data(last_tti); }, __PRETTY_FUNCTION__);
rnti, [this, &ret](sched_ue& ue) { ret = ue.get_pending_ul_new_data(last_tti, -1); }, __PRETTY_FUNCTION__);
return ret;
}

@@ -1152,7 +1152,7 @@ void sf_sched::set_ul_sched_result(const pdcch_grid_t::alloc_result_t& dci_resul
uci_pusch_t uci_type = is_uci_included(this, *cc_results, user, cc_cfg->enb_cc_idx);
/* Generate DCI Format1A */
uint32_t pending_data_before = user->get_pending_ul_new_data(get_tti_tx_ul());
uint32_t pending_data_before = user->get_pending_ul_new_data(get_tti_tx_ul(), cell_index);
int tbs = user->generate_format0(pusch,
get_tti_tx_ul(),
cell_index,
@@ -1173,7 +1173,7 @@ void sf_sched::set_ul_sched_result(const pdcch_grid_t::alloc_result_t& dci_resul
pusch->dci.location.L,
pusch->dci.location.ncce,
ul_alloc.alloc.to_string().c_str(),
user->get_pending_ul_new_data(get_tti_tx_ul()));
user->get_pending_ul_new_data(get_tti_tx_ul(), cell_index));
continue;
}
@@ -1189,7 +1189,7 @@ void sf_sched::set_ul_sched_result(const pdcch_grid_t::alloc_result_t& dci_resul
ul_alloc.alloc.to_string().c_str(),
h->nof_retx(0),
tbs,
user->get_pending_ul_new_data(get_tti_tx_ul()),
user->get_pending_ul_new_data(get_tti_tx_ul(), cell_index),
pending_data_before,
user->get_pending_ul_old_data(cell_index));

@@ -287,7 +287,7 @@ ul_harq_proc* ul_metric_rr::allocate_user_newtx_prbs(sched_ue* user)
}
uint32_t cell_idx = p.second;
uint32_t pending_data = user->get_pending_ul_new_data(current_tti);
uint32_t pending_data = user->get_pending_ul_new_data(current_tti, cell_idx);
ul_harq_proc* h = user->get_ul_harq(current_tti, cell_idx);
// find an empty PID

@@ -681,14 +681,14 @@ int sched_ue::generate_format2(uint32_t pid,
int sched_ue::generate_format0(sched_interface::ul_sched_data_t* data,
uint32_t tti,
uint32_t cc_idx,
uint32_t ue_cc_idx,
prb_interval alloc,
bool needs_pdcch,
srslte_dci_location_t dci_pos,
int explicit_mcs,
uci_pusch_t uci_type)
{
ul_harq_proc* h = get_ul_harq(tti, cc_idx);
ul_harq_proc* h = get_ul_harq(tti, ue_cc_idx);
srslte_dci_ul_t* dci = &data->dci;
bool cqi_request = needs_cqi_unlocked(tti, true);
@@ -697,7 +697,7 @@ int sched_ue::generate_format0(sched_interface::ul_sched_data_t* data,
data->needs_pdcch = needs_pdcch;
dci->location = dci_pos;
int mcs = (explicit_mcs >= 0) ? explicit_mcs : carriers[cc_idx].fixed_mcs_ul;
int mcs = (explicit_mcs >= 0) ? explicit_mcs : carriers[ue_cc_idx].fixed_mcs_ul;
int tbs = 0;
bool is_newtx = h->is_empty(0);
@@ -711,11 +711,11 @@ int sched_ue::generate_format0(sched_interface::ul_sched_data_t* data,
tbs = srslte_ra_tbs_from_idx(srslte_ra_tbs_idx_from_mcs(mcs, false, true), alloc.length()) / 8;
} else {
// dynamic mcs
uint32_t req_bytes = get_pending_ul_new_data_unlocked(tti);
uint32_t req_bytes = get_pending_ul_new_data(tti, ue_cc_idx);
uint32_t N_srs = 0;
uint32_t nof_symb = 2 * (SRSLTE_CP_NSYMB(cell.cp) - 1) - N_srs;
uint32_t nof_re = nof_symb * alloc.length() * SRSLTE_NRE;
tbs = carriers[cc_idx].alloc_tbs_ul(alloc.length(), nof_re, req_bytes, &mcs);
tbs = carriers[ue_cc_idx].alloc_tbs_ul(alloc.length(), nof_re, req_bytes, &mcs);
// Reduce MCS to fit UCI if transmitted in this grant
if (uci_type != UCI_PUSCH_NONE) {
@@ -733,7 +733,7 @@ int sched_ue::generate_format0(sched_interface::ul_sched_data_t* data,
}
// Recompute again the MCS and TBS with the new spectral efficiency (based on the available RE for data)
if (nof_re >= nof_uci_re) {
tbs = carriers[cc_idx].alloc_tbs_ul(alloc.length(), nof_re - nof_uci_re, req_bytes, &mcs);
tbs = carriers[ue_cc_idx].alloc_tbs_ul(alloc.length(), nof_re - nof_uci_re, req_bytes, &mcs);
}
// NOTE: if (nof_re < nof_uci_re) we should set TBS=0
}
@@ -753,7 +753,7 @@ int sched_ue::generate_format0(sched_interface::ul_sched_data_t* data,
data->tbs = tbs;
dci->rnti = rnti;
dci->format = SRSLTE_DCI_FORMAT0;
dci->ue_cc_idx = cc_idx;
dci->ue_cc_idx = ue_cc_idx;
dci->tb.ndi = h->get_ndi(0);
dci->cqi_request = cqi_request;
dci->freq_hop_fl = srslte_dci_ul_t::SRSLTE_RA_PUSCH_HOP_DISABLED;
@@ -972,18 +972,12 @@ uint32_t sched_ue::get_pending_dl_new_data()
return pending_data;
}
uint32_t sched_ue::get_pending_ul_new_data(uint32_t tti)
{
return get_pending_ul_new_data_unlocked(tti);
}
uint32_t sched_ue::get_pending_ul_old_data(uint32_t cc_idx)
{
return get_pending_ul_old_data_unlocked(cc_idx);
}
// Private lock-free implementation
uint32_t sched_ue::get_pending_ul_new_data_unlocked(uint32_t tti)
uint32_t sched_ue::get_pending_ul_new_data(uint32_t tti, int this_ue_cc_idx)
{
// Note: If there are no active bearers, scheduling requests are also ignored.
uint32_t pending_data = 0;
@@ -995,8 +989,19 @@ uint32_t sched_ue::get_pending_ul_new_data_unlocked(uint32_t tti)
}
}
if (pending_data == 0) {
if (is_sr_triggered() and ul_bearers_found) {
return 512;
if (is_sr_triggered() and ul_bearers_found and this_ue_cc_idx >= 0) {
// Check if this_cc_idx is the carrier with highest CQI
uint32_t max_cqi = 0, max_cc_idx = 0;
for (uint32_t cc_idx = 0; cc_idx < carriers.size(); ++cc_idx) {
uint32_t sum_cqi = carriers[cc_idx].dl_cqi + carriers[cc_idx].ul_cqi;
if (carriers[cc_idx].is_active() and sum_cqi > max_cqi) {
max_cqi = sum_cqi;
max_cc_idx = cc_idx;
}
}
if ((int)max_cc_idx == this_ue_cc_idx) {
return 512;
}
}
for (uint32_t cc_idx = 0; cc_idx < carriers.size(); ++cc_idx) {
if (needs_cqi_unlocked(tti, cc_idx)) {

@@ -1030,7 +1030,7 @@ int common_sched_tester::process_tti_events(const tti_ev& tti_ev)
if (ue_ev.buffer_ev->sr_data > 0 and user->drb_cfg_flag) {
uint32_t tot_ul_data =
ue_db[ue_ev.rnti].get_pending_ul_new_data(tti_info.tti_params.tti_tx_ul) + ue_ev.buffer_ev->sr_data;
ue_db[ue_ev.rnti].get_pending_ul_new_data(tti_info.tti_params.tti_tx_ul, -1) + ue_ev.buffer_ev->sr_data;
uint32_t lcg = 1;
ul_bsr(ue_ev.rnti, lcg, tot_ul_data);
}

Loading…
Cancel
Save