implementation of basic search for optimal rbgmask for subband CQI

Branch: master
Francisco authored 4 years ago, committed by Francisco Paisana
parent f8b6eae2bf
commit 4d3ff0d139
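In short, this change replaces the old "required number of RBGs" check in the DL newtx path with a byte-driven search over RBG masks: the scheduler asks the UE for a {minimum, maximum} interval of requested DL bytes, computes a conservative (lower-bound) TBS for the largest available RBG mask, and then greedily shrinks that mask, first dropping whole lowest-CQI subbands and then single lowest-CQI RBGs, until the grant no longer overshoots the upper bound. A condensed outline of the flow (illustration only; names refer to the functions added further down, this is not literal code from the diff):

// Sketch of the new DL newtx allocation flow:
//   req_bytes = ue.get_requested_dl_bytes(cc)                  // {min, max} pending bytes
//   mask      = find_available_rbgmask(...)                    // largest free allocation
//   tb        = compute_mcs_and_tbs_lower_bound(cell, tti, mask, dci_format)
//   wideband CQI: monotone search over nof_rbgs until tbs ~ req_bytes.stop()
//   subband CQI:  while tbs > req_bytes.stop(): remove_min_cqi_subband(), then remove_min_cqi_rbg()
//   the grant is rejected if the final TBS cannot even fit req_bytes.start()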

@@ -83,6 +83,7 @@ public:
/// Get total pending bytes to be transmitted in DL.
/// The amount of CEs to transmit depends on whether enb_cc_idx is UE's PCell
uint32_t get_pending_dl_bytes(uint32_t enb_cc_idx);
srsran::interval<uint32_t> get_requested_dl_bytes(uint32_t enb_cc_idx);
rbg_interval get_required_dl_rbgs(uint32_t enb_cc_idx);
uint32_t get_pending_dl_rlc_data() const;
uint32_t get_expected_dl_bitrate(uint32_t enb_cc_idx, int nof_rbgs = -1) const;
@@ -139,8 +140,6 @@ public:
bool pusch_enabled(tti_point tti_rx, uint32_t enb_cc_idx, bool needs_pdcch) const;
private:
- srsran::interval<uint32_t> get_requested_dl_bytes(uint32_t enb_cc_idx);
bool is_sr_triggered();
tbs_info allocate_new_dl_mac_pdu(sched_interface::dl_sched_data_t* data,

@@ -36,15 +36,17 @@ public:
K(K_),
wb_cqi_avg(init_dl_cqi),
bp_list(nof_bandwidth_parts(cell_nof_prb_), bandwidth_part_context(init_dl_cqi)),
- subband_cqi(srsran_cqi_hl_get_no_subbands(cell_nof_prb), 0)
+ subband_cqi(std::max(1, srsran_cqi_hl_get_no_subbands(cell_nof_prb)), 0)
{
srsran_assert(K <= 4, "K=%d outside of {0, 4}", K);
srsran_assert(K == 0 or cell_nof_prb_ > 6, "K > 0 not allowed for nof_prbs=6");
}
/// Set K value from upper layers. See TS 36.331, CQI-ReportPeriodic
void set_K(uint32_t K_)
{
srsran_assert(K <= 4, "K=%d outside of {0, 4}", K);
srsran_assert(K == 0 or cell_nof_prb > 6, "K > 0 not allowed for nof_prbs=6");
K = K_;
}
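For context on the std::max(1, ...) guard and the new asserts above: with higher-layer-configured subband feedback the subband size comes from TS 36.213 Table 7.2.1-3, and for the narrowest bandwidths no subbands are defined, so srsran_cqi_hl_get_no_subbands() can return 0; clamping to 1 keeps the subband_cqi vector non-empty in the wideband-only case. A hedged worked example (the per-bandwidth values assume that table and the helper's behaviour at 6 PRBs):

// nof_prb = 6   -> no subbands defined -> helper returns 0 -> std::max(1, 0) == 1 entry, K must stay 0
// nof_prb = 25  -> subband size 4 -> ceil(25 / 4)  = 7  subbands
// nof_prb = 50  -> subband size 6 -> ceil(50 / 6)  = 9  subbands
// nof_prb = 100 -> subband size 8 -> ceil(100 / 8) = 13 subbands (matches max_nof_subbands below)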
@@ -158,6 +160,19 @@ public:
tti_point last_cqi_info_tti() const { return last_pos_cqi_tti; }
int get_wb_cqi_info() const { return wb_cqi_avg; }
uint32_t rbg_to_sb_index(uint32_t rbg_index) const { return rbg_index * N() / cell_nof_rbg; }
/// Get CQI of given subband index
int get_subband_cqi(uint32_t subband_index) const
{
if (not subband_cqi_enabled()) {
return get_wb_cqi_info();
}
return bp_list[get_bp_index(subband_index)].last_feedback_tti.is_valid() ? subband_cqi[subband_index] : wb_cqi_avg;
}
private:
static const uint32_t max_subband_size = 8;
static const uint32_t max_nof_subbands = 13;
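To make the rbg_to_sb_index() mapping above concrete, a small worked example for a 100 PRB cell, where the RBG size is 4 (so cell_nof_rbg = 25) and, as in the table above, N() = 13 subbands; integer division floors the result:

// rbg  0 ->  0 * 13 / 25 = 0
// rbg  2 ->  2 * 13 / 25 = 1
// rbg 12 -> 12 * 13 / 25 = 6
// rbg 24 -> 24 * 13 / 25 = 12   (the last RBG falls in the last subband)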
@@ -177,8 +192,6 @@ private:
uint32_t prb_to_sb_index(uint32_t prb_index) const { return prb_index * N() / cell_nof_prb; }
- uint32_t rbg_to_sb_index(uint32_t rbg_index) const { return rbg_index * N() / cell_nof_rbg; }
srsran::interval<uint32_t> get_bp_sb_indexes(uint32_t bp_idx) const
{
return srsran::interval<uint32_t>{bp_idx * N() / J(), (bp_idx + 1) * N() / J()};
@@ -212,6 +225,15 @@ private:
srsran::bounded_vector<float, max_nof_subbands> subband_cqi;
};
/// Get the {RBG index, CQI} tuple corresponding to the set RBG with the lowest CQI
std::tuple<uint32_t, int> find_min_cqi_rbg(const rbgmask_t& mask, const sched_dl_cqi& dl_cqi);
/// Returns the same RBG mask, but with the RBGs of the subband with the lowest CQI reset
rbgmask_t remove_min_cqi_subband(const rbgmask_t& rbgmask, const sched_dl_cqi& dl_cqi);
/// Returns the same RBG mask, but with the RBG with the lowest CQI reset
rbgmask_t remove_min_cqi_rbg(const rbgmask_t& rbgmask, const sched_dl_cqi& dl_cqi);
} // namespace srsenb
#endif // SRSRAN_SCHED_DL_CQI_H

@@ -93,7 +93,7 @@ tbs_info cqi_to_tbs_dl(const sched_ue_cell& cell,
const rbgmask_t& rbgs,
uint32_t nof_re,
srsran_dci_format_t dci_format,
- int req_bytes = -1);
+ uint32_t req_bytes = std::numeric_limits<uint32_t>::max());
/// Compute UL grant optimal TBS and MCS given UE cell context and UL grant parameters
tbs_info
@@ -105,6 +105,19 @@ int get_required_prb_dl(const sched_ue_cell& cell,
uint32_t req_bytes);
uint32_t get_required_prb_ul(const sched_ue_cell& cell, uint32_t req_bytes);
tbs_info compute_mcs_and_tbs_lower_bound(const sched_ue_cell& ue_cell,
tti_point tti_tx_dl,
const rbgmask_t& rbg_mask,
srsran_dci_format_t dci_format);
bool find_optimal_rbgmask(const sched_ue_cell& ue_cell,
tti_point tti_tx_dl,
const rbgmask_t& dl_mask,
srsran_dci_format_t dci_format,
srsran::interval<uint32_t> req_bytes,
tbs_info& tb,
rbgmask_t& newtxmask);
} // namespace srsenb
#endif // SRSRAN_SCHED_UE_CELL_H

@@ -9,7 +9,8 @@
add_subdirectory(schedulers)
set(SOURCES mac.cc ue.cc sched.cc sched_carrier.cc sched_grid.cc sched_ue_ctrl/sched_harq.cc sched_ue.cc
- sched_ue_ctrl/sched_lch.cc sched_ue_ctrl/sched_ue_cell.cc sched_ue_ctrl/sched_dl_cqi.cc sched_phy_ch/sf_cch_allocator.cc sched_phy_ch/sched_dci.cc sched_helpers.cc)
+ sched_ue_ctrl/sched_lch.cc sched_ue_ctrl/sched_ue_cell.cc sched_ue_ctrl/sched_dl_cqi.cc
+ sched_phy_ch/sf_cch_allocator.cc sched_phy_ch/sched_dci.cc sched_helpers.cc)
add_library(srsenb_mac STATIC ${SOURCES} $<TARGET_OBJECTS:mac_schedulers>)
set(SOURCES mac_nr.cc)

@@ -495,23 +495,25 @@ alloc_result sf_sched::alloc_dl_user(sched_ue* user, const rbgmask_t& user_mask,
return alloc_result::no_rnti_opportunity;
}
- // Check if allocation would cause segmentation
+ srsran_dci_format_t dci_format = user->get_dci_format();
+ if (dci_format == SRSRAN_DCI_FORMAT1A and not is_contiguous(user_mask)) {
+   logger.warning("SCHED: Can't use distributed RBGs for DCI format 1A");
+   return alloc_result::invalid_grant_params;
+ }
+ // Check if allocation is too small to fit headers, BSR or would cause SRB0 segmentation
const dl_harq_proc& h = user->get_dl_harq(pid, cc_cfg->enb_cc_idx);
if (h.is_empty()) {
// It is newTx
- rbg_interval r = user->get_required_dl_rbgs(cc_cfg->enb_cc_idx);
- if (r.start() > user_mask.count()) {
-   logger.debug("SCHED: The number of RBGs allocated to rnti=0x%x will force segmentation", user->get_rnti());
+ srsran::interval<uint32_t> req_bytes = user->get_requested_dl_bytes(get_enb_cc_idx());
+ tbs_info tb = compute_mcs_and_tbs_lower_bound(*cc, get_tti_tx_dl(), user_mask, dci_format);
+ if ((int)req_bytes.start() > tb.tbs_bytes) {
+   logger.debug("SCHED: The number of RBGs allocated to rnti=0x%x is too small to fit essential control information",
+                user->get_rnti());
return alloc_result::invalid_grant_params;
}
}
- srsran_dci_format_t dci_format = user->get_dci_format();
- if (dci_format == SRSRAN_DCI_FORMAT1A and not is_contiguous(user_mask)) {
-   logger.warning("SCHED: Can't use distributed RBGs for DCI format 1A");
-   return alloc_result::invalid_grant_params;
- }
bool has_pusch_grant = is_ul_alloc(user->get_rnti()) or cc_results->is_ul_alloc(user->get_rnti());
// Check if there is space in the PUCCH for HARQ ACKs

@@ -141,7 +141,7 @@ tbs_info compute_min_mcs_and_tbs_from_required_bytes(uint32_t nof_prb,
{
// get max MCS/TBS that meets max coderate requirements
tbs_info tb_max = compute_mcs_and_tbs(nof_prb, nof_re, cqi, max_mcs, is_ul, ulqam64_enabled, use_tbs_index_alt);
- if (tb_max.tbs_bytes + 8 <= (int)req_bytes or tb_max.mcs == 0 or req_bytes <= 0) {
+ if (tb_max.tbs_bytes + 8 <= (int)req_bytes or tb_max.mcs == 0) {
// if mcs cannot be lowered or a decrease in TBS index won't meet req_bytes requirement
return tb_max;
}

@@ -13,7 +13,7 @@
#include "srsenb/hdr/stack/mac/sched_ue_ctrl/sched_dl_cqi.h"
#include "srsenb/hdr/stack/mac/schedulers/sched_base.h"
- using namespace srsenb;
+ namespace srsenb {
rbgmask_t sched_dl_cqi::get_optim_rbgmask(const rbgmask_t& dl_mask, uint32_t req_rbgs, bool max_flag) const
{
@@ -50,3 +50,56 @@ rbgmask_t sched_dl_cqi::get_optim_rbgmask(const rbgmask_t& dl_mask, uint32_t req
return emptymask;
}
std::tuple<uint32_t, int> find_min_cqi_rbg(const rbgmask_t& mask, const sched_dl_cqi& dl_cqi)
{
if (mask.none()) {
return std::make_tuple(mask.size(), -1);
}
int rbg = mask.find_lowest(0, mask.size());
if (not dl_cqi.subband_cqi_enabled()) {
return std::make_tuple(rbg, dl_cqi.get_wb_cqi_info());
}
int min_cqi = std::numeric_limits<int>::max();
uint32_t min_rbg = mask.size();
for (; rbg != -1; rbg = mask.find_lowest(rbg, mask.size())) {
uint32_t sb = dl_cqi.rbg_to_sb_index(rbg);
int cqi = dl_cqi.get_subband_cqi(sb);
if (cqi < min_cqi) {
min_cqi = cqi;
min_rbg = rbg;
}
rbg = (int)srsran::ceil_div((sb + 1U) * mask.size(), dl_cqi.nof_subbands()); // skip to next subband index
}
return min_cqi != std::numeric_limits<int>::max() ? std::make_tuple(min_rbg, min_cqi) : std::make_tuple(0u, -1);
}
rbgmask_t remove_min_cqi_subband(const rbgmask_t& rbgmask, const sched_dl_cqi& dl_cqi)
{
std::tuple<uint32_t, int> tup = find_min_cqi_rbg(rbgmask, dl_cqi);
if (std::get<1>(tup) < 0) {
return rbgmask_t(rbgmask.size());
}
uint32_t sb = dl_cqi.rbg_to_sb_index(std::get<0>(tup));
uint32_t rbg_begin = sb * rbgmask.size() / dl_cqi.nof_subbands();
uint32_t rbg_end = srsran::ceil_div((sb + 1) * rbgmask.size(), dl_cqi.nof_subbands());
rbgmask_t ret(rbgmask);
ret.fill(rbg_begin, rbg_end, false);
return ret;
}
rbgmask_t remove_min_cqi_rbg(const rbgmask_t& rbgmask, const sched_dl_cqi& dl_cqi)
{
std::tuple<uint32_t, int> tup = find_min_cqi_rbg(rbgmask, dl_cqi);
if (std::get<1>(tup) < 0) {
return rbgmask_t(rbgmask.size());
}
rbgmask_t ret(rbgmask);
ret.set(std::get<0>(tup), false);
return ret;
}
} // namespace srsenb
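As a concrete illustration of the RBG range cleared by remove_min_cqi_subband(), again for a 25-RBG / 13-subband cell (the numbers simply evaluate the rbg_begin/rbg_end expressions above):

// sb = 0  -> rbg_begin = 0 * 25 / 13  = 0,  rbg_end = ceil(1 * 25 / 13)  = 2  -> RBGs {0, 1} are reset
// sb = 12 -> rbg_begin = 12 * 25 / 13 = 23, rbg_end = ceil(13 * 25 / 13) = 25 -> RBGs {23, 24} are reset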

@@ -13,6 +13,7 @@
#include "srsenb/hdr/stack/mac/sched_ue_ctrl/sched_ue_cell.h"
#include "srsenb/hdr/stack/mac/sched_helpers.h"
#include "srsenb/hdr/stack/mac/sched_phy_ch/sched_dci.h"
#include "srsenb/hdr/stack/mac/schedulers/sched_base.h"
#include <numeric>
namespace srsenb {
@@ -234,7 +235,7 @@ tbs_info cqi_to_tbs_dl(const sched_ue_cell& cell,
const rbgmask_t& rbgs,
uint32_t nof_re,
srsran_dci_format_t dci_format,
- int req_bytes)
+ uint32_t req_bytes)
{
bool use_tbs_index_alt = cell.get_ue_cfg()->use_tbs_index_alt and dci_format != SRSRAN_DCI_FORMAT1A;
uint32_t nof_prbs = count_prb_per_tb(rbgs);
@@ -242,10 +243,10 @@ tbs_info cqi_to_tbs_dl(const sched_ue_cell& cell,
tbs_info ret;
if (cell.fixed_mcs_dl < 0 or not cell.dl_cqi().is_cqi_info_received()) {
// Dynamic MCS configured or first Tx
- uint32_t dl_cqi_avg = cell.dl_cqi().get_grant_avg_cqi(rbgs);
+ uint32_t dl_cqi = std::get<1>(find_min_cqi_rbg(rbgs, cell.dl_cqi()));
ret = compute_min_mcs_and_tbs_from_required_bytes(
- nof_prbs, nof_re, dl_cqi_avg, cell.max_mcs_dl, req_bytes, false, false, use_tbs_index_alt);
+ nof_prbs, nof_re, dl_cqi, cell.max_mcs_dl, req_bytes, false, false, use_tbs_index_alt);
// If coderate > SRSRAN_MIN(max_coderate, 0.932 * Qm) we should set TBS=0. We don't because it's not correctly
// handled by the scheduler, but we might be scheduling undecodable codewords at very low SNR
@@ -296,7 +297,7 @@ int get_required_prb_dl(const sched_ue_cell& cell,
auto compute_tbs_approx = [tti_tx_dl, &cell, dci_format](uint32_t nof_prb) {
uint32_t nof_re = cell.cell_cfg->get_dl_lb_nof_re(tti_tx_dl, nof_prb);
rbgmask_t min_cqi_rbgs = cell.dl_cqi().get_optim_rbgmask(nof_prb, false);
- tbs_info tb = cqi_to_tbs_dl(cell, min_cqi_rbgs, nof_re, dci_format, -1);
+ tbs_info tb = cqi_to_tbs_dl(cell, min_cqi_rbgs, nof_re, dci_format);
return tb.tbs_bytes;
};
@@ -340,4 +341,96 @@ uint32_t get_required_prb_ul(const sched_ue_cell& cell, uint32_t req_bytes)
return req_prbs;
}
/// Computes the minimum TBS/MCS achievable for provided UE cell configuration, RBG mask, TTI, DCI format
tbs_info compute_mcs_and_tbs_lower_bound(const sched_ue_cell& ue_cell,
tti_point tti_tx_dl,
const rbgmask_t& rbg_mask,
srsran_dci_format_t dci_format)
{
uint32_t nof_prbs = count_prb_per_tb(rbg_mask);
if (nof_prbs == 0) {
return tbs_info{};
}
uint32_t nof_re_lb = ue_cell.cell_cfg->get_dl_lb_nof_re(tti_tx_dl, nof_prbs);
return cqi_to_tbs_dl(ue_cell, rbg_mask, nof_re_lb, dci_format);
}
bool find_optimal_rbgmask(const sched_ue_cell& ue_cell,
tti_point tti_tx_dl,
const rbgmask_t& dl_mask,
srsran_dci_format_t dci_format,
srsran::interval<uint32_t> req_bytes,
tbs_info& tb,
rbgmask_t& newtxmask)
{
// Find the largest set of available RBGs possible
newtxmask = find_available_rbgmask(dl_mask.size(), dci_format == SRSRAN_DCI_FORMAT1A, dl_mask);
// Compute MCS/TBS if all available RBGs were allocated
tb = compute_mcs_and_tbs_lower_bound(ue_cell, tti_tx_dl, newtxmask, dci_format);
if (not ue_cell.dl_cqi().subband_cqi_enabled()) {
// Wideband CQI case
// NOTE: for wideband CQI, the TBS is directly proportional to the nof_prbs, so we can use an iterative method
// to compute the best mask given "req_bytes"
if (tb.tbs_bytes < (int)req_bytes.start()) {
// the grant is too small; it may lead to SRB0 segmentation or leave no space for headers
return false;
}
if (tb.tbs_bytes <= (int)req_bytes.stop()) {
// the grant is not sufficiently large to fit max required bytes. Stop search at this point
return true;
}
// Reduce DL grant size to the minimum that can fit the pending DL bytes
srsran::bounded_vector<tbs_info, MAX_NOF_RBGS> tb_table(newtxmask.count());
auto compute_tbs_approx = [tti_tx_dl, &ue_cell, dci_format, &tb_table](uint32_t nof_rbgs) {
rbgmask_t search_mask(ue_cell.cell_cfg->nof_rbgs);
search_mask.fill(0, nof_rbgs);
tb_table[nof_rbgs - 1] = compute_mcs_and_tbs_lower_bound(ue_cell, tti_tx_dl, search_mask, dci_format);
return tb_table[nof_rbgs - 1].tbs_bytes;
};
std::tuple<uint32_t, int, uint32_t, int> ret = false_position_method(
1U, tb_table.size(), (int)req_bytes.stop(), compute_tbs_approx, [](int y) { return y == SRSRAN_ERROR; });
uint32_t upper_nprb = std::get<2>(ret);
int upper_tbs = std::get<3>(ret);
if (upper_tbs >= (int)req_bytes.stop()) {
tb = tb_table[upper_nprb - 1];
}
return true;
}
// Subband CQI case
// NOTE: There is no monotonically increasing guarantee between TBS and nof allocated prbs.
// One single subband CQI could be dropping the CQI of the whole TB.
// We start with largest RBG allocation and continue removing RBGs. However, there is no guarantee this is
// going to be the optimal solution
// Subtract whole CQI subbands until objective is not met
// TODO: can be optimized
rbgmask_t smaller_mask;
tbs_info tb2;
do {
smaller_mask = remove_min_cqi_subband(newtxmask, ue_cell.dl_cqi());
tb2 = compute_mcs_and_tbs_lower_bound(ue_cell, tti_tx_dl, smaller_mask, dci_format);
if (tb2.tbs_bytes >= (int)req_bytes.stop() or tb.tbs_bytes <= tb2.tbs_bytes) {
tb = tb2;
newtxmask = smaller_mask;
}
} while (tb2.tbs_bytes > (int)req_bytes.stop());
if (tb.tbs_bytes <= (int)req_bytes.stop()) {
return true;
}
do {
smaller_mask = remove_min_cqi_rbg(newtxmask, ue_cell.dl_cqi());
tb2 = compute_mcs_and_tbs_lower_bound(ue_cell, tti_tx_dl, smaller_mask, dci_format);
if (tb2.tbs_bytes >= (int)req_bytes.stop() or tb.tbs_bytes <= tb2.tbs_bytes) {
tb = tb2;
newtxmask = smaller_mask;
}
} while (tb2.tbs_bytes > (int)req_bytes.stop());
return true;
}
} // namespace srsenb
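The wideband branch of find_optimal_rbgmask() leans on the TBS growing monotonically with the number of allocated RBGs, which is why a bracketing root search over nof_rbgs is sufficient; the subband branch cannot use that shortcut, because dropping a low-CQI subband may raise the selected MCS enough that a smaller grant carries more bytes, hence the greedy remove-and-recompute loops. As a rough, self-contained sketch of that kind of bracketing search (an illustration under the stated monotonicity assumption, not the false_position_method helper from sched_base.h, whose exact signature and corner-case handling may differ):

#include <algorithm>
#include <cstdint>
#include <functional>
#include <tuple>

// Narrows [xl, xu] around `target`, assuming f is non-decreasing on the interval.
// Returns {xl, f(xl), xu, f(xu)}, with f(xl) < target <= f(xu) when the target lies inside.
static std::tuple<uint32_t, int, uint32_t, int>
bracket_target(uint32_t xl, uint32_t xu, int target, const std::function<int(uint32_t)>& f)
{
  int yl = f(xl);
  int yu = f(xu);
  if (xu <= xl or yl >= target or yu < target) {
    return std::make_tuple(xl, yl, xu, yu); // target outside the bracket; nothing to narrow
  }
  while (xu - xl > 1) {
    // false-position pivot: linear interpolation towards the target value
    uint64_t num = (uint64_t)(xu - xl) * (uint32_t)(target - yl);
    uint32_t xm  = xl + (uint32_t)(num / (uint32_t)(yu - yl));
    xm           = std::min(std::max(xm, xl + 1), xu - 1); // keep the pivot strictly inside
    int ym       = f(xm);
    if (ym < target) {
      xl = xm;
      yl = ym;
    } else {
      xu = xm;
      yu = ym;
    }
  }
  return std::make_tuple(xl, yl, xu, yu);
}

In the wideband case above, x plays the role of the candidate number of RBGs, f() corresponds to compute_tbs_approx, and the caller keeps the upper end of the bracket: the smallest allocation whose TBS still reaches req_bytes.stop().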

@@ -153,22 +153,25 @@ alloc_result try_dl_newtx_alloc_greedy(sf_sched& tti_sched, sched_ue& ue, const
}
// If there is no data to transmit, no need to allocate
- rbg_interval req_rbgs = ue.get_required_dl_rbgs(tti_sched.get_enb_cc_idx());
- if (req_rbgs.stop() == 0) {
+ srsran::interval<uint32_t> req_bytes = ue.get_requested_dl_bytes(tti_sched.get_enb_cc_idx());
+ if (req_bytes.stop() == 0) {
return alloc_result::no_rnti_opportunity;
}
- // Find RBG mask that accommodates pending data
- bool is_contiguous_alloc = ue.get_dci_format() == SRSRAN_DCI_FORMAT1A;
- rbgmask_t newtxmask = find_available_rbgmask(req_rbgs.stop(), is_contiguous_alloc, current_mask);
- if (newtxmask.none() or newtxmask.count() < req_rbgs.start()) {
+ sched_ue_cell* ue_cell = ue.find_ue_carrier(tti_sched.get_enb_cc_idx());
+ srsran_assert(ue_cell != nullptr, "dl newtx alloc called for invalid cell");
+ srsran_dci_format_t dci_format = ue.get_dci_format();
+ tbs_info tb;
+ rbgmask_t opt_mask;
+ if (not find_optimal_rbgmask(
+     *ue_cell, tti_sched.get_tti_tx_dl(), current_mask, dci_format, req_bytes, tb, opt_mask)) {
return alloc_result::no_sch_space;
}
// empty RBGs were found. Attempt allocation
- alloc_result ret = tti_sched.alloc_dl_user(&ue, newtxmask, h.get_id());
+ alloc_result ret = tti_sched.alloc_dl_user(&ue, opt_mask, h.get_id());
if (ret == alloc_result::success and result_mask != nullptr) {
- *result_mask = newtxmask;
+ *result_mask = opt_mask;
}
return ret;
}
