[sched, feature enhancement] - allow variable nof prbs allocated per SIB / Paging message

Branch: master
Commit: 47f1175502 (parent: 76103065f7)
Authored by Francisco, 4 years ago; committed by Francisco Paisana

@@ -154,7 +154,7 @@ uint32_t ra_re_x_prb(const srslte_cell_t* cell, srslte_dl_sf_cfg_t* sf, uint32_t
  /** Compute PRB allocation for Downlink as defined in 7.1.6 of 36.213
   * Decode grant->type?_alloc to grant
-  * This function only reads grant->type?_alloc and grant->alloc_type fields.
+  * This function only reads dci->type?_alloc (e.g. rbg_bitmask, mode, riv) and dci->alloc_type fields.
   * This function only writes grant->prb_idx and grant->nof_prb.
   */
  /** Compute PRB allocation for Downlink as defined in 7.1.6 of 36.213 */

@@ -91,6 +91,7 @@ private:
    // args
    const sched_cell_params_t* cc_cfg = nullptr;
    rrc_interface_mac* rrc = nullptr;
+   srslog::basic_logger& logger;
    std::array<sched_sib_t, sched_interface::MAX_SIBS> pending_sibs;

@@ -36,7 +36,8 @@ struct alloc_outcome_t {
     ALREADY_ALLOC,
     NO_DATA,
     INVALID_PRBMASK,
-    INVALID_CARRIER
+    INVALID_CARRIER,
+    INVALID_CODERATE
   };
   result_enum result = ERROR;
   alloc_outcome_t() = default;
@@ -101,6 +102,7 @@ public:
   void init(const sched_cell_params_t& cell_params_);
   void new_tti(tti_point tti_rx);
   dl_ctrl_alloc_t alloc_dl_ctrl(uint32_t aggr_lvl, alloc_type_t alloc_type);
+  alloc_outcome_t alloc_dl_ctrl(uint32_t aggr_lvl, rbg_interval rbg_range, alloc_type_t alloc_type);
   alloc_outcome_t alloc_dl_data(sched_ue* user, const rbgmask_t& user_mask, bool has_pusch_grant);
   bool reserve_dl_rbgs(uint32_t start_rbg, uint32_t end_rbg);
   alloc_outcome_t alloc_ul_data(sched_ue* user, prb_interval alloc, bool needs_pdcch, bool strict = true);
@@ -194,8 +196,8 @@ public:
   void new_tti(srslte::tti_point tti_rx_, sf_sched_result* cc_results);
   // DL alloc methods
-  alloc_outcome_t alloc_bc(uint32_t aggr_lvl, uint32_t sib_idx, uint32_t sib_ntx);
-  alloc_outcome_t alloc_paging(uint32_t aggr_lvl, uint32_t paging_payload);
+  alloc_outcome_t alloc_sib(uint32_t aggr_lvl, uint32_t sib_idx, uint32_t sib_ntx, rbg_interval rbgs);
+  alloc_outcome_t alloc_paging(uint32_t aggr_lvl, uint32_t paging_payload, rbg_interval rbgs);
   std::pair<alloc_outcome_t, uint32_t> alloc_rar(uint32_t aggr_lvl, const pending_rar_t& rar_grant);
   bool reserve_dl_rbgs(uint32_t rbg_start, uint32_t rbg_end) { return tti_alloc.reserve_dl_rbgs(rbg_start, rbg_end); }
   const std::vector<rar_alloc_t>& get_allocated_rars() const { return rar_allocs; }
@@ -244,12 +246,13 @@ private:
   sf_sched_result* cc_results; ///< Results of other CCs for the same Subframe
   // internal state
-  sf_grid_t tti_alloc;
-  std::vector<bc_alloc_t> bc_allocs;
-  std::vector<rar_alloc_t> rar_allocs;
-  std::vector<dl_alloc_t> data_allocs;
-  std::vector<ul_alloc_t> ul_data_allocs;
-  uint32_t last_msg3_prb = 0, max_msg3_prb = 0;
+  sf_grid_t tti_alloc;
+  srslte::bounded_vector<bc_alloc_t, sched_interface::MAX_BC_LIST> bc_allocs;
+  std::vector<rar_alloc_t> rar_allocs;
+  std::vector<dl_alloc_t> data_allocs;
+  std::vector<ul_alloc_t> ul_data_allocs;
+  uint32_t last_msg3_prb = 0, max_msg3_prb = 0;
   // Next TTI state
   tti_point tti_rx;

@@ -34,14 +34,16 @@ protected:
   /**************** Helper methods ****************/
+  rbg_interval find_empty_rbg_interval(uint32_t max_nof_rbgs, const rbgmask_t& current_mask);
   /**
    * Finds a bitmask of available RBG resources for a given UE in a greedy fashion
-   * @param ue UE being allocated
-   * @param enb_cc_idx carrier index
+   * @param is_contiguous whether to find a contiguous range of RBGs
    * @param current_mask bitmask of occupied RBGs, where to search for available RBGs
   * @return bitmask of found RBGs. If a valid mask wasn't found, bitmask::size() == 0
   */
-  rbgmask_t compute_user_rbgmask_greedy(sched_ue& ue, uint32_t enb_cc_idx, const rbgmask_t& current_mask);
+  rbgmask_t compute_rbgmask_greedy(uint32_t max_nof_rbgs, bool is_contiguous, const rbgmask_t& current_mask);
  /**
   * Finds a range of L contiguous PRBs that are empty

@@ -26,7 +26,9 @@ using srslte::tti_point;
  * Broadcast (SIB+Paging) scheduling
  *******************************************************/
- bc_sched::bc_sched(const sched_cell_params_t& cfg_, srsenb::rrc_interface_mac* rrc_) : cc_cfg(&cfg_), rrc(rrc_) {}
+ bc_sched::bc_sched(const sched_cell_params_t& cfg_, srsenb::rrc_interface_mac* rrc_) :
+   cc_cfg(&cfg_), rrc(rrc_), logger(srslog::fetch_basic_logger("MAC"))
+ {}
  void bc_sched::dl_sched(sf_sched* tti_sched)
  {
@@ -86,36 +88,66 @@ void bc_sched::update_si_windows(sf_sched* tti_sched)
  void bc_sched::alloc_sibs(sf_sched* tti_sched)
  {
-   uint32_t current_sf_idx = tti_sched->get_tti_tx_dl().sf_idx();
-   uint32_t current_sfn = tti_sched->get_tti_tx_dl().sfn();
+   const uint32_t max_nof_prbs_sib = 4;
+   uint32_t current_sf_idx = tti_sched->get_tti_tx_dl().sf_idx();
+   uint32_t current_sfn = tti_sched->get_tti_tx_dl().sfn();
 
-   for (uint32_t i = 0; i < pending_sibs.size(); i++) {
-     if (cc_cfg->cfg.sibs[i].len > 0 and pending_sibs[i].is_in_window and pending_sibs[i].n_tx < 4) {
-       uint32_t nof_tx = (i > 0) ? SRSLTE_MIN(srslte::ceil_div(cc_cfg->cfg.si_window_ms, 10), 4) : 4;
-       uint32_t n_sf = (tti_sched->get_tti_tx_dl() - pending_sibs[i].window_start);
+   for (uint32_t sib_idx = 0; sib_idx < pending_sibs.size(); sib_idx++) {
+     sched_sib_t& pending_sib = pending_sibs[sib_idx];
+     if (cc_cfg->cfg.sibs[sib_idx].len > 0 and pending_sib.is_in_window and pending_sib.n_tx < 4) {
+       uint32_t nof_tx = (sib_idx > 0) ? SRSLTE_MIN(srslte::ceil_div(cc_cfg->cfg.si_window_ms, 10), 4) : 4;
+       uint32_t n_sf = (tti_sched->get_tti_tx_dl() - pending_sibs[sib_idx].window_start);
 
        // Check if there is any SIB to tx
-       bool sib1_flag = (i == 0) and (current_sfn % 2) == 0 and current_sf_idx == 5;
-       bool other_sibs_flag =
-           (i > 0) and (n_sf >= (cc_cfg->cfg.si_window_ms / nof_tx) * pending_sibs[i].n_tx) and current_sf_idx == 9;
+       bool sib1_flag = (sib_idx == 0) and (current_sfn % 2) == 0 and current_sf_idx == 5;
+       bool other_sibs_flag = (sib_idx > 0) and
+                              (n_sf >= (cc_cfg->cfg.si_window_ms / nof_tx) * pending_sibs[sib_idx].n_tx) and
+                              current_sf_idx == 9;
        if (not sib1_flag and not other_sibs_flag) {
          continue;
        }
 
-       // Schedule SIB
-       tti_sched->alloc_bc(bc_aggr_level, i, pending_sibs[i].n_tx);
-       pending_sibs[i].n_tx++;
+       // Attempt different number of RBGs
+       bool success = false;
+       for (uint32_t nrbgs = 2; nrbgs < 5; ++nrbgs) {
+         rbg_interval rbg_interv = find_empty_rbg_interval(nrbgs, tti_sched->get_dl_mask());
+         if (rbg_interv.empty()) {
+           break;
+         }
+         alloc_outcome_t ret = tti_sched->alloc_sib(bc_aggr_level, sib_idx, pending_sibs[sib_idx].n_tx, rbg_interv);
+         if (ret != alloc_outcome_t::INVALID_CODERATE) {
+           if (ret == alloc_outcome_t::SUCCESS) {
+             // SIB scheduled successfully
+             success = true;
+             pending_sibs[sib_idx].n_tx++;
+           }
+           break;
+         }
+         // Attempt again, but with more RBGs
+       }
+       if (not success) {
+         logger.warning("SCHED: Could not allocate SIB=%d, len=%d", sib_idx + 1, cc_cfg->cfg.sibs[sib_idx].len);
+       }
      }
    }
  }
 
  void bc_sched::alloc_paging(sf_sched* tti_sched)
  {
    /* Allocate DCIs and RBGs for paging */
-   if (rrc != nullptr) {
-     uint32_t paging_payload = 0;
-     if (rrc->is_paging_opportunity(current_tti.to_uint(), &paging_payload) and paging_payload > 0) {
-       tti_sched->alloc_paging(bc_aggr_level, paging_payload);
+   uint32_t paging_payload = 0;
+   if (rrc->is_paging_opportunity(current_tti.to_uint(), &paging_payload) and paging_payload > 0) {
+     alloc_outcome_t ret = alloc_outcome_t::ERROR;
+     for (uint32_t nrbgs = 2; nrbgs < 5; ++nrbgs) {
+       rbg_interval rbg_interv = find_empty_rbg_interval(nrbgs, tti_sched->get_dl_mask());
+       ret = tti_sched->alloc_paging(bc_aggr_level, paging_payload, rbg_interv);
+       if (ret == alloc_outcome_t::SUCCESS or ret == alloc_outcome_t::RB_COLLISION) {
+         break;
+       }
+     }
+     if (ret != alloc_outcome_t::SUCCESS) {
+       logger.warning(
+           "SCHED: Could not allocate Paging with payload length=%d, cause=%s", paging_payload, ret.to_string());
+     }
    }
  }
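Both retry loops above share one shape: start from a 2-RBG interval and widen it only while the allocation fails with INVALID_CODERATE; any other outcome ends the search. A minimal standalone sketch of that pattern, where alloc_result, try_alloc and the per-RBG byte capacity are illustrative stand-ins rather than srsRAN APIs:

    #include <cstdint>
    #include <cstdio>

    enum class alloc_result { success, invalid_coderate, rb_collision };

    // Hypothetical allocator: the payload only fits once enough RBGs are offered.
    static alloc_result try_alloc(uint32_t nof_rbgs, uint32_t payload_bytes)
    {
      const uint32_t bytes_per_rbg = 57; // illustrative capacity, not a 36.213 figure
      return (payload_bytes <= nof_rbgs * bytes_per_rbg) ? alloc_result::success
                                                         : alloc_result::invalid_coderate;
    }

    int main()
    {
      bool success = false;
      // Try 2, 3 and 4 RBGs; only a coderate failure triggers a wider attempt.
      for (uint32_t nrbgs = 2; nrbgs < 5; ++nrbgs) {
        alloc_result ret = try_alloc(nrbgs, 150);
        if (ret != alloc_result::invalid_coderate) {
          success = (ret == alloc_result::success);
          break;
        }
      }
      std::printf("allocated=%d\n", success ? 1 : 0);
      return 0;
    }

The upper bound of 4 RBGs mirrors the loops above; what makes a failed coderate check cheap to retry is the sf_sched reordering further down, which generates the DCI before committing any resources.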
@@ -143,10 +175,12 @@ void ra_sched::dl_sched(sf_sched* tti_sched)
    tti_point tti_tx_dl = tti_sched->get_tti_tx_dl();
    rar_aggr_level = 2;
-   while (not pending_rars.empty()) {
-     pending_rar_t& rar = pending_rars.front();
+   for (auto it = pending_rars.begin(); it != pending_rars.end();) {
+     auto& rar = *it;
 
-     // Discard all RARs out of the window. The first one inside the window is scheduled, if we can't we exit
+     // In case of RAR outside RAR window:
+     // - if window has passed, discard RAR
+     // - if window hasn't started, stop loop, as RARs are ordered by TTI
      srslte::tti_interval rar_window{rar.prach_tti + PRACH_RAR_OFFSET,
                                      rar.prach_tti + PRACH_RAR_OFFSET + cc_cfg->cfg.prach_rar_window};
      if (not rar_window.contains(tti_tx_dl)) {
@@ -159,34 +193,40 @@ void ra_sched::dl_sched(sf_sched* tti_sched)
                       tti_tx_dl);
        srslte::console("%s\n", srslte::to_c_str(str_buffer));
        logger.error("%s", srslte::to_c_str(str_buffer));
-       // Remove from pending queue and get next one if window has passed already
-       pending_rars.pop_front();
+       it = pending_rars.erase(it);
        continue;
      }
      // If window not yet started do not look for more pending RARs
      return;
    }
 
    // Try to schedule DCI + RBGs for RAR Grant
    std::pair<alloc_outcome_t, uint32_t> ret = tti_sched->alloc_rar(rar_aggr_level, rar);
 
+   // If RAR allocation was successful:
+   // - in case all Msg3 grants were allocated, remove pending RAR
+   // - otherwise, erase only Msg3 grants that were allocated.
+   if (ret.first == alloc_outcome_t::SUCCESS) {
+     uint32_t nof_rar_allocs = ret.second;
+     if (nof_rar_allocs == rar.msg3_grant.size()) {
+       pending_rars.erase(it);
+     } else {
+       std::copy(rar.msg3_grant.begin() + nof_rar_allocs, rar.msg3_grant.end(), rar.msg3_grant.begin());
+       rar.msg3_grant.resize(rar.msg3_grant.size() - nof_rar_allocs);
+     }
+     break;
+   }
 
+   // If RAR allocation was not successful:
+   // - in case of unavailable RBGs, stop loop
+   // - otherwise, attempt to schedule next pending RAR
+   logger.info("SCHED: Could not allocate RAR for L=%d, cause=%s", rar_aggr_level, ret.first.to_string());
    if (ret.first == alloc_outcome_t::RB_COLLISION) {
      // there are not enough RBs for RAR or Msg3 allocation. We can skip this TTI
      return;
    }
-   if (ret.first != alloc_outcome_t::SUCCESS) {
-     // try to scheduler next RAR with different RA-RNTI
-     continue;
-   }
 
-   uint32_t nof_rar_allocs = ret.second;
-   if (nof_rar_allocs == rar.msg3_grant.size()) {
-     // all RAR grants were allocated. Remove pending RAR
-     pending_rars.pop_front();
-   } else {
-     // keep the RAR grants that were not scheduled, so we can schedule in next TTI
-     std::copy(rar.msg3_grant.begin() + nof_rar_allocs, rar.msg3_grant.end(), rar.msg3_grant.begin());
-     rar.msg3_grant.resize(rar.msg3_grant.size() - nof_rar_allocs);
-   }
+   // For any other type of error, continue with next pending RAR
+   ++it;
  }
 }
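The loop rewrite replaces while/front()/pop_front() with the standard erase-or-advance idiom, so an expired RAR is dropped wherever it sits in the queue and the iteration still terminates on every path. A self-contained sketch of the idiom; pending_rar, the fixed window of 10 subframes and the missing allocation step are simplifications, not the scheduler's real types:

    #include <cstdint>
    #include <deque>

    struct pending_rar {
      uint32_t prach_tti;
    };

    // Drop entries whose RAR window has passed; stop at the first future one,
    // since the queue is ordered by TTI.
    static void prune_expired(std::deque<pending_rar>& pending, uint32_t tti_tx_dl)
    {
      const uint32_t window = 10; // illustrative, stands in for prach_rar_window
      for (auto it = pending.begin(); it != pending.end();) {
        if (it->prach_tti + window < tti_tx_dl) {
          it = pending.erase(it); // erase() hands back the next valid iterator
          continue;
        }
        if (it->prach_tti > tti_tx_dl) {
          return; // window not started; later entries start even later
        }
        ++it; // in-window: the real code attempts an allocation here
      }
    }

    int main()
    {
      std::deque<pending_rar> q{{10}, {200}};
      prune_expired(q, 100);             // removes the first entry only
      return static_cast<int>(q.size()); // 1
    }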

@@ -202,20 +202,25 @@ sf_grid_t::dl_ctrl_alloc_t sf_grid_t::alloc_dl_ctrl(uint32_t aggr_idx, alloc_typ
    rbg_interval range{nof_rbgs - avail_rbg,
                       nof_rbgs - avail_rbg + ((alloc_type == alloc_type_t::DL_RAR) ? rar_n_rbg : si_n_rbg)};
+   return {alloc_dl_ctrl(aggr_idx, range, alloc_type), range};
+ }
+
+ alloc_outcome_t sf_grid_t::alloc_dl_ctrl(uint32_t aggr_idx, rbg_interval rbg_range, alloc_type_t alloc_type)
+ {
    if (alloc_type != alloc_type_t::DL_RAR and alloc_type != alloc_type_t::DL_BC and
        alloc_type != alloc_type_t::DL_PCCH) {
      logger.error("SCHED: DL control allocations must be RAR/BC/PDCCH");
-     return {alloc_outcome_t::ERROR, range};
+     return alloc_outcome_t::ERROR;
    }
-   // Setup range starting from left
-   if (range.stop() > nof_rbgs) {
-     return {alloc_outcome_t::RB_COLLISION, range};
+   // Setup rbg_range starting from left
+   if (rbg_range.stop() > nof_rbgs) {
+     return alloc_outcome_t::RB_COLLISION;
    }
    // allocate DCI and RBGs
    rbgmask_t new_mask(dl_mask.size());
-   new_mask.fill(range.start(), range.stop());
-   return {alloc_dl(aggr_idx, alloc_type, new_mask), range};
+   new_mask.fill(rbg_range.start(), rbg_range.stop());
+   return alloc_dl(aggr_idx, alloc_type, new_mask);
  }
 
  //! Allocates CCEs and RBs for a user DL data alloc.
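After the split, the original overload only computes the default left-aligned interval and forwards it, while the new overload validates an arbitrary interval and fills the RBG mask; the CCE/PDCCH side handled by alloc_dl is left out below. A sketch of the range-taking half, with std::vector<bool> standing in for the real rbgmask_t bitset:

    #include <cstdint>
    #include <vector>

    enum class outcome { success, rb_collision };

    // Stand-in for sf_grid_t::alloc_dl_ctrl(aggr_idx, rbg_range, alloc_type):
    // reject intervals beyond the cell's RBG count, otherwise mark RBGs as used.
    static outcome alloc_ctrl_range(std::vector<bool>& dl_mask, uint32_t start, uint32_t stop)
    {
      if (stop > dl_mask.size()) {
        return outcome::rb_collision; // interval does not fit the grid
      }
      for (uint32_t i = start; i < stop; ++i) {
        dl_mask[i] = true; // the moral equivalent of new_mask.fill(start, stop)
      }
      return outcome::success;
    }

    int main()
    {
      std::vector<bool> mask(13); // e.g. 13 RBGs for a 50-PRB cell
      return alloc_ctrl_range(mask, 0, 2) == outcome::success ? 0 : 1;
    }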
@@ -404,59 +409,60 @@ sf_sched::ctrl_code_t sf_sched::alloc_dl_ctrl(uint32_t aggr_lvl, uint32_t tbs_by
    return {ret.outcome, ctrl_alloc};
  }
 
- alloc_outcome_t sf_sched::alloc_bc(uint32_t aggr_lvl, uint32_t sib_idx, uint32_t sib_ntx)
+ alloc_outcome_t sf_sched::alloc_sib(uint32_t aggr_lvl, uint32_t sib_idx, uint32_t sib_ntx, rbg_interval rbgs)
  {
-   uint32_t sib_len = cc_cfg->cfg.sibs[sib_idx].len;
-   uint32_t rv = get_rvidx(sib_ntx);
-   ctrl_code_t ret = alloc_dl_ctrl(aggr_lvl, sib_len, SRSLTE_SIRNTI);
-   if (not ret.first) {
-     logger.warning("SCHED: Could not allocate SIB=%d, L=%d, len=%d, cause=%s",
-                    sib_idx + 1,
-                    aggr_lvl,
-                    sib_len,
-                    ret.first.to_string());
-     return ret.first;
+   if (bc_allocs.size() >= sched_interface::MAX_BC_LIST) {
+     logger.warning("SCHED: Maximum number of Broadcast allocations reached");
+     return alloc_outcome_t::ERROR;
    }
+   bc_alloc_t bc_alloc;
 
-   // BC allocation successful
-   bc_alloc_t bc_alloc(ret.second);
+   // Generate DCI for SIB
+   if (not generate_sib_dci(bc_alloc.bc_grant, get_tti_tx_dl(), sib_idx, sib_ntx, rbgs, *cc_cfg, tti_alloc.get_cfi())) {
+     return alloc_outcome_t::INVALID_CODERATE;
+   }
-   if (not generate_sib_dci(
-           bc_alloc.bc_grant, get_tti_tx_dl(), sib_idx, sib_ntx, ret.second.rbg_range, *cc_cfg, tti_alloc.get_cfi())) {
-     logger.warning("SCHED: FAIL");
-     return alloc_outcome_t::ERROR;
-   }
 
+   // Allocate SIB RBGs and PDCCH
+   alloc_outcome_t ret = tti_alloc.alloc_dl_ctrl(aggr_lvl, rbgs, alloc_type_t::DL_BC);
+   if (ret != alloc_outcome_t::SUCCESS) {
+     return ret;
+   }
 
+   // Allocation Successful
+   bc_alloc.dci_idx = tti_alloc.get_pdcch_grid().nof_allocs() - 1;
+   bc_alloc.rbg_range = rbgs;
+   bc_alloc.req_bytes = cc_cfg->cfg.sibs[sib_idx].len;
    bc_allocs.push_back(bc_alloc);
-   return ret.first;
+   return alloc_outcome_t::SUCCESS;
  }
 
- alloc_outcome_t sf_sched::alloc_paging(uint32_t aggr_lvl, uint32_t paging_payload)
+ alloc_outcome_t sf_sched::alloc_paging(uint32_t aggr_lvl, uint32_t paging_payload, rbg_interval rbgs)
  {
+   if (bc_allocs.size() >= sched_interface::MAX_BC_LIST) {
+     logger.warning("SCHED: Maximum number of Broadcast allocations reached");
+     return alloc_outcome_t::ERROR;
+   }
-   ctrl_code_t ret = alloc_dl_ctrl(aggr_lvl, paging_payload, SRSLTE_PRNTI);
-   if (not ret.first) {
-     logger.warning(
-         "SCHED: Could not allocate Paging with payload length=%d, cause=%s", paging_payload, ret.first.to_string());
-     return ret.first;
-   }
+   bc_alloc_t bc_alloc;
 
-   // Paging allocation successful
-   bc_alloc_t bc_alloc(ret.second);
+   // Generate DCI for Paging message
+   if (not generate_paging_dci(bc_alloc.bc_grant, get_tti_tx_dl(), paging_payload, rbgs, *cc_cfg, tti_alloc.get_cfi())) {
+     return alloc_outcome_t::INVALID_CODERATE;
+   }
-   if (not generate_paging_dci(
-           bc_alloc.bc_grant, get_tti_tx_dl(), paging_payload, ret.second.rbg_range, *cc_cfg, tti_alloc.get_cfi())) {
-     logger.warning("SCHED: FAIL");
-     return alloc_outcome_t::ERROR;
-   }
 
+   // Allocate Paging RBGs and PDCCH
+   alloc_outcome_t ret = tti_alloc.alloc_dl_ctrl(aggr_lvl, rbgs, alloc_type_t::DL_PCCH);
+   if (ret != alloc_outcome_t::SUCCESS) {
+     return ret;
+   }
 
+   // Allocation Successful
+   bc_alloc.dci_idx = tti_alloc.get_pdcch_grid().nof_allocs() - 1;
+   bc_alloc.rbg_range = rbgs;
+   bc_alloc.req_bytes = paging_payload;
    bc_allocs.push_back(bc_alloc);
-   return ret.first;
+   return alloc_outcome_t::SUCCESS;
  }
 
  std::pair<alloc_outcome_t, uint32_t> sf_sched::alloc_rar(uint32_t aggr_lvl, const pending_rar_t& rar)
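The key inversion in both functions above is ordering: the DCI is generated first, and the RBGs and PDCCH are only committed once the payload is known to fit the coderate, which is what makes INVALID_CODERATE a cheap, side-effect-free signal for the caller's widen-and-retry loop. A reduced sketch of that validate-then-commit shape, where bc_grant, generate_dci and the capacity constant are hypothetical simplifications:

    #include <cstdint>
    #include <vector>

    enum class outcome { success, invalid_coderate };

    struct bc_grant {
      uint32_t tbs = 0;
    };

    // Hypothetical DCI builder: fails when the payload exceeds what the given
    // number of RBGs can carry (capacity figure is illustrative only).
    static bool generate_dci(bc_grant& grant, uint32_t payload_bytes, uint32_t nof_rbgs)
    {
      const uint32_t capacity = nof_rbgs * 57;
      if (payload_bytes > capacity) {
        return false;
      }
      grant.tbs = payload_bytes;
      return true;
    }

    static outcome alloc_bc(std::vector<bc_grant>& bc_allocs, uint32_t payload_bytes, uint32_t nof_rbgs)
    {
      bc_grant grant;
      // 1) validate: nothing committed yet, so failing here has no side effects
      if (not generate_dci(grant, payload_bytes, nof_rbgs)) {
        return outcome::invalid_coderate;
      }
      // 2) commit: record the allocation only after the DCI is known to be valid
      bc_allocs.push_back(grant);
      return outcome::success;
    }

    int main()
    {
      std::vector<bc_grant> allocs;
      return alloc_bc(allocs, 100, 2) == outcome::success ? 0 : 1;
    }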
@@ -499,7 +505,6 @@ std::pair<alloc_outcome_t, uint32_t> sf_sched::alloc_rar(uint32_t aggr_lvl, cons
        return ret;
      }
    } else if (ret.first != alloc_outcome_t::RB_COLLISION) {
-     logger.warning("SCHED: Could not allocate RAR for L=%d, cause=%s", aggr_lvl, ret.first.to_string());
      return ret;
    }

@@ -60,7 +60,12 @@ rbgmask_t find_available_rb_mask(const rbgmask_t& in_mask, uint32_t max_size)
    return localmask;
  }
 
- rbgmask_t compute_user_rbgmask_greedy(uint32_t max_nof_rbgs, bool is_contiguous, const rbgmask_t& current_mask)
+ rbg_interval find_empty_rbg_interval(uint32_t max_nof_rbgs, const rbgmask_t& current_mask)
+ {
+   return find_contiguous_interval(current_mask, max_nof_rbgs);
+ }
+
+ rbgmask_t compute_rbgmask_greedy(uint32_t max_nof_rbgs, bool is_contiguous, const rbgmask_t& current_mask)
  {
    // Allocate enough RBs that accommodate pending data
    rbgmask_t newtx_mask(current_mask.size());
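find_empty_rbg_interval simply defers to find_contiguous_interval. Assuming that helper returns a free contiguous run capped at max_nof_rbgs (a reading of the call sites above, not a quote of its implementation), a standalone equivalent over std::vector<bool> could look like this:

    #include <algorithm>
    #include <cstdint>
    #include <utility>
    #include <vector>

    using interval = std::pair<uint32_t, uint32_t>; // [start, stop)

    // Return the longest run of unset bits (first among ties), capped at
    // max_len; {0, 0} signals that no free run exists.
    static interval find_gap(const std::vector<bool>& mask, uint32_t max_len)
    {
      uint32_t best_start = 0, best_len = 0, start = 0, len = 0;
      for (uint32_t i = 0; i <= mask.size(); ++i) {
        if (i < mask.size() and not mask[i]) {
          if (len++ == 0) {
            start = i; // a new free run begins here
          }
        } else {
          if (len > best_len) {
            best_start = start;
            best_len = len;
          }
          len = 0;
        }
      }
      best_len = std::min(best_len, max_len); // never hand out more than asked for
      return best_len == 0 ? interval{0, 0} : interval{best_start, best_start + best_len};
    }

    int main()
    {
      std::vector<bool> mask{true, false, false, true, false, false, false};
      interval gap = find_gap(mask, 4); // -> {4, 7}
      return static_cast<int>(gap.second - gap.first);
    }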
@@ -119,7 +124,7 @@ alloc_outcome_t try_dl_retx_alloc(sf_sched& tti_sched, sched_ue& ue, const dl_ha
    // If previous mask does not fit, find another with exact same number of rbgs
    size_t nof_rbg = retx_mask.count();
    bool is_contiguous_alloc = ue.get_dci_format() == SRSLTE_DCI_FORMAT1A;
-   retx_mask = compute_user_rbgmask_greedy(nof_rbg, is_contiguous_alloc, tti_sched.get_dl_mask());
+   retx_mask = compute_rbgmask_greedy(nof_rbg, is_contiguous_alloc, tti_sched.get_dl_mask());
    if (retx_mask.count() == nof_rbg) {
      return tti_sched.alloc_dl_user(&ue, retx_mask, h.get_id());
    }
@@ -147,7 +152,7 @@ try_dl_newtx_alloc_greedy(sf_sched& tti_sched, sched_ue& ue, const dl_harq_proc&
    // Find RBG mask that accommodates pending data
    bool is_contiguous_alloc = ue.get_dci_format() == SRSLTE_DCI_FORMAT1A;
-   rbgmask_t newtxmask = compute_user_rbgmask_greedy(req_rbgs.stop(), is_contiguous_alloc, current_mask);
+   rbgmask_t newtxmask = compute_rbgmask_greedy(req_rbgs.stop(), is_contiguous_alloc, current_mask);
    if (newtxmask.none() or newtxmask.count() < req_rbgs.start()) {
      return alloc_outcome_t::RB_COLLISION;
    }

@@ -479,6 +479,7 @@ void rrc::ue::rrc_mobility::fill_mobility_reconf_common(asn1::rrc::dl_dcch_msg_s
    intralte.next_hop_chaining_count = rrc_ue->ue_security_cfg.get_ncc();
    // Add MeasConfig of target cell
+   rrc_ue->current_ue_cfg = {};
    recfg_r8.meas_cfg_present = apply_meascfg_updates(
        recfg_r8.meas_cfg, rrc_ue->current_ue_cfg.meas_cfg, rrc_ue->ue_cell_list, src_dl_earfcn, src_pci);

@@ -17,6 +17,7 @@
  #include "sched_common_test_suite.h"
  #include "sched_ue_ded_test_suite.h"
  #include "srslte/common/test_common.h"
+ #include "srslte/interfaces/enb_rrc_interfaces.h"
 
  using namespace srsenb;
@@ -42,6 +43,15 @@ std::default_random_engine& ::srsenb::get_rand_gen()
    return rand_gen;
  }
 
+ struct rrc_dummy : public rrc_interface_mac {
+ public:
+   int add_user(uint16_t rnti, const sched_interface::ue_cfg_t& init_ue_cfg) { return SRSLTE_SUCCESS; }
+   void upd_user(uint16_t new_rnti, uint16_t old_rnti) {}
+   void set_activity_user(uint16_t rnti) {}
+   bool is_paging_opportunity(uint32_t tti, uint32_t* payload_len) { return false; }
+   uint8_t* read_pdu_bcch_dlsch(const uint8_t enb_cc_idx, const uint32_t sib_index) { return nullptr; }
+ };
+
  /***********************
  * User State Tester
  ***********************/
@@ -116,6 +126,10 @@ sched_result_stats::user_stats* sched_result_stats::get_user(uint16_t rnti)
   * Common Sched Tester
   **********************/
+ common_sched_tester::common_sched_tester() : logger(srslog::fetch_basic_logger("TEST")) {}
+
+ common_sched_tester::~common_sched_tester() {}
+
  const sched::ue_cfg_t* common_sched_tester::get_current_ue_cfg(uint16_t rnti) const
  {
    return sched_sim->get_user_cfg(rnti);
@@ -124,8 +138,9 @@ const sched::ue_cfg_t* common_sched_tester::get_current_ue_cfg(uint16_t rnti) co
  int common_sched_tester::sim_cfg(sim_sched_args args)
  {
    sim_args0 = std::move(args);
+   rrc_ptr.reset(new rrc_dummy());
-   sched::init(nullptr, sim_args0.sched_args);
+   sched::init(rrc_ptr.get(), sim_args0.sched_args);
    sched_sim.reset(new sched_sim_random{this, sim_args0.cell_cfg});
    sched_stats.reset(new sched_result_stats{sim_args0.cell_cfg});

@@ -79,8 +79,8 @@ public:
    std::vector<sched_interface::ul_sched_res_t> ul_sched_result;
  };
 
-   common_sched_tester() : logger(srslog::fetch_basic_logger("TEST")) {}
-   ~common_sched_tester() override = default;
+   common_sched_tester();
+   ~common_sched_tester() override;
 
    const ue_cfg_t* get_current_ue_cfg(uint16_t rnti) const;
@@ -114,6 +114,8 @@ public:
  protected:
    virtual void new_test_tti();
    virtual void before_sched() {}
+
+   std::unique_ptr<rrc_interface_mac> rrc_ptr;
  };
 
  } // namespace srsenb
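One consequence threads through the whole commit: bc_sched::alloc_paging no longer guards against a null rrc, so the test bench must hand sched::init a live rrc_interface_mac, hence the rrc_dummy stub registered in sim_cfg. The pattern in miniature, with the interface trimmed to the one method paging consults (the real interface has more members, as the test file above shows):

    #include <cstdint>
    #include <memory>

    // Trimmed-down stand-in for srsenb::rrc_interface_mac.
    struct rrc_interface_mac {
      virtual ~rrc_interface_mac() = default;
      virtual bool is_paging_opportunity(uint32_t tti, uint32_t* payload_len) = 0;
    };

    // Test double: never reports a paging opportunity, so the scheduler can
    // always call through a valid pointer without scheduling anything.
    struct rrc_dummy : public rrc_interface_mac {
      bool is_paging_opportunity(uint32_t /*tti*/, uint32_t* /*payload_len*/) override { return false; }
    };

    int main()
    {
      std::unique_ptr<rrc_interface_mac> rrc_ptr(new rrc_dummy);
      uint32_t len = 0;
      return rrc_ptr->is_paging_opportunity(0, &len) ? 1 : 0;
    }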
