Separated the subframe scheduling result (sf_sched_result) from the sf_sched class. The interface became much simpler: the finish_tti() method is no longer needed, and sf_sched::new_tti() is invoked automatically whenever the sf_sched is accessed.

master
Francisco Paisana 5 years ago committed by Francisco Paisana
parent bb38fa7119
commit d1356568e0

@ -36,18 +36,22 @@ public:
void reset();
void carrier_cfg(const sched_cell_params_t& sched_params_);
void set_dl_tti_mask(uint8_t* tti_mask, uint32_t nof_sfs);
sf_sched* generate_tti_result(uint32_t tti_rx);
const sf_sched_result& generate_tti_result(uint32_t tti_rx);
int dl_rach_info(dl_sched_rar_info_t rar_info);
// getters
const ra_sched* get_ra_sched() const { return ra_sched_ptr.get(); }
const sf_sched* get_sf_sched_ptr(uint32_t tti_rx) const { return &sf_scheds[tti_rx % sf_scheds.size()]; }
//! Get a subframe result for a given tti
const sf_sched_result& get_sf_result(uint32_t tti_rx) const;
private:
//! Compute DL scheduler result for given TTI
void alloc_dl_users(sf_sched* tti_result);
//! Compute UL scheduler result for given TTI
int alloc_ul_users(sf_sched* tti_sched);
//! Get sf_sched for a given TTI
sf_sched* get_sf_sched(uint32_t tti_rx);
sf_sched_result* get_next_sf_result(uint32_t tti_rx);
// args
const sched_cell_params_t* cc_cfg = nullptr;
@ -64,7 +68,8 @@ private:
// TTI result storage and management
std::array<sf_sched, TTIMOD_SZ> sf_scheds;
sf_sched* get_sf_sched(uint32_t tti_rx) { return &sf_scheds[tti_rx % sf_scheds.size()]; }
std::array<sf_sched_result, TTIMOD_SZ * 2> sf_sched_results;
std::vector<uint8_t> sf_dl_mask; ///< Some TTIs may be forbidden for DL sched due to MBMS
std::unique_ptr<bc_sched> bc_sched_ptr;

@ -55,6 +55,16 @@ struct tti_params_t {
explicit tti_params_t(uint32_t tti_rx_);
};
//! Result of a Subframe sched computation
struct sf_sched_result {
tti_params_t tti_params{10241}; ///< TTI this result belongs to; 10241 is outside the 0-10239 TTI range — presumably an "unset" sentinel so the first real TTI never matches (TODO confirm)
sched_interface::dl_sched_res_t dl_sched_result = {}; ///< DL scheduling result (DCIs, BC/RAR/data grants) produced for this TTI
sched_interface::ul_sched_res_t ul_sched_result = {}; ///< UL scheduling result (PUSCH grants, PHICH) produced for this TTI
rbgmask_t dl_mask = {}; ///< Accumulation of all DL RBG allocations
prbmask_t ul_mask = {}; ///< Accumulation of all UL PRB allocations
pdcch_mask_t pdcch_mask = {}; ///< Accumulation of all CCE allocations
};
//! Class responsible for managing a PDCCH CCE grid, namely cce allocs, and avoid collisions.
class pdcch_grid_t
{
@ -69,7 +79,6 @@ public:
void init(const sched_cell_params_t& cell_params_);
void new_tti(const tti_params_t& tti_params_, uint32_t start_cfi);
void reset();
bool alloc_dci(alloc_type_t alloc_type, uint32_t aggr_idx, sched_ue* user = nullptr);
bool set_cfi(uint32_t cfi);
@ -115,7 +124,6 @@ public:
void init(const sched_cell_params_t& cell_params_);
void new_tti(const tti_params_t& tti_params_, uint32_t start_cfi);
void reset();
dl_ctrl_alloc_t alloc_dl_ctrl(uint32_t aggr_lvl, alloc_type_t alloc_type);
alloc_outcome_t alloc_dl_data(sched_ue* user, const rbgmask_t& user_mask);
bool reserve_dl_rbgs(uint32_t start_rbg, uint32_t end_rbg);
@ -176,14 +184,6 @@ public:
class sf_sched : public dl_sf_sched_itf, public ul_sf_sched_itf
{
public:
struct sf_sched_result {
tti_params_t tti_params{10241};
sched_interface::dl_sched_res_t dl_sched_result;
sched_interface::ul_sched_res_t ul_sched_result;
rbgmask_t dl_mask; ///< Accumulation of all DL RBG allocations
prbmask_t ul_mask; ///< Accumulation of all UL PRB allocations
pdcch_mask_t pdcch_mask; ///< Accumulation of all CCE allocations
};
struct ctrl_alloc_t {
size_t dci_idx;
rbg_range_t rbg_range;
@ -239,7 +239,6 @@ public:
sf_sched();
void init(const sched_cell_params_t& cell_params_);
void new_tti(uint32_t tti_rx_, uint32_t start_cfi);
void finish_tti();
// DL alloc methods
alloc_outcome_t alloc_bc(uint32_t aggr_lvl, uint32_t sib_idx, uint32_t sib_ntx);
@ -252,26 +251,24 @@ public:
alloc_outcome_t
alloc_ul(sched_ue* user, ul_harq_proc::ul_alloc_t alloc, sf_sched::ul_alloc_t::type_t alloc_type, uint32_t mcs = 0);
bool reserve_ul_prbs(const prbmask_t& ulmask, bool strict) { return tti_alloc.reserve_ul_prbs(ulmask, strict); }
bool alloc_phich(sched_ue* user);
bool alloc_phich(sched_ue* user, sched_interface::ul_sched_res_t* ul_sf_result);
// compute DCIs and generate dl_sched_result/ul_sched_result for a given TTI
void generate_sched_results();
void generate_sched_results(sf_sched_result* sf_result);
// dl_tti_sched itf
alloc_outcome_t alloc_dl_user(sched_ue* user, const rbgmask_t& user_mask, uint32_t pid) final;
uint32_t get_tti_tx_dl() const final { return tti_params->tti_tx_dl; }
uint32_t get_tti_tx_dl() const final { return tti_params.tti_tx_dl; }
uint32_t get_nof_ctrl_symbols() const final;
const rbgmask_t& get_dl_mask() const final { return tti_alloc.get_dl_mask(); }
// ul_tti_sched itf
alloc_outcome_t alloc_ul_user(sched_ue* user, ul_harq_proc::ul_alloc_t alloc) final;
const prbmask_t& get_ul_mask() const final { return tti_alloc.get_ul_mask(); }
uint32_t get_tti_tx_ul() const final { return tti_params->tti_tx_ul; }
uint32_t get_tti_tx_ul() const final { return tti_params.tti_tx_ul; }
// getters
uint32_t get_tti_rx() const { return tti_params->tti_rx; }
const tti_params_t& get_tti_params() const { return *tti_params; }
const sf_sched_result& last_sched_result() const { return *last_sf_result; }
uint32_t get_tti_rx() const { return tti_params.tti_rx; }
const tti_params_t& get_tti_params() const { return tti_params; }
private:
bool is_dl_alloc(sched_ue* user) const final;
@ -283,10 +280,11 @@ private:
uint32_t rv,
uint16_t rnti,
srslte_dci_dl_t* dci);
void set_bc_sched_result(const pdcch_grid_t::alloc_result_t& dci_result);
void set_rar_sched_result(const pdcch_grid_t::alloc_result_t& dci_result);
void set_dl_data_sched_result(const pdcch_grid_t::alloc_result_t& dci_result);
void set_ul_sched_result(const pdcch_grid_t::alloc_result_t& dci_result);
void set_bc_sched_result(const pdcch_grid_t::alloc_result_t& dci_result, sched_interface::dl_sched_res_t* dl_result);
void set_rar_sched_result(const pdcch_grid_t::alloc_result_t& dci_result, sched_interface::dl_sched_res_t* dl_result);
void set_dl_data_sched_result(const pdcch_grid_t::alloc_result_t& dci_result,
sched_interface::dl_sched_res_t* dl_result);
void set_ul_sched_result(const pdcch_grid_t::alloc_result_t& dci_result, sched_interface::ul_sched_res_t* ul_result);
// consts
const sched_cell_params_t* cc_cfg = nullptr;
@ -299,16 +297,9 @@ private:
std::vector<dl_alloc_t> data_allocs;
std::vector<ul_alloc_t> ul_data_allocs;
uint32_t last_msg3_prb = 0, max_msg3_prb = 0;
std::array<sf_sched_result, 2> sched_result_resources = {};
// Next TTI state
sf_sched_result* current_sf_result = nullptr;
sched_interface::dl_sched_res_t* dl_sched_result = nullptr;
sched_interface::ul_sched_res_t* ul_sched_result = nullptr;
tti_params_t* tti_params = nullptr;
// Last subframe scheduler result
sf_sched_result* last_sf_result = nullptr;
tti_params_t tti_params{10241};
};
} // namespace srsenb

@ -383,10 +383,10 @@ int sched::dl_sched(uint32_t tti, uint32_t cc_idx, sched_interface::dl_sched_res
if (cc_idx < carrier_schedulers.size()) {
// Compute scheduling Result for tti_rx
sf_sched* tti_sched = carrier_schedulers[cc_idx]->generate_tti_result(tti_rx);
const sf_sched_result& tti_sched = carrier_schedulers[cc_idx]->generate_tti_result(tti_rx);
// copy result
sched_result = tti_sched->last_sched_result().dl_sched_result;
sched_result = tti_sched.dl_sched_result;
}
return 0;
@ -404,10 +404,10 @@ int sched::ul_sched(uint32_t tti, uint32_t cc_idx, srsenb::sched_interface::ul_s
uint32_t tti_rx = sched_utils::tti_subtract(tti, 2 * FDD_HARQ_DELAY_MS);
if (cc_idx < carrier_schedulers.size()) {
sf_sched* tti_sched = carrier_schedulers[cc_idx]->generate_tti_result(tti_rx);
const sf_sched_result& tti_sched = carrier_schedulers[cc_idx]->generate_tti_result(tti_rx);
// copy result
sched_result = tti_sched->last_sched_result().ul_sched_result;
sched_result = tti_sched.ul_sched_result;
}
return SRSLTE_SUCCESS;

@ -248,7 +248,8 @@ void ra_sched::sched_msg3(sf_sched* sf_msg3_sched, const sched_interface::dl_sch
msg3.mcs = grant.grant.trunc_mcs;
msg3.rnti = grant.data.temp_crnti;
if (not sf_msg3_sched->alloc_msg3(&(*ue_db)[msg3.rnti], msg3)) {
auto it = ue_db->find(msg3.rnti);
if (it == ue_db->end() or not sf_msg3_sched->alloc_msg3(&it->second, msg3)) {
log_h->error(
"SCHED: Failed to allocate Msg3 for rnti=0x%x at tti=%d\n", msg3.rnti, sf_msg3_sched->get_tti_tx_ul());
} else {
@ -314,19 +315,20 @@ void sched::carrier_sched::set_dl_tti_mask(uint8_t* tti_mask, uint32_t nof_sfs)
sf_dl_mask.assign(tti_mask, tti_mask + nof_sfs);
}
sf_sched* sched::carrier_sched::generate_tti_result(uint32_t tti_rx)
const sf_sched_result& sched::carrier_sched::generate_tti_result(uint32_t tti_rx)
{
sf_sched* tti_sched = get_sf_sched(tti_rx);
sf_sched_result* sf_result = get_next_sf_result(tti_rx);
// if it is the first time tti is run, reset vars
if (tti_rx != tti_sched->last_sched_result().tti_params.tti_rx) {
uint32_t start_cfi = cc_cfg->sched_cfg->nof_ctrl_symbols;
if (tti_rx != sf_result->tti_params.tti_rx) {
sf_sched* tti_sched = get_sf_sched(tti_rx);
*sf_result = {};
bool dl_active = sf_dl_mask[tti_sched->get_tti_tx_dl() % sf_dl_mask.size()] == 0;
tti_sched->new_tti(tti_rx, start_cfi);
/* Schedule PHICH */
for (auto& ue_pair : *ue_db) {
tti_sched->alloc_phich(&ue_pair.second);
tti_sched->alloc_phich(&ue_pair.second, &sf_result->ul_sched_result);
}
/* Schedule DL control data */
@ -351,24 +353,21 @@ sf_sched* sched::carrier_sched::generate_tti_result(uint32_t tti_rx)
}
/* Select the winner DCI allocation combination, store all the scheduling results */
tti_sched->generate_sched_results();
tti_sched->generate_sched_results(sf_result);
/* Enqueue Msg3s derived from allocated RARs */
if (dl_active) {
sf_sched* sf_msg3_sched = get_sf_sched(tti_rx + MSG3_DELAY_MS);
ra_sched_ptr->sched_msg3(sf_msg3_sched, tti_sched->last_sched_result().dl_sched_result);
ra_sched_ptr->sched_msg3(sf_msg3_sched, sf_result->dl_sched_result);
}
/* Reset ue harq pending ack state, clean-up blocked pids */
for (auto& user : *ue_db) {
user.second.finish_tti(tti_sched->get_tti_params(), enb_cc_idx);
user.second.finish_tti(sf_result->tti_params, enb_cc_idx);
}
/* Reset sf_sched tti state */
tti_sched->finish_tti();
}
return tti_sched;
return *sf_result;
}
void sched::carrier_sched::alloc_dl_users(sf_sched* tti_result)
@ -408,6 +407,26 @@ int sched::carrier_sched::alloc_ul_users(sf_sched* tti_sched)
return SRSLTE_SUCCESS;
}
/// Fetch the sf_sched slot assigned to tti_rx, lazily (re)starting its TTI state
/// the first time the slot is accessed for this TTI.
sf_sched* sched::carrier_sched::get_sf_sched(uint32_t tti_rx)
{
  sf_sched& sf = sf_scheds[tti_rx % sf_scheds.size()];
  // A mismatching stored TTI means the circular slot still holds a past TTI:
  // rebind it to the new TTI before handing it out.
  if (tti_rx != sf.get_tti_rx()) {
    sf.new_tti(tti_rx, cc_cfg->sched_cfg->nof_ctrl_symbols);
  }
  return &sf;
}
/// Locate the writable result slot for tti_rx in the circular result buffer.
sf_sched_result* sched::carrier_sched::get_next_sf_result(uint32_t tti_rx)
{
  const size_t slot_idx = tti_rx % sf_sched_results.size();
  return &sf_sched_results[slot_idx];
}
/// Read-only accessor for the stored subframe result of a given tti_rx.
const sf_sched_result& sched::carrier_sched::get_sf_result(uint32_t tti_rx) const
{
  const size_t slot_idx = tti_rx % sf_sched_results.size();
  return sf_sched_results[slot_idx];
}
int sched::carrier_sched::dl_rach_info(dl_sched_rar_info_t rar_info)
{
return ra_sched_ptr->dl_rach_info(rar_info);

@ -64,21 +64,17 @@ void pdcch_grid_t::init(const sched_cell_params_t& cell_params_)
cc_cfg = &cell_params_;
log_h = srslte::logmap::get("MAC ");
current_cfix = cc_cfg->sched_cfg->nof_ctrl_symbols - 1;
reset();
}
void pdcch_grid_t::new_tti(const tti_params_t& tti_params_, uint32_t start_cfi)
{
tti_params = &tti_params_;
set_cfi(start_cfi);
}
void pdcch_grid_t::reset()
{
prev_start = 0;
prev_end = 0;
dci_alloc_tree.clear();
nof_dci_allocs = 0;
set_cfi(start_cfi);
}
const sched_dci_cce_t* pdcch_grid_t::get_cce_loc_table(alloc_type_t alloc_type, sched_ue* user) const
@ -278,23 +274,18 @@ void sf_grid_t::init(const sched_cell_params_t& cell_params_)
ul_mask.resize(cc_cfg->nof_prb());
pdcch_alloc.init(*cc_cfg);
reset();
}
void sf_grid_t::new_tti(const tti_params_t& tti_params_, uint32_t start_cfi)
{
tti_params = &tti_params_;
// internal state
pdcch_alloc.new_tti(*tti_params, start_cfi);
}
void sf_grid_t::reset()
{
dl_mask.reset();
ul_mask.reset();
pdcch_alloc.reset();
avail_rbg = nof_rbgs;
// internal state
pdcch_alloc.new_tti(*tti_params, start_cfi);
}
//! Allocates CCEs and RBs for the given mask and allocation type (e.g. data, BC, RAR, paging)
@ -412,15 +403,7 @@ bool sf_grid_t::reserve_ul_prbs(const prbmask_t& prbmask, bool strict)
* TTI resource Scheduling Methods
*******************************************************/
sf_sched::sf_sched() :
current_sf_result(&sched_result_resources[0]),
last_sf_result(&sched_result_resources[1]),
log_h(srslte::logmap::get("MAC "))
{
dl_sched_result = &current_sf_result->dl_sched_result;
ul_sched_result = &current_sf_result->ul_sched_result;
tti_params = &current_sf_result->tti_params;
}
sf_sched::sf_sched() : log_h(srslte::logmap::get("MAC ")) {}
void sf_sched::init(const sched_cell_params_t& cell_params_)
{
@ -430,40 +413,22 @@ void sf_sched::init(const sched_cell_params_t& cell_params_)
}
void sf_sched::new_tti(uint32_t tti_rx_, uint32_t start_cfi)
{
if (tti_params->tti_rx != tti_rx_) {
if (tti_params->tti_rx < 10240) {
log_h->warning("expected TTI for the given sf_sched does not match current_tti\n");
}
*tti_params = tti_params_t{tti_rx_};
}
tti_alloc.new_tti(*tti_params, start_cfi);
// setup first prb to be used for msg3 alloc. Account for potential PRACH alloc
last_msg3_prb = cc_cfg->cfg.nrb_pucch;
uint32_t tti_msg3_alloc = TTI_ADD(tti_params->tti_tx_ul, MSG3_DELAY_MS);
if (srslte_prach_tti_opportunity_config_fdd(cc_cfg->cfg.prach_config, tti_msg3_alloc, -1)) {
last_msg3_prb = std::max(last_msg3_prb, cc_cfg->cfg.prach_freq_offset + 6);
}
}
void sf_sched::finish_tti()
{
// reset internal state
bc_allocs.clear();
rar_allocs.clear();
data_allocs.clear();
ul_data_allocs.clear();
tti_alloc.reset();
// set new current_sf_result
current_sf_result = &sched_result_resources[(last_sf_result == &sched_result_resources[0]) ? 1 : 0];
dl_sched_result = &current_sf_result->dl_sched_result;
ul_sched_result = &current_sf_result->ul_sched_result;
tti_params = &current_sf_result->tti_params;
*dl_sched_result = {};
*ul_sched_result = {};
*tti_params = tti_params_t{last_sf_result->tti_params.tti_rx + TTIMOD_SZ};
tti_params = tti_params_t{tti_rx_};
tti_alloc.new_tti(tti_params, start_cfi);
// setup first prb to be used for msg3 alloc. Account for potential PRACH alloc
last_msg3_prb = cc_cfg->cfg.nrb_pucch;
uint32_t tti_msg3_alloc = TTI_ADD(tti_params.tti_tx_ul, MSG3_DELAY_MS);
if (srslte_prach_tti_opportunity_config_fdd(cc_cfg->cfg.prach_config, tti_msg3_alloc, -1)) {
last_msg3_prb = std::max(last_msg3_prb, cc_cfg->cfg.prach_freq_offset + 6);
}
}
bool sf_sched::is_dl_alloc(sched_ue* user) const
@ -678,10 +643,10 @@ alloc_outcome_t sf_sched::alloc_ul_user(sched_ue* user, ul_harq_proc::ul_alloc_t
return alloc_ul(user, alloc, alloc_type);
}
bool sf_sched::alloc_phich(sched_ue* user)
bool sf_sched::alloc_phich(sched_ue* user, sched_interface::ul_sched_res_t* ul_sf_result)
{
using phich_t = sched_interface::ul_sched_phich_t;
auto& phich_list = ul_sched_result->phich[ul_sched_result->nof_phich_elems];
auto& phich_list = ul_sf_result->phich[ul_sf_result->nof_phich_elems];
auto p = user->get_cell_index(cc_cfg->enb_cc_idx);
if (not p.first) {
@ -690,7 +655,7 @@ bool sf_sched::alloc_phich(sched_ue* user)
}
uint32_t cell_index = p.second;
ul_harq_proc* h = user->get_ul_harq(tti_params->tti_tx_ul, cell_index);
ul_harq_proc* h = user->get_ul_harq(tti_params.tti_tx_ul, cell_index);
/* Indicate PHICH acknowledgment if needed */
if (h->has_pending_ack()) {
@ -700,16 +665,17 @@ bool sf_sched::alloc_phich(sched_ue* user)
user->get_rnti(),
phich_list.phich == phich_t::ACK ? "ACK" : "NACK");
ul_sched_result->nof_phich_elems++;
ul_sf_result->nof_phich_elems++;
return true;
}
return false;
}
void sf_sched::set_bc_sched_result(const pdcch_grid_t::alloc_result_t& dci_result)
void sf_sched::set_bc_sched_result(const pdcch_grid_t::alloc_result_t& dci_result,
sched_interface::dl_sched_res_t* dl_result)
{
for (const auto& bc_alloc : bc_allocs) {
sched_interface::dl_sched_bc_t* bc = &dl_sched_result->bc[dl_sched_result->nof_bc_elems];
sched_interface::dl_sched_bc_t* bc = &dl_result->bc[dl_result->nof_bc_elems];
// assign NCCE/L
bc->dci.location = dci_result[bc_alloc.dci_idx]->dci_pos;
@ -771,14 +737,15 @@ void sf_sched::set_bc_sched_result(const pdcch_grid_t::alloc_result_t& dci_resul
bc->dci.tb[0].mcs_idx);
}
dl_sched_result->nof_bc_elems++;
dl_result->nof_bc_elems++;
}
}
void sf_sched::set_rar_sched_result(const pdcch_grid_t::alloc_result_t& dci_result)
void sf_sched::set_rar_sched_result(const pdcch_grid_t::alloc_result_t& dci_result,
sched_interface::dl_sched_res_t* dl_result)
{
for (const auto& rar_alloc : rar_allocs) {
sched_interface::dl_sched_rar_t* rar = &dl_sched_result->rar[dl_sched_result->nof_rar_elems];
sched_interface::dl_sched_rar_t* rar = &dl_result->rar[dl_result->nof_rar_elems];
// Assign NCCE/L
rar->dci.location = dci_result[rar_alloc.alloc_data.dci_idx]->dci_pos;
@ -822,14 +789,15 @@ void sf_sched::set_rar_sched_result(const pdcch_grid_t::alloc_result_t& dci_resu
msg3_grant.grant.trunc_mcs);
}
dl_sched_result->nof_rar_elems++;
dl_result->nof_rar_elems++;
}
}
void sf_sched::set_dl_data_sched_result(const pdcch_grid_t::alloc_result_t& dci_result)
void sf_sched::set_dl_data_sched_result(const pdcch_grid_t::alloc_result_t& dci_result,
sched_interface::dl_sched_res_t* dl_result)
{
for (const auto& data_alloc : data_allocs) {
sched_interface::dl_sched_data_t* data = &dl_sched_result->data[dl_sched_result->nof_data_elems];
sched_interface::dl_sched_data_t* data = &dl_result->data[dl_result->nof_data_elems];
// Assign NCCE/L
data->dci.location = dci_result[data_alloc.dci_idx]->dci_pos;
@ -882,15 +850,16 @@ void sf_sched::set_dl_data_sched_result(const pdcch_grid_t::alloc_result_t& dci_
data_before,
user->get_pending_dl_new_data());
dl_sched_result->nof_data_elems++;
dl_result->nof_data_elems++;
}
}
void sf_sched::set_ul_sched_result(const pdcch_grid_t::alloc_result_t& dci_result)
void sf_sched::set_ul_sched_result(const pdcch_grid_t::alloc_result_t& dci_result,
sched_interface::ul_sched_res_t* ul_result)
{
/* Set UL data DCI locs and format */
for (const auto& ul_alloc : ul_data_allocs) {
sched_interface::ul_sched_data_t* pusch = &ul_sched_result->pusch[ul_sched_result->nof_dci_elems];
sched_interface::ul_sched_data_t* pusch = &ul_result->pusch[ul_result->nof_dci_elems];
sched_ue* user = ul_alloc.user_ptr;
uint32_t cell_index = user->get_cell_index(cc_cfg->enb_cc_idx).second;
@ -940,7 +909,7 @@ void sf_sched::set_ul_sched_result(const pdcch_grid_t::alloc_result_t& dci_resul
pending_data_before,
user->get_pending_ul_old_data(cell_index));
ul_sched_result->nof_dci_elems++;
ul_result->nof_dci_elems++;
}
}
@ -955,29 +924,29 @@ alloc_outcome_t sf_sched::alloc_msg3(sched_ue* user, const pending_msg3_t& msg3)
return ret;
}
void sf_sched::generate_sched_results()
void sf_sched::generate_sched_results(sf_sched_result* sf_result)
{
/* Pick one of the possible DCI masks */
pdcch_grid_t::alloc_result_t dci_result;
// tti_alloc.get_pdcch_grid().result_to_string();
tti_alloc.get_pdcch_grid().get_allocs(&dci_result, &current_sf_result->pdcch_mask);
tti_alloc.get_pdcch_grid().get_allocs(&dci_result, &sf_result->pdcch_mask);
/* Register final CFI */
dl_sched_result->cfi = tti_alloc.get_pdcch_grid().get_cfi();
sf_result->dl_sched_result.cfi = tti_alloc.get_pdcch_grid().get_cfi();
/* Generate DCI formats and fill sched_result structs */
set_bc_sched_result(dci_result);
set_bc_sched_result(dci_result, &sf_result->dl_sched_result);
set_rar_sched_result(dci_result);
set_rar_sched_result(dci_result, &sf_result->dl_sched_result);
set_dl_data_sched_result(dci_result);
set_dl_data_sched_result(dci_result, &sf_result->dl_sched_result);
set_ul_sched_result(dci_result);
set_ul_sched_result(dci_result, &sf_result->ul_sched_result);
/* Store sf_sched results for this TTI */
last_sf_result = current_sf_result;
last_sf_result->dl_mask = tti_alloc.get_dl_mask();
last_sf_result->ul_mask = tti_alloc.get_ul_mask();
/* Store remaining sf_sched results for this TTI */
sf_result->dl_mask = tti_alloc.get_dl_mask();
sf_result->ul_mask = tti_alloc.get_ul_mask();
sf_result->tti_params = tti_params;
}
uint32_t sf_sched::get_nof_ctrl_symbols() const

@ -188,8 +188,8 @@ void sched_tester::before_sched()
int sched_tester::process_results()
{
const auto* sf_sched = carrier_schedulers[CARRIER_IDX]->get_sf_sched_ptr(tti_info.tti_params.tti_rx);
TESTASSERT(tti_info.tti_params.tti_rx == sf_sched->last_sched_result().tti_params.tti_rx);
const auto& sf_result = carrier_schedulers[CARRIER_IDX]->get_sf_result(tti_info.tti_params.tti_rx);
TESTASSERT(tti_info.tti_params.tti_rx == sf_result.tti_params.tti_rx);
test_pdcch_collisions();
TESTASSERT(ue_tester->test_all(0, tti_info.dl_sched_result[CARRIER_IDX], tti_info.ul_sched_result[CARRIER_IDX]) ==
@ -255,10 +255,9 @@ int sched_tester::test_pdcch_collisions()
tti_info.dl_sched_result[CARRIER_IDX], tti_info.ul_sched_result[CARRIER_IDX]) == SRSLTE_SUCCESS);
/* verify if sched_result "used_cce" coincide with sched "used_cce" */
auto* tti_alloc = carrier_schedulers[0]->get_sf_sched_ptr(tti_info.tti_params.tti_rx);
srsenb::pdcch_mask_t mask = tti_alloc->last_sched_result().pdcch_mask;
if (used_cce != mask) {
std::string mask_str = mask.to_string();
const auto& sf_result = carrier_schedulers[CARRIER_IDX]->get_sf_result(tti_info.tti_params.tti_rx);
if (used_cce != sf_result.pdcch_mask) {
std::string mask_str = sf_result.pdcch_mask.to_string();
TESTERROR("The used_cce do not match: (%s!=%s)\n", mask_str.c_str(), used_cce.to_string().c_str());
}
@ -379,7 +378,7 @@ int sched_tester::test_harqs()
int sched_tester::test_sch_collisions()
{
const srsenb::sf_sched* tti_sched = carrier_schedulers[CARRIER_IDX]->get_sf_sched_ptr(tti_info.tti_params.tti_rx);
const auto& sf_result = carrier_schedulers[CARRIER_IDX]->get_sf_result(tti_info.tti_params.tti_rx);
srsenb::prbmask_t ul_allocs(sched_cell_params[CARRIER_IDX].cfg.cell.nof_prb);
@ -388,7 +387,7 @@ int sched_tester::test_sch_collisions()
tti_info.tti_params, tti_info.ul_sched_result[CARRIER_IDX], ul_allocs) == SRSLTE_SUCCESS);
/* TEST: check whether cumulative UL PRB masks coincide */
if (ul_allocs != tti_sched->last_sched_result().ul_mask) {
if (ul_allocs != sf_result.ul_mask) {
TESTERROR("The UL PRB mask and the scheduler result UL mask are not consistent\n");
}
@ -418,10 +417,10 @@ int sched_tester::test_sch_collisions()
}
// TEST: check if resulting DL mask is equal to scheduler internal DL mask
if (rbgmask != tti_sched->last_sched_result().dl_mask) {
if (rbgmask != sf_result.dl_mask) {
TESTERROR("The DL PRB mask and the scheduler result DL mask are not consistent (%s!=%s)\n",
rbgmask.to_string().c_str(),
tti_sched->last_sched_result().dl_mask.to_string().c_str());
sf_result.dl_mask.to_string().c_str());
}
return SRSLTE_SUCCESS;
}

Loading…
Cancel
Save