moved sf result to sched class. This way, each carrier has a view of the results of the remaining carriers in the same subframe

master
Francisco Paisana 4 years ago
parent 5d33acdb53
commit 53b85691b5
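
In short: scheduling results no longer live in a per-carrier ring buffer (`sf_sched_results` inside `carrier_sched`); they are stored once per subframe in a `sched_result_list` owned by `sched`, with one `cc_sched_result` entry per component carrier. Each `carrier_sched` receives a pointer to that list, so it can look up what the other carriers have already allocated in the same TTI. Below is a minimal sketch of how a carrier could peek at a neighbour carrier's result through the shared list. It is illustrative only and not part of this commit: the helper function, the include path, and the `nof_data_elems` field check are assumptions.

// Illustrative helper, not part of the commit. The include path and the
// dl_sched_res_t::nof_data_elems field are assumed; adjust to the actual tree.
#include "srsenb/hdr/stack/mac/scheduler_grid.h"

namespace srsenb {

// Returns true if another carrier already produced DL data allocations for the
// subframe identified by tti_rx. A nullptr from get_cc() means that carrier has
// not been scheduled for this TTI yet (or the ring-buffer slot was recycled).
inline bool neighbour_cc_has_dl_data(const sched_result_list& results,
                                     srslte::tti_point        tti_rx,
                                     uint32_t                 neighbour_cc_idx)
{
  const cc_sched_result* cc = results.get_cc(tti_rx, neighbour_cc_idx);
  return cc != nullptr and cc->dl_sched_result.nof_data_elems > 0;
}

} // namespace srsenb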

@@ -153,6 +153,9 @@ protected:
   // independent schedulers for each carrier
   std::vector<std::unique_ptr<carrier_sched> > carrier_schedulers;
 
+  // Storage of past scheduling results
+  sched_result_list sched_results;
+
   uint32_t   last_tti = 0;
   std::mutex sched_mutex;
   bool       configured = false;

@@ -32,18 +32,21 @@ class ra_sched;
 class sched::carrier_sched
 {
 public:
-  explicit carrier_sched(rrc_interface_mac* rrc_, std::map<uint16_t, sched_ue>* ue_db_, uint32_t enb_cc_idx_);
+  explicit carrier_sched(rrc_interface_mac*            rrc_,
+                         std::map<uint16_t, sched_ue>* ue_db_,
+                         uint32_t                      enb_cc_idx_,
+                         sched_result_list*            sched_results_);
   ~carrier_sched();
   void reset();
   void carrier_cfg(const sched_cell_params_t& sched_params_);
   void set_dl_tti_mask(uint8_t* tti_mask, uint32_t nof_sfs);
-  const sf_sched_result& generate_tti_result(uint32_t tti_rx);
+  const cc_sched_result& generate_tti_result(uint32_t tti_rx);
   int dl_rach_info(dl_sched_rar_info_t rar_info);
   // getters
   const ra_sched* get_ra_sched() const { return ra_sched_ptr.get(); }
   //! Get a subframe result for a given tti
-  const sf_sched_result& get_sf_result(uint32_t tti_rx) const;
+  const sf_sched_result* get_sf_result(uint32_t tti_rx) const;
 
 private:
   //! Compute DL scheduler result for given TTI
@@ -51,8 +54,7 @@ private:
   //! Compute UL scheduler result for given TTI
   int alloc_ul_users(sf_sched* tti_sched);
   //! Get sf_sched for a given TTI
   sf_sched* get_sf_sched(uint32_t tti_rx);
-  sf_sched_result* get_next_sf_result(uint32_t tti_rx);
 
   // args
   const sched_cell_params_t* cc_cfg = nullptr;
@@ -67,9 +69,11 @@ private:
   prbmask_t prach_mask;
   prbmask_t pucch_mask;
 
-  // TTI result storage and management
+  // Subframe scheduling logic
   std::array<sf_sched, TTIMOD_SZ> sf_scheds;
-  std::array<sf_sched_result, TTIMOD_SZ * 2> sf_sched_results;
+  // scheduling results
+  sched_result_list* prev_sched_results;
 
   std::vector<uint8_t> sf_dl_mask; ///< Some TTIs may be forbidden for DL sched due to MBMS

@@ -42,11 +42,11 @@ struct alloc_outcome_t {
   alloc_outcome_t(result_enum e) : result(e) {}
   operator result_enum() { return result; }
   operator bool() { return result == SUCCESS; }
   const char* to_string() const;
 };
 
 //! Result of a Subframe sched computation
-struct sf_sched_result {
+struct cc_sched_result {
   tti_params_t tti_params{10241};
   sched_interface::dl_sched_res_t dl_sched_result = {};
   sched_interface::ul_sched_res_t ul_sched_result = {};
@@ -55,6 +55,33 @@ struct sf_sched_result {
   pdcch_mask_t pdcch_mask = {}; ///< Accumulation of all CCE allocations
 };
 
+struct sf_sched_result {
+  srslte::tti_point            tti_rx;
+  std::vector<cc_sched_result> enb_cc_list;
+
+  cc_sched_result*       new_cc(uint32_t enb_cc_idx);
+  const cc_sched_result* get_cc(uint32_t enb_cc_idx) const
+  {
+    return enb_cc_idx < enb_cc_list.size() ? &enb_cc_list[enb_cc_idx] : nullptr;
+  }
+  cc_sched_result* get_cc(uint32_t enb_cc_idx)
+  {
+    return enb_cc_idx < enb_cc_list.size() ? &enb_cc_list[enb_cc_idx] : nullptr;
+  }
+};
+
+struct sched_result_list {
+public:
+  sf_sched_result*       new_tti(srslte::tti_point tti_rx);
+  sf_sched_result*       get_sf(srslte::tti_point tti_rx);
+  const sf_sched_result* get_sf(srslte::tti_point tti_rx) const;
+  const cc_sched_result* get_cc(srslte::tti_point tti_rx, uint32_t enb_cc_idx) const;
+  cc_sched_result*       get_cc(srslte::tti_point tti_rx, uint32_t enb_cc_idx);
+
+private:
+  std::array<sf_sched_result, TTIMOD_SZ> results;
+};
+
 //! Class responsible for managing a PDCCH CCE grid, namely cce allocs, and avoid collisions.
 class pdcch_grid_t
 {
@@ -207,8 +234,7 @@ public:
     sf_sched::ctrl_alloc_t          alloc_data;
     sched_interface::dl_sched_rar_t rar_grant;
     rar_alloc_t(const sf_sched::ctrl_alloc_t& c, const sched_interface::dl_sched_rar_t& r) : alloc_data(c), rar_grant(r)
-    {
-    }
+    {}
   };
   struct bc_alloc_t : public ctrl_alloc_t {
     uint32_t rv = 0;
@@ -250,7 +276,7 @@ public:
   // Control/Configuration Methods
   sf_sched();
   void init(const sched_cell_params_t& cell_params_);
-  void new_tti(uint32_t tti_rx_);
+  void new_tti(uint32_t tti_rx_, sf_sched_result* cc_results);
 
   // DL alloc methods
   alloc_outcome_t alloc_bc(uint32_t aggr_lvl, uint32_t sib_idx, uint32_t sib_ntx);
@@ -267,7 +293,7 @@ public:
   bool alloc_phich(sched_ue* user, sched_interface::ul_sched_res_t* ul_sf_result);
 
   // compute DCIs and generate dl_sched_result/ul_sched_result for a given TTI
-  void generate_sched_results(sf_sched_result* sf_result);
+  void generate_sched_results();
 
   // dl_tti_sched itf
   alloc_outcome_t alloc_dl_user(sched_ue* user, const rbgmask_t& user_mask, uint32_t pid) final;
@@ -302,6 +328,7 @@ private:
   // consts
   const sched_cell_params_t* cc_cfg = nullptr;
   srslte::log_ref            log_h;
+  sf_sched_result*           cc_results; ///< Results of other CCs for the same Subframe
 
   // internal state
   sf_grid_t tti_alloc;

@@ -144,7 +144,7 @@ void sched::init(rrc_interface_mac* rrc_)
   rrc = rrc_;
 
   // Initialize first carrier scheduler
-  carrier_schedulers.emplace_back(new carrier_sched{rrc, &ue_db, 0});
+  carrier_schedulers.emplace_back(new carrier_sched{rrc, &ue_db, 0, &sched_results});
 
   reset();
 }
@@ -183,7 +183,7 @@ int sched::cell_cfg(const std::vector<sched_interface::cell_cfg_t>& cell_cfg)
   uint32_t prev_size = carrier_schedulers.size();
   carrier_schedulers.resize(sched_cell_params.size());
   for (uint32_t i = prev_size; i < sched_cell_params.size(); ++i) {
-    carrier_schedulers[i].reset(new carrier_sched{rrc, &ue_db, i});
+    carrier_schedulers[i].reset(new carrier_sched{rrc, &ue_db, i, &sched_results});
   }
 
   // setup all carriers cfg params
@@ -387,7 +387,7 @@ int sched::dl_sched(uint32_t tti_tx_dl, uint32_t cc_idx, sched_interface::dl_sch
   if (cc_idx < carrier_schedulers.size()) {
     // Compute scheduling Result for tti_rx
-    const sf_sched_result& tti_sched = carrier_schedulers[cc_idx]->generate_tti_result(tti_rx);
+    const cc_sched_result& tti_sched = carrier_schedulers[cc_idx]->generate_tti_result(tti_rx);
 
     // copy result
     sched_result = tti_sched.dl_sched_result;
@@ -408,7 +408,7 @@ int sched::ul_sched(uint32_t tti, uint32_t cc_idx, srsenb::sched_interface::ul_s
   uint32_t tti_rx = sched_utils::tti_subtract(tti, FDD_HARQ_DELAY_UL_MS + FDD_HARQ_DELAY_DL_MS);
   if (cc_idx < carrier_schedulers.size()) {
-    const sf_sched_result& tti_sched = carrier_schedulers[cc_idx]->generate_tti_result(tti_rx);
+    const cc_sched_result& tti_sched = carrier_schedulers[cc_idx]->generate_tti_result(tti_rx);
 
     // copy result
     sched_result = tti_sched.ul_sched_result;

@@ -259,11 +259,13 @@ void ra_sched::reset()
 sched::carrier_sched::carrier_sched(rrc_interface_mac* rrc_,
                                     std::map<uint16_t, sched_ue>* ue_db_,
-                                    uint32_t enb_cc_idx_) :
+                                    uint32_t enb_cc_idx_,
+                                    sched_result_list* sched_results_) :
   rrc(rrc_),
   ue_db(ue_db_),
   log_h(srslte::logmap::get("MAC ")),
-  enb_cc_idx(enb_cc_idx_)
+  enb_cc_idx(enb_cc_idx_),
+  prev_sched_results(sched_results_)
 {
   sf_dl_mask.resize(1, 0);
 }
@@ -311,20 +313,21 @@ void sched::carrier_sched::set_dl_tti_mask(uint8_t* tti_mask, uint32_t nof_sfs)
   sf_dl_mask.assign(tti_mask, tti_mask + nof_sfs);
 }
 
-const sf_sched_result& sched::carrier_sched::generate_tti_result(uint32_t tti_rx)
+const cc_sched_result& sched::carrier_sched::generate_tti_result(uint32_t tti_rx)
 {
-  sf_sched_result* sf_result = get_next_sf_result(tti_rx);
+  cc_sched_result* cc_result = prev_sched_results->get_cc(srslte::tti_point{tti_rx}, enb_cc_idx);
 
   // if it is the first time tti is run, reset vars
-  if (tti_rx != sf_result->tti_params.tti_rx) {
+  if (cc_result == nullptr) {
     sf_sched* tti_sched = get_sf_sched(tti_rx);
-    *sf_result = {};
+    sf_sched_result* sf_result = prev_sched_results->get_sf(srslte::tti_point{tti_rx});
+    cc_result = sf_result->new_cc(enb_cc_idx);
 
     bool dl_active = sf_dl_mask[tti_sched->get_tti_tx_dl() % sf_dl_mask.size()] == 0;
 
     /* Schedule PHICH */
     for (auto& ue_pair : *ue_db) {
-      tti_sched->alloc_phich(&ue_pair.second, &sf_result->ul_sched_result);
+      tti_sched->alloc_phich(&ue_pair.second, &cc_result->ul_sched_result);
     }
 
     /* Schedule DL control data */
@@ -353,15 +356,15 @@ const sf_sched_result& sched::carrier_sched::generate_tti_result(uint32_t tti_rx
     }
 
     /* Select the winner DCI allocation combination, store all the scheduling results */
-    tti_sched->generate_sched_results(sf_result);
+    tti_sched->generate_sched_results();
 
     /* Reset ue harq pending ack state, clean-up blocked pids */
     for (auto& user : *ue_db) {
-      user.second.finish_tti(sf_result->tti_params, enb_cc_idx);
+      user.second.finish_tti(cc_result->tti_params, enb_cc_idx);
     }
   }
 
-  return *sf_result;
+  return *cc_result;
 }
 
 void sched::carrier_sched::alloc_dl_users(sf_sched* tti_result)
@@ -405,20 +408,20 @@ sf_sched* sched::carrier_sched::get_sf_sched(uint32_t tti_rx)
 {
   sf_sched* ret = &sf_scheds[tti_rx % sf_scheds.size()];
   if (ret->get_tti_rx() != tti_rx) {
-    // start new TTI. Bind the struct where the result is going to be stored
-    ret->new_tti(tti_rx);
+    sf_sched_result* sf_res = prev_sched_results->get_sf(srslte::tti_point{tti_rx});
+    if (sf_res == nullptr) {
+      // Reset if tti_rx has not been yet set in the sched results
+      sf_res = prev_sched_results->new_tti(srslte::tti_point{tti_rx});
+    }
+    // start new TTI for the given CC.
+    ret->new_tti(tti_rx, sf_res);
   }
   return ret;
 }
 
-sf_sched_result* sched::carrier_sched::get_next_sf_result(uint32_t tti_rx)
-{
-  return &sf_sched_results[tti_rx % sf_sched_results.size()];
-}
-
-const sf_sched_result& sched::carrier_sched::get_sf_result(uint32_t tti_rx) const
+const sf_sched_result* sched::carrier_sched::get_sf_result(uint32_t tti_rx) const
 {
-  return sf_sched_results[tti_rx % sf_sched_results.size()];
+  return prev_sched_results->get_sf(srslte::tti_point{tti_rx});
 }
 
 int sched::carrier_sched::dl_rach_info(dl_sched_rar_info_t rar_info)

@@ -52,6 +52,46 @@ tti_params_t::tti_params_t(uint32_t tti_rx_) :
   sfn_tx_dl(TTI_ADD(tti_rx, FDD_HARQ_DELAY_UL_MS) / 10)
 {}
 
+cc_sched_result* sf_sched_result::new_cc(uint32_t enb_cc_idx)
+{
+  if (enb_cc_idx >= enb_cc_list.size()) {
+    enb_cc_list.resize(enb_cc_idx + 1);
+  }
+  return &enb_cc_list[enb_cc_idx];
+}
+
+sf_sched_result* sched_result_list::new_tti(srslte::tti_point tti_rx)
+{
+  sf_sched_result* res = &results[tti_rx.to_uint() % results.size()];
+  res->tti_rx = tti_rx;
+  res->enb_cc_list.clear();
+  return res;
+}
+
+sf_sched_result* sched_result_list::get_sf(srslte::tti_point tti_rx)
+{
+  sf_sched_result* res = &results[tti_rx.to_uint() % results.size()];
+  return (res->tti_rx != tti_rx) ? nullptr : res;
+}
+
+const sf_sched_result* sched_result_list::get_sf(srslte::tti_point tti_rx) const
+{
+  const sf_sched_result* res = &results[tti_rx.to_uint() % results.size()];
+  return (res->tti_rx != tti_rx) ? nullptr : res;
+}
+
+const cc_sched_result* sched_result_list::get_cc(srslte::tti_point tti_rx, uint32_t enb_cc_idx) const
+{
+  const sf_sched_result* res = get_sf(tti_rx);
+  return res != nullptr ? res->get_cc(enb_cc_idx) : nullptr;
+}
+
+cc_sched_result* sched_result_list::get_cc(srslte::tti_point tti_rx, uint32_t enb_cc_idx)
+{
+  sf_sched_result* res = get_sf(tti_rx);
+  return res != nullptr ? res->get_cc(enb_cc_idx) : nullptr;
+}
+
 /*******************************************************
  *          PDCCH Allocation Methods
  *******************************************************/
@@ -473,7 +513,7 @@ void sf_sched::init(const sched_cell_params_t& cell_params_)
   max_msg3_prb = std::max(6u, cc_cfg->cfg.cell.nof_prb - (uint32_t)cc_cfg->cfg.nrb_pucch);
 }
 
-void sf_sched::new_tti(uint32_t tti_rx_)
+void sf_sched::new_tti(uint32_t tti_rx_, sf_sched_result* cc_results_)
 {
   // reset internal state
   bc_allocs.clear();
@@ -483,6 +523,7 @@ void sf_sched::new_tti(uint32_t tti_rx_)
   tti_params = tti_params_t{tti_rx_};
   tti_alloc.new_tti(tti_params);
+  cc_results = cc_results_;
 
   // setup first prb to be used for msg3 alloc. Account for potential PRACH alloc
   last_msg3_prb = cc_cfg->cfg.nrb_pucch;
@@ -988,29 +1029,31 @@ alloc_outcome_t sf_sched::alloc_msg3(sched_ue* user, const sched_interface::dl_s
   return ret;
 }
 
-void sf_sched::generate_sched_results(sf_sched_result* sf_result)
+void sf_sched::generate_sched_results()
 {
+  cc_sched_result* cc_result = cc_results->get_cc(cc_cfg->enb_cc_idx);
+
   /* Pick one of the possible DCI masks */
   pdcch_grid_t::alloc_result_t dci_result;
   //  tti_alloc.get_pdcch_grid().result_to_string();
-  tti_alloc.get_pdcch_grid().get_allocs(&dci_result, &sf_result->pdcch_mask);
+  tti_alloc.get_pdcch_grid().get_allocs(&dci_result, &cc_result->pdcch_mask);
 
   /* Register final CFI */
-  sf_result->dl_sched_result.cfi = tti_alloc.get_pdcch_grid().get_cfi();
+  cc_result->dl_sched_result.cfi = tti_alloc.get_pdcch_grid().get_cfi();
 
   /* Generate DCI formats and fill sched_result structs */
-  set_bc_sched_result(dci_result, &sf_result->dl_sched_result);
-  set_rar_sched_result(dci_result, &sf_result->dl_sched_result);
-  set_dl_data_sched_result(dci_result, &sf_result->dl_sched_result);
-  set_ul_sched_result(dci_result, &sf_result->ul_sched_result);
+  set_bc_sched_result(dci_result, &cc_result->dl_sched_result);
+  set_rar_sched_result(dci_result, &cc_result->dl_sched_result);
+  set_dl_data_sched_result(dci_result, &cc_result->dl_sched_result);
+  set_ul_sched_result(dci_result, &cc_result->ul_sched_result);
 
   /* Store remaining sf_sched results for this TTI */
-  sf_result->dl_mask = tti_alloc.get_dl_mask();
-  sf_result->ul_mask = tti_alloc.get_ul_mask();
-  sf_result->tti_params = tti_params;
+  cc_result->dl_mask = tti_alloc.get_dl_mask();
+  cc_result->ul_mask = tti_alloc.get_ul_mask();
+  cc_result->tti_params = tti_params;
 }
 
 uint32_t sf_sched::get_nof_ctrl_symbols() const

@@ -188,8 +188,9 @@ void sched_tester::before_sched()
 int sched_tester::process_results()
 {
-  const auto& sf_result = carrier_schedulers[CARRIER_IDX]->get_sf_result(tti_info.tti_params.tti_rx);
-  TESTASSERT(tti_info.tti_params.tti_rx == sf_result.tti_params.tti_rx);
+  const srsenb::cc_sched_result* cc_result =
+      sched_results.get_cc(srslte::tti_point{tti_info.tti_params.tti_rx}, CARRIER_IDX);
+  TESTASSERT(tti_info.tti_params.tti_rx == cc_result->tti_params.tti_rx);
 
   test_pdcch_collisions();
   TESTASSERT(ue_tester->test_all(0, tti_info.dl_sched_result[CARRIER_IDX], tti_info.ul_sched_result[CARRIER_IDX]) ==
@@ -255,9 +256,9 @@ int sched_tester::test_pdcch_collisions()
                              tti_info.dl_sched_result[CARRIER_IDX], tti_info.ul_sched_result[CARRIER_IDX]) == SRSLTE_SUCCESS);
 
   /* verify if sched_result "used_cce" coincide with sched "used_cce" */
-  const auto& sf_result = carrier_schedulers[CARRIER_IDX]->get_sf_result(tti_info.tti_params.tti_rx);
-  if (used_cce != sf_result.pdcch_mask) {
-    std::string mask_str = sf_result.pdcch_mask.to_string();
+  const srsenb::cc_sched_result* cc_result = sched_results.get_cc(tti_point{tti_info.tti_params.tti_rx}, CARRIER_IDX);
+  if (used_cce != cc_result->pdcch_mask) {
+    std::string mask_str = cc_result->pdcch_mask.to_string();
     TESTERROR("The used_cce do not match: (%s!=%s)\n", mask_str.c_str(), used_cce.to_string().c_str());
   }
@@ -365,7 +366,7 @@ int sched_tester::test_harqs()
 
 int sched_tester::test_sch_collisions()
 {
-  const auto& sf_result = carrier_schedulers[CARRIER_IDX]->get_sf_result(tti_info.tti_params.tti_rx);
+  const srsenb::cc_sched_result* cc_result = sched_results.get_cc(tti_point{tti_info.tti_params.tti_rx}, CARRIER_IDX);
 
   srsenb::prbmask_t ul_allocs(sched_cell_params[CARRIER_IDX].cfg.cell.nof_prb);
@@ -374,7 +375,7 @@ int sched_tester::test_sch_collisions()
                              tti_info.tti_params, tti_info.ul_sched_result[CARRIER_IDX], ul_allocs) == SRSLTE_SUCCESS);
 
   /* TEST: check whether cumulative UL PRB masks coincide */
-  if (ul_allocs != sf_result.ul_mask) {
+  if (ul_allocs != cc_result->ul_mask) {
     TESTERROR("The UL PRB mask and the scheduler result UL mask are not consistent\n");
   }
@@ -404,10 +405,10 @@ int sched_tester::test_sch_collisions()
   }
 
   // TEST: check if resulting DL mask is equal to scheduler internal DL mask
-  if (rbgmask != sf_result.dl_mask) {
+  if (rbgmask != cc_result->dl_mask) {
     TESTERROR("The DL PRB mask and the scheduler result DL mask are not consistent (%s!=%s)\n",
               rbgmask.to_string().c_str(),
-              sf_result.dl_mask.to_string().c_str());
+              cc_result->dl_mask.to_string().c_str());
   }
   return SRSLTE_SUCCESS;
 }
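
A note on the lifetime of these results: `sched_result_list` is a small ring buffer indexed by `tti_rx % TTIMOD_SZ`. `new_tti()` claims a slot and clears any per-carrier results left over from the previous cycle, while `get_sf()`/`get_cc()` return `nullptr` once the slot has been reused for a newer TTI, which is what `generate_tti_result()` relies on to detect the first run of a subframe. The sketch below shows that indexing logic with simplified stand-in types; it is not the actual srsenb code.

// Self-contained sketch of the ring-buffer indexing used by sched_result_list.
// cc_result/sf_result are simplified stand-ins for the srsenb structs.
#include <array>
#include <cstddef>
#include <cstdint>
#include <vector>

struct cc_result {
  uint32_t cfi = 0;
};
struct sf_result {
  uint32_t               tti_rx = 10241; // invalid sentinel, mirroring tti_params_t{10241}
  std::vector<cc_result> cc_list;
};

// Ring buffer keyed by tti_rx, mirroring how sched_result_list indexes its slots.
template <std::size_t N>
struct result_ring {
  std::array<sf_result, N> results;

  sf_result* new_tti(uint32_t tti_rx)
  {
    sf_result* res = &results[tti_rx % N];
    res->tti_rx    = tti_rx;
    res->cc_list.clear(); // wipe per-carrier results left in the recycled slot
    return res;
  }

  sf_result* get_sf(uint32_t tti_rx)
  {
    sf_result* res = &results[tti_rx % N];
    // If the slot was already reused for a different TTI, the old result is gone.
    return (res->tti_rx != tti_rx) ? nullptr : res;
  }
};

With TTIMOD_SZ slots, the result for TTI n is only retrievable until a later TTI mapping to the same slot overwrites it, so callers that need a result beyond that window must copy it out.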
