moved sf result to the sched class. This way, each carrier has a view of the results of the remaining carriers in the same subframe

master
Francisco Paisana 4 years ago
parent 5d33acdb53
commit 53b85691b5

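To make the commit message concrete, here is a minimal hypothetical consumer of the shared results (not part of this patch; it only uses the accessors introduced in the hunks below, would run inside carrier_sched where prev_sched_results is visible, and treats tti_rx and other_cc_idx as assumed locals):

// Hypothetical example (not in this commit): peek at the result another carrier
// already produced for the same subframe.
const cc_sched_result* other = prev_sched_results->get_cc(srslte::tti_point{tti_rx}, other_cc_idx);
if (other != nullptr) {
  // e.g. inspect other->ul_mask or other->dl_sched_result before allocating on this carrier
}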
@@ -153,6 +153,9 @@ protected:
// independent schedulers for each carrier
std::vector<std::unique_ptr<carrier_sched> > carrier_schedulers;
// Storage of past scheduling results
sched_result_list sched_results;
uint32_t last_tti = 0;
std::mutex sched_mutex;
bool configured = false;

@@ -32,18 +32,21 @@ class ra_sched;
class sched::carrier_sched
{
public:
explicit carrier_sched(rrc_interface_mac* rrc_, std::map<uint16_t, sched_ue>* ue_db_, uint32_t enb_cc_idx_);
explicit carrier_sched(rrc_interface_mac* rrc_,
std::map<uint16_t, sched_ue>* ue_db_,
uint32_t enb_cc_idx_,
sched_result_list* sched_results_);
~carrier_sched();
void reset();
void carrier_cfg(const sched_cell_params_t& sched_params_);
void set_dl_tti_mask(uint8_t* tti_mask, uint32_t nof_sfs);
const sf_sched_result& generate_tti_result(uint32_t tti_rx);
const cc_sched_result& generate_tti_result(uint32_t tti_rx);
int dl_rach_info(dl_sched_rar_info_t rar_info);
// getters
const ra_sched* get_ra_sched() const { return ra_sched_ptr.get(); }
//! Get a subframe result for a given tti
const sf_sched_result& get_sf_result(uint32_t tti_rx) const;
const sf_sched_result* get_sf_result(uint32_t tti_rx) const;
private:
//! Compute DL scheduler result for given TTI
@@ -51,8 +54,7 @@ private:
//! Compute UL scheduler result for given TTI
int alloc_ul_users(sf_sched* tti_sched);
//! Get sf_sched for a given TTI
sf_sched* get_sf_sched(uint32_t tti_rx);
sf_sched_result* get_next_sf_result(uint32_t tti_rx);
sf_sched* get_sf_sched(uint32_t tti_rx);
// args
const sched_cell_params_t* cc_cfg = nullptr;
@@ -67,9 +69,11 @@ private:
prbmask_t prach_mask;
prbmask_t pucch_mask;
// TTI result storage and management
std::array<sf_sched, TTIMOD_SZ> sf_scheds;
std::array<sf_sched_result, TTIMOD_SZ * 2> sf_sched_results;
// Subframe scheduling logic
std::array<sf_sched, TTIMOD_SZ> sf_scheds;
// scheduling results
sched_result_list* prev_sched_results;
std::vector<uint8_t> sf_dl_mask; ///< Some TTIs may be forbidden for DL sched due to MBMS

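Note that get_sf_result() now returns a pointer instead of a reference, so callers must handle the case where no result exists yet for the requested tti_rx. A minimal sketch of the adjusted call pattern (carrier and enb_cc_idx are assumed names):

const sf_sched_result* sf_res = carrier->get_sf_result(tti_rx);
if (sf_res != nullptr) {
  const cc_sched_result* cc = sf_res->get_cc(enb_cc_idx); // may still be nullptr if this CC was not scheduled
}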
@@ -42,11 +42,11 @@ struct alloc_outcome_t {
alloc_outcome_t(result_enum e) : result(e) {}
operator result_enum() { return result; }
operator bool() { return result == SUCCESS; }
const char* to_string() const;
const char* to_string() const;
};
//! Result of a Subframe sched computation
struct sf_sched_result {
struct cc_sched_result {
tti_params_t tti_params{10241};
sched_interface::dl_sched_res_t dl_sched_result = {};
sched_interface::ul_sched_res_t ul_sched_result = {};
@@ -55,6 +55,33 @@ struct sf_sched_result {
pdcch_mask_t pdcch_mask = {}; ///< Accumulation of all CCE allocations
};
struct sf_sched_result {
srslte::tti_point tti_rx;
std::vector<cc_sched_result> enb_cc_list;
cc_sched_result* new_cc(uint32_t enb_cc_idx);
const cc_sched_result* get_cc(uint32_t enb_cc_idx) const
{
return enb_cc_idx < enb_cc_list.size() ? &enb_cc_list[enb_cc_idx] : nullptr;
}
cc_sched_result* get_cc(uint32_t enb_cc_idx)
{
return enb_cc_idx < enb_cc_list.size() ? &enb_cc_list[enb_cc_idx] : nullptr;
}
};
struct sched_result_list {
public:
sf_sched_result* new_tti(srslte::tti_point tti_rx);
sf_sched_result* get_sf(srslte::tti_point tti_rx);
const sf_sched_result* get_sf(srslte::tti_point tti_rx) const;
const cc_sched_result* get_cc(srslte::tti_point tti_rx, uint32_t enb_cc_idx) const;
cc_sched_result* get_cc(srslte::tti_point tti_rx, uint32_t enb_cc_idx);
private:
std::array<sf_sched_result, TTIMOD_SZ> results;
};
//! Class responsible for managing a PDCCH CCE grid, namely CCE allocs, and avoiding collisions.
class pdcch_grid_t
{
@@ -207,8 +234,7 @@ public:
sf_sched::ctrl_alloc_t alloc_data;
sched_interface::dl_sched_rar_t rar_grant;
rar_alloc_t(const sf_sched::ctrl_alloc_t& c, const sched_interface::dl_sched_rar_t& r) : alloc_data(c), rar_grant(r)
{
}
{}
};
struct bc_alloc_t : public ctrl_alloc_t {
uint32_t rv = 0;
@@ -250,7 +276,7 @@ public:
// Control/Configuration Methods
sf_sched();
void init(const sched_cell_params_t& cell_params_);
void new_tti(uint32_t tti_rx_);
void new_tti(uint32_t tti_rx_, sf_sched_result* cc_results);
// DL alloc methods
alloc_outcome_t alloc_bc(uint32_t aggr_lvl, uint32_t sib_idx, uint32_t sib_ntx);
@@ -267,7 +293,7 @@ public:
bool alloc_phich(sched_ue* user, sched_interface::ul_sched_res_t* ul_sf_result);
// compute DCIs and generate dl_sched_result/ul_sched_result for a given TTI
void generate_sched_results(sf_sched_result* sf_result);
void generate_sched_results();
// dl_tti_sched itf
alloc_outcome_t alloc_dl_user(sched_ue* user, const rbgmask_t& user_mask, uint32_t pid) final;
@@ -302,6 +328,7 @@ private:
// consts
const sched_cell_params_t* cc_cfg = nullptr;
srslte::log_ref log_h;
sf_sched_result* cc_results; ///< Results of other CCs for the same Subframe
// internal state
sf_grid_t tti_alloc;

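sched_result_list keeps only the most recent TTIMOD_SZ subframes: new_tti() claims slot tti_rx % TTIMOD_SZ and clears its carrier list, and get_sf() returns nullptr once the slot has been reused for a different TTI. A simplified, self-contained sketch of that ring-buffer pattern (standalone types and an assumed array size, not the srsenb ones):

#include <array>
#include <cstdint>
#include <vector>

struct entry_t {
  uint32_t tti_rx = UINT32_MAX;   // sentinel: slot not yet used
  std::vector<int> cc_list;       // stands in for enb_cc_list
};

struct result_ring_t {
  std::array<entry_t, 8> results; // 8 is an assumed TTIMOD_SZ

  entry_t* new_tti(uint32_t tti_rx)
  {
    entry_t* e = &results[tti_rx % results.size()];
    e->tti_rx = tti_rx;
    e->cc_list.clear();           // results of the overwritten subframe are dropped
    return e;
  }
  entry_t* get_sf(uint32_t tti_rx)
  {
    entry_t* e = &results[tti_rx % results.size()];
    return (e->tti_rx != tti_rx) ? nullptr : e; // slot may belong to another TTI by now
  }
};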
@@ -144,7 +144,7 @@ void sched::init(rrc_interface_mac* rrc_)
rrc = rrc_;
// Initialize first carrier scheduler
carrier_schedulers.emplace_back(new carrier_sched{rrc, &ue_db, 0});
carrier_schedulers.emplace_back(new carrier_sched{rrc, &ue_db, 0, &sched_results});
reset();
}
@@ -183,7 +183,7 @@ int sched::cell_cfg(const std::vector<sched_interface::cell_cfg_t>& cell_cfg)
uint32_t prev_size = carrier_schedulers.size();
carrier_schedulers.resize(sched_cell_params.size());
for (uint32_t i = prev_size; i < sched_cell_params.size(); ++i) {
carrier_schedulers[i].reset(new carrier_sched{rrc, &ue_db, i});
carrier_schedulers[i].reset(new carrier_sched{rrc, &ue_db, i, &sched_results});
}
// setup all carriers cfg params
@@ -387,7 +387,7 @@ int sched::dl_sched(uint32_t tti_tx_dl, uint32_t cc_idx, sched_interface::dl_sch
if (cc_idx < carrier_schedulers.size()) {
// Compute scheduling Result for tti_rx
const sf_sched_result& tti_sched = carrier_schedulers[cc_idx]->generate_tti_result(tti_rx);
const cc_sched_result& tti_sched = carrier_schedulers[cc_idx]->generate_tti_result(tti_rx);
// copy result
sched_result = tti_sched.dl_sched_result;
@@ -408,7 +408,7 @@ int sched::ul_sched(uint32_t tti, uint32_t cc_idx, srsenb::sched_interface::ul_s
uint32_t tti_rx = sched_utils::tti_subtract(tti, FDD_HARQ_DELAY_UL_MS + FDD_HARQ_DELAY_DL_MS);
if (cc_idx < carrier_schedulers.size()) {
const sf_sched_result& tti_sched = carrier_schedulers[cc_idx]->generate_tti_result(tti_rx);
const cc_sched_result& tti_sched = carrier_schedulers[cc_idx]->generate_tti_result(tti_rx);
// copy result
sched_result = tti_sched.ul_sched_result;

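A consequence worth sketching (illustrative only; cc_idx and tti_rx assumed, assert needs <cassert>): generate_tti_result() stays idempotent per (tti_rx, carrier), but the guard is now the presence of the cc_sched_result in the shared list rather than the locally stored tti_params, so the second call for the same subframe, e.g. from ul_sched() after dl_sched(), simply returns the stored entry.

// Illustrative only: both calls refer to the same stored cc_sched_result.
const cc_sched_result& r1 = carrier_schedulers[cc_idx]->generate_tti_result(tti_rx); // schedules the subframe
const cc_sched_result& r2 = carrier_schedulers[cc_idx]->generate_tti_result(tti_rx); // returns the stored entry
assert(&r1 == &r2);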
@@ -259,11 +259,13 @@ void ra_sched::reset()
sched::carrier_sched::carrier_sched(rrc_interface_mac* rrc_,
std::map<uint16_t, sched_ue>* ue_db_,
uint32_t enb_cc_idx_) :
uint32_t enb_cc_idx_,
sched_result_list* sched_results_) :
rrc(rrc_),
ue_db(ue_db_),
log_h(srslte::logmap::get("MAC ")),
enb_cc_idx(enb_cc_idx_)
enb_cc_idx(enb_cc_idx_),
prev_sched_results(sched_results_)
{
sf_dl_mask.resize(1, 0);
}
@@ -311,20 +313,21 @@ void sched::carrier_sched::set_dl_tti_mask(uint8_t* tti_mask, uint32_t nof_sfs)
sf_dl_mask.assign(tti_mask, tti_mask + nof_sfs);
}
const sf_sched_result& sched::carrier_sched::generate_tti_result(uint32_t tti_rx)
const cc_sched_result& sched::carrier_sched::generate_tti_result(uint32_t tti_rx)
{
sf_sched_result* sf_result = get_next_sf_result(tti_rx);
cc_sched_result* cc_result = prev_sched_results->get_cc(srslte::tti_point{tti_rx}, enb_cc_idx);
// if this is the first time the TTI is run, reset vars
if (tti_rx != sf_result->tti_params.tti_rx) {
sf_sched* tti_sched = get_sf_sched(tti_rx);
*sf_result = {};
if (cc_result == nullptr) {
sf_sched* tti_sched = get_sf_sched(tti_rx);
sf_sched_result* sf_result = prev_sched_results->get_sf(srslte::tti_point{tti_rx});
cc_result = sf_result->new_cc(enb_cc_idx);
bool dl_active = sf_dl_mask[tti_sched->get_tti_tx_dl() % sf_dl_mask.size()] == 0;
/* Schedule PHICH */
for (auto& ue_pair : *ue_db) {
tti_sched->alloc_phich(&ue_pair.second, &sf_result->ul_sched_result);
tti_sched->alloc_phich(&ue_pair.second, &cc_result->ul_sched_result);
}
/* Schedule DL control data */
@@ -353,15 +356,15 @@ const sf_sched_result& sched::carrier_sched::generate_tti_result(uint32_t tti_rx
}
/* Select the winning DCI allocation combination, store all the scheduling results */
tti_sched->generate_sched_results(sf_result);
tti_sched->generate_sched_results();
/* Reset ue harq pending ack state, clean-up blocked pids */
for (auto& user : *ue_db) {
user.second.finish_tti(sf_result->tti_params, enb_cc_idx);
user.second.finish_tti(cc_result->tti_params, enb_cc_idx);
}
}
return *sf_result;
return *cc_result;
}
void sched::carrier_sched::alloc_dl_users(sf_sched* tti_result)
@@ -405,20 +408,20 @@ sf_sched* sched::carrier_sched::get_sf_sched(uint32_t tti_rx)
{
sf_sched* ret = &sf_scheds[tti_rx % sf_scheds.size()];
if (ret->get_tti_rx() != tti_rx) {
// start new TTI. Bind the struct where the result is going to be stored
ret->new_tti(tti_rx);
sf_sched_result* sf_res = prev_sched_results->get_sf(srslte::tti_point{tti_rx});
if (sf_res == nullptr) {
// Reset if tti_rx has not yet been set in the sched results
sf_res = prev_sched_results->new_tti(srslte::tti_point{tti_rx});
}
// start new TTI for the given CC.
ret->new_tti(tti_rx, sf_res);
}
return ret;
}
sf_sched_result* sched::carrier_sched::get_next_sf_result(uint32_t tti_rx)
{
return &sf_sched_results[tti_rx % sf_sched_results.size()];
}
const sf_sched_result& sched::carrier_sched::get_sf_result(uint32_t tti_rx) const
const sf_sched_result* sched::carrier_sched::get_sf_result(uint32_t tti_rx) const
{
return sf_sched_results[tti_rx % sf_sched_results.size()];
return prev_sched_results->get_sf(srslte::tti_point{tti_rx});
}
int sched::carrier_sched::dl_rach_info(dl_sched_rar_info_t rar_info)

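How the sharing plays out across carriers, as a sketch (member access simplified, as if from inside sched; tti_rx assumed): the first carrier to schedule a given tti_rx creates the sf_sched_result in the shared list, and every other carrier scheduling the same subframe finds that entry and only appends its own cc_sched_result.

// Sketch: two carriers scheduling the same tti_rx share one sf_sched_result.
carrier_schedulers[0]->generate_tti_result(tti_rx); // creates the sf_sched_result and CC entry 0
carrier_schedulers[1]->generate_tti_result(tti_rx); // reuses the sf_sched_result, adds CC entry 1
const sf_sched_result* sf = sched_results.get_sf(srslte::tti_point{tti_rx});
// sf->enb_cc_list now holds the results of both carriers for this subframe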
@@ -52,6 +52,46 @@ tti_params_t::tti_params_t(uint32_t tti_rx_) :
sfn_tx_dl(TTI_ADD(tti_rx, FDD_HARQ_DELAY_UL_MS) / 10)
{}
cc_sched_result* sf_sched_result::new_cc(uint32_t enb_cc_idx)
{
if (enb_cc_idx >= enb_cc_list.size()) {
enb_cc_list.resize(enb_cc_idx + 1);
}
return &enb_cc_list[enb_cc_idx];
}
sf_sched_result* sched_result_list::new_tti(srslte::tti_point tti_rx)
{
sf_sched_result* res = &results[tti_rx.to_uint() % results.size()];
res->tti_rx = tti_rx;
res->enb_cc_list.clear();
return res;
}
sf_sched_result* sched_result_list::get_sf(srslte::tti_point tti_rx)
{
sf_sched_result* res = &results[tti_rx.to_uint() % results.size()];
return (res->tti_rx != tti_rx) ? nullptr : res;
}
const sf_sched_result* sched_result_list::get_sf(srslte::tti_point tti_rx) const
{
const sf_sched_result* res = &results[tti_rx.to_uint() % results.size()];
return (res->tti_rx != tti_rx) ? nullptr : res;
}
const cc_sched_result* sched_result_list::get_cc(srslte::tti_point tti_rx, uint32_t enb_cc_idx) const
{
const sf_sched_result* res = get_sf(tti_rx);
return res != nullptr ? res->get_cc(enb_cc_idx) : nullptr;
}
cc_sched_result* sched_result_list::get_cc(srslte::tti_point tti_rx, uint32_t enb_cc_idx)
{
sf_sched_result* res = get_sf(tti_rx);
return res != nullptr ? res->get_cc(enb_cc_idx) : nullptr;
}
/*******************************************************
* PDCCH Allocation Methods
*******************************************************/
@@ -473,7 +513,7 @@ void sf_sched::init(const sched_cell_params_t& cell_params_)
max_msg3_prb = std::max(6u, cc_cfg->cfg.cell.nof_prb - (uint32_t)cc_cfg->cfg.nrb_pucch);
}
void sf_sched::new_tti(uint32_t tti_rx_)
void sf_sched::new_tti(uint32_t tti_rx_, sf_sched_result* cc_results_)
{
// reset internal state
bc_allocs.clear();
@@ -483,6 +523,7 @@ void sf_sched::new_tti(uint32_t tti_rx_)
tti_params = tti_params_t{tti_rx_};
tti_alloc.new_tti(tti_params);
cc_results = cc_results_;
// setup first prb to be used for msg3 alloc. Account for potential PRACH alloc
last_msg3_prb = cc_cfg->cfg.nrb_pucch;
@@ -988,29 +1029,31 @@ alloc_outcome_t sf_sched::alloc_msg3(sched_ue* user, const sched_interface::dl_s
return ret;
}
void sf_sched::generate_sched_results(sf_sched_result* sf_result)
void sf_sched::generate_sched_results()
{
cc_sched_result* cc_result = cc_results->get_cc(cc_cfg->enb_cc_idx);
/* Pick one of the possible DCI masks */
pdcch_grid_t::alloc_result_t dci_result;
// tti_alloc.get_pdcch_grid().result_to_string();
tti_alloc.get_pdcch_grid().get_allocs(&dci_result, &sf_result->pdcch_mask);
tti_alloc.get_pdcch_grid().get_allocs(&dci_result, &cc_result->pdcch_mask);
/* Register final CFI */
sf_result->dl_sched_result.cfi = tti_alloc.get_pdcch_grid().get_cfi();
cc_result->dl_sched_result.cfi = tti_alloc.get_pdcch_grid().get_cfi();
/* Generate DCI formats and fill sched_result structs */
set_bc_sched_result(dci_result, &sf_result->dl_sched_result);
set_bc_sched_result(dci_result, &cc_result->dl_sched_result);
set_rar_sched_result(dci_result, &sf_result->dl_sched_result);
set_rar_sched_result(dci_result, &cc_result->dl_sched_result);
set_dl_data_sched_result(dci_result, &sf_result->dl_sched_result);
set_dl_data_sched_result(dci_result, &cc_result->dl_sched_result);
set_ul_sched_result(dci_result, &sf_result->ul_sched_result);
set_ul_sched_result(dci_result, &cc_result->ul_sched_result);
/* Store remaining sf_sched results for this TTI */
sf_result->dl_mask = tti_alloc.get_dl_mask();
sf_result->ul_mask = tti_alloc.get_ul_mask();
sf_result->tti_params = tti_params;
cc_result->dl_mask = tti_alloc.get_dl_mask();
cc_result->ul_mask = tti_alloc.get_ul_mask();
cc_result->tti_params = tti_params;
}
uint32_t sf_sched::get_nof_ctrl_symbols() const

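One detail of new_cc() worth illustrating (sketch only, using the types added above): because it resizes enb_cc_list, creating the entry for a higher carrier index also materializes default-constructed entries for the lower indices, which get_cc() will then return as non-null.

// Sketch of new_cc() resize semantics:
sf_sched_result sf_res;
sf_res.new_cc(2);                              // resizes enb_cc_list to 3 entries
const cc_sched_result* cc0 = sf_res.get_cc(0); // non-null, default-constructed (tti_params{10241})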
@@ -188,8 +188,9 @@ void sched_tester::before_sched()
int sched_tester::process_results()
{
const auto& sf_result = carrier_schedulers[CARRIER_IDX]->get_sf_result(tti_info.tti_params.tti_rx);
TESTASSERT(tti_info.tti_params.tti_rx == sf_result.tti_params.tti_rx);
const srsenb::cc_sched_result* cc_result =
sched_results.get_cc(srslte::tti_point{tti_info.tti_params.tti_rx}, CARRIER_IDX);
TESTASSERT(tti_info.tti_params.tti_rx == cc_result->tti_params.tti_rx);
test_pdcch_collisions();
TESTASSERT(ue_tester->test_all(0, tti_info.dl_sched_result[CARRIER_IDX], tti_info.ul_sched_result[CARRIER_IDX]) ==
@@ -255,9 +256,9 @@ int sched_tester::test_pdcch_collisions()
tti_info.dl_sched_result[CARRIER_IDX], tti_info.ul_sched_result[CARRIER_IDX]) == SRSLTE_SUCCESS);
/* verify if sched_result "used_cce" coincides with sched "used_cce" */
const auto& sf_result = carrier_schedulers[CARRIER_IDX]->get_sf_result(tti_info.tti_params.tti_rx);
if (used_cce != sf_result.pdcch_mask) {
std::string mask_str = sf_result.pdcch_mask.to_string();
const srsenb::cc_sched_result* cc_result = sched_results.get_cc(tti_point{tti_info.tti_params.tti_rx}, CARRIER_IDX);
if (used_cce != cc_result->pdcch_mask) {
std::string mask_str = cc_result->pdcch_mask.to_string();
TESTERROR("The used_cce do not match: (%s!=%s)\n", mask_str.c_str(), used_cce.to_string().c_str());
}
@@ -365,7 +366,7 @@ int sched_tester::test_harqs()
int sched_tester::test_sch_collisions()
{
const auto& sf_result = carrier_schedulers[CARRIER_IDX]->get_sf_result(tti_info.tti_params.tti_rx);
const srsenb::cc_sched_result* cc_result = sched_results.get_cc(tti_point{tti_info.tti_params.tti_rx}, CARRIER_IDX);
srsenb::prbmask_t ul_allocs(sched_cell_params[CARRIER_IDX].cfg.cell.nof_prb);
@@ -374,7 +375,7 @@ int sched_tester::test_sch_collisions()
tti_info.tti_params, tti_info.ul_sched_result[CARRIER_IDX], ul_allocs) == SRSLTE_SUCCESS);
/* TEST: check whether cumulative UL PRB masks coincide */
if (ul_allocs != sf_result.ul_mask) {
if (ul_allocs != cc_result->ul_mask) {
TESTERROR("The UL PRB mask and the scheduler result UL mask are not consistent\n");
}
@@ -404,10 +405,10 @@ int sched_tester::test_sch_collisions()
}
// TEST: check if resulting DL mask is equal to scheduler internal DL mask
if (rbgmask != sf_result.dl_mask) {
if (rbgmask != cc_result->dl_mask) {
TESTERROR("The DL PRB mask and the scheduler result DL mask are not consistent (%s!=%s)\n",
rbgmask.to_string().c_str(),
sf_result.dl_mask.to_string().c_str());
cc_result->dl_mask.to_string().c_str());
}
return SRSLTE_SUCCESS;
}
