Changed RAR/Msg3 scheduling to accommodate multiple RARs per TTI

master
Francisco Paisana 5 years ago committed by Francisco Paisana
parent dc782e514b
commit a74dcd947f

@ -42,7 +42,7 @@ public:
// getters
const ra_sched* get_ra_sched() const { return ra_sched_ptr.get(); }
const sf_sched* get_sf_sched(uint32_t tti_rx) const { return &sf_scheds[tti_rx % sf_scheds.size()]; }
const sf_sched* get_sf_sched_ptr(uint32_t tti_rx) const { return &sf_scheds[tti_rx % sf_scheds.size()]; }
private:
void generate_phich(sf_sched* tti_sched);
@ -114,20 +114,13 @@ public:
using dl_sched_rar_info_t = sched_interface::dl_sched_rar_info_t;
using dl_sched_rar_t = sched_interface::dl_sched_rar_t;
using dl_sched_rar_grant_t = sched_interface::dl_sched_rar_grant_t;
struct pending_msg3_t {
bool enabled = false;
uint16_t rnti = 0;
uint32_t L = 0;
uint32_t n_prb = 0;
uint32_t mcs = 0;
};
explicit ra_sched(const sched::cell_cfg_t& cfg_, srslte::log* log_, std::map<uint16_t, sched_ue>& ue_db_);
void dl_sched(sf_sched* tti_sched);
void ul_sched(sf_sched* tti_sched);
int dl_rach_info(dl_sched_rar_info_t rar_info);
void reset();
const pending_msg3_t& find_pending_msg3(uint32_t tti) const;
void sched_msg3(sf_sched* sf_msg3_sched, const sched_interface::dl_sched_res_t& dl_sched_result);
private:
// args
@ -135,9 +128,7 @@ private:
const sched::cell_cfg_t* cfg = nullptr;
std::map<uint16_t, sched_ue>* ue_db = nullptr;
std::queue<dl_sched_rar_info_t> pending_rars;
std::array<pending_msg3_t, TTIMOD_SZ> pending_msg3;
uint32_t tti_tx_dl = 0;
std::deque<sf_sched::pending_rar_t> pending_rars;
uint32_t rar_aggr_level = 2;
};

@ -26,6 +26,7 @@
#include "scheduler_ue.h"
#include "srslte/common/bounded_bitset.h"
#include "srslte/common/log.h"
#include <deque>
#include <vector>
namespace srsenb {
@ -182,10 +183,12 @@ public:
uint32_t req_bytes;
alloc_type_t alloc_type;
};
struct rar_alloc_t : public ctrl_alloc_t {
struct rar_alloc_t {
sf_sched::ctrl_alloc_t alloc_data;
sched_interface::dl_sched_rar_t rar_grant;
rar_alloc_t() = default;
explicit rar_alloc_t(const ctrl_alloc_t& c) : ctrl_alloc_t(c) {}
rar_alloc_t(const sf_sched::ctrl_alloc_t& c, const sched_interface::dl_sched_rar_t& r) : alloc_data(c), rar_grant(r)
{
}
};
struct bc_alloc_t : public ctrl_alloc_t {
uint32_t rv = 0;
@ -210,7 +213,17 @@ public:
bool is_msg3() const { return type == MSG3; }
bool needs_pdcch() const { return type == NEWTX or type == ADAPT_RETX; }
};
typedef std::pair<alloc_outcome_t, const rar_alloc_t*> rar_code_t;
// Parameters of one pending Msg3 UL grant, recorded after its RAR has been
// scheduled (filled in ra_sched::sched_msg3 from the RAR's RIV/MCS fields)
// and consumed later when the UL subframe is scheduled.
struct pending_msg3_t {
uint16_t rnti = 0; // temporary C-RNTI assigned in the RAR grant
uint32_t L = 0; // number of allocated UL PRBs (decoded from the grant RIV)
uint32_t n_prb = 0; // first allocated UL PRB (decoded from the grant RIV)
uint32_t mcs = 0; // truncated MCS signalled in the RAR UL grant
};
// One pending RAR PDU: aggregates all RACH detections that share the same
// RA-RNTI (same PRACH subframe index for FDD), so a single RAR can carry
// multiple Msg3 grants.
struct pending_rar_t {
uint16_t ra_rnti = 0; // RA-RNTI = 1 + t_id (+ f_id; f_id=0 for FDD)
uint32_t nof_grants = 0; // number of valid entries in msg3_grant
// NOTE(review): callers appear to append via msg3_grant[nof_grants] without
// checking nof_grants < MAX_RAR_LIST — confirm an overflow guard exists.
sched_interface::dl_sched_rar_info_t msg3_grant[sched_interface::MAX_RAR_LIST];
};
typedef std::pair<alloc_outcome_t, const ctrl_alloc_t> ctrl_code_t;
// TTI scheduler result
@ -220,11 +233,19 @@ public:
void init(const sched_params_t& sched_params_, uint32_t enb_cc_idx_);
void new_tti(uint32_t tti_rx_, uint32_t start_cfi);
// DL alloc
alloc_outcome_t alloc_bc(uint32_t aggr_lvl, uint32_t sib_idx, uint32_t sib_ntx);
alloc_outcome_t alloc_paging(uint32_t aggr_lvl, uint32_t paging_payload);
rar_code_t
alloc_rar(uint32_t aggr_lvl, const sched_interface::dl_sched_rar_t& rar_grant, uint32_t rar_tti, uint32_t buf_rar);
alloc_outcome_t alloc_rar(uint32_t aggr_lvl, const pending_rar_t& rar_grant);
// UL alloc
alloc_outcome_t alloc_msg3(const pending_msg3_t& msg3);
alloc_outcome_t
alloc_ul(sched_ue* user, ul_harq_proc::ul_alloc_t alloc, sf_sched::ul_alloc_t::type_t alloc_type, uint32_t mcs = 0);
void generate_dcis();
// dl_tti_sched itf
alloc_outcome_t alloc_dl_user(sched_ue* user, const rbgmask_t& user_mask, uint32_t pid) final;
uint32_t get_tti_tx_dl() const final { return tti_params.tti_tx_dl; }
@ -232,7 +253,6 @@ public:
const rbgmask_t& get_dl_mask() const final { return tti_alloc.get_dl_mask(); }
// ul_tti_sched itf
alloc_outcome_t alloc_ul_user(sched_ue* user, ul_harq_proc::ul_alloc_t alloc) final;
alloc_outcome_t alloc_ul_msg3(sched_ue* user, ul_harq_proc::ul_alloc_t alloc, uint32_t mcs);
const prbmask_t& get_ul_mask() const final { return tti_alloc.get_ul_mask(); }
uint32_t get_tti_tx_ul() const final { return tti_params.tti_tx_ul; }
@ -245,15 +265,14 @@ public:
uint32_t get_tti_rx() const { return tti_params.tti_rx; }
uint32_t get_sfn() const { return tti_params.sfn; }
uint32_t get_sf_idx() const { return tti_params.sf_idx; }
uint32_t get_cc_idx() const { return enb_cc_idx; }
const tti_params_t& get_tti_params() const { return tti_params; }
std::deque<pending_msg3_t>& get_pending_msg3() { return pending_msg3s; }
const std::deque<pending_msg3_t>& get_pending_msg3() const { return pending_msg3s; }
private:
bool is_dl_alloc(sched_ue* user) const final;
bool is_ul_alloc(sched_ue* user) const final;
ctrl_code_t alloc_dl_ctrl(uint32_t aggr_lvl, uint32_t tbs_bytes, uint16_t rnti);
alloc_outcome_t
alloc_ul(sched_ue* user, ul_harq_proc::ul_alloc_t alloc, sf_sched::ul_alloc_t::type_t alloc_type, uint32_t msg3 = 0);
int generate_format1a(uint32_t rb_start,
uint32_t l_crb,
uint32_t tbs,
@ -273,10 +292,12 @@ private:
// internal state
tti_params_t tti_params{10241};
sf_grid_t tti_alloc;
std::vector<rar_alloc_t> rar_allocs;
std::vector<bc_alloc_t> bc_allocs;
std::vector<rar_alloc_t> rar_allocs;
std::vector<dl_alloc_t> data_allocs;
std::vector<ul_alloc_t> ul_data_allocs;
std::deque<pending_msg3_t> pending_msg3s;
uint32_t last_msg3_prb = 0;
};
} // namespace srsenb

@ -146,101 +146,68 @@ ra_sched::ra_sched(const sched::cell_cfg_t& cfg_, srslte::log* log_, std::map<ui
// discard it.
void ra_sched::dl_sched(srsenb::sf_sched* tti_sched)
{
tti_tx_dl = tti_sched->get_tti_tx_dl();
uint32_t tti_tx_dl = tti_sched->get_tti_tx_dl();
rar_aggr_level = 2;
while (not pending_rars.empty()) {
sf_sched::pending_rar_t& rar = pending_rars.front();
uint32_t prach_tti = rar.msg3_grant[0].prach_tti;
// Discard all RARs out of the window. The first one inside the window is scheduled, if we can't we exit
while (!pending_rars.empty()) {
dl_sched_rar_info_t rar = pending_rars.front();
if (not sched_utils::is_in_tti_interval(tti_tx_dl, rar.prach_tti + 3, rar.prach_tti + 3 + cfg->prach_rar_window)) {
if (tti_tx_dl >= rar.prach_tti + 3 + cfg->prach_rar_window) {
if (not sched_utils::is_in_tti_interval(tti_tx_dl, prach_tti + 3, prach_tti + 3 + cfg->prach_rar_window)) {
if (tti_tx_dl >= prach_tti + 3 + cfg->prach_rar_window) {
log_h->console("SCHED: Could not transmit RAR within the window (RA TTI=%d, Window=%d, Now=%d)\n",
rar.prach_tti,
prach_tti,
cfg->prach_rar_window,
tti_tx_dl);
log_h->error("SCHED: Could not transmit RAR within the window (RA TTI=%d, Window=%d, Now=%d)\n",
rar.prach_tti,
prach_tti,
cfg->prach_rar_window,
tti_tx_dl);
// Remove from pending queue and get next one if window has passed already
pending_rars.pop();
pending_rars.pop_front();
continue;
}
// If window not yet started do not look for more pending RARs
return;
}
/* Since we do a fixed Msg3 scheduling for all RAR, we can only allocate 1 RAR per TTI.
* If we have enough space in the window, every call to this function we'll allocate 1 pending RAR and associate a
* Msg3 transmission
*/
dl_sched_rar_t rar_grant;
uint32_t L_prb = 3;
uint32_t n_prb = cfg->nrb_pucch > 0 ? cfg->nrb_pucch : 2;
bzero(&rar_grant, sizeof(rar_grant));
uint32_t rba = srslte_ra_type2_to_riv(L_prb, n_prb, cfg->cell.nof_prb);
dl_sched_rar_grant_t* grant = &rar_grant.msg3_grant[0];
grant->grant.tpc_pusch = 3;
grant->grant.trunc_mcs = 0;
grant->grant.rba = rba;
grant->data = rar;
rar_grant.nof_grants++;
// Try to schedule DCI + RBGs for RAR Grant
sf_sched::rar_code_t ret = tti_sched->alloc_rar(rar_aggr_level,
rar_grant,
rar.prach_tti,
7 * rar_grant.nof_grants); // fixme: check RAR size
alloc_outcome_t ret = tti_sched->alloc_rar(rar_aggr_level, rar);
// If we can allocate, schedule Msg3 and remove from pending
if (!ret.first) {
if (not ret) {
return;
}
// Schedule Msg3 only if there is a requirement for Msg3 data
uint32_t pending_tti = (tti_sched->get_tti_tx_dl() + MSG3_DELAY_MS + TX_DELAY) % TTIMOD_SZ;
pending_msg3[pending_tti].enabled = true;
pending_msg3[pending_tti].rnti = rar.temp_crnti; // TODO
pending_msg3[pending_tti].L = L_prb;
pending_msg3[pending_tti].n_prb = n_prb;
dl_sched_rar_grant_t* last_msg3 = &rar_grant.msg3_grant[rar_grant.nof_grants - 1];
pending_msg3[pending_tti].mcs = last_msg3->grant.trunc_mcs;
log_h->info("SCHED: Queueing Msg3 for rnti=0x%x at tti=%d\n",
rar.temp_crnti,
tti_sched->get_tti_tx_dl() + MSG3_DELAY_MS + TX_DELAY);
// Remove pending RAR
pending_rars.pop();
return;
pending_rars.pop_front();
}
}
// Schedules Msg3
void ra_sched::ul_sched(sf_sched* tti_sched)
{
uint32_t pending_tti = tti_sched->get_tti_tx_ul() % TTIMOD_SZ;
/* schedule pending Msg3s */
while (not tti_sched->get_pending_msg3().empty()) {
sf_sched::pending_msg3_t& msg3 = tti_sched->get_pending_msg3().front();
// check if there is a Msg3 to allocate
if (not pending_msg3[pending_tti].enabled) {
return;
// Verify if user still exists
auto user_it = ue_db->find(msg3.rnti);
if (user_it == ue_db->end()) {
log_h->warning("SCHED: Msg3 allocated for user rnti=0x%x that no longer exists\n", msg3.rnti);
tti_sched->get_pending_msg3().pop_front();
continue;
}
uint16_t rnti = pending_msg3[pending_tti].rnti;
auto user_it = ue_db->find(rnti);
if (user_it == ue_db->end()) {
log_h->warning("SCHED: Msg3 allocated for user rnti=0x%x that no longer exists\n", rnti);
return;
// Allocate RBGs and HARQ for pending Msg3
ul_harq_proc::ul_alloc_t msg3_alloc = {msg3.n_prb, msg3.L};
if (not tti_sched->alloc_ul(&user_it->second, msg3_alloc, sf_sched::ul_alloc_t::MSG3, msg3.mcs)) {
log_h->warning("SCHED: Could not allocate msg3 within (%d,%d)\n", msg3.n_prb, msg3.n_prb + msg3.L);
}
/* Allocate RBGs and HARQ for Msg3 */
ul_harq_proc::ul_alloc_t msg3 = {pending_msg3[pending_tti].n_prb, pending_msg3[pending_tti].L};
if (not tti_sched->alloc_ul_msg3(&user_it->second, msg3, pending_msg3[pending_tti].mcs)) {
log_h->warning("SCHED: Could not allocate msg3 within (%d,%d)\n", msg3.RB_start, msg3.RB_start + msg3.L);
return;
tti_sched->get_pending_msg3().pop_front();
}
pending_msg3[pending_tti].enabled = false;
}
int ra_sched::dl_rach_info(dl_sched_rar_info_t rar_info)
@ -251,25 +218,56 @@ int ra_sched::dl_rach_info(dl_sched_rar_info_t rar_info)
rar_info.temp_crnti,
rar_info.ta_cmd,
rar_info.msg3_size);
pending_rars.push(rar_info);
return 0;
// RA-RNTI = 1 + t_id + f_id
// t_id = index of first subframe specified by PRACH (0<=t_id<10)
// f_id = index of the PRACH within subframe, in ascending order of freq domain (0<=f_id<6) (for FDD, f_id=0)
uint16_t ra_rnti = 1 + (uint16_t)(rar_info.prach_tti % 10u);
// find pending rar with same RA-RNTI
for (sf_sched::pending_rar_t& r : pending_rars) {
if (r.ra_rnti == ra_rnti) {
r.msg3_grant[r.nof_grants] = rar_info;
r.nof_grants++;
return SRSLTE_SUCCESS;
}
}
// create new RAR
sf_sched::pending_rar_t p;
p.ra_rnti = ra_rnti;
p.nof_grants = 1;
p.msg3_grant[0] = rar_info;
pending_rars.push_back(p);
return SRSLTE_SUCCESS;
}
void ra_sched::reset()
{
tti_tx_dl = 0;
for (auto& msg3 : pending_msg3) {
msg3 = {};
}
while (not pending_rars.empty()) {
pending_rars.pop();
}
pending_rars.clear();
}
const ra_sched::pending_msg3_t& ra_sched::find_pending_msg3(uint32_t tti) const
void ra_sched::sched_msg3(sf_sched* sf_msg3_sched, const sched_interface::dl_sched_res_t& dl_sched_result)
{
uint32_t pending_tti = tti % TTIMOD_SZ;
return pending_msg3[pending_tti];
// Go through all scheduled RARs, and pre-allocate Msg3s in UL channel accordingly
for (uint32_t i = 0; i < dl_sched_result.nof_rar_elems; ++i) {
for (uint32_t j = 0; j < dl_sched_result.rar[i].nof_grants; ++j) {
auto& grant = dl_sched_result.rar[i].msg3_grant[j];
sf_sched::pending_msg3_t msg3;
srslte_ra_type2_from_riv(grant.grant.rba, &msg3.L, &msg3.n_prb, cfg->cell.nof_prb, cfg->cell.nof_prb);
msg3.mcs = grant.grant.trunc_mcs;
msg3.rnti = grant.data.temp_crnti;
if (not sf_msg3_sched->alloc_msg3(msg3)) {
log_h->error(
"SCHED: Failed to allocate Msg3 for rnti=0x%x at tti=%d\n", msg3.rnti, sf_msg3_sched->get_tti_tx_ul());
} else {
log_h->debug("SCHED: Queueing Msg3 for rnti=0x%x at tti=%d\n", msg3.rnti, sf_msg3_sched->get_tti_tx_ul());
}
}
}
}
/*******************************************************
@ -342,6 +340,7 @@ sf_sched* sched::carrier_sched::generate_tti_result(uint32_t tti_rx)
// if it is the first time tti is run, reset vars
if (tti_rx != tti_sched->get_tti_rx()) {
uint32_t start_cfi = sched_params->sched_cfg.nof_ctrl_symbols;
bool dl_active = sf_dl_mask[tti_sched->get_tti_tx_dl() % sf_dl_mask.size()] == 0;
tti_sched->new_tti(tti_rx, start_cfi);
// Protects access to pending_rar[], pending_msg3[], ra_sched, bc_sched, rlc buffers
@ -351,7 +350,7 @@ sf_sched* sched::carrier_sched::generate_tti_result(uint32_t tti_rx)
generate_phich(tti_sched);
/* Schedule DL control data */
if (sf_dl_mask[tti_sched->get_tti_tx_dl() % sf_dl_mask.size()] == 0) {
if (dl_active) {
/* Schedule Broadcast data (SIB and paging) */
bc_sched_ptr->dl_sched(tti_sched);
@ -374,6 +373,12 @@ sf_sched* sched::carrier_sched::generate_tti_result(uint32_t tti_rx)
/* Select the winner DCI allocation combination */
tti_sched->generate_dcis();
/* Enqueue Msg3s derived from allocated RARs */
if (dl_active) {
sf_sched* sf_msg3_sched = get_sf_sched(tti_rx + MSG3_DELAY_MS);
ra_sched_ptr->sched_msg3(sf_msg3_sched, tti_sched->dl_sched_result);
}
/* clean-up blocked pids */
for (auto& user : *ue_db) {
user.second.finish_tti(tti_sched->get_tti_params(), enb_cc_idx);
@ -424,11 +429,7 @@ void sched::carrier_sched::alloc_dl_users(sf_sched* tti_result)
// NOTE: In case of 6 PRBs, do not transmit if there is going to be a PRACH in the UL to avoid collisions
if (sched_params->cfg->cell.nof_prb == 6) {
uint32_t tti_rx_ack = TTI_RX_ACK(tti_result->get_tti_rx());
bool msg3_enabled = false;
if (ra_sched_ptr != nullptr and ra_sched_ptr->find_pending_msg3(tti_rx_ack).enabled) {
msg3_enabled = true;
}
if (srslte_prach_tti_opportunity_config_fdd(sched_params->cfg->prach_config, tti_rx_ack, -1) or msg3_enabled) {
if (srslte_prach_tti_opportunity_config_fdd(sched_params->cfg->prach_config, tti_rx_ack, -1)) {
tti_result->get_dl_mask().fill(0, tti_result->get_dl_mask().size());
}
}

@ -395,17 +395,24 @@ void sf_sched::new_tti(uint32_t tti_rx_, uint32_t start_cfi)
tti_params = tti_params_t{tti_rx_};
tti_alloc.new_tti(tti_params, start_cfi);
// internal state
rar_allocs.clear();
// reset sf result
pdcch_mask.reset();
pdcch_mask.resize(tti_alloc.get_pdcch_grid().nof_cces());
dl_sched_result = {};
ul_sched_result = {};
// reset internal state
bc_allocs.clear();
rar_allocs.clear();
data_allocs.clear();
ul_data_allocs.clear();
// TTI result
pdcch_mask.reset();
pdcch_mask.resize(tti_alloc.get_pdcch_grid().nof_cces());
bzero(&dl_sched_result, sizeof(dl_sched_result));
bzero(&ul_sched_result, sizeof(ul_sched_result));
// setup first prb to be used for msg3 alloc
last_msg3_prb = sched_params->cfg->nrb_pucch;
uint32_t tti_msg3_alloc = TTI_ADD(tti_params.tti_tx_ul, MSG3_DELAY_MS);
if (srslte_prach_tti_opportunity_config_fdd(sched_params->cfg->prach_config, tti_msg3_alloc, -1)) {
last_msg3_prb = std::max(last_msg3_prb, sched_params->cfg->prach_freq_offset + 6);
}
}
bool sf_sched::is_dl_alloc(sched_ue* user) const
@ -495,28 +502,40 @@ alloc_outcome_t sf_sched::alloc_paging(uint32_t aggr_lvl, uint32_t paging_payloa
return ret.first;
}
sf_sched::rar_code_t sf_sched::alloc_rar(uint32_t aggr_lvl,
const sched_interface::dl_sched_rar_t& rar_grant,
uint32_t prach_tti,
uint32_t buf_rar)
alloc_outcome_t sf_sched::alloc_rar(uint32_t aggr_lvl, const pending_rar_t& rar)
{
// RA-RNTI = 1 + t_id + f_id
// t_id = index of first subframe specified by PRACH (0<=t_id<10)
// f_id = index of the PRACH within subframe, in ascending order of freq domain (0<=f_id<6) (for FDD, f_id=0)
uint16_t ra_rnti = 1 + (uint16_t)(prach_tti % 10);
uint32_t buf_rar = 7 * rar.nof_grants; // TODO: check RAR size
uint32_t msg3_grant_size = 3;
uint32_t total_msg3_size = msg3_grant_size * rar.nof_grants;
// check if there is enough space for Msg3
if (last_msg3_prb + total_msg3_size > sched_params->cfg->cell.nof_prb - sched_params->cfg->nrb_pucch) {
return alloc_outcome_t::RB_COLLISION;
}
ctrl_code_t ret = alloc_dl_ctrl(aggr_lvl, buf_rar, ra_rnti);
// allocate RBs and PDCCH
sf_sched::ctrl_code_t ret = alloc_dl_ctrl(aggr_lvl, buf_rar, rar.ra_rnti);
if (not ret.first) {
Warning("SCHED: Could not allocate RAR for L=%d, cause=%s\n", aggr_lvl, ret.first.to_string());
return {ret.first, nullptr};
log_h->warning("SCHED: Could not allocate RAR for L=%d, cause=%s\n", aggr_lvl, ret.first.to_string());
return ret.first;
}
// Allocation successful
rar_alloc_t rar_alloc(ret.second);
rar_alloc.rar_grant = rar_grant;
rar_allocs.push_back(rar_alloc);
// RAR allocation successful
sched_interface::dl_sched_rar_t rar_grant = {};
rar_grant.nof_grants = rar.nof_grants;
for (uint32_t i = 0; i < rar.nof_grants; ++i) {
rar_grant.msg3_grant[i].data = rar.msg3_grant[i];
rar_grant.msg3_grant[i].grant.tpc_pusch = 3;
rar_grant.msg3_grant[i].grant.trunc_mcs = 0;
uint32_t rba = srslte_ra_type2_to_riv(msg3_grant_size, last_msg3_prb, sched_params->cfg->cell.nof_prb);
rar_grant.msg3_grant[i].grant.rba = rba;
last_msg3_prb += msg3_grant_size;
}
return {ret.first, &rar_allocs.back()};
rar_allocs.emplace_back(ret.second, rar_grant);
return ret.first;
}
alloc_outcome_t sf_sched::alloc_dl_user(sched_ue* user, const rbgmask_t& user_mask, uint32_t pid)
@ -592,11 +611,6 @@ alloc_outcome_t sf_sched::alloc_ul_user(sched_ue* user, ul_harq_proc::ul_alloc_t
return alloc_ul(user, alloc, alloc_type);
}
alloc_outcome_t sf_sched::alloc_ul_msg3(sched_ue* user, ul_harq_proc::ul_alloc_t alloc, uint32_t mcs)
{
return alloc_ul(user, alloc, ul_alloc_t::MSG3, mcs);
}
void sf_sched::set_bc_sched_result(const pdcch_grid_t::alloc_result_t& dci_result)
{
for (const auto& bc_alloc : bc_allocs) {
@ -672,24 +686,28 @@ void sf_sched::set_rar_sched_result(const pdcch_grid_t::alloc_result_t& dci_resu
sched_interface::dl_sched_rar_t* rar = &dl_sched_result.rar[dl_sched_result.nof_rar_elems];
// Assign NCCE/L
rar->dci.location = dci_result[rar_alloc.dci_idx]->dci_pos;
rar->dci.location = dci_result[rar_alloc.alloc_data.dci_idx]->dci_pos;
/* Generate DCI format1A */
prb_range_t prb_range = prb_range_t(rar_alloc.rbg_range, sched_params->P);
int tbs =
generate_format1a(prb_range.prb_start, prb_range.length(), rar_alloc.req_bytes, 0, rar_alloc.rnti, &rar->dci);
prb_range_t prb_range = prb_range_t(rar_alloc.alloc_data.rbg_range, sched_params->P);
int tbs = generate_format1a(prb_range.prb_start,
prb_range.length(),
rar_alloc.alloc_data.req_bytes,
0,
rar_alloc.alloc_data.rnti,
&rar->dci);
if (tbs <= 0) {
log_h->warning("SCHED: Error RAR, ra_rnti_idx=%d, rbgs=(%d,%d), dci=(%d,%d)\n",
rar_alloc.rnti,
rar_alloc.rbg_range.rbg_start,
rar_alloc.rbg_range.rbg_end,
rar_alloc.alloc_data.rnti,
rar_alloc.alloc_data.rbg_range.rbg_start,
rar_alloc.alloc_data.rbg_range.rbg_end,
rar->dci.location.L,
rar->dci.location.ncce);
continue;
}
// Setup RAR process
rar->tbs = rar_alloc.req_bytes;
rar->tbs = rar_alloc.alloc_data.req_bytes;
rar->nof_grants = rar_alloc.rar_grant.nof_grants;
std::copy(&rar_alloc.rar_grant.msg3_grant[0], &rar_alloc.rar_grant.msg3_grant[rar->nof_grants], rar->msg3_grant);
@ -700,9 +718,9 @@ void sf_sched::set_rar_sched_result(const pdcch_grid_t::alloc_result_t& dci_resu
log_h->info("SCHED: RAR, temp_crnti=0x%x, ra-rnti=%d, rbgs=(%d,%d), dci=(%d,%d), rar_grant_rba=%d, "
"rar_grant_mcs=%d\n",
expected_rnti,
rar_alloc.rnti,
rar_alloc.rbg_range.rbg_start,
rar_alloc.rbg_range.rbg_end,
rar_alloc.alloc_data.rnti,
rar_alloc.alloc_data.rbg_range.rbg_start,
rar_alloc.alloc_data.rbg_range.rbg_end,
rar->dci.location.L,
rar->dci.location.ncce,
msg3_grant.grant.rba,
@ -836,6 +854,12 @@ void sf_sched::set_ul_sched_result(const pdcch_grid_t::alloc_result_t& dci_resul
}
}
// Enqueue a pending Msg3 grant for this subframe's UL scheduling pass.
// Only records the grant here; the actual PRB/HARQ allocation is performed
// later (in ra_sched::ul_sched via alloc_ul). As written this always
// returns SUCCESS, even though callers check the outcome.
alloc_outcome_t sf_sched::alloc_msg3(const pending_msg3_t& msg3)
{
pending_msg3s.push_back(msg3);
return alloc_outcome_t::SUCCESS;
}
void sf_sched::generate_dcis()
{
/* Pick one of the possible DCI masks */

@ -171,7 +171,8 @@ struct sched_tester : public srsenb::sched {
uint32_t tti_tx_dl;
uint32_t tti_tx_ul;
uint32_t current_cfi;
srsenb::ra_sched::pending_msg3_t ul_pending_msg3;
bool ul_pending_msg3_present = false;
srsenb::sf_sched::pending_msg3_t ul_pending_msg3;
srslte::bounded_bitset<128, true> used_cce;
// std::vector<bool> used_cce;
std::map<uint16_t, tester_user_results> ue_data; ///< stores buffer state of each user
@ -277,7 +278,12 @@ void sched_tester::new_test_tti(uint32_t tti_)
} else {
tti_data.ul_sf_idx = (tti_data.tti_tx_ul + 10240 - FDD_HARQ_DELAY_MS) % 10;
}
tti_data.ul_pending_msg3 = carrier_schedulers[0]->get_ra_sched()->find_pending_msg3(tti_data.tti_tx_ul);
auto& pending_msg3s = carrier_schedulers[0]->get_sf_sched_ptr(tti_data.tti_rx)->get_pending_msg3();
tti_data.ul_pending_msg3_present = false;
if (not pending_msg3s.empty()) {
tti_data.ul_pending_msg3_present = true;
tti_data.ul_pending_msg3 = pending_msg3s.front();
}
tti_data.current_cfi = sched_params.sched_cfg.nof_ctrl_symbols;
tti_data.used_cce.resize(srslte_regs_pdcch_ncce(&regs, tti_data.current_cfi));
tti_data.used_cce.reset();
@ -454,7 +460,7 @@ int sched_tester::test_ra()
CONDERROR(tti_data.sched_result_ul.pusch[i].needs_pdcch,
"[TESTER] Msg3 allocations do not require PDCCH\n");
CONDERROR(tti_data.ul_pending_msg3.rnti != rnti, "[TESTER] The UL pending msg3 RNTI did not match\n");
CONDERROR(not tti_data.ul_pending_msg3.enabled, "[TESTER] The UL pending msg3 RNTI did not match\n");
CONDERROR(not tti_data.ul_pending_msg3_present, "[TESTER] The UL pending msg3 RNTI did not match\n");
userinfo.msg3_tti = tti_data.tti_tx_ul;
msg3_count++;
}
@ -527,7 +533,7 @@ int sched_tester::assert_no_empty_allocs()
*/
int sched_tester::test_tti_result()
{
const srsenb::sf_sched* tti_sched = carrier_schedulers[0]->get_sf_sched(tti_data.tti_rx);
const srsenb::sf_sched* tti_sched = carrier_schedulers[0]->get_sf_sched_ptr(tti_data.tti_rx);
// Helper Function: checks if there is any collision. If not, fills the mask
auto try_cce_fill = [&](const srslte_dci_location_t& dci_loc, const char* ch) {
@ -579,16 +585,17 @@ int sched_tester::test_tti_result()
CONDERROR(rar.tbs == 0, "Allocated RAR process with invalid TBS=%d\n", rar.tbs);
for (uint32_t j = 0; j < rar.nof_grants; ++j) {
const auto& msg3_grant = rar.msg3_grant[j];
const srsenb::ra_sched::pending_msg3_t& p = carrier_schedulers[0]->get_ra_sched()->find_pending_msg3(
tti_sched->get_tti_tx_dl() + MSG3_DELAY_MS + TX_DELAY);
CONDERROR(not p.enabled, "Pending Msg3 should have been set\n");
const auto& msg3_list =
carrier_schedulers[0]->get_sf_sched_ptr(tti_sched->get_tti_rx() + MSG3_DELAY_MS)->get_pending_msg3();
const auto& p = msg3_list.front();
CONDERROR(msg3_list.empty(), "Pending Msg3 should have been set\n");
uint32_t rba = srslte_ra_type2_to_riv(p.L, p.n_prb, cfg.cell.nof_prb);
CONDERROR(msg3_grant.grant.rba != rba, "Pending Msg3 RBA is not valid\n");
}
}
/* verify if sched_result "used_cce" coincide with sched "used_cce" */
auto* tti_alloc = carrier_schedulers[0]->get_sf_sched(tti_data.tti_rx);
auto* tti_alloc = carrier_schedulers[0]->get_sf_sched_ptr(tti_data.tti_rx);
if (tti_data.used_cce != tti_alloc->get_pdcch_mask()) {
std::string mask_str = tti_alloc->get_pdcch_mask().to_string();
TESTERROR("[TESTER] The used_cce do not match: (%s!=%s)\n", mask_str.c_str(), tti_data.used_cce.to_hex().c_str());
@ -793,7 +800,7 @@ int sched_tester::test_sibs()
int sched_tester::test_collisions()
{
const srsenb::sf_sched* tti_sched = carrier_schedulers[0]->get_sf_sched(tti_data.tti_rx);
const srsenb::sf_sched* tti_sched = carrier_schedulers[0]->get_sf_sched_ptr(tti_data.tti_rx);
srsenb::prbmask_t ul_allocs(cfg.cell.nof_prb);
@ -830,12 +837,12 @@ int sched_tester::test_collisions()
}
/* TEST: check collisions with PUCCH */
bool strict = cfg.cell.nof_prb != 6 or (not tti_data.is_prach_tti_tx_ul and not tti_data.ul_pending_msg3.enabled);
bool strict = cfg.cell.nof_prb != 6 or (not tti_data.is_prach_tti_tx_ul and not tti_data.ul_pending_msg3_present);
try_ul_fill({0, (uint32_t)cfg.nrb_pucch}, "PUCCH", strict);
try_ul_fill({cfg.cell.nof_prb - cfg.nrb_pucch, (uint32_t)cfg.nrb_pucch}, "PUCCH", strict);
/* TEST: Check if there is a collision with Msg3 or Msg3 alloc data is not consistent */
if (tti_data.ul_pending_msg3.enabled) {
if (tti_data.ul_pending_msg3_present) {
bool passed = false;
for (uint32_t i = 0; i < tti_data.sched_result_ul.nof_dci_elems; ++i) {
if (tti_data.ul_pending_msg3.rnti == tti_data.sched_result_ul.pusch[i].dci.rnti) {
@ -937,7 +944,7 @@ int sched_tester::test_collisions()
rbgmask.reset(i);
}
}
if (rbgmask != carrier_schedulers[0]->get_sf_sched(tti_data.tti_rx)->get_dl_mask()) {
if (rbgmask != carrier_schedulers[0]->get_sf_sched_ptr(tti_data.tti_rx)->get_dl_mask()) {
TESTERROR("[TESTER] The UL PRB mask and the scheduler result UL mask are not consistent\n");
}
return SRSLTE_SUCCESS;

Loading…
Cancel
Save