sched,nr: use PRBs instead of RBGs as the basic unit for PRB grants in the NR scheduler

master
Francisco Paisana 4 years ago
parent b81cbd1334
commit d63ea00a5d

@ -14,7 +14,7 @@
#define SRSRAN_SCHED_NR_CFG_H
#include "sched_nr_interface.h"
#include "srsran/adt/bounded_bitset.h"
#include "sched_nr_rb.h"
namespace srsenb {
@ -27,9 +27,10 @@ namespace sched_nr_impl {
const static size_t MAX_GRANTS = sched_nr_interface::MAX_GRANTS;
using pucch_resource_grant = sched_nr_interface::pucch_resource_grant;
using pucch_grant = sched_nr_interface::pucch_grant;
using pucch_list_t = sched_nr_interface::pucch_list_t;
using pucch_t = mac_interface_phy_nr::pucch_t;
using pucch_list_t = srsran::bounded_vector<pucch_t, MAX_GRANTS>;
using pusch_t = mac_interface_phy_nr::pusch_t;
using pusch_list_t = srsran::bounded_vector<pusch_t, MAX_GRANTS>;
using sched_cfg_t = sched_nr_interface::sched_cfg_t;
using cell_cfg_t = sched_nr_interface::cell_cfg_t;
@ -67,7 +68,8 @@ struct sched_params {
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
using rbgmask_t = srsran::bounded_bitset<SCHED_NR_MAX_NOF_RBGS, true>;
using prb_bitmap = srsran::bounded_bitset<SRSRAN_MAX_PRB_NR, true>;
using rbgmask_t = srsran::bounded_bitset<SCHED_NR_MAX_NOF_RBGS, true>;
using pdcchmask_t = srsran::bounded_bitset<SCHED_NR_MAX_NOF_RBGS, true>;
@ -90,7 +92,7 @@ public:
explicit bwp_ue_cfg(uint16_t rnti, const bwp_params& bwp_cfg, const ue_cfg_t& uecfg_);
const ue_cfg_t* ue_cfg() const { return cfg_; }
const srsran::phy_cfg_nr_t& cfg() const { return cfg_->phy_cfg; }
const srsran::phy_cfg_nr_t& phy() const { return cfg_->phy_cfg; }
const bwp_cce_pos_list& cce_pos_list(uint32_t search_id) const
{
return cce_positions_list[ss_id_to_cce_idx[search_id]];

@ -31,19 +31,25 @@ public:
}
bool empty(uint32_t tb_idx) const { return not tb[tb_idx].active; }
bool has_pending_retx(tti_point tti_rx) const { return not empty() and not tb[0].ack_state and tti_ack <= tti_rx; }
uint32_t nof_retx() const { return tb[0].n_rtx; }
uint32_t max_nof_retx() const { return max_retx; }
uint32_t tbs() const { return tb[0].tbs; }
uint32_t ndi() const { return tb[0].ndi; }
uint32_t mcs() const { return tb[0].mcs; }
uint32_t nof_retx() const { return tb[0].n_rtx; }
uint32_t max_nof_retx() const { return max_retx; }
uint32_t tbs() const { return tb[0].tbs; }
uint32_t ndi() const { return tb[0].ndi; }
uint32_t mcs() const { return tb[0].mcs; }
const prb_grant& prbs() const { return prbs_; }
tti_point harq_tti_ack() const { return tti_ack; }
bool ack_info(uint32_t tb_idx, bool ack);
void new_tti(tti_point tti_rx);
void reset();
bool
new_tx(tti_point tti_tx, tti_point tti_ack, const rbgmask_t& rbgmask, uint32_t mcs, uint32_t tbs, uint32_t max_retx);
bool new_retx(tti_point tti_tx, tti_point tti_ack, const rbgmask_t& rbgmask, int* mcs, int* tbs);
new_tx(tti_point tti_tx, tti_point tti_ack, const prb_grant& grant, uint32_t mcs, uint32_t tbs, uint32_t max_retx);
bool new_retx(tti_point tti_tx, tti_point tti_ack, const prb_grant& grant);
bool new_retx(tti_point tti_tx, tti_point tti_ack);
// NOTE: Has to be used before first tx is dispatched
bool set_tbs(uint32_t tbs);
const uint32_t pid;
@ -60,7 +66,7 @@ private:
uint32_t max_retx = 1;
tti_point tti_tx;
tti_point tti_ack;
rbgmask_t rbgmask;
prb_grant prbs_;
std::array<tb_t, SCHED_NR_MAX_TB> tb;
};
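For orientation, a minimal sketch (not part of the patch) of how the reworked prb_grant-based HARQ API above is meant to be driven, mirroring the calls made later in bwp_slot_allocator::alloc_pdsch; the ue handle, the grant values and real_tbs are illustrative placeholders:
// Illustrative sketch: first transmission vs retransmission on a DL HARQ process
harq_proc* h = ue.h_dl;                      // empty or pending-retx HARQ chosen by the scheduler
prb_grant  grant = prb_interval{0, 20};      // type-1 (contiguous) allocation, values made up
if (h->empty()) {
  // first tx: placeholder MCS/TBS, patched once the PDSCH grant has been generated
  h->new_tx(ue.pdsch_tti, ue.uci_tti, grant, /*mcs=*/20, /*tbs=*/100, /*max_retx=*/4);
  h->set_tbs(real_tbs);                      // has to be called before the first tx is dispatched
} else {
  // retx: the grant must keep the same alloc type and size as the original transmission
  h->new_retx(ue.pdsch_tti, ue.uci_tti, grant);
}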

@ -26,7 +26,7 @@ namespace srsenb {
const static size_t SCHED_NR_MAX_CARRIERS = 4;
const static uint16_t SCHED_NR_INVALID_RNTI = 0;
const static size_t SCHED_NR_MAX_NOF_RBGS = 17;
const static size_t SCHED_NR_MAX_NOF_RBGS = 18;
const static size_t SCHED_NR_MAX_TB = 1;
const static size_t SCHED_NR_MAX_HARQ = 16;
const static size_t SCHED_NR_MAX_BWP_PER_CELL = 2;
@ -93,16 +93,6 @@ public:
using dl_sched_t = mac_interface_phy_nr::dl_sched_t;
using ul_sched_t = mac_interface_phy_nr::ul_sched_t;
struct pucch_resource_grant {
uint16_t rnti;
uint32_t resource_set_id;
uint32_t resource_id;
};
struct pucch_grant {
pucch_resource_grant resource;
};
using pucch_list_t = srsran::bounded_vector<pucch_grant, MAX_GRANTS>;
virtual ~sched_nr_interface() = default;
virtual int cell_cfg(srsran::const_span<sched_nr_interface::cell_cfg_t> ue_cfg) = 0;
virtual void ue_cfg(uint16_t rnti, const ue_cfg_t& ue_cfg) = 0;

@ -71,7 +71,7 @@ private:
// List of PDCCH grants
struct alloc_record {
uint32_t aggr_idx;
uint32_t search_space_id;
uint32_t ss_id;
uint32_t idx;
pdcch_grant_type_t alloc_type;
slot_ue* ue;

@ -18,26 +18,25 @@
namespace srsenb {
namespace sched_nr_impl {
uint32_t get_P(uint32_t bwp_nof_prb, bool config_1_or_2);
uint32_t get_nof_rbgs(uint32_t bwp_nof_prb, uint32_t bwp_start, bool config1_or_2);
using pdsch_bitmap = srsran::bounded_bitset<SCHED_NR_MAX_NOF_RBGS, true>;
using pusch_bitmap = srsran::bounded_bitset<SCHED_NR_MAX_NOF_RBGS, true>;
using rbg_interval = srsran::interval<uint32_t>;
rbg_interval find_empty_rbg_interval(const pdsch_bitmap& bitmap, uint32_t max_nof_rbgs);
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
bool fill_dci_rar(rbg_interval rbginterv, const bwp_params& bwp_cfg, srsran_dci_dl_nr_t& dci);
bool fill_dci_rar(prb_interval interv, const bwp_params& bwp_cfg, srsran_dci_dl_nr_t& dci);
class slot_ue;
void fill_dci_ue_cfg(const slot_ue& ue, const rbgmask_t& rbgmask, const bwp_params& bwp_cfg, srsran_dci_dl_nr_t& dci);
void fill_dci_ue_cfg(const slot_ue& ue, const rbgmask_t& rbgmask, const bwp_params& bwp_cfg, srsran_dci_ul_nr_t& dci);
void fill_pdsch_ue(const slot_ue& ue, const rbgmask_t& rbgmask, const bwp_params& bwp_cfg, srsran_sch_cfg_nr_t& sch);
void fill_pusch_ue(const slot_ue& ue, const rbgmask_t& rbgmask, const bwp_params& bwp_cfg, srsran_sch_cfg_nr_t& sch);
pucch_resource_grant find_pucch_resource(const slot_ue& ue, const rbgmask_t& rbgs, uint32_t tbs);
/// Generate PDCCH DL DCI fields
void fill_dl_dci_ue_fields(const slot_ue& ue,
const bwp_params& bwp_cfg,
uint32_t ss_id,
srsran_dci_location_t dci_pos,
srsran_dci_dl_nr_t& dci);
/// Generate PDCCH UL DCI fields
void fill_ul_dci_ue_fields(const slot_ue& ue,
const bwp_params& bwp_cfg,
uint32_t ss_id,
srsran_dci_location_t dci_pos,
srsran_dci_ul_nr_t& dci);
} // namespace sched_nr_impl
} // namespace srsenb

@ -0,0 +1,238 @@
/**
*
* \section COPYRIGHT
*
* Copyright 2013-2021 Software Radio Systems Limited
*
* By using this file, you agree to the terms and conditions set
* forth in the LICENSE file which can be found at the top level of
* the distribution.
*
*/
#ifndef SRSRAN_SCHED_NR_RB_H
#define SRSRAN_SCHED_NR_RB_H
#include "srsenb/hdr/stack/mac/nr/sched_nr_interface.h"
#include "srsran/adt/bounded_bitset.h"
#include "srsran/phy/common/phy_common_nr.h"
namespace srsenb {
namespace sched_nr_impl {
using prb_bitmap = srsran::bounded_bitset<SRSRAN_MAX_PRB_NR, true>;
using rbg_bitmap = srsran::bounded_bitset<SCHED_NR_MAX_NOF_RBGS, true>;
/// TS 38.214, Table 6.1.2.2.1-1 - Nominal RBG size P
uint32_t get_P(uint32_t bwp_nof_prb, bool config_1_or_2);
/// TS 38.214 - total number of RBGs for an uplink bandwidth part of size "bwp_nof_prb" PRBs
uint32_t get_nof_rbgs(uint32_t bwp_nof_prb, uint32_t bwp_start, bool config1_or_2);
/// Struct to express a {min,...,max} range of PRBs
struct prb_interval : public srsran::interval<uint32_t> {
using interval::interval;
};
struct prb_grant {
prb_grant() = default;
prb_grant(const prb_interval& other) noexcept : alloc_type_0(false), alloc(other) {}
prb_grant(const rbg_bitmap& other) noexcept : alloc_type_0(true), alloc(other) {}
prb_grant(const prb_grant& other) noexcept : alloc_type_0(other.alloc_type_0), alloc(other.alloc_type_0, other.alloc)
{}
prb_grant& operator=(const prb_grant& other) noexcept
{
if (this == &other) {
return *this;
}
if (other.alloc_type_0) {
*this = other.rbgs();
} else {
*this = other.prbs();
}
return *this;
}
prb_grant& operator=(const prb_interval& prbs)
{
if (alloc_type_0) {
alloc_type_0 = false;
alloc.rbgs.~rbg_bitmap();
new (&alloc.interv) prb_interval(prbs);
} else {
alloc.interv = prbs;
}
return *this;
}
prb_grant& operator=(const rbg_bitmap& rbgs)
{
if (alloc_type_0) {
alloc.rbgs = rbgs;
} else {
alloc_type_0 = true;
alloc.interv.~prb_interval();
new (&alloc.rbgs) rbg_bitmap(rbgs);
}
return *this;
}
~prb_grant()
{
if (is_alloc_type0()) {
alloc.rbgs.~rbg_bitmap();
} else {
alloc.interv.~prb_interval();
}
}
bool is_alloc_type0() const { return alloc_type_0; }
bool is_alloc_type1() const { return not is_alloc_type0(); }
const rbg_bitmap& rbgs() const
{
srsran_assert(is_alloc_type0(), "Invalid access to rbgs() field of grant with alloc type 1");
return alloc.rbgs;
}
const prb_interval& prbs() const
{
srsran_assert(is_alloc_type1(), "Invalid access to prbs() field of grant with alloc type 0");
return alloc.interv;
}
rbg_bitmap& rbgs()
{
srsran_assert(is_alloc_type0(), "Invalid access to rbgs() field of grant with alloc type 1");
return alloc.rbgs;
}
prb_interval& prbs()
{
srsran_assert(is_alloc_type1(), "Invalid access to prbs() field of grant with alloc type 0");
return alloc.interv;
}
private:
bool alloc_type_0 = false;
union alloc_t {
rbg_bitmap rbgs;
prb_interval interv;
alloc_t() : interv(0, 0) {}
explicit alloc_t(const prb_interval& prbs) : interv(prbs) {}
explicit alloc_t(const rbg_bitmap& rbgs_) : rbgs(rbgs_) {}
alloc_t(bool type0, const alloc_t& other)
{
if (type0) {
new (&rbgs) rbg_bitmap(other.rbgs);
} else {
new (&interv) prb_interval(other.interv);
}
}
} alloc;
};
struct bwp_rb_bitmap {
public:
bwp_rb_bitmap() = default;
bwp_rb_bitmap(uint32_t bwp_nof_prbs, uint32_t bwp_prb_start_, bool config1_or_2);
void reset()
{
prbs_.reset();
rbgs_.reset();
}
template <typename T>
void operator|=(const T& grant)
{
add(grant);
}
void add(const prb_interval& prbs)
{
prbs_.fill(prbs.start(), prbs.stop());
add_prbs_to_rbgs(prbs);
}
void add(const prb_bitmap& grant)
{
prbs_ |= grant;
add_prbs_to_rbgs(grant);
}
void add(const rbg_bitmap& grant)
{
rbgs_ |= grant;
add_rbgs_to_prbs(grant);
}
void add(const prb_grant& grant)
{
if (grant.is_alloc_type0()) {
add(grant.rbgs());
} else {
add(grant.prbs());
}
}
bool collides(const prb_grant& grant) const
{
if (grant.is_alloc_type0()) {
return (rbgs() & grant.rbgs()).any();
}
return prbs().any(grant.prbs().start(), grant.prbs().stop());
}
bool test(uint32_t prb_idx) { return prbs().test(prb_idx); }
void set(uint32_t prb_idx)
{
prbs_.set(prb_idx);
rbgs_.set(prb_to_rbg_idx(prb_idx));
}
const prb_bitmap& prbs() const { return prbs_; }
const rbg_bitmap& rbgs() const { return rbgs_; }
uint32_t P() const { return P_; }
uint32_t nof_prbs() const { return prbs_.size(); }
uint32_t nof_rbgs() const { return rbgs_.size(); }
uint32_t prb_to_rbg_idx(uint32_t prb_idx) const;
private:
prb_bitmap prbs_;
rbg_bitmap rbgs_;
uint32_t bwp_prb_start = 0;
uint32_t P_ = 0;
uint32_t Pnofbits = 0;
uint32_t first_rbg_size = 0;
void add_prbs_to_rbgs(const prb_bitmap& grant);
void add_prbs_to_rbgs(const prb_interval& grant);
void add_rbgs_to_prbs(const rbg_bitmap& grant);
};
inline prb_interval
find_next_empty_interval(const prb_bitmap& mask, size_t start_prb_idx = 0, size_t last_prb_idx = SRSRAN_MAX_PRB_NR)
{
int rb_start = mask.find_lowest(start_prb_idx, std::min(mask.size(), last_prb_idx), false);
if (rb_start != -1) {
int rb_end = mask.find_lowest(rb_start + 1, std::min(mask.size(), last_prb_idx), true);
return {(uint32_t)rb_start, (uint32_t)(rb_end < 0 ? mask.size() : rb_end)};
}
return {};
}
inline prb_interval find_empty_interval_of_length(const prb_bitmap& mask, size_t nof_prbs, uint32_t start_prb_idx = 0)
{
prb_interval max_interv;
do {
prb_interval interv = find_next_empty_interval(mask, start_prb_idx, mask.size());
if (interv.empty()) {
break;
}
if (interv.length() >= nof_prbs) {
max_interv.set(interv.start(), interv.start() + nof_prbs);
break;
}
if (interv.length() > max_interv.length()) {
max_interv = interv;
}
start_prb_idx = interv.stop() + 1;
} while (start_prb_idx < mask.size());
return max_interv;
}
} // namespace sched_nr_impl
} // namespace srsenb
#endif // SRSRAN_SCHED_NR_RB_H
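A short usage sketch (illustrative only, assuming a 52-PRB BWP starting at PRB 0) of how the prb_grant tagged union and the PRB search helpers declared above fit together:
// Type-1 (contiguous) grant found with the search helper
prb_bitmap used_prbs(52);                                // all PRBs initially free
used_prbs.fill(0, 10);                                   // pretend PRBs [0, 10) are taken
prb_interval interv = find_empty_interval_of_length(used_prbs, 20);  // -> [10, 30)
prb_grant grant = interv;                                // alloc type 1
// grant.is_alloc_type1() and grant.prbs().length() == 20
// Type-0 (RBG bitmap) grant: the same object can be re-assigned in place
rbg_bitmap rbgs(get_nof_rbgs(52, 0, true));              // 13 RBGs of nominal size P = 4
rbgs.set(0);
grant = rbgs;                                            // alloc type 0; the stored interval is destroyed
// grant.is_alloc_type0() and grant.rbgs().count() == 1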

@ -30,17 +30,22 @@ struct pending_rar_t;
const static size_t MAX_CORESET_PER_BWP = 3;
using slot_coreset_list = std::array<srsran::optional<coreset_region>, MAX_CORESET_PER_BWP>;
using pdsch_t = mac_interface_phy_nr::pdsch_t;
using pdsch_list_t = srsran::bounded_vector<pdsch_t, MAX_GRANTS>;
struct bwp_slot_grid {
uint32_t slot_idx;
const bwp_params* cfg;
bool is_dl, is_ul;
pdsch_bitmap dl_rbgs;
pusch_bitmap ul_rbgs;
bwp_rb_bitmap dl_prbs;
bwp_rb_bitmap ul_prbs;
pdcch_dl_list_t dl_pdcchs;
pdcch_ul_list_t ul_pdcchs;
pdsch_list_t pdschs;
slot_coreset_list coresets;
pucch_list_t pucchs;
pusch_list_t puschs;
bwp_slot_grid() = default;
explicit bwp_slot_grid(const bwp_params& bwp_params, uint32_t slot_idx_);
@ -68,8 +73,8 @@ public:
void new_slot(tti_point pdcch_tti_) { pdcch_tti = pdcch_tti_; }
alloc_result alloc_rar(uint32_t aggr_idx, const pending_rar_t& rar, rbg_interval interv, uint32_t max_nof_grants);
alloc_result alloc_pdsch(slot_ue& ue, const rbgmask_t& dl_mask);
alloc_result alloc_rar(uint32_t aggr_idx, const pending_rar_t& rar, prb_interval interv, uint32_t max_nof_grants);
alloc_result alloc_pdsch(slot_ue& ue, const prb_grant& dl_grant);
alloc_result alloc_pusch(slot_ue& ue, const rbgmask_t& dl_mask);
tti_point get_pdcch_tti() const { return pdcch_tti; }

@ -7,6 +7,6 @@
#
set(SOURCES mac_nr.cc sched_nr.cc sched_nr_ue.cc sched_nr_worker.cc sched_nr_rb_grid.cc sched_nr_harq.cc
sched_nr_pdcch.cc sched_nr_cfg.cc sched_nr_phy.cc sched_nr_bwp.cc)
sched_nr_pdcch.cc sched_nr_cfg.cc sched_nr_phy.cc sched_nr_bwp.cc sched_nr_rb.cc)
add_library(srsgnb_mac STATIC ${SOURCES})

@ -22,16 +22,18 @@ ra_sched::ra_sched(const bwp_params& bwp_cfg_) : bwp_cfg(&bwp_cfg_), logger(srsl
alloc_result
ra_sched::allocate_pending_rar(bwp_slot_allocator& slot_grid, const pending_rar_t& rar, uint32_t& nof_grants_alloc)
{
const uint32_t rar_aggr_level = 2;
auto& pdsch_bitmap = slot_grid.res_grid()[slot_grid.get_pdcch_tti()].dl_rbgs;
const uint32_t rar_aggr_level = 2;
const prb_bitmap& prbs = slot_grid.res_grid()[slot_grid.get_pdcch_tti()].dl_prbs.prbs();
alloc_result ret = alloc_result::other_cause;
for (nof_grants_alloc = rar.msg3_grant.size(); nof_grants_alloc > 0; nof_grants_alloc--) {
ret = alloc_result::invalid_coderate;
for (uint32_t nrbg = 1; nrbg < bwp_cfg->N_rbg and ret == alloc_result::invalid_coderate; ++nrbg) {
rbg_interval rbg_interv = find_empty_rbg_interval(pdsch_bitmap, nrbg);
if (rbg_interv.length() == nrbg) {
ret = slot_grid.alloc_rar(rar_aggr_level, rar, rbg_interv, nof_grants_alloc);
ret = alloc_result::invalid_coderate;
uint32_t start_prb_idx = 0;
for (uint32_t nprb = 1; nprb < bwp_cfg->N_rbg and ret == alloc_result::invalid_coderate; ++nprb) {
prb_interval interv = find_empty_interval_of_length(prbs, nprb, start_prb_idx);
start_prb_idx = interv.stop();
if (interv.length() == nprb) {
ret = slot_grid.alloc_rar(rar_aggr_level, rar, interv, nof_grants_alloc);
} else {
ret = alloc_result::no_sch_space;
}

@ -61,7 +61,7 @@ bwp_ue_cfg::bwp_ue_cfg(uint16_t rnti_, const bwp_params& bwp_cfg_, const ue_cfg_
rnti(rnti_), cfg_(&uecfg_), bwp_cfg(&bwp_cfg_)
{
std::fill(ss_id_to_cce_idx.begin(), ss_id_to_cce_idx.end(), SRSRAN_UE_DL_NR_MAX_NOF_SEARCH_SPACE);
const auto& pdcch = cfg().pdcch;
const auto& pdcch = phy().pdcch;
for (uint32_t i = 0; i < SRSRAN_UE_DL_NR_MAX_NOF_SEARCH_SPACE; ++i) {
if (pdcch.search_space_present[i]) {
const auto& ss = pdcch.search_space[i];

@ -45,7 +45,7 @@ void harq_proc::reset()
bool harq_proc::new_tx(tti_point tti_tx_,
tti_point tti_ack_,
const rbgmask_t& rbgmask_,
const prb_grant& grant,
uint32_t mcs,
uint32_t tbs,
uint32_t max_retx_)
@ -57,7 +57,7 @@ bool harq_proc::new_tx(tti_point tti_tx_,
max_retx = max_retx_;
tti_tx = tti_tx_;
tti_ack = tti_ack_;
rbgmask = rbgmask_;
prbs_ = grant;
tb[0].ndi = !tb[0].ndi;
tb[0].mcs = mcs;
tb[0].tbs = tbs;
@ -65,22 +65,38 @@ bool harq_proc::new_tx(tti_point tti_tx_,
return true;
}
bool harq_proc::new_retx(tti_point tti_tx_, tti_point tti_ack_, const rbgmask_t& rbgmask_, int* mcs, int* tbs)
bool harq_proc::set_tbs(uint32_t tbs)
{
if (empty() or rbgmask.count() != rbgmask.count()) {
if (empty() or nof_retx() > 0) {
return false;
}
tb[0].tbs = tbs;
return true;
}
bool harq_proc::new_retx(tti_point tti_tx_, tti_point tti_ack_, const prb_grant& grant)
{
if (grant.is_alloc_type0() != prbs_.is_alloc_type0() or
(grant.is_alloc_type0() and grant.rbgs().count() != prbs_.rbgs().count()) or
(grant.is_alloc_type1() and grant.prbs().length() != prbs_.prbs().length())) {
return false;
}
if (new_retx(tti_tx_, tti_ack_)) {
prbs_ = grant;
return true;
}
return false;
}
bool harq_proc::new_retx(tti_point tti_tx_, tti_point tti_ack_)
{
if (empty()) {
return false;
}
tti_tx = tti_tx_;
tti_ack = tti_ack_;
rbgmask = rbgmask_;
tb[0].ack_state = false;
tb[0].n_rtx++;
if (mcs != nullptr) {
*mcs = tb[0].mcs;
}
if (tbs != nullptr) {
*tbs = tb[0].tbs;
}
return true;
}

@ -55,10 +55,10 @@ bool coreset_region::alloc_dci(pdcch_grant_type_t alloc_type,
saved_dfs_tree.clear();
alloc_record record;
record.ue = user;
record.aggr_idx = aggr_idx;
record.search_space_id = search_space_id;
record.alloc_type = alloc_type;
record.ue = user;
record.aggr_idx = aggr_idx;
record.ss_id = search_space_id;
record.alloc_type = alloc_type;
if (record.alloc_type == pdcch_grant_type_t::ul_data) {
record.idx = pdcch_ul_list.size();
pdcch_ul_list.emplace_back();
@ -158,7 +158,6 @@ bool coreset_region::alloc_dfs_node(const alloc_record& record, uint32_t start_d
// Allocation successful
node.total_mask |= node.current_mask;
alloc_dfs.push_back(node);
// set new DCI position
if (record.alloc_type == pdcch_grant_type_t::ul_data) {
pdcch_ul_t& pdcch_ul = pdcch_ul_list[record.idx];
pdcch_ul.dci.ctx.location = node.dci_pos;
@ -176,9 +175,9 @@ srsran::span<const uint32_t> coreset_region::get_cce_loc_table(const alloc_recor
{
switch (record.alloc_type) {
case pdcch_grant_type_t::dl_data:
return record.ue->cfg->cce_pos_list(record.search_space_id)[slot_idx][record.aggr_idx];
return record.ue->cfg->cce_pos_list(record.ss_id)[slot_idx][record.aggr_idx];
case pdcch_grant_type_t::ul_data:
return record.ue->cfg->cce_pos_list(record.search_space_id)[slot_idx][record.aggr_idx];
return record.ue->cfg->cce_pos_list(record.ss_id)[slot_idx][record.aggr_idx];
default:
break;
}

@ -17,171 +17,68 @@
namespace srsenb {
namespace sched_nr_impl {
/// TS 38.214, Table 6.1.2.2.1-1 - Nominal RBG size P
uint32_t get_P(uint32_t bwp_nof_prb, bool config_1_or_2)
{
srsran_assert(bwp_nof_prb > 0 and bwp_nof_prb <= 275, "Invalid BWP size");
if (bwp_nof_prb <= 36) {
return config_1_or_2 ? 2 : 4;
}
if (bwp_nof_prb <= 72) {
return config_1_or_2 ? 4 : 8;
}
if (bwp_nof_prb <= 144) {
return config_1_or_2 ? 8 : 16;
}
return 16;
}
/// TS 38.214 - total number of RBGs for a uplink bandwidth part of size "bwp_nof_prb" PRBs
uint32_t get_nof_rbgs(uint32_t bwp_nof_prb, uint32_t bwp_start, bool config1_or_2)
{
uint32_t P = get_P(bwp_nof_prb, config1_or_2);
return srsran::ceil_div(bwp_nof_prb + (bwp_start % P), P);
}
uint32_t get_rbg_size(uint32_t bwp_nof_prb, uint32_t bwp_start, bool config1_or_2, uint32_t rbg_idx)
{
uint32_t P = get_P(bwp_nof_prb, config1_or_2);
uint32_t nof_rbgs = get_nof_rbgs(bwp_nof_prb, bwp_start, config1_or_2);
if (rbg_idx == 0) {
return P - (bwp_start % P);
}
if (rbg_idx == nof_rbgs - 1) {
uint32_t ret = (bwp_start + bwp_nof_prb) % P;
return ret > 0 ? ret : P;
}
return P;
}
void bitmap_to_prb_array(const rbgmask_t& bitmap, uint32_t bwp_nof_prb, srsran::span<bool> prbs)
{
uint32_t count = 0;
for (uint32_t rbg = 0; rbg < bitmap.size(); ++rbg) {
bool val = bitmap.test(rbg);
uint32_t rbg_size = get_rbg_size(bwp_nof_prb, 0, true, rbg);
for (uint32_t prb_idx = count; prb_idx < count + rbg_size; ++prb_idx) {
prbs[prb_idx] = val;
}
}
}
srsran::interval<uint32_t> find_first_interval(const rbgmask_t& mask)
{
int rb_start = mask.find_lowest(0, mask.size());
if (rb_start != -1) {
int rb_end = mask.find_lowest(rb_start + 1, mask.size(), false);
return {(uint32_t)rb_start, (uint32_t)(rb_end < 0 ? mask.size() : rb_end)};
}
return {};
}
int bitmap_to_riv(const rbgmask_t& bitmap, uint32_t cell_nof_prb)
{
srsran::interval<uint32_t> interv = find_first_interval(bitmap);
srsran_assert(interv.length() == bitmap.count(), "Trying to acquire riv for non-contiguous bitmap");
return srsran_ra_nr_type1_riv(cell_nof_prb, interv.start(), interv.length());
}
rbg_interval find_empty_rbg_interval(const pdsch_bitmap& in_mask, uint32_t max_size)
{
rbg_interval max_interv;
for (size_t n = 0; n < in_mask.size();) {
int pos = in_mask.find_lowest(n, in_mask.size(), false);
if (pos < 0) {
break;
}
size_t max_pos = std::min(in_mask.size(), (size_t)pos + max_size);
int pos2 = in_mask.find_lowest(pos + 1, max_pos, true);
rbg_interval interv(pos, pos2 < 0 ? max_pos : pos2);
if (interv.length() >= max_size) {
return interv;
}
if (interv.length() > max_interv.length()) {
max_interv = interv;
}
n = interv.stop();
}
return max_interv;
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
bool fill_dci_rar(rbg_interval rbginterv, const bwp_params& cell, srsran_dci_dl_nr_t& dci)
bool fill_dci_rar(prb_interval interv, const bwp_params& cell, srsran_dci_dl_nr_t& dci)
{
dci.mcs = 5;
return true;
}
template <typename DciDlOrUl>
void fill_dci_common(const slot_ue& ue, const rbgmask_t& bitmap, const bwp_params& bwp_cfg, DciDlOrUl& dci)
void fill_dci_common(const slot_ue& ue, const bwp_params& bwp_cfg, DciDlOrUl& dci)
{
const static uint32_t rv_idx[4] = {0, 2, 3, 1};
// Note: PDCCH DCI position already filled at this point
dci.bwp_id = ue.bwp_id;
dci.cc_id = ue.cc;
dci.freq_domain_assigment = bitmap_to_riv(bitmap, bwp_cfg.cfg.rb_width);
dci.ctx.rnti = ue.rnti;
dci.ctx.rnti_type = srsran_rnti_type_c;
dci.tpc = 1;
dci.bwp_id = ue.bwp_id;
dci.cc_id = ue.cc;
dci.tpc = 1;
// harq
harq_proc* h = std::is_same<DciDlOrUl, srsran_dci_dl_nr_t>::value ? ue.h_dl : ue.h_ul;
dci.pid = h->pid;
dci.ndi = h->ndi();
dci.mcs = h->mcs();
dci.rv = rv_idx[h->nof_retx() % 4];
// PRB assignment
const prb_grant& grant = h->prbs();
if (grant.is_alloc_type0()) {
dci.freq_domain_assigment = grant.rbgs().to_uint64();
} else {
dci.freq_domain_assigment =
srsran_ra_nr_type1_riv(bwp_cfg.cfg.rb_width, grant.prbs().start(), grant.prbs().length());
}
dci.time_domain_assigment = 0;
}
void fill_dci_ue_cfg(const slot_ue& ue, const rbgmask_t& rbgmask, const bwp_params& bwp_cfg, srsran_dci_dl_nr_t& dci)
{
fill_dci_common(ue, rbgmask, bwp_cfg, dci);
}
void fill_dci_ue_cfg(const slot_ue& ue, const rbgmask_t& rbgmask, const bwp_params& bwp_cfg, srsran_dci_ul_nr_t& dci)
{
fill_dci_common(ue, rbgmask, bwp_cfg, dci);
}
void fill_sch_ue_common(const slot_ue& ue,
const rbgmask_t& rbgmask,
const bwp_params& bwp_cfg,
srsran_sch_cfg_nr_t& sch)
{
sch.grant.rnti_type = srsran_rnti_type_c;
sch.grant.rnti = ue.rnti;
sch.grant.nof_layers = 1;
sch.grant.nof_prb = bwp_cfg.cfg.rb_width;
}
void fill_pdsch_ue(const slot_ue& ue, const rbgmask_t& rbgmask, const bwp_params& cc_cfg, srsran_sch_cfg_nr_t& sch)
void fill_dl_dci_ue_fields(const slot_ue& ue,
const bwp_params& bwp_cfg,
uint32_t ss_id,
srsran_dci_location_t dci_pos,
srsran_dci_dl_nr_t& dci)
{
fill_sch_ue_common(ue, rbgmask, cc_cfg, sch);
sch.grant.k = ue.cc_cfg->pdsch_res_list[0].k0;
sch.grant.dci_format = srsran_dci_format_nr_1_0;
// Note: DCI location may not be the final one, as scheduler may rellocate the UE PDCCH. However, the remaining DCI
// params are independent of the exact DCI location
bool ret = ue.cfg->phy().get_dci_ctx_pdsch_rnti_c(ss_id, dci_pos, ue.rnti, dci.ctx);
srsran_assert(ret, "Invalid DL DCI format");
fill_dci_common(ue, bwp_cfg, dci);
if (dci.ctx.format == srsran_dci_format_nr_1_0) {
dci.harq_feedback = ue.cfg->phy().harq_ack.dl_data_to_ul_ack[ue.pdsch_tti.sf_idx()] - 1;
} else {
dci.harq_feedback = ue.pdsch_tti.sf_idx();
}
}
void fill_pusch_ue(const slot_ue& ue, const rbgmask_t& rbgmask, const bwp_params& cc_cfg, srsran_sch_cfg_nr_t& sch)
void fill_ul_dci_ue_fields(const slot_ue& ue,
const bwp_params& bwp_cfg,
uint32_t ss_id,
srsran_dci_location_t dci_pos,
srsran_dci_ul_nr_t& dci)
{
fill_sch_ue_common(ue, rbgmask, cc_cfg, sch);
sch.grant.k = ue.cc_cfg->pusch_res_list[0].k2;
sch.grant.dci_format = srsran_dci_format_nr_0_1;
}
bool ret = ue.cfg->phy().get_dci_ctx_pdsch_rnti_c(ss_id, dci_pos, ue.rnti, dci.ctx);
srsran_assert(ret, "Invalid DL DCI format");
pucch_resource_grant find_pucch_resource(const slot_ue& ue, const rbgmask_t& rbgs, uint32_t tbs)
{
if (ue.cfg->cfg().pucch.enabled) {
for (uint32_t i = 0; i < SRSRAN_PUCCH_NR_MAX_NOF_SETS; ++i) {
const auto& rset = ue.cfg->cfg().pucch.sets[i];
if (rset.max_payload_size >= tbs) {
for (uint32_t sid = 0; sid < rset.nof_resources; ++sid) {
return pucch_resource_grant{ue.rnti, i, sid};
}
}
}
}
return pucch_resource_grant{SRSRAN_INVALID_RNTI, 0, 0};
fill_dci_common(ue, bwp_cfg, dci);
}
} // namespace sched_nr_impl

@ -0,0 +1,106 @@
/**
*
* \section COPYRIGHT
*
* Copyright 2013-2021 Software Radio Systems Limited
*
* By using this file, you agree to the terms and conditions set
* forth in the LICENSE file which can be found at the top level of
* the distribution.
*
*/
#include "srsenb/hdr/stack/mac/nr/sched_nr_rb.h"
namespace srsenb {
namespace sched_nr_impl {
/// TS 38.214, Table 6.1.2.2.1-1 - Nominal RBG size P
uint32_t get_P(uint32_t bwp_nof_prb, bool config_1_or_2)
{
srsran_assert(bwp_nof_prb > 0 and bwp_nof_prb <= 275, "Invalid BWP size");
if (bwp_nof_prb <= 36) {
return config_1_or_2 ? 2 : 4;
}
if (bwp_nof_prb <= 72) {
return config_1_or_2 ? 4 : 8;
}
if (bwp_nof_prb <= 144) {
return config_1_or_2 ? 8 : 16;
}
return 16;
}
/// TS 38.214 - total number of RBGs for an uplink bandwidth part of size "bwp_nof_prb" PRBs
uint32_t get_nof_rbgs(uint32_t bwp_nof_prb, uint32_t bwp_start, bool config1_or_2)
{
uint32_t P = get_P(bwp_nof_prb, config1_or_2);
return srsran::ceil_div(bwp_nof_prb + (bwp_start % P), P);
}
uint32_t get_rbg_size(uint32_t bwp_nof_prb, uint32_t bwp_start, bool config1_or_2, uint32_t rbg_idx)
{
uint32_t P = get_P(bwp_nof_prb, config1_or_2);
uint32_t nof_rbgs = get_nof_rbgs(bwp_nof_prb, bwp_start, config1_or_2);
if (rbg_idx == 0) {
return P - (bwp_start % P);
}
if (rbg_idx == nof_rbgs - 1) {
uint32_t ret = (bwp_start + bwp_nof_prb) % P;
return ret > 0 ? ret : P;
}
return P;
}
bwp_rb_bitmap::bwp_rb_bitmap(uint32_t bwp_nof_prbs, uint32_t bwp_prb_start_, bool config1_or_2) :
prbs_(bwp_nof_prbs),
rbgs_(get_nof_rbgs(bwp_nof_prbs, bwp_prb_start_, config1_or_2)),
bwp_prb_start(bwp_prb_start_),
P_(get_P(bwp_nof_prbs, config1_or_2)),
Pnofbits(log2(P_)),
first_rbg_size(get_rbg_size(bwp_nof_prbs, bwp_prb_start_, config1_or_2, 0))
{}
uint32_t bwp_rb_bitmap::prb_to_rbg_idx(uint32_t prb_idx) const
{
return ((prb_idx + P_ - first_rbg_size) >> Pnofbits);
}
void bwp_rb_bitmap::add_prbs_to_rbgs(const prb_bitmap& grant)
{
int idx = 0;
do {
idx = grant.find_lowest(idx, grant.size(), true);
if (idx < 0) {
return;
}
uint32_t rbg_idx = prb_to_rbg_idx(idx);
rbgs_.set(rbg_idx, true);
idx++;
} while (idx != (int)prbs_.size());
}
void bwp_rb_bitmap::add_prbs_to_rbgs(const prb_interval& grant)
{
uint32_t rbg_start = prb_to_rbg_idx(grant.start());
uint32_t rbg_stop = std::min(prb_to_rbg_idx(grant.stop() - 1) + 1u, (uint32_t)rbgs_.size());
rbgs_.fill(rbg_start, rbg_stop);
}
void bwp_rb_bitmap::add_rbgs_to_prbs(const rbg_bitmap& grant)
{
int idx = 0;
do {
idx = grant.find_lowest(idx, grant.size(), true);
if (idx < 0) {
return;
}
uint32_t prb_idx = (idx == 0) ? 0 : (idx - 1) * P_ + first_rbg_size;
uint32_t prb_end = std::min(prb_idx + ((idx == 0) ? first_rbg_size : P_), (uint32_t)prbs_.size());
prbs_.fill(prb_idx, prb_end);
idx++;
} while (idx != (int)prbs_.size());
}
} // namespace sched_nr_impl
} // namespace srsenb
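Worked numbers (illustrative) for the helpers above, which also explain the SCHED_NR_MAX_NOF_RBGS bump from 17 to 18 earlier in this commit: for a 275-PRB BWP starting at PRB 0 with RBG configuration 1,
uint32_t P        = get_P(275, true);                 // 16, since 275 > 144
uint32_t nof_rbgs = get_nof_rbgs(275, 0, true);       // ceil_div(275, 16) == 18
uint32_t last     = get_rbg_size(275, 0, true, 17);   // (0 + 275) % 16 == 3 PRBs in the last RBG
bwp_rb_bitmap grid(275, 0, true);
// prb_to_rbg_idx(prb) == (prb + P - first_rbg_size) >> log2(P); with bwp_start == 0 this is prb / 16
uint32_t rbg_a = grid.prb_to_rbg_idx(0);              // 0
uint32_t rbg_b = grid.prb_to_rbg_idx(274);            // 17, the partial last RBG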

@ -19,8 +19,8 @@ namespace sched_nr_impl {
#define NUMEROLOGY_IDX 0
bwp_slot_grid::bwp_slot_grid(const bwp_params& bwp_cfg_, uint32_t slot_idx_) :
dl_rbgs(bwp_cfg_.N_rbg),
ul_rbgs(bwp_cfg_.N_rbg),
dl_prbs(bwp_cfg_.cfg.rb_width, bwp_cfg_.cfg.start_rb, bwp_cfg_.cfg.pdsch.rbg_size_cfg_1),
ul_prbs(bwp_cfg_.cfg.rb_width, bwp_cfg_.cfg.start_rb, bwp_cfg_.cfg.pdsch.rbg_size_cfg_1),
slot_idx(slot_idx_),
cfg(&bwp_cfg_),
is_dl(srsran_tdd_nr_is_dl(&bwp_cfg_.cell_cfg.tdd, NUMEROLOGY_IDX, slot_idx_)),
@ -41,8 +41,8 @@ void bwp_slot_grid::reset()
coreset->reset();
}
}
dl_rbgs.reset();
ul_rbgs.reset();
dl_prbs.reset();
ul_prbs.reset();
dl_pdcchs.clear();
ul_pdcchs.clear();
pucchs.clear();
@ -63,7 +63,7 @@ bwp_slot_allocator::bwp_slot_allocator(bwp_res_grid& bwp_grid_) :
alloc_result bwp_slot_allocator::alloc_rar(uint32_t aggr_idx,
const srsenb::sched_nr_impl::pending_rar_t& rar,
srsenb::sched_nr_impl::rbg_interval interv,
prb_interval interv,
uint32_t nof_grants)
{
static const uint32_t msg3_nof_prbs = 3;
@ -77,8 +77,8 @@ alloc_result bwp_slot_allocator::alloc_rar(uint32_t
}
// Check DL RB collision
rbgmask_t& pdsch_mask = bwp_pdcch_slot.dl_rbgs;
rbgmask_t dl_mask(pdsch_mask.size());
const prb_bitmap& pdsch_mask = bwp_pdcch_slot.dl_prbs.prbs();
prb_bitmap dl_mask(pdsch_mask.size());
dl_mask.fill(interv.start(), interv.stop());
if ((pdsch_mask & dl_mask).any()) {
logger.debug("SCHED: Provided RBG mask collides with allocation previously made.");
@ -88,7 +88,7 @@ alloc_result bwp_slot_allocator::alloc_rar(uint32_t
// Check Msg3 RB collision
uint32_t total_ul_nof_prbs = msg3_nof_prbs * nof_grants;
uint32_t total_ul_nof_rbgs = srsran::ceil_div(total_ul_nof_prbs, get_P(bwp_grid.nof_prbs(), false));
rbg_interval msg3_rbgs = find_empty_rbg_interval(bwp_msg3_slot.ul_rbgs, total_ul_nof_rbgs);
prb_interval msg3_rbgs = find_empty_interval_of_length(bwp_msg3_slot.ul_prbs.prbs(), total_ul_nof_rbgs);
if (msg3_rbgs.length() < total_ul_nof_rbgs) {
logger.debug("SCHED: No space in PUSCH for Msg3.");
return alloc_result::sch_collision;
@ -112,12 +112,12 @@ alloc_result bwp_slot_allocator::alloc_rar(uint32_t
}
// RAR allocation successful.
bwp_pdcch_slot.dl_rbgs.fill(interv.start(), interv.stop());
bwp_pdcch_slot.dl_prbs.add(interv);
return alloc_result::success;
}
alloc_result bwp_slot_allocator::alloc_pdsch(slot_ue& ue, const rbgmask_t& dl_mask)
alloc_result bwp_slot_allocator::alloc_pdsch(slot_ue& ue, const prb_grant& dl_grant)
{
if (ue.h_dl == nullptr) {
logger.warning("SCHED: Trying to allocate PDSCH for rnti=0x%x with no available HARQs", ue.rnti);
@ -135,49 +135,65 @@ alloc_result bwp_slot_allocator::alloc_pdsch(slot_ue& ue, const rbgmask_t& dl_ma
logger.warning("SCHED: Maximum number of DL allocations reached");
return alloc_result::no_grant_space;
}
rbgmask_t& pdsch_mask = bwp_pdsch_slot.dl_rbgs;
if ((pdsch_mask & dl_mask).any()) {
if (bwp_pdsch_slot.dl_prbs.collides(dl_grant)) {
return alloc_result::sch_collision;
}
const uint32_t aggr_idx = 2, search_space_id = 1;
uint32_t coreset_id = ue.cfg->cfg().pdcch.search_space[search_space_id].coreset_id;
if (not bwp_pdcch_slot.coresets[coreset_id]->alloc_dci(pdcch_grant_type_t::dl_data, aggr_idx, search_space_id, &ue)) {
// Find space in PUCCH
// TODO
// Find space and allocate PDCCH
const uint32_t aggr_idx = 2, ss_id = 1;
uint32_t coreset_id = ue.cfg->phy().pdcch.search_space[ss_id].coreset_id;
if (not bwp_pdcch_slot.coresets[coreset_id]->alloc_dci(pdcch_grant_type_t::dl_data, aggr_idx, ss_id, &ue)) {
// Could not find space in PDCCH
return alloc_result::no_cch_space;
}
int mcs = -1, tbs = -1;
// Allocate HARQ
if (ue.h_dl->empty()) {
mcs = 20;
tbs = 100;
bool ret = ue.h_dl->new_tx(ue.pdsch_tti, ue.uci_tti, dl_mask, mcs, tbs, 4);
int mcs = 20;
int tbs = 100;
bool ret = ue.h_dl->new_tx(ue.pdsch_tti, ue.uci_tti, dl_grant, mcs, tbs, 4);
srsran_assert(ret, "Failed to allocate DL HARQ");
} else {
bool ret = ue.h_dl->new_retx(ue.pdsch_tti, ue.uci_tti, dl_mask, &mcs, &tbs);
bool ret = ue.h_dl->new_retx(ue.pdsch_tti, ue.uci_tti, dl_grant);
srsran_assert(ret, "Failed to allocate DL HARQ retx");
}
pucch_resource_grant pucch_res = find_pucch_resource(ue, bwp_uci_slot.ul_rbgs, tbs);
if (pucch_res.rnti != SRSRAN_INVALID_RNTI) {
// Could not find space in PUCCH for HARQ-ACK
bwp_pdcch_slot.coresets[coreset_id]->rem_last_dci();
return alloc_result::no_cch_space;
}
// Allocation Successful
// Generate PDCCH
pdcch_dl_t& pdcch = bwp_pdcch_slot.dl_pdcchs.back();
fill_dci_ue_cfg(ue, dl_mask, *bwp_grid.cfg, pdcch.dci);
pdsch_mask |= dl_mask;
fill_dl_dci_ue_fields(ue, *bwp_grid.cfg, ss_id, pdcch.dci.ctx.location, pdcch.dci);
pdcch.dci.pucch_resource = 0;
pdcch.dci.dai = std::count_if(bwp_uci_slot.pucchs.begin(), bwp_uci_slot.pucchs.end(), [&ue](const pucch_t& p) {
return p.uci_cfg.pucch.rnti == ue.rnti;
});
// Generate PUCCH
bwp_uci_slot.pucchs.emplace_back();
pucch_grant& pucch = bwp_uci_slot.pucchs.back();
pucch.resource = pucch_res;
bwp_uci_slot.ul_rbgs.set(
ue.cfg->cfg().pucch.sets[pucch_res.resource_set_id].resources[pucch_res.resource_id].starting_prb);
pucch_t& pucch = bwp_uci_slot.pucchs.back();
pucch.uci_cfg.pucch.rnti = ue.rnti;
// Generate PDSCH
bwp_pdsch_slot.dl_prbs |= dl_grant;
bwp_pdsch_slot.pdschs.emplace_back();
pdsch_t& pdsch = bwp_pdsch_slot.pdschs.back();
srsran_slot_cfg_t slot_cfg;
slot_cfg.idx = ue.pdsch_tti.sf_idx();
bool ret = ue.cfg->phy().get_pdsch_cfg(slot_cfg, pdcch.dci, pdsch.sch);
srsran_assert(ret, "Error converting DCI to grant");
if (ue.h_dl->nof_retx() == 0) {
ue.h_dl->set_tbs(pdsch.sch.grant.tb[0].tbs); // update HARQ with correct TBS
} else {
srsran_assert(pdsch.sch.grant.tb[0].tbs == (int)ue.h_dl->tbs(), "The TBS did not remain constant in retx");
}
return alloc_result::success;
}
alloc_result bwp_slot_allocator::alloc_pusch(slot_ue& ue, const rbgmask_t& ul_mask)
alloc_result bwp_slot_allocator::alloc_pusch(slot_ue& ue, const rbg_bitmap& ul_mask)
{
if (ue.h_ul == nullptr) {
logger.warning("SCHED: Trying to allocate PUSCH for rnti=0x%x with no available HARQs", ue.rnti);
@ -194,33 +210,32 @@ alloc_result bwp_slot_allocator::alloc_pusch(slot_ue& ue, const rbgmask_t& ul_ma
logger.warning("SCHED: Maximum number of UL allocations reached");
return alloc_result::no_grant_space;
}
rbgmask_t& pusch_mask = bwp_pusch_slot.ul_rbgs;
const rbg_bitmap& pusch_mask = bwp_pusch_slot.ul_prbs.rbgs();
if ((pusch_mask & ul_mask).any()) {
return alloc_result::sch_collision;
}
const uint32_t aggr_idx = 2, search_space_id = 1;
uint32_t coreset_id = ue.cfg->cfg().pdcch.search_space[search_space_id].coreset_id;
if (not bwp_pdcch_slot.coresets[coreset_id].value().alloc_dci(
pdcch_grant_type_t::ul_data, aggr_idx, search_space_id, &ue)) {
const uint32_t aggr_idx = 2, ss_id = 1;
uint32_t coreset_id = ue.cfg->phy().pdcch.search_space[ss_id].coreset_id;
if (not bwp_pdcch_slot.coresets[coreset_id].value().alloc_dci(pdcch_grant_type_t::ul_data, aggr_idx, ss_id, &ue)) {
// Could not find space in PDCCH
return alloc_result::no_cch_space;
}
int mcs = -1, tbs = -1;
if (ue.h_ul->empty()) {
mcs = 20;
tbs = 100;
int mcs = 20;
int tbs = 100;
bool ret = ue.h_ul->new_tx(ue.pusch_tti, ue.pusch_tti, ul_mask, mcs, tbs, ue.cfg->ue_cfg()->maxharq_tx);
srsran_assert(ret, "Failed to allocate UL HARQ");
} else {
srsran_assert(ue.h_ul->new_retx(ue.pusch_tti, ue.pusch_tti, ul_mask, &mcs, &tbs),
"Failed to allocate UL HARQ retx");
srsran_assert(ue.h_ul->new_retx(ue.pusch_tti, ue.pusch_tti, ul_mask), "Failed to allocate UL HARQ retx");
}
// Allocation Successful
// Generate PDCCH
pdcch_ul_t& pdcch = pdcchs.back();
fill_dci_ue_cfg(ue, ul_mask, *bwp_grid.cfg, pdcch.dci);
pusch_mask |= ul_mask;
fill_ul_dci_ue_fields(ue, *bwp_grid.cfg, ss_id, pdcch.dci.ctx.location, pdcch.dci);
// Generate PUSCH
bwp_pusch_slot.ul_prbs.add(ul_mask);
return alloc_result::success;
}
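For context, a minimal caller-side sketch (illustrative, not part of the patch) of how a scheduler pass could feed a contiguous grant into the new alloc_pdsch; slot_alloc and ue stand for a bwp_slot_allocator and a slot_ue obtained elsewhere, and the 20-PRB length is arbitrary:
// Pick a free contiguous PRB range in the slot's DL grid and try to allocate it
const prb_bitmap& busy   = slot_alloc.res_grid()[slot_alloc.get_pdcch_tti()].dl_prbs.prbs();
prb_interval      interv = find_empty_interval_of_length(busy, 20);
if (not interv.empty()) {
  alloc_result res = slot_alloc.alloc_pdsch(ue, interv);  // implicit prb_interval -> prb_grant
  // res is one of success, sch_collision, no_cch_space, no_grant_space, ...
}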

@ -196,12 +196,9 @@ bool sched_worker_manager::get_sched_result(tti_point pdcch_tti, uint32_t cc, dl
dl_res.pdcch_dl = pdcch_bwp_slot.dl_pdcchs;
dl_res.pdcch_ul = pdcch_bwp_slot.ul_pdcchs;
ul_res.pucch.resize(pdcch_bwp_slot.pucchs.size());
for (uint32_t i = 0; i < pdcch_bwp_slot.pucchs.size(); ++i) {
ul_res.pucch[i].uci_cfg.pucch.rnti = pdcch_bwp_slot.pucchs[i].resource.rnti;
ul_res.pucch[i].uci_cfg.pucch.resource_id = pdcch_bwp_slot.pucchs[i].resource.resource_id;
}
dl_res.pdsch = pdcch_bwp_slot.pdschs;
ul_res.pucch = pdcch_bwp_slot.pucchs;
ul_res.pusch = pdcch_bwp_slot.puschs;
// clear up BWP slot
pdcch_bwp_slot.reset();

@ -13,3 +13,11 @@ target_link_libraries(sched_nr_test
${CMAKE_THREAD_LIBS_INIT}
${Boost_LIBRARIES})
add_test(sched_nr_test sched_nr_test)
add_executable(sched_nr_prb_test sched_nr_prb_test.cc)
target_link_libraries(sched_nr_prb_test
srsgnb_mac
srsran_common
${CMAKE_THREAD_LIBS_INIT}
${Boost_LIBRARIES})
add_test(sched_nr_prb_test sched_nr_prb_test)

@ -0,0 +1,130 @@
/**
*
* \section COPYRIGHT
*
* Copyright 2013-2021 Software Radio Systems Limited
*
* By using this file, you agree to the terms and conditions set
* forth in the LICENSE file which can be found at the top level of
* the distribution.
*
*/
#include "srsenb/hdr/stack/mac/nr/sched_nr_rb.h"
#include "srsran/common/test_common.h"
using namespace srsenb;
using namespace srsenb::sched_nr_impl;
void test_bwp_prb_grant()
{
// TEST: default ctor
prb_grant grant;
TESTASSERT(grant.is_alloc_type1());
TESTASSERT(grant.prbs().length() == 0);
// TEST: assignment of RBGs
rbg_bitmap rbgs(18);
rbgs.set(1);
grant = rbgs;
TESTASSERT(grant.is_alloc_type0() and grant.rbgs().count() == 1);
// TEST: assignment of PRBs
prb_interval prb_interv(2, 5);
grant = prb_interv;
TESTASSERT(grant.is_alloc_type1() and grant.prbs().length() == 3);
// TEST: non-default ctor
prb_grant grant2(prb_interv), grant3(rbgs);
TESTASSERT(grant2.is_alloc_type1() and grant2.prbs().length() == 3);
TESTASSERT(grant3.is_alloc_type0() and grant3.rbgs().count() == 1);
// TEST: copy ctor
prb_grant grant4(grant2), grant5(grant3);
TESTASSERT(grant4.is_alloc_type1() and grant4.prbs().length() == 3);
TESTASSERT(grant5.is_alloc_type0() and grant5.rbgs().count() == 1);
// TEST: copy assignment
grant = grant3;
TESTASSERT(grant.is_alloc_type0() and grant.rbgs().count() == 1);
grant = grant2;
TESTASSERT(grant.is_alloc_type1() and grant.prbs().length() == 3);
}
void test_bwp_rb_bitmap()
{
bwp_rb_bitmap rb_bitmap(275, 0, true);
TESTASSERT(rb_bitmap.P() == 16);
TESTASSERT(rb_bitmap.rbgs().none());
TESTASSERT(rb_bitmap.prbs().none());
TESTASSERT(rb_bitmap.prbs().size() == 275 and rb_bitmap.nof_prbs() == 275);
TESTASSERT(rb_bitmap.rbgs().size() == 18 and rb_bitmap.nof_rbgs() == 18);
rb_bitmap.add(prb_interval{0, 1});
TESTASSERT(rb_bitmap.prbs().count() == 1 and rb_bitmap.prbs().test(0));
TESTASSERT(rb_bitmap.rbgs().count() == 1 and rb_bitmap.rbgs().test(0));
rb_bitmap.add(prb_interval{2, 4});
TESTASSERT(rb_bitmap.prbs().count() == 3 and rb_bitmap.prbs().test(2) and not rb_bitmap.prbs().test(1));
TESTASSERT(rb_bitmap.rbgs().count() == 1 and rb_bitmap.rbgs().test(0));
prb_bitmap prbs(rb_bitmap.nof_prbs());
prbs.set(1);
prbs.set(2);
prbs.set(15);
rb_bitmap.add(prbs);
TESTASSERT(rb_bitmap.prbs().count() == 5 and rb_bitmap.prbs().test(1) and rb_bitmap.prbs().test(15));
TESTASSERT(rb_bitmap.rbgs().count() == 1 and rb_bitmap.rbgs().test(0));
prbs.set(16);
rb_bitmap |= prbs;
TESTASSERT(rb_bitmap.prbs().count() == 6 and rb_bitmap.prbs().test(16));
TESTASSERT(rb_bitmap.rbgs().count() == 2 and rb_bitmap.rbgs().test(1));
rbg_bitmap rbgs(rb_bitmap.nof_rbgs());
rbgs.set(3);
rbgs.set(17);
rb_bitmap |= rbgs;
TESTASSERT(rb_bitmap.prbs().count() == (6 + 16 + 3) and rb_bitmap.prbs().test(rb_bitmap.nof_prbs() - 1));
TESTASSERT(rb_bitmap.rbgs().count() == 4 and rb_bitmap.rbgs().test(3) and rb_bitmap.rbgs().test(17));
rbgs.set(0);
rb_bitmap |= rbgs;
TESTASSERT(rb_bitmap.prbs().count() == (16 + 1 + 16 + 3) and rb_bitmap.prbs().test(rb_bitmap.nof_prbs() - 1));
TESTASSERT(rb_bitmap.rbgs().count() == 4 and rb_bitmap.rbgs().test(3) and rb_bitmap.rbgs().test(17));
// TEST: collides operator
TESTASSERT(rb_bitmap.collides(rbgs));
TESTASSERT(rb_bitmap.collides(prb_interval{0, 2}));
}
void test_bwp_rb_bitmap_search()
{
bwp_rb_bitmap rb_bitmap(275, 0, true);
prb_interval prbs = find_empty_interval_of_length(rb_bitmap.prbs(), 5);
TESTASSERT(prbs == prb_interval(0, 5));
prbs = find_empty_interval_of_length(rb_bitmap.prbs(), rb_bitmap.prbs().size());
TESTASSERT(prbs == prb_interval(0, rb_bitmap.prbs().size()));
rb_bitmap |= prb_interval{1, 5};
prbs = find_empty_interval_of_length(rb_bitmap.prbs(), rb_bitmap.prbs().size());
TESTASSERT(prbs == prb_interval(5, rb_bitmap.prbs().size()));
rb_bitmap |= prb_interval{16, 32};
prbs = find_empty_interval_of_length(rb_bitmap.prbs(), rb_bitmap.prbs().size());
TESTASSERT(prbs == prb_interval(32, rb_bitmap.prbs().size()));
rb_bitmap |= prb_interval{270, 275};
prbs = find_empty_interval_of_length(rb_bitmap.prbs(), rb_bitmap.prbs().size());
TESTASSERT(prbs == prb_interval(32, 270));
prbs = find_empty_interval_of_length(rb_bitmap.prbs(), 1);
TESTASSERT(prbs == prb_interval(0, 1));
prbs = find_empty_interval_of_length(rb_bitmap.prbs(), 5);
TESTASSERT(prbs == prb_interval(5, 10));
}
int main()
{
test_bwp_prb_grant();
test_bwp_rb_bitmap();
test_bwp_rb_bitmap_search();
}

@ -49,11 +49,11 @@ sched_nr_interface::cell_cfg_t get_default_cell_cfg()
cell_cfg.tdd.pattern2.period_ms = 0;
cell_cfg.bwps.resize(1);
cell_cfg.bwps[0].pdcch.coreset_present[0] = true;
cell_cfg.bwps[0].pdcch.coreset[0] = get_default_coreset0();
cell_cfg.bwps[0].pdcch.coreset_present[1] = true;
cell_cfg.bwps[0].pdcch.coreset[1] = default_phy_cfg.pdcch.coreset[1];
cell_cfg.bwps[0].pdcch = default_phy_cfg.pdcch;
cell_cfg.bwps[0].pdsch = default_phy_cfg.pdsch;
cell_cfg.bwps[0].pdcch.coreset_present[0] = true;
cell_cfg.bwps[0].pdcch.coreset[0] = get_default_coreset0();
cell_cfg.bwps[0].pdcch.search_space_present[0] = true;
auto& ss = cell_cfg.bwps[0].pdcch.search_space[0];
ss.id = 0;
@ -67,8 +67,6 @@ sched_nr_interface::cell_cfg_t get_default_cell_cfg()
ss.nof_candidates[4] = 0;
ss.nof_formats = 1;
ss.formats[0] = srsran_dci_format_nr_1_0;
cell_cfg.bwps[0].pdcch.search_space_present[1] = true;
cell_cfg.bwps[0].pdcch.search_space[1] = default_phy_cfg.pdcch.search_space[1];
cell_cfg.bwps[0].pdcch.ra_search_space_present = true;
cell_cfg.bwps[0].pdcch.ra_search_space = cell_cfg.bwps[0].pdcch.search_space[1];
