nr,gnb,mac: implement basic sib1 scheduling

master
Francisco 3 years ago committed by Francisco Paisana
parent 30c0ffe3e5
commit 60d4d12070

@ -90,7 +90,8 @@ private:
// Encoding
srsran::byte_buffer_t* assemble_rar(srsran::const_span<sched_nr_interface::msg3_grant_t> grants);
srsran::unique_byte_buffer_t rar_pdu_buffer = nullptr;
srsran::unique_byte_buffer_t rar_pdu_buffer;
// Interaction with other components
phy_interface_stack_nr* phy = nullptr;

@ -15,36 +15,13 @@
#include "sched_nr_cfg.h"
#include "sched_nr_grant_allocator.h"
#include "sched_nr_signalling.h"
#include "sched_nr_time_rr.h"
#include "srsran/adt/pool/cached_alloc.h"
namespace srsenb {
namespace sched_nr_impl {
/// SIB scheduler
class si_sched
{
public:
explicit si_sched(const bwp_params_t& bwp_cfg_);
void run_slot(bwp_slot_allocator& slot_alloc);
private:
const bwp_params_t* bwp_cfg = nullptr;
srslog::basic_logger& logger;
struct sched_si_t {
uint32_t n = 0;
uint32_t len = 0;
uint32_t win_len = 0;
uint32_t period = 0;
uint32_t n_tx = 0;
alloc_result result = alloc_result::invalid_coderate;
slot_point win_start;
};
srsran::bounded_vector<sched_si_t, 10> pending_sis;
};
using dl_sched_rar_info_t = sched_nr_interface::rar_info_t;
/// RAR/Msg3 scheduler
@ -87,6 +64,7 @@ public:
const bwp_params_t* cfg;
// channel-specific schedulers
si_sched si;
ra_sched ra;
std::unique_ptr<sched_nr_base> data_sched;

@ -54,6 +54,8 @@ struct bwp_slot_grid {
slot_coreset_list coresets;
harq_ack_list_t pending_acks;
srsran::bounded_vector<uint32_t, MAX_GRANTS> sib_idxs;
srsran::unique_pool_ptr<tx_harq_softbuffer> rar_softbuffer;
bwp_slot_grid() = default;
@ -101,7 +103,10 @@ public:
slot_point get_pdcch_tti() const { return pdcch_slot; }
slot_point get_tti_rx() const { return pdcch_slot - TX_ENB_DELAY; }
const bwp_res_grid& res_grid() const { return bwp_grid; }
const bwp_slot_grid& tx_slot_grid() const { return bwp_grid[pdcch_slot]; }
bwp_slot_grid& tx_slot_grid() { return bwp_grid[pdcch_slot]; }
srslog::basic_logger& logger;
const bwp_params_t& cfg;
private:
@ -110,7 +115,6 @@ private:
alloc_result verify_pusch_space(bwp_slot_grid& pusch_grid, bwp_slot_grid* pdcch_grid = nullptr) const;
alloc_result verify_ue_cfg(const ue_carrier_params_t& ue_cfg, harq_proc* harq) const;
srslog::basic_logger& logger;
bwp_res_grid& bwp_grid;
slot_point pdcch_slot;

@ -25,6 +25,8 @@ struct bwp_res_grid;
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
bool fill_dci_sib(prb_interval interv, uint32_t sib_idx, const bwp_params_t& bwp_cfg, srsran_dci_dl_nr_t& dci);
bool fill_dci_rar(prb_interval interv, uint16_t ra_rnti, const bwp_params_t& bwp_cfg, srsran_dci_dl_nr_t& dci);
bool fill_dci_msg3(const slot_ue& ue, const bwp_params_t& bwp_cfg, srsran_dci_ul_nr_t& dci);

@ -89,7 +89,7 @@ public:
using ue_cc_cfg_t = sched_nr_ue_cc_cfg_t;
using ue_cfg_t = sched_nr_ue_cfg_t;
////// RA procedure //////
////// RA signalling //////
struct rar_info_t {
uint32_t preamble_idx; // is this the RAPID?
@ -117,6 +117,8 @@ public:
struct dl_res_t {
dl_sched_t phy;
sched_rar_list_t rar;
srsran::bounded_vector<uint32_t, MAX_GRANTS> sib_idxs;
};
virtual ~sched_nr_interface() = default;

@ -13,12 +13,15 @@
#ifndef SRSRAN_SCHED_NR_SIGNALLING_H
#define SRSRAN_SCHED_NR_SIGNALLING_H
#include "../sched_common.h"
#include "sched_nr_cfg.h"
#include "sched_nr_interface.h"
namespace srsenb {
namespace sched_nr_impl {
class bwp_slot_allocator;
/// Schedule NZP-CSI-RS resources for given slot
void sched_nzp_csi_rs(srsran::const_span<srsran_csi_rs_nzp_set_t> nzp_csi_rs_sets,
const srsran_slot_cfg_t& slot_cfg,
@ -41,11 +44,35 @@ void sched_nzp_csi_rs(srsran::const_span<srsran_csi_rs_nzp_set_t> nzp_csi_rs_set
*/
void sched_ssb_basic(const slot_point& sl_point, uint32_t ssb_periodicity, ssb_list& ssb_list);
/// Fill DCI fields with SIB info
bool fill_dci_sib(prb_interval interv, uint32_t sib_idx, const bwp_params_t& bwp_cfg, srsran_dci_dl_nr_t& dci);
/// For a given BWP and slot, schedule SSB, NZP CSI RS and SIBs
void sched_dl_signalling(const bwp_params_t& bwp_params,
slot_point sl_pdcch,
ssb_list& ssb_list,
nzp_csi_rs_list& nzp_csi_rs);
void sched_dl_signalling(bwp_slot_allocator& bwp_alloc);
/// scheduler for SIBs
class si_sched
{
public:
explicit si_sched(const bwp_params_t& bwp_cfg_);
void run_slot(bwp_slot_allocator& slot_alloc);
private:
const bwp_params_t* bwp_cfg = nullptr;
srslog::basic_logger& logger;
struct si_msg_ctxt_t {
uint32_t n = 0; /// index in schedulingInfoList in si-SchedulingInfo in SIB1
uint32_t len = 0;
uint32_t win_len = 0;
uint32_t period = 0;
uint32_t n_tx = 0;
alloc_result result = alloc_result::invalid_coderate; /// last attempt to schedule SI
slot_point win_start; /// start of SI window, invalid if outside
};
srsran::bounded_vector<si_msg_ctxt_t, 10> pending_sis;
};
} // namespace sched_nr_impl
} // namespace srsenb
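
Note: the window-start test in si_sched::run_slot further down in this diff follows the SI-window rule of TS 38.331: for the n-th entry of schedulingInfoList with window length w slots, x = (n-1)*w, and the window opens at slot (x mod N) of the frame where SFN mod T = floor(x/N), with N the slots per frame and T the si-Periodicity in frames. A minimal standalone sketch of that condition, using a plain struct in place of the project's slot_point and bwp_params_t types:

// Hypothetical helper, not part of this commit: true when the SI window of `si` starts
// in the slot identified by (sfn, slot_idx). Field names mirror si_msg_ctxt_t above.
struct si_msg_cfg {
  uint32_t n;       // 1-based index in schedulingInfoList
  uint32_t win_len; // si-WindowLength, in slots
  uint32_t period;  // si-Periodicity, in radio frames
};
inline bool si_window_starts(const si_msg_cfg& si, uint32_t sfn, uint32_t slot_idx, uint32_t slots_per_frame)
{
  uint32_t x = (si.n - 1) * si.win_len;
  return (sfn % si.period == x / slots_per_frame) and (slot_idx == x % slots_per_frame);
}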

@ -260,7 +260,7 @@ int mac_nr::cell_cfg(const std::vector<srsenb::sched_nr_interface::cell_cfg_t>&
if (true) {
sib_info_t sib = {};
sib.index = i;
sib.periodicity = 4; // TODO: read period_rf from config
sib.periodicity = 160; // TODO: read period_rf from config
sib.payload = srsran::make_byte_buffer();
if (sib.payload == nullptr) {
logger.error("Couldn't allocate PDU in %s().", __FUNCTION__);
@ -460,7 +460,7 @@ mac_nr::dl_sched_t* mac_nr::get_dl_sched(const srsran_slot_cfg_t& slot_cfg)
}
// Generate MAC DL PDUs
uint32_t rar_count = 0;
uint32_t rar_count = 0, si_count = 0;
srsran::rwlock_read_guard rw_lock(rwmutex);
for (pdsch_t& pdsch : dl_res->phy.pdsch) {
if (pdsch.sch.grant.rnti_type == srsran_rnti_type_c) {
@ -484,6 +484,9 @@ mac_nr::dl_sched_t* mac_nr::get_dl_sched(const srsran_slot_cfg_t& slot_cfg)
sched_nr_interface::rar_t& rar = dl_res->rar[rar_count++];
// for RARs we could actually move the byte_buffer to the PHY, as there are no retx
pdsch.data[0] = assemble_rar(rar.grants);
} else if (pdsch.sch.grant.rnti_type == srsran_rnti_type_si) {
uint32_t sib_idx = dl_res->sib_idxs[si_count++];
pdsch.data[0] = bcch_dlsch_payload[sib_idx].payload.get();
}
}
for (auto& u : ue_db) {

@ -17,66 +17,6 @@
namespace srsenb {
namespace sched_nr_impl {
si_sched::si_sched(const bwp_params_t& bwp_cfg_) :
bwp_cfg(&bwp_cfg_), logger(srslog::fetch_basic_logger(bwp_cfg_.sched_cfg.logger_name))
{}
void si_sched::run_slot(bwp_slot_allocator& slot_alloc)
{
const uint32_t si_aggr_level = 2;
slot_point pdcch_slot = slot_alloc.get_pdcch_tti();
const prb_bitmap& prbs = slot_alloc.res_grid()[pdcch_slot].dl_prbs.prbs();
// Update SI windows
uint32_t N = bwp_cfg->slots.size();
for (sched_si_t& si : pending_sis) {
uint32_t x = (si.n - 1) * si.win_len;
if (not si.win_start.valid() and (pdcch_slot.sfn() % si.period == x / N) and
pdcch_slot.slot_idx() == x % bwp_cfg->slots.size()) {
// If start of SI message window
si.win_start = pdcch_slot;
} else if (si.win_start.valid() and si.win_start + si.win_len >= pdcch_slot) {
// If end of SI message window
logger.warning(
"SCHED: Could not allocate SI message idx=%d, len=%d. Cause: %s", si.n, si.len, to_string(si.result));
si.win_start.clear();
}
}
// Schedule pending SIs
if (bwp_cfg->slots[pdcch_slot.slot_idx()].is_dl) {
for (sched_si_t& si : pending_sis) {
if (not si.win_start.valid()) {
continue;
}
// TODO: NOTE 2: The UE is not required to monitor PDCCH monitoring occasion(s) corresponding to each transmitted
// SSB in SI-window.
// Attempt grants with increasing number of PRBs (if the number of PRBs is too low, the coderate is invalid)
si.result = alloc_result::invalid_coderate;
uint32_t prb_start_idx = 0;
for (uint32_t nprbs = 4; nprbs < bwp_cfg->cfg.rb_width and si.result == alloc_result::invalid_coderate; ++nprbs) {
prb_interval grant = find_empty_interval_of_length(prbs, nprbs, prb_start_idx);
prb_start_idx = grant.start();
if (grant.length() != nprbs) {
si.result = alloc_result::no_sch_space;
break;
}
si.result = slot_alloc.alloc_si(si_aggr_level, si.n, si.n_tx, grant);
if (si.result == alloc_result::success) {
// SIB scheduled successfully
si.win_start.clear();
si.n_tx++;
}
}
}
}
}
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
ra_sched::ra_sched(const bwp_params_t& bwp_cfg_) :
bwp_cfg(&bwp_cfg_), logger(srslog::fetch_basic_logger(bwp_cfg_.sched_cfg.logger_name))
{}
@ -221,7 +161,7 @@ int ra_sched::dl_rach_info(const dl_sched_rar_info_t& rar_info)
}
bwp_manager::bwp_manager(const bwp_params_t& bwp_cfg) :
cfg(&bwp_cfg), ra(bwp_cfg), grid(bwp_cfg), data_sched(new sched_nr_time_rr())
cfg(&bwp_cfg), ra(bwp_cfg), si(bwp_cfg), grid(bwp_cfg), data_sched(new sched_nr_time_rr())
{}
} // namespace sched_nr_impl

@ -47,6 +47,7 @@ void bwp_slot_grid::reset()
dl.phy.pdcch_ul.clear();
dl.phy.pdsch.clear();
dl.rar.clear();
dl.sib_idxs.clear();
ul.pusch.clear();
ul.pucch.clear();
pending_acks.clear();
@ -68,20 +69,42 @@ bwp_slot_allocator::bwp_slot_allocator(bwp_res_grid& bwp_grid_, slot_point pdcch
alloc_result bwp_slot_allocator::alloc_si(uint32_t aggr_idx, uint32_t si_idx, uint32_t si_ntx, const prb_interval& prbs)
{
bwp_slot_grid& bwp_pdcch_slot = bwp_grid[pdcch_slot];
if (not bwp_pdcch_slot.is_dl()) {
logger.warning("SCHED: Trying to allocate PDSCH in TDD non-DL slot index=%d", bwp_pdcch_slot.slot_idx);
return alloc_result::no_sch_space;
}
pdcch_dl_list_t& pdsch_grants = bwp_pdcch_slot.dl.phy.pdcch_dl;
if (pdsch_grants.full()) {
logger.warning("SCHED: Maximum number of DL allocations reached");
return alloc_result::no_grant_space;
alloc_result ret = verify_pdsch_space(bwp_pdcch_slot, bwp_pdcch_slot);
if (ret != alloc_result::success) {
return ret;
}
if (bwp_pdcch_slot.dl_prbs.collides(prbs)) {
return alloc_result::sch_collision;
}
// TODO: Allocate PDCCH and PDSCH
const uint32_t coreset_id = 0;
const uint32_t ss_id = 0;
if (not bwp_pdcch_slot.coresets[coreset_id]->alloc_dci(pdcch_grant_type_t::sib, aggr_idx, ss_id)) {
logger.warning("SCHED: Cannot allocate SIB1.");
return alloc_result::no_cch_space;
}
// SIB allocation successful.
bwp_pdcch_slot.dl_prbs |= prbs;
// Generate DCI for SIB with SI-RNTI
pdcch_dl_t& pdcch = bwp_pdcch_slot.dl.phy.pdcch_dl.back();
if (not fill_dci_sib(prbs, si_idx, *bwp_grid.cfg, pdcch.dci)) {
// Cancel on-going PDCCH allocation
bwp_pdcch_slot.coresets[coreset_id]->rem_last_dci();
return alloc_result::invalid_coderate;
}
// Generate PDSCH
bwp_pdcch_slot.dl.phy.pdsch.emplace_back();
pdsch_t& pdsch = bwp_pdcch_slot.dl.phy.pdsch.back();
srsran_slot_cfg_t slot_cfg;
slot_cfg.idx = pdcch_slot.to_uint();
int code = srsran_ra_dl_dci_to_grant_nr(
&cfg.cell_cfg.carrier, &slot_cfg, &cfg.cfg.pdsch, &pdcch.dci, &pdsch.sch, &pdsch.sch.grant);
srsran_assert(code == SRSRAN_SUCCESS, "Error converting DCI to grant");
// Store SI msg index
bwp_pdcch_slot.sib_idxs.push_back(si_idx);
return alloc_result::success;
}
@ -104,6 +127,11 @@ alloc_result bwp_slot_allocator::alloc_rar_and_msg3(uint16_t
if (ret != alloc_result::success) {
return ret;
}
if (not bwp_pdcch_slot.dl.phy.ssb.empty()) {
// TODO: support concurrent PDSCH and SSB
logger.debug("SCHED: skipping RAR allocation. Cause: concurrent PDSCH and SSB not yet supported");
return alloc_result::no_sch_space;
}
if (pending_rachs.size() > bwp_pdcch_slot.dl.rar.capacity() - bwp_pdcch_slot.dl.rar.size()) {
logger.error("SCHED: Trying to allocate too many Msg3 grants in a single slot (%zd)", pending_rachs.size());
return alloc_result::invalid_grant_params;
@ -210,6 +238,11 @@ alloc_result bwp_slot_allocator::alloc_pdsch(slot_ue& ue, const prb_grant& dl_gr
if (result != alloc_result::success) {
return result;
}
if (not bwp_pdsch_slot.dl.phy.ssb.empty()) {
// TODO: support concurrent PDSCH and SSB
logger.debug("SCHED: skipping PDSCH allocation. Cause: concurrent PDSCH and SSB not yet supported");
return alloc_result::no_sch_space;
}
if (bwp_pdsch_slot.dl_prbs.collides(dl_grant)) {
return alloc_result::sch_collision;
}
@ -386,11 +419,6 @@ alloc_result bwp_slot_allocator::verify_pdsch_space(bwp_slot_grid& pdsch_grid,
return alloc_result::no_grant_space;
}
}
if (not pdsch_grid.dl.phy.ssb.empty()) {
// TODO: support concurrent PDSCH and SSB
logger.debug("SCHED: skipping PDSCH allocation. Cause: concurrent PDSCH and SSB not yet supported");
return alloc_result::no_sch_space;
}
return alloc_result::success;
}

@ -55,7 +55,6 @@ bool coreset_region::alloc_dci(pdcch_grant_type_t alloc_type,
srsran_assert((user == nullptr) xor
(alloc_type == pdcch_grant_type_t::dl_data or alloc_type == pdcch_grant_type_t::ul_data),
"UE should be only provided for DL or UL data allocations");
srsran_assert(not dci_list.full(), "SCHED: Unable to allocate DCI");
saved_dfs_tree.clear();
alloc_record record;
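
Note: with the assert gone, coreset_region::alloc_dci is expected to report a full DCI list to the caller rather than abort; the new bwp_slot_allocator::alloc_si above already maps a false return to alloc_result::no_cch_space. The remainder of the function is not shown in this diff, but the intended early-out would be along these lines (a sketch, not the actual srsRAN code):

if (dci_list.full()) {
  // No room left for another DCI in this CORESET and slot; let the caller back off
  // (e.g. alloc_si turns this into alloc_result::no_cch_space).
  return false;
}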

@ -11,6 +11,7 @@
*/
#include "srsgnb/hdr/stack/mac/sched_nr_signalling.h"
#include "srsgnb/hdr/stack/mac/sched_nr_grant_allocator.h"
#define POS_IN_BURST_FIRST_BIT_IDX 0
#define POS_IN_BURST_SECOND_BIT_IDX 1
@ -83,23 +84,119 @@ void sched_ssb_basic(const slot_point& sl_point, uint32_t ssb_periodicity, ssb_l
}
}
void sched_dl_signalling(const bwp_params_t& bwp_params,
slot_point sl_pdcch,
ssb_list& ssb_list,
nzp_csi_rs_list& nzp_csi_rs)
void sched_dl_signalling(bwp_slot_allocator& bwp_alloc)
{
const bwp_params_t& bwp_params = bwp_alloc.cfg;
slot_point sl_pdcch = bwp_alloc.get_pdcch_tti();
bwp_slot_grid& sl_grid = bwp_alloc.tx_slot_grid();
srsran_slot_cfg_t cfg;
cfg.idx = sl_pdcch.to_uint();
// Schedule SSB
sched_ssb_basic(sl_pdcch, bwp_params.cell_cfg.ssb.periodicity_ms, ssb_list);
sched_ssb_basic(sl_pdcch, bwp_params.cell_cfg.ssb.periodicity_ms, sl_grid.dl.phy.ssb);
// Schedule NZP-CSI-RS
sched_nzp_csi_rs(bwp_params.cfg.pdsch.nzp_csi_rs_sets, cfg, nzp_csi_rs);
sched_nzp_csi_rs(bwp_params.cfg.pdsch.nzp_csi_rs_sets, cfg, sl_grid.dl.phy.nzp_csi_rs);
}
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
bool fill_dci_sib(prb_interval interv, uint32_t sib_id, const bwp_params_t& bwp_cfg, srsran_dci_dl_nr_t& dci)
{
dci.mcs = 5;
dci.ctx.format = srsran_dci_format_nr_1_0;
dci.ctx.ss_type = srsran_search_space_type_common_0;
dci.ctx.rnti_type = srsran_rnti_type_si;
dci.ctx.rnti = SRSRAN_SIRNTI;
dci.ctx.coreset_id = 0;
dci.freq_domain_assigment = srsran_ra_nr_type1_riv(bwp_cfg.cfg.rb_width, interv.start(), interv.length());
dci.time_domain_assigment = 0;
dci.tpc = 1;
dci.bwp_id = bwp_cfg.bwp_id;
dci.cc_id = bwp_cfg.cc;
dci.rv = 0;
dci.sii = sib_id == 1 ? 0 : 1;
return true;
}
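
Note: fill_dci_sib above encodes the frequency allocation as a type-1 RIV through srsran_ra_nr_type1_riv. The helper's implementation is not part of this diff; for reference, the standard type-1 RIV mapping from TS 38.214 that it is expected to follow can be sketched as:

// Sketch of the TS 38.214 type-1 RIV mapping for a grant of `len` contiguous PRBs
// starting at PRB `start` within a BWP of `n_prb` PRBs (assumed equivalent to what
// srsran_ra_nr_type1_riv computes).
inline uint32_t type1_riv(uint32_t n_prb, uint32_t start, uint32_t len)
{
  if ((len - 1) <= n_prb / 2) {
    return n_prb * (len - 1) + start;
  }
  return n_prb * (n_prb - len + 1) + (n_prb - 1 - start);
}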
si_sched::si_sched(const bwp_params_t& bwp_cfg_) :
bwp_cfg(&bwp_cfg_), logger(srslog::fetch_basic_logger(bwp_cfg_.sched_cfg.logger_name))
{}
void si_sched::run_slot(bwp_slot_allocator& bwp_alloc)
{
if (true) {
// CORESET#0 must be present, otherwise SIs are not allocated
// TODO: provide proper config
return;
}
const uint32_t si_aggr_level = 2;
slot_point sl_pdcch = bwp_alloc.get_pdcch_tti();
const prb_bitmap& prbs = bwp_alloc.res_grid()[sl_pdcch].dl_prbs.prbs();
// SIB1 case
if (sl_pdcch.to_uint() % 160 == 0) {
// TODO: compute if SIB1 slot based on config
const uint32_t aggr_lvl_idx = 2;
const uint32_t sib_id = 1;
const uint32_t sib1len = 77; // TODO: extract from config
alloc_result ret = bwp_alloc.alloc_si(aggr_lvl_idx, sib_id, sib1len, prb_interval{0, 7});
if (ret != alloc_result::success) {
bwp_alloc.logger.warning("SCHED: Cannot allocate SIB1.");
}
}
// Update SI windows
uint32_t N = bwp_cfg->slots.size();
for (si_msg_ctxt_t& si : pending_sis) {
uint32_t x = (si.n - 1) * si.win_len;
if (not si.win_start.valid() and (sl_pdcch.sfn() % si.period == x / N) and
sl_pdcch.slot_idx() == x % bwp_cfg->slots.size()) {
// If start of SI message window
si.win_start = sl_pdcch;
} else if (si.win_start.valid() and si.win_start + si.win_len >= sl_pdcch) {
// If end of SI message window
logger.warning(
"SCHED: Could not allocate SI message idx=%d, len=%d. Cause: %s", si.n, si.len, to_string(si.result));
si.win_start.clear();
}
}
// Schedule pending SIs
if (bwp_cfg->slots[sl_pdcch.slot_idx()].is_dl) {
for (si_msg_ctxt_t& si : pending_sis) {
if (not si.win_start.valid()) {
continue;
}
// Schedule SIBs
// TODO
// TODO: NOTE 2: The UE is not required to monitor PDCCH monitoring occasion(s) corresponding to each transmitted
// SSB in SI-window.
// Attempt grants with increasing number of PRBs (if the number of PRBs is too low, the coderate is invalid)
si.result = alloc_result::invalid_coderate;
uint32_t prb_start_idx = 0;
for (uint32_t nprbs = 4; nprbs < bwp_cfg->cfg.rb_width and si.result == alloc_result::invalid_coderate; ++nprbs) {
prb_interval grant = find_empty_interval_of_length(prbs, nprbs, prb_start_idx);
prb_start_idx = grant.start();
if (grant.length() != nprbs) {
si.result = alloc_result::no_sch_space;
break;
}
si.result = bwp_alloc.alloc_si(si_aggr_level, si.n, si.n_tx, grant);
if (si.result == alloc_result::success) {
// SIB scheduled successfully
si.win_start.clear();
si.n_tx++;
}
}
}
}
}
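
Note: the SIB1 branch above triggers on sl_pdcch.to_uint() % 160 == 0, which matches a 160 ms periodicity only when there is one slot per millisecond (15 kHz SCS). A numerology-aware variant of the check, assuming the periodicity is expressed in milliseconds (e.g. the value configured in mac_nr::cell_cfg) and that slot_point exposes its slots-per-subframe factor, could look like:

// Hypothetical helper, not part of this commit: true if SIB1 should be sent in this slot.
bool is_sib1_slot(slot_point sl, uint32_t period_ms)
{
  // assumption: slot_point provides the number of slots per subframe for its numerology
  uint32_t slots_per_ms = sl.nof_slots_per_subframe();
  return sl.to_uint() % (period_ms * slots_per_ms) == 0;
}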
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace sched_nr_impl
} // namespace srsenb

@ -69,8 +69,10 @@ dl_sched_res_t* cc_worker::run_slot(slot_point pdcch_slot, ue_map_t& ue_db)
log_sched_slot_ues(logger, pdcch_slot, cfg.cc, slot_ues);
// Allocate cell DL signalling
bwp_slot_grid& bwp_pdcch_slot = bwps[0].grid[pdcch_slot];
sched_dl_signalling(*bwps[0].cfg, pdcch_slot, bwp_pdcch_slot.dl.phy.ssb, bwp_pdcch_slot.dl.phy.nzp_csi_rs);
sched_dl_signalling(bwp_alloc);
// Allocate pending SIBs
bwps[0].si.run_slot(bwp_alloc);
// Allocate pending RARs
bwps[0].ra.run_slot(bwp_alloc);
@ -88,7 +90,7 @@ dl_sched_res_t* cc_worker::run_slot(slot_point pdcch_slot, ue_map_t& ue_db)
// releases UE resources
slot_ues.clear();
return &bwp_pdcch_slot.dl;
return &bwp_alloc.tx_slot_grid().dl;
}
ul_sched_t* cc_worker::get_ul_sched(slot_point sl)
