nr,gnb,sched: implement ConRes CE scheduling operations in NR sched and extend sched nr interface

master
Authored by Francisco 3 years ago; committed by Francisco Paisana
parent 17f7a86647
commit 6ad6f4511e
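
This change moves the per-grant choice of MAC subPDUs into the scheduler and propagates it to the MAC: sched_nr_interface::dl_res_t gains a "data" list with one dl_pdu_t (a bounded list of subPDU LCIDs) per DL grant, ue_buffer_manager now stores pending Control Elements as carrier-aware ce_t{lcid, cc} entries and exposes a thread-safe slot_itf::alloc_subpdus() used by the PDSCH allocator, and mac_nr::get_dl_sched() forwards the selected LCIDs to ue_nr::generate_pdu(). This is what lets the Contention Resolution (ConRes) CE be queued per UE and drained like any other subPDU.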

@@ -50,8 +50,9 @@ struct sched_nr_ue_cfg_t {
class sched_nr_interface
{
public:
static const size_t MAX_GRANTS = mac_interface_phy_nr::MAX_GRANTS;
static const size_t MAX_SIBS = 2;
static const size_t MAX_GRANTS = mac_interface_phy_nr::MAX_GRANTS;
static const size_t MAX_SIBS = 2;
static const size_t MAX_SUBPDUS = 8;
///// Configuration /////
@@ -111,17 +112,25 @@ public:
srsran::bounded_vector<msg3_grant_t, MAX_GRANTS> grants;
};
////// DL data signalling //////
struct dl_pdu_t {
srsran::bounded_vector<uint32_t, MAX_SUBPDUS> subpdus;
};
///// Sched Result /////
using dl_sched_t = mac_interface_phy_nr::dl_sched_t;
using ul_res_t = mac_interface_phy_nr::ul_sched_t;
using sched_rar_list_t = srsran::bounded_vector<rar_t, MAX_GRANTS>;
using sched_sib_list_t = srsran::bounded_vector<uint32_t, MAX_GRANTS>; /// list of SI indexes
using sched_rar_list_t = srsran::bounded_vector<rar_t, MAX_GRANTS>;
using sched_dl_pdu_list_t = srsran::bounded_vector<dl_pdu_t, MAX_GRANTS>;
struct dl_res_t {
dl_sched_t phy;
sched_rar_list_t rar;
srsran::bounded_vector<uint32_t, MAX_GRANTS> sib_idxs;
dl_sched_t phy;
sched_dl_pdu_list_t data;
sched_rar_list_t rar;
sched_sib_list_t sib_idxs;
};
virtual ~sched_nr_interface() = default;
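
As a side note, here is a minimal, self-contained sketch of the consumption pattern that the new dl_res_t::data field enables. The types below are simplified stand-ins (not the srsRAN definitions) and the LCID values are illustrative: the n-th entry of "data" carries the subPDU LCIDs for the n-th C-RNTI PDSCH grant, so a MAC-side consumer walks both containers with a shared counter, as the mac_nr::get_dl_sched() hunk further down does with data_count.

#include <cstdint>
#include <cstdio>
#include <vector>

// Simplified stand-ins for the scheduler result types (hypothetical, for illustration only).
struct dl_pdu_t {
  std::vector<uint32_t> subpdus; // subPDU LCIDs selected by the scheduler
};
struct pdsch_t {
  uint16_t rnti;
  uint32_t tbs_bytes;
};
struct dl_res_t {
  std::vector<pdsch_t>  pdsch; // one entry per DL grant
  std::vector<dl_pdu_t> data;  // subPDU list per C-RNTI grant, same order
};

int main()
{
  dl_res_t res;
  res.pdsch.push_back({0x4601, 128});
  res.data.push_back({{62 /* ConRes CE LCID */, 4 /* a DRB LCID */}});

  // MAC-side consumption: a counter kept in lock-step with the C-RNTI grants.
  uint32_t data_count = 0;
  for (const pdsch_t& grant : res.pdsch) {
    const dl_pdu_t& pdu = res.data[data_count++];
    std::printf("rnti=0x%x tbs=%u subpdus:", (unsigned)grant.rnti, (unsigned)grant.tbs_bytes);
    for (uint32_t lcid : pdu.subpdus) {
      std::printf(" %u", (unsigned)lcid);
    }
    std::printf("\n");
  }
  return 0;
}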

@@ -39,6 +39,7 @@ public:
using base_type::get_bsr_state;
using base_type::get_dl_prio_tx;
using base_type::get_dl_tx;
using base_type::get_dl_tx_total;
using base_type::is_bearer_active;
using base_type::is_bearer_dl;
using base_type::is_bearer_ul;
@@ -48,7 +49,26 @@ public:
int get_dl_tx_total() const;
// Control Element Command queue
srsran::deque<uint32_t> pending_ces;
struct ce_t {
uint32_t lcid;
uint32_t cc;
};
srsran::deque<ce_t> pending_ces;
/// Protected, thread-safe interface of "ue_buffer_manager" for "slot_ue"
struct slot_itf {
slot_itf() = default;
explicit slot_itf(uint32_t cc_, ue_buffer_manager& parent_) : cc(cc_), parent(&parent_) {}
void alloc_subpdus(uint32_t rem_bytes, sched_nr_interface::dl_pdu_t& pdu);
private:
uint32_t cc = SRSRAN_MAX_CARRIERS;
ue_buffer_manager* parent = nullptr;
};
private:
/// Update of buffers is mutexed when carrier aggreg. is in place
std::mutex mutex;
};
class slot_ue;
@@ -111,7 +131,6 @@ public:
}
uint32_t pcell_cc() const { return ue_cfg.carriers[0].cc; }
ue_buffer_manager buffers;
std::array<std::unique_ptr<ue_carrier>, SCHED_NR_MAX_CARRIERS> carriers;
const uint16_t rnti;
@@ -119,18 +138,24 @@ public:
private:
const sched_params_t& sched_cfg;
ue_cfg_t ue_cfg;
slot_point last_pdcch_slot;
slot_point last_sr_slot;
int ul_pending_bytes = 0, dl_pending_bytes = 0;
ue_cfg_t ue_cfg;
ue_buffer_manager buffers;
};
class slot_ue
{
public:
slot_ue() = default;
explicit slot_ue(ue_carrier& ue, slot_point slot_tx_, uint32_t dl_pending_bytes, uint32_t ul_pending_bytes);
explicit slot_ue(ue_carrier& ue,
slot_point slot_tx_,
uint32_t dl_pending_bytes,
uint32_t ul_pending_bytes,
ue_buffer_manager::slot_itf buffers_);
slot_ue(slot_ue&&) noexcept = default;
slot_ue& operator=(slot_ue&&) noexcept = default;
bool empty() const { return ue == nullptr; }
@@ -148,14 +173,15 @@ public:
uint32_t dl_bytes = 0, ul_bytes = 0;
// UE parameters that are sector specific
bool dl_active;
bool ul_active;
slot_point pdcch_slot;
slot_point pdsch_slot;
slot_point pusch_slot;
slot_point uci_slot;
dl_harq_proc* h_dl = nullptr;
ul_harq_proc* h_ul = nullptr;
bool dl_active;
bool ul_active;
slot_point pdcch_slot;
slot_point pdsch_slot;
slot_point pusch_slot;
slot_point uci_slot;
dl_harq_proc* h_dl = nullptr;
ul_harq_proc* h_ul = nullptr;
ue_buffer_manager::slot_itf buffers;
private:
ue_carrier* ue = nullptr;

@@ -49,7 +49,7 @@ public:
void set_active(bool active) { active_state.store(active, std::memory_order_relaxed); }
bool is_active() const { return active_state.load(std::memory_order_relaxed); }
int generate_pdu(srsran::byte_buffer_t* pdu, uint32_t grant_size);
int generate_pdu(srsran::byte_buffer_t* pdu, uint32_t grant_size, srsran::const_span<uint32_t> subpdu_lcids);
std::mutex metrics_mutex = {};
void metrics_read(mac_ue_metrics_t* metrics_);

@@ -476,7 +476,7 @@ mac_nr::dl_sched_t* mac_nr::get_dl_sched(const srsran_slot_cfg_t& slot_cfg)
}
// Generate MAC DL PDUs
uint32_t rar_count = 0, si_count = 0;
uint32_t rar_count = 0, si_count = 0, data_count = 0;
srsran::rwlock_read_guard rw_lock(rwmutex);
for (pdsch_t& pdsch : dl_res->phy.pdsch) {
if (pdsch.sch.grant.rnti_type == srsran_rnti_type_c) {
@@ -487,7 +487,8 @@ mac_nr::dl_sched_t* mac_nr::get_dl_sched(const srsran_slot_cfg_t& slot_cfg)
for (auto& tb_data : pdsch.data) {
if (tb_data != nullptr and tb_data->N_bytes == 0) {
// TODO: exclude retx from packing
ue_db[rnti]->generate_pdu(tb_data, pdsch.sch.grant.tb->tbs / 8);
const sched_nr_interface::dl_pdu_t& pdu = dl_res->data[data_count++];
ue_db[rnti]->generate_pdu(tb_data, pdsch.sch.grant.tb->tbs / 8, pdu.subpdus);
if (pcap != nullptr) {
uint32_t pid = 0; // TODO: get PID from PDCCH struct?

@@ -46,6 +46,7 @@ void bwp_slot_grid::reset()
dl.phy.pdcch_dl.clear();
dl.phy.pdcch_ul.clear();
dl.phy.pdsch.clear();
dl.data.clear();
dl.rar.clear();
dl.sib_idxs.clear();
ul.pusch.clear();
@@ -336,6 +337,10 @@ alloc_result bwp_slot_allocator::alloc_pdsch(slot_ue& ue, const prb_grant& dl_gr
logger.warning("Couldn't find mcs that leads to R<0.9");
}
// Select scheduled LCIDs and update UE buffer state
bwp_pdsch_slot.dl.data.emplace_back();
ue.buffers.alloc_subpdus(ue.h_dl->tbs(), bwp_pdsch_slot.dl.data.back());
return alloc_result::success;
}

@@ -144,14 +144,14 @@ void log_sched_bwp_result(srslog::basic_logger& logger,
const slot_ue_map_t& slot_ues)
{
const bwp_slot_grid& bwp_slot = res_grid[pdcch_slot];
size_t rar_count = 0, si_count = 0;
size_t rar_count = 0, si_count = 0, data_count = 0;
for (const pdcch_dl_t& pdcch : bwp_slot.dl.phy.pdcch_dl) {
fmt::memory_buffer fmtbuf;
if (pdcch.dci.ctx.rnti_type == srsran_rnti_type_c) {
const slot_ue& ue = slot_ues[pdcch.dci.ctx.rnti];
fmt::format_to(fmtbuf,
"SCHED: DL {}, cc={}, rnti=0x{:x}, pid={}, cs={}, f={}, prbs={}, nrtx={}, dai={}, "
"tbs={}, bs={}, pdsch_slot={}, ack_slot={}",
"lcids=[{}], tbs={}, bs={}, pdsch_slot={}, ack_slot={}",
ue.h_dl->nof_retx() == 0 ? "tx" : "retx",
res_grid.cfg->cc,
ue->rnti,
@@ -161,10 +161,12 @@ void log_sched_bwp_result(srslog::basic_logger& logger,
ue.h_dl->prbs(),
ue.h_dl->nof_retx(),
pdcch.dci.dai,
fmt::join(bwp_slot.dl.data[data_count].subpdus, ", "),
ue.h_dl->tbs() / 8u,
ue.dl_bytes,
ue.pdsch_slot,
ue.uci_slot);
data_count++;
} else if (pdcch.dci.ctx.rnti_type == srsran_rnti_type_ra) {
const pdsch_t& pdsch = bwp_slot.dl.phy.pdsch[std::distance(bwp_slot.dl.phy.pdcch_dl.data(), &pdcch)];
srsran::const_span<bool> prbs{pdsch.sch.grant.prb_idx, pdsch.sch.grant.prb_idx + pdsch.sch.grant.nof_prb};

@@ -21,16 +21,44 @@ namespace sched_nr_impl {
int ue_buffer_manager::get_dl_tx_total() const
{
int total_bytes = base_type::get_dl_tx_total();
for (uint32_t lcid : pending_ces) {
total_bytes += srsran::mac_sch_subpdu_nr::sizeof_ce(lcid, false);
for (ue_buffer_manager::ce_t ce : pending_ces) {
total_bytes += srsran::mac_sch_subpdu_nr::sizeof_ce(ce.lcid, false);
}
return total_bytes;
}
void ue_buffer_manager::slot_itf::alloc_subpdus(uint32_t rem_bytes, sched_nr_interface::dl_pdu_t& pdu)
{
for (ce_t ce : parent->pending_ces) {
if (ce.cc == cc) {
// Note: This check also avoids thread collisions across UE carriers
uint32_t size_ce = srsran::mac_sch_subpdu_nr::sizeof_ce(ce.lcid, false);
if (size_ce > rem_bytes) {
break;
}
rem_bytes -= size_ce;
pdu.subpdus.push_back(ce.lcid);
parent->pending_ces.pop_front();
}
}
for (uint32_t lcid = 0; rem_bytes > 0 and is_lcid_valid(lcid); ++lcid) {
uint32_t pending_lcid_bytes = parent->get_dl_tx_total(lcid);
if (pending_lcid_bytes > 0) {
rem_bytes -= std::min(rem_bytes, pending_lcid_bytes);
pdu.subpdus.push_back(lcid);
}
}
}
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
slot_ue::slot_ue(ue_carrier& ue_, slot_point slot_tx_, uint32_t dl_pending_bytes, uint32_t ul_pending_bytes) :
ue(&ue_), pdcch_slot(slot_tx_)
slot_ue::slot_ue(ue_carrier& ue_,
slot_point slot_tx_,
uint32_t dl_pending_bytes,
uint32_t ul_pending_bytes,
ue_buffer_manager::slot_itf buffers_) :
ue(&ue_), pdcch_slot(slot_tx_), buffers(buffers_)
{
const uint32_t k0 = 0;
pdsch_slot = pdcch_slot + k0;
@@ -133,7 +161,8 @@ void ue::set_cfg(const ue_cfg_t& cfg)
void ue::mac_buffer_state(uint32_t ce_lcid, uint32_t nof_cmds)
{
for (uint32_t i = 0; i < nof_cmds; ++i) {
buffers.pending_ces.push_back(ce_lcid);
// If not specified otherwise, the CE is transmitted in PCell
buffers.pending_ces.push_back(ue_buffer_manager::ce_t{ce_lcid, cfg().carriers[0].cc});
}
}
@@ -179,7 +208,11 @@ void ue::new_slot(slot_point pdcch_slot)
slot_ue ue::make_slot_ue(slot_point pdcch_slot, uint32_t cc)
{
srsran_assert(carriers[cc] != nullptr, "make_slot_ue() called for inexistent rnti=0x%x,cc=%d", rnti, cc);
return slot_ue(*carriers[cc], pdcch_slot, dl_pending_bytes, ul_pending_bytes);
return slot_ue(*carriers[cc],
pdcch_slot,
dl_pending_bytes,
ul_pending_bytes,
ue_buffer_manager::slot_itf{cfg().carriers[cc].cc, buffers});
}
} // namespace sched_nr_impl
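
For reference, a small standalone sketch of the allocation order implemented by ue_buffer_manager::slot_itf::alloc_subpdus() above: pending CEs destined for this carrier are drained first, then logical channels with pending data are appended while grant space remains. The helpers and types below are hypothetical simplifications (ce_size_bytes() and pending_bytes() stand in for mac_sch_subpdu_nr::sizeof_ce() and the buffer state), not the srsRAN code.

#include <algorithm>
#include <cstdint>
#include <deque>
#include <vector>

struct ce_t {
  uint32_t lcid;
  uint32_t cc;
};

// Hypothetical stand-ins for sizeof_ce() and the per-LCID RLC buffer state.
static uint32_t ce_size_bytes(uint32_t /*lcid*/) { return 6; }
static uint32_t pending_bytes(uint32_t lcid) { return lcid == 4 ? 100 : 0; }

// Same ordering as the commit's alloc_subpdus(): carrier-matching CEs first, then LCIDs with data.
void alloc_subpdus(uint32_t cc, uint32_t rem_bytes, std::deque<ce_t>& pending_ces, std::vector<uint32_t>& subpdus)
{
  for (auto it = pending_ces.begin(); it != pending_ces.end();) {
    if (it->cc != cc) {
      ++it; // CE queued for another carrier; leave it untouched
      continue;
    }
    uint32_t ce_bytes = ce_size_bytes(it->lcid);
    if (ce_bytes > rem_bytes) {
      break; // CE does not fit in the remaining grant space
    }
    rem_bytes -= ce_bytes;
    subpdus.push_back(it->lcid);
    it = pending_ces.erase(it);
  }
  for (uint32_t lcid = 0; rem_bytes > 0 and lcid < 32; ++lcid) {
    uint32_t lcid_bytes = pending_bytes(lcid);
    if (lcid_bytes > 0) {
      rem_bytes -= std::min(rem_bytes, lcid_bytes);
      subpdus.push_back(lcid);
    }
  }
}

int main()
{
  std::deque<ce_t> ces = {{62, 0}}; // e.g. a ConRes CE queued for cc=0
  std::vector<uint32_t> subpdus;
  alloc_subpdus(/*cc=*/0, /*rem_bytes=*/64, ces, subpdus); // subpdus -> {62, 4}
  return 0;
}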

@@ -101,6 +101,7 @@ void sched_nr_ue_sim::update_dl_harqs(const sched_nr_cc_result_view& cc_out)
auto& h = ctxt.cc_list[cc].ul_harqs[data.dci.pid];
if (h.nof_txs == 0 or h.ndi != data.dci.ndi) {
// It is newtx
h.is_msg3 = false;
h.nof_retxs = 0;
h.ndi = data.dci.ndi;
h.first_slot_tx = cc_out.slot + 4; // TODO
@@ -131,6 +132,7 @@ void sched_nr_ue_sim::update_dl_harqs(const sched_nr_cc_result_view& cc_out)
auto& h = ctxt.cc_list[cc].ul_harqs[msg3_grant.msg3_dci.pid];
if (h.nof_txs == 0) {
// It is newtx
h.is_msg3 = true;
h.nof_retxs = 0;
h.ndi = msg3_grant.msg3_dci.ndi;
h.first_slot_tx = cc_out.slot + 4 + MSG3_DELAY_MS; // TODO
@@ -378,6 +380,10 @@ int sched_nr_base_tester::apply_slot_events(sim_nr_ue_ctxt_t& ue_ctxt, const ue_
"UL ACK rnti=0x%x, slot_ul_tx=%u, cc=%d pid=%d", ue_ctxt.rnti, h.last_slot_tx.to_uint(), enb_cc_idx, h.pid);
}
if (h.is_msg3) {
logger.info("STATUS: rnti=0x%x received Msg3", ue_ctxt.rnti);
}
// update scheduler
sched_ptr->ul_crc_info(ue_ctxt.rnti, enb_cc_idx, ack.pid, ack.ack);
}

@@ -31,6 +31,7 @@ const static uint32_t MAX_GRANTS = mac_interface_phy_nr::MAX_GRANTS;
struct ue_nr_harq_ctxt_t {
bool active = false;
bool ndi = false;
bool is_msg3 = false;
uint32_t pid = 0;
uint32_t nof_txs = 0;
uint32_t nof_retxs = std::numeric_limits<uint32_t>::max();

@@ -74,7 +74,7 @@ void run_sched_nr_test()
int main()
{
auto& test_logger = srslog::fetch_basic_logger("TEST");
test_logger.set_level(srslog::basic_levels::warning);
test_logger.set_level(srslog::basic_levels::info);
auto& mac_nr_logger = srslog::fetch_basic_logger("MAC-NR");
mac_nr_logger.set_level(srslog::basic_levels::debug);
auto& pool_logger = srslog::fetch_basic_logger("POOL");

@@ -64,7 +64,7 @@ uint32_t ue_nr::read_pdu(uint32_t lcid, uint8_t* payload, uint32_t requested_byt
return rlc->read_pdu(rnti, lcid, payload, requested_bytes);
}
int ue_nr::generate_pdu(srsran::byte_buffer_t* pdu, uint32_t grant_size)
int ue_nr::generate_pdu(srsran::byte_buffer_t* pdu, uint32_t grant_size, srsran::const_span<uint32_t> subpdu_lcids)
{
std::lock_guard<std::mutex> lock(mutex);
