nr,gnb,sched: Simplify sched api and locking design to better reflect the mac-phy FAPI interface

- removal of condition variables from sched main calls
- addition of sched::slot_indication call as a point of synchronization per slot
- removal of sched_worker_manager class
- removal of serv_cell_manager class
- centralization of cc-specific functionality and components in cc_worker
master
Francisco 3 years ago committed by Francisco Paisana
parent fa09a9d11a
commit f2409534e4

@ -18,7 +18,6 @@
#include "srsenb/hdr/common/rnti_pool.h"
#include "srsenb/hdr/stack/enb_stack_base.h"
#include "srsenb/hdr/stack/mac/nr/sched_nr.h"
#include "srsenb/hdr/stack/mac/nr/ue_nr.h"
#include "srsran/common/task_scheduler.h"
#include "srsran/interfaces/enb_metrics_interface.h"
@ -35,6 +34,8 @@ struct mac_nr_args_t {
srsenb::pcap_args_t pcap;
};
class sched_nr;
class mac_nr final : public mac_interface_phy_nr, public mac_interface_rrc_nr, public mac_interface_rlc_nr
{
public:
@ -110,7 +111,7 @@ private:
std::atomic<bool> started = {false};
const static uint32_t NUMEROLOGY_IDX = 0; /// only 15kHz supported at this stage
srsenb::sched_nr sched;
std::unique_ptr<srsenb::sched_nr> sched;
std::vector<sched_nr_interface::cell_cfg_t> cell_config;
// Map of active UEs

@ -26,8 +26,9 @@ extern "C" {
namespace srsenb {
namespace sched_nr_impl {
class sched_worker_manager;
class serv_cell_manager;
class cc_worker;
} // namespace sched_nr_impl
class ul_sched_result_buffer;
@ -37,6 +38,8 @@ class sched_nr final : public sched_nr_interface
public:
explicit sched_nr();
~sched_nr() override;
void stop();
int config(const sched_args_t& sched_cfg, srsran::const_span<cell_cfg_t> cell_list) override;
void ue_cfg(uint16_t rnti, const ue_cfg_t& cfg) override;
void ue_rem(uint16_t rnti) override;
@ -50,30 +53,56 @@ public:
void ul_bsr(uint16_t rnti, uint32_t lcg_id, uint32_t bsr) override;
void dl_buffer_state(uint16_t rnti, uint32_t lcid, uint32_t newtx, uint32_t retx);
int run_slot(slot_point pdsch_tti, uint32_t cc, dl_res_t& result) override;
/// Called once per slot in a non-concurrent fashion
void slot_indication(slot_point slot_tx) override;
int get_dl_sched(slot_point pdsch_tti, uint32_t cc, dl_res_t& result) override;
int get_ul_sched(slot_point pusch_tti, uint32_t cc, ul_res_t& result) override;
void get_metrics(mac_metrics_t& metrics);
private:
class feedback_manager
{
public:
struct ue_event_t {
uint16_t rnti;
srsran::move_callback<void()> callback;
};
void enqueue_event(uint16_t rnti, srsran::move_callback<void()> ev);
void get_pending_events(srsran::deque<ue_event_t>& current_events);
private:
std::mutex event_mutex;
srsran::deque<ue_event_t> next_slot_events;
};
void ue_cfg_impl(uint16_t rnti, const ue_cfg_t& cfg);
// args
sched_nr_impl::sched_params cfg;
srslog::basic_logger* logger = nullptr;
using sched_worker_manager = sched_nr_impl::sched_worker_manager;
std::unique_ptr<sched_worker_manager> sched_workers;
// slot-specific
slot_point current_slot_tx;
std::atomic<int> worker_count{0};
using slot_cc_worker = sched_nr_impl::cc_worker;
std::vector<std::unique_ptr<sched_nr_impl::cc_worker> > cc_workers;
using ue_map_t = sched_nr_impl::ue_map_t;
std::mutex ue_db_mutex;
ue_map_t ue_db;
// management of Sched Result buffering
std::unique_ptr<ul_sched_result_buffer> pending_results;
// management of cell resources
std::vector<std::unique_ptr<sched_nr_impl::serv_cell_manager> > cells;
// Feedback management
feedback_manager pending_feedback;
srsran::deque<feedback_manager::ue_event_t> current_slot_events;
// metrics extraction
class ue_metrics_manager;
std::unique_ptr<ue_metrics_manager> metrics_handler;
};
} // namespace srsenb

@ -10,8 +10,8 @@
*
*/
#ifndef SRSRAN_SCHED_NR_CELL_H
#define SRSRAN_SCHED_NR_CELL_H
#ifndef SRSRAN_SCHED_NR_BWP_H
#define SRSRAN_SCHED_NR_BWP_H
#include "sched_nr_cfg.h"
#include "sched_nr_grant_allocator.h"
@ -79,10 +79,10 @@ private:
srsran::deque<pending_rar_t> pending_rars;
};
class bwp_ctxt
class bwp_manager
{
public:
explicit bwp_ctxt(const bwp_params_t& bwp_cfg);
explicit bwp_manager(const bwp_params_t& bwp_cfg);
const bwp_params_t* cfg;
@ -94,21 +94,7 @@ public:
bwp_res_grid grid;
};
class serv_cell_manager
{
public:
using feedback_callback_t = srsran::move_callback<void(ue_carrier&)>;
explicit serv_cell_manager(const cell_params_t& cell_cfg_);
srsran::bounded_vector<bwp_ctxt, SCHED_NR_MAX_BWP_PER_CELL> bwps;
const cell_params_t& cfg;
private:
srslog::basic_logger& logger;
};
} // namespace sched_nr_impl
} // namespace srsenb
#endif // SRSRAN_SCHED_NR_CELL_H
#endif // SRSRAN_SCHED_NR_BWP_H

@ -94,13 +94,7 @@ private:
class bwp_slot_allocator
{
public:
explicit bwp_slot_allocator(bwp_res_grid& bwp_grid_);
void new_slot(slot_point pdcch_slot_, slot_ue_map_t& ues_)
{
pdcch_slot = pdcch_slot_;
slot_ues = &ues_;
}
explicit bwp_slot_allocator(bwp_res_grid& bwp_grid_, slot_point pdcch_slot_, slot_ue_map_t& ues_);
alloc_result alloc_si(uint32_t aggr_idx, uint32_t si_idx, uint32_t si_ntx, const prb_interval& prbs);
alloc_result alloc_rar_and_msg3(uint16_t ra_rnti,

@ -128,7 +128,9 @@ public:
virtual void ue_cfg(uint16_t rnti, const ue_cfg_t& ue_cfg) = 0;
virtual void ue_rem(uint16_t rnti) = 0;
virtual bool ue_exists(uint16_t rnti) = 0;
virtual int run_slot(slot_point slot_rx, uint32_t cc, dl_res_t& result) = 0;
virtual void slot_indication(slot_point slot_tx) = 0;
virtual int get_dl_sched(slot_point slot_rx, uint32_t cc, dl_res_t& result) = 0;
virtual int get_ul_sched(slot_point slot_rx, uint32_t cc, ul_res_t& result) = 0;
virtual void dl_ack_info(uint16_t rnti, uint32_t cc, uint32_t pid, uint32_t tb_idx, bool ack) = 0;

@ -81,7 +81,6 @@ public:
// metrics
mac_ue_metrics_t metrics = {};
std::mutex metrics_mutex;
private:
bwp_ue_cfg bwp_cfg;

@ -13,7 +13,7 @@
#ifndef SRSRAN_SCHED_NR_WORKER_H
#define SRSRAN_SCHED_NR_WORKER_H
#include "sched_nr_cell.h"
#include "sched_nr_bwp.h"
#include "sched_nr_cfg.h"
#include "sched_nr_grant_allocator.h"
#include "sched_nr_ue.h"
@ -30,109 +30,73 @@ struct mac_metrics_t;
namespace sched_nr_impl {
class slot_cc_worker
/// Class to manage the locking, storing and processing of carrier-specific feedback (UE-specific or common)
class carrier_feedback_manager
{
public:
using feedback_callback_t = srsran::move_callback<void(ue_carrier&)>;
struct feedback_t {
uint16_t rnti;
feedback_callback_t fdbk;
};
explicit slot_cc_worker(serv_cell_manager& sched);
void run(slot_point pdcch_slot, ue_map_t& ue_db_);
bool running() const { return slot_rx.valid(); }
explicit carrier_feedback_manager(const cell_params_t& cell_cfg);
void enqueue_cc_event(srsran::move_callback<void()> ev);
/// Enqueue cell-specific event not directed at a particular UE (e.g. PRACH)
void enqueue_common_event(srsran::move_callback<void()> ev);
/// Enqueue feedback directed at a given UE in a given cell
void enqueue_cc_feedback(uint16_t rnti, feedback_callback_t fdbk);
/// Enqueue feedback directed at a given UE in a given cell (e.g. ACKs, CQI)
void enqueue_ue_feedback(uint16_t rnti, feedback_callback_t fdbk);
private:
/// Run all pending feedback. This should be called at the beginning of a TTI
void run_feedback(ue_map_t& ue_db);
void alloc_dl_ues();
void alloc_ul_ues();
void postprocess_decisions();
void run(ue_map_t& ue_db);
private:
const cell_params_t& cfg;
serv_cell_manager& cell;
srslog::basic_logger& logger;
slot_point slot_rx;
bwp_slot_allocator bwp_alloc;
// Process of UE cell-specific feedback
struct feedback_t {
uint16_t rnti;
feedback_callback_t fdbk;
};
std::mutex feedback_mutex;
srsran::deque<feedback_t> pending_feedback, tmp_feedback_to_run;
srsran::deque<srsran::move_callback<void()> > pending_events, tmp_events_to_run;
slot_ue_map_t slot_ues;
};
class sched_worker_manager
class cc_worker
{
struct slot_worker_ctxt {
std::mutex slot_mutex; // lock of all workers of the same slot.
std::condition_variable cvar;
slot_point slot_rx;
int nof_workers_waiting = 0;
std::atomic<int> worker_count{0}; // variable shared across slot_cc_workers
std::vector<slot_cc_worker> workers;
};
public:
explicit sched_worker_manager(ue_map_t& ue_db_,
const sched_params& cfg_,
srsran::span<std::unique_ptr<serv_cell_manager> > cells_);
sched_worker_manager(const sched_worker_manager&) = delete;
sched_worker_manager(sched_worker_manager&&) = delete;
~sched_worker_manager();
using feedback_callback_t = srsran::move_callback<void(ue_carrier&)>;
void run_slot(slot_point slot_tx, uint32_t cc, dl_sched_res_t& dl_res, ul_sched_t& ul_res);
explicit cc_worker(const cell_params_t& params);
void get_metrics(mac_metrics_t& metrics);
void run_slot(slot_point pdcch_slot, ue_map_t& ue_db_, dl_sched_res_t& dl_res, ul_sched_t& ul_res);
void enqueue_event(uint16_t rnti, srsran::move_callback<void()> ev);
void enqueue_cc_event(uint32_t cc, srsran::move_callback<void()> ev);
void enqueue_cc_feedback(uint16_t rnti, uint32_t cc, slot_cc_worker::feedback_callback_t fdbk)
{
cc_worker_list[cc]->worker.enqueue_cc_feedback(rnti, std::move(fdbk));
}
// const params
const cell_params_t& cfg;
srslog::basic_logger& logger;
private:
void update_ue_db(slot_point slot_tx, bool locked_context);
void get_metrics_nolocking(mac_metrics_t& metrics);
bool save_sched_result(slot_point pdcch_slot, uint32_t cc, dl_sched_res_t& dl_res, ul_sched_t& ul_res);
carrier_feedback_manager pending_feedback;
const sched_params& cfg;
ue_map_t& ue_db;
srsran::span<std::unique_ptr<serv_cell_manager> > cells;
srslog::basic_logger& logger;
// cc-specific resources
srsran::bounded_vector<bwp_manager, SCHED_NR_MAX_BWP_PER_CELL> bwps;
struct ue_event_t {
uint16_t rnti;
srsran::move_callback<void()> callback;
};
std::mutex event_mutex;
srsran::deque<ue_event_t> next_slot_events, slot_events;
private:
/// Derive the remaining scheduling parameters and save result
bool save_sched_result(dl_sched_res_t& dl_res, ul_sched_t& ul_res, slot_point slot_tx);
std::vector<std::unique_ptr<slot_worker_ctxt> > slot_worker_ctxts;
struct cc_context {
std::condition_variable cvar;
int waiting = 0;
slot_cc_worker worker;
void alloc_dl_ues(bwp_slot_allocator& bwp_alloc);
void alloc_ul_ues(bwp_slot_allocator& bwp_alloc);
void postprocess_decisions(bwp_slot_allocator& bwp_alloc);
cc_context(serv_cell_manager& sched) : worker(sched) {}
};
// {slot,cc} specific variables
slot_ue_map_t slot_ues;
};
std::mutex slot_mutex;
std::condition_variable cvar;
slot_point current_slot;
std::atomic<int> worker_count{0}; // variable shared across slot_cc_workers
std::vector<std::unique_ptr<cc_context> > cc_worker_list;
class sched_worker_manager
{
public:
void get_metrics(mac_metrics_t& metrics);
private:
void get_metrics_nolocking(mac_metrics_t& metrics);
};
} // namespace sched_nr_impl

@ -587,7 +587,7 @@ int mac::get_dl_sched(uint32_t tti_tx_dl, dl_sched_list_t& dl_sched_res_list)
return 0;
}
trace_threshold_complete_event("mac::run_slot", "total_time", std::chrono::microseconds(100));
trace_threshold_complete_event("mac::get_dl_sched", "total_time", std::chrono::microseconds(100));
logger.set_context(TTI_SUB(tti_tx_dl, FDD_HARQ_DELAY_UL_MS));
if (do_padding) {
add_padding();

@ -16,7 +16,7 @@ set(SOURCES mac_nr.cc
sched_nr_pdcch.cc
sched_nr_cfg.cc
sched_nr_helpers.cc
sched_nr_cell.cc
sched_nr_bwp.cc
sched_nr_rb.cc
sched_nr_time_rr.cc
harq_softbuffer.cc

@ -11,18 +11,14 @@
*/
#include "srsenb/hdr/stack/mac/nr/mac_nr.h"
#include "srsenb/hdr/stack/mac/nr/sched_nr.h"
#include "srsran/common/buffer_pool.h"
#include "srsran/common/log_helper.h"
#include "srsran/common/phy_cfg_nr_default.h"
#include "srsran/common/rwlock_guard.h"
#include "srsran/common/standard_streams.h"
#include "srsran/common/string_helpers.h"
#include "srsran/common/time_prof.h"
#include "srsran/mac/mac_rar_pdu_nr.h"
#include <pthread.h>
#include <string.h>
#include <strings.h>
#include <unistd.h>
namespace srsenb {
@ -30,7 +26,8 @@ mac_nr::mac_nr(srsran::task_sched_handle task_sched_) :
logger(srslog::fetch_basic_logger("MAC-NR")),
task_sched(task_sched_),
bcch_bch_payload(srsran::make_byte_buffer()),
rar_pdu_buffer(srsran::make_byte_buffer())
rar_pdu_buffer(srsran::make_byte_buffer()),
sched(new sched_nr{})
{
stack_task_queue = task_sched.make_task_queue();
}
@ -67,12 +64,12 @@ int mac_nr::init(const mac_nr_args_t& args_,
void mac_nr::stop()
{
if (started) {
bool started_prev = started.exchange(false);
if (started_prev) {
sched->stop();
if (pcap != nullptr) {
pcap->close();
}
started = false;
}
}
@ -84,7 +81,7 @@ void mac_nr::get_metrics(srsenb::mac_metrics_t& metrics)
// TODO: We should comment on the logic we follow to get the metrics. Some of them are retrieved from MAC, some
// others from the scheduler.
get_metrics_nolock(metrics);
sched.get_metrics(metrics);
sched->get_metrics(metrics);
}
void mac_nr::get_metrics_nolock(srsenb::mac_metrics_t& metrics)
@ -105,7 +102,7 @@ void mac_nr::get_metrics_nolock(srsenb::mac_metrics_t& metrics)
int mac_nr::cell_cfg(const std::vector<srsenb::sched_nr_interface::cell_cfg_t>& nr_cells)
{
cell_config = nr_cells;
sched.config(args.sched_cfg, nr_cells);
sched->config(args.sched_cfg, nr_cells);
detected_rachs.resize(nr_cells.size());
// read SIBs from RRC (SIB1 for now only)
@ -134,7 +131,7 @@ int mac_nr::cell_cfg(const std::vector<srsenb::sched_nr_interface::cell_cfg_t>&
int mac_nr::ue_cfg(uint16_t rnti, const sched_nr_interface::ue_cfg_t& ue_cfg)
{
sched.ue_cfg(rnti, ue_cfg);
sched->ue_cfg(rnti, ue_cfg);
return SRSRAN_SUCCESS;
}
@ -145,7 +142,7 @@ uint16_t mac_nr::reserve_rnti(uint32_t enb_cc_idx, const sched_nr_ue_cfg_t& uecf
return rnti;
}
sched.ue_cfg(rnti, uecfg);
sched->ue_cfg(rnti, uecfg);
return rnti;
}
@ -185,7 +182,7 @@ void mac_nr::rach_detected(const rach_info_t& rach_info)
rar_info.ta_cmd = rach_info.time_adv;
rar_info.prach_slot = slot_point{NUMEROLOGY_IDX, rach_info.slot_index};
// TODO: fill remaining fields as required
sched.dl_rach_info(enb_cc_idx, rar_info);
sched->dl_rach_info(enb_cc_idx, rar_info);
rrc->add_user(rnti, uecfg);
logger.info("RACH: slot=%d, cc=%d, preamble=%d, offset=%d, temp_crnti=0x%x",
@ -221,7 +218,7 @@ uint16_t mac_nr::alloc_ue(uint32_t enb_cc_idx)
}
// Allocate and initialize UE object
std::unique_ptr<ue_nr> ue_ptr = std::unique_ptr<ue_nr>(new ue_nr(rnti, enb_cc_idx, &sched, rrc, rlc, phy, logger));
std::unique_ptr<ue_nr> ue_ptr(new ue_nr(rnti, enb_cc_idx, sched.get(), rrc, rlc, phy, logger));
// Add UE to rnti map
srsran::rwlock_write_guard rw_lock(rwmutex);
@ -244,7 +241,7 @@ int mac_nr::remove_ue(uint16_t rnti)
{
srsran::rwlock_write_guard lock(rwmutex);
if (is_rnti_active_nolock(rnti)) {
sched.ue_rem(rnti);
sched->ue_rem(rnti);
ue_db.erase(rnti);
} else {
logger.error("User rnti=0x%x not found", rnti);
@ -282,13 +279,13 @@ bool mac_nr::is_rnti_active_nolock(uint16_t rnti)
int mac_nr::rlc_buffer_state(uint16_t rnti, uint32_t lc_id, uint32_t tx_queue, uint32_t retx_queue)
{
sched.dl_buffer_state(rnti, lc_id, tx_queue, retx_queue);
sched->dl_buffer_state(rnti, lc_id, tx_queue, retx_queue);
return SRSRAN_SUCCESS;
}
void mac_nr::ul_bsr(uint16_t rnti, uint32_t lcid, uint32_t bsr)
{
sched.ul_bsr(rnti, lcid, bsr);
sched->ul_bsr(rnti, lcid, bsr);
}
int mac_nr::slot_indication(const srsran_slot_cfg_t& slot_cfg)
@ -302,11 +299,13 @@ int mac_nr::get_dl_sched(const srsran_slot_cfg_t& slot_cfg, dl_sched_t& dl_sched
logger.set_context((pdsch_slot - TX_ENB_DELAY).to_uint());
// Run Scheduler
// Initiate new slot and sync UE internal states
sched->slot_indication(pdsch_slot);
// Run DL Scheduler for CC
sched_nr_interface::sched_rar_list_t rar_list;
sched_nr_interface::dl_res_t dl_res(rar_list, dl_sched);
int ret = sched.run_slot(pdsch_slot, 0, dl_res);
int ret = sched->get_dl_sched(pdsch_slot, 0, dl_res);
if (ret != SRSRAN_SUCCESS) {
return ret;
}
@ -349,7 +348,7 @@ int mac_nr::get_ul_sched(const srsran_slot_cfg_t& slot_cfg, ul_sched_t& ul_sched
int ret = 0;
slot_point pusch_slot = srsran::slot_point{NUMEROLOGY_IDX, slot_cfg.idx};
ret = sched.get_ul_sched(pusch_slot, 0, ul_sched);
ret = sched->get_ul_sched(pusch_slot, 0, ul_sched);
srsran::rwlock_read_guard rw_lock(rwmutex);
for (auto& pusch : ul_sched.pusch) {
@ -383,7 +382,7 @@ bool mac_nr::handle_uci_data(const uint16_t rnti, const srsran_uci_cfg_nr_t& cfg
for (uint32_t i = 0; i < cfg_.ack.count; i++) {
const srsran_harq_ack_bit_t* ack_bit = &cfg_.ack.bits[i];
bool is_ok = (value.ack[i] == 1) and value.valid;
sched.dl_ack_info(rnti, 0, ack_bit->pid, 0, is_ok);
sched->dl_ack_info(rnti, 0, ack_bit->pid, 0, is_ok);
srsran::rwlock_read_guard rw_lock(rwmutex);
if (ue_db.contains(rnti)) {
ue_db[rnti]->metrics_tx(is_ok, 0 /*TODO get size of packet from scheduler somehow*/);
@ -392,7 +391,7 @@ bool mac_nr::handle_uci_data(const uint16_t rnti, const srsran_uci_cfg_nr_t& cfg
// Process SR
if (value.valid and value.sr > 0) {
sched.ul_sr_info(cfg_.pucch.rnti);
sched->ul_sr_info(cfg_.pucch.rnti);
}
// Process CQI
@ -417,7 +416,7 @@ int mac_nr::pusch_info(const srsran_slot_cfg_t& slot_cfg, mac_interface_phy_nr::
return SRSRAN_ERROR;
}
sched.ul_crc_info(rnti, 0, pusch_info.pid, pusch_info.pusch_data.tb[0].crc);
sched->ul_crc_info(rnti, 0, pusch_info.pid, pusch_info.pusch_data.tb[0].crc);
// process only PDUs with CRC=OK
if (pusch_info.pusch_data.tb[0].crc) {

@ -13,7 +13,7 @@
#include "srsenb/hdr/stack/mac/nr/sched_nr.h"
#include "srsenb/hdr/stack/mac/common/mac_metrics.h"
#include "srsenb/hdr/stack/mac/nr/harq_softbuffer.h"
#include "srsenb/hdr/stack/mac/nr/sched_nr_cell.h"
#include "srsenb/hdr/stack/mac/nr/sched_nr_bwp.h"
#include "srsenb/hdr/stack/mac/nr/sched_nr_worker.h"
#include "srsran/common/thread_pool.h"
@ -66,9 +66,100 @@ private:
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
sched_nr::sched_nr() : logger(&srslog::fetch_basic_logger("MAC-NR")) {}
void sched_nr::feedback_manager::enqueue_event(uint16_t rnti, srsran::move_callback<void()> ev)
{
std::lock_guard<std::mutex> lock(event_mutex);
next_slot_events.push_back(ue_event_t{rnti, std::move(ev)});
}
/// Transfer all queued events into `current_events`, leaving the internal
/// queue empty. Any previous content of `current_events` is discarded.
void sched_nr::feedback_manager::get_pending_events(srsran::deque<ue_event_t>& current_events)
{
  current_events.clear();
  std::lock_guard<std::mutex> guard(event_mutex);
  current_events.swap(next_slot_events);
}
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/// Helper that hands scheduler UE metrics to an external thread in a
/// thread-safe manner: the external thread posts a request via get_metrics()
/// and blocks; the scheduler main thread fulfills it in save_metrics().
class sched_nr::ue_metrics_manager
{
public:
ue_metrics_manager(ue_map_t& ues_) : ues(ues_) {}
/// Mark the manager as stopped and fulfill any still-pending request here,
/// since the scheduler main thread may no longer call save_metrics().
void stop()
{
std::unique_lock<std::mutex> lock(mutex);
if (not stopped) {
stopped = true;
// requests during sched::stop may not be fulfilled by sched main thread
save_metrics_nolock();
}
}
/// Blocking call that waits for the metrics to be filled
void get_metrics(mac_metrics_t& requested_metrics)
{
std::unique_lock<std::mutex> lock(mutex);
pending_metrics = &requested_metrics;
if (not stopped) {
// NOTE(review): stop() fulfills a pending request but does not notify
// cvar; a thread already waiting here would then depend on a spurious
// wakeup to observe pending_metrics == nullptr — confirm intended.
cvar.wait(lock, [this]() { return pending_metrics == nullptr; });
} else {
save_metrics_nolock();
}
}
// NOTE(review): the line below is a stray artifact of the diff rendering
// (the old inline sched_nr destructor removed by this commit); it is not
// part of this class.
sched_nr::~sched_nr() {}
/// called from within the scheduler main thread to save metrics
void save_metrics()
{
{
std::unique_lock<std::mutex> lock(mutex);
save_metrics_nolock();
}
// wake a get_metrics() caller waiting for the request to be fulfilled
cvar.notify_one();
}
private:
/// Fill the pending request from the UE database. Caller must hold `mutex`.
void save_metrics_nolock()
{
if (pending_metrics == nullptr) {
return;
}
for (mac_ue_metrics_t& ue_metric : pending_metrics->ues) {
// only carrier 0 is read — presumably the PCell; TODO confirm for CA UEs
if (ues.contains(ue_metric.rnti) and ues[ue_metric.rnti]->carriers[0] != nullptr) {
auto& ue_cc = *ues[ue_metric.rnti]->carriers[0];
ue_metric.tx_brate = ue_cc.metrics.tx_brate;
ue_metric.tx_errors = ue_cc.metrics.tx_errors;
ue_metric.tx_pkts = ue_cc.metrics.tx_pkts;
// counters are reset after each report
ue_cc.metrics = {};
}
}
// mark the request as fulfilled
pending_metrics = nullptr;
}
ue_map_t& ues;
std::mutex mutex;
std::condition_variable cvar;
mac_metrics_t* pending_metrics = nullptr;
bool stopped = false;
};
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/// Construct the NR scheduler; the metrics manager is bound to the UE database
/// it will read metrics from.
sched_nr::sched_nr() : logger(&srslog::fetch_basic_logger("MAC-NR"))
{
  metrics_handler = std::unique_ptr<ue_metrics_manager>(new ue_metrics_manager(ue_db));
}
/// Ensure the scheduler is stopped (any pending metrics request is fulfilled)
/// before members are destroyed.
sched_nr::~sched_nr()
{
stop();
}
/// Stop the scheduler: unblocks/fulfills any pending metrics request.
/// Safe to call multiple times — ue_metrics_manager::stop() guards on a flag.
void sched_nr::stop()
{
metrics_handler->stop();
}
int sched_nr::config(const sched_args_t& sched_cfg, srsran::const_span<cell_cfg_t> cell_list)
{
@ -81,27 +172,26 @@ int sched_nr::config(const sched_args_t& sched_cfg, srsran::const_span<cell_cfg_
cfg.cells.emplace_back(cc, cell_list[cc], cfg.sched_cfg);
}
pending_results.reset(new ul_sched_result_buffer(cell_list.size()));
// Initiate cell-specific schedulers
cells.reserve(cell_list.size());
for (uint32_t cc = 0; cc < cell_list.size(); ++cc) {
cells.emplace_back(new serv_cell_manager{cfg.cells[cc]});
cc_workers.resize(cfg.cells.size());
for (uint32_t cc = 0; cc < cfg.cells.size(); ++cc) {
cc_workers[cc].reset(new slot_cc_worker{cfg.cells[cc]});
}
pending_results.reset(new ul_sched_result_buffer(cell_list.size()));
sched_workers.reset(new sched_nr_impl::sched_worker_manager(ue_db, cfg, cells));
return SRSRAN_SUCCESS;
}
void sched_nr::ue_cfg(uint16_t rnti, const ue_cfg_t& uecfg)
{
srsran_assert(assert_ue_cfg_valid(rnti, uecfg) == SRSRAN_SUCCESS, "Invalid UE configuration");
sched_workers->enqueue_event(rnti, [this, rnti, uecfg]() { ue_cfg_impl(rnti, uecfg); });
pending_feedback.enqueue_event(rnti, [this, rnti, uecfg]() { ue_cfg_impl(rnti, uecfg); });
}
void sched_nr::ue_rem(uint16_t rnti)
{
sched_workers->enqueue_event(rnti, [this, rnti]() {
pending_feedback.enqueue_event(rnti, [this, rnti]() {
auto ue_it = ue_db.find(rnti);
if (ue_it == ue_db.end()) {
logger->warning("SCHED: ue_rem(rnti) called for inexistent rnti=0x%x", rnti);
@ -130,14 +220,74 @@ void sched_nr::ue_cfg_impl(uint16_t rnti, const ue_cfg_t& uecfg)
}
}
// NOTE: there is no parallelism in these operations
void sched_nr::slot_indication(slot_point slot_tx)
{
srsran_assert(worker_count.load(std::memory_order_relaxed) == 0,
"Call of sched slot_indication when previous TTI has not been completed");
// mark the start of slot.
current_slot_tx = slot_tx;
worker_count.store(static_cast<int>(cfg.cells.size()), std::memory_order_relaxed);
// Extract pending feedback events
pending_feedback.get_pending_events(current_slot_events);
// process non-cc specific feedback if pending (e.g. SRs, buffer state updates, UE config) for CA-enabled UEs
// Note: non-CA UEs are updated later in get_dl_sched, to leverage parallelism
for (feedback_manager::ue_event_t& ev : current_slot_events) {
auto ue_it = ue_db.find(ev.rnti);
bool contains_ue = ue_it != ue_db.end();
if (not contains_ue or (ue_it->second->has_ca())) {
ev.callback();
}
}
// prepare CA-enabled UEs internal state for new slot
// Note: non-CA UEs are updated later in get_dl_sched, to leverage parallelism
for (auto& u : ue_db) {
if (u.second->has_ca()) {
u.second->new_slot(slot_tx);
}
}
// If UE metrics were externally requested, store the current UE state
metrics_handler->save_metrics();
}
/// Generate {pdcch_slot,cc} scheduling decision
int sched_nr::run_slot(slot_point slot_dl, uint32_t cc, dl_res_t& result)
int sched_nr::get_dl_sched(slot_point pdsch_tti, uint32_t cc, dl_res_t& result)
{
srsran_assert(pdsch_tti == current_slot_tx, "Unexpected pdsch_tti slot received");
// Copy UL results to intermediate buffer
ul_res_t& ul_res = pending_results->add_ul_result(slot_dl, cc);
ul_res_t& ul_res = pending_results->add_ul_result(pdsch_tti, cc);
// process non-cc specific feedback if pending (e.g. SRs, buffer state updates, UE config) for non-CA UEs
for (feedback_manager::ue_event_t& ev : current_slot_events) {
auto ue_it = ue_db.find(ev.rnti);
bool contains_ue = ue_it != ue_db.end();
if (contains_ue and not ue_it->second->has_ca() and ue_it->second->carriers[cc] != nullptr) {
ev.callback();
}
}
// prepare non-CA UEs internal state for new slot
for (auto& u : ue_db) {
if (not u.second->has_ca() and u.second->carriers[cc] != nullptr) {
u.second->new_slot(current_slot_tx);
}
}
// Generate {slot_idx,cc} result
sched_workers->run_slot(slot_dl, cc, result, ul_res);
// Process pending CC-specific feedback, generate {slot_idx,cc} scheduling decision
cc_workers[cc]->run_slot(pdsch_tti, ue_db, result, ul_res);
// decrement the number of active workers
int rem_workers = worker_count.fetch_sub(1, std::memory_order_release) - 1;
srsran_assert(rem_workers >= 0, "invalid number of calls to get_dl_sched(slot, cc)");
if (rem_workers == 0) {
// Last Worker to finish slot
// TODO: Sync sched results with ue_db state
}
return SRSRAN_SUCCESS;
}
@ -158,21 +308,21 @@ int sched_nr::get_ul_sched(slot_point slot_ul, uint32_t cc, ul_res_t& result)
void sched_nr::get_metrics(mac_metrics_t& metrics)
{
sched_workers->get_metrics(metrics);
metrics_handler->get_metrics(metrics);
}
int sched_nr::dl_rach_info(uint32_t cc, const rar_info_t& rar_info)
{
sched_workers->enqueue_cc_event(cc, [this, cc, rar_info]() { cells[cc]->bwps[0].ra.dl_rach_info(rar_info); });
cc_workers[cc]->pending_feedback.enqueue_common_event(
[this, cc, rar_info]() { cc_workers[cc]->bwps[0].ra.dl_rach_info(rar_info); });
return SRSRAN_SUCCESS;
}
void sched_nr::dl_ack_info(uint16_t rnti, uint32_t cc, uint32_t pid, uint32_t tb_idx, bool ack)
{
sched_workers->enqueue_cc_feedback(rnti, cc, [this, pid, tb_idx, ack](ue_carrier& ue_cc) {
cc_workers[cc]->pending_feedback.enqueue_ue_feedback(rnti, [this, pid, tb_idx, ack](ue_carrier& ue_cc) {
int tbs = ue_cc.harq_ent.dl_ack_info(pid, tb_idx, ack);
if (tbs >= 0) {
std::lock_guard<std::mutex> lock(ue_cc.metrics_mutex);
if (ack) {
ue_cc.metrics.tx_brate += tbs;
} else {
@ -187,7 +337,7 @@ void sched_nr::dl_ack_info(uint16_t rnti, uint32_t cc, uint32_t pid, uint32_t tb
void sched_nr::ul_crc_info(uint16_t rnti, uint32_t cc, uint32_t pid, bool crc)
{
sched_workers->enqueue_cc_feedback(rnti, cc, [this, pid, crc](ue_carrier& ue_cc) {
cc_workers[cc]->pending_feedback.enqueue_ue_feedback(rnti, [this, pid, crc](ue_carrier& ue_cc) {
if (ue_cc.harq_ent.ul_crc_info(pid, crc) < 0) {
logger->warning("SCHED: rnti=0x%x, received CRC for empty pid=%d", ue_cc.rnti, pid);
}
@ -196,7 +346,7 @@ void sched_nr::ul_crc_info(uint16_t rnti, uint32_t cc, uint32_t pid, bool crc)
void sched_nr::ul_sr_info(uint16_t rnti)
{
sched_workers->enqueue_event(rnti, [this, rnti]() {
pending_feedback.enqueue_event(rnti, [this, rnti]() {
if (ue_db.contains(rnti)) {
ue_db[rnti]->ul_sr_info();
} else {
@ -207,7 +357,7 @@ void sched_nr::ul_sr_info(uint16_t rnti)
void sched_nr::ul_bsr(uint16_t rnti, uint32_t lcg_id, uint32_t bsr)
{
sched_workers->enqueue_event(rnti, [this, rnti, lcg_id, bsr]() {
pending_feedback.enqueue_event(rnti, [this, rnti, lcg_id, bsr]() {
if (ue_db.contains(rnti)) {
ue_db[rnti]->ul_bsr(lcg_id, bsr);
} else {
@ -218,7 +368,7 @@ void sched_nr::ul_bsr(uint16_t rnti, uint32_t lcg_id, uint32_t bsr)
void sched_nr::dl_buffer_state(uint16_t rnti, uint32_t lcid, uint32_t newtx, uint32_t retx)
{
sched_workers->enqueue_event(rnti, [this, rnti, lcid, newtx, retx]() {
pending_feedback.enqueue_event(rnti, [this, rnti, lcid, newtx, retx]() {
if (ue_db.contains(rnti)) {
ue_db[rnti]->rlc_buffer_state(lcid, newtx, retx);
} else {

@ -10,7 +10,7 @@
*
*/
#include "srsenb/hdr/stack/mac/nr/sched_nr_cell.h"
#include "srsenb/hdr/stack/mac/nr/sched_nr_bwp.h"
#include "srsran/common/standard_streams.h"
#include "srsran/common/string_helpers.h"
@ -220,22 +220,9 @@ int ra_sched::dl_rach_info(const dl_sched_rar_info_t& rar_info)
return SRSRAN_SUCCESS;
}
bwp_ctxt::bwp_ctxt(const bwp_params_t& bwp_cfg) :
bwp_manager::bwp_manager(const bwp_params_t& bwp_cfg) :
cfg(&bwp_cfg), ra(bwp_cfg), grid(bwp_cfg), data_sched(new sched_nr_time_rr())
{}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
serv_cell_manager::serv_cell_manager(const cell_params_t& cell_cfg_) :
cfg(cell_cfg_), logger(srslog::fetch_basic_logger(cell_cfg_.sched_args.logger_name))
{
for (uint32_t bwp_id = 0; bwp_id < cfg.cfg.bwps.size(); ++bwp_id) {
bwps.emplace_back(cell_cfg_.bwps[bwp_id]);
}
// Pre-allocate HARQs in common pool of softbuffers
harq_softbuffer_pool::get_instance().init_pool(cfg.nof_prb());
}
} // namespace sched_nr_impl
} // namespace srsenb

@ -11,7 +11,7 @@
*/
#include "srsenb/hdr/stack/mac/nr/sched_nr_grant_allocator.h"
#include "srsenb/hdr/stack/mac/nr/sched_nr_cell.h"
#include "srsenb/hdr/stack/mac/nr/sched_nr_bwp.h"
#include "srsenb/hdr/stack/mac/nr/sched_nr_helpers.h"
namespace srsenb {
@ -61,8 +61,8 @@ bwp_res_grid::bwp_res_grid(const bwp_params_t& bwp_cfg_) : cfg(&bwp_cfg_)
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
bwp_slot_allocator::bwp_slot_allocator(bwp_res_grid& bwp_grid_) :
logger(bwp_grid_.cfg->logger), cfg(*bwp_grid_.cfg), bwp_grid(bwp_grid_)
bwp_slot_allocator::bwp_slot_allocator(bwp_res_grid& bwp_grid_, slot_point pdcch_slot_, slot_ue_map_t& ues_) :
logger(bwp_grid_.cfg->logger), cfg(*bwp_grid_.cfg), bwp_grid(bwp_grid_), pdcch_slot(pdcch_slot_), slot_ues(&ues_)
{}
alloc_result bwp_slot_allocator::alloc_si(uint32_t aggr_idx, uint32_t si_idx, uint32_t si_ntx, const prb_interval& prbs)

@ -18,21 +18,17 @@
namespace srsenb {
namespace sched_nr_impl {
slot_cc_worker::slot_cc_worker(serv_cell_manager& cc_sched) :
cell(cc_sched),
cfg(cc_sched.cfg),
bwp_alloc(cc_sched.bwps[0].grid),
logger(srslog::fetch_basic_logger(cc_sched.cfg.sched_args.logger_name))
/// Bind the feedback manager to its carrier configuration and logger.
carrier_feedback_manager::carrier_feedback_manager(const cell_params_t& cell_cfg) :
cfg(cell_cfg), logger(srslog::fetch_basic_logger(cell_cfg.sched_args.logger_name))
{}
void slot_cc_worker::enqueue_cc_event(srsran::move_callback<void()> ev)
void carrier_feedback_manager::enqueue_common_event(srsran::move_callback<void()> ev)
{
std::lock_guard<std::mutex> lock(feedback_mutex);
pending_events.emplace_back();
pending_events.back() = std::move(ev);
pending_events.emplace_back(std::move(ev));
}
void slot_cc_worker::enqueue_cc_feedback(uint16_t rnti, feedback_callback_t fdbk)
void carrier_feedback_manager::enqueue_ue_feedback(uint16_t rnti, feedback_callback_t fdbk)
{
std::lock_guard<std::mutex> lock(feedback_mutex);
pending_feedback.emplace_back();
@ -40,7 +36,7 @@ void slot_cc_worker::enqueue_cc_feedback(uint16_t rnti, feedback_callback_t fdbk
pending_feedback.back().fdbk = std::move(fdbk);
}
void slot_cc_worker::run_feedback(ue_map_t& ue_db)
void carrier_feedback_manager::run(ue_map_t& ue_db)
{
{
std::lock_guard<std::mutex> lock(feedback_mutex);
@ -63,14 +59,46 @@ void slot_cc_worker::run_feedback(ue_map_t& ue_db)
tmp_feedback_to_run.clear();
}
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/// Create the carrier-specific scheduler worker: one bwp_manager per
/// configured BWP, plus pre-allocation of the shared HARQ softbuffer pool.
cc_worker::cc_worker(const cell_params_t& params) :
cfg(params), logger(srslog::fetch_basic_logger(params.sched_args.logger_name)), pending_feedback(params)
{
for (uint32_t bwp_id = 0; bwp_id < cfg.cfg.bwps.size(); ++bwp_id) {
bwps.emplace_back(cfg.bwps[bwp_id]);
}
// Pre-allocate HARQs in common pool of softbuffers
harq_softbuffer_pool::get_instance().init_pool(cfg.nof_prb());
}
/// Copy the {slot,cc} scheduling decision stored in the BWP resource grid into
/// the caller-provided DL/UL result structs, then clear the grid slot so it
/// can be reused for a future slot. Always returns true.
bool cc_worker::save_sched_result(dl_sched_res_t& dl_res, ul_sched_t& ul_res, slot_point slot_tx)
{
// only BWP 0 is used here — presumably single-BWP operation; confirm
auto& bwp_slot = bwps[0].grid[slot_tx];
dl_res.dl_sched.pdcch_dl = bwp_slot.dl_pdcchs;
dl_res.dl_sched.pdcch_ul = bwp_slot.ul_pdcchs;
dl_res.dl_sched.pdsch = bwp_slot.pdschs;
dl_res.rar = bwp_slot.rar;
dl_res.dl_sched.ssb = bwp_slot.ssb;
dl_res.dl_sched.nzp_csi_rs = bwp_slot.nzp_csi_rs;
ul_res.pusch = bwp_slot.puschs;
ul_res.pucch = bwp_slot.pucch;
// clear up BWP slot
bwp_slot.reset();
return true;
}
/// Called within a locked context, to generate {slot, cc} scheduling decision
void slot_cc_worker::run(slot_point pdcch_slot, ue_map_t& ue_db)
void cc_worker::run_slot(slot_point pdcch_slot, ue_map_t& ue_db, dl_sched_res_t& dl_res, ul_sched_t& ul_res)
{
srsran_assert(not running(), "scheduler worker::start() called for active worker");
slot_rx = pdcch_slot - TX_ENB_DELAY;
// Create an BWP allocator object that will passed along to RA, SI, Data schedulers
bwp_slot_allocator bwp_alloc{bwps[0].grid, pdcch_slot, slot_ues};
// Run pending cell feedback (process feedback)
run_feedback(ue_db);
pending_feedback.run(ue_db);
// Reserve UEs for this worker slot (select candidate UEs)
for (auto& ue_pair : ue_db) {
@ -93,49 +121,52 @@ void slot_cc_worker::run(slot_point pdcch_slot, ue_map_t& ue_db)
// UE acquired successfully for scheduling in this {slot, cc}
}
// Create an BWP allocator object that will passed along to RA, SI, Data schedulers
bwp_alloc.new_slot(slot_rx + TX_ENB_DELAY, slot_ues);
// Log UEs state for slot
log_sched_slot_ues(logger, bwp_alloc.get_pdcch_tti(), cfg.cc, slot_ues);
log_sched_slot_ues(logger, pdcch_slot, cfg.cc, slot_ues);
// Allocate cell DL signalling
bwp_slot_grid& bwp_pdcch_slot = bwps[0].grid[pdcch_slot];
sched_dl_signalling(*bwps[0].cfg, pdcch_slot, bwp_pdcch_slot.ssb, bwp_pdcch_slot.nzp_csi_rs);
// Allocate pending RARs
cell.bwps[0].ra.run_slot(bwp_alloc);
bwps[0].ra.run_slot(bwp_alloc);
// TODO: Prioritize PDCCH scheduling for DL and UL data in a Round-Robin fashion
alloc_dl_ues();
alloc_ul_ues();
alloc_dl_ues(bwp_alloc);
alloc_ul_ues(bwp_alloc);
// Post-processing of scheduling decisions
postprocess_decisions();
postprocess_decisions(bwp_alloc);
// Log CC scheduler result
log_sched_bwp_result(logger, bwp_alloc.get_pdcch_tti(), cell.bwps[0].grid, slot_ues);
log_sched_bwp_result(logger, bwp_alloc.get_pdcch_tti(), bwps[0].grid, slot_ues);
// Post-process and copy results to intermediate buffer
save_sched_result(dl_res, ul_res, pdcch_slot);
// releases UE resources
slot_ues.clear();
slot_rx = {};
}
void slot_cc_worker::alloc_dl_ues()
void cc_worker::alloc_dl_ues(bwp_slot_allocator& bwp_alloc)
{
if (not cfg.sched_args.pdsch_enabled) {
return;
}
cell.bwps[0].data_sched->sched_dl_users(slot_ues, bwp_alloc);
bwps[0].data_sched->sched_dl_users(slot_ues, bwp_alloc);
}
void slot_cc_worker::alloc_ul_ues()
void cc_worker::alloc_ul_ues(bwp_slot_allocator& bwp_alloc)
{
if (not cfg.sched_args.pusch_enabled) {
return;
}
cell.bwps[0].data_sched->sched_ul_users(slot_ues, bwp_alloc);
bwps[0].data_sched->sched_ul_users(slot_ues, bwp_alloc);
}
void slot_cc_worker::postprocess_decisions()
void cc_worker::postprocess_decisions(bwp_slot_allocator& bwp_alloc)
{
auto& bwp_slot = cell.bwps[0].grid[bwp_alloc.get_pdcch_tti()];
auto& bwp_slot = bwps[0].grid[bwp_alloc.get_pdcch_tti()];
srsran_slot_cfg_t slot_cfg{};
slot_cfg.idx = bwp_alloc.get_pdcch_tti().to_uint();
@ -218,178 +249,5 @@ void slot_cc_worker::postprocess_decisions()
}
}
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
sched_worker_manager::sched_worker_manager(ue_map_t& ue_db_,
const sched_params& cfg_,
srsran::span<std::unique_ptr<serv_cell_manager> > cells_) :
cfg(cfg_), ue_db(ue_db_), logger(srslog::fetch_basic_logger(cfg_.sched_cfg.logger_name)), cells(cells_)
{
cc_worker_list.reserve(cfg.cells.size());
for (uint32_t cc = 0; cc < cfg.cells.size(); ++cc) {
cc_worker_list.emplace_back(new cc_context{*cells[cc]});
}
}
sched_worker_manager::~sched_worker_manager() = default;
void sched_worker_manager::enqueue_event(uint16_t rnti, srsran::move_callback<void()> ev)
{
std::lock_guard<std::mutex> lock(event_mutex);
next_slot_events.push_back(ue_event_t{rnti, std::move(ev)});
}
void sched_worker_manager::enqueue_cc_event(uint32_t cc, srsran::move_callback<void()> ev)
{
cc_worker_list[cc]->worker.enqueue_cc_event(std::move(ev));
}
/**
* Update UEs state that is non-CC specific (e.g. SRs, buffer status, UE configuration)
* @param slot_tx
* @param locked_context to update only UEs with CA enabled or not
*/
void sched_worker_manager::update_ue_db(slot_point slot_tx, bool locked_context)
{
// process non-cc specific feedback if pending (e.g. SRs, buffer updates, UE config)
for (ue_event_t& ev : slot_events) {
if ((locked_context and not ue_db.contains(ev.rnti)) or
(ue_db.contains(ev.rnti) and ue_db[ev.rnti]->has_ca() == locked_context)) {
ev.callback();
}
}
// prepare UEs internal state for new slot
for (auto& u : ue_db) {
if (u.second->has_ca() == locked_context) {
u.second->new_slot(slot_tx);
}
}
}
void sched_worker_manager::run_slot(slot_point slot_tx, uint32_t cc, dl_sched_res_t& dl_res, ul_sched_t& ul_res)
{
// Fill DL signalling messages that do not depend on UEs state
serv_cell_manager& serv_cell = *cells[cc];
bwp_slot_grid& bwp_slot = serv_cell.bwps[0].grid[slot_tx];
sched_dl_signalling(*serv_cell.bwps[0].cfg, slot_tx, bwp_slot.ssb, bwp_slot.nzp_csi_rs);
// Synchronization point between CC workers, to avoid concurrency in UE state access
srsran::bounded_vector<std::condition_variable*, SRSRAN_MAX_CARRIERS> waiting_cvars;
{
std::unique_lock<std::mutex> lock(slot_mutex);
while (current_slot.valid() and current_slot != slot_tx) {
// Wait for previous slot to finish
cc_worker_list[cc]->waiting++;
cc_worker_list[cc]->cvar.wait(lock);
cc_worker_list[cc]->waiting--;
}
if (not current_slot.valid()) {
/* First Worker to start slot */
// process non-cc specific feedback if pending for UEs with CA
// NOTE: there is no parallelism in these operations
slot_events.clear();
{
std::lock_guard<std::mutex> ev_lock(event_mutex);
next_slot_events.swap(slot_events);
}
update_ue_db(slot_tx, true);
// mark the start of slot. awake remaining workers if locking on the mutex
current_slot = slot_tx;
worker_count.store(static_cast<int>(cc_worker_list.size()), std::memory_order_relaxed);
for (auto& w : cc_worker_list) {
if (w->waiting > 0) {
waiting_cvars.push_back(&w->cvar);
}
}
lock.unlock();
for (auto& w : waiting_cvars) {
w->notify_one();
}
waiting_cvars.clear();
}
}
/* Parallel Region */
// process non-cc specific feedback if pending (e.g. SRs, buffer updates, UE config) for UEs without CA
update_ue_db(slot_tx, false);
// process pending feedback, generate {slot, cc} scheduling decision
cc_worker_list[cc]->worker.run(slot_tx, ue_db);
// decrement the number of active workers
int rem_workers = worker_count.fetch_sub(1, std::memory_order_release) - 1;
srsran_assert(rem_workers >= 0, "invalid number of calls to run_slot(slot, cc)");
if (rem_workers == 0) {
/* Last Worker to finish slot */
// Signal the release of slot if it is the last worker that finished its own generation
std::unique_lock<std::mutex> lock(slot_mutex);
current_slot = {};
// All the workers of the same slot have finished. Synchronize scheduling decisions with UEs state
for (auto& c : cc_worker_list) {
if (c->waiting > 0) {
waiting_cvars.push_back(&c->cvar);
}
}
// Awake waiting workers
lock.unlock();
for (auto& c : waiting_cvars) {
c->notify_one();
}
}
// Post-process and copy results to intermediate buffer
save_sched_result(slot_tx, cc, dl_res, ul_res);
}
void sched_worker_manager::get_metrics(mac_metrics_t& metrics)
{
std::unique_lock<std::mutex> lock(slot_mutex);
get_metrics_nolocking(metrics);
}
bool sched_worker_manager::save_sched_result(slot_point pdcch_slot,
uint32_t cc,
dl_sched_res_t& dl_res,
ul_sched_t& ul_res)
{
// NOTE: Unlocked region
auto& bwp_slot = cells[cc]->bwps[0].grid[pdcch_slot];
dl_res.dl_sched.pdcch_dl = bwp_slot.dl_pdcchs;
dl_res.dl_sched.pdcch_ul = bwp_slot.ul_pdcchs;
dl_res.dl_sched.pdsch = bwp_slot.pdschs;
dl_res.rar = bwp_slot.rar;
dl_res.dl_sched.ssb = bwp_slot.ssb;
dl_res.dl_sched.nzp_csi_rs = bwp_slot.nzp_csi_rs;
ul_res.pusch = bwp_slot.puschs;
ul_res.pucch = bwp_slot.pucch;
// clear up BWP slot
bwp_slot.reset();
return true;
}
void sched_worker_manager::get_metrics_nolocking(mac_metrics_t& metrics)
{
for (mac_ue_metrics_t& ue_metric : metrics.ues) {
if (ue_db.contains(ue_metric.rnti) and ue_db[ue_metric.rnti]->carriers[0] != nullptr) {
auto& ue_cc = *ue_db[ue_metric.rnti]->carriers[0];
std::lock_guard<std::mutex> lock(ue_cc.metrics_mutex);
ue_metric.tx_brate = ue_cc.metrics.tx_brate;
ue_metric.tx_errors = ue_cc.metrics.tx_errors;
ue_metric.tx_pkts = ue_cc.metrics.tx_pkts;
ue_cc.metrics = {};
}
}
}
} // namespace sched_nr_impl
} // namespace srsenb

@ -12,7 +12,7 @@
#include "sched_nr_cfg_generators.h"
#include "sched_nr_common_test.h"
#include "srsenb/hdr/stack/mac/nr/sched_nr_cell.h"
#include "srsenb/hdr/stack/mac/nr/sched_nr_bwp.h"
#include "srsran/common/test_common.h"
#include "srsran/support/srsran_test.h"
#include <random>
@ -44,7 +44,6 @@ void test_single_prach()
TESTASSERT(rasched.empty());
std::unique_ptr<bwp_res_grid> res_grid(new bwp_res_grid{bwpparams});
bwp_slot_allocator alloc(*res_grid);
// Create UE
sched_nr_interface::ue_cfg_t uecfg = get_default_ue_cfg(1);
@ -54,7 +53,7 @@ void test_single_prach()
slot_point prach_slot{0, std::uniform_int_distribution<uint32_t>{TX_ENB_DELAY, 20}(rgen)};
const bwp_slot_grid* result = nullptr;
auto run_slot = [&alloc, &rasched, &pdcch_slot, &slot_ues, &u]() -> const bwp_slot_grid* {
auto run_slot = [&res_grid, &rasched, &pdcch_slot, &slot_ues, &u]() -> const bwp_slot_grid* {
mac_logger.set_context(pdcch_slot.to_uint());
u.new_slot(pdcch_slot);
u.carriers[0]->new_slot(pdcch_slot);
@ -63,7 +62,7 @@ void test_single_prach()
if (not sfu.empty()) {
slot_ues.insert(rnti, std::move(sfu));
}
alloc.new_slot(pdcch_slot, slot_ues);
bwp_slot_allocator alloc(*res_grid, pdcch_slot, slot_ues);
rasched.run_slot(alloc);

@ -192,6 +192,8 @@ void sched_nr_base_tester::run_slot(slot_point slot_tx)
slot_ctxt = get_enb_ctxt();
slot_start_tp = std::chrono::steady_clock::now();
sched_ptr->slot_indication(current_slot_tx);
// Generate CC result (parallel or serialized)
uint32_t worker_idx = 0;
for (uint32_t cc = 0; cc < cell_params.size(); ++cc) {
@ -208,7 +210,7 @@ void sched_nr_base_tester::generate_cc_result(uint32_t cc)
{
// Run scheduler
sched_nr_interface::dl_res_t dl_sched(cc_results[cc].rar, cc_results[cc].dl_res);
sched_ptr->run_slot(current_slot_tx, cc, dl_sched);
sched_ptr->get_dl_sched(current_slot_tx, cc, dl_sched);
cc_results[cc].rar = dl_sched.rar;
sched_ptr->get_ul_sched(current_slot_tx, cc, cc_results[cc].ul_res);
auto tp2 = std::chrono::steady_clock::now();

@ -454,6 +454,13 @@ public:
~gnb_dummy_stack() = default;
void stop()
{
if (not use_dummy_mac) {
mac->stop();
}
}
bool is_valid() const { return valid; }
int slot_indication(const srsran_slot_cfg_t& slot_cfg) override { return 0; }

@ -132,6 +132,7 @@ public:
gnb_phy_com.stop();
gnb_phy.stop();
ue_phy.stop();
gnb_stack.stop();
}
~test_bench() = default;

Loading…
Cancel
Save