@@ -16,24 +16,57 @@
 
 namespace srsenb {
 
 namespace sched_nr_impl {
 
-slot_cc_worker::slot_cc_worker(serv_cell_ctxt& cc_sched) :
-  cell(cc_sched), cfg(*cc_sched.cfg), bwp_alloc(cc_sched.bwps[0].grid), logger(srslog::fetch_basic_logger("MAC"))
+slot_cc_worker::slot_cc_worker(serv_cell_manager& cc_sched) :
+  cell(cc_sched), cfg(cc_sched.cfg), bwp_alloc(cc_sched.bwps[0].grid), logger(srslog::fetch_basic_logger("MAC"))
 {}
 
+void slot_cc_worker::enqueue_cc_feedback(uint16_t rnti, feedback_callback_t fdbk)
+{
+  std::lock_guard<std::mutex> lock(feedback_mutex);
+  pending_feedback.emplace_back();
+  pending_feedback.back().rnti = rnti;
+  pending_feedback.back().fdbk = std::move(fdbk);
+}
+
+void slot_cc_worker::run_feedback(ue_map_t& ue_db)
+{
+  {
+    std::lock_guard<std::mutex> lock(feedback_mutex);
+    tmp_feedback_to_run.swap(pending_feedback);
+  }
+
+  for (feedback_t& f : tmp_feedback_to_run) {
+    if (ue_db.contains(f.rnti) and ue_db[f.rnti]->carriers[cfg.cc] != nullptr) {
+      f.fdbk(*ue_db[f.rnti]->carriers[cfg.cc]);
+    } else {
+      logger.warning("SCHED: feedback received for invalid rnti=0x%x, cc=%d", f.rnti, cfg.cc);
+    }
+  }
+  tmp_feedback_to_run.clear();
+}
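The pattern here is a double-buffered callback queue: producers append under feedback_mutex, and run_feedback drains the whole queue with a single O(1) swap so the callbacks themselves run outside the lock. A minimal standalone sketch of the same idea (feedback_queue, queue_feedback and drain are illustrative names, not srsRAN symbols):

    #include <functional>
    #include <mutex>
    #include <utility>
    #include <vector>

    class feedback_queue {
    public:
      void queue_feedback(std::function<void()> fdbk)
      {
        std::lock_guard<std::mutex> lock(mtx);
        pending.push_back(std::move(fdbk));
      }

      void drain()
      {
        {
          std::lock_guard<std::mutex> lock(mtx); // hold the lock only for the O(1) swap
          tmp.swap(pending);
        }
        for (auto& f : tmp) {
          f(); // run callbacks without blocking producers
        }
        tmp.clear(); // keep capacity for the next slot, mirroring tmp_feedback_to_run
      }

    private:
      std::mutex                         mtx;
      std::vector<std::function<void()>> pending;
      std::vector<std::function<void()>> tmp;
    };
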
 /// Called at the beginning of TTI in a locked context, to reserve available UE resources
-void slot_cc_worker::start(tti_point tti_rx_, ue_map_t& ue_db)
+void slot_cc_worker::start(tti_point pdcch_tti, ue_map_t& ue_db)
 {
   srsran_assert(not running(), "scheduler worker::start() called for active worker");
-  tti_rx = tti_rx_;
+  tti_rx = pdcch_tti - TX_ENB_DELAY;
+
+  // Run pending cell feedback
+  run_feedback(ue_db);
 
-  // Try reserve UE cells for this worker
+  // Reserve UEs for this worker slot
   for (auto& ue_pair : ue_db) {
     uint16_t rnti = ue_pair.first;
     ue&      u    = *ue_pair.second;
     if (u.carriers[cfg.cc] == nullptr) {
       continue;
     }
 
-    slot_ues.insert(rnti, u.try_reserve(tti_rx, cfg.cc));
+    u.carriers[cfg.cc]->new_tti(pdcch_tti, u.cfg());
+
+    slot_ues.insert(rnti, u.try_reserve(pdcch_tti, cfg.cc));
     if (slot_ues[rnti].empty()) {
-      // Failed to synchronize because UE is being used by another worker
+      // Failed to generate slot UE because UE has no conditions for DL/UL tx
       slot_ues.erase(rnti);
       continue;
     }
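Note the timing change: start() is now keyed on the TTI in which the PDCCH goes out, and tti_rx is derived by subtracting TX_ENB_DELAY. A wrap-safe sketch of that arithmetic; the 10240-TTI period and the 4-subframe delay are assumptions based on srsRAN's LTE-style TTI numbering, not values taken from this diff:

    #include <cstdint>

    constexpr uint32_t NOF_TTIS     = 10240; // assumed TTI numbering period, as in srsRAN's tti_point
    constexpr uint32_t TX_ENB_DELAY = 4;     // assumed eNB processing delay in subframes

    // Mirrors what `tti_rx = pdcch_tti - TX_ENB_DELAY` must do across the wrap-around.
    uint32_t derive_tti_rx(uint32_t pdcch_tti)
    {
      return (pdcch_tti + NOF_TTIS - TX_ENB_DELAY) % NOF_TTIS;
    }
    // e.g. derive_tti_rx(100) == 96, derive_tti_rx(2) == 10238
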
@@ -56,18 +89,17 @@ void slot_cc_worker::run()
 
   // Log CC scheduler result
   log_result();
 }
 
-void slot_cc_worker::end_tti()
-{
-  srsran_assert(running(), "scheduler worker::end() called for non-active worker");
-
-  // releases UE resources
-  slot_ues.clear();
-
-  tti_rx = {};
-}
-
+void slot_cc_worker::finish()
+{
+  // synchronize results
+}
+
 void slot_cc_worker::alloc_dl_ues()
 {
   if (slot_ues.empty()) {
@@ -108,7 +140,7 @@ void slot_cc_worker::log_result() const
       fmt::format_to(fmtbuf,
                      "SCHED: DL {}, cc={}, rnti=0x{:x}, pid={}, nrtx={}, dai={}, tti_pdsch={}, tti_ack={}",
                      ue.h_dl->nof_retx() == 0 ? "tx" : "retx",
-                     cell.cfg->cc,
+                     cell.cfg.cc,
                      ue.rnti,
                      ue.h_dl->pid,
                      ue.h_dl->nof_retx(),
@@ -116,7 +148,7 @@ void slot_cc_worker::log_result() const
                      ue.pdsch_tti,
                      ue.uci_tti);
     } else if (pdcch.dci.ctx.rnti_type == srsran_rnti_type_ra) {
-      fmt::format_to(fmtbuf, "SCHED: DL RAR, cc={}", cell.cfg->cc);
+      fmt::format_to(fmtbuf, "SCHED: DL RAR, cc={}", cell.cfg.cc);
     } else {
       fmt::format_to(fmtbuf, "SCHED: unknown format");
     }
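For reference, log_result() builds each line incrementally in an fmt::memory_buffer and emits it once. A minimal standalone sketch of that pattern; it uses the output-iterator overload of fmt::format_to, which is portable across fmt versions (the buffer-reference overload seen in the diff is available in older fmt releases), and the field values are illustrative:

    #include <fmt/format.h>
    #include <iterator>

    int main()
    {
      fmt::memory_buffer fmtbuf;
      fmt::format_to(std::back_inserter(fmtbuf), "SCHED: DL {}, cc={}", "tx", 0);
      fmt::format_to(std::back_inserter(fmtbuf), ", rnti=0x{:x}", 0x4601); // appends to the same buffer
      fmt::print("{}\n", fmt::to_string(fmtbuf));
    }
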
@@ -127,105 +159,112 @@ void slot_cc_worker::log_result() const
 
 ///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
 
-sched_worker_manager::sched_worker_manager(ue_map_t& ue_db_, const sched_params& cfg_) :
-  cfg(cfg_), ue_db(ue_db_), logger(srslog::fetch_basic_logger("MAC"))
+sched_worker_manager::sched_worker_manager(ue_map_t& ue_db_,
+                                           const sched_params& cfg_,
+                                           srsran::span<std::unique_ptr<serv_cell_manager> > cells_) :
+  cfg(cfg_), ue_db(ue_db_), logger(srslog::fetch_basic_logger("MAC")), cells(cells_)
 {
+  cc_worker_list.reserve(cfg.cells.size());
   for (uint32_t cc = 0; cc < cfg.cells.size(); ++cc) {
-    cell_grid_list.emplace_back(cfg.cells[cc]);
-  }
-
-  // Note: For now, we only allow parallelism at the sector level
-  slot_worker_ctxts.resize(cfg.sched_cfg.nof_concurrent_subframes);
-  for (size_t i = 0; i < cfg.sched_cfg.nof_concurrent_subframes; ++i) {
-    slot_worker_ctxts[i].reset(new slot_worker_ctxt());
-    slot_worker_ctxts[i]->workers.reserve(cfg.cells.size());
-    for (uint32_t cc = 0; cc < cfg.cells.size(); ++cc) {
-      slot_worker_ctxts[i]->workers.emplace_back(cell_grid_list[cc]);
-    }
+    cc_worker_list.emplace_back(new cc_context{*cells[cc]});
   }
 }
 
 sched_worker_manager::~sched_worker_manager() = default;
 
-sched_worker_manager::slot_worker_ctxt& sched_worker_manager::get_sf(tti_point tti_rx)
+void sched_worker_manager::enqueue_event(uint16_t rnti, srsran::move_callback<void()> ev)
 {
-  return *slot_worker_ctxts[tti_rx.to_uint() % slot_worker_ctxts.size()];
+  std::lock_guard<std::mutex> lock(event_mutex);
+  next_slot_events.push_back(ue_event_t{rnti, std::move(ev)});
 }
 
-void sched_worker_manager::start_slot(tti_point tti_rx, srsran::move_callback<void()> process_feedback)
+void sched_worker_manager::run_slot(tti_point tti_tx, uint32_t cc)
 {
-  auto& sf_worker_ctxt = get_sf(tti_rx);
-
-  std::unique_lock<std::mutex> lock(sf_worker_ctxt.slot_mutex);
-  while ((sf_worker_ctxt.tti_rx.is_valid() and sf_worker_ctxt.tti_rx != tti_rx)) {
-    // wait for previous slot to finish
-    sf_worker_ctxt.nof_workers_waiting++;
-    sf_worker_ctxt.cvar.wait(lock);
-    sf_worker_ctxt.nof_workers_waiting--;
-  }
-  if (sf_worker_ctxt.tti_rx == tti_rx) {
-    // another worker with the same slot idx already started
-    return;
-  }
-
+  srsran::bounded_vector<std::condition_variable*, SRSRAN_MAX_CARRIERS> waiting_cvars;
+  {
+    std::unique_lock<std::mutex> lock(slot_mutex);
+    while (current_tti.is_valid() and current_tti != tti_tx) {
+      // Wait for previous slot to finish
+      cc_worker_list[cc]->waiting = true;
+      cc_worker_list[cc]->cvar.wait(lock);
+      cc_worker_list[cc]->waiting = false;
+    }
+    if (not current_tti.is_valid()) {
+      /* First Worker to start slot */
+
+      // process non-cc specific feedback if pending (e.g. SRs, buffer updates, UE config) for UEs with CA
+      // NOTE: there is no parallelism in these operations
+      slot_events.clear();
       {
-        std::lock_guard<std::mutex> db_lock(ue_db_mutex);
-        process_feedback();
+        std::lock_guard<std::mutex> ev_lock(event_mutex);
+        next_slot_events.swap(slot_events);
       }
+      for (ue_event_t& ev : slot_events) {
+        if (not ue_db.contains(ev.rnti) or ue_db[ev.rnti]->has_ca()) {
+          ev.callback();
+        }
+      }
 
-  for (uint32_t cc = 0; cc < sf_worker_ctxt.workers.size(); ++cc) {
-    sf_worker_ctxt.workers[cc].start(tti_rx, ue_db);
-  }
-
-  sf_worker_ctxt.tti_rx = tti_rx;
-  sf_worker_ctxt.worker_count.store(static_cast<int>(sf_worker_ctxt.workers.size()), std::memory_order_relaxed);
-  if (sf_worker_ctxt.nof_workers_waiting > 0) {
-    sf_worker_ctxt.cvar.notify_all();
-  }
-}
+      // mark the start of slot. awake remaining workers if locking on the mutex
+      current_tti = tti_tx;
+      worker_count.store(static_cast<int>(cc_worker_list.size()), std::memory_order_relaxed);
+      for (auto& w : cc_worker_list) {
+        if (w->waiting) {
+          waiting_cvars.push_back(&w->cvar);
+        }
+      }
+      lock.unlock();
+      for (auto& w : waiting_cvars) {
+        w->notify_one();
+      }
+      waiting_cvars.clear();
+    }
+  }
 
-bool sched_worker_manager::run_slot(tti_point tti_rx_, uint32_t cc)
-{
-  auto& sf_worker_ctxt = get_sf(tti_rx_);
-  srsran_assert(sf_worker_ctxt.tti_rx == tti_rx_, "invalid run_tti(tti, cc) arguments");
+  /* Parallel Region */
+
+  // process non-cc specific feedback if pending (e.g. SRs, buffer updates, UE config) for UEs without CA
+  for (ue_event_t& ev : slot_events) {
+    if (ue_db.contains(ev.rnti) and not ue_db[ev.rnti]->has_ca() and ue_db[ev.rnti]->pcell_cc() == cc) {
+      ev.callback();
+    }
+  }
+
+  // process pending feedback and pre-cache UE state for slot decision
+  cc_worker_list[cc]->worker.start(tti_tx, ue_db);
 
   // Get {tti, cc} scheduling decision
-  sf_worker_ctxt.workers[cc].run();
+  cc_worker_list[cc]->worker.run();
 
   // decrement the number of active workers
-  int rem_workers = sf_worker_ctxt.worker_count.fetch_sub(1, std::memory_order_release) - 1;
+  int rem_workers = worker_count.fetch_sub(1, std::memory_order_release) - 1;
   srsran_assert(rem_workers >= 0, "invalid number of calls to run_tti(tti, cc)");
-
-  return rem_workers == 0;
-}
-
-void sched_worker_manager::release_slot(tti_point tti_rx_)
-{
-  auto& sf_worker_ctxt = get_sf(tti_rx_);
-  srsran_assert(sf_worker_ctxt.tti_rx == tti_rx_, "invalid run_tti(tti, cc) arguments");
-  srsran_assert(sf_worker_ctxt.worker_count == 0, "invalid number of calls to run_tti(tti, cc)");
-
-  {
-    std::lock_guard<std::mutex> lock(ue_db_mutex);
+  if (rem_workers == 0) {
+    /* Last Worker to finish slot */
+
+    // Signal the release of slot if it is the last worker that finished its own generation
+    std::unique_lock<std::mutex> lock(slot_mutex);
+    current_tti = {};
 
     // All the workers of the same slot have finished. Synchronize scheduling decisions with UEs state
-    for (slot_cc_worker& worker : sf_worker_ctxt.workers) {
-      worker.end_tti();
+    for (auto& c : cc_worker_list) {
+      c->worker.finish();
+      if (c->waiting) {
+        waiting_cvars.push_back(&c->cvar);
+      }
     }
-  }
 
-  std::unique_lock<std::mutex> lock(sf_worker_ctxt.slot_mutex);
-  sf_worker_ctxt.tti_rx = {};
-  if (sf_worker_ctxt.nof_workers_waiting > 0) {
     // Awake waiting workers
     lock.unlock();
-    sf_worker_ctxt.cvar.notify_one();
+    for (auto& c : waiting_cvars) {
+      c->notify_one();
+    }
   }
 }
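The synchronization in run_slot reduces to a slot barrier: the first worker to arrive opens the slot and sets an atomic worker count, every worker decrements it when its carrier is done, and the one that brings it to zero closes the slot and wakes any threads parked on the next TTI. A minimal standalone sketch of that pattern with illustrative types; the release ordering on the decrement publishes each worker's results to whichever thread observes the count reach zero:

    #include <atomic>
    #include <condition_variable>
    #include <mutex>

    struct slot_barrier {
      std::mutex              mtx;
      std::condition_variable cvar;
      std::atomic<int>        worker_count{0};
      int                     current_slot = -1; // -1 ~ "no slot open", like an invalid tti_point

      void run(int slot, int nof_workers)
      {
        {
          std::unique_lock<std::mutex> lock(mtx);
          // Park until no slot is open or our slot is the open one
          cvar.wait(lock, [&] { return current_slot < 0 or current_slot == slot; });
          if (current_slot < 0) { // first worker opens the slot
            current_slot = slot;
            worker_count.store(nof_workers, std::memory_order_relaxed);
            cvar.notify_all();
          }
        }

        // ... parallel per-carrier work would go here ...

        // fetch_sub returns the previous value; 1 means this is the last worker
        if (worker_count.fetch_sub(1, std::memory_order_release) == 1) {
          std::lock_guard<std::mutex> lock(mtx); // last worker closes the slot
          current_slot = -1;
          cvar.notify_all();
        }
      }
    };
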
 
 bool sched_worker_manager::save_sched_result(tti_point pdcch_tti, uint32_t cc, dl_sched_t& dl_res, ul_sched_t& ul_res)
 {
-  auto& bwp_slot = cell_grid_list[cc].bwps[0].grid[pdcch_tti];
+  auto& bwp_slot = cells[cc]->bwps[0].grid[pdcch_tti];
 
   dl_res.pdcch_dl = bwp_slot.dl_pdcchs;
   dl_res.pdcch_ul = bwp_slot.ul_pdcchs;