sched,nr: changed scheduler nr api to not require slot_indication function

master
Francisco Paisana 3 years ago
parent 23afc66a86
commit 4f0b954cde

@@ -30,6 +30,7 @@ class sched_worker_manager;
 }
 
 class ue_event_manager;
+class sched_result_manager;
 
 class sched_nr final : public sched_nr_interface
 {
@@ -39,13 +40,14 @@ public:
   int  cell_cfg(srsran::const_span<cell_cfg_t> cell_list) override;
   void ue_cfg(uint16_t rnti, const ue_cfg_t& cfg) override;
-  void slot_indication(tti_point tti_rx) override;
-  int  generate_sched_result(tti_point tti_rx, uint32_t cc, tti_request_t& tti_req) override;
   void dl_ack_info(uint16_t rnti, uint32_t cc, uint32_t pid, uint32_t tb_idx, bool ack) override;
   void ul_sr_info(tti_point tti_rx, uint16_t rnti) override;
+  int  get_dl_sched(tti_point pdsch_tti, uint32_t cc, dl_sched_t& result) override;
+  int  get_ul_sched(tti_point pdcch_tti, uint32_t cc, ul_sched_t& result) override;
 
 private:
+  int  generate_slot_result(tti_point pdcch_tti, uint32_t cc);
   void ue_cfg_impl(uint16_t rnti, const ue_cfg_t& cfg);
 
   // args
@@ -59,8 +61,11 @@ private:
   std::mutex ue_db_mutex;
   ue_map_t ue_db;
 
-  // management of PHY UE feedback
+  // management of UE feedback
   std::unique_ptr<ue_event_manager> pending_events;
+
+  // management of Sched Result buffering
+  std::unique_ptr<sched_result_manager> pending_results;
 };
 
 } // namespace srsenb

@@ -84,23 +84,8 @@ public:
   ///// Sched Result /////
 
-  using pdcch_dl_t = mac_interface_phy_nr::pdcch_dl_t;
-  using pdcch_ul_t = mac_interface_phy_nr::pdcch_ul_t;
-
-  struct pdsch_grant {
-    srsran_dci_dl_nr_t dci = {};
-  };
-  using pdsch_list_t = srsran::bounded_vector<pdsch_grant, MAX_GRANTS>;
-  struct dl_tti_request_t {
-    tti_point    pdsch_tti;
-    pdsch_list_t pdschs;
-  };
-
-  struct pusch_grant {
-    srsran_dci_ul_nr_t dci = {};
-  };
-  using pusch_list_t = srsran::bounded_vector<pusch_grant, MAX_GRANTS>;
+  using dl_sched_t = mac_interface_phy_nr::dl_sched_t;
+  using ul_sched_t = mac_interface_phy_nr::ul_sched_t;
 
   struct pucch_resource_grant {
     uint16_t rnti;
@@ -112,22 +97,11 @@ public:
   };
   using pucch_list_t = srsran::bounded_vector<pucch_grant, MAX_GRANTS>;
 
-  struct ul_tti_request_t {
-    tti_point    pusch_tti;
-    pusch_list_t puschs;
-    pucch_list_t pucchs;
-  };
-
-  struct tti_request_t {
-    dl_tti_request_t dl_res;
-    ul_tti_request_t ul_res;
-  };
-
   virtual ~sched_nr_interface() = default;
   virtual int  cell_cfg(srsran::const_span<sched_nr_interface::cell_cfg_t> ue_cfg) = 0;
   virtual void ue_cfg(uint16_t rnti, const ue_cfg_t& ue_cfg) = 0;
-  virtual void slot_indication(tti_point tti_rx) = 0;
-  virtual int  generate_sched_result(tti_point tti_rx, uint32_t cc, tti_request_t& result) = 0;
+  virtual int  get_dl_sched(tti_point tti_rx, uint32_t cc, dl_sched_t& result) = 0;
+  virtual int  get_ul_sched(tti_point tti_rx, uint32_t cc, ul_sched_t& result) = 0;
   virtual void dl_ack_info(uint16_t rnti, uint32_t cc, uint32_t pid, uint32_t tb_idx, bool ack) = 0;
   virtual void ul_sr_info(tti_point, uint16_t rnti) = 0;

@@ -29,11 +29,11 @@ enum class pdcch_grant_type_t { sib, dl_data, ul_data };
 class slot_ue;
 
 using bwp_cfg_t = sched_nr_interface::bwp_cfg_t;
-using pdsch_list_t = sched_nr_interface::pdsch_list_t;
-using pdsch_grant  = sched_nr_interface::pdsch_grant;
-using pusch_list_t = sched_nr_interface::pusch_list_t;
-using pusch_grant  = sched_nr_interface::pusch_grant;
+using pdcch_dl_t      = mac_interface_phy_nr::pdcch_dl_t;
+using pdcch_ul_t      = mac_interface_phy_nr::pdcch_ul_t;
+using pdcch_dl_list_t = srsran::bounded_vector<pdcch_dl_t, MAX_GRANTS>;
+using pdcch_ul_list_t = srsran::bounded_vector<pdcch_ul_t, MAX_GRANTS>;
 
 class coreset_region
 {
@@ -41,8 +41,8 @@ public:
   coreset_region(const bwp_cfg_t& bwp_cfg_,
                  uint32_t         coreset_id_,
                  uint32_t         slot_idx,
-                 pdsch_list_t&    pdcch_dl_list,
-                 pusch_list_t&    pdcch_ul_list);
+                 pdcch_dl_list_t& pdcch_dl_list,
+                 pdcch_ul_list_t& pdcch_ul_list);
   void reset();
 
   /**
@@ -76,8 +76,8 @@ private:
     slot_ue* ue;
   };
   srsran::bounded_vector<alloc_record, MAX_GRANTS> dci_list;
-  pdsch_list_t& pdcch_dl_list;
-  pusch_list_t& pdcch_ul_list;
+  pdcch_dl_list_t& pdcch_dl_list;
+  pdcch_ul_list_t& pdcch_ul_list;
 
   // DFS decision tree of PDCCH grants
   struct tree_node {

@@ -25,11 +25,6 @@ namespace sched_nr_impl {
 using pdsch_bitmap = srsran::bounded_bitset<25, true>;
 using pusch_bitmap = srsran::bounded_bitset<25, true>;
 
-using pdsch_t      = sched_nr_interface::pdsch_grant;
-using pdsch_list_t = sched_nr_interface::pdsch_list_t;
-using pusch_list   = sched_nr_interface::pusch_list_t;
-
 const static size_t MAX_CORESET_PER_BWP = 3;
 using slot_coreset_list = srsran::bounded_vector<coreset_region, MAX_CORESET_PER_BWP>;
@@ -39,8 +34,8 @@ struct bwp_slot_grid {
   bool is_dl, is_ul;
   pdsch_bitmap dl_rbgs;
   pusch_bitmap ul_rbgs;
-  pdsch_list_t pdschs;
-  pusch_list_t puschs;
+  pdcch_dl_list_t dl_pdcchs;
+  pdcch_ul_list_t ul_pdcchs;
   slot_coreset_list coresets;
   pucch_list_t pucchs;
@@ -52,8 +47,8 @@ struct bwp_slot_grid {
 struct bwp_res_grid {
   bwp_res_grid(const sched_cell_params& cell_cfg_, uint32_t bwp_id_);
 
-  bwp_slot_grid&       operator[](tti_point tti) { return slots[tti.sf_idx()]; };
-  const bwp_slot_grid& operator[](tti_point tti) const { return slots[tti.sf_idx()]; };
+  bwp_slot_grid&       operator[](tti_point tti) { return slots[tti.to_uint() % slots.capacity()]; };
+  const bwp_slot_grid& operator[](tti_point tti) const { return slots[tti.to_uint() % slots.capacity()]; };
   uint32_t id() const { return bwp_id; }
   uint32_t nof_prbs() const { return cell_cfg->cell_cfg.nof_prb; }
   const sched_cell_params& cell_params() const { return *cell_cfg; }
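
As a side note on the operator[] change just above: the per-BWP slot grid is now indexed as a ring over the container capacity (tti.to_uint() % slots.capacity()) rather than by subframe index. A minimal standalone sketch of that wrap rule, with a plain vector standing in for the bounded slot container (the size 20 and the types are assumptions, not srsRAN values):

#include <cassert>
#include <cstdint>
#include <vector>

int main()
{
  std::vector<int> slots(20, 0); // stand-in for the bwp_slot_grid ring
  auto index_of = [&](uint32_t tti) { return tti % slots.size(); };

  assert(index_of(3) == 3u);
  assert(index_of(3 + 20) == 3u); // two TTIs share an entry only when a full buffer length apart
  assert(index_of(13) == 13u);    // sf_idx()-style indexing (tti % 10) would have mapped this to 3
  return 0;
}
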

@@ -22,12 +22,12 @@
 #include "srsran/adt/span.h"
 #include <condition_variable>
 #include <mutex>
-#include <semaphore.h>
 
 namespace srsenb {
 namespace sched_nr_impl {
 
-using slot_res_t = sched_nr_interface::tti_request_t;
+using dl_sched_t = sched_nr_interface::dl_sched_t;
+using ul_sched_t = sched_nr_interface::ul_sched_t;
 
 class slot_cc_worker
 {
@@ -55,27 +55,31 @@ private:
 class sched_worker_manager
 {
+  struct slot_worker_ctxt {
+    std::mutex                  slot_mutex; // lock of all workers of the same slot.
+    std::condition_variable     cvar;
+    tti_point                   tti_rx;
+    int                         nof_workers_waiting = 0;
+    std::atomic<int>            worker_count{0}; // variable shared across slot_cc_workers
+    std::vector<slot_cc_worker> workers;
+  };
+
 public:
   explicit sched_worker_manager(ue_map_t& ue_db_, const sched_params& cfg_);
   sched_worker_manager(const sched_worker_manager&) = delete;
   sched_worker_manager(sched_worker_manager&&) = delete;
   ~sched_worker_manager();
 
-  void reserve_workers(tti_point tti_rx);
-  void start_tti(tti_point tti_rx);
-  bool run_tti(tti_point tti_rx, uint32_t cc, sched_nr_interface::tti_request_t& req);
-  void end_tti(tti_point tti_rx);
+  void start_slot(tti_point tti_rx, srsran::move_callback<void()> process_feedback);
+  bool run_slot(tti_point tti_rx, uint32_t cc);
+  void release_slot(tti_point tti_rx);
+  bool get_sched_result(tti_point pdcch_tti, uint32_t cc, dl_sched_t& dl_res, ul_sched_t& ul_res);
 
 private:
   const sched_params& cfg;
   ue_map_t&           ue_db;
+  std::mutex          ue_db_mutex;
 
-  struct slot_worker_ctxt {
-    sem_t                       sf_sem; // lock of all workers of the same slot. unlocked by last slot_cc_worker
-    tti_point                   tti_rx;
-    std::atomic<int>            worker_count{0}; // variable shared across slot_cc_workers
-    std::vector<slot_cc_worker> workers;
-  };
   std::vector<std::unique_ptr<slot_worker_ctxt> > slot_ctxts;
 
   srsran::bounded_vector<cell_res_grid, SCHED_NR_MAX_CARRIERS> cell_grid_list;
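
The header change above also swaps the per-slot guard from a POSIX semaphore to a std::mutex plus std::condition_variable with a waiter counter. A minimal sketch of that pattern under simplified assumptions (plain int slot ids, no srsRAN types), just to show the wait/claim/release shape used by start_slot()/release_slot():

#include <condition_variable>
#include <mutex>

// Sketch only: mirrors the shape of the new slot_worker_ctxt fields, not the real class.
struct slot_guard {
  std::mutex              m;
  std::condition_variable cvar;
  int                     nof_waiting = 0;
  int                     active_slot = -1; // -1 means no slot in flight

  // Block until any previous slot is released, then claim `slot`.
  void acquire(int slot)
  {
    std::unique_lock<std::mutex> lock(m);
    while (active_slot != -1 && active_slot != slot) {
      ++nof_waiting;
      cvar.wait(lock);
      --nof_waiting;
    }
    active_slot = slot;
  }

  // Release the current slot and wake one waiter, like release_slot()/cvar.notify_one().
  void release()
  {
    std::lock_guard<std::mutex> lock(m);
    active_slot = -1;
    if (nof_waiting > 0) {
      cvar.notify_one();
    }
  }
};

int main()
{
  slot_guard guard;
  guard.acquire(0);
  guard.release();
  guard.acquire(1); // would have blocked here if slot 0 were still active
  guard.release();
  return 0;
}
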

@@ -16,10 +16,7 @@
 namespace srsenb {
 
-using sched_nr_impl::sched_worker_manager;
-using sched_nr_impl::ue;
-using sched_nr_impl::ue_carrier;
-using sched_nr_impl::ue_map_t;
+using namespace sched_nr_impl;
 
 static int assert_ue_cfg_valid(uint16_t rnti, const sched_nr_interface::ue_cfg_t& uecfg);
@@ -44,7 +41,7 @@ public:
     feedback_list.back().cc       = cc;
     feedback_list.back().callback = std::move(event);
   }
 
-  void new_tti()
+  void new_slot()
   {
     {
       std::lock_guard<std::mutex> lock(common_mutex);
@@ -81,6 +78,68 @@ private:
 ///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
 
+class sched_result_manager
+{
+public:
+  explicit sched_result_manager(uint32_t nof_cc_)
+  {
+    for (auto& v : results) {
+      v.resize(nof_cc_);
+    }
+  }
+
+  dl_sched_t& add_dl_result(tti_point tti, uint32_t cc)
+  {
+    if (not has_dl_result(tti, cc)) {
+      results[tti.to_uint()][cc].tti_dl = tti;
+      results[tti.to_uint()][cc].dl_res = {};
+    }
+    return results[tti.to_uint()][cc].dl_res;
+  }
+  ul_sched_t& add_ul_result(tti_point tti, uint32_t cc)
+  {
+    if (not has_ul_result(tti, cc)) {
+      results[tti.to_uint()][cc].tti_ul = tti;
+      results[tti.to_uint()][cc].ul_res = {};
+    }
+    return results[tti.to_uint()][cc].ul_res;
+  }
+
+  bool has_dl_result(tti_point tti, uint32_t cc) const { return results[tti.to_uint()][cc].tti_dl == tti; }
+  bool has_ul_result(tti_point tti, uint32_t cc) const { return results[tti.to_uint()][cc].tti_ul == tti; }
+
+  dl_sched_t pop_dl_result(tti_point tti, uint32_t cc)
+  {
+    if (has_dl_result(tti, cc)) {
+      results[tti.to_uint()][cc].tti_dl.reset();
+      return results[tti.to_uint()][cc].dl_res;
+    }
+    return {};
+  }
+
+  ul_sched_t pop_ul_result(tti_point tti, uint32_t cc)
+  {
+    if (has_ul_result(tti, cc)) {
+      results[tti.to_uint()][cc].tti_ul.reset();
+      return results[tti.to_uint()][cc].ul_res;
+    }
+    return {};
+  }
+
+private:
+  struct slot_result_t {
+    tti_point  tti_dl;
+    tti_point  tti_ul;
+    dl_sched_t dl_res;
+    ul_sched_t ul_res;
+  };
+
+  srsran::circular_array<std::vector<slot_result_t>, TTIMOD_SZ> results;
+};
+
+///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+
 sched_nr::sched_nr(const sched_cfg_t& sched_cfg) :
   cfg(sched_cfg), pending_events(new ue_event_manager(ue_db)), logger(srslog::fetch_basic_logger("MAC"))
 {}
@@ -94,6 +153,7 @@ int sched_nr::cell_cfg(srsran::const_span<cell_cfg_t> cell_list)
     cfg.cells.emplace_back(cc, cell_list[cc], cfg.sched_cfg);
   }
 
+  pending_results.reset(new sched_result_manager(cell_list.size()));
   sched_workers.reset(new sched_nr_impl::sched_worker_manager(ue_db, cfg));
 
   return SRSRAN_SUCCESS;
 }
@@ -113,33 +173,50 @@ void sched_nr::ue_cfg_impl(uint16_t rnti, const ue_cfg_t& uecfg)
   }
 }
 
-void sched_nr::slot_indication(tti_point tti_rx)
-{
-  // Lock slot workers for provided tti_rx
-  sched_workers->reserve_workers(tti_rx);
-
-  {
-    // synchronize {tti,cc} state. e.g. reserve UE resources for {tti,cc} decision, process feedback
-    std::lock_guard<std::mutex> lock(ue_db_mutex);
-    // Process pending events
-    pending_events->new_tti();
-
-    sched_workers->start_tti(tti_rx);
-  }
-}
-
 /// Generate {tti,cc} scheduling decision
-int sched_nr::generate_sched_result(tti_point tti_rx, uint32_t cc, tti_request_t& req)
+int sched_nr::generate_slot_result(tti_point pdcch_tti, uint32_t cc)
 {
+  tti_point tti_rx = pdcch_tti - TX_ENB_DELAY;
+
+  // Lock carrier workers for provided tti_rx
+  sched_workers->start_slot(tti_rx, [this]() {
+    // In case it is first worker for the given slot
+    // synchronize {tti,cc} state. e.g. reserve UE resources for {tti,cc} decision, process feedback
+    pending_events->new_slot();
+  });
+
   // unlocked, parallel region
-  bool all_workers_finished = sched_workers->run_tti(tti_rx, cc, req);
-
+  bool all_workers_finished = sched_workers->run_slot(tti_rx, cc);
   if (all_workers_finished) {
     // once all workers of the same subframe finished, synchronize sched outcome with ue_db
-    std::lock_guard<std::mutex> lock(ue_db_mutex);
-    sched_workers->end_tti(tti_rx);
+    sched_workers->release_slot(tti_rx);
   }
 
+  // Copy results to intermediate buffer
+  dl_sched_t& dl_res = pending_results->add_dl_result(pdcch_tti, cc);
+  ul_sched_t& ul_res = pending_results->add_ul_result(pdcch_tti, cc);
+  sched_workers->get_sched_result(pdcch_tti, cc, dl_res, ul_res);
+
+  return SRSRAN_SUCCESS;
+}
+
+int sched_nr::get_dl_sched(tti_point tti_tx, uint32_t cc, dl_sched_t& result)
+{
+  if (not pending_results->has_dl_result(tti_tx, cc)) {
+    generate_slot_result(tti_tx, cc);
+  }
+
+  result = pending_results->pop_dl_result(tti_tx, cc);
+  return SRSRAN_SUCCESS;
+}
+
+int sched_nr::get_ul_sched(tti_point tti_rx, uint32_t cc, ul_sched_t& result)
+{
+  if (not pending_results->has_ul_result(tti_rx, cc)) {
+    return SRSRAN_ERROR;
+  }
+
+  result = pending_results->pop_ul_result(tti_rx, cc);
   return SRSRAN_SUCCESS;
 }
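
One consequence of the new sched_nr.cc code above is an ordering contract between the two getters: get_dl_sched() generates and buffers the {tti, cc} result on demand, while get_ul_sched() only pops a result that was already buffered and otherwise returns SRSRAN_ERROR. A toy sketch of that contract (a mock buffer with placeholder values, not the real sched_result_manager):

#include <cassert>
#include <cstdint>
#include <map>
#include <utility>

// Toy stand-in for the result buffering contract described above.
struct toy_result_buffer {
  std::map<std::pair<uint32_t, uint32_t>, int> ul_results;

  int get_dl_sched(uint32_t tti, uint32_t cc)
  {
    ul_results[{tti, cc}] = 1; // generate_slot_result() analogue: buffer the matching UL result
    return 0;                  // SRSRAN_SUCCESS analogue
  }
  int get_ul_sched(uint32_t tti, uint32_t cc)
  {
    auto it = ul_results.find({tti, cc});
    if (it == ul_results.end()) {
      return -1; // SRSRAN_ERROR analogue: nothing generated yet for this {tti, cc}
    }
    ul_results.erase(it); // pop semantics: a result can be fetched once
    return 0;
  }
};

int main()
{
  toy_result_buffer buf;
  assert(buf.get_ul_sched(4, 0) != 0); // UL requested before DL: no buffered result yet
  assert(buf.get_dl_sched(4, 0) == 0);
  assert(buf.get_ul_sched(4, 0) == 0); // after the DL getter ran, the UL result is available once
  return 0;
}
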

@@ -19,8 +19,8 @@ namespace sched_nr_impl {
 coreset_region::coreset_region(const bwp_cfg_t& bwp_cfg_,
                                uint32_t         coreset_id_,
                                uint32_t         slot_idx_,
-                               pdsch_list_t&    dl_list_,
-                               pusch_list_t&    ul_list_) :
+                               pdcch_dl_list_t& dl_list_,
+                               pdcch_ul_list_t& ul_list_) :
   bwp_cfg(&bwp_cfg_),
   coreset_cfg(&bwp_cfg_.coresets[coreset_id_ - 1].value()),
   coreset_id(coreset_id_),
@@ -156,10 +156,10 @@ bool coreset_region::alloc_dfs_node(const alloc_record& record, uint32_t start_d
   alloc_dfs.push_back(node);
   // set new DCI position
   if (record.alloc_type == pdcch_grant_type_t::ul_data) {
-    pusch_grant& pdcch_ul = pdcch_ul_list[record.idx];
+    pdcch_ul_t& pdcch_ul = pdcch_ul_list[record.idx];
     pdcch_ul.dci.ctx.location = node.dci_pos;
   } else {
-    pdsch_grant& pdcch_dl = pdcch_dl_list[record.idx];
+    pdcch_dl_t& pdcch_dl = pdcch_dl_list[record.idx];
     pdcch_dl.dci.ctx.location = node.dci_pos;
   }
   return true;

@@ -27,7 +27,7 @@ bwp_slot_grid::bwp_slot_grid(const sched_cell_params& cell_params, uint32_t bwp_
   is_ul(srsran_tdd_nr_is_ul(&cell_params.cell_cfg.tdd, NUMEROLOGY_IDX, slot_idx_))
 {
   const uint32_t coreset_id = 1; // Note: for now only one coreset per BWP supported
-  coresets.emplace_back(cell_params.cell_cfg.bwps[0], coreset_id, slot_idx_, pdschs, puschs);
+  coresets.emplace_back(cell_params.cell_cfg.bwps[0], coreset_id, slot_idx_, dl_pdcchs, ul_pdcchs);
 }
@@ -37,15 +37,15 @@ void bwp_slot_grid::reset()
   }
   dl_rbgs.reset();
   ul_rbgs.reset();
-  pdschs.clear();
-  puschs.clear();
+  dl_pdcchs.clear();
+  ul_pdcchs.clear();
   pucchs.clear();
 }
 
 bwp_res_grid::bwp_res_grid(const sched_cell_params& cell_cfg_, uint32_t bwp_id_) : bwp_id(bwp_id_), cell_cfg(&cell_cfg_)
 {
-  for (uint32_t sl = 0; sl < SCHED_NR_NOF_SUBFRAMES; ++sl) {
-    slots.emplace_back(cell_cfg_, bwp_id, sl);
+  for (uint32_t sl = 0; sl < slots.capacity(); ++sl) {
+    slots.emplace_back(cell_cfg_, bwp_id, sl % static_cast<uint32_t>(SRSRAN_NSLOTS_PER_FRAME_NR(0u)));
   }
 }
@@ -75,7 +75,7 @@ alloc_result slot_bwp_sched::alloc_pdsch(slot_ue& ue, const rbgmask_t& dl_mask)
     logger.warning("SCHED: Trying to allocate PDSCH in TDD non-DL slot index=%d", bwp_pdsch_slot.slot_idx);
     return alloc_result::no_sch_space;
   }
-  pdsch_list_t& pdsch_grants = bwp_pdsch_slot.pdschs;
+  pdcch_dl_list_t& pdsch_grants = bwp_pdsch_slot.dl_pdcchs;
   if (pdsch_grants.full()) {
     logger.warning("SCHED: Maximum number of DL allocations reached");
     return alloc_result::no_grant_space;
@@ -109,7 +109,7 @@ alloc_result slot_bwp_sched::alloc_pdsch(slot_ue& ue, const rbgmask_t& dl_mask)
   }
 
   // Allocation Successful
-  pdsch_grant& pdcch = bwp_pdcch_slot.pdschs.back();
+  pdcch_dl_t& pdcch = bwp_pdcch_slot.dl_pdcchs.back();
   fill_dci_ue_cfg(ue, dl_mask, bwp_grid.cell_params(), pdcch.dci);
   pdsch_mask |= dl_mask;
   bwp_uci_slot.pucchs.emplace_back();
@@ -127,13 +127,14 @@ alloc_result slot_bwp_sched::alloc_pusch(slot_ue& ue, const rbgmask_t& ul_mask)
     logger.warning("SCHED: Trying to allocate PUSCH for rnti=0x%x with no available HARQs", ue.rnti);
     return alloc_result::no_rnti_opportunity;
   }
+  auto& bwp_pdcch_slot = bwp_grid[ue.pdcch_tti];
   auto& bwp_pusch_slot = bwp_grid[ue.pusch_tti];
   if (not bwp_pusch_slot.is_ul) {
     logger.warning("SCHED: Trying to allocate PUSCH in TDD non-UL slot index=%d", bwp_pusch_slot.slot_idx);
     return alloc_result::no_sch_space;
   }
-  pusch_list& pusch_grants = bwp_pusch_slot.puschs;
-  if (pusch_grants.full()) {
+  pdcch_ul_list_t& pdcchs = bwp_pdcch_slot.ul_pdcchs;
+  if (pdcchs.full()) {
     logger.warning("SCHED: Maximum number of UL allocations reached");
     return alloc_result::no_grant_space;
   }
@@ -142,7 +143,7 @@ alloc_result slot_bwp_sched::alloc_pusch(slot_ue& ue, const rbgmask_t& ul_mask)
     return alloc_result::sch_collision;
   }
   const uint32_t aggr_idx = 2, coreset_id = 0;
-  if (not bwp_grid[ue.pdcch_tti].coresets[coreset_id].alloc_dci(pdcch_grant_type_t::ul_data, aggr_idx, &ue)) {
+  if (not bwp_pdcch_slot.coresets[coreset_id].alloc_dci(pdcch_grant_type_t::ul_data, aggr_idx, &ue)) {
     // Could not find space in PDCCH
     return alloc_result::no_cch_space;
   }
@@ -159,8 +160,8 @@ alloc_result slot_bwp_sched::alloc_pusch(slot_ue& ue, const rbgmask_t& ul_mask)
   }
 
   // Allocation Successful
-  pdsch_grant& pdsch = bwp_grid[ue.pdcch_tti].pdschs.back();
-  fill_dci_ue_cfg(ue, ul_mask, bwp_grid.cell_params(), pdsch.dci);
+  pdcch_ul_t& pdcch = pdcchs.back();
+  fill_dci_ue_cfg(ue, ul_mask, bwp_grid.cell_params(), pdcch.dci);
   pusch_mask |= ul_mask;
 
   return alloc_result::success;

@@ -103,7 +103,6 @@ sched_worker_manager::sched_worker_manager(ue_map_t& ue_db_, const sched_params&
   slot_ctxts.resize(cfg.sched_cfg.nof_concurrent_subframes);
   for (size_t i = 0; i < cfg.sched_cfg.nof_concurrent_subframes; ++i) {
     slot_ctxts[i].reset(new slot_worker_ctxt());
-    sem_init(&slot_ctxts[i]->sf_sem, 0, 1);
     slot_ctxts[i]->workers.reserve(cfg.cells.size());
     for (uint32_t cc = 0; cc < cfg.cells.size(); ++cc) {
       slot_ctxts[i]->workers.emplace_back(cfg.cells[cc], cell_grid_list[cc]);
@@ -111,44 +110,47 @@ sched_worker_manager::sched_worker_manager(ue_map_t& ue_db_, const sched_params&
   }
 }
 
-sched_worker_manager::~sched_worker_manager()
-{
-  // acquire all slot worker contexts
-  for (auto& slot_ctxt : slot_ctxts) {
-    sem_wait(&slot_ctxt->sf_sem);
-  }
-  // destroy all slot worker contexts
-  for (auto& slot_ctxt : slot_ctxts) {
-    sem_destroy(&slot_ctxt->sf_sem);
-  }
-}
+sched_worker_manager::~sched_worker_manager() = default;
 
 sched_worker_manager::slot_worker_ctxt& sched_worker_manager::get_sf(tti_point tti_rx)
 {
   return *slot_ctxts[tti_rx.to_uint() % slot_ctxts.size()];
 }
 
-void sched_worker_manager::reserve_workers(tti_point tti_rx_)
+void sched_worker_manager::start_slot(tti_point tti_rx, srsran::move_callback<void()> process_feedback)
 {
-  // lock if slot worker is already being used
-  auto& sf_worker_ctxt = get_sf(tti_rx_);
-  sem_wait(&sf_worker_ctxt.sf_sem);
+  auto& sf_worker_ctxt = get_sf(tti_rx);
 
-  sf_worker_ctxt.tti_rx = tti_rx_;
-  sf_worker_ctxt.worker_count.store(static_cast<int>(sf_worker_ctxt.workers.size()), std::memory_order_relaxed);
-}
-
-void sched_worker_manager::start_tti(tti_point tti_rx_)
-{
-  auto& sf_worker_ctxt = get_sf(tti_rx_);
-  srsran_assert(sf_worker_ctxt.tti_rx == tti_rx_, "invalid run_tti(tti, cc) arguments");
+  std::unique_lock<std::mutex> lock(sf_worker_ctxt.slot_mutex);
+  while ((sf_worker_ctxt.tti_rx.is_valid() and sf_worker_ctxt.tti_rx != tti_rx)) {
+    // wait for previous slot to finish
+    sf_worker_ctxt.nof_workers_waiting++;
+    sf_worker_ctxt.cvar.wait(lock);
+    sf_worker_ctxt.nof_workers_waiting--;
+  }
+  if (sf_worker_ctxt.tti_rx == tti_rx) {
+    // another worker with the same slot idx already started
+    return;
+  }
 
-  for (uint32_t cc = 0; cc < sf_worker_ctxt.workers.size(); ++cc) {
-    sf_worker_ctxt.workers[cc].start(sf_worker_ctxt.tti_rx, ue_db);
+  {
+    std::lock_guard<std::mutex> db_lock(ue_db_mutex);
+
+    process_feedback();
+
+    for (uint32_t cc = 0; cc < sf_worker_ctxt.workers.size(); ++cc) {
+      sf_worker_ctxt.workers[cc].start(tti_rx, ue_db);
+    }
+  }
+
+  sf_worker_ctxt.tti_rx = tti_rx;
+  sf_worker_ctxt.worker_count.store(static_cast<int>(sf_worker_ctxt.workers.size()), std::memory_order_relaxed);
+  if (sf_worker_ctxt.nof_workers_waiting > 0) {
+    sf_worker_ctxt.cvar.notify_all();
   }
 }
 
-bool sched_worker_manager::run_tti(tti_point tti_rx_, uint32_t cc, slot_res_t& tti_req)
+bool sched_worker_manager::run_slot(tti_point tti_rx_, uint32_t cc)
 {
   auto& sf_worker_ctxt = get_sf(tti_rx_);
   srsran_assert(sf_worker_ctxt.tti_rx == tti_rx_, "invalid run_tti(tti, cc) arguments");
@@ -156,15 +158,6 @@ bool sched_worker_manager::run_tti(tti_point tti_rx_, uint32_t cc, slot_res_t& t
   // Get {tti, cc} scheduling decision
   sf_worker_ctxt.workers[cc].run();
 
-  // Copy requested TTI DL and UL sched result
-  tti_req.dl_res.pdsch_tti = tti_rx_ + TX_ENB_DELAY;
-  tti_req.dl_res.pdschs    = cell_grid_list[cc].bwps[0][tti_req.dl_res.pdsch_tti].pdschs;
-  cell_grid_list[cc].bwps[0][tti_req.dl_res.pdsch_tti].reset();
-  tti_req.ul_res.pusch_tti = tti_rx_ + TX_ENB_DELAY;
-  tti_req.ul_res.puschs    = cell_grid_list[cc].bwps[0][tti_req.ul_res.pusch_tti].puschs;
-  tti_req.ul_res.pucchs    = cell_grid_list[cc].bwps[0][tti_req.ul_res.pusch_tti].pucchs;
-  cell_grid_list[cc].bwps[0][tti_req.ul_res.pusch_tti].reset();
-
   // decrement the number of active workers
   int rem_workers = sf_worker_ctxt.worker_count.fetch_sub(1, std::memory_order_release) - 1;
   srsran_assert(rem_workers >= 0, "invalid number of calls to run_tti(tti, cc)");
@@ -172,18 +165,45 @@ bool sched_worker_manager::run_tti(tti_point tti_rx_, uint32_t cc, slot_res_t& t
   return rem_workers == 0;
 }
 
-void sched_worker_manager::end_tti(tti_point tti_rx_)
+void sched_worker_manager::release_slot(tti_point tti_rx_)
 {
   auto& sf_worker_ctxt = get_sf(tti_rx_);
   srsran_assert(sf_worker_ctxt.tti_rx == tti_rx_, "invalid run_tti(tti, cc) arguments");
   srsran_assert(sf_worker_ctxt.worker_count == 0, "invalid number of calls to run_tti(tti, cc)");
 
-  // All the workers of the same TTI have finished. Synchronize scheduling decisions with UEs state
-  for (slot_cc_worker& worker : sf_worker_ctxt.workers) {
-    worker.end_tti();
+  {
+    std::lock_guard<std::mutex> lock(ue_db_mutex);
+
+    // All the workers of the same slot have finished. Synchronize scheduling decisions with UEs state
+    for (slot_cc_worker& worker : sf_worker_ctxt.workers) {
+      worker.end_tti();
+    }
   }
 
-  sem_post(&sf_worker_ctxt.sf_sem);
+  std::unique_lock<std::mutex> lock(sf_worker_ctxt.slot_mutex);
+  sf_worker_ctxt.tti_rx = {};
+  if (sf_worker_ctxt.nof_workers_waiting > 0) {
+    sf_worker_ctxt.cvar.notify_one();
+  }
+}
+
+bool sched_worker_manager::get_sched_result(tti_point pdcch_tti, uint32_t cc, dl_sched_t& dl_res, ul_sched_t& ul_res)
+{
+  auto& pdcch_bwp_slot = cell_grid_list[cc].bwps[0][pdcch_tti];
+
+  dl_res.pdcch_dl = pdcch_bwp_slot.dl_pdcchs;
+  dl_res.pdcch_ul = pdcch_bwp_slot.ul_pdcchs;
+
+  ul_res.pucch.resize(pdcch_bwp_slot.pucchs.size());
+  for (uint32_t i = 0; i < pdcch_bwp_slot.pucchs.size(); ++i) {
+    ul_res.pucch[i].uci_cfg.pucch.rnti        = pdcch_bwp_slot.pucchs[i].resource.rnti;
+    ul_res.pucch[i].uci_cfg.pucch.resource_id = pdcch_bwp_slot.pucchs[i].resource.resource_id;
+  }
+
+  // clear up BWP slot
+  pdcch_bwp_slot.reset();
+
+  return true;
 }
 
 } // namespace sched_nr_impl
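
To tie the worker changes together: sched_nr::generate_slot_result() (earlier in this commit) drives the sequence start_slot() → run_slot() per carrier → release_slot() by the last carrier → get_sched_result(). Below is a hedged, self-contained sketch of that flow with placeholder work instead of real slot_cc_worker scheduling; the names and types are illustrative, not srsRAN's, and the real manager additionally tracks the slot's tti and per-slot contexts.

#include <atomic>
#include <cstdio>
#include <mutex>
#include <thread>
#include <vector>

// Toy version of the per-slot worker sequence used by the new API.
struct toy_worker_manager {
  std::mutex       mtx;
  bool             slot_started = false;
  std::atomic<int> remaining{0};

  void start_slot(int nof_cc, void (*process_feedback)())
  {
    std::lock_guard<std::mutex> lock(mtx);
    if (slot_started) {
      return; // another carrier already set up this slot
    }
    process_feedback();      // only the first caller processes pending UE feedback
    remaining.store(nof_cc); // number of carriers that still have to run
    slot_started = true;
  }
  bool run_slot(int cc)
  {
    std::printf("carrier %d scheduled\n", cc); // placeholder for workers[cc].run()
    return remaining.fetch_sub(1) - 1 == 0;    // true only for the last carrier to finish
  }
  void release_slot() { slot_started = false; } // sync UE state and free the slot for reuse
};

int main()
{
  toy_worker_manager       mgr;
  const int                nof_cc = 2;
  std::vector<std::thread> carriers;
  for (int cc = 0; cc < nof_cc; ++cc) {
    carriers.emplace_back([&mgr, cc, nof_cc]() {
      mgr.start_slot(nof_cc, []() { std::puts("feedback processed once"); });
      if (mgr.run_slot(cc)) {
        mgr.release_slot();
        std::puts("slot released by last carrier");
      }
    });
  }
  for (auto& t : carriers) {
    t.join();
  }
  return 0;
}
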

@@ -44,8 +44,8 @@ int sched_nr_ue_sim::update(const sched_nr_cc_output_res_t& cc_out)
 void sched_nr_ue_sim::update_dl_harqs(const sched_nr_cc_output_res_t& cc_out)
 {
   uint32_t cc = cc_out.cc;
-  for (uint32_t i = 0; i < cc_out.dl_cc_result->pdschs.size(); ++i) {
-    const auto& data = cc_out.dl_cc_result->pdschs[i];
+  for (uint32_t i = 0; i < cc_out.dl_cc_result->pdcch_dl.size(); ++i) {
+    const auto& data = cc_out.dl_cc_result->pdcch_dl[i];
     if (data.dci.ctx.rnti != ctxt.rnti) {
       continue;
     }
@@ -98,7 +98,7 @@ int sched_nr_sim_base::add_user(uint16_t rnti, const sched_nr_interface::ue_cfg_
   return SRSRAN_SUCCESS;
 }
 
-void sched_nr_sim_base::slot_indication(srsran::tti_point tti_rx)
+void sched_nr_sim_base::new_slot(srsran::tti_point tti_rx)
 {
   {
     std::unique_lock<std::mutex> lock(mutex);
@@ -113,7 +113,6 @@ void sched_nr_sim_base::slot_indication(srsran::tti_point tti_rx)
       apply_tti_events(ue.second.get_ctxt(), events);
     }
   }
-  sched_ptr->slot_indication(tti_rx);
 }
 
 void sched_nr_sim_base::update(sched_nr_cc_output_res_t& cc_out)

@@ -20,10 +20,10 @@
 namespace srsenb {
 
 struct sched_nr_cc_output_res_t {
   tti_point tti_rx;
   uint32_t  cc;
-  sched_nr_interface::dl_tti_request_t* dl_cc_result;
-  sched_nr_interface::ul_tti_request_t* ul_cc_result;
+  sched_nr_interface::dl_sched_t* dl_cc_result;
+  sched_nr_interface::ul_sched_t* ul_cc_result;
 };
 
 struct ue_nr_cc_ctxt_t {
@@ -75,7 +75,7 @@ public:
   int add_user(uint16_t rnti, const sched_nr_interface::ue_cfg_t& ue_cfg_, uint32_t preamble_idx);
 
-  void slot_indication(srsran::tti_point tti_rx);
+  void new_slot(srsran::tti_point tti_rx);
   void update(sched_nr_cc_output_res_t& cc_out);
 
   sched_nr_ue_sim& at(uint16_t rnti) { return ue_db.at(rnti); }

@@ -17,6 +17,8 @@
 namespace srsenb {
 
+using dl_sched_t = sched_nr_interface::dl_sched_t;
+
 srsran_coreset_t get_default_coreset()
 {
   srsran_coreset_t coreset{};
@@ -87,37 +89,46 @@ sched_nr_interface::ue_cfg_t get_default_ue_cfg(uint32_t nof_cc)
 }
 
 struct task_job_manager {
   std::mutex mutex;
-  std::condition_variable cond_var;
-  int                     tasks       = 0;
-  int                     res_count   = 0;
-  int                     pdsch_count = 0;
-  int                     max_tasks   = std::numeric_limits<int>::max() / 2;
-  srslog::basic_logger&   test_logger = srslog::fetch_basic_logger("TEST");
+  int                   res_count   = 0;
+  int                   pdsch_count = 0;
+  srslog::basic_logger& test_logger = srslog::fetch_basic_logger("TEST");
+  struct slot_guard {
+    int                     count = 0;
+    std::condition_variable cvar;
+  };
+  srsran::bounded_vector<slot_guard, 10> slot_counter{};
 
-  void start_task()
+  explicit task_job_manager(int max_concurrent_slots = 4) : slot_counter(max_concurrent_slots) {}
+
+  void start_slot(tti_point tti, int nof_sectors)
   {
     std::unique_lock<std::mutex> lock(mutex);
-    while (tasks >= max_tasks) {
-      cond_var.wait(lock);
+    auto& sl = slot_counter[tti.to_uint() % slot_counter.size()];
+    while (sl.count > 0) {
+      sl.cvar.wait(lock);
     }
-    tasks++;
+    sl.count = nof_sectors;
   }
-  void finish_task(const sched_nr_interface::tti_request_t& res)
+  void finish_cc(tti_point tti, const dl_sched_t& dl_res, const sched_nr_interface::ul_sched_t& ul_res)
   {
     std::unique_lock<std::mutex> lock(mutex);
-    TESTASSERT(res.dl_res.pdschs.size() <= 1);
+    TESTASSERT(dl_res.pdcch_dl.size() <= 1);
     res_count++;
-    pdsch_count += res.dl_res.pdschs.size();
-    if (tasks-- >= max_tasks or tasks == 0) {
-      cond_var.notify_one();
+    pdsch_count += dl_res.pdcch_dl.size();
+    auto& sl = slot_counter[tti.to_uint() % slot_counter.size()];
+    if (--sl.count == 0) {
+      sl.cvar.notify_one();
     }
   }
   void wait_task_finish()
   {
     std::unique_lock<std::mutex> lock(mutex);
-    while (tasks > 0) {
-      cond_var.wait(lock);
+    for (auto& sl : slot_counter) {
+      while (sl.count > 0) {
+        sl.cvar.wait(lock);
+      }
+      sl.count = 1;
     }
   }
   void print_results() const
@@ -142,17 +153,19 @@ void sched_nr_cfg_serialized_test()
   sched_tester.add_user(0x46, uecfg, 0);
 
   for (uint32_t nof_ttis = 0; nof_ttis < max_nof_ttis; ++nof_ttis) {
-    tti_point tti(nof_ttis % 10240);
-    sched_tester.slot_indication(tti);
+    tti_point tti_rx(nof_ttis % 10240);
+    tti_point tti_tx = tti_rx + TX_ENB_DELAY;
+    tasks.start_slot(tti_rx, nof_sectors);
+    sched_tester.new_slot(tti_tx);
     for (uint32_t cc = 0; cc < cells_cfg.size(); ++cc) {
-      tasks.start_task();
-      sched_nr_interface::tti_request_t res;
-      TESTASSERT(sched_tester.get_sched()->generate_sched_result(tti, cc, res) == SRSRAN_SUCCESS);
-      sched_nr_cc_output_res_t out{tti, cc, &res.dl_res, &res.ul_res};
+      sched_nr_interface::dl_sched_t dl_res;
+      sched_nr_interface::ul_sched_t ul_res;
+      TESTASSERT(sched_tester.get_sched()->get_dl_sched(tti_tx, cc, dl_res) == SRSRAN_SUCCESS);
+      TESTASSERT(sched_tester.get_sched()->get_ul_sched(tti_tx, cc, ul_res) == SRSRAN_SUCCESS);
+      sched_nr_cc_output_res_t out{tti_tx, cc, &dl_res, &ul_res};
       sched_tester.update(out);
-      tasks.finish_task(res);
-      TESTASSERT(not srsran_tdd_nr_is_dl(&cells_cfg[cc].tdd, 0, (tti + TX_ENB_DELAY).sf_idx()) or
-                 res.dl_res.pdschs.size() == 1);
+      tasks.finish_cc(tti_rx, dl_res, ul_res);
+      TESTASSERT(not srsran_tdd_nr_is_dl(&cells_cfg[cc].tdd, 0, (tti_tx).sf_idx()) or dl_res.pdcch_dl.size() == 1);
     }
   }
@@ -162,11 +175,12 @@ void sched_nr_cfg_serialized_test()
 
 void sched_nr_cfg_parallel_cc_test()
 {
+  uint32_t nof_sectors  = 4;
   uint32_t max_nof_ttis = 1000;
   task_job_manager tasks;
 
   sched_nr_interface::sched_cfg_t cfg;
-  std::vector<sched_nr_interface::cell_cfg_t> cells_cfg = get_default_cells_cfg(4);
+  std::vector<sched_nr_interface::cell_cfg_t> cells_cfg = get_default_cells_cfg(nof_sectors);
 
   sched_nr_sim_base sched_tester(cfg, cells_cfg, "Parallel CC Test");
@@ -174,16 +188,19 @@ void sched_nr_cfg_parallel_cc_test()
   sched_tester.add_user(0x46, uecfg, 0);
 
   for (uint32_t nof_ttis = 0; nof_ttis < max_nof_ttis; ++nof_ttis) {
-    tti_point tti(nof_ttis % 10240);
-    sched_tester.slot_indication(tti);
+    tti_point tti_rx(nof_ttis % 10240);
+    tti_point tti_tx = tti_rx + TX_ENB_DELAY;
+    tasks.start_slot(tti_tx, nof_sectors);
+    sched_tester.new_slot(tti_tx);
     for (uint32_t cc = 0; cc < cells_cfg.size(); ++cc) {
-      tasks.start_task();
-      srsran::get_background_workers().push_task([cc, tti, &tasks, &sched_tester]() {
-        sched_nr_interface::tti_request_t res;
-        TESTASSERT(sched_tester.get_sched()->generate_sched_result(tti, cc, res) == SRSRAN_SUCCESS);
-        sched_nr_cc_output_res_t out{tti, cc, &res.dl_res, &res.ul_res};
+      srsran::get_background_workers().push_task([cc, tti_tx, &tasks, &sched_tester]() {
+        sched_nr_interface::dl_sched_t dl_res;
+        sched_nr_interface::ul_sched_t ul_res;
+        TESTASSERT(sched_tester.get_sched()->get_dl_sched(tti_tx, cc, dl_res) == SRSRAN_SUCCESS);
+        TESTASSERT(sched_tester.get_sched()->get_ul_sched(tti_tx, cc, ul_res) == SRSRAN_SUCCESS);
+        sched_nr_cc_output_res_t out{tti_tx, cc, &dl_res, &ul_res};
         sched_tester.update(out);
-        tasks.finish_task(res);
+        tasks.finish_cc(tti_tx, dl_res, ul_res);
       });
     }
   }
@@ -191,6 +208,7 @@ void sched_nr_cfg_parallel_cc_test()
   tasks.wait_task_finish();
 
   tasks.print_results();
+  TESTASSERT(tasks.pdsch_count == (int)(max_nof_ttis * nof_sectors * 0.6));
 }
 
 void sched_nr_cfg_parallel_sf_test()
@@ -211,13 +229,14 @@ void sched_nr_cfg_parallel_sf_test()
   for (uint32_t nof_ttis = 0; nof_ttis < max_nof_ttis; ++nof_ttis) {
     tti_point tti(nof_ttis % 10240);
-    sched.slot_indication(tti);
+    tasks.start_slot(tti, nof_sectors);
     for (uint32_t cc = 0; cc < cells_cfg.size(); ++cc) {
-      tasks.start_task();
       srsran::get_background_workers().push_task([cc, &sched, tti, &tasks]() {
-        sched_nr_interface::tti_request_t res;
-        TESTASSERT(sched.generate_sched_result(tti, cc, res) == SRSRAN_SUCCESS);
-        tasks.finish_task(res);
+        sched_nr_interface::dl_sched_t dl_res;
+        sched_nr_interface::ul_sched_t ul_res;
+        TESTASSERT(sched.get_dl_sched(tti, cc, dl_res) == SRSRAN_SUCCESS);
+        TESTASSERT(sched.get_ul_sched(tti, cc, ul_res) == SRSRAN_SUCCESS);
+        tasks.finish_cc(tti, dl_res, ul_res);
       });
     }
   }
