sched,nr: fix time-domain parallelization in the sched NR test; add a util class to handle acquisition of boolean resources

Branch: master
Commit: d950433cbd (parent: 35a236b1b9)
Authored by Francisco, committed by Francisco Paisana

@@ -44,6 +44,46 @@ struct sched_params {

 using rbgmask_t = srsran::bounded_bitset<SCHED_NR_MAX_NOF_RBGS, true>;

+struct resource_guard {
+public:
+  resource_guard() = default;
+  resource_guard(const resource_guard& other) = delete;
+  resource_guard(resource_guard&& other) = delete;
+  resource_guard& operator=(const resource_guard& other) = delete;
+  resource_guard& operator=(resource_guard&& other) = delete;
+  bool busy() const { return flag; }
+
+  struct token {
+    token() = default;
+    token(resource_guard& parent) : flag(parent.busy() ? nullptr : &parent.flag)
+    {
+      if (flag != nullptr) {
+        *flag = true;
+      }
+    }
+    token(token&&) noexcept = default;
+    token& operator=(token&&) noexcept = default;
+    void release() { flag.reset(); }
+    bool owns_token() const { return flag != nullptr; }
+    bool empty() const { return flag == nullptr; }
+
+  private:
+    struct release_deleter {
+      void operator()(bool* ptr)
+      {
+        if (ptr != nullptr) {
+          srsran_assert(*ptr == true, "resource token: detected inconsistency token state");
+          *ptr = false;
+        }
+      }
+    };
+    std::unique_ptr<bool, release_deleter> flag;
+  };
+
+private:
+  bool flag = false;
+};
+
 } // namespace sched_nr_impl
 } // namespace srsenb
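Editor's note: for orientation, a minimal usage sketch of the new resource_guard utility. This is not part of the commit; the function name is ours, and it assumes the class from the hunk above (in namespace srsenb::sched_nr_impl) is in scope via its header.

#include <cassert>

void resource_guard_usage_example()
{
  srsenb::sched_nr_impl::resource_guard guard;

  // first token finds the guard free and acquires it
  srsenb::sched_nr_impl::resource_guard::token t1(guard);
  assert(t1.owns_token() && guard.busy());

  // second token finds the guard busy and stays empty
  srsenb::sched_nr_impl::resource_guard::token t2(guard);
  assert(t2.empty());

  // releasing the token runs the custom deleter, which clears the guard's flag
  t1.release();
  assert(!guard.busy());
}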

@@ -30,11 +30,11 @@ class slot_ue
 {
 public:
   slot_ue() = default;
-  explicit slot_ue(bool& busy_signal, tti_point tti_rx_, uint32_t cc);
+  explicit slot_ue(resource_guard::token ue_token, tti_point tti_rx_, uint32_t cc);
   ~slot_ue();
   slot_ue(slot_ue&&) noexcept = default;
   slot_ue& operator=(slot_ue&&) noexcept = default;
-  bool empty() const { return busy_signal == nullptr; }
+  bool empty() const { return ue_token.empty(); }
   void release();

   tti_point tti_rx;
@@ -51,10 +51,7 @@ public:
   harq_proc* h_ul = nullptr;

 private:
-  struct noop {
-    void operator()(bool* ptr) {}
-  };
-  std::unique_ptr<bool, noop> busy_signal;
+  resource_guard::token ue_token;
 };

 class ue_carrier
@@ -77,8 +74,8 @@ public:

 private:
   const sched_nr_ue_cfg* cfg = nullptr;

-  bool busy{false};
+  resource_guard busy;
   tti_point last_tti_rx;
   srsran::deque<srsran::move_callback<void(ue_carrier&)> > pending_feedback;
 };

@@ -15,8 +15,8 @@
 namespace srsenb {
 namespace sched_nr_impl {

-slot_ue::slot_ue(bool& busy_signal_, tti_point tti_rx_, uint32_t cc_) :
-  busy_signal(&busy_signal_), tti_rx(tti_rx_), cc(cc_)
+slot_ue::slot_ue(resource_guard::token ue_token_, tti_point tti_rx_, uint32_t cc_) :
+  ue_token(std::move(ue_token_)), tti_rx(tti_rx_), cc(cc_)
 {}

 slot_ue::~slot_ue()
@@ -26,9 +26,7 @@ slot_ue::~slot_ue()

 void slot_ue::release()
 {
-  if (busy_signal != nullptr) {
-    *busy_signal = false;
-  }
+  ue_token.release();
 }

 ///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
@@ -48,12 +46,11 @@ void ue_carrier::push_feedback(srsran::move_callback<void(ue_carrier&)> callback

 slot_ue ue_carrier::try_reserve(tti_point tti_rx, const sched_nr_ue_cfg& uecfg_)
 {
-  slot_ue sfu = (busy) ? slot_ue() : slot_ue(busy, tti_rx, cc);
+  slot_ue sfu(busy, tti_rx, cc);
   if (sfu.empty()) {
     return sfu;
   }
   // successfully acquired. Process any CC-specific pending feedback
-  busy = true;
   if (cfg != &uecfg_) {
     set_cfg(uecfg_);
   }

@@ -111,8 +111,13 @@ sched_worker_manager::sched_worker_manager(ue_map_t& ue_db_, const sched_params&

 sched_worker_manager::~sched_worker_manager()
 {
-  for (uint32_t sf = 0; sf < slot_ctxts.size(); ++sf) {
-    sem_destroy(&slot_ctxts[sf]->sf_sem);
+  // acquire all slot worker contexts
+  for (auto& slot_ctxt : slot_ctxts) {
+    sem_wait(&slot_ctxt->sf_sem);
+  }
+  // destroy all slot worker contexts
+  for (auto& slot_ctxt : slot_ctxts) {
+    sem_destroy(&slot_ctxt->sf_sem);
   }
 }
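Editor's note: the rewritten destructor first acquires every slot semaphore, so it blocks until any in-flight slot worker has posted its context, and only then calls sem_destroy. Below is a standalone sketch of that shutdown ordering with POSIX semaphores; it is illustrative only, not srsRAN code, and all names in it are ours.

#include <chrono>
#include <semaphore.h>
#include <thread>
#include <vector>

int main()
{
  // four slot contexts; context 0 starts "reserved" by a worker (count 0), the rest are free
  std::vector<sem_t> slot_sems(4);
  for (size_t i = 0; i < slot_sems.size(); ++i) {
    sem_init(&slot_sems[i], 0, i == 0 ? 0 : 1);
  }

  // in-flight worker releases its slot context once its result is produced
  std::thread worker([&]() {
    std::this_thread::sleep_for(std::chrono::milliseconds(10));
    sem_post(&slot_sems[0]);
  });

  // shutdown: acquire every slot context first (blocks until the worker posts),
  // so no worker can still be inside one, and only then destroy the semaphores
  for (auto& s : slot_sems) {
    sem_wait(&s);
  }
  for (auto& s : slot_sems) {
    sem_destroy(&s);
  }

  worker.join();
  return 0;
}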

@@ -20,6 +20,7 @@ struct task_job_manager {
   std::mutex mutex;
   std::condition_variable cond_var;
   int tasks = 0;
+  int res_count = 0;
   int pdsch_count = 0;
   int max_tasks = std::numeric_limits<int>::max() / 2;
@@ -35,6 +36,7 @@
   {
     std::unique_lock<std::mutex> lock(mutex);
     TESTASSERT(res.dl_res.data.size() <= 1);
+    res_count++;
     pdsch_count += res.dl_res.data.size();
     if (tasks-- >= max_tasks or tasks == 0) {
       cond_var.notify_one();
@@ -47,6 +49,7 @@
       cond_var.wait(lock);
     }
   }
+  void print_results() const { printf("TESTER: %f PDSCH/{slot,cc} were allocated\n", pdsch_count / (double)res_count); }
 };

 void sched_nr_cfg_serialized_test()
@@ -66,7 +69,7 @@ void sched_nr_cfg_serialized_test()
   uecfg.carriers[0].active = true;

   sched.ue_cfg(0x46, uecfg);
-  for (uint32_t nof_ttis = 0; nof_ttis < 1000; ++nof_ttis) {
+  for (uint32_t nof_ttis = 0; nof_ttis < max_nof_ttis; ++nof_ttis) {
     tti_point tti(nof_ttis % 10240);
     sched.new_tti(tti);
     for (uint32_t cc = 0; cc < cells_cfg.size(); ++cc) {
@@ -77,7 +80,7 @@ void sched_nr_cfg_serialized_test()
     }
   }

-  printf("TESTER: %f PDSCH/slot were allocated\n", tasks.pdsch_count / (double)max_nof_ttis);
+  tasks.print_results();
 }

 void sched_nr_cfg_parallel_cc_test()
@@ -114,7 +117,7 @@ void sched_nr_cfg_parallel_cc_test()

   tasks.wait_task_finish();

-  printf("TESTER: %f PDSCH/slot were allocated\n", tasks.pdsch_count / (double)max_nof_ttis);
+  tasks.print_results();
 }

 void sched_nr_cfg_parallel_sf_test()
@@ -141,8 +144,8 @@ void sched_nr_cfg_parallel_sf_test()
   for (uint32_t nof_ttis = 0; nof_ttis < max_nof_ttis; ++nof_ttis) {
     tti_point tti(nof_ttis % 10240);
     sched.new_tti(tti);
-    tasks.start_task();
     for (uint32_t cc = 0; cc < cells_cfg.size(); ++cc) {
+      tasks.start_task();
       srsran::get_background_workers().push_task([cc, &sched, tti, &tasks]() {
         sched_nr_res_t res;
         TESTASSERT(sched.generate_sched_result(tti, cc, res) == SRSRAN_SUCCESS);
@@ -153,7 +156,7 @@ void sched_nr_cfg_parallel_sf_test()

   tasks.wait_task_finish();

-  printf("TESTER: %f PDSCH/slot were allocated\n", tasks.pdsch_count / (double)max_nof_ttis);
+  tasks.print_results();
 }

 } // namespace srsenb
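Editor's note: moving start_task() inside the carrier loop registers one task per (slot, cc) result, matching the one decrement performed when each carrier's result arrives, so the final wait cannot return while carrier workers are still running. Below is a simplified, self-contained sketch of that counting pattern; it is illustrative only (the real task_job_manager in the test also throttles on max_tasks), and all names in it are ours.

#include <condition_variable>
#include <mutex>
#include <thread>
#include <vector>

struct task_counter {
  std::mutex mutex;
  std::condition_variable cond_var;
  int tasks = 0;

  void start_task()
  {
    std::lock_guard<std::mutex> lock(mutex);
    tasks++;
  }
  void finish_task()
  {
    std::lock_guard<std::mutex> lock(mutex);
    if (--tasks == 0) {
      cond_var.notify_one();
    }
  }
  void wait_task_finish()
  {
    std::unique_lock<std::mutex> lock(mutex);
    cond_var.wait(lock, [this]() { return tasks == 0; });
  }
};

int main()
{
  task_counter tasks;
  std::vector<std::thread> workers;
  for (int slot = 0; slot < 3; ++slot) {
    for (int cc = 0; cc < 2; ++cc) {
      tasks.start_task(); // one task per (slot, cc), as in the fixed test
      workers.emplace_back([&tasks]() { tasks.finish_task(); });
    }
  }
  tasks.wait_task_finish(); // returns only after every (slot, cc) worker has finished
  for (auto& w : workers) {
    w.join();
  }
  return 0;
}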
@@ -169,5 +172,5 @@ int main()

   srsenb::sched_nr_cfg_serialized_test();
   srsenb::sched_nr_cfg_parallel_cc_test();
-  // srsenb::sched_nr_cfg_parallel_sf_test();
+  srsenb::sched_nr_cfg_parallel_sf_test();
 }