sched,nr: implementation of sched NR harq retxs. Refactor of rb grid classes.

master
Francisco 4 years ago committed by Francisco Paisana
parent d950433cbd
commit f1b3cfc764

@ -34,20 +34,19 @@ class ue_event_manager;
class sched_nr final : public sched_nr_interface
{
public:
explicit sched_nr(const sched_nr_cfg& sched_cfg);
explicit sched_nr(const sched_cfg_t& sched_cfg);
~sched_nr() override;
int cell_cfg(const std::vector<sched_nr_cell_cfg>& cell_list);
void ue_cfg(uint16_t rnti, const sched_nr_ue_cfg& cfg) override;
int cell_cfg(const std::vector<cell_cfg_t>& cell_list);
void ue_cfg(uint16_t rnti, const ue_cfg_t& cfg) override;
void new_tti(tti_point tti_rx) override;
int generate_sched_result(tti_point tti_rx, uint32_t cc, sched_nr_res_t& result);
void slot_indication(tti_point tti_rx) override;
int generate_sched_result(tti_point tti_rx, uint32_t cc, tti_request_t& tti_req);
void dl_ack_info(uint16_t rnti, uint32_t cc, uint32_t pid, uint32_t tb_idx, bool ack) override;
void ul_sr_info(tti_point tti_rx, uint16_t rnti) override;
private:
void ue_cfg_impl(uint16_t rnti, const sched_nr_ue_cfg& cfg);
void run_tti(tti_point tti_rx, uint32_t cc);
void ue_cfg_impl(uint16_t rnti, const ue_cfg_t& cfg);
// args
sched_nr_impl::sched_params cfg;
@ -55,8 +54,6 @@ private:
using sched_worker_manager = sched_nr_impl::sched_worker_manager;
std::unique_ptr<sched_worker_manager> sched_workers;
std::array<std::array<sched_nr_res_t, SCHED_NR_MAX_CARRIERS>, SCHED_NR_NOF_SUBFRAMES> sched_results;
using ue_map_t = sched_nr_impl::ue_map_t;
std::mutex ue_db_mutex;
ue_map_t ue_db;

@ -20,29 +20,32 @@ namespace srsenb {
const static size_t SCHED_NR_MAX_USERS = 4;
const static size_t SCHED_NR_NOF_SUBFRAMES = 10;
const static size_t SCHED_NR_MAX_NOF_RBGS = 25;
const static size_t SCHED_NR_NOF_HARQS = 16;
namespace sched_nr_impl {
using sched_cfg_t = sched_nr_interface::sched_cfg_t;
using cell_cfg_t = sched_nr_interface::cell_cfg_t;
struct sched_cell_params {
const uint32_t cc;
const sched_nr_cell_cfg cell_cfg;
const sched_nr_cfg& sched_cfg;
const uint32_t cc;
const cell_cfg_t cell_cfg;
const sched_cfg_t& sched_cfg;
sched_cell_params(uint32_t cc_, const sched_nr_cell_cfg& cell, const sched_nr_cfg& sched_cfg_) :
sched_cell_params(uint32_t cc_, const cell_cfg_t& cell, const sched_cfg_t& sched_cfg_) :
cc(cc_), cell_cfg(cell), sched_cfg(sched_cfg_)
{}
};
struct sched_params {
const sched_nr_cfg sched_cfg;
const sched_cfg_t sched_cfg;
std::vector<sched_cell_params> cells;
explicit sched_params(const sched_nr_cfg& sched_cfg_) : sched_cfg(sched_cfg_) {}
explicit sched_params(const sched_cfg_t& sched_cfg_) : sched_cfg(sched_cfg_) {}
};
using rbgmask_t = srsran::bounded_bitset<SCHED_NR_MAX_NOF_RBGS, true>;
using pdcchmask_t = srsran::bounded_bitset<SCHED_NR_MAX_NOF_RBGS, true>;
using rbgmask_t = srsran::bounded_bitset<SCHED_NR_MAX_NOF_RBGS, true>;
struct resource_guard {
public:

@ -25,14 +25,23 @@ class harq_proc
public:
explicit harq_proc(uint32_t id_, uint32_t max_nof_tb_ = 1) : pid(id_), max_nof_tb(max_nof_tb_) {}
bool empty() const { return not tb[0].active and not tb[1].active; }
bool empty() const
{
return std::none_of(tb.begin(), tb.end(), [](const tb_t& t) { return t.active; });
}
/// True when the given TB has no active transmission (consistent with the no-arg empty()).
/// BUGFIX: previously returned tb[tb_idx].active (the inverse), which broke ack_info():
/// feedback for an active TB was rejected as "empty", and inactive TBs accepted feedback.
bool empty(uint32_t tb_idx) const { return not tb[tb_idx].active; }
bool has_pending_retx(tti_point tti_rx) const { return not empty() and not tb[0].ack_state and tti_ack <= tti_rx; }
uint32_t nof_retx() const { return tb[0].n_rtx; }
uint32_t max_nof_retx() const { return max_retx; }
uint32_t tbs() const { return tb[0].tbs; }
void ack_info(uint32_t tb_idx, bool ack) { tb[tb_idx].ack_state = ack; }
bool ack_info(uint32_t tb_idx, bool ack);
bool has_pending_retx(tti_point tti_rx) const { return not empty() and tti_tx + ack_delay <= tti_rx; }
bool new_tx(tti_point tti_tx, const rbgmask_t& rbgmask, uint32_t mcs, uint32_t ack_delay);
void new_tti(tti_point tti_rx);
void reset();
bool
new_tx(tti_point tti_tx, tti_point tti_ack, const rbgmask_t& rbgmask, uint32_t mcs, uint32_t tbs, uint32_t max_retx);
bool new_retx(tti_point tti_tx, tti_point tti_ack, const rbgmask_t& rbgmask, int* mcs, int* tbs);
const uint32_t pid;
@ -43,12 +52,14 @@ private:
bool ndi = false;
uint32_t n_rtx = 0;
uint32_t mcs = 0;
uint32_t tbs = 0;
};
const uint32_t max_nof_tb;
uint32_t max_retx = 1;
tti_point tti_tx;
uint32_t ack_delay = 0;
tti_point tti_ack;
rbgmask_t rbgmask;
std::array<tb_t, SCHED_NR_MAX_TB> tb;
};
@ -56,13 +67,11 @@ private:
class harq_entity
{
public:
harq_entity();
explicit harq_entity(uint32_t nof_harq_procs = 16);
void new_tti(tti_point tti_rx_);
void dl_ack_info(uint32_t pid, uint32_t tb_idx, bool ack) { dl_harqs[pid].ack_info(tb_idx, ack); }
harq_proc& get_dl_harq(uint32_t pid) { return dl_harqs[pid]; }
harq_proc* find_pending_dl_retx()
{
return find_dl([this](const harq_proc& h) { return h.has_pending_retx(tti_rx); });

@ -13,60 +13,90 @@
#ifndef SRSRAN_SCHED_NR_INTERFACE_H
#define SRSRAN_SCHED_NR_INTERFACE_H
#include "srsran/adt/bounded_bitset.h"
#include "srsran/adt/bounded_vector.h"
#include "srsran/common/tti_point.h"
#include "srsran/phy/phch/dci_nr.h"
namespace srsenb {
const static size_t SCHED_NR_MAX_CARRIERS = 4;
const static uint16_t SCHED_NR_INVALID_RNTI = 0;
const static size_t SCHED_NR_MAX_PDSCH_DATA = 16;
const static size_t SCHED_NR_MAX_PUSCH_DATA = 16;
const static size_t SCHED_NR_MAX_TB = 2;
struct sched_nr_cell_cfg {
uint32_t nof_prb = 100;
uint32_t nof_rbg = 25;
uint32_t K0 = 0;
uint32_t K1 = 4;
uint32_t K2 = 4;
};
struct sched_nr_cfg {
uint32_t nof_concurrent_subframes = 1;
};
struct sched_nr_ue_cc_cfg {
bool active = false;
};
struct sched_nr_ue_cfg {
srsran::bounded_vector<sched_nr_ue_cc_cfg, SCHED_NR_MAX_CARRIERS> carriers;
};
struct sched_nr_data_t {
srsran::bounded_vector<uint32_t, SCHED_NR_MAX_TB> tbs;
};
struct sched_nr_dl_res_t {
srsran::bounded_vector<sched_nr_data_t, SCHED_NR_MAX_PDSCH_DATA> data;
};
struct sched_nr_ul_res_t {
srsran::bounded_vector<sched_nr_data_t, SCHED_NR_MAX_PUSCH_DATA> pusch;
};
struct sched_nr_res_t {
sched_nr_dl_res_t dl_res;
sched_nr_ul_res_t ul_res;
};
const static size_t SCHED_NR_MAX_NOF_RBGS = 25;
const static size_t SCHED_NR_MAX_UL_ALLOCS = 16;
const static size_t SCHED_NR_MAX_TB = 1;
class sched_nr_interface
{
public:
virtual ~sched_nr_interface() = default;
virtual void ue_cfg(uint16_t rnti, const sched_nr_ue_cfg& ue_cfg) = 0;
virtual void new_tti(tti_point tti_rx) = 0;
using pdcch_bitmap = srsran::bounded_bitset<SCHED_NR_MAX_NOF_RBGS, true>;
using rbg_bitmap = srsran::bounded_bitset<SCHED_NR_MAX_NOF_RBGS, true>;
///// Configuration /////
struct pdsch_td_res_alloc {
uint8_t k0 = 0; // 0..32
uint8_t k1 = 4; // 0..32
};
using pdsch_td_res_alloc_list = srsran::bounded_vector<pdsch_td_res_alloc, SCHED_NR_MAX_UL_ALLOCS>;
struct pusch_td_res_alloc {
uint8_t k2 = 4; // 0..32
};
using pusch_td_res_alloc_list = srsran::bounded_vector<pusch_td_res_alloc, SCHED_NR_MAX_UL_ALLOCS>;
struct cell_cfg_t {
uint32_t nof_prb = 100;
uint32_t nof_rbg = 25;
};
struct sched_cfg_t {
uint32_t nof_concurrent_subframes = 1;
};
struct ue_cc_cfg_t {
bool active = false;
pdsch_td_res_alloc_list pdsch_res_list{1};
pusch_td_res_alloc_list pusch_res_list{1};
};
struct ue_cfg_t {
uint32_t maxharq_tx = 4;
srsran::bounded_vector<ue_cc_cfg_t, SCHED_NR_MAX_CARRIERS> carriers;
};
///// Sched Result /////
struct pdsch_grant {
srsran_dci_dl_nr_t dci;
rbg_bitmap bitmap;
};
using pdsch_list = srsran::bounded_vector<pdsch_grant, SCHED_NR_MAX_PDSCH_DATA>;
struct dl_tti_request_t {
tti_point pdsch_tti;
pdsch_list pdsch;
};
struct pusch_grant {
srsran_dci_ul_nr_t dci;
rbg_bitmap bitmap;
};
using pusch_list = srsran::bounded_vector<pusch_grant, SCHED_NR_MAX_PDSCH_DATA>;
struct ul_tti_request_t {
tti_point pusch_tti;
srsran::bounded_vector<pusch_grant, SCHED_NR_MAX_UL_ALLOCS> pusch;
};
struct tti_request_t {
dl_tti_request_t dl_res;
ul_tti_request_t ul_res;
};
virtual ~sched_nr_interface() = default;
virtual void ue_cfg(uint16_t rnti, const ue_cfg_t& ue_cfg) = 0;
virtual void slot_indication(tti_point tti_rx) = 0;
virtual void dl_ack_info(uint16_t rnti, uint32_t cc, uint32_t pid, uint32_t tb_idx, bool ack) = 0;
virtual void ul_sr_info(tti_point, uint16_t rnti) = 0;

@ -13,32 +13,86 @@
#ifndef SRSRAN_SCHED_NR_RB_GRID_H
#define SRSRAN_SCHED_NR_RB_GRID_H
#include "lib/include/srsran/adt/circular_array.h"
#include "sched_nr_interface.h"
#include "sched_nr_ue.h"
namespace srsenb {
namespace sched_nr_impl {
class slot_grid
using pdsch_list = sched_nr_interface::pdsch_list;
using pusch_list = sched_nr_interface::pusch_list;
struct pdcch_t {};
struct pdsch_t {};
struct pusch_t {};
struct pucch_t {};
/// Cumulative allocation state of one {carrier, slot} in the PHY resource grid.
/// Written by slot_sched::alloc_pdsch/alloc_pusch; one instance per slot of the ring.
struct phy_slot_grid {
pdcchmask_t pdcch_tot_mask; // aggregated PDCCH allocation mask for this slot
rbgmask_t pdsch_tot_mask;   // RBGs already taken by PDSCH grants in this slot
rbgmask_t ul_tot_mask;      // RBGs already taken by UL (PUSCH) allocations
pdsch_list pdsch_grants;    // PDSCH grants scheduled for this slot
pusch_list pusch_grants;    // PUSCH grants scheduled for this slot
srsran::bounded_vector<pucch_t, SCHED_NR_MAX_PDSCH_DATA> pucch_grants; // PUCCH allocations (presumably UCI feedback — confirm)
};
using phy_cell_rb_grid = srsran::circular_array<phy_slot_grid, TTIMOD_SZ>;
struct slot_ue_grid {
phy_slot_grid* pdcch_slot;
phy_slot_grid* pdsch_slot;
phy_slot_grid* pusch_slot;
phy_slot_grid* pucch_slot;
pdcch_t* pdcch_alloc = nullptr;
pdsch_t* pdsch_alloc = nullptr;
pusch_t* pusch_alloc = nullptr;
pucch_t* pucch_alloc = nullptr;
slot_ue_grid(phy_slot_grid& pdcch_sl, phy_slot_grid& pdsch_sl, phy_slot_grid& pusch_sl, phy_slot_grid& pucch_sl) :
pdcch_slot(&pdcch_sl), pdsch_slot(&pdsch_sl), pusch_slot(&pusch_sl), pucch_slot(&pucch_sl)
{}
};
class rb_alloc_grid
{
public:
explicit slot_grid(const sched_cell_params& cfg_);
void new_tti(tti_point tti_rx_, sched_nr_res_t& sched_res_);
bool alloc_pdsch(slot_ue& ue, const rbgmask_t& dl_mask);
bool alloc_pusch(slot_ue& ue, const rbgmask_t& dl_mask);
/// Fetch the PHY slot grids affected by a PDCCH sent at pdcch_tti, given the
/// K0 (DL data), K1 (UCI feedback) and K2 (UL data) slot offsets.
slot_ue_grid get_slot_ue_grid(tti_point pdcch_tti, uint8_t K0, uint8_t K1, uint8_t K2)
{
  // slot_ue_grid ctor order: (pdcch, pdsch, pusch, pucch).
  return slot_ue_grid{phy_grid[pdcch_tti.to_uint()],              // PDCCH slot
                      phy_grid[(pdcch_tti + K0).to_uint()],       // PDSCH slot (+K0)
                      phy_grid[(pdcch_tti + K2).to_uint()],       // PUSCH slot (+K2)
                      phy_grid[(pdcch_tti + K0 + K1).to_uint()]}; // PUCCH slot (+K0+K1)
}
void generate_dcis();
private:
phy_cell_rb_grid phy_grid;
};
/// Error code of alloc attempt
enum class alloc_result { success, sch_collision, no_grant_space, no_rnti_opportunity };

/// Human-readable label for an alloc_result, e.g. for logging failed allocations.
/// BUGFIX: the stub returned "" for every code and ignored its argument.
inline const char* to_string(alloc_result res)
{
  switch (res) {
    case alloc_result::success:
      return "success";
    case alloc_result::sch_collision:
      return "sch_collision";
    case alloc_result::no_grant_space:
      return "no_grant_space";
    case alloc_result::no_rnti_opportunity:
      return "no_rnti_opportunity";
  }
  return "unrecognized error code"; // unreachable for valid enum values
}
class slot_sched
{
public:
explicit slot_sched(const sched_cell_params& cfg_, phy_cell_rb_grid& phy_grid_);
void new_tti(tti_point tti_rx_);
alloc_result alloc_pdsch(slot_ue& ue, const rbgmask_t& dl_mask);
alloc_result alloc_pusch(slot_ue& ue, const rbgmask_t& dl_mask);
tti_point tti_tx_dl() const { return tti_rx + TX_ENB_DELAY; }
tti_point tti_tx_ul() const { return tti_tx_dl() + cfg.cell_cfg.K2; }
void generate_dcis();
const sched_cell_params& cfg;
private:
tti_point tti_rx;
rbgmask_t pdsch_mask;
rbgmask_t pusch_mask;
sched_nr_res_t* sched_res = nullptr;
srslog::basic_logger& logger;
phy_cell_rb_grid phy_grid;
tti_point tti_rx;
};
} // namespace sched_nr_impl

@ -24,31 +24,38 @@ namespace srsenb {
namespace sched_nr_impl {
using ue_cfg_t = sched_nr_interface::ue_cfg_t;
using ue_cc_cfg_t = sched_nr_interface::ue_cc_cfg_t;
class ue_carrier;
class slot_ue
{
public:
slot_ue() = default;
explicit slot_ue(resource_guard::token ue_token, tti_point tti_rx_, uint32_t cc);
~slot_ue();
explicit slot_ue(resource_guard::token ue_token, uint16_t rnti_, tti_point tti_rx_, uint32_t cc);
slot_ue(slot_ue&&) noexcept = default;
slot_ue& operator=(slot_ue&&) noexcept = default;
bool empty() const { return ue_token.empty(); }
void release();
void release() { ue_token.release(); }
uint16_t rnti = SCHED_NR_INVALID_RNTI;
tti_point tti_rx;
uint32_t cc = SCHED_NR_MAX_CARRIERS;
// UE parameters common to all sectors
const sched_nr_ue_cfg* cfg = nullptr;
bool pending_sr;
const ue_cfg_t* cfg = nullptr;
bool pending_sr;
// UE parameters that are sector specific
uint32_t dl_cqi;
uint32_t ul_cqi;
harq_proc* h_dl = nullptr;
harq_proc* h_ul = nullptr;
const ue_cc_cfg_t* cc_cfg = nullptr;
tti_point pdsch_tti;
tti_point pusch_tti;
tti_point uci_tti;
uint32_t dl_cqi;
uint32_t ul_cqi;
harq_proc* h_dl = nullptr;
harq_proc* h_ul = nullptr;
private:
resource_guard::token ue_token;
@ -57,10 +64,10 @@ private:
class ue_carrier
{
public:
ue_carrier(uint16_t rnti, uint32_t cc, const sched_nr_ue_cfg& cfg);
slot_ue try_reserve(tti_point tti_rx, const sched_nr_ue_cfg& cfg);
ue_carrier(uint16_t rnti, uint32_t cc, const ue_cfg_t& cfg);
slot_ue try_reserve(tti_point pdcch_tti, const ue_cfg_t& cfg);
void push_feedback(srsran::move_callback<void(ue_carrier&)> callback);
void set_cfg(const sched_nr_ue_cfg& uecfg);
void set_cfg(const ue_cfg_t& uecfg);
const uint16_t rnti;
const uint32_t cc;
@ -72,7 +79,7 @@ public:
harq_entity harq_ent;
private:
const sched_nr_ue_cfg* cfg = nullptr;
const ue_cfg_t* cfg = nullptr;
resource_guard busy;
tti_point last_tti_rx;
@ -83,11 +90,11 @@ private:
class ue
{
public:
ue(uint16_t rnti, const sched_nr_ue_cfg& cfg);
ue(uint16_t rnti, const ue_cfg_t& cfg);
slot_ue try_reserve(tti_point tti_rx, uint32_t cc);
void set_cfg(const sched_nr_ue_cfg& cfg);
void set_cfg(const ue_cfg_t& cfg);
void ul_sr_info(tti_point tti_rx) { pending_sr = true; }
@ -96,8 +103,8 @@ public:
private:
bool pending_sr = false;
int current_idx = 0;
std::array<sched_nr_ue_cfg, 4> ue_cfgs;
int current_idx = 0;
std::array<ue_cfg_t, 4> ue_cfgs;
};
using ue_map_t = srsran::static_circular_map<uint16_t, std::unique_ptr<ue>, SCHED_NR_MAX_USERS>;

@ -27,12 +27,16 @@
namespace srsenb {
namespace sched_nr_impl {
using slot_res_t = sched_nr_interface::tti_request_t;
class slot_cc_worker
{
public:
explicit slot_cc_worker(const sched_cell_params& cell_params) : cfg(cell_params), res_grid(cfg) {}
explicit slot_cc_worker(const sched_cell_params& cell_params, phy_cell_rb_grid& phy_grid) :
cfg(cell_params), res_grid(cfg, phy_grid)
{}
void start(tti_point tti_rx_, sched_nr_res_t& bwp_result, ue_map_t& ue_db_);
void start(tti_point tti_rx_, ue_map_t& ue_db_);
void run();
void end_tti();
bool running() const { return tti_rx.is_valid(); }
@ -43,8 +47,8 @@ private:
const sched_cell_params& cfg;
tti_point tti_rx;
slot_grid res_grid;
tti_point tti_rx;
slot_sched res_grid;
srsran::static_circular_map<uint16_t, slot_ue, SCHED_NR_MAX_USERS> slot_ues;
};
@ -57,9 +61,9 @@ public:
sched_worker_manager(sched_worker_manager&&) = delete;
~sched_worker_manager();
void reserve_workers(tti_point tti_rx, srsran::span<sched_nr_res_t> sf_result_);
void reserve_workers(tti_point tti_rx);
void start_tti(tti_point tti_rx);
bool run_tti(tti_point tti_rx, uint32_t cc, sched_nr_res_t& result);
bool run_tti(tti_point tti_rx, uint32_t cc, sched_nr_interface::tti_request_t& req);
void end_tti(tti_point tti_rx);
private:
@ -67,14 +71,15 @@ private:
ue_map_t& ue_db;
struct slot_worker_ctxt {
sem_t sf_sem; // lock of all workers of the same slot. unlocked by last slot_cc_worker
tti_point tti_rx;
srsran::span<sched_nr_res_t> sf_result;
std::atomic<int> worker_count{0}; // variable shared across slot_cc_workers
std::vector<slot_cc_worker> workers;
sem_t sf_sem; // lock of all workers of the same slot. unlocked by last slot_cc_worker
tti_point tti_rx;
std::atomic<int> worker_count{0}; // variable shared across slot_cc_workers
std::vector<slot_cc_worker> workers;
};
std::vector<std::unique_ptr<slot_worker_ctxt> > slot_ctxts;
std::array<phy_cell_rb_grid, SCHED_NR_MAX_CARRIERS> phy_grid;
slot_worker_ctxt& get_sf(tti_point tti_rx);
};

@ -78,11 +78,11 @@ private:
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
sched_nr::sched_nr(const sched_nr_cfg& sched_cfg) : cfg(sched_cfg), pending_events(new ue_event_manager(ue_db)) {}
sched_nr::sched_nr(const sched_cfg_t& sched_cfg) : cfg(sched_cfg), pending_events(new ue_event_manager(ue_db)) {}
sched_nr::~sched_nr() {}
int sched_nr::cell_cfg(const std::vector<sched_nr_cell_cfg>& cell_list)
int sched_nr::cell_cfg(const std::vector<cell_cfg_t>& cell_list)
{
cfg.cells.reserve(cell_list.size());
for (uint32_t cc = 0; cc < cell_list.size(); ++cc) {
@ -93,12 +93,12 @@ int sched_nr::cell_cfg(const std::vector<sched_nr_cell_cfg>& cell_list)
return SRSRAN_SUCCESS;
}
void sched_nr::ue_cfg(uint16_t rnti, const sched_nr_ue_cfg& uecfg)
void sched_nr::ue_cfg(uint16_t rnti, const ue_cfg_t& uecfg)
{
pending_events->push_event([this, rnti, uecfg]() { ue_cfg_impl(rnti, uecfg); });
}
void sched_nr::ue_cfg_impl(uint16_t rnti, const sched_nr_ue_cfg& uecfg)
void sched_nr::ue_cfg_impl(uint16_t rnti, const ue_cfg_t& uecfg)
{
if (not ue_db.contains(rnti)) {
ue_db.insert(rnti, std::unique_ptr<ue>(new ue{rnti, uecfg}));
@ -107,10 +107,10 @@ void sched_nr::ue_cfg_impl(uint16_t rnti, const sched_nr_ue_cfg& uecfg)
}
}
void sched_nr::new_tti(tti_point tti_rx)
void sched_nr::slot_indication(tti_point tti_rx)
{
// Lock slot workers for provided tti_rx
sched_workers->reserve_workers(tti_rx, sched_results[tti_rx.sf_idx()]);
sched_workers->reserve_workers(tti_rx);
{
// synchronize {tti,cc} state. e.g. reserve UE resources for {tti,cc} decision, process feedback
@ -123,10 +123,10 @@ void sched_nr::new_tti(tti_point tti_rx)
}
/// Generate {tti,cc} scheduling decision
int sched_nr::generate_sched_result(tti_point tti_rx, uint32_t cc, sched_nr_res_t& result)
int sched_nr::generate_sched_result(tti_point tti_rx, uint32_t cc, tti_request_t& req)
{
// unlocked, parallel region
bool all_workers_finished = sched_workers->run_tti(tti_rx, cc, result);
bool all_workers_finished = sched_workers->run_tti(tti_rx, cc, req);
if (all_workers_finished) {
// once all workers of the same subframe finished, synchronize sched outcome with ue_db

@ -15,23 +15,80 @@
namespace srsenb {
namespace sched_nr_impl {
bool harq_proc::new_tx(tti_point tti_tx_, const rbgmask_t& rbgmask_, uint32_t mcs, uint32_t ack_delay_)
bool harq_proc::ack_info(uint32_t tb_idx, bool ack)
{
if (empty(tb_idx)) {
return false;
}
tb[tb_idx].ack_state = ack;
if (ack) {
tb[tb_idx].active = false;
}
return true;
}
/// Slot tick: gives up on TB0 once the retransmission limit would be exceeded.
void harq_proc::new_tti(tti_point tti_rx)
{
  const bool retx_limit_reached = nof_retx() + 1 >= max_nof_retx();
  if (has_pending_retx(tti_rx) and retx_limit_reached) {
    tb[0].active = false; // drop the pending retx; process becomes reusable
  }
}
void harq_proc::reset()
{
tb[0].ack_state = false;
tb[0].active = false;
tb[0].n_rtx = 0;
tb[0].mcs = std::numeric_limits<uint32_t>::max();
tb[0].tbs = std::numeric_limits<uint32_t>::max();
}
/// Starts a new transmission on TB0 with the given allocation and timing.
/// \return false if the HARQ process is still busy with a previous transmission.
/// NOTE(review): removed stale duplicate assignments left over from the previous
/// interface (`ack_delay = ack_delay_;` — ack_delay is no longer a member — and
/// double assignments of tti_tx/rbgmask/mcs), a merge artifact of the refactor.
bool harq_proc::new_tx(tti_point tti_tx_,
                       tti_point tti_ack_,
                       const rbgmask_t& rbgmask_,
                       uint32_t mcs,
                       uint32_t tbs,
                       uint32_t max_retx_)
{
  if (not empty()) {
    return false;
  }
  reset();
  max_retx     = max_retx_;
  tti_tx       = tti_tx_;
  tti_ack      = tti_ack_;
  rbgmask      = rbgmask_;
  tb[0].ndi    = !tb[0].ndi; // toggle NDI to signal a new transmission
  tb[0].mcs    = mcs;
  tb[0].tbs    = tbs;
  tb[0].active = true;
  return true;
}
/// Allocates a retransmission of TB0 on a (possibly different) set of RBGs.
/// The new allocation must span the same number of RBGs as the original tx.
/// \param mcs,tbs optional outputs returning the MCS/TBS of the original tx.
/// \return false if the process is empty or the RBG count differs.
bool harq_proc::new_retx(tti_point tti_tx_, tti_point tti_ack_, const rbgmask_t& rbgmask_, int* mcs, int* tbs)
{
  // BUGFIX: the previous check compared rbgmask.count() with itself (always equal);
  // the retx grant size must be compared against the stored allocation size.
  if (empty() or rbgmask_.count() != rbgmask.count()) {
    return false;
  }
  tti_tx  = tti_tx_;
  tti_ack = tti_ack_;
  rbgmask = rbgmask_;
  tb[0].ack_state = false;
  tb[0].n_rtx++;
  if (mcs != nullptr) {
    *mcs = tb[0].mcs;
  }
  if (tbs != nullptr) {
    *tbs = tb[0].tbs;
  }
  return true;
}
harq_entity::harq_entity()
harq_entity::harq_entity(uint32_t nof_harq_procs)
{
dl_harqs.reserve(SCHED_NR_NOF_HARQS);
ul_harqs.reserve(SCHED_NR_NOF_HARQS);
for (uint32_t pid = 0; pid < SCHED_NR_NOF_HARQS; ++pid) {
dl_harqs.reserve(nof_harq_procs);
ul_harqs.reserve(nof_harq_procs);
for (uint32_t pid = 0; pid < nof_harq_procs; ++pid) {
dl_harqs.emplace_back(pid);
ul_harqs.emplace_back(pid);
}
@ -40,6 +97,12 @@ harq_entity::harq_entity()
/// Advances every DL and UL HARQ process to the new slot, letting each one
/// expire pending retransmissions that reached the retx limit.
void harq_entity::new_tti(tti_point tti_rx_)
{
  tti_rx = tti_rx_;
  for (harq_proc& h : dl_harqs) {
    h.new_tti(tti_rx);
  }
  for (harq_proc& h : ul_harqs) {
    h.new_tti(tti_rx);
  }
}
} // namespace sched_nr_impl

@ -15,68 +15,94 @@
namespace srsenb {
namespace sched_nr_impl {
slot_grid::slot_grid(const sched_cell_params& cfg_) :
cfg(cfg_), pdsch_mask(cfg.cell_cfg.nof_rbg), pusch_mask(cfg.cell_cfg.nof_rbg)
using pdsch_grant = sched_nr_interface::pdsch_grant;
using pusch_grant = sched_nr_interface::pusch_grant;
slot_sched::slot_sched(const sched_cell_params& cfg_, phy_cell_rb_grid& phy_grid_) :
logger(srslog::fetch_basic_logger("MAC")), cfg(cfg_), phy_grid(phy_grid_)
{}
void slot_grid::new_tti(tti_point tti_rx_, sched_nr_res_t& sched_res_)
void slot_sched::new_tti(tti_point tti_rx_)
{
tti_rx = tti_rx_;
sched_res = &sched_res_;
pdsch_mask.reset();
pusch_mask.reset();
*sched_res = {};
tti_rx = tti_rx_;
}
bool slot_grid::alloc_pdsch(slot_ue& ue, const rbgmask_t& dl_mask)
alloc_result slot_sched::alloc_pdsch(slot_ue& ue, const rbgmask_t& dl_mask)
{
const uint32_t tbs = 100, mcs = 20;
if (ue.h_dl == nullptr) {
return false;
logger.warning("SCHED: Trying to allocate PDSCH for rnti=0x%x with no available HARQs", ue.rnti);
return alloc_result::no_rnti_opportunity;
}
if ((pdsch_mask & dl_mask).any()) {
return false;
pdsch_list& pdsch_grants = phy_grid[ue.pdsch_tti.to_uint()].pdsch_grants;
if (pdsch_grants.full()) {
logger.warning("SCHED: Maximum number of DL allocations reached");
return alloc_result::no_grant_space;
}
if (sched_res->dl_res.data.full()) {
return false;
rbgmask_t& pdsch_mask = phy_grid[ue.pdsch_tti.to_uint()].pdsch_tot_mask;
if ((pdsch_mask & dl_mask).any()) {
return alloc_result::sch_collision;
}
if (not ue.h_dl->new_tx(tti_tx_dl(), dl_mask, mcs, cfg.cell_cfg.K1)) {
return false;
int mcs = -1, tbs = -1;
if (ue.h_dl->empty()) {
mcs = 20;
tbs = 100;
srsran_assert(ue.h_dl->new_tx(ue.pdsch_tti, ue.uci_tti, dl_mask, mcs, tbs, 4), "Failed to allocate DL HARQ");
} else {
srsran_assert(ue.h_dl->new_retx(ue.pdsch_tti, ue.uci_tti, dl_mask, &mcs, &tbs), "Failed to allocate DL HARQ retx");
}
// Allocation Successful
pdsch_grants.emplace_back();
pdsch_grant& grant = pdsch_grants.back();
grant.dci.ctx.rnti = ue.rnti;
grant.dci.pid = ue.h_dl->pid;
grant.bitmap = dl_mask;
pdsch_mask |= dl_mask;
sched_res->dl_res.data.emplace_back();
sched_nr_data_t& data = sched_res->dl_res.data.back();
data.tbs.resize(1);
data.tbs[0] = tbs;
return true;
return alloc_result::success;
}
bool slot_grid::alloc_pusch(slot_ue& ue, const rbgmask_t& ul_mask)
alloc_result slot_sched::alloc_pusch(slot_ue& ue, const rbgmask_t& ul_mask)
{
const uint32_t tbs = 100, mcs = 20;
if ((pusch_mask & ul_mask).any()) {
return false;
if (ue.h_ul == nullptr) {
logger.warning("SCHED: Trying to allocate PUSCH for rnti=0x%x with no available HARQs", ue.rnti);
return alloc_result::no_rnti_opportunity;
}
pusch_list& pusch_grants = phy_grid[ue.pusch_tti.to_uint()].pusch_grants;
if (pusch_grants.full()) {
logger.warning("SCHED: Maximum number of UL allocations reached");
return alloc_result::no_grant_space;
}
if (sched_res->ul_res.pusch.full()) {
return false;
rbgmask_t& pusch_mask = phy_grid[ue.pusch_tti.to_uint()].ul_tot_mask;
if ((pusch_mask & ul_mask).any()) {
return alloc_result::sch_collision;
}
if (not ue.h_ul->new_tx(tti_tx_ul(), ul_mask, mcs, 0)) {
return false;
int mcs = -1, tbs = -1;
if (ue.h_ul->empty()) {
mcs = 20;
tbs = 100;
bool ret = ue.h_ul->new_tx(ue.pusch_tti, ue.pusch_tti, ul_mask, mcs, tbs, ue.cfg->maxharq_tx);
srsran_assert(ret, "Failed to allocate UL HARQ");
} else {
srsran_assert(ue.h_ul->new_retx(ue.pusch_tti, ue.pusch_tti, ul_mask, &mcs, &tbs),
"Failed to allocate UL HARQ retx");
}
// Allocation Successful
pusch_grants.emplace_back();
pusch_grant& grant = pusch_grants.back();
grant.dci.ctx.rnti = ue.rnti;
grant.dci.pid = ue.h_dl->pid;
grant.dci.mcs = mcs;
grant.bitmap = ul_mask;
pusch_mask |= ul_mask;
sched_res->ul_res.pusch.emplace_back();
sched_nr_data_t& data = sched_res->ul_res.pusch.back();
data.tbs.resize(1);
data.tbs[0] = tbs;
return true;
return alloc_result::success;
}
void slot_grid::generate_dcis() {}
void slot_sched::generate_dcis() {}
} // namespace sched_nr_impl
} // namespace srsenb

@ -15,26 +15,15 @@
namespace srsenb {
namespace sched_nr_impl {
slot_ue::slot_ue(resource_guard::token ue_token_, tti_point tti_rx_, uint32_t cc_) :
ue_token(std::move(ue_token_)), tti_rx(tti_rx_), cc(cc_)
slot_ue::slot_ue(resource_guard::token ue_token_, uint16_t rnti_, tti_point tti_rx_, uint32_t cc_) :
ue_token(std::move(ue_token_)), rnti(rnti_), tti_rx(tti_rx_), cc(cc_)
{}
slot_ue::~slot_ue()
{
release();
}
void slot_ue::release()
{
ue_token.release();
}
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
ue_carrier::ue_carrier(uint16_t rnti_, uint32_t cc_, const sched_nr_ue_cfg& uecfg_) : rnti(rnti_), cc(cc_), cfg(&uecfg_)
{}
ue_carrier::ue_carrier(uint16_t rnti_, uint32_t cc_, const ue_cfg_t& uecfg_) : rnti(rnti_), cc(cc_), cfg(&uecfg_) {}
void ue_carrier::set_cfg(const sched_nr_ue_cfg& uecfg)
void ue_carrier::set_cfg(const ue_cfg_t& uecfg)
{
cfg = &uecfg;
}
@ -44,9 +33,9 @@ void ue_carrier::push_feedback(srsran::move_callback<void(ue_carrier&)> callback
pending_feedback.push_back(std::move(callback));
}
slot_ue ue_carrier::try_reserve(tti_point tti_rx, const sched_nr_ue_cfg& uecfg_)
slot_ue ue_carrier::try_reserve(tti_point tti_rx, const ue_cfg_t& uecfg_)
{
slot_ue sfu(busy, tti_rx, cc);
slot_ue sfu(busy, rnti, tti_rx, cc);
if (sfu.empty()) {
return sfu;
}
@ -71,9 +60,13 @@ slot_ue ue_carrier::try_reserve(tti_point tti_rx, const sched_nr_ue_cfg& uecfg_)
sfu.cfg = &uecfg_;
// copy cc-specific parameters and find available HARQs
sfu.dl_cqi = dl_cqi;
sfu.ul_cqi = ul_cqi;
sfu.h_dl = harq_ent.find_pending_dl_retx();
sfu.cc_cfg = &uecfg_.carriers[cc];
sfu.pdsch_tti = tti_rx + TX_ENB_DELAY + sfu.cc_cfg->pdsch_res_list[0].k0;
sfu.pusch_tti = tti_rx + TX_ENB_DELAY + sfu.cc_cfg->pusch_res_list[0].k2;
sfu.uci_tti = sfu.pdsch_tti + sfu.cc_cfg->pdsch_res_list[0].k1;
sfu.dl_cqi = dl_cqi;
sfu.ul_cqi = ul_cqi;
sfu.h_dl = harq_ent.find_pending_dl_retx();
if (sfu.h_dl == nullptr) {
sfu.h_dl = harq_ent.find_empty_dl_harq();
}
@ -92,8 +85,9 @@ slot_ue ue_carrier::try_reserve(tti_point tti_rx, const sched_nr_ue_cfg& uecfg_)
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
ue::ue(uint16_t rnti, const sched_nr_ue_cfg& cfg)
ue::ue(uint16_t rnti, const ue_cfg_t& cfg)
{
ue_cfgs[0] = cfg;
for (uint32_t cc = 0; cc < cfg.carriers.size(); ++cc) {
if (cfg.carriers[cc].active) {
carriers[cc].reset(new ue_carrier(rnti, cc, cfg));
@ -101,7 +95,7 @@ ue::ue(uint16_t rnti, const sched_nr_ue_cfg& cfg)
}
}
void ue::set_cfg(const sched_nr_ue_cfg& cfg)
void ue::set_cfg(const ue_cfg_t& cfg)
{
current_idx = (current_idx + 1) % ue_cfgs.size();
ue_cfgs[current_idx] = cfg;

@ -16,7 +16,7 @@ namespace srsenb {
namespace sched_nr_impl {
/// Called at the beginning of TTI in a locked context, to reserve available UE resources
void slot_cc_worker::start(tti_point tti_rx_, sched_nr_res_t& bwp_result_, ue_map_t& ue_db)
void slot_cc_worker::start(tti_point tti_rx_, ue_map_t& ue_db)
{
srsran_assert(not running(), "scheduler worker::start() called for active worker");
// Try reserve UE cells for this worker
@ -33,7 +33,7 @@ void slot_cc_worker::start(tti_point tti_rx_, sched_nr_res_t& bwp_result_, ue_ma
// UE acquired successfully for scheduling in this {tti, cc}
}
res_grid.new_tti(tti_rx_, bwp_result_);
res_grid.new_tti(tti_rx_);
tti_rx = tti_rx_;
}
@ -104,7 +104,7 @@ sched_worker_manager::sched_worker_manager(ue_map_t& ue_db_, const sched_params&
sem_init(&slot_ctxts[i]->sf_sem, 0, 1);
slot_ctxts[i]->workers.reserve(cfg.cells.size());
for (uint32_t cc = 0; cc < cfg.cells.size(); ++cc) {
slot_ctxts[i]->workers.emplace_back(cfg.cells[cc]);
slot_ctxts[i]->workers.emplace_back(cfg.cells[cc], phy_grid[cc]);
}
}
}
@ -126,14 +126,13 @@ sched_worker_manager::slot_worker_ctxt& sched_worker_manager::get_sf(tti_point t
return *slot_ctxts[tti_rx.to_uint() % slot_ctxts.size()];
}
void sched_worker_manager::reserve_workers(tti_point tti_rx_, srsran::span<sched_nr_res_t> sf_result_)
void sched_worker_manager::reserve_workers(tti_point tti_rx_)
{
// lock if slot worker is already being used
auto& sf_worker_ctxt = get_sf(tti_rx_);
sem_wait(&sf_worker_ctxt.sf_sem);
sf_worker_ctxt.sf_result = sf_result_;
sf_worker_ctxt.tti_rx = tti_rx_;
sf_worker_ctxt.tti_rx = tti_rx_;
sf_worker_ctxt.worker_count.store(static_cast<int>(sf_worker_ctxt.workers.size()), std::memory_order_relaxed);
}
@ -143,11 +142,11 @@ void sched_worker_manager::start_tti(tti_point tti_rx_)
srsran_assert(sf_worker_ctxt.tti_rx == tti_rx_, "invalid run_tti(tti, cc) arguments");
for (uint32_t cc = 0; cc < sf_worker_ctxt.workers.size(); ++cc) {
sf_worker_ctxt.workers[cc].start(sf_worker_ctxt.tti_rx, sf_worker_ctxt.sf_result[cc], ue_db);
sf_worker_ctxt.workers[cc].start(sf_worker_ctxt.tti_rx, ue_db);
}
}
bool sched_worker_manager::run_tti(tti_point tti_rx_, uint32_t cc, sched_nr_res_t& result)
bool sched_worker_manager::run_tti(tti_point tti_rx_, uint32_t cc, slot_res_t& tti_req)
{
auto& sf_worker_ctxt = get_sf(tti_rx_);
srsran_assert(sf_worker_ctxt.tti_rx == tti_rx_, "invalid run_tti(tti, cc) arguments");
@ -155,12 +154,20 @@ bool sched_worker_manager::run_tti(tti_point tti_rx_, uint32_t cc, sched_nr_res_
// Get {tti, cc} scheduling decision
sf_worker_ctxt.workers[cc].run();
// copy sched result
result = sf_worker_ctxt.sf_result[cc];
// Copy requested TTI DL and UL sched result
tti_req.dl_res.pdsch_tti = tti_rx_ + TX_ENB_DELAY;
tti_req.dl_res.pdsch = phy_grid[cc][tti_req.dl_res.pdsch_tti.to_uint()].pdsch_grants;
tti_req.ul_res.pusch_tti = tti_rx_ + TX_ENB_DELAY;
tti_req.ul_res.pusch = phy_grid[cc][tti_req.ul_res.pusch_tti.to_uint()].pusch_grants;
// decrement the number of active workers
int rem_workers = sf_worker_ctxt.worker_count.fetch_sub(1, std::memory_order_release) - 1;
srsran_assert(rem_workers >= 0, "invalid number of calls to run_tti(tti, cc)");
if (rem_workers == 0) {
// Clear one slot of PHY grid, so it can be reused in the next TTIs
phy_grid[cc][sf_worker_ctxt.tti_rx.to_uint()] = {};
}
return rem_workers == 0;
}
@ -171,11 +178,10 @@ void sched_worker_manager::end_tti(tti_point tti_rx_)
srsran_assert(sf_worker_ctxt.worker_count == 0, "invalid number of calls to run_tti(tti, cc)");
// All the workers of the same TTI have finished. Synchronize scheduling decisions with UEs state
for (auto& worker : sf_worker_ctxt.workers) {
for (slot_cc_worker& worker : sf_worker_ctxt.workers) {
worker.end_tti();
}
sf_worker_ctxt.sf_result = {};
sem_post(&sf_worker_ctxt.sf_sem);
}

@ -32,12 +32,12 @@ struct task_job_manager {
}
tasks++;
}
void finish_task(const sched_nr_res_t& res)
void finish_task(const sched_nr_interface::tti_request_t& res)
{
std::unique_lock<std::mutex> lock(mutex);
TESTASSERT(res.dl_res.data.size() <= 1);
TESTASSERT(res.dl_res.pdsch.size() <= 1);
res_count++;
pdsch_count += res.dl_res.data.size();
pdsch_count += res.dl_res.pdsch.size();
if (tasks-- >= max_tasks or tasks == 0) {
cond_var.notify_one();
}
@ -57,24 +57,24 @@ void sched_nr_cfg_serialized_test()
uint32_t max_nof_ttis = 1000;
task_job_manager tasks;
sched_nr_cfg cfg;
std::vector<sched_nr_cell_cfg> cells_cfg;
cells_cfg.resize(1);
sched_nr_interface::sched_cfg_t cfg;
std::vector<sched_nr_interface::cell_cfg_t> cells_cfg(2);
sched_nr sched(cfg);
sched.cell_cfg(cells_cfg);
sched_nr_ue_cfg uecfg;
uecfg.carriers.resize(1);
sched_nr_interface::ue_cfg_t uecfg;
uecfg.carriers.resize(2);
uecfg.carriers[0].active = true;
uecfg.carriers[1].active = true;
sched.ue_cfg(0x46, uecfg);
for (uint32_t nof_ttis = 0; nof_ttis < max_nof_ttis; ++nof_ttis) {
tti_point tti(nof_ttis % 10240);
sched.new_tti(tti);
sched.slot_indication(tti);
for (uint32_t cc = 0; cc < cells_cfg.size(); ++cc) {
tasks.start_task();
sched_nr_res_t res;
sched_nr_interface::tti_request_t res;
TESTASSERT(sched.generate_sched_result(tti, cc, res) == SRSRAN_SUCCESS);
tasks.finish_task(res);
}
@ -88,14 +88,13 @@ void sched_nr_cfg_parallel_cc_test()
uint32_t max_nof_ttis = 1000;
task_job_manager tasks;
sched_nr_cfg cfg;
std::vector<sched_nr_cell_cfg> cells_cfg;
cells_cfg.resize(4);
sched_nr_interface::sched_cfg_t cfg;
std::vector<sched_nr_interface::cell_cfg_t> cells_cfg(4);
sched_nr sched(cfg);
sched.cell_cfg(cells_cfg);
sched_nr_ue_cfg uecfg;
sched_nr_interface::ue_cfg_t uecfg;
uecfg.carriers.resize(cells_cfg.size());
for (uint32_t cc = 0; cc < cells_cfg.size(); ++cc) {
uecfg.carriers[cc].active = true;
@ -104,11 +103,11 @@ void sched_nr_cfg_parallel_cc_test()
for (uint32_t nof_ttis = 0; nof_ttis < max_nof_ttis; ++nof_ttis) {
tti_point tti(nof_ttis % 10240);
sched.new_tti(tti);
sched.slot_indication(tti);
for (uint32_t cc = 0; cc < cells_cfg.size(); ++cc) {
tasks.start_task();
srsran::get_background_workers().push_task([cc, &sched, tti, &tasks]() {
sched_nr_res_t res;
sched_nr_interface::tti_request_t res;
TESTASSERT(sched.generate_sched_result(tti, cc, res) == SRSRAN_SUCCESS);
tasks.finish_task(res);
});
@ -126,15 +125,15 @@ void sched_nr_cfg_parallel_sf_test()
uint32_t nof_sectors = 2;
task_job_manager tasks;
sched_nr_cfg cfg;
sched_nr_interface::sched_cfg_t cfg;
cfg.nof_concurrent_subframes = 2;
std::vector<sched_nr_cell_cfg> cells_cfg;
std::vector<sched_nr_interface::cell_cfg_t> cells_cfg;
cells_cfg.resize(nof_sectors);
sched_nr sched(cfg);
sched.cell_cfg(cells_cfg);
sched_nr_ue_cfg uecfg;
sched_nr_interface::ue_cfg_t uecfg;
uecfg.carriers.resize(cells_cfg.size());
for (uint32_t cc = 0; cc < cells_cfg.size(); ++cc) {
uecfg.carriers[cc].active = true;
@ -143,11 +142,11 @@ void sched_nr_cfg_parallel_sf_test()
for (uint32_t nof_ttis = 0; nof_ttis < max_nof_ttis; ++nof_ttis) {
tti_point tti(nof_ttis % 10240);
sched.new_tti(tti);
sched.slot_indication(tti);
for (uint32_t cc = 0; cc < cells_cfg.size(); ++cc) {
tasks.start_task();
srsran::get_background_workers().push_task([cc, &sched, tti, &tasks]() {
sched_nr_res_t res;
sched_nr_interface::tti_request_t res;
TESTASSERT(sched.generate_sched_result(tti, cc, res) == SRSRAN_SUCCESS);
tasks.finish_task(res);
});

Loading…
Cancel
Save