Merge branch 'next' into agpl_next

master
Codebot 3 years ago committed by Your Name
commit 63fe471fa6

@ -143,7 +143,7 @@ bool make_mac_dl_harq_cfg_nr_t(const asn1::rrc_nr::pdsch_ser
/***************************
* RLC Config
**************************/
int make_rlc_config_t(const asn1::rrc_nr::rlc_cfg_c& asn1_type, rlc_config_t* rlc_config_out);
int make_rlc_config_t(const asn1::rrc_nr::rlc_cfg_c& asn1_type, uint8_t bearer_id, rlc_config_t* rlc_config_out);
/***************************
* PDCP Config

@ -60,6 +60,7 @@ const char* __tsan_default_suppressions()
"deadlock:srsenb::mac::rlc_buffer_state\n"
"deadlock:srsenb::mac::snr_info\n"
"deadlock:srsenb::mac::ack_info\n"
"deadlock:srsenb::mac::cqi_info\n"
"deadlock:srsenb::rlc::rb_is_um\n"
"deadlock:srsenb::mac::sr_detected\n";
}

@ -289,8 +289,8 @@ public:
};
virtual int slot_indication(const srsran_slot_cfg_t& slot_cfg) = 0;
virtual int get_dl_sched(const srsran_slot_cfg_t& slot_cfg, dl_sched_t& dl_sched) = 0;
virtual int get_ul_sched(const srsran_slot_cfg_t& slot_cfg, ul_sched_t& ul_sched) = 0;
virtual dl_sched_t* get_dl_sched(const srsran_slot_cfg_t& slot_cfg) = 0;
virtual ul_sched_t* get_ul_sched(const srsran_slot_cfg_t& slot_cfg) = 0;
virtual int pucch_info(const srsran_slot_cfg_t& slot_cfg, const pucch_info_t& pucch_info) = 0;
virtual int pusch_info(const srsran_slot_cfg_t& slot_cfg, pusch_info_t& pusch_info) = 0;
virtual void rach_detected(const rach_info_t& rach_info) = 0;

@ -120,6 +120,7 @@ struct rlc_um_nr_config_t {
rlc_um_nr_sn_size_t sn_field_length; // Number of bits used for sequence number
int32_t t_reassembly_ms; // Timer used by rx to detect PDU loss (ms)
uint8_t bearer_id; // This is not in the 3GPP TS 38.322
};
#define RLC_TX_QUEUE_LEN (256)

@ -38,7 +38,7 @@ class mac_sch_subpdu_nr
{
public:
// 3GPP 38.321 v15.3.0 Combined Tables 6.2.1-1, 6.2.1-2
typedef enum {
enum nr_lcid_sch_t {
// Values for DL-SCH
CCCH = 0b000000,
DRX_CMD = 0b111100,
@ -58,7 +58,7 @@ public:
// Common
PADDING = 0b111111,
} nr_lcid_sch_t;
};
// SDUs up to 256 B can use the short 8-bit L field
static const int32_t MAC_SUBHEADER_LEN_THRESHOLD = 256;
@ -66,17 +66,18 @@ public:
mac_sch_subpdu_nr(mac_sch_pdu_nr* parent_) : parent(parent_), logger(&srslog::fetch_basic_logger("MAC-NR")){};
nr_lcid_sch_t get_type();
bool is_sdu();
bool is_sdu() const;
bool is_valid_lcid();
bool is_var_len_ce(uint32_t lcid);
bool is_ul_ccch();
int32_t read_subheader(const uint8_t* ptr);
uint32_t get_total_length();
uint32_t get_sdu_length();
uint32_t get_lcid();
uint32_t get_total_length() const;
uint32_t get_sdu_length() const;
uint32_t get_lcid() const;
uint8_t* get_sdu();
uint16_t get_c_rnti();
const uint8_t* get_sdu() const;
uint16_t get_c_rnti() const;
// both return the reported values as per TS 38.321, mapping to dB according to TS 38.133 Sec 10.1.17 not done here
uint8_t get_phr();
@ -87,13 +88,13 @@ public:
uint8_t lcg_id;
uint8_t buffer_size;
};
lcg_bsr_t get_sbsr();
lcg_bsr_t get_sbsr() const;
static const uint8_t max_num_lcg_lbsr = 8;
struct lbsr_t {
uint8_t bitmap; // the first octet of LBSR and Long Trunc BSR
std::vector<lcg_bsr_t> list; // one entry for each reported LCG
};
lbsr_t get_lbsr();
lbsr_t get_lbsr() const;
// TA
struct ta_t {
@ -180,6 +181,7 @@ private:
}
/// Returns the SDU pointer.
const uint8_t* ptr() const { return sdu; }
uint8_t* ptr() { return sdu; }
};
@ -195,8 +197,9 @@ public:
void pack();
int unpack(const uint8_t* payload, const uint32_t& len);
uint32_t get_num_subpdus();
const mac_sch_subpdu_nr& get_subpdu(const uint32_t& index);
uint32_t get_num_subpdus() const { return subpdus.size(); }
const mac_sch_subpdu_nr& get_subpdu(const uint32_t& index) const;
mac_sch_subpdu_nr& get_subpdu(uint32_t index);
bool is_ulsch();
int init_tx(byte_buffer_t* buffer_, uint32_t pdu_len_, bool is_ulsch_ = false);

@ -50,6 +50,9 @@ public:
~rlc_um_nr();
bool configure(const rlc_config_t& cnfg);
// logging helpers
std::string get_rb_name() const;
private:
// Transmitter sub-class for NR
class rlc_um_nr_tx : public rlc_um_base_tx

@ -110,7 +110,7 @@ rach_nr_cfg_t make_mac_rach_cfg(const rach_cfg_common_s& asn1_type)
return rach_nr_cfg;
};
int make_rlc_config_t(const rlc_cfg_c& asn1_type, rlc_config_t* cfg_out)
int make_rlc_config_t(const rlc_cfg_c& asn1_type, uint8_t bearer_id, rlc_config_t* cfg_out)
{
rlc_config_t rlc_cfg = rlc_config_t::default_rlc_um_nr_config();
rlc_cfg.rat = srsran_rat_t::nr;
@ -121,7 +121,7 @@ int make_rlc_config_t(const rlc_cfg_c& asn1_type, rlc_config_t* cfg_out)
case rlc_cfg_c::types_opts::um_bi_dir:
rlc_cfg.rlc_mode = rlc_mode_t::um;
rlc_cfg.um_nr.t_reassembly_ms = asn1_type.um_bi_dir().dl_um_rlc.t_reassembly.to_number();
rlc_cfg.um_nr.bearer_id = bearer_id;
if (asn1_type.um_bi_dir().dl_um_rlc.sn_field_len_present &&
asn1_type.um_bi_dir().ul_um_rlc.sn_field_len_present &&
asn1_type.um_bi_dir().dl_um_rlc.sn_field_len != asn1_type.um_bi_dir().ul_um_rlc.sn_field_len) {

@ -143,7 +143,7 @@ void enb_bearer_manager::add_eps_bearer(uint16_t rnti, uint8_t eps_bearer_id, sr
lcid,
to_string(rat).c_str());
} else {
logger.error("Bearers: EPS bearer ID %d for rnti=0x%x already registered", eps_bearer_id, rnti);
logger.warning("Bearers: EPS bearer ID %d for rnti=0x%x already registered", eps_bearer_id, rnti);
}
}
@ -151,14 +151,14 @@ void enb_bearer_manager::remove_eps_bearer(uint16_t rnti, uint8_t eps_bearer_id)
{
auto user_it = users_map.find(rnti);
if (user_it == users_map.end()) {
logger.error("Bearers: No EPS bearer registered for rnti=0x%x", rnti);
logger.info("Bearers: No EPS bearer registered for rnti=0x%x", rnti);
return;
}
if (user_it->second.remove_eps_bearer(eps_bearer_id)) {
logger.info("Bearers: Removed mapping for EPS bearer ID %d for rnti=0x%x", eps_bearer_id, rnti);
} else {
logger.error("Bearers: Can't remove EPS bearer ID %d, rnti=0x%x", eps_bearer_id, rnti);
logger.info("Bearers: Can't remove EPS bearer ID %d, rnti=0x%x", eps_bearer_id, rnti);
}
}
@ -166,7 +166,7 @@ void enb_bearer_manager::rem_user(uint16_t rnti)
{
auto user_it = users_map.find(rnti);
if (user_it == users_map.end()) {
logger.error("Bearers: No EPS bearer registered for rnti=0x%x", rnti);
logger.info("Bearers: No EPS bearer registered for rnti=0x%x", rnti);
return;
}

@ -32,7 +32,7 @@ mac_sch_subpdu_nr::nr_lcid_sch_t mac_sch_subpdu_nr::get_type()
return CCCH;
}
bool mac_sch_subpdu_nr::is_sdu()
bool mac_sch_subpdu_nr::is_sdu() const
{
return (lcid <= 32);
}
@ -200,17 +200,17 @@ uint32_t mac_sch_subpdu_nr::write_subpdu(const uint8_t* start_)
return ptr - start_;
}
uint32_t mac_sch_subpdu_nr::get_total_length()
uint32_t mac_sch_subpdu_nr::get_total_length() const
{
return (header_length + sdu_length);
}
uint32_t mac_sch_subpdu_nr::get_sdu_length()
uint32_t mac_sch_subpdu_nr::get_sdu_length() const
{
return sdu_length;
}
uint32_t mac_sch_subpdu_nr::get_lcid()
uint32_t mac_sch_subpdu_nr::get_lcid() const
{
return lcid;
}
@ -220,10 +220,15 @@ uint8_t* mac_sch_subpdu_nr::get_sdu()
return sdu.ptr();
}
uint16_t mac_sch_subpdu_nr::get_c_rnti()
const uint8_t* mac_sch_subpdu_nr::get_sdu() const
{
return sdu.ptr();
}
uint16_t mac_sch_subpdu_nr::get_c_rnti() const
{
if (parent->is_ulsch() && lcid == CRNTI) {
uint8_t* ptr = sdu.ptr();
const uint8_t* ptr = sdu.ptr();
return le16toh((uint16_t)ptr[0] << 8 | ptr[1]);
}
return 0;
@ -258,24 +263,24 @@ mac_sch_subpdu_nr::ta_t mac_sch_subpdu_nr::get_ta()
return ta;
}
// Decodes the Short BSR / Short Truncated BSR CE from an UL-SCH subPDU.
// Returns a zero-initialized lcg_bsr_t if this subPDU is not a short BSR
// (wrong direction or LCID). The reported buffer size index is NOT mapped
// to bytes here (see class comment referencing TS 38.321).
// NOTE(review): removed the stray old non-const signature line and the
// duplicated non-const `ptr` declaration (merge residue causing a
// redefinition error); kept the const-correct versions.
mac_sch_subpdu_nr::lcg_bsr_t mac_sch_subpdu_nr::get_sbsr() const
{
  lcg_bsr_t sbsr = {};
  if (parent->is_ulsch() && (lcid == SHORT_BSR || lcid == SHORT_TRUNC_BSR)) {
    const uint8_t* ptr = sdu.ptr();
    // Single octet: LCG ID in the 3 MSBs, buffer size index in the 5 LSBs.
    sbsr.lcg_id      = (ptr[0] & 0xe0) >> 5;
    sbsr.buffer_size = ptr[0] & 0x1f;
  }
  return sbsr;
}
mac_sch_subpdu_nr::lbsr_t mac_sch_subpdu_nr::get_lbsr()
mac_sch_subpdu_nr::lbsr_t mac_sch_subpdu_nr::get_lbsr() const
{
lbsr_t lbsr = {};
lbsr.list.reserve(mac_sch_subpdu_nr::max_num_lcg_lbsr);
if (parent->is_ulsch() && (lcid == LONG_BSR || lcid == LONG_TRUNC_BSR)) {
uint8_t* ptr = sdu.ptr();
const uint8_t* ptr = sdu.ptr();
lbsr.bitmap = *ptr; // read LCG bitmap
ptr++; // skip LCG bitmap
@ -448,12 +453,12 @@ int mac_sch_pdu_nr::unpack(const uint8_t* payload, const uint32_t& len)
return SRSRAN_SUCCESS;
}
uint32_t mac_sch_pdu_nr::get_num_subpdus()
// Read-only access to the subPDU at the given index.
// Throws std::out_of_range if index is not smaller than get_num_subpdus().
// NOTE(review): removed a leftover `return subpdus.size();` line from the
// deleted out-of-line get_num_subpdus() body (merge residue; wrong return
// type, would not compile).
const mac_sch_subpdu_nr& mac_sch_pdu_nr::get_subpdu(const uint32_t& index) const
{
  return subpdus.at(index);
}
const mac_sch_subpdu_nr& mac_sch_pdu_nr::get_subpdu(const uint32_t& index)
// Mutable access to the subPDU at the given index (bounds-checked;
// throws std::out_of_range on an invalid index).
mac_sch_subpdu_nr& mac_sch_pdu_nr::get_subpdu(uint32_t index)
{
  auto& subpdu = subpdus.at(index);
  return subpdu;
}
@ -560,11 +565,11 @@ uint32_t mac_sch_pdu_nr::add_sudpdu(mac_sch_subpdu_nr& subpdu)
return SRSRAN_SUCCESS;
}
void mac_sch_pdu_nr::to_string(fmt::memory_buffer& buffer)
// Appends a human-readable summary of this PDU to fmtbuffer: the link
// direction ("UL"/"DL") followed by the summary of every contained subPDU.
// NOTE(review): removed the interleaved leftover lines that still referenced
// the old parameter name `buffer` (merge residue; undeclared identifier).
void mac_sch_pdu_nr::to_string(fmt::memory_buffer& fmtbuffer)
{
  fmt::format_to(fmtbuffer, "{}", is_ulsch() ? "UL" : "DL");
  for (auto& subpdu : subpdus) {
    subpdu.to_string(fmtbuffer);
  }
}

@ -643,8 +643,11 @@ void radio::set_rx_freq(const uint32_t& carrier_idx, const double& freq)
for (uint32_t i = 0; i < nof_antennas; i++) {
channel_mapping::device_mapping_t dm = rx_channel_mapping.get_device_mapping(carrier_idx, i);
if (dm.device_idx >= rf_devices.size() or dm.carrier_idx >= nof_channels_x_dev) {
logger.error(
"Invalid port mapping %d:%d to logical carrier %d on f_rx=%.1f MHz", carrier_idx, i, freq / 1e6);
logger.error("Invalid port mapping %d:%d to logical carrier %d on f_rx=%.1f MHz",
dm.device_idx,
dm.channel_idx,
carrier_idx,
freq / 1e6);
return;
}
@ -779,8 +782,11 @@ void radio::set_tx_freq(const uint32_t& carrier_idx, const double& freq)
for (uint32_t i = 0; i < nof_antennas; i++) {
device_mapping = tx_channel_mapping.get_device_mapping(carrier_idx, i);
if (device_mapping.device_idx >= rf_devices.size() or device_mapping.carrier_idx >= nof_channels_x_dev) {
logger.error(
"Invalid port mapping %d:%d to logical carrier %d on f_rx=%.1f MHz", carrier_idx, i, freq / 1e6);
logger.error("Invalid port mapping %d:%d to logical carrier %d on f_rx=%.1f MHz",
device_mapping.device_idx,
device_mapping.channel_idx,
carrier_idx,
freq / 1e6);
return;
}

@ -42,12 +42,12 @@ rlc_um_nr::~rlc_um_nr()
bool rlc_um_nr::configure(const rlc_config_t& cnfg_)
{
// determine bearer name and configure Rx/Tx objects
rb_name = get_rb_name(rrc, lcid, cnfg_.um.is_mrb);
// store config
cfg = cnfg_;
// determine bearer name and configure Rx/Tx objects
rb_name = get_rb_name();
rx.reset(new rlc_um_nr_rx(this));
if (not rx->configure(cfg, rb_name)) {
return false;
@ -70,6 +70,16 @@ bool rlc_um_nr::configure(const rlc_config_t& cnfg_)
return true;
}
/****************************************************************************
* Logging helpers
***************************************************************************/
// Builds the radio-bearer name used for logging, e.g. "DRB1", from the
// configured NR UM bearer id.
// NOTE(review): assumes the bearer is a DRB — bearer_id is stored in
// cfg.um_nr, which the config code notes is not part of TS 38.322.
std::string rlc_um_nr::get_rb_name() const
{
  return fmt::format("DRB{}", cfg.um_nr.bearer_id);
}
/****************************************************************************
* Tx Subclass implementation
***************************************************************************/
@ -262,6 +272,8 @@ bool rlc_um_nr::rlc_um_nr_rx::configure(const rlc_config_t& cnfg_, std::string r
[this](uint32_t tid) { timer_expired(tid); });
}
rb_name = rb_name_;
return true;
}

@ -44,7 +44,8 @@ int test_rlc_config()
srslog::fetch_basic_logger("RRC").info("RLC NR Config: \n %s", jw.to_string().c_str());
rlc_config_t rlc_cfg;
TESTASSERT(make_rlc_config_t(rlc_cfg_asn1, &rlc_cfg) == SRSRAN_SUCCESS);
// We hard-code the bearer_id=1 and rb_type=DRB
TESTASSERT(make_rlc_config_t(rlc_cfg_asn1, /* bearer_id */ 1, &rlc_cfg) == SRSRAN_SUCCESS);
TESTASSERT(rlc_cfg.rat == srsran_rat_t::nr);
TESTASSERT(rlc_cfg.um_nr.sn_field_length == rlc_um_nr_sn_size_t::size12bits);
return SRSRAN_SUCCESS;

@ -107,7 +107,8 @@ rx_gain = 40
# add an entry with DLT=150, Payload Protocol=s1ap.
#
# mac_enable: Enable MAC layer packet captures (true/false)
# mac_filename: File path to use for packet captures
# filename: File path to use for LTE MAC packet captures
# nr_filename: File path to use for NR MAC packet captures
# s1ap_enable: Enable S1AP packet captures (true/false)
# s1ap_filename: File path to use for S1AP packet captures
#
@ -118,16 +119,17 @@ rx_gain = 40
# client_port Client IP address for MAC network trace (default: 5847)
#####################################################################
[pcap]
enable = false
filename = /tmp/enb.pcap
s1ap_enable = false
s1ap_filename = /tmp/enb_s1ap.pcap
#enable = false
#filename = /tmp/enb_mac.pcap
#nr_filename = /tmp/enb_mac_nr.pcap
#s1ap_enable = false
#s1ap_filename = /tmp/enb_s1ap.pcap
mac_net_enable = false
bind_ip = 0.0.0.0
bind_port = 5687
client_ip = 127.0.0.1
client_port = 5847
#mac_net_enable = false
#bind_ip = 0.0.0.0
#bind_port = 5687
#client_ip = 127.0.0.1
#client_port = 5847
#####################################################################
# Log configuration

@ -81,8 +81,8 @@ public:
void toggle_padding() override {}
int slot_indication(const srsran_slot_cfg_t& slot_cfg) override;
int get_dl_sched(const srsran_slot_cfg_t& slot_cfg, dl_sched_t& dl_sched) override;
int get_ul_sched(const srsran_slot_cfg_t& slot_cfg, ul_sched_t& ul_sched) override;
dl_sched_t* get_dl_sched(const srsran_slot_cfg_t& slot_cfg) override;
ul_sched_t* get_ul_sched(const srsran_slot_cfg_t& slot_cfg) override;
int pucch_info(const srsran_slot_cfg_t& slot_cfg, const pucch_info_t& pucch_info) override;
int pusch_info(const srsran_slot_cfg_t& slot_cfg, pusch_info_t& pusch_info) override;
void rach_detected(const rach_info_t& rach_info) override;

@ -53,8 +53,8 @@ public:
void config_lcid(uint32_t lcid, const mac_lc_ch_cfg_t& bearer_cfg);
// Buffer Status update
void ul_bsr(uint32_t lcg_id, uint32_t val);
void dl_buffer_state(uint8_t lcid, uint32_t tx_queue, uint32_t prio_tx_queue);
int ul_bsr(uint32_t lcg_id, uint32_t val);
int dl_buffer_state(uint8_t lcid, uint32_t tx_queue, uint32_t prio_tx_queue);
// Configuration getters
uint16_t get_rnti() const { return rnti; }

@ -27,7 +27,6 @@
#include "srsenb/hdr/common/rnti_pool.h"
#include "srsenb/hdr/stack/enb_stack_base.h"
#include "srsenb/hdr/stack/mac/nr/sched_nr.h"
#include "srsenb/hdr/stack/mac/nr/ue_nr.h"
#include "srsran/common/task_scheduler.h"
#include "srsran/interfaces/enb_metrics_interface.h"
@ -44,6 +43,8 @@ struct mac_nr_args_t {
srsenb::pcap_args_t pcap;
};
class sched_nr;
class mac_nr final : public mac_interface_phy_nr, public mac_interface_rrc_nr, public mac_interface_rlc_nr
{
public:
@ -72,8 +73,8 @@ public:
// Interface for PHY
int slot_indication(const srsran_slot_cfg_t& slot_cfg) override;
int get_dl_sched(const srsran_slot_cfg_t& slot_cfg, dl_sched_t& dl_sched) override;
int get_ul_sched(const srsran_slot_cfg_t& slot_cfg, ul_sched_t& ul_sched) override;
dl_sched_t* get_dl_sched(const srsran_slot_cfg_t& slot_cfg) override;
ul_sched_t* get_ul_sched(const srsran_slot_cfg_t& slot_cfg) override;
int pucch_info(const srsran_slot_cfg_t& slot_cfg, const pucch_info_t& pucch_info) override;
int pusch_info(const srsran_slot_cfg_t& slot_cfg, pusch_info_t& pusch_info) override;
void rach_detected(const rach_info_t& rach_info) override;
@ -119,7 +120,7 @@ private:
std::atomic<bool> started = {false};
const static uint32_t NUMEROLOGY_IDX = 0; /// only 15kHz supported at this stage
srsenb::sched_nr sched;
std::unique_ptr<srsenb::sched_nr> sched;
std::vector<sched_nr_interface::cell_cfg_t> cell_config;
// Map of active UEs

@ -35,23 +35,23 @@ extern "C" {
namespace srsenb {
namespace sched_nr_impl {
class sched_worker_manager;
class serv_cell_manager;
} // namespace sched_nr_impl
class ul_sched_result_buffer;
class cc_worker;
} // namespace sched_nr_impl
class sched_nr final : public sched_nr_interface
{
public:
explicit sched_nr();
~sched_nr() override;
void stop();
int config(const sched_args_t& sched_cfg, srsran::const_span<cell_cfg_t> cell_list) override;
void ue_cfg(uint16_t rnti, const ue_cfg_t& cfg) override;
void ue_rem(uint16_t rnti) override;
bool ue_exists(uint16_t rnti) override;
int dl_rach_info(uint32_t cc, const rar_info_t& rar_info);
int dl_rach_info(const rar_info_t& rar_info, const ue_cfg_t& uecfg);
void dl_ack_info(uint16_t rnti, uint32_t cc, uint32_t pid, uint32_t tb_idx, bool ack) override;
void ul_crc_info(uint16_t rnti, uint32_t cc, uint32_t pid, bool crc) override;
@ -59,30 +59,38 @@ public:
void ul_bsr(uint16_t rnti, uint32_t lcg_id, uint32_t bsr) override;
void dl_buffer_state(uint16_t rnti, uint32_t lcid, uint32_t newtx, uint32_t retx);
int run_slot(slot_point pdsch_tti, uint32_t cc, dl_res_t& result) override;
int get_ul_sched(slot_point pusch_tti, uint32_t cc, ul_res_t& result) override;
/// Called once per slot in a non-concurrent fashion
void slot_indication(slot_point slot_tx) override;
dl_res_t* get_dl_sched(slot_point pdsch_tti, uint32_t cc) override;
ul_res_t* get_ul_sched(slot_point pusch_tti, uint32_t cc) override;
void get_metrics(mac_metrics_t& metrics);
private:
void ue_cfg_impl(uint16_t rnti, const ue_cfg_t& cfg);
int ue_cfg_impl(uint16_t rnti, const ue_cfg_t& cfg);
int add_ue_impl(uint16_t rnti, std::unique_ptr<sched_nr_impl::ue> u);
// args
sched_nr_impl::sched_params cfg;
sched_nr_impl::sched_params_t cfg;
srslog::basic_logger* logger = nullptr;
using sched_worker_manager = sched_nr_impl::sched_worker_manager;
std::unique_ptr<sched_worker_manager> sched_workers;
// slot-specific
slot_point current_slot_tx;
std::atomic<int> worker_count{0};
using slot_cc_worker = sched_nr_impl::cc_worker;
std::vector<std::unique_ptr<sched_nr_impl::cc_worker> > cc_workers;
using ue_map_t = sched_nr_impl::ue_map_t;
std::mutex ue_db_mutex;
ue_map_t ue_db;
// management of Sched Result buffering
std::unique_ptr<ul_sched_result_buffer> pending_results;
// Feedback management
class event_manager;
std::unique_ptr<event_manager> pending_events;
// management of cell resources
std::vector<std::unique_ptr<sched_nr_impl::serv_cell_manager> > cells;
// metrics extraction
class ue_metrics_manager;
std::unique_ptr<ue_metrics_manager> metrics_handler;
};
} // namespace srsenb

@ -19,8 +19,8 @@
*
*/
#ifndef SRSRAN_SCHED_NR_CELL_H
#define SRSRAN_SCHED_NR_CELL_H
#ifndef SRSRAN_SCHED_NR_BWP_H
#define SRSRAN_SCHED_NR_BWP_H
#include "sched_nr_cfg.h"
#include "sched_nr_grant_allocator.h"
@ -88,10 +88,10 @@ private:
srsran::deque<pending_rar_t> pending_rars;
};
class bwp_ctxt
class bwp_manager
{
public:
explicit bwp_ctxt(const bwp_params_t& bwp_cfg);
explicit bwp_manager(const bwp_params_t& bwp_cfg);
const bwp_params_t* cfg;
@ -103,21 +103,7 @@ public:
bwp_res_grid grid;
};
class serv_cell_manager
{
public:
using feedback_callback_t = srsran::move_callback<void(ue_carrier&)>;
explicit serv_cell_manager(const cell_params_t& cell_cfg_);
srsran::bounded_vector<bwp_ctxt, SCHED_NR_MAX_BWP_PER_CELL> bwps;
const cell_params_t& cfg;
private:
srslog::basic_logger& logger;
};
} // namespace sched_nr_impl
} // namespace srsenb
#endif // SRSRAN_SCHED_NR_CELL_H
#endif // SRSRAN_SCHED_NR_BWP_H

@ -116,26 +116,30 @@ struct cell_params_t {
};
/// Structure packing both the sched args and all gNB NR cell configurations
struct sched_params {
struct sched_params_t {
sched_args_t sched_cfg;
std::vector<cell_params_t> cells;
sched_params() = default;
explicit sched_params(const sched_args_t& sched_cfg_);
sched_params_t() = default;
explicit sched_params_t(const sched_args_t& sched_cfg_);
};
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/// Configuration of a UE for a given BWP
class bwp_ue_cfg
/// Semi-static configuration of a UE for a given CC.
class ue_carrier_params_t
{
public:
bwp_ue_cfg() = default;
explicit bwp_ue_cfg(uint16_t rnti, const bwp_params_t& bwp_cfg, const ue_cfg_t& uecfg_);
ue_carrier_params_t() = default;
explicit ue_carrier_params_t(uint16_t rnti, const bwp_params_t& active_bwp_cfg, const ue_cfg_t& uecfg_);
const ue_cfg_t* ue_cfg() const { return cfg_; }
uint16_t rnti = SRSRAN_INVALID_RNTI;
uint32_t cc = SRSRAN_MAX_CARRIERS;
const ue_cfg_t& ue_cfg() const { return *cfg_; }
const srsran::phy_cfg_nr_t& phy() const { return cfg_->phy_cfg; }
const bwp_params_t& active_bwp() const { return *bwp_cfg; }
srsran::const_span<uint32_t> cce_pos_list(uint32_t search_id, uint32_t slot_idx, uint32_t aggr_idx) const
{
if (cce_positions_list.size() > ss_id_to_cce_idx[search_id]) {
@ -159,7 +163,6 @@ public:
int fixed_pusch_mcs() const { return bwp_cfg->sched_cfg.fixed_ul_mcs; }
private:
uint16_t rnti = SRSRAN_INVALID_RNTI;
const ue_cfg_t* cfg_ = nullptr;
const bwp_params_t* bwp_cfg = nullptr;
@ -167,37 +170,6 @@ private:
std::array<uint32_t, SRSRAN_UE_DL_NR_MAX_NOF_SEARCH_SPACE> ss_id_to_cce_idx;
};
class ue_cfg_extended : public ue_cfg_t
{
public:
struct search_space_params {
const srsran_search_space_t* cfg;
bwp_cce_pos_list cce_positions;
};
struct coreset_params {
srsran_coreset_t* cfg = nullptr;
std::vector<uint32_t> ss_list;
};
struct bwp_params {
std::array<srsran::optional<search_space_params>, SRSRAN_UE_DL_NR_MAX_NOF_SEARCH_SPACE> ss_list;
std::vector<coreset_params> coresets;
};
struct cc_params {
srsran::bounded_vector<bwp_params, SCHED_NR_MAX_BWP_PER_CELL> bwps;
};
ue_cfg_extended() = default;
explicit ue_cfg_extended(uint16_t rnti, const ue_cfg_t& uecfg);
const bwp_cce_pos_list& get_dci_pos_list(uint32_t cc, uint32_t bwp_id, uint32_t search_space_id) const
{
return cc_params[cc].bwps[bwp_id].ss_list[search_space_id]->cce_positions;
}
uint16_t rnti;
std::vector<cc_params> cc_params;
};
} // namespace sched_nr_impl
} // namespace srsenb

@ -58,15 +58,9 @@ struct bwp_slot_grid {
bwp_rb_bitmap dl_prbs;
bwp_rb_bitmap ul_prbs;
ssb_list ssb;
nzp_csi_rs_list nzp_csi_rs;
pdcch_dl_list_t dl_pdcchs;
pdcch_ul_list_t ul_pdcchs;
pdsch_list_t pdschs;
pucch_list_t pucch;
sched_rar_list_t rar;
dl_sched_res_t dl;
ul_sched_t ul;
slot_coreset_list coresets;
pusch_list_t puschs;
harq_ack_list_t pending_acks;
srsran::unique_pool_ptr<tx_harq_softbuffer> rar_softbuffer;
@ -103,13 +97,7 @@ private:
class bwp_slot_allocator
{
public:
explicit bwp_slot_allocator(bwp_res_grid& bwp_grid_);
void new_slot(slot_point pdcch_slot_, slot_ue_map_t& ues_)
{
pdcch_slot = pdcch_slot_;
slot_ues = &ues_;
}
explicit bwp_slot_allocator(bwp_res_grid& bwp_grid_, slot_point pdcch_slot_, slot_ue_map_t& ues_);
alloc_result alloc_si(uint32_t aggr_idx, uint32_t si_idx, uint32_t si_ntx, const prb_interval& prbs);
alloc_result alloc_rar_and_msg3(uint16_t ra_rnti,
@ -126,14 +114,16 @@ public:
const bwp_params_t& cfg;
private:
alloc_result verify_pdsch_space(bwp_slot_grid& pdsch_grid, bwp_slot_grid& pdcch_grid) const;
alloc_result
verify_pdsch_space(bwp_slot_grid& pdsch_grid, bwp_slot_grid& pdcch_grid, bwp_slot_grid* uci_grid = nullptr) const;
alloc_result verify_pusch_space(bwp_slot_grid& pusch_grid, bwp_slot_grid* pdcch_grid = nullptr) const;
alloc_result verify_ue_cfg(const ue_carrier_params_t& ue_cfg, harq_proc* harq) const;
srslog::basic_logger& logger;
bwp_res_grid& bwp_grid;
slot_point pdcch_slot;
slot_ue_map_t* slot_ues = nullptr;
slot_ue_map_t& slot_ues;
};
} // namespace sched_nr_impl

@ -62,16 +62,6 @@ public:
///// Configuration /////
struct pdsch_td_res_alloc {
uint8_t k0 = 0; // 0..32
uint8_t k1 = 4; // 0..32
};
using pdsch_td_res_alloc_list = srsran::bounded_vector<pdsch_td_res_alloc, MAX_GRANTS>;
struct pusch_td_res_alloc {
uint8_t k2 = 4; // 0..32
};
using pusch_td_res_alloc_list = srsran::bounded_vector<pusch_td_res_alloc, MAX_GRANTS>;
struct bwp_cfg_t {
uint32_t start_rb = 0;
uint32_t rb_width = 100;
@ -127,18 +117,18 @@ public:
using sched_rar_list_t = srsran::bounded_vector<rar_t, MAX_GRANTS>;
struct dl_res_t {
sched_rar_list_t& rar;
dl_sched_t& dl_sched;
dl_res_t(sched_rar_list_t& rar_, dl_sched_t& dl_sched_) : rar(rar_), dl_sched(dl_sched_) {}
dl_sched_t phy;
sched_rar_list_t rar;
};
virtual ~sched_nr_interface() = default;
virtual int config(const sched_args_t& sched_cfg, srsran::const_span<sched_nr_interface::cell_cfg_t> ue_cfg) = 0;
virtual void ue_cfg(uint16_t rnti, const ue_cfg_t& ue_cfg) = 0;
virtual void ue_rem(uint16_t rnti) = 0;
virtual bool ue_exists(uint16_t rnti) = 0;
virtual int run_slot(slot_point slot_rx, uint32_t cc, dl_res_t& result) = 0;
virtual int get_ul_sched(slot_point slot_rx, uint32_t cc, ul_res_t& result) = 0;
virtual void slot_indication(slot_point slot_tx) = 0;
virtual dl_res_t* get_dl_sched(slot_point slot_rx, uint32_t cc) = 0;
virtual ul_res_t* get_ul_sched(slot_point slot_rx, uint32_t cc) = 0;
virtual void dl_ack_info(uint16_t rnti, uint32_t cc, uint32_t pid, uint32_t tb_idx, bool ack) = 0;
virtual void ul_crc_info(uint16_t rnti, uint32_t cc, uint32_t pid, bool crc) = 0;

@ -38,8 +38,6 @@ enum class pdcch_grant_type_t { sib, rar, dl_data, ul_data };
class slot_ue;
using bwp_cfg_t = sched_nr_interface::bwp_cfg_t;
class coreset_region
{
public:
@ -57,7 +55,10 @@ public:
* @param user UE object or null in case of broadcast/RAR/paging allocation
* @return if the allocation was successful
*/
bool alloc_dci(pdcch_grant_type_t alloc_type, uint32_t aggr_idx, uint32_t search_space_id, slot_ue* user = nullptr);
bool alloc_dci(pdcch_grant_type_t alloc_type,
uint32_t aggr_idx,
uint32_t search_space_id,
const ue_carrier_params_t* user = nullptr);
void rem_last_dci();
@ -79,7 +80,7 @@ private:
uint32_t ss_id;
uint32_t idx;
pdcch_grant_type_t alloc_type;
slot_ue* ue;
const ue_carrier_params_t* ue;
};
srsran::bounded_vector<alloc_record, 2 * MAX_GRANTS> dci_list;
pdcch_dl_list_t& pdcch_dl_list;

@ -35,52 +35,22 @@ namespace srsenb {
namespace sched_nr_impl {
class ue_carrier;
class slot_ue
{
public:
slot_ue() = default;
explicit slot_ue(uint16_t rnti_, slot_point slot_rx_, uint32_t cc);
slot_ue(slot_ue&&) noexcept = default;
slot_ue& operator=(slot_ue&&) noexcept = default;
bool empty() const { return rnti == SCHED_NR_INVALID_RNTI; }
void release() { rnti = SCHED_NR_INVALID_RNTI; }
uint16_t rnti = SCHED_NR_INVALID_RNTI;
slot_point slot_rx;
uint32_t cc = SCHED_NR_MAX_CARRIERS;
// UE parameters common to all sectors
int dl_pending_bytes = 0, ul_pending_bytes = 0;
// UE parameters that are sector specific
const bwp_ue_cfg* cfg = nullptr;
harq_entity* harq_ent = nullptr;
slot_point pdcch_slot;
slot_point pdsch_slot;
slot_point pusch_slot;
slot_point uci_slot;
uint32_t dl_cqi = 0;
uint32_t ul_cqi = 0;
dl_harq_proc* h_dl = nullptr;
ul_harq_proc* h_ul = nullptr;
srsran_uci_cfg_nr_t uci_cfg = {};
};
class slot_ue;
class ue_carrier
{
public:
ue_carrier(uint16_t rnti, const ue_cfg_t& cfg, const cell_params_t& cell_params_);
void set_cfg(const ue_cfg_t& ue_cfg);
/// Called after CC Feedback has been processed
void new_slot(slot_point slot_tx);
void set_cfg(const ue_cfg_t& ue_cfg);
const ue_carrier_params_t& cfg() const { return bwp_cfg; }
slot_ue try_reserve(slot_point pdcch_slot, uint32_t dl_harq_bytes, uint32_t ul_harq_bytes);
int dl_ack_info(uint32_t pid, uint32_t tb_idx, bool ack);
int ul_crc_info(uint32_t pid, bool crc);
const uint16_t rnti;
const uint32_t cc;
const cell_params_t& cell_params;
// Channel state
uint32_t dl_cqi = 1;
@ -90,25 +60,27 @@ public:
// metrics
mac_ue_metrics_t metrics = {};
std::mutex metrics_mutex;
private:
bwp_ue_cfg bwp_cfg;
const cell_params_t& cell_params;
friend class slot_ue;
srslog::basic_logger& logger;
ue_carrier_params_t bwp_cfg;
};
class ue
{
public:
ue(uint16_t rnti, const ue_cfg_t& cfg, const sched_params& sched_cfg_);
ue(uint16_t rnti, const ue_cfg_t& cfg, const sched_params_t& sched_cfg_);
void new_slot(slot_point pdcch_slot);
slot_ue try_reserve(slot_point pdcch_slot, uint32_t cc);
slot_ue make_slot_ue(slot_point pdcch_slot, uint32_t cc);
void set_cfg(const ue_cfg_t& cfg);
const ue_cfg_t& cfg() const { return ue_cfg; }
/// UE state feedback
void rlc_buffer_state(uint32_t lcid, uint32_t newtx, uint32_t retx) { buffers.dl_buffer_state(lcid, newtx, retx); }
void ul_bsr(uint32_t lcg, uint32_t bsr_val) { buffers.ul_bsr(lcg, bsr_val); }
void ul_sr_info() { last_sr_slot = last_pdcch_slot - TX_ENB_DELAY; }
@ -124,9 +96,10 @@ public:
ue_buffer_manager<true> buffers;
std::array<std::unique_ptr<ue_carrier>, SCHED_NR_MAX_CARRIERS> carriers;
private:
const uint16_t rnti;
const sched_params& sched_cfg;
private:
const sched_params_t& sched_cfg;
slot_point last_pdcch_slot;
slot_point last_sr_slot;
@ -135,6 +108,41 @@ private:
ue_cfg_t ue_cfg;
};
class slot_ue
{
public:
slot_ue() = default;
explicit slot_ue(ue_carrier& ue, slot_point slot_tx_, uint32_t dl_pending_bytes, uint32_t ul_pending_bytes);
slot_ue(slot_ue&&) noexcept = default;
slot_ue& operator=(slot_ue&&) noexcept = default;
bool empty() const { return ue == nullptr; }
void release() { ue = nullptr; }
const ue_carrier_params_t& cfg() const { return ue->bwp_cfg; }
const ue_carrier_params_t& operator*() const { return ue->bwp_cfg; }
const ue_carrier_params_t* operator->() const { return &ue->bwp_cfg; }
// mutable interface to ue_carrier state
dl_harq_proc* find_empty_dl_harq() { return ue->harq_ent.find_empty_dl_harq(); }
ul_harq_proc* find_empty_ul_harq() { return ue->harq_ent.find_empty_ul_harq(); }
// UE parameters common to all sectors
uint32_t dl_bytes = 0, ul_bytes = 0;
// UE parameters that are sector specific
bool dl_active;
bool ul_active;
slot_point pdcch_slot;
slot_point pdsch_slot;
slot_point pusch_slot;
slot_point uci_slot;
dl_harq_proc* h_dl = nullptr;
ul_harq_proc* h_ul = nullptr;
private:
ue_carrier* ue = nullptr;
};
using ue_map_t = rnti_map_t<std::unique_ptr<ue> >;
using slot_ue_map_t = rnti_map_t<slot_ue>;

@ -22,7 +22,7 @@
#ifndef SRSRAN_SCHED_NR_WORKER_H
#define SRSRAN_SCHED_NR_WORKER_H
#include "sched_nr_cell.h"
#include "sched_nr_bwp.h"
#include "sched_nr_cfg.h"
#include "sched_nr_grant_allocator.h"
#include "sched_nr_ue.h"
@ -39,109 +39,30 @@ struct mac_metrics_t;
namespace sched_nr_impl {
class slot_cc_worker
class cc_worker
{
public:
using feedback_callback_t = srsran::move_callback<void(ue_carrier&)>;
explicit cc_worker(const cell_params_t& params);
explicit slot_cc_worker(serv_cell_manager& sched);
void dl_rach_info(const sched_nr_interface::rar_info_t& rar_info);
void run(slot_point pdcch_slot, ue_map_t& ue_db_);
bool running() const { return slot_rx.valid(); }
void enqueue_cc_event(srsran::move_callback<void()> ev);
/// Enqueue feedback directed at a given UE in a given cell
void enqueue_cc_feedback(uint16_t rnti, feedback_callback_t fdbk);
private:
/// Run all pending feedback. This should be called at the beginning of a TTI
void run_feedback(ue_map_t& ue_db);
void alloc_dl_ues();
void alloc_ul_ues();
void postprocess_decisions();
dl_sched_res_t* run_slot(slot_point pdcch_slot, ue_map_t& ue_db_);
ul_sched_t* get_ul_sched(slot_point sl);
// const params
const cell_params_t& cfg;
serv_cell_manager& cell;
srslog::basic_logger& logger;
slot_point slot_rx;
bwp_slot_allocator bwp_alloc;
// Process of UE cell-specific feedback
struct feedback_t {
uint16_t rnti;
feedback_callback_t fdbk;
};
std::mutex feedback_mutex;
srsran::deque<feedback_t> pending_feedback, tmp_feedback_to_run;
srsran::deque<srsran::move_callback<void()> > pending_events, tmp_events_to_run;
slot_ue_map_t slot_ues;
};
/// Manages the per-carrier slot workers and synchronizes slot-wide events,
/// feedback and metrics requests across carriers.
class sched_worker_manager
{
/// Synchronization context shared by all workers running the same slot
struct slot_worker_ctxt {
std::mutex slot_mutex; // lock of all workers of the same slot.
std::condition_variable cvar;
slot_point slot_rx;
int nof_workers_waiting = 0;
std::atomic<int> worker_count{0}; // variable shared across slot_cc_workers
std::vector<slot_cc_worker> workers;
};
public:
explicit sched_worker_manager(ue_map_t& ue_db_,
const sched_params& cfg_,
srsran::span<std::unique_ptr<serv_cell_manager> > cells_);
// Non-copyable and non-movable: workers keep references into internal state
sched_worker_manager(const sched_worker_manager&) = delete;
sched_worker_manager(sched_worker_manager&&) = delete;
~sched_worker_manager();
/// Run scheduling for the given slot and carrier, writing results into dl_res/ul_res
void run_slot(slot_point slot_tx, uint32_t cc, dl_sched_res_t& dl_res, ul_sched_t& ul_res);
void get_metrics(mac_metrics_t& metrics);
/// Enqueue an event directed at a given UE, deferred to the next slot
void enqueue_event(uint16_t rnti, srsran::move_callback<void()> ev);
/// Enqueue an event specific to a given carrier
void enqueue_cc_event(uint32_t cc, srsran::move_callback<void()> ev);
/// Forward UE feedback to the worker of the respective carrier.
/// NOTE(review): cc indexes cc_worker_list unchecked — confirm callers validate it
void enqueue_cc_feedback(uint16_t rnti, uint32_t cc, slot_cc_worker::feedback_callback_t fdbk)
{
cc_worker_list[cc]->worker.enqueue_cc_feedback(rnti, std::move(fdbk));
}
// cc-specific resources
srsran::bounded_vector<bwp_manager, SCHED_NR_MAX_BWP_PER_CELL> bwps;
private:
/// Apply pending slot events to the UE database; locked_context tells whether slot_mutex is already held
void update_ue_db(slot_point slot_tx, bool locked_context);
void get_metrics_nolocking(mac_metrics_t& metrics);
bool save_sched_result(slot_point pdcch_slot, uint32_t cc, dl_sched_res_t& dl_res, ul_sched_t& ul_res);
void alloc_dl_ues(bwp_slot_allocator& bwp_alloc);
void alloc_ul_ues(bwp_slot_allocator& bwp_alloc);
void postprocess_decisions(bwp_slot_allocator& bwp_alloc);
const sched_params& cfg;
ue_map_t& ue_db;
srsran::span<std::unique_ptr<serv_cell_manager> > cells;
srslog::basic_logger& logger;
/// Deferred event bound to a specific UE (rnti)
struct ue_event_t {
uint16_t rnti;
srsran::move_callback<void()> callback;
};
std::mutex event_mutex; // guards next_slot_events/slot_events
srsran::deque<ue_event_t> next_slot_events, slot_events;
std::vector<std::unique_ptr<slot_worker_ctxt> > slot_worker_ctxts;
/// Per-carrier worker plus the condition variable used to wait on it
struct cc_context {
std::condition_variable cvar;
int waiting = 0;
slot_cc_worker worker;
cc_context(serv_cell_manager& sched) : worker(sched) {}
};
std::mutex slot_mutex; // serializes the start of a new slot across carriers
std::condition_variable cvar;
slot_point current_slot;
std::atomic<int> worker_count{0}; // variable shared across slot_cc_workers
std::vector<std::unique_ptr<cc_context> > cc_worker_list;
// {slot,cc} specific variables
slot_ue_map_t slot_ues;
};
} // namespace sched_nr_impl

@ -80,7 +80,7 @@ public:
private:
// helper methods
uint32_t buff_size_field_to_bytes(uint32_t buff_size_index, const srsran::bsr_format_nr_t& format);
int process_ce_subpdu(srsran::mac_sch_subpdu_nr& subpdu);
int process_ce_subpdu(const srsran::mac_sch_subpdu_nr& subpdu);
rlc_interface_mac* rlc = nullptr;
rrc_interface_mac_nr* rrc = nullptr;

@ -41,7 +41,6 @@ public:
// Inherited methods from ue_buffer_manager base class
using base_type::config_lcid;
using base_type::dl_buffer_state;
using base_type::get_bsr;
using base_type::get_bsr_state;
using base_type::get_dl_prio_tx;
@ -51,8 +50,9 @@ public:
using base_type::is_bearer_dl;
using base_type::is_bearer_ul;
using base_type::is_lcg_active;
using base_type::ul_bsr;
void dl_buffer_state(uint8_t lcid, uint32_t tx_queue, uint32_t prio_tx_queue);
void ul_bsr(uint32_t lcg_id, uint32_t val);
void ul_buffer_add(uint8_t lcid, uint32_t bytes);
int alloc_rlc_pdu(sched_interface::dl_sched_pdu_t* lcid, int rem_bytes);

@ -102,7 +102,7 @@ public:
/// Called after RRCReestablishmentComplete, to add E-RABs of old rnti
void reestablish_bearers(bearer_cfg_handler&& old_rnti_bearers);
int add_erab(uint8_t erab_id,
int addmod_erab(uint8_t erab_id,
const asn1::s1ap::erab_level_qos_params_s& qos,
const asn1::bounded_bitstring<1, 160, true, true>& addr,
uint32_t teid_out,

@ -110,6 +110,21 @@ public:
int set_aggregate_max_bitrate(uint16_t rnti, const asn1::ngap_nr::ue_aggregate_maximum_bit_rate_s& rates);
int allocate_lcid(uint16_t rnti);
// logging
typedef enum { Rx = 0, Tx } direction_t;
template <class T>
void log_rrc_message(const std::string& source,
const direction_t dir,
const asn1::dyn_octstring& oct,
const T& msg,
const std::string& msg_type);
template <class T>
void log_rrc_message(const std::string& source,
const direction_t dir,
const srsran::byte_buffer_t& pdu,
const T& msg,
const std::string& msg_type);
class ue
{
public:
@ -195,6 +210,10 @@ public:
int add_drb();
// logging helpers
template <class T, class M>
void log_rrc_message(const direction_t dir, const M& pdu, const T& msg, const std::string& msg_type);
// state
rrc_nr_state_t state = rrc_nr_state_t::RRC_IDLE;
uint8_t transaction_id = 0;
@ -247,10 +266,9 @@ private:
/// This gets called by rrc_nr::sgnb_addition_request and WILL NOT TRIGGER the RX MSG3 activity timer
int add_user(uint16_t rnti, const sched_nr_ue_cfg_t& uecfg, bool start_msg3_timer);
// logging
typedef enum { Rx = 0, Tx } direction_t;
// Helper to create PDU from RRC message
template <class T>
void log_rrc_message(const std::string& source, direction_t dir, const srsran::byte_buffer_t* pdu, const T& msg);
srsran::unique_byte_buffer_t pack_into_pdu(const T& msg);
};
} // namespace srsenb

@ -262,6 +262,9 @@ private:
bool send_erab_release_indication(const std::vector<uint16_t>& erabs_successfully_released);
bool send_ue_cap_info_indication(srsran::unique_byte_buffer_t ue_radio_cap);
/// TS 36.413 8.4.5 - Handover Cancellation
void send_ho_cancel(const asn1::s1ap::cause_c& cause);
bool was_uectxtrelease_requested() const { return release_requested; }
void

@ -142,10 +142,10 @@ void parse_args(all_args_t* args, int argc, char* argv[])
/* PCAP */
("pcap.enable", bpo::value<bool>(&args->stack.mac_pcap.enable)->default_value(false), "Enable MAC packet captures for wireshark")
("pcap.filename", bpo::value<string>(&args->stack.mac_pcap.filename)->default_value("enb_mac.pcap"), "MAC layer capture filename")
("pcap.nr_filename", bpo::value<string>(&args->nr_stack.mac.pcap.filename)->default_value("enb_mac_nr.pcap"), "NR MAC layer capture filename")
("pcap.filename", bpo::value<string>(&args->stack.mac_pcap.filename)->default_value("/tmp/enb_mac.pcap"), "MAC layer capture filename")
("pcap.nr_filename", bpo::value<string>(&args->nr_stack.mac.pcap.filename)->default_value("/tmp/enb_mac_nr.pcap"), "NR MAC layer capture filename")
("pcap.s1ap_enable", bpo::value<bool>(&args->stack.s1ap_pcap.enable)->default_value(false), "Enable S1AP packet captures for wireshark")
("pcap.s1ap_filename", bpo::value<string>(&args->stack.s1ap_pcap.filename)->default_value("enb_s1ap.pcap"), "S1AP layer capture filename")
("pcap.s1ap_filename", bpo::value<string>(&args->stack.s1ap_pcap.filename)->default_value("/tmp/enb_s1ap.pcap"), "S1AP layer capture filename")
("pcap.mac_net_enable", bpo::value<bool>(&args->stack.mac_pcap_net.enable)->default_value(false), "Enable MAC network captures")
("pcap.bind_ip", bpo::value<string>(&args->stack.mac_pcap_net.bind_ip)->default_value("0.0.0.0"), "Bind IP address for MAC network trace")
("pcap.bind_port", bpo::value<uint16_t>(&args->stack.mac_pcap_net.bind_port)->default_value(5687), "Bind port for MAC network trace")

@ -151,13 +151,13 @@ void slot_worker::set_context(const srsran::phy_common_interface::worker_context
bool slot_worker::work_ul()
{
stack_interface_phy_nr::ul_sched_t ul_sched = {};
if (stack.get_ul_sched(ul_slot_cfg, ul_sched) < SRSRAN_SUCCESS) {
stack_interface_phy_nr::ul_sched_t* ul_sched = stack.get_ul_sched(ul_slot_cfg);
if (ul_sched == nullptr) {
logger.error("Error retrieving UL scheduling");
return false;
}
if (ul_sched.pucch.empty() && ul_sched.pusch.empty()) {
if (ul_sched->pucch.empty() && ul_sched->pusch.empty()) {
// early exit if nothing has been scheduled
return true;
}
@ -169,7 +169,7 @@ bool slot_worker::work_ul()
}
// For each PUCCH...
for (stack_interface_phy_nr::pucch_t& pucch : ul_sched.pucch) {
for (stack_interface_phy_nr::pucch_t& pucch : ul_sched->pucch) {
srsran::bounded_vector<stack_interface_phy_nr::pucch_info_t, stack_interface_phy_nr::MAX_PUCCH_CANDIDATES>
pucch_info(pucch.candidates.size());
@ -220,7 +220,7 @@ bool slot_worker::work_ul()
}
// For each PUSCH...
for (stack_interface_phy_nr::pusch_t& pusch : ul_sched.pusch) {
for (stack_interface_phy_nr::pusch_t& pusch : ul_sched->pusch) {
// Prepare PUSCH
stack_interface_phy_nr::pusch_info_t pusch_info = {};
pusch_info.uci_cfg = pusch.sch.uci;
@ -274,14 +274,13 @@ bool slot_worker::work_dl()
sync.wait(this);
// Retrieve Scheduling for the current processing DL slot
stack_interface_phy_nr::dl_sched_t dl_sched = {};
bool dl_sched_fail = stack.get_dl_sched(dl_slot_cfg, dl_sched) < SRSRAN_SUCCESS;
const stack_interface_phy_nr::dl_sched_t* dl_sched_ptr = stack.get_dl_sched(dl_slot_cfg);
// Releases synchronization lock and allow next worker to retrieve scheduling results
sync.release();
// Abort if the scheduling failed
if (dl_sched_fail) {
if (dl_sched_ptr == nullptr) {
logger.error("Error retrieving DL scheduling");
return false;
}
@ -292,7 +291,7 @@ bool slot_worker::work_dl()
}
// Encode PDCCH for DL transmissions
for (const stack_interface_phy_nr::pdcch_dl_t& pdcch : dl_sched.pdcch_dl) {
for (const stack_interface_phy_nr::pdcch_dl_t& pdcch : dl_sched_ptr->pdcch_dl) {
// Set PDCCH configuration, including DCI dedicated
if (srsran_gnb_dl_set_pdcch_config(&gnb_dl, &pdcch_cfg, &pdcch.dci_cfg) < SRSRAN_SUCCESS) {
logger.error("PDCCH: Error setting DL configuration");
@ -314,7 +313,7 @@ bool slot_worker::work_dl()
}
// Encode PDCCH for UL transmissions
for (const stack_interface_phy_nr::pdcch_ul_t& pdcch : dl_sched.pdcch_ul) {
for (const stack_interface_phy_nr::pdcch_ul_t& pdcch : dl_sched_ptr->pdcch_ul) {
// Set PDCCH configuration, including DCI dedicated
if (srsran_gnb_dl_set_pdcch_config(&gnb_dl, &pdcch_cfg, &pdcch.dci_cfg) < SRSRAN_SUCCESS) {
logger.error("PDCCH: Error setting DL configuration");
@ -336,7 +335,7 @@ bool slot_worker::work_dl()
}
// Encode PDSCH
for (stack_interface_phy_nr::pdsch_t& pdsch : dl_sched.pdsch) {
for (const stack_interface_phy_nr::pdsch_t& pdsch : dl_sched_ptr->pdsch) {
// convert MAC to PHY buffer data structures
uint8_t* data[SRSRAN_MAX_TB] = {};
for (uint32_t i = 0; i < SRSRAN_MAX_TB; ++i) {
@ -367,7 +366,7 @@ bool slot_worker::work_dl()
}
// Put NZP-CSI-RS
for (srsran_csi_rs_nzp_resource_t& nzp_csi_rs : dl_sched.nzp_csi_rs) {
for (const srsran_csi_rs_nzp_resource_t& nzp_csi_rs : dl_sched_ptr->nzp_csi_rs) {
if (srsran_gnb_dl_nzp_csi_rs_put(&gnb_dl, &dl_slot_cfg, &nzp_csi_rs) < SRSRAN_SUCCESS) {
logger.error("NZP-CSI-RS: Error putting signal");
return false;
@ -378,7 +377,7 @@ bool slot_worker::work_dl()
srsran_gnb_dl_gen_signal(&gnb_dl);
// Add SSB to the baseband signal
for (const stack_interface_phy_nr::ssb_t& ssb : dl_sched.ssb) {
for (const stack_interface_phy_nr::ssb_t& ssb : dl_sched_ptr->ssb) {
if (srsran_gnb_dl_add_ssb(&gnb_dl, &ssb.pbch_msg, dl_slot_cfg.idx) < SRSRAN_SUCCESS) {
logger.error("SSB: Error putting signal");
return false;

@ -195,13 +195,13 @@ int gnb_stack_nr::slot_indication(const srsran_slot_cfg_t& slot_cfg)
{
return mac.slot_indication(slot_cfg);
}
int gnb_stack_nr::get_dl_sched(const srsran_slot_cfg_t& slot_cfg, dl_sched_t& dl_sched)
gnb_stack_nr::dl_sched_t* gnb_stack_nr::get_dl_sched(const srsran_slot_cfg_t& slot_cfg)
{
return mac.get_dl_sched(slot_cfg, dl_sched);
return mac.get_dl_sched(slot_cfg);
}
int gnb_stack_nr::get_ul_sched(const srsran_slot_cfg_t& slot_cfg, ul_sched_t& ul_sched)
gnb_stack_nr::ul_sched_t* gnb_stack_nr::get_ul_sched(const srsran_slot_cfg_t& slot_cfg)
{
return mac.get_ul_sched(slot_cfg, ul_sched);
return mac.get_ul_sched(slot_cfg);
}
int gnb_stack_nr::pucch_info(const srsran_slot_cfg_t& slot_cfg, const mac_interface_phy_nr::pucch_info_t& pucch_info)
{

@ -24,6 +24,9 @@
#include "srsran/common/string_helpers.h"
#include "srsran/srslog/bundled/fmt/format.h"
#include "srsran/srslog/bundled/fmt/ranges.h"
extern "C" {
#include "srsran/config.h"
}
namespace srsenb {
@ -153,37 +156,26 @@ int ue_buffer_manager<isNR>::get_bsr() const
}
template <bool isNR>
void ue_buffer_manager<isNR>::ul_bsr(uint32_t lcg_id, uint32_t val)
int ue_buffer_manager<isNR>::ul_bsr(uint32_t lcg_id, uint32_t val)
{
if (not is_lcg_valid(lcg_id)) {
logger.warning("SCHED: The provided lcg_id=%d for rnti=0x%x is not valid", lcg_id, rnti);
return;
return SRSRAN_ERROR;
}
lcg_bsr[lcg_id] = val;
if (logger.debug.enabled()) {
fmt::memory_buffer str_buffer;
fmt::format_to(str_buffer, "{}", lcg_bsr);
logger.debug(
"SCHED: rnti=0x%x, lcg_id=%d, bsr=%d. Current state=%s", rnti, lcg_id, val, srsran::to_c_str(str_buffer));
}
return SRSRAN_SUCCESS;
}
template <bool isNR>
void ue_buffer_manager<isNR>::dl_buffer_state(uint8_t lcid, uint32_t tx_queue, uint32_t prio_tx_queue)
int ue_buffer_manager<isNR>::dl_buffer_state(uint8_t lcid, uint32_t tx_queue, uint32_t prio_tx_queue)
{
if (not is_lcid_valid(lcid)) {
logger.warning("The provided lcid=%d is not valid", lcid);
return;
}
if (lcid <= MAX_SRB_LC_ID and
(channels[lcid].buf_tx != (int)tx_queue or channels[lcid].buf_prio_tx != (int)prio_tx_queue)) {
logger.info("SCHED: rnti=0x%x DL lcid=%d buffer_state=%d,%d", rnti, lcid, tx_queue, prio_tx_queue);
} else {
logger.debug("SCHED: rnti=0x%x DL lcid=%d buffer_state=%d,%d", rnti, lcid, tx_queue, prio_tx_queue);
return SRSRAN_ERROR;
}
channels[lcid].buf_prio_tx = prio_tx_queue;
channels[lcid].buf_tx = tx_queue;
return SRSRAN_SUCCESS;
}
// Explicit instantiation

@ -596,7 +596,7 @@ int mac::get_dl_sched(uint32_t tti_tx_dl, dl_sched_list_t& dl_sched_res_list)
return 0;
}
trace_threshold_complete_event("mac::run_slot", "total_time", std::chrono::microseconds(100));
trace_threshold_complete_event("mac::get_dl_sched", "total_time", std::chrono::microseconds(100));
logger.set_context(TTI_SUB(tti_tx_dl, FDD_HARQ_DELAY_UL_MS));
if (do_padding) {
add_padding();

@ -28,7 +28,7 @@ set(SOURCES mac_nr.cc
sched_nr_pdcch.cc
sched_nr_cfg.cc
sched_nr_helpers.cc
sched_nr_cell.cc
sched_nr_bwp.cc
sched_nr_rb.cc
sched_nr_time_rr.cc
harq_softbuffer.cc

@ -20,18 +20,14 @@
*/
#include "srsenb/hdr/stack/mac/nr/mac_nr.h"
#include "srsenb/hdr/stack/mac/nr/sched_nr.h"
#include "srsran/common/buffer_pool.h"
#include "srsran/common/log_helper.h"
#include "srsran/common/phy_cfg_nr_default.h"
#include "srsran/common/rwlock_guard.h"
#include "srsran/common/standard_streams.h"
#include "srsran/common/string_helpers.h"
#include "srsran/common/time_prof.h"
#include "srsran/mac/mac_rar_pdu_nr.h"
#include <pthread.h>
#include <string.h>
#include <strings.h>
#include <unistd.h>
namespace srsenb {
@ -39,7 +35,8 @@ mac_nr::mac_nr(srsran::task_sched_handle task_sched_) :
logger(srslog::fetch_basic_logger("MAC-NR")),
task_sched(task_sched_),
bcch_bch_payload(srsran::make_byte_buffer()),
rar_pdu_buffer(srsran::make_byte_buffer())
rar_pdu_buffer(srsran::make_byte_buffer()),
sched(new sched_nr{})
{
stack_task_queue = task_sched.make_task_queue();
}
@ -76,12 +73,12 @@ int mac_nr::init(const mac_nr_args_t& args_,
void mac_nr::stop()
{
if (started) {
bool started_prev = started.exchange(false);
if (started_prev) {
sched->stop();
if (pcap != nullptr) {
pcap->close();
}
started = false;
}
}
@ -93,7 +90,7 @@ void mac_nr::get_metrics(srsenb::mac_metrics_t& metrics)
// TODO: We should comment on the logic we follow to get the metrics. Some of them are retrieved from MAC, some
// others from the scheduler.
get_metrics_nolock(metrics);
sched.get_metrics(metrics);
sched->get_metrics(metrics);
}
void mac_nr::get_metrics_nolock(srsenb::mac_metrics_t& metrics)
@ -114,7 +111,7 @@ void mac_nr::get_metrics_nolock(srsenb::mac_metrics_t& metrics)
int mac_nr::cell_cfg(const std::vector<srsenb::sched_nr_interface::cell_cfg_t>& nr_cells)
{
cell_config = nr_cells;
sched.config(args.sched_cfg, nr_cells);
sched->config(args.sched_cfg, nr_cells);
detected_rachs.resize(nr_cells.size());
// read SIBs from RRC (SIB1 for now only)
@ -143,7 +140,7 @@ int mac_nr::cell_cfg(const std::vector<srsenb::sched_nr_interface::cell_cfg_t>&
int mac_nr::ue_cfg(uint16_t rnti, const sched_nr_interface::ue_cfg_t& ue_cfg)
{
sched.ue_cfg(rnti, ue_cfg);
sched->ue_cfg(rnti, ue_cfg);
return SRSRAN_SUCCESS;
}
@ -154,7 +151,7 @@ uint16_t mac_nr::reserve_rnti(uint32_t enb_cc_idx, const sched_nr_ue_cfg_t& uecf
return rnti;
}
sched.ue_cfg(rnti, uecfg);
sched->ue_cfg(rnti, uecfg);
return rnti;
}
@ -173,7 +170,7 @@ void mac_nr::rach_detected(const rach_info_t& rach_info)
sched_nr_ue_cfg_t uecfg = {};
uecfg.carriers.resize(1);
uecfg.carriers[0].active = true;
uecfg.carriers[0].cc = 0;
uecfg.carriers[0].cc = enb_cc_idx;
uecfg.ue_bearers[0].direction = mac_lc_ch_cfg_t::BOTH;
srsran::phy_cfg_nr_default_t::reference_cfg_t ref_args{};
ref_args.duplex = cell_config[0].duplex.mode == SRSRAN_DUPLEX_MODE_TDD
@ -182,7 +179,7 @@ void mac_nr::rach_detected(const rach_info_t& rach_info)
uecfg.phy_cfg = srsran::phy_cfg_nr_default_t{ref_args};
uecfg.phy_cfg.csi = {}; // disable CSI until RA is complete
uint16_t rnti = reserve_rnti(enb_cc_idx, uecfg);
uint16_t rnti = alloc_ue(enb_cc_idx);
// Log this event.
++detected_rachs[enb_cc_idx];
@ -194,7 +191,7 @@ void mac_nr::rach_detected(const rach_info_t& rach_info)
rar_info.ta_cmd = rach_info.time_adv;
rar_info.prach_slot = slot_point{NUMEROLOGY_IDX, rach_info.slot_index};
// TODO: fill remaining fields as required
sched.dl_rach_info(enb_cc_idx, rar_info);
sched->dl_rach_info(rar_info, uecfg);
rrc->add_user(rnti, uecfg);
logger.info("RACH: slot=%d, cc=%d, preamble=%d, offset=%d, temp_crnti=0x%x",
@ -230,7 +227,7 @@ uint16_t mac_nr::alloc_ue(uint32_t enb_cc_idx)
}
// Allocate and initialize UE object
std::unique_ptr<ue_nr> ue_ptr = std::unique_ptr<ue_nr>(new ue_nr(rnti, enb_cc_idx, &sched, rrc, rlc, phy, logger));
std::unique_ptr<ue_nr> ue_ptr(new ue_nr(rnti, enb_cc_idx, sched.get(), rrc, rlc, phy, logger));
// Add UE to rnti map
srsran::rwlock_write_guard rw_lock(rwmutex);
@ -253,7 +250,7 @@ int mac_nr::remove_ue(uint16_t rnti)
{
srsran::rwlock_write_guard lock(rwmutex);
if (is_rnti_active_nolock(rnti)) {
sched.ue_rem(rnti);
sched->ue_rem(rnti);
ue_db.erase(rnti);
} else {
logger.error("User rnti=0x%x not found", rnti);
@ -291,13 +288,13 @@ bool mac_nr::is_rnti_active_nolock(uint16_t rnti)
int mac_nr::rlc_buffer_state(uint16_t rnti, uint32_t lc_id, uint32_t tx_queue, uint32_t retx_queue)
{
sched.dl_buffer_state(rnti, lc_id, tx_queue, retx_queue);
sched->dl_buffer_state(rnti, lc_id, tx_queue, retx_queue);
return SRSRAN_SUCCESS;
}
void mac_nr::ul_bsr(uint16_t rnti, uint32_t lcid, uint32_t bsr)
{
sched.ul_bsr(rnti, lcid, bsr);
sched->ul_bsr(rnti, lcid, bsr);
}
int mac_nr::slot_indication(const srsran_slot_cfg_t& slot_cfg)
@ -305,25 +302,25 @@ int mac_nr::slot_indication(const srsran_slot_cfg_t& slot_cfg)
return 0;
}
int mac_nr::get_dl_sched(const srsran_slot_cfg_t& slot_cfg, dl_sched_t& dl_sched)
mac_nr::dl_sched_t* mac_nr::get_dl_sched(const srsran_slot_cfg_t& slot_cfg)
{
slot_point pdsch_slot = srsran::slot_point{NUMEROLOGY_IDX, slot_cfg.idx};
logger.set_context((pdsch_slot - TX_ENB_DELAY).to_uint());
// Run Scheduler
sched_nr_interface::sched_rar_list_t rar_list;
sched_nr_interface::dl_res_t dl_res(rar_list, dl_sched);
// Initiate new slot and sync UE internal states
sched->slot_indication(pdsch_slot);
int ret = sched.run_slot(pdsch_slot, 0, dl_res);
if (ret != SRSRAN_SUCCESS) {
return ret;
// Run DL Scheduler for CC
sched_nr::dl_res_t* dl_res = sched->get_dl_sched(pdsch_slot, 0);
if (dl_res == nullptr) {
return nullptr;
}
// Generate MAC DL PDUs
uint32_t rar_count = 0;
srsran::rwlock_read_guard rw_lock(rwmutex);
for (pdsch_t& pdsch : dl_sched.pdsch) {
for (pdsch_t& pdsch : dl_res->phy.pdsch) {
if (pdsch.sch.grant.rnti_type == srsran_rnti_type_c) {
uint16_t rnti = pdsch.sch.grant.rnti;
if (not is_rnti_active_nolock(rnti)) {
@ -342,7 +339,7 @@ int mac_nr::get_dl_sched(const srsran_slot_cfg_t& slot_cfg, dl_sched_t& dl_sched
}
}
} else if (pdsch.sch.grant.rnti_type == srsran_rnti_type_ra) {
sched_nr_interface::rar_t& rar = dl_res.rar[rar_count++];
sched_nr_interface::rar_t& rar = dl_res->rar[rar_count++];
// for RARs we could actually move the byte_buffer to the PHY, as there are no retx
pdsch.data[0] = assemble_rar(rar.grants);
}
@ -350,23 +347,22 @@ int mac_nr::get_dl_sched(const srsran_slot_cfg_t& slot_cfg, dl_sched_t& dl_sched
for (auto& u : ue_db) {
u.second->metrics_cnt();
}
return SRSRAN_SUCCESS;
return &dl_res->phy;
}
int mac_nr::get_ul_sched(const srsran_slot_cfg_t& slot_cfg, ul_sched_t& ul_sched)
mac_nr::ul_sched_t* mac_nr::get_ul_sched(const srsran_slot_cfg_t& slot_cfg)
{
int ret = 0;
slot_point pusch_slot = srsran::slot_point{NUMEROLOGY_IDX, slot_cfg.idx};
ret = sched.get_ul_sched(pusch_slot, 0, ul_sched);
ul_sched_t* ul_sched = sched->get_ul_sched(pusch_slot, 0);
srsran::rwlock_read_guard rw_lock(rwmutex);
for (auto& pusch : ul_sched.pusch) {
for (auto& pusch : ul_sched->pusch) {
if (ue_db.contains(pusch.sch.grant.rnti)) {
ue_db[pusch.sch.grant.rnti]->metrics_ul_mcs(pusch.sch.grant.tb->mcs);
}
}
return ret;
return ul_sched;
}
int mac_nr::pucch_info(const srsran_slot_cfg_t& slot_cfg, const mac_interface_phy_nr::pucch_info_t& pucch_info)
@ -392,7 +388,7 @@ bool mac_nr::handle_uci_data(const uint16_t rnti, const srsran_uci_cfg_nr_t& cfg
for (uint32_t i = 0; i < cfg_.ack.count; i++) {
const srsran_harq_ack_bit_t* ack_bit = &cfg_.ack.bits[i];
bool is_ok = (value.ack[i] == 1) and value.valid;
sched.dl_ack_info(rnti, 0, ack_bit->pid, 0, is_ok);
sched->dl_ack_info(rnti, 0, ack_bit->pid, 0, is_ok);
srsran::rwlock_read_guard rw_lock(rwmutex);
if (ue_db.contains(rnti)) {
ue_db[rnti]->metrics_tx(is_ok, 0 /*TODO get size of packet from scheduler somehow*/);
@ -401,7 +397,7 @@ bool mac_nr::handle_uci_data(const uint16_t rnti, const srsran_uci_cfg_nr_t& cfg
// Process SR
if (value.valid and value.sr > 0) {
sched.ul_sr_info(cfg_.pucch.rnti);
sched->ul_sr_info(cfg_.pucch.rnti);
}
// Process CQI
@ -426,7 +422,7 @@ int mac_nr::pusch_info(const srsran_slot_cfg_t& slot_cfg, mac_interface_phy_nr::
return SRSRAN_ERROR;
}
sched.ul_crc_info(rnti, 0, pusch_info.pid, pusch_info.pusch_data.tb[0].crc);
sched->ul_crc_info(rnti, 0, pusch_info.pid, pusch_info.pusch_data.tb[0].crc);
// process only PDUs with CRC=OK
if (pusch_info.pusch_data.tb[0].crc) {

@ -22,8 +22,10 @@
#include "srsenb/hdr/stack/mac/nr/sched_nr.h"
#include "srsenb/hdr/stack/mac/common/mac_metrics.h"
#include "srsenb/hdr/stack/mac/nr/harq_softbuffer.h"
#include "srsenb/hdr/stack/mac/nr/sched_nr_cell.h"
#include "srsenb/hdr/stack/mac/nr/sched_nr_bwp.h"
#include "srsenb/hdr/stack/mac/nr/sched_nr_worker.h"
#include "srsran/common/phy_cfg_nr_default.h"
#include "srsran/common/string_helpers.h"
#include "srsran/common/thread_pool.h"
namespace srsenb {
@ -34,54 +36,272 @@ static int assert_ue_cfg_valid(uint16_t rnti, const sched_nr_interface::ue_cfg_t
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
class ul_sched_result_buffer
/// Class that stores events that are not specific to a CC (e.g. SRs, removal of UEs, buffer state updates)
class sched_nr::event_manager
{
public:
explicit ul_sched_result_buffer(uint32_t nof_cc_)
/// class used to accumulate all processed event messages of a single {slot,cc} and print them in a single log line
struct logger {
explicit logger(int cc_, srslog::basic_logger& logger_) :
log_enabled(logger_.debug.enabled()), cc(cc_), sched_logger(logger_)
{}
logger(const logger&) = delete;
logger(logger&&) = delete;
logger& operator=(const logger&) = delete;
logger& operator=(logger&&) = delete;
~logger()
{
for (auto& v : results) {
v.resize(nof_cc_);
if (log_enabled and event_fmtbuf.size() > 0) {
if (cc < 0) {
sched_logger.debug("SCHED: slot events: [%s]", srsran::to_c_str(event_fmtbuf));
} else {
sched_logger.debug("SCHED: slot events, cc=%d: [%s]", cc, srsran::to_c_str(event_fmtbuf));
}
}
}
template <typename... Args>
void push(const char* fmt, Args&&... args)
{
if (log_enabled) {
if (event_fmtbuf.size() > 0) {
fmt::format_to(event_fmtbuf, ", ");
}
fmt::format_to(event_fmtbuf, fmt, std::forward<Args>(args)...);
}
}
private:
bool log_enabled;
int cc;
srslog::basic_logger& sched_logger;
fmt::memory_buffer event_fmtbuf;
};
explicit event_manager(sched_params_t& params) :
sched_logger(srslog::fetch_basic_logger(params.sched_cfg.logger_name)), carriers(params.cells.size())
{}
/// Enqueue an event that does not map into a ue method (e.g. rem_user, add_user)
void enqueue_event(const char* event_name, srsran::move_callback<void(logger&)> ev)
{
std::lock_guard<std::mutex> lock(event_mutex);
next_slot_events.emplace_back(event_name, std::move(ev));
}
/// Enqueue an event that directly maps into a ue method (e.g. ul_sr_info, ul_bsr, etc.)
/// Note: these events can be processed sequentially or in parallel, depending on whether the UE supports CA
void enqueue_ue_event(const char* event_name, uint16_t rnti, srsran::move_callback<void(ue&, logger&)> callback)
{
srsran_assert(rnti != SRSRAN_INVALID_RNTI, "Invalid rnti=0x%x passed to common event manager", rnti);
std::lock_guard<std::mutex> lock(event_mutex);
next_slot_ue_events.emplace_back(rnti, event_name, std::move(callback));
}
/// Enqueue feedback directed at a given UE in a given cell (e.g. ACKs, CQI)
void enqueue_ue_cc_feedback(const char* event_name,
uint16_t rnti,
uint32_t cc,
srsran::move_callback<void(ue_carrier&, logger&)> callback)
{
srsran_assert(rnti != SRSRAN_INVALID_RNTI, "Invalid rnti=0x%x passed to event manager", rnti);
srsran_assert(cc < carriers.size(), "Invalid cc=%d passed to event manager", cc);
std::lock_guard<std::mutex> lock(carriers[cc].event_cc_mutex);
carriers[cc].next_slot_ue_events.emplace_back(rnti, cc, event_name, std::move(callback));
}
ul_sched_t& add_ul_result(slot_point tti, uint32_t cc)
/// Process all events that are not specific to a carrier or that are directed at CA-enabled UEs
/// Note: non-CA UEs are updated later in get_dl_sched, to leverage parallelism
void process_common(ue_map_t& ues)
{
if (not has_ul_result(tti, cc)) {
results[tti.to_uint()][cc].slot_ul = tti;
results[tti.to_uint()][cc].ul_res = {};
// Extract pending feedback events
current_slot_ue_events.clear();
current_slot_events.clear();
{
std::lock_guard<std::mutex> ev_lock(event_mutex);
next_slot_ue_events.swap(current_slot_ue_events);
next_slot_events.swap(current_slot_events);
}
return results[tti.to_uint()][cc].ul_res;
logger evlogger(-1, sched_logger);
// non-UE specific events
for (event_t& ev : current_slot_events) {
ev.callback(evlogger);
}
bool has_ul_result(slot_point tti, uint32_t cc) const { return results[tti.to_uint()][cc].slot_ul == tti; }
for (ue_event_t& ev : current_slot_ue_events) {
auto ue_it = ues.find(ev.rnti);
if (ue_it == ues.end()) {
sched_logger.warning("SCHED: \"%s\" called for inexistent rnti=0x%x.", ev.event_name, ev.rnti);
ev.rnti = SRSRAN_INVALID_RNTI;
} else if (ue_it->second->has_ca()) {
// events specific to existing UEs with CA
ev.callback(*ue_it->second, evlogger);
ev.rnti = SRSRAN_INVALID_RNTI;
}
}
}
/// Process events synchronized during slot_indication() that are directed at non CA-enabled UEs
void process_cc_events(ue_map_t& ues, uint32_t cc)
{
logger evlogger(cc, sched_logger);
ul_sched_t pop_ul_result(slot_point tti, uint32_t cc)
{
if (has_ul_result(tti, cc)) {
results[tti.to_uint()][cc].slot_ul.clear();
return results[tti.to_uint()][cc].ul_res;
carriers[cc].current_slot_ue_events.clear();
std::lock_guard<std::mutex> lock(carriers[cc].event_cc_mutex);
carriers[cc].current_slot_ue_events.swap(carriers[cc].next_slot_ue_events);
}
for (ue_event_t& ev : current_slot_ue_events) {
if (ev.rnti == SRSRAN_INVALID_RNTI) {
// events already processed
continue;
}
auto ue_it = ues.find(ev.rnti);
if (ue_it == ues.end()) {
sched_logger.warning("SCHED: \"%s\" called for inexistent rnti=0x%x.", ev.event_name, ev.rnti);
ev.rnti = SRSRAN_INVALID_RNTI;
} else if (not ue_it->second->has_ca() and ue_it->second->carriers[cc] != nullptr) {
ev.callback(*ue_it->second, evlogger);
ev.rnti = SRSRAN_INVALID_RNTI;
}
}
for (ue_cc_event_t& ev : carriers[cc].current_slot_ue_events) {
auto ue_it = ues.find(ev.rnti);
if (ue_it != ues.end() and ue_it->second->carriers[cc] != nullptr) {
ev.callback(*ue_it->second->carriers[cc], evlogger);
} else {
sched_logger.warning("SCHED: \"%s\" called for inexistent rnti=0x%x,cc=%d.", ev.event_name, ev.rnti, ev.cc);
}
}
return {};
}
private:
struct slot_result_t {
slot_point slot_ul;
ul_sched_t ul_res;
struct event_t {
const char* event_name;
srsran::move_callback<void(logger&)> callback;
event_t(const char* event_name_, srsran::move_callback<void(logger&)> c) :
event_name(event_name_), callback(std::move(c))
{}
};
struct ue_event_t {
uint16_t rnti;
const char* event_name;
srsran::move_callback<void(ue&, logger&)> callback;
ue_event_t(uint16_t rnti_, const char* event_name_, srsran::move_callback<void(ue&, logger&)> c) :
rnti(rnti_), event_name(event_name_), callback(std::move(c))
{}
};
struct ue_cc_event_t {
uint16_t rnti;
uint32_t cc;
const char* event_name;
srsran::move_callback<void(ue_carrier&, logger&)> callback;
ue_cc_event_t(uint16_t rnti_,
uint32_t cc_,
const char* event_name_,
srsran::move_callback<void(ue_carrier&, logger&)> c) :
rnti(rnti_), cc(cc_), event_name(event_name_), callback(std::move(c))
{}
};
srslog::basic_logger& sched_logger;
srsran::circular_array<std::vector<slot_result_t>, TTIMOD_SZ> results;
std::mutex event_mutex;
srsran::deque<event_t> next_slot_events, current_slot_events;
srsran::deque<ue_event_t> next_slot_ue_events, current_slot_ue_events;
struct cc_events {
std::mutex event_cc_mutex;
srsran::deque<ue_cc_event_t> next_slot_ue_events, current_slot_ue_events;
};
std::vector<cc_events> carriers;
};
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
sched_nr::sched_nr() : logger(&srslog::fetch_basic_logger("MAC-NR")) {}
class sched_nr::ue_metrics_manager
{
public:
explicit ue_metrics_manager(ue_map_t& ues_) : ues(ues_) {}
void stop()
{
std::unique_lock<std::mutex> lock(mutex);
if (not stopped) {
stopped = true;
// requests during sched::stop may not be fulfilled by sched main thread
save_metrics_nolock();
}
}
/// Blocking call that waits for the metrics to be filled
void get_metrics(mac_metrics_t& requested_metrics)
{
std::unique_lock<std::mutex> lock(mutex);
pending_metrics = &requested_metrics;
if (not stopped) {
cvar.wait(lock, [this]() { return pending_metrics == nullptr; });
} else {
save_metrics_nolock();
}
}
sched_nr::~sched_nr() {}
/// called from within the scheduler main thread to save metrics
void save_metrics()
{
{
std::unique_lock<std::mutex> lock(mutex);
save_metrics_nolock();
}
cvar.notify_one();
}
private:
void save_metrics_nolock()
{
if (pending_metrics == nullptr) {
return;
}
for (mac_ue_metrics_t& ue_metric : pending_metrics->ues) {
if (ues.contains(ue_metric.rnti) and ues[ue_metric.rnti]->carriers[0] != nullptr) {
auto& ue_cc = *ues[ue_metric.rnti]->carriers[0];
ue_metric.tx_brate = ue_cc.metrics.tx_brate;
ue_metric.tx_errors = ue_cc.metrics.tx_errors;
ue_metric.tx_pkts = ue_cc.metrics.tx_pkts;
ue_cc.metrics = {};
}
}
pending_metrics = nullptr;
}
ue_map_t& ues;
std::mutex mutex;
std::condition_variable cvar;
mac_metrics_t* pending_metrics = nullptr;
bool stopped = false;
};
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
sched_nr::sched_nr() : logger(&srslog::fetch_basic_logger("MAC-NR")), metrics_handler(new ue_metrics_manager{ue_db}) {}
sched_nr::~sched_nr()
{
stop();
}
void sched_nr::stop()
{
metrics_handler->stop();
}
int sched_nr::config(const sched_args_t& sched_cfg, srsran::const_span<cell_cfg_t> cell_list)
{
cfg = sched_params{sched_cfg};
cfg = sched_params_t{sched_cfg};
logger = &srslog::fetch_basic_logger(sched_cfg.logger_name);
// Initiate Common Sched Configuration
@ -90,149 +310,183 @@ int sched_nr::config(const sched_args_t& sched_cfg, srsran::const_span<cell_cfg_
cfg.cells.emplace_back(cc, cell_list[cc], cfg.sched_cfg);
}
pending_events.reset(new event_manager{cfg});
// Initiate cell-specific schedulers
cells.reserve(cell_list.size());
for (uint32_t cc = 0; cc < cell_list.size(); ++cc) {
cells.emplace_back(new serv_cell_manager{cfg.cells[cc]});
cc_workers.resize(cfg.cells.size());
for (uint32_t cc = 0; cc < cfg.cells.size(); ++cc) {
cc_workers[cc].reset(new slot_cc_worker{cfg.cells[cc]});
}
pending_results.reset(new ul_sched_result_buffer(cell_list.size()));
sched_workers.reset(new sched_nr_impl::sched_worker_manager(ue_db, cfg, cells));
return SRSRAN_SUCCESS;
}
void sched_nr::ue_cfg(uint16_t rnti, const ue_cfg_t& uecfg)
{
srsran_assert(assert_ue_cfg_valid(rnti, uecfg) == SRSRAN_SUCCESS, "Invalid UE configuration");
sched_workers->enqueue_event(rnti, [this, rnti, uecfg]() { ue_cfg_impl(rnti, uecfg); });
pending_events->enqueue_event("ue_cfg", [this, rnti, uecfg](event_manager::logger& ev_logger) {
if (ue_cfg_impl(rnti, uecfg) == SRSRAN_SUCCESS) {
ev_logger.push("ue_cfg(0x{:x})", rnti);
} else {
logger->warning("Failed to create UE object for rnti=0x{:x}", rnti);
}
});
}
void sched_nr::ue_rem(uint16_t rnti)
{
sched_workers->enqueue_event(rnti, [this, rnti]() {
auto ue_it = ue_db.find(rnti);
if (ue_it == ue_db.end()) {
logger->warning("SCHED: ue_rem(rnti) called for inexistent rnti=0x%x", rnti);
return;
}
pending_events->enqueue_event("ue_rem", [this, rnti](event_manager::logger& ev_logger) {
ue_db.erase(rnti);
logger->info("SCHED: Removed user rnti=0x%x", rnti);
ev_logger.push("ue_rem(0x{:x})", rnti);
});
}
bool sched_nr::ue_exists(uint16_t rnti)
int sched_nr::add_ue_impl(uint16_t rnti, std::unique_ptr<sched_nr_impl::ue> u)
{
return ue_db.contains(rnti);
logger->info("SCHED: New user rnti=0x%x, cc=%d", rnti, cfg.cells[0].cc);
return ue_db.insert(rnti, std::move(u)).has_value() ? SRSRAN_SUCCESS : SRSRAN_ERROR;
}
void sched_nr::ue_cfg_impl(uint16_t rnti, const ue_cfg_t& uecfg)
int sched_nr::ue_cfg_impl(uint16_t rnti, const ue_cfg_t& uecfg)
{
if (not ue_db.contains(rnti)) {
auto ret = ue_db.insert(rnti, std::unique_ptr<ue>(new ue{rnti, uecfg, cfg}));
if (ret.has_value()) {
logger->info("SCHED: New user rnti=0x%x, cc=%d", rnti, cfg.cells[0].cc);
} else {
logger->error("SCHED: Failed to create new user rnti=0x%x", rnti);
return add_ue_impl(rnti, std::unique_ptr<ue>(new ue{rnti, uecfg, cfg}));
}
} else {
ue_db[rnti]->set_cfg(uecfg);
return SRSRAN_SUCCESS;
}
// NOTE: there is no parallelism in these operations
void sched_nr::slot_indication(slot_point slot_tx)
{
srsran_assert(worker_count.load(std::memory_order_relaxed) == 0,
"Call of sched slot_indication when previous TTI has not been completed");
// mark the start of slot.
current_slot_tx = slot_tx;
worker_count.store(static_cast<int>(cfg.cells.size()), std::memory_order_relaxed);
// process non-cc specific feedback if pending (e.g. SRs, buffer state updates, UE config) for CA-enabled UEs
// Note: non-CA UEs are updated later in get_dl_sched, to leverage parallelism
pending_events->process_common(ue_db);
// prepare CA-enabled UEs internal state for new slot
// Note: non-CA UEs are updated later in get_dl_sched, to leverage parallelism
for (auto& u : ue_db) {
if (u.second->has_ca()) {
u.second->new_slot(slot_tx);
}
}
// If UE metrics were externally requested, store the current UE state
metrics_handler->save_metrics();
}
/// Generate {pdcch_slot,cc} scheduling decision
int sched_nr::run_slot(slot_point slot_dl, uint32_t cc, dl_res_t& result)
sched_nr::dl_res_t* sched_nr::get_dl_sched(slot_point pdsch_tti, uint32_t cc)
{
// Copy UL results to intermediate buffer
ul_res_t& ul_res = pending_results->add_ul_result(slot_dl, cc);
srsran_assert(pdsch_tti == current_slot_tx, "Unexpected pdsch_tti slot received");
// Generate {slot_idx,cc} result
sched_workers->run_slot(slot_dl, cc, result, ul_res);
// process non-cc specific feedback if pending (e.g. SRs, buffer state updates, UE config) for non-CA UEs
pending_events->process_cc_events(ue_db, cc);
return SRSRAN_SUCCESS;
// prepare non-CA UEs internal state for new slot
for (auto& u : ue_db) {
if (not u.second->has_ca() and u.second->carriers[cc] != nullptr) {
u.second->new_slot(current_slot_tx);
}
}
// Process pending CC-specific feedback, generate {slot_idx,cc} scheduling decision
sched_nr::dl_res_t* ret = cc_workers[cc]->run_slot(pdsch_tti, ue_db);
// decrement the number of active workers
int rem_workers = worker_count.fetch_sub(1, std::memory_order_release) - 1;
srsran_assert(rem_workers >= 0, "invalid number of calls to get_dl_sched(slot, cc)");
if (rem_workers == 0) {
// Last Worker to finish slot
// TODO: Sync sched results with ue_db state
}
return ret;
}
/// Fetch {ul_slot,cc} UL scheduling decision
int sched_nr::get_ul_sched(slot_point slot_ul, uint32_t cc, ul_res_t& result)
sched_nr::ul_res_t* sched_nr::get_ul_sched(slot_point slot_ul, uint32_t cc)
{
if (not pending_results->has_ul_result(slot_ul, cc)) {
// sched result hasn't been generated
result.pucch.clear();
result.pusch.clear();
return SRSRAN_SUCCESS;
}
result = pending_results->pop_ul_result(slot_ul, cc);
return SRSRAN_SUCCESS;
return cc_workers[cc]->get_ul_sched(slot_ul);
}
void sched_nr::get_metrics(mac_metrics_t& metrics)
{
sched_workers->get_metrics(metrics);
metrics_handler->get_metrics(metrics);
}
int sched_nr::dl_rach_info(uint32_t cc, const rar_info_t& rar_info)
int sched_nr::dl_rach_info(const rar_info_t& rar_info, const ue_cfg_t& uecfg)
{
sched_workers->enqueue_cc_event(cc, [this, cc, rar_info]() { cells[cc]->bwps[0].ra.dl_rach_info(rar_info); });
// enqueue UE creation event + RACH handling
auto add_ue = [this, uecfg, rar_info](event_manager::logger& ev_logger) {
// create user
// Note: UEs being created in sched main thread, which has higher priority
logger->info("SCHED: New user rnti=0x%x, cc=%d", rar_info.temp_crnti, uecfg.carriers[0].cc);
std::unique_ptr<ue> u{new ue{rar_info.temp_crnti, uecfg, cfg}};
uint16_t rnti = rar_info.temp_crnti;
if (add_ue_impl(rnti, std::move(u)) == SRSRAN_SUCCESS) {
ev_logger.push("dl_rach_info(temp c-rnti=0x{:x})", rar_info.temp_crnti);
// RACH is handled only once the UE object is created and inserted in the ue_db
uint32_t cc = uecfg.carriers[0].cc;
cc_workers[cc]->dl_rach_info(rar_info);
} else {
logger->warning("Failed to create UE object with rnti=0x%x", rar_info.temp_crnti);
}
};
pending_events->enqueue_event("dl_rach_info", add_ue);
return SRSRAN_SUCCESS;
}
void sched_nr::dl_ack_info(uint16_t rnti, uint32_t cc, uint32_t pid, uint32_t tb_idx, bool ack)
{
sched_workers->enqueue_cc_feedback(rnti, cc, [this, pid, tb_idx, ack](ue_carrier& ue_cc) {
int tbs = ue_cc.harq_ent.dl_ack_info(pid, tb_idx, ack);
if (tbs >= 0) {
std::lock_guard<std::mutex> lock(ue_cc.metrics_mutex);
if (ack) {
ue_cc.metrics.tx_brate += tbs;
} else {
ue_cc.metrics.tx_errors++;
}
ue_cc.metrics.tx_pkts++;
} else {
logger->warning("SCHED: rnti=0x%x, received DL HARQ-ACK for empty pid=%d", ue_cc.rnti, pid);
auto callback = [pid, tb_idx, ack](ue_carrier& ue_cc, event_manager::logger& ev_logger) {
if (ue_cc.dl_ack_info(pid, tb_idx, ack) >= 0) {
ev_logger.push("0x{:x}: dl_ack_info(pid={}, ack={})", ue_cc.rnti, pid, ack ? "OK" : "KO");
}
});
};
pending_events->enqueue_ue_cc_feedback("dl_ack_info", rnti, cc, callback);
}
void sched_nr::ul_crc_info(uint16_t rnti, uint32_t cc, uint32_t pid, bool crc)
{
sched_workers->enqueue_cc_feedback(rnti, cc, [this, pid, crc](ue_carrier& ue_cc) {
if (ue_cc.harq_ent.ul_crc_info(pid, crc) < 0) {
logger->warning("SCHED: rnti=0x%x, received CRC for empty pid=%d", ue_cc.rnti, pid);
auto callback = [pid, crc](ue_carrier& ue_cc, event_manager::logger& ev_logger) {
if (ue_cc.ul_crc_info(pid, crc) >= 0) {
ev_logger.push("0x{:x}: ul_crc_info(pid={}, crc={})", ue_cc.rnti, pid, crc ? "OK" : "KO");
}
});
};
pending_events->enqueue_ue_cc_feedback("ul_crc_info", rnti, cc, callback);
}
void sched_nr::ul_sr_info(uint16_t rnti)
{
sched_workers->enqueue_event(rnti, [this, rnti]() {
if (ue_db.contains(rnti)) {
ue_db[rnti]->ul_sr_info();
} else {
logger->warning("Received SR for inexistent rnti=0x%x", rnti);
}
pending_events->enqueue_ue_event("ul_sr_info", rnti, [](ue& u, event_manager::logger& evlogger) {
u.ul_sr_info();
evlogger.push("0x{:x}: ul_sr_info()", u.rnti);
});
}
void sched_nr::ul_bsr(uint16_t rnti, uint32_t lcg_id, uint32_t bsr)
{
sched_workers->enqueue_event(rnti, [this, rnti, lcg_id, bsr]() {
if (ue_db.contains(rnti)) {
ue_db[rnti]->ul_bsr(lcg_id, bsr);
} else {
logger->warning("Received BSR=%d for inexistent rnti=0x%x", bsr, rnti);
}
pending_events->enqueue_ue_event("ul_bsr", rnti, [lcg_id, bsr](ue& u, event_manager::logger& evlogger) {
u.ul_bsr(lcg_id, bsr);
evlogger.push("0x{:x}: ul_bsr(lcg={}, bsr={})", u.rnti, lcg_id, bsr);
});
}
void sched_nr::dl_buffer_state(uint16_t rnti, uint32_t lcid, uint32_t newtx, uint32_t retx)
{
sched_workers->enqueue_event(rnti, [this, rnti, lcid, newtx, retx]() {
if (ue_db.contains(rnti)) {
ue_db[rnti]->rlc_buffer_state(lcid, newtx, retx);
} else {
logger->warning("Received DL buffer state=%d/%d for inexistent rnti=0x%x", newtx, retx, rnti);
}
pending_events->enqueue_ue_event(
"dl_buffer_state", rnti, [lcid, newtx, retx](ue& u, event_manager::logger& event_logger) {
u.rlc_buffer_state(lcid, newtx, retx);
event_logger.push("0x{:x}: dl_buffer_state(lcid={}, bsr={},{})", u.rnti, lcid, newtx, retx);
});
}

@ -19,7 +19,7 @@
*
*/
#include "srsenb/hdr/stack/mac/nr/sched_nr_cell.h"
#include "srsenb/hdr/stack/mac/nr/sched_nr_bwp.h"
#include "srsran/common/standard_streams.h"
#include "srsran/common/string_helpers.h"
@ -229,22 +229,9 @@ int ra_sched::dl_rach_info(const dl_sched_rar_info_t& rar_info)
return SRSRAN_SUCCESS;
}
bwp_ctxt::bwp_ctxt(const bwp_params_t& bwp_cfg) :
bwp_manager::bwp_manager(const bwp_params_t& bwp_cfg) :
cfg(&bwp_cfg), ra(bwp_cfg), grid(bwp_cfg), data_sched(new sched_nr_time_rr())
{}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
serv_cell_manager::serv_cell_manager(const cell_params_t& cell_cfg_) :
cfg(cell_cfg_), logger(srslog::fetch_basic_logger(cell_cfg_.sched_args.logger_name))
{
for (uint32_t bwp_id = 0; bwp_id < cfg.cfg.bwps.size(); ++bwp_id) {
bwps.emplace_back(cell_cfg_.bwps[bwp_id]);
}
// Pre-allocate HARQs in common pool of softbuffers
harq_softbuffer_pool::get_instance().init_pool(cfg.nof_prb());
}
} // namespace sched_nr_impl
} // namespace srsenb

@ -110,7 +110,7 @@ cell_params_t::cell_params_t(uint32_t cc_, const cell_cfg_t& cell, const sched_a
srsran_assert(not bwps.empty(), "No BWPs were configured");
}
sched_params::sched_params(const sched_args_t& sched_cfg_) : sched_cfg(sched_cfg_)
sched_params_t::sched_params_t(const sched_args_t& sched_cfg_) : sched_cfg(sched_cfg_)
{
srsran_assert(sched_cfg.fixed_dl_mcs >= 0, "Dynamic DL MCS not supported");
srsran_assert(sched_cfg.fixed_ul_mcs >= 0, "Dynamic DL MCS not supported");
@ -118,8 +118,8 @@ sched_params::sched_params(const sched_args_t& sched_cfg_) : sched_cfg(sched_cfg
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
bwp_ue_cfg::bwp_ue_cfg(uint16_t rnti_, const bwp_params_t& bwp_cfg_, const ue_cfg_t& uecfg_) :
rnti(rnti_), cfg_(&uecfg_), bwp_cfg(&bwp_cfg_)
ue_carrier_params_t::ue_carrier_params_t(uint16_t rnti_, const bwp_params_t& bwp_cfg_, const ue_cfg_t& uecfg_) :
rnti(rnti_), cc(bwp_cfg_.cc), cfg_(&uecfg_), bwp_cfg(&bwp_cfg_)
{
std::fill(ss_id_to_cce_idx.begin(), ss_id_to_cce_idx.end(), SRSRAN_UE_DL_NR_MAX_NOF_SEARCH_SPACE);
const auto& pdcch = phy().pdcch;
@ -136,31 +136,5 @@ bwp_ue_cfg::bwp_ue_cfg(uint16_t rnti_, const bwp_params_t& bwp_cfg_, const ue_cf
}
}
ue_cfg_extended::ue_cfg_extended(uint16_t rnti_, const ue_cfg_t& uecfg) : ue_cfg_t(uecfg), rnti(rnti_)
{
auto ss_view = srsran::make_optional_span(phy_cfg.pdcch.search_space, phy_cfg.pdcch.search_space_present);
auto coreset_view = srsran::make_optional_span(phy_cfg.pdcch.coreset, phy_cfg.pdcch.coreset_present);
cc_params.resize(carriers.size());
for (uint32_t cc = 0; cc < cc_params.size(); ++cc) {
cc_params[cc].bwps.resize(1);
auto& bwp = cc_params[cc].bwps[0];
for (auto& ss : ss_view) {
bwp.ss_list[ss.id].emplace();
bwp.ss_list[ss.id]->cfg = &ss;
get_dci_locs(phy_cfg.pdcch.coreset[ss.coreset_id], ss, rnti, bwp.ss_list[ss.id]->cce_positions);
}
for (auto& coreset_cfg : coreset_view) {
bwp.coresets.emplace_back();
auto& coreset = bwp.coresets.back();
coreset.cfg = &coreset_cfg;
for (auto& ss : bwp.ss_list) {
if (ss.has_value() and ss->cfg->coreset_id == coreset.cfg->id) {
coreset.ss_list.push_back(ss->cfg->id);
}
}
}
}
}
} // namespace sched_nr_impl
} // namespace srsenb

@ -20,7 +20,7 @@
*/
#include "srsenb/hdr/stack/mac/nr/sched_nr_grant_allocator.h"
#include "srsenb/hdr/stack/mac/nr/sched_nr_cell.h"
#include "srsenb/hdr/stack/mac/nr/sched_nr_bwp.h"
#include "srsenb/hdr/stack/mac/nr/sched_nr_helpers.h"
namespace srsenb {
@ -36,7 +36,7 @@ bwp_slot_grid::bwp_slot_grid(const bwp_params_t& bwp_cfg_, uint32_t slot_idx_) :
for (uint32_t cs_idx = 0; cs_idx < SRSRAN_UE_DL_NR_MAX_NOF_CORESET; ++cs_idx) {
if (cfg->cfg.pdcch.coreset_present[cs_idx]) {
uint32_t cs_id = cfg->cfg.pdcch.coreset[cs_idx].id;
coresets[cs_id].emplace(*cfg, cs_id, slot_idx_, dl_pdcchs, ul_pdcchs);
coresets[cs_id].emplace(*cfg, cs_id, slot_idx_, dl.phy.pdcch_dl, dl.phy.pdcch_ul);
}
}
}
@ -50,15 +50,15 @@ void bwp_slot_grid::reset()
}
dl_prbs.reset();
ul_prbs.reset();
dl_pdcchs.clear();
ul_pdcchs.clear();
pdschs.clear();
puschs.clear();
dl.phy.ssb.clear();
dl.phy.nzp_csi_rs.clear();
dl.phy.pdcch_dl.clear();
dl.phy.pdcch_ul.clear();
dl.phy.pdsch.clear();
dl.rar.clear();
ul.pusch.clear();
ul.pucch.clear();
pending_acks.clear();
pucch.clear();
ssb.clear();
nzp_csi_rs.clear();
rar.clear();
}
bwp_res_grid::bwp_res_grid(const bwp_params_t& bwp_cfg_) : cfg(&bwp_cfg_)
@ -70,8 +70,8 @@ bwp_res_grid::bwp_res_grid(const bwp_params_t& bwp_cfg_) : cfg(&bwp_cfg_)
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
bwp_slot_allocator::bwp_slot_allocator(bwp_res_grid& bwp_grid_) :
logger(bwp_grid_.cfg->logger), cfg(*bwp_grid_.cfg), bwp_grid(bwp_grid_)
bwp_slot_allocator::bwp_slot_allocator(bwp_res_grid& bwp_grid_, slot_point pdcch_slot_, slot_ue_map_t& ues_) :
logger(bwp_grid_.cfg->logger), cfg(*bwp_grid_.cfg), bwp_grid(bwp_grid_), pdcch_slot(pdcch_slot_), slot_ues(ues_)
{}
alloc_result bwp_slot_allocator::alloc_si(uint32_t aggr_idx, uint32_t si_idx, uint32_t si_ntx, const prb_interval& prbs)
@ -81,7 +81,7 @@ alloc_result bwp_slot_allocator::alloc_si(uint32_t aggr_idx, uint32_t si_idx, ui
logger.warning("SCHED: Trying to allocate PDSCH in TDD non-DL slot index=%d", bwp_pdcch_slot.slot_idx);
return alloc_result::no_sch_space;
}
pdcch_dl_list_t& pdsch_grants = bwp_pdcch_slot.dl_pdcchs;
pdcch_dl_list_t& pdsch_grants = bwp_pdcch_slot.dl.phy.pdcch_dl;
if (pdsch_grants.full()) {
logger.warning("SCHED: Maximum number of DL allocations reached");
return alloc_result::no_grant_space;
@ -98,17 +98,11 @@ alloc_result bwp_slot_allocator::alloc_si(uint32_t aggr_idx, uint32_t si_idx, ui
alloc_result bwp_slot_allocator::alloc_rar_and_msg3(uint16_t ra_rnti,
uint32_t aggr_idx,
prb_interval interv,
srsran::const_span<dl_sched_rar_info_t> pending_rars)
srsran::const_span<dl_sched_rar_info_t> pending_rachs)
{
static const uint32_t msg3_nof_prbs = 3, m = 0;
bwp_slot_grid& bwp_pdcch_slot = bwp_grid[pdcch_slot];
if (not bwp_pdcch_slot.ssb.empty()) {
// TODO: support concurrent PDSCH and SSB
logger.info("SCHED: skipping ra-rnti=0x%x RAR allocation. Cause: concurrent PDSCH and SSB not yet supported",
ra_rnti);
return alloc_result::no_sch_space;
}
slot_point msg3_slot = pdcch_slot + cfg.pusch_ra_list[m].msg3_delay;
bwp_slot_grid& bwp_msg3_slot = bwp_grid[msg3_slot];
alloc_result ret = verify_pusch_space(bwp_msg3_slot, nullptr);
@ -119,17 +113,15 @@ alloc_result bwp_slot_allocator::alloc_rar_and_msg3(uint16_t
if (ret != alloc_result::success) {
return ret;
}
if (bwp_pdcch_slot.rar.full()) {
return alloc_result::no_grant_space;
}
if (pending_rars.size() > MAX_GRANTS) {
logger.error("SCHED: Trying to allocate too many Msg3 grants in a single slot (%zd)", pending_rars.size());
if (pending_rachs.size() > bwp_pdcch_slot.dl.rar.capacity() - bwp_pdcch_slot.dl.rar.size()) {
logger.error("SCHED: Trying to allocate too many Msg3 grants in a single slot (%zd)", pending_rachs.size());
return alloc_result::invalid_grant_params;
}
for (auto& rar : pending_rars) {
if (not slot_ues->contains(rar.temp_crnti)) {
for (auto& rach : pending_rachs) {
auto ue_it = slot_ues.find(rach.temp_crnti);
if (ue_it == slot_ues.end()) {
logger.info("SCHED: Postponing rnti=0x%x RAR allocation. Cause: The ue object not yet fully created",
rar.temp_crnti);
rach.temp_crnti);
return alloc_result::no_rnti_opportunity;
}
}
@ -141,7 +133,7 @@ alloc_result bwp_slot_allocator::alloc_rar_and_msg3(uint16_t
}
// Check Msg3 RB collision
uint32_t total_ul_nof_prbs = msg3_nof_prbs * pending_rars.size();
uint32_t total_ul_nof_prbs = msg3_nof_prbs * pending_rachs.size();
uint32_t total_ul_nof_rbgs = srsran::ceil_div(total_ul_nof_prbs, get_P(bwp_grid.nof_prbs(), false));
prb_interval msg3_rbs = find_empty_interval_of_length(bwp_msg3_slot.ul_prbs.prbs(), total_ul_nof_rbgs);
if (msg3_rbs.length() < total_ul_nof_rbgs) {
@ -161,18 +153,18 @@ alloc_result bwp_slot_allocator::alloc_rar_and_msg3(uint16_t
// RAR allocation successful.
bwp_pdcch_slot.dl_prbs |= interv;
// Generate DCI for RAR with given RA-RNTI
pdcch_dl_t& pdcch = bwp_pdcch_slot.dl_pdcchs.back();
pdcch_dl_t& pdcch = bwp_pdcch_slot.dl.phy.pdcch_dl.back();
if (not fill_dci_rar(interv, ra_rnti, *bwp_grid.cfg, pdcch.dci)) {
// Cancel on-going PDCCH allocation
bwp_pdcch_slot.coresets[coreset_id]->rem_last_dci();
return alloc_result::invalid_coderate;
}
auto& phy_cfg = (*slot_ues)[pending_rars[0].temp_crnti].cfg->phy();
auto& phy_cfg = slot_ues[pending_rachs[0].temp_crnti]->phy();
pdcch.dci_cfg = phy_cfg.get_dci_cfg();
// Generate RAR PDSCH
// TODO: Properly fill Msg3 grants
bwp_pdcch_slot.pdschs.emplace_back();
pdsch_t& pdsch = bwp_pdcch_slot.pdschs.back();
bwp_pdcch_slot.dl.phy.pdsch.emplace_back();
pdsch_t& pdsch = bwp_pdcch_slot.dl.phy.pdsch.back();
srsran_slot_cfg_t slot_cfg;
slot_cfg.idx = pdcch_slot.to_uint();
bool success = phy_cfg.get_pdsch_cfg(slot_cfg, pdcch.dci, pdsch.sch);
@ -183,10 +175,10 @@ alloc_result bwp_slot_allocator::alloc_rar_and_msg3(uint16_t
uint32_t last_msg3 = msg3_rbs.start();
const int mcs = 0, max_harq_msg3_retx = 4;
slot_cfg.idx = msg3_slot.to_uint();
bwp_pdcch_slot.rar.emplace_back();
sched_nr_interface::rar_t& rar_out = bwp_pdcch_slot.rar.back();
for (const dl_sched_rar_info_t& grant : pending_rars) {
slot_ue& ue = (*slot_ues)[grant.temp_crnti];
bwp_pdcch_slot.dl.rar.emplace_back();
sched_nr_interface::rar_t& rar_out = bwp_pdcch_slot.dl.rar.back();
for (const dl_sched_rar_info_t& grant : pending_rachs) {
slot_ue& ue = slot_ues[grant.temp_crnti];
// Generate RAR grant
rar_out.grants.emplace_back();
@ -194,15 +186,15 @@ alloc_result bwp_slot_allocator::alloc_rar_and_msg3(uint16_t
rar_grant.data = grant;
prb_interval msg3_interv{last_msg3, last_msg3 + msg3_nof_prbs};
last_msg3 += msg3_nof_prbs;
ue.h_ul = ue.harq_ent->find_empty_ul_harq();
ue.h_ul = ue.find_empty_ul_harq();
success = ue.h_ul->new_tx(msg3_slot, msg3_slot, msg3_interv, mcs, max_harq_msg3_retx);
srsran_assert(success, "Failed to allocate Msg3");
fill_dci_msg3(ue, *bwp_grid.cfg, rar_grant.msg3_dci);
// Generate PUSCH
bwp_msg3_slot.puschs.emplace_back();
pusch_t& pusch = bwp_msg3_slot.puschs.back();
success = ue.cfg->phy().get_pusch_cfg(slot_cfg, rar_grant.msg3_dci, pusch.sch);
bwp_msg3_slot.ul.pusch.emplace_back();
pusch_t& pusch = bwp_msg3_slot.ul.pusch.back();
success = ue->phy().get_pusch_cfg(slot_cfg, rar_grant.msg3_dci, pusch.sch);
srsran_assert(success, "Error converting DCI to PUSCH grant");
pusch.sch.grant.tb[0].softbuffer.rx = ue.h_ul->get_softbuffer().get();
ue.h_ul->set_tbs(pusch.sch.grant.tb[0].tbs);
@ -216,35 +208,20 @@ alloc_result bwp_slot_allocator::alloc_rar_and_msg3(uint16_t
// func computes the grant allocation for this UE
alloc_result bwp_slot_allocator::alloc_pdsch(slot_ue& ue, const prb_grant& dl_grant)
{
if (ue.cfg->active_bwp().bwp_id != bwp_grid.cfg->bwp_id) {
logger.warning(
"SCHED: Trying to allocate PDSCH for rnti=0x%x in inactive BWP id=%d", ue.rnti, ue.cfg->active_bwp().bwp_id);
return alloc_result::no_rnti_opportunity;
}
if (ue.h_dl == nullptr) {
logger.warning("SCHED: Trying to allocate PDSCH for rnti=0x%x with no available HARQs", ue.rnti);
return alloc_result::no_rnti_opportunity;
}
bwp_slot_grid& bwp_pdcch_slot = bwp_grid[ue.pdcch_slot];
bwp_slot_grid& bwp_pdsch_slot = bwp_grid[ue.pdsch_slot];
bwp_slot_grid& bwp_uci_slot = bwp_grid[ue.uci_slot]; // UCI : UL control info
alloc_result result = verify_pdsch_space(bwp_pdsch_slot, bwp_pdcch_slot);
alloc_result result = verify_pdsch_space(bwp_pdsch_slot, bwp_pdcch_slot, &bwp_uci_slot);
if (result != alloc_result::success) {
return result;
}
if (bwp_uci_slot.pending_acks.full()) {
logger.warning("SCHED: PDSCH allocation for rnti=0x%x failed due to lack of space for respective ACK", ue.rnti);
return alloc_result::no_grant_space;
result = verify_ue_cfg(ue.cfg(), ue.h_dl);
if (result != alloc_result::success) {
return result;
}
if (bwp_pdsch_slot.dl_prbs.collides(dl_grant)) {
return alloc_result::sch_collision;
}
if (not bwp_pdcch_slot.ssb.empty()) {
// TODO: support concurrent PDSCH and SSB
logger.info("SCHED: skipping rnti=0x%x PDSCH allocation. Cause: concurrent PDSCH and SSB not yet supported",
ue.rnti);
return alloc_result::no_sch_space;
}
// Find space in PUCCH
// TODO
@ -254,20 +231,20 @@ alloc_result bwp_slot_allocator::alloc_pdsch(slot_ue& ue, const prb_grant& dl_gr
// Choose the ss_id the highest number of candidates
uint32_t ss_id = 0, max_nof_candidates = 0;
for (uint32_t i = 0; i < 3; ++i) {
uint32_t nof_candidates = ue.cfg->cce_pos_list(i, pdcch_slot.slot_idx(), aggr_idx).size();
uint32_t nof_candidates = ue->cce_pos_list(i, pdcch_slot.slot_idx(), aggr_idx).size();
if (nof_candidates > max_nof_candidates) {
ss_id = i;
max_nof_candidates = nof_candidates;
}
}
uint32_t coreset_id = ue.cfg->phy().pdcch.search_space[ss_id].coreset_id;
if (not bwp_pdcch_slot.coresets[coreset_id]->alloc_dci(pdcch_grant_type_t::dl_data, aggr_idx, ss_id, &ue)) {
uint32_t coreset_id = ue->phy().pdcch.search_space[ss_id].coreset_id;
if (not bwp_pdcch_slot.coresets[coreset_id]->alloc_dci(pdcch_grant_type_t::dl_data, aggr_idx, ss_id, &ue.cfg())) {
// Could not find space in PDCCH
return alloc_result::no_cch_space;
}
// Allocate HARQ
int mcs = ue.cfg->fixed_pdsch_mcs();
int mcs = ue->fixed_pdsch_mcs();
if (ue.h_dl->empty()) {
bool ret = ue.h_dl->new_tx(ue.pdsch_slot, ue.uci_slot, dl_grant, mcs, 4);
srsran_assert(ret, "Failed to allocate DL HARQ");
@ -282,28 +259,28 @@ alloc_result bwp_slot_allocator::alloc_pdsch(slot_ue& ue, const prb_grant& dl_gr
const static float max_R = 0.93;
while (true) {
// Generate PDCCH
pdcch_dl_t& pdcch = bwp_pdcch_slot.dl_pdcchs.back();
pdcch_dl_t& pdcch = bwp_pdcch_slot.dl.phy.pdcch_dl.back();
fill_dl_dci_ue_fields(ue, *bwp_grid.cfg, ss_id, pdcch.dci.ctx.location, pdcch.dci);
pdcch.dci.pucch_resource = 0;
pdcch.dci.dai = std::count_if(bwp_uci_slot.pending_acks.begin(),
bwp_uci_slot.pending_acks.end(),
[&ue](const harq_ack_t& p) { return p.res.rnti == ue.rnti; });
[&ue](const harq_ack_t& p) { return p.res.rnti == ue->rnti; });
pdcch.dci.dai %= 4;
pdcch.dci_cfg = ue.cfg->phy().get_dci_cfg();
pdcch.dci_cfg = ue->phy().get_dci_cfg();
// Generate PUCCH
bwp_uci_slot.pending_acks.emplace_back();
bwp_uci_slot.pending_acks.back().phy_cfg = &ue.cfg->phy();
srsran_assert(ue.cfg->phy().get_pdsch_ack_resource(pdcch.dci, bwp_uci_slot.pending_acks.back().res),
bwp_uci_slot.pending_acks.back().phy_cfg = &ue->phy();
srsran_assert(ue->phy().get_pdsch_ack_resource(pdcch.dci, bwp_uci_slot.pending_acks.back().res),
"Error getting ack resource");
// Generate PDSCH
bwp_pdsch_slot.dl_prbs |= dl_grant;
bwp_pdsch_slot.pdschs.emplace_back();
pdsch_t& pdsch = bwp_pdsch_slot.pdschs.back();
bwp_pdsch_slot.dl.phy.pdsch.emplace_back();
pdsch_t& pdsch = bwp_pdsch_slot.dl.phy.pdsch.back();
srsran_slot_cfg_t slot_cfg;
slot_cfg.idx = ue.pdsch_slot.to_uint();
bool ret = ue.cfg->phy().get_pdsch_cfg(slot_cfg, pdcch.dci, pdsch.sch);
bool ret = ue->phy().get_pdsch_cfg(slot_cfg, pdcch.dci, pdsch.sch);
srsran_assert(ret, "Error converting DCI to grant");
pdsch.sch.grant.tb[0].softbuffer.tx = ue.h_dl->get_softbuffer().get();
@ -313,13 +290,13 @@ alloc_result bwp_slot_allocator::alloc_pdsch(slot_ue& ue, const prb_grant& dl_gr
} else {
srsran_assert(pdsch.sch.grant.tb[0].tbs == (int)ue.h_dl->tbs(), "The TBS did not remain constant in retx");
}
if (ue.h_dl->nof_retx() > 0 or bwp_pdsch_slot.pdschs.back().sch.grant.tb[0].R_prime < max_R or mcs <= 0) {
if (ue.h_dl->nof_retx() > 0 or bwp_pdsch_slot.dl.phy.pdsch.back().sch.grant.tb[0].R_prime < max_R or mcs <= 0) {
break;
}
// Decrease MCS if first tx and rate is too high
mcs--;
ue.h_dl->set_mcs(mcs);
bwp_pdsch_slot.pdschs.pop_back();
bwp_pdsch_slot.dl.phy.pdsch.pop_back();
bwp_uci_slot.pending_acks.pop_back();
}
if (mcs == 0) {
@ -337,12 +314,11 @@ alloc_result bwp_slot_allocator::alloc_pusch(slot_ue& ue, const prb_grant& ul_pr
if (ret != alloc_result::success) {
return ret;
}
if (ue.h_ul == nullptr) {
logger.warning("SCHED: Trying to allocate PUSCH for rnti=0x%x with no available HARQs", ue.rnti);
return alloc_result::no_rnti_opportunity;
ret = verify_ue_cfg(ue.cfg(), ue.h_ul);
if (ret != alloc_result::success) {
return ret;
}
pdcch_ul_list_t& pdcchs = bwp_pdcch_slot.ul_pdcchs;
pdcch_ul_list_t& pdcchs = bwp_pdcch_slot.dl.phy.pdcch_ul;
if (bwp_pusch_slot.ul_prbs.collides(ul_prbs)) {
return alloc_result::sch_collision;
}
@ -350,41 +326,42 @@ alloc_result bwp_slot_allocator::alloc_pusch(slot_ue& ue, const prb_grant& ul_pr
// Choose the ss_id the highest number of candidates
uint32_t ss_id = 0, max_nof_candidates = 0;
for (uint32_t i = 0; i < 3; ++i) {
uint32_t nof_candidates = ue.cfg->cce_pos_list(i, pdcch_slot.slot_idx(), aggr_idx).size();
uint32_t nof_candidates = ue->cce_pos_list(i, pdcch_slot.slot_idx(), aggr_idx).size();
if (nof_candidates > max_nof_candidates) {
ss_id = i;
max_nof_candidates = nof_candidates;
}
}
uint32_t coreset_id = ue.cfg->phy().pdcch.search_space[ss_id].coreset_id;
if (not bwp_pdcch_slot.coresets[coreset_id].value().alloc_dci(pdcch_grant_type_t::ul_data, aggr_idx, ss_id, &ue)) {
uint32_t coreset_id = ue->phy().pdcch.search_space[ss_id].coreset_id;
if (not bwp_pdcch_slot.coresets[coreset_id].value().alloc_dci(
pdcch_grant_type_t::ul_data, aggr_idx, ss_id, &ue.cfg())) {
// Could not find space in PDCCH
return alloc_result::no_cch_space;
}
// Allocation Successful
if (ue.h_ul->empty()) {
int mcs = ue.cfg->fixed_pusch_mcs();
int tbs = 100;
bool success = ue.h_ul->new_tx(ue.pusch_slot, ue.pusch_slot, ul_prbs, mcs, ue.cfg->ue_cfg()->maxharq_tx);
int mcs = ue->fixed_pusch_mcs();
bool success = ue.h_ul->new_tx(ue.pusch_slot, ue.pusch_slot, ul_prbs, mcs, ue->ue_cfg().maxharq_tx);
srsran_assert(success, "Failed to allocate UL HARQ");
} else {
bool success = ue.h_ul->new_retx(ue.pusch_slot, ue.pusch_slot, ul_prbs);
srsran_assert(success, "Failed to allocate UL HARQ retx");
}
// Allocation Successful
// Generate PDCCH
pdcch_ul_t& pdcch = pdcchs.back();
fill_ul_dci_ue_fields(ue, *bwp_grid.cfg, ss_id, pdcch.dci.ctx.location, pdcch.dci);
pdcch.dci_cfg = ue.cfg->phy().get_dci_cfg();
pdcch.dci_cfg = ue->phy().get_dci_cfg();
// Generate PUSCH
bwp_pusch_slot.ul_prbs |= ul_prbs;
bwp_pusch_slot.puschs.emplace_back();
pusch_t& pusch = bwp_pusch_slot.puschs.back();
bwp_pusch_slot.ul.pusch.emplace_back();
pusch_t& pusch = bwp_pusch_slot.ul.pusch.back();
srsran_slot_cfg_t slot_cfg;
slot_cfg.idx = ue.pusch_slot.to_uint();
pusch.pid = ue.h_ul->pid;
bool success = ue.cfg->phy().get_pusch_cfg(slot_cfg, pdcch.dci, pusch.sch);
bool success = ue->phy().get_pusch_cfg(slot_cfg, pdcch.dci, pusch.sch);
srsran_assert(success, "Error converting DCI to PUSCH grant");
pusch.sch.grant.tb[0].softbuffer.rx = ue.h_ul->get_softbuffer().get();
if (ue.h_ul->nof_retx() == 0) {
@ -396,20 +373,33 @@ alloc_result bwp_slot_allocator::alloc_pusch(slot_ue& ue, const prb_grant& ul_pr
return alloc_result::success;
}
alloc_result bwp_slot_allocator::verify_pdsch_space(bwp_slot_grid& bwp_pdsch, bwp_slot_grid& bwp_pdcch) const
alloc_result bwp_slot_allocator::verify_pdsch_space(bwp_slot_grid& pdsch_grid,
bwp_slot_grid& pdcch_grid,
bwp_slot_grid* uci_grid) const
{
if (not bwp_pdsch.is_dl() or not bwp_pdcch.is_dl()) {
logger.warning("SCHED: Trying to allocate PDSCH in TDD non-DL slot index=%d", bwp_pdsch.slot_idx);
if (not pdsch_grid.is_dl() or not pdcch_grid.is_dl()) {
logger.warning("SCHED: Trying to allocate PDSCH in TDD non-DL slot index=%d", pdsch_grid.slot_idx);
return alloc_result::no_sch_space;
}
if (bwp_pdcch.dl_pdcchs.full()) {
if (pdcch_grid.dl.phy.pdcch_dl.full()) {
logger.warning("SCHED: Maximum number of DL PDCCH allocations reached");
return alloc_result::no_cch_space;
}
if (bwp_pdsch.pdschs.full()) {
if (pdsch_grid.dl.phy.pdsch.full()) {
logger.warning("SCHED: Maximum number of DL PDSCH grants reached");
return alloc_result::no_sch_space;
}
if (uci_grid != nullptr) {
if (uci_grid->pending_acks.full()) {
logger.warning("SCHED: No space for ACK.");
return alloc_result::no_grant_space;
}
}
if (not pdsch_grid.dl.phy.ssb.empty()) {
// TODO: support concurrent PDSCH and SSB
logger.debug("SCHED: skipping PDSCH allocation. Cause: concurrent PDSCH and SSB not yet supported");
return alloc_result::no_sch_space;
}
return alloc_result::success;
}
@ -425,17 +415,31 @@ alloc_result bwp_slot_allocator::verify_pusch_space(bwp_slot_grid& pusch_grid, b
logger.warning("SCHED: Trying to allocate PDCCH in TDD non-DL slot index=%d", pdcch_grid->slot_idx);
return alloc_result::no_sch_space;
}
if (pdcch_grid->ul_pdcchs.full()) {
if (pdcch_grid->dl.phy.pdcch_ul.full()) {
logger.warning("SCHED: Maximum number of PUSCH allocations reached");
return alloc_result::no_grant_space;
}
}
if (pusch_grid.puschs.full()) {
if (pusch_grid.ul.pusch.full()) {
logger.warning("SCHED: Maximum number of PUSCH allocations reached");
return alloc_result::no_grant_space;
}
return alloc_result::success;
}
alloc_result bwp_slot_allocator::verify_ue_cfg(const ue_carrier_params_t& ue_cfg, harq_proc* harq) const
{
if (ue_cfg.active_bwp().bwp_id != cfg.bwp_id) {
logger.warning(
"SCHED: Trying to allocate rnti=0x%x in inactive BWP id=%d", ue_cfg.rnti, ue_cfg.active_bwp().bwp_id);
return alloc_result::no_rnti_opportunity;
}
if (harq == nullptr) {
logger.warning("SCHED: Trying to allocate rnti=0x%x with no available HARQs", ue_cfg.rnti);
return alloc_result::no_rnti_opportunity;
}
return alloc_result::success;
}
} // namespace sched_nr_impl
} // namespace srsenb

@ -35,8 +35,8 @@ void fill_dci_common(const slot_ue& ue, const bwp_params_t& bwp_cfg, DciDlOrUl&
{
const static uint32_t rv_idx[4] = {0, 2, 3, 1};
dci.bwp_id = ue.cfg->active_bwp().bwp_id;
dci.cc_id = ue.cc;
dci.bwp_id = ue->active_bwp().bwp_id;
dci.cc_id = ue->cc;
dci.tpc = 1;
// harq
harq_proc* h = std::is_same<DciDlOrUl, srsran_dci_dl_nr_t>::value ? static_cast<harq_proc*>(ue.h_dl)
@ -77,9 +77,9 @@ bool fill_dci_rar(prb_interval interv, uint16_t ra_rnti, const bwp_params_t& bwp
bool fill_dci_msg3(const slot_ue& ue, const bwp_params_t& bwp_cfg, srsran_dci_ul_nr_t& msg3_dci)
{
fill_dci_common(ue, bwp_cfg, msg3_dci);
msg3_dci.ctx.coreset_id = ue.cfg->phy().pdcch.ra_search_space.coreset_id;
msg3_dci.ctx.coreset_id = ue->phy().pdcch.ra_search_space.coreset_id;
msg3_dci.ctx.rnti_type = srsran_rnti_type_tc;
msg3_dci.ctx.rnti = ue.rnti;
msg3_dci.ctx.rnti = ue->rnti;
msg3_dci.ctx.ss_type = srsran_search_space_type_rar;
if (ue.h_ul->nof_retx() == 0) {
msg3_dci.ctx.format = srsran_dci_format_nr_rar;
@ -98,7 +98,7 @@ void fill_dl_dci_ue_fields(const slot_ue& ue,
{
// Note: DCI location may not be the final one, as scheduler may rellocate the UE PDCCH. However, the remaining DCI
// params are independent of the exact DCI location
bool ret = ue.cfg->phy().get_dci_ctx_pdsch_rnti_c(ss_id, dci_pos, ue.rnti, dci.ctx);
bool ret = ue->phy().get_dci_ctx_pdsch_rnti_c(ss_id, dci_pos, ue->rnti, dci.ctx);
srsran_assert(ret, "Invalid DL DCI format");
fill_dci_common(ue, bwp_cfg, dci);
@ -115,7 +115,7 @@ void fill_ul_dci_ue_fields(const slot_ue& ue,
srsran_dci_location_t dci_pos,
srsran_dci_ul_nr_t& dci)
{
bool ret = ue.cfg->phy().get_dci_ctx_pusch_rnti_c(ss_id, dci_pos, ue.rnti, dci.ctx);
bool ret = ue->phy().get_dci_ctx_pusch_rnti_c(ss_id, dci_pos, ue->rnti, dci.ctx);
srsran_assert(ret, "Invalid DL DCI format");
fill_dci_common(ue, bwp_cfg, dci);
@ -134,8 +134,7 @@ void log_sched_slot_ues(srslog::basic_logger& logger, slot_point pdcch_slot, uin
for (const auto& ue_pair : slot_ues) {
auto& ue = ue_pair->second;
fmt::format_to(
fmtbuf, "{}{{rnti=0x{:x}, dl_bs={}, ul_bs={}}}", use_comma, ue.rnti, ue.dl_pending_bytes, ue.ul_pending_bytes);
fmt::format_to(fmtbuf, "{}{{rnti=0x{:x}, dl_bs={}, ul_bs={}}}", use_comma, ue->rnti, ue.dl_bytes, ue.ul_bytes);
use_comma = ", ";
}
@ -149,16 +148,16 @@ void log_sched_bwp_result(srslog::basic_logger& logger,
{
const bwp_slot_grid& bwp_slot = res_grid[pdcch_slot];
size_t rar_count = 0;
for (const pdcch_dl_t& pdcch : bwp_slot.dl_pdcchs) {
for (const pdcch_dl_t& pdcch : bwp_slot.dl.phy.pdcch_dl) {
fmt::memory_buffer fmtbuf;
if (pdcch.dci.ctx.rnti_type == srsran_rnti_type_c) {
const slot_ue& ue = slot_ues[pdcch.dci.ctx.rnti];
fmt::format_to(fmtbuf,
"SCHED: DL {}, cc={}, rnti=0x{:x}, pid={}, cs={}, f={}, prbs={}, nrtx={}, dai={}, "
"tbs={}, bs={}, pdsch_slot={}, tti_ack={}",
"tbs={}, bs={}, pdsch_slot={}, ack_slot={}",
ue.h_dl->nof_retx() == 0 ? "tx" : "retx",
res_grid.cfg->cc,
ue.rnti,
ue->rnti,
pdcch.dci.pid,
pdcch.dci.ctx.coreset_id,
srsran_dci_format_nr_string(pdcch.dci.ctx.format),
@ -166,11 +165,11 @@ void log_sched_bwp_result(srslog::basic_logger& logger,
ue.h_dl->nof_retx(),
pdcch.dci.dai,
ue.h_dl->tbs() / 8u,
ue.dl_pending_bytes,
ue.dl_bytes,
ue.pdsch_slot,
ue.uci_slot);
} else if (pdcch.dci.ctx.rnti_type == srsran_rnti_type_ra) {
const pdsch_t& pdsch = bwp_slot.pdschs[std::distance(bwp_slot.dl_pdcchs.data(), &pdcch)];
const pdsch_t& pdsch = bwp_slot.dl.phy.pdsch[std::distance(bwp_slot.dl.phy.pdcch_dl.data(), &pdcch)];
srsran::const_span<bool> prbs{pdsch.sch.grant.prb_idx, pdsch.sch.grant.prb_idx + pdsch.sch.grant.nof_prb};
uint32_t start_idx = std::distance(prbs.begin(), std::find(prbs.begin(), prbs.end(), true));
uint32_t end_idx = std::distance(prbs.begin(), std::find(prbs.begin() + start_idx, prbs.end(), false));
@ -181,7 +180,7 @@ void log_sched_bwp_result(srslog::basic_logger& logger,
srsran::interval<uint32_t>{start_idx, end_idx},
pdcch_slot,
pdcch_slot + res_grid.cfg->pusch_ra_list[0].msg3_delay,
bwp_slot.rar[rar_count].grants.size());
bwp_slot.dl.rar[rar_count].grants.size());
rar_count++;
} else {
fmt::format_to(fmtbuf, "SCHED: unknown format");
@ -189,28 +188,28 @@ void log_sched_bwp_result(srslog::basic_logger& logger,
logger.info("%s", srsran::to_c_str(fmtbuf));
}
for (const pdcch_ul_t& pdcch : bwp_slot.ul_pdcchs) {
for (const pdcch_ul_t& pdcch : bwp_slot.dl.phy.pdcch_ul) {
fmt::memory_buffer fmtbuf;
if (pdcch.dci.ctx.rnti_type == srsran_rnti_type_c) {
const slot_ue& ue = slot_ues[pdcch.dci.ctx.rnti];
fmt::format_to(fmtbuf,
"SCHED: UL {}, cc={}, rnti=0x{:x}, pid={}, cs={}, f={}, nrtx={}, tbs={}, bs={}, tti_pusch={}",
"SCHED: UL {}, cc={}, rnti=0x{:x}, pid={}, cs={}, f={}, nrtx={}, tbs={}, bs={}, pusch_slot={}",
ue.h_ul->nof_retx() == 0 ? "tx" : "retx",
res_grid.cfg->cc,
ue.rnti,
ue->rnti,
pdcch.dci.pid,
pdcch.dci.ctx.coreset_id,
srsran_dci_format_nr_string(pdcch.dci.ctx.format),
ue.h_ul->nof_retx(),
ue.h_ul->tbs() / 8u,
ue.ul_pending_bytes,
ue.ul_bytes,
ue.pusch_slot);
} else if (pdcch.dci.ctx.rnti_type == srsran_rnti_type_tc) {
const slot_ue& ue = slot_ues[pdcch.dci.ctx.rnti];
fmt::format_to(fmtbuf,
"SCHED: UL Msg3, cc={}, tc-rnti=0x{:x}, pid={}, nrtx={}, f={}, tti_pusch={}",
res_grid.cfg->cc,
ue.rnti,
ue->rnti,
pdcch.dci.pid,
ue.h_ul->nof_retx(),
srsran_dci_format_nr_string(pdcch.dci.ctx.format),

@ -20,7 +20,6 @@
*/
#include "srsenb/hdr/stack/mac/nr/sched_nr_pdcch.h"
#include "srsenb/hdr/stack/mac/nr/sched_nr_ue.h"
namespace srsenb {
namespace sched_nr_impl {
@ -59,7 +58,7 @@ void coreset_region::reset()
bool coreset_region::alloc_dci(pdcch_grant_type_t alloc_type,
uint32_t aggr_idx,
uint32_t search_space_id,
slot_ue* user)
const ue_carrier_params_t* user)
{
srsran_assert(aggr_idx <= 4, "Invalid DCI aggregation level=%d", 1U << aggr_idx);
srsran_assert((user == nullptr) xor
@ -189,9 +188,8 @@ srsran::span<const uint32_t> coreset_region::get_cce_loc_table(const alloc_recor
{
switch (record.alloc_type) {
case pdcch_grant_type_t::dl_data:
return record.ue->cfg->cce_pos_list(record.ss_id, slot_idx, record.aggr_idx);
case pdcch_grant_type_t::ul_data:
return record.ue->cfg->cce_pos_list(record.ss_id, slot_idx, record.aggr_idx);
return record.ue->cce_pos_list(record.ss_id, slot_idx, record.aggr_idx);
case pdcch_grant_type_t::rar:
return rar_cce_list[slot_idx][record.aggr_idx];
default:

@ -60,7 +60,7 @@ void sched_nr_time_rr::sched_dl_users(slot_ue_map_t& ue_db, bwp_slot_allocator&
// Move on to new txs
round_robin_apply(ue_db, slot_alloc.get_pdcch_tti().to_uint(), [&slot_alloc](slot_ue& ue) {
if (ue.h_dl != nullptr and ue.h_dl->empty()) {
if (ue.dl_bytes > 0 and ue.h_dl != nullptr and ue.h_dl->empty()) {
alloc_result res = slot_alloc.alloc_pdsch(ue, prb_interval{0, slot_alloc.cfg.cfg.rb_width});
if (res == alloc_result::success) {
return true;
@ -87,7 +87,7 @@ void sched_nr_time_rr::sched_ul_users(slot_ue_map_t& ue_db, bwp_slot_allocator&
// Move on to new txs
round_robin_apply(ue_db, slot_alloc.get_pdcch_tti().to_uint(), [&slot_alloc](slot_ue& ue) {
if (ue.h_ul != nullptr and ue.h_ul->empty()) {
if (ue.ul_bytes > 0 and ue.h_ul != nullptr and ue.h_ul->empty()) {
alloc_result res = slot_alloc.alloc_pusch(ue, prb_interval{0, slot_alloc.cfg.cfg.rb_width});
if (res == alloc_result::success) {
return true;

@ -26,13 +26,42 @@
namespace srsenb {
namespace sched_nr_impl {
slot_ue::slot_ue(uint16_t rnti_, slot_point slot_rx_, uint32_t cc_) : rnti(rnti_), slot_rx(slot_rx_), cc(cc_) {}
/// Per-slot snapshot of a UE carrier used while generating one scheduling decision.
/// Derives the relevant slot points (PDSCH, UCI/HARQ-ACK, PUSCH) from the PDCCH
/// slot and selects the DL/UL HARQ process to use — a pending retransmission if
/// one exists, otherwise an empty process — but only for the directions that are
/// active in this slot according to the cell's duplex (TDD) configuration.
/// @param ue_               carrier-specific UE state this snapshot points into
/// @param slot_tx_          slot in which the PDCCH (DCI) would be transmitted
/// @param dl_pending_bytes  DL bytes pending for this UE (stored only if DL is active)
/// @param ul_pending_bytes  UL bytes pending for this UE (stored only if UL is active)
slot_ue::slot_ue(ue_carrier& ue_, slot_point slot_tx_, uint32_t dl_pending_bytes, uint32_t ul_pending_bytes) :
  ue(&ue_), pdcch_slot(slot_tx_)
{
  const uint32_t k0 = 0; // PDCCH-to-PDSCH slot offset; fixed to same-slot scheduling here
  pdsch_slot = pdcch_slot + k0;
  uint32_t k1 = ue->bwp_cfg.get_k1(pdsch_slot); // PDSCH-to-HARQ-ACK (UCI) slot offset from UE BWP config
  uci_slot = pdsch_slot + k1;
  uint32_t k2 = ue->bwp_cfg.active_bwp().pusch_ra_list[0].K; // PDCCH-to-PUSCH slot offset (first time-domain RA entry)
  pusch_slot = pdcch_slot + k2;
  const srsran_duplex_config_nr_t& tdd_cfg = ue->cell_params.cfg.duplex;
  dl_active = srsran_duplex_nr_is_dl(&tdd_cfg, 0, pdsch_slot.slot_idx());
  if (dl_active) {
    dl_bytes = dl_pending_bytes;
    // Prefer retransmitting a pending DL HARQ; fall back to an empty HARQ for a new tx.
    // h_dl may remain nullptr if neither exists (UE then has nothing schedulable in DL).
    h_dl = ue->harq_ent.find_pending_dl_retx();
    if (h_dl == nullptr) {
      h_dl = ue->harq_ent.find_empty_dl_harq();
    }
  }
  ul_active = srsran_duplex_nr_is_ul(&tdd_cfg, 0, pusch_slot.slot_idx());
  if (ul_active) {
    ul_bytes = ul_pending_bytes;
    // Same retx-first policy for UL HARQ selection.
    h_ul = ue->harq_ent.find_pending_ul_retx();
    if (h_ul == nullptr) {
      h_ul = ue->harq_ent.find_empty_ul_harq();
    }
  }
}
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
ue_carrier::ue_carrier(uint16_t rnti_, const ue_cfg_t& uecfg_, const cell_params_t& cell_params_) :
rnti(rnti_),
cc(cell_params_.cc),
logger(srslog::fetch_basic_logger(cell_params_.sched_args.logger_name)),
bwp_cfg(rnti_, cell_params_.bwps[0], uecfg_),
cell_params(cell_params_),
harq_ent(rnti_, cell_params_.nof_prb(), SCHED_NR_MAX_HARQ, cell_params_.bwps[0].logger)
@ -40,58 +69,37 @@ ue_carrier::ue_carrier(uint16_t rnti_, const ue_cfg_t& uecfg_, const cell_params
void ue_carrier::set_cfg(const ue_cfg_t& ue_cfg)
{
bwp_cfg = bwp_ue_cfg(rnti, cell_params.bwps[0], ue_cfg);
bwp_cfg = ue_carrier_params_t(rnti, cell_params.bwps[0], ue_cfg);
}
void ue_carrier::new_slot(slot_point slot_tx)
int ue_carrier::dl_ack_info(uint32_t pid, uint32_t tb_idx, bool ack)
{
harq_ent.new_slot(slot_tx - TX_ENB_DELAY);
int tbs = harq_ent.dl_ack_info(pid, tb_idx, ack);
if (tbs < 0) {
logger.warning("SCHED: rnti=0x%x received DL HARQ-ACK for empty pid=%d", rnti, pid);
return tbs;
}
if (ack) {
metrics.tx_brate += tbs;
} else {
metrics.tx_errors++;
}
metrics.tx_pkts++;
return tbs;
}
slot_ue ue_carrier::try_reserve(slot_point pdcch_slot, uint32_t dl_pending_bytes, uint32_t ul_pending_bytes)
int ue_carrier::ul_crc_info(uint32_t pid, bool crc)
{
slot_point slot_rx = pdcch_slot - TX_ENB_DELAY;
// copy cc-specific parameters and find available HARQs
slot_ue sfu(rnti, slot_rx, cc);
sfu.cfg = &bwp_cfg;
sfu.pdcch_slot = pdcch_slot;
sfu.harq_ent = &harq_ent;
const uint32_t k0 = 0;
sfu.pdsch_slot = sfu.pdcch_slot + k0;
uint32_t k1 = sfu.cfg->get_k1(sfu.pdsch_slot);
sfu.uci_slot = sfu.pdsch_slot + k1;
uint32_t k2 = bwp_cfg.active_bwp().pusch_ra_list[0].K;
sfu.pusch_slot = sfu.pdcch_slot + k2;
sfu.dl_cqi = dl_cqi;
sfu.ul_cqi = ul_cqi;
// set UE-common parameters
sfu.dl_pending_bytes = dl_pending_bytes;
sfu.ul_pending_bytes = ul_pending_bytes;
const srsran_duplex_config_nr_t& tdd_cfg = cell_params.cfg.duplex;
if (srsran_duplex_nr_is_dl(&tdd_cfg, 0, sfu.pdsch_slot.slot_idx())) {
// If DL enabled
sfu.h_dl = harq_ent.find_pending_dl_retx();
if (sfu.h_dl == nullptr and sfu.dl_pending_bytes > 0) {
sfu.h_dl = harq_ent.find_empty_dl_harq();
}
}
if (srsran_duplex_nr_is_ul(&tdd_cfg, 0, sfu.pusch_slot.slot_idx())) {
// If UL enabled
sfu.h_ul = harq_ent.find_pending_ul_retx();
if (sfu.h_ul == nullptr and sfu.ul_pending_bytes > 0) {
sfu.h_ul = harq_ent.find_empty_ul_harq();
}
int ret = harq_ent.ul_crc_info(pid, crc);
if (ret < 0) {
logger.warning("SCHED: rnti=0x%x,cc=%d received CRC for empty pid=%d", rnti, cc, pid);
}
return sfu;
return ret;
}
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
ue::ue(uint16_t rnti_, const ue_cfg_t& cfg, const sched_params& sched_cfg_) :
ue::ue(uint16_t rnti_, const ue_cfg_t& cfg, const sched_params_t& sched_cfg_) :
rnti(rnti_), sched_cfg(sched_cfg_), buffers(rnti_, srslog::fetch_basic_logger(sched_cfg_.sched_cfg.logger_name))
{
set_cfg(cfg);
@ -117,6 +125,12 @@ void ue::new_slot(slot_point pdcch_slot)
{
last_pdcch_slot = pdcch_slot;
for (std::unique_ptr<ue_carrier>& cc : carriers) {
if (cc != nullptr) {
cc->harq_ent.new_slot(pdcch_slot - TX_ENB_DELAY);
}
}
// Compute pending DL/UL bytes for {rnti, pdcch_slot}
if (sched_cfg.sched_cfg.auto_refill_buffer) {
dl_pending_bytes = 1000000;
@ -146,10 +160,10 @@ void ue::new_slot(slot_point pdcch_slot)
}
}
slot_ue ue::try_reserve(slot_point pdcch_slot, uint32_t cc)
slot_ue ue::make_slot_ue(slot_point pdcch_slot, uint32_t cc)
{
srsran_assert(carriers[cc] != nullptr, "try_reserve() called for inexistent rnti=0x%x,cc=%d", rnti, cc);
return carriers[cc]->try_reserve(pdcch_slot, dl_pending_bytes, ul_pending_bytes);
srsran_assert(carriers[cc] != nullptr, "make_slot_ue() called for inexistent rnti=0x%x,cc=%d", rnti, cc);
return slot_ue(*carriers[cc], pdcch_slot, dl_pending_bytes, ul_pending_bytes);
}
} // namespace sched_nr_impl

@ -27,59 +27,31 @@
namespace srsenb {
namespace sched_nr_impl {
slot_cc_worker::slot_cc_worker(serv_cell_manager& cc_sched) :
cell(cc_sched),
cfg(cc_sched.cfg),
bwp_alloc(cc_sched.bwps[0].grid),
logger(srslog::fetch_basic_logger(cc_sched.cfg.sched_args.logger_name))
{}
void slot_cc_worker::enqueue_cc_event(srsran::move_callback<void()> ev)
cc_worker::cc_worker(const cell_params_t& params) :
cfg(params), logger(srslog::fetch_basic_logger(params.sched_args.logger_name))
{
std::lock_guard<std::mutex> lock(feedback_mutex);
pending_events.emplace_back();
pending_events.back() = std::move(ev);
}
for (uint32_t bwp_id = 0; bwp_id < cfg.cfg.bwps.size(); ++bwp_id) {
bwps.emplace_back(cfg.bwps[bwp_id]);
}
void slot_cc_worker::enqueue_cc_feedback(uint16_t rnti, feedback_callback_t fdbk)
{
std::lock_guard<std::mutex> lock(feedback_mutex);
pending_feedback.emplace_back();
pending_feedback.back().rnti = rnti;
pending_feedback.back().fdbk = std::move(fdbk);
// Pre-allocate HARQs in common pool of softbuffers
harq_softbuffer_pool::get_instance().init_pool(cfg.nof_prb());
}
void slot_cc_worker::run_feedback(ue_map_t& ue_db)
void cc_worker::dl_rach_info(const sched_nr_interface::rar_info_t& rar_info)
{
{
std::lock_guard<std::mutex> lock(feedback_mutex);
tmp_feedback_to_run.swap(pending_feedback);
tmp_events_to_run.swap(pending_events);
}
for (srsran::move_callback<void()>& ev : tmp_events_to_run) {
ev();
}
tmp_events_to_run.clear();
for (feedback_t& f : tmp_feedback_to_run) {
if (ue_db.contains(f.rnti) and ue_db[f.rnti]->carriers[cfg.cc] != nullptr) {
f.fdbk(*ue_db[f.rnti]->carriers[cfg.cc]);
} else {
logger.info("SCHED: feedback received for rnti=0x%x, cc=%d that has been removed.", f.rnti, cfg.cc);
}
}
tmp_feedback_to_run.clear();
bwps[0].ra.dl_rach_info(rar_info);
}
/// Called within a locked context, to generate {slot, cc} scheduling decision
void slot_cc_worker::run(slot_point pdcch_slot, ue_map_t& ue_db)
{
srsran_assert(not running(), "scheduler worker::start() called for active worker");
slot_rx = pdcch_slot - TX_ENB_DELAY;
// Run pending cell feedback (process feedback)
run_feedback(ue_db);
dl_sched_res_t* cc_worker::run_slot(slot_point pdcch_slot, ue_map_t& ue_db)
{
// Reset old sched outputs
slot_point old_slot = pdcch_slot - TX_ENB_DELAY - 1;
for (bwp_manager& bwp : bwps) {
bwp.grid[old_slot].reset();
}
// Reserve UEs for this worker slot (select candidate UEs)
for (auto& ue_pair : ue_db) {
@ -89,11 +61,8 @@ void slot_cc_worker::run(slot_point pdcch_slot, ue_map_t& ue_db)
continue;
}
// Update UE CC state
u.carriers[cfg.cc]->new_slot(pdcch_slot);
// info for a given UE on a slot to be processed
slot_ues.insert(rnti, u.try_reserve(pdcch_slot, cfg.cc));
slot_ues.insert(rnti, u.make_slot_ue(pdcch_slot, cfg.cc));
if (slot_ues[rnti].empty()) {
// Failed to generate slot UE because UE has no conditions for DL/UL tx
slot_ues.erase(rnti);
@ -103,48 +72,58 @@ void slot_cc_worker::run(slot_point pdcch_slot, ue_map_t& ue_db)
}
// Create an BWP allocator object that will passed along to RA, SI, Data schedulers
bwp_alloc.new_slot(slot_rx + TX_ENB_DELAY, slot_ues);
bwp_slot_allocator bwp_alloc{bwps[0].grid, pdcch_slot, slot_ues};
// Log UEs state for slot
log_sched_slot_ues(logger, bwp_alloc.get_pdcch_tti(), cfg.cc, slot_ues);
log_sched_slot_ues(logger, pdcch_slot, cfg.cc, slot_ues);
// Allocate cell DL signalling
bwp_slot_grid& bwp_pdcch_slot = bwps[0].grid[pdcch_slot];
sched_dl_signalling(*bwps[0].cfg, pdcch_slot, bwp_pdcch_slot.dl.phy.ssb, bwp_pdcch_slot.dl.phy.nzp_csi_rs);
// Allocate pending RARs
cell.bwps[0].ra.run_slot(bwp_alloc);
bwps[0].ra.run_slot(bwp_alloc);
// TODO: Prioritize PDCCH scheduling for DL and UL data in a Round-Robin fashion
alloc_dl_ues();
alloc_ul_ues();
alloc_dl_ues(bwp_alloc);
alloc_ul_ues(bwp_alloc);
// Post-processing of scheduling decisions
postprocess_decisions();
postprocess_decisions(bwp_alloc);
// Log CC scheduler result
log_sched_bwp_result(logger, bwp_alloc.get_pdcch_tti(), cell.bwps[0].grid, slot_ues);
log_sched_bwp_result(logger, bwp_alloc.get_pdcch_tti(), bwps[0].grid, slot_ues);
// releases UE resources
slot_ues.clear();
slot_rx = {};
return &bwp_pdcch_slot.dl;
}
void slot_cc_worker::alloc_dl_ues()
/// Accessor for the UL scheduling result previously generated for slot \c sl
/// on the primary BWP. Performs no computation of its own.
ul_sched_t* cc_worker::get_ul_sched(slot_point sl)
{
  auto& slot_grid = bwps[0].grid[sl];
  return &slot_grid.ul;
}
void cc_worker::alloc_dl_ues(bwp_slot_allocator& bwp_alloc)
{
if (not cfg.sched_args.pdsch_enabled) {
return;
}
cell.bwps[0].data_sched->sched_dl_users(slot_ues, bwp_alloc);
bwps[0].data_sched->sched_dl_users(slot_ues, bwp_alloc);
}
void slot_cc_worker::alloc_ul_ues()
void cc_worker::alloc_ul_ues(bwp_slot_allocator& bwp_alloc)
{
if (not cfg.sched_args.pusch_enabled) {
return;
}
cell.bwps[0].data_sched->sched_ul_users(slot_ues, bwp_alloc);
bwps[0].data_sched->sched_ul_users(slot_ues, bwp_alloc);
}
void slot_cc_worker::postprocess_decisions()
void cc_worker::postprocess_decisions(bwp_slot_allocator& bwp_alloc)
{
auto& bwp_slot = cell.bwps[0].grid[bwp_alloc.get_pdcch_tti()];
auto& bwp_slot = bwps[0].grid[bwp_alloc.get_pdcch_tti()];
srsran_slot_cfg_t slot_cfg{};
slot_cfg.idx = bwp_alloc.get_pdcch_tti().to_uint();
@ -154,7 +133,7 @@ void slot_cc_worker::postprocess_decisions()
srsran_pdsch_ack_nr_t ack = {};
for (auto& h_ack : bwp_slot.pending_acks) {
if (h_ack.res.rnti == ue.rnti) {
if (h_ack.res.rnti == ue->rnti) {
ack.nof_cc = 1;
srsran_harq_ack_m_t ack_m = {};
@ -165,7 +144,7 @@ void slot_cc_worker::postprocess_decisions()
}
srsran_uci_cfg_nr_t uci_cfg = {};
if (not ue.cfg->phy().get_uci_cfg(slot_cfg, ack, uci_cfg)) {
if (not ue->phy().get_uci_cfg(slot_cfg, ack, uci_cfg)) {
logger.error("Error getting UCI configuration");
continue;
}
@ -175,15 +154,15 @@ void slot_cc_worker::postprocess_decisions()
}
bool has_pusch = false;
for (auto& pusch : bwp_slot.puschs) {
if (pusch.sch.grant.rnti == ue.rnti) {
for (auto& pusch : bwp_slot.ul.pusch) {
if (pusch.sch.grant.rnti == ue->rnti) {
// Put UCI configuration in PUSCH config
has_pusch = true;
// If has PUSCH, no SR shall be received
uci_cfg.o_sr = 0;
if (not ue.cfg->phy().get_pusch_uci_cfg(slot_cfg, uci_cfg, pusch.sch)) {
if (not ue->phy().get_pusch_uci_cfg(slot_cfg, uci_cfg, pusch.sch)) {
logger.error("Error setting UCI configuration in PUSCH");
continue;
}
@ -192,17 +171,17 @@ void slot_cc_worker::postprocess_decisions()
}
if (not has_pusch) {
// If any UCI information is triggered, schedule PUCCH
if (bwp_slot.pucch.full()) {
if (bwp_slot.ul.pucch.full()) {
logger.warning("SCHED: Cannot fit pending UCI into PUCCH");
continue;
}
bwp_slot.pucch.emplace_back();
mac_interface_phy_nr::pucch_t& pucch = bwp_slot.pucch.back();
bwp_slot.ul.pucch.emplace_back();
mac_interface_phy_nr::pucch_t& pucch = bwp_slot.ul.pucch.back();
uci_cfg.pucch.rnti = ue.rnti;
uci_cfg.pucch.rnti = ue->rnti;
pucch.candidates.emplace_back();
pucch.candidates.back().uci_cfg = uci_cfg;
if (not ue.cfg->phy().get_pucch_uci_cfg(slot_cfg, uci_cfg, pucch.pucch_cfg, pucch.candidates.back().resource)) {
if (not ue->phy().get_pucch_uci_cfg(slot_cfg, uci_cfg, pucch.pucch_cfg, pucch.candidates.back().resource)) {
logger.error("Error getting UCI CFG");
continue;
}
@ -218,7 +197,7 @@ void slot_cc_worker::postprocess_decisions()
// Append new resource
pucch.candidates.emplace_back();
pucch.candidates.back().uci_cfg = uci_cfg;
if (not ue.cfg->phy().get_pucch_uci_cfg(slot_cfg, uci_cfg, pucch.pucch_cfg, pucch.candidates.back().resource)) {
if (not ue->phy().get_pucch_uci_cfg(slot_cfg, uci_cfg, pucch.pucch_cfg, pucch.candidates.back().resource)) {
logger.error("Error getting UCI CFG");
continue;
}
@ -227,178 +206,5 @@ void slot_cc_worker::postprocess_decisions()
}
}
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
sched_worker_manager::sched_worker_manager(ue_map_t& ue_db_,
const sched_params& cfg_,
srsran::span<std::unique_ptr<serv_cell_manager> > cells_) :
cfg(cfg_), ue_db(ue_db_), logger(srslog::fetch_basic_logger(cfg_.sched_cfg.logger_name)), cells(cells_)
{
cc_worker_list.reserve(cfg.cells.size());
for (uint32_t cc = 0; cc < cfg.cells.size(); ++cc) {
cc_worker_list.emplace_back(new cc_context{*cells[cc]});
}
}
sched_worker_manager::~sched_worker_manager() = default;
void sched_worker_manager::enqueue_event(uint16_t rnti, srsran::move_callback<void()> ev)
{
std::lock_guard<std::mutex> lock(event_mutex);
next_slot_events.push_back(ue_event_t{rnti, std::move(ev)});
}
void sched_worker_manager::enqueue_cc_event(uint32_t cc, srsran::move_callback<void()> ev)
{
cc_worker_list[cc]->worker.enqueue_cc_event(std::move(ev));
}
/**
* Update UEs state that is non-CC specific (e.g. SRs, buffer status, UE configuration)
* @param slot_tx
* @param locked_context to update only UEs with CA enabled or not
*/
void sched_worker_manager::update_ue_db(slot_point slot_tx, bool locked_context)
{
// process non-cc specific feedback if pending (e.g. SRs, buffer updates, UE config)
for (ue_event_t& ev : slot_events) {
if ((locked_context and not ue_db.contains(ev.rnti)) or
(ue_db.contains(ev.rnti) and ue_db[ev.rnti]->has_ca() == locked_context)) {
ev.callback();
}
}
// prepare UEs internal state for new slot
for (auto& u : ue_db) {
if (u.second->has_ca() == locked_context) {
u.second->new_slot(slot_tx);
}
}
}
void sched_worker_manager::run_slot(slot_point slot_tx, uint32_t cc, dl_sched_res_t& dl_res, ul_sched_t& ul_res)
{
// Fill DL signalling messages that do not depend on UEs state
serv_cell_manager& serv_cell = *cells[cc];
bwp_slot_grid& bwp_slot = serv_cell.bwps[0].grid[slot_tx];
sched_dl_signalling(*serv_cell.bwps[0].cfg, slot_tx, bwp_slot.ssb, bwp_slot.nzp_csi_rs);
// Synchronization point between CC workers, to avoid concurrency in UE state access
srsran::bounded_vector<std::condition_variable*, SRSRAN_MAX_CARRIERS> waiting_cvars;
{
std::unique_lock<std::mutex> lock(slot_mutex);
while (current_slot.valid() and current_slot != slot_tx) {
// Wait for previous slot to finish
cc_worker_list[cc]->waiting++;
cc_worker_list[cc]->cvar.wait(lock);
cc_worker_list[cc]->waiting--;
}
if (not current_slot.valid()) {
/* First Worker to start slot */
// process non-cc specific feedback if pending for UEs with CA
// NOTE: there is no parallelism in these operations
slot_events.clear();
{
std::lock_guard<std::mutex> ev_lock(event_mutex);
next_slot_events.swap(slot_events);
}
update_ue_db(slot_tx, true);
// mark the start of slot. awake remaining workers if locking on the mutex
current_slot = slot_tx;
worker_count.store(static_cast<int>(cc_worker_list.size()), std::memory_order_relaxed);
for (auto& w : cc_worker_list) {
if (w->waiting > 0) {
waiting_cvars.push_back(&w->cvar);
}
}
lock.unlock();
for (auto& w : waiting_cvars) {
w->notify_one();
}
waiting_cvars.clear();
}
}
/* Parallel Region */
// process non-cc specific feedback if pending (e.g. SRs, buffer updates, UE config) for UEs without CA
update_ue_db(slot_tx, false);
// process pending feedback, generate {slot, cc} scheduling decision
cc_worker_list[cc]->worker.run(slot_tx, ue_db);
// decrement the number of active workers
int rem_workers = worker_count.fetch_sub(1, std::memory_order_release) - 1;
srsran_assert(rem_workers >= 0, "invalid number of calls to run_slot(slot, cc)");
if (rem_workers == 0) {
/* Last Worker to finish slot */
// Signal the release of slot if it is the last worker that finished its own generation
std::unique_lock<std::mutex> lock(slot_mutex);
current_slot = {};
// All the workers of the same slot have finished. Synchronize scheduling decisions with UEs state
for (auto& c : cc_worker_list) {
if (c->waiting > 0) {
waiting_cvars.push_back(&c->cvar);
}
}
// Awake waiting workers
lock.unlock();
for (auto& c : waiting_cvars) {
c->notify_one();
}
}
// Post-process and copy results to intermediate buffer
save_sched_result(slot_tx, cc, dl_res, ul_res);
}
void sched_worker_manager::get_metrics(mac_metrics_t& metrics)
{
std::unique_lock<std::mutex> lock(slot_mutex);
get_metrics_nolocking(metrics);
}
bool sched_worker_manager::save_sched_result(slot_point pdcch_slot,
uint32_t cc,
dl_sched_res_t& dl_res,
ul_sched_t& ul_res)
{
// NOTE: Unlocked region
auto& bwp_slot = cells[cc]->bwps[0].grid[pdcch_slot];
dl_res.dl_sched.pdcch_dl = bwp_slot.dl_pdcchs;
dl_res.dl_sched.pdcch_ul = bwp_slot.ul_pdcchs;
dl_res.dl_sched.pdsch = bwp_slot.pdschs;
dl_res.rar = bwp_slot.rar;
dl_res.dl_sched.ssb = bwp_slot.ssb;
dl_res.dl_sched.nzp_csi_rs = bwp_slot.nzp_csi_rs;
ul_res.pusch = bwp_slot.puschs;
ul_res.pucch = bwp_slot.pucch;
// clear up BWP slot
bwp_slot.reset();
return true;
}
void sched_worker_manager::get_metrics_nolocking(mac_metrics_t& metrics)
{
for (mac_ue_metrics_t& ue_metric : metrics.ues) {
if (ue_db.contains(ue_metric.rnti) and ue_db[ue_metric.rnti]->carriers[0] != nullptr) {
auto& ue_cc = *ue_db[ue_metric.rnti]->carriers[0];
std::lock_guard<std::mutex> lock(ue_cc.metrics_mutex);
ue_metric.tx_brate = ue_cc.metrics.tx_brate;
ue_metric.tx_errors = ue_cc.metrics.tx_errors;
ue_metric.tx_pkts = ue_cc.metrics.tx_pkts;
ue_cc.metrics = {};
}
}
}
} // namespace sched_nr_impl
} // namespace srsenb

@ -83,29 +83,35 @@ int ue_nr::process_pdu(srsran::unique_byte_buffer_t pdu)
logger.info("Rx PDU: rnti=0x%x, %s", rnti, srsran::to_c_str(str_buffer));
}
// First, process MAC CEs in reverse order (CE like C-RNTI get handled first)
for (uint32_t n = mac_pdu_ul.get_num_subpdus(), i = mac_pdu_ul.get_num_subpdus() - 1; n > 0; --n, i = n - 1) {
srsran::mac_sch_subpdu_nr subpdu = mac_pdu_ul.get_subpdu(i);
if (not subpdu.is_sdu()) {
// Process MAC CRNTI CE first, if it exists
uint32_t crnti_ce_pos = mac_pdu_ul.get_num_subpdus();
for (uint32_t n = mac_pdu_ul.get_num_subpdus(); n > 0; --n) {
srsran::mac_sch_subpdu_nr& subpdu = mac_pdu_ul.get_subpdu(n - 1);
if (subpdu.get_lcid() == srsran::mac_sch_subpdu_nr::nr_lcid_sch_t::CRNTI) {
if (process_ce_subpdu(subpdu) != SRSRAN_SUCCESS) {
return SRSRAN_ERROR;
}
crnti_ce_pos = n - 1;
}
}
// Second, handle all SDUs in order to avoid unnecessary reordering at higher layers
for (uint32_t i = 0; i < mac_pdu_ul.get_num_subpdus(); ++i) {
srsran::mac_sch_subpdu_nr subpdu = mac_pdu_ul.get_subpdu(i);
// Process SDUs and remaining MAC CEs
for (uint32_t n = 0; n < mac_pdu_ul.get_num_subpdus(); ++n) {
srsran::mac_sch_subpdu_nr& subpdu = mac_pdu_ul.get_subpdu(n);
if (subpdu.is_sdu()) {
rrc->set_activity_user(rnti);
rlc->write_pdu(rnti, subpdu.get_lcid(), subpdu.get_sdu(), subpdu.get_sdu_length());
} else if (n != crnti_ce_pos) {
if (process_ce_subpdu(subpdu) != SRSRAN_SUCCESS) {
return SRSRAN_ERROR;
}
}
}
return SRSRAN_SUCCESS;
}
int ue_nr::process_ce_subpdu(srsran::mac_sch_subpdu_nr& subpdu)
int ue_nr::process_ce_subpdu(const srsran::mac_sch_subpdu_nr& subpdu)
{
// Handle MAC CEs
switch (subpdu.get_lcid()) {
@ -238,9 +244,8 @@ void ue_nr::metrics_read(mac_ue_metrics_t* metrics_)
ue_metrics.dl_buffer = dl_buffer;
// set PCell sector id
std::array<int, SRSRAN_MAX_CARRIERS> cc_list; //= sched->get_enb_ue_cc_map(rnti);
auto it = std::find(cc_list.begin(), cc_list.end(), 0);
ue_metrics.cc_idx = std::distance(cc_list.begin(), it);
// TODO: use ue_cfg when multiple NR carriers are supported
ue_metrics.cc_idx = 0;
*metrics_ = ue_metrics;
phr_counter = 0;

@ -82,6 +82,25 @@ void lch_ue_manager::new_tti()
}
}
/// Forward a DL buffer-state update for the given logical channel to the base
/// class; on success, log the new (queue, priority queue) sizes at debug level.
void lch_ue_manager::dl_buffer_state(uint8_t lcid, uint32_t tx_queue, uint32_t prio_tx_queue)
{
  if (base_type::dl_buffer_state(lcid, tx_queue, prio_tx_queue) != SRSRAN_SUCCESS) {
    return;
  }
  logger.debug("SCHED: rnti=0x%x DL lcid=%d buffer_state=%d,%d", rnti, lcid, tx_queue, prio_tx_queue);
}
/// Forward an UL buffer-status-report value for the given LCG to the base
/// class; on success, log the report and the resulting per-LCG BSR state.
/// The fmt buffer is only built when debug logging is actually enabled.
void lch_ue_manager::ul_bsr(uint32_t lcg_id, uint32_t val)
{
  if (base_type::ul_bsr(lcg_id, val) != SRSRAN_SUCCESS) {
    return;
  }
  if (not logger.debug.enabled()) {
    return;
  }
  fmt::memory_buffer str_buffer;
  fmt::format_to(str_buffer, "{}", lcg_bsr);
  logger.debug(
      "SCHED: rnti=0x%x, lcg_id=%d, bsr=%d. Current state=%s", rnti, lcg_id, val, srsran::to_c_str(str_buffer));
}
void lch_ue_manager::ul_buffer_add(uint8_t lcid, uint32_t bytes)
{
if (lcid >= sched_interface::MAX_LC) {

@ -221,7 +221,7 @@ void bearer_cfg_handler::reestablish_bearers(bearer_cfg_handler&& old_rnti_beare
old_rnti_bearers.current_drbs.clear();
}
int bearer_cfg_handler::add_erab(uint8_t erab_id,
int bearer_cfg_handler::addmod_erab(uint8_t erab_id,
const asn1::s1ap::erab_level_qos_params_s& qos,
const asn1::bounded_bitstring<1, 160, true, true>& addr,
uint32_t teid_out,
@ -290,6 +290,16 @@ int bearer_cfg_handler::add_erab(uint8_t
}
}
// If it is an E-RAB modification, remove previous DRB object
if (erabs.count(erab_id) > 0) {
for (auto& drb : current_drbs) {
if (drb.eps_bearer_id_present and drb.eps_bearer_id == erab_id) {
srsran::rem_rrc_obj_id(current_drbs, drb.drb_id);
break;
}
}
}
// Consider ERAB as accepted
erabs[erab_id].id = erab_id;
erabs[erab_id].lcid = lcid;
@ -361,8 +371,7 @@ int bearer_cfg_handler::modify_erab(uint8_t e
}
auto address = erab_it->second.address;
uint32_t teid_out = erab_it->second.teid_out;
release_erab(erab_id);
return add_erab(erab_id, qos, address, teid_out, nas_pdu, cause);
return addmod_erab(erab_id, qos, address, teid_out, nas_pdu, cause);
}
int bearer_cfg_handler::add_gtpu_bearer(uint32_t erab_id)

@ -961,7 +961,7 @@ bool rrc::ue::rrc_mobility::apply_ho_prep_cfg(const ho_prep_info_r8_ies_s&
uint32_t teid_out = 0;
srsran::uint8_to_uint32(erab.gtp_teid.data(), &teid_out);
asn1::s1ap::cause_c erab_cause;
if (rrc_ue->bearer_list.add_erab(
if (rrc_ue->bearer_list.addmod_erab(
erab.erab_id, erab.erab_level_qos_params, erab.transport_layer_address, teid_out, {}, erab_cause) !=
SRSRAN_SUCCESS) {
erabs_failed_to_setup.emplace_back();

@ -98,23 +98,46 @@ void rrc_nr::stop()
template <class T>
void rrc_nr::log_rrc_message(const std::string& source,
const direction_t dir,
const srsran::byte_buffer_t* pdu,
const T& msg)
const asn1::dyn_octstring& oct,
const T& msg,
const std::string& msg_type)
{
if (logger.debug.enabled()) {
asn1::json_writer json_writer;
msg.to_json(json_writer);
logger.debug(pdu->msg,
pdu->N_bytes,
logger.debug(oct.data(),
oct.size(),
"%s - %s %s (%d B)",
source.c_str(),
dir == Tx ? "Tx" : "Rx",
msg.msg.c1().type().to_string(),
pdu->N_bytes);
msg_type.c_str(),
oct.size());
logger.debug("Content:\n%s", json_writer.to_string().c_str());
} else if (logger.info.enabled()) {
logger.info(
"%s - %s %s (%d B)", source.c_str(), dir == Tx ? "Tx" : "Rx", msg.msg.c1().type().to_string(), pdu->N_bytes);
logger.info("%s - %s %s (%d B)", source.c_str(), dir == Tx ? "Tx" : "Rx", msg_type.c_str(), oct.size());
}
}
/// Log a packed+unpacked RRC message carried in a byte_buffer.
/// At debug level: hexdump of the PDU plus a JSON dump of the ASN.1 content.
/// At info level: a one-line summary (source, direction, type, size).
template <class T>
void rrc_nr::log_rrc_message(const std::string& source,
                             const direction_t dir,
                             const srsran::byte_buffer_t& pdu,
                             const T& msg,
                             const std::string& msg_type)
{
  const char* dir_str = (dir == Rx) ? "Rx" : "Tx";
  if (logger.debug.enabled()) {
    asn1::json_writer js;
    msg.to_json(js);
    logger.debug(pdu.msg, pdu.N_bytes, "%s - %s %s (%d B)", source.c_str(), dir_str, msg_type.c_str(), pdu.N_bytes);
    logger.debug("Content:%s", js.to_string().c_str());
  } else if (logger.info.enabled()) {
    logger.info("%s - %s %s (%d B)", source.c_str(), dir_str, msg_type.c_str(), pdu.N_bytes);
  }
}
@ -361,23 +384,17 @@ int32_t rrc_nr::generate_sibs()
// Pack payload for all messages
for (uint32_t msg_index = 0; msg_index < nof_messages + 1; msg_index++) {
srsran::unique_byte_buffer_t sib = srsran::make_byte_buffer();
srsran::unique_byte_buffer_t sib = pack_into_pdu(msg[msg_index]);
if (sib == nullptr) {
logger.error("Couldn't allocate PDU in %s().", __FUNCTION__);
return SRSRAN_ERROR;
}
asn1::bit_ref bref(sib->msg, sib->get_tailroom());
if (msg[msg_index].pack(bref) != asn1::SRSASN_SUCCESS) {
logger.error("Failed to pack SIB message %d", msg_index);
logger.error("Failed to pack SIB");
return SRSRAN_ERROR;
}
sib->N_bytes = bref.distance_bytes();
sib_buffer.push_back(std::move(sib));
// Log SIBs in JSON format
fmt::memory_buffer strbuf;
fmt::format_to(strbuf, "SI message={} payload", msg_index);
log_rrc_message(fmt::to_string(strbuf), Tx, sib_buffer.back().get(), msg[msg_index]);
log_rrc_message(fmt::to_string(strbuf), Tx, *sib_buffer.back().get(), msg[msg_index], "");
}
nof_si_messages = sib_buffer.size() - 1;
@ -681,20 +698,31 @@ void rrc_nr::ue::send_connection_setup()
/// Pack a DL-CCCH message into a fresh PDU, log it, and hand it to RLC on SRB0.
/// Logs an error and returns without sending if PDU allocation/packing fails.
void rrc_nr::ue::send_dl_ccch(dl_ccch_msg_s* dl_ccch_msg)
{
  srsran::unique_byte_buffer_t packed_pdu = parent->pack_into_pdu(*dl_ccch_msg);
  if (packed_pdu == nullptr) {
    parent->logger.error("Failed to send DL-CCCH");
    return;
  }
  log_rrc_message(Tx, *packed_pdu, *dl_ccch_msg, "DL-CCCH");
  parent->rlc->write_sdu(rnti, (uint32_t)srsran::nr_srb::srb0, std::move(packed_pdu));
}
template <class T>
srsran::unique_byte_buffer_t rrc_nr::pack_into_pdu(const T& msg)
{
// Allocate a new PDU buffer and pack the
srsran::unique_byte_buffer_t pdu = srsran::make_byte_buffer();
if (pdu == nullptr) {
parent->logger.error("Allocating pdu");
logger.error("Couldn't allocate PDU in %s().", __FUNCTION__);
return nullptr;
}
asn1::bit_ref bref(pdu->msg, pdu->get_tailroom());
if (dl_ccch_msg->pack(bref) == asn1::SRSASN_ERROR_ENCODE_FAIL) {
parent->logger.error("Failed to pack DL-CCCH message. Discarding msg.");
if (msg.pack(bref) == asn1::SRSASN_ERROR_ENCODE_FAIL) {
logger.error("Failed to pack message. Discarding it.");
return nullptr;
}
pdu->N_bytes = bref.distance_bytes();
char buf[32] = {};
sprintf(buf, "SRB0 - rnti=0x%x", rnti);
parent->log_rrc_message(buf, Tx, pdu.get(), *dl_ccch_msg);
parent->rlc->write_sdu(rnti, (uint32_t)srsran::nr_srb::srb0, std::move(pdu));
return pdu;
}
int rrc_nr::ue::pack_secondary_cell_group_rlc_cfg(asn1::rrc_nr::cell_group_cfg_s& cell_group_cfg_pack)
@ -1226,6 +1254,8 @@ int rrc_nr::ue::pack_secondary_cell_group_cfg(asn1::dyn_octstring& packed_second
}
packed_secondary_cell_config.resize(bref_pack.distance_bytes());
log_rrc_message(Tx, packed_secondary_cell_config, cell_group_cfg_pack, "nr-SecondaryCellGroupConfig-r15");
return SRSRAN_SUCCESS;
}
@ -1281,6 +1311,8 @@ int rrc_nr::ue::pack_nr_radio_bearer_config(asn1::dyn_octstring& packed_nr_beare
// resize to packed length
packed_nr_bearer_config.resize(bref_pack.distance_bytes());
log_rrc_message(Tx, packed_nr_bearer_config, radio_bearer_cfg_pack, "nr-RadioBearerConfig1-r15");
return SRSRAN_SUCCESS;
}
@ -1379,7 +1411,10 @@ int rrc_nr::ue::add_drb()
// add RLC bearer
srsran::rlc_config_t rlc_cfg;
if (srsran::make_rlc_config_t(cell_group_cfg.rlc_bearer_to_add_mod_list[0].rlc_cfg, &rlc_cfg) != SRSRAN_SUCCESS) {
/// NOTE, we need to pass the radio-bearer to the rlc_config
if (srsran::make_rlc_config_t(cell_group_cfg.rlc_bearer_to_add_mod_list[0].rlc_cfg,
rlc_bearer.served_radio_bearer.drb_id(),
&rlc_cfg) != SRSRAN_SUCCESS) {
parent->logger.error("Failed to build RLC config");
return SRSRAN_ERROR;
}
@ -1447,4 +1482,12 @@ void rrc_nr::ue::deactivate_bearers()
parent->mac->ue_cfg(rnti, uecfg);
}
template <class T, class M>
void rrc_nr::ue::log_rrc_message(const direction_t dir, const M& pdu, const T& msg, const std::string& msg_type)
{
  /// @brief Log an RRC message for this UE, prefixing the entry with its RNTI.
  /// @param dir Tx or Rx direction of the message.
  /// @param pdu Packed byte buffer of the message.
  /// @param msg Unpacked ASN.1 message structure.
  /// @param msg_type Human-readable message-type tag for the log line.
  fmt::memory_buffer strbuf;
  fmt::format_to(strbuf, "rnti=0x{:x}", rnti);
  // Forward the caller-supplied direction instead of hard-coding Tx, so Rx
  // messages are labelled correctly as well.
  parent->log_rrc_message(fmt::to_string(strbuf), dir, pdu, msg, msg_type);
}
} // namespace srsenb

@ -1228,7 +1228,7 @@ int rrc::ue::setup_erab(uint16_t erab_
cause.set_radio_network().value = asn1::s1ap::cause_radio_network_opts::multiple_erab_id_instances;
return SRSRAN_ERROR;
}
if (bearer_list.add_erab(erab_id, qos_params, addr, gtpu_teid_out, nas_pdu, cause) != SRSRAN_SUCCESS) {
if (bearer_list.addmod_erab(erab_id, qos_params, addr, gtpu_teid_out, nas_pdu, cause) != SRSRAN_SUCCESS) {
parent->logger.error("Couldn't add E-RAB id=%d for rnti=0x%x", erab_id, rnti);
return SRSRAN_ERROR;
}

@ -1301,16 +1301,7 @@ void s1ap::send_ho_cancel(uint16_t rnti, const asn1::s1ap::cause_c& cause)
return;
}
s1ap_pdu_c tx_pdu;
tx_pdu.set_init_msg().load_info_obj(ASN1_S1AP_ID_HO_CANCEL);
ho_cancel_ies_container& container = tx_pdu.init_msg().value.ho_cancel().protocol_ies;
container.mme_ue_s1ap_id.value = user_ptr->ctxt.mme_ue_s1ap_id.value();
container.enb_ue_s1ap_id.value = user_ptr->ctxt.enb_ue_s1ap_id;
container.cause.value = cause;
sctp_send_s1ap_pdu(tx_pdu, rnti, "HandoverCancel");
user_ptr->send_ho_cancel(cause);
}
bool s1ap::release_erabs(uint16_t rnti, const std::vector<uint16_t>& erabs_successfully_released)
@ -1739,6 +1730,24 @@ bool s1ap::ue::send_ue_cap_info_indication(srsran::unique_byte_buffer_t ue_radio
return s1ap_ptr->sctp_send_s1ap_pdu(tx_pdu, ctxt.rnti, "UECapabilityInfoIndication");
}
void s1ap::ue::send_ho_cancel(const asn1::s1ap::cause_c& cause)
{
  // Abort the ongoing handover: halt both relocation timers first.
  ts1_reloc_prep.stop();
  ts1_reloc_overall.stop();

  // Build the S1AP HandoverCancel message and send it towards the MME.
  s1ap_pdu_c cancel_pdu;
  cancel_pdu.set_init_msg().load_info_obj(ASN1_S1AP_ID_HO_CANCEL);
  ho_cancel_ies_container& ies = cancel_pdu.init_msg().value.ho_cancel().protocol_ies;
  ies.mme_ue_s1ap_id.value     = ctxt.mme_ue_s1ap_id.value();
  ies.enb_ue_s1ap_id.value     = ctxt.enb_ue_s1ap_id;
  ies.cause.value              = cause;
  s1ap_ptr->sctp_send_s1ap_pdu(cancel_pdu, ctxt.rnti, "HandoverCancel");
}
void s1ap::ue::set_state(s1ap_proc_id_t next_state,
const erab_id_list& erabs_updated,
const erab_item_list& erabs_failed_to_modify)

@ -439,7 +439,7 @@ void gtpu::write_pdu(uint16_t rnti, uint32_t eps_bearer_id, srsran::unique_byte_
{
srsran::span<gtpu_tunnel_manager::bearer_teid_pair> teids = tunnels.find_rnti_bearer_tunnels(rnti, eps_bearer_id);
if (teids.empty()) {
logger.warning("The rnti=0x%x,eps-BearerID=%d does not have any pdcp_active tunnel", rnti, eps_bearer_id);
logger.warning("The rnti=0x%x, eps-BearerID=%d does not have any pdcp_active tunnel", rnti, eps_bearer_id);
return;
}
const gtpu_tunnel& tx_tun = *tunnels.find_tunnel(teids[0].teid);

@ -21,7 +21,7 @@
#include "sched_nr_cfg_generators.h"
#include "sched_nr_common_test.h"
#include "srsenb/hdr/stack/mac/nr/sched_nr_cell.h"
#include "srsenb/hdr/stack/mac/nr/sched_nr_bwp.h"
#include "srsran/common/test_common.h"
#include "srsran/support/srsran_test.h"
#include <random>
@ -44,7 +44,7 @@ void test_single_prach()
// Set cells configuration
std::vector<sched_nr_interface::cell_cfg_t> cells_cfg = get_default_cells_cfg(1);
sched_params schedparams{sched_cfg};
sched_params_t schedparams{sched_cfg};
schedparams.cells.emplace_back(0, cells_cfg[0], sched_cfg);
const bwp_params_t& bwpparams = schedparams.cells[0].bwps[0];
slot_ue_map_t slot_ues;
@ -53,7 +53,6 @@ void test_single_prach()
TESTASSERT(rasched.empty());
std::unique_ptr<bwp_res_grid> res_grid(new bwp_res_grid{bwpparams});
bwp_slot_allocator alloc(*res_grid);
// Create UE
sched_nr_interface::ue_cfg_t uecfg = get_default_ue_cfg(1);
@ -63,22 +62,28 @@ void test_single_prach()
slot_point prach_slot{0, std::uniform_int_distribution<uint32_t>{TX_ENB_DELAY, 20}(rgen)};
const bwp_slot_grid* result = nullptr;
auto run_slot = [&alloc, &rasched, &pdcch_slot, &slot_ues, &u]() -> const bwp_slot_grid* {
auto run_slot = [&res_grid, &rasched, &pdcch_slot, &slot_ues, &u]() -> const bwp_slot_grid* {
mac_logger.set_context(pdcch_slot.to_uint());
// delete old outputs
(*res_grid)[pdcch_slot - TX_ENB_DELAY - 1].reset();
// setup UE state for slot
u.new_slot(pdcch_slot);
u.carriers[0]->new_slot(pdcch_slot);
// pre-calculate UE slot vars
slot_ues.clear();
slot_ue sfu = u.try_reserve(pdcch_slot, 0);
slot_ue sfu = u.make_slot_ue(pdcch_slot, 0);
if (not sfu.empty()) {
slot_ues.insert(rnti, std::move(sfu));
}
alloc.new_slot(pdcch_slot, slot_ues);
bwp_slot_allocator alloc(*res_grid, pdcch_slot, slot_ues);
rasched.run_slot(alloc);
log_sched_bwp_result(mac_logger, alloc.get_pdcch_tti(), alloc.res_grid(), slot_ues);
const bwp_slot_grid* result = &alloc.res_grid()[alloc.get_pdcch_tti()];
test_dl_pdcch_consistency(result->dl_pdcchs);
test_dl_pdcch_consistency(result->dl.phy.pdcch_dl);
++pdcch_slot;
return result;
};
@ -87,7 +92,7 @@ void test_single_prach()
for (; pdcch_slot - TX_ENB_DELAY < prach_slot;) {
result = run_slot();
TESTASSERT(result->dl_pdcchs.empty());
TESTASSERT(result->dl.phy.pdcch_dl.empty());
}
// A PRACH arrives...
@ -108,15 +113,15 @@ void test_single_prach()
result = run_slot();
if (bwpparams.slots[current_slot.slot_idx()].is_dl and
bwpparams.slots[(current_slot + bwpparams.pusch_ra_list[0].msg3_delay).slot_idx()].is_ul) {
TESTASSERT_EQ(result->dl_pdcchs.size(), 1);
const auto& pdcch = result->dl_pdcchs[0];
TESTASSERT_EQ(result->dl.phy.pdcch_dl.size(), 1);
const auto& pdcch = result->dl.phy.pdcch_dl[0];
TESTASSERT_EQ(pdcch.dci.ctx.rnti, ra_rnti);
TESTASSERT_EQ(pdcch.dci.ctx.rnti_type, srsran_rnti_type_ra);
TESTASSERT(current_slot < prach_slot + prach_duration + bwpparams.cfg.rar_window_size);
rar_slot = current_slot;
break;
} else {
TESTASSERT(result->dl_pdcchs.empty());
TESTASSERT(result->dl.phy.pdcch_dl.empty());
}
}
@ -124,7 +129,7 @@ void test_single_prach()
while (pdcch_slot <= msg3_slot) {
result = run_slot();
}
TESTASSERT(result->puschs.size() == 1);
TESTASSERT(result->ul.pusch.size() == 1);
}
} // namespace srsenb

@ -51,8 +51,8 @@ int sched_nr_ue_sim::update(const sched_nr_cc_result_view& cc_out)
{
update_dl_harqs(cc_out);
for (uint32_t i = 0; i < cc_out.dl_cc_result.dl_sched.pdcch_dl.size(); ++i) {
const auto& data = cc_out.dl_cc_result.dl_sched.pdcch_dl[i];
for (uint32_t i = 0; i < cc_out.dl->phy.pdcch_dl.size(); ++i) {
const auto& data = cc_out.dl->phy.pdcch_dl[i];
if (data.dci.ctx.rnti != ctxt.rnti) {
continue;
}
@ -73,8 +73,8 @@ int sched_nr_ue_sim::update(const sched_nr_cc_result_view& cc_out)
void sched_nr_ue_sim::update_dl_harqs(const sched_nr_cc_result_view& cc_out)
{
uint32_t cc = cc_out.cc;
for (uint32_t i = 0; i < cc_out.dl_cc_result.dl_sched.pdcch_dl.size(); ++i) {
const auto& data = cc_out.dl_cc_result.dl_sched.pdcch_dl[i];
for (uint32_t i = 0; i < cc_out.dl->phy.pdcch_dl.size(); ++i) {
const auto& data = cc_out.dl->phy.pdcch_dl[i];
if (data.dci.ctx.rnti != ctxt.rnti) {
continue;
}
@ -105,7 +105,7 @@ sched_nr_base_tester::sched_nr_base_tester(const sched_nr_interface::sched_args_
std::string test_name_,
uint32_t nof_workers) :
logger(srslog::fetch_basic_logger("TEST")),
mac_logger(srslog::fetch_basic_logger("MAC")),
mac_logger(srslog::fetch_basic_logger("MAC-NR")),
sched_ptr(new sched_nr()),
test_name(std::move(test_name_))
{
@ -155,7 +155,6 @@ int sched_nr_base_tester::add_user(uint16_t rnti,
uint32_t preamble_idx)
{
sem_wait(&slot_sem);
sched_ptr->ue_cfg(rnti, ue_cfg_);
TESTASSERT(ue_db.count(rnti) == 0);
@ -166,7 +165,7 @@ int sched_nr_base_tester::add_user(uint16_t rnti,
rach_info.prach_slot = tti_rx;
rach_info.preamble_idx = preamble_idx;
rach_info.msg3_size = 7;
sched_ptr->dl_rach_info(ue_cfg_.carriers[0].cc, rach_info);
sched_ptr->dl_rach_info(rach_info, ue_cfg_);
sem_post(&slot_sem);
@ -201,6 +200,8 @@ void sched_nr_base_tester::run_slot(slot_point slot_tx)
slot_ctxt = get_enb_ctxt();
slot_start_tp = std::chrono::steady_clock::now();
sched_ptr->slot_indication(current_slot_tx);
// Generate CC result (parallel or serialized)
uint32_t worker_idx = 0;
for (uint32_t cc = 0; cc < cell_params.size(); ++cc) {
@ -216,10 +217,10 @@ void sched_nr_base_tester::run_slot(slot_point slot_tx)
void sched_nr_base_tester::generate_cc_result(uint32_t cc)
{
// Run scheduler
sched_nr_interface::dl_res_t dl_sched(cc_results[cc].rar, cc_results[cc].dl_res);
sched_ptr->run_slot(current_slot_tx, cc, dl_sched);
cc_results[cc].rar = dl_sched.rar;
sched_ptr->get_ul_sched(current_slot_tx, cc, cc_results[cc].ul_res);
cc_results[cc].res.slot = current_slot_tx;
cc_results[cc].res.cc = cc;
cc_results[cc].res.dl = sched_ptr->get_dl_sched(current_slot_tx, cc);
cc_results[cc].res.ul = sched_ptr->get_ul_sched(current_slot_tx, cc);
auto tp2 = std::chrono::steady_clock::now();
cc_results[cc].cc_latency_ns = std::chrono::duration_cast<std::chrono::nanoseconds>(tp2 - slot_start_tp);
@ -241,13 +242,12 @@ void sched_nr_base_tester::process_results()
process_slot_result(slot_ctxt, cc_results);
for (uint32_t cc = 0; cc < cell_params.size(); ++cc) {
sched_nr_cc_result_view cc_out{
current_slot_tx, cc, cc_results[cc].rar, cc_results[cc].dl_res, cc_results[cc].ul_res};
sched_nr_cc_result_view cc_out = cc_results[cc].res;
// Run common tests
test_dl_pdcch_consistency(cc_out.dl_cc_result.dl_sched.pdcch_dl);
test_pdsch_consistency(cc_out.dl_cc_result.dl_sched.pdsch);
test_ssb_scheduled_grant(cc_out.slot, cell_params[cc_out.cc].cfg, cc_out.dl_cc_result.dl_sched.ssb);
test_dl_pdcch_consistency(cc_out.dl->phy.pdcch_dl);
test_pdsch_consistency(cc_out.dl->phy.pdsch);
test_ssb_scheduled_grant(cc_out.slot, cell_params[cc_out.cc].cfg, cc_out.dl->phy.ssb);
// Run UE-dedicated tests
test_dl_sched_result(slot_ctxt, cc_out);

@ -49,17 +49,9 @@ struct ue_nr_harq_ctxt_t {
};
struct sched_nr_cc_result_view {
slot_point slot;
uint32_t cc;
const sched_nr_interface::dl_res_t dl_cc_result;
const sched_nr_interface::ul_res_t* ul_cc_result;
sched_nr_cc_result_view(slot_point slot_,
uint32_t cc_,
sched_nr_interface::sched_rar_list_t& rar,
sched_nr_interface::dl_sched_t& dl_res,
sched_nr_interface::ul_res_t& ul_res) :
slot(slot_), cc(cc_), dl_cc_result(rar, dl_res), ul_cc_result(&ul_res)
{}
uint32_t cc = 0;
const sched_nr_interface::dl_res_t* dl = nullptr;
const sched_nr_interface::ul_res_t* ul = nullptr;
};
struct ue_nr_cc_ctxt_t {
@ -125,11 +117,7 @@ class sched_nr_base_tester
{
public:
struct cc_result_t {
slot_point slot_tx;
uint32_t cc;
sched_nr_interface::dl_sched_t dl_res;
sched_nr_interface::sched_rar_list_t rar;
sched_nr_interface::ul_res_t ul_res;
sched_nr_cc_result_view res;
std::chrono::nanoseconds cc_latency_ns;
};

@ -45,16 +45,16 @@ public:
})->cc_latency_ns.count();
for (auto& cc_out : cc_list) {
pdsch_count += cc_out.dl_res.pdcch_dl.size();
pdsch_count += cc_out.res.dl->phy.pdcch_dl.size();
cc_res_count++;
bool is_dl_slot = srsran_duplex_nr_is_dl(&cell_params[cc_out.cc].cfg.duplex, 0, current_slot_tx.slot_idx());
bool is_dl_slot = srsran_duplex_nr_is_dl(&cell_params[cc_out.res.cc].cfg.duplex, 0, current_slot_tx.slot_idx());
if (is_dl_slot) {
if (cc_out.dl_res.ssb.empty()) {
TESTASSERT(slot_ctxt.ue_db.empty() or cc_out.dl_res.pdcch_dl.size() == 1);
if (cc_out.res.dl->phy.ssb.empty()) {
TESTASSERT(slot_ctxt.ue_db.empty() or cc_out.res.dl->phy.pdcch_dl.size() == 1);
} else {
TESTASSERT(cc_out.dl_res.pdcch_dl.size() == 0);
TESTASSERT(cc_out.res.dl->phy.pdcch_dl.size() == 0);
}
}
}

@ -30,7 +30,7 @@ using namespace srsenb::sched_nr_impl;
void test_dl_sched_result(const sim_nr_enb_ctxt_t& enb_ctxt, const sched_nr_cc_result_view& cc_out)
{
slot_point pdcch_slot = cc_out.slot;
const pdcch_dl_list_t& pdcchs = cc_out.dl_cc_result.dl_sched.pdcch_dl;
const pdcch_dl_list_t& pdcchs = cc_out.dl->phy.pdcch_dl;
// Iterate over UE PDCCH allocations
for (const pdcch_dl_t& pdcch : pdcchs) {

@ -54,8 +54,11 @@ void proc_sr_nr::reset_nolock()
int32_t proc_sr_nr::set_config(const srsran::sr_cfg_nr_t& cfg_)
{
{
std::lock_guard<std::mutex> lock(mutex);
// disable by default
cfg.enabled = false;
}
if (cfg_.num_items != 1) {
logger.error("Only one SR config supported. Disabling SR.");
@ -78,8 +81,11 @@ int32_t proc_sr_nr::set_config(const srsran::sr_cfg_nr_t& cfg_)
logger.info("SR: Disabling procedure");
}
{
std::lock_guard<std::mutex> lock(mutex);
// store config
cfg = cfg_;
}
return SRSRAN_SUCCESS;
}

@ -466,12 +466,15 @@ bool rrc_nr::apply_rlc_add_mod(const rlc_bearer_cfg_s& rlc_bearer_cfg)
uint32_t drb_id = 0;
uint32_t srb_id = 0;
rlc_config_t rlc_cfg;
// We set this to true if below we detect it's a DRB
bool is_drb = false;
lc_ch_id = rlc_bearer_cfg.lc_ch_id;
if (rlc_bearer_cfg.served_radio_bearer_present == true) {
if (rlc_bearer_cfg.served_radio_bearer.type() == rlc_bearer_cfg_s::served_radio_bearer_c_::types::drb_id) {
drb_id = rlc_bearer_cfg.served_radio_bearer.drb_id();
add_lcid_drb(lc_ch_id, drb_id);
is_drb = true;
}
} else {
logger.error("In RLC bearer cfg does not contain served radio bearer");
@ -479,7 +482,8 @@ bool rrc_nr::apply_rlc_add_mod(const rlc_bearer_cfg_s& rlc_bearer_cfg)
}
if (rlc_bearer_cfg.rlc_cfg_present == true) {
if (srsran::make_rlc_config_t(rlc_bearer_cfg.rlc_cfg, &rlc_cfg) != SRSRAN_SUCCESS) {
uint8_t bearer_id = static_cast<uint8_t>(is_drb ? drb_id : srb_id);
if (srsran::make_rlc_config_t(rlc_bearer_cfg.rlc_cfg, bearer_id, &rlc_cfg) != SRSRAN_SUCCESS) {
logger.error("Failed to build RLC config");
return false;
}

@ -463,11 +463,18 @@ public:
~gnb_dummy_stack() = default;
void stop()
{
  // Shut down the real MAC when one is active; when the dummy MAC path is
  // used there is nothing to tear down.
  if (not use_dummy_mac) {
    mac->stop();
  }
}
bool is_valid() const { return valid; }
int slot_indication(const srsran_slot_cfg_t& slot_cfg) override { return 0; }
int get_dl_sched(const srsran_slot_cfg_t& slot_cfg, dl_sched_t& dl_sched) override
dl_sched_t* get_dl_sched(const srsran_slot_cfg_t& slot_cfg) override
{
logger.set_context(slot_cfg.idx);
sched_logger.set_context(slot_cfg.idx);
@ -478,36 +485,41 @@ public:
mac->ul_bsr(rnti, 0, 100000);
}
int ret = mac->get_dl_sched(slot_cfg, dl_sched);
dl_sched_t* dl_res = mac->get_dl_sched(slot_cfg);
if (dl_res == nullptr) {
return nullptr;
}
for (pdsch_t& pdsch : dl_sched.pdsch) {
for (pdsch_t& pdsch : dl_res->pdsch) {
// Set TBS
// Select grant and set data
pdsch.data[0] = tx_harq_proc[slot_cfg.idx].get_tb(pdsch.sch.grant.tb[0].tbs);
pdsch.data[1] = nullptr;
}
return ret;
return dl_res;
}
dl_sched_t& dl_sched = dl_scheds[slot_cfg.idx];
dl_sched = {};
// Check if it is TDD DL slot and PDSCH mask, if no PDSCH shall be scheduled, do not set any grant and skip
if (not srsran_duplex_nr_is_dl(&phy_cfg.duplex, phy_cfg.carrier.scs, slot_cfg.idx)) {
return SRSRAN_SUCCESS;
return nullptr;
}
if (not schedule_pdsch(slot_cfg, dl_sched)) {
logger.error("Error scheduling PDSCH");
return SRSRAN_ERROR;
return nullptr;
}
// Check if the UL slot is valid, if not skip UL scheduling
if (not srsran_duplex_nr_is_ul(&phy_cfg.duplex, phy_cfg.carrier.scs, TTI_TX(slot_cfg.idx))) {
return SRSRAN_SUCCESS;
return &dl_sched;
}
if (not schedule_pusch(slot_cfg, dl_sched)) {
logger.error("Error scheduling PUSCH");
return SRSRAN_ERROR;
return nullptr;
}
// Schedule NZP-CSI-RS, iterate all NZP-CSI-RS sets
@ -543,19 +555,21 @@ public:
}
}
return SRSRAN_SUCCESS;
return &dl_sched;
}
int get_ul_sched(const srsran_slot_cfg_t& slot_cfg, ul_sched_t& ul_sched) override
ul_sched_t* get_ul_sched(const srsran_slot_cfg_t& slot_cfg) override
{
logger.set_context(slot_cfg.idx);
sched_logger.set_context(slot_cfg.idx);
if (not use_dummy_mac) {
int ret = mac->get_ul_sched(slot_cfg, ul_sched);
return ret;
ul_sched_t* ul_res = mac->get_ul_sched(slot_cfg);
return ul_res;
}
ul_sched_t& ul_sched = ul_scheds[slot_cfg.idx];
ul_sched.pucch.clear();
ul_sched.pusch.clear();
// Get ACK information
srsran_pdsch_ack_nr_t ack = pending_ack[slot_cfg.idx % pending_ack.size()].get_ack();
@ -575,7 +589,7 @@ public:
srsran_uci_cfg_nr_t uci_cfg = {};
if (not phy_cfg.get_uci_cfg(slot_cfg, ack, uci_cfg)) {
logger.error("Error getting UCI configuration");
return SRSRAN_ERROR;
return nullptr;
}
// Schedule PUSCH
@ -586,15 +600,12 @@ public:
// Put UCI configuration in PUSCH config
if (not phy_cfg.get_pusch_uci_cfg(slot_cfg, uci_cfg, pusch.sch)) {
logger.error("Error setting UCI configuration in PUSCH");
return SRSRAN_ERROR;
return nullptr;
}
ul_sched.pusch.push_back(pusch);
return SRSRAN_SUCCESS;
}
} else if (uci_cfg.ack.count > 0 || uci_cfg.nof_csi > 0 || uci_cfg.o_sr > 0) {
// If any UCI information is triggered, schedule PUCCH
if (uci_cfg.ack.count > 0 || uci_cfg.nof_csi > 0 || uci_cfg.o_sr > 0) {
ul_sched.pucch.emplace_back();
uci_cfg.pucch.rnti = rnti;
@ -604,7 +615,7 @@ public:
pucch.candidates.back().uci_cfg = uci_cfg;
if (not phy_cfg.get_pucch_uci_cfg(slot_cfg, uci_cfg, pucch.pucch_cfg, pucch.candidates.back().resource)) {
logger.error("Error getting UCI CFG");
return SRSRAN_ERROR;
return nullptr;
}
// If this slot has a SR opportunity and the selected PUCCH format is 1, consider positive SR.
@ -620,15 +631,12 @@ public:
pucch.candidates.back().uci_cfg = uci_cfg;
if (not phy_cfg.get_pucch_uci_cfg(slot_cfg, uci_cfg, pucch.pucch_cfg, pucch.candidates.back().resource)) {
logger.error("Error getting UCI CFG");
return SRSRAN_ERROR;
return nullptr;
}
}
return SRSRAN_SUCCESS;
}
// Otherwise no UL scheduling
return SRSRAN_SUCCESS;
return &ul_sched;
}
int pucch_info(const srsran_slot_cfg_t& slot_cfg, const pucch_info_t& pucch_info) override
@ -730,6 +738,10 @@ public:
}
return metrics;
}
private:
srsran::circular_array<dl_sched_t, TTIMOD_SZ> dl_scheds;
srsran::circular_array<ul_sched_t, TTIMOD_SZ> ul_scheds;
};
#endif // SRSRAN_DUMMY_GNB_STACK_H

@ -141,6 +141,7 @@ public:
gnb_phy_com.stop();
gnb_phy.stop();
ue_phy.stop();
gnb_stack.stop();
}
~test_bench() = default;

Loading…
Cancel
Save