Merge branch 'next' into agpl_next

master
Codebot 3 years ago committed by Your Name
commit 83f103fe4b

@ -27,6 +27,8 @@
extern "C" {
#include "srsran/phy/common/phy_common_nr.h"
#include "srsran/phy/fec/softbuffer.h"
#include "srsran/phy/phch/sch_nr.h"
#include "srsran/phy/utils/vector.h"
}
namespace srsenb {
@ -35,7 +37,11 @@ class tx_harq_softbuffer
{
public:
tx_harq_softbuffer() { bzero(&buffer, sizeof(buffer)); }
explicit tx_harq_softbuffer(uint32_t nof_prb_) { srsran_softbuffer_tx_init(&buffer, nof_prb_); }
/// Constructs and allocates the TX HARQ softbuffer.
/// NOTE(review): nof_prb_ is currently ignored — the buffer is always sized for
/// the maximum number of LDPC codeblocks and the maximum encoded CB length,
/// regardless of BWP size. TODO: confirm this over-allocation is intentional.
explicit tx_harq_softbuffer(uint32_t nof_prb_)
{
// Note: for now we use same size regardless of nof_prb_
srsran_softbuffer_tx_init_guru(&buffer, SRSRAN_SCH_NR_MAX_NOF_CB_LDPC, SRSRAN_LDPC_MAX_LEN_ENCODED_CB);
}
tx_harq_softbuffer(const tx_harq_softbuffer&) = delete;
tx_harq_softbuffer(tx_harq_softbuffer&& other) noexcept
{
@ -73,7 +79,11 @@ class rx_harq_softbuffer
{
public:
rx_harq_softbuffer() { bzero(&buffer, sizeof(buffer)); }
explicit rx_harq_softbuffer(uint32_t nof_prb_) { srsran_softbuffer_rx_init(&buffer, nof_prb_); }
/// Constructs and allocates the RX HARQ softbuffer.
/// NOTE(review): nof_prb_ is currently ignored — the buffer is always sized for
/// the maximum number of LDPC codeblocks and the maximum encoded CB length,
/// regardless of BWP size. TODO: confirm this over-allocation is intentional.
explicit rx_harq_softbuffer(uint32_t nof_prb_)
{
// Note: for now we use same size regardless of nof_prb_
srsran_softbuffer_rx_init_guru(&buffer, SRSRAN_SCH_NR_MAX_NOF_CB_LDPC, SRSRAN_LDPC_MAX_LEN_ENCODED_CB);
}
rx_harq_softbuffer(const rx_harq_softbuffer&) = delete;
rx_harq_softbuffer(rx_harq_softbuffer&& other) noexcept
{

@ -23,7 +23,8 @@
#define SRSRAN_SCHED_NR_CELL_H
#include "sched_nr_cfg.h"
#include "sched_nr_rb_grid.h"
#include "sched_nr_grant_allocator.h"
#include "sched_nr_time_rr.h"
#include "srsran/adt/pool/cached_alloc.h"
namespace srsenb {
@ -31,12 +32,6 @@ namespace sched_nr_impl {
using dl_sched_rar_info_t = sched_nr_interface::dl_sched_rar_info_t;
struct pending_rar_t {
uint16_t ra_rnti = 0;
slot_point prach_slot;
srsran::bounded_vector<dl_sched_rar_info_t, sched_interface::MAX_RAR_LIST> msg3_grant;
};
/// RAR/Msg3 scheduler
class ra_sched
{
@ -48,6 +43,13 @@ public:
size_t empty() const { return pending_rars.empty(); }
private:
struct pending_rar_t {
uint16_t ra_rnti = 0;
slot_point prach_slot;
slot_interval rar_win;
srsran::bounded_vector<dl_sched_rar_info_t, sched_interface::MAX_RAR_LIST> msg3_grant;
};
alloc_result allocate_pending_rar(bwp_slot_allocator& slot_grid,
const pending_rar_t& rar,
slot_ue_map_t& slot_ues,
@ -68,6 +70,7 @@ public:
// channel-specific schedulers
ra_sched ra;
std::unique_ptr<sched_nr_base> data_sched;
// Stores pending allocations and PRB bitmaps
bwp_res_grid grid;

@ -19,8 +19,8 @@
*
*/
#ifndef SRSRAN_SCHED_NR_RB_GRID_H
#define SRSRAN_SCHED_NR_RB_GRID_H
#ifndef SRSRAN_SCHED_NR_GRANT_ALLOCATOR_H
#define SRSRAN_SCHED_NR_GRANT_ALLOCATOR_H
#include "../sched_common.h"
#include "lib/include/srsran/adt/circular_array.h"
@ -32,7 +32,7 @@
namespace srsenb {
namespace sched_nr_impl {
struct pending_rar_t;
using dl_sched_rar_info_t = sched_nr_interface::dl_sched_rar_info_t;
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
@ -92,15 +92,16 @@ public:
void new_slot(slot_point pdcch_slot_) { pdcch_slot = pdcch_slot_; }
alloc_result alloc_rar_and_msg3(uint32_t aggr_idx,
const pending_rar_t& rar,
alloc_result alloc_rar_and_msg3(uint16_t ra_rnti,
uint32_t aggr_idx,
prb_interval interv,
slot_ue_map_t& ues,
uint32_t max_nof_grants);
srsran::const_span<dl_sched_rar_info_t> pending_rars);
alloc_result alloc_pdsch(slot_ue& ue, const prb_grant& dl_grant);
alloc_result alloc_pusch(slot_ue& ue, const prb_grant& dl_mask);
slot_point get_pdcch_tti() const { return pdcch_slot; }
slot_point get_tti_rx() const { return pdcch_slot - TX_ENB_DELAY; }
const bwp_res_grid& res_grid() const { return bwp_grid; }
const bwp_params& cfg;
@ -117,4 +118,4 @@ private:
} // namespace sched_nr_impl
} // namespace srsenb
#endif // SRSRAN_SCHED_NR_RB_GRID_H
#endif // SRSRAN_SCHED_NR_GRANT_ALLOCATOR_H

@ -28,6 +28,7 @@ namespace srsenb {
namespace sched_nr_impl {
class slot_ue;
class ul_harq_proc;
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////

@ -63,7 +63,7 @@ public:
srsran_pdcch_cfg_nr_t pdcch = {};
srsran_sch_hl_cfg_nr_t pdsch = {};
srsran_sch_hl_cfg_nr_t pusch = {};
uint32_t rar_window_size = 3;
uint32_t rar_window_size = 8;
};
struct cell_cfg_t {

@ -0,0 +1,47 @@
/**
*
* \section COPYRIGHT
*
* Copyright 2013-2021 Software Radio Systems Limited
*
* By using this file, you agree to the terms and conditions set
* forth in the LICENSE file which can be found at the top level of
* the distribution.
*
*/
#ifndef SRSRAN_SCHED_NR_TIME_RR_H
#define SRSRAN_SCHED_NR_TIME_RR_H
#include "sched_nr_grant_allocator.h"
#include "srsran/common/slot_point.h"
namespace srsenb {
namespace sched_nr_impl {
/**
 * Interface for scheduler algorithm implementations (e.g. time-domain
 * round-robin). Concrete schedulers decide which users in the slot UE map
 * receive DL/UL grants, performing allocations through the slot allocator.
 */
class sched_nr_base
{
public:
virtual ~sched_nr_base() = default;
/// Allocate DL grants for the candidate users in \c ue_db for the slot handled by \c slot_alloc.
virtual void sched_dl_users(slot_ue_map_t& ue_db, bwp_slot_allocator& slot_alloc) = 0;
/// Allocate UL grants for the candidate users in \c ue_db for the slot handled by \c slot_alloc.
virtual void sched_ul_users(slot_ue_map_t& ue_db, bwp_slot_allocator& slot_alloc) = 0;
protected:
// Shared logger for scheduler implementations (MAC log channel).
srslog::basic_logger& logger = srslog::fetch_basic_logger("MAC");
};
/// Time-domain round-robin scheduler: the starting UE rotates with the slot
/// index, and HARQ retransmissions are attempted before new transmissions.
class sched_nr_time_rr : public sched_nr_base
{
public:
void sched_dl_users(slot_ue_map_t& ue_db, bwp_slot_allocator& slot_alloc) override;
void sched_ul_users(slot_ue_map_t& ue_db, bwp_slot_allocator& slot_alloc) override;
};
} // namespace sched_nr_impl
} // namespace srsenb
#endif // SRSRAN_SCHED_NR_TIME_RR_H

@ -54,6 +54,7 @@ public:
// UE parameters that are sector specific
const bwp_ue_cfg* cfg = nullptr;
harq_entity* harq_ent = nullptr;
slot_point pdcch_slot;
slot_point pdsch_slot;
slot_point pusch_slot;

@ -24,7 +24,7 @@
#include "sched_nr_cell.h"
#include "sched_nr_cfg.h"
#include "sched_nr_rb_grid.h"
#include "sched_nr_grant_allocator.h"
#include "sched_nr_ue.h"
#include "srsran/adt/circular_array.h"
#include "srsran/adt/optional.h"
@ -80,7 +80,7 @@ private:
srsran::deque<feedback_t> pending_feedback, tmp_feedback_to_run;
srsran::deque<srsran::move_callback<void()> > pending_events, tmp_events_to_run;
srsran::static_circular_map<uint16_t, slot_ue, SCHED_NR_MAX_USERS> slot_ues;
slot_ue_map_t slot_ues;
};
class sched_worker_manager

@ -23,13 +23,14 @@ set(SOURCES mac_nr.cc
sched_nr.cc
sched_nr_ue.cc
sched_nr_worker.cc
sched_nr_rb_grid.cc
sched_nr_grant_allocator.cc
sched_nr_harq.cc
sched_nr_pdcch.cc
sched_nr_cfg.cc
sched_nr_helpers.cc
sched_nr_cell.cc
sched_nr_rb.cc
sched_nr_time_rr.cc
harq_softbuffer.cc)
add_library(srsgnb_mac STATIC ${SOURCES})

@ -37,6 +37,7 @@ alloc_result ra_sched::allocate_pending_rar(bwp_slot_allocator& slot_grid,
const prb_bitmap& prbs = slot_grid.res_grid()[slot_grid.get_pdcch_tti()].dl_prbs.prbs();
alloc_result ret = alloc_result::other_cause;
srsran::const_span<dl_sched_rar_info_t> msg3_grants{rar.msg3_grant};
for (nof_grants_alloc = rar.msg3_grant.size(); nof_grants_alloc > 0; nof_grants_alloc--) {
ret = alloc_result::invalid_coderate;
uint32_t start_prb_idx = 0;
@ -44,7 +45,8 @@ alloc_result ra_sched::allocate_pending_rar(bwp_slot_allocator& slot_grid,
prb_interval interv = find_empty_interval_of_length(prbs, nprb, start_prb_idx);
start_prb_idx = interv.stop();
if (interv.length() == nprb) {
ret = slot_grid.alloc_rar_and_msg3(rar_aggr_level, rar, interv, slot_ues, nof_grants_alloc);
ret = slot_grid.alloc_rar_and_msg3(
rar.ra_rnti, rar_aggr_level, interv, slot_ues, msg3_grants.subspan(0, nof_grants_alloc));
} else {
ret = alloc_result::no_sch_space;
}
@ -63,10 +65,21 @@ alloc_result ra_sched::allocate_pending_rar(bwp_slot_allocator& slot_grid,
void ra_sched::run_slot(bwp_slot_allocator& slot_grid, slot_ue_map_t& slot_ues)
{
static const uint32_t PRACH_RAR_OFFSET = 3;
slot_point pdcch_slot = slot_grid.get_pdcch_tti();
slot_point msg3_slot = pdcch_slot + bwp_cfg->pusch_ra_list[0].msg3_delay;
if (not slot_grid.res_grid()[pdcch_slot].is_dl or not slot_grid.res_grid()[msg3_slot].is_ul) {
if (not slot_grid.res_grid()[pdcch_slot].is_dl) {
// RAR only allowed if PDCCH is available
return;
}
// Mark RAR window start, regardless of whether PUSCH is available
for (auto& rar : pending_rars) {
if (rar.rar_win.empty()) {
rar.rar_win = {pdcch_slot, pdcch_slot + bwp_cfg->cfg.rar_window_size};
}
}
if (not slot_grid.res_grid()[msg3_slot].is_ul) {
// RAR only allowed if respective Msg3 slot is available for UL
return;
}
@ -77,14 +90,12 @@ void ra_sched::run_slot(bwp_slot_allocator& slot_grid, slot_ue_map_t& slot_ues)
// In case of RAR outside RAR window:
// - if window has passed, discard RAR
// - if window hasn't started, stop loop, as RARs are ordered by TTI
slot_interval rar_window{rar.prach_slot + PRACH_RAR_OFFSET,
rar.prach_slot + PRACH_RAR_OFFSET + bwp_cfg->cfg.rar_window_size};
if (not rar_window.contains(pdcch_slot)) {
if (pdcch_slot >= rar_window.stop()) {
if (not rar.rar_win.contains(pdcch_slot)) {
if (pdcch_slot >= rar.rar_win.stop()) {
fmt::memory_buffer str_buffer;
fmt::format_to(str_buffer,
"SCHED: Could not transmit RAR within the window Window={}, PRACH={}, RAR={}",
rar_window,
rar.rar_win,
rar.prach_slot,
pdcch_slot);
srsran::console("%s\n", srsran::to_c_str(str_buffer));
@ -162,7 +173,9 @@ int ra_sched::dl_rach_info(const dl_sched_rar_info_t& rar_info)
return SRSRAN_SUCCESS;
}
bwp_ctxt::bwp_ctxt(const bwp_params& bwp_cfg) : cfg(&bwp_cfg), ra(bwp_cfg), grid(bwp_cfg) {}
// Constructs the BWP context. The data scheduler defaults to the time-domain
// round-robin policy; RA scheduler and resource grid share the BWP parameters.
bwp_ctxt::bwp_ctxt(const bwp_params& bwp_cfg) :
cfg(&bwp_cfg), ra(bwp_cfg), grid(bwp_cfg), data_sched(new sched_nr_time_rr())
{}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////

@ -19,7 +19,7 @@
*
*/
#include "srsenb/hdr/stack/mac/nr/sched_nr_rb_grid.h"
#include "srsenb/hdr/stack/mac/nr/sched_nr_grant_allocator.h"
#include "srsenb/hdr/stack/mac/nr/sched_nr_cell.h"
#include "srsenb/hdr/stack/mac/nr/sched_nr_helpers.h"
@ -73,11 +73,11 @@ bwp_slot_allocator::bwp_slot_allocator(bwp_res_grid& bwp_grid_) :
logger(srslog::fetch_basic_logger("MAC")), cfg(*bwp_grid_.cfg), bwp_grid(bwp_grid_)
{}
alloc_result bwp_slot_allocator::alloc_rar_and_msg3(uint32_t aggr_idx,
const srsenb::sched_nr_impl::pending_rar_t& rar,
alloc_result bwp_slot_allocator::alloc_rar_and_msg3(uint16_t ra_rnti,
uint32_t aggr_idx,
prb_interval interv,
slot_ue_map_t& ues,
uint32_t max_nof_grants)
srsran::const_span<dl_sched_rar_info_t> pending_rars)
{
static const uint32_t msg3_nof_prbs = 3, m = 0;
@ -104,7 +104,7 @@ alloc_result bwp_slot_allocator::alloc_rar_and_msg3(uint32_t
}
// Check Msg3 RB collision
uint32_t total_ul_nof_prbs = msg3_nof_prbs * max_nof_grants;
uint32_t total_ul_nof_prbs = msg3_nof_prbs * pending_rars.size();
uint32_t total_ul_nof_rbgs = srsran::ceil_div(total_ul_nof_prbs, get_P(bwp_grid.nof_prbs(), false));
prb_interval msg3_rbs = find_empty_interval_of_length(bwp_msg3_slot.ul_prbs.prbs(), total_ul_nof_rbgs);
if (msg3_rbs.length() < total_ul_nof_rbgs) {
@ -123,9 +123,9 @@ alloc_result bwp_slot_allocator::alloc_rar_and_msg3(uint32_t
// RAR allocation successful.
// Generate DCI for RAR
// Generate DCI for RAR with given RA-RNTI
pdcch_dl_t& pdcch = bwp_pdcch_slot.dl_pdcchs.back();
if (not fill_dci_rar(interv, rar.ra_rnti, *bwp_grid.cfg, pdcch.dci)) {
if (not fill_dci_rar(interv, ra_rnti, *bwp_grid.cfg, pdcch.dci)) {
// Cancel on-going PDCCH allocation
bwp_pdcch_slot.coresets[coreset_id]->rem_last_dci();
return alloc_result::invalid_coderate;
@ -139,9 +139,10 @@ alloc_result bwp_slot_allocator::alloc_rar_and_msg3(uint32_t
int dai = 0;
srsran_slot_cfg_t slot_cfg;
slot_cfg.idx = msg3_slot.slot_idx();
for (const auto& grant : rar.msg3_grant) {
for (const dl_sched_rar_info_t& grant : pending_rars) {
slot_ue& ue = ues[grant.temp_crnti];
prb_interval msg3_interv{last_msg3, last_msg3 + msg3_nof_prbs};
ue.h_ul = ue.harq_ent->find_empty_ul_harq();
bool success = ue.h_ul->new_tx(msg3_slot, msg3_slot, msg3_interv, mcs, 100, max_harq_msg3_retx);
srsran_assert(success, "Failed to allocate Msg3");
last_msg3 += msg3_nof_prbs;
@ -153,9 +154,7 @@ alloc_result bwp_slot_allocator::alloc_rar_and_msg3(uint32_t
success = ue.cfg->phy().get_pusch_cfg(slot_cfg, msg3_pdcch.dci, pusch.sch);
srsran_assert(success, "Error converting DCI to PUSCH grant");
pusch.sch.grant.tb[0].softbuffer.rx = ue.h_ul->get_softbuffer().get();
if (ue.h_ul->nof_retx() > 0) {
bwp_pdcch_slot.ul_pdcchs.push_back(msg3_pdcch);
}
ue.h_ul->set_tbs(pusch.sch.grant.tb[0].tbs);
}
bwp_msg3_slot.ul_prbs.add(msg3_rbs);
@ -298,6 +297,11 @@ alloc_result bwp_slot_allocator::alloc_pusch(slot_ue& ue, const prb_grant& ul_pr
bool success = ue.cfg->phy().get_pusch_cfg(slot_cfg, pdcch.dci, pusch.sch);
srsran_assert(success, "Error converting DCI to PUSCH grant");
pusch.sch.grant.tb[0].softbuffer.rx = ue.h_ul->get_softbuffer().get();
if (ue.h_ul->nof_retx() == 0) {
ue.h_ul->set_tbs(pusch.sch.grant.tb[0].tbs); // update HARQ with correct TBS
} else {
srsran_assert(pusch.sch.grant.tb[0].tbs == (int)ue.h_ul->tbs(), "The TBS did not remain constant in retx");
}
return alloc_result::success;
}

@ -0,0 +1,92 @@
/**
*
* \section COPYRIGHT
*
* Copyright 2013-2021 Software Radio Systems Limited
*
* By using this file, you agree to the terms and conditions set
* forth in the LICENSE file which can be found at the top level of
* the distribution.
*
*/
#include "srsenb/hdr/stack/mac/nr/sched_nr_time_rr.h"
namespace srsenb {
namespace sched_nr_impl {
/// Applies predicate \c p to the users of \c ue_db in round-robin order,
/// starting at position (rr_count % ue_db.size()), stopping at the first user
/// for which the predicate returns true.
/// Generalized over the map type (backward compatible: callers deduce the
/// concrete map as before), so it works with any associative container whose
/// mapped values are the scheduling candidates.
/// \param ue_db    map of candidate users (rnti -> user context).
/// \param rr_count rotating counter (e.g. current slot index) selecting the
///                 starting position, to ensure fairness across calls.
/// \param p        unary predicate over a reference to the mapped value;
///                 returns true when the user was successfully handled.
/// \return true if the predicate succeeded for some user, false otherwise
///         (including the empty-map case).
template <typename UeMap, typename Predicate>
bool round_robin_apply(UeMap& ue_db, uint32_t rr_count, Predicate p)
{
  if (ue_db.empty()) {
    return false;
  }
  auto it = ue_db.begin();
  std::advance(it, (rr_count % ue_db.size()));
  for (uint32_t count = 0; count < ue_db.size(); ++count, ++it) {
    if (it == ue_db.end()) {
      // Wrap around to the beginning of the map.
      it = ue_db.begin();
    }
    if (p(it->second)) {
      return true;
    }
  }
  return false;
}
void sched_nr_time_rr::sched_dl_users(slot_ue_map_t& ue_db, bwp_slot_allocator& slot_alloc)
{
  // Rotate the round-robin starting UE with the PDCCH slot index.
  const auto rr_offset = slot_alloc.get_pdcch_tti().to_uint();

  // HARQ retransmissions are served before any new transmission.
  auto retx_ue = [&slot_alloc](slot_ue& ue) {
    if (ue.h_dl == nullptr or not ue.h_dl->has_pending_retx(slot_alloc.get_tti_rx())) {
      return false;
    }
    return slot_alloc.alloc_pdsch(ue, ue.h_dl->prbs()) == alloc_result::success;
  };
  if (round_robin_apply(ue_db, rr_offset, retx_ue)) {
    return;
  }

  // No retx allocated: attempt a new transmission over the full BWP width.
  auto newtx_ue = [&slot_alloc](slot_ue& ue) {
    if (ue.h_dl == nullptr or not ue.h_dl->empty()) {
      return false;
    }
    return slot_alloc.alloc_pdsch(ue, prb_interval{0, slot_alloc.cfg.cfg.rb_width}) == alloc_result::success;
  };
  round_robin_apply(ue_db, rr_offset, newtx_ue);
}
void sched_nr_time_rr::sched_ul_users(slot_ue_map_t& ue_db, bwp_slot_allocator& slot_alloc)
{
  // Rotate the round-robin starting UE with the PDCCH slot index.
  const auto rr_offset = slot_alloc.get_pdcch_tti().to_uint();

  // HARQ retransmissions are served before any new transmission.
  auto retx_ue = [&slot_alloc](slot_ue& ue) {
    if (ue.h_ul == nullptr or not ue.h_ul->has_pending_retx(slot_alloc.get_tti_rx())) {
      return false;
    }
    return slot_alloc.alloc_pusch(ue, ue.h_ul->prbs()) == alloc_result::success;
  };
  if (round_robin_apply(ue_db, rr_offset, retx_ue)) {
    return;
  }

  // No retx allocated: attempt a new transmission over the full BWP width.
  auto newtx_ue = [&slot_alloc](slot_ue& ue) {
    if (ue.h_ul == nullptr or not ue.h_ul->empty()) {
      return false;
    }
    return slot_alloc.alloc_pusch(ue, prb_interval{0, slot_alloc.cfg.cfg.rb_width}) == alloc_result::success;
  };
  round_robin_apply(ue_db, rr_offset, newtx_ue);
}
} // namespace sched_nr_impl
} // namespace srsenb

@ -52,6 +52,7 @@ slot_ue ue_carrier::try_reserve(slot_point pdcch_slot)
// copy cc-specific parameters and find available HARQs
slot_ue sfu(rnti, slot_rx, cc);
sfu.cfg = &bwp_cfg;
sfu.harq_ent = &harq_ent;
sfu.pdcch_slot = pdcch_slot;
const uint32_t k0 = 0;
sfu.pdsch_slot = sfu.pdcch_slot + k0;

@ -127,16 +127,7 @@ void slot_cc_worker::alloc_dl_ues()
if (not cfg.sched_cfg.pdsch_enabled) {
return;
}
if (slot_ues.empty()) {
return;
}
slot_ue& ue = slot_ues.begin()->second;
if (ue.h_dl == nullptr) {
return;
}
prb_interval prbs(0, cfg.bwps[0].N_rbg);
bwp_alloc.alloc_pdsch(ue, prbs);
cell.bwps[0].data_sched->sched_dl_users(slot_ues, bwp_alloc);
}
void slot_cc_worker::alloc_ul_ues()
@ -144,16 +135,7 @@ void slot_cc_worker::alloc_ul_ues()
if (not cfg.sched_cfg.pusch_enabled) {
return;
}
if (slot_ues.empty()) {
return;
}
slot_ue& ue = slot_ues.begin()->second;
if (ue.h_ul == nullptr) {
return;
}
prb_interval prbs(0, cfg.bwps[0].N_rbg);
bwp_alloc.alloc_pusch(ue, prbs);
cell.bwps[0].data_sched->sched_ul_users(slot_ues, bwp_alloc);
}
void slot_cc_worker::log_result() const
@ -164,14 +146,15 @@ void slot_cc_worker::log_result() const
if (pdcch.dci.ctx.rnti_type == srsran_rnti_type_c) {
const slot_ue& ue = slot_ues[pdcch.dci.ctx.rnti];
fmt::format_to(fmtbuf,
"SCHED: DL {}, cc={}, rnti=0x{:x}, pid={}, nrtx={}, f={}, dai={}, tti_pdsch={}, tti_ack={}",
"SCHED: DL {}, cc={}, rnti=0x{:x}, pid={}, f={}, nrtx={}, dai={}, tbs={}, tti_pdsch={}, tti_ack={}",
ue.h_dl->nof_retx() == 0 ? "tx" : "retx",
cell.cfg.cc,
ue.rnti,
pdcch.dci.pid,
ue.h_dl->nof_retx(),
srsran_dci_format_nr_string(pdcch.dci.ctx.format),
ue.h_dl->nof_retx(),
pdcch.dci.dai,
ue.h_dl->tbs(),
ue.pdsch_slot,
ue.uci_slot);
} else if (pdcch.dci.ctx.rnti_type == srsran_rnti_type_ra) {
@ -187,13 +170,14 @@ void slot_cc_worker::log_result() const
if (pdcch.dci.ctx.rnti_type == srsran_rnti_type_c) {
const slot_ue& ue = slot_ues[pdcch.dci.ctx.rnti];
fmt::format_to(fmtbuf,
"SCHED: UL {}, cc={}, rnti=0x{:x}, pid={}, nrtx={}, f={}, tti_pusch={}",
"SCHED: UL {}, cc={}, rnti=0x{:x}, pid={}, f={}, nrtx={}, tbs={}, tti_pusch={}",
ue.h_dl->nof_retx() == 0 ? "tx" : "retx",
cell.cfg.cc,
ue.rnti,
pdcch.dci.pid,
ue.h_dl->nof_retx(),
srsran_dci_format_nr_string(pdcch.dci.ctx.format),
ue.h_dl->nof_retx(),
ue.h_ul->tbs(),
ue.pusch_slot);
} else if (pdcch.dci.ctx.rnti_type == srsran_rnti_type_tc) {
const slot_ue& ue = slot_ues[pdcch.dci.ctx.rnti];

@ -121,12 +121,23 @@ sched_nr_sim_base::~sched_nr_sim_base()
logger.info("=========== End %s ==========\n", test_name.c_str());
}
int sched_nr_sim_base::add_user(uint16_t rnti, const sched_nr_interface::ue_cfg_t& ue_cfg_, uint32_t preamble_idx)
int sched_nr_sim_base::add_user(uint16_t rnti,
const sched_nr_interface::ue_cfg_t& ue_cfg_,
slot_point tti_rx,
uint32_t preamble_idx)
{
TESTASSERT(ue_db.count(rnti) == 0);
sched_ptr->ue_cfg(rnti, ue_cfg_);
ue_db.insert(std::make_pair(rnti, sched_nr_ue_sim(rnti, ue_cfg_, current_slot_tx, preamble_idx)));
sched_nr_interface::dl_sched_rar_info_t rach_info{};
rach_info.temp_crnti = rnti;
rach_info.prach_slot = tti_rx;
rach_info.preamble_idx = preamble_idx;
rach_info.msg3_size = 7;
sched_ptr->dl_rach_info(ue_cfg_.carriers[0].cc, rach_info);
return SRSRAN_SUCCESS;
}

@ -115,7 +115,7 @@ public:
std::string test_name);
virtual ~sched_nr_sim_base();
int add_user(uint16_t rnti, const sched_nr_interface::ue_cfg_t& ue_cfg_, uint32_t preamble_idx);
int add_user(uint16_t rnti, const sched_nr_interface::ue_cfg_t& ue_cfg_, slot_point tti_rx, uint32_t preamble_idx);
void new_slot(slot_point slot_tx);
void update(sched_nr_cc_output_res_t& cc_out);

@ -97,7 +97,7 @@ void sched_nr_cfg_serialized_test()
sched_nr_interface::ue_cfg_t uecfg = get_default_ue_cfg(nof_sectors);
uecfg.fixed_dl_mcs = 15;
uecfg.fixed_ul_mcs = 15;
sched_tester.add_user(0x46, uecfg, 0);
sched_tester.add_user(0x46, uecfg, slot_point{0, 0}, 0);
std::vector<long> count_per_cc(nof_sectors, 0);
for (uint32_t nof_slots = 0; nof_slots < max_nof_ttis; ++nof_slots) {
@ -145,7 +145,7 @@ void sched_nr_cfg_parallel_cc_test()
sched_nr_interface::ue_cfg_t uecfg = get_default_ue_cfg(cells_cfg.size());
uecfg.fixed_dl_mcs = 15;
uecfg.fixed_ul_mcs = 15;
sched_tester.add_user(0x46, uecfg, 0);
sched_tester.add_user(0x46, uecfg, slot_point{0, 0}, 0);
std::array<std::atomic<long>, SRSRAN_MAX_CARRIERS> nano_count{};
for (uint32_t nof_slots = 0; nof_slots < max_nof_ttis; ++nof_slots) {

@ -20,7 +20,7 @@
*/
#include "sched_nr_ue_ded_test_suite.h"
#include "srsenb/hdr/stack/mac/nr/sched_nr_rb_grid.h"
#include "srsenb/hdr/stack/mac/nr/sched_nr_grant_allocator.h"
#include "srsran/common/test_common.h"
namespace srsenb {

@ -37,7 +37,7 @@ namespace srsue {
class nas_base
{
public:
nas_base(srslog::basic_logger& logger_);
nas_base(srslog::basic_logger& logger_, uint32_t mac_offset, uint32_t seq_offset_, uint32_t bearer_id_);
// PCAP
void start_pcap(srsran::nas_pcap* pcap_) { pcap = pcap_; }
@ -70,6 +70,10 @@ protected:
bool integrity_check(srsran::byte_buffer_t* pdu);
void cipher_encrypt(srsran::byte_buffer_t* pdu);
void cipher_decrypt(srsran::byte_buffer_t* pdu);
uint32_t mac_offset = 0;
uint32_t seq_offset = 0;
uint32_t bearer_id = 0;
};
} // namespace srsue

@ -35,6 +35,10 @@
#include "srsue/hdr/stack/upper/nas.h"
#include "srsue/hdr/stack/upper/nas_idle_procedures.h"
#define LTE_MAC_OFFSET 1
#define LTE_SEQ_OFFSET 5
#define LTE_NAS_BEARER 0
using namespace srsran;
namespace srsue {
@ -44,7 +48,7 @@ namespace srsue {
********************************************************************/
nas::nas(srslog::basic_logger& logger_, srsran::task_sched_handle task_sched_) :
nas_base(logger_),
nas_base(logger_, LTE_MAC_OFFSET, LTE_SEQ_OFFSET, LTE_NAS_BEARER),
plmn_searcher(this),
task_sched(task_sched_),
t3402(task_sched_.get_unique_timer()),

@ -36,6 +36,10 @@
#include <iostream>
#include <unistd.h>
#define MAC_5G_OFFSET 2
#define SEQ_5G_OFFSET 6
#define NAS_5G_BEARER 1
using namespace srsran;
using namespace srsran::nas_5g;
@ -46,7 +50,7 @@ namespace srsue {
********************************************************************/
nas_5g::nas_5g(srslog::basic_logger& logger_, srsran::task_sched_handle task_sched_) :
nas_base(logger_),
nas_base(logger_, MAC_5G_OFFSET, SEQ_5G_OFFSET, NAS_5G_BEARER),
task_sched(task_sched_),
t3502(task_sched_.get_unique_timer()),
t3510(task_sched_.get_unique_timer()),

@ -23,7 +23,9 @@
using namespace srsran;
namespace srsue {
nas_base::nas_base(srslog::basic_logger& logger_) : logger(logger_) {}
// Constructs the NAS base layer. The parameters describe the layout of a
// security-protected NAS PDU, which differs between LTE NAS and 5G NAS:
//  - mac_offset_: byte index of the message authentication code in the PDU
//  - seq_offset_: byte index of the sequence number in the PDU
//  - bearer_id_:  bearer identity passed to the ciphering/integrity algorithms
nas_base::nas_base(srslog::basic_logger& logger_, uint32_t mac_offset_, uint32_t seq_offset_, uint32_t bearer_id_) :
logger(logger_), mac_offset(mac_offset_), seq_offset(seq_offset_), bearer_id(bearer_id_)
{}
int nas_base::parse_security_algorithm_list(std::string algorithm_string, bool* algorithm_caps)
{
@ -60,31 +62,13 @@ void nas_base::integrity_generate(uint8_t* key_128,
case INTEGRITY_ALGORITHM_ID_EIA0:
break;
case INTEGRITY_ALGORITHM_ID_128_EIA1:
security_128_eia1(key_128,
count,
0, // Bearer always 0 for NAS
direction,
msg,
msg_len,
mac);
security_128_eia1(key_128, count, bearer_id, direction, msg, msg_len, mac);
break;
case INTEGRITY_ALGORITHM_ID_128_EIA2:
security_128_eia2(key_128,
count,
0, // Bearer always 0 for NAS
direction,
msg,
msg_len,
mac);
security_128_eia2(key_128, count, bearer_id, direction, msg, msg_len, mac);
break;
case INTEGRITY_ALGORITHM_ID_128_EIA3:
security_128_eia3(key_128,
count,
0, // Bearer always 0 for NAS
direction,
msg,
msg_len,
mac);
security_128_eia3(key_128, count, bearer_id, direction, msg, msg_len, mac);
break;
default:
break;
@ -100,14 +84,18 @@ bool nas_base::integrity_check(byte_buffer_t* pdu)
return false;
}
if (pdu->N_bytes > 5) {
if (pdu->N_bytes > seq_offset) {
uint8_t exp_mac[4] = {0};
uint8_t* mac = &pdu->msg[1];
uint8_t* mac = &pdu->msg[mac_offset];
// generate expected MAC
uint32_t count_est = (ctxt.rx_count & 0x00FFFF00u) | pdu->msg[5];
integrity_generate(
&k_nas_int[16], count_est, SECURITY_DIRECTION_DOWNLINK, &pdu->msg[5], pdu->N_bytes - 5, &exp_mac[0]);
uint32_t count_est = (ctxt.rx_count & 0x00FFFF00u) | pdu->msg[seq_offset];
integrity_generate(&k_nas_int[16],
count_est,
SECURITY_DIRECTION_DOWNLINK,
&pdu->msg[seq_offset],
pdu->N_bytes - seq_offset,
&exp_mac[0]);
// Check if expected mac equals the sent mac
for (int i = 0; i < 4; i++) {
@ -119,7 +107,7 @@ bool nas_base::integrity_check(byte_buffer_t* pdu)
exp_mac[1],
exp_mac[2],
exp_mac[3],
pdu->msg[5],
pdu->msg[seq_offset],
mac[0],
mac[1],
mac[2],
@ -129,7 +117,7 @@ bool nas_base::integrity_check(byte_buffer_t* pdu)
}
logger.info("Integrity check ok. Local: count=%d, Received: count=%d [%02x %02x %02x %02x]",
count_est,
pdu->msg[5],
pdu->msg[seq_offset],
mac[0],
mac[1],
mac[2],
@ -161,32 +149,32 @@ void nas_base::cipher_encrypt(byte_buffer_t* pdu)
case CIPHERING_ALGORITHM_ID_128_EEA1:
security_128_eea1(&k_nas_enc[16],
ctxt.tx_count,
0, // Bearer always 0 for NAS
bearer_id,
SECURITY_DIRECTION_UPLINK,
&pdu->msg[6],
pdu->N_bytes - 6,
&pdu_tmp.msg[6]);
memcpy(&pdu->msg[6], &pdu_tmp.msg[6], pdu->N_bytes - 6);
&pdu->msg[seq_offset + 1],
pdu->N_bytes - seq_offset + 1,
&pdu_tmp.msg[seq_offset + 1]);
memcpy(&pdu->msg[seq_offset + 1], &pdu_tmp.msg[seq_offset + 1], pdu->N_bytes - seq_offset + 1);
break;
case CIPHERING_ALGORITHM_ID_128_EEA2:
security_128_eea2(&k_nas_enc[16],
ctxt.tx_count,
0, // Bearer always 0 for NAS
bearer_id,
SECURITY_DIRECTION_UPLINK,
&pdu->msg[6],
pdu->N_bytes - 6,
&pdu_tmp.msg[6]);
memcpy(&pdu->msg[6], &pdu_tmp.msg[6], pdu->N_bytes - 6);
&pdu->msg[seq_offset + 1],
pdu->N_bytes - seq_offset + 1,
&pdu_tmp.msg[seq_offset + 1]);
memcpy(&pdu->msg[seq_offset + 1], &pdu_tmp.msg[seq_offset + 1], pdu->N_bytes - seq_offset + 1);
break;
case CIPHERING_ALGORITHM_ID_128_EEA3:
security_128_eea3(&k_nas_enc[16],
ctxt.tx_count,
0, // Bearer always 0 for NAS
bearer_id,
SECURITY_DIRECTION_UPLINK,
&pdu->msg[6],
pdu->N_bytes - 6,
&pdu_tmp.msg[6]);
memcpy(&pdu->msg[6], &pdu_tmp.msg[6], pdu->N_bytes - 6);
&pdu->msg[seq_offset + 1],
pdu->N_bytes - seq_offset + 1,
&pdu_tmp.msg[seq_offset + 1]);
memcpy(&pdu->msg[seq_offset + 1], &pdu_tmp.msg[seq_offset + 1], pdu->N_bytes - seq_offset + 1);
break;
default:
logger.error("Ciphering algorithm not known");
@ -209,34 +197,34 @@ void nas_base::cipher_decrypt(byte_buffer_t* pdu)
case CIPHERING_ALGORITHM_ID_128_EEA1:
security_128_eea1(&k_nas_enc[16],
count_est,
0, // Bearer always 0 for NAS
bearer_id,
SECURITY_DIRECTION_DOWNLINK,
&pdu->msg[6],
pdu->N_bytes - 6,
&tmp_pdu.msg[6]);
memcpy(&pdu->msg[6], &tmp_pdu.msg[6], pdu->N_bytes - 6);
&pdu->msg[seq_offset + 1],
pdu->N_bytes - seq_offset + 1,
&tmp_pdu.msg[seq_offset + 1]);
memcpy(&pdu->msg[seq_offset + 1], &tmp_pdu.msg[seq_offset + 1], pdu->N_bytes - seq_offset + 1);
break;
case CIPHERING_ALGORITHM_ID_128_EEA2:
security_128_eea2(&k_nas_enc[16],
count_est,
0, // Bearer always 0 for NAS
bearer_id,
SECURITY_DIRECTION_DOWNLINK,
&pdu->msg[6],
pdu->N_bytes - 6,
&tmp_pdu.msg[6]);
&pdu->msg[seq_offset + 1],
pdu->N_bytes - seq_offset + 1,
&tmp_pdu.msg[seq_offset + 1]);
logger.debug(tmp_pdu.msg, pdu->N_bytes, "Decrypted");
memcpy(&pdu->msg[6], &tmp_pdu.msg[6], pdu->N_bytes - 6);
memcpy(&pdu->msg[seq_offset + 1], &tmp_pdu.msg[seq_offset + 1], pdu->N_bytes - seq_offset + 1);
break;
case CIPHERING_ALGORITHM_ID_128_EEA3:
security_128_eea3(&k_nas_enc[16],
count_est,
0, // Bearer always 0 for NAS
bearer_id,
SECURITY_DIRECTION_DOWNLINK,
&pdu->msg[6],
pdu->N_bytes - 6,
&tmp_pdu.msg[6]);
&pdu->msg[seq_offset + 1],
pdu->N_bytes - seq_offset + 1,
&tmp_pdu.msg[seq_offset + 1]);
logger.debug(tmp_pdu.msg, pdu->N_bytes, "Decrypted");
memcpy(&pdu->msg[6], &tmp_pdu.msg[6], pdu->N_bytes - 6);
memcpy(&pdu->msg[seq_offset + 1], &tmp_pdu.msg[seq_offset + 1], pdu->N_bytes - seq_offset + 1);
break;
default:
logger.error("Ciphering algorithms not known");

Loading…
Cancel
Save