Merge branch 'next' into agpl_next

# Conflicts:
#	lib/src/srslog/sinks/single_write_file_sink.h
commit 80e17d2986

@ -7,6 +7,7 @@
# - init of static memory may cause an exception (cert-err58)
# - forbidden implicit conversion from pointer/int to bool
# - recommended auto
# - remove llvm-specific checks (header guard style, usage of llvm namespace, restriction of libc includes, etc.)
# Naming conventions set to snake_case
Checks: '*,-fuchsia-*,
-cppcoreguidelines-pro-type-vararg,-hicpp-vararg,
@ -14,12 +15,15 @@ Checks: '*,-fuchsia-*,
-cppcoreguidelines-pro-bounds-array-to-pointer-decay,-hicpp-no-array-decay,
-cppcoreguidelines-pro-bounds-constant-array-index,-cppcoreguidelines-pro-type-cstyle-cast,
-cppcoreguidelines-pro-type-union-access,
-cppcoreguidelines-pro-type-static-cast-downcast,
-modernize-use-using,-modernize-use-trailing-return-type,
-modernize-use-auto,-hicpp-use-auto,
-llvmlibc-callee-namespace,
-llvmlibc-callee-namespace,-llvmlibc-implementation-in-namespace,-llvmlibc-restrict-system-libc-headers,
-llvm-header-guard,
-google-runtime-references,-google-readability-casting,-google-build-using-namespace,
google-default-arguments,-cppcoreguidelines-pro-bounds-pointer-arithmetic,
-cert-err58-cpp,
-readability-function-cognitive-complexity,-readability-isolate-declaration,
-misc-non-private-member-variables-in-classes,-altera-struct-pack-align,-readability-uppercase-literal-suffix,
-cppcoreguidelines-non-private-member-variables-in-classes,
readability-identifier-naming'

@ -116,8 +116,8 @@ public:
}
T& front() { return (*this)[0]; }
const T& front() const { return (*this)[0]; }
T* data() { return &front(); }
const T* data() const { return &front(); }
T* data() { return reinterpret_cast<T*>(buffer); }
const T* data() const { return reinterpret_cast<const T*>(buffer); }
// Iterators
iterator begin() { return data(); }

@ -51,7 +51,11 @@ public:
srslte::task_queue_handle make_task_queue(uint32_t qsize) { return external_tasks.get_queue_handler(qsize); }
//! Delays task processing by duration_ms
void defer_callback(uint32_t duration_ms, std::function<void()> func) { timers.defer_callback(duration_ms, func); }
template <typename F>
void defer_callback(uint32_t duration_ms, F&& func)
{
timers.defer_callback(duration_ms, std::forward<F>(func));
}
//! Enqueues an internal task to be run in the next tick
void defer_task(srslte::move_task_t func) { internal_tasks.push_back(std::move(func)); }
@ -123,9 +127,10 @@ public:
{
sched->notify_background_task_result(std::move(task));
}
void defer_callback(uint32_t duration_ms, std::function<void()> func)
template <typename F>
void defer_callback(uint32_t duration_ms, F&& func)
{
sched->defer_callback(duration_ms, std::move(func));
sched->defer_callback(duration_ms, std::forward<F>(func));
}
void defer_task(srslte::move_task_t func) { sched->defer_task(std::move(func)); }
@ -145,9 +150,10 @@ public:
sched->notify_background_task_result(std::move(task));
}
srslte::task_queue_handle make_task_queue() { return sched->make_task_queue(); }
void defer_callback(uint32_t duration_ms, std::function<void()> func)
template <typename F>
void defer_callback(uint32_t duration_ms, F&& func)
{
sched->defer_callback(duration_ms, std::move(func));
sched->defer_callback(duration_ms, std::forward<F>(func));
}
private:

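The defer_callback overloads above move from std::function<void()> to a forwarding template. A minimal sketch of why that matters, assuming only standard C++ (the names here are illustrative, not part of srsLTE):

#include <cstdint>
#include <memory>
#include <utility>

// Illustrative stand-in for the scheduler entry point: stores and runs the callback.
template <typename F>
void defer_callback(uint32_t duration_ms, F&& func)
{
  auto stored = std::forward<F>(func); // no copy forced; move-only callables are fine
  (void)duration_ms;
  stored();
}

int main()
{
  auto ctx = std::make_unique<int>(5);
  // A lambda capturing a move-only type cannot be wrapped in std::function
  // (which requires a copyable target), but forwards cleanly through F&&:
  defer_callback(100, [c = std::move(ctx)]() { return *c; });
}

It also avoids the type-erasure allocation that std::function may perform for large captures.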
@ -33,6 +33,7 @@ namespace srslte {
**************************/
struct phy_cfg_nr_t {
srslte_tdd_config_nr_t tdd = {};
srslte_sch_hl_cfg_nr_t pdsch = {};
srslte_sch_hl_cfg_nr_t pusch = {};
srslte_pucch_nr_hl_cfg_t pucch = {};
@ -43,9 +44,6 @@ struct phy_cfg_nr_t {
phy_cfg_nr_t()
{
// Default PDSCH configuration
pdsch.sch_cfg.mcs_table = srslte_mcs_table_256qam;
// Default PRACH configuration
prach.is_nr = true;
prach.config_idx = 16;
@ -55,6 +53,21 @@ struct phy_cfg_nr_t {
prach.num_ra_preambles = 64;
prach.hs_flag = false;
// tdd-UL-DL-ConfigurationCommon
// referenceSubcarrierSpacing: kHz15 (0)
// pattern1
// dl-UL-TransmissionPeriodicity: ms10 (7)
// nrofDownlinkSlots: 7
// nrofDownlinkSymbols: 6
// nrofUplinkSlots: 2
// nrofUplinkSymbols: 4
tdd.pattern1.period_ms = 10;
tdd.pattern1.nof_dl_slots = 7;
tdd.pattern1.nof_dl_symbols = 6;
tdd.pattern1.nof_ul_slots = 2;
tdd.pattern1.nof_ul_symbols = 4;
tdd.pattern2.period_ms = 0;
// physicalCellGroupConfig
// pdsch-HARQ-ACK-Codebook: dynamic (1)
harq_ack.pdsch_harq_ack_codebook = srslte_pdsch_harq_ack_codebook_dynamic;
@ -93,8 +106,8 @@ struct phy_cfg_nr_t {
srslte_search_space_t search_space1 = {};
search_space1.id = 1;
search_space1.coreset_id = 1;
search_space1.nof_candidates[0] = 0;
search_space1.nof_candidates[1] = 0;
search_space1.nof_candidates[0] = 1;
search_space1.nof_candidates[1] = 1;
search_space1.nof_candidates[2] = 1;
search_space1.nof_candidates[3] = 0;
search_space1.nof_candidates[4] = 0;
@ -108,6 +121,56 @@ struct phy_cfg_nr_t {
pdcch.ra_search_space.type = srslte_search_space_type_common_1;
pdcch.ra_search_space_present = true;
// spCellConfigDedicated
// initialDownlinkBWP
// pdcch-Config: setup (1)
// setup
// controlResourceSetToAddModList: 1 item
// Item 0
// ControlResourceSet
// controlResourceSetId: 2
// frequencyDomainResources: ff0000000000 [bit length 45, 3 LSB pad bits, 1111 1111 0000
// 0000 0000 0000 0000 0000 0000 0000 0000 0... decimal value 35046933135360]
// duration: 1
// cce-REG-MappingType: nonInterleaved (1)
// nonInterleaved: NULL
// precoderGranularity: sameAsREG-bundle (0)
pdcch.coreset[2].id = 2;
pdcch.coreset[2].precoder_granularity = srslte_coreset_precoder_granularity_reg_bundle;
pdcch.coreset[2].duration = 1;
pdcch.coreset[2].mapping_type = srslte_coreset_mapping_type_non_interleaved;
for (uint32_t i = 0; i < SRSLTE_CORESET_FREQ_DOMAIN_RES_SIZE; i++) {
pdcch.coreset[2].freq_resources[i] = (i < 8);
}
pdcch.coreset_present[2] = true;
// searchSpacesToAddModList: 1 item
// Item 0
// SearchSpace
// searchSpaceId: 2
// controlResourceSetId: 2
// monitoringSlotPeriodicityAndOffset: sl1 (0)
// sl1: NULL
// monitoringSymbolsWithinSlot: 8000 [bit length 14, 2 LSB pad bits, 1000 0000 0000
// 00.. decimal value 8192] nrofCandidates
// aggregationLevel1: n0 (0)
// aggregationLevel2: n2 (2)
// aggregationLevel4: n1 (1)
// aggregationLevel8: n0 (0)
// aggregationLevel16: n0 (0)
// searchSpaceType: ue-Specific (1)
// ue-Specific
// dci-Formats: formats0-0-And-1-0 (0)
pdcch.search_space[2].id = 2;
pdcch.search_space[2].coreset_id = 2;
pdcch.search_space[2].nof_candidates[0] = 0;
pdcch.search_space[2].nof_candidates[1] = 2;
pdcch.search_space[2].nof_candidates[2] = 1;
pdcch.search_space[2].nof_candidates[3] = 0;
pdcch.search_space[2].nof_candidates[4] = 0;
pdcch.search_space[2].type = srslte_search_space_type_ue;
pdcch.search_space_present[2] = true;
// pdsch-ConfigCommon: setup (1)
// setup
// pdsch-TimeDomainAllocationList: 2 items
@ -187,11 +250,22 @@ struct phy_cfg_nr_t {
// betaOffsetACK-Index1: 9
// betaOffsetACK-Index2: 9
// betaOffsetACK-Index3: 9
pusch.beta_offsets.ack_index1 = 9;
pusch.beta_offsets.ack_index2 = 9;
pusch.beta_offsets.ack_index3 = 9;
// betaOffsetCSI-Part1-Index1: 6
// betaOffsetCSI-Part1-Index2: 6
pusch.beta_offsets.csi1_index1 = 6;
pusch.beta_offsets.csi1_index2 = 6;
// betaOffsetCSI-Part2-Index1: 6
// betaOffsetCSI-Part2-Index2: 6
pusch.beta_offsets.csi2_index1 = 6;
pusch.beta_offsets.csi2_index2 = 6;
// scaling: f1 (3)
pusch.scaling = 1;
// pucch-Config: setup (1)
// setup

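Note on the CORESET#2 bitmap above: frequencyDomainResources ff0000000000 is a 45-bit string whose eight most significant bits are set, one bit per group of six PRBs; the loop writing freq_resources[i] = (i < 8) encodes exactly that.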
@ -223,27 +223,22 @@ public:
} dl_sched_bc_t;
typedef struct {
uint32_t cfi;
uint32_t nof_data_elems;
uint32_t nof_rar_elems;
uint32_t nof_bc_elems;
dl_sched_data_t data[MAX_DATA_LIST];
dl_sched_rar_t rar[MAX_RAR_LIST];
dl_sched_bc_t bc[MAX_BC_LIST];
} dl_sched_res_t;
struct dl_sched_res_t {
uint32_t cfi;
srslte::bounded_vector<dl_sched_data_t, MAX_DATA_LIST> data;
srslte::bounded_vector<dl_sched_rar_t, MAX_RAR_LIST> rar;
srslte::bounded_vector<dl_sched_bc_t, MAX_BC_LIST> bc;
};
typedef struct {
uint16_t rnti;
enum phich_elem { ACK, NACK } phich;
} ul_sched_phich_t;
typedef struct {
uint32_t nof_dci_elems;
uint32_t nof_phich_elems;
ul_sched_data_t pusch[MAX_DATA_LIST];
ul_sched_phich_t phich[MAX_PHICH_LIST];
} ul_sched_res_t;
struct ul_sched_res_t {
srslte::bounded_vector<ul_sched_data_t, MAX_DATA_LIST> pusch;
srslte::bounded_vector<ul_sched_phich_t, MAX_PHICH_LIST> phich;
};
/******************* Scheduler Control ****************************/

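Replacing the fixed arrays plus nof_*_elems counters with srslte::bounded_vector keeps size and storage in one object. A hedged usage sketch, assuming bounded_vector exposes a std::vector-like interface (push_back/size/iteration) over fixed inline capacity:

dl_sched_res_t dl_res = {};
dl_res.cfi = 1;
dl_res.data.push_back(dl_sched_data_t{}); // replaces data[nof_data_elems++] = ...
for (const dl_sched_data_t& d : dl_res.data) {
  (void)d; // the size travels with the container; no separate counter to keep in sync
}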
@ -113,8 +113,8 @@ class phy_interface_mac_common
{
public:
/* Time advance commands */
virtual void set_timeadv_rar(uint32_t ta_cmd) = 0;
virtual void set_timeadv(uint32_t ta_cmd) = 0;
virtual void set_timeadv_rar(uint32_t tti, uint32_t ta_cmd) = 0;
virtual void set_timeadv(uint32_t tti, uint32_t ta_cmd) = 0;
/* Activate / Deactivate SCell */
virtual void set_activation_deactivation_scell(uint32_t cmd, uint32_t tti) = 0;

@ -56,7 +56,7 @@ typedef struct SRSLTE_API {
float epre_dBfs;
float snr;
float snr_db;
float cfo;
float cfo_hz;
float ta_us;
} srslte_chest_ul_res_t;

@ -95,10 +95,17 @@ extern "C" {
#define SRSLTE_PDCCH_MAX_RE ((SRSLTE_NRE - 3U) * (1U << (SRSLTE_SEARCH_SPACE_NOF_AGGREGATION_LEVELS_NR - 1U)) * 6U)
/**
* @brief defines the maximum number of candidates for a given Aggregation level
* @brief defines the maximum number of candidates for a given search-space and aggregation level according to TS 38.331
* SearchSpace sequence
*/
#define SRSLTE_SEARCH_SPACE_MAX_NOF_CANDIDATES_NR 8
/**
* @brief defines the maximum number of monitored PDCCH candidates per slot and per serving cell according to TS 38.213
* Table 10.1-2
*/
#define SRSLTE_MAX_NOF_CANDIDATES_SLOT_NR 44
/**
* @brief defines the maximum number of resource elements per PRB
* @remark Defined in TS 38.214 V15.10.0 5.1.3.2 Transport block size determination, point 1, second bullet
@ -310,6 +317,25 @@ typedef struct SRSLTE_API {
uint32_t nof_candidates[SRSLTE_SEARCH_SPACE_NOF_AGGREGATION_LEVELS_NR];
} srslte_search_space_t;
/**
* @brief TDD pattern configuration
*/
typedef struct SRSLTE_API {
uint32_t period_ms; ///< Period in milliseconds, set to 0 if not present
uint32_t nof_dl_slots; ///< Number of consecutive full DL slots at the beginning of each DL-UL pattern
uint32_t nof_dl_symbols; ///< Number of consecutive DL symbols in the beginning of the slot following the last DL slot
uint32_t nof_ul_slots; ///< Number of consecutive full UL slots at the end of each DL-UL pattern
uint32_t nof_ul_symbols; ///< Number of consecutive UL symbols in the end of the slot preceding the first full UL slot
} srslte_tdd_pattern_t;
/**
* @brief TDD configuration as described in TS 38.331 v15.10.0 TDD-UL-DL-ConfigCommon
*/
typedef struct SRSLTE_API {
srslte_tdd_pattern_t pattern1;
srslte_tdd_pattern_t pattern2;
} srslte_tdd_config_nr_t;
/**
* @brief Get the RNTI type name for NR
* @param rnti_type RNTI type name
@ -381,6 +407,24 @@ SRSLTE_API uint32_t srslte_min_symbol_sz_rb(uint32_t nof_prb);
*/
SRSLTE_API float srslte_symbol_distance_s(uint32_t l0, uint32_t l1, uint32_t numerology);
/**
* @brief Decides whether a given slot is configured as Downlink
* @param cfg Provides TDD configuration
* @param numerology Provides BWP numerology
* @param slot_idx Slot index in the frame for the given numerology
* @return true if the provided slot index is configured for Downlink
*/
SRSLTE_API bool srslte_tdd_nr_is_dl(const srslte_tdd_config_nr_t* cfg, uint32_t numerology, uint32_t slot_idx);
/**
* @brief Decides whether a given slot is configured as Uplink
* @param cfg Provides TDD configuration
* @param numerology Provides BWP numerology
* @param slot_idx Slot index in the frame for the given numerology
* @return true if the provided slot index is configured for Uplink
*/
SRSLTE_API bool srslte_tdd_nr_is_ul(const srslte_tdd_config_nr_t* cfg, uint32_t numerology, uint32_t slot_idx);
#ifdef __cplusplus
}
#endif

@ -153,6 +153,20 @@ typedef struct SRSLTE_API {
srslte_sch_tb_t tb[SRSLTE_MAX_TB];
} srslte_sch_grant_nr_t;
/**
* @brief Beta offset configuration provided from upper layers
* @remark Configure according to TS 38.331 BetaOffsets
*/
typedef struct {
uint32_t ack_index1; ///< Use for up to 2 HARQ-ACK bits. Set to 11 if absent.
uint32_t ack_index2; ///< Use for up to 11 HARQ-ACK bits. Set to 11 if absent.
uint32_t ack_index3; ///< Use for more than 11 HARQ-ACK bits. Set to 11 if absent.
uint32_t csi1_index1; ///< Use for up to 11 CSI bits. Set to 13 if absent.
uint32_t csi1_index2; ///< Use for more than 11 CSI bits. Set to 13 if absent.
uint32_t csi2_index1; ///< Use for up to 11 CSI bits. Set to 13 if absent.
uint32_t csi2_index2; ///< Use for more than 11 CSI bits. Set to 13 if absent.
} srslte_beta_offsets_t;
/**
* @brief Flattened SCH configuration parameters provided by higher layers
* @remark Described in TS 38.331 V15.10.0 Section PDSCH-Config
@ -197,6 +211,10 @@ typedef struct SRSLTE_API {
bool rbg_size_cfg_1; ///< RBG size configuration (1 or 2)
srslte_sch_cfg_t sch_cfg; ///< Common shared channel parameters
/// PUSCH only
srslte_beta_offsets_t beta_offsets; ///< Semi-static only.
float scaling; ///< Indicates a scaling factor to limit the number of resource elements assigned to UCI on PUSCH.
} srslte_sch_hl_cfg_nr_t;
/**
@ -216,6 +234,7 @@ typedef struct SRSLTE_API {
bool enable_transform_precoder;
float beta_harq_ack_offset;
float beta_csi_part1_offset;
float beta_csi_part2_offset;
float scaling;
bool freq_hopping_enabled;
} srslte_sch_cfg_nr_t;

@ -124,6 +124,7 @@ SRSLTE_API uint32_t srslte_pusch_nr_rx_info(const srslte_pusch_nr_t* q,
SRSLTE_API uint32_t srslte_pusch_nr_tx_info(const srslte_pusch_nr_t* q,
const srslte_sch_cfg_nr_t* cfg,
const srslte_sch_grant_nr_t* grant,
const srslte_uci_value_nr_t* uci_value,
char* str,
uint32_t str_len);

@ -130,4 +130,18 @@ SRSLTE_API int srslte_ra_ul_dci_to_grant_nr(const srslte_carrier_nr_t* carrie
srslte_sch_cfg_nr_t* pusch_cfg,
srslte_sch_grant_nr_t* pusch_grant);
/**
* @brief Set up the Uplink Control Information configuration for a PUSCH transmission
*
* @remark Implement procedure described in TS 38.213 9.3 UCI reporting in physical uplink shared channel
*
* @param pusch_hl_cfg PUSCH configuration provided by higher layers
* @param uci_cfg Uplink Control Information configuration for this PUSCH transmission
* @param pusch_cfg PUSCH configuration after applying the procedure
* @return SRSLTE_SUCCESS if the procedure is successful, SRSLTE_ERROR code otherwise
*/
SRSLTE_API int srslte_ra_ul_set_grant_uci_nr(const srslte_sch_hl_cfg_nr_t* pusch_hl_cfg,
const srslte_uci_cfg_nr_t* uci_cfg,
srslte_sch_cfg_nr_t* pusch_cfg);
#endif // SRSLTE_RA_NR_H

@ -111,6 +111,14 @@ typedef struct SRSLTE_API {
uint32_t nof_dl_data_to_ul_ack;
} srslte_ue_dl_nr_harq_ack_cfg_t;
typedef struct SRSLTE_API {
uint32_t coreset_id;
uint32_t ss_id;
srslte_dci_location_t location;
srslte_dmrs_pdcch_measure_t measure;
srslte_pdcch_nr_res_t result;
} srslte_ue_dl_nr_pdcch_info_t;
typedef struct SRSLTE_API {
uint32_t max_prb;
uint32_t nof_rx_antennas;
@ -131,6 +139,14 @@ typedef struct SRSLTE_API {
srslte_pdcch_nr_t pdcch;
srslte_dmrs_pdcch_ce_t* pdcch_ce;
/// Store Blind-search information from all possible candidate locations for debug purposes
srslte_ue_dl_nr_pdcch_info_t pdcch_info[SRSLTE_MAX_NOF_CANDIDATES_SLOT_NR];
uint32_t pdcch_info_count;
/// Temporarily stores found DCI messages from all SS
srslte_dci_msg_nr_t dci_msg[SRSLTE_MAX_DCI_MSG_NR];
uint32_t dci_msg_count;
srslte_dci_msg_nr_t pending_ul_dci_msg[SRSLTE_MAX_DCI_MSG_NR];
uint32_t pending_ul_dci_count;
} srslte_ue_dl_nr_t;

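A debug sketch of how the stored blind-search information could be dumped, assuming the fields declared above (up to SRSLTE_MAX_NOF_CANDIDATES_SLOT_NR = 44 entries per slot, per TS 38.213 Table 10.1-2):

for (uint32_t i = 0; i < q->pdcch_info_count; i++) {
  const srslte_ue_dl_nr_pdcch_info_t* info = &q->pdcch_info[i];
  // One line per monitored candidate: where it was searched and how it measured
  printf("coreset=%u ss=%u L=%u ncce=%u corr=%.2f\n",
         info->coreset_id,
         info->ss_id,
         info->location.L,
         info->location.ncce,
         info->measure.norm_corr);
}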
@ -76,8 +76,11 @@ SRSLTE_API int srslte_ue_ul_nr_encode_pucch(srslte_ue_ul_nr_t*
SRSLTE_API void srslte_ue_ul_nr_free(srslte_ue_ul_nr_t* q);
SRSLTE_API int
srslte_ue_ul_nr_pusch_info(const srslte_ue_ul_nr_t* q, const srslte_sch_cfg_nr_t* cfg, char* str, uint32_t str_len);
SRSLTE_API int srslte_ue_ul_nr_pusch_info(const srslte_ue_ul_nr_t* q,
const srslte_sch_cfg_nr_t* cfg,
const srslte_uci_value_nr_t* uci_value,
char* str,
uint32_t str_len);
SRSLTE_API int srslte_ue_ul_nr_pucch_info(const srslte_pucch_nr_resource_t* resource,
const srslte_uci_data_nr_t* uci_data,

@ -109,6 +109,7 @@ private:
std::array<std::vector<cf_t>, SRSLTE_MAX_CHANNELS> rx_buffer;
std::array<srslte_resampler_fft_t, SRSLTE_MAX_CHANNELS> interpolators = {};
std::array<srslte_resampler_fft_t, SRSLTE_MAX_CHANNELS> decimators = {};
bool decimator_busy = false; ///< Indicates the decimator is changing the rate
rf_timestamp_t end_of_burst_time = {};
bool is_start_of_burst = false;

@ -27,6 +27,9 @@
namespace srslog {
/// This type is used to store small strings without doing any memory allocation.
using small_str_buffer = fmt::basic_memory_buffer<char, 64>;
namespace detail {
/// This structure gives the user a way to log generic information as a context.
@ -40,13 +43,14 @@ struct log_context {
/// Metadata fields carried for each log entry.
struct log_entry_metadata {
std::chrono::high_resolution_clock::time_point tp;
log_context context;
const char* fmtstring;
std::chrono::high_resolution_clock::time_point tp;
log_context context;
const char* fmtstring;
fmt::dynamic_format_arg_store<fmt::printf_context> store;
std::string log_name;
char log_tag;
std::vector<uint8_t> hex_dump;
std::string log_name;
char log_tag;
small_str_buffer small_str;
std::vector<uint8_t> hex_dump;
};
} // namespace detail

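small_str_buffer is fmt::basic_memory_buffer<char, 64>, so strings up to 64 characters live in inline storage and reach the backend without a heap allocation. A sketch of the intended use with the new operator()(small_str_buffer&&) overload shown further below (my_channel is an illustrative log_channel instance, and the exact fmt::format_to overload available depends on the fmt version bundled with srslog):

srslog::small_str_buffer str;
fmt::format_to(std::back_inserter(str), "rnti=0x{:x} cc={}", 0x46, 0);
my_channel(std::move(str)); // preformatted string; no dynamic_format_arg_store needed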
@ -120,7 +120,7 @@ public:
// Did we wake up on timeout?
if (timedout && queue.empty()) {
cond_var.unlock();
return {false, T{}};
return {false, T()};
}
// Here we have been woken up normally.

@ -122,7 +122,34 @@ public:
fmtstr,
std::move(store),
log_name,
log_tag}};
log_tag,
small_str_buffer()}};
backend.push(std::move(entry));
}
/// Builds the provided log entry and passes it to the backend. When the
/// channel is disabled the log entry will be discarded.
void operator()(small_str_buffer &&str)
{
if (!enabled()) {
return;
}
// Send the log entry to the backend.
log_formatter& formatter = log_sink.get_formatter();
detail::log_entry entry = {
&log_sink,
[&formatter](detail::log_entry_metadata&& metadata,
fmt::memory_buffer& buffer) {
formatter.format(std::move(metadata), buffer);
},
{std::chrono::high_resolution_clock::now(),
{ctx_value, should_print_context},
nullptr,
{},
log_name,
log_tag,
std::move(str)}};
backend.push(std::move(entry));
}
@ -161,6 +188,7 @@ public:
std::move(store),
log_name,
log_tag,
small_str_buffer(),
std::vector<uint8_t>(buffer, buffer + len)}};
backend.push(std::move(entry));
}
@ -187,7 +215,8 @@ public:
nullptr,
{},
log_name,
log_tag}};
log_tag,
small_str_buffer()}};
backend.push(std::move(entry));
}
@ -218,7 +247,8 @@ public:
fmtstr,
std::move(store),
log_name,
log_tag}};
log_tag,
small_str_buffer()}};
backend.push(std::move(entry));
}

@ -483,14 +483,7 @@ public:
bool ret = true;
pdu->N_bytes = static_cast<uint32_t>(n_recv);
if (flags & MSG_NOTIFICATION) {
// Received notification
union sctp_notification* notification = (union sctp_notification*)pdu->msg;
if (notification->sn_header.sn_type == SCTP_SHUTDOWN_EVENT) {
// Socket Shutdown
ret = false;
}
}
// SCTP notifications handled in callback.
func(std::move(pdu), from, sri, flags);
return ret;
}

@ -100,6 +100,10 @@ inline int LTE_PCAP_PACK_MAC_CONTEXT_TO_BUFFER(MAC_Context_Info_t* context, uint
buffer[offset++] = MAC_LTE_CRC_STATUS_TAG;
buffer[offset++] = context->crcStatusOK;
/* CARRIER ID */
buffer[offset++] = MAC_LTE_CARRIER_ID_TAG;
buffer[offset++] = context->cc_idx;
/* NB-IoT mode tag */
buffer[offset++] = MAC_LTE_NB_MODE_TAG;
buffer[offset++] = context->nbiotMode;

@ -297,6 +297,15 @@ static void chest_ul_estimate(srslte_chest_ul_t* q,
uint32_t n_prb[SRSLTE_NOF_SLOTS_PER_SF],
srslte_chest_ul_res_t* res)
{
// Calculate CFO
if (nslots == 2) {
float phase = cargf(srslte_vec_dot_prod_conj_ccc(
&q->pilot_estimates[0 * nrefs_sym], &q->pilot_estimates[1 * nrefs_sym], nrefs_sym));
res->cfo_hz = phase / (2.0f * (float)M_PI * 0.0005f);
} else {
res->cfo_hz = NAN;
}
// Calculate time alignment error
float ta_err = 0.0f;
if (meas_ta_en) {

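The new CFO estimate reduces to a one-line relation: the DMRS symbols of the two slots are spaced \(\Delta t = 0.5\) ms apart, so the measured phase drift maps to frequency as

\hat{f}_{\mathrm{CFO}} = \frac{\Delta\varphi}{2\pi\,\Delta t}, \qquad \Delta t = 0.5\ \mathrm{ms}

Since \(\Delta\varphi\) is only unambiguous within \((-\pi, \pi]\), the estimator covers \(\pm 1/(2\Delta t) = \pm 1\) kHz; with a single DMRS slot (nslots != 2) no estimate is possible and cfo_hz is set to NAN.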
@ -121,7 +121,7 @@ uint32_t csi_rs_cinit(const srslte_carrier_nr_t* carrier,
const srslte_csi_rs_nzp_resource_t* resource,
uint32_t l)
{
uint32_t n = slot_cfg->idx % SRSLTE_NSLOTS_PER_FRAME_NR(carrier->numerology);
uint32_t n = SRSLTE_SLOT_NR_MOD(carrier->numerology, slot_cfg->idx);
uint32_t n_id = resource->scrambling_id;
return ((SRSLTE_NSYMB_PER_SLOT_NR * n + l + 1UL) * (2UL * n_id + 1UL) << 10UL) + n_id;

@ -265,7 +265,7 @@ int srslte_dmrs_pucch_format1_estimate(const srslte_pucch_nr_t* q,
}
// Measure CFO
res->cfo = NAN; // Not implemented
res->cfo_hz = NAN; // Not implemented
// Do averaging here
// ... Not implemented
@ -298,7 +298,7 @@ static uint32_t dmrs_pucch_format2_cinit(const srslte_carrier_nr_t* car
const srslte_slot_cfg_t* slot,
uint32_t l)
{
uint64_t n = slot->idx;
uint64_t n = SRSLTE_SLOT_NR_MOD(carrier->numerology, slot->idx);
uint64_t n_id = (cfg->scrambling_id_present) ? cfg->scambling_id : carrier->id;
return SRSLTE_SEQUENCE_MOD((((SRSLTE_NSYMB_PER_SLOT_NR * n + l + 1UL) * (2UL * n_id + 1UL)) << 17UL) + 2UL * n_id);

@ -460,8 +460,6 @@ static uint32_t srslte_dmrs_sch_seed(const srslte_carrier_nr_t* carrier,
{
const srslte_dmrs_sch_cfg_t* dmrs_cfg = &cfg->dmrs;
slot_idx = slot_idx % SRSLTE_NSLOTS_PER_FRAME_NR(carrier->numerology);
// Calculate scrambling IDs
uint32_t n_id = carrier->id;
uint32_t n_scid = (grant->n_scid) ? 1 : 0;
@ -584,8 +582,8 @@ int srslte_dmrs_sch_put_sf(srslte_dmrs_sch_t* q,
// Iterate symbols
for (uint32_t i = 0; i < nof_symbols; i++) {
uint32_t l = symbols[i]; // Symbol index inside the slot
uint32_t slot_idx = slot_cfg->idx; // Slot index in the frame
uint32_t l = symbols[i]; // Symbol index inside the slot
uint32_t slot_idx = SRSLTE_SLOT_NR_MOD(q->carrier.numerology, slot_cfg->idx); // Slot index in the frame
uint32_t cinit = srslte_dmrs_sch_seed(&q->carrier, pdsch_cfg, grant, slot_idx, l);
srslte_dmrs_sch_put_symbol(q, pdsch_cfg, grant, cinit, delta, &sf_symbols[symbol_sz * l]);
@ -708,7 +706,8 @@ int srslte_dmrs_sch_estimate(srslte_dmrs_sch_t* q,
for (uint32_t i = 0; i < nof_symbols; i++) {
uint32_t l = symbols[i]; // Symbol index inside the slot
uint32_t cinit = srslte_dmrs_sch_seed(&q->carrier, pdsch_cfg, grant, slot_cfg->idx, l);
uint32_t cinit = srslte_dmrs_sch_seed(
&q->carrier, pdsch_cfg, grant, SRSLTE_SLOT_NR_MOD(q->carrier.numerology, slot_cfg->idx), l);
nof_pilots_x_symbol = srslte_dmrs_sch_get_symbol(
q, pdsch_cfg, grant, cinit, delta, &sf_symbols[symbol_sz * l], &q->pilot_estimates[nof_pilots_x_symbol * i]);

@ -155,3 +155,49 @@ float srslte_symbol_distance_s(uint32_t l0, uint32_t l1, uint32_t numerology)
// Return symbol distance in microseconds
return (N << numerology) * SRSLTE_LTE_TS;
}
bool srslte_tdd_nr_is_dl(const srslte_tdd_config_nr_t* cfg, uint32_t numerology, uint32_t slot_idx)
{
if (cfg == NULL) {
return false;
}
// Calculate slot index within the TDD overall period
uint32_t slot_x_ms = 1U << numerology; // Number of slots per millisecond
uint32_t period_sum = (cfg->pattern1.period_ms + cfg->pattern2.period_ms) * slot_x_ms; // Total period sum
uint32_t slot_idx_period = slot_idx % period_sum; // Slot index within the period
// Select pattern
const srslte_tdd_pattern_t* pattern = &cfg->pattern1;
if ((slot_idx_period >= cfg->pattern1.period_ms * slot_x_ms)) {
pattern = &cfg->pattern2;
slot_idx_period -= cfg->pattern1.period_ms * slot_x_ms; // Remove pattern 1 offset
}
return (slot_idx_period < pattern->nof_dl_slots ||
(slot_idx_period == pattern->nof_dl_slots && pattern->nof_dl_symbols != 0));
}
bool srslte_tdd_nr_is_ul(const srslte_tdd_config_nr_t* cfg, uint32_t numerology, uint32_t slot_idx)
{
if (cfg == NULL) {
return false;
}
// Calculate slot index within the TDD overall period
uint32_t slot_x_ms = 1U << numerology; // Number of slots per millisecond
uint32_t period_sum = (cfg->pattern1.period_ms + cfg->pattern2.period_ms) * slot_x_ms; // Total period sum
uint32_t slot_idx_period = slot_idx % period_sum; // Slot index within the period
// Select pattern
const srslte_tdd_pattern_t* pattern = &cfg->pattern1;
if ((slot_idx_period >= cfg->pattern1.period_ms * slot_x_ms)) {
pattern = &cfg->pattern2;
slot_idx_period -= cfg->pattern1.period_ms * slot_x_ms; // Remove pattern 1 offset
}
// Calculate slot in which UL starts
uint32_t start_ul = (pattern->period_ms * slot_x_ms - pattern->nof_ul_slots) - 1;
return (slot_idx_period > start_ul || (slot_idx_period == start_ul && pattern->nof_ul_symbols != 0));
}

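A usage sketch of the two helpers with the pattern this commit configures in phy_cfg_nr_t (10 ms period, 7 DL slots + 6 DL symbols, 2 UL slots + 4 UL symbols; numerology 0, i.e. one slot per millisecond):

srslte_tdd_config_nr_t tdd = {};
tdd.pattern1.period_ms      = 10;
tdd.pattern1.nof_dl_slots   = 7;
tdd.pattern1.nof_dl_symbols = 6;
tdd.pattern1.nof_ul_slots   = 2;
tdd.pattern1.nof_ul_symbols = 4;

for (uint32_t slot = 0; slot < 10; slot++) {
  // DL: slots 0..7 (slot 7 via its leading DL symbols)
  // UL: slots 7..9 (slot 7 via its trailing UL symbols)
  bool dl = srslte_tdd_nr_is_dl(&tdd, 0, slot);
  bool ul = srslte_tdd_nr_is_ul(&tdd, 0, slot);
  (void)dl;
  (void)ul;
}

Slot 7 is the flexible slot: both helpers return true for it because it carries DL symbols at its start and UL symbols at its end.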
@ -799,7 +799,7 @@ static int dci_nr_format_1_0_to_str(const srslte_dci_dl_nr_t* dci, char* str, ui
// Downlink assignment index 2 bits
if (dci->rnti_type == srslte_rnti_type_c || dci->rnti_type == srslte_rnti_type_tc) {
len = srslte_print_check(str, str_len, len, "sii=%d ", dci->sii);
len = srslte_print_check(str, str_len, len, "dai=%d ", dci->dai);
}
// TPC command for scheduled PUCCH 2 bits

@ -37,16 +37,17 @@
/**
* @brief Recursive Y_p_n function
*/
static uint32_t srslte_pdcch_calculate_Y_p_n(uint32_t coreset_id, uint16_t rnti, int n)
static uint32_t srslte_pdcch_calculate_Y_p_n(uint32_t coreset_id, uint16_t rnti, uint32_t n)
{
static const uint32_t A_p[3] = {39827, 39829, 39839};
const uint32_t D = 65537;
if (n < 0) {
return rnti;
uint32_t Y_p_n = (uint32_t)rnti;
for (uint32_t i = 0; i <= n; i++) {
Y_p_n = (A_p[coreset_id % 3] * Y_p_n) % D;
}
return (A_p[coreset_id % 3] * srslte_pdcch_calculate_Y_p_n(coreset_id, rnti, n - 1)) % D;
return Y_p_n;
}
/**

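The rewrite replaces the tail recursion with a loop computing the same TS 38.213 Section 10.1 sequence, avoiding the signed n parameter and the per-call overhead:

Y_{p,-1} = n_{\mathrm{RNTI}}, \qquad Y_{p,n} = \bigl(A_p \cdot Y_{p,n-1}\bigr) \bmod D, \qquad D = 65537

The loop runs from i = 0 to n inclusive, so it applies the modular multiplication n + 1 times, exactly matching the recursion depth of the old implementation.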
@ -685,13 +685,10 @@ static uint32_t srslte_pdsch_nr_grant_info(const srslte_sch_cfg_nr_t* cfg,
uint32_t len = 0;
len = srslte_print_check(str, str_len, len, "rnti=0x%x", grant->rnti);
char freq_str[SRSLTE_MAX_PRB_NR + 1] = {};
for (uint32_t i = 0, nof_prb = 0; i < SRSLTE_MAX_PRB_NR && nof_prb < grant->nof_prb; i++) {
uint32_t first_prb = SRSLTE_MAX_PRB_NR;
for (uint32_t i = 0; i < SRSLTE_MAX_PRB_NR && first_prb == SRSLTE_MAX_PRB_NR; i++) {
if (grant->prb_idx[i]) {
freq_str[i] = '1';
nof_prb++;
} else {
freq_str[i] = '0';
first_prb = i;
}
}
@ -699,9 +696,10 @@ static uint32_t srslte_pdsch_nr_grant_info(const srslte_sch_cfg_nr_t* cfg,
len = srslte_print_check(str,
str_len,
len,
",k0=%d,freq=%s,symb=%d:%d,mapping=%s",
",k0=%d,prb=%d:%d,symb=%d:%d,mapping=%s",
grant->k,
freq_str,
first_prb,
grant->nof_prb,
grant->S,
grant->L,
srslte_sch_mapping_type_to_str(grant->mapping));

@ -80,7 +80,7 @@ int srslte_pucch_nr_alpha_idx(const srslte_carrier_nr_t* carrier,
}
// Compute number of slot
uint32_t n_slot = slot->idx % SRSLTE_NSLOTS_PER_FRAME_NR(carrier->numerology);
uint32_t n_slot = SRSLTE_SLOT_NR_MOD(carrier->numerology, slot->idx);
// Generate pseudo-random sequence
uint32_t cinit = cfg->hopping_id_present ? cfg->hopping_id : carrier->id;

@ -524,6 +524,11 @@ uint32_t srslte_pusch_rx_info(srslte_pusch_cfg_t* cfg,
len = srslte_print_check(str, str_len, len, ", ta=%.1f us", chest_res->ta_us);
}
// Append CFO information if available
if (!isnan(chest_res->cfo_hz)) {
len = srslte_print_check(str, str_len, len, ", cfo=%.1f hz", chest_res->cfo_hz);
}
// Append EVM measurement if available
if (cfg->meas_evm_en) {
len = srslte_print_check(str, str_len, len, ", evm=%.1f %%", res->evm * 100);

@ -1246,13 +1246,10 @@ static uint32_t srslte_pusch_nr_grant_info(const srslte_sch_cfg_nr_t* cfg,
uint32_t len = 0;
len = srslte_print_check(str, str_len, len, "rnti=0x%x", grant->rnti);
char freq_str[SRSLTE_MAX_PRB_NR + 1] = {};
for (uint32_t i = 0, nof_prb = 0; i < SRSLTE_MAX_PRB_NR && nof_prb < grant->nof_prb; i++) {
uint32_t first_prb = SRSLTE_MAX_PRB_NR;
for (uint32_t i = 0; i < SRSLTE_MAX_PRB_NR && first_prb == SRSLTE_MAX_PRB_NR; i++) {
if (grant->prb_idx[i]) {
freq_str[i] = '1';
nof_prb++;
} else {
freq_str[i] = '0';
first_prb = i;
}
}
@ -1260,9 +1257,10 @@ static uint32_t srslte_pusch_nr_grant_info(const srslte_sch_cfg_nr_t* cfg,
len = srslte_print_check(str,
str_len,
len,
",k2=%d,freq=%s,S=%d,L=%d,mapping=%s",
",k2=%d,prb=%d:%d,S=%d,L=%d,mapping=%s",
grant->k,
freq_str,
first_prb,
grant->nof_prb,
grant->S,
grant->L,
srslte_sch_mapping_type_to_str(grant->mapping));
@ -1293,6 +1291,10 @@ uint32_t srslte_pusch_nr_rx_info(const srslte_pusch_nr_t* q,
{
uint32_t len = 0;
if (q == NULL || cfg == NULL || grant == NULL || str == NULL || str_len == 0) {
return 0;
}
len += srslte_pusch_nr_grant_info(cfg, grant, &str[len], str_len - len);
if (q->evm_buffer != NULL) {
@ -1311,6 +1313,11 @@ uint32_t srslte_pusch_nr_rx_info(const srslte_pusch_nr_t* q,
}
if (res != NULL) {
srslte_uci_data_nr_t uci_data = {};
uci_data.cfg = cfg->uci;
uci_data.value = res[0].uci;
len += srslte_uci_nr_info(&uci_data, &str[len], str_len - len);
len = srslte_print_check(str, str_len, len, ",crc={", 0);
for (uint32_t i = 0; i < SRSLTE_MAX_CODEWORDS; i++) {
if (grant->tb[i].enabled) {
@ -1335,13 +1342,25 @@ uint32_t srslte_pusch_nr_rx_info(const srslte_pusch_nr_t* q,
uint32_t srslte_pusch_nr_tx_info(const srslte_pusch_nr_t* q,
const srslte_sch_cfg_nr_t* cfg,
const srslte_sch_grant_nr_t* grant,
const srslte_uci_value_nr_t* uci_value,
char* str,
uint32_t str_len)
{
uint32_t len = 0;
if (q == NULL || cfg == NULL || grant == NULL || str == NULL || str_len == 0) {
return 0;
}
len += srslte_pusch_nr_grant_info(cfg, grant, &str[len], str_len - len);
if (uci_value != NULL) {
srslte_uci_data_nr_t uci_data = {};
uci_data.cfg = cfg->uci;
uci_data.value = *uci_value;
len += srslte_uci_nr_info(&uci_data, &str[len], str_len - len);
}
if (q->meas_time_en) {
len = srslte_print_check(str, str_len, len, ", t=%d us", q->meas_time_us);
}

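The grant trace now prints a compact start:count PRB pair instead of one character per PRB. An illustrative before/after for the PUSCH format string above (values are made up for the example):

// before: rnti=0x46,k2=4,freq=111100000...0,S=0,L=14,mapping=A
// after:  rnti=0x46,k2=4,prb=0:4,S=0,L=14,mapping=A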
@ -163,7 +163,7 @@ uint32_t ra_re_x_prb(const srslte_cell_t* cell, srslte_dl_sf_cfg_t* sf, uint32_t
/** Compute PRB allocation for Downlink as defined in 7.1.6 of 36.213
* Decode grant->type?_alloc to grant
* This function only reads grant->type?_alloc and grant->alloc_type fields.
* This function only reads dci->type?_alloc (e.g. rbg_bitmask, mode, riv) and dci->alloc_type fields.
* This function only writes grant->prb_idx and grant->nof_prb.
*/
/** Compute PRB allocation for Downlink as defined in 7.1.6 of 36.213 */

@ -20,6 +20,7 @@
*/
#include "srslte/phy/phch/ra_nr.h"
#include "srslte/phy/phch/csi.h"
#include "srslte/phy/phch/pdsch_nr.h"
#include "srslte/phy/phch/ra_dl_nr.h"
#include "srslte/phy/phch/ra_ul_nr.h"
@ -35,6 +36,8 @@ typedef struct {
#define RA_NR_MCS_SIZE_TABLE2 28
#define RA_NR_MCS_SIZE_TABLE3 29
#define RA_NR_TBS_SIZE_TABLE 93
#define RA_NR_BETA_OFFSET_HARQACK_SIZE 32
#define RA_NR_BETA_OFFSET_CSI_SIZE 32
#define RA_NR_READ_TABLE(N) \
static double srslte_ra_nr_R_from_mcs_table##N(uint32_t mcs_idx) \
@ -117,6 +120,23 @@ static const uint32_t ra_nr_tbs_table[RA_NR_TBS_SIZE_TABLE] = {
1192, 1224, 1256, 1288, 1320, 1352, 1416, 1480, 1544, 1608, 1672, 1736, 1800, 1864, 1928, 2024, 2088, 2152, 2216,
2280, 2408, 2472, 2536, 2600, 2664, 2728, 2792, 2856, 2976, 3104, 3240, 3368, 3496, 3624, 3752, 3824};
/**
* TS 38.213 V15.10.0 Table 9.3-1: Mapping of beta_offset values for HARQ-ACK information and the index signalled by
* higher layers
*/
static const float ra_nr_beta_offset_ack_table[RA_NR_BETA_OFFSET_HARQACK_SIZE] = {
1.000f, 2.000f, 2.500f, 3.125f, 4.000f, 5.000f, 6.250f, 8.000f, 10.000f, 12.625f, 15.875f,
20.000f, 31.000f, 50.000f, 80.000f, 126.000f, NAN, NAN, NAN, NAN, NAN, NAN,
NAN, NAN, NAN, NAN, NAN, NAN, NAN, NAN, NAN, NAN};
/**
* TS 38.213 V15.10.0 Table 9.3-2: Mapping of beta_offset values for CSI and the index signalled by higher layers
*/
static const float ra_nr_beta_offset_csi_table[RA_NR_BETA_OFFSET_CSI_SIZE] = {
1.125f, 1.250f, 1.375f, 1.625f, 1.750f, 2.000f, 2.250f, 2.500f, 2.875f, 3.125f, 3.500f,
4.000f, 5.000f, 6.250f, 8.000f, 10.000f, 12.625f, 15.875f, 20.000f, NAN, NAN, NAN,
NAN, NAN, NAN, NAN, NAN, NAN, NAN, NAN, NAN, NAN};
typedef enum { ra_nr_table_1 = 0, ra_nr_table_2, ra_nr_table_3 } ra_nr_table_t;
static ra_nr_table_t ra_nr_select_table_pusch_noprecoding(srslte_mcs_table_t mcs_table,
@ -645,3 +665,94 @@ int srslte_ra_ul_dci_to_grant_nr(const srslte_carrier_nr_t* carrier,
return SRSLTE_SUCCESS;
}
/*
* Implements clauses related to HARQ-ACK beta offset selection from the section `9.3 UCI reporting in physical uplink
* shared channel`
*/
static float ra_ul_beta_offset_ack_semistatic(const srslte_beta_offsets_t* beta_offsets,
const srslte_uci_cfg_nr_t* uci_cfg)
{
// Select Beta Offset index from the number of HARQ-ACK bits
uint32_t beta_offset_index = beta_offsets->ack_index1;
if (uci_cfg->o_ack > 11) {
beta_offset_index = beta_offsets->ack_index3;
} else if (uci_cfg->o_ack > 2) {
beta_offset_index = beta_offsets->ack_index2;
}
// Protect table boundary
if (beta_offset_index >= RA_NR_BETA_OFFSET_HARQACK_SIZE) {
ERROR("Beta offset index for HARQ-ACK (%d) for O_ack=%d exceeds table size (%d)",
beta_offset_index,
uci_cfg->o_ack,
RA_NR_BETA_OFFSET_HARQACK_SIZE);
return NAN;
}
// Select beta offset from Table 9.3-1
return ra_nr_beta_offset_ack_table[beta_offset_index];
}
/*
 * Implements clauses related to CSI beta offset selection from the section `9.3 UCI reporting in physical uplink
* shared channel`
*/
static float ra_ul_beta_offset_csi_semistatic(const srslte_beta_offsets_t* beta_offsets,
const srslte_uci_cfg_nr_t* uci_cfg,
bool part2)
{
// Calculate number of CSI bits; CSI part 2 is not supported.
uint32_t O_csi = part2 ? 0 : srslte_csi_part1_nof_bits(uci_cfg->csi, uci_cfg->nof_csi);
// Select beta offset index from the number of CSI bits
uint32_t beta_offset_index = part2 ? beta_offsets->csi2_index1 : beta_offsets->csi1_index1;
if (O_csi > 11) {
beta_offset_index = part2 ? beta_offsets->csi2_index2 : beta_offsets->csi1_index2;
}
// Protect table boundary
if (beta_offset_index >= RA_NR_BETA_OFFSET_CSI_SIZE) {
ERROR("Beta offset index for CSI (%d) for O_csi=%d exceeds table size (%d)",
beta_offset_index,
O_csi,
RA_NR_BETA_OFFSET_CSI_SIZE);
return NAN;
}
// Select beta offset from Table 9.3-2
return ra_nr_beta_offset_csi_table[beta_offset_index];
}
int srslte_ra_ul_set_grant_uci_nr(const srslte_sch_hl_cfg_nr_t* pusch_hl_cfg,
const srslte_uci_cfg_nr_t* uci_cfg,
srslte_sch_cfg_nr_t* pusch_cfg)
{
// Select beta offsets
pusch_cfg->beta_harq_ack_offset = ra_ul_beta_offset_ack_semistatic(&pusch_hl_cfg->beta_offsets, uci_cfg);
if (!isnormal(pusch_cfg->beta_harq_ack_offset)) {
return SRSLTE_ERROR;
}
pusch_cfg->beta_csi_part1_offset = ra_ul_beta_offset_csi_semistatic(&pusch_hl_cfg->beta_offsets, uci_cfg, false);
if (!isnormal(pusch_cfg->beta_csi_part1_offset)) {
return SRSLTE_ERROR;
}
pusch_cfg->beta_csi_part2_offset = ra_ul_beta_offset_csi_semistatic(&pusch_hl_cfg->beta_offsets, uci_cfg, true);
if (!isnormal(pusch_cfg->beta_csi_part2_offset)) {
return SRSLTE_ERROR;
}
// pusch_cfg->beta_csi_part2_offset = pusch_hl_cfg->beta_offset_csi2;
pusch_cfg->scaling = pusch_hl_cfg->scaling;
if (!isnormal(pusch_cfg->scaling)) {
ERROR("Invalid Scaling (%f)", pusch_cfg->scaling);
return SRSLTE_ERROR;
}
// Copy UCI configuration
pusch_cfg->uci = *uci_cfg;
return SRSLTE_SUCCESS;
}

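A usage sketch tying the new configuration path together, using the defaults this commit sets in phy_cfg_nr_t (ack_index* = 9, csi*_index* = 6, scaling = 1): index 9 maps through Table 9.3-1 to a HARQ-ACK beta of 12.625, and index 6 through Table 9.3-2 to a CSI beta of 2.25.

srslte_sch_hl_cfg_nr_t pusch_hl_cfg = {};
pusch_hl_cfg.beta_offsets.ack_index1  = 9; // -> 12.625 from Table 9.3-1
pusch_hl_cfg.beta_offsets.csi1_index1 = 6; // -> 2.25 from Table 9.3-2
pusch_hl_cfg.beta_offsets.csi2_index1 = 6; // -> 2.25 from Table 9.3-2
pusch_hl_cfg.scaling = 1.0f;               // "scaling: f1"

srslte_uci_cfg_nr_t uci_cfg = {};
uci_cfg.o_ack = 2; // two HARQ-ACK bits -> ack_index1 applies

srslte_sch_cfg_nr_t pusch_cfg = {};
if (srslte_ra_ul_set_grant_uci_nr(&pusch_hl_cfg, &uci_cfg, &pusch_cfg) != SRSLTE_SUCCESS) {
  // configuration rejected (NAN beta offset or invalid scaling)
}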
@ -202,6 +202,63 @@ static int uci_nr_unpack_ack_sr(const srslte_uci_cfg_nr_t* cfg, uint8_t* sequenc
return A;
}
static int uci_nr_pack_ack_sr_csi(const srslte_uci_cfg_nr_t* cfg, const srslte_uci_value_nr_t* value, uint8_t* sequence)
{
int A = 0;
// Append ACK bits
srslte_vec_u8_copy(&sequence[A], value->ack, cfg->o_ack);
A += cfg->o_ack;
// Append SR bits
uint8_t* bits = &sequence[A];
srslte_bit_unpack(value->sr, &bits, cfg->o_sr);
A += cfg->o_sr;
// Append CSI bits
int n = srslte_csi_part1_pack(cfg->csi, value->csi, cfg->nof_csi, bits, SRSLTE_UCI_NR_MAX_NOF_BITS - A);
if (n < SRSLTE_SUCCESS) {
ERROR("Packing CSI part 1");
return SRSLTE_ERROR;
}
A += n;
if (SRSLTE_DEBUG_ENABLED && srslte_verbose >= SRSLTE_VERBOSE_INFO && !handler_registered) {
UCI_NR_INFO_TX("Packed UCI bits: ");
srslte_vec_fprint_byte(stdout, sequence, A);
}
return A;
}
static int uci_nr_unpack_ack_sr_csi(const srslte_uci_cfg_nr_t* cfg, uint8_t* sequence, srslte_uci_value_nr_t* value)
{
int A = 0;
// Unpack ACK bits
srslte_vec_u8_copy(value->ack, &sequence[A], cfg->o_ack);
A += cfg->o_ack;
// Unpack SR bits
uint8_t* bits = &sequence[A];
value->sr = srslte_bit_pack(&bits, cfg->o_sr);
A += cfg->o_sr;
if (SRSLTE_DEBUG_ENABLED && srslte_verbose >= SRSLTE_VERBOSE_INFO && !handler_registered) {
UCI_NR_INFO_RX("Unpacked UCI bits: ");
srslte_vec_fprint_byte(stdout, sequence, A);
}
// Unpack CSI bits
int n = srslte_csi_part1_unpack(cfg->csi, cfg->nof_csi, bits, SRSLTE_UCI_NR_MAX_NOF_BITS - A, value->csi);
if (n < SRSLTE_SUCCESS) {
ERROR("Packing CSI part 1");
return SRSLTE_ERROR;
}
return A;
}
static int uci_nr_A(const srslte_uci_cfg_nr_t* cfg)
{
int o_csi = srslte_csi_part1_nof_bits(cfg->csi, cfg->nof_csi);
@ -236,8 +293,7 @@ static int uci_nr_pack_pucch(const srslte_uci_cfg_nr_t* cfg, const srslte_uci_va
}
// 6.3.1.1.3 HARQ-ACK/SR and CSI
ERROR("HARQ-ACK/SR and CSI encoding are not implemented");
return SRSLTE_ERROR;
return uci_nr_pack_ack_sr_csi(cfg, value, sequence);
}
static int uci_nr_unpack_pucch(const srslte_uci_cfg_nr_t* cfg, uint8_t* sequence, srslte_uci_value_nr_t* value)
@ -256,8 +312,7 @@ static int uci_nr_unpack_pucch(const srslte_uci_cfg_nr_t* cfg, uint8_t* sequence
}
// 6.3.1.1.3 HARQ-ACK/SR and CSI
ERROR("HARQ-ACK/SR and CSI encoding are not implemented");
return SRSLTE_ERROR;
return uci_nr_unpack_ack_sr_csi(cfg, sequence, value);
}
static int uci_nr_encode_1bit(srslte_uci_nr_t* q, const srslte_uci_cfg_nr_t* cfg, uint8_t* o, uint32_t E)
@ -1099,7 +1154,7 @@ static int uci_nr_pusch_Q_prime_csi1(const srslte_uci_nr_pusch_cfg_t* cfg, uint3
return SRSLTE_ERROR;
}
uint32_t M_uci_sum = 0;
uint32_t M_uci_sum = 0;
for (uint32_t l = 0; l < SRSLTE_NSYMB_PER_SLOT_NR; l++) {
M_uci_sum += cfg->M_uci_sc[l];
}

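The new pack/unpack pair serializes PUCCH UCI in a fixed order, so the total payload agrees with uci_nr_A(); a sketch of the layout:

// Packed sequence layout (one uint8_t per bit):
//   [ o_ack HARQ-ACK bits | o_sr SR bits | CSI part 1 bits ]
int A = cfg->o_ack + cfg->o_sr + srslte_csi_part1_nof_bits(cfg->csi, cfg->nof_csi);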
@ -30,20 +30,30 @@ private:
uhd::usrp::multi_usrp::sptr usrp = nullptr;
const uhd::fs_path TREE_DBOARD_RX_FRONTEND_NAME = "/mboards/0/dboards/A/rx_frontends/A/name";
const std::chrono::milliseconds FE_RX_RESET_SLEEP_TIME_MS = std::chrono::milliseconds(2000UL);
uhd::stream_args_t stream_args;
double lo_freq_tx_hz = 0.0;
double lo_freq_rx_hz = 0.0;
uhd::stream_args_t stream_args = {};
double lo_freq_tx_hz = 0.0;
double lo_freq_rx_hz = 0.0;
uhd_error usrp_make_internal(const uhd::device_addr_t& dev_addr) override
{
// Destroy any previous USRP instance
usrp = nullptr;
Debug("Making USRP object with args '" << dev_addr.to_string() << "'");
UHD_SAFE_C_SAVE_ERROR(this, usrp = uhd::usrp::multi_usrp::make(dev_addr);)
}
uhd_error set_tx_subdev(const std::string& string) { UHD_SAFE_C_SAVE_ERROR(this, usrp->set_tx_subdev_spec(string);) }
uhd_error set_rx_subdev(const std::string& string) { UHD_SAFE_C_SAVE_ERROR(this, usrp->set_rx_subdev_spec(string);) }
uhd_error set_tx_subdev(const std::string& string)
{
Info("Setting tx_subdev_spec to '" << string << "'");
UHD_SAFE_C_SAVE_ERROR(this, usrp->set_tx_subdev_spec(string);)
}
uhd_error set_rx_subdev(const std::string& string)
{
Info("Setting rx_subdev_spec to '" << string << "'");
UHD_SAFE_C_SAVE_ERROR(this, usrp->set_rx_subdev_spec(string);)
}
uhd_error test_ad936x_device(uint32_t nof_channels)
{
@ -113,8 +123,14 @@ private:
}
public:
rf_uhd_generic(){};
virtual ~rf_uhd_generic(){};
rf_uhd_generic() { Info("RF UHD Generic instance constructed"); }
virtual ~rf_uhd_generic()
{
rx_stream = nullptr;
tx_stream = nullptr;
usrp = nullptr;
Debug("RF UHD closed Ok");
}
uhd_error usrp_make(const uhd::device_addr_t& dev_addr_, uint32_t nof_channels) override
{
uhd::device_addr_t dev_addr = dev_addr_;
@ -163,7 +179,6 @@ public:
// Set transmitter subdev spec if specified
if (not tx_subdev.empty()) {
printf("Setting tx_subdev_spec to '%s'\n", tx_subdev.c_str());
err = set_tx_subdev(tx_subdev);
if (err != UHD_ERROR_NONE) {
return err;
@ -172,7 +187,6 @@ public:
// Set receiver subdev spec if specified
if (not rx_subdev.empty()) {
printf("Setting rx_subdev_spec to '%s'\n", rx_subdev.c_str());
err = set_rx_subdev(rx_subdev);
if (err != UHD_ERROR_NONE) {
return err;
@ -256,6 +270,7 @@ public:
}
uhd_error set_time_unknown_pps(const uhd::time_spec_t& timespec) override
{
Debug("Setting Time at next PPS...");
UHD_SAFE_C_SAVE_ERROR(this, usrp->set_time_unknown_pps(timespec);)
}
uhd_error get_time_now(uhd::time_spec_t& timespec) override
@ -264,10 +279,11 @@ public:
}
uhd_error set_sync_source(const std::string& source) override
{
Debug("Setting PPS source to '" << source << "'");
#if UHD_VERSION < 3140099
UHD_SAFE_C_SAVE_ERROR(this, usrp->set_clock_source(source); usrp->set_time_source(source);)
UHD_SAFE_C_SAVE_ERROR(this, usrp->set_clock_source(source); usrp->set_time_source(source);)
#else
UHD_SAFE_C_SAVE_ERROR(this, usrp->set_sync_source(source, source);)
UHD_SAFE_C_SAVE_ERROR(this, usrp->set_sync_source(source, source);)
#endif
}
uhd_error get_gain_range(uhd::gain_range_t& tx_gain_range, uhd::gain_range_t& rx_gain_range) override
@ -276,38 +292,61 @@ public:
}
uhd_error set_master_clock_rate(double rate) override
{
Debug("Setting master clock rate to " << rate / 1e6 << " MHz");
UHD_SAFE_C_SAVE_ERROR(this, usrp->set_master_clock_rate(rate);)
}
uhd_error set_rx_rate(double rate) override { UHD_SAFE_C_SAVE_ERROR(this, usrp->set_rx_rate(rate);) }
uhd_error set_tx_rate(double rate) override { UHD_SAFE_C_SAVE_ERROR(this, usrp->set_tx_rate(rate);) }
uhd_error set_rx_rate(double rate) override
{
Debug("Setting Rx Rate to " << rate / 1e6 << "MHz");
UHD_SAFE_C_SAVE_ERROR(this, usrp->set_rx_rate(rate);)
}
uhd_error set_tx_rate(double rate) override
{
Debug("Setting Tx Rate to " << rate / 1e6 << "MHz");
UHD_SAFE_C_SAVE_ERROR(this, usrp->set_tx_rate(rate);)
}
uhd_error set_command_time(const uhd::time_spec_t& timespec) override
{
UHD_SAFE_C_SAVE_ERROR(this, usrp->set_command_time(timespec);)
}
uhd_error get_rx_stream(size_t& max_num_samps) override
{
UHD_SAFE_C_SAVE_ERROR(this, rx_stream = nullptr; rx_stream = usrp->get_rx_stream(stream_args);
max_num_samps = rx_stream->get_max_num_samps();
if (max_num_samps == 0UL) {
last_error = "The maximum number of receive samples is zero.";
return UHD_ERROR_VALUE;
})
Debug("Creating Rx stream");
UHD_SAFE_C_SAVE_ERROR(
this, rx_stream = nullptr; rx_stream = usrp->get_rx_stream(stream_args);
max_num_samps = rx_stream->get_max_num_samps();
if (max_num_samps == 0UL) {
last_error = "The maximum number of receive samples is zero.";
return UHD_ERROR_VALUE;
})
}
uhd_error get_tx_stream(size_t& max_num_samps) override
{
UHD_SAFE_C_SAVE_ERROR(this, tx_stream = nullptr; tx_stream = usrp->get_tx_stream(stream_args);
max_num_samps = tx_stream->get_max_num_samps();
if (max_num_samps == 0UL) {
last_error = "The maximum number of transmit samples is zero.";
return UHD_ERROR_VALUE;
})
Debug("Creating Tx stream");
UHD_SAFE_C_SAVE_ERROR(
this, tx_stream = nullptr; tx_stream = usrp->get_tx_stream(stream_args);
max_num_samps = tx_stream->get_max_num_samps();
if (max_num_samps == 0UL) {
last_error = "The maximum number of transmit samples is zero.";
return UHD_ERROR_VALUE;
})
}
uhd_error set_tx_gain(size_t ch, double gain) override
{
Debug("Setting channel " << ch << " Tx gain to " << gain << " dB");
UHD_SAFE_C_SAVE_ERROR(this, usrp->set_tx_gain(gain, ch);)
}
uhd_error set_rx_gain(size_t ch, double gain) override
{
Debug("Setting channel " << ch << " Rx gain to " << gain << " dB");
UHD_SAFE_C_SAVE_ERROR(this, usrp->set_rx_gain(gain, ch);)
}
uhd_error set_tx_gain(size_t ch, double gain) override { UHD_SAFE_C_SAVE_ERROR(this, usrp->set_tx_gain(gain, ch);) }
uhd_error set_rx_gain(size_t ch, double gain) override { UHD_SAFE_C_SAVE_ERROR(this, usrp->set_rx_gain(gain, ch);) }
uhd_error get_rx_gain(double& gain) override { UHD_SAFE_C_SAVE_ERROR(this, gain = usrp->get_rx_gain();) }
uhd_error get_tx_gain(double& gain) override { UHD_SAFE_C_SAVE_ERROR(this, gain = usrp->get_tx_gain();) }
uhd_error set_tx_freq(uint32_t ch, double target_freq, double& actual_freq) override
{
Debug("Setting channel " << ch << " Tx frequency to " << target_freq / 1e6 << " MHz");
// Create Tune request
uhd::tune_request_t tune_request(target_freq);
@ -323,6 +362,7 @@ public:
}
uhd_error set_rx_freq(uint32_t ch, double target_freq, double& actual_freq) override
{
Debug("Setting channel " << ch << " Rx frequency to " << target_freq / 1e6 << " MHz");
// Create Tune request
uhd::tune_request_t tune_request(target_freq);

@ -120,17 +120,21 @@ static const std::chrono::milliseconds RF_UHD_IMP_ASYNCH_MSG_SLEEP_MS = std::chr
static const uint32_t RF_UHD_IMP_MAX_RX_TRIALS = 100;
struct rf_uhd_handler_t {
size_t id;
std::string devname;
std::shared_ptr<rf_uhd_safe_interface> uhd = nullptr;
srslte_rf_info_t info;
size_t rx_nof_samples = 0;
size_t tx_nof_samples = 0;
double tx_rate = 1.92e6;
double rx_rate = 1.92e6;
bool dynamic_master_rate = true;
uint32_t nof_rx_channels = 0;
uint32_t nof_tx_channels = 0;
srslte_rf_info_t info;
size_t rx_nof_samples = 0;
size_t tx_nof_samples = 0;
double tx_rate = 1.92e6;
double rx_rate = 1.92e6;
bool dynamic_master_rate = true;
uint32_t nof_rx_channels = 0;
uint32_t nof_tx_channels = 0;
std::array<double, SRSLTE_MAX_CHANNELS> tx_freq = {};
std::array<double, SRSLTE_MAX_CHANNELS> rx_freq = {};
srslte_rf_error_handler_t uhd_error_handler = nullptr;
void* uhd_error_handler_arg = nullptr;
@ -152,6 +156,10 @@ struct rf_uhd_handler_t {
#endif /* HAVE_ASYNC_THREAD */
};
// Store UHD Handler instances as shared pointer to avoid new/delete
static std::map<size_t, std::shared_ptr<rf_uhd_handler_t> > rf_uhd_map;
static size_t uhd_handler_counter = 0;
#if UHD_VERSION < 31100
static void (*handler)(const char*);
@ -286,10 +294,8 @@ static void* async_thread(void* h)
}
#endif
static inline void uhd_free(rf_uhd_handler_t* h)
static inline void uhd_free(rf_uhd_handler_t* handler)
{
rf_uhd_handler_t* handler = (rf_uhd_handler_t*)h;
// NULL handler, return
if (handler == nullptr) {
return;
@ -303,7 +309,8 @@ static inline void uhd_free(rf_uhd_handler_t* h)
}
#endif
delete handler;
// Erase element from MAP
rf_uhd_map.erase(handler->id);
}
void rf_uhd_suppress_stdout(void* h)
@ -462,6 +469,7 @@ const char* rf_uhd_devname(void* h)
bool rf_uhd_rx_wait_lo_locked(void* h)
{
rf_uhd_handler_t* handler = (rf_uhd_handler_t*)h;
Debug("Waiting for Rx LO Locked");
// wait for clock source to lock
std::string sensor_name = "lo_locked";
@ -528,7 +536,15 @@ int rf_uhd_stop_rx_stream(void* h)
rf_uhd_handler_t* handler = (rf_uhd_handler_t*)h;
std::unique_lock<std::mutex> lock(handler->rx_mutex);
return rf_uhd_stop_rx_stream_unsafe(handler);
if (rf_uhd_stop_rx_stream_unsafe(handler) < SRSLTE_SUCCESS) {
return SRSLTE_ERROR;
}
// Make sure the Rx stream is flushed
lock.unlock(); // Flush has its own lock
rf_uhd_flush_buffer(h);
return SRSLTE_SUCCESS;
}
void rf_uhd_flush_buffer(void* h)
@ -569,21 +585,8 @@ int rf_uhd_open(char* args, void** h)
return rf_uhd_open_multi(args, h, 1);
}
int rf_uhd_open_multi(char* args, void** h, uint32_t nof_channels)
static int uhd_init(rf_uhd_handler_t* handler, char* args, uint32_t nof_channels)
{
// Check valid handler pointer
if (h == nullptr) {
return SRSLTE_ERROR_INVALID_INPUTS;
}
if (nof_channels > SRSLTE_MAX_CHANNELS) {
ERROR("Error opening UHD: maximum number of channels exceeded (%d>%d)", nof_channels, SRSLTE_MAX_CHANNELS);
return SRSLTE_ERROR;
}
rf_uhd_handler_t* handler = new rf_uhd_handler_t;
*h = handler;
// Disable fast-path (U/L/O) messages
setenv("UHD_LOG_FASTPATH_DISABLE", "1", 0);
@ -685,6 +688,32 @@ int rf_uhd_open_multi(char* args, void** h, uint32_t nof_channels)
}
handler->current_master_clock = device_addr.cast("master_clock_rate", 0.0);
// Parse default frequencies
for (uint32_t i = 0; i < nof_channels; i++) {
// Parse Tx frequency
if (i == 0 and device_addr.has_key("tx_freq")) {
handler->tx_freq[i] = device_addr.cast("tx_freq", handler->tx_freq[i]);
device_addr.pop("tx_freq");
} else {
std::string key = "tx_freq" + std::to_string(i);
if (device_addr.has_key(key)) {
handler->tx_freq[i] = device_addr.cast(key, handler->tx_freq[i]);
device_addr.pop(key);
}
}
// Parse Rx frequency
if (i == 0 and device_addr.has_key("rx_freq")) {
handler->rx_freq[i] = device_addr.cast("rx_freq", handler->rx_freq[i]);
} else {
std::string key = "rx_freq" + std::to_string(i);
if (device_addr.has_key(key)) {
handler->rx_freq[i] = device_addr.cast("rx_freq" + std::to_string(i), handler->rx_freq[i]);
device_addr.pop(key);
}
}
}
// Set dynamic master clock rate configuration
if (device_addr.has_key("type")) {
handler->dynamic_master_rate = RH_UHD_IMP_FIX_MASTER_CLOCK_RATE_DEVICE_LIST.count(device_addr["type"]) == 0;
@ -715,7 +744,6 @@ int rf_uhd_open_multi(char* args, void** h, uint32_t nof_channels)
// Make USRP
if (handler->uhd->usrp_make(device_addr, nof_channels) != UHD_ERROR_NONE) {
print_usrp_error(handler);
uhd_free(handler);
return SRSLTE_ERROR;
}
@ -786,6 +814,7 @@ int rf_uhd_open_multi(char* args, void** h, uint32_t nof_channels)
handler->nof_rx_channels = nof_channels;
handler->nof_tx_channels = nof_channels;
// Set default Tx/Rx rates
if (handler->uhd->set_rx_rate(handler->rx_rate) != UHD_ERROR_NONE) {
print_usrp_error(handler);
return SRSLTE_ERROR;
@ -795,6 +824,7 @@ int rf_uhd_open_multi(char* args, void** h, uint32_t nof_channels)
return SRSLTE_ERROR;
}
// Reset timestamps
if (nof_channels > 1 and clock_src != "gpsdo") {
handler->uhd->set_time_unknown_pps(uhd::time_spec_t());
}
@ -809,6 +839,27 @@ int rf_uhd_open_multi(char* args, void** h, uint32_t nof_channels)
return SRSLTE_ERROR;
}
// Tune LOs if the default frequency is provided
bool require_wait_rx_lock = false;
for (uint32_t i = 0; i < nof_channels; i++) {
if (std::isnormal(handler->rx_freq[i])) {
if (handler->uhd->set_rx_freq(i, handler->rx_freq[i], handler->rx_freq[i]) != UHD_ERROR_NONE) {
print_usrp_error(handler);
return SRSLTE_ERROR;
}
rf_uhd_rx_wait_lo_locked(handler);
require_wait_rx_lock = true;
}
}
for (uint32_t i = 0; i < nof_channels; i++) {
if (std::isnormal(handler->tx_freq[i])) {
if (handler->uhd->set_tx_freq(i, handler->tx_freq[i], handler->tx_freq[i]) != UHD_ERROR_NONE) {
print_usrp_error(handler);
return SRSLTE_ERROR;
}
}
}
// Populate RF device info
uhd::gain_range_t tx_gain_range;
uhd::gain_range_t rx_gain_range;
@ -841,6 +892,37 @@ int rf_uhd_open_multi(char* args, void** h, uint32_t nof_channels)
return SRSLTE_SUCCESS;
}
int rf_uhd_open_multi(char* args, void** h, uint32_t nof_channels)
{
// Check valid handler pointer
if (h == nullptr) {
return SRSLTE_ERROR_INVALID_INPUTS;
}
if (nof_channels > SRSLTE_MAX_CHANNELS) {
ERROR("Error opening UHD: maximum number of channels exceeded (%d>%d)", nof_channels, SRSLTE_MAX_CHANNELS);
return SRSLTE_ERROR;
}
// Create UHD handler
rf_uhd_map[uhd_handler_counter] = std::make_shared<rf_uhd_handler_t>();
rf_uhd_handler_t* handler = rf_uhd_map[uhd_handler_counter].get();
handler->id = uhd_handler_counter;
uhd_handler_counter++;
*h = handler;
// Initialise UHD handler
if (uhd_init(handler, args, nof_channels) < SRSLTE_SUCCESS) {
ERROR("uhd_init failed, freeing...");
// Free/Close UHD handler properly
uhd_free(handler);
*h = nullptr;
return SRSLTE_ERROR;
}
return SRSLTE_SUCCESS;
}
int rf_uhd_close(void* h)
{
// Makes sure Tx is ended
@ -861,7 +943,7 @@ int rf_uhd_close(void* h)
static inline void rf_uhd_set_master_clock_rate_unsafe(rf_uhd_handler_t* handler, double rate)
{
// Set master clock rate if it is allowed and change is required
if (handler->dynamic_master_rate && handler->current_master_clock != rate) {
if (handler->dynamic_master_rate and handler->current_master_clock != rate) {
if (handler->uhd->set_master_clock_rate(rate) != UHD_ERROR_NONE) {
print_usrp_error(handler);
}
@ -1087,44 +1169,61 @@ srslte_rf_info_t* rf_uhd_get_info(void* h)
return info;
}
static bool rf_uhd_set_freq_ch(rf_uhd_handler_t* handler, uint32_t ch, double& freq, bool is_tx)
{
double& curr_freq = (is_tx) ? handler->tx_freq[ch] : handler->rx_freq[ch];
// Skip if frequency is unchanged
if (round(freq) == round(curr_freq)) {
return false;
}
// Set frequency
if (is_tx) {
if (handler->uhd->set_tx_freq(ch, freq, curr_freq) != UHD_ERROR_NONE) {
print_usrp_error(handler);
}
} else {
if (handler->uhd->set_rx_freq(ch, freq, curr_freq) != UHD_ERROR_NONE) {
print_usrp_error(handler);
}
}
return true;
}
double rf_uhd_set_rx_freq(void* h, uint32_t ch, double freq)
{
bool require_rx_wait_lo_locked = false;
rf_uhd_handler_t* handler = (rf_uhd_handler_t*)h;
if (ch < handler->nof_rx_channels) {
if (handler->uhd->set_rx_freq(ch, freq, freq) != UHD_ERROR_NONE) {
print_usrp_error(handler);
return SRSLTE_ERROR;
}
require_rx_wait_lo_locked |= rf_uhd_set_freq_ch(handler, ch, freq, false);
} else {
for (uint32_t i = 0; i < handler->nof_rx_channels; i++) {
if (handler->uhd->set_rx_freq(i, freq, freq) != UHD_ERROR_NONE) {
print_usrp_error(handler);
return SRSLTE_ERROR;
}
require_rx_wait_lo_locked |= rf_uhd_set_freq_ch(handler, i, freq, false);
}
}
rf_uhd_rx_wait_lo_locked(handler);
return freq;
// wait for LO Locked
if (require_rx_wait_lo_locked) {
rf_uhd_rx_wait_lo_locked(handler);
}
return handler->rx_freq[ch % handler->nof_rx_channels];
}
double rf_uhd_set_tx_freq(void* h, uint32_t ch, double freq)
{
rf_uhd_handler_t* handler = (rf_uhd_handler_t*)h;
if (ch < handler->nof_tx_channels) {
if (handler->uhd->set_tx_freq(ch, freq, freq) != UHD_ERROR_NONE) {
print_usrp_error(handler);
return SRSLTE_ERROR;
}
rf_uhd_set_freq_ch(handler, ch, freq, true);
} else {
for (uint32_t i = 0; i < handler->nof_tx_channels; i++) {
if (handler->uhd->set_tx_freq(i, freq, freq) != UHD_ERROR_NONE) {
print_usrp_error(handler);
return SRSLTE_ERROR;
}
rf_uhd_set_freq_ch(handler, i, freq, true);
}
}
return freq;
return handler->tx_freq[ch % handler->nof_tx_channels];
}
void rf_uhd_get_time(void* h, time_t* secs, double* frac_secs)

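With the frequency parsing added to uhd_init(), the LOs can now be pre-tuned straight from the device arguments, and later retunes to the same frequency become no-ops. A hedged usage sketch (device-argument keys as parsed above; the type=b200 string is illustrative):

void* h = NULL;
// tx_freq/rx_freq (or tx_freqN/rx_freqN per channel) pre-tune the LOs at open time:
if (rf_uhd_open_multi((char*)"type=b200,tx_freq=2.565e9,rx_freq=2.565e9", &h, 1) == SRSLTE_SUCCESS) {
  // Retuning to (approximately) the same value skips the hardware call:
  // rf_uhd_set_freq_ch() compares round(freq) against the cached per-channel
  // frequency and only waits for Rx LO lock when a channel actually retuned.
  rf_uhd_set_rx_freq(h, 0, 2.565e9);
  rf_uhd_close(h);
}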
@ -120,17 +120,18 @@ protected:
public:
std::string last_error;
virtual uhd_error usrp_make(const uhd::device_addr_t& dev_addr, uint32_t nof_channels) = 0;
virtual uhd_error get_mboard_name(std::string& mboard_name) = 0;
virtual uhd_error get_mboard_sensor_names(std::vector<std::string>& sensors) = 0;
virtual uhd_error get_rx_sensor_names(std::vector<std::string>& sensors) = 0;
virtual uhd_error get_sensor(const std::string& sensor_name, double& sensor_value) = 0;
virtual uhd_error get_sensor(const std::string& sensor_name, bool& sensor_value) = 0;
virtual uhd_error get_rx_sensor(const std::string& sensor_name, bool& sensor_value) = 0;
virtual uhd_error set_time_unknown_pps(const uhd::time_spec_t& timespec) = 0;
virtual uhd_error get_time_now(uhd::time_spec_t& timespec) = 0;
virtual uhd_error usrp_make(const uhd::device_addr_t& dev_addr, uint32_t nof_channels) = 0;
virtual uhd_error get_mboard_name(std::string& mboard_name) = 0;
virtual uhd_error get_mboard_sensor_names(std::vector<std::string>& sensors) = 0;
virtual uhd_error get_rx_sensor_names(std::vector<std::string>& sensors) = 0;
virtual uhd_error get_sensor(const std::string& sensor_name, double& sensor_value) = 0;
virtual uhd_error get_sensor(const std::string& sensor_name, bool& sensor_value) = 0;
virtual uhd_error get_rx_sensor(const std::string& sensor_name, bool& sensor_value) = 0;
virtual uhd_error set_time_unknown_pps(const uhd::time_spec_t& timespec) = 0;
virtual uhd_error get_time_now(uhd::time_spec_t& timespec) = 0;
uhd_error start_rx_stream(double delay)
{
Debug("Starting Rx stream");
uhd::time_spec_t time_spec;
uhd_error err = get_time_now(time_spec);
if (err != UHD_ERROR_NONE) {
@ -146,7 +147,9 @@ public:
}
uhd_error stop_rx_stream()
{
Debug("Stopping Rx stream");
UHD_SAFE_C_SAVE_ERROR(this, uhd::stream_cmd_t stream_cmd(uhd::stream_cmd_t::STREAM_MODE_STOP_CONTINUOUS);
stream_cmd.stream_now = true;
rx_stream->issue_stream_cmd(stream_cmd);)
}
virtual uhd_error set_sync_source(const std::string& source) = 0;
@ -156,15 +159,13 @@ public:
virtual uhd_error set_tx_rate(double rate) = 0;
virtual uhd_error set_command_time(const uhd::time_spec_t& timespec) = 0;
virtual uhd_error get_rx_stream(size_t& max_num_samps) = 0;
virtual uhd_error destroy_rx_stream() { UHD_SAFE_C_SAVE_ERROR(this, rx_stream = nullptr;) }
virtual uhd_error get_tx_stream(size_t& max_num_samps) = 0;
virtual uhd_error destroy_tx_stream() { UHD_SAFE_C_SAVE_ERROR(this, tx_stream = nullptr;) }
virtual uhd_error set_tx_gain(size_t ch, double gain) = 0;
virtual uhd_error set_rx_gain(size_t ch, double gain) = 0;
virtual uhd_error get_rx_gain(double& gain) = 0;
virtual uhd_error get_tx_gain(double& gain) = 0;
virtual uhd_error set_tx_freq(uint32_t ch, double target_freq, double& actual_freq) = 0;
virtual uhd_error set_rx_freq(uint32_t ch, double target_freq, double& actual_freq) = 0;
virtual uhd_error get_tx_stream(size_t& max_num_samps) = 0;
virtual uhd_error set_tx_gain(size_t ch, double gain) = 0;
virtual uhd_error set_rx_gain(size_t ch, double gain) = 0;
virtual uhd_error get_rx_gain(double& gain) = 0;
virtual uhd_error get_tx_gain(double& gain) = 0;
virtual uhd_error set_tx_freq(uint32_t ch, double target_freq, double& actual_freq) = 0;
virtual uhd_error set_rx_freq(uint32_t ch, double target_freq, double& actual_freq) = 0;
uhd_error receive(void** buffs,
const size_t nsamps_per_buff,
uhd::rx_metadata_t& metadata,

@ -219,39 +219,52 @@ static int ue_dl_nr_find_dci_ncce(srslte_ue_dl_nr_t* q,
srslte_pdcch_nr_res_t* pdcch_res,
uint32_t coreset_id)
{
srslte_dmrs_pdcch_measure_t m = {};
// Select debug information
srslte_ue_dl_nr_pdcch_info_t* pdcch_info = NULL;
if (q->pdcch_info_count < SRSLTE_MAX_NOF_CANDIDATES_SLOT_NR) {
pdcch_info = &q->pdcch_info[q->pdcch_info_count];
q->pdcch_info_count++;
} else {
ERROR("The UE does not expect more than %d candidates in this serving cell", SRSLTE_MAX_NOF_CANDIDATES_SLOT_NR);
return SRSLTE_ERROR;
}
SRSLTE_MEM_ZERO(pdcch_info, srslte_ue_dl_nr_pdcch_info_t, 1);
pdcch_info->coreset_id = dci_msg->coreset_id;
pdcch_info->ss_id = dci_msg->search_space;
pdcch_info->location = dci_msg->location;
srslte_dmrs_pdcch_measure_t* m = &pdcch_info->measure;
// Measures the PDCCH transmission DMRS
if (srslte_dmrs_pdcch_get_measure(&q->dmrs_pdcch[coreset_id], &dci_msg->location, &m) < SRSLTE_SUCCESS) {
if (srslte_dmrs_pdcch_get_measure(&q->dmrs_pdcch[coreset_id], &dci_msg->location, m) < SRSLTE_SUCCESS) {
ERROR("Error getting measure location L=%d, ncce=%d", dci_msg->location.L, dci_msg->location.ncce);
return SRSLTE_ERROR;
}
// If measured correlation is invalid, early return
if (!isnormal(m.norm_corr)) {
if (!isnormal(m->norm_corr)) {
INFO("Discarded PDCCH candidate L=%d;ncce=%d; Invalid measurement;", dci_msg->location.L, dci_msg->location.ncce);
return SRSLTE_SUCCESS;
}
// Compare EPRE with threshold
if (m.epre_dBfs < q->pdcch_dmrs_epre_thr) {
if (m->epre_dBfs < q->pdcch_dmrs_epre_thr) {
INFO("Discarded PDCCH candidate L=%d;ncce=%d; EPRE is too weak (%.1f<%.1f);",
dci_msg->location.L,
dci_msg->location.ncce,
m.epre_dBfs,
m->epre_dBfs,
q->pdcch_dmrs_epre_thr);
return SRSLTE_SUCCESS;
}
// Compare DMRS correlation with threshold
if (m.norm_corr < q->pdcch_dmrs_corr_thr) {
if (m->norm_corr < q->pdcch_dmrs_corr_thr) {
INFO("Discarded PDCCH candidate L=%d;ncce=%d; Correlation is too low (%.1f<%.1f); EPRE=%+.2f; RSRP=%+.2f;",
dci_msg->location.L,
dci_msg->location.ncce,
m.norm_corr,
m->norm_corr,
q->pdcch_dmrs_corr_thr,
m.epre_dBfs,
m.rsrp_dBfs);
m->epre_dBfs,
m->rsrp_dBfs);
return SRSLTE_SUCCESS;
}
@ -267,6 +280,9 @@ static int ue_dl_nr_find_dci_ncce(srslte_ue_dl_nr_t* q,
return SRSLTE_ERROR;
}
// Save information
pdcch_info->result = *pdcch_res;
return SRSLTE_SUCCESS;
}
@ -290,15 +306,8 @@ static int ue_dl_nr_find_dl_dci_ss(srslte_ue_dl_nr_t* q,
const srslte_slot_cfg_t* slot_cfg,
const srslte_search_space_t* search_space,
uint16_t rnti,
srslte_rnti_type_t rnti_type,
srslte_dci_msg_nr_t* dci_msg_list,
uint32_t nof_dci_msg)
srslte_rnti_type_t rnti_type)
{
// Check inputs
if (q == NULL || slot_cfg == NULL || dci_msg_list == NULL) {
return SRSLTE_ERROR_INVALID_INPUTS;
}
// Select CORESET
uint32_t coreset_id = search_space->coreset_id;
if (coreset_id >= SRSLTE_UE_DL_NR_MAX_NOF_CORESET || !q->cfg.coreset_present[coreset_id]) {
@ -313,8 +322,6 @@ static int ue_dl_nr_find_dl_dci_ss(srslte_ue_dl_nr_t* q,
return SRSLTE_ERROR;
}
uint32_t count = 0;
// Hard-coded values
srslte_dci_format_nr_t dci_format = srslte_dci_format_nr_1_0;
@ -326,17 +333,19 @@ static int ue_dl_nr_find_dl_dci_ss(srslte_ue_dl_nr_t* q,
}
// Iterate all possible aggregation levels
for (uint32_t L = 0; L < SRSLTE_SEARCH_SPACE_NOF_AGGREGATION_LEVELS_NR && count < nof_dci_msg; L++) {
for (uint32_t L = 0; L < SRSLTE_SEARCH_SPACE_NOF_AGGREGATION_LEVELS_NR && q->dci_msg_count < SRSLTE_MAX_DCI_MSG_NR;
L++) {
// Calculate possible PDCCH DCI candidates
uint32_t candidates[SRSLTE_SEARCH_SPACE_MAX_NOF_CANDIDATES_NR] = {};
int nof_candidates = srslte_pdcch_nr_locations_coreset(coreset, search_space, rnti, L, slot_cfg->idx, candidates);
int nof_candidates = srslte_pdcch_nr_locations_coreset(
coreset, search_space, rnti, L, SRSLTE_SLOT_NR_MOD(q->carrier.numerology, slot_cfg->idx), candidates);
if (nof_candidates < SRSLTE_SUCCESS) {
ERROR("Error calculating DCI candidate location");
return SRSLTE_ERROR;
}
// Iterate over the candidates
for (int ncce_idx = 0; ncce_idx < nof_candidates && count < nof_dci_msg; ncce_idx++) {
for (int ncce_idx = 0; ncce_idx < nof_candidates && q->dci_msg_count < SRSLTE_MAX_DCI_MSG_NR; ncce_idx++) {
// Set DCI context
srslte_dci_msg_nr_t dci_msg = {};
dci_msg.location.L = L;
@ -379,19 +388,19 @@ static int ue_dl_nr_find_dl_dci_ss(srslte_ue_dl_nr_t* q,
}
// Check if the grant exists already in the message list
if (find_dci_msg(dci_msg_list, count, &dci_msg)) {
if (find_dci_msg(q->dci_msg, q->dci_msg_count, &dci_msg)) {
// The same DCI is in the list, keep moving
continue;
}
INFO("Found DCI in L=%d,ncce=%d", dci_msg.location.L, dci_msg.location.ncce);
// Append DCI message into the list
dci_msg_list[count] = dci_msg;
count++;
q->dci_msg[q->dci_msg_count] = dci_msg;
q->dci_msg_count++;
}
}
return (int)count;
return SRSLTE_SUCCESS;
}
int srslte_ue_dl_nr_find_dl_dci(srslte_ue_dl_nr_t* q,
@ -401,9 +410,6 @@ int srslte_ue_dl_nr_find_dl_dci(srslte_ue_dl_nr_t* q,
srslte_dci_dl_nr_t* dci_dl_list,
uint32_t nof_dci_msg)
{
int count = 0;
srslte_dci_msg_nr_t dci_msg_list[SRSLTE_MAX_DCI_MSG_NR] = {};
// Check inputs
if (q == NULL || slot_cfg == NULL || dci_dl_list == NULL) {
return SRSLTE_ERROR_INVALID_INPUTS;
@ -412,45 +418,43 @@ int srslte_ue_dl_nr_find_dl_dci(srslte_ue_dl_nr_t* q,
// Limit maximum number of DCI messages to find
nof_dci_msg = SRSLTE_MIN(nof_dci_msg, SRSLTE_MAX_DCI_MSG_NR);
// Reset grant and blind search information counters
q->dci_msg_count = 0;
q->pdcch_info_count = 0;
// If the UE looks for a RAR and RA search space is provided, search for it
if (q->cfg.ra_search_space_present && rnti_type == srslte_rnti_type_ra) {
// Find DCIs in the RA search space
int ret = ue_dl_nr_find_dl_dci_ss(q, slot_cfg, &q->cfg.ra_search_space, rnti, rnti_type, dci_msg_list, nof_dci_msg);
int ret = ue_dl_nr_find_dl_dci_ss(q, slot_cfg, &q->cfg.ra_search_space, rnti, rnti_type);
if (ret < SRSLTE_SUCCESS) {
ERROR("Error searching RAR DCI");
return SRSLTE_ERROR;
}
// Count the found DCIs
count += ret;
} else {
// Iterate all possible common and UE search spaces
for (uint32_t i = 0; i < SRSLTE_UE_DL_NR_MAX_NOF_SEARCH_SPACE && count < nof_dci_msg; i++) {
for (uint32_t i = 0; i < SRSLTE_UE_DL_NR_MAX_NOF_SEARCH_SPACE && q->dci_msg_count < nof_dci_msg; i++) {
// Skip search space if not present
if (!q->cfg.search_space_present[i]) {
continue;
}
// Find DCIs in the selected search space
int ret = ue_dl_nr_find_dl_dci_ss(
q, slot_cfg, &q->cfg.search_space[i], rnti, rnti_type, &dci_msg_list[count], nof_dci_msg - count);
int ret = ue_dl_nr_find_dl_dci_ss(q, slot_cfg, &q->cfg.search_space[i], rnti, rnti_type);
if (ret < SRSLTE_SUCCESS) {
ERROR("Error searching DCI");
return SRSLTE_ERROR;
}
// Count the found DCIs
count += ret;
}
}
// Convert found DCI messages into DL grants
for (uint32_t i = 0; i < count; i++) {
const srslte_coreset_t* coreset = &q->cfg.coreset[dci_msg_list[i].coreset_id];
srslte_dci_nr_format_1_0_unpack(&q->carrier, coreset, &dci_msg_list[i], &dci_dl_list[i]);
uint32_t dci_msg_count = SRSLTE_MIN(nof_dci_msg, q->dci_msg_count);
for (uint32_t i = 0; i < dci_msg_count; i++) {
const srslte_coreset_t* coreset = &q->cfg.coreset[q->dci_msg[i].coreset_id];
srslte_dci_nr_format_1_0_unpack(&q->carrier, coreset, &q->dci_msg[i], &dci_dl_list[i]);
}
return count;
return (int)dci_msg_count;
}
int srslte_ue_dl_nr_find_ul_dci(srslte_ue_dl_nr_t* q,

@ -239,12 +239,16 @@ void srslte_ue_ul_nr_free(srslte_ue_ul_nr_t* q)
SRSLTE_MEM_ZERO(q, srslte_ue_ul_nr_t, 1);
}
int srslte_ue_ul_nr_pusch_info(const srslte_ue_ul_nr_t* q, const srslte_sch_cfg_nr_t* cfg, char* str, uint32_t str_len)
int srslte_ue_ul_nr_pusch_info(const srslte_ue_ul_nr_t* q,
const srslte_sch_cfg_nr_t* cfg,
const srslte_uci_value_nr_t* uci_value,
char* str,
uint32_t str_len)
{
int len = 0;
// Append PDSCH info
len += srslte_pusch_nr_tx_info(&q->pusch, cfg, &cfg->grant, &str[len], str_len - len);
len += srslte_pusch_nr_tx_info(&q->pusch, cfg, &cfg->grant, uci_value, &str[len], str_len - len);
return len;
}

@ -289,7 +289,15 @@ bool radio::rx_now(rf_buffer_interface& buffer, rf_timestamp_interface& rxd_time
std::unique_lock<std::mutex> lock(rx_mutex);
bool ret = true;
rf_buffer_t buffer_rx;
uint32_t ratio = SRSLTE_MAX(1, decimators[0].ratio);
// Extract decimation ratio. As the decimation may take some time to set a new ratio, deactivate the decimation and
// keep receiving samples to avoid stalling the RX stream
uint32_t ratio = 1; // No decimation by default
if (decimator_busy) {
lock.unlock();
} else if (decimators[0].ratio > 1) {
ratio = decimators[0].ratio;
}
// Calculate number of samples, considering the decimation ratio
uint32_t nof_samples = buffer.get_nof_samples() * ratio;
@ -683,6 +691,7 @@ void radio::set_rx_srate(const double& srate)
}
// If fix sampling rate...
if (std::isnormal(fix_srate_hz)) {
decimator_busy = true;
std::unique_lock<std::mutex> lock(rx_mutex);
// If the sampling rate was not set, set it
@ -698,6 +707,7 @@ void radio::set_rx_srate(const double& srate)
srslte_resampler_fft_init(&decimators[ch], SRSLTE_RESAMPLER_MODE_DECIMATE, ratio);
}
decimator_busy = false;
} else {
for (srslte_rf_t& rf_device : rf_devices) {
cur_rx_srate = srslte_rf_set_rx_srate(&rf_device, srate);
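// A hedged sketch (hypothetical names, not the srsLTE types) of the
// decimator_busy handshake introduced above: while the producer reconfigures
// the decimator, consumers bypass it and keep draining samples instead of
// blocking on the reconfiguration. An std::atomic flag is used here to make
// the cross-thread read explicit.
#include <atomic>
#include <mutex>
struct decimating_rx {
  std::atomic<bool> busy{false};
  std::mutex        mtx;
  unsigned          ratio = 1;
  void set_ratio(unsigned new_ratio)
  {
    busy.store(true); // tell readers to skip decimation meanwhile
    std::lock_guard<std::mutex> lk(mtx);
    ratio = new_ratio; // the potentially slow re-init happens here
    busy.store(false);
  }
  unsigned effective_ratio()
  {
    if (busy.load()) {
      return 1; // reconfiguring: receive undecimated for now
    }
    std::lock_guard<std::mutex> lk(mtx);
    return ratio > 1 ? ratio : 1;
  }
};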

@ -341,6 +341,7 @@ static void* radio_thread_run(void* arg)
radio_args.nof_carriers = 1;
radio_args.device_args = radios_args[r].empty() ? "auto" : radios_args[r];
radio_args.rx_gain = agc_enable ? -1 : rf_gain;
radio_args.tx_gain = agc_enable ? -1 : rf_gain;
radio_args.device_name = radio_device;
if (radio_h[r]->init(radio_args, &phy) != SRSLTE_SUCCESS) {

@ -99,6 +99,11 @@ void backend_worker::process_log_entry(detail::log_entry&& entry)
assert(entry.format_func && "Invalid format function");
fmt_buffer.clear();
// Already formatted strings in the foreground are passed to the formatter as the fmtstring.
if (entry.metadata.small_str.size()) {
entry.metadata.fmtstring = entry.metadata.small_str.data();
}
entry.format_func(std::move(entry.metadata), fmt_buffer);
if (auto err_str = entry.s->write({fmt_buffer.data(), fmt_buffer.size()})) {
@ -108,8 +113,7 @@ void backend_worker::process_log_entry(detail::log_entry&& entry)
void backend_worker::process_outstanding_entries()
{
assert(!running_flag &&
"Cannot process outstanding entries while thread is running");
assert(!running_flag && "Cannot process outstanding entries while thread is running");
while (true) {
auto item = queue.timed_pop(1);

@ -20,7 +20,7 @@
*/
#include "srslte/srslog/event_trace.h"
#include "sinks/single_write_file_sink.h"
#include "sinks/buffered_file_sink.h"
#include "srslte/srslog/srslog.h"
#include <ctime>
@ -71,16 +71,14 @@ bool srslog::event_trace_init(const std::string& filename, std::size_t capacity)
return false;
}
auto tracer_sink = std::unique_ptr<sink>(new single_write_file_sink(
filename, capacity, get_default_log_formatter()));
auto tracer_sink = std::unique_ptr<sink>(new buffered_file_sink(filename, capacity, get_default_log_formatter()));
if (!install_custom_sink(sink_name, std::move(tracer_sink))) {
return false;
}
if (sink* s = find_sink(sink_name)) {
log_channel& c =
fetch_log_channel("event_trace_channel", *s, {"TRACE", '\0', false});
tracer = &c;
log_channel& c = fetch_log_channel("event_trace_channel", *s, {"TRACE", '\0', false});
tracer = &c;
return true;
}
@ -91,7 +89,7 @@ bool srslog::event_trace_init(const std::string& filename, std::size_t capacity)
static void format_time(char* buffer, size_t len)
{
std::time_t t = std::time(nullptr);
std::tm lt{};
std::tm lt{};
::localtime_r(&t, &lt);
std::strftime(buffer, len, "%FT%T", &lt);
}
@ -106,11 +104,7 @@ void trace_duration_begin(const std::string& category, const std::string& name)
char fmt_time[24];
format_time(fmt_time, sizeof(fmt_time));
(*tracer)("[%s] [TID:%0u] Entering \"%s\": %s",
fmt_time,
(unsigned)::pthread_self(),
category,
name);
(*tracer)("[%s] [TID:%0u] Entering \"%s\": %s", fmt_time, (unsigned)::pthread_self(), category, name);
}
void trace_duration_end(const std::string& category, const std::string& name)
@ -121,11 +115,7 @@ void trace_duration_end(const std::string& category, const std::string& name)
char fmt_time[24];
format_time(fmt_time, sizeof(fmt_time));
(*tracer)("[%s] [TID:%0u] Leaving \"%s\": %s",
fmt_time,
(unsigned)::pthread_self(),
category,
name);
(*tracer)("[%s] [TID:%0u] Leaving \"%s\": %s", fmt_time, (unsigned)::pthread_self(), category, name);
}
} // namespace srslog
@ -133,17 +123,16 @@ void trace_duration_end(const std::string& category, const std::string& name)
/// Private implementation of the complete event destructor.
srslog::detail::scoped_complete_event::~scoped_complete_event()
{
if (!tracer)
if (!tracer) {
return;
}
auto end = std::chrono::steady_clock::now();
unsigned long long diff =
std::chrono::duration_cast<std::chrono::microseconds>(end - start)
.count();
auto end = std::chrono::steady_clock::now();
unsigned long long diff = std::chrono::duration_cast<std::chrono::microseconds>(end - start).count();
(*tracer)("[TID:%0u] Complete event \"%s\" (duration %lld us): %s",
(unsigned)::pthread_self(),
category,
diff,
name);
small_str_buffer str;
// Limit the category and name strings to a predefined length so everything fits in a small string.
fmt::format_to(str, "{:.32} {:.16}, {}", category, name, diff);
str.push_back('\0');
(*tracer)(std::move(str));
}

@ -0,0 +1,89 @@
/**
*
* \section COPYRIGHT
*
* Copyright 2013-2020 Software Radio Systems Limited
*
* By using this file, you agree to the terms and conditions set
* forth in the LICENSE file which can be found at the top level of
* the distribution.
*
*/
#ifndef SRSLOG_BUFFERED_FILE_SINK_H
#define SRSLOG_BUFFERED_FILE_SINK_H
#include "file_utils.h"
#include "srslte/srslog/sink.h"
namespace srslog {
/// This class is a wrapper of a file handle that buffers the input data into an internal buffer and writes its contents
/// to the file once the buffer is full or on object destruction.
class buffered_file_sink : public sink
{
public:
buffered_file_sink(std::string filename, std::size_t capacity, std::unique_ptr<log_formatter> f) :
sink(std::move(f)), filename(std::move(filename))
{
buffer.reserve(capacity);
}
~buffered_file_sink() override { flush_buffer(); }
buffered_file_sink(const buffered_file_sink& other) = delete;
buffered_file_sink& operator=(const buffered_file_sink& other) = delete;
detail::error_string write(detail::memory_buffer input_buffer) override
{
// Create a new file the first time we hit this method.
if (!is_file_created) {
is_file_created = true;
assert(!handler && "No handler should be created yet");
if (auto err_str = handler.create(filename)) {
return err_str;
}
}
if (has_room_for(input_buffer.size())) {
buffer.insert(buffer.end(), input_buffer.begin(), input_buffer.end());
return {};
}
// Buffer full: flush it first, then keep the incoming entry so it is not lost.
if (auto err_str = flush_buffer()) {
return err_str;
}
buffer.insert(buffer.end(), input_buffer.begin(), input_buffer.end());
return {};
}
detail::error_string flush() override
{
if (auto err = flush_buffer()) {
return err;
}
return handler.flush();
}
private:
/// Returns true if the internal buffer has room for the specified input size,
/// otherwise returns false.
bool has_room_for(std::size_t s) const { return s + buffer.size() < buffer.capacity(); }
/// Flushes the buffer contents into the file.
detail::error_string flush_buffer()
{
if (buffer.empty()) {
return {};
}
auto err = handler.write(detail::memory_buffer(buffer.data(), buffer.size()));
buffer.clear();
return err;
}
private:
const std::string filename;
file_utils::file handler;
std::vector<char> buffer;
bool is_file_created = false;
};
} // namespace srslog
#endif // SRSLOG_BUFFERED_FILE_SINK_H
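// A hedged usage sketch mirroring the event_trace_init() change above; the
// sink and channel names ("my_trace_sink"/"my_trace_channel") are
// illustrative, not part of the API.
#include "sinks/buffered_file_sink.h"
#include "srslte/srslog/srslog.h"
#include <memory>
#include <string>
static bool setup_buffered_trace(const std::string& filename, std::size_t capacity)
{
  auto s = std::unique_ptr<srslog::sink>(
      new srslog::buffered_file_sink(filename, capacity, srslog::get_default_log_formatter()));
  if (!srslog::install_custom_sink("my_trace_sink", std::move(s))) {
    return false;
  }
  if (srslog::sink* sp = srslog::find_sink("my_trace_sink")) {
    srslog::log_channel& c = srslog::fetch_log_channel("my_trace_channel", *sp, {"TRACE", '\0', false});
    c("buffered sink installed");
    return true;
  }
  return false;
}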

@ -1,99 +0,0 @@
/**
* Copyright 2013-2021 Software Radio Systems Limited
*
* This file is part of srsLTE.
*
* srsLTE is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as
* published by the Free Software Foundation, either version 3 of
* the License, or (at your option) any later version.
*
* srsLTE is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* A copy of the GNU Affero General Public License can be found in
* the LICENSE file in the top-level directory of this distribution
* and at http://www.gnu.org/licenses/.
*
*/
#ifndef SRSLOG_SINGLE_WRITE_FILE_SINK_H
#define SRSLOG_SINGLE_WRITE_FILE_SINK_H
#include "file_utils.h"
#include "srslte/srslog/sink.h"
namespace srslog {
/// This class is a wrapper of a file handle that stores the input data into an
/// internal buffer and writes its contents to the file once the buffer is full
/// or in object destruction.
class single_write_file_sink : public sink
{
public:
single_write_file_sink(std::string filename,
std::size_t capacity,
std::unique_ptr<log_formatter> f) :
sink(std::move(f)), filename(std::move(filename))
{
buffer.reserve(capacity);
}
~single_write_file_sink() override
{
if (!is_written) {
write_contents();
}
}
single_write_file_sink(const single_write_file_sink& other) = delete;
single_write_file_sink&
operator=(const single_write_file_sink& other) = delete;
detail::error_string write(detail::memory_buffer input_buffer) override
{
// Nothing to do when the contents have been already written.
if (is_written) {
return {};
}
if (has_room_for(input_buffer.size())) {
buffer.insert(buffer.end(), input_buffer.begin(), input_buffer.end());
return {};
}
return write_contents();
}
detail::error_string flush() override { return handler.flush(); }
private:
/// Returns true if the internal buffer has room for the specified input size,
/// otherwise returns false.
bool has_room_for(std::size_t s) const
{
return s + buffer.size() < buffer.capacity();
}
/// Writes the buffer contents into the file.
detail::error_string write_contents()
{
is_written = true;
if (auto err_str = handler.create(filename)) {
return err_str;
}
return handler.write(detail::memory_buffer(buffer.data(), buffer.size()));
}
private:
const std::string filename;
file_utils::file handler;
std::vector<char> buffer;
bool is_written = false;
};
} // namespace srslog
#endif // SRSLOG_SINGLE_WRITE_FILE_SINK_H

@ -448,7 +448,7 @@ void pdcp_entity_lte::update_rx_counts_queue(uint32_t rx_count)
// If the size of the rx_vector_info is getting very large
// Consider the FMC as lost and update the vector.
if (rx_counts_info.size() > reordering_window) {
logger.debug("Queue too large. Updating. Old FMC=%d, Old back=%d, old queue_size=%d",
logger.debug("Queue too large. Updating. Old FMC=%d, Old back=%d, old queue_size=%zu",
fmc,
rx_counts_info.back(),
rx_counts_info.size());
@ -457,16 +457,16 @@ void pdcp_entity_lte::update_rx_counts_queue(uint32_t rx_count)
rx_counts_info.pop_back();
fmc++;
}
logger.debug("Queue too large. Updating. New FMC=%d, new back=%d, new queue_size=%d",
logger.debug("Queue too large. Updating. New FMC=%d, new back=%d, new queue_size=%zu",
fmc,
rx_counts_info.back(),
rx_counts_info.size());
}
if (rx_counts_info.empty()) {
logger.info("Updated RX_COUNT info with SDU COUNT=%d, queue_size=%d, FMC=%d", rx_count, rx_counts_info.size(), fmc);
logger.info("Updated RX_COUNT info with SDU COUNT=%d, queue_size%zu, FMC=%d", rx_count, rx_counts_info.size(), fmc);
} else {
logger.info("Updated RX_COUNT info with SDU COUNT=%d, queue_size=%d, FMC=%d, back=%d",
logger.info("Updated RX_COUNT info with SDU COUNT=%d, queue_size=%zu, FMC=%d, back=%d",
rx_count,
rx_counts_info.size(),
fmc,
@ -716,7 +716,7 @@ void pdcp_entity_lte::notify_delivery(const pdcp_sn_vector_t& pdcp_sns)
return;
}
logger.info("Received delivery notification from RLC. Number of PDU notified=%ld", pdcp_sns.size());
logger.info("Received delivery notification from RLC. Number of PDU notified=%zu", pdcp_sns.size());
for (uint32_t sn : pdcp_sns) {
logger.debug("Delivery notification received for PDU with SN=%d", sn);
if (sn == UINT32_MAX) {
@ -746,7 +746,7 @@ void pdcp_entity_lte::notify_failure(const pdcp_sn_vector_t& pdcp_sns)
return;
}
logger.info("Received failure notification from RLC. Number of PDU notified=%ld", pdcp_sns.size());
logger.info("Received failure notification from RLC. Number of PDU notified=%zu", pdcp_sns.size());
for (uint32_t sn : pdcp_sns) {
logger.info("Failure notification received for PDU with SN=%d", sn);
@ -809,7 +809,7 @@ std::map<uint32_t, srslte::unique_byte_buffer_t> pdcp_entity_lte::get_buffered_p
logger.error("Buffered PDUs being requested for non-AM DRB");
return std::map<uint32_t, srslte::unique_byte_buffer_t>{};
}
logger.info("Buffered PDUs requested, buffer_size=%d", undelivered_sdus->size());
logger.info("Buffered PDUs requested, buffer_size=%zu", undelivered_sdus->size());
return undelivered_sdus->get_buffered_sdus();
}
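// The format-string fixes above all share one cause: container sizes are
// size_t, whose portable printf-style specifier is %zu. A minimal
// illustration (passing size_t through %d/%ld is undefined behaviour on
// LP64 platforms):
#include <cstdio>
#include <vector>
static void print_queue_size(const std::vector<int>& q)
{
  std::printf("queue_size=%zu\n", q.size()); // size_t needs %zu
}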

@ -448,15 +448,14 @@ int rlc_am_lte::rlc_am_lte_tx::write_sdu(unique_byte_buffer_t sdu)
return SRSLTE_ERROR;
}
// Store SDU info
logger.debug(
"Storing PDCP SDU info in queue. PDCP_SN=%d, Queue Size=%ld", sdu_pdcp_sn, undelivered_sdu_info_queue.nof_sdus());
if (undelivered_sdu_info_queue.has_pdcp_sn(sdu_pdcp_sn)) {
logger.error("PDCP SDU info already exists. SN=%d", sdu_pdcp_sn);
logger.warning("PDCP_SN=%d already marked as undelivered", sdu_pdcp_sn);
return SRSLTE_ERROR;
}
// Store SDU info
logger.debug("Marking PDCP_SN=%d as undelivered (queue_len=%ld)", sdu_pdcp_sn, undelivered_sdu_info_queue.nof_sdus());
undelivered_sdu_info_queue.add_pdcp_sdu(sdu_pdcp_sn);
return SRSLTE_SUCCESS;
}

@ -49,6 +49,8 @@ void write_pcap_nr_thread_function(srslte::mac_pcap* pcap_handle, const std::arr
int mac_pcap_eutra_test()
{
auto& pcap_logger = srslog::fetch_basic_logger("PCAP");
pcap_logger.info("In mac_pcap_eutra_test");
std::array<uint8_t, 150> tv = {
0x21, 0x08, 0x22, 0x80, 0x82, 0x1f, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x02, 0x02, 0x02, 0x02, 0x02,
0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
@ -67,16 +69,17 @@ int mac_pcap_eutra_test()
TESTASSERT(pcap_handle->open("mac_pcap_test.pcap") != SRSLTE_SUCCESS); // open again will fail
std::vector<std::thread> writer_threads;
pcap_logger.info("Start writer_threads");
for (uint32_t i = 0; i < num_threads; i++) {
writer_threads.push_back(std::thread(write_pcap_eutra_thread_function, pcap_handle.get(), tv, num_pdus_per_thread));
}
pcap_logger.info("Wait for writer_threads to finish");
// wait for threads to finish
for (std::thread& thread : writer_threads) {
thread.join();
}
pcap_logger.info("Close PCAP handle");
TESTASSERT(pcap_handle->close() == SRSLTE_SUCCESS);
TESTASSERT(pcap_handle->close() != 0); // closing twice will fail
@ -85,6 +88,8 @@ int mac_pcap_eutra_test()
int mac_pcap_nr_test()
{
auto& pcap_logger = srslog::fetch_basic_logger("PCAP");
pcap_logger.info("In mac_pcap_nr_test");
std::array<uint8_t, 11> tv = {0x42, 0x00, 0x08, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88};
uint32_t num_threads = 10;
@ -95,16 +100,19 @@ int mac_pcap_nr_test()
TESTASSERT(pcap_handle->open("mac_pcap_nr_test.pcap") != SRSLTE_SUCCESS); // open again will fail
std::vector<std::thread> writer_threads;
pcap_logger.info("Start writer_threads");
for (uint32_t i = 0; i < num_threads; i++) {
writer_threads.push_back(std::thread(write_pcap_nr_thread_function, pcap_handle.get(), tv, num_pdus_per_thread));
}
pcap_logger.info("Wait for writer_threads to finish");
// wait for threads to finish
for (std::thread& thread : writer_threads) {
thread.join();
}
pcap_logger.info("Close PCAP handle");
TESTASSERT(pcap_handle->close() == SRSLTE_SUCCESS);
TESTASSERT(pcap_handle->close() != 0); // closing twice will fail
@ -113,6 +121,21 @@ int mac_pcap_nr_test()
int main()
{
// Start the log backend.
srslog::init();
auto& mac_logger = srslog::fetch_basic_logger("MAC", false);
mac_logger.set_level(srslog::basic_levels::debug);
mac_logger.set_hex_dump_max_size(-1);
auto& pcap_logger = srslog::fetch_basic_logger("PCAP", false);
pcap_logger.set_level(srslog::basic_levels::debug);
pcap_logger.set_hex_dump_max_size(-1);
pcap_logger.info("Start mac_pcap_eutra_test");
TESTASSERT(mac_pcap_eutra_test() == SRSLTE_SUCCESS);
pcap_logger.info("Start mac_pcap_nr_test");
TESTASSERT(mac_pcap_nr_test() == SRSLTE_SUCCESS);
srslog::flush();
}

@ -36,7 +36,7 @@ static detail::log_entry_metadata build_log_entry_metadata()
fmt::dynamic_format_arg_store<fmt::printf_context> store;
store.push_back(88);
return {tp, {10, true}, "Text %d", std::move(store), "ABC", 'Z'};
return {tp, {10, true}, "Text %d", std::move(store), "ABC", 'Z', small_str_buffer()};
}
static bool when_fully_filled_log_entry_then_everything_is_formatted()

@ -79,19 +79,6 @@ private:
} // namespace
static bool when_backend_is_not_started_then_pushed_log_entries_are_ignored()
{
sink_spy spy;
log_backend_impl backend;
detail::log_entry entry = {&spy};
backend.push(std::move(entry));
ASSERT_EQ(spy.write_invocation_count(), 0);
return true;
}
/// Builds a basic log entry.
static detail::log_entry build_log_entry(sink* s)
{
@ -104,7 +91,19 @@ static detail::log_entry build_log_entry(sink* s)
return {
s,
[](detail::log_entry_metadata&& metadata, fmt::memory_buffer& buffer) {},
{tp, {0, false}, "Text %d", std::move(store), "", '\0'}};
{tp, {0, false}, "Text %d", std::move(store), "", '\0', small_str_buffer()}};
}
static bool when_backend_is_not_started_then_pushed_log_entries_are_ignored()
{
sink_spy spy;
log_backend_impl backend;
backend.push(build_log_entry(&spy));
ASSERT_EQ(spy.write_invocation_count(), 0);
return true;
}
static bool when_backend_is_started_then_pushed_log_entries_are_sent_to_sink()

@ -287,6 +287,31 @@ when_logging_with_context_and_message_then_filled_in_log_entry_is_pushed_into_th
return true;
}
static bool
when_logging_with_small_string_then_filled_in_log_entry_is_pushed_into_the_backend()
{
backend_spy backend;
test_dummies::sink_dummy s;
log_channel log("id", s, backend);
small_str_buffer buf;
fmt::format_to(buf, "A {} {} {}", 1, 2, 3);
buf.push_back('\0');
log(std::move(buf));
ASSERT_EQ(backend.push_invocation_count(), 1);
const detail::log_entry& entry = backend.last_entry();
ASSERT_EQ(&s, entry.s);
ASSERT_NE(entry.format_func, nullptr);
ASSERT_NE(entry.metadata.tp.time_since_epoch().count(), 0);
ASSERT_EQ(entry.metadata.hex_dump.empty(), true);
ASSERT_EQ(std::string(entry.metadata.small_str.data()), "A 1 2 3");
return true;
}
int main()
{
TEST_FUNCTION(when_log_channel_is_created_then_id_matches_expected_value);
@ -305,6 +330,8 @@ int main()
when_logging_with_context_then_filled_in_log_entry_is_pushed_into_the_backend);
TEST_FUNCTION(
when_logging_with_context_and_message_then_filled_in_log_entry_is_pushed_into_the_backend);
TEST_FUNCTION(
when_logging_with_small_string_then_filled_in_log_entry_is_pushed_into_the_backend);
return 0;
}

@ -36,7 +36,7 @@ static detail::log_entry_metadata build_log_entry_metadata()
fmt::dynamic_format_arg_store<fmt::printf_context> store;
store.push_back(88);
return {tp, {10, true}, "Text %d", std::move(store), "ABC", 'Z'};
return {tp, {10, true}, "Text %d", std::move(store), "ABC", 'Z', small_str_buffer()};
}
static bool when_fully_filled_log_entry_then_everything_is_formatted()

@ -116,7 +116,7 @@ void parse_args(stress_test_args_t* args, int argc, char* argv[])
("singletx", bpo::value<bool>(&args->single_tx)->default_value(false), "If set to true, only one node is generating data")
("pcap", bpo::value<bool>(&args->write_pcap)->default_value(false), "Whether to write all RLC PDU to PCAP file")
("zeroseed", bpo::value<bool>(&args->zero_seed)->default_value(false), "Whether to initialize random seed to zero")
("max_retx", bpo::value<uint32_t>(&args->max_retx)->default_value(8), "Maximum number of RLC retransmission attempts")
("max_retx", bpo::value<uint32_t>(&args->max_retx)->default_value(32), "Maximum number of RLC retransmission attempts")
("nof_pdu_tti", bpo::value<uint32_t>(&args->nof_pdu_tti)->default_value(1), "Number of PDUs processed in a TTI");
// clang-format on

@ -106,11 +106,11 @@ protected:
std::vector<std::unique_ptr<carrier_sched> > carrier_schedulers;
// Storage of past scheduling results
sched_result_list sched_results;
sched_result_ringbuffer sched_results;
srslte::tti_point last_tti;
std::mutex sched_mutex;
std::atomic<bool> configured;
bool configured;
};
} // namespace srsenb

@ -37,7 +37,7 @@ public:
explicit carrier_sched(rrc_interface_mac* rrc_,
std::map<uint16_t, std::unique_ptr<sched_ue> >* ue_db_,
uint32_t enb_cc_idx_,
sched_result_list* sched_results_);
sched_result_ringbuffer* sched_results_);
~carrier_sched();
void reset();
void carrier_cfg(const sched_cell_params_t& sched_params_);
@ -66,10 +66,10 @@ private:
const uint32_t enb_cc_idx;
// Subframe scheduling logic
std::array<sf_sched, TTIMOD_SZ> sf_scheds;
srslte::circular_array<sf_sched, TTIMOD_SZ> sf_scheds;
// scheduling results
sched_result_list* prev_sched_results;
sched_result_ringbuffer* prev_sched_results;
std::vector<uint8_t> sf_dl_mask; ///< Some TTIs may be forbidden for DL sched due to MBMS
@ -100,6 +100,7 @@ private:
// args
const sched_cell_params_t* cc_cfg = nullptr;
rrc_interface_mac* rrc = nullptr;
srslog::basic_logger& logger;
std::array<sched_sib_t, sched_interface::MAX_SIBS> pending_sibs;
@ -123,14 +124,16 @@ public:
void reset();
private:
alloc_result allocate_pending_rar(sf_sched* tti_sched, const pending_rar_t& rar, uint32_t& nof_grants_alloc);
// args
srslog::basic_logger& logger;
const sched_cell_params_t* cc_cfg = nullptr;
sched_ue_list* ue_db = nullptr;
std::deque<sf_sched::pending_rar_t> pending_rars;
uint32_t rar_aggr_level = 2;
static const uint32_t PRACH_RAR_OFFSET = 3; // TS 36.321 Sec. 5.1.4
std::deque<pending_rar_t> pending_rars;
uint32_t rar_aggr_level = 2;
static const uint32_t PRACH_RAR_OFFSET = 3; // TS 36.321 Sec. 5.1.4
};
} // namespace srsenb

@ -115,6 +115,10 @@ struct prb_interval : public srslte::interval<uint32_t> {
/// Type of Allocation stored in PDSCH/PUSCH
enum class alloc_type_t { DL_BC, DL_PCCH, DL_RAR, DL_DATA, UL_DATA };
inline bool is_dl_ctrl_alloc(alloc_type_t a)
{
return a == alloc_type_t::DL_BC or a == alloc_type_t::DL_PCCH or a == alloc_type_t::DL_RAR;
}
} // namespace srsenb

@ -23,99 +23,96 @@
#define SRSLTE_SCHED_GRID_H
#include "lib/include/srslte/interfaces/sched_interface.h"
#include "sched_phy_ch/sched_result.h"
#include "sched_phy_ch/sf_cch_allocator.h"
#include "sched_ue.h"
#include "srslte/adt/bounded_bitset.h"
#include "srslte/adt/circular_array.h"
#include "srslte/srslog/srslog.h"
#include <deque>
#include <vector>
namespace srsenb {
/// Error code of alloc attempt
struct alloc_outcome_t {
enum result_enum {
SUCCESS,
DCI_COLLISION,
RB_COLLISION,
ERROR,
NOF_RB_INVALID,
PUCCH_COLLISION,
MEASGAP_COLLISION,
ALREADY_ALLOC,
NO_DATA,
INVALID_PRBMASK,
INVALID_CARRIER
};
result_enum result = ERROR;
alloc_outcome_t() = default;
alloc_outcome_t(result_enum e) : result(e) {}
operator result_enum() { return result; }
operator bool() { return result == SUCCESS; }
const char* to_string() const;
};
//! Result of a Subframe sched computation
struct cc_sched_result {
tti_point tti_rx;
sched_interface::dl_sched_res_t dl_sched_result = {};
sched_interface::ul_sched_res_t ul_sched_result = {};
rbgmask_t dl_mask = {}; ///< Accumulation of all DL RBG allocations
prbmask_t ul_mask = {}; ///< Accumulation of all UL PRB allocations
pdcch_mask_t pdcch_mask = {}; ///< Accumulation of all CCE allocations
bool is_generated(tti_point tti_rx_) const { return tti_rx == tti_rx_; }
enum class alloc_result {
success,
sch_collision,
no_cch_space,
no_sch_space,
no_rnti_opportunity,
invalid_grant_params,
invalid_coderate,
no_grant_space,
other_cause
};
const char* to_string(alloc_result res);
struct sf_sched_result {
srslte::tti_point tti_rx;
tti_point tti_rx;
std::vector<cc_sched_result> enb_cc_list;
cc_sched_result* new_cc(uint32_t enb_cc_idx);
void new_tti(tti_point tti_rx);
bool is_generated(uint32_t enb_cc_idx) const
{
return enb_cc_list.size() > enb_cc_idx and enb_cc_list[enb_cc_idx].generated;
}
const cc_sched_result* get_cc(uint32_t enb_cc_idx) const
{
return enb_cc_idx < enb_cc_list.size() ? &enb_cc_list[enb_cc_idx] : nullptr;
assert(enb_cc_idx < enb_cc_list.size());
return &enb_cc_list[enb_cc_idx];
}
cc_sched_result* get_cc(uint32_t enb_cc_idx)
{
return enb_cc_idx < enb_cc_list.size() ? &enb_cc_list[enb_cc_idx] : nullptr;
assert(enb_cc_idx < enb_cc_list.size());
return &enb_cc_list[enb_cc_idx];
}
bool is_ul_alloc(uint16_t rnti) const;
bool is_dl_alloc(uint16_t rnti) const;
};
struct sched_result_list {
struct sched_result_ringbuffer {
public:
sf_sched_result* new_tti(srslte::tti_point tti_rx);
sf_sched_result* get_sf(srslte::tti_point tti_rx);
const sf_sched_result* get_sf(srslte::tti_point tti_rx) const;
const cc_sched_result* get_cc(srslte::tti_point tti_rx, uint32_t enb_cc_idx) const;
cc_sched_result* get_cc(srslte::tti_point tti_rx, uint32_t enb_cc_idx);
void set_nof_carriers(uint32_t nof_carriers);
void new_tti(srslte::tti_point tti_rx);
bool has_sf(srslte::tti_point tti_rx) const { return results[tti_rx.to_uint()].tti_rx == tti_rx; }
sf_sched_result* get_sf(srslte::tti_point tti_rx)
{
assert(has_sf(tti_rx));
return &results[tti_rx.to_uint()];
}
const sf_sched_result* get_sf(srslte::tti_point tti_rx) const
{
assert(has_sf(tti_rx));
return &results[tti_rx.to_uint()];
}
const cc_sched_result* get_cc(srslte::tti_point tti_rx, uint32_t enb_cc_idx) const
{
return get_sf(tti_rx)->get_cc(enb_cc_idx);
}
cc_sched_result* get_cc(srslte::tti_point tti_rx, uint32_t enb_cc_idx) { return get_sf(tti_rx)->get_cc(enb_cc_idx); }
private:
std::array<sf_sched_result, TTIMOD_SZ> results;
uint32_t nof_carriers = 1;
srslte::circular_array<sf_sched_result, TTIMOD_SZ> results;
};
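// Hedged sketch of the TTI-keyed ring buffer idiom above: the slot index
// presumably wraps modulo the buffer size (as srslte::circular_array does),
// and has_sf()-style staleness detection compares the stored TTI. The types
// below are simplified stand-ins, not the srslte container.
#include <array>
#include <cstdint>
template <typename T, std::size_t N>
struct tti_ring {
  struct slot {
    uint32_t tti = UINT32_MAX;
    T        value{}; // assumes T is default-constructible
  };
  std::array<slot, N> slots;
  T& at_tti(uint32_t tti)
  {
    slot& s = slots[tti % N];
    if (s.tti != tti) { // slot still holds an old TTI: reset it
      s = slot{};
      s.tti = tti;
    }
    return s.value;
  }
  bool has_tti(uint32_t tti) const { return slots[tti % N].tti == tti; }
};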
/// Manages a subframe's grid resources, namely CCE and DL/UL RB allocations
class sf_grid_t
{
public:
struct dl_ctrl_alloc_t {
alloc_outcome_t outcome;
rbg_interval rbg_range;
};
sf_grid_t() : logger(srslog::fetch_basic_logger("MAC")) {}
void init(const sched_cell_params_t& cell_params_);
void new_tti(tti_point tti_rx);
dl_ctrl_alloc_t alloc_dl_ctrl(uint32_t aggr_lvl, alloc_type_t alloc_type);
alloc_outcome_t alloc_dl_data(sched_ue* user, const rbgmask_t& user_mask, bool has_pusch_grant);
bool reserve_dl_rbgs(uint32_t start_rbg, uint32_t end_rbg);
alloc_outcome_t alloc_ul_data(sched_ue* user, prb_interval alloc, bool needs_pdcch, bool strict = true);
alloc_outcome_t reserve_ul_prbs(const prbmask_t& prbmask, bool strict);
alloc_outcome_t reserve_ul_prbs(prb_interval alloc, bool strict);
bool find_ul_alloc(uint32_t L, prb_interval* alloc) const;
void init(const sched_cell_params_t& cell_params_);
void new_tti(tti_point tti_rx);
alloc_result alloc_dl_ctrl(uint32_t aggr_lvl, rbg_interval rbg_range, alloc_type_t alloc_type);
alloc_result alloc_dl_data(sched_ue* user, const rbgmask_t& user_mask, bool has_pusch_grant);
bool reserve_dl_rbgs(uint32_t start_rbg, uint32_t end_rbg);
void rem_last_alloc_dl(rbg_interval rbgs);
alloc_result alloc_ul_data(sched_ue* user, prb_interval alloc, bool needs_pdcch, bool strict = true);
alloc_result reserve_ul_prbs(const prbmask_t& prbmask, bool strict);
alloc_result reserve_ul_prbs(prb_interval alloc, bool strict);
bool find_ul_alloc(uint32_t L, prb_interval* alloc) const;
// getters
const rbgmask_t& get_dl_mask() const { return dl_mask; }
@ -125,17 +122,16 @@ public:
uint32_t get_pucch_width() const { return pucch_nrb; }
private:
alloc_outcome_t alloc_dl(uint32_t aggr_lvl,
alloc_type_t alloc_type,
rbgmask_t alloc_mask,
sched_ue* user = nullptr,
bool has_pusch_grant = false);
alloc_result alloc_dl(uint32_t aggr_lvl,
alloc_type_t alloc_type,
rbgmask_t alloc_mask,
sched_ue* user = nullptr,
bool has_pusch_grant = false);
// consts
const sched_cell_params_t* cc_cfg = nullptr;
srslog::basic_logger& logger;
uint32_t nof_rbgs = 0;
uint32_t si_n_rbg = 0, rar_n_rbg = 0;
uint32_t nof_rbgs = 0;
uint32_t pucch_nrb = 0;
prbmask_t pucch_mask;
@ -144,9 +140,8 @@ private:
// internal state
tti_point tti_rx;
uint32_t avail_rbg = 0;
rbgmask_t dl_mask = {};
prbmask_t ul_mask = {};
rbgmask_t dl_mask = {};
prbmask_t ul_mask = {};
};
/** Description: Stores the RAR, broadcast, paging, DL data, UL data allocations for the given subframe
@ -159,21 +154,14 @@ public:
struct ctrl_alloc_t {
size_t dci_idx;
rbg_interval rbg_range;
uint16_t rnti;
uint32_t req_bytes;
alloc_type_t alloc_type;
};
struct rar_alloc_t {
sf_sched::ctrl_alloc_t alloc_data;
sched_interface::dl_sched_rar_t rar_grant;
rar_alloc_t(const sf_sched::ctrl_alloc_t& c, const sched_interface::dl_sched_rar_t& r) : alloc_data(c), rar_grant(r)
{}
};
struct bc_alloc_t : public ctrl_alloc_t {
uint32_t rv = 0;
uint32_t sib_idx = 0;
bc_alloc_t() = default;
explicit bc_alloc_t(const ctrl_alloc_t& c) : ctrl_alloc_t(c) {}
sched_interface::dl_sched_bc_t bc_grant;
};
struct dl_alloc_t {
size_t dci_idx;
@ -198,13 +186,6 @@ public:
uint32_t n_prb = 0;
uint32_t mcs = 0;
};
struct pending_rar_t {
uint16_t ra_rnti = 0;
tti_point prach_tti{};
uint32_t nof_grants = 0;
sched_interface::dl_sched_rar_info_t msg3_grant[sched_interface::MAX_RAR_LIST] = {};
};
typedef std::pair<alloc_outcome_t, const ctrl_alloc_t> ctrl_code_t;
// Control/Configuration Methods
sf_sched();
@ -212,29 +193,32 @@ public:
void new_tti(srslte::tti_point tti_rx_, sf_sched_result* cc_results);
// DL alloc methods
alloc_outcome_t alloc_bc(uint32_t aggr_lvl, uint32_t sib_idx, uint32_t sib_ntx);
alloc_outcome_t alloc_paging(uint32_t aggr_lvl, uint32_t paging_payload);
std::pair<alloc_outcome_t, uint32_t> alloc_rar(uint32_t aggr_lvl, const pending_rar_t& rar_grant);
alloc_result alloc_sib(uint32_t aggr_lvl, uint32_t sib_idx, uint32_t sib_ntx, rbg_interval rbgs);
alloc_result alloc_paging(uint32_t aggr_lvl, uint32_t paging_payload, rbg_interval rbgs);
alloc_result alloc_rar(uint32_t aggr_lvl, const pending_rar_t& rar_grant, rbg_interval rbgs, uint32_t nof_grants);
bool reserve_dl_rbgs(uint32_t rbg_start, uint32_t rbg_end) { return tti_alloc.reserve_dl_rbgs(rbg_start, rbg_end); }
const std::vector<rar_alloc_t>& get_allocated_rars() const { return rar_allocs; }
// UL alloc methods
alloc_outcome_t alloc_msg3(sched_ue* user, const sched_interface::dl_sched_rar_grant_t& rargrant);
alloc_outcome_t
alloc_ul(sched_ue* user, prb_interval alloc, ul_alloc_t::type_t alloc_type, bool is_msg3 = false, int msg3_mcs = -1);
bool reserve_ul_prbs(const prbmask_t& ulmask, bool strict) { return tti_alloc.reserve_ul_prbs(ulmask, strict); }
bool alloc_phich(sched_ue* user, sched_interface::ul_sched_res_t* ul_sf_result);
alloc_result alloc_msg3(sched_ue* user, const sched_interface::dl_sched_rar_grant_t& rargrant);
alloc_result
alloc_ul(sched_ue* user, prb_interval alloc, ul_alloc_t::type_t alloc_type, bool is_msg3 = false, int msg3_mcs = -1);
alloc_result reserve_ul_prbs(const prbmask_t& ulmask, bool strict)
{
return tti_alloc.reserve_ul_prbs(ulmask, strict);
}
alloc_result alloc_phich(sched_ue* user);
// compute DCIs and generate dl_sched_result/ul_sched_result for a given TTI
void generate_sched_results(sched_ue_list& ue_db);
alloc_outcome_t alloc_dl_user(sched_ue* user, const rbgmask_t& user_mask, uint32_t pid);
tti_point get_tti_tx_dl() const { return to_tx_dl(tti_rx); }
uint32_t get_nof_ctrl_symbols() const;
const rbgmask_t& get_dl_mask() const { return tti_alloc.get_dl_mask(); }
alloc_outcome_t alloc_ul_user(sched_ue* user, prb_interval alloc);
const prbmask_t& get_ul_mask() const { return tti_alloc.get_ul_mask(); }
tti_point get_tti_tx_ul() const { return to_tx_ul(tti_rx); }
alloc_result alloc_dl_user(sched_ue* user, const rbgmask_t& user_mask, uint32_t pid);
tti_point get_tti_tx_dl() const { return to_tx_dl(tti_rx); }
uint32_t get_nof_ctrl_symbols() const;
const rbgmask_t& get_dl_mask() const { return tti_alloc.get_dl_mask(); }
alloc_result alloc_ul_user(sched_ue* user, prb_interval alloc);
const prbmask_t& get_ul_mask() const { return tti_alloc.get_ul_mask(); }
tti_point get_tti_tx_ul() const { return to_tx_ul(tti_rx); }
srslte::const_span<rar_alloc_t> get_allocated_rars() const { return rar_allocs; }
// getters
tti_point get_tti_rx() const { return tti_rx; }
@ -244,18 +228,12 @@ public:
const sched_cell_params_t* get_cc_cfg() const { return cc_cfg; }
private:
ctrl_code_t alloc_dl_ctrl(uint32_t aggr_lvl, uint32_t tbs_bytes, uint16_t rnti);
int generate_format1a(prb_interval prb_range, uint32_t tbs, uint32_t rv, uint16_t rnti, srslte_dci_dl_t* dci);
void set_bc_sched_result(const sf_cch_allocator::alloc_result_t& dci_result,
sched_interface::dl_sched_res_t* dl_result);
void set_rar_sched_result(const sf_cch_allocator::alloc_result_t& dci_result,
sched_interface::dl_sched_res_t* dl_result);
void set_dl_data_sched_result(const sf_cch_allocator::alloc_result_t& dci_result,
sched_interface::dl_sched_res_t* dl_result,
sched_ue_list& ue_list);
void set_ul_sched_result(const sf_cch_allocator::alloc_result_t& dci_result,
sched_interface::ul_sched_res_t* ul_result,
sched_ue_list& ue_list);
void set_dl_data_sched_result(const sf_cch_allocator::alloc_result_t& dci_result,
sched_interface::dl_sched_res_t* dl_result,
sched_ue_list& ue_list);
void set_ul_sched_result(const sf_cch_allocator::alloc_result_t& dci_result,
sched_interface::ul_sched_res_t* ul_result,
sched_ue_list& ue_list);
// consts
const sched_cell_params_t* cc_cfg = nullptr;
@ -263,12 +241,13 @@ private:
sf_sched_result* cc_results; ///< Results of other CCs for the same Subframe
// internal state
sf_grid_t tti_alloc;
std::vector<bc_alloc_t> bc_allocs;
std::vector<rar_alloc_t> rar_allocs;
std::vector<dl_alloc_t> data_allocs;
std::vector<ul_alloc_t> ul_data_allocs;
uint32_t last_msg3_prb = 0, max_msg3_prb = 0;
sf_grid_t tti_alloc;
srslte::bounded_vector<bc_alloc_t, sched_interface::MAX_BC_LIST> bc_allocs;
srslte::bounded_vector<rar_alloc_t, sched_interface::MAX_RAR_LIST> rar_allocs;
srslte::bounded_vector<dl_alloc_t, sched_interface::MAX_DATA_LIST> data_allocs;
srslte::bounded_vector<ul_alloc_t, sched_interface::MAX_DATA_LIST> ul_data_allocs;
uint32_t last_msg3_prb = 0, max_msg3_prb = 0;
// Next TTI state
tti_point tti_rx;

@ -22,7 +22,8 @@
#ifndef SRSLTE_SCHED_DCI_H
#define SRSLTE_SCHED_DCI_H
#include <cstdint>
#include "../sched_common.h"
#include "srslte/adt/bounded_vector.h"
namespace srsenb {
@ -55,7 +56,7 @@ tbs_info compute_mcs_and_tbs(uint32_t nof_prb,
bool use_tbs_index_alt);
/**
* Compute lowest MCS, TBS based on CQI, N_prb that satisfies TBS >= req_bytes
* Compute lowest MCS, TBS based on CQI, N_prb that satisfies TBS >= req_bytes (best effort)
* \remark See TS 36.213 - Table 7.1.7.1-1/1A
* @return resulting TBS (in bytes) and mcs. TBS=-1 if no valid solution was found.
*/
@ -68,6 +69,46 @@ tbs_info compute_min_mcs_and_tbs_from_required_bytes(uint32_t nof_prb,
bool ulqam64_enabled,
bool use_tbs_index_alt);
struct pending_rar_t {
uint16_t ra_rnti = 0;
tti_point prach_tti{};
srslte::bounded_vector<sched_interface::dl_sched_rar_info_t, sched_interface::MAX_RAR_LIST> msg3_grant = {};
};
bool generate_sib_dci(sched_interface::dl_sched_bc_t& bc,
tti_point tti_tx_dl,
uint32_t sib_idx,
uint32_t sib_ntx,
rbg_interval rbg_range,
const sched_cell_params_t& cell_params,
uint32_t current_cfi);
bool generate_paging_dci(sched_interface::dl_sched_bc_t& bc,
tti_point tti_tx_dl,
uint32_t req_bytes,
rbg_interval rbg_range,
const sched_cell_params_t& cell_params,
uint32_t current_cfi);
bool generate_rar_dci(sched_interface::dl_sched_rar_t& rar,
tti_point tti_tx_dl,
const pending_rar_t& pending_rar,
rbg_interval rbg_range,
uint32_t nof_grants,
uint32_t start_msg3_prb,
const sched_cell_params_t& cell_params,
uint32_t current_cfi);
void log_broadcast_allocation(const sched_interface::dl_sched_bc_t& bc,
rbg_interval rbg_range,
const sched_cell_params_t& cell_params);
void log_rar_allocation(const sched_interface::dl_sched_rar_t& rar,
rbg_interval rbg_range,
const sched_cell_params_t& cell_params);
void log_rar_allocation(const sched_interface::dl_sched_rar_t& rar, rbg_interval rbg_range);
} // namespace srsenb
#endif // SRSLTE_SCHED_DCI_H

@ -0,0 +1,41 @@
/**
*
* \section COPYRIGHT
*
* Copyright 2013-2020 Software Radio Systems Limited
*
* By using this file, you agree to the terms and conditions set
* forth in the LICENSE file which can be found at the top level of
* the distribution.
*
*/
#ifndef SRSLTE_SCHED_RESULT_H
#define SRSLTE_SCHED_RESULT_H
#include "../sched_common.h"
namespace srsenb {
/// Result of a Subframe sched computation
struct cc_sched_result {
bool generated = false;
tti_point tti_rx{};
/// Accumulation of all DL RBG allocations
rbgmask_t dl_mask = {};
/// Accumulation of all UL PRB allocations
prbmask_t ul_mask = {};
/// Accumulation of all CCE allocations
pdcch_mask_t pdcch_mask = {};
/// Individual allocations information
sched_interface::dl_sched_res_t dl_sched_result = {};
sched_interface::ul_sched_res_t ul_sched_result = {};
};
} // namespace srsenb
#endif // SRSLTE_SCHED_RESULT_H

@ -20,6 +20,7 @@
*/
#include "../sched_common.h"
#include "sched_result.h"
#ifndef SRSLTE_PDCCH_SCHED_H
#define SRSLTE_PDCCH_SCHED_H
@ -33,15 +34,17 @@ class sf_cch_allocator
{
public:
const static uint32_t MAX_CFI = 3;
struct alloc_t {
int8_t pucch_n_prb; ///< this PUCCH resource identifier
uint16_t rnti = 0;
srslte_dci_location_t dci_pos = {0, 0};
pdcch_mask_t current_mask; ///< this allocation PDCCH mask
pdcch_mask_t total_mask; ///< Accumulation of all PDCCH masks for the current solution (tree route)
prbmask_t total_pucch_mask; ///< Accumulation of all PUCCH masks for the current solution/tree route
struct tree_node {
int8_t pucch_n_prb = -1; ///< this PUCCH resource identifier
uint16_t rnti = SRSLTE_INVALID_RNTI;
uint32_t record_idx = 0;
uint32_t dci_pos_idx = 0;
srslte_dci_location_t dci_pos = {0, 0};
/// Accumulation of all PDCCH masks for the current solution (DFS path)
pdcch_mask_t total_mask, current_mask;
prbmask_t total_pucch_mask;
};
using alloc_result_t = srslte::bounded_vector<const alloc_t*, 16>;
using alloc_result_t = srslte::bounded_vector<const tree_node*, 16>;
sf_cch_allocator() : logger(srslog::fetch_basic_logger("MAC")) {}
@ -57,56 +60,28 @@ public:
*/
bool alloc_dci(alloc_type_t alloc_type, uint32_t aggr_idx, sched_ue* user = nullptr, bool has_pusch_grant = false);
void rem_last_dci();
// getters
uint32_t get_cfi() const { return current_cfix + 1; }
void get_allocs(alloc_result_t* vec = nullptr, pdcch_mask_t* tot_mask = nullptr, size_t idx = 0) const;
uint32_t nof_cces() const { return cc_cfg->nof_cce_table[current_cfix]; }
size_t nof_allocs() const { return dci_record_list.size(); }
size_t nof_alloc_combinations() const { return get_alloc_tree().nof_leaves(); }
std::string result_to_string(bool verbose = false) const;
private:
/// DCI allocation parameters
struct alloc_record_t {
sched_ue* user;
struct alloc_record {
bool pusch_uci;
uint32_t aggr_idx;
alloc_type_t alloc_type;
bool pusch_uci;
};
/// Tree-based data structure to store possible DCI allocation decisions
struct alloc_tree_t {
struct node_t {
int parent_idx;
alloc_t node;
node_t(int i, const alloc_t& a) : parent_idx(i), node(a) {}
};
// args
size_t nof_cces;
const sched_cell_params_t* cc_cfg = nullptr;
srslte_pucch_cfg_t* pucch_cfg_temp = nullptr;
uint32_t cfi;
// state
std::vector<node_t> dci_alloc_tree;
size_t prev_start = 0, prev_end = 0;
explicit alloc_tree_t(uint32_t this_cfi, const sched_cell_params_t& cc_params, srslte_pucch_cfg_t& pucch_cfg);
size_t nof_leaves() const { return prev_end - prev_start; }
void reset();
void get_allocs(alloc_result_t* vec, pdcch_mask_t* tot_mask, size_t idx) const;
bool add_tree_node_leaves(int node_idx,
const alloc_record_t& dci_record,
const cce_cfi_position_table& dci_locs,
tti_point tti_rx);
std::string result_to_string(bool verbose) const;
sched_ue* user;
};
const alloc_tree_t& get_alloc_tree() const { return alloc_trees[current_cfix]; }
const cce_cfi_position_table* get_cce_loc_table(alloc_type_t alloc_type, sched_ue* user, uint32_t cfix) const;
// PDCCH allocation algorithm
bool set_cfi(uint32_t cfi);
bool alloc_dci_record(const alloc_record_t& record, uint32_t cfix);
bool alloc_dfs_node(const alloc_record& record, uint32_t start_child_idx);
bool get_next_dfs();
// consts
const sched_cell_params_t* cc_cfg = nullptr;
@ -114,10 +89,11 @@ private:
srslte_pucch_cfg_t pucch_cfg_common = {};
// tti vars
tti_point tti_rx;
uint32_t current_cfix = 0;
std::vector<alloc_tree_t> alloc_trees; ///< List of PDCCH alloc trees, where index is the cfi index
std::vector<alloc_record_t> dci_record_list; ///< Keeps a record of all the PDCCH allocations done so far
tti_point tti_rx;
uint32_t current_cfix = 0;
uint32_t current_max_cfix = 0;
std::vector<tree_node> last_dci_dfs, temp_dci_dfs;
std::vector<alloc_record> dci_record_list; ///< Keeps a record of all the PDCCH allocations done so far
};
// Helper methods
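// The rewrite above replaces the allocation tree with a DFS whose current
// path lives in last_dci_dfs. A hedged, self-contained sketch of that kind
// of search (fixed CCE count, bitmask collisions only; no CFI growth or
// PUCCH masks):
#include <bitset>
#include <cstdint>
#include <vector>
struct dci_record_sketch {
  uint32_t              nof_cces;   // aggregation size in CCEs
  std::vector<uint32_t> candidates; // candidate CCE start positions
};
using cce_mask = std::bitset<84>;
static bool dfs_alloc(const std::vector<dci_record_sketch>& recs, std::size_t i, cce_mask total, std::vector<uint32_t>& path)
{
  if (i == recs.size()) {
    return true; // every record placed: "path" is a valid solution
  }
  for (uint32_t start : recs[i].candidates) {
    if (start + recs[i].nof_cces > total.size()) {
      continue; // candidate exceeds the CCE region
    }
    cce_mask m;
    for (uint32_t c = 0; c != recs[i].nof_cces; ++c) {
      m.set(start + c);
    }
    if ((m & total).any()) {
      continue; // collides with earlier allocations: next candidate
    }
    path.push_back(start);
    if (dfs_alloc(recs, i + 1, total | m, path)) {
      return true;
    }
    path.pop_back(); // backtrack, as rem_last_dci()/get_next_dfs() do above
  }
  return false;
}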

@ -43,14 +43,16 @@ protected:
/**************** Helper methods ****************/
rbg_interval find_empty_rbg_interval(uint32_t max_nof_rbgs, const rbgmask_t& current_mask);
/**
* Finds a bitmask of available RBG resources for a given UE in a greedy fashion
* @param ue UE being allocated
* @param enb_cc_idx carrier index
* @param is_contiguous whether to find a contiguous range of RBGs
* @param current_mask bitmask of occupied RBGs, where to search for available RBGs
* @return bitmask of found RBGs. If a valid mask wasn't found, bitmask::size() == 0
*/
rbgmask_t compute_user_rbgmask_greedy(sched_ue& ue, uint32_t enb_cc_idx, const rbgmask_t& current_mask);
rbgmask_t compute_rbgmask_greedy(uint32_t max_nof_rbgs, bool is_contiguous, const rbgmask_t& current_mask);
/**
* Finds a range of L contiguous PRBs that are empty
@ -66,10 +68,10 @@ const ul_harq_proc* get_ul_retx_harq(sched_ue& user, sf_sched* tti_sched);
const ul_harq_proc* get_ul_newtx_harq(sched_ue& user, sf_sched* tti_sched);
/// Helper methods to allocate resources in subframe
alloc_outcome_t try_dl_retx_alloc(sf_sched& tti_sched, sched_ue& ue, const dl_harq_proc& h);
alloc_outcome_t
try_dl_newtx_alloc_greedy(sf_sched& tti_sched, sched_ue& ue, const dl_harq_proc& h, rbgmask_t* result_mask = nullptr);
alloc_outcome_t try_ul_retx_alloc(sf_sched& tti_sched, sched_ue& ue, const ul_harq_proc& h);
alloc_result try_dl_retx_alloc(sf_sched& tti_sched, sched_ue& ue, const dl_harq_proc& h);
alloc_result
try_dl_newtx_alloc_greedy(sf_sched& tti_sched, sched_ue& ue, const dl_harq_proc& h, rbgmask_t* result_mask = nullptr);
alloc_result try_ul_retx_alloc(sf_sched& tti_sched, sched_ue& ue, const ul_harq_proc& h);
} // namespace srsenb
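// A hedged sketch of the greedy RBG search documented above. std::vector<bool>
// stands in for the bounded bitset, and an empty mask signals failure,
// matching the doc comment; partial picks in the non-contiguous case are a
// guess at the "greedy" semantics.
#include <vector>
using rbg_mask_sketch = std::vector<bool>; // true = RBG occupied
static rbg_mask_sketch greedy_rbgs(std::size_t max_nof_rbgs, bool is_contiguous, const rbg_mask_sketch& current)
{
  rbg_mask_sketch out(current.size(), false);
  if (is_contiguous) {
    std::size_t run = 0;
    for (std::size_t i = 0; i < current.size(); ++i) {
      run = current[i] ? 0 : run + 1;
      if (run == max_nof_rbgs) { // first free run of the requested length
        for (std::size_t j = i + 1 - run; j <= i; ++j) {
          out[j] = true;
        }
        return out;
      }
    }
    return rbg_mask_sketch{}; // size()==0 marks failure, as documented above
  }
  std::size_t picked = 0;
  for (std::size_t i = 0; i < current.size() and picked < max_nof_rbgs; ++i) {
    if (not current[i]) {
      out[i] = true;
      ++picked;
    }
  }
  return picked > 0 ? out : rbg_mask_sketch{};
}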

@ -68,7 +68,7 @@ private:
srslog::basic_logger* logger;
srslte::pdu_queue* shared_pdu_queue;
srslte::circular_array<std::pair<tti_point, uint8_t*>, SRSLTE_FDD_NOF_HARQ * 2> pdu_map;
srslte::circular_array<std::pair<tti_point, uint8_t*>, SRSLTE_FDD_NOF_HARQ * 8> pdu_map;
};
class cc_buffer_handler

@ -64,6 +64,8 @@ public:
const srslte::rrc_ue_capabilities_t& uecaps);
void handle_ho_prep(const asn1::rrc::ho_prep_info_r8_ies_s& ho_prep);
void handle_max_retx();
const ue_cfg_t& get_ue_sched_cfg() const { return current_sched_ue_cfg; }
bool is_crnti_set() const { return crnti_set; }

@ -25,6 +25,7 @@
#include "common_enb.h"
#include "srslte/common/buffer_pool.h"
#include "srslte/common/task_scheduler.h"
#include "srslte/common/threads.h"
#include "srslte/interfaces/enb_gtpu_interfaces.h"
#include "srslte/phy/common/phy_common.h"
@ -43,7 +44,7 @@ class stack_interface_gtpu_lte;
class gtpu final : public gtpu_interface_rrc, public gtpu_interface_pdcp
{
public:
explicit gtpu(srslog::basic_logger& logger);
explicit gtpu(srslte::task_sched_handle task_sched_, srslog::basic_logger& logger);
int init(std::string gtp_bind_addr_,
std::string mme_addr_,
@ -84,6 +85,7 @@ private:
std::string mme_addr;
srsenb::pdcp_interface_gtpu* pdcp = nullptr;
srslog::basic_logger& logger;
srslte::task_sched_handle task_sched;
// Class to create
class m1u_handler
@ -113,16 +115,17 @@ private:
const uint32_t undefined_pdcp_sn = std::numeric_limits<uint32_t>::max();
struct tunnel {
bool dl_enabled = true;
bool fwd_teid_in_present = false;
bool prior_teid_in_present = false;
uint16_t rnti = SRSLTE_INVALID_RNTI;
uint32_t lcid = SRSENB_N_RADIO_BEARERS;
uint32_t teid_in = 0;
uint32_t teid_out = 0;
uint32_t spgw_addr = 0;
uint32_t fwd_teid_in = 0; ///< forward Rx SDUs to this TEID
uint32_t prior_teid_in = 0; ///< buffer bearer SDUs until this TEID receives an End Marker
bool dl_enabled = true;
bool fwd_teid_in_present = false;
bool prior_teid_in_present = false;
uint16_t rnti = SRSLTE_INVALID_RNTI;
uint32_t lcid = SRSENB_N_RADIO_BEARERS;
uint32_t teid_in = 0;
uint32_t teid_out = 0;
uint32_t spgw_addr = 0;
uint32_t fwd_teid_in = 0; ///< forward Rx SDUs to this TEID
uint32_t prior_teid_in = 0; ///< buffer bearer SDUs until this TEID receives an End Marker
srslte::unique_timer rx_timer;
std::multimap<uint32_t, srslte::unique_byte_buffer_t> buffer;
};
std::unordered_map<uint32_t, tunnel> tunnels;
@ -138,7 +141,9 @@ private:
void echo_response(in_addr_t addr, in_port_t port, uint16_t seq);
void error_indication(in_addr_t addr, in_port_t port, uint32_t err_teid);
void end_marker(uint32_t teidin);
bool end_marker(uint32_t teidin);
void handle_end_marker(tunnel& rx_tunnel);
int create_dl_fwd_tunnel(uint32_t rx_teid_in, uint32_t tx_teid_in);
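// Hedged sketch of the end-marker handling added above: while
// prior_teid_in_present is set, downlink SDUs are held in the multimap
// (presumably keyed by PDCP SN, so iteration is in ascending order), and
// handle_end_marker() flushes them. Types and the deliver() hook below are
// illustrative stand-ins, not the srsenb API.
#include <cstdint>
#include <map>
#include <memory>
#include <utility>
#include <vector>
using byte_buffer_sketch = std::vector<uint8_t>;
struct pending_tunnel_sketch {
  std::multimap<uint32_t, std::unique_ptr<byte_buffer_sketch> > buffer;
  bool prior_teid_in_present = true; // buffering active until End Marker
};
template <typename Deliver>
void flush_on_end_marker(pending_tunnel_sketch& t, Deliver&& deliver)
{
  t.prior_teid_in_present = false; // stop buffering on this tunnel
  for (auto& sn_sdu : t.buffer) {  // multimap iterates in ascending SN order
    deliver(sn_sdu.first, std::move(sn_sdu.second));
  }
  t.buffer.clear();
}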

@ -42,7 +42,7 @@ enb_stack_lte::enb_stack_lte(srslog::sink& log_sink) :
pdcp(&task_sched, pdcp_logger),
mac(&task_sched, mac_logger),
rlc(rlc_logger),
gtpu(gtpu_logger),
gtpu(&task_sched, gtpu_logger),
s1ap(&task_sched, s1ap_logger),
rrc(&task_sched),
mac_pcap(),

@ -593,7 +593,7 @@ int mac::get_dl_sched(uint32_t tti_tx_dl, dl_sched_list_t& dl_sched_res_list)
srslte::rwlock_read_guard lock(rwlock);
// Copy data grants
for (uint32_t i = 0; i < sched_result.nof_data_elems; i++) {
for (uint32_t i = 0; i < sched_result.data.size(); i++) {
uint32_t tb_count = 0;
// Get UE
@ -654,7 +654,7 @@ int mac::get_dl_sched(uint32_t tti_tx_dl, dl_sched_list_t& dl_sched_res_list)
}
// Copy RAR grants
for (uint32_t i = 0; i < sched_result.nof_rar_elems; i++) {
for (uint32_t i = 0; i < sched_result.rar.size(); i++) {
// Copy dci info
dl_sched_res->pdsch[n].dci = sched_result.rar[i].dci;
@ -689,7 +689,7 @@ int mac::get_dl_sched(uint32_t tti_tx_dl, dl_sched_list_t& dl_sched_res_list)
}
// Copy SI and Paging grants
for (uint32_t i = 0; i < sched_result.nof_bc_elems; i++) {
for (uint32_t i = 0; i < sched_result.bc.size(); i++) {
// Copy dci info
dl_sched_res->pdsch[n].dci = sched_result.bc[i].dci;
@ -909,7 +909,7 @@ int mac::get_ul_sched(uint32_t tti_tx_ul, ul_sched_list_t& ul_sched_res_list)
// Copy DCI grants
phy_ul_sched_res->nof_grants = 0;
int n = 0;
for (uint32_t i = 0; i < sched_result.nof_dci_elems; i++) {
for (uint32_t i = 0; i < sched_result.pusch.size(); i++) {
if (sched_result.pusch[i].tbs > 0) {
// Get UE
uint16_t rnti = sched_result.pusch[i].dci.rnti;
@ -952,11 +952,11 @@ int mac::get_ul_sched(uint32_t tti_tx_ul, ul_sched_list_t& ul_sched_res_list)
}
// Copy PHICH actions
for (uint32_t i = 0; i < sched_result.nof_phich_elems; i++) {
for (uint32_t i = 0; i < sched_result.phich.size(); i++) {
phy_ul_sched_res->phich[i].ack = sched_result.phich[i].phich == sched_interface::ul_sched_phich_t::ACK;
phy_ul_sched_res->phich[i].rnti = sched_result.phich[i].rnti;
}
phy_ul_sched_res->nof_phich = sched_result.nof_phich_elems;
phy_ul_sched_res->nof_phich = sched_result.phich.size();
}
// clear old buffers from all users
for (auto& u : ue_db) {

@ -77,6 +77,8 @@ int sched::cell_cfg(const std::vector<sched_interface::cell_cfg_t>& cell_cfg)
}
}
sched_results.set_nof_carriers(cell_cfg.size());
// Create remaining cells, if not created yet
uint32_t prev_size = carrier_schedulers.size();
carrier_schedulers.resize(sched_cell_params.size());
@ -89,7 +91,7 @@ int sched::cell_cfg(const std::vector<sched_interface::cell_cfg_t>& cell_cfg)
carrier_schedulers[i]->carrier_cfg(sched_cell_params[i]);
}
configured.store(true, std::memory_order_release);
configured = true;
return 0;
}
@ -296,11 +298,10 @@ std::array<bool, SRSLTE_MAX_CARRIERS> sched::get_scell_activation_mask(uint16_t
// Downlink Scheduler API
int sched::dl_sched(uint32_t tti_tx_dl, uint32_t enb_cc_idx, sched_interface::dl_sched_res_t& sched_result)
{
if (not configured.load(std::memory_order_acquire)) {
std::lock_guard<std::mutex> lock(sched_mutex);
if (not configured) {
return 0;
}
std::lock_guard<std::mutex> lock(sched_mutex);
if (enb_cc_idx >= carrier_schedulers.size()) {
return 0;
}
@ -317,11 +318,10 @@ int sched::dl_sched(uint32_t tti_tx_dl, uint32_t enb_cc_idx, sched_interface::dl
// Uplink Scheduler API
int sched::ul_sched(uint32_t tti, uint32_t enb_cc_idx, srsenb::sched_interface::ul_sched_res_t& sched_result)
{
if (not configured.load(std::memory_order_acquire)) {
std::lock_guard<std::mutex> lock(sched_mutex);
if (not configured) {
return 0;
}
std::lock_guard<std::mutex> lock(sched_mutex);
if (enb_cc_idx >= carrier_schedulers.size()) {
return 0;
}
@ -355,9 +355,7 @@ void sched::new_tti(tti_point tti_rx)
/// Check if TTI result is generated
bool sched::is_generated(srslte::tti_point tti_rx, uint32_t enb_cc_idx) const
{
const sf_sched_result* sf_result = sched_results.get_sf(tti_rx);
return sf_result != nullptr and sf_result->get_cc(enb_cc_idx) != nullptr and
sf_result->get_cc(enb_cc_idx)->is_generated(tti_rx);
return sched_results.has_sf(tti_rx) and sched_results.get_sf(tti_rx)->is_generated(enb_cc_idx);
}
// Common way to access ue_db elements in a read locking way

@ -35,7 +35,9 @@ using srslte::tti_point;
* Broadcast (SIB+Paging) scheduling
*******************************************************/
bc_sched::bc_sched(const sched_cell_params_t& cfg_, srsenb::rrc_interface_mac* rrc_) : cc_cfg(&cfg_), rrc(rrc_) {}
bc_sched::bc_sched(const sched_cell_params_t& cfg_, srsenb::rrc_interface_mac* rrc_) :
cc_cfg(&cfg_), rrc(rrc_), logger(srslog::fetch_basic_logger("MAC"))
{}
void bc_sched::dl_sched(sf_sched* tti_sched)
{
@ -98,34 +100,69 @@ void bc_sched::alloc_sibs(sf_sched* tti_sched)
uint32_t current_sf_idx = tti_sched->get_tti_tx_dl().sf_idx();
uint32_t current_sfn = tti_sched->get_tti_tx_dl().sfn();
for (uint32_t i = 0; i < pending_sibs.size(); i++) {
if (cc_cfg->cfg.sibs[i].len > 0 and pending_sibs[i].is_in_window and pending_sibs[i].n_tx < 4) {
uint32_t nof_tx = (i > 0) ? SRSLTE_MIN(srslte::ceil_div(cc_cfg->cfg.si_window_ms, 10), 4) : 4;
uint32_t n_sf = (tti_sched->get_tti_tx_dl() - pending_sibs[i].window_start);
for (uint32_t sib_idx = 0; sib_idx < pending_sibs.size(); sib_idx++) {
sched_sib_t& pending_sib = pending_sibs[sib_idx];
// Check if SIB is configured and within window
if (cc_cfg->cfg.sibs[sib_idx].len == 0 or not pending_sib.is_in_window or pending_sib.n_tx >= 4) {
continue;
}
// Check if there is any SIB to tx
bool sib1_flag = (i == 0) and (current_sfn % 2) == 0 and current_sf_idx == 5;
bool other_sibs_flag =
(i > 0) and (n_sf >= (cc_cfg->cfg.si_window_ms / nof_tx) * pending_sibs[i].n_tx) and current_sf_idx == 9;
if (not sib1_flag and not other_sibs_flag) {
continue;
}
// Check if subframe index is the correct one for SIB transmission
uint32_t nof_tx = (sib_idx > 0) ? SRSLTE_MIN(srslte::ceil_div(cc_cfg->cfg.si_window_ms, 10), 4) : 4;
uint32_t n_sf = (tti_sched->get_tti_tx_dl() - pending_sibs[sib_idx].window_start);
bool sib1_flag = (sib_idx == 0) and (current_sfn % 2) == 0 and current_sf_idx == 5;
bool other_sibs_flag = (sib_idx > 0) and
(n_sf >= (cc_cfg->cfg.si_window_ms / nof_tx) * pending_sibs[sib_idx].n_tx) and
current_sf_idx == 9;
if (not sib1_flag and not other_sibs_flag) {
continue;
}
// Schedule SIB
tti_sched->alloc_bc(bc_aggr_level, i, pending_sibs[i].n_tx);
pending_sibs[i].n_tx++;
// Attempt PDSCH grants with increasing number of RBGs
alloc_result ret = alloc_result::invalid_coderate;
for (uint32_t nrbgs = 1; nrbgs < cc_cfg->nof_rbgs and ret == alloc_result::invalid_coderate; ++nrbgs) {
rbg_interval rbg_interv = find_empty_rbg_interval(nrbgs, tti_sched->get_dl_mask());
if (rbg_interv.length() != nrbgs) {
ret = alloc_result::no_sch_space;
break;
}
ret = tti_sched->alloc_sib(bc_aggr_level, sib_idx, pending_sibs[sib_idx].n_tx, rbg_interv);
if (ret == alloc_result::success) {
// SIB scheduled successfully
pending_sibs[sib_idx].n_tx++;
}
}
if (ret != alloc_result::success) {
logger.warning("SCHED: Could not allocate SIB=%d, len=%d. Cause: %s",
sib_idx + 1,
cc_cfg->cfg.sibs[sib_idx].len,
to_string(ret));
}
}
}
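The SIB path above and the Paging path below now share one allocation strategy: start with a 1-RBG grant and widen it while the allocator reports invalid_coderate. Since the payload, and hence the TBS, is fixed, widening the grant increases the number of REs and lowers the effective code rate. A condensed sketch of that retry loop, with try_alloc as a hypothetical stand-in for the concrete alloc_sib()/alloc_paging() call:

alloc_result ret = alloc_result::invalid_coderate;
for (uint32_t nrbgs = 1; nrbgs < cc_cfg->nof_rbgs and ret == alloc_result::invalid_coderate; ++nrbgs) {
  rbg_interval interv = find_empty_rbg_interval(nrbgs, tti_sched->get_dl_mask());
  if (interv.length() != nrbgs) {
    ret = alloc_result::no_sch_space; // not enough contiguous free RBGs left
    break;
  }
  ret = try_alloc(interv); // hypothetical stand-in for the concrete alloc_* call
}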
void bc_sched::alloc_paging(sf_sched* tti_sched)
{
/* Allocate DCIs and RBGs for paging */
if (rrc != nullptr) {
uint32_t paging_payload = 0;
if (rrc->is_paging_opportunity(current_tti.to_uint(), &paging_payload) and paging_payload > 0) {
tti_sched->alloc_paging(bc_aggr_level, paging_payload);
uint32_t paging_payload = 0;
// Check if pending Paging message
if (not rrc->is_paging_opportunity(tti_sched->get_tti_tx_dl().to_uint(), &paging_payload) or paging_payload == 0) {
return;
}
alloc_result ret = alloc_result::invalid_coderate;
for (uint32_t nrbgs = 1; nrbgs < cc_cfg->nof_rbgs and ret == alloc_result::invalid_coderate; ++nrbgs) {
rbg_interval rbg_interv = find_empty_rbg_interval(nrbgs, tti_sched->get_dl_mask());
if (rbg_interv.length() != nrbgs) {
ret = alloc_result::no_sch_space;
break;
}
ret = tti_sched->alloc_paging(bc_aggr_level, paging_payload, rbg_interv);
}
if (ret != alloc_result::success) {
logger.warning("SCHED: Could not allocate Paging with payload length=%d, cause=%s", paging_payload, to_string(ret));
}
}
@ -144,6 +181,31 @@ ra_sched::ra_sched(const sched_cell_params_t& cfg_, sched_ue_list& ue_db_) :
cc_cfg(&cfg_), logger(srslog::fetch_basic_logger("MAC")), ue_db(&ue_db_)
{}
alloc_result ra_sched::allocate_pending_rar(sf_sched* tti_sched, const pending_rar_t& rar, uint32_t& nof_grants_alloc)
{
alloc_result ret = alloc_result::other_cause;
for (nof_grants_alloc = rar.msg3_grant.size(); nof_grants_alloc > 0; nof_grants_alloc--) {
ret = alloc_result::invalid_coderate;
for (uint32_t nrbg = 1; nrbg < cc_cfg->nof_rbgs and ret == alloc_result::invalid_coderate; ++nrbg) {
rbg_interval rbg_interv = find_empty_rbg_interval(nrbg, tti_sched->get_dl_mask());
if (rbg_interv.length() == nrbg) {
ret = tti_sched->alloc_rar(rar_aggr_level, rar, rbg_interv, nof_grants_alloc);
} else {
ret = alloc_result::no_sch_space;
}
}
// If allocation was not successful because there were not enough RBGs, try allocating fewer Msg3 grants
if (ret != alloc_result::invalid_coderate and ret != alloc_result::no_sch_space) {
break;
}
}
if (ret != alloc_result::success) {
logger.info("SCHED: RAR allocation for L=%d was postponed. Cause=%s", rar_aggr_level, to_string(ret));
}
return ret;
}
// Schedules RARs
// On every call to this function, we schedule the oldest RARs that are still within their window; RARs whose window
// has already passed are discarded.
@ -152,10 +214,12 @@ void ra_sched::dl_sched(sf_sched* tti_sched)
tti_point tti_tx_dl = tti_sched->get_tti_tx_dl();
rar_aggr_level = 2;
while (not pending_rars.empty()) {
sf_sched::pending_rar_t& rar = pending_rars.front();
for (auto it = pending_rars.begin(); it != pending_rars.end();) {
auto& rar = *it;
// Discard all RARs out of the window. The first one inside the window is scheduled, if we can't we exit
// In case of RAR outside RAR window:
// - if window has passed, discard RAR
// - if window hasn't started, stop loop, as RARs are ordered by TTI
srslte::tti_interval rar_window{rar.prach_tti + PRACH_RAR_OFFSET,
rar.prach_tti + PRACH_RAR_OFFSET + cc_cfg->cfg.prach_rar_window};
if (not rar_window.contains(tti_tx_dl)) {
@ -167,34 +231,37 @@ void ra_sched::dl_sched(sf_sched* tti_sched)
rar_window,
tti_tx_dl);
srslte::console("%s\n", srslte::to_c_str(str_buffer));
logger.error("%s", srslte::to_c_str(str_buffer));
// Remove from pending queue and get next one if window has passed already
pending_rars.pop_front();
logger.warning("%s", srslte::to_c_str(str_buffer));
it = pending_rars.erase(it);
continue;
}
// If window not yet started do not look for more pending RARs
return;
}
// Try to schedule DCI + RBGs for RAR Grant
std::pair<alloc_outcome_t, uint32_t> ret = tti_sched->alloc_rar(rar_aggr_level, rar);
if (ret.first == alloc_outcome_t::RB_COLLISION) {
// there are not enough RBs for RAR or Msg3 allocation. We can skip this TTI
return;
}
if (ret.first != alloc_outcome_t::SUCCESS) {
// try to scheduler next RAR with different RA-RNTI
continue;
}
uint32_t nof_rar_allocs = 0;
alloc_result ret = allocate_pending_rar(tti_sched, rar, nof_rar_allocs);
if (ret == alloc_result::success) {
// If RAR allocation was successful:
// - in case all Msg3 grants were allocated, remove pending RAR, and continue with following RAR
// - otherwise, erase only Msg3 grants that were allocated, and stop iteration
uint32_t nof_rar_allocs = ret.second;
if (nof_rar_allocs == rar.nof_grants) {
// all RAR grants were allocated. Remove pending RAR
pending_rars.pop_front();
if (nof_rar_allocs == rar.msg3_grant.size()) {
it = pending_rars.erase(it);
} else {
std::copy(rar.msg3_grant.begin() + nof_rar_allocs, rar.msg3_grant.end(), rar.msg3_grant.begin());
rar.msg3_grant.resize(rar.msg3_grant.size() - nof_rar_allocs);
break;
}
} else {
// keep the RAR grants that were not scheduled, so we can schedule in next TTI
std::copy(&rar.msg3_grant[nof_rar_allocs], &rar.msg3_grant[rar.nof_grants], &rar.msg3_grant[0]);
rar.nof_grants -= nof_rar_allocs;
// If RAR allocation was not successful:
// - in case of unavailable PDCCH space, try next pending RAR allocation
// - otherwise, stop iteration
if (ret != alloc_result::no_cch_space) {
break;
}
++it;
}
}
}
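For reference, the window logic above in numbers, assuming PRACH_RAR_OFFSET = 3 and prach_rar_window = 10 subframes (illustrative values; boundary handling is whatever tti_interval::contains implements):

// prach_tti = 100  ->  rar_window covers roughly TTIs 103..113
//   tti_tx_dl = 102 : before the window -> stop the loop (pending RARs are TTI-ordered)
//   tti_tx_dl = 108 : inside the window -> allocate_pending_rar() is attempted
//   tti_tx_dl = 114 : window passed     -> a warning is logged and the RAR erased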
@ -213,24 +280,22 @@ int ra_sched::dl_rach_info(dl_sched_rar_info_t rar_info)
uint16_t ra_rnti = 1 + (uint16_t)(rar_info.prach_tti % 10u);
// find pending rar with same RA-RNTI
for (sf_sched::pending_rar_t& r : pending_rars) {
for (pending_rar_t& r : pending_rars) {
if (r.prach_tti.to_uint() == rar_info.prach_tti and ra_rnti == r.ra_rnti) {
if (r.nof_grants >= sched_interface::MAX_RAR_LIST) {
if (r.msg3_grant.size() >= sched_interface::MAX_RAR_LIST) {
logger.warning("PRACH ignored, as the the maximum number of RAR grants per tti has been reached");
return SRSLTE_ERROR;
}
r.msg3_grant[r.nof_grants] = rar_info;
r.nof_grants++;
r.msg3_grant.push_back(rar_info);
return SRSLTE_SUCCESS;
}
}
// create new RAR
sf_sched::pending_rar_t p;
p.ra_rnti = ra_rnti;
p.prach_tti = tti_point{rar_info.prach_tti};
p.nof_grants = 1;
p.msg3_grant[0] = rar_info;
pending_rar_t p;
p.ra_rnti = ra_rnti;
p.prach_tti = tti_point{rar_info.prach_tti};
p.msg3_grant.push_back(rar_info);
pending_rars.push_back(p);
return SRSLTE_SUCCESS;
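A worked example of the RA-RNTI rule used above (for FDD, where RA-RNTI = 1 + t_id and t_id is the PRACH subframe index):

// prach_tti = 1234  ->  t_id = 1234 % 10 = 4  ->  ra_rnti = 1 + 4 = 5
// All PRACHs received in the same subframe therefore share one pending RAR entry,
// and their Msg3 grants are appended to that entry's msg3_grant list.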
@ -239,13 +304,14 @@ int ra_sched::dl_rach_info(dl_sched_rar_info_t rar_info)
//! Schedule Msg3 grants in UL based on allocated RARs
void ra_sched::ul_sched(sf_sched* sf_dl_sched, sf_sched* sf_msg3_sched)
{
const std::vector<sf_sched::rar_alloc_t>& alloc_rars = sf_dl_sched->get_allocated_rars();
srslte::const_span<sf_sched::rar_alloc_t> alloc_rars = sf_dl_sched->get_allocated_rars();
for (const auto& rar : alloc_rars) {
for (const auto& msg3grant : rar.rar_grant.msg3_grant) {
uint16_t crnti = msg3grant.data.temp_crnti;
auto user_it = ue_db->find(crnti);
if (user_it != ue_db->end() and sf_msg3_sched->alloc_msg3(user_it->second.get(), msg3grant)) {
if (user_it != ue_db->end() and
sf_msg3_sched->alloc_msg3(user_it->second.get(), msg3grant) == alloc_result::success) {
logger.debug("SCHED: Queueing Msg3 for rnti=0x%x at tti=%d", crnti, sf_msg3_sched->get_tti_tx_ul().to_uint());
} else {
logger.error(
@ -264,10 +330,10 @@ void ra_sched::reset()
* Carrier scheduling
*******************************************************/
sched::carrier_sched::carrier_sched(rrc_interface_mac* rrc_,
sched_ue_list* ue_db_,
uint32_t enb_cc_idx_,
sched_result_list* sched_results_) :
sched::carrier_sched::carrier_sched(rrc_interface_mac* rrc_,
sched_ue_list* ue_db_,
uint32_t enb_cc_idx_,
sched_result_ringbuffer* sched_results_) :
rrc(rrc_),
ue_db(ue_db_),
logger(srslog::fetch_basic_logger("MAC")),
@ -277,7 +343,7 @@ sched::carrier_sched::carrier_sched(rrc_interface_mac* rrc_,
sf_dl_mask.resize(1, 0);
}
sched::carrier_sched::~carrier_sched() {}
sched::carrier_sched::~carrier_sched() = default;
void sched::carrier_sched::reset()
{
@ -318,7 +384,7 @@ const cc_sched_result& sched::carrier_sched::generate_tti_result(tti_point tti_r
{
sf_sched* tti_sched = get_sf_sched(tti_rx);
sf_sched_result* sf_result = prev_sched_results->get_sf(tti_rx);
cc_sched_result* cc_result = sf_result->new_cc(enb_cc_idx);
cc_sched_result* cc_result = sf_result->get_cc(enb_cc_idx);
bool dl_active = sf_dl_mask[tti_sched->get_tti_tx_dl().to_uint() % sf_dl_mask.size()] == 0;
@ -329,10 +395,9 @@ const cc_sched_result& sched::carrier_sched::generate_tti_result(tti_point tti_r
/* Schedule PHICH */
for (auto& ue_pair : *ue_db) {
if (cc_result->ul_sched_result.nof_phich_elems >= MAX_PHICH_LIST) {
if (tti_sched->alloc_phich(ue_pair.second.get()) == alloc_result::no_grant_space) {
break;
}
tti_sched->alloc_phich(ue_pair.second.get(), &cc_result->ul_sched_result);
}
/* Schedule DL control data */
@ -402,13 +467,13 @@ int sched::carrier_sched::alloc_ul_users(sf_sched* tti_sched)
sf_sched* sched::carrier_sched::get_sf_sched(tti_point tti_rx)
{
sf_sched* ret = &sf_scheds[tti_rx.to_uint() % sf_scheds.size()];
sf_sched* ret = &sf_scheds[tti_rx.to_uint()];
if (ret->get_tti_rx() != tti_rx) {
sf_sched_result* sf_res = prev_sched_results->get_sf(tti_rx);
if (sf_res == nullptr) {
if (not prev_sched_results->has_sf(tti_rx)) {
// Reset if tti_rx has not been yet set in the sched results
sf_res = prev_sched_results->new_tti(tti_rx);
prev_sched_results->new_tti(tti_rx);
}
sf_sched_result* sf_res = prev_sched_results->get_sf(tti_rx);
// start new TTI for the given CC.
ret->new_tti(tti_rx, sf_res);
}
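Dropping the explicit "% sf_scheds.size()" above is only safe if sf_scheds is now a circular container whose operator[] wraps internally, along these lines (sketch; the container name is assumed):

#include <array>
#include <cstddef>

template <typename T, std::size_t N>
struct circular_array_sketch {
  std::array<T, N> buf;
  T&       operator[](std::size_t i) { return buf[i % N]; } // wrap-around indexing
  const T& operator[](std::size_t i) const { return buf[i % N]; }
};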

File diff suppressed because it is too large

@ -37,7 +37,7 @@ using dl_sched_res_t = sched_interface::dl_sched_res_t;
using dl_sched_data_t = sched_interface::dl_sched_data_t;
using custom_mem_buffer = fmt::basic_memory_buffer<char, 1024>;
srslog::basic_logger& get_mac_logger()
static srslog::basic_logger& get_mac_logger()
{
static srslog::basic_logger& mac_logger = srslog::fetch_basic_logger("MAC");
return mac_logger;
@ -125,8 +125,7 @@ void log_dl_cc_results(srslog::basic_logger& logger, uint32_t enb_cc_idx, const
}
custom_mem_buffer strbuf;
for (uint32_t i = 0; i < result.nof_data_elems; ++i) {
const dl_sched_data_t& data = result.data[i];
for (const auto& data : result.data) {
if (logger.debug.enabled()) {
fill_dl_cc_result_debug(strbuf, data);
} else {
@ -151,7 +150,7 @@ void log_phich_cc_results(srslog::basic_logger& logger,
return;
}
custom_mem_buffer strbuf;
for (uint32_t i = 0; i < result.nof_phich_elems; ++i) {
for (uint32_t i = 0; i < result.phich.size(); ++i) {
const phich_t& phich = result.phich[i];
const char* prefix = strbuf.size() > 0 ? " | " : "";
const char* val = phich.phich == phich_t::ACK ? "ACK" : "NACK";
@ -392,8 +391,6 @@ sched_cell_params_t::get_dl_nof_res(srslte::tti_point tti_tx_dl, const srslte_dc
}
}
// sanity check
assert(nof_re == srslte_ra_dl_grant_nof_re(&cfg.cell, &dl_sf, &grant));
return nof_re;
}

@ -21,11 +21,20 @@
#include "srsenb/hdr/stack/mac/sched_phy_ch/sched_dci.h"
#include "srsenb/hdr/stack/mac/sched_common.h"
#include "srsenb/hdr/stack/mac/sched_helpers.h"
#include "srslte/common/string_helpers.h"
#include <cmath>
#include <cstdint>
namespace srsenb {
static srslog::basic_logger& get_mac_logger()
{
static srslog::basic_logger& logger = srslog::fetch_basic_logger("MAC");
return logger;
}
/// Compute max TBS based on max coderate
int coderate_to_tbs(float max_coderate, uint32_t nof_re)
{
@ -83,7 +92,7 @@ tbs_info compute_mcs_and_tbs(uint32_t nof_prb,
float max_coderate = srslte_cqi_to_coderate(std::min(cqi + 1U, 15U), use_tbs_index_alt);
uint32_t max_Qm = (is_ul) ? (ulqam64_enabled ? 6 : 4) : (use_tbs_index_alt ? 8 : 6);
max_coderate = std::min(max_coderate, 0.93F * max_Qm);
max_coderate = std::min(max_coderate, 0.932F * max_Qm);
int mcs = 0;
float prev_max_coderate = 0;
@ -113,7 +122,7 @@ tbs_info compute_mcs_and_tbs(uint32_t nof_prb,
// update max coderate based on mcs
srslte_mod_t mod = (is_ul) ? srslte_ra_ul_mod_from_mcs(mcs) : srslte_ra_dl_mod_from_mcs(mcs, use_tbs_index_alt);
uint32_t Qm = srslte_mod_bits_x_symbol(mod);
max_coderate = std::min(0.93F * Qm, max_coderate);
max_coderate = std::min(0.932F * Qm, max_coderate);
if (coderate <= max_coderate) {
// solution was found
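The constant change above (0.93 -> 0.932) tightens the ceiling applied on top of the CQI-derived value; expressed in information bits per resource element, the cap is min(coderate(CQI), 0.932 * Qm). For example:

// QPSK  (Qm = 2): cap <= 0.932 * 2 = 1.864 bits/RE
// 16QAM (Qm = 4): cap <= 0.932 * 4 = 3.728 bits/RE  (UL default when QAM64 is off)
// 64QAM (Qm = 6): cap <= 0.932 * 6 = 5.592 bits/RE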
@ -171,4 +180,206 @@ tbs_info compute_min_mcs_and_tbs_from_required_bytes(uint32_t nof_prb,
return tb_max;
}
int generate_ra_bc_dci_format1a_common(srslte_dci_dl_t& dci,
uint16_t rnti,
tti_point tti_tx_dl,
uint32_t req_bytes,
rbg_interval rbg_range,
const sched_cell_params_t& cell_params,
uint32_t current_cfi)
{
static const uint32_t Qm = 2, bc_rar_cqi = 4;
static const float max_ctrl_coderate = std::min(srslte_cqi_to_coderate(bc_rar_cqi + 1, false), 0.932F * Qm);
// Calculate I_tbs for this TBS
int tbs = static_cast<int>(req_bytes) * 8;
int mcs = -1;
for (uint32_t i = 0; i < 27; i++) {
if (srslte_ra_tbs_from_idx(i, 2) >= tbs) {
dci.type2_alloc.n_prb1a = srslte_ra_type2_t::SRSLTE_RA_TYPE2_NPRB1A_2;
mcs = i;
tbs = srslte_ra_tbs_from_idx(i, 2);
break;
}
if (srslte_ra_tbs_from_idx(i, 3) >= tbs) {
dci.type2_alloc.n_prb1a = srslte_ra_type2_t::SRSLTE_RA_TYPE2_NPRB1A_3;
mcs = i;
tbs = srslte_ra_tbs_from_idx(i, 3);
break;
}
}
if (mcs < 0) {
// logger.error("Can't allocate Format 1A for TBS=%d", tbs);
return -1;
}
// Generate remaining DCI Format1A content
dci.alloc_type = SRSLTE_RA_ALLOC_TYPE2;
dci.type2_alloc.mode = srslte_ra_type2_t::SRSLTE_RA_TYPE2_LOC;
prb_interval prb_range = prb_interval::rbgs_to_prbs(rbg_range, cell_params.nof_prb());
dci.type2_alloc.riv = srslte_ra_type2_to_riv(prb_range.length(), prb_range.start(), cell_params.nof_prb());
dci.pid = 0;
dci.tb[0].mcs_idx = mcs;
dci.tb[0].rv = 0; // used for SIBs
dci.format = SRSLTE_DCI_FORMAT1A;
dci.rnti = rnti;
dci.ue_cc_idx = std::numeric_limits<uint32_t>::max();
// Compute effective code rate and verify it doesn't exceed max code rate
uint32_t nof_re = cell_params.get_dl_nof_res(tti_tx_dl, dci, current_cfi);
if (srslte_coderate(tbs, nof_re) >= max_ctrl_coderate) {
return -1;
}
get_mac_logger().debug("ra_tbs=%d/%d, tbs_bytes=%d, tbs=%d, mcs=%d",
srslte_ra_tbs_from_idx(mcs, 2),
srslte_ra_tbs_from_idx(mcs, 3),
req_bytes,
tbs,
mcs);
return tbs;
}
bool generate_sib_dci(sched_interface::dl_sched_bc_t& bc,
tti_point tti_tx_dl,
uint32_t sib_idx,
uint32_t sib_ntx,
rbg_interval rbg_range,
const sched_cell_params_t& cell_params,
uint32_t current_cfi)
{
bc = {};
int tbs_bits = generate_ra_bc_dci_format1a_common(
bc.dci, SRSLTE_SIRNTI, tti_tx_dl, cell_params.cfg.sibs[sib_idx].len, rbg_range, cell_params, current_cfi);
if (tbs_bits < 0) {
return false;
}
// generate SIB-specific fields
bc.index = sib_idx;
bc.type = sched_interface::dl_sched_bc_t::BCCH;
// bc.tbs = sib_len;
bc.tbs = tbs_bits / 8;
bc.dci.tb[0].rv = get_rvidx(sib_ntx);
return true;
}
bool generate_paging_dci(sched_interface::dl_sched_bc_t& bc,
tti_point tti_tx_dl,
uint32_t req_bytes,
rbg_interval rbg_range,
const sched_cell_params_t& cell_params,
uint32_t current_cfi)
{
bc = {};
int tbs_bits = generate_ra_bc_dci_format1a_common(
bc.dci, SRSLTE_PRNTI, tti_tx_dl, req_bytes, rbg_range, cell_params, current_cfi);
if (tbs_bits < 0) {
return false;
}
// generate Paging-specific fields
bc.type = sched_interface::dl_sched_bc_t::PCCH;
bc.tbs = tbs_bits / 8;
return true;
}
bool generate_rar_dci(sched_interface::dl_sched_rar_t& rar,
tti_point tti_tx_dl,
const pending_rar_t& pending_rar,
rbg_interval rbg_range,
uint32_t nof_grants,
uint32_t start_msg3_prb,
const sched_cell_params_t& cell_params,
uint32_t current_cfi)
{
const uint32_t msg3_Lcrb = 3;
uint32_t req_bytes = 7 * nof_grants + 1; // 1+6 bytes per RAR subheader+body and 1 byte for Backoff
rar = {};
int tbs_bits = generate_ra_bc_dci_format1a_common(
rar.dci, pending_rar.ra_rnti, tti_tx_dl, req_bytes, rbg_range, cell_params, current_cfi);
if (tbs_bits < 0) {
return false;
}
rar.msg3_grant.resize(nof_grants);
for (uint32_t i = 0; i < nof_grants; ++i) {
rar.msg3_grant[i].data = pending_rar.msg3_grant[i];
rar.msg3_grant[i].grant.tpc_pusch = 3;
rar.msg3_grant[i].grant.trunc_mcs = 0;
rar.msg3_grant[i].grant.rba = srslte_ra_type2_to_riv(msg3_Lcrb, start_msg3_prb, cell_params.nof_prb());
start_msg3_prb += msg3_Lcrb;
}
// rar.tbs = tbs_bits / 8;
rar.tbs = req_bytes;
return true;
}
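A quick size check for the RAR PDU built above, following the 7*n + 1 rule (one 1-byte subheader plus one 6-byte body per grant, plus one byte for the Backoff Indicator):

// nof_grants = 1 -> req_bytes = 7*1 + 1 = 8 bytes  (64 bits must fit the TBS)
// nof_grants = 4 -> req_bytes = 7*4 + 1 = 29 bytes (232 bits)
// Note that rar.tbs is set to req_bytes (the RAR PDU length) rather than tbs_bits / 8.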
void log_broadcast_allocation(const sched_interface::dl_sched_bc_t& bc,
rbg_interval rbg_range,
const sched_cell_params_t& cell_params)
{
if (not get_mac_logger().info.enabled()) {
return;
}
fmt::memory_buffer str_buffer;
fmt::format_to(str_buffer, "{}", rbg_range);
if (bc.type == sched_interface::dl_sched_bc_t::bc_type::BCCH) {
get_mac_logger().debug("SCHED: SIB%d, cc=%d, rbgs=(%d,%d), dci=(%d,%d), rv=%d, len=%d, period=%d, mcs=%d",
bc.index + 1,
cell_params.enb_cc_idx,
rbg_range.start(),
rbg_range.stop(),
bc.dci.location.L,
bc.dci.location.ncce,
bc.dci.tb[0].rv,
cell_params.cfg.sibs[bc.index].len,
cell_params.cfg.sibs[bc.index].period_rf,
bc.dci.tb[0].mcs_idx);
} else {
get_mac_logger().info("SCHED: PCH, cc=%d, rbgs=%s, dci=(%d,%d), tbs=%d, mcs=%d",
cell_params.enb_cc_idx,
srslte::to_c_str(str_buffer),
bc.dci.location.L,
bc.dci.location.ncce,
bc.tbs,
bc.dci.tb[0].mcs_idx);
}
}
void log_rar_allocation(const sched_interface::dl_sched_rar_t& rar, rbg_interval rbg_range)
{
if (not get_mac_logger().info.enabled()) {
return;
}
fmt::memory_buffer str_buffer;
fmt::format_to(str_buffer, "{}", rbg_range);
fmt::memory_buffer str_buffer2;
for (size_t i = 0; i < rar.msg3_grant.size(); ++i) {
fmt::format_to(str_buffer2,
"{}{{c-rnti=0x{:x}, rba={}, mcs={}}}",
i > 0 ? ", " : "",
rar.msg3_grant[i].data.temp_crnti,
rar.msg3_grant[i].grant.rba,
rar.msg3_grant[i].grant.trunc_mcs);
}
get_mac_logger().info("SCHED: RAR, ra-rnti=%d, rbgs=%s, dci=(%d,%d), msg3 grants=[%s]",
rar.dci.rnti,
srslte::to_c_str(str_buffer),
rar.dci.location.L,
rar.dci.location.ncce,
srslte::to_c_str(str_buffer2));
}
} // namespace srsenb

@ -25,28 +25,28 @@
namespace srsenb {
bool is_pucch_sr_collision(const srslte_pucch_cfg_t& ue_pucch_cfg, tti_point tti_tx_dl_ack, uint32_t n1_pucch)
{
if (ue_pucch_cfg.sr_configured && srslte_ue_ul_sr_send_tti(&ue_pucch_cfg, tti_tx_dl_ack.to_uint())) {
return n1_pucch == ue_pucch_cfg.n_pucch_sr;
}
return false;
}
void sf_cch_allocator::init(const sched_cell_params_t& cell_params_)
{
cc_cfg = &cell_params_;
pucch_cfg_common = cc_cfg->pucch_cfg_common;
// init alloc trees
alloc_trees.reserve(cc_cfg->sched_cfg->max_nof_ctrl_symbols);
for (uint32_t i = 0; i < cc_cfg->sched_cfg->max_nof_ctrl_symbols; ++i) {
alloc_trees.emplace_back(i + 1, *cc_cfg, pucch_cfg_common);
}
}
void sf_cch_allocator::new_tti(tti_point tti_rx_)
{
tti_rx = tti_rx_;
// Reset back all CFIs
for (auto& t : alloc_trees) {
t.reset();
}
dci_record_list.clear();
current_cfix = cc_cfg->sched_cfg->min_nof_ctrl_symbols - 1;
last_dci_dfs.clear();
current_cfix = cc_cfg->sched_cfg->min_nof_ctrl_symbols - 1;
current_max_cfix = cc_cfg->sched_cfg->max_nof_ctrl_symbols - 1;
}
const cce_cfi_position_table*
@ -54,13 +54,11 @@ sf_cch_allocator::get_cce_loc_table(alloc_type_t alloc_type, sched_ue* user, uin
{
switch (alloc_type) {
case alloc_type_t::DL_BC:
return &cc_cfg->common_locations[cfix];
case alloc_type_t::DL_PCCH:
return &cc_cfg->common_locations[cfix];
case alloc_type_t::DL_RAR:
return &cc_cfg->rar_locations[to_tx_dl(tti_rx).sf_idx()][cfix];
case alloc_type_t::DL_DATA:
return user->get_locations(cc_cfg->enb_cc_idx, cfix + 1, to_tx_dl(tti_rx).sf_idx());
case alloc_type_t::UL_DATA:
return user->get_locations(cc_cfg->enb_cc_idx, cfix + 1, to_tx_dl(tti_rx).sf_idx());
default:
@ -71,253 +69,189 @@ sf_cch_allocator::get_cce_loc_table(alloc_type_t alloc_type, sched_ue* user, uin
bool sf_cch_allocator::alloc_dci(alloc_type_t alloc_type, uint32_t aggr_idx, sched_ue* user, bool has_pusch_grant)
{
// TODO: Make the alloc tree update lazy
alloc_record_t record{.user = user, .aggr_idx = aggr_idx, .alloc_type = alloc_type, .pusch_uci = has_pusch_grant};
// Try to allocate user in PDCCH for given CFI. If it fails, increment CFI.
uint32_t first_cfi = get_cfi();
bool success;
do {
success = alloc_dci_record(record, get_cfi() - 1);
} while (not success and get_cfi() < cc_cfg->sched_cfg->max_nof_ctrl_symbols and set_cfi(get_cfi() + 1));
if (not success) {
// DCI allocation failed. go back to original CFI
if (get_cfi() != first_cfi and not set_cfi(first_cfi)) {
logger.error("SCHED: Failed to return back to original PDCCH state");
temp_dci_dfs.clear();
uint32_t start_cfix = current_cfix;
alloc_record record;
record.user = user;
record.aggr_idx = aggr_idx;
record.alloc_type = alloc_type;
record.pusch_uci = has_pusch_grant;
if (is_dl_ctrl_alloc(alloc_type) and nof_allocs() == 0 and cc_cfg->nof_prb() == 6 and
current_max_cfix > current_cfix) {
// Given that the CFI is not currently dynamic for ctrl allocs, in case of a SIB/RAR alloc and a low number of PRBs,
// start with a CFI that maximizes the number of potential CCE locations
uint32_t nof_locs = 0, lowest_cfix = current_cfix;
for (uint32_t cfix_tmp = current_max_cfix; cfix_tmp > lowest_cfix; --cfix_tmp) {
const cce_cfi_position_table* dci_locs = get_cce_loc_table(record.alloc_type, record.user, cfix_tmp);
if ((*dci_locs)[record.aggr_idx].size() > nof_locs) {
nof_locs = (*dci_locs)[record.aggr_idx].size();
current_cfix = cfix_tmp;
} else {
break;
}
}
return false;
}
// DCI record allocation successful
dci_record_list.push_back(record);
return true;
}
bool sf_cch_allocator::alloc_dci_record(const alloc_record_t& record, uint32_t cfix)
{
bool ret = false;
auto& tree = alloc_trees[cfix];
// Get DCI Location Table
const cce_cfi_position_table* dci_locs = get_cce_loc_table(record.alloc_type, record.user, cfix);
if (dci_locs == nullptr or (*dci_locs)[record.aggr_idx].empty()) {
return ret;
}
if (tree.prev_end > 0) {
for (size_t j = tree.prev_start; j < tree.prev_end; ++j) {
ret |= tree.add_tree_node_leaves((int)j, record, *dci_locs, tti_rx);
// Try to allocate grant. If it fails, attempt the same grant, but using a different permutation of past grant DCI
// positions
do {
bool success = alloc_dfs_node(record, 0);
if (success) {
// DCI record allocation successful
dci_record_list.push_back(record);
if (is_dl_ctrl_alloc(alloc_type)) {
// Dynamic CFI not yet supported for DL control allocations, as coderate can be exceeded
current_max_cfix = current_cfix;
}
return true;
}
} else {
ret = tree.add_tree_node_leaves(-1, record, *dci_locs, tti_rx);
}
if (ret) {
tree.prev_start = tree.prev_end;
tree.prev_end = tree.dci_alloc_tree.size();
}
if (temp_dci_dfs.empty()) {
temp_dci_dfs = last_dci_dfs;
}
} while (get_next_dfs());
return ret;
// Revert steps to initial state, before dci record allocation was attempted
last_dci_dfs.swap(temp_dci_dfs);
current_cfix = start_cfix;
return false;
}
bool sf_cch_allocator::set_cfi(uint32_t cfi)
bool sf_cch_allocator::get_next_dfs()
{
if (cfi < cc_cfg->sched_cfg->min_nof_ctrl_symbols or cfi > cc_cfg->sched_cfg->max_nof_ctrl_symbols) {
logger.error("Invalid CFI value. Defaulting to current CFI.");
return false;
}
uint32_t new_cfix = cfi - 1;
if (new_cfix == current_cfix) {
return true;
}
// setup new PDCCH alloc tree
auto& new_tree = alloc_trees[new_cfix];
new_tree.reset();
if (not dci_record_list.empty()) {
// there are already PDCCH allocations
// Rebuild Allocation Tree
bool ret = true;
for (const auto& old_record : dci_record_list) {
ret &= alloc_dci_record(old_record, new_cfix);
do {
uint32_t start_child_idx = 0;
if (last_dci_dfs.empty()) {
// If we reach root, increase CFI
current_cfix++;
if (current_cfix > current_max_cfix) {
return false;
}
} else {
// Attempt to re-add last tree node, but with a higher node child index
start_child_idx = last_dci_dfs.back().dci_pos_idx + 1;
last_dci_dfs.pop_back();
}
if (not ret) {
// Fail to rebuild allocation tree. Go back to previous CFI
return false;
while (last_dci_dfs.size() < dci_record_list.size() and
alloc_dfs_node(dci_record_list[last_dci_dfs.size()], start_child_idx)) {
start_child_idx = 0;
}
}
} while (last_dci_dfs.size() < dci_record_list.size());
current_cfix = new_cfix;
// Finished computation of next DFS node
return true;
}
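In other words, the alloc-tree of the previous implementation is replaced by an explicit depth-first search: each entry of dci_record_list is one level of the tree, each candidate CCE position a child, and last_dci_dfs is the current root-to-leaf path. Roughly (pseudocode reusing the member names above):

// while the path is shorter than the record list:
//   try to extend it with the next record, starting at child index 0;
//   on failure, pop the last node and retry it at dci_pos_idx + 1 (get_next_dfs);
//   once the root is exhausted, bump the CFI and start over, up to current_max_cfix.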
void sf_cch_allocator::get_allocs(alloc_result_t* vec, pdcch_mask_t* tot_mask, size_t idx) const
{
alloc_trees[current_cfix].get_allocs(vec, tot_mask, idx);
}
std::string sf_cch_allocator::result_to_string(bool verbose) const
{
return alloc_trees[current_cfix].result_to_string(verbose);
}
sf_cch_allocator::alloc_tree_t::alloc_tree_t(uint32_t this_cfi,
const sched_cell_params_t& cc_params,
srslte_pucch_cfg_t& pucch_cfg_common) :
cfi(this_cfi), cc_cfg(&cc_params), pucch_cfg_temp(&pucch_cfg_common), nof_cces(cc_params.nof_cce_table[this_cfi - 1])
bool sf_cch_allocator::alloc_dfs_node(const alloc_record& record, uint32_t start_dci_idx)
{
dci_alloc_tree.reserve(8);
}
void sf_cch_allocator::alloc_tree_t::reset()
{
prev_start = 0;
prev_end = 0;
dci_alloc_tree.clear();
}
bool is_pucch_sr_collision(const srslte_pucch_cfg_t& ue_pucch_cfg, tti_point tti_tx_dl_ack, uint32_t n1_pucch)
{
if (ue_pucch_cfg.sr_configured && srslte_ue_ul_sr_send_tti(&ue_pucch_cfg, tti_tx_dl_ack.to_uint())) {
return n1_pucch == ue_pucch_cfg.n_pucch_sr;
// Get DCI Location Table
const cce_cfi_position_table* dci_locs = get_cce_loc_table(record.alloc_type, record.user, current_cfix);
if (dci_locs == nullptr or (*dci_locs)[record.aggr_idx].empty()) {
return false;
}
const cce_position_list& dci_pos_list = (*dci_locs)[record.aggr_idx];
if (start_dci_idx >= dci_pos_list.size()) {
return false;
}
return false;
}
/// Algorithm to compute a valid PDCCH allocation
bool sf_cch_allocator::alloc_tree_t::add_tree_node_leaves(int parent_node_idx,
const alloc_record_t& dci_record,
const cce_cfi_position_table& dci_locs,
tti_point tti_rx_)
{
bool ret = false;
alloc_t alloc;
alloc.rnti = (dci_record.user != nullptr) ? dci_record.user->get_rnti() : SRSLTE_INVALID_RNTI;
alloc.dci_pos.L = dci_record.aggr_idx;
tree_node node;
node.dci_pos_idx = start_dci_idx;
node.dci_pos.L = record.aggr_idx;
node.rnti = record.user != nullptr ? record.user->get_rnti() : SRSLTE_INVALID_RNTI;
node.current_mask.resize(nof_cces());
// get cumulative pdcch & pucch masks
pdcch_mask_t parent_total_mask;
prbmask_t parent_pucch_mask;
if (parent_node_idx >= 0) {
parent_total_mask = dci_alloc_tree[parent_node_idx].node.total_mask;
parent_pucch_mask = dci_alloc_tree[parent_node_idx].node.total_pucch_mask;
if (not last_dci_dfs.empty()) {
node.total_mask = last_dci_dfs.back().total_mask;
node.total_pucch_mask = last_dci_dfs.back().total_pucch_mask;
} else {
parent_total_mask.resize(nof_cces);
parent_pucch_mask.resize(cc_cfg->nof_prb());
node.total_mask.resize(nof_cces());
node.total_pucch_mask.resize(cc_cfg->nof_prb());
}
for (uint32_t i = 0; i < dci_locs[dci_record.aggr_idx].size(); ++i) {
int8_t pucch_prbidx = -1;
uint32_t ncce_pos = dci_locs[dci_record.aggr_idx][i];
for (; node.dci_pos_idx < dci_pos_list.size(); ++node.dci_pos_idx) {
node.dci_pos.ncce = dci_pos_list[node.dci_pos_idx];
if (dci_record.alloc_type == alloc_type_t::DL_DATA and not dci_record.pusch_uci) {
if (record.alloc_type == alloc_type_t::DL_DATA and not record.pusch_uci) {
// The UE needs to allocate space in PUCCH for HARQ-ACK
pucch_cfg_temp->n_pucch = ncce_pos + pucch_cfg_temp->N_pucch_1;
pucch_cfg_common.n_pucch = node.dci_pos.ncce + pucch_cfg_common.N_pucch_1;
if (is_pucch_sr_collision(
dci_record.user->get_ue_cfg().pucch_cfg, to_tx_dl_ack(tti_rx_), pucch_cfg_temp->n_pucch)) {
if (is_pucch_sr_collision(record.user->get_ue_cfg().pucch_cfg, to_tx_dl_ack(tti_rx), pucch_cfg_common.n_pucch)) {
// avoid collision of HARQ-ACK with own SR n(1)_pucch
continue;
}
pucch_prbidx = srslte_pucch_n_prb(&cc_cfg->cfg.cell, pucch_cfg_temp, 0);
if (not cc_cfg->sched_cfg->pucch_mux_enabled and parent_pucch_mask.test(pucch_prbidx)) {
node.pucch_n_prb = srslte_pucch_n_prb(&cc_cfg->cfg.cell, &pucch_cfg_common, 0);
if (not cc_cfg->sched_cfg->pucch_mux_enabled and node.total_pucch_mask.test(node.pucch_n_prb)) {
// PUCCH allocation would collide with other PUCCH/PUSCH grants. Try another CCE position
continue;
}
}
pdcch_mask_t alloc_mask(nof_cces);
alloc_mask.fill(ncce_pos, ncce_pos + (1u << dci_record.aggr_idx));
if ((parent_total_mask & alloc_mask).any()) {
node.current_mask.reset();
node.current_mask.fill(node.dci_pos.ncce, node.dci_pos.ncce + (1U << record.aggr_idx));
if ((node.total_mask & node.current_mask).any()) {
// there is a PDCCH collision. Try another CCE position
continue;
}
// Allocation successful
alloc.current_mask = alloc_mask;
alloc.total_mask = parent_total_mask | alloc_mask;
alloc.dci_pos.ncce = ncce_pos;
alloc.pucch_n_prb = pucch_prbidx;
alloc.total_pucch_mask = parent_pucch_mask;
if (pucch_prbidx >= 0) {
alloc.total_pucch_mask.set(pucch_prbidx);
}
// Prune if repetition of total_masks
uint32_t j = prev_end;
for (; j < dci_alloc_tree.size(); ++j) {
if (dci_alloc_tree[j].node.total_mask == alloc.total_mask) {
// leave nested for-loop
break;
}
}
if (j < dci_alloc_tree.size()) {
continue;
node.total_mask |= node.current_mask;
if (node.pucch_n_prb >= 0) {
node.total_pucch_mask.set(node.pucch_n_prb);
}
// Register allocation
dci_alloc_tree.emplace_back(parent_node_idx, alloc);
ret = true;
last_dci_dfs.push_back(node);
return true;
}
return ret;
return false;
}
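Background for the PUCCH collision handling above: with dynamic scheduling, the HARQ-ACK PUCCH resource is tied to the first CCE of the scheduling PDCCH (TS 36.213, Sec. 10.1):

// n1_pucch = n_CCE + N_pucch_1
// Trying a different CCE position (the `continue` above) therefore not only avoids
// PDCCH collisions but also moves the implied PUCCH resource away from the UE's own
// SR resource or an already-occupied PUCCH PRB.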
void sf_cch_allocator::alloc_tree_t::get_allocs(alloc_result_t* vec, pdcch_mask_t* tot_mask, size_t idx) const
void sf_cch_allocator::rem_last_dci()
{
// if alloc tree is empty
if (prev_start == prev_end) {
if (vec != nullptr) {
vec->clear();
}
if (tot_mask != nullptr) {
tot_mask->resize(nof_cces);
tot_mask->reset();
}
return;
}
assert(not dci_record_list.empty());
// Remove DCI record
last_dci_dfs.pop_back();
dci_record_list.pop_back();
}
// set vector of allocations
void sf_cch_allocator::get_allocs(alloc_result_t* vec, pdcch_mask_t* tot_mask, size_t idx) const
{
if (vec != nullptr) {
vec->clear();
size_t i = prev_start + idx;
while (dci_alloc_tree[i].parent_idx >= 0) {
vec->push_back(&dci_alloc_tree[i].node);
i = (size_t)dci_alloc_tree[i].parent_idx;
vec->resize(last_dci_dfs.size());
for (uint32_t i = 0; i < last_dci_dfs.size(); ++i) {
(*vec)[i] = &last_dci_dfs[i];
}
vec->push_back(&dci_alloc_tree[i].node);
std::reverse(vec->begin(), vec->end());
}
// set final cce mask
if (tot_mask != nullptr) {
*tot_mask = dci_alloc_tree[prev_start + idx].node.total_mask;
if (last_dci_dfs.empty()) {
tot_mask->resize(nof_cces());
tot_mask->reset();
} else {
*tot_mask = last_dci_dfs.back().total_mask;
}
}
}
std::string sf_cch_allocator::alloc_tree_t::result_to_string(bool verbose) const
std::string sf_cch_allocator::result_to_string(bool verbose) const
{
// get all the possible combinations of DCI pos allocations
fmt::basic_memory_buffer<char, 1024> strbuf;
fmt::format_to(strbuf,
"SCHED: PDCCH allocations cfi={}, nof_cce={}, {} possible combinations:\n",
cfi,
nof_cces,
prev_end - prev_start);
uint32_t count = 0;
for (size_t i = prev_start; i < prev_end; ++i) {
if (dci_record_list.empty()) {
fmt::format_to(strbuf, "SCHED: PDCCH allocations cfi={}, nof_cce={}, No allocations.\n", get_cfi(), nof_cces());
} else {
fmt::format_to(strbuf,
"SCHED: PDCCH allocations cfi={}, nof_cce={}, nof_allocs={}, total PDCCH mask=0x{:x}",
get_cfi(),
nof_cces(),
nof_allocs(),
last_dci_dfs.back().total_mask);
alloc_result_t vec;
pdcch_mask_t tot_mask;
get_allocs(&vec, &tot_mask, i - prev_start);
fmt::format_to(strbuf, "[{}]: total mask=0x{:x}", count, tot_mask);
get_allocs(&vec);
if (verbose) {
fmt::format_to(strbuf, ", allocations:\n");
for (const auto& dci_alloc : vec) {
@ -328,9 +262,8 @@ std::string sf_cch_allocator::alloc_tree_t::result_to_string(bool verbose) const
dci_alloc->total_mask);
}
} else {
fmt::format_to(strbuf, "\n");
fmt::format_to(strbuf, ".\n");
}
count++;
}
return fmt::to_string(strbuf);
}

@ -794,12 +794,6 @@ srslte::interval<uint32_t> sched_ue::get_requested_dl_bytes(uint32_t enb_cc_idx)
assert(cells.at(enb_cc_idx).configured());
/* Set Maximum boundary */
// Ensure there is space for ConRes and RRC Setup
// SRB0 is a special case due to being RLC TM (no segmentation possible)
if (not lch_handler.is_bearer_dl(0)) {
logger.error("SRB0 must always be activated for DL");
return {};
}
if (cells[enb_cc_idx].cc_state() != cc_st::active) {
return {};
}

@ -243,7 +243,7 @@ tbs_info cqi_to_tbs_dl(const sched_ue_cell& cell,
ret = compute_min_mcs_and_tbs_from_required_bytes(
nof_prb, nof_re, cell.dl_cqi, cell.max_mcs_dl, req_bytes, false, false, use_tbs_index_alt);
// If coderate > SRSLTE_MIN(max_coderate, 0.930 * Qm) we should set TBS=0. We don't because it's not correctly
// If coderate > SRSLTE_MIN(max_coderate, 0.932 * Qm) we should set TBS=0. We don't because it's not correctly
// handled by the scheduler, but we might be scheduling undecodable codewords at very low SNR
if (ret.tbs_bytes < 0) {
ret.mcs = 0;
@ -269,7 +269,7 @@ tbs_info cqi_to_tbs_ul(const sched_ue_cell& cell, uint32_t nof_prb, uint32_t nof
ret = compute_min_mcs_and_tbs_from_required_bytes(
nof_prb, nof_re, cell.ul_cqi, cell.max_mcs_ul, req_bytes, true, ulqam64_enabled, false);
// If coderate > SRSLTE_MIN(max_coderate, 0.930 * Qm) we should set TBS=0. We don't because it's not correctly
// If coderate > SRSLTE_MIN(max_coderate, 0.932 * Qm) we should set TBS=0. We don't because it's not correctly
// handled by the scheduler, but we might be scheduling undecodable codewords at very low SNR
if (ret.tbs_bytes < 0) {
ret.mcs = 0;

@ -69,7 +69,12 @@ rbgmask_t find_available_rb_mask(const rbgmask_t& in_mask, uint32_t max_size)
return localmask;
}
rbgmask_t compute_user_rbgmask_greedy(uint32_t max_nof_rbgs, bool is_contiguous, const rbgmask_t& current_mask)
rbg_interval find_empty_rbg_interval(uint32_t max_nof_rbgs, const rbgmask_t& current_mask)
{
return find_contiguous_interval(current_mask, max_nof_rbgs);
}
rbgmask_t compute_rbgmask_greedy(uint32_t max_nof_rbgs, bool is_contiguous, const rbgmask_t& current_mask)
{
// Allocate enough RBs that accommodate pending data
rbgmask_t newtx_mask(current_mask.size());
@ -116,27 +121,26 @@ const dl_harq_proc* get_dl_newtx_harq(sched_ue& user, sf_sched* tti_sched)
return user.get_empty_dl_harq(tti_sched->get_tti_tx_dl(), tti_sched->get_enb_cc_idx());
}
alloc_outcome_t try_dl_retx_alloc(sf_sched& tti_sched, sched_ue& ue, const dl_harq_proc& h)
alloc_result try_dl_retx_alloc(sf_sched& tti_sched, sched_ue& ue, const dl_harq_proc& h)
{
// Try to reuse the same mask
rbgmask_t retx_mask = h.get_rbgmask();
alloc_outcome_t code = tti_sched.alloc_dl_user(&ue, retx_mask, h.get_id());
if (code == alloc_outcome_t::SUCCESS or code == alloc_outcome_t::DCI_COLLISION) {
rbgmask_t retx_mask = h.get_rbgmask();
alloc_result code = tti_sched.alloc_dl_user(&ue, retx_mask, h.get_id());
if (code != alloc_result::sch_collision) {
return code;
}
// If previous mask does not fit, find another with exact same number of rbgs
size_t nof_rbg = retx_mask.count();
bool is_contiguous_alloc = ue.get_dci_format() == SRSLTE_DCI_FORMAT1A;
retx_mask = compute_user_rbgmask_greedy(nof_rbg, is_contiguous_alloc, tti_sched.get_dl_mask());
retx_mask = compute_rbgmask_greedy(nof_rbg, is_contiguous_alloc, tti_sched.get_dl_mask());
if (retx_mask.count() == nof_rbg) {
return tti_sched.alloc_dl_user(&ue, retx_mask, h.get_id());
}
return alloc_outcome_t::RB_COLLISION;
return alloc_result::sch_collision;
}
alloc_outcome_t
try_dl_newtx_alloc_greedy(sf_sched& tti_sched, sched_ue& ue, const dl_harq_proc& h, rbgmask_t* result_mask)
alloc_result try_dl_newtx_alloc_greedy(sf_sched& tti_sched, sched_ue& ue, const dl_harq_proc& h, rbgmask_t* result_mask)
{
if (result_mask != nullptr) {
*result_mask = {};
@ -145,25 +149,25 @@ try_dl_newtx_alloc_greedy(sf_sched& tti_sched, sched_ue& ue, const dl_harq_proc&
// If all RBGs are occupied, the next steps can be shortcut
const rbgmask_t& current_mask = tti_sched.get_dl_mask();
if (current_mask.all()) {
return alloc_outcome_t::RB_COLLISION;
return alloc_result::no_sch_space;
}
// If there is no data to transmit, no need to allocate
rbg_interval req_rbgs = ue.get_required_dl_rbgs(tti_sched.get_enb_cc_idx());
if (req_rbgs.stop() == 0) {
return alloc_outcome_t::NO_DATA;
return alloc_result::no_rnti_opportunity;
}
// Find RBG mask that accommodates pending data
bool is_contiguous_alloc = ue.get_dci_format() == SRSLTE_DCI_FORMAT1A;
rbgmask_t newtxmask = compute_user_rbgmask_greedy(req_rbgs.stop(), is_contiguous_alloc, current_mask);
rbgmask_t newtxmask = compute_rbgmask_greedy(req_rbgs.stop(), is_contiguous_alloc, current_mask);
if (newtxmask.none() or newtxmask.count() < req_rbgs.start()) {
return alloc_outcome_t::RB_COLLISION;
return alloc_result::no_sch_space;
}
// empty RBGs were found. Attempt allocation
alloc_outcome_t ret = tti_sched.alloc_dl_user(&ue, newtxmask, h.get_id());
if (ret == alloc_outcome_t::SUCCESS and result_mask != nullptr) {
alloc_result ret = tti_sched.alloc_dl_user(&ue, newtxmask, h.get_id());
if (ret == alloc_result::success and result_mask != nullptr) {
*result_mask = newtxmask;
}
return ret;
@ -232,7 +236,7 @@ const ul_harq_proc* get_ul_newtx_harq(sched_ue& user, sf_sched* tti_sched)
return h->is_empty() ? h : nullptr;
}
alloc_outcome_t try_ul_retx_alloc(sf_sched& tti_sched, sched_ue& ue, const ul_harq_proc& h)
alloc_result try_ul_retx_alloc(sf_sched& tti_sched, sched_ue& ue, const ul_harq_proc& h)
{
prb_interval alloc = h.get_alloc();
if (tti_sched.get_cc_cfg()->nof_prb() == 6 and h.is_msg3()) {
@ -242,20 +246,20 @@ alloc_outcome_t try_ul_retx_alloc(sf_sched& tti_sched, sched_ue& ue, const ul_ha
// If can schedule the same mask as in earlier tx, do it
if (not tti_sched.get_ul_mask().any(alloc.start(), alloc.stop())) {
alloc_outcome_t ret = tti_sched.alloc_ul_user(&ue, alloc);
if (ret == alloc_outcome_t::SUCCESS or ret == alloc_outcome_t::DCI_COLLISION) {
alloc_result ret = tti_sched.alloc_ul_user(&ue, alloc);
if (ret != alloc_result::sch_collision) {
return ret;
}
}
// Avoid measGaps accounting for PDCCH
if (not ue.pusch_enabled(tti_sched.get_tti_rx(), tti_sched.get_enb_cc_idx(), true)) {
return alloc_outcome_t::MEASGAP_COLLISION;
return alloc_result::no_rnti_opportunity;
}
uint32_t nof_prbs = alloc.length();
alloc = find_contiguous_ul_prbs(nof_prbs, tti_sched.get_ul_mask());
if (alloc.length() != nof_prbs) {
return alloc_outcome_t::RB_COLLISION;
return alloc_result::no_sch_space;
}
return tti_sched.alloc_ul_user(&ue, alloc);
}

@ -80,25 +80,22 @@ void sched_time_pf::sched_dl_users(sched_ue_list& ue_db, sf_sched* tti_sched)
uint32_t sched_time_pf::try_dl_alloc(ue_ctxt& ue_ctxt, sched_ue& ue, sf_sched* tti_sched)
{
alloc_outcome_t code = alloc_outcome_t::ERROR;
alloc_result code = alloc_result::other_cause;
if (ue_ctxt.dl_retx_h != nullptr) {
code = try_dl_retx_alloc(*tti_sched, ue, *ue_ctxt.dl_retx_h);
if (code == alloc_outcome_t::SUCCESS) {
if (code == alloc_result::success) {
return ue_ctxt.dl_retx_h->get_tbs(0) + ue_ctxt.dl_retx_h->get_tbs(1);
}
}
// There is space in PDCCH and an available DL HARQ
if (code != alloc_outcome_t::DCI_COLLISION and ue_ctxt.dl_newtx_h != nullptr) {
if (code != alloc_result::no_cch_space and ue_ctxt.dl_newtx_h != nullptr) {
rbgmask_t alloc_mask;
code = try_dl_newtx_alloc_greedy(*tti_sched, ue, *ue_ctxt.dl_newtx_h, &alloc_mask);
if (code == alloc_outcome_t::SUCCESS) {
if (code == alloc_result::success) {
return ue.get_expected_dl_bitrate(cc_cfg->enb_cc_idx, alloc_mask.count()) * tti_duration_ms / 8;
}
}
if (code == alloc_outcome_t::DCI_COLLISION) {
logger.info("SCHED: Couldn't find space in PDCCH/PUCCH for DL tx for rnti=0x%x", ue.get_rnti());
}
return 0;
}
@ -131,11 +128,11 @@ uint32_t sched_time_pf::try_ul_alloc(ue_ctxt& ue_ctxt, sched_ue& ue, sf_sched* t
return ue_ctxt.ul_h->get_pending_data();
}
alloc_outcome_t code;
uint32_t estim_tbs_bytes = 0;
alloc_result code;
uint32_t estim_tbs_bytes = 0;
if (ue_ctxt.ul_h->has_pending_retx()) {
code = try_ul_retx_alloc(*tti_sched, ue, *ue_ctxt.ul_h);
estim_tbs_bytes = code == alloc_outcome_t::SUCCESS ? ue_ctxt.ul_h->get_pending_data() : 0;
estim_tbs_bytes = code == alloc_result::success ? ue_ctxt.ul_h->get_pending_data() : 0;
} else {
// Note: h->is_empty check is required, in case CA allocated a small UL grant for UCI
uint32_t pending_data = ue.get_pending_ul_new_data(tti_sched->get_tti_tx_ul(), cc_cfg->enb_cc_idx);
@ -149,13 +146,10 @@ uint32_t sched_time_pf::try_ul_alloc(ue_ctxt& ue_ctxt, sched_ue& ue, sf_sched* t
return 0;
}
code = tti_sched->alloc_ul_user(&ue, alloc);
estim_tbs_bytes = code == alloc_outcome_t::SUCCESS
estim_tbs_bytes = code == alloc_result::success
? ue.get_expected_ul_bitrate(cc_cfg->enb_cc_idx, alloc.length()) * tti_duration_ms / 8
: 0;
}
if (code == alloc_outcome_t::DCI_COLLISION) {
logger.info("SCHED: Couldn't find space in PDCCH for UL retx of rnti=0x%x", ue.get_rnti());
}
return estim_tbs_bytes;
}

@ -20,7 +20,6 @@
*/
#include "srsenb/hdr/stack/mac/schedulers/sched_time_rr.h"
#include <string.h>
namespace srsenb {
@ -59,10 +58,7 @@ void sched_time_rr::sched_dl_retxs(sched_ue_list& ue_db, sf_sched* tti_sched, si
if (h == nullptr) {
continue;
}
alloc_outcome_t code = try_dl_retx_alloc(*tti_sched, user, *h);
if (code == alloc_outcome_t::DCI_COLLISION) {
logger.info("SCHED: Couldn't find space in PDCCH/PUCCH for DL retx for rnti=0x%x", user.get_rnti());
}
try_dl_retx_alloc(*tti_sched, user, *h);
}
}
@ -83,7 +79,7 @@ void sched_time_rr::sched_dl_newtxs(sched_ue_list& ue_db, sf_sched* tti_sched, s
if (h == nullptr) {
continue;
}
if (try_dl_newtx_alloc_greedy(*tti_sched, user, *h) == alloc_outcome_t::DCI_COLLISION) {
if (try_dl_newtx_alloc_greedy(*tti_sched, user, *h) == alloc_result::no_cch_space) {
logger.info("SCHED: Couldn't find space in PDCCH/PUCCH for DL tx for rnti=0x%x", user.get_rnti());
}
}
@ -118,9 +114,9 @@ void sched_time_rr::sched_ul_retxs(sched_ue_list& ue_db, sf_sched* tti_sched, si
if (h == nullptr) {
continue;
}
alloc_outcome_t code = try_ul_retx_alloc(*tti_sched, user, *h);
if (code == alloc_outcome_t::DCI_COLLISION) {
logger.info("SCHED: Couldn't find space in PDCCH for UL retx of rnti=0x%x", user.get_rnti());
alloc_result code = try_ul_retx_alloc(*tti_sched, user, *h);
if (code == alloc_result::no_cch_space) {
logger.debug("SCHED: Couldn't find space in PDCCH for UL retx of rnti=0x%x", user.get_rnti());
}
}
}
@ -149,9 +145,10 @@ void sched_time_rr::sched_ul_newtxs(sched_ue_list& ue_db, sf_sched* tti_sched, s
if (alloc.empty()) {
continue;
}
alloc_outcome_t ret = tti_sched->alloc_ul_user(&user, alloc);
if (ret == alloc_outcome_t::DCI_COLLISION) {
logger.info("SCHED: Couldn't find space in PDCCH for UL tx of rnti=0x%x", user.get_rnti());
alloc_result ret = tti_sched->alloc_ul_user(&user, alloc);
if (ret == alloc_result::no_cch_space) {
logger.info(
"SCHED: rnti=0x%x, cc=%d, Couldn't find space in PDCCH for UL tx", user.get_rnti(), cc_cfg->enb_cc_idx);
}
}
}

@ -432,9 +432,9 @@ void ue::deallocate_pdu(uint32_t tti, uint32_t ue_cc_idx)
{
std::unique_lock<std::mutex> lock(rx_buffers_mutex);
if (not cc_buffers[ue_cc_idx].get_rx_used_buffers().try_deallocate_pdu(tti_point(tti))) {
logger.warning("UE buffers: Null RX PDU pointer in deallocate_pdu for rnti=0x%x pid=%d cc_idx=%d",
logger.warning("UE buffers: Null RX PDU pointer in deallocate_pdu for rnti=0x%x tti=%d cc_idx=%d",
rnti,
tti % nof_rx_harq_proc,
tti,
ue_cc_idx);
}
}
@ -444,7 +444,7 @@ void ue::push_pdu(uint32_t tti, uint32_t ue_cc_idx, uint32_t len)
std::unique_lock<std::mutex> lock(rx_buffers_mutex);
if (not cc_buffers[ue_cc_idx].get_rx_used_buffers().push_pdu(tti_point(tti), len)) {
logger.warning(
"UE buffers: Failed to push RX PDU for rnti=0x%x pid=%d cc_idx=%d", rnti, tti % nof_rx_harq_proc, ue_cc_idx);
"UE buffers: Failed to push RX PDU for rnti=0x%x tti=%d cc_idx=%d", rnti, tti, ue_cc_idx);
}
}

@ -308,6 +308,11 @@ void mac_controller::handle_ho_prep(const asn1::rrc::ho_prep_info_r8_ies_s& ho_p
}
}
void mac_controller::handle_max_retx()
{
set_drb_activation(false);
}
void mac_controller::set_scell_activation(const std::bitset<SRSLTE_MAX_CARRIERS>& scell_mask)
{
for (uint32_t i = 1; i < current_sched_ue_cfg.supported_cc_list.size(); ++i) {

@ -145,6 +145,8 @@ void rrc::ue::max_retx_reached()
// Give UE time to start re-establishment
set_activity_timeout(UE_REESTABLISH_TIMEOUT);
mac_ctrl.handle_max_retx();
}
}

@ -18,6 +18,7 @@
* and at http://www.gnu.org/licenses/.
*
*/
#include "srslte/upper/gtpu.h"
#include "srsenb/hdr/stack/upper/gtpu.h"
#include "srslte/common/network_utils.h"
@ -35,7 +36,9 @@
using namespace srslte;
namespace srsenb {
gtpu::gtpu(srslog::basic_logger& logger) : m1u(this), logger(logger) {}
gtpu::gtpu(srslte::task_sched_handle task_sched_, srslog::basic_logger& logger) :
m1u(this), task_sched(task_sched_), logger(logger)
{}
int gtpu::init(std::string gtp_bind_addr_,
std::string mme_addr_,
@ -183,6 +186,26 @@ uint32_t gtpu::add_bearer(uint16_t rnti, uint32_t lcid, uint32_t addr, uint32_t
after_tun.dl_enabled = false;
after_tun.prior_teid_in_present = true;
after_tun.prior_teid_in = teid_in;
// Schedule autoremoval of this indirect tunnel
uint32_t after_teidin = after_tun.teid_in;
uint32_t before_teidin = new_tun.teid_in;
new_tun.rx_timer = task_sched.get_unique_timer();
new_tun.rx_timer.set(500, [this, before_teidin, after_teidin](uint32_t tid) {
auto it = tunnels.find(after_teidin);
if (it != tunnels.end()) {
tunnel& after_tun = it->second;
if (after_tun.prior_teid_in_present) {
after_tun.prior_teid_in_present = false;
set_tunnel_status(after_tun.teid_in, true);
}
// else: indirect tunnel already removed
} else {
logger.info("Callback to automatic indirect tunnel deletion called for non-existent TEID=%d", after_teidin);
}
// This will self-destruct the callback object
rem_tunnel(before_teidin);
});
new_tun.rx_timer.run();
}
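A minimal sketch of the auto-removal pattern introduced above, assuming srslte::unique_timer is a one-shot timer that is (re)armed with run() and cancelled when destroyed:

srslte::unique_timer t = task_sched.get_unique_timer();
t.set(500, [](uint32_t tid) {
  // expiry callback: fires once, 500 ms after the last run() call
});
t.run(); // arm; the S1-U RX path calls run() again on traffic, restarting the 500 ms window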
// Connect tunnels if forwarding is activated
@ -275,16 +298,11 @@ void gtpu::rem_tunnel(uint32_t teidin)
logger.warning("Removing GTPU tunnel TEID In=0x%x", teidin);
return;
}
if (it->second.fwd_teid_in_present) {
// Forward End Marker to forwarding tunnel, before deleting tunnel
end_marker(it->second.fwd_teid_in);
it->second.fwd_teid_in_present = false;
}
auto ue_it = ue_teidin_db.find(it->second.rnti);
std::vector<uint32_t>& lcid_tunnels = ue_it->second[it->second.lcid];
lcid_tunnels.erase(std::remove(lcid_tunnels.begin(), lcid_tunnels.end(), teidin), lcid_tunnels.end());
logger.debug("TEID In=%d for rnti=0x%x erased", teidin, it->second.rnti);
tunnels.erase(it);
logger.debug("TEID In=%d erased", teidin);
}
void gtpu::rem_user(uint16_t rnti)
@ -300,6 +318,39 @@ void gtpu::rem_user(uint16_t rnti)
}
}
void gtpu::handle_end_marker(tunnel& rx_tunnel)
{
uint16_t rnti = rx_tunnel.rnti;
logger.info("Received GTPU End Marker for rnti=0x%x.", rnti);
// TS 36.300, Sec 10.1.2.2.1 - Path Switch upon handover
if (rx_tunnel.fwd_teid_in_present) {
// END MARKER should be forwarded to TeNB if forwarding is activated
end_marker(rx_tunnel.fwd_teid_in);
rx_tunnel.fwd_teid_in_present = false;
rem_tunnel(rx_tunnel.teid_in);
} else {
// TeNB switches paths, and flush PDUs that have been buffered
auto rnti_it = ue_teidin_db.find(rnti);
if (rnti_it == ue_teidin_db.end()) {
logger.error("No rnti=0x%x entry for associated TEID=%d", rnti, rx_tunnel.teid_in);
return;
}
std::vector<uint32_t>& bearer_tunnels = rnti_it->second[rx_tunnel.lcid];
for (uint32_t new_teidin : bearer_tunnels) {
tunnel& new_tun = tunnels.at(new_teidin);
if (new_teidin != rx_tunnel.teid_in and new_tun.prior_teid_in_present and
new_tun.prior_teid_in == rx_tunnel.teid_in) {
rem_tunnel(new_tun.prior_teid_in);
new_tun.prior_teid_in_present = false;
set_tunnel_status(new_tun.teid_in, true);
break;
}
}
}
}
void gtpu::handle_gtpu_s1u_rx_packet(srslte::unique_byte_buffer_t pdu, const sockaddr_in& addr)
{
logger.debug("Received %d bytes from S1-U interface", pdu->N_bytes);
@ -310,11 +361,20 @@ void gtpu::handle_gtpu_s1u_rx_packet(srslte::unique_byte_buffer_t pdu, const soc
return;
}
if (header.teid != 0 && tunnels.count(header.teid) == 0) {
// Received G-PDU for non-existing and non-zero TEID.
// Sending GTP-U error indication
error_indication(addr.sin_addr.s_addr, addr.sin_port, header.teid);
return;
tunnel* rx_tunnel = nullptr;
if (header.teid != 0) {
auto it = tunnels.find(header.teid);
if (it == tunnels.end()) {
// Received G-PDU for non-existing and non-zero TEID.
// Sending GTP-U error indication
error_indication(addr.sin_addr.s_addr, addr.sin_port, header.teid);
return;
}
rx_tunnel = &it->second;
if (rx_tunnel->rx_timer.is_valid()) {
// Restart Rx timer
rx_tunnel->rx_timer.run();
}
}
switch (header.message_type) {
@ -353,31 +413,11 @@ void gtpu::handle_gtpu_s1u_rx_packet(srslte::unique_byte_buffer_t pdu, const soc
}
}
} break;
case GTPU_MSG_END_MARKER: {
tunnel& old_tun = tunnels.find(header.teid)->second;
uint16_t rnti = old_tun.rnti;
logger.info("Received GTPU End Marker for rnti=0x%x.", rnti);
// TS 36.300, Sec 10.1.2.2.1 - Path Switch upon handover
if (old_tun.fwd_teid_in_present) {
// END MARKER should be forwarded to TeNB if forwarding is activated
end_marker(old_tun.fwd_teid_in);
old_tun.fwd_teid_in_present = false;
} else {
// TeNB switches paths, and flush PDUs that have been buffered
std::vector<uint32_t>& bearer_tunnels = ue_teidin_db.find(old_tun.rnti)->second[old_tun.lcid];
for (uint32_t new_teidin : bearer_tunnels) {
tunnel& new_tun = tunnels.at(new_teidin);
if (new_teidin != old_tun.teid_in and new_tun.prior_teid_in_present and
new_tun.prior_teid_in == old_tun.teid_in) {
new_tun.prior_teid_in_present = false;
set_tunnel_status(new_tun.teid_in, true);
}
}
}
case GTPU_MSG_END_MARKER:
handle_end_marker(*rx_tunnel);
break;
}
default:
logger.warning("Unhandled GTPU message type=%d", header.message_type);
break;
}
}
@ -480,13 +520,22 @@ void gtpu::echo_response(in_addr_t addr, in_port_t port, uint16_t seq)
/****************************************************************************
* GTP-U END MARKER
***************************************************************************/
void gtpu::end_marker(uint32_t teidin)
bool gtpu::end_marker(uint32_t teidin)
{
logger.info("TX GTPU End Marker.");
tunnel& tunnel = tunnels.find(teidin)->second;
auto it = tunnels.find(teidin);
if (it == tunnels.end()) {
logger.error("TEID=%d not found to send the end marker to", teidin);
return false;
}
tunnel& tunnel = it->second;
gtpu_header_t header = {};
unique_byte_buffer_t pdu = make_byte_buffer();
if (pdu == nullptr) {
logger.warning("Failed to allocate buffer to send End Marker to TEID=%d", teidin);
return false;
}
// header
header.flags = GTPU_FLAGS_VERSION_V1 | GTPU_FLAGS_GTP_PROTOCOL;
@ -502,6 +551,7 @@ void gtpu::end_marker(uint32_t teidin)
servaddr.sin_port = htons(GTPU_PORT);
sendto(fd, pdu->msg, pdu->N_bytes, MSG_EOR, (struct sockaddr*)&servaddr, sizeof(struct sockaddr_in));
return true;
}
/****************************************************************************

@ -183,11 +183,7 @@ srslte::proc_outcome_t s1ap::s1_setup_proc_t::start_mme_connection()
}
if (not s1ap_ptr->connect_mme()) {
procInfo("Failed to initiate SCTP socket. Attempting reconnection in %d seconds",
s1ap_ptr->mme_connect_timer.duration() / 1000);
srslte::console("Failed to initiate SCTP socket. Attempting reconnection in %d seconds\n",
s1ap_ptr->mme_connect_timer.duration() / 1000);
s1ap_ptr->mme_connect_timer.run();
procInfo("Could not connect to MME");
return srslte::proc_outcome_t::error;
}
@ -212,7 +208,7 @@ srslte::proc_outcome_t s1ap::s1_setup_proc_t::react(const srsenb::s1ap::s1_setup
procInfo("S1Setup procedure completed successfully");
return srslte::proc_outcome_t::success;
}
procError("S1Setup failed. Exiting...");
procError("S1Setup failed.");
srslte::console("S1setup failed\n");
return srslte::proc_outcome_t::error;
}
@ -220,8 +216,15 @@ srslte::proc_outcome_t s1ap::s1_setup_proc_t::react(const srsenb::s1ap::s1_setup
void s1ap::s1_setup_proc_t::then(const srslte::proc_state_t& result) const
{
if (result.is_error()) {
procInfo("Failed to initiate S1 connection. Attempting reconnection in %d seconds",
s1ap_ptr->mme_connect_timer.duration() / 1000);
srslte::console("Failed to initiate S1 connection. Attempting reconnection in %d seconds\n",
s1ap_ptr->mme_connect_timer.duration() / 1000);
s1ap_ptr->mme_connect_timer.run();
s1ap_ptr->stack->remove_mme_socket(s1ap_ptr->s1ap_socket.get_socket());
s1ap_ptr->s1ap_socket.reset();
procInfo("S1AP socket closed.");
// Try again in 10 seconds
}
}
@ -438,11 +441,13 @@ bool s1ap::connect_mme()
&s1ap_socket, srslte::net_utils::socket_type::seqpacket, args.s1c_bind_addr.c_str())) {
return false;
}
logger.info("SCTP socket opened. fd=%d", s1ap_socket.fd());
// Connect to the MME address
if (not s1ap_socket.connect_to(args.mme_addr.c_str(), MME_PORT, &mme_addr)) {
return false;
}
logger.info("SCTP socket connected with MME. fd=%d", s1ap_socket.fd());
// Assign a handler to rx MME packets (going to run in a different thread)
stack->add_mme_socket(s1ap_socket.fd());

@ -70,3 +70,7 @@ add_test(sched_tpc_test sched_tpc_test)
add_executable(sched_dci_test sched_dci_test.cc)
target_link_libraries(sched_dci_test srslte_common srsenb_mac srslte_mac sched_test_common)
add_test(sched_dci_test sched_dci_test)
add_executable(sched_benchmark_test sched_benchmark.cc)
target_link_libraries(sched_benchmark_test srslte_common srsenb_mac srslte_mac sched_test_common)
add_test(sched_benchmark_test sched_benchmark_test)

@ -0,0 +1,414 @@
/**
*
* \section COPYRIGHT
*
* Copyright 2013-2020 Software Radio Systems Limited
*
* By using this file, you agree to the terms and conditions set
* forth in the LICENSE file which can be found at the top level of
* the distribution.
*
*/
#include "sched_test_common.h"
#include "srsenb/hdr/stack/mac/sched.h"
#include "srslte/adt/accumulators.h"
#include <chrono>
namespace srsenb {
struct run_params {
uint32_t nof_prbs;
uint32_t nof_ues;
uint32_t nof_ttis;
uint32_t cqi;
const char* sched_policy;
};
struct run_params_range {
std::vector<uint32_t> nof_prbs = {6, 15, 25, 50, 75, 100};
std::vector<uint32_t> nof_ues = {1, 2, 5};
uint32_t nof_ttis = 10000;
std::vector<uint32_t> cqi = {5, 10, 15};
std::vector<const char*> sched_policy = {"time_rr", "time_pf"};
size_t nof_runs() const { return nof_prbs.size() * nof_ues.size() * cqi.size() * sched_policy.size(); }
run_params get_params(size_t idx) const
{
run_params r = {};
r.nof_ttis = nof_ttis;
r.nof_prbs = nof_prbs[idx % nof_prbs.size()];
idx /= nof_prbs.size();
r.nof_ues = nof_ues[idx % nof_ues.size()];
idx /= nof_ues.size();
r.cqi = cqi[idx % cqi.size()];
idx /= cqi.size();
r.sched_policy = sched_policy.at(idx);
return r;
}
};
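The get_params() helper above decodes a flat run index into one parameter combination by treating the index as a mixed-radix number, with each parameter vector acting as one digit (least-significant dimension first). A minimal standalone sketch of the same decomposition, using the default dimension sizes as assumed values:
#include <cstdio>
#include <vector>
// Decode a flat index into per-dimension indices, mirroring run_params_range::get_params():
// take the remainder for the current dimension, then divide to move on to the next one.
static std::vector<size_t> decode(size_t idx, const std::vector<size_t>& dims)
{
  std::vector<size_t> out;
  for (size_t d : dims) {
    out.push_back(idx % d);
    idx /= d;
  }
  return out;
}
int main()
{
  // 6 PRB options x 3 UE counts x 3 CQIs x 2 policies = 108 runs, as in the defaults above
  std::vector<size_t> digits = decode(7, {6, 3, 3, 2});
  // run #7 -> nof_prbs=15 (idx 1), nof_ues=2 (idx 1), cqi=5 (idx 0), policy="time_rr" (idx 0)
  printf("%zu %zu %zu %zu\n", digits[0], digits[1], digits[2], digits[3]);
  return 0;
}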
class sched_tester : public sched_sim_base
{
static std::vector<sched_interface::cell_cfg_t> get_cell_cfg(srslte::span<const sched_cell_params_t> cell_params)
{
std::vector<sched_interface::cell_cfg_t> cell_cfg_list;
for (const auto& c : cell_params) {
cell_cfg_list.push_back(c.cfg);
}
return cell_cfg_list;
}
public:
explicit sched_tester(sched* sched_obj_,
const sched_interface::sched_args_t& sched_args,
const std::vector<sched_interface::cell_cfg_t>& cell_cfg_list) :
sched_sim_base(sched_obj_, sched_args, cell_cfg_list),
sched_ptr(sched_obj_),
dl_result(cell_cfg_list.size()),
ul_result(cell_cfg_list.size())
{}
srslog::basic_logger& mac_logger = srslog::fetch_basic_logger("MAC");
sched* sched_ptr;
uint32_t dl_bytes_per_tti = 100000;
uint32_t ul_bytes_per_tti = 100000;
run_params current_run_params = {};
std::vector<sched_interface::dl_sched_res_t> dl_result;
std::vector<sched_interface::ul_sched_res_t> ul_result;
struct throughput_stats {
srslte::rolling_average<float> mean_dl_tbs, mean_ul_tbs, avg_dl_mcs, avg_ul_mcs;
srslte::rolling_average<float> avg_latency;
};
throughput_stats total_stats;
int advance_tti()
{
tti_point tti_rx = get_tti_rx().is_valid() ? get_tti_rx() + 1 : tti_point(0);
mac_logger.set_context(tti_rx.to_uint());
new_tti(tti_rx);
for (uint32_t cc = 0; cc < get_cell_params().size(); ++cc) {
std::chrono::time_point<std::chrono::steady_clock> tp = std::chrono::steady_clock::now();
TESTASSERT(sched_ptr->dl_sched(to_tx_dl(tti_rx).to_uint(), cc, dl_result[cc]) == SRSLTE_SUCCESS);
TESTASSERT(sched_ptr->ul_sched(to_tx_ul(tti_rx).to_uint(), cc, ul_result[cc]) == SRSLTE_SUCCESS);
std::chrono::time_point<std::chrono::steady_clock> tp2 = std::chrono::steady_clock::now();
std::chrono::nanoseconds tdur = std::chrono::duration_cast<std::chrono::nanoseconds>(tp2 - tp);
total_stats.avg_latency.push(tdur.count());
}
sf_output_res_t sf_out{get_cell_params(), tti_rx, ul_result, dl_result};
update(sf_out);
process_stats(sf_out);
return SRSLTE_SUCCESS;
}
void set_external_tti_events(const sim_ue_ctxt_t& ue_ctxt, ue_tti_events& pending_events) override
{
// Keep the UE buffers loaded once the ConRes CE has been received, so the scheduler always has data to allocate
if (ue_ctxt.conres_rx) {
sched_ptr->ul_bsr(ue_ctxt.rnti, 1, ul_bytes_per_tti);
sched_ptr->dl_rlc_buffer_state(ue_ctxt.rnti, 3, dl_bytes_per_tti, 0);
if (get_tti_rx().to_uint() % 5 == 0) {
for (auto& cc : pending_events.cc_list) {
cc.dl_cqi = current_run_params.cqi;
cc.ul_snr = 40;
}
}
}
}
void process_stats(sf_output_res_t& sf_out)
{
for (uint32_t cc = 0; cc < get_cell_params().size(); ++cc) {
uint32_t dl_tbs = 0, ul_tbs = 0, dl_mcs = 0, ul_mcs = 0;
for (const auto& data : sf_out.dl_cc_result[cc].data) {
dl_tbs += data.tbs[0];
dl_tbs += data.tbs[1];
dl_mcs = std::max(dl_mcs, data.dci.tb[0].mcs_idx);
}
total_stats.mean_dl_tbs.push(dl_tbs);
if (not sf_out.dl_cc_result[cc].data.empty()) {
total_stats.avg_dl_mcs.push(dl_mcs);
}
for (const auto& pusch : sf_out.ul_cc_result[cc].pusch) {
ul_tbs += pusch.tbs;
ul_mcs = std::max(ul_mcs, pusch.dci.tb.mcs_idx);
}
total_stats.mean_ul_tbs.push(ul_tbs);
if (not sf_out.ul_cc_result[cc].pusch.empty()) {
total_stats.avg_ul_mcs.push(ul_mcs);
}
}
}
};
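advance_tti() times every dl_sched()/ul_sched() call with std::chrono::steady_clock and feeds the delta into a rolling average. The same measurement pattern in isolation — the work lambda is a placeholder for the scheduler call:
#include <chrono>
#include <cstdio>
// Time a single call with two steady_clock samples, as advance_tti() does around dl_sched()/ul_sched().
template <typename Func>
static long long timed_call_ns(Func&& f)
{
  auto t0 = std::chrono::steady_clock::now();
  f();
  auto t1 = std::chrono::steady_clock::now();
  return std::chrono::duration_cast<std::chrono::nanoseconds>(t1 - t0).count();
}
int main()
{
  long long total_ns = 0;
  const int reps     = 1000;
  for (int i = 0; i < reps; ++i) {
    total_ns += timed_call_ns([] { /* stand-in for sched_ptr->dl_sched(...) */ });
  }
  printf("avg latency: %lld ns\n", total_ns / reps);
  return 0;
}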
struct run_data {
run_params params;
float avg_dl_throughput;
float avg_ul_throughput;
float avg_dl_mcs;
float avg_ul_mcs;
std::chrono::microseconds avg_latency;
};
int run_benchmark_scenario(run_params params, std::vector<run_data>& run_results)
{
std::vector<sched_interface::cell_cfg_t> cell_list(1, generate_default_cell_cfg(params.nof_prbs));
sched_interface::ue_cfg_t ue_cfg_default = generate_default_ue_cfg();
sched_interface::sched_args_t sched_args = {};
sched_args.sched_policy = params.sched_policy;
sched sched_obj;
rrc_dummy rrc{};
sched_obj.init(&rrc, sched_args);
sched_tester tester(&sched_obj, sched_args, cell_list);
tester.total_stats = {};
tester.current_run_params = params;
for (uint32_t ue_idx = 0; ue_idx < params.nof_ues; ++ue_idx) {
uint16_t rnti = 0x46 + ue_idx;
// Add user (first need to advance to a PRACH TTI)
while (not srslte_prach_tti_opportunity_config_fdd(
tester.get_cell_params()[ue_cfg_default.supported_cc_list[0].enb_cc_idx].cfg.prach_config,
tester.get_tti_rx().to_uint(),
-1)) {
TESTASSERT(tester.advance_tti() == SRSLTE_SUCCESS);
}
TESTASSERT(tester.add_user(rnti, ue_cfg_default, 16) == SRSLTE_SUCCESS);
TESTASSERT(tester.advance_tti() == SRSLTE_SUCCESS);
}
// Ignore the stats of the first TTIs until DRB1 has been created for all UEs
auto ue_db_ctxt = tester.get_enb_ctxt().ue_db;
while (not std::all_of(ue_db_ctxt.begin(), ue_db_ctxt.end(), [](std::pair<uint16_t, const sim_ue_ctxt_t*> p) {
return p.second->conres_rx;
})) {
tester.advance_tti();
ue_db_ctxt = tester.get_enb_ctxt().ue_db;
}
tester.total_stats = {};
// Run benchmark
for (uint32_t count = 0; count < params.nof_ttis; ++count) {
tester.advance_tti();
}
run_data run_result = {};
run_result.params = params;
run_result.avg_dl_throughput = tester.total_stats.mean_dl_tbs.value() * 8.0F / 1e-3F;
run_result.avg_ul_throughput = tester.total_stats.mean_ul_tbs.value() * 8.0F / 1e-3F;
run_result.avg_dl_mcs = tester.total_stats.avg_dl_mcs.value();
run_result.avg_ul_mcs = tester.total_stats.avg_ul_mcs.value();
run_result.avg_latency = std::chrono::microseconds(static_cast<int>(tester.total_stats.avg_latency.value() / 1000));
run_results.push_back(run_result);
return SRSLTE_SUCCESS;
}
run_data expected_run_result(run_params params)
{
assert(params.cqi == 15 && "only cqi=15 supported for now");
run_data ret{};
int tbs_idx = srslte_ra_tbs_idx_from_mcs(28, false, false);
int tbs = srslte_ra_tbs_from_idx(tbs_idx, params.nof_prbs);
ret.avg_dl_throughput = static_cast<float>(tbs) * 1e3F; // bps
tbs_idx = srslte_ra_tbs_idx_from_mcs(24, false, true);
uint32_t nof_pusch_prbs = params.nof_prbs - (params.nof_prbs == 6 ? 2 : 4);
tbs = srslte_ra_tbs_from_idx(tbs_idx, nof_pusch_prbs);
ret.avg_ul_throughput = static_cast<float>(tbs) * 1e3F; // bps
ret.avg_dl_mcs = 27;
ret.avg_ul_mcs = 22;
switch (params.nof_prbs) {
case 6:
ret.avg_dl_mcs = 25;
ret.avg_dl_throughput *= 0.68;
ret.avg_ul_throughput *= 0.75;
break;
case 15:
ret.avg_dl_throughput *= 0.95;
ret.avg_ul_throughput *= 0.7;
break;
default:
ret.avg_dl_throughput *= 0.97;
ret.avg_ul_throughput *= 0.85;
break;
}
return ret;
}
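The reference rates above come straight from the TS 36.213 TBS tables: with the normal table, mcs=28 maps to I_TBS=26, which at 100 PRBs gives a 75376-bit transport block per 1 ms TTI, i.e. about 75.4 Mbps before the 0.97 overhead factor. A sketch of that arithmetic with the table value hard-coded rather than looked up:
#include <cstdio>
int main()
{
  // I_TBS=26, N_PRB=100 -> TBS=75376 bits (TS 36.213 Table 7.1.7.2.1-1)
  const double tbs_bits     = 75376.0;
  const double ttis_per_sec = 1000.0;        // one TB per 1 ms TTI
  double peak_bps = tbs_bits * ttis_per_sec; // ~75.4 Mbps
  double expected = peak_bps * 0.97;         // default DL overhead factor used above
  printf("peak %.2f Mbps, expected %.2f Mbps\n", peak_bps / 1e6, expected / 1e6);
  return 0;
}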
void print_benchmark_results(const std::vector<run_data>& run_results)
{
srslog::flush();
fmt::print("run | Nprb | cqi | sched pol | Nue | DL/UL [Mbps] | DL/UL mcs | DL/UL OH [%] | latency "
"[usec]\n");
fmt::print("---------------------------------------------------------------------------------------"
"------\n");
for (uint32_t i = 0; i < run_results.size(); ++i) {
const run_data& r = run_results[i];
int tbs_idx = srslte_ra_tbs_idx_from_mcs(28, false, false);
int tbs = srslte_ra_tbs_from_idx(tbs_idx, r.params.nof_prbs);
float dl_rate_overhead = 1.0F - r.avg_dl_throughput / (static_cast<float>(tbs) * 1e3F);
tbs_idx = srslte_ra_tbs_idx_from_mcs(24, false, true);
uint32_t nof_pusch_prbs = r.params.nof_prbs - (r.params.nof_prbs == 6 ? 2 : 4);
tbs = srslte_ra_tbs_from_idx(tbs_idx, nof_pusch_prbs);
float ul_rate_overhead = 1.0F - r.avg_ul_throughput / (static_cast<float>(tbs) * 1e3F);
fmt::print("{:>3d}{:>6d}{:>6d}{:>12}{:>6d}{:>9.2}/{:>4.2}{:>9.1f}/{:>4.1f}{:9.1f}/{:>4.1f}{:12d}\n",
i,
r.params.nof_prbs,
r.params.cqi,
r.params.sched_policy,
r.params.nof_ues,
r.avg_dl_throughput / 1e6,
r.avg_ul_throughput / 1e6,
r.avg_dl_mcs,
r.avg_ul_mcs,
dl_rate_overhead * 100,
ul_rate_overhead * 100,
r.avg_latency.count());
}
}
int run_rate_test()
{
fmt::print("\n====== Scheduler Rate Test ======\n\n");
run_params_range run_param_list{};
srslog::basic_logger& mac_logger = srslog::fetch_basic_logger("MAC");
run_param_list.nof_ues = {1};
run_param_list.cqi = {15};
std::vector<run_data> run_results;
size_t nof_runs = run_param_list.nof_runs();
for (size_t r = 0; r < nof_runs; ++r) {
run_params runparams = run_param_list.get_params(r);
mac_logger.info("\n=== New run {} ===\n", r);
TESTASSERT(run_benchmark_scenario(runparams, run_results) == SRSLTE_SUCCESS);
}
print_benchmark_results(run_results);
bool success = true;
for (auto& run : run_results) {
run_data expected = expected_run_result(run.params);
if (run.avg_dl_mcs < expected.avg_dl_mcs) {
fmt::print(
"Nprb={:>2d}: DL mcs below expected ({} < {})\n", run.params.nof_prbs, run.avg_dl_mcs, expected.avg_dl_mcs);
success = false;
}
if (run.avg_dl_throughput < expected.avg_dl_throughput) {
fmt::print("Nprb={:>2d}: DL rate below expected ({:.2} < {:.2}) Mbps\n",
run.params.nof_prbs,
run.avg_dl_throughput / 1e6,
expected.avg_dl_throughput / 1e6);
success = false;
}
if (run.avg_ul_mcs < expected.avg_ul_mcs) {
fmt::print(
"Nprb={:>2d}: UL mcs below expected ({} < {})\n", run.params.nof_prbs, run.avg_ul_mcs, expected.avg_ul_mcs);
success = false;
}
if (run.avg_ul_throughput < expected.avg_ul_throughput) {
fmt::print("Nprb={:>2d}: UL rate below expected ({:.2} < {:.2}) Mbps\n",
run.params.nof_prbs,
run.avg_ul_throughput / 1e6,
expected.avg_ul_throughput / 1e6);
success = false;
}
}
return success ? SRSLTE_SUCCESS : SRSLTE_ERROR;
}
int run_all()
{
run_params_range run_param_list{};
srslog::basic_logger& mac_logger = srslog::fetch_basic_logger("MAC");
fmt::print("Running all param combinations\n");
std::vector<run_data> run_results;
size_t nof_runs = run_param_list.nof_runs();
for (size_t r = 0; r < nof_runs; ++r) {
run_params runparams = run_param_list.get_params(r);
mac_logger.info("\n### New run {} ###\n", r);
TESTASSERT(run_benchmark_scenario(runparams, run_results) == SRSLTE_SUCCESS);
}
print_benchmark_results(run_results);
return SRSLTE_SUCCESS;
}
int run_benchmark()
{
run_params_range run_param_list{};
srslog::basic_logger& mac_logger = srslog::fetch_basic_logger("MAC");
run_param_list.nof_ttis = 1000000;
run_param_list.nof_prbs = {100};
run_param_list.cqi = {15};
run_param_list.nof_ues = {5};
run_param_list.sched_policy = {"time_pf"};
std::vector<run_data> run_results;
size_t nof_runs = run_param_list.nof_runs();
fmt::print("Running Benchmark\n");
for (size_t r = 0; r < nof_runs; ++r) {
run_params runparams = run_param_list.get_params(r);
mac_logger.info("\n### New run {} ###\n", r);
TESTASSERT(run_benchmark_scenario(runparams, run_results) == SRSLTE_SUCCESS);
}
print_benchmark_results(run_results);
return SRSLTE_SUCCESS;
}
} // namespace srsenb
int main(int argc, char* argv[])
{
// Setup the log spy to intercept error and warning log entries.
if (!srslog::install_custom_sink(
srslte::log_sink_spy::name(),
std::unique_ptr<srslte::log_sink_spy>(new srslte::log_sink_spy(srslog::get_default_log_formatter())))) {
return SRSLTE_ERROR;
}
auto* spy = static_cast<srslte::log_sink_spy*>(srslog::find_sink(srslte::log_sink_spy::name()));
if (spy == nullptr) {
return SRSLTE_ERROR;
}
auto& mac_log = srslog::fetch_basic_logger("MAC");
mac_log.set_level(srslog::basic_levels::warning);
auto& test_log = srslog::fetch_basic_logger("TEST", *spy, false);
test_log.set_level(srslog::basic_levels::warning);
// Start the log backend.
srslog::init();
if (argc == 1 or strcmp(argv[1], "test") == 0) {
TESTASSERT(srsenb::run_rate_test() == SRSLTE_SUCCESS);
} else if (strcmp(argv[1], "benchmark") == 0) {
TESTASSERT(srsenb::run_benchmark() == SRSLTE_SUCCESS);
} else {
TESTASSERT(srsenb::run_all() == SRSLTE_SUCCESS);
}
return 0;
}
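For reference, main() selects the scenario from argv[1]: no argument or "test" runs the rate test, "benchmark" runs the long 1e6-TTI single-configuration benchmark (100 PRBs, 5 UEs, time_pf), and any other token sweeps every parameter combination via run_all(). With the CMake target added above, that is e.g. ./sched_benchmark_test benchmark.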

@ -26,7 +26,7 @@
using namespace srsenb;
uint32_t const seed = std::chrono::system_clock::now().time_since_epoch().count();
uint32_t seed = std::chrono::system_clock::now().time_since_epoch().count();
/*******************
* Logging *
@ -166,7 +166,7 @@ int test_scell_activation(uint32_t sim_number, test_scell_activation_params para
}
};
generate_data(20, 1.0, P_ul_sr, randf());
tester.test_next_ttis(generator.tti_events);
TESTASSERT(tester.test_next_ttis(generator.tti_events) == SRSLTE_SUCCESS);
// Event: Reconf Complete. Activate SCells. Check if CE correctly transmitted
generator.step_tti();
@ -178,7 +178,7 @@ int test_scell_activation(uint32_t sim_number, test_scell_activation_params para
user->ue_sim_cfg->ue_cfg.supported_cc_list[i].active = true;
user->ue_sim_cfg->ue_cfg.supported_cc_list[i].enb_cc_idx = cc_idxs[i];
}
tester.test_next_ttis(generator.tti_events);
TESTASSERT(tester.test_next_ttis(generator.tti_events) == SRSLTE_SUCCESS);
auto activ_list = tester.get_enb_ue_cc_map(rnti1);
for (uint32_t i = 0; i < cc_idxs.size(); ++i) {
TESTASSERT(activ_list[i] >= 0);
@ -186,7 +186,7 @@ int test_scell_activation(uint32_t sim_number, test_scell_activation_params para
// TEST: When a DL newtx takes place, it should also encode the CE
for (uint32_t i = 0; i < 100; ++i) {
if (tester.tti_info.dl_sched_result[params.pcell_idx].nof_data_elems > 0) {
if (not tester.tti_info.dl_sched_result[params.pcell_idx].data.empty()) {
// DL data was allocated
if (tester.tti_info.dl_sched_result[params.pcell_idx].data[0].nof_pdu_elems[0] > 0) {
// it is a new DL tx
@ -196,7 +196,7 @@ int test_scell_activation(uint32_t sim_number, test_scell_activation_params para
}
}
generator.step_tti();
tester.test_next_ttis(generator.tti_events);
TESTASSERT(tester.test_next_ttis(generator.tti_events) == SRSLTE_SUCCESS);
}
// Event: Wait for UE to receive and ack CE. Send cqi==0, which should not activate the SCell
@ -207,12 +207,12 @@ int test_scell_activation(uint32_t sim_number, test_scell_activation_params para
generator.step_tti();
}
}
tester.test_next_ttis(generator.tti_events);
TESTASSERT(tester.test_next_ttis(generator.tti_events) == SRSLTE_SUCCESS);
// The UE should now have received the CE
// Event: Generate a bit more data, it should *not* go through SCells until we send a CQI
generate_data(5, P_dl, P_ul_sr, randf());
tester.test_next_ttis(generator.tti_events);
TESTASSERT(tester.test_next_ttis(generator.tti_events) == SRSLTE_SUCCESS);
TESTASSERT(tester.sched_stats->users[rnti1].tot_dl_sched_data[params.pcell_idx] > 0);
TESTASSERT(tester.sched_stats->users[rnti1].tot_ul_sched_data[params.pcell_idx] > 0);
for (uint32_t i = 1; i < cc_idxs.size(); ++i) {
@ -226,7 +226,7 @@ int test_scell_activation(uint32_t sim_number, test_scell_activation_params para
tester.dl_cqi_info(tester.tti_rx.to_uint(), rnti1, cc_idxs[i], cqi);
}
generate_data(10, 1.0, 1.0, 1.0);
tester.test_next_ttis(generator.tti_events);
TESTASSERT(tester.test_next_ttis(generator.tti_events) == SRSLTE_SUCCESS);
uint64_t tot_dl_sched_data = 0;
uint64_t tot_ul_sched_data = 0;
for (const auto& c : cc_idxs) {

@ -64,7 +64,7 @@ int test_pusch_collisions(const sf_output_res_t& sf_out, uint32_t enb_cc_idx, co
try_ul_fill({cell_params.cfg.cell.nof_prb - pucch_nrb, (uint32_t)cell_params.cfg.cell.nof_prb}, "PUCCH", strict);
/* TEST: check collisions in the UL PUSCH */
for (uint32_t i = 0; i < ul_result.nof_dci_elems; ++i) {
for (uint32_t i = 0; i < ul_result.pusch.size(); ++i) {
uint32_t L, RBstart;
srslte_ra_type2_from_riv(ul_result.pusch[i].dci.type2_alloc.riv, &L, &RBstart, nof_prb, nof_prb);
strict = ul_result.pusch[i].needs_pdcch or nof_prb != 6; // Msg3 may collide with PUCCH at PRB==6
@ -122,12 +122,12 @@ int test_pdsch_collisions(const sf_output_res_t& sf_out, uint32_t enb_cc_idx, co
};
// Decode BC allocations, check collisions, and fill cumulative mask
for (uint32_t i = 0; i < dl_result.nof_bc_elems; ++i) {
for (uint32_t i = 0; i < dl_result.bc.size(); ++i) {
TESTASSERT(try_dl_mask_fill(dl_result.bc[i].dci, "BC") == SRSLTE_SUCCESS);
}
// Decode RAR allocations, check collisions, and fill cumulative mask
for (uint32_t i = 0; i < dl_result.nof_rar_elems; ++i) {
for (uint32_t i = 0; i < dl_result.rar.size(); ++i) {
TESTASSERT(try_dl_mask_fill(dl_result.rar[i].dci, "RAR") == SRSLTE_SUCCESS);
}
@ -140,7 +140,7 @@ int test_pdsch_collisions(const sf_output_res_t& sf_out, uint32_t enb_cc_idx, co
}
// Decode Data allocations, check collisions and fill cumulative mask
for (uint32_t i = 0; i < dl_result.nof_data_elems; ++i) {
for (uint32_t i = 0; i < dl_result.data.size(); ++i) {
TESTASSERT(try_dl_mask_fill(dl_result.data[i].dci, "data") == SRSLTE_SUCCESS);
}
@ -179,8 +179,8 @@ int test_sib_scheduling(const sf_output_res_t& sf_out, uint32_t enb_cc_idx)
bool sib1_expected = ((sfn % 2) == 0) and sf_idx == 5;
using bc_elem = const sched_interface::dl_sched_bc_t;
bc_elem* bc_begin = &dl_result.bc[0];
bc_elem* bc_end = &dl_result.bc[dl_result.nof_bc_elems];
bc_elem* bc_begin = dl_result.bc.begin();
bc_elem* bc_end = dl_result.bc.end();
/* Test if SIB1 was correctly scheduled */
auto it = std::find_if(bc_begin, bc_end, [](bc_elem& elem) { return elem.index == 0; });
@ -197,6 +197,9 @@ int test_sib_scheduling(const sf_output_res_t& sf_out, uint32_t enb_cc_idx)
"Allocated BC process with TBS=%d < sib_len=%d",
bc->tbs,
cell_params.cfg.sibs[bc->index].len);
CONDERROR(bc->dci.rnti != 0xffff, "Invalid rnti=0x%x for SIB%d", bc->dci.rnti, bc->index);
CONDERROR(bc->dci.format != SRSLTE_DCI_FORMAT1A, "Invalid DCI format for SIB%d", bc->index);
uint32_t x = (bc->index - 1) * cell_params.cfg.si_window_ms;
uint32_t sf = x % 10;
uint32_t sfn_start = sfn;
@ -235,7 +238,7 @@ int test_pdcch_collisions(const sf_output_res_t& sf_out,
};
/* TEST: verify there are no dci collisions for UL, DL data, BC, RAR */
for (uint32_t i = 0; i < ul_result.nof_dci_elems; ++i) {
for (uint32_t i = 0; i < ul_result.pusch.size(); ++i) {
const auto& pusch = ul_result.pusch[i];
if (not pusch.needs_pdcch) {
// In case of non-adaptive retx or Msg3
@ -243,13 +246,13 @@ int test_pdcch_collisions(const sf_output_res_t& sf_out,
}
try_cce_fill(pusch.dci.location, "UL");
}
for (uint32_t i = 0; i < dl_result.nof_data_elems; ++i) {
for (uint32_t i = 0; i < dl_result.data.size(); ++i) {
try_cce_fill(dl_result.data[i].dci.location, "DL data");
}
for (uint32_t i = 0; i < dl_result.nof_bc_elems; ++i) {
for (uint32_t i = 0; i < dl_result.bc.size(); ++i) {
try_cce_fill(dl_result.bc[i].dci.location, "DL BC");
}
for (uint32_t i = 0; i < dl_result.nof_rar_elems; ++i) {
for (uint32_t i = 0; i < dl_result.rar.size(); ++i) {
try_cce_fill(dl_result.rar[i].dci.location, "DL RAR");
}
@ -268,7 +271,7 @@ int test_dci_content_common(const sf_output_res_t& sf_out, uint32_t enb_cc_idx)
const auto& ul_result = sf_out.ul_cc_result[enb_cc_idx];
std::set<uint16_t> alloc_rntis;
for (uint32_t i = 0; i < ul_result.nof_dci_elems; ++i) {
for (uint32_t i = 0; i < ul_result.pusch.size(); ++i) {
const auto& pusch = ul_result.pusch[i];
uint16_t rnti = pusch.dci.rnti;
CONDERROR(pusch.tbs == 0, "Allocated PUSCH with invalid TBS=%d", pusch.tbs);
@ -287,7 +290,7 @@ int test_dci_content_common(const sf_output_res_t& sf_out, uint32_t enb_cc_idx)
}
alloc_rntis.clear();
for (uint32_t i = 0; i < dl_result.nof_data_elems; ++i) {
for (uint32_t i = 0; i < dl_result.data.size(); ++i) {
auto& data = dl_result.data[i];
uint16_t rnti = data.dci.rnti;
CONDERROR(data.tbs[0] == 0 and data.tbs[1] == 0, "Allocated DL data has empty TBS");
@ -311,8 +314,24 @@ int test_dci_content_common(const sf_output_res_t& sf_out, uint32_t enb_cc_idx)
}
}
}
for (uint32_t i = 0; i < dl_result.nof_bc_elems; ++i) {
auto& bc = dl_result.bc[i];
// TEST: max coderate is not exceeded for RA and Broadcast
srslte_dl_sf_cfg_t dl_sf = {};
dl_sf.cfi = sf_out.dl_cc_result[enb_cc_idx].cfi;
dl_sf.tti = to_tx_dl(sf_out.tti_rx).to_uint();
auto test_ra_bc_coderate = [&dl_sf, &cell_params](uint32_t tbs, const srslte_dci_dl_t& dci) {
srslte_pdsch_grant_t grant = {};
srslte_ra_dl_grant_to_grant_prb_allocation(&dci, &grant, cell_params.cfg.cell.nof_prb);
uint32_t nof_re = srslte_ra_dl_grant_nof_re(&cell_params.cfg.cell, &dl_sf, &grant);
float coderate = srslte_coderate(tbs * 8, nof_re);
const uint32_t Qm = 2;
CONDERROR(
coderate > 0.932f * Qm, "Max coderate was exceeded for %s DCI", dci.rnti == SRSLTE_SIRNTI ? "SIB" : "RAR");
return SRSLTE_SUCCESS;
};
for (uint32_t i = 0; i < dl_result.bc.size(); ++i) {
const sched_interface::dl_sched_bc_t& bc = dl_result.bc[i];
if (bc.type == sched_interface::dl_sched_bc_t::BCCH) {
CONDERROR(bc.tbs < cell_params.cfg.sibs[bc.index].len,
"Allocated BC process with TBS=%d < sib_len=%d",
@ -323,10 +342,16 @@ int test_dci_content_common(const sf_output_res_t& sf_out, uint32_t enb_cc_idx)
} else {
TESTERROR("Invalid broadcast process id=%d", (int)bc.type);
}
TESTASSERT(test_ra_bc_coderate(bc.tbs, bc.dci) == SRSLTE_SUCCESS);
}
for (uint32_t i = 0; i < dl_result.nof_rar_elems; ++i) {
for (uint32_t i = 0; i < dl_result.rar.size(); ++i) {
const auto& rar = dl_result.rar[i];
CONDERROR(rar.tbs == 0, "Allocated RAR process with invalid TBS=%d", rar.tbs);
// TEST: max coderate is not exceeded
TESTASSERT(test_ra_bc_coderate(rar.tbs, rar.dci) == SRSLTE_SUCCESS);
}
return SRSLTE_SUCCESS;

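Both the test_ra_bc_coderate() lambda above and the lower_coderate() helper below enforce the same bound: the coderate returned by srslte_coderate() — roughly payload bits per resource element — must not exceed 0.932 times the Qm bits each RE can carry (Qm=2 for the QPSK used by SIB/RAR). A worked check with illustrative numbers:
#include <cstdio>
int main()
{
  const double Qm       = 2.0;         // QPSK
  const double tbs_bits = 8 * 40;      // e.g. a 40-byte SIB payload (illustrative)
  const double nof_re   = 276.0;       // illustrative RE count for the grant
  double coderate = tbs_bits / nof_re; // approximation of srslte_coderate()
  printf("coderate=%.3f, limit=%.3f -> %s\n",
         coderate, 0.932 * Qm, coderate <= 0.932 * Qm ? "OK" : "exceeded");
  return 0;
}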
@ -63,7 +63,7 @@ bool lower_coderate(tbs_info tb, uint32_t nof_re, const tbs_test_args& args)
srslte_mod_t mod =
(args.is_ul) ? srslte_ra_ul_mod_from_mcs(tb.mcs) : srslte_ra_dl_mod_from_mcs(tb.mcs, args.use_tbs_index_alt);
float Qm = std::min(args.get_max_Qm(), srslte_mod_bits_x_symbol(mod));
return coderate <= 0.930f * Qm;
return coderate <= 0.932f * Qm;
}
int test_mcs_tbs_dl_helper(const sched_cell_params_t& cell_params, const tbs_test_args& args, tbs_info* result)
@ -112,7 +112,7 @@ int test_mcs_tbs_dl_helper(const sched_cell_params_t& cell_params, const tbs_tes
tbs_info tb2;
for (tb2.mcs = ret.mcs + 1; tb2.mcs <= (int)args.max_mcs; ++tb2.mcs) {
int tbs_idx2 = srslte_ra_tbs_idx_from_mcs(tb2.mcs, args.use_tbs_index_alt, args.is_ul);
tb2.tbs_bytes = srslte_ra_tbs_from_idx(tbs_idx2, args.prb_grant_size) / 8u;
tb2.tbs_bytes = srslte_ra_tbs_from_idx(tbs_idx2, args.prb_grant_size) / 8U;
TESTASSERT(not lower_coderate(tb2, nof_re, args) or (args.prb_grant_size == 1 and tb2.mcs == 6));
}
@ -131,62 +131,55 @@ int test_mcs_tbs_dl_helper(const sched_cell_params_t& cell_params, const tbs_tes
return SRSLTE_SUCCESS;
}
int test_mcs_lookup_specific()
int assert_mcs_tbs_result(uint32_t cell_nof_prb,
uint32_t cqi,
uint32_t prb_grant_size,
uint32_t tbs,
uint32_t mcs,
bool alt_cqi_table = false)
{
sched_cell_params_t cell_params = {};
sched_interface::cell_cfg_t cell_cfg = generate_default_cell_cfg(6);
sched_interface::cell_cfg_t cell_cfg = generate_default_cell_cfg(cell_nof_prb);
sched_interface::sched_args_t sched_args = {};
cell_params.set_cfg(0, cell_cfg, sched_args);
tbs_test_args args;
args.verbose = true;
tbs_info expected_result;
args.verbose = true;
args.cqi = cqi;
args.prb_grant_size = prb_grant_size;
args.use_tbs_index_alt = alt_cqi_table;
if (alt_cqi_table) {
args.max_mcs = std::min(args.max_mcs, 27U); // limited to 27 for 256-QAM
}
/* TEST CASE: DL, no 256-QAM */
// cqi=5,Nprb=1 -> {mcs=3, tbs_idx=3, tbs=40}
tbs_info expected_result;
TESTASSERT(test_mcs_tbs_dl_helper(cell_params, args, &expected_result) == SRSLTE_SUCCESS);
CONDERROR(expected_result != tbs_info(40 / 8, 3),
"TBS computation failure. {%d, %d}!={40, 3}",
CONDERROR(expected_result != tbs_info(tbs / 8, mcs),
"TBS computation failure. {%d, %d}!={%d, %d}",
expected_result.tbs_bytes * 8,
expected_result.mcs);
expected_result.mcs,
tbs,
mcs);
// cqi=15,Nprb=1 -> {mcs=19, tbs_idx=17, tbs=336}
args.cqi = 15;
TESTASSERT(test_mcs_tbs_dl_helper(cell_params, args, &expected_result) == SRSLTE_SUCCESS);
CONDERROR(expected_result != tbs_info(336 / 8, 19),
"TBS computation failure. {%d, %d}!={336, 19}",
expected_result.tbs_bytes * 8,
expected_result.mcs);
return SRSLTE_SUCCESS;
}
// cqi=9,Nprb=1,cell_nprb=100 -> {mcs=28, tbs_idx=17, tbs=712}
cell_params = {};
cell_cfg = generate_default_cell_cfg(100);
cell_params.set_cfg(0, cell_cfg, sched_args);
args.cqi = 9;
TESTASSERT(test_mcs_tbs_dl_helper(cell_params, args, &expected_result) == SRSLTE_SUCCESS);
CONDERROR(expected_result != tbs_info(712 / 8, 28),
"TBS computation failure. {%d, %d}!={712, 28}",
expected_result.tbs_bytes * 8,
expected_result.mcs);
int test_mcs_lookup_specific()
{
/* TEST CASE: DL, no 256-QAM */
// cqi=5,Nprb=1 -> {mcs=3, tbs_idx=3, tbs=40}
TESTASSERT(assert_mcs_tbs_result(6, 5, 1, 40, 3) == SRSLTE_SUCCESS);
TESTASSERT(assert_mcs_tbs_result(6, 5, 4, 256, 4) == SRSLTE_SUCCESS);
// cqi=10,Nprb=10,cell_nprb=100 -> {mcs=28, tbs=5736}
args.prb_grant_size = 10;
args.cqi = 10;
TESTASSERT(test_mcs_tbs_dl_helper(cell_params, args, &expected_result) == SRSLTE_SUCCESS);
CONDERROR(expected_result != tbs_info(5736 / 8, 25),
"TBS computation failure. {%d, %d}!={5736, 25}",
expected_result.tbs_bytes * 8,
expected_result.mcs);
TESTASSERT(assert_mcs_tbs_result(100, 9, 1, 712, 28) == SRSLTE_SUCCESS);
TESTASSERT(assert_mcs_tbs_result(100, 10, 10, 5736, 25) == SRSLTE_SUCCESS);
// cqi=15,Nprb=1,256-QAM -> {mcs=26,tbs_idx=32,tbs=968}
args.prb_grant_size = 1;
args.use_tbs_index_alt = true;
args.max_mcs = 27; // limited to 27 for 256-QAM
args.cqi = 15;
TESTASSERT(test_mcs_tbs_dl_helper(cell_params, args, &expected_result) == SRSLTE_SUCCESS);
CONDERROR(expected_result != tbs_info(968 / 8, 27),
"TBS computation failure. {%d, %d}!={968, 27}",
expected_result.tbs_bytes * 8,
expected_result.mcs);
// cqi=15
TESTASSERT(assert_mcs_tbs_result(6, 15, 1, 336, 19) == SRSLTE_SUCCESS); // I_tbs=17
TESTASSERT(assert_mcs_tbs_result(6, 15, 6, 2152, 19) == SRSLTE_SUCCESS); // I_tbs=17
TESTASSERT(assert_mcs_tbs_result(100, 15, 1, 712, 28) == SRSLTE_SUCCESS); // I_tbs=26
TESTASSERT(assert_mcs_tbs_result(100, 15, 2, 1480, 28) == SRSLTE_SUCCESS); // I_tbs=26
TESTASSERT(assert_mcs_tbs_result(100, 15, 10, 7480, 28) == SRSLTE_SUCCESS); // I_tbs=26
TESTASSERT(assert_mcs_tbs_result(100, 15, 1, 968, 27, true) == SRSLTE_SUCCESS);
return SRSLTE_SUCCESS;
}
@ -217,6 +210,9 @@ int test_mcs_tbs_consistency_all()
return SRSLTE_SUCCESS;
}
/**
* Note: assumes the lower bound for the number of REs
*/
int test_min_mcs_tbs_dl_helper(const sched_cell_params_t& cell_params, const tbs_test_args& args, tbs_info* result)
{
uint32_t nof_re = cell_params.get_dl_lb_nof_re(args.tti_tx_dl, args.prb_grant_size);
@ -288,6 +284,14 @@ int test_min_mcs_tbs_specific()
int main()
{
auto& mac_log = srslog::fetch_basic_logger("MAC");
mac_log.set_level(srslog::basic_levels::info);
auto& test_log = srslog::fetch_basic_logger("TEST");
test_log.set_level(srslog::basic_levels::info);
// Start the log backend.
srslog::init();
TESTASSERT(srsenb::test_mcs_lookup_specific() == SRSLTE_SUCCESS);
TESTASSERT(srsenb::test_mcs_tbs_consistency_all() == SRSLTE_SUCCESS);
TESTASSERT(srsenb::test_min_mcs_tbs_specific() == SRSLTE_SUCCESS);

@ -58,7 +58,6 @@ int test_pdcch_one_ue()
sched_ue sched_ue{rnti, cell_params, ue_cfg};
pdcch.init(cell_params[PCell_IDX]);
TESTASSERT(pdcch.nof_alloc_combinations() == 0);
TESTASSERT(pdcch.nof_allocs() == 0);
uint32_t tti_counter = 0;
@ -101,7 +100,7 @@ int test_pdcch_one_ue()
TESTASSERT(pdcch_result[0]->rnti == sched_ue.get_rnti());
TESTASSERT(pdcch_result[0]->total_mask.size() == cell_params[ENB_CC_IDX].nof_cce_table[pdcch.get_cfi() - 1]);
TESTASSERT(pdcch_result[0]->current_mask == pdcch_result[0]->total_mask);
TESTASSERT(pdcch_result[0]->current_mask.count() == 1u << aggr_idx);
TESTASSERT(pdcch_result[0]->current_mask.count() == 1U << aggr_idx);
TESTASSERT(std::count(dci_locs.begin(), dci_locs.end(), pdcch_result[0]->dci_pos.ncce) > 0);
// allocate UL user
@ -129,7 +128,7 @@ int test_pdcch_one_ue()
TESTASSERT(pdcch_result[1]->rnti == sched_ue.get_rnti());
TESTASSERT(pdcch_result[1]->total_mask.size() == cell_params[ENB_CC_IDX].nof_cce_table[pdcch.get_cfi() - 1]);
TESTASSERT((pdcch_result[1]->current_mask & pdcch_result[0]->current_mask).none());
TESTASSERT(pdcch_result[1]->current_mask.count() == 1u << aggr_idx);
TESTASSERT(pdcch_result[1]->current_mask.count() == 1U << aggr_idx);
TESTASSERT(pdcch_result[1]->total_mask == (pdcch_result[0]->current_mask | pdcch_result[1]->current_mask));
TESTASSERT(std::count(dci_locs2.begin(), dci_locs2.end(), pdcch_result[0]->dci_pos.ncce) > 0);
@ -140,6 +139,118 @@ int test_pdcch_one_ue()
return SRSLTE_SUCCESS;
}
int test_pdcch_ue_and_sibs()
{
// Params
uint32_t nof_prb = 100;
std::vector<sched_cell_params_t> cell_params(1);
sched_interface::ue_cfg_t ue_cfg = generate_default_ue_cfg();
sched_interface::cell_cfg_t cell_cfg = generate_default_cell_cfg(nof_prb);
sched_interface::sched_args_t sched_args{};
TESTASSERT(cell_params[0].set_cfg(0, cell_cfg, sched_args));
sf_cch_allocator pdcch;
sched_ue sched_ue{0x46, cell_params, ue_cfg};
pdcch.init(cell_params[PCell_IDX]);
TESTASSERT(pdcch.nof_allocs() == 0);
tti_point tti_rx{std::uniform_int_distribution<uint32_t>(0, 9)(get_rand_gen())};
pdcch.new_tti(tti_rx);
TESTASSERT(pdcch.nof_cces() == cell_params[0].nof_cce_table[0]);
TESTASSERT(pdcch.get_cfi() == 1); // Start at CFI=1
TESTASSERT(pdcch.nof_allocs() == 0);
TESTASSERT(pdcch.alloc_dci(alloc_type_t::DL_BC, 2));
TESTASSERT(pdcch.nof_allocs() == 1);
TESTASSERT(pdcch.alloc_dci(alloc_type_t::DL_RAR, 2));
TESTASSERT(pdcch.nof_allocs() == 2);
TESTASSERT(pdcch.alloc_dci(alloc_type_t::DL_DATA, 2, &sched_ue, false));
TESTASSERT(pdcch.nof_allocs() == 3);
// TEST: Ability to revert last allocation
pdcch.rem_last_dci();
TESTASSERT(pdcch.nof_allocs() == 2);
// TEST: DCI positions
uint32_t cfi = pdcch.get_cfi();
sf_cch_allocator::alloc_result_t dci_result;
pdcch_mask_t result_pdcch_mask;
pdcch.get_allocs(&dci_result, &result_pdcch_mask);
TESTASSERT(dci_result.size() == 2);
const cce_position_list& bc_dci_locs = cell_params[0].common_locations[cfi - 1][2];
TESTASSERT(bc_dci_locs[0] == dci_result[0]->dci_pos.ncce);
const cce_position_list& rar_dci_locs = cell_params[0].rar_locations[to_tx_dl(tti_rx).sf_idx()][cfi - 1][2];
TESTASSERT(std::any_of(rar_dci_locs.begin(), rar_dci_locs.end(), [&dci_result](uint32_t val) {
return dci_result[1]->dci_pos.ncce == val;
}));
return SRSLTE_SUCCESS;
}
int test_6prbs()
{
std::vector<sched_cell_params_t> cell_params(1);
sched_interface::ue_cfg_t ue_cfg = generate_default_ue_cfg();
sched_interface::cell_cfg_t cell_cfg = generate_default_cell_cfg(6);
sched_interface::sched_args_t sched_args{};
TESTASSERT(cell_params[0].set_cfg(0, cell_cfg, sched_args));
sf_cch_allocator pdcch;
sched_ue sched_ue{0x46, cell_params, ue_cfg}, sched_ue2{0x47, cell_params, ue_cfg};
sf_cch_allocator::alloc_result_t dci_result;
pdcch_mask_t result_pdcch_mask;
pdcch.init(cell_params[PCell_IDX]);
TESTASSERT(pdcch.nof_allocs() == 0);
uint32_t opt_cfi = 3;
uint32_t bc_aggr_idx = 2, ue_aggr_idx = 1;
// TEST: The first rnti picks, out of its 3 candidate DCI positions, one that avoids a clash with the SIB. The
// second rnti won't find space
tti_point tti_rx{0};
pdcch.new_tti(tti_rx);
const cce_position_list& bc_dci_locs = cell_params[0].common_locations[opt_cfi - 1][bc_aggr_idx];
const cce_position_list& rnti_dci_locs =
(*sched_ue.get_locations(0, opt_cfi, to_tx_dl(tti_rx).sf_idx()))[ue_aggr_idx];
const cce_position_list& rnti2_dci_locs =
(*sched_ue2.get_locations(0, opt_cfi, to_tx_dl(tti_rx).sf_idx()))[ue_aggr_idx];
TESTASSERT(pdcch.alloc_dci(alloc_type_t::DL_BC, bc_aggr_idx));
TESTASSERT(pdcch.alloc_dci(alloc_type_t::DL_DATA, ue_aggr_idx, &sched_ue, false));
TESTASSERT(not pdcch.alloc_dci(alloc_type_t::DL_DATA, ue_aggr_idx, &sched_ue2, false));
TESTASSERT(pdcch.nof_allocs() == 2);
pdcch.get_allocs(&dci_result, &result_pdcch_mask);
TESTASSERT(dci_result.size() == 2);
TESTASSERT(dci_result[0]->dci_pos.ncce == bc_dci_locs[0]);
TESTASSERT(dci_result[1]->dci_pos.ncce == rnti_dci_locs[2]);
// TEST: Two RNTIs can be allocated if one doesn't use the PUCCH
opt_cfi = 2;
tti_rx = tti_point{1};
pdcch.new_tti(tti_rx);
const cce_position_list& rnti_dci_locs3 =
(*sched_ue.get_locations(0, opt_cfi, to_tx_dl(tti_rx).sf_idx()))[ue_aggr_idx];
const cce_position_list& rnti_dci_locs4 =
(*sched_ue2.get_locations(0, opt_cfi, to_tx_dl(tti_rx).sf_idx()))[ue_aggr_idx];
TESTASSERT(pdcch.alloc_dci(alloc_type_t::DL_DATA, ue_aggr_idx, &sched_ue, false));
TESTASSERT(not pdcch.alloc_dci(alloc_type_t::DL_DATA, ue_aggr_idx, &sched_ue2, false));
TESTASSERT(pdcch.alloc_dci(alloc_type_t::DL_DATA, ue_aggr_idx, &sched_ue2, true));
TESTASSERT(pdcch.nof_allocs() == 2 and pdcch.get_cfi() == opt_cfi);
pdcch.get_allocs(&dci_result, &result_pdcch_mask);
TESTASSERT(dci_result.size() == 2);
TESTASSERT(dci_result[0]->dci_pos.ncce == rnti_dci_locs3[0]);
TESTASSERT(dci_result[1]->dci_pos.ncce == rnti_dci_locs4[0]);
return SRSLTE_SUCCESS;
}
int main()
{
srsenb::set_randseed(seed);
@ -152,6 +263,8 @@ int main()
srslog::init();
TESTASSERT(test_pdcch_one_ue() == SRSLTE_SUCCESS);
TESTASSERT(test_pdcch_ue_and_sibs() == SRSLTE_SUCCESS);
TESTASSERT(test_6prbs() == SRSLTE_SUCCESS);
srslog::flush();

@ -54,7 +54,7 @@ int test_pdu_alloc_successful(srsenb::lch_ue_manager& lch_handler,
int test_retx_until_empty(srsenb::lch_ue_manager& lch_handler, int lcid, uint32_t rlc_payload_size)
{
int start_rlc_bytes = lch_handler.get_dl_retx(lcid);
int nof_pdus = ceil(start_rlc_bytes / (float)rlc_payload_size);
int nof_pdus = ceil(static_cast<float>(start_rlc_bytes) / static_cast<float>(rlc_payload_size));
int rem_rlc_bytes = start_rlc_bytes;
sched_interface::dl_sched_pdu_t pdu;
@ -70,7 +70,7 @@ int test_retx_until_empty(srsenb::lch_ue_manager& lch_handler, int lcid, uint32_
int test_newtx_until_empty(srsenb::lch_ue_manager& lch_handler, int lcid, uint32_t rlc_payload_size)
{
int start_rlc_bytes = lch_handler.get_dl_tx(lcid);
int nof_pdus = ceil(start_rlc_bytes / (float)rlc_payload_size);
int nof_pdus = ceil(static_cast<float>(start_rlc_bytes) / static_cast<float>(rlc_payload_size));
int rem_rlc_bytes = start_rlc_bytes;
sched_interface::dl_sched_pdu_t pdu;

@ -87,7 +87,7 @@ int ue_sim::update(const sf_output_res_t& sf_out)
void ue_sim::update_dl_harqs(const sf_output_res_t& sf_out)
{
for (uint32_t cc = 0; cc < sf_out.cc_params.size(); ++cc) {
for (uint32_t i = 0; i < sf_out.dl_cc_result[cc].nof_data_elems; ++i) {
for (uint32_t i = 0; i < sf_out.dl_cc_result[cc].data.size(); ++i) {
const auto& data = sf_out.dl_cc_result[cc].data[i];
if (data.dci.rnti != ctxt.rnti) {
continue;
@ -116,7 +116,7 @@ void ue_sim::update_ul_harqs(const sf_output_res_t& sf_out)
uint32_t pid = to_tx_ul(sf_out.tti_rx).to_uint() % (FDD_HARQ_DELAY_UL_MS + FDD_HARQ_DELAY_DL_MS);
for (uint32_t cc = 0; cc < sf_out.cc_params.size(); ++cc) {
// Update UL harqs with PHICH info
for (uint32_t i = 0; i < sf_out.ul_cc_result[cc].nof_phich_elems; ++i) {
for (uint32_t i = 0; i < sf_out.ul_cc_result[cc].phich.size(); ++i) {
const auto& phich = sf_out.ul_cc_result[cc].phich[i];
if (phich.rnti != ctxt.rnti) {
continue;
@ -137,7 +137,7 @@ void ue_sim::update_ul_harqs(const sf_output_res_t& sf_out)
}
// Update UL harqs with PUSCH grants
for (uint32_t i = 0; i < sf_out.ul_cc_result[cc].nof_dci_elems; ++i) {
for (uint32_t i = 0; i < sf_out.ul_cc_result[cc].pusch.size(); ++i) {
const auto& data = sf_out.ul_cc_result[cc].pusch[i];
if (data.dci.rnti != ctxt.rnti) {
continue;
@ -180,7 +180,7 @@ void ue_sim::update_conn_state(const sf_output_res_t& sf_out)
srslte::tti_interval rar_window{ctxt.prach_tti_rx + 3, ctxt.prach_tti_rx + 3 + rar_win_size};
if (rar_window.contains(tti_tx_dl)) {
for (uint32_t i = 0; i < dl_cc_result.nof_rar_elems; ++i) {
for (uint32_t i = 0; i < dl_cc_result.rar.size(); ++i) {
for (uint32_t j = 0; j < dl_cc_result.rar[i].msg3_grant.size(); ++j) {
const auto& data = dl_cc_result.rar[i].msg3_grant[j].data;
if (data.prach_tti == (uint32_t)ctxt.prach_tti_rx.to_uint() and data.preamble_idx == ctxt.preamble_idx) {
@ -197,7 +197,7 @@ void ue_sim::update_conn_state(const sf_output_res_t& sf_out)
srslte::tti_point expected_msg3_tti_rx = ctxt.rar_tti_rx + MSG3_DELAY_MS;
if (expected_msg3_tti_rx == sf_out.tti_rx) {
// Msg3 should exist
for (uint32_t i = 0; i < ul_cc_result.nof_dci_elems; ++i) {
for (uint32_t i = 0; i < ul_cc_result.pusch.size(); ++i) {
if (ul_cc_result.pusch[i].dci.rnti == ctxt.rnti) {
ctxt.msg3_tti_rx = sf_out.tti_rx;
}
@ -207,7 +207,7 @@ void ue_sim::update_conn_state(const sf_output_res_t& sf_out)
if (ctxt.msg3_tti_rx.is_valid() and not ctxt.msg4_tti_rx.is_valid()) {
// Msg3 scheduled, but Msg4 not yet scheduled
for (uint32_t i = 0; i < dl_cc_result.nof_data_elems; ++i) {
for (uint32_t i = 0; i < dl_cc_result.data.size(); ++i) {
if (dl_cc_result.data[i].dci.rnti == ctxt.rnti) {
for (uint32_t j = 0; j < dl_cc_result.data[i].nof_pdu_elems[0]; ++j) {
if (dl_cc_result.data[i].pdu[0][j].lcid == (uint32_t)srslte::dl_sch_lcid::CON_RES_ID) {
@ -220,10 +220,21 @@ void ue_sim::update_conn_state(const sf_output_res_t& sf_out)
}
}
sched_sim_base::sched_sim_base(sched_interface* sched_ptr_,
const sched_interface::sched_args_t& sched_args,
const std::vector<sched_interface::cell_cfg_t>& cell_cfg_list) :
logger(srslog::fetch_basic_logger("TEST")), sched_ptr(sched_ptr_), cell_params(cell_cfg_list.size())
{
for (uint32_t cc = 0; cc < cell_params.size(); ++cc) {
cell_params[cc].set_cfg(cc, cell_cfg_list[cc], sched_args);
}
sched_ptr->cell_cfg(cell_cfg_list); // call parent cfg
}
int sched_sim_base::add_user(uint16_t rnti, const sched_interface::ue_cfg_t& ue_cfg_, uint32_t preamble_idx)
{
CONDERROR(!srslte_prach_tti_opportunity_config_fdd(
(*cell_params)[ue_cfg_.supported_cc_list[0].enb_cc_idx].prach_config, current_tti_rx.to_uint(), -1),
cell_params[ue_cfg_.supported_cc_list[0].enb_cc_idx].cfg.prach_config, current_tti_rx.to_uint(), -1),
"New user added in a non-PRACH TTI");
TESTASSERT(ue_db.count(rnti) == 0);
@ -287,7 +298,7 @@ sim_enb_ctxt_t sched_sim_base::get_enb_ctxt() const
int sched_sim_base::set_default_tti_events(const sim_ue_ctxt_t& ue_ctxt, ue_tti_events& pending_events)
{
pending_events.cc_list.clear();
pending_events.cc_list.resize(cell_params->size());
pending_events.cc_list.resize(cell_params.size());
pending_events.tti_rx = current_tti_rx;
for (uint32_t enb_cc_idx = 0; enb_cc_idx < pending_events.cc_list.size(); ++enb_cc_idx) {
@ -380,8 +391,8 @@ int sched_sim_base::apply_tti_events(sim_ue_ctxt_t& ue_ctxt, const ue_tti_events
sched_ptr->dl_cqi_info(events.tti_rx.to_uint(), ue_ctxt.rnti, enb_cc_idx, cc_feedback.dl_cqi);
}
if (cc_feedback.ul_cqi >= 0) {
sched_ptr->ul_snr_info(events.tti_rx.to_uint(), ue_ctxt.rnti, enb_cc_idx, cc_feedback.ul_cqi, 0);
if (cc_feedback.ul_snr >= 0) {
sched_ptr->ul_snr_info(events.tti_rx.to_uint(), ue_ctxt.rnti, enb_cc_idx, cc_feedback.ul_snr, 0);
}
}

@ -61,8 +61,8 @@ struct sim_ue_ctxt_t {
};
struct sim_enb_ctxt_t {
const std::vector<sched_interface::cell_cfg_t>* cell_params;
std::map<uint16_t, const sim_ue_ctxt_t*> ue_db;
srslte::span<const sched_cell_params_t> cell_params;
std::map<uint16_t, const sim_ue_ctxt_t*> ue_db;
};
struct ue_tti_events {
struct cc_data {
@ -74,7 +74,7 @@ struct ue_tti_events {
int ul_pid = -1;
bool ul_ack = false;
int dl_cqi = -1;
int ul_cqi = -1;
int ul_snr = -1;
};
srslte::tti_point tti_rx;
std::vector<cc_data> cc_list;
@ -108,11 +108,9 @@ private:
class sched_sim_base
{
public:
sched_sim_base(sched_interface* sched_ptr_, const std::vector<sched_interface::cell_cfg_t>& cell_params_) :
logger(srslog::fetch_basic_logger("MAC")), sched_ptr(sched_ptr_), cell_params(&cell_params_)
{
sched_ptr->cell_cfg(cell_params_); // call parent cfg
}
sched_sim_base(sched_interface* sched_ptr_,
const sched_interface::sched_args_t& sched_args,
const std::vector<sched_interface::cell_cfg_t>& cell_params_);
virtual ~sched_sim_base() = default;
int add_user(uint16_t rnti, const sched_interface::ue_cfg_t& ue_cfg_, uint32_t preamble_idx);
@ -142,6 +140,9 @@ public:
const ue_sim* ret = find_rnti(rnti);
return ret == nullptr ? nullptr : &ret->get_ctxt().ue_cfg;
}
sched_interface* get_sched() { return sched_ptr; }
srslte::const_span<sched_cell_params_t> get_cell_params() { return cell_params; }
tti_point get_tti_rx() const { return current_tti_rx; }
std::map<uint16_t, ue_sim>::iterator begin() { return ue_db.begin(); }
std::map<uint16_t, ue_sim>::iterator end() { return ue_db.end(); }
@ -153,9 +154,9 @@ private:
int set_default_tti_events(const sim_ue_ctxt_t& ue_ctxt, ue_tti_events& pending_events);
int apply_tti_events(sim_ue_ctxt_t& ue_ctxt, const ue_tti_events& events);
srslog::basic_logger& logger;
sched_interface* sched_ptr;
const std::vector<sched_interface::cell_cfg_t>* cell_params;
srslog::basic_logger& logger;
sched_interface* sched_ptr;
std::vector<sched_cell_params_t> cell_params;
srslte::tti_point current_tti_rx;
std::map<uint16_t, ue_sim> ue_db;

@ -83,8 +83,8 @@ void sched_sim_random::set_external_tti_events(const sim_ue_ctxt_t& ue_ctxt, ue_
}
// UL CQI
if (cc_feedback.ul_cqi >= 0) {
cc_feedback.ul_cqi = std::uniform_int_distribution<uint32_t>{5, 40}(get_rand_gen());
if (cc_feedback.ul_snr >= 0) {
cc_feedback.ul_snr = std::uniform_int_distribution<uint32_t>{5, 40}(get_rand_gen());
}
}
}
@ -98,12 +98,12 @@ void sched_result_stats::process_results(tti_point
const std::vector<sched_interface::ul_sched_res_t>& ul_result)
{
for (uint32_t ccidx = 0; ccidx < dl_result.size(); ++ccidx) {
for (uint32_t i = 0; i < dl_result[ccidx].nof_data_elems; ++i) {
for (uint32_t i = 0; i < dl_result[ccidx].data.size(); ++i) {
user_stats* user = get_user(dl_result[ccidx].data[i].dci.rnti);
user->tot_dl_sched_data[ccidx] += dl_result[ccidx].data[i].tbs[0];
user->tot_dl_sched_data[ccidx] += dl_result[ccidx].data[i].tbs[1];
}
for (uint32_t i = 0; i < ul_result[ccidx].nof_dci_elems; ++i) {
for (uint32_t i = 0; i < ul_result[ccidx].pusch.size(); ++i) {
user_stats* user = get_user(ul_result[ccidx].pusch[i].dci.rnti);
user->tot_ul_sched_data[ccidx] += ul_result[ccidx].pusch[i].tbs;
}
@ -125,6 +125,10 @@ sched_result_stats::user_stats* sched_result_stats::get_user(uint16_t rnti)
* Common Sched Tester
**********************/
common_sched_tester::common_sched_tester() : logger(srslog::fetch_basic_logger("TEST")) {}
common_sched_tester::~common_sched_tester() {}
const sched::ue_cfg_t* common_sched_tester::get_current_ue_cfg(uint16_t rnti) const
{
return sched_sim->get_user_cfg(rnti);
@ -134,9 +138,9 @@ int common_sched_tester::sim_cfg(sim_sched_args args)
{
sim_args0 = std::move(args);
sched::init(nullptr, sim_args0.sched_args);
sched::init(&rrc_ptr, sim_args0.sched_args);
sched_sim.reset(new sched_sim_random{this, sim_args0.cell_cfg});
sched_sim.reset(new sched_sim_random{this, sim_args0.sched_args, sim_args0.cell_cfg});
sched_stats.reset(new sched_result_stats{sim_args0.cell_cfg});
return SRSLTE_SUCCESS;

@ -25,6 +25,7 @@
#include "sched_sim_ue.h"
#include "sched_test_utils.h"
#include "srsenb/hdr/stack/mac/sched.h"
#include "srslte/interfaces/enb_rrc_interfaces.h"
#include "srslte/srslog/srslog.h"
#include <random>
@ -38,6 +39,15 @@ void set_randseed(uint64_t seed);
float randf();
std::default_random_engine& get_rand_gen();
struct rrc_dummy : public rrc_interface_mac {
public:
int add_user(uint16_t rnti, const sched_interface::ue_cfg_t& init_ue_cfg) { return SRSLTE_SUCCESS; }
void upd_user(uint16_t new_rnti, uint16_t old_rnti) {}
void set_activity_user(uint16_t rnti) {}
bool is_paging_opportunity(uint32_t tti, uint32_t* payload_len) { return false; }
uint8_t* read_pdu_bcch_dlsch(const uint8_t enb_cc_idx, const uint32_t sib_index) { return nullptr; }
};
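rrc_dummy is a null-object stand-in for the RRC, so the scheduler can be constructed in tests without the full eNB stack. A minimal wiring sketch, mirroring what run_benchmark_scenario() in the benchmark does:
// The scheduler only needs an rrc_interface_mac*, so the inert dummy suffices.
sched_interface::sched_args_t sched_args = {};
sched                         sched_obj;
rrc_dummy                     rrc{};
sched_obj.init(&rrc, sched_args);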
/**************************
* Testers
*************************/
@ -88,8 +98,8 @@ public:
std::vector<sched_interface::ul_sched_res_t> ul_sched_result;
};
common_sched_tester() : logger(srslog::fetch_basic_logger("TEST")) {}
~common_sched_tester() override = default;
common_sched_tester();
~common_sched_tester() override;
const ue_cfg_t* get_current_ue_cfg(uint16_t rnti) const;
@ -123,6 +133,8 @@ public:
protected:
virtual void new_test_tti();
virtual void before_sched() {}
rrc_dummy rrc_ptr;
};
} // namespace srsenb

@ -148,7 +148,6 @@ int sched_tester::process_results()
{
const srsenb::cc_sched_result* cc_result = sched_results.get_cc(tti_rx, CARRIER_IDX);
srsenb::sf_output_res_t sf_out{sched_cell_params, tti_rx, tti_info.ul_sched_result, tti_info.dl_sched_result};
TESTASSERT(tti_rx == cc_result->tti_rx);
// Common tests
TESTASSERT(test_pdcch_collisions(sf_out, CARRIER_IDX, &cc_result->pdcch_mask) == SRSLTE_SUCCESS);
@ -168,7 +167,7 @@ int sched_tester::process_results()
int sched_tester::test_harqs()
{
/* check consistency of DL harq procedures and allocations */
for (uint32_t i = 0; i < tti_info.dl_sched_result[CARRIER_IDX].nof_data_elems; ++i) {
for (uint32_t i = 0; i < tti_info.dl_sched_result[CARRIER_IDX].data.size(); ++i) {
const auto& data = tti_info.dl_sched_result[CARRIER_IDX].data[i];
uint32_t h_id = data.dci.pid;
uint16_t rnti = data.dci.rnti;
@ -181,7 +180,7 @@ int sched_tester::test_harqs()
}
/* Check PHICH allocations */
for (uint32_t i = 0; i < tti_info.ul_sched_result[CARRIER_IDX].nof_phich_elems; ++i) {
for (uint32_t i = 0; i < tti_info.ul_sched_result[CARRIER_IDX].phich.size(); ++i) {
const auto& phich = tti_info.ul_sched_result[CARRIER_IDX].phich[i];
const auto& hprev = tti_data.ue_data[phich.rnti].ul_harq;
const auto* h = ue_db[phich.rnti]->get_ul_harq(srsenb::to_tx_ul(tti_rx), CARRIER_IDX);
@ -192,7 +191,7 @@ int sched_tester::test_harqs()
if (not hprev.is_empty()) {
// In case it was resumed
CONDERROR(h == nullptr or h->is_empty(), "Cannot resume empty UL harq");
for (uint32_t j = 0; j < tti_info.ul_sched_result[CARRIER_IDX].nof_dci_elems; ++j) {
for (uint32_t j = 0; j < tti_info.ul_sched_result[CARRIER_IDX].pusch.size(); ++j) {
auto& pusch = tti_info.ul_sched_result[CARRIER_IDX].pusch[j];
CONDERROR(pusch.dci.rnti == phich.rnti, "Cannot send PHICH::ACK for same harq that got UL grant.");
}
@ -208,7 +207,7 @@ int sched_tester::test_harqs()
int sched_tester::update_ue_stats()
{
// update ue stats with number of allocated UL PRBs
for (uint32_t i = 0; i < tti_info.ul_sched_result[CARRIER_IDX].nof_dci_elems; ++i) {
for (uint32_t i = 0; i < tti_info.ul_sched_result[CARRIER_IDX].pusch.size(); ++i) {
const auto& pusch = tti_info.ul_sched_result[CARRIER_IDX].pusch[i];
uint32_t L, RBstart;
srslte_ra_type2_from_riv(pusch.dci.type2_alloc.riv,
@ -224,7 +223,7 @@ int sched_tester::update_ue_stats()
// update ue stats with number of DL RB allocations
srslte::bounded_bitset<100, true> alloc_mask(sched_cell_params[CARRIER_IDX].cfg.cell.nof_prb);
for (uint32_t i = 0; i < tti_info.dl_sched_result[CARRIER_IDX].nof_data_elems; ++i) {
for (uint32_t i = 0; i < tti_info.dl_sched_result[CARRIER_IDX].data.size(); ++i) {
auto& data = tti_info.dl_sched_result[CARRIER_IDX].data[i];
TESTASSERT(srsenb::extract_dl_prbmask(sched_cell_params[CARRIER_IDX].cfg.cell,
tti_info.dl_sched_result[CARRIER_IDX].data[i].dci,

@ -58,9 +58,9 @@ inline srsenb::sched_interface::cell_cfg_t generate_default_cell_cfg(uint32_t no
cell_cfg.maxharq_msg3tx = 3;
cell_cfg.initial_dl_cqi = 6;
cell_cfg.target_ul_sinr = -1;
cell_cfg.nrb_cqi = 2;
cell_cfg.nrb_cqi = 1;
cell_cfg.n1pucch_an = 12;
cell_cfg.delta_pucch_shift = 2;
cell_cfg.delta_pucch_shift = 1;
cell_cfg.ncs_an = 0;
return cell_cfg;

@ -52,18 +52,16 @@ int sim_ue_ctxt_t::enb_to_ue_cc_idx(uint32_t enb_cc_idx) const
const pusch_t* find_pusch_grant(uint16_t rnti, const sched_interface::ul_sched_res_t& ul_cc_res)
{
const pusch_t* ptr = std::find_if(&ul_cc_res.pusch[0],
&ul_cc_res.pusch[ul_cc_res.nof_dci_elems],
[rnti](const pusch_t& pusch) { return pusch.dci.rnti == rnti; });
return ptr == &ul_cc_res.pusch[ul_cc_res.nof_dci_elems] ? nullptr : ptr;
const pusch_t* ptr = std::find_if(
ul_cc_res.pusch.begin(), ul_cc_res.pusch.end(), [rnti](const pusch_t& pusch) { return pusch.dci.rnti == rnti; });
return ptr == ul_cc_res.pusch.end() ? nullptr : ptr;
}
const pdsch_t* find_pdsch_grant(uint16_t rnti, const sched_interface::dl_sched_res_t& dl_cc_res)
{
const pdsch_t* ptr = std::find_if(&dl_cc_res.data[0],
&dl_cc_res.data[dl_cc_res.nof_data_elems],
[rnti](const pdsch_t& pdsch) { return pdsch.dci.rnti == rnti; });
return ptr == &dl_cc_res.data[dl_cc_res.nof_data_elems] ? nullptr : ptr;
const pdsch_t* ptr = std::find_if(
dl_cc_res.data.begin(), dl_cc_res.data.end(), [rnti](const pdsch_t& pdsch) { return pdsch.dci.rnti == rnti; });
return ptr == dl_cc_res.data.end() ? nullptr : ptr;
}
int test_pdsch_grant(const sim_enb_ctxt_t& enb_ctxt,
@ -74,7 +72,7 @@ int test_pdsch_grant(const sim_enb_ctxt_t& enb_ctxt,
tti_point tti_rx = sf_out.tti_rx;
const sim_ue_ctxt_t& ue_ctxt = *enb_ctxt.ue_db.at(pdsch.dci.rnti);
const sched_interface::ue_cfg_t::cc_cfg_t* cc_cfg = ue_ctxt.get_cc_cfg(enb_cc_idx);
const sched_interface::cell_cfg_t& cell_params = (*enb_ctxt.cell_params)[enb_cc_idx];
const sched_cell_params_t& cell_params = enb_ctxt.cell_params[enb_cc_idx];
bool has_pusch_grant = find_pusch_grant(pdsch.dci.rnti, sf_out.ul_cc_result[enb_cc_idx]) != nullptr;
// TEST: Check if CC is configured and active
@ -87,7 +85,7 @@ int test_pdsch_grant(const sim_enb_ctxt_t& enb_ctxt,
uint32_t nof_retx = get_nof_retx(pdsch.dci.tb[0].rv); // 0..3
if (h.nof_txs == 0 or h.ndi != pdsch.dci.tb[0].ndi) {
// It is newtx
CONDERROR(nof_retx != 0, "Invalid rv index for new tx");
CONDERROR(nof_retx != 0, "Invalid rv index for new DL tx");
CONDERROR(h.active, "DL newtx for already active DL harq pid=%d", h.pid);
} else {
// it is retx
@ -109,19 +107,19 @@ int test_pdsch_grant(const sim_enb_ctxt_t& enb_ctxt,
srslte_dl_sf_cfg_t dl_sf = {};
dl_sf.cfi = sf_out.dl_cc_result[enb_cc_idx].cfi;
dl_sf.tti = to_tx_dl(tti_rx).to_uint();
srslte_ra_dl_grant_to_grant_prb_allocation(&pdsch.dci, &grant, cell_params.cell.nof_prb);
uint32_t nof_re = srslte_ra_dl_grant_nof_re(&cell_params.cell, &dl_sf, &grant);
srslte_ra_dl_grant_to_grant_prb_allocation(&pdsch.dci, &grant, cell_params.nof_prb());
uint32_t nof_re = srslte_ra_dl_grant_nof_re(&cell_params.cfg.cell, &dl_sf, &grant);
float coderate = srslte_coderate(pdsch.tbs[0] * 8, nof_re);
srslte_mod_t mod = srslte_ra_dl_mod_from_mcs(pdsch.dci.tb[0].mcs_idx, ue_ctxt.ue_cfg.use_tbs_index_alt);
uint32_t max_Qm = ue_ctxt.ue_cfg.use_tbs_index_alt ? 8 : 6;
uint32_t Qm = std::min(max_Qm, srslte_mod_bits_x_symbol(mod));
CONDERROR(coderate > 0.930f * Qm, "Max coderate was exceeded");
CONDERROR(coderate > 0.932f * Qm, "Max coderate was exceeded");
}
// TEST: PUCCH-ACK will not collide with SR
CONDERROR(not has_pusch_grant and is_pucch_sr_collision(ue_ctxt.ue_cfg.pucch_cfg,
to_tx_dl_ack(sf_out.tti_rx),
pdsch.dci.location.ncce + cell_params.n1pucch_an),
pdsch.dci.location.ncce + cell_params.cfg.n1pucch_an),
"Collision detected between UE PUCCH-ACK and SR");
return SRSLTE_SUCCESS;
@ -129,8 +127,8 @@ int test_pdsch_grant(const sim_enb_ctxt_t& enb_ctxt,
int test_dl_sched_result(const sim_enb_ctxt_t& enb_ctxt, const sf_output_res_t& sf_out)
{
for (uint32_t cc = 0; cc < enb_ctxt.cell_params->size(); ++cc) {
for (uint32_t i = 0; i < sf_out.dl_cc_result[cc].nof_data_elems; ++i) {
for (uint32_t cc = 0; cc < enb_ctxt.cell_params.size(); ++cc) {
for (uint32_t i = 0; i < sf_out.dl_cc_result[cc].data.size(); ++i) {
const sched_interface::dl_sched_data_t& data = sf_out.dl_cc_result[cc].data[i];
CONDERROR(
enb_ctxt.ue_db.count(data.dci.rnti) == 0, "Allocated DL grant for non-existent rnti=0x%x", data.dci.rnti);
@ -144,11 +142,11 @@ int test_ul_sched_result(const sim_enb_ctxt_t& enb_ctxt, const sf_output_res_t&
{
uint32_t pid = to_tx_ul(sf_out.tti_rx).to_uint() % (FDD_HARQ_DELAY_UL_MS + FDD_HARQ_DELAY_DL_MS);
for (size_t cc = 0; cc < enb_ctxt.cell_params->size(); ++cc) {
const auto* phich_begin = &sf_out.ul_cc_result[cc].phich[0];
const auto* phich_end = &sf_out.ul_cc_result[cc].phich[sf_out.ul_cc_result[cc].nof_phich_elems];
const auto* pusch_begin = &sf_out.ul_cc_result[cc].pusch[0];
const auto* pusch_end = &sf_out.ul_cc_result[cc].pusch[sf_out.ul_cc_result[cc].nof_dci_elems];
for (size_t cc = 0; cc < enb_ctxt.cell_params.size(); ++cc) {
const auto* phich_begin = sf_out.ul_cc_result[cc].phich.begin();
const auto* phich_end = sf_out.ul_cc_result[cc].phich.end();
const auto* pusch_begin = sf_out.ul_cc_result[cc].pusch.begin();
const auto* pusch_end = sf_out.ul_cc_result[cc].pusch.end();
// TEST: rnti must exist for all PHICH
CONDERROR(std::any_of(phich_begin,
@ -205,7 +203,7 @@ int test_ul_sched_result(const sim_enb_ctxt_t& enb_ctxt, const sf_output_res_t&
if (h.nof_txs == 0 or h.ndi != pusch_ptr->dci.tb.ndi) {
// newtx
CONDERROR(nof_retx != 0, "Invalid rv index for new tx");
CONDERROR(nof_retx != 0, "Invalid rv index for new UL tx");
CONDERROR(pusch_ptr->current_tx_nb != 0, "UL HARQ retxs need to have been previously transmitted");
CONDERROR(not h_inactive, "New tx for already active UL HARQ");
CONDERROR(not pusch_ptr->needs_pdcch and ue.msg3_tti_rx.is_valid() and sf_out.tti_rx > ue.msg3_tti_rx,
@ -237,7 +235,7 @@ int test_ul_sched_result(const sim_enb_ctxt_t& enb_ctxt, const sf_output_res_t&
int test_ra(const sim_enb_ctxt_t& enb_ctxt, const sf_output_res_t& sf_out)
{
for (uint32_t cc = 0; cc < enb_ctxt.cell_params->size(); ++cc) {
for (uint32_t cc = 0; cc < enb_ctxt.cell_params.size(); ++cc) {
const auto& dl_cc_res = sf_out.dl_cc_result[cc];
const auto& ul_cc_res = sf_out.ul_cc_result[cc];
for (const auto& ue_pair : enb_ctxt.ue_db) {
@ -251,7 +249,7 @@ int test_ra(const sim_enb_ctxt_t& enb_ctxt, const sf_output_res_t& sf_out)
}
// TEST: RAR allocation
uint32_t rar_win_size = (*enb_ctxt.cell_params)[cc].prach_rar_window;
uint32_t rar_win_size = enb_ctxt.cell_params[cc].cfg.prach_rar_window;
srslte::tti_interval rar_window{ue.prach_tti_rx + 3, ue.prach_tti_rx + 3 + rar_win_size};
srslte::tti_point tti_tx_dl = to_tx_dl(sf_out.tti_rx);
@ -259,14 +257,14 @@ int test_ra(const sim_enb_ctxt_t& enb_ctxt, const sf_output_res_t& sf_out)
CONDERROR(not ue.rar_tti_rx.is_valid() and tti_tx_dl > rar_window.stop(),
"rnti=0x%x RAR not scheduled within the RAR Window",
rnti);
for (uint32_t i = 0; i < sf_out.dl_cc_result[cc].nof_rar_elems; ++i) {
for (uint32_t i = 0; i < sf_out.dl_cc_result[cc].rar.size(); ++i) {
CONDERROR(sf_out.dl_cc_result[cc].rar[i].dci.rnti == rnti,
"No RAR allocations allowed outside of user RAR window");
}
} else {
// Inside RAR window
uint32_t nof_rars = ue.rar_tti_rx.is_valid() ? 1 : 0;
for (uint32_t i = 0; i < dl_cc_res.nof_rar_elems; ++i) {
for (uint32_t i = 0; i < dl_cc_res.rar.size(); ++i) {
for (const auto& grant : dl_cc_res.rar[i].msg3_grant) {
const auto& data = grant.data;
if (data.prach_tti == (uint32_t)ue.prach_tti_rx.to_uint() and data.preamble_idx == ue.preamble_idx) {
@ -287,7 +285,7 @@ int test_ra(const sim_enb_ctxt_t& enb_ctxt, const sf_output_res_t& sf_out)
if (expected_msg3_tti_rx == sf_out.tti_rx) {
// Msg3 should exist
uint32_t msg3_count = 0;
for (uint32_t i = 0; i < ul_cc_res.nof_dci_elems; ++i) {
for (uint32_t i = 0; i < ul_cc_res.pusch.size(); ++i) {
if (ul_cc_res.pusch[i].dci.rnti == rnti) {
msg3_count++;
CONDERROR(ul_cc_res.pusch[i].needs_pdcch, "Msg3 allocations do not require PDCCH");
@ -304,7 +302,7 @@ int test_ra(const sim_enb_ctxt_t& enb_ctxt, const sf_output_res_t& sf_out)
if (ue.msg3_tti_rx.is_valid() and not ue.msg4_tti_rx.is_valid()) {
// Msg3 scheduled, but Msg4 not yet scheduled
uint32_t msg4_count = 0;
for (uint32_t i = 0; i < dl_cc_res.nof_data_elems; ++i) {
for (uint32_t i = 0; i < dl_cc_res.data.size(); ++i) {
if (dl_cc_res.data[i].dci.rnti == rnti) {
CONDERROR(to_tx_dl(sf_out.tti_rx) < to_tx_ul(ue.msg3_tti_rx),
"Msg4 cannot be scheduled without Msg3 being tx");
@ -325,7 +323,7 @@ int test_ra(const sim_enb_ctxt_t& enb_ctxt, const sf_output_res_t& sf_out)
if (not ue.msg4_tti_rx.is_valid()) {
// TEST: No UL allocs except for Msg3 before Msg4
for (uint32_t i = 0; i < ul_cc_res.nof_dci_elems; ++i) {
for (uint32_t i = 0; i < ul_cc_res.pusch.size(); ++i) {
if (ul_cc_res.pusch[i].dci.rnti == rnti) {
CONDERROR(not ue.rar_tti_rx.is_valid(), "No UL allocs before RAR allowed");
srslte::tti_point expected_msg3_tti = ue.rar_tti_rx + MSG3_DELAY_MS;
@ -340,7 +338,7 @@ int test_ra(const sim_enb_ctxt_t& enb_ctxt, const sf_output_res_t& sf_out)
// TEST: No DL allocs before Msg3
if (not ue.msg3_tti_rx.is_valid()) {
for (uint32_t i = 0; i < dl_cc_res.nof_data_elems; ++i) {
for (uint32_t i = 0; i < dl_cc_res.data.size(); ++i) {
CONDERROR(dl_cc_res.data[i].dci.rnti == rnti, "No DL data allocs allowed before Msg3 is scheduled");
}
}
@ -348,7 +346,7 @@ int test_ra(const sim_enb_ctxt_t& enb_ctxt, const sf_output_res_t& sf_out)
}
// TEST: Ensure there are no spurious RARs that do not belong to any user
for (uint32_t i = 0; i < dl_cc_res.nof_rar_elems; ++i) {
for (uint32_t i = 0; i < dl_cc_res.rar.size(); ++i) {
for (uint32_t j = 0; j < dl_cc_res.rar[i].msg3_grant.size(); ++j) {
uint32_t prach_tti = dl_cc_res.rar[i].msg3_grant[j].data.prach_tti;
uint32_t preamble_idx = dl_cc_res.rar[i].msg3_grant[j].data.preamble_idx;
@ -374,7 +372,7 @@ bool is_in_measgap(srslte::tti_point tti, uint32_t period, uint32_t offset)
int test_meas_gaps(const sim_enb_ctxt_t& enb_ctxt, const sf_output_res_t& sf_out)
{
for (uint32_t cc = 0; cc < enb_ctxt.cell_params->size(); ++cc) {
for (uint32_t cc = 0; cc < enb_ctxt.cell_params.size(); ++cc) {
const auto& dl_cc_res = sf_out.dl_cc_result[cc];
const auto& ul_cc_res = sf_out.ul_cc_result[cc];
for (const auto& ue_pair : enb_ctxt.ue_db) {

@ -157,9 +157,10 @@ int test_gtpu_direct_tunneling()
logger1.set_hex_dump_max_size(2048);
srslog::basic_logger& logger2 = srslog::fetch_basic_logger("GTPU2");
logger2.set_hex_dump_max_size(2048);
srsenb::gtpu senb_gtpu(logger1), tenb_gtpu(logger2);
stack_tester senb_stack, tenb_stack;
pdcp_tester senb_pdcp, tenb_pdcp;
srslte::task_scheduler task_sched;
srsenb::gtpu senb_gtpu(&task_sched, logger1), tenb_gtpu(&task_sched, logger2);
stack_tester senb_stack, tenb_stack;
pdcp_tester senb_pdcp, tenb_pdcp;
senb_gtpu.init(senb_addr_str, sgw_addr_str, "", "", &senb_pdcp, &senb_stack, false);
tenb_gtpu.init(tenb_addr_str, sgw_addr_str, "", "", &tenb_pdcp, &tenb_stack, false);

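With gtpu now taking a srslte::task_scheduler*, the test can drive deferred GTP-U work (e.g. delayed forwarding teardown) through the shared scheduler; a hedged sketch, where tic() advancing the internal timers by one millisecond is an assumption about the task_scheduler API:

// Sketch only: defer work through the scheduler shared with gtpu.
task_sched.defer_callback(100, []() { printf("deferred gtpu work\n"); });
for (int i = 0; i < 100; ++i) {
  task_sched.tic(); // assumed: steps the scheduler timers by 1 ms
}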
@ -47,6 +47,8 @@ public:
void stop(){};
private:
static const bool FORCE_NEIGHBOUR_CELL = false; // Set to true for printing always neighbour cells
std::string float_to_string(float f, int digits);
std::string float_to_eng_string(float f, int digits);
void print_table(const bool display_neighbours);

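float_to_eng_string() presumably formats values with SI suffixes for the console trace; a hypothetical sketch of such a formatter (not the actual implementation), scaling the exponent to a multiple of 3 and appending the matching suffix:

#include <cmath>
#include <cstdio>
#include <string>

std::string float_to_eng_string(float f, int digits)
{
  static const char* suffixes[] = {"n", "u", "m", "", "k", "M", "G"};
  int   idx = 3; // index of the empty suffix (10^0)
  float v   = f;
  while (std::fabs(v) >= 1000.0f && idx < 6) { v /= 1000.0f; ++idx; }
  while (std::fabs(v) < 1.0f && v != 0.0f && idx > 0) { v *= 1000.0f; --idx; }
  char buf[32];
  std::snprintf(buf, sizeof(buf), "%.*f%s", digits, v, suffixes[idx]);
  return std::string(buf);
}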
@ -149,8 +149,8 @@ public:
int sr_last_tx_tti() final;
// Time advance commands
void set_timeadv_rar(uint32_t ta_cmd) final;
void set_timeadv(uint32_t ta_cmd) final;
void set_timeadv_rar(uint32_t tti, uint32_t ta_cmd) final;
void set_timeadv(uint32_t tti, uint32_t ta_cmd) final;
/* Activate / Deactivate SCells */
void deactivate_scells() final;

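Call sites of the MAC interface now have to thread the receive TTI through; illustratively (the mac pointer name is hypothetical): mac->set_timeadv(tti, ta_cmd);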
@ -33,6 +33,8 @@ struct info_metrics_t {
struct sync_metrics_t {
float ta_us;
float distance_km;
float speed_kmph;
float cfo;
float sfo;
};

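The extended struct presumably gets filled from the TA controller shown below; an illustrative sketch (the controller instance and the cfo/sfo sources are assumptions, only get_speed_kmph(tti) appears in this diff):

// Sketch only: populate the new sync metrics fields.
sync_metrics_t m = {};
m.speed_kmph = ta_controller.get_speed_kmph(tti); // see ta_control below
m.cfo        = cfo_hz; // assumed: carrier frequency offset estimate
m.sfo        = sfo_hz; // assumed: sampling frequency offset estimate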
@ -31,11 +31,34 @@ namespace srsue {
class ta_control
{
private:
static const size_t MAX_NOF_SPEED_VALUES = 50;    ///< Maximum number of samples stored for the speed calculation
static const size_t MIN_NOF_SPEED_VALUES = 1;     ///< Minimum number of samples required to calculate the speed
static const size_t MAX_AGE_SPEED_VALUES = 10000; ///< Maximum sample age in milliseconds; older samples are discarded
srslog::basic_logger& logger;
mutable std::mutex mutex;
uint32_t next_base_nta = 0;
float next_base_sec = 0.0f;
// Ring buffer of speed samples: the TTI of each TA command, the elapsed time since the previous command, and the
// one-way distance increment derived from the command
struct speed_data_t {
uint32_t tti;
float delta_t;
float delta_d;
};
std::array<speed_data_t, MAX_NOF_SPEED_VALUES> speed_data = {};
int32_t last_tti = -1; // Last TTI written, -1 if none
uint32_t write_idx = 0;
uint32_t read_idx = 0;
void reset_speed_data()
{
write_idx = 0;
read_idx = 0;
last_tti = -1;
}
public:
ta_control(srslog::basic_logger& logger) : logger(logger) {}
@ -54,6 +77,9 @@ public:
// Update base in nta
next_base_nta = static_cast<uint32_t>(roundf(next_base_sec / SRSLTE_LTE_TS));
// Reset speed data
reset_speed_data();
logger.info("PHY: Set TA base: n_ta: %d, ta_usec: %.1f", next_base_nta, next_base_sec * 1e6f);
}
@ -83,7 +109,7 @@ public:
*
* @param tti TTI at which the Time Alignment command was received
* @param ta_cmd Time Alignment command
*/
void add_ta_cmd_rar(uint32_t ta_cmd)
void add_ta_cmd_rar(uint32_t tti, uint32_t ta_cmd)
{
std::lock_guard<std::mutex> lock(mutex);
@ -93,6 +119,10 @@ public:
// Update base in seconds
next_base_sec = static_cast<float>(next_base_nta) * SRSLTE_LTE_TS;
// Reset speed data
reset_speed_data();
last_tti = tti;
logger.info("PHY: Set TA RAR: ta_cmd: %d, n_ta: %d, ta_usec: %.1f", ta_cmd, next_base_nta, next_base_sec * 1e6f);
}
@ -101,9 +131,10 @@ public:
*
* @param tti TTI at which the Time Alignment command was received
* @param ta_cmd Time Alignment command
*/
void add_ta_cmd_new(uint32_t ta_cmd)
void add_ta_cmd_new(uint32_t tti, uint32_t ta_cmd)
{
std::lock_guard<std::mutex> lock(mutex);
float prev_base_sec = next_base_sec;
// Update base nta
next_base_nta = srslte_N_ta_new(next_base_nta, ta_cmd);
@ -112,6 +143,26 @@ public:
next_base_sec = static_cast<float>(next_base_nta) * SRSLTE_LTE_TS;
logger.info("PHY: Set TA: ta_cmd: %d, n_ta: %d, ta_usec: %.1f", ta_cmd, next_base_nta, next_base_sec * 1e6f);
// Calculate speed data
if (last_tti >= 0) { // -1 marks "no previous command"; TTI 0 is a valid TTI
float delta_t = TTI_SUB(tti, last_tti) * 1e-3f; // Elapsed time in seconds since the last TA command
float delta_d = (next_base_sec - prev_base_sec) * 3e8f / 2.0f; // Calculate distance difference in metres
// Write new data
speed_data[write_idx].tti = tti;
speed_data[write_idx].delta_t = delta_t;
speed_data[write_idx].delta_d = delta_d;
// Advance write index
write_idx = (write_idx + 1) % MAX_NOF_SPEED_VALUES;
// Advance the read index if the write index has caught up (buffer full)
if (write_idx == read_idx) {
read_idx = (read_idx + 1) % MAX_NOF_SPEED_VALUES;
}
}
last_tti = tti; // Update last TTI
}
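// Worked example of a stored sample (assumed standard LTE timing,
// T_s = 1/30.72e6 s): one N_TA step is 16*T_s ~= 520.8 ns of round-trip
// delay, so delta_d = 520.8e-9 * 3e8 / 2 ~= 78.1 m of one-way distance.
// Two such commands arriving 1000 TTIs (1 s) apart store delta_t = 1.0 s
// and delta_d ~= 78.1 m, i.e. ~78 m/s (~281 km/h) once averaged.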
/**
@ -150,7 +201,48 @@ public:
std::lock_guard<std::mutex> lock(mutex);
// Return the one-way distance in km from the current base: seconds * c/2 gives metres, / 1e3 gives km
return next_base_sec * (3.6f * 3e8f / 2.0f);
return next_base_sec * (3e8f / 2e3f);
}
/**
 * Calculates the approximate speed in km/h from the received TA commands
 *
 * @param tti Current TTI, used to age out old speed samples
 * @return Speed in km/h if enough data has been gathered, 0 otherwise
 */
float get_speed_kmph(uint32_t tti)
{
std::lock_guard<std::mutex> lock(mutex);
// Advance the read pointer past samples older than the maximum age
while (read_idx != write_idx and TTI_SUB(tti, speed_data[read_idx].tti) > MAX_AGE_SPEED_VALUES) {
read_idx = (read_idx + 1) % MAX_NOF_SPEED_VALUES;
// If no data remains, invalidate last_tti to prevent an invalid TTI difference
if (read_idx == write_idx) {
last_tti = -1;
}
}
// Early return if there is not enough data to calculate speed
uint32_t nof_values = ((write_idx + MAX_NOF_SPEED_VALUES) - read_idx) % MAX_NOF_SPEED_VALUES;
if (nof_values < MIN_NOF_SPEED_VALUES) {
return 0.0f;
}
// Compute speed from gathered data
float sum_t = 0.0f;
float sum_d = 0.0f;
for (uint32_t i = read_idx; i != write_idx; i = (i + 1) % MAX_NOF_SPEED_VALUES) {
sum_t += speed_data[i].delta_t;
sum_d += speed_data[i].delta_d;
}
if (!std::isnormal(sum_t)) {
return 0.0f; // Avoid division by zero
}
float speed_mps = sum_d / sum_t; // Speed in m/s
// Returns the speed in km/h
return speed_mps * 3.6f;
}
};

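To see the estimator end to end, a small hedged driver (the header path and TA command values are illustrative; 32 is one +16 N_TA step per the 6-bit MAC TA command, where 31 means "no change"):

#include <cstdio>
#include "srsue/hdr/phy/ta_control.h" // assumed header location

int main()
{
  srslog::basic_logger& log = srslog::fetch_basic_logger("PHY");
  srsue::ta_control     ta(log);

  uint32_t tti = 0;
  ta.add_ta_cmd_rar(tti, 100); // absolute TA from the RAR grant
  for (int i = 0; i < 9; ++i) {
    tti += 1000;                // one TA command per second
    ta.add_ta_cmd_new(tti, 32); // +1 step: UE drifting away from the eNB
  }
  // Each step adds ~78.1 m over 1 s, so this should report ~281 km/h.
  printf("estimated speed: %.1f km/h\n", ta.get_speed_kmph(tti));
  return 0;
}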