From be46806495a3e7ad45a93833c9c026d83daced33 Mon Sep 17 00:00:00 2001 From: David Rupprecht Date: Wed, 10 Mar 2021 09:51:21 +0100 Subject: [PATCH 01/64] PCAP: Enable carrier ID TAG --- lib/src/common/pcap.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/lib/src/common/pcap.c b/lib/src/common/pcap.c index e48319b73..0db6d10d5 100644 --- a/lib/src/common/pcap.c +++ b/lib/src/common/pcap.c @@ -91,6 +91,10 @@ inline int LTE_PCAP_PACK_MAC_CONTEXT_TO_BUFFER(MAC_Context_Info_t* context, uint buffer[offset++] = MAC_LTE_CRC_STATUS_TAG; buffer[offset++] = context->crcStatusOK; + /* CARRIER ID */ + buffer[offset++] = MAC_LTE_CARRIER_ID_TAG; + buffer[offset++] = context->cc_idx; + /* NB-IoT mode tag */ buffer[offset++] = MAC_LTE_NB_MODE_TAG; buffer[offset++] = context->nbiotMode; From 80104c7e65c12e12c0227c2f4f9516151493ecae Mon Sep 17 00:00:00 2001 From: Francisco Date: Sun, 14 Mar 2021 12:08:02 +0000 Subject: [PATCH 02/64] adt, bugfix - fix assert when accessing bounded_vector::data() member --- lib/include/srslte/adt/bounded_vector.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/include/srslte/adt/bounded_vector.h b/lib/include/srslte/adt/bounded_vector.h index 0a0997d12..eba3015a5 100644 --- a/lib/include/srslte/adt/bounded_vector.h +++ b/lib/include/srslte/adt/bounded_vector.h @@ -107,8 +107,8 @@ public: } T& front() { return (*this)[0]; } const T& front() const { return (*this)[0]; } - T* data() { return &front(); } - const T* data() const { return &front(); } + T* data() { return reinterpret_cast(&buffer[0]); } + const T* data() const { return reinterpret_cast(&buffer[0]); } // Iterators iterator begin() { return data(); } From 2e1882725976a2b8fb0c606cad1381a293f2a4bd Mon Sep 17 00:00:00 2001 From: Francisco Date: Sun, 14 Mar 2021 13:17:14 +0000 Subject: [PATCH 03/64] avoid accessing C-array via &[0] for initialized buffer --- lib/include/srslte/adt/bounded_vector.h | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/lib/include/srslte/adt/bounded_vector.h b/lib/include/srslte/adt/bounded_vector.h index eba3015a5..d940347c7 100644 --- a/lib/include/srslte/adt/bounded_vector.h +++ b/lib/include/srslte/adt/bounded_vector.h @@ -107,13 +107,13 @@ public: } T& front() { return (*this)[0]; } const T& front() const { return (*this)[0]; } - T* data() { return reinterpret_cast(&buffer[0]); } - const T* data() const { return reinterpret_cast(&buffer[0]); } + T* data() { return &front(); } + const T* data() const { return &front(); } // Iterators - iterator begin() { return data(); } + iterator begin() { return reinterpret_cast(buffer); } iterator end() { return begin() + size_; } - const_iterator begin() const { return data(); } + const_iterator begin() const { return reinterpret_cast(buffer); } const_iterator end() const { return begin() + size_; } // Capacity From 9601770196bc46ad1e2d10a634958f08520a3180 Mon Sep 17 00:00:00 2001 From: Francisco Date: Sat, 6 Mar 2021 18:09:19 +0000 Subject: [PATCH 04/64] rrc,bugfix - stop scheduling when RLC AM max_retx is achieved --- srsenb/hdr/stack/rrc/mac_controller.h | 2 ++ srsenb/src/stack/rrc/mac_controller.cc | 8 ++++++++ srsenb/src/stack/rrc/rrc_ue.cc | 2 ++ 3 files changed, 12 insertions(+) diff --git a/srsenb/hdr/stack/rrc/mac_controller.h b/srsenb/hdr/stack/rrc/mac_controller.h index 3a97de60a..e226e0002 100644 --- a/srsenb/hdr/stack/rrc/mac_controller.h +++ b/srsenb/hdr/stack/rrc/mac_controller.h @@ -55,6 +55,8 @@ public: const srslte::rrc_ue_capabilities_t& uecaps); void handle_ho_prep(const 
asn1::rrc::ho_prep_info_r8_ies_s& ho_prep); + void handle_max_retx(); + const ue_cfg_t& get_ue_sched_cfg() const { return current_sched_ue_cfg; } bool is_crnti_set() const { return crnti_set; } diff --git a/srsenb/src/stack/rrc/mac_controller.cc b/srsenb/src/stack/rrc/mac_controller.cc index b88fc8f12..8924377db 100644 --- a/srsenb/src/stack/rrc/mac_controller.cc +++ b/srsenb/src/stack/rrc/mac_controller.cc @@ -299,6 +299,14 @@ void mac_controller::handle_ho_prep(const asn1::rrc::ho_prep_info_r8_ies_s& ho_p } } +void mac_controller::handle_max_retx() +{ + for (auto& ue_bearer : current_sched_ue_cfg.ue_bearers) { + ue_bearer.direction = sched_interface::ue_bearer_cfg_t::IDLE; + } + update_mac(config_tx); +} + void mac_controller::set_scell_activation(const std::bitset& scell_mask) { for (uint32_t i = 1; i < current_sched_ue_cfg.supported_cc_list.size(); ++i) { diff --git a/srsenb/src/stack/rrc/rrc_ue.cc b/srsenb/src/stack/rrc/rrc_ue.cc index ff365e859..02a9f147c 100644 --- a/srsenb/src/stack/rrc/rrc_ue.cc +++ b/srsenb/src/stack/rrc/rrc_ue.cc @@ -136,6 +136,8 @@ void rrc::ue::max_retx_reached() // Give UE time to start re-establishment set_activity_timeout(UE_REESTABLISH_TIMEOUT); + + mac_ctrl.handle_max_retx(); } } From e43f555fc0ce0a651d63dc38ac93aeda9b25f7f6 Mon Sep 17 00:00:00 2001 From: faluco Date: Mon, 15 Mar 2021 18:33:39 +0100 Subject: [PATCH 05/64] - Provide a fast path for logging pre-formatted strings that are small and fit in 64 bytes without any allocations. - Use this new feature in the tracer. --- .../srslte/srslog/detail/log_entry_metadata.h | 16 +++++---- .../srslte/srslog/detail/support/work_queue.h | 2 +- lib/include/srslte/srslog/log_channel.h | 36 +++++++++++++++++-- lib/src/srslog/backend_worker.cpp | 8 +++-- lib/src/srslog/event_trace.cpp | 13 +++---- lib/test/srslog/json_formatter_test.cpp | 2 +- lib/test/srslog/log_backend_test.cpp | 27 +++++++------- lib/test/srslog/log_channel_test.cpp | 26 ++++++++++++++ lib/test/srslog/text_formatter_test.cpp | 2 +- 9 files changed, 98 insertions(+), 34 deletions(-) diff --git a/lib/include/srslte/srslog/detail/log_entry_metadata.h b/lib/include/srslte/srslog/detail/log_entry_metadata.h index 57f306a6d..e69850c52 100644 --- a/lib/include/srslte/srslog/detail/log_entry_metadata.h +++ b/lib/include/srslte/srslog/detail/log_entry_metadata.h @@ -18,6 +18,9 @@ namespace srslog { +/// This type is used to store small strings without doing any memory allocation. +using small_str_buffer = fmt::basic_memory_buffer; + namespace detail { /// This structure gives the user a way to log generic information as a context. @@ -31,13 +34,14 @@ struct log_context { /// Metadata fields carried for each log entry. struct log_entry_metadata { - std::chrono::high_resolution_clock::time_point tp; - log_context context; - const char* fmtstring; + std::chrono::high_resolution_clock::time_point tp; + log_context context; + const char* fmtstring; fmt::dynamic_format_arg_store store; - std::string log_name; - char log_tag; - std::vector hex_dump; + std::string log_name; + char log_tag; + small_str_buffer small_str; + std::vector hex_dump; }; } // namespace detail diff --git a/lib/include/srslte/srslog/detail/support/work_queue.h b/lib/include/srslte/srslog/detail/support/work_queue.h index 4f1a57d0a..29d0dd888 100644 --- a/lib/include/srslte/srslog/detail/support/work_queue.h +++ b/lib/include/srslte/srslog/detail/support/work_queue.h @@ -111,7 +111,7 @@ public: // Did we wake up on timeout? 
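    // Note: on a timeout with nothing queued the pair's boolean is false and a
    // default-constructed T is returned, so callers can tell "no element" apart
    // from a successfully popped item.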
if (timedout && queue.empty()) { cond_var.unlock(); - return {false, T{}}; + return {false, T()}; } // Here we have been woken up normally. diff --git a/lib/include/srslte/srslog/log_channel.h b/lib/include/srslte/srslog/log_channel.h index 8a5cd9678..75b67f330 100644 --- a/lib/include/srslte/srslog/log_channel.h +++ b/lib/include/srslte/srslog/log_channel.h @@ -113,7 +113,34 @@ public: fmtstr, std::move(store), log_name, - log_tag}}; + log_tag, + small_str_buffer()}}; + backend.push(std::move(entry)); + } + + /// Builds the provided log entry and passes it to the backend. When the + /// channel is disabled the log entry will be discarded. + void operator()(small_str_buffer &&str) + { + if (!enabled()) { + return; + } + + // Send the log entry to the backend. + log_formatter& formatter = log_sink.get_formatter(); + detail::log_entry entry = { + &log_sink, + [&formatter](detail::log_entry_metadata&& metadata, + fmt::memory_buffer& buffer) { + formatter.format(std::move(metadata), buffer); + }, + {std::chrono::high_resolution_clock::now(), + {ctx_value, should_print_context}, + nullptr, + {}, + log_name, + log_tag, + std::move(str)}}; backend.push(std::move(entry)); } @@ -152,6 +179,7 @@ public: std::move(store), log_name, log_tag, + small_str_buffer(), std::vector(buffer, buffer + len)}}; backend.push(std::move(entry)); } @@ -178,7 +206,8 @@ public: nullptr, {}, log_name, - log_tag}}; + log_tag, + small_str_buffer()}}; backend.push(std::move(entry)); } @@ -209,7 +238,8 @@ public: fmtstr, std::move(store), log_name, - log_tag}}; + log_tag, + small_str_buffer()}}; backend.push(std::move(entry)); } diff --git a/lib/src/srslog/backend_worker.cpp b/lib/src/srslog/backend_worker.cpp index 26b63460e..1ca65af51 100644 --- a/lib/src/srslog/backend_worker.cpp +++ b/lib/src/srslog/backend_worker.cpp @@ -90,6 +90,11 @@ void backend_worker::process_log_entry(detail::log_entry&& entry) assert(entry.format_func && "Invalid format function"); fmt_buffer.clear(); + + // Already formatted strings in the foreground are passed to the formatter as the fmtstring. + if (entry.metadata.small_str.size()) { + entry.metadata.fmtstring = entry.metadata.small_str.data(); + } entry.format_func(std::move(entry.metadata), fmt_buffer); if (auto err_str = entry.s->write({fmt_buffer.data(), fmt_buffer.size()})) { @@ -99,8 +104,7 @@ void backend_worker::process_log_entry(detail::log_entry&& entry) void backend_worker::process_outstanding_entries() { - assert(!running_flag && - "Cannot process outstanding entries while thread is running"); + assert(!running_flag && "Cannot process outstanding entries while thread is running"); while (true) { auto item = queue.timed_pop(1); diff --git a/lib/src/srslog/event_trace.cpp b/lib/src/srslog/event_trace.cpp index d3bc13328..196f4373b 100644 --- a/lib/src/srslog/event_trace.cpp +++ b/lib/src/srslog/event_trace.cpp @@ -124,17 +124,18 @@ void trace_duration_end(const std::string& category, const std::string& name) /// Private implementation of the complete event destructor. srslog::detail::scoped_complete_event::~scoped_complete_event() { - if (!tracer) + if (!tracer) { return; + } auto end = std::chrono::steady_clock::now(); unsigned long long diff = std::chrono::duration_cast(end - start) .count(); - (*tracer)("[TID:%0u] Complete event \"%s\" (duration %lld us): %s", - (unsigned)::pthread_self(), - category, - diff, - name); + small_str_buffer str; + // Limit to the category and name strings to a predefined length so everything fits in a small string. 
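  // Note: "{:.32}" and "{:.16}" are fmt precision specifiers that truncate 'category' and
  // 'name' to at most 32 and 16 characters; 'diff' is the event duration in microseconds.
  // The trailing '\0' pushed below lets the backend reuse this buffer as a C string (fmtstring).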
+ fmt::format_to(str, "{:.32} {:.16}, {}", category, name, diff); + str.push_back('\0'); + (*tracer)(std::move(str)); } diff --git a/lib/test/srslog/json_formatter_test.cpp b/lib/test/srslog/json_formatter_test.cpp index e10f1028c..a5fa1974b 100644 --- a/lib/test/srslog/json_formatter_test.cpp +++ b/lib/test/srslog/json_formatter_test.cpp @@ -27,7 +27,7 @@ static detail::log_entry_metadata build_log_entry_metadata() fmt::dynamic_format_arg_store store; store.push_back(88); - return {tp, {10, true}, "Text %d", std::move(store), "ABC", 'Z'}; + return {tp, {10, true}, "Text %d", std::move(store), "ABC", 'Z', small_str_buffer()}; } static bool when_fully_filled_log_entry_then_everything_is_formatted() diff --git a/lib/test/srslog/log_backend_test.cpp b/lib/test/srslog/log_backend_test.cpp index 28a81714d..695bfa967 100644 --- a/lib/test/srslog/log_backend_test.cpp +++ b/lib/test/srslog/log_backend_test.cpp @@ -70,19 +70,6 @@ private: } // namespace -static bool when_backend_is_not_started_then_pushed_log_entries_are_ignored() -{ - sink_spy spy; - log_backend_impl backend; - - detail::log_entry entry = {&spy}; - backend.push(std::move(entry)); - - ASSERT_EQ(spy.write_invocation_count(), 0); - - return true; -} - /// Builds a basic log entry. static detail::log_entry build_log_entry(sink* s) { @@ -95,7 +82,19 @@ static detail::log_entry build_log_entry(sink* s) return { s, [](detail::log_entry_metadata&& metadata, fmt::memory_buffer& buffer) {}, - {tp, {0, false}, "Text %d", std::move(store), "", '\0'}}; + {tp, {0, false}, "Text %d", std::move(store), "", '\0', small_str_buffer()}}; +} + +static bool when_backend_is_not_started_then_pushed_log_entries_are_ignored() +{ + sink_spy spy; + log_backend_impl backend; + + backend.push(build_log_entry(&spy)); + + ASSERT_EQ(spy.write_invocation_count(), 0); + + return true; } static bool when_backend_is_started_then_pushed_log_entries_are_sent_to_sink() diff --git a/lib/test/srslog/log_channel_test.cpp b/lib/test/srslog/log_channel_test.cpp index 1ccb6f2d5..3fa24fb7c 100644 --- a/lib/test/srslog/log_channel_test.cpp +++ b/lib/test/srslog/log_channel_test.cpp @@ -278,6 +278,30 @@ when_logging_with_context_and_message_then_filled_in_log_entry_is_pushed_into_th return true; } +static bool +when_logging_with_small_string_then_filled_in_log_entry_is_pushed_into_the_backend() +{ + backend_spy backend; + test_dummies::sink_dummy s; + + log_channel log("id", s, backend); + + small_str_buffer buf; + fmt::format_to(buf, "A {} {} {}", 1, 2, 3); + log(std::move(buf)); + + ASSERT_EQ(backend.push_invocation_count(), 1); + + const detail::log_entry& entry = backend.last_entry(); + ASSERT_EQ(&s, entry.s); + ASSERT_NE(entry.format_func, nullptr); + ASSERT_NE(entry.metadata.tp.time_since_epoch().count(), 0); + ASSERT_EQ(entry.metadata.hex_dump.empty(), true); + ASSERT_EQ(std::string(entry.metadata.small_str.data()), "A 1 2 3"); + + return true; +} + int main() { TEST_FUNCTION(when_log_channel_is_created_then_id_matches_expected_value); @@ -296,6 +320,8 @@ int main() when_logging_with_context_then_filled_in_log_entry_is_pushed_into_the_backend); TEST_FUNCTION( when_logging_with_context_and_message_then_filled_in_log_entry_is_pushed_into_the_backend); + TEST_FUNCTION( + when_logging_with_small_string_then_filled_in_log_entry_is_pushed_into_the_backend); return 0; } diff --git a/lib/test/srslog/text_formatter_test.cpp b/lib/test/srslog/text_formatter_test.cpp index 3d0e008f5..4aee4e840 100644 --- a/lib/test/srslog/text_formatter_test.cpp +++ 
b/lib/test/srslog/text_formatter_test.cpp @@ -27,7 +27,7 @@ static detail::log_entry_metadata build_log_entry_metadata() fmt::dynamic_format_arg_store store; store.push_back(88); - return {tp, {10, true}, "Text %d", std::move(store), "ABC", 'Z'}; + return {tp, {10, true}, "Text %d", std::move(store), "ABC", 'Z', small_str_buffer()}; } static bool when_fully_filled_log_entry_then_everything_is_formatted() From 6f6c20e1942863aad8ad5ecbf5bbd8686adabc98 Mon Sep 17 00:00:00 2001 From: Xavier Arteaga Date: Mon, 8 Mar 2021 18:56:18 +0100 Subject: [PATCH 06/64] SRSUE: Added procedure for multiplexing UCI in NR-PUSCH --- .../interfaces/rrc_nr_interface_types.h | 11 ++ lib/include/srslte/phy/phch/phch_cfg_nr.h | 19 +++ lib/include/srslte/phy/phch/pusch_nr.h | 1 + lib/include/srslte/phy/phch/ra_nr.h | 14 +++ lib/include/srslte/phy/ue/ue_ul_nr.h | 7 +- lib/src/phy/phch/pusch_nr.c | 21 ++++ lib/src/phy/phch/ra_nr.c | 111 ++++++++++++++++++ lib/src/phy/ue/ue_ul_nr.c | 8 +- srsue/src/phy/nr/cc_worker.cc | 10 +- 9 files changed, 195 insertions(+), 7 deletions(-) diff --git a/lib/include/srslte/interfaces/rrc_nr_interface_types.h b/lib/include/srslte/interfaces/rrc_nr_interface_types.h index 873a5c27b..5c916b81c 100644 --- a/lib/include/srslte/interfaces/rrc_nr_interface_types.h +++ b/lib/include/srslte/interfaces/rrc_nr_interface_types.h @@ -178,11 +178,22 @@ struct phy_cfg_nr_t { // betaOffsetACK-Index1: 9 // betaOffsetACK-Index2: 9 // betaOffsetACK-Index3: 9 + pusch.beta_offsets.ack_index1 = 9; + pusch.beta_offsets.ack_index2 = 9; + pusch.beta_offsets.ack_index3 = 9; + // betaOffsetCSI-Part1-Index1: 6 // betaOffsetCSI-Part1-Index2: 6 + pusch.beta_offsets.csi1_index1 = 6; + pusch.beta_offsets.csi1_index2 = 6; + // betaOffsetCSI-Part2-Index1: 6 // betaOffsetCSI-Part2-Index2: 6 + pusch.beta_offsets.csi2_index1 = 6; + pusch.beta_offsets.csi2_index2 = 6; + // scaling: f1 (3) + pusch.scaling = 1; // pucch-Config: setup (1) // setup diff --git a/lib/include/srslte/phy/phch/phch_cfg_nr.h b/lib/include/srslte/phy/phch/phch_cfg_nr.h index f694eb80f..b6fc8a46e 100644 --- a/lib/include/srslte/phy/phch/phch_cfg_nr.h +++ b/lib/include/srslte/phy/phch/phch_cfg_nr.h @@ -144,6 +144,20 @@ typedef struct SRSLTE_API { srslte_sch_tb_t tb[SRSLTE_MAX_TB]; } srslte_sch_grant_nr_t; +/** + * @brief Beta offset configuration provided from upper layers + * @remark Configure according to TS 38.331 BetaOffsets + */ +typedef struct { + uint32_t ack_index1; ///< Use for up to 2 HARQ-ACK bits. Set to 11 if absent. + uint32_t ack_index2; ///< Use for up to 11 HARQ-ACK bits. Set to 11 if absent. + uint32_t ack_index3; ///< Use for more than 11 HARQ-ACK bits. Set to 11 if absent. + uint32_t csi1_index1; ///< Use for up to 11 CSI bits. Set to 13 if absent. + uint32_t csi1_index2; ///< Use for more than 11 CSI bits. Set to 13 if absent. + uint32_t csi2_index1; ///< Use for up to 11 CSI bits. Set to 13 if absent. + uint32_t csi2_index2; ///< Use for more than 11 CSI bits. Set to 13 if absent. +} srslte_beta_offsets_t; + /** * @brief flatten SCH configuration parameters provided by higher layers * @remark Described in TS 38.331 V15.10.0 Section PDSCH-Config @@ -188,6 +202,10 @@ typedef struct SRSLTE_API { bool rbg_size_cfg_1; ///< RBG size configuration (1 or 2) srslte_sch_cfg_t sch_cfg; ///< Common shared channel parameters + + /// PUSCH only + srslte_beta_offsets_t beta_offsets; /// Semi-static only. + float scaling; /// Indicates a scaling factor to limit the number of resource elements assigned to UCI on PUSCH. 
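  /// Note: per TS 38.331 UCI-OnPUSCH the scaling factor takes one of the values
  /// 0.5, 0.65, 0.8 or 1.0 (f0p5/f0p65/f0p8/f1); the default configuration above
  /// selects f1, i.e. scaling = 1.0.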
} srslte_sch_hl_cfg_nr_t; /** @@ -207,6 +225,7 @@ typedef struct SRSLTE_API { bool enable_transform_precoder; float beta_harq_ack_offset; float beta_csi_part1_offset; + float beta_csi_part2_offset; float scaling; bool freq_hopping_enabled; } srslte_sch_cfg_nr_t; diff --git a/lib/include/srslte/phy/phch/pusch_nr.h b/lib/include/srslte/phy/phch/pusch_nr.h index 2db7268e4..ffac74c42 100644 --- a/lib/include/srslte/phy/phch/pusch_nr.h +++ b/lib/include/srslte/phy/phch/pusch_nr.h @@ -115,6 +115,7 @@ SRSLTE_API uint32_t srslte_pusch_nr_rx_info(const srslte_pusch_nr_t* q, SRSLTE_API uint32_t srslte_pusch_nr_tx_info(const srslte_pusch_nr_t* q, const srslte_sch_cfg_nr_t* cfg, const srslte_sch_grant_nr_t* grant, + const srslte_uci_value_nr_t* uci_value, char* str, uint32_t str_len); diff --git a/lib/include/srslte/phy/phch/ra_nr.h b/lib/include/srslte/phy/phch/ra_nr.h index c5e9eb928..1815e33da 100644 --- a/lib/include/srslte/phy/phch/ra_nr.h +++ b/lib/include/srslte/phy/phch/ra_nr.h @@ -121,4 +121,18 @@ SRSLTE_API int srslte_ra_ul_dci_to_grant_nr(const srslte_carrier_nr_t* carrie srslte_sch_cfg_nr_t* pusch_cfg, srslte_sch_grant_nr_t* pusch_grant); +/** + * @brief Setups the Uplink Control Information configuration for a PUSCH transmission + * + * @remark Implement procedure described in TS 38.213 9.3 UCI reporting in physical uplink shared channel + * + * @param pusch_hl_cfg PUSCH configuration provided by higher layers + * @param uci_cfg Uplink Control Information configuration for this PUSCH transmission + * @param pusch_cfg PUSCH configuration after applying the procedure + * @return SRSLTE_SUCCESS if the procedure is successful, SRSLTE_ERROR code otherwise + */ +SRSLTE_API int srslte_ra_ul_set_grant_uci_nr(const srslte_sch_hl_cfg_nr_t* pusch_hl_cfg, + const srslte_uci_cfg_nr_t* uci_cfg, + srslte_sch_cfg_nr_t* pusch_cfg); + #endif // SRSLTE_RA_NR_H diff --git a/lib/include/srslte/phy/ue/ue_ul_nr.h b/lib/include/srslte/phy/ue/ue_ul_nr.h index d87f35556..6d2575009 100644 --- a/lib/include/srslte/phy/ue/ue_ul_nr.h +++ b/lib/include/srslte/phy/ue/ue_ul_nr.h @@ -67,8 +67,11 @@ SRSLTE_API int srslte_ue_ul_nr_encode_pucch(srslte_ue_ul_nr_t* SRSLTE_API void srslte_ue_ul_nr_free(srslte_ue_ul_nr_t* q); -SRSLTE_API int -srslte_ue_ul_nr_pusch_info(const srslte_ue_ul_nr_t* q, const srslte_sch_cfg_nr_t* cfg, char* str, uint32_t str_len); +SRSLTE_API int srslte_ue_ul_nr_pusch_info(const srslte_ue_ul_nr_t* q, + const srslte_sch_cfg_nr_t* cfg, + const srslte_uci_value_nr_t* uci_value, + char* str, + uint32_t str_len); SRSLTE_API int srslte_ue_ul_nr_pucch_info(const srslte_pucch_nr_resource_t* resource, const srslte_uci_data_nr_t* uci_data, diff --git a/lib/src/phy/phch/pusch_nr.c b/lib/src/phy/phch/pusch_nr.c index 519c3b20c..b69bef529 100644 --- a/lib/src/phy/phch/pusch_nr.c +++ b/lib/src/phy/phch/pusch_nr.c @@ -1284,6 +1284,10 @@ uint32_t srslte_pusch_nr_rx_info(const srslte_pusch_nr_t* q, { uint32_t len = 0; + if (q == NULL || cfg == NULL || grant == NULL || str == NULL || str_len == 0) { + return 0; + } + len += srslte_pusch_nr_grant_info(cfg, grant, &str[len], str_len - len); if (q->evm_buffer != NULL) { @@ -1302,6 +1306,11 @@ uint32_t srslte_pusch_nr_rx_info(const srslte_pusch_nr_t* q, } if (res != NULL) { + srslte_uci_data_nr_t uci_data = {}; + uci_data.cfg = cfg->uci; + uci_data.value = res[0].uci; + len += srslte_uci_nr_info(&uci_data, &str[len], str_len - len); + len = srslte_print_check(str, str_len, len, ",crc={", 0); for (uint32_t i = 0; i < SRSLTE_MAX_CODEWORDS; i++) { if (grant->tb[i].enabled) { 
@@ -1326,13 +1335,25 @@ uint32_t srslte_pusch_nr_rx_info(const srslte_pusch_nr_t* q, uint32_t srslte_pusch_nr_tx_info(const srslte_pusch_nr_t* q, const srslte_sch_cfg_nr_t* cfg, const srslte_sch_grant_nr_t* grant, + const srslte_uci_value_nr_t* uci_value, char* str, uint32_t str_len) { uint32_t len = 0; + if (q == NULL || cfg == NULL || grant == NULL || str == NULL || str_len == 0) { + return 0; + } + len += srslte_pusch_nr_grant_info(cfg, grant, &str[len], str_len - len); + if (uci_value != NULL) { + srslte_uci_data_nr_t uci_data = {}; + uci_data.cfg = cfg->uci; + uci_data.value = *uci_value; + len += srslte_uci_nr_info(&uci_data, &str[len], str_len - len); + } + if (q->meas_time_en) { len = srslte_print_check(str, str_len, len, ", t=%d us", q->meas_time_us); } diff --git a/lib/src/phy/phch/ra_nr.c b/lib/src/phy/phch/ra_nr.c index 399c881da..d2c7f4ee2 100644 --- a/lib/src/phy/phch/ra_nr.c +++ b/lib/src/phy/phch/ra_nr.c @@ -11,6 +11,7 @@ */ #include "srslte/phy/phch/ra_nr.h" +#include "srslte/phy/phch/csi.h" #include "srslte/phy/phch/pdsch_nr.h" #include "srslte/phy/phch/ra_dl_nr.h" #include "srslte/phy/phch/ra_ul_nr.h" @@ -26,6 +27,8 @@ typedef struct { #define RA_NR_MCS_SIZE_TABLE2 28 #define RA_NR_MCS_SIZE_TABLE3 29 #define RA_NR_TBS_SIZE_TABLE 93 +#define RA_NR_BETA_OFFSET_HARQACK_SIZE 32 +#define RA_NR_BETA_OFFSET_CSI_SIZE 32 #define RA_NR_READ_TABLE(N) \ static double srslte_ra_nr_R_from_mcs_table##N(uint32_t mcs_idx) \ @@ -108,6 +111,23 @@ static const uint32_t ra_nr_tbs_table[RA_NR_TBS_SIZE_TABLE] = { 1192, 1224, 1256, 1288, 1320, 1352, 1416, 1480, 1544, 1608, 1672, 1736, 1800, 1864, 1928, 2024, 2088, 2152, 2216, 2280, 2408, 2472, 2536, 2600, 2664, 2728, 2792, 2856, 2976, 3104, 3240, 3368, 3496, 3624, 3752, 3824}; +/** + * TS 38.213 V15.10.0 Table 9.3-1: Mapping of beta_offset values for HARQ-ACK information and the index signalled by + * higher layers + */ +static const float ra_nr_beta_offset_ack_table[RA_NR_BETA_OFFSET_HARQACK_SIZE] = { + 1.000f, 2.000f, 2.500f, 3.125f, 4.000f, 5.000f, 6.250f, 8.000f, 10.000f, 12.625f, 15.875f, + 20.000f, 31.000f, 50.000f, 80.000f, 126.000f, NAN, NAN, NAN, NAN, NAN, NAN, + NAN, NAN, NAN, NAN, NAN, NAN, NAN, NAN, NAN, NAN}; + +/** + * TS 38.213 V15.10.0 Table 9.3-2: Mapping of beta_offset values for CSI and the index signalled by higher layers + */ +static const float ra_nr_beta_offset_csi_table[RA_NR_BETA_OFFSET_HARQACK_SIZE] = { + 1.125f, 1.250f, 1.375f, 1.625f, 1.750f, 2.000f, 2.250f, 2.500f, 2.875f, 3.125f, 3.500f, + 4.000f, 5.000f, 6.250f, 8.000f, 10.000f, 12.625f, 15.875f, 20.000f, NAN, NAN, NAN, + NAN, NAN, NAN, NAN, NAN, NAN, NAN, NAN, NAN, NAN}; + typedef enum { ra_nr_table_1 = 0, ra_nr_table_2, ra_nr_table_3 } ra_nr_table_t; static ra_nr_table_t ra_nr_select_table_pusch_noprecoding(srslte_mcs_table_t mcs_table, @@ -636,3 +656,94 @@ int srslte_ra_ul_dci_to_grant_nr(const srslte_carrier_nr_t* carrier, return SRSLTE_SUCCESS; } + +/* + * Implements clauses related to HARQ-ACK beta offset selection from the section `9.3 UCI reporting in physical uplink + * shared channel` + */ +static float ra_ul_beta_offset_ack_semistatic(const srslte_beta_offsets_t* beta_offsets, + const srslte_uci_cfg_nr_t* uci_cfg) +{ + // Select Beta Offset index from the number of HARQ-ACK bits + uint32_t beta_offset_index = beta_offsets->ack_index1; + if (uci_cfg->o_ack > 11) { + beta_offset_index = beta_offsets->ack_index3; + } else if (uci_cfg->o_ack > 2) { + beta_offset_index = beta_offsets->ack_index1; + } + + // Protect table boundary + if (beta_offset_index > 
RA_NR_BETA_OFFSET_HARQACK_SIZE) { + ERROR("Beta offset index for HARQ-ACK (%d) for O_ack=%d exceeds table size (%d)", + beta_offset_index, + uci_cfg->o_ack, + RA_NR_BETA_OFFSET_HARQACK_SIZE); + return NAN; + } + + // Select beta offset from Table 9.3-1 + return ra_nr_beta_offset_ack_table[beta_offset_index]; +} + +/* + * Implements clauses related to HARQ-ACK beta offset selection from the section `9.3 UCI reporting in physical uplink + * shared channel` + */ +static float ra_ul_beta_offset_csi_semistatic(const srslte_beta_offsets_t* beta_offsets, + const srslte_uci_cfg_nr_t* uci_cfg, + bool part2) +{ + // Calculate number of CSI bits; CSI part 2 is not supported. + uint32_t O_csi = part2 ? 0 : srslte_csi_part1_nof_bits(uci_cfg->csi, uci_cfg->nof_csi); + + // Select Beta Offset index from the number of HARQ-ACK bits + uint32_t beta_offset_index = part2 ? beta_offsets->csi2_index1 : beta_offsets->csi1_index1; + if (O_csi > 11) { + beta_offset_index = part2 ? beta_offsets->csi2_index2 : beta_offsets->csi1_index2; + } + + // Protect table boundary + if (beta_offset_index > RA_NR_BETA_OFFSET_CSI_SIZE) { + ERROR("Beta offset index for CSI (%d) for O_csi=%d exceeds table size (%d)", + beta_offset_index, + O_csi, + RA_NR_BETA_OFFSET_CSI_SIZE); + return NAN; + } + + // Select beta offset from Table 9.3-1 + return ra_nr_beta_offset_csi_table[beta_offset_index]; +} + +int srslte_ra_ul_set_grant_uci_nr(const srslte_sch_hl_cfg_nr_t* pusch_hl_cfg, + const srslte_uci_cfg_nr_t* uci_cfg, + srslte_sch_cfg_nr_t* pusch_cfg) +{ + // Select beta offsets + pusch_cfg->beta_harq_ack_offset = ra_ul_beta_offset_ack_semistatic(&pusch_hl_cfg->beta_offsets, uci_cfg); + if (!isnormal(pusch_cfg->beta_harq_ack_offset)) { + return SRSLTE_ERROR; + } + + pusch_cfg->beta_csi_part1_offset = ra_ul_beta_offset_csi_semistatic(&pusch_hl_cfg->beta_offsets, uci_cfg, false); + if (!isnormal(pusch_cfg->beta_csi_part1_offset)) { + return SRSLTE_ERROR; + } + + pusch_cfg->beta_csi_part2_offset = ra_ul_beta_offset_csi_semistatic(&pusch_hl_cfg->beta_offsets, uci_cfg, true); + if (!isnormal(pusch_cfg->beta_csi_part2_offset)) { + return SRSLTE_ERROR; + } + + // pusch_cfg->beta_csi_part2_offset = pusch_hl_cfg->beta_offset_csi2; + pusch_cfg->scaling = pusch_hl_cfg->scaling; + if (!isnormal(pusch_cfg->scaling)) { + ERROR("Invalid Scaling (%f)", pusch_cfg->scaling); + return SRSLTE_ERROR; + } + + // Copy UCI configuration + pusch_cfg->uci = *uci_cfg; + + return SRSLTE_SUCCESS; +} \ No newline at end of file diff --git a/lib/src/phy/ue/ue_ul_nr.c b/lib/src/phy/ue/ue_ul_nr.c index 43496a887..6c64b9a2b 100644 --- a/lib/src/phy/ue/ue_ul_nr.c +++ b/lib/src/phy/ue/ue_ul_nr.c @@ -230,12 +230,16 @@ void srslte_ue_ul_nr_free(srslte_ue_ul_nr_t* q) SRSLTE_MEM_ZERO(q, srslte_ue_ul_nr_t, 1); } -int srslte_ue_ul_nr_pusch_info(const srslte_ue_ul_nr_t* q, const srslte_sch_cfg_nr_t* cfg, char* str, uint32_t str_len) +int srslte_ue_ul_nr_pusch_info(const srslte_ue_ul_nr_t* q, + const srslte_sch_cfg_nr_t* cfg, + const srslte_uci_value_nr_t* uci_value, + char* str, + uint32_t str_len) { int len = 0; // Append PDSCH info - len += srslte_pusch_nr_tx_info(&q->pusch, cfg, &cfg->grant, &str[len], str_len - len); + len += srslte_pusch_nr_tx_info(&q->pusch, cfg, &cfg->grant, uci_value, &str[len], str_len - len); return len; } diff --git a/srsue/src/phy/nr/cc_worker.cc b/srsue/src/phy/nr/cc_worker.cc index 7915c185d..8d7ea11ee 100644 --- a/srsue/src/phy/nr/cc_worker.cc +++ b/srsue/src/phy/nr/cc_worker.cc @@ -283,14 +283,18 @@ bool cc_worker::work_ul() mac_ul_grant.rnti = 
pusch_cfg.grant.rnti; mac_ul_grant.tti = ul_slot_cfg.idx; mac_ul_grant.tbs = pusch_cfg.grant.tb[0].tbs; - phy->stack->new_grant_ul(0, mac_ul_grant, &ul_action); - // Assignning MAC provided values to PUSCH config structs + // Set UCI configuration following procedures + srslte_ra_ul_set_grant_uci_nr(&phy->cfg.pusch, &uci_data.cfg, &pusch_cfg); + + // Assigning MAC provided values to PUSCH config structs pusch_cfg.grant.tb[0].softbuffer.tx = ul_action.tb.softbuffer; + // Setup data for encoding srslte_pusch_data_nr_t data = {}; data.payload = ul_action.tb.payload->msg; + data.uci = uci_data.value; // Encode PUSCH transmission if (srslte_ue_ul_nr_encode_pusch(&ue_ul, &ul_slot_cfg, &pusch_cfg, &data) < SRSLTE_SUCCESS) { @@ -301,7 +305,7 @@ bool cc_worker::work_ul() // PUSCH Logging if (logger.info.enabled()) { std::array str; - srslte_ue_ul_nr_pusch_info(&ue_ul, &pusch_cfg, str.data(), str.size()); + srslte_ue_ul_nr_pusch_info(&ue_ul, &pusch_cfg, &data.uci, str.data(), str.size()); logger.info(ul_action.tb.payload->msg, pusch_cfg.grant.tb[0].tbs / 8, "PUSCH (NR): cc=%d, %s, tti_tx=%d", From 4bab0b53b789688b0742d352ff65fbbd9637da17 Mon Sep 17 00:00:00 2001 From: Xavier Arteaga Date: Mon, 8 Mar 2021 19:37:35 +0100 Subject: [PATCH 07/64] SRSUE: fix slot index overflow --- lib/src/phy/ue/ue_dl_nr.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/lib/src/phy/ue/ue_dl_nr.c b/lib/src/phy/ue/ue_dl_nr.c index dd253931b..23616aa9a 100644 --- a/lib/src/phy/ue/ue_dl_nr.c +++ b/lib/src/phy/ue/ue_dl_nr.c @@ -320,7 +320,8 @@ static int ue_dl_nr_find_dl_dci_ss(srslte_ue_dl_nr_t* q, for (uint32_t L = 0; L < SRSLTE_SEARCH_SPACE_NOF_AGGREGATION_LEVELS_NR && count < nof_dci_msg; L++) { // Calculate possible PDCCH DCI candidates uint32_t candidates[SRSLTE_SEARCH_SPACE_MAX_NOF_CANDIDATES_NR] = {}; - int nof_candidates = srslte_pdcch_nr_locations_coreset(coreset, search_space, rnti, L, slot_cfg->idx, candidates); + int nof_candidates = srslte_pdcch_nr_locations_coreset( + coreset, search_space, rnti, L, SRSLTE_SLOT_NR_MOD(q->carrier.numerology, slot_cfg->idx), candidates); if (nof_candidates < SRSLTE_SUCCESS) { ERROR("Error calculating DCI candidate location"); return SRSLTE_ERROR; From f3bf0c1c68b8ef4595a6e3de034721ef502d7662 Mon Sep 17 00:00:00 2001 From: Xavier Arteaga Date: Tue, 9 Mar 2021 09:18:35 +0100 Subject: [PATCH 08/64] Wrap slot index in modulus macro --- lib/src/phy/ch_estimation/csi_rs.c | 2 +- lib/src/phy/ch_estimation/dmrs_pucch.c | 2 +- lib/src/phy/ch_estimation/dmrs_sch.c | 9 ++++----- lib/src/phy/phch/pucch_nr.c | 2 +- 4 files changed, 7 insertions(+), 8 deletions(-) diff --git a/lib/src/phy/ch_estimation/csi_rs.c b/lib/src/phy/ch_estimation/csi_rs.c index 40a070ee6..f13627d63 100644 --- a/lib/src/phy/ch_estimation/csi_rs.c +++ b/lib/src/phy/ch_estimation/csi_rs.c @@ -112,7 +112,7 @@ uint32_t csi_rs_cinit(const srslte_carrier_nr_t* carrier, const srslte_csi_rs_nzp_resource_t* resource, uint32_t l) { - uint32_t n = slot_cfg->idx % SRSLTE_NSLOTS_PER_FRAME_NR(carrier->numerology); + uint32_t n = SRSLTE_SLOT_NR_MOD(carrier->numerology, slot_cfg->idx); uint32_t n_id = resource->scrambling_id; return ((SRSLTE_NSYMB_PER_SLOT_NR * n + l + 1UL) * (2UL * n_id) << 10UL) + n_id; diff --git a/lib/src/phy/ch_estimation/dmrs_pucch.c b/lib/src/phy/ch_estimation/dmrs_pucch.c index 343e5a5ad..51d2aa77d 100644 --- a/lib/src/phy/ch_estimation/dmrs_pucch.c +++ b/lib/src/phy/ch_estimation/dmrs_pucch.c @@ -289,7 +289,7 @@ static uint32_t dmrs_pucch_format2_cinit(const srslte_carrier_nr_t* car const 
srslte_slot_cfg_t* slot, uint32_t l) { - uint64_t n = slot->idx; + uint64_t n = SRSLTE_SLOT_NR_MOD(carrier->numerology, slot->idx); uint64_t n_id = (cfg->scrambling_id_present) ? cfg->scambling_id : carrier->id; return SRSLTE_SEQUENCE_MOD((((SRSLTE_NSYMB_PER_SLOT_NR * n + l + 1UL) * (2UL * n_id + 1UL)) << 17UL) + 2UL * n_id); diff --git a/lib/src/phy/ch_estimation/dmrs_sch.c b/lib/src/phy/ch_estimation/dmrs_sch.c index 4cdc35ba0..f30b273cc 100644 --- a/lib/src/phy/ch_estimation/dmrs_sch.c +++ b/lib/src/phy/ch_estimation/dmrs_sch.c @@ -451,8 +451,6 @@ static uint32_t srslte_dmrs_sch_seed(const srslte_carrier_nr_t* carrier, { const srslte_dmrs_sch_cfg_t* dmrs_cfg = &cfg->dmrs; - slot_idx = slot_idx % SRSLTE_NSLOTS_PER_FRAME_NR(carrier->numerology); - // Calculate scrambling IDs uint32_t n_id = carrier->id; uint32_t n_scid = (grant->n_scid) ? 1 : 0; @@ -575,8 +573,8 @@ int srslte_dmrs_sch_put_sf(srslte_dmrs_sch_t* q, // Iterate symbols for (uint32_t i = 0; i < nof_symbols; i++) { - uint32_t l = symbols[i]; // Symbol index inside the slot - uint32_t slot_idx = slot_cfg->idx; // Slot index in the frame + uint32_t l = symbols[i]; // Symbol index inside the slot + uint32_t slot_idx = SRSLTE_SLOT_NR_MOD(q->carrier.numerology, slot_cfg->idx); // Slot index in the frame uint32_t cinit = srslte_dmrs_sch_seed(&q->carrier, pdsch_cfg, grant, slot_idx, l); srslte_dmrs_sch_put_symbol(q, pdsch_cfg, grant, cinit, delta, &sf_symbols[symbol_sz * l]); @@ -699,7 +697,8 @@ int srslte_dmrs_sch_estimate(srslte_dmrs_sch_t* q, for (uint32_t i = 0; i < nof_symbols; i++) { uint32_t l = symbols[i]; // Symbol index inside the slot - uint32_t cinit = srslte_dmrs_sch_seed(&q->carrier, pdsch_cfg, grant, slot_cfg->idx, l); + uint32_t cinit = srslte_dmrs_sch_seed( + &q->carrier, pdsch_cfg, grant, SRSLTE_SLOT_NR_MOD(q->carrier.numerology, slot_cfg->idx), l); nof_pilots_x_symbol = srslte_dmrs_sch_get_symbol( q, pdsch_cfg, grant, cinit, delta, &sf_symbols[symbol_sz * l], &q->pilot_estimates[nof_pilots_x_symbol * i]); diff --git a/lib/src/phy/phch/pucch_nr.c b/lib/src/phy/phch/pucch_nr.c index 30950cc96..4f06dde7e 100644 --- a/lib/src/phy/phch/pucch_nr.c +++ b/lib/src/phy/phch/pucch_nr.c @@ -71,7 +71,7 @@ int srslte_pucch_nr_alpha_idx(const srslte_carrier_nr_t* carrier, } // Compute number of slot - uint32_t n_slot = slot->idx % SRSLTE_NSLOTS_PER_FRAME_NR(carrier->numerology); + uint32_t n_slot = SRSLTE_SLOT_NR_MOD(carrier->numerology, slot->idx); // Generate pseudo-random sequence uint32_t cinit = cfg->hopping_id_present ? 
cfg->hopping_id : carrier->id; From d3cfb002110a011dea270d2eda806557e8924c1c Mon Sep 17 00:00:00 2001 From: Xavier Arteaga Date: Thu, 11 Mar 2021 09:49:53 +0100 Subject: [PATCH 09/64] SRSUE: Added TDD DL/UL slot detection --- .../interfaces/rrc_nr_interface_types.h | 19 ++++++-- lib/include/srslte/phy/common/phy_common_nr.h | 37 +++++++++++++++ lib/src/phy/common/phy_common_nr.c | 46 +++++++++++++++++++ srsue/src/phy/nr/cc_worker.cc | 10 ++++ 4 files changed, 109 insertions(+), 3 deletions(-) diff --git a/lib/include/srslte/interfaces/rrc_nr_interface_types.h b/lib/include/srslte/interfaces/rrc_nr_interface_types.h index 5c916b81c..7bc26936a 100644 --- a/lib/include/srslte/interfaces/rrc_nr_interface_types.h +++ b/lib/include/srslte/interfaces/rrc_nr_interface_types.h @@ -24,6 +24,7 @@ namespace srslte { **************************/ struct phy_cfg_nr_t { + srslte_tdd_config_nr_t tdd = {}; srslte_sch_hl_cfg_nr_t pdsch = {}; srslte_sch_hl_cfg_nr_t pusch = {}; srslte_pucch_nr_hl_cfg_t pucch = {}; @@ -34,9 +35,6 @@ struct phy_cfg_nr_t { phy_cfg_nr_t() { - // Default PDSCH configuration - pdsch.sch_cfg.mcs_table = srslte_mcs_table_256qam; - // Default PRACH configuration prach.is_nr = true; prach.config_idx = 16; @@ -46,6 +44,21 @@ struct phy_cfg_nr_t { prach.num_ra_preambles = 64; prach.hs_flag = false; + // tdd-UL-DL-ConfigurationCommon + // referenceSubcarrierSpacing: kHz15 (0) + // pattern1 + // dl-UL-TransmissionPeriodicity: ms10 (7) + // nrofDownlinkSlots: 7 + // nrofDownlinkSymbols: 6 + // nrofUplinkSlots: 2 + // nrofUplinkSymbols: 4 + tdd.pattern1.period_ms = 10; + tdd.pattern1.nof_dl_slots = 7; + tdd.pattern1.nof_dl_symbols = 6; + tdd.pattern1.nof_ul_slots = 2; + tdd.pattern1.nof_ul_symbols = 4; + tdd.pattern2.period_ms = 0; + // physicalCellGroupConfig // pdsch-HARQ-ACK-Codebook: dynamic (1) harq_ack.pdsch_harq_ack_codebook = srslte_pdsch_harq_ack_codebook_dynamic; diff --git a/lib/include/srslte/phy/common/phy_common_nr.h b/lib/include/srslte/phy/common/phy_common_nr.h index d490fe8a5..a416a9dbf 100644 --- a/lib/include/srslte/phy/common/phy_common_nr.h +++ b/lib/include/srslte/phy/common/phy_common_nr.h @@ -301,6 +301,25 @@ typedef struct SRSLTE_API { uint32_t nof_candidates[SRSLTE_SEARCH_SPACE_NOF_AGGREGATION_LEVELS_NR]; } srslte_search_space_t; +/** + * @brief TDD pattern configuration + */ +typedef struct SRSLTE_API { + uint32_t period_ms; ///< Period in milliseconds, set to 0 if not present + uint32_t nof_dl_slots; ///< Number of consecutive full DL slots at the beginning of each DL-UL pattern + uint32_t nof_dl_symbols; ///< Number of consecutive DL symbols in the beginning of the slot following the last DL slot + uint32_t nof_ul_slots; ///< Number of consecutive full UL slots at the end of each DL-UL pattern + uint32_t nof_ul_symbols; ///< Number of consecutive UL symbols in the end of the slot preceding the first full UL slot +} srslte_tdd_pattern_t; + +/** + * @brief TDD configuration as described in TS 38.331 v15.10.0 TDD-UL-DL-ConfigCommon + */ +typedef struct SRSLTE_API { + srslte_tdd_pattern_t pattern1; + srslte_tdd_pattern_t pattern2; +} srslte_tdd_config_nr_t; + /** * @brief Get the RNTI type name for NR * @param rnti_type RNTI type name @@ -372,6 +391,24 @@ SRSLTE_API uint32_t srslte_min_symbol_sz_rb(uint32_t nof_prb); */ SRSLTE_API float srslte_symbol_distance_s(uint32_t l0, uint32_t l1, uint32_t numerology); +/** + * @brief Decides whether a given slot is configured as Downlink + * @param cfg Provides TDD configuration + * @param numerology Provides BWP numerology + * 
@param slot_idx Slot index in the frame for the given numerology + * @return true if the provided slot index is configured for Downlink + */ +SRSLTE_API bool srslte_tdd_nr_is_dl(const srslte_tdd_config_nr_t* cfg, uint32_t numerology, uint32_t slot_idx); + +/** + * @brief Decides whether a given slot is configured as Uplink + * @param cfg Provides TDD configuration + * @param numerology Provides BWP numerology + * @param slot_idx Slot index in the frame for the given numerology + * @return true if the provided slot index is configured for Uplink + */ +SRSLTE_API bool srslte_tdd_nr_is_ul(const srslte_tdd_config_nr_t* cfg, uint32_t numerology, uint32_t slot_idx); + #ifdef __cplusplus } #endif diff --git a/lib/src/phy/common/phy_common_nr.c b/lib/src/phy/common/phy_common_nr.c index 7dd120e98..b4f36d4b5 100644 --- a/lib/src/phy/common/phy_common_nr.c +++ b/lib/src/phy/common/phy_common_nr.c @@ -146,3 +146,49 @@ float srslte_symbol_distance_s(uint32_t l0, uint32_t l1, uint32_t numerology) // Return symbol distance in microseconds return (N << numerology) * SRSLTE_LTE_TS; } + +bool srslte_tdd_nr_is_dl(const srslte_tdd_config_nr_t* cfg, uint32_t numerology, uint32_t slot_idx) +{ + if (cfg == NULL) { + return false; + } + + // Calculate slot index within the TDD overall period + uint32_t slot_x_ms = 1U << numerology; // Number of slots per millisecond + uint32_t period_sum = (cfg->pattern1.period_ms + cfg->pattern2.period_ms) * slot_x_ms; // Total perdiod sum + uint32_t slot_idx_period = slot_idx % period_sum; // Slot index within the period + + // Select pattern + const srslte_tdd_pattern_t* pattern = &cfg->pattern1; + if ((slot_idx_period >= cfg->pattern1.period_ms * slot_x_ms)) { + pattern = &cfg->pattern2; + slot_idx_period -= cfg->pattern1.period_ms * slot_x_ms; // Remove pattern 1 offset + } + + return (slot_idx_period < pattern->nof_dl_slots || + (slot_idx_period == pattern->nof_dl_slots && pattern->nof_dl_symbols != 0)); +} + +bool srslte_tdd_nr_is_ul(const srslte_tdd_config_nr_t* cfg, uint32_t numerology, uint32_t slot_idx) +{ + if (cfg == NULL) { + return false; + } + + // Calculate slot index within the TDD overall period + uint32_t slot_x_ms = 1U << numerology; // Number of slots per millisecond + uint32_t period_sum = (cfg->pattern1.period_ms + cfg->pattern2.period_ms) * slot_x_ms; // Total perdiod sum + uint32_t slot_idx_period = slot_idx % period_sum; // Slot index within the period + + // Select pattern + const srslte_tdd_pattern_t* pattern = &cfg->pattern1; + if ((slot_idx_period >= cfg->pattern1.period_ms * slot_x_ms)) { + pattern = &cfg->pattern2; + slot_idx_period -= cfg->pattern1.period_ms * slot_x_ms; // Remove pattern 1 offset + } + + // Calculate slot in which UL starts + uint32_t start_ul = (pattern->period_ms * slot_x_ms - pattern->nof_ul_slots) - 1; + + return (slot_idx_period > start_ul || (slot_idx_period == start_ul && pattern->nof_ul_symbols != 0)); +} diff --git a/srsue/src/phy/nr/cc_worker.cc b/srsue/src/phy/nr/cc_worker.cc index 8d7ea11ee..6dc22c88a 100644 --- a/srsue/src/phy/nr/cc_worker.cc +++ b/srsue/src/phy/nr/cc_worker.cc @@ -181,6 +181,11 @@ void cc_worker::decode_pdcch_ul() bool cc_worker::work_dl() { + // Check if it is a DL slot, if not skip + if (!srslte_tdd_nr_is_dl(&phy->cfg.tdd, 0, dl_slot_cfg.idx)) { + return true; + } + // Run FFT srslte_ue_dl_nr_estimate_fft(&ue_dl, &dl_slot_cfg); @@ -249,6 +254,11 @@ bool cc_worker::work_dl() bool cc_worker::work_ul() { + // Check if it is a DL slot, if not skip + if (!srslte_tdd_nr_is_ul(&phy->cfg.tdd, 0, 
ul_slot_cfg.idx)) { + return true; + } + srslte_uci_data_nr_t uci_data = {}; uint32_t pid = 0; From fb1a0b6bce4c1bb614f912ce85f5b3a4cacc6b4b Mon Sep 17 00:00:00 2001 From: Xavier Arteaga Date: Thu, 11 Mar 2021 09:51:02 +0100 Subject: [PATCH 10/64] SRSUE: added NR-PDCCH information --- lib/include/srslte/phy/common/phy_common_nr.h | 6 +++ lib/include/srslte/phy/ue/ue_dl_nr.h | 11 ++++++ lib/src/phy/ue/ue_dl_nr.c | 37 ++++++++++++++----- srsue/src/phy/nr/cc_worker.cc | 15 ++++++++ 4 files changed, 60 insertions(+), 9 deletions(-) diff --git a/lib/include/srslte/phy/common/phy_common_nr.h b/lib/include/srslte/phy/common/phy_common_nr.h index a416a9dbf..260fe4ae3 100644 --- a/lib/include/srslte/phy/common/phy_common_nr.h +++ b/lib/include/srslte/phy/common/phy_common_nr.h @@ -90,6 +90,12 @@ extern "C" { */ #define SRSLTE_SEARCH_SPACE_MAX_NOF_CANDIDATES_NR 8 +/** + * @brief defines the maximum number of monitored PDCCH candidates per slot and per serving cell according to TS 38.213 + * Table 10.1-2 + */ +#define SRSLTE_MAX_NOF_CANDIDATES_NR 44 + /** * @brief defines the maximum number of resource elements per PRB * @remark Defined in TS 38.214 V15.10.0 5.1.3.2 Transport block size determination, point 1, second bullet diff --git a/lib/include/srslte/phy/ue/ue_dl_nr.h b/lib/include/srslte/phy/ue/ue_dl_nr.h index 3409236e4..c39f9a30f 100644 --- a/lib/include/srslte/phy/ue/ue_dl_nr.h +++ b/lib/include/srslte/phy/ue/ue_dl_nr.h @@ -102,6 +102,14 @@ typedef struct SRSLTE_API { uint32_t nof_dl_data_to_ul_ack; } srslte_ue_dl_nr_harq_ack_cfg_t; +typedef struct SRSLTE_API { + uint32_t coreset_id; + uint32_t ss_id; + srslte_dci_location_t location; + srslte_dmrs_pdcch_measure_t measure; + srslte_pdcch_nr_res_t result; +} srslte_ue_dl_nr_pdcch_info_t; + typedef struct SRSLTE_API { uint32_t max_prb; uint32_t nof_rx_antennas; @@ -122,6 +130,9 @@ typedef struct SRSLTE_API { srslte_pdcch_nr_t pdcch; srslte_dmrs_pdcch_ce_t* pdcch_ce; + srslte_ue_dl_nr_pdcch_info_t pdcch_info[SRSLTE_MAX_NOF_CANDIDATES_NR]; ///< Stores PDCCH blind search info + uint32_t pdcch_info_count; + srslte_dci_msg_nr_t pending_ul_dci_msg[SRSLTE_MAX_DCI_MSG_NR]; uint32_t pending_ul_dci_count; } srslte_ue_dl_nr_t; diff --git a/lib/src/phy/ue/ue_dl_nr.c b/lib/src/phy/ue/ue_dl_nr.c index 23616aa9a..6c4ae3fe7 100644 --- a/lib/src/phy/ue/ue_dl_nr.c +++ b/lib/src/phy/ue/ue_dl_nr.c @@ -210,39 +210,52 @@ static int ue_dl_nr_find_dci_ncce(srslte_ue_dl_nr_t* q, srslte_pdcch_nr_res_t* pdcch_res, uint32_t coreset_id) { - srslte_dmrs_pdcch_measure_t m = {}; + // Select debug information + srslte_ue_dl_nr_pdcch_info_t* pdcch_info = NULL; + if (q->pdcch_info_count < SRSLTE_MAX_NOF_CANDIDATES_NR) { + pdcch_info = &q->pdcch_info[q->pdcch_info_count]; + q->pdcch_info_count++; + } else { + ERROR("The UE does not expect more than %d candidates in this serving cell", SRSLTE_MAX_NOF_CANDIDATES_NR); + return SRSLTE_ERROR; + } + SRSLTE_MEM_ZERO(pdcch_info, srslte_ue_dl_nr_pdcch_info_t, 1); + pdcch_info->coreset_id = dci_msg->coreset_id; + pdcch_info->ss_id = dci_msg->search_space; + pdcch_info->location = dci_msg->location; + srslte_dmrs_pdcch_measure_t* m = &pdcch_info->measure; // Measures the PDCCH transmission DMRS - if (srslte_dmrs_pdcch_get_measure(&q->dmrs_pdcch[coreset_id], &dci_msg->location, &m) < SRSLTE_SUCCESS) { + if (srslte_dmrs_pdcch_get_measure(&q->dmrs_pdcch[coreset_id], &dci_msg->location, m) < SRSLTE_SUCCESS) { ERROR("Error getting measure location L=%d, ncce=%d", dci_msg->location.L, dci_msg->location.ncce); return SRSLTE_ERROR; } // If 
measured correlation is invalid, early return - if (!isnormal(m.norm_corr)) { + if (!isnormal(m->norm_corr)) { INFO("Discarded PDCCH candidate L=%d;ncce=%d; Invalid measurement;", dci_msg->location.L, dci_msg->location.ncce); return SRSLTE_SUCCESS; } // Compare EPRE with threshold - if (m.epre_dBfs < q->pdcch_dmrs_epre_thr) { + if (m->epre_dBfs < q->pdcch_dmrs_epre_thr) { INFO("Discarded PDCCH candidate L=%d;ncce=%d; EPRE is too weak (%.1f<%.1f);", dci_msg->location.L, dci_msg->location.ncce, - m.epre_dBfs, + m->epre_dBfs, q->pdcch_dmrs_epre_thr); return SRSLTE_SUCCESS; } // Compare DMRS correlation with threshold - if (m.norm_corr < q->pdcch_dmrs_corr_thr) { + if (m->norm_corr < q->pdcch_dmrs_corr_thr) { INFO("Discarded PDCCH candidate L=%d;ncce=%d; Correlation is too low (%.1f<%.1f); EPRE=%+.2f; RSRP=%+.2f;", dci_msg->location.L, dci_msg->location.ncce, - m.norm_corr, + m->norm_corr, q->pdcch_dmrs_corr_thr, - m.epre_dBfs, - m.rsrp_dBfs); + m->epre_dBfs, + m->rsrp_dBfs); return SRSLTE_SUCCESS; } @@ -258,6 +271,9 @@ static int ue_dl_nr_find_dci_ncce(srslte_ue_dl_nr_t* q, return SRSLTE_ERROR; } + // Save information + pdcch_info->result = *pdcch_res; + return SRSLTE_SUCCESS; } @@ -404,6 +420,9 @@ int srslte_ue_dl_nr_find_dl_dci(srslte_ue_dl_nr_t* q, // Limit maximum number of DCI messages to find nof_dci_msg = SRSLTE_MIN(nof_dci_msg, SRSLTE_MAX_DCI_MSG_NR); + // Reset debug information counter + q->pdcch_info_count = 0; + // If the UE looks for a RAR and RA search space is provided, search for it if (q->cfg.ra_search_space_present && rnti_type == srslte_rnti_type_ra) { // Find DCIs in the RA search space diff --git a/srsue/src/phy/nr/cc_worker.cc b/srsue/src/phy/nr/cc_worker.cc index 6dc22c88a..4fd724988 100644 --- a/srsue/src/phy/nr/cc_worker.cc +++ b/srsue/src/phy/nr/cc_worker.cc @@ -145,6 +145,21 @@ void cc_worker::decode_pdcch_dl() // Enqueue UL grants phy->set_dl_pending_grant(dl_slot_cfg.idx, dci_rx[i]); } + + if (logger.debug.enabled()) { + for (uint32_t i = 0; i < ue_dl.pdcch_info_count; i++) { + const srslte_ue_dl_nr_pdcch_info_t* info = &ue_dl.pdcch_info[i]; + logger.debug("PDCCH: crst_id=%d, ss_id=%d, ncce=%d, al=%d, EPRE=%+.2f, RSRP=%+.2f, corr=%.3f; crc=%s", + info->coreset_id, + info->ss_id, + info->location.ncce, + info->location.L, + info->measure.epre_dBfs, + info->measure.rsrp_dBfs, + info->measure.norm_corr, + info->result.crc ? 
"OK" : "KO"); + } + } } void cc_worker::decode_pdcch_ul() From 10af89fcdd0911ecf3a07a5454947254bcd09d1e Mon Sep 17 00:00:00 2001 From: Xavier Arteaga Date: Thu, 11 Mar 2021 09:52:08 +0100 Subject: [PATCH 11/64] NR-PDCCH aesthetic changes --- lib/src/phy/phch/pdcch_nr.c | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/lib/src/phy/phch/pdcch_nr.c b/lib/src/phy/phch/pdcch_nr.c index c657287df..27aca08ac 100644 --- a/lib/src/phy/phch/pdcch_nr.c +++ b/lib/src/phy/phch/pdcch_nr.c @@ -28,16 +28,17 @@ /** * @brief Recursive Y_p_n function */ -static uint32_t srslte_pdcch_calculate_Y_p_n(uint32_t coreset_id, uint16_t rnti, int n) +static uint32_t srslte_pdcch_calculate_Y_p_n(uint32_t coreset_id, uint16_t rnti, uint32_t n) { static const uint32_t A_p[3] = {39827, 39829, 39839}; const uint32_t D = 65537; - if (n < 0) { - return rnti; + uint32_t Y_p_n = (uint32_t)rnti; + for (uint32_t i = 0; i <= n; i++) { + Y_p_n = (A_p[coreset_id % 3] * Y_p_n) % D; } - return (A_p[coreset_id % 3] * srslte_pdcch_calculate_Y_p_n(coreset_id, rnti, n - 1)) % D; + return Y_p_n; } /** From 1153555ae1798c2ae66d30b4d2d02c2cf91a4419 Mon Sep 17 00:00:00 2001 From: Xavier Arteaga Date: Thu, 11 Mar 2021 19:08:12 +0100 Subject: [PATCH 12/64] SRSUE: Added UE specific cell space --- .../interfaces/rrc_nr_interface_types.h | 54 ++++++++++++++++++- 1 file changed, 52 insertions(+), 2 deletions(-) diff --git a/lib/include/srslte/interfaces/rrc_nr_interface_types.h b/lib/include/srslte/interfaces/rrc_nr_interface_types.h index 7bc26936a..f7f1e2406 100644 --- a/lib/include/srslte/interfaces/rrc_nr_interface_types.h +++ b/lib/include/srslte/interfaces/rrc_nr_interface_types.h @@ -97,8 +97,8 @@ struct phy_cfg_nr_t { srslte_search_space_t search_space1 = {}; search_space1.id = 1; search_space1.coreset_id = 1; - search_space1.nof_candidates[0] = 0; - search_space1.nof_candidates[1] = 0; + search_space1.nof_candidates[0] = 1; + search_space1.nof_candidates[1] = 1; search_space1.nof_candidates[2] = 1; search_space1.nof_candidates[3] = 0; search_space1.nof_candidates[4] = 0; @@ -112,6 +112,56 @@ struct phy_cfg_nr_t { pdcch.ra_search_space.type = srslte_search_space_type_common_1; pdcch.ra_search_space_present = true; + // spCellConfigDedicated + // initialDownlinkBWP + // pdcch-Config: setup (1) + // setup + // controlResourceSetToAddModList: 1 item + // Item 0 + // ControlResourceSet + // controlResourceSetId: 2 + // frequencyDomainResources: ff0000000000 [bit length 45, 3 LSB pad bits, 1111 1111 0000 + // 0000 0000 0000 0000 0000 0000 0000 0000 0... decimal value 35046933135360] + // duration: 1 + // cce-REG-MappingType: nonInterleaved (1) + // nonInterleaved: NULL + // precoderGranularity: sameAsREG-bundle (0) + pdcch.coreset[2].id = 2; + pdcch.coreset[2].precoder_granularity = srslte_coreset_precoder_granularity_reg_bundle; + pdcch.coreset[2].duration = 1; + pdcch.coreset[2].mapping_type = srslte_coreset_mapping_type_non_interleaved; + for (uint32_t i = 0; i < SRSLTE_CORESET_FREQ_DOMAIN_RES_SIZE; i++) { + pdcch.coreset[2].freq_resources[i] = (i < 8); + } + pdcch.coreset_present[2] = true; + + // searchSpacesToAddModList: 1 item + // Item 0 + // SearchSpace + // searchSpaceId: 2 + // controlResourceSetId: 2 + // monitoringSlotPeriodicityAndOffset: sl1 (0) + // sl1: NULL + // monitoringSymbolsWithinSlot: 8000 [bit length 14, 2 LSB pad bits, 1000 0000 0000 + // 00.. 
decimal value 8192] nrofCandidates + // aggregationLevel1: n0 (0) + // aggregationLevel2: n2 (2) + // aggregationLevel4: n1 (1) + // aggregationLevel8: n0 (0) + // aggregationLevel16: n0 (0) + // searchSpaceType: ue-Specific (1) + // ue-Specific + // dci-Formats: formats0-0-And-1-0 (0) + pdcch.search_space[2].id = 2; + pdcch.search_space[2].coreset_id = 2; + pdcch.search_space[2].nof_candidates[0] = 0; + pdcch.search_space[2].nof_candidates[1] = 2; + pdcch.search_space[2].nof_candidates[2] = 1; + pdcch.search_space[2].nof_candidates[3] = 0; + pdcch.search_space[2].nof_candidates[4] = 0; + pdcch.search_space[2].type = srslte_search_space_type_ue; + pdcch.search_space_present[2] = true; + // pdsch-ConfigCommon: setup (1) // setup // pdsch-TimeDomainAllocationList: 2 items From ddfff3edad194e5f0ffe3c7a30dffabcb928a30a Mon Sep 17 00:00:00 2001 From: Xavier Arteaga Date: Thu, 11 Mar 2021 19:09:20 +0100 Subject: [PATCH 13/64] SRSUE: Zero transmit buffer if the slot is not UL --- srsue/src/phy/nr/cc_worker.cc | 2 ++ 1 file changed, 2 insertions(+) diff --git a/srsue/src/phy/nr/cc_worker.cc b/srsue/src/phy/nr/cc_worker.cc index 4fd724988..1c6550a31 100644 --- a/srsue/src/phy/nr/cc_worker.cc +++ b/srsue/src/phy/nr/cc_worker.cc @@ -271,6 +271,8 @@ bool cc_worker::work_ul() { // Check if it is a DL slot, if not skip if (!srslte_tdd_nr_is_ul(&phy->cfg.tdd, 0, ul_slot_cfg.idx)) { + // No NR signal shall be transmitted + srslte_vec_cf_zero(tx_buffer[0], ue_ul.ifft.sf_sz); return true; } From be8e8cbe3efba6cc132c49e2799ec33e83fb5670 Mon Sep 17 00:00:00 2001 From: Xavier Arteaga Date: Thu, 11 Mar 2021 19:10:21 +0100 Subject: [PATCH 14/64] Fix radio decimation Rx stream stall --- lib/include/srslte/radio/radio.h | 1 + lib/src/radio/radio.cc | 21 +++++++++++++-------- 2 files changed, 14 insertions(+), 8 deletions(-) diff --git a/lib/include/srslte/radio/radio.h b/lib/include/srslte/radio/radio.h index 4a4654ace..9c242ca06 100644 --- a/lib/include/srslte/radio/radio.h +++ b/lib/include/srslte/radio/radio.h @@ -100,6 +100,7 @@ private: std::array, SRSLTE_MAX_CHANNELS> rx_buffer; std::array interpolators = {}; std::array decimators = {}; + bool decimator_busy = false; ///< Indicates the decimator is changing the rate rf_timestamp_t end_of_burst_time = {}; bool is_start_of_burst = false; diff --git a/lib/src/radio/radio.cc b/lib/src/radio/radio.cc index 9b41c8b8c..00685638a 100644 --- a/lib/src/radio/radio.cc +++ b/lib/src/radio/radio.cc @@ -280,7 +280,13 @@ bool radio::rx_now(rf_buffer_interface& buffer, rf_timestamp_interface& rxd_time std::unique_lock lock(rx_mutex); bool ret = true; rf_buffer_t buffer_rx; - uint32_t ratio = SRSLTE_MAX(1, decimators[0].ratio); + + // Extract decimation ratio. As the decimator may take some time to set a new ratio, deactivate the decimation and + // keep receiving samples to avoid stalling the RX stream + uint32_t ratio = (decimator_busy) ? 0 : SRSLTE_MAX(1, decimators[0].ratio); + if (decimator_busy) { + rx_mutex.unlock(); + } // Calculate number of samples, considering the decimation ratio uint32_t nof_samples = buffer.get_nof_samples() * ratio; @@ -410,13 +416,10 @@ bool radio::tx(rf_buffer_interface& buffer, const rf_timestamp_interface& tx_tim if (ratio > 1 && nof_samples * ratio > tx_buffer[0].size()) { // This is a corner case that could happen during sample rate change transitions, as it does not have a negative // impact, log it as info. 
- fmt::memory_buffer buff; - fmt::format_to(buff, - "Tx number of samples ({}/{}) exceeds buffer size ({})\n", - buffer.get_nof_samples(), - buffer.get_nof_samples() * ratio, - tx_buffer[0].size()); - logger.info("%s", to_c_str(buff)); + logger.info(fmt::format("Tx number of samples ({}/{}) exceeds buffer size ({})\n", + buffer.get_nof_samples(), + buffer.get_nof_samples() * ratio, + tx_buffer[0].size())); // Limit number of samples to transmit nof_samples = tx_buffer[0].size() / ratio; @@ -674,6 +677,7 @@ void radio::set_rx_srate(const double& srate) } // If fix sampling rate... if (std::isnormal(fix_srate_hz)) { + decimator_busy = true; std::unique_lock lock(rx_mutex); // If the sampling rate was not set, set it @@ -689,6 +693,7 @@ void radio::set_rx_srate(const double& srate) srslte_resampler_fft_init(&decimators[ch], SRSLTE_RESAMPLER_MODE_DECIMATE, ratio); } + decimator_busy = false; } else { for (srslte_rf_t& rf_device : rf_devices) { cur_rx_srate = srslte_rf_set_rx_srate(&rf_device, srate); From 1463b11bad75299b1d4668239d460975a89b8dd2 Mon Sep 17 00:00:00 2001 From: Xavier Arteaga Date: Fri, 12 Mar 2021 10:33:43 +0100 Subject: [PATCH 15/64] Fix compilation --- lib/src/radio/radio.cc | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/lib/src/radio/radio.cc b/lib/src/radio/radio.cc index 00685638a..bc58d699d 100644 --- a/lib/src/radio/radio.cc +++ b/lib/src/radio/radio.cc @@ -416,10 +416,13 @@ bool radio::tx(rf_buffer_interface& buffer, const rf_timestamp_interface& tx_tim if (ratio > 1 && nof_samples * ratio > tx_buffer[0].size()) { // This is a corner case that could happen during sample rate change transitions, as it does not have a negative // impact, log it as info. - logger.info(fmt::format("Tx number of samples ({}/{}) exceeds buffer size ({})\n", - buffer.get_nof_samples(), - buffer.get_nof_samples() * ratio, - tx_buffer[0].size())); + fmt::memory_buffer buff; + fmt::format_to(buff, + "Tx number of samples ({}/{}) exceeds buffer size ({})\n", + buffer.get_nof_samples(), + buffer.get_nof_samples() * ratio, + tx_buffer[0].size()); + logger.info("%s", to_c_str(buff)); // Limit number of samples to transmit nof_samples = tx_buffer[0].size() / ratio; From 6fa33c890de5471525a1639e2883861e54a918a9 Mon Sep 17 00:00:00 2001 From: Xavier Arteaga Date: Fri, 12 Mar 2021 16:23:00 +0100 Subject: [PATCH 16/64] Simultaneous HARQ-ACK, SR and CSI in NR-PUCCH --- lib/src/phy/phch/uci_nr.c | 65 ++++++++++++++++++++++++++++++++++++--- 1 file changed, 60 insertions(+), 5 deletions(-) diff --git a/lib/src/phy/phch/uci_nr.c b/lib/src/phy/phch/uci_nr.c index 199d8572a..04387ff6b 100644 --- a/lib/src/phy/phch/uci_nr.c +++ b/lib/src/phy/phch/uci_nr.c @@ -193,6 +193,63 @@ static int uci_nr_unpack_ack_sr(const srslte_uci_cfg_nr_t* cfg, uint8_t* sequenc return A; } +static int uci_nr_pack_ack_sr_csi(const srslte_uci_cfg_nr_t* cfg, const srslte_uci_value_nr_t* value, uint8_t* sequence) +{ + int A = 0; + + // Append ACK bits + srslte_vec_u8_copy(&sequence[A], value->ack, cfg->o_ack); + A += cfg->o_ack; + + // Append SR bits + uint8_t* bits = &sequence[A]; + srslte_bit_unpack(value->sr, &bits, cfg->o_sr); + A += cfg->o_sr; + + // Append CSI bits + int n = srslte_csi_part1_pack(cfg->csi, value->csi, cfg->nof_csi, bits, SRSLTE_UCI_NR_MAX_NOF_BITS - A); + if (n < SRSLTE_SUCCESS) { + ERROR("Packing CSI part 1"); + return SRSLTE_ERROR; + } + A += n; + + if (SRSLTE_DEBUG_ENABLED && srslte_verbose >= SRSLTE_VERBOSE_INFO && !handler_registered) { + UCI_NR_INFO_TX("Packed UCI bits: "); + 
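    // 'sequence' holds one payload bit per byte; all A = O_ACK + O_SR + O_CSI1 bits are dumped.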
srslte_vec_fprint_byte(stdout, sequence, A); + } + + return A; +} + +static int uci_nr_unpack_ack_sr_csi(const srslte_uci_cfg_nr_t* cfg, uint8_t* sequence, srslte_uci_value_nr_t* value) +{ + int A = 0; + + // Append ACK bits + srslte_vec_u8_copy(value->ack, &sequence[A], cfg->o_ack); + A += cfg->o_ack; + + // Append SR bits + uint8_t* bits = &sequence[A]; + value->sr = srslte_bit_pack(&bits, cfg->o_sr); + A += cfg->o_sr; + + if (SRSLTE_DEBUG_ENABLED && srslte_verbose >= SRSLTE_VERBOSE_INFO && !handler_registered) { + UCI_NR_INFO_RX("Unpacked UCI bits: "); + srslte_vec_fprint_byte(stdout, sequence, A); + } + + // Append CSI bits + int n = srslte_csi_part1_unpack(cfg->csi, cfg->nof_csi, bits, SRSLTE_UCI_NR_MAX_NOF_BITS - A, value->csi); + if (n < SRSLTE_SUCCESS) { + ERROR("Packing CSI part 1"); + return SRSLTE_ERROR; + } + + return A; +} + static int uci_nr_A(const srslte_uci_cfg_nr_t* cfg) { int o_csi = srslte_csi_part1_nof_bits(cfg->csi, cfg->nof_csi); @@ -227,8 +284,7 @@ static int uci_nr_pack_pucch(const srslte_uci_cfg_nr_t* cfg, const srslte_uci_va } // 6.3.1.1.3 HARQ-ACK/SR and CSI - ERROR("HARQ-ACK/SR and CSI encoding are not implemented"); - return SRSLTE_ERROR; + return uci_nr_pack_ack_sr_csi(cfg, value, sequence); } static int uci_nr_unpack_pucch(const srslte_uci_cfg_nr_t* cfg, uint8_t* sequence, srslte_uci_value_nr_t* value) @@ -247,8 +303,7 @@ static int uci_nr_unpack_pucch(const srslte_uci_cfg_nr_t* cfg, uint8_t* sequence } // 6.3.1.1.3 HARQ-ACK/SR and CSI - ERROR("HARQ-ACK/SR and CSI encoding are not implemented"); - return SRSLTE_ERROR; + return uci_nr_unpack_ack_sr_csi(cfg, sequence, value); } static int uci_nr_encode_1bit(srslte_uci_nr_t* q, const srslte_uci_cfg_nr_t* cfg, uint8_t* o, uint32_t E) @@ -1090,7 +1145,7 @@ static int uci_nr_pusch_Q_prime_csi1(const srslte_uci_nr_pusch_cfg_t* cfg, uint3 return SRSLTE_ERROR; } - uint32_t M_uci_sum = 0; + uint32_t M_uci_sum = 0; for (uint32_t l = 0; l < SRSLTE_NSYMB_PER_SLOT_NR; l++) { M_uci_sum += cfg->M_uci_sc[l]; } From d9780e1f13f399c692e35430b0123963f7d3404f Mon Sep 17 00:00:00 2001 From: Xavier Arteaga Date: Fri, 12 Mar 2021 18:12:41 +0100 Subject: [PATCH 17/64] Fix duplicated NR-DCI in different search spaces and CORESET and other aesthetic modification --- lib/include/srslte/phy/ue/ue_dl_nr.h | 7 +++- lib/src/phy/phch/dci_nr.c | 2 +- lib/src/phy/phch/pdsch_nr.c | 14 ++++---- lib/src/phy/phch/pusch_nr.c | 14 ++++---- lib/src/phy/ue/ue_dl_nr.c | 52 ++++++++++------------------ 5 files changed, 37 insertions(+), 52 deletions(-) diff --git a/lib/include/srslte/phy/ue/ue_dl_nr.h b/lib/include/srslte/phy/ue/ue_dl_nr.h index c39f9a30f..d7c26b281 100644 --- a/lib/include/srslte/phy/ue/ue_dl_nr.h +++ b/lib/include/srslte/phy/ue/ue_dl_nr.h @@ -130,9 +130,14 @@ typedef struct SRSLTE_API { srslte_pdcch_nr_t pdcch; srslte_dmrs_pdcch_ce_t* pdcch_ce; - srslte_ue_dl_nr_pdcch_info_t pdcch_info[SRSLTE_MAX_NOF_CANDIDATES_NR]; ///< Stores PDCCH blind search info + /// Store Blind-search information from all possible candidate locations for debug purposes + srslte_ue_dl_nr_pdcch_info_t pdcch_info[SRSLTE_MAX_NOF_CANDIDATES_NR]; uint32_t pdcch_info_count; + /// Temporally stores Found DCI messages from all SS + srslte_dci_msg_nr_t dci_msg[SRSLTE_MAX_DCI_MSG_NR]; + uint32_t dci_msg_count; + srslte_dci_msg_nr_t pending_ul_dci_msg[SRSLTE_MAX_DCI_MSG_NR]; uint32_t pending_ul_dci_count; } srslte_ue_dl_nr_t; diff --git a/lib/src/phy/phch/dci_nr.c b/lib/src/phy/phch/dci_nr.c index 492524529..2bb86b332 100644 --- a/lib/src/phy/phch/dci_nr.c +++ 
b/lib/src/phy/phch/dci_nr.c @@ -790,7 +790,7 @@ static int dci_nr_format_1_0_to_str(const srslte_dci_dl_nr_t* dci, char* str, ui // Downlink assignment index – 2 bits if (dci->rnti_type == srslte_rnti_type_c || dci->rnti_type == srslte_rnti_type_tc) { - len = srslte_print_check(str, str_len, len, "sii=%d ", dci->sii); + len = srslte_print_check(str, str_len, len, "dai=%d ", dci->dai); } // TPC command for scheduled PUCCH – 2 bits diff --git a/lib/src/phy/phch/pdsch_nr.c b/lib/src/phy/phch/pdsch_nr.c index b7ee4dc0d..38b651e3e 100644 --- a/lib/src/phy/phch/pdsch_nr.c +++ b/lib/src/phy/phch/pdsch_nr.c @@ -676,13 +676,10 @@ static uint32_t srslte_pdsch_nr_grant_info(const srslte_sch_cfg_nr_t* cfg, uint32_t len = 0; len = srslte_print_check(str, str_len, len, "rnti=0x%x", grant->rnti); - char freq_str[SRSLTE_MAX_PRB_NR + 1] = {}; - for (uint32_t i = 0, nof_prb = 0; i < SRSLTE_MAX_PRB_NR && nof_prb < grant->nof_prb; i++) { + uint32_t first_prb = SRSLTE_MAX_PRB_NR; + for (uint32_t i = 0; i < SRSLTE_MAX_PRB_NR && first_prb == SRSLTE_MAX_PRB_NR; i++) { if (grant->prb_idx[i]) { - freq_str[i] = '1'; - nof_prb++; - } else { - freq_str[i] = '0'; + first_prb = i; } } @@ -690,9 +687,10 @@ static uint32_t srslte_pdsch_nr_grant_info(const srslte_sch_cfg_nr_t* cfg, len = srslte_print_check(str, str_len, len, - ",k0=%d,freq=%s,symb=%d:%d,mapping=%s", + ",k0=%d,prb=%d:%d,symb=%d:%d,mapping=%s", grant->k, - freq_str, + first_prb, + grant->nof_prb, grant->S, grant->L, srslte_sch_mapping_type_to_str(grant->mapping)); diff --git a/lib/src/phy/phch/pusch_nr.c b/lib/src/phy/phch/pusch_nr.c index b69bef529..2f887b95c 100644 --- a/lib/src/phy/phch/pusch_nr.c +++ b/lib/src/phy/phch/pusch_nr.c @@ -1237,13 +1237,10 @@ static uint32_t srslte_pusch_nr_grant_info(const srslte_sch_cfg_nr_t* cfg, uint32_t len = 0; len = srslte_print_check(str, str_len, len, "rnti=0x%x", grant->rnti); - char freq_str[SRSLTE_MAX_PRB_NR + 1] = {}; - for (uint32_t i = 0, nof_prb = 0; i < SRSLTE_MAX_PRB_NR && nof_prb < grant->nof_prb; i++) { + uint32_t first_prb = SRSLTE_MAX_PRB_NR; + for (uint32_t i = 0; i < SRSLTE_MAX_PRB_NR && first_prb == SRSLTE_MAX_PRB_NR; i++) { if (grant->prb_idx[i]) { - freq_str[i] = '1'; - nof_prb++; - } else { - freq_str[i] = '0'; + first_prb = i; } } @@ -1251,9 +1248,10 @@ static uint32_t srslte_pusch_nr_grant_info(const srslte_sch_cfg_nr_t* cfg, len = srslte_print_check(str, str_len, len, - ",k2=%d,freq=%s,S=%d,L=%d,mapping=%s", + ",k2=%d,prb=%d:%d,S=%d,L=%d,mapping=%s", grant->k, - freq_str, + first_prb, + grant->nof_prb, grant->S, grant->L, srslte_sch_mapping_type_to_str(grant->mapping)); diff --git a/lib/src/phy/ue/ue_dl_nr.c b/lib/src/phy/ue/ue_dl_nr.c index 6c4ae3fe7..8807ea880 100644 --- a/lib/src/phy/ue/ue_dl_nr.c +++ b/lib/src/phy/ue/ue_dl_nr.c @@ -297,15 +297,8 @@ static int ue_dl_nr_find_dl_dci_ss(srslte_ue_dl_nr_t* q, const srslte_slot_cfg_t* slot_cfg, const srslte_search_space_t* search_space, uint16_t rnti, - srslte_rnti_type_t rnti_type, - srslte_dci_msg_nr_t* dci_msg_list, - uint32_t nof_dci_msg) + srslte_rnti_type_t rnti_type) { - // Check inputs - if (q == NULL || slot_cfg == NULL || dci_msg_list == NULL) { - return SRSLTE_ERROR_INVALID_INPUTS; - } - // Select CORESET uint32_t coreset_id = search_space->coreset_id; if (coreset_id >= SRSLTE_UE_DL_NR_MAX_NOF_CORESET || !q->cfg.coreset_present[coreset_id]) { @@ -320,8 +313,6 @@ static int ue_dl_nr_find_dl_dci_ss(srslte_ue_dl_nr_t* q, return SRSLTE_ERROR; } - uint32_t count = 0; - // Hard-coded values srslte_dci_format_nr_t dci_format = 
srslte_dci_format_nr_1_0; @@ -333,7 +324,8 @@ static int ue_dl_nr_find_dl_dci_ss(srslte_ue_dl_nr_t* q, } // Iterate all possible aggregation levels - for (uint32_t L = 0; L < SRSLTE_SEARCH_SPACE_NOF_AGGREGATION_LEVELS_NR && count < nof_dci_msg; L++) { + for (uint32_t L = 0; L < SRSLTE_SEARCH_SPACE_NOF_AGGREGATION_LEVELS_NR && q->dci_msg_count < SRSLTE_MAX_DCI_MSG_NR; + L++) { // Calculate possible PDCCH DCI candidates uint32_t candidates[SRSLTE_SEARCH_SPACE_MAX_NOF_CANDIDATES_NR] = {}; int nof_candidates = srslte_pdcch_nr_locations_coreset( @@ -344,7 +336,7 @@ static int ue_dl_nr_find_dl_dci_ss(srslte_ue_dl_nr_t* q, } // Iterate over the candidates - for (int ncce_idx = 0; ncce_idx < nof_candidates && count < nof_dci_msg; ncce_idx++) { + for (int ncce_idx = 0; ncce_idx < nof_candidates && q->dci_msg_count < SRSLTE_MAX_DCI_MSG_NR; ncce_idx++) { // Set DCI context srslte_dci_msg_nr_t dci_msg = {}; dci_msg.location.L = L; @@ -387,19 +379,19 @@ static int ue_dl_nr_find_dl_dci_ss(srslte_ue_dl_nr_t* q, } // Check if the grant exists already in the message list - if (find_dci_msg(dci_msg_list, count, &dci_msg)) { + if (find_dci_msg(q->dci_msg, q->dci_msg_count, &dci_msg)) { // The same DCI is in the list, keep moving continue; } INFO("Found DCI in L=%d,ncce=%d", dci_msg.location.L, dci_msg.location.ncce); // Append DCI message into the list - dci_msg_list[count] = dci_msg; - count++; + q->dci_msg[q->dci_msg_count] = dci_msg; + q->dci_msg_count++; } } - return (int)count; + return SRSLTE_SUCCESS; } int srslte_ue_dl_nr_find_dl_dci(srslte_ue_dl_nr_t* q, @@ -409,9 +401,6 @@ int srslte_ue_dl_nr_find_dl_dci(srslte_ue_dl_nr_t* q, srslte_dci_dl_nr_t* dci_dl_list, uint32_t nof_dci_msg) { - int count = 0; - srslte_dci_msg_nr_t dci_msg_list[SRSLTE_MAX_DCI_MSG_NR] = {}; - // Check inputs if (q == NULL || slot_cfg == NULL || dci_dl_list == NULL) { return SRSLTE_ERROR_INVALID_INPUTS; @@ -420,48 +409,43 @@ int srslte_ue_dl_nr_find_dl_dci(srslte_ue_dl_nr_t* q, // Limit maximum number of DCI messages to find nof_dci_msg = SRSLTE_MIN(nof_dci_msg, SRSLTE_MAX_DCI_MSG_NR); - // Reset debug information counter + // Reset grant and blind search information counters + q->dci_msg_count = 0; q->pdcch_info_count = 0; // If the UE looks for a RAR and RA search space is provided, search for it if (q->cfg.ra_search_space_present && rnti_type == srslte_rnti_type_ra) { // Find DCIs in the RA search space - int ret = ue_dl_nr_find_dl_dci_ss(q, slot_cfg, &q->cfg.ra_search_space, rnti, rnti_type, dci_msg_list, nof_dci_msg); + int ret = ue_dl_nr_find_dl_dci_ss(q, slot_cfg, &q->cfg.ra_search_space, rnti, rnti_type); if (ret < SRSLTE_SUCCESS) { ERROR("Error searching RAR DCI"); return SRSLTE_ERROR; } - - // Count the found DCIs - count += ret; } else { // Iterate all possible common and UE search spaces - for (uint32_t i = 0; i < SRSLTE_UE_DL_NR_MAX_NOF_SEARCH_SPACE && count < nof_dci_msg; i++) { + for (uint32_t i = 0; i < SRSLTE_UE_DL_NR_MAX_NOF_SEARCH_SPACE && q->dci_msg_count < nof_dci_msg; i++) { // Skip search space if not present if (!q->cfg.search_space_present[i]) { continue; } // Find DCIs in the selected search space - int ret = ue_dl_nr_find_dl_dci_ss( - q, slot_cfg, &q->cfg.search_space[i], rnti, rnti_type, &dci_msg_list[count], nof_dci_msg - count); + int ret = ue_dl_nr_find_dl_dci_ss(q, slot_cfg, &q->cfg.search_space[i], rnti, rnti_type); if (ret < SRSLTE_SUCCESS) { ERROR("Error searching DCI"); return SRSLTE_ERROR; } - - // Count the found DCIs - count += ret; } } // Convert found DCI messages into DL grants - for 
(uint32_t i = 0; i < count; i++) { - const srslte_coreset_t* coreset = &q->cfg.coreset[dci_msg_list[i].coreset_id]; - srslte_dci_nr_format_1_0_unpack(&q->carrier, coreset, &dci_msg_list[i], &dci_dl_list[i]); + uint32_t dci_msg_count = SRSLTE_MIN(nof_dci_msg, q->dci_msg_count); + for (uint32_t i = 0; i < dci_msg_count; i++) { + const srslte_coreset_t* coreset = &q->cfg.coreset[q->dci_msg[i].coreset_id]; + srslte_dci_nr_format_1_0_unpack(&q->carrier, coreset, &q->dci_msg[i], &dci_dl_list[i]); } - return count; + return (int)dci_msg_count; } int srslte_ue_dl_nr_find_ul_dci(srslte_ue_dl_nr_t* q, From f3c036084210cf53805c7d3d9590d7e834eaa880 Mon Sep 17 00:00:00 2001 From: Xavier Arteaga Date: Mon, 15 Mar 2021 10:26:20 +0100 Subject: [PATCH 18/64] Applied aesthetical changes --- lib/include/srslte/phy/common/phy_common_nr.h | 5 +++-- lib/include/srslte/phy/phch/ra_nr.h | 2 +- lib/include/srslte/phy/ue/ue_dl_nr.h | 2 +- lib/src/phy/ue/ue_dl_nr.c | 4 ++-- lib/src/radio/radio.cc | 8 +++++--- srsue/src/phy/nr/cc_worker.cc | 2 +- 6 files changed, 13 insertions(+), 10 deletions(-) diff --git a/lib/include/srslte/phy/common/phy_common_nr.h b/lib/include/srslte/phy/common/phy_common_nr.h index 260fe4ae3..c358adbeb 100644 --- a/lib/include/srslte/phy/common/phy_common_nr.h +++ b/lib/include/srslte/phy/common/phy_common_nr.h @@ -86,7 +86,8 @@ extern "C" { #define SRSLTE_PDCCH_MAX_RE ((SRSLTE_NRE - 3U) * (1U << (SRSLTE_SEARCH_SPACE_NOF_AGGREGATION_LEVELS_NR - 1U)) * 6U) /** - * @brief defines the maximum number of candidates for a given Aggregation level + * @brief defines the maximum number of candidates for a given search-space and aggregation level according to TS 38.331 + * SearchSpace sequence */ #define SRSLTE_SEARCH_SPACE_MAX_NOF_CANDIDATES_NR 8 @@ -94,7 +95,7 @@ extern "C" { * @brief defines the maximum number of monitored PDCCH candidates per slot and per serving cell according to TS 38.213 * Table 10.1-2 */ -#define SRSLTE_MAX_NOF_CANDIDATES_NR 44 +#define SRSLTE_MAX_NOF_CANDIDATES_SLOT_NR 44 /** * @brief defines the maximum number of resource elements per PRB diff --git a/lib/include/srslte/phy/phch/ra_nr.h b/lib/include/srslte/phy/phch/ra_nr.h index 1815e33da..ba272f4f1 100644 --- a/lib/include/srslte/phy/phch/ra_nr.h +++ b/lib/include/srslte/phy/phch/ra_nr.h @@ -122,7 +122,7 @@ SRSLTE_API int srslte_ra_ul_dci_to_grant_nr(const srslte_carrier_nr_t* carrie srslte_sch_grant_nr_t* pusch_grant); /** - * @brief Setups the Uplink Control Information configuration for a PUSCH transmission + * @brief Set up the Uplink Control Information configuration for a PUSCH transmission * * @remark Implement procedure described in TS 38.213 9.3 UCI reporting in physical uplink shared channel * diff --git a/lib/include/srslte/phy/ue/ue_dl_nr.h b/lib/include/srslte/phy/ue/ue_dl_nr.h index d7c26b281..f56a66a46 100644 --- a/lib/include/srslte/phy/ue/ue_dl_nr.h +++ b/lib/include/srslte/phy/ue/ue_dl_nr.h @@ -131,7 +131,7 @@ typedef struct SRSLTE_API { srslte_dmrs_pdcch_ce_t* pdcch_ce; /// Store Blind-search information from all possible candidate locations for debug purposes - srslte_ue_dl_nr_pdcch_info_t pdcch_info[SRSLTE_MAX_NOF_CANDIDATES_NR]; + srslte_ue_dl_nr_pdcch_info_t pdcch_info[SRSLTE_MAX_NOF_CANDIDATES_SLOT_NR]; uint32_t pdcch_info_count; /// Temporally stores Found DCI messages from all SS diff --git a/lib/src/phy/ue/ue_dl_nr.c b/lib/src/phy/ue/ue_dl_nr.c index 8807ea880..27d8964ce 100644 --- a/lib/src/phy/ue/ue_dl_nr.c +++ b/lib/src/phy/ue/ue_dl_nr.c @@ -212,11 +212,11 @@ static int 
ue_dl_nr_find_dci_ncce(srslte_ue_dl_nr_t* q,
 {
   // Select debug information
   srslte_ue_dl_nr_pdcch_info_t* pdcch_info = NULL;
-  if (q->pdcch_info_count < SRSLTE_MAX_NOF_CANDIDATES_NR) {
+  if (q->pdcch_info_count < SRSLTE_MAX_NOF_CANDIDATES_SLOT_NR) {
     pdcch_info = &q->pdcch_info[q->pdcch_info_count];
     q->pdcch_info_count++;
   } else {
-    ERROR("The UE does not expect more than %d candidates in this serving cell", SRSLTE_MAX_NOF_CANDIDATES_NR);
+    ERROR("The UE does not expect more than %d candidates in this serving cell", SRSLTE_MAX_NOF_CANDIDATES_SLOT_NR);
     return SRSLTE_ERROR;
   }
   SRSLTE_MEM_ZERO(pdcch_info, srslte_ue_dl_nr_pdcch_info_t, 1);
diff --git a/lib/src/radio/radio.cc b/lib/src/radio/radio.cc
index bc58d699d..aef1a0ab9 100644
--- a/lib/src/radio/radio.cc
+++ b/lib/src/radio/radio.cc
@@ -281,11 +281,13 @@ bool radio::rx_now(rf_buffer_interface& buffer, rf_timestamp_interface& rxd_time
   bool ret = true;
   rf_buffer_t buffer_rx;

-  // Extract decimation ratio. As the decimator may take some time to set a new ratio, deactivate the decimation and
+  // Extract decimation ratio. As the decimation may take some time to set a new ratio, deactivate the decimation and
   // keep receiving samples to avoid stalling the RX stream
-  uint32_t ratio = (decimator_busy) ? 0 : SRSLTE_MAX(1, decimators[0].ratio);
+  uint32_t ratio = 1; // No decimation by default
   if (decimator_busy) {
-    rx_mutex.unlock();
+    lock.unlock();
+  } else if (decimators[0].ratio > 1) {
+    ratio = decimators[0].ratio;
   }

   // Calculate number of samples, considering the decimation ratio
diff --git a/srsue/src/phy/nr/cc_worker.cc b/srsue/src/phy/nr/cc_worker.cc
index 1c6550a31..b55c40445 100644
--- a/srsue/src/phy/nr/cc_worker.cc
+++ b/srsue/src/phy/nr/cc_worker.cc
@@ -269,7 +269,7 @@ bool cc_worker::work_dl()

 bool cc_worker::work_ul()
 {
-  // Check if it is a DL slot, if not skip
+  // Check if it is a UL slot, if not skip
   if (!srslte_tdd_nr_is_ul(&phy->cfg.tdd, 0, ul_slot_cfg.idx)) {
     // No NR signal shall be transmitted
     srslte_vec_cf_zero(tx_buffer[0], ue_ul.ifft.sf_sz);

From 3e07767f28174adb2e573d9b716bc7a664d6463b Mon Sep 17 00:00:00 2001
From: Andre Puschmann
Date: Tue, 16 Mar 2021 15:47:38 +0100
Subject: [PATCH 19/64] rlc_stress_test: set maxRetx for AM to 32 by default

Since the tests are randomized, we sometimes hit the maxRetx threshold with
only 8 retransmissions allowed, which made the unit test fail. Increase the
threshold to lower the likelihood of this happening.
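As a rough estimate (assuming independent loss events with probability p per
transmission and ignoring poll/status-report timing), a single PDU exhausts N
allowed retransmissions with probability on the order of p^N: for p = 0.1 that
is about 1e-8 at N = 8 versus about 1e-32 at N = 32, so spurious max-retx
failures in the randomized runs become practically impossible.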
--- lib/test/upper/rlc_stress_test.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/test/upper/rlc_stress_test.cc b/lib/test/upper/rlc_stress_test.cc index 965c21125..cf1e882a5 100644 --- a/lib/test/upper/rlc_stress_test.cc +++ b/lib/test/upper/rlc_stress_test.cc @@ -107,7 +107,7 @@ void parse_args(stress_test_args_t* args, int argc, char* argv[]) ("singletx", bpo::value(&args->single_tx)->default_value(false), "If set to true, only one node is generating data") ("pcap", bpo::value(&args->write_pcap)->default_value(false), "Whether to write all RLC PDU to PCAP file") ("zeroseed", bpo::value(&args->zero_seed)->default_value(false), "Whether to initialize random seed to zero") - ("max_retx", bpo::value(&args->max_retx)->default_value(8), "Maximum number of RLC retransmission attempts") + ("max_retx", bpo::value(&args->max_retx)->default_value(32), "Maximum number of RLC retransmission attempts") ("nof_pdu_tti", bpo::value(&args->nof_pdu_tti)->default_value(1), "Number of PDUs processed in a TTI"); // clang-format on From 3a4ae3d69dfd000a72c875d7730a4cb52ae26cb1 Mon Sep 17 00:00:00 2001 From: Francisco Date: Sat, 13 Mar 2021 13:34:15 +0000 Subject: [PATCH 20/64] extended broadcast+RAR DCI encoding scheduler tests --- srsenb/hdr/stack/mac/sched_phy_ch/sched_dci.h | 2 +- srsenb/test/mac/sched_common_test_suite.cc | 24 +++++ srsenb/test/mac/sched_dci_test.cc | 88 ++++++++++--------- 3 files changed, 71 insertions(+), 43 deletions(-) diff --git a/srsenb/hdr/stack/mac/sched_phy_ch/sched_dci.h b/srsenb/hdr/stack/mac/sched_phy_ch/sched_dci.h index e5aa7fa72..aed040236 100644 --- a/srsenb/hdr/stack/mac/sched_phy_ch/sched_dci.h +++ b/srsenb/hdr/stack/mac/sched_phy_ch/sched_dci.h @@ -46,7 +46,7 @@ tbs_info compute_mcs_and_tbs(uint32_t nof_prb, bool use_tbs_index_alt); /** - * Compute lowest MCS, TBS based on CQI, N_prb that satisfies TBS >= req_bytes + * Compute lowest MCS, TBS based on CQI, N_prb that satisfies TBS >= req_bytes (best effort) * \remark See TS 36.213 - Table 7.1.7.1-1/1A * @return resulting TBS (in bytes) and mcs. TBS=-1 if no valid solution was found. 
*/ diff --git a/srsenb/test/mac/sched_common_test_suite.cc b/srsenb/test/mac/sched_common_test_suite.cc index 1e9f68710..f603d9093 100644 --- a/srsenb/test/mac/sched_common_test_suite.cc +++ b/srsenb/test/mac/sched_common_test_suite.cc @@ -188,6 +188,9 @@ int test_sib_scheduling(const sf_output_res_t& sf_out, uint32_t enb_cc_idx) "Allocated BC process with TBS=%d < sib_len=%d", bc->tbs, cell_params.cfg.sibs[bc->index].len); + CONDERROR(bc->dci.rnti != 0xffff, "Invalid rnti=0x%x for SIB%d", bc->dci.rnti, bc->index); + CONDERROR(bc->dci.format != SRSLTE_DCI_FORMAT1A, "Invalid DCI format for SIB%d", bc->index); + uint32_t x = (bc->index - 1) * cell_params.cfg.si_window_ms; uint32_t sf = x % 10; uint32_t sfn_start = sfn; @@ -302,6 +305,21 @@ int test_dci_content_common(const sf_output_res_t& sf_out, uint32_t enb_cc_idx) } } } + + // TEST: max coderate is not exceeded for RA and Broadcast + srslte_dl_sf_cfg_t dl_sf = {}; + dl_sf.cfi = sf_out.dl_cc_result[enb_cc_idx].cfi; + dl_sf.tti = to_tx_dl(sf_out.tti_rx).to_uint(); + auto test_ra_bc_coderate = [&dl_sf, &cell_params](uint32_t tbs, const srslte_dci_dl_t& dci) { + srslte_pdsch_grant_t grant = {}; + srslte_ra_dl_grant_to_grant_prb_allocation(&dci, &grant, cell_params.cfg.cell.nof_prb); + uint32_t nof_re = srslte_ra_dl_grant_nof_re(&cell_params.cfg.cell, &dl_sf, &grant); + float coderate = srslte_coderate(tbs * 8, nof_re); + const uint32_t Qm = 2; + CONDERROR(coderate > 0.930f * Qm, "Max coderate was exceeded from broadcast DCI"); + return SRSLTE_SUCCESS; + }; + for (uint32_t i = 0; i < dl_result.nof_bc_elems; ++i) { auto& bc = dl_result.bc[i]; if (bc.type == sched_interface::dl_sched_bc_t::BCCH) { @@ -314,10 +332,16 @@ int test_dci_content_common(const sf_output_res_t& sf_out, uint32_t enb_cc_idx) } else { TESTERROR("Invalid broadcast process id=%d", (int)bc.type); } + + TESTASSERT(test_ra_bc_coderate(bc.tbs, bc.dci) == SRSLTE_SUCCESS); } + for (uint32_t i = 0; i < dl_result.nof_rar_elems; ++i) { const auto& rar = dl_result.rar[i]; CONDERROR(rar.tbs == 0, "Allocated RAR process with invalid TBS=%d", rar.tbs); + + // TEST: max coderate is not exceeded + TESTASSERT(test_ra_bc_coderate(rar.tbs, rar.dci) == SRSLTE_SUCCESS); } return SRSLTE_SUCCESS; diff --git a/srsenb/test/mac/sched_dci_test.cc b/srsenb/test/mac/sched_dci_test.cc index 662ae3b4e..00b5ed3c0 100644 --- a/srsenb/test/mac/sched_dci_test.cc +++ b/srsenb/test/mac/sched_dci_test.cc @@ -122,6 +122,38 @@ int test_mcs_tbs_dl_helper(const sched_cell_params_t& cell_params, const tbs_tes return SRSLTE_SUCCESS; } +int assert_mcs_tbs_result(uint32_t cell_nof_prb, + uint32_t cqi, + uint32_t prb_grant_size, + uint32_t tbs, + uint32_t mcs, + bool alt_cqi_table = false) +{ + sched_cell_params_t cell_params = {}; + sched_interface::cell_cfg_t cell_cfg = generate_default_cell_cfg(cell_nof_prb); + sched_interface::sched_args_t sched_args = {}; + cell_params.set_cfg(0, cell_cfg, sched_args); + tbs_test_args args; + args.verbose = true; + args.cqi = cqi; + args.prb_grant_size = prb_grant_size; + args.use_tbs_index_alt = alt_cqi_table; + if (alt_cqi_table) { + args.max_mcs = std::min(args.max_mcs, 27u); // limited to 27 for 256-QAM + } + + tbs_info expected_result; + TESTASSERT(test_mcs_tbs_dl_helper(cell_params, args, &expected_result) == SRSLTE_SUCCESS); + CONDERROR(expected_result != tbs_info(tbs / 8, mcs), + "TBS computation failure. 
{%d, %d}!={%d, %d}", + expected_result.tbs_bytes * 8, + expected_result.mcs, + tbs, + mcs); + + return SRSLTE_SUCCESS; +} + int test_mcs_lookup_specific() { sched_cell_params_t cell_params = {}; @@ -134,50 +166,14 @@ int test_mcs_lookup_specific() /* TEST CASE: DL, no 256-QAM */ // cqi=5,Nprb=1 -> {mcs=3, tbs_idx=3, tbs=40} - TESTASSERT(test_mcs_tbs_dl_helper(cell_params, args, &expected_result) == SRSLTE_SUCCESS); - CONDERROR(expected_result != tbs_info(40 / 8, 3), - "TBS computation failure. {%d, %d}!={40, 3}", - expected_result.tbs_bytes * 8, - expected_result.mcs); + TESTASSERT(assert_mcs_tbs_result(6, 5, 1, 40, 3) == SRSLTE_SUCCESS); - // cqi=15,Nprb=1 -> {mcs=19, tbs_idx=17, tbs=336} - args.cqi = 15; - TESTASSERT(test_mcs_tbs_dl_helper(cell_params, args, &expected_result) == SRSLTE_SUCCESS); - CONDERROR(expected_result != tbs_info(336 / 8, 19), - "TBS computation failure. {%d, %d}!={336, 19}", - expected_result.tbs_bytes * 8, - expected_result.mcs); + TESTASSERT(assert_mcs_tbs_result(6, 15, 1, 336, 19) == SRSLTE_SUCCESS); + TESTASSERT(assert_mcs_tbs_result(6, 5, 4, 256, 4) == SRSLTE_SUCCESS); - // cqi=9,Nprb=1,cell_nprb=100 -> {mcs=28, tbs_idx=17, tbs=712} - cell_params = {}; - cell_cfg = generate_default_cell_cfg(100); - cell_params.set_cfg(0, cell_cfg, sched_args); - args.cqi = 9; - TESTASSERT(test_mcs_tbs_dl_helper(cell_params, args, &expected_result) == SRSLTE_SUCCESS); - CONDERROR(expected_result != tbs_info(712 / 8, 28), - "TBS computation failure. {%d, %d}!={712, 28}", - expected_result.tbs_bytes * 8, - expected_result.mcs); - - // cqi=10,Nprb=10,cell_nprb=100 -> {mcs=28, tbs=5736} - args.prb_grant_size = 10; - args.cqi = 10; - TESTASSERT(test_mcs_tbs_dl_helper(cell_params, args, &expected_result) == SRSLTE_SUCCESS); - CONDERROR(expected_result != tbs_info(5736 / 8, 25), - "TBS computation failure. {%d, %d}!={5736, 25}", - expected_result.tbs_bytes * 8, - expected_result.mcs); - - // cqi=15,Nprb=1,256-QAM -> {mcs=26,tbs_idx=32,tbs=968} - args.prb_grant_size = 1; - args.use_tbs_index_alt = true; - args.max_mcs = 27; // limited to 27 for 256-QAM - args.cqi = 15; - TESTASSERT(test_mcs_tbs_dl_helper(cell_params, args, &expected_result) == SRSLTE_SUCCESS); - CONDERROR(expected_result != tbs_info(968 / 8, 27), - "TBS computation failure. {%d, %d}!={968, 27}", - expected_result.tbs_bytes * 8, - expected_result.mcs); + TESTASSERT(assert_mcs_tbs_result(100, 9, 1, 712, 28) == SRSLTE_SUCCESS); + TESTASSERT(assert_mcs_tbs_result(100, 10, 10, 5736, 25) == SRSLTE_SUCCESS); + TESTASSERT(assert_mcs_tbs_result(100, 15, 1, 968, 27, true) == SRSLTE_SUCCESS); return SRSLTE_SUCCESS; } @@ -279,6 +275,14 @@ int test_min_mcs_tbs_specific() int main() { + auto& mac_log = srslog::fetch_basic_logger("MAC"); + mac_log.set_level(srslog::basic_levels::info); + auto& test_log = srslog::fetch_basic_logger("TEST"); + test_log.set_level(srslog::basic_levels::info); + + // Start the log backend. + srslog::init(); + TESTASSERT(srsenb::test_mcs_lookup_specific() == SRSLTE_SUCCESS); TESTASSERT(srsenb::test_mcs_tbs_consistency_all() == SRSLTE_SUCCESS); TESTASSERT(srsenb::test_min_mcs_tbs_specific() == SRSLTE_SUCCESS); From 76103065f7358f8faf7727955c2d70e2a1186e20 Mon Sep 17 00:00:00 2001 From: Francisco Date: Sun, 14 Mar 2021 23:39:52 +0000 Subject: [PATCH 21/64] collapse the 2-stage DCI generation process of SIB/Paging/RAR into one single stage in the scheduler. 
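The common helper boils down to picking the lowest I_TBS whose Format 1A TBS
(with N_prb^1A = 2 or 3) fits the payload, and then rejecting the grant if the
resulting code rate exceeds 0.93 * Qm. A minimal sketch of the selection step,
assuming only the existing srslte_ra_tbs_from_idx() lookup from the PHY RA
module (the struct and function names below are illustrative, not part of the
patch):

    // Illustrative only: lowest-MCS selection for DCI Format 1A payloads
    // (SIB/Paging/RAR), mirroring generate_ra_bc_dci_format1a_common().
    #include "srslte/phy/phch/ra.h"

    struct mcs_tbs_1a {
      int mcs      = -1; // I_TBS index, -1 if the payload does not fit
      int tbs_bits = -1; // resulting transport block size in bits
      int n_prb1a  = 0;  // N_prb^1A used for the TBS lookup (2 or 3)
    };

    static mcs_tbs_1a lowest_mcs_format1a(uint32_t req_bytes)
    {
      const int req_bits = static_cast<int>(req_bytes) * 8;
      for (int i = 0; i < 27; ++i) {
        // Try the smaller virtual allocation first, then N_prb^1A = 3
        if (srslte_ra_tbs_from_idx(i, 2) >= req_bits) {
          return {i, srslte_ra_tbs_from_idx(i, 2), 2};
        }
        if (srslte_ra_tbs_from_idx(i, 3) >= req_bits) {
          return {i, srslte_ra_tbs_from_idx(i, 3), 3};
        }
      }
      return {};
    }

The real helper additionally fills in the type-2 RIV, RV and RNTI fields of the
DCI and computes the effective code rate from the cell's available REs before
accepting the allocation.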
--- srsenb/hdr/stack/mac/sched_carrier.h | 6 +- srsenb/hdr/stack/mac/sched_grid.h | 14 +- srsenb/hdr/stack/mac/sched_phy_ch/sched_dci.h | 43 +++- srsenb/src/stack/mac/sched_carrier.cc | 24 +- srsenb/src/stack/mac/sched_grid.cc | 224 ++++-------------- srsenb/src/stack/mac/sched_helpers.cc | 2 - .../src/stack/mac/sched_phy_ch/sched_dci.cc | 204 ++++++++++++++++ srsenb/test/mac/sched_common_test_suite.cc | 2 +- 8 files changed, 308 insertions(+), 211 deletions(-) diff --git a/srsenb/hdr/stack/mac/sched_carrier.h b/srsenb/hdr/stack/mac/sched_carrier.h index 147ab9826..f535cbb87 100644 --- a/srsenb/hdr/stack/mac/sched_carrier.h +++ b/srsenb/hdr/stack/mac/sched_carrier.h @@ -119,9 +119,9 @@ private: const sched_cell_params_t* cc_cfg = nullptr; sched_ue_list* ue_db = nullptr; - std::deque pending_rars; - uint32_t rar_aggr_level = 2; - static const uint32_t PRACH_RAR_OFFSET = 3; // TS 36.321 Sec. 5.1.4 + std::deque pending_rars; + uint32_t rar_aggr_level = 2; + static const uint32_t PRACH_RAR_OFFSET = 3; // TS 36.321 Sec. 5.1.4 }; } // namespace srsenb diff --git a/srsenb/hdr/stack/mac/sched_grid.h b/srsenb/hdr/stack/mac/sched_grid.h index 3ef27c71b..00af488df 100644 --- a/srsenb/hdr/stack/mac/sched_grid.h +++ b/srsenb/hdr/stack/mac/sched_grid.h @@ -150,9 +150,7 @@ public: struct ctrl_alloc_t { size_t dci_idx; rbg_interval rbg_range; - uint16_t rnti; uint32_t req_bytes; - alloc_type_t alloc_type; }; struct rar_alloc_t { sf_sched::ctrl_alloc_t alloc_data; @@ -161,9 +159,8 @@ public: {} }; struct bc_alloc_t : public ctrl_alloc_t { - uint32_t rv = 0; - uint32_t sib_idx = 0; - bc_alloc_t() = default; + sched_interface::dl_sched_bc_t bc_grant; + bc_alloc_t() = default; explicit bc_alloc_t(const ctrl_alloc_t& c) : ctrl_alloc_t(c) {} }; struct dl_alloc_t { @@ -189,12 +186,6 @@ public: uint32_t n_prb = 0; uint32_t mcs = 0; }; - struct pending_rar_t { - uint16_t ra_rnti = 0; - tti_point prach_tti{}; - uint32_t nof_grants = 0; - sched_interface::dl_sched_rar_info_t msg3_grant[sched_interface::MAX_RAR_LIST] = {}; - }; typedef std::pair ctrl_code_t; // Control/Configuration Methods @@ -236,7 +227,6 @@ public: private: ctrl_code_t alloc_dl_ctrl(uint32_t aggr_lvl, uint32_t tbs_bytes, uint16_t rnti); - int generate_format1a(prb_interval prb_range, uint32_t tbs, uint32_t rv, uint16_t rnti, srslte_dci_dl_t* dci); void set_bc_sched_result(const sf_cch_allocator::alloc_result_t& dci_result, sched_interface::dl_sched_res_t* dl_result); void set_rar_sched_result(const sf_cch_allocator::alloc_result_t& dci_result, diff --git a/srsenb/hdr/stack/mac/sched_phy_ch/sched_dci.h b/srsenb/hdr/stack/mac/sched_phy_ch/sched_dci.h index aed040236..9874b1634 100644 --- a/srsenb/hdr/stack/mac/sched_phy_ch/sched_dci.h +++ b/srsenb/hdr/stack/mac/sched_phy_ch/sched_dci.h @@ -13,7 +13,8 @@ #ifndef SRSLTE_SCHED_DCI_H #define SRSLTE_SCHED_DCI_H -#include +#include "../sched_common.h" +#include "srslte/adt/bounded_vector.h" namespace srsenb { @@ -59,6 +60,46 @@ tbs_info compute_min_mcs_and_tbs_from_required_bytes(uint32_t nof_prb, bool ulqam64_enabled, bool use_tbs_index_alt); +struct pending_rar_t { + uint16_t ra_rnti = 0; + tti_point prach_tti{}; + srslte::bounded_vector msg3_grant = {}; +}; + +bool generate_sib_dci(sched_interface::dl_sched_bc_t& bc, + tti_point tti_tx_dl, + uint32_t sib_idx, + uint32_t sib_ntx, + rbg_interval rbg_range, + const sched_cell_params_t& cell_params, + uint32_t current_cfi); + +bool generate_paging_dci(sched_interface::dl_sched_bc_t& bc, + tti_point tti_tx_dl, + uint32_t req_bytes, + rbg_interval rbg_range, 
+ const sched_cell_params_t& cell_params, + uint32_t current_cfi); + +bool generate_rar_dci(sched_interface::dl_sched_rar_t& rar, + tti_point tti_tx_dl, + const pending_rar_t& pending_rar, + rbg_interval rbg_range, + uint32_t nof_grants, + uint32_t start_msg3_prb, + const sched_cell_params_t& cell_params, + uint32_t current_cfi); + +void log_broadcast_allocation(const sched_interface::dl_sched_bc_t& bc, + rbg_interval rbg_range, + const sched_cell_params_t& cell_params); + +void log_rar_allocation(const sched_interface::dl_sched_rar_t& rar, + rbg_interval rbg_range, + const sched_cell_params_t& cell_params); + +void log_rar_allocation(const sched_interface::dl_sched_rar_t& rar, rbg_interval rbg_range); + } // namespace srsenb #endif // SRSLTE_SCHED_DCI_H diff --git a/srsenb/src/stack/mac/sched_carrier.cc b/srsenb/src/stack/mac/sched_carrier.cc index dc075e5c6..d3549d221 100644 --- a/srsenb/src/stack/mac/sched_carrier.cc +++ b/srsenb/src/stack/mac/sched_carrier.cc @@ -144,7 +144,7 @@ void ra_sched::dl_sched(sf_sched* tti_sched) rar_aggr_level = 2; while (not pending_rars.empty()) { - sf_sched::pending_rar_t& rar = pending_rars.front(); + pending_rar_t& rar = pending_rars.front(); // Discard all RARs out of the window. The first one inside the window is scheduled, if we can't we exit srslte::tti_interval rar_window{rar.prach_tti + PRACH_RAR_OFFSET, @@ -179,13 +179,13 @@ void ra_sched::dl_sched(sf_sched* tti_sched) } uint32_t nof_rar_allocs = ret.second; - if (nof_rar_allocs == rar.nof_grants) { + if (nof_rar_allocs == rar.msg3_grant.size()) { // all RAR grants were allocated. Remove pending RAR pending_rars.pop_front(); } else { // keep the RAR grants that were not scheduled, so we can schedule in next TTI - std::copy(&rar.msg3_grant[nof_rar_allocs], &rar.msg3_grant[rar.nof_grants], &rar.msg3_grant[0]); - rar.nof_grants -= nof_rar_allocs; + std::copy(rar.msg3_grant.begin() + nof_rar_allocs, rar.msg3_grant.end(), rar.msg3_grant.begin()); + rar.msg3_grant.resize(rar.msg3_grant.size() - nof_rar_allocs); } } } @@ -204,24 +204,22 @@ int ra_sched::dl_rach_info(dl_sched_rar_info_t rar_info) uint16_t ra_rnti = 1 + (uint16_t)(rar_info.prach_tti % 10u); // find pending rar with same RA-RNTI - for (sf_sched::pending_rar_t& r : pending_rars) { + for (pending_rar_t& r : pending_rars) { if (r.prach_tti.to_uint() == rar_info.prach_tti and ra_rnti == r.ra_rnti) { - if (r.nof_grants >= sched_interface::MAX_RAR_LIST) { + if (r.msg3_grant.size() >= sched_interface::MAX_RAR_LIST) { logger.warning("PRACH ignored, as the the maximum number of RAR grants per tti has been reached"); return SRSLTE_ERROR; } - r.msg3_grant[r.nof_grants] = rar_info; - r.nof_grants++; + r.msg3_grant.push_back(rar_info); return SRSLTE_SUCCESS; } } // create new RAR - sf_sched::pending_rar_t p; - p.ra_rnti = ra_rnti; - p.prach_tti = tti_point{rar_info.prach_tti}; - p.nof_grants = 1; - p.msg3_grant[0] = rar_info; + pending_rar_t p; + p.ra_rnti = ra_rnti; + p.prach_tti = tti_point{rar_info.prach_tti}; + p.msg3_grant.push_back(rar_info); pending_rars.push_back(p); return SRSLTE_SUCCESS; diff --git a/srsenb/src/stack/mac/sched_grid.cc b/srsenb/src/stack/mac/sched_grid.cc index d8d896f0c..8054af1d4 100644 --- a/srsenb/src/stack/mac/sched_grid.cc +++ b/srsenb/src/stack/mac/sched_grid.cc @@ -14,8 +14,6 @@ #include "srsenb/hdr/stack/mac/sched_helpers.h" #include "srslte/common/string_helpers.h" -using srslte::tti_point; - namespace srsenb { const char* alloc_outcome_t::to_string() const @@ -399,11 +397,9 @@ sf_sched::ctrl_code_t 
sf_sched::alloc_dl_ctrl(uint32_t aggr_lvl, uint32_t tbs_by } // Allocation Successful - ctrl_alloc.dci_idx = tti_alloc.get_pdcch_grid().nof_allocs() - 1; - ctrl_alloc.rbg_range = ret.rbg_range; - ctrl_alloc.rnti = rnti; - ctrl_alloc.req_bytes = tbs_bytes; - ctrl_alloc.alloc_type = alloc_type; + ctrl_alloc.dci_idx = tti_alloc.get_pdcch_grid().nof_allocs() - 1; + ctrl_alloc.rbg_range = ret.rbg_range; + ctrl_alloc.req_bytes = tbs_bytes; return {ret.outcome, ctrl_alloc}; } @@ -424,8 +420,13 @@ alloc_outcome_t sf_sched::alloc_bc(uint32_t aggr_lvl, uint32_t sib_idx, uint32_t // BC allocation successful bc_alloc_t bc_alloc(ret.second); - bc_alloc.rv = rv; - bc_alloc.sib_idx = sib_idx; + + if (not generate_sib_dci( + bc_alloc.bc_grant, get_tti_tx_dl(), sib_idx, sib_ntx, ret.second.rbg_range, *cc_cfg, tti_alloc.get_cfi())) { + logger.warning("SCHED: FAIL"); + return alloc_outcome_t::ERROR; + } + bc_allocs.push_back(bc_alloc); return ret.first; @@ -446,6 +447,13 @@ alloc_outcome_t sf_sched::alloc_paging(uint32_t aggr_lvl, uint32_t paging_payloa // Paging allocation successful bc_alloc_t bc_alloc(ret.second); + + if (not generate_paging_dci( + bc_alloc.bc_grant, get_tti_tx_dl(), paging_payload, ret.second.rbg_range, *cc_cfg, tti_alloc.get_cfi())) { + logger.warning("SCHED: FAIL"); + return alloc_outcome_t::ERROR; + } + bc_allocs.push_back(bc_alloc); return ret.first; @@ -460,7 +468,7 @@ std::pair sf_sched::alloc_rar(uint32_t aggr_lvl, cons return ret; } - for (uint32_t nof_grants = rar.nof_grants; nof_grants > 0; nof_grants--) { + for (uint32_t nof_grants = rar.msg3_grant.size(); nof_grants > 0; nof_grants--) { uint32_t buf_rar = 7 * nof_grants + 1; // 1+6 bytes per RAR subheader+body and 1 byte for Backoff uint32_t total_msg3_size = msg3_grant_size * nof_grants; @@ -475,35 +483,30 @@ std::pair sf_sched::alloc_rar(uint32_t aggr_lvl, cons ret.first = ret2.first.result; ret.second = nof_grants; - // if there was no space for the RAR, try again - if (ret.first == alloc_outcome_t::RB_COLLISION) { - continue; - } - // if any other error, return - if (ret.first != alloc_outcome_t::SUCCESS) { + if (ret.first == alloc_outcome_t::SUCCESS) { + sched_interface::dl_sched_rar_t rar_grant; + if (generate_rar_dci(rar_grant, + get_tti_tx_dl(), + rar, + ret2.second.rbg_range, + nof_grants, + last_msg3_prb, + *cc_cfg, + tti_alloc.get_cfi())) { + // RAR allocation successful + rar_allocs.emplace_back(ret2.second, rar_grant); + last_msg3_prb += msg3_grant_size * nof_grants; + return ret; + } + } else if (ret.first != alloc_outcome_t::RB_COLLISION) { logger.warning("SCHED: Could not allocate RAR for L=%d, cause=%s", aggr_lvl, ret.first.to_string()); return ret; } - // RAR allocation successful - sched_interface::dl_sched_rar_t rar_grant = {}; - rar_grant.msg3_grant.resize(nof_grants); - for (uint32_t i = 0; i < nof_grants; ++i) { - rar_grant.msg3_grant[i].data = rar.msg3_grant[i]; - rar_grant.msg3_grant[i].grant.tpc_pusch = 3; - rar_grant.msg3_grant[i].grant.trunc_mcs = 0; - uint32_t rba = srslte_ra_type2_to_riv(msg3_grant_size, last_msg3_prb, cc_cfg->cfg.cell.nof_prb); - rar_grant.msg3_grant[i].grant.rba = rba; - - last_msg3_prb += msg3_grant_size; - } - rar_allocs.emplace_back(ret2.second, rar_grant); - - break; - } - if (ret.first != alloc_outcome_t::SUCCESS) { - logger.info("SCHED: RAR allocation postponed due to lack of RBs"); + // if there was no space for the RAR, try again with a lower number of grants } + + logger.info("SCHED: RAR allocation postponed due to lack of RBs"); return ret; } @@ -652,7 +655,7 @@ 
alloc_outcome_t sf_sched::alloc_ul_user(sched_ue* user, prb_interval alloc) bool has_retx = h->has_pending_retx(); if (not has_retx) { alloc_type = ul_alloc_t::NEWTX; - } else if (h->retx_requires_pdcch(tti_point{get_tti_tx_ul()}, alloc)) { + } else if (h->retx_requires_pdcch(get_tti_tx_ul(), alloc)) { alloc_type = ul_alloc_t::ADAPT_RETX; } else { alloc_type = ul_alloc_t::NOADAPT_RETX; @@ -694,68 +697,12 @@ void sf_sched::set_bc_sched_result(const sf_cch_allocator::alloc_result_t& dci_r for (const auto& bc_alloc : bc_allocs) { sched_interface::dl_sched_bc_t* bc = &dl_result->bc[dl_result->nof_bc_elems]; + *bc = bc_alloc.bc_grant; // assign NCCE/L bc->dci.location = dci_result[bc_alloc.dci_idx]->dci_pos; - - /* Generate DCI format1A */ - prb_interval prb_range = prb_interval::rbgs_to_prbs(bc_alloc.rbg_range, cc_cfg->nof_prb()); - int tbs = generate_format1a(prb_range, bc_alloc.req_bytes, bc_alloc.rv, bc_alloc.rnti, &bc->dci); - - // Setup BC/Paging processes - if (bc_alloc.alloc_type == alloc_type_t::DL_BC) { - if (tbs <= (int)bc_alloc.req_bytes) { - logger.warning("SCHED: Error SIB%d, rbgs=(%d,%d), dci=(%d,%d), len=%d", - bc_alloc.sib_idx + 1, - bc_alloc.rbg_range.start(), - bc_alloc.rbg_range.stop(), - bc->dci.location.L, - bc->dci.location.ncce, - bc_alloc.req_bytes); - continue; - } - - // Setup BC process - bc->index = bc_alloc.sib_idx; - bc->type = sched_interface::dl_sched_bc_t::BCCH; - bc->tbs = (uint32_t)bc_alloc.req_bytes; - - logger.debug("SCHED: SIB%d, rbgs=(%d,%d), dci=(%d,%d), rv=%d, len=%d, period=%d, mcs=%d", - bc_alloc.sib_idx + 1, - bc_alloc.rbg_range.start(), - bc_alloc.rbg_range.stop(), - bc->dci.location.L, - bc->dci.location.ncce, - bc_alloc.rv, - bc_alloc.req_bytes, - cc_cfg->cfg.sibs[bc_alloc.sib_idx].period_rf, - bc->dci.tb[0].mcs_idx); - } else { - // Paging - if (tbs <= 0) { - fmt::memory_buffer str_buffer; - fmt::format_to(str_buffer, "{}", bc_alloc.rbg_range); - logger.warning("SCHED: Error Paging, rbgs=%s, dci=(%d,%d)", - srslte::to_c_str(str_buffer), - bc->dci.location.L, - bc->dci.location.ncce); - continue; - } - - // Setup Paging process - bc->type = sched_interface::dl_sched_bc_t::PCCH; - bc->tbs = (uint32_t)tbs; - - fmt::memory_buffer str_buffer; - fmt::format_to(str_buffer, "{}", bc_alloc.rbg_range); - logger.info("SCHED: PCH, rbgs=%s, dci=(%d,%d), tbs=%d, mcs=%d", - srslte::to_c_str(str_buffer), - bc->dci.location.L, - bc->dci.location.ncce, - tbs, - bc->dci.tb[0].mcs_idx); - } - dl_result->nof_bc_elems++; + + log_broadcast_allocation(*bc, bc_alloc.rbg_range, *cc_cfg); } } @@ -765,45 +712,13 @@ void sf_sched::set_rar_sched_result(const sf_cch_allocator::alloc_result_t& dci_ for (const auto& rar_alloc : rar_allocs) { sched_interface::dl_sched_rar_t* rar = &dl_result->rar[dl_result->nof_rar_elems]; + // Setup RAR process + *rar = rar_alloc.rar_grant; // Assign NCCE/L rar->dci.location = dci_result[rar_alloc.alloc_data.dci_idx]->dci_pos; - - /* Generate DCI format1A */ - prb_interval prb_range = prb_interval::rbgs_to_prbs(rar_alloc.alloc_data.rbg_range, cc_cfg->nof_prb()); - int tbs = generate_format1a(prb_range, rar_alloc.alloc_data.req_bytes, 0, rar_alloc.alloc_data.rnti, &rar->dci); - if (tbs <= 0) { - fmt::memory_buffer str_buffer; - fmt::format_to(str_buffer, "{}", rar_alloc.alloc_data.rbg_range); - logger.warning("SCHED: Error RAR, ra_rnti_idx=%d, rbgs=%s, dci=(%d,%d)", - rar_alloc.alloc_data.rnti, - srslte::to_c_str(str_buffer), - rar->dci.location.L, - rar->dci.location.ncce); - continue; - } - - // Setup RAR process - rar->tbs = 
rar_alloc.alloc_data.req_bytes; - rar->msg3_grant = rar_alloc.rar_grant.msg3_grant; - - // Print RAR allocation result - for (uint32_t i = 0; i < rar->msg3_grant.size(); ++i) { - const auto& msg3_grant = rar->msg3_grant[i]; - uint16_t expected_rnti = msg3_grant.data.temp_crnti; - fmt::memory_buffer str_buffer; - fmt::format_to(str_buffer, "{}", rar_alloc.alloc_data.rbg_range); - logger.info("SCHED: RAR, temp_crnti=0x%x, ra-rnti=%d, rbgs=%s, dci=(%d,%d), rar_grant_rba=%d, " - "rar_grant_mcs=%d", - expected_rnti, - rar_alloc.alloc_data.rnti, - srslte::to_c_str(str_buffer), - rar->dci.location.L, - rar->dci.location.ncce, - msg3_grant.grant.rba, - msg3_grant.grant.trunc_mcs); - } - dl_result->nof_rar_elems++; + + log_rar_allocation(*rar, rar_alloc.alloc_data.rbg_range); } } @@ -1093,53 +1008,4 @@ uint32_t sf_sched::get_nof_ctrl_symbols() const return tti_alloc.get_cfi() + ((cc_cfg->cfg.cell.nof_prb <= 10) ? 1 : 0); } -int sf_sched::generate_format1a(prb_interval prb_range, - uint32_t tbs_bytes, - uint32_t rv, - uint16_t rnti, - srslte_dci_dl_t* dci) -{ - /* Calculate I_tbs for this TBS */ - int tbs = tbs_bytes * 8; - int i; - int mcs = -1; - for (i = 0; i < 27; i++) { - if (srslte_ra_tbs_from_idx(i, 2) >= tbs) { - dci->type2_alloc.n_prb1a = srslte_ra_type2_t::SRSLTE_RA_TYPE2_NPRB1A_2; - mcs = i; - tbs = srslte_ra_tbs_from_idx(i, 2); - break; - } - if (srslte_ra_tbs_from_idx(i, 3) >= tbs) { - dci->type2_alloc.n_prb1a = srslte_ra_type2_t::SRSLTE_RA_TYPE2_NPRB1A_3; - mcs = i; - tbs = srslte_ra_tbs_from_idx(i, 3); - break; - } - } - if (i == 28) { - logger.error("Can't allocate Format 1A for TBS=%d", tbs); - return -1; - } - - logger.debug("ra_tbs=%d/%d, tbs_bytes=%d, tbs=%d, mcs=%d", - srslte_ra_tbs_from_idx(mcs, 2), - srslte_ra_tbs_from_idx(mcs, 3), - tbs_bytes, - tbs, - mcs); - - dci->alloc_type = SRSLTE_RA_ALLOC_TYPE2; - dci->type2_alloc.mode = srslte_ra_type2_t::SRSLTE_RA_TYPE2_LOC; - dci->type2_alloc.riv = srslte_ra_type2_to_riv(prb_range.length(), prb_range.start(), cc_cfg->cfg.cell.nof_prb); - dci->pid = 0; - dci->tb[0].mcs_idx = mcs; - dci->tb[0].rv = rv; - dci->format = SRSLTE_DCI_FORMAT1A; - dci->rnti = rnti; - dci->ue_cc_idx = std::numeric_limits::max(); - - return tbs; -} - } // namespace srsenb diff --git a/srsenb/src/stack/mac/sched_helpers.cc b/srsenb/src/stack/mac/sched_helpers.cc index ad1a8cae3..6f9cb4b12 100644 --- a/srsenb/src/stack/mac/sched_helpers.cc +++ b/srsenb/src/stack/mac/sched_helpers.cc @@ -383,8 +383,6 @@ sched_cell_params_t::get_dl_nof_res(srslte::tti_point tti_tx_dl, const srslte_dc } } - // sanity check - assert(nof_re == srslte_ra_dl_grant_nof_re(&cfg.cell, &dl_sf, &grant)); return nof_re; } diff --git a/srsenb/src/stack/mac/sched_phy_ch/sched_dci.cc b/srsenb/src/stack/mac/sched_phy_ch/sched_dci.cc index b9d2e4ab6..6ae306b75 100644 --- a/srsenb/src/stack/mac/sched_phy_ch/sched_dci.cc +++ b/srsenb/src/stack/mac/sched_phy_ch/sched_dci.cc @@ -12,11 +12,16 @@ #include "srsenb/hdr/stack/mac/sched_phy_ch/sched_dci.h" #include "srsenb/hdr/stack/mac/sched_common.h" +#include "srsenb/hdr/stack/mac/sched_helpers.h" +#include "srslte/common/string_helpers.h" + #include #include namespace srsenb { +static srslog::basic_logger& logger = srslog::fetch_basic_logger("MAC"); + /// Compute max TBS based on max coderate int coderate_to_tbs(float max_coderate, uint32_t nof_re) { @@ -162,4 +167,203 @@ tbs_info compute_min_mcs_and_tbs_from_required_bytes(uint32_t nof_prb, return tb_max; } +int generate_ra_bc_dci_format1a_common(srslte_dci_dl_t& dci, + uint16_t rnti, + tti_point 
tti_tx_dl, + uint32_t req_bytes, + rbg_interval rbg_range, + const sched_cell_params_t& cell_params, + uint32_t current_cfi) +{ + static const uint32_t Qm = 2; + + // Calculate I_tbs for this TBS + int tbs = static_cast(req_bytes) * 8; + int mcs = -1; + for (uint32_t i = 0; i < 27; i++) { + if (srslte_ra_tbs_from_idx(i, 2) >= tbs) { + dci.type2_alloc.n_prb1a = srslte_ra_type2_t::SRSLTE_RA_TYPE2_NPRB1A_2; + mcs = i; + tbs = srslte_ra_tbs_from_idx(i, 2); + break; + } + if (srslte_ra_tbs_from_idx(i, 3) >= tbs) { + dci.type2_alloc.n_prb1a = srslte_ra_type2_t::SRSLTE_RA_TYPE2_NPRB1A_3; + mcs = i; + tbs = srslte_ra_tbs_from_idx(i, 3); + break; + } + } + if (mcs < 0) { + // logger.error("Can't allocate Format 1A for TBS=%d", tbs); + return -1; + } + + // Generate remaining DCI Format1A content + dci.alloc_type = SRSLTE_RA_ALLOC_TYPE2; + dci.type2_alloc.mode = srslte_ra_type2_t::SRSLTE_RA_TYPE2_LOC; + prb_interval prb_range = prb_interval::rbgs_to_prbs(rbg_range, cell_params.nof_prb()); + dci.type2_alloc.riv = srslte_ra_type2_to_riv(prb_range.length(), prb_range.start(), cell_params.nof_prb()); + dci.pid = 0; + dci.tb[0].mcs_idx = mcs; + dci.tb[0].rv = 0; // used for SIBs + dci.format = SRSLTE_DCI_FORMAT1A; + dci.rnti = rnti; + dci.ue_cc_idx = std::numeric_limits::max(); + + // Compute effective code rate and verify it doesn't exceed max code rate + uint32_t nof_re = cell_params.get_dl_nof_res(tti_tx_dl, dci, current_cfi); + if (srslte_coderate(tbs, nof_re) >= 0.93F * Qm) { + return -1; + } + + logger.debug("ra_tbs=%d/%d, tbs_bytes=%d, tbs=%d, mcs=%d", + srslte_ra_tbs_from_idx(mcs, 2), + srslte_ra_tbs_from_idx(mcs, 3), + req_bytes, + tbs, + mcs); + + return tbs; +} + +bool generate_sib_dci(sched_interface::dl_sched_bc_t& bc, + tti_point tti_tx_dl, + uint32_t sib_idx, + uint32_t sib_ntx, + rbg_interval rbg_range, + const sched_cell_params_t& cell_params, + uint32_t current_cfi) +{ + bc = {}; + int tbs_bits = generate_ra_bc_dci_format1a_common( + bc.dci, SRSLTE_SIRNTI, tti_tx_dl, cell_params.cfg.sibs[sib_idx].len, rbg_range, cell_params, current_cfi); + if (tbs_bits < 0) { + return false; + } + + // generate SIB-specific fields + bc.index = sib_idx; + bc.type = sched_interface::dl_sched_bc_t::BCCH; + // bc.tbs = sib_len; + bc.tbs = tbs_bits / 8; + bc.dci.tb[0].rv = get_rvidx(sib_ntx); + + return true; +} + +bool generate_paging_dci(sched_interface::dl_sched_bc_t& bc, + tti_point tti_tx_dl, + uint32_t req_bytes, + rbg_interval rbg_range, + const sched_cell_params_t& cell_params, + uint32_t current_cfi) +{ + bc = {}; + int tbs_bits = generate_ra_bc_dci_format1a_common( + bc.dci, SRSLTE_PRNTI, tti_tx_dl, req_bytes, rbg_range, cell_params, current_cfi); + if (tbs_bits < 0) { + return false; + } + + // generate Paging-specific fields + bc.type = sched_interface::dl_sched_bc_t::PCCH; + bc.tbs = tbs_bits / 8; + + return true; +} + +bool generate_rar_dci(sched_interface::dl_sched_rar_t& rar, + tti_point tti_tx_dl, + const pending_rar_t& pending_rar, + rbg_interval rbg_range, + uint32_t nof_grants, + uint32_t start_msg3_prb, + const sched_cell_params_t& cell_params, + uint32_t current_cfi) +{ + const uint32_t msg3_Lcrb = 3; + uint32_t req_bytes = 7 * nof_grants + 1; // 1+6 bytes per RAR subheader+body and 1 byte for Backoff + + rar = {}; + int tbs_bits = generate_ra_bc_dci_format1a_common( + rar.dci, pending_rar.ra_rnti, tti_tx_dl, req_bytes, rbg_range, cell_params, current_cfi); + if (tbs_bits < 0) { + return false; + } + + rar.msg3_grant.resize(nof_grants); + for (uint32_t i = 0; i < nof_grants; ++i) { + 
rar.msg3_grant[i].data = pending_rar.msg3_grant[i]; + rar.msg3_grant[i].grant.tpc_pusch = 3; + rar.msg3_grant[i].grant.trunc_mcs = 0; + rar.msg3_grant[i].grant.rba = srslte_ra_type2_to_riv(msg3_Lcrb, start_msg3_prb, cell_params.nof_prb()); + + start_msg3_prb += msg3_Lcrb; + } + // rar.tbs = tbs_bits / 8; + rar.tbs = req_bytes; + + return true; +} + +void log_broadcast_allocation(const sched_interface::dl_sched_bc_t& bc, + rbg_interval rbg_range, + const sched_cell_params_t& cell_params) +{ + if (not logger.info.enabled()) { + return; + } + + fmt::memory_buffer str_buffer; + fmt::format_to(str_buffer, "{}", rbg_range); + + if (bc.type == sched_interface::dl_sched_bc_t::bc_type::BCCH) { + logger.debug("SCHED: SIB%d, rbgs=(%d,%d), dci=(%d,%d), rv=%d, len=%d, period=%d, mcs=%d", + bc.index + 1, + rbg_range.start(), + rbg_range.stop(), + bc.dci.location.L, + bc.dci.location.ncce, + bc.dci.tb[0].rv, + cell_params.cfg.sibs[bc.index].len, + cell_params.cfg.sibs[bc.index].period_rf, + bc.dci.tb[0].mcs_idx); + } else { + logger.info("SCHED: PCH, rbgs=%s, dci=(%d,%d), tbs=%d, mcs=%d", + srslte::to_c_str(str_buffer), + bc.dci.location.L, + bc.dci.location.ncce, + bc.tbs, + bc.dci.tb[0].mcs_idx); + } +} + +void log_rar_allocation(const sched_interface::dl_sched_rar_t& rar, rbg_interval rbg_range) +{ + if (not logger.info.enabled()) { + return; + } + + fmt::memory_buffer str_buffer; + fmt::format_to(str_buffer, "{}", rbg_range); + + fmt::memory_buffer str_buffer2; + for (size_t i = 0; i < rar.msg3_grant.size(); ++i) { + fmt::format_to(str_buffer2, + "{}{{c-rnti=0x{:x}, rba={}, mcs={}}}", + i > 0 ? ", " : "", + rar.msg3_grant[i].data.temp_crnti, + rar.msg3_grant[i].grant.rba, + rar.msg3_grant[i].grant.trunc_mcs); + } + + logger.info("SCHED: RAR, ra-rnti=%d, rbgs=%s, dci=(%d,%d), msg3 grants=[%s]", + rar.dci.rnti, + srslte::to_c_str(str_buffer), + rar.dci.location.L, + rar.dci.location.ncce, + srslte::to_c_str(str_buffer2)); +} + } // namespace srsenb diff --git a/srsenb/test/mac/sched_common_test_suite.cc b/srsenb/test/mac/sched_common_test_suite.cc index f603d9093..eee535873 100644 --- a/srsenb/test/mac/sched_common_test_suite.cc +++ b/srsenb/test/mac/sched_common_test_suite.cc @@ -321,7 +321,7 @@ int test_dci_content_common(const sf_output_res_t& sf_out, uint32_t enb_cc_idx) }; for (uint32_t i = 0; i < dl_result.nof_bc_elems; ++i) { - auto& bc = dl_result.bc[i]; + const sched_interface::dl_sched_bc_t& bc = dl_result.bc[i]; if (bc.type == sched_interface::dl_sched_bc_t::BCCH) { CONDERROR(bc.tbs < cell_params.cfg.sibs[bc.index].len, "Allocated BC process with TBS=%d < sib_len=%d", From 47f1175502ef53b3a32a7613abb0e607beb30502 Mon Sep 17 00:00:00 2001 From: Francisco Date: Mon, 15 Mar 2021 13:01:19 +0000 Subject: [PATCH 22/64] [sched, feature enhancement] - allow variable nof prbs allocated per SIB / Paging message --- lib/src/phy/phch/ra_dl.c | 2 +- srsenb/hdr/stack/mac/sched_carrier.h | 1 + srsenb/hdr/stack/mac/sched_grid.h | 21 ++-- srsenb/hdr/stack/mac/schedulers/sched_base.h | 6 +- srsenb/src/stack/mac/sched_carrier.cc | 114 ++++++++++++------ srsenb/src/stack/mac/sched_grid.cc | 83 +++++++------ srsenb/src/stack/mac/schedulers/sched_base.cc | 11 +- srsenb/src/stack/rrc/rrc_mobility.cc | 1 + srsenb/test/mac/sched_test_common.cc | 17 ++- srsenb/test/mac/sched_test_common.h | 6 +- 10 files changed, 168 insertions(+), 94 deletions(-) diff --git a/lib/src/phy/phch/ra_dl.c b/lib/src/phy/phch/ra_dl.c index 10e062a59..589c16699 100644 --- a/lib/src/phy/phch/ra_dl.c +++ b/lib/src/phy/phch/ra_dl.c @@ -154,7 
+154,7 @@ uint32_t ra_re_x_prb(const srslte_cell_t* cell, srslte_dl_sf_cfg_t* sf, uint32_t /** Compute PRB allocation for Downlink as defined in 7.1.6 of 36.213 * Decode grant->type?_alloc to grant - * This function only reads grant->type?_alloc and grant->alloc_type fields. + * This function only reads dci->type?_alloc (e.g. rbg_bitmask, mode, riv) and dci->alloc_type fields. * This function only writes grant->prb_idx and grant->nof_prb. */ /** Compute PRB allocation for Downlink as defined in 7.1.6 of 36.213 */ diff --git a/srsenb/hdr/stack/mac/sched_carrier.h b/srsenb/hdr/stack/mac/sched_carrier.h index f535cbb87..669ab5e02 100644 --- a/srsenb/hdr/stack/mac/sched_carrier.h +++ b/srsenb/hdr/stack/mac/sched_carrier.h @@ -91,6 +91,7 @@ private: // args const sched_cell_params_t* cc_cfg = nullptr; rrc_interface_mac* rrc = nullptr; + srslog::basic_logger& logger; std::array pending_sibs; diff --git a/srsenb/hdr/stack/mac/sched_grid.h b/srsenb/hdr/stack/mac/sched_grid.h index 00af488df..58c03acf7 100644 --- a/srsenb/hdr/stack/mac/sched_grid.h +++ b/srsenb/hdr/stack/mac/sched_grid.h @@ -36,7 +36,8 @@ struct alloc_outcome_t { ALREADY_ALLOC, NO_DATA, INVALID_PRBMASK, - INVALID_CARRIER + INVALID_CARRIER, + INVALID_CODERATE }; result_enum result = ERROR; alloc_outcome_t() = default; @@ -101,6 +102,7 @@ public: void init(const sched_cell_params_t& cell_params_); void new_tti(tti_point tti_rx); dl_ctrl_alloc_t alloc_dl_ctrl(uint32_t aggr_lvl, alloc_type_t alloc_type); + alloc_outcome_t alloc_dl_ctrl(uint32_t aggr_lvl, rbg_interval rbg_range, alloc_type_t alloc_type); alloc_outcome_t alloc_dl_data(sched_ue* user, const rbgmask_t& user_mask, bool has_pusch_grant); bool reserve_dl_rbgs(uint32_t start_rbg, uint32_t end_rbg); alloc_outcome_t alloc_ul_data(sched_ue* user, prb_interval alloc, bool needs_pdcch, bool strict = true); @@ -194,8 +196,8 @@ public: void new_tti(srslte::tti_point tti_rx_, sf_sched_result* cc_results); // DL alloc methods - alloc_outcome_t alloc_bc(uint32_t aggr_lvl, uint32_t sib_idx, uint32_t sib_ntx); - alloc_outcome_t alloc_paging(uint32_t aggr_lvl, uint32_t paging_payload); + alloc_outcome_t alloc_sib(uint32_t aggr_lvl, uint32_t sib_idx, uint32_t sib_ntx, rbg_interval rbgs); + alloc_outcome_t alloc_paging(uint32_t aggr_lvl, uint32_t paging_payload, rbg_interval rbgs); std::pair alloc_rar(uint32_t aggr_lvl, const pending_rar_t& rar_grant); bool reserve_dl_rbgs(uint32_t rbg_start, uint32_t rbg_end) { return tti_alloc.reserve_dl_rbgs(rbg_start, rbg_end); } const std::vector& get_allocated_rars() const { return rar_allocs; } @@ -244,12 +246,13 @@ private: sf_sched_result* cc_results; ///< Results of other CCs for the same Subframe // internal state - sf_grid_t tti_alloc; - std::vector bc_allocs; - std::vector rar_allocs; - std::vector data_allocs; - std::vector ul_data_allocs; - uint32_t last_msg3_prb = 0, max_msg3_prb = 0; + sf_grid_t tti_alloc; + + srslte::bounded_vector bc_allocs; + std::vector rar_allocs; + std::vector data_allocs; + std::vector ul_data_allocs; + uint32_t last_msg3_prb = 0, max_msg3_prb = 0; // Next TTI state tti_point tti_rx; diff --git a/srsenb/hdr/stack/mac/schedulers/sched_base.h b/srsenb/hdr/stack/mac/schedulers/sched_base.h index 7228d2bdf..47ed42006 100644 --- a/srsenb/hdr/stack/mac/schedulers/sched_base.h +++ b/srsenb/hdr/stack/mac/schedulers/sched_base.h @@ -34,14 +34,16 @@ protected: /**************** Helper methods ****************/ +rbg_interval find_empty_rbg_interval(uint32_t max_nof_rbgs, const rbgmask_t& current_mask); + /** * Finds a bitmask of 
available RBG resources for a given UE in a greedy fashion * @param ue UE being allocated - * @param enb_cc_idx carrier index + * @param is_contiguous whether to find a contiguous range of RBGs * @param current_mask bitmask of occupied RBGs, where to search for available RBGs * @return bitmask of found RBGs. If a valid mask wasn't found, bitmask::size() == 0 */ -rbgmask_t compute_user_rbgmask_greedy(sched_ue& ue, uint32_t enb_cc_idx, const rbgmask_t& current_mask); +rbgmask_t compute_rbgmask_greedy(uint32_t max_nof_rbgs, bool is_contiguous, const rbgmask_t& current_mask); /** * Finds a range of L contiguous PRBs that are empty diff --git a/srsenb/src/stack/mac/sched_carrier.cc b/srsenb/src/stack/mac/sched_carrier.cc index d3549d221..6070cc34a 100644 --- a/srsenb/src/stack/mac/sched_carrier.cc +++ b/srsenb/src/stack/mac/sched_carrier.cc @@ -26,7 +26,9 @@ using srslte::tti_point; * Broadcast (SIB+Paging) scheduling *******************************************************/ -bc_sched::bc_sched(const sched_cell_params_t& cfg_, srsenb::rrc_interface_mac* rrc_) : cc_cfg(&cfg_), rrc(rrc_) {} +bc_sched::bc_sched(const sched_cell_params_t& cfg_, srsenb::rrc_interface_mac* rrc_) : + cc_cfg(&cfg_), rrc(rrc_), logger(srslog::fetch_basic_logger("MAC")) +{} void bc_sched::dl_sched(sf_sched* tti_sched) { @@ -86,36 +88,66 @@ void bc_sched::update_si_windows(sf_sched* tti_sched) void bc_sched::alloc_sibs(sf_sched* tti_sched) { - uint32_t current_sf_idx = tti_sched->get_tti_tx_dl().sf_idx(); - uint32_t current_sfn = tti_sched->get_tti_tx_dl().sfn(); + const uint32_t max_nof_prbs_sib = 4; + uint32_t current_sf_idx = tti_sched->get_tti_tx_dl().sf_idx(); + uint32_t current_sfn = tti_sched->get_tti_tx_dl().sfn(); - for (uint32_t i = 0; i < pending_sibs.size(); i++) { - if (cc_cfg->cfg.sibs[i].len > 0 and pending_sibs[i].is_in_window and pending_sibs[i].n_tx < 4) { - uint32_t nof_tx = (i > 0) ? SRSLTE_MIN(srslte::ceil_div(cc_cfg->cfg.si_window_ms, 10), 4) : 4; - uint32_t n_sf = (tti_sched->get_tti_tx_dl() - pending_sibs[i].window_start); + for (uint32_t sib_idx = 0; sib_idx < pending_sibs.size(); sib_idx++) { + sched_sib_t& pending_sib = pending_sibs[sib_idx]; + if (cc_cfg->cfg.sibs[sib_idx].len > 0 and pending_sib.is_in_window and pending_sib.n_tx < 4) { + uint32_t nof_tx = (sib_idx > 0) ? 
SRSLTE_MIN(srslte::ceil_div(cc_cfg->cfg.si_window_ms, 10), 4) : 4; + uint32_t n_sf = (tti_sched->get_tti_tx_dl() - pending_sibs[sib_idx].window_start); // Check if there is any SIB to tx - bool sib1_flag = (i == 0) and (current_sfn % 2) == 0 and current_sf_idx == 5; - bool other_sibs_flag = - (i > 0) and (n_sf >= (cc_cfg->cfg.si_window_ms / nof_tx) * pending_sibs[i].n_tx) and current_sf_idx == 9; + bool sib1_flag = (sib_idx == 0) and (current_sfn % 2) == 0 and current_sf_idx == 5; + bool other_sibs_flag = (sib_idx > 0) and + (n_sf >= (cc_cfg->cfg.si_window_ms / nof_tx) * pending_sibs[sib_idx].n_tx) and + current_sf_idx == 9; if (not sib1_flag and not other_sibs_flag) { continue; } - // Schedule SIB - tti_sched->alloc_bc(bc_aggr_level, i, pending_sibs[i].n_tx); - pending_sibs[i].n_tx++; + // Attempt different number of RBGs + bool success = false; + for (uint32_t nrbgs = 2; nrbgs < 5; ++nrbgs) { + rbg_interval rbg_interv = find_empty_rbg_interval(nrbgs, tti_sched->get_dl_mask()); + if (rbg_interv.empty()) { + break; + } + alloc_outcome_t ret = tti_sched->alloc_sib(bc_aggr_level, sib_idx, pending_sibs[sib_idx].n_tx, rbg_interv); + if (ret != alloc_outcome_t::INVALID_CODERATE) { + if (ret == alloc_outcome_t::SUCCESS) { + // SIB scheduled successfully + success = true; + pending_sibs[sib_idx].n_tx++; + } + break; + } + // Attempt again, but with more RBGs + } + if (not success) { + logger.warning("SCHED: Could not allocate SIB=%d, len=%d", sib_idx + 1, cc_cfg->cfg.sibs[sib_idx].len); + } } } } void bc_sched::alloc_paging(sf_sched* tti_sched) { - /* Allocate DCIs and RBGs for paging */ - if (rrc != nullptr) { - uint32_t paging_payload = 0; - if (rrc->is_paging_opportunity(current_tti.to_uint(), &paging_payload) and paging_payload > 0) { - tti_sched->alloc_paging(bc_aggr_level, paging_payload); + uint32_t paging_payload = 0; + if (rrc->is_paging_opportunity(current_tti.to_uint(), &paging_payload) and paging_payload > 0) { + alloc_outcome_t ret = alloc_outcome_t::ERROR; + for (uint32_t nrbgs = 2; nrbgs < 5; ++nrbgs) { + rbg_interval rbg_interv = find_empty_rbg_interval(nrbgs, tti_sched->get_dl_mask()); + + ret = tti_sched->alloc_paging(bc_aggr_level, paging_payload, rbg_interv); + if (ret == alloc_outcome_t::SUCCESS or ret == alloc_outcome_t::RB_COLLISION) { + break; + } + } + if (ret != alloc_outcome_t::SUCCESS) { + logger.warning( + "SCHED: Could not allocate Paging with payload length=%d, cause=%s", paging_payload, ret.to_string()); } } } @@ -143,10 +175,12 @@ void ra_sched::dl_sched(sf_sched* tti_sched) tti_point tti_tx_dl = tti_sched->get_tti_tx_dl(); rar_aggr_level = 2; - while (not pending_rars.empty()) { - pending_rar_t& rar = pending_rars.front(); + for (auto it = pending_rars.begin(); it != pending_rars.end();) { + auto& rar = *it; - // Discard all RARs out of the window. 
The first one inside the window is scheduled, if we can't we exit + // In case of RAR outside RAR window: + // - if window has passed, discard RAR + // - if window hasn't started, stop loop, as RARs are ordered by TTI srslte::tti_interval rar_window{rar.prach_tti + PRACH_RAR_OFFSET, rar.prach_tti + PRACH_RAR_OFFSET + cc_cfg->cfg.prach_rar_window}; if (not rar_window.contains(tti_tx_dl)) { @@ -159,34 +193,40 @@ void ra_sched::dl_sched(sf_sched* tti_sched) tti_tx_dl); srslte::console("%s\n", srslte::to_c_str(str_buffer)); logger.error("%s", srslte::to_c_str(str_buffer)); - // Remove from pending queue and get next one if window has passed already - pending_rars.pop_front(); + it = pending_rars.erase(it); continue; } - // If window not yet started do not look for more pending RARs return; } // Try to schedule DCI + RBGs for RAR Grant std::pair ret = tti_sched->alloc_rar(rar_aggr_level, rar); + + // If RAR allocation was successful: + // - in case all Msg3 grants were allocated, remove pending RAR + // - otherwise, erase only Msg3 grants that were allocated. + if (ret.first == alloc_outcome_t::SUCCESS) { + uint32_t nof_rar_allocs = ret.second; + if (nof_rar_allocs == rar.msg3_grant.size()) { + pending_rars.erase(it); + } else { + std::copy(rar.msg3_grant.begin() + nof_rar_allocs, rar.msg3_grant.end(), rar.msg3_grant.begin()); + rar.msg3_grant.resize(rar.msg3_grant.size() - nof_rar_allocs); + } + break; + } + + // If RAR allocation was not successful: + // - in case of unavailable RBGs, stop loop + // - otherwise, attempt to schedule next pending RAR + logger.info("SCHED: Could not allocate RAR for L=%d, cause=%s", rar_aggr_level, ret.first.to_string()); if (ret.first == alloc_outcome_t::RB_COLLISION) { // there are not enough RBs for RAR or Msg3 allocation. We can skip this TTI return; } - if (ret.first != alloc_outcome_t::SUCCESS) { - // try to scheduler next RAR with different RA-RNTI - continue; - } - uint32_t nof_rar_allocs = ret.second; - if (nof_rar_allocs == rar.msg3_grant.size()) { - // all RAR grants were allocated. Remove pending RAR - pending_rars.pop_front(); - } else { - // keep the RAR grants that were not scheduled, so we can schedule in next TTI - std::copy(rar.msg3_grant.begin() + nof_rar_allocs, rar.msg3_grant.end(), rar.msg3_grant.begin()); - rar.msg3_grant.resize(rar.msg3_grant.size() - nof_rar_allocs); - } + // For any other type of error, continue with next pending RAR + ++it; } } diff --git a/srsenb/src/stack/mac/sched_grid.cc b/srsenb/src/stack/mac/sched_grid.cc index 8054af1d4..1bbb5575c 100644 --- a/srsenb/src/stack/mac/sched_grid.cc +++ b/srsenb/src/stack/mac/sched_grid.cc @@ -202,20 +202,25 @@ sf_grid_t::dl_ctrl_alloc_t sf_grid_t::alloc_dl_ctrl(uint32_t aggr_idx, alloc_typ rbg_interval range{nof_rbgs - avail_rbg, nof_rbgs - avail_rbg + ((alloc_type == alloc_type_t::DL_RAR) ? 
rar_n_rbg : si_n_rbg)}; + return {alloc_dl_ctrl(aggr_idx, range, alloc_type), range}; +} + +alloc_outcome_t sf_grid_t::alloc_dl_ctrl(uint32_t aggr_idx, rbg_interval rbg_range, alloc_type_t alloc_type) +{ if (alloc_type != alloc_type_t::DL_RAR and alloc_type != alloc_type_t::DL_BC and alloc_type != alloc_type_t::DL_PCCH) { logger.error("SCHED: DL control allocations must be RAR/BC/PDCCH"); - return {alloc_outcome_t::ERROR, range}; + return alloc_outcome_t::ERROR; } - // Setup range starting from left - if (range.stop() > nof_rbgs) { - return {alloc_outcome_t::RB_COLLISION, range}; + // Setup rbg_range starting from left + if (rbg_range.stop() > nof_rbgs) { + return alloc_outcome_t::RB_COLLISION; } // allocate DCI and RBGs rbgmask_t new_mask(dl_mask.size()); - new_mask.fill(range.start(), range.stop()); - return {alloc_dl(aggr_idx, alloc_type, new_mask), range}; + new_mask.fill(rbg_range.start(), rbg_range.stop()); + return alloc_dl(aggr_idx, alloc_type, new_mask); } //! Allocates CCEs and RBs for a user DL data alloc. @@ -404,59 +409,60 @@ sf_sched::ctrl_code_t sf_sched::alloc_dl_ctrl(uint32_t aggr_lvl, uint32_t tbs_by return {ret.outcome, ctrl_alloc}; } -alloc_outcome_t sf_sched::alloc_bc(uint32_t aggr_lvl, uint32_t sib_idx, uint32_t sib_ntx) +alloc_outcome_t sf_sched::alloc_sib(uint32_t aggr_lvl, uint32_t sib_idx, uint32_t sib_ntx, rbg_interval rbgs) { - uint32_t sib_len = cc_cfg->cfg.sibs[sib_idx].len; - uint32_t rv = get_rvidx(sib_ntx); - ctrl_code_t ret = alloc_dl_ctrl(aggr_lvl, sib_len, SRSLTE_SIRNTI); - if (not ret.first) { - logger.warning("SCHED: Could not allocate SIB=%d, L=%d, len=%d, cause=%s", - sib_idx + 1, - aggr_lvl, - sib_len, - ret.first.to_string()); - return ret.first; + if (bc_allocs.size() >= sched_interface::MAX_BC_LIST) { + logger.warning("SCHED: Maximum number of Broadcast allocations reached"); + return alloc_outcome_t::ERROR; } + bc_alloc_t bc_alloc; - // BC allocation successful - bc_alloc_t bc_alloc(ret.second); + // Generate DCI for SIB + if (not generate_sib_dci(bc_alloc.bc_grant, get_tti_tx_dl(), sib_idx, sib_ntx, rbgs, *cc_cfg, tti_alloc.get_cfi())) { + return alloc_outcome_t::INVALID_CODERATE; + } - if (not generate_sib_dci( - bc_alloc.bc_grant, get_tti_tx_dl(), sib_idx, sib_ntx, ret.second.rbg_range, *cc_cfg, tti_alloc.get_cfi())) { - logger.warning("SCHED: FAIL"); - return alloc_outcome_t::ERROR; + // Allocate SIB RBGs and PDCCH + alloc_outcome_t ret = tti_alloc.alloc_dl_ctrl(aggr_lvl, rbgs, alloc_type_t::DL_BC); + if (ret != alloc_outcome_t::SUCCESS) { + return ret; } + // Allocation Successful + bc_alloc.dci_idx = tti_alloc.get_pdcch_grid().nof_allocs() - 1; + bc_alloc.rbg_range = rbgs; + bc_alloc.req_bytes = cc_cfg->cfg.sibs[sib_idx].len; bc_allocs.push_back(bc_alloc); - return ret.first; + return alloc_outcome_t::SUCCESS; } -alloc_outcome_t sf_sched::alloc_paging(uint32_t aggr_lvl, uint32_t paging_payload) +alloc_outcome_t sf_sched::alloc_paging(uint32_t aggr_lvl, uint32_t paging_payload, rbg_interval rbgs) { if (bc_allocs.size() >= sched_interface::MAX_BC_LIST) { logger.warning("SCHED: Maximum number of Broadcast allocations reached"); return alloc_outcome_t::ERROR; } - ctrl_code_t ret = alloc_dl_ctrl(aggr_lvl, paging_payload, SRSLTE_PRNTI); - if (not ret.first) { - logger.warning( - "SCHED: Could not allocate Paging with payload length=%d, cause=%s", paging_payload, ret.first.to_string()); - return ret.first; - } + bc_alloc_t bc_alloc; - // Paging allocation successful - bc_alloc_t bc_alloc(ret.second); + // Generate DCI for Paging message + if (not 
generate_paging_dci(bc_alloc.bc_grant, get_tti_tx_dl(), paging_payload, rbgs, *cc_cfg, tti_alloc.get_cfi())) { + return alloc_outcome_t::INVALID_CODERATE; + } - if (not generate_paging_dci( - bc_alloc.bc_grant, get_tti_tx_dl(), paging_payload, ret.second.rbg_range, *cc_cfg, tti_alloc.get_cfi())) { - logger.warning("SCHED: FAIL"); - return alloc_outcome_t::ERROR; + // Allocate Paging RBGs and PDCCH + alloc_outcome_t ret = tti_alloc.alloc_dl_ctrl(aggr_lvl, rbgs, alloc_type_t::DL_PCCH); + if (ret != alloc_outcome_t::SUCCESS) { + return ret; } + // Allocation Successful + bc_alloc.dci_idx = tti_alloc.get_pdcch_grid().nof_allocs() - 1; + bc_alloc.rbg_range = rbgs; + bc_alloc.req_bytes = paging_payload; bc_allocs.push_back(bc_alloc); - return ret.first; + return alloc_outcome_t::SUCCESS; } std::pair sf_sched::alloc_rar(uint32_t aggr_lvl, const pending_rar_t& rar) @@ -499,7 +505,6 @@ std::pair sf_sched::alloc_rar(uint32_t aggr_lvl, cons return ret; } } else if (ret.first != alloc_outcome_t::RB_COLLISION) { - logger.warning("SCHED: Could not allocate RAR for L=%d, cause=%s", aggr_lvl, ret.first.to_string()); return ret; } diff --git a/srsenb/src/stack/mac/schedulers/sched_base.cc b/srsenb/src/stack/mac/schedulers/sched_base.cc index 8c7f5c886..107374325 100644 --- a/srsenb/src/stack/mac/schedulers/sched_base.cc +++ b/srsenb/src/stack/mac/schedulers/sched_base.cc @@ -60,7 +60,12 @@ rbgmask_t find_available_rb_mask(const rbgmask_t& in_mask, uint32_t max_size) return localmask; } -rbgmask_t compute_user_rbgmask_greedy(uint32_t max_nof_rbgs, bool is_contiguous, const rbgmask_t& current_mask) +rbg_interval find_empty_rbg_interval(uint32_t max_nof_rbgs, const rbgmask_t& current_mask) +{ + return find_contiguous_interval(current_mask, max_nof_rbgs); +} + +rbgmask_t compute_rbgmask_greedy(uint32_t max_nof_rbgs, bool is_contiguous, const rbgmask_t& current_mask) { // Allocate enough RBs that accommodate pending data rbgmask_t newtx_mask(current_mask.size()); @@ -119,7 +124,7 @@ alloc_outcome_t try_dl_retx_alloc(sf_sched& tti_sched, sched_ue& ue, const dl_ha // If previous mask does not fit, find another with exact same number of rbgs size_t nof_rbg = retx_mask.count(); bool is_contiguous_alloc = ue.get_dci_format() == SRSLTE_DCI_FORMAT1A; - retx_mask = compute_user_rbgmask_greedy(nof_rbg, is_contiguous_alloc, tti_sched.get_dl_mask()); + retx_mask = compute_rbgmask_greedy(nof_rbg, is_contiguous_alloc, tti_sched.get_dl_mask()); if (retx_mask.count() == nof_rbg) { return tti_sched.alloc_dl_user(&ue, retx_mask, h.get_id()); } @@ -147,7 +152,7 @@ try_dl_newtx_alloc_greedy(sf_sched& tti_sched, sched_ue& ue, const dl_harq_proc& // Find RBG mask that accommodates pending data bool is_contiguous_alloc = ue.get_dci_format() == SRSLTE_DCI_FORMAT1A; - rbgmask_t newtxmask = compute_user_rbgmask_greedy(req_rbgs.stop(), is_contiguous_alloc, current_mask); + rbgmask_t newtxmask = compute_rbgmask_greedy(req_rbgs.stop(), is_contiguous_alloc, current_mask); if (newtxmask.none() or newtxmask.count() < req_rbgs.start()) { return alloc_outcome_t::RB_COLLISION; } diff --git a/srsenb/src/stack/rrc/rrc_mobility.cc b/srsenb/src/stack/rrc/rrc_mobility.cc index afe7f6b46..73291989c 100644 --- a/srsenb/src/stack/rrc/rrc_mobility.cc +++ b/srsenb/src/stack/rrc/rrc_mobility.cc @@ -479,6 +479,7 @@ void rrc::ue::rrc_mobility::fill_mobility_reconf_common(asn1::rrc::dl_dcch_msg_s intralte.next_hop_chaining_count = rrc_ue->ue_security_cfg.get_ncc(); // Add MeasConfig of target cell + rrc_ue->current_ue_cfg = {}; recfg_r8.meas_cfg_present = 
apply_meascfg_updates( recfg_r8.meas_cfg, rrc_ue->current_ue_cfg.meas_cfg, rrc_ue->ue_cell_list, src_dl_earfcn, src_pci); diff --git a/srsenb/test/mac/sched_test_common.cc b/srsenb/test/mac/sched_test_common.cc index 4efae7a85..98668c053 100644 --- a/srsenb/test/mac/sched_test_common.cc +++ b/srsenb/test/mac/sched_test_common.cc @@ -17,6 +17,7 @@ #include "sched_common_test_suite.h" #include "sched_ue_ded_test_suite.h" #include "srslte/common/test_common.h" +#include "srslte/interfaces/enb_rrc_interfaces.h" using namespace srsenb; @@ -42,6 +43,15 @@ std::default_random_engine& ::srsenb::get_rand_gen() return rand_gen; } +struct rrc_dummy : public rrc_interface_mac { +public: + int add_user(uint16_t rnti, const sched_interface::ue_cfg_t& init_ue_cfg) { return SRSLTE_SUCCESS; } + void upd_user(uint16_t new_rnti, uint16_t old_rnti) {} + void set_activity_user(uint16_t rnti) {} + bool is_paging_opportunity(uint32_t tti, uint32_t* payload_len) { return false; } + uint8_t* read_pdu_bcch_dlsch(const uint8_t enb_cc_idx, const uint32_t sib_index) { return nullptr; } +}; + /*********************** * User State Tester ***********************/ @@ -116,6 +126,10 @@ sched_result_stats::user_stats* sched_result_stats::get_user(uint16_t rnti) * Common Sched Tester **********************/ +common_sched_tester::common_sched_tester() : logger(srslog::fetch_basic_logger("TEST")) {} + +common_sched_tester::~common_sched_tester() {} + const sched::ue_cfg_t* common_sched_tester::get_current_ue_cfg(uint16_t rnti) const { return sched_sim->get_user_cfg(rnti); @@ -124,8 +138,9 @@ const sched::ue_cfg_t* common_sched_tester::get_current_ue_cfg(uint16_t rnti) co int common_sched_tester::sim_cfg(sim_sched_args args) { sim_args0 = std::move(args); + rrc_ptr.reset(new rrc_dummy()); - sched::init(nullptr, sim_args0.sched_args); + sched::init(rrc_ptr.get(), sim_args0.sched_args); sched_sim.reset(new sched_sim_random{this, sim_args0.cell_cfg}); sched_stats.reset(new sched_result_stats{sim_args0.cell_cfg}); diff --git a/srsenb/test/mac/sched_test_common.h b/srsenb/test/mac/sched_test_common.h index d0b072048..59ac21e31 100644 --- a/srsenb/test/mac/sched_test_common.h +++ b/srsenb/test/mac/sched_test_common.h @@ -79,8 +79,8 @@ public: std::vector ul_sched_result; }; - common_sched_tester() : logger(srslog::fetch_basic_logger("TEST")) {} - ~common_sched_tester() override = default; + common_sched_tester(); + ~common_sched_tester() override; const ue_cfg_t* get_current_ue_cfg(uint16_t rnti) const; @@ -114,6 +114,8 @@ public: protected: virtual void new_test_tti(); virtual void before_sched() {} + + std::unique_ptr rrc_ptr; }; } // namespace srsenb From 1f35c4dc8ba52e9aeab4f5c8a95429ba68e5841d Mon Sep 17 00:00:00 2001 From: Francisco Date: Mon, 15 Mar 2021 17:13:30 +0000 Subject: [PATCH 23/64] sched, feature enhancement, bugfix - allow RAR DL grants with variable PRB size Some bug fixes had to be solved: - the cfi cannot be dynamic once we set a SIB/paging/RAR allocation. 
This is to avoid the effective coderate exceeding its maximum
- the previous bugfix required adding the feature to cancel the last PDCCH+PDSCH allocation
---
 srsenb/hdr/stack/mac/sched_carrier.h | 2 +
 srsenb/hdr/stack/mac/sched_common.h | 4 +
 srsenb/hdr/stack/mac/sched_grid.h | 39 ++--
 .../stack/mac/sched_phy_ch/sf_cch_allocator.h | 5 +-
 srsenb/src/stack/mac/sched_carrier.cc | 163 +++++++++-------
 srsenb/src/stack/mac/sched_grid.cc | 179 +++++++-----------
 .../src/stack/mac/sched_phy_ch/sched_dci.cc | 6 +-
 .../mac/sched_phy_ch/sf_cch_allocator.cc | 41 +++-
 .../src/stack/mac/schedulers/sched_time_pf.cc | 2 +-
 .../src/stack/mac/schedulers/sched_time_rr.cc | 3 +-
 srsenb/test/mac/sched_ca_test.cc | 14 +-
 srsenb/test/mac/sched_common_test_suite.cc | 3 +-
 srsenb/test/mac/sched_ue_ded_test_suite.cc | 4 +-
 13 files changed, 253 insertions(+), 212 deletions(-)

diff --git a/srsenb/hdr/stack/mac/sched_carrier.h b/srsenb/hdr/stack/mac/sched_carrier.h
index 669ab5e02..01711ec98 100644
--- a/srsenb/hdr/stack/mac/sched_carrier.h
+++ b/srsenb/hdr/stack/mac/sched_carrier.h
@@ -115,6 +115,8 @@ public:
   void reset();

 private:
+  alloc_outcome_t allocate_pending_rar(sf_sched* tti_sched, const pending_rar_t& rar, uint32_t& nof_grants_alloc);
+
   // args
   srslog::basic_logger& logger;
   const sched_cell_params_t* cc_cfg = nullptr;
diff --git a/srsenb/hdr/stack/mac/sched_common.h b/srsenb/hdr/stack/mac/sched_common.h
index 12a908e5d..f9db0070c 100644
--- a/srsenb/hdr/stack/mac/sched_common.h
+++ b/srsenb/hdr/stack/mac/sched_common.h
@@ -106,6 +106,10 @@ struct prb_interval : public srslte::interval<uint32_t> {

 /// Type of Allocation stored in PDSCH/PUSCH
 enum class alloc_type_t { DL_BC, DL_PCCH, DL_RAR, DL_DATA, UL_DATA };
+inline bool is_dl_ctrl_alloc(alloc_type_t a)
+{
+  return a == alloc_type_t::DL_BC or a == alloc_type_t::DL_PCCH or a == alloc_type_t::DL_RAR;
+}

 } // namespace srsenb

diff --git a/srsenb/hdr/stack/mac/sched_grid.h b/srsenb/hdr/stack/mac/sched_grid.h
index 58c03acf7..d77e2e1d4 100644
--- a/srsenb/hdr/stack/mac/sched_grid.h
+++ b/srsenb/hdr/stack/mac/sched_grid.h
@@ -37,7 +37,8 @@ struct alloc_outcome_t {
     NO_DATA,
     INVALID_PRBMASK,
     INVALID_CARRIER,
-    INVALID_CODERATE
+    CODERATE_TOO_HIGH,
+    NOF_ALLOCS_LIMIT
   };
   result_enum result = ERROR;
   alloc_outcome_t() = default;
@@ -101,10 +102,11 @@ public:
   void init(const sched_cell_params_t& cell_params_);
   void new_tti(tti_point tti_rx);

-  dl_ctrl_alloc_t alloc_dl_ctrl(uint32_t aggr_lvl, alloc_type_t alloc_type);
   alloc_outcome_t alloc_dl_ctrl(uint32_t aggr_lvl, rbg_interval rbg_range, alloc_type_t alloc_type);
   alloc_outcome_t alloc_dl_data(sched_ue* user, const rbgmask_t& user_mask, bool has_pusch_grant);
   bool reserve_dl_rbgs(uint32_t start_rbg, uint32_t end_rbg);
+  void rem_last_alloc_dl(rbg_interval rbgs);
+
   alloc_outcome_t alloc_ul_data(sched_ue* user, prb_interval alloc, bool needs_pdcch, bool strict = true);
   alloc_outcome_t reserve_ul_prbs(const prbmask_t& prbmask, bool strict);
   alloc_outcome_t reserve_ul_prbs(prb_interval alloc, bool strict);
@@ -127,8 +129,7 @@ private:
   // consts
   const sched_cell_params_t* cc_cfg = nullptr;
   srslog::basic_logger& logger;
-  uint32_t nof_rbgs = 0;
-  uint32_t si_n_rbg = 0, rar_n_rbg = 0;
+  uint32_t nof_rbgs = 0;
   uint32_t pucch_nrb = 0;
   prbmask_t pucch_mask;

@@ -137,9 +138,8 @@ private:
   // internal state
   tti_point tti_rx;
-  uint32_t avail_rbg = 0;
-  rbgmask_t dl_mask = {};
-  prbmask_t ul_mask = {};
+  rbgmask_t dl_mask = {};
+  prbmask_t ul_mask = {};
 };

 /** Description: Stores the RAR, broadcast, paging, DL data, UL data allocations for the 
given subframe @@ -157,8 +157,6 @@ public: struct rar_alloc_t { sf_sched::ctrl_alloc_t alloc_data; sched_interface::dl_sched_rar_t rar_grant; - rar_alloc_t(const sf_sched::ctrl_alloc_t& c, const sched_interface::dl_sched_rar_t& r) : alloc_data(c), rar_grant(r) - {} }; struct bc_alloc_t : public ctrl_alloc_t { sched_interface::dl_sched_bc_t bc_grant; @@ -198,7 +196,7 @@ public: // DL alloc methods alloc_outcome_t alloc_sib(uint32_t aggr_lvl, uint32_t sib_idx, uint32_t sib_ntx, rbg_interval rbgs); alloc_outcome_t alloc_paging(uint32_t aggr_lvl, uint32_t paging_payload, rbg_interval rbgs); - std::pair alloc_rar(uint32_t aggr_lvl, const pending_rar_t& rar_grant); + alloc_outcome_t alloc_rar(uint32_t aggr_lvl, const pending_rar_t& rar_grant, rbg_interval rbgs, uint32_t nof_grants); bool reserve_dl_rbgs(uint32_t rbg_start, uint32_t rbg_end) { return tti_alloc.reserve_dl_rbgs(rbg_start, rbg_end); } const std::vector& get_allocated_rars() const { return rar_allocs; } @@ -228,17 +226,16 @@ public: const sched_cell_params_t* get_cc_cfg() const { return cc_cfg; } private: - ctrl_code_t alloc_dl_ctrl(uint32_t aggr_lvl, uint32_t tbs_bytes, uint16_t rnti); - void set_bc_sched_result(const sf_cch_allocator::alloc_result_t& dci_result, - sched_interface::dl_sched_res_t* dl_result); - void set_rar_sched_result(const sf_cch_allocator::alloc_result_t& dci_result, - sched_interface::dl_sched_res_t* dl_result); - void set_dl_data_sched_result(const sf_cch_allocator::alloc_result_t& dci_result, - sched_interface::dl_sched_res_t* dl_result, - sched_ue_list& ue_list); - void set_ul_sched_result(const sf_cch_allocator::alloc_result_t& dci_result, - sched_interface::ul_sched_res_t* ul_result, - sched_ue_list& ue_list); + void set_bc_sched_result(const sf_cch_allocator::alloc_result_t& dci_result, + sched_interface::dl_sched_res_t* dl_result); + void set_rar_sched_result(const sf_cch_allocator::alloc_result_t& dci_result, + sched_interface::dl_sched_res_t* dl_result); + void set_dl_data_sched_result(const sf_cch_allocator::alloc_result_t& dci_result, + sched_interface::dl_sched_res_t* dl_result, + sched_ue_list& ue_list); + void set_ul_sched_result(const sf_cch_allocator::alloc_result_t& dci_result, + sched_interface::ul_sched_res_t* ul_result, + sched_ue_list& ue_list); // consts const sched_cell_params_t* cc_cfg = nullptr; diff --git a/srsenb/hdr/stack/mac/sched_phy_ch/sf_cch_allocator.h b/srsenb/hdr/stack/mac/sched_phy_ch/sf_cch_allocator.h index aa86fc6f0..d7167bbfc 100644 --- a/srsenb/hdr/stack/mac/sched_phy_ch/sf_cch_allocator.h +++ b/srsenb/hdr/stack/mac/sched_phy_ch/sf_cch_allocator.h @@ -48,6 +48,8 @@ public: */ bool alloc_dci(alloc_type_t alloc_type, uint32_t aggr_idx, sched_ue* user = nullptr, bool has_pusch_grant = false); + void rem_last_dci(); + // getters uint32_t get_cfi() const { return current_cfix + 1; } void get_allocs(alloc_result_t* vec = nullptr, pdcch_mask_t* tot_mask = nullptr, size_t idx = 0) const; @@ -106,7 +108,8 @@ private: // tti vars tti_point tti_rx; - uint32_t current_cfix = 0; + uint32_t current_cfix = 0; + uint32_t current_max_cfix = 0; std::vector alloc_trees; ///< List of PDCCH alloc trees, where index is the cfi index std::vector dci_record_list; ///< Keeps a record of all the PDCCH allocations done so far }; diff --git a/srsenb/src/stack/mac/sched_carrier.cc b/srsenb/src/stack/mac/sched_carrier.cc index 6070cc34a..c5930ab4e 100644 --- a/srsenb/src/stack/mac/sched_carrier.cc +++ b/srsenb/src/stack/mac/sched_carrier.cc @@ -88,67 +88,73 @@ void 
bc_sched::update_si_windows(sf_sched* tti_sched) void bc_sched::alloc_sibs(sf_sched* tti_sched) { - const uint32_t max_nof_prbs_sib = 4; - uint32_t current_sf_idx = tti_sched->get_tti_tx_dl().sf_idx(); - uint32_t current_sfn = tti_sched->get_tti_tx_dl().sfn(); + uint32_t current_sf_idx = tti_sched->get_tti_tx_dl().sf_idx(); + uint32_t current_sfn = tti_sched->get_tti_tx_dl().sfn(); for (uint32_t sib_idx = 0; sib_idx < pending_sibs.size(); sib_idx++) { sched_sib_t& pending_sib = pending_sibs[sib_idx]; - if (cc_cfg->cfg.sibs[sib_idx].len > 0 and pending_sib.is_in_window and pending_sib.n_tx < 4) { - uint32_t nof_tx = (sib_idx > 0) ? SRSLTE_MIN(srslte::ceil_div(cc_cfg->cfg.si_window_ms, 10), 4) : 4; - uint32_t n_sf = (tti_sched->get_tti_tx_dl() - pending_sibs[sib_idx].window_start); - - // Check if there is any SIB to tx - bool sib1_flag = (sib_idx == 0) and (current_sfn % 2) == 0 and current_sf_idx == 5; - bool other_sibs_flag = (sib_idx > 0) and - (n_sf >= (cc_cfg->cfg.si_window_ms / nof_tx) * pending_sibs[sib_idx].n_tx) and - current_sf_idx == 9; - if (not sib1_flag and not other_sibs_flag) { - continue; - } + // Check if SIB is configured and within window + if (cc_cfg->cfg.sibs[sib_idx].len == 0 or not pending_sib.is_in_window or pending_sib.n_tx >= 4) { + continue; + } - // Attempt different number of RBGs - bool success = false; - for (uint32_t nrbgs = 2; nrbgs < 5; ++nrbgs) { - rbg_interval rbg_interv = find_empty_rbg_interval(nrbgs, tti_sched->get_dl_mask()); - if (rbg_interv.empty()) { - break; - } - alloc_outcome_t ret = tti_sched->alloc_sib(bc_aggr_level, sib_idx, pending_sibs[sib_idx].n_tx, rbg_interv); - if (ret != alloc_outcome_t::INVALID_CODERATE) { - if (ret == alloc_outcome_t::SUCCESS) { - // SIB scheduled successfully - success = true; - pending_sibs[sib_idx].n_tx++; - } - break; - } - // Attempt again, but with more RBGs + // Check if subframe index is the correct one for SIB transmission + uint32_t nof_tx = (sib_idx > 0) ? SRSLTE_MIN(srslte::ceil_div(cc_cfg->cfg.si_window_ms, 10), 4) : 4; + uint32_t n_sf = (tti_sched->get_tti_tx_dl() - pending_sibs[sib_idx].window_start); + bool sib1_flag = (sib_idx == 0) and (current_sfn % 2) == 0 and current_sf_idx == 5; + bool other_sibs_flag = (sib_idx > 0) and + (n_sf >= (cc_cfg->cfg.si_window_ms / nof_tx) * pending_sibs[sib_idx].n_tx) and + current_sf_idx == 9; + if (not sib1_flag and not other_sibs_flag) { + continue; + } + + // Attempt PDSCH grants with increasing number of RBGs + alloc_outcome_t ret = alloc_outcome_t::CODERATE_TOO_HIGH; + for (uint32_t nrbgs = 1; nrbgs < cc_cfg->nof_rbgs and ret == alloc_outcome_t::CODERATE_TOO_HIGH; ++nrbgs) { + rbg_interval rbg_interv = find_empty_rbg_interval(nrbgs, tti_sched->get_dl_mask()); + if (rbg_interv.length() != nrbgs) { + ret = alloc_outcome_t::RB_COLLISION; + break; } - if (not success) { - logger.warning("SCHED: Could not allocate SIB=%d, len=%d", sib_idx + 1, cc_cfg->cfg.sibs[sib_idx].len); + ret = tti_sched->alloc_sib(bc_aggr_level, sib_idx, pending_sibs[sib_idx].n_tx, rbg_interv); + if (ret == alloc_outcome_t::SUCCESS) { + // SIB scheduled successfully + pending_sibs[sib_idx].n_tx++; } } + if (ret != alloc_outcome_t::SUCCESS) { + logger.warning("SCHED: Could not allocate SIB=%d, len=%d. 
Cause: %s", + sib_idx + 1, + cc_cfg->cfg.sibs[sib_idx].len, + ret.to_string()); + } } } void bc_sched::alloc_paging(sf_sched* tti_sched) { uint32_t paging_payload = 0; - if (rrc->is_paging_opportunity(current_tti.to_uint(), &paging_payload) and paging_payload > 0) { - alloc_outcome_t ret = alloc_outcome_t::ERROR; - for (uint32_t nrbgs = 2; nrbgs < 5; ++nrbgs) { - rbg_interval rbg_interv = find_empty_rbg_interval(nrbgs, tti_sched->get_dl_mask()); - ret = tti_sched->alloc_paging(bc_aggr_level, paging_payload, rbg_interv); - if (ret == alloc_outcome_t::SUCCESS or ret == alloc_outcome_t::RB_COLLISION) { - break; - } - } - if (ret != alloc_outcome_t::SUCCESS) { - logger.warning( - "SCHED: Could not allocate Paging with payload length=%d, cause=%s", paging_payload, ret.to_string()); + // Check if pending Paging message + if (not rrc->is_paging_opportunity(tti_sched->get_tti_tx_dl().to_uint(), &paging_payload) or paging_payload == 0) { + return; + } + + alloc_outcome_t ret = alloc_outcome_t::CODERATE_TOO_HIGH; + for (uint32_t nrbgs = 1; nrbgs < cc_cfg->nof_rbgs and ret == alloc_outcome_t::CODERATE_TOO_HIGH; ++nrbgs) { + rbg_interval rbg_interv = find_empty_rbg_interval(nrbgs, tti_sched->get_dl_mask()); + if (rbg_interv.length() != nrbgs) { + ret = alloc_outcome_t::RB_COLLISION; + break; } + + ret = tti_sched->alloc_paging(bc_aggr_level, paging_payload, rbg_interv); + } + + if (ret != alloc_outcome_t::SUCCESS) { + logger.warning( + "SCHED: Could not allocate Paging with payload length=%d, cause=%s", paging_payload, ret.to_string()); } } @@ -167,6 +173,32 @@ ra_sched::ra_sched(const sched_cell_params_t& cfg_, sched_ue_list& ue_db_) : cc_cfg(&cfg_), logger(srslog::fetch_basic_logger("MAC")), ue_db(&ue_db_) {} +alloc_outcome_t +ra_sched::allocate_pending_rar(sf_sched* tti_sched, const pending_rar_t& rar, uint32_t& nof_grants_alloc) +{ + alloc_outcome_t ret = alloc_outcome_t::ERROR; + for (nof_grants_alloc = rar.msg3_grant.size(); nof_grants_alloc > 0; nof_grants_alloc--) { + ret = alloc_outcome_t::CODERATE_TOO_HIGH; + for (uint32_t nrbg = 1; nrbg < cc_cfg->nof_rbgs and ret == alloc_outcome_t::CODERATE_TOO_HIGH; ++nrbg) { + rbg_interval rbg_interv = find_empty_rbg_interval(nrbg, tti_sched->get_dl_mask()); + if (rbg_interv.length() == nrbg) { + ret = tti_sched->alloc_rar(rar_aggr_level, rar, rbg_interv, nof_grants_alloc); + } else { + ret = alloc_outcome_t::RB_COLLISION; + } + } + + // If allocation was not successful because there were not enough RBGs, try allocating fewer Msg3 grants + if (ret != alloc_outcome_t::CODERATE_TOO_HIGH and ret != alloc_outcome_t::RB_COLLISION) { + break; + } + } + if (ret != alloc_outcome_t::SUCCESS) { + logger.info("SCHED: RAR allocation for L=%d was postponed. Cause=%s", rar_aggr_level, ret.to_string()); + } + return ret; +} + // Schedules RAR // On every call to this function, we schedule the oldest RAR which is still within the window. If outside the window we // discard it. 
@@ -192,7 +224,7 @@ void ra_sched::dl_sched(sf_sched* tti_sched) rar_window, tti_tx_dl); srslte::console("%s\n", srslte::to_c_str(str_buffer)); - logger.error("%s", srslte::to_c_str(str_buffer)); + logger.warning("%s", srslte::to_c_str(str_buffer)); it = pending_rars.erase(it); continue; } @@ -200,33 +232,30 @@ void ra_sched::dl_sched(sf_sched* tti_sched) } // Try to schedule DCI + RBGs for RAR Grant - std::pair ret = tti_sched->alloc_rar(rar_aggr_level, rar); + uint32_t nof_rar_allocs = 0; + alloc_outcome_t ret = allocate_pending_rar(tti_sched, rar, nof_rar_allocs); + + if (ret == alloc_outcome_t::SUCCESS) { + // If RAR allocation was successful: + // - in case all Msg3 grants were allocated, remove pending RAR, and continue with following RAR + // - otherwise, erase only Msg3 grants that were allocated, and stop iteration - // If RAR allocation was successful: - // - in case all Msg3 grants were allocated, remove pending RAR - // - otherwise, erase only Msg3 grants that were allocated. - if (ret.first == alloc_outcome_t::SUCCESS) { - uint32_t nof_rar_allocs = ret.second; if (nof_rar_allocs == rar.msg3_grant.size()) { - pending_rars.erase(it); + it = pending_rars.erase(it); } else { std::copy(rar.msg3_grant.begin() + nof_rar_allocs, rar.msg3_grant.end(), rar.msg3_grant.begin()); rar.msg3_grant.resize(rar.msg3_grant.size() - nof_rar_allocs); + break; } - break; - } - - // If RAR allocation was not successful: - // - in case of unavailable RBGs, stop loop - // - otherwise, attempt to schedule next pending RAR - logger.info("SCHED: Could not allocate RAR for L=%d, cause=%s", rar_aggr_level, ret.first.to_string()); - if (ret.first == alloc_outcome_t::RB_COLLISION) { - // there are not enough RBs for RAR or Msg3 allocation. We can skip this TTI - return; + } else { + // If RAR allocation was not successful: + // - in case of unavailable PDCCH space, try next pending RAR allocation + // - otherwise, stop iteration + if (ret != alloc_outcome_t::DCI_COLLISION) { + break; + } + ++it; } - - // For any other type of error, continue with next pending RAR - ++it; } } diff --git a/srsenb/src/stack/mac/sched_grid.cc b/srsenb/src/stack/mac/sched_grid.cc index 1bbb5575c..78bf9796e 100644 --- a/srsenb/src/stack/mac/sched_grid.cc +++ b/srsenb/src/stack/mac/sched_grid.cc @@ -22,7 +22,7 @@ const char* alloc_outcome_t::to_string() const case SUCCESS: return "success"; case DCI_COLLISION: - return "dci_collision"; + return "PDCCH position not available"; case RB_COLLISION: return "rb_collision"; case ERROR: @@ -41,6 +41,10 @@ const char* alloc_outcome_t::to_string() const return "invalid rbg mask"; case INVALID_CARRIER: return "invalid eNB carrier"; + case CODERATE_TOO_HIGH: + return "Effective coderate is too high"; + case NOF_ALLOCS_LIMIT: + return "Max number of allocations reached"; default: break; } @@ -116,10 +120,8 @@ cc_sched_result* sched_result_list::get_cc(srslte::tti_point tti_rx, uint32_t en void sf_grid_t::init(const sched_cell_params_t& cell_params_) { - cc_cfg = &cell_params_; - nof_rbgs = cc_cfg->nof_rbgs; - si_n_rbg = srslte::ceil_div(4, cc_cfg->P); - rar_n_rbg = srslte::ceil_div(3, cc_cfg->P); + cc_cfg = &cell_params_; + nof_rbgs = cc_cfg->nof_rbgs; dl_mask.resize(nof_rbgs); ul_mask.resize(cc_cfg->nof_prb()); @@ -144,7 +146,6 @@ void sf_grid_t::new_tti(tti_point tti_rx_) dl_mask.reset(); ul_mask.reset(); - avail_rbg = nof_rbgs; // Reserve PRBs for PUCCH ul_mask |= pucch_mask; @@ -181,7 +182,7 @@ alloc_outcome_t sf_grid_t::alloc_dl(uint32_t aggr_idx, if (not pdcch_alloc.alloc_dci(alloc_type, 
aggr_idx, user, has_pusch_grant)) { if (user != nullptr) { if (logger.debug.enabled()) { - logger.debug("No space in PDCCH for rnti=0x%x DL tx. Current PDCCH allocation: %s", + logger.debug("No space in PDCCH for rnti=0x%x DL tx. Current PDCCH allocation:\n%s", user->get_rnti(), pdcch_alloc.result_to_string(true).c_str()); } @@ -191,20 +192,11 @@ alloc_outcome_t sf_grid_t::alloc_dl(uint32_t aggr_idx, // Allocate RBGs dl_mask |= alloc_mask; - avail_rbg -= alloc_mask.count(); return alloc_outcome_t::SUCCESS; } -//! Allocates CCEs and RBs for control allocs. It allocates RBs in a contiguous manner. -sf_grid_t::dl_ctrl_alloc_t sf_grid_t::alloc_dl_ctrl(uint32_t aggr_idx, alloc_type_t alloc_type) -{ - rbg_interval range{nof_rbgs - avail_rbg, - nof_rbgs - avail_rbg + ((alloc_type == alloc_type_t::DL_RAR) ? rar_n_rbg : si_n_rbg)}; - - return {alloc_dl_ctrl(aggr_idx, range, alloc_type), range}; -} - +/// Allocates CCEs and RBs for control allocs. It allocates RBs in a contiguous manner. alloc_outcome_t sf_grid_t::alloc_dl_ctrl(uint32_t aggr_idx, rbg_interval rbg_range, alloc_type_t alloc_type) { if (alloc_type != alloc_type_t::DL_RAR and alloc_type != alloc_type_t::DL_BC and @@ -252,7 +244,7 @@ alloc_outcome_t sf_grid_t::alloc_ul_data(sched_ue* user, prb_interval alloc, boo uint32_t aggr_idx = user->get_aggr_level(cc_cfg->enb_cc_idx, nof_bits); if (not pdcch_alloc.alloc_dci(alloc_type_t::UL_DATA, aggr_idx, user)) { if (logger.debug.enabled()) { - logger.debug("No space in PDCCH for rnti=0x%x UL tx. Current PDCCH allocation: %s", + logger.debug("No space in PDCCH for rnti=0x%x UL tx. Current PDCCH allocation:\n%s", user->get_rnti(), pdcch_alloc.result_to_string(true).c_str()); } @@ -271,6 +263,19 @@ bool sf_grid_t::reserve_dl_rbgs(uint32_t start_rbg, uint32_t end_rbg) return true; } +void sf_grid_t::rem_last_alloc_dl(rbg_interval rbgs) +{ + if (pdcch_alloc.nof_allocs() == 0) { + logger.error("Remove DL alloc called for empty Subframe RB grid"); + return; + } + + pdcch_alloc.rem_last_dci(); + rbgmask_t rbgmask(dl_mask.size()); + rbgmask.fill(rbgs.start(), rbgs.stop()); + dl_mask &= ~rbgmask; +} + alloc_outcome_t sf_grid_t::reserve_ul_prbs(prb_interval alloc, bool strict) { if (alloc.stop() > ul_mask.size()) { @@ -365,69 +370,36 @@ void sf_sched::new_tti(tti_point tti_rx_, sf_sched_result* cc_results_) bool sf_sched::is_dl_alloc(uint16_t rnti) const { - for (const auto& a : data_allocs) { - if (a.rnti == rnti) { - return true; - } - } - return false; + return std::any_of(data_allocs.begin(), data_allocs.end(), [rnti](const dl_alloc_t& u) { return u.rnti == rnti; }); } bool sf_sched::is_ul_alloc(uint16_t rnti) const { - for (const auto& a : ul_data_allocs) { - if (a.rnti == rnti) { - return true; - } - } - return false; -} - -sf_sched::ctrl_code_t sf_sched::alloc_dl_ctrl(uint32_t aggr_lvl, uint32_t tbs_bytes, uint16_t rnti) -{ - ctrl_alloc_t ctrl_alloc{}; - - // based on rnti, check which type of alloc - alloc_type_t alloc_type = alloc_type_t::DL_RAR; - if (rnti == SRSLTE_SIRNTI) { - alloc_type = alloc_type_t::DL_BC; - } else if (rnti == SRSLTE_PRNTI) { - alloc_type = alloc_type_t::DL_PCCH; - } - - /* Allocate space in the DL RBG and PDCCH grids */ - sf_grid_t::dl_ctrl_alloc_t ret = tti_alloc.alloc_dl_ctrl(aggr_lvl, alloc_type); - if (not ret.outcome) { - return {ret.outcome, ctrl_alloc}; - } - - // Allocation Successful - ctrl_alloc.dci_idx = tti_alloc.get_pdcch_grid().nof_allocs() - 1; - ctrl_alloc.rbg_range = ret.rbg_range; - ctrl_alloc.req_bytes = tbs_bytes; - - return {ret.outcome, ctrl_alloc}; + 
return std::any_of( + ul_data_allocs.begin(), ul_data_allocs.end(), [rnti](const ul_alloc_t& u) { return u.rnti == rnti; }); } alloc_outcome_t sf_sched::alloc_sib(uint32_t aggr_lvl, uint32_t sib_idx, uint32_t sib_ntx, rbg_interval rbgs) { if (bc_allocs.size() >= sched_interface::MAX_BC_LIST) { logger.warning("SCHED: Maximum number of Broadcast allocations reached"); - return alloc_outcome_t::ERROR; + return alloc_outcome_t::NOF_ALLOCS_LIMIT; } bc_alloc_t bc_alloc; - // Generate DCI for SIB - if (not generate_sib_dci(bc_alloc.bc_grant, get_tti_tx_dl(), sib_idx, sib_ntx, rbgs, *cc_cfg, tti_alloc.get_cfi())) { - return alloc_outcome_t::INVALID_CODERATE; - } - // Allocate SIB RBGs and PDCCH alloc_outcome_t ret = tti_alloc.alloc_dl_ctrl(aggr_lvl, rbgs, alloc_type_t::DL_BC); if (ret != alloc_outcome_t::SUCCESS) { return ret; } + // Generate DCI for SIB + if (not generate_sib_dci(bc_alloc.bc_grant, get_tti_tx_dl(), sib_idx, sib_ntx, rbgs, *cc_cfg, tti_alloc.get_cfi())) { + // Cancel on-going allocation + tti_alloc.rem_last_alloc_dl(rbgs); + return alloc_outcome_t::CODERATE_TOO_HIGH; + } + // Allocation Successful bc_alloc.dci_idx = tti_alloc.get_pdcch_grid().nof_allocs() - 1; bc_alloc.rbg_range = rbgs; @@ -441,21 +413,23 @@ alloc_outcome_t sf_sched::alloc_paging(uint32_t aggr_lvl, uint32_t paging_payloa { if (bc_allocs.size() >= sched_interface::MAX_BC_LIST) { logger.warning("SCHED: Maximum number of Broadcast allocations reached"); - return alloc_outcome_t::ERROR; + return alloc_outcome_t::NOF_ALLOCS_LIMIT; } bc_alloc_t bc_alloc; - // Generate DCI for Paging message - if (not generate_paging_dci(bc_alloc.bc_grant, get_tti_tx_dl(), paging_payload, rbgs, *cc_cfg, tti_alloc.get_cfi())) { - return alloc_outcome_t::INVALID_CODERATE; - } - // Allocate Paging RBGs and PDCCH alloc_outcome_t ret = tti_alloc.alloc_dl_ctrl(aggr_lvl, rbgs, alloc_type_t::DL_PCCH); if (ret != alloc_outcome_t::SUCCESS) { return ret; } + // Generate DCI for Paging message + if (not generate_paging_dci(bc_alloc.bc_grant, get_tti_tx_dl(), paging_payload, rbgs, *cc_cfg, tti_alloc.get_cfi())) { + // Cancel on-going allocation + tti_alloc.rem_last_alloc_dl(rbgs); + return alloc_outcome_t::CODERATE_TOO_HIGH; + } + // Allocation Successful bc_alloc.dci_idx = tti_alloc.get_pdcch_grid().nof_allocs() - 1; bc_alloc.rbg_range = rbgs; @@ -465,53 +439,44 @@ alloc_outcome_t sf_sched::alloc_paging(uint32_t aggr_lvl, uint32_t paging_payloa return alloc_outcome_t::SUCCESS; } -std::pair sf_sched::alloc_rar(uint32_t aggr_lvl, const pending_rar_t& rar) +alloc_outcome_t sf_sched::alloc_rar(uint32_t aggr_lvl, const pending_rar_t& rar, rbg_interval rbgs, uint32_t nof_grants) { - const uint32_t msg3_grant_size = 3; - std::pair ret = {alloc_outcome_t::ERROR, 0}; + static const uint32_t msg3_nof_prbs = 3; if (rar_allocs.size() >= sched_interface::MAX_RAR_LIST) { - logger.warning("SCHED: Maximum number of RAR allocations per TTI reached."); - return ret; + logger.info("SCHED: Maximum number of RAR allocations per TTI reached."); + return alloc_outcome_t::NOF_ALLOCS_LIMIT; } - for (uint32_t nof_grants = rar.msg3_grant.size(); nof_grants > 0; nof_grants--) { - uint32_t buf_rar = 7 * nof_grants + 1; // 1+6 bytes per RAR subheader+body and 1 byte for Backoff - uint32_t total_msg3_size = msg3_grant_size * nof_grants; + uint32_t buf_rar = 7 * nof_grants + 1; // 1+6 bytes per RAR subheader+body and 1 byte for Backoff + uint32_t total_ul_nof_prbs = msg3_nof_prbs * nof_grants; - // check if there is enough space for Msg3, try again with a lower number of grants - 
if (last_msg3_prb + total_msg3_size > max_msg3_prb) { - ret.first = alloc_outcome_t::RB_COLLISION; - continue; - } + // check if there is enough space for Msg3 + if (last_msg3_prb + total_ul_nof_prbs > max_msg3_prb) { + return alloc_outcome_t::RB_COLLISION; + } - // allocate RBs and PDCCH - sf_sched::ctrl_code_t ret2 = alloc_dl_ctrl(aggr_lvl, buf_rar, rar.ra_rnti); - ret.first = ret2.first.result; - ret.second = nof_grants; - - if (ret.first == alloc_outcome_t::SUCCESS) { - sched_interface::dl_sched_rar_t rar_grant; - if (generate_rar_dci(rar_grant, - get_tti_tx_dl(), - rar, - ret2.second.rbg_range, - nof_grants, - last_msg3_prb, - *cc_cfg, - tti_alloc.get_cfi())) { - // RAR allocation successful - rar_allocs.emplace_back(ret2.second, rar_grant); - last_msg3_prb += msg3_grant_size * nof_grants; - return ret; - } - } else if (ret.first != alloc_outcome_t::RB_COLLISION) { - return ret; - } + // allocate RBGs and PDCCH + alloc_outcome_t ret = tti_alloc.alloc_dl_ctrl(aggr_lvl, rbgs, alloc_type_t::DL_RAR); + if (ret != alloc_outcome_t::SUCCESS) { + return ret; + } - // if there was no space for the RAR, try again with a lower number of grants + // Generate DCI for RAR + rar_alloc_t rar_alloc; + if (not generate_rar_dci( + rar_alloc.rar_grant, get_tti_tx_dl(), rar, rbgs, nof_grants, last_msg3_prb, *cc_cfg, tti_alloc.get_cfi())) { + // Cancel on-going allocation + tti_alloc.rem_last_alloc_dl(rbgs); + return alloc_outcome_t::CODERATE_TOO_HIGH; } - logger.info("SCHED: RAR allocation postponed due to lack of RBs"); + // RAR allocation successful + rar_alloc.alloc_data.dci_idx = tti_alloc.get_pdcch_grid().nof_allocs() - 1; + rar_alloc.alloc_data.rbg_range = rbgs; + rar_alloc.alloc_data.req_bytes = buf_rar; + rar_allocs.push_back(rar_alloc); + last_msg3_prb += total_ul_nof_prbs * nof_grants; + return ret; } @@ -531,7 +496,7 @@ alloc_outcome_t sf_sched::alloc_dl_user(sched_ue* user, const rbgmask_t& user_ma { if (data_allocs.size() >= sched_interface::MAX_DATA_LIST) { logger.warning("SCHED: Maximum number of DL allocations reached"); - return alloc_outcome_t::ERROR; + return alloc_outcome_t::NOF_ALLOCS_LIMIT; } if (is_dl_alloc(user->get_rnti())) { diff --git a/srsenb/src/stack/mac/sched_phy_ch/sched_dci.cc b/srsenb/src/stack/mac/sched_phy_ch/sched_dci.cc index 6ae306b75..410cce896 100644 --- a/srsenb/src/stack/mac/sched_phy_ch/sched_dci.cc +++ b/srsenb/src/stack/mac/sched_phy_ch/sched_dci.cc @@ -319,8 +319,9 @@ void log_broadcast_allocation(const sched_interface::dl_sched_bc_t& bc, fmt::format_to(str_buffer, "{}", rbg_range); if (bc.type == sched_interface::dl_sched_bc_t::bc_type::BCCH) { - logger.debug("SCHED: SIB%d, rbgs=(%d,%d), dci=(%d,%d), rv=%d, len=%d, period=%d, mcs=%d", + logger.debug("SCHED: SIB%d, cc=%d, rbgs=(%d,%d), dci=(%d,%d), rv=%d, len=%d, period=%d, mcs=%d", bc.index + 1, + cell_params.enb_cc_idx, rbg_range.start(), rbg_range.stop(), bc.dci.location.L, @@ -330,8 +331,9 @@ void log_broadcast_allocation(const sched_interface::dl_sched_bc_t& bc, cell_params.cfg.sibs[bc.index].period_rf, bc.dci.tb[0].mcs_idx); } else { - logger.info("SCHED: PCH, rbgs=%s, dci=(%d,%d), tbs=%d, mcs=%d", + logger.info("SCHED: PCH, cc=%d, rbgs=%s, dci=(%d,%d), tbs=%d, mcs=%d", srslte::to_c_str(str_buffer), + cell_params.enb_cc_idx, bc.dci.location.L, bc.dci.location.ncce, bc.tbs, diff --git a/srsenb/src/stack/mac/sched_phy_ch/sf_cch_allocator.cc b/srsenb/src/stack/mac/sched_phy_ch/sf_cch_allocator.cc index 304978dac..ece42ed12 100644 --- a/srsenb/src/stack/mac/sched_phy_ch/sf_cch_allocator.cc +++ 
b/srsenb/src/stack/mac/sched_phy_ch/sf_cch_allocator.cc @@ -37,7 +37,8 @@ void sf_cch_allocator::new_tti(tti_point tti_rx_) t.reset(); } dci_record_list.clear(); - current_cfix = cc_cfg->sched_cfg->min_nof_ctrl_symbols - 1; + current_cfix = cc_cfg->sched_cfg->min_nof_ctrl_symbols - 1; + current_max_cfix = cc_cfg->sched_cfg->max_nof_ctrl_symbols - 1; } const cce_cfi_position_table* @@ -70,7 +71,7 @@ bool sf_cch_allocator::alloc_dci(alloc_type_t alloc_type, uint32_t aggr_idx, sch bool success; do { success = alloc_dci_record(record, get_cfi() - 1); - } while (not success and get_cfi() < cc_cfg->sched_cfg->max_nof_ctrl_symbols and set_cfi(get_cfi() + 1)); + } while (not success and current_cfix < current_max_cfix and set_cfi(get_cfi() + 1)); if (not success) { // DCI allocation failed. go back to original CFI @@ -82,9 +83,45 @@ bool sf_cch_allocator::alloc_dci(alloc_type_t alloc_type, uint32_t aggr_idx, sch // DCI record allocation successful dci_record_list.push_back(record); + + if (is_dl_ctrl_alloc(alloc_type)) { + // Dynamic CFI not yet supported for DL control allocations, as coderate can be exceeded + current_max_cfix = current_cfix; + } + return true; } +void sf_cch_allocator::rem_last_dci() +{ + assert(not dci_record_list.empty()); + + // Remove DCI record + dci_record_list.pop_back(); + + // Remove leaves of PDCCH position decisions + auto& tree = alloc_trees[current_cfix]; + tree.prev_end = tree.prev_start; + if (dci_record_list.empty()) { + tree.prev_start = 0; + } else { + tree.prev_start = tree.dci_alloc_tree[tree.prev_start].parent_idx; + // Discover other tree nodes with same level + while (tree.prev_start > 0) { + uint32_t count = 0; + while (tree.dci_alloc_tree[tree.prev_start - 1].parent_idx >= 0) { + count++; + } + if (count == dci_record_list.size()) { + tree.prev_start--; + } else { + break; + } + } + } + tree.dci_alloc_tree.erase(tree.dci_alloc_tree.begin() + tree.prev_end, tree.dci_alloc_tree.end()); +} + bool sf_cch_allocator::alloc_dci_record(const alloc_record_t& record, uint32_t cfix) { bool ret = false; diff --git a/srsenb/src/stack/mac/schedulers/sched_time_pf.cc b/srsenb/src/stack/mac/schedulers/sched_time_pf.cc index a7b31fb30..316e36acc 100644 --- a/srsenb/src/stack/mac/schedulers/sched_time_pf.cc +++ b/srsenb/src/stack/mac/schedulers/sched_time_pf.cc @@ -145,7 +145,7 @@ uint32_t sched_time_pf::try_ul_alloc(ue_ctxt& ue_ctxt, sched_ue& ue, sf_sched* t : 0; } if (code == alloc_outcome_t::DCI_COLLISION) { - logger.info("SCHED: Couldn't find space in PDCCH for UL retx of rnti=0x%x", ue.get_rnti()); + logger.info("SCHED: rnti=0x%x, cc=%d, Couldn't find space in PDCCH for UL tx", ue.get_rnti(), cc_cfg->enb_cc_idx); } return estim_tbs_bytes; } diff --git a/srsenb/src/stack/mac/schedulers/sched_time_rr.cc b/srsenb/src/stack/mac/schedulers/sched_time_rr.cc index 80d758d12..e06c406c0 100644 --- a/srsenb/src/stack/mac/schedulers/sched_time_rr.cc +++ b/srsenb/src/stack/mac/schedulers/sched_time_rr.cc @@ -142,7 +142,8 @@ void sched_time_rr::sched_ul_newtxs(sched_ue_list& ue_db, sf_sched* tti_sched, s } alloc_outcome_t ret = tti_sched->alloc_ul_user(&user, alloc); if (ret == alloc_outcome_t::DCI_COLLISION) { - logger.info("SCHED: Couldn't find space in PDCCH for UL tx of rnti=0x%x", user.get_rnti()); + logger.info( + "SCHED: rnti=0x%x, cc=%d, Couldn't find space in PDCCH for UL tx", user.get_rnti(), cc_cfg->enb_cc_idx); } } } diff --git a/srsenb/test/mac/sched_ca_test.cc b/srsenb/test/mac/sched_ca_test.cc index 5987fbe1e..9859a8481 100644 --- 
a/srsenb/test/mac/sched_ca_test.cc +++ b/srsenb/test/mac/sched_ca_test.cc @@ -17,7 +17,7 @@ using namespace srsenb; -uint32_t const seed = std::chrono::system_clock::now().time_since_epoch().count(); +uint32_t seed = std::chrono::system_clock::now().time_since_epoch().count(); /******************* * Logging * @@ -157,7 +157,7 @@ int test_scell_activation(uint32_t sim_number, test_scell_activation_params para } }; generate_data(20, 1.0, P_ul_sr, randf()); - tester.test_next_ttis(generator.tti_events); + TESTASSERT(tester.test_next_ttis(generator.tti_events) == SRSLTE_SUCCESS); // Event: Reconf Complete. Activate SCells. Check if CE correctly transmitted generator.step_tti(); @@ -169,7 +169,7 @@ int test_scell_activation(uint32_t sim_number, test_scell_activation_params para user->ue_sim_cfg->ue_cfg.supported_cc_list[i].active = true; user->ue_sim_cfg->ue_cfg.supported_cc_list[i].enb_cc_idx = cc_idxs[i]; } - tester.test_next_ttis(generator.tti_events); + TESTASSERT(tester.test_next_ttis(generator.tti_events) == SRSLTE_SUCCESS); auto activ_list = tester.get_enb_ue_cc_map(rnti1); for (uint32_t i = 0; i < cc_idxs.size(); ++i) { TESTASSERT(activ_list[i] >= 0); @@ -187,7 +187,7 @@ int test_scell_activation(uint32_t sim_number, test_scell_activation_params para } } generator.step_tti(); - tester.test_next_ttis(generator.tti_events); + TESTASSERT(tester.test_next_ttis(generator.tti_events) == SRSLTE_SUCCESS); } // Event: Wait for UE to receive and ack CE. Send cqi==0, which should not activate the SCell @@ -198,12 +198,12 @@ int test_scell_activation(uint32_t sim_number, test_scell_activation_params para generator.step_tti(); } } - tester.test_next_ttis(generator.tti_events); + TESTASSERT(tester.test_next_ttis(generator.tti_events) == SRSLTE_SUCCESS); // The UE should now have received the CE // Event: Generate a bit more data, it should *not* go through SCells until we send a CQI generate_data(5, P_dl, P_ul_sr, randf()); - tester.test_next_ttis(generator.tti_events); + TESTASSERT(tester.test_next_ttis(generator.tti_events) == SRSLTE_SUCCESS); TESTASSERT(tester.sched_stats->users[rnti1].tot_dl_sched_data[params.pcell_idx] > 0); TESTASSERT(tester.sched_stats->users[rnti1].tot_ul_sched_data[params.pcell_idx] > 0); for (uint32_t i = 1; i < cc_idxs.size(); ++i) { @@ -217,7 +217,7 @@ int test_scell_activation(uint32_t sim_number, test_scell_activation_params para tester.dl_cqi_info(tester.tti_rx.to_uint(), rnti1, cc_idxs[i], cqi); } generate_data(10, 1.0, 1.0, 1.0); - tester.test_next_ttis(generator.tti_events); + TESTASSERT(tester.test_next_ttis(generator.tti_events) == SRSLTE_SUCCESS); uint64_t tot_dl_sched_data = 0; uint64_t tot_ul_sched_data = 0; for (const auto& c : cc_idxs) { diff --git a/srsenb/test/mac/sched_common_test_suite.cc b/srsenb/test/mac/sched_common_test_suite.cc index eee535873..3e636af68 100644 --- a/srsenb/test/mac/sched_common_test_suite.cc +++ b/srsenb/test/mac/sched_common_test_suite.cc @@ -316,7 +316,8 @@ int test_dci_content_common(const sf_output_res_t& sf_out, uint32_t enb_cc_idx) uint32_t nof_re = srslte_ra_dl_grant_nof_re(&cell_params.cfg.cell, &dl_sf, &grant); float coderate = srslte_coderate(tbs * 8, nof_re); const uint32_t Qm = 2; - CONDERROR(coderate > 0.930f * Qm, "Max coderate was exceeded from broadcast DCI"); + CONDERROR( + coderate > 0.930f * Qm, "Max coderate was exceeded from %s DCI", dci.rnti == SRSLTE_SIRNTI ? 
"SIB" : "RAR"); return SRSLTE_SUCCESS; }; diff --git a/srsenb/test/mac/sched_ue_ded_test_suite.cc b/srsenb/test/mac/sched_ue_ded_test_suite.cc index 637cededd..10bd69a71 100644 --- a/srsenb/test/mac/sched_ue_ded_test_suite.cc +++ b/srsenb/test/mac/sched_ue_ded_test_suite.cc @@ -78,7 +78,7 @@ int test_pdsch_grant(const sim_enb_ctxt_t& enb_ctxt, uint32_t nof_retx = get_nof_retx(pdsch.dci.tb[0].rv); // 0..3 if (h.nof_txs == 0 or h.ndi != pdsch.dci.tb[0].ndi) { // It is newtx - CONDERROR(nof_retx != 0, "Invalid rv index for new tx"); + CONDERROR(nof_retx != 0, "Invalid rv index for new DL tx"); CONDERROR(h.active, "DL newtx for already active DL harq pid=%d", h.pid); } else { // it is retx @@ -196,7 +196,7 @@ int test_ul_sched_result(const sim_enb_ctxt_t& enb_ctxt, const sf_output_res_t& if (h.nof_txs == 0 or h.ndi != pusch_ptr->dci.tb.ndi) { // newtx - CONDERROR(nof_retx != 0, "Invalid rv index for new tx"); + CONDERROR(nof_retx != 0, "Invalid rv index for new UL tx"); CONDERROR(pusch_ptr->current_tx_nb != 0, "UL HARQ retxs need to have been previously transmitted"); CONDERROR(not h_inactive, "New tx for already active UL HARQ"); CONDERROR(not pusch_ptr->needs_pdcch and ue.msg3_tti_rx.is_valid() and sf_out.tti_rx > ue.msg3_tti_rx, From 377831da903f5676c45eecdd1cd89bc2aa5b8ca1 Mon Sep 17 00:00:00 2001 From: Francisco Date: Mon, 15 Mar 2021 17:25:19 +0000 Subject: [PATCH 24/64] bugfix,minor - remove reset of ue cfg during handover --- srsenb/src/stack/rrc/rrc_mobility.cc | 1 - 1 file changed, 1 deletion(-) diff --git a/srsenb/src/stack/rrc/rrc_mobility.cc b/srsenb/src/stack/rrc/rrc_mobility.cc index 73291989c..afe7f6b46 100644 --- a/srsenb/src/stack/rrc/rrc_mobility.cc +++ b/srsenb/src/stack/rrc/rrc_mobility.cc @@ -479,7 +479,6 @@ void rrc::ue::rrc_mobility::fill_mobility_reconf_common(asn1::rrc::dl_dcch_msg_s intralte.next_hop_chaining_count = rrc_ue->ue_security_cfg.get_ncc(); // Add MeasConfig of target cell - rrc_ue->current_ue_cfg = {}; recfg_r8.meas_cfg_present = apply_meascfg_updates( recfg_r8.meas_cfg, rrc_ue->current_ue_cfg.meas_cfg, rrc_ue->ue_cell_list, src_dl_earfcn, src_pci); From 884c1b04d26d09c0654fd93edf358c881e50113e Mon Sep 17 00:00:00 2001 From: Francisco Date: Mon, 15 Mar 2021 18:00:50 +0000 Subject: [PATCH 25/64] sched,test - add test to verify cch allocator ability to undo last allocation --- .../mac/sched_phy_ch/sf_cch_allocator.cc | 6 ++- srsenb/test/mac/sched_grid_test.cc | 41 +++++++++++++++++++ 2 files changed, 45 insertions(+), 2 deletions(-) diff --git a/srsenb/src/stack/mac/sched_phy_ch/sf_cch_allocator.cc b/srsenb/src/stack/mac/sched_phy_ch/sf_cch_allocator.cc index ece42ed12..5c0859c75 100644 --- a/srsenb/src/stack/mac/sched_phy_ch/sf_cch_allocator.cc +++ b/srsenb/src/stack/mac/sched_phy_ch/sf_cch_allocator.cc @@ -108,9 +108,11 @@ void sf_cch_allocator::rem_last_dci() tree.prev_start = tree.dci_alloc_tree[tree.prev_start].parent_idx; // Discover other tree nodes with same level while (tree.prev_start > 0) { - uint32_t count = 0; - while (tree.dci_alloc_tree[tree.prev_start - 1].parent_idx >= 0) { + uint32_t count = 1; + int parent_idx = tree.dci_alloc_tree[tree.prev_start - 1].parent_idx; + while (parent_idx >= 0) { count++; + parent_idx = tree.dci_alloc_tree[parent_idx].parent_idx; } if (count == dci_record_list.size()) { tree.prev_start--; diff --git a/srsenb/test/mac/sched_grid_test.cc b/srsenb/test/mac/sched_grid_test.cc index 412a344ac..1e3be7269 100644 --- a/srsenb/test/mac/sched_grid_test.cc +++ b/srsenb/test/mac/sched_grid_test.cc @@ -131,6 +131,46 @@ int 
test_pdcch_one_ue() return SRSLTE_SUCCESS; } +int test_pdcch_ue_and_sibs() +{ + const uint32_t ENB_CC_IDX = 0; + // Params + uint32_t nof_prb = 100; + + std::vector cell_params(1); + sched_interface::ue_cfg_t ue_cfg = generate_default_ue_cfg(); + sched_interface::cell_cfg_t cell_cfg = generate_default_cell_cfg(nof_prb); + sched_interface::sched_args_t sched_args{}; + TESTASSERT(cell_params[0].set_cfg(0, cell_cfg, sched_args)); + + sf_cch_allocator pdcch; + sched_ue sched_ue{0x46, cell_params, ue_cfg}; + + pdcch.init(cell_params[PCell_IDX]); + TESTASSERT(pdcch.nof_alloc_combinations() == 0); + TESTASSERT(pdcch.nof_allocs() == 0); + + tti_point tti_rx{0}; + + pdcch.new_tti(tti_rx); + TESTASSERT(pdcch.nof_cces() == cell_params[0].nof_cce_table[0]); + TESTASSERT(pdcch.get_cfi() == 1); // Start at CFI=1 + TESTASSERT(pdcch.nof_alloc_combinations() == 0); + + TESTASSERT(pdcch.alloc_dci(alloc_type_t::DL_BC, 2)); + TESTASSERT(pdcch.nof_alloc_combinations() == 4); + TESTASSERT(pdcch.alloc_dci(alloc_type_t::DL_RAR, 2)); + TESTASSERT(pdcch.nof_allocs() == 2 and pdcch.nof_alloc_combinations() == 6); + TESTASSERT(pdcch.alloc_dci(alloc_type_t::DL_DATA, 2, &sched_ue, false)); + TESTASSERT(pdcch.nof_allocs() == 3 and pdcch.nof_alloc_combinations() == 4); + + // TEST: Ability to revert last allocation + pdcch.rem_last_dci(); + TESTASSERT(pdcch.nof_allocs() == 2 and pdcch.nof_alloc_combinations() == 6); + + return SRSLTE_SUCCESS; +} + int main() { srsenb::set_randseed(seed); @@ -143,6 +183,7 @@ int main() srslog::init(); TESTASSERT(test_pdcch_one_ue() == SRSLTE_SUCCESS); + TESTASSERT(test_pdcch_ue_and_sibs() == SRSLTE_SUCCESS); srslog::flush(); From d0672d03fbbacd6231d5bc975bfaac632ef1abe4 Mon Sep 17 00:00:00 2001 From: Francisco Date: Mon, 15 Mar 2021 19:04:32 +0000 Subject: [PATCH 26/64] bugfix - mac logger was being fetched before a sink was assigned, leading to it printing to the console --- srsenb/src/stack/mac/sched_grid.cc | 2 +- srsenb/src/stack/mac/sched_helpers.cc | 2 +- .../src/stack/mac/sched_phy_ch/sched_dci.cc | 70 ++++++++++--------- 3 files changed, 39 insertions(+), 35 deletions(-) diff --git a/srsenb/src/stack/mac/sched_grid.cc b/srsenb/src/stack/mac/sched_grid.cc index 78bf9796e..925578861 100644 --- a/srsenb/src/stack/mac/sched_grid.cc +++ b/srsenb/src/stack/mac/sched_grid.cc @@ -732,7 +732,7 @@ void sf_sched::set_dl_data_sched_result(const sf_cch_allocator::alloc_result_t& // Print Resulting DL Allocation fmt::memory_buffer str_buffer; fmt::format_to(str_buffer, - "SCHED: DL {} rnti=0x{:x}, cc={}, pid={}, mask={:x}, dci=({}, {}), n_rtx={}, tbs={}, buffer={}/{}", + "SCHED: DL {} rnti=0x{:x}, cc={}, pid={}, mask=0x{:x}, dci=({}, {}), n_rtx={}, tbs={}, buffer={}/{}", is_newtx ? 
"tx" : "retx", user->get_rnti(), cc_cfg->enb_cc_idx, diff --git a/srsenb/src/stack/mac/sched_helpers.cc b/srsenb/src/stack/mac/sched_helpers.cc index 6f9cb4b12..b1ce375ce 100644 --- a/srsenb/src/stack/mac/sched_helpers.cc +++ b/srsenb/src/stack/mac/sched_helpers.cc @@ -28,7 +28,7 @@ using dl_sched_res_t = sched_interface::dl_sched_res_t; using dl_sched_data_t = sched_interface::dl_sched_data_t; using custom_mem_buffer = fmt::basic_memory_buffer; -srslog::basic_logger& get_mac_logger() +static srslog::basic_logger& get_mac_logger() { static srslog::basic_logger& mac_logger = srslog::fetch_basic_logger("MAC"); return mac_logger; diff --git a/srsenb/src/stack/mac/sched_phy_ch/sched_dci.cc b/srsenb/src/stack/mac/sched_phy_ch/sched_dci.cc index 410cce896..a24c27fb3 100644 --- a/srsenb/src/stack/mac/sched_phy_ch/sched_dci.cc +++ b/srsenb/src/stack/mac/sched_phy_ch/sched_dci.cc @@ -20,7 +20,11 @@ namespace srsenb { -static srslog::basic_logger& logger = srslog::fetch_basic_logger("MAC"); +static srslog::basic_logger& get_mac_logger() +{ + static srslog::basic_logger& logger = srslog::fetch_basic_logger("MAC"); + return logger; +} /// Compute max TBS based on max coderate int coderate_to_tbs(float max_coderate, uint32_t nof_re) @@ -217,12 +221,12 @@ int generate_ra_bc_dci_format1a_common(srslte_dci_dl_t& dci, return -1; } - logger.debug("ra_tbs=%d/%d, tbs_bytes=%d, tbs=%d, mcs=%d", - srslte_ra_tbs_from_idx(mcs, 2), - srslte_ra_tbs_from_idx(mcs, 3), - req_bytes, - tbs, - mcs); + get_mac_logger().debug("ra_tbs=%d/%d, tbs_bytes=%d, tbs=%d, mcs=%d", + srslte_ra_tbs_from_idx(mcs, 2), + srslte_ra_tbs_from_idx(mcs, 3), + req_bytes, + tbs, + mcs); return tbs; } @@ -311,7 +315,7 @@ void log_broadcast_allocation(const sched_interface::dl_sched_bc_t& bc, rbg_interval rbg_range, const sched_cell_params_t& cell_params) { - if (not logger.info.enabled()) { + if (not get_mac_logger().info.enabled()) { return; } @@ -319,31 +323,31 @@ void log_broadcast_allocation(const sched_interface::dl_sched_bc_t& bc, fmt::format_to(str_buffer, "{}", rbg_range); if (bc.type == sched_interface::dl_sched_bc_t::bc_type::BCCH) { - logger.debug("SCHED: SIB%d, cc=%d, rbgs=(%d,%d), dci=(%d,%d), rv=%d, len=%d, period=%d, mcs=%d", - bc.index + 1, - cell_params.enb_cc_idx, - rbg_range.start(), - rbg_range.stop(), - bc.dci.location.L, - bc.dci.location.ncce, - bc.dci.tb[0].rv, - cell_params.cfg.sibs[bc.index].len, - cell_params.cfg.sibs[bc.index].period_rf, - bc.dci.tb[0].mcs_idx); + get_mac_logger().debug("SCHED: SIB%d, cc=%d, rbgs=(%d,%d), dci=(%d,%d), rv=%d, len=%d, period=%d, mcs=%d", + bc.index + 1, + cell_params.enb_cc_idx, + rbg_range.start(), + rbg_range.stop(), + bc.dci.location.L, + bc.dci.location.ncce, + bc.dci.tb[0].rv, + cell_params.cfg.sibs[bc.index].len, + cell_params.cfg.sibs[bc.index].period_rf, + bc.dci.tb[0].mcs_idx); } else { - logger.info("SCHED: PCH, cc=%d, rbgs=%s, dci=(%d,%d), tbs=%d, mcs=%d", - srslte::to_c_str(str_buffer), - cell_params.enb_cc_idx, - bc.dci.location.L, - bc.dci.location.ncce, - bc.tbs, - bc.dci.tb[0].mcs_idx); + get_mac_logger().info("SCHED: PCH, cc=%d, rbgs=%s, dci=(%d,%d), tbs=%d, mcs=%d", + srslte::to_c_str(str_buffer), + cell_params.enb_cc_idx, + bc.dci.location.L, + bc.dci.location.ncce, + bc.tbs, + bc.dci.tb[0].mcs_idx); } } void log_rar_allocation(const sched_interface::dl_sched_rar_t& rar, rbg_interval rbg_range) { - if (not logger.info.enabled()) { + if (not get_mac_logger().info.enabled()) { return; } @@ -360,12 +364,12 @@ void log_rar_allocation(const sched_interface::dl_sched_rar_t& 
rar, rbg_interval rar.msg3_grant[i].grant.trunc_mcs); } - logger.info("SCHED: RAR, ra-rnti=%d, rbgs=%s, dci=(%d,%d), msg3 grants=[%s]", - rar.dci.rnti, - srslte::to_c_str(str_buffer), - rar.dci.location.L, - rar.dci.location.ncce, - srslte::to_c_str(str_buffer2)); + get_mac_logger().info("SCHED: RAR, ra-rnti=%d, rbgs=%s, dci=(%d,%d), msg3 grants=[%s]", + rar.dci.rnti, + srslte::to_c_str(str_buffer), + rar.dci.location.L, + rar.dci.location.ncce, + srslte::to_c_str(str_buffer2)); } } // namespace srsenb From 544fe03db3c9a5033d3cfa6bcba797d5215a77be Mon Sep 17 00:00:00 2001 From: Francisco Date: Mon, 15 Mar 2021 20:08:22 +0000 Subject: [PATCH 27/64] sched,optimization - considering that cfi is static for sib/rar allocations, pick from the start of the tti, the optimal CFI in terms of largest number of CCE positions available --- .../stack/mac/sched_phy_ch/sf_cch_allocator.cc | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/srsenb/src/stack/mac/sched_phy_ch/sf_cch_allocator.cc b/srsenb/src/stack/mac/sched_phy_ch/sf_cch_allocator.cc index 5c0859c75..6dc393d99 100644 --- a/srsenb/src/stack/mac/sched_phy_ch/sf_cch_allocator.cc +++ b/srsenb/src/stack/mac/sched_phy_ch/sf_cch_allocator.cc @@ -66,6 +66,21 @@ bool sf_cch_allocator::alloc_dci(alloc_type_t alloc_type, uint32_t aggr_idx, sch // TODO: Make the alloc tree update lazy alloc_record_t record{.user = user, .aggr_idx = aggr_idx, .alloc_type = alloc_type, .pusch_uci = has_pusch_grant}; + if (is_dl_ctrl_alloc(alloc_type) and current_max_cfix > current_cfix) { + // Given that CFI is not currently dynamic for ctrl allocs, in case of SIB/RAR alloc, start with optimal CFI + // in terms of nof CCE locs + uint32_t nof_locs = 0; + for (uint32_t cfix_tmp = current_max_cfix; cfix_tmp > current_cfix; ++cfix_tmp) { + const cce_cfi_position_table* dci_locs = get_cce_loc_table(record.alloc_type, record.user, cfix_tmp); + if ((*dci_locs)[record.aggr_idx].size() >= nof_locs) { + nof_locs = (*dci_locs)[record.aggr_idx].size(); + current_cfix = cfix_tmp; + } else { + break; + } + } + } + // Try to allocate user in PDCCH for given CFI. If it fails, increment CFI. uint32_t first_cfi = get_cfi(); bool success; From 19a043683c3d47755ca64a323bbcdc8e3eb243cb Mon Sep 17 00:00:00 2001 From: Francisco Date: Mon, 15 Mar 2021 20:11:21 +0000 Subject: [PATCH 28/64] bugfix - fix compilation issue in gcc10. 
The interfaces don't have virtual dtors --- srsenb/test/mac/sched_test_common.cc | 10 ---------- srsenb/test/mac/sched_test_common.h | 12 +++++++++++- 2 files changed, 11 insertions(+), 11 deletions(-) diff --git a/srsenb/test/mac/sched_test_common.cc b/srsenb/test/mac/sched_test_common.cc index 98668c053..140de9924 100644 --- a/srsenb/test/mac/sched_test_common.cc +++ b/srsenb/test/mac/sched_test_common.cc @@ -17,7 +17,6 @@ #include "sched_common_test_suite.h" #include "sched_ue_ded_test_suite.h" #include "srslte/common/test_common.h" -#include "srslte/interfaces/enb_rrc_interfaces.h" using namespace srsenb; @@ -43,15 +42,6 @@ std::default_random_engine& ::srsenb::get_rand_gen() return rand_gen; } -struct rrc_dummy : public rrc_interface_mac { -public: - int add_user(uint16_t rnti, const sched_interface::ue_cfg_t& init_ue_cfg) { return SRSLTE_SUCCESS; } - void upd_user(uint16_t new_rnti, uint16_t old_rnti) {} - void set_activity_user(uint16_t rnti) {} - bool is_paging_opportunity(uint32_t tti, uint32_t* payload_len) { return false; } - uint8_t* read_pdu_bcch_dlsch(const uint8_t enb_cc_idx, const uint32_t sib_index) { return nullptr; } -}; - /*********************** * User State Tester ***********************/ diff --git a/srsenb/test/mac/sched_test_common.h b/srsenb/test/mac/sched_test_common.h index 59ac21e31..a53aa5b35 100644 --- a/srsenb/test/mac/sched_test_common.h +++ b/srsenb/test/mac/sched_test_common.h @@ -16,6 +16,7 @@ #include "sched_sim_ue.h" #include "sched_test_utils.h" #include "srsenb/hdr/stack/mac/sched.h" +#include "srslte/interfaces/enb_rrc_interfaces.h" #include "srslte/srslog/srslog.h" #include @@ -29,6 +30,15 @@ void set_randseed(uint64_t seed); float randf(); std::default_random_engine& get_rand_gen(); +struct rrc_dummy : public rrc_interface_mac { +public: + int add_user(uint16_t rnti, const sched_interface::ue_cfg_t& init_ue_cfg) { return SRSLTE_SUCCESS; } + void upd_user(uint16_t new_rnti, uint16_t old_rnti) {} + void set_activity_user(uint16_t rnti) {} + bool is_paging_opportunity(uint32_t tti, uint32_t* payload_len) { return false; } + uint8_t* read_pdu_bcch_dlsch(const uint8_t enb_cc_idx, const uint32_t sib_index) { return nullptr; } +}; + /************************** * Testers *************************/ @@ -115,7 +125,7 @@ protected: virtual void new_test_tti(); virtual void before_sched() {} - std::unique_ptr rrc_ptr; + std::unique_ptr rrc_ptr; }; } // namespace srsenb From 248f05bf08c5e2f75d633f1d66baf98c77c34d1d Mon Sep 17 00:00:00 2001 From: Francisco Date: Mon, 15 Mar 2021 21:53:32 +0000 Subject: [PATCH 29/64] sched,bugfix - fix optimal cfi computation for sib/rar allocations --- srsenb/src/stack/mac/sched_phy_ch/sf_cch_allocator.cc | 4 ++-- srsenb/test/mac/sched_grid_test.cc | 2 +- srsenb/test/mac/sched_test_common.cc | 3 +-- srsenb/test/mac/sched_test_common.h | 2 +- 4 files changed, 5 insertions(+), 6 deletions(-) diff --git a/srsenb/src/stack/mac/sched_phy_ch/sf_cch_allocator.cc b/srsenb/src/stack/mac/sched_phy_ch/sf_cch_allocator.cc index 6dc393d99..d029c8178 100644 --- a/srsenb/src/stack/mac/sched_phy_ch/sf_cch_allocator.cc +++ b/srsenb/src/stack/mac/sched_phy_ch/sf_cch_allocator.cc @@ -66,11 +66,11 @@ bool sf_cch_allocator::alloc_dci(alloc_type_t alloc_type, uint32_t aggr_idx, sch // TODO: Make the alloc tree update lazy alloc_record_t record{.user = user, .aggr_idx = aggr_idx, .alloc_type = alloc_type, .pusch_uci = has_pusch_grant}; - if (is_dl_ctrl_alloc(alloc_type) and current_max_cfix > current_cfix) { + if (is_dl_ctrl_alloc(alloc_type) and 
nof_allocs() == 0 and current_max_cfix > current_cfix) { // Given that CFI is not currently dynamic for ctrl allocs, in case of SIB/RAR alloc, start with optimal CFI // in terms of nof CCE locs uint32_t nof_locs = 0; - for (uint32_t cfix_tmp = current_max_cfix; cfix_tmp > current_cfix; ++cfix_tmp) { + for (uint32_t cfix_tmp = current_max_cfix; cfix_tmp > current_cfix; --cfix_tmp) { const cce_cfi_position_table* dci_locs = get_cce_loc_table(record.alloc_type, record.user, cfix_tmp); if ((*dci_locs)[record.aggr_idx].size() >= nof_locs) { nof_locs = (*dci_locs)[record.aggr_idx].size(); diff --git a/srsenb/test/mac/sched_grid_test.cc b/srsenb/test/mac/sched_grid_test.cc index 1e3be7269..58e1b2577 100644 --- a/srsenb/test/mac/sched_grid_test.cc +++ b/srsenb/test/mac/sched_grid_test.cc @@ -162,7 +162,7 @@ int test_pdcch_ue_and_sibs() TESTASSERT(pdcch.alloc_dci(alloc_type_t::DL_RAR, 2)); TESTASSERT(pdcch.nof_allocs() == 2 and pdcch.nof_alloc_combinations() == 6); TESTASSERT(pdcch.alloc_dci(alloc_type_t::DL_DATA, 2, &sched_ue, false)); - TESTASSERT(pdcch.nof_allocs() == 3 and pdcch.nof_alloc_combinations() == 4); + TESTASSERT(pdcch.nof_allocs() == 3 and pdcch.nof_alloc_combinations() == 9); // TEST: Ability to revert last allocation pdcch.rem_last_dci(); diff --git a/srsenb/test/mac/sched_test_common.cc b/srsenb/test/mac/sched_test_common.cc index 140de9924..7f55dfca2 100644 --- a/srsenb/test/mac/sched_test_common.cc +++ b/srsenb/test/mac/sched_test_common.cc @@ -128,9 +128,8 @@ const sched::ue_cfg_t* common_sched_tester::get_current_ue_cfg(uint16_t rnti) co int common_sched_tester::sim_cfg(sim_sched_args args) { sim_args0 = std::move(args); - rrc_ptr.reset(new rrc_dummy()); - sched::init(rrc_ptr.get(), sim_args0.sched_args); + sched::init(&rrc_ptr, sim_args0.sched_args); sched_sim.reset(new sched_sim_random{this, sim_args0.cell_cfg}); sched_stats.reset(new sched_result_stats{sim_args0.cell_cfg}); diff --git a/srsenb/test/mac/sched_test_common.h b/srsenb/test/mac/sched_test_common.h index a53aa5b35..4b61f63b1 100644 --- a/srsenb/test/mac/sched_test_common.h +++ b/srsenb/test/mac/sched_test_common.h @@ -125,7 +125,7 @@ protected: virtual void new_test_tti(); virtual void before_sched() {} - std::unique_ptr rrc_ptr; + rrc_dummy rrc_ptr; }; } // namespace srsenb From f9689ec956ba66ccd957b50c6198f49d2279535a Mon Sep 17 00:00:00 2001 From: Xavier Arteaga Date: Wed, 7 Oct 2020 16:39:41 +0200 Subject: [PATCH 30/64] Added PUSCH CFO estimation --- lib/include/srslte/phy/ch_estimation/chest_ul.h | 2 +- lib/src/phy/ch_estimation/chest_ul.c | 9 +++++++++ lib/src/phy/ch_estimation/dmrs_pucch.c | 2 +- lib/src/phy/phch/pusch.c | 5 +++++ 4 files changed, 16 insertions(+), 2 deletions(-) diff --git a/lib/include/srslte/phy/ch_estimation/chest_ul.h b/lib/include/srslte/phy/ch_estimation/chest_ul.h index b40c34014..04688c250 100644 --- a/lib/include/srslte/phy/ch_estimation/chest_ul.h +++ b/lib/include/srslte/phy/ch_estimation/chest_ul.h @@ -47,7 +47,7 @@ typedef struct SRSLTE_API { float epre_dBfs; float snr; float snr_db; - float cfo; + float cfo_hz; float ta_us; } srslte_chest_ul_res_t; diff --git a/lib/src/phy/ch_estimation/chest_ul.c b/lib/src/phy/ch_estimation/chest_ul.c index bdd8aa654..e7fe3fcb2 100644 --- a/lib/src/phy/ch_estimation/chest_ul.c +++ b/lib/src/phy/ch_estimation/chest_ul.c @@ -288,6 +288,15 @@ static void chest_ul_estimate(srslte_chest_ul_t* q, uint32_t n_prb[SRSLTE_NOF_SLOTS_PER_SF], srslte_chest_ul_res_t* res) { + // Calculate CFO + if (nslots == 2) { + float phase = 
cargf(srslte_vec_dot_prod_conj_ccc( + &q->pilot_estimates[0 * nrefs_sym], &q->pilot_estimates[1 * nrefs_sym], nrefs_sym)); + res->cfo_hz = phase / (2.0f * (float)M_PI * 0.0005f); + } else { + res->cfo_hz = NAN; + } + // Calculate time alignment error float ta_err = 0.0f; if (meas_ta_en) { diff --git a/lib/src/phy/ch_estimation/dmrs_pucch.c b/lib/src/phy/ch_estimation/dmrs_pucch.c index 51d2aa77d..b53bee7a1 100644 --- a/lib/src/phy/ch_estimation/dmrs_pucch.c +++ b/lib/src/phy/ch_estimation/dmrs_pucch.c @@ -256,7 +256,7 @@ int srslte_dmrs_pucch_format1_estimate(const srslte_pucch_nr_t* q, } // Measure CFO - res->cfo = NAN; // Not implemented + res->cfo_hz = NAN; // Not implemented // Do averaging here // ... Not implemented diff --git a/lib/src/phy/phch/pusch.c b/lib/src/phy/phch/pusch.c index cfa11b3dd..8c142333a 100644 --- a/lib/src/phy/phch/pusch.c +++ b/lib/src/phy/phch/pusch.c @@ -515,6 +515,11 @@ uint32_t srslte_pusch_rx_info(srslte_pusch_cfg_t* cfg, len = srslte_print_check(str, str_len, len, ", ta=%.1f us", chest_res->ta_us); } + // Append CFO information if available + if (!isnan(chest_res->cfo_hz)) { + len = srslte_print_check(str, str_len, len, ", cfo=%.1f hz", chest_res->cfo_hz); + } + // Append EVM measurement if available if (cfg->meas_evm_en) { len = srslte_print_check(str, str_len, len, ", evm=%.1f %%", res->evm * 100); From a8b5b8efa10d4047a191eee7b786ea76b8450ec8 Mon Sep 17 00:00:00 2001 From: Xavier Arteaga Date: Thu, 14 Jan 2021 16:34:51 +0100 Subject: [PATCH 31/64] UHD: stability improvements --- lib/src/phy/rf/rf_uhd_generic.h | 94 +++++++++---- lib/src/phy/rf/rf_uhd_imp.cc | 195 +++++++++++++++++++------- lib/src/phy/rf/rf_uhd_safe.h | 37 ++--- lib/src/radio/test/benchmark_radio.cc | 1 + srsue/src/phy/sync.cc | 4 + 5 files changed, 238 insertions(+), 93 deletions(-) diff --git a/lib/src/phy/rf/rf_uhd_generic.h b/lib/src/phy/rf/rf_uhd_generic.h index 7a5cfd0b7..0856b41a1 100644 --- a/lib/src/phy/rf/rf_uhd_generic.h +++ b/lib/src/phy/rf/rf_uhd_generic.h @@ -21,20 +21,30 @@ private: uhd::usrp::multi_usrp::sptr usrp = nullptr; const uhd::fs_path TREE_DBOARD_RX_FRONTEND_NAME = "/mboards/0/dboards/A/rx_frontends/A/name"; const std::chrono::milliseconds FE_RX_RESET_SLEEP_TIME_MS = std::chrono::milliseconds(2000UL); - uhd::stream_args_t stream_args; - double lo_freq_tx_hz = 0.0; - double lo_freq_rx_hz = 0.0; + uhd::stream_args_t stream_args = {}; + double lo_freq_tx_hz = 0.0; + double lo_freq_rx_hz = 0.0; uhd_error usrp_make_internal(const uhd::device_addr_t& dev_addr) override { // Destroy any previous USRP instance usrp = nullptr; + Debug("Making USRP object with args '" << dev_addr.to_string() << "'"); + UHD_SAFE_C_SAVE_ERROR(this, usrp = uhd::usrp::multi_usrp::make(dev_addr);) } - uhd_error set_tx_subdev(const std::string& string) { UHD_SAFE_C_SAVE_ERROR(this, usrp->set_tx_subdev_spec(string);) } - uhd_error set_rx_subdev(const std::string& string) { UHD_SAFE_C_SAVE_ERROR(this, usrp->set_rx_subdev_spec(string);) } + uhd_error set_tx_subdev(const std::string& string) + { + Info("Setting tx_subdev_spec to '" << string << "'"); + UHD_SAFE_C_SAVE_ERROR(this, usrp->set_tx_subdev_spec(string);) + } + uhd_error set_rx_subdev(const std::string& string) + { + Info("Setting rx_subdev_spec to '" << string << "'"); + UHD_SAFE_C_SAVE_ERROR(this, usrp->set_rx_subdev_spec(string);) + } uhd_error test_ad936x_device(uint32_t nof_channels) { @@ -104,8 +114,14 @@ private: } public: - rf_uhd_generic(){}; - virtual ~rf_uhd_generic(){}; + rf_uhd_generic() { Info("RF UHD Generic instance 
constructed"); } + virtual ~rf_uhd_generic() + { + rx_stream = nullptr; + tx_stream = nullptr; + usrp = nullptr; + Debug("RF UHD closed Ok"); + } uhd_error usrp_make(const uhd::device_addr_t& dev_addr_, uint32_t nof_channels) override { uhd::device_addr_t dev_addr = dev_addr_; @@ -154,7 +170,6 @@ public: // Set transmitter subdev spec if specified if (not tx_subdev.empty()) { - printf("Setting tx_subdev_spec to '%s'\n", tx_subdev.c_str()); err = set_tx_subdev(tx_subdev); if (err != UHD_ERROR_NONE) { return err; @@ -163,7 +178,6 @@ public: // Set receiver subdev spec if specified if (not rx_subdev.empty()) { - printf("Setting rx_subdev_spec to '%s'\n", rx_subdev.c_str()); err = set_rx_subdev(tx_subdev); if (err != UHD_ERROR_NONE) { return err; @@ -247,6 +261,7 @@ public: } uhd_error set_time_unknown_pps(const uhd::time_spec_t& timespec) override { + Debug("Setting Time at next PPS..."); UHD_SAFE_C_SAVE_ERROR(this, usrp->set_time_unknown_pps(timespec);) } uhd_error get_time_now(uhd::time_spec_t& timespec) override @@ -255,10 +270,11 @@ public: } uhd_error set_sync_source(const std::string& source) override { + Debug("Setting PPS source to '" << source << "'") #if UHD_VERSION < 3140099 - UHD_SAFE_C_SAVE_ERROR(this, usrp->set_clock_source(source); usrp->set_time_source(source);) + UHD_SAFE_C_SAVE_ERROR(this, usrp->set_clock_source(source); usrp->set_time_source(source);) #else - UHD_SAFE_C_SAVE_ERROR(this, usrp->set_sync_source(source, source);) + UHD_SAFE_C_SAVE_ERROR(this, usrp->set_sync_source(source, source);) #endif } uhd_error get_gain_range(uhd::gain_range_t& tx_gain_range, uhd::gain_range_t& rx_gain_range) override @@ -267,38 +283,61 @@ public: } uhd_error set_master_clock_rate(double rate) override { + Debug("Setting master clock rate to " << rate / 1e6 << " MHz"); UHD_SAFE_C_SAVE_ERROR(this, usrp->set_master_clock_rate(rate);) } - uhd_error set_rx_rate(double rate) override { UHD_SAFE_C_SAVE_ERROR(this, usrp->set_rx_rate(rate);) } - uhd_error set_tx_rate(double rate) override { UHD_SAFE_C_SAVE_ERROR(this, usrp->set_tx_rate(rate);) } + uhd_error set_rx_rate(double rate) override + { + Debug("Setting Rx Rate to " << rate / 1e6 << "MHz"); + UHD_SAFE_C_SAVE_ERROR(this, usrp->set_rx_rate(rate);) + } + uhd_error set_tx_rate(double rate) override + { + Debug("Setting Tx Rate to " << rate / 1e6 << "MHz"); + UHD_SAFE_C_SAVE_ERROR(this, usrp->set_tx_rate(rate);) + } uhd_error set_command_time(const uhd::time_spec_t& timespec) override { UHD_SAFE_C_SAVE_ERROR(this, usrp->set_command_time(timespec);) } uhd_error get_rx_stream(size_t& max_num_samps) override { - UHD_SAFE_C_SAVE_ERROR(this, rx_stream = nullptr; rx_stream = usrp->get_rx_stream(stream_args); - max_num_samps = rx_stream->get_max_num_samps(); - if (max_num_samps == 0UL) { - last_error = "The maximum number of receive samples is zero."; - return UHD_ERROR_VALUE; - }) + Debug("Creating Rx stream"); + UHD_SAFE_C_SAVE_ERROR( + this, rx_stream = nullptr; rx_stream = usrp->get_rx_stream(stream_args); + max_num_samps = rx_stream->get_max_num_samps(); + if (max_num_samps == 0UL) { + last_error = "The maximum number of receive samples is zero."; + return UHD_ERROR_VALUE; + }) } uhd_error get_tx_stream(size_t& max_num_samps) override { - UHD_SAFE_C_SAVE_ERROR(this, tx_stream = nullptr; tx_stream = usrp->get_tx_stream(stream_args); - max_num_samps = tx_stream->get_max_num_samps(); - if (max_num_samps == 0UL) { - last_error = "The maximum number of transmit samples is zero."; - return UHD_ERROR_VALUE; - }) + Debug("Creating Tx stream"); + 
UHD_SAFE_C_SAVE_ERROR( + this, tx_stream = nullptr; tx_stream = usrp->get_tx_stream(stream_args); + max_num_samps = tx_stream->get_max_num_samps(); + if (max_num_samps == 0UL) { + last_error = "The maximum number of transmit samples is zero."; + return UHD_ERROR_VALUE; + }) + } + uhd_error set_tx_gain(size_t ch, double gain) override + { + Debug("Setting channel " << ch << " Tx gain to " << gain << " dB"); + UHD_SAFE_C_SAVE_ERROR(this, usrp->set_tx_gain(gain, ch);) + } + uhd_error set_rx_gain(size_t ch, double gain) override + { + Debug("Setting channel " << ch << " Rx gain to " << gain << " dB"); + UHD_SAFE_C_SAVE_ERROR(this, usrp->set_rx_gain(gain, ch);) } - uhd_error set_tx_gain(size_t ch, double gain) override { UHD_SAFE_C_SAVE_ERROR(this, usrp->set_tx_gain(gain, ch);) } - uhd_error set_rx_gain(size_t ch, double gain) override { UHD_SAFE_C_SAVE_ERROR(this, usrp->set_rx_gain(gain, ch);) } uhd_error get_rx_gain(double& gain) override { UHD_SAFE_C_SAVE_ERROR(this, gain = usrp->get_rx_gain();) } uhd_error get_tx_gain(double& gain) override { UHD_SAFE_C_SAVE_ERROR(this, gain = usrp->get_tx_gain();) } uhd_error set_tx_freq(uint32_t ch, double target_freq, double& actual_freq) override { + Debug("Setting channel " << ch << " Tx frequency to " << target_freq / 1e6 << " MHz"); + // Create Tune request uhd::tune_request_t tune_request(target_freq); @@ -314,6 +353,7 @@ public: } uhd_error set_rx_freq(uint32_t ch, double target_freq, double& actual_freq) override { + Debug("Setting channel " << ch << " Rx frequency to " << target_freq / 1e6 << " MHz"); // Create Tune request uhd::tune_request_t tune_request(target_freq); diff --git a/lib/src/phy/rf/rf_uhd_imp.cc b/lib/src/phy/rf/rf_uhd_imp.cc index 2565a49ed..eb2a9f62d 100644 --- a/lib/src/phy/rf/rf_uhd_imp.cc +++ b/lib/src/phy/rf/rf_uhd_imp.cc @@ -111,17 +111,21 @@ static const std::chrono::milliseconds RF_UHD_IMP_ASYNCH_MSG_SLEEP_MS = std::chr static const uint32_t RF_UHD_IMP_MAX_RX_TRIALS = 100; struct rf_uhd_handler_t { + size_t id; + std::string devname; std::shared_ptr uhd = nullptr; - srslte_rf_info_t info; - size_t rx_nof_samples = 0; - size_t tx_nof_samples = 0; - double tx_rate = 1.92e6; - double rx_rate = 1.92e6; - bool dynamic_master_rate = true; - uint32_t nof_rx_channels = 0; - uint32_t nof_tx_channels = 0; + srslte_rf_info_t info; + size_t rx_nof_samples = 0; + size_t tx_nof_samples = 0; + double tx_rate = 1.92e6; + double rx_rate = 1.92e6; + bool dynamic_master_rate = true; + uint32_t nof_rx_channels = 0; + uint32_t nof_tx_channels = 0; + std::array tx_freq = {}; + std::array rx_freq = {}; srslte_rf_error_handler_t uhd_error_handler = nullptr; void* uhd_error_handler_arg = nullptr; @@ -143,6 +147,10 @@ struct rf_uhd_handler_t { #endif /* HAVE_ASYNC_THREAD */ }; +// Store UHD Handler instances as shared pointer to avoid new/delete +static std::map > rf_uhd_map; +static size_t uhd_handler_counter = 0; + #if UHD_VERSION < 31100 static void (*handler)(const char*); @@ -277,10 +285,8 @@ static void* async_thread(void* h) } #endif -static inline void uhd_free(rf_uhd_handler_t* h) +static inline void uhd_free(rf_uhd_handler_t* handler) { - rf_uhd_handler_t* handler = (rf_uhd_handler_t*)h; - // NULL handler, return if (handler == nullptr) { return; @@ -294,7 +300,8 @@ static inline void uhd_free(rf_uhd_handler_t* h) } #endif - delete handler; + // Erase element from MAP + rf_uhd_map.erase(handler->id); } void rf_uhd_suppress_stdout(void* h) @@ -453,6 +460,7 @@ const char* rf_uhd_devname(void* h) bool rf_uhd_rx_wait_lo_locked(void* h) { 
rf_uhd_handler_t* handler = (rf_uhd_handler_t*)h; + Debug("Waiting for Rx LO Locked"); // wait for clock source to lock std::string sensor_name = "lo_locked"; @@ -519,7 +527,15 @@ int rf_uhd_stop_rx_stream(void* h) rf_uhd_handler_t* handler = (rf_uhd_handler_t*)h; std::unique_lock lock(handler->rx_mutex); - return rf_uhd_stop_rx_stream_unsafe(handler); + if (rf_uhd_stop_rx_stream_unsafe(handler) < SRSLTE_SUCCESS) { + return SRSLTE_ERROR; + } + + // Make sure the Rx stream is flushed + lock.unlock(); // Flush has its own lock + rf_uhd_flush_buffer(h); + + return SRSLTE_SUCCESS; } void rf_uhd_flush_buffer(void* h) @@ -560,21 +576,8 @@ int rf_uhd_open(char* args, void** h) return rf_uhd_open_multi(args, h, 1); } -int rf_uhd_open_multi(char* args, void** h, uint32_t nof_channels) +static int uhd_init(rf_uhd_handler_t* handler, char* args, uint32_t nof_channels) { - // Check valid handler pointer - if (h == nullptr) { - return SRSLTE_ERROR_INVALID_INPUTS; - } - - if (nof_channels > SRSLTE_MAX_CHANNELS) { - ERROR("Error opening UHD: maximum number of channels exceeded (%d>%d)", nof_channels, SRSLTE_MAX_CHANNELS); - return SRSLTE_ERROR; - } - - rf_uhd_handler_t* handler = new rf_uhd_handler_t; - *h = handler; - // Disable fast-path (U/L/O) messages setenv("UHD_LOG_FASTPATH_DISABLE", "1", 0); @@ -676,6 +679,32 @@ int rf_uhd_open_multi(char* args, void** h, uint32_t nof_channels) } handler->current_master_clock = device_addr.cast("master_clock_rate", 0.0); + // Parse default frequencies + for (uint32_t i = 0; i < nof_channels; i++) { + // Parse Tx frequency + if (i == 0 and device_addr.has_key("tx_freq")) { + handler->tx_freq[i] = device_addr.cast("tx_freq", handler->tx_freq[i]); + device_addr.pop("tx_freq"); + } else { + std::string key = "tx_freq" + std::to_string(i); + if (device_addr.has_key(key)) { + handler->tx_freq[i] = device_addr.cast(key, handler->tx_freq[i]); + device_addr.pop(key); + } + } + + // Parse Rx frequency + if (i == 0 and device_addr.has_key("rx_freq")) { + handler->rx_freq[i] = device_addr.cast("rx_freq", handler->rx_freq[i]); + } else { + std::string key = "rx_freq" + std::to_string(i); + if (device_addr.has_key(key)) { + handler->rx_freq[i] = device_addr.cast("rx_freq" + std::to_string(i), handler->rx_freq[i]); + device_addr.pop(key); + } + } + } + // Set dynamic master clock rate configuration if (device_addr.has_key("type")) { handler->dynamic_master_rate = RH_UHD_IMP_FIX_MASTER_CLOCK_RATE_DEVICE_LIST.count(device_addr["type"]) == 0; @@ -706,7 +735,6 @@ int rf_uhd_open_multi(char* args, void** h, uint32_t nof_channels) // Make USRP if (handler->uhd->usrp_make(device_addr, nof_channels) != UHD_ERROR_NONE) { print_usrp_error(handler); - uhd_free(handler); return SRSLTE_ERROR; } @@ -777,6 +805,7 @@ int rf_uhd_open_multi(char* args, void** h, uint32_t nof_channels) handler->nof_rx_channels = nof_channels; handler->nof_tx_channels = nof_channels; + // Set default Tx/Rx rates if (handler->uhd->set_rx_rate(handler->rx_rate) != UHD_ERROR_NONE) { print_usrp_error(handler); return SRSLTE_ERROR; @@ -786,6 +815,7 @@ int rf_uhd_open_multi(char* args, void** h, uint32_t nof_channels) return SRSLTE_ERROR; } + // Reset timestamps if (nof_channels > 1 and clock_src != "gpsdo") { handler->uhd->set_time_unknown_pps(uhd::time_spec_t()); } @@ -800,6 +830,27 @@ int rf_uhd_open_multi(char* args, void** h, uint32_t nof_channels) return SRSLTE_ERROR; } + // Tune LOs if the default frequency is provided + bool require_wait_rx_lock = false; + for (uint32_t i = 0; i < nof_channels; i++) { + if 
(std::isnormal(handler->rx_freq[i])) { + if (handler->uhd->set_rx_freq(i, handler->rx_freq[i], handler->rx_freq[i]) != UHD_ERROR_NONE) { + print_usrp_error(handler); + return SRSLTE_ERROR; + } + rf_uhd_rx_wait_lo_locked(handler); + require_wait_rx_lock = true; + } + } + for (uint32_t i = 0; i < nof_channels; i++) { + if (std::isnormal(handler->tx_freq[i])) { + if (handler->uhd->set_tx_freq(i, handler->tx_freq[i], handler->tx_freq[i]) != UHD_ERROR_NONE) { + print_usrp_error(handler); + return SRSLTE_ERROR; + } + } + } + // Populate RF device info uhd::gain_range_t tx_gain_range; uhd::gain_range_t rx_gain_range; @@ -832,6 +883,37 @@ int rf_uhd_open_multi(char* args, void** h, uint32_t nof_channels) return SRSLTE_SUCCESS; } +int rf_uhd_open_multi(char* args, void** h, uint32_t nof_channels) +{ + // Check valid handler pointer + if (h == nullptr) { + return SRSLTE_ERROR_INVALID_INPUTS; + } + + if (nof_channels > SRSLTE_MAX_CHANNELS) { + ERROR("Error opening UHD: maximum number of channels exceeded (%d>%d)", nof_channels, SRSLTE_MAX_CHANNELS); + return SRSLTE_ERROR; + } + + // Create UHD handler + rf_uhd_map[uhd_handler_counter] = std::make_shared(); + rf_uhd_handler_t* handler = rf_uhd_map[uhd_handler_counter].get(); + handler->id = uhd_handler_counter; + uhd_handler_counter++; + *h = handler; + + // Initialise UHD handler + if (uhd_init(handler, args, nof_channels) < SRSLTE_SUCCESS) { + ERROR("uhd_init failed, freeing..."); + // Free/Close UHD handler properly + uhd_free(handler); + *h = nullptr; + return SRSLTE_ERROR; + } + + return SRSLTE_SUCCESS; +} + int rf_uhd_close(void* h) { // Makes sure Tx is ended @@ -852,7 +934,7 @@ int rf_uhd_close(void* h) static inline void rf_uhd_set_master_clock_rate_unsafe(rf_uhd_handler_t* handler, double rate) { // Set master clock rate if it is allowed and change is required - if (handler->dynamic_master_rate && handler->current_master_clock != rate) { + if (handler->dynamic_master_rate and handler->current_master_clock != rate) { if (handler->uhd->set_master_clock_rate(rate) != UHD_ERROR_NONE) { print_usrp_error(handler); } @@ -1078,44 +1160,61 @@ srslte_rf_info_t* rf_uhd_get_info(void* h) return info; } +static bool rf_uhd_set_freq_ch(rf_uhd_handler_t* handler, uint32_t ch, double& freq, bool is_tx) +{ + double& curr_freq = (is_tx) ? 
handler->tx_freq[ch] : handler->rx_freq[ch]; + + // Skip if frequency is unchanged + if (round(freq) == round(curr_freq)) { + return false; + } + + // Set frequency + if (is_tx) { + if (handler->uhd->set_tx_freq(ch, freq, curr_freq) != UHD_ERROR_NONE) { + print_usrp_error(handler); + } + } else { + if (handler->uhd->set_rx_freq(ch, freq, curr_freq) != UHD_ERROR_NONE) { + print_usrp_error(handler); + } + } + return true; +} double rf_uhd_set_rx_freq(void* h, uint32_t ch, double freq) { + bool require_rx_wait_lo_locked = false; + rf_uhd_handler_t* handler = (rf_uhd_handler_t*)h; if (ch < handler->nof_rx_channels) { - if (handler->uhd->set_rx_freq(ch, freq, freq) != UHD_ERROR_NONE) { - print_usrp_error(handler); - return SRSLTE_ERROR; - } + require_rx_wait_lo_locked |= rf_uhd_set_freq_ch(handler, ch, freq, false); } else { for (uint32_t i = 0; i < handler->nof_rx_channels; i++) { - if (handler->uhd->set_rx_freq(i, freq, freq) != UHD_ERROR_NONE) { - print_usrp_error(handler); - return SRSLTE_ERROR; - } + require_rx_wait_lo_locked |= rf_uhd_set_freq_ch(handler, i, freq, false); } } - rf_uhd_rx_wait_lo_locked(handler); - return freq; + + // wait for LO Locked + if (require_rx_wait_lo_locked) { + rf_uhd_rx_wait_lo_locked(handler); + } + + return handler->rx_freq[ch % handler->nof_rx_channels]; } double rf_uhd_set_tx_freq(void* h, uint32_t ch, double freq) { rf_uhd_handler_t* handler = (rf_uhd_handler_t*)h; if (ch < handler->nof_tx_channels) { - if (handler->uhd->set_tx_freq(ch, freq, freq) != UHD_ERROR_NONE) { - print_usrp_error(handler); - return SRSLTE_ERROR; - } + rf_uhd_set_freq_ch(handler, ch, freq, true); } else { for (uint32_t i = 0; i < handler->nof_tx_channels; i++) { - if (handler->uhd->set_tx_freq(i, freq, freq) != UHD_ERROR_NONE) { - print_usrp_error(handler); - return SRSLTE_ERROR; - } + rf_uhd_set_freq_ch(handler, i, freq, true); } } - return freq; + + return handler->tx_freq[ch % handler->nof_tx_channels]; } void rf_uhd_get_time(void* h, time_t* secs, double* frac_secs) diff --git a/lib/src/phy/rf/rf_uhd_safe.h b/lib/src/phy/rf/rf_uhd_safe.h index 11a92a89a..7141870c3 100644 --- a/lib/src/phy/rf/rf_uhd_safe.h +++ b/lib/src/phy/rf/rf_uhd_safe.h @@ -111,17 +111,18 @@ protected: public: std::string last_error; - virtual uhd_error usrp_make(const uhd::device_addr_t& dev_addr, uint32_t nof_channels) = 0; - virtual uhd_error get_mboard_name(std::string& mboard_name) = 0; - virtual uhd_error get_mboard_sensor_names(std::vector& sensors) = 0; - virtual uhd_error get_rx_sensor_names(std::vector& sensors) = 0; - virtual uhd_error get_sensor(const std::string& sensor_name, double& sensor_value) = 0; - virtual uhd_error get_sensor(const std::string& sensor_name, bool& sensor_value) = 0; - virtual uhd_error get_rx_sensor(const std::string& sensor_name, bool& sensor_value) = 0; - virtual uhd_error set_time_unknown_pps(const uhd::time_spec_t& timespec) = 0; - virtual uhd_error get_time_now(uhd::time_spec_t& timespec) = 0; + virtual uhd_error usrp_make(const uhd::device_addr_t& dev_addr, uint32_t nof_channels) = 0; + virtual uhd_error get_mboard_name(std::string& mboard_name) = 0; + virtual uhd_error get_mboard_sensor_names(std::vector& sensors) = 0; + virtual uhd_error get_rx_sensor_names(std::vector& sensors) = 0; + virtual uhd_error get_sensor(const std::string& sensor_name, double& sensor_value) = 0; + virtual uhd_error get_sensor(const std::string& sensor_name, bool& sensor_value) = 0; + virtual uhd_error get_rx_sensor(const std::string& sensor_name, bool& sensor_value) = 0; + virtual 
uhd_error set_time_unknown_pps(const uhd::time_spec_t& timespec) = 0; + virtual uhd_error get_time_now(uhd::time_spec_t& timespec) = 0; uhd_error start_rx_stream(double delay) { + Debug("Starting Rx stream"); uhd::time_spec_t time_spec; uhd_error err = get_time_now(time_spec); if (err != UHD_ERROR_NONE) { @@ -137,7 +138,9 @@ public: } uhd_error stop_rx_stream() { + Debug("Stopping Rx stream"); UHD_SAFE_C_SAVE_ERROR(this, uhd::stream_cmd_t stream_cmd(uhd::stream_cmd_t::STREAM_MODE_STOP_CONTINUOUS); + stream_cmd.stream_now = true; rx_stream->issue_stream_cmd(stream_cmd);) } virtual uhd_error set_sync_source(const std::string& source) = 0; @@ -147,15 +150,13 @@ public: virtual uhd_error set_tx_rate(double rate) = 0; virtual uhd_error set_command_time(const uhd::time_spec_t& timespec) = 0; virtual uhd_error get_rx_stream(size_t& max_num_samps) = 0; - virtual uhd_error destroy_rx_stream() { UHD_SAFE_C_SAVE_ERROR(this, rx_stream = nullptr;) } - virtual uhd_error get_tx_stream(size_t& max_num_samps) = 0; - virtual uhd_error destroy_tx_stream() { UHD_SAFE_C_SAVE_ERROR(this, rx_stream = nullptr;) } - virtual uhd_error set_tx_gain(size_t ch, double gain) = 0; - virtual uhd_error set_rx_gain(size_t ch, double gain) = 0; - virtual uhd_error get_rx_gain(double& gain) = 0; - virtual uhd_error get_tx_gain(double& gain) = 0; - virtual uhd_error set_tx_freq(uint32_t ch, double target_freq, double& actual_freq) = 0; - virtual uhd_error set_rx_freq(uint32_t ch, double target_freq, double& actual_freq) = 0; + virtual uhd_error get_tx_stream(size_t& max_num_samps) = 0; + virtual uhd_error set_tx_gain(size_t ch, double gain) = 0; + virtual uhd_error set_rx_gain(size_t ch, double gain) = 0; + virtual uhd_error get_rx_gain(double& gain) = 0; + virtual uhd_error get_tx_gain(double& gain) = 0; + virtual uhd_error set_tx_freq(uint32_t ch, double target_freq, double& actual_freq) = 0; + virtual uhd_error set_rx_freq(uint32_t ch, double target_freq, double& actual_freq) = 0; uhd_error receive(void** buffs, const size_t nsamps_per_buff, uhd::rx_metadata_t& metadata, diff --git a/lib/src/radio/test/benchmark_radio.cc b/lib/src/radio/test/benchmark_radio.cc index 1f0956ff7..dfd939238 100644 --- a/lib/src/radio/test/benchmark_radio.cc +++ b/lib/src/radio/test/benchmark_radio.cc @@ -332,6 +332,7 @@ static void* radio_thread_run(void* arg) radio_args.nof_carriers = 1; radio_args.device_args = radios_args[r].empty() ? "auto" : radios_args[r]; radio_args.rx_gain = agc_enable ? -1 : rf_gain; + radio_args.tx_gain = agc_enable ? 
-1 : rf_gain; radio_args.device_name = radio_device; if (radio_h[r]->init(radio_args, &phy) != SRSLTE_SUCCESS) { diff --git a/srsue/src/phy/sync.cc b/srsue/src/phy/sync.cc index e8d264c69..02bdc67bd 100644 --- a/srsue/src/phy/sync.cc +++ b/srsue/src/phy/sync.cc @@ -125,6 +125,10 @@ void sync::stop() q->stop(); } running = false; + + // Reset (stop Rx stream) as soon as possible to avoid base-band Rx buffer overflow + radio_h->reset(); + wait_thread_finish(); } From 5f954ab379a9e5b49e257733d0202241f969ffeb Mon Sep 17 00:00:00 2001 From: Xavier Arteaga Date: Tue, 16 Mar 2021 18:20:32 +0100 Subject: [PATCH 32/64] Fix UHD compilation --- lib/src/phy/rf/rf_uhd_generic.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/src/phy/rf/rf_uhd_generic.h b/lib/src/phy/rf/rf_uhd_generic.h index 0856b41a1..2a7e51ebf 100644 --- a/lib/src/phy/rf/rf_uhd_generic.h +++ b/lib/src/phy/rf/rf_uhd_generic.h @@ -270,7 +270,7 @@ public: } uhd_error set_sync_source(const std::string& source) override { - Debug("Setting PPS source to '" << source << "'") + Debug("Setting PPS source to '" << source << "'"); #if UHD_VERSION < 3140099 UHD_SAFE_C_SAVE_ERROR(this, usrp->set_clock_source(source); usrp->set_time_source(source);) #else From d334907afe11c4d5c18e44f8689a640dea9cce40 Mon Sep 17 00:00:00 2001 From: Francisco Date: Tue, 16 Mar 2021 18:01:46 +0000 Subject: [PATCH 33/64] sched - wrote benchmark to detect regressions in DL/UL data rates, and to analyse the total latency of the scheduler --- srsenb/test/mac/CMakeLists.txt | 4 + srsenb/test/mac/sched_benchmark.cc | 401 +++++++++++++++++++++ srsenb/test/mac/sched_sim_ue.cc | 19 +- srsenb/test/mac/sched_sim_ue.h | 23 +- srsenb/test/mac/sched_test_common.cc | 6 +- srsenb/test/mac/sched_ue_ded_test_suite.cc | 18 +- 6 files changed, 444 insertions(+), 27 deletions(-) create mode 100644 srsenb/test/mac/sched_benchmark.cc diff --git a/srsenb/test/mac/CMakeLists.txt b/srsenb/test/mac/CMakeLists.txt index df46cb13c..bca676e40 100644 --- a/srsenb/test/mac/CMakeLists.txt +++ b/srsenb/test/mac/CMakeLists.txt @@ -58,3 +58,7 @@ add_test(sched_tpc_test sched_tpc_test) add_executable(sched_dci_test sched_dci_test.cc) target_link_libraries(sched_dci_test srslte_common srsenb_mac srslte_mac sched_test_common) add_test(sched_dci_test sched_dci_test) + +add_executable(sched_benchmark_test sched_benchmark.cc) +target_link_libraries(sched_benchmark_test srslte_common srsenb_mac srslte_mac sched_test_common) +add_test(sched_benchmark_test sched_benchmark_test) diff --git a/srsenb/test/mac/sched_benchmark.cc b/srsenb/test/mac/sched_benchmark.cc new file mode 100644 index 000000000..aca7c641b --- /dev/null +++ b/srsenb/test/mac/sched_benchmark.cc @@ -0,0 +1,401 @@ +/** + * + * \section COPYRIGHT + * + * Copyright 2013-2020 Software Radio Systems Limited + * + * By using this file, you agree to the terms and conditions set + * forth in the LICENSE file which can be found at the top level of + * the distribution. 
+ * + */ + +#include "sched_test_common.h" +#include "srsenb/hdr/stack/mac/sched.h" +#include "srslte/adt/accumulators.h" +#include + +const uint32_t seed = std::chrono::system_clock::now().time_since_epoch().count(); + +namespace srsenb { + +struct run_params { + uint32_t nof_prbs; + uint32_t nof_ues; + uint32_t nof_ttis; + uint32_t cqi; + const char* sched_policy; +}; + +struct run_params_range { + std::vector nof_prbs = {6, 15, 25, 50, 75, 100}; + std::vector nof_ues = {1, 2, 5}; + uint32_t nof_ttis = 10000; + std::vector cqi = {5, 10, 15}; + std::vector sched_policy = {"time_rr", "time_pf"}; + + size_t nof_runs() const { return nof_prbs.size() * nof_ues.size() * cqi.size() * sched_policy.size(); } + run_params get_params(size_t idx) const + { + run_params r = {}; + r.nof_ttis = nof_ttis; + r.nof_prbs = nof_prbs[idx % nof_prbs.size()]; + idx /= nof_prbs.size(); + r.nof_ues = nof_ues[idx % nof_ues.size()]; + idx /= nof_ues.size(); + r.cqi = cqi[idx % cqi.size()]; + idx /= cqi.size(); + r.sched_policy = sched_policy.at(idx); + return r; + } +}; + +class sched_tester : public sched_sim_base +{ + static std::vector get_cell_cfg(srslte::span cell_params) + { + std::vector cell_cfg_list; + for (auto& c : cell_params) { + cell_cfg_list.push_back(c.cfg); + } + return cell_cfg_list; + } + +public: + explicit sched_tester(sched* sched_obj_, + const sched_interface::sched_args_t& sched_args, + const std::vector& cell_cfg_list) : + sched_sim_base(sched_obj_, sched_args, cell_cfg_list), + sched_ptr(sched_obj_), + dl_result(cell_cfg_list.size()), + ul_result(cell_cfg_list.size()) + {} + + srslog::basic_logger& mac_logger = srslog::fetch_basic_logger("MAC"); + sched* sched_ptr; + uint32_t dl_bytes_per_tti = 100000; + uint32_t ul_bytes_per_tti = 100000; + run_params current_run_params = {}; + + std::vector dl_result; + std::vector ul_result; + + struct throughput_stats { + srslte::rolling_average mean_dl_tbs, mean_ul_tbs, avg_dl_mcs, avg_ul_mcs; + srslte::rolling_average avg_latency; + }; + throughput_stats total_stats; + + int advance_tti() + { + tti_point tti_rx = get_tti_rx().is_valid() ? 
get_tti_rx() + 1 : tti_point(0); + mac_logger.set_context(tti_rx.to_uint()); + new_tti(tti_rx); + + for (uint32_t cc = 0; cc < get_cell_params().size(); ++cc) { + std::chrono::time_point tp = std::chrono::steady_clock::now(); + TESTASSERT(sched_ptr->dl_sched(to_tx_dl(tti_rx).to_uint(), cc, dl_result[cc]) == SRSLTE_SUCCESS); + TESTASSERT(sched_ptr->ul_sched(to_tx_ul(tti_rx).to_uint(), cc, ul_result[cc]) == SRSLTE_SUCCESS); + std::chrono::time_point tp2 = std::chrono::steady_clock::now(); + std::chrono::nanoseconds tdur = std::chrono::duration_cast(tp2 - tp); + total_stats.avg_latency.push(tdur.count()); + } + + sf_output_res_t sf_out{get_cell_params(), tti_rx, ul_result, dl_result}; + update(sf_out); + process_stats(sf_out); + + return SRSLTE_SUCCESS; + } + + void set_external_tti_events(const sim_ue_ctxt_t& ue_ctxt, ue_tti_events& pending_events) override + { + // do nothing + if (ue_ctxt.conres_rx) { + sched_ptr->ul_bsr(ue_ctxt.rnti, 1, dl_bytes_per_tti); + sched_ptr->dl_rlc_buffer_state(ue_ctxt.rnti, 3, ul_bytes_per_tti, 0); + + if (get_tti_rx().to_uint() % 5 == 0) { + for (uint32_t cc = 0; cc < pending_events.cc_list.size(); ++cc) { + pending_events.cc_list[cc].dl_cqi = current_run_params.cqi; + pending_events.cc_list[cc].ul_snr = 40; + } + } + } + } + + void process_stats(sf_output_res_t& sf_out) + { + for (uint32_t cc = 0; cc < get_cell_params().size(); ++cc) { + uint32_t dl_tbs = 0, ul_tbs = 0, dl_mcs = 0, ul_mcs = 0; + for (uint32_t i = 0; i < sf_out.dl_cc_result[cc].nof_data_elems; ++i) { + dl_tbs += sf_out.dl_cc_result[cc].data[i].tbs[0]; + dl_tbs += sf_out.dl_cc_result[cc].data[i].tbs[1]; + dl_mcs = std::max(dl_mcs, sf_out.dl_cc_result[cc].data[i].dci.tb[0].mcs_idx); + } + total_stats.mean_dl_tbs.push(dl_tbs); + if (sf_out.dl_cc_result[cc].nof_data_elems > 0) { + total_stats.avg_dl_mcs.push(dl_mcs); + } + for (uint32_t i = 0; i < sf_out.ul_cc_result[cc].nof_dci_elems; ++i) { + ul_tbs += sf_out.ul_cc_result[cc].pusch[i].tbs; + ul_mcs = std::max(ul_mcs, sf_out.ul_cc_result[cc].pusch[i].dci.tb.mcs_idx); + } + total_stats.mean_ul_tbs.push(ul_tbs); + if (sf_out.ul_cc_result[cc].nof_dci_elems) { + total_stats.avg_ul_mcs.push(ul_mcs); + } + } + } +}; + +int run_sched_new_ue(sched_tester& sched_tester, + const run_params& params, + uint16_t rnti, + const sched_interface::ue_cfg_t& ue_cfg) +{ + const uint32_t ENB_CC_IDX = 0; + + sched_tester.total_stats = {}; + sched_tester.current_run_params = params; + + // Add user (first need to advance to a PRACH TTI) + while (not srslte_prach_tti_opportunity_config_fdd( + sched_tester.get_cell_params()[ue_cfg.supported_cc_list[0].enb_cc_idx].cfg.prach_config, + sched_tester.get_tti_rx().to_uint(), + -1)) { + TESTASSERT(sched_tester.advance_tti() == SRSLTE_SUCCESS); + } + TESTASSERT(sched_tester.add_user(rnti, ue_cfg, 16) == SRSLTE_SUCCESS); + + // Ignore stats of the first TTIs until UE DRB1 is added + while (not sched_tester.get_enb_ctxt().ue_db.at(rnti)->conres_rx) { + sched_tester.advance_tti(); + } + sched_tester.total_stats = {}; + + for (uint32_t count = 0; count < params.nof_ttis; ++count) { + sched_tester.advance_tti(); + } + + return SRSLTE_SUCCESS; +} + +struct run_data { + run_params params; + float avg_dl_throughput; + float avg_ul_throughput; + float avg_dl_mcs; + float avg_ul_mcs; + std::chrono::microseconds avg_latency; +}; + +int run_benchmark_scenario(run_params params, std::vector& run_results) +{ + std::vector cell_list(1, generate_default_cell_cfg(params.nof_prbs)); + sched_interface::ue_cfg_t ue_cfg_default = 
generate_default_ue_cfg(); + sched_interface::sched_args_t sched_args = {}; + sched_args.sched_policy = params.sched_policy; + + sched sched_obj; + sched_obj.init(nullptr, sched_args); + sched_tester tester(&sched_obj, sched_args, cell_list); + + tester.total_stats = {}; + tester.current_run_params = params; + + for (uint32_t ue_idx = 0; ue_idx < params.nof_ues; ++ue_idx) { + uint16_t rnti = 0x46 + ue_idx; + // Add user (first need to advance to a PRACH TTI) + while (not srslte_prach_tti_opportunity_config_fdd( + tester.get_cell_params()[ue_cfg_default.supported_cc_list[0].enb_cc_idx].cfg.prach_config, + tester.get_tti_rx().to_uint(), + -1)) { + TESTASSERT(tester.advance_tti() == SRSLTE_SUCCESS); + } + TESTASSERT(tester.add_user(rnti, ue_cfg_default, 16) == SRSLTE_SUCCESS); + TESTASSERT(tester.advance_tti() == SRSLTE_SUCCESS); + } + + // Ignore stats of the first TTIs until all UEs DRB1 are created + auto ue_db_ctxt = tester.get_enb_ctxt().ue_db; + while (not std::all_of(ue_db_ctxt.begin(), ue_db_ctxt.end(), [](std::pair p) { + return p.second->conres_rx; + })) { + tester.advance_tti(); + ue_db_ctxt = tester.get_enb_ctxt().ue_db; + } + tester.total_stats = {}; + + // Run benchmark + for (uint32_t count = 0; count < params.nof_ttis; ++count) { + tester.advance_tti(); + } + + run_data run_result = {}; + run_result.params = params; + run_result.avg_dl_throughput = tester.total_stats.mean_dl_tbs.value() * 8.0 / 1e-3; + run_result.avg_ul_throughput = tester.total_stats.mean_ul_tbs.value() * 8.0 / 1e-3; + run_result.avg_dl_mcs = tester.total_stats.avg_dl_mcs.value(); + run_result.avg_ul_mcs = tester.total_stats.avg_ul_mcs.value(); + run_result.avg_latency = std::chrono::microseconds(static_cast(tester.total_stats.avg_latency.value() / 1000)); + run_results.push_back(run_result); + + return SRSLTE_SUCCESS; +} + +run_data expected_run_result(run_params params) +{ + assert(params.cqi == 15 && "only cqi=15 supported for now"); + run_data ret{}; + float dl_overhead = 0.9, ul_overhead = 0.75; + switch (params.nof_prbs) { + case 6: + ret.avg_dl_mcs = 25; + ret.avg_ul_mcs = 22; + dl_overhead = 0.8; + ul_overhead = 0.4; + break; + default: + ret.avg_dl_mcs = 26; + ret.avg_ul_mcs = 22; + } + + int tbs_idx = srslte_ra_tbs_idx_from_mcs(ret.avg_dl_mcs, false, false); + int tbs = srslte_ra_tbs_from_idx(tbs_idx, params.nof_prbs); + ret.avg_dl_throughput = tbs * 1e3 * dl_overhead; // bps + + tbs_idx = srslte_ra_tbs_idx_from_mcs(ret.avg_ul_mcs, false, true); + tbs = srslte_ra_tbs_from_idx(tbs_idx, params.nof_prbs - 2); + ret.avg_ul_throughput = tbs * 1e3 * ul_overhead; // bps + return ret; +} + +int run_rate_test() +{ + run_params_range run_param_list{}; + srslog::basic_logger& mac_logger = srslog::fetch_basic_logger("MAC"); + + run_param_list.nof_ues = {1}; + run_param_list.cqi = {15}; + + std::vector run_results; + size_t nof_runs = run_param_list.nof_runs(); + + for (size_t r = 0; r < nof_runs; ++r) { + run_params runparams = run_param_list.get_params(r); + + mac_logger.info("\n### New run {} ###\n", r); + TESTASSERT(run_benchmark_scenario(runparams, run_results) == SRSLTE_SUCCESS); + } + + const std::array expected_dl_rate_Mbps{2.5, 8.8, 15, 31, 47, 64}; + const std::array expected_ul_rate_Mbps{0.1, 1.8, 4, 8.3, 13, 19}; + bool success = true; + for (auto& run : run_results) { + run_data expected = expected_run_result(run.params); + if (run.avg_dl_mcs < expected.avg_dl_mcs) { + fmt::print( + "Nprb={:>2d}: DL mcs below expected ({} < {})\n", run.params.nof_prbs, run.avg_dl_mcs, expected.avg_dl_mcs); + success 
= false; + } + if (run.avg_dl_throughput < expected.avg_dl_throughput) { + fmt::print("Nprb={:>2d}: DL rate below expected ({:.2} < {:.2}) Mbps\n", + run.params.nof_prbs, + run.avg_dl_throughput / 1e6, + expected.avg_dl_throughput / 1e6); + success = false; + } + if (run.avg_ul_mcs < expected.avg_ul_mcs) { + fmt::print( + "Nprb={:>2d}: UL mcs below expected ({} < {})\n", run.params.nof_prbs, run.avg_ul_mcs, expected.avg_ul_mcs); + success = false; + } + if (run.avg_ul_throughput < expected.avg_ul_throughput) { + fmt::print("Nprb={:>2d}: UL rate below expected ({:.2} < {:.2}) Mbps\n", + run.params.nof_prbs, + run.avg_ul_throughput / 1e6, + expected.avg_ul_throughput / 1e6); + success = false; + } + } + return success ? SRSLTE_SUCCESS : SRSLTE_ERROR; +} + +void print_benchmark_results(const std::vector& run_results) +{ + srslog::flush(); + fmt::print("run | Nprb | cqi | sched pol | Nue | DL [Mbps] | UL [Mbps] | DL mcs | UL mcs | latency [usec]\n"); + fmt::print("---------------------------------------------------------------------------------------------\n"); + for (uint32_t i = 0; i < run_results.size(); ++i) { + const run_data& r = run_results[i]; + fmt::print("{:>3d}{:>6d}{:>6d}{:>12}{:>6d}{:12.2}{:12.2}{:9.1f}{:9.1f}{:12d}\n", + i, + r.params.nof_prbs, + r.params.cqi, + r.params.sched_policy, + r.params.nof_ues, + r.avg_dl_throughput / 1e6, + r.avg_ul_throughput / 1e6, + r.avg_dl_mcs, + r.avg_ul_mcs, + r.avg_latency.count()); + } +} + +int run_benchmark() +{ + run_params_range run_param_list{}; + srslog::basic_logger& mac_logger = srslog::fetch_basic_logger("MAC"); + + std::vector run_results; + size_t nof_runs = run_param_list.nof_runs(); + for (size_t r = 0; r < nof_runs; ++r) { + run_params runparams = run_param_list.get_params(r); + + mac_logger.info("\n### New run {} ###\n", r); + TESTASSERT(run_benchmark_scenario(runparams, run_results) == SRSLTE_SUCCESS); + } + + print_benchmark_results(run_results); + + return SRSLTE_SUCCESS; +} + +} // namespace srsenb + +int main() +{ + // Setup seed + srsenb::set_randseed(seed); + printf("This is the chosen seed: %u\n", seed); + + // Setup the log spy to intercept error and warning log entries. + if (!srslog::install_custom_sink( + srslte::log_sink_spy::name(), + std::unique_ptr(new srslte::log_sink_spy(srslog::get_default_log_formatter())))) { + return SRSLTE_ERROR; + } + + auto* spy = static_cast(srslog::find_sink(srslte::log_sink_spy::name())); + if (!spy) { + return SRSLTE_ERROR; + } + + auto& mac_log = srslog::fetch_basic_logger("MAC"); + mac_log.set_level(srslog::basic_levels::warning); + auto& test_log = srslog::fetch_basic_logger("TEST", *spy, false); + test_log.set_level(srslog::basic_levels::warning); + + // Start the log backend. 
+ srslog::init(); + + bool run_benchmark = false; + + TESTASSERT(srsenb::run_rate_test() == SRSLTE_SUCCESS); + if (run_benchmark) { + TESTASSERT(srsenb::run_benchmark() == SRSLTE_SUCCESS); + } + + return 0; +} \ No newline at end of file diff --git a/srsenb/test/mac/sched_sim_ue.cc b/srsenb/test/mac/sched_sim_ue.cc index e6bfc8d62..41f171e0c 100644 --- a/srsenb/test/mac/sched_sim_ue.cc +++ b/srsenb/test/mac/sched_sim_ue.cc @@ -211,10 +211,21 @@ void ue_sim::update_conn_state(const sf_output_res_t& sf_out) } } +sched_sim_base::sched_sim_base(sched_interface* sched_ptr_, + const sched_interface::sched_args_t& sched_args, + const std::vector& cell_cfg_list) : + logger(srslog::fetch_basic_logger("TEST")), sched_ptr(sched_ptr_), cell_params(cell_cfg_list.size()) +{ + for (uint32_t cc = 0; cc < cell_params.size(); ++cc) { + cell_params[cc].set_cfg(cc, cell_cfg_list[cc], sched_args); + } + sched_ptr->cell_cfg(cell_cfg_list); // call parent cfg +} + int sched_sim_base::add_user(uint16_t rnti, const sched_interface::ue_cfg_t& ue_cfg_, uint32_t preamble_idx) { CONDERROR(!srslte_prach_tti_opportunity_config_fdd( - (*cell_params)[ue_cfg_.supported_cc_list[0].enb_cc_idx].prach_config, current_tti_rx.to_uint(), -1), + cell_params[ue_cfg_.supported_cc_list[0].enb_cc_idx].cfg.prach_config, current_tti_rx.to_uint(), -1), "New user added in a non-PRACH TTI"); TESTASSERT(ue_db.count(rnti) == 0); @@ -278,7 +289,7 @@ sim_enb_ctxt_t sched_sim_base::get_enb_ctxt() const int sched_sim_base::set_default_tti_events(const sim_ue_ctxt_t& ue_ctxt, ue_tti_events& pending_events) { pending_events.cc_list.clear(); - pending_events.cc_list.resize(cell_params->size()); + pending_events.cc_list.resize(cell_params.size()); pending_events.tti_rx = current_tti_rx; for (uint32_t enb_cc_idx = 0; enb_cc_idx < pending_events.cc_list.size(); ++enb_cc_idx) { @@ -371,8 +382,8 @@ int sched_sim_base::apply_tti_events(sim_ue_ctxt_t& ue_ctxt, const ue_tti_events sched_ptr->dl_cqi_info(events.tti_rx.to_uint(), ue_ctxt.rnti, enb_cc_idx, cc_feedback.dl_cqi); } - if (cc_feedback.ul_cqi >= 0) { - sched_ptr->ul_snr_info(events.tti_rx.to_uint(), ue_ctxt.rnti, enb_cc_idx, cc_feedback.ul_cqi, 0); + if (cc_feedback.ul_snr >= 0) { + sched_ptr->ul_snr_info(events.tti_rx.to_uint(), ue_ctxt.rnti, enb_cc_idx, cc_feedback.ul_snr, 0); } } diff --git a/srsenb/test/mac/sched_sim_ue.h b/srsenb/test/mac/sched_sim_ue.h index 009a1221a..c00bab9f4 100644 --- a/srsenb/test/mac/sched_sim_ue.h +++ b/srsenb/test/mac/sched_sim_ue.h @@ -52,8 +52,8 @@ struct sim_ue_ctxt_t { }; struct sim_enb_ctxt_t { - const std::vector* cell_params; - std::map ue_db; + srslte::span cell_params; + std::map ue_db; }; struct ue_tti_events { struct cc_data { @@ -65,7 +65,7 @@ struct ue_tti_events { int ul_pid = -1; bool ul_ack = false; int dl_cqi = -1; - int ul_cqi = -1; + int ul_snr = -1; }; srslte::tti_point tti_rx; std::vector cc_list; @@ -99,11 +99,9 @@ private: class sched_sim_base { public: - sched_sim_base(sched_interface* sched_ptr_, const std::vector& cell_params_) : - logger(srslog::fetch_basic_logger("MAC")), sched_ptr(sched_ptr_), cell_params(&cell_params_) - { - sched_ptr->cell_cfg(cell_params_); // call parent cfg - } + sched_sim_base(sched_interface* sched_ptr_, + const sched_interface::sched_args_t& sched_args, + const std::vector& cell_params_); virtual ~sched_sim_base() = default; int add_user(uint16_t rnti, const sched_interface::ue_cfg_t& ue_cfg_, uint32_t preamble_idx); @@ -133,6 +131,9 @@ public: const ue_sim* ret = find_rnti(rnti); return ret == nullptr ? 
nullptr : &ret->get_ctxt().ue_cfg; } + sched_interface* get_sched() { return sched_ptr; } + srslte::const_span get_cell_params() { return cell_params; } + tti_point get_tti_rx() const { return current_tti_rx; } std::map::iterator begin() { return ue_db.begin(); } std::map::iterator end() { return ue_db.end(); } @@ -144,9 +145,9 @@ private: int set_default_tti_events(const sim_ue_ctxt_t& ue_ctxt, ue_tti_events& pending_events); int apply_tti_events(sim_ue_ctxt_t& ue_ctxt, const ue_tti_events& events); - srslog::basic_logger& logger; - sched_interface* sched_ptr; - const std::vector* cell_params; + srslog::basic_logger& logger; + sched_interface* sched_ptr; + std::vector cell_params; srslte::tti_point current_tti_rx; std::map ue_db; diff --git a/srsenb/test/mac/sched_test_common.cc b/srsenb/test/mac/sched_test_common.cc index 7f55dfca2..a05bedafe 100644 --- a/srsenb/test/mac/sched_test_common.cc +++ b/srsenb/test/mac/sched_test_common.cc @@ -74,8 +74,8 @@ void sched_sim_random::set_external_tti_events(const sim_ue_ctxt_t& ue_ctxt, ue_ } // UL CQI - if (cc_feedback.ul_cqi >= 0) { - cc_feedback.ul_cqi = std::uniform_int_distribution{5, 40}(get_rand_gen()); + if (cc_feedback.ul_snr >= 0) { + cc_feedback.ul_snr = std::uniform_int_distribution{5, 40}(get_rand_gen()); } } } @@ -131,7 +131,7 @@ int common_sched_tester::sim_cfg(sim_sched_args args) sched::init(&rrc_ptr, sim_args0.sched_args); - sched_sim.reset(new sched_sim_random{this, sim_args0.cell_cfg}); + sched_sim.reset(new sched_sim_random{this, sim_args0.sched_args, sim_args0.cell_cfg}); sched_stats.reset(new sched_result_stats{sim_args0.cell_cfg}); return SRSLTE_SUCCESS; diff --git a/srsenb/test/mac/sched_ue_ded_test_suite.cc b/srsenb/test/mac/sched_ue_ded_test_suite.cc index 10bd69a71..d0d726ce2 100644 --- a/srsenb/test/mac/sched_ue_ded_test_suite.cc +++ b/srsenb/test/mac/sched_ue_ded_test_suite.cc @@ -65,7 +65,7 @@ int test_pdsch_grant(const sim_enb_ctxt_t& enb_ctxt, tti_point tti_rx = sf_out.tti_rx; const sim_ue_ctxt_t& ue_ctxt = *enb_ctxt.ue_db.at(pdsch.dci.rnti); const sched_interface::ue_cfg_t::cc_cfg_t* cc_cfg = ue_ctxt.get_cc_cfg(enb_cc_idx); - const sched_interface::cell_cfg_t& cell_params = (*enb_ctxt.cell_params)[enb_cc_idx]; + const sched_cell_params_t& cell_params = enb_ctxt.cell_params[enb_cc_idx]; bool has_pusch_grant = find_pusch_grant(pdsch.dci.rnti, sf_out.ul_cc_result[enb_cc_idx]) != nullptr; // TEST: Check if CC is configured and active @@ -100,8 +100,8 @@ int test_pdsch_grant(const sim_enb_ctxt_t& enb_ctxt, srslte_dl_sf_cfg_t dl_sf = {}; dl_sf.cfi = sf_out.dl_cc_result[enb_cc_idx].cfi; dl_sf.tti = to_tx_dl(tti_rx).to_uint(); - srslte_ra_dl_grant_to_grant_prb_allocation(&pdsch.dci, &grant, cell_params.cell.nof_prb); - uint32_t nof_re = srslte_ra_dl_grant_nof_re(&cell_params.cell, &dl_sf, &grant); + srslte_ra_dl_grant_to_grant_prb_allocation(&pdsch.dci, &grant, cell_params.nof_prb()); + uint32_t nof_re = srslte_ra_dl_grant_nof_re(&cell_params.cfg.cell, &dl_sf, &grant); float coderate = srslte_coderate(pdsch.tbs[0] * 8, nof_re); srslte_mod_t mod = srslte_ra_dl_mod_from_mcs(pdsch.dci.tb[0].mcs_idx, ue_ctxt.ue_cfg.use_tbs_index_alt); uint32_t max_Qm = ue_ctxt.ue_cfg.use_tbs_index_alt ? 
8 : 6; @@ -112,7 +112,7 @@ int test_pdsch_grant(const sim_enb_ctxt_t& enb_ctxt, // TEST: PUCCH-ACK will not collide with SR CONDERROR(not has_pusch_grant and is_pucch_sr_collision(ue_ctxt.ue_cfg.pucch_cfg, to_tx_dl_ack(sf_out.tti_rx), - pdsch.dci.location.ncce + cell_params.n1pucch_an), + pdsch.dci.location.ncce + cell_params.cfg.n1pucch_an), "Collision detected between UE PUCCH-ACK and SR"); return SRSLTE_SUCCESS; @@ -120,7 +120,7 @@ int test_pdsch_grant(const sim_enb_ctxt_t& enb_ctxt, int test_dl_sched_result(const sim_enb_ctxt_t& enb_ctxt, const sf_output_res_t& sf_out) { - for (uint32_t cc = 0; cc < enb_ctxt.cell_params->size(); ++cc) { + for (uint32_t cc = 0; cc < enb_ctxt.cell_params.size(); ++cc) { for (uint32_t i = 0; i < sf_out.dl_cc_result[cc].nof_data_elems; ++i) { const sched_interface::dl_sched_data_t& data = sf_out.dl_cc_result[cc].data[i]; CONDERROR( @@ -135,7 +135,7 @@ int test_ul_sched_result(const sim_enb_ctxt_t& enb_ctxt, const sf_output_res_t& { uint32_t pid = to_tx_ul(sf_out.tti_rx).to_uint() % (FDD_HARQ_DELAY_UL_MS + FDD_HARQ_DELAY_DL_MS); - for (size_t cc = 0; cc < enb_ctxt.cell_params->size(); ++cc) { + for (size_t cc = 0; cc < enb_ctxt.cell_params.size(); ++cc) { const auto* phich_begin = &sf_out.ul_cc_result[cc].phich[0]; const auto* phich_end = &sf_out.ul_cc_result[cc].phich[sf_out.ul_cc_result[cc].nof_phich_elems]; const auto* pusch_begin = &sf_out.ul_cc_result[cc].pusch[0]; @@ -228,7 +228,7 @@ int test_ul_sched_result(const sim_enb_ctxt_t& enb_ctxt, const sf_output_res_t& int test_ra(const sim_enb_ctxt_t& enb_ctxt, const sf_output_res_t& sf_out) { - for (uint32_t cc = 0; cc < enb_ctxt.cell_params->size(); ++cc) { + for (uint32_t cc = 0; cc < enb_ctxt.cell_params.size(); ++cc) { const auto& dl_cc_res = sf_out.dl_cc_result[cc]; const auto& ul_cc_res = sf_out.ul_cc_result[cc]; for (const auto& ue_pair : enb_ctxt.ue_db) { @@ -242,7 +242,7 @@ int test_ra(const sim_enb_ctxt_t& enb_ctxt, const sf_output_res_t& sf_out) } // TEST: RAR allocation - uint32_t rar_win_size = (*enb_ctxt.cell_params)[cc].prach_rar_window; + uint32_t rar_win_size = enb_ctxt.cell_params[cc].cfg.prach_rar_window; srslte::tti_interval rar_window{ue.prach_tti_rx + 3, ue.prach_tti_rx + 3 + rar_win_size}; srslte::tti_point tti_tx_dl = to_tx_dl(sf_out.tti_rx); @@ -365,7 +365,7 @@ bool is_in_measgap(srslte::tti_point tti, uint32_t period, uint32_t offset) int test_meas_gaps(const sim_enb_ctxt_t& enb_ctxt, const sf_output_res_t& sf_out) { - for (uint32_t cc = 0; cc < enb_ctxt.cell_params->size(); ++cc) { + for (uint32_t cc = 0; cc < enb_ctxt.cell_params.size(); ++cc) { const auto& dl_cc_res = sf_out.dl_cc_result[cc]; const auto& ul_cc_res = sf_out.ul_cc_result[cc]; for (const auto& ue_pair : enb_ctxt.ue_db) { From ac4d058fbf20d2f1b2ea8305959d29c9d6adeee6 Mon Sep 17 00:00:00 2001 From: Francisco Date: Tue, 16 Mar 2021 22:12:39 +0000 Subject: [PATCH 34/64] correct verification of sched rates in sched benchmark test --- srsenb/src/stack/mac/sched_grid.cc | 2 +- .../src/stack/mac/sched_phy_ch/sched_dci.cc | 6 +- .../stack/mac/sched_ue_ctrl/sched_ue_cell.cc | 4 +- srsenb/test/mac/sched_benchmark.cc | 107 ++++++++++-------- srsenb/test/mac/sched_common_test_suite.cc | 2 +- srsenb/test/mac/sched_dci_test.cc | 2 +- srsenb/test/mac/sched_test_utils.h | 4 +- srsenb/test/mac/sched_ue_ded_test_suite.cc | 2 +- 8 files changed, 72 insertions(+), 57 deletions(-) diff --git a/srsenb/src/stack/mac/sched_grid.cc b/srsenb/src/stack/mac/sched_grid.cc index 925578861..5a3c1aa06 100644 --- 
a/srsenb/src/stack/mac/sched_grid.cc +++ b/srsenb/src/stack/mac/sched_grid.cc @@ -895,7 +895,7 @@ void sf_sched::set_ul_sched_result(const sf_cch_allocator::alloc_result_t& dci_r fmt::format_to(str_buffer, "SCHED: {} {} rnti=0x{:x}, cc={}, pid={}, dci=({},{}), prb={}, n_rtx={}, tbs={}, bsr={} ({}-{})", ul_alloc.is_msg3 ? "Msg3" : "UL", - ul_alloc.is_retx() ? "retx" : "newtx", + ul_alloc.is_retx() ? "retx" : "tx", user->get_rnti(), cc_cfg->enb_cc_idx, h->get_id(), diff --git a/srsenb/src/stack/mac/sched_phy_ch/sched_dci.cc b/srsenb/src/stack/mac/sched_phy_ch/sched_dci.cc index a24c27fb3..ed902a31d 100644 --- a/srsenb/src/stack/mac/sched_phy_ch/sched_dci.cc +++ b/srsenb/src/stack/mac/sched_phy_ch/sched_dci.cc @@ -83,7 +83,7 @@ tbs_info compute_mcs_and_tbs(uint32_t nof_prb, float max_coderate = srslte_cqi_to_coderate(std::min(cqi + 1U, 15U), use_tbs_index_alt); uint32_t max_Qm = (is_ul) ? (ulqam64_enabled ? 6 : 4) : (use_tbs_index_alt ? 8 : 6); - max_coderate = std::min(max_coderate, 0.93F * max_Qm); + max_coderate = std::min(max_coderate, 0.932F * max_Qm); int mcs = 0; float prev_max_coderate = 0; @@ -113,7 +113,7 @@ tbs_info compute_mcs_and_tbs(uint32_t nof_prb, // update max coderate based on mcs srslte_mod_t mod = (is_ul) ? srslte_ra_ul_mod_from_mcs(mcs) : srslte_ra_dl_mod_from_mcs(mcs, use_tbs_index_alt); uint32_t Qm = srslte_mod_bits_x_symbol(mod); - max_coderate = std::min(0.93F * Qm, max_coderate); + max_coderate = std::min(0.932F * Qm, max_coderate); if (coderate <= max_coderate) { // solution was found @@ -217,7 +217,7 @@ int generate_ra_bc_dci_format1a_common(srslte_dci_dl_t& dci, // Compute effective code rate and verify it doesn't exceed max code rate uint32_t nof_re = cell_params.get_dl_nof_res(tti_tx_dl, dci, current_cfi); - if (srslte_coderate(tbs, nof_re) >= 0.93F * Qm) { + if (srslte_coderate(tbs, nof_re) >= 0.932F * Qm) { return -1; } diff --git a/srsenb/src/stack/mac/sched_ue_ctrl/sched_ue_cell.cc b/srsenb/src/stack/mac/sched_ue_ctrl/sched_ue_cell.cc index 17cfaad96..874bb7cdd 100644 --- a/srsenb/src/stack/mac/sched_ue_ctrl/sched_ue_cell.cc +++ b/srsenb/src/stack/mac/sched_ue_ctrl/sched_ue_cell.cc @@ -234,7 +234,7 @@ tbs_info cqi_to_tbs_dl(const sched_ue_cell& cell, ret = compute_min_mcs_and_tbs_from_required_bytes( nof_prb, nof_re, cell.dl_cqi, cell.max_mcs_dl, req_bytes, false, false, use_tbs_index_alt); - // If coderate > SRSLTE_MIN(max_coderate, 0.930 * Qm) we should set TBS=0. We don't because it's not correctly + // If coderate > SRSLTE_MIN(max_coderate, 0.932 * Qm) we should set TBS=0. We don't because it's not correctly // handled by the scheduler, but we might be scheduling undecodable codewords at very low SNR if (ret.tbs_bytes < 0) { ret.mcs = 0; @@ -260,7 +260,7 @@ tbs_info cqi_to_tbs_ul(const sched_ue_cell& cell, uint32_t nof_prb, uint32_t nof ret = compute_min_mcs_and_tbs_from_required_bytes( nof_prb, nof_re, cell.ul_cqi, cell.max_mcs_ul, req_bytes, true, ulqam64_enabled, false); - // If coderate > SRSLTE_MIN(max_coderate, 0.930 * Qm) we should set TBS=0. We don't because it's not correctly + // If coderate > SRSLTE_MIN(max_coderate, 0.932 * Qm) we should set TBS=0. 
We don't because it's not correctly // handled by the scheduler, but we might be scheduling undecodable codewords at very low SNR if (ret.tbs_bytes < 0) { ret.mcs = 0; diff --git a/srsenb/test/mac/sched_benchmark.cc b/srsenb/test/mac/sched_benchmark.cc index aca7c641b..66cf6fbf4 100644 --- a/srsenb/test/mac/sched_benchmark.cc +++ b/srsenb/test/mac/sched_benchmark.cc @@ -15,8 +15,6 @@ #include "srslte/adt/accumulators.h" #include -const uint32_t seed = std::chrono::system_clock::now().time_since_epoch().count(); - namespace srsenb { struct run_params { @@ -197,8 +195,9 @@ int run_benchmark_scenario(run_params params, std::vector& run_results sched_interface::sched_args_t sched_args = {}; sched_args.sched_policy = params.sched_policy; - sched sched_obj; - sched_obj.init(nullptr, sched_args); + sched sched_obj; + rrc_dummy rrc{}; + sched_obj.init(&rrc, sched_args); sched_tester tester(&sched_obj, sched_args, cell_list); tester.total_stats = {}; @@ -248,31 +247,72 @@ run_data expected_run_result(run_params params) { assert(params.cqi == 15 && "only cqi=15 supported for now"); run_data ret{}; - float dl_overhead = 0.9, ul_overhead = 0.75; + int tbs_idx = srslte_ra_tbs_idx_from_mcs(28, false, false); + int tbs = srslte_ra_tbs_from_idx(tbs_idx, params.nof_prbs); + ret.avg_dl_throughput = tbs * 1e3; // bps + + tbs_idx = srslte_ra_tbs_idx_from_mcs(28, false, true); + uint32_t nof_pusch_prbs = params.nof_prbs - (params.nof_prbs == 6 ? 2 : 4); + tbs = srslte_ra_tbs_from_idx(tbs_idx, nof_pusch_prbs); + ret.avg_ul_throughput = tbs * 1e3; // bps + + ret.avg_dl_mcs = 27; + ret.avg_ul_mcs = 22; switch (params.nof_prbs) { case 6: ret.avg_dl_mcs = 25; - ret.avg_ul_mcs = 22; - dl_overhead = 0.8; - ul_overhead = 0.4; + ret.avg_dl_throughput *= 0.7; + ret.avg_ul_throughput *= 0.25; + break; + case 15: + ret.avg_dl_throughput *= 0.95; + ret.avg_ul_throughput *= 0.5; break; default: - ret.avg_dl_mcs = 26; - ret.avg_ul_mcs = 22; + ret.avg_dl_throughput *= 0.97; + ret.avg_ul_throughput *= 0.5; + break; } + return ret; +} + +void print_benchmark_results(const std::vector& run_results) +{ + srslog::flush(); + fmt::print("run | Nprb | cqi | sched pol | Nue | DL/UL [Mbps] | DL/UL mcs | DL/UL OH [%] | latency " + "[usec]\n"); + fmt::print("---------------------------------------------------------------------------------------" + "------\n"); + for (uint32_t i = 0; i < run_results.size(); ++i) { + const run_data& r = run_results[i]; - int tbs_idx = srslte_ra_tbs_idx_from_mcs(ret.avg_dl_mcs, false, false); - int tbs = srslte_ra_tbs_from_idx(tbs_idx, params.nof_prbs); - ret.avg_dl_throughput = tbs * 1e3 * dl_overhead; // bps + int tbs_idx = srslte_ra_tbs_idx_from_mcs(28, false, false); + int tbs = srslte_ra_tbs_from_idx(tbs_idx, r.params.nof_prbs); + float dl_rate_overhead = 1.0F - r.avg_dl_throughput / (tbs * 1e3); + tbs_idx = srslte_ra_tbs_idx_from_mcs(28, false, true); + uint32_t nof_pusch_prbs = r.params.nof_prbs - (r.params.nof_prbs == 6 ? 
2 : 4); + tbs = srslte_ra_tbs_from_idx(tbs_idx, nof_pusch_prbs); + float ul_rate_overhead = 1.0F - r.avg_ul_throughput / (tbs * 1e3); - tbs_idx = srslte_ra_tbs_idx_from_mcs(ret.avg_ul_mcs, false, true); - tbs = srslte_ra_tbs_from_idx(tbs_idx, params.nof_prbs - 2); - ret.avg_ul_throughput = tbs * 1e3 * ul_overhead; // bps - return ret; + fmt::print("{:>3d}{:>6d}{:>6d}{:>12}{:>6d}{:>9.2}/{:>4.2}{:>9.1f}/{:>4.1f}{:9.1f}/{:>4.1f}{:12d}\n", + i, + r.params.nof_prbs, + r.params.cqi, + r.params.sched_policy, + r.params.nof_ues, + r.avg_dl_throughput / 1e6, + r.avg_ul_throughput / 1e6, + r.avg_dl_mcs, + r.avg_ul_mcs, + dl_rate_overhead * 100, + ul_rate_overhead * 100, + r.avg_latency.count()); + } } int run_rate_test() { + fmt::print("\n====== Scheduler Rate Test ======\n\n"); run_params_range run_param_list{}; srslog::basic_logger& mac_logger = srslog::fetch_basic_logger("MAC"); @@ -285,13 +325,13 @@ int run_rate_test() for (size_t r = 0; r < nof_runs; ++r) { run_params runparams = run_param_list.get_params(r); - mac_logger.info("\n### New run {} ###\n", r); + mac_logger.info("\n=== New run {} ===\n", r); TESTASSERT(run_benchmark_scenario(runparams, run_results) == SRSLTE_SUCCESS); } - const std::array expected_dl_rate_Mbps{2.5, 8.8, 15, 31, 47, 64}; - const std::array expected_ul_rate_Mbps{0.1, 1.8, 4, 8.3, 13, 19}; - bool success = true; + print_benchmark_results(run_results); + + bool success = true; for (auto& run : run_results) { run_data expected = expected_run_result(run.params); if (run.avg_dl_mcs < expected.avg_dl_mcs) { @@ -322,27 +362,6 @@ int run_rate_test() return success ? SRSLTE_SUCCESS : SRSLTE_ERROR; } -void print_benchmark_results(const std::vector& run_results) -{ - srslog::flush(); - fmt::print("run | Nprb | cqi | sched pol | Nue | DL [Mbps] | UL [Mbps] | DL mcs | UL mcs | latency [usec]\n"); - fmt::print("---------------------------------------------------------------------------------------------\n"); - for (uint32_t i = 0; i < run_results.size(); ++i) { - const run_data& r = run_results[i]; - fmt::print("{:>3d}{:>6d}{:>6d}{:>12}{:>6d}{:12.2}{:12.2}{:9.1f}{:9.1f}{:12d}\n", - i, - r.params.nof_prbs, - r.params.cqi, - r.params.sched_policy, - r.params.nof_ues, - r.avg_dl_throughput / 1e6, - r.avg_ul_throughput / 1e6, - r.avg_dl_mcs, - r.avg_ul_mcs, - r.avg_latency.count()); - } -} - int run_benchmark() { run_params_range run_param_list{}; @@ -366,10 +385,6 @@ int run_benchmark() int main() { - // Setup seed - srsenb::set_randseed(seed); - printf("This is the chosen seed: %u\n", seed); - // Setup the log spy to intercept error and warning log entries. if (!srslog::install_custom_sink( srslte::log_sink_spy::name(), diff --git a/srsenb/test/mac/sched_common_test_suite.cc b/srsenb/test/mac/sched_common_test_suite.cc index 3e636af68..98bff563b 100644 --- a/srsenb/test/mac/sched_common_test_suite.cc +++ b/srsenb/test/mac/sched_common_test_suite.cc @@ -317,7 +317,7 @@ int test_dci_content_common(const sf_output_res_t& sf_out, uint32_t enb_cc_idx) float coderate = srslte_coderate(tbs * 8, nof_re); const uint32_t Qm = 2; CONDERROR( - coderate > 0.930f * Qm, "Max coderate was exceeded from %s DCI", dci.rnti == SRSLTE_SIRNTI ? "SIB" : "RAR"); + coderate > 0.932f * Qm, "Max coderate was exceeded from %s DCI", dci.rnti == SRSLTE_SIRNTI ? 
"SIB" : "RAR"); return SRSLTE_SUCCESS; }; diff --git a/srsenb/test/mac/sched_dci_test.cc b/srsenb/test/mac/sched_dci_test.cc index 00b5ed3c0..5d95a5b83 100644 --- a/srsenb/test/mac/sched_dci_test.cc +++ b/srsenb/test/mac/sched_dci_test.cc @@ -54,7 +54,7 @@ bool lower_coderate(tbs_info tb, uint32_t nof_re, const tbs_test_args& args) srslte_mod_t mod = (args.is_ul) ? srslte_ra_ul_mod_from_mcs(tb.mcs) : srslte_ra_dl_mod_from_mcs(tb.mcs, args.use_tbs_index_alt); float Qm = std::min(args.get_max_Qm(), srslte_mod_bits_x_symbol(mod)); - return coderate <= 0.930f * Qm; + return coderate <= 0.932f * Qm; } int test_mcs_tbs_dl_helper(const sched_cell_params_t& cell_params, const tbs_test_args& args, tbs_info* result) diff --git a/srsenb/test/mac/sched_test_utils.h b/srsenb/test/mac/sched_test_utils.h index 4adc9e091..e9162a87c 100644 --- a/srsenb/test/mac/sched_test_utils.h +++ b/srsenb/test/mac/sched_test_utils.h @@ -49,9 +49,9 @@ inline srsenb::sched_interface::cell_cfg_t generate_default_cell_cfg(uint32_t no cell_cfg.maxharq_msg3tx = 3; cell_cfg.initial_dl_cqi = 6; cell_cfg.target_ul_sinr = -1; - cell_cfg.nrb_cqi = 2; + cell_cfg.nrb_cqi = 1; cell_cfg.n1pucch_an = 12; - cell_cfg.delta_pucch_shift = 2; + cell_cfg.delta_pucch_shift = 1; cell_cfg.ncs_an = 0; return cell_cfg; diff --git a/srsenb/test/mac/sched_ue_ded_test_suite.cc b/srsenb/test/mac/sched_ue_ded_test_suite.cc index d0d726ce2..80ee48abd 100644 --- a/srsenb/test/mac/sched_ue_ded_test_suite.cc +++ b/srsenb/test/mac/sched_ue_ded_test_suite.cc @@ -106,7 +106,7 @@ int test_pdsch_grant(const sim_enb_ctxt_t& enb_ctxt, srslte_mod_t mod = srslte_ra_dl_mod_from_mcs(pdsch.dci.tb[0].mcs_idx, ue_ctxt.ue_cfg.use_tbs_index_alt); uint32_t max_Qm = ue_ctxt.ue_cfg.use_tbs_index_alt ? 8 : 6; uint32_t Qm = std::min(max_Qm, srslte_mod_bits_x_symbol(mod)); - CONDERROR(coderate > 0.930f * Qm, "Max coderate was exceeded"); + CONDERROR(coderate > 0.932f * Qm, "Max coderate was exceeded"); } // TEST: PUCCH-ACK will not collide with SR From c3a4cb37f47e7e32f19fa826cec4374c0b4cbac4 Mon Sep 17 00:00:00 2001 From: Francisco Date: Wed, 17 Mar 2021 10:58:00 +0000 Subject: [PATCH 35/64] extension of sched benchmark test to allow different testing modes - test rates, benchmark, test different combinations of sched parameters --- srsenb/test/mac/sched_benchmark.cc | 36 +++++++++++++++++++++++++++--- 1 file changed, 33 insertions(+), 3 deletions(-) diff --git a/srsenb/test/mac/sched_benchmark.cc b/srsenb/test/mac/sched_benchmark.cc index 66cf6fbf4..c307ee975 100644 --- a/srsenb/test/mac/sched_benchmark.cc +++ b/srsenb/test/mac/sched_benchmark.cc @@ -362,13 +362,40 @@ int run_rate_test() return success ? 
SRSLTE_SUCCESS : SRSLTE_ERROR; } +int run_all() +{ + run_params_range run_param_list{}; + srslog::basic_logger& mac_logger = srslog::fetch_basic_logger("MAC"); + + fmt::print("Running all param combinations\n"); + std::vector run_results; + size_t nof_runs = run_param_list.nof_runs(); + for (size_t r = 0; r < nof_runs; ++r) { + run_params runparams = run_param_list.get_params(r); + + mac_logger.info("\n### New run {} ###\n", r); + TESTASSERT(run_benchmark_scenario(runparams, run_results) == SRSLTE_SUCCESS); + } + + print_benchmark_results(run_results); + + return SRSLTE_SUCCESS; +} + int run_benchmark() { run_params_range run_param_list{}; srslog::basic_logger& mac_logger = srslog::fetch_basic_logger("MAC"); + run_param_list.nof_ttis = 1000000; + run_param_list.nof_prbs = {100}; + run_param_list.cqi = {15}; + run_param_list.nof_ues = {5}; + run_param_list.sched_policy = {"time_pf"}; + std::vector run_results; size_t nof_runs = run_param_list.nof_runs(); + fmt::print("Running Benchmark\n"); for (size_t r = 0; r < nof_runs; ++r) { run_params runparams = run_param_list.get_params(r); @@ -383,7 +410,7 @@ int run_benchmark() } // namespace srsenb -int main() +int main(int argc, char* argv[]) { // Setup the log spy to intercept error and warning log entries. if (!srslog::install_custom_sink( @@ -407,9 +434,12 @@ int main() bool run_benchmark = false; - TESTASSERT(srsenb::run_rate_test() == SRSLTE_SUCCESS); - if (run_benchmark) { + if (argc == 1 or strcmp(argv[1], "test") == 0) { + TESTASSERT(srsenb::run_rate_test() == SRSLTE_SUCCESS); + } else if (strcmp(argv[1], "benchmark") == 0) { TESTASSERT(srsenb::run_benchmark() == SRSLTE_SUCCESS); + } else { + TESTASSERT(srsenb::run_all() == SRSLTE_SUCCESS); } return 0; From 1d4e83473e04598f9cb0e5c0ed1b8f278b342105 Mon Sep 17 00:00:00 2001 From: Francisco Date: Tue, 16 Mar 2021 18:17:09 +0000 Subject: [PATCH 36/64] remove check that verifies that SRB0 is always activated in the scheduler. --- srsenb/src/stack/mac/sched_ue.cc | 6 ------ 1 file changed, 6 deletions(-) diff --git a/srsenb/src/stack/mac/sched_ue.cc b/srsenb/src/stack/mac/sched_ue.cc index e6a68575c..8b8bc765c 100644 --- a/srsenb/src/stack/mac/sched_ue.cc +++ b/srsenb/src/stack/mac/sched_ue.cc @@ -785,12 +785,6 @@ srslte::interval sched_ue::get_requested_dl_bytes(uint32_t enb_cc_idx) assert(cells.at(enb_cc_idx).configured()); /* Set Maximum boundary */ - // Ensure there is space for ConRes and RRC Setup - // SRB0 is a special case due to being RLC TM (no segmentation possible) - if (not lch_handler.is_bearer_dl(0)) { - logger.error("SRB0 must always be activated for DL"); - return {}; - } if (cells[enb_cc_idx].cc_state() != cc_st::active) { return {}; } From 875773556ef293b522bf71734d10fc94de2c6468 Mon Sep 17 00:00:00 2001 From: faluco Date: Wed, 17 Mar 2021 15:17:31 +0100 Subject: [PATCH 37/64] Fixed a formatting error in a sched entry. 
--- srsenb/src/stack/mac/sched_phy_ch/sched_dci.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/srsenb/src/stack/mac/sched_phy_ch/sched_dci.cc b/srsenb/src/stack/mac/sched_phy_ch/sched_dci.cc index ed902a31d..adcd4034f 100644 --- a/srsenb/src/stack/mac/sched_phy_ch/sched_dci.cc +++ b/srsenb/src/stack/mac/sched_phy_ch/sched_dci.cc @@ -336,8 +336,8 @@ void log_broadcast_allocation(const sched_interface::dl_sched_bc_t& bc, bc.dci.tb[0].mcs_idx); } else { get_mac_logger().info("SCHED: PCH, cc=%d, rbgs=%s, dci=(%d,%d), tbs=%d, mcs=%d", - srslte::to_c_str(str_buffer), cell_params.enb_cc_idx, + srslte::to_c_str(str_buffer), bc.dci.location.L, bc.dci.location.ncce, bc.tbs, From c0a90c5aa876c4b6221a5af2b9724a4e24a80fed Mon Sep 17 00:00:00 2001 From: Francisco Date: Wed, 17 Mar 2021 13:09:39 +0000 Subject: [PATCH 38/64] refactored sf result resetting to avoid realloc of memory --- srsenb/hdr/stack/mac/sched.h | 4 +- srsenb/hdr/stack/mac/sched_carrier.h | 6 +-- srsenb/hdr/stack/mac/sched_grid.h | 53 +++++++++++++++-------- srsenb/src/stack/mac/sched.cc | 18 ++++---- srsenb/src/stack/mac/sched_carrier.cc | 20 ++++----- srsenb/src/stack/mac/sched_grid.cc | 61 ++++++++++----------------- srsenb/test/mac/sched_test_rand.cc | 1 - 7 files changed, 82 insertions(+), 81 deletions(-) diff --git a/srsenb/hdr/stack/mac/sched.h b/srsenb/hdr/stack/mac/sched.h index ff5fbad48..3a8e68f33 100644 --- a/srsenb/hdr/stack/mac/sched.h +++ b/srsenb/hdr/stack/mac/sched.h @@ -97,11 +97,11 @@ protected: std::vector > carrier_schedulers; // Storage of past scheduling results - sched_result_list sched_results; + sched_result_ringbuffer sched_results; srslte::tti_point last_tti; std::mutex sched_mutex; - std::atomic configured; + bool configured; }; } // namespace srsenb diff --git a/srsenb/hdr/stack/mac/sched_carrier.h b/srsenb/hdr/stack/mac/sched_carrier.h index 01711ec98..03cd5f498 100644 --- a/srsenb/hdr/stack/mac/sched_carrier.h +++ b/srsenb/hdr/stack/mac/sched_carrier.h @@ -28,7 +28,7 @@ public: explicit carrier_sched(rrc_interface_mac* rrc_, std::map >* ue_db_, uint32_t enb_cc_idx_, - sched_result_list* sched_results_); + sched_result_ringbuffer* sched_results_); ~carrier_sched(); void reset(); void carrier_cfg(const sched_cell_params_t& sched_params_); @@ -57,10 +57,10 @@ private: const uint32_t enb_cc_idx; // Subframe scheduling logic - std::array sf_scheds; + srslte::circular_array sf_scheds; // scheduling results - sched_result_list* prev_sched_results; + sched_result_ringbuffer* prev_sched_results; std::vector sf_dl_mask; ///< Some TTIs may be forbidden for DL sched due to MBMS diff --git a/srsenb/hdr/stack/mac/sched_grid.h b/srsenb/hdr/stack/mac/sched_grid.h index d77e2e1d4..f19544398 100644 --- a/srsenb/hdr/stack/mac/sched_grid.h +++ b/srsenb/hdr/stack/mac/sched_grid.h @@ -17,6 +17,7 @@ #include "sched_phy_ch/sf_cch_allocator.h" #include "sched_ue.h" #include "srslte/adt/bounded_bitset.h" +#include "srslte/adt/circular_array.h" #include "srslte/srslog/srslog.h" #include #include @@ -50,43 +51,61 @@ struct alloc_outcome_t { //! 
Result of a Subframe sched computation struct cc_sched_result { - tti_point tti_rx; - sched_interface::dl_sched_res_t dl_sched_result = {}; - sched_interface::ul_sched_res_t ul_sched_result = {}; + bool generated = false; rbgmask_t dl_mask = {}; ///< Accumulation of all DL RBG allocations prbmask_t ul_mask = {}; ///< Accumulation of all UL PRB allocations pdcch_mask_t pdcch_mask = {}; ///< Accumulation of all CCE allocations - - bool is_generated(tti_point tti_rx_) const { return tti_rx == tti_rx_; } + sched_interface::dl_sched_res_t dl_sched_result = {}; + sched_interface::ul_sched_res_t ul_sched_result = {}; }; struct sf_sched_result { - srslte::tti_point tti_rx; + tti_point tti_rx; std::vector enb_cc_list; - cc_sched_result* new_cc(uint32_t enb_cc_idx); + void new_tti(tti_point tti_rx); + bool is_generated(uint32_t enb_cc_idx) const + { + return enb_cc_list.size() > enb_cc_idx and enb_cc_list[enb_cc_idx].generated; + } const cc_sched_result* get_cc(uint32_t enb_cc_idx) const { - return enb_cc_idx < enb_cc_list.size() ? &enb_cc_list[enb_cc_idx] : nullptr; + assert(enb_cc_idx < enb_cc_list.size()); + return &enb_cc_list[enb_cc_idx]; } cc_sched_result* get_cc(uint32_t enb_cc_idx) { - return enb_cc_idx < enb_cc_list.size() ? &enb_cc_list[enb_cc_idx] : nullptr; + assert(enb_cc_idx < enb_cc_list.size()); + return &enb_cc_list[enb_cc_idx]; } bool is_ul_alloc(uint16_t rnti) const; bool is_dl_alloc(uint16_t rnti) const; }; -struct sched_result_list { +struct sched_result_ringbuffer { public: - sf_sched_result* new_tti(srslte::tti_point tti_rx); - sf_sched_result* get_sf(srslte::tti_point tti_rx); - const sf_sched_result* get_sf(srslte::tti_point tti_rx) const; - const cc_sched_result* get_cc(srslte::tti_point tti_rx, uint32_t enb_cc_idx) const; - cc_sched_result* get_cc(srslte::tti_point tti_rx, uint32_t enb_cc_idx); + void set_nof_carriers(uint32_t nof_carriers); + void new_tti(srslte::tti_point tti_rx); + bool has_sf(srslte::tti_point tti_rx) const { return results[tti_rx.to_uint()].tti_rx == tti_rx; } + sf_sched_result* get_sf(srslte::tti_point tti_rx) + { + assert(has_sf(tti_rx)); + return &results[tti_rx.to_uint()]; + } + const sf_sched_result* get_sf(srslte::tti_point tti_rx) const + { + assert(has_sf(tti_rx)); + return &results[tti_rx.to_uint()]; + } + const cc_sched_result* get_cc(srslte::tti_point tti_rx, uint32_t enb_cc_idx) const + { + return get_sf(tti_rx)->get_cc(enb_cc_idx); + } + cc_sched_result* get_cc(srslte::tti_point tti_rx, uint32_t enb_cc_idx) { return get_sf(tti_rx)->get_cc(enb_cc_idx); } private: - std::array results; + uint32_t nof_carriers = 1; + srslte::circular_array results; }; /// manages a subframe grid resources, namely CCE and DL/UL RB allocations @@ -205,7 +224,7 @@ public: alloc_outcome_t alloc_ul(sched_ue* user, prb_interval alloc, ul_alloc_t::type_t alloc_type, bool is_msg3 = false, int msg3_mcs = -1); bool reserve_ul_prbs(const prbmask_t& ulmask, bool strict) { return tti_alloc.reserve_ul_prbs(ulmask, strict); } - bool alloc_phich(sched_ue* user, sched_interface::ul_sched_res_t* ul_sf_result); + bool alloc_phich(sched_ue* user); // compute DCIs and generate dl_sched_result/ul_sched_result for a given TTI void generate_sched_results(sched_ue_list& ue_db); diff --git a/srsenb/src/stack/mac/sched.cc b/srsenb/src/stack/mac/sched.cc index 336320c6e..75e1e54dc 100644 --- a/srsenb/src/stack/mac/sched.cc +++ b/srsenb/src/stack/mac/sched.cc @@ -68,6 +68,8 @@ int sched::cell_cfg(const std::vector& cell_cfg) } } + sched_results.set_nof_carriers(cell_cfg.size()); + // 
Create remaining cells, if not created yet uint32_t prev_size = carrier_schedulers.size(); carrier_schedulers.resize(sched_cell_params.size()); @@ -80,7 +82,7 @@ int sched::cell_cfg(const std::vector& cell_cfg) carrier_schedulers[i]->carrier_cfg(sched_cell_params[i]); } - configured.store(true, std::memory_order_release); + configured = true; return 0; } @@ -287,11 +289,10 @@ std::array sched::get_scell_activation_mask(uint16_t // Downlink Scheduler API int sched::dl_sched(uint32_t tti_tx_dl, uint32_t enb_cc_idx, sched_interface::dl_sched_res_t& sched_result) { - if (not configured.load(std::memory_order_acquire)) { + std::lock_guard lock(sched_mutex); + if (not configured) { return 0; } - - std::lock_guard lock(sched_mutex); if (enb_cc_idx >= carrier_schedulers.size()) { return 0; } @@ -308,11 +309,10 @@ int sched::dl_sched(uint32_t tti_tx_dl, uint32_t enb_cc_idx, sched_interface::dl // Uplink Scheduler API int sched::ul_sched(uint32_t tti, uint32_t enb_cc_idx, srsenb::sched_interface::ul_sched_res_t& sched_result) { - if (not configured.load(std::memory_order_acquire)) { + std::lock_guard lock(sched_mutex); + if (not configured) { return 0; } - - std::lock_guard lock(sched_mutex); if (enb_cc_idx >= carrier_schedulers.size()) { return 0; } @@ -346,9 +346,7 @@ void sched::new_tti(tti_point tti_rx) /// Check if TTI result is generated bool sched::is_generated(srslte::tti_point tti_rx, uint32_t enb_cc_idx) const { - const sf_sched_result* sf_result = sched_results.get_sf(tti_rx); - return sf_result != nullptr and sf_result->get_cc(enb_cc_idx) != nullptr and - sf_result->get_cc(enb_cc_idx)->is_generated(tti_rx); + return sched_results.has_sf(tti_rx) and sched_results.get_sf(tti_rx)->is_generated(enb_cc_idx); } // Common way to access ue_db elements in a read locking way diff --git a/srsenb/src/stack/mac/sched_carrier.cc b/srsenb/src/stack/mac/sched_carrier.cc index c5930ab4e..0813f1b30 100644 --- a/srsenb/src/stack/mac/sched_carrier.cc +++ b/srsenb/src/stack/mac/sched_carrier.cc @@ -322,10 +322,10 @@ void ra_sched::reset() * Carrier scheduling *******************************************************/ -sched::carrier_sched::carrier_sched(rrc_interface_mac* rrc_, - sched_ue_list* ue_db_, - uint32_t enb_cc_idx_, - sched_result_list* sched_results_) : +sched::carrier_sched::carrier_sched(rrc_interface_mac* rrc_, + sched_ue_list* ue_db_, + uint32_t enb_cc_idx_, + sched_result_ringbuffer* sched_results_) : rrc(rrc_), ue_db(ue_db_), logger(srslog::fetch_basic_logger("MAC")), @@ -376,7 +376,7 @@ const cc_sched_result& sched::carrier_sched::generate_tti_result(tti_point tti_r { sf_sched* tti_sched = get_sf_sched(tti_rx); sf_sched_result* sf_result = prev_sched_results->get_sf(tti_rx); - cc_sched_result* cc_result = sf_result->new_cc(enb_cc_idx); + cc_sched_result* cc_result = sf_result->get_cc(enb_cc_idx); bool dl_active = sf_dl_mask[tti_sched->get_tti_tx_dl().to_uint() % sf_dl_mask.size()] == 0; @@ -390,7 +390,7 @@ const cc_sched_result& sched::carrier_sched::generate_tti_result(tti_point tti_r if (cc_result->ul_sched_result.nof_phich_elems >= MAX_PHICH_LIST) { break; } - tti_sched->alloc_phich(ue_pair.second.get(), &cc_result->ul_sched_result); + tti_sched->alloc_phich(ue_pair.second.get()); } /* Schedule DL control data */ @@ -460,13 +460,13 @@ int sched::carrier_sched::alloc_ul_users(sf_sched* tti_sched) sf_sched* sched::carrier_sched::get_sf_sched(tti_point tti_rx) { - sf_sched* ret = &sf_scheds[tti_rx.to_uint() % sf_scheds.size()]; + sf_sched* ret = &sf_scheds[tti_rx.to_uint()]; if 
(ret->get_tti_rx() != tti_rx) { - sf_sched_result* sf_res = prev_sched_results->get_sf(tti_rx); - if (sf_res == nullptr) { + if (not prev_sched_results->has_sf(tti_rx)) { // Reset if tti_rx has not been yet set in the sched results - sf_res = prev_sched_results->new_tti(tti_rx); + prev_sched_results->new_tti(tti_rx); } + sf_sched_result* sf_res = prev_sched_results->get_sf(tti_rx); // start new TTI for the given CC. ret->new_tti(tti_rx, sf_res); } diff --git a/srsenb/src/stack/mac/sched_grid.cc b/srsenb/src/stack/mac/sched_grid.cc index 5a3c1aa06..3c9e915ef 100644 --- a/srsenb/src/stack/mac/sched_grid.cc +++ b/srsenb/src/stack/mac/sched_grid.cc @@ -51,12 +51,13 @@ const char* alloc_outcome_t::to_string() const return "unknown error"; } -cc_sched_result* sf_sched_result::new_cc(uint32_t enb_cc_idx) +void sf_sched_result::new_tti(tti_point tti_rx_) { - if (enb_cc_idx >= enb_cc_list.size()) { - enb_cc_list.resize(enb_cc_idx + 1); + assert(tti_rx != tti_rx_); + tti_rx = tti_rx_; + for (auto& cc : enb_cc_list) { + cc = {}; } - return &enb_cc_list[enb_cc_idx]; } bool sf_sched_result::is_ul_alloc(uint16_t rnti) const @@ -82,36 +83,18 @@ bool sf_sched_result::is_dl_alloc(uint16_t rnti) const return false; } -sf_sched_result* sched_result_list::new_tti(srslte::tti_point tti_rx) -{ - sf_sched_result* res = &results[tti_rx.to_uint() % results.size()]; - res->tti_rx = tti_rx; - res->enb_cc_list.clear(); - return res; -} - -sf_sched_result* sched_result_list::get_sf(srslte::tti_point tti_rx) -{ - sf_sched_result* res = &results[tti_rx.to_uint() % results.size()]; - return (res->tti_rx != tti_rx) ? nullptr : res; -} - -const sf_sched_result* sched_result_list::get_sf(srslte::tti_point tti_rx) const +void sched_result_ringbuffer::set_nof_carriers(uint32_t nof_carriers_) { - const sf_sched_result* res = &results[tti_rx.to_uint() % results.size()]; - return (res->tti_rx != tti_rx) ? nullptr : res; -} - -const cc_sched_result* sched_result_list::get_cc(srslte::tti_point tti_rx, uint32_t enb_cc_idx) const -{ - const sf_sched_result* res = get_sf(tti_rx); - return res != nullptr ? res->get_cc(enb_cc_idx) : nullptr; + nof_carriers = nof_carriers_; + for (auto& sf_res : results) { + sf_res.enb_cc_list.resize(nof_carriers_); + } } -cc_sched_result* sched_result_list::get_cc(srslte::tti_point tti_rx, uint32_t enb_cc_idx) +void sched_result_ringbuffer::new_tti(srslte::tti_point tti_rx) { - sf_sched_result* res = get_sf(tti_rx); - return res != nullptr ? 
res->get_cc(enb_cc_idx) : nullptr; + sf_sched_result* res = &results[tti_rx.to_uint()]; + res->new_tti(tti_rx); } /******************************************************* @@ -634,14 +617,16 @@ alloc_outcome_t sf_sched::alloc_ul_user(sched_ue* user, prb_interval alloc) return alloc_ul(user, alloc, alloc_type, h->is_msg3()); } -bool sf_sched::alloc_phich(sched_ue* user, sched_interface::ul_sched_res_t* ul_sf_result) +bool sf_sched::alloc_phich(sched_ue* user) { + using phich_t = sched_interface::ul_sched_phich_t; + + auto* ul_sf_result = &cc_results->get_cc(cc_cfg->enb_cc_idx)->ul_sched_result; if (ul_sf_result->nof_phich_elems >= sched_interface::MAX_PHICH_LIST) { logger.warning("SCHED: Maximum number of PHICH allocations has been reached"); return false; } - using phich_t = sched_interface::ul_sched_phich_t; - auto& phich_list = ul_sf_result->phich[ul_sf_result->nof_phich_elems]; + phich_t& phich_item = ul_sf_result->phich[ul_sf_result->nof_phich_elems]; auto p = user->get_active_cell_index(cc_cfg->enb_cc_idx); if (not p.first) { @@ -653,8 +638,8 @@ bool sf_sched::alloc_phich(sched_ue* user, sched_interface::ul_sched_res_t* ul_s /* Indicate PHICH acknowledgment if needed */ if (h->has_pending_phich()) { - phich_list.phich = h->pop_pending_phich() ? phich_t::ACK : phich_t::NACK; - phich_list.rnti = user->get_rnti(); + phich_item.phich = h->pop_pending_phich() ? phich_t::ACK : phich_t::NACK; + phich_item.rnti = user->get_rnti(); ul_sf_result->nof_phich_elems++; return true; } @@ -968,9 +953,9 @@ void sf_sched::generate_sched_results(sched_ue_list& ue_db) set_ul_sched_result(dci_result, &cc_result->ul_sched_result, ue_db); /* Store remaining sf_sched results for this TTI */ - cc_result->dl_mask = tti_alloc.get_dl_mask(); - cc_result->ul_mask = tti_alloc.get_ul_mask(); - cc_result->tti_rx = get_tti_rx(); + cc_result->dl_mask = tti_alloc.get_dl_mask(); + cc_result->ul_mask = tti_alloc.get_ul_mask(); + cc_result->generated = true; } uint32_t sf_sched::get_nof_ctrl_symbols() const diff --git a/srsenb/test/mac/sched_test_rand.cc b/srsenb/test/mac/sched_test_rand.cc index 20ee20174..114d19adb 100644 --- a/srsenb/test/mac/sched_test_rand.cc +++ b/srsenb/test/mac/sched_test_rand.cc @@ -139,7 +139,6 @@ int sched_tester::process_results() { const srsenb::cc_sched_result* cc_result = sched_results.get_cc(tti_rx, CARRIER_IDX); srsenb::sf_output_res_t sf_out{sched_cell_params, tti_rx, tti_info.ul_sched_result, tti_info.dl_sched_result}; - TESTASSERT(tti_rx == cc_result->tti_rx); // Common tests TESTASSERT(test_pdcch_collisions(sf_out, CARRIER_IDX, &cc_result->pdcch_mask) == SRSLTE_SUCCESS); From 0d918024959132e24413b3e94bb5acdc64a049d1 Mon Sep 17 00:00:00 2001 From: Francisco Date: Wed, 17 Mar 2021 15:39:03 +0000 Subject: [PATCH 39/64] sched optimization - swap c-arrays for bounded_vector in sched_interface to reduce time performing bzero/memcpy in the scheduler --- .../srslte/interfaces/sched_interface.h | 25 +++---- srsenb/src/stack/mac/mac.cc | 12 ++-- srsenb/src/stack/mac/sched_carrier.cc | 2 +- srsenb/src/stack/mac/sched_grid.cc | 70 ++++++++----------- srsenb/src/stack/mac/sched_helpers.cc | 5 +- srsenb/test/mac/sched_benchmark.cc | 8 +-- srsenb/test/mac/sched_ca_test.cc | 2 +- srsenb/test/mac/sched_common_test_suite.cc | 28 ++++---- srsenb/test/mac/sched_sim_ue.cc | 12 ++-- srsenb/test/mac/sched_test_common.cc | 4 +- srsenb/test/mac/sched_test_rand.cc | 10 +-- srsenb/test/mac/sched_ue_ded_test_suite.cc | 38 +++++----- 12 files changed, 97 insertions(+), 119 deletions(-) diff --git 
a/lib/include/srslte/interfaces/sched_interface.h b/lib/include/srslte/interfaces/sched_interface.h index 781638c50..375e392f6 100644 --- a/lib/include/srslte/interfaces/sched_interface.h +++ b/lib/include/srslte/interfaces/sched_interface.h @@ -214,27 +214,22 @@ public: } dl_sched_bc_t; - typedef struct { - uint32_t cfi; - uint32_t nof_data_elems; - uint32_t nof_rar_elems; - uint32_t nof_bc_elems; - dl_sched_data_t data[MAX_DATA_LIST]; - dl_sched_rar_t rar[MAX_RAR_LIST]; - dl_sched_bc_t bc[MAX_BC_LIST]; - } dl_sched_res_t; + struct dl_sched_res_t { + uint32_t cfi; + srslte::bounded_vector data; + srslte::bounded_vector rar; + srslte::bounded_vector bc; + }; typedef struct { uint16_t rnti; enum phich_elem { ACK, NACK } phich; } ul_sched_phich_t; - typedef struct { - uint32_t nof_dci_elems; - uint32_t nof_phich_elems; - ul_sched_data_t pusch[MAX_DATA_LIST]; - ul_sched_phich_t phich[MAX_PHICH_LIST]; - } ul_sched_res_t; + struct ul_sched_res_t { + srslte::bounded_vector pusch; + srslte::bounded_vector phich; + }; /******************* Scheduler Control ****************************/ diff --git a/srsenb/src/stack/mac/mac.cc b/srsenb/src/stack/mac/mac.cc index 4ec63739a..e1a2a5745 100644 --- a/srsenb/src/stack/mac/mac.cc +++ b/srsenb/src/stack/mac/mac.cc @@ -584,7 +584,7 @@ int mac::get_dl_sched(uint32_t tti_tx_dl, dl_sched_list_t& dl_sched_res_list) srslte::rwlock_read_guard lock(rwlock); // Copy data grants - for (uint32_t i = 0; i < sched_result.nof_data_elems; i++) { + for (uint32_t i = 0; i < sched_result.data.size(); i++) { uint32_t tb_count = 0; // Get UE @@ -645,7 +645,7 @@ int mac::get_dl_sched(uint32_t tti_tx_dl, dl_sched_list_t& dl_sched_res_list) } // Copy RAR grants - for (uint32_t i = 0; i < sched_result.nof_rar_elems; i++) { + for (uint32_t i = 0; i < sched_result.rar.size(); i++) { // Copy dci info dl_sched_res->pdsch[n].dci = sched_result.rar[i].dci; @@ -680,7 +680,7 @@ int mac::get_dl_sched(uint32_t tti_tx_dl, dl_sched_list_t& dl_sched_res_list) } // Copy SI and Paging grants - for (uint32_t i = 0; i < sched_result.nof_bc_elems; i++) { + for (uint32_t i = 0; i < sched_result.bc.size(); i++) { // Copy dci info dl_sched_res->pdsch[n].dci = sched_result.bc[i].dci; @@ -900,7 +900,7 @@ int mac::get_ul_sched(uint32_t tti_tx_ul, ul_sched_list_t& ul_sched_res_list) // Copy DCI grants phy_ul_sched_res->nof_grants = 0; int n = 0; - for (uint32_t i = 0; i < sched_result.nof_dci_elems; i++) { + for (uint32_t i = 0; i < sched_result.pusch.size(); i++) { if (sched_result.pusch[i].tbs > 0) { // Get UE uint16_t rnti = sched_result.pusch[i].dci.rnti; @@ -943,11 +943,11 @@ int mac::get_ul_sched(uint32_t tti_tx_ul, ul_sched_list_t& ul_sched_res_list) } // Copy PHICH actions - for (uint32_t i = 0; i < sched_result.nof_phich_elems; i++) { + for (uint32_t i = 0; i < sched_result.phich.size(); i++) { phy_ul_sched_res->phich[i].ack = sched_result.phich[i].phich == sched_interface::ul_sched_phich_t::ACK; phy_ul_sched_res->phich[i].rnti = sched_result.phich[i].rnti; } - phy_ul_sched_res->nof_phich = sched_result.nof_phich_elems; + phy_ul_sched_res->nof_phich = sched_result.phich.size(); } // clear old buffers from all users for (auto& u : ue_db) { diff --git a/srsenb/src/stack/mac/sched_carrier.cc b/srsenb/src/stack/mac/sched_carrier.cc index 0813f1b30..fbbbf3cc6 100644 --- a/srsenb/src/stack/mac/sched_carrier.cc +++ b/srsenb/src/stack/mac/sched_carrier.cc @@ -387,7 +387,7 @@ const cc_sched_result& sched::carrier_sched::generate_tti_result(tti_point tti_r /* Schedule PHICH */ for (auto& ue_pair : *ue_db) 
{ - if (cc_result->ul_sched_result.nof_phich_elems >= MAX_PHICH_LIST) { + if (cc_result->ul_sched_result.phich.size() >= MAX_PHICH_LIST) { break; } tti_sched->alloc_phich(ue_pair.second.get()); diff --git a/srsenb/src/stack/mac/sched_grid.cc b/srsenb/src/stack/mac/sched_grid.cc index 3c9e915ef..021969bfd 100644 --- a/srsenb/src/stack/mac/sched_grid.cc +++ b/srsenb/src/stack/mac/sched_grid.cc @@ -63,7 +63,7 @@ void sf_sched_result::new_tti(tti_point tti_rx_) bool sf_sched_result::is_ul_alloc(uint16_t rnti) const { for (const auto& cc : enb_cc_list) { - for (uint32_t j = 0; j < cc.ul_sched_result.nof_dci_elems; ++j) { + for (uint32_t j = 0; j < cc.ul_sched_result.pusch.size(); ++j) { if (cc.ul_sched_result.pusch[j].dci.rnti == rnti) { return true; } @@ -74,7 +74,7 @@ bool sf_sched_result::is_ul_alloc(uint16_t rnti) const bool sf_sched_result::is_dl_alloc(uint16_t rnti) const { for (const auto& cc : enb_cc_list) { - for (uint32_t j = 0; j < cc.dl_sched_result.nof_data_elems; ++j) { + for (uint32_t j = 0; j < cc.dl_sched_result.data.size(); ++j) { if (cc.dl_sched_result.data[j].dci.rnti == rnti) { return true; } @@ -622,11 +622,10 @@ bool sf_sched::alloc_phich(sched_ue* user) using phich_t = sched_interface::ul_sched_phich_t; auto* ul_sf_result = &cc_results->get_cc(cc_cfg->enb_cc_idx)->ul_sched_result; - if (ul_sf_result->nof_phich_elems >= sched_interface::MAX_PHICH_LIST) { + if (ul_sf_result->phich.full()) { logger.warning("SCHED: Maximum number of PHICH allocations has been reached"); return false; } - phich_t& phich_item = ul_sf_result->phich[ul_sf_result->nof_phich_elems]; auto p = user->get_active_cell_index(cc_cfg->enb_cc_idx); if (not p.first) { @@ -638,9 +637,9 @@ bool sf_sched::alloc_phich(sched_ue* user) /* Indicate PHICH acknowledgment if needed */ if (h->has_pending_phich()) { - phich_item.phich = h->pop_pending_phich() ? phich_t::ACK : phich_t::NACK; - phich_item.rnti = user->get_rnti(); - ul_sf_result->nof_phich_elems++; + ul_sf_result->phich.emplace_back(); + ul_sf_result->phich.back().rnti = user->get_rnti(); + ul_sf_result->phich.back().phich = h->pop_pending_phich() ? 
phich_t::ACK : phich_t::NACK; return true; } return false; @@ -650,14 +649,9 @@ void sf_sched::set_bc_sched_result(const sf_cch_allocator::alloc_result_t& dci_r sched_interface::dl_sched_res_t* dl_result) { for (const auto& bc_alloc : bc_allocs) { - sched_interface::dl_sched_bc_t* bc = &dl_result->bc[dl_result->nof_bc_elems]; - - *bc = bc_alloc.bc_grant; - // assign NCCE/L - bc->dci.location = dci_result[bc_alloc.dci_idx]->dci_pos; - dl_result->nof_bc_elems++; - - log_broadcast_allocation(*bc, bc_alloc.rbg_range, *cc_cfg); + dl_result->bc.emplace_back(bc_alloc.bc_grant); + dl_result->bc.back().dci.location = dci_result[bc_alloc.dci_idx]->dci_pos; + log_broadcast_allocation(dl_result->bc.back(), bc_alloc.rbg_range, *cc_cfg); } } @@ -665,15 +659,9 @@ void sf_sched::set_rar_sched_result(const sf_cch_allocator::alloc_result_t& dci_ sched_interface::dl_sched_res_t* dl_result) { for (const auto& rar_alloc : rar_allocs) { - sched_interface::dl_sched_rar_t* rar = &dl_result->rar[dl_result->nof_rar_elems]; - - // Setup RAR process - *rar = rar_alloc.rar_grant; - // Assign NCCE/L - rar->dci.location = dci_result[rar_alloc.alloc_data.dci_idx]->dci_pos; - dl_result->nof_rar_elems++; - - log_rar_allocation(*rar, rar_alloc.alloc_data.rbg_range); + dl_result->rar.emplace_back(rar_alloc.rar_grant); + dl_result->rar.back().dci.location = dci_result[rar_alloc.alloc_data.dci_idx]->dci_pos; + log_rar_allocation(dl_result->rar.back(), rar_alloc.alloc_data.rbg_range); } } @@ -682,7 +670,8 @@ void sf_sched::set_dl_data_sched_result(const sf_cch_allocator::alloc_result_t& sched_ue_list& ue_list) { for (const auto& data_alloc : data_allocs) { - sched_interface::dl_sched_data_t* data = &dl_result->data[dl_result->nof_data_elems]; + dl_result->data.emplace_back(); + sched_interface::dl_sched_data_t* data = &dl_result->data.back(); // Assign NCCE/L data->dci.location = dci_result[data_alloc.dci_idx]->dci_pos; @@ -730,8 +719,6 @@ void sf_sched::set_dl_data_sched_result(const sf_cch_allocator::alloc_result_t& data_before, user->get_requested_dl_bytes(cc_cfg->enb_cc_idx).stop()); logger.info("%s", srslte::to_c_str(str_buffer)); - - dl_result->nof_data_elems++; } } @@ -773,7 +760,7 @@ uci_pusch_t is_uci_included(const sf_sched* sf_sched, needs_ack_uci = sf_sched->is_dl_alloc(user->get_rnti()); } else { auto& dl_result = other_cc_results.enb_cc_list[enbccidx].dl_sched_result; - for (uint32_t j = 0; j < dl_result.nof_data_elems; ++j) { + for (uint32_t j = 0; j < dl_result.data.size(); ++j) { if (dl_result.data[j].dci.rnti == user->get_rnti()) { needs_ack_uci = true; break; @@ -802,7 +789,7 @@ uci_pusch_t is_uci_included(const sf_sched* sf_sched, } for (uint32_t enbccidx = 0; enbccidx < other_cc_results.enb_cc_list.size(); ++enbccidx) { - for (uint32_t j = 0; j < other_cc_results.enb_cc_list[enbccidx].ul_sched_result.nof_dci_elems; ++j) { + for (uint32_t j = 0; j < other_cc_results.enb_cc_list[enbccidx].ul_sched_result.pusch.size(); ++j) { // Checks all the UL grants already allocated for the given rnti if (other_cc_results.enb_cc_list[enbccidx].ul_sched_result.pusch[j].dci.rnti == user->get_rnti()) { auto p = user->get_active_cell_index(enbccidx); @@ -827,8 +814,6 @@ void sf_sched::set_ul_sched_result(const sf_cch_allocator::alloc_result_t& dci_r { /* Set UL data DCI locs and format */ for (const auto& ul_alloc : ul_data_allocs) { - sched_interface::ul_sched_data_t* pusch = &ul_result->pusch[ul_result->nof_dci_elems]; - auto ue_it = ue_list.find(ul_alloc.rnti); if (ue_it == ue_list.end()) { continue; @@ -844,8 +829,10 @@ 
void sf_sched::set_ul_sched_result(const sf_cch_allocator::alloc_result_t& dci_r uci_pusch_t uci_type = is_uci_included(this, *cc_results, user, cc_cfg->enb_cc_idx); /* Generate DCI Format1A */ - uint32_t total_data_before = user->get_pending_ul_data_total(get_tti_tx_ul(), cc_cfg->enb_cc_idx); - int tbs = user->generate_format0(pusch, + ul_result->pusch.emplace_back(); + sched_interface::ul_sched_data_t& pusch = ul_result->pusch.back(); + uint32_t total_data_before = user->get_pending_ul_data_total(get_tti_tx_ul(), cc_cfg->enb_cc_idx); + int tbs = user->generate_format0(&pusch, get_tti_tx_ul(), cc_cfg->enb_cc_idx, ul_alloc.alloc, @@ -857,7 +844,7 @@ void sf_sched::set_ul_sched_result(const sf_cch_allocator::alloc_result_t& dci_r ul_harq_proc* h = user->get_ul_harq(get_tti_tx_ul(), cc_cfg->enb_cc_idx); uint32_t new_pending_bytes = user->get_pending_ul_new_data(get_tti_tx_ul(), cc_cfg->enb_cc_idx); // Allow TBS=0 in case of UCI-only PUSCH - if (tbs < 0 || (tbs == 0 && pusch->dci.tb.mcs_idx != 29)) { + if (tbs < 0 || (tbs == 0 && pusch.dci.tb.mcs_idx != 29)) { fmt::memory_buffer str_buffer; fmt::format_to(str_buffer, "SCHED: Error {} {} rnti=0x{:x}, pid={}, dci=({},{}), prb={}, bsr={}", @@ -865,11 +852,12 @@ void sf_sched::set_ul_sched_result(const sf_cch_allocator::alloc_result_t& dci_r ul_alloc.is_retx() ? "retx" : "tx", user->get_rnti(), h->get_id(), - pusch->dci.location.L, - pusch->dci.location.ncce, + pusch.dci.location.L, + pusch.dci.location.ncce, ul_alloc.alloc, new_pending_bytes); logger.warning("%s", srslte::to_c_str(str_buffer)); + ul_result->pusch.pop_back(); continue; } @@ -884,8 +872,8 @@ void sf_sched::set_ul_sched_result(const sf_cch_allocator::alloc_result_t& dci_r user->get_rnti(), cc_cfg->enb_cc_idx, h->get_id(), - pusch->dci.location.L, - pusch->dci.location.ncce, + pusch.dci.location.L, + pusch.dci.location.ncce, ul_alloc.alloc, h->nof_retx(0), tbs, @@ -895,9 +883,7 @@ void sf_sched::set_ul_sched_result(const sf_cch_allocator::alloc_result_t& dci_r logger.info("%s", srslte::to_c_str(str_buffer)); } - pusch->current_tx_nb = h->nof_retx(0); - - ul_result->nof_dci_elems++; + pusch.current_tx_nb = h->nof_retx(0); } } @@ -922,7 +908,7 @@ void sf_sched::generate_sched_results(sched_ue_list& ue_db) /* Resume UL HARQs with pending retxs that did not get allocated */ using phich_t = sched_interface::ul_sched_phich_t; auto& phich_list = cc_result->ul_sched_result.phich; - for (uint32_t i = 0; i < cc_result->ul_sched_result.nof_phich_elems; ++i) { + for (uint32_t i = 0; i < cc_result->ul_sched_result.phich.size(); ++i) { auto& phich = phich_list[i]; if (phich.phich == phich_t::NACK) { auto& ue = *ue_db[phich.rnti]; diff --git a/srsenb/src/stack/mac/sched_helpers.cc b/srsenb/src/stack/mac/sched_helpers.cc index b1ce375ce..975053ce5 100644 --- a/srsenb/src/stack/mac/sched_helpers.cc +++ b/srsenb/src/stack/mac/sched_helpers.cc @@ -116,8 +116,7 @@ void log_dl_cc_results(srslog::basic_logger& logger, uint32_t enb_cc_idx, const } custom_mem_buffer strbuf; - for (uint32_t i = 0; i < result.nof_data_elems; ++i) { - const dl_sched_data_t& data = result.data[i]; + for (const auto& data : result.data) { if (logger.debug.enabled()) { fill_dl_cc_result_debug(strbuf, data); } else { @@ -142,7 +141,7 @@ void log_phich_cc_results(srslog::basic_logger& logger, return; } custom_mem_buffer strbuf; - for (uint32_t i = 0; i < result.nof_phich_elems; ++i) { + for (uint32_t i = 0; i < result.phich.size(); ++i) { const phich_t& phich = result.phich[i]; const char* prefix = strbuf.size() > 0 ? 
" | " : ""; const char* val = phich.phich == phich_t::ACK ? "ACK" : "NACK"; diff --git a/srsenb/test/mac/sched_benchmark.cc b/srsenb/test/mac/sched_benchmark.cc index c307ee975..48b16161f 100644 --- a/srsenb/test/mac/sched_benchmark.cc +++ b/srsenb/test/mac/sched_benchmark.cc @@ -126,21 +126,21 @@ public: { for (uint32_t cc = 0; cc < get_cell_params().size(); ++cc) { uint32_t dl_tbs = 0, ul_tbs = 0, dl_mcs = 0, ul_mcs = 0; - for (uint32_t i = 0; i < sf_out.dl_cc_result[cc].nof_data_elems; ++i) { + for (uint32_t i = 0; i < sf_out.dl_cc_result[cc].data.size(); ++i) { dl_tbs += sf_out.dl_cc_result[cc].data[i].tbs[0]; dl_tbs += sf_out.dl_cc_result[cc].data[i].tbs[1]; dl_mcs = std::max(dl_mcs, sf_out.dl_cc_result[cc].data[i].dci.tb[0].mcs_idx); } total_stats.mean_dl_tbs.push(dl_tbs); - if (sf_out.dl_cc_result[cc].nof_data_elems > 0) { + if (sf_out.dl_cc_result[cc].data.size() > 0) { total_stats.avg_dl_mcs.push(dl_mcs); } - for (uint32_t i = 0; i < sf_out.ul_cc_result[cc].nof_dci_elems; ++i) { + for (uint32_t i = 0; i < sf_out.ul_cc_result[cc].pusch.size(); ++i) { ul_tbs += sf_out.ul_cc_result[cc].pusch[i].tbs; ul_mcs = std::max(ul_mcs, sf_out.ul_cc_result[cc].pusch[i].dci.tb.mcs_idx); } total_stats.mean_ul_tbs.push(ul_tbs); - if (sf_out.ul_cc_result[cc].nof_dci_elems) { + if (not sf_out.ul_cc_result[cc].pusch.empty()) { total_stats.avg_ul_mcs.push(ul_mcs); } } diff --git a/srsenb/test/mac/sched_ca_test.cc b/srsenb/test/mac/sched_ca_test.cc index 9859a8481..5d22dfd79 100644 --- a/srsenb/test/mac/sched_ca_test.cc +++ b/srsenb/test/mac/sched_ca_test.cc @@ -177,7 +177,7 @@ int test_scell_activation(uint32_t sim_number, test_scell_activation_params para // TEST: When a DL newtx takes place, it should also encode the CE for (uint32_t i = 0; i < 100; ++i) { - if (tester.tti_info.dl_sched_result[params.pcell_idx].nof_data_elems > 0) { + if (not tester.tti_info.dl_sched_result[params.pcell_idx].data.empty()) { // DL data was allocated if (tester.tti_info.dl_sched_result[params.pcell_idx].data[0].nof_pdu_elems[0] > 0) { // it is a new DL tx diff --git a/srsenb/test/mac/sched_common_test_suite.cc b/srsenb/test/mac/sched_common_test_suite.cc index 98bff563b..dd96b2388 100644 --- a/srsenb/test/mac/sched_common_test_suite.cc +++ b/srsenb/test/mac/sched_common_test_suite.cc @@ -55,7 +55,7 @@ int test_pusch_collisions(const sf_output_res_t& sf_out, uint32_t enb_cc_idx, co try_ul_fill({cell_params.cfg.cell.nof_prb - pucch_nrb, (uint32_t)cell_params.cfg.cell.nof_prb}, "PUCCH", strict); /* TEST: check collisions in the UL PUSCH */ - for (uint32_t i = 0; i < ul_result.nof_dci_elems; ++i) { + for (uint32_t i = 0; i < ul_result.pusch.size(); ++i) { uint32_t L, RBstart; srslte_ra_type2_from_riv(ul_result.pusch[i].dci.type2_alloc.riv, &L, &RBstart, nof_prb, nof_prb); strict = ul_result.pusch[i].needs_pdcch or nof_prb != 6; // Msg3 may collide with PUCCH at PRB==6 @@ -113,12 +113,12 @@ int test_pdsch_collisions(const sf_output_res_t& sf_out, uint32_t enb_cc_idx, co }; // Decode BC allocations, check collisions, and fill cumulative mask - for (uint32_t i = 0; i < dl_result.nof_bc_elems; ++i) { + for (uint32_t i = 0; i < dl_result.bc.size(); ++i) { TESTASSERT(try_dl_mask_fill(dl_result.bc[i].dci, "BC") == SRSLTE_SUCCESS); } // Decode RAR allocations, check collisions, and fill cumulative mask - for (uint32_t i = 0; i < dl_result.nof_rar_elems; ++i) { + for (uint32_t i = 0; i < dl_result.rar.size(); ++i) { TESTASSERT(try_dl_mask_fill(dl_result.rar[i].dci, "RAR") == SRSLTE_SUCCESS); } @@ -131,7 +131,7 @@ int 
test_pdsch_collisions(const sf_output_res_t& sf_out, uint32_t enb_cc_idx, co } // Decode Data allocations, check collisions and fill cumulative mask - for (uint32_t i = 0; i < dl_result.nof_data_elems; ++i) { + for (uint32_t i = 0; i < dl_result.data.size(); ++i) { TESTASSERT(try_dl_mask_fill(dl_result.data[i].dci, "data") == SRSLTE_SUCCESS); } @@ -170,8 +170,8 @@ int test_sib_scheduling(const sf_output_res_t& sf_out, uint32_t enb_cc_idx) bool sib1_expected = ((sfn % 2) == 0) and sf_idx == 5; using bc_elem = const sched_interface::dl_sched_bc_t; - bc_elem* bc_begin = &dl_result.bc[0]; - bc_elem* bc_end = &dl_result.bc[dl_result.nof_bc_elems]; + bc_elem* bc_begin = dl_result.bc.begin(); + bc_elem* bc_end = dl_result.bc.end(); /* Test if SIB1 was correctly scheduled */ auto it = std::find_if(bc_begin, bc_end, [](bc_elem& elem) { return elem.index == 0; }); @@ -229,7 +229,7 @@ int test_pdcch_collisions(const sf_output_res_t& sf_out, }; /* TEST: verify there are no dci collisions for UL, DL data, BC, RAR */ - for (uint32_t i = 0; i < ul_result.nof_dci_elems; ++i) { + for (uint32_t i = 0; i < ul_result.pusch.size(); ++i) { const auto& pusch = ul_result.pusch[i]; if (not pusch.needs_pdcch) { // In case of non-adaptive retx or Msg3 @@ -237,13 +237,13 @@ int test_pdcch_collisions(const sf_output_res_t& sf_out, } try_cce_fill(pusch.dci.location, "UL"); } - for (uint32_t i = 0; i < dl_result.nof_data_elems; ++i) { + for (uint32_t i = 0; i < dl_result.data.size(); ++i) { try_cce_fill(dl_result.data[i].dci.location, "DL data"); } - for (uint32_t i = 0; i < dl_result.nof_bc_elems; ++i) { + for (uint32_t i = 0; i < dl_result.bc.size(); ++i) { try_cce_fill(dl_result.bc[i].dci.location, "DL BC"); } - for (uint32_t i = 0; i < dl_result.nof_rar_elems; ++i) { + for (uint32_t i = 0; i < dl_result.rar.size(); ++i) { try_cce_fill(dl_result.rar[i].dci.location, "DL RAR"); } @@ -262,7 +262,7 @@ int test_dci_content_common(const sf_output_res_t& sf_out, uint32_t enb_cc_idx) const auto& ul_result = sf_out.ul_cc_result[enb_cc_idx]; std::set alloc_rntis; - for (uint32_t i = 0; i < ul_result.nof_dci_elems; ++i) { + for (uint32_t i = 0; i < ul_result.pusch.size(); ++i) { const auto& pusch = ul_result.pusch[i]; uint16_t rnti = pusch.dci.rnti; CONDERROR(pusch.tbs == 0, "Allocated PUSCH with invalid TBS=%d", pusch.tbs); @@ -281,7 +281,7 @@ int test_dci_content_common(const sf_output_res_t& sf_out, uint32_t enb_cc_idx) } alloc_rntis.clear(); - for (uint32_t i = 0; i < dl_result.nof_data_elems; ++i) { + for (uint32_t i = 0; i < dl_result.data.size(); ++i) { auto& data = dl_result.data[i]; uint16_t rnti = data.dci.rnti; CONDERROR(data.tbs[0] == 0 and data.tbs[1] == 0, "Allocated DL data has empty TBS"); @@ -321,7 +321,7 @@ int test_dci_content_common(const sf_output_res_t& sf_out, uint32_t enb_cc_idx) return SRSLTE_SUCCESS; }; - for (uint32_t i = 0; i < dl_result.nof_bc_elems; ++i) { + for (uint32_t i = 0; i < dl_result.bc.size(); ++i) { const sched_interface::dl_sched_bc_t& bc = dl_result.bc[i]; if (bc.type == sched_interface::dl_sched_bc_t::BCCH) { CONDERROR(bc.tbs < cell_params.cfg.sibs[bc.index].len, @@ -337,7 +337,7 @@ int test_dci_content_common(const sf_output_res_t& sf_out, uint32_t enb_cc_idx) TESTASSERT(test_ra_bc_coderate(bc.tbs, bc.dci) == SRSLTE_SUCCESS); } - for (uint32_t i = 0; i < dl_result.nof_rar_elems; ++i) { + for (uint32_t i = 0; i < dl_result.rar.size(); ++i) { const auto& rar = dl_result.rar[i]; CONDERROR(rar.tbs == 0, "Allocated RAR process with invalid TBS=%d", rar.tbs); diff --git 
a/srsenb/test/mac/sched_sim_ue.cc b/srsenb/test/mac/sched_sim_ue.cc index 41f171e0c..f06213b4c 100644 --- a/srsenb/test/mac/sched_sim_ue.cc +++ b/srsenb/test/mac/sched_sim_ue.cc @@ -78,7 +78,7 @@ int ue_sim::update(const sf_output_res_t& sf_out) void ue_sim::update_dl_harqs(const sf_output_res_t& sf_out) { for (uint32_t cc = 0; cc < sf_out.cc_params.size(); ++cc) { - for (uint32_t i = 0; i < sf_out.dl_cc_result[cc].nof_data_elems; ++i) { + for (uint32_t i = 0; i < sf_out.dl_cc_result[cc].data.size(); ++i) { const auto& data = sf_out.dl_cc_result[cc].data[i]; if (data.dci.rnti != ctxt.rnti) { continue; @@ -107,7 +107,7 @@ void ue_sim::update_ul_harqs(const sf_output_res_t& sf_out) uint32_t pid = to_tx_ul(sf_out.tti_rx).to_uint() % (FDD_HARQ_DELAY_UL_MS + FDD_HARQ_DELAY_DL_MS); for (uint32_t cc = 0; cc < sf_out.cc_params.size(); ++cc) { // Update UL harqs with PHICH info - for (uint32_t i = 0; i < sf_out.ul_cc_result[cc].nof_phich_elems; ++i) { + for (uint32_t i = 0; i < sf_out.ul_cc_result[cc].phich.size(); ++i) { const auto& phich = sf_out.ul_cc_result[cc].phich[i]; if (phich.rnti != ctxt.rnti) { continue; @@ -128,7 +128,7 @@ void ue_sim::update_ul_harqs(const sf_output_res_t& sf_out) } // Update UL harqs with PUSCH grants - for (uint32_t i = 0; i < sf_out.ul_cc_result[cc].nof_dci_elems; ++i) { + for (uint32_t i = 0; i < sf_out.ul_cc_result[cc].pusch.size(); ++i) { const auto& data = sf_out.ul_cc_result[cc].pusch[i]; if (data.dci.rnti != ctxt.rnti) { continue; @@ -171,7 +171,7 @@ void ue_sim::update_conn_state(const sf_output_res_t& sf_out) srslte::tti_interval rar_window{ctxt.prach_tti_rx + 3, ctxt.prach_tti_rx + 3 + rar_win_size}; if (rar_window.contains(tti_tx_dl)) { - for (uint32_t i = 0; i < dl_cc_result.nof_rar_elems; ++i) { + for (uint32_t i = 0; i < dl_cc_result.rar.size(); ++i) { for (uint32_t j = 0; j < dl_cc_result.rar[i].msg3_grant.size(); ++j) { const auto& data = dl_cc_result.rar[i].msg3_grant[j].data; if (data.prach_tti == (uint32_t)ctxt.prach_tti_rx.to_uint() and data.preamble_idx == ctxt.preamble_idx) { @@ -188,7 +188,7 @@ void ue_sim::update_conn_state(const sf_output_res_t& sf_out) srslte::tti_point expected_msg3_tti_rx = ctxt.rar_tti_rx + MSG3_DELAY_MS; if (expected_msg3_tti_rx == sf_out.tti_rx) { // Msg3 should exist - for (uint32_t i = 0; i < ul_cc_result.nof_dci_elems; ++i) { + for (uint32_t i = 0; i < ul_cc_result.pusch.size(); ++i) { if (ul_cc_result.pusch[i].dci.rnti == ctxt.rnti) { ctxt.msg3_tti_rx = sf_out.tti_rx; } @@ -198,7 +198,7 @@ void ue_sim::update_conn_state(const sf_output_res_t& sf_out) if (ctxt.msg3_tti_rx.is_valid() and not ctxt.msg4_tti_rx.is_valid()) { // Msg3 scheduled, but Msg4 not yet scheduled - for (uint32_t i = 0; i < dl_cc_result.nof_data_elems; ++i) { + for (uint32_t i = 0; i < dl_cc_result.data.size(); ++i) { if (dl_cc_result.data[i].dci.rnti == ctxt.rnti) { for (uint32_t j = 0; j < dl_cc_result.data[i].nof_pdu_elems[0]; ++j) { if (dl_cc_result.data[i].pdu[0][j].lcid == (uint32_t)srslte::dl_sch_lcid::CON_RES_ID) { diff --git a/srsenb/test/mac/sched_test_common.cc b/srsenb/test/mac/sched_test_common.cc index a05bedafe..0d0271068 100644 --- a/srsenb/test/mac/sched_test_common.cc +++ b/srsenb/test/mac/sched_test_common.cc @@ -89,12 +89,12 @@ void sched_result_stats::process_results(tti_point const std::vector& ul_result) { for (uint32_t ccidx = 0; ccidx < dl_result.size(); ++ccidx) { - for (uint32_t i = 0; i < dl_result[ccidx].nof_data_elems; ++i) { + for (uint32_t i = 0; i < dl_result[ccidx].data.size(); ++i) { user_stats* user = 
get_user(dl_result[ccidx].data[i].dci.rnti); user->tot_dl_sched_data[ccidx] += dl_result[ccidx].data[i].tbs[0]; user->tot_dl_sched_data[ccidx] += dl_result[ccidx].data[i].tbs[1]; } - for (uint32_t i = 0; i < ul_result[ccidx].nof_dci_elems; ++i) { + for (uint32_t i = 0; i < ul_result[ccidx].pusch.size(); ++i) { user_stats* user = get_user(ul_result[ccidx].pusch[i].dci.rnti); user->tot_ul_sched_data[ccidx] += ul_result[ccidx].pusch[i].tbs; } diff --git a/srsenb/test/mac/sched_test_rand.cc b/srsenb/test/mac/sched_test_rand.cc index 114d19adb..266a470d4 100644 --- a/srsenb/test/mac/sched_test_rand.cc +++ b/srsenb/test/mac/sched_test_rand.cc @@ -158,7 +158,7 @@ int sched_tester::process_results() int sched_tester::test_harqs() { /* check consistency of DL harq procedures and allocations */ - for (uint32_t i = 0; i < tti_info.dl_sched_result[CARRIER_IDX].nof_data_elems; ++i) { + for (uint32_t i = 0; i < tti_info.dl_sched_result[CARRIER_IDX].data.size(); ++i) { const auto& data = tti_info.dl_sched_result[CARRIER_IDX].data[i]; uint32_t h_id = data.dci.pid; uint16_t rnti = data.dci.rnti; @@ -171,7 +171,7 @@ int sched_tester::test_harqs() } /* Check PHICH allocations */ - for (uint32_t i = 0; i < tti_info.ul_sched_result[CARRIER_IDX].nof_phich_elems; ++i) { + for (uint32_t i = 0; i < tti_info.ul_sched_result[CARRIER_IDX].phich.size(); ++i) { const auto& phich = tti_info.ul_sched_result[CARRIER_IDX].phich[i]; const auto& hprev = tti_data.ue_data[phich.rnti].ul_harq; const auto* h = ue_db[phich.rnti]->get_ul_harq(srsenb::to_tx_ul(tti_rx), CARRIER_IDX); @@ -182,7 +182,7 @@ int sched_tester::test_harqs() if (not hprev.is_empty()) { // In case it was resumed CONDERROR(h == nullptr or h->is_empty(), "Cannot resume empty UL harq"); - for (uint32_t j = 0; j < tti_info.ul_sched_result[CARRIER_IDX].nof_dci_elems; ++j) { + for (uint32_t j = 0; j < tti_info.ul_sched_result[CARRIER_IDX].pusch.size(); ++j) { auto& pusch = tti_info.ul_sched_result[CARRIER_IDX].pusch[j]; CONDERROR(pusch.dci.rnti == phich.rnti, "Cannot send PHICH::ACK for same harq that got UL grant."); } @@ -198,7 +198,7 @@ int sched_tester::test_harqs() int sched_tester::update_ue_stats() { // update ue stats with number of allocated UL PRBs - for (uint32_t i = 0; i < tti_info.ul_sched_result[CARRIER_IDX].nof_dci_elems; ++i) { + for (uint32_t i = 0; i < tti_info.ul_sched_result[CARRIER_IDX].pusch.size(); ++i) { const auto& pusch = tti_info.ul_sched_result[CARRIER_IDX].pusch[i]; uint32_t L, RBstart; srslte_ra_type2_from_riv(pusch.dci.type2_alloc.riv, @@ -214,7 +214,7 @@ int sched_tester::update_ue_stats() // update ue stats with number of DL RB allocations srslte::bounded_bitset<100, true> alloc_mask(sched_cell_params[CARRIER_IDX].cfg.cell.nof_prb); - for (uint32_t i = 0; i < tti_info.dl_sched_result[CARRIER_IDX].nof_data_elems; ++i) { + for (uint32_t i = 0; i < tti_info.dl_sched_result[CARRIER_IDX].data.size(); ++i) { auto& data = tti_info.dl_sched_result[CARRIER_IDX].data[i]; TESTASSERT(srsenb::extract_dl_prbmask(sched_cell_params[CARRIER_IDX].cfg.cell, tti_info.dl_sched_result[CARRIER_IDX].data[i].dci, diff --git a/srsenb/test/mac/sched_ue_ded_test_suite.cc b/srsenb/test/mac/sched_ue_ded_test_suite.cc index 80ee48abd..571d038e6 100644 --- a/srsenb/test/mac/sched_ue_ded_test_suite.cc +++ b/srsenb/test/mac/sched_ue_ded_test_suite.cc @@ -43,18 +43,16 @@ int sim_ue_ctxt_t::enb_to_ue_cc_idx(uint32_t enb_cc_idx) const const pusch_t* find_pusch_grant(uint16_t rnti, const sched_interface::ul_sched_res_t& ul_cc_res) { - const pusch_t* ptr = 
std::find_if(&ul_cc_res.pusch[0], - &ul_cc_res.pusch[ul_cc_res.nof_dci_elems], - [rnti](const pusch_t& pusch) { return pusch.dci.rnti == rnti; }); - return ptr == &ul_cc_res.pusch[ul_cc_res.nof_dci_elems] ? nullptr : ptr; + const pusch_t* ptr = std::find_if( + ul_cc_res.pusch.begin(), ul_cc_res.pusch.end(), [rnti](const pusch_t& pusch) { return pusch.dci.rnti == rnti; }); + return ptr == ul_cc_res.pusch.end() ? nullptr : ptr; } const pdsch_t* find_pdsch_grant(uint16_t rnti, const sched_interface::dl_sched_res_t& dl_cc_res) { - const pdsch_t* ptr = std::find_if(&dl_cc_res.data[0], - &dl_cc_res.data[dl_cc_res.nof_data_elems], - [rnti](const pdsch_t& pdsch) { return pdsch.dci.rnti == rnti; }); - return ptr == &dl_cc_res.data[dl_cc_res.nof_data_elems] ? nullptr : ptr; + const pdsch_t* ptr = std::find_if( + dl_cc_res.data.begin(), dl_cc_res.data.end(), [rnti](const pdsch_t& pdsch) { return pdsch.dci.rnti == rnti; }); + return ptr == dl_cc_res.data.end() ? nullptr : ptr; } int test_pdsch_grant(const sim_enb_ctxt_t& enb_ctxt, @@ -121,7 +119,7 @@ int test_pdsch_grant(const sim_enb_ctxt_t& enb_ctxt, int test_dl_sched_result(const sim_enb_ctxt_t& enb_ctxt, const sf_output_res_t& sf_out) { for (uint32_t cc = 0; cc < enb_ctxt.cell_params.size(); ++cc) { - for (uint32_t i = 0; i < sf_out.dl_cc_result[cc].nof_data_elems; ++i) { + for (uint32_t i = 0; i < sf_out.dl_cc_result[cc].data.size(); ++i) { const sched_interface::dl_sched_data_t& data = sf_out.dl_cc_result[cc].data[i]; CONDERROR( enb_ctxt.ue_db.count(data.dci.rnti) == 0, "Allocated DL grant for non-existent rnti=0x%x", data.dci.rnti); @@ -136,10 +134,10 @@ int test_ul_sched_result(const sim_enb_ctxt_t& enb_ctxt, const sf_output_res_t& uint32_t pid = to_tx_ul(sf_out.tti_rx).to_uint() % (FDD_HARQ_DELAY_UL_MS + FDD_HARQ_DELAY_DL_MS); for (size_t cc = 0; cc < enb_ctxt.cell_params.size(); ++cc) { - const auto* phich_begin = &sf_out.ul_cc_result[cc].phich[0]; - const auto* phich_end = &sf_out.ul_cc_result[cc].phich[sf_out.ul_cc_result[cc].nof_phich_elems]; - const auto* pusch_begin = &sf_out.ul_cc_result[cc].pusch[0]; - const auto* pusch_end = &sf_out.ul_cc_result[cc].pusch[sf_out.ul_cc_result[cc].nof_dci_elems]; + const auto* phich_begin = sf_out.ul_cc_result[cc].phich.begin(); + const auto* phich_end = sf_out.ul_cc_result[cc].phich.end(); + const auto* pusch_begin = sf_out.ul_cc_result[cc].pusch.begin(); + const auto* pusch_end = sf_out.ul_cc_result[cc].pusch.end(); // TEST: rnti must exist for all PHICH CONDERROR(std::any_of(phich_begin, @@ -250,14 +248,14 @@ int test_ra(const sim_enb_ctxt_t& enb_ctxt, const sf_output_res_t& sf_out) CONDERROR(not ue.rar_tti_rx.is_valid() and tti_tx_dl > rar_window.stop(), "rnti=0x%x RAR not scheduled within the RAR Window", rnti); - for (uint32_t i = 0; i < sf_out.dl_cc_result[cc].nof_rar_elems; ++i) { + for (uint32_t i = 0; i < sf_out.dl_cc_result[cc].rar.size(); ++i) { CONDERROR(sf_out.dl_cc_result[cc].rar[i].dci.rnti == rnti, "No RAR allocations allowed outside of user RAR window"); } } else { // Inside RAR window uint32_t nof_rars = ue.rar_tti_rx.is_valid() ? 
1 : 0; - for (uint32_t i = 0; i < dl_cc_res.nof_rar_elems; ++i) { + for (uint32_t i = 0; i < dl_cc_res.rar.size(); ++i) { for (const auto& grant : dl_cc_res.rar[i].msg3_grant) { const auto& data = grant.data; if (data.prach_tti == (uint32_t)ue.prach_tti_rx.to_uint() and data.preamble_idx == ue.preamble_idx) { @@ -278,7 +276,7 @@ int test_ra(const sim_enb_ctxt_t& enb_ctxt, const sf_output_res_t& sf_out) if (expected_msg3_tti_rx == sf_out.tti_rx) { // Msg3 should exist uint32_t msg3_count = 0; - for (uint32_t i = 0; i < ul_cc_res.nof_dci_elems; ++i) { + for (uint32_t i = 0; i < ul_cc_res.pusch.size(); ++i) { if (ul_cc_res.pusch[i].dci.rnti == rnti) { msg3_count++; CONDERROR(ul_cc_res.pusch[i].needs_pdcch, "Msg3 allocations do not require PDCCH"); @@ -295,7 +293,7 @@ int test_ra(const sim_enb_ctxt_t& enb_ctxt, const sf_output_res_t& sf_out) if (ue.msg3_tti_rx.is_valid() and not ue.msg4_tti_rx.is_valid()) { // Msg3 scheduled, but Msg4 not yet scheduled uint32_t msg4_count = 0; - for (uint32_t i = 0; i < dl_cc_res.nof_data_elems; ++i) { + for (uint32_t i = 0; i < dl_cc_res.data.size(); ++i) { if (dl_cc_res.data[i].dci.rnti == rnti) { CONDERROR(to_tx_dl(sf_out.tti_rx) < to_tx_ul(ue.msg3_tti_rx), "Msg4 cannot be scheduled without Msg3 being tx"); @@ -316,7 +314,7 @@ int test_ra(const sim_enb_ctxt_t& enb_ctxt, const sf_output_res_t& sf_out) if (not ue.msg4_tti_rx.is_valid()) { // TEST: No UL allocs except for Msg3 before Msg4 - for (uint32_t i = 0; i < ul_cc_res.nof_dci_elems; ++i) { + for (uint32_t i = 0; i < ul_cc_res.pusch.size(); ++i) { if (ul_cc_res.pusch[i].dci.rnti == rnti) { CONDERROR(not ue.rar_tti_rx.is_valid(), "No UL allocs before RAR allowed"); srslte::tti_point expected_msg3_tti = ue.rar_tti_rx + MSG3_DELAY_MS; @@ -331,7 +329,7 @@ int test_ra(const sim_enb_ctxt_t& enb_ctxt, const sf_output_res_t& sf_out) // TEST: No DL allocs before Msg3 if (not ue.msg3_tti_rx.is_valid()) { - for (uint32_t i = 0; i < dl_cc_res.nof_data_elems; ++i) { + for (uint32_t i = 0; i < dl_cc_res.data.size(); ++i) { CONDERROR(dl_cc_res.data[i].dci.rnti == rnti, "No DL data allocs allowed before Msg3 is scheduled"); } } @@ -339,7 +337,7 @@ int test_ra(const sim_enb_ctxt_t& enb_ctxt, const sf_output_res_t& sf_out) } // TEST: Ensure there are no spurious RARs that do not belong to any user - for (uint32_t i = 0; i < dl_cc_res.nof_rar_elems; ++i) { + for (uint32_t i = 0; i < dl_cc_res.rar.size(); ++i) { for (uint32_t j = 0; j < dl_cc_res.rar[i].msg3_grant.size(); ++j) { uint32_t prach_tti = dl_cc_res.rar[i].msg3_grant[j].data.prach_tti; uint32_t preamble_idx = dl_cc_res.rar[i].msg3_grant[j].data.preamble_idx; From eb27efd8675bf5a63a753c04620dc1a95d0e9a72 Mon Sep 17 00:00:00 2001 From: faluco Date: Wed, 17 Mar 2021 17:12:10 +0100 Subject: [PATCH 40/64] Modify the policy of the buffered sink to flush its contents continuously instead of doing it once. 
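In other words, when an incoming write no longer fits in the internal buffer, the sink now drains the buffer to the file and keeps accepting data, rather than performing a single write at destruction only. The sketch below illustrates that flush-on-full policy in isolation; it is a simplified, hypothetical example (the class and member names are illustrative and do not follow the srslog sink interface), not the implementation added by this patch.

#include <cstddef>
#include <cstdio>
#include <string>
#include <vector>

// Hypothetical flush-on-full buffer used only to illustrate the policy change.
class flush_on_full_buffer
{
public:
  flush_on_full_buffer(std::string filename_, std::size_t capacity) : filename(std::move(filename_))
  {
    buffer.reserve(capacity);
  }

  ~flush_on_full_buffer()
  {
    // Any remaining bytes are still written out on destruction.
    flush();
    if (fp != nullptr) {
      std::fclose(fp);
    }
  }

  flush_on_full_buffer(const flush_on_full_buffer&) = delete;
  flush_on_full_buffer& operator=(const flush_on_full_buffer&) = delete;

  void write(const char* data, std::size_t len)
  {
    // New policy: if the chunk does not fit, drain the buffer to the file and
    // continue buffering, instead of writing the whole contents exactly once.
    if (buffer.size() + len > buffer.capacity()) {
      flush();
    }
    buffer.insert(buffer.end(), data, data + len);
  }

  void flush()
  {
    if (buffer.empty()) {
      return;
    }
    // The file is created lazily on the first flush.
    if (fp == nullptr) {
      fp = std::fopen(filename.c_str(), "wb");
    }
    if (fp != nullptr) {
      std::fwrite(buffer.data(), 1, buffer.size(), fp);
    }
    buffer.clear();
  }

private:
  std::string       filename;
  std::FILE*        fp = nullptr;
  std::vector<char> buffer;
};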
--- lib/src/srslog/event_trace.cpp | 30 ++----- lib/src/srslog/sinks/buffered_file_sink.h | 89 ++++++++++++++++++ lib/src/srslog/sinks/single_write_file_sink.h | 90 ------------------- 3 files changed, 98 insertions(+), 111 deletions(-) create mode 100644 lib/src/srslog/sinks/buffered_file_sink.h delete mode 100644 lib/src/srslog/sinks/single_write_file_sink.h diff --git a/lib/src/srslog/event_trace.cpp b/lib/src/srslog/event_trace.cpp index 196f4373b..924492e79 100644 --- a/lib/src/srslog/event_trace.cpp +++ b/lib/src/srslog/event_trace.cpp @@ -11,7 +11,7 @@ */ #include "srslte/srslog/event_trace.h" -#include "sinks/single_write_file_sink.h" +#include "sinks/buffered_file_sink.h" #include "srslte/srslog/srslog.h" #include @@ -62,16 +62,14 @@ bool srslog::event_trace_init(const std::string& filename, std::size_t capacity) return false; } - auto tracer_sink = std::unique_ptr(new single_write_file_sink( - filename, capacity, get_default_log_formatter())); + auto tracer_sink = std::unique_ptr(new buffered_file_sink(filename, capacity, get_default_log_formatter())); if (!install_custom_sink(sink_name, std::move(tracer_sink))) { return false; } if (sink* s = find_sink(sink_name)) { - log_channel& c = - fetch_log_channel("event_trace_channel", *s, {"TRACE", '\0', false}); - tracer = &c; + log_channel& c = fetch_log_channel("event_trace_channel", *s, {"TRACE", '\0', false}); + tracer = &c; return true; } @@ -82,7 +80,7 @@ bool srslog::event_trace_init(const std::string& filename, std::size_t capacity) static void format_time(char* buffer, size_t len) { std::time_t t = std::time(nullptr); - std::tm lt{}; + std::tm lt{}; ::localtime_r(&t, <); std::strftime(buffer, len, "%FT%T", <); } @@ -97,11 +95,7 @@ void trace_duration_begin(const std::string& category, const std::string& name) char fmt_time[24]; format_time(fmt_time, sizeof(fmt_time)); - (*tracer)("[%s] [TID:%0u] Entering \"%s\": %s", - fmt_time, - (unsigned)::pthread_self(), - category, - name); + (*tracer)("[%s] [TID:%0u] Entering \"%s\": %s", fmt_time, (unsigned)::pthread_self(), category, name); } void trace_duration_end(const std::string& category, const std::string& name) @@ -112,11 +106,7 @@ void trace_duration_end(const std::string& category, const std::string& name) char fmt_time[24]; format_time(fmt_time, sizeof(fmt_time)); - (*tracer)("[%s] [TID:%0u] Leaving \"%s\": %s", - fmt_time, - (unsigned)::pthread_self(), - category, - name); + (*tracer)("[%s] [TID:%0u] Leaving \"%s\": %s", fmt_time, (unsigned)::pthread_self(), category, name); } } // namespace srslog @@ -128,10 +118,8 @@ srslog::detail::scoped_complete_event::~scoped_complete_event() return; } - auto end = std::chrono::steady_clock::now(); - unsigned long long diff = - std::chrono::duration_cast(end - start) - .count(); + auto end = std::chrono::steady_clock::now(); + unsigned long long diff = std::chrono::duration_cast(end - start).count(); small_str_buffer str; // Limit to the category and name strings to a predefined length so everything fits in a small string. diff --git a/lib/src/srslog/sinks/buffered_file_sink.h b/lib/src/srslog/sinks/buffered_file_sink.h new file mode 100644 index 000000000..e2908d38c --- /dev/null +++ b/lib/src/srslog/sinks/buffered_file_sink.h @@ -0,0 +1,89 @@ +/** + * + * \section COPYRIGHT + * + * Copyright 2013-2020 Software Radio Systems Limited + * + * By using this file, you agree to the terms and conditions set + * forth in the LICENSE file which can be found at the top level of + * the distribution. 
+ * + */ + +#ifndef SRSLOG_BUFFERED_FILE_SINK_H +#define SRSLOG_BUFFERED_FILE_SINK_H + +#include "file_utils.h" +#include "srslte/srslog/sink.h" + +namespace srslog { + +/// This class is a wrapper of a file handle that buffers the input data into an internal buffer and writes its contents +/// to the file once the buffer is full or in object destruction. +class buffered_file_sink : public sink +{ +public: + buffered_file_sink(std::string filename, std::size_t capacity, std::unique_ptr f) : + sink(std::move(f)), filename(std::move(filename)) + { + buffer.reserve(capacity); + } + + ~buffered_file_sink() override { flush_buffer(); } + + buffered_file_sink(const buffered_file_sink& other) = delete; + buffered_file_sink& operator=(const buffered_file_sink& other) = delete; + + detail::error_string write(detail::memory_buffer input_buffer) override + { + // Create a new file the first time we hit this method. + if (!is_file_created) { + is_file_created = true; + assert(!handler && "No handler should be created yet"); + if (auto err_str = handler.create(filename)) { + return err_str; + } + } + + if (has_room_for(input_buffer.size())) { + buffer.insert(buffer.end(), input_buffer.begin(), input_buffer.end()); + return {}; + } + + return flush_buffer(); + } + + detail::error_string flush() override + { + if (auto err = flush_buffer()) { + return err; + } + return handler.flush(); + } + +private: + /// Returns true if the internal buffer has room for the specified input size, + /// otherwise returns false. + bool has_room_for(std::size_t s) const { return s + buffer.size() < buffer.capacity(); } + + /// Flushes the buffer contents into the file. + detail::error_string flush_buffer() + { + if (buffer.empty()) { + return {}; + } + auto err = handler.write(detail::memory_buffer(buffer.data(), buffer.size())); + buffer.clear(); + return err; + } + +private: + const std::string filename; + file_utils::file handler; + std::vector buffer; + bool is_file_created = false; +}; + +} // namespace srslog + +#endif // SRSLOG_BUFFERED_FILE_SINK_H diff --git a/lib/src/srslog/sinks/single_write_file_sink.h b/lib/src/srslog/sinks/single_write_file_sink.h deleted file mode 100644 index 37485e9e0..000000000 --- a/lib/src/srslog/sinks/single_write_file_sink.h +++ /dev/null @@ -1,90 +0,0 @@ -/** - * - * \section COPYRIGHT - * - * Copyright 2013-2020 Software Radio Systems Limited - * - * By using this file, you agree to the terms and conditions set - * forth in the LICENSE file which can be found at the top level of - * the distribution. - * - */ - -#ifndef SRSLOG_SINGLE_WRITE_FILE_SINK_H -#define SRSLOG_SINGLE_WRITE_FILE_SINK_H - -#include "file_utils.h" -#include "srslte/srslog/sink.h" - -namespace srslog { - -/// This class is a wrapper of a file handle that stores the input data into an -/// internal buffer and writes its contents to the file once the buffer is full -/// or in object destruction. -class single_write_file_sink : public sink -{ -public: - single_write_file_sink(std::string filename, - std::size_t capacity, - std::unique_ptr f) : - sink(std::move(f)), filename(std::move(filename)) - { - buffer.reserve(capacity); - } - - ~single_write_file_sink() override - { - if (!is_written) { - write_contents(); - } - } - - single_write_file_sink(const single_write_file_sink& other) = delete; - single_write_file_sink& - operator=(const single_write_file_sink& other) = delete; - - detail::error_string write(detail::memory_buffer input_buffer) override - { - // Nothing to do when the contents have been already written. 
- if (is_written) { - return {}; - } - - if (has_room_for(input_buffer.size())) { - buffer.insert(buffer.end(), input_buffer.begin(), input_buffer.end()); - return {}; - } - - return write_contents(); - } - - detail::error_string flush() override { return handler.flush(); } - -private: - /// Returns true if the internal buffer has room for the specified input size, - /// otherwise returns false. - bool has_room_for(std::size_t s) const - { - return s + buffer.size() < buffer.capacity(); - } - - /// Writes the buffer contents into the file. - detail::error_string write_contents() - { - is_written = true; - if (auto err_str = handler.create(filename)) { - return err_str; - } - return handler.write(detail::memory_buffer(buffer.data(), buffer.size())); - } - -private: - const std::string filename; - file_utils::file handler; - std::vector buffer; - bool is_written = false; -}; - -} // namespace srslog - -#endif // SRSLOG_SINGLE_WRITE_FILE_SINK_H From 358c4de0bd473bdeb76602970d049b53944fa4dd Mon Sep 17 00:00:00 2001 From: faluco Date: Wed, 17 Mar 2021 13:03:19 +0100 Subject: [PATCH 41/64] Fix a valgrind issue in a unit test in log_channel_test where the buffer was not being null terminated. --- lib/test/srslog/log_channel_test.cpp | 1 + 1 file changed, 1 insertion(+) diff --git a/lib/test/srslog/log_channel_test.cpp b/lib/test/srslog/log_channel_test.cpp index 3fa24fb7c..84adf8293 100644 --- a/lib/test/srslog/log_channel_test.cpp +++ b/lib/test/srslog/log_channel_test.cpp @@ -288,6 +288,7 @@ when_logging_with_small_string_then_filled_in_log_entry_is_pushed_into_the_backe small_str_buffer buf; fmt::format_to(buf, "A {} {} {}", 1, 2, 3); + buf.push_back('\0'); log(std::move(buf)); ASSERT_EQ(backend.push_invocation_count(), 1); From 2cfc657fbb6ade25bbf9fa527b383bc2de822cd8 Mon Sep 17 00:00:00 2001 From: Andre Puschmann Date: Thu, 18 Mar 2021 13:19:33 +0100 Subject: [PATCH 42/64] rlc_am_lte: refactor logging when PDCP SDUs are enqueued the patch refactor the logging when a new PDCP SDU is enqueued for transmission at RLC. If the SN is already present, only a warning is logged. From the RLC perspective operation continues and the SDU will be transmitted. The patch also changes the order of logs. When the SN cannot be inserted inside the queue of undelivered SDUs, only one message is logged. --- lib/src/upper/rlc_am_lte.cc | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/lib/src/upper/rlc_am_lte.cc b/lib/src/upper/rlc_am_lte.cc index 4bf230413..98df5c952 100644 --- a/lib/src/upper/rlc_am_lte.cc +++ b/lib/src/upper/rlc_am_lte.cc @@ -439,15 +439,14 @@ int rlc_am_lte::rlc_am_lte_tx::write_sdu(unique_byte_buffer_t sdu) return SRSLTE_ERROR; } - // Store SDU info - logger.debug( - "Storing PDCP SDU info in queue. PDCP_SN=%d, Queue Size=%ld", sdu_pdcp_sn, undelivered_sdu_info_queue.nof_sdus()); - if (undelivered_sdu_info_queue.has_pdcp_sn(sdu_pdcp_sn)) { - logger.error("PDCP SDU info already exists. SN=%d", sdu_pdcp_sn); + logger.warning("PDCP_SN=%d already marked as undelivered", sdu_pdcp_sn); return SRSLTE_ERROR; } + // Store SDU info + logger.debug("Marking PDCP_SN=%d as undelivered (queue_len=%ld)", sdu_pdcp_sn, undelivered_sdu_info_queue.nof_sdus()); + undelivered_sdu_info_queue.add_pdcp_sdu(sdu_pdcp_sn); return SRSLTE_SUCCESS; } From 137a21d6b2b73fd6b879fd264349c5e7cd384385 Mon Sep 17 00:00:00 2001 From: Francisco Date: Wed, 17 Mar 2021 23:20:16 +0000 Subject: [PATCH 43/64] sched refactor - use simple enum to represent allocation result. 
Refactored logging messages when allocation fails. --- srsenb/hdr/stack/mac/sched_carrier.h | 2 +- srsenb/hdr/stack/mac/sched_grid.h | 98 ++++---- srsenb/hdr/stack/mac/schedulers/sched_base.h | 8 +- srsenb/src/stack/mac/sched_carrier.cc | 51 ++--- srsenb/src/stack/mac/sched_grid.cc | 213 +++++++++--------- srsenb/src/stack/mac/schedulers/sched_base.cc | 33 ++- .../src/stack/mac/schedulers/sched_time_pf.cc | 22 +- .../src/stack/mac/schedulers/sched_time_rr.cc | 18 +- 8 files changed, 211 insertions(+), 234 deletions(-) diff --git a/srsenb/hdr/stack/mac/sched_carrier.h b/srsenb/hdr/stack/mac/sched_carrier.h index 03cd5f498..d1b4071c5 100644 --- a/srsenb/hdr/stack/mac/sched_carrier.h +++ b/srsenb/hdr/stack/mac/sched_carrier.h @@ -115,7 +115,7 @@ public: void reset(); private: - alloc_outcome_t allocate_pending_rar(sf_sched* tti_sched, const pending_rar_t& rar, uint32_t& nof_grants_alloc); + alloc_result allocate_pending_rar(sf_sched* tti_sched, const pending_rar_t& rar, uint32_t& nof_grants_alloc); // args srslog::basic_logger& logger; diff --git a/srsenb/hdr/stack/mac/sched_grid.h b/srsenb/hdr/stack/mac/sched_grid.h index f19544398..972256d61 100644 --- a/srsenb/hdr/stack/mac/sched_grid.h +++ b/srsenb/hdr/stack/mac/sched_grid.h @@ -25,29 +25,18 @@ namespace srsenb { /// Error code of alloc attempt -struct alloc_outcome_t { - enum result_enum { - SUCCESS, - DCI_COLLISION, - RB_COLLISION, - ERROR, - NOF_RB_INVALID, - PUCCH_COLLISION, - MEASGAP_COLLISION, - ALREADY_ALLOC, - NO_DATA, - INVALID_PRBMASK, - INVALID_CARRIER, - CODERATE_TOO_HIGH, - NOF_ALLOCS_LIMIT - }; - result_enum result = ERROR; - alloc_outcome_t() = default; - alloc_outcome_t(result_enum e) : result(e) {} - operator result_enum() { return result; } - operator bool() { return result == SUCCESS; } - const char* to_string() const; +enum class alloc_result { + success, + sch_collision, + no_cch_space, + no_sch_space, + rnti_inactive, + invalid_grant_params, + invalid_coderate, + no_grant_space, + other_cause }; +const char* to_string(alloc_result res); //! 
Result of a Subframe sched computation struct cc_sched_result { @@ -113,23 +102,23 @@ class sf_grid_t { public: struct dl_ctrl_alloc_t { - alloc_outcome_t outcome; - rbg_interval rbg_range; + alloc_result outcome; + rbg_interval rbg_range; }; sf_grid_t() : logger(srslog::fetch_basic_logger("MAC")) {} - void init(const sched_cell_params_t& cell_params_); - void new_tti(tti_point tti_rx); - alloc_outcome_t alloc_dl_ctrl(uint32_t aggr_lvl, rbg_interval rbg_range, alloc_type_t alloc_type); - alloc_outcome_t alloc_dl_data(sched_ue* user, const rbgmask_t& user_mask, bool has_pusch_grant); - bool reserve_dl_rbgs(uint32_t start_rbg, uint32_t end_rbg); - void rem_last_alloc_dl(rbg_interval rbgs); + void init(const sched_cell_params_t& cell_params_); + void new_tti(tti_point tti_rx); + alloc_result alloc_dl_ctrl(uint32_t aggr_lvl, rbg_interval rbg_range, alloc_type_t alloc_type); + alloc_result alloc_dl_data(sched_ue* user, const rbgmask_t& user_mask, bool has_pusch_grant); + bool reserve_dl_rbgs(uint32_t start_rbg, uint32_t end_rbg); + void rem_last_alloc_dl(rbg_interval rbgs); - alloc_outcome_t alloc_ul_data(sched_ue* user, prb_interval alloc, bool needs_pdcch, bool strict = true); - alloc_outcome_t reserve_ul_prbs(const prbmask_t& prbmask, bool strict); - alloc_outcome_t reserve_ul_prbs(prb_interval alloc, bool strict); - bool find_ul_alloc(uint32_t L, prb_interval* alloc) const; + alloc_result alloc_ul_data(sched_ue* user, prb_interval alloc, bool needs_pdcch, bool strict = true); + alloc_result reserve_ul_prbs(const prbmask_t& prbmask, bool strict); + alloc_result reserve_ul_prbs(prb_interval alloc, bool strict); + bool find_ul_alloc(uint32_t L, prb_interval* alloc) const; // getters const rbgmask_t& get_dl_mask() const { return dl_mask; } @@ -139,11 +128,11 @@ public: uint32_t get_pucch_width() const { return pucch_nrb; } private: - alloc_outcome_t alloc_dl(uint32_t aggr_lvl, - alloc_type_t alloc_type, - rbgmask_t alloc_mask, - sched_ue* user = nullptr, - bool has_pusch_grant = false); + alloc_result alloc_dl(uint32_t aggr_lvl, + alloc_type_t alloc_type, + rbgmask_t alloc_mask, + sched_ue* user = nullptr, + bool has_pusch_grant = false); // consts const sched_cell_params_t* cc_cfg = nullptr; @@ -205,7 +194,7 @@ public: uint32_t n_prb = 0; uint32_t mcs = 0; }; - typedef std::pair ctrl_code_t; + typedef std::pair ctrl_code_t; // Control/Configuration Methods sf_sched(); @@ -213,27 +202,30 @@ public: void new_tti(srslte::tti_point tti_rx_, sf_sched_result* cc_results); // DL alloc methods - alloc_outcome_t alloc_sib(uint32_t aggr_lvl, uint32_t sib_idx, uint32_t sib_ntx, rbg_interval rbgs); - alloc_outcome_t alloc_paging(uint32_t aggr_lvl, uint32_t paging_payload, rbg_interval rbgs); - alloc_outcome_t alloc_rar(uint32_t aggr_lvl, const pending_rar_t& rar_grant, rbg_interval rbgs, uint32_t nof_grants); + alloc_result alloc_sib(uint32_t aggr_lvl, uint32_t sib_idx, uint32_t sib_ntx, rbg_interval rbgs); + alloc_result alloc_paging(uint32_t aggr_lvl, uint32_t paging_payload, rbg_interval rbgs); + alloc_result alloc_rar(uint32_t aggr_lvl, const pending_rar_t& rar_grant, rbg_interval rbgs, uint32_t nof_grants); bool reserve_dl_rbgs(uint32_t rbg_start, uint32_t rbg_end) { return tti_alloc.reserve_dl_rbgs(rbg_start, rbg_end); } const std::vector& get_allocated_rars() const { return rar_allocs; } // UL alloc methods - alloc_outcome_t alloc_msg3(sched_ue* user, const sched_interface::dl_sched_rar_grant_t& rargrant); - alloc_outcome_t + alloc_result alloc_msg3(sched_ue* user, const 
sched_interface::dl_sched_rar_grant_t& rargrant); + alloc_result alloc_ul(sched_ue* user, prb_interval alloc, ul_alloc_t::type_t alloc_type, bool is_msg3 = false, int msg3_mcs = -1); - bool reserve_ul_prbs(const prbmask_t& ulmask, bool strict) { return tti_alloc.reserve_ul_prbs(ulmask, strict); } + bool reserve_ul_prbs(const prbmask_t& ulmask, bool strict) + { + return tti_alloc.reserve_ul_prbs(ulmask, strict) == alloc_result::success; + } bool alloc_phich(sched_ue* user); // compute DCIs and generate dl_sched_result/ul_sched_result for a given TTI void generate_sched_results(sched_ue_list& ue_db); - alloc_outcome_t alloc_dl_user(sched_ue* user, const rbgmask_t& user_mask, uint32_t pid); + alloc_result alloc_dl_user(sched_ue* user, const rbgmask_t& user_mask, uint32_t pid); tti_point get_tti_tx_dl() const { return to_tx_dl(tti_rx); } uint32_t get_nof_ctrl_symbols() const; const rbgmask_t& get_dl_mask() const { return tti_alloc.get_dl_mask(); } - alloc_outcome_t alloc_ul_user(sched_ue* user, prb_interval alloc); + alloc_result alloc_ul_user(sched_ue* user, prb_interval alloc); const prbmask_t& get_ul_mask() const { return tti_alloc.get_ul_mask(); } tti_point get_tti_tx_ul() const { return to_tx_ul(tti_rx); } @@ -264,11 +256,11 @@ private: // internal state sf_grid_t tti_alloc; - srslte::bounded_vector bc_allocs; - std::vector rar_allocs; - std::vector data_allocs; - std::vector ul_data_allocs; - uint32_t last_msg3_prb = 0, max_msg3_prb = 0; + srslte::bounded_vector bc_allocs; + std::vector rar_allocs; + std::vector data_allocs; + srslte::bounded_vector ul_data_allocs; + uint32_t last_msg3_prb = 0, max_msg3_prb = 0; // Next TTI state tti_point tti_rx; diff --git a/srsenb/hdr/stack/mac/schedulers/sched_base.h b/srsenb/hdr/stack/mac/schedulers/sched_base.h index 47ed42006..5d763b6f6 100644 --- a/srsenb/hdr/stack/mac/schedulers/sched_base.h +++ b/srsenb/hdr/stack/mac/schedulers/sched_base.h @@ -59,10 +59,10 @@ const ul_harq_proc* get_ul_retx_harq(sched_ue& user, sf_sched* tti_sched); const ul_harq_proc* get_ul_newtx_harq(sched_ue& user, sf_sched* tti_sched); /// Helper methods to allocate resources in subframe -alloc_outcome_t try_dl_retx_alloc(sf_sched& tti_sched, sched_ue& ue, const dl_harq_proc& h); -alloc_outcome_t - try_dl_newtx_alloc_greedy(sf_sched& tti_sched, sched_ue& ue, const dl_harq_proc& h, rbgmask_t* result_mask = nullptr); -alloc_outcome_t try_ul_retx_alloc(sf_sched& tti_sched, sched_ue& ue, const ul_harq_proc& h); +alloc_result try_dl_retx_alloc(sf_sched& tti_sched, sched_ue& ue, const dl_harq_proc& h); +alloc_result + try_dl_newtx_alloc_greedy(sf_sched& tti_sched, sched_ue& ue, const dl_harq_proc& h, rbgmask_t* result_mask = nullptr); +alloc_result try_ul_retx_alloc(sf_sched& tti_sched, sched_ue& ue, const ul_harq_proc& h); } // namespace srsenb diff --git a/srsenb/src/stack/mac/sched_carrier.cc b/srsenb/src/stack/mac/sched_carrier.cc index fbbbf3cc6..1306e7f71 100644 --- a/srsenb/src/stack/mac/sched_carrier.cc +++ b/srsenb/src/stack/mac/sched_carrier.cc @@ -110,24 +110,24 @@ void bc_sched::alloc_sibs(sf_sched* tti_sched) } // Attempt PDSCH grants with increasing number of RBGs - alloc_outcome_t ret = alloc_outcome_t::CODERATE_TOO_HIGH; - for (uint32_t nrbgs = 1; nrbgs < cc_cfg->nof_rbgs and ret == alloc_outcome_t::CODERATE_TOO_HIGH; ++nrbgs) { + alloc_result ret = alloc_result::invalid_coderate; + for (uint32_t nrbgs = 1; nrbgs < cc_cfg->nof_rbgs and ret == alloc_result::invalid_coderate; ++nrbgs) { rbg_interval rbg_interv = find_empty_rbg_interval(nrbgs, 
tti_sched->get_dl_mask()); if (rbg_interv.length() != nrbgs) { - ret = alloc_outcome_t::RB_COLLISION; + ret = alloc_result::no_sch_space; break; } ret = tti_sched->alloc_sib(bc_aggr_level, sib_idx, pending_sibs[sib_idx].n_tx, rbg_interv); - if (ret == alloc_outcome_t::SUCCESS) { + if (ret == alloc_result::success) { // SIB scheduled successfully pending_sibs[sib_idx].n_tx++; } } - if (ret != alloc_outcome_t::SUCCESS) { + if (ret != alloc_result::success) { logger.warning("SCHED: Could not allocate SIB=%d, len=%d. Cause: %s", sib_idx + 1, cc_cfg->cfg.sibs[sib_idx].len, - ret.to_string()); + to_string(ret)); } } } @@ -141,20 +141,19 @@ void bc_sched::alloc_paging(sf_sched* tti_sched) return; } - alloc_outcome_t ret = alloc_outcome_t::CODERATE_TOO_HIGH; - for (uint32_t nrbgs = 1; nrbgs < cc_cfg->nof_rbgs and ret == alloc_outcome_t::CODERATE_TOO_HIGH; ++nrbgs) { + alloc_result ret = alloc_result::invalid_coderate; + for (uint32_t nrbgs = 1; nrbgs < cc_cfg->nof_rbgs and ret == alloc_result::invalid_coderate; ++nrbgs) { rbg_interval rbg_interv = find_empty_rbg_interval(nrbgs, tti_sched->get_dl_mask()); if (rbg_interv.length() != nrbgs) { - ret = alloc_outcome_t::RB_COLLISION; + ret = alloc_result::no_sch_space; break; } ret = tti_sched->alloc_paging(bc_aggr_level, paging_payload, rbg_interv); } - if (ret != alloc_outcome_t::SUCCESS) { - logger.warning( - "SCHED: Could not allocate Paging with payload length=%d, cause=%s", paging_payload, ret.to_string()); + if (ret != alloc_result::success) { + logger.warning("SCHED: Could not allocate Paging with payload length=%d, cause=%s", paging_payload, to_string(ret)); } } @@ -173,28 +172,27 @@ ra_sched::ra_sched(const sched_cell_params_t& cfg_, sched_ue_list& ue_db_) : cc_cfg(&cfg_), logger(srslog::fetch_basic_logger("MAC")), ue_db(&ue_db_) {} -alloc_outcome_t -ra_sched::allocate_pending_rar(sf_sched* tti_sched, const pending_rar_t& rar, uint32_t& nof_grants_alloc) +alloc_result ra_sched::allocate_pending_rar(sf_sched* tti_sched, const pending_rar_t& rar, uint32_t& nof_grants_alloc) { - alloc_outcome_t ret = alloc_outcome_t::ERROR; + alloc_result ret = alloc_result::other_cause; for (nof_grants_alloc = rar.msg3_grant.size(); nof_grants_alloc > 0; nof_grants_alloc--) { - ret = alloc_outcome_t::CODERATE_TOO_HIGH; - for (uint32_t nrbg = 1; nrbg < cc_cfg->nof_rbgs and ret == alloc_outcome_t::CODERATE_TOO_HIGH; ++nrbg) { + ret = alloc_result::invalid_coderate; + for (uint32_t nrbg = 1; nrbg < cc_cfg->nof_rbgs and ret == alloc_result::invalid_coderate; ++nrbg) { rbg_interval rbg_interv = find_empty_rbg_interval(nrbg, tti_sched->get_dl_mask()); if (rbg_interv.length() == nrbg) { ret = tti_sched->alloc_rar(rar_aggr_level, rar, rbg_interv, nof_grants_alloc); } else { - ret = alloc_outcome_t::RB_COLLISION; + ret = alloc_result::no_sch_space; } } // If allocation was not successful because there were not enough RBGs, try allocating fewer Msg3 grants - if (ret != alloc_outcome_t::CODERATE_TOO_HIGH and ret != alloc_outcome_t::RB_COLLISION) { + if (ret != alloc_result::invalid_coderate and ret != alloc_result::no_sch_space) { break; } } - if (ret != alloc_outcome_t::SUCCESS) { - logger.info("SCHED: RAR allocation for L=%d was postponed. Cause=%s", rar_aggr_level, ret.to_string()); + if (ret != alloc_result::success) { + logger.info("SCHED: RAR allocation for L=%d was postponed. 
Cause=%s", rar_aggr_level, to_string(ret)); } return ret; } @@ -232,10 +230,10 @@ void ra_sched::dl_sched(sf_sched* tti_sched) } // Try to schedule DCI + RBGs for RAR Grant - uint32_t nof_rar_allocs = 0; - alloc_outcome_t ret = allocate_pending_rar(tti_sched, rar, nof_rar_allocs); + uint32_t nof_rar_allocs = 0; + alloc_result ret = allocate_pending_rar(tti_sched, rar, nof_rar_allocs); - if (ret == alloc_outcome_t::SUCCESS) { + if (ret == alloc_result::success) { // If RAR allocation was successful: // - in case all Msg3 grants were allocated, remove pending RAR, and continue with following RAR // - otherwise, erase only Msg3 grants that were allocated, and stop iteration @@ -251,7 +249,7 @@ void ra_sched::dl_sched(sf_sched* tti_sched) // If RAR allocation was not successful: // - in case of unavailable PDCCH space, try next pending RAR allocation // - otherwise, stop iteration - if (ret != alloc_outcome_t::DCI_COLLISION) { + if (ret != alloc_result::no_cch_space) { break; } ++it; @@ -303,7 +301,8 @@ void ra_sched::ul_sched(sf_sched* sf_dl_sched, sf_sched* sf_msg3_sched) for (const auto& msg3grant : rar.rar_grant.msg3_grant) { uint16_t crnti = msg3grant.data.temp_crnti; auto user_it = ue_db->find(crnti); - if (user_it != ue_db->end() and sf_msg3_sched->alloc_msg3(user_it->second.get(), msg3grant)) { + if (user_it != ue_db->end() and + sf_msg3_sched->alloc_msg3(user_it->second.get(), msg3grant) == alloc_result::success) { logger.debug("SCHED: Queueing Msg3 for rnti=0x%x at tti=%d", crnti, sf_msg3_sched->get_tti_tx_ul().to_uint()); } else { logger.error( diff --git a/srsenb/src/stack/mac/sched_grid.cc b/srsenb/src/stack/mac/sched_grid.cc index 021969bfd..1e75fda7b 100644 --- a/srsenb/src/stack/mac/sched_grid.cc +++ b/srsenb/src/stack/mac/sched_grid.cc @@ -16,34 +16,26 @@ namespace srsenb { -const char* alloc_outcome_t::to_string() const +const char* to_string(alloc_result result) { switch (result) { - case SUCCESS: + case alloc_result::success: return "success"; - case DCI_COLLISION: - return "PDCCH position not available"; - case RB_COLLISION: - return "rb_collision"; - case ERROR: + case alloc_result::sch_collision: + return "Collision with existing SCH allocations"; + case alloc_result::other_cause: return "error"; - case NOF_RB_INVALID: - return "invalid nof prbs"; - case PUCCH_COLLISION: - return "pucch_collision"; - case MEASGAP_COLLISION: - return "measgap_collision"; - case ALREADY_ALLOC: - return "already allocated"; - case NO_DATA: - return "no pending data to allocate"; - case INVALID_PRBMASK: - return "invalid rbg mask"; - case INVALID_CARRIER: - return "invalid eNB carrier"; - case CODERATE_TOO_HIGH: - return "Effective coderate is too high"; - case NOF_ALLOCS_LIMIT: + case alloc_result::no_cch_space: + return "No space available in PUCCH or PDCCH"; + case alloc_result::no_sch_space: + return "Requested number of PRBs not available"; + case alloc_result::rnti_inactive: + return "rnti cannot be allocated (e.g. already allocated, no data, meas gap collision, carrier inactive, etc.)"; + case alloc_result::invalid_grant_params: + return "invalid grant arguments (e.g. invalid prb mask)"; + case alloc_result::invalid_coderate: + return "Effective coderate exceeds threshold"; + case alloc_result::no_grant_space: return "Max number of allocations reached"; default: break; @@ -150,46 +142,50 @@ void sf_grid_t::new_tti(tti_point tti_rx_) } //! Allocates CCEs and RBs for the given mask and allocation type (e.g. 
data, BC, RAR, paging) -alloc_outcome_t sf_grid_t::alloc_dl(uint32_t aggr_idx, - alloc_type_t alloc_type, - rbgmask_t alloc_mask, - sched_ue* user, - bool has_pusch_grant) +alloc_result sf_grid_t::alloc_dl(uint32_t aggr_idx, + alloc_type_t alloc_type, + rbgmask_t alloc_mask, + sched_ue* user, + bool has_pusch_grant) { // Check RBG collision if ((dl_mask & alloc_mask).any()) { - return alloc_outcome_t::RB_COLLISION; + logger.debug("SCHED: Provided RBG mask collides with allocation previously made.\n"); + return alloc_result::sch_collision; } // Allocate DCI in PDCCH if (not pdcch_alloc.alloc_dci(alloc_type, aggr_idx, user, has_pusch_grant)) { - if (user != nullptr) { - if (logger.debug.enabled()) { - logger.debug("No space in PDCCH for rnti=0x%x DL tx. Current PDCCH allocation:\n%s", + if (logger.debug.enabled()) { + if (user != nullptr) { + logger.debug("SCHED: No space in PDCCH for rnti=0x%x DL tx. Current PDCCH allocation:\n%s", user->get_rnti(), pdcch_alloc.result_to_string(true).c_str()); + } else { + logger.debug("SCHED: No space in PDCCH for DL tx. Current PDCCH allocation:\n%s", + pdcch_alloc.result_to_string(true).c_str()); } } - return alloc_outcome_t::DCI_COLLISION; + return alloc_result::no_cch_space; } // Allocate RBGs dl_mask |= alloc_mask; - return alloc_outcome_t::SUCCESS; + return alloc_result::success; } /// Allocates CCEs and RBs for control allocs. It allocates RBs in a contiguous manner. -alloc_outcome_t sf_grid_t::alloc_dl_ctrl(uint32_t aggr_idx, rbg_interval rbg_range, alloc_type_t alloc_type) +alloc_result sf_grid_t::alloc_dl_ctrl(uint32_t aggr_idx, rbg_interval rbg_range, alloc_type_t alloc_type) { if (alloc_type != alloc_type_t::DL_RAR and alloc_type != alloc_type_t::DL_BC and alloc_type != alloc_type_t::DL_PCCH) { logger.error("SCHED: DL control allocations must be RAR/BC/PDCCH"); - return alloc_outcome_t::ERROR; + return alloc_result::other_cause; } // Setup rbg_range starting from left if (rbg_range.stop() > nof_rbgs) { - return alloc_outcome_t::RB_COLLISION; + return alloc_result::sch_collision; } // allocate DCI and RBGs @@ -199,26 +195,27 @@ alloc_outcome_t sf_grid_t::alloc_dl_ctrl(uint32_t aggr_idx, rbg_interval rbg_ran } //! Allocates CCEs and RBs for a user DL data alloc. -alloc_outcome_t sf_grid_t::alloc_dl_data(sched_ue* user, const rbgmask_t& user_mask, bool has_pusch_grant) +alloc_result sf_grid_t::alloc_dl_data(sched_ue* user, const rbgmask_t& user_mask, bool has_pusch_grant) { srslte_dci_format_t dci_format = user->get_dci_format(); uint32_t nof_bits = srslte_dci_format_sizeof(&cc_cfg->cfg.cell, nullptr, nullptr, dci_format); uint32_t aggr_idx = user->get_aggr_level(cc_cfg->enb_cc_idx, nof_bits); - alloc_outcome_t ret = alloc_dl(aggr_idx, alloc_type_t::DL_DATA, user_mask, user, has_pusch_grant); + alloc_result ret = alloc_dl(aggr_idx, alloc_type_t::DL_DATA, user_mask, user, has_pusch_grant); return ret; } -alloc_outcome_t sf_grid_t::alloc_ul_data(sched_ue* user, prb_interval alloc, bool needs_pdcch, bool strict) +alloc_result sf_grid_t::alloc_ul_data(sched_ue* user, prb_interval alloc, bool needs_pdcch, bool strict) { if (alloc.stop() > ul_mask.size()) { - return alloc_outcome_t::ERROR; + return alloc_result::no_sch_space; } prbmask_t newmask(ul_mask.size()); newmask.fill(alloc.start(), alloc.stop()); if (strict and (ul_mask & newmask).any()) { - return alloc_outcome_t::RB_COLLISION; + logger.debug("SCHED: Failed UL allocation. 
Cause: %s", to_string(alloc_result::sch_collision)); + return alloc_result::sch_collision; } // Generate PDCCH except for RAR and non-adaptive retx @@ -231,13 +228,13 @@ alloc_outcome_t sf_grid_t::alloc_ul_data(sched_ue* user, prb_interval alloc, boo user->get_rnti(), pdcch_alloc.result_to_string(true).c_str()); } - return alloc_outcome_t::DCI_COLLISION; + return alloc_result::no_cch_space; } } ul_mask |= newmask; - return alloc_outcome_t::SUCCESS; + return alloc_result::success; } bool sf_grid_t::reserve_dl_rbgs(uint32_t start_rbg, uint32_t end_rbg) @@ -259,10 +256,10 @@ void sf_grid_t::rem_last_alloc_dl(rbg_interval rbgs) dl_mask &= ~rbgmask; } -alloc_outcome_t sf_grid_t::reserve_ul_prbs(prb_interval alloc, bool strict) +alloc_result sf_grid_t::reserve_ul_prbs(prb_interval alloc, bool strict) { if (alloc.stop() > ul_mask.size()) { - return alloc_outcome_t::ERROR; + return alloc_result::no_sch_space; } prbmask_t newmask(ul_mask.size()); @@ -270,14 +267,14 @@ alloc_outcome_t sf_grid_t::reserve_ul_prbs(prb_interval alloc, bool strict) return reserve_ul_prbs(newmask, strict); } -alloc_outcome_t sf_grid_t::reserve_ul_prbs(const prbmask_t& prbmask, bool strict) +alloc_result sf_grid_t::reserve_ul_prbs(const prbmask_t& prbmask, bool strict) { - alloc_outcome_t ret = alloc_outcome_t::SUCCESS; + alloc_result ret = alloc_result::success; if (strict and (ul_mask & prbmask).any()) { fmt::memory_buffer tmp_buffer; fmt::format_to(tmp_buffer, "There was a collision in the UL. Current mask={:x}, new mask={:x}", ul_mask, prbmask); logger.error("%s", srslte::to_c_str(tmp_buffer)); - ret = alloc_outcome_t::ERROR; + ret = alloc_result::sch_collision; } ul_mask |= prbmask; return ret; @@ -362,17 +359,17 @@ bool sf_sched::is_ul_alloc(uint16_t rnti) const ul_data_allocs.begin(), ul_data_allocs.end(), [rnti](const ul_alloc_t& u) { return u.rnti == rnti; }); } -alloc_outcome_t sf_sched::alloc_sib(uint32_t aggr_lvl, uint32_t sib_idx, uint32_t sib_ntx, rbg_interval rbgs) +alloc_result sf_sched::alloc_sib(uint32_t aggr_lvl, uint32_t sib_idx, uint32_t sib_ntx, rbg_interval rbgs) { if (bc_allocs.size() >= sched_interface::MAX_BC_LIST) { logger.warning("SCHED: Maximum number of Broadcast allocations reached"); - return alloc_outcome_t::NOF_ALLOCS_LIMIT; + return alloc_result::no_grant_space; } bc_alloc_t bc_alloc; // Allocate SIB RBGs and PDCCH - alloc_outcome_t ret = tti_alloc.alloc_dl_ctrl(aggr_lvl, rbgs, alloc_type_t::DL_BC); - if (ret != alloc_outcome_t::SUCCESS) { + alloc_result ret = tti_alloc.alloc_dl_ctrl(aggr_lvl, rbgs, alloc_type_t::DL_BC); + if (ret != alloc_result::success) { return ret; } @@ -380,7 +377,7 @@ alloc_outcome_t sf_sched::alloc_sib(uint32_t aggr_lvl, uint32_t sib_idx, uint32_ if (not generate_sib_dci(bc_alloc.bc_grant, get_tti_tx_dl(), sib_idx, sib_ntx, rbgs, *cc_cfg, tti_alloc.get_cfi())) { // Cancel on-going allocation tti_alloc.rem_last_alloc_dl(rbgs); - return alloc_outcome_t::CODERATE_TOO_HIGH; + return alloc_result::invalid_coderate; } // Allocation Successful @@ -389,20 +386,20 @@ alloc_outcome_t sf_sched::alloc_sib(uint32_t aggr_lvl, uint32_t sib_idx, uint32_ bc_alloc.req_bytes = cc_cfg->cfg.sibs[sib_idx].len; bc_allocs.push_back(bc_alloc); - return alloc_outcome_t::SUCCESS; + return alloc_result::success; } -alloc_outcome_t sf_sched::alloc_paging(uint32_t aggr_lvl, uint32_t paging_payload, rbg_interval rbgs) +alloc_result sf_sched::alloc_paging(uint32_t aggr_lvl, uint32_t paging_payload, rbg_interval rbgs) { if (bc_allocs.size() >= sched_interface::MAX_BC_LIST) { 
logger.warning("SCHED: Maximum number of Broadcast allocations reached"); - return alloc_outcome_t::NOF_ALLOCS_LIMIT; + return alloc_result::no_grant_space; } bc_alloc_t bc_alloc; // Allocate Paging RBGs and PDCCH - alloc_outcome_t ret = tti_alloc.alloc_dl_ctrl(aggr_lvl, rbgs, alloc_type_t::DL_PCCH); - if (ret != alloc_outcome_t::SUCCESS) { + alloc_result ret = tti_alloc.alloc_dl_ctrl(aggr_lvl, rbgs, alloc_type_t::DL_PCCH); + if (ret != alloc_result::success) { return ret; } @@ -410,7 +407,7 @@ alloc_outcome_t sf_sched::alloc_paging(uint32_t aggr_lvl, uint32_t paging_payloa if (not generate_paging_dci(bc_alloc.bc_grant, get_tti_tx_dl(), paging_payload, rbgs, *cc_cfg, tti_alloc.get_cfi())) { // Cancel on-going allocation tti_alloc.rem_last_alloc_dl(rbgs); - return alloc_outcome_t::CODERATE_TOO_HIGH; + return alloc_result::invalid_coderate; } // Allocation Successful @@ -419,15 +416,15 @@ alloc_outcome_t sf_sched::alloc_paging(uint32_t aggr_lvl, uint32_t paging_payloa bc_alloc.req_bytes = paging_payload; bc_allocs.push_back(bc_alloc); - return alloc_outcome_t::SUCCESS; + return alloc_result::success; } -alloc_outcome_t sf_sched::alloc_rar(uint32_t aggr_lvl, const pending_rar_t& rar, rbg_interval rbgs, uint32_t nof_grants) +alloc_result sf_sched::alloc_rar(uint32_t aggr_lvl, const pending_rar_t& rar, rbg_interval rbgs, uint32_t nof_grants) { static const uint32_t msg3_nof_prbs = 3; if (rar_allocs.size() >= sched_interface::MAX_RAR_LIST) { logger.info("SCHED: Maximum number of RAR allocations per TTI reached."); - return alloc_outcome_t::NOF_ALLOCS_LIMIT; + return alloc_result::no_grant_space; } uint32_t buf_rar = 7 * nof_grants + 1; // 1+6 bytes per RAR subheader+body and 1 byte for Backoff @@ -435,12 +432,12 @@ alloc_outcome_t sf_sched::alloc_rar(uint32_t aggr_lvl, const pending_rar_t& rar, // check if there is enough space for Msg3 if (last_msg3_prb + total_ul_nof_prbs > max_msg3_prb) { - return alloc_outcome_t::RB_COLLISION; + return alloc_result::sch_collision; } // allocate RBGs and PDCCH - alloc_outcome_t ret = tti_alloc.alloc_dl_ctrl(aggr_lvl, rbgs, alloc_type_t::DL_RAR); - if (ret != alloc_outcome_t::SUCCESS) { + alloc_result ret = tti_alloc.alloc_dl_ctrl(aggr_lvl, rbgs, alloc_type_t::DL_RAR); + if (ret != alloc_result::success) { return ret; } @@ -450,7 +447,7 @@ alloc_outcome_t sf_sched::alloc_rar(uint32_t aggr_lvl, const pending_rar_t& rar, rar_alloc.rar_grant, get_tti_tx_dl(), rar, rbgs, nof_grants, last_msg3_prb, *cc_cfg, tti_alloc.get_cfi())) { // Cancel on-going allocation tti_alloc.rem_last_alloc_dl(rbgs); - return alloc_outcome_t::CODERATE_TOO_HIGH; + return alloc_result::invalid_coderate; } // RAR allocation successful @@ -475,24 +472,24 @@ bool is_periodic_cqi_expected(const sched_interface::ue_cfg_t& ue_cfg, tti_point return false; } -alloc_outcome_t sf_sched::alloc_dl_user(sched_ue* user, const rbgmask_t& user_mask, uint32_t pid) +alloc_result sf_sched::alloc_dl_user(sched_ue* user, const rbgmask_t& user_mask, uint32_t pid) { if (data_allocs.size() >= sched_interface::MAX_DATA_LIST) { logger.warning("SCHED: Maximum number of DL allocations reached"); - return alloc_outcome_t::NOF_ALLOCS_LIMIT; + return alloc_result::no_grant_space; } if (is_dl_alloc(user->get_rnti())) { logger.warning("SCHED: Attempt to assign multiple harq pids to the same user rnti=0x%x", user->get_rnti()); - return alloc_outcome_t::ALREADY_ALLOC; + return alloc_result::rnti_inactive; } auto* cc = user->find_ue_carrier(cc_cfg->enb_cc_idx); if (cc == nullptr or cc->cc_state() != cc_st::active) { - return 
alloc_outcome_t::INVALID_CARRIER; + return alloc_result::rnti_inactive; } if (not user->pdsch_enabled(srslte::tti_point{get_tti_rx()}, cc_cfg->enb_cc_idx)) { - return alloc_outcome_t::MEASGAP_COLLISION; + return alloc_result::rnti_inactive; } // Check if allocation would cause segmentation @@ -502,14 +499,14 @@ alloc_outcome_t sf_sched::alloc_dl_user(sched_ue* user, const rbgmask_t& user_ma rbg_interval r = user->get_required_dl_rbgs(cc_cfg->enb_cc_idx); if (r.start() > user_mask.count()) { logger.warning("SCHED: The number of RBGs allocated to rnti=0x%x will force segmentation", user->get_rnti()); - return alloc_outcome_t::NOF_RB_INVALID; + return alloc_result::invalid_grant_params; } } srslte_dci_format_t dci_format = user->get_dci_format(); if (dci_format == SRSLTE_DCI_FORMAT1A and not is_contiguous(user_mask)) { logger.warning("SCHED: Can't use distributed RBGs for DCI format 1A"); - return alloc_outcome_t::INVALID_PRBMASK; + return alloc_result::invalid_grant_params; } bool has_pusch_grant = is_ul_alloc(user->get_rnti()) or cc_results->is_ul_alloc(user->get_rnti()); @@ -525,28 +522,28 @@ alloc_outcome_t sf_sched::alloc_dl_user(sched_ue* user, const rbgmask_t& user_ma prb_interval alloc = {}; uint32_t L = user->get_required_prb_ul(cc_cfg->enb_cc_idx, srslte::ceil_div(SRSLTE_UCI_CQI_CODED_PUCCH_B + 2, 8)); tti_alloc.find_ul_alloc(L, &alloc); - has_pusch_grant = alloc.length() > 0 and alloc_ul_user(user, alloc); + has_pusch_grant = alloc.length() > 0 and alloc_ul_user(user, alloc) == alloc_result::success; if (ue_cc_idx != 0 and not has_pusch_grant) { // For SCells, if we can't allocate small PUSCH grant, abort DL allocation - return alloc_outcome_t::PUCCH_COLLISION; + return alloc_result::no_cch_space; } } // Try to allocate RBGs, PDCCH, and PUCCH - alloc_outcome_t ret = tti_alloc.alloc_dl_data(user, user_mask, has_pusch_grant); + alloc_result ret = tti_alloc.alloc_dl_data(user, user_mask, has_pusch_grant); - if (ret == alloc_outcome_t::DCI_COLLISION and not has_pusch_grant and not data_allocs.empty() and + if (ret == alloc_result::no_cch_space and not has_pusch_grant and not data_allocs.empty() and user->get_ul_harq(get_tti_tx_ul(), get_enb_cc_idx())->is_empty()) { // PUCCH may be too full. 
Attempt small UL grant allocation for UCI-PUSCH uint32_t L = user->get_required_prb_ul(cc_cfg->enb_cc_idx, srslte::ceil_div(SRSLTE_UCI_CQI_CODED_PUCCH_B + 2, 8)); prb_interval alloc = {}; tti_alloc.find_ul_alloc(L, &alloc); - has_pusch_grant = alloc.length() > 0 and alloc_ul_user(user, alloc); + has_pusch_grant = alloc.length() > 0 and alloc_ul_user(user, alloc) == alloc_result::success; if (has_pusch_grant) { ret = tti_alloc.alloc_dl_data(user, user_mask, has_pusch_grant); } } - if (ret != alloc_outcome_t::SUCCESS) { + if (ret != alloc_result::success) { return ret; } @@ -558,49 +555,49 @@ alloc_outcome_t sf_sched::alloc_dl_user(sched_ue* user, const rbgmask_t& user_ma alloc.pid = pid; data_allocs.push_back(alloc); - return alloc_outcome_t::SUCCESS; + return alloc_result::success; } -alloc_outcome_t +alloc_result sf_sched::alloc_ul(sched_ue* user, prb_interval alloc, ul_alloc_t::type_t alloc_type, bool is_msg3, int msg3_mcs) { - if (ul_data_allocs.size() >= sched_interface::MAX_DATA_LIST) { - logger.warning("SCHED: Maximum number of UL allocations reached"); - return alloc_outcome_t::ERROR; + if (ul_data_allocs.full()) { + logger.debug("SCHED: Maximum number of UL allocations=%zd reached", ul_data_allocs.size()); + return alloc_result::no_grant_space; } - // Check whether user was already allocated if (is_ul_alloc(user->get_rnti())) { - logger.warning("SCHED: Attempt to assign multiple ul_harq_proc to the same user rnti=0x%x", user->get_rnti()); - return alloc_outcome_t::ALREADY_ALLOC; + logger.warning("SCHED: Attempt to assign multiple UL grants to the same user rnti=0x%x", user->get_rnti()); + return alloc_result::rnti_inactive; } // Check if there is no collision with measGap bool needs_pdcch = alloc_type == ul_alloc_t::ADAPT_RETX or (alloc_type == ul_alloc_t::NEWTX and not is_msg3); - if (not user->pusch_enabled(srslte::tti_point{get_tti_rx()}, cc_cfg->enb_cc_idx, needs_pdcch)) { - return alloc_outcome_t::MEASGAP_COLLISION; + if (not user->pusch_enabled(get_tti_rx(), cc_cfg->enb_cc_idx, needs_pdcch)) { + logger.debug("SCHED: PDCCH would collide with rnti=0x%x Measurement Gap", user->get_rnti()); + return alloc_result::rnti_inactive; } // Allocate RBGs and DCI space - bool allow_pucch_collision = cc_cfg->nof_prb() == 6 and is_msg3; - alloc_outcome_t ret = tti_alloc.alloc_ul_data(user, alloc, needs_pdcch, not allow_pucch_collision); - if (ret != alloc_outcome_t::SUCCESS) { + bool allow_pucch_collision = cc_cfg->nof_prb() == 6 and is_msg3; + alloc_result ret = tti_alloc.alloc_ul_data(user, alloc, needs_pdcch, not allow_pucch_collision); + if (ret != alloc_result::success) { return ret; } - ul_alloc_t ul_alloc = {}; - ul_alloc.type = alloc_type; - ul_alloc.is_msg3 = is_msg3; - ul_alloc.dci_idx = tti_alloc.get_pdcch_grid().nof_allocs() - 1; - ul_alloc.rnti = user->get_rnti(); - ul_alloc.alloc = alloc; - ul_alloc.msg3_mcs = msg3_mcs; - ul_data_allocs.push_back(ul_alloc); + ul_data_allocs.emplace_back(); + ul_alloc_t& ul_alloc = ul_data_allocs.back(); + ul_alloc.type = alloc_type; + ul_alloc.is_msg3 = is_msg3; + ul_alloc.dci_idx = tti_alloc.get_pdcch_grid().nof_allocs() - 1; + ul_alloc.rnti = user->get_rnti(); + ul_alloc.alloc = alloc; + ul_alloc.msg3_mcs = msg3_mcs; - return alloc_outcome_t::SUCCESS; + return alloc_result::success; } -alloc_outcome_t sf_sched::alloc_ul_user(sched_ue* user, prb_interval alloc) +alloc_result sf_sched::alloc_ul_user(sched_ue* user, prb_interval alloc) { // check whether adaptive/non-adaptive retx/newtx ul_alloc_t::type_t alloc_type; @@ -887,16 +884,16 @@ void 
sf_sched::set_ul_sched_result(const sf_cch_allocator::alloc_result_t& dci_r } } -alloc_outcome_t sf_sched::alloc_msg3(sched_ue* user, const sched_interface::dl_sched_rar_grant_t& rargrant) +alloc_result sf_sched::alloc_msg3(sched_ue* user, const sched_interface::dl_sched_rar_grant_t& rargrant) { // Derive PRBs from allocated RAR grants prb_interval msg3_alloc = prb_interval::riv_to_prbs(rargrant.grant.rba, cc_cfg->nof_prb()); - alloc_outcome_t ret = alloc_ul(user, msg3_alloc, sf_sched::ul_alloc_t::NEWTX, true, rargrant.grant.trunc_mcs); - if (not ret) { + alloc_result ret = alloc_ul(user, msg3_alloc, sf_sched::ul_alloc_t::NEWTX, true, rargrant.grant.trunc_mcs); + if (ret != alloc_result::success) { fmt::memory_buffer str_buffer; fmt::format_to(str_buffer, "{}", msg3_alloc); - logger.warning("SCHED: Could not allocate msg3 within %s", srslte::to_c_str(str_buffer)); + logger.warning("SCHED: Could not allocate msg3 within %s.", srslte::to_c_str(str_buffer)); } return ret; } diff --git a/srsenb/src/stack/mac/schedulers/sched_base.cc b/srsenb/src/stack/mac/schedulers/sched_base.cc index 107374325..944e2fc23 100644 --- a/srsenb/src/stack/mac/schedulers/sched_base.cc +++ b/srsenb/src/stack/mac/schedulers/sched_base.cc @@ -112,12 +112,12 @@ const dl_harq_proc* get_dl_newtx_harq(sched_ue& user, sf_sched* tti_sched) return user.get_empty_dl_harq(tti_sched->get_tti_tx_dl(), tti_sched->get_enb_cc_idx()); } -alloc_outcome_t try_dl_retx_alloc(sf_sched& tti_sched, sched_ue& ue, const dl_harq_proc& h) +alloc_result try_dl_retx_alloc(sf_sched& tti_sched, sched_ue& ue, const dl_harq_proc& h) { // Try to reuse the same mask - rbgmask_t retx_mask = h.get_rbgmask(); - alloc_outcome_t code = tti_sched.alloc_dl_user(&ue, retx_mask, h.get_id()); - if (code == alloc_outcome_t::SUCCESS or code == alloc_outcome_t::DCI_COLLISION) { + rbgmask_t retx_mask = h.get_rbgmask(); + alloc_result code = tti_sched.alloc_dl_user(&ue, retx_mask, h.get_id()); + if (code != alloc_result::sch_collision) { return code; } @@ -128,11 +128,10 @@ alloc_outcome_t try_dl_retx_alloc(sf_sched& tti_sched, sched_ue& ue, const dl_ha if (retx_mask.count() == nof_rbg) { return tti_sched.alloc_dl_user(&ue, retx_mask, h.get_id()); } - return alloc_outcome_t::RB_COLLISION; + return alloc_result::sch_collision; } -alloc_outcome_t -try_dl_newtx_alloc_greedy(sf_sched& tti_sched, sched_ue& ue, const dl_harq_proc& h, rbgmask_t* result_mask) +alloc_result try_dl_newtx_alloc_greedy(sf_sched& tti_sched, sched_ue& ue, const dl_harq_proc& h, rbgmask_t* result_mask) { if (result_mask != nullptr) { *result_mask = {}; @@ -141,25 +140,25 @@ try_dl_newtx_alloc_greedy(sf_sched& tti_sched, sched_ue& ue, const dl_harq_proc& // If all RBGs are occupied, the next steps can be shortcut const rbgmask_t& current_mask = tti_sched.get_dl_mask(); if (current_mask.all()) { - return alloc_outcome_t::RB_COLLISION; + return alloc_result::no_sch_space; } // If there is no data to transmit, no need to allocate rbg_interval req_rbgs = ue.get_required_dl_rbgs(tti_sched.get_enb_cc_idx()); if (req_rbgs.stop() == 0) { - return alloc_outcome_t::NO_DATA; + return alloc_result::rnti_inactive; } // Find RBG mask that accommodates pending data bool is_contiguous_alloc = ue.get_dci_format() == SRSLTE_DCI_FORMAT1A; rbgmask_t newtxmask = compute_rbgmask_greedy(req_rbgs.stop(), is_contiguous_alloc, current_mask); if (newtxmask.none() or newtxmask.count() < req_rbgs.start()) { - return alloc_outcome_t::RB_COLLISION; + return alloc_result::no_sch_space; } // empty RBGs were found. 
Attempt allocation - alloc_outcome_t ret = tti_sched.alloc_dl_user(&ue, newtxmask, h.get_id()); - if (ret == alloc_outcome_t::SUCCESS and result_mask != nullptr) { + alloc_result ret = tti_sched.alloc_dl_user(&ue, newtxmask, h.get_id()); + if (ret == alloc_result::success and result_mask != nullptr) { *result_mask = newtxmask; } return ret; @@ -228,7 +227,7 @@ const ul_harq_proc* get_ul_newtx_harq(sched_ue& user, sf_sched* tti_sched) return h->is_empty() ? h : nullptr; } -alloc_outcome_t try_ul_retx_alloc(sf_sched& tti_sched, sched_ue& ue, const ul_harq_proc& h) +alloc_result try_ul_retx_alloc(sf_sched& tti_sched, sched_ue& ue, const ul_harq_proc& h) { prb_interval alloc = h.get_alloc(); if (tti_sched.get_cc_cfg()->nof_prb() == 6 and h.is_msg3()) { @@ -238,20 +237,20 @@ alloc_outcome_t try_ul_retx_alloc(sf_sched& tti_sched, sched_ue& ue, const ul_ha // If can schedule the same mask as in earlier tx, do it if (not tti_sched.get_ul_mask().any(alloc.start(), alloc.stop())) { - alloc_outcome_t ret = tti_sched.alloc_ul_user(&ue, alloc); - if (ret == alloc_outcome_t::SUCCESS or ret == alloc_outcome_t::DCI_COLLISION) { + alloc_result ret = tti_sched.alloc_ul_user(&ue, alloc); + if (ret != alloc_result::sch_collision) { return ret; } } // Avoid measGaps accounting for PDCCH if (not ue.pusch_enabled(tti_sched.get_tti_rx(), tti_sched.get_enb_cc_idx(), true)) { - return alloc_outcome_t::MEASGAP_COLLISION; + return alloc_result::rnti_inactive; } uint32_t nof_prbs = alloc.length(); alloc = find_contiguous_ul_prbs(nof_prbs, tti_sched.get_ul_mask()); if (alloc.length() != nof_prbs) { - return alloc_outcome_t::RB_COLLISION; + return alloc_result::no_sch_space; } return tti_sched.alloc_ul_user(&ue, alloc); } diff --git a/srsenb/src/stack/mac/schedulers/sched_time_pf.cc b/srsenb/src/stack/mac/schedulers/sched_time_pf.cc index 316e36acc..d4ce4fb50 100644 --- a/srsenb/src/stack/mac/schedulers/sched_time_pf.cc +++ b/srsenb/src/stack/mac/schedulers/sched_time_pf.cc @@ -71,25 +71,22 @@ void sched_time_pf::sched_dl_users(sched_ue_list& ue_db, sf_sched* tti_sched) uint32_t sched_time_pf::try_dl_alloc(ue_ctxt& ue_ctxt, sched_ue& ue, sf_sched* tti_sched) { - alloc_outcome_t code = alloc_outcome_t::ERROR; + alloc_result code = alloc_result::other_cause; if (ue_ctxt.dl_retx_h != nullptr) { code = try_dl_retx_alloc(*tti_sched, ue, *ue_ctxt.dl_retx_h); - if (code == alloc_outcome_t::SUCCESS) { + if (code == alloc_result::success) { return ue_ctxt.dl_retx_h->get_tbs(0) + ue_ctxt.dl_retx_h->get_tbs(1); } } // There is space in PDCCH and an available DL HARQ - if (code != alloc_outcome_t::DCI_COLLISION and ue_ctxt.dl_newtx_h != nullptr) { + if (code != alloc_result::no_cch_space and ue_ctxt.dl_newtx_h != nullptr) { rbgmask_t alloc_mask; code = try_dl_newtx_alloc_greedy(*tti_sched, ue, *ue_ctxt.dl_newtx_h, &alloc_mask); - if (code == alloc_outcome_t::SUCCESS) { + if (code == alloc_result::success) { return ue.get_expected_dl_bitrate(cc_cfg->enb_cc_idx, alloc_mask.count()) * tti_duration_ms / 8; } } - if (code == alloc_outcome_t::DCI_COLLISION) { - logger.info("SCHED: Couldn't find space in PDCCH/PUCCH for DL tx for rnti=0x%x", ue.get_rnti()); - } return 0; } @@ -122,11 +119,11 @@ uint32_t sched_time_pf::try_ul_alloc(ue_ctxt& ue_ctxt, sched_ue& ue, sf_sched* t return ue_ctxt.ul_h->get_pending_data(); } - alloc_outcome_t code; - uint32_t estim_tbs_bytes = 0; + alloc_result code; + uint32_t estim_tbs_bytes = 0; if (ue_ctxt.ul_h->has_pending_retx()) { code = try_ul_retx_alloc(*tti_sched, ue, *ue_ctxt.ul_h); - estim_tbs_bytes = 
code == alloc_outcome_t::SUCCESS ? ue_ctxt.ul_h->get_pending_data() : 0; + estim_tbs_bytes = code == alloc_result::success ? ue_ctxt.ul_h->get_pending_data() : 0; } else { // Note: h->is_empty check is required, in case CA allocated a small UL grant for UCI uint32_t pending_data = ue.get_pending_ul_new_data(tti_sched->get_tti_tx_ul(), cc_cfg->enb_cc_idx); @@ -140,13 +137,10 @@ uint32_t sched_time_pf::try_ul_alloc(ue_ctxt& ue_ctxt, sched_ue& ue, sf_sched* t return 0; } code = tti_sched->alloc_ul_user(&ue, alloc); - estim_tbs_bytes = code == alloc_outcome_t::SUCCESS + estim_tbs_bytes = code == alloc_result::success ? ue.get_expected_ul_bitrate(cc_cfg->enb_cc_idx, alloc.length()) * tti_duration_ms / 8 : 0; } - if (code == alloc_outcome_t::DCI_COLLISION) { - logger.info("SCHED: rnti=0x%x, cc=%d, Couldn't find space in PDCCH for UL tx", ue.get_rnti(), cc_cfg->enb_cc_idx); - } return estim_tbs_bytes; } diff --git a/srsenb/src/stack/mac/schedulers/sched_time_rr.cc b/srsenb/src/stack/mac/schedulers/sched_time_rr.cc index e06c406c0..b9c3c6531 100644 --- a/srsenb/src/stack/mac/schedulers/sched_time_rr.cc +++ b/srsenb/src/stack/mac/schedulers/sched_time_rr.cc @@ -11,7 +11,6 @@ */ #include "srsenb/hdr/stack/mac/schedulers/sched_time_rr.h" -#include namespace srsenb { @@ -50,10 +49,7 @@ void sched_time_rr::sched_dl_retxs(sched_ue_list& ue_db, sf_sched* tti_sched, si if (h == nullptr) { continue; } - alloc_outcome_t code = try_dl_retx_alloc(*tti_sched, user, *h); - if (code == alloc_outcome_t::DCI_COLLISION) { - logger.info("SCHED: Couldn't find space in PDCCH/PUCCH for DL retx for rnti=0x%x", user.get_rnti()); - } + try_dl_retx_alloc(*tti_sched, user, *h); } } @@ -74,7 +70,7 @@ void sched_time_rr::sched_dl_newtxs(sched_ue_list& ue_db, sf_sched* tti_sched, s if (h == nullptr) { continue; } - if (try_dl_newtx_alloc_greedy(*tti_sched, user, *h) == alloc_outcome_t::DCI_COLLISION) { + if (try_dl_newtx_alloc_greedy(*tti_sched, user, *h) == alloc_result::no_cch_space) { logger.info("SCHED: Couldn't find space in PDCCH/PUCCH for DL tx for rnti=0x%x", user.get_rnti()); } } @@ -109,9 +105,9 @@ void sched_time_rr::sched_ul_retxs(sched_ue_list& ue_db, sf_sched* tti_sched, si if (h == nullptr) { continue; } - alloc_outcome_t code = try_ul_retx_alloc(*tti_sched, user, *h); - if (code == alloc_outcome_t::DCI_COLLISION) { - logger.info("SCHED: Couldn't find space in PDCCH for UL retx of rnti=0x%x", user.get_rnti()); + alloc_result code = try_ul_retx_alloc(*tti_sched, user, *h); + if (code == alloc_result::no_cch_space) { + logger.debug("SCHED: Couldn't find space in PDCCH for UL retx of rnti=0x%x", user.get_rnti()); } } } @@ -140,8 +136,8 @@ void sched_time_rr::sched_ul_newtxs(sched_ue_list& ue_db, sf_sched* tti_sched, s if (alloc.empty()) { continue; } - alloc_outcome_t ret = tti_sched->alloc_ul_user(&user, alloc); - if (ret == alloc_outcome_t::DCI_COLLISION) { + alloc_result ret = tti_sched->alloc_ul_user(&user, alloc); + if (ret == alloc_result::no_cch_space) { logger.info( "SCHED: rnti=0x%x, cc=%d, Couldn't find space in PDCCH for UL tx", user.get_rnti(), cc_cfg->enb_cc_idx); } From fb48f4484cc1f7761529cbb304e5050dd9523d5a Mon Sep 17 00:00:00 2001 From: Francisco Date: Thu, 18 Mar 2021 10:40:37 +0000 Subject: [PATCH 44/64] sched refactor - removal or cleanup of unused sched methods --- lib/include/srslte/adt/bounded_vector.h | 8 +- srsenb/hdr/stack/mac/sched_grid.h | 43 ++++----- srsenb/src/stack/mac/sched_carrier.cc | 7 +- srsenb/src/stack/mac/sched_grid.cc | 87 +++++++++---------- 
srsenb/src/stack/mac/schedulers/sched_base.cc | 4 +- 5 files changed, 63 insertions(+), 86 deletions(-) diff --git a/lib/include/srslte/adt/bounded_vector.h b/lib/include/srslte/adt/bounded_vector.h index d940347c7..f642f4429 100644 --- a/lib/include/srslte/adt/bounded_vector.h +++ b/lib/include/srslte/adt/bounded_vector.h @@ -107,13 +107,13 @@ public: } T& front() { return (*this)[0]; } const T& front() const { return (*this)[0]; } - T* data() { return &front(); } - const T* data() const { return &front(); } + T* data() { return reinterpret_cast(buffer); } + const T* data() const { return reinterpret_cast(buffer); } // Iterators - iterator begin() { return reinterpret_cast(buffer); } + iterator begin() { return data(); } iterator end() { return begin() + size_; } - const_iterator begin() const { return reinterpret_cast(buffer); } + const_iterator begin() const { return data(); } const_iterator end() const { return begin() + size_; } // Capacity diff --git a/srsenb/hdr/stack/mac/sched_grid.h b/srsenb/hdr/stack/mac/sched_grid.h index 972256d61..79895bb86 100644 --- a/srsenb/hdr/stack/mac/sched_grid.h +++ b/srsenb/hdr/stack/mac/sched_grid.h @@ -19,7 +19,6 @@ #include "srslte/adt/bounded_bitset.h" #include "srslte/adt/circular_array.h" #include "srslte/srslog/srslog.h" -#include #include namespace srsenb { @@ -30,7 +29,7 @@ enum class alloc_result { sch_collision, no_cch_space, no_sch_space, - rnti_inactive, + no_rnti_opportunity, invalid_grant_params, invalid_coderate, no_grant_space, @@ -101,11 +100,6 @@ private: class sf_grid_t { public: - struct dl_ctrl_alloc_t { - alloc_result outcome; - rbg_interval rbg_range; - }; - sf_grid_t() : logger(srslog::fetch_basic_logger("MAC")) {} void init(const sched_cell_params_t& cell_params_); @@ -168,8 +162,6 @@ public: }; struct bc_alloc_t : public ctrl_alloc_t { sched_interface::dl_sched_bc_t bc_grant; - bc_alloc_t() = default; - explicit bc_alloc_t(const ctrl_alloc_t& c) : ctrl_alloc_t(c) {} }; struct dl_alloc_t { size_t dci_idx; @@ -194,7 +186,6 @@ public: uint32_t n_prb = 0; uint32_t mcs = 0; }; - typedef std::pair ctrl_code_t; // Control/Configuration Methods sf_sched(); @@ -206,28 +197,28 @@ public: alloc_result alloc_paging(uint32_t aggr_lvl, uint32_t paging_payload, rbg_interval rbgs); alloc_result alloc_rar(uint32_t aggr_lvl, const pending_rar_t& rar_grant, rbg_interval rbgs, uint32_t nof_grants); bool reserve_dl_rbgs(uint32_t rbg_start, uint32_t rbg_end) { return tti_alloc.reserve_dl_rbgs(rbg_start, rbg_end); } - const std::vector& get_allocated_rars() const { return rar_allocs; } // UL alloc methods alloc_result alloc_msg3(sched_ue* user, const sched_interface::dl_sched_rar_grant_t& rargrant); alloc_result - alloc_ul(sched_ue* user, prb_interval alloc, ul_alloc_t::type_t alloc_type, bool is_msg3 = false, int msg3_mcs = -1); - bool reserve_ul_prbs(const prbmask_t& ulmask, bool strict) + alloc_ul(sched_ue* user, prb_interval alloc, ul_alloc_t::type_t alloc_type, bool is_msg3 = false, int msg3_mcs = -1); + alloc_result reserve_ul_prbs(const prbmask_t& ulmask, bool strict) { - return tti_alloc.reserve_ul_prbs(ulmask, strict) == alloc_result::success; + return tti_alloc.reserve_ul_prbs(ulmask, strict); } - bool alloc_phich(sched_ue* user); + alloc_result alloc_phich(sched_ue* user); // compute DCIs and generate dl_sched_result/ul_sched_result for a given TTI void generate_sched_results(sched_ue_list& ue_db); - alloc_result alloc_dl_user(sched_ue* user, const rbgmask_t& user_mask, uint32_t pid); - tti_point get_tti_tx_dl() const { return 
to_tx_dl(tti_rx); } - uint32_t get_nof_ctrl_symbols() const; - const rbgmask_t& get_dl_mask() const { return tti_alloc.get_dl_mask(); } - alloc_result alloc_ul_user(sched_ue* user, prb_interval alloc); - const prbmask_t& get_ul_mask() const { return tti_alloc.get_ul_mask(); } - tti_point get_tti_tx_ul() const { return to_tx_ul(tti_rx); } + alloc_result alloc_dl_user(sched_ue* user, const rbgmask_t& user_mask, uint32_t pid); + tti_point get_tti_tx_dl() const { return to_tx_dl(tti_rx); } + uint32_t get_nof_ctrl_symbols() const; + const rbgmask_t& get_dl_mask() const { return tti_alloc.get_dl_mask(); } + alloc_result alloc_ul_user(sched_ue* user, prb_interval alloc); + const prbmask_t& get_ul_mask() const { return tti_alloc.get_ul_mask(); } + tti_point get_tti_tx_ul() const { return to_tx_ul(tti_rx); } + srslte::const_span get_allocated_rars() const { return rar_allocs; } // getters tti_point get_tti_rx() const { return tti_rx; } @@ -237,10 +228,6 @@ public: const sched_cell_params_t* get_cc_cfg() const { return cc_cfg; } private: - void set_bc_sched_result(const sf_cch_allocator::alloc_result_t& dci_result, - sched_interface::dl_sched_res_t* dl_result); - void set_rar_sched_result(const sf_cch_allocator::alloc_result_t& dci_result, - sched_interface::dl_sched_res_t* dl_result); void set_dl_data_sched_result(const sf_cch_allocator::alloc_result_t& dci_result, sched_interface::dl_sched_res_t* dl_result, sched_ue_list& ue_list); @@ -257,8 +244,8 @@ private: sf_grid_t tti_alloc; srslte::bounded_vector bc_allocs; - std::vector rar_allocs; - std::vector data_allocs; + srslte::bounded_vector rar_allocs; + srslte::bounded_vector data_allocs; srslte::bounded_vector ul_data_allocs; uint32_t last_msg3_prb = 0, max_msg3_prb = 0; diff --git a/srsenb/src/stack/mac/sched_carrier.cc b/srsenb/src/stack/mac/sched_carrier.cc index 1306e7f71..7f43795d6 100644 --- a/srsenb/src/stack/mac/sched_carrier.cc +++ b/srsenb/src/stack/mac/sched_carrier.cc @@ -295,7 +295,7 @@ int ra_sched::dl_rach_info(dl_sched_rar_info_t rar_info) //! Schedule Msg3 grants in UL based on allocated RARs void ra_sched::ul_sched(sf_sched* sf_dl_sched, sf_sched* sf_msg3_sched) { - const std::vector& alloc_rars = sf_dl_sched->get_allocated_rars(); + srslte::const_span alloc_rars = sf_dl_sched->get_allocated_rars(); for (const auto& rar : alloc_rars) { for (const auto& msg3grant : rar.rar_grant.msg3_grant) { @@ -334,7 +334,7 @@ sched::carrier_sched::carrier_sched(rrc_interface_mac* rrc_, sf_dl_mask.resize(1, 0); } -sched::carrier_sched::~carrier_sched() {} +sched::carrier_sched::~carrier_sched() = default; void sched::carrier_sched::reset() { @@ -386,10 +386,9 @@ const cc_sched_result& sched::carrier_sched::generate_tti_result(tti_point tti_r /* Schedule PHICH */ for (auto& ue_pair : *ue_db) { - if (cc_result->ul_sched_result.phich.size() >= MAX_PHICH_LIST) { + if (tti_sched->alloc_phich(ue_pair.second.get()) == alloc_result::no_grant_space) { break; } - tti_sched->alloc_phich(ue_pair.second.get()); } /* Schedule DL control data */ diff --git a/srsenb/src/stack/mac/sched_grid.cc b/srsenb/src/stack/mac/sched_grid.cc index 1e75fda7b..d4d983c60 100644 --- a/srsenb/src/stack/mac/sched_grid.cc +++ b/srsenb/src/stack/mac/sched_grid.cc @@ -29,7 +29,7 @@ const char* to_string(alloc_result result) return "No space available in PUCCH or PDCCH"; case alloc_result::no_sch_space: return "Requested number of PRBs not available"; - case alloc_result::rnti_inactive: + case alloc_result::no_rnti_opportunity: return "rnti cannot be allocated (e.g. 
already allocated, no data, meas gap collision, carrier inactive, etc.)"; case alloc_result::invalid_grant_params: return "invalid grant arguments (e.g. invalid prb mask)"; @@ -55,8 +55,8 @@ void sf_sched_result::new_tti(tti_point tti_rx_) bool sf_sched_result::is_ul_alloc(uint16_t rnti) const { for (const auto& cc : enb_cc_list) { - for (uint32_t j = 0; j < cc.ul_sched_result.pusch.size(); ++j) { - if (cc.ul_sched_result.pusch[j].dci.rnti == rnti) { + for (const auto& pusch : cc.ul_sched_result.pusch) { + if (pusch.dci.rnti == rnti) { return true; } } @@ -66,8 +66,8 @@ bool sf_sched_result::is_ul_alloc(uint16_t rnti) const bool sf_sched_result::is_dl_alloc(uint16_t rnti) const { for (const auto& cc : enb_cc_list) { - for (uint32_t j = 0; j < cc.dl_sched_result.data.size(); ++j) { - if (cc.dl_sched_result.data[j].dci.rnti == rnti) { + for (const auto& data : cc.dl_sched_result.data) { + if (data.dci.rnti == rnti) { return true; } } @@ -259,7 +259,7 @@ void sf_grid_t::rem_last_alloc_dl(rbg_interval rbgs) alloc_result sf_grid_t::reserve_ul_prbs(prb_interval alloc, bool strict) { if (alloc.stop() > ul_mask.size()) { - return alloc_result::no_sch_space; + return alloc_result::invalid_grant_params; } prbmask_t newmask(ul_mask.size()); @@ -271,10 +271,13 @@ alloc_result sf_grid_t::reserve_ul_prbs(const prbmask_t& prbmask, bool strict) { alloc_result ret = alloc_result::success; if (strict and (ul_mask & prbmask).any()) { - fmt::memory_buffer tmp_buffer; - fmt::format_to(tmp_buffer, "There was a collision in the UL. Current mask={:x}, new mask={:x}", ul_mask, prbmask); - logger.error("%s", srslte::to_c_str(tmp_buffer)); - ret = alloc_result::sch_collision; + if (logger.info.enabled()) { + fmt::memory_buffer tmp_buffer; + fmt::format_to( + tmp_buffer, "There was a collision in the UL. 
Current mask=0x{:x}, new mask=0x{:x}", ul_mask, prbmask); + logger.info("%s", srslte::to_c_str(tmp_buffer)); + ret = alloc_result::sch_collision; + } } ul_mask |= prbmask; return ret; @@ -325,7 +328,7 @@ void sf_sched::init(const sched_cell_params_t& cell_params_) { cc_cfg = &cell_params_; tti_alloc.init(*cc_cfg); - max_msg3_prb = std::max(6u, cc_cfg->cfg.cell.nof_prb - tti_alloc.get_pucch_width()); + max_msg3_prb = std::max(6U, cc_cfg->cfg.cell.nof_prb - tti_alloc.get_pucch_width()); } void sf_sched::new_tti(tti_point tti_rx_, sf_sched_result* cc_results_) @@ -361,7 +364,7 @@ bool sf_sched::is_ul_alloc(uint16_t rnti) const alloc_result sf_sched::alloc_sib(uint32_t aggr_lvl, uint32_t sib_idx, uint32_t sib_ntx, rbg_interval rbgs) { - if (bc_allocs.size() >= sched_interface::MAX_BC_LIST) { + if (bc_allocs.full()) { logger.warning("SCHED: Maximum number of Broadcast allocations reached"); return alloc_result::no_grant_space; } @@ -391,7 +394,7 @@ alloc_result sf_sched::alloc_sib(uint32_t aggr_lvl, uint32_t sib_idx, uint32_t s alloc_result sf_sched::alloc_paging(uint32_t aggr_lvl, uint32_t paging_payload, rbg_interval rbgs) { - if (bc_allocs.size() >= sched_interface::MAX_BC_LIST) { + if (bc_allocs.full()) { logger.warning("SCHED: Maximum number of Broadcast allocations reached"); return alloc_result::no_grant_space; } @@ -422,7 +425,7 @@ alloc_result sf_sched::alloc_paging(uint32_t aggr_lvl, uint32_t paging_payload, alloc_result sf_sched::alloc_rar(uint32_t aggr_lvl, const pending_rar_t& rar, rbg_interval rbgs, uint32_t nof_grants) { static const uint32_t msg3_nof_prbs = 3; - if (rar_allocs.size() >= sched_interface::MAX_RAR_LIST) { + if (rar_allocs.full()) { logger.info("SCHED: Maximum number of RAR allocations per TTI reached."); return alloc_result::no_grant_space; } @@ -474,22 +477,22 @@ bool is_periodic_cqi_expected(const sched_interface::ue_cfg_t& ue_cfg, tti_point alloc_result sf_sched::alloc_dl_user(sched_ue* user, const rbgmask_t& user_mask, uint32_t pid) { - if (data_allocs.size() >= sched_interface::MAX_DATA_LIST) { + if (data_allocs.full()) { logger.warning("SCHED: Maximum number of DL allocations reached"); return alloc_result::no_grant_space; } if (is_dl_alloc(user->get_rnti())) { logger.warning("SCHED: Attempt to assign multiple harq pids to the same user rnti=0x%x", user->get_rnti()); - return alloc_result::rnti_inactive; + return alloc_result::no_rnti_opportunity; } auto* cc = user->find_ue_carrier(cc_cfg->enb_cc_idx); if (cc == nullptr or cc->cc_state() != cc_st::active) { - return alloc_result::rnti_inactive; + return alloc_result::no_rnti_opportunity; } if (not user->pdsch_enabled(srslte::tti_point{get_tti_rx()}, cc_cfg->enb_cc_idx)) { - return alloc_result::rnti_inactive; + return alloc_result::no_rnti_opportunity; } // Check if allocation would cause segmentation @@ -498,7 +501,7 @@ alloc_result sf_sched::alloc_dl_user(sched_ue* user, const rbgmask_t& user_mask, // It is newTx rbg_interval r = user->get_required_dl_rbgs(cc_cfg->enb_cc_idx); if (r.start() > user_mask.count()) { - logger.warning("SCHED: The number of RBGs allocated to rnti=0x%x will force segmentation", user->get_rnti()); + logger.debug("SCHED: The number of RBGs allocated to rnti=0x%x will force segmentation", user->get_rnti()); return alloc_result::invalid_grant_params; } } @@ -568,14 +571,14 @@ sf_sched::alloc_ul(sched_ue* user, prb_interval alloc, ul_alloc_t::type_t alloc_ if (is_ul_alloc(user->get_rnti())) { logger.warning("SCHED: Attempt to assign multiple UL grants to the same user rnti=0x%x", 
user->get_rnti()); - return alloc_result::rnti_inactive; + return alloc_result::no_rnti_opportunity; } // Check if there is no collision with measGap bool needs_pdcch = alloc_type == ul_alloc_t::ADAPT_RETX or (alloc_type == ul_alloc_t::NEWTX and not is_msg3); if (not user->pusch_enabled(get_tti_rx(), cc_cfg->enb_cc_idx, needs_pdcch)) { logger.debug("SCHED: PDCCH would collide with rnti=0x%x Measurement Gap", user->get_rnti()); - return alloc_result::rnti_inactive; + return alloc_result::no_rnti_opportunity; } // Allocate RBGs and DCI space @@ -614,20 +617,20 @@ alloc_result sf_sched::alloc_ul_user(sched_ue* user, prb_interval alloc) return alloc_ul(user, alloc, alloc_type, h->is_msg3()); } -bool sf_sched::alloc_phich(sched_ue* user) +alloc_result sf_sched::alloc_phich(sched_ue* user) { using phich_t = sched_interface::ul_sched_phich_t; auto* ul_sf_result = &cc_results->get_cc(cc_cfg->enb_cc_idx)->ul_sched_result; if (ul_sf_result->phich.full()) { logger.warning("SCHED: Maximum number of PHICH allocations has been reached"); - return false; + return alloc_result::no_grant_space; } auto p = user->get_active_cell_index(cc_cfg->enb_cc_idx); if (not p.first) { // user does not support this carrier - return false; + return alloc_result::no_rnti_opportunity; } ul_harq_proc* h = user->get_ul_harq(get_tti_tx_ul(), cc_cfg->enb_cc_idx); @@ -637,29 +640,9 @@ bool sf_sched::alloc_phich(sched_ue* user) ul_sf_result->phich.emplace_back(); ul_sf_result->phich.back().rnti = user->get_rnti(); ul_sf_result->phich.back().phich = h->pop_pending_phich() ? phich_t::ACK : phich_t::NACK; - return true; - } - return false; -} - -void sf_sched::set_bc_sched_result(const sf_cch_allocator::alloc_result_t& dci_result, - sched_interface::dl_sched_res_t* dl_result) -{ - for (const auto& bc_alloc : bc_allocs) { - dl_result->bc.emplace_back(bc_alloc.bc_grant); - dl_result->bc.back().dci.location = dci_result[bc_alloc.dci_idx]->dci_pos; - log_broadcast_allocation(dl_result->bc.back(), bc_alloc.rbg_range, *cc_cfg); - } -} - -void sf_sched::set_rar_sched_result(const sf_cch_allocator::alloc_result_t& dci_result, - sched_interface::dl_sched_res_t* dl_result) -{ - for (const auto& rar_alloc : rar_allocs) { - dl_result->rar.emplace_back(rar_alloc.rar_grant); - dl_result->rar.back().dci.location = dci_result[rar_alloc.alloc_data.dci_idx]->dci_pos; - log_rar_allocation(dl_result->rar.back(), rar_alloc.alloc_data.rbg_range); + return alloc_result::success; } + return alloc_result::no_rnti_opportunity; } void sf_sched::set_dl_data_sched_result(const sf_cch_allocator::alloc_result_t& dci_result, @@ -927,9 +910,17 @@ void sf_sched::generate_sched_results(sched_ue_list& ue_db) cc_result->dl_sched_result.cfi = tti_alloc.get_pdcch_grid().get_cfi(); /* Generate DCI formats and fill sched_result structs */ - set_bc_sched_result(dci_result, &cc_result->dl_sched_result); + for (const auto& bc_alloc : bc_allocs) { + cc_result->dl_sched_result.bc.emplace_back(bc_alloc.bc_grant); + cc_result->dl_sched_result.bc.back().dci.location = dci_result[bc_alloc.dci_idx]->dci_pos; + log_broadcast_allocation(cc_result->dl_sched_result.bc.back(), bc_alloc.rbg_range, *cc_cfg); + } - set_rar_sched_result(dci_result, &cc_result->dl_sched_result); + for (const auto& rar_alloc : rar_allocs) { + cc_result->dl_sched_result.rar.emplace_back(rar_alloc.rar_grant); + cc_result->dl_sched_result.rar.back().dci.location = dci_result[rar_alloc.alloc_data.dci_idx]->dci_pos; + log_rar_allocation(cc_result->dl_sched_result.rar.back(), rar_alloc.alloc_data.rbg_range); + } 
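  // Illustrative sketch: set_bc_sched_result() and set_rar_sched_result() were single-use
  // helpers, so this patch folds them into the two loops above. Callers of the sched_grid API
  // are expected to branch on the alloc_result codes and may use to_string() for diagnostics,
  // e.g. (alloc_dl_user() and to_string() signatures are taken from this patch; the logging
  // line itself is hypothetical):
  //
  //   alloc_result ret = tti_sched.alloc_dl_user(&ue, newtx_mask, pid);
  //   if (ret != alloc_result::success) {
  //     logger.debug("SCHED: DL allocation failed for rnti=0x%x: %s", ue.get_rnti(), to_string(ret));
  //   }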
set_dl_data_sched_result(dci_result, &cc_result->dl_sched_result, ue_db); diff --git a/srsenb/src/stack/mac/schedulers/sched_base.cc b/srsenb/src/stack/mac/schedulers/sched_base.cc index 944e2fc23..2454871eb 100644 --- a/srsenb/src/stack/mac/schedulers/sched_base.cc +++ b/srsenb/src/stack/mac/schedulers/sched_base.cc @@ -146,7 +146,7 @@ alloc_result try_dl_newtx_alloc_greedy(sf_sched& tti_sched, sched_ue& ue, const // If there is no data to transmit, no need to allocate rbg_interval req_rbgs = ue.get_required_dl_rbgs(tti_sched.get_enb_cc_idx()); if (req_rbgs.stop() == 0) { - return alloc_result::rnti_inactive; + return alloc_result::no_rnti_opportunity; } // Find RBG mask that accommodates pending data @@ -245,7 +245,7 @@ alloc_result try_ul_retx_alloc(sf_sched& tti_sched, sched_ue& ue, const ul_harq_ // Avoid measGaps accounting for PDCCH if (not ue.pusch_enabled(tti_sched.get_tti_rx(), tti_sched.get_enb_cc_idx(), true)) { - return alloc_result::rnti_inactive; + return alloc_result::no_rnti_opportunity; } uint32_t nof_prbs = alloc.length(); alloc = find_contiguous_ul_prbs(nof_prbs, tti_sched.get_ul_mask()); From d5750f6fe63f7603604c534a122ea2845727e8f5 Mon Sep 17 00:00:00 2001 From: Francisco Date: Thu, 18 Mar 2021 11:43:56 +0000 Subject: [PATCH 45/64] refactor - update of clang-tidy project file. automatic clang-tidy refactors --- .clang-tidy | 6 ++- srsenb/test/mac/sched_benchmark.cc | 70 ++++++++--------------------- srsenb/test/mac/sched_grid_test.cc | 5 +-- srsenb/test/mac/sched_lc_ch_test.cc | 4 +- 4 files changed, 28 insertions(+), 57 deletions(-) diff --git a/.clang-tidy b/.clang-tidy index c5a75d316..874dd51ad 100644 --- a/.clang-tidy +++ b/.clang-tidy @@ -7,6 +7,7 @@ # - init of static memory may cause an exception (cert-err58) # - forbidden implicit conversion from pointer/int to bool # - recommended auto +# - remove llvm-specific checks (header guard style, usage of llvm namespace, restriction of libc includes, etc.) 
# Naming conventions set to snake_case Checks: '*,-fuchsia-*, -cppcoreguidelines-pro-type-vararg,-hicpp-vararg, @@ -14,12 +15,15 @@ Checks: '*,-fuchsia-*, -cppcoreguidelines-pro-bounds-array-to-pointer-decay,-hicpp-no-array-decay, -cppcoreguidelines-pro-bounds-constant-array-index,-cppcoreguidelines-pro-type-cstyle-cast, -cppcoreguidelines-pro-type-union-access, + -cppcoreguidelines-pro-type-static-cast-downcast, -modernize-use-using,-modernize-use-trailing-return-type, -modernize-use-auto,-hicpp-use-auto, - -llvmlibc-callee-namespace, + -llvmlibc-callee-namespace,-llvmlibc-implementation-in-namespace,-llvmlibc-restrict-system-libc-headers, + -llvm-header-guard, -google-runtime-references,-google-readability-casting,-google-build-using-namespace, google-default-arguments,-cppcoreguidelines-pro-bounds-pointer-arithmetic, -cert-err58-cpp, + -readability-function-cognitive-complexity,-readability-isolate-declaration, -misc-non-private-member-variables-in-classes,-altera-struct-pack-align,-readability-uppercase-literal-suffix, -cppcoreguidelines-non-private-member-variables-in-classes, readability-identifier-naming' diff --git a/srsenb/test/mac/sched_benchmark.cc b/srsenb/test/mac/sched_benchmark.cc index 48b16161f..2a83a41eb 100644 --- a/srsenb/test/mac/sched_benchmark.cc +++ b/srsenb/test/mac/sched_benchmark.cc @@ -53,7 +53,7 @@ class sched_tester : public sched_sim_base static std::vector get_cell_cfg(srslte::span cell_params) { std::vector cell_cfg_list; - for (auto& c : cell_params) { + for (const auto& c : cell_params) { cell_cfg_list.push_back(c.cfg); } return cell_cfg_list; @@ -114,9 +114,9 @@ public: sched_ptr->dl_rlc_buffer_state(ue_ctxt.rnti, 3, ul_bytes_per_tti, 0); if (get_tti_rx().to_uint() % 5 == 0) { - for (uint32_t cc = 0; cc < pending_events.cc_list.size(); ++cc) { - pending_events.cc_list[cc].dl_cqi = current_run_params.cqi; - pending_events.cc_list[cc].ul_snr = 40; + for (auto& cc : pending_events.cc_list) { + cc.dl_cqi = current_run_params.cqi; + cc.ul_snr = 40; } } } @@ -126,18 +126,18 @@ public: { for (uint32_t cc = 0; cc < get_cell_params().size(); ++cc) { uint32_t dl_tbs = 0, ul_tbs = 0, dl_mcs = 0, ul_mcs = 0; - for (uint32_t i = 0; i < sf_out.dl_cc_result[cc].data.size(); ++i) { - dl_tbs += sf_out.dl_cc_result[cc].data[i].tbs[0]; - dl_tbs += sf_out.dl_cc_result[cc].data[i].tbs[1]; - dl_mcs = std::max(dl_mcs, sf_out.dl_cc_result[cc].data[i].dci.tb[0].mcs_idx); + for (const auto& data : sf_out.dl_cc_result[cc].data) { + dl_tbs += data.tbs[0]; + dl_tbs += data.tbs[1]; + dl_mcs = std::max(dl_mcs, data.dci.tb[0].mcs_idx); } total_stats.mean_dl_tbs.push(dl_tbs); - if (sf_out.dl_cc_result[cc].data.size() > 0) { + if (not sf_out.dl_cc_result[cc].data.empty()) { total_stats.avg_dl_mcs.push(dl_mcs); } - for (uint32_t i = 0; i < sf_out.ul_cc_result[cc].pusch.size(); ++i) { - ul_tbs += sf_out.ul_cc_result[cc].pusch[i].tbs; - ul_mcs = std::max(ul_mcs, sf_out.ul_cc_result[cc].pusch[i].dci.tb.mcs_idx); + for (const auto& pusch : sf_out.ul_cc_result[cc].pusch) { + ul_tbs += pusch.tbs; + ul_mcs = std::max(ul_mcs, pusch.dci.tb.mcs_idx); } total_stats.mean_ul_tbs.push(ul_tbs); if (not sf_out.ul_cc_result[cc].pusch.empty()) { @@ -147,38 +147,6 @@ public: } }; -int run_sched_new_ue(sched_tester& sched_tester, - const run_params& params, - uint16_t rnti, - const sched_interface::ue_cfg_t& ue_cfg) -{ - const uint32_t ENB_CC_IDX = 0; - - sched_tester.total_stats = {}; - sched_tester.current_run_params = params; - - // Add user (first need to advance to a PRACH TTI) - while (not 
srslte_prach_tti_opportunity_config_fdd( - sched_tester.get_cell_params()[ue_cfg.supported_cc_list[0].enb_cc_idx].cfg.prach_config, - sched_tester.get_tti_rx().to_uint(), - -1)) { - TESTASSERT(sched_tester.advance_tti() == SRSLTE_SUCCESS); - } - TESTASSERT(sched_tester.add_user(rnti, ue_cfg, 16) == SRSLTE_SUCCESS); - - // Ignore stats of the first TTIs until UE DRB1 is added - while (not sched_tester.get_enb_ctxt().ue_db.at(rnti)->conres_rx) { - sched_tester.advance_tti(); - } - sched_tester.total_stats = {}; - - for (uint32_t count = 0; count < params.nof_ttis; ++count) { - sched_tester.advance_tti(); - } - - return SRSLTE_SUCCESS; -} - struct run_data { run_params params; float avg_dl_throughput; @@ -233,8 +201,8 @@ int run_benchmark_scenario(run_params params, std::vector& run_results run_data run_result = {}; run_result.params = params; - run_result.avg_dl_throughput = tester.total_stats.mean_dl_tbs.value() * 8.0 / 1e-3; - run_result.avg_ul_throughput = tester.total_stats.mean_ul_tbs.value() * 8.0 / 1e-3; + run_result.avg_dl_throughput = tester.total_stats.mean_dl_tbs.value() * 8.0F / 1e-3F; + run_result.avg_ul_throughput = tester.total_stats.mean_ul_tbs.value() * 8.0F / 1e-3F; run_result.avg_dl_mcs = tester.total_stats.avg_dl_mcs.value(); run_result.avg_ul_mcs = tester.total_stats.avg_ul_mcs.value(); run_result.avg_latency = std::chrono::microseconds(static_cast(tester.total_stats.avg_latency.value() / 1000)); @@ -249,12 +217,12 @@ run_data expected_run_result(run_params params) run_data ret{}; int tbs_idx = srslte_ra_tbs_idx_from_mcs(28, false, false); int tbs = srslte_ra_tbs_from_idx(tbs_idx, params.nof_prbs); - ret.avg_dl_throughput = tbs * 1e3; // bps + ret.avg_dl_throughput = static_cast(tbs) * 1e3F; // bps tbs_idx = srslte_ra_tbs_idx_from_mcs(28, false, true); uint32_t nof_pusch_prbs = params.nof_prbs - (params.nof_prbs == 6 ? 2 : 4); tbs = srslte_ra_tbs_from_idx(tbs_idx, nof_pusch_prbs); - ret.avg_ul_throughput = tbs * 1e3; // bps + ret.avg_ul_throughput = static_cast(tbs) * 1e3F; // bps ret.avg_dl_mcs = 27; ret.avg_ul_mcs = 22; @@ -288,11 +256,11 @@ void print_benchmark_results(const std::vector& run_results) int tbs_idx = srslte_ra_tbs_idx_from_mcs(28, false, false); int tbs = srslte_ra_tbs_from_idx(tbs_idx, r.params.nof_prbs); - float dl_rate_overhead = 1.0F - r.avg_dl_throughput / (tbs * 1e3); + float dl_rate_overhead = 1.0F - r.avg_dl_throughput / (static_cast(tbs) * 1e3F); tbs_idx = srslte_ra_tbs_idx_from_mcs(28, false, true); uint32_t nof_pusch_prbs = r.params.nof_prbs - (r.params.nof_prbs == 6 ? 
2 : 4); tbs = srslte_ra_tbs_from_idx(tbs_idx, nof_pusch_prbs); - float ul_rate_overhead = 1.0F - r.avg_ul_throughput / (tbs * 1e3); + float ul_rate_overhead = 1.0F - r.avg_ul_throughput / (static_cast(tbs) * 1e3F); fmt::print("{:>3d}{:>6d}{:>6d}{:>12}{:>6d}{:>9.2}/{:>4.2}{:>9.1f}/{:>4.1f}{:9.1f}/{:>4.1f}{:12d}\n", i, @@ -420,7 +388,7 @@ int main(int argc, char* argv[]) } auto* spy = static_cast(srslog::find_sink(srslte::log_sink_spy::name())); - if (!spy) { + if (spy == nullptr) { return SRSLTE_ERROR; } diff --git a/srsenb/test/mac/sched_grid_test.cc b/srsenb/test/mac/sched_grid_test.cc index 58e1b2577..81c88a200 100644 --- a/srsenb/test/mac/sched_grid_test.cc +++ b/srsenb/test/mac/sched_grid_test.cc @@ -92,7 +92,7 @@ int test_pdcch_one_ue() TESTASSERT(pdcch_result[0]->rnti == sched_ue.get_rnti()); TESTASSERT(pdcch_result[0]->total_mask.size() == cell_params[ENB_CC_IDX].nof_cce_table[pdcch.get_cfi() - 1]); TESTASSERT(pdcch_result[0]->current_mask == pdcch_result[0]->total_mask); - TESTASSERT(pdcch_result[0]->current_mask.count() == 1u << aggr_idx); + TESTASSERT(pdcch_result[0]->current_mask.count() == 1U << aggr_idx); TESTASSERT(std::count(dci_locs.begin(), dci_locs.end(), pdcch_result[0]->dci_pos.ncce) > 0); // allocate UL user @@ -120,7 +120,7 @@ int test_pdcch_one_ue() TESTASSERT(pdcch_result[1]->rnti == sched_ue.get_rnti()); TESTASSERT(pdcch_result[1]->total_mask.size() == cell_params[ENB_CC_IDX].nof_cce_table[pdcch.get_cfi() - 1]); TESTASSERT((pdcch_result[1]->current_mask & pdcch_result[0]->current_mask).none()); - TESTASSERT(pdcch_result[1]->current_mask.count() == 1u << aggr_idx); + TESTASSERT(pdcch_result[1]->current_mask.count() == 1U << aggr_idx); TESTASSERT(pdcch_result[1]->total_mask == (pdcch_result[0]->current_mask | pdcch_result[1]->current_mask)); TESTASSERT(std::count(dci_locs2.begin(), dci_locs2.end(), pdcch_result[0]->dci_pos.ncce) > 0); @@ -133,7 +133,6 @@ int test_pdcch_one_ue() int test_pdcch_ue_and_sibs() { - const uint32_t ENB_CC_IDX = 0; // Params uint32_t nof_prb = 100; diff --git a/srsenb/test/mac/sched_lc_ch_test.cc b/srsenb/test/mac/sched_lc_ch_test.cc index 58dafa2f6..61d7d1d96 100644 --- a/srsenb/test/mac/sched_lc_ch_test.cc +++ b/srsenb/test/mac/sched_lc_ch_test.cc @@ -45,7 +45,7 @@ int test_pdu_alloc_successful(srsenb::lch_ue_manager& lch_handler, int test_retx_until_empty(srsenb::lch_ue_manager& lch_handler, int lcid, uint32_t rlc_payload_size) { int start_rlc_bytes = lch_handler.get_dl_retx(lcid); - int nof_pdus = ceil(start_rlc_bytes / (float)rlc_payload_size); + int nof_pdus = ceil(static_cast(start_rlc_bytes) / static_cast(rlc_payload_size)); int rem_rlc_bytes = start_rlc_bytes; sched_interface::dl_sched_pdu_t pdu; @@ -61,7 +61,7 @@ int test_retx_until_empty(srsenb::lch_ue_manager& lch_handler, int lcid, uint32_ int test_newtx_until_empty(srsenb::lch_ue_manager& lch_handler, int lcid, uint32_t rlc_payload_size) { int start_rlc_bytes = lch_handler.get_dl_tx(lcid); - int nof_pdus = ceil(start_rlc_bytes / (float)rlc_payload_size); + int nof_pdus = ceil(static_cast(start_rlc_bytes) / (float)rlc_payload_size); int rem_rlc_bytes = start_rlc_bytes; sched_interface::dl_sched_pdu_t pdu; From d6bae76698bd73627bf89e511e1f6f293f8be9b1 Mon Sep 17 00:00:00 2001 From: Francisco Date: Thu, 18 Mar 2021 13:36:47 +0000 Subject: [PATCH 46/64] extend cch allocation unit tests to verify optimal cfi and DCI position selection for 6 PRBs --- .../mac/sched_phy_ch/sf_cch_allocator.cc | 6 +- srsenb/test/mac/sched_grid_test.cc | 84 ++++++++++++++++++- 2 files changed, 83 
insertions(+), 7 deletions(-) diff --git a/srsenb/src/stack/mac/sched_phy_ch/sf_cch_allocator.cc b/srsenb/src/stack/mac/sched_phy_ch/sf_cch_allocator.cc index d029c8178..f1e419139 100644 --- a/srsenb/src/stack/mac/sched_phy_ch/sf_cch_allocator.cc +++ b/srsenb/src/stack/mac/sched_phy_ch/sf_cch_allocator.cc @@ -69,10 +69,10 @@ bool sf_cch_allocator::alloc_dci(alloc_type_t alloc_type, uint32_t aggr_idx, sch if (is_dl_ctrl_alloc(alloc_type) and nof_allocs() == 0 and current_max_cfix > current_cfix) { // Given that CFI is not currently dynamic for ctrl allocs, in case of SIB/RAR alloc, start with optimal CFI // in terms of nof CCE locs - uint32_t nof_locs = 0; - for (uint32_t cfix_tmp = current_max_cfix; cfix_tmp > current_cfix; --cfix_tmp) { + uint32_t nof_locs = 0, lowest_cfix = current_cfix; + for (uint32_t cfix_tmp = current_max_cfix; cfix_tmp > lowest_cfix; --cfix_tmp) { const cce_cfi_position_table* dci_locs = get_cce_loc_table(record.alloc_type, record.user, cfix_tmp); - if ((*dci_locs)[record.aggr_idx].size() >= nof_locs) { + if ((*dci_locs)[record.aggr_idx].size() > nof_locs) { nof_locs = (*dci_locs)[record.aggr_idx].size(); current_cfix = cfix_tmp; } else { diff --git a/srsenb/test/mac/sched_grid_test.cc b/srsenb/test/mac/sched_grid_test.cc index 81c88a200..b70a79d51 100644 --- a/srsenb/test/mac/sched_grid_test.cc +++ b/srsenb/test/mac/sched_grid_test.cc @@ -149,7 +149,7 @@ int test_pdcch_ue_and_sibs() TESTASSERT(pdcch.nof_alloc_combinations() == 0); TESTASSERT(pdcch.nof_allocs() == 0); - tti_point tti_rx{0}; + tti_point tti_rx{std::uniform_int_distribution(0, 9)(get_rand_gen())}; pdcch.new_tti(tti_rx); TESTASSERT(pdcch.nof_cces() == cell_params[0].nof_cce_table[0]); @@ -159,13 +159,88 @@ int test_pdcch_ue_and_sibs() TESTASSERT(pdcch.alloc_dci(alloc_type_t::DL_BC, 2)); TESTASSERT(pdcch.nof_alloc_combinations() == 4); TESTASSERT(pdcch.alloc_dci(alloc_type_t::DL_RAR, 2)); - TESTASSERT(pdcch.nof_allocs() == 2 and pdcch.nof_alloc_combinations() == 6); + TESTASSERT(pdcch.nof_allocs() == 2); TESTASSERT(pdcch.alloc_dci(alloc_type_t::DL_DATA, 2, &sched_ue, false)); - TESTASSERT(pdcch.nof_allocs() == 3 and pdcch.nof_alloc_combinations() == 9); + TESTASSERT(pdcch.nof_allocs() == 3); // TEST: Ability to revert last allocation pdcch.rem_last_dci(); - TESTASSERT(pdcch.nof_allocs() == 2 and pdcch.nof_alloc_combinations() == 6); + TESTASSERT(pdcch.nof_allocs() == 2); + + // TEST: DCI positions + uint32_t cfi = pdcch.get_cfi(); + sf_cch_allocator::alloc_result_t dci_result; + pdcch_mask_t result_pdcch_mask; + pdcch.get_allocs(&dci_result, &result_pdcch_mask); + TESTASSERT(dci_result.size() == 2); + const cce_position_list& bc_dci_locs = cell_params[0].common_locations[cfi - 1][2]; + TESTASSERT(bc_dci_locs[0] == dci_result[0]->dci_pos.ncce); + const cce_position_list& rar_dci_locs = cell_params[0].rar_locations[to_tx_dl(tti_rx).sf_idx()][cfi - 1][2]; + TESTASSERT(std::any_of(rar_dci_locs.begin(), rar_dci_locs.end(), [&dci_result](uint32_t val) { + return dci_result[1]->dci_pos.ncce == val; + })); + + return SRSLTE_SUCCESS; +} + +int test_6prbs() +{ + std::vector cell_params(1); + sched_interface::ue_cfg_t ue_cfg = generate_default_ue_cfg(); + sched_interface::cell_cfg_t cell_cfg = generate_default_cell_cfg(6); + sched_interface::sched_args_t sched_args{}; + TESTASSERT(cell_params[0].set_cfg(0, cell_cfg, sched_args)); + + sf_cch_allocator pdcch; + sched_ue sched_ue{0x46, cell_params, ue_cfg}, sched_ue2{0x47, cell_params, ue_cfg}; + sf_cch_allocator::alloc_result_t dci_result; + pdcch_mask_t 
result_pdcch_mask; + + pdcch.init(cell_params[PCell_IDX]); + TESTASSERT(pdcch.nof_alloc_combinations() == 0); + TESTASSERT(pdcch.nof_allocs() == 0); + + uint32_t opt_cfi = 3; + uint32_t bc_aggr_idx = 2, ue_aggr_idx = 1; + + // TEST: The first rnti will pick a DCI position of its 3 possible ones that avoids clash with SIB. The second rnti + // wont find space + tti_point tti_rx{0}; + pdcch.new_tti(tti_rx); + const cce_position_list& bc_dci_locs = cell_params[0].common_locations[opt_cfi - 1][bc_aggr_idx]; + const cce_position_list& rnti_dci_locs = + (*sched_ue.get_locations(0, opt_cfi, to_tx_dl(tti_rx).sf_idx()))[ue_aggr_idx]; + const cce_position_list& rnti2_dci_locs = + (*sched_ue2.get_locations(0, opt_cfi, to_tx_dl(tti_rx).sf_idx()))[ue_aggr_idx]; + + TESTASSERT(pdcch.alloc_dci(alloc_type_t::DL_BC, bc_aggr_idx)); + TESTASSERT(pdcch.alloc_dci(alloc_type_t::DL_DATA, ue_aggr_idx, &sched_ue, false)); + TESTASSERT(not pdcch.alloc_dci(alloc_type_t::DL_DATA, ue_aggr_idx, &sched_ue2, false)); + TESTASSERT(pdcch.nof_allocs() == 2); + + pdcch.get_allocs(&dci_result, &result_pdcch_mask); + TESTASSERT(dci_result.size() == 2); + TESTASSERT(dci_result[0]->dci_pos.ncce == bc_dci_locs[0]); + TESTASSERT(dci_result[1]->dci_pos.ncce == rnti_dci_locs[2]); + + // TEST: Two RNTIs can be allocated if one doesnt use the PUCCH + opt_cfi = 2; + tti_rx = tti_point{1}; + pdcch.new_tti(tti_rx); + const cce_position_list& rnti_dci_locs3 = + (*sched_ue.get_locations(0, opt_cfi, to_tx_dl(tti_rx).sf_idx()))[ue_aggr_idx]; + const cce_position_list& rnti_dci_locs4 = + (*sched_ue2.get_locations(0, opt_cfi, to_tx_dl(tti_rx).sf_idx()))[ue_aggr_idx]; + + TESTASSERT(pdcch.alloc_dci(alloc_type_t::DL_DATA, ue_aggr_idx, &sched_ue, false)); + TESTASSERT(not pdcch.alloc_dci(alloc_type_t::DL_DATA, ue_aggr_idx, &sched_ue2, false)); + TESTASSERT(pdcch.alloc_dci(alloc_type_t::DL_DATA, ue_aggr_idx, &sched_ue2, true)); + TESTASSERT(pdcch.nof_allocs() == 2 and pdcch.get_cfi() == opt_cfi); + + pdcch.get_allocs(&dci_result, &result_pdcch_mask); + TESTASSERT(dci_result.size() == 2); + TESTASSERT(dci_result[0]->dci_pos.ncce == rnti_dci_locs3[0]); + TESTASSERT(dci_result[1]->dci_pos.ncce == rnti_dci_locs4[0]); return SRSLTE_SUCCESS; } @@ -183,6 +258,7 @@ int main() TESTASSERT(test_pdcch_one_ue() == SRSLTE_SUCCESS); TESTASSERT(test_pdcch_ue_and_sibs() == SRSLTE_SUCCESS); + TESTASSERT(test_6prbs() == SRSLTE_SUCCESS); srslog::flush(); From d646111aa95fd0360b33ef54fb7b659d9ce49cb0 Mon Sep 17 00:00:00 2001 From: Francisco Date: Thu, 18 Mar 2021 14:22:39 +0000 Subject: [PATCH 47/64] refactor sched dci unit test --- srsenb/test/mac/sched_dci_test.cc | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/srsenb/test/mac/sched_dci_test.cc b/srsenb/test/mac/sched_dci_test.cc index 5d95a5b83..06ea2b5c0 100644 --- a/srsenb/test/mac/sched_dci_test.cc +++ b/srsenb/test/mac/sched_dci_test.cc @@ -103,7 +103,7 @@ int test_mcs_tbs_dl_helper(const sched_cell_params_t& cell_params, const tbs_tes tbs_info tb2; for (tb2.mcs = ret.mcs + 1; tb2.mcs <= (int)args.max_mcs; ++tb2.mcs) { int tbs_idx2 = srslte_ra_tbs_idx_from_mcs(tb2.mcs, args.use_tbs_index_alt, args.is_ul); - tb2.tbs_bytes = srslte_ra_tbs_from_idx(tbs_idx2, args.prb_grant_size) / 8u; + tb2.tbs_bytes = srslte_ra_tbs_from_idx(tbs_idx2, args.prb_grant_size) / 8U; TESTASSERT(not lower_coderate(tb2, nof_re, args) or (args.prb_grant_size == 1 and tb2.mcs == 6)); } @@ -139,7 +139,7 @@ int assert_mcs_tbs_result(uint32_t cell_nof_prb, args.prb_grant_size = prb_grant_size; 
args.use_tbs_index_alt = alt_cqi_table; if (alt_cqi_table) { - args.max_mcs = std::min(args.max_mcs, 27u); // limited to 27 for 256-QAM + args.max_mcs = std::min(args.max_mcs, 27U); // limited to 27 for 256-QAM } tbs_info expected_result; @@ -156,23 +156,20 @@ int assert_mcs_tbs_result(uint32_t cell_nof_prb, int test_mcs_lookup_specific() { - sched_cell_params_t cell_params = {}; - sched_interface::cell_cfg_t cell_cfg = generate_default_cell_cfg(6); - sched_interface::sched_args_t sched_args = {}; - cell_params.set_cfg(0, cell_cfg, sched_args); - tbs_test_args args; - args.verbose = true; - tbs_info expected_result; - /* TEST CASE: DL, no 256-QAM */ // cqi=5,Nprb=1 -> {mcs=3, tbs_idx=3, tbs=40} TESTASSERT(assert_mcs_tbs_result(6, 5, 1, 40, 3) == SRSLTE_SUCCESS); - - TESTASSERT(assert_mcs_tbs_result(6, 15, 1, 336, 19) == SRSLTE_SUCCESS); TESTASSERT(assert_mcs_tbs_result(6, 5, 4, 256, 4) == SRSLTE_SUCCESS); TESTASSERT(assert_mcs_tbs_result(100, 9, 1, 712, 28) == SRSLTE_SUCCESS); TESTASSERT(assert_mcs_tbs_result(100, 10, 10, 5736, 25) == SRSLTE_SUCCESS); + + // cqi=15 + TESTASSERT(assert_mcs_tbs_result(6, 15, 1, 336, 19) == SRSLTE_SUCCESS); // I_tbs=17 + TESTASSERT(assert_mcs_tbs_result(6, 15, 6, 2152, 19) == SRSLTE_SUCCESS); // I_tbs=17 + TESTASSERT(assert_mcs_tbs_result(100, 15, 1, 712, 28) == SRSLTE_SUCCESS); // I_tbs=26 + TESTASSERT(assert_mcs_tbs_result(100, 15, 2, 1480, 28) == SRSLTE_SUCCESS); // I_tbs=26 + TESTASSERT(assert_mcs_tbs_result(100, 15, 10, 7480, 28) == SRSLTE_SUCCESS); // I_tbs=26 TESTASSERT(assert_mcs_tbs_result(100, 15, 1, 968, 27, true) == SRSLTE_SUCCESS); return SRSLTE_SUCCESS; @@ -204,6 +201,9 @@ int test_mcs_tbs_consistency_all() return SRSLTE_SUCCESS; } +/** + * Note: assumes lowest bound for nof of REs + */ int test_min_mcs_tbs_dl_helper(const sched_cell_params_t& cell_params, const tbs_test_args& args, tbs_info* result) { uint32_t nof_re = cell_params.get_dl_lb_nof_re(args.tti_tx_dl, args.prb_grant_size); From 47654af7171e64a32c75f13a84d2cfc74fcb4d98 Mon Sep 17 00:00:00 2001 From: Andre Puschmann Date: Thu, 18 Mar 2021 18:41:55 +0100 Subject: [PATCH 48/64] pdcp_entity_lte: fix printf formatter for size_t we've seen a heap-buffer overflow in fmt because printf wasn't using the right formtter for size_t, which should be %zu this patch fixes it for the PDCP LTE entity but we might have it elsewhere too [1m[31m==7595==ERROR: AddressSanitizer: heap-buffer-overflow on address 0x629000e6f1fc at pc 0x562273a45289 bp 0x7f35567641f0 sp 0x7f35567641e0 [1m[0m[1m[34mREAD of size 4 at 0x629000e6f1fc thread T12 (STACK)[1m[0m 0 0x562273a45288 in fmt::v7::basic_format_arg >, char> > fmt::v7::detail::make_arg >, char>, unsigned int>(unsigned int const&) (/osmo-gsm-tester-srsue/srslte/bin/srsue+0x9dc288) 1 0x562273a3aa86 in void fmt::v7::dynamic_format_arg_store >, char> >::emplace_arg(unsigned int const&) (/osmo-gsm-tester-srsue/srslte/bin/srsue+0x9d1a86) 2 0x562273a308e7 in void fmt::v7::dynamic_format_arg_store >, char> >::push_back(unsigned int const&) /mnt/data/jenkins/workspace/srslte_ogt_trial_builder_x86-ubuntu1804-asan/srsLTE/lib/include/srslte/srslog/bundled/fmt/core.h:1548 3 0x562274361541 in void srslog::log_channel::operator()(char const*, unsigned int&, unsigned int&, unsigned long&&) /mnt/data/jenkins/workspace/srslte_ogt_trial_builder_x86-ubuntu1804-asan/srsLTE/lib/include/srslte/srslog/log_channel.h:101 4 0x56227430d9e7 in srslte::pdcp_entity_lte::update_rx_counts_queue(unsigned int) 
/mnt/data/jenkins/workspace/srslte_ogt_trial_builder_x86-ubuntu1804-asan/srsLTE/lib/src/upper/pdcp_entity_lte.cc:451 --- lib/src/upper/pdcp_entity_lte.cc | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/lib/src/upper/pdcp_entity_lte.cc b/lib/src/upper/pdcp_entity_lte.cc index 68ed25c94..4b6d98965 100644 --- a/lib/src/upper/pdcp_entity_lte.cc +++ b/lib/src/upper/pdcp_entity_lte.cc @@ -439,7 +439,7 @@ void pdcp_entity_lte::update_rx_counts_queue(uint32_t rx_count) // If the size of the rx_vector_info is getting very large // Consider the FMC as lost and update the vector. if (rx_counts_info.size() > reordering_window) { - logger.debug("Queue too large. Updating. Old FMC=%d, Old back=%d, old queue_size=%d", + logger.debug("Queue too large. Updating. Old FMC=%d, Old back=%d, old queue_size=%zu", fmc, rx_counts_info.back(), rx_counts_info.size()); @@ -448,16 +448,16 @@ void pdcp_entity_lte::update_rx_counts_queue(uint32_t rx_count) rx_counts_info.pop_back(); fmc++; } - logger.debug("Queue too large. Updating. New FMC=%d, new back=%d, new queue_size=%d", + logger.debug("Queue too large. Updating. New FMC=%d, new back=%d, new queue_size=%zu", fmc, rx_counts_info.back(), rx_counts_info.size()); } if (rx_counts_info.empty()) { - logger.info("Updated RX_COUNT info with SDU COUNT=%d, queue_size=%d, FMC=%d", rx_count, rx_counts_info.size(), fmc); + logger.info("Updated RX_COUNT info with SDU COUNT=%d, queue_size%zu, FMC=%d", rx_count, rx_counts_info.size(), fmc); } else { - logger.info("Updated RX_COUNT info with SDU COUNT=%d, queue_size=%d, FMC=%d, back=%d", + logger.info("Updated RX_COUNT info with SDU COUNT=%d, queue_size=%zu, FMC=%d, back=%d", rx_count, rx_counts_info.size(), fmc, @@ -707,7 +707,7 @@ void pdcp_entity_lte::notify_delivery(const pdcp_sn_vector_t& pdcp_sns) return; } - logger.info("Received delivery notification from RLC. Number of PDU notified=%ld", pdcp_sns.size()); + logger.info("Received delivery notification from RLC. Number of PDU notified=%zu", pdcp_sns.size()); for (uint32_t sn : pdcp_sns) { logger.debug("Delivery notification received for PDU with SN=%d", sn); if (sn == UINT32_MAX) { @@ -737,7 +737,7 @@ void pdcp_entity_lte::notify_failure(const pdcp_sn_vector_t& pdcp_sns) return; } - logger.info("Received failure notification from RLC. Number of PDU notified=%ld", pdcp_sns.size()); + logger.info("Received failure notification from RLC. 
Number of PDU notified=%zu", pdcp_sns.size()); for (uint32_t sn : pdcp_sns) { logger.info("Failure notification received for PDU with SN=%d", sn); @@ -800,7 +800,7 @@ std::map pdcp_entity_lte::get_buffered_p logger.error("Buffered PDUs being requested for non-AM DRB"); return std::map{}; } - logger.info("Buffered PDUs requested, buffer_size=%d", undelivered_sdus->size()); + logger.info("Buffered PDUs requested, buffer_size=%zu", undelivered_sdus->size()); return undelivered_sdus->get_buffered_sdus(); } From 2782d9617024ef0e6c8b98b5f7cad06724ef360d Mon Sep 17 00:00:00 2001 From: Xavier Arteaga Date: Tue, 9 Mar 2021 13:08:36 +0100 Subject: [PATCH 49/64] SRSUE: compute speed from TA commands --- .../srslte/interfaces/ue_phy_interfaces.h | 4 +- srsue/hdr/phy/phy.h | 4 +- srsue/hdr/phy/phy_metrics.h | 2 + srsue/hdr/phy/ta_control.h | 93 ++++++++++++++++++- srsue/hdr/stack/mac/demux.h | 2 +- srsue/hdr/stack/mac/proc_ra.h | 2 +- srsue/src/metrics_csv.cc | 8 +- srsue/src/phy/phy.cc | 8 +- srsue/src/phy/phy_common.cc | 2 + srsue/src/phy/sync.cc | 8 +- srsue/src/stack/mac/demux.cc | 8 +- srsue/src/stack/mac/proc_ra.cc | 8 +- srsue/test/mac_test.cc | 4 +- srsue/test/ttcn3/hdr/lte_ttcn3_phy.h | 4 +- srsue/test/ttcn3/src/lte_ttcn3_phy.cc | 4 +- 15 files changed, 128 insertions(+), 33 deletions(-) diff --git a/lib/include/srslte/interfaces/ue_phy_interfaces.h b/lib/include/srslte/interfaces/ue_phy_interfaces.h index 5faa63be0..2290d895e 100644 --- a/lib/include/srslte/interfaces/ue_phy_interfaces.h +++ b/lib/include/srslte/interfaces/ue_phy_interfaces.h @@ -104,8 +104,8 @@ class phy_interface_mac_common { public: /* Time advance commands */ - virtual void set_timeadv_rar(uint32_t ta_cmd) = 0; - virtual void set_timeadv(uint32_t ta_cmd) = 0; + virtual void set_timeadv_rar(uint32_t tti, uint32_t ta_cmd) = 0; + virtual void set_timeadv(uint32_t tti, uint32_t ta_cmd) = 0; /* Activate / Disactivate SCell*/ virtual void set_activation_deactivation_scell(uint32_t cmd, uint32_t tti) = 0; diff --git a/srsue/hdr/phy/phy.h b/srsue/hdr/phy/phy.h index 3d90ee33f..034f996f7 100644 --- a/srsue/hdr/phy/phy.h +++ b/srsue/hdr/phy/phy.h @@ -140,8 +140,8 @@ public: int sr_last_tx_tti() final; // Time advance commands - void set_timeadv_rar(uint32_t ta_cmd) final; - void set_timeadv(uint32_t ta_cmd) final; + void set_timeadv_rar(uint32_t tti, uint32_t ta_cmd) final; + void set_timeadv(uint32_t tti, uint32_t ta_cmd) final; /* Activate / Disactivate SCell*/ void deactivate_scells() final; diff --git a/srsue/hdr/phy/phy_metrics.h b/srsue/hdr/phy/phy_metrics.h index 5384f4822..51dd6ff3c 100644 --- a/srsue/hdr/phy/phy_metrics.h +++ b/srsue/hdr/phy/phy_metrics.h @@ -24,6 +24,8 @@ struct info_metrics_t { struct sync_metrics_t { float ta_us; + float distance_km; + float speed_kmph; float cfo; float sfo; }; diff --git a/srsue/hdr/phy/ta_control.h b/srsue/hdr/phy/ta_control.h index 8cb6b9ff8..a6013c853 100644 --- a/srsue/hdr/phy/ta_control.h +++ b/srsue/hdr/phy/ta_control.h @@ -22,11 +22,34 @@ namespace srsue { class ta_control { private: + static const size_t MAX_NOF_SPEED_VALUES = 50; ///< Maximum number of data to store for speed calculation + static const size_t MIN_NOF_SPEED_VALUES = 1; ///< Minimum number of data for calculating the speed + static const size_t MAX_AGE_SPEED_VALUES = 10000; ///< Maximum age of speed data in milliseconds. Discards older data. + srslog::basic_logger& logger; mutable std::mutex mutex; uint32_t next_base_nta = 0; float next_base_sec = 0.0f; + // Vector containing data for calculating speed. 
The first value is the time increment from TTI and the second value + // is the distance increment from the TA command + struct speed_data_t { + uint32_t tti; + float delta_t; + float delta_d; + }; + std::array speed_data = {}; + int32_t last_tti = -1; // Last TTI writen, -1 if none + uint32_t write_idx = 0; + uint32_t read_idx = 0; + + void reset_speed_data() + { + write_idx = 0; + read_idx = 0; + last_tti = -1; + } + public: ta_control(srslog::basic_logger& logger) : logger(logger) {} @@ -45,6 +68,9 @@ public: // Update base in nta next_base_nta = static_cast(roundf(next_base_sec / SRSLTE_LTE_TS)); + // Reset speed data + reset_speed_data(); + logger.info("PHY: Set TA base: n_ta: %d, ta_usec: %.1f", next_base_nta, next_base_sec * 1e6f); } @@ -74,7 +100,7 @@ public: * * @param ta_cmd Time Alignment command */ - void add_ta_cmd_rar(uint32_t ta_cmd) + void add_ta_cmd_rar(uint32_t tti, uint32_t ta_cmd) { std::lock_guard lock(mutex); @@ -84,6 +110,10 @@ public: // Update base in seconds next_base_sec = static_cast(next_base_nta) * SRSLTE_LTE_TS; + // Reset speed data + reset_speed_data(); + last_tti = tti; + logger.info("PHY: Set TA RAR: ta_cmd: %d, n_ta: %d, ta_usec: %.1f", ta_cmd, next_base_nta, next_base_sec * 1e6f); } @@ -92,9 +122,10 @@ public: * * @param ta_cmd Time Alignment command */ - void add_ta_cmd_new(uint32_t ta_cmd) + void add_ta_cmd_new(uint32_t tti, uint32_t ta_cmd) { std::lock_guard lock(mutex); + float prev_base_sec = next_base_sec; // Update base nta next_base_nta = srslte_N_ta_new(next_base_nta, ta_cmd); @@ -103,6 +134,26 @@ public: next_base_sec = static_cast(next_base_nta) * SRSLTE_LTE_TS; logger.info("PHY: Set TA: ta_cmd: %d, n_ta: %d, ta_usec: %.1f", ta_cmd, next_base_nta, next_base_sec * 1e6f); + + // Calculate speed data + if (last_tti > 0) { + float delta_t = TTI_SUB(tti, last_tti) * 1e-3f; // Calculate the elapsed time since last time command + float delta_d = (next_base_sec - prev_base_sec) * 3e8f / 2.0f; // Calculate distance difference in metres + + // Write new data + speed_data[write_idx].tti = tti; + speed_data[write_idx].delta_t = delta_t; + speed_data[write_idx].delta_d = delta_d; + + // Advance write index + write_idx = (write_idx + 1) % MAX_NOF_SPEED_VALUES; + + // Advance read index if overlaps with write + if (write_idx == read_idx) { + read_idx = (read_idx + 1) % MAX_NOF_SPEED_VALUES; + } + } + last_tti = tti; // Update last TTI } /** @@ -141,7 +192,43 @@ public: std::lock_guard lock(mutex); // Returns the current base, one direction distance - return next_base_sec * (3.6f * 3e8f / 2.0f); + return next_base_sec * (3e8f / 2.0f); + } + + /** + * Calculates approximated speed in km/h from the TA commands + * + * @return Distance based on the current time base if enough data has been gathered + */ + float get_speed_kmph(uint32_t tti) + { + std::lock_guard lock(mutex); + + // Advance read pointer for old TTI + while (read_idx != write_idx and TTI_SUB(tti, speed_data[read_idx].tti) > MAX_AGE_SPEED_VALUES) { + read_idx = (read_idx + 1) % MAX_NOF_SPEED_VALUES; + } + + // Early return if there is not enough data to calculate speed + uint32_t nof_values = ((write_idx + MAX_NOF_SPEED_VALUES) - read_idx) % MAX_NOF_SPEED_VALUES; + if (nof_values < MIN_NOF_SPEED_VALUES) { + return 0.0f; + } + + // Compute speed from gathered data + float sum = 0.0f; + float square_sum = 0.0f; + for (uint32_t i = read_idx; i != write_idx; i = (i + 1) % MAX_NOF_SPEED_VALUES) { + square_sum += speed_data[i].delta_t * speed_data[i].delta_t; + sum += speed_data[i].delta_t * 
speed_data[i].delta_d; + } + if (!std::isnormal(square_sum)) { + return 0.0f; // Avoid zero division + } + float speed_mps = sum / square_sum; // Speed in m/s + + // Returns the speed in km/h + return speed_mps * 3.6f; } }; diff --git a/srsue/hdr/stack/mac/demux.h b/srsue/hdr/stack/mac/demux.h index 94dc1479c..4e4b9f9cc 100644 --- a/srsue/hdr/stack/mac/demux.h +++ b/srsue/hdr/stack/mac/demux.h @@ -78,7 +78,7 @@ private: void process_sch_pdu(srslte::sch_pdu* pdu); void process_mch_pdu(srslte::mch_pdu* pdu); bool process_ce(srslte::sch_subh* subheader, uint32_t tti); - void parse_ta_cmd(srslte::sch_subh* subh); + void parse_ta_cmd(srslte::sch_subh* subh, uint32_t tti); bool is_uecrid_successful = false; diff --git a/srsue/hdr/stack/mac/proc_ra.h b/srsue/hdr/stack/mac/proc_ra.h index 39e03cf32..63d268ae6 100644 --- a/srsue/hdr/stack/mac/proc_ra.h +++ b/srsue/hdr/stack/mac/proc_ra.h @@ -70,7 +70,7 @@ private: void state_backoff_wait(uint32_t tti); void state_contention_resolution(); - void process_timeadv_cmd(uint32_t ta_cmd); + void process_timeadv_cmd(uint32_t tti, uint32_t ta_cmd); void initialization(); void resource_selection(); void preamble_transmission(); diff --git a/srsue/src/metrics_csv.cc b/srsue/src/metrics_csv.cc index 456e04a32..081c11a54 100644 --- a/srsue/src/metrics_csv.cc +++ b/srsue/src/metrics_csv.cc @@ -74,7 +74,7 @@ void metrics_csv::set_metrics(const ue_metrics_t& metrics, const uint32_t period if (file.is_open() && ue != NULL) { if (n_reports == 0 && !file_exists) { file << "time;cc;earfcn;pci;rsrp;pl;cfo;pci_neigh;rsrp_neigh;cfo_neigh;dl_mcs;dl_snr;dl_turbo;dl_brate;dl_bler;" - "ul_ta;ul_mcs;ul_buff;ul_brate;ul_" + "ul_ta;distance_km;speed_kmph;ul_mcs;ul_buff;ul_brate;ul_" "bler;" "rf_o;rf_" "u;rf_l;is_attached;" @@ -130,6 +130,8 @@ void metrics_csv::set_metrics(const ue_metrics_t& metrics, const uint32_t period } file << float_to_string(metrics.phy.sync[r].ta_us, 2); + file << float_to_string(metrics.phy.sync[r].distance_km, 2); + file << float_to_string(metrics.phy.sync[r].speed_kmph, 2); file << float_to_string(metrics.phy.ul[r].mcs, 2); file << float_to_string((float)metrics.stack.mac[r].ul_buffer, 2); @@ -154,11 +156,11 @@ void metrics_csv::set_metrics(const ue_metrics_t& metrics, const uint32_t period file << (metrics.stack.rrc.state == RRC_STATE_CONNECTED ? "1.0" : "0.0") << ";"; // Write system metrics. 
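    // Note (illustrative, based on ta_control.h in this patch): the distance_km and speed_kmph
    // columns written above come from ta_control. The one-way distance follows from the
    // accumulated TA base as next_base_sec * c / 2, and the speed estimate is formed from the
    // per-TA-command (delta_t, delta_d) pairs and converted from m/s to km/h (factor 3.6).
    // As a rough scale (figure not from the patch), one TA step of 16*Ts is about 0.52 us,
    // i.e. roughly 78 m of one-way distance change.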
- const srslte::sys_metrics_t &m = metrics.sys; + const srslte::sys_metrics_t& m = metrics.sys; file << float_to_string(m.process_realmem, 2); file << std::to_string(m.process_realmem_kB) << ";"; file << float_to_string(m.process_virtualmem, 2); - file << std::to_string(m.process_virtualmem_kB) << ";" ; + file << std::to_string(m.process_virtualmem_kB) << ";"; file << float_to_string(m.system_mem, 2); file << float_to_string(m.process_cpu_usage, 2); file << std::to_string(m.thread_count); diff --git a/srsue/src/phy/phy.cc b/srsue/src/phy/phy.cc index 39a36df40..4e0a326f4 100644 --- a/srsue/src/phy/phy.cc +++ b/srsue/src/phy/phy.cc @@ -204,14 +204,14 @@ void phy::get_metrics(phy_metrics_t* m) m->nof_active_cc = args.nof_lte_carriers; } -void phy::set_timeadv_rar(uint32_t ta_cmd) +void phy::set_timeadv_rar(uint32_t tti, uint32_t ta_cmd) { - common.ta.add_ta_cmd_rar(ta_cmd); + common.ta.add_ta_cmd_rar(tti, ta_cmd); } -void phy::set_timeadv(uint32_t ta_cmd) +void phy::set_timeadv(uint32_t tti, uint32_t ta_cmd) { - common.ta.add_ta_cmd_new(ta_cmd); + common.ta.add_ta_cmd_new(tti, ta_cmd); } void phy::deactivate_scells() diff --git a/srsue/src/phy/phy_common.cc b/srsue/src/phy/phy_common.cc index ddcd26df4..300f5419c 100644 --- a/srsue/src/phy/phy_common.cc +++ b/srsue/src/phy/phy_common.cc @@ -865,6 +865,8 @@ void phy_common::set_sync_metrics(const uint32_t& cc_idx, const sync_metrics_t& sync_metrics[cc_idx].cfo = sync_metrics[cc_idx].cfo + (m.cfo - sync_metrics[cc_idx].cfo) / sync_metrics_count[cc_idx]; sync_metrics[cc_idx].sfo = sync_metrics[cc_idx].sfo + (m.sfo - sync_metrics[cc_idx].sfo) / sync_metrics_count[cc_idx]; sync_metrics[cc_idx].ta_us = m.ta_us; + sync_metrics[cc_idx].distance_km = m.distance_km; + sync_metrics[cc_idx].speed_kmph = m.speed_kmph; } void phy_common::get_sync_metrics(sync_metrics_t m[SRSLTE_MAX_CARRIERS]) diff --git a/srsue/src/phy/sync.cc b/srsue/src/phy/sync.cc index 02bdc67bd..37afdc076 100644 --- a/srsue/src/phy/sync.cc +++ b/srsue/src/phy/sync.cc @@ -467,9 +467,11 @@ void sync::run_camping_in_sync_state(lte::sf_worker* lte_worker, Debug("SYNC: Worker %d synchronized", lte_worker->get_id()); - metrics.sfo = srslte_ue_sync_get_sfo(&ue_sync); - metrics.cfo = srslte_ue_sync_get_cfo(&ue_sync); - metrics.ta_us = worker_com->ta.get_usec(); + metrics.sfo = srslte_ue_sync_get_sfo(&ue_sync); + metrics.cfo = srslte_ue_sync_get_cfo(&ue_sync); + metrics.ta_us = worker_com->ta.get_usec(); + metrics.distance_km = worker_com->ta.get_km(); + metrics.speed_kmph = worker_com->ta.get_speed_kmph(tti); for (uint32_t i = 0; i < worker_com->args->nof_lte_carriers; i++) { worker_com->set_sync_metrics(i, metrics); } diff --git a/srsue/src/stack/mac/demux.cc b/srsue/src/stack/mac/demux.cc index 0b6c8c86a..c3a4dd5d6 100644 --- a/srsue/src/stack/mac/demux.cc +++ b/srsue/src/stack/mac/demux.cc @@ -96,7 +96,7 @@ void demux::push_pdu_temp_crnti(uint8_t* buff, uint32_t nof_bytes) } break; case srslte::dl_sch_lcid::TA_CMD: - parse_ta_cmd(pending_mac_msg.get()); + parse_ta_cmd(pending_mac_msg.get(), 0); break; default: break; @@ -275,7 +275,7 @@ bool demux::process_ce(srslte::sch_subh* subh, uint32_t tti) // Do nothing break; case srslte::dl_sch_lcid::TA_CMD: - parse_ta_cmd(subh); + parse_ta_cmd(subh, tti); break; case srslte::dl_sch_lcid::SCELL_ACTIVATION: { uint32_t cmd = (uint32_t)subh->get_activation_deactivation_cmd(); @@ -293,9 +293,9 @@ bool demux::process_ce(srslte::sch_subh* subh, uint32_t tti) return true; } -void demux::parse_ta_cmd(srslte::sch_subh* subh) +void 
demux::parse_ta_cmd(srslte::sch_subh* subh, uint32_t tti) { - phy_h->set_timeadv(subh->get_ta_cmd()); + phy_h->set_timeadv(tti, subh->get_ta_cmd()); Info("Received TA=%d (%d/%d) ", subh->get_ta_cmd(), time_alignment_timer->time_elapsed(), diff --git a/srsue/src/stack/mac/proc_ra.cc b/srsue/src/stack/mac/proc_ra.cc index a5cf40b8c..e5ca78907 100644 --- a/srsue/src/stack/mac/proc_ra.cc +++ b/srsue/src/stack/mac/proc_ra.cc @@ -302,11 +302,11 @@ void ra_proc::preamble_transmission() } // Process Timing Advance Command as defined in Section 5.2 -void ra_proc::process_timeadv_cmd(uint32_t ta) +void ra_proc::process_timeadv_cmd(uint32_t tti, uint32_t ta) { if (preambleIndex == 0) { // Preamble not selected by UE MAC - phy_h->set_timeadv_rar(ta); + phy_h->set_timeadv_rar(tti, ta); // Only if timer is running reset the timer if (time_alignment_timer->is_running()) { time_alignment_timer->run(); @@ -315,7 +315,7 @@ void ra_proc::process_timeadv_cmd(uint32_t ta) } else { // Preamble selected by UE MAC if (!time_alignment_timer->is_running()) { - phy_h->set_timeadv_rar(ta); + phy_h->set_timeadv_rar(tti, ta); time_alignment_timer->run(); logger.debug("Applying RAR TA CMD %d", ta); } else { @@ -377,7 +377,7 @@ void ra_proc::tb_decoded_ok(const uint8_t cc_idx, const uint32_t tti) while (rar_pdu_msg.next()) { if (rar_pdu_msg.get()->has_rapid() && rar_pdu_msg.get()->get_rapid() == sel_preamble) { rar_received = true; - process_timeadv_cmd(rar_pdu_msg.get()->get_ta_cmd()); + process_timeadv_cmd(tti, rar_pdu_msg.get()->get_ta_cmd()); // TODO: Indicate received target power // phy_h->set_target_power_rar(iniReceivedTargetPower, (preambleTransmissionCounter-1)*powerRampingStep); diff --git a/srsue/test/mac_test.cc b/srsue/test/mac_test.cc index 25f7eba9a..85dca15db 100644 --- a/srsue/test/mac_test.cc +++ b/srsue/test/mac_test.cc @@ -144,8 +144,8 @@ public: void set_mch_period_stop(uint32_t stop) override{}; // phy_interface_mac_common - void set_timeadv_rar(uint32_t ta_cmd) override { rar_time_adv = ta_cmd; } - void set_timeadv(uint32_t ta_cmd) override{}; + void set_timeadv_rar(uint32_t tti, uint32_t ta_cmd) override { rar_time_adv = ta_cmd; } + void set_timeadv(uint32_t tti, uint32_t ta_cmd) override{}; void set_activation_deactivation_scell(uint32_t cmd, uint32_t tti) override { scell_cmd = cmd; }; void set_rar_grant(uint8_t grant_payload[SRSLTE_RAR_GRANT_LEN], uint16_t rnti) override { diff --git a/srsue/test/ttcn3/hdr/lte_ttcn3_phy.h b/srsue/test/ttcn3/hdr/lte_ttcn3_phy.h index cace63e8d..0d0170c8b 100644 --- a/srsue/test/ttcn3/hdr/lte_ttcn3_phy.h +++ b/srsue/test/ttcn3/hdr/lte_ttcn3_phy.h @@ -83,8 +83,8 @@ public: int sr_last_tx_tti() override; // phy_interface_mac_common - void set_timeadv_rar(uint32_t ta_cmd) override; - void set_timeadv(uint32_t ta_cmd) override; + void set_timeadv_rar(uint32_t tti, uint32_t ta_cmd) override; + void set_timeadv(uint32_t tti, uint32_t ta_cmd) override; void set_rar_grant(uint8_t grant_payload[SRSLTE_RAR_GRANT_LEN], uint16_t rnti) override; uint32_t get_current_tti() override; float get_phr() override; diff --git a/srsue/test/ttcn3/src/lte_ttcn3_phy.cc b/srsue/test/ttcn3/src/lte_ttcn3_phy.cc index 7a2f0f4d7..4012b3bcb 100644 --- a/srsue/test/ttcn3/src/lte_ttcn3_phy.cc +++ b/srsue/test/ttcn3/src/lte_ttcn3_phy.cc @@ -216,12 +216,12 @@ int lte_ttcn3_phy::sr_last_tx_tti() // The RAT-agnostic interface for MAC /* Time advance commands */ -void lte_ttcn3_phy::set_timeadv_rar(uint32_t ta_cmd) +void lte_ttcn3_phy::set_timeadv_rar(uint32_t tti, uint32_t ta_cmd) { 
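  // The tti argument added in this patch is used by the real PHY to timestamp TA commands in
  // ta_control for the distance/speed estimate; this TTCN-3 stub ignores it and only logs.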
logger.debug("%s not implemented.", __FUNCTION__); } -void lte_ttcn3_phy::set_timeadv(uint32_t ta_cmd) +void lte_ttcn3_phy::set_timeadv(uint32_t tti, uint32_t ta_cmd) { logger.debug("%s not implemented.", __FUNCTION__); } From 49a535b81ad9f5c91d3263253bedde441fe22546 Mon Sep 17 00:00:00 2001 From: Xavier Arteaga Date: Tue, 9 Mar 2021 13:13:02 +0100 Subject: [PATCH 50/64] SRSUE: force printing neighbour cells by default in console trace --- srsue/hdr/metrics_stdout.h | 2 ++ srsue/src/metrics_stdout.cc | 6 +++--- 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/srsue/hdr/metrics_stdout.h b/srsue/hdr/metrics_stdout.h index b2853ff1d..f792415a4 100644 --- a/srsue/hdr/metrics_stdout.h +++ b/srsue/hdr/metrics_stdout.h @@ -38,6 +38,8 @@ public: void stop(){}; private: + static const bool FORCE_NEIGHBOUR_CELL = true; // Set to true for printing always neighbour cells + std::string float_to_string(float f, int digits); std::string float_to_eng_string(float f, int digits); void print_table(const bool display_neighbours); diff --git a/srsue/src/metrics_stdout.cc b/srsue/src/metrics_stdout.cc index 86a14afb7..d428bf14e 100644 --- a/srsue/src/metrics_stdout.cc +++ b/srsue/src/metrics_stdout.cc @@ -96,11 +96,11 @@ void metrics_stdout::set_metrics(const ue_metrics_t& metrics, const uint32_t per return; } - bool display_neighbours = false; + bool display_neighbours = FORCE_NEIGHBOUR_CELL; if (metrics.phy.nof_active_cc > 1) { - display_neighbours = metrics.stack.rrc.neighbour_cells.size() > metrics.phy.nof_active_cc - 1; + display_neighbours |= metrics.stack.rrc.neighbour_cells.size() > metrics.phy.nof_active_cc - 1; } else { - display_neighbours = metrics.stack.rrc.neighbour_cells.size() > 0; + display_neighbours |= metrics.stack.rrc.neighbour_cells.size() > 0; } // print table header every 10 reports From 4ae194581f7ccda3485ed5f24a4271997dfb476d Mon Sep 17 00:00:00 2001 From: Xavier Arteaga Date: Tue, 9 Mar 2021 14:10:55 +0100 Subject: [PATCH 51/64] SRSUE: Adjustments in distance and speed estimations --- srsue/hdr/phy/ta_control.h | 19 ++++++++++++------- 1 file changed, 12 insertions(+), 7 deletions(-) diff --git a/srsue/hdr/phy/ta_control.h b/srsue/hdr/phy/ta_control.h index a6013c853..21a7b67ff 100644 --- a/srsue/hdr/phy/ta_control.h +++ b/srsue/hdr/phy/ta_control.h @@ -192,7 +192,7 @@ public: std::lock_guard lock(mutex); // Returns the current base, one direction distance - return next_base_sec * (3e8f / 2.0f); + return next_base_sec * (3e8f / 2e3f); } /** @@ -207,6 +207,11 @@ public: // Advance read pointer for old TTI while (read_idx != write_idx and TTI_SUB(tti, speed_data[read_idx].tti) > MAX_AGE_SPEED_VALUES) { read_idx = (read_idx + 1) % MAX_NOF_SPEED_VALUES; + + // If there us no data, make last_tti invalid to prevent invalid TTI difference + if (read_idx == write_idx) { + last_tti = -1; + } } // Early return if there is not enough data to calculate speed @@ -216,16 +221,16 @@ public: } // Compute speed from gathered data - float sum = 0.0f; - float square_sum = 0.0f; + float sum_t = 0.0f; + float sum_d = 0.0f; for (uint32_t i = read_idx; i != write_idx; i = (i + 1) % MAX_NOF_SPEED_VALUES) { - square_sum += speed_data[i].delta_t * speed_data[i].delta_t; - sum += speed_data[i].delta_t * speed_data[i].delta_d; + sum_t += speed_data[i].delta_t; + sum_d += speed_data[i].delta_d; } - if (!std::isnormal(square_sum)) { + if (!std::isnormal(sum_t)) { return 0.0f; // Avoid zero division } - float speed_mps = sum / square_sum; // Speed in m/s + float speed_mps = sum_d / sum_t; // Speed in 
m/s // Returns the speed in km/h return speed_mps * 3.6f; From 8c4ed489e89226189241acd311f8c01ff53abeb1 Mon Sep 17 00:00:00 2001 From: Xavier Arteaga Date: Tue, 9 Mar 2021 18:38:19 +0100 Subject: [PATCH 52/64] SRSUE: Disable neighbour cell display by default --- srsue/hdr/metrics_stdout.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/srsue/hdr/metrics_stdout.h b/srsue/hdr/metrics_stdout.h index f792415a4..6c86f53e8 100644 --- a/srsue/hdr/metrics_stdout.h +++ b/srsue/hdr/metrics_stdout.h @@ -38,7 +38,7 @@ public: void stop(){}; private: - static const bool FORCE_NEIGHBOUR_CELL = true; // Set to true for printing always neighbour cells + static const bool FORCE_NEIGHBOUR_CELL = false; // Set to true for printing always neighbour cells std::string float_to_string(float f, int digits); std::string float_to_eng_string(float f, int digits); From dde8157bf1a5b1e86a4b57a7dc9464c775ac3d93 Mon Sep 17 00:00:00 2001 From: Pedro Alvarez Date: Thu, 18 Mar 2021 15:25:06 +0000 Subject: [PATCH 53/64] Make sure that S1 Setup request is restarted regardless of which error made the procedure fail. --- lib/src/common/network_utils.cc | 9 +-------- srsenb/src/stack/upper/s1ap.cc | 17 +++++++++++------ 2 files changed, 12 insertions(+), 14 deletions(-) diff --git a/lib/src/common/network_utils.cc b/lib/src/common/network_utils.cc index e3f903de5..3356af1f9 100644 --- a/lib/src/common/network_utils.cc +++ b/lib/src/common/network_utils.cc @@ -474,14 +474,7 @@ public: bool ret = true; pdu->N_bytes = static_cast(n_recv); - if (flags & MSG_NOTIFICATION) { - // Received notification - union sctp_notification* notification = (union sctp_notification*)pdu->msg; - if (notification->sn_header.sn_type == SCTP_SHUTDOWN_EVENT) { - // Socket Shutdown - ret = false; - } - } + // SCTP notifications handled in callback. func(std::move(pdu), from, sri, flags); return ret; } diff --git a/srsenb/src/stack/upper/s1ap.cc b/srsenb/src/stack/upper/s1ap.cc index 695e565c0..9b5f296af 100644 --- a/srsenb/src/stack/upper/s1ap.cc +++ b/srsenb/src/stack/upper/s1ap.cc @@ -174,11 +174,7 @@ srslte::proc_outcome_t s1ap::s1_setup_proc_t::start_mme_connection() } if (not s1ap_ptr->connect_mme()) { - procInfo("Failed to initiate SCTP socket. Attempting reconnection in %d seconds", - s1ap_ptr->mme_connect_timer.duration() / 1000); - srslte::console("Failed to initiate SCTP socket. Attempting reconnection in %d seconds\n", - s1ap_ptr->mme_connect_timer.duration() / 1000); - s1ap_ptr->mme_connect_timer.run(); + procInfo("Could not connect to MME"); return srslte::proc_outcome_t::error; } @@ -203,7 +199,7 @@ srslte::proc_outcome_t s1ap::s1_setup_proc_t::react(const srsenb::s1ap::s1_setup procInfo("S1Setup procedure completed successfully"); return srslte::proc_outcome_t::success; } - procError("S1Setup failed. Exiting..."); + procError("S1Setup failed."); srslte::console("S1setup failed\n"); return srslte::proc_outcome_t::error; } @@ -211,8 +207,15 @@ srslte::proc_outcome_t s1ap::s1_setup_proc_t::react(const srsenb::s1ap::s1_setup void s1ap::s1_setup_proc_t::then(const srslte::proc_state_t& result) const { if (result.is_error()) { + procInfo("Failed to initiate S1 connection. Attempting reconnection in %d seconds", + s1ap_ptr->mme_connect_timer.duration() / 1000); + srslte::console("Failed to initiate S1 connection. 
Attempting reconnection in %d seconds\n", + s1ap_ptr->mme_connect_timer.duration() / 1000); + s1ap_ptr->mme_connect_timer.run(); + s1ap_ptr->stack->remove_mme_socket(s1ap_ptr->s1ap_socket.get_socket()); s1ap_ptr->s1ap_socket.reset(); procInfo("S1AP socket closed."); + // Try again with in 10 seconds } } @@ -429,11 +432,13 @@ bool s1ap::connect_mme() &s1ap_socket, srslte::net_utils::socket_type::seqpacket, args.s1c_bind_addr.c_str())) { return false; } + logger.info("SCTP socket opened. fd=%d", s1ap_socket.fd()); // Connect to the MME address if (not s1ap_socket.connect_to(args.mme_addr.c_str(), MME_PORT, &mme_addr)) { return false; } + logger.info("SCTP socket connected with MME. fd=%d", s1ap_socket.fd()); // Assign a handler to rx MME packets (going to run in a different thread) stack->add_mme_socket(s1ap_socket.fd()); From 977c194cbc00794703d3b4a80fe4f668f4672e03 Mon Sep 17 00:00:00 2001 From: Francisco Date: Thu, 18 Mar 2021 18:23:32 +0000 Subject: [PATCH 54/64] gtpu,bugfix - handle the case when gtpu fails to allocate buffer for end marker. Also, added a timer that when expired, it autoremoves the GTPU handover tunnel --- lib/include/srslte/common/task_scheduler.h | 16 +++-- srsenb/hdr/stack/upper/gtpu.h | 27 ++++---- srsenb/src/stack/enb_stack_lte.cc | 2 +- srsenb/src/stack/upper/gtpu.cc | 80 +++++++++++++++++----- srsenb/test/upper/gtpu_test.cc | 7 +- 5 files changed, 95 insertions(+), 37 deletions(-) diff --git a/lib/include/srslte/common/task_scheduler.h b/lib/include/srslte/common/task_scheduler.h index ed564f8f7..6036c60af 100644 --- a/lib/include/srslte/common/task_scheduler.h +++ b/lib/include/srslte/common/task_scheduler.h @@ -42,7 +42,11 @@ public: srslte::task_queue_handle make_task_queue(uint32_t qsize) { return external_tasks.get_queue_handler(qsize); } //! Delays a task processing by duration_ms - void defer_callback(uint32_t duration_ms, std::function func) { timers.defer_callback(duration_ms, func); } + template + void defer_callback(uint32_t duration_ms, F&& func) + { + timers.defer_callback(duration_ms, std::forward(func)); + } //! 
Enqueues internal task to be run in next tic void defer_task(srslte::move_task_t func) { internal_tasks.push_back(std::move(func)); } @@ -114,9 +118,10 @@ public: { sched->notify_background_task_result(std::move(task)); } - void defer_callback(uint32_t duration_ms, std::function func) + template + void defer_callback(uint32_t duration_ms, F&& func) { - sched->defer_callback(duration_ms, std::move(func)); + sched->defer_callback(duration_ms, std::forward(func)); } void defer_task(srslte::move_task_t func) { sched->defer_task(std::move(func)); } @@ -136,9 +141,10 @@ public: sched->notify_background_task_result(std::move(task)); } srslte::task_queue_handle make_task_queue() { return sched->make_task_queue(); } - void defer_callback(uint32_t duration_ms, std::function func) + template + void defer_callback(uint32_t duration_ms, F&& func) { - sched->defer_callback(duration_ms, std::move(func)); + sched->defer_callback(duration_ms, std::forward(func)); } private: diff --git a/srsenb/hdr/stack/upper/gtpu.h b/srsenb/hdr/stack/upper/gtpu.h index d22875ea5..aade71e50 100644 --- a/srsenb/hdr/stack/upper/gtpu.h +++ b/srsenb/hdr/stack/upper/gtpu.h @@ -16,6 +16,7 @@ #include "common_enb.h" #include "srslte/common/buffer_pool.h" +#include "srslte/common/task_scheduler.h" #include "srslte/common/threads.h" #include "srslte/interfaces/enb_gtpu_interfaces.h" #include "srslte/phy/common/phy_common.h" @@ -34,7 +35,7 @@ class stack_interface_gtpu_lte; class gtpu final : public gtpu_interface_rrc, public gtpu_interface_pdcp { public: - explicit gtpu(srslog::basic_logger& logger); + explicit gtpu(srslte::task_sched_handle task_sched_, srslog::basic_logger& logger); int init(std::string gtp_bind_addr_, std::string mme_addr_, @@ -75,6 +76,7 @@ private: std::string mme_addr; srsenb::pdcp_interface_gtpu* pdcp = nullptr; srslog::basic_logger& logger; + srslte::task_sched_handle task_sched; // Class to create class m1u_handler @@ -104,16 +106,17 @@ private: const uint32_t undefined_pdcp_sn = std::numeric_limits::max(); struct tunnel { - bool dl_enabled = true; - bool fwd_teid_in_present = false; - bool prior_teid_in_present = false; - uint16_t rnti = SRSLTE_INVALID_RNTI; - uint32_t lcid = SRSENB_N_RADIO_BEARERS; - uint32_t teid_in = 0; - uint32_t teid_out = 0; - uint32_t spgw_addr = 0; - uint32_t fwd_teid_in = 0; ///< forward Rx SDUs to this TEID - uint32_t prior_teid_in = 0; ///< buffer bearer SDUs until this TEID receives an End Marker + bool dl_enabled = true; + bool fwd_teid_in_present = false; + bool prior_teid_in_present = false; + uint16_t rnti = SRSLTE_INVALID_RNTI; + uint32_t lcid = SRSENB_N_RADIO_BEARERS; + uint32_t teid_in = 0; + uint32_t teid_out = 0; + uint32_t spgw_addr = 0; + uint32_t fwd_teid_in = 0; ///< forward Rx SDUs to this TEID + uint32_t prior_teid_in = 0; ///< buffer bearer SDUs until this TEID receives an End Marker + srslte::unique_timer rx_timer; std::multimap buffer; }; std::unordered_map tunnels; @@ -129,7 +132,7 @@ private: void echo_response(in_addr_t addr, in_port_t port, uint16_t seq); void error_indication(in_addr_t addr, in_port_t port, uint32_t err_teid); - void end_marker(uint32_t teidin); + bool end_marker(uint32_t teidin); int create_dl_fwd_tunnel(uint32_t rx_teid_in, uint32_t tx_teid_in); diff --git a/srsenb/src/stack/enb_stack_lte.cc b/srsenb/src/stack/enb_stack_lte.cc index 5054ac028..67dce11a6 100644 --- a/srsenb/src/stack/enb_stack_lte.cc +++ b/srsenb/src/stack/enb_stack_lte.cc @@ -33,7 +33,7 @@ enb_stack_lte::enb_stack_lte(srslog::sink& log_sink) : pdcp(&task_sched, 
pdcp_logger), mac(&task_sched, mac_logger), rlc(rlc_logger), - gtpu(gtpu_logger), + gtpu(&task_sched, gtpu_logger), s1ap(&task_sched, s1ap_logger), rrc(&task_sched), mac_pcap(), diff --git a/srsenb/src/stack/upper/gtpu.cc b/srsenb/src/stack/upper/gtpu.cc index f4027bb10..a739f8b59 100644 --- a/srsenb/src/stack/upper/gtpu.cc +++ b/srsenb/src/stack/upper/gtpu.cc @@ -9,6 +9,7 @@ * the distribution. * */ + #include "srslte/upper/gtpu.h" #include "srsenb/hdr/stack/upper/gtpu.h" #include "srslte/common/network_utils.h" @@ -26,7 +27,9 @@ using namespace srslte; namespace srsenb { -gtpu::gtpu(srslog::basic_logger& logger) : m1u(this), logger(logger) {} +gtpu::gtpu(srslte::task_sched_handle task_sched_, srslog::basic_logger& logger) : + m1u(this), task_sched(task_sched_), logger(logger) +{} int gtpu::init(std::string gtp_bind_addr_, std::string mme_addr_, @@ -174,6 +177,26 @@ uint32_t gtpu::add_bearer(uint16_t rnti, uint32_t lcid, uint32_t addr, uint32_t after_tun.dl_enabled = false; after_tun.prior_teid_in_present = true; after_tun.prior_teid_in = teid_in; + // Schedule autoremoval of this indirect tunnel + uint32_t after_teidin = after_tun.teid_in; + uint32_t before_teidin = new_tun.teid_in; + new_tun.rx_timer = task_sched.get_unique_timer(); + new_tun.rx_timer.set(500, [this, before_teidin, after_teidin](uint32_t tid) { + auto it = tunnels.find(after_teidin); + if (it != tunnels.end()) { + tunnel& after_tun = it->second; + if (after_tun.prior_teid_in_present) { + after_tun.prior_teid_in_present = false; + set_tunnel_status(after_tun.teid_in, true); + } + // else: indirect tunnel already removed + } else { + logger.info("Callback to automatic indirect tunnel deletion called for non-existent TEID=%d", after_teidin); + } + // This will self-destruct the callback object + rem_tunnel(before_teidin); + }); + new_tun.rx_timer.run(); } // Connect tunnels if forwarding is activated @@ -301,11 +324,20 @@ void gtpu::handle_gtpu_s1u_rx_packet(srslte::unique_byte_buffer_t pdu, const soc return; } - if (header.teid != 0 && tunnels.count(header.teid) == 0) { - // Received G-PDU for non-existing and non-zero TEID. - // Sending GTP-U error indication - error_indication(addr.sin_addr.s_addr, addr.sin_port, header.teid); - return; + tunnel* rx_tunnel = nullptr; + if (header.teid != 0) { + auto it = tunnels.find(header.teid); + if (it == tunnels.end()) { + // Received G-PDU for non-existing and non-zero TEID. 
+ // Sending GTP-U error indication + error_indication(addr.sin_addr.s_addr, addr.sin_port, header.teid); + } + rx_tunnel = &it->second; + + if (rx_tunnel->rx_timer.is_valid()) { + // Restart Rx timer + rx_tunnel->rx_timer.run(); + } } switch (header.message_type) { @@ -345,22 +377,27 @@ void gtpu::handle_gtpu_s1u_rx_packet(srslte::unique_byte_buffer_t pdu, const soc } } break; case GTPU_MSG_END_MARKER: { - tunnel& old_tun = tunnels.find(header.teid)->second; - uint16_t rnti = old_tun.rnti; + uint16_t rnti = rx_tunnel->rnti; logger.info("Received GTPU End Marker for rnti=0x%x.", rnti); // TS 36.300, Sec 10.1.2.2.1 - Path Switch upon handover - if (old_tun.fwd_teid_in_present) { + if (rx_tunnel->fwd_teid_in_present) { // END MARKER should be forwarded to TeNB if forwarding is activated - end_marker(old_tun.fwd_teid_in); - old_tun.fwd_teid_in_present = false; + end_marker(rx_tunnel->fwd_teid_in); + rx_tunnel->fwd_teid_in_present = false; } else { // TeNB switches paths, and flush PDUs that have been buffered - std::vector& bearer_tunnels = ue_teidin_db.find(old_tun.rnti)->second[old_tun.lcid]; + auto rnti_it = ue_teidin_db.find(rnti); + if (rnti_it == ue_teidin_db.end()) { + logger.error("No rnti=0x%x entry for associated TEID=%d", rnti, header.teid); + return; + } + std::vector& bearer_tunnels = rnti_it->second[rx_tunnel->lcid]; for (uint32_t new_teidin : bearer_tunnels) { tunnel& new_tun = tunnels.at(new_teidin); - if (new_teidin != old_tun.teid_in and new_tun.prior_teid_in_present and - new_tun.prior_teid_in == old_tun.teid_in) { + if (new_teidin != rx_tunnel->teid_in and new_tun.prior_teid_in_present and + new_tun.prior_teid_in == rx_tunnel->teid_in) { + rem_tunnel(new_tun.prior_teid_in); new_tun.prior_teid_in_present = false; set_tunnel_status(new_tun.teid_in, true); } @@ -369,6 +406,7 @@ void gtpu::handle_gtpu_s1u_rx_packet(srslte::unique_byte_buffer_t pdu, const soc break; } default: + logger.warning("Unhandled GTPU message type=%d", header.message_type); break; } } @@ -471,13 +509,22 @@ void gtpu::echo_response(in_addr_t addr, in_port_t port, uint16_t seq) /**************************************************************************** * GTP-U END MARKER ***************************************************************************/ -void gtpu::end_marker(uint32_t teidin) +bool gtpu::end_marker(uint32_t teidin) { logger.info("TX GTPU End Marker."); - tunnel& tunnel = tunnels.find(teidin)->second; + auto it = tunnels.find(teidin); + if (it == tunnels.end()) { + logger.error("TEID=%d not found to send the end marker to", teidin); + return false; + } + tunnel& tunnel = it->second; gtpu_header_t header = {}; unique_byte_buffer_t pdu = make_byte_buffer(); + if (pdu == nullptr) { + logger.warning("Failed to allocate buffer to send End Marker to TEID=%d", teidin); + return false; + } // header header.flags = GTPU_FLAGS_VERSION_V1 | GTPU_FLAGS_GTP_PROTOCOL; @@ -493,6 +540,7 @@ void gtpu::end_marker(uint32_t teidin) servaddr.sin_port = htons(GTPU_PORT); sendto(fd, pdu->msg, pdu->N_bytes, MSG_EOR, (struct sockaddr*)&servaddr, sizeof(struct sockaddr_in)); + return true; } /**************************************************************************** diff --git a/srsenb/test/upper/gtpu_test.cc b/srsenb/test/upper/gtpu_test.cc index 15abb173f..37ecb7ba4 100644 --- a/srsenb/test/upper/gtpu_test.cc +++ b/srsenb/test/upper/gtpu_test.cc @@ -148,9 +148,10 @@ int test_gtpu_direct_tunneling() logger1.set_hex_dump_max_size(2048); srslog::basic_logger& logger2 = srslog::fetch_basic_logger("GTPU2"); 
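// [Editor's note] Standalone illustrative sketch, not srsenb code: it isolates the pattern used in
// the gtpu patch above, where a deferred one-shot callback auto-removes a handover forwarding
// tunnel. The callback captures only the TEID and looks the tunnel up again when it fires, so a
// tunnel that was already torn down (e.g. by an End Marker) is ignored instead of dereferenced.
// The tunnel_table/fire_timeout types below are simplified stand-ins, not the real API.
#include <cstdint>
#include <cstdio>
#include <functional>
#include <unordered_map>

struct tunnel_stub {
  bool dl_enabled = false;
};

struct tunnel_table {
  std::unordered_map<uint32_t, tunnel_stub> tunnels;
  std::function<void()>                     pending_timeout; // stands in for srslte::unique_timer

  void add_indirect_tunnel(uint32_t teid)
  {
    tunnels[teid]; // create entry with DL initially disabled
    // Capture the key, never an iterator/pointer that a later rehash or erase could invalidate.
    pending_timeout = [this, teid]() {
      auto it = tunnels.find(teid);
      if (it == tunnels.end()) {
        std::printf("timeout for TEID=%u ignored, tunnel already removed\n", (unsigned)teid);
        return;
      }
      it->second.dl_enabled = true; // stop waiting for the End Marker, resume the normal path
    };
  }

  void fire_timeout()
  {
    if (pending_timeout) {
      pending_timeout();
    }
  }
};

int main()
{
  tunnel_table tt;
  tt.add_indirect_tunnel(0x1234);
  tt.fire_timeout();
  return 0;
}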
logger2.set_hex_dump_max_size(2048); - srsenb::gtpu senb_gtpu(logger1), tenb_gtpu(logger2); - stack_tester senb_stack, tenb_stack; - pdcp_tester senb_pdcp, tenb_pdcp; + srslte::task_scheduler task_sched; + srsenb::gtpu senb_gtpu(&task_sched, logger1), tenb_gtpu(&task_sched, logger2); + stack_tester senb_stack, tenb_stack; + pdcp_tester senb_pdcp, tenb_pdcp; senb_gtpu.init(senb_addr_str, sgw_addr_str, "", "", &senb_pdcp, &senb_stack, false); tenb_gtpu.init(tenb_addr_str, sgw_addr_str, "", "", &tenb_pdcp, &tenb_stack, false); From a540c5655232092bc878edb1818459ed92112065 Mon Sep 17 00:00:00 2001 From: Francisco Date: Fri, 19 Mar 2021 11:29:11 +0000 Subject: [PATCH 55/64] gtpu bugfix - avoid erasing indirect tunnel and causing iterator invalidation while iterating list of tunnels --- srsenb/hdr/stack/upper/gtpu.h | 2 + srsenb/src/stack/upper/gtpu.cc | 70 +++++++++++++++++----------------- 2 files changed, 38 insertions(+), 34 deletions(-) diff --git a/srsenb/hdr/stack/upper/gtpu.h b/srsenb/hdr/stack/upper/gtpu.h index aade71e50..1fe5df36a 100644 --- a/srsenb/hdr/stack/upper/gtpu.h +++ b/srsenb/hdr/stack/upper/gtpu.h @@ -134,6 +134,8 @@ private: void error_indication(in_addr_t addr, in_port_t port, uint32_t err_teid); bool end_marker(uint32_t teidin); + void handle_end_marker(tunnel& rx_tunnel); + int create_dl_fwd_tunnel(uint32_t rx_teid_in, uint32_t tx_teid_in); /**************************************************************************** diff --git a/srsenb/src/stack/upper/gtpu.cc b/srsenb/src/stack/upper/gtpu.cc index a739f8b59..74fb72c31 100644 --- a/srsenb/src/stack/upper/gtpu.cc +++ b/srsenb/src/stack/upper/gtpu.cc @@ -289,16 +289,11 @@ void gtpu::rem_tunnel(uint32_t teidin) logger.warning("Removing GTPU tunnel TEID In=0x%x", teidin); return; } - if (it->second.fwd_teid_in_present) { - // Forward End Marker to forwarding tunnel, before deleting tunnel - end_marker(it->second.fwd_teid_in); - it->second.fwd_teid_in_present = false; - } auto ue_it = ue_teidin_db.find(it->second.rnti); std::vector& lcid_tunnels = ue_it->second[it->second.lcid]; lcid_tunnels.erase(std::remove(lcid_tunnels.begin(), lcid_tunnels.end(), teidin), lcid_tunnels.end()); + logger.debug("TEID In=%d for rnti=0x%x erased", teidin, it->second.rnti); tunnels.erase(it); - logger.debug("TEID In=%d erased", teidin); } void gtpu::rem_user(uint16_t rnti) @@ -314,6 +309,39 @@ void gtpu::rem_user(uint16_t rnti) } } +void gtpu::handle_end_marker(tunnel& rx_tunnel) +{ + uint16_t rnti = rx_tunnel.rnti; + logger.info("Received GTPU End Marker for rnti=0x%x.", rnti); + + // TS 36.300, Sec 10.1.2.2.1 - Path Switch upon handover + if (rx_tunnel.fwd_teid_in_present) { + // END MARKER should be forwarded to TeNB if forwarding is activated + end_marker(rx_tunnel.fwd_teid_in); + rx_tunnel.fwd_teid_in_present = false; + + rem_tunnel(rx_tunnel.teid_in); + } else { + // TeNB switches paths, and flush PDUs that have been buffered + auto rnti_it = ue_teidin_db.find(rnti); + if (rnti_it == ue_teidin_db.end()) { + logger.error("No rnti=0x%x entry for associated TEID=%d", rnti, rx_tunnel.teid_in); + return; + } + std::vector& bearer_tunnels = rnti_it->second[rx_tunnel.lcid]; + for (uint32_t new_teidin : bearer_tunnels) { + tunnel& new_tun = tunnels.at(new_teidin); + if (new_teidin != rx_tunnel.teid_in and new_tun.prior_teid_in_present and + new_tun.prior_teid_in == rx_tunnel.teid_in) { + rem_tunnel(new_tun.prior_teid_in); + new_tun.prior_teid_in_present = false; + set_tunnel_status(new_tun.teid_in, true); + break; + } + } + } +} + void 
gtpu::handle_gtpu_s1u_rx_packet(srslte::unique_byte_buffer_t pdu, const sockaddr_in& addr) { logger.debug("Received %d bytes from S1-U interface", pdu->N_bytes); @@ -376,35 +404,9 @@ void gtpu::handle_gtpu_s1u_rx_packet(srslte::unique_byte_buffer_t pdu, const soc } } } break; - case GTPU_MSG_END_MARKER: { - uint16_t rnti = rx_tunnel->rnti; - logger.info("Received GTPU End Marker for rnti=0x%x.", rnti); - - // TS 36.300, Sec 10.1.2.2.1 - Path Switch upon handover - if (rx_tunnel->fwd_teid_in_present) { - // END MARKER should be forwarded to TeNB if forwarding is activated - end_marker(rx_tunnel->fwd_teid_in); - rx_tunnel->fwd_teid_in_present = false; - } else { - // TeNB switches paths, and flush PDUs that have been buffered - auto rnti_it = ue_teidin_db.find(rnti); - if (rnti_it == ue_teidin_db.end()) { - logger.error("No rnti=0x%x entry for associated TEID=%d", rnti, header.teid); - return; - } - std::vector& bearer_tunnels = rnti_it->second[rx_tunnel->lcid]; - for (uint32_t new_teidin : bearer_tunnels) { - tunnel& new_tun = tunnels.at(new_teidin); - if (new_teidin != rx_tunnel->teid_in and new_tun.prior_teid_in_present and - new_tun.prior_teid_in == rx_tunnel->teid_in) { - rem_tunnel(new_tun.prior_teid_in); - new_tun.prior_teid_in_present = false; - set_tunnel_status(new_tun.teid_in, true); - } - } - } + case GTPU_MSG_END_MARKER: + handle_end_marker(*rx_tunnel); break; - } default: logger.warning("Unhandled GTPU message type=%d", header.message_type); break; From a2e25014240d930b410a1ee049e3efb3456eaadb Mon Sep 17 00:00:00 2001 From: Francisco Date: Thu, 18 Mar 2021 18:54:02 +0000 Subject: [PATCH 56/64] avoid disabling SRBs in the eNB when the max retx reached signal is sent from the RLC to the RRC --- srsenb/src/stack/rrc/mac_controller.cc | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/srsenb/src/stack/rrc/mac_controller.cc b/srsenb/src/stack/rrc/mac_controller.cc index 8924377db..8d763321c 100644 --- a/srsenb/src/stack/rrc/mac_controller.cc +++ b/srsenb/src/stack/rrc/mac_controller.cc @@ -301,10 +301,7 @@ void mac_controller::handle_ho_prep(const asn1::rrc::ho_prep_info_r8_ies_s& ho_p void mac_controller::handle_max_retx() { - for (auto& ue_bearer : current_sched_ue_cfg.ue_bearers) { - ue_bearer.direction = sched_interface::ue_bearer_cfg_t::IDLE; - } - update_mac(config_tx); + set_drb_activation(false); } void mac_controller::set_scell_activation(const std::bitset& scell_mask) From 8347cabe4f5c61fccc8141ca83005b3e436ef895 Mon Sep 17 00:00:00 2001 From: Francisco Date: Thu, 18 Mar 2021 21:38:48 +0000 Subject: [PATCH 57/64] implemented a DFS-based PDCCH allocator --- srsenb/hdr/stack/mac/sched_grid.h | 11 +- .../hdr/stack/mac/sched_phy_ch/sched_result.h | 41 +++ .../stack/mac/sched_phy_ch/sf_cch_allocator.h | 76 ++++++ .../mac/sched_phy_ch/sf_cch_allocator.cc | 233 +++++++++++++++++- srsenb/test/mac/sched_grid_test.cc | 33 ++- 5 files changed, 358 insertions(+), 36 deletions(-) create mode 100644 srsenb/hdr/stack/mac/sched_phy_ch/sched_result.h diff --git a/srsenb/hdr/stack/mac/sched_grid.h b/srsenb/hdr/stack/mac/sched_grid.h index 79895bb86..2e55ef2c1 100644 --- a/srsenb/hdr/stack/mac/sched_grid.h +++ b/srsenb/hdr/stack/mac/sched_grid.h @@ -14,6 +14,7 @@ #define SRSLTE_SCHED_GRID_H #include "lib/include/srslte/interfaces/sched_interface.h" +#include "sched_phy_ch/sched_result.h" #include "sched_phy_ch/sf_cch_allocator.h" #include "sched_ue.h" #include "srslte/adt/bounded_bitset.h" @@ -37,16 +38,6 @@ enum class alloc_result { }; const char* to_string(alloc_result 
res); -//! Result of a Subframe sched computation -struct cc_sched_result { - bool generated = false; - rbgmask_t dl_mask = {}; ///< Accumulation of all DL RBG allocations - prbmask_t ul_mask = {}; ///< Accumulation of all UL PRB allocations - pdcch_mask_t pdcch_mask = {}; ///< Accumulation of all CCE allocations - sched_interface::dl_sched_res_t dl_sched_result = {}; - sched_interface::ul_sched_res_t ul_sched_result = {}; -}; - struct sf_sched_result { tti_point tti_rx; std::vector enb_cc_list; diff --git a/srsenb/hdr/stack/mac/sched_phy_ch/sched_result.h b/srsenb/hdr/stack/mac/sched_phy_ch/sched_result.h new file mode 100644 index 000000000..fb41598ef --- /dev/null +++ b/srsenb/hdr/stack/mac/sched_phy_ch/sched_result.h @@ -0,0 +1,41 @@ +/** + * + * \section COPYRIGHT + * + * Copyright 2013-2020 Software Radio Systems Limited + * + * By using this file, you agree to the terms and conditions set + * forth in the LICENSE file which can be found at the top level of + * the distribution. + * + */ + +#ifndef SRSLTE_SCHED_RESULT_H +#define SRSLTE_SCHED_RESULT_H + +#include "../sched_common.h" + +namespace srsenb { + +/// Result of a Subframe sched computation +struct cc_sched_result { + bool generated = false; + tti_point tti_rx{}; + + /// Accumulation of all DL RBG allocations + rbgmask_t dl_mask = {}; + + /// Accumulation of all UL PRB allocations + prbmask_t ul_mask = {}; + + /// Accumulation of all CCE allocations + pdcch_mask_t pdcch_mask = {}; + + /// Individual allocations information + sched_interface::dl_sched_res_t dl_sched_result = {}; + sched_interface::ul_sched_res_t ul_sched_result = {}; +}; + +} // namespace srsenb + +#endif // SRSLTE_SCHED_RESULT_H diff --git a/srsenb/hdr/stack/mac/sched_phy_ch/sf_cch_allocator.h b/srsenb/hdr/stack/mac/sched_phy_ch/sf_cch_allocator.h index d7167bbfc..c7ff21a73 100644 --- a/srsenb/hdr/stack/mac/sched_phy_ch/sf_cch_allocator.h +++ b/srsenb/hdr/stack/mac/sched_phy_ch/sf_cch_allocator.h @@ -11,6 +11,7 @@ */ #include "../sched_common.h" +#include "sched_result.h" #ifndef SRSLTE_PDCCH_SCHED_H #define SRSLTE_PDCCH_SCHED_H @@ -19,6 +20,81 @@ namespace srsenb { class sched_ue; +class sf_cch_allocator2 +{ +public: + const static uint32_t MAX_CFI = 3; + struct tree_node { + int8_t pucch_n_prb = -1; ///< this PUCCH resource identifier + uint16_t rnti = SRSLTE_INVALID_RNTI; + uint32_t record_idx = 0; + uint32_t dci_pos_idx = 0; + srslte_dci_location_t dci_pos = {0, 0}; + /// Accumulation of all PDCCH masks for the current solution (DFS path) + pdcch_mask_t total_mask, current_mask; + prbmask_t total_pucch_mask; + }; + // struct alloc_t { + // int8_t pucch_n_prb; ///< this PUCCH resource identifier + // uint16_t rnti = 0; + // srslte_dci_location_t dci_pos = {0, 0}; + // pdcch_mask_t current_mask; ///< this allocation PDCCH mask + // pdcch_mask_t total_mask; ///< Accumulation of all PDCCH masks for the current solution (tree + // route) prbmask_t total_pucch_mask; ///< Accumulation of all PUCCH masks for the current + // solution/tree route + // }; + using alloc_result_t = srslte::bounded_vector; + + sf_cch_allocator2() : logger(srslog::fetch_basic_logger("MAC")) {} + + void init(const sched_cell_params_t& cell_params_); + void new_tti(tti_point tti_rx_); + /** + * Allocates DCI space in PDCCH and PUCCH, avoiding in the process collisions with other users + * @param alloc_type allocation type (e.g. 
DL data, UL data, ctrl) + * @param aggr_idx Aggregation level index (0..3) + * @param user UE object or null in case of broadcast/RAR/paging allocation + * @param has_pusch_grant If the UE has already an PUSCH grant for UCI allocated + * @return if the allocation was successful + */ + bool alloc_dci(alloc_type_t alloc_type, uint32_t aggr_idx, sched_ue* user = nullptr, bool has_pusch_grant = false); + + void rem_last_dci(); + + // getters + uint32_t get_cfi() const { return current_cfix + 1; } + void get_allocs(alloc_result_t* vec = nullptr, pdcch_mask_t* tot_mask = nullptr, size_t idx = 0) const; + uint32_t nof_cces() const { return cc_cfg->nof_cce_table[current_cfix]; } + size_t nof_allocs() const { return dci_record_list.size(); } + std::string result_to_string(bool verbose = false) const; + +private: + /// DCI allocation parameters + struct alloc_record { + bool pusch_uci; + uint32_t aggr_idx; + alloc_type_t alloc_type; + sched_ue* user; + }; + const cce_cfi_position_table* get_cce_loc_table(alloc_type_t alloc_type, sched_ue* user, uint32_t cfix) const; + + // PDCCH allocation algorithm + bool alloc_dfs_node(const alloc_record& record, uint32_t start_child_idx); + bool get_next_dfs(); + + // consts + const sched_cell_params_t* cc_cfg = nullptr; + srslog::basic_logger& logger; + srslte_pucch_cfg_t pucch_cfg_common = {}; + + // tti vars + tti_point tti_rx; + uint32_t current_cfix = 0; + uint32_t current_max_cfix = 0; + std::vector last_dci_dfs, temp_dci_dfs; + std::vector dci_record_list; ///< Keeps a record of all the PDCCH allocations done so far +}; + /// Class responsible for managing a PDCCH CCE grid, namely CCE allocs, and avoid collisions. class sf_cch_allocator { diff --git a/srsenb/src/stack/mac/sched_phy_ch/sf_cch_allocator.cc b/srsenb/src/stack/mac/sched_phy_ch/sf_cch_allocator.cc index f1e419139..32a7c9c4d 100644 --- a/srsenb/src/stack/mac/sched_phy_ch/sf_cch_allocator.cc +++ b/srsenb/src/stack/mac/sched_phy_ch/sf_cch_allocator.cc @@ -16,6 +16,231 @@ namespace srsenb { +bool is_pucch_sr_collision(const srslte_pucch_cfg_t& ue_pucch_cfg, tti_point tti_tx_dl_ack, uint32_t n1_pucch) +{ + if (ue_pucch_cfg.sr_configured && srslte_ue_ul_sr_send_tti(&ue_pucch_cfg, tti_tx_dl_ack.to_uint())) { + return n1_pucch == ue_pucch_cfg.n_pucch_sr; + } + return false; +} + +void sf_cch_allocator2::init(const sched_cell_params_t& cell_params_) +{ + cc_cfg = &cell_params_; + pucch_cfg_common = cc_cfg->pucch_cfg_common; +} + +void sf_cch_allocator2::new_tti(tti_point tti_rx_) +{ + tti_rx = tti_rx_; + + dci_record_list.clear(); + last_dci_dfs.clear(); + current_cfix = cc_cfg->sched_cfg->min_nof_ctrl_symbols - 1; + current_max_cfix = cc_cfg->sched_cfg->max_nof_ctrl_symbols - 1; +} + +const cce_cfi_position_table* +sf_cch_allocator2::get_cce_loc_table(alloc_type_t alloc_type, sched_ue* user, uint32_t cfix) const +{ + switch (alloc_type) { + case alloc_type_t::DL_BC: + return &cc_cfg->common_locations[cfix]; + case alloc_type_t::DL_PCCH: + return &cc_cfg->common_locations[cfix]; + case alloc_type_t::DL_RAR: + return &cc_cfg->rar_locations[to_tx_dl(tti_rx).sf_idx()][cfix]; + case alloc_type_t::DL_DATA: + return user->get_locations(cc_cfg->enb_cc_idx, cfix + 1, to_tx_dl(tti_rx).sf_idx()); + case alloc_type_t::UL_DATA: + return user->get_locations(cc_cfg->enb_cc_idx, cfix + 1, to_tx_dl(tti_rx).sf_idx()); + default: + break; + } + return nullptr; +} + +bool sf_cch_allocator2::alloc_dci(alloc_type_t alloc_type, uint32_t aggr_idx, sched_ue* user, bool has_pusch_grant) +{ + temp_dci_dfs.clear(); + uint32_t 
start_cfix = current_cfix; + + alloc_record record; + record.user = user; + record.aggr_idx = aggr_idx; + record.alloc_type = alloc_type; + record.pusch_uci = has_pusch_grant; + + // Try to allocate grant. If it fails, attempt the same grant, but using a different permutation of past grant DCI + // positions + do { + bool success = alloc_dfs_node(record, 0); + if (success) { + // DCI record allocation successful + dci_record_list.push_back(record); + return true; + } + if (temp_dci_dfs.empty()) { + temp_dci_dfs = last_dci_dfs; + } + } while (get_next_dfs()); + + // Revert steps to initial state, before dci record allocation was attempted + last_dci_dfs.swap(temp_dci_dfs); + current_cfix = start_cfix; + return false; +} + +// bool sf_cch_allocator2::get_next_dfs() +//{ +// if (last_dci_dfs.empty()) { +// // If we reach root, increase CFI +// if (current_cfix < cc_cfg->sched_cfg->max_nof_ctrl_symbols - 1) { +// current_cfix++; +// return true; +// } +// return false; +// } +// +// uint32_t dfs_level = last_dci_dfs.size() - 1; +// uint32_t start_child_idx = last_dci_dfs.back().dci_pos_idx + 1; +// last_dci_dfs.pop_back(); +// while (not alloc_dfs_node(dci_record_list[dfs_level], start_child_idx)) { +// start_child_idx = 0; +// // If failed to allocate record, go one level lower in DFS +// if (not get_next_dfs()) { +// // If no more options in DFS, return false +// return false; +// } +// } +//} + +bool sf_cch_allocator2::get_next_dfs() +{ + do { + uint32_t start_child_idx = 0; + if (last_dci_dfs.empty()) { + // If we reach root, increase CFI + current_cfix++; + if (current_cfix > cc_cfg->sched_cfg->max_nof_ctrl_symbols - 1) { + return false; + } + } else { + // Attempt to re-add last tree node, but with a higher node child index + start_child_idx = last_dci_dfs.back().dci_pos_idx + 1; + last_dci_dfs.pop_back(); + } + while (last_dci_dfs.size() < dci_record_list.size() and + alloc_dfs_node(dci_record_list[last_dci_dfs.size()], start_child_idx)) { + start_child_idx = 0; + } + } while (last_dci_dfs.size() < dci_record_list.size()); + + // Finished computation of next DFS node + return true; +} + +bool sf_cch_allocator2::alloc_dfs_node(const alloc_record& record, uint32_t start_dci_idx) +{ + // Get DCI Location Table + const cce_cfi_position_table* dci_locs = get_cce_loc_table(record.alloc_type, record.user, current_cfix); + if (dci_locs == nullptr or (*dci_locs)[record.aggr_idx].empty()) { + return false; + } + const cce_position_list& dci_pos_list = (*dci_locs)[record.aggr_idx]; + if (start_dci_idx >= dci_pos_list.size()) { + return false; + } + + tree_node node; + node.dci_pos_idx = start_dci_idx; + node.dci_pos.L = record.aggr_idx; + node.rnti = record.user != nullptr ? 
record.user->get_rnti() : SRSLTE_INVALID_RNTI; + node.current_mask.resize(nof_cces()); + // get cumulative pdcch & pucch masks + if (not last_dci_dfs.empty()) { + node.total_mask = last_dci_dfs.back().total_mask; + node.total_pucch_mask = last_dci_dfs.back().total_pucch_mask; + } else { + node.total_mask.resize(nof_cces()); + node.total_pucch_mask.resize(cc_cfg->nof_prb()); + } + + for (; node.dci_pos_idx < dci_pos_list.size(); ++node.dci_pos_idx) { + node.dci_pos.ncce = dci_pos_list[node.dci_pos_idx]; + + if (record.alloc_type == alloc_type_t::DL_DATA and not record.pusch_uci) { + // The UE needs to allocate space in PUCCH for HARQ-ACK + pucch_cfg_common.n_pucch = node.dci_pos.ncce + pucch_cfg_common.N_pucch_1; + + if (is_pucch_sr_collision(record.user->get_ue_cfg().pucch_cfg, to_tx_dl_ack(tti_rx), pucch_cfg_common.n_pucch)) { + // avoid collision of HARQ-ACK with own SR n(1)_pucch + continue; + } + + node.pucch_n_prb = srslte_pucch_n_prb(&cc_cfg->cfg.cell, &pucch_cfg_common, 0); + if (not cc_cfg->sched_cfg->pucch_mux_enabled and node.total_pucch_mask.test(node.pucch_n_prb)) { + // PUCCH allocation would collide with other PUCCH/PUSCH grants. Try another CCE position + continue; + } + } + + node.current_mask.reset(); + node.current_mask.fill(node.dci_pos.ncce, node.dci_pos.ncce + (1U << record.aggr_idx)); + if ((node.total_mask & node.current_mask).any()) { + // there is a PDCCH collision. Try another CCE position + continue; + } + + // Allocation successful + node.total_mask |= node.current_mask; + if (node.pucch_n_prb >= 0) { + node.total_pucch_mask.set(node.pucch_n_prb); + } + last_dci_dfs.push_back(node); + return true; + } + + return false; +} + +void sf_cch_allocator2::rem_last_dci() +{ + assert(not dci_record_list.empty()); + + // Remove DCI record + last_dci_dfs.pop_back(); + dci_record_list.pop_back(); +} + +void sf_cch_allocator2::get_allocs(alloc_result_t* vec, pdcch_mask_t* tot_mask, size_t idx) const +{ + if (vec != nullptr) { + vec->clear(); + + vec->resize(last_dci_dfs.size()); + for (uint32_t i = 0; i < last_dci_dfs.size(); ++i) { + (*vec)[i] = &last_dci_dfs[i]; + } + } + + if (tot_mask != nullptr) { + if (last_dci_dfs.empty()) { + tot_mask->resize(nof_cces()); + tot_mask->reset(); + } else { + *tot_mask = last_dci_dfs.back().total_mask; + } + } +} + +std::string sf_cch_allocator2::result_to_string(bool verbose) const +{ + return ""; +} + +///////////////////////// + void sf_cch_allocator::init(const sched_cell_params_t& cell_params_) { cc_cfg = &cell_params_; @@ -226,14 +451,6 @@ void sf_cch_allocator::alloc_tree_t::reset() dci_alloc_tree.clear(); } -bool is_pucch_sr_collision(const srslte_pucch_cfg_t& ue_pucch_cfg, tti_point tti_tx_dl_ack, uint32_t n1_pucch) -{ - if (ue_pucch_cfg.sr_configured && srslte_ue_ul_sr_send_tti(&ue_pucch_cfg, tti_tx_dl_ack.to_uint())) { - return n1_pucch == ue_pucch_cfg.n_pucch_sr; - } - return false; -} - /// Algorithm to compute a valid PDCCH allocation bool sf_cch_allocator::alloc_tree_t::add_tree_node_leaves(int parent_node_idx, const alloc_record_t& dci_record, diff --git a/srsenb/test/mac/sched_grid_test.cc b/srsenb/test/mac/sched_grid_test.cc index b70a79d51..3daecbaa6 100644 --- a/srsenb/test/mac/sched_grid_test.cc +++ b/srsenb/test/mac/sched_grid_test.cc @@ -45,11 +45,10 @@ int test_pdcch_one_ue() sched_interface::sched_args_t sched_args{}; TESTASSERT(cell_params[ENB_CC_IDX].set_cfg(ENB_CC_IDX, cell_cfg, sched_args)); - sf_cch_allocator pdcch; - sched_ue sched_ue{rnti, cell_params, ue_cfg}; + sf_cch_allocator2 pdcch; + sched_ue 
sched_ue{rnti, cell_params, ue_cfg}; pdcch.init(cell_params[PCell_IDX]); - TESTASSERT(pdcch.nof_alloc_combinations() == 0); TESTASSERT(pdcch.nof_allocs() == 0); uint32_t tti_counter = 0; @@ -85,8 +84,8 @@ int test_pdcch_one_ue() const cce_position_list& dci_locs = (*dci_cce)[aggr_idx]; // TEST: Check the first alloc of the pdcch result (e.g. rnti, valid cce mask, etc.) - sf_cch_allocator::alloc_result_t pdcch_result; - pdcch_mask_t pdcch_mask; + sf_cch_allocator2::alloc_result_t pdcch_result; + pdcch_mask_t pdcch_mask; pdcch.get_allocs(&pdcch_result, &pdcch_mask, 0); TESTASSERT(pdcch_result.size() == 1); TESTASSERT(pdcch_result[0]->rnti == sched_ue.get_rnti()); @@ -142,11 +141,10 @@ int test_pdcch_ue_and_sibs() sched_interface::sched_args_t sched_args{}; TESTASSERT(cell_params[0].set_cfg(0, cell_cfg, sched_args)); - sf_cch_allocator pdcch; - sched_ue sched_ue{0x46, cell_params, ue_cfg}; + sf_cch_allocator2 pdcch; + sched_ue sched_ue{0x46, cell_params, ue_cfg}; pdcch.init(cell_params[PCell_IDX]); - TESTASSERT(pdcch.nof_alloc_combinations() == 0); TESTASSERT(pdcch.nof_allocs() == 0); tti_point tti_rx{std::uniform_int_distribution(0, 9)(get_rand_gen())}; @@ -154,10 +152,10 @@ int test_pdcch_ue_and_sibs() pdcch.new_tti(tti_rx); TESTASSERT(pdcch.nof_cces() == cell_params[0].nof_cce_table[0]); TESTASSERT(pdcch.get_cfi() == 1); // Start at CFI=1 - TESTASSERT(pdcch.nof_alloc_combinations() == 0); + TESTASSERT(pdcch.nof_allocs() == 0); TESTASSERT(pdcch.alloc_dci(alloc_type_t::DL_BC, 2)); - TESTASSERT(pdcch.nof_alloc_combinations() == 4); + TESTASSERT(pdcch.nof_allocs() == 1); TESTASSERT(pdcch.alloc_dci(alloc_type_t::DL_RAR, 2)); TESTASSERT(pdcch.nof_allocs() == 2); TESTASSERT(pdcch.alloc_dci(alloc_type_t::DL_DATA, 2, &sched_ue, false)); @@ -168,9 +166,9 @@ int test_pdcch_ue_and_sibs() TESTASSERT(pdcch.nof_allocs() == 2); // TEST: DCI positions - uint32_t cfi = pdcch.get_cfi(); - sf_cch_allocator::alloc_result_t dci_result; - pdcch_mask_t result_pdcch_mask; + uint32_t cfi = pdcch.get_cfi(); + sf_cch_allocator2::alloc_result_t dci_result; + pdcch_mask_t result_pdcch_mask; pdcch.get_allocs(&dci_result, &result_pdcch_mask); TESTASSERT(dci_result.size() == 2); const cce_position_list& bc_dci_locs = cell_params[0].common_locations[cfi - 1][2]; @@ -191,13 +189,12 @@ int test_6prbs() sched_interface::sched_args_t sched_args{}; TESTASSERT(cell_params[0].set_cfg(0, cell_cfg, sched_args)); - sf_cch_allocator pdcch; - sched_ue sched_ue{0x46, cell_params, ue_cfg}, sched_ue2{0x47, cell_params, ue_cfg}; - sf_cch_allocator::alloc_result_t dci_result; - pdcch_mask_t result_pdcch_mask; + sf_cch_allocator2 pdcch; + sched_ue sched_ue{0x46, cell_params, ue_cfg}, sched_ue2{0x47, cell_params, ue_cfg}; + sf_cch_allocator2::alloc_result_t dci_result; + pdcch_mask_t result_pdcch_mask; pdcch.init(cell_params[PCell_IDX]); - TESTASSERT(pdcch.nof_alloc_combinations() == 0); TESTASSERT(pdcch.nof_allocs() == 0); uint32_t opt_cfi = 3; From 6bec92fbc9a0d337f2e214080b6e8055fe671189 Mon Sep 17 00:00:00 2001 From: Francisco Date: Thu, 18 Mar 2021 22:25:35 +0000 Subject: [PATCH 58/64] using new DFS-based PDCCH allocator as a default --- srsenb/hdr/stack/mac/sched_grid.h | 24 ++++---- srsenb/src/stack/mac/sched_grid.cc | 14 ++--- .../mac/sched_phy_ch/sf_cch_allocator.cc | 58 ++++++++++--------- srsenb/test/mac/sched_benchmark.cc | 12 ++-- 4 files changed, 57 insertions(+), 51 deletions(-) diff --git a/srsenb/hdr/stack/mac/sched_grid.h b/srsenb/hdr/stack/mac/sched_grid.h index 2e55ef2c1..86d9b1b9b 100644 --- 
a/srsenb/hdr/stack/mac/sched_grid.h +++ b/srsenb/hdr/stack/mac/sched_grid.h @@ -106,11 +106,11 @@ public: bool find_ul_alloc(uint32_t L, prb_interval* alloc) const; // getters - const rbgmask_t& get_dl_mask() const { return dl_mask; } - const prbmask_t& get_ul_mask() const { return ul_mask; } - uint32_t get_cfi() const { return pdcch_alloc.get_cfi(); } - const sf_cch_allocator& get_pdcch_grid() const { return pdcch_alloc; } - uint32_t get_pucch_width() const { return pucch_nrb; } + const rbgmask_t& get_dl_mask() const { return dl_mask; } + const prbmask_t& get_ul_mask() const { return ul_mask; } + uint32_t get_cfi() const { return pdcch_alloc.get_cfi(); } + const sf_cch_allocator2& get_pdcch_grid() const { return pdcch_alloc; } + uint32_t get_pucch_width() const { return pucch_nrb; } private: alloc_result alloc_dl(uint32_t aggr_lvl, @@ -127,7 +127,7 @@ private: prbmask_t pucch_mask; // derived - sf_cch_allocator pdcch_alloc = {}; + sf_cch_allocator2 pdcch_alloc = {}; // internal state tti_point tti_rx; @@ -219,12 +219,12 @@ public: const sched_cell_params_t* get_cc_cfg() const { return cc_cfg; } private: - void set_dl_data_sched_result(const sf_cch_allocator::alloc_result_t& dci_result, - sched_interface::dl_sched_res_t* dl_result, - sched_ue_list& ue_list); - void set_ul_sched_result(const sf_cch_allocator::alloc_result_t& dci_result, - sched_interface::ul_sched_res_t* ul_result, - sched_ue_list& ue_list); + void set_dl_data_sched_result(const sf_cch_allocator2::alloc_result_t& dci_result, + sched_interface::dl_sched_res_t* dl_result, + sched_ue_list& ue_list); + void set_ul_sched_result(const sf_cch_allocator2::alloc_result_t& dci_result, + sched_interface::ul_sched_res_t* ul_result, + sched_ue_list& ue_list); // consts const sched_cell_params_t* cc_cfg = nullptr; diff --git a/srsenb/src/stack/mac/sched_grid.cc b/srsenb/src/stack/mac/sched_grid.cc index d4d983c60..22afd988e 100644 --- a/srsenb/src/stack/mac/sched_grid.cc +++ b/srsenb/src/stack/mac/sched_grid.cc @@ -645,9 +645,9 @@ alloc_result sf_sched::alloc_phich(sched_ue* user) return alloc_result::no_rnti_opportunity; } -void sf_sched::set_dl_data_sched_result(const sf_cch_allocator::alloc_result_t& dci_result, - sched_interface::dl_sched_res_t* dl_result, - sched_ue_list& ue_list) +void sf_sched::set_dl_data_sched_result(const sf_cch_allocator2::alloc_result_t& dci_result, + sched_interface::dl_sched_res_t* dl_result, + sched_ue_list& ue_list) { for (const auto& data_alloc : data_allocs) { dl_result->data.emplace_back(); @@ -788,9 +788,9 @@ uci_pusch_t is_uci_included(const sf_sched* sf_sched, } } -void sf_sched::set_ul_sched_result(const sf_cch_allocator::alloc_result_t& dci_result, - sched_interface::ul_sched_res_t* ul_result, - sched_ue_list& ue_list) +void sf_sched::set_ul_sched_result(const sf_cch_allocator2::alloc_result_t& dci_result, + sched_interface::ul_sched_res_t* ul_result, + sched_ue_list& ue_list) { /* Set UL data DCI locs and format */ for (const auto& ul_alloc : ul_data_allocs) { @@ -902,7 +902,7 @@ void sf_sched::generate_sched_results(sched_ue_list& ue_db) } /* Pick one of the possible DCI masks */ - sf_cch_allocator::alloc_result_t dci_result; + sf_cch_allocator2::alloc_result_t dci_result; // tti_alloc.get_pdcch_grid().result_to_string(); tti_alloc.get_pdcch_grid().get_allocs(&dci_result, &cc_result->pdcch_mask); diff --git a/srsenb/src/stack/mac/sched_phy_ch/sf_cch_allocator.cc b/srsenb/src/stack/mac/sched_phy_ch/sf_cch_allocator.cc index 32a7c9c4d..8148da106 100644 --- 
a/srsenb/src/stack/mac/sched_phy_ch/sf_cch_allocator.cc +++ b/srsenb/src/stack/mac/sched_phy_ch/sf_cch_allocator.cc @@ -78,6 +78,11 @@ bool sf_cch_allocator2::alloc_dci(alloc_type_t alloc_type, uint32_t aggr_idx, sc if (success) { // DCI record allocation successful dci_record_list.push_back(record); + + if (is_dl_ctrl_alloc(alloc_type)) { + // Dynamic CFI not yet supported for DL control allocations, as coderate can be exceeded + current_max_cfix = current_cfix; + } return true; } if (temp_dci_dfs.empty()) { @@ -91,30 +96,6 @@ bool sf_cch_allocator2::alloc_dci(alloc_type_t alloc_type, uint32_t aggr_idx, sc return false; } -// bool sf_cch_allocator2::get_next_dfs() -//{ -// if (last_dci_dfs.empty()) { -// // If we reach root, increase CFI -// if (current_cfix < cc_cfg->sched_cfg->max_nof_ctrl_symbols - 1) { -// current_cfix++; -// return true; -// } -// return false; -// } -// -// uint32_t dfs_level = last_dci_dfs.size() - 1; -// uint32_t start_child_idx = last_dci_dfs.back().dci_pos_idx + 1; -// last_dci_dfs.pop_back(); -// while (not alloc_dfs_node(dci_record_list[dfs_level], start_child_idx)) { -// start_child_idx = 0; -// // If failed to allocate record, go one level lower in DFS -// if (not get_next_dfs()) { -// // If no more options in DFS, return false -// return false; -// } -// } -//} - bool sf_cch_allocator2::get_next_dfs() { do { @@ -122,7 +103,7 @@ bool sf_cch_allocator2::get_next_dfs() if (last_dci_dfs.empty()) { // If we reach root, increase CFI current_cfix++; - if (current_cfix > cc_cfg->sched_cfg->max_nof_ctrl_symbols - 1) { + if (current_cfix > current_max_cfix) { return false; } } else { @@ -236,7 +217,32 @@ void sf_cch_allocator2::get_allocs(alloc_result_t* vec, pdcch_mask_t* tot_mask, std::string sf_cch_allocator2::result_to_string(bool verbose) const { - return ""; + fmt::basic_memory_buffer strbuf; + if (dci_record_list.empty()) { + fmt::format_to(strbuf, "SCHED: PDCCH allocations cfi={}, nof_cce={}, No allocations.\n", get_cfi(), nof_cces()); + } else { + fmt::format_to(strbuf, + "SCHED: PDCCH allocations cfi={}, nof_cce={}, nof_allocs={}, total PDCCH mask=0x{:x}", + get_cfi(), + nof_cces(), + nof_allocs(), + last_dci_dfs.back().total_mask); + alloc_result_t vec; + get_allocs(&vec); + if (verbose) { + fmt::format_to(strbuf, ", allocations:\n"); + for (const auto& dci_alloc : vec) { + fmt::format_to(strbuf, + " > rnti=0x{:0x}: 0x{:x} / 0x{:x}\n", + dci_alloc->rnti, + dci_alloc->current_mask, + dci_alloc->total_mask); + } + } else { + fmt::format_to(strbuf, ".\n"); + } + } + return fmt::to_string(strbuf); } ///////////////////////// diff --git a/srsenb/test/mac/sched_benchmark.cc b/srsenb/test/mac/sched_benchmark.cc index 2a83a41eb..d61b270c4 100644 --- a/srsenb/test/mac/sched_benchmark.cc +++ b/srsenb/test/mac/sched_benchmark.cc @@ -219,7 +219,7 @@ run_data expected_run_result(run_params params) int tbs = srslte_ra_tbs_from_idx(tbs_idx, params.nof_prbs); ret.avg_dl_throughput = static_cast(tbs) * 1e3F; // bps - tbs_idx = srslte_ra_tbs_idx_from_mcs(28, false, true); + tbs_idx = srslte_ra_tbs_idx_from_mcs(24, false, true); uint32_t nof_pusch_prbs = params.nof_prbs - (params.nof_prbs == 6 ? 
2 : 4); tbs = srslte_ra_tbs_from_idx(tbs_idx, nof_pusch_prbs); ret.avg_ul_throughput = static_cast(tbs) * 1e3F; // bps @@ -229,16 +229,16 @@ run_data expected_run_result(run_params params) switch (params.nof_prbs) { case 6: ret.avg_dl_mcs = 25; - ret.avg_dl_throughput *= 0.7; - ret.avg_ul_throughput *= 0.25; + ret.avg_dl_throughput *= 0.68; + ret.avg_ul_throughput *= 0.75; break; case 15: ret.avg_dl_throughput *= 0.95; - ret.avg_ul_throughput *= 0.5; + ret.avg_ul_throughput *= 0.7; break; default: ret.avg_dl_throughput *= 0.97; - ret.avg_ul_throughput *= 0.5; + ret.avg_ul_throughput *= 0.85; break; } return ret; @@ -257,7 +257,7 @@ void print_benchmark_results(const std::vector& run_results) int tbs_idx = srslte_ra_tbs_idx_from_mcs(28, false, false); int tbs = srslte_ra_tbs_from_idx(tbs_idx, r.params.nof_prbs); float dl_rate_overhead = 1.0F - r.avg_dl_throughput / (static_cast(tbs) * 1e3F); - tbs_idx = srslte_ra_tbs_idx_from_mcs(28, false, true); + tbs_idx = srslte_ra_tbs_idx_from_mcs(24, false, true); uint32_t nof_pusch_prbs = r.params.nof_prbs - (r.params.nof_prbs == 6 ? 2 : 4); tbs = srslte_ra_tbs_from_idx(tbs_idx, nof_pusch_prbs); float ul_rate_overhead = 1.0F - r.avg_ul_throughput / (static_cast(tbs) * 1e3F); From 2054ad3f3ce64e52ff5065c05994198b8fa29568 Mon Sep 17 00:00:00 2001 From: Francisco Date: Thu, 18 Mar 2021 22:35:27 +0000 Subject: [PATCH 59/64] eliminate old BFS-based PDCCH allocator. Improvement the scheduler speed by ~2x --- srsenb/hdr/stack/mac/sched_grid.h | 24 +- .../stack/mac/sched_phy_ch/sf_cch_allocator.h | 109 +---- srsenb/src/stack/mac/sched_grid.cc | 14 +- .../mac/sched_phy_ch/sf_cch_allocator.cc | 394 ++---------------- srsenb/test/mac/sched_grid_test.cc | 26 +- 5 files changed, 60 insertions(+), 507 deletions(-) diff --git a/srsenb/hdr/stack/mac/sched_grid.h b/srsenb/hdr/stack/mac/sched_grid.h index 86d9b1b9b..2e55ef2c1 100644 --- a/srsenb/hdr/stack/mac/sched_grid.h +++ b/srsenb/hdr/stack/mac/sched_grid.h @@ -106,11 +106,11 @@ public: bool find_ul_alloc(uint32_t L, prb_interval* alloc) const; // getters - const rbgmask_t& get_dl_mask() const { return dl_mask; } - const prbmask_t& get_ul_mask() const { return ul_mask; } - uint32_t get_cfi() const { return pdcch_alloc.get_cfi(); } - const sf_cch_allocator2& get_pdcch_grid() const { return pdcch_alloc; } - uint32_t get_pucch_width() const { return pucch_nrb; } + const rbgmask_t& get_dl_mask() const { return dl_mask; } + const prbmask_t& get_ul_mask() const { return ul_mask; } + uint32_t get_cfi() const { return pdcch_alloc.get_cfi(); } + const sf_cch_allocator& get_pdcch_grid() const { return pdcch_alloc; } + uint32_t get_pucch_width() const { return pucch_nrb; } private: alloc_result alloc_dl(uint32_t aggr_lvl, @@ -127,7 +127,7 @@ private: prbmask_t pucch_mask; // derived - sf_cch_allocator2 pdcch_alloc = {}; + sf_cch_allocator pdcch_alloc = {}; // internal state tti_point tti_rx; @@ -219,12 +219,12 @@ public: const sched_cell_params_t* get_cc_cfg() const { return cc_cfg; } private: - void set_dl_data_sched_result(const sf_cch_allocator2::alloc_result_t& dci_result, - sched_interface::dl_sched_res_t* dl_result, - sched_ue_list& ue_list); - void set_ul_sched_result(const sf_cch_allocator2::alloc_result_t& dci_result, - sched_interface::ul_sched_res_t* ul_result, - sched_ue_list& ue_list); + void set_dl_data_sched_result(const sf_cch_allocator::alloc_result_t& dci_result, + sched_interface::dl_sched_res_t* dl_result, + sched_ue_list& ue_list); + void set_ul_sched_result(const sf_cch_allocator::alloc_result_t& 
dci_result, + sched_interface::ul_sched_res_t* ul_result, + sched_ue_list& ue_list); // consts const sched_cell_params_t* cc_cfg = nullptr; diff --git a/srsenb/hdr/stack/mac/sched_phy_ch/sf_cch_allocator.h b/srsenb/hdr/stack/mac/sched_phy_ch/sf_cch_allocator.h index c7ff21a73..f68dfd88b 100644 --- a/srsenb/hdr/stack/mac/sched_phy_ch/sf_cch_allocator.h +++ b/srsenb/hdr/stack/mac/sched_phy_ch/sf_cch_allocator.h @@ -20,7 +20,8 @@ namespace srsenb { class sched_ue; -class sf_cch_allocator2 +/// Class responsible for managing a PDCCH CCE grid, namely CCE allocs, and avoid collisions. +class sf_cch_allocator { public: const static uint32_t MAX_CFI = 3; @@ -34,18 +35,9 @@ public: pdcch_mask_t total_mask, current_mask; prbmask_t total_pucch_mask; }; - // struct alloc_t { - // int8_t pucch_n_prb; ///< this PUCCH resource identifier - // uint16_t rnti = 0; - // srslte_dci_location_t dci_pos = {0, 0}; - // pdcch_mask_t current_mask; ///< this allocation PDCCH mask - // pdcch_mask_t total_mask; ///< Accumulation of all PDCCH masks for the current solution (tree - // route) prbmask_t total_pucch_mask; ///< Accumulation of all PUCCH masks for the current - // solution/tree route - // }; using alloc_result_t = srslte::bounded_vector; - sf_cch_allocator2() : logger(srslog::fetch_basic_logger("MAC")) {} + sf_cch_allocator() : logger(srslog::fetch_basic_logger("MAC")) {} void init(const sched_cell_params_t& cell_params_); void new_tti(tti_point tti_rx_); @@ -95,101 +87,6 @@ private: std::vector dci_record_list; ///< Keeps a record of all the PDCCH allocations done so far }; -/// Class responsible for managing a PDCCH CCE grid, namely CCE allocs, and avoid collisions. -class sf_cch_allocator -{ -public: - const static uint32_t MAX_CFI = 3; - struct alloc_t { - int8_t pucch_n_prb; ///< this PUCCH resource identifier - uint16_t rnti = 0; - srslte_dci_location_t dci_pos = {0, 0}; - pdcch_mask_t current_mask; ///< this allocation PDCCH mask - pdcch_mask_t total_mask; ///< Accumulation of all PDCCH masks for the current solution (tree route) - prbmask_t total_pucch_mask; ///< Accumulation of all PUCCH masks for the current solution/tree route - }; - using alloc_result_t = srslte::bounded_vector; - - sf_cch_allocator() : logger(srslog::fetch_basic_logger("MAC")) {} - - void init(const sched_cell_params_t& cell_params_); - void new_tti(tti_point tti_rx_); - /** - * Allocates DCI space in PDCCH and PUCCH, avoiding in the process collisions with other users - * @param alloc_type allocation type (e.g. 
DL data, UL data, ctrl) - * @param aggr_idx Aggregation level index (0..3) - * @param user UE object or null in case of broadcast/RAR/paging allocation - * @param has_pusch_grant If the UE has already an PUSCH grant for UCI allocated - * @return if the allocation was successful - */ - bool alloc_dci(alloc_type_t alloc_type, uint32_t aggr_idx, sched_ue* user = nullptr, bool has_pusch_grant = false); - - void rem_last_dci(); - - // getters - uint32_t get_cfi() const { return current_cfix + 1; } - void get_allocs(alloc_result_t* vec = nullptr, pdcch_mask_t* tot_mask = nullptr, size_t idx = 0) const; - uint32_t nof_cces() const { return cc_cfg->nof_cce_table[current_cfix]; } - size_t nof_allocs() const { return dci_record_list.size(); } - size_t nof_alloc_combinations() const { return get_alloc_tree().nof_leaves(); } - std::string result_to_string(bool verbose = false) const; - -private: - /// DCI allocation parameters - struct alloc_record_t { - sched_ue* user; - uint32_t aggr_idx; - alloc_type_t alloc_type; - bool pusch_uci; - }; - /// Tree-based data structure to store possible DCI allocation decisions - struct alloc_tree_t { - struct node_t { - int parent_idx; - alloc_t node; - node_t(int i, const alloc_t& a) : parent_idx(i), node(a) {} - }; - - // args - size_t nof_cces; - const sched_cell_params_t* cc_cfg = nullptr; - srslte_pucch_cfg_t* pucch_cfg_temp = nullptr; - uint32_t cfi; - // state - std::vector dci_alloc_tree; - size_t prev_start = 0, prev_end = 0; - - explicit alloc_tree_t(uint32_t this_cfi, const sched_cell_params_t& cc_params, srslte_pucch_cfg_t& pucch_cfg); - size_t nof_leaves() const { return prev_end - prev_start; } - void reset(); - void get_allocs(alloc_result_t* vec, pdcch_mask_t* tot_mask, size_t idx) const; - bool add_tree_node_leaves(int node_idx, - const alloc_record_t& dci_record, - const cce_cfi_position_table& dci_locs, - tti_point tti_rx); - std::string result_to_string(bool verbose) const; - }; - - const alloc_tree_t& get_alloc_tree() const { return alloc_trees[current_cfix]; } - const cce_cfi_position_table* get_cce_loc_table(alloc_type_t alloc_type, sched_ue* user, uint32_t cfix) const; - - // PDCCH allocation algorithm - bool set_cfi(uint32_t cfi); - bool alloc_dci_record(const alloc_record_t& record, uint32_t cfix); - - // consts - const sched_cell_params_t* cc_cfg = nullptr; - srslog::basic_logger& logger; - srslte_pucch_cfg_t pucch_cfg_common = {}; - - // tti vars - tti_point tti_rx; - uint32_t current_cfix = 0; - uint32_t current_max_cfix = 0; - std::vector alloc_trees; ///< List of PDCCH alloc trees, where index is the cfi index - std::vector dci_record_list; ///< Keeps a record of all the PDCCH allocations done so far -}; - // Helper methods bool is_pucch_sr_collision(const srslte_pucch_cfg_t& ue_pucch_cfg, tti_point tti_tx_dl_ack, uint32_t n1_pucch); diff --git a/srsenb/src/stack/mac/sched_grid.cc b/srsenb/src/stack/mac/sched_grid.cc index 22afd988e..d4d983c60 100644 --- a/srsenb/src/stack/mac/sched_grid.cc +++ b/srsenb/src/stack/mac/sched_grid.cc @@ -645,9 +645,9 @@ alloc_result sf_sched::alloc_phich(sched_ue* user) return alloc_result::no_rnti_opportunity; } -void sf_sched::set_dl_data_sched_result(const sf_cch_allocator2::alloc_result_t& dci_result, - sched_interface::dl_sched_res_t* dl_result, - sched_ue_list& ue_list) +void sf_sched::set_dl_data_sched_result(const sf_cch_allocator::alloc_result_t& dci_result, + sched_interface::dl_sched_res_t* dl_result, + sched_ue_list& ue_list) { for (const auto& data_alloc : data_allocs) { 
dl_result->data.emplace_back(); @@ -788,9 +788,9 @@ uci_pusch_t is_uci_included(const sf_sched* sf_sched, } } -void sf_sched::set_ul_sched_result(const sf_cch_allocator2::alloc_result_t& dci_result, - sched_interface::ul_sched_res_t* ul_result, - sched_ue_list& ue_list) +void sf_sched::set_ul_sched_result(const sf_cch_allocator::alloc_result_t& dci_result, + sched_interface::ul_sched_res_t* ul_result, + sched_ue_list& ue_list) { /* Set UL data DCI locs and format */ for (const auto& ul_alloc : ul_data_allocs) { @@ -902,7 +902,7 @@ void sf_sched::generate_sched_results(sched_ue_list& ue_db) } /* Pick one of the possible DCI masks */ - sf_cch_allocator2::alloc_result_t dci_result; + sf_cch_allocator::alloc_result_t dci_result; // tti_alloc.get_pdcch_grid().result_to_string(); tti_alloc.get_pdcch_grid().get_allocs(&dci_result, &cc_result->pdcch_mask); diff --git a/srsenb/src/stack/mac/sched_phy_ch/sf_cch_allocator.cc b/srsenb/src/stack/mac/sched_phy_ch/sf_cch_allocator.cc index 8148da106..264d43fb3 100644 --- a/srsenb/src/stack/mac/sched_phy_ch/sf_cch_allocator.cc +++ b/srsenb/src/stack/mac/sched_phy_ch/sf_cch_allocator.cc @@ -24,13 +24,13 @@ bool is_pucch_sr_collision(const srslte_pucch_cfg_t& ue_pucch_cfg, tti_point tti return false; } -void sf_cch_allocator2::init(const sched_cell_params_t& cell_params_) +void sf_cch_allocator::init(const sched_cell_params_t& cell_params_) { cc_cfg = &cell_params_; pucch_cfg_common = cc_cfg->pucch_cfg_common; } -void sf_cch_allocator2::new_tti(tti_point tti_rx_) +void sf_cch_allocator::new_tti(tti_point tti_rx_) { tti_rx = tti_rx_; @@ -41,17 +41,15 @@ void sf_cch_allocator2::new_tti(tti_point tti_rx_) } const cce_cfi_position_table* -sf_cch_allocator2::get_cce_loc_table(alloc_type_t alloc_type, sched_ue* user, uint32_t cfix) const +sf_cch_allocator::get_cce_loc_table(alloc_type_t alloc_type, sched_ue* user, uint32_t cfix) const { switch (alloc_type) { case alloc_type_t::DL_BC: - return &cc_cfg->common_locations[cfix]; case alloc_type_t::DL_PCCH: return &cc_cfg->common_locations[cfix]; case alloc_type_t::DL_RAR: return &cc_cfg->rar_locations[to_tx_dl(tti_rx).sf_idx()][cfix]; case alloc_type_t::DL_DATA: - return user->get_locations(cc_cfg->enb_cc_idx, cfix + 1, to_tx_dl(tti_rx).sf_idx()); case alloc_type_t::UL_DATA: return user->get_locations(cc_cfg->enb_cc_idx, cfix + 1, to_tx_dl(tti_rx).sf_idx()); default: @@ -60,7 +58,7 @@ sf_cch_allocator2::get_cce_loc_table(alloc_type_t alloc_type, sched_ue* user, ui return nullptr; } -bool sf_cch_allocator2::alloc_dci(alloc_type_t alloc_type, uint32_t aggr_idx, sched_ue* user, bool has_pusch_grant) +bool sf_cch_allocator::alloc_dci(alloc_type_t alloc_type, uint32_t aggr_idx, sched_ue* user, bool has_pusch_grant) { temp_dci_dfs.clear(); uint32_t start_cfix = current_cfix; @@ -71,6 +69,22 @@ bool sf_cch_allocator2::alloc_dci(alloc_type_t alloc_type, uint32_t aggr_idx, sc record.alloc_type = alloc_type; record.pusch_uci = has_pusch_grant; + if (is_dl_ctrl_alloc(alloc_type) and nof_allocs() == 0 and cc_cfg->nof_prb() == 6 and + current_max_cfix > current_cfix) { + // Given that CFI is not currently dynamic for ctrl allocs, in case of SIB/RAR alloc and a low number of PRBs, + // start with an CFI that maximizes nof potential CCE locs + uint32_t nof_locs = 0, lowest_cfix = current_cfix; + for (uint32_t cfix_tmp = current_max_cfix; cfix_tmp > lowest_cfix; --cfix_tmp) { + const cce_cfi_position_table* dci_locs = get_cce_loc_table(record.alloc_type, record.user, cfix_tmp); + if ((*dci_locs)[record.aggr_idx].size() > nof_locs) { 
+ nof_locs = (*dci_locs)[record.aggr_idx].size(); + current_cfix = cfix_tmp; + } else { + break; + } + } + } + // Try to allocate grant. If it fails, attempt the same grant, but using a different permutation of past grant DCI // positions do { @@ -96,7 +110,7 @@ bool sf_cch_allocator2::alloc_dci(alloc_type_t alloc_type, uint32_t aggr_idx, sc return false; } -bool sf_cch_allocator2::get_next_dfs() +bool sf_cch_allocator::get_next_dfs() { do { uint32_t start_child_idx = 0; @@ -121,7 +135,7 @@ bool sf_cch_allocator2::get_next_dfs() return true; } -bool sf_cch_allocator2::alloc_dfs_node(const alloc_record& record, uint32_t start_dci_idx) +bool sf_cch_allocator::alloc_dfs_node(const alloc_record& record, uint32_t start_dci_idx) { // Get DCI Location Table const cce_cfi_position_table* dci_locs = get_cce_loc_table(record.alloc_type, record.user, current_cfix); @@ -185,7 +199,7 @@ bool sf_cch_allocator2::alloc_dfs_node(const alloc_record& record, uint32_t star return false; } -void sf_cch_allocator2::rem_last_dci() +void sf_cch_allocator::rem_last_dci() { assert(not dci_record_list.empty()); @@ -194,7 +208,7 @@ void sf_cch_allocator2::rem_last_dci() dci_record_list.pop_back(); } -void sf_cch_allocator2::get_allocs(alloc_result_t* vec, pdcch_mask_t* tot_mask, size_t idx) const +void sf_cch_allocator::get_allocs(alloc_result_t* vec, pdcch_mask_t* tot_mask, size_t idx) const { if (vec != nullptr) { vec->clear(); @@ -215,7 +229,7 @@ void sf_cch_allocator2::get_allocs(alloc_result_t* vec, pdcch_mask_t* tot_mask, } } -std::string sf_cch_allocator2::result_to_string(bool verbose) const +std::string sf_cch_allocator::result_to_string(bool verbose) const { fmt::basic_memory_buffer strbuf; if (dci_record_list.empty()) { @@ -245,362 +259,4 @@ std::string sf_cch_allocator2::result_to_string(bool verbose) const return fmt::to_string(strbuf); } -///////////////////////// - -void sf_cch_allocator::init(const sched_cell_params_t& cell_params_) -{ - cc_cfg = &cell_params_; - pucch_cfg_common = cc_cfg->pucch_cfg_common; - - // init alloc trees - alloc_trees.reserve(cc_cfg->sched_cfg->max_nof_ctrl_symbols); - for (uint32_t i = 0; i < cc_cfg->sched_cfg->max_nof_ctrl_symbols; ++i) { - alloc_trees.emplace_back(i + 1, *cc_cfg, pucch_cfg_common); - } -} - -void sf_cch_allocator::new_tti(tti_point tti_rx_) -{ - tti_rx = tti_rx_; - - // Reset back all CFIs - for (auto& t : alloc_trees) { - t.reset(); - } - dci_record_list.clear(); - current_cfix = cc_cfg->sched_cfg->min_nof_ctrl_symbols - 1; - current_max_cfix = cc_cfg->sched_cfg->max_nof_ctrl_symbols - 1; -} - -const cce_cfi_position_table* -sf_cch_allocator::get_cce_loc_table(alloc_type_t alloc_type, sched_ue* user, uint32_t cfix) const -{ - switch (alloc_type) { - case alloc_type_t::DL_BC: - return &cc_cfg->common_locations[cfix]; - case alloc_type_t::DL_PCCH: - return &cc_cfg->common_locations[cfix]; - case alloc_type_t::DL_RAR: - return &cc_cfg->rar_locations[to_tx_dl(tti_rx).sf_idx()][cfix]; - case alloc_type_t::DL_DATA: - return user->get_locations(cc_cfg->enb_cc_idx, cfix + 1, to_tx_dl(tti_rx).sf_idx()); - case alloc_type_t::UL_DATA: - return user->get_locations(cc_cfg->enb_cc_idx, cfix + 1, to_tx_dl(tti_rx).sf_idx()); - default: - break; - } - return nullptr; -} - -bool sf_cch_allocator::alloc_dci(alloc_type_t alloc_type, uint32_t aggr_idx, sched_ue* user, bool has_pusch_grant) -{ - // TODO: Make the alloc tree update lazy - alloc_record_t record{.user = user, .aggr_idx = aggr_idx, .alloc_type = alloc_type, .pusch_uci = has_pusch_grant}; - - if 
(is_dl_ctrl_alloc(alloc_type) and nof_allocs() == 0 and current_max_cfix > current_cfix) { - // Given that CFI is not currently dynamic for ctrl allocs, in case of SIB/RAR alloc, start with optimal CFI - // in terms of nof CCE locs - uint32_t nof_locs = 0, lowest_cfix = current_cfix; - for (uint32_t cfix_tmp = current_max_cfix; cfix_tmp > lowest_cfix; --cfix_tmp) { - const cce_cfi_position_table* dci_locs = get_cce_loc_table(record.alloc_type, record.user, cfix_tmp); - if ((*dci_locs)[record.aggr_idx].size() > nof_locs) { - nof_locs = (*dci_locs)[record.aggr_idx].size(); - current_cfix = cfix_tmp; - } else { - break; - } - } - } - - // Try to allocate user in PDCCH for given CFI. If it fails, increment CFI. - uint32_t first_cfi = get_cfi(); - bool success; - do { - success = alloc_dci_record(record, get_cfi() - 1); - } while (not success and current_cfix < current_max_cfix and set_cfi(get_cfi() + 1)); - - if (not success) { - // DCI allocation failed. go back to original CFI - if (get_cfi() != first_cfi and not set_cfi(first_cfi)) { - logger.error("SCHED: Failed to return back to original PDCCH state"); - } - return false; - } - - // DCI record allocation successful - dci_record_list.push_back(record); - - if (is_dl_ctrl_alloc(alloc_type)) { - // Dynamic CFI not yet supported for DL control allocations, as coderate can be exceeded - current_max_cfix = current_cfix; - } - - return true; -} - -void sf_cch_allocator::rem_last_dci() -{ - assert(not dci_record_list.empty()); - - // Remove DCI record - dci_record_list.pop_back(); - - // Remove leaves of PDCCH position decisions - auto& tree = alloc_trees[current_cfix]; - tree.prev_end = tree.prev_start; - if (dci_record_list.empty()) { - tree.prev_start = 0; - } else { - tree.prev_start = tree.dci_alloc_tree[tree.prev_start].parent_idx; - // Discover other tree nodes with same level - while (tree.prev_start > 0) { - uint32_t count = 1; - int parent_idx = tree.dci_alloc_tree[tree.prev_start - 1].parent_idx; - while (parent_idx >= 0) { - count++; - parent_idx = tree.dci_alloc_tree[parent_idx].parent_idx; - } - if (count == dci_record_list.size()) { - tree.prev_start--; - } else { - break; - } - } - } - tree.dci_alloc_tree.erase(tree.dci_alloc_tree.begin() + tree.prev_end, tree.dci_alloc_tree.end()); -} - -bool sf_cch_allocator::alloc_dci_record(const alloc_record_t& record, uint32_t cfix) -{ - bool ret = false; - auto& tree = alloc_trees[cfix]; - - // Get DCI Location Table - const cce_cfi_position_table* dci_locs = get_cce_loc_table(record.alloc_type, record.user, cfix); - if (dci_locs == nullptr or (*dci_locs)[record.aggr_idx].empty()) { - return ret; - } - - if (tree.prev_end > 0) { - for (size_t j = tree.prev_start; j < tree.prev_end; ++j) { - ret |= tree.add_tree_node_leaves((int)j, record, *dci_locs, tti_rx); - } - } else { - ret = tree.add_tree_node_leaves(-1, record, *dci_locs, tti_rx); - } - - if (ret) { - tree.prev_start = tree.prev_end; - tree.prev_end = tree.dci_alloc_tree.size(); - } - - return ret; -} - -bool sf_cch_allocator::set_cfi(uint32_t cfi) -{ - if (cfi < cc_cfg->sched_cfg->min_nof_ctrl_symbols or cfi > cc_cfg->sched_cfg->max_nof_ctrl_symbols) { - logger.error("Invalid CFI value. 
Defaulting to current CFI."); - return false; - } - - uint32_t new_cfix = cfi - 1; - if (new_cfix == current_cfix) { - return true; - } - - // setup new PDCCH alloc tree - auto& new_tree = alloc_trees[new_cfix]; - new_tree.reset(); - - if (not dci_record_list.empty()) { - // there are already PDCCH allocations - - // Rebuild Allocation Tree - bool ret = true; - for (const auto& old_record : dci_record_list) { - ret &= alloc_dci_record(old_record, new_cfix); - } - - if (not ret) { - // Fail to rebuild allocation tree. Go back to previous CFI - return false; - } - } - - current_cfix = new_cfix; - return true; -} - -void sf_cch_allocator::get_allocs(alloc_result_t* vec, pdcch_mask_t* tot_mask, size_t idx) const -{ - alloc_trees[current_cfix].get_allocs(vec, tot_mask, idx); -} - -std::string sf_cch_allocator::result_to_string(bool verbose) const -{ - return alloc_trees[current_cfix].result_to_string(verbose); -} - -sf_cch_allocator::alloc_tree_t::alloc_tree_t(uint32_t this_cfi, - const sched_cell_params_t& cc_params, - srslte_pucch_cfg_t& pucch_cfg_common) : - cfi(this_cfi), cc_cfg(&cc_params), pucch_cfg_temp(&pucch_cfg_common), nof_cces(cc_params.nof_cce_table[this_cfi - 1]) -{ - dci_alloc_tree.reserve(8); -} - -void sf_cch_allocator::alloc_tree_t::reset() -{ - prev_start = 0; - prev_end = 0; - dci_alloc_tree.clear(); -} - -/// Algorithm to compute a valid PDCCH allocation -bool sf_cch_allocator::alloc_tree_t::add_tree_node_leaves(int parent_node_idx, - const alloc_record_t& dci_record, - const cce_cfi_position_table& dci_locs, - tti_point tti_rx_) -{ - bool ret = false; - - alloc_t alloc; - alloc.rnti = (dci_record.user != nullptr) ? dci_record.user->get_rnti() : SRSLTE_INVALID_RNTI; - alloc.dci_pos.L = dci_record.aggr_idx; - - // get cumulative pdcch & pucch masks - pdcch_mask_t parent_total_mask; - prbmask_t parent_pucch_mask; - if (parent_node_idx >= 0) { - parent_total_mask = dci_alloc_tree[parent_node_idx].node.total_mask; - parent_pucch_mask = dci_alloc_tree[parent_node_idx].node.total_pucch_mask; - } else { - parent_total_mask.resize(nof_cces); - parent_pucch_mask.resize(cc_cfg->nof_prb()); - } - - for (uint32_t i = 0; i < dci_locs[dci_record.aggr_idx].size(); ++i) { - int8_t pucch_prbidx = -1; - uint32_t ncce_pos = dci_locs[dci_record.aggr_idx][i]; - - if (dci_record.alloc_type == alloc_type_t::DL_DATA and not dci_record.pusch_uci) { - // The UE needs to allocate space in PUCCH for HARQ-ACK - pucch_cfg_temp->n_pucch = ncce_pos + pucch_cfg_temp->N_pucch_1; - - if (is_pucch_sr_collision( - dci_record.user->get_ue_cfg().pucch_cfg, to_tx_dl_ack(tti_rx_), pucch_cfg_temp->n_pucch)) { - // avoid collision of HARQ-ACK with own SR n(1)_pucch - continue; - } - - pucch_prbidx = srslte_pucch_n_prb(&cc_cfg->cfg.cell, pucch_cfg_temp, 0); - if (not cc_cfg->sched_cfg->pucch_mux_enabled and parent_pucch_mask.test(pucch_prbidx)) { - // PUCCH allocation would collide with other PUCCH/PUSCH grants. Try another CCE position - continue; - } - } - - pdcch_mask_t alloc_mask(nof_cces); - alloc_mask.fill(ncce_pos, ncce_pos + (1u << dci_record.aggr_idx)); - if ((parent_total_mask & alloc_mask).any()) { - // there is a PDCCH collision. 
Try another CCE position - continue; - } - - // Allocation successful - alloc.current_mask = alloc_mask; - alloc.total_mask = parent_total_mask | alloc_mask; - alloc.dci_pos.ncce = ncce_pos; - alloc.pucch_n_prb = pucch_prbidx; - alloc.total_pucch_mask = parent_pucch_mask; - if (pucch_prbidx >= 0) { - alloc.total_pucch_mask.set(pucch_prbidx); - } - - // Prune if repetition of total_masks - uint32_t j = prev_end; - for (; j < dci_alloc_tree.size(); ++j) { - if (dci_alloc_tree[j].node.total_mask == alloc.total_mask) { - // leave nested for-loop - break; - } - } - if (j < dci_alloc_tree.size()) { - continue; - } - - // Register allocation - dci_alloc_tree.emplace_back(parent_node_idx, alloc); - ret = true; - } - - return ret; -} - -void sf_cch_allocator::alloc_tree_t::get_allocs(alloc_result_t* vec, pdcch_mask_t* tot_mask, size_t idx) const -{ - // if alloc tree is empty - if (prev_start == prev_end) { - if (vec != nullptr) { - vec->clear(); - } - if (tot_mask != nullptr) { - tot_mask->resize(nof_cces); - tot_mask->reset(); - } - return; - } - - // set vector of allocations - if (vec != nullptr) { - vec->clear(); - size_t i = prev_start + idx; - while (dci_alloc_tree[i].parent_idx >= 0) { - vec->push_back(&dci_alloc_tree[i].node); - i = (size_t)dci_alloc_tree[i].parent_idx; - } - vec->push_back(&dci_alloc_tree[i].node); - std::reverse(vec->begin(), vec->end()); - } - - // set final cce mask - if (tot_mask != nullptr) { - *tot_mask = dci_alloc_tree[prev_start + idx].node.total_mask; - } -} - -std::string sf_cch_allocator::alloc_tree_t::result_to_string(bool verbose) const -{ - // get all the possible combinations of DCI pos allocations - fmt::basic_memory_buffer strbuf; - fmt::format_to(strbuf, - "SCHED: PDCCH allocations cfi={}, nof_cce={}, {} possible combinations:\n", - cfi, - nof_cces, - prev_end - prev_start); - uint32_t count = 0; - for (size_t i = prev_start; i < prev_end; ++i) { - alloc_result_t vec; - pdcch_mask_t tot_mask; - get_allocs(&vec, &tot_mask, i - prev_start); - - fmt::format_to(strbuf, "[{}]: total mask=0x{:x}", count, tot_mask); - if (verbose) { - fmt::format_to(strbuf, ", allocations:\n"); - for (const auto& dci_alloc : vec) { - fmt::format_to(strbuf, - " > rnti=0x{:0x}: 0x{:x} / 0x{:x}\n", - dci_alloc->rnti, - dci_alloc->current_mask, - dci_alloc->total_mask); - } - } else { - fmt::format_to(strbuf, "\n"); - } - count++; - } - return fmt::to_string(strbuf); -} - } // namespace srsenb diff --git a/srsenb/test/mac/sched_grid_test.cc b/srsenb/test/mac/sched_grid_test.cc index 3daecbaa6..d4732ceee 100644 --- a/srsenb/test/mac/sched_grid_test.cc +++ b/srsenb/test/mac/sched_grid_test.cc @@ -45,8 +45,8 @@ int test_pdcch_one_ue() sched_interface::sched_args_t sched_args{}; TESTASSERT(cell_params[ENB_CC_IDX].set_cfg(ENB_CC_IDX, cell_cfg, sched_args)); - sf_cch_allocator2 pdcch; - sched_ue sched_ue{rnti, cell_params, ue_cfg}; + sf_cch_allocator pdcch; + sched_ue sched_ue{rnti, cell_params, ue_cfg}; pdcch.init(cell_params[PCell_IDX]); TESTASSERT(pdcch.nof_allocs() == 0); @@ -84,8 +84,8 @@ int test_pdcch_one_ue() const cce_position_list& dci_locs = (*dci_cce)[aggr_idx]; // TEST: Check the first alloc of the pdcch result (e.g. rnti, valid cce mask, etc.) 
- sf_cch_allocator2::alloc_result_t pdcch_result; - pdcch_mask_t pdcch_mask; + sf_cch_allocator::alloc_result_t pdcch_result; + pdcch_mask_t pdcch_mask; pdcch.get_allocs(&pdcch_result, &pdcch_mask, 0); TESTASSERT(pdcch_result.size() == 1); TESTASSERT(pdcch_result[0]->rnti == sched_ue.get_rnti()); @@ -141,8 +141,8 @@ int test_pdcch_ue_and_sibs() sched_interface::sched_args_t sched_args{}; TESTASSERT(cell_params[0].set_cfg(0, cell_cfg, sched_args)); - sf_cch_allocator2 pdcch; - sched_ue sched_ue{0x46, cell_params, ue_cfg}; + sf_cch_allocator pdcch; + sched_ue sched_ue{0x46, cell_params, ue_cfg}; pdcch.init(cell_params[PCell_IDX]); TESTASSERT(pdcch.nof_allocs() == 0); @@ -166,9 +166,9 @@ int test_pdcch_ue_and_sibs() TESTASSERT(pdcch.nof_allocs() == 2); // TEST: DCI positions - uint32_t cfi = pdcch.get_cfi(); - sf_cch_allocator2::alloc_result_t dci_result; - pdcch_mask_t result_pdcch_mask; + uint32_t cfi = pdcch.get_cfi(); + sf_cch_allocator::alloc_result_t dci_result; + pdcch_mask_t result_pdcch_mask; pdcch.get_allocs(&dci_result, &result_pdcch_mask); TESTASSERT(dci_result.size() == 2); const cce_position_list& bc_dci_locs = cell_params[0].common_locations[cfi - 1][2]; @@ -189,10 +189,10 @@ int test_6prbs() sched_interface::sched_args_t sched_args{}; TESTASSERT(cell_params[0].set_cfg(0, cell_cfg, sched_args)); - sf_cch_allocator2 pdcch; - sched_ue sched_ue{0x46, cell_params, ue_cfg}, sched_ue2{0x47, cell_params, ue_cfg}; - sf_cch_allocator2::alloc_result_t dci_result; - pdcch_mask_t result_pdcch_mask; + sf_cch_allocator pdcch; + sched_ue sched_ue{0x46, cell_params, ue_cfg}, sched_ue2{0x47, cell_params, ue_cfg}; + sf_cch_allocator::alloc_result_t dci_result; + pdcch_mask_t result_pdcch_mask; pdcch.init(cell_params[PCell_IDX]); TESTASSERT(pdcch.nof_allocs() == 0); From ad3a3af4904f932218d4d5f25168b2333394a4a2 Mon Sep 17 00:00:00 2001 From: David Rupprecht Date: Fri, 19 Mar 2021 15:04:57 +0100 Subject: [PATCH 60/64] Adding more info into pcap test --- lib/test/mac/mac_pcap_test.cc | 27 +++++++++++++++++++++++++-- 1 file changed, 25 insertions(+), 2 deletions(-) diff --git a/lib/test/mac/mac_pcap_test.cc b/lib/test/mac/mac_pcap_test.cc index c8fdd4c90..627adc412 100644 --- a/lib/test/mac/mac_pcap_test.cc +++ b/lib/test/mac/mac_pcap_test.cc @@ -40,6 +40,8 @@ void write_pcap_nr_thread_function(srslte::mac_pcap* pcap_handle, const std::arr int mac_pcap_eutra_test() { + auto& pcap_logger = srslog::fetch_basic_logger("PCAP"); + pcap_logger.info("In mac_pcap_eutra_test"); std::array tv = { 0x21, 0x08, 0x22, 0x80, 0x82, 0x1f, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, @@ -58,16 +60,17 @@ int mac_pcap_eutra_test() TESTASSERT(pcap_handle->open("mac_pcap_test.pcap") != SRSLTE_SUCCESS); // open again will fail std::vector writer_threads; + pcap_logger.info("Start writer_threads"); for (uint32_t i = 0; i < num_threads; i++) { writer_threads.push_back(std::thread(write_pcap_eutra_thread_function, pcap_handle.get(), tv, num_pdus_per_thread)); } - + pcap_logger.info("Wait for writer_threads to finish"); // wait for threads to finish for (std::thread& thread : writer_threads) { thread.join(); } - + pcap_logger.info("Close PCAP handle"); TESTASSERT(pcap_handle->close() == SRSLTE_SUCCESS); TESTASSERT(pcap_handle->close() != 0); // closing twice will fail @@ -76,6 +79,8 @@ int mac_pcap_eutra_test() int mac_pcap_nr_test() { + auto& pcap_logger = 
srslog::fetch_basic_logger("PCAP"); + pcap_logger.info("In mac_pcap_nr_test"); std::array tv = {0x42, 0x00, 0x08, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88}; uint32_t num_threads = 10; @@ -86,16 +91,19 @@ int mac_pcap_nr_test() TESTASSERT(pcap_handle->open("mac_pcap_nr_test.pcap") != SRSLTE_SUCCESS); // open again will fail std::vector writer_threads; + pcap_logger.info("Start writer_threads"); for (uint32_t i = 0; i < num_threads; i++) { writer_threads.push_back(std::thread(write_pcap_nr_thread_function, pcap_handle.get(), tv, num_pdus_per_thread)); } + pcap_logger.info("Wait for writer_threads to finish"); // wait for threads to finish for (std::thread& thread : writer_threads) { thread.join(); } + pcap_logger.info("Close PCAP handle"); TESTASSERT(pcap_handle->close() == SRSLTE_SUCCESS); TESTASSERT(pcap_handle->close() != 0); // closing twice will fail @@ -104,6 +112,21 @@ int mac_pcap_nr_test() int main() { + // Start the log backend. + srslog::init(); + + auto& mac_logger = srslog::fetch_basic_logger("MAC", false); + mac_logger.set_level(srslog::basic_levels::debug); + mac_logger.set_hex_dump_max_size(-1); + + auto& pcap_logger = srslog::fetch_basic_logger("PCAP", false); + pcap_logger.set_level(srslog::basic_levels::debug); + pcap_logger.set_hex_dump_max_size(-1); + + pcap_logger.info("Start mac_pcap_eutra_test"); TESTASSERT(mac_pcap_eutra_test() == SRSLTE_SUCCESS); + pcap_logger.info("Start mac_pcap_nr_test"); TESTASSERT(mac_pcap_nr_test() == SRSLTE_SUCCESS); + + srslog::flush(); } From 8d2197e751a8ce2d06f2445884f3a8c98e7ceaa8 Mon Sep 17 00:00:00 2001 From: Ismael Gomez Date: Wed, 17 Mar 2021 15:20:54 +0100 Subject: [PATCH 61/64] Increase MAC UE circular buffer --- srsenb/hdr/stack/mac/ue.h | 2 +- srsenb/src/stack/mac/ue.cc | 7 ++++--- 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/srsenb/hdr/stack/mac/ue.h b/srsenb/hdr/stack/mac/ue.h index 648a5cf38..f073d0ac1 100644 --- a/srsenb/hdr/stack/mac/ue.h +++ b/srsenb/hdr/stack/mac/ue.h @@ -59,7 +59,7 @@ private: srslog::basic_logger* logger; srslte::pdu_queue* shared_pdu_queue; - srslte::circular_array, SRSLTE_FDD_NOF_HARQ * 2> pdu_map; + srslte::circular_array, SRSLTE_FDD_NOF_HARQ * 8> pdu_map; }; class cc_buffer_handler diff --git a/srsenb/src/stack/mac/ue.cc b/srsenb/src/stack/mac/ue.cc index 4d53de2e0..9f9d4ac3f 100644 --- a/srsenb/src/stack/mac/ue.cc +++ b/srsenb/src/stack/mac/ue.cc @@ -275,6 +275,7 @@ uint8_t* ue::request_buffer(uint32_t tti, uint32_t ue_cc_idx, const uint32_t len uint8_t* pdu = nullptr; if (len > 0) { pdu = cc_buffers[ue_cc_idx].get_rx_used_buffers().request_pdu(tti_point(tti), len); + logger.info("request_buffer: allocated for rnti=0x%x, tti=%d", rnti, tti); } else { logger.error("UE buffers: Requesting buffer for zero bytes"); } @@ -423,9 +424,9 @@ void ue::deallocate_pdu(uint32_t tti, uint32_t ue_cc_idx) { std::unique_lock lock(rx_buffers_mutex); if (not cc_buffers[ue_cc_idx].get_rx_used_buffers().try_deallocate_pdu(tti_point(tti))) { - logger.warning("UE buffers: Null RX PDU pointer in deallocate_pdu for rnti=0x%x pid=%d cc_idx=%d", + logger.warning("UE buffers: Null RX PDU pointer in deallocate_pdu for rnti=0x%x tti=%d cc_idx=%d", rnti, - tti % nof_rx_harq_proc, + tti, ue_cc_idx); } } @@ -435,7 +436,7 @@ void ue::push_pdu(uint32_t tti, uint32_t ue_cc_idx, uint32_t len) std::unique_lock lock(rx_buffers_mutex); if (not cc_buffers[ue_cc_idx].get_rx_used_buffers().push_pdu(tti_point(tti), len)) { logger.warning( - "UE buffers: Failed to push RX PDU for rnti=0x%x pid=%d cc_idx=%d", rnti, tti % 
nof_rx_harq_proc, ue_cc_idx); + "UE buffers: Failed to push RX PDU for rnti=0x%x tti=%d cc_idx=%d", rnti, tti, ue_cc_idx); } } From 430b7b7b348dacfd9a6a26c9d30b533b3b7f254e Mon Sep 17 00:00:00 2001 From: Ismael Gomez Date: Wed, 17 Mar 2021 15:21:51 +0100 Subject: [PATCH 62/64] Remove unnecessary info --- srsenb/src/stack/mac/ue.cc | 1 - 1 file changed, 1 deletion(-) diff --git a/srsenb/src/stack/mac/ue.cc b/srsenb/src/stack/mac/ue.cc index 9f9d4ac3f..e4a3a45ff 100644 --- a/srsenb/src/stack/mac/ue.cc +++ b/srsenb/src/stack/mac/ue.cc @@ -275,7 +275,6 @@ uint8_t* ue::request_buffer(uint32_t tti, uint32_t ue_cc_idx, const uint32_t len uint8_t* pdu = nullptr; if (len > 0) { pdu = cc_buffers[ue_cc_idx].get_rx_used_buffers().request_pdu(tti_point(tti), len); - logger.info("request_buffer: allocated for rnti=0x%x, tti=%d", rnti, tti); } else { logger.error("UE buffers: Requesting buffer for zero bytes"); } From 638989ec4d144118265138bb5e1e942f97d95708 Mon Sep 17 00:00:00 2001 From: Francisco Date: Fri, 19 Mar 2021 17:08:57 +0000 Subject: [PATCH 63/64] sched - limit max coderate (via an assumed low cqi) for sib and rar allocations --- srsenb/src/stack/mac/sched_phy_ch/sched_dci.cc | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/srsenb/src/stack/mac/sched_phy_ch/sched_dci.cc b/srsenb/src/stack/mac/sched_phy_ch/sched_dci.cc index adcd4034f..c65118400 100644 --- a/srsenb/src/stack/mac/sched_phy_ch/sched_dci.cc +++ b/srsenb/src/stack/mac/sched_phy_ch/sched_dci.cc @@ -179,7 +179,8 @@ int generate_ra_bc_dci_format1a_common(srslte_dci_dl_t& dci, const sched_cell_params_t& cell_params, uint32_t current_cfi) { - static const uint32_t Qm = 2; + static const uint32_t Qm = 2, bc_rar_cqi = 5; + static const float max_ctrl_coderate = std::min(srslte_cqi_to_coderate(bc_rar_cqi + 1, false), 0.932F * Qm); // Calculate I_tbs for this TBS int tbs = static_cast(req_bytes) * 8; @@ -217,7 +218,7 @@ int generate_ra_bc_dci_format1a_common(srslte_dci_dl_t& dci, // Compute effective code rate and verify it doesn't exceed max code rate uint32_t nof_re = cell_params.get_dl_nof_res(tti_tx_dl, dci, current_cfi); - if (srslte_coderate(tbs, nof_re) >= 0.932F * Qm) { + if (srslte_coderate(tbs, nof_re) >= max_ctrl_coderate) { return -1; } From df8ec4ddd504ab7fa9948e87bc9a6938a64bcd58 Mon Sep 17 00:00:00 2001 From: Francisco Date: Fri, 19 Mar 2021 19:52:13 +0000 Subject: [PATCH 64/64] reduce max coderate for SIB allocations --- srsenb/src/stack/mac/sched_phy_ch/sched_dci.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/srsenb/src/stack/mac/sched_phy_ch/sched_dci.cc b/srsenb/src/stack/mac/sched_phy_ch/sched_dci.cc index c65118400..51b636cd1 100644 --- a/srsenb/src/stack/mac/sched_phy_ch/sched_dci.cc +++ b/srsenb/src/stack/mac/sched_phy_ch/sched_dci.cc @@ -179,7 +179,7 @@ int generate_ra_bc_dci_format1a_common(srslte_dci_dl_t& dci, const sched_cell_params_t& cell_params, uint32_t current_cfi) { - static const uint32_t Qm = 2, bc_rar_cqi = 5; + static const uint32_t Qm = 2, bc_rar_cqi = 4; static const float max_ctrl_coderate = std::min(srslte_cqi_to_coderate(bc_rar_cqi + 1, false), 0.932F * Qm); // Calculate I_tbs for this TBS