Rename functions that contain the unsafe word to nolock.

The term "unsafe" does not clearly convey that the function does not lock the shared data, and it sounds "dangerous", so use a more explicit term.
master
faluco 3 years ago committed by faluco
parent c7e2038904
commit fa351cd285

@ -55,7 +55,7 @@ struct choice_storage_t {
}
template <typename U>
void destroy_unsafe()
void destroy_unchecked()
{
get_unchecked<U>().~U();
};
@ -71,7 +71,7 @@ struct CopyCtorVisitor {
template <typename T>
void operator()(const T& t)
{
c->construct_unsafe(t);
c->construct_unchecked(t);
}
C* c;
};
@ -82,18 +82,18 @@ struct MoveCtorVisitor {
template <typename T>
void operator()(T&& t)
{
c->construct_unsafe(std::move(t));
c->construct_unchecked(std::move(t));
}
C* c;
};
template <typename C>
struct DtorUnsafeVisitor {
explicit DtorUnsafeVisitor(C* c_) : c(c_) {}
struct DtorUncheckVisitor {
explicit DtorUncheckVisitor(C* c_) : c(c_) {}
template <typename T>
void operator()(T& t)
{
c->template destroy_unsafe<T>();
c->template destroy_unchecked<T>();
}
C* c;
};
@ -110,12 +110,12 @@ struct tagged_union_t
std::size_t type_id;
using base_t::destroy_unsafe;
using base_t::destroy_unchecked;
using base_t::get_buffer;
using base_t::get_unchecked;
template <typename U, typename... Args2>
void construct_emplace_unsafe(Args2&&... args)
void construct_emplace_unchecked(Args2&&... args)
{
using U2 = typename std::decay<U>::type;
static_assert(type_list_contains<U2, Args...>(),
@ -125,7 +125,7 @@ struct tagged_union_t
}
template <typename U>
void construct_unsafe(U&& u)
void construct_unchecked(U&& u)
{
using U2 = typename std::decay<U>::type;
static_assert(type_list_contains<U2, Args...>(),
@ -134,11 +134,11 @@ struct tagged_union_t
new (get_buffer()) U2(std::forward<U>(u));
}
void copy_unsafe(const this_type& other) { visit(CopyCtorVisitor<this_type>{this}, other); }
void copy_unchecked(const this_type& other) { visit(CopyCtorVisitor<this_type>{this}, other); }
void move_unsafe(this_type&& other) { visit(MoveCtorVisitor<this_type>{this}, other); }
void move_unchecked(this_type&& other) { visit(MoveCtorVisitor<this_type>{this}, other); }
void dtor_unsafe() { visit(choice_details::DtorUnsafeVisitor<base_t>{this}, *this); }
void dtor_unchecked() { visit(choice_details::DtorUncheckVisitor<base_t>{this}, *this); }
size_t get_type_idx() const { return type_id; }
@ -180,51 +180,51 @@ public:
typename = typename std::enable_if<std::is_constructible<default_type, Args2...>::value>::type>
explicit choice_t(Args2&&... args) noexcept
{
base_t::template construct_emplace_unsafe<default_type>(std::forward<Args2>(args)...);
base_t::template construct_emplace_unchecked<default_type>(std::forward<Args2>(args)...);
}
choice_t(const choice_t<Args...>& other) noexcept { base_t::copy_unsafe(other); }
choice_t(const choice_t<Args...>& other) noexcept { base_t::copy_unchecked(other); }
choice_t(choice_t<Args...>&& other) noexcept { base_t::move_unsafe(std::move(other)); }
choice_t(choice_t<Args...>&& other) noexcept { base_t::move_unchecked(std::move(other)); }
template <typename U, typename = enable_if_can_hold<U> >
choice_t(U&& u) noexcept
{
base_t::construct_unsafe(std::forward<U>(u));
base_t::construct_unchecked(std::forward<U>(u));
}
~choice_t() { base_t::dtor_unsafe(); }
~choice_t() { base_t::dtor_unchecked(); }
template <typename U, typename = enable_if_can_hold<U> >
choice_t& operator=(U&& u) noexcept
{
if (not base_t::template is<U>()) {
base_t::dtor_unsafe();
base_t::dtor_unchecked();
}
base_t::construct_unsafe(std::forward<U>(u));
base_t::construct_unchecked(std::forward<U>(u));
return *this;
}
template <typename U, typename... Args2>
void emplace(Args2&&... args) noexcept
{
base_t::dtor_unsafe();
base_t::template construct_emplace_unsafe<U>(std::forward<Args2>(args)...);
base_t::dtor_unchecked();
base_t::template construct_emplace_unchecked<U>(std::forward<Args2>(args)...);
}
choice_t& operator=(const choice_t& other) noexcept
{
if (this != &other) {
base_t::dtor_unsafe();
base_t::copy_unsafe(other);
base_t::dtor_unchecked();
base_t::copy_unchecked(other);
}
return *this;
}
choice_t& operator=(choice_t&& other) noexcept
{
base_t::dtor_unsafe();
base_t::move_unsafe(std::move(other));
base_t::dtor_unchecked();
base_t::move_unchecked(std::move(other));
return *this;
}

@ -121,7 +121,7 @@ public:
void* node = grow_pool.allocate_node();
if (grow_pool.size() < batch_threshold) {
allocate_batch_in_background_unlocked();
allocate_batch_in_background_nolock();
}
return node;
}
@ -146,7 +146,7 @@ public:
}
private:
void allocate_batch_in_background_unlocked()
void allocate_batch_in_background_nolock()
{
if (state->dispatched) {
// new batch allocation already ongoing

@ -35,7 +35,6 @@ namespace srsran {
template <typename myobj>
class block_queue
{
public:
// Callback functions for mutexed operations inside pop/push methods
class call_mutexed_itf
@ -107,7 +106,7 @@ public:
bool full()
{ // queue is full?
pthread_mutex_lock(&mutex);
bool ret = not check_queue_space_unlocked(false);
bool ret = not check_queue_space_nolock(false);
pthread_mutex_unlock(&mutex);
return ret;
}
@ -163,7 +162,7 @@ private:
return ret;
}
bool check_queue_space_unlocked(bool block)
bool check_queue_space_nolock(bool block)
{
num_threads++;
if (capacity > 0) {
@ -190,7 +189,7 @@ private:
return std::move(value);
}
pthread_mutex_lock(&mutex);
bool ret = check_queue_space_unlocked(block);
bool ret = check_queue_space_nolock(block);
if (ret) {
if (mutexed_callback) {
mutexed_callback->pushing(value);
@ -210,7 +209,7 @@ private:
return false;
}
pthread_mutex_lock(&mutex);
bool ret = check_queue_space_unlocked(block);
bool ret = check_queue_space_nolock(block);
if (ret) {
if (mutexed_callback) {
mutexed_callback->pushing(value);

@ -392,7 +392,7 @@ private:
void update_notification_ack_info(uint32_t rlc_sn);
void debug_state();
void empty_queue_unsafe();
void empty_queue_nolock();
int required_buffer_size(const rlc_amd_retx_t& retx);
void retransmit_pdu(uint32_t sn);

@ -489,7 +489,7 @@ bool rf_uhd_rx_wait_lo_locked(void* h)
return is_locked;
}
static inline int rf_uhd_start_rx_stream_unsafe(rf_uhd_handler_t* handler)
static inline int rf_uhd_start_rx_stream_nolock(rf_uhd_handler_t* handler)
{
// Check if stream was not created or started
if (not handler->uhd->is_rx_ready() or handler->rx_stream_enabled) {
@ -512,10 +512,10 @@ int rf_uhd_start_rx_stream(void* h, bool now)
rf_uhd_handler_t* handler = (rf_uhd_handler_t*)h;
std::unique_lock<std::mutex> lock(handler->rx_mutex);
return rf_uhd_start_rx_stream_unsafe(handler);
return rf_uhd_start_rx_stream_nolock(handler);
}
static inline int rf_uhd_stop_rx_stream_unsafe(rf_uhd_handler_t* handler)
static inline int rf_uhd_stop_rx_stream_nolock(rf_uhd_handler_t* handler)
{
// Check if stream was created or stream was not started
if (not handler->uhd->is_rx_ready() or not handler->rx_stream_enabled) {
@ -538,7 +538,7 @@ int rf_uhd_stop_rx_stream(void* h)
rf_uhd_handler_t* handler = (rf_uhd_handler_t*)h;
std::unique_lock<std::mutex> lock(handler->rx_mutex);
if (rf_uhd_stop_rx_stream_unsafe(handler) < SRSRAN_SUCCESS) {
if (rf_uhd_stop_rx_stream_nolock(handler) < SRSRAN_SUCCESS) {
return SRSRAN_ERROR;
}
@ -937,7 +937,7 @@ int rf_uhd_close(void* h)
return SRSRAN_SUCCESS;
}
static inline void rf_uhd_set_master_clock_rate_unsafe(rf_uhd_handler_t* handler, double rate)
static inline void rf_uhd_set_master_clock_rate_nolock(rf_uhd_handler_t* handler, double rate)
{
// Set master clock rate if it is allowed and change is required
if (handler->dynamic_master_rate and handler->current_master_clock != rate) {
@ -985,14 +985,14 @@ double rf_uhd_set_rx_srate(void* h, double freq)
// Stop RX streamer
if (RF_UHD_IMP_PROHIBITED_STOP_START.count(handler->devname) == 0) {
if (rf_uhd_stop_rx_stream_unsafe(handler) != SRSRAN_SUCCESS) {
if (rf_uhd_stop_rx_stream_nolock(handler) != SRSRAN_SUCCESS) {
return SRSRAN_ERROR;
}
}
// Set master clock rate
if (fmod(handler->current_master_clock, freq) > 0.0) {
rf_uhd_set_master_clock_rate_unsafe(handler, 4 * freq);
rf_uhd_set_master_clock_rate_nolock(handler, 4 * freq);
}
if (handler->nof_rx_channels > 1) {
@ -1043,7 +1043,7 @@ double rf_uhd_set_tx_srate(void* h, double freq)
// Set master clock rate
if (fmod(handler->current_master_clock, freq) > 0.0) {
rf_uhd_set_master_clock_rate_unsafe(handler, 4 * freq);
rf_uhd_set_master_clock_rate_nolock(handler, 4 * freq);
}
if (handler->nof_tx_channels > 1) {
@ -1258,7 +1258,7 @@ int rf_uhd_recv_with_time_multi(void* h,
// Start stream if not started
if (not handler->rx_stream_enabled) {
if (rf_uhd_start_rx_stream_unsafe(handler) != SRSRAN_SUCCESS) {
if (rf_uhd_start_rx_stream_nolock(handler) != SRSRAN_SUCCESS) {
return SRSRAN_ERROR;
}
}
@ -1303,7 +1303,7 @@ int rf_uhd_recv_with_time_multi(void* h,
if (RF_UHD_IMP_PROHIBITED_STOP_START.count(handler->devname) == 0) {
// Stop Rx stream
rf_uhd_stop_rx_stream_unsafe(handler);
rf_uhd_stop_rx_stream_nolock(handler);
}
return -1;

@ -327,7 +327,7 @@ bool rlc_am_lte::rlc_am_lte_tx::configure(const rlc_config_t& cfg_)
}
// make sure Tx queue is empty before attempting to resize
empty_queue_unsafe();
empty_queue_nolock();
tx_sdu_queue.resize(cfg_.tx_queue_length);
tx_enabled = true;
@ -339,7 +339,7 @@ void rlc_am_lte::rlc_am_lte_tx::stop()
{
std::lock_guard<std::mutex> lock(mutex);
empty_queue_unsafe();
empty_queue_nolock();
tx_enabled = false;
@ -372,10 +372,10 @@ void rlc_am_lte::rlc_am_lte_tx::stop()
void rlc_am_lte::rlc_am_lte_tx::empty_queue()
{
std::lock_guard<std::mutex> lock(mutex);
empty_queue_unsafe();
empty_queue_nolock();
}
void rlc_am_lte::rlc_am_lte_tx::empty_queue_unsafe()
void rlc_am_lte::rlc_am_lte_tx::empty_queue_nolock()
{
// deallocate all SDUs in transmit queue
while (tx_sdu_queue.size() > 0) {

@ -80,15 +80,15 @@ int test_tagged_union()
{
using srsran::choice_details::tagged_union_t;
tagged_union_t<char, int, double, C> u;
u.construct_unsafe(5);
u.construct_unchecked(5);
TESTASSERT(u.is<int>());
TESTASSERT(u.get_unchecked<int>() == 5);
u.destroy_unsafe<int>();
u.destroy_unchecked<int>();
TESTASSERT(C::counter == 0);
u.construct_unsafe<C>(C{});
u.construct_unchecked<C>(C{});
TESTASSERT(C::counter == 1);
u.destroy_unsafe<C>();
u.destroy_unchecked<C>();
TESTASSERT(C::counter == 0);
return SRSRAN_SUCCESS;

@ -73,8 +73,8 @@ private:
uint16_t alloc_ue(uint32_t enb_cc_idx);
// internal misc helpers
bool is_rnti_valid_unsafe(uint16_t rnti);
bool is_rnti_active_unsafe(uint16_t rnti);
bool is_rnti_valid_nolock(uint16_t rnti);
bool is_rnti_active_nolock(uint16_t rnti);
// handle UCI data from either PUCCH or PUSCH
bool handle_uci_data(const uint16_t rnti, const srsran_uci_cfg_nr_t& cfg, const srsran_uci_value_nr_t& value);

@ -195,7 +195,7 @@ uint16_t mac_nr::alloc_ue(uint32_t enb_cc_idx)
// Pre-check if rnti is valid
{
srsran::rwlock_read_guard read_lock(rwlock);
if (not is_rnti_valid_unsafe(rnti)) {
if (not is_rnti_valid_nolock(rnti)) {
continue;
}
}
@ -205,7 +205,7 @@ uint16_t mac_nr::alloc_ue(uint32_t enb_cc_idx)
// Add UE to rnti map
srsran::rwlock_write_guard rw_lock(rwlock);
if (not is_rnti_valid_unsafe(rnti)) {
if (not is_rnti_valid_nolock(rnti)) {
continue;
}
auto ret = ue_db.insert(rnti, std::move(ue_ptr));
@ -223,7 +223,7 @@ uint16_t mac_nr::alloc_ue(uint32_t enb_cc_idx)
int mac_nr::remove_ue(uint16_t rnti)
{
srsran::rwlock_write_guard lock(rwlock);
if (is_rnti_active_unsafe(rnti)) {
if (is_rnti_active_nolock(rnti)) {
ue_db.erase(rnti);
} else {
logger.error("User rnti=0x%x not found", rnti);
@ -233,7 +233,7 @@ int mac_nr::remove_ue(uint16_t rnti)
return SRSRAN_SUCCESS;
}
bool mac_nr::is_rnti_valid_unsafe(uint16_t rnti)
bool mac_nr::is_rnti_valid_nolock(uint16_t rnti)
{
if (not started) {
logger.info("RACH ignored as eNB is being shutdown");
@ -250,7 +250,7 @@ bool mac_nr::is_rnti_valid_unsafe(uint16_t rnti)
return true;
}
bool mac_nr::is_rnti_active_unsafe(uint16_t rnti)
bool mac_nr::is_rnti_active_nolock(uint16_t rnti)
{
if (not ue_db.contains(rnti)) {
logger.error("User rnti=0x%x not found", rnti);
@ -292,7 +292,7 @@ int mac_nr::get_dl_sched(const srsran_slot_cfg_t& slot_cfg, dl_sched_t& dl_sched
for (pdsch_t& pdsch : dl_sched.pdsch) {
if (pdsch.sch.grant.rnti_type == srsran_rnti_type_c) {
uint16_t rnti = pdsch.sch.grant.rnti;
if (not is_rnti_active_unsafe(rnti)) {
if (not is_rnti_active_nolock(rnti)) {
continue;
}
for (auto& tb_data : pdsch.data) {
@ -372,7 +372,7 @@ int mac_nr::pusch_info(const srsran_slot_cfg_t& slot_cfg, mac_interface_phy_nr::
auto process_pdu_task = [this, rnti](srsran::unique_byte_buffer_t& pdu) {
srsran::rwlock_read_guard lock(rwlock);
if (is_rnti_active_unsafe(rnti)) {
if (is_rnti_active_nolock(rnti)) {
ue_db[rnti]->process_pdu(std::move(pdu));
} else {
logger.debug("Discarding PDU rnti=0x%x", rnti);

@ -32,16 +32,16 @@ public:
uint32_t get_buffer_len();
void set_tti(uint32_t tti);
void set_cfo_unlocked(float cfo);
void set_cfo_nolock(float cfo);
float get_ref_cfo() const;
// Functions to set configuration.
// Warning: all these functions are unlocked and must be called while the worker is not processing data
void reset_cell_unlocked();
bool set_cell_unlocked(srsran_cell_t cell_);
void set_tdd_config_unlocked(srsran_tdd_config_t config);
void set_config_unlocked(const srsran::phy_cfg_t& phy_cfg);
void upd_config_dci_unlocked(const srsran_dci_cfg_t& dci_cfg);
void reset_cell_nolock();
bool set_cell_nolock(srsran_cell_t cell_);
void set_tdd_config_nolock(srsran_tdd_config_t config);
void set_config_nolock(const srsran::phy_cfg_t& phy_cfg);
void upd_config_dci_nolock(const srsran_dci_cfg_t& dci_cfg);
void set_uci_periodic_cqi(srsran_uci_data_t* uci_data);

@ -36,18 +36,18 @@ public:
sf_worker(uint32_t max_prb, phy_common* phy_, srslog::basic_logger& logger);
virtual ~sf_worker();
void reset_cell_unlocked(uint32_t cc_idx);
bool set_cell_unlocked(uint32_t cc_idx, srsran_cell_t cell_);
void reset_cell_nolock(uint32_t cc_idx);
bool set_cell_nolock(uint32_t cc_idx, srsran_cell_t cell_);
/* Functions used by main PHY thread */
cf_t* get_buffer(uint32_t cc_idx, uint32_t antenna_idx);
uint32_t get_buffer_len();
void set_context(const srsran::phy_common_interface::worker_context_t& w_ctx);
void set_prach(cf_t* prach_ptr, float prach_power);
void set_cfo_unlocked(const uint32_t& cc_idx, float cfo);
void set_cfo_nolock(const uint32_t& cc_idx, float cfo);
void set_tdd_config_unlocked(srsran_tdd_config_t config);
void set_config_unlocked(uint32_t cc_idx, const srsran::phy_cfg_t& phy_cfg);
void set_tdd_config_nolock(srsran_tdd_config_t config);
void set_config_nolock(uint32_t cc_idx, const srsran::phy_cfg_t& phy_cfg);
///< Methods for plotting called from GUI thread
int read_ce_abs(float* ce_abs, uint32_t tx_antenna, uint32_t rx_antenna);

@ -76,7 +76,7 @@ private:
bool calc_is_new_transmission(mac_interface_phy_lte::mac_grant_dl_t grant);
// Internal function to reset process, caller must hold the mutex
void reset_unsafe();
void reset_nolock();
std::mutex mutex;
@ -109,14 +109,14 @@ private:
dl_sps dl_sps_assig;
std::vector<dl_harq_process> proc;
dl_harq_process bcch_proc;
demux* demux_unit = nullptr;
srslog::basic_logger& logger;
srsran::mac_pcap* pcap = nullptr;
ue_rnti* rntis = nullptr;
uint16_t last_temporal_crnti = 0;
int si_window_start = 0;
std::vector<dl_harq_process> proc;
dl_harq_process bcch_proc;
demux* demux_unit = nullptr;
srslog::basic_logger& logger;
srsran::mac_pcap* pcap = nullptr;
ue_rnti* rntis = nullptr;
uint16_t last_temporal_crnti = 0;
int si_window_start = 0;
std::mutex retx_cnt_mutex = {};

@ -57,7 +57,7 @@ public:
void print_logical_channel_state(const std::string& info);
private:
uint8_t* pdu_get_unsafe(srsran::byte_buffer_t* payload, uint32_t pdu_sz);
uint8_t* pdu_get_nolock(srsran::byte_buffer_t* payload, uint32_t pdu_sz);
bool pdu_move_to_msg3(uint32_t pdu_sz);
uint32_t allocate_sdu(uint32_t lcid, srsran::sch_pdu* pdu, int max_sdu_sz);
bool sched_sdu(srsran::logical_channel_config_t* ch, int* sdu_space, int max_sdu_sz);

@ -78,7 +78,7 @@ private:
void response_error();
void complete();
bool contention_resolution_id_received_unsafe(uint64_t rx_contention_id);
bool contention_resolution_id_received_nolock(uint64_t rx_contention_id);
// Buffer to receive RAR PDU
static const uint32_t MAX_RAR_PDU_LEN = 2048;

@ -40,7 +40,7 @@ public:
bool sr_opportunity(uint32_t tti, uint32_t sr_id, bool meas_gap, bool ul_sch_tx);
private:
void reset_unsafe();
void reset_nolock();
int sr_counter = 0;
bool is_pending_sr = false;

@ -112,15 +112,15 @@ void cc_worker::reset()
{
// constructor sets defaults
srsran::phy_cfg_t empty_cfg;
set_config_unlocked(empty_cfg);
set_config_nolock(empty_cfg);
}
void cc_worker::reset_cell_unlocked()
void cc_worker::reset_cell_nolock()
{
cell_initiated = false;
}
bool cc_worker::set_cell_unlocked(srsran_cell_t cell_)
bool cc_worker::set_cell_nolock(srsran_cell_t cell_)
{
if (cell.id != cell_.id || !cell_initiated) {
cell = cell_;
@ -171,7 +171,7 @@ void cc_worker::set_tti(uint32_t tti)
sf_cfg_ul.shortened = false;
}
void cc_worker::set_cfo_unlocked(float cfo)
void cc_worker::set_cfo_nolock(float cfo)
{
ue_ul_cfg.cfo_value = cfo;
}
@ -181,7 +181,7 @@ float cc_worker::get_ref_cfo() const
return ue_dl.chest_res.cfo;
}
void cc_worker::set_tdd_config_unlocked(srsran_tdd_config_t config)
void cc_worker::set_tdd_config_nolock(srsran_tdd_config_t config)
{
sf_cfg_dl.tdd_config = config;
sf_cfg_ul.tdd_config = config;
@ -874,7 +874,7 @@ void cc_worker::set_uci_ack(srsran_uci_data_t* uci_data,
/* Translates RRC structs into PHY structs
*/
void cc_worker::set_config_unlocked(const srsran::phy_cfg_t& phy_cfg)
void cc_worker::set_config_nolock(const srsran::phy_cfg_t& phy_cfg)
{
// Save configuration
ue_dl_cfg.cfg = phy_cfg.dl_cfg;
@ -883,7 +883,7 @@ void cc_worker::set_config_unlocked(const srsran::phy_cfg_t& phy_cfg)
phy->set_pdsch_cfg(&ue_dl_cfg.cfg.pdsch);
}
void cc_worker::upd_config_dci_unlocked(const srsran_dci_cfg_t& dci_cfg)
void cc_worker::upd_config_dci_nolock(const srsran_dci_cfg_t& dci_cfg)
{
ue_dl_cfg.cfg.dci = dci_cfg;
}

@ -64,15 +64,15 @@ sf_worker::~sf_worker()
}
}
void sf_worker::reset_cell_unlocked(uint32_t cc_idx)
void sf_worker::reset_cell_nolock(uint32_t cc_idx)
{
cc_workers[cc_idx]->reset_cell_unlocked();
cc_workers[cc_idx]->reset_cell_nolock();
}
bool sf_worker::set_cell_unlocked(uint32_t cc_idx, srsran_cell_t cell_)
bool sf_worker::set_cell_nolock(uint32_t cc_idx, srsran_cell_t cell_)
{
if (cc_idx < cc_workers.size()) {
if (!cc_workers[cc_idx]->set_cell_unlocked(cell_)) {
if (!cc_workers[cc_idx]->set_cell_nolock(cell_)) {
Error("Setting cell for cc=%d", cc_idx);
return false;
}
@ -120,26 +120,26 @@ void sf_worker::set_prach(cf_t* prach_ptr_, float prach_power_)
prach_power = prach_power_;
}
void sf_worker::set_cfo_unlocked(const uint32_t& cc_idx, float cfo)
void sf_worker::set_cfo_nolock(const uint32_t& cc_idx, float cfo)
{
cc_workers[cc_idx]->set_cfo_unlocked(cfo);
cc_workers[cc_idx]->set_cfo_nolock(cfo);
}
void sf_worker::set_tdd_config_unlocked(srsran_tdd_config_t config)
void sf_worker::set_tdd_config_nolock(srsran_tdd_config_t config)
{
for (auto& cc_worker : cc_workers) {
cc_worker->set_tdd_config_unlocked(config);
cc_worker->set_tdd_config_nolock(config);
}
tdd_config = config;
}
void sf_worker::set_config_unlocked(uint32_t cc_idx, const srsran::phy_cfg_t& phy_cfg)
void sf_worker::set_config_nolock(uint32_t cc_idx, const srsran::phy_cfg_t& phy_cfg)
{
if (cc_idx < cc_workers.size()) {
cc_workers[cc_idx]->set_config_unlocked(phy_cfg);
cc_workers[cc_idx]->set_config_nolock(phy_cfg);
if (cc_idx > 0) {
// Update DCI config for PCell
cc_workers[0]->upd_config_dci_unlocked(phy_cfg.dl_cfg.dci);
cc_workers[0]->upd_config_dci_nolock(phy_cfg.dl_cfg.dci);
}
} else {
Error("Setting config for cc=%d; Invalid cc_idx", cc_idx);

@ -78,7 +78,7 @@ sf_worker* worker_pool::wait_worker(uint32_t tti)
uint32_t worker_id = w->get_id();
for (uint32_t cc_idx = 0; cc_idx < SRSRAN_MAX_CARRIERS; cc_idx++) {
if (phy_cfg_stash[cc_idx].is_pending(worker_id)) {
w->set_config_unlocked(cc_idx, phy_cfg_stash[cc_idx].get_cfg(worker_id));
w->set_config_nolock(cc_idx, phy_cfg_stash[cc_idx].get_cfg(worker_id));
}
}

@ -527,10 +527,10 @@ bool phy::set_scell(srsran_cell_t cell_info, uint32_t cc_idx, uint32_t earfcn)
if (w) {
// Reset secondary serving cell configuration, this needs to be done when the sf_worker is reserved to prevent
// resetting the cell while it is working
w->reset_cell_unlocked(cc_idx);
w->reset_cell_nolock(cc_idx);
// Set the new cell
w->set_cell_unlocked(cc_idx, cell_info);
w->set_cell_nolock(cc_idx, cell_info);
// Release the new worker, it should not start processing until the SCell state is set to configured
w->release();
@ -578,7 +578,7 @@ void phy::set_config_tdd(srsran_tdd_config_t& tdd_config_)
// set_tdd_config is not protected so run when worker is finished
lte::sf_worker* w = lte_workers.wait_worker_id(i);
if (w) {
w->set_tdd_config_unlocked(tdd_config);
w->set_tdd_config_nolock(tdd_config);
w->release();
}
}

@ -517,7 +517,7 @@ void sync::run_camping_in_sync_state(lte::sf_worker* lte_worker,
// Set CFO for all Carriers
for (uint32_t cc = 0; cc < worker_com->args->nof_lte_carriers; cc++) {
lte_worker->set_cfo_unlocked(cc, get_tx_cfo());
lte_worker->set_cfo_nolock(cc, get_tx_cfo());
worker_com->update_cfo_measurement(cc, cfo);
}
@ -853,14 +853,14 @@ bool sync::set_cell(float cfo_in)
// Reset cell configuration
for (uint32_t i = 0; i < worker_com->args->nof_phy_threads; i++) {
(*lte_worker_pool)[i]->reset_cell_unlocked(0);
(*lte_worker_pool)[i]->reset_cell_nolock(0);
}
bool success = true;
for (uint32_t i = 0; i < worker_com->args->nof_phy_threads; i++) {
lte::sf_worker* w = lte_worker_pool->wait_worker_id(i);
if (w) {
success &= w->set_cell_unlocked(0, cell.get());
success &= w->set_cell_nolock(0, cell.get());
w->release();
}
}

@ -207,10 +207,10 @@ bool dl_harq_entity::dl_harq_process::dl_tb_process::init(int pid, dl_harq_entit
void dl_harq_entity::dl_harq_process::dl_tb_process::reset()
{
std::lock_guard<std::mutex> lock(mutex);
reset_unsafe();
reset_nolock();
}
void dl_harq_entity::dl_harq_process::dl_tb_process::reset_unsafe()
void dl_harq_entity::dl_harq_process::dl_tb_process::reset_nolock()
{
bzero(&cur_grant, sizeof(mac_interface_phy_lte::mac_grant_dl_t));
is_first_tb = true;
@ -300,7 +300,7 @@ void dl_harq_entity::dl_harq_process::dl_tb_process::new_grant_dl(mac_interface_
n_retx > RESET_DUPLICATE_TIMEOUT ? "yes" : "no");
if (n_retx > RESET_DUPLICATE_TIMEOUT) {
// reset without trying to acquire the mutex again
reset_unsafe();
reset_nolock();
}
}
@ -360,7 +360,7 @@ void dl_harq_entity::dl_harq_process::dl_tb_process::tb_decoded(mac_interface_ph
if (ack && is_bcch) {
// reset without trying to acquire the mutex again
reset_unsafe();
reset_nolock();
}
mutex.unlock();

@ -99,11 +99,11 @@ srsran::ul_sch_lcid bsr_format_convert(bsr_proc::bsr_format_t format)
uint8_t* mux::pdu_get(srsran::byte_buffer_t* payload, uint32_t pdu_sz)
{
std::lock_guard<std::mutex> lock(mutex);
return pdu_get_unsafe(payload, pdu_sz);
return pdu_get_nolock(payload, pdu_sz);
}
// Multiplexing and logical channel priorization as defined in Section 5.4.3
uint8_t* mux::pdu_get_unsafe(srsran::byte_buffer_t* payload, uint32_t pdu_sz)
uint8_t* mux::pdu_get_nolock(srsran::byte_buffer_t* payload, uint32_t pdu_sz)
{
// Logical Channel Procedure
payload->clear();
@ -363,7 +363,7 @@ uint8_t* mux::msg3_get(srsran::byte_buffer_t* payload, uint32_t pdu_sz)
std::lock_guard<std::mutex> lock(mutex);
if (pdu_sz < msg3_buff.get_tailroom()) {
if (msg3_is_empty()) {
if (!pdu_get_unsafe(&msg3_buff, pdu_sz)) {
if (!pdu_get_nolock(&msg3_buff, pdu_sz)) {
Error("Moving PDU from Mux unit to Msg3 buffer");
return NULL;
}

@ -152,9 +152,9 @@ void ra_proc::state_pdcch_setup()
rInfo("seq=%d, ra-rnti=0x%x, ra-tti=%d, f_id=%d", sel_preamble.load(), ra_rnti, info.tti_ra, info.f_id);
srsran::console(
"Random Access Transmission: seq=%d, tti=%d, ra-rnti=0x%x\n", sel_preamble.load(), info.tti_ra, ra_rnti);
rar_window_st = ra_tti + 3;
rar_window_st = ra_tti + 3;
rntis->set_rar_rnti(ra_rnti);
state = RESPONSE_RECEPTION;
state = RESPONSE_RECEPTION;
} else {
rDebug("preamble not yet transmitted");
}
@ -219,7 +219,7 @@ void ra_proc::initialization()
transmitted_contention_id = 0;
preambleTransmissionCounter = 1;
mux_unit->msg3_flush();
backoff_param_ms = 0;
backoff_param_ms = 0;
transmitted_crnti = 0;
resource_selection();
}
@ -536,14 +536,14 @@ void ra_proc::timer_expired(uint32_t timer_id)
*/
bool ra_proc::contention_resolution_id_received(uint64_t rx_contention_id)
{
task_queue.push([this, rx_contention_id]() { contention_resolution_id_received_unsafe(rx_contention_id); });
task_queue.push([this, rx_contention_id]() { contention_resolution_id_received_nolock(rx_contention_id); });
return (transmitted_contention_id == rx_contention_id);
}
/*
* Performs the actions defined in 5.1.5 for Temporal C-RNTI Contention Resolution
*/
bool ra_proc::contention_resolution_id_received_unsafe(uint64_t rx_contention_id)
bool ra_proc::contention_resolution_id_received_nolock(uint64_t rx_contention_id)
{
bool uecri_successful = false;

@ -35,10 +35,10 @@ int32_t proc_sr_nr::init(mac_interface_sr_nr* mac_, phy_interface_mac_nr* phy_,
void proc_sr_nr::reset()
{
std::lock_guard<std::mutex> lock(mutex);
reset_unsafe();
reset_nolock();
}
void proc_sr_nr::reset_unsafe()
void proc_sr_nr::reset_nolock()
{
is_pending_sr = false;
}
@ -92,7 +92,7 @@ void proc_sr_nr::step(uint32_t tti)
// 2> initiate a Random Access procedure (see clause 5.1) on the SpCell and cancel the pending SR.
logger.info("SR: PUCCH not configured. Starting RA procedure");
mac->start_ra();
reset_unsafe();
reset_nolock();
return;
}
@ -110,7 +110,7 @@ void proc_sr_nr::step(uint32_t tti)
// ... TODO
mac->start_ra();
reset_unsafe();
reset_nolock();
}
}

Loading…
Cancel
Save