Protect access to shared grants (#1117)

* Protect access to shared grants

* Set correct number of carriers for DL

* Fix previous issues

* Address comments
branch master
Ismael Gomez committed by GitHub
parent 8e837e173d
commit 9a264b62c4
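Editor's note: the commit replaces direct access to phy_common's shared ul_grants/dl_grants arrays with mutex-protected accessors. Workers copy a TTI's grant list out under grant_mutex, operate on the private copy, and write it back when done. Below is a minimal sketch of that pattern; the names (shared_grants, PIPELINE_DEPTH) are illustrative and not the srsLTE API, and unlike the commit's get_ul_grants(), which returns a const reference, this getter returns by value so the copy itself happens while the lock is held.

#include <array>
#include <cstdint>
#include <mutex>
#include <vector>

struct ul_sched_t {
  // stand-in for one carrier's UL scheduling result
};
using ul_sched_list_t = std::vector<ul_sched_t>;

constexpr uint32_t PIPELINE_DEPTH = 8; // plays the role of TTIMOD_SZ (illustrative value)

class shared_grants
{
public:
  // Snapshot the grants for a TTI; copying under the lock keeps readers safe.
  ul_sched_list_t get(uint32_t tti)
  {
    std::lock_guard<std::mutex> lock(mutex_);
    return grants_[tti % PIPELINE_DEPTH];
  }

  // Publish a worker's modified snapshot back into the shared ring.
  void set(uint32_t tti, const ul_sched_list_t& list)
  {
    std::lock_guard<std::mutex> lock(mutex_);
    grants_[tti % PIPELINE_DEPTH] = list;
  }

private:
  std::array<ul_sched_list_t, PIPELINE_DEPTH> grants_;
  std::mutex                                  mutex_;
};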

@@ -135,10 +135,6 @@ public:
   stack_interface_phy_lte* stack = nullptr;
   srslte::channel_ptr dl_channel = nullptr;
-  // Common objects for scheduling grants
-  stack_interface_phy_lte::ul_sched_list_t ul_grants[TTIMOD_SZ] = {};
-  stack_interface_phy_lte::dl_sched_list_t dl_grants[TTIMOD_SZ] = {};
   /**
    * UE Database object, direct public access, all PHY threads should be able to access this attribute directly
    */
@@ -150,7 +146,16 @@ public:
   bool is_mbsfn_sf(srslte_mbsfn_cfg_t* cfg, uint32_t phy_tti);
   void set_mch_period_stop(uint32_t stop);
+  // Getters and setters for ul grants which need to be shared between workers
+  const stack_interface_phy_lte::ul_sched_list_t& get_ul_grants(uint32_t tti);
+  void set_ul_grants(uint32_t tti, const stack_interface_phy_lte::ul_sched_list_t& ul_grants);
+  void clear_grants(uint16_t rnti);
 private:
+  // Common objects for scheduling grants
+  stack_interface_phy_lte::ul_sched_list_t ul_grants[TTIMOD_SZ] = {};
+  std::mutex grant_mutex = {};
   phy_cell_cfg_list_t cell_list;
   bool have_mtch_stop = false;

@@ -213,25 +213,6 @@ void cc_worker::rem_rnti(uint16_t rnti)
     srslte_enb_dl_rem_rnti(&enb_dl, rnti);
     srslte_enb_ul_rem_rnti(&enb_ul, rnti);
-    // remove any pending dci for each subframe
-    for (auto& list : phy->ul_grants) {
-      for (auto& q : list) {
-        for (uint32_t j = 0; j < q.nof_grants; j++) {
-          if (q.pusch[j].dci.rnti == rnti) {
-            q.pusch[j].dci.rnti = 0;
-          }
-        }
-      }
-    }
-    for (auto& list : phy->dl_grants) {
-      for (auto& q : list) {
-        for (uint32_t j = 0; j < q.nof_grants; j++) {
-          if (q.pdsch[j].dci.rnti == rnti) {
-            q.pdsch[j].dci.rnti = 0;
-          }
-        }
-      }
-    }
   } else {
     Error("Removing user: rnti=0x%x does not exist\n", rnti);
   }
@@ -555,7 +536,7 @@ int cc_worker::encode_pdsch(stack_interface_phy_lte::dl_sched_grant_t* grants, u
       // Save metrics stats
       ue_db[rnti]->metrics_dl(grants[i].dci.tb[0].mcs_idx);
     } else {
-      ERROR("RNTI (x%x) not found in Component Carrier worker %d\n", rnti, cc_idx);
+      Error("User rnti=0x%x not found in cc_worker=%d\n", rnti, cc_idx);
     }
   }
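The PUSCH half of the DCI-clearing loops deleted above is not lost: it reappears below as phy_common::clear_grants(), now executed under grant_mutex instead of touching phy->ul_grants unlocked from the worker. The dl_grants loop needs no replacement because DL grants stop being shared state entirely: sf_worker::work_imp() below builds them as a local list sized with get_nof_carriers(), which appears to be what the "Set correct number of carriers for DL" bullet in the commit message refers to.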

@@ -92,6 +92,7 @@ int phy::init(const phy_args_t& args,
     mylog->set_hex_limit(args.log.phy_hex_limit);
     log_vec.push_back(std::move(mylog));
   }
+  log_h = log_vec[0].get();
   // Add PHY lib log
   if (log_vec.at(0)->get_level_from_string(args.log.phy_lib_level) != srslte::LOG_LEVEL_NONE) {
@@ -170,11 +171,15 @@ int phy::add_rnti(uint16_t rnti, uint32_t pcell_index, bool is_temporal)
 void phy::rem_rnti(uint16_t rnti)
 {
+  // Remove the RNTI when the TTI finishes, this has a delay up to the pipeline length (3 ms)
+  for (uint32_t i = 0; i < nof_workers; i++) {
+    sf_worker* w = (sf_worker*)workers_pool.wait_worker_id(i);
+    w->rem_rnti(rnti);
+    w->release();
+  }
   if (SRSLTE_RNTI_ISUSER(rnti)) {
     workers_common.ue_db.rem_rnti(rnti);
-  }
-  for (uint32_t i = 0; i < nof_workers; i++) {
-    workers[i].rem_rnti(rnti);
+    workers_common.clear_grants(rnti);
   }
 }

@@ -48,11 +48,6 @@ namespace srsenb {
 void phy_common::reset()
 {
-  for (auto& q : dl_grants) {
-    for (auto& g : q) {
-      g = {};
-    }
-  }
   for (auto& q : ul_grants) {
     for (auto& g : q) {
       g = {};
@@ -78,9 +73,6 @@ bool phy_common::init(const phy_cell_cfg_list_t& cell_list_,
   }
   // Create grants
-  for (auto& q : dl_grants) {
-    q.resize(cell_list.size());
-  }
   for (auto& q : ul_grants) {
     q.resize(cell_list.size());
   }
@@ -97,6 +89,34 @@ void phy_common::stop()
   semaphore.wait_all();
 }
+void phy_common::clear_grants(uint16_t rnti)
+{
+  std::lock_guard<std::mutex> lock(grant_mutex);
+  // remove any pending dci for each subframe
+  for (auto& list : ul_grants) {
+    for (auto& q : list) {
+      for (uint32_t j = 0; j < q.nof_grants; j++) {
+        if (q.pusch[j].dci.rnti == rnti) {
+          q.pusch[j].dci.rnti = 0;
+        }
+      }
+    }
+  }
+}
+
+const stack_interface_phy_lte::ul_sched_list_t& phy_common::get_ul_grants(uint32_t tti)
+{
+  std::lock_guard<std::mutex> lock(grant_mutex);
+  return ul_grants[tti % TTIMOD_SZ];
+}
+
+void phy_common::set_ul_grants(uint32_t tti, const stack_interface_phy_lte::ul_sched_list_t& ul_grant_list)
+{
+  std::lock_guard<std::mutex> lock(grant_mutex);
+  ul_grants[tti % TTIMOD_SZ] = ul_grant_list;
+}
+
 /* The transmission of UL subframes must be in sequence. The correct sequence is guaranteed by a chain of N semaphores,
  * one per TTI%nof_workers. Each threads waits for the semaphore for the current thread and after transmission allows
  * next TTI to be transmitted
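The comment above describes the ordering scheme: UL subframes must reach the radio in TTI order, enforced by a chain of semaphores, one per TTI % nof_workers, where each worker waits on its own semaphore and, after transmitting, releases the next one. A self-contained sketch of such a chain follows; it illustrates the scheme in the comment under the assumption of N worker threads with one in-flight TTI each, and is not srsLTE's actual semaphore class.

#include <condition_variable>
#include <cstdint>
#include <mutex>
#include <vector>

// Minimal counting semaphore; std::counting_semaphore would require C++20.
class semaphore
{
public:
  void release()
  {
    std::lock_guard<std::mutex> lock(m_);
    count_++;
    cv_.notify_one();
  }
  void acquire()
  {
    std::unique_lock<std::mutex> lock(m_);
    cv_.wait(lock, [this]() { return count_ > 0; });
    count_--;
  }

private:
  std::mutex              m_;
  std::condition_variable cv_;
  int                     count_ = 0;
};

// The worker handling TTI t may only transmit after the worker for t-1 has;
// sem[0] must be released once at startup to let the first TTI through.
void tx_in_sequence(std::vector<semaphore>& sem, uint32_t tti, uint32_t nof_workers)
{
  sem[tti % nof_workers].acquire();
  // ... hand this TTI's samples to the radio ...
  sem[(tti + 1) % nof_workers].release();
}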

@@ -176,8 +176,14 @@ void sf_worker::work_imp()
   srslte_mbsfn_cfg_t mbsfn_cfg;
   srslte_sf_t sf_type = phy->is_mbsfn_sf(&mbsfn_cfg, tti_tx_dl) ? SRSLTE_SF_MBSFN : SRSLTE_SF_NORM;
-  stack_interface_phy_lte::ul_sched_list_t* ul_grants = phy->ul_grants;
-  stack_interface_phy_lte::dl_sched_list_t* dl_grants = phy->dl_grants;
+  // Uplink grants to receive this TTI
+  stack_interface_phy_lte::ul_sched_list_t ul_grants = phy->get_ul_grants(t_rx);
+  // Uplink grants to transmit this tti and receive in the future
+  stack_interface_phy_lte::ul_sched_list_t ul_grants_tx = phy->get_ul_grants(t_tx_ul);
+  // Downlink grants to transmit this TTI
+  stack_interface_phy_lte::dl_sched_list_t dl_grants(phy->get_nof_carriers());
   stack_interface_phy_lte* stack = phy->stack;
   log_h->step(tti_rx);
@@ -189,19 +195,19 @@ void sf_worker::work_imp()
   // Process UL
   for (uint32_t cc = 0; cc < cc_workers.size(); cc++) {
-    cc_workers[cc]->work_ul(ul_sf, phy->ul_grants[t_rx][cc]);
+    cc_workers[cc]->work_ul(ul_sf, ul_grants[cc]);
   }
   // Get DL scheduling for the TX TTI from MAC
   if (sf_type == SRSLTE_SF_NORM) {
-    if (stack->get_dl_sched(tti_tx_dl, dl_grants[t_tx_dl]) < 0) {
+    if (stack->get_dl_sched(tti_tx_dl, dl_grants) < 0) {
       Error("Getting DL scheduling from MAC\n");
       phy->worker_end(this, tx_buffer, 0, tx_time);
       return;
     }
   } else {
-    dl_grants[t_tx_dl][0].cfi = mbsfn_cfg.non_mbsfn_region_length;
-    if (stack->get_mch_sched(tti_tx_dl, mbsfn_cfg.is_mcch, dl_grants[t_tx_dl])) {
+    dl_grants[0].cfi = mbsfn_cfg.non_mbsfn_region_length;
+    if (stack->get_mch_sched(tti_tx_dl, mbsfn_cfg.is_mcch, dl_grants)) {
       Error("Getting MCH packets from MAC\n");
       phy->worker_end(this, tx_buffer, 0, tx_time);
       return;
@@ -209,11 +215,11 @@ void sf_worker::work_imp()
   }
   // Make sure CFI is in the right range
-  dl_grants[t_tx_dl][0].cfi = SRSLTE_MAX(dl_grants[t_tx_dl][0].cfi, 1);
-  dl_grants[t_tx_dl][0].cfi = SRSLTE_MIN(dl_grants[t_tx_dl][0].cfi, 3);
+  dl_grants[0].cfi = SRSLTE_MAX(dl_grants[0].cfi, 1);
+  dl_grants[0].cfi = SRSLTE_MIN(dl_grants[0].cfi, 3);
   // Get UL scheduling for the TX TTI from MAC
-  if (stack->get_ul_sched(tti_tx_ul, ul_grants[t_tx_ul]) < 0) {
+  if (stack->get_ul_sched(tti_tx_ul, ul_grants_tx) < 0) {
     Error("Getting UL scheduling from MAC\n");
     phy->worker_end(this, tx_buffer, 0, tx_time);
     return;
@@ -229,10 +235,14 @@ void sf_worker::work_imp()
   // Process DL
   for (uint32_t cc = 0; cc < cc_workers.size(); cc++) {
-    dl_sf.cfi = dl_grants[t_tx_dl][cc].cfi;
-    cc_workers[cc]->work_dl(dl_sf, phy->dl_grants[t_tx_dl][cc], phy->ul_grants[t_tx_ul][cc], &mbsfn_cfg);
+    dl_sf.cfi = dl_grants[cc].cfi;
+    cc_workers[cc]->work_dl(dl_sf, dl_grants[cc], ul_grants_tx[cc], &mbsfn_cfg);
   }
+  // Save grants
+  phy->set_ul_grants(t_tx_ul, ul_grants_tx);
+  phy->set_ul_grants(t_rx, ul_grants);
   Debug("Sending to radio\n");
   phy->worker_end(this, tx_buffer, SRSLTE_SF_LEN_PRB(phy->get_nof_prb(0)), tx_time);
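After this hunk, the worker's grant handling follows a copy-in / modify / copy-back discipline: both UL grant lists are snapshots taken through get_ul_grants(), all per-TTI work happens on the copies, and the results are published with set_ul_grants() once DL processing is done. A hypothetical outline of that flow; the function name and parameters are invented for illustration, and only get_ul_grants()/set_ul_grants() come from the commit:

void worker_tti(phy_common& phy, uint32_t t_rx, uint32_t t_tx_ul)
{
  // Private snapshots: receive against one TTI, transmit grants for another.
  stack_interface_phy_lte::ul_sched_list_t ul_grants    = phy.get_ul_grants(t_rx);
  stack_interface_phy_lte::ul_sched_list_t ul_grants_tx = phy.get_ul_grants(t_tx_ul);

  // ... decode PUSCH against ul_grants, let the MAC fill ul_grants_tx ...

  phy.set_ul_grants(t_tx_ul, ul_grants_tx); // publish the new UL grants
  phy.set_ul_grants(t_rx, ul_grants);       // write back any state touched during RX
}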

@@ -232,8 +232,8 @@ int mac::ue_rem(uint16_t rnti)
 {
   srslte::rwlock_read_guard lock(rwlock);
   if (ue_db.count(rnti)) {
-    scheduler.ue_rem(rnti);
     phy_h->rem_rnti(rnti);
+    scheduler.ue_rem(rnti);
     ret = 0;
   } else {
     Error("User rnti=0x%x not found\n", rnti);

@@ -192,7 +192,6 @@ uint8_t* ue::request_buffer(const uint32_t ue_cc_idx, const uint32_t tti, const
       ret = pdus.request(len);
       pending_buffers.at(ue_cc_idx).at(tti % nof_rx_harq_proc) = ret;
     } else {
-      log_h->console("Error requesting buffer for pid %d, not pushed yet\n", tti % nof_rx_harq_proc);
       log_h->error("Requesting buffer for pid %d, not pushed yet\n", tti % nof_rx_harq_proc);
     }
   } else {
