enb,mac: fix concurrent access to cell structs

This patch moves the rwlock that protects the UE database outwards
so that it also protects the cell structs.

It also adds a missing write guard when setting the cell
config in mac::cell_cfg().
Andre Puschmann 3 years ago
parent ff72c78745
commit 4b069d9b86
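
For context, the locking primitive used throughout this patch is the project's pair of RAII guards over a pthread rwlock. The following is only a sketch of how such guards typically look (modelled loosely on srsran::rwlock_read_guard and srsran::rwlock_write_guard; the exact srsRAN definitions may differ): any number of readers may hold the lock at once, a writer holds it exclusively, and the destructor releases it when the guard goes out of scope.

#include <pthread.h>

// Sketch only: RAII read/write guards over a pthread_rwlock_t.
class rwlock_read_guard
{
public:
  explicit rwlock_read_guard(pthread_rwlock_t& rwlock_) : rwlock(&rwlock_) { pthread_rwlock_rdlock(rwlock); }
  ~rwlock_read_guard() { pthread_rwlock_unlock(rwlock); }

private:
  pthread_rwlock_t* rwlock;
};

class rwlock_write_guard
{
public:
  explicit rwlock_write_guard(pthread_rwlock_t& rwlock_) : rwlock(&rwlock_) { pthread_rwlock_wrlock(rwlock); }
  ~rwlock_write_guard() { pthread_rwlock_unlock(rwlock); }

private:
  pthread_rwlock_t* rwlock;
};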

@@ -228,6 +228,7 @@ int mac::ue_set_crnti(uint16_t temp_crnti, uint16_t crnti, const sched_interface
 int mac::cell_cfg(const std::vector<sched_interface::cell_cfg_t>& cell_cfg_)
 {
+  srsran::rwlock_write_guard lock(rwlock);
   cell_config = cell_cfg_;
   return scheduler.cell_cfg(cell_config);
 }
@@ -576,6 +577,8 @@ int mac::get_dl_sched(uint32_t tti_tx_dl, dl_sched_list_t& dl_sched_res_list)
     add_padding();
   }
 
+  srsran::rwlock_read_guard lock(rwlock);
+
   for (uint32_t enb_cc_idx = 0; enb_cc_idx < cell_config.size(); enb_cc_idx++) {
     // Run scheduler with current info
     sched_interface::dl_sched_res_t sched_result = {};
@@ -587,68 +590,62 @@ int mac::get_dl_sched(uint32_t tti_tx_dl, dl_sched_list_t& dl_sched_res_list)
     int n = 0;
     dl_sched_t* dl_sched_res = &dl_sched_res_list[enb_cc_idx];
 
-    {
-      srsran::rwlock_read_guard lock(rwlock);
-
     // Copy data grants
     for (uint32_t i = 0; i < sched_result.data.size(); i++) {
       uint32_t tb_count = 0;
 
       // Get UE
       uint16_t rnti = sched_result.data[i].dci.rnti;
 
       if (ue_db.contains(rnti)) {
         // Copy dci info
         dl_sched_res->pdsch[n].dci = sched_result.data[i].dci;
 
         for (uint32_t tb = 0; tb < SRSRAN_MAX_TB; tb++) {
           dl_sched_res->pdsch[n].softbuffer_tx[tb] =
               ue_db[rnti]->get_tx_softbuffer(enb_cc_idx, sched_result.data[i].dci.pid, tb);
 
           // If the Rx soft-buffer is not given, abort transmission
           if (dl_sched_res->pdsch[n].softbuffer_tx[tb] == nullptr) {
             continue;
           }
 
           if (sched_result.data[i].nof_pdu_elems[tb] > 0) {
             /* Get PDU if it's a new transmission */
             dl_sched_res->pdsch[n].data[tb] = ue_db[rnti]->generate_pdu(enb_cc_idx,
                                                                         sched_result.data[i].dci.pid,
                                                                         tb,
                                                                         sched_result.data[i].pdu[tb],
                                                                         sched_result.data[i].nof_pdu_elems[tb],
                                                                         sched_result.data[i].tbs[tb]);
             if (!dl_sched_res->pdsch[n].data[tb]) {
               logger.error("Error! PDU was not generated (rnti=0x%04x, tb=%d)", rnti, tb);
             }
 
             if (pcap) {
               pcap->write_dl_crnti(
                   dl_sched_res->pdsch[n].data[tb], sched_result.data[i].tbs[tb], rnti, true, tti_tx_dl, enb_cc_idx);
             }
             if (pcap_net) {
               pcap_net->write_dl_crnti(
                   dl_sched_res->pdsch[n].data[tb], sched_result.data[i].tbs[tb], rnti, true, tti_tx_dl, enb_cc_idx);
             }
           } else {
             /* TB not enabled OR no data to send: set pointers to NULL */
             dl_sched_res->pdsch[n].data[tb] = nullptr;
           }
 
           tb_count++;
         }
 
         // Count transmission if at least one TB has succesfully added
         if (tb_count > 0) {
           n++;
         }
       } else {
         logger.warning("Invalid DL scheduling result. User 0x%x does not exist", rnti);
       }
     }
 
-      // No more uses of shared ue_db beyond here
-    }
-
     // Copy RAR grants
@@ -728,11 +725,8 @@ int mac::get_dl_sched(uint32_t tti_tx_dl, dl_sched_list_t& dl_sched_res_list)
   }
 
   // Count number of TTIs for all active users
-  {
-    srsran::rwlock_read_guard lock(rwlock);
-    for (auto& u : ue_db) {
-      u.second->metrics_cnt();
-    }
-  }
+  for (auto& u : ue_db) {
+    u.second->metrics_cnt();
+  }
 
   return SRSRAN_SUCCESS;
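
Taken together, the resulting scheme is: the cell (re)configuration path takes the write lock while it replaces the shared cell config, and the per-TTI downlink scheduling path holds one read lock for the whole function, covering both cell_config and the UE database. A hypothetical, self-contained illustration of this pattern (mac_like and cell_cfg_t below are simplified stand-ins rather than the real srsRAN classes, and the code reuses the guard sketch above):

#include <cstdint>
#include <pthread.h>
#include <vector>

struct cell_cfg_t {
  uint32_t cell_id;
};

class mac_like
{
public:
  mac_like() { pthread_rwlock_init(&rwlock, nullptr); }
  ~mac_like() { pthread_rwlock_destroy(&rwlock); }

  // Writer path (cell reconfiguration): exclusive access while the shared
  // vector is replaced, so no reader can observe it mid-resize.
  int cell_cfg(const std::vector<cell_cfg_t>& cfg)
  {
    rwlock_write_guard lock(rwlock);
    cell_config = cfg;
    return 0;
  }

  // Reader path (per-TTI scheduling): the read lock spans the whole function,
  // covering every access to cell_config (and, in the real code, ue_db too).
  int get_dl_sched()
  {
    rwlock_read_guard lock(rwlock);
    for (uint32_t cc = 0; cc < cell_config.size(); cc++) {
      // build the per-carrier scheduling result here
    }
    return 0;
  }

private:
  pthread_rwlock_t        rwlock;
  std::vector<cell_cfg_t> cell_config;
};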
