refactor: use enb_cc_idx instead of ue_cc_idx in mac::ue

Branch: master
Francisco authored 4 years ago, committed by Francisco Paisana
parent fa7a8fb687
commit e8983b88f0
@@ -304,10 +304,10 @@ public:
   virtual int ul_sched(uint32_t tti, uint32_t enb_cc_idx, ul_sched_res_t& sched_result) = 0;
   /* Custom */
-  virtual void set_dl_tti_mask(uint8_t* tti_mask, uint32_t nof_sfs) = 0;
-  virtual std::array<int, SRSRAN_MAX_CARRIERS> get_enb_ue_cc_map(uint16_t rnti) = 0;
-  virtual std::array<bool, SRSRAN_MAX_CARRIERS> get_scell_activation_mask(uint16_t rnti) = 0;
-  virtual int ul_buffer_add(uint16_t rnti, uint32_t lcid, uint32_t bytes) = 0;
+  virtual void set_dl_tti_mask(uint8_t* tti_mask, uint32_t nof_sfs) = 0;
+  virtual std::array<int, SRSRAN_MAX_CARRIERS> get_enb_ue_cc_map(uint16_t rnti) = 0;
+  virtual std::array<int, SRSRAN_MAX_CARRIERS> get_enb_ue_activ_cc_map(uint16_t rnti) = 0;
+  virtual int ul_buffer_add(uint16_t rnti, uint32_t lcid, uint32_t bytes) = 0;
 };
 } // namespace srsenb

@@ -105,10 +105,8 @@ public:
                  const uint8_t mcch_payload_length) override;
 private:
   static const uint32_t cfi = 3;
-  bool check_ue_active(uint16_t rnti);
-  uint16_t allocate_ue();
+  uint16_t allocate_ue(uint32_t enb_cc_idx);
   bool is_valid_rnti_unprotected(uint16_t rnti);
   srslog::basic_logger& logger;

@ -72,10 +72,10 @@ public:
/* Custom functions
*/
void set_dl_tti_mask(uint8_t* tti_mask, uint32_t nof_sfs) final;
std::array<int, SRSRAN_MAX_CARRIERS> get_enb_ue_cc_map(uint16_t rnti) final;
std::array<bool, SRSRAN_MAX_CARRIERS> get_scell_activation_mask(uint16_t rnti) final;
int ul_buffer_add(uint16_t rnti, uint32_t lcid, uint32_t bytes) final;
void set_dl_tti_mask(uint8_t* tti_mask, uint32_t nof_sfs) final;
std::array<int, SRSRAN_MAX_CARRIERS> get_enb_ue_cc_map(uint16_t rnti) final;
std::array<int, SRSRAN_MAX_CARRIERS> get_enb_ue_activ_cc_map(uint16_t rnti) final;
int ul_buffer_add(uint16_t rnti, uint32_t lcid, uint32_t bytes) final;
class carrier_sched;

@@ -122,7 +122,7 @@ class ue : public srsran::read_pdu_interface, public mac_ta_ue_interface
 {
 public:
   ue(uint16_t rnti,
-     uint32_t nof_prb,
+     uint32_t enb_cc_idx,
      sched_interface* sched,
      rrc_interface_mac* rrc_,
      rlc_interface_mac* rlc,
@@ -145,7 +145,7 @@ public:
   void set_active(bool active) { active_state.store(active, std::memory_order_relaxed); }
   bool is_active() const { return active_state.load(std::memory_order_relaxed); }
-  uint8_t* generate_pdu(uint32_t ue_cc_idx,
+  uint8_t* generate_pdu(uint32_t enb_cc_idx,
                         uint32_t harq_pid,
                         uint32_t tb_idx,
                         const sched_interface::dl_sched_pdu_t pdu[sched_interface::MAX_RLC_PDU_LIST],
@@ -154,12 +154,12 @@ public:
   uint8_t*
   generate_mch_pdu(uint32_t harq_pid, sched_interface::dl_pdu_mch_t sched, uint32_t nof_pdu_elems, uint32_t grant_size);
-  srsran_softbuffer_tx_t* get_tx_softbuffer(uint32_t ue_cc_idx, uint32_t harq_process, uint32_t tb_idx);
-  srsran_softbuffer_rx_t* get_rx_softbuffer(uint32_t ue_cc_idx, uint32_t tti);
+  srsran_softbuffer_tx_t* get_tx_softbuffer(uint32_t enb_cc_idx, uint32_t harq_process, uint32_t tb_idx);
+  srsran_softbuffer_rx_t* get_rx_softbuffer(uint32_t enb_cc_idx, uint32_t tti);
-  uint8_t* request_buffer(uint32_t tti, uint32_t ue_cc_idx, const uint32_t len);
+  uint8_t* request_buffer(uint32_t tti, uint32_t enb_cc_idx, uint32_t len);
   void process_pdu(srsran::unique_byte_buffer_t pdu, uint32_t grant_nof_prbs);
-  srsran::unique_byte_buffer_t release_pdu(uint32_t tti, uint32_t ue_cc_idx);
+  srsran::unique_byte_buffer_t release_pdu(uint32_t tti, uint32_t enb_cc_idx);
   void clear_old_buffers(uint32_t tti);
   void metrics_read(mac_ue_metrics_t* metrics_);

@@ -314,14 +314,7 @@ int mac::push_pdu(uint32_t tti_rx,
     return SRSRAN_ERROR;
   }
-  std::array<int, SRSRAN_MAX_CARRIERS> enb_ue_cc_map = scheduler.get_enb_ue_cc_map(rnti);
-  if (enb_ue_cc_map[enb_cc_idx] < 0) {
-    logger.error("User rnti=0x%x is not activated for carrier %d", rnti, enb_cc_idx);
-    return SRSRAN_ERROR;
-  }
-  uint32_t ue_cc_idx = enb_ue_cc_map[enb_cc_idx];
-  srsran::unique_byte_buffer_t pdu = ue_db[rnti]->release_pdu(tti_rx, ue_cc_idx);
+  srsran::unique_byte_buffer_t pdu = ue_db[rnti]->release_pdu(tti_rx, enb_cc_idx);
   if (pdu == nullptr) {
     logger.warning("Could not find MAC UL PDU for rnti=0x%x, cc=%d, tti=%d", rnti, enb_cc_idx, tti_rx);
     return SRSRAN_ERROR;
@@ -454,7 +447,7 @@ bool mac::is_valid_rnti_unprotected(uint16_t rnti)
   return true;
 }
-uint16_t mac::allocate_ue()
+uint16_t mac::allocate_ue(uint32_t enb_cc_idx)
 {
   ue* inserted_ue = nullptr;
   uint16_t rnti = SRSRAN_INVALID_RNTI;
@@ -473,7 +466,7 @@ uint16_t mac::allocate_ue()
     // Allocate and initialize UE object
     unique_rnti_ptr<ue> ue_ptr = make_rnti_obj<ue>(
-        rnti, rnti, args.nof_prb, &scheduler, rrc_h, rlc_h, phy_h, logger, cells.size(), softbuffer_pool.get());
+        rnti, rnti, enb_cc_idx, &scheduler, rrc_h, rlc_h, phy_h, logger, cells.size(), softbuffer_pool.get());
     // Add UE to rnti map
     srsran::rwlock_write_guard rw_lock(rwlock);
@@ -502,7 +495,7 @@ uint16_t mac::allocate_ue()
 uint16_t mac::reserve_new_crnti(const sched_interface::ue_cfg_t& ue_cfg)
 {
-  uint16_t rnti = allocate_ue();
+  uint16_t rnti = allocate_ue(ue_cfg.supported_cc_list[0].enb_cc_idx);
   if (rnti == SRSRAN_INVALID_RNTI) {
     return rnti;
   }
@@ -523,7 +516,7 @@ void mac::rach_detected(uint32_t tti, uint32_t enb_cc_idx, uint32_t preamble_idx
   auto rach_tprof_meas = rach_tprof.start();
   stack_task_queue.push([this, tti, enb_cc_idx, preamble_idx, time_adv, rach_tprof_meas]() mutable {
-    uint16_t rnti = allocate_ue();
+    uint16_t rnti = allocate_ue(enb_cc_idx);
     if (rnti == SRSRAN_INVALID_RNTI) {
       return;
     }
@@ -612,7 +605,7 @@ int mac::get_dl_sched(uint32_t tti_tx_dl, dl_sched_list_t& dl_sched_res_list)
         for (uint32_t tb = 0; tb < SRSRAN_MAX_TB; tb++) {
           dl_sched_res->pdsch[n].softbuffer_tx[tb] =
-              ue_db[rnti]->get_tx_softbuffer(sched_result.data[i].dci.ue_cc_idx, sched_result.data[i].dci.pid, tb);
+              ue_db[rnti]->get_tx_softbuffer(enb_cc_idx, sched_result.data[i].dci.pid, tb);
           // If the Rx soft-buffer is not given, abort transmission
           if (dl_sched_res->pdsch[n].softbuffer_tx[tb] == nullptr) {
@@ -621,7 +614,7 @@ int mac::get_dl_sched(uint32_t tti_tx_dl, dl_sched_list_t& dl_sched_res_list)
           if (sched_result.data[i].nof_pdu_elems[tb] > 0) {
             /* Get PDU if it's a new transmission */
-            dl_sched_res->pdsch[n].data[tb] = ue_db[rnti]->generate_pdu(sched_result.data[i].dci.ue_cc_idx,
+            dl_sched_res->pdsch[n].data[tb] = ue_db[rnti]->generate_pdu(enb_cc_idx,
                                                                         sched_result.data[i].dci.pid,
                                                                         tb,
                                                                         sched_result.data[i].pdu[tb],
@@ -927,8 +920,7 @@ int mac::get_ul_sched(uint32_t tti_tx_ul, ul_sched_list_t& ul_sched_res_list)
       phy_ul_sched_res->pusch[n].pid = TTI_RX(tti_tx_ul) % SRSRAN_FDD_NOF_HARQ;
       phy_ul_sched_res->pusch[n].needs_pdcch = sched_result.pusch[i].needs_pdcch;
       phy_ul_sched_res->pusch[n].dci = sched_result.pusch[i].dci;
-      phy_ul_sched_res->pusch[n].softbuffer_rx =
-          ue_db[rnti]->get_rx_softbuffer(sched_result.pusch[i].dci.ue_cc_idx, tti_tx_ul);
+      phy_ul_sched_res->pusch[n].softbuffer_rx = ue_db[rnti]->get_rx_softbuffer(enb_cc_idx, tti_tx_ul);
       // If the Rx soft-buffer is not given, abort reception
       if (phy_ul_sched_res->pusch[n].softbuffer_rx == nullptr) {
@@ -939,7 +931,7 @@ int mac::get_ul_sched(uint32_t tti_tx_ul, ul_sched_list_t& ul_sched_res_list)
         srsran_softbuffer_rx_reset_tbs(phy_ul_sched_res->pusch[n].softbuffer_rx, sched_result.pusch[i].tbs * 8);
       }
       phy_ul_sched_res->pusch[n].data =
-          ue_db[rnti]->request_buffer(tti_tx_ul, sched_result.pusch[i].dci.ue_cc_idx, sched_result.pusch[i].tbs);
+          ue_db[rnti]->request_buffer(tti_tx_ul, enb_cc_idx, sched_result.pusch[i].tbs);
       if (phy_ul_sched_res->pusch[n].data) {
         phy_ul_sched_res->nof_grants++;
       } else {
@@ -986,7 +978,7 @@ void mac::write_mcch(const srsran::sib2_mbms_t* sib2_,
   memcpy(mcch_payload_buffer, mcch_payload, mcch_payload_length * sizeof(uint8_t));
   current_mcch_length = mcch_payload_length;
   ue_db[SRSRAN_MRNTI] = std::unique_ptr<ue>{
-      new ue(SRSRAN_MRNTI, args.nof_prb, &scheduler, rrc_h, rlc_h, phy_h, logger, cells.size(), softbuffer_pool.get())};
+      new ue(SRSRAN_MRNTI, 0, &scheduler, rrc_h, rlc_h, phy_h, logger, cells.size(), softbuffer_pool.get())};
   rrc_h->add_user(SRSRAN_MRNTI, {});
 }

@@ -267,18 +267,22 @@ std::array<int, SRSRAN_MAX_CARRIERS> sched::get_enb_ue_cc_map(uint16_t rnti)
   return ret;
 }
-std::array<bool, SRSRAN_MAX_CARRIERS> sched::get_scell_activation_mask(uint16_t rnti)
-{
-  std::array<bool, SRSRAN_MAX_CARRIERS> scell_mask = {};
-  ue_db_access_locked(rnti, [this, &scell_mask](sched_ue& ue) {
-    for (size_t enb_cc_idx = 0; enb_cc_idx < carrier_schedulers.size(); ++enb_cc_idx) {
-      const sched_ue_cell* cc_ue = ue.find_ue_carrier(enb_cc_idx);
-      if (cc_ue != nullptr and (cc_ue->cc_state() == cc_st::active or cc_ue->cc_state() == cc_st::activating)) {
-        scell_mask[cc_ue->get_ue_cc_idx()] = true;
-      }
-    }
-  });
-  return scell_mask;
+std::array<int, SRSRAN_MAX_CARRIERS> sched::get_enb_ue_activ_cc_map(uint16_t rnti)
+{
+  std::array<int, SRSRAN_MAX_CARRIERS> ret{};
+  ret.fill(-1); // -1 for inactive & non-existent carriers
+  ue_db_access_locked(
+      rnti,
+      [this, &ret](sched_ue& ue) {
+        for (size_t enb_cc_idx = 0; enb_cc_idx < carrier_schedulers.size(); ++enb_cc_idx) {
+          const sched_ue_cell* cc_ue = ue.find_ue_carrier(enb_cc_idx);
+          if (cc_ue != nullptr and (cc_ue->cc_state() == cc_st::active or cc_ue->cc_state() == cc_st::activating)) {
+            ret[enb_cc_idx] = cc_ue->get_ue_cc_idx();
+          }
+        }
+      },
+      __PRETTY_FUNCTION__);
+  return ret;
 }
 /*******************************************************
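Usage note (not from the commit itself): the new sched::get_enb_ue_activ_cc_map() returns, for each eNB carrier index, the UE-local index of an active/activating carrier, or -1 otherwise. A minimal sketch, assuming SRSRAN_MAX_CARRIERS = 8 and a hypothetical helper name `to_scell_mask`, of how a caller can derive an SCell-activation bitmask from it, mirroring what ue::allocate_ce does further down in this commit:

```cpp
#include <array>
#include <cstdint>

constexpr size_t SRSRAN_MAX_CARRIERS = 8; // assumed value for this sketch

// activ_cc_map[enb_cc_idx] == -1 -> carrier not configured/active for this UE
// activ_cc_map[enb_cc_idx] >=  0 -> UE-local carrier index (0 is the PCell)
std::array<bool, SRSRAN_MAX_CARRIERS> to_scell_mask(const std::array<int, SRSRAN_MAX_CARRIERS>& activ_cc_map)
{
  std::array<bool, SRSRAN_MAX_CARRIERS> scells{};
  for (int ue_cc_idx : activ_cc_map) {
    if (ue_cc_idx > 0) {        // skip -1 (inactive) and 0 (the PCell)
      scells[ue_cc_idx] = true; // the SCell Activation CE is indexed by the UE-local carrier index
    }
  }
  return scells;
}
```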

@@ -173,7 +173,7 @@ void cc_buffer_handler::reset()
 }
 ue::ue(uint16_t rnti_,
-       uint32_t nof_prb_,
+       uint32_t enb_cc_idx,
        sched_interface* sched_,
        rrc_interface_mac* rrc_,
        rlc_interface_mac* rlc_,
@@ -195,7 +195,7 @@ ue::ue(uint16_t rnti_,
   cc_buffers(nof_cells_)
 {
   // Allocate buffer for PCell
-  cc_buffers[0].allocate_cc(softbuffer_pool->make());
+  cc_buffers[enb_cc_idx].allocate_cc(softbuffer_pool->make());
 }
 ue::~ue() {}
@@ -220,31 +220,31 @@ void ue::start_pcap(srsran::mac_pcap* pcap_)
   pcap = pcap_;
 }
-srsran_softbuffer_rx_t* ue::get_rx_softbuffer(uint32_t ue_cc_idx, uint32_t tti)
+srsran_softbuffer_rx_t* ue::get_rx_softbuffer(uint32_t enb_cc_idx, uint32_t tti)
 {
-  if ((size_t)ue_cc_idx >= cc_buffers.size()) {
-    ERROR("UE CC Index (%d/%zd) out-of-range", ue_cc_idx, cc_buffers.size());
+  if ((size_t)enb_cc_idx >= cc_buffers.size() or cc_buffers[enb_cc_idx].empty()) {
+    ERROR("eNB CC Index (%d/%zd) out-of-range", enb_cc_idx, cc_buffers.size());
     return nullptr;
   }
-  return &cc_buffers[ue_cc_idx].get_rx_softbuffer(tti);
+  return &cc_buffers[enb_cc_idx].get_rx_softbuffer(tti);
 }
-srsran_softbuffer_tx_t* ue::get_tx_softbuffer(uint32_t ue_cc_idx, uint32_t harq_process, uint32_t tb_idx)
+srsran_softbuffer_tx_t* ue::get_tx_softbuffer(uint32_t enb_cc_idx, uint32_t harq_process, uint32_t tb_idx)
 {
-  if ((size_t)ue_cc_idx >= cc_buffers.size()) {
-    ERROR("UE CC Index (%d/%zd) out-of-range", ue_cc_idx, cc_buffers.size());
+  if ((size_t)enb_cc_idx >= cc_buffers.size() or cc_buffers[enb_cc_idx].empty()) {
+    ERROR("eNB CC Index (%d/%zd) out-of-range", enb_cc_idx, cc_buffers.size());
     return nullptr;
   }
-  return &cc_buffers[ue_cc_idx].get_tx_softbuffer(harq_process, tb_idx);
+  return &cc_buffers[enb_cc_idx].get_tx_softbuffer(harq_process, tb_idx);
 }
-uint8_t* ue::request_buffer(uint32_t tti, uint32_t ue_cc_idx, const uint32_t len)
+uint8_t* ue::request_buffer(uint32_t tti, uint32_t enb_cc_idx, uint32_t len)
 {
   srsran_assert(len > 0, "UE buffers: Requesting buffer for zero bytes");
   std::unique_lock<std::mutex> lock(rx_buffers_mutex);
-  return cc_buffers[ue_cc_idx].get_rx_used_buffers().request_pdu(tti_point(tti), len);
+  return cc_buffers[enb_cc_idx].get_rx_used_buffers().request_pdu(tti_point(tti), len);
 }
void ue::clear_old_buffers(uint32_t tti)
@@ -377,10 +377,10 @@ void ue::process_pdu(srsran::unique_byte_buffer_t pdu, uint32_t grant_nof_prbs)
   logger.debug("MAC PDU processed");
 }
-srsran::unique_byte_buffer_t ue::release_pdu(uint32_t tti, uint32_t ue_cc_idx)
+srsran::unique_byte_buffer_t ue::release_pdu(uint32_t tti, uint32_t enb_cc_idx)
 {
   std::lock_guard<std::mutex> lock(rx_buffers_mutex);
-  return cc_buffers[ue_cc_idx].get_rx_used_buffers().release_pdu(tti_point(tti));
+  return cc_buffers[enb_cc_idx].get_rx_used_buffers().release_pdu(tti_point(tti));
 }
 bool ue::process_ce(srsran::sch_subh* subh, uint32_t grant_nof_prbs)
@@ -500,13 +500,19 @@ void ue::allocate_ce(srsran::sch_pdu* pdu, uint32_t lcid)
       break;
     case srsran::dl_sch_lcid::SCELL_ACTIVATION:
       if (pdu->new_subh()) {
-        std::array<bool, SRSRAN_MAX_CARRIERS> active_scell_list = sched->get_scell_activation_mask(rnti);
+        std::array<int, SRSRAN_MAX_CARRIERS> active_ccs = sched->get_enb_ue_activ_cc_map(rnti);
+        std::array<bool, SRSRAN_MAX_CARRIERS> active_scell_list{};
+        for (int ue_cc_idx : active_ccs) {
+          if (ue_cc_idx > 0) {
+            active_scell_list[ue_cc_idx] = true;
+          }
+        }
         if (pdu->get()->set_scell_activation_cmd(active_scell_list)) {
           phy->set_activation_deactivation_scell(rnti, active_scell_list);
           // Allocate and initialize Rx/Tx softbuffers for new carriers (exclude PCell)
-          for (size_t i = 0; i < std::min(active_scell_list.size(), cc_buffers.size()); ++i) {
-            if (active_scell_list[i] and cc_buffers[i].empty()) {
-              cc_buffers[i].allocate_cc(softbuffer_pool->make());
+          for (size_t cc = 0; cc < cc_buffers.size(); ++cc) {
+            if (active_ccs[cc] >= 0 and cc_buffers[cc].empty()) {
+              cc_buffers[cc].allocate_cc(softbuffer_pool->make());
             }
           }
         } else {
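A small worked example of the index relationships used in the loop above (illustrative only; the concrete values are assumptions, not taken from the commit): with three eNB carriers, a PCell on enb_cc_idx=0 and one SCell on enb_cc_idx=2, get_enb_ue_activ_cc_map() would yield {0, -1, 1}, so the CE mask is set at the UE-local index 1 while softbuffers are allocated at the eNB index 2.

```cpp
#include <array>
#include <cassert>

int main()
{
  // active_ccs[enb_cc_idx] = ue_cc_idx of an active carrier, or -1 (assumed example values)
  std::array<int, 3> active_ccs = {0, -1, 1};

  std::array<bool, 3> active_scell_list{};
  for (int ue_cc_idx : active_ccs) {
    if (ue_cc_idx > 0) {
      active_scell_list[ue_cc_idx] = true;
    }
  }

  assert(!active_scell_list[0] && active_scell_list[1] && !active_scell_list[2]); // CE mask: UE-indexed
  assert(active_ccs[2] >= 0); // softbuffers would be allocated for cc_buffers[2] (eNB-indexed)
  return 0;
}
```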
@@ -522,7 +528,7 @@ void ue::allocate_ce(srsran::sch_pdu* pdu, uint32_t lcid)
   }
 }
-uint8_t* ue::generate_pdu(uint32_t ue_cc_idx,
+uint8_t* ue::generate_pdu(uint32_t enb_cc_idx,
                           uint32_t harq_pid,
                           uint32_t tb_idx,
                           const sched_interface::dl_sched_pdu_t pdu[sched_interface::MAX_RLC_PDU_LIST],
@@ -531,30 +537,26 @@ uint8_t* ue::generate_pdu(uint32_t ue_cc_idx,
 {
   std::lock_guard<std::mutex> lock(mutex);
   uint8_t* ret = nullptr;
-  if (rlc) {
-    if (ue_cc_idx < SRSRAN_MAX_CARRIERS && harq_pid < SRSRAN_FDD_NOF_HARQ && tb_idx < SRSRAN_MAX_TB) {
-      srsran::byte_buffer_t* buffer = cc_buffers[ue_cc_idx].get_tx_payload_buffer(harq_pid, tb_idx);
-      buffer->clear();
-      mac_msg_dl.init_tx(buffer, grant_size, false);
-      for (uint32_t i = 0; i < nof_pdu_elems; i++) {
-        if (pdu[i].lcid <= (uint32_t)srsran::ul_sch_lcid::PHR_REPORT) {
-          allocate_sdu(&mac_msg_dl, pdu[i].lcid, pdu[i].nbytes);
-        } else {
-          allocate_ce(&mac_msg_dl, pdu[i].lcid);
-        }
-      }
-      ret = mac_msg_dl.write_packet(logger);
-      if (logger.info.enabled()) {
-        fmt::memory_buffer str_buffer;
-        mac_msg_dl.to_string(str_buffer);
-        logger.info("0x%x %s", rnti, srsran::to_c_str(str_buffer));
-      }
-    } else {
-      logger.error(
-          "Invalid parameters calling generate_pdu: cc_idx=%d, harq_pid=%d, tb_idx=%d", ue_cc_idx, harq_pid, tb_idx);
-    }
+  if (enb_cc_idx < SRSRAN_MAX_CARRIERS && harq_pid < SRSRAN_FDD_NOF_HARQ && tb_idx < SRSRAN_MAX_TB) {
+    srsran::byte_buffer_t* buffer = cc_buffers[enb_cc_idx].get_tx_payload_buffer(harq_pid, tb_idx);
+    buffer->clear();
+    mac_msg_dl.init_tx(buffer, grant_size, false);
+    for (uint32_t i = 0; i < nof_pdu_elems; i++) {
+      if (pdu[i].lcid <= (uint32_t)srsran::ul_sch_lcid::PHR_REPORT) {
+        allocate_sdu(&mac_msg_dl, pdu[i].lcid, pdu[i].nbytes);
+      } else {
+        allocate_ce(&mac_msg_dl, pdu[i].lcid);
+      }
+    }
+    ret = mac_msg_dl.write_packet(logger);
+    if (logger.info.enabled()) {
+      fmt::memory_buffer str_buffer;
+      mac_msg_dl.to_string(str_buffer);
+      logger.info("0x%x %s", rnti, srsran::to_c_str(str_buffer));
+    }
   } else {
-    std::cout << "Error ue not configured (must call config() first" << std::endl;
+    logger.error(
+        "Invalid parameters calling generate_pdu: cc_idx=%d, harq_pid=%d, tb_idx=%d", enb_cc_idx, harq_pid, tb_idx);
   }
   return ret;
 }
