extend eNB MAC to support multiple CC per UE

- add tx/rx softbuffers for each CC that a UE might have
- make sure to assign the correct buffers when iterating
  over the CCs for UL/DL grant assignment
master
Andre Puschmann 5 years ago
parent 356fa9258b
commit a8acd235f6
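For orientation before the diff: the patch turns the per-UE softbuffer storage into per-carrier lists (one Tx and one Rx softbuffer list per configured CC) and makes every lookup take the carrier index first. The following is a minimal, compilable sketch of that layout, with toy types standing in for srslte_softbuffer_tx_t / srslte_softbuffer_rx_t and simplified HARQ numbers; it is illustrative only and not part of the commit.

// --- illustrative sketch, not part of this commit ---
#include <cstdint>
#include <cstdio>
#include <vector>

// Placeholder stand-ins for srslte_softbuffer_tx_t / srslte_softbuffer_rx_t.
struct toy_softbuffer_tx { uint32_t nof_prb; };
struct toy_softbuffer_rx { uint32_t nof_prb; };

class toy_ue {
public:
  toy_ue(uint32_t nof_prb_, uint32_t nof_rx_harq, uint32_t nof_tx_harq) :
    nof_prb(nof_prb_), nof_rx_harq_proc(nof_rx_harq), nof_tx_harq_proc(nof_tx_harq)
  {
    allocate_cc_buffers(); // PCell buffer set, as in the ue() constructor
  }

  // Mirrors ue::allocate_cc_buffers(): append one softbuffer set per extra carrier.
  uint32_t allocate_cc_buffers(uint32_t num_cc = 1)
  {
    for (uint32_t i = 0; i < num_cc; ++i) {
      softbuffer_rx.emplace_back(nof_rx_harq_proc, toy_softbuffer_rx{nof_prb});
      softbuffer_tx.emplace_back(nof_tx_harq_proc, toy_softbuffer_tx{nof_prb});
      pending_buffers.emplace_back(nof_rx_harq_proc, nullptr);
    }
    return static_cast<uint32_t>(softbuffer_tx.size());
  }

  // Same indexing scheme as the patched ue::get_tx_softbuffer()/get_rx_softbuffer().
  toy_softbuffer_tx* get_tx_softbuffer(uint32_t cc_idx, uint32_t pid, uint32_t tb_idx)
  {
    return &softbuffer_tx.at(cc_idx).at((pid * 2 /* SRSLTE_MAX_TB */ + tb_idx) % nof_tx_harq_proc);
  }
  toy_softbuffer_rx* get_rx_softbuffer(uint32_t cc_idx, uint32_t tti)
  {
    return &softbuffer_rx.at(cc_idx).at(tti % nof_rx_harq_proc);
  }

private:
  uint32_t nof_prb, nof_rx_harq_proc, nof_tx_harq_proc;
  std::vector<std::vector<toy_softbuffer_tx>> softbuffer_tx;   // [ue_cc_idx][harq/tb]
  std::vector<std::vector<toy_softbuffer_rx>> softbuffer_rx;   // [ue_cc_idx][tti % nof_rx_harq_proc]
  std::vector<std::vector<uint8_t*>>          pending_buffers; // [ue_cc_idx][pid]
};

int main()
{
  toy_ue ue(50, 8, 16);      // 50 PRB cell, 8 Rx / 16 Tx HARQ buffers (toy numbers)
  ue.allocate_cc_buffers(1); // e.g. one SCell gets activated
  std::printf("rx softbuffer for cc=1, tti=13: %p\n", (void*)ue.get_rx_softbuffer(1, 13));
  return 0;
}
// --- end sketch ---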

@@ -382,7 +382,7 @@ public:
virtual bool is_paging_opportunity(uint32_t tti, uint32_t* payload_len) = 0;
///< Provide packed SIB to MAC (buffer is managed by RRC)
virtual uint8_t* read_pdu_bcch_dlsch(const uint8_t cc_idx, const uint32_t sib_index) = 0;
virtual uint8_t* read_pdu_bcch_dlsch(const uint8_t enb_cc_idx, const uint32_t sib_index) = 0;
};
// SCell configuration

@@ -54,15 +54,15 @@ public:
/******** Interface from PHY (PHY -> MAC) ****************/
int sr_detected(uint32_t tti, uint16_t rnti) final;
void rach_detected(uint32_t tti, uint32_t primary_cc_idx, uint32_t preamble_idx, uint32_t time_adv) final;
void rach_detected(uint32_t tti, uint32_t enb_cc_idx, uint32_t preamble_idx, uint32_t time_adv) final;
int ri_info(uint32_t tti, uint16_t rnti, uint32_t cc_idx, uint32_t ri_value) override;
int pmi_info(uint32_t tti, uint16_t rnti, uint32_t cc_idx, uint32_t pmi_value) override;
int cqi_info(uint32_t tti, uint16_t rnti, uint32_t cc_idx, uint32_t cqi_value) override;
int snr_info(uint32_t tti, uint16_t rnti, uint32_t cc_idx, float snr) override;
int ri_info(uint32_t tti, uint16_t rnti, uint32_t enb_cc_idx, uint32_t ri_value) override;
int pmi_info(uint32_t tti, uint16_t rnti, uint32_t enb_cc_idx, uint32_t pmi_value) override;
int cqi_info(uint32_t tti, uint16_t rnti, uint32_t enb_cc_idx, uint32_t cqi_value) override;
int snr_info(uint32_t tti, uint16_t rnti, uint32_t enb_cc_idx, float snr) override;
int ta_info(uint32_t tti, uint16_t rnti, float ta_us) override;
int ack_info(uint32_t tti, uint16_t rnti, uint32_t cc_idx, uint32_t tb_idx, bool ack) override;
int crc_info(uint32_t tti, uint16_t rnti, uint32_t cc_idx, uint32_t nof_bytes, bool crc_res) override;
int ack_info(uint32_t tti, uint16_t rnti, uint32_t enb_cc_idx, uint32_t tb_idx, bool ack) override;
int crc_info(uint32_t tti, uint16_t rnti, uint32_t enb_cc_idx, uint32_t nof_bytes, bool crc_res) override;
int get_dl_sched(uint32_t tti, dl_sched_list_t& dl_sched_res) override;
int get_ul_sched(uint32_t tti, ul_sched_list_t& ul_sched_res) override;

@@ -70,14 +70,14 @@ public:
uint8_t*
generate_mch_pdu(uint32_t harq_pid, sched_interface::dl_pdu_mch_t sched, uint32_t nof_pdu_elems, uint32_t grant_size);
srslte_softbuffer_tx_t* get_tx_softbuffer(uint32_t harq_process, uint32_t tb_idx);
srslte_softbuffer_rx_t* get_rx_softbuffer(uint32_t tti);
srslte_softbuffer_tx_t* get_tx_softbuffer(const uint32_t cc_idx, const uint32_t harq_process, const uint32_t tb_idx);
srslte_softbuffer_rx_t* get_rx_softbuffer(const uint32_t cc_idx, const uint32_t tti);
bool process_pdus();
uint8_t* request_buffer(uint32_t tti, uint32_t len);
uint8_t* request_buffer(const uint32_t ue_cc_idx, const uint32_t tti, const uint32_t len);
void process_pdu(uint8_t* pdu, uint32_t nof_bytes, srslte::pdu_queue::channel_t channel);
void push_pdu(uint32_t tti, uint32_t len);
void deallocate_pdu(uint32_t tti);
void push_pdu(const uint32_t ue_cc_idx, const uint32_t tti, uint32_t len);
void deallocate_pdu(const uint32_t ue_cc_idx, const uint32_t tti);
uint32_t rl_failure();
void rl_failure_reset();
@@ -96,6 +96,8 @@ public:
int read_pdu(uint32_t lcid, uint8_t* payload, uint32_t requested_bytes);
private:
uint32_t allocate_cc_buffers(const uint32_t num_cc = 1); ///< Add and initialize softbuffers for CC
void allocate_sdu(srslte::sch_pdu* pdu, uint32_t lcid, uint32_t sdu_len);
bool process_ce(srslte::sch_subh* subh);
void allocate_ce(srslte::sch_pdu* pdu, uint32_t lcid);
@@ -106,29 +108,33 @@ private:
uint32_t dl_cqi_counter = 0;
uint32_t dl_ri_counter = 0;
uint32_t dl_pmi_counter = 0;
mac_metrics_t metrics;
srslte::mac_pcap* pcap = nullptr;
mac_metrics_t metrics = {};
uint64_t conres_id = 0;
srslte::mac_pcap* pcap = nullptr;
uint64_t conres_id = 0;
uint16_t rnti = 0;
uint32_t nof_prb = 0;
uint32_t last_tti = 0;
uint32_t nof_failures = 0;
int nof_rx_harq_proc = 0;
int nof_tx_harq_proc = 0;
uint16_t rnti = 0;
typedef std::vector<srslte_softbuffer_tx_t>
cc_softbuffer_tx_list_t; ///< List of Tx softbuffers for all HARQ processes of one carrier
std::vector<cc_softbuffer_tx_list_t> softbuffer_tx; ///< List of softbuffer lists for Tx
uint32_t last_tti = 0;
typedef std::vector<srslte_softbuffer_rx_t>
cc_softbuffer_rx_list_t; ///< List of Rx softbuffers for all HARQ processes of one carrier
std::vector<cc_softbuffer_rx_list_t> softbuffer_rx; ///< List of softbuffer lists for Rx
uint32_t nof_failures = 0;
srslte::block_queue<uint32_t> pending_ta_commands;
int nof_rx_harq_proc = 0;
int nof_tx_harq_proc = 0;
std::vector<srslte_softbuffer_tx_t> softbuffer_tx;
std::vector<srslte_softbuffer_rx_t> softbuffer_rx;
std::vector<uint8_t*> pending_buffers;
typedef std::vector<uint8_t*> cc_buffer_ptr_t; ///< List of buffer pointers for RX HARQ processes of one carrier
std::vector<cc_buffer_ptr_t> pending_buffers; ///< List of buffer pointer list for Rx
// For DL there are two buffers, one for each Transport block
srslte::byte_buffer_t tx_payload_buffer[SRSLTE_FDD_NOF_HARQ][SRSLTE_MAX_TB];
srslte::block_queue<uint32_t> pending_ta_commands;
// For UL there are multiple buffers per PID and are managed by pdu_queue
srslte::pdu_queue pdus;
srslte::sch_pdu mac_msg_dl, mac_msg_ul;
@@ -142,7 +148,7 @@ private:
bool conres_id_available = false;
// Mutexes
pthread_mutex_t mutex;
std::mutex mutex;
const uint8_t UL_CC_IDX = 0; ///< Passed to write CC index in PCAP (TODO: use actual CC idx)
};

@@ -295,106 +295,118 @@ void mac::rl_ok(uint16_t rnti)
}
}
int mac::ack_info(uint32_t tti, uint16_t rnti, uint32_t cc_idx, uint32_t tb_idx, bool ack)
int mac::ack_info(uint32_t tti, uint16_t rnti, uint32_t enb_cc_idx, uint32_t tb_idx, bool ack)
{
srslte::rwlock_read_guard lock(rwlock);
log_h->step(tti);
uint32_t nof_bytes = scheduler.dl_ack_info(tti, rnti, cc_idx, tb_idx, ack);
ue_db[rnti]->metrics_tx(ack, nof_bytes);
if (ue_db.count(rnti)) {
uint32_t nof_bytes = scheduler.dl_ack_info(tti, rnti, enb_cc_idx, tb_idx, ack);
ue_db[rnti]->metrics_tx(ack, nof_bytes);
if (ack) {
if (nof_bytes > 64) { // do not count RLC status messages only
rrc_h->set_activity_user(rnti);
log_h->debug("DL activity rnti=0x%x, n_bytes=%d\n", rnti, nof_bytes);
if (ack) {
if (nof_bytes > 64) { // do not count RLC status messages only
rrc_h->set_activity_user(rnti);
log_h->debug("DL activity rnti=0x%x, n_bytes=%d\n", rnti, nof_bytes);
}
}
}
return 0;
return SRSLTE_SUCCESS;
}
int mac::crc_info(uint32_t tti, uint16_t rnti, uint32_t cc_idx, uint32_t nof_bytes, bool crc)
int mac::crc_info(uint32_t tti, uint16_t rnti, uint32_t enb_cc_idx, uint32_t nof_bytes, bool crc)
{
int ret = SRSLTE_ERROR;
log_h->step(tti);
int ret = -1;
srslte::rwlock_read_guard lock(rwlock);
if (ue_db.count(rnti)) {
ue_db[rnti]->set_tti(tti);
ue_db[rnti]->metrics_rx(crc, nof_bytes);
uint32_t ue_cc_idx = 0; // FIXME: mapping between eNB->UE CC idx
// push the pdu through the queue if received correctly
if (crc) {
Info("Pushing PDU rnti=%d, tti=%d, nof_bytes=%d\n", rnti, tti, nof_bytes);
ue_db[rnti]->push_pdu(tti, nof_bytes);
ue_db[rnti]->push_pdu(ue_cc_idx, tti, nof_bytes);
stack_task_queue.push([this]() { process_pdus(); });
} else {
ue_db[rnti]->deallocate_pdu(tti);
ue_db[rnti]->deallocate_pdu(ue_cc_idx, tti);
}
ret = scheduler.ul_crc_info(tti, rnti, cc_idx, crc);
ret = scheduler.ul_crc_info(tti, rnti, ue_cc_idx, crc);
} else {
Error("User rnti=0x%x not found\n", rnti);
}
return ret;
}
int mac::ri_info(uint32_t tti, uint16_t rnti, uint32_t cc_idx, uint32_t ri_value)
int mac::ri_info(uint32_t tti, uint16_t rnti, uint32_t enb_cc_idx, uint32_t ri_value)
{
int ret = SRSLTE_ERROR;
log_h->step(tti);
int ret = -1;
srslte::rwlock_read_guard lock(rwlock);
if (ue_db.count(rnti)) {
scheduler.dl_ri_info(tti, rnti, cc_idx, ri_value);
scheduler.dl_ri_info(tti, rnti, enb_cc_idx, ri_value);
ue_db[rnti]->metrics_dl_ri(ri_value);
ret = 0;
ret = SRSLTE_SUCCESS;
} else {
Error("User rnti=0x%x not found\n", rnti);
}
return ret;
}
int mac::pmi_info(uint32_t tti, uint16_t rnti, uint32_t cc_idx, uint32_t pmi_value)
int mac::pmi_info(uint32_t tti, uint16_t rnti, uint32_t enb_cc_idx, uint32_t pmi_value)
{
int ret = SRSLTE_ERROR;
log_h->step(tti);
srslte::rwlock_read_guard lock(rwlock);
int ret = -1;
if (ue_db.count(rnti)) {
scheduler.dl_pmi_info(tti, rnti, cc_idx, pmi_value);
scheduler.dl_pmi_info(tti, rnti, enb_cc_idx, pmi_value);
ue_db[rnti]->metrics_dl_pmi(pmi_value);
ret = 0;
ret = SRSLTE_SUCCESS;
} else {
Error("User rnti=0x%x not found\n", rnti);
}
return ret;
}
int mac::cqi_info(uint32_t tti, uint16_t rnti, uint32_t cc_idx, uint32_t cqi_value)
int mac::cqi_info(uint32_t tti, uint16_t rnti, uint32_t enb_cc_idx, uint32_t cqi_value)
{
int ret = SRSLTE_ERROR;
log_h->step(tti);
int ret = -1;
srslte::rwlock_read_guard lock(rwlock);
if (ue_db.count(rnti)) {
scheduler.dl_cqi_info(tti, rnti, cc_idx, cqi_value);
scheduler.dl_cqi_info(tti, rnti, enb_cc_idx, cqi_value);
ue_db[rnti]->metrics_dl_cqi(cqi_value);
ret = 0;
ret = SRSLTE_SUCCESS;
} else {
Error("User rnti=0x%x not found\n", rnti);
}
return ret;
}
int mac::snr_info(uint32_t tti, uint16_t rnti, uint32_t cc_idx, float snr)
int mac::snr_info(uint32_t tti, uint16_t rnti, uint32_t enb_cc_idx, float snr)
{
int ret = SRSLTE_ERROR;
log_h->step(tti);
int ret = -1;
srslte::rwlock_read_guard lock(rwlock);
if (ue_db.count(rnti)) {
uint32_t cqi = srslte_cqi_from_snr(snr);
scheduler.ul_cqi_info(tti, rnti, cc_idx, cqi, 0);
ret = 0;
scheduler.ul_cqi_info(tti, rnti, enb_cc_idx, cqi, 0);
ret = SRSLTE_SUCCESS;
} else {
Error("User rnti=0x%x not found\n", rnti);
}
return ret;
}
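Note on indices: the PHY-facing callbacks above now carry the eNB-wide carrier index (enb_cc_idx), while the per-UE buffers are addressed by the UE-specific index (ue_cc_idx); crc_info() still hard-codes ue_cc_idx = 0 (see the FIXME). A mapping step like the hypothetical helper below would be needed once SCells carry traffic; the function name and the per-UE carrier list are assumptions for illustration, not srsLTE API.

// --- illustrative sketch, not part of this commit ---
// Hypothetical mapping from the eNB-wide carrier index (enb_cc_idx) to the
// UE-specific carrier index (ue_cc_idx, 0 = PCell).
#include <cstdint>
#include <cstdio>
#include <vector>

int enb_to_ue_cc_idx(const std::vector<uint32_t>& ue_carriers, uint32_t enb_cc_idx)
{
  // ue_carriers[ue_cc_idx] holds the eNB carrier index configured for that UE carrier
  for (uint32_t ue_cc_idx = 0; ue_cc_idx < ue_carriers.size(); ++ue_cc_idx) {
    if (ue_carriers[ue_cc_idx] == enb_cc_idx) {
      return static_cast<int>(ue_cc_idx);
    }
  }
  return -1; // carrier not configured for this UE
}

int main()
{
  std::vector<uint32_t> ue_carriers = {0, 2}; // PCell on eNB carrier 0, SCell on eNB carrier 2
  std::printf("enb_cc_idx=2 -> ue_cc_idx=%d\n", enb_to_ue_cc_idx(ue_carriers, 2)); // prints 1
  return 0;
}
// --- end sketch ---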
@@ -491,7 +503,7 @@ int mac::get_dl_sched(uint32_t tti, dl_sched_list_t& dl_sched_res_list)
log_h->step(tti);
for (uint32_t enb_cc_idx = 0; enb_cc_idx <= cell_config.size(); enb_cc_idx++) {
for (uint32_t enb_cc_idx = 0; enb_cc_idx < cell_config.size(); enb_cc_idx++) {
// Run scheduler with current info
sched_interface::dl_sched_res_t sched_result = {};
if (scheduler.dl_sched(tti, enb_cc_idx, sched_result) < 0) {
@@ -516,7 +528,7 @@ int mac::get_dl_sched(uint32_t tti, dl_sched_list_t& dl_sched_res_list)
dl_sched_res->pdsch[n].dci = sched_result.data[i].dci;
for (uint32_t tb = 0; tb < SRSLTE_MAX_TB; tb++) {
dl_sched_res->pdsch[n].softbuffer_tx[tb] = ue_db[rnti]->get_tx_softbuffer(sched_result.data[i].dci.pid, tb);
dl_sched_res->pdsch[n].softbuffer_tx[tb] = ue_db[rnti]->get_tx_softbuffer(sched_result.data[i].dci.ue_cc_idx, sched_result.data[i].dci.pid, tb);
if (sched_result.data[i].nof_pdu_elems[tb] > 0) {
/* Get PDU if it's a new transmission */
@@ -739,9 +751,9 @@ uint8_t* mac::assemble_rar(sched_interface::dl_sched_rar_grant_t* grants,
}
}
uint8_t* mac::assemble_si(const uint8_t cc_idx, const uint32_t sib_index)
uint8_t* mac::assemble_si(const uint8_t enb_cc_idx, const uint32_t sib_index)
{
uint8_t* sib_payload = rrc_h->read_pdu_bcch_dlsch(cc_idx, sib_index);
uint8_t* sib_payload = rrc_h->read_pdu_bcch_dlsch(enb_cc_idx, sib_index);
if (sib_payload == nullptr) {
// return MAC managed dummy buffer in this case
sib_payload = bcch_dlsch_payload;
@@ -751,63 +763,67 @@ uint8_t* mac::assemble_si(const uint8_t cc_idx, const uint32_t sib_index)
int mac::get_ul_sched(uint32_t tti, ul_sched_list_t& ul_sched_res_list)
{
ul_sched_t* ul_sched_res = &ul_sched_res_list[0];
log_h->step(tti);
if (!started) {
return 0;
}
// Run scheduler with current info
sched_interface::ul_sched_res_t sched_result = {};
if (scheduler.ul_sched(tti, 0, sched_result) < 0) {
Error("Running scheduler\n");
return SRSLTE_ERROR;
return SRSLTE_SUCCESS;
}
{
srslte::rwlock_read_guard lock(rwlock);
for (uint32_t enb_cc_idx = 0; enb_cc_idx < cell_config.size(); enb_cc_idx++) {
ul_sched_t* phy_ul_sched_res = &ul_sched_res_list[enb_cc_idx];
// Copy DCI grants
ul_sched_res->nof_grants = 0;
int n = 0;
for (uint32_t i = 0; i < sched_result.nof_dci_elems; i++) {
// Run scheduler with current info
sched_interface::ul_sched_res_t sched_result = {};
if (scheduler.ul_sched(tti, enb_cc_idx, sched_result) < 0) {
Error("Running scheduler\n");
return SRSLTE_ERROR;
}
if (sched_result.pusch[i].tbs > 0) {
// Get UE
uint16_t rnti = sched_result.pusch[i].dci.rnti;
{
srslte::rwlock_read_guard lock(rwlock);
if (ue_db.count(rnti)) {
// Copy grant info
ul_sched_res->pusch[n].current_tx_nb = sched_result.pusch[i].current_tx_nb;
ul_sched_res->pusch[n].needs_pdcch = sched_result.pusch[i].needs_pdcch;
ul_sched_res->pusch[n].dci = sched_result.pusch[i].dci;
ul_sched_res->pusch[n].softbuffer_rx = ue_db[rnti]->get_rx_softbuffer(tti);
if (sched_result.pusch[n].current_tx_nb == 0) {
srslte_softbuffer_rx_reset_tbs(ul_sched_res->pusch[n].softbuffer_rx, sched_result.pusch[i].tbs * 8);
// Copy DCI grants
phy_ul_sched_res->nof_grants = 0;
int n = 0;
for (uint32_t i = 0; i < sched_result.nof_dci_elems; i++) {
if (sched_result.pusch[i].tbs > 0) {
// Get UE
uint16_t rnti = sched_result.pusch[i].dci.rnti;
if (ue_db.count(rnti)) {
// Copy grant info
phy_ul_sched_res->pusch[n].current_tx_nb = sched_result.pusch[i].current_tx_nb;
phy_ul_sched_res->pusch[n].needs_pdcch = sched_result.pusch[i].needs_pdcch;
phy_ul_sched_res->pusch[n].dci = sched_result.pusch[i].dci;
phy_ul_sched_res->pusch[n].softbuffer_rx =
ue_db[rnti]->get_rx_softbuffer(sched_result.pusch[i].dci.ue_cc_idx, tti);
if (sched_result.pusch[n].current_tx_nb == 0) {
srslte_softbuffer_rx_reset_tbs(phy_ul_sched_res->pusch[n].softbuffer_rx, sched_result.pusch[i].tbs * 8);
}
phy_ul_sched_res->pusch[n].data =
ue_db[rnti]->request_buffer(sched_result.pusch[i].dci.ue_cc_idx, tti, sched_result.pusch[i].tbs);
phy_ul_sched_res->nof_grants++;
n++;
} else {
Warning("Invalid UL scheduling result. User 0x%x does not exist\n", rnti);
}
ul_sched_res->pusch[n].data = ue_db[rnti]->request_buffer(tti, sched_result.pusch[i].tbs);
ul_sched_res->nof_grants++;
n++;
} else {
Warning("Invalid UL scheduling result. User 0x%x does not exist\n", rnti);
Warning("Grant %d for rnti=0x%x has zero TBS\n", i, sched_result.pusch[i].dci.rnti);
}
} else {
Warning("Grant %d for rnti=0x%x has zero TBS\n", i, sched_result.pusch[i].dci.rnti);
}
}
// No more uses of ue_db beyond here
}
// No more uses of ue_db beyond here
}
// Copy PHICH actions
for (uint32_t i = 0; i < sched_result.nof_phich_elems; i++) {
ul_sched_res->phich[i].ack = sched_result.phich[i].phich == sched_interface::ul_sched_phich_t::ACK;
ul_sched_res->phich[i].rnti = sched_result.phich[i].rnti;
// Copy PHICH actions
for (uint32_t i = 0; i < sched_result.nof_phich_elems; i++) {
phy_ul_sched_res->phich[i].ack = sched_result.phich[i].phich == sched_interface::ul_sched_phich_t::ACK;
phy_ul_sched_res->phich[i].rnti = sched_result.phich[i].rnti;
}
phy_ul_sched_res->nof_phich = sched_result.nof_phich_elems;
}
ul_sched_res->nof_phich = sched_result.nof_phich_elems;
return SRSLTE_SUCCESS;
}

@@ -35,7 +35,7 @@
namespace srsenb {
ue::ue(uint16_t rnti_,
uint32_t nof_prb,
uint32_t nof_prb_,
sched_interface* sched_,
rrc_interface_mac* rrc_,
rlc_interface_mac* rlc_,
@@ -43,6 +43,7 @@ ue::ue(uint16_t rnti_,
uint32_t nof_rx_harq_proc_,
uint32_t nof_tx_harq_proc_) :
rnti(rnti_),
nof_prb(nof_prb_),
sched(sched_),
rrc(rrc_),
rlc(rlc_),
@@ -54,24 +55,10 @@ ue::ue(uint16_t rnti_,
nof_rx_harq_proc(nof_rx_harq_proc_),
nof_tx_harq_proc(nof_tx_harq_proc_)
{
bzero(&metrics, sizeof(mac_metrics_t));
bzero(&mutex, sizeof(pthread_mutex_t));
pthread_mutex_init(&mutex, NULL);
pdus.init(this, log_h);
softbuffer_tx.reserve(nof_tx_harq_proc);
softbuffer_rx.reserve(nof_rx_harq_proc);
pending_buffers.reserve(nof_rx_harq_proc);
for (int i = 0; i < nof_rx_harq_proc; i++) {
srslte_softbuffer_rx_init(&softbuffer_rx[i], nof_prb);
pending_buffers[i] = nullptr;
}
for (int i = 0; i < nof_tx_harq_proc; i++) {
srslte_softbuffer_tx_init(&softbuffer_tx[i], nof_prb);
}
// don't need to reset because just initiated the buffers
// Allocate buffer for PCell
allocate_cc_buffers();
// Set LCID group for SRB0 and SRB1
set_lcg(0, 0);
@@ -80,26 +67,71 @@ ue::ue(uint16_t rnti_,
ue::~ue()
{
for (int i = 0; i < nof_rx_harq_proc; i++) {
srslte_softbuffer_rx_free(&softbuffer_rx[i]);
// Free up all softbuffers for all CCs
for (auto cc : softbuffer_rx) {
for (auto buffer : cc) {
srslte_softbuffer_rx_free(&buffer);
}
}
for (int i = 0; i < nof_tx_harq_proc; i++) {
srslte_softbuffer_tx_free(&softbuffer_tx[i]);
for (auto cc : softbuffer_tx) {
for (auto buffer : cc) {
srslte_softbuffer_tx_free(&buffer);
}
}
pthread_mutex_destroy(&mutex);
}
void ue::reset()
{
bzero(&metrics, sizeof(mac_metrics_t));
metrics = {};
nof_failures = 0;
for (int i = 0; i < nof_rx_harq_proc; i++) {
srslte_softbuffer_rx_reset(&softbuffer_rx[i]);
for (auto cc : softbuffer_rx) {
for (auto buffer : cc) {
srslte_softbuffer_rx_reset(&buffer);
}
}
for (auto cc : softbuffer_tx) {
for (auto buffer : cc) {
srslte_softbuffer_tx_reset(&buffer);
}
}
for (int i = 0; i < nof_tx_harq_proc; i++) {
srslte_softbuffer_tx_reset(&softbuffer_tx[i]);
}
/**
* Allocate and initialize Tx and Rx softbuffers for additional carriers and
* append them to the current list of CC buffers, using the configured
* number of HARQ processes and the cell bandwidth (nof_prb).
*
* @param num_cc Number of carriers to add buffers for (default 1)
* @return Total number of carriers with allocated buffer lists
*/
uint32_t ue::allocate_cc_buffers(const uint32_t num_cc)
{
for (uint32_t i = 0; i < num_cc; ++i) {
// Create and init Rx buffers for this carrier
softbuffer_rx.emplace_back();
softbuffer_rx.back().resize(nof_rx_harq_proc);
for (auto& buffer : softbuffer_rx.back()) {
srslte_softbuffer_rx_init(&buffer, nof_prb);
}
pending_buffers.emplace_back();
pending_buffers.back().resize(nof_rx_harq_proc);
for (auto& buffer : pending_buffers.back()) {
buffer = nullptr;
}
// Create and init Tx buffers for this carrier
softbuffer_tx.emplace_back();
softbuffer_tx.back().resize(nof_tx_harq_proc);
for (auto& buffer : softbuffer_tx.back()) {
srslte_softbuffer_tx_init(&buffer, nof_prb);
}
// No reset needed, the buffers have just been initialized
}
return softbuffer_tx.size();
}
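As the comment above notes, this helper is invoked twice in the patch: from the constructor for the PCell, and from allocate_ce() with active_cc_list.size() - 1 when the SCell Activation CE is built, because the activation list includes the PCell whose buffers already exist. A toy check of that arithmetic (illustrative only; the container type is a stand-in, not the real CE structure):

// --- illustrative sketch, not part of this commit ---
#include <cassert>
#include <cstddef>
#include <vector>

int main()
{
  std::vector<bool> active_cc_list = {true, true, true}; // toy list: PCell + 2 activated SCells
  std::size_t nof_cc_buffer_sets = 1;                    // PCell set created in the ue() constructor

  // Mirrors allocate_cc_buffers(active_cc_list.size() - 1) in allocate_ce():
  nof_cc_buffer_sets += active_cc_list.size() - 1;

  // One softbuffer set per carrier in the activation list, PCell included.
  assert(nof_cc_buffer_sets == active_cc_list.size());
  return 0;
}
// --- end sketch ---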
void ue::start_pcap(srslte::mac_pcap* pcap_)
@@ -127,23 +159,23 @@ void ue::set_lcg(uint32_t lcid, uint32_t lcg)
lc_groups[lcg].push_back(lcid);
}
srslte_softbuffer_rx_t* ue::get_rx_softbuffer(uint32_t tti)
srslte_softbuffer_rx_t* ue::get_rx_softbuffer(const uint32_t cc_idx, const uint32_t tti)
{
return &softbuffer_rx[tti % nof_rx_harq_proc];
return &softbuffer_rx.at(cc_idx).at(tti % nof_rx_harq_proc);
}
srslte_softbuffer_tx_t* ue::get_tx_softbuffer(uint32_t harq_process, uint32_t tb_idx)
srslte_softbuffer_tx_t* ue::get_tx_softbuffer(const uint32_t cc_idx, const uint32_t harq_process, const uint32_t tb_idx)
{
return &softbuffer_tx[(harq_process * SRSLTE_MAX_TB + tb_idx) % nof_tx_harq_proc];
return &softbuffer_tx.at(cc_idx).at((harq_process * SRSLTE_MAX_TB + tb_idx) % nof_tx_harq_proc);
}
uint8_t* ue::request_buffer(uint32_t tti, uint32_t len)
uint8_t* ue::request_buffer(const uint32_t ue_cc_idx, const uint32_t tti, const uint32_t len)
{
uint8_t* ret = NULL;
uint8_t* ret = nullptr;
if (len > 0) {
if (!pending_buffers[tti % nof_rx_harq_proc]) {
if (!pending_buffers.at(ue_cc_idx).at(tti % nof_rx_harq_proc)) {
ret = pdus.request(len);
pending_buffers[tti % nof_rx_harq_proc] = ret;
pending_buffers.at(ue_cc_idx).at(tti % nof_rx_harq_proc) = ret;
} else {
log_h->console("Error requesting buffer for pid %d, not pushed yet\n", tti % nof_rx_harq_proc);
log_h->error("Requesting buffer for pid %d, not pushed yet\n", tti % nof_rx_harq_proc);
@@ -282,23 +314,24 @@ void ue::process_pdu(uint8_t* pdu, uint32_t nof_bytes, srslte::pdu_queue::channel_t channel)
Debug("MAC PDU processed\n");
}
void ue::deallocate_pdu(uint32_t tti)
void ue::deallocate_pdu(const uint32_t ue_cc_idx, const uint32_t tti)
{
if (pending_buffers[tti % nof_rx_harq_proc]) {
pdus.deallocate(pending_buffers[tti % nof_rx_harq_proc]);
pending_buffers[tti % nof_rx_harq_proc] = NULL;
if (pending_buffers.at(ue_cc_idx).at(tti % nof_rx_harq_proc)) {
pdus.deallocate(pending_buffers.at(ue_cc_idx).at(tti % nof_rx_harq_proc));
pending_buffers.at(ue_cc_idx).at(tti % nof_rx_harq_proc) = nullptr;
} else {
log_h->console("Error deallocating buffer for pid=%d. Not requested\n", tti % nof_rx_harq_proc);
log_h->console(
"Error deallocating buffer for ue_cc_idx=%d, pid=%d. Not requested\n", ue_cc_idx, tti % nof_rx_harq_proc);
}
}
void ue::push_pdu(uint32_t tti, uint32_t len)
void ue::push_pdu(const uint32_t ue_cc_idx, const uint32_t tti, uint32_t len)
{
if (pending_buffers[tti % nof_rx_harq_proc]) {
pdus.push(pending_buffers[tti % nof_rx_harq_proc], len);
pending_buffers[tti % nof_rx_harq_proc] = NULL;
if (pending_buffers.at(ue_cc_idx).at(tti % nof_rx_harq_proc)) {
pdus.push(pending_buffers.at(ue_cc_idx).at(tti % nof_rx_harq_proc), len);
pending_buffers.at(ue_cc_idx).at(tti % nof_rx_harq_proc) = nullptr;
} else {
log_h->console("Error pushing buffer for pid=%d. Not requested\n", tti % nof_rx_harq_proc);
log_h->console("Error pushing buffer for ue_cc_idx=%d, pid=%d. Not requested\n", ue_cc_idx, tti % nof_rx_harq_proc);
}
}
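The functions above form the per-carrier lifecycle of UL PDU buffers: request_buffer() reserves the slot pending_buffers[ue_cc_idx][tti % nof_rx_harq_proc] when a grant is issued, and crc_info() later either push_pdu()s the buffer into the processing queue (CRC OK) or deallocate_pdu()s it (CRC failure). A self-contained sketch of that bookkeeping, with a plain new/delete pool standing in for srslte::pdu_queue:

// --- illustrative sketch, not part of this commit ---
#include <cstdint>
#include <cstdio>
#include <vector>

class toy_ul_buffers {
public:
  toy_ul_buffers(uint32_t nof_cc, uint32_t nof_rx_harq_proc_) :
    nof_rx_harq_proc(nof_rx_harq_proc_),
    pending(nof_cc, std::vector<uint8_t*>(nof_rx_harq_proc_, nullptr))
  {}

  // Like ue::request_buffer(): reserve a buffer for the grant of this TTI/carrier.
  uint8_t* request(uint32_t ue_cc_idx, uint32_t tti, uint32_t len)
  {
    uint8_t*& slot = pending.at(ue_cc_idx).at(tti % nof_rx_harq_proc);
    if (slot != nullptr) {
      std::printf("slot cc=%u pid=%u still busy\n", ue_cc_idx, tti % nof_rx_harq_proc);
      return nullptr;
    }
    slot = new uint8_t[len]; // stands in for pdus.request(len)
    return slot;
  }

  // Like ue::push_pdu() on CRC OK or ue::deallocate_pdu() on failure: both clear the slot.
  void release(uint32_t ue_cc_idx, uint32_t tti)
  {
    uint8_t*& slot = pending.at(ue_cc_idx).at(tti % nof_rx_harq_proc);
    delete[] slot; // real code hands the buffer to pdus.push()/pdus.deallocate()
    slot = nullptr;
  }

private:
  uint32_t nof_rx_harq_proc;
  std::vector<std::vector<uint8_t*>> pending; // [ue_cc_idx][pid]
};

int main()
{
  toy_ul_buffers bufs(2, 8);
  uint8_t* data = bufs.request(1, 1234, 320); // grant on the SCell (ue_cc_idx = 1)
  if (data != nullptr) {
    bufs.release(1, 1234); // CRC outcome known, slot freed either way
  }
  return 0;
}
// --- end sketch ---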
@@ -435,6 +468,8 @@ void ue::allocate_ce(srslte::sch_pdu* pdu, uint32_t lcid)
}
if (pdu->get()->set_scell_activation_cmd(active_cc_list)) {
Info("CE: Added SCell Activation CE.\n");
// Allocate and initialize Rx/Tx softbuffers for new carriers (exclude PCell)
allocate_cc_buffers(active_cc_list.size() - 1);
} else {
Error("CE: Setting SCell Activation CE\n");
}
@@ -457,8 +492,8 @@ uint8_t* ue::generate_pdu(uint32_t harq_pid,
uint32_t nof_pdu_elems,
uint32_t grant_size)
{
uint8_t* ret = NULL;
pthread_mutex_lock(&mutex);
std::lock_guard<std::mutex> lock(mutex);
uint8_t* ret = nullptr;
if (rlc) {
tx_payload_buffer[harq_pid][tb_idx].clear();
mac_msg_dl.init_tx(&tx_payload_buffer[harq_pid][tb_idx], grant_size, false);
@@ -469,14 +504,10 @@ uint8_t* ue::generate_pdu(uint32_t harq_pid,
allocate_ce(&mac_msg_dl, pdu[i].lcid);
}
}
ret = mac_msg_dl.write_packet(log_h);
} else {
std::cout << "Error ue not configured (must call config() first" << std::endl;
}
pthread_mutex_unlock(&mutex);
return ret;
}
@@ -485,8 +516,8 @@ uint8_t* ue::generate_mch_pdu(uint32_t harq_pid,
uint32_t nof_pdu_elems,
uint32_t grant_size)
{
uint8_t* ret = NULL;
pthread_mutex_lock(&mutex);
std::lock_guard<std::mutex> lock(mutex);
uint8_t* ret = nullptr;
tx_payload_buffer[harq_pid][0].clear();
mch_mac_msg_dl.init_tx(&tx_payload_buffer[harq_pid][0], grant_size);
@@ -504,7 +535,6 @@ uint8_t* ue::generate_mch_pdu(uint32_t harq_pid,
}
ret = mch_mac_msg_dl.write_packet(log_h);
pthread_mutex_unlock(&mutex);
return ret;
}
@@ -519,7 +549,7 @@ void ue::metrics_read(mac_metrics_t* metrics_)
phr_counter = 0;
dl_cqi_counter = 0;
bzero(&metrics, sizeof(mac_metrics_t));
metrics = {};
}
void ue::metrics_phr(float phr)
