/**
 *
 * \section COPYRIGHT
 *
 * Copyright 2013-2017 Software Radio Systems Limited
 *
 * \section LICENSE
 *
 * This file is part of srsLTE.
 *
 * srsUE is free software: you can redistribute it and/or modify
 * it under the terms of the GNU Affero General Public License as
 * published by the Free Software Foundation, either version 3 of
 * the License, or (at your option) any later version.
 *
 * srsUE is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU Affero General Public License for more details.
 *
 * A copy of the GNU Affero General Public License can be found in
 * the LICENSE file in the top-level directory of this distribution
 * and at http://www.gnu.org/licenses/.
 *
 */

#include <string.h>

#include "srslte/srslte.h"
#include "srslte/common/pdu.h"
#include "srsenb/hdr/mac/scheduler_ue.h"
#include "srsenb/hdr/mac/scheduler.h"

#define Error(fmt, ...)   log_h->error(fmt, ##__VA_ARGS__)
#define Warning(fmt, ...) log_h->warning(fmt, ##__VA_ARGS__)
#define Info(fmt, ...)    log_h->info(fmt, ##__VA_ARGS__)
#define Debug(fmt, ...)   log_h->debug(fmt, ##__VA_ARGS__)

/******************************************************
 *                    UE class                        *
 ******************************************************/

namespace srsenb {

/*******************************************************
 *
 * Initialization and configuration functions
 *
 *******************************************************/

sched_ue::sched_ue() : dl_next_alloc(NULL), ul_next_alloc(NULL), has_pucch(false), power_headroom(0),
                       rnti(0), max_mcs_dl(0), max_mcs_ul(0), fixed_mcs_ul(0), fixed_mcs_dl(0),
                       phy_config_dedicated_enabled(false)
{
  log_h = NULL;
  bzero(&cell, sizeof(cell));
  bzero(&lch, sizeof(lch));
  bzero(&dci_locations, sizeof(dci_locations));
  bzero(&dl_harq, sizeof(dl_harq));
  bzero(&ul_harq, sizeof(ul_harq));
  pthread_mutex_init(&mutex, NULL);
  reset();
}

sched_ue::~sched_ue()
{
  // Wait for any in-flight call holding the mutex before destroying it
  pthread_mutex_lock(&mutex);
  pthread_mutex_unlock(&mutex);
  pthread_mutex_destroy(&mutex);
}

void sched_ue::set_cfg(uint16_t rnti_, sched_interface::ue_cfg_t *cfg_, sched_interface::cell_cfg_t *cell_cfg,
                       srslte_regs_t *regs, srslte::log *log_h_)
{
  reset();

  pthread_mutex_lock(&mutex);
  rnti  = rnti_;
  log_h = log_h_;
  memcpy(&cell, &cell_cfg->cell, sizeof(srslte_cell_t));
  P = srslte_ra_type0_P(cell.nof_prb);

  max_mcs_dl = 28;
  max_mcs_ul = 28;

  if (cfg_) {
    memcpy(&cfg, cfg_, sizeof(sched_interface::ue_cfg_t));
  }

  Info("SCHED: Added user rnti=0x%x\n", rnti);

  // Config HARQ processes
  for (int i=0;i<SCHED_MAX_HARQ_PROC;i++) {
    dl_harq[i].config(i, cfg.maxharq_tx, log_h);
    ul_harq[i].config(i, cfg.maxharq_tx, log_h);
  }

  // Generate allowed CCE locations
  if (regs) {
    for (int cfi=0;cfi<3;cfi++) {
      for (int sf_idx=0;sf_idx<10;sf_idx++) {
        sched::generate_cce_location(regs, &dci_locations[cfi][sf_idx], cfi+1, sf_idx, rnti);
      }
    }
  }

  pthread_mutex_unlock(&mutex);
}

void sched_ue::ul_recv_len(uint32_t lcid, uint32_t len)
{
  pthread_mutex_lock(&mutex);

  // Remove header overhead from the reported length
  if (len > 4) {
    len -= 4;
  }
  if (lcid < sched_interface::MAX_LC) {
    if (bearer_is_ul(&lch[lcid])) {
      if (lch[lcid].bsr > (int) len) {
        lch[lcid].bsr -= len;
      } else {
        lch[lcid].bsr = 0;
      }
    }
  }
  Debug("SCHED: recv_len=%d, lcid=%d, bsr={%d,%d,%d,%d}\n",
        len, lcid, lch[0].bsr, lch[1].bsr, lch[2].bsr, lch[3].bsr);
  pthread_mutex_unlock(&mutex);
}

void sched_ue::set_ul_crc(uint32_t tti, bool crc_res)
{
  pthread_mutex_lock(&mutex);
  get_ul_harq(tti)->set_ack(0, crc_res);
  pthread_mutex_unlock(&mutex);
}

void sched_ue::set_dl_ri(uint32_t tti, uint32_t ri)
{
  pthread_mutex_lock(&mutex);
  dl_ri     = ri;
  dl_ri_tti = tti;
  pthread_mutex_unlock(&mutex);
}

void sched_ue::set_dl_pmi(uint32_t tti, uint32_t pmi)
{
  pthread_mutex_lock(&mutex);
  dl_pmi     = pmi;
  dl_pmi_tti = tti;
  pthread_mutex_unlock(&mutex);
}

void sched_ue::set_dl_cqi(uint32_t tti, uint32_t cqi)
{
  pthread_mutex_lock(&mutex);
  dl_cqi     = cqi;
  dl_cqi_tti = tti;
  pthread_mutex_unlock(&mutex);
}

void sched_ue::set_dl_ant_info(asn1::rrc::phys_cfg_ded_s::ant_info_c_* d)
{
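  // Cache the UE-dedicated antenna configuration signalled by RRC; it is not read in
  // this file and appears to be kept for the DL transmission-mode (DCI format 1 vs
  // 2/2A) logic elsewhere in the scheduler.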
  pthread_mutex_lock(&mutex);
  dl_ant_info = *d;
  pthread_mutex_unlock(&mutex);
}

void sched_ue::set_ul_cqi(uint32_t tti, uint32_t cqi, uint32_t ul_ch_code)
{
  pthread_mutex_lock(&mutex);
  ul_cqi     = cqi;
  ul_cqi_tti = tti;
  pthread_mutex_unlock(&mutex);
}

void sched_ue::tpc_inc()
{
  pthread_mutex_lock(&mutex);
  if (power_headroom > 0) {
    next_tpc_pusch = 3;
    next_tpc_pucch = 3;
  }
  log_h->info("SCHED: Set TPC=%d for rnti=0x%x\n", next_tpc_pucch, rnti);
  pthread_mutex_unlock(&mutex);
}

void sched_ue::tpc_dec()
{
  pthread_mutex_lock(&mutex);
  next_tpc_pusch = 0;
  next_tpc_pucch = 0;
  log_h->info("SCHED: Set TPC=%d for rnti=0x%x\n", next_tpc_pucch, rnti);
  pthread_mutex_unlock(&mutex);
}

/*******************************************************
 *
 * Functions used to generate DCI grants
 *
 *******************************************************/

// Generates a Format1 grant
int sched_ue::generate_format1(dl_harq_proc *h,
                               sched_interface::dl_sched_data_t *data,
                               uint32_t tti,
                               uint32_t cfi)
{
  pthread_mutex_lock(&mutex);

  srslte_ra_dl_dci_t *dci = &data->dci;
  bzero(dci, sizeof(srslte_ra_dl_dci_t));

  uint32_t sf_idx = tti%10;

  int mcs = 0;
  int tbs = 0;

  dci->alloc_type              = SRSLTE_RA_ALLOC_TYPE0;
  dci->type0_alloc.rbg_bitmask = h->get_rbgmask();

  // If this is the first transmission for this UE, make room for the MAC Contention Resolution ID
  bool need_conres_ce = false;
  if (is_first_dl_tx()) {
    need_conres_ce = true;
  }
  if (h->is_empty(0)) {

    uint32_t req_bytes = get_pending_dl_new_data_unlocked(tti);

    uint32_t nof_prb = format1_count_prb(h->get_rbgmask(), cell.nof_prb);
    srslte_ra_dl_grant_t grant;
    srslte_ra_dl_dci_to_grant_prb_allocation(dci, &grant, cell.nof_prb);
    uint32_t nof_ctrl_symbols = cfi + (cell.nof_prb < 10 ? 1 : 0);
    uint32_t nof_re = srslte_ra_dl_grant_nof_re(&grant, cell, sf_idx, nof_ctrl_symbols);
    if (need_conres_ce and cell.nof_prb < 10) {
      // SRB0 Tx. Use a higher MCS for the PRACH to fit in 6 PRBs
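      // Note: srslte_ra_tbs_from_idx() returns the transport block size in bits,
      // so the value is divided by 8 to obtain bytes.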
      tbs = srslte_ra_tbs_from_idx(srslte_ra_dl_tbs_idx_from_mcs(4), nof_prb) / 8;
      mcs = 4;
    } else if (fixed_mcs_dl < 0) {
      tbs = alloc_tbs_dl(nof_prb, nof_re, req_bytes, &mcs);
    } else {
      tbs = srslte_ra_tbs_from_idx(srslte_ra_dl_tbs_idx_from_mcs(fixed_mcs_dl), nof_prb) / 8;
      mcs = fixed_mcs_dl;
    }
    h->new_tx(0, tti, mcs, tbs, data->dci_location.ncce);

    // Allocate MAC ConRes CE
    if (need_conres_ce) {
      data->pdu[0][0].lcid = srslte::sch_subh::CON_RES_ID;
      data->nof_pdu_elems[0]++;
      Info("SCHED: Added MAC Contention Resolution CE for rnti=0x%x\n", rnti);
    }

    int rem_tbs = tbs;
    int x = 0;
    do {
      x = alloc_pdu(rem_tbs, &data->pdu[0][data->nof_pdu_elems[0]]);
      rem_tbs -= x;
      if (x) {
        data->nof_pdu_elems[0]++;
      }
    } while (rem_tbs > 0 && x > 0);

    Debug("SCHED: Alloc format1 new mcs=%d, tbs=%d, nof_prb=%d, req_bytes=%d\n", mcs, tbs, nof_prb, req_bytes);
  } else {
    h->new_retx(0, tti, &mcs, &tbs);
    Debug("SCHED: Alloc format1 previous mcs=%d, tbs=%d\n", mcs, tbs);
  }

  data->rnti = rnti;

  if (tbs > 0) {
    dci->harq_process = h->get_id();
    dci->mcs_idx      = (uint32_t) mcs;
    dci->rv_idx       = sched::get_rvidx(h->nof_retx(0));
    dci->ndi          = h->get_ndi(0);
    dci->tpc_pucch    = (uint8_t) next_tpc_pucch;
    next_tpc_pucch    = 1;
    data->tbs[0]      = (uint32_t) tbs;
    data->tbs[1]      = 0;
    dci->tb_en[0]     = true;
    dci->tb_en[1]     = false;
  }

  pthread_mutex_unlock(&mutex);

  return tbs;
}

// Generates a Format2a grant
int sched_ue::generate_format2a(dl_harq_proc *h,
                                sched_interface::dl_sched_data_t *data,
                                uint32_t tti,
                                uint32_t cfi)
{
  pthread_mutex_lock(&mutex);
  int ret = generate_format2a_unlocked(h, data, tti, cfi);
  pthread_mutex_unlock(&mutex);
  return ret;
}

// Generates a Format2a grant (lock-free implementation)
int sched_ue::generate_format2a_unlocked(dl_harq_proc *h,
                                         sched_interface::dl_sched_data_t *data,
                                         uint32_t tti,
                                         uint32_t cfi)
{
  bool tb_en[SRSLTE_MAX_TB] = {false};

  srslte_ra_dl_dci_t *dci = &data->dci;
  bzero(dci, sizeof(srslte_ra_dl_dci_t));

  uint32_t sf_idx = tti%10;

  dci->alloc_type              = SRSLTE_RA_ALLOC_TYPE0;
  dci->type0_alloc.rbg_bitmask = h->get_rbgmask();

  uint32_t nof_prb          = format1_count_prb(h->get_rbgmask(), cell.nof_prb);
  uint32_t nof_ctrl_symbols = cfi + (cell.nof_prb < 10 ? 1 : 0);
  srslte_ra_dl_grant_t grant;
  srslte_ra_dl_dci_to_grant_prb_allocation(dci, &grant, cell.nof_prb);
  uint32_t nof_re = srslte_ra_dl_grant_nof_re(&grant, cell, sf_idx, nof_ctrl_symbols);

  bool no_retx = true;

  if (dl_ri == 0) {
    if (h->is_empty(1)) {
      /* One layer, tb1 buffer is empty, send tb0 only */
      tb_en[0] = true;
    } else {
      /* One layer, tb1 buffer is not empty, send tb1 only */
      tb_en[1] = true;
    }
  } else {
    /* Two layers, retransmit the TBs that have not been acknowledged */
    for (uint32_t tb = 0; tb < SRSLTE_MAX_TB; tb++) {
      if (!h->is_empty(tb)) {
        tb_en[tb] = true;
        no_retx   = false;
      }
    }
    /* Two layers, no retransmissions... */
    if (no_retx) {
      tb_en[0] = true;
      tb_en[1] = true;
    }
  }

  for (uint32_t tb = 0; tb < SRSLTE_MAX_TB; tb++) {
    uint32_t req_bytes = get_pending_dl_new_data_unlocked(tti);
    int mcs = 0;
    int tbs = 0;

    if (!h->is_empty(tb)) {
      h->new_retx(tb, tti, &mcs, &tbs);
      Debug("SCHED: Alloc format2/2a previous mcs=%d, tbs=%d\n", mcs, tbs);
    } else if (tb_en[tb] && req_bytes && no_retx) {
      if (fixed_mcs_dl < 0) {
        tbs = alloc_tbs_dl(nof_prb, nof_re, req_bytes, &mcs);
      } else {
        tbs = srslte_ra_tbs_from_idx((uint32_t) srslte_ra_dl_tbs_idx_from_mcs((uint32_t) fixed_mcs_dl), nof_prb) / 8;
        mcs = fixed_mcs_dl;
      }
      h->new_tx(tb, tti, mcs, tbs, data->dci_location.ncce);

      int rem_tbs = tbs;
      int x = 0;
      do {
        x = alloc_pdu(rem_tbs, &data->pdu[tb][data->nof_pdu_elems[tb]]);
        rem_tbs -= x;
        if (x) {
          data->nof_pdu_elems[tb]++;
        }
      } while (rem_tbs > 0 && x > 0);

      Debug("SCHED: Alloc format2/2a new mcs=%d, tbs=%d, nof_prb=%d, req_bytes=%d\n", mcs, tbs, nof_prb, req_bytes);
    }

    /* Fill DCI TB dedicated fields */
    if (tbs > 0) {
      if (tb == 0) {
        dci->mcs_idx = (uint32_t) mcs;
        dci->rv_idx  = sched::get_rvidx(h->nof_retx(tb));
        dci->ndi     = h->get_ndi(tb);
      } else {
        dci->mcs_idx_1 = (uint32_t) mcs;
        dci->rv_idx_1  = sched::get_rvidx(h->nof_retx(tb));
        dci->ndi_1     = h->get_ndi(tb);
      }
      data->tbs[tb]  = (uint32_t) tbs;
      dci->tb_en[tb] = true;
    } else {
      data->tbs[tb]  = 0;
      dci->tb_en[tb] = false;
    }
  }

  /* Fill common fields */
  data->rnti        = rnti;
  dci->harq_process = h->get_id();
  dci->tpc_pucch    = (uint8_t) next_tpc_pucch;
  next_tpc_pucch    = 1;

  int ret = data->tbs[0] + data->tbs[1];
  return ret;
}

// Generates a Format2 grant
int sched_ue::generate_format2(dl_harq_proc *h,
                               sched_interface::dl_sched_data_t *data,
                               uint32_t tti,
                               uint32_t cfi)
{
  pthread_mutex_lock(&mutex);

  /* Call Format 2a (common) */
  int ret = generate_format2a_unlocked(h, data, tti, cfi);

  /* Compute precoding information */
  if (SRSLTE_RA_DL_GRANT_NOF_TB(&data->dci) == 1) {
    data->dci.pinfo = (uint8_t) (dl_pmi + 1) % (uint8_t) 5;
  } else {
    data->dci.pinfo = (uint8_t) (dl_pmi & 1);
  }

  pthread_mutex_unlock(&mutex);

  return ret;
}

int sched_ue::generate_format0(ul_harq_proc *h,
                               sched_interface::ul_sched_data_t *data,
                               uint32_t tti,
                               bool cqi_request)
{
  pthread_mutex_lock(&mutex);

  srslte_ra_ul_dci_t *dci = &data->dci;
  bzero(dci, sizeof(srslte_ra_ul_dci_t));

  int mcs = 0;
  int tbs = 0;

  ul_harq_proc::ul_alloc_t allocation = h->get_alloc();

  bool is_newtx = true;
  if (h->get_rar_mcs(&mcs)) {
    tbs = srslte_ra_tbs_from_idx(srslte_ra_ul_tbs_idx_from_mcs(mcs), allocation.L) / 8;
    h->new_tx(tti, mcs, tbs);
  } else if (h->is_empty(0)) {

    uint32_t req_bytes = get_pending_ul_new_data_unlocked(tti);

    uint32_t N_srs  = 0;
    uint32_t nof_re = (2*(SRSLTE_CP_NSYMB(cell.cp)-1) - N_srs)*allocation.L*SRSLTE_NRE;
    if (fixed_mcs_ul < 0) {
      tbs = alloc_tbs_ul(allocation.L, nof_re, req_bytes, &mcs);
    } else {
      tbs = srslte_ra_tbs_from_idx(srslte_ra_ul_tbs_idx_from_mcs(fixed_mcs_ul), allocation.L) / 8;
      mcs = fixed_mcs_ul;
    }
    h->new_tx(tti, mcs, tbs);

  } else {
    h->new_retx(0, tti, &mcs, NULL);
    is_newtx = false;
    tbs = srslte_ra_tbs_from_idx(srslte_ra_ul_tbs_idx_from_mcs(mcs), allocation.L) / 8;
  }

  data->rnti = rnti;
  data->tbs  = tbs;

  if (tbs > 0) {
    dci->type2_alloc.L_crb    = allocation.L;
    dci->type2_alloc.RB_start = allocation.RB_start;
    dci->rv_idx = sched::get_rvidx(h->nof_retx(0));
    if (!is_newtx && h->is_adaptive_retx()) {
      dci->mcs_idx = 28 + dci->rv_idx;
    } else {
      dci->mcs_idx = mcs;
    }
    dci->ndi         = h->get_ndi(0);
    dci->cqi_request = cqi_request;
    dci->freq_hop_fl = srslte_ra_ul_dci_t::SRSLTE_RA_PUSCH_HOP_DISABLED;
    dci->tpc_pusch   = next_tpc_pusch;
    next_tpc_pusch   = 1;
  }
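  // Note on TPC: the pending command is consumed once per grant; resetting
  // next_tpc_pusch to 1 encodes "0 dB" (no change) in the accumulated TPC
  // mapping of TS 36.213 Sec. 5.1.1.1, so power adjustments are sent only once.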
  pthread_mutex_unlock(&mutex);

  return tbs;
}

/*******************************************************
 *
 * Functions used by scheduler or scheduler metric objects
 *
 *******************************************************/

bool sched_ue::bearer_is_ul(ue_bearer_t *lch) {
  return lch->cfg.direction == sched_interface::ue_bearer_cfg_t::UL ||
         lch->cfg.direction == sched_interface::ue_bearer_cfg_t::BOTH;
}

bool sched_ue::bearer_is_dl(ue_bearer_t *lch) {
  return lch->cfg.direction == sched_interface::ue_bearer_cfg_t::DL ||
         lch->cfg.direction == sched_interface::ue_bearer_cfg_t::BOTH;
}

uint32_t sched_ue::get_max_retx() {
  return cfg.maxharq_tx;
}

bool sched_ue::is_first_dl_tx()
{
  for (int i=0;i<SCHED_MAX_HARQ_PROC;i++) {
    if (dl_harq[i].nof_tx(0) > 0) {
      return false;
    }
  }
  return true;
}

bool sched_ue::needs_cqi(uint32_t tti, bool will_be_sent)
{
  pthread_mutex_lock(&mutex);
  bool ret = needs_cqi_unlocked(tti, will_be_sent);
  pthread_mutex_unlock(&mutex);
  return ret;
}

// Private lock-free implementation
bool sched_ue::needs_cqi_unlocked(uint32_t tti, bool will_be_sent)
{
  bool ret = false;
  if (phy_config_dedicated_enabled &&
      cfg.aperiodic_cqi_period &&
      get_pending_dl_new_data_unlocked(tti) > 0)
  {
    uint32_t interval = srslte_tti_interval(tti, dl_cqi_tti);
    bool needscqi = interval >= cfg.aperiodic_cqi_period;
    if (needscqi) {
      uint32_t interval_sent = srslte_tti_interval(tti, cqi_request_tti);
      if (interval_sent >= 16) {
        if (will_be_sent) {
          cqi_request_tti = tti;
        }
        Debug("SCHED: Needs_cqi, last_sent=%d, will_be_sent=%d\n", cqi_request_tti, will_be_sent);
        ret = true;
      }
    }
  }
  return ret;
}

uint32_t sched_ue::get_pending_dl_new_data(uint32_t tti)
{
  pthread_mutex_lock(&mutex);
  uint32_t pending_data = get_pending_dl_new_data_unlocked(tti);
  pthread_mutex_unlock(&mutex);
  return pending_data;
}

/// Use this function in the dl-metric to get the bytes to be scheduled. It accounts for the UE data,
/// the RAR resources, and headers
/// \param tti
/// \return number of bytes to be allocated
uint32_t sched_ue::get_pending_dl_new_data_total(uint32_t tti)
{
  pthread_mutex_lock(&mutex);
  uint32_t req_bytes = get_pending_dl_new_data_unlocked(tti);
  if (req_bytes > 0) {
    req_bytes += (req_bytes < 128) ? 2 : 3; // consider the MAC header
    if (is_first_dl_tx()) {
      req_bytes += 6; // count for RAR
    }
  }
  pthread_mutex_unlock(&mutex);
  return req_bytes;
}

// Private lock-free implementation
uint32_t sched_ue::get_pending_dl_new_data_unlocked(uint32_t tti)
{
  uint32_t pending_data = 0;
  for (int i=0;i<sched_interface::MAX_LC;i++) {
    if (bearer_is_dl(&lch[i])) {
      pending_data += lch[i].buf_retx + lch[i].buf_tx;
    }
  }
  return pending_data;
}

uint32_t sched_ue::get_pending_ul_new_data(uint32_t tti)
{
  pthread_mutex_lock(&mutex);
  uint32_t pending_data = get_pending_ul_new_data_unlocked(tti);
  pthread_mutex_unlock(&mutex);
  return pending_data;
}

// Private lock-free implementation
uint32_t sched_ue::get_pending_ul_new_data_unlocked(uint32_t tti)
{
  uint32_t pending_data = 0;
  for (int i=0;i<sched_interface::MAX_LC;i++) {
    if (bearer_is_ul(&lch[i])) {
      pending_data += lch[i].bsr;
    }
  }
  if (!pending_data && is_sr_triggered()) {
    return 512;
  }
  if (!pending_data && needs_cqi_unlocked(tti, false)) {
    return 128;
  }
  // Subtract the data already granted but not yet received (old BSR)
  uint32_t pending_ul_data = get_pending_ul_old_data_unlocked();
  if (pending_data > pending_ul_data) {
    pending_data -= pending_ul_data;
  } else {
    pending_data = 0;
  }
  if (pending_data) {
    Debug("SCHED: pending_data=%d, pending_ul_data=%d, bsr={%d,%d,%d,%d}\n",
          pending_data, pending_ul_data, lch[0].bsr, lch[1].bsr, lch[2].bsr, lch[3].bsr);
  }
  return pending_data;
}

// Private lock-free implementation
uint32_t sched_ue::get_pending_ul_old_data_unlocked()
{
  uint32_t pending_data = 0;
  for (int i=0;i<SCHED_MAX_HARQ_PROC;i++) {
    pending_data += ul_harq[i].get_pending_data();
  }
  return pending_data;
}

uint32_t sched_ue::get_required_prb_dl(uint32_t req_bytes, uint32_t nof_ctrl_symbols)
{
  pthread_mutex_lock(&mutex);

  int mcs = 0;
  uint32_t nbytes = 0;
  uint32_t n = 0;

  for (n=1; n < cell.nof_prb && nbytes < req_bytes; n++) {
    uint32_t nof_re = srslte_ra_dl_approx_nof_re(cell, n, nof_ctrl_symbols);
    int tbs = 0;
    if (fixed_mcs_dl < 0) {
      tbs = alloc_tbs_dl(n, nof_re, 0, &mcs);
    } else {
      tbs = srslte_ra_tbs_from_idx(srslte_ra_dl_tbs_idx_from_mcs(fixed_mcs_dl), n) / 8;
    }
    if (tbs > 0) {
      nbytes = tbs;
    } else if (tbs < 0) {
      pthread_mutex_unlock(&mutex);
      return 0;
    }
  }

  pthread_mutex_unlock(&mutex);
  return n;
}

uint32_t sched_ue::get_required_prb_ul(uint32_t req_bytes)
{
  int mcs = 0;
  int tbs = 0;
  uint32_t nbytes = 0;
  uint32_t N_srs = 0;

  uint32_t n = 0;
  if (req_bytes == 0) {
    return 0;
  }

  pthread_mutex_lock(&mutex);

  for (n=1; n < cell.nof_prb && nbytes < req_bytes; n++) {
    uint32_t nof_re = (2*(SRSLTE_CP_NSYMB(cell.cp)-1) - N_srs)*n*SRSLTE_NRE;
    if (fixed_mcs_ul < 0) {
      tbs = alloc_tbs_ul(n, nof_re, 0, &mcs);
    } else {
      tbs = srslte_ra_tbs_from_idx(srslte_ra_ul_tbs_idx_from_mcs(fixed_mcs_ul), n) / 8;
    }
    if (tbs > 0) {
      nbytes = tbs;
    }
  }

  // PUSCH DFT precoding only supports certain PRB sizes; round n up to the next valid one
  while (!srslte_dft_precoding_valid_prb(n) && n <= cell.nof_prb) {
    n++;
  }

  pthread_mutex_unlock(&mutex);
  return n;
}

bool sched_ue::is_sr_triggered()
{
  return sr;
}

void sched_ue::reset_timeout_dl_harq(uint32_t tti) {
  for (int i=0;i<SCHED_MAX_HARQ_PROC;i++) {
    if (!(dl_harq[i].is_empty(0) && dl_harq[i].is_empty(1))) {
      if (srslte_tti_interval(tti, dl_harq[i].get_tti()) > 50) {
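        // The process has been busy for more than 50 TTIs (50 ms), which suggests the
        // ACK/NACK feedback was lost; recover the process by resetting both TBs.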
        log_h->info("SCHED: pid=%d is old. tti_pid=%d, now is %d, resetting\n", i, dl_harq[i].get_tti(), tti);
        dl_harq[i].reset(0);
        dl_harq[i].reset(1);
      }
    }
  }
}

/* Gets HARQ process with oldest pending retx */
dl_harq_proc* sched_ue::get_pending_dl_harq(uint32_t tti)
{
#if ASYNC_DL_SCHED

  pthread_mutex_lock(&mutex);

  int oldest_idx = -1;
  uint32_t oldest_tti = 0;
  for (int i=0;i<SCHED_MAX_HARQ_PROC;i++) {
    if (dl_harq[i].has_pending_retx(0, tti) || dl_harq[i].has_pending_retx(1, tti)) {
      uint32_t x = srslte_tti_interval(tti, dl_harq[i].get_tti());
      if (x > oldest_tti) {
        oldest_idx = i;
        oldest_tti = x;
      }
    }
  }
  dl_harq_proc *h = NULL;
  if (oldest_idx >= 0) {
    h = &dl_harq[oldest_idx];
  }

  pthread_mutex_unlock(&mutex);

  return h;

#else
  return &dl_harq[tti%SCHED_MAX_HARQ_PROC];
#endif
}

dl_harq_proc* sched_ue::get_empty_dl_harq()
{
  pthread_mutex_lock(&mutex);

  dl_harq_proc *h = NULL;
  for (int i=0;i<SCHED_MAX_HARQ_PROC && !h;i++) {
    if (dl_harq[i].is_empty(0) && dl_harq[i].is_empty(1)) {
      h = &dl_harq[i];
    }
  }

  pthread_mutex_unlock(&mutex);
  return h;
}

ul_harq_proc* sched_ue::get_ul_harq(uint32_t tti)
{
  return &ul_harq[tti%SCHED_MAX_HARQ_PROC];
}

/* Find the smallest PDCCH aggregation level whose coderate fits the reported CQI */
uint32_t sched_ue::get_aggr_level(uint32_t nof_bits)
{
  pthread_mutex_lock(&mutex);

  uint32_t l = 0;
  float max_coderate = srslte_cqi_to_coderate(dl_cqi);
  float coderate = 99;
  float factor = 1.5;
  uint32_t l_max = 3;
  if (cell.nof_prb == 6) {
    factor = 1.0;
    l_max = 2;
  }
  do {
    coderate = srslte_pdcch_coderate(nof_bits, l);
    l++;
  } while (l < l_max && factor*coderate > max_coderate);

  Debug("SCHED: CQI=%d, l=%d, nof_bits=%d, coderate=%.2f, max_coderate=%.2f\n",
        dl_cqi, l, nof_bits, coderate, max_coderate);

  pthread_mutex_unlock(&mutex);
  return l;
}

sched_ue::sched_dci_cce_t* sched_ue::get_locations(uint32_t cfi, uint32_t sf_idx)
{
  if (cfi > 0 && cfi <= 3) {
    return &dci_locations[cfi-1][sf_idx];
  } else {
    Error("SCHED: Invalid CFI=%d\n", cfi);
    return &dci_locations[0][sf_idx];
  }
}

/* Allocates first available RLC PDU */
int sched_ue::alloc_pdu(int tbs_bytes, sched_interface::dl_sched_pdu_t* pdu)
{
  // TODO: Implement lcid priority (now lowest index is highest priority)
  int x = 0;
  int i = 0;
  for (i=0; i<sched_interface::MAX_LC && !x; i++) {
    if (lch[i].buf_retx) {
      x = SRSLTE_MIN(lch[i].buf_retx, tbs_bytes);
      lch[i].buf_retx -= x;
    } else if (lch[i].buf_tx) {
      x = SRSLTE_MIN(lch[i].buf_tx, tbs_bytes);
      lch[i].buf_tx -= x;
    }
  }
  if (x) {
    pdu->lcid   = i-1;
    pdu->nbytes = x;
    Debug("SCHED: Allocated lcid=%d, nbytes=%d, tbs_bytes=%d\n", pdu->lcid, pdu->nbytes, tbs_bytes);
  }
  return x;
}

uint32_t sched_ue::format1_count_prb(uint32_t bitmask, uint32_t cell_nof_prb) {
  uint32_t P  = srslte_ra_type0_P(cell_nof_prb);
  uint32_t nb = (uint32_t) ceilf((float) cell_nof_prb / P);
  uint32_t nof_prb = 0;
  for (uint32_t i = 0; i < nb; i++) {
    if (bitmask & (1 << (nb - i - 1))) {
      for (uint32_t j = 0; j < P; j++) {
        if (i*P + j < cell_nof_prb) {
          nof_prb++;
        }
      }
    }
  }
  return nof_prb;
}

int sched_ue::cqi_to_tbs(uint32_t cqi, uint32_t nof_prb, uint32_t nof_re,
                         uint32_t max_mcs, uint32_t max_Qm, bool is_ul, uint32_t* mcs)
{
  float max_coderate = srslte_cqi_to_coderate(cqi);
  int sel_mcs = max_mcs + 1;
  float coderate = 99;
  float eff_coderate = 99;
  uint32_t Qm = 1;
  int tbs = 0;

  do {
    sel_mcs--;
    uint32_t tbs_idx = (is_ul) ? srslte_ra_ul_tbs_idx_from_mcs(sel_mcs) : srslte_ra_dl_tbs_idx_from_mcs(sel_mcs);
    tbs = srslte_ra_tbs_from_idx(tbs_idx, nof_prb);
    coderate = srslte_coderate(tbs, nof_re);
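    // Derive the modulation order for this MCS, capped at max_Qm, so the effective
    // coderate per modulation bit can be checked against the 0.93 limit below.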
    srslte_mod_t mod = (is_ul) ? srslte_ra_ul_mod_from_mcs(sel_mcs) : srslte_ra_dl_mod_from_mcs(sel_mcs);
    Qm = SRSLTE_MIN(max_Qm, srslte_mod_bits_x_symbol(mod));
    eff_coderate = coderate/Qm;
  } while ((sel_mcs > 0 && coderate > max_coderate) || eff_coderate > 0.930);

  if (mcs) {
    *mcs = (uint32_t) sel_mcs;
  }
  return tbs;
}

int sched_ue::alloc_tbs_dl(uint32_t nof_prb, uint32_t nof_re, uint32_t req_bytes, int *mcs)
{
  return alloc_tbs(nof_prb, nof_re, req_bytes, false, mcs);
}

int sched_ue::alloc_tbs_ul(uint32_t nof_prb, uint32_t nof_re, uint32_t req_bytes, int *mcs)
{
  return alloc_tbs(nof_prb, nof_re, req_bytes, true, mcs);
}

/* In this scheduler we tend to use all the available bandwidth and select the MCS
 * that approximates the minimum between the capacity and the requested rate
 */
int sched_ue::alloc_tbs(uint32_t nof_prb, uint32_t nof_re, uint32_t req_bytes, bool is_ul, int *mcs)
{
  uint32_t sel_mcs = 0;

  uint32_t cqi     = is_ul ? ul_cqi     : dl_cqi;
  uint32_t max_mcs = is_ul ? max_mcs_ul : max_mcs_dl;
  uint32_t max_Qm  = is_ul ? 4 : 6; // Allow 16-QAM in PUSCH only

  // TODO: Compute real spectral efficiency based on PUSCH-UCI configuration
  int tbs = cqi_to_tbs(cqi, nof_prb, nof_re, max_mcs, max_Qm, is_ul, &sel_mcs) / 8;

  /* If fewer bytes are requested, lower the MCS */
  if (tbs > (int) req_bytes && req_bytes > 0) {
    int req_tbs_idx = srslte_ra_tbs_to_table_idx(req_bytes * 8, nof_prb);
    int req_mcs     = (is_ul) ? srslte_ra_ul_mcs_from_tbs_idx(req_tbs_idx) : srslte_ra_dl_mcs_from_tbs_idx(req_tbs_idx);
    if (req_mcs < (int) sel_mcs) {
      sel_mcs = req_mcs;
      tbs = srslte_ra_tbs_from_idx(req_tbs_idx, nof_prb) / 8;
    }
  }
  // Avoid the unusual case n_prb=1, mcs=6, tbs=328 (used in VoIP)
  if (nof_prb == 1 && sel_mcs == 6) {
    sel_mcs--;
    uint32_t tbs_idx = (is_ul) ? srslte_ra_ul_tbs_idx_from_mcs(sel_mcs) : srslte_ra_dl_tbs_idx_from_mcs(sel_mcs);
    tbs = srslte_ra_tbs_from_idx(tbs_idx, nof_prb) / 8;
  }

  if (mcs && tbs >= 0) {
    *mcs = (int) sel_mcs;
  }

  return tbs;
}

} // namespace srsenb
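/* Worked example (illustrative only, not compiled): how cqi_to_tbs() converges.
 * Assume a hypothetical DL allocation with nof_prb=50 and a UE reporting CQI 9,
 * for which srslte_cqi_to_coderate() yields roughly 616/1024 ~= 0.60:
 *
 *   sel_mcs = max_mcs + 1 = 29
 *   do { sel_mcs--;
 *        tbs          = srslte_ra_tbs_from_idx(tbs_idx(sel_mcs), 50);
 *        coderate     = srslte_coderate(tbs, nof_re);
 *        eff_coderate = coderate / Qm;
 *   } while ((sel_mcs > 0 && coderate > 0.60) || eff_coderate > 0.93);
 *
 * i.e. the MCS is lowered until the resulting TBS fits both the channel quality
 * reported by the UE and the per-modulation-bit limit of 0.93; alloc_tbs() then
 * converts the selected TBS from bits to bytes.
 */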