|
|
|
@ -53,8 +53,6 @@ sched_ue::sched_ue()
|
|
|
|
|
bzero(&cell, sizeof(cell));
|
|
|
|
|
bzero(&lch, sizeof(lch));
|
|
|
|
|
bzero(&dci_locations, sizeof(dci_locations));
|
|
|
|
|
bzero(&dl_harq, sizeof(dl_harq));
|
|
|
|
|
bzero(&ul_harq, sizeof(ul_harq));
|
|
|
|
|
bzero(&dl_ant_info, sizeof(dl_ant_info));
|
|
|
|
|
|
|
|
|
|
reset();
|
|
|
|
@ -71,9 +69,6 @@ void sched_ue::set_cfg(uint16_t rnti_, const sched_params_t& sched_params_, sche
|
|
|
|
|
log_h = sched_params->log_h;
|
|
|
|
|
cell = sched_params->cfg->cell;
|
|
|
|
|
|
|
|
|
|
max_mcs_dl = 28;
|
|
|
|
|
max_mcs_ul = 28;
|
|
|
|
|
max_aggr_level = 3;
|
|
|
|
|
max_msg3retx = sched_params->cfg->maxharq_msg3tx;
|
|
|
|
|
|
|
|
|
|
cfg = *cfg_;
|
|
|
|
@ -82,11 +77,10 @@ void sched_ue::set_cfg(uint16_t rnti_, const sched_params_t& sched_params_, sche
|
|
|
|
|
cfg.dl_cfg.tm = SRSLTE_TM1;
|
|
|
|
|
|
|
|
|
|
Info("SCHED: Added user rnti=0x%x\n", rnti);
|
|
|
|
|
// Config HARQ processes
|
|
|
|
|
for (int i = 0; i < SCHED_MAX_HARQ_PROC; i++) {
|
|
|
|
|
dl_harq[i].config(i, cfg.maxharq_tx, log_h);
|
|
|
|
|
ul_harq[i].config(i, cfg.maxharq_tx, log_h);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// Init sched_ue carriers
|
|
|
|
|
// TODO: check config for number of carriers
|
|
|
|
|
carriers.emplace_back(&cfg, &cell, rnti, 0, log_h);
|
|
|
|
|
|
|
|
|
|
// Generate allowed CCE locations
|
|
|
|
|
for (int cfi = 0; cfi < 3; cfi++) {
|
|
|
|
@ -117,20 +111,9 @@ void sched_ue::reset()
|
|
|
|
|
buf_mac = 0;
|
|
|
|
|
buf_ul = 0;
|
|
|
|
|
phy_config_dedicated_enabled = false;
|
|
|
|
|
dl_cqi = 1;
|
|
|
|
|
ul_cqi = 1;
|
|
|
|
|
dl_cqi_tti = 0;
|
|
|
|
|
ul_cqi_tti = 0;
|
|
|
|
|
dl_ri = 0;
|
|
|
|
|
dl_ri_tti = 0;
|
|
|
|
|
dl_pmi = 0;
|
|
|
|
|
dl_pmi_tti = 0;
|
|
|
|
|
cqi_request_tti = 0;
|
|
|
|
|
for (int i = 0; i < SCHED_MAX_HARQ_PROC; i++) {
|
|
|
|
|
for (uint32_t tb = 0; tb < SRSLTE_MAX_TB; tb++) {
|
|
|
|
|
dl_harq[i].reset(tb);
|
|
|
|
|
ul_harq[i].reset(tb);
|
|
|
|
|
}
|
|
|
|
|
for (auto& c : carriers) {
|
|
|
|
|
c.reset();
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
@ -142,27 +125,22 @@ void sched_ue::reset()
|
|
|
|
|
void sched_ue::set_fixed_mcs(int mcs_ul, int mcs_dl)
|
|
|
|
|
{
|
|
|
|
|
std::lock_guard<std::mutex> lock(mutex);
|
|
|
|
|
fixed_mcs_ul = mcs_ul;
|
|
|
|
|
fixed_mcs_dl = mcs_dl;
|
|
|
|
|
for (auto& c : carriers) {
|
|
|
|
|
c.fixed_mcs_dl = mcs_dl;
|
|
|
|
|
c.fixed_mcs_ul = mcs_ul;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
void sched_ue::set_max_mcs(int mcs_ul, int mcs_dl, int max_aggr_level_)
|
|
|
|
|
{
|
|
|
|
|
std::lock_guard<std::mutex> lock(mutex);
|
|
|
|
|
if (mcs_ul < 0) {
|
|
|
|
|
max_mcs_ul = 28;
|
|
|
|
|
} else {
|
|
|
|
|
max_mcs_ul = mcs_ul;
|
|
|
|
|
}
|
|
|
|
|
if (mcs_dl < 0) {
|
|
|
|
|
max_mcs_dl = 28;
|
|
|
|
|
} else {
|
|
|
|
|
max_mcs_dl = mcs_dl;
|
|
|
|
|
}
|
|
|
|
|
if (max_aggr_level_ < 0) {
|
|
|
|
|
max_aggr_level = 3;
|
|
|
|
|
} else {
|
|
|
|
|
max_aggr_level = max_aggr_level_;
|
|
|
|
|
uint32_t max_mcs_ul = mcs_ul >= 0 ? mcs_ul : 28;
|
|
|
|
|
uint32_t max_mcs_dl = mcs_dl >= 0 ? mcs_dl : 28;
|
|
|
|
|
uint32_t max_aggr_level = max_aggr_level_ >= 0 ? max_aggr_level_ : 3;
|
|
|
|
|
for (auto& c : carriers) {
|
|
|
|
|
c.max_mcs_dl = max_mcs_dl;
|
|
|
|
|
c.max_mcs_ul = max_mcs_ul;
|
|
|
|
|
c.max_aggr_level = max_aggr_level;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
@ -193,9 +171,10 @@ void sched_ue::rem_bearer(uint32_t lc_id)
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
void sched_ue::phy_config_enabled(uint32_t tti, bool enabled)
|
|
|
|
|
void sched_ue::phy_config_enabled(uint32_t tti, uint32_t cc_idx, bool enabled)
|
|
|
|
|
{
|
|
|
|
|
dl_cqi_tti = tti;
|
|
|
|
|
carriers[cc_idx].dl_cqi_tti = tti;
|
|
|
|
|
// FIXME: "why do we need this?"
|
|
|
|
|
phy_config_dedicated_enabled = enabled;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
@ -262,7 +241,7 @@ bool sched_ue::pucch_sr_collision(uint32_t current_tti, uint32_t n_cce)
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
bool sched_ue::get_pucch_sched(uint32_t current_tti, uint32_t prb_idx[2])
|
|
|
|
|
bool sched_ue::get_pucch_sched(uint32_t current_tti, uint32_t cc_idx, uint32_t prb_idx[2])
|
|
|
|
|
{
|
|
|
|
|
bool ret = false;
|
|
|
|
|
|
|
|
|
@ -279,9 +258,9 @@ bool sched_ue::get_pucch_sched(uint32_t current_tti, uint32_t prb_idx[2])
|
|
|
|
|
ret |= cfg.pucch_cfg.uci_cfg.is_scheduling_request_tti;
|
|
|
|
|
|
|
|
|
|
// Pending ACKs
|
|
|
|
|
for (int i = 0; i < SCHED_MAX_HARQ_PROC; i++) {
|
|
|
|
|
if (TTI_TX(dl_harq[i].get_tti()) == current_tti) {
|
|
|
|
|
cfg.pucch_cfg.uci_cfg.ack[0].ncce[0] = dl_harq[i].get_n_cce();
|
|
|
|
|
for (auto& h : carriers[cc_idx].dl_harq) {
|
|
|
|
|
if (TTI_TX(h.get_tti()) == current_tti) {
|
|
|
|
|
cfg.pucch_cfg.uci_cfg.ack[0].ncce[0] = h.get_n_cce();
|
|
|
|
|
cfg.pucch_cfg.uci_cfg.ack[0].nof_acks = 1;
|
|
|
|
|
ret = true;
|
|
|
|
|
}
|
|
|
|
@ -310,24 +289,10 @@ bool sched_ue::get_pucch_sched(uint32_t current_tti, uint32_t prb_idx[2])
|
|
|
|
|
return ret;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
int sched_ue::set_ack_info(uint32_t tti, uint32_t tb_idx, bool ack)
|
|
|
|
|
int sched_ue::set_ack_info(uint32_t tti, uint32_t cc_idx, uint32_t tb_idx, bool ack)
|
|
|
|
|
{
|
|
|
|
|
std::lock_guard<std::mutex> lock(mutex);
|
|
|
|
|
int ret;
|
|
|
|
|
for (int i = 0; i < SCHED_MAX_HARQ_PROC; i++) {
|
|
|
|
|
if (TTI_TX(dl_harq[i].get_tti()) == tti) {
|
|
|
|
|
Debug("SCHED: Set ACK=%d for rnti=0x%x, pid=%d, tb=%d, tti=%d\n", ack, rnti, i, tb_idx, tti);
|
|
|
|
|
dl_harq[i].set_ack(tb_idx, ack);
|
|
|
|
|
ret = dl_harq[i].get_tbs(tb_idx);
|
|
|
|
|
goto unlock;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
Warning("SCHED: Received ACK info for unknown TTI=%d\n", tti);
|
|
|
|
|
ret = -1;
|
|
|
|
|
|
|
|
|
|
unlock:
|
|
|
|
|
return ret;
|
|
|
|
|
return carriers[cc_idx].set_ack_info(tti, tb_idx, ack);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
void sched_ue::ul_recv_len(uint32_t lcid, uint32_t len)
|
|
|
|
@ -350,31 +315,31 @@ void sched_ue::ul_recv_len(uint32_t lcid, uint32_t len)
|
|
|
|
|
Debug("SCHED: recv_len=%d, lcid=%d, bsr={%d,%d,%d,%d}\n", len, lcid, lch[0].bsr, lch[1].bsr, lch[2].bsr, lch[3].bsr);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
void sched_ue::set_ul_crc(uint32_t tti, bool crc_res)
|
|
|
|
|
void sched_ue::set_ul_crc(uint32_t tti, uint32_t cc_idx, bool crc_res)
|
|
|
|
|
{
|
|
|
|
|
std::lock_guard<std::mutex> lock(mutex);
|
|
|
|
|
get_ul_harq(tti)->set_ack(0, crc_res);
|
|
|
|
|
get_ul_harq(tti, cc_idx)->set_ack(0, crc_res);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
void sched_ue::set_dl_ri(uint32_t tti, uint32_t ri)
|
|
|
|
|
void sched_ue::set_dl_ri(uint32_t tti, uint32_t cc_idx, uint32_t ri)
|
|
|
|
|
{
|
|
|
|
|
std::lock_guard<std::mutex> lock(mutex);
|
|
|
|
|
dl_ri = ri;
|
|
|
|
|
dl_ri_tti = tti;
|
|
|
|
|
carriers[cc_idx].dl_ri = ri;
|
|
|
|
|
carriers[cc_idx].dl_ri_tti = tti;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
void sched_ue::set_dl_pmi(uint32_t tti, uint32_t pmi)
|
|
|
|
|
void sched_ue::set_dl_pmi(uint32_t tti, uint32_t cc_idx, uint32_t pmi)
|
|
|
|
|
{
|
|
|
|
|
std::lock_guard<std::mutex> lock(mutex);
|
|
|
|
|
dl_pmi = pmi;
|
|
|
|
|
dl_pmi_tti = tti;
|
|
|
|
|
carriers[cc_idx].dl_pmi = pmi;
|
|
|
|
|
carriers[cc_idx].dl_pmi_tti = tti;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
void sched_ue::set_dl_cqi(uint32_t tti, uint32_t cqi)
|
|
|
|
|
void sched_ue::set_dl_cqi(uint32_t tti, uint32_t cc_idx, uint32_t cqi)
|
|
|
|
|
{
|
|
|
|
|
std::lock_guard<std::mutex> lock(mutex);
|
|
|
|
|
dl_cqi = cqi;
|
|
|
|
|
dl_cqi_tti = tti;
|
|
|
|
|
carriers[cc_idx].dl_cqi = cqi;
|
|
|
|
|
carriers[cc_idx].dl_cqi_tti = tti;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
void sched_ue::set_dl_ant_info(asn1::rrc::phys_cfg_ded_s::ant_info_c_* d)
|
|
|
|
@ -383,11 +348,11 @@ void sched_ue::set_dl_ant_info(asn1::rrc::phys_cfg_ded_s::ant_info_c_* d)
|
|
|
|
|
dl_ant_info = *d;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
void sched_ue::set_ul_cqi(uint32_t tti, uint32_t cqi, uint32_t ul_ch_code)
|
|
|
|
|
void sched_ue::set_ul_cqi(uint32_t tti, uint32_t cc_idx, uint32_t cqi, uint32_t ul_ch_code)
|
|
|
|
|
{
|
|
|
|
|
std::lock_guard<std::mutex> lock(mutex);
|
|
|
|
|
ul_cqi = cqi;
|
|
|
|
|
ul_cqi_tti = tti;
|
|
|
|
|
carriers[cc_idx].ul_cqi = cqi;
|
|
|
|
|
carriers[cc_idx].ul_cqi_tti = tti;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
void sched_ue::tpc_inc()
|
|
|
|
@ -418,7 +383,8 @@ void sched_ue::tpc_dec()
|
|
|
|
|
// > return 0 if TBS<MIN_DATA_TBS
|
|
|
|
|
int sched_ue::generate_format1(dl_harq_proc* h,
|
|
|
|
|
sched_interface::dl_sched_data_t* data,
|
|
|
|
|
uint32_t tti,
|
|
|
|
|
uint32_t tti_tx_dl,
|
|
|
|
|
uint32_t cc_idx,
|
|
|
|
|
uint32_t cfi,
|
|
|
|
|
const rbgmask_t& user_mask)
|
|
|
|
|
{
|
|
|
|
@ -434,13 +400,13 @@ int sched_ue::generate_format1(dl_harq_proc* h,
|
|
|
|
|
|
|
|
|
|
// If this is the first transmission for this UE, make room for MAC Contention Resolution ID
|
|
|
|
|
bool need_conres_ce = false;
|
|
|
|
|
if (is_first_dl_tx()) {
|
|
|
|
|
if (is_first_dl_tx(cc_idx)) {
|
|
|
|
|
need_conres_ce = true;
|
|
|
|
|
}
|
|
|
|
|
if (h->is_empty(0)) {
|
|
|
|
|
|
|
|
|
|
// Get total available data to transmit (includes MAC header)
|
|
|
|
|
uint32_t req_bytes = get_pending_dl_new_data_total_unlocked(tti);
|
|
|
|
|
uint32_t req_bytes = get_pending_dl_new_data_total_unlocked();
|
|
|
|
|
|
|
|
|
|
uint32_t nof_prb = format1_count_prb((uint32_t)user_mask.to_uint64(), cell.nof_prb);
|
|
|
|
|
|
|
|
|
@ -448,16 +414,16 @@ int sched_ue::generate_format1(dl_harq_proc* h,
|
|
|
|
|
srslte_pdsch_grant_t grant = {};
|
|
|
|
|
srslte_dl_sf_cfg_t dl_sf = {};
|
|
|
|
|
dl_sf.cfi = cfi;
|
|
|
|
|
dl_sf.tti = tti;
|
|
|
|
|
dl_sf.tti = tti_tx_dl;
|
|
|
|
|
srslte_ra_dl_grant_to_grant_prb_allocation(dci, &grant, cell.nof_prb);
|
|
|
|
|
uint32_t nof_re = srslte_ra_dl_grant_nof_re(&cell, &dl_sf, &grant);
|
|
|
|
|
|
|
|
|
|
int mcs0 = fixed_mcs_dl;
|
|
|
|
|
int mcs0 = carriers[cc_idx].fixed_mcs_dl;
|
|
|
|
|
if (need_conres_ce and cell.nof_prb < 10) { // SRB0 Tx. Use a higher MCS for the PRACH to fit in 6 PRBs
|
|
|
|
|
mcs0 = MCS_FIRST_DL;
|
|
|
|
|
}
|
|
|
|
|
if (mcs0 < 0) { // dynamic MCS
|
|
|
|
|
tbs = alloc_tbs_dl(nof_prb, nof_re, req_bytes, &mcs);
|
|
|
|
|
tbs = carriers[cc_idx].alloc_tbs_dl(nof_prb, nof_re, req_bytes, &mcs);
|
|
|
|
|
} else {
|
|
|
|
|
tbs = srslte_ra_tbs_from_idx(srslte_ra_tbs_idx_from_mcs(mcs0, false), nof_prb) / 8;
|
|
|
|
|
mcs = mcs0;
|
|
|
|
@ -468,7 +434,7 @@ int sched_ue::generate_format1(dl_harq_proc* h,
|
|
|
|
|
return 0;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
h->new_tx(user_mask, 0, tti, mcs, tbs, data->dci.location.ncce);
|
|
|
|
|
h->new_tx(user_mask, 0, tti_tx_dl, mcs, tbs, data->dci.location.ncce);
|
|
|
|
|
|
|
|
|
|
int rem_tbs = tbs;
|
|
|
|
|
int x = 0;
|
|
|
|
@ -500,7 +466,7 @@ int sched_ue::generate_format1(dl_harq_proc* h,
|
|
|
|
|
|
|
|
|
|
Debug("SCHED: Alloc format1 new mcs=%d, tbs=%d, nof_prb=%d, req_bytes=%d\n", mcs, tbs, nof_prb, req_bytes);
|
|
|
|
|
} else {
|
|
|
|
|
h->new_retx(user_mask, 0, tti, &mcs, &tbs, data->dci.location.ncce);
|
|
|
|
|
h->new_retx(user_mask, 0, tti_tx_dl, &mcs, &tbs, data->dci.location.ncce);
|
|
|
|
|
Debug("SCHED: Alloc format1 previous mcs=%d, tbs=%d\n", mcs, tbs);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
@ -525,11 +491,12 @@ int sched_ue::generate_format1(dl_harq_proc* h,
|
|
|
|
|
int sched_ue::generate_format2a(dl_harq_proc* h,
|
|
|
|
|
sched_interface::dl_sched_data_t* data,
|
|
|
|
|
uint32_t tti,
|
|
|
|
|
uint32_t cc_idx,
|
|
|
|
|
uint32_t cfi,
|
|
|
|
|
const rbgmask_t& user_mask)
|
|
|
|
|
{
|
|
|
|
|
std::lock_guard<std::mutex> lock(mutex);
|
|
|
|
|
int ret = generate_format2a_unlocked(h, data, tti, cfi, user_mask);
|
|
|
|
|
int ret = generate_format2a_unlocked(h, data, tti, cc_idx, cfi, user_mask);
|
|
|
|
|
return ret;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
@ -537,6 +504,7 @@ int sched_ue::generate_format2a(dl_harq_proc* h,
|
|
|
|
|
int sched_ue::generate_format2a_unlocked(dl_harq_proc* h,
|
|
|
|
|
sched_interface::dl_sched_data_t* data,
|
|
|
|
|
uint32_t tti,
|
|
|
|
|
uint32_t cc_idx,
|
|
|
|
|
uint32_t cfi,
|
|
|
|
|
const rbgmask_t& user_mask)
|
|
|
|
|
{
|
|
|
|
@ -559,7 +527,7 @@ int sched_ue::generate_format2a_unlocked(dl_harq_proc* h,
|
|
|
|
|
|
|
|
|
|
bool no_retx = true;
|
|
|
|
|
|
|
|
|
|
if (dl_ri == 0) {
|
|
|
|
|
if (carriers[cc_idx].dl_ri == 0) {
|
|
|
|
|
if (h->is_empty(1)) {
|
|
|
|
|
/* One layer, tb1 buffer is empty, send tb0 only */
|
|
|
|
|
tb_en[0] = true;
|
|
|
|
@ -583,7 +551,7 @@ int sched_ue::generate_format2a_unlocked(dl_harq_proc* h,
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
for (uint32_t tb = 0; tb < SRSLTE_MAX_TB; tb++) {
|
|
|
|
|
uint32_t req_bytes = get_pending_dl_new_data_total_unlocked(tti);
|
|
|
|
|
uint32_t req_bytes = get_pending_dl_new_data_total_unlocked();
|
|
|
|
|
int mcs = 0;
|
|
|
|
|
int tbs = 0;
|
|
|
|
|
|
|
|
|
@ -591,11 +559,13 @@ int sched_ue::generate_format2a_unlocked(dl_harq_proc* h,
|
|
|
|
|
h->new_retx(user_mask, tb, tti, &mcs, &tbs, data->dci.location.ncce);
|
|
|
|
|
Debug("SCHED: Alloc format2/2a previous mcs=%d, tbs=%d\n", mcs, tbs);
|
|
|
|
|
} else if (tb_en[tb] && req_bytes && no_retx) {
|
|
|
|
|
if (fixed_mcs_dl < 0) {
|
|
|
|
|
tbs = alloc_tbs_dl(nof_prb, nof_re, req_bytes, &mcs);
|
|
|
|
|
if (carriers[cc_idx].fixed_mcs_dl < 0) {
|
|
|
|
|
tbs = carriers[cc_idx].alloc_tbs_dl(nof_prb, nof_re, req_bytes, &mcs);
|
|
|
|
|
} else {
|
|
|
|
|
tbs = srslte_ra_tbs_from_idx((uint32_t)srslte_ra_tbs_idx_from_mcs((uint32_t)fixed_mcs_dl, false), nof_prb) / 8;
|
|
|
|
|
mcs = fixed_mcs_dl;
|
|
|
|
|
tbs = srslte_ra_tbs_from_idx(
|
|
|
|
|
(uint32_t)srslte_ra_tbs_idx_from_mcs((uint32_t)carriers[cc_idx].fixed_mcs_dl, false), nof_prb) /
|
|
|
|
|
8;
|
|
|
|
|
mcs = carriers[cc_idx].fixed_mcs_dl;
|
|
|
|
|
}
|
|
|
|
|
h->new_tx(user_mask, tb, tti, mcs, tbs, data->dci.location.ncce);
|
|
|
|
|
|
|
|
|
@ -643,6 +613,7 @@ int sched_ue::generate_format2a_unlocked(dl_harq_proc* h,
|
|
|
|
|
int sched_ue::generate_format2(dl_harq_proc* h,
|
|
|
|
|
sched_interface::dl_sched_data_t* data,
|
|
|
|
|
uint32_t tti,
|
|
|
|
|
uint32_t cc_idx,
|
|
|
|
|
uint32_t cfi,
|
|
|
|
|
const rbgmask_t& user_mask)
|
|
|
|
|
{
|
|
|
|
@ -650,14 +621,14 @@ int sched_ue::generate_format2(dl_harq_proc* h,
|
|
|
|
|
std::lock_guard<std::mutex> lock(mutex);
|
|
|
|
|
|
|
|
|
|
/* Call Format 2a (common) */
|
|
|
|
|
int ret = generate_format2a_unlocked(h, data, tti, cfi, user_mask);
|
|
|
|
|
int ret = generate_format2a_unlocked(h, data, tti, cc_idx, cfi, user_mask);
|
|
|
|
|
|
|
|
|
|
/* Compute precoding information */
|
|
|
|
|
data->dci.format = SRSLTE_DCI_FORMAT2;
|
|
|
|
|
if ((SRSLTE_DCI_IS_TB_EN(data->dci.tb[0]) + SRSLTE_DCI_IS_TB_EN(data->dci.tb[1])) == 1) {
|
|
|
|
|
data->dci.pinfo = (uint8_t)(dl_pmi + 1) % (uint8_t)5;
|
|
|
|
|
data->dci.pinfo = (uint8_t)(carriers[cc_idx].dl_pmi + 1) % (uint8_t)5;
|
|
|
|
|
} else {
|
|
|
|
|
data->dci.pinfo = (uint8_t)(dl_pmi & 1u);
|
|
|
|
|
data->dci.pinfo = (uint8_t)(carriers[cc_idx].dl_pmi & 1u);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
return ret;
|
|
|
|
@ -665,6 +636,7 @@ int sched_ue::generate_format2(dl_harq_proc* h,
|
|
|
|
|
|
|
|
|
|
int sched_ue::generate_format0(sched_interface::ul_sched_data_t* data,
|
|
|
|
|
uint32_t tti,
|
|
|
|
|
uint32_t cc_idx,
|
|
|
|
|
ul_harq_proc::ul_alloc_t alloc,
|
|
|
|
|
bool needs_pdcch,
|
|
|
|
|
srslte_dci_location_t dci_pos,
|
|
|
|
@ -672,7 +644,7 @@ int sched_ue::generate_format0(sched_interface::ul_sched_data_t* data,
|
|
|
|
|
{
|
|
|
|
|
std::lock_guard<std::mutex> lock(mutex);
|
|
|
|
|
|
|
|
|
|
ul_harq_proc* h = get_ul_harq(tti);
|
|
|
|
|
ul_harq_proc* h = get_ul_harq(tti, cc_idx);
|
|
|
|
|
srslte_dci_ul_t* dci = &data->dci;
|
|
|
|
|
|
|
|
|
|
bool cqi_request = needs_cqi_unlocked(tti, true);
|
|
|
|
@ -681,7 +653,7 @@ int sched_ue::generate_format0(sched_interface::ul_sched_data_t* data,
|
|
|
|
|
data->needs_pdcch = needs_pdcch;
|
|
|
|
|
dci->location = dci_pos;
|
|
|
|
|
|
|
|
|
|
int mcs = (explicit_mcs >= 0) ? explicit_mcs : fixed_mcs_ul;
|
|
|
|
|
int mcs = (explicit_mcs >= 0) ? explicit_mcs : carriers[cc_idx].fixed_mcs_ul;
|
|
|
|
|
int tbs = 0;
|
|
|
|
|
|
|
|
|
|
bool is_newtx = h->is_empty(0);
|
|
|
|
@ -698,7 +670,7 @@ int sched_ue::generate_format0(sched_interface::ul_sched_data_t* data,
|
|
|
|
|
uint32_t req_bytes = get_pending_ul_new_data_unlocked(tti);
|
|
|
|
|
uint32_t N_srs = 0;
|
|
|
|
|
uint32_t nof_re = (2 * (SRSLTE_CP_NSYMB(cell.cp) - 1) - N_srs) * alloc.L * SRSLTE_NRE;
|
|
|
|
|
tbs = alloc_tbs_ul(alloc.L, nof_re, req_bytes, &mcs);
|
|
|
|
|
tbs = carriers[cc_idx].alloc_tbs_ul(alloc.L, nof_re, req_bytes, &mcs);
|
|
|
|
|
}
|
|
|
|
|
h->new_tx(tti, mcs, tbs, alloc, nof_retx);
|
|
|
|
|
|
|
|
|
@ -755,27 +727,37 @@ uint32_t sched_ue::get_max_retx()
|
|
|
|
|
|
|
|
|
|
bool sched_ue::is_first_dl_tx()
|
|
|
|
|
{
|
|
|
|
|
for (int i = 0; i < SCHED_MAX_HARQ_PROC; i++) {
|
|
|
|
|
if (dl_harq[i].nof_tx(0) > 0) {
|
|
|
|
|
for (uint32_t i = 0; i < carriers.size(); ++i) {
|
|
|
|
|
if (not is_first_dl_tx(i)) {
|
|
|
|
|
return false;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
return true;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
bool sched_ue::is_first_dl_tx(uint32_t cc_idx)
|
|
|
|
|
{
|
|
|
|
|
for (auto& h : carriers[cc_idx].dl_harq) {
|
|
|
|
|
if (h.nof_tx(0) > 0) {
|
|
|
|
|
return false;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
return true;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
bool sched_ue::needs_cqi(uint32_t tti, bool will_be_sent)
|
|
|
|
|
bool sched_ue::needs_cqi(uint32_t tti, uint32_t cc_idx, bool will_be_sent)
|
|
|
|
|
{
|
|
|
|
|
std::lock_guard<std::mutex> lock(mutex);
|
|
|
|
|
bool ret = needs_cqi_unlocked(tti, will_be_sent);
|
|
|
|
|
bool ret = needs_cqi_unlocked(tti, cc_idx, will_be_sent);
|
|
|
|
|
return ret;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// Private lock-free implemenentation
|
|
|
|
|
bool sched_ue::needs_cqi_unlocked(uint32_t tti, bool will_be_sent)
|
|
|
|
|
bool sched_ue::needs_cqi_unlocked(uint32_t tti, uint32_t cc_idx, bool will_be_sent)
|
|
|
|
|
{
|
|
|
|
|
bool ret = false;
|
|
|
|
|
if (phy_config_dedicated_enabled && cfg.aperiodic_cqi_period && get_pending_dl_new_data_unlocked(tti) > 0) {
|
|
|
|
|
uint32_t interval = srslte_tti_interval(tti, dl_cqi_tti);
|
|
|
|
|
if (phy_config_dedicated_enabled && cfg.aperiodic_cqi_period && get_pending_dl_new_data_unlocked() > 0) {
|
|
|
|
|
uint32_t interval = srslte_tti_interval(tti, carriers[cc_idx].dl_cqi_tti);
|
|
|
|
|
bool needscqi = interval >= cfg.aperiodic_cqi_period;
|
|
|
|
|
if (needscqi) {
|
|
|
|
|
uint32_t interval_sent = srslte_tti_interval(tti, cqi_request_tti);
|
|
|
|
@ -791,27 +773,24 @@ bool sched_ue::needs_cqi_unlocked(uint32_t tti, bool will_be_sent)
|
|
|
|
|
return ret;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
uint32_t sched_ue::get_pending_dl_new_data(uint32_t tti)
|
|
|
|
|
uint32_t sched_ue::get_pending_dl_new_data()
|
|
|
|
|
{
|
|
|
|
|
std::lock_guard<std::mutex> lock(mutex);
|
|
|
|
|
uint32_t pending_data = get_pending_dl_new_data_unlocked(tti);
|
|
|
|
|
return pending_data;
|
|
|
|
|
return get_pending_dl_new_data_unlocked();
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/// Use this function in the dl-metric to get the bytes to be scheduled. It accounts for the UE data,
|
|
|
|
|
/// the RAR resources, and headers
|
|
|
|
|
/// \param tti
|
|
|
|
|
/// \return number of bytes to be allocated
|
|
|
|
|
uint32_t sched_ue::get_pending_dl_new_data_total(uint32_t tti)
|
|
|
|
|
uint32_t sched_ue::get_pending_dl_new_data_total()
|
|
|
|
|
{
|
|
|
|
|
std::lock_guard<std::mutex> lock(mutex);
|
|
|
|
|
uint32_t req_bytes = get_pending_dl_new_data_total_unlocked(tti);
|
|
|
|
|
return req_bytes;
|
|
|
|
|
return get_pending_dl_new_data_total_unlocked();
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
uint32_t sched_ue::get_pending_dl_new_data_total_unlocked(uint32_t tti)
|
|
|
|
|
uint32_t sched_ue::get_pending_dl_new_data_total_unlocked()
|
|
|
|
|
{
|
|
|
|
|
uint32_t req_bytes = get_pending_dl_new_data_unlocked(tti);
|
|
|
|
|
uint32_t req_bytes = get_pending_dl_new_data_unlocked();
|
|
|
|
|
if (req_bytes > 0) {
|
|
|
|
|
req_bytes += (req_bytes < 128) ? 2 : 3; // consider the header
|
|
|
|
|
if (is_first_dl_tx()) {
|
|
|
|
@ -822,7 +801,7 @@ uint32_t sched_ue::get_pending_dl_new_data_total_unlocked(uint32_t tti)
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// Private lock-free implementation
|
|
|
|
|
uint32_t sched_ue::get_pending_dl_new_data_unlocked(uint32_t tti)
|
|
|
|
|
uint32_t sched_ue::get_pending_dl_new_data_unlocked()
|
|
|
|
|
{
|
|
|
|
|
uint32_t pending_data = 0;
|
|
|
|
|
for (int i = 0; i < sched_interface::MAX_LC; i++) {
|
|
|
|
@ -830,7 +809,7 @@ uint32_t sched_ue::get_pending_dl_new_data_unlocked(uint32_t tti)
|
|
|
|
|
pending_data += lch[i].buf_retx + lch[i].buf_tx;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
if (!is_first_dl_tx() && nof_ta_cmd) {
|
|
|
|
|
if (not is_first_dl_tx() and nof_ta_cmd > 0) {
|
|
|
|
|
pending_data += nof_ta_cmd * 2;
|
|
|
|
|
}
|
|
|
|
|
return pending_data;
|
|
|
|
@ -839,15 +818,13 @@ uint32_t sched_ue::get_pending_dl_new_data_unlocked(uint32_t tti)
|
|
|
|
|
uint32_t sched_ue::get_pending_ul_new_data(uint32_t tti)
|
|
|
|
|
{
|
|
|
|
|
std::lock_guard<std::mutex> lock(mutex);
|
|
|
|
|
uint32_t pending_data = get_pending_ul_new_data_unlocked(tti);
|
|
|
|
|
return pending_data;
|
|
|
|
|
return get_pending_ul_new_data_unlocked(tti);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
uint32_t sched_ue::get_pending_ul_old_data()
|
|
|
|
|
uint32_t sched_ue::get_pending_ul_old_data(uint32_t cc_idx)
|
|
|
|
|
{
|
|
|
|
|
std::lock_guard<std::mutex> lock(mutex);
|
|
|
|
|
uint32_t pending_data = get_pending_ul_old_data_unlocked();
|
|
|
|
|
return pending_data;
|
|
|
|
|
return get_pending_ul_old_data_unlocked(cc_idx);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// Private lock-free implementation
|
|
|
|
@ -859,19 +836,25 @@ uint32_t sched_ue::get_pending_ul_new_data_unlocked(uint32_t tti)
|
|
|
|
|
pending_data += lch[i].bsr;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
if (!pending_data && is_sr_triggered()) {
|
|
|
|
|
if (pending_data == 0) {
|
|
|
|
|
if (is_sr_triggered()) {
|
|
|
|
|
return 512;
|
|
|
|
|
}
|
|
|
|
|
if (!pending_data && needs_cqi_unlocked(tti)) {
|
|
|
|
|
for (uint32_t cc_idx = 0; cc_idx < carriers.size(); ++cc_idx) {
|
|
|
|
|
if (needs_cqi_unlocked(tti, cc_idx)) {
|
|
|
|
|
return 128;
|
|
|
|
|
}
|
|
|
|
|
uint32_t pending_ul_data = get_pending_ul_old_data_unlocked();
|
|
|
|
|
if (pending_data > pending_ul_data) {
|
|
|
|
|
pending_data -= pending_ul_data;
|
|
|
|
|
} else {
|
|
|
|
|
pending_data = 0;
|
|
|
|
|
}
|
|
|
|
|
if (pending_data) {
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// Subtract all the UL data already allocated in the UL harqs
|
|
|
|
|
uint32_t pending_ul_data = 0;
|
|
|
|
|
for (uint32_t cc_idx = 0; cc_idx < carriers.size(); ++cc_idx) {
|
|
|
|
|
pending_ul_data += get_pending_ul_old_data_unlocked(cc_idx);
|
|
|
|
|
}
|
|
|
|
|
pending_data = (pending_data > pending_ul_data) ? pending_data - pending_ul_data : 0;
|
|
|
|
|
|
|
|
|
|
if (pending_data > 0) {
|
|
|
|
|
Debug("SCHED: pending_data=%d, pending_ul_data=%d, bsr={%d,%d,%d,%d}\n",
|
|
|
|
|
pending_data,
|
|
|
|
|
pending_ul_data,
|
|
|
|
@ -884,13 +867,9 @@ uint32_t sched_ue::get_pending_ul_new_data_unlocked(uint32_t tti)
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// Private lock-free implementation
|
|
|
|
|
uint32_t sched_ue::get_pending_ul_old_data_unlocked()
|
|
|
|
|
uint32_t sched_ue::get_pending_ul_old_data_unlocked(uint32_t cc_idx)
|
|
|
|
|
{
|
|
|
|
|
uint32_t pending_data = 0;
|
|
|
|
|
for (int i = 0; i < SCHED_MAX_HARQ_PROC; i++) {
|
|
|
|
|
pending_data += ul_harq[i].get_pending_data();
|
|
|
|
|
}
|
|
|
|
|
return pending_data;
|
|
|
|
|
return carriers[cc_idx].get_pending_ul_old_data();
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
uint32_t sched_ue::prb_to_rbg(uint32_t nof_prb)
|
|
|
|
@ -903,7 +882,7 @@ uint32_t sched_ue::rgb_to_prb(uint32_t nof_rbg)
|
|
|
|
|
return sched_params->P * nof_rbg;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
uint32_t sched_ue::get_required_prb_dl(uint32_t req_bytes, uint32_t nof_ctrl_symbols)
|
|
|
|
|
uint32_t sched_ue::get_required_prb_dl(uint32_t cc_idx, uint32_t req_bytes, uint32_t nof_ctrl_symbols)
|
|
|
|
|
{
|
|
|
|
|
std::lock_guard<std::mutex> lock(mutex);
|
|
|
|
|
|
|
|
|
@ -913,11 +892,11 @@ uint32_t sched_ue::get_required_prb_dl(uint32_t req_bytes, uint32_t nof_ctrl_sym
|
|
|
|
|
|
|
|
|
|
uint32_t nbytes = 0;
|
|
|
|
|
uint32_t n;
|
|
|
|
|
int mcs0 = (is_first_dl_tx() and cell.nof_prb == 6) ? MCS_FIRST_DL : fixed_mcs_dl;
|
|
|
|
|
int mcs0 = (is_first_dl_tx(cc_idx) and cell.nof_prb == 6) ? MCS_FIRST_DL : carriers[cc_idx].fixed_mcs_dl;
|
|
|
|
|
for (n = 0; n < cell.nof_prb && nbytes < req_bytes; ++n) {
|
|
|
|
|
nof_re = srslte_ra_dl_approx_nof_re(&cell, n + 1, nof_ctrl_symbols);
|
|
|
|
|
if (mcs0 < 0) {
|
|
|
|
|
tbs = alloc_tbs_dl(n + 1, nof_re, 0, &mcs);
|
|
|
|
|
tbs = carriers[cc_idx].alloc_tbs_dl(n + 1, nof_re, 0, &mcs);
|
|
|
|
|
} else {
|
|
|
|
|
tbs = srslte_ra_tbs_from_idx(srslte_ra_tbs_idx_from_mcs(mcs0, false), n + 1) / 8;
|
|
|
|
|
}
|
|
|
|
@ -931,37 +910,10 @@ uint32_t sched_ue::get_required_prb_dl(uint32_t req_bytes, uint32_t nof_ctrl_sym
|
|
|
|
|
return n;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
uint32_t sched_ue::get_required_prb_ul(uint32_t req_bytes)
|
|
|
|
|
uint32_t sched_ue::get_required_prb_ul(uint32_t cc_idx, uint32_t req_bytes)
|
|
|
|
|
{
|
|
|
|
|
int mcs = 0;
|
|
|
|
|
uint32_t nbytes = 0;
|
|
|
|
|
uint32_t N_srs = 0;
|
|
|
|
|
|
|
|
|
|
uint32_t n = 0;
|
|
|
|
|
if (req_bytes == 0) {
|
|
|
|
|
return 0;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
std::lock_guard<std::mutex> lock(mutex);
|
|
|
|
|
|
|
|
|
|
for (n = 1; n < cell.nof_prb && nbytes < req_bytes + 4; n++) {
|
|
|
|
|
uint32_t nof_re = (2 * (SRSLTE_CP_NSYMB(cell.cp) - 1) - N_srs) * n * SRSLTE_NRE;
|
|
|
|
|
int tbs = 0;
|
|
|
|
|
if (fixed_mcs_ul < 0) {
|
|
|
|
|
tbs = alloc_tbs_ul(n, nof_re, 0, &mcs);
|
|
|
|
|
} else {
|
|
|
|
|
tbs = srslte_ra_tbs_from_idx(srslte_ra_tbs_idx_from_mcs(fixed_mcs_ul, true), n) / 8;
|
|
|
|
|
}
|
|
|
|
|
if (tbs > 0) {
|
|
|
|
|
nbytes = tbs;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
while (!srslte_dft_precoding_valid_prb(n) && n <= cell.nof_prb) {
|
|
|
|
|
n++;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
return n;
|
|
|
|
|
return carriers[cc_idx].get_required_prb_ul(req_bytes);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
bool sched_ue::is_sr_triggered()
|
|
|
|
@ -969,88 +921,42 @@ bool sched_ue::is_sr_triggered()
|
|
|
|
|
return sr;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
void sched_ue::reset_pending_pids(uint32_t tti_rx)
|
|
|
|
|
void sched_ue::reset_pending_pids(uint32_t tti_rx, uint32_t cc_idx)
|
|
|
|
|
{
|
|
|
|
|
uint32_t tti_tx_dl = TTI_TX(tti_rx), tti_tx_ul = TTI_RX_ACK(tti_rx);
|
|
|
|
|
|
|
|
|
|
// UL harqs
|
|
|
|
|
get_ul_harq(tti_tx_ul)->reset_pending_data();
|
|
|
|
|
|
|
|
|
|
// DL harqs
|
|
|
|
|
for (int i = 0; i < SCHED_MAX_HARQ_PROC; i++) {
|
|
|
|
|
dl_harq[i].reset_pending_data();
|
|
|
|
|
if (not dl_harq[i].is_empty()) {
|
|
|
|
|
uint32_t tti_diff = srslte_tti_interval(tti_tx_dl, dl_harq[i].get_tti());
|
|
|
|
|
if (tti_diff > 50 and tti_diff < 10240 / 2) {
|
|
|
|
|
log_h->info("SCHED: pid=%d is old. tti_pid=%d, now is %d, resetting\n", i, dl_harq[i].get_tti(), tti_tx_dl);
|
|
|
|
|
dl_harq[i].reset(0);
|
|
|
|
|
dl_harq[i].reset(1);
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
carriers[cc_idx].reset_old_pending_pids(tti_rx);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/* Gets HARQ process with oldest pending retx */
|
|
|
|
|
dl_harq_proc* sched_ue::get_pending_dl_harq(uint32_t tti)
|
|
|
|
|
dl_harq_proc* sched_ue::get_pending_dl_harq(uint32_t tti_tx_dl, uint32_t cc_idx)
|
|
|
|
|
{
|
|
|
|
|
#if ASYNC_DL_SCHED
|
|
|
|
|
|
|
|
|
|
std::lock_guard<std::mutex> lock(mutex);
|
|
|
|
|
|
|
|
|
|
int oldest_idx = -1;
|
|
|
|
|
uint32_t oldest_tti = 0;
|
|
|
|
|
for (int i = 0; i < SCHED_MAX_HARQ_PROC; i++) {
|
|
|
|
|
if (dl_harq[i].has_pending_retx(0, tti) || dl_harq[i].has_pending_retx(1, tti)) {
|
|
|
|
|
uint32_t x = srslte_tti_interval(tti, dl_harq[i].get_tti());
|
|
|
|
|
if (x > oldest_tti) {
|
|
|
|
|
oldest_idx = i;
|
|
|
|
|
oldest_tti = x;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
dl_harq_proc* h = nullptr;
|
|
|
|
|
if (oldest_idx >= 0) {
|
|
|
|
|
h = &dl_harq[oldest_idx];
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
return h;
|
|
|
|
|
|
|
|
|
|
#else
|
|
|
|
|
return &dl_harq[tti % SCHED_MAX_HARQ_PROC];
|
|
|
|
|
#endif
|
|
|
|
|
return carriers[cc_idx].get_pending_dl_harq(tti_tx_dl);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
dl_harq_proc* sched_ue::get_empty_dl_harq()
|
|
|
|
|
dl_harq_proc* sched_ue::get_empty_dl_harq(uint32_t cc_idx)
|
|
|
|
|
{
|
|
|
|
|
std::lock_guard<std::mutex> lock(mutex);
|
|
|
|
|
|
|
|
|
|
dl_harq_proc* h = nullptr;
|
|
|
|
|
for (int i = 0; i < SCHED_MAX_HARQ_PROC && !h; i++) {
|
|
|
|
|
if (dl_harq[i].is_empty(0) && dl_harq[i].is_empty(1)) {
|
|
|
|
|
h = &dl_harq[i];
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
return h;
|
|
|
|
|
return carriers[cc_idx].get_empty_dl_harq();
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
ul_harq_proc* sched_ue::get_ul_harq(uint32_t tti)
|
|
|
|
|
ul_harq_proc* sched_ue::get_ul_harq(uint32_t tti_tx_ul, uint32_t cc_idx)
|
|
|
|
|
{
|
|
|
|
|
return &ul_harq[tti % SCHED_MAX_HARQ_PROC];
|
|
|
|
|
return carriers[cc_idx].get_ul_harq(tti_tx_ul);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
dl_harq_proc* sched_ue::find_dl_harq(uint32_t tti)
|
|
|
|
|
dl_harq_proc* sched_ue::find_dl_harq(uint32_t tti_rx, uint32_t cc_idx)
|
|
|
|
|
{
|
|
|
|
|
for (uint32_t i = 0; i < SCHED_MAX_HARQ_PROC; ++i) {
|
|
|
|
|
if (dl_harq[i].get_tti() == tti) {
|
|
|
|
|
return &dl_harq[i];
|
|
|
|
|
for (auto& h : carriers[cc_idx].dl_harq) {
|
|
|
|
|
if (h.get_tti() == tti_rx) {
|
|
|
|
|
return &h;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
return nullptr;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
dl_harq_proc* sched_ue::get_dl_harq(uint32_t idx)
|
|
|
|
|
dl_harq_proc* sched_ue::get_dl_harq(uint32_t idx, uint32_t cc_idx)
|
|
|
|
|
{
|
|
|
|
|
return &dl_harq[idx];
|
|
|
|
|
return &carriers[cc_idx].dl_harq[idx];
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
srslte_dci_format_t sched_ue::get_dci_format()
|
|
|
|
@ -1084,33 +990,6 @@ srslte_dci_format_t sched_ue::get_dci_format()
|
|
|
|
|
return ret;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/* Find lowest DCI aggregation level supported by the UE spectral efficiency */
|
|
|
|
|
uint32_t sched_ue::get_aggr_level(uint32_t nof_bits)
|
|
|
|
|
{
|
|
|
|
|
std::lock_guard<std::mutex> lock(mutex);
|
|
|
|
|
uint32_t l = 0;
|
|
|
|
|
float max_coderate = srslte_cqi_to_coderate(dl_cqi);
|
|
|
|
|
float coderate = 99;
|
|
|
|
|
float factor = 1.5;
|
|
|
|
|
uint32_t l_max = 3;
|
|
|
|
|
if (cell.nof_prb == 6) {
|
|
|
|
|
factor = 1.0;
|
|
|
|
|
l_max = 2;
|
|
|
|
|
}
|
|
|
|
|
l_max = SRSLTE_MIN(max_aggr_level, l_max);
|
|
|
|
|
do {
|
|
|
|
|
coderate = srslte_pdcch_coderate(nof_bits, l);
|
|
|
|
|
l++;
|
|
|
|
|
} while (l < l_max && factor * coderate > max_coderate);
|
|
|
|
|
Debug("SCHED: CQI=%d, l=%d, nof_bits=%d, coderate=%.2f, max_coderate=%.2f\n",
|
|
|
|
|
dl_cqi,
|
|
|
|
|
l,
|
|
|
|
|
nof_bits,
|
|
|
|
|
coderate,
|
|
|
|
|
max_coderate);
|
|
|
|
|
return l;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
sched_ue::sched_dci_cce_t* sched_ue::get_locations(uint32_t cfi, uint32_t sf_idx)
|
|
|
|
|
{
|
|
|
|
|
if (cfi > 0 && cfi <= 3) {
|
|
|
|
@ -1192,20 +1071,159 @@ int sched_ue::cqi_to_tbs(uint32_t cqi,
|
|
|
|
|
return tbs;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
int sched_ue::alloc_tbs_dl(uint32_t nof_prb, uint32_t nof_re, uint32_t req_bytes, int* mcs)
|
|
|
|
|
/************************************************************************************************
|
|
|
|
|
* sched_ue::sched_ue_carrier
|
|
|
|
|
***********************************************************************************************/
|
|
|
|
|
|
|
|
|
|
sched_ue_carrier::sched_ue_carrier(sched_interface::ue_cfg_t* cfg_,
|
|
|
|
|
srslte_cell_t* cell_cfg_,
|
|
|
|
|
uint16_t rnti_,
|
|
|
|
|
uint32_t cc_idx_,
|
|
|
|
|
srslte::log* log_) :
|
|
|
|
|
cfg(cfg_),
|
|
|
|
|
cell(cell_cfg_),
|
|
|
|
|
rnti(rnti_),
|
|
|
|
|
cc_idx(cc_idx_),
|
|
|
|
|
log_h(log_)
|
|
|
|
|
{
|
|
|
|
|
return alloc_tbs(nof_prb, nof_re, req_bytes, false, mcs);
|
|
|
|
|
// Config HARQ processes
|
|
|
|
|
for (uint32_t i = 0; i < dl_harq.size(); ++i) {
|
|
|
|
|
dl_harq[i].config(i, cfg->maxharq_tx, log_h);
|
|
|
|
|
ul_harq[i].config(i, cfg->maxharq_tx, log_h);
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
int sched_ue::alloc_tbs_ul(uint32_t nof_prb, uint32_t nof_re, uint32_t req_bytes, int* mcs)
|
|
|
|
|
void sched_ue_carrier::reset()
|
|
|
|
|
{
|
|
|
|
|
return alloc_tbs(nof_prb, nof_re, req_bytes, true, mcs);
|
|
|
|
|
dl_ri = 0;
|
|
|
|
|
dl_ri_tti = 0;
|
|
|
|
|
dl_pmi = 0;
|
|
|
|
|
dl_pmi_tti = 0;
|
|
|
|
|
dl_cqi = 1;
|
|
|
|
|
dl_cqi_tti = 0;
|
|
|
|
|
ul_cqi = 1;
|
|
|
|
|
ul_cqi_tti = 0;
|
|
|
|
|
for (uint32_t i = 0; i < dl_harq.size(); ++i) {
|
|
|
|
|
for (uint32_t tb = 0; tb < SRSLTE_MAX_TB; tb++) {
|
|
|
|
|
dl_harq[i].reset(tb);
|
|
|
|
|
ul_harq[i].reset(tb);
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/// Clears stale HARQ state for the given RX TTI: drops pending data of the
/// UL HARQ process tied to this TTI, and fully resets any non-empty DL HARQ
/// process whose last transmission is older than 50 TTIs (but less than half
/// a TTI wrap, to avoid resetting on wrap-around ambiguity).
void sched_ue_carrier::reset_old_pending_pids(uint32_t tti_rx)
{
  uint32_t tti_tx_dl = TTI_TX(tti_rx);
  uint32_t tti_tx_ul = TTI_RX_ACK(tti_rx);

  // UL Harqs: the process for this UL TTI is chosen synchronously.
  get_ul_harq(tti_tx_ul)->reset_pending_data();

  // DL harqs: look for processes that have gone stale.
  for (auto& proc : dl_harq) {
    proc.reset_pending_data();
    if (proc.is_empty()) {
      continue;
    }
    uint32_t age = srslte_tti_interval(tti_tx_dl, proc.get_tti());
    if (age > 50 and age < 10240 / 2) {
      log_h->info("SCHED: pid=%d is old. tti_pid=%d, now is %d, resetting\n", proc.get_id(), proc.get_tti(), tti_tx_dl);
      for (uint32_t tb = 0; tb < SRSLTE_MAX_TB; tb++) {
        proc.reset(tb);
      }
    }
  }
}
|
|
|
|
|
|
|
|
|
|
/// Returns the DL HARQ process that should be retransmitted at tti_tx_dl,
/// or nullptr if none has a pending retransmission.
/// In asynchronous mode, picks the process with a pending retx (on either TB)
/// that has been waiting the longest; in synchronous mode the process id is
/// derived directly from the TTI.
dl_harq_proc* sched_ue_carrier::get_pending_dl_harq(uint32_t tti_tx_dl)
{
#if ASYNC_DL_SCHED
  int      oldest_idx = -1;
  uint32_t oldest_tti = 0;
  for (auto& h : dl_harq) {
    if (h.has_pending_retx(0, tti_tx_dl) or h.has_pending_retx(1, tti_tx_dl)) {
      uint32_t x = srslte_tti_interval(tti_tx_dl, h.get_tti());
      if (x > oldest_tti) {
        oldest_idx = h.get_id();
        oldest_tti = x;
      }
    }
  }
  dl_harq_proc* h = nullptr;
  if (oldest_idx >= 0) {
    h = &dl_harq[oldest_idx];
  }
  return h;
#else
  // Fix: the parameter is named tti_tx_dl; the previous `tti` was an
  // undeclared identifier and broke the build when ASYNC_DL_SCHED == 0.
  return &dl_harq[tti_tx_dl % SCHED_MAX_HARQ_PROC];
#endif
}
|
|
|
|
|
|
|
|
|
|
/// Returns the first DL HARQ process with no active transport block on
/// either TB, or nullptr if all processes are busy.
dl_harq_proc* sched_ue_carrier::get_empty_dl_harq()
{
  for (auto& proc : dl_harq) {
    if (proc.is_empty(0) and proc.is_empty(1)) {
      return &proc;
    }
  }
  return nullptr;
}
|
|
|
|
|
|
|
|
|
|
/// Records an ACK/NACK received at tti_rx for the matching DL HARQ process.
/// Returns the TBS (bytes) of the acknowledged transport block, or -1 if no
/// HARQ process matches this TTI.
int sched_ue_carrier::set_ack_info(uint32_t tti_rx, uint32_t tb_idx, bool ack)
{
  for (auto& proc : dl_harq) {
    if (TTI_TX(proc.get_tti()) != tti_rx) {
      continue;
    }
    Debug("SCHED: Set ACK=%d for rnti=0x%x, pid=%d, tb=%d, tti=%d\n", ack, rnti, proc.get_id(), tb_idx, tti_rx);
    proc.set_ack(tb_idx, ack);
    return proc.get_tbs(tb_idx);
  }

  Warning("SCHED: Received ACK info for unknown TTI=%d\n", tti_rx);
  return -1;
}
|
|
|
|
|
|
|
|
|
|
/// Returns the UL HARQ process for the given TTI. UL HARQ is synchronous,
/// so the process index follows directly from the TTI.
ul_harq_proc* sched_ue_carrier::get_ul_harq(uint32_t tti)
{
  uint32_t pid = tti % SCHED_MAX_HARQ_PROC;
  return &ul_harq[pid];
}
|
|
|
|
|
|
|
|
|
|
/// Returns the total number of bytes still pending across all UL HARQ
/// processes (i.e. data already granted but not yet acknowledged).
uint32_t sched_ue_carrier::get_pending_ul_old_data()
{
  uint32_t total = 0;
  for (auto& proc : ul_harq) {
    total += proc.get_pending_data();
  }
  return total;
}
|
|
|
|
|
|
|
|
|
|
/* Find lowest DCI aggregation level supported by the UE spectral efficiency */
|
|
|
|
|
uint32_t sched_ue_carrier::get_aggr_level(uint32_t nof_bits)
|
|
|
|
|
{
|
|
|
|
|
uint32_t l = 0;
|
|
|
|
|
float max_coderate = srslte_cqi_to_coderate(dl_cqi);
|
|
|
|
|
float coderate = 99;
|
|
|
|
|
float factor = 1.5;
|
|
|
|
|
uint32_t l_max = 3;
|
|
|
|
|
if (cell->nof_prb == 6) {
|
|
|
|
|
factor = 1.0;
|
|
|
|
|
l_max = 2;
|
|
|
|
|
}
|
|
|
|
|
l_max = SRSLTE_MIN(max_aggr_level, l_max);
|
|
|
|
|
do {
|
|
|
|
|
coderate = srslte_pdcch_coderate(nof_bits, l);
|
|
|
|
|
l++;
|
|
|
|
|
} while (l < l_max && factor * coderate > max_coderate);
|
|
|
|
|
Debug("SCHED: CQI=%d, l=%d, nof_bits=%d, coderate=%.2f, max_coderate=%.2f\n",
|
|
|
|
|
dl_cqi,
|
|
|
|
|
l,
|
|
|
|
|
nof_bits,
|
|
|
|
|
coderate,
|
|
|
|
|
max_coderate);
|
|
|
|
|
return l;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/* In this scheduler we tend to use all the available bandwidth and select the MCS
|
|
|
|
|
* that approximates the minimum between the capacity and the requested rate
|
|
|
|
|
*/
|
|
|
|
|
int sched_ue::alloc_tbs(uint32_t nof_prb, uint32_t nof_re, uint32_t req_bytes, bool is_ul, int* mcs)
|
|
|
|
|
int sched_ue_carrier::alloc_tbs(uint32_t nof_prb, uint32_t nof_re, uint32_t req_bytes, bool is_ul, int* mcs)
|
|
|
|
|
{
|
|
|
|
|
uint32_t sel_mcs = 0;
|
|
|
|
|
|
|
|
|
@ -1214,7 +1232,7 @@ int sched_ue::alloc_tbs(uint32_t nof_prb, uint32_t nof_re, uint32_t req_bytes, b
|
|
|
|
|
uint32_t max_Qm = is_ul ? 4 : 6; // Allow 16-QAM in PUSCH Only
|
|
|
|
|
|
|
|
|
|
// TODO: Compute real spectral efficiency based on PUSCH-UCI configuration
|
|
|
|
|
int tbs_bytes = cqi_to_tbs(cqi, nof_prb, nof_re, max_mcs, max_Qm, is_ul, &sel_mcs) / 8;
|
|
|
|
|
int tbs_bytes = sched_ue::cqi_to_tbs(cqi, nof_prb, nof_re, max_mcs, max_Qm, is_ul, &sel_mcs) / 8;
|
|
|
|
|
|
|
|
|
|
/* If less bytes are requested, lower the MCS */
|
|
|
|
|
if (tbs_bytes > (int)req_bytes && req_bytes > 0) {
|
|
|
|
@ -1240,4 +1258,45 @@ int sched_ue::alloc_tbs(uint32_t nof_prb, uint32_t nof_re, uint32_t req_bytes, b
|
|
|
|
|
return tbs_bytes;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/// Convenience wrapper: compute DL TBS and MCS for the given allocation.
/// See alloc_tbs() for the selection logic.
int sched_ue_carrier::alloc_tbs_dl(uint32_t nof_prb, uint32_t nof_re, uint32_t req_bytes, int* mcs)
{
  return alloc_tbs(nof_prb, nof_re, req_bytes, /* is_ul = */ false, mcs);
}
|
|
|
|
|
|
|
|
|
|
/// Convenience wrapper: compute UL TBS and MCS for the given allocation.
/// See alloc_tbs() for the selection logic.
int sched_ue_carrier::alloc_tbs_ul(uint32_t nof_prb, uint32_t nof_re, uint32_t req_bytes, int* mcs)
{
  return alloc_tbs(nof_prb, nof_re, req_bytes, /* is_ul = */ true, mcs);
}
|
|
|
|
|
|
|
|
|
|
// Estimates the number of UL PRBs required to carry req_bytes, growing the
// allocation one PRB at a time until the resulting TBS covers the request,
// then rounding up to a DFT-precoding-valid PRB count. Returns 0 if no data
// is requested.
uint32_t sched_ue_carrier::get_required_prb_ul(uint32_t req_bytes)
{
  int      mcs    = 0;
  uint32_t nbytes = 0;
  uint32_t N_srs  = 0; // assumes no SRS symbols stolen from PUSCH — TODO confirm

  uint32_t n = 0;
  if (req_bytes == 0) {
    return 0;
  }
  // NOTE(review): "+ 4" presumably leaves headroom for MAC subheaders, and the
  // loop bound never tries n == nof_prb — confirm both are intentional.
  for (n = 1; n < cell->nof_prb && nbytes < req_bytes + 4; n++) {
    // REs available in n PRB pairs: one symbol per slot reserved for DMRS,
    // minus any SRS symbols.
    uint32_t nof_re = (2 * (SRSLTE_CP_NSYMB(cell->cp) - 1) - N_srs) * n * SRSLTE_NRE;
    int      tbs    = 0;
    if (fixed_mcs_ul < 0) {
      // MCS chosen from the UE's UL CQI.
      tbs = alloc_tbs_ul(n, nof_re, 0, &mcs);
    } else {
      // Operator-fixed UL MCS.
      tbs = srslte_ra_tbs_from_idx(srslte_ra_tbs_idx_from_mcs(fixed_mcs_ul, true), n) / 8;
    }
    if (tbs > 0) {
      nbytes = tbs;
    }
  }

  // PUSCH DFT precoding only supports PRB counts of the form 2^a * 3^b * 5^c;
  // round up to the next valid size.
  while (!srslte_dft_precoding_valid_prb(n) && n <= cell->nof_prb) {
    n++;
  }

  return n;
}
|
|
|
|
|
|
|
|
|
|
} // namespace srsenb
|
|
|
|
|