Improvements and fixes on srsENB scheduler (#228)

master
Ismael Gomez 7 years ago committed by GitHub
parent badbb1de77
commit 8c92f3fddc
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23

@ -81,6 +81,9 @@ public:
uint32_t n1pucch_an;
uint32_t delta_pucch_shift;
// If non-negative, statically allocate N prbs at the edges of the uplink for PUCCH
int nrb_pucch;
uint32_t nrb_cqi;
uint32_t ncs_an;

@ -129,6 +129,10 @@ SRSLTE_API bool srslte_prach_tti_opportunity(srslte_prach_t *p,
uint32_t current_tti,
int allowed_subframe);
SRSLTE_API bool srslte_prach_tti_opportunity_config(uint32_t config_idx,
uint32_t current_tti,
int allowed_subframe);
SRSLTE_API void srslte_prach_sf_config(uint32_t config_idx,
srslte_prach_sf_config_t *sf_config);

@ -193,6 +193,11 @@ srslte_prach_sfn_t srslte_prach_get_sfn(uint32_t config_idx) {
*/
/* Returns true when the PRACH configuration stored in `p` opens a
 * transmission opportunity at `current_tti` (thin forwarding wrapper). */
bool srslte_prach_tti_opportunity(srslte_prach_t *p, uint32_t current_tti, int allowed_subframe) {
  return srslte_prach_tti_opportunity_config(p->config_idx, current_tti, allowed_subframe);
}
bool srslte_prach_tti_opportunity_config(uint32_t config_idx, uint32_t current_tti, int allowed_subframe)
{
// Get SFN and sf_idx from the PRACH configuration index
srslte_prach_sfn_t prach_sfn = srslte_prach_get_sfn(config_idx);
@ -213,6 +218,7 @@ bool srslte_prach_tti_opportunity(srslte_prach_t *p, uint32_t current_tti, int a
}
}
return false;
}
void srslte_prach_sf_config(uint32_t config_idx, srslte_prach_sf_config_t *sf_config) {

@ -66,9 +66,10 @@ public:
public:
/* Virtual methods for user metric calculation */
virtual void reset_allocation(uint32_t nof_rb_) = 0;
virtual void new_tti(std::map<uint16_t,sched_ue> &ue_db, uint32_t nof_rb, uint32_t tti) = 0;
virtual ul_harq_proc* get_user_allocation(sched_ue *user) = 0;
virtual void update_allocation(ul_harq_proc::ul_alloc_t alloc) = 0;
virtual bool update_allocation(ul_harq_proc::ul_alloc_t alloc) = 0;
};
@ -120,7 +121,6 @@ public:
int dl_sched(uint32_t tti, dl_sched_res_t *sched_result);
int ul_sched(uint32_t tti, ul_sched_res_t *sched_result);
/* Custom TPC functions
*/
void tpc_inc(uint16_t rnti);

@ -93,10 +93,11 @@ class ul_harq_proc : public harq_proc
{
public:
typedef struct {
struct ul_alloc_t {
uint32_t RB_start;
uint32_t L;
} ul_alloc_t;
inline void set(uint32_t start, uint32_t len) {RB_start = start; L = len;}
};
void new_tx(uint32_t tti, int mcs, int tbs);

@ -64,20 +64,22 @@ class ul_metric_rr : public sched::metric_ul
public:
void new_tti(std::map<uint16_t,sched_ue> &ue_db, uint32_t nof_rb, uint32_t tti);
ul_harq_proc* get_user_allocation(sched_ue *user);
void update_allocation(ul_harq_proc::ul_alloc_t alloc);
bool update_allocation(ul_harq_proc::ul_alloc_t alloc);
void reset_allocation(uint32_t nof_rb_);
private:
const static int MAX_PRB = 100;
bool new_allocation(uint32_t L, ul_harq_proc::ul_alloc_t *alloc);
bool allocation_is_valid(ul_harq_proc::ul_alloc_t alloc);
ul_harq_proc* apply_user_allocation(sched_ue *user);
ul_harq_proc* apply_user_allocation(sched_ue *user, bool retx_only);
ul_harq_proc* allocate_user_newtx_prbs(sched_ue* user);
ul_harq_proc* allocate_user_retx_prbs(sched_ue *user);
bool used_rb[MAX_PRB];
uint32_t current_tti;
uint32_t nof_rb;
uint32_t available_rb;
};

@ -46,7 +46,7 @@ sib2 =
{
high_speed_flag = false;
prach_config_index = 3;
prach_freq_offset = 0;
prach_freq_offset = 2;
zero_correlation_zone_config = 11;
};
};

@ -792,6 +792,7 @@ int sched::dl_sched(uint32_t tti, sched_interface::dl_sched_res_t* sched_result)
// Uplink sched
int sched::ul_sched(uint32_t tti, srsenb::sched_interface::ul_sched_res_t* sched_result)
{
typedef std::map<uint16_t, sched_ue>::iterator it_t;
if (!configured) {
return 0;
}
@ -816,9 +817,10 @@ int sched::ul_sched(uint32_t tti, srsenb::sched_interface::ul_sched_res_t* sched
// current_cfi is set in dl_sched()
bzero(sched_result, sizeof(sched_interface::ul_sched_res_t));
ul_metric->reset_allocation(cfg.cell.nof_prb);
// Get HARQ process for this TTI
for(std::map<uint16_t, sched_ue>::iterator iter=ue_db.begin(); iter!=ue_db.end(); ++iter) {
for(it_t iter=ue_db.begin(); iter!=ue_db.end(); ++iter) {
sched_ue *user = (sched_ue*) &iter->second;
uint16_t rnti = (uint16_t) iter->first;
@ -834,16 +836,38 @@ int sched::ul_sched(uint32_t tti, srsenb::sched_interface::ul_sched_res_t* sched
}
}
ul_metric->new_tti(ue_db, cfg.cell.nof_prb, current_tti);
// Update available allocation if there's a pending RAR
if (pending_msg3[tti%10].enabled) {
ul_harq_proc::ul_alloc_t msg3 = {pending_msg3[tti%10].n_prb, pending_msg3[tti%10].L};
ul_metric->update_allocation(msg3);
if(ul_metric->update_allocation(msg3)) {
log_h->debug("SCHED: Allocated msg3 RBs within (%d,%d)\n", msg3.RB_start, msg3.RB_start + msg3.L);
}
else {
log_h->warning("SCHED: Could not allocate msg3 within (%d,%d)\n", msg3.RB_start, msg3.RB_start + msg3.L);
}
}
// Allocate PUCCH resources
for(std::map<uint16_t, sched_ue>::iterator iter=ue_db.begin(); iter!=ue_db.end(); ++iter) {
if (cfg.nrb_pucch >= 0) {
ul_harq_proc::ul_alloc_t pucch = {0, (uint32_t) cfg.nrb_pucch};
if(!ul_metric->update_allocation(pucch)) {
log_h->warning("SCHED: Failed to allocate PUCCH\n");
}
pucch.RB_start = cfg.cell.nof_prb-cfg.nrb_pucch;
pucch.L = (uint32_t) cfg.nrb_pucch;
if(!ul_metric->update_allocation(pucch)) {
log_h->warning("SCHED: Failed to allocate PUCCH\n");
}
for(it_t iter=ue_db.begin(); iter!=ue_db.end(); ++iter) {
sched_ue *user = (sched_ue *) &iter->second;
uint16_t rnti = (uint16_t) iter->first;
uint32_t prb_idx[2] = {0, 0};
if(user->get_pucch_sched(current_tti, prb_idx)) {
user->has_pucch = true;
}
}
} else {
for(it_t iter=ue_db.begin(); iter!=ue_db.end(); ++iter) {
sched_ue *user = (sched_ue*) &iter->second;
uint16_t rnti = (uint16_t) iter->first;
uint32_t prb_idx[2] = {0, 0};
@ -856,9 +880,23 @@ int sched::ul_sched(uint32_t tti, srsenb::sched_interface::ul_sched_res_t* sched
}
}
}
}
// reserve PRBs for PRACH
if(srslte_prach_tti_opportunity_config(cfg.prach_config, tti, -1)) {
ul_harq_proc::ul_alloc_t prach = {cfg.prach_freq_offset, 6};
if(!ul_metric->update_allocation(prach)) {
log_h->warning("SCHED: Failed to allocate PRACH RBs within (%d,%d)\n", prach.RB_start, prach.RB_start + prach.L);
}
else {
log_h->debug("SCHED: Allocated PRACH RBs within (%d,%d)\n", prach.RB_start, prach.RB_start + prach.L);
}
}
ul_metric->new_tti(ue_db, cfg.cell.nof_prb, current_tti);
// Now allocate PUSCH
for(std::map<uint16_t, sched_ue>::iterator iter=ue_db.begin(); iter!=ue_db.end(); ++iter) {
for(it_t iter=ue_db.begin(); iter!=ue_db.end(); ++iter) {
sched_ue *user = (sched_ue*) &iter->second;
uint16_t rnti = (uint16_t) iter->first;
@ -950,7 +988,7 @@ int sched::ul_sched(uint32_t tti, srsenb::sched_interface::ul_sched_res_t* sched
}
// Update pending data counters after this TTI
for(std::map<uint16_t, sched_ue>::iterator iter=ue_db.begin(); iter!=ue_db.end(); ++iter) {
for(it_t iter=ue_db.begin(); iter!=ue_db.end(); ++iter) {
sched_ue *user = (sched_ue *) &iter->second;
uint16_t rnti = (uint16_t) iter->first;

@ -210,26 +210,48 @@ dl_harq_proc* dl_metric_rr::get_user_allocation(sched_ue *user)
*
*****************************************************************/
void ul_metric_rr::new_tti(std::map<uint16_t,sched_ue> &ue_db, uint32_t nof_rb_, uint32_t tti)
void ul_metric_rr::reset_allocation(uint32_t nof_rb_)
{
current_tti = tti;
nof_rb = nof_rb_;
available_rb = nof_rb_;
bzero(used_rb, nof_rb*sizeof(bool));
}
/* Runs the round-robin UL metric for one TTI over all active UEs.
 * Two passes: (1) HARQ retransmissions are served first, since they should
 * keep (or re-fit) their PRBs; (2) new transmissions are granted starting
 * from a rotating priority index so every UE eventually goes first.
 * The pasted diff interleaved old and new lines, redefining `priority_idx`
 * and `iter` and leaving a stale apply_user_allocation() call — fixed here.
 * @param ue_db   map rnti -> sched_ue of active users
 * @param nof_rb_ number of UL PRBs (kept for interface compatibility)
 * @param tti     current TTI */
void ul_metric_rr::new_tti(std::map<uint16_t,sched_ue> &ue_db, uint32_t nof_rb_, uint32_t tti)
{
  typedef std::map<uint16_t, sched_ue>::iterator it_t;
  current_tti = tti;

  if(ue_db.size()==0)
    return;

  // Clear any grant left over from the previous TTI
  for(it_t it = ue_db.begin(); it != ue_db.end(); ++it) {
    it->second.ul_next_alloc = NULL;
  }

  // give priority in a time-domain RR basis; half-size offset makes DL and UL interleaved
  uint32_t priority_idx = (current_tti+(uint32_t)ue_db.size()/2) % (uint32_t)ue_db.size();

  // allocate reTxs first
  it_t iter = ue_db.begin();
  for(uint32_t ue_count = 0 ; ue_count < ue_db.size() ; ++iter, ++ue_count) {
    if(iter==ue_db.end()) {
      iter = ue_db.begin(); // wrap around
    }
    sched_ue *user = (sched_ue *) &iter->second;
    user->ul_next_alloc = allocate_user_retx_prbs(user);
  }

  // then new transmissions, starting at the rotating priority index
  iter = ue_db.begin();
  std::advance(iter,priority_idx);
  for(uint32_t ue_count = 0 ; ue_count < ue_db.size() ; ++iter, ++ue_count) {
    if(iter==ue_db.end()) {
      iter = ue_db.begin(); // wrap around
    }
    sched_ue *user = (sched_ue*) &iter->second;
    if (!user->ul_next_alloc) {
      user->ul_next_alloc = allocate_user_newtx_prbs(user);
    }
  }
}
@ -266,8 +288,8 @@ bool ul_metric_rr::new_allocation(uint32_t L, ul_harq_proc::ul_alloc_t* alloc)
}
}
}
if (!alloc->L) {
return 0;
if (alloc->L==0) {
return false;
}
// Make sure L is allowed by SC-FDMA modulation
@ -277,21 +299,64 @@ bool ul_metric_rr::new_allocation(uint32_t L, ul_harq_proc::ul_alloc_t* alloc)
return alloc->L == L;
}
/* Tries to reserve the PRB span `alloc` in this TTI's mask.
 * @return true if the span was free and is now marked used; false if it
 *         exceeds the available/total PRBs or collides with a prior grant.
 * The pasted diff kept the old `void` signature and bare `return;` guards
 * inside a bool function — both fixed here. */
bool ul_metric_rr::update_allocation(ul_harq_proc::ul_alloc_t alloc)
{
  // Cheap rejections before scanning the mask
  if (alloc.L > available_rb) {
    return false;
  }
  if (alloc.RB_start + alloc.L > nof_rb) {
    return false;
  }
  if(allocation_is_valid(alloc)) {
    for (uint32_t n=alloc.RB_start;n<alloc.RB_start+alloc.L;n++) {
      used_rb[n] = true;
    }
    available_rb -= alloc.L;
    return true;
  }
  return false;
}
ul_harq_proc* ul_metric_rr::apply_user_allocation(sched_ue *user) {
/* Tries to place a pending HARQ retransmission for `user` in this TTI.
 * Prefers the exact PRBs of the previous attempt; if they are taken, looks
 * for any other free span of the same length.
 * @return the HARQ process to retransmit, or NULL if nothing is pending
 *         or no suitable PRBs exist. */
ul_harq_proc* ul_metric_rr::allocate_user_retx_prbs(sched_ue *user)
{
  ul_harq_proc *harq = user->get_ul_harq(current_tti);
  if (harq->is_empty(0)) {
    return NULL; // no pending retransmission for this UE
  }

  // Best case: the PRBs used last time are still free — reuse them
  ul_harq_proc::ul_alloc_t prbs = harq->get_alloc();
  if (update_allocation(prbs)) {
    return harq;
  }

  // Otherwise search for another span of the same length in this TTI
  if (!new_allocation(prbs.L, &prbs)) {
    return NULL;
  }
  if (!update_allocation(prbs)) {
    printf("SCHED: Computed UL allocation is not valid!\n");
  }
  harq->set_alloc(prbs);
  return harq;
}
/* Tries to grant PRBs for a NEW uplink transmission for `user`.
 * Only acts when the UE's HARQ PID is idle and there is pending UL data.
 * @return the HARQ process to transmit on, or NULL if no grant was made.
 * Fix: `alloc` was read via alloc.L after discarding new_allocation()'s
 * return value; if new_allocation() failed before writing the struct this
 * was an uninitialized read — zero-initialize it. */
ul_harq_proc* ul_metric_rr::allocate_user_newtx_prbs(sched_ue* user)
{
  uint32_t pending_data = user->get_pending_ul_new_data(current_tti);
  ul_harq_proc *h = user->get_ul_harq(current_tti);

  // find an empty PID with data waiting
  if (h->is_empty(0) and pending_data) {
    uint32_t pending_rb = user->get_required_prb_ul(pending_data);
    ul_harq_proc::ul_alloc_t alloc = {}; // alloc.L == 0 unless new_allocation() fills it
    new_allocation(pending_rb, &alloc);
    // A shortened allocation (alloc.L < pending_rb) is still usable
    if (alloc.L) {
      if(!update_allocation(alloc)) {
        printf("SCHED: Computed UL allocation is not valid!\n");
      }
      h->set_alloc(alloc);
      return h;
    }
  }
  return NULL;
}
ul_harq_proc* ul_metric_rr::apply_user_allocation(sched_ue *user, bool retx_only) {
// Time-domain RR scheduling
uint32_t pending_data = user->get_pending_ul_new_data(current_tti);
ul_harq_proc *h = user->get_ul_harq(current_tti);
@ -314,6 +379,10 @@ ul_harq_proc* ul_metric_rr::apply_user_allocation(sched_ue *user) {
}
}
if (retx_only) {
return NULL;
}
// If could not schedule the reTx, or there wasn't any pending retx, find an empty PID
if (h->is_empty(0)) {
// Allocate resources based on pending data

@ -111,7 +111,7 @@ void metrics_stdout::print_metrics()
{
n_reports = 0;
cout << endl;
cout << "------DL-------------------------------UL--------------------------------" << endl;
cout << "------DL------------------------------UL----------------------------------" << endl;
cout << "rnti cqi ri mcs brate bler snr phr mcs brate bler bsr" << endl;
}
if (metrics.rrc.n_ues > 0) {
@ -126,13 +126,9 @@ void metrics_stdout::print_metrics()
cout << std::hex << metrics.mac[i].rnti << " ";
cout << float_to_string(metrics.mac[i].dl_cqi, 2);
cout << float_to_string(metrics.mac[i].dl_ri, 3);
cout << float_to_string(metrics.mac[i].dl_ri, 1);
cout << float_to_string(metrics.phy[i].dl.mcs, 2);
if (metrics.mac[i].tx_brate > 0 && metrics_report_period) {
cout << float_to_eng_string((float) metrics.mac[i].tx_brate/metrics_report_period, 2);
} else {
cout << float_to_string(0, 2);
}
if (metrics.mac[i].tx_pkts > 0 && metrics.mac[i].tx_errors) {
cout << float_to_string((float) 100*metrics.mac[i].tx_errors/metrics.mac[i].tx_pkts, 1) << "%";
} else {
@ -144,7 +140,7 @@ void metrics_stdout::print_metrics()
if (metrics.mac[i].rx_brate > 0 && metrics_report_period) {
cout << float_to_eng_string((float) metrics.mac[i].rx_brate/metrics_report_period, 2);
} else {
cout << float_to_string(0, 2);
cout << " " << float_to_string(0, 2);
}
if (metrics.mac[i].rx_pkts > 0 && metrics.mac[i].rx_errors > 0) {
cout << float_to_string((float) 100*metrics.mac[i].rx_errors/metrics.mac[i].rx_pkts, 1) << "%";
@ -174,7 +170,14 @@ void metrics_stdout::print_disconnect()
/* Formats `f` into a fixed-width (6-char) column with roughly `digits`
 * significant digits. NaN and near-zero values print as 0.
 * Fixes: the pasted diff left both the old and new `precision`
 * declarations (redefinition); and `abs(f)` called the integer abs on a
 * float, truncating the magnitude — use fabs as the rest of the function does. */
std::string metrics_stdout::float_to_string(float f, int digits)
{
  std::ostringstream os;
  int precision;
  if(isnan(f) or fabs(f) < 0.0001) {
    f = 0.0;
    precision = digits-1;
  }
  else {
    // fewer decimals the larger the magnitude; DBL_EPSILON nudge avoids
    // flipping precision at exact powers of ten
    precision = digits - (int)(log10(fabs(f))-2*DBL_EPSILON);
  }
  os << std::setw(6) << std::fixed << std::setprecision(precision) << f;
  return os.str();
}

@ -89,6 +89,7 @@ bool gtpu::init(std::string gtp_bind_addr_, std::string mme_addr_, srsenb::pdcp_
if (bind(src_fd, (struct sockaddr *)&bindaddr, sizeof(struct sockaddr_in))) {
gtpu_log->error("Failed to bind on address %s, port %d\n", gtp_bind_addr.c_str(), GTPU_PORT);
gtpu_log->console("Failed to bind on address %s, port %d\n", gtp_bind_addr.c_str(), GTPU_PORT);
return false;
}

@ -637,8 +637,12 @@ void rrc::config_mac()
}
sched_cfg.si_window_ms = liblte_rrc_si_window_length_num[cfg.sibs[0].sib.sib1.si_window_length];
sched_cfg.prach_rar_window = liblte_rrc_ra_response_window_size_num[cfg.sibs[1].sib.sib2.rr_config_common_sib.rach_cnfg.ra_resp_win_size];
sched_cfg.prach_freq_offset = cfg.sibs[1].sib.sib2.rr_config_common_sib.prach_cnfg.prach_cnfg_info.prach_freq_offset;
sched_cfg.maxharq_msg3tx = cfg.sibs[1].sib.sib2.rr_config_common_sib.rach_cnfg.max_harq_msg3_tx;
sched_cfg.nrb_pucch = SRSLTE_MAX(cfg.sr_cfg.nof_prb, cfg.cqi_cfg.nof_prb);
rrc_log->info("Allocating %d PRBs for PUCCH\n", sched_cfg.nrb_pucch);
// Copy Cell configuration
memcpy(&sched_cfg.cell, &cfg.cell, sizeof(srslte_cell_t));
@ -804,8 +808,7 @@ void rrc::run_thread()
break;
}
} else {
printf("Discarting rnti=0x%xn", p.rnti);
rrc_log->warning("Discarting PDU for removed rnti=0x%x\n", p.rnti);
rrc_log->warning("Discarding PDU for removed rnti=0x%x\n", p.rnti);
}
pthread_mutex_unlock(&user_mutex);
}

Loading…
Cancel
Save