Merge branch 'next' of github.com:softwareradiosystems/srsLTE into next

Branch: master
Author: Ismael Gomez
Commit: 195beac263

@@ -2715,8 +2715,10 @@ LIBLTE_ERROR_ENUM liblte_rrc_pack_report_config_eutra_ie(LIBLTE_RRC_REPORT_CONFI
         liblte_value_2_bits(rep_cnfg_eutra->trigger_type, ie_ptr, 1);
         if(LIBLTE_RRC_TRIGGER_TYPE_EUTRA_EVENT == rep_cnfg_eutra->trigger_type)
         {
+            // Event ID choice extension indicator
+            liblte_value_2_bits(0, ie_ptr, 1); // Choice with extension - unlikely to be >63 choices
             // Event ID
             // FIXME: Handle extension properly
             liblte_value_2_bits(rep_cnfg_eutra->event.event_id, ie_ptr, 3);
             if(LIBLTE_RRC_EVENT_ID_EUTRA_A1 == rep_cnfg_eutra->event.event_id)
             {
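
Note on the hunk above: in the Rel-8 ASN.1, the eventId field of ReportConfigEUTRA is a CHOICE with an extension marker (five root alternatives, a1..a5), so unaligned PER packing has to emit a one-bit extension indicator before the 3-bit choice index; the new liblte_value_2_bits(0, ie_ptr, 1) call writes that bit. A minimal sketch of the same packing pattern, with illustrative names (bit_buf_t and pack_bits are not liblte APIs, they just mirror what liblte_value_2_bits is assumed to do):

    #include <stdint.h>

    // Illustrative only -- not srsLTE/liblte code.
    typedef struct { uint8_t *buf; uint32_t bit_offset; } bit_buf_t;

    // Append n_bits of value, MSB first (the contract assumed for liblte_value_2_bits).
    static void pack_bits(bit_buf_t *b, uint32_t value, uint32_t n_bits) {
      for (int i = (int)n_bits - 1; i >= 0; i--) {
        uint32_t bit  = (value >> i) & 1;
        uint32_t byte = b->bit_offset / 8;
        uint32_t pos  = 7 - (b->bit_offset % 8);
        b->buf[byte] = (uint8_t)((b->buf[byte] & ~(1u << pos)) | (bit << pos));
        b->bit_offset++;
      }
    }

    // UPER packing of an extensible CHOICE: one extension bit (0 = alternative
    // taken from the extension root), then the index over the 5 root
    // alternatives, which needs ceil(log2(5)) = 3 bits -- the pattern the
    // hunk above follows.
    static void pack_event_id(bit_buf_t *b, uint32_t event_id) {
      pack_bits(b, 0, 1);
      pack_bits(b, event_id, 3);
    }
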

@@ -42,18 +42,17 @@ private:
   bool new_allocation(uint32_t nof_rbg, uint32_t* rbgmask);
   void update_allocation(uint32_t new_mask);
   bool allocation_is_valid(uint32_t mask);
+  dl_harq_proc* apply_user_allocation(sched_ue *user);
   uint32_t get_required_rbg(sched_ue *user, uint32_t tti);
   uint32_t count_rbg(uint32_t mask);
   uint32_t calc_rbg_mask(bool mask[25]);
 
   bool used_rb[MAX_RBG];
-  uint32_t nof_users_with_data;
   uint32_t current_tti;
   uint32_t total_rb;
   uint32_t used_rb_mask;
   uint32_t nof_ctrl_symbols;
@@ -72,8 +71,8 @@ private:
   bool new_allocation(uint32_t L, ul_harq_proc::ul_alloc_t *alloc);
   bool allocation_is_valid(ul_harq_proc::ul_alloc_t alloc);
+  ul_harq_proc* apply_user_allocation(sched_ue *user);
-  uint32_t nof_users_with_data;
   bool used_rb[MAX_PRB];
   uint32_t current_tti;

@@ -40,7 +40,8 @@ class sched_ue {
 public:
   // used by sched_metric
-  uint32_t ue_idx;
+  dl_harq_proc* dl_next_alloc;
+  ul_harq_proc* ul_next_alloc;
   bool has_pucch;

@@ -78,7 +78,6 @@ uint32_t dl_metric_rr::get_required_rbg(sched_ue *user, uint32_t tti)
 void dl_metric_rr::new_tti(std::map<uint16_t,sched_ue> &ue_db, uint32_t start_rb, uint32_t nof_rb, uint32_t nof_ctrl_symbols_, uint32_t tti)
 {
   total_rb = start_rb+nof_rb;
   for (uint32_t i=0;i<total_rb;i++) {
     if (i<start_rb) {
@@ -89,16 +88,22 @@ void dl_metric_rr::new_tti(std::map<uint16_t,sched_ue> &ue_db, uint32_t start_rb
   }
   available_rb = nof_rb;
   used_rb_mask = calc_rbg_mask(used_rb);
   current_tti = tti;
   nof_ctrl_symbols = nof_ctrl_symbols_;
-  nof_users_with_data = 0;
-  for(std::map<uint16_t, sched_ue>::iterator iter=ue_db.begin(); iter!=ue_db.end(); ++iter) {
-    sched_ue *user = (sched_ue*) &iter->second;
-    if (user->get_pending_dl_new_data(current_tti) || user->get_pending_dl_harq(current_tti)) {
-      user->ue_idx = nof_users_with_data;
-      nof_users_with_data++;
+  if(ue_db.size()==0)
+    return;
+
+  // give priority in a time-domain RR basis
+  uint32_t priority_idx = current_tti % ue_db.size();
+  std::map<uint16_t, sched_ue>::iterator iter = ue_db.begin();
+  std::advance(iter,priority_idx);
+  for(uint32_t ue_count = 0 ; ue_count < ue_db.size() ; ++iter, ++ue_count) {
+    if(iter==ue_db.end()) {
+      iter = ue_db.begin(); // wrap around
     }
+    sched_ue *user = (sched_ue*) &iter->second;
+    user->dl_next_alloc = apply_user_allocation(user);
   }
 }
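
Note on the new_tti() change above: instead of counting users with pending data and gating them later by ue_idx, the scheduler now walks the whole UE map every TTI, starting from an offset that advances by one UE per TTI, so each UE periodically gets first pick of the free RBGs. A standalone sketch of just that visiting order (the map contents and printout are made up for illustration):

    #include <stdio.h>
    #include <stdint.h>
    #include <map>
    #include <iterator>

    int main() {
      // Stand-in for ue_db: RNTI -> UE label (values are illustrative).
      std::map<uint16_t, const char*> ue_db;
      ue_db[0x46] = "ue0";
      ue_db[0x47] = "ue1";
      ue_db[0x48] = "ue2";

      for (uint32_t tti = 0; tti < 4; tti++) {
        // Same rotation as the patch: the starting UE advances by one each TTI.
        uint32_t priority_idx = tti % ue_db.size();
        std::map<uint16_t, const char*>::iterator iter = ue_db.begin();
        std::advance(iter, priority_idx);
        printf("tti=%u:", tti);
        for (uint32_t ue_count = 0; ue_count < ue_db.size(); ++iter, ++ue_count) {
          if (iter == ue_db.end()) {
            iter = ue_db.begin(); // wrap around, as in the patch
          }
          printf(" %s", iter->second);
        }
        printf("\n"); // tti=0: ue0 ue1 ue2, tti=1: ue1 ue2 ue0, tti=2: ue2 ue0 ue1, ...
      }
      return 0;
    }
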
@@ -136,25 +141,11 @@ bool dl_metric_rr::allocation_is_valid(uint32_t mask)
   return (mask & used_rb_mask);
 }
 
-dl_harq_proc* dl_metric_rr::get_user_allocation(sched_ue *user)
-{
+dl_harq_proc* dl_metric_rr::apply_user_allocation(sched_ue *user) {
   uint32_t pending_data = user->get_pending_dl_new_data(current_tti);
   dl_harq_proc *h = user->get_pending_dl_harq(current_tti);
 
-  // Time-domain RR scheduling
-#if ASYNC_DL_SCHED
-  if (pending_data || h) {
-#else
-  if (pending_data || (h && !h->is_empty())) {
-#endif
-    if (nof_users_with_data) {
-      if ((current_tti%nof_users_with_data) != user->ue_idx) {
-        return NULL;
-      }
-    }
-  }
-
   // Schedule retx if we have space
 #if ASYNC_DL_SCHED
   if (h) {
 #else
@@ -164,38 +155,45 @@ dl_harq_proc* dl_metric_rr::get_user_allocation(sched_ue *user)
     // If can schedule the same mask, do it
     if (!allocation_is_valid(retx_mask)) {
       update_allocation(retx_mask);
      return h;
     }
     // If not, try to find another mask in the current tti
     uint32_t nof_rbg = count_rbg(retx_mask);
     if (nof_rbg < available_rb) {
       if (new_allocation(nof_rbg, &retx_mask)) {
         update_allocation(retx_mask);
         h->set_rbgmask(retx_mask);
         return h;
       }
     }
   }
 
   // If could not schedule the reTx, or there wasn't any pending retx, find an empty PID
 #if ASYNC_DL_SCHED
   h = user->get_empty_dl_harq();
   if (h) {
 #else
   if (h && h->is_empty()) {
 #endif
     // Allocate resources based on pending data
     if (pending_data) {
       uint32_t pending_rb = user->get_required_prb_dl(pending_data, nof_ctrl_symbols);
       uint32_t newtx_mask = 0;
       new_allocation(pending_rb, &newtx_mask);
       if (newtx_mask) {
         update_allocation(newtx_mask);
         h->set_rbgmask(newtx_mask);
         return h;
       }
     }
   }
   return NULL;
+}
+
+dl_harq_proc* dl_metric_rr::get_user_allocation(sched_ue *user)
+{
+  return user->dl_next_alloc;
 }
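
Note on the refactor above: the allocation decision now happens once per TTI inside new_tti(), in rotated priority order, through apply_user_allocation(); get_user_allocation() only returns the pointer cached in the UE. Every UE with data can still be served in the same TTI, the rotation only decides who chooses resources first, whereas the old tti % nof_users_with_data gate let a single UE through per TTI. A compressed sketch of the resulting shape (the types are simplified stand-ins, not the srsenb classes):

    #include <cstddef>

    struct dl_harq_proc;            // opaque here; the real class lives in the scheduler

    // Simplified stand-in for sched_ue: new_tti() fills dl_next_alloc once per TTI.
    struct ue_stub {
      dl_harq_proc *dl_next_alloc;
      ue_stub() : dl_next_alloc(NULL) {}
    };

    struct dl_metric_stub {
      // Called from new_tti() for every UE, in rotated priority order.
      void visit(ue_stub *u) { u->dl_next_alloc = decide(u); }

      // The per-UE query; now just a cached lookup, so repeated calls within
      // the same TTI always return the same answer.
      dl_harq_proc *get_user_allocation(ue_stub *u) { return u->dl_next_alloc; }

    private:
      dl_harq_proc *decide(ue_stub *u) {
        // Placeholder: the real apply_user_allocation() tries a retransmission
        // first and falls back to a new transmission, as in the hunks above.
        (void)u;
        return NULL;
      }
    };
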
@@ -219,15 +217,20 @@ void ul_metric_rr::new_tti(std::map<uint16_t,sched_ue> &ue_db, uint32_t nof_rb_,
   available_rb = nof_rb_;
   bzero(used_rb, nof_rb*sizeof(bool));
-  nof_users_with_data = 0;
-  for(std::map<uint16_t, sched_ue>::iterator iter=ue_db.begin(); iter!=ue_db.end(); ++iter) {
-    sched_ue *user = (sched_ue*) &iter->second;
-    if (user->get_pending_ul_new_data(current_tti) || !user->get_ul_harq(current_tti)->is_empty(0)) {
-      user->ue_idx = nof_users_with_data;
-      nof_users_with_data++;
+  if(ue_db.size()==0)
+    return;
+
+  // give priority in a time-domain RR basis
+  uint32_t priority_idx = (current_tti+ue_db.size()/2) % ue_db.size(); // make DL and UL interleaved
+  std::map<uint16_t, sched_ue>::iterator iter = ue_db.begin();
+  std::advance(iter,priority_idx);
+  for(uint32_t ue_count = 0 ; ue_count < ue_db.size() ; ++iter, ++ue_count) {
+    if(iter==ue_db.end()) {
+      iter = ue_db.begin(); // wrap around
     }
+    sched_ue *user = (sched_ue*) &iter->second;
+    user->ul_next_alloc = apply_user_allocation(user);
   }
 }
 
 bool ul_metric_rr::allocation_is_valid(ul_harq_proc::ul_alloc_t alloc)
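
Note on the UL new_tti() above: the UL priority start is offset by half the UE list relative to DL, so the UE that gets first pick of DL RBGs in a TTI is not also first in line for UL PRBs. A small worked example of the two starting indices (the UE count and loop bounds are arbitrary):

    #include <stdio.h>
    #include <stdint.h>

    int main() {
      uint32_t n_ue = 4; // arbitrary number of UEs for illustration
      for (uint32_t tti = 0; tti < 4; tti++) {
        uint32_t dl_start = tti % n_ue;              // as in dl_metric_rr::new_tti
        uint32_t ul_start = (tti + n_ue / 2) % n_ue; // as in ul_metric_rr::new_tti ("interleaved")
        printf("tti=%u: DL first pick = ue#%u, UL first pick = ue#%u\n",
               tti, dl_start, ul_start);
      }
      return 0; // tti=0 -> DL ue#0 / UL ue#2, tti=1 -> DL ue#1 / UL ue#3, ...
    }
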
@@ -288,56 +291,49 @@ void ul_metric_rr::update_allocation(ul_harq_proc::ul_alloc_t alloc)
   available_rb -= alloc.L;
 }
 
-ul_harq_proc* ul_metric_rr::get_user_allocation(sched_ue *user)
-{
+ul_harq_proc* ul_metric_rr::apply_user_allocation(sched_ue *user) {
   // Time-domain RR scheduling
   uint32_t pending_data = user->get_pending_ul_new_data(current_tti);
   ul_harq_proc *h = user->get_ul_harq(current_tti);
 
-  if (pending_data || !h->is_empty(0)) {
-    if (nof_users_with_data) {
-      if ((current_tti%nof_users_with_data) != user->ue_idx) {
-        return NULL;
-      }
-    }
-  }
-
   // Schedule retx if we have space
   if (!h->is_empty(0)) {
     ul_harq_proc::ul_alloc_t alloc = h->get_alloc();
     // If can schedule the same mask, do it
     if (allocation_is_valid(alloc)) {
       update_allocation(alloc);
       return h;
     }
     // If not, try to find another mask in the current tti
     if (new_allocation(alloc.L, &alloc)) {
       update_allocation(alloc);
       h->set_alloc(alloc);
       return h;
     }
   }
 
   // If could not schedule the reTx, or there wasn't any pending retx, find an empty PID
   if (h->is_empty(0)) {
     // Allocate resources based on pending data
     if (pending_data) {
       uint32_t pending_rb = user->get_required_prb_ul(pending_data);
       ul_harq_proc::ul_alloc_t alloc;
       new_allocation(pending_rb, &alloc);
       if (alloc.L) {
         update_allocation(alloc);
         h->set_alloc(alloc);
         return h;
       }
     }
   }
   return NULL;
 }
+
+ul_harq_proc* ul_metric_rr::get_user_allocation(sched_ue *user)
+{
+  return user->ul_next_alloc;
+}
 
 }

@@ -49,7 +49,7 @@ namespace srsenb {
  *
  *******************************************************/
 
-sched_ue::sched_ue() : ue_idx(0), has_pucch(false), power_headroom(0), rnti(0), max_mcs_dl(0), max_mcs_ul(0),
+sched_ue::sched_ue() : dl_next_alloc(NULL), ul_next_alloc(NULL), has_pucch(false), power_headroom(0), rnti(0), max_mcs_dl(0), max_mcs_ul(0),
                        fixed_mcs_ul(0), fixed_mcs_dl(0), phy_config_dedicated_enabled(false)
 {
   log_h = NULL;
