@@ -78,7 +78,6 @@ uint32_t dl_metric_rr::get_required_rbg(sched_ue *user, uint32_t tti)
 
 void dl_metric_rr::new_tti(std::map<uint16_t,sched_ue> &ue_db, uint32_t start_rb, uint32_t nof_rb, uint32_t nof_ctrl_symbols_, uint32_t tti)
 {
-
   total_rb = start_rb+nof_rb;
   for (uint32_t i=0;i<total_rb;i++) {
     if (i<start_rb) {
@@ -89,16 +88,22 @@ void dl_metric_rr::new_tti(std::map<uint16_t,sched_ue> &ue_db, uint32_t start_rb
   }
   available_rb = nof_rb;
   used_rb_mask = calc_rbg_mask(used_rb);
   current_tti = tti;
   nof_ctrl_symbols = nof_ctrl_symbols_;
 
-  nof_users_with_data = 0;
-  for(std::map<uint16_t, sched_ue>::iterator iter=ue_db.begin(); iter!=ue_db.end(); ++iter) {
-    sched_ue *user = (sched_ue*) &iter->second;
-    if (user->get_pending_dl_new_data(current_tti) || user->get_pending_dl_harq(current_tti)) {
-      user->ue_idx = nof_users_with_data;
-      nof_users_with_data++;
+  if(ue_db.size()==0)
+    return;
+
+  // give priority in a time-domain RR basis
+  uint32_t priority_idx = current_tti % ue_db.size();
+  std::map<uint16_t, sched_ue>::iterator iter = ue_db.begin();
+  std::advance(iter,priority_idx);
+  for(uint32_t ue_count = 0 ; ue_count < ue_db.size() ; ++iter, ++ue_count) {
+    if(iter==ue_db.end()) {
+      iter = ue_db.begin(); // wrap around
     }
+    sched_ue *user = (sched_ue*) &iter->second;
+    user->dl_next_alloc = apply_user_allocation(user);
   }
 }
 
@@ -136,25 +141,11 @@ bool dl_metric_rr::allocation_is_valid(uint32_t mask)
   return (mask & used_rb_mask);
 }
 
-dl_harq_proc* dl_metric_rr::get_user_allocation(sched_ue *user)
-{
+dl_harq_proc* dl_metric_rr::apply_user_allocation(sched_ue *user) {
   uint32_t pending_data = user->get_pending_dl_new_data(current_tti);
   dl_harq_proc *h = user->get_pending_dl_harq(current_tti);
 
-  // Time-domain RR scheduling
-#if ASYNC_DL_SCHED
-  if (pending_data || h) {
-#else
-  if (pending_data || (h && !h->is_empty())) {
-#endif
-    if (nof_users_with_data) {
-      if ((current_tti%nof_users_with_data) != user->ue_idx) {
-        return NULL;
-      }
-    }
-  }
-
-  // Schedule retx if we have space
+  // Schedule retx if we have space
 #if ASYNC_DL_SCHED
   if (h) {
 #else
@@ -164,38 +155,45 @@ dl_harq_proc* dl_metric_rr::get_user_allocation(sched_ue *user)
     // If can schedule the same mask, do it
     if (!allocation_is_valid(retx_mask)) {
       update_allocation(retx_mask);
       return h;
     }
-    // If not, try to find another mask in the current tti
+
+    // If not, try to find another mask in the current tti
     uint32_t nof_rbg = count_rbg(retx_mask);
     if (nof_rbg < available_rb) {
       if (new_allocation(nof_rbg, &retx_mask)) {
         update_allocation(retx_mask);
         h->set_rbgmask(retx_mask);
         return h;
       }
     }
   }
   // If could not schedule the reTx, or there wasn't any pending retx, find an empty PID
 #if ASYNC_DL_SCHED
   h = user->get_empty_dl_harq();
   if (h) {
 #else
   if (h && h->is_empty()) {
 #endif
     // Allocate resources based on pending data
     if (pending_data) {
       uint32_t pending_rb = user->get_required_prb_dl(pending_data, nof_ctrl_symbols);
       uint32_t newtx_mask = 0;
       new_allocation(pending_rb, &newtx_mask);
       if (newtx_mask) {
         update_allocation(newtx_mask);
         h->set_rbgmask(newtx_mask);
         return h;
       }
     }
   }
-  return NULL;
+
+  return NULL;
+}
+
+dl_harq_proc* dl_metric_rr::get_user_allocation(sched_ue *user)
+{
+  return user->dl_next_alloc;
 }
 
 
@@ -219,15 +217,20 @@ void ul_metric_rr::new_tti(std::map<uint16_t,sched_ue> &ue_db, uint32_t nof_rb_,
   available_rb = nof_rb_;
   bzero(used_rb, nof_rb*sizeof(bool));
 
-  nof_users_with_data = 0;
-  for(std::map<uint16_t, sched_ue>::iterator iter=ue_db.begin(); iter!=ue_db.end(); ++iter) {
-    sched_ue *user = (sched_ue*) &iter->second;
-    if (user->get_pending_ul_new_data(current_tti) || !user->get_ul_harq(current_tti)->is_empty(0)) {
-      user->ue_idx = nof_users_with_data;
-      nof_users_with_data++;
+  if(ue_db.size()==0)
+    return;
+
+  // give priority in a time-domain RR basis
+  uint32_t priority_idx = (current_tti+ue_db.size()/2) % ue_db.size(); // make DL and UL interleaved
+  std::map<uint16_t, sched_ue>::iterator iter = ue_db.begin();
+  std::advance(iter,priority_idx);
+  for(uint32_t ue_count = 0 ; ue_count < ue_db.size() ; ++iter, ++ue_count) {
+    if(iter==ue_db.end()) {
+      iter = ue_db.begin(); // wrap around
     }
+    sched_ue *user = (sched_ue*) &iter->second;
+    user->ul_next_alloc = apply_user_allocation(user);
   }
-
 }
 
 bool ul_metric_rr::allocation_is_valid(ul_harq_proc::ul_alloc_t alloc)
@@ -288,56 +291,49 @@ void ul_metric_rr::update_allocation(ul_harq_proc::ul_alloc_t alloc)
   available_rb -= alloc.L;
 }
 
-ul_harq_proc* ul_metric_rr::get_user_allocation(sched_ue *user)
-{
+ul_harq_proc* ul_metric_rr::apply_user_allocation(sched_ue *user) {
   // Time-domain RR scheduling
   uint32_t pending_data = user->get_pending_ul_new_data(current_tti);
   ul_harq_proc *h = user->get_ul_harq(current_tti);
 
-  if (pending_data || !h->is_empty(0)) {
-    if (nof_users_with_data) {
-      if ((current_tti%nof_users_with_data) != user->ue_idx) {
-        return NULL;
-      }
-    }
-  }
-
   // Schedule retx if we have space
   if (!h->is_empty(0)) {
     ul_harq_proc::ul_alloc_t alloc = h->get_alloc();
 
     // If can schedule the same mask, do it
     if (allocation_is_valid(alloc)) {
       update_allocation(alloc);
       return h;
     }
 
     // If not, try to find another mask in the current tti
     if (new_allocation(alloc.L, &alloc)) {
       update_allocation(alloc);
       h->set_alloc(alloc);
       return h;
     }
   }
-  // If could not schedule the reTx, or there wasn't any pending retx, find an empty PID
+
+  // If could not schedule the reTx, or there wasn't any pending retx, find an empty PID
   if (h->is_empty(0)) {
     // Allocate resources based on pending data
     if (pending_data) {
       uint32_t pending_rb = user->get_required_prb_ul(pending_data);
       ul_harq_proc::ul_alloc_t alloc;
       new_allocation(pending_rb, &alloc);
       if (alloc.L) {
         update_allocation(alloc);
         h->set_alloc(alloc);
         return h;
       }
     }
   }
   return NULL;
 }
+
+ul_harq_proc* ul_metric_rr::get_user_allocation(sched_ue *user)
+{
+  return user->ul_next_alloc;
+}
 
 }