Refactoring of scheduling algorithms

- Use a single interface for both DL and UL (see the sketch below)
- Wrote helper functions for tasks that are common to the different algorithms
- Created a subfolder for all scheduling algorithms
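To illustrate the single interface, here is a minimal, self-contained sketch condensed from the diff below. The sched_base class and its sched_dl_users/sched_ul_users hooks mirror the new schedulers/sched_base.h; the sched_ue and sf_sched stand-ins, dummy_algo, and carrier_sched_sketch are simplified placeholders for illustration only and are not code from this commit.

// Illustrative sketch only -- condensed from the diff below, with stand-in types.
#include <cstdint>
#include <map>
#include <memory>

namespace srsenb {

struct sched_ue {}; // stand-in for the real per-UE scheduling context
struct sf_sched {}; // stand-in for the real per-subframe resource grid

// Single interface implemented by every scheduling algorithm (see sched_base.h in this commit)
class sched_base
{
public:
  virtual ~sched_base() = default;
  virtual void sched_dl_users(std::map<uint16_t, sched_ue>& ue_db, sf_sched* tti_sched) = 0;
  virtual void sched_ul_users(std::map<uint16_t, sched_ue>& ue_db, sf_sched* tti_sched) = 0;
};

// Placeholder algorithm; the commit ships sched_time_rr and sched_time_pf instead
class dummy_algo : public sched_base
{
public:
  void sched_dl_users(std::map<uint16_t, sched_ue>&, sf_sched*) override {}
  void sched_ul_users(std::map<uint16_t, sched_ue>&, sf_sched*) override {}
};

// carrier_sched now owns one polymorphic algorithm instead of separate DL/UL
// metric objects and calls it for both directions each TTI (simplified from
// sched_carrier.cc in this commit)
struct carrier_sched_sketch
{
  std::unique_ptr<sched_base>  sched_algo{new dummy_algo};
  std::map<uint16_t, sched_ue> ue_db;

  void alloc_dl_users(sf_sched* tti_result) { sched_algo->sched_dl_users(ue_db, tti_result); }
  void alloc_ul_users(sf_sched* tti_sched) { sched_algo->sched_ul_users(ue_db, tti_sched); }
};

} // namespace srsenb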
Branch: master
Authored by Francisco 4 years ago, committed by Andre Puschmann
parent 64ee0ac2ef
commit b71e8075f4

@@ -14,7 +14,7 @@
 #define SRSENB_MAC_H
 #include "sched.h"
-#include "sched_metric.h"
+#include "srsenb/hdr/stack/mac/schedulers/sched_time_rr.h"
 #include "srslte/common/log.h"
 #include "srslte/common/mac_pcap.h"
 #include "srslte/common/task_scheduler.h"

@@ -14,6 +14,7 @@
 #define SRSLTE_SCHED_CARRIER_H
 #include "sched.h"
+#include "schedulers/sched_base.h"
 namespace srsenb {
@@ -52,8 +53,6 @@ private:
 srslte::log_ref log_h;
 rrc_interface_mac* rrc = nullptr;
 std::map<uint16_t, sched_ue>* ue_db = nullptr;
-std::unique_ptr<metric_dl> dl_metric;
-std::unique_ptr<metric_ul> ul_metric;
 const uint32_t enb_cc_idx;
 // Subframe scheduling logic
@@ -66,6 +65,7 @@ private:
 std::unique_ptr<bc_sched> bc_sched_ptr;
 std::unique_ptr<ra_sched> ra_sched_ptr;
+std::unique_ptr<sched_base> sched_algo;
 };
 //! Broadcast (SIB + paging) scheduler

@@ -305,6 +305,7 @@ public:
 const tti_params_t& get_tti_params() const { return tti_params; }
 bool is_dl_alloc(uint16_t rnti) const final;
 bool is_ul_alloc(uint16_t rnti) const final;
+uint32_t get_enb_cc_idx() const { return cc_cfg->enb_cc_idx; }
 private:
 ctrl_code_t alloc_dl_ctrl(uint32_t aggr_lvl, uint32_t tbs_bytes, uint16_t rnti);

@@ -1,56 +0,0 @@
/**
*
* \section COPYRIGHT
*
* Copyright 2013-2020 Software Radio Systems Limited
*
* By using this file, you agree to the terms and conditions set
* forth in the LICENSE file which can be found at the top level of
* the distribution.
*
*/
#ifndef SRSENB_SCHEDULER_METRIC_H
#define SRSENB_SCHEDULER_METRIC_H
#include "sched.h"
namespace srsenb {
class dl_metric_rr : public sched::metric_dl
{
const static int MAX_RBG = 25;
public:
void set_params(const sched_cell_params_t& cell_params_) final;
void sched_users(std::map<uint16_t, sched_ue>& ue_db, dl_sf_sched_itf* tti_sched) final;
private:
bool find_allocation(uint32_t min_nof_rbg, uint32_t max_nof_rbg, rbgmask_t* rbgmask);
dl_harq_proc* allocate_user(sched_ue* user);
const sched_cell_params_t* cc_cfg = nullptr;
srslte::log_ref log_h;
dl_sf_sched_itf* tti_alloc = nullptr;
};
class ul_metric_rr : public sched::metric_ul
{
public:
void set_params(const sched_cell_params_t& cell_params_) final;
void sched_users(std::map<uint16_t, sched_ue>& ue_db, ul_sf_sched_itf* tti_sched) final;
private:
bool find_allocation(uint32_t L, prb_interval* alloc);
ul_harq_proc* allocate_user_newtx_prbs(sched_ue* user);
ul_harq_proc* allocate_user_retx_prbs(sched_ue* user);
const sched_cell_params_t* cc_cfg = nullptr;
srslte::log_ref log_h;
ul_sf_sched_itf* tti_alloc = nullptr;
uint32_t current_tti = 0;
};
} // namespace srsenb
#endif // SRSENB_SCHEDULER_METRIC_H

@@ -1,99 +0,0 @@
/**
*
* \section COPYRIGHT
*
* Copyright 2013-2020 Software Radio Systems Limited
*
* By using this file, you agree to the terms and conditions set
* forth in the LICENSE file which can be found at the top level of
* the distribution.
*
*/
#ifndef SRSLTE_SCHED_PF_H
#define SRSLTE_SCHED_PF_H
#include "sched.h"
#include <queue>
namespace srsenb {
class sched_dl_pf : public sched::metric_dl
{
using ue_cit_t = std::map<uint16_t, sched_ue>::const_iterator;
public:
void set_params(const sched_cell_params_t& cell_params_) final;
void sched_users(std::map<uint16_t, sched_ue>& ue_db, dl_sf_sched_itf* tti_sched) final;
private:
const sched_cell_params_t* cc_cfg = nullptr;
srslte::log_ref log_h;
struct ue_ctxt {
ue_ctxt(uint16_t rnti_) : rnti(rnti_) {}
float avg_rate() const { return nof_samples == 0 ? 0 : rate; }
uint32_t count() const { return nof_samples; }
void new_tti(const sched_cell_params_t& cell, sched_ue& ue, dl_sf_sched_itf* tti_sched);
void save_history(bool alloc, float alpha);
const uint16_t rnti;
uint32_t ue_cc_idx = 0;
bool is_retx = false;
float prio = 0;
dl_harq_proc* h = nullptr;
private:
float rate = 0;
uint32_t nof_samples = 0;
};
std::map<uint16_t, ue_ctxt> ue_history_db;
struct ue_prio_compare {
bool operator()(const ue_ctxt* lhs, const ue_ctxt* rhs) const;
};
std::priority_queue<ue_ctxt*, std::vector<ue_ctxt*>, ue_prio_compare> ue_queue;
bool try_dl_alloc(ue_ctxt& ue_ctxt, sched_ue& ue, dl_sf_sched_itf* tti_sched);
};
class sched_ul_pf : public sched::metric_ul
{
using ue_cit_t = std::map<uint16_t, sched_ue>::const_iterator;
public:
void set_params(const sched_cell_params_t& cell_params_) final;
void sched_users(std::map<uint16_t, sched_ue>& ue_db, ul_sf_sched_itf* tti_sched) final;
private:
const sched_cell_params_t* cc_cfg = nullptr;
srslte::log_ref log_h;
struct ue_ctxt {
ue_ctxt(uint16_t rnti_) : rnti(rnti_) {}
float avg_rate() const { return nof_samples == 0 ? 0 : rate; }
uint32_t count() const { return nof_samples; }
void new_tti(const sched_cell_params_t& cell, sched_ue& ue, ul_sf_sched_itf* tti_sched);
void save_history(bool alloc, float alpha);
const uint16_t rnti;
uint32_t ue_cc_idx = 0;
float prio = 0;
ul_harq_proc* h = nullptr;
private:
float rate = 0;
uint32_t nof_samples = 0;
};
std::map<uint16_t, ue_ctxt> ue_history_db;
struct ue_prio_compare {
bool operator()(const ue_ctxt* lhs, const ue_ctxt* rhs) const;
};
std::priority_queue<ue_ctxt*, std::vector<ue_ctxt*>, ue_prio_compare> ue_queue;
bool try_ul_alloc(ue_ctxt& ue_ctxt, sched_ue& ue, ul_sf_sched_itf* tti_sched);
};
} // namespace srsenb
#endif // SRSLTE_SCHED_PF_H

@@ -0,0 +1,64 @@
/**
*
* \section COPYRIGHT
*
* Copyright 2013-2020 Software Radio Systems Limited
*
* By using this file, you agree to the terms and conditions set
* forth in the LICENSE file which can be found at the top level of
* the distribution.
*
*/
#ifndef SRSLTE_SCHED_BASE_H
#define SRSLTE_SCHED_BASE_H
#include "srsenb/hdr/stack/mac/sched_grid.h"
namespace srsenb {
/**
* Base class for scheduler algorithms implementations
*/
class sched_base
{
public:
virtual ~sched_base() = default;
virtual void sched_dl_users(std::map<uint16_t, sched_ue>& ue_db, sf_sched* tti_sched) = 0;
virtual void sched_ul_users(std::map<uint16_t, sched_ue>& ue_db, sf_sched* tti_sched) = 0;
protected:
srslte::log_ref log_h = srslte::logmap::get("MAC");
};
/**************** Helper methods ****************/
/**
* Finds a bitmask of available RBG resources
* @param L Size of the requested DL RBGs
* @param current_mask input RBG bitmask where to search for available RBGs
* @return bitmask of found RBGs
*/
rbgmask_t find_available_dl_rbgs(uint32_t L, const rbgmask_t& current_mask);
/**
* Finds a range of L contiguous PRBs that are empty
* @param L Size of the requested UL PRBs
* @param current_mask input prb mask where to search for available PRBs
* @return found interval of PRBs
*/
prb_interval find_contiguous_ul_prbs(uint32_t L, const prbmask_t& current_mask);
const dl_harq_proc* get_dl_retx_harq(sched_ue& user, sf_sched* tti_sched);
const dl_harq_proc* get_dl_newtx_harq(sched_ue& user, sf_sched* tti_sched);
const ul_harq_proc* get_ul_retx_harq(sched_ue& user, sf_sched* tti_sched);
const ul_harq_proc* get_ul_newtx_harq(sched_ue& user, sf_sched* tti_sched);
/// Helper methods to allocate resources in subframe
alloc_outcome_t try_dl_retx_alloc(sf_sched& tti_sched, sched_ue& ue, const dl_harq_proc& h);
alloc_outcome_t try_ul_retx_alloc(sf_sched& tti_sched, sched_ue& ue, const ul_harq_proc& h);
} // namespace srsenb
#endif // SRSLTE_SCHED_BASE_H

@@ -0,0 +1,78 @@
/**
*
* \section COPYRIGHT
*
* Copyright 2013-2020 Software Radio Systems Limited
*
* By using this file, you agree to the terms and conditions set
* forth in the LICENSE file which can be found at the top level of
* the distribution.
*
*/
#ifndef SRSLTE_SCHED_TIME_PF_H
#define SRSLTE_SCHED_TIME_PF_H
#include "sched_base.h"
#include <queue>
namespace srsenb {
class sched_time_pf final : public sched_base
{
using ue_cit_t = std::map<uint16_t, sched_ue>::const_iterator;
public:
sched_time_pf(const sched_cell_params_t& cell_params_);
void sched_dl_users(std::map<uint16_t, sched_ue>& ue_db, sf_sched* tti_sched) override;
void sched_ul_users(std::map<uint16_t, sched_ue>& ue_db, sf_sched* tti_sched) override;
private:
void new_tti(std::map<uint16_t, sched_ue>& ue_db, sf_sched* tti_sched);
const sched_cell_params_t* cc_cfg = nullptr;
srslte::tti_point current_tti_rx;
struct ue_ctxt {
ue_ctxt(uint16_t rnti_) : rnti(rnti_) {}
float dl_avg_rate() const { return dl_nof_samples == 0 ? 0 : dl_avg_rate_; }
float ul_avg_rate() const { return ul_nof_samples == 0 ? 0 : ul_avg_rate_; }
uint32_t dl_count() const { return dl_nof_samples; }
uint32_t ul_count() const { return ul_nof_samples; }
void new_tti(const sched_cell_params_t& cell, sched_ue& ue, sf_sched* tti_sched);
void save_dl_alloc(uint32_t alloc_bytes, float alpha);
void save_ul_alloc(uint32_t alloc_bytes, float alpha);
const uint16_t rnti;
int ue_cc_idx = 0;
float dl_prio = 0;
float ul_prio = 0;
const dl_harq_proc* dl_retx_h = nullptr;
const dl_harq_proc* dl_newtx_h = nullptr;
const ul_harq_proc* ul_h = nullptr;
private:
float dl_avg_rate_ = 0;
float ul_avg_rate_ = 0;
uint32_t dl_nof_samples = 0;
uint32_t ul_nof_samples = 0;
};
std::map<uint16_t, ue_ctxt> ue_history_db;
struct ue_dl_prio_compare {
bool operator()(const ue_ctxt* lhs, const ue_ctxt* rhs) const;
};
struct ue_ul_prio_compare {
bool operator()(const ue_ctxt* lhs, const ue_ctxt* rhs) const;
};
std::priority_queue<ue_ctxt*, std::vector<ue_ctxt*>, ue_dl_prio_compare> dl_queue;
std::priority_queue<ue_ctxt*, std::vector<ue_ctxt*>, ue_ul_prio_compare> ul_queue;
uint32_t try_dl_alloc(ue_ctxt& ue_ctxt, sched_ue& ue, sf_sched* tti_sched);
uint32_t try_ul_alloc(ue_ctxt& ue_ctxt, sched_ue& ue, sf_sched* tti_sched);
};
} // namespace srsenb
#endif // SRSLTE_SCHED_TIME_PF_H

@@ -0,0 +1,40 @@
/**
*
* \section COPYRIGHT
*
* Copyright 2013-2020 Software Radio Systems Limited
*
* By using this file, you agree to the terms and conditions set
* forth in the LICENSE file which can be found at the top level of
* the distribution.
*
*/
#ifndef SRSENB_SCHEDULER_METRIC_H
#define SRSENB_SCHEDULER_METRIC_H
#include "sched_base.h"
namespace srsenb {
class sched_time_rr final : public sched_base
{
const static int MAX_RBG = 25;
public:
sched_time_rr(const sched_cell_params_t& cell_params_);
void sched_dl_users(std::map<uint16_t, sched_ue>& ue_db, sf_sched* tti_sched) override;
void sched_ul_users(std::map<uint16_t, sched_ue>& ue_db, sf_sched* tti_sched) override;
private:
void sched_dl_retxs(std::map<uint16_t, sched_ue>& ue_db, sf_sched* tti_sched, size_t prio_idx);
void sched_dl_newtxs(std::map<uint16_t, sched_ue>& ue_db, sf_sched* tti_sched, size_t prio_idx);
void sched_ul_retxs(std::map<uint16_t, sched_ue>& ue_db, sf_sched* tti_sched, size_t prio_idx);
void sched_ul_newtxs(std::map<uint16_t, sched_ue>& ue_db, sf_sched* tti_sched, size_t prio_idx);
const sched_cell_params_t* cc_cfg = nullptr;
};
} // namespace srsenb
#endif // SRSENB_SCHEDULER_METRIC_H

@@ -6,9 +6,11 @@
 # the distribution.
 #
-set(SOURCES mac.cc ue.cc sched.cc sched_carrier.cc sched_grid.cc sched_harq.cc sched_metric.cc sched_ue.cc
-sched_lch.cc sched_interface_helpers.cc sched_pf.cc)
-add_library(srsenb_mac STATIC ${SOURCES})
+add_subdirectory(schedulers)
+set(SOURCES mac.cc ue.cc sched.cc sched_carrier.cc sched_grid.cc sched_harq.cc sched_ue.cc
+sched_lch.cc sched_interface_helpers.cc)
+add_library(srsenb_mac STATIC ${SOURCES} $<TARGET_OBJECTS:mac_schedulers>)
 if(ENABLE_5GNR)
 set(SOURCES mac_nr.cc)

@@ -12,9 +12,8 @@
 #include "srsenb/hdr/stack/mac/sched_carrier.h"
 #include "srsenb/hdr/stack/mac/sched_interface_helpers.h"
-#include "srsenb/hdr/stack/mac/sched_metric.h"
-#include "srsenb/hdr/stack/mac/sched_pf.h"
-#include "srslte/common/log_helper.h"
+#include "srsenb/hdr/stack/mac/schedulers/sched_time_pf.h"
+#include "srsenb/hdr/stack/mac/schedulers/sched_time_rr.h"
 #include "srslte/common/logmap.h"
 namespace srsenb {
@@ -289,14 +288,8 @@ void sched::carrier_sched::carrier_cfg(const sched_cell_params_t& cell_params_)
 ra_sched_ptr.reset(new ra_sched{*cc_cfg, *ue_db});
 // Setup data scheduling algorithms
-// dl_metric.reset(new srsenb::dl_metric_rr{});
-// dl_metric->set_params(*cc_cfg);
-// ul_metric.reset(new srsenb::ul_metric_rr{});
-// ul_metric->set_params(*cc_cfg);
-dl_metric.reset(new srsenb::sched_dl_pf{});
-dl_metric->set_params(*cc_cfg);
-ul_metric.reset(new srsenb::sched_ul_pf{});
-ul_metric->set_params(*cc_cfg);
+sched_algo.reset(new sched_time_pf{*cc_cfg});
+// sched_algo.reset(new sched_time_rr{*cc_cfg});
 // Initiate the tti_scheduler for each TTI
 for (sf_sched& tti_sched : sf_scheds) {
@@ -375,13 +368,13 @@ void sched::carrier_sched::alloc_dl_users(sf_sched* tti_result)
 }
 // call DL scheduler metric to fill RB grid
-dl_metric->sched_users(*ue_db, tti_result);
+sched_algo->sched_dl_users(*ue_db, tti_result);
 }
 int sched::carrier_sched::alloc_ul_users(sf_sched* tti_sched)
 {
 /* Call scheduler for UL data */
-ul_metric->sched_users(*ue_db, tti_sched);
+sched_algo->sched_ul_users(*ue_db, tti_sched);
 return SRSLTE_SUCCESS;
 }

@@ -1,321 +0,0 @@
/**
*
* \section COPYRIGHT
*
* Copyright 2013-2020 Software Radio Systems Limited
*
* By using this file, you agree to the terms and conditions set
* forth in the LICENSE file which can be found at the top level of
* the distribution.
*
*/
#include "srsenb/hdr/stack/mac/sched_metric.h"
#include "srsenb/hdr/stack/mac/sched_harq.h"
#include "srslte/common/log_helper.h"
#include "srslte/common/logmap.h"
#include <string.h>
namespace srsenb {
/*****************************************************************
*
* Downlink Metric
*
*****************************************************************/
void dl_metric_rr::set_params(const sched_cell_params_t& cell_params_)
{
cc_cfg = &cell_params_;
log_h = srslte::logmap::get("MAC ");
}
void dl_metric_rr::sched_users(std::map<uint16_t, sched_ue>& ue_db, dl_sf_sched_itf* tti_sched)
{
tti_alloc = tti_sched;
if (ue_db.empty()) {
return;
}
// give priority in a time-domain RR basis.
uint32_t priority_idx = tti_alloc->get_tti_tx_dl() % (uint32_t)ue_db.size();
auto iter = ue_db.begin();
std::advance(iter, priority_idx);
for (uint32_t ue_count = 0; ue_count < ue_db.size(); ++iter, ++ue_count) {
if (iter == ue_db.end()) {
iter = ue_db.begin(); // wrap around
}
sched_ue* user = &iter->second;
allocate_user(user);
}
}
bool dl_metric_rr::find_allocation(uint32_t min_nof_rbg, uint32_t max_nof_rbg, rbgmask_t* rbgmask)
{
if (tti_alloc->get_dl_mask().all()) {
return false;
}
// 1's for free rbgs
rbgmask_t localmask = ~(tti_alloc->get_dl_mask());
uint32_t i = 0, nof_alloc = 0;
for (; i < localmask.size() and nof_alloc < max_nof_rbg; ++i) {
if (localmask.test(i)) {
nof_alloc++;
}
}
if (nof_alloc < min_nof_rbg) {
return false;
}
localmask.fill(i, localmask.size(), false);
*rbgmask = localmask;
return true;
}
dl_harq_proc* dl_metric_rr::allocate_user(sched_ue* user)
{
// Do not allocate a user multiple times in the same tti
if (tti_alloc->is_dl_alloc(user->get_rnti())) {
return nullptr;
}
// Do not allocate a user to an inactive carrier
auto p = user->get_active_cell_index(cc_cfg->enb_cc_idx);
if (not p.first) {
return nullptr;
}
uint32_t cell_idx = p.second;
if (not user->pdsch_enabled(srslte::tti_point(tti_alloc->get_tti_tx_dl() - TX_ENB_DELAY), cc_cfg->enb_cc_idx)) {
return nullptr;
}
alloc_outcome_t code;
uint32_t tti_dl = tti_alloc->get_tti_tx_dl();
dl_harq_proc* h = user->get_pending_dl_harq(tti_dl, cell_idx);
// Schedule retx if we have space
if (h != nullptr) {
// Try to reuse the same mask
rbgmask_t retx_mask = h->get_rbgmask();
code = tti_alloc->alloc_dl_user(user, retx_mask, h->get_id());
if (code == alloc_outcome_t::SUCCESS) {
return h;
}
if (code == alloc_outcome_t::DCI_COLLISION) {
// No DCIs available for this user. Move to next
log_h->info("SCHED: Couldn't find space in PDCCH for DL retx for rnti=0x%x\n", user->get_rnti());
return nullptr;
}
// If previous mask does not fit, find another with exact same number of rbgs
size_t nof_rbg = retx_mask.count();
if (find_allocation(nof_rbg, nof_rbg, &retx_mask)) {
code = tti_alloc->alloc_dl_user(user, retx_mask, h->get_id());
if (code == alloc_outcome_t::SUCCESS) {
return h;
}
if (code == alloc_outcome_t::DCI_COLLISION) {
log_h->info("SCHED: Couldn't find space in PDCCH for DL retx for rnti=0x%x\n", user->get_rnti());
return nullptr;
}
}
}
// If could not schedule the reTx, or there wasn't any pending retx, find an empty PID
h = user->get_empty_dl_harq(tti_dl, cell_idx);
if (h != nullptr) {
// Allocate resources based on pending data
rbg_interval req_rbgs = user->get_required_dl_rbgs(cell_idx);
if (req_rbgs.stop() > 0) {
rbgmask_t newtx_mask(tti_alloc->get_dl_mask().size());
if (find_allocation(req_rbgs.start(), req_rbgs.stop(), &newtx_mask)) {
// some empty spaces were found
code = tti_alloc->alloc_dl_user(user, newtx_mask, h->get_id());
if (code == alloc_outcome_t::SUCCESS) {
return h;
} else if (code == alloc_outcome_t::DCI_COLLISION) {
log_h->info("SCHED: Couldn't find space in PDCCH for DL tx for rnti=0x%x\n", user->get_rnti());
}
}
}
}
return nullptr;
}
/*****************************************************************
*
* Uplink Metric
*
*****************************************************************/
void ul_metric_rr::set_params(const sched_cell_params_t& cell_params_)
{
cc_cfg = &cell_params_;
log_h = srslte::logmap::get("MAC ");
}
void ul_metric_rr::sched_users(std::map<uint16_t, sched_ue>& ue_db, ul_sf_sched_itf* tti_sched)
{
tti_alloc = tti_sched;
current_tti = tti_alloc->get_tti_tx_ul();
if (ue_db.empty()) {
return;
}
// give priority in a time-domain RR basis
uint32_t priority_idx =
(current_tti + (uint32_t)ue_db.size() / 2) % (uint32_t)ue_db.size(); // make DL and UL interleaved
// allocate reTxs first
auto iter = ue_db.begin();
std::advance(iter, priority_idx);
for (uint32_t ue_count = 0; ue_count < ue_db.size(); ++iter, ++ue_count) {
if (iter == ue_db.end()) {
iter = ue_db.begin(); // wrap around
}
sched_ue* user = &iter->second;
allocate_user_retx_prbs(user);
}
// give priority in a time-domain RR basis
iter = ue_db.begin();
std::advance(iter, priority_idx);
for (uint32_t ue_count = 0; ue_count < ue_db.size(); ++iter, ++ue_count) {
if (iter == ue_db.end()) {
iter = ue_db.begin(); // wrap around
}
sched_ue* user = &iter->second;
allocate_user_newtx_prbs(user);
}
}
/**
* Finds a range of L contiguous PRBs that are empty
* @param L Size of the requested UL allocation in PRBs
* @param alloc Found allocation. It is guaranteed that 0 <= alloc->L <= L
* @return true if the requested allocation of size L was strictly met
*/
bool ul_metric_rr::find_allocation(uint32_t L, prb_interval* alloc)
{
const prbmask_t* used_rb = &tti_alloc->get_ul_mask();
*alloc = {};
for (uint32_t n = 0; n < used_rb->size() && alloc->length() < L; n++) {
if (not used_rb->test(n) && alloc->length() == 0) {
alloc->displace_to(n);
}
if (not used_rb->test(n)) {
alloc->resize_by(1);
} else if (alloc->length() > 0) {
// avoid edges
if (n < 3) {
*alloc = {};
} else {
break;
}
}
}
if (alloc->length() == 0) {
return false;
}
// Make sure L is allowed by SC-FDMA modulation
while (!srslte_dft_precoding_valid_prb(alloc->length())) {
alloc->resize_by(-1);
}
return alloc->length() == L;
}
ul_harq_proc* ul_metric_rr::allocate_user_retx_prbs(sched_ue* user)
{
if (tti_alloc->is_ul_alloc(user->get_rnti())) {
return nullptr;
}
auto p = user->get_active_cell_index(cc_cfg->enb_cc_idx);
if (not p.first) {
// this cc is not activated for this user
return nullptr;
}
uint32_t cell_idx = p.second;
alloc_outcome_t ret;
ul_harq_proc* h = user->get_ul_harq(current_tti, cell_idx);
srslte::tti_point tti_rx{current_tti - (TX_ENB_DELAY + FDD_HARQ_DELAY_DL_MS)};
// if there are procedures and we have space
if (h->has_pending_retx()) {
// Avoid measGaps
if (not user->pusch_enabled(tti_rx, cc_cfg->enb_cc_idx, false)) {
return nullptr;
}
prb_interval alloc = h->get_alloc();
// If can schedule the same mask, do it
ret = tti_alloc->alloc_ul_user(user, alloc);
if (ret == alloc_outcome_t::SUCCESS) {
return h;
}
if (ret == alloc_outcome_t::DCI_COLLISION) {
log_h->info("SCHED: Couldn't find space in PDCCH for UL retx of rnti=0x%x\n", user->get_rnti());
return nullptr;
}
// Avoid measGaps accounting for PDCCH
if (not user->pusch_enabled(tti_rx, cc_cfg->enb_cc_idx, true)) {
return nullptr;
}
if (find_allocation(alloc.length(), &alloc)) {
ret = tti_alloc->alloc_ul_user(user, alloc);
if (ret == alloc_outcome_t::SUCCESS) {
return h;
}
if (ret == alloc_outcome_t::DCI_COLLISION) {
log_h->info("SCHED: Couldn't find space in PDCCH for UL retx of rnti=0x%x\n", user->get_rnti());
}
}
}
return nullptr;
}
ul_harq_proc* ul_metric_rr::allocate_user_newtx_prbs(sched_ue* user)
{
if (tti_alloc->is_ul_alloc(user->get_rnti())) {
return nullptr;
}
auto p = user->get_active_cell_index(cc_cfg->enb_cc_idx);
if (not p.first) {
// this cc is not activated for this user
return nullptr;
}
uint32_t cell_idx = p.second;
// Avoid measGaps
if (not user->pusch_enabled(
srslte::tti_point{current_tti - (TX_ENB_DELAY + FDD_HARQ_DELAY_DL_MS)}, cc_cfg->enb_cc_idx, true)) {
return nullptr;
}
uint32_t pending_data = user->get_pending_ul_new_data(current_tti, cell_idx);
ul_harq_proc* h = user->get_ul_harq(current_tti, cell_idx);
// find an empty PID
if (h->is_empty(0) and pending_data > 0) {
uint32_t pending_rb = user->get_required_prb_ul(cell_idx, pending_data);
prb_interval alloc{};
find_allocation(pending_rb, &alloc);
if (alloc.length() > 0) { // at least one PRB was scheduled
alloc_outcome_t ret = tti_alloc->alloc_ul_user(user, alloc);
if (ret == alloc_outcome_t::SUCCESS) {
return h;
}
if (ret == alloc_outcome_t::DCI_COLLISION) {
log_h->info("SCHED: Couldn't find space in PDCCH for UL tx of rnti=0x%x\n", user->get_rnti());
}
}
}
return nullptr;
}
} // namespace srsenb

@@ -1,355 +0,0 @@
/**
*
* \section COPYRIGHT
*
* Copyright 2013-2020 Software Radio Systems Limited
*
* By using this file, you agree to the terms and conditions set
* forth in the LICENSE file which can be found at the top level of
* the distribution.
*
*/
#include "srsenb/hdr/stack/mac/sched_pf.h"
#include "srsenb/hdr/stack/mac/sched_harq.h"
namespace srsenb {
void sched_dl_pf::set_params(const sched_cell_params_t& cell_params_)
{
cc_cfg = &cell_params_;
log_h = srslte::logmap::get("MAC");
}
void sched_dl_pf::sched_users(std::map<uint16_t, sched_ue>& ue_db, dl_sf_sched_itf* tti_sched)
{
if (ue_db.empty()) {
return;
}
// remove deleted users from history
for (auto it = ue_history_db.begin(); it != ue_history_db.end();) {
if (not ue_db.count(it->first)) {
it = ue_history_db.erase(it);
} else {
++it;
}
}
// add new users to history db, and update priority queue
for (auto& u : ue_db) {
auto it = ue_history_db.find(u.first);
if (it == ue_history_db.end()) {
it = ue_history_db.insert(std::make_pair(u.first, ue_ctxt{u.first})).first;
}
it->second.new_tti(*cc_cfg, u.second, tti_sched);
ue_queue.push(&it->second);
}
while (not ue_queue.empty()) {
ue_ctxt& ue = *ue_queue.top();
bool alloc_success = try_dl_alloc(ue, ue_db[ue.rnti], tti_sched);
ue.save_history(alloc_success, 0.01);
ue_queue.pop();
}
}
void sched_dl_pf::ue_ctxt::new_tti(const sched_cell_params_t& cell, sched_ue& ue, dl_sf_sched_itf* tti_sched)
{
h = nullptr;
prio = 0;
is_retx = false;
auto p = ue.get_active_cell_index(cell.enb_cc_idx);
if (not p.first) {
return;
}
if (not ue.pdsch_enabled(srslte::tti_point(tti_sched->get_tti_tx_dl() - TX_ENB_DELAY), cell.enb_cc_idx)) {
return;
}
ue_cc_idx = p.second;
// search for DL HARQ
h = ue.get_pending_dl_harq(tti_sched->get_tti_tx_dl(), ue_cc_idx);
is_retx = h != nullptr;
if (h == nullptr) {
h = ue.get_empty_dl_harq(tti_sched->get_tti_tx_dl(), ue_cc_idx);
if (h == nullptr) {
return;
}
}
// calculate PF priority
float r = ue.get_expected_dl_bitrate(ue_cc_idx) / 8;
float R = avg_rate();
prio = (R != 0) ? r / R : (r == 0 ? 0 : std::numeric_limits<float>::max());
}
void sched_dl_pf::ue_ctxt::save_history(bool alloc, float alpha)
{
float sample = alloc ? (h->get_tbs(0) + h->get_tbs(1)) : 0;
if (nof_samples < 1 / alpha) {
// fast start
rate = rate + (sample - rate) / (nof_samples + 1);
} else {
rate = (1 - alpha) * rate + (alpha)*sample;
}
nof_samples++;
}
bool find_allocation(uint32_t min_nof_rbg, uint32_t max_nof_rbg, rbgmask_t* rbgmask, dl_sf_sched_itf* tti_alloc)
{
if (tti_alloc->get_dl_mask().all()) {
return false;
}
// 1's for free rbgs
rbgmask_t localmask = ~(tti_alloc->get_dl_mask());
uint32_t i = 0, nof_alloc = 0;
for (; i < localmask.size() and nof_alloc < max_nof_rbg; ++i) {
if (localmask.test(i)) {
nof_alloc++;
}
}
if (nof_alloc < min_nof_rbg) {
return false;
}
localmask.fill(i, localmask.size(), false);
*rbgmask = localmask;
return true;
}
bool sched_dl_pf::try_dl_alloc(ue_ctxt& ue_ctxt, sched_ue& ue, dl_sf_sched_itf* tti_sched)
{
if (tti_sched->is_dl_alloc(ue_ctxt.rnti) or ue_ctxt.prio == 0) {
return false;
}
alloc_outcome_t code;
if (ue_ctxt.is_retx) {
// Try to reuse the same mask
rbgmask_t retx_mask = ue_ctxt.h->get_rbgmask();
code = tti_sched->alloc_dl_user(&ue, retx_mask, ue_ctxt.h->get_id());
if (code == alloc_outcome_t::SUCCESS) {
return true;
}
if (code == alloc_outcome_t::DCI_COLLISION) {
// No DCIs available for this user. Move to next
log_h->info("SCHED: Couldn't find space in PDCCH for DL retx for rnti=0x%x\n", ue_ctxt.rnti);
return false;
}
// If previous mask does not fit, find another with exact same number of rbgs
size_t nof_rbg = retx_mask.count();
if (find_allocation(nof_rbg, nof_rbg, &retx_mask, tti_sched)) {
code = tti_sched->alloc_dl_user(&ue, retx_mask, ue_ctxt.h->get_id());
if (code == alloc_outcome_t::SUCCESS) {
return true;
}
if (code == alloc_outcome_t::DCI_COLLISION) {
log_h->info("SCHED: Couldn't find space in PDCCH for DL retx for rnti=0x%x\n", ue.get_rnti());
return false;
}
}
} else {
// Allocate resources based on pending data
rbg_interval req_rbgs = ue.get_required_dl_rbgs(ue_ctxt.ue_cc_idx);
if (req_rbgs.stop() > 0) {
rbgmask_t newtx_mask(tti_sched->get_dl_mask().size());
if (find_allocation(req_rbgs.start(), req_rbgs.stop(), &newtx_mask, tti_sched)) {
// some empty spaces were found
code = tti_sched->alloc_dl_user(&ue, newtx_mask, ue_ctxt.h->get_id());
if (code == alloc_outcome_t::SUCCESS) {
return true;
} else if (code == alloc_outcome_t::DCI_COLLISION) {
log_h->info("SCHED: Couldn't find space in PDCCH for DL tx for rnti=0x%x\n", ue_ctxt.rnti);
}
}
}
}
return false;
}
bool sched_dl_pf::ue_prio_compare::operator()(const sched_dl_pf::ue_ctxt* lhs, const sched_dl_pf::ue_ctxt* rhs) const
{
return (not lhs->is_retx and rhs->is_retx) or (lhs->is_retx == rhs->is_retx and lhs->prio < rhs->prio);
}
/*****************************************************************
*
* Uplink Metric
*
*****************************************************************/
void sched_ul_pf::set_params(const sched_cell_params_t& cell_params_)
{
cc_cfg = &cell_params_;
log_h = srslte::logmap::get("MAC");
}
void sched_ul_pf::sched_users(std::map<uint16_t, sched_ue>& ue_db, ul_sf_sched_itf* tti_sched)
{
if (ue_db.empty()) {
return;
}
// remove deleted users from history
for (auto it = ue_history_db.begin(); it != ue_history_db.end();) {
if (not ue_db.count(it->first)) {
it = ue_history_db.erase(it);
} else {
++it;
}
}
// add new users to history db, and update priority queue
for (auto& u : ue_db) {
auto it = ue_history_db.find(u.first);
if (it == ue_history_db.end()) {
it = ue_history_db.insert(std::make_pair(u.first, ue_ctxt{u.first})).first;
}
it->second.new_tti(*cc_cfg, u.second, tti_sched);
ue_queue.push(&it->second);
}
while (not ue_queue.empty()) {
ue_ctxt& ue = *ue_queue.top();
bool alloc_success = try_ul_alloc(ue, ue_db[ue.rnti], tti_sched);
ue.save_history(alloc_success, 0.01);
ue_queue.pop();
}
}
/**
* Finds a range of L contiguous PRBs that are empty
* @param L Size of the requested UL allocation in PRBs
* @param alloc Found allocation. It is guaranteed that 0 <= alloc->L <= L
* @return true if the requested allocation of size L was strictly met
*/
bool find_allocation(uint32_t L, prb_interval* alloc, ul_sf_sched_itf* tti_sched)
{
const prbmask_t* used_rb = &tti_sched->get_ul_mask();
*alloc = {};
for (uint32_t n = 0; n < used_rb->size() && alloc->length() < L; n++) {
if (not used_rb->test(n) && alloc->length() == 0) {
alloc->displace_to(n);
}
if (not used_rb->test(n)) {
alloc->resize_by(1);
} else if (alloc->length() > 0) {
// avoid edges
if (n < 3) {
*alloc = {};
} else {
break;
}
}
}
if (alloc->length() == 0) {
return false;
}
// Make sure L is allowed by SC-FDMA modulation
while (!srslte_dft_precoding_valid_prb(alloc->length())) {
alloc->resize_by(-1);
}
return alloc->length() == L;
}
bool sched_ul_pf::try_ul_alloc(ue_ctxt& ue_ctxt, sched_ue& ue, ul_sf_sched_itf* tti_sched)
{
if (ue_ctxt.h == nullptr or tti_sched->is_ul_alloc(ue_ctxt.rnti) or ue_ctxt.prio == 0) {
return false;
}
srslte::tti_point tti_rx{tti_sched->get_tti_tx_ul() - (TX_ENB_DELAY + FDD_HARQ_DELAY_DL_MS)};
alloc_outcome_t ret;
if (ue_ctxt.h->has_pending_retx()) {
prb_interval alloc = ue_ctxt.h->get_alloc();
// If can schedule the same mask, do it
ret = tti_sched->alloc_ul_user(&ue, alloc);
if (ret == alloc_outcome_t::SUCCESS) {
return true;
}
if (ret == alloc_outcome_t::DCI_COLLISION) {
log_h->info("SCHED: Couldn't find space in PDCCH for UL retx of rnti=0x%x\n", ue.get_rnti());
return false;
}
// Avoid measGaps accounting for PDCCH
if (not ue.pusch_enabled(tti_rx, cc_cfg->enb_cc_idx, true)) {
return false;
}
if (find_allocation(alloc.length(), &alloc, tti_sched)) {
ret = tti_sched->alloc_ul_user(&ue, alloc);
if (ret == alloc_outcome_t::SUCCESS) {
return true;
}
if (ret == alloc_outcome_t::DCI_COLLISION) {
log_h->info("SCHED: Couldn't find space in PDCCH for UL retx of rnti=0x%x\n", ue.get_rnti());
}
}
} else {
// Avoid measGaps accounting for PDCCH
if (not ue.pusch_enabled(tti_rx, cc_cfg->enb_cc_idx, true)) {
return false;
}
uint32_t pending_data = ue.get_pending_ul_new_data(tti_sched->get_tti_tx_ul(), ue_ctxt.ue_cc_idx);
// find an empty PID
if (ue_ctxt.h->is_empty(0) and pending_data > 0) {
uint32_t pending_rb = ue.get_required_prb_ul(ue_ctxt.ue_cc_idx, pending_data);
prb_interval alloc{};
find_allocation(pending_rb, &alloc, tti_sched);
if (alloc.length() > 0) { // at least one PRB was scheduled
ret = tti_sched->alloc_ul_user(&ue, alloc);
if (ret == alloc_outcome_t::SUCCESS) {
return true;
}
if (ret == alloc_outcome_t::DCI_COLLISION) {
log_h->info("SCHED: Couldn't find space in PDCCH for UL tx of rnti=0x%x\n", ue.get_rnti());
}
}
}
}
return false;
}
void sched_ul_pf::ue_ctxt::new_tti(const sched_cell_params_t& cell, sched_ue& ue, ul_sf_sched_itf* tti_sched)
{
srslte::tti_point tti_rx = srslte::tti_point(tti_sched->get_tti_tx_ul() - TX_ENB_DELAY - FDD_HARQ_DELAY_DL_MS);
h = nullptr;
prio = 0;
auto p = ue.get_active_cell_index(cell.enb_cc_idx);
if (not p.first) {
return;
}
if (not ue.pusch_enabled(tti_rx, cell.enb_cc_idx, false)) {
return;
}
ue_cc_idx = p.second;
h = ue.get_ul_harq(tti_sched->get_tti_tx_ul(), ue_cc_idx);
// calculate PF priority
float r = ue.get_expected_ul_bitrate(ue_cc_idx) / 8;
float R = avg_rate();
prio = (R != 0) ? r / R : (r == 0 ? 0 : std::numeric_limits<float>::max());
}
void sched_ul_pf::ue_ctxt::save_history(bool alloc, float alpha)
{
float sample = alloc ? h->get_pending_data() : 0;
if (nof_samples < 1 / alpha) {
// fast start
rate = rate + (sample - rate) / (nof_samples + 1);
} else {
rate = (1 - alpha) * rate + (alpha)*sample;
}
nof_samples++;
}
bool sched_ul_pf::ue_prio_compare::operator()(const sched_ul_pf::ue_ctxt* lhs, const sched_ul_pf::ue_ctxt* rhs) const
{
bool is_retx1 = lhs->h != nullptr and lhs->h->has_pending_retx(),
is_retx2 = rhs->h != nullptr and rhs->h->has_pending_retx();
return (not is_retx1 and is_retx2) or (is_retx1 == is_retx2 and lhs->prio < rhs->prio);
}
} // namespace srsenb

@@ -0,0 +1,10 @@
#
# Copyright 2013-2020 Software Radio Systems Limited
#
# By using this file, you agree to the terms and conditions set
# forth in the LICENSE file which can be found at the top level of
# the distribution.
#
set(SOURCES sched_base.cc sched_time_rr.cc sched_time_pf.cc)
add_library(mac_schedulers OBJECT ${SOURCES})

@@ -0,0 +1,186 @@
/**
*
* \section COPYRIGHT
*
* Copyright 2013-2020 Software Radio Systems Limited
*
* By using this file, you agree to the terms and conditions set
* forth in the LICENSE file which can be found at the top level of
* the distribution.
*
*/
#include "srsenb/hdr/stack/mac/schedulers/sched_base.h"
namespace srsenb {
rbgmask_t find_available_dl_rbgs(uint32_t max_rbgs, const rbgmask_t& current_mask)
{
if (max_rbgs == 0 or current_mask.all()) {
return rbgmask_t{};
}
// 1's for free rbgs
rbgmask_t localmask = ~(current_mask);
uint32_t i = 0, nof_alloc = 0;
for (; i < localmask.size() and nof_alloc < max_rbgs; ++i) {
if (localmask.test(i)) {
nof_alloc++;
}
}
localmask.fill(i, localmask.size(), false);
return localmask;
}
prb_interval find_contiguous_ul_prbs(uint32_t L, const prbmask_t& current_mask)
{
prb_interval prb_interv, prb_interv2;
for (uint32_t n = 0; n < current_mask.size() and prb_interv.length() < L; n++) {
if (not current_mask.test(n) and prb_interv.length() == 0) {
// new interval
prb_interv.set(n, n + 1);
} else if (not current_mask.test(n)) {
// extend current interval
prb_interv.resize_by(1);
} else if (prb_interv.length() > 0) {
// reset interval
prb_interv2 = prb_interv.length() > prb_interv2.length() ? prb_interv : prb_interv2;
prb_interv = {};
}
}
prb_interv = prb_interv2.length() > prb_interv.length() ? prb_interv2 : prb_interv;
if (prb_interv.empty()) {
return prb_interv;
}
// Make sure L is allowed by SC-FDMA modulation
prb_interv2 = prb_interv;
while (not srslte_dft_precoding_valid_prb(prb_interv.length()) and prb_interv.stop() < current_mask.size() and
not current_mask.test(prb_interv.stop())) {
prb_interv.resize_by(1);
}
if (not srslte_dft_precoding_valid_prb(prb_interv.length())) {
// if length increase failed, try to decrease
prb_interv = prb_interv2;
prb_interv.resize_by(-1);
while (not srslte_dft_precoding_valid_prb(prb_interv.length()) and not prb_interv.empty()) {
prb_interv.resize_by(-1);
}
}
return prb_interv;
}
int get_ue_cc_idx_if_pdsch_enabled(const sched_ue& user, sf_sched* tti_sched)
{
// Do not allocate a user multiple times in the same tti
if (tti_sched->is_dl_alloc(user.get_rnti())) {
return -1;
}
// Do not allocate a user to an inactive carrier
auto p = user.get_active_cell_index(tti_sched->get_enb_cc_idx());
if (not p.first) {
return -1;
}
uint32_t cell_idx = p.second;
// Do not allow allocations when PDSCH is deactivated
if (not user.pdsch_enabled(srslte::tti_point(tti_sched->get_tti_rx()), tti_sched->get_enb_cc_idx())) {
return -1;
}
return cell_idx;
}
const dl_harq_proc* get_dl_retx_harq(sched_ue& user, sf_sched* tti_sched)
{
int ue_cc_idx = get_ue_cc_idx_if_pdsch_enabled(user, tti_sched);
if (ue_cc_idx < 0) {
return nullptr;
}
dl_harq_proc* h = user.get_pending_dl_harq(tti_sched->get_tti_tx_dl(), ue_cc_idx);
return h;
}
const dl_harq_proc* get_dl_newtx_harq(sched_ue& user, sf_sched* tti_sched)
{
int ue_cc_idx = get_ue_cc_idx_if_pdsch_enabled(user, tti_sched);
if (ue_cc_idx < 0) {
return nullptr;
}
return user.get_empty_dl_harq(tti_sched->get_tti_tx_dl(), ue_cc_idx);
}
int get_ue_cc_idx_if_pusch_enabled(const sched_ue& user, sf_sched* tti_sched, bool needs_pdcch)
{
// Do not allocate a user multiple times in the same tti
if (tti_sched->is_ul_alloc(user.get_rnti())) {
return -1;
}
// Do not allocate a user to an inactive carrier
auto p = user.get_active_cell_index(tti_sched->get_enb_cc_idx());
if (not p.first) {
return -1;
}
uint32_t cell_idx = p.second;
// Do not allow allocations when PUSCH is deactivated
if (not user.pusch_enabled(srslte::tti_point(tti_sched->get_tti_rx()), tti_sched->get_enb_cc_idx(), needs_pdcch)) {
return -1;
}
return cell_idx;
}
const ul_harq_proc* get_ul_retx_harq(sched_ue& user, sf_sched* tti_sched)
{
int ue_cc_idx = get_ue_cc_idx_if_pusch_enabled(user, tti_sched, false);
if (ue_cc_idx < 0) {
return nullptr;
}
const ul_harq_proc* h = user.get_ul_harq(tti_sched->get_tti_tx_ul(), ue_cc_idx);
return h->has_pending_retx() ? h : nullptr;
}
const ul_harq_proc* get_ul_newtx_harq(sched_ue& user, sf_sched* tti_sched)
{
int ue_cc_idx = get_ue_cc_idx_if_pusch_enabled(user, tti_sched, true);
if (ue_cc_idx < 0) {
return nullptr;
}
const ul_harq_proc* h = user.get_ul_harq(tti_sched->get_tti_tx_ul(), ue_cc_idx);
return h->is_empty() ? h : nullptr;
}
alloc_outcome_t try_dl_retx_alloc(sf_sched& tti_sched, sched_ue& ue, const dl_harq_proc& h)
{
// Try to reuse the same mask
rbgmask_t retx_mask = h.get_rbgmask();
alloc_outcome_t code = tti_sched.alloc_dl_user(&ue, retx_mask, h.get_id());
if (code == alloc_outcome_t::SUCCESS or code == alloc_outcome_t::DCI_COLLISION) {
return code;
}
// If previous mask does not fit, find another with exact same number of rbgs
size_t nof_rbg = retx_mask.count();
retx_mask = find_available_dl_rbgs(nof_rbg, tti_sched.get_dl_mask());
if (retx_mask.count() == nof_rbg) {
return tti_sched.alloc_dl_user(&ue, retx_mask, h.get_id());
}
return alloc_outcome_t::RB_COLLISION;
}
alloc_outcome_t try_ul_retx_alloc(sf_sched& tti_sched, sched_ue& ue, const ul_harq_proc& h)
{
// If can schedule the same mask, do it
prb_interval alloc = h.get_alloc();
alloc_outcome_t ret = tti_sched.alloc_ul_user(&ue, alloc);
if (ret == alloc_outcome_t::SUCCESS or ret == alloc_outcome_t::DCI_COLLISION) {
return ret;
}
// Avoid measGaps accounting for PDCCH
srslte::tti_point tti_rx{tti_sched.get_tti_rx()};
if (not ue.pusch_enabled(tti_rx, tti_sched.get_enb_cc_idx(), true)) {
return alloc_outcome_t::MEASGAP_COLLISION;
}
uint32_t nof_prbs = alloc.length();
alloc = find_contiguous_ul_prbs(nof_prbs, tti_sched.get_ul_mask());
if (alloc.length() != nof_prbs) {
return alloc_outcome_t::RB_COLLISION;
}
return tti_sched.alloc_ul_user(&ue, alloc);
}
} // namespace srsenb

@@ -0,0 +1,216 @@
/**
*
* \section COPYRIGHT
*
* Copyright 2013-2020 Software Radio Systems Limited
*
* By using this file, you agree to the terms and conditions set
* forth in the LICENSE file which can be found at the top level of
* the distribution.
*
*/
#include "srsenb/hdr/stack/mac/schedulers/sched_time_pf.h"
namespace srsenb {
using srslte::tti_point;
sched_time_pf::sched_time_pf(const sched_cell_params_t& cell_params_)
{
cc_cfg = &cell_params_;
}
void sched_time_pf::new_tti(std::map<uint16_t, sched_ue>& ue_db, sf_sched* tti_sched)
{
current_tti_rx = tti_point{tti_sched->get_tti_rx()};
// remove deleted users from history
for (auto it = ue_history_db.begin(); it != ue_history_db.end();) {
if (not ue_db.count(it->first)) {
it = ue_history_db.erase(it);
} else {
++it;
}
}
// add new users to history db, and update priority queues
for (auto& u : ue_db) {
auto it = ue_history_db.find(u.first);
if (it == ue_history_db.end()) {
it = ue_history_db.insert(std::make_pair(u.first, ue_ctxt{u.first})).first;
}
it->second.new_tti(*cc_cfg, u.second, tti_sched);
if (it->second.dl_newtx_h != nullptr or it->second.dl_retx_h != nullptr) {
dl_queue.push(&it->second);
}
if (it->second.ul_h != nullptr) {
ul_queue.push(&it->second);
}
}
}
/*****************************************************************
* Downlink
*****************************************************************/
void sched_time_pf::sched_dl_users(std::map<uint16_t, sched_ue>& ue_db, sf_sched* tti_sched)
{
srslte::tti_point tti_rx{tti_sched->get_tti_rx()};
if (current_tti_rx != tti_rx) {
new_tti(ue_db, tti_sched);
}
while (not dl_queue.empty()) {
ue_ctxt& ue = *dl_queue.top();
ue.save_dl_alloc(try_dl_alloc(ue, ue_db[ue.rnti], tti_sched), 0.01);
dl_queue.pop();
}
}
uint32_t sched_time_pf::try_dl_alloc(ue_ctxt& ue_ctxt, sched_ue& ue, sf_sched* tti_sched)
{
alloc_outcome_t code = alloc_outcome_t::ERROR;
if (ue_ctxt.dl_retx_h != nullptr) {
code = try_dl_retx_alloc(*tti_sched, ue, *ue_ctxt.dl_retx_h);
if (code == alloc_outcome_t::SUCCESS) {
return ue_ctxt.dl_retx_h->get_tbs(0) + ue_ctxt.dl_retx_h->get_tbs(1);
}
}
if (code != alloc_outcome_t::DCI_COLLISION and ue_ctxt.dl_newtx_h != nullptr) {
rbg_interval req_rbgs = ue.get_required_dl_rbgs(ue_ctxt.ue_cc_idx);
// Check if there is an empty harq for the newtx
if (req_rbgs.stop() == 0) {
return 0;
}
// Allocate resources based on pending data
rbgmask_t newtx_mask = find_available_dl_rbgs(req_rbgs.stop(), tti_sched->get_dl_mask());
if (newtx_mask.count() >= req_rbgs.start()) {
// empty RBGs were found
code = tti_sched->alloc_dl_user(&ue, newtx_mask, ue_ctxt.dl_newtx_h->get_id());
if (code == alloc_outcome_t::SUCCESS) {
return ue_ctxt.dl_newtx_h->get_tbs(0) + ue_ctxt.dl_newtx_h->get_tbs(1);
}
}
}
if (code == alloc_outcome_t::DCI_COLLISION) {
log_h->info("SCHED: Couldn't find space in PDCCH for DL tx for rnti=0x%x\n", ue.get_rnti());
}
return 0;
}
/*****************************************************************
* Uplink
*****************************************************************/
void sched_time_pf::sched_ul_users(std::map<uint16_t, sched_ue>& ue_db, sf_sched* tti_sched)
{
srslte::tti_point tti_rx{tti_sched->get_tti_rx()};
if (current_tti_rx != tti_rx) {
new_tti(ue_db, tti_sched);
}
while (not ul_queue.empty()) {
ue_ctxt& ue = *ul_queue.top();
ue.save_ul_alloc(try_ul_alloc(ue, ue_db[ue.rnti], tti_sched), 0.01);
ul_queue.pop();
}
}
uint32_t sched_time_pf::try_ul_alloc(ue_ctxt& ue_ctxt, sched_ue& ue, sf_sched* tti_sched)
{
alloc_outcome_t code = alloc_outcome_t::ERROR;
if (ue_ctxt.ul_h != nullptr and ue_ctxt.ul_h->has_pending_retx()) {
code = try_ul_retx_alloc(*tti_sched, ue, *ue_ctxt.ul_h);
} else if (ue_ctxt.ul_h != nullptr) {
uint32_t pending_data = ue.get_pending_ul_new_data(tti_sched->get_tti_tx_ul(), ue_ctxt.ue_cc_idx);
// Check if there is an empty harq, and data to transmit
if (pending_data == 0) {
return 0;
}
uint32_t pending_rb = ue.get_required_prb_ul(ue_ctxt.ue_cc_idx, pending_data);
prb_interval alloc = find_contiguous_ul_prbs(pending_rb, tti_sched->get_ul_mask());
if (alloc.empty()) {
return 0;
}
code = tti_sched->alloc_ul_user(&ue, alloc);
}
if (code == alloc_outcome_t::DCI_COLLISION) {
log_h->info("SCHED: Couldn't find space in PDCCH for UL retx of rnti=0x%x\n", ue.get_rnti());
}
return code == alloc_outcome_t::SUCCESS ? ue_ctxt.ul_h->get_pending_data() : 0;
}
/*****************************************************************
* UE history
*****************************************************************/
void sched_time_pf::ue_ctxt::new_tti(const sched_cell_params_t& cell, sched_ue& ue, sf_sched* tti_sched)
{
dl_retx_h = nullptr;
dl_newtx_h = nullptr;
ul_h = nullptr;
dl_prio = 0;
ue_cc_idx = ue.enb_to_ue_cc_idx(cell.enb_cc_idx);
if (ue_cc_idx < 0) {
// not active
return;
}
// Calculate DL priority
dl_retx_h = get_dl_retx_harq(ue, tti_sched);
dl_newtx_h = get_dl_newtx_harq(ue, tti_sched);
if (dl_retx_h != nullptr or dl_newtx_h != nullptr) {
// calculate DL PF priority
float r = ue.get_expected_dl_bitrate(ue_cc_idx) / 8;
float R = dl_avg_rate();
dl_prio = (R != 0) ? r / R : (r == 0 ? 0 : std::numeric_limits<float>::max());
}
// Calculate UL priority
ul_h = get_ul_retx_harq(ue, tti_sched);
if (ul_h == nullptr) {
ul_h = get_ul_newtx_harq(ue, tti_sched);
}
if (ul_h != nullptr) {
float r = ue.get_expected_ul_bitrate(ue_cc_idx) / 8;
float R = ul_avg_rate();
ul_prio = (R != 0) ? r / R : (r == 0 ? 0 : std::numeric_limits<float>::max());
}
}
void sched_time_pf::ue_ctxt::save_dl_alloc(uint32_t alloc_bytes, float alpha)
{
if (dl_nof_samples < 1 / alpha) {
// fast start
dl_avg_rate_ = dl_avg_rate_ + (alloc_bytes - dl_avg_rate_) / (dl_nof_samples + 1);
} else {
dl_avg_rate_ = (1 - alpha) * dl_avg_rate_ + (alpha)*alloc_bytes;
}
dl_nof_samples++;
}
void sched_time_pf::ue_ctxt::save_ul_alloc(uint32_t alloc_bytes, float alpha)
{
if (ul_nof_samples < 1 / alpha) {
// fast start
ul_avg_rate_ = ul_avg_rate_ + (alloc_bytes - ul_avg_rate_) / (ul_nof_samples + 1);
} else {
ul_avg_rate_ = (1 - alpha) * ul_avg_rate_ + (alpha)*alloc_bytes;
}
ul_nof_samples++;
}
bool sched_time_pf::ue_dl_prio_compare::operator()(const sched_time_pf::ue_ctxt* lhs,
const sched_time_pf::ue_ctxt* rhs) const
{
bool is_retx1 = lhs->dl_retx_h != nullptr, is_retx2 = rhs->dl_retx_h != nullptr;
return (not is_retx1 and is_retx2) or (is_retx1 == is_retx2 and lhs->dl_prio < rhs->dl_prio);
}
bool sched_time_pf::ue_ul_prio_compare::operator()(const sched_time_pf::ue_ctxt* lhs,
const sched_time_pf::ue_ctxt* rhs) const
{
bool is_retx1 = lhs->ul_h->has_pending_retx(), is_retx2 = rhs->ul_h->has_pending_retx();
return (not is_retx1 and is_retx2) or (is_retx1 == is_retx2 and lhs->ul_prio < rhs->ul_prio);
}
} // namespace srsenb

@@ -0,0 +1,155 @@
/**
*
* \section COPYRIGHT
*
* Copyright 2013-2020 Software Radio Systems Limited
*
* By using this file, you agree to the terms and conditions set
* forth in the LICENSE file which can be found at the top level of
* the distribution.
*
*/
#include "srsenb/hdr/stack/mac/schedulers/sched_time_rr.h"
#include <string.h>
namespace srsenb {
sched_time_rr::sched_time_rr(const sched_cell_params_t& cell_params_)
{
cc_cfg = &cell_params_;
}
/*****************************************************************
* Downlink
*****************************************************************/
void sched_time_rr::sched_dl_users(std::map<uint16_t, sched_ue>& ue_db, sf_sched* tti_sched)
{
if (ue_db.empty()) {
return;
}
// give priority on a time-domain RR basis.
uint32_t priority_idx = tti_sched->get_tti_tx_dl() % (uint32_t)ue_db.size();
sched_dl_retxs(ue_db, tti_sched, priority_idx);
sched_dl_newtxs(ue_db, tti_sched, priority_idx);
}
void sched_time_rr::sched_dl_retxs(std::map<uint16_t, sched_ue>& ue_db, sf_sched* tti_sched, size_t prio_idx)
{
auto iter = ue_db.begin();
std::advance(iter, prio_idx);
for (uint32_t ue_count = 0; ue_count < ue_db.size(); ++iter, ++ue_count) {
if (iter == ue_db.end()) {
iter = ue_db.begin(); // wrap around
}
sched_ue& user = iter->second;
const dl_harq_proc* h = get_dl_retx_harq(user, tti_sched);
// Check if there is a pending retx
if (h == nullptr) {
continue;
}
alloc_outcome_t code = try_dl_retx_alloc(*tti_sched, user, *h);
if (code == alloc_outcome_t::DCI_COLLISION) {
log_h->info("SCHED: Couldn't find space in PDCCH for DL retx for rnti=0x%x\n", user.get_rnti());
}
}
}
void sched_time_rr::sched_dl_newtxs(std::map<uint16_t, sched_ue>& ue_db, sf_sched* tti_sched, size_t prio_idx)
{
auto iter = ue_db.begin();
std::advance(iter, prio_idx);
for (uint32_t ue_count = 0; ue_count < ue_db.size(); ++iter, ++ue_count) {
if (iter == ue_db.end()) {
iter = ue_db.begin(); // wrap around
}
sched_ue& user = iter->second;
const dl_harq_proc* h = get_dl_newtx_harq(user, tti_sched);
rbg_interval req_rbgs = user.get_required_dl_rbgs(user.enb_to_ue_cc_idx(cc_cfg->enb_cc_idx));
// Check if there is an empty harq for the newtx
if (h == nullptr or req_rbgs.stop() == 0) {
continue;
}
// Allocate resources based on pending data
rbgmask_t newtx_mask = find_available_dl_rbgs(req_rbgs.stop(), tti_sched->get_dl_mask());
if (newtx_mask.count() >= req_rbgs.start()) {
// empty RBGs were found
alloc_outcome_t code = tti_sched->alloc_dl_user(&user, newtx_mask, h->get_id());
if (code == alloc_outcome_t::DCI_COLLISION) {
log_h->info("SCHED: Couldn't find space in PDCCH for DL tx for rnti=0x%x\n", user.get_rnti());
}
}
}
}
/*****************************************************************
* Uplink
*****************************************************************/
void sched_time_rr::sched_ul_users(std::map<uint16_t, sched_ue>& ue_db, sf_sched* tti_sched)
{
if (ue_db.empty()) {
return;
}
// give priority on a time-domain RR basis.
uint32_t priority_idx = tti_sched->get_tti_tx_ul() % (uint32_t)ue_db.size();
sched_ul_retxs(ue_db, tti_sched, priority_idx);
sched_ul_newtxs(ue_db, tti_sched, priority_idx);
}
void sched_time_rr::sched_ul_retxs(std::map<uint16_t, sched_ue>& ue_db, sf_sched* tti_sched, size_t prio_idx)
{
auto iter = ue_db.begin();
std::advance(iter, prio_idx);
for (uint32_t ue_count = 0; ue_count < ue_db.size(); ++iter, ++ue_count) {
if (iter == ue_db.end()) {
iter = ue_db.begin(); // wrap around
}
sched_ue& user = iter->second;
const ul_harq_proc* h = get_ul_retx_harq(user, tti_sched);
// Check if there is a pending retx
if (h == nullptr) {
continue;
}
alloc_outcome_t code = try_ul_retx_alloc(*tti_sched, user, *h);
if (code == alloc_outcome_t::DCI_COLLISION) {
log_h->info("SCHED: Couldn't find space in PDCCH for UL retx of rnti=0x%x\n", user.get_rnti());
}
}
}
void sched_time_rr::sched_ul_newtxs(std::map<uint16_t, sched_ue>& ue_db, sf_sched* tti_sched, size_t prio_idx)
{
auto iter = ue_db.begin();
std::advance(iter, prio_idx);
for (uint32_t ue_count = 0; ue_count < ue_db.size(); ++iter, ++ue_count) {
if (iter == ue_db.end()) {
iter = ue_db.begin(); // wrap around
}
sched_ue& user = iter->second;
const ul_harq_proc* h = get_ul_newtx_harq(user, tti_sched);
// Check if there is an empty harq
if (h == nullptr) {
continue;
}
uint32_t ue_cc_idx = user.enb_to_ue_cc_idx(cc_cfg->enb_cc_idx);
uint32_t pending_data = user.get_pending_ul_new_data(tti_sched->get_tti_tx_ul(), ue_cc_idx);
// Check if there is an empty harq, and data to transmit
if (pending_data == 0) {
continue;
}
uint32_t pending_rb = user.get_required_prb_ul(ue_cc_idx, pending_data);
prb_interval alloc = find_contiguous_ul_prbs(pending_rb, tti_sched->get_ul_mask());
if (alloc.empty()) {
continue;
}
alloc_outcome_t ret = tti_sched->alloc_ul_user(&user, alloc);
if (ret == alloc_outcome_t::DCI_COLLISION) {
log_h->info("SCHED: Couldn't find space in PDCCH for UL tx of rnti=0x%x\n", user.get_rnti());
}
}
}
} // namespace srsenb