substitution of ul_alloc_t for prb_interval

master
Francisco Paisana 4 years ago
parent 3eb1b83a43
commit 20b69fb22e

@@ -0,0 +1,127 @@
/*
 * Copyright 2013-2020 Software Radio Systems Limited
 *
 * This file is part of srsLTE.
 *
 * srsLTE is free software: you can redistribute it and/or modify
 * it under the terms of the GNU Affero General Public License as
 * published by the Free Software Foundation, either version 3 of
 * the License, or (at your option) any later version.
 *
 * srsLTE is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU Affero General Public License for more details.
 *
 * A copy of the GNU Affero General Public License can be found in
 * the LICENSE file in the top-level directory of this distribution
 * and at http://www.gnu.org/licenses/.
 *
 */

#ifndef SRSLTE_INTERVAL_H
#define SRSLTE_INTERVAL_H

#include <algorithm>
#include <ostream>
#include <string>

namespace srslte {

template <typename T>
class interval
{
public:
  T start;
  T stop;

  interval() : start(T{}), stop(T{}) {}
  interval(const T& start_, const T& stop_) : start(start_), stop(stop_) {}

  bool is_empty() const { return stop <= start; }

  T length() const { return stop - start; }

  void set_length(const T& len) { stop = start + len; }

  void add_offset(int offset)
  {
    start += offset;
    stop += offset;
  }

  void shift_to(int new_start)
  {
    stop  = new_start + length();
    start = new_start;
  }

  bool overlaps(const interval& other) const { return start < other.stop and other.start < stop; }

  bool contains(const T& point) const { return start <= point and point < stop; }

  std::string to_string() const
  {
    std::string s = "[" + std::to_string(start) + ", " + std::to_string(stop) + ")";
    return s;
  }
};

template <typename T>
bool operator==(const interval<T>& lhs, const interval<T>& rhs)
{
  return lhs.start == rhs.start and lhs.stop == rhs.stop;
}

template <typename T>
bool operator!=(const interval<T>& lhs, const interval<T>& rhs)
{
  return not(lhs == rhs);
}

template <typename T>
bool operator<(const interval<T>& lhs, const interval<T>& rhs)
{
  return lhs.start < rhs.start or (lhs.start == rhs.start and lhs.stop < rhs.stop);
}

//! Union of intervals (only defined for overlapping intervals; disjoint inputs yield an empty interval)
template <typename T>
interval<T> operator|(const interval<T>& lhs, const interval<T>& rhs)
{
  if (not lhs.overlaps(rhs)) {
    return interval<T>{};
  }
  return {std::min(lhs.start, rhs.start), std::max(lhs.stop, rhs.stop)};
}

template <typename T>
interval<T> make_union(const interval<T>& lhs, const interval<T>& rhs)
{
  return lhs | rhs;
}

//! Intersection of intervals
template <typename T>
interval<T> operator&(const interval<T>& lhs, const interval<T>& rhs)
{
  if (not lhs.overlaps(rhs)) {
    return interval<T>{};
  }
  return interval<T>{std::max(lhs.start, rhs.start), std::min(lhs.stop, rhs.stop)};
}

template <typename T>
interval<T> make_intersection(const interval<T>& lhs, const interval<T>& rhs)
{
  return lhs & rhs;
}

template <typename T>
std::ostream& operator<<(std::ostream& out, const interval<T>& interv)
{
  out << interv.to_string();
  return out;
}

} // namespace srslte

#endif // SRSLTE_INTERVAL_H
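
Note: srslte::interval<T> is half-open, i.e. [start, stop), so stop is one past the last element and is_empty() holds whenever stop <= start. A minimal usage sketch (hypothetical snippet, not part of the commit):

#include "srslte/adt/interval.h"
#include <cassert>

int main()
{
  srslte::interval<int> a{2, 6}, b{4, 9}, c{7, 8};
  assert(a.length() == 4);                          // covers 2, 3, 4, 5
  assert(a.overlaps(b) and not a.overlaps(c));
  assert((a & b) == srslte::interval<int>(4, 6));   // intersection
  assert((a | b) == srslte::interval<int>(2, 9));   // union of overlapping intervals
  assert((a | c).is_empty());                       // operator| returns {} for disjoint inputs
  return 0;
}

Note that operator| is a true set union only when the operands overlap; for disjoint intervals it deliberately returns an empty interval rather than the enclosing span.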

@@ -37,3 +37,7 @@ add_test(bounded_bitset_test bounded_bitset_test)
 add_executable(span_test span_test.cc)
 target_link_libraries(span_test srslte_common)
 add_test(span_test span_test)
+
+add_executable(interval_test interval_test.cc)
+target_link_libraries(interval_test srslte_common)
+add_test(interval_test interval_test)
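
Assuming the usual CMake/CTest workflow, the new test can then be exercised in isolation from the build directory:

make interval_test && ctest -R interval_test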

@@ -0,0 +1,72 @@
/*
 * Copyright 2013-2020 Software Radio Systems Limited
 *
 * This file is part of srsLTE.
 *
 * srsLTE is free software: you can redistribute it and/or modify
 * it under the terms of the GNU Affero General Public License as
 * published by the Free Software Foundation, either version 3 of
 * the License, or (at your option) any later version.
 *
 * srsLTE is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU Affero General Public License for more details.
 *
 * A copy of the GNU Affero General Public License can be found in
 * the LICENSE file in the top-level directory of this distribution
 * and at http://www.gnu.org/licenses/.
 *
 */

#include "srslte/adt/interval.h"
#include "srslte/common/test_common.h"

int test_interval_overlaps()
{
  srslte::interval<int> I{10, 15}, I2{9, 11}, I3{11, 14}, I4{9, 16}, I5{14, 16}, I6{4, 10}, I7{15, 17};

  TESTASSERT(I.overlaps(I2));
  TESTASSERT(I.overlaps(I3));
  TESTASSERT(I.overlaps(I4));
  TESTASSERT(I.overlaps(I5));
  TESTASSERT(not I.overlaps(I6));
  TESTASSERT(not I.overlaps(I7));
  return SRSLTE_SUCCESS;
}

int test_interval_contains()
{
  srslte::interval<int> I{5, 10};

  TESTASSERT(I.contains(5));
  TESTASSERT(I.contains(6));
  TESTASSERT(I.contains(9));
  TESTASSERT(not I.contains(10));
  TESTASSERT(not I.contains(11));
  TESTASSERT(not I.contains(4));
  return SRSLTE_SUCCESS;
}

int test_interval_intersect()
{
  srslte::interval<int> I{5, 10}, I2{3, 6}, I3{9, 12}, I4{10, 13};

  TESTASSERT(srslte::make_intersection(I, I2) == (I & I2));
  TESTASSERT((I & I2) == srslte::interval<int>(5, 6));
  TESTASSERT((I & I3) == srslte::interval<int>(9, 10));
  TESTASSERT(not(I & I3).is_empty());
  TESTASSERT((I & I4).is_empty());
  return SRSLTE_SUCCESS;
}

int main()
{
  TESTASSERT(test_interval_overlaps() == SRSLTE_SUCCESS);
  TESTASSERT(test_interval_contains() == SRSLTE_SUCCESS);
  TESTASSERT(test_interval_intersect() == SRSLTE_SUCCESS);
  return 0;
}

@@ -22,6 +22,7 @@
 #ifndef SRSLTE_SCHEDULER_COMMON_H
 #define SRSLTE_SCHEDULER_COMMON_H

+#include "srslte/adt/interval.h"
 #include "srslte/adt/bounded_bitset.h"
 #include "srslte/interfaces/sched_interface.h"
@@ -80,25 +81,18 @@ using rbgmask_t = srslte::bounded_bitset<25, true>;
 using prbmask_t = srslte::bounded_bitset<100, true>;

 //! Struct to express a {min,...,max} range of RBGs
-struct prb_range_t;
-struct rbg_range_t {
-  uint32_t rbg_min = 0, rbg_max = 0;
-  rbg_range_t() = default;
-  rbg_range_t(uint32_t s, uint32_t e) : rbg_min(s), rbg_max(e) {}
-  uint32_t nof_rbgs() const { return rbg_max - rbg_min; }
-  static rbg_range_t prbs_to_rbgs(const prb_range_t& prbs, uint32_t P);
+struct prb_interval;
+struct rbg_interval : public srslte::interval<uint32_t> {
+  using interval::interval;
+  static rbg_interval prbs_to_rbgs(const prb_interval& prbs, uint32_t P);
 };

 //! Struct to express a {min,...,max} range of PRBs
-struct prb_range_t {
-  uint32_t prb_min = 0, prb_max = 0;
-  prb_range_t() = default;
-  prb_range_t(uint32_t s, uint32_t e) : prb_min(s), prb_max(e) {}
-  uint32_t nof_prbs() { return prb_max - prb_min; }
-  static prb_range_t rbgs_to_prbs(const rbg_range_t& rbgs, uint32_t P);
-  static prb_range_t riv_to_prbs(uint32_t riv, uint32_t nof_prbs, int nof_vrbs = -1);
+struct prb_interval : public srslte::interval<uint32_t> {
+  using interval::interval;
+  static prb_interval rbgs_to_prbs(const rbg_interval& rbgs, uint32_t P);
+  static prb_interval riv_to_prbs(uint32_t riv, uint32_t nof_prbs, int nof_vrbs = -1);
 };
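
Note: since both structs now inherit the half-open [start, stop) semantics, the old nof_rbgs()/nof_prbs() accessors are replaced by the inherited length(), and RBG-to-PRB conversion scales both bounds by the RBG size P. A worked example with hypothetical values:

// P = 4: RBGs [2, 5) map to PRBs [8, 20)
rbg_interval rbgs{2, 5};
prb_interval prbs = prb_interval::rbgs_to_prbs(rbgs, 4); // prbs == [8, 20), prbs.length() == 12
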
 /***********************

@@ -160,7 +160,7 @@ class sf_grid_t
 public:
   struct dl_ctrl_alloc_t {
     alloc_outcome_t outcome;
-    rbg_range_t rbg_range;
+    rbg_interval rbg_range;
   };

   void init(const sched_cell_params_t& cell_params_);
@@ -168,9 +168,9 @@ public:
   dl_ctrl_alloc_t alloc_dl_ctrl(uint32_t aggr_lvl, alloc_type_t alloc_type);
   alloc_outcome_t alloc_dl_data(sched_ue* user, const rbgmask_t& user_mask);
   bool reserve_dl_rbgs(uint32_t start_rbg, uint32_t end_rbg);
-  alloc_outcome_t alloc_ul_data(sched_ue* user, ul_harq_proc::ul_alloc_t alloc, bool needs_pdcch);
+  alloc_outcome_t alloc_ul_data(sched_ue* user, prb_interval alloc, bool needs_pdcch);
   bool reserve_ul_prbs(const prbmask_t& prbmask, bool strict);
-  bool find_ul_alloc(uint32_t L, ul_harq_proc::ul_alloc_t* alloc) const;
+  bool find_ul_alloc(uint32_t L, prb_interval* alloc) const;

   // getters
   const rbgmask_t& get_dl_mask() const { return dl_mask; }
@@ -213,10 +213,10 @@ public:
 class ul_sf_sched_itf
 {
 public:
-  virtual alloc_outcome_t alloc_ul_user(sched_ue* user, ul_harq_proc::ul_alloc_t alloc) = 0;
+  virtual alloc_outcome_t alloc_ul_user(sched_ue* user, prb_interval alloc) = 0;
   virtual const prbmask_t& get_ul_mask() const = 0;
   virtual uint32_t get_tti_tx_ul() const = 0;
   virtual bool is_ul_alloc(const sched_ue* user) const = 0;
 };

 /** Description: Stores the RAR, broadcast, paging, DL data, UL data allocations for the given subframe
@@ -228,7 +228,7 @@ class sf_sched : public dl_sf_sched_itf, public ul_sf_sched_itf
 public:
   struct ctrl_alloc_t {
     size_t dci_idx;
-    rbg_range_t rbg_range;
+    rbg_interval rbg_range;
     uint16_t rnti;
     uint32_t req_bytes;
     alloc_type_t alloc_type;
@@ -253,14 +253,14 @@ public:
   };
   struct ul_alloc_t {
     enum type_t { NEWTX, NOADAPT_RETX, ADAPT_RETX, MSG3 };
     size_t dci_idx;
     type_t type;
     sched_ue* user_ptr;
-    ul_harq_proc::ul_alloc_t alloc;
+    prb_interval alloc;
     uint32_t mcs = 0;
     bool is_retx() const { return type == NOADAPT_RETX or type == ADAPT_RETX; }
     bool is_msg3() const { return type == MSG3; }
     bool needs_pdcch() const { return type == NEWTX or type == ADAPT_RETX; }
   };
   struct pending_msg3_t {
     uint16_t rnti = 0;
@@ -290,8 +290,7 @@ public:
   // UL alloc methods
   alloc_outcome_t alloc_msg3(sched_ue* user, const sched_interface::dl_sched_rar_grant_t& rargrant);
-  alloc_outcome_t
-  alloc_ul(sched_ue* user, ul_harq_proc::ul_alloc_t alloc, sf_sched::ul_alloc_t::type_t alloc_type, uint32_t mcs = 0);
+  alloc_outcome_t alloc_ul(sched_ue* user, prb_interval alloc, ul_alloc_t::type_t alloc_type, uint32_t mcs = 0);
   bool reserve_ul_prbs(const prbmask_t& ulmask, bool strict) { return tti_alloc.reserve_ul_prbs(ulmask, strict); }
   bool alloc_phich(sched_ue* user, sched_interface::ul_sched_res_t* ul_sf_result);
@@ -304,7 +303,7 @@ public:
   uint32_t get_nof_ctrl_symbols() const final;
   const rbgmask_t& get_dl_mask() const final { return tti_alloc.get_dl_mask(); }
   // ul_tti_sched itf
-  alloc_outcome_t alloc_ul_user(sched_ue* user, ul_harq_proc::ul_alloc_t alloc) final;
+  alloc_outcome_t alloc_ul_user(sched_ue* user, prb_interval alloc) final;
   const prbmask_t& get_ul_mask() const final { return tti_alloc.get_ul_mask(); }
   uint32_t get_tti_tx_ul() const final { return tti_params.tti_tx_ul; }
@@ -316,12 +315,7 @@ public:
 private:
   ctrl_code_t alloc_dl_ctrl(uint32_t aggr_lvl, uint32_t tbs_bytes, uint16_t rnti);
-  int generate_format1a(uint32_t rb_start,
-                        uint32_t l_crb,
-                        uint32_t tbs,
-                        uint32_t rv,
-                        uint16_t rnti,
-                        srslte_dci_dl_t* dci);
+  int generate_format1a(prb_interval prb_range, uint32_t tbs, uint32_t rv, uint16_t rnti, srslte_dci_dl_t* dci);
   void set_bc_sched_result(const pdcch_grid_t::alloc_result_t& dci_result, sched_interface::dl_sched_res_t* dl_result);
   void set_rar_sched_result(const pdcch_grid_t::alloc_result_t& dci_result, sched_interface::dl_sched_res_t* dl_result);
   void set_dl_data_sched_result(const pdcch_grid_t::alloc_result_t& dci_result,

@@ -90,24 +90,13 @@ private:
 class ul_harq_proc : public harq_proc
 {
 public:
-  struct ul_alloc_t {
-    uint32_t RB_start;
-    uint32_t L;
-    void set(uint32_t start, uint32_t len)
-    {
-      RB_start = start;
-      L = len;
-    }
-    uint32_t RB_end() const { return RB_start + L; }
-  };
-
-  void new_tx(uint32_t tti, int mcs, int tbs, ul_alloc_t alloc, uint32_t max_retx_);
-  void new_retx(uint32_t tb_idx, uint32_t tti_, int* mcs, int* tbs, ul_alloc_t alloc);
+  void new_tx(uint32_t tti, int mcs, int tbs, prb_interval alloc, uint32_t max_retx_);
+  void new_retx(uint32_t tb_idx, uint32_t tti_, int* mcs, int* tbs, prb_interval alloc);
   bool set_ack(uint32_t tb_idx, bool ack);
-  ul_alloc_t get_alloc() const;
+  prb_interval get_alloc() const;
   bool has_pending_retx() const;
   bool is_adaptive_retx() const;
   void reset_pending_data();
   bool has_pending_ack() const;
@@ -115,10 +104,10 @@ public:
   uint32_t get_pending_data() const;

 private:
-  ul_alloc_t allocation;
+  prb_interval allocation;
   int pending_data;
   bool is_adaptive;
   ack_t pending_ack;
 };

 class harq_entity

@@ -50,7 +50,7 @@ public:
   void sched_users(std::map<uint16_t, sched_ue>& ue_db, ul_sf_sched_itf* tti_sched) final;

 private:
-  bool find_allocation(uint32_t L, ul_harq_proc::ul_alloc_t* alloc);
+  bool find_allocation(uint32_t L, prb_interval* alloc);
   ul_harq_proc* allocate_user_newtx_prbs(sched_ue* user);
   ul_harq_proc* allocate_user_retx_prbs(sched_ue* user);

@@ -172,12 +172,12 @@ public:
   uint32_t get_required_prb_ul(uint32_t cc_idx, uint32_t req_bytes);
-  rbg_range_t get_required_dl_rbgs(uint32_t ue_cc_idx);
-  std::pair<uint32_t, uint32_t> get_requested_dl_bytes(uint32_t ue_cc_idx);
+  rbg_interval get_required_dl_rbgs(uint32_t ue_cc_idx);
+  srslte::interval<uint32_t> get_requested_dl_bytes(uint32_t ue_cc_idx);
   uint32_t get_pending_dl_new_data();
   uint32_t get_pending_ul_new_data(uint32_t tti);
   uint32_t get_pending_ul_old_data(uint32_t cc_idx);
   uint32_t get_pending_dl_new_data_total();
   dl_harq_proc* get_pending_dl_harq(uint32_t tti_tx_dl, uint32_t cc_idx);
   dl_harq_proc* get_empty_dl_harq(uint32_t tti_tx_dl, uint32_t cc_idx);
@@ -205,7 +205,7 @@ public:
   int generate_format0(sched_interface::ul_sched_data_t* data,
                        uint32_t tti,
                        uint32_t cc_idx,
-                       ul_harq_proc::ul_alloc_t alloc,
+                       prb_interval alloc,
                        bool needs_pdcch,
                        srslte_dci_location_t cce_range,
                        int explicit_mcs = -1,

@@ -450,24 +450,24 @@ void sched_cell_params_t::regs_deleter::operator()(srslte_regs_t* p)
   }
 }

-rbg_range_t rbg_range_t::prbs_to_rbgs(const prb_range_t& prbs, uint32_t P)
+rbg_interval rbg_interval::prbs_to_rbgs(const prb_interval& prbs, uint32_t P)
 {
-  return rbg_range_t{srslte::ceil_div(prbs.prb_min, P), srslte::ceil_div(prbs.prb_min, P)};
+  return rbg_interval{srslte::ceil_div(prbs.start, P), srslte::ceil_div(prbs.stop, P)};
 }

-prb_range_t prb_range_t::rbgs_to_prbs(const rbg_range_t& rbgs, uint32_t P)
+prb_interval prb_interval::rbgs_to_prbs(const rbg_interval& rbgs, uint32_t P)
 {
-  return prb_range_t{rbgs.rbg_min * P, rbgs.rbg_max * P};
+  return prb_interval{rbgs.start * P, rbgs.stop * P};
 }

-prb_range_t prb_range_t::riv_to_prbs(uint32_t riv, uint32_t nof_prbs, int nof_vrbs)
+prb_interval prb_interval::riv_to_prbs(uint32_t riv, uint32_t nof_prbs, int nof_vrbs)
 {
-  prb_range_t p;
+  prb_interval p;
   if (nof_vrbs < 0) {
     nof_vrbs = nof_prbs;
   }
-  srslte_ra_type2_from_riv(riv, &p.prb_max, &p.prb_min, nof_prbs, (uint32_t)nof_vrbs);
-  p.prb_max += p.prb_min;
+  srslte_ra_type2_from_riv(riv, &p.stop, &p.start, nof_prbs, (uint32_t)nof_vrbs);
+  p.stop += p.start;
   return p;
 }
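
For context, riv_to_prbs decodes the 3GPP resource-allocation-type-2 localized RIV: srslte_ra_type2_from_riv yields the allocation length and starting PRB, and the interval stop is then start + length. Worked numbers, assuming the standard encoding RIV = N_prb * (L_crb - 1) + RB_start for L_crb - 1 <= floor(N_prb / 2): with N_prb = 100, RB_start = 10 and L_crb = 5, RIV = 100 * 4 + 10 = 410, and prb_interval::riv_to_prbs(410, 100) yields the half-open interval [10, 15).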

@@ -444,9 +444,9 @@ alloc_outcome_t sf_grid_t::alloc_dl(uint32_t aggr_idx, alloc_type_t alloc_type,
 //! Allocates CCEs and RBs for control allocs. It allocates RBs in a contiguous manner.
 sf_grid_t::dl_ctrl_alloc_t sf_grid_t::alloc_dl_ctrl(uint32_t aggr_idx, alloc_type_t alloc_type)
 {
-  rbg_range_t range;
-  range.rbg_min = nof_rbgs - avail_rbg;
-  range.rbg_max = range.rbg_min + ((alloc_type == alloc_type_t::DL_RAR) ? rar_n_rbg : si_n_rbg);
+  rbg_interval range;
+  range.start = nof_rbgs - avail_rbg;
+  range.set_length((alloc_type == alloc_type_t::DL_RAR) ? rar_n_rbg : si_n_rbg);
   if (alloc_type != alloc_type_t::DL_RAR and alloc_type != alloc_type_t::DL_BC and
       alloc_type != alloc_type_t::DL_PCCH) {
@@ -454,13 +454,13 @@ sf_grid_t::dl_ctrl_alloc_t sf_grid_t::alloc_dl_ctrl(uint32_t aggr_idx, alloc_typ
     return {alloc_outcome_t::ERROR, range};
   }
   // Setup range starting from left
-  if (range.rbg_max > nof_rbgs) {
+  if (range.stop > nof_rbgs) {
     return {alloc_outcome_t::RB_COLLISION, range};
   }
   // allocate DCI and RBGs
   rbgmask_t new_mask(dl_mask.size());
-  new_mask.fill(range.rbg_min, range.rbg_max);
+  new_mask.fill(range.start, range.stop);
   return {alloc_dl(aggr_idx, alloc_type, new_mask), range};
 }
@@ -475,14 +475,14 @@ alloc_outcome_t sf_grid_t::alloc_dl_data(sched_ue* user, const rbgmask_t& user_m
   return ret;
 }

-alloc_outcome_t sf_grid_t::alloc_ul_data(sched_ue* user, ul_harq_proc::ul_alloc_t alloc, bool needs_pdcch)
+alloc_outcome_t sf_grid_t::alloc_ul_data(sched_ue* user, prb_interval alloc, bool needs_pdcch)
 {
-  if (alloc.RB_start + alloc.L > ul_mask.size()) {
+  if (alloc.stop > ul_mask.size()) {
     return alloc_outcome_t::ERROR;
   }
   prbmask_t newmask(ul_mask.size());
-  newmask.fill(alloc.RB_start, alloc.RB_start + alloc.L);
+  newmask.fill(alloc.start, alloc.stop);
   if ((ul_mask & newmask).any()) {
     return alloc_outcome_t::RB_COLLISION;
   }
@@ -531,34 +531,34 @@ bool sf_grid_t::reserve_ul_prbs(const prbmask_t& prbmask, bool strict)
  * @param alloc Found allocation. It is guaranteed that 0 <= alloc->length() <= L
  * @return true if the requested allocation of size L was strictly met
  */
-bool sf_grid_t::find_ul_alloc(uint32_t L, ul_harq_proc::ul_alloc_t* alloc) const
+bool sf_grid_t::find_ul_alloc(uint32_t L, prb_interval* alloc) const
 {
   *alloc = {};
-  for (uint32_t n = 0; n < ul_mask.size() && alloc->L < L; n++) {
-    if (not ul_mask.test(n) && alloc->L == 0) {
-      alloc->RB_start = n;
+  for (uint32_t n = 0; n < ul_mask.size() && alloc->length() < L; n++) {
+    if (not ul_mask.test(n) && alloc->length() == 0) {
+      alloc->shift_to(n);
     }
     if (not ul_mask.test(n)) {
-      alloc->L++;
-    } else if (alloc->L > 0) {
+      alloc->stop++;
+    } else if (alloc->length() > 0) {
       // avoid edges
       if (n < 3) {
-        alloc->RB_start = 0;
-        alloc->L = 0;
+        alloc->start = 0;
+        alloc->stop = 0;
       } else {
         break;
       }
     }
   }
-  if (alloc->L == 0) {
+  if (alloc->length() == 0) {
     return false;
   }

   // Make sure L is allowed by SC-FDMA modulation
-  while (!srslte_dft_precoding_valid_prb(alloc->L)) {
-    alloc->L--;
+  while (!srslte_dft_precoding_valid_prb(alloc->length())) {
+    alloc->stop--;
   }
-  return alloc->L == L;
+  return alloc->length() == L;
 }
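
The search above first grows a candidate window over free PRBs, then shrinks it until the length is valid for SC-FDMA (PUSCH allocations must factor as 2^a * 3^b * 5^c). A self-contained sketch of the same shrink step, with a hypothetical predicate standing in for srslte_dft_precoding_valid_prb:

#include "srslte/adt/interval.h"
#include <cstdint>

// Hypothetical stand-in: true iff n > 0 and n factors as 2^a * 3^b * 5^c.
static bool valid_sc_fdma_len(uint32_t n)
{
  if (n == 0) {
    return false;
  }
  for (uint32_t f : {2u, 3u, 5u}) {
    while (n % f == 0) {
      n /= f;
    }
  }
  return n == 1;
}

// Shrink a candidate half-open PRB window until its length is DFT-precodable.
// Terminates because a length of 1 is always valid.
static void shrink_to_valid(srslte::interval<uint32_t>* alloc)
{
  while (alloc->length() > 0 and not valid_sc_fdma_len(alloc->length())) {
    alloc->stop--;
  }
}
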
 /*******************************************************
@@ -757,8 +757,8 @@ alloc_outcome_t sf_sched::alloc_dl_user(sched_ue* user, const rbgmask_t& user_ma
   const dl_harq_proc& h = user->get_dl_harq(pid, ue_cc_idx);
   if (h.is_empty()) {
     // It is newTx
-    rbg_range_t r = user->get_required_dl_rbgs(ue_cc_idx);
-    if (r.rbg_min > user_mask.count()) {
+    rbg_interval r = user->get_required_dl_rbgs(ue_cc_idx);
+    if (r.start > user_mask.count()) {
       log_h->warning("The number of RBGs allocated to rnti=0x%x will force segmentation\n", user->get_rnti());
       return alloc_outcome_t::NOF_RB_INVALID;
     }
@@ -771,10 +771,10 @@ alloc_outcome_t sf_sched::alloc_dl_user(sched_ue* user, const rbgmask_t& user_ma
   bool has_pusch_grant = is_ul_alloc(user) or cc_results->is_ul_alloc(user->get_rnti());
   if (not has_pusch_grant) {
     // Try to allocate small PUSCH grant, if there are no allocated PUSCH grants for this TTI yet
-    ul_harq_proc::ul_alloc_t alloc = {};
+    prb_interval alloc = {};
     uint32_t L = user->get_required_prb_ul(ue_cc_idx, srslte::ceil_div(SRSLTE_UCI_CQI_CODED_PUCCH_B + 2, 8));
     tti_alloc.find_ul_alloc(L, &alloc);
-    if (ue_cc_idx != 0 and (alloc.L == 0 or not alloc_ul_user(user, alloc))) {
+    if (ue_cc_idx != 0 and (alloc.length() == 0 or not alloc_ul_user(user, alloc))) {
       // For SCells, if we can't allocate small PUSCH grant, abort DL allocation
       return alloc_outcome_t::PUCCH_COLLISION;
     }
@@ -798,10 +798,7 @@ alloc_outcome_t sf_sched::alloc_dl_user(sched_ue* user, const rbgmask_t& user_ma
   return alloc_outcome_t::SUCCESS;
 }

-alloc_outcome_t sf_sched::alloc_ul(sched_ue* user,
-                                   ul_harq_proc::ul_alloc_t alloc,
-                                   sf_sched::ul_alloc_t::type_t alloc_type,
-                                   uint32_t mcs)
+alloc_outcome_t sf_sched::alloc_ul(sched_ue* user, prb_interval alloc, ul_alloc_t::type_t alloc_type, uint32_t mcs)
 {
   // Check whether user was already allocated
   if (is_ul_alloc(user)) {
@@ -827,15 +824,15 @@ alloc_outcome_t sf_sched::alloc_ul(sched_ue* user,
   return alloc_outcome_t::SUCCESS;
 }

-alloc_outcome_t sf_sched::alloc_ul_user(sched_ue* user, ul_harq_proc::ul_alloc_t alloc)
+alloc_outcome_t sf_sched::alloc_ul_user(sched_ue* user, prb_interval alloc)
 {
   // check whether adaptive/non-adaptive retx/newtx
-  sf_sched::ul_alloc_t::type_t alloc_type;
+  ul_alloc_t::type_t alloc_type;
   ul_harq_proc* h = user->get_ul_harq(get_tti_tx_ul(), user->get_cell_index(cc_cfg->enb_cc_idx).second);
   bool has_retx = h->has_pending_retx();
   if (has_retx) {
-    ul_harq_proc::ul_alloc_t prev_alloc = h->get_alloc();
-    if (prev_alloc.L == alloc.L and prev_alloc.RB_start == alloc.RB_start) {
+    prb_interval prev_alloc = h->get_alloc();
+    if (prev_alloc == alloc) {
       alloc_type = ul_alloc_t::NOADAPT_RETX;
     } else {
       alloc_type = ul_alloc_t::ADAPT_RETX;
@@ -885,17 +882,16 @@ void sf_sched::set_bc_sched_result(const pdcch_grid_t::alloc_result_t& dci_resul
     bc->dci.location = dci_result[bc_alloc.dci_idx]->dci_pos;

     /* Generate DCI format1A */
-    prb_range_t prb_range = prb_range_t::rbgs_to_prbs(bc_alloc.rbg_range, cc_cfg->P);
-    int tbs = generate_format1a(
-        prb_range.prb_min, prb_range.nof_prbs(), bc_alloc.req_bytes, bc_alloc.rv, bc_alloc.rnti, &bc->dci);
+    prb_interval prb_range = prb_interval::rbgs_to_prbs(bc_alloc.rbg_range, cc_cfg->P);
+    int tbs = generate_format1a(prb_range, bc_alloc.req_bytes, bc_alloc.rv, bc_alloc.rnti, &bc->dci);

     // Setup BC/Paging processes
     if (bc_alloc.alloc_type == alloc_type_t::DL_BC) {
       if (tbs <= (int)bc_alloc.req_bytes) {
         log_h->warning("SCHED: Error SIB%d, rbgs=(%d,%d), dci=(%d,%d), len=%d\n",
                        bc_alloc.sib_idx + 1,
-                       bc_alloc.rbg_range.rbg_min,
-                       bc_alloc.rbg_range.rbg_max,
+                       bc_alloc.rbg_range.start,
+                       bc_alloc.rbg_range.stop,
                        bc->dci.location.L,
                        bc->dci.location.ncce,
                        bc_alloc.req_bytes);
@@ -909,8 +905,8 @@ void sf_sched::set_bc_sched_result(const pdcch_grid_t::alloc_result_t& dci_resul
       log_h->debug("SCHED: SIB%d, rbgs=(%d,%d), dci=(%d,%d), rv=%d, len=%d, period=%d, mcs=%d\n",
                    bc_alloc.sib_idx + 1,
-                   bc_alloc.rbg_range.rbg_min,
-                   bc_alloc.rbg_range.rbg_max,
+                   bc_alloc.rbg_range.start,
+                   bc_alloc.rbg_range.stop,
                    bc->dci.location.L,
                    bc->dci.location.ncce,
                    bc_alloc.rv,
@@ -920,9 +916,8 @@ void sf_sched::set_bc_sched_result(const pdcch_grid_t::alloc_result_t& dci_resul
     } else {
       // Paging
       if (tbs <= 0) {
-        log_h->warning("SCHED: Error Paging, rbgs=(%d,%d), dci=(%d,%d)\n",
-                       bc_alloc.rbg_range.rbg_min,
-                       bc_alloc.rbg_range.rbg_max,
+        log_h->warning("SCHED: Error Paging, rbgs=%s, dci=(%d,%d)\n",
+                       bc_alloc.rbg_range.to_string().c_str(),
                        bc->dci.location.L,
                        bc->dci.location.ncce);
         continue;
@@ -932,9 +927,8 @@ void sf_sched::set_bc_sched_result(const pdcch_grid_t::alloc_result_t& dci_resul
       bc->type = sched_interface::dl_sched_bc_t::PCCH;
       bc->tbs = (uint32_t)tbs;
-      log_h->info("SCHED: PCH, rbgs=(%d,%d), dci=(%d,%d), tbs=%d, mcs=%d\n",
-                  bc_alloc.rbg_range.rbg_min,
-                  bc_alloc.rbg_range.rbg_max,
+      log_h->info("SCHED: PCH, rbgs=%s, dci=(%d,%d), tbs=%d, mcs=%d\n",
+                  bc_alloc.rbg_range.to_string().c_str(),
                   bc->dci.location.L,
                   bc->dci.location.ncce,
                   tbs,
@@ -955,18 +949,12 @@ void sf_sched::set_rar_sched_result(const pdcch_grid_t::alloc_result_t& dci_resu
     rar->dci.location = dci_result[rar_alloc.alloc_data.dci_idx]->dci_pos;

     /* Generate DCI format1A */
-    prb_range_t prb_range = prb_range_t::rbgs_to_prbs(rar_alloc.alloc_data.rbg_range, cc_cfg->P);
-    int tbs = generate_format1a(prb_range.prb_min,
-                                prb_range.nof_prbs(),
-                                rar_alloc.alloc_data.req_bytes,
-                                0,
-                                rar_alloc.alloc_data.rnti,
-                                &rar->dci);
+    prb_interval prb_range = prb_interval::rbgs_to_prbs(rar_alloc.alloc_data.rbg_range, cc_cfg->P);
+    int tbs = generate_format1a(prb_range, rar_alloc.alloc_data.req_bytes, 0, rar_alloc.alloc_data.rnti, &rar->dci);
     if (tbs <= 0) {
-      log_h->warning("SCHED: Error RAR, ra_rnti_idx=%d, rbgs=(%d,%d), dci=(%d,%d)\n",
+      log_h->warning("SCHED: Error RAR, ra_rnti_idx=%d, rbgs=%s, dci=(%d,%d)\n",
                      rar_alloc.alloc_data.rnti,
-                     rar_alloc.alloc_data.rbg_range.rbg_min,
-                     rar_alloc.alloc_data.rbg_range.rbg_max,
+                     rar_alloc.alloc_data.rbg_range.to_string().c_str(),
                      rar->dci.location.L,
                      rar->dci.location.ncce);
       continue;
@@ -981,12 +969,11 @@ void sf_sched::set_rar_sched_result(const pdcch_grid_t::alloc_result_t& dci_resu
     for (uint32_t i = 0; i < rar->nof_grants; ++i) {
       const auto& msg3_grant = rar->msg3_grant[i];
       uint16_t expected_rnti = msg3_grant.data.temp_crnti;
-      log_h->info("SCHED: RAR, temp_crnti=0x%x, ra-rnti=%d, rbgs=(%d,%d), dci=(%d,%d), rar_grant_rba=%d, "
+      log_h->info("SCHED: RAR, temp_crnti=0x%x, ra-rnti=%d, rbgs=%s, dci=(%d,%d), rar_grant_rba=%d, "
                   "rar_grant_mcs=%d\n",
                   expected_rnti,
                   rar_alloc.alloc_data.rnti,
-                  rar_alloc.alloc_data.rbg_range.rbg_min,
-                  rar_alloc.alloc_data.rbg_range.rbg_max,
+                  rar_alloc.alloc_data.rbg_range.to_string().c_str(),
                   rar->dci.location.L,
                   rar->dci.location.ncce,
                   msg3_grant.grant.rba,
@@ -1142,21 +1129,20 @@ void sf_sched::set_ul_sched_result(const pdcch_grid_t::alloc_result_t& dci_resul
     ul_harq_proc* h = user->get_ul_harq(get_tti_tx_ul(), cell_index);
     if (tbs <= 0) {
-      log_h->warning("SCHED: Error %s %s rnti=0x%x, pid=%d, dci=(%d,%d), prb=(%d,%d), bsr=%d\n",
+      log_h->warning("SCHED: Error %s %s rnti=0x%x, pid=%d, dci=(%d,%d), prb=%s, bsr=%d\n",
                      ul_alloc.type == ul_alloc_t::MSG3 ? "Msg3" : "UL",
                      ul_alloc.is_retx() ? "retx" : "tx",
                      user->get_rnti(),
                      h->get_id(),
                      pusch->dci.location.L,
                      pusch->dci.location.ncce,
-                     ul_alloc.alloc.RB_start,
-                     ul_alloc.alloc.RB_start + ul_alloc.alloc.L,
+                     ul_alloc.alloc.to_string().c_str(),
                      user->get_pending_ul_new_data(get_tti_tx_ul()));
       continue;
     }

     // Print Resulting UL Allocation
-    log_h->info("SCHED: %s %s rnti=0x%x, cc=%d, pid=%d, dci=(%d,%d), prb=(%d,%d), n_rtx=%d, tbs=%d, bsr=%d (%d-%d)\n",
+    log_h->info("SCHED: %s %s rnti=0x%x, cc=%d, pid=%d, dci=(%d,%d), prb=%s, n_rtx=%d, tbs=%d, bsr=%d (%d-%d)\n",
                 ul_alloc.is_msg3() ? "Msg3" : "UL",
                 ul_alloc.is_retx() ? "retx" : "tx",
                 user->get_rnti(),
@@ -1164,8 +1150,7 @@ void sf_sched::set_ul_sched_result(const pdcch_grid_t::alloc_result_t& dci_resul
                 h->get_id(),
                 pusch->dci.location.L,
                 pusch->dci.location.ncce,
-                ul_alloc.alloc.RB_start,
-                ul_alloc.alloc.RB_start + ul_alloc.alloc.L,
+                ul_alloc.alloc.to_string().c_str(),
                 h->nof_retx(0),
                 tbs,
                 user->get_pending_ul_new_data(get_tti_tx_ul()),
@@ -1179,14 +1164,14 @@ void sf_sched::set_ul_sched_result(const pdcch_grid_t::alloc_result_t& dci_resul
 alloc_outcome_t sf_sched::alloc_msg3(sched_ue* user, const sched_interface::dl_sched_rar_grant_t& rargrant)
 {
   // Derive PRBs from allocated RAR grants
-  ul_harq_proc::ul_alloc_t msg3_alloc = {};
-  srslte_ra_type2_from_riv(
-      rargrant.grant.rba, &msg3_alloc.L, &msg3_alloc.RB_start, cc_cfg->nof_prb(), cc_cfg->nof_prb());
+  prb_interval msg3_alloc = {};
+  uint32_t L;
+  srslte_ra_type2_from_riv(rargrant.grant.rba, &L, &msg3_alloc.start, cc_cfg->nof_prb(), cc_cfg->nof_prb());
+  msg3_alloc.set_length(L);
   alloc_outcome_t ret = alloc_ul(user, msg3_alloc, sf_sched::ul_alloc_t::MSG3, rargrant.grant.trunc_mcs);
   if (not ret) {
-    log_h->warning(
-        "SCHED: Could not allocate msg3 within (%d,%d)\n", msg3_alloc.RB_start, msg3_alloc.RB_start + msg3_alloc.L);
+    log_h->warning("SCHED: Could not allocate msg3 within %s\n", msg3_alloc.to_string().c_str());
   }
   return ret;
 }
@@ -1223,8 +1208,7 @@ uint32_t sf_sched::get_nof_ctrl_symbols() const
   return tti_alloc.get_cfi() + ((cc_cfg->cfg.cell.nof_prb <= 10) ? 1 : 0);
 }

-int sf_sched::generate_format1a(uint32_t rb_start,
-                                uint32_t l_crb,
+int sf_sched::generate_format1a(prb_interval prb_range,
                                 uint32_t tbs_bytes,
                                 uint32_t rv,
                                 uint16_t rnti,
@@ -1262,7 +1246,7 @@ int sf_sched::generate_format1a(uint32_t rb_start,
   dci->alloc_type = SRSLTE_RA_ALLOC_TYPE2;
   dci->type2_alloc.mode = srslte_ra_type2_t::SRSLTE_RA_TYPE2_LOC;
-  dci->type2_alloc.riv = srslte_ra_type2_to_riv(l_crb, rb_start, cc_cfg->cfg.cell.nof_prb);
+  dci->type2_alloc.riv = srslte_ra_type2_to_riv(prb_range.length(), prb_range.start, cc_cfg->cfg.cell.nof_prb);
   dci->pid = 0;
   dci->tb[0].mcs_idx = mcs;
   dci->tb[0].rv = rv;

@@ -228,7 +228,7 @@ void dl_harq_proc::reset_pending_data()
  * UE::UL HARQ class *
  ******************************************************/

-ul_harq_proc::ul_alloc_t ul_harq_proc::get_alloc() const
+prb_interval ul_harq_proc::get_alloc() const
 {
   return allocation;
 }
@@ -243,7 +243,7 @@ bool ul_harq_proc::is_adaptive_retx() const
   return is_adaptive and has_pending_retx();
 }

-void ul_harq_proc::new_tx(uint32_t tti_, int mcs, int tbs, ul_harq_proc::ul_alloc_t alloc, uint32_t max_retx_)
+void ul_harq_proc::new_tx(uint32_t tti_, int mcs, int tbs, prb_interval alloc, uint32_t max_retx_)
 {
   max_retx = (uint32_t)max_retx_;
   is_adaptive = false;
@@ -253,9 +253,9 @@ void ul_harq_proc::new_tx(uint32_t tti_, int mcs, int tbs, ul_harq_proc::ul_allo
   pending_ack = NULL_ACK;
 }

-void ul_harq_proc::new_retx(uint32_t tb_idx, uint32_t tti_, int* mcs, int* tbs, ul_harq_proc::ul_alloc_t alloc)
+void ul_harq_proc::new_retx(uint32_t tb_idx, uint32_t tti_, int* mcs, int* tbs, prb_interval alloc)
 {
-  is_adaptive = alloc.L != allocation.L or alloc.RB_start != allocation.RB_start;
+  is_adaptive = alloc != allocation;
   allocation = alloc;
   new_retx_common(tb_idx, tti_point{tti_}, mcs, tbs);
 }
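
With prb_interval providing operator==/operator!=, the adaptive-retx check collapses to a single comparison instead of comparing RB_start and L separately. A trivial illustration (hypothetical values):

prb_interval prev{4, 10};
prb_interval cur{4, 9};
bool adaptive = (cur != prev); // true: the bounds differ, so the retx is adaptive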

@@ -131,10 +131,10 @@ dl_harq_proc* dl_metric_rr::allocate_user(sched_ue* user)
   h = user->get_empty_dl_harq(tti_dl, cell_idx);
   if (h != nullptr) {
     // Allocate resources based on pending data
-    rbg_range_t req_rbgs = user->get_required_dl_rbgs(cell_idx);
-    if (req_rbgs.rbg_min > 0) {
+    rbg_interval req_rbgs = user->get_required_dl_rbgs(cell_idx);
+    if (req_rbgs.start > 0) {
       rbgmask_t newtx_mask(tti_alloc->get_dl_mask().size());
-      if (find_allocation(req_rbgs.rbg_min, req_rbgs.rbg_max, &newtx_mask)) {
+      if (find_allocation(req_rbgs.start, req_rbgs.stop, &newtx_mask)) {
         // some empty spaces were found
         code = tti_alloc->alloc_dl_user(user, newtx_mask, h->get_id());
         if (code == alloc_outcome_t::SUCCESS) {
@@ -203,35 +203,34 @@ void ul_metric_rr::sched_users(std::map<uint16_t, sched_ue>& ue_db, ul_sf_sched_
  * @param alloc Found allocation. It is guaranteed that 0 <= alloc->length() <= L
  * @return true if the requested allocation of size L was strictly met
  */
-bool ul_metric_rr::find_allocation(uint32_t L, ul_harq_proc::ul_alloc_t* alloc)
+bool ul_metric_rr::find_allocation(uint32_t L, prb_interval* alloc)
 {
   const prbmask_t* used_rb = &tti_alloc->get_ul_mask();
-  bzero(alloc, sizeof(ul_harq_proc::ul_alloc_t));
-  for (uint32_t n = 0; n < used_rb->size() && alloc->L < L; n++) {
-    if (not used_rb->test(n) && alloc->L == 0) {
-      alloc->RB_start = n;
+  *alloc = {};
+  for (uint32_t n = 0; n < used_rb->size() && alloc->length() < L; n++) {
+    if (not used_rb->test(n) && alloc->length() == 0) {
+      alloc->shift_to(n);
     }
     if (not used_rb->test(n)) {
-      alloc->L++;
-    } else if (alloc->L > 0) {
+      alloc->stop++;
+    } else if (alloc->length() > 0) {
       // avoid edges
       if (n < 3) {
-        alloc->RB_start = 0;
-        alloc->L = 0;
+        *alloc = {};
       } else {
         break;
       }
     }
   }
-  if (alloc->L == 0) {
+  if (alloc->length() == 0) {
     return false;
   }

   // Make sure L is allowed by SC-FDMA modulation
-  while (!srslte_dft_precoding_valid_prb(alloc->L)) {
-    alloc->L--;
+  while (!srslte_dft_precoding_valid_prb(alloc->length())) {
+    alloc->stop--;
   }
-  return alloc->L == L;
+  return alloc->length() == L;
 }

 ul_harq_proc* ul_metric_rr::allocate_user_retx_prbs(sched_ue* user)
@@ -251,7 +250,7 @@ ul_harq_proc* ul_metric_rr::allocate_user_retx_prbs(sched_ue* user)
     // if there are procedures and we have space
     if (h->has_pending_retx()) {
-      ul_harq_proc::ul_alloc_t alloc = h->get_alloc();
+      prb_interval alloc = h->get_alloc();

       // If can schedule the same mask, do it
       ret = tti_alloc->alloc_ul_user(user, alloc);
@@ -263,7 +262,7 @@ ul_harq_proc* ul_metric_rr::allocate_user_retx_prbs(sched_ue* user)
         return nullptr;
       }

-      if (find_allocation(alloc.L, &alloc)) {
+      if (find_allocation(alloc.length(), &alloc)) {
         ret = tti_alloc->alloc_ul_user(user, alloc);
         if (ret == alloc_outcome_t::SUCCESS) {
           return h;
@@ -293,11 +292,11 @@ ul_harq_proc* ul_metric_rr::allocate_user_newtx_prbs(sched_ue* user)
     // find an empty PID
     if (h->is_empty(0) and pending_data > 0) {
       uint32_t pending_rb = user->get_required_prb_ul(cell_idx, pending_data);
-      ul_harq_proc::ul_alloc_t alloc{};
+      prb_interval alloc{};
       find_allocation(pending_rb, &alloc);
-      if (alloc.L > 0) { // at least one PRB was scheduled
+      if (alloc.length() > 0) { // at least one PRB was scheduled
         alloc_outcome_t ret = tti_alloc->alloc_ul_user(user, alloc);
         if (ret == alloc_outcome_t::SUCCESS) {
           return h;

@@ -524,8 +524,8 @@ std::pair<int, int> sched_ue::compute_mcs_and_tbs(uint32_t ue_cc_i
                                                   uint32_t cfi,
                                                   const srslte_dci_dl_t& dci)
 {
   int mcs = 0, tbs_bytes = 0;
-  std::pair<uint32_t, uint32_t> req_bytes = get_requested_dl_bytes(ue_cc_idx);
+  srslte::interval<uint32_t> req_bytes = get_requested_dl_bytes(ue_cc_idx);

   // Calculate exact number of RE for this PRB allocation
   srslte_pdsch_grant_t grant = {};
@@ -539,7 +539,7 @@ std::pair<int, int> sched_ue::compute_mcs_and_tbs(uint32_t ue_cc_i
   // Use a higher MCS for the Msg4 to fit in the 6 PRB case
   if (carriers[ue_cc_idx].fixed_mcs_dl < 0 or not carriers[ue_cc_idx].dl_cqi_rx) {
     // Dynamic MCS
-    tbs_bytes = carriers[ue_cc_idx].alloc_tbs_dl(nof_alloc_prbs, nof_re, req_bytes.second, &mcs);
+    tbs_bytes = carriers[ue_cc_idx].alloc_tbs_dl(nof_alloc_prbs, nof_re, req_bytes.stop, &mcs);
   } else {
     // Fixed MCS
     mcs = carriers[ue_cc_idx].fixed_mcs_dl;
@@ -549,7 +549,7 @@ std::pair<int, int> sched_ue::compute_mcs_and_tbs(uint32_t ue_cc_i
   // If the number of prbs is not sufficient to fit minimum required bytes, increase the mcs
   // NOTE: this may happen during ConRes CE tx when DL-CQI is still not available
-  while (tbs_bytes > 0 and (uint32_t)tbs_bytes < req_bytes.first and mcs < 28) {
+  while (tbs_bytes > 0 and (uint32_t)tbs_bytes < req_bytes.start and mcs < 28) {
     mcs++;
     tbs_bytes = sched_utils::get_tbs_bytes((uint32_t)mcs, nof_alloc_prbs, cfg.use_tbs_index_alt, false);
   }
@@ -666,7 +666,7 @@ int sched_ue::generate_format2(uint32_t pid,
 int sched_ue::generate_format0(sched_interface::ul_sched_data_t* data,
                                uint32_t tti,
                                uint32_t cc_idx,
-                               ul_harq_proc::ul_alloc_t alloc,
+                               prb_interval alloc,
                                bool needs_pdcch,
                                srslte_dci_location_t dci_pos,
                                int explicit_mcs,
@@ -692,18 +692,18 @@ int sched_ue::generate_format0(sched_interface::ul_sched_data_t* data,
   nof_retx = (data->needs_pdcch) ? get_max_retx() : max_msg3retx;

   if (mcs >= 0) {
-    tbs = srslte_ra_tbs_from_idx(srslte_ra_tbs_idx_from_mcs(mcs, false, true), alloc.L) / 8;
+    tbs = srslte_ra_tbs_from_idx(srslte_ra_tbs_idx_from_mcs(mcs, false, true), alloc.length()) / 8;
   } else {
     // dynamic mcs
     uint32_t req_bytes = get_pending_ul_new_data_unlocked(tti);
     uint32_t N_srs = 0;
-    uint32_t nof_re = (2 * (SRSLTE_CP_NSYMB(cell.cp) - 1) - N_srs) * alloc.L * SRSLTE_NRE;
-    tbs = carriers[cc_idx].alloc_tbs_ul(alloc.L, nof_re, req_bytes, &mcs);
+    uint32_t nof_re = (2 * (SRSLTE_CP_NSYMB(cell.cp) - 1) - N_srs) * alloc.length() * SRSLTE_NRE;
+    tbs = carriers[cc_idx].alloc_tbs_ul(alloc.length(), nof_re, req_bytes, &mcs);
     if (carries_uci) {
       // Reduce MCS to fit UCI
       mcs -= std::min(main_cc_params->sched_cfg->uci_mcs_dec, mcs);
-      tbs = srslte_ra_tbs_from_idx(srslte_ra_tbs_idx_from_mcs(mcs, false, true), alloc.L) / 8;
+      tbs = srslte_ra_tbs_from_idx(srslte_ra_tbs_idx_from_mcs(mcs, false, true), alloc.length()) / 8;
     }
   }
   h->new_tx(tti, mcs, tbs, alloc, nof_retx);
@@ -713,7 +713,7 @@ int sched_ue::generate_format0(sched_interface::ul_sched_data_t* data,
   } else {
     // retx
     h->new_retx(0, tti, &mcs, nullptr, alloc);
-    tbs = srslte_ra_tbs_from_idx(srslte_ra_tbs_idx_from_mcs(mcs, false, true), alloc.L) / 8;
+    tbs = srslte_ra_tbs_from_idx(srslte_ra_tbs_idx_from_mcs(mcs, false, true), alloc.length()) / 8;
   }

   data->tbs = tbs;
@@ -722,7 +722,7 @@ int sched_ue::generate_format0(sched_interface::ul_sched_data_t* data,
   dci->rnti = rnti;
   dci->format = SRSLTE_DCI_FORMAT0;
   dci->ue_cc_idx = cc_idx;
-  dci->type2_alloc.riv = srslte_ra_type2_to_riv(alloc.L, alloc.RB_start, cell.nof_prb);
+  dci->type2_alloc.riv = srslte_ra_type2_to_riv(alloc.length(), alloc.start, cell.nof_prb);
   dci->tb.rv = sched_utils::get_rvidx(h->nof_retx(0));
   if (!is_newtx && h->is_adaptive_retx()) {
     dci->tb.mcs_idx = 28 + dci->tb.rv;
@@ -793,25 +793,25 @@ uint32_t sched_ue::get_pending_dl_new_data_total()
  * @param ue_cc_idx carrier of the UE
  * @return range of number of RBGs that a UE can allocate in a given subframe
  */
-rbg_range_t sched_ue::get_required_dl_rbgs(uint32_t ue_cc_idx)
+rbg_interval sched_ue::get_required_dl_rbgs(uint32_t ue_cc_idx)
 {
-  std::pair<uint32_t, uint32_t> req_bytes = get_requested_dl_bytes(ue_cc_idx);
-  if (req_bytes.first == 0 and req_bytes.second == 0) {
+  srslte::interval<uint32_t> req_bytes = get_requested_dl_bytes(ue_cc_idx);
+  if (req_bytes == srslte::interval<uint32_t>{0, 0}) {
     return {0, 0};
   }
   const auto* cellparams = carriers[ue_cc_idx].get_cell_cfg();
   int pending_prbs =
-      carriers[ue_cc_idx].get_required_prb_dl(req_bytes.first, cellparams->sched_cfg->max_nof_ctrl_symbols);
+      carriers[ue_cc_idx].get_required_prb_dl(req_bytes.start, cellparams->sched_cfg->max_nof_ctrl_symbols);
   if (pending_prbs < 0) {
     // Cannot fit allocation in given PRBs
     log_h->error("SCHED: DL CQI=%d does not allow fitting %d non-segmentable DL tx bytes into the cell bandwidth. "
                  "Consider increasing initial CQI value.\n",
                  carriers[ue_cc_idx].dl_cqi,
-                 req_bytes.first);
+                 req_bytes.start);
     return {cellparams->nof_prb(), cellparams->nof_prb()};
   }
   uint32_t min_pending_rbg = cellparams->prb_to_rbg(pending_prbs);
-  pending_prbs = carriers[ue_cc_idx].get_required_prb_dl(req_bytes.second, cellparams->sched_cfg->max_nof_ctrl_symbols);
+  pending_prbs = carriers[ue_cc_idx].get_required_prb_dl(req_bytes.stop, cellparams->sched_cfg->max_nof_ctrl_symbols);
   pending_prbs = (pending_prbs < 0) ? cellparams->nof_prb() : pending_prbs;
   uint32_t max_pending_rbg = cellparams->prb_to_rbg(pending_prbs);
   return {min_pending_rbg, max_pending_rbg};
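
Note on semantics: here the returned rbg_interval is used as a {min, max} pair rather than a resource span, as the doxygen above states. A hypothetical reading of the result:

rbg_interval req = user->get_required_dl_rbgs(ue_cc_idx);
// req.start: minimum RBGs to tx without forcing RLC segmentation
// req.stop:  RBGs needed to drain all pending DL data
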
@@ -832,7 +832,7 @@ rbg_range_t sched_ue::get_required_dl_rbgs(uint32_t ue_cc_idx)
  * @ue_cc_idx carrier where allocation is being made
  * @return
  */
-std::pair<uint32_t, uint32_t> sched_ue::get_requested_dl_bytes(uint32_t ue_cc_idx)
+srslte::interval<uint32_t> sched_ue::get_requested_dl_bytes(uint32_t ue_cc_idx)
 {
   const uint32_t min_alloc_bytes = 5; // 2 for subheader, and 3 for RLC header
   // Convenience function to compute the number of bytes allocated for a given SDU
@@ -849,10 +849,10 @@ std::pair<uint32_t, uint32_t> sched_ue::get_requested_dl_bytes(uint32_t ue_cc_id
   // SRB0 is a special case due to being RLC TM (no segmentation possible)
   if (not lch_handler.is_bearer_dl(0)) {
     log_h->error("SRB0 must always be activated for DL\n");
-    return {0, 0};
+    return {};
   }
   if (not carriers[ue_cc_idx].is_active()) {
-    return {0, 0};
+    return {};
   }

   uint32_t max_data = 0, min_data = 0;
@@ -866,7 +866,7 @@ std::pair<uint32_t, uint32_t> sched_ue::get_requested_dl_bytes(uint32_t ue_cc_id
   if (ue_cc_idx == 0) {
     if (srb0_data == 0 and not pending_ces.empty() and pending_ces.front() == srslte::dl_sch_lcid::CON_RES_ID) {
       // Wait for SRB0 data to be available for Msg4 before scheduling the ConRes CE
-      return {0, 0};
+      return {};
     }
     for (const ce_cmd& ce : pending_ces) {
       sum_ce_data += srslte::ce_total_size(ce);

@@ -64,20 +64,16 @@ int output_sched_tester::test_pusch_collisions(const tti_params_t&
   ul_allocs.resize(nof_prb);
   ul_allocs.reset();

-  auto try_ul_fill = [&](srsenb::ul_harq_proc::ul_alloc_t alloc, const char* ch_str, bool strict = true) {
-    CONDERROR((alloc.RB_start + alloc.L) > nof_prb,
-              "Allocated RBs (%d,%d) out-of-bounds\n",
-              alloc.RB_start,
-              alloc.RB_start + alloc.L);
-    CONDERROR(alloc.L == 0, "Allocations must have at least one PRB\n");
-    if (strict and ul_allocs.any(alloc.RB_start, alloc.RB_start + alloc.L)) {
-      TESTERROR("Collision Detected of %s alloc=(%d,%d) and cumulative_mask=0x%s\n",
+  auto try_ul_fill = [&](prb_interval alloc, const char* ch_str, bool strict = true) {
+    CONDERROR(alloc.stop > nof_prb, "Allocated RBs %s out-of-bounds\n", alloc.to_string().c_str());
+    CONDERROR(alloc.is_empty(), "Allocations must have at least one PRB\n");
+    if (strict and ul_allocs.any(alloc.start, alloc.stop)) {
+      TESTERROR("Collision Detected of %s alloc=%s and cumulative_mask=0x%s\n",
                 ch_str,
-                alloc.RB_start,
-                alloc.RB_start + alloc.L,
+                alloc.to_string().c_str(),
                 ul_allocs.to_hex().c_str());
     }
-    ul_allocs.fill(alloc.RB_start, alloc.RB_start + alloc.L, true);
+    ul_allocs.fill(alloc.start, alloc.stop, true);
     return SRSLTE_SUCCESS;
   };
@@ -85,21 +81,22 @@ int output_sched_tester::test_pusch_collisions(const tti_params_t&
   bool is_prach_tti_tx_ul =
       srslte_prach_tti_opportunity_config_fdd(cell_params.cfg.prach_config, tti_params.tti_tx_ul, -1);
   if (is_prach_tti_tx_ul) {
-    try_ul_fill({cell_params.cfg.prach_freq_offset, 6}, "PRACH");
+    try_ul_fill({cell_params.cfg.prach_freq_offset, cell_params.cfg.prach_freq_offset + 6}, "PRACH");
   }

   /* TEST: check collisions in PUCCH */
   bool strict = nof_prb != 6 or (not is_prach_tti_tx_ul); // and not tti_data.ul_pending_msg3_present);
   try_ul_fill({0, (uint32_t)cell_params.cfg.nrb_pucch}, "PUCCH", strict);
-  try_ul_fill(
-      {cell_params.cfg.cell.nof_prb - cell_params.cfg.nrb_pucch, (uint32_t)cell_params.cfg.nrb_pucch}, "PUCCH", strict);
+  try_ul_fill({cell_params.cfg.cell.nof_prb - cell_params.cfg.nrb_pucch, (uint32_t)cell_params.cfg.cell.nof_prb},
+              "PUCCH",
+              strict);

   /* TEST: check collisions in the UL PUSCH */
   for (uint32_t i = 0; i < ul_result.nof_dci_elems; ++i) {
     uint32_t L, RBstart;
     srslte_ra_type2_from_riv(ul_result.pusch[i].dci.type2_alloc.riv, &L, &RBstart, nof_prb, nof_prb);
     strict = ul_result.pusch[i].needs_pdcch or nof_prb != 6; // Msg3 may collide with PUCCH at PRB==6
-    try_ul_fill({RBstart, L}, "PUSCH", strict);
+    try_ul_fill({RBstart, RBstart + L}, "PUSCH", strict);
     // ue_stats[ul_result.pusch[i].dci.rnti].nof_ul_rbs += L;
   }
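
Migration note for call sites like the ones above: the aggregate initializer changes meaning. The old ul_alloc_t took {RB_start, L}, while prb_interval takes {start, stop}, so a length must be converted to an exclusive end:

// old: {RB_start, L}    e.g. {10, 6}  meant PRBs 10..15
// new: {start, stop}    e.g. {10, 16} means PRBs [10, 16)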
