rework scheduler

master
Francisco Paisana 6 years ago committed by Andre Puschmann
parent 2aa36dd11c
commit 7be183c223

@ -43,7 +43,7 @@ class bounded_bitset
public:
constexpr bounded_bitset() : buffer(), cur_size(0) {}
constexpr explicit bounded_bitset(size_t cur_size_) : buffer(), cur_size(cur_size_) {}
constexpr size_t max_size() const noexcept { return N; }

@ -29,8 +29,11 @@ namespace srsenb {
class sched_interface
{
public:
const static uint32_t max_cce = 128;
const static uint32_t max_prb = 100;
const static uint32_t max_rbg = 25;
const static int MAX_SIB_PAYLOAD_LEN = 2048;
const static int MAX_SIBS = 16;
const static int MAX_LC = 6;

@ -1297,7 +1297,7 @@ void rlc_am::rlc_am_rx::handle_data_pdu(uint8_t *payload, uint32_t nof_bytes, rl
}
memcpy(pdu.buf->msg, payload, nof_bytes);
pdu.buf->N_bytes = nof_bytes;
pdu.header = header;
rx_window[header.sn] = pdu;
@ -1382,7 +1382,7 @@ void rlc_am::rlc_am_rx::handle_data_pdu_segment(uint8_t *payload, uint32_t nof_b
memcpy(segment.buf->msg, payload, nof_bytes);
segment.buf->N_bytes = nof_bytes;
segment.header = header;
// Check if we already have a segment from the same PDU
it = rx_segments.find(header.sn);

@ -146,7 +146,8 @@ void test4()
assert(b2.msg[i] == b1.msg[i]);
}
int main(int argc, char** argv)
{
test1();
test2();
test3();

@ -24,7 +24,7 @@
#include "scheduler_harq.h"
#include "scheduler_ue.h"
#include "srslte/common/bounded_bitset.h"
#include "srsenb/hdr/mac/scheduler_grid.h"
#include "srslte/common/log.h"
#include "srslte/interfaces/enb_interfaces.h"
#include "srslte/interfaces/sched_interface.h"
@ -42,39 +42,53 @@ namespace srsenb {
*/
class sched : public sched_interface
{
public:
// handle for DL metric
class dl_tti_sched_t
{
public:
virtual alloc_outcome_t alloc_dl_user(sched_ue* user, const rbgmask_t& user_mask, uint32_t pid) = 0;
virtual const rbgmask_t& get_dl_mask() const = 0;
virtual uint32_t get_tti_tx_dl() const = 0;
virtual uint32_t get_nof_ctrl_symbols() const = 0;
virtual bool is_dl_alloc(sched_ue* user) const = 0;
};
// handle for UL metric
class ul_tti_sched_t
{
public:
virtual alloc_outcome_t alloc_ul_user(sched_ue* user, ul_harq_proc::ul_alloc_t alloc) = 0;
virtual const prbmask_t& get_ul_mask() const = 0;
virtual uint32_t get_tti_tx_ul() const = 0;
virtual bool is_ul_alloc(sched_ue* user) const = 0;
};
/*************************************************************
*
* Scheduling metric interface definition
*
************************************************************/
class metric_dl
{
public:
/* Virtual methods for user metric calculation */
virtual void set_log(srslte::log* log_) = 0;
virtual void sched_users(std::map<uint16_t, sched_ue>& ue_db, dl_tti_sched_t* tti_sched) = 0;
};
class metric_ul
{
public:
/* Virtual methods for user metric calculation */
virtual void set_log(srslte::log* log_) = 0;
virtual void sched_users(std::map<uint16_t, sched_ue>& ue_db, ul_tti_sched_t* tti_sched) = 0;
};
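/* Illustrative sketch (not from this commit): a custom DL metric now only
 * needs the narrower dl_tti_sched_t handle, e.g.
 *
 *   class dl_metric_greedy : public sched::metric_dl {
 *   public:
 *     void set_log(srslte::log* log_) final { log_h = log_; }
 *     void sched_users(std::map<uint16_t, sched_ue>& ue_db, dl_tti_sched_t* tti_sched) final
 *     {
 *       for (auto& it : ue_db) { // hand the whole free mask to the first user with an empty harq
 *         dl_harq_proc* h    = it.second.get_empty_dl_harq();
 *         rbgmask_t     mask = ~tti_sched->get_dl_mask();
 *         if (h != nullptr and mask.any() and tti_sched->alloc_dl_user(&it.second, mask, h->get_id())) {
 *           return;
 *         }
 *       }
 *     }
 *   private:
 *     srslte::log* log_h = nullptr;
 *   };
 */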
/*************************************************************
*
* FAPI-like Interface
@ -117,25 +131,24 @@ public:
int ul_bsr(uint16_t rnti, uint32_t lcid, uint32_t bsr, bool set_value = true);
int ul_recv_len(uint16_t rnti, uint32_t lcid, uint32_t len);
int ul_phr(uint16_t rnti, int phr);
int ul_cqi_info(uint32_t tti, uint16_t rnti, uint32_t cqi, uint32_t ul_ch_code);
int dl_sched(uint32_t tti, dl_sched_res_t* sched_result) final;
int ul_sched(uint32_t tti, ul_sched_res_t* sched_result) final;
/* Custom TPC functions
*/
void tpc_inc(uint16_t rnti);
void tpc_dec(uint16_t rnti);
// Static Methods
static uint32_t get_rvidx(uint32_t retx_idx) {
const static int rv_idx[4] = {0, 2, 3, 1};
return rv_idx[retx_idx%4];
}
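// e.g. successive transmissions of the same TB cycle through redundancy
// versions 0 -> 2 -> 3 -> 1, the usual LTE RV order: get_rvidx(0)==0, get_rvidx(1)==2, etc.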
static void generate_cce_location(
srslte_regs_t* regs, sched_ue::sched_dci_cce_t* location, uint32_t cfi, uint32_t sf_idx = 0, uint16_t rnti = 0);
static uint32_t aggr_level(uint32_t aggr_idx) { return 1u << aggr_idx; }
protected:
metric_dl *dl_metric;
@ -149,30 +162,8 @@ protected:
cell_cfg_t cfg;
sched_args_t sched_cfg;
const static int MAX_PRB = 100;
const static int MAX_RBG = 25;
const static int MAX_CCE = 128;
// This is for computing DCI locations
srslte_regs_t regs;
class sched_vars
{
public:
struct tti_vars_t {
srslte::bounded_bitset<MAX_CCE, true> used_cce;
uint32_t tti_rx = 0;
tti_vars_t() : used_cce(MAX_CCE) {}
};
void init(sched* parent_);
tti_vars_t& new_tti(uint32_t tti_rx);
tti_vars_t& tti_vars(uint32_t tti_rx);
private:
static const uint32_t tti_array_size = 16;
sched* parent = NULL;
tti_vars_t tti_vars_[tti_array_size];
};
sched_vars sched_vars;
typedef struct {
int buf_rar;
@ -187,30 +178,124 @@ protected:
uint32_t n_tx;
} sched_sib_t;
int dl_sched_bc(dl_sched_bc_t bc[MAX_BC_LIST]);
int dl_sched_rar(dl_sched_rar_t rar[MAX_RAR_LIST]);
int dl_sched_data(dl_sched_data_t data[MAX_DATA_LIST]);
class tti_sched_t : public dl_tti_sched_t, public ul_tti_sched_t
{
public:
struct ctrl_alloc_t {
size_t dci_idx;
rbg_range_t rbg_range;
uint16_t rnti;
uint32_t req_bytes;
alloc_type_t alloc_type;
};
struct rar_alloc_t : public ctrl_alloc_t {
dl_sched_rar_t rar_grant;
rar_alloc_t() = default;
explicit rar_alloc_t(const ctrl_alloc_t& c) : ctrl_alloc_t(c) {}
};
struct bc_alloc_t : public ctrl_alloc_t {
uint32_t rv = 0;
uint32_t sib_idx = 0;
bc_alloc_t() = default;
explicit bc_alloc_t(const ctrl_alloc_t& c) : ctrl_alloc_t(c) {}
};
struct dl_alloc_t {
size_t dci_idx;
sched_ue* user_ptr;
rbgmask_t user_mask;
uint32_t pid;
};
struct ul_alloc_t {
enum type_t { NEWTX, NOADAPT_RETX, ADAPT_RETX, MSG3 };
size_t dci_idx;
type_t type;
sched_ue* user_ptr;
ul_harq_proc::ul_alloc_t alloc;
uint32_t mcs = 0;
bool is_retx() const { return type == NOADAPT_RETX or type == ADAPT_RETX; }
bool is_msg3() const { return type == MSG3; }
bool needs_pdcch() const { return type == NEWTX or type == ADAPT_RETX; }
};
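// Note: only new transmissions and adaptive retxs consume a PDCCH grant; a
// NOADAPT_RETX reuses the previously signalled PRBs and MSG3 PRBs come from
// the RAR grant, hence needs_pdcch() is false for both.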
typedef std::pair<alloc_outcome_t, const rar_alloc_t*> rar_code_t;
typedef std::pair<alloc_outcome_t, const ctrl_alloc_t> ctrl_code_t;
// TTI scheduler result
pdcch_mask_t pdcch_mask;
sched_interface::dl_sched_res_t dl_sched_result;
sched_interface::ul_sched_res_t ul_sched_result;
void init(sched* parent_);
void new_tti(uint32_t tti_rx_, uint32_t start_cfi);
alloc_outcome_t alloc_bc(uint32_t aggr_lvl, uint32_t sib_idx, uint32_t sib_ntx);
alloc_outcome_t alloc_paging(uint32_t aggr_lvl, uint32_t paging_payload);
rar_code_t alloc_rar(uint32_t aggr_lvl, const dl_sched_rar_t& rar_grant, uint32_t rar_tti, uint32_t buf_rar);
void generate_dcis();
// dl_tti_sched itf
alloc_outcome_t alloc_dl_user(sched_ue* user, const rbgmask_t& user_mask, uint32_t pid) final;
uint32_t get_tti_tx_dl() const final { return tti_alloc.get_tti_tx_dl(); }
uint32_t get_nof_ctrl_symbols() const final;
const rbgmask_t& get_dl_mask() const final { return tti_alloc.get_dl_mask(); }
// ul_tti_sched itf
alloc_outcome_t alloc_ul_user(sched_ue* user, ul_harq_proc::ul_alloc_t alloc) final;
alloc_outcome_t alloc_ul_msg3(sched_ue* user, ul_harq_proc::ul_alloc_t alloc, uint32_t mcs);
const prbmask_t& get_ul_mask() const final { return tti_alloc.get_ul_mask(); }
uint32_t get_tti_tx_ul() const final { return tti_alloc.get_tti_tx_ul(); }
// getters
const pdcch_mask_t& get_pdcch_mask() const { return pdcch_mask; }
rbgmask_t& get_dl_mask() { return tti_alloc.get_dl_mask(); }
prbmask_t& get_ul_mask() { return tti_alloc.get_ul_mask(); }
const std::vector<ul_alloc_t>& get_ul_allocs() const { return ul_data_allocs; }
uint32_t get_cfi() const { return tti_alloc.get_cfi(); }
uint32_t get_tti_rx() const { return tti_alloc.get_tti_rx(); }
uint32_t get_sfn() const { return tti_alloc.get_sfn(); }
uint32_t get_sf_idx() const { return tti_alloc.get_sf_idx(); }
void ul_sched_msg3();
private:
bool is_dl_alloc(sched_ue* user) const final;
bool is_ul_alloc(sched_ue* user) const final;
ctrl_code_t alloc_dl_ctrl(uint32_t aggr_lvl, uint32_t tbs_bytes, uint16_t rnti);
alloc_outcome_t alloc_ul(sched_ue* user,
ul_harq_proc::ul_alloc_t alloc,
tti_sched_t::ul_alloc_t::type_t alloc_type,
uint32_t msg3 = 0);
int generate_format1a(
uint32_t rb_start, uint32_t l_crb, uint32_t tbs, uint32_t rv, uint16_t rnti, srslte_dci_dl_t* dci);
void set_bc_sched_result(const pdcch_grid_t::alloc_result_t& dci_result);
void set_rar_sched_result(const pdcch_grid_t::alloc_result_t& dci_result);
void set_dl_data_sched_result(const pdcch_grid_t::alloc_result_t& dci_result);
void set_ul_sched_result(const pdcch_grid_t::alloc_result_t& dci_result);
// consts
sched* parent = NULL;
srslte::log* log_h = NULL;
uint32_t P;
cell_cfg_sib_t* sibs_cfg = NULL;
// internal state
tti_grid_t tti_alloc;
std::vector<rar_alloc_t> rar_allocs;
std::vector<bc_alloc_t> bc_allocs;
std::vector<dl_alloc_t> data_allocs;
std::vector<ul_alloc_t> ul_data_allocs;
};
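// Intended per-TTI call sequence, as suggested by the interface above
// (sketch only; the exact ordering and error handling live in dl_sched()/ul_sched()):
//   tti_sched_t* tti_sched = get_tti_sched(tti_rx);
//   tti_sched->new_tti(tti_rx, start_cfi);
//   tti_sched->alloc_bc(bc_aggr_level, sib_idx, sib_ntx); // SIB/paging/RAR ctrl allocs
//   dl_metric->sched_users(ue_db, tti_sched);             // UE DL data
//   ul_metric->sched_users(ue_db, tti_sched);             // UE UL data + msg3
//   tti_sched->generate_dcis();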
int generate_format1a(
uint32_t rb_start, uint32_t l_crb, uint32_t tbs, uint32_t rv, uint16_t rnti, srslte_dci_dl_t* dci);
int find_empty_dci(sched_ue::sched_dci_cce_t* locations,
uint32_t aggr_level,
sched_vars::tti_vars_t* tti_vars,
sched_ue* user = NULL);
bool generate_dci(srslte_dci_location_t* sched_location,
sched_ue::sched_dci_cce_t* locations,
uint32_t aggr_level,
sched_vars::tti_vars_t* tti_vars,
sched_ue* user = NULL);
const static uint32_t nof_sched_ttis = 10;
tti_sched_t tti_scheds[nof_sched_ttis];
tti_sched_t* get_tti_sched(uint32_t tti_rx) { return &tti_scheds[tti_rx % nof_sched_ttis]; }
std::map<uint16_t, sched_ue> ue_db;
typedef std::map<uint16_t, sched_ue>::iterator ue_db_it_t;
tti_sched_t* new_tti(uint32_t tti_rx);
void generate_phich(tti_sched_t* tti_sched);
int generate_dl_sched(tti_sched_t* tti_sched);
int generate_ul_sched(tti_sched_t* tti_sched);
void dl_sched_bc(tti_sched_t* tti_sched);
void dl_sched_rar(tti_sched_t* tti_sched);
void dl_sched_data(tti_sched_t* tti_sched);
void ul_sched_msg3(tti_sched_t* tti_sched);
sched_sib_t pending_sibs[MAX_SIBS];
typedef struct {
bool enabled;
uint16_t rnti;
@ -224,36 +309,27 @@ protected:
pending_msg3_t pending_msg3[10];
// Allowed DCI locations for SIB and RAR per CFI
sched_ue::sched_dci_cce_t common_locations[3];
sched_ue::sched_dci_cce_t rar_locations[3][10];
// derived from args
uint32_t P;
uint32_t si_n_rbg;
uint32_t rar_n_rbg;
uint32_t nof_rbg;
prbmask_t prach_mask;
prbmask_t pucch_mask;
uint32_t bc_aggr_level;
uint32_t rar_aggr_level;
uint32_t pdsch_re[10];
uint32_t avail_rbg;
bool fail_dci_alloc = false;
bool configured;
};
}
#endif // SRSENB_SCHEDULER_H

@ -0,0 +1,152 @@
/*
* Copyright 2013-2019 Software Radio Systems Limited
*
* This file is part of srsLTE.
*
* srsLTE is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as
* published by the Free Software Foundation, either version 3 of
* the License, or (at your option) any later version.
*
* srsLTE is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* A copy of the GNU Affero General Public License can be found in
* the LICENSE file in the top-level directory of this distribution
* and at http://www.gnu.org/licenses/.
*
*/
#ifndef SRSLTE_SCHEDULER_GRID_H
#define SRSLTE_SCHEDULER_GRID_H
#include "lib/include/srslte/interfaces/sched_interface.h"
#include "srsenb/hdr/mac/scheduler_ue.h"
#include "srslte/common/bounded_bitset.h"
#include "srslte/common/log.h"
#include <vector>
namespace srsenb {
// Type of Allocation
enum class alloc_type_t { DL_BC, DL_PCCH, DL_RAR, DL_DATA, UL_DATA };
// Result of alloc attempt
struct alloc_outcome_t {
enum result_enum { SUCCESS, DCI_COLLISION, RB_COLLISION, ERROR };
result_enum result = ERROR;
alloc_outcome_t() = default;
alloc_outcome_t(result_enum e) : result(e) {}
operator result_enum() { return result; }
operator bool() { return result == SUCCESS; }
const char* to_string() const;
};
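// Minimal usage sketch of the implicit conversions (illustrative only):
//   alloc_outcome_t ret = tti_sched->alloc_dl_user(user, mask, pid);
//   if (not ret) {                                // operator bool: true only on SUCCESS
//     printf("alloc failed: %s\n", ret.to_string());
//   } else if (ret == alloc_outcome_t::SUCCESS) { // operator result_enum
//     // proceed to fill the DCI
//   }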
class pdcch_grid_t
{
public:
struct alloc_t {
uint16_t rnti;
srslte_dci_location_t dci_pos = {0, 0};
pdcch_mask_t current_mask;
pdcch_mask_t total_mask;
};
typedef std::vector<const alloc_t*> alloc_result_t;
void init(srslte::log* log_,
srslte_regs_t* regs,
sched_ue::sched_dci_cce_t* common_locs,
sched_ue::sched_dci_cce_t (*rar_locs)[10]);
void new_tti(uint32_t tti_rx_, uint32_t start_cfi);
bool alloc_dci(alloc_type_t alloc_type, uint32_t aggr_idx, sched_ue* user = NULL);
bool set_cfi(uint32_t cfi);
// getters
uint32_t get_cfi() const { return current_cfix + 1; }
void get_allocs(alloc_result_t* vec = nullptr, pdcch_mask_t* tot_mask = nullptr, size_t idx = 0) const;
uint32_t nof_cces() const { return cce_size_array[current_cfix]; }
size_t nof_allocs() const { return nof_dci_allocs; }
size_t nof_alloc_combinations() const { return prev_end - prev_start; }
void print_result(bool verbose = false) const;
uint32_t get_sf_idx() const { return sf_idx; }
private:
const static uint32_t nof_cfis = 3;
typedef std::pair<int, alloc_t> tree_node_t;
void reset();
const sched_ue::sched_dci_cce_t* get_cce_loc_table(alloc_type_t alloc_type, sched_ue* user) const;
void update_alloc_tree(int node_idx,
uint32_t aggr_idx,
sched_ue* user,
alloc_type_t alloc_type,
const sched_ue::sched_dci_cce_t* dci_locs);
// consts
srslte::log* log_h = nullptr;
sched_ue::sched_dci_cce_t* common_locations = nullptr;
sched_ue::sched_dci_cce_t* rar_locations[10];
uint32_t cce_size_array[nof_cfis];
// tti vars
uint32_t tti_rx;
uint32_t sf_idx;
uint32_t current_cfix;
size_t prev_start, prev_end;
std::vector<tree_node_t> dci_alloc_tree;
size_t nof_dci_allocs;
};
class tti_grid_t
{
public:
typedef std::pair<alloc_outcome_t, rbg_range_t> ctrl_alloc_t;
void init(srslte::log* log_, sched_interface::cell_cfg_t* cell_, const pdcch_grid_t& pdcch_grid);
void new_tti(uint32_t tti_rx_, uint32_t start_cfi);
ctrl_alloc_t alloc_dl_ctrl(uint32_t aggr_lvl, alloc_type_t alloc_type);
alloc_outcome_t alloc_dl_data(sched_ue* user, const rbgmask_t& user_mask);
alloc_outcome_t alloc_ul_data(sched_ue* user, ul_harq_proc::ul_alloc_t alloc, bool needs_pdcch);
// getters
uint32_t get_avail_rbgs() const { return avail_rbg; }
rbgmask_t& get_dl_mask() { return dl_mask; }
const rbgmask_t& get_dl_mask() const { return dl_mask; }
prbmask_t& get_ul_mask() { return ul_mask; }
const prbmask_t& get_ul_mask() const { return ul_mask; }
uint32_t get_cfi() const { return pdcch_alloc.get_cfi(); }
const pdcch_grid_t& get_pdcch_grid() const { return pdcch_alloc; }
uint32_t get_tti_rx() const { return tti_rx; }
uint32_t get_tti_tx_dl() const { return tti_tx_dl; }
uint32_t get_tti_tx_ul() const { return tti_tx_ul; }
uint32_t get_sfn() const { return sfn; }
uint32_t get_sf_idx() const { return pdcch_alloc.get_sf_idx(); }
private:
alloc_outcome_t alloc_dl(uint32_t aggr_lvl, alloc_type_t alloc_type, rbgmask_t alloc_mask, sched_ue* user = NULL);
// consts
srslte::log* log_h = nullptr;
sched_interface::cell_cfg_t* cell_cfg = nullptr;
uint32_t nof_prbs;
uint32_t nof_rbgs;
uint32_t si_n_rbg, rar_n_rbg;
// tti const
uint32_t tti_rx = 10241;
// derived
uint32_t tti_tx_dl, tti_tx_ul;
uint32_t sfn;
pdcch_grid_t pdcch_alloc;
// internal state
uint32_t avail_rbg = 0;
rbgmask_t dl_mask;
prbmask_t ul_mask;
};
} // namespace srsenb
#endif // SRSLTE_SCHEDULER_GRID_H

@ -29,31 +29,55 @@
namespace srsenb {
// MASK used for CCE allocations
typedef srslte::bounded_bitset<sched_interface::max_cce, true> pdcch_mask_t;
// Range of RBGs
class prb_range_t;
struct rbg_range_t {
uint32_t rbg_start = 0, rbg_end = 0;
rbg_range_t() = default;
rbg_range_t(uint32_t s, uint32_t e) : rbg_start(s), rbg_end(e) {}
rbg_range_t(const prb_range_t& rbgs, uint32_t P);
uint32_t length() const { return rbg_end - rbg_start; }
};
// Range of PRBs
struct prb_range_t {
uint32_t prb_start = 0, prb_end = 0;
prb_range_t() = default;
prb_range_t(uint32_t s, uint32_t e) : prb_start(s), prb_end(e) {}
prb_range_t(const rbg_range_t& rbgs, uint32_t P);
uint32_t length() { return prb_end - prb_start; }
static prb_range_t riv_to_prbs(uint32_t riv, uint32_t nof_prbs, int nof_vrbs = -1);
};
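// Worked example of the conversions above, assuming P=2 (e.g. a 25-PRB cell):
// PRBs [3, 9) map to RBGs [ceil(3/2), ceil(9/2)) = [2, 5), and RBGs [2, 5)
// map back to PRBs [4, 10); the round trip is not the identity, since the
// edges snap to RBG boundaries.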
class harq_proc
{
public:
void config(uint32_t id, uint32_t max_retx, srslte::log* log_h);
void set_max_retx(uint32_t max_retx);
void reset(uint32_t tb_idx);
uint32_t get_id() const;
bool is_empty() const;
bool is_empty(uint32_t tb_idx) const;
bool get_ack(uint32_t tb_idx) const;
void set_ack(uint32_t tb_idx, bool ack);
uint32_t nof_tx(uint32_t tb_idx) const;
uint32_t nof_retx(uint32_t tb_idx) const;
uint32_t get_tti() const;
bool get_ndi(uint32_t tb_idx) const;
uint32_t max_nof_retx() const;
protected:
void new_tx_common(uint32_t tb_idx, uint32_t tti, int mcs, int tbs);
void new_retx_common(uint32_t tb_idx, uint32_t tti, int* mcs, int* tbs);
bool has_pending_retx_common(uint32_t tb_idx) const;
void set_ack_common(uint32_t tb_idx, bool ack);
void reset_pending_data_common();
enum ack_t { NULL_ACK, NACK, ACK };
ack_t ack_state[SRSLTE_MAX_TB];
bool active[SRSLTE_MAX_TB];
bool ndi[SRSLTE_MAX_TB];
uint32_t id;
@ -63,11 +87,8 @@ protected:
int tti;
int last_mcs[SRSLTE_MAX_TB];
int last_tbs[SRSLTE_MAX_TB];
srslte::log* log_h;
};
typedef srslte::bounded_bitset<25, true> rbgmask_t;
@ -76,13 +97,15 @@ class dl_harq_proc : public harq_proc
{
public:
dl_harq_proc();
void new_tx(const rbgmask_t& new_mask, uint32_t tb_idx, uint32_t tti, int mcs, int tbs, uint32_t n_cce_);
void new_retx(const rbgmask_t& new_mask, uint32_t tb_idx, uint32_t tti_, int* mcs, int* tbs, uint32_t n_cce_);
void set_ack(uint32_t tb_idx, bool ack);
rbgmask_t get_rbgmask() const;
bool has_pending_retx(uint32_t tb_idx, uint32_t tti) const;
int get_tbs(uint32_t tb_idx) const;
uint32_t get_n_cce() const;
void reset_pending_data();
private:
rbgmask_t rbgmask;
uint32_t n_cce;
@ -100,46 +123,31 @@ public:
RB_start = start;
L = len;
}
uint32_t RB_end() const { return RB_start + L; }
};
void new_tx(uint32_t tti, int mcs, int tbs, ul_alloc_t alloc, uint32_t max_retx_);
void new_retx(uint32_t tb_idx, uint32_t tti_, int* mcs, int* tbs, ul_alloc_t alloc);
void set_ack(uint32_t tb_idx, bool ack);
ul_alloc_t get_alloc() const;
bool has_pending_retx() const;
bool is_adaptive_retx() const;
void reset_pending_data();
bool has_pending_ack() const;
bool get_pending_ack() const;
uint32_t get_pending_data() const;
private:
ul_alloc_t allocation;
int pending_data;
bool is_adaptive;
ack_t pending_ack;
};
typedef srslte::bounded_bitset<100, true> prbmask_t;
} // namespace srsenb
#endif // SRSENB_SCHEDULER_HARQ_H

@ -25,40 +25,36 @@
#include "scheduler.h"
namespace srsenb {
class dl_metric_rr : public sched::metric_dl
{
public:
void set_log(srslte::log* log_) final;
void sched_users(std::map<uint16_t, sched_ue>& ue_db, sched::dl_tti_sched_t* tti_sched) final;
private:
bool find_allocation(uint32_t nof_rbg, rbgmask_t* rbgmask);
dl_harq_proc* allocate_user(sched_ue* user);
srslte::log* log_h = nullptr;
sched::dl_tti_sched_t* tti_alloc = nullptr;
};
class ul_metric_rr : public sched::metric_ul
{
public:
void set_log(srslte::log* log_) final;
void sched_users(std::map<uint16_t, sched_ue>& ue_db, sched::ul_tti_sched_t* tti_sched) final;
private:
bool find_allocation(uint32_t L, ul_harq_proc::ul_alloc_t* alloc);
ul_harq_proc* allocate_user_newtx_prbs(sched_ue* user);
ul_harq_proc* allocate_user_retx_prbs(sched_ue* user);
srslte::log* log_h = nullptr;
sched::ul_tti_sched_t* tti_alloc = nullptr;
uint32_t current_tti;
};

@ -86,12 +86,9 @@ public:
void set_max_mcs(int mcs_ul, int mcs_dl);
void set_fixed_mcs(int mcs_ul, int mcs_dl);
void set_dl_alloc(dl_harq_proc* alloc);
dl_harq_proc* get_dl_alloc();
void set_ul_alloc(ul_harq_proc* alloc);
ul_harq_proc* get_ul_alloc();
dl_harq_proc* find_dl_harq(uint32_t tti);
const dl_harq_proc* get_dl_harq(uint32_t idx) const;
uint16_t get_rnti() const { return rnti; }
/*******************************************************
* Functions used by scheduler metric objects
@ -108,7 +105,7 @@ public:
uint32_t get_pending_ul_old_data();
uint32_t get_pending_dl_new_data_total(uint32_t tti);
void reset_pending_pids(uint32_t tti_rx);
dl_harq_proc *get_pending_dl_harq(uint32_t tti);
dl_harq_proc* get_empty_dl_harq();
ul_harq_proc* get_ul_harq(uint32_t tti);
@ -120,10 +117,18 @@ public:
void set_sr();
void unset_sr();
int generate_format1(
dl_harq_proc* h, sched_interface::dl_sched_data_t* data, uint32_t tti, uint32_t cfi, const rbgmask_t& user_mask);
int generate_format2a(
dl_harq_proc* h, sched_interface::dl_sched_data_t* data, uint32_t tti, uint32_t cfi, const rbgmask_t& user_mask);
int generate_format2(
dl_harq_proc* h, sched_interface::dl_sched_data_t* data, uint32_t tti, uint32_t cfi, const rbgmask_t& user_mask);
int generate_format0(sched_interface::ul_sched_data_t* data,
uint32_t tti,
ul_harq_proc::ul_alloc_t alloc,
bool needs_pdcch,
srslte_dci_location_t cce_range,
int explicit_mcs = -1);
srslte_dci_format_t get_dci_format();
uint32_t get_aggr_level(uint32_t nof_bits);
@ -164,7 +169,8 @@ private:
bool needs_cqi_unlocked(uint32_t tti, bool will_send = false);
int generate_format2a_unlocked(
dl_harq_proc* h, sched_interface::dl_sched_data_t* data, uint32_t tti, uint32_t cfi, const rbgmask_t& user_mask);
bool is_first_dl_tx();
@ -191,14 +197,15 @@ private:
uint32_t ul_cqi;
uint32_t ul_cqi_tti;
uint16_t rnti;
uint32_t max_mcs_dl;
uint32_t max_mcs_ul;
uint32_t max_msg3retx;
int fixed_mcs_ul;
int fixed_mcs_dl;
uint32_t P;
int next_tpc_pusch;
int next_tpc_pucch;
// Allowed DCI locations per CFI and per subframe
sched_dci_cce_t dci_locations[3][10];
@ -210,8 +217,6 @@ private:
bool phy_config_dedicated_enabled;
asn1::rrc::phys_cfg_ded_s::ant_info_c_ dl_ant_info;
dl_harq_proc* next_dl_harq_proc;
ul_harq_proc* next_ul_harq_proc;
};
}

@ -52,7 +52,7 @@ public:
srslte_softbuffer_tx_t* get_tx_softbuffer(uint32_t harq_process, uint32_t tb_idx);
srslte_softbuffer_rx_t* get_rx_softbuffer(uint32_t tti);
bool process_pdus();
uint8_t* request_buffer(uint32_t tti, uint32_t len);
void process_pdu(uint8_t* pdu, uint32_t nof_bytes, srslte::pdu_queue::channel_t channel);

File diff suppressed because it is too large

@ -0,0 +1,369 @@
/*
* Copyright 2013-2019 Software Radio Systems Limited
*
* This file is part of srsLTE.
*
* srsLTE is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as
* published by the Free Software Foundation, either version 3 of
* the License, or (at your option) any later version.
*
* srsLTE is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* A copy of the GNU Affero General Public License can be found in
* the LICENSE file in the top-level directory of this distribution
* and at http://www.gnu.org/licenses/.
*
*/
#include "srsenb/hdr/mac/scheduler_grid.h"
#include "srsenb/hdr/mac/scheduler.h"
#include <srslte/interfaces/sched_interface.h>
namespace srsenb {
const char* alloc_outcome_t::to_string() const
{
switch (result) {
case SUCCESS:
return "success";
case DCI_COLLISION:
return "dci_collision";
case RB_COLLISION:
return "rb_collision";
case ERROR:
return "error";
}
return "unknown error";
}
/*******************************************************
* PDCCH Allocation Methods
*******************************************************/
void pdcch_grid_t::init(srslte::log* log_,
srslte_regs_t* regs,
sched_ue::sched_dci_cce_t* common_locs,
sched_ue::sched_dci_cce_t (*rar_locs)[10])
{
log_h = log_;
common_locations = common_locs;
for (uint32_t cfix = 0; cfix < 3; ++cfix) {
rar_locations[cfix] = rar_locs[cfix];
}
// precompute nof_cces
for (uint32_t cfix = 0; cfix < nof_cfis; ++cfix) {
int ret = srslte_regs_pdcch_ncce(regs, cfix + 1);
if (ret < 0) {
log_h->error("SCHED: Failed to calculate the number of CCEs in the PDCCH\n");
}
cce_size_array[cfix] = (uint32_t)ret;
}
reset();
}
void pdcch_grid_t::new_tti(uint32_t tti_rx_, uint32_t start_cfi)
{
tti_rx = tti_rx_;
sf_idx = TTI_TX(tti_rx) % 10;
current_cfix = start_cfi - 1;
reset();
}
const sched_ue::sched_dci_cce_t* pdcch_grid_t::get_cce_loc_table(alloc_type_t alloc_type, sched_ue* user) const
{
switch (alloc_type) {
case alloc_type_t::DL_BC:
return &common_locations[current_cfix];
case alloc_type_t::DL_PCCH:
return &common_locations[current_cfix];
case alloc_type_t::DL_RAR:
return &rar_locations[current_cfix][sf_idx];
case alloc_type_t::DL_DATA:
return user->get_locations(current_cfix + 1, sf_idx);
case alloc_type_t::UL_DATA:
return user->get_locations(current_cfix + 1, sf_idx);
}
return NULL;
}
bool pdcch_grid_t::alloc_dci(alloc_type_t alloc_type, uint32_t aggr_idx, sched_ue* user)
{
// FIXME: Make the alloc tree update lazy
/* Get DCI Location Table */
const sched_ue::sched_dci_cce_t* dci_locs = get_cce_loc_table(alloc_type, user);
if (!dci_locs) {
return false;
}
/* Search for potential DCI positions */
if (prev_end > 0) {
for (size_t j = prev_start; j < prev_end; ++j) {
update_alloc_tree((int)j, aggr_idx, user, alloc_type, dci_locs);
}
} else {
update_alloc_tree(-1, aggr_idx, user, alloc_type, dci_locs);
}
// if no pdcch space was available
if (dci_alloc_tree.size() == prev_end) {
return false;
}
prev_start = prev_end;
prev_end = dci_alloc_tree.size();
nof_dci_allocs++;
return true;
}
void pdcch_grid_t::update_alloc_tree(int parent_node_idx,
uint32_t aggr_idx,
sched_ue* user,
alloc_type_t alloc_type,
const sched_ue::sched_dci_cce_t* dci_locs)
{
alloc_t alloc;
alloc.rnti = (user != nullptr) ? user->get_rnti() : (uint16_t)0u;
alloc.dci_pos.L = aggr_idx;
// get cumulative pdcch mask
pdcch_mask_t cum_mask;
if (parent_node_idx >= 0) {
cum_mask = dci_alloc_tree[parent_node_idx].second.total_mask;
} else {
cum_mask.resize(nof_cces());
}
uint32_t nof_locs = dci_locs->nof_loc[aggr_idx];
for (uint32_t i = 0; i < nof_locs; ++i) {
uint32_t startpos = dci_locs->cce_start[aggr_idx][i];
if (alloc_type == alloc_type_t::DL_DATA and user->pucch_sr_collision(TTI_TX(tti_rx), startpos)) {
// will cause a collision in the PUCCH
continue;
}
pdcch_mask_t alloc_mask(nof_cces());
alloc_mask.fill(startpos, startpos + (1u << aggr_idx));
if ((cum_mask & alloc_mask).any()) {
// there is collision. Try another mask
continue;
}
// Allocation successful
alloc.current_mask = alloc_mask;
alloc.total_mask = cum_mask | alloc_mask;
alloc.dci_pos.ncce = startpos;
// Prune if repetition
uint32_t j = prev_end;
for (; j < dci_alloc_tree.size(); ++j) {
if (dci_alloc_tree[j].second.total_mask == alloc.total_mask) {
break;
}
}
if (j < dci_alloc_tree.size()) {
continue;
}
// Register allocation
dci_alloc_tree.emplace_back(parent_node_idx, alloc);
}
}
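// Illustrative tree growth: with two candidate CCE start positions per DCI,
// allocating DCI A appends leaves {A0, A1}; allocating DCI B then combines
// B's positions with each surviving leaf, appending up to {A0B0, A0B1, A1B0,
// A1B1} minus colliding or duplicate total_masks. [prev_start, prev_end)
// always brackets the leaves created by the last alloc_dci() call.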
bool pdcch_grid_t::set_cfi(uint32_t cfi)
{
current_cfix = cfi - 1;
// FIXME: use this function for dynamic cfi
// FIXME: The estimation of the number of required prbs in metric depends on CFI. Analyse the consequences
return true;
}
void pdcch_grid_t::reset()
{
prev_start = 0;
prev_end = 0;
dci_alloc_tree.clear();
nof_dci_allocs = 0;
}
void pdcch_grid_t::get_allocs(alloc_result_t* vec, pdcch_mask_t* tot_mask, size_t idx) const
{
// if alloc tree is empty
if (prev_start == prev_end) {
if (vec)
vec->clear();
if (tot_mask) {
tot_mask->reset();
}
return;
}
// set vector of allocations
if (vec) {
vec->clear();
size_t i = prev_start + idx;
while (dci_alloc_tree[i].first >= 0) {
vec->push_back(&dci_alloc_tree[i].second);
i = (size_t)dci_alloc_tree[i].first;
}
vec->push_back(&dci_alloc_tree[i].second);
std::reverse(vec->begin(), vec->end());
}
// set final cce mask
if (tot_mask) {
*tot_mask = dci_alloc_tree[prev_start + idx].second.total_mask;
}
}
void pdcch_grid_t::print_result(bool verbose) const
{
if (prev_start == prev_end) {
log_h->info("SCHED: No DCI allocations\n");
}
std::stringstream ss;
ss << "SCHED: cfi=" << get_cfi() << ", " << prev_end - prev_start << " DCI allocation combinations:\n";
// get all the possible combinations of DCI allocations
uint32_t count = 0;
for (size_t i = prev_start; i < prev_end; ++i) {
alloc_result_t vec;
pdcch_mask_t tot_mask;
get_allocs(&vec, &tot_mask, i - prev_start);
ss << " combination " << count << ": mask=0x" << tot_mask.to_hex().c_str();
if (verbose) {
ss << ", DCI allocs:\n";
for (const auto& dci_alloc : vec) {
char hex[5];
sprintf(hex, "%x", dci_alloc->rnti);
ss << " > rnti=0x" << hex << ": " << dci_alloc->current_mask.to_hex().c_str() << " / "
<< dci_alloc->total_mask.to_hex().c_str() << "\n";
}
} else {
ss << "\n";
}
count++;
}
log_h->info("%s", ss.str().c_str());
}
/*******************************************************
* TTI resource Scheduling Methods
*******************************************************/
void tti_grid_t::init(srslte::log* log_, sched_interface::cell_cfg_t* cell_, const pdcch_grid_t& pdcch_grid)
{
log_h = log_;
cell_cfg = cell_;
nof_prbs = cell_cfg->cell.nof_prb;
uint32_t P = srslte_ra_type0_P(cell_cfg->cell.nof_prb);
nof_rbgs = srslte::ceil_div(cell_cfg->cell.nof_prb, P);
si_n_rbg = srslte::ceil_div(4, P);
rar_n_rbg = srslte::ceil_div(3, P);
pdcch_alloc = pdcch_grid;
}
void tti_grid_t::new_tti(uint32_t tti_rx_, uint32_t start_cfi)
{
tti_rx = tti_rx_;
// derived
tti_tx_dl = TTI_TX(tti_rx);
tti_tx_ul = TTI_RX_ACK(tti_rx);
sfn = tti_tx_dl / 10;
// internal state
avail_rbg = nof_rbgs;
dl_mask.reset();
dl_mask.resize(nof_rbgs);
ul_mask.reset();
ul_mask.resize(nof_prbs);
pdcch_alloc.new_tti(tti_rx, start_cfi);
}
alloc_outcome_t tti_grid_t::alloc_dl(uint32_t aggr_lvl, alloc_type_t alloc_type, rbgmask_t alloc_mask, sched_ue* user)
{
// Check RBG collision
if ((dl_mask & alloc_mask).any()) {
return alloc_outcome_t::RB_COLLISION;
}
// Allocate DCI in PDCCH
if (not pdcch_alloc.alloc_dci(alloc_type, aggr_lvl, user)) {
return alloc_outcome_t::DCI_COLLISION;
}
// Allocate RBGs
dl_mask |= alloc_mask;
avail_rbg -= alloc_mask.count();
return alloc_outcome_t::SUCCESS;
}
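// Note the check order: the cheap RBG-mask test fails first, so a
// DCI_COLLISION outcome implies the requested RBGs were free and the caller
// may retry the same mask in a later TTI or with a different aggregation level.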
tti_grid_t::ctrl_alloc_t tti_grid_t::alloc_dl_ctrl(uint32_t aggr_lvl, alloc_type_t alloc_type)
{
rbg_range_t range;
range.rbg_start = nof_rbgs - avail_rbg;
range.rbg_end = range.rbg_start + ((alloc_type == alloc_type_t::DL_RAR) ? rar_n_rbg : si_n_rbg);
if (alloc_type != alloc_type_t::DL_RAR and alloc_type != alloc_type_t::DL_BC and
alloc_type != alloc_type_t::DL_PCCH) {
log_h->error("SCHED: DL control allocations must be RAR/BC/PDCCH\n");
return {alloc_outcome_t::ERROR, range};
}
// Setup range starting from left
if (range.rbg_end > nof_rbgs) {
return {alloc_outcome_t::RB_COLLISION, range};
}
// allocate DCI and RBGs
rbgmask_t new_mask(dl_mask.size());
new_mask.fill(range.rbg_start, range.rbg_end);
return {alloc_dl(aggr_lvl, alloc_type, new_mask), range};
}
alloc_outcome_t tti_grid_t::alloc_dl_data(sched_ue* user, const rbgmask_t& user_mask)
{
srslte_dci_format_t dci_format = user->get_dci_format();
uint32_t aggr_level = user->get_aggr_level(srslte_dci_format_sizeof(&cell_cfg->cell, NULL, NULL, dci_format));
return alloc_dl(aggr_level, alloc_type_t::DL_DATA, user_mask, user);
}
alloc_outcome_t tti_grid_t::alloc_ul_data(sched_ue* user, ul_harq_proc::ul_alloc_t alloc, bool needs_pdcch)
{
if (alloc.RB_start + alloc.L > ul_mask.size()) {
return alloc_outcome_t::ERROR;
}
prbmask_t newmask(ul_mask.size());
newmask.fill(alloc.RB_start, alloc.RB_start + alloc.L);
if ((ul_mask & newmask).any()) {
return alloc_outcome_t::RB_COLLISION;
}
// Generate PDCCH except for RAR and non-adaptive retx
if (needs_pdcch) {
uint32_t aggr_idx = user->get_aggr_level(srslte_dci_format_sizeof(&cell_cfg->cell, NULL, NULL, SRSLTE_DCI_FORMAT0));
if (not pdcch_alloc.alloc_dci(alloc_type_t::UL_DATA, aggr_idx, user)) {
return alloc_outcome_t::DCI_COLLISION;
}
}
ul_mask |= newmask;
return alloc_outcome_t::SUCCESS;
}
} // namespace srsenb

@ -32,6 +32,24 @@
namespace srsenb {
rbg_range_t::rbg_range_t(const prb_range_t& rbgs, uint32_t P) :
rbg_range_t(srslte::ceil_div(rbgs.prb_start, P), srslte::ceil_div(rbgs.prb_end, P))
{
}
prb_range_t::prb_range_t(const rbg_range_t& rbgs, uint32_t P) : prb_range_t(rbgs.rbg_start * P, rbgs.rbg_end * P) {}
prb_range_t prb_range_t::riv_to_prbs(uint32_t riv, uint32_t nof_prbs, int nof_vrbs)
{
prb_range_t p;
if (nof_vrbs < 0) {
nof_vrbs = nof_prbs;
}
srslte_ra_type2_from_riv(riv, &p.prb_end, &p.prb_start, nof_prbs, (uint32_t)nof_vrbs);
p.prb_end += p.prb_start;
return p;
}
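// srslte_ra_type2_from_riv() yields (L_crb, RB_start); the "prb_end +=
// prb_start" step converts that pair to the half-open [prb_start, prb_end)
// convention used above. Worked example for nof_prbs=25 (assuming the
// standard type2 encoding RIV = N*(L_crb-1) + RB_start): riv_to_prbs(52, 25)
// gives L_crb=3, RB_start=2, i.e. PRBs [2, 5).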
/******************************************************
*
* These classes manage the HARQ Processes.
@ -49,16 +67,10 @@ void harq_proc::config(uint32_t id_, uint32_t max_retx_, srslte::log* log_h_)
}
}
void harq_proc::set_max_retx(uint32_t max_retx_) {
log_h->debug("Set max_retx=%d pid=%d\n", max_retx_, id);
max_retx = max_retx_;
}
void harq_proc::reset(uint32_t tb_idx)
{
ack_state[tb_idx] = NULL_ACK;
active[tb_idx] = false;
n_rtx[tb_idx] = 0;
tti = 0;
last_mcs[tb_idx] = -1;
@ -83,12 +95,12 @@ bool harq_proc::is_empty() const
bool harq_proc::is_empty(uint32_t tb_idx) const
{
return !active[tb_idx];
}
bool harq_proc::has_pending_retx_common(uint32_t tb_idx) const
{
return !is_empty(tb_idx) && ack_state[tb_idx] == NACK;
}
uint32_t harq_proc::get_tti() const
@ -96,19 +108,15 @@ uint32_t harq_proc::get_tti() const
return (uint32_t) tti;
}
void harq_proc::set_ack_common(uint32_t tb_idx, bool ack_)
{
ack_state[tb_idx] = ack_ ? ACK : NACK;
log_h->debug("ACK=%d received pid=%d, tb_idx=%d, n_rtx=%d, max_retx=%d\n", ack_, id, tb_idx, n_rtx[tb_idx], max_retx);
if (!ack_ && (n_rtx[tb_idx] + 1 >= max_retx)) {
Warning("SCHED: discarting TB %d pid=%d, tti=%d, maximum number of retx exceeded (%d)\n", tb_idx, id, tti, max_retx);
active[tb_idx] = false;
} else if (ack_) {
active[tb_idx] = false;
}
}
@ -121,17 +129,13 @@ void harq_proc::new_tx_common(uint32_t tb_idx, uint32_t tti_, int mcs, int tbs)
last_mcs[tb_idx] = mcs;
last_tbs[tb_idx] = tbs;
active[tb_idx] = true;
}
void harq_proc::new_retx_common(uint32_t tb_idx, uint32_t tti_, int* mcs, int* tbs)
{
ack_state[tb_idx] = NACK;
tti = tti_;
n_rtx[tb_idx]++;
if (mcs) {
*mcs = last_mcs[tb_idx];
@ -141,6 +145,16 @@ void harq_proc::new_retx_common(uint32_t tb_idx, uint32_t tti_, int* mcs, int* t
}
}
void harq_proc::reset_pending_data_common()
{
// reuse harqs with no retxs
if (max_retx == 0 and !is_empty()) {
for (uint32_t tb = 0; tb < SRSLTE_MAX_TB; ++tb) {
active[tb] = false;
}
}
}
uint32_t harq_proc::nof_tx(uint32_t tb_idx) const
{
return tx_cnt[tb_idx];
@ -156,47 +170,55 @@ bool harq_proc::get_ndi(uint32_t tb_idx) const
return ndi[tb_idx];
}
uint32_t harq_proc::max_nof_retx() const
{
return max_retx;
}
/******************************************************
* UE::DL HARQ class *
******************************************************/
dl_harq_proc::dl_harq_proc() : harq_proc()
{
n_cce = 0;
}
void dl_harq_proc::new_tx(const rbgmask_t& new_mask, uint32_t tb_idx, uint32_t tti, int mcs, int tbs, uint32_t n_cce_)
{
n_cce = n_cce_;
rbgmask = new_mask;
new_tx_common(tb_idx, tti, mcs, tbs);
}
void dl_harq_proc::new_retx(
const rbgmask_t& new_mask, uint32_t tb_idx, uint32_t tti_, int* mcs, int* tbs, uint32_t n_cce_)
{
n_cce = n_cce_;
rbgmask = new_mask;
new_retx_common(tb_idx, tti_, mcs, tbs);
}
void dl_harq_proc::set_ack(uint32_t tb_idx, bool ack)
{
set_ack_common(tb_idx, ack);
}
uint32_t dl_harq_proc::get_n_cce() const
{
return n_cce;
}
rbgmask_t dl_harq_proc::get_rbgmask() const
{
return rbgmask;
}
bool dl_harq_proc::has_pending_retx(uint32_t tb_idx, uint32_t current_tti) const
{
uint32_t tti_diff = srslte_tti_interval(current_tti, tti);
// NOTE: tti may be ahead of current_tti due to thread flip
return (tti_diff < (10240 / 2)) and (tti_diff >= SRSLTE_FDD_NOF_HARQ) and has_pending_retx_common(tb_idx);
}
int dl_harq_proc::get_tbs(uint32_t tb_idx) const
@ -204,131 +226,77 @@ int dl_harq_proc::get_tbs(uint32_t tb_idx) const
return last_tbs[tb_idx];
}
void dl_harq_proc::reset_pending_data()
{
reset_pending_data_common();
}
/******************************************************
* UE::UL HARQ class *
******************************************************/
ul_harq_proc::ul_alloc_t ul_harq_proc::get_alloc() const
{
return allocation;
}
void ul_harq_proc::set_alloc(ul_harq_proc::ul_alloc_t alloc)
{
if (not is_empty(0)) {
log_h->error("Trying to overwrite an on-going harq procedure\n");
return;
}
is_rar = false; // can be set to true through set_rar_mcs()
is_adaptive = false;
allocation = alloc;
}
void ul_harq_proc::set_realloc(ul_harq_proc::ul_alloc_t alloc)
{
if (is_empty(0)) {
log_h->error("Trying to reallocate an empty harq procedure\n");
return;
}
if (alloc.L != allocation.L or alloc.RB_start != allocation.RB_start) {
is_adaptive = true;
}
allocation = alloc;
}
bool ul_harq_proc::has_pending_retx() const
{
return has_pending_retx_common(0);
}
bool ul_harq_proc::is_adaptive_retx() const
{
return is_adaptive and has_pending_retx();
}
void ul_harq_proc::new_tx(uint32_t tti_, int mcs, int tbs, ul_harq_proc::ul_alloc_t alloc, uint32_t max_retx_)
{
max_retx = (uint32_t)max_retx_;
is_adaptive = false;
allocation = alloc;
new_tx_common(0, tti_, mcs, tbs);
pending_data = tbs;
pending_ack = NULL_ACK;
}
void ul_harq_proc::new_retx(uint32_t tb_idx, uint32_t tti_, int* mcs, int* tbs, ul_harq_proc::ul_alloc_t alloc)
{
if (alloc.L != allocation.L or alloc.RB_start != allocation.RB_start) {
is_adaptive = true;
}
allocation = alloc;
new_retx_common(tb_idx, tti_, mcs, tbs);
}
void ul_harq_proc::set_ack(uint32_t tb_idx, bool ack_)
{
pending_ack = ack_ ? ACK : NACK;
set_ack_common(tb_idx, ack_);
}
bool ul_harq_proc::has_pending_ack() const
{
return pending_ack != NULL_ACK;
}
bool ul_harq_proc::get_pending_ack() const
{
return pending_ack == ACK;
}
void ul_harq_proc::reset_pending_data()
{
reset_pending_data_common();
pending_ack = NULL_ACK;
if (is_empty(0)) {
pending_data = 0;
}
}
uint32_t ul_harq_proc::get_pending_data() const
{
return (uint32_t) pending_data;
}
}

@ -36,40 +36,39 @@ namespace srsenb {
*
*****************************************************************/
void dl_metric_rr::set_log(srslte::log* log_)
{
log_h = log_;
}
void dl_metric_rr::sched_users(std::map<uint16_t, sched_ue>& ue_db, sched::dl_tti_sched_t* tti_sched)
{
typedef std::map<uint16_t, sched_ue>::iterator it_t;
tti_alloc = tti_sched;
if (ue_db.empty())
return;
// give priority in a time-domain RR basis
uint32_t priority_idx = tti_alloc->get_tti_tx_dl() % (uint32_t)ue_db.size();
it_t iter = ue_db.begin();
std::advance(iter, priority_idx);
for(uint32_t ue_count = 0 ; ue_count < ue_db.size() ; ++iter, ++ue_count) {
if(iter==ue_db.end()) {
iter = ue_db.begin(); // wrap around
}
sched_ue* user = &iter->second;
allocate_user(user);
}
}
bool dl_metric_rr::find_allocation(uint32_t nof_rbg, rbgmask_t* rbgmask)
{
*rbgmask = ~(tti_alloc->get_dl_mask());
uint32_t i = 0;
for (; i < rbgmask->size() and nof_rbg > 0; ++i) {
if (rbgmask->test(i)) {
nof_rbg--;
}
@ -79,26 +78,16 @@ bool dl_metric_rr::find_allocation(uint32_t nof_rbg, rbgmask_t* rbgmask)
return nof_rbg == 0;
}
void dl_metric_rr::update_allocation(rbgmask_t new_rbgmask)
{
*used_rbg |= new_rbgmask;
}
/**
* Checks if a mask can fit in the current RBG grid
* @param mask
* @return Returns true if all the mask entries set to true are empty
*/
bool dl_metric_rr::allocation_is_valid(rbgmask_t mask)
{
return (mask & (*used_rbg)).none();
}
dl_harq_proc* dl_metric_rr::allocate_user(sched_ue* user)
{
if (tti_alloc->is_dl_alloc(user)) {
return nullptr;
}
// FIXME: First do reTxs for all users. Only then do the rest.
alloc_outcome_t code;
uint32_t tti_dl = tti_alloc->get_tti_tx_dl();
dl_harq_proc* h = user->get_pending_dl_harq(tti_dl);
uint32_t req_bytes = user->get_pending_dl_new_data_total(tti_dl);
// Schedule retx if we have space
#if ASYNC_DL_SCHED
@ -106,23 +95,28 @@ dl_harq_proc* dl_metric_rr::allocate_user(sched_ue* user)
#else
if (h && !h->is_empty()) {
#endif
rbgmask_t retx_mask = h->get_rbgmask();
// Try to reuse the same mask
code = tti_alloc->alloc_dl_user(user, retx_mask, h->get_id());
if (code == alloc_outcome_t::SUCCESS) {
return h;
} else if (code == alloc_outcome_t::DCI_COLLISION) {
// No DCIs available for this user. Move to next
return NULL;
}
// If previous mask does not fit, find another with exact same number of rbgs
size_t nof_rbg = retx_mask.count();
if (find_allocation(nof_rbg, &retx_mask)) {
code = tti_alloc->alloc_dl_user(user, retx_mask, h->get_id());
if (code == alloc_outcome_t::SUCCESS) {
return h;
} else if (code == alloc_outcome_t::DCI_COLLISION) {
return NULL;
}
}
}
// If could not schedule the reTx, or there wasn't any pending retx, find an empty PID
#if ASYNC_DL_SCHED
h = user->get_empty_dl_harq();
@ -132,13 +126,14 @@ dl_harq_proc* dl_metric_rr::allocate_user(sched_ue* user)
#endif
// Allocate resources based on pending data
if (req_bytes) {
uint32_t pending_rbg = user->prb_to_rbg(user->get_required_prb_dl(req_bytes, tti_alloc->get_nof_ctrl_symbols()));
rbgmask_t newtx_mask(tti_alloc->get_dl_mask().size());
find_allocation(pending_rbg, &newtx_mask);
if (newtx_mask.any()) { // some empty spaces were found
code = tti_alloc->alloc_dl_user(user, newtx_mask, h->get_id());
if (code == alloc_outcome_t::SUCCESS) {
return h;
}
}
}
}
@ -152,12 +147,17 @@ dl_harq_proc* dl_metric_rr::allocate_user(sched_ue* user)
*
*****************************************************************/
void ul_metric_rr::set_log(srslte::log* log_)
{
log_h = log_;
}
void ul_metric_rr::sched_users(std::map<uint16_t, sched_ue>& ue_db, sched::ul_tti_sched_t* tti_sched)
{
typedef std::map<uint16_t, sched_ue>::iterator it_t;
tti_alloc = tti_sched;
current_tti = tti_alloc->get_tti_tx_ul();
if(ue_db.size()==0)
return;
@ -172,10 +172,8 @@ void ul_metric_rr::sched_users(std::map<uint16_t, sched_ue>& ue_db, ul_mask_t* s
if(iter==ue_db.end()) {
iter = ue_db.begin(); // wrap around
}
sched_ue* user = &iter->second;
allocate_user_retx_prbs(user);
}
// give priority in a time-domain RR basis
@ -185,21 +183,11 @@ void ul_metric_rr::sched_users(std::map<uint16_t, sched_ue>& ue_db, ul_mask_t* s
if(iter==ue_db.end()) {
iter = ue_db.begin(); // wrap around
}
sched_ue* user = &iter->second;
allocate_user_newtx_prbs(user);
}
}
bool ul_metric_rr::allocation_is_valid(ul_harq_proc::ul_alloc_t alloc)
{
if (alloc.RB_start + alloc.L > used_rb->size()) {
return false;
}
return not used_rb->any(alloc);
}
/**
* Finds a range of L contiguous PRBs that are empty
* @param L Size of the requested UL allocation in PRBs
@ -208,6 +196,7 @@ bool ul_metric_rr::allocation_is_valid(ul_harq_proc::ul_alloc_t alloc)
*/
bool ul_metric_rr::find_allocation(uint32_t L, ul_harq_proc::ul_alloc_t* alloc)
{
const prbmask_t* used_rb = &tti_alloc->get_ul_mask();
bzero(alloc, sizeof(ul_harq_proc::ul_alloc_t));
for (uint32_t n = 0; n < used_rb->size() && alloc->L < L; n++) {
if (not used_rb->test(n) && alloc->L == 0) {
@ -218,8 +207,8 @@ bool ul_metric_rr::find_allocation(uint32_t L, ul_harq_proc::ul_alloc_t* alloc)
} else if (alloc->L > 0) {
// avoid edges
if (n < 3) {
alloc->RB_start = 0;
alloc->L = 0;
} else {
break;
}
@ -236,38 +225,34 @@ bool ul_metric_rr::find_allocation(uint32_t L, ul_harq_proc::ul_alloc_t* alloc)
return alloc->L == L;
}
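// Illustrative trace: with PRBs {0,1} already used and L=3, the free run
// starts at n=2 and the result is {RB_start=2, L=3}. A free run that gets cut
// off before n=3 is discarded ("avoid edges") and the scan restarts further
// right; true is returned only if exactly L contiguous PRBs were found.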
bool ul_metric_rr::update_allocation(ul_harq_proc::ul_alloc_t alloc)
{
bool ret = false;
if(allocation_is_valid(alloc)) {
used_rb->fill(alloc);
return true;
}
return ret;
}
ul_harq_proc* ul_metric_rr::allocate_user_retx_prbs(sched_ue *user)
{
if (tti_alloc->is_ul_alloc(user)) {
return NULL;
}
alloc_outcome_t ret;
ul_harq_proc* h = user->get_ul_harq(current_tti);
// if there are procedures and we have space
if (h->has_pending_retx()) {
ul_harq_proc::ul_alloc_t alloc = h->get_alloc();
// If can schedule the same mask, do it
ret = tti_alloc->alloc_ul_user(user, alloc);
if (ret == alloc_outcome_t::SUCCESS) {
return h;
} else if (ret == alloc_outcome_t::DCI_COLLISION) {
log_h->warning("SCHED: Couldn't find space in PDCCH for UL tx of rnti=0x%x\n", user->get_rnti());
return NULL;
}
// If not, try to find another mask in the current tti with the same number of PRBs
if (find_allocation(alloc.L, &alloc)) {
ret = tti_alloc->alloc_ul_user(user, alloc);
if (ret == alloc_outcome_t::SUCCESS) {
return h;
} else if (ret == alloc_outcome_t::DCI_COLLISION) {
log_h->warning("SCHED: Couldn't find space in PDCCH for UL tx of rnti=0x%x\n", user->get_rnti());
}
}
}
return NULL;
@ -275,8 +260,11 @@ ul_harq_proc* ul_metric_rr::allocate_user_retx_prbs(sched_ue *user)
ul_harq_proc* ul_metric_rr::allocate_user_newtx_prbs(sched_ue* user)
{
if (tti_alloc->is_ul_alloc(user)) {
return NULL;
}
uint32_t pending_data = user->get_pending_ul_new_data(current_tti);
ul_harq_proc* h = user->get_ul_harq(current_tti);
// find an empty PID
if (h->is_empty(0) and pending_data) {
@ -285,12 +273,12 @@ ul_harq_proc* ul_metric_rr::allocate_user_newtx_prbs(sched_ue* user)
find_allocation(pending_rb, &alloc);
if (alloc.L > 0) { // at least one PRB was scheduled
alloc_outcome_t ret = tti_alloc->alloc_ul_user(user, alloc);
if (ret == alloc_outcome_t::SUCCESS) {
return h;
} else if (ret == alloc_outcome_t::DCI_COLLISION) {
log_h->warning("SCHED: Couldn't find space in PDCCH for UL tx of rnti=0x%x\n", user->get_rnti());
}
}
}
return NULL;

@ -48,8 +48,6 @@ namespace srsenb {
*******************************************************/
sched_ue::sched_ue() :
next_dl_harq_proc(NULL),
next_ul_harq_proc(NULL),
has_pucch(false),
power_headroom(0),
rnti(0),
@ -94,6 +92,7 @@ void sched_ue::set_cfg(uint16_t rnti_,
max_mcs_dl = 28;
max_mcs_ul = 28;
max_msg3retx = cell_cfg->maxharq_msg3tx;
cfg = *cfg_;
@ -105,7 +104,6 @@ void sched_ue::set_cfg(uint16_t rnti_,
for (int i = 0; i < SCHED_MAX_HARQ_PROC; i++) {
dl_harq[i].config(i, cfg.maxharq_tx, log_h);
ul_harq[i].config(i, cfg.maxharq_tx, log_h);
dl_harq[i].set_rbgmask(rbgmask_t((uint32_t)ceil((float)cell.nof_prb / P)));
}
// Generate allowed CCE locations
@ -437,7 +435,9 @@ void sched_ue::tpc_dec() {
*******************************************************/
// Generates a Format1 dci
// > return 0 if TBS<MIN_DATA_TBS
int sched_ue::generate_format1(
dl_harq_proc* h, sched_interface::dl_sched_data_t* data, uint32_t tti, uint32_t cfi, const rbgmask_t& user_mask)
{
pthread_mutex_lock(&mutex);
@ -447,7 +447,7 @@ int sched_ue::generate_format1(dl_harq_proc* h, sched_interface::dl_sched_data_t
int tbs = 0;
dci->alloc_type = SRSLTE_RA_ALLOC_TYPE0;
dci->type0_alloc.rbg_bitmask = (uint32_t)user_mask.to_uint64();
// If this is the first transmission for this UE, make room for MAC Contention Resolution ID
bool need_conres_ce = false;
@ -456,9 +456,11 @@ int sched_ue::generate_format1(dl_harq_proc* h, sched_interface::dl_sched_data_t
}
if (h->is_empty(0)) {
// Get total available data to transmit (includes MAC header)
uint32_t req_bytes = get_pending_dl_new_data_total_unlocked(tti);
uint32_t nof_prb = format1_count_prb((uint32_t)user_mask.to_uint64(), cell.nof_prb);
// Calculate exact number of RE for this PRB allocation
srslte_pdsch_grant_t grant = {};
srslte_dl_sf_cfg_t dl_sf = {};
@ -467,14 +469,15 @@ int sched_ue::generate_format1(dl_harq_proc* h, sched_interface::dl_sched_data_t
srslte_ra_dl_grant_to_grant_prb_allocation(dci, &grant, cell.nof_prb);
uint32_t nof_re = srslte_ra_dl_grant_nof_re(&cell, &dl_sf, &grant);
int mcs0 = fixed_mcs_dl;
if (need_conres_ce and cell.nof_prb < 10) { // SRB0 Tx. Use a higher MCS for the PRACH to fit in 6 PRBs
mcs0 = MCS_FIRST_DL;
}
if (mcs0 < 0) { // dynamic MCS
tbs = alloc_tbs_dl(nof_prb, nof_re, req_bytes, &mcs);
} else {
tbs = srslte_ra_tbs_from_idx(srslte_ra_tbs_idx_from_mcs(mcs0, false), nof_prb) / 8;
mcs = mcs0;
}
if (tbs < MIN_DATA_TBS) {
@ -482,7 +485,7 @@ int sched_ue::generate_format1(dl_harq_proc* h, sched_interface::dl_sched_data_t
return 0;
}
h->new_tx(user_mask, 0, tti, mcs, tbs, data->dci.location.ncce);
// Allocate MAC ConRes CE
if (need_conres_ce) {
@ -503,7 +506,7 @@ int sched_ue::generate_format1(dl_harq_proc* h, sched_interface::dl_sched_data_t
Debug("SCHED: Alloc format1 new mcs=%d, tbs=%d, nof_prb=%d, req_bytes=%d\n", mcs, tbs, nof_prb, req_bytes);
} else {
h->new_retx(user_mask, 0, tti, &mcs, &tbs, data->dci.location.ncce);
Debug("SCHED: Alloc format1 previous mcs=%d, tbs=%d\n", mcs, tbs);
}
@ -526,31 +529,27 @@ int sched_ue::generate_format1(dl_harq_proc* h, sched_interface::dl_sched_data_t
}
// Generates a Format2a dci
int sched_ue::generate_format2a(
dl_harq_proc* h, sched_interface::dl_sched_data_t* data, uint32_t tti, uint32_t cfi, const rbgmask_t& user_mask)
{
pthread_mutex_lock(&mutex);
int ret = generate_format2a_unlocked(h, data, tti, cfi);
int ret = generate_format2a_unlocked(h, data, tti, cfi, user_mask);
pthread_mutex_unlock(&mutex);
return ret;
}
// Generates a Format2a dci
int sched_ue::generate_format2a_unlocked(dl_harq_proc* h,
sched_interface::dl_sched_data_t* data,
uint32_t tti,
uint32_t cfi)
int sched_ue::generate_format2a_unlocked(
dl_harq_proc* h, sched_interface::dl_sched_data_t* data, uint32_t tti, uint32_t cfi, const rbgmask_t& user_mask)
{
bool tb_en[SRSLTE_MAX_TB] = {false};
srslte_dci_dl_t* dci = &data->dci;
dci->alloc_type = SRSLTE_RA_ALLOC_TYPE0;
dci->type0_alloc.rbg_bitmask = (uint32_t)h->get_rbgmask().to_uint64();
dci->type0_alloc.rbg_bitmask = (uint32_t)user_mask.to_uint64();
uint32_t nof_prb = format1_count_prb((uint32_t)h->get_rbgmask().to_uint64(), cell.nof_prb);
uint32_t nof_prb = format1_count_prb((uint32_t)user_mask.to_uint64(), cell.nof_prb); // FIXME: format1???
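// (format1_count_prb presumably just expands a type0 RBG bitmask into a PRB count, so it applies to format2a too)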
// Calculate exact number of RE for this PRB allocation
srslte_pdsch_grant_t grant = {};
@ -591,7 +590,7 @@ int sched_ue::generate_format2a_unlocked(dl_harq_proc* h,
int tbs = 0;
if (!h->is_empty(tb)) {
h->new_retx(tb, tti, &mcs, &tbs);
h->new_retx(user_mask, tb, tti, &mcs, &tbs, data->dci.location.ncce);
Debug("SCHED: Alloc format2/2a previous mcs=%d, tbs=%d\n", mcs, tbs);
} else if (tb_en[tb] && req_bytes && no_retx) {
if (fixed_mcs_dl < 0) {
@ -600,7 +599,7 @@ int sched_ue::generate_format2a_unlocked(dl_harq_proc* h,
tbs = srslte_ra_tbs_from_idx((uint32_t)srslte_ra_tbs_idx_from_mcs((uint32_t)fixed_mcs_dl, false), nof_prb) / 8;
mcs = fixed_mcs_dl;
}
h->new_tx(tb, tti, mcs, tbs, data->dci.location.ncce);
h->new_tx(user_mask, tb, tti, mcs, tbs, data->dci.location.ncce);
int rem_tbs = tbs;
int x = 0;
@ -643,16 +642,14 @@ int sched_ue::generate_format2a_unlocked(dl_harq_proc* h,
}
// Generates a Format2 dci
int sched_ue::generate_format2(dl_harq_proc *h,
sched_interface::dl_sched_data_t *data,
uint32_t tti,
uint32_t cfi)
int sched_ue::generate_format2(
dl_harq_proc* h, sched_interface::dl_sched_data_t* data, uint32_t tti, uint32_t cfi, const rbgmask_t& user_mask)
{
pthread_mutex_lock(&mutex);
/* Call Format 2a (common) */
int ret = generate_format2a_unlocked(h, data, tti, cfi);
int ret = generate_format2a_unlocked(h, data, tti, cfi, user_mask);
/* Compute precoding information */
data->dci.format = SRSLTE_DCI_FORMAT2;
@ -667,40 +664,49 @@ int sched_ue::generate_format2(dl_harq_proc *h,
return ret;
}
int sched_ue::generate_format0(ul_harq_proc* h, sched_interface::ul_sched_data_t* data, uint32_t tti, bool cqi_request)
int sched_ue::generate_format0(sched_interface::ul_sched_data_t* data,
uint32_t tti,
ul_harq_proc::ul_alloc_t alloc,
bool needs_pdcch,
srslte_dci_location_t dci_pos,
int explicit_mcs)
{
pthread_mutex_lock(&mutex);
ul_harq_proc* h = get_ul_harq(tti);
srslte_dci_ul_t* dci = &data->dci;
int mcs = 0;
bool cqi_request = needs_cqi_unlocked(tti, true);
// Set DCI position
data->needs_pdcch = needs_pdcch;
dci->location = dci_pos;
int mcs = (explicit_mcs >= 0) ? explicit_mcs : fixed_mcs_ul;
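// MCS precedence: an explicit_mcs override (presumably used for RAR/Msg3 grants) wins over the configured
// fixed_mcs_ul; a negative result falls through to dynamic MCS selection below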
int tbs = 0;
ul_harq_proc::ul_alloc_t allocation = h->get_alloc();
bool is_newtx = true;
if (h->get_rar_mcs(&mcs)) {
tbs = srslte_ra_tbs_from_idx(srslte_ra_tbs_idx_from_mcs(mcs, true), allocation.L) / 8;
h->new_tx(tti, mcs, tbs);
} else if (h->is_empty(0)) {
uint32_t req_bytes = get_pending_ul_new_data_unlocked(tti);
uint32_t N_srs = 0;
uint32_t nof_re = (2*(SRSLTE_CP_NSYMB(cell.cp)-1) - N_srs)*allocation.L*SRSLTE_NRE;
if (fixed_mcs_ul < 0) {
tbs = alloc_tbs_ul(allocation.L, nof_re, req_bytes, &mcs);
bool is_newtx = h->is_empty(0);
if (is_newtx) {
uint32_t nof_retx;
// If Msg3 set different nof retx
nof_retx = (data->needs_pdcch) ? get_max_retx() : max_msg3retx;
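// needs_pdcch == false means a RAR-scheduled Msg3, which follows the cell-wide max_msg3retx limit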
if (mcs >= 0) {
tbs = srslte_ra_tbs_from_idx(srslte_ra_tbs_idx_from_mcs(mcs, true), alloc.L) / 8;
} else {
tbs = srslte_ra_tbs_from_idx(srslte_ra_tbs_idx_from_mcs(fixed_mcs_ul, true), allocation.L) / 8;
mcs = fixed_mcs_ul;
// dynamic mcs
uint32_t req_bytes = get_pending_ul_new_data_unlocked(tti);
uint32_t N_srs = 0;
uint32_t nof_re = (2 * (SRSLTE_CP_NSYMB(cell.cp) - 1) - N_srs) * alloc.L * SRSLTE_NRE;
tbs = alloc_tbs_ul(alloc.L, nof_re, req_bytes, &mcs);
}
h->new_tx(tti, mcs, tbs);
h->new_tx(tti, mcs, tbs, alloc, nof_retx);
} else {
h->new_retx(0, tti, &mcs, NULL);
is_newtx = false;
tbs = srslte_ra_tbs_from_idx(srslte_ra_tbs_idx_from_mcs(mcs, true), allocation.L) / 8;
// retx
h->new_retx(0, tti, &mcs, NULL, alloc);
tbs = srslte_ra_tbs_from_idx(srslte_ra_tbs_idx_from_mcs(mcs, true), alloc.L) / 8;
}
data->tbs = tbs;
@ -708,7 +714,7 @@ int sched_ue::generate_format0(ul_harq_proc* h, sched_interface::ul_sched_data_t
if (tbs > 0) {
dci->rnti = rnti;
dci->format = SRSLTE_DCI_FORMAT0;
dci->type2_alloc.riv = srslte_ra_type2_to_riv(allocation.L, allocation.RB_start, cell.nof_prb);
dci->type2_alloc.riv = srslte_ra_type2_to_riv(alloc.L, alloc.RB_start, cell.nof_prb);
dci->tb.rv = sched::get_rvidx(h->nof_retx(0));
if (!is_newtx && h->is_adaptive_retx()) {
dci->tb.mcs_idx = 28 + dci->tb.rv;
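// per TS 36.213, UL MCS indices 29-31 signal only the redundancy version (rv=1..3); the UE keeps the previous TBS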
@ -966,12 +972,20 @@ bool sched_ue::is_sr_triggered()
return sr;
}
void sched_ue::reset_timeout_dl_harq(uint32_t tti) {
void sched_ue::reset_pending_pids(uint32_t tti_rx)
{
uint32_t tti_tx_dl = TTI_TX(tti_rx), tti_tx_ul = TTI_RX_ACK(tti_rx);
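// Derive from the rx TTI the DL tx TTI and the UL tx TTI whose harqs may hold stale pending data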
// UL harqs
get_ul_harq(tti_tx_ul)->reset_pending_data();
// DL harqs
for (int i=0;i<SCHED_MAX_HARQ_PROC;i++) {
if (!(dl_harq[i].is_empty(0) && dl_harq[i].is_empty(1))) {
uint32_t tti_diff = srslte_tti_interval(tti, dl_harq[i].get_tti());
dl_harq[i].reset_pending_data();
if (not dl_harq[i].is_empty()) {
uint32_t tti_diff = srslte_tti_interval(tti_tx_dl, dl_harq[i].get_tti());
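// A harq stuck for more than 50 TTIs (but less than half the 10240-TTI wrap) is considered stale and reset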
if (tti_diff > 50 and tti_diff < 10240 / 2) {
log_h->info("SCHED: pid=%d is old. tti_pid=%d, now is %d, resetting\n", i, dl_harq[i].get_tti(), tti);
log_h->info("SCHED: pid=%d is old. tti_pid=%d, now is %d, resetting\n", i, dl_harq[i].get_tti(), tti_tx_dl);
dl_harq[i].reset(0);
dl_harq[i].reset(1);
}
@ -1030,26 +1044,6 @@ ul_harq_proc* sched_ue::get_ul_harq(uint32_t tti)
return &ul_harq[tti % SCHED_MAX_HARQ_PROC];
}
void sched_ue::set_dl_alloc(dl_harq_proc* alloc)
{
next_dl_harq_proc = alloc;
}
dl_harq_proc* sched_ue::get_dl_alloc()
{
return next_dl_harq_proc;
}
void sched_ue::set_ul_alloc(ul_harq_proc* alloc)
{
next_ul_harq_proc = alloc;
}
ul_harq_proc* sched_ue::get_ul_alloc()
{
return next_ul_harq_proc;
}
dl_harq_proc* sched_ue::find_dl_harq(uint32_t tti)
{
for (uint32_t i = 0; i < SCHED_MAX_HARQ_PROC; ++i) {
@ -1060,7 +1054,7 @@ dl_harq_proc* sched_ue::find_dl_harq(uint32_t tti)
return NULL;
}
const dl_harq_proc* sched_ue::get_dl_harq(uint32_t idx) const
dl_harq_proc* sched_ue::get_dl_harq(uint32_t idx)
{
return &dl_harq[idx];
}

@ -10,7 +10,7 @@
#
# srsLTE is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# A copy of the GNU Affero General Public License can be found in

@ -21,6 +21,7 @@
#include <algorithm>
#include <cstdlib>
#include <random>
#include <set>
#include <srsenb/hdr/mac/scheduler.h>
#include <srsenb/hdr/mac/scheduler_ue.h>
@ -37,8 +38,14 @@
#include "srslte/phy/utils/debug.h"
#include "srslte/radio/radio.h"
// Create classes
long int seed = time(NULL);
// uint32_t const seed = std::random_device()();
uint32_t const seed = 2452071795; // time(NULL);
std::default_random_engine rand_gen(seed);
std::uniform_real_distribution<float> unif_dist(0, 1.0);
float randf()
{
return unif_dist(rand_gen);
}
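// Hard-coding the seed keeps failing runs reproducible; re-enable the std::random_device line above to fuzz new scenarios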
uint32_t err_counter = 0;
uint32_t warn_counter = 0;
struct ue_stats_t {
@ -47,6 +54,18 @@ struct ue_stats_t {
};
std::map<uint16_t, ue_stats_t> ue_stats;
template <class MapContainer, class Predicate>
void erase_if(MapContainer& c, Predicate should_remove)
{
for (auto it = c.begin(); it != c.end();) {
if (should_remove(*it)) {
it = c.erase(it);
} else {
++it;
}
}
}
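// Erase-while-iterating helper for (multi)maps: map::erase() invalidates only the erased iterator and
// returns the next one, so this loop is safe. Used in ack_txs() to drop acks of users that were removed.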
/*******************
* Logging *
*******************/
@ -77,7 +96,7 @@ void log_on_exit()
for (auto& e : ue_stats) {
log_out.info("0x%x: {DL RBs: %lu, UL RBs: %lu}\n", e.first, e.second.nof_dl_rbs, e.second.nof_ul_rbs);
}
log_out.info("[TESTER] This was the seed: %ld\n", seed);
log_out.info("[TESTER] This was the seed: %u\n", seed);
}
#define Warning(fmt, ...) \
@ -97,10 +116,6 @@ void log_on_exit()
/*******************
* Dummies *
*******************/
float randf()
{
return (float)((double)rand() / (RAND_MAX));
}
struct sched_sim_args {
struct tti_event_t {
@ -127,22 +142,6 @@ struct sched_sim_args {
// Designed for testing purposes
struct sched_tester : public srsenb::sched {
struct dl_harq_params_t {
uint32_t pid;
uint32_t nof_retxs;
uint32_t tti;
bool is_empty = true;
bool pending_retx = false;
dl_harq_params_t() = default;
dl_harq_params_t(const srsenb::dl_harq_proc& h, uint32_t tti_tx_dl)
{
pid = h.get_id();
nof_retxs = h.nof_retx(0);
tti = h.get_tti();
is_empty = h.is_empty();
pending_retx = h.has_pending_retx(0, tti_tx_dl); // or h.has_pending_retx(1, h.get_tti());
}
};
struct tester_user_results {
uint32_t dl_pending_data = 0;
uint32_t ul_pending_data = 0; ///< data pending for UL
@ -154,7 +153,8 @@ struct sched_tester : public srsenb::sched {
bool ul_retx_got_delayed = false;
srsenb::sched_interface::ul_sched_data_t* ul_sched = NULL; // fast lookup
srsenb::sched_interface::dl_sched_data_t* dl_sched = NULL; // fast lookup
dl_harq_params_t dl_harqs[2 * FDD_HARQ_DELAY_MS];
srsenb::dl_harq_proc dl_harqs[2 * FDD_HARQ_DELAY_MS];
srsenb::ul_harq_proc ul_harq;
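// NOTE: full harq-process copies (the old dl_harq_params_t snapshot was dropped) so tests can compare any field pre/post scheduling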
};
struct sched_tti_data {
bool is_prach_tti_tx_ul = false;
@ -186,16 +186,22 @@ struct sched_tester : public srsenb::sched {
uint32_t tti;
bool dl_ack;
uint32_t retx_delay;
dl_harq_params_t dl_harq;
srsenb::dl_harq_proc dl_harq;
ack_info_t() : dl_ack(false), retx_delay(0) {}
};
struct ul_ack_info_t {
uint16_t rnti;
uint32_t tti_ack, tti_tx_ul;
bool ack = false;
srsenb::ul_harq_proc ul_harq;
};
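// NOTE: UL acks are keyed by tti_tx_ul, since the eNB learns the PUSCH CRC in the TTI it receives that transmission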
sched_sim_args sim_args;
// tester control data
typedef std::map<uint16_t, ue_info>::iterator ue_it_t;
std::map<uint16_t, ue_info> tester_ues;
std::multimap<uint32_t, ack_info_t> to_ack;
std::multimap<uint32_t, ul_ack_info_t> to_ul_ack;
typedef std::multimap<uint32_t, ack_info_t>::iterator ack_it_t;
// sched results
@ -206,14 +212,14 @@ struct sched_tester : public srsenb::sched {
srsenb::sched_interface::ue_cfg_t ue_cfg_);
void rem_user(uint16_t rnti);
void test_ra();
void test_dci_locations();
void test_tti_result();
void assert_no_empty_allocs();
void test_collisions();
void test_harqs();
void run_tti(uint32_t tti_rx);
private:
void new_tti(uint32_t tti_);
void new_test_tti(uint32_t tti_);
void process_tti_args();
void before_sched();
void process_results();
@ -249,7 +255,7 @@ void sched_tester::rem_user(uint16_t rnti)
tti_data.ue_data.erase(rnti);
}
void sched_tester::new_tti(uint32_t tti_)
void sched_tester::new_test_tti(uint32_t tti_)
{
// NOTE: make a local copy, since some of these variables may be cleared during scheduling
tti_data.tti_rx = tti_;
@ -332,8 +338,10 @@ void sched_tester::before_sched()
for (uint32_t i = 0; i < 2 * FDD_HARQ_DELAY_MS; ++i) {
const srsenb::dl_harq_proc* h = user->get_dl_harq(i);
tti_data.ue_data[rnti].dl_harqs[i] = dl_harq_params_t(*h, tti_data.tti_tx_dl);
tti_data.ue_data[rnti].dl_harqs[i] = *h;
}
// NOTE: ACK might have just cleared the harq for tti_data.tti_tx_ul
tti_data.ue_data[rnti].ul_harq = *user->get_ul_harq(tti_data.tti_tx_ul);
}
// TODO: Check whether pending_rar.rar_tti corresponds to a prach_tti
@ -358,7 +366,7 @@ void sched_tester::process_results()
rnti);
}
test_dci_locations();
test_tti_result();
test_ra();
test_collisions();
assert_no_empty_allocs();
@ -367,7 +375,7 @@ void sched_tester::process_results()
void sched_tester::run_tti(uint32_t tti_rx)
{
new_tti(tti_rx);
new_test_tti(tti_rx);
log_out.info("[TESTER] ---- tti=%u | nof_ues=%lu ----\n", tti_rx, ue_db.size());
process_tti_args();
@ -395,37 +403,39 @@ void sched_tester::test_ra()
// Check whether RA has completed correctly
int prach_tti = userinfo.prach_tti;
if (userinfo.msg3_tti <= prach_tti) { // Msg3 not yet sent
bool rar_not_sent = prach_tti >= userinfo.rar_tti;
uint32_t window[2] = {(uint32_t)prach_tti + 3, prach_tti + 3 + cfg.prach_rar_window};
if (rar_not_sent) {
CondError(tti_data.tti_tx_dl > window[1], "[TESTER] There was no RAR scheduled within the RAR Window\n");
if (tti_data.tti_tx_dl >= window[0]) {
for (uint32_t i = 0; i < tti_data.sched_result_dl.nof_rar_elems; ++i) {
for (uint32_t j = 0; j < tti_data.sched_result_dl.rar[i].nof_grants; ++j) {
if (tti_data.sched_result_dl.rar[i].msg3_grant[j].ra_id == userinfo.ra_id) {
userinfo.rar_tti = tti_data.tti_tx_dl;
}
if (userinfo.msg3_tti > prach_tti) { // Msg3 already scheduled
continue;
}
bool rar_not_sent = prach_tti >= userinfo.rar_tti;
uint32_t window[2] = {(uint32_t)prach_tti + 3, prach_tti + 3 + cfg.prach_rar_window};
if (rar_not_sent) {
CondError(tti_data.tti_tx_dl > window[1], "[TESTER] There was no RAR scheduled within the RAR Window\n");
if (tti_data.tti_tx_dl >= window[0]) {
for (uint32_t i = 0; i < tti_data.sched_result_dl.nof_rar_elems; ++i) {
for (uint32_t j = 0; j < tti_data.sched_result_dl.rar[i].nof_grants; ++j) {
if (tti_data.sched_result_dl.rar[i].msg3_grant[j].ra_id == userinfo.ra_id) {
userinfo.rar_tti = tti_data.tti_tx_dl;
}
}
}
} else { // RAR completed, check for Msg3
uint32_t msg3_tti = (uint32_t)(userinfo.rar_tti + FDD_HARQ_DELAY_MS + MSG3_DELAY_MS) % 10240;
if (msg3_tti == tti_data.tti_tx_ul) {
for (uint32_t i = 0; i < tti_data.sched_result_ul.nof_dci_elems; ++i) {
if (tti_data.sched_result_ul.pusch[i].dci.rnti == rnti) {
CondError(tti_data.sched_result_ul.pusch[i].needs_pdcch,
"[TESTER] Msg3 allocations do not require PDCCH\n");
CondError(tti_data.ul_pending_msg3.rnti != rnti, "[TESTER] The UL pending msg3 RNTI did not match\n");
CondError(not tti_data.ul_pending_msg3.enabled, "[TESTER] The UL pending msg3 was not enabled\n");
userinfo.msg3_tti = tti_data.tti_tx_ul;
msg3_count++;
}
}
} else { // RAR completed, check for Msg3
uint32_t msg3_tti = (uint32_t)(userinfo.rar_tti + FDD_HARQ_DELAY_MS + MSG3_DELAY_MS) % 10240;
if (msg3_tti == tti_data.tti_tx_ul) {
for (uint32_t i = 0; i < tti_data.sched_result_ul.nof_dci_elems; ++i) {
if (tti_data.sched_result_ul.pusch[i].dci.rnti == rnti) {
CondError(tti_data.sched_result_ul.pusch[i].needs_pdcch,
"[TESTER] Msg3 allocations do not require PDCCH\n");
CondError(tti_data.ul_pending_msg3.rnti != rnti, "[TESTER] The UL pending msg3 RNTI did not match\n");
CondError(not tti_data.ul_pending_msg3.enabled, "[TESTER] The UL pending msg3 was not enabled\n");
userinfo.msg3_tti = tti_data.tti_tx_ul;
msg3_count++;
}
CondError(msg3_count == 0, "[TESTER] No UL msg3 allocation was made\n");
} else if (msg3_tti < tti_data.tti_tx_ul) {
TestError("[TESTER] No UL msg3 allocation was made\n");
}
CondError(msg3_count == 0, "[TESTER] No UL msg3 allocation was made\n");
} else if (msg3_tti < tti_data.tti_tx_ul) {
TestError("[TESTER] No UL msg3 allocation was made\n");
}
}
}
@ -448,7 +458,7 @@ void sched_tester::assert_no_empty_allocs()
TestError("[TESTER] There was a user without data that got allocated in UL\n");
}
srsenb::ul_harq_proc* hul = user->get_ul_harq(tti_data.tti_tx_ul);
iter.second.ul_retx_got_delayed = iter.second.has_ul_retx and hul->is_new_tx();
iter.second.ul_retx_got_delayed = iter.second.has_ul_retx and iter.second.ul_harq.is_empty(0);
tti_data.total_ues.ul_retx_got_delayed |= iter.second.ul_retx_got_delayed;
// Retxs cannot give space to newtx allocations
CondError(
@ -470,10 +480,12 @@ void sched_tester::assert_no_empty_allocs()
/**
* Tests whether there were collisions in the DCI allocations
*/
void sched_tester::test_dci_locations()
void sched_tester::test_tti_result()
{
// checks if there is any collision. If not, fills the mask
auto try_fill = [&](const srslte_dci_location_t& dci_loc, const char* ch) {
tti_sched_t* tti_sched = get_tti_sched(tti_data.tti_rx);
// Helper Function: checks if there is any collision. If not, fills the mask
auto try_cce_fill = [&](const srslte_dci_location_t& dci_loc, const char* ch) {
uint32_t cce_start = dci_loc.ncce, cce_stop = dci_loc.ncce + (1u << dci_loc.L);
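// an aggregation level L grant occupies 2^L consecutive CCEs (1, 2, 4 or 8)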
if (tti_data.used_cce.any(cce_start, cce_stop)) {
TestError("[TESTER] %s DCI collision between CCE positions (%u, %u)\n", ch, cce_start, cce_stop);
@ -481,30 +493,59 @@ void sched_tester::test_dci_locations()
tti_data.used_cce.fill(cce_start, cce_stop);
};
// verify there are no dci collisions for UL, DL data, BC, RAR
/* verify there are no dci collisions for UL, DL data, BC, RAR */
for (uint32_t i = 0; i < tti_data.sched_result_ul.nof_dci_elems; ++i) {
if (not tti_data.sched_result_ul.pusch[i].needs_pdcch) {
// In case of adaptive retx or Msg3
const auto& pusch = tti_data.sched_result_ul.pusch[i];
CondError(pusch.tbs == 0, "Allocated UL grant with invalid TBS=%d\n", pusch.tbs);
CondError(ue_db.count(pusch.dci.rnti) == 0, "The allocated rnti=0x%x does not exist\n", pusch.dci.rnti);
if (not pusch.needs_pdcch) {
// In case of non-adaptive retx or Msg3
continue;
}
srslte_dci_location_t& dci_loc = tti_data.sched_result_ul.pusch[i].dci.location;
CondError(dci_loc.L == 0, "[TESTER] Invalid aggregation level %d\n", dci_loc.L); // TODO: Extend this test
try_fill(dci_loc, "UL");
CondError(pusch.dci.location.L == 0,
"[TESTER] Invalid aggregation level %d\n",
pusch.dci.location.L); // TODO: Extend this test
try_cce_fill(pusch.dci.location, "UL");
}
for (uint32_t i = 0; i < tti_data.sched_result_dl.nof_data_elems; ++i) {
try_fill(tti_data.sched_result_dl.data[i].dci.location, "DL data");
auto& data = tti_data.sched_result_dl.data[i];
try_cce_fill(data.dci.location, "DL data");
CondError(ue_db.count(data.dci.rnti) == 0, "Allocated rnti=0x%x that does not exist\n", data.dci.rnti);
}
for (uint32_t i = 0; i < tti_data.sched_result_dl.nof_bc_elems; ++i) {
try_fill(tti_data.sched_result_dl.bc[i].dci.location, "DL BC");
auto& bc = tti_data.sched_result_dl.bc[i];
try_cce_fill(bc.dci.location, "DL BC");
if (bc.type == sched_interface::dl_sched_bc_t::BCCH) {
CondError(bc.index >= MAX_SIBS, "Invalid SIB idx=%d\n", bc.index + 1);
CondError(bc.tbs < cfg.sibs[bc.index].len,
"Allocated BC process with TBS=%d < sib_len=%d\n",
bc.tbs,
cfg.sibs[bc.index].len);
} else if (bc.type == sched_interface::dl_sched_bc_t::PCCH) {
CondError(bc.tbs == 0, "Allocated paging process with invalid TBS=%d\n", bc.tbs);
} else {
TestError("Invalid broadcast process id=%d\n", (int)bc.type);
}
}
for (uint32_t i = 0; i < tti_data.sched_result_dl.nof_rar_elems; ++i) {
try_fill(tti_data.sched_result_dl.rar[i].dci.location, "DL RAR");
const auto& rar = tti_data.sched_result_dl.rar[i];
try_cce_fill(rar.dci.location, "DL RAR");
CondError(rar.tbs == 0, "Allocated RAR process with invalid TBS=%d\n", rar.tbs);
for (uint32_t j = 0; j < rar.nof_grants; ++j) {
const auto& msg3_grant = rar.msg3_grant[j];
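// pending_msg3 is presumably a 10-entry ring indexed by subframe; Msg3 is due MSG3_DELAY_MS + TX_DELAY
// subframes after the RAR tx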
uint32_t pending_tti = (tti_sched->get_tti_tx_dl() + MSG3_DELAY_MS + TX_DELAY) % 10;
CondError(not pending_msg3[pending_tti].enabled, "Pending Msg3 should have been set\n");
uint32_t rba =
srslte_ra_type2_to_riv(pending_msg3[pending_tti].L, pending_msg3[pending_tti].n_prb, cfg.cell.nof_prb);
CondError(msg3_grant.grant.rba != rba, "Pending Msg3 RBA is not valid\n");
}
}
// verify if sched_result "used_cce" coincide with sched "used_cce"
if (tti_data.used_cce != sched_vars.tti_vars(tti_data.tti_rx).used_cce) {
TestError("[TESTER] The used_cce do not match: %s\n",
sched_vars.tti_vars(tti_data.tti_rx).used_cce.to_string().c_str());
/* verify if sched_result "used_cce" coincide with sched "used_cce" */
auto* tti_alloc = get_tti_sched(tti_data.tti_rx);
if (tti_data.used_cce != tti_alloc->get_pdcch_mask()) {
std::string mask_str = tti_alloc->get_pdcch_mask().to_string();
TestError("[TESTER] The used_cce do not match: (%s!=%s)\n", mask_str.c_str(), tti_data.used_cce.to_hex().c_str());
}
// FIXME: Check postponed retxs
@ -523,46 +564,103 @@ void sched_tester::test_dci_locations()
void sched_tester::test_harqs()
{
// check consistency of harq procedures and allocations
/* check consistency of DL harq procedures and allocations */
for (uint32_t i = 0; i < tti_data.sched_result_dl.nof_data_elems; ++i) {
uint32_t h_id = tti_data.sched_result_dl.data[i].dci.pid;
uint16_t rnti = tti_data.sched_result_dl.data[i].dci.rnti;
const auto& data = tti_data.sched_result_dl.data[i];
uint32_t h_id = data.dci.pid;
uint16_t rnti = data.dci.rnti;
const srsenb::dl_harq_proc* h = ue_db[rnti].get_dl_harq(h_id);
CondError(h == NULL, "[TESTER] scheduled DL harq pid=%d does not exist\n", h_id);
CondError(h == nullptr, "[TESTER] scheduled DL harq pid=%d does not exist\n", h_id);
CondError(h->is_empty(), "[TESTER] Cannot schedule an empty harq proc\n");
CondError(h->get_tti() != tti_data.tti_tx_dl,
"[TESTER] The scheduled DL harq pid=%d does not a valid tti=%u",
"[TESTER] The scheduled DL harq pid=%d does not a valid tti=%u\n",
h_id,
tti_data.tti_tx_dl);
if (tti_data.ue_data[rnti].dl_harqs[h_id].pending_retx) { // retx
CondError(tti_data.ue_data[rnti].dl_harqs[h_id].nof_retxs + 1 != h->nof_retx(0),
CondError(h->get_n_cce() != data.dci.location.ncce, "[TESTER] Harq DCI location does not match with result\n");
if (tti_data.ue_data[rnti].dl_harqs[h_id].has_pending_retx(0, tti_data.tti_tx_dl)) { // retx
CondError(tti_data.ue_data[rnti].dl_harqs[h_id].nof_retx(0) + 1 != h->nof_retx(0),
"[TESTER] A dl harq of user rnti=0x%x was likely overwritten.\n",
rnti);
CondError(h->nof_retx(0) >= sim_args.ue_cfg.maxharq_tx,
"[TESTER] The number of retx=%d exceeded its max=%d\n",
h->nof_retx(0),
sim_args.ue_cfg.maxharq_tx);
} else { // newtx
CondError(h->nof_retx(0) != 0, "[TESTER] A new harq was scheduled but with invalid number of retxs\n");
}
}
for (uint32_t i = 0; i < tti_data.sched_result_ul.nof_dci_elems; ++i) {
const auto& pusch = tti_data.sched_result_ul.pusch[i];
uint16_t rnti = pusch.dci.rnti;
const auto& ue_data = tti_data.ue_data[rnti];
const srsenb::ul_harq_proc* h = ue_db[rnti].get_ul_harq(tti_data.tti_tx_ul);
CondError(h == nullptr or h->is_empty(), "[TESTER] scheduled UL harq does not exist or is empty\n");
CondError(h->get_tti() != tti_data.tti_tx_ul,
"[TESTER] The scheduled UL harq does not a valid tti=%u\n",
tti_data.tti_tx_ul);
CondError(h->has_pending_ack(), "[TESTER] At the end of the TTI, there shouldn't be any pending ACKs\n");
if (h->has_pending_retx()) {
// retx
CondError(ue_data.ul_harq.is_empty(0), "[TESTER] reTx in an UL harq that was empty\n");
CondError(h->nof_retx(0) != ue_data.ul_harq.nof_retx(0) + 1,
"[TESTER] A retx UL harq was scheduled but with invalid number of retxs\n");
CondError(h->is_adaptive_retx() and not pusch.needs_pdcch, "[TESTER] Adaptive retxs need PDCCH alloc\n");
} else {
CondError(h->nof_retx(0) != 0, "[TESTER] A new harq was scheduled but with invalid number of retxs\n");
CondError(not ue_data.ul_harq.is_empty(0), "[TESTER] UL new tx in a UL harq that was not empty\n");
}
}
/* Check PHICH allocations */
for (uint32_t i = 0; i < tti_data.sched_result_ul.nof_phich_elems; ++i) {
const auto& phich = tti_data.sched_result_ul.phich[i];
CondError(tti_data.ue_data.count(phich.rnti) == 0, "[TESTER] Allocated PHICH rnti no longer exists\n");
const auto& hprev = tti_data.ue_data[phich.rnti].ul_harq;
const auto* h = ue_db[phich.rnti].get_ul_harq(tti_data.tti_tx_ul);
CondError(not hprev.has_pending_ack(), "[TESTER] Alloc PHICH did not have any pending ack\n");
bool maxretx_flag = hprev.nof_retx(0) + 1 >= hprev.max_nof_retx();
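// once a harq reaches its max retx count it is dropped, so a NACKed harq may legitimately have no pending data left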
if (phich.phich == sched_interface::ul_sched_phich_t::ACK) {
CondError(!hprev.is_empty(), "[TESTER] ack phich for UL harq that is not empty\n");
} else {
CondError(h->get_pending_data() == 0 and !maxretx_flag, "[TESTER] NACKed harq has no pending data\n");
}
}
for (const auto& ue : ue_db) {
const auto& hprev = tti_data.ue_data[ue.first].ul_harq;
if (not hprev.has_pending_ack())
continue;
uint32_t i = 0;
for (; i < tti_data.sched_result_ul.nof_phich_elems; ++i) {
const auto& phich = tti_data.sched_result_ul.phich[i];
if (phich.rnti == ue.first)
break;
}
CondError(i == tti_data.sched_result_ul.nof_phich_elems,
"[TESTER] harq had pending ack but no phich was allocked\n");
}
// schedule future acks
for (uint32_t i = 0; i < tti_data.sched_result_dl.nof_data_elems; ++i) {
ack_info_t ack_data;
ack_data.rnti = tti_data.sched_result_dl.data[i].dci.rnti;
ack_data.tti = FDD_HARQ_DELAY_MS + tti_data.tti_tx_dl;
const srsenb::dl_harq_proc* dl_h = ue_db[ack_data.rnti].get_dl_harq(tti_data.sched_result_dl.data[i].dci.pid);
ack_data.dl_harq = dl_harq_params_t(*dl_h, tti_data.tti_tx_dl);
if (ack_data.dl_harq.nof_retxs == 0) {
ack_data.dl_harq = *dl_h;
if (ack_data.dl_harq.nof_retx(0) == 0) {
ack_data.dl_ack = randf() > sim_args.P_retx;
} else { // always ack after three retxs
ack_data.dl_ack = ack_data.dl_harq.nof_retxs == 3;
ack_data.dl_ack = ack_data.dl_harq.nof_retx(0) == 3;
}
// Remove harq from the ack list if there was a harq rewrite
ack_it_t it = to_ack.begin();
while (it != to_ack.end() and it->first < ack_data.tti) {
if (it->second.rnti == ack_data.rnti and it->second.dl_harq.pid == ack_data.dl_harq.pid) {
if (it->second.rnti == ack_data.rnti and it->second.dl_harq.get_id() == ack_data.dl_harq.get_id()) {
CondError(it->second.tti + 2 * FDD_HARQ_DELAY_MS > ack_data.tti,
"[TESTER] The retx dl harq id=%d was transmitted too soon\n",
ack_data.dl_harq.pid);
ack_data.dl_harq.get_id());
ack_it_t toerase_it = it++;
to_ack.erase(toerase_it);
continue;
@ -574,72 +672,78 @@ void sched_tester::test_harqs()
to_ack.insert(std::make_pair(ack_data.tti, ack_data));
}
// // Check whether some pids got old
// for (auto& e : ue_db) {
// for (int i = 0; i < 2 * FDD_HARQ_DELAY_MS; i++) {
// if (not(e.second.get_dl_harq(i)->is_empty(0) and e.second.get_dl_harq(1))) {
// if (srslte_tti_interval(tti_data.tti_tx_dl, e.second.get_dl_harq(i)->get_tti()) > 49) {
// TestError("[TESTER] The pid=%d for rnti=0x%x got old.\n", e.second.get_dl_harq(i)->get_id(), e.first);
// }
// }
// }
// }
/* Schedule UL ACKs */
for (uint32_t i = 0; i < tti_data.sched_result_ul.nof_dci_elems; ++i) {
const auto& pusch = tti_data.sched_result_ul.pusch[i];
ul_ack_info_t ack_data;
ack_data.rnti = pusch.dci.rnti;
ack_data.ul_harq = *ue_db[ack_data.rnti].get_ul_harq(tti_data.tti_tx_ul);
ack_data.tti_tx_ul = tti_data.tti_tx_ul;
ack_data.tti_ack = tti_data.tti_tx_ul + FDD_HARQ_DELAY_MS;
if (ack_data.ul_harq.nof_retx(0) == 0) {
ack_data.ack = randf() > sim_args.P_retx;
} else {
ack_data.ack = ack_data.ul_harq.nof_retx(0) == 3;
}
to_ul_ack.insert(std::make_pair(ack_data.tti_tx_ul, ack_data));
}
// Check whether some pids got old
for (auto& user : ue_db) {
for (int i = 0; i < 2 * FDD_HARQ_DELAY_MS; i++) {
if (not(user.second.get_dl_harq(i)->is_empty(0) and user.second.get_dl_harq(i)->is_empty(1))) {
if (srslte_tti_interval(tti_data.tti_tx_dl, user.second.get_dl_harq(i)->get_tti()) > 49) {
TestError("[TESTER] The pid=%d for rnti=0x%x got old.\n", user.second.get_dl_harq(i)->get_id(), user.first);
}
}
}
}
}
void sched_tester::test_collisions()
{
srsenb::ul_mask_t ul_allocs;
ul_allocs.resize(cfg.cell.nof_prb);
tti_sched_t* tti_sched = get_tti_sched(tti_data.tti_rx);
srsenb::prbmask_t ul_allocs(cfg.cell.nof_prb);
// Helper function: checks for collisions in the cumulative UL PRB mask and fills it
auto try_ul_fill = [&](srsenb::ul_harq_proc::ul_alloc_t alloc, const char* ch_str, bool strict = true) {
CondError((alloc.RB_start + alloc.L) > cfg.cell.nof_prb,
"[TESTER] Allocated RBs (%d,%d) out of bounds\n",
alloc.RB_start,
alloc.RB_start + alloc.L);
CondError(alloc.L == 0, "[TESTER] Allocations must have at least one PRB\n");
if (strict and ul_allocs.any(alloc.RB_start, alloc.RB_start + alloc.L)) {
TestError("[TESTER] There is a collision of %s alloc=(%d,%d) and cumulative_mask=%s\n",
ch_str,
alloc.RB_start,
alloc.RB_start + alloc.L,
ul_allocs.to_hex().c_str());
}
ul_allocs.fill(alloc.RB_start, alloc.RB_start + alloc.L, true);
};
// TEST: Check if there is space for PRACH
/* TEST: Check if there is space for PRACH */
if (tti_data.is_prach_tti_tx_ul) {
srsenb::ul_harq_proc::ul_alloc_t prach_alloc = {cfg.prach_freq_offset, 6};
if (ul_allocs.any(prach_alloc)) {
TestError("[TESTER] There is a collision with the PRACH\n");
}
ul_allocs.fill(prach_alloc);
try_ul_fill({cfg.prach_freq_offset, 6}, "PRACH");
}
// TEST: check collisions in the UL PUSCH and PUCCH
/* TEST: check collisions in the UL PUSCH and PUCCH */
for (uint32_t i = 0; i < tti_data.sched_result_ul.nof_dci_elems; ++i) {
uint32_t L, RBstart;
srslte_ra_type2_from_riv(
tti_data.sched_result_ul.pusch[i].dci.type2_alloc.riv, &L, &RBstart, cfg.cell.nof_prb, cfg.cell.nof_prb);
CondError((RBstart + L) > cfg.cell.nof_prb,
"[TESTER] Allocated RBs (%d,%d) out of bounds (0,%d)\n",
RBstart,
RBstart + L,
cfg.cell.nof_prb);
try_ul_fill({RBstart, L}, "PUSCH");
ue_stats[tti_data.sched_result_ul.pusch[i].dci.rnti].nof_ul_rbs += L;
if (ul_allocs.any(RBstart, RBstart + L)) {
TestError("[TESTER] There is a collision for UE UL data alloc=(%d,%d) with joint mask=%s\n",
RBstart,
RBstart + L,
ul_allocs.to_hex().c_str());
}
ul_allocs.fill(RBstart, RBstart + L, true);
}
// Fill PUCCH
if (cfg.cell.nof_prb != 6 or (not tti_data.is_prach_tti_tx_ul and not tti_data.ul_pending_msg3.enabled)) {
if (ul_allocs.any(0, cfg.nrb_pucch) or ul_allocs.any(cfg.cell.nof_prb - cfg.nrb_pucch, cfg.cell.nof_prb)) {
TestError("[TESTER] There is a collision with the PUCCH\n");
}
}
ul_allocs.fill(0, cfg.nrb_pucch);
ul_allocs.fill(cfg.cell.nof_prb - cfg.nrb_pucch, cfg.cell.nof_prb);
/* TEST: check collisions with PUCCH */
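// on 6-PRB (1.4 MHz) cells the PRACH or a pending Msg3 overlaps the PUCCH PRBs, hence the relaxed check in those TTIs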
bool strict = cfg.cell.nof_prb != 6 or (not tti_data.is_prach_tti_tx_ul and not tti_data.ul_pending_msg3.enabled);
try_ul_fill({0, (uint32_t)cfg.nrb_pucch}, "PUCCH", strict);
try_ul_fill({cfg.cell.nof_prb - cfg.nrb_pucch, (uint32_t)cfg.nrb_pucch}, "PUCCH", strict);
// TEST: Check if there is a collision with Msg3 or Msg3 alloc data is not consistent
/* TEST: Check if there is a collision with Msg3 or Msg3 alloc data is not consistent */
if (tti_data.ul_pending_msg3.enabled) {
srsenb::ul_harq_proc::ul_alloc_t msg3_alloc = {tti_data.ul_pending_msg3.n_prb, tti_data.ul_pending_msg3.L};
for (uint32_t i = msg3_alloc.RB_start; i < msg3_alloc.RB_start + msg3_alloc.L; ++i) {
if (not ul_allocs.test(i)) {
TestError(
"[TESTER] The RB %d was not allocated for the msg3 alloc=(%d,%d)\n", i, msg3_alloc.RB_start, msg3_alloc.L);
}
}
bool passed = false;
for (uint32_t i = 0; i < tti_data.sched_result_ul.nof_dci_elems; ++i) {
if (tti_data.ul_pending_msg3.rnti == tti_data.sched_result_ul.pusch[i].dci.rnti) {
@ -657,15 +761,12 @@ void sched_tester::test_collisions()
CondError(not passed, "[TESTER] No Msg3 allocation was found in the sched_result\n");
}
// NOTE: Not possible until DCI conflict issue is resolved
// // TEST: final mask
// if(ul_allocs != ul_mask) {
// TestError("[TESTER] The UL PRB mask and the scheduler result UL mask are not consistent\n");
// }
/* TEST: check whether cumulative UL PRB masks coincide */
if (ul_allocs != tti_sched->get_ul_mask()) {
TestError("[TESTER] The UL PRB mask and the scheduler result UL mask are not consistent\n");
}
srslte::bounded_bitset<100, true> dl_allocs, alloc_mask;
dl_allocs.resize(cfg.cell.nof_prb);
alloc_mask.resize(cfg.cell.nof_prb);
srslte::bounded_bitset<100, true> dl_allocs(cfg.cell.nof_prb), alloc_mask(cfg.cell.nof_prb);
srslte_dl_sf_cfg_t dl_sf;
ZERO_OBJECT(dl_sf);
@ -691,16 +792,14 @@ void sched_tester::test_collisions()
ue_stats[tti_data.sched_result_dl.data[i].dci.rnti].nof_dl_rbs += alloc_mask.count();
}
for (uint32_t i = 0; i < tti_data.sched_result_dl.nof_bc_elems; ++i) {
alloc_mask.reset();
srslte_pdsch_grant_t grant;
CondError(srslte_ra_dl_dci_to_grant(&cfg.cell, &dl_sf, SRSLTE_TM1, &tti_data.sched_result_dl.bc[i].dci, &grant) ==
SRSLTE_ERROR,
"Failed to decode PDSCH grant\n");
alloc_mask.reset();
for (uint32_t i = 0; i < alloc_mask.size(); ++i) {
if (grant.prb_idx[0][i]) {
alloc_mask.set(i);
} else {
alloc_mask.reset(i);
}
}
if ((dl_allocs & alloc_mask).any()) {
@ -744,47 +843,91 @@ void sched_tester::test_collisions()
rbgmask.reset(i);
}
}
if (rbgmask != dl_mask and not fail_dci_alloc) {
if (rbgmask != get_tti_sched(tti_data.tti_rx)->get_dl_mask()) {
TestError("[TESTER] The UL PRB mask and the scheduler result UL mask are not consistent\n");
}
}
void sched_tester::ack_txs()
{
typedef std::map<uint16_t, srsenb::sched_ue>::iterator it_t;
for (ack_it_t it = to_ack.begin(); it != to_ack.end() and it->first <= tti_data.tti_rx;) {
if (ue_db.count(it->second.rnti) == 0) {
ack_it_t erase_it = it++;
to_ack.erase(erase_it);
/* check if user was removed. If so, clean respective acks */
erase_if(to_ack,
[this](std::pair<const uint32_t, ack_info_t>& elem) { return this->ue_db.count(elem.second.rnti) == 0; });
erase_if(to_ul_ack,
[this](std::pair<const uint32_t, ul_ack_info_t>& elem) { return this->ue_db.count(elem.second.rnti) == 0; });
/* Ack DL HARQs */
for (const auto& ack_it : to_ack) {
if (ack_it.second.tti != tti_data.tti_rx) {
continue;
}
if (it->second.tti == tti_data.tti_rx) {
bool ret = false;
for (uint32_t tb = 0; tb < SRSLTE_MAX_TB; ++tb) {
ret |= dl_ack_info(tti_data.tti_rx, it->second.rnti, tb, it->second.dl_ack) > 0;
srsenb::dl_harq_proc* h = ue_db[ack_it.second.rnti].get_dl_harq(ack_it.second.dl_harq.get_id());
const srsenb::dl_harq_proc& hack = ack_it.second.dl_harq;
CondError(hack.is_empty(), "[TESTER] The acked DL harq was not active\n");
bool ret = false;
for (uint32_t tb = 0; tb < SRSLTE_MAX_TB; ++tb) {
if (ack_it.second.dl_harq.is_empty(tb)) {
continue;
}
CondError(not ret, "[TESTER] The dl harq proc that was acked does not exist\n");
if (it->second.dl_ack)
log_out.info(
"[TESTER] DL ACK tti=%u rnti=0x%x pid=%d\n", tti_data.tti_rx, it->second.rnti, it->second.dl_harq.pid);
ack_it_t erase_it = it++;
to_ack.erase(erase_it);
continue;
ret |= dl_ack_info(tti_data.tti_rx, ack_it.second.rnti, tb, ack_it.second.dl_ack) > 0;
}
CondError(not ret, "[TESTER] The dl harq proc that was acked does not exist\n");
if (ack_it.second.dl_ack) {
CondError(!h->is_empty(), "[TESTER] ACKed dl harq was not emptied\n");
CondError(h->has_pending_retx(0, tti_data.tti_tx_dl), "[TESTER] ACKed dl harq still has pending retx\n");
log_out.info("[TESTER] DL ACK tti=%u rnti=0x%x pid=%d\n",
tti_data.tti_rx,
ack_it.second.rnti,
ack_it.second.dl_harq.get_id());
} else {
CondError(h->is_empty() and hack.nof_retx(0) + 1 < hack.max_nof_retx(), "[TESTER] NACKed DL harq got emptied\n");
}
++it;
}
bool ack = true; //(tti_data.tti_rx % 3) == 0;
if (tti_data.tti_rx >= FDD_HARQ_DELAY_MS) {
for (it_t it = ue_db.begin(); it != ue_db.end(); ++it) {
uint16_t rnti = it->first;
srsenb::ul_harq_proc* h = ue_db[rnti].get_ul_harq(tti_data.tti_rx);
if (h != NULL and not h->is_empty()) {
ul_crc_info(tti_data.tti_rx, rnti, ack);
}
/* Ack UL HARQs */
for (const auto& ack_it : to_ul_ack) {
if (ack_it.first != tti_data.tti_rx) {
continue;
}
srsenb::ul_harq_proc* h = ue_db[ack_it.second.rnti].get_ul_harq(tti_data.tti_rx);
const srsenb::ul_harq_proc& hack = ack_it.second.ul_harq;
CondError(h == nullptr or h->get_tti() != hack.get_tti(), "[TESTER] UL Harq TTI does not match the ACK TTI\n");
CondError(h->is_empty(0), "[TESTER] The acked UL harq is not active\n");
CondError(hack.is_empty(0), "[TESTER] The acked UL harq was not active\n");
ul_crc_info(tti_data.tti_rx, ack_it.second.rnti, ack_it.second.ack);
CondError(!h->get_pending_data(), "[TESTER] UL harq lost its pending data\n");
CondError(!h->has_pending_ack(), "[TESTER] ACK/NACKed UL harq should have a pending ACK\n");
if (ack_it.second.ack) {
CondError(!h->is_empty(), "[TESTER] ACKed UL harq did not get emptied\n");
CondError(h->has_pending_retx(), "[TESTER] ACKed UL harq still has pending retx\n");
log_out.info("[TESTER] UL ACK tti=%u rnti=0x%x pid=%d\n", tti_data.tti_rx, ack_it.second.rnti, hack.get_id());
} else {
// NACK
CondError(!h->is_empty() and !h->has_pending_retx(), "[TESTER] If NACKed, UL harq has to have pending retx\n");
CondError(h->is_empty() and hack.nof_retx(0) + 1 < hack.max_nof_retx(),
"[TESTER] NACKed UL harq got emptied before reaching max retx\n");
}
}
// erase processed acks
to_ack.erase(tti_data.tti_rx);
to_ul_ack.erase(tti_data.tti_rx);
// bool ack = true; //(tti_data.tti_rx % 3) == 0;
// if (tti_data.tti_rx >= FDD_HARQ_DELAY_MS) {
// for (auto it = ue_db.begin(); it != ue_db.end(); ++it) {
// uint16_t rnti = it->first;
// srsenb::ul_harq_proc* h = ue_db[rnti].get_ul_harq(tti_data.tti_rx);
// if (h != NULL and not h->is_empty()) {
// ul_crc_info(tti_data.tti_rx, rnti, ack);
// }
// }
// }
}
srsenb::sched_interface::cell_cfg_t generate_cell_cfg()
@ -916,7 +1059,7 @@ sched_sim_args rand_sim_params(const srsenb::sched_interface::cell_cfg_t& cell_c
int main(int argc, char* argv[])
{
printf("[TESTER] This is the chosen seed: %lu\n", seed);
printf("[TESTER] This is the chosen seed: %u\n", seed);
/* initialize random seed: */
srand(seed);
uint32_t N_runs = 1, nof_ttis = 10240 + 10;
@ -936,5 +1079,5 @@ int main(int argc, char* argv[])
printf("[TESTER] Number of assertion warnings: %u\n", warn_counter);
printf("[TESTER] Number of assertion errors: %u\n", err_counter);
printf("[TESTER] This was the chosen seed: %lu\n", seed);
printf("[TESTER] This was the chosen seed: %u\n", seed);
}
