Apply the segmentation break check to newTxs only. Allow configurable CQI reporting.

branch: master
author: Francisco Paisana, committed by Francisco Paisana (5 years ago)
parent c75e31db03
commit 3c29bce014

@@ -50,8 +50,7 @@ tti_params_t::tti_params_t(uint32_t tti_rx_) :
   tti_tx_dl(TTI_ADD(tti_rx, FDD_HARQ_DELAY_UL_MS)),
   tti_tx_ul(TTI_ADD(tti_rx, (FDD_HARQ_DELAY_UL_MS + FDD_HARQ_DELAY_DL_MS))),
   sfn_tx_dl(TTI_ADD(tti_rx, FDD_HARQ_DELAY_UL_MS) / 10)
-{
-}
+{}

 /*******************************************************
  * PDCCH Allocation Methods
@@ -403,14 +402,6 @@ sf_grid_t::dl_ctrl_alloc_t sf_grid_t::alloc_dl_ctrl(uint32_t aggr_idx, alloc_typ
 //! Allocates CCEs and RBs for a user DL data alloc.
 alloc_outcome_t sf_grid_t::alloc_dl_data(sched_ue* user, const rbgmask_t& user_mask)
 {
-  // Check if allocation would cause segmentation
-  uint32_t ue_cc_idx = user->get_cell_index(cc_cfg->enb_cc_idx).second;
-  rbg_range_t r = user->get_required_dl_rbgs(ue_cc_idx);
-  if (r.rbg_min > user_mask.count()) {
-    log_h->warning("The number of RBGs allocated to rnti=0x%x will force segmentation\n", user->get_rnti());
-    return alloc_outcome_t::NOF_RB_INVALID;
-  }
   srslte_dci_format_t dci_format = user->get_dci_format();
   uint32_t nof_bits = srslte_dci_format_sizeof(&cc_cfg->cfg.cell, nullptr, nullptr, dci_format);
   uint32_t aggr_idx = user->get_ue_carrier(cc_cfg->enb_cc_idx)->get_aggr_level(nof_bits);
@@ -647,6 +638,18 @@ alloc_outcome_t sf_sched::alloc_dl_user(sched_ue* user, const rbgmask_t& user_ma
     return alloc_outcome_t::ERROR;
   }
+  // Check if allocation would cause segmentation
+  uint32_t ue_cc_idx = user->get_cell_index(cc_cfg->enb_cc_idx).second;
+  const dl_harq_proc& h = user->get_dl_harq(pid, ue_cc_idx);
+  if (h.is_empty()) {
+    // It is newTx
+    rbg_range_t r = user->get_required_dl_rbgs(ue_cc_idx);
+    if (r.rbg_min > user_mask.count()) {
+      log_h->warning("The number of RBGs allocated to rnti=0x%x will force segmentation\n", user->get_rnti());
+      return alloc_outcome_t::NOF_RB_INVALID;
+    }
+  }
   // Try to allocate RBGs and DCI
   alloc_outcome_t ret = tti_alloc.alloc_dl_data(user, user_mask);
   if (ret != alloc_outcome_t::SUCCESS) {
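
The effect of gating the check on h.is_empty() can be shown with a small standalone sketch (hypothetical stub types, not the srsenb classes): the minimum-RBG bound is only enforced for new transmissions, while a retransmission with the same mask passes through, since its grant size was already fixed when the HARQ process was filled.

#include <cstdint>
#include <cstdio>

// Stub types standing in for the scheduler's rbg_range_t / dl_harq_proc.
struct rbg_range_stub {
  uint32_t rbg_min; // fewest RBGs that avoid forcing RLC segmentation
};

struct dl_harq_proc_stub {
  bool has_pending_retx;
  bool is_empty() const { return not has_pending_retx; } // empty HARQ proc => newTx
};

enum class outcome { SUCCESS, NOF_RB_INVALID };

// Enforce the segmentation bound only for newTxs; a reTx keeps its original grant size.
outcome check_dl_alloc(const dl_harq_proc_stub& h, rbg_range_stub required, uint32_t nof_alloc_rbgs)
{
  if (h.is_empty() and required.rbg_min > nof_alloc_rbgs) {
    std::printf("alloc of %u RBGs < min %u -> would force segmentation\n", nof_alloc_rbgs, required.rbg_min);
    return outcome::NOF_RB_INVALID;
  }
  return outcome::SUCCESS;
}

int main()
{
  dl_harq_proc_stub newtx{false}, retx{true};
  std::printf("newTx rejected: %d\n", check_dl_alloc(newtx, {4}, 2) == outcome::NOF_RB_INVALID); // prints 1
  std::printf("reTx  rejected: %d\n", check_dl_alloc(retx, {4}, 2) == outcome::NOF_RB_INVALID);  // prints 0
  return 0;
}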

@@ -306,7 +306,9 @@ ue_ctxt_test::ue_ctxt_test(uint16_t rnti_,
                            uint32_t preamble_idx_,
                            srslte::tti_point prach_tti_,
                            const sched::ue_cfg_t& ue_cfg_,
-                           const std::vector<srsenb::sched::cell_cfg_t>& cell_params_) :
+                           const std::vector<srsenb::sched::cell_cfg_t>& cell_params_,
+                           const ue_ctxt_test_cfg& cfg_) :
+  sim_cfg(cfg_),
   rnti(rnti_),
   prach_tti(prach_tti_),
   preamble_idx(preamble_idx_),
@@ -351,7 +353,7 @@ int ue_ctxt_test::new_tti(sched* sched_ptr, srslte::tti_point tti_rx)
   current_tti_rx = tti_rx;
   TESTASSERT(fwd_pending_acks(sched_ptr) == SRSLTE_SUCCESS);
-  if ((tti_rx.to_uint() % cqi_Npd) == cqi_Noffset) {
+  if (sim_cfg.periodic_cqi and (tti_rx.to_uint() % sim_cfg.cqi_Npd) == sim_cfg.cqi_Noffset) {
     for (auto& cc : active_ccs) {
       sched_ptr->dl_cqi_info(
           tti_rx.to_uint(), rnti, cc.enb_cc_idx, std::uniform_int_distribution<uint32_t>{5, 24}(get_rand_gen()));
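
Under the new flag the test only injects DL CQI reports when periodic_cqi is set, once every cqi_Npd TTIs at a randomized offset. A minimal self-contained sketch of that periodicity (hypothetical names, plain C++; the real test feeds the result into sched_ptr->dl_cqi_info):

#include <cstdint>
#include <cstdio>
#include <random>

// Mirrors the per-UE CQI reporting knobs introduced in ue_ctxt_test_cfg (names are illustrative).
struct cqi_report_cfg {
  bool periodic_cqi = false;
  uint32_t cqi_Npd = 10;    // reporting period in TTIs
  uint32_t cqi_Noffset = 0; // TTI offset within the period
};

bool cqi_report_due(const cqi_report_cfg& cfg, uint32_t tti_rx)
{
  return cfg.periodic_cqi and (tti_rx % cfg.cqi_Npd) == cfg.cqi_Noffset;
}

int main()
{
  std::mt19937 rgen{0x5eed};

  cqi_report_cfg cfg;
  cfg.periodic_cqi = true;
  cfg.cqi_Noffset = std::uniform_int_distribution<uint32_t>{0, 9}(rgen); // keep the offset inside the period

  for (uint32_t tti = 0; tti < 30; ++tti) {
    if (cqi_report_due(cfg, tti)) {
      // A random CQI value, in the same spirit as the test's {5, 24} draw.
      uint32_t cqi = std::uniform_int_distribution<uint32_t>{5, 24}(rgen);
      std::printf("tti_rx=%2u -> report cqi=%u\n", tti, cqi);
    }
  }
  return 0;
}

With periodic_cqi left at its default of false, the condition never fires, so tests that do not opt in generate no CQI reports at all.
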
@@ -730,13 +732,15 @@ void user_state_sched_tester::new_tti(sched* sched_ptr, uint32_t tti_rx)
 int user_state_sched_tester::add_user(uint16_t rnti,
                                       uint32_t preamble_idx,
-                                      const srsenb::sched_interface::ue_cfg_t& ue_cfg)
+                                      const srsenb::sched_interface::ue_cfg_t& ue_cfg,
+                                      const ue_ctxt_test_cfg& cfg_)
 {
   CONDERROR(!srslte_prach_tti_opportunity_config_fdd(
                 cell_params[ue_cfg.supported_cc_list[0].enb_cc_idx].prach_config, tic.tti_rx(), -1),
             "New user added in a non-PRACH TTI\n");
   TESTASSERT(users.count(rnti) == 0);
-  ue_ctxt_test ue{rnti, preamble_idx, srslte::tti_point{tic.tti_rx()}, ue_cfg, cell_params};
+  ue_ctxt_test_cfg cfg;
+  ue_ctxt_test ue{rnti, preamble_idx, srslte::tti_point{tic.tti_rx()}, ue_cfg, cell_params, cfg_};
   users.insert(std::make_pair(rnti, ue));
   return SRSLTE_SUCCESS;
 }
@@ -901,7 +905,9 @@ int common_sched_tester::add_user(uint16_t rnti, const ue_cfg_t& ue_cfg_)
   uint32_t pcell_idx = ue_cfg_.supported_cc_list[0].enb_cc_idx;
   dl_rach_info(pcell_idx, rar_info);
-  ue_tester->add_user(rnti, rar_info.preamble_idx, ue_cfg_);
+  ue_ctxt_test_cfg ue_sim_cfg{};
+  ue_sim_cfg.periodic_cqi = sim_args0.cqi_policy == sim_sched_args::cqi_gen_policy_t::periodic_random;
+  ue_tester->add_user(rnti, rar_info.preamble_idx, ue_cfg_, ue_sim_cfg);
   tester_log->info("Adding user rnti=0x%x\n", rnti);
   return SRSLTE_SUCCESS;
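
How the simulation-wide policy reaches each new UE can be sketched in isolation (stub types mirroring sim_sched_args and ue_ctxt_test_cfg, not the actual test classes): the per-UE periodic_cqi flag is simply derived from the cqi_gen_policy_t value when the user is added.

#include <cstdio>

// Stubs mirroring the test's sim_sched_args::cqi_gen_policy_t and ue_ctxt_test_cfg.
enum class cqi_gen_policy_t { none, periodic_random };

struct sim_args_stub {
  cqi_gen_policy_t cqi_policy = cqi_gen_policy_t::none; // default keeps CQI injection off
};

struct ue_sim_cfg_stub {
  bool periodic_cqi = false;
};

// Derive the per-UE flag from the global policy, as done at add_user time.
ue_sim_cfg_stub make_ue_sim_cfg(const sim_args_stub& args)
{
  ue_sim_cfg_stub cfg{};
  cfg.periodic_cqi = args.cqi_policy == cqi_gen_policy_t::periodic_random;
  return cfg;
}

int main()
{
  sim_args_stub args;
  std::printf("policy=none            -> periodic_cqi=%d\n", make_ue_sim_cfg(args).periodic_cqi);

  args.cqi_policy = cqi_gen_policy_t::periodic_random;
  std::printf("policy=periodic_random -> periodic_cqi=%d\n", make_ue_sim_cfg(args).periodic_cqi);
  return 0;
}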

@@ -83,11 +83,16 @@ private:
 using dl_sched_res_list = std::vector<sched_interface::dl_sched_res_t>;
 using ul_sched_res_list = std::vector<sched_interface::ul_sched_res_t>;

+struct ue_ctxt_test_cfg {
+  bool periodic_cqi = false;
+  uint32_t cqi_Npd = 10, cqi_Noffset = std::uniform_int_distribution<uint32_t>{0, 10}(get_rand_gen()); // CQI reporting
+};
+
 struct ue_ctxt_test {
   // args
   srslte::log_ref log_h{"TEST"};
-  uint32_t cqi_Npd = 10, cqi_Noffset = std::uniform_int_distribution<uint32_t>{0, 10}(get_rand_gen()); // CQI reporting
   std::vector<float> prob_dl_ack_mask{0.5, 0.5, 1}, prob_ul_ack_mask{0.5, 0.5, 1};
+  ue_ctxt_test_cfg sim_cfg;

   // prach args
   uint16_t rnti;
@@ -125,7 +130,8 @@ struct ue_ctxt_test {
                uint32_t preamble_idx_,
                srslte::tti_point prach_tti,
                const sched::ue_cfg_t& ue_cfg_,
-               const std::vector<srsenb::sched::cell_cfg_t>& cell_params_);
+               const std::vector<srsenb::sched::cell_cfg_t>& cell_params_,
+               const ue_ctxt_test_cfg& cfg_);

   int set_cfg(const sched::ue_cfg_t& ue_cfg_);
   cc_ue_ctxt_test* get_cc_state(uint32_t enb_cc_idx);
@@ -181,7 +187,10 @@ public:
   }

   /* Config users */
-  int add_user(uint16_t rnti, uint32_t preamble_idx, const srsenb::sched_interface::ue_cfg_t& ue_cfg);
+  int add_user(uint16_t rnti,
+               uint32_t preamble_idx,
+               const srsenb::sched_interface::ue_cfg_t& ue_cfg,
+               const ue_ctxt_test_cfg& cfg);
   int user_reconf(uint16_t rnti, const srsenb::sched_interface::ue_cfg_t& ue_cfg);
   int bearer_cfg(uint16_t rnti, uint32_t lcid, const srsenb::sched_interface::ue_bearer_cfg_t& bearer_cfg);
   void rem_user(uint16_t rnti);

@@ -456,6 +456,8 @@ sched_sim_events rand_sim_params(uint32_t nof_ttis)
   sim_gen.sim_args.P_retx = 0.1;
   sim_gen.sim_args.start_tti = 0;
   sim_gen.sim_args.sim_log = log_global.get();
+  sim_gen.sim_args.cqi_policy = sim_sched_args::cqi_gen_policy_t::periodic_random;
   generator.tti_events.resize(nof_ttis);
   for (uint32_t tti = 0; tti < nof_ttis; ++tti) {

@@ -157,6 +157,7 @@ struct sim_sched_args {
   srsenb::sched_interface::ue_cfg_t ue_cfg;
   std::vector<srsenb::sched_interface::cell_cfg_t> cell_cfg;
   srslte::log* sim_log = nullptr;
+  enum class cqi_gen_policy_t { none, periodic_random } cqi_policy = cqi_gen_policy_t::none;
 };

 // generate all events up front
