Fixes RLC small grant issue in eNodeB and non-adaptive retx

master
Ismael Gomez 7 years ago
parent 2afcf1eb30
commit 8418c74a2d

@@ -178,7 +178,6 @@ int srslte_sync_resize(srslte_sync_t *q, uint32_t frame_size, uint32_t max_offset
   int ret = SRSLTE_ERROR_INVALID_INPUTS;
   if (q != NULL &&
-      frame_size <= 307200 &&
       fft_size_isvalid(fft_size))
   {
     if (frame_size > q->max_frame_size) {

@@ -175,8 +175,8 @@ uint32_t rlc_am::get_bearer()
 void rlc_am::write_sdu(byte_buffer_t *sdu)
 {
-  log->info_hex(sdu->msg, sdu->N_bytes, "%s Tx SDU", rrc->get_rb_name(lcid).c_str());
   tx_sdu_queue.write(sdu);
+  log->info_hex(sdu->msg, sdu->N_bytes, "%s Tx SDU, tx_sdu_len=%d", rrc->get_rb_name(lcid).c_str(), tx_sdu_queue.size());
 }
 /****************************************************************************
@@ -682,7 +682,7 @@ int rlc_am::build_data_pdu(uint8_t *payload, uint32_t nof_bytes)
     tx_sdu->msg += to_move;
     if(tx_sdu->N_bytes == 0)
     {
-      log->info("%s Complete SDU scheduled for tx. Stack latency: %ld us\n",
+      log->debug("%s Complete SDU scheduled for tx. Stack latency: %ld us\n",
                 rrc->get_rb_name(lcid).c_str(), tx_sdu->get_latency_us());
       pool->deallocate(tx_sdu);
       tx_sdu = NULL;
@@ -717,7 +717,7 @@ int rlc_am::build_data_pdu(uint8_t *payload, uint32_t nof_bytes)
     tx_sdu->msg += to_move;
     if(tx_sdu->N_bytes == 0)
     {
-      log->info("%s Complete SDU scheduled for tx. Stack latency: %ld us\n",
+      log->debug("%s Complete SDU scheduled for tx. Stack latency: %ld us\n",
                 rrc->get_rb_name(lcid).c_str(), tx_sdu->get_latency_us());
       pool->deallocate(tx_sdu);
       tx_sdu = NULL;

@@ -111,7 +111,7 @@ int rlc_tm::read_pdu(uint8_t *payload, uint32_t nof_bytes)
   ul_queue.read(&buf);
   pdu_size = buf->N_bytes;
   memcpy(payload, buf->msg, buf->N_bytes);
-  log->info("%s Complete SDU scheduled for tx. Stack latency: %ld us\n",
+  log->debug("%s Complete SDU scheduled for tx. Stack latency: %ld us\n",
             rrc->get_rb_name(lcid).c_str(), buf->get_latency_us());
   pool->deallocate(buf);
   log->info_hex(payload, pdu_size, "TX %s, %s PDU", rrc->get_rb_name(lcid).c_str(), rlc_mode_text[RLC_MODE_TM]);

@@ -153,8 +153,8 @@ uint32_t rlc_um::get_bearer()
 void rlc_um::write_sdu(byte_buffer_t *sdu)
 {
-  log->info_hex(sdu->msg, sdu->N_bytes, "%s Tx SDU", rrc->get_rb_name(lcid).c_str());
   tx_sdu_queue.write(sdu);
+  log->info_hex(sdu->msg, sdu->N_bytes, "%s Tx SDU, tx_sdu_len=%d", rrc->get_rb_name(lcid).c_str(), tx_sdu_queue.size());
 }
 /****************************************************************************
@@ -178,7 +178,7 @@ uint32_t rlc_um::get_buffer_state()
   // Room needed for fixed header?
   if(n_bytes > 0)
-    n_bytes += 2;
+    n_bytes += 3;
   return n_bytes;
 }
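
The one-byte bump above is easy to miss, so here is a minimal sketch of what the padded buffer-state report is assumed to achieve: the value handed to the MAC scheduler carries headroom for the RLC UM fixed header, so a grant sized from it should not come out too small to hold the header plus actual SDU data. The helper name is illustrative, not from the source tree.

    #include <cstdint>

    // Illustrative only: queued SDU bytes plus header headroom, mirroring the
    // change from "n_bytes += 2" to "n_bytes += 3" in the hunk above.
    static uint32_t reported_buffer_state(uint32_t queued_sdu_bytes)
    {
      uint32_t n_bytes = queued_sdu_bytes;
      if (n_bytes > 0) {
        n_bytes += 3; // headroom for the fixed header (previously 2 bytes)
      }
      return n_bytes;
    }
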
@@ -300,7 +300,7 @@ int rlc_um::build_data_pdu(uint8_t *payload, uint32_t nof_bytes)
     tx_sdu->msg += to_move;
     if(tx_sdu->N_bytes == 0)
     {
-      log->info("%s Complete SDU scheduled for tx. Stack latency: %ld us\n",
+      log->debug("%s Complete SDU scheduled for tx. Stack latency: %ld us\n",
                 rrc->get_rb_name(lcid).c_str(), tx_sdu->get_latency_us());
       pool->deallocate(tx_sdu);
       tx_sdu = NULL;
@@ -329,7 +329,7 @@ int rlc_um::build_data_pdu(uint8_t *payload, uint32_t nof_bytes)
     tx_sdu->msg += to_move;
     if(tx_sdu->N_bytes == 0)
     {
-      log->info("%s Complete SDU scheduled for tx. Stack latency: %ld us\n",
+      log->debug("%s Complete SDU scheduled for tx. Stack latency: %ld us\n",
                 rrc->get_rb_name(lcid).c_str(), tx_sdu->get_latency_us());
       pool->deallocate(tx_sdu);
       tx_sdu = NULL;

@@ -97,6 +97,7 @@ typedef struct {
 typedef struct {
   std::string phy_level;
+  std::string phy_lib_level;
   std::string mac_level;
   std::string rlc_level;
   std::string pdcp_level;

@@ -102,8 +102,8 @@ public:
   ul_alloc_t get_alloc();
   void set_alloc(ul_alloc_t alloc);
-  void same_alloc();
+  void re_alloc(ul_alloc_t alloc);
   bool is_adaptive_retx();
   void reset_pending_data();
   bool has_pending_ack();

@@ -199,13 +199,14 @@ ul_harq_proc::ul_alloc_t ul_harq_proc::get_alloc()
 void ul_harq_proc::set_alloc(ul_harq_proc::ul_alloc_t alloc)
 {
   is_adaptive = false;
   memcpy(&allocation, &alloc, sizeof(ul_alloc_t));
 }
-void ul_harq_proc::same_alloc()
+void ul_harq_proc::re_alloc(ul_harq_proc::ul_alloc_t alloc)
 {
   is_adaptive = true;
+  memcpy(&allocation, &alloc, sizeof(ul_alloc_t));
 }
 bool ul_harq_proc::is_adaptive_retx()

@@ -313,15 +313,14 @@ ul_harq_proc* ul_metric_rr::get_user_allocation(sched_ue *user)
       // If can schedule the same mask, do it
       if (allocation_is_valid(alloc)) {
         update_allocation(alloc);
-        h->same_alloc();
         return h;
       }
       // If not, try to find another mask in the current tti
       if (new_allocation(alloc.L, &alloc)) {
         update_allocation(alloc);
         h->set_alloc(alloc);
         return h;
       }
     }
     // If could not schedule the reTx, or there wasn't any pending retx, find an empty PID
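
The two scheduler hunks above separate non-adaptive retransmissions (the stored allocation is reused as-is) from adaptive ones (the retransmission moves to a new allocation and therefore needs a fresh UL grant). Below is a minimal model of the flag handling as it looks after this change; the type and member names are stand-ins for ul_harq_proc and ul_alloc_t, not the actual srsenb code.

    #include <cstdint>

    // Stand-in for ul_alloc_t: a contiguous PRB allocation.
    struct alloc_model {
      uint32_t rb_start = 0;
      uint32_t n_rb     = 0;
    };

    // Minimal model of the adaptive/non-adaptive flag after this commit.
    class harq_model {
    public:
      // New transmission: store the allocation; a later retx on the same
      // allocation stays non-adaptive.
      void set_alloc(const alloc_model& a) {
        is_adaptive = false;
        allocation  = a;
      }
      // Retransmission moved to a different allocation: store it and mark the
      // process adaptive so a new grant has to be signalled.
      void re_alloc(const alloc_model& a) {
        is_adaptive = true;
        allocation  = a;
      }
      bool is_adaptive_retx() const { return is_adaptive; }

    private:
      bool        is_adaptive = false;
      alloc_model allocation;
    };

Dropping the h->same_alloc() call in the metric (the removed method only flipped is_adaptive to true without updating the stored allocation) leaves a reused allocation marked non-adaptive, which appears to be the "non-adaptive retx" part of the fix.
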

@@ -320,7 +320,7 @@ void ue::allocate_sdu(srslte::sch_pdu *pdu, uint32_t lcid, uint32_t total_sdu_len
   if (sdu_space > 0) {
     int sdu_len = SRSLTE_MIN(total_sdu_len, (uint32_t) sdu_space);
     int n=1;
-    while(sdu_len > 0 && n > 0) {
+    while(sdu_len > 3 && n > 0) {
       if (pdu->new_subh()) { // there is space for a new subheader
         log_h->debug("SDU: set_sdu(), lcid=%d, sdu_len=%d, sdu_space=%d\n", lcid, sdu_len, sdu_space);
         n = pdu->get()->set_sdu(lcid, sdu_len, this);
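
The loop-guard change above stops adding SDU data once the amount that would fit drops to 3 bytes or fewer, presumably because a segment that small costs more in MAC subheader overhead than it carries; together with the extra buffer-state byte in the RLC hunks this appears to be the "small grant" part of the fix. A one-line sketch of the cut-off (the helper name is illustrative):

    // Matches the new "while(sdu_len > 3 && n > 0)" condition: remainders of
    // 3 bytes or fewer are left as padding rather than packed as a tiny segment.
    static inline bool worth_packing_more(int sdu_len)
    {
      return sdu_len > 3;
    }
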

@@ -104,6 +104,7 @@ void parse_args(all_args_t *args, int argc, char* argv[]) {
     ("log.phy_level", bpo::value<string>(&args->log.phy_level), "PHY log level")
     ("log.phy_hex_limit", bpo::value<int>(&args->log.phy_hex_limit), "PHY log hex dump limit")
+    ("log.phy_lib_level", bpo::value<string>(&args->log.phy_lib_level)->default_value("none"), "PHY lib log level")
     ("log.mac_level", bpo::value<string>(&args->log.mac_level), "MAC log level")
     ("log.mac_hex_limit", bpo::value<int>(&args->log.mac_hex_limit), "MAC log hex dump limit")
     ("log.rlc_level", bpo::value<string>(&args->log.rlc_level), "RLC log level")
@@ -274,6 +275,9 @@ void parse_args(all_args_t *args, int argc, char* argv[]) {
   if(!vm.count("log.phy_level")) {
     args->log.phy_level = args->log.all_level;
   }
+  if (!vm.count("log.phy_lib_level")) {
+    args->log.phy_lib_level = args->log.all_level;
+  }
   if(!vm.count("log.mac_level")) {
     args->log.mac_level = args->log.all_level;
   }

@@ -125,7 +125,7 @@ void gtpu::stop()
 // gtpu_interface_pdcp
 void gtpu::write_pdu(uint16_t rnti, uint32_t lcid, srslte::byte_buffer_t* pdu)
 {
-  gtpu_log->info_hex(pdu->msg, pdu->N_bytes, "TX PDU, RNTI: 0x%x, LCID: %d", rnti, lcid);
+  gtpu_log->info_hex(pdu->msg, pdu->N_bytes, "TX PDU, RNTI: 0x%x, LCID: %d, n_bytes=%d", rnti, lcid, pdu->N_bytes);
   gtpu_header_t header;
   header.flags = 0x30;
   header.message_type = 0xFF;
@@ -234,7 +234,7 @@ void gtpu::run_thread()
       continue;
     }
-    gtpu_log->info_hex(pdu->msg, pdu->N_bytes, "RX GTPU PDU rnti=0x%x, lcid=%d", rnti, lcid);
+    gtpu_log->info_hex(pdu->msg, pdu->N_bytes, "RX GTPU PDU rnti=0x%x, lcid=%d, n_bytes=%d", rnti, lcid, pdu->N_bytes);
     pdcp->write_sdu(rnti, lcid, pdu);
     do {

@@ -153,8 +153,8 @@ void rlc::write_sdu(uint16_t rnti, uint32_t lcid, srslte::byte_buffer_t* sdu)
     // communicate buffer state every time a new SDU is written
     uint32_t tx_queue = users[rnti].rlc->get_total_buffer_state(lcid);
     uint32_t retx_queue = 0;
-    log_h->info("Buffer state: rnti=0x%x, lcid=%d, tx_queue=%d\n", rnti, lcid, tx_queue);
     mac->rlc_buffer_state(rnti, lcid, tx_queue, retx_queue);
+    log_h->info("Buffer state: rnti=0x%x, lcid=%d, tx_queue=%d\n", rnti, lcid, tx_queue);
   } else {
     pool->deallocate(sdu);
   }

@@ -1477,16 +1477,16 @@ void phch_recv::intra_measure::write(uint32_t tti, cf_t *data, uint32_t nsamples
     }
     if (receiving == true) {
       if (srslte_ringbuffer_write(&ring_buffer, data, nsamples*sizeof(cf_t)) < (int) (nsamples*sizeof(cf_t))) {
-        Warning("Error writing to ringbuffer\n");
+        Warning("Error writting to ringbuffer\n");
         receiving = false;
       } else {
         receive_cnt++;
         if (receive_cnt == CAPTURE_LEN_SF) {
           tti_sync.increase();
           receiving = false;
         }
       }
     }
   }
 }
