Merge branch 'next' into epc_sigpipe

master
Pedro Alvarez 6 years ago
commit 3d24094829

@ -42,6 +42,8 @@
#include <pthread.h>
#include <stdio.h>
#include <stdint.h>
#include <unistd.h>
#include <strings.h>
namespace srslte {
@ -63,32 +65,34 @@ public:
pthread_cond_init(&cv_full, NULL);
this->capacity = capacity;
mutexed_callback = NULL;
enable = true;
num_threads = 0;
}
void set_mutexed_itf(call_mutexed_itf *itf) {
mutexed_callback = itf;
}
void resize(int new_capacity) {
capacity = new_capacity;
}
bool push_(const myobj& value, bool block) {
~block_queue() {
// Unlock threads waiting at push or pop
pthread_mutex_lock(&mutex);
if (capacity > 0) {
if (block) {
while(q.size() > (uint32_t) capacity) {
pthread_cond_wait(&cv_full, &mutex);
}
} else {
enable = false;
pthread_cond_signal(&cv_full);
pthread_cond_signal(&cv_empty);
pthread_mutex_unlock(&mutex);
return false;
// Wait threads blocked in push/pop to exit
while(num_threads>0) {
usleep(100);
}
// Wait them to exit and destroy cv and mutex
pthread_mutex_lock(&mutex);
pthread_cond_destroy(&cv_full);
pthread_cond_destroy(&cv_empty);
pthread_mutex_unlock(&mutex);
pthread_mutex_destroy(&mutex);
}
q.push(value);
if (mutexed_callback) {
mutexed_callback->pushing(value);
void set_mutexed_itf(call_mutexed_itf *itf) {
mutexed_callback = itf;
}
pthread_cond_signal(&cv_empty);
pthread_mutex_unlock(&mutex);
return true;
void resize(int new_capacity) {
capacity = new_capacity;
}
void push(const myobj& value) {
@ -100,35 +104,13 @@ public:
}
bool try_pop(myobj *value) {
pthread_mutex_lock(&mutex);
if (q.empty()) {
pthread_mutex_unlock(&mutex);
return false;
}
if (value) {
*value = q.front();
q.pop();
}
if (mutexed_callback) {
mutexed_callback->popping(*value);
}
pthread_cond_signal(&cv_full);
pthread_mutex_unlock(&mutex);
return true;
return pop_(value, false);
}
myobj wait_pop() { // blocking pop
pthread_mutex_lock(&mutex);
while(q.empty()) {
pthread_cond_wait(&cv_empty, &mutex);
}
myobj value = q.front();
q.pop();
if (mutexed_callback) {
mutexed_callback->popping(value);
}
pthread_cond_signal(&cv_full);
pthread_mutex_unlock(&mutex);
myobj value;
bzero(&value, sizeof(myobj));
pop_(&value, true);
return value;
}
@ -153,12 +135,77 @@ public:
}
private:
// Internal pop shared by try_pop() (block=false) and wait_pop() (block=true).
// Returns true iff an element was removed from the queue; returns false when
// the queue is disabled (destructor running) or, in non-blocking mode, empty.
bool pop_(myobj *value, bool block) {
  // Fast-path check; 'enable' is re-checked under the mutex after waking.
  if (!enable) {
    return false;
  }
  pthread_mutex_lock(&mutex);
  num_threads++; // lets the destructor wait until we have left push/pop
  bool ret = false;
  if (q.empty() && !block) {
    goto exit; // non-blocking pop on an empty queue fails immediately
  }
  while (q.empty() && enable) {
    pthread_cond_wait(&cv_empty, &mutex);
  }
  if (!enable) {
    goto exit; // woken by the destructor: abort without touching the queue
  }
  if (value) {
    *value = q.front();
    q.pop();
  }
  ret = true;
  // Fix: only invoke the callback when an element was actually copied out;
  // the original dereferenced 'value' unconditionally, crashing on NULL.
  if (mutexed_callback && value) {
    mutexed_callback->popping(*value);
  }
  pthread_cond_signal(&cv_full); // wake one producer waiting for room
exit:
  num_threads--;
  pthread_mutex_unlock(&mutex);
  return ret;
}
// Internal push shared by push() (block=true) and try_push() (block=false).
// Returns true iff 'value' was enqueued; false when the queue is disabled
// or, in non-blocking mode, full. capacity <= 0 means unbounded.
bool push_(const myobj& value, bool block) {
  // Fast-path check; 'enable' is re-checked under the mutex after waking.
  if (!enable) {
    return false;
  }
  pthread_mutex_lock(&mutex);
  num_threads++; // lets the destructor wait until we have left push/pop
  bool ret = false;
  if (capacity > 0) {
    if (block) {
      // Sleep until there is room, or until the destructor disables the queue.
      while(q.size() >= (uint32_t) capacity && enable) {
        pthread_cond_wait(&cv_full, &mutex);
      }
      if (!enable) {
        goto exit; // woken by the destructor: abort the push
      }
    } else if (q.size() >= (uint32_t) capacity) {
      goto exit; // non-blocking push on a full queue fails immediately
    }
  }
  q.push(value);
  ret = true;
  if (mutexed_callback) {
    mutexed_callback->pushing(value); // e.g. byte accounting in rlc_tx_queue
  }
  pthread_cond_signal(&cv_empty); // wake one consumer waiting for data
exit:
  num_threads--;
  pthread_mutex_unlock(&mutex);
  return ret;
}
std::queue<myobj> q;
pthread_mutex_t mutex;
pthread_cond_t cv_empty;
pthread_cond_t cv_full;
call_mutexed_itf *mutexed_callback;
int capacity;
bool enable;
uint32_t num_threads;
};
}

@ -64,11 +64,9 @@ private:
int64_t max_length;
int64_t cur_length;
FILE* logfile;
bool inited;
bool not_done;
bool is_running;
std::string filename;
pthread_cond_t not_empty;
pthread_cond_t not_full;
pthread_mutex_t mutex;
pthread_t thread;
std::deque<str_ptr> buffer;

@ -173,7 +173,6 @@ public:
class rlc_interface_rrc
{
public:
virtual void reset(uint16_t rnti) = 0;
virtual void clear_buffer(uint16_t rnti) = 0;
virtual void add_user(uint16_t rnti) = 0;
virtual void rem_user(uint16_t rnti) = 0;

@ -29,6 +29,7 @@
#include <stdint.h>
#include "srslte/common/common.h"
#include "srslte/common/log.h"
namespace srslte {
@ -58,8 +59,8 @@ typedef struct{
}gtpu_header_t;
bool gtpu_read_header(srslte::byte_buffer_t *pdu, gtpu_header_t *header);
bool gtpu_write_header(gtpu_header_t *header, srslte::byte_buffer_t *pdu);
bool gtpu_read_header(srslte::byte_buffer_t *pdu, gtpu_header_t *header, srslte::log *gtpu_log);
bool gtpu_write_header(gtpu_header_t *header, srslte::byte_buffer_t *pdu, srslte::log *gtpu_log);
inline void uint8_to_uint32(uint8_t *buf, uint32_t *i)
{

@ -56,13 +56,15 @@ public:
srsue::ue_interface *ue_,
log *rlc_log_,
mac_interface_timers *mac_timers_,
uint32_t lcid_);
uint32_t lcid_,
int buffer_size = -1); // -1 to use default buffer sizes
void stop();
void get_metrics(rlc_metrics_t &m);
// PDCP interface
void write_sdu(uint32_t lcid, byte_buffer_t *sdu);
void write_sdu_nb(uint32_t lcid, byte_buffer_t *sdu);
void write_sdu_mch(uint32_t lcid, byte_buffer_t *sdu);
bool rb_is_um(uint32_t lcid);
@ -99,6 +101,7 @@ private:
srslte::rlc_entity rlc_array[SRSLTE_N_RADIO_BEARERS];
srslte::rlc_um rlc_array_mrb[SRSLTE_N_MCH_LCIDS];
uint32_t default_lcid;
int buffer_size;
long ul_tput_bytes[SRSLTE_N_RADIO_BEARERS];
long dl_tput_bytes[SRSLTE_N_RADIO_BEARERS];

@ -70,7 +70,7 @@ class rlc_am
:public rlc_common
{
public:
rlc_am();
rlc_am(uint32_t queue_len = 16);
~rlc_am();
void init(log *rlc_entity_log_,
uint32_t lcid_,
@ -78,9 +78,9 @@ public:
srsue::rrc_interface_rlc *rrc_,
mac_interface_timers *mac_timers);
void configure(srslte_rlc_config_t cnfg);
void reset();
void reestablish();
void stop();
void empty_queue();
rlc_mode_t get_mode();
@ -88,6 +88,7 @@ public:
// PDCP interface
void write_sdu(byte_buffer_t *sdu);
void write_sdu_nb(byte_buffer_t *sdu);
// MAC interface
uint32_t get_buffer_state();
@ -122,6 +123,7 @@ private:
// Mutexes
pthread_mutex_t mutex;
bool tx_enabled;
bool poll_received;
bool do_status;
rlc_status_pdu_t status;

@ -151,13 +151,17 @@ struct rlc_status_pdu_t{
class rlc_common
{
public:
// Size of the Uplink buffer in number of PDUs
const static int RLC_BUFFER_NOF_PDU = 128;
virtual ~rlc_common() {}
virtual void init(srslte::log *rlc_entity_log_,
uint32_t lcid_,
srsue::pdcp_interface_rlc *pdcp_,
srsue::rrc_interface_rlc *rrc_,
srslte::mac_interface_timers *mac_timers_) = 0;
virtual void configure(srslte_rlc_config_t cnfg) = 0;
virtual void reset() = 0;
virtual void stop() = 0;
virtual void empty_queue() = 0;
@ -166,6 +170,7 @@ public:
// PDCP interface
virtual void write_sdu(byte_buffer_t *sdu) = 0;
virtual void write_sdu_nb(byte_buffer_t *sdu) = 0;
// MAC interface
virtual uint32_t get_buffer_state() = 0;

@ -52,10 +52,10 @@ public:
uint32_t lcid_,
srsue::pdcp_interface_rlc *pdcp_,
srsue::rrc_interface_rlc *rrc_,
mac_interface_timers *mac_timers_);
mac_interface_timers *mac_timers_,
int buffer_size = -1); // use -1 for default buffer sizes
void configure(srslte_rlc_config_t cnfg);
void reset();
void reestablish();
void stop();
void empty_queue();
@ -66,6 +66,7 @@ public:
// PDCP interface
void write_sdu(byte_buffer_t *sdu);
void write_sdu_nb(byte_buffer_t *sdu);
// MAC interface
uint32_t get_buffer_state();
@ -75,10 +76,8 @@ public:
private:
rlc_tm tm;
rlc_um um;
rlc_am am;
rlc_mode_t mode;
uint32_t lcid;
rlc_common *rlc;
};

@ -40,14 +40,14 @@ class rlc_tm
:public rlc_common
{
public:
rlc_tm();
rlc_tm(uint32_t queue_len = 16);
~rlc_tm();
void init(log *rlc_entity_log_,
uint32_t lcid_,
srsue::pdcp_interface_rlc *pdcp_,
srsue::rrc_interface_rlc *rrc_,
mac_interface_timers *mac_timers);
void configure(srslte_rlc_config_t cnfg);
void reset();
void stop();
void empty_queue();
@ -56,6 +56,7 @@ public:
// PDCP interface
void write_sdu(byte_buffer_t *sdu);
void write_sdu_nb(byte_buffer_t *sdu);
// MAC interface
uint32_t get_buffer_state();
@ -71,6 +72,8 @@ private:
srsue::pdcp_interface_rlc *pdcp;
srsue::rrc_interface_rlc *rrc;
bool tx_enabled;
// Thread-safe queues for MAC messages
rlc_tx_queue ul_queue;
};

@ -44,7 +44,7 @@ namespace srslte {
class rlc_tx_queue : public block_queue<byte_buffer_t*>::call_mutexed_itf
{
public:
rlc_tx_queue(uint32_t capacity = 128) : queue((int) capacity) {
rlc_tx_queue(int capacity = 128) : queue(capacity) {
unread_bytes = 0;
queue.set_mutexed_itf(this);
}
@ -64,6 +64,11 @@ public:
queue.push(msg);
}
bool try_write(byte_buffer_t *msg)
{
return queue.try_push(msg);
}
void read(byte_buffer_t **msg)
{
byte_buffer_t *m = queue.wait_pop();

@ -49,8 +49,7 @@ class rlc_um
,public rlc_common
{
public:
rlc_um();
rlc_um(uint32_t queue_len = 32);
~rlc_um();
void init(log *rlc_entity_log_,
uint32_t lcid_,
@ -58,7 +57,6 @@ public:
srsue::rrc_interface_rlc *rrc_,
mac_interface_timers *mac_timers_);
void configure(srslte_rlc_config_t cnfg);
void reset();
void stop();
void empty_queue();
bool is_mrb();
@ -68,6 +66,7 @@ public:
// PDCP interface
void write_sdu(byte_buffer_t *sdu);
void write_sdu_nb(byte_buffer_t *sdu);
// MAC interface
uint32_t get_buffer_state();
@ -131,6 +130,7 @@ private:
srslte::timers::timer *reordering_timer;
uint32_t reordering_timer_id;
bool tx_enabled;
bool pdu_lost;
int build_data_pdu(uint8_t *payload, uint32_t nof_bytes);

@ -34,38 +34,41 @@ using namespace std;
namespace srslte{
logger_file::logger_file()
:inited(false)
,logfile(NULL)
,not_done(true)
:logfile(NULL)
,is_running(false)
,cur_length(0)
,max_length(0)
{}
logger_file::~logger_file() {
not_done = false;
if(inited) {
if(is_running) {
log(new std::string("Closing log\n"));
pthread_mutex_lock(&mutex);
is_running = false;
pthread_cond_signal(&not_empty); // wakeup thread and let it terminate
pthread_mutex_unlock(&mutex);
wait_thread_finish();
flush();
if (logfile) {
fclose(logfile);
}
pthread_mutex_destroy(&mutex);
pthread_cond_destroy(&not_empty);
}
}
void logger_file::init(std::string file, int max_length_) {
pthread_mutex_init(&mutex, NULL);
pthread_cond_init(&not_empty, NULL);
pthread_cond_init(&not_full, NULL);
max_length = (int64_t)max_length_*1024;
name_idx = 0;
filename = file;
logfile = fopen(filename.c_str(), "w");
if(logfile==NULL) {
if(logfile == NULL) {
printf("Error: could not create log file, no messages will be logged!\n");
}
is_running = true;
start(-2);
inited = true;
}
void logger_file::log(const char *msg) {
@ -80,13 +83,13 @@ void logger_file::log(str_ptr msg) {
}
void logger_file::run_thread() {
while(not_done) {
while(is_running) {
pthread_mutex_lock(&mutex);
while(buffer.empty()) {
pthread_cond_wait(&not_empty, &mutex);
if(!is_running) return; // Thread done. Messages in buffer will be handled in flush.
}
str_ptr s = buffer.front();
pthread_cond_signal(&not_full);
int n = 0;
if(logfile)
n = fprintf(logfile, "%s", s->c_str());

@ -40,6 +40,8 @@
#include <SoapySDR/Logger.h>
#include <Types.h>
#define HAVE_ASYNC_THREAD 1
#define USE_TX_MTU 0
#define SET_RF_BW 1
@ -62,6 +64,11 @@ typedef struct {
double tx_rate;
size_t rx_mtu, tx_mtu;
srslte_rf_error_handler_t soapy_error_handler;
bool async_thread_running;
pthread_t async_thread;
uint32_t num_time_errors;
uint32_t num_lates;
uint32_t num_overflows;
@ -74,6 +81,76 @@ typedef struct {
cf_t zero_mem[64*1024];
/* Report an RX overflow: forward to the registered error handler when one
 * is installed, otherwise just bump the local counter. */
static void log_overflow(rf_soapy_handler_t *h) {
  if (!h->soapy_error_handler) {
    h->num_overflows++;
    return;
  }
  srslte_rf_error_t error;
  bzero(&error, sizeof(srslte_rf_error_t));
  error.type = SRSLTE_RF_ERROR_OVERFLOW;
  h->soapy_error_handler(error);
}
/* Report a late event (RX when is_rx, otherwise TX): forward to the
 * registered error handler when one is installed, otherwise count it. */
static void log_late(rf_soapy_handler_t *h, bool is_rx) {
  if (!h->soapy_error_handler) {
    h->num_lates++;
    return;
  }
  srslte_rf_error_t error;
  bzero(&error, sizeof(srslte_rf_error_t));
  error.opt  = is_rx ? 1 : 0; /* opt carries the direction */
  error.type = SRSLTE_RF_ERROR_LATE;
  h->soapy_error_handler(error);
}
/* Report a TX underflow: forward to the registered error handler when one
 * is installed, otherwise just bump the local counter. */
static void log_underflow(rf_soapy_handler_t *h) {
  if (!h->soapy_error_handler) {
    h->num_underflows++;
    return;
  }
  srslte_rf_error_t error;
  bzero(&error, sizeof(srslte_rf_error_t));
  error.type = SRSLTE_RF_ERROR_UNDERFLOW;
  h->soapy_error_handler(error);
}
#if HAVE_ASYNC_THREAD
/* Low-priority thread that polls the TX stream for asynchronous events
 * (lates, underflows, overflows) and dispatches them to the log_* helpers.
 * Exits when async_thread_running is cleared or on an unrecoverable error. */
static void* async_thread(void *h) {
  rf_soapy_handler_t *handler = (rf_soapy_handler_t*) h;
  while (handler->async_thread_running) {
    size_t chanMask = 0;
    int flags = 0;
    const long timeoutUs = 400000; // arbitrarily chosen
    long long timeNs = 0;
    int ret = SoapySDRDevice_readStreamStatus(handler->device, handler->txStream,
                                              &chanMask, &flags, &timeNs, timeoutUs);
    switch (ret) {
      case SOAPY_SDR_TIME_ERROR:
        // this is a late
        log_late(handler, false);
        break;
      case SOAPY_SDR_UNDERFLOW:
        log_underflow(handler);
        break;
      case SOAPY_SDR_OVERFLOW:
        log_overflow(handler);
        break;
      case SOAPY_SDR_TIMEOUT:
        // timeout of the readStreamStatus call itself, nothing to report
        break;
      case SOAPY_SDR_NOT_SUPPORTED:
        // device cannot deliver async metadata: stop this thread
        fprintf(stderr, "Receiving async metadata not supported by device. Exiting thread.\n");
        handler->async_thread_running = false;
        break;
      default:
        fprintf(stderr, "Error while receiving aync metadata: %s (%d), flags=%d, channel=%zu, timeNs=%lld\n", SoapySDR_errToStr(ret), ret, flags, chanMask, timeNs);
        handler->async_thread_running = false;
        break;
    }
  }
  return NULL;
}
#endif
int soapy_error(void *h)
{
return 0;
@ -104,9 +181,10 @@ void rf_soapy_suppress_stdout(void *h)
}
void rf_soapy_register_error_handler(void *notused, srslte_rf_error_handler_t new_handler)
void rf_soapy_register_error_handler(void *h, srslte_rf_error_handler_t new_handler)
{
// not supported
rf_soapy_handler_t *handler = (rf_soapy_handler_t*) h;
handler->soapy_error_handler = new_handler;
}
@ -359,6 +437,14 @@ int rf_soapy_open_multi(char *args, void **h, uint32_t nof_rx_antennas)
}
}
#if HAVE_ASYNC_THREAD
bool start_async_thread = true;
if (strstr(args, "silent")) {
REMOVE_SUBSTRING_WITHCOMAS(args, "silent");
start_async_thread = false;
}
#endif
// receive one subframe to allow for transceiver calibration
if (strstr(devname, "lime")) {
// set default tx gain and leave some time to calibrate tx
@ -396,6 +482,17 @@ int rf_soapy_open_multi(char *args, void **h, uint32_t nof_rx_antennas)
ant = SoapySDRDevice_getAntenna(handler->device, SOAPY_SDR_TX, 0);
printf("Tx antenna set to %s\n", ant);
#if HAVE_ASYNC_THREAD
if (start_async_thread) {
// Start low priority thread to receive async commands
handler->async_thread_running = true;
if (pthread_create(&handler->async_thread, NULL, async_thread, handler)) {
perror("pthread_create");
return -1;
}
}
#endif
return SRSLTE_SUCCESS;
}
@ -409,6 +506,14 @@ int rf_soapy_open(char *args, void **h)
int rf_soapy_close(void *h)
{
rf_soapy_handler_t *handler = (rf_soapy_handler_t*) h;
#if HAVE_ASYNC_THREAD
if (handler->async_thread_running) {
handler->async_thread_running = false;
pthread_join(handler->async_thread, NULL);
}
#endif
if (handler->tx_stream_active) {
rf_soapy_stop_tx_stream(handler);
SoapySDRDevice_closeStream(handler->device, handler->txStream);
@ -647,15 +752,11 @@ int rf_soapy_recv_with_time_multi(void *h,
ret = SoapySDRDevice_readStream(handler->device, handler->rxStream, buffs_ptr, rx_samples, &flags, &timeNs, timeoutUs);
if (ret == SOAPY_SDR_OVERFLOW || (ret > 0 && (flags & SOAPY_SDR_END_ABRUPT) != 0)) {
handler->num_overflows++;
fprintf(stderr, "O");
fflush(stderr);
log_overflow(handler);
continue;
} else
if (ret == SOAPY_SDR_TIMEOUT) {
handler->num_time_errors++;
fprintf(stderr, "T");
fflush(stderr);
log_late(handler, true);
continue;
} else
if (ret < 0) {
@ -792,7 +893,7 @@ int rf_soapy_send_timed_multi(void *h,
// An error has occured
switch (ret) {
case SOAPY_SDR_TIMEOUT:
handler->num_lates++;
log_late(handler, false);
printf("L");
break;
case SOAPY_SDR_STREAM_ERROR:
@ -804,7 +905,7 @@ int rf_soapy_send_timed_multi(void *h,
printf("T");
break;
case SOAPY_SDR_UNDERFLOW:
handler->num_underflows++;
log_underflow(handler);
printf("U");
break;
default:

@ -25,6 +25,7 @@
*/
#include <unistd.h>
#include <signal.h>
#include "srslte/srslte.h"
#include "srslte/radio/radio_multi.h"
@ -39,6 +40,11 @@ double duration = 0.01; /* in seconds, 10 ms by default */
cf_t *buffers[SRSLTE_MAX_PORTS];
bool tx_enable = false;
uint32_t num_lates = 0;
uint32_t num_overflows = 0;
uint32_t num_underflows = 0;
uint32_t num_other_error = 0;
void usage(char *prog) {
printf("Usage: %s [rpstvh]\n", prog);
@ -85,18 +91,54 @@ void parse_args(int argc, char **argv) {
}
}
bool go_exit = false;
// Signal handler: request a clean shutdown on SIGINT, hard-exit on SIGSEGV.
void sig_int_handler(int signo)
{
  printf("SIGINT received. Exiting...\n");
  switch (signo) {
    case SIGINT:
      go_exit = true; // main loop polls this flag and terminates
      break;
    case SIGSEGV:
      exit(1);
      break;
    default:
      break;
  }
}
// RF error callback registered with the radio: tallies each reported error
// kind into its global counter for the end-of-run summary.
void rf_msg(srslte_rf_error_t error)
{
  switch (error.type) {
    case srslte_rf_error_t::SRSLTE_RF_ERROR_OVERFLOW:
      num_overflows++;
      break;
    case srslte_rf_error_t::SRSLTE_RF_ERROR_UNDERFLOW:
      num_underflows++;
      break;
    case srslte_rf_error_t::SRSLTE_RF_ERROR_LATE:
      num_lates++;
      break;
    default:
      num_other_error++;
      break;
  }
}
// Print the RF error counters accumulated by rf_msg() during the run.
void print_rf_summary(void)
{
  // Counters are uint32_t: use %u — passing an unsigned value to %d is
  // formally undefined and misprints values above INT_MAX.
  printf("#lates=%u\n", num_lates);
  printf("#overflows=%u\n", num_overflows);
  printf("#underflows=%u\n", num_underflows);
  printf("#num_other_error=%u\n", num_other_error);
}
int main(int argc, char **argv)
{
int ret = SRSLTE_ERROR;
srslte::radio_multi *radio_h = NULL;
srslte_timestamp_t ts_rx = {}, ts_tx = {};
signal(SIGINT, sig_int_handler);
/* Parse args */
parse_args(argc, argv);
uint32_t nof_samples = (uint32_t) (duration * srate);
uint32_t frame_size = (uint32_t) (srate / 1000.0); /* 1 ms at srate */
uint32_t nof_frames = (uint32_t) ceil(nof_samples / frame_size);
uint32_t nof_frames = duration * 1e3;
radio_h = new radio_multi();
if (!radio_h) {
@ -123,6 +165,8 @@ int main(int argc, char **argv)
goto clean_exit;
}
radio_h->register_error_handler(rf_msg);
radio_h->set_rx_freq(freq);
/* Set radio */
@ -155,6 +199,8 @@ int main(int argc, char **argv)
}
nof_samples -= frame_size;
if (go_exit) break;
}
printf("Finished streaming ...\n");
@ -178,5 +224,7 @@ clean_exit:
printf("Ok!\n");
}
print_rf_summary();
return ret;
}

@ -35,18 +35,18 @@ namespace srslte {
* Ref: 3GPP TS 29.281 v10.1.0 Section 5
***************************************************************************/
bool gtpu_write_header(gtpu_header_t *header, srslte::byte_buffer_t *pdu)
bool gtpu_write_header(gtpu_header_t *header, srslte::byte_buffer_t *pdu, srslte::log *gtpu_log)
{
if(header->flags != 0x30) {
//gtpu_log->error("gtpu_write_header - Unhandled header flags: 0x%x\n", header->flags);
gtpu_log->error("gtpu_write_header - Unhandled header flags: 0x%x\n", header->flags);
return false;
}
if(header->message_type != 0xFF) {
//gtpu_log->error("gtpu_write_header - Unhandled message type: 0x%x\n", header->message_type);
gtpu_log->error("gtpu_write_header - Unhandled message type: 0x%x\n", header->message_type);
return false;
}
if(pdu->get_headroom() < GTPU_HEADER_LEN) {
//gtpu_log->error("gtpu_write_header - No room in PDU for header\n");
gtpu_log->error("gtpu_write_header - No room in PDU for header\n");
return false;
}
@ -66,7 +66,7 @@ bool gtpu_write_header(gtpu_header_t *header, srslte::byte_buffer_t *pdu)
return true;
}
bool gtpu_read_header(srslte::byte_buffer_t *pdu, gtpu_header_t *header)
bool gtpu_read_header(srslte::byte_buffer_t *pdu, gtpu_header_t *header, srslte::log *gtpu_log)
{
uint8_t *ptr = pdu->msg;
@ -82,11 +82,11 @@ bool gtpu_read_header(srslte::byte_buffer_t *pdu, gtpu_header_t *header)
uint8_to_uint32(ptr, &header->teid);
if(header->flags != 0x30) {
//gtpu_log->error("gtpu_read_header - Unhandled header flags: 0x%x\n", header->flags);
gtpu_log->error("gtpu_read_header - Unhandled header flags: 0x%x\n", header->flags);
return false;
}
if(header->message_type != 0xFF) {
//gtpu_log->error("gtpu_read_header - Unhandled message type: 0x%x\n", header->message_type);
gtpu_log->error("gtpu_read_header - Unhandled message type: 0x%x\n", header->message_type);
return false;
}

@ -51,7 +51,8 @@ void rlc::init(srsue::pdcp_interface_rlc *pdcp_,
srsue::ue_interface *ue_,
log *rlc_log_,
mac_interface_timers *mac_timers_,
uint32_t lcid_)
uint32_t lcid_,
int buffer_size_)
{
pdcp = pdcp_;
rrc = rrc_;
@ -59,11 +60,12 @@ void rlc::init(srsue::pdcp_interface_rlc *pdcp_,
rlc_log = rlc_log_;
mac_timers = mac_timers_;
default_lcid = lcid_;
buffer_size = buffer_size_;
gettimeofday(&metrics_time[1], NULL);
reset_metrics();
rlc_array[0].init(RLC_MODE_TM, rlc_log, default_lcid, pdcp, rrc, mac_timers); // SRB0
rlc_array[0].init(RLC_MODE_TM, rlc_log, default_lcid, pdcp, rrc, mac_timers, buffer_size); // SRB0
}
void rlc::reset_metrics()
@ -75,9 +77,10 @@ void rlc::reset_metrics()
void rlc::stop()
{
for(uint32_t i=0; i<SRSLTE_N_RADIO_BEARERS; i++) {
if(rlc_array[i].active())
if(rlc_array[i].active()) {
rlc_array[i].stop();
}
}
}
void rlc::get_metrics(rlc_metrics_t &m)
@ -114,6 +117,7 @@ void rlc::get_metrics(rlc_metrics_t &m)
reset_metrics();
}
// A call to reestablish stops all lcids but does not delete the instances. The mapping lcid to rlc mode can not change
void rlc::reestablish() {
for(uint32_t i=0; i<SRSLTE_N_RADIO_BEARERS; i++) {
if(rlc_array[i].active()) {
@ -122,14 +126,16 @@ void rlc::reestablish() {
}
}
// Resetting the RLC layer returns the object to the state after the call to init(): All lcids are stopped and
// default lcid=0 is created
void rlc::reset()
{
for(uint32_t i=0; i<SRSLTE_N_RADIO_BEARERS; i++) {
if(rlc_array[i].active())
rlc_array[i].reset();
rlc_array[i].stop();
}
rlc_array[0].init(RLC_MODE_TM, rlc_log, default_lcid, pdcp, rrc, mac_timers); // SRB0
rlc_array[0].init(RLC_MODE_TM, rlc_log, default_lcid, pdcp, rrc, mac_timers, buffer_size); // SRB0
}
void rlc::empty_queue()
@ -149,6 +155,12 @@ void rlc::write_sdu(uint32_t lcid, byte_buffer_t *sdu)
rlc_array[lcid].write_sdu(sdu);
}
}
// Non-blocking SDU write: forwards the buffer to the RLC entity mapped to
// this logical channel. If that entity's queue is full, the SDU is dropped
// there rather than blocking the caller.
void rlc::write_sdu_nb(uint32_t lcid, byte_buffer_t *sdu)
{
  if(valid_lcid(lcid)) {
    rlc_array[lcid].write_sdu_nb(sdu);
  }
  // NOTE(review): on an invalid lcid the SDU is neither queued nor returned
  // to the byte_buffer_pool — looks like a buffer leak; confirm ownership
  // with the callers before changing.
}
void rlc::write_sdu_mch(uint32_t lcid, byte_buffer_t *sdu)
{
if(valid_lcid_mrb(lcid)) {
@ -307,16 +319,16 @@ void rlc::add_bearer(uint32_t lcid, srslte_rlc_config_t cnfg)
switch(cnfg.rlc_mode)
{
case LIBLTE_RRC_RLC_MODE_AM:
rlc_array[lcid].init(RLC_MODE_AM, rlc_log, lcid, pdcp, rrc, mac_timers);
rlc_array[lcid].init(RLC_MODE_AM, rlc_log, lcid, pdcp, rrc, mac_timers, buffer_size);
break;
case LIBLTE_RRC_RLC_MODE_UM_BI:
rlc_array[lcid].init(RLC_MODE_UM, rlc_log, lcid, pdcp, rrc, mac_timers);
rlc_array[lcid].init(RLC_MODE_UM, rlc_log, lcid, pdcp, rrc, mac_timers, buffer_size);
break;
case LIBLTE_RRC_RLC_MODE_UM_UNI_DL:
rlc_array[lcid].init(RLC_MODE_UM, rlc_log, lcid, pdcp, rrc, mac_timers);
rlc_array[lcid].init(RLC_MODE_UM, rlc_log, lcid, pdcp, rrc, mac_timers, buffer_size);
break;
case LIBLTE_RRC_RLC_MODE_UM_UNI_UL:
rlc_array[lcid].init(RLC_MODE_UM, rlc_log, lcid, pdcp, rrc, mac_timers);
rlc_array[lcid].init(RLC_MODE_UM, rlc_log, lcid, pdcp, rrc, mac_timers, buffer_size);
break;
default:
rlc_log->error("Cannot add RLC entity - invalid mode\n");

@ -36,7 +36,7 @@
namespace srslte {
rlc_am::rlc_am() : tx_sdu_queue(16)
rlc_am::rlc_am(uint32_t queue_len) : tx_sdu_queue(queue_len)
{
log = NULL;
pdcp = NULL;
@ -68,19 +68,13 @@ rlc_am::rlc_am() : tx_sdu_queue(16)
do_status = false;
}
// Warning: must call stop() to properly deallocate all buffers
rlc_am::~rlc_am()
{
// reset RLC and dealloc SDUs
stop();
if(rx_sdu) {
pool->deallocate(rx_sdu);
}
if(tx_sdu) {
pool->deallocate(tx_sdu);
}
pthread_mutex_destroy(&mutex);
pool = NULL;
}
void rlc_am::init(srslte::log *log_,
uint32_t lcid_,
srsue::pdcp_interface_rlc *pdcp_,
@ -91,6 +85,7 @@ void rlc_am::init(srslte::log *log_,
lcid = lcid_;
pdcp = pdcp_;
rrc = rrc_;
tx_enabled = true;
}
void rlc_am::configure(srslte_rlc_config_t cfg_)
@ -106,21 +101,16 @@ void rlc_am::configure(srslte_rlc_config_t cfg_)
void rlc_am::empty_queue() {
// Drop all messages in TX SDU queue
byte_buffer_t *buf;
while(tx_sdu_queue.size() > 0) {
tx_sdu_queue.read(&buf);
while(tx_sdu_queue.try_read(&buf)) {
pool->deallocate(buf);
}
}
void rlc_am::stop()
{
reset();
pthread_mutex_destroy(&mutex);
}
void rlc_am::reset()
{
// Empty tx_sdu_queue before locking the mutex
tx_enabled = false;
usleep(100);
empty_queue();
pthread_mutex_lock(&mutex);
@ -198,8 +188,34 @@ uint32_t rlc_am::get_bearer()
// Blocking SDU write (PDCP interface). Takes ownership of 'sdu': it is either
// queued for transmission or returned to the byte buffer pool.
void rlc_am::write_sdu(byte_buffer_t *sdu)
{
  // Check for NULL first: the original checked tx_enabled first and would
  // hand a NULL pointer to deallocate() when TX was disabled.
  if (!sdu) {
    log->warning("NULL SDU pointer in write_sdu()\n");
    return;
  }
  if (!tx_enabled) {
    // Entity stopped: return the buffer to the pool instead of leaking it.
    byte_buffer_pool::get_instance()->deallocate(sdu);
    return;
  }
  // Log BEFORE the handoff: once queued, the MAC thread may pop and
  // deallocate the buffer, so dereferencing sdu afterwards is a race.
  // (Queue length is therefore reported pre-insertion.)
  log->info_hex(sdu->msg, sdu->N_bytes, "%s Tx SDU (%d B, tx_sdu_queue_len=%d)", rrc->get_rb_name(lcid).c_str(), sdu->N_bytes, tx_sdu_queue.size());
  tx_sdu_queue.write(sdu);
}
// Non-blocking SDU write (PDCP interface). Takes ownership of 'sdu': queued
// on success, otherwise returned to the byte buffer pool (dropped).
void rlc_am::write_sdu_nb(byte_buffer_t *sdu)
{
  // Check for NULL first: the original checked tx_enabled first and would
  // hand a NULL pointer to deallocate() when TX was disabled.
  if (!sdu) {
    log->warning("NULL SDU pointer in write_sdu()\n");
    return;
  }
  if (!tx_enabled) {
    // Entity stopped: return the buffer to the pool instead of leaking it.
    byte_buffer_pool::get_instance()->deallocate(sdu);
    return;
  }
  if (tx_sdu_queue.try_write(sdu)) {
    // NOTE(review): sdu may already have been popped and deallocated by the
    // MAC thread at this point; logging its contents races with that —
    // TODO capture msg/N_bytes before the try_write.
    log->info_hex(sdu->msg, sdu->N_bytes, "%s Tx SDU (%d B, tx_sdu_queue_len=%d)", rrc->get_rb_name(lcid).c_str(), sdu->N_bytes, tx_sdu_queue.size());
  } else {
    // Queue full: drop the SDU and give the buffer back to the pool.
    log->info_hex(sdu->msg, sdu->N_bytes, "[Dropped SDU] %s Tx SDU (%d B, tx_sdu_queue_len=%d)", rrc->get_rb_name(lcid).c_str(), sdu->N_bytes, tx_sdu_queue.size());
    pool->deallocate(sdu);
  }
}
/****************************************************************************
@ -1111,7 +1127,7 @@ void rlc_am::handle_control_pdu(uint8_t *payload, uint32_t nof_bytes)
// sanity check
if (status.nacks[j].so_start >= it->second.buf->N_bytes) {
// print error but try to send original PDU again
log->error("SO_start is larger than original PDU (%d >= %d)\n",
log->info("SO_start is larger than original PDU (%d >= %d)\n",
status.nacks[j].so_start,
it->second.buf->N_bytes);
status.nacks[j].so_start = 0;
@ -1193,7 +1209,7 @@ void rlc_am::reassemble_rx_sdus()
}
if (rx_sdu->get_tailroom() >= len) {
if (rx_window[vr_r].buf->get_tailroom() >= len) {
if ((rx_window[vr_r].buf->msg - rx_window[vr_r].buf->buffer) + len < SRSLTE_MAX_BUFFER_SIZE_BYTES) {
memcpy(&rx_sdu->msg[rx_sdu->N_bytes], rx_window[vr_r].buf->msg, len);
rx_sdu->N_bytes += len;
rx_window[vr_r].buf->msg += len;
@ -1213,7 +1229,7 @@ void rlc_am::reassemble_rx_sdus()
#endif
}
} else {
log->error("Cannot read %d bytes from rx_window. vr_r=%d, tailroom=%d bytes\n", len, rx_window[vr_r].buf->get_tailroom());
log->error("Cannot read %d bytes from rx_window. vr_r=%d, msg-buffer=%d bytes\n", len, vr_r, (rx_window[vr_r].buf->msg - rx_window[vr_r].buf->buffer));
pool->deallocate(rx_sdu);
goto exit;
}
@ -1289,7 +1305,6 @@ void rlc_am::debug_state()
"vr_r = %d, vr_mr = %d, vr_x = %d, vr_ms = %d, vr_h = %d\n",
rrc->get_rb_name(lcid).c_str(), vt_a, vt_ms, vt_s, poll_sn,
vr_r, vr_mr, vr_x, vr_ms, vr_h);
}
void rlc_am::print_rx_segments()

@ -33,34 +33,49 @@ rlc_entity::rlc_entity()
{
}
void rlc_entity::init(rlc_mode_t mode,
void rlc_entity::init(rlc_mode_t mode_,
log *rlc_entity_log_,
uint32_t lcid_,
srsue::pdcp_interface_rlc *pdcp_,
srsue::rrc_interface_rlc *rrc_,
mac_interface_timers *mac_timers_)
mac_interface_timers *mac_timers_,
int buffer_size)
{
tm.reset();
um.reset();
am.reset();
switch(mode)
if (buffer_size <= 0) {
buffer_size = rlc_common::RLC_BUFFER_NOF_PDU;
}
// Create the RLC instance the first time init() is called.
// If called to reestablished, the entity is stopped but not destroyed
// Next call to init() must use same mode
if (rlc == NULL) {
switch(mode_)
{
case RLC_MODE_TM:
rlc = &tm;
rlc = new rlc_tm((uint32_t) buffer_size);
break;
case RLC_MODE_UM:
rlc = &um;
rlc = new rlc_um((uint32_t) buffer_size);
break;
case RLC_MODE_AM:
rlc = &am;
rlc = new rlc_am((uint32_t) buffer_size);
break;
default:
rlc_entity_log_->error("Invalid RLC mode - defaulting to TM\n");
rlc = &tm;
rlc = new rlc_tm((uint32_t) buffer_size);
break;
}
lcid = lcid_;
mode = mode_;
} else {
if (lcid != lcid_) {
rlc_entity_log_->warning("Reestablishing RLC instance. LCID changed from %d to %d\n", lcid, lcid_);
lcid = lcid_;
}
if (mode != mode_) {
rlc_entity_log_->console("Error reestablishing RLC instance. Mode changed from %d to %d. \n", mode, mode_);
}
}
rlc->init(rlc_entity_log_, lcid_, pdcp_, rrc_, mac_timers_);
}
@ -70,19 +85,16 @@ void rlc_entity::configure(srslte_rlc_config_t cnfg)
rlc->configure(cnfg);
}
// Reestablishment stops the entity but does not destroy it. Mode will not change
void rlc_entity::reestablish() {
rlc->reset();
}
void rlc_entity::reset()
{
rlc->reset();
rlc = NULL;
rlc->stop();
}
// A call to stop() stops the entity and deletes the RLC instance. Afterwards this entity can be reused for another mode.
// Stops the entity and deletes the underlying RLC instance so the entity can
// later be re-initialized, possibly in a different mode.
void rlc_entity::stop()
{
  // Guard against a second stop() (or stop() before init()): rlc is NULL
  // after this runs, and the original dereferenced it unconditionally.
  if (rlc) {
    rlc->stop();
    delete rlc;
    rlc = NULL;
  }
}
@ -119,6 +131,12 @@ void rlc_entity::write_sdu(byte_buffer_t *sdu)
rlc->write_sdu(sdu);
}
// Non-blocking SDU write: forwards to the underlying RLC instance (TM/UM/AM)
// when one exists.
void rlc_entity::write_sdu_nb(byte_buffer_t *sdu)
{
  if(rlc)
    rlc->write_sdu_nb(sdu);
  // NOTE(review): when rlc is NULL (entity stopped) the SDU is dropped
  // without being returned to the byte_buffer_pool — possible leak; verify
  // caller ownership expectations.
}
// MAC interface
uint32_t rlc_entity::get_buffer_state()
{

@ -29,7 +29,7 @@
namespace srslte {
rlc_tm::rlc_tm() : ul_queue(16)
rlc_tm::rlc_tm(uint32_t queue_len) : ul_queue(queue_len)
{
log = NULL;
pdcp = NULL;
@ -38,6 +38,11 @@ rlc_tm::rlc_tm() : ul_queue(16)
pool = byte_buffer_pool::get_instance();
}
// Warning: must call stop() to properly deallocate all buffers
// Destructor only drops the pool reference; pool is the shared
// byte_buffer_pool singleton obtained in the constructor, not owned here.
// Queued buffers are expected to have been released via stop() beforehand.
rlc_tm::~rlc_tm() {
  pool = NULL;
}
void rlc_tm::init(srslte::log *log_,
uint32_t lcid_,
srsue::pdcp_interface_rlc *pdcp_,
@ -48,6 +53,7 @@ void rlc_tm::init(srslte::log *log_,
lcid = lcid_;
pdcp = pdcp_;
rrc = rrc_;
tx_enabled = true;
}
void rlc_tm::configure(srslte_rlc_config_t cnfg)
@ -64,14 +70,10 @@ void rlc_tm::empty_queue()
}
}
void rlc_tm::reset()
{
empty_queue();
}
void rlc_tm::stop()
{
reset();
tx_enabled = false;
empty_queue();
}
rlc_mode_t rlc_tm::get_mode()
@ -87,11 +89,37 @@ uint32_t rlc_tm::get_bearer()
// PDCP interface
void rlc_tm::write_sdu(byte_buffer_t *sdu)
{
log->info_hex(sdu->msg, sdu->N_bytes, "%s Tx SDU, before: queue size=%d, bytes=%d",
rrc->get_rb_name(lcid).c_str(), ul_queue.size(), ul_queue.size_bytes());
if (!tx_enabled) {
byte_buffer_pool::get_instance()->deallocate(sdu);
return;
}
if (sdu) {
ul_queue.write(sdu);
log->info_hex(sdu->msg, sdu->N_bytes, "%s Tx SDU, after: queue size=%d, bytes=%d",
log->info_hex(sdu->msg, sdu->N_bytes, "%s Tx SDU, queue size=%d, bytes=%d",
rrc->get_rb_name(lcid).c_str(), ul_queue.size(), ul_queue.size_bytes());
} else {
log->warning("NULL SDU pointer in write_sdu()\n");
}
}
// Non-blocking SDU write (PDCP interface). Takes ownership of 'sdu': queued
// on success, otherwise returned to the byte buffer pool (dropped).
void rlc_tm::write_sdu_nb(byte_buffer_t *sdu)
{
  // Check for NULL first: the original checked tx_enabled first and would
  // hand a NULL pointer to deallocate() when TX was disabled.
  if (!sdu) {
    log->warning("NULL SDU pointer in write_sdu()\n");
    return;
  }
  if (!tx_enabled) {
    // Entity stopped: return the buffer to the pool instead of leaking it.
    byte_buffer_pool::get_instance()->deallocate(sdu);
    return;
  }
  if (ul_queue.try_write(sdu)) {
    // NOTE(review): sdu may already have been popped and deallocated by the
    // MAC thread here; logging its contents races with that — TODO capture
    // msg/N_bytes before the try_write.
    log->info_hex(sdu->msg, sdu->N_bytes, "%s Tx SDU, queue size=%d, bytes=%d",
                  rrc->get_rb_name(lcid).c_str(), ul_queue.size(), ul_queue.size_bytes());
  } else {
    // Queue full: drop the SDU and give the buffer back to the pool.
    log->info_hex(sdu->msg, sdu->N_bytes, "[Dropped SDU] %s Tx SDU, queue size=%d, bytes=%d",
                  rrc->get_rb_name(lcid).c_str(), ul_queue.size(), ul_queue.size_bytes());
    pool->deallocate(sdu);
  }
}
// MAC interface

@ -33,7 +33,7 @@
namespace srslte {
rlc_um::rlc_um() : tx_sdu_queue(32)
rlc_um::rlc_um(uint32_t queue_len) : tx_sdu_queue(queue_len)
{
log = NULL;
pdcp = NULL;
@ -62,10 +62,13 @@ rlc_um::rlc_um() : tx_sdu_queue(32)
pdu_lost = false;
}
// Warning: must call stop() to properly deallocate all buffers
// Destructor: stop() disables TX and drains state before the mutex is torn
// down. pool is a shared byte_buffer_pool — presumably the singleton from
// get_instance(); dropping the reference here does not free it (confirm).
rlc_um::~rlc_um()
{
  stop();
  // NOTE(review): assumes no thread can still hold the mutex after stop().
  pthread_mutex_destroy(&mutex);
  pool = NULL;
}
void rlc_um::init(srslte::log *log_,
uint32_t lcid_,
srsue::pdcp_interface_rlc *pdcp_,
@ -79,6 +82,7 @@ void rlc_um::init(srslte::log *log_,
mac_timers = mac_timers_;
reordering_timer_id = mac_timers->timer_get_unique_id();
reordering_timer = mac_timers->timer_get(reordering_timer_id);
tx_enabled = true;
}
void rlc_um::configure(srslte_rlc_config_t cnfg_)
@ -115,8 +119,7 @@ void rlc_um::configure(srslte_rlc_config_t cnfg_)
void rlc_um::empty_queue() {
// Drop all messages in TX SDU queue
byte_buffer_t *buf;
while(tx_sdu_queue.size() > 0) {
tx_sdu_queue.read(&buf);
while(tx_sdu_queue.try_read(&buf)) {
pool->deallocate(buf);
}
}
@ -127,17 +130,9 @@ bool rlc_um::is_mrb()
}
void rlc_um::stop()
{
reset();
if (mac_timers && reordering_timer) {
mac_timers->timer_release_id(reordering_timer_id);
reordering_timer = NULL;
}
}
void rlc_um::reset()
{
// Empty tx_sdu_queue before locking the mutex
tx_enabled = false;
empty_queue();
pthread_mutex_lock(&mutex);
@ -167,6 +162,11 @@ void rlc_um::reset()
}
rx_window.clear();
pthread_mutex_unlock(&mutex);
if (mac_timers && reordering_timer) {
mac_timers->timer_release_id(reordering_timer_id);
reordering_timer = NULL;
}
}
rlc_mode_t rlc_um::get_mode()
@ -182,11 +182,36 @@ uint32_t rlc_um::get_bearer()
/****************************************************************************
* PDCP interface
***************************************************************************/
/* PDCP interface: blocking SDU write.
 *
 * Takes ownership of @p sdu. If TX is disabled the SDU is returned to the
 * byte-buffer pool; otherwise it is pushed onto tx_sdu_queue (blocking until
 * space is available).
 *
 * Fixes vs. previous version:
 *  - The NULL check is performed FIRST: the old code could call
 *    deallocate(NULL) on the tx-disabled path before ever checking sdu.
 *  - The log line runs BEFORE handing the SDU to the queue: once queued, the
 *    consumer thread may pop and free the buffer, making sdu->msg/N_bytes a
 *    use-after-free in the log call.
 */
void rlc_um::write_sdu(byte_buffer_t *sdu)
{
  if (!sdu) {
    log->warning("NULL SDU pointer in write_sdu()\n");
    return;
  }
  if (!tx_enabled) {
    // TX path torn down (e.g. during stop()); return buffer to the pool.
    byte_buffer_pool::get_instance()->deallocate(sdu);
    return;
  }
  // Log while we still own the buffer (queue length is the pre-push value).
  log->info_hex(sdu->msg, sdu->N_bytes, "%s Tx SDU (%d B ,tx_sdu_queue_len=%d)", rrc->get_rb_name(lcid).c_str(), sdu->N_bytes, tx_sdu_queue.size());
  // Ownership transfers to the queue here; do not touch sdu afterwards.
  tx_sdu_queue.write(sdu);
}
/* PDCP interface: non-blocking SDU write.
 *
 * Takes ownership of @p sdu. If TX is disabled, or the queue is full, the SDU
 * is deallocated (dropped); otherwise it is enqueued without blocking.
 *
 * Fix vs. previous version: the NULL check runs FIRST, so the tx-disabled
 * path can no longer pass a NULL pointer to deallocate().
 */
void rlc_um::write_sdu_nb(byte_buffer_t *sdu)
{
  if (!sdu) {
    log->warning("NULL SDU pointer in write_sdu()\n");
    return;
  }
  if (!tx_enabled) {
    // TX path torn down (e.g. during stop()); return buffer to the pool.
    byte_buffer_pool::get_instance()->deallocate(sdu);
    return;
  }
  if (tx_sdu_queue.try_write(sdu)) {
    // NOTE(review): reading sdu after a successful try_write races with the
    // consumer thread freeing it — kept for log fidelity; consider logging
    // before the push as done in write_sdu().
    log->info_hex(sdu->msg, sdu->N_bytes, "%s Tx SDU (%d B ,tx_sdu_queue_len=%d)", rrc->get_rb_name(lcid).c_str(), sdu->N_bytes, tx_sdu_queue.size());
  } else {
    // Queue full: drop. We still own the buffer here, so logging then freeing
    // is safe.
    log->info_hex(sdu->msg, sdu->N_bytes, "[Dropped SDU] %s Tx SDU (%d B ,tx_sdu_queue_len=%d)", rrc->get_rb_name(lcid).c_str(), sdu->N_bytes, tx_sdu_queue.size());
    pool->deallocate(sdu);
  }
}
/****************************************************************************

@ -1408,7 +1408,7 @@ void reset_test()
pdu_bufs.N_bytes = len;
// reset RLC1
rlc1.reset();
rlc1.stop();
// read another PDU segment from RLC1
len = rlc1.read_pdu(pdu_bufs.msg, 4);

@ -124,7 +124,8 @@ private:
static const int MAC_PDU_THREAD_PRIO = 60;
// We use a rwlock in MAC to allow multiple workers to access MAC simultaneously. No conflicts will happen since access for different TTIs
pthread_rwlock_t rwlock;
// Interaction with PHY
phy_interface_mac *phy_h;

@ -38,6 +38,13 @@
namespace srsenb {
/* Caution: User addition (ue_cfg) and removal (ue_rem) are not thread-safe
* Rest of operations are thread-safe
*
* The subclass sched_ue is thread-safe so that access to shared variables like buffer states
* from scheduler thread and other threads is protected for each individual user.
*/
class sched : public sched_interface
{
@ -217,9 +224,6 @@ private:
bool configured;
pthread_mutex_t mutex, mutex2;
};

@ -35,6 +35,12 @@
namespace srsenb {
/** This class is designed to be thread-safe because it is called from workers through scheduler thread and from
* higher layers and mac threads.
*
* 1 mutex is created for every user and only access to same user variables are mutexed
*/
class sched_ue {
public:
@ -56,6 +62,7 @@ public:
*
************************************************************/
sched_ue();
~sched_ue();
void reset();
void phy_config_enabled(uint32_t tti, bool enabled);
void set_cfg(uint16_t rnti, sched_interface::ue_cfg_t* cfg, sched_interface::cell_cfg_t *cell_cfg,
@ -101,8 +108,10 @@ public:
uint32_t get_pending_dl_new_data(uint32_t tti);
uint32_t get_pending_ul_new_data(uint32_t tti);
uint32_t get_pending_ul_old_data();
uint32_t get_pending_dl_new_data_total(uint32_t tti);
void reset_timeout_dl_harq(uint32_t tti);
dl_harq_proc *get_pending_dl_harq(uint32_t tti);
dl_harq_proc *get_empty_dl_harq();
ul_harq_proc *get_ul_harq(uint32_t tti);
@ -129,8 +138,6 @@ public:
bool get_pucch_sched(uint32_t current_tti, uint32_t prb_idx[2]);
bool pucch_sr_collision(uint32_t current_tti, uint32_t n_cce);
uint32_t get_pending_ul_old_data();
private:
typedef struct {
@ -152,12 +159,22 @@ private:
static bool bearer_is_ul(ue_bearer_t *lch);
static bool bearer_is_dl(ue_bearer_t *lch);
uint32_t get_pending_dl_new_data_unlocked(uint32_t tti);
uint32_t get_pending_ul_old_data_unlocked();
uint32_t get_pending_ul_new_data_unlocked(uint32_t tti);
bool needs_cqi_unlocked(uint32_t tti, bool will_send = false);
int generate_format2a_unlocked(dl_harq_proc *h, sched_interface::dl_sched_data_t *data, uint32_t tti, uint32_t cfi);
bool is_first_dl_tx();
sched_interface::ue_cfg_t cfg;
srslte_cell_t cell;
srslte::log* log_h;
pthread_mutex_t mutex;
/* Buffer states */
bool sr;
int buf_mac;

@ -71,7 +71,7 @@ public:
private:
const static float PUSCH_RL_SNR_DB_TH = 1.0;
const static float PUCCH_RL_CORR_TH = 0.1;
const static float PUCCH_RL_CORR_TH = 0.15;
void work_imp();

@ -58,14 +58,6 @@ namespace srsenb {
#define GTPU_HEADER_LEN 8
typedef struct{
uint8_t flags; // Only support 0x30 - v1, PT1 (GTP), no other flags
uint8_t message_type; // Only support 0xFF - T-PDU type
uint16_t length;
uint32_t teid;
}gtpu_header_t;
class gtpu
:public gtpu_interface_rrc
,public gtpu_interface_pdcp
@ -93,8 +85,6 @@ private:
bool running;
bool run_enable;
bool enable_mbsfn;
std::string gtp_bind_addr;
std::string mme_addr;
@ -143,13 +133,6 @@ private:
pthread_mutex_t mutex;
/****************************************************************************
* Header pack/unpack helper functions
* Ref: 3GPP TS 29.281 v10.1.0 Section 5
***************************************************************************/
bool gtpu_write_header(gtpu_header_t *header, srslte::byte_buffer_t *pdu);
bool gtpu_read_header(srslte::byte_buffer_t *pdu, gtpu_header_t *header);
/****************************************************************************
* TEID to RNTI/LCID helper functions
***************************************************************************/

@ -105,8 +105,12 @@ private:
srslte::pdcp *pdcp;
};
void clear_user(user_interface *ue);
std::map<uint32_t,user_interface> users;
pthread_rwlock_t rwlock;
rlc_interface_pdcp *rlc;
rrc_interface_pdcp *rrc;
gtpu_interface_pdcp *gtpu;

@ -52,7 +52,6 @@ public:
void stop();
// rlc_interface_rrc
void reset(uint16_t rnti);
void clear_buffer(uint16_t rnti);
void add_user(uint16_t rnti);
void rem_user(uint16_t rnti);
@ -93,6 +92,12 @@ private:
srsenb::rlc *parent;
};
void clear_user(user_interface *ue);
const static int RLC_TX_QUEUE_LEN = 512;
pthread_rwlock_t rwlock;
std::map<uint32_t,user_interface> users;
std::vector<mch_service_t> mch_services;

@ -123,7 +123,6 @@ public:
bzero(&cqi_sched, sizeof(cqi_sched));
bzero(&cfg, sizeof(cfg));
bzero(&sib2, sizeof(sib2));
bzero(&user_mutex, sizeof(user_mutex));
bzero(&paging_mutex, sizeof(paging_mutex));
}
@ -350,6 +349,7 @@ private:
srslte::byte_buffer_t* pdu;
}rrc_pdu;
const static uint32_t LCID_EXIT = 0xffff0000;
const static uint32_t LCID_REM_USER = 0xffff0001;
const static uint32_t LCID_REL_USER = 0xffff0002;
const static uint32_t LCID_RLF_USER = 0xffff0003;

@ -9,7 +9,7 @@ mac_cnfg =
ulsch_cnfg =
{
max_harq_tx = 4;
periodic_bsr_timer = 40; // in ms
periodic_bsr_timer = 20; // in ms
retx_bsr_timer = 320; // in ms
};
@ -26,9 +26,9 @@ phy_cnfg =
pusch_cnfg_ded =
{
beta_offset_ack_idx = 10;
beta_offset_ri_idx = 5;
beta_offset_cqi_idx = 10;
beta_offset_ack_idx = 6;
beta_offset_ri_idx = 6;
beta_offset_cqi_idx = 6;
};
// PUCCH-SR resources are scheduled on time-frequency domain first, then multiplexed in the same resource.

@ -47,13 +47,13 @@ sib2 =
high_speed_flag = false;
prach_config_index = 3;
prach_freq_offset = 2;
zero_correlation_zone_config = 11;
zero_correlation_zone_config = 5;
};
};
pdsch_cnfg =
{
p_b = 0;
rs_power = -10;
rs_power = 0;
};
pusch_cnfg =
{

@ -96,6 +96,8 @@ bool mac::init(mac_args_t *args_, srslte_cell_t *cell_, phy_interface_mac *phy,
reset();
pthread_rwlock_init(&rwlock, NULL);
started = true;
}
@ -104,6 +106,8 @@ bool mac::init(mac_args_t *args_, srslte_cell_t *cell_, phy_interface_mac *phy,
void mac::stop()
{
pthread_rwlock_wrlock(&rwlock);
for (uint32_t i=0;i<ue_db.size();i++) {
delete ue_db[i];
}
@ -115,6 +119,9 @@ void mac::stop()
started = false;
timers_thread.stop();
pdu_process_thread.stop();
pthread_rwlock_unlock(&rwlock);
pthread_rwlock_destroy(&rwlock);
}
// Implement Section 5.9
@ -149,43 +156,52 @@ void mac::start_pcap(srslte::mac_pcap* pcap_)
*******************************************************/
int mac::rlc_buffer_state(uint16_t rnti, uint32_t lc_id, uint32_t tx_queue, uint32_t retx_queue)
{
pthread_rwlock_rdlock(&rwlock);
int ret = -1;
if (ue_db.count(rnti)) {
if(rnti != SRSLTE_MRNTI){
return scheduler.dl_rlc_buffer_state(rnti, lc_id, tx_queue, retx_queue);
ret = scheduler.dl_rlc_buffer_state(rnti, lc_id, tx_queue, retx_queue);
} else {
for(uint32_t i = 0; i < mch.num_mtch_sched; i++){
if(lc_id == mch.mtch_sched[i].lcid){
mch.mtch_sched[i].lcid_buffer_size = tx_queue;
}
}
return 0;
ret = 0;
}
} else {
Error("User rnti=0x%x not found\n", rnti);
return -1;
}
pthread_rwlock_unlock(&rwlock);
return ret;
}
int mac::bearer_ue_cfg(uint16_t rnti, uint32_t lc_id, sched_interface::ue_bearer_cfg_t* cfg)
{
int ret = -1;
pthread_rwlock_rdlock(&rwlock);
if (ue_db.count(rnti)) {
// configure BSR group in UE
ue_db[rnti]->set_lcg(lc_id, (uint32_t) cfg->group);
return scheduler.bearer_ue_cfg(rnti, lc_id, cfg);
ret = scheduler.bearer_ue_cfg(rnti, lc_id, cfg);
} else {
Error("User rnti=0x%x not found\n", rnti);
return -1;
}
pthread_rwlock_unlock(&rwlock);
return ret;
}
int mac::bearer_ue_rem(uint16_t rnti, uint32_t lc_id)
{
pthread_rwlock_rdlock(&rwlock);
int ret = -1;
if (ue_db.count(rnti)) {
return scheduler.bearer_ue_rem(rnti, lc_id);
ret = scheduler.bearer_ue_rem(rnti, lc_id);
} else {
Error("User rnti=0x%x not found\n", rnti);
return -1;
}
pthread_rwlock_unlock(&rwlock);
return ret;
}
void mac::phy_config_enabled(uint16_t rnti, bool enabled)
@ -196,6 +212,8 @@ void mac::phy_config_enabled(uint16_t rnti, bool enabled)
// Update UE configuration
int mac::ue_cfg(uint16_t rnti, sched_interface::ue_cfg_t* cfg)
{
int ret = -1;
pthread_rwlock_rdlock(&rwlock);
if (ue_db.count(rnti)) {
// Add RNTI to the PHY (pregerate signals) now instead of after PRACH
@ -212,29 +230,42 @@ int mac::ue_cfg(uint16_t rnti, sched_interface::ue_cfg_t* cfg)
// Update Scheduler configuration
if (scheduler.ue_cfg(rnti, cfg)) {
Error("Registering new UE rnti=0x%x to SCHED\n", rnti);
return -1;
} else {
ret = 0;
}
return 0;
} else {
Error("User rnti=0x%x not found\n", rnti);
return -1;
}
pthread_rwlock_unlock(&rwlock);
return ret;
}
// Removes UE from DB
int mac::ue_rem(uint16_t rnti)
{
int ret = -1;
pthread_rwlock_rdlock(&rwlock);
if (ue_db.count(rnti)) {
scheduler.ue_rem(rnti);
phy_h->rem_rnti(rnti);
ret = 0;
} else {
Error("User rnti=0x%x not found\n", rnti);
}
pthread_rwlock_unlock(&rwlock);
if (ret) {
return ret;
}
pthread_rwlock_wrlock(&rwlock);
if (ue_db.count(rnti)) {
delete ue_db[rnti];
ue_db.erase(rnti);
Info("User rnti=0x%x removed from MAC/PHY\n", rnti);
return 0;
} else {
Error("User rnti=0x%x not found\n", rnti);
return -1;
Error("User rnti=0x%x already removed\n", rnti);
}
pthread_rwlock_unlock(&rwlock);
return 0;
}
int mac::cell_cfg(sched_interface::cell_cfg_t* cell_cfg)
@ -245,6 +276,7 @@ int mac::cell_cfg(sched_interface::cell_cfg_t* cell_cfg)
void mac::get_metrics(mac_metrics_t metrics[ENB_METRICS_MAX_USERS])
{
pthread_rwlock_rdlock(&rwlock);
int cnt=0;
for(std::map<uint16_t, ue*>::iterator iter=ue_db.begin(); iter!=ue_db.end(); ++iter) {
ue *u = iter->second;
@ -253,6 +285,7 @@ void mac::get_metrics(mac_metrics_t metrics[ENB_METRICS_MAX_USERS])
cnt++;
}
}
pthread_rwlock_unlock(&rwlock);
}
@ -264,6 +297,7 @@ void mac::get_metrics(mac_metrics_t metrics[ENB_METRICS_MAX_USERS])
void mac::rl_failure(uint16_t rnti)
{
pthread_rwlock_rdlock(&rwlock);
if (ue_db.count(rnti)) {
uint32_t nof_fails = ue_db[rnti]->rl_failure();
if (nof_fails >= (uint32_t) args.link_failure_nof_err && args.link_failure_nof_err > 0) {
@ -274,19 +308,23 @@ void mac::rl_failure(uint16_t rnti)
} else {
Error("User rnti=0x%x not found\n", rnti);
}
pthread_rwlock_unlock(&rwlock);
}
void mac::rl_ok(uint16_t rnti)
{
pthread_rwlock_rdlock(&rwlock);
if (ue_db.count(rnti)) {
ue_db[rnti]->rl_failure_reset();
} else {
Error("User rnti=0x%x not found\n", rnti);
}
pthread_rwlock_unlock(&rwlock);
}
int mac::ack_info(uint32_t tti, uint16_t rnti, uint32_t tb_idx, bool ack)
{
pthread_rwlock_rdlock(&rwlock);
log_h->step(tti);
uint32_t nof_bytes = scheduler.dl_ack_info(tti, rnti, tb_idx, ack);
ue_db[rnti]->metrics_tx(ack, nof_bytes);
@ -294,16 +332,18 @@ int mac::ack_info(uint32_t tti, uint16_t rnti, uint32_t tb_idx, bool ack)
if (ack) {
if (nof_bytes > 64) { // do not count RLC status messages only
rrc_h->set_activity_user(rnti);
log_h->info("DL activity rnti=0x%x, n_bytes=%d\n", rnti, nof_bytes);
log_h->debug("DL activity rnti=0x%x, n_bytes=%d\n", rnti, nof_bytes);
}
}
pthread_rwlock_unlock(&rwlock);
return 0;
}
int mac::crc_info(uint32_t tti, uint16_t rnti, uint32_t nof_bytes, bool crc)
{
log_h->step(tti);
int ret = -1;
pthread_rwlock_rdlock(&rwlock);
if (ue_db.count(rnti)) {
ue_db[rnti]->set_tti(tti);
@ -317,98 +357,117 @@ int mac::crc_info(uint32_t tti, uint16_t rnti, uint32_t nof_bytes, bool crc)
ue_db[rnti]->deallocate_pdu(tti);
}
return scheduler.ul_crc_info(tti, rnti, crc);
ret = scheduler.ul_crc_info(tti, rnti, crc);
} else {
Error("User rnti=0x%x not found\n", rnti);
return -1;
}
pthread_rwlock_unlock(&rwlock);
return ret;
}
int mac::set_dl_ant_info(uint16_t rnti, LIBLTE_RRC_ANTENNA_INFO_DEDICATED_STRUCT *dl_ant_info) {
log_h->step(tti);
int ret = -1;
pthread_rwlock_rdlock(&rwlock);
if (ue_db.count(rnti)) {
scheduler.dl_ant_info(rnti, dl_ant_info);
ret = 0;
} else {
Error("User rnti=0x%x not found\n", rnti);
return -1;
}
return 0;
pthread_rwlock_unlock(&rwlock);
return ret;
}
int mac::ri_info(uint32_t tti, uint16_t rnti, uint32_t ri_value)
{
log_h->step(tti);
int ret = -1;
pthread_rwlock_rdlock(&rwlock);
if (ue_db.count(rnti)) {
scheduler.dl_ri_info(tti, rnti, ri_value);
ue_db[rnti]->metrics_dl_ri(ri_value);
ret = 0;
} else {
Error("User rnti=0x%x not found\n", rnti);
return -1;
}
return 0;
pthread_rwlock_unlock(&rwlock);
return ret;
}
int mac::pmi_info(uint32_t tti, uint16_t rnti, uint32_t pmi_value)
{
log_h->step(tti);
pthread_rwlock_rdlock(&rwlock);
int ret = -1;
if (ue_db.count(rnti)) {
scheduler.dl_pmi_info(tti, rnti, pmi_value);
ue_db[rnti]->metrics_dl_pmi(pmi_value);
ret = 0;
} else {
Error("User rnti=0x%x not found\n", rnti);
return -1;
}
return 0;
pthread_rwlock_unlock(&rwlock);
return ret;
}
int mac::cqi_info(uint32_t tti, uint16_t rnti, uint32_t cqi_value)
{
log_h->step(tti);
int ret = -1;
pthread_rwlock_rdlock(&rwlock);
if (ue_db.count(rnti)) {
scheduler.dl_cqi_info(tti, rnti, cqi_value);
ue_db[rnti]->metrics_dl_cqi(cqi_value);
ret = 0;
} else {
Error("User rnti=0x%x not found\n", rnti);
return -1;
}
return 0;
pthread_rwlock_unlock(&rwlock);
return ret;
}
int mac::snr_info(uint32_t tti, uint16_t rnti, float snr)
{
log_h->step(tti);
int ret = -1;
pthread_rwlock_rdlock(&rwlock);
if (ue_db.count(rnti)) {
uint32_t cqi = srslte_cqi_from_snr(snr);
scheduler.ul_cqi_info(tti, rnti, cqi, 0);
ret = 0;
} else {
Error("User rnti=0x%x not found\n", rnti);
return -1;
}
return 0;
pthread_rwlock_unlock(&rwlock);
return ret;
}
int mac::sr_detected(uint32_t tti, uint16_t rnti)
{
log_h->step(tti);
int ret = -1;
pthread_rwlock_rdlock(&rwlock);
if (ue_db.count(rnti)) {
scheduler.ul_sr_info(tti, rnti);
ret = 0;
} else {
Error("User rnti=0x%x not found\n", rnti);
return -1;
}
return 0;
pthread_rwlock_unlock(&rwlock);
return ret;
}
int mac::rach_detected(uint32_t tti, uint32_t preamble_idx, uint32_t time_adv)
{
log_h->step(tti);
int ret = -1;
pthread_rwlock_rdlock(&rwlock);
// Find empty slot for pending rars
uint32_t ra_id=0;
while(pending_rars[ra_id].temp_crnti && ra_id<MAX_PENDING_RARS-1) {
@ -416,7 +475,7 @@ int mac::rach_detected(uint32_t tti, uint32_t preamble_idx, uint32_t time_adv)
}
if (ra_id == MAX_PENDING_RARS) {
Error("Maximum number of pending RARs exceeded (%d)\n", MAX_PENDING_RARS);
return -1;
goto unlock;
}
// Create new UE
@ -440,7 +499,7 @@ int mac::rach_detected(uint32_t tti, uint32_t preamble_idx, uint32_t time_adv)
// Release pending RAR
bzero(&pending_rars[ra_id], sizeof(pending_rar_t));
Error("Registering new user rnti=0x%x to SCHED\n", last_rnti);
return -1;
goto unlock;
}
// Register new user in RRC
@ -458,7 +517,11 @@ int mac::rach_detected(uint32_t tti, uint32_t preamble_idx, uint32_t time_adv)
if (last_rnti >= 60000) {
last_rnti = 70;
}
return 0;
ret = 0;
unlock:
pthread_rwlock_unlock(&rwlock);
return ret;
}
int mac::get_dl_sched(uint32_t tti, dl_sched_t *dl_sched_res)
@ -483,12 +546,15 @@ int mac::get_dl_sched(uint32_t tti, dl_sched_t *dl_sched_res)
int n = 0;
pthread_rwlock_rdlock(&rwlock);
// Copy data grants
for (uint32_t i=0;i<sched_result.nof_data_elems;i++) {
// Get UE
uint16_t rnti = sched_result.data[i].rnti;
if (ue_db.count(rnti)) {
// Copy grant info
dl_sched_res->sched_grants[n].rnti = rnti;
dl_sched_res->sched_grants[n].dci_format = sched_result.data[i].dci_format;
@ -521,8 +587,14 @@ int mac::get_dl_sched(uint32_t tti, dl_sched_t *dl_sched_res)
}
}
n++;
} else {
Warning("Invalid DL scheduling result. User 0x%x does not exist\n", rnti);
}
}
// No more uses of shared ue_db beyond here
pthread_rwlock_unlock(&rwlock);
// Copy RAR grants
for (uint32_t i=0;i<sched_result.nof_rar_elems;i++) {
// Copy grant info
@ -726,6 +798,8 @@ int mac::get_ul_sched(uint32_t tti, ul_sched_t *ul_sched_res)
return SRSLTE_ERROR;
}
pthread_rwlock_rdlock(&rwlock);
// Copy DCI grants
ul_sched_res->nof_grants = 0;
int n = 0;
@ -735,6 +809,7 @@ int mac::get_ul_sched(uint32_t tti, ul_sched_t *ul_sched_res)
// Get UE
uint16_t rnti = sched_result.pusch[i].rnti;
if (ue_db.count(rnti)) {
// Copy grant info
ul_sched_res->sched_grants[n].rnti = rnti;
ul_sched_res->sched_grants[n].current_tx_nb = sched_result.pusch[i].current_tx_nb;
@ -750,12 +825,18 @@ int mac::get_ul_sched(uint32_t tti, ul_sched_t *ul_sched_res)
ul_sched_res->sched_grants[n].data = ue_db[rnti]->request_buffer(tti, sched_result.pusch[i].tbs);
ul_sched_res->nof_grants++;
n++;
} else {
Warning("Invalid DL scheduling result. User 0x%x does not exist\n", rnti);
}
} else {
Warning("Grant %d for rnti=0x%x has zero TBS\n", i, sched_result.pusch[i].rnti);
}
}
// No more uses of ue_db beyond here
pthread_rwlock_unlock(&rwlock);
// Copy PHICH actions
for (uint32_t i=0;i<sched_result.nof_phich_elems;i++) {
ul_sched_res->phich[i].ack = sched_result.phich[i].phich == sched_interface::ul_sched_phich_t::ACK;
@ -871,12 +952,14 @@ void mac::pdu_process::run_thread()
bool mac::process_pdus()
{
pthread_rwlock_rdlock(&rwlock);
bool ret = false;
for(std::map<uint16_t, ue*>::iterator iter=ue_db.begin(); iter!=ue_db.end(); ++iter) {
ue *u = iter->second;
uint16_t rnti = iter->first;
ret = ret | u->process_pdus();
}
pthread_rwlock_unlock(&rwlock);
return ret;
}

@ -57,22 +57,16 @@ sched::sched() : bc_aggr_level(0), rar_aggr_level(0), avail_rbg(0), P(0), start_
bzero(&sched_cfg, sizeof(sched_cfg));
bzero(&common_locations, sizeof(common_locations));
bzero(&pdsch_re, sizeof(pdsch_re));
bzero(&mutex, sizeof(mutex));
for (int i = 0; i < 3; i++) {
bzero(rar_locations[i], sizeof(sched_ue::sched_dci_cce_t) * 10);
}
pthread_mutex_init(&mutex, NULL);
pthread_mutex_init(&mutex2, NULL);
reset();
}
// Destructor: frees the REGs structure allocated during cell configuration
// and destroys the scheduler mutexes.
sched::~sched()
{
srslte_regs_free(&regs);
// NOTE(review): assumes no scheduler/ue threads can still be holding or
// about to take these mutexes at destruction time — confirm shutdown order.
pthread_mutex_destroy(&mutex);
pthread_mutex_destroy(&mutex2);
}
void sched::init(rrc_interface_mac *rrc_, srslte::log* log)
@ -118,8 +112,6 @@ int sched::cell_cfg(sched_interface::cell_cfg_t* cell_cfg)
return -1;
}
pthread_mutex_lock(&mutex);
memcpy(&cfg, cell_cfg, sizeof(sched_interface::cell_cfg_t));
// Get DCI locations
@ -147,8 +139,6 @@ int sched::cell_cfg(sched_interface::cell_cfg_t* cell_cfg)
}
configured = true;
pthread_mutex_unlock(&mutex);
return 0;
}
@ -161,21 +151,16 @@ int sched::cell_cfg(sched_interface::cell_cfg_t* cell_cfg)
int sched::ue_cfg(uint16_t rnti, sched_interface::ue_cfg_t *ue_cfg)
{
pthread_mutex_lock(&mutex);
// Add or config user
ue_db[rnti].set_cfg(rnti, ue_cfg, &cfg, &regs, log_h);
ue_db[rnti].set_max_mcs(sched_cfg.pusch_max_mcs, sched_cfg.pdsch_max_mcs);
ue_db[rnti].set_fixed_mcs(sched_cfg.pusch_mcs, sched_cfg.pdsch_mcs);
pthread_mutex_unlock(&mutex);
return 0;
}
int sched::ue_rem(uint16_t rnti)
{
pthread_mutex_lock(&mutex);
pthread_mutex_lock(&mutex2);
int ret = 0;
if (ue_db.count(rnti)) {
ue_db.erase(rnti);
@ -183,8 +168,6 @@ int sched::ue_rem(uint16_t rnti)
Error("User rnti=0x%x not found\n", rnti);
ret = -1;
}
pthread_mutex_unlock(&mutex2);
pthread_mutex_unlock(&mutex);
return ret;
}
@ -195,18 +178,15 @@ bool sched::ue_exists(uint16_t rnti)
void sched::phy_config_enabled(uint16_t rnti, bool enabled)
{
pthread_mutex_lock(&mutex);
if (ue_db.count(rnti)) {
ue_db[rnti].phy_config_enabled(current_tti, enabled);
} else {
Error("User rnti=0x%x not found\n", rnti);
}
pthread_mutex_unlock(&mutex);
}
int sched::bearer_ue_cfg(uint16_t rnti, uint32_t lc_id, sched_interface::ue_bearer_cfg_t *cfg)
{
pthread_mutex_lock(&mutex);
int ret = 0;
if (ue_db.count(rnti)) {
ue_db[rnti].set_bearer_cfg(lc_id, cfg);
@ -214,14 +194,11 @@ int sched::bearer_ue_cfg(uint16_t rnti, uint32_t lc_id, sched_interface::ue_bear
Error("User rnti=0x%x not found\n", rnti);
ret = -1;
}
pthread_mutex_unlock(&mutex);
return ret;
}
int sched::bearer_ue_rem(uint16_t rnti, uint32_t lc_id)
{
pthread_mutex_lock(&mutex);
pthread_mutex_lock(&mutex2);
int ret = 0;
if (ue_db.count(rnti)) {
ue_db[rnti].rem_bearer(lc_id);
@ -229,49 +206,33 @@ int sched::bearer_ue_rem(uint16_t rnti, uint32_t lc_id)
Error("User rnti=0x%x not found\n", rnti);
ret = -1;
}
pthread_mutex_unlock(&mutex2);
pthread_mutex_unlock(&mutex);
return ret;
}
uint32_t sched::get_dl_buffer(uint16_t rnti)
{
pthread_mutex_lock(&mutex);
uint32_t ret = 0;
if (ue_db.count(rnti)) {
ret = ue_db[rnti].get_pending_dl_new_data(current_tti);
} else {
Error("User rnti=0x%x not found\n", rnti);
}
pthread_mutex_unlock(&mutex);
return ret;
}
uint32_t sched::get_ul_buffer(uint16_t rnti)
{
pthread_mutex_lock(&mutex);
uint32_t ret = 0;
if (ue_db.count(rnti)) {
ret = ue_db[rnti].get_pending_ul_new_data(current_tti);
} else {
Error("User rnti=0x%x not found\n", rnti);
}
pthread_mutex_unlock(&mutex);
return ret;
}
/* \Warning: This function is not mutexed because it can produce late changes on the buffer state while
* the scheduler is already allocating data, resulting in empty grants.
* Ideally we would like the scheduler to query the RLC for buffer states in order to get the most updated
* buffer state with the minimum overhead. However, the current architecture is designed to be compliant
* with the FAPI interface
*
* We add a new mutex used only in ue_rem to avoid the UE being removed in between the access to
* ue_db.count() and the access to the std::map.
*/
int sched::dl_rlc_buffer_state(uint16_t rnti, uint32_t lc_id, uint32_t tx_queue, uint32_t retx_queue)
{
pthread_mutex_lock(&mutex2);
int ret = 0;
if (ue_db.count(rnti)) {
ue_db[rnti].dl_buffer_state(lc_id, tx_queue, retx_queue);
@ -279,14 +240,11 @@ int sched::dl_rlc_buffer_state(uint16_t rnti, uint32_t lc_id, uint32_t tx_queue,
Error("User rnti=0x%x not found\n", rnti);
ret = -1;
}
pthread_mutex_unlock(&mutex2);
return ret;
}
/* \Warning Read comment in dl_rlc_buffer_state() */
int sched::dl_mac_buffer_state(uint16_t rnti, uint32_t ce_code)
{
pthread_mutex_lock(&mutex2);
int ret = 0;
if (ue_db.count(rnti)) {
ue_db[rnti].mac_buffer_state(ce_code);
@ -294,12 +252,10 @@ int sched::dl_mac_buffer_state(uint16_t rnti, uint32_t ce_code)
Error("User rnti=0x%x not found\n", rnti);
ret = -1;
}
pthread_mutex_unlock(&mutex2);
return ret;
}
int sched::dl_ant_info(uint16_t rnti, LIBLTE_RRC_ANTENNA_INFO_DEDICATED_STRUCT *dl_ant_info) {
pthread_mutex_lock(&mutex);
int ret = 0;
if (ue_db.count(rnti)) {
ue_db[rnti].set_dl_ant_info(dl_ant_info);
@ -307,13 +263,11 @@ int sched::dl_ant_info(uint16_t rnti, LIBLTE_RRC_ANTENNA_INFO_DEDICATED_STRUCT *
Error("User rnti=0x%x not found\n", rnti);
ret = -1;
}
pthread_mutex_unlock(&mutex);
return ret;
}
int sched::dl_ack_info(uint32_t tti, uint16_t rnti, uint32_t tb_idx, bool ack)
{
pthread_mutex_lock(&mutex);
int ret = 0;
if (ue_db.count(rnti)) {
ret = ue_db[rnti].set_ack_info(tti, tb_idx, ack);
@ -321,13 +275,11 @@ int sched::dl_ack_info(uint32_t tti, uint16_t rnti, uint32_t tb_idx, bool ack)
Error("User rnti=0x%x not found\n", rnti);
ret = -1;
}
pthread_mutex_unlock(&mutex);
return ret;
}
int sched::ul_crc_info(uint32_t tti, uint16_t rnti, bool crc)
{
pthread_mutex_lock(&mutex);
int ret = 0;
if (ue_db.count(rnti)) {
ue_db[rnti].set_ul_crc(tti, crc);
@ -335,13 +287,11 @@ int sched::ul_crc_info(uint32_t tti, uint16_t rnti, bool crc)
Error("User rnti=0x%x not found\n", rnti);
ret = -1;
}
pthread_mutex_unlock(&mutex);
return ret;
}
int sched::dl_ri_info(uint32_t tti, uint16_t rnti, uint32_t cqi_value)
{
pthread_mutex_lock(&mutex);
int ret = 0;
if (ue_db.count(rnti)) {
ue_db[rnti].set_dl_ri(tti, cqi_value);
@ -349,13 +299,11 @@ int sched::dl_ri_info(uint32_t tti, uint16_t rnti, uint32_t cqi_value)
Error("User rnti=0x%x not found\n", rnti);
ret = -1;
}
pthread_mutex_unlock(&mutex);
return ret;
}
int sched::dl_pmi_info(uint32_t tti, uint16_t rnti, uint32_t pmi_value)
{
pthread_mutex_lock(&mutex);
int ret = 0;
if (ue_db.count(rnti)) {
ue_db[rnti].set_dl_pmi(tti, pmi_value);
@ -363,13 +311,11 @@ int sched::dl_pmi_info(uint32_t tti, uint16_t rnti, uint32_t pmi_value)
Error("User rnti=0x%x not found\n", rnti);
ret = -1;
}
pthread_mutex_unlock(&mutex);
return ret;
}
int sched::dl_cqi_info(uint32_t tti, uint16_t rnti, uint32_t cqi_value)
{
pthread_mutex_lock(&mutex);
int ret = 0;
if (ue_db.count(rnti)) {
ue_db[rnti].set_dl_cqi(tti, cqi_value);
@ -377,7 +323,6 @@ int sched::dl_cqi_info(uint32_t tti, uint16_t rnti, uint32_t cqi_value)
Error("User rnti=0x%x not found\n", rnti);
ret = -1;
}
pthread_mutex_unlock(&mutex);
return ret;
}
@ -399,7 +344,6 @@ int sched::dl_rach_info(uint32_t tti, uint32_t ra_id, uint16_t rnti, uint32_t es
int sched::ul_cqi_info(uint32_t tti, uint16_t rnti, uint32_t cqi, uint32_t ul_ch_code)
{
pthread_mutex_lock(&mutex);
int ret = 0;
if (ue_db.count(rnti)) {
ue_db[rnti].set_ul_cqi(tti, cqi, ul_ch_code);
@ -407,13 +351,11 @@ int sched::ul_cqi_info(uint32_t tti, uint16_t rnti, uint32_t cqi, uint32_t ul_ch
Error("User rnti=0x%x not found\n", rnti);
ret = -1;
}
pthread_mutex_unlock(&mutex);
return ret;
}
int sched::ul_bsr(uint16_t rnti, uint32_t lcid, uint32_t bsr, bool set_value)
{
pthread_mutex_lock(&mutex);
int ret = 0;
if (ue_db.count(rnti)) {
ue_db[rnti].ul_buffer_state(lcid, bsr, set_value);
@ -421,13 +363,11 @@ int sched::ul_bsr(uint16_t rnti, uint32_t lcid, uint32_t bsr, bool set_value)
Error("User rnti=0x%x not found\n", rnti);
ret = -1;
}
pthread_mutex_unlock(&mutex);
return ret;
}
int sched::ul_recv_len(uint16_t rnti, uint32_t lcid, uint32_t len)
{
pthread_mutex_lock(&mutex);
int ret = 0;
if (ue_db.count(rnti)) {
ue_db[rnti].ul_recv_len(lcid, len);
@ -435,13 +375,11 @@ int sched::ul_recv_len(uint16_t rnti, uint32_t lcid, uint32_t len)
Error("User rnti=0x%x not found\n", rnti);
ret = -1;
}
pthread_mutex_unlock(&mutex);
return ret;
}
int sched::ul_phr(uint16_t rnti, int phr)
{
pthread_mutex_lock(&mutex);
int ret = 0;
if (ue_db.count(rnti)) {
ue_db[rnti].ul_phr(phr);
@ -449,13 +387,11 @@ int sched::ul_phr(uint16_t rnti, int phr)
Error("User rnti=0x%x not found\n", rnti);
ret = -1;
}
pthread_mutex_unlock(&mutex);
return ret;
}
int sched::ul_sr_info(uint32_t tti, uint16_t rnti)
{
pthread_mutex_lock(&mutex);
int ret = 0;
if (ue_db.count(rnti)) {
ue_db[rnti].set_sr();;
@ -463,7 +399,6 @@ int sched::ul_sr_info(uint32_t tti, uint16_t rnti)
Error("User rnti=0x%x not found\n", rnti);
ret = -1;
}
pthread_mutex_unlock(&mutex);
return ret;
}
@ -745,8 +680,14 @@ int sched::dl_sched_data(dl_sched_data_t data[MAX_DATA_LIST])
Warning("SCHED: Could not schedule DL DCI for rnti=0x%x, pid=%d, cfi=%d\n", rnti, h->get_id(), current_cfi);
}
}
// Reset blocked PIDs
user->reset_timeout_dl_harq(current_tti);
}
return nof_data_elems;
}
@ -756,7 +697,6 @@ int sched::dl_sched(uint32_t tti, sched_interface::dl_sched_res_t* sched_result)
if (!configured) {
return 0;
}
pthread_mutex_lock(&mutex);
/* If ul_sched() not yet called this tti, reset CCE state */
if (current_tti != tti) {
@ -786,7 +726,6 @@ int sched::dl_sched(uint32_t tti, sched_interface::dl_sched_res_t* sched_result)
/* Set CFI */
sched_result->cfi = current_cfi;
pthread_mutex_unlock(&mutex);
return 0;
}
@ -798,8 +737,6 @@ int sched::ul_sched(uint32_t tti, srsenb::sched_interface::ul_sched_res_t* sched
return 0;
}
pthread_mutex_lock(&mutex);
/* If dl_sched() not yet called this tti (this tti is +4ms advanced), reset CCE state */
if (TTI_TX(current_tti) != tti) {
bzero(used_cce, MAX_CCE*sizeof(bool));
@ -858,14 +795,8 @@ int sched::ul_sched(uint32_t tti, srsenb::sched_interface::ul_sched_res_t* sched
pucch.L = (uint32_t) cfg.nrb_pucch;
if(!ul_metric->update_allocation(pucch)) {
log_h->warning("SCHED: Failed to allocate PUCCH\n");
}
for(it_t iter=ue_db.begin(); iter!=ue_db.end(); ++iter) {
sched_ue *user = (sched_ue *) &iter->second;
uint16_t rnti = (uint16_t) iter->first;
uint32_t prb_idx[2] = {0, 0};
if(user->get_pucch_sched(current_tti, prb_idx)) {
user->has_pucch = true;
}
} else {
log_h->debug("Allocating PUCCH (%d,%d)\n", pucch.RB_start, pucch.RB_start+pucch.L);
}
} else {
for(it_t iter=ue_db.begin(); iter!=ue_db.end(); ++iter) {
@ -888,6 +819,10 @@ int sched::ul_sched(uint32_t tti, srsenb::sched_interface::ul_sched_res_t* sched
ul_harq_proc::ul_alloc_t prach = {cfg.prach_freq_offset, 6};
if(!ul_metric->update_allocation(prach)) {
log_h->warning("SCHED: Failed to allocate PRACH RBs within (%d,%d)\n", prach.RB_start, prach.RB_start + prach.L);
if (prach.RB_start + prach.L > cfg.cell.nof_prb) {
fprintf(stderr, "Invalid PRACH configuration: frequency offset=%d outside bandwidth limits\n", cfg.prach_freq_offset);
return -1;
}
}
else {
log_h->debug("SCHED: Allocated PRACH RBs within (%d,%d)\n", prach.RB_start, prach.RB_start + prach.L);
@ -999,8 +934,6 @@ int sched::ul_sched(uint32_t tti, srsenb::sched_interface::ul_sched_res_t* sched
sched_result->nof_dci_elems = nof_dci_elems;
sched_result->nof_phich_elems = nof_phich_elems;
pthread_mutex_unlock(&mutex);
return SRSLTE_SUCCESS;
}

@ -60,14 +60,23 @@ sched_ue::sched_ue() : dl_next_alloc(NULL), ul_next_alloc(NULL), has_pucch(false
bzero(&dl_harq, sizeof(dl_harq));
bzero(&ul_harq, sizeof(ul_harq));
bzero(&dl_ant_info, sizeof(dl_ant_info));
pthread_mutex_init(&mutex, NULL);
reset();
}
// Destructor. The lock/unlock pair acts as a barrier: any thread currently
// inside a mutexed sched_ue method finishes before the mutex is destroyed.
// NOTE(review): this does not stop a thread from entering a method AFTER the
// unlock — callers must guarantee no concurrent use past this point.
sched_ue::~sched_ue() {
pthread_mutex_lock(&mutex);
pthread_mutex_unlock(&mutex);
pthread_mutex_destroy(&mutex);
}
void sched_ue::set_cfg(uint16_t rnti_, sched_interface::ue_cfg_t *cfg_, sched_interface::cell_cfg_t *cell_cfg,
srslte_regs_t *regs, srslte::log *log_h_)
{
reset();
pthread_mutex_lock(&mutex);
rnti = rnti_;
log_h = log_h_;
memcpy(&cell, &cell_cfg->cell, sizeof(srslte_cell_t));
@ -81,10 +90,6 @@ void sched_ue::set_cfg(uint16_t rnti_, sched_interface::ue_cfg_t *cfg_, sched_in
}
Info("SCHED: Added user rnti=0x%x\n", rnti);
for (int i=0;i<sched_interface::MAX_LC;i++) {
set_bearer_cfg(i, &cfg.ue_bearers[i]);
}
// Config HARQ processes
for (int i=0;i<SCHED_MAX_HARQ_PROC;i++) {
dl_harq[i].config(i, cfg.maxharq_tx, log_h);
@ -97,10 +102,17 @@ void sched_ue::set_cfg(uint16_t rnti_, sched_interface::ue_cfg_t *cfg_, sched_in
sched::generate_cce_location(regs, &dci_locations[cfi][sf_idx], cfi+1, sf_idx, rnti);
}
}
pthread_mutex_unlock(&mutex);
for (int i=0;i<sched_interface::MAX_LC;i++) {
set_bearer_cfg(i, &cfg.ue_bearers[i]);
}
}
void sched_ue::reset()
{
pthread_mutex_lock(&mutex);
bzero(&cfg, sizeof(sched_interface::ue_cfg_t));
sr = false;
next_tpc_pusch = 1;
@ -123,17 +135,22 @@ void sched_ue::reset()
ul_harq[i].reset(tb);
}
}
pthread_mutex_unlock(&mutex);
for (int i=0;i<sched_interface::MAX_LC; i++) {
rem_bearer(i);
}
}
// Sets fixed UL/DL MCS values under the per-UE mutex (this class is accessed
// concurrently from the scheduler thread and higher-layer threads).
// NOTE(review): negative values appear to mean "not fixed" elsewhere
// (cf. set_max_mcs) — confirm against callers.
void sched_ue::set_fixed_mcs(int mcs_ul, int mcs_dl) {
pthread_mutex_lock(&mutex);
fixed_mcs_ul = mcs_ul;
fixed_mcs_dl = mcs_dl;
pthread_mutex_unlock(&mutex);
}
void sched_ue::set_max_mcs(int mcs_ul, int mcs_dl) {
pthread_mutex_lock(&mutex);
if (mcs_ul < 0) {
max_mcs_ul = 28;
} else {
@ -144,6 +161,7 @@ void sched_ue::set_max_mcs(int mcs_ul, int mcs_dl) {
} else {
max_mcs_dl = mcs_dl;
}
pthread_mutex_unlock(&mutex);
}
@ -155,6 +173,7 @@ void sched_ue::set_max_mcs(int mcs_ul, int mcs_dl) {
void sched_ue::set_bearer_cfg(uint32_t lc_id, sched_interface::ue_bearer_cfg_t* cfg)
{
pthread_mutex_lock(&mutex);
if (lc_id < sched_interface::MAX_LC) {
memcpy(&lch[lc_id].cfg, cfg, sizeof(sched_interface::ue_bearer_cfg_t));
lch[lc_id].buf_tx = 0;
@ -163,13 +182,16 @@ void sched_ue::set_bearer_cfg(uint32_t lc_id, sched_interface::ue_bearer_cfg_t*
Info("SCHED: Set bearer config lc_id=%d, direction=%d\n", lc_id, (int) lch[lc_id].cfg.direction);
}
}
pthread_mutex_unlock(&mutex);
}
// Remove a logical channel from this UE: zeroes its bearer state (config and
// buffer counters) so it is no longer considered by the scheduler.
// lc_id values >= sched_interface::MAX_LC are silently ignored.
void sched_ue::rem_bearer(uint32_t lc_id)
{
  pthread_mutex_lock(&mutex);
  if (lc_id < sched_interface::MAX_LC) {
    bzero(&lch[lc_id], sizeof(ue_bearer_t));
  }
  pthread_mutex_unlock(&mutex);
}
void sched_ue::phy_config_enabled(uint32_t tti, bool enabled)
@ -180,6 +202,7 @@ void sched_ue::phy_config_enabled(uint32_t tti, bool enabled)
void sched_ue::ul_buffer_state(uint8_t lc_id, uint32_t bsr, bool set_value)
{
pthread_mutex_lock(&mutex);
if (lc_id < sched_interface::MAX_LC) {
if (set_value) {
lch[lc_id].bsr = bsr;
@ -189,25 +212,30 @@ void sched_ue::ul_buffer_state(uint8_t lc_id, uint32_t bsr, bool set_value)
}
Debug("SCHED: bsr=%d, lcid=%d, bsr={%d,%d,%d,%d}\n", bsr, lc_id,
lch[0].bsr, lch[1].bsr, lch[2].bsr, lch[3].bsr);
pthread_mutex_unlock(&mutex);
}
void sched_ue::ul_phr(int phr)
{
power_headroom= phr;
power_headroom = phr;
}
// Update the downlink buffer occupancy reported for one logical channel.
// tx_queue/retx_queue are byte counts of new and pending-retransmission data;
// out-of-range lc_id values are ignored.
void sched_ue::dl_buffer_state(uint8_t lc_id, uint32_t tx_queue, uint32_t retx_queue)
{
  pthread_mutex_lock(&mutex);
  if (lc_id < sched_interface::MAX_LC) {
    lch[lc_id].buf_tx   = tx_queue;
    lch[lc_id].buf_retx = retx_queue;
    Debug("SCHED: DL lcid=%d buffer_state=%d,%d\n", lc_id, tx_queue, retx_queue);
  }
  pthread_mutex_unlock(&mutex);
}
// Account for one pending MAC Control Element to be scheduled for this UE.
// NOTE(review): ce_code is not used in this body — only a counter is bumped;
// presumably the CE type is resolved elsewhere. Confirm against callers.
void sched_ue::mac_buffer_state(uint32_t ce_code)
{
  pthread_mutex_lock(&mutex);
  buf_mac++;
  pthread_mutex_unlock(&mutex);
}
void sched_ue::set_sr()
@ -222,42 +250,59 @@ void sched_ue::unset_sr()
bool sched_ue::pucch_sr_collision(uint32_t current_tti, uint32_t n_cce)
{
bool ret = false;
pthread_mutex_lock(&mutex);
uint32_t n_pucch_sr, n_pucch_nosr;
srslte_pucch_sched_t pucch_sched;
bool has_sr;
if (!phy_config_dedicated_enabled) {
return false;
goto unlock;
}
srslte_pucch_sched_t pucch_sched;
pucch_sched.sps_enabled = false;
pucch_sched.n_pucch_sr = cfg.sr_N_pucch;
pucch_sched.n_pucch_2 = cfg.n_pucch_cqi;
pucch_sched.N_pucch_1 = cfg.pucch_cfg.n1_pucch_an;
bool has_sr = cfg.sr_enabled && srslte_ue_ul_sr_send_tti(cfg.sr_I, current_tti);
has_sr = cfg.sr_enabled && srslte_ue_ul_sr_send_tti(cfg.sr_I, current_tti);
if (!has_sr) {
return false;
goto unlock;
}
uint32_t n_pucch_sr = srslte_pucch_get_npucch(n_cce, SRSLTE_PUCCH_FORMAT_1A, true, &pucch_sched);
uint32_t n_pucch_nosr = srslte_pucch_get_npucch(n_cce, SRSLTE_PUCCH_FORMAT_1A, false, &pucch_sched);
n_pucch_sr = srslte_pucch_get_npucch(n_cce, SRSLTE_PUCCH_FORMAT_1A, true, &pucch_sched);
n_pucch_nosr = srslte_pucch_get_npucch(n_cce, SRSLTE_PUCCH_FORMAT_1A, false, &pucch_sched);
if (srslte_pucch_n_prb(&cfg.pucch_cfg, SRSLTE_PUCCH_FORMAT_1A, n_pucch_sr, cell.nof_prb, cell.cp, 0) ==
srslte_pucch_n_prb(&cfg.pucch_cfg, SRSLTE_PUCCH_FORMAT_1A, n_pucch_nosr, cell.nof_prb, cell.cp, 0))
{
return true;
ret = true;
} else {
return false;
ret = false;
}
unlock:
pthread_mutex_unlock(&mutex);
return ret;
}
bool sched_ue::get_pucch_sched(uint32_t current_tti, uint32_t prb_idx[2])
{
bool ret = false;
bool has_sr;
pthread_mutex_lock(&mutex);
if (!phy_config_dedicated_enabled) {
return false;
goto unlock;
}
srslte_pucch_sched_t pucch_sched;
pucch_sched.sps_enabled = false;
pucch_sched.n_pucch_sr = cfg.sr_N_pucch;
pucch_sched.n_pucch_2 = cfg.n_pucch_cqi;
pucch_sched.N_pucch_1 = cfg.pucch_cfg.n1_pucch_an;
bool has_sr = cfg.sr_enabled && srslte_ue_ul_sr_send_tti(cfg.sr_I, current_tti);
has_sr = cfg.sr_enabled && srslte_ue_ul_sr_send_tti(cfg.sr_I, current_tti);
// First check if it has pending ACKs
for (int i=0;i<SCHED_MAX_HARQ_PROC;i++) {
@ -270,7 +315,8 @@ bool sched_ue::get_pucch_sched(uint32_t current_tti, uint32_t prb_idx[2])
Debug("SCHED: Reserved Format1A PUCCH for rnti=0x%x, n_prb=%d,%d, n_pucch=%d, ncce=%d, has_sr=%d, n_pucch_1=%d\n",
rnti, prb_idx[0], prb_idx[1], n_pucch, dl_harq[i].get_n_cce(), has_sr, pucch_sched.N_pucch_1);
}
return true;
ret = true;
goto unlock;
}
}
// If there is no Format1A/B, then check if it's expecting Format1
@ -281,7 +327,8 @@ bool sched_ue::get_pucch_sched(uint32_t current_tti, uint32_t prb_idx[2])
}
}
Debug("SCHED: Reserved Format1 PUCCH for rnti=0x%x, n_prb=%d,%d, n_pucch=%d\n", rnti, prb_idx[0], prb_idx[1], cfg.sr_N_pucch);
return true;
ret = true;
goto unlock;
}
// Finally check Format2 (periodic CQI)
if (cfg.cqi_enabled && srslte_cqi_send(cfg.cqi_idx, current_tti)) {
@ -292,27 +339,41 @@ bool sched_ue::get_pucch_sched(uint32_t current_tti, uint32_t prb_idx[2])
Debug("SCHED: Reserved Format2 PUCCH for rnti=0x%x, n_prb=%d,%d, n_pucch=%d, pmi_idx=%d\n",
rnti, prb_idx[0], prb_idx[1], cfg.cqi_pucch, cfg.cqi_idx);
}
return true;
ret = true;
goto unlock;
}
return false;
ret = false;
unlock:
pthread_mutex_unlock(&mutex);
return ret;
}
int sched_ue::set_ack_info(uint32_t tti, uint32_t tb_idx, bool ack)
{
pthread_mutex_lock(&mutex);
int ret = -1;
for (int i=0;i<SCHED_MAX_HARQ_PROC;i++) {
if (TTI_TX(dl_harq[i].get_tti()) == tti) {
Debug("SCHED: Set ACK=%d for rnti=0x%x, pid=%d.%d, tti=%d\n", ack, rnti, i, tb_idx, tti);
dl_harq[i].set_ack(tb_idx, ack);
return dl_harq[i].get_tbs(tb_idx);
ret = dl_harq[i].get_tbs(tb_idx);
goto unlock;
}
}
Warning("SCHED: Received ACK info for unknown TTI=%d\n", tti);
return -1;
ret = -1;
unlock:
pthread_mutex_unlock(&mutex);
return ret;
}
void sched_ue::ul_recv_len(uint32_t lcid, uint32_t len)
{
pthread_mutex_lock(&mutex);
// Remove PDCP header??
if (len > 4) {
len -= 4;
@ -328,54 +389,72 @@ void sched_ue::ul_recv_len(uint32_t lcid, uint32_t len)
}
Debug("SCHED: recv_len=%d, lcid=%d, bsr={%d,%d,%d,%d}\n", len, lcid,
lch[0].bsr, lch[1].bsr, lch[2].bsr, lch[3].bsr);
pthread_mutex_unlock(&mutex);
}
// Apply the PUSCH decode result for the given TTI: the CRC outcome is fed as
// the ACK/NACK of transport block 0 of the corresponding UL HARQ process.
void sched_ue::set_ul_crc(uint32_t tti, bool crc_res)
{
  pthread_mutex_lock(&mutex);
  get_ul_harq(tti)->set_ack(0, crc_res);
  pthread_mutex_unlock(&mutex);
}
// Record the most recent downlink Rank Indicator together with the TTI in
// which it was received.
void sched_ue::set_dl_ri(uint32_t tti, uint32_t ri)
{
  pthread_mutex_lock(&mutex);
  dl_ri_tti = tti;
  dl_ri     = ri;
  pthread_mutex_unlock(&mutex);
}
// Record the most recent downlink Precoding Matrix Indicator together with
// the TTI in which it was received.
void sched_ue::set_dl_pmi(uint32_t tti, uint32_t pmi)
{
  pthread_mutex_lock(&mutex);
  dl_pmi_tti = tti;
  dl_pmi     = pmi;
  pthread_mutex_unlock(&mutex);
}
// Record the most recent downlink CQI report together with the TTI in which
// it was received.
void sched_ue::set_dl_cqi(uint32_t tti, uint32_t cqi)
{
  pthread_mutex_lock(&mutex);
  dl_cqi_tti = tti;
  dl_cqi     = cqi;
  pthread_mutex_unlock(&mutex);
}
// Copy the RRC-provided dedicated antenna configuration into this UE's state.
// The caller retains ownership of *d; a full struct copy is taken under the lock.
void sched_ue::set_dl_ant_info(LIBLTE_RRC_ANTENNA_INFO_DEDICATED_STRUCT *d)
{
  pthread_mutex_lock(&mutex);
  memcpy(&dl_ant_info, d, sizeof(LIBLTE_RRC_ANTENNA_INFO_DEDICATED_STRUCT));
  pthread_mutex_unlock(&mutex);
}
// Record the most recent uplink channel-quality estimate and its TTI.
// NOTE(review): ul_ch_code is unused in this body — presumably intended to
// distinguish the UL channel the estimate came from; confirm against callers.
void sched_ue::set_ul_cqi(uint32_t tti, uint32_t cqi, uint32_t ul_ch_code)
{
  pthread_mutex_lock(&mutex);
  ul_cqi = cqi;
  ul_cqi_tti = tti;
  pthread_mutex_unlock(&mutex);
}
// Request an uplink power increase: if the UE reports positive power headroom,
// the next PUSCH/PUCCH DCIs carry TPC command value 3 (power up).
// Fix: log message said "TCP"; the field is TPC (Transmit Power Control).
void sched_ue::tpc_inc() {
  pthread_mutex_lock(&mutex);
  if (power_headroom > 0) {
    next_tpc_pusch = 3;
    next_tpc_pucch = 3;
  }
  log_h->info("SCHED: Set TPC=%d for rnti=0x%x\n", next_tpc_pucch, rnti);
  pthread_mutex_unlock(&mutex);
}
// Request an uplink power decrease: the next PUSCH/PUCCH DCIs carry TPC
// command value 0 (power down).
// Fix: log message said "TCP"; the field is TPC (Transmit Power Control).
void sched_ue::tpc_dec() {
  pthread_mutex_lock(&mutex);
  next_tpc_pusch = 0;
  next_tpc_pucch = 0;
  log_h->info("SCHED: Set TPC=%d for rnti=0x%x\n", next_tpc_pucch, rnti);
  pthread_mutex_unlock(&mutex);
}
/*******************************************************
@ -391,6 +470,8 @@ int sched_ue::generate_format1(dl_harq_proc *h,
uint32_t tti,
uint32_t cfi)
{
pthread_mutex_lock(&mutex);
srslte_ra_dl_dci_t *dci = &data->dci;
bzero(dci, sizeof(srslte_ra_dl_dci_t));
@ -410,7 +491,7 @@ int sched_ue::generate_format1(dl_harq_proc *h,
}
if (h->is_empty(0)) {
uint32_t req_bytes = get_pending_dl_new_data(tti);
uint32_t req_bytes = get_pending_dl_new_data_unlocked(tti);
uint32_t nof_prb = format1_count_prb(h->get_rbgmask(), cell.nof_prb);
srslte_ra_dl_grant_t grant;
@ -463,6 +544,7 @@ int sched_ue::generate_format1(dl_harq_proc *h,
dci->tb_en[0] = true;
dci->tb_en[1] = false;
}
pthread_mutex_unlock(&mutex);
return tbs;
}
@ -471,8 +553,21 @@ int sched_ue::generate_format2a(dl_harq_proc *h,
sched_interface::dl_sched_data_t *data,
uint32_t tti,
uint32_t cfi)
{
pthread_mutex_lock(&mutex);
int ret = generate_format2a_unlocked(h, data, tti, cfi);
pthread_mutex_unlock(&mutex);
return ret;
}
// Generates a Format2a grant
int sched_ue::generate_format2a_unlocked(dl_harq_proc *h,
sched_interface::dl_sched_data_t *data,
uint32_t tti,
uint32_t cfi)
{
bool tb_en[SRSLTE_MAX_TB] = {false};
srslte_ra_dl_dci_t *dci = &data->dci;
bzero(dci, sizeof(srslte_ra_dl_dci_t));
@ -512,7 +607,7 @@ int sched_ue::generate_format2a(dl_harq_proc *h,
}
for (uint32_t tb = 0; tb < SRSLTE_MAX_TB; tb++) {
uint32_t req_bytes = get_pending_dl_new_data(tti);
uint32_t req_bytes = get_pending_dl_new_data_unlocked(tti);
int mcs = 0;
int tbs = 0;
@ -566,17 +661,22 @@ int sched_ue::generate_format2a(dl_harq_proc *h,
dci->tpc_pucch = (uint8_t) next_tpc_pucch;
next_tpc_pucch = 1;
return data->tbs[0] + data->tbs[1];
int ret = data->tbs[0] + data->tbs[1];
return ret;
}
// Generates a Format2 grant
int sched_ue::generate_format2(dl_harq_proc *h,
sched_interface::dl_sched_data_t *data,
uint32_t tti,
uint32_t cfi)
{
pthread_mutex_lock(&mutex);
/* Call Format 2a (common) */
int ret = generate_format2a(h, data, tti, cfi);
int ret = generate_format2a_unlocked(h, data, tti, cfi);
/* Compute precoding information */
if (SRSLTE_RA_DL_GRANT_NOF_TB(&data->dci) == 1) {
@ -585,6 +685,8 @@ int sched_ue::generate_format2(dl_harq_proc *h,
data->dci.pinfo = (uint8_t) (dl_pmi & 1);
}
pthread_mutex_unlock(&mutex);
return ret;
}
@ -594,6 +696,8 @@ int sched_ue::generate_format0(ul_harq_proc *h,
uint32_t tti,
bool cqi_request)
{
pthread_mutex_lock(&mutex);
srslte_ra_ul_dci_t *dci = &data->dci;
bzero(dci, sizeof(srslte_ra_ul_dci_t));
@ -608,7 +712,7 @@ int sched_ue::generate_format0(ul_harq_proc *h,
h->new_tx(tti, mcs, tbs);
} else if (h->is_empty(0)) {
uint32_t req_bytes = get_pending_ul_new_data(tti);
uint32_t req_bytes = get_pending_ul_new_data_unlocked(tti);
uint32_t N_srs = 0;
uint32_t nof_re = (2*(SRSLTE_CP_NSYMB(cell.cp)-1) - N_srs)*allocation.L*SRSLTE_NRE;
@ -646,6 +750,8 @@ int sched_ue::generate_format0(ul_harq_proc *h,
next_tpc_pusch = 1;
}
pthread_mutex_unlock(&mutex);
return tbs;
}
@ -678,11 +784,20 @@ bool sched_ue::is_first_dl_tx()
}
bool sched_ue::needs_cqi(uint32_t tti, bool will_be_sent)
{
pthread_mutex_lock(&mutex);
bool ret = needs_cqi_unlocked(tti, will_be_sent);
pthread_mutex_unlock(&mutex);
return ret;
}
// Private lock-free implemenentation
bool sched_ue::needs_cqi_unlocked(uint32_t tti, bool will_be_sent)
{
bool ret = false;
if (phy_config_dedicated_enabled &&
cfg.aperiodic_cqi_period &&
get_pending_dl_new_data(tti) > 0)
get_pending_dl_new_data_unlocked(tti) > 0)
{
uint32_t interval = srslte_tti_interval(tti, dl_cqi_tti);
bool needscqi = interval >= cfg.aperiodic_cqi_period;
@ -702,12 +817,9 @@ bool sched_ue::needs_cqi(uint32_t tti, bool will_be_sent)
uint32_t sched_ue::get_pending_dl_new_data(uint32_t tti)
{
uint32_t pending_data = 0;
for (int i=0;i<sched_interface::MAX_LC;i++) {
if (bearer_is_dl(&lch[i])) {
pending_data += lch[i].buf_retx + lch[i].buf_tx;
}
}
pthread_mutex_lock(&mutex);
uint32_t pending_data = get_pending_dl_new_data_unlocked(tti);
pthread_mutex_unlock(&mutex);
return pending_data;
}
@ -717,17 +829,48 @@ uint32_t sched_ue::get_pending_dl_new_data(uint32_t tti)
/// \return number of bytes to be allocated
uint32_t sched_ue::get_pending_dl_new_data_total(uint32_t tti)
{
uint32_t req_bytes = get_pending_dl_new_data(tti);
pthread_mutex_lock(&mutex);
uint32_t req_bytes = get_pending_dl_new_data_unlocked(tti);
if(req_bytes>0) {
req_bytes += (req_bytes < 128) ? 2 : 3; // consider the header
if(is_first_dl_tx()) {
req_bytes += 6; // count for RAR
}
}
pthread_mutex_unlock(&mutex);
return req_bytes;
}
// Private lock-free implementation
// Lock-free implementation: sum of new-tx and retx bytes queued on every
// active DL bearer. Caller must hold the UE mutex.
uint32_t sched_ue::get_pending_dl_new_data_unlocked(uint32_t tti)
{
  uint32_t total = 0;
  for (int lcid = 0; lcid < sched_interface::MAX_LC; lcid++) {
    if (bearer_is_dl(&lch[lcid])) {
      total += lch[lcid].buf_tx;
      total += lch[lcid].buf_retx;
    }
  }
  return total;
}
// Thread-safe wrapper: takes the UE mutex and delegates to the lock-free
// implementation.
uint32_t sched_ue::get_pending_ul_new_data(uint32_t tti)
{
  pthread_mutex_lock(&mutex);
  const uint32_t pending = get_pending_ul_new_data_unlocked(tti);
  pthread_mutex_unlock(&mutex);
  return pending;
}
// Thread-safe wrapper: takes the UE mutex and delegates to the lock-free
// implementation.
uint32_t sched_ue::get_pending_ul_old_data()
{
  pthread_mutex_lock(&mutex);
  const uint32_t pending = get_pending_ul_old_data_unlocked();
  pthread_mutex_unlock(&mutex);
  return pending;
}
// Private lock-free implementation
uint32_t sched_ue::get_pending_ul_new_data_unlocked(uint32_t tti)
{
uint32_t pending_data = 0;
for (int i=0;i<sched_interface::MAX_LC;i++) {
@ -738,10 +881,10 @@ uint32_t sched_ue::get_pending_ul_new_data(uint32_t tti)
if (!pending_data && is_sr_triggered()) {
return 512;
}
if (!pending_data && needs_cqi(tti)) {
if (!pending_data && needs_cqi_unlocked(tti)) {
return 128;
}
uint32_t pending_ul_data = get_pending_ul_old_data();
uint32_t pending_ul_data = get_pending_ul_old_data_unlocked();
if (pending_data > pending_ul_data) {
pending_data -= pending_ul_data;
} else {
@ -754,7 +897,8 @@ uint32_t sched_ue::get_pending_ul_new_data(uint32_t tti)
return pending_data;
}
uint32_t sched_ue::get_pending_ul_old_data()
// Private lock-free implementation
uint32_t sched_ue::get_pending_ul_old_data_unlocked()
{
uint32_t pending_data = 0;
for (int i=0;i<SCHED_MAX_HARQ_PROC;i++) {
@ -775,6 +919,8 @@ uint32_t sched_ue::rgb_to_prb(uint32_t nof_rbg)
uint32_t sched_ue::get_required_prb_dl(uint32_t req_bytes, uint32_t nof_ctrl_symbols)
{
pthread_mutex_lock(&mutex);
int mcs = 0;
uint32_t nof_re = 0;
int tbs = 0;
@ -791,10 +937,12 @@ uint32_t sched_ue::get_required_prb_dl(uint32_t req_bytes, uint32_t nof_ctrl_sym
if (tbs > 0) {
nbytes = tbs;
} else if (tbs < 0) {
pthread_mutex_unlock(&mutex);
return 0;
}
}
pthread_mutex_unlock(&mutex);
return n;
}
@ -810,6 +958,8 @@ uint32_t sched_ue::get_required_prb_ul(uint32_t req_bytes)
return 0;
}
pthread_mutex_lock(&mutex);
for (n=1;n<cell.nof_prb && nbytes < req_bytes + 4;n++) {
uint32_t nof_re = (2*(SRSLTE_CP_NSYMB(cell.cp)-1) - N_srs)*n*SRSLTE_NRE;
int tbs = 0;
@ -827,6 +977,8 @@ uint32_t sched_ue::get_required_prb_ul(uint32_t req_bytes)
n++;
}
pthread_mutex_unlock(&mutex);
return n;
}
@ -835,10 +987,26 @@ bool sched_ue::is_sr_triggered()
return sr;
}
// Recover stale DL HARQ processes: any process still occupied whose last
// activity is more than 50 TTIs old is forcibly reset (both transport blocks).
// Fix: the first log line claimed "pid=%d is empty" but it is emitted on the
// branch taken when the process is NOT empty — message inverted.
void sched_ue::reset_timeout_dl_harq(uint32_t tti) {
  for (int i=0;i<SCHED_MAX_HARQ_PROC;i++) {
    if (!(dl_harq[i].is_empty(0) && dl_harq[i].is_empty(1))) {
      log_h->info("SCHED: pid=%d not empty\n", i);
      if (srslte_tti_interval(tti, dl_harq[i].get_tti()) > 50) {
        log_h->info("SCHED: pid=%d is old. tti_pid=%d, now is %d, resetting\n", i, dl_harq[i].get_tti(), tti);
        dl_harq[i].reset(0);
        dl_harq[i].reset(1);
      }
    }
  }
}
/* Gets HARQ process with oldest pending retx */
dl_harq_proc* sched_ue::get_pending_dl_harq(uint32_t tti)
{
#if ASYNC_DL_SCHED
pthread_mutex_lock(&mutex);
int oldest_idx=-1;
uint32_t oldest_tti = 0;
for (int i=0;i<SCHED_MAX_HARQ_PROC;i++) {
@ -850,11 +1018,15 @@ dl_harq_proc* sched_ue::get_pending_dl_harq(uint32_t tti)
}
}
}
dl_harq_proc *h = NULL;
if (oldest_idx >= 0) {
return &dl_harq[oldest_idx];
} else {
return NULL;
h = &dl_harq[oldest_idx];
}
pthread_mutex_unlock(&mutex);
return h;
#else
return &dl_harq[tti%SCHED_MAX_HARQ_PROC];
#endif
@ -862,12 +1034,16 @@ dl_harq_proc* sched_ue::get_pending_dl_harq(uint32_t tti)
dl_harq_proc* sched_ue::get_empty_dl_harq()
{
for (int i=0;i<SCHED_MAX_HARQ_PROC;i++) {
pthread_mutex_lock(&mutex);
dl_harq_proc *h = NULL;
for (int i=0;i<SCHED_MAX_HARQ_PROC && !h;i++) {
if (dl_harq[i].is_empty(0) && dl_harq[i].is_empty(1)) {
return &dl_harq[i];
h = &dl_harq[i];
}
}
return NULL;
pthread_mutex_unlock(&mutex);
return h;
}
ul_harq_proc* sched_ue::get_ul_harq(uint32_t tti)
@ -908,6 +1084,7 @@ srslte_dci_format_t sched_ue::get_dci_format() {
/* Find lowest DCI aggregation level supported by the UE spectral efficiency */
uint32_t sched_ue::get_aggr_level(uint32_t nof_bits)
{
pthread_mutex_lock(&mutex);
uint32_t l=0;
float max_coderate = srslte_cqi_to_coderate(dl_cqi);
float coderate = 99;
@ -922,6 +1099,7 @@ uint32_t sched_ue::get_aggr_level(uint32_t nof_bits)
l++;
} while(l<l_max && factor*coderate > max_coderate);
Debug("SCHED: CQI=%d, l=%d, nof_bits=%d, coderate=%.2f, max_coderate=%.2f\n", dl_cqi, l, nof_bits, coderate, max_coderate);
pthread_mutex_unlock(&mutex);
return l;
}
@ -1029,9 +1207,6 @@ int sched_ue::alloc_tbs(uint32_t nof_prb,
uint32_t max_Qm = is_ul?4:6; // Allow 16-QAM in PUSCH Only
// TODO: Compute real spectral efficiency based on PUSCH-UCI configuration
if (has_pucch && is_ul) {
cqi-=3;
}
int tbs = cqi_to_tbs(cqi, nof_prb, nof_re, max_mcs, max_Qm, &sel_mcs)/8;

@ -115,7 +115,6 @@ srslte_softbuffer_tx_t* ue::get_tx_softbuffer(uint32_t harq_process, uint32_t tb
uint8_t* ue::request_buffer(uint32_t tti, uint32_t len)
{
uint8_t *ret = NULL;
pthread_mutex_lock(&mutex);
if (len > 0) {
if (!pending_buffers[tti%NOF_HARQ_PROCESSES]) {
ret = pdus.request(len);
@ -127,7 +126,6 @@ uint8_t* ue::request_buffer(uint32_t tti, uint32_t len)
} else {
log_h->warning("Requesting buffer for zero bytes\n");
}
pthread_mutex_unlock(&mutex);
return ret;
}
@ -190,12 +188,12 @@ void ue::process_pdu(uint8_t* pdu, uint32_t nof_bytes, srslte::pdu_queue::channe
}
// Indicate scheduler to update BSR counters
sched->ul_recv_len(rnti, mac_msg_ul.get()->get_sdu_lcid(), mac_msg_ul.get()->get_payload_size());
//sched->ul_recv_len(rnti, mac_msg_ul.get()->get_sdu_lcid(), mac_msg_ul.get()->get_payload_size());
// Indicate RRC about successful activity if valid RLC message is received
if (mac_msg_ul.get()->get_payload_size() > 64) { // do not count RLC status messages only
rrc->set_activity_user(rnti);
log_h->info("UL activity rnti=0x%x, n_bytes=%d\n", rnti, nof_bytes);
log_h->debug("UL activity rnti=0x%x, n_bytes=%d\n", rnti, nof_bytes);
}
if ((int) mac_msg_ul.get()->get_payload_size() > most_data) {

@ -167,7 +167,7 @@ void parse_args(all_args_t *args, int argc, char* argv[]) {
"Number of PHY threads")
("expert.link_failure_nof_err",
bpo::value<int>(&args->expert.mac.link_failure_nof_err)->default_value(50),
bpo::value<int>(&args->expert.mac.link_failure_nof_err)->default_value(100),
"Number of PUSCH failures after which a radio-link failure is triggered")
("expert.max_prach_offset_us",

@ -41,6 +41,8 @@ using namespace std;
namespace srsenb{
#define MAX(a,b) (a>b?a:b)
char const * const prefixes[2][9] =
{
{ "", "m", "u", "n", "p", "f", "a", "z", "y", },
@ -125,25 +127,41 @@ void metrics_stdout::print_metrics()
}
cout << std::hex << metrics.mac[i].rnti << " ";
cout << float_to_string(metrics.mac[i].dl_cqi, 2);
cout << float_to_string(MAX(0.1,metrics.mac[i].dl_cqi), 2);
cout << float_to_string(metrics.mac[i].dl_ri, 1);
cout << float_to_string(metrics.phy[i].dl.mcs, 2);
cout << float_to_eng_string((float) metrics.mac[i].tx_brate/metrics_report_period, 2);
if(not isnan(metrics.phy[i].dl.mcs)) {
cout << float_to_string(MAX(0.1,metrics.phy[i].dl.mcs), 2);
} else {
cout << float_to_string(0,2);
}
if (metrics.mac[i].tx_brate > 0 && metrics_report_period) {
cout << float_to_eng_string(MAX(0.1,(float) metrics.mac[i].tx_brate/metrics_report_period), 2);
} else {
cout << float_to_string(0, 2) << "";
}
if (metrics.mac[i].tx_pkts > 0 && metrics.mac[i].tx_errors) {
cout << float_to_string((float) 100*metrics.mac[i].tx_errors/metrics.mac[i].tx_pkts, 1) << "%";
cout << float_to_string(MAX(0.1,(float) 100*metrics.mac[i].tx_errors/metrics.mac[i].tx_pkts), 1) << "%";
} else {
cout << float_to_string(0, 1) << "%";
}
cout << float_to_string(metrics.phy[i].ul.sinr, 2);
if(not isnan(metrics.phy[i].ul.sinr)) {
cout << float_to_string(MAX(0.1,metrics.phy[i].ul.sinr), 2);
} else {
cout << float_to_string(0,2);
}
cout << float_to_string(metrics.mac[i].phr, 2);
cout << float_to_string(metrics.phy[i].ul.mcs, 2);
if(not isnan(metrics.phy[i].ul.mcs)) {
cout << float_to_string(MAX(0.1,metrics.phy[i].ul.mcs), 2);
} else {
cout << float_to_string(0,2);
}
if (metrics.mac[i].rx_brate > 0 && metrics_report_period) {
cout << float_to_eng_string((float) metrics.mac[i].rx_brate/metrics_report_period, 2);
cout << float_to_eng_string(MAX(0.1,(float) metrics.mac[i].rx_brate/metrics_report_period), 2);
} else {
cout << " " << float_to_string(0, 2);
cout << float_to_string(0, 2) << "";
}
if (metrics.mac[i].rx_pkts > 0 && metrics.mac[i].rx_errors > 0) {
cout << float_to_string((float) 100*metrics.mac[i].rx_errors/metrics.mac[i].rx_pkts, 1) << "%";
cout << float_to_string(MAX(0.1,(float) 100*metrics.mac[i].rx_errors/metrics.mac[i].rx_pkts), 1) << "%";
} else {
cout << float_to_string(0, 1) << "%";
}

@ -39,10 +39,10 @@
using namespace std;
// Enable this to log SI
#define LOG_THIS(a) 1
//#define LOG_THIS(a) 1
// Enable this one to skip SI-RNTI
//#define LOG_THIS(rnti) (rnti != 0xFFFF)
#define LOG_THIS(rnti) (rnti != 0xFFFF)
/* Define GUI-related things */
@ -185,11 +185,11 @@ void phch_worker::stop()
free(signal_buffer_tx[p]);
}
}
pthread_mutex_unlock(&mutex);
pthread_mutex_destroy(&mutex);
} else {
printf("Warning could not stop properly PHY\n");
}
pthread_mutex_unlock(&mutex);
pthread_mutex_destroy(&mutex);
}
void phch_worker::reset()
{
@ -372,6 +372,7 @@ void phch_worker::work_imp()
subframe_cfg_t sf_cfg;
phy->get_sf_config(&sf_cfg, tti_tx_dl);// TODO difference between tti_tx_dl and t_tx_dl
pthread_mutex_lock(&mutex);
is_worker_running = true;

@ -23,7 +23,7 @@
* and at http://www.gnu.org/licenses/.
*
*/
#include "srslte/upper/gtpu.h"
#include "srsenb/hdr/upper/gtpu.h"
#include <unistd.h>
#include <sys/socket.h>
@ -31,12 +31,11 @@
#include <errno.h>
using namespace srslte;
namespace srsenb {
gtpu::gtpu():mchthread()
{
}
gtpu::gtpu():mchthread()
{
}
bool gtpu::init(std::string gtp_bind_addr_, std::string mme_addr_, srsenb::pdcp_interface_gtpu* pdcp_, srslte::log* gtpu_log_, bool enable_mbsfn)
{
@ -148,7 +147,7 @@ void gtpu::write_pdu(uint16_t rnti, uint32_t lcid, srslte::byte_buffer_t* pdu)
servaddr.sin_addr.s_addr = htonl(rnti_bearers[rnti].spgw_addrs[lcid]);
servaddr.sin_port = htons(GTPU_PORT);
gtpu_write_header(&header, pdu);
gtpu_write_header(&header, pdu, gtpu_log);
if (sendto(snk_fd, pdu->msg, pdu->N_bytes, MSG_EOR, (struct sockaddr*)&servaddr, sizeof(struct sockaddr_in))<0) {
perror("sendto");
}
@ -230,7 +229,7 @@ void gtpu::run_thread()
pdu->N_bytes = (uint32_t) n;
gtpu_header_t header;
gtpu_read_header(pdu, &header);
gtpu_read_header(pdu, &header,gtpu_log);
uint16_t rnti = 0;
uint16_t lcid = 0;
@ -265,69 +264,6 @@ void gtpu::run_thread()
running = false;
}
/****************************************************************************
* Header pack/unpack helper functions
* Ref: 3GPP TS 29.281 v10.1.0 Section 5
***************************************************************************/
// Serialize a GTP-U header in front of the PDU payload.
// Only the plain v1 data header is supported: flags must be 0x30 and
// message type 0xFF (G-PDU); anything else is rejected with an error log.
// On success the PDU's msg pointer is moved back GTPU_HEADER_LEN bytes
// (consuming headroom) and the header fields are written big-endian-packed
// via the uint16/uint32 helpers. Returns false (PDU untouched) on any check
// failing, including insufficient headroom.
bool gtpu::gtpu_write_header(gtpu_header_t *header, srslte::byte_buffer_t *pdu)
{
  if(header->flags != 0x30) {
    gtpu_log->error("gtpu_write_header - Unhandled header flags: 0x%x\n", header->flags);
    return false;
  }
  if(header->message_type != 0xFF) {
    gtpu_log->error("gtpu_write_header - Unhandled message type: 0x%x\n", header->message_type);
    return false;
  }
  if(pdu->get_headroom() < GTPU_HEADER_LEN) {
    gtpu_log->error("gtpu_write_header - No room in PDU for header\n");
    return false;
  }
  // Prepend the header by expanding into the buffer's headroom.
  pdu->msg -= GTPU_HEADER_LEN;
  pdu->N_bytes += GTPU_HEADER_LEN;
  uint8_t *ptr = pdu->msg;
  *ptr = header->flags;
  ptr++;
  *ptr = header->message_type;
  ptr++;
  uint16_to_uint8(header->length, ptr);
  ptr += 2;
  uint32_to_uint8(header->teid, ptr);
  return true;
}
// Parse and strip the GTP-U header from the front of a received PDU, filling
// *header with flags, message type, length and TEID.
// Returns false if the flags are not 0x30 or the message type is not 0xFF
// (G-PDU) — only the plain v1 data header is handled.
// NOTE(review): pdu->msg/N_bytes are advanced BEFORE validation, so on a
// failed check the buffer is already stripped of GTPU_HEADER_LEN bytes —
// confirm callers tolerate this (the visible caller ignores the return value).
bool gtpu::gtpu_read_header(srslte::byte_buffer_t *pdu, gtpu_header_t *header)
{
  uint8_t *ptr = pdu->msg;
  // Strip the header from the payload view.
  pdu->msg += GTPU_HEADER_LEN;
  pdu->N_bytes -= GTPU_HEADER_LEN;
  header->flags = *ptr;
  ptr++;
  header->message_type = *ptr;
  ptr++;
  uint8_to_uint16(ptr, &header->length);
  ptr += 2;
  uint8_to_uint32(ptr, &header->teid);
  if(header->flags != 0x30) {
    gtpu_log->error("gtpu_read_header - Unhandled header flags: 0x%x\n", header->flags);
    return false;
  }
  if(header->message_type != 0xFF) {
    gtpu_log->error("gtpu_read_header - Unhandled message type: 0x%x\n", header->message_type);
    return false;
  }
  return true;
}
/****************************************************************************
* TEID to RNIT/LCID helper functions
***************************************************************************/
@ -428,6 +364,9 @@ void gtpu::mch_thread::run_thread()
pdu->N_bytes = (uint32_t) n;
gtpu_header_t header;
gtpu_read_header(pdu, &header, gtpu_log);
pdcp->write_sdu(SRSLTE_MRNTI, lcid, pdu);
do {
pdu = pool_allocate;

@ -37,18 +37,24 @@ void pdcp::init(rlc_interface_pdcp* rlc_, rrc_interface_pdcp* rrc_, gtpu_interfa
log_h = pdcp_log_;
pool = srslte::byte_buffer_pool::get_instance();
pthread_rwlock_init(&rwlock, NULL);
}
void pdcp::stop()
{
pthread_rwlock_wrlock(&rwlock);
for(std::map<uint32_t, user_interface>::iterator iter=users.begin(); iter!=users.end(); ++iter) {
rem_user((uint32_t) iter->first);
clear_user(&iter->second);
}
users.clear();
pthread_rwlock_unlock(&rwlock);
pthread_rwlock_destroy(&rwlock);
}
void pdcp::add_user(uint16_t rnti)
{
pthread_rwlock_rdlock(&rwlock);
if (users.count(rnti) == 0) {
srslte::pdcp *obj = new srslte::pdcp;
obj->init(&users[rnti].rlc_itf, &users[rnti].rrc_itf, &users[rnti].gtpu_itf, log_h, RB_ID_SRB0, SECURITY_DIRECTION_DOWNLINK);
@ -61,20 +67,30 @@ void pdcp::add_user(uint16_t rnti)
users[rnti].gtpu_itf.gtpu = gtpu;
users[rnti].pdcp = obj;
}
pthread_rwlock_unlock(&rwlock);
}
// Private unlocked deallocation of user
// Private, lock-free teardown of one user's PDCP entity: stop it, free it and
// null the pointer so a stale reference cannot be dereferenced.
// Caller must hold the rwlock (write side) — this helper takes no lock itself.
void pdcp::clear_user(user_interface *ue)
{
  ue->pdcp->stop();
  delete ue->pdcp;
  ue->pdcp = NULL;
}
void pdcp::rem_user(uint16_t rnti)
{
pthread_rwlock_wrlock(&rwlock);
if (users.count(rnti)) {
users[rnti].pdcp->stop();
delete users[rnti].pdcp;
users[rnti].pdcp = NULL;
clear_user(&users[rnti]);
users.erase(rnti);
}
pthread_rwlock_unlock(&rwlock);
}
void pdcp::add_bearer(uint16_t rnti, uint32_t lcid, srslte::srslte_pdcp_config_t cfg)
{
pthread_rwlock_rdlock(&rwlock);
if (users.count(rnti)) {
if(rnti != SRSLTE_MRNTI){
users[rnti].pdcp->add_bearer(lcid, cfg);
@ -82,37 +98,45 @@ void pdcp::add_bearer(uint16_t rnti, uint32_t lcid, srslte::srslte_pdcp_config_t
users[rnti].pdcp->add_bearer_mrb(lcid, cfg);
}
}
pthread_rwlock_unlock(&rwlock);
}
void pdcp::reset(uint16_t rnti)
{
pthread_rwlock_rdlock(&rwlock);
if (users.count(rnti)) {
users[rnti].pdcp->reset();
}
pthread_rwlock_unlock(&rwlock);
}
void pdcp::config_security(uint16_t rnti, uint32_t lcid, uint8_t* k_rrc_enc_, uint8_t* k_rrc_int_,
srslte::CIPHERING_ALGORITHM_ID_ENUM cipher_algo_,
srslte::INTEGRITY_ALGORITHM_ID_ENUM integ_algo_)
{
pthread_rwlock_rdlock(&rwlock);
if (users.count(rnti)) {
users[rnti].pdcp->config_security(lcid, k_rrc_enc_, k_rrc_int_, cipher_algo_, integ_algo_);
users[rnti].pdcp->enable_integrity(lcid);
users[rnti].pdcp->enable_encryption(lcid);
}
pthread_rwlock_unlock(&rwlock);
}
void pdcp::write_pdu(uint16_t rnti, uint32_t lcid, srslte::byte_buffer_t* sdu)
{
pthread_rwlock_rdlock(&rwlock);
if (users.count(rnti)) {
users[rnti].pdcp->write_pdu(lcid, sdu);
} else {
pool->deallocate(sdu);
}
pthread_rwlock_unlock(&rwlock);
}
void pdcp::write_sdu(uint16_t rnti, uint32_t lcid, srslte::byte_buffer_t* sdu)
{
pthread_rwlock_rdlock(&rwlock);
if (users.count(rnti)) {
if(rnti != SRSLTE_MRNTI){
users[rnti].pdcp->write_sdu(lcid, sdu);
@ -122,6 +146,7 @@ void pdcp::write_sdu(uint16_t rnti, uint32_t lcid, srslte::byte_buffer_t* sdu)
} else {
pool->deallocate(sdu);
}
pthread_rwlock_unlock(&rwlock);
}
void pdcp::user_interface_gtpu::write_pdu(uint32_t lcid, srslte::byte_buffer_t *pdu)

@ -40,48 +40,57 @@ void rlc::init(pdcp_interface_rlc* pdcp_, rrc_interface_rlc* rrc_, mac_interface
pool = srslte::byte_buffer_pool::get_instance();
pthread_rwlock_init(&rwlock, NULL);
}
void rlc::stop()
{
pthread_rwlock_wrlock(&rwlock);
for(std::map<uint32_t, user_interface>::iterator iter=users.begin(); iter!=users.end(); ++iter) {
rem_user((uint32_t) iter->first);
clear_user(&iter->second);
}
users.clear();
pthread_rwlock_unlock(&rwlock);
pthread_rwlock_destroy(&rwlock);
}
void rlc::add_user(uint16_t rnti)
{
pthread_rwlock_rdlock(&rwlock);
if (users.count(rnti) == 0) {
srslte::rlc *obj = new srslte::rlc;
obj->init(&users[rnti], &users[rnti], &users[rnti], log_h, mac_timers, RB_ID_SRB0);
obj->init(&users[rnti], &users[rnti], &users[rnti], log_h, mac_timers, RB_ID_SRB0, RLC_TX_QUEUE_LEN);
users[rnti].rnti = rnti;
users[rnti].pdcp = pdcp;
users[rnti].rrc = rrc;
users[rnti].rlc = obj;
users[rnti].parent = this;
}
pthread_rwlock_unlock(&rwlock);
}
void rlc::rem_user(uint16_t rnti)
// Private unlocked deallocation of user
void rlc::clear_user(user_interface *ue)
{
if (users.count(rnti)) {
users[rnti].rlc->stop();
delete users[rnti].rlc;
users[rnti].rlc = NULL;
users.erase(rnti);
}
ue->rlc->stop();
delete ue->rlc;
ue->rlc = NULL;
}
void rlc::reset(uint16_t rnti)
void rlc::rem_user(uint16_t rnti)
{
pthread_rwlock_wrlock(&rwlock);
if (users.count(rnti)) {
users[rnti].rlc->reset();
clear_user(&users[rnti]);
users.erase(rnti);
} else {
log_h->error("Removing rnti=0x%x. Already removed\n", rnti);
}
pthread_rwlock_unlock(&rwlock);
}
void rlc::clear_buffer(uint16_t rnti)
{
pthread_rwlock_rdlock(&rwlock);
if (users.count(rnti)) {
users[rnti].rlc->empty_queue();
for (int i=0;i<SRSLTE_N_RADIO_BEARERS;i++) {
@ -89,27 +98,34 @@ void rlc::clear_buffer(uint16_t rnti)
}
log_h->info("Cleared buffer rnti=0x%x\n", rnti);
}
pthread_rwlock_unlock(&rwlock);
}
void rlc::add_bearer(uint16_t rnti, uint32_t lcid)
{
pthread_rwlock_rdlock(&rwlock);
if (users.count(rnti)) {
users[rnti].rlc->add_bearer(lcid);
}
pthread_rwlock_unlock(&rwlock);
}
void rlc::add_bearer(uint16_t rnti, uint32_t lcid, srslte::srslte_rlc_config_t cnfg)
{
pthread_rwlock_rdlock(&rwlock);
if (users.count(rnti)) {
users[rnti].rlc->add_bearer(lcid, cnfg);
}
pthread_rwlock_unlock(&rwlock);
}
void rlc::add_bearer_mrb(uint16_t rnti, uint32_t lcid)
{
pthread_rwlock_rdlock(&rwlock);
if (users.count(rnti)) {
users[rnti].rlc->add_bearer_mrb_enb(lcid);
}
pthread_rwlock_unlock(&rwlock);
}
void rlc::read_pdu_pcch(uint8_t* payload, uint32_t buffer_size)
@ -121,11 +137,13 @@ int rlc::read_pdu(uint16_t rnti, uint32_t lcid, uint8_t* payload, uint32_t nof_b
{
int ret;
uint32_t tx_queue;
if(users.count(rnti)){
if(rnti != SRSLTE_MRNTI){
pthread_rwlock_rdlock(&rwlock);
if(users.count(rnti)) {
if(rnti != SRSLTE_MRNTI) {
ret = users[rnti].rlc->read_pdu(lcid, payload, nof_bytes);
tx_queue = users[rnti].rlc->get_total_buffer_state(lcid);
}else{
} else {
ret = users[rnti].rlc->read_pdu_mch(lcid, payload, nof_bytes);
tx_queue = users[rnti].rlc->get_total_mch_buffer_state(lcid);
}
@ -135,15 +153,16 @@ int rlc::read_pdu(uint16_t rnti, uint32_t lcid, uint8_t* payload, uint32_t nof_b
uint32_t retx_queue = 0;
log_h->debug("Buffer state PDCP: rnti=0x%x, lcid=%d, tx_queue=%d\n", rnti, lcid, tx_queue);
mac->rlc_buffer_state(rnti, lcid, tx_queue, retx_queue);
return ret;
}else{
return SRSLTE_ERROR;
ret = SRSLTE_ERROR;
}
pthread_rwlock_unlock(&rwlock);
return ret;
}
void rlc::write_pdu(uint16_t rnti, uint32_t lcid, uint8_t* payload, uint32_t nof_bytes)
{
pthread_rwlock_rdlock(&rwlock);
if (users.count(rnti)) {
users[rnti].rlc->write_pdu(lcid, payload, nof_bytes);
@ -154,6 +173,7 @@ void rlc::write_pdu(uint16_t rnti, uint32_t lcid, uint8_t* payload, uint32_t nof
log_h->debug("Buffer state PDCP: rnti=0x%x, lcid=%d, tx_queue=%d\n", rnti, lcid, tx_queue);
mac->rlc_buffer_state(rnti, lcid, tx_queue, retx_queue);
}
pthread_rwlock_unlock(&rwlock);
}
void rlc::read_pdu_bcch_dlsch(uint32_t sib_index, uint8_t *payload)
@ -166,9 +186,11 @@ void rlc::write_sdu(uint16_t rnti, uint32_t lcid, srslte::byte_buffer_t* sdu)
{
uint32_t tx_queue;
pthread_rwlock_rdlock(&rwlock);
if (users.count(rnti)) {
if(rnti != SRSLTE_MRNTI){
users[rnti].rlc->write_sdu(lcid, sdu);
users[rnti].rlc->write_sdu_nb(lcid, sdu);
tx_queue = users[rnti].rlc->get_total_buffer_state(lcid);
}else {
users[rnti].rlc->write_sdu_mch(lcid, sdu);
@ -183,14 +205,17 @@ void rlc::write_sdu(uint16_t rnti, uint32_t lcid, srslte::byte_buffer_t* sdu)
} else {
pool->deallocate(sdu);
}
pthread_rwlock_unlock(&rwlock);
}
bool rlc::rb_is_um(uint16_t rnti, uint32_t lcid) {
bool ret = false;
pthread_rwlock_rdlock(&rwlock);
if (users.count(rnti)) {
return users[rnti].rlc->rb_is_um(lcid);
} else {
return false;
ret = users[rnti].rlc->rb_is_um(lcid);
}
pthread_rwlock_unlock(&rwlock);
return ret;
}
void rlc::user_interface::max_retx_attempted()

@ -84,7 +84,8 @@ void rrc::stop()
{
if(running) {
running = false;
thread_cancel();
rrc_pdu p = {0, LCID_EXIT, NULL};
rx_pdu_queue.push(p);
wait_thread_finish();
}
act_monitor.stop();
@ -556,7 +557,7 @@ void rrc::parse_ul_dcch(uint16_t rnti, uint32_t lcid, byte_buffer_t *pdu)
void rrc::process_rl_failure(uint16_t rnti)
{
if (users.count(rnti) == 0) {
if (users.count(rnti) == 1) {
uint32_t n_rfl = users[rnti].rl_failure();
if (n_rfl == 1) {
rrc_log->info("Radio-Link failure detected rnti=0x%x\n", rnti);
@ -572,6 +573,8 @@ void rrc::process_rl_failure(uint16_t rnti)
} else {
rrc_log->info("%d Radio-Link failure detected rnti=0x%x\n", n_rfl, rnti);
}
} else {
rrc_log->error("Radio-Link failure detected for uknown rnti=0x%x\n", rnti);
}
}
@ -585,8 +588,7 @@ void rrc::process_release_complete(uint16_t rnti)
// There is no RRCReleaseComplete message from UE thus wait ~50 subframes for tx
usleep(50000);
}
// Save to call rem_user() directly without thread, because calling from private function
rem_user(rnti);
rem_user_thread(rnti);
} else {
rrc_log->error("Received ReleaseComplete for unknown rnti=0x%x\n", rnti);
}
@ -594,6 +596,7 @@ void rrc::process_release_complete(uint16_t rnti)
void rrc::rem_user(uint16_t rnti)
{
pthread_mutex_lock(&user_mutex);
if (users.count(rnti) == 1) {
rrc_log->console("Disconnecting rnti=0x%x.\n", rnti);
rrc_log->info("Disconnecting rnti=0x%x.\n", rnti);
@ -603,11 +606,6 @@ void rrc::rem_user(uint16_t rnti)
mac->ue_rem(rnti); // MAC handles PHY
gtpu->rem_user(rnti);
// Wait enough time
pthread_mutex_unlock(&user_mutex);
usleep(50000);
pthread_mutex_lock(&user_mutex);
// Now remove RLC and PDCP
rlc->rem_user(rnti);
pdcp->rem_user(rnti);
@ -615,11 +613,13 @@ void rrc::rem_user(uint16_t rnti)
// And deallocate resources from RRC
users[rnti].sr_free();
users[rnti].cqi_free();
users.erase(rnti);
rrc_log->info("Removed user rnti=0x%x\n", rnti);
} else {
rrc_log->error("Removing user rnti=0x%x (does not exist)\n", rnti);
}
pthread_mutex_unlock(&user_mutex);
}
void rrc::config_mac()
@ -778,7 +778,6 @@ void rrc::run_thread()
}
// Mutex these calls even though it's a private function
pthread_mutex_lock(&user_mutex);
if (users.count(p.rnti) == 1) {
switch(p.lcid)
{
@ -803,6 +802,9 @@ void rrc::run_thread()
users[p.rnti].set_activity();
}
break;
case LCID_EXIT:
rrc_log->info("Exiting thread\n");
break;
default:
rrc_log->error("Rx PDU with invalid bearer id: %d", p.lcid);
break;
@ -810,7 +812,6 @@ void rrc::run_thread()
} else {
rrc_log->warning("Discarding PDU for removed rnti=0x%x\n", p.rnti);
}
pthread_mutex_unlock(&user_mutex);
}
}
@ -863,7 +864,7 @@ void rrc::activity_monitor::run_thread()
parent->s1ap->user_release(rem_rnti, LIBLTE_S1AP_CAUSERADIONETWORK_USER_INACTIVITY);
} else {
if(rem_rnti != SRSLTE_MRNTI)
parent->rem_user(rem_rnti);
parent->rem_user_thread(rem_rnti);
}
}
pthread_mutex_unlock(&parent->user_mutex);

@ -49,6 +49,7 @@ const uint16_t GTPU_RX_PORT = 2152;
typedef struct {
std::string name;
std::string sgi_mb_if_addr;
std::string sgi_mb_if_mask;
std::string m1u_multi_addr;
} mbms_gw_args_t;

@ -12,7 +12,8 @@
#####################################################################
[mbms_gw]
name = srsmbmsgw01
sgi_mb_if_addr = 172.16.1.1
sgi_mb_if_addr = 172.16.0.254
sgi_mb_if_mask = 255.255.255.255
m1u_multi_addr = 239.255.0.1
####################################################################

@ -83,6 +83,7 @@ parse_args(all_args_t *args, int argc, char* argv[]) {
string mbms_gw_name;
string mbms_gw_sgi_mb_if_addr;
string mbms_gw_sgi_mb_if_mask;
string mbms_gw_m1u_multi_addr;
string log_filename;
@ -99,8 +100,9 @@ parse_args(all_args_t *args, int argc, char* argv[]) {
common.add_options()
("mbms_gw.name", bpo::value<string>(&mbms_gw_name)->default_value("srsmbmsgw01"), "MBMS-GW Name")
("mbms_gw.sgi_mb_if_addr", bpo::value<string>(&mbms_gw_sgi_mb_if_addr)->default_value("172.16.1.1"), "SGi-mb TUN interface Address")
("mbms_gw.m1u_multi_addr", bpo::value<string>(&mbms_gw_m1u_multi_addr)->default_value("239.255.0.1"), "M1-u GTPu destination multicast address")
("mbms_gw.sgi_mb_if_addr", bpo::value<string>(&mbms_gw_sgi_mb_if_addr)->default_value("172.16.1.1"), "SGi-mb TUN interface Address.")
("mbms_gw.sgi_mb_if_mask", bpo::value<string>(&mbms_gw_sgi_mb_if_mask)->default_value("255.255.255.255"), "SGi-mb TUN interface mask.")
("mbms_gw.m1u_multi_addr", bpo::value<string>(&mbms_gw_m1u_multi_addr)->default_value("239.255.0.1"), "M1-u GTPu destination multicast address.")
("log.all_level", bpo::value<string>(&args->log_args.all_level)->default_value("info"), "ALL log level")
("log.all_hex_limit", bpo::value<int>(&args->log_args.all_hex_limit)->default_value(32), "ALL log hex dump limit")
@ -152,7 +154,9 @@ parse_args(all_args_t *args, int argc, char* argv[]) {
args->mbms_gw_args.name = mbms_gw_name;
args->mbms_gw_args.sgi_mb_if_addr = mbms_gw_sgi_mb_if_addr;
args->mbms_gw_args.sgi_mb_if_mask = mbms_gw_sgi_mb_if_mask;
args->mbms_gw_args.m1u_multi_addr = mbms_gw_m1u_multi_addr;
// Apply all_level to any unset layers
if (vm.count("log.all_level")) {
if(!vm.count("log.mbms_gw_level")) {

@ -196,7 +196,7 @@ mbms_gw::init_sgi_mb_if(mbms_gw_args_t *args)
}
ifr.ifr_netmask.sa_family = AF_INET;
((struct sockaddr_in *)&ifr.ifr_netmask)->sin_addr.s_addr = inet_addr("255.255.255.0");
((struct sockaddr_in *)&ifr.ifr_netmask)->sin_addr.s_addr = inet_addr(args->sgi_mb_if_mask.c_str());
if (ioctl(sgi_mb_sock, SIOCSIFNETMASK, &ifr) < 0) {
m_mbms_gw_log->error("Failed to set TUN interface Netmask. Error: %s\n", strerror(errno));
close(m_sgi_mb_if);
@ -285,8 +285,6 @@ mbms_gw::handle_sgi_md_pdu(srslte::byte_buffer_t *msg)
{
uint8_t version;
srslte::gtpu_header_t header;
in_addr_t baddr = inet_addr("172.16.0.255");
in_addr_t saddr = inet_addr("172.16.0.254");
//Setup GTP-U header
header.flags = 0x30;
@ -301,46 +299,16 @@ mbms_gw::handle_sgi_md_pdu(srslte::byte_buffer_t *msg)
return;
}
//IP+UDP Headers
//IP Headers
struct iphdr *iph = (struct iphdr *) msg->msg;
struct udphdr *udph = (struct udphdr *) (msg->msg + iph->ihl*4);
if(iph->version != 4)
{
m_mbms_gw_log->warning("IPv6 not supported yet.\n");
return;
}
//Replace Destination IP with broadcast address
iph->daddr = baddr;
//Replace Source IP with address in same subnet
iph->saddr = saddr;
//Replace IP cheksum
iph->check = 0;
iph->check = in_cksum((uint16_t*)msg->msg,4*(msg->msg[0] & 0x0F));
//Set Pseudo Header
struct pseudo_hdr phdr;
phdr.src_addr = iph->saddr;
phdr.dst_addr = iph->daddr;
phdr.protocol = IPPROTO_UDP;
phdr.placeholder = 0;
phdr.udp_len = udph->len;
//Set Pseudo Datagram
udph->check = 0;
int psize = sizeof(struct pseudo_hdr) + ntohs(udph->len);
uint8_t * pseudo_dgram = (uint8_t*) malloc(psize);
memcpy(pseudo_dgram, &phdr,sizeof(struct pseudo_hdr));
memcpy(pseudo_dgram+sizeof(pseudo_hdr),udph,ntohs(udph->len));
//Recompute UDP checksum
udph->check = in_cksum((uint16_t*) pseudo_dgram, psize);
free(pseudo_dgram);
//Write GTP-U header into packet
if(!srslte::gtpu_write_header(&header, msg))
if(!srslte::gtpu_write_header(&header, msg, m_mbms_gw_log))
{
m_mbms_gw_log->console("Error writing GTP-U header on PDU\n");
}
@ -355,31 +323,4 @@ mbms_gw::handle_sgi_md_pdu(srslte::byte_buffer_t *msg)
}
}
uint16_t
mbms_gw::in_cksum(uint16_t *iphdr, int count)
{
//RFC 1071
uint32_t sum = 0;
uint16_t padd = 0;
uint16_t result;
while(count > 1)
{
sum+= *iphdr++;
count -= 2;
}
if( count > 0 )
{
padd = * (uint8_t *) iphdr;
sum += padd;
}
/*Fold 32-bit sum to 16-bit*/
// while(sum>>16)
// sum = (sum & 0xffff) + (sum >> 16);
sum = (sum>>16)+(sum & 0xFFFF);
sum = sum + (sum >> 16);
result = (uint16_t) ~sum;
return result;
}
} //namespace srsepc

@ -369,7 +369,7 @@ spgw::handle_sgi_pdu(srslte::byte_buffer_t *msg)
header.teid = enb_fteid.teid;
//Write header into packet
if(!srslte::gtpu_write_header(&header, msg))
if(!srslte::gtpu_write_header(&header, msg, m_spgw_log))
{
m_spgw_log->console("Error writing GTP-U header on PDU\n");
}
@ -395,7 +395,7 @@ spgw::handle_s1u_pdu(srslte::byte_buffer_t *msg)
{
//m_spgw_log->console("Received PDU from S1-U. Bytes=%d\n",msg->N_bytes);
srslte::gtpu_header_t header;
srslte::gtpu_read_header(msg, &header);
srslte::gtpu_read_header(msg, &header, m_spgw_log);
//m_spgw_log->console("TEID 0x%x. Bytes=%d\n", header.teid, msg->N_bytes);
int n = write(m_sgi_if, msg->msg, msg->N_bytes);

@ -447,8 +447,6 @@ private:
uint32_t sib_start_tti(uint32_t tti, uint32_t period, uint32_t offset, uint32_t sf);
const static int SIB_SEARCH_TIMEOUT_MS = 1000;
const static uint32_t NOF_REQUIRED_SIBS = 13; // SIB1, SIB2 and SIB3
bool initiated;
bool ho_start;
bool go_idle;

@ -210,6 +210,10 @@ bool bsr_proc::generate_bsr(bsr_t *bsr, uint32_t nof_padding_bytes) {
bsr->format = LONG_BSR;
}
}
Info("BSR: Type %s, Format %s, Value=%d,%d,%d,%d\n",
bsr_type_tostring(triggered_bsr_type), bsr_format_tostring(bsr->format),
bsr->buff_size[0], bsr->buff_size[1], bsr->buff_size[2], bsr->buff_size[3]);
return ret;
}
@ -337,9 +341,6 @@ bool bsr_proc::generate_padding_bsr(uint32_t nof_padding_bytes, bsr_t *bsr)
}
generate_bsr(bsr, nof_padding_bytes);
ret = true;
Info("BSR: Type %s, Format %s, Value=%d,%d,%d,%d\n",
bsr_type_tostring(triggered_bsr_type), bsr_format_tostring(bsr->format),
bsr->buff_size[0], bsr->buff_size[1], bsr->buff_size[2], bsr->buff_size[3]);
if (timers_db->get(timer_periodic_id)->get_timeout() && bsr->format != TRUNC_BSR) {
timers_db->get(timer_periodic_id)->reset();

@ -52,7 +52,7 @@ namespace srsue {
phy::phy() : workers_pool(MAX_WORKERS),
workers(MAX_WORKERS),
workers_common(phch_recv::MUTEX_X_WORKER*MAX_WORKERS)
workers_common(phch_recv::MUTEX_X_WORKER*MAX_WORKERS),nof_coworkers(0)
{
}

@ -318,11 +318,11 @@ void ue::print_pool() {
bool ue::get_metrics(ue_metrics_t &m)
{
bzero(&m, sizeof(ue_metrics_t));
m.rf = rf_metrics;
bzero(&rf_metrics, sizeof(rf_metrics_t));
rf_metrics.rf_error = false; // Reset error flag
bzero(&m, sizeof(ue_metrics_t));
if(EMM_STATE_REGISTERED == nas.get_state()) {
if(RRC_STATE_CONNECTED == rrc.get_state()) {
phy.get_metrics(m.phy);

@ -41,6 +41,8 @@ using namespace srslte;
namespace srsue {
const static uint32_t NOF_REQUIRED_SIBS = 4;
const static uint32_t required_sibs[NOF_REQUIRED_SIBS] = {0,1,2,12}; // SIB1, SIB2, SIB3 and SIB13 (eMBMS)
/*******************************************************************************
Base functions
@ -543,24 +545,25 @@ bool rrc::configure_serving_cell() {
return false;
}
serving_cell->has_mcch = false;
// Apply configurations if already retrieved SIB2
if (serving_cell->has_sib2()) {
apply_sib2_configs(serving_cell->sib2ptr());
}
// Obtain the rest of required SIBs (configuration is applied when received)
// Obtain the SIBs if not available or apply the configuration if available
for (uint32_t i = 0; i < NOF_REQUIRED_SIBS; i++) {
if (!serving_cell->has_sib(i)) {
rrc_log->info("Cell has no SIB%d. Obtaining SIB%d\n", i+1, i+1);
if (!si_acquire(i)) {
rrc_log->info("Timeout while acquiring SIB%d\n", i+1);
if (i < 2) {
if (!serving_cell->has_sib(required_sibs[i])) {
rrc_log->info("Cell has no SIB%d. Obtaining SIB%d\n", required_sibs[i]+1, required_sibs[i]+1);
if (!si_acquire(required_sibs[i])) {
rrc_log->info("Timeout while acquiring SIB%d\n", required_sibs[i]+1);
if (required_sibs[i] < 2) {
return false;
}
}
} else {
rrc_log->info("Cell has SIB%d\n", i+1);
if(i+1 == 13){
rrc_log->info("Cell has SIB%d\n", required_sibs[i]+1);
switch(required_sibs[i]) {
case 1:
apply_sib2_configs(serving_cell->sib2ptr());
break;
case 12:
apply_sib13_configs(serving_cell->sib13ptr());
break;
}
}
}
@ -768,7 +771,7 @@ bool rrc::si_acquire(uint32_t sib_index)
}
}
if (!found) {
rrc_log->error("Could not find SIB%d scheduling in SIB1\n", sib_index+1);
rrc_log->info("Could not find SIB%d scheduling in SIB1\n", sib_index+1);
return false;
}
}

Loading…
Cancel
Save