gtpu, feature - added the ability to configure the GTPU indirect tunnel timeout value. This parameter is useful in case the End Marker is not received.

master
authored by Francisco 4 years ago, committed by Francisco Paisana
parent cd51537234
commit c6b9c12ba2

@@ -18,6 +18,15 @@
namespace srsenb {
struct gtpu_args_t {
std::string gtp_bind_addr;
std::string mme_addr;
std::string embms_m1u_multiaddr;
std::string embms_m1u_if_addr;
bool embms_enable = false;
uint32_t indirect_tunnel_timeout_msec = 2000;
};
// GTPU interface for PDCP
class gtpu_interface_pdcp
{

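For orientation, a minimal sketch (not part of the commit; addresses and values are illustrative) of how a caller fills the new struct and passes it to gtpu::init(), mirroring the enb_stack_lte and unit-test hunks further down:

gtpu_args_t gtpu_args;
gtpu_args.gtp_bind_addr                = "127.0.1.1";   // illustrative
gtpu_args.mme_addr                     = "127.0.1.100"; // illustrative
gtpu_args.indirect_tunnel_timeout_msec = 5000;          // override the 2000 ms default
if (gtpu.init(gtpu_args, &pdcp) != SRSRAN_SUCCESS) {
  // handle initialization failure
}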
@@ -307,6 +307,7 @@ enable = false
# nof_prealloc_ues: Number of UE memory resources to preallocate during eNB initialization for faster UE creation (Default 8)
# eea_pref_list: Ordered preference list for the selection of encryption algorithm (EEA) (default: EEA0, EEA2, EEA1).
# eia_pref_list: Ordered preference list for the selection of integrity algorithm (EIA) (default: EIA2, EIA1, EIA0).
# gtpu_tunnel_timeout: Maximum time, in milliseconds, that GTPU waits before releasing an indirect forwarding tunnel, counted from the last received GTPU PDU.
#
#####################################################################
[expert]
@@ -331,3 +332,4 @@ enable = false
#nof_prealloc_ues = 8
#eea_pref_list = EEA0, EEA2, EEA1
#eia_pref_list = EIA2, EIA1, EIA0
#gtpu_tunnel_timeout = 2000
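The value configured here reaches the GTPU layer in three steps (a sketch, not part of the diff; the fragments below mirror the parse_args, enb_stack_lte::init and gtpu_tunnel_manager hunks that follow):

// parse_args(): "expert.gtpu_tunnel_timeout" is read into the stack args (default 2000 msec)
args->stack.gtpu_indirect_tunnel_timeout_msec = 2000;
// enb_stack_lte::init(): copied into the GTPU-specific args struct
gtpu_args.indirect_tunnel_timeout_msec = args.gtpu_indirect_tunnel_timeout_msec;
// gtpu_tunnel_manager: used as the duration of the indirect tunnel's rx_timer
before_tun.rx_timer.set(gtpu_args->indirect_tunnel_timeout_msec, /* tunnel removal callback */);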

@@ -95,6 +95,7 @@ struct general_args_t {
std::string eea_pref_list;
uint32_t max_mac_dl_kos;
uint32_t max_mac_ul_kos;
uint32_t gtpu_indirect_tunnel_timeout;
};
struct all_args_t {

@@ -70,6 +70,7 @@ typedef struct {
typedef struct {
std::string type;
uint32_t sync_queue_size; // Max allowed difference between PHY and Stack clocks (in TTI)
uint32_t gtpu_indirect_tunnel_timeout_msec;
mac_args_t mac;
s1ap_args_t s1ap;
pcap_args_t mac_pcap;

@@ -90,7 +90,7 @@ public:
using ue_lcid_tunnel_list = srsran::bounded_vector<lcid_tunnel, MAX_TUNNELS_PER_UE>;
explicit gtpu_tunnel_manager(srsran::task_sched_handle task_sched_, srslog::basic_logger& logger);
void init(pdcp_interface_gtpu* pdcp_);
void init(const gtpu_args_t& gtpu_args, pdcp_interface_gtpu* pdcp_);
bool has_teid(uint32_t teid) const { return tunnels.contains(teid); }
const tunnel* find_tunnel(uint32_t teid);
@@ -116,6 +116,7 @@ private:
using tunnel_ctxt_it = typename tunnel_list_t::iterator;
srsran::task_sched_handle task_sched;
const gtpu_args_t* gtpu_args = nullptr;
pdcp_interface_gtpu* pdcp = nullptr;
srslog::basic_logger& logger;
@@ -134,12 +135,7 @@ public:
srsran::socket_manager_itf* rx_socket_handler_);
~gtpu();
int init(std::string gtp_bind_addr_,
std::string mme_addr_,
std::string m1u_multiaddr_,
std::string m1u_if_addr_,
pdcp_interface_gtpu* pdcp_,
bool enable_mbsfn = false);
int init(const gtpu_args_t& gtpu_args, pdcp_interface_gtpu* pdcp_);
void stop();
// gtpu_interface_rrc
@@ -168,7 +164,7 @@ private:
srsran::socket_manager_itf* rx_socket_handler = nullptr;
srsran::task_queue_handle gtpu_queue;
bool enable_mbsfn = false;
gtpu_args_t args;
std::string gtp_bind_addr;
std::string mme_addr;
srsenb::pdcp_interface_gtpu* pdcp = nullptr;

@@ -217,6 +217,7 @@ void parse_args(all_args_t* args, int argc, char* argv[])
("expert.nof_prealloc_ues", bpo::value<uint32_t>(&args->stack.mac.nof_prealloc_ues)->default_value(8), "Number of UE resources to preallocate during eNB initialization")
("expert.max_mac_dl_kos", bpo::value<uint32_t>(&args->general.max_mac_dl_kos)->default_value(100), "Maximum number of consecutive KOs in DL before triggering the UE's release")
("expert.max_mac_ul_kos", bpo::value<uint32_t>(&args->general.max_mac_ul_kos)->default_value(100), "Maximum number of consecutive KOs in UL before triggering the UE's release")
("expert.gtpu_tunnel_timeout", bpo::value<uint32_t>(&args->stack.gtpu_indirect_tunnel_timeout_msec)->default_value(2000), "Maximum time that GTPU takes to release indirect forwarding tunnel since the last received GTPU PDU.")
// eMBMS section

@@ -133,12 +133,15 @@ int enb_stack_lte::init(const stack_args_t& args_, const rrc_cfg_t& rrc_cfg_)
stack_logger.error("Couldn't initialize S1AP");
return SRSRAN_ERROR;
}
if (gtpu.init(args.s1ap.gtp_bind_addr,
args.s1ap.mme_addr,
args.embms.m1u_multiaddr,
args.embms.m1u_if_addr,
&pdcp,
args.embms.enable)) {
gtpu_args_t gtpu_args;
gtpu_args.embms_enable = args.embms.enable;
gtpu_args.embms_m1u_multiaddr = args.embms.m1u_multiaddr;
gtpu_args.embms_m1u_if_addr = args.embms.m1u_if_addr;
gtpu_args.mme_addr = args.s1ap.mme_addr;
gtpu_args.gtp_bind_addr = args.s1ap.gtp_bind_addr;
gtpu_args.indirect_tunnel_timeout_msec = args.gtpu_indirect_tunnel_timeout_msec;
if (gtpu.init(gtpu_args, &pdcp) != SRSRAN_SUCCESS) {
stack_logger.error("Couldn't initialize GTPU");
return SRSRAN_ERROR;
}

@@ -36,8 +36,9 @@ gtpu_tunnel_manager::gtpu_tunnel_manager(srsran::task_sched_handle task_sched_,
logger(logger), task_sched(task_sched_), tunnels(1)
{}
void gtpu_tunnel_manager::init(pdcp_interface_gtpu* pdcp_)
void gtpu_tunnel_manager::init(const gtpu_args_t& args, pdcp_interface_gtpu* pdcp_)
{
gtpu_args = &args;
pdcp = pdcp_;
}
@@ -239,10 +240,13 @@ void gtpu_tunnel_manager::set_tunnel_priority(uint32_t before_teid, uint32_t aft
}
};
// Schedule auto-removal of this indirect tunnel
// Schedule auto-removal of the indirect tunnel in case the End Marker is not received
// TS 36.300 - On detection of the "end marker", the target eNB may also initiate the release of the data forwarding
// resource. However, the release of the data forwarding resource is implementation dependent and could
// also be based on other mechanisms (e.g. timer-based mechanism).
before_tun.rx_timer = task_sched.get_unique_timer();
before_tun.rx_timer.set(500, [this, before_teid](uint32_t tid) {
// This will self-destruct the callback object
before_tun.rx_timer.set(gtpu_args->indirect_tunnel_timeout_msec, [this, before_teid](uint32_t tid) {
// Note: This will self-destruct the callback object
remove_tunnel(before_teid);
});
before_tun.rx_timer.run();
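For readers unfamiliar with the timer API used above, a brief sketch of the srsran::unique_timer pattern; the re-arm and cancellation semantics noted in the comments are assumptions based on how the class is used in this code base:

uint32_t timeout_msec = 2000; // e.g. gtpu_args->indirect_tunnel_timeout_msec
srsran::unique_timer t = task_sched.get_unique_timer();
t.set(timeout_msec, [](uint32_t timer_id) {
  // one-shot callback, run in the stack's task scheduler once the timeout elapses
});
t.run();    // start (or restart) the countdown
// t.stop(); // would cancel the pending expiry; destroying the timer (e.g. when the tunnel is
//           // removed after an End Marker is received) presumably has the same effect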
@@ -323,18 +327,14 @@ gtpu::~gtpu()
stop();
}
int gtpu::init(std::string gtp_bind_addr_,
std::string mme_addr_,
std::string m1u_multiaddr_,
std::string m1u_if_addr_,
srsenb::pdcp_interface_gtpu* pdcp_,
bool enable_mbsfn_)
int gtpu::init(const gtpu_args_t& gtpu_args, pdcp_interface_gtpu* pdcp_)
{
args = gtpu_args;
pdcp = pdcp_;
gtp_bind_addr = gtp_bind_addr_;
mme_addr = mme_addr_;
gtp_bind_addr = gtpu_args.gtp_bind_addr;
mme_addr = gtpu_args.mme_addr;
tunnels.init(pdcp);
tunnels.init(args, pdcp);
char errbuf[128] = {};
@@ -374,9 +374,8 @@ int gtpu::init(std::string gtp_bind_addr_,
rx_socket_handler->add_socket_handler(fd, srsran::make_sdu_handler(logger, gtpu_queue, rx_callback));
// Start MCH socket if enabled
enable_mbsfn = enable_mbsfn_;
if (enable_mbsfn) {
if (not m1u.init(m1u_multiaddr_, m1u_if_addr_)) {
if (args.embms_enable) {
if (not m1u.init(args.embms_m1u_multiaddr, args.embms_m1u_if_addr)) {
return SRSRAN_ERROR;
}
}

@@ -232,8 +232,12 @@ int test_gtpu_direct_tunneling(tunnel_test_event event)
dummy_socket_manager senb_rx_sockets, tenb_rx_sockets;
srsenb::gtpu senb_gtpu(&task_sched, logger1, &senb_rx_sockets), tenb_gtpu(&task_sched, logger2, &tenb_rx_sockets);
pdcp_tester senb_pdcp, tenb_pdcp;
senb_gtpu.init(senb_addr_str, sgw_addr_str, "", "", &senb_pdcp, false);
tenb_gtpu.init(tenb_addr_str, sgw_addr_str, "", "", &tenb_pdcp, false);
gtpu_args_t gtpu_args;
gtpu_args.gtp_bind_addr = senb_addr_str;
gtpu_args.mme_addr = sgw_addr_str;
senb_gtpu.init(gtpu_args, &senb_pdcp);
gtpu_args.gtp_bind_addr = tenb_addr_str;
tenb_gtpu.init(gtpu_args, &tenb_pdcp);
// create tunnels MME-SeNB and MME-TeNB
uint32_t senb_teid_in = senb_gtpu.add_bearer(rnti, drb1, sgw_addr, sgw_teidout1).value();
