udp/session: refactor to support dgram mode
diff --git a/src/vnet/session/session.c b/src/vnet/session/session.c
index 5893b65..dfc967b 100644
--- a/src/vnet/session/session.c
+++ b/src/vnet/session/session.c
@@ -23,7 +23,6 @@
 #include <vlibmemory/api.h>
 #include <vnet/dpo/load_balance.h>
 #include <vnet/fib/ip4_fib.h>
-#include <vnet/tcp/tcp.h>
 
 session_manager_main_t session_manager_main;
 extern transport_proto_vft_t *tp_vfts;
@@ -34,7 +33,7 @@ session_send_evt_to_thread (u64 session_handle, fifo_event_type_t evt_type,
 {
   u32 tries = 0;
   session_fifo_event_t evt = { {0}, };
-  unix_shared_memory_queue_t *q;
+  svm_queue_t *q;
 
   evt.event_type = evt_type;
   if (evt_type == FIFO_EVENT_RPC)
@@ -46,7 +45,7 @@ session_send_evt_to_thread (u64 session_handle, fifo_event_type_t evt_type,
     evt.session_handle = session_handle;
 
   q = session_manager_get_vpp_event_queue (thread_index);
-  while (unix_shared_memory_queue_add (q, (u8 *) & evt, 1))
+  while (svm_queue_add (q, (u8 *) & evt, 1))
     {
       if (tries++ == 3)
        {
@@ -86,12 +85,12 @@ session_alloc (u32 thread_index)
   pool_get_aligned_will_expand (smm->sessions[thread_index], will_expand,
                                CLIB_CACHE_LINE_BYTES);
   /* If we have peekers, let them finish */
-  if (PREDICT_FALSE (will_expand))
+  if (PREDICT_FALSE (will_expand && vlib_num_workers ()))
     {
-      clib_spinlock_lock_if_init (&smm->peekers_write_locks[thread_index]);
+      clib_rwlock_writer_lock (&smm->peekers_rw_locks[thread_index]);
       pool_get_aligned (session_manager_main.sessions[thread_index], s,
                        CLIB_CACHE_LINE_BYTES);
-      clib_spinlock_unlock_if_init (&smm->peekers_write_locks[thread_index]);
+      clib_rwlock_writer_unlock (&smm->peekers_rw_locks[thread_index]);
     }
   else
     {
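
[Note: the per-thread spinlock pair is replaced by a single rwlock, and the writer side above is taken only when the pool is about to expand and worker threads exist. For reference, a minimal sketch of the reader ("peeker") side this protects, assuming the same locking convention (helper name hypothetical):]

static inline stream_session_t *
session_peek (u32 session_index, u32 thread_index)
{
  session_manager_main_t *smm = &session_manager_main;
  stream_session_t *s;

  /* Holds off pool expansion on that thread while we dereference */
  clib_rwlock_reader_lock (&smm->peekers_rw_locks[thread_index]);
  s = pool_elt_at_index (smm->sessions[thread_index], session_index);
  clib_rwlock_reader_unlock (&smm->peekers_rw_locks[thread_index]);
  return s;
}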
@@ -104,7 +103,7 @@ session_alloc (u32 thread_index)
   return s;
 }
 
-static void
+void
 session_free (stream_session_t * s)
 {
   pool_put (session_manager_main.sessions[s->thread_index], s);
@@ -112,7 +111,7 @@ session_free (stream_session_t * s)
     memset (s, 0xFA, sizeof (*s));
 }
 
-static int
+int
 session_alloc_fifos (segment_manager_t * sm, stream_session_t * s)
 {
   svm_fifo_t *server_rx_fifo = 0, *server_tx_fifo = 0;
@@ -147,7 +146,7 @@ session_alloc_for_connection (transport_connection_t * tc)
   s = session_alloc (thread_index);
   s->session_type = session_type_from_proto_and_ip (tc->proto, tc->is_ip4);
   s->session_state = SESSION_STATE_CONNECTING;
-  s->thread_index = thread_index;
+  s->enqueue_epoch = ~0;
 
   /* Attach transport to session and vice versa */
   s->connection_index = tc->c_index;
@@ -166,6 +165,7 @@ session_alloc_and_init (segment_manager_t * sm, transport_connection_t * tc,
   if (alloc_fifos && (rv = session_alloc_fifos (sm, s)))
     {
       session_free (s);
+      *ret_s = 0;
       return rv;
     }
 
@@ -355,14 +355,19 @@ session_enqueue_stream_connection (transport_connection_t * tc,
   return enqueued;
 }
 
 int
-session_enqueue_dgram_connection (stream_session_t * s, vlib_buffer_t * b,
-                                 u8 proto, u8 queue_event)
+session_enqueue_dgram_connection (stream_session_t * s,
+                                 session_dgram_hdr_t * hdr,
+                                 vlib_buffer_t * b, u8 proto, u8 queue_event)
 {
   int enqueued = 0, rv, in_order_off;
 
-  if (svm_fifo_max_enqueue (s->server_rx_fifo) < b->current_length)
-    return -1;
+  ASSERT (svm_fifo_max_enqueue (s->server_rx_fifo)
+         >= b->current_length + sizeof (*hdr));
+
+  svm_fifo_enqueue_nowait (s->server_rx_fifo, sizeof (session_dgram_hdr_t),
+                          (u8 *) hdr);
   enqueued = svm_fifo_enqueue_nowait (s->server_rx_fifo, b->current_length,
                                      vlib_buffer_get_current (b));
   if (PREDICT_FALSE ((b->flags & VLIB_BUFFER_NEXT_PRESENT) && enqueued >= 0))
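
[Note: in dgram mode every datagram is enqueued as a session_dgram_hdr_t immediately followed by its payload, and the ASSERT above relies on the caller having verified there is room for both. A minimal sketch of the matching reader, assuming the usual svm_fifo peek/drop/dequeue semantics (function name hypothetical):]

static int
app_recv_dgram (stream_session_t * s, u8 * buf, u32 buf_len)
{
  svm_fifo_t *f = s->server_rx_fifo;
  session_dgram_hdr_t hdr;

  if (svm_fifo_max_dequeue (f) < sizeof (session_dgram_hdr_t))
    return 0;

  /* Header first, then exactly data_length bytes of payload */
  svm_fifo_peek (f, 0, sizeof (hdr), (u8 *) & hdr);
  ASSERT (hdr.data_length <= buf_len);
  svm_fifo_dequeue_drop (f, sizeof (hdr));
  return svm_fifo_dequeue_nowait (f, hdr.data_length, buf);
}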
@@ -443,7 +448,7 @@ session_enqueue_notify (stream_session_t * s, u8 block)
 {
   application_t *app;
   session_fifo_event_t evt;
-  unix_shared_memory_queue_t *q;
+  svm_queue_t *q;
 
   if (PREDICT_FALSE (s->session_state == SESSION_STATE_CLOSED))
     {
@@ -463,9 +468,9 @@ session_enqueue_notify (stream_session_t * s, u8 block)
       return 0;
     }
 
-  /* Built-in server? Hand event to the callback... */
-  if (app->cb_fns.builtin_server_rx_callback)
-    return app->cb_fns.builtin_server_rx_callback (s);
+  /* Built-in app? Hand event to the callback... */
+  if (app->cb_fns.builtin_app_rx_callback)
+    return app->cb_fns.builtin_app_rx_callback (s);
 
   /* If no event, send one */
   if (svm_fifo_set_event (s->server_rx_fifo))
@@ -479,8 +484,8 @@ session_enqueue_notify (stream_session_t * s, u8 block)
 
       /* Based on request block (or not) for lack of space */
       if (block || PREDICT_TRUE (q->cursize < q->maxsize))
-       unix_shared_memory_queue_add (app->event_queue, (u8 *) & evt,
-                                     0 /* do wait for mutex */ );
+       svm_queue_add (app->event_queue, (u8 *) & evt,
+                      0 /* do wait for mutex */ );
       else
        {
          clib_warning ("fifo full");
@@ -530,6 +535,16 @@ session_manager_flush_enqueue_events (u8 transport_proto, u32 thread_index)
   return errors;
 }
 
+int
+session_manager_flush_all_enqueue_events (u8 transport_proto)
+{
+  vlib_thread_main_t *vtm = vlib_get_thread_main ();
+  int i, errors = 0;
+  for (i = 0; i < 1 + vtm->n_threads; i++)
+    errors += session_manager_flush_enqueue_events (transport_proto, i);
+  return errors;
+}
+
 /**
  * Init fifo tail and head pointers
  *
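
[Note: the new helper simply flushes the per-thread event queues for main and all workers. A connectionless transport that may enqueue on any thread could use it as follows (sketch, call site illustrative):]

static void
udp_flush_rx_notifications (void)
{
  int errors;
  /* Push pending rx notifications to apps on every thread */
  errors = session_manager_flush_all_enqueue_events (TRANSPORT_PROTO_UDP);
  if (errors)
    clib_warning ("%d enqueue notification errors", errors);
}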
@@ -548,13 +563,13 @@ stream_session_init_fifos_pointers (transport_connection_t * tc,
 int
 session_stream_connect_notify (transport_connection_t * tc, u8 is_fail)
 {
-  application_t *app;
+  u32 opaque = 0, new_ti, new_si;
   stream_session_t *new_s = 0;
-  u64 handle;
-  u32 opaque = 0;
-  int error = 0;
   segment_manager_t *sm;
+  application_t *app;
   u8 alloc_fifos;
+  int error = 0;
+  u64 handle;
 
   /*
    * Find connection handle and cleanup half-open table
@@ -581,14 +596,18 @@ session_stream_connect_notify (transport_connection_t * tc, u8 is_fail)
   if (!is_fail)
     {
       sm = application_get_connect_segment_manager (app);
-      alloc_fifos = application_is_proxy (app);
+      alloc_fifos = !application_is_builtin_proxy (app);
       if (session_alloc_and_init (sm, tc, alloc_fifos, &new_s))
        {
          is_fail = 1;
          error = -1;
        }
       else
-       new_s->app_index = app->index;
+       {
+         new_s->app_index = app->index;
+         new_si = new_s->session_index;
+         new_ti = new_s->thread_index;
+       }
     }
 
   /*
@@ -599,12 +618,18 @@ session_stream_connect_notify (transport_connection_t * tc, u8 is_fail)
     {
       SESSION_DBG ("failed to notify app");
       if (!is_fail)
-       stream_session_disconnect (new_s);
+       {
+         new_s = session_get (new_si, new_ti);
+         stream_session_disconnect_transport (new_s);
+       }
     }
   else
     {
       if (!is_fail)
-       new_s->session_state = SESSION_STATE_READY;
+       {
+         new_s = session_get (new_si, new_ti);
+         new_s->session_state = SESSION_STATE_READY;
+       }
     }
 
   return error;
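
[Note: new_si/new_ti are captured before the notify callback and new_s is re-looked-up afterwards because the callback runs application code that may itself allocate sessions: pool expansion can move smm->sessions[thread], leaving any cached stream_session_t pointer dangling. An illustration of the hazard the saved indices avoid (hypothetical helper):]

static void
connect_notify_hazard (application_t * app, stream_session_t * s, u32 opaque)
{
  u32 si = s->session_index, ti = s->thread_index;

  /* May pool_get () more sessions and realloc the pool under us */
  app->cb_fns.session_connected_callback (app->index, opaque, s, 0);

  /* 's' may now dangle; always re-fetch through the indices */
  s = session_get (si, ti);
  s->session_state = SESSION_STATE_READY;
}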
@@ -622,12 +647,14 @@ static void
 session_switch_pool (void *cb_args)
 {
   session_switch_pool_args_t *args = (session_switch_pool_args_t *) cb_args;
+  transport_proto_t tp;
   stream_session_t *s;
   ASSERT (args->thread_index == vlib_get_thread_index ());
   s = session_get (args->session_index, args->thread_index);
   s->server_tx_fifo->master_session_index = args->new_session_index;
   s->server_tx_fifo->master_thread_index = args->new_thread_index;
-  tp_vfts[s->session_type].cleanup (s->connection_index, s->thread_index);
+  tp = session_get_transport_proto (s);
+  tp_vfts[tp].cleanup (s->connection_index, s->thread_index);
   session_free (s);
   clib_mem_free (cb_args);
 }
@@ -762,13 +789,10 @@ stream_session_accept (transport_connection_t * tc, u32 listener_index,
   application_t *server;
   stream_session_t *s, *listener;
   segment_manager_t *sm;
-  session_type_t sst;
   int rv;
 
-  sst = session_type_from_proto_and_ip (tc->proto, tc->is_ip4);
-
   /* Find the server */
-  listener = listen_session_get (sst, listener_index);
+  listener = listen_session_get (listener_index);
   server = application_get (listener->app_index);
 
   sm = application_get_listen_segment_manager (server, listener);
@@ -788,100 +812,134 @@ stream_session_accept (transport_connection_t * tc, u32 listener_index,
   return 0;
 }
 
-/**
- * Ask transport to open connection to remote transport endpoint.
- *
- * Stores handle for matching request with reply since the call can be
- * asynchronous. For instance, for TCP the 3-way handshake must complete
- * before reply comes. Session is only created once connection is established.
- *
- * @param app_index Index of the application requesting the connect
- * @param st Session type requested.
- * @param tep Remote transport endpoint
- * @param opaque Opaque data (typically, api_context) the application expects
- *              on open completion.
- */
 int
-session_open (u32 app_index, session_endpoint_t * rmt, u32 opaque)
+session_open_cl (u32 app_index, session_endpoint_t * rmt, u32 opaque)
 {
   transport_connection_t *tc;
-  session_type_t sst;
+  transport_endpoint_t *tep;
   segment_manager_t *sm;
   stream_session_t *s;
   application_t *app;
   int rv;
+
+  tep = session_endpoint_to_transport (rmt);
+  rv = tp_vfts[rmt->transport_proto].open (tep);
+  if (rv < 0)
+    {
+      SESSION_DBG ("Transport failed to open connection.");
+      return VNET_API_ERROR_SESSION_CONNECT;
+    }
+
+  tc = tp_vfts[rmt->transport_proto].get_half_open ((u32) rv);
+
+  /* For dgram type of service, allocate session and fifos now */
+  app = application_get (app_index);
+  sm = application_get_connect_segment_manager (app);
+
+  if (session_alloc_and_init (sm, tc, 1, &s))
+    return -1;
+  s->app_index = app->index;
+  s->session_state = SESSION_STATE_OPENED;
+
+  /* Tell the app about the new event fifo for this session */
+  app->cb_fns.session_connected_callback (app->index, opaque, s, 0);
+
+  return 0;
+}
+
+int
+session_open_vc (u32 app_index, session_endpoint_t * rmt, u32 opaque)
+{
+  transport_connection_t *tc;
+  transport_endpoint_t *tep;
   u64 handle;
+  int rv;
 
-  sst = session_type_from_proto_and_ip (rmt->transport_proto, rmt->is_ip4);
-  rv = tp_vfts[sst].open (session_endpoint_to_transport (rmt));
+  tep = session_endpoint_to_transport (rmt);
+  rv = tp_vfts[rmt->transport_proto].open (tep);
   if (rv < 0)
     {
       SESSION_DBG ("Transport failed to open connection.");
       return VNET_API_ERROR_SESSION_CONNECT;
     }
 
-  tc = tp_vfts[sst].get_half_open ((u32) rv);
+  tc = tp_vfts[rmt->transport_proto].get_half_open ((u32) rv);
 
   /* If transport offers a stream service, only allocate session once the
    * connection has been established.
+   * Add connection to half-open table and save app and tc index. The
+   * latter is needed to help establish the connection while the former
+   * is needed when the connect notify comes and we have to notify the
+   * external app
    */
-  if (transport_is_stream (rmt->transport_proto))
-    {
-      /* Add connection to half-open table and save app and tc index. The
-       * latter is needed to help establish the connection while the former
-       * is needed when the connect notify comes and we have to notify the
-       * external app
-       */
-      handle = (((u64) app_index) << 32) | (u64) tc->c_index;
-      session_lookup_add_half_open (tc, handle);
-
-      /* Store api_context (opaque) for when the reply comes. Not the nicest
-       * thing but better than allocating a separate half-open pool.
-       */
-      tc->s_index = opaque;
-    }
-  /* For dgram type of service, allocate session and fifos now.
+  handle = (((u64) app_index) << 32) | (u64) tc->c_index;
+  session_lookup_add_half_open (tc, handle);
+
+  /* Store api_context (opaque) for when the reply comes. Not the nicest
+   * thing but better than allocating a separate half-open pool.
    */
-  else
-    {
-      app = application_get (app_index);
-      sm = application_get_connect_segment_manager (app);
+  tc->s_index = opaque;
+  return 0;
+}
 
-      if (session_alloc_and_init (sm, tc, 1, &s))
-       return -1;
-      s->app_index = app->index;
-      s->session_state = SESSION_STATE_CONNECTING_READY;
+int
+session_open_app (u32 app_index, session_endpoint_t * rmt, u32 opaque)
+{
+  session_endpoint_extended_t *sep = (session_endpoint_extended_t *) rmt;
+  sep->app_index = app_index;
+  sep->opaque = opaque;
 
-      /* Tell the app about the new event fifo for this session */
-      app->cb_fns.session_connected_callback (app->index, opaque, s, 0);
-    }
-  return 0;
+  return tp_vfts[rmt->transport_proto].open ((transport_endpoint_t *) sep);
 }
 
+typedef int (*session_open_service_fn) (u32, session_endpoint_t *, u32);
+
+/* *INDENT-OFF* */
+static session_open_service_fn session_open_srv_fns[TRANSPORT_N_SERVICES] = {
+  session_open_vc,
+  session_open_cl,
+  session_open_app,
+};
+/* *INDENT-ON* */
+
 /**
- * Ask transport to listen on local transport endpoint.
+ * Ask transport to open connection to remote transport endpoint.
  *
- * @param s Session for which listen will be called. Note that unlike
- *         established sessions, listen sessions are not associated to a
- *         thread.
- * @param tep Local endpoint to be listened on.
+ * Stores handle for matching request with reply since the call can be
+ * asynchronous. For instance, for TCP the 3-way handshake must complete
+ * before reply comes. Session is only created once connection is established.
+ *
+ * @param app_index Index of the application requesting the connect
+ * @param rmt Remote session endpoint to connect to
+ * @param opaque Opaque data (typically, api_context) the application expects
+ *              on open completion.
  */
 int
-stream_session_listen (stream_session_t * s, session_endpoint_t * sep)
+session_open (u32 app_index, session_endpoint_t * rmt, u32 opaque)
+{
+  transport_service_type_t tst = tp_vfts[rmt->transport_proto].service_type;
+  return session_open_srv_fns[tst] (app_index, rmt, opaque);
+}
+
+int
+session_listen_vc (stream_session_t * s, session_endpoint_t * sep)
 {
   transport_connection_t *tc;
   u32 tci;
 
   /* Transport bind/listen  */
-  tci = tp_vfts[s->session_type].bind (s->session_index,
-                                      session_endpoint_to_transport (sep));
+  tci = tp_vfts[sep->transport_proto].bind (s->session_index,
+                                           session_endpoint_to_transport
+                                           (sep));
 
   if (tci == (u32) ~ 0)
     return -1;
 
   /* Attach transport to session */
   s->connection_index = tci;
-  tc = tp_vfts[s->session_type].get_listener (tci);
+  tc = tp_vfts[sep->transport_proto].get_listener (tci);
 
   /* Weird but handle it ... */
   if (tc == 0)
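
[Note: session_open now dispatches on the transport's advertised service type instead of transport_is_stream: in enum order, TRANSPORT_SERVICE_VC maps to session_open_vc, TRANSPORT_SERVICE_CL to session_open_cl and TRANSPORT_SERVICE_APP to session_open_app. A sketch of a connectionless open from an app's perspective, assuming UDP advertises TRANSPORT_SERVICE_CL (endpoint setup abbreviated):]

static int
open_udp_session (u32 app_index, u32 api_context)
{
  session_endpoint_t rmt = { 0 };

  rmt.transport_proto = TRANSPORT_PROTO_UDP;
  rmt.is_ip4 = 1;
  /* ... fill in rmt.ip and rmt.port ... */

  /* Dispatches to session_open_cl: session and fifos are allocated
   * immediately and session_connected_callback fires without waiting
   * for a handshake */
  return session_open (app_index, &rmt, api_context);
}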
@@ -892,6 +950,78 @@ stream_session_listen (stream_session_t * s, session_endpoint_t * sep)
   return 0;
 }
 
+int
+session_listen_cl (stream_session_t * s, session_endpoint_t * sep)
+{
+  transport_connection_t *tc;
+  application_t *server;
+  segment_manager_t *sm;
+  u32 tci;
+
+  /* Transport bind/listen  */
+  tci = tp_vfts[sep->transport_proto].bind (s->session_index,
+                                           session_endpoint_to_transport
+                                           (sep));
+
+  if (tci == (u32) ~ 0)
+    return -1;
+
+  /* Attach transport to session */
+  s->connection_index = tci;
+  tc = tp_vfts[sep->transport_proto].get_listener (tci);
+
+  /* Weird but handle it ... */
+  if (tc == 0)
+    return -1;
+
+  server = application_get (s->app_index);
+  sm = application_get_listen_segment_manager (server, s);
+  if (session_alloc_fifos (sm, s))
+    return -1;
+
+  /* Add to the main lookup table */
+  session_lookup_add_connection (tc, s->session_index);
+  return 0;
+}
+
+int
+session_listen_app (stream_session_t * s, session_endpoint_t * sep)
+{
+  session_endpoint_extended_t esep;
+  clib_memcpy (&esep, sep, sizeof (*sep));
+  esep.app_index = s->app_index;
+
+  return tp_vfts[sep->transport_proto].bind (s->session_index,
+                                            (transport_endpoint_t *) & esep);
+}
+
+typedef int (*session_listen_service_fn) (stream_session_t *,
+                                         session_endpoint_t *);
+
+/* *INDENT-OFF* */
+static session_listen_service_fn
+session_listen_srv_fns[TRANSPORT_N_SERVICES] = {
+  session_listen_vc,
+  session_listen_cl,
+  session_listen_app,
+};
+/* *INDENT-ON* */
+
+/**
+ * Ask transport to listen on local transport endpoint.
+ *
+ * @param s Session for which listen will be called. Note that unlike
+ *         established sessions, listen sessions are not associated to a
+ *         thread.
+ * @param sep Local endpoint to be listened on.
+ */
+int
+stream_session_listen (stream_session_t * s, session_endpoint_t * sep)
+{
+  transport_service_type_t tst = tp_vfts[sep->transport_proto].service_type;
+  return session_listen_srv_fns[tst] (s, sep);
+}
+
 /**
  * Ask transport to stop listening on local transport endpoint.
  *
@@ -900,15 +1030,15 @@ stream_session_listen (stream_session_t * s, session_endpoint_t * sep)
 int
 stream_session_stop_listen (stream_session_t * s)
 {
+  transport_proto_t tp = session_get_transport_proto (s);
   transport_connection_t *tc;
-
   if (s->session_state != SESSION_STATE_LISTENING)
     {
       clib_warning ("not a listening session");
       return -1;
     }
 
-  tc = tp_vfts[s->session_type].get_listener (s->connection_index);
+  tc = tp_vfts[tp].get_listener (s->connection_index);
   if (!tc)
     {
       clib_warning ("no transport");
@@ -916,22 +1046,39 @@ stream_session_stop_listen (stream_session_t * s)
     }
 
   session_lookup_del_connection (tc);
-  tp_vfts[s->session_type].unbind (s->connection_index);
+  tp_vfts[tp].unbind (s->connection_index);
   return 0;
 }
 
 /**
- * Disconnect session and propagate to transport. This should eventually
+ * Initiate session disconnect.
+ *
+ * Request is always sent to session node to ensure that all outstanding
+ * requests are served before transport is notified.
+ */
+void
+stream_session_disconnect (stream_session_t * s)
+{
+  if (!s || s->session_state == SESSION_STATE_CLOSED)
+    return;
+  s->session_state = SESSION_STATE_CLOSED;
+  session_send_session_evt_to_thread (session_handle (s),
+                                     FIFO_EVENT_DISCONNECT, s->thread_index);
+}
+
+/**
+ * Notify transport the session can be disconnected. This should eventually
  * result in a delete notification that allows us to cleanup session state.
  * Called for both active/passive disconnects.
  *
- * Should be called from the session's thread.
+ * Must be called from the session's thread.
  */
 void
-stream_session_disconnect (stream_session_t * s)
+stream_session_disconnect_transport (stream_session_t * s)
 {
   s->session_state = SESSION_STATE_CLOSED;
-  tp_vfts[s->session_type].close (s->connection_index, s->thread_index);
+  tp_vfts[session_get_transport_proto (s)].close (s->connection_index,
+                                                 s->thread_index);
 }
 
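
[Note: the old stream_session_disconnect, which called the transport directly, is split in two. stream_session_disconnect may now be invoked from any context because it only marks the session closed and queues a FIFO_EVENT_DISCONNECT to the owning thread; stream_session_disconnect_transport, above, performs the actual close and must run on the session's thread. A sketch of the handler side this implies (event-loop excerpt, names illustrative):]

static void
session_disconnect_evt_handler (session_fifo_event_t * e)
{
  stream_session_t *s = session_get_from_handle (e->session_handle);

  /* Runs in the session queue node on the owning thread, after all
   * previously queued events for the session have been served */
  ASSERT (s->thread_index == vlib_get_thread_index ());
  stream_session_disconnect_transport (s);
}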
 /**
@@ -952,81 +1099,144 @@ stream_session_cleanup (stream_session_t * s)
   if (rv)
     clib_warning ("hash delete error, rv %d", rv);
 
-  tp_vfts[s->session_type].cleanup (s->connection_index, s->thread_index);
+  tp_vfts[session_get_transport_proto (s)].cleanup (s->connection_index,
+                                                   s->thread_index);
 }
 
 /**
- * Allocate vpp event queue (once) per worker thread
+ * Allocate event queues in the shared-memory segment
+ *
+ * This can either be a newly created memfd segment, which all stack users
+ * will need to map, or the binary api's svm region; the latter is assumed
+ * to be already mapped. NOTE that this assumption does NOT hold if api
+ * clients bootstrap the shm api over sockets (i.e., use memfd segments)
+ * while vpp uses the api svm region for event queues.
  */
 void
-session_vpp_event_queue_allocate (session_manager_main_t * smm,
-                                 u32 thread_index)
+session_vpp_event_queues_allocate (session_manager_main_t * smm)
 {
+  u32 evt_q_length = 2048, evt_size = sizeof (session_fifo_event_t);
+  ssvm_private_t *eqs = &smm->evt_qs_segment;
   api_main_t *am = &api_main;
+  u64 eqs_size = 64 << 20;
+  pid_t vpp_pid = getpid ();
   void *oldheap;
-  u32 event_queue_length = 2048;
+  int i;
 
-  if (smm->vpp_event_queues[thread_index] == 0)
-    {
-      /* Allocate event fifo in the /vpe-api shared-memory segment */
-      oldheap = svm_push_data_heap (am->vlib_rp);
+  if (smm->configured_event_queue_length)
+    evt_q_length = smm->configured_event_queue_length;
 
-      if (smm->configured_event_queue_length)
-       event_queue_length = smm->configured_event_queue_length;
+  if (smm->evt_qs_use_memfd_seg)
+    {
+      if (smm->evt_qs_segment_size)
+       eqs_size = smm->evt_qs_segment_size;
 
-      smm->vpp_event_queues[thread_index] =
-       unix_shared_memory_queue_init
-       (event_queue_length,
-        sizeof (session_fifo_event_t), 0 /* consumer pid */ ,
-        0 /* (do not) send signal when queue non-empty */ );
+      eqs->ssvm_size = eqs_size;
+      eqs->i_am_master = 1;
+      eqs->my_pid = vpp_pid;
+      eqs->name = format (0, "%s%c", "evt-qs-segment", 0);
+      eqs->requested_va = smm->session_baseva;
 
-      svm_pop_heap (oldheap);
+      ssvm_master_init (eqs, SSVM_SEGMENT_MEMFD);
     }
-}
 
-session_type_t
-session_type_from_proto_and_ip (transport_proto_t proto, u8 is_ip4)
-{
-  if (proto == TRANSPORT_PROTO_TCP)
+  if (smm->evt_qs_use_memfd_seg)
+    oldheap = ssvm_push_heap (eqs->sh);
+  else
+    oldheap = svm_push_data_heap (am->vlib_rp);
+
+  for (i = 0; i < vec_len (smm->vpp_event_queues); i++)
     {
-      if (is_ip4)
-       return SESSION_TYPE_IP4_TCP;
-      else
-       return SESSION_TYPE_IP6_TCP;
+      smm->vpp_event_queues[i] = svm_queue_init (evt_q_length, evt_size,
+                                                vpp_pid, 0);
     }
+
+  if (smm->evt_qs_use_memfd_seg)
+    ssvm_pop_heap (oldheap);
   else
+    svm_pop_heap (oldheap);
+}
+
+ssvm_private_t *
+session_manager_get_evt_q_segment (void)
+{
+  session_manager_main_t *smm = &session_manager_main;
+  if (smm->evt_qs_use_memfd_seg)
+    return &smm->evt_qs_segment;
+  return 0;
+}
+
+/* *INDENT-OFF* */
+static session_fifo_rx_fn *session_tx_fns[TRANSPORT_TX_N_FNS] = {
+    session_tx_fifo_peek_and_snd,
+    session_tx_fifo_dequeue_and_snd,
+    session_tx_fifo_dequeue_internal,
+    session_tx_fifo_dequeue_and_snd
+};
+/* *INDENT-ON* */
+
+/**
+ * Initialize session layer for given transport proto and ip version
+ *
+ * Allocates per session type (transport proto + ip version) data structures
+ * and adds an arc from the session queue node to the session type's
+ * output node.
+ */
+void
+session_register_transport (transport_proto_t transport_proto,
+                           const transport_proto_vft_t * vft, u8 is_ip4,
+                           u32 output_node)
+{
+  session_manager_main_t *smm = &session_manager_main;
+  session_type_t session_type;
+  u32 next_index = ~0;
+
+  session_type = session_type_from_proto_and_ip (transport_proto, is_ip4);
+
+  vec_validate (smm->session_type_to_next, session_type);
+  vec_validate (smm->session_tx_fns, session_type);
+
+  /* *INDENT-OFF* */
+  if (output_node != ~0)
     {
-      if (is_ip4)
-       return SESSION_TYPE_IP4_UDP;
-      else
-       return SESSION_TYPE_IP6_UDP;
+      foreach_vlib_main (({
+          next_index = vlib_node_add_next (this_vlib_main,
+                                           session_queue_node.index,
+                                           output_node);
+      }));
     }
+  /* *INDENT-ON* */
 
-  return SESSION_N_TYPES;
+  smm->session_type_to_next[session_type] = next_index;
+  smm->session_tx_fns[session_type] = session_tx_fns[vft->tx_type];
 }
 
 transport_connection_t *
 session_get_transport (stream_session_t * s)
 {
-  if (s->session_state >= SESSION_STATE_READY)
-    return tp_vfts[s->session_type].get_connection (s->connection_index,
-                                                   s->thread_index);
+  transport_proto_t tp;
+  if (s->session_state != SESSION_STATE_LISTENING)
+    {
+      tp = session_get_transport_proto (s);
+      return tp_vfts[tp].get_connection (s->connection_index,
+                                        s->thread_index);
+    }
   return 0;
 }
 
 transport_connection_t *
 listen_session_get_transport (stream_session_t * s)
 {
-  return tp_vfts[s->session_type].get_listener (s->connection_index);
+  transport_proto_t tp = session_get_transport_proto (s);
+  return tp_vfts[tp].get_listener (s->connection_index);
 }
 
 int
 listen_session_get_local_session_endpoint (stream_session_t * listener,
                                           session_endpoint_t * sep)
 {
+  transport_proto_t tp = session_get_transport_proto (listener);
   transport_connection_t *tc;
-  tc =
-    tp_vfts[listener->session_type].get_listener (listener->connection_index);
+  tc = tp_vfts[tp].get_listener (listener->connection_index);
   if (!tc)
     {
       clib_warning ("no transport");
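
[Note: with session_register_transport the session layer no longer hard-codes transports (note the tcp.h include removed at the top of this diff): each proto/IP-version pair registers its vft and output node, and the tx function is chosen from session_tx_fns by the vft's tx_type, e.g. peek-and-send for transports that retransmit versus dequeue-and-send for those that do not. A sketch of a registration at transport init time, assuming a udp_proto vft and per-AF output nodes (names illustrative):]

static clib_error_t *
udp_transport_init (vlib_main_t * vm)
{
  /* udp_proto.tx_type selects dequeue-and-send: data is consumed
   * from the tx fifo on send since UDP never retransmits */
  session_register_transport (TRANSPORT_PROTO_UDP, &udp_proto,
			      1 /* is_ip4 */ , udp4_output_node.index);
  session_register_transport (TRANSPORT_PROTO_UDP, &udp_proto,
			      0 /* is_ip4 */ , udp6_output_node.index);
  return 0;
}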
@@ -1043,10 +1253,10 @@ listen_session_get_local_session_endpoint (stream_session_t * listener,
 static clib_error_t *
 session_manager_main_enable (vlib_main_t * vm)
 {
+  segment_manager_main_init_args_t _sm_args = { 0 }, *sm_args = &_sm_args;
   session_manager_main_t *smm = &session_manager_main;
   vlib_thread_main_t *vtm = vlib_get_thread_main ();
-  u32 num_threads;
-  u32 preallocated_sessions_per_worker;
+  u32 num_threads, preallocated_sessions_per_worker;
   int i, j;
 
   num_threads = 1 /* main thread */  + vtm->n_threads;
@@ -1054,10 +1264,6 @@ session_manager_main_enable (vlib_main_t * vm)
   if (num_threads < 1)
     return clib_error_return (0, "n_thread_stacks not set");
 
-  /* $$$ config parameters */
-  svm_fifo_segment_init (0x200000000ULL /* first segment base VA */ ,
-                        20 /* timeout in seconds */ );
-
   /* configure per-thread ** vectors */
   vec_validate (smm->sessions, num_threads - 1);
   vec_validate (smm->tx_buffers, num_threads - 1);
@@ -1065,16 +1271,15 @@ session_manager_main_enable (vlib_main_t * vm)
   vec_validate (smm->pending_disconnects, num_threads - 1);
   vec_validate (smm->free_event_vector, num_threads - 1);
   vec_validate (smm->vpp_event_queues, num_threads - 1);
-  vec_validate (smm->session_peekers, num_threads - 1);
-  vec_validate (smm->peekers_readers_locks, num_threads - 1);
-  vec_validate (smm->peekers_write_locks, num_threads - 1);
+  vec_validate (smm->peekers_rw_locks, num_threads - 1);
 
   for (i = 0; i < TRANSPORT_N_PROTO; i++)
-    for (j = 0; j < num_threads; j++)
-      {
-       vec_validate (smm->session_to_enqueue[i], num_threads - 1);
-       vec_validate (smm->current_enqueue_epoch[i], num_threads - 1);
-      }
+    {
+      vec_validate (smm->current_enqueue_epoch[i], num_threads - 1);
+      vec_validate (smm->session_to_enqueue[i], num_threads - 1);
+      for (j = 0; j < num_threads; j++)
+       smm->current_enqueue_epoch[i][j] = 1;
+    }
 
   for (i = 0; i < num_threads; i++)
     {
@@ -1084,15 +1289,21 @@ session_manager_main_enable (vlib_main_t * vm)
       _vec_len (smm->pending_event_vector[i]) = 0;
       vec_validate (smm->pending_disconnects[i], 0);
       _vec_len (smm->pending_disconnects[i]) = 0;
+      if (num_threads > 1)
+       clib_rwlock_init (&smm->peekers_rw_locks[i]);
     }
 
 #if SESSION_DBG
   vec_validate (smm->last_event_poll_by_thread, num_threads - 1);
 #endif
 
-  /* Allocate vpp event queues */
-  for (i = 0; i < vec_len (smm->vpp_event_queues); i++)
-    session_vpp_event_queue_allocate (smm, i);
+  /* Allocate vpp event queues segment and queue */
+  session_vpp_event_queues_allocate (smm);
+
+  /* Initialize fifo segment main baseva and size */
+  sm_args->baseva = smm->session_baseva + smm->evt_qs_segment_size;
+  sm_args->size = smm->session_va_space_size;
+  segment_manager_main_init (sm_args);
 
   /* Preallocate sessions */
   if (smm->preallocated_sessions)
@@ -1122,8 +1333,8 @@ session_manager_main_enable (vlib_main_t * vm)
 
   smm->is_enabled = 1;
 
-  /* Enable TCP transport */
-  vnet_tcp_enable_disable (vm, 1);
+  /* Enable transports */
+  transport_enable_disable (vm, 1);
 
   return 0;
 }
@@ -1165,6 +1376,9 @@ clib_error_t *
 session_manager_main_init (vlib_main_t * vm)
 {
   session_manager_main_t *smm = &session_manager_main;
+  smm->session_baseva = 0x200000000ULL;
+  smm->session_va_space_size = (u64) 128 << 30;
+  smm->evt_qs_segment_size = 64 << 20;
   smm->is_enabled = 0;
   return 0;
 }
@@ -1234,6 +1448,19 @@ session_config_fn (vlib_main_t * vm, unformat_input_t * input)
                                      tmp, tmp);
          smm->configured_v6_halfopen_table_memory = tmp;
        }
+      else if (unformat (input, "local-endpoints-table-memory %U",
+                        unformat_memory_size, &tmp))
+       {
+         if (tmp >= 0x100000000)
+           return clib_error_return (0, "memory size %llx (%lld) too large",
+                                     tmp, tmp);
+         smm->local_endpoints_table_memory = tmp;
+       }
+      else if (unformat (input, "local-endpoints-table-buckets %d",
+                        &smm->local_endpoints_table_buckets))
+       ;
+      else if (unformat (input, "evt_qs_memfd_seg"))
+       smm->evt_qs_use_memfd_seg = 1;
       else
        return clib_error_return (0, "unknown input `%U'",
                                  format_unformat_error, input);
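
[Note: the new knobs join the session stanza of startup.conf. A sample configuration exercising them (values illustrative; evt_qs_memfd_seg places the vpp event queues in the dedicated memfd segment instead of the binary api svm region):]

session {
  evt_qs_memfd_seg
  local-endpoints-table-memory 512m
  local-endpoints-table-buckets 250000
}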