vcl/ldp: add write msg function and fine tuning
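
As it applies to src/vnet/session/session.c, this change refactors the
session layer around per-thread worker contexts (session_manager_worker_t),
generalizes session_send_io_evt_to_thread_custom to take an opaque data
pointer, and reworks close handling into app-initiated (session_close) and
transport-initiated (session_transport_closing/closed/delete notify) paths
with the new TRANSPORT_CLOSING, TRANSPORT_CLOSED and CLOSED_WAITING states.
The three listen variants are consolidated into a single session_listen,
and apps are addressed by app_worker_t index throughout.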
[vpp.git] src/vnet/session/session.c
index 9790ec2..d30254e 100644
@@ -66,9 +66,11 @@ session_send_evt_to_thread (void *data, void *args, u32 thread_index,
       evt->rpc_args.arg = args;
       break;
     case FIFO_EVENT_APP_TX:
+    case SESSION_IO_EVT_TX_FLUSH:
     case FIFO_EVENT_BUILTIN_RX:
       evt->fifo = data;
       break;
+    case FIFO_EVENT_BUILTIN_TX:
     case FIFO_EVENT_DISCONNECT:
       evt->session_handle = session_handle ((stream_session_t *) data);
       break;
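
The switch above decides how the opaque payload is interpreted: TX, flush and
builtin-RX events carry a fifo pointer, while builtin-TX and disconnect events
carry a session pointer that is converted to a handle. A minimal sketch of
posting a builtin TX event through the generalized API below (the caller
function is hypothetical, not part of this commit):

  /* FIFO_EVENT_BUILTIN_TX is in the session-handle branch above, so a
   * session pointer -- not a fifo -- must be passed as data. */
  static void
  my_app_request_builtin_tx (stream_session_t * s)
  {
    session_send_io_evt_to_thread_custom (s, s->thread_index,
                                          FIFO_EVENT_BUILTIN_TX);
  }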
@@ -89,10 +91,10 @@ session_send_io_evt_to_thread (svm_fifo_t * f, session_evt_type_t evt_type)
 }
 
 int
-session_send_io_evt_to_thread_custom (svm_fifo_t * f, u32 thread_index,
+session_send_io_evt_to_thread_custom (void *data, u32 thread_index,
                                      session_evt_type_t evt_type)
 {
-  return session_send_evt_to_thread (f, 0, thread_index, evt_type);
+  return session_send_evt_to_thread (data, 0, thread_index, evt_type);
 }
 
 int
@@ -117,29 +119,48 @@ session_send_rpc_evt_to_thread (u32 thread_index, void *fp, void *rpc_args)
     }
 }
 
+static void
+session_program_transport_close (stream_session_t * s)
+{
+  u32 thread_index = vlib_get_thread_index ();
+  session_manager_worker_t *wrk;
+  session_event_t *evt;
+
+  /* If we are in the handler thread, or being called with the worker barrier
+   * held, just append a new event to the pending disconnects vector. */
+  if (vlib_thread_is_main_w_barrier () || thread_index == s->thread_index)
+    {
+      wrk = session_manager_get_worker (s->thread_index);
+      vec_add2 (wrk->pending_disconnects, evt, 1);
+      clib_memset (evt, 0, sizeof (*evt));
+      evt->session_handle = session_handle (s);
+      evt->event_type = FIFO_EVENT_DISCONNECT;
+    }
+  else
+    session_send_ctrl_evt_to_thread (s, FIFO_EVENT_DISCONNECT);
+}
+
 stream_session_t *
 session_alloc (u32 thread_index)
 {
-  session_manager_main_t *smm = &session_manager_main;
+  session_manager_worker_t *wrk = &session_manager_main.wrk[thread_index];
   stream_session_t *s;
   u8 will_expand = 0;
-  pool_get_aligned_will_expand (smm->sessions[thread_index], will_expand,
+  pool_get_aligned_will_expand (wrk->sessions, will_expand,
                                CLIB_CACHE_LINE_BYTES);
   /* If we have peekers, let them finish */
   if (PREDICT_FALSE (will_expand && vlib_num_workers ()))
     {
-      clib_rwlock_writer_lock (&smm->peekers_rw_locks[thread_index]);
-      pool_get_aligned (session_manager_main.sessions[thread_index], s,
-                       CLIB_CACHE_LINE_BYTES);
-      clib_rwlock_writer_unlock (&smm->peekers_rw_locks[thread_index]);
+      clib_rwlock_writer_lock (&wrk->peekers_rw_locks);
+      pool_get_aligned (wrk->sessions, s, CLIB_CACHE_LINE_BYTES);
+      clib_rwlock_writer_unlock (&wrk->peekers_rw_locks);
     }
   else
     {
-      pool_get_aligned (session_manager_main.sessions[thread_index], s,
-                       CLIB_CACHE_LINE_BYTES);
+      pool_get_aligned (wrk->sessions, s, CLIB_CACHE_LINE_BYTES);
     }
-  memset (s, 0, sizeof (*s));
-  s->session_index = s - session_manager_main.sessions[thread_index];
+  clib_memset (s, 0, sizeof (*s));
+  s->session_index = s - wrk->sessions;
   s->thread_index = thread_index;
   return s;
 }
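
For orientation, the per-thread context introduced here must look roughly as
follows, reconstructed only from the fields this file touches (the
authoritative definition lives in the session headers, not in this diff):

  typedef struct session_manager_worker_
  {
    stream_session_t *sessions;               /* per-worker session pool */
    clib_rwlock_t peekers_rw_locks;           /* guards cross-thread peeks */
    u64 current_enqueue_epoch[TRANSPORT_N_PROTO];
    u32 *session_to_enqueue[TRANSPORT_N_PROTO];
    svm_msg_q_t *vpp_event_queue;             /* per-worker vpp event mq */
    session_event_t *free_event_vector;
    session_event_t *pending_event_vector;
    session_event_t *pending_disconnects;
    session_event_t *postponed_event_vector;
    f64 last_vlib_time;
    f64 dispatch_period;                      /* 500us default per this diff */
  } session_manager_worker_t;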
@@ -147,9 +168,34 @@ session_alloc (u32 thread_index)
 void
 session_free (stream_session_t * s)
 {
-  pool_put (session_manager_main.sessions[s->thread_index], s);
+  pool_put (session_manager_main.wrk[s->thread_index].sessions, s);
   if (CLIB_DEBUG)
-    memset (s, 0xFA, sizeof (*s));
+    clib_memset (s, 0xFA, sizeof (*s));
+}
+
+void
+session_free_w_fifos (stream_session_t * s)
+{
+  segment_manager_dealloc_fifos (s->svm_segment_index, s->server_rx_fifo,
+                                s->server_tx_fifo);
+  session_free (s);
+}
+
+/**
+ * Cleans up session and lookup table.
+ *
+ * Transport connection must still be valid.
+ */
+static void
+session_delete (stream_session_t * s)
+{
+  int rv;
+
+  /* Delete from the main lookup table. */
+  if ((rv = session_lookup_del_session (s)))
+    clib_warning ("hash delete error, rv %d", rv);
+
+  session_free_w_fifos (s);
 }
 
 int
@@ -187,8 +233,8 @@ session_alloc_for_connection (transport_connection_t * tc)
 
   s = session_alloc (thread_index);
   s->session_type = session_type_from_proto_and_ip (tc->proto, tc->is_ip4);
-  s->session_state = SESSION_STATE_CONNECTING;
-  s->enqueue_epoch = ~0;
+  s->enqueue_epoch = (u64) ~ 0;
+  s->session_state = SESSION_STATE_CLOSED;
 
   /* Attach transport to session and vice versa */
   s->connection_index = tc->c_index;
@@ -382,22 +428,19 @@ session_enqueue_stream_connection (transport_connection_t * tc,
     {
      /* Queue RX event on this fifo. Eventually these will need to be flushed
       * by calling session_manager_flush_enqueue_events () */
-      session_manager_main_t *smm = vnet_get_session_manager_main ();
-      u32 thread_index = s->thread_index;
-      u32 enqueue_epoch = smm->current_enqueue_epoch[tc->proto][thread_index];
+      session_manager_worker_t *wrk;
 
-      if (s->enqueue_epoch != enqueue_epoch)
+      wrk = session_manager_get_worker (s->thread_index);
+      if (s->enqueue_epoch != wrk->current_enqueue_epoch[tc->proto])
        {
-         s->enqueue_epoch = enqueue_epoch;
-         vec_add1 (smm->session_to_enqueue[tc->proto][thread_index],
-                   s - smm->sessions[thread_index]);
+         s->enqueue_epoch = wrk->current_enqueue_epoch[tc->proto];
+         vec_add1 (wrk->session_to_enqueue[tc->proto], s->session_index);
        }
     }
 
   return enqueued;
 }
 
-
 int
 session_enqueue_dgram_connection (stream_session_t * s,
                                  session_dgram_hdr_t * hdr,
@@ -423,15 +466,13 @@ session_enqueue_dgram_connection (stream_session_t * s,
     {
      /* Queue RX event on this fifo. Eventually these will need to be flushed
       * by calling session_manager_flush_enqueue_events () */
-      session_manager_main_t *smm = vnet_get_session_manager_main ();
-      u32 thread_index = s->thread_index;
-      u32 enqueue_epoch = smm->current_enqueue_epoch[proto][thread_index];
+      session_manager_worker_t *wrk;
 
-      if (s->enqueue_epoch != enqueue_epoch)
+      wrk = session_manager_get_worker (s->thread_index);
+      if (s->enqueue_epoch != wrk->current_enqueue_epoch[proto])
        {
-         s->enqueue_epoch = enqueue_epoch;
-         vec_add1 (smm->session_to_enqueue[proto][thread_index],
-                   s - smm->sessions[thread_index]);
+         s->enqueue_epoch = wrk->current_enqueue_epoch[proto];
+         vec_add1 (wrk->session_to_enqueue[proto], s->session_index);
        }
     }
   return enqueued;
@@ -486,14 +527,14 @@ stream_session_dequeue_drop (transport_connection_t * tc, u32 max_bytes)
  * @return 0 on success or negative number if failed to send notification.
  */
 static inline int
-session_enqueue_notify (stream_session_t * s, u8 lock)
+session_enqueue_notify (stream_session_t * s)
 {
-  application_t *app;
+  app_worker_t *app;
 
-  app = application_get_if_valid (s->app_index);
+  app = app_worker_get_if_valid (s->app_wrk_index);
   if (PREDICT_FALSE (!app))
     {
-      TCP_DBG ("invalid s->app_index = %d", s->app_index);
+      SESSION_DBG ("invalid s->app_wrk_index = %d", s->app_wrk_index);
       return 0;
     }
 
@@ -504,25 +545,19 @@ session_enqueue_notify (stream_session_t * s, u8 lock)
   }));
   /* *INDENT-ON* */
 
-  if (lock)
-    return application_lock_and_send_event (app, s, FIFO_EVENT_APP_RX);
-
-  return application_send_event (app, s, FIFO_EVENT_APP_RX);
+  return app_worker_lock_and_send_event (app, s, FIFO_EVENT_APP_RX);
 }
 
 int
 session_dequeue_notify (stream_session_t * s)
 {
-  application_t *app;
+  app_worker_t *app;
 
-  app = application_get_if_valid (s->app_index);
+  app = app_worker_get_if_valid (s->app_wrk_index);
   if (PREDICT_FALSE (!app))
     return -1;
 
-  if (session_transport_service_type (s) == TRANSPORT_SERVICE_CL)
-    return application_lock_and_send_event (app, s, FIFO_EVENT_APP_RX);
-
-  return application_send_event (app, s, FIFO_EVENT_APP_TX);
+  return app_worker_lock_and_send_event (app, s, FIFO_EVENT_APP_TX);
 }
 
 /**
@@ -536,15 +571,12 @@ session_dequeue_notify (stream_session_t * s)
 int
 session_manager_flush_enqueue_events (u8 transport_proto, u32 thread_index)
 {
-  session_manager_main_t *smm = &session_manager_main;
-  transport_service_type_t tp_service;
-  int i, errors = 0, lock;
+  session_manager_worker_t *wrk = session_manager_get_worker (thread_index);
   stream_session_t *s;
+  int i, errors = 0;
   u32 *indices;
 
-  indices = smm->session_to_enqueue[transport_proto][thread_index];
-  tp_service = transport_protocol_service_type (transport_proto);
-  lock = tp_service == TRANSPORT_SERVICE_CL;
+  indices = wrk->session_to_enqueue[transport_proto];
 
   for (i = 0; i < vec_len (indices); i++)
     {
@@ -554,13 +586,13 @@ session_manager_flush_enqueue_events (u8 transport_proto, u32 thread_index)
          errors++;
          continue;
        }
-      if (PREDICT_FALSE (session_enqueue_notify (s, lock)))
+      if (PREDICT_FALSE (session_enqueue_notify (s)))
        errors++;
     }
 
   vec_reset_length (indices);
-  smm->session_to_enqueue[transport_proto][thread_index] = indices;
-  smm->current_enqueue_epoch[transport_proto][thread_index]++;
+  wrk->session_to_enqueue[transport_proto] = indices;
+  wrk->current_enqueue_epoch[transport_proto]++;
 
   return errors;
 }
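
The enqueue epoch is what keeps session_to_enqueue duplicate-free without a
per-session flag: a session index is appended only the first time data lands
on it within a flush period, and the flush invalidates all recorded epochs at
once. Condensed from the enqueue and flush paths above:

  /* Producer side, per enqueue (both stream and dgram paths): */
  if (s->enqueue_epoch != wrk->current_enqueue_epoch[proto])
    {
      s->enqueue_epoch = wrk->current_enqueue_epoch[proto];
      vec_add1 (wrk->session_to_enqueue[proto], s->session_index);
    }

  /* Consumer side, per flush: notify each listed session once, then
   * invalidate every outstanding s->enqueue_epoch in O(1) by bumping
   * the worker's epoch counter. */
  vec_reset_length (wrk->session_to_enqueue[proto]);
  wrk->current_enqueue_epoch[proto]++;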
@@ -596,6 +628,7 @@ session_stream_connect_notify (transport_connection_t * tc, u8 is_fail)
   u32 opaque = 0, new_ti, new_si;
   stream_session_t *new_s = 0;
   segment_manager_t *sm;
+  app_worker_t *app_wrk;
   application_t *app;
   u8 alloc_fifos;
   int error = 0;
@@ -615,17 +648,18 @@ session_stream_connect_notify (transport_connection_t * tc, u8 is_fail)
   /* Get the app's index from the handle we stored when opening connection
    * and the opaque (api_context for external apps) from transport session
    * index */
-  app = application_get_if_valid (handle >> 32);
-  if (!app)
+  app_wrk = app_worker_get_if_valid (handle >> 32);
+  if (!app_wrk)
     return -1;
   opaque = tc->s_index;
+  app = application_get (app_wrk->app_index);
 
   /*
    * Allocate new session with fifos (svm segments are allocated if needed)
    */
   if (!is_fail)
     {
-      sm = application_get_connect_segment_manager (app);
+      sm = app_worker_get_connect_segment_manager (app_wrk);
       alloc_fifos = !application_is_builtin_proxy (app);
       if (session_alloc_and_init (sm, tc, alloc_fifos, &new_s))
        {
@@ -634,7 +668,8 @@ session_stream_connect_notify (transport_connection_t * tc, u8 is_fail)
        }
       else
        {
-         new_s->app_index = app->index;
+         new_s->session_state = SESSION_STATE_CONNECTING;
+         new_s->app_wrk_index = app_wrk->wrk_index;
          new_si = new_s->session_index;
          new_ti = new_s->thread_index;
        }
@@ -643,14 +678,14 @@ session_stream_connect_notify (transport_connection_t * tc, u8 is_fail)
   /*
    * Notify client application
    */
-  if (app->cb_fns.session_connected_callback (app->index, opaque, new_s,
-                                             is_fail))
+  if (app->cb_fns.session_connected_callback (app_wrk->wrk_index, opaque,
+                                             new_s, is_fail))
     {
       SESSION_DBG ("failed to notify app");
       if (!is_fail)
        {
          new_s = session_get (new_si, new_ti);
-         stream_session_disconnect_transport (new_s);
+         session_transport_close (new_s);
        }
     }
   else
@@ -728,15 +763,20 @@ session_dgram_connect_notify (transport_connection_t * tc,
   return 0;
 }
 
-void
+int
 stream_session_accept_notify (transport_connection_t * tc)
 {
-  application_t *server;
+  app_worker_t *app_wrk;
+  application_t *app;
   stream_session_t *s;
 
   s = session_get (tc->s_index, tc->thread_index);
-  server = application_get (s->app_index);
-  server->cb_fns.session_accept_callback (s);
+  app_wrk = app_worker_get_if_valid (s->app_wrk_index);
+  if (!app_wrk)
+    return -1;
+  s->session_state = SESSION_STATE_ACCEPTING;
+  app = application_get (app_wrk->app_index);
+  return app->cb_fns.session_accept_callback (s);
 }
 
 /**
@@ -747,69 +787,121 @@ stream_session_accept_notify (transport_connection_t * tc)
  * Ultimately this leads to close being called on transport (passive close).
  */
 void
-stream_session_disconnect_notify (transport_connection_t * tc)
+session_transport_closing_notify (transport_connection_t * tc)
 {
-  application_t *server;
+  app_worker_t *app_wrk;
+  application_t *app;
   stream_session_t *s;
 
   s = session_get (tc->s_index, tc->thread_index);
-  s->session_state = SESSION_STATE_CLOSING;
-  server = application_get_if_valid (s->app_index);
-  if (server)
-    server->cb_fns.session_disconnect_callback (s);
+  if (s->session_state >= SESSION_STATE_TRANSPORT_CLOSING)
+    return;
+  s->session_state = SESSION_STATE_TRANSPORT_CLOSING;
+  app_wrk = app_worker_get_if_valid (s->app_wrk_index);
+  if (!app_wrk)
+    return;
+  app = application_get (app_wrk->app_index);
+  app->cb_fns.session_disconnect_callback (s);
 }
 
 /**
- * Cleans up session and lookup table.
+ * Notification from transport that connection is being deleted
  *
- * Transport connection must still be valid.
+ * This removes the session if it is still valid. It should be called only on
+ * previously fully established sessions. For instance, failed connects should
+ * call session_stream_connect_notify and indicate that the connect has
+ * failed.
  */
 void
-stream_session_delete (stream_session_t * s)
+session_transport_delete_notify (transport_connection_t * tc)
 {
-  int rv;
+  stream_session_t *s;
 
-  /* Delete from the main lookup table. */
-  if ((rv = session_lookup_del_session (s)))
-    clib_warning ("hash delete error, rv %d", rv);
+  /* App might've been removed already */
+  if (!(s = session_get_if_valid (tc->s_index, tc->thread_index)))
+    return;
 
-  /* Cleanup fifo segments */
-  segment_manager_dealloc_fifos (s->svm_segment_index, s->server_rx_fifo,
-                                s->server_tx_fifo);
-  session_free (s);
+  /* Make sure we don't try to send anything more */
+  svm_fifo_dequeue_drop_all (s->server_tx_fifo);
+
+  switch (s->session_state)
+    {
+    case SESSION_STATE_ACCEPTING:
+    case SESSION_STATE_TRANSPORT_CLOSING:
+      /* If transport finishes or times out before we get a reply
+       * from the app, mark transport as closed and wait for reply
+       * before removing the session. Clean up the session table in advance
+       * because transport will soon be closed and closed sessions
+       * are assumed to have been removed from the lookup table */
+      session_lookup_del_session (s);
+      s->session_state = SESSION_STATE_TRANSPORT_CLOSED;
+      break;
+    case SESSION_STATE_CLOSING:
+    case SESSION_STATE_CLOSED_WAITING:
+      /* Clean up the lookup table while the transport is still valid.
+       * Program transport close to ensure that all session events
+       * have been cleaned up. Once transport close is called, the
+       * session is just removed because both transport and app have
+       * confirmed the close */
+      session_lookup_del_session (s);
+      s->session_state = SESSION_STATE_TRANSPORT_CLOSED;
+      session_program_transport_close (s);
+      break;
+    case SESSION_STATE_TRANSPORT_CLOSED:
+      break;
+    case SESSION_STATE_CLOSED:
+      session_delete (s);
+      break;
+    default:
+      clib_warning ("unexpected session state %u", s->session_state);
+      session_delete (s);
+      break;
+    }
 }
 
 /**
- * Notification from transport that connection is being deleted
+ * Notification from transport that session can be closed
  *
- * This removes the session if it is still valid. It should be called only on
- * previously fully established sessions. For instance failed connects should
- * call stream_session_connect_notify and indicate that the connect has
- * failed.
+ * Should be called by the transport only if it was closed with a non-empty
+ * tx fifo and once it decides to begin the closing procedure, prior to
+ * issuing a delete notify. This gives the session layer a chance to clean
+ * up any outstanding events.
  */
 void
-stream_session_delete_notify (transport_connection_t * tc)
+session_transport_closed_notify (transport_connection_t * tc)
 {
   stream_session_t *s;
 
-  /* App might've been removed already */
-  s = session_get_if_valid (tc->s_index, tc->thread_index);
-  if (!s)
+  if (!(s = session_get_if_valid (tc->s_index, tc->thread_index)))
     return;
-  stream_session_delete (s);
+
+  /* If app close has not been received or has not yet resulted in
+   * a transport close, only mark the session transport as closed */
+  if (s->session_state <= SESSION_STATE_CLOSING)
+    {
+      session_lookup_del_session (s);
+      s->session_state = SESSION_STATE_TRANSPORT_CLOSED;
+    }
+  else
+    s->session_state = SESSION_STATE_CLOSED;
 }
 
 /**
  * Notify application that connection has been reset.
  */
 void
-stream_session_reset_notify (transport_connection_t * tc)
+session_transport_reset_notify (transport_connection_t * tc)
 {
   stream_session_t *s;
+  app_worker_t *app_wrk;
   application_t *app;
   s = session_get (tc->s_index, tc->thread_index);
-  s->session_state = SESSION_STATE_CLOSED;
-  app = application_get (s->app_index);
+  svm_fifo_dequeue_drop_all (s->server_tx_fifo);
+  if (s->session_state >= SESSION_STATE_TRANSPORT_CLOSING)
+    return;
+  s->session_state = SESSION_STATE_TRANSPORT_CLOSING;
+  app_wrk = app_worker_get (s->app_wrk_index);
+  app = application_get (app_wrk->app_index);
   app->cb_fns.session_reset_callback (s);
 }
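
Taken together, the notifications above implement a small close state
machine: an app-initiated close moves the session to CLOSING and then, once
the transport is told to close, to CLOSED or, if the tx fifo still holds
data, to CLOSED_WAITING; a transport-initiated close moves it to
TRANSPORT_CLOSING (disconnect callback to the app) and later to
TRANSPORT_CLOSED. In every path the session itself is deleted only after
both app and transport have confirmed the close.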
 
@@ -820,43 +912,44 @@ int
 stream_session_accept (transport_connection_t * tc, u32 listener_index,
                       u8 notify)
 {
-  application_t *server;
   stream_session_t *s, *listener;
+  app_worker_t *app_wrk;
   segment_manager_t *sm;
   int rv;
 
   /* Find the server */
   listener = listen_session_get (listener_index);
-  server = application_get (listener->app_index);
+  app_wrk = application_listener_select_worker (listener, 0);
 
-  sm = application_get_listen_segment_manager (server, listener);
+  sm = app_worker_get_listen_segment_manager (app_wrk, listener);
   if ((rv = session_alloc_and_init (sm, tc, 1, &s)))
     return rv;
 
-  s->app_index = server->index;
+  s->app_wrk_index = app_wrk->wrk_index;
   s->listener_index = listener_index;
-  s->session_state = SESSION_STATE_ACCEPTING;
 
   /* Shoulder-tap the server */
   if (notify)
     {
-      server->cb_fns.session_accept_callback (s);
+      application_t *app = application_get (app_wrk->app_index);
+      return app->cb_fns.session_accept_callback (s);
     }
 
   return 0;
 }
 
 int
-session_open_cl (u32 app_index, session_endpoint_t * rmt, u32 opaque)
+session_open_cl (u32 app_wrk_index, session_endpoint_t * rmt, u32 opaque)
 {
   transport_connection_t *tc;
-  transport_endpoint_t *tep;
+  transport_endpoint_cfg_t *tep;
   segment_manager_t *sm;
+  app_worker_t *app_wrk;
   stream_session_t *s;
   application_t *app;
   int rv;
 
-  tep = session_endpoint_to_transport (rmt);
+  tep = session_endpoint_to_transport_cfg (rmt);
   rv = tp_vfts[rmt->transport_proto].open (tep);
   if (rv < 0)
     {
@@ -868,29 +961,30 @@ session_open_cl (u32 app_index, session_endpoint_t * rmt, u32 opaque)
 
   /* For dgram type of service, allocate session and fifos now.
    */
-  app = application_get (app_index);
-  sm = application_get_connect_segment_manager (app);
+  app_wrk = app_worker_get (app_wrk_index);
+  sm = app_worker_get_connect_segment_manager (app_wrk);
 
   if (session_alloc_and_init (sm, tc, 1, &s))
     return -1;
-  s->app_index = app->index;
+  s->app_wrk_index = app_wrk->wrk_index;
   s->session_state = SESSION_STATE_OPENED;
 
   /* Tell the app about the new event fifo for this session */
-  app->cb_fns.session_connected_callback (app->index, opaque, s, 0);
+  app = application_get (app_wrk->app_index);
+  app->cb_fns.session_connected_callback (app_wrk->wrk_index, opaque, s, 0);
 
   return 0;
 }
 
 int
-session_open_vc (u32 app_index, session_endpoint_t * rmt, u32 opaque)
+session_open_vc (u32 app_wrk_index, session_endpoint_t * rmt, u32 opaque)
 {
   transport_connection_t *tc;
-  transport_endpoint_t *tep;
+  transport_endpoint_cfg_t *tep;
   u64 handle;
   int rv;
 
-  tep = session_endpoint_to_transport (rmt);
+  tep = session_endpoint_to_transport_cfg (rmt);
   rv = tp_vfts[rmt->transport_proto].open (tep);
   if (rv < 0)
     {
@@ -907,7 +1001,7 @@ session_open_vc (u32 app_index, session_endpoint_t * rmt, u32 opaque)
    * is needed when the connect notify comes and we have to notify the
    * external app
    */
-  handle = (((u64) app_index) << 32) | (u64) tc->c_index;
+  handle = (((u64) app_wrk_index) << 32) | (u64) tc->c_index;
   session_lookup_add_half_open (tc, handle);
 
   /* Store api_context (opaque) for when the reply comes. Not the nicest
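
The half-open handle is a plain 64-bit pack: the app worker index in the
upper 32 bits, the transport connection index in the lower 32.
session_stream_connect_notify recovers the worker with handle >> 32.
Equivalent helpers (hypothetical names, for illustration only):

  static inline u64
  half_open_handle_make (u32 app_wrk_index, u32 tc_index)
  {
    return (((u64) app_wrk_index) << 32) | (u64) tc_index;
  }

  static inline u32
  half_open_handle_app_wrk (u64 handle)
  {
    return handle >> 32;
  }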
@@ -918,13 +1012,15 @@ session_open_vc (u32 app_index, session_endpoint_t * rmt, u32 opaque)
 }
 
 int
-session_open_app (u32 app_index, session_endpoint_t * rmt, u32 opaque)
+session_open_app (u32 app_wrk_index, session_endpoint_t * rmt, u32 opaque)
 {
-  session_endpoint_extended_t *sep = (session_endpoint_extended_t *) rmt;
-  sep->app_index = app_index;
+  session_endpoint_cfg_t *sep = (session_endpoint_cfg_t *) rmt;
+  transport_endpoint_cfg_t *tep_cfg = session_endpoint_to_transport_cfg (sep);
+
+  sep->app_wrk_index = app_wrk_index;
   sep->opaque = opaque;
 
-  return tp_vfts[rmt->transport_proto].open ((transport_endpoint_t *) sep);
+  return tp_vfts[rmt->transport_proto].open (tep_cfg);
 }
 
 typedef int (*session_open_service_fn) (u32, session_endpoint_t *, u32);
@@ -951,118 +1047,52 @@ static session_open_service_fn session_open_srv_fns[TRANSPORT_N_SERVICES] = {
  *              on open completion.
  */
 int
-session_open (u32 app_index, session_endpoint_t * rmt, u32 opaque)
+session_open (u32 app_wrk_index, session_endpoint_t * rmt, u32 opaque)
 {
   transport_service_type_t tst = tp_vfts[rmt->transport_proto].service_type;
-  return session_open_srv_fns[tst] (app_index, rmt, opaque);
+  return session_open_srv_fns[tst] (app_wrk_index, rmt, opaque);
 }
 
+/**
+ * Ask transport to listen on session endpoint.
+ *
+ * @param s Session for which listen will be called. Note that unlike
+ *         established sessions, listen sessions are not associated to a
+ *         thread.
+ * @param sep Local endpoint to be listened on.
+ */
 int
-session_listen_vc (stream_session_t * s, session_endpoint_t * sep)
-{
-  transport_connection_t *tc;
-  u32 tci;
-
-  /* Transport bind/listen  */
-  tci = tp_vfts[sep->transport_proto].bind (s->session_index,
-                                           session_endpoint_to_transport
-                                           (sep));
-
-  if (tci == (u32) ~ 0)
-    return -1;
-
-  /* Attach transport to session */
-  s->connection_index = tci;
-  tc = tp_vfts[sep->transport_proto].get_listener (tci);
-
-  /* Weird but handle it ... */
-  if (tc == 0)
-    return -1;
-
-  /* Add to the main lookup table */
-  session_lookup_add_connection (tc, s->session_index);
-  return 0;
-}
-
-int
-session_listen_cl (stream_session_t * s, session_endpoint_t * sep)
+session_listen (stream_session_t * ls, session_endpoint_cfg_t * sep)
 {
   transport_connection_t *tc;
-  application_t *server;
-  segment_manager_t *sm;
-  u32 tci;
+  transport_endpoint_t *tep;
+  u32 tc_index, s_index;
 
-  /* Transport bind/listen  */
-  tci = tp_vfts[sep->transport_proto].bind (s->session_index,
-                                           session_endpoint_to_transport
-                                           (sep));
+  /* Transport bind/listen */
+  tep = session_endpoint_to_transport (sep);
+  s_index = ls->session_index;
+  tc_index = tp_vfts[sep->transport_proto].bind (s_index, tep);
 
-  if (tci == (u32) ~ 0)
+  if (tc_index == (u32) ~ 0)
     return -1;
 
   /* Attach transport to session */
-  s->connection_index = tci;
-  tc = tp_vfts[sep->transport_proto].get_listener (tci);
-
-  /* Weird but handle it ... */
-  if (tc == 0)
-    return -1;
-
-  server = application_get (s->app_index);
-  sm = application_get_listen_segment_manager (server, s);
-  if (session_alloc_fifos (sm, s))
-    return -1;
+  ls = listen_session_get (s_index);
+  ls->connection_index = tc_index;
 
-  /* Add to the main lookup table */
-  session_lookup_add_connection (tc, s->session_index);
+  /* Add to the main lookup table after transport was initialized */
+  tc = tp_vfts[sep->transport_proto].get_listener (tc_index);
+  session_lookup_add_connection (tc, s_index);
   return 0;
 }
 
-int
-session_listen_app (stream_session_t * s, session_endpoint_t * sep)
-{
-  session_endpoint_extended_t esep;
-  clib_memcpy (&esep, sep, sizeof (*sep));
-  esep.app_index = s->app_index;
-
-  return tp_vfts[sep->transport_proto].bind (s->session_index,
-                                            (transport_endpoint_t *) & esep);
-}
-
-typedef int (*session_listen_service_fn) (stream_session_t *,
-                                         session_endpoint_t *);
-
-/* *INDENT-OFF* */
-static session_listen_service_fn
-session_listen_srv_fns[TRANSPORT_N_SERVICES] = {
-  session_listen_vc,
-  session_listen_cl,
-  session_listen_app,
-};
-/* *INDENT-ON* */
-
-/**
- * Ask transport to listen on local transport endpoint.
- *
- * @param s Session for which listen will be called. Note that unlike
- *         established sessions, listen sessions are not associated to a
- *         thread.
- * @param tep Local endpoint to be listened on.
- */
-int
-stream_session_listen (stream_session_t * s, session_endpoint_t * sep)
-{
-  transport_service_type_t tst = tp_vfts[sep->transport_proto].service_type;
-  return session_listen_srv_fns[tst] (s, sep);
-}
-
 /**
  * Ask transport to stop listening on local transport endpoint.
  *
  * @param s Session to stop listening on. It must be in state LISTENING.
  */
 int
-stream_session_stop_listen (stream_session_t * s)
+session_stop_listen (stream_session_t * s)
 {
   transport_proto_t tp = session_get_transport_proto (s);
   transport_connection_t *tc;
@@ -1085,23 +1115,24 @@ stream_session_stop_listen (stream_session_t * s)
 }
 
 /**
- * Initialize session disconnect.
+ * Initialize session closing procedure.
  *
  * Request is always sent to session node to ensure that all outstanding
  * requests are served before transport is notified.
  */
 void
-stream_session_disconnect (stream_session_t * s)
+session_close (stream_session_t * s)
 {
-  u32 thread_index = vlib_get_thread_index ();
-  session_manager_main_t *smm = &session_manager_main;
-  session_event_t *evt;
-
   if (!s)
     return;
 
   if (s->session_state >= SESSION_STATE_CLOSING)
     {
+      /* Session will only be removed once both app and transport
+       * acknowledge the close */
+      if (s->session_state == SESSION_STATE_TRANSPORT_CLOSED)
+       session_program_transport_close (s);
+
       /* Session already closed. Clear the tx fifo */
       if (s->session_state == SESSION_STATE_CLOSED)
        svm_fifo_dequeue_drop_all (s->server_tx_fifo);
@@ -1109,20 +1140,7 @@ stream_session_disconnect (stream_session_t * s)
     }
 
   s->session_state = SESSION_STATE_CLOSING;
-
-  /* If we are in the handler thread, or being called with the worker barrier
-   * held (api/cli), just append a new event to pending disconnects vector. */
-  if ((thread_index == 0 && !vlib_get_current_process (vlib_get_main ()))
-      || thread_index == s->thread_index)
-    {
-      ASSERT (s->thread_index == thread_index || thread_index == 0);
-      vec_add2 (smm->pending_disconnects[s->thread_index], evt, 1);
-      memset (evt, 0, sizeof (*evt));
-      evt->session_handle = session_handle (s);
-      evt->event_type = FIFO_EVENT_DISCONNECT;
-    }
-  else
-    session_send_ctrl_evt_to_thread (s, FIFO_EVENT_DISCONNECT);
+  session_program_transport_close (s);
 }
 
 /**
@@ -1133,9 +1151,26 @@ stream_session_disconnect (stream_session_t * s)
  * Must be called from the session's thread.
  */
 void
-stream_session_disconnect_transport (stream_session_t * s)
+session_transport_close (stream_session_t * s)
 {
-  s->session_state = SESSION_STATE_CLOSED;
+  /* If transport is already closed, just free the session */
+  if (s->session_state >= SESSION_STATE_TRANSPORT_CLOSED)
+    {
+      session_free_w_fifos (s);
+      return;
+    }
+
+  /* If tx queue wasn't drained, change state to closed waiting for transport.
+   * This way, the transport, if it so wishes, can continue to try sending the
+   * outstanding data (in closed state it cannot). It MUST, however, at some
+   * point, either after sending everything or after a timeout, call delete
+   * notify. This will finally lead to the complete cleanup of the session.
+   */
+  if (svm_fifo_max_dequeue (s->server_tx_fifo))
+    s->session_state = SESSION_STATE_CLOSED_WAITING;
+  else
+    s->session_state = SESSION_STATE_CLOSED;
+
   tp_vfts[session_get_transport_proto (s)].close (s->connection_index,
                                                  s->thread_index);
 }
@@ -1148,7 +1183,7 @@ stream_session_disconnect_transport (stream_session_t * s)
  * closed.
  */
 void
-stream_session_cleanup (stream_session_t * s)
+session_transport_cleanup (stream_session_t * s)
 {
   s->session_state = SESSION_STATE_CLOSED;
 
@@ -1158,9 +1193,7 @@ stream_session_cleanup (stream_session_t * s)
                                                    s->thread_index);
   /* Since we called cleanup, no delete notification will come. So, make
    * sure the session is properly freed. */
-  segment_manager_dealloc_fifos (s->svm_segment_index, s->server_rx_fifo,
-                                s->server_tx_fifo);
-  session_free (s);
+  session_free_w_fifos (s);
 }
 
 transport_service_type_t
@@ -1231,23 +1264,22 @@ session_vpp_event_queues_allocate (session_manager_main_t * smm)
   else
     oldheap = svm_push_data_heap (am->vlib_rp);
 
-  for (i = 0; i < vec_len (smm->vpp_event_queues); i++)
+  for (i = 0; i < vec_len (smm->wrk); i++)
     {
       svm_msg_q_cfg_t _cfg, *cfg = &_cfg;
-      u32 notif_q_size = clib_max (16, evt_q_length >> 4);
       svm_msg_q_ring_cfg_t rc[SESSION_MQ_N_RINGS] = {
        {evt_q_length, evt_size, 0}
        ,
-       {notif_q_size, 256, 0}
+       {evt_q_length << 1, 256, 0}
       };
       cfg->consumer_pid = 0;
       cfg->n_rings = 2;
       cfg->q_nitems = evt_q_length;
       cfg->ring_cfgs = rc;
-      smm->vpp_event_queues[i] = svm_msg_q_alloc (cfg);
+      smm->wrk[i].vpp_event_queue = svm_msg_q_alloc (cfg);
       if (smm->evt_qs_use_memfd_seg)
        {
-         if (svm_msg_q_alloc_consumer_eventfd (smm->vpp_event_queues[i]))
+         if (svm_msg_q_alloc_consumer_eventfd (smm->wrk[i].vpp_event_queue))
	    clib_warning ("svm_msg_q_alloc_consumer_eventfd failed");
        }
     }
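
The second ring carries 256-byte control messages; this hunk grows it from
evt_q_length/16 (minimum 16) slots to twice evt_q_length. With an
illustrative evt_q_length of 2048 (the real value comes from session
configuration):

  u32 evt_q_length = 2048;                          /* hypothetical */
  u32 old_size = clib_max (16, evt_q_length >> 4);  /* 128 slots */
  u32 new_size = evt_q_length << 1;                 /* 4096 slots */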
@@ -1366,6 +1398,7 @@ session_manager_main_enable (vlib_main_t * vm)
   session_manager_main_t *smm = &session_manager_main;
   vlib_thread_main_t *vtm = vlib_get_thread_main ();
   u32 num_threads, preallocated_sessions_per_worker;
+  session_manager_worker_t *wrk;
   int i, j;
 
   num_threads = 1 /* main thread */  + vtm->n_threads;
@@ -1373,34 +1406,32 @@ session_manager_main_enable (vlib_main_t * vm)
   if (num_threads < 1)
     return clib_error_return (0, "n_thread_stacks not set");
 
-  /* configure per-thread ** vectors */
-  vec_validate (smm->sessions, num_threads - 1);
-  vec_validate (smm->tx_buffers, num_threads - 1);
-  vec_validate (smm->pending_event_vector, num_threads - 1);
-  vec_validate (smm->pending_disconnects, num_threads - 1);
-  vec_validate (smm->free_event_vector, num_threads - 1);
-  vec_validate (smm->vpp_event_queues, num_threads - 1);
-  vec_validate (smm->peekers_rw_locks, num_threads - 1);
-  vec_validate_aligned (smm->ctx, num_threads - 1, CLIB_CACHE_LINE_BYTES);
+  /* Allocate cache line aligned worker contexts */
+  vec_validate_aligned (smm->wrk, num_threads - 1, CLIB_CACHE_LINE_BYTES);
 
   for (i = 0; i < TRANSPORT_N_PROTO; i++)
     {
-      vec_validate (smm->current_enqueue_epoch[i], num_threads - 1);
-      vec_validate (smm->session_to_enqueue[i], num_threads - 1);
       for (j = 0; j < num_threads; j++)
-       smm->current_enqueue_epoch[i][j] = 1;
+       smm->wrk[j].current_enqueue_epoch[i] = 1;
     }
 
   for (i = 0; i < num_threads; i++)
     {
-      vec_validate (smm->free_event_vector[i], 0);
-      _vec_len (smm->free_event_vector[i]) = 0;
-      vec_validate (smm->pending_event_vector[i], 0);
-      _vec_len (smm->pending_event_vector[i]) = 0;
-      vec_validate (smm->pending_disconnects[i], 0);
-      _vec_len (smm->pending_disconnects[i]) = 0;
+      wrk = &smm->wrk[i];
+      vec_validate (wrk->free_event_vector, 128);
+      _vec_len (wrk->free_event_vector) = 0;
+      vec_validate (wrk->pending_event_vector, 128);
+      _vec_len (wrk->pending_event_vector) = 0;
+      vec_validate (wrk->pending_disconnects, 128);
+      _vec_len (wrk->pending_disconnects) = 0;
+      vec_validate (wrk->postponed_event_vector, 128);
+      _vec_len (wrk->postponed_event_vector) = 0;
+
+      wrk->last_vlib_time = vlib_time_now (vlib_mains[i]);
+      wrk->dispatch_period = 500e-6;
+
       if (num_threads > 1)
-       clib_rwlock_init (&smm->peekers_rw_locks[i]);
+       clib_rwlock_init (&smm->wrk[i].peekers_rw_locks);
     }
 
 #if SESSION_DEBUG
@@ -1420,7 +1451,7 @@ session_manager_main_enable (vlib_main_t * vm)
     {
       if (num_threads == 1)
        {
-         pool_init_fixed (smm->sessions[0], smm->preallocated_sessions);
+         pool_init_fixed (smm->wrk[0].sessions, smm->preallocated_sessions);
        }
       else
        {
@@ -1431,7 +1462,7 @@ session_manager_main_enable (vlib_main_t * vm)
 
          for (j = 1; j < num_threads; j++)
            {
-             pool_init_fixed (smm->sessions[j],
+             pool_init_fixed (smm->wrk[j].sessions,
                               preallocated_sessions_per_worker);
            }
        }
@@ -1445,7 +1476,7 @@ session_manager_main_enable (vlib_main_t * vm)
 
   /* Enable transports */
   transport_enable_disable (vm, 1);
-
+  transport_init_tx_pacers_period ();
   return 0;
 }