session vcl: refactor builtin tx event for main tx
[vpp.git] / src / vnet / session / session_node.c
index ce460e9..c61e89b 100644
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017 Cisco and/or its affiliates.
+ * Copyright (c) 2017-2019 Cisco and/or its affiliates.
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
  * You may obtain a copy of the License at:
 #include <math.h>
 #include <vlib/vlib.h>
 #include <vnet/vnet.h>
-#include <vnet/tcp/tcp.h>
 #include <vppinfra/elog.h>
+#include <vnet/session/transport.h>
+#include <vnet/session/session.h>
 #include <vnet/session/application.h>
+#include <vnet/session/application_interface.h>
+#include <vnet/session/application_local.h>
 #include <vnet/session/session_debug.h>
-#include <vlibmemory/unix_shared_memory_queue.h>
+#include <svm/queue.h>
+#include <sys/timerfd.h>
 
-vlib_node_registration_t session_queue_node;
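+/* Queue a ctrl event on this worker's pending-main list. If the list was
+ * empty, kick main (thread 0) with an rpc to drain it */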
+static inline void
+session_wrk_send_evt_to_main (session_worker_t *wrk, session_evt_elt_t *elt)
+{
+  session_evt_elt_t *he;
+  uword thread_index;
+  u8 is_empty;
+
+  thread_index = wrk->vm->thread_index;
+  he = clib_llist_elt (wrk->event_elts, wrk->evts_pending_main);
+  is_empty = clib_llist_is_empty (wrk->event_elts, evt_list, he);
+  clib_llist_add_tail (wrk->event_elts, evt_list, elt, he);
+  if (is_empty)
+    session_send_rpc_evt_to_thread (0, session_wrk_handle_evts_main_rpc,
+                                   uword_to_pointer (thread_index, void *));
+}
 
-typedef struct
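+/* Ctrl messages must be handled on main with the worker barrier held.
+ * Otherwise, forward the event to main and bail out of the handler */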
+#define app_check_thread_and_barrier(_wrk, _elt)                              \
+  if (!vlib_thread_is_main_w_barrier ())                                      \
+    {                                                                         \
+      session_wrk_send_evt_to_main (_wrk, _elt);                              \
+      return;                                                                 \
+    }
+
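+/* (Re)arm the worker's timerfd to fire every time_ns nanoseconds */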
+static void
+session_wrk_timerfd_update (session_worker_t *wrk, u64 time_ns)
 {
-  u32 session_index;
-  u32 server_thread_index;
-} session_queue_trace_t;
+  struct itimerspec its;
 
-/* packet trace format function */
-static u8 *
-format_session_queue_trace (u8 * s, va_list * args)
+  its.it_value.tv_sec = 0;
+  its.it_value.tv_nsec = time_ns;
+  its.it_interval.tv_sec = 0;
+  its.it_interval.tv_nsec = its.it_value.tv_nsec;
+
+  if (timerfd_settime (wrk->timerfd, 0, &its, NULL) == -1)
+    clib_warning ("timerfd_settime");
+}
+
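+/* Timerfd period in ns per worker state: 1ms for interrupt-mode workers,
+ * 100ms when idle; main uses 500ms whenever workers are configured */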
+always_inline u64
+session_wrk_tfd_timeout (session_wrk_state_t state, u32 thread_index)
 {
-  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
-  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
-  session_queue_trace_t *t = va_arg (*args, session_queue_trace_t *);
+  if (state == SESSION_WRK_INTERRUPT)
+    return thread_index ? 1e6 : vlib_num_workers () ? 5e8 : 1e6;
+  else if (state == SESSION_WRK_IDLE)
+    return thread_index ? 1e8 : vlib_num_workers () ? 5e8 : 1e8;
+  else
+    return 0;
+}
 
-  s = format (s, "SESSION_QUEUE: session index %d, server thread index %d",
-             t->session_index, t->server_thread_index);
-  return s;
+static inline void
+session_wrk_set_state (session_worker_t *wrk, session_wrk_state_t state)
+{
+  u64 time_ns;
+
+  wrk->state = state;
+  if (wrk->timerfd == -1)
+    return;
+  time_ns = session_wrk_tfd_timeout (state, wrk->vm->thread_index);
+  session_wrk_timerfd_update (wrk, time_ns);
 }
 
-vlib_node_registration_t session_queue_node;
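+/* Extended transport configs are passed as chunks in the app's rx mq
+ * segment. Resolve a chunk from its offset within that segment */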
+static transport_endpt_ext_cfg_t *
+session_mq_get_ext_config (application_t *app, uword offset)
+{
+  svm_fifo_chunk_t *c;
+  fifo_segment_t *fs;
 
-#define foreach_session_queue_error            \
-_(TX, "Packets transmitted")                   \
-_(TIMER, "Timer events")                       \
-_(NO_BUFFER, "Out of buffers")
+  fs = application_get_rx_mqs_segment (app);
+  c = fs_chunk_ptr (fs->h, offset);
+  return (transport_endpt_ext_cfg_t *) c->data;
+}
 
-typedef enum
+static void
+session_mq_free_ext_config (application_t *app, uword offset)
 {
-#define _(sym,str) SESSION_QUEUE_ERROR_##sym,
-  foreach_session_queue_error
-#undef _
-    SESSION_QUEUE_N_ERROR,
-} session_queue_error_t;
+  svm_fifo_chunk_t *c;
+  fifo_segment_t *fs;
 
-static char *session_queue_error_strings[] = {
-#define _(sym,string) string,
-  foreach_session_queue_error
-#undef _
-};
+  fs = application_get_rx_mqs_segment (app);
+  c = fs_chunk_ptr (fs->h, offset);
+  fifo_segment_collect_chunk (fs, 0 /* only one slice */, c);
+}
 
-static u32 session_type_to_next[] = {
-  SESSION_QUEUE_NEXT_TCP_IP4_OUTPUT,
-  SESSION_QUEUE_NEXT_IP4_LOOKUP,
-  SESSION_QUEUE_NEXT_TCP_IP6_OUTPUT,
-  SESSION_QUEUE_NEXT_IP6_LOOKUP,
-};
+static void
+session_mq_listen_handler (session_worker_t *wrk, session_evt_elt_t *elt)
+{
+  vnet_listen_args_t _a, *a = &_a;
+  session_listen_msg_t *mp;
+  app_worker_t *app_wrk;
+  application_t *app;
+  int rv;
 
-always_inline void
-session_tx_fifo_chain_tail (session_manager_main_t * smm, vlib_main_t * vm,
-                           u8 thread_index, svm_fifo_t * fifo,
-                           vlib_buffer_t * b0, u32 bi0, u8 n_bufs_per_seg,
-                           u32 left_from_seg, u32 * left_to_snd0,
-                           u16 * n_bufs, u32 * tx_offset, u16 deq_per_buf,
-                           u8 peek_data)
-{
-  vlib_buffer_t *chain_b0, *prev_b0;
-  u32 chain_bi0, to_deq;
-  u16 len_to_deq0, n_bytes_read;
-  u8 *data0, j;
-
-  b0->flags |= VLIB_BUFFER_TOTAL_LENGTH_VALID;
-  b0->total_length_not_including_first_buffer = 0;
-
-  chain_bi0 = bi0;
-  chain_b0 = b0;
-  to_deq = left_from_seg;
-  for (j = 1; j < n_bufs_per_seg; j++)
-    {
-      prev_b0 = chain_b0;
-      len_to_deq0 = clib_min (to_deq, deq_per_buf);
+  app_check_thread_and_barrier (wrk, elt);
 
-      *n_bufs -= 1;
-      chain_bi0 = smm->tx_buffers[thread_index][*n_bufs];
-      _vec_len (smm->tx_buffers[thread_index]) = *n_bufs;
+  mp = session_evt_ctrl_data (wrk, elt);
+  app = application_lookup (mp->client_index);
+  if (!app)
+    return;
 
-      chain_b0 = vlib_get_buffer (vm, chain_bi0);
-      chain_b0->current_data = 0;
-      data0 = vlib_buffer_get_current (chain_b0);
-      if (peek_data)
-       {
-         n_bytes_read = svm_fifo_peek (fifo, *tx_offset, len_to_deq0, data0);
-         *tx_offset += n_bytes_read;
-       }
-      else
-       {
-         n_bytes_read = svm_fifo_dequeue_nowait (fifo, len_to_deq0, data0);
-       }
-      ASSERT (n_bytes_read == len_to_deq0);
-      chain_b0->current_length = n_bytes_read;
-      b0->total_length_not_including_first_buffer += chain_b0->current_length;
+  clib_memset (a, 0, sizeof (*a));
+  a->sep.is_ip4 = mp->is_ip4;
+  ip_copy (&a->sep.ip, &mp->ip, mp->is_ip4);
+  a->sep.port = mp->port;
+  a->sep.fib_index = mp->vrf;
+  a->sep.sw_if_index = ENDPOINT_INVALID_INDEX;
+  a->sep.transport_proto = mp->proto;
+  a->app_index = app->app_index;
+  a->wrk_map_index = mp->wrk_index;
+  a->sep_ext.transport_flags = mp->flags;
 
-      /* update previous buffer */
-      prev_b0->next_buffer = chain_bi0;
-      prev_b0->flags |= VLIB_BUFFER_NEXT_PRESENT;
+  if (mp->ext_config)
+    a->sep_ext.ext_cfg = session_mq_get_ext_config (app, mp->ext_config);
 
-      /* update current buffer */
-      chain_b0->next_buffer = 0;
+  if ((rv = vnet_listen (a)))
+    clib_warning ("listen returned: %U", format_session_error, rv);
 
-      to_deq -= n_bytes_read;
-      if (to_deq == 0)
-       break;
-    }
-  ASSERT (to_deq == 0
-         && b0->total_length_not_including_first_buffer == left_from_seg);
-  *left_to_snd0 -= left_from_seg;
+  app_wrk = application_get_worker (app, mp->wrk_index);
+  mq_send_session_bound_cb (app_wrk->wrk_index, mp->context, a->handle, rv);
+
+  if (mp->ext_config)
+    session_mq_free_ext_config (app, mp->ext_config);
 }
 
-always_inline int
-session_tx_fifo_read_and_snd_i (vlib_main_t * vm, vlib_node_runtime_t * node,
-                               session_manager_main_t * smm,
-                               session_fifo_event_t * e0,
-                               stream_session_t * s0, u32 thread_index,
-                               int *n_tx_packets, u8 peek_data)
+static void
+session_mq_listen_uri_handler (session_worker_t *wrk, session_evt_elt_t *elt)
 {
-  u32 n_trace = vlib_get_trace_count (vm, node);
-  u32 left_to_snd0, max_len_to_snd0, len_to_deq0, snd_space0;
-  u32 n_bufs_per_evt, n_frames_per_evt, n_bufs_per_frame;
-  transport_connection_t *tc0;
-  transport_proto_vft_t *transport_vft;
-  u32 next_index, next0, *to_next, n_left_to_next, bi0;
-  vlib_buffer_t *b0;
-  u32 tx_offset = 0, max_dequeue0, n_bytes_per_seg, left_for_seg;
-  u16 snd_mss0, n_bufs_per_seg, n_bufs;
-  u8 *data0;
-  int i, n_bytes_read;
-  u32 n_bytes_per_buf, deq_per_buf, deq_per_first_buf;
-  u32 buffers_allocated, buffers_allocated_this_call;
+  vnet_listen_args_t _a, *a = &_a;
+  session_listen_uri_msg_t *mp;
+  app_worker_t *app_wrk;
+  application_t *app;
+  int rv;
 
-  next_index = next0 = session_type_to_next[s0->session_type];
+  app_check_thread_and_barrier (wrk, elt);
 
-  transport_vft = transport_protocol_get_vft (s0->session_type);
-  tc0 = transport_vft->get_connection (s0->connection_index, thread_index);
+  mp = session_evt_ctrl_data (wrk, elt);
+  app = application_lookup (mp->client_index);
+  if (!app)
+    return;
 
-  /* Make sure we have space to send and there's something to dequeue */
-  snd_mss0 = transport_vft->send_mss (tc0);
-  snd_space0 = transport_vft->send_space (tc0);
+  clib_memset (a, 0, sizeof (*a));
+  a->uri = (char *) mp->uri;
+  a->app_index = app->app_index;
+  rv = vnet_bind_uri (a);
+
+  app_wrk = application_get_worker (app, 0);
+  mq_send_session_bound_cb (app_wrk->wrk_index, mp->context, a->handle, rv);
+}
 
-  /* Can't make any progress */
-  if (snd_space0 == 0 || snd_mss0 == 0)
+static void
+session_mq_connect_one (session_connect_msg_t *mp)
+{
+  vnet_connect_args_t _a, *a = &_a;
+  app_worker_t *app_wrk;
+  application_t *app;
+  int rv;
+
+  app = application_lookup (mp->client_index);
+  if (!app)
+    return;
+
+  clib_memset (a, 0, sizeof (*a));
+  a->sep.is_ip4 = mp->is_ip4;
+  clib_memcpy_fast (&a->sep.ip, &mp->ip, sizeof (mp->ip));
+  a->sep.port = mp->port;
+  a->sep.transport_proto = mp->proto;
+  a->sep.peer.fib_index = mp->vrf;
+  a->sep.dscp = mp->dscp;
+  clib_memcpy_fast (&a->sep.peer.ip, &mp->lcl_ip, sizeof (mp->lcl_ip));
+  if (mp->is_ip4)
+    {
+      ip46_address_mask_ip4 (&a->sep.ip);
+      ip46_address_mask_ip4 (&a->sep.peer.ip);
+    }
+  a->sep.peer.port = mp->lcl_port;
+  a->sep.peer.sw_if_index = ENDPOINT_INVALID_INDEX;
+  a->sep_ext.parent_handle = mp->parent_handle;
+  a->sep_ext.transport_flags = mp->flags;
+  a->api_context = mp->context;
+  a->app_index = app->app_index;
+  a->wrk_map_index = mp->wrk_index;
+
+  if (mp->ext_config)
+    a->sep_ext.ext_cfg = session_mq_get_ext_config (app, mp->ext_config);
+
+  if ((rv = vnet_connect (a)))
     {
-      vec_add1 (smm->pending_event_vector[thread_index], *e0);
-      return 0;
+      clib_warning ("connect returned: %U", format_session_error, rv);
+      app_wrk = application_get_worker (app, mp->wrk_index);
+      mq_send_session_connected_cb (app_wrk->wrk_index, mp->context, 0, rv);
     }
 
-  /* Allow enqueuing of a new event */
-  svm_fifo_unset_event (s0->server_tx_fifo);
+  if (mp->ext_config)
+    session_mq_free_ext_config (app, mp->ext_config);
+}
 
-  /* Check how much we can pull. */
-  max_dequeue0 = svm_fifo_max_dequeue (s0->server_tx_fifo);
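+/* Drain at most 32 of the connects queued on the first worker, freeing
+ * each elt's ctrl data, and reschedule the rpc if more remain */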
+static void
+session_mq_handle_connects_rpc (void *arg)
+{
+  u32 max_connects = 32, n_connects = 0;
+  session_evt_elt_t *he, *elt, *next;
+  session_worker_t *fwrk;
 
-  if (peek_data)
-    {
-      /* Offset in rx fifo from where to peek data */
-      tx_offset = transport_vft->tx_fifo_offset (tc0);
-      if (PREDICT_FALSE (tx_offset >= max_dequeue0))
-       max_dequeue0 = 0;
-      else
-       max_dequeue0 -= tx_offset;
-    }
+  ASSERT (session_vlib_thread_is_cl_thread ());
 
-  /* Nothing to read return */
-  if (max_dequeue0 == 0)
-    return 0;
+  /* Pending connects on linked list pertaining to first worker */
+  fwrk = session_main_get_worker (transport_cl_thread ());
+  if (!fwrk->n_pending_connects)
+    return;
 
-  /* Ensure we're not writing more than transport window allows */
-  if (max_dequeue0 < snd_space0)
+  he = clib_llist_elt (fwrk->event_elts, fwrk->pending_connects);
+  elt = clib_llist_next (fwrk->event_elts, evt_list, he);
+
+  /* Avoid holding the worker for too long */
+  while (n_connects < max_connects && elt != he)
     {
-      /* Constrained by tx queue. Try to send only fully formed segments */
-      max_len_to_snd0 = (max_dequeue0 > snd_mss0) ?
-       max_dequeue0 - max_dequeue0 % snd_mss0 : max_dequeue0;
-      /* TODO Nagle ? */
+      next = clib_llist_next (fwrk->event_elts, evt_list, elt);
+      clib_llist_remove (fwrk->event_elts, evt_list, elt);
+      session_mq_connect_one (session_evt_ctrl_data (fwrk, elt));
+      session_evt_ctrl_data_free (fwrk, elt);
+      clib_llist_put (fwrk->event_elts, elt);
+      elt = next;
+      n_connects += 1;
     }
-  else
+
+  /* Decrement with worker barrier */
+  fwrk->n_pending_connects -= n_connects;
+  if (fwrk->n_pending_connects > 0)
     {
-      /* Expectation is that snd_space0 is already a multiple of snd_mss */
-      max_len_to_snd0 = snd_space0;
+      session_send_rpc_evt_to_thread_force (fwrk->vm->thread_index,
+                                           session_mq_handle_connects_rpc, 0);
     }
+}
 
-  n_bytes_per_buf = vlib_buffer_free_list_buffer_size
-    (vm, VLIB_BUFFER_DEFAULT_FREE_LIST_INDEX);
-  ASSERT (n_bytes_per_buf > MAX_HDRS_LEN);
-  n_bytes_per_seg = MAX_HDRS_LEN + snd_mss0;
-  n_bufs_per_seg = ceil ((double) n_bytes_per_seg / n_bytes_per_buf);
-  n_bufs_per_evt = ceil ((double) max_len_to_snd0 / n_bytes_per_seg);
-  n_frames_per_evt = ceil ((double) n_bufs_per_evt / VLIB_FRAME_SIZE);
-  n_bufs_per_frame = n_bufs_per_seg * VLIB_FRAME_SIZE;
-
-  deq_per_buf = clib_min (snd_mss0, n_bytes_per_buf);
-  deq_per_first_buf = clib_min (snd_mss0, n_bytes_per_buf - MAX_HDRS_LEN);
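+/* Queue connects on the first worker's pending list. If ctrl events are
+ * already pending on main, postpone the connect to preserve ordering */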
+static void
+session_mq_connect_handler (session_worker_t *wrk, session_evt_elt_t *elt)
+{
+  u32 thread_index = wrk - session_main.wrk;
+  session_evt_elt_t *he;
 
-  n_bufs = vec_len (smm->tx_buffers[thread_index]);
-  left_to_snd0 = max_len_to_snd0;
-  for (i = 0; i < n_frames_per_evt; i++)
+  if (PREDICT_FALSE (thread_index > transport_cl_thread ()))
     {
-      /* Make sure we have at least one full frame of buffers ready */
-      if (PREDICT_FALSE (n_bufs < n_bufs_per_frame))
-       {
-         vec_validate (smm->tx_buffers[thread_index],
-                       n_bufs + n_bufs_per_frame - 1);
-         buffers_allocated = 0;
-         do
-           {
-             buffers_allocated_this_call = vlib_buffer_alloc (vm,
-                                                              &smm->tx_buffers
-                                                              [thread_index]
-                                                              [n_bufs +
-                                                               buffers_allocated],
-                                                              n_bufs_per_frame
-                                                              -
-                                                              buffers_allocated);
-             buffers_allocated += buffers_allocated_this_call;
-           }
-         while (buffers_allocated_this_call > 0
-                && ((buffers_allocated + n_bufs < n_bufs_per_frame)));
-
-         n_bufs += buffers_allocated;
-         _vec_len (smm->tx_buffers[thread_index]) = n_bufs;
+      clib_warning ("Connect on wrong thread. Dropping");
+      return;
+    }
 
-         if (PREDICT_FALSE (n_bufs < n_bufs_per_frame))
-           {
-             vec_add1 (smm->pending_event_vector[thread_index], *e0);
-             return -1;
-           }
-         ASSERT (n_bufs >= n_bufs_per_frame);
-       }
+  /* If on worker, check if main has any pending messages. Avoids reordering
+   * with other control messages that need to be handled by main
+   */
+  if (thread_index)
+    {
+      he = clib_llist_elt (wrk->event_elts, wrk->evts_pending_main);
 
-      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
-      while (left_to_snd0 && n_left_to_next)
+      /* Events pending on main, postpone to avoid reordering */
+      if (!clib_llist_is_empty (wrk->event_elts, evt_list, he))
        {
-         /*
-          * Handle first buffer in chain separately
-          */
-
-         /* Get free buffer */
-         ASSERT (n_bufs >= 1);
-         bi0 = smm->tx_buffers[thread_index][--n_bufs];
-         _vec_len (smm->tx_buffers[thread_index]) = n_bufs;
-
-         /* usual speculation, or the enqueue_x1 macro will barf */
-         to_next[0] = bi0;
-         to_next += 1;
-         n_left_to_next -= 1;
-
-         b0 = vlib_get_buffer (vm, bi0);
-         b0->error = 0;
-         b0->flags |= VNET_BUFFER_F_LOCALLY_ORIGINATED;
-         b0->current_data = 0;
-         b0->total_length_not_including_first_buffer = 0;
-
-         len_to_deq0 = clib_min (left_to_snd0, deq_per_first_buf);
-         data0 = vlib_buffer_make_headroom (b0, MAX_HDRS_LEN);
-         if (peek_data)
-           {
-             n_bytes_read = svm_fifo_peek (s0->server_tx_fifo, tx_offset,
-                                           len_to_deq0, data0);
-             if (n_bytes_read <= 0)
-               goto dequeue_fail;
-             /* Keep track of progress locally, transport is also supposed to
-              * increment it independently when pushing the header */
-             tx_offset += n_bytes_read;
-           }
-         else
-           {
-             n_bytes_read = svm_fifo_dequeue_nowait (s0->server_tx_fifo,
-                                                     len_to_deq0, data0);
-             if (n_bytes_read <= 0)
-               goto dequeue_fail;
-           }
-
-         b0->current_length = n_bytes_read;
-
-         left_to_snd0 -= n_bytes_read;
-         *n_tx_packets = *n_tx_packets + 1;
-
-         /*
-          * Fill in the remaining buffers in the chain, if any
-          */
-         if (PREDICT_FALSE (n_bufs_per_seg > 1 && left_to_snd0))
-           {
-             left_for_seg = clib_min (snd_mss0 - n_bytes_read, left_to_snd0);
-             session_tx_fifo_chain_tail (smm, vm, thread_index,
-                                         s0->server_tx_fifo, b0, bi0,
-                                         n_bufs_per_seg, left_for_seg,
-                                         &left_to_snd0, &n_bufs, &tx_offset,
-                                         deq_per_buf, peek_data);
-           }
+         clib_llist_add_tail (wrk->event_elts, evt_list, elt, he);
+         return;
+       }
+    }
 
-         /* Ask transport to push header after current_length and
-          * total_length_not_including_first_buffer are updated */
-         transport_vft->push_header (tc0, b0);
+  /* Add to pending list to be handled by first worker */
+  he = clib_llist_elt (wrk->event_elts, wrk->pending_connects);
+  clib_llist_add_tail (wrk->event_elts, evt_list, elt, he);
 
-         /* *INDENT-OFF* */
-         SESSION_EVT_DBG(SESSION_EVT_DEQ, s0, ({
-             ed->data[0] = e0->event_type;
-             ed->data[1] = max_dequeue0;
-             ed->data[2] = len_to_deq0;
-             ed->data[3] = left_to_snd0;
-         }));
-         /* *INDENT-ON* */
+  /* Decremented with worker barrier */
+  wrk->n_pending_connects += 1;
+  if (wrk->n_pending_connects == 1)
+    {
+      session_send_rpc_evt_to_thread_force (thread_index,
+                                           session_mq_handle_connects_rpc, 0);
+    }
+}
 
-         VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b0);
-         tcp_trajectory_add_start (b0, 3);
+static void
+session_mq_connect_uri_handler (session_worker_t *wrk, session_evt_elt_t *elt)
+{
+  vnet_connect_args_t _a, *a = &_a;
+  session_connect_uri_msg_t *mp;
+  app_worker_t *app_wrk;
+  application_t *app;
+  int rv;
 
-         if (PREDICT_FALSE (n_trace > 0))
-           {
-             session_queue_trace_t *t0;
-             vlib_trace_buffer (vm, node, next_index, b0,
-                                1 /* follow_chain */ );
-             vlib_set_trace_count (vm, node, --n_trace);
-             t0 = vlib_add_trace (vm, node, b0, sizeof (*t0));
-             t0->session_index = s0->session_index;
-             t0->server_thread_index = s0->thread_index;
-           }
+  app_check_thread_and_barrier (wrk, elt);
 
-         vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
-                                          to_next, n_left_to_next,
-                                          bi0, next0);
-       }
-      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
-    }
+  mp = session_evt_ctrl_data (wrk, elt);
+  app = application_lookup (mp->client_index);
+  if (!app)
+    return;
 
-  /* If we couldn't dequeue all bytes mark as partially read */
-  if (max_len_to_snd0 < max_dequeue0)
+  clib_memset (a, 0, sizeof (*a));
+  a->uri = (char *) mp->uri;
+  a->api_context = mp->context;
+  a->app_index = app->app_index;
+  if ((rv = vnet_connect_uri (a)))
     {
-      /* If we don't already have new event */
-      if (svm_fifo_set_event (s0->server_tx_fifo))
-       {
-         vec_add1 (smm->pending_event_vector[thread_index], *e0);
-       }
+      clib_warning ("connect_uri returned: %d", rv);
+      app_wrk = application_get_worker (app, 0 /* default wrk only */ );
+      mq_send_session_connected_cb (app_wrk->wrk_index, mp->context, 0, rv);
     }
-  return 0;
+}
 
-dequeue_fail:
-  /*
-   * Can't read from fifo. If we don't already have an event, save as partially
-   * read, return buff to free list and return
-   */
-  clib_warning ("dequeue fail");
+static void
+session_mq_shutdown_handler (void *data)
+{
+  session_shutdown_msg_t *mp = (session_shutdown_msg_t *) data;
+  vnet_shutdown_args_t _a, *a = &_a;
+  application_t *app;
 
-  if (svm_fifo_set_event (s0->server_tx_fifo))
-    {
-      vec_add1 (smm->pending_event_vector[thread_index], *e0);
-    }
-  vlib_put_next_frame (vm, node, next_index, n_left_to_next + 1);
-  _vec_len (smm->tx_buffers[thread_index]) += 1;
+  app = application_lookup (mp->client_index);
+  if (!app)
+    return;
 
-  return 0;
+  a->app_index = app->app_index;
+  a->handle = mp->handle;
+  vnet_shutdown_session (a);
 }
 
-int
-session_tx_fifo_peek_and_snd (vlib_main_t * vm, vlib_node_runtime_t * node,
-                             session_manager_main_t * smm,
-                             session_fifo_event_t * e0,
-                             stream_session_t * s0, u32 thread_index,
-                             int *n_tx_pkts)
+static void
+session_mq_disconnect_handler (void *data)
 {
-  return session_tx_fifo_read_and_snd_i (vm, node, smm, e0, s0, thread_index,
-                                        n_tx_pkts, 1);
+  session_disconnect_msg_t *mp = (session_disconnect_msg_t *) data;
+  vnet_disconnect_args_t _a, *a = &_a;
+  application_t *app;
+
+  app = application_lookup (mp->client_index);
+  if (!app)
+    return;
+
+  a->app_index = app->app_index;
+  a->handle = mp->handle;
+  vnet_disconnect_session (a);
 }
 
-int
-session_tx_fifo_dequeue_and_snd (vlib_main_t * vm, vlib_node_runtime_t * node,
-                                session_manager_main_t * smm,
-                                session_fifo_event_t * e0,
-                                stream_session_t * s0, u32 thread_index,
-                                int *n_tx_pkts)
+static void
+app_mq_detach_handler (session_worker_t *wrk, session_evt_elt_t *elt)
 {
-  return session_tx_fifo_read_and_snd_i (vm, node, smm, e0, s0, thread_index,
-                                        n_tx_pkts, 0);
+  vnet_app_detach_args_t _a, *a = &_a;
+  session_app_detach_msg_t *mp;
+  application_t *app;
+
+  app_check_thread_and_barrier (wrk, elt);
+
+  mp = session_evt_ctrl_data (wrk, elt);
+  app = application_lookup (mp->client_index);
+  if (!app)
+    return;
+
+  a->app_index = app->app_index;
+  a->api_client_index = mp->client_index;
+  vnet_application_detach (a);
 }
 
-always_inline stream_session_t *
-session_event_get_session (session_fifo_event_t * e, u8 thread_index)
+static void
+session_mq_unlisten_handler (session_worker_t *wrk, session_evt_elt_t *elt)
 {
-  return session_get_if_valid (e->fifo->master_session_index, thread_index);
+  vnet_unlisten_args_t _a, *a = &_a;
+  session_unlisten_msg_t *mp;
+  app_worker_t *app_wrk;
+  session_handle_t sh;
+  application_t *app;
+  int rv;
+
+  app_check_thread_and_barrier (wrk, elt);
+
+  mp = session_evt_ctrl_data (wrk, elt);
+  sh = mp->handle;
+
+  app = application_lookup (mp->client_index);
+  if (!app)
+    return;
+
+  clib_memset (a, 0, sizeof (*a));
+  a->app_index = app->app_index;
+  a->handle = sh;
+  a->wrk_map_index = mp->wrk_index;
+
+  if ((rv = vnet_unlisten (a)))
+    clib_warning ("unlisten returned: %d", rv);
+
+  app_wrk = application_get_worker (app, a->wrk_map_index);
+  if (!app_wrk)
+    return;
+
+  mq_send_unlisten_reply (app_wrk, sh, mp->context, rv);
 }
 
-void
-dump_thread_0_event_queue (void)
+static void
+session_mq_accepted_reply_handler (session_worker_t *wrk,
+                                  session_evt_elt_t *elt)
 {
-  session_manager_main_t *smm = vnet_get_session_manager_main ();
-  vlib_main_t *vm = &vlib_global_main;
-  u32 my_thread_index = vm->thread_index;
-  session_fifo_event_t _e, *e = &_e;
-  stream_session_t *s0;
-  int i, index;
-  i8 *headp;
+  vnet_disconnect_args_t _a = { 0 }, *a = &_a;
+  session_accepted_reply_msg_t *mp;
+  session_state_t old_state;
+  app_worker_t *app_wrk;
+  session_t *s;
+
+  mp = session_evt_ctrl_data (wrk, elt);
+
+  /* Process this on the main thread. Main is not polling its message
+   * queue, so a worker picked up the notification; punt it back to main */
+  if (session_thread_from_handle (mp->handle) == 0 && vlib_num_workers () &&
+      vlib_get_thread_index () != 0)
+    {
+      session_wrk_send_evt_to_main (wrk, elt);
+      return;
+    }
 
-  unix_shared_memory_queue_t *q;
-  q = smm->vpp_event_queues[my_thread_index];
+  s = session_get_from_handle_if_valid (mp->handle);
+  if (!s)
+    return;
 
-  index = q->head;
+  app_wrk = app_worker_get (s->app_wrk_index);
+  if (app_wrk->app_index != mp->context)
+    {
+      clib_warning ("app doesn't own session");
+      return;
+    }
 
-  for (i = 0; i < q->cursize; i++)
+  /* Server isn't interested, disconnect the session */
+  if (mp->retval)
     {
-      headp = (i8 *) (&q->data[0] + q->elsize * index);
-      clib_memcpy (e, headp, q->elsize);
+      a->app_index = mp->context;
+      a->handle = mp->handle;
+      vnet_disconnect_session (a);
+      return;
+    }
 
-      switch (e->event_type)
-       {
-       case FIFO_EVENT_APP_TX:
-         s0 = session_event_get_session (e, my_thread_index);
-         fformat (stdout, "[%04d] TX session %d\n", i, s0->session_index);
-         break;
+  /* Special handling for cut-through sessions */
+  if (!session_has_transport (s))
+    {
+      session_set_state (s, SESSION_STATE_READY);
+      ct_session_connect_notify (s, SESSION_E_NONE);
+      return;
+    }
 
-       case FIFO_EVENT_DISCONNECT:
-         s0 = session_get_from_handle (e->session_handle);
-         fformat (stdout, "[%04d] disconnect session %d\n", i,
-                  s0->session_index);
-         break;
+  old_state = s->session_state;
+  session_set_state (s, SESSION_STATE_READY);
 
-       case FIFO_EVENT_BUILTIN_RX:
-         s0 = session_event_get_session (e, my_thread_index);
-         fformat (stdout, "[%04d] builtin_rx %d\n", i, s0->session_index);
-         break;
+  if (!svm_fifo_is_empty_prod (s->rx_fifo))
+    app_worker_lock_and_send_event (app_wrk, s, SESSION_IO_EVT_RX);
 
-       case FIFO_EVENT_RPC:
-         fformat (stdout, "[%04d] RPC call %llx with %llx\n",
-                  i, (u64) (e->rpc_args.fp), (u64) (e->rpc_args.arg));
-         break;
+  /* Closed while waiting for app to reply. Resend disconnect */
+  if (old_state >= SESSION_STATE_TRANSPORT_CLOSING)
+    {
+      app_worker_close_notify (app_wrk, s);
+      session_set_state (s, old_state);
+      return;
+    }
+}
 
-       default:
-         fformat (stdout, "[%04d] unhandled event type %d\n",
-                  i, e->event_type);
-         break;
-       }
+static void
+session_mq_reset_reply_handler (void *data)
+{
+  vnet_disconnect_args_t _a = { 0 }, *a = &_a;
+  session_reset_reply_msg_t *mp;
+  app_worker_t *app_wrk;
+  session_t *s;
+  application_t *app;
+  u32 index, thread_index;
+
+  mp = (session_reset_reply_msg_t *) data;
+  app = application_lookup (mp->context);
+  if (!app)
+    return;
 
-      index++;
+  session_parse_handle (mp->handle, &index, &thread_index);
+  s = session_get_if_valid (index, thread_index);
 
-      if (index == q->maxsize)
-       index = 0;
+  /* No session or not the right session */
+  if (!s || s->session_state < SESSION_STATE_TRANSPORT_CLOSING)
+    return;
+
+  app_wrk = app_worker_get (s->app_wrk_index);
+  if (!app_wrk || app_wrk->app_index != app->app_index)
+    {
+      clib_warning ("App %u does not own handle 0x%lx!", app->app_index,
+                   mp->handle);
+      return;
+    }
+
+  /* Client objected to resetting the session, log and continue */
+  if (mp->retval)
+    {
+      clib_warning ("client retval %d", mp->retval);
+      return;
     }
+
+  /* This comes as a response to a reset. The transport is only waiting
+   * for confirmation to remove connection state, so the disconnect below
+   * acks the close rather than initiating a new one */
+  a->handle = mp->handle;
+  a->app_index = app->app_index;
+  vnet_disconnect_session (a);
 }
 
-static u8
-session_node_cmp_event (session_fifo_event_t * e, svm_fifo_t * f)
+static void
+session_mq_disconnected_handler (void *data)
 {
-  stream_session_t *s;
-  switch (e->event_type)
+  session_disconnected_reply_msg_t *rmp;
+  vnet_disconnect_args_t _a, *a = &_a;
+  svm_msg_q_msg_t _msg, *msg = &_msg;
+  session_disconnected_msg_t *mp;
+  app_worker_t *app_wrk;
+  session_event_t *evt;
+  session_t *s;
+  application_t *app;
+  int rv = 0;
+
+  mp = (session_disconnected_msg_t *) data;
+  if (!(s = session_get_from_handle_if_valid (mp->handle)))
     {
-    case FIFO_EVENT_APP_RX:
-    case FIFO_EVENT_APP_TX:
-    case FIFO_EVENT_BUILTIN_RX:
-      if (e->fifo == f)
-       return 1;
-      break;
-    case FIFO_EVENT_DISCONNECT:
-      break;
-    case FIFO_EVENT_RPC:
-      s = session_get_from_handle (e->session_handle);
-      if (!s)
-       {
-         clib_warning ("session has event but doesn't exist!");
-         break;
-       }
-      if (s->server_rx_fifo == f || s->server_tx_fifo == f)
-       return 1;
-      break;
-    default:
-      break;
+      clib_warning ("could not disconnect handle %llu", mp->handle);
+      return;
     }
-  return 0;
+  app_wrk = app_worker_get (s->app_wrk_index);
+  app = application_lookup (mp->client_index);
+  if (!(app_wrk && app && app->app_index == app_wrk->app_index))
+    {
+      clib_warning ("could not disconnect session: %llu app: %u",
+                   mp->handle, mp->client_index);
+      return;
+    }
+
+  a->handle = mp->handle;
+  a->app_index = app_wrk->wrk_index;
+  rv = vnet_disconnect_session (a);
+
+  svm_msg_q_lock_and_alloc_msg_w_ring (app_wrk->event_queue,
+                                      SESSION_MQ_CTRL_EVT_RING,
+                                      SVM_Q_WAIT, msg);
+  evt = svm_msg_q_msg_data (app_wrk->event_queue, msg);
+  clib_memset (evt, 0, sizeof (*evt));
+  evt->event_type = SESSION_CTRL_EVT_DISCONNECTED_REPLY;
+  rmp = (session_disconnected_reply_msg_t *) evt->data;
+  rmp->handle = mp->handle;
+  rmp->context = mp->context;
+  rmp->retval = rv;
+  svm_msg_q_add_and_unlock (app_wrk->event_queue, msg);
 }
 
-u8
-session_node_lookup_fifo_event (svm_fifo_t * f, session_fifo_event_t * e)
+static void
+session_mq_disconnected_reply_handler (void *data)
 {
-  session_manager_main_t *smm = vnet_get_session_manager_main ();
-  unix_shared_memory_queue_t *q;
-  session_fifo_event_t *pending_event_vector, *evt;
-  int i, index, found = 0;
-  i8 *headp;
-  u8 thread_index;
+  session_disconnected_reply_msg_t *mp;
+  vnet_disconnect_args_t _a, *a = &_a;
+  application_t *app;
 
-  ASSERT (e);
-  thread_index = f->master_thread_index;
-  /*
-   * Search evt queue
-   */
-  q = smm->vpp_event_queues[thread_index];
-  index = q->head;
-  for (i = 0; i < q->cursize; i++)
-    {
-      headp = (i8 *) (&q->data[0] + q->elsize * index);
-      clib_memcpy (e, headp, q->elsize);
-      found = session_node_cmp_event (e, f);
-      if (found)
-       break;
-      if (++index == q->maxsize)
-       index = 0;
+  mp = (session_disconnected_reply_msg_t *) data;
+
+  /* Client objected to disconnecting the session, log and continue */
+  if (mp->retval)
+    {
+      clib_warning ("client retval %d", mp->retval);
+      return;
+    }
+
+  /* Disconnect has been confirmed. Confirm close to transport */
+  app = application_lookup (mp->context);
+  if (app)
+    {
+      a->handle = mp->handle;
+      a->app_index = app->app_index;
+      vnet_disconnect_session (a);
     }
-  /*
-   * Search pending events vector
-   */
-  pending_event_vector = smm->pending_event_vector[thread_index];
-  vec_foreach (evt, pending_event_vector)
-  {
-    found = session_node_cmp_event (evt, f);
-    if (found)
-      {
-       clib_memcpy (e, evt, sizeof (*evt));
-       break;
-      }
-  }
-  return found;
 }
 
-static uword
-session_queue_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
-                      vlib_frame_t * frame)
+static void
+session_mq_worker_update_handler (void *data)
 {
-  session_manager_main_t *smm = vnet_get_session_manager_main ();
-  session_fifo_event_t *my_pending_event_vector, *pending_disconnects, *e;
-  session_fifo_event_t *my_fifo_events;
-  u32 n_to_dequeue, n_events;
-  unix_shared_memory_queue_t *q;
+  session_worker_update_msg_t *mp = (session_worker_update_msg_t *) data;
+  session_worker_update_reply_msg_t *rmp;
+  svm_msg_q_msg_t _msg, *msg = &_msg;
+  app_worker_t *app_wrk;
+  u32 owner_app_wrk_map;
+  session_event_t *evt;
+  session_t *s;
   application_t *app;
-  int n_tx_packets = 0;
-  u32 my_thread_index = vm->thread_index;
-  int i, rv;
-  f64 now = vlib_time_now (vm);
-  void (*fp) (void *);
 
-  SESSION_EVT_DBG (SESSION_EVT_POLL_GAP_TRACK, smm, my_thread_index);
+  app = application_lookup (mp->client_index);
+  if (!app)
+    return;
+  if (!(s = session_get_from_handle_if_valid (mp->handle)))
+    {
+      clib_warning ("invalid handle %llu", mp->handle);
+      return;
+    }
+  app_wrk = app_worker_get (s->app_wrk_index);
+  if (app_wrk->app_index != app->app_index)
+    {
+      clib_warning ("app %u does not own session %llu", app->app_index,
+                   mp->handle);
+      return;
+    }
+  owner_app_wrk_map = app_wrk->wrk_map_index;
+  app_wrk = application_get_worker (app, mp->wrk_index);
+
+  /* This needs to come from the new owner */
+  if (mp->req_wrk_index == owner_app_wrk_map)
+    {
+      session_req_worker_update_msg_t *wump;
+
+      svm_msg_q_lock_and_alloc_msg_w_ring (app_wrk->event_queue,
+                                          SESSION_MQ_CTRL_EVT_RING,
+                                          SVM_Q_WAIT, msg);
+      evt = svm_msg_q_msg_data (app_wrk->event_queue, msg);
+      clib_memset (evt, 0, sizeof (*evt));
+      evt->event_type = SESSION_CTRL_EVT_REQ_WORKER_UPDATE;
+      wump = (session_req_worker_update_msg_t *) evt->data;
+      wump->session_handle = mp->handle;
+      svm_msg_q_add_and_unlock (app_wrk->event_queue, msg);
+      return;
+    }
+
+  app_worker_own_session (app_wrk, s);
 
   /*
-   *  Update TCP time
+   * Send reply
    */
-  tcp_update_time (now, my_thread_index);
+  svm_msg_q_lock_and_alloc_msg_w_ring (app_wrk->event_queue,
+                                      SESSION_MQ_CTRL_EVT_RING,
+                                      SVM_Q_WAIT, msg);
+  evt = svm_msg_q_msg_data (app_wrk->event_queue, msg);
+  clib_memset (evt, 0, sizeof (*evt));
+  evt->event_type = SESSION_CTRL_EVT_WORKER_UPDATE_REPLY;
+  rmp = (session_worker_update_reply_msg_t *) evt->data;
+  rmp->handle = mp->handle;
+  if (s->rx_fifo)
+    rmp->rx_fifo = fifo_segment_fifo_offset (s->rx_fifo);
+  if (s->tx_fifo)
+    rmp->tx_fifo = fifo_segment_fifo_offset (s->tx_fifo);
+  rmp->segment_handle = session_segment_handle (s);
+  svm_msg_q_add_and_unlock (app_wrk->event_queue, msg);
 
   /*
-   * Get vpp queue events
+   * Retransmit messages that may have been lost
    */
-  q = smm->vpp_event_queues[my_thread_index];
-  if (PREDICT_FALSE (q == 0))
-    return 0;
+  if (s->tx_fifo && !svm_fifo_is_empty (s->tx_fifo))
+    session_send_io_evt_to_thread (s->tx_fifo, SESSION_IO_EVT_TX);
 
-  my_fifo_events = smm->free_event_vector[my_thread_index];
+  if (s->rx_fifo && !svm_fifo_is_empty (s->rx_fifo))
+    app_worker_lock_and_send_event (app_wrk, s, SESSION_IO_EVT_RX);
 
-  /* min number of events we can dequeue without blocking */
-  n_to_dequeue = q->cursize;
-  my_pending_event_vector = smm->pending_event_vector[my_thread_index];
-  pending_disconnects = smm->pending_disconnects[my_thread_index];
+  if (s->session_state >= SESSION_STATE_TRANSPORT_CLOSING)
+    app_worker_close_notify (app_wrk, s);
+}
 
-  if (!n_to_dequeue && !vec_len (my_pending_event_vector)
-      && !vec_len (pending_disconnects))
-    return 0;
+static void
+session_mq_app_wrk_rpc_handler (void *data)
+{
+  session_app_wrk_rpc_msg_t *mp = (session_app_wrk_rpc_msg_t *) data;
+  svm_msg_q_msg_t _msg, *msg = &_msg;
+  session_app_wrk_rpc_msg_t *rmp;
+  app_worker_t *app_wrk;
+  session_event_t *evt;
+  application_t *app;
 
-  SESSION_EVT_DBG (SESSION_EVT_DEQ_NODE, 0);
+  app = application_lookup (mp->client_index);
+  if (!app)
+    return;
+
+  app_wrk = application_get_worker (app, mp->wrk_index);
+
+  svm_msg_q_lock_and_alloc_msg_w_ring (app_wrk->event_queue,
+                                      SESSION_MQ_CTRL_EVT_RING, SVM_Q_WAIT,
+                                      msg);
+  evt = svm_msg_q_msg_data (app_wrk->event_queue, msg);
+  clib_memset (evt, 0, sizeof (*evt));
+  evt->event_type = SESSION_CTRL_EVT_APP_WRK_RPC;
+  rmp = (session_app_wrk_rpc_msg_t *) evt->data;
+  clib_memcpy (rmp->data, mp->data, sizeof (mp->data));
+  svm_msg_q_add_and_unlock (app_wrk->event_queue, msg);
+}
 
-  /*
-   * If we didn't manage to process previous events try going
-   * over them again without dequeuing new ones.
-   */
-  /* XXX: Block senders to sessions that can't keep up */
-  if (0 && vec_len (my_pending_event_vector) >= 100)
-    {
-      clib_warning ("too many fifo events unsolved");
-      goto skip_dequeue;
-    }
+static void
+session_mq_transport_attr_handler (void *data)
+{
+  session_transport_attr_msg_t *mp = (session_transport_attr_msg_t *) data;
+  session_transport_attr_reply_msg_t *rmp;
+  svm_msg_q_msg_t _msg, *msg = &_msg;
+  app_worker_t *app_wrk;
+  session_event_t *evt;
+  application_t *app;
+  session_t *s;
+  int rv;
 
-  /* See you in the next life, don't be late */
-  if (pthread_mutex_trylock (&q->mutex))
-    return 0;
+  app = application_lookup (mp->client_index);
+  if (!app)
+    return;
 
-  for (i = 0; i < n_to_dequeue; i++)
+  if (!(s = session_get_from_handle_if_valid (mp->handle)))
     {
-      vec_add2 (my_fifo_events, e, 1);
-      unix_shared_memory_queue_sub_raw (q, (u8 *) e);
+      clib_warning ("invalid handle %llu", mp->handle);
+      return;
+    }
+  app_wrk = app_worker_get (s->app_wrk_index);
+  if (app_wrk->app_index != app->app_index)
+    {
+      clib_warning ("app %u does not own session %llu", app->app_index,
+                   mp->handle);
+      return;
     }
 
-  /* The other side of the connection is not polling */
-  if (q->cursize < (q->maxsize / 8))
-    (void) pthread_cond_broadcast (&q->condvar);
-  pthread_mutex_unlock (&q->mutex);
+  rv = session_transport_attribute (s, mp->is_get, &mp->attr);
+
+  svm_msg_q_lock_and_alloc_msg_w_ring (
+    app_wrk->event_queue, SESSION_MQ_CTRL_EVT_RING, SVM_Q_WAIT, msg);
+  evt = svm_msg_q_msg_data (app_wrk->event_queue, msg);
+  clib_memset (evt, 0, sizeof (*evt));
+  evt->event_type = SESSION_CTRL_EVT_TRANSPORT_ATTR_REPLY;
+  rmp = (session_transport_attr_reply_msg_t *) evt->data;
+  rmp->handle = mp->handle;
+  rmp->retval = rv;
+  rmp->is_get = mp->is_get;
+  if (!rv && mp->is_get)
+    rmp->attr = mp->attr;
+  svm_msg_q_add_and_unlock (app_wrk->event_queue, msg);
+}
 
-  vec_append (my_fifo_events, my_pending_event_vector);
-  vec_append (my_fifo_events, smm->pending_disconnects[my_thread_index]);
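+/* Main-thread rpc: with the worker barrier held, drain the ctrl events a
+ * worker deferred to main via session_wrk_send_evt_to_main */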
+void
+session_wrk_handle_evts_main_rpc (void *args)
+{
+  vlib_main_t *vm = vlib_get_main ();
+  clib_llist_index_t ei, next_ei;
+  session_evt_elt_t *he, *elt;
+  session_worker_t *fwrk;
+  u32 thread_index;
 
-  _vec_len (my_pending_event_vector) = 0;
-  smm->pending_event_vector[my_thread_index] = my_pending_event_vector;
-  _vec_len (smm->pending_disconnects[my_thread_index]) = 0;
+  vlib_worker_thread_barrier_sync (vm);
 
-skip_dequeue:
-  n_events = vec_len (my_fifo_events);
-  for (i = 0; i < n_events; i++)
-    {
-      stream_session_t *s0;    /* $$$ prefetch 1 ahead maybe */
-      session_fifo_event_t *e0;
+  thread_index = pointer_to_uword (args);
+  fwrk = session_main_get_worker (thread_index);
 
-      e0 = &my_fifo_events[i];
+  he = clib_llist_elt (fwrk->event_elts, fwrk->evts_pending_main);
+  ei = clib_llist_next_index (he, evt_list);
 
-      switch (e0->event_type)
+  while (ei != fwrk->evts_pending_main)
+    {
+      elt = clib_llist_elt (fwrk->event_elts, ei);
+      next_ei = clib_llist_next_index (elt, evt_list);
+      clib_llist_remove (fwrk->event_elts, evt_list, elt);
+      switch (elt->evt.event_type)
        {
-       case FIFO_EVENT_APP_TX:
-         s0 = session_event_get_session (e0, my_thread_index);
-
-         if (PREDICT_FALSE (!s0))
-           {
-             clib_warning ("It's dead, Jim!");
-             continue;
-           }
-         /* Can retransmit for closed sessions but can't do anything if
-          * session is not ready or closed */
-         if (PREDICT_FALSE (s0->session_state < SESSION_STATE_READY))
-           continue;
-         /* Spray packets in per session type frames, since they go to
-          * different nodes */
-         rv = (smm->session_tx_fns[s0->session_type]) (vm, node, smm, e0, s0,
-                                                       my_thread_index,
-                                                       &n_tx_packets);
-         /* Out of buffers */
-         if (PREDICT_FALSE (rv < 0))
-           {
-             vlib_node_increment_counter (vm, node->node_index,
-                                          SESSION_QUEUE_ERROR_NO_BUFFER, 1);
-             continue;
-           }
+       case SESSION_CTRL_EVT_LISTEN:
+         session_mq_listen_handler (fwrk, elt);
          break;
-       case FIFO_EVENT_DISCONNECT:
-         /* Make sure disconnects run after the pending list is drained */
-         if (!e0->postponed)
-           {
-             e0->postponed = 1;
-             vec_add1 (smm->pending_disconnects[my_thread_index], *e0);
-             continue;
-           }
-         s0 = session_get_from_handle (e0->session_handle);
-         stream_session_disconnect (s0);
+       case SESSION_CTRL_EVT_UNLISTEN:
+         session_mq_unlisten_handler (fwrk, elt);
          break;
-       case FIFO_EVENT_BUILTIN_RX:
-         s0 = session_event_get_session (e0, my_thread_index);
-         if (PREDICT_FALSE (!s0))
-           continue;
-         svm_fifo_unset_event (s0->server_rx_fifo);
-         app = application_get (s0->app_index);
-         app->cb_fns.builtin_server_rx_callback (s0);
+       case SESSION_CTRL_EVT_APP_DETACH:
+         app_mq_detach_handler (fwrk, elt);
          break;
-       case FIFO_EVENT_RPC:
-         fp = e0->rpc_args.fp;
-         (*fp) (e0->rpc_args.arg);
+       case SESSION_CTRL_EVT_CONNECT_URI:
+         session_mq_connect_uri_handler (fwrk, elt);
+         break;
+       case SESSION_CTRL_EVT_ACCEPTED_REPLY:
+         session_mq_accepted_reply_handler (fwrk, elt);
+         break;
+       case SESSION_CTRL_EVT_CONNECT:
+         session_mq_connect_handler (fwrk, elt);
          break;
-
        default:
-         clib_warning ("unhandled event type %d", e0->event_type);
+         clib_warning ("unhandled %u", elt->evt.event_type);
+         ALWAYS_ASSERT (0);
+         break;
        }
-    }
-
-  _vec_len (my_fifo_events) = 0;
-  smm->free_event_vector[my_thread_index] = my_fifo_events;
 
-  vlib_node_increment_counter (vm, session_queue_node.index,
-                              SESSION_QUEUE_ERROR_TX, n_tx_packets);
-
-  SESSION_EVT_DBG (SESSION_EVT_DEQ_NODE, 1);
+      /* Regrab element in case pool moved */
+      elt = clib_llist_elt (fwrk->event_elts, ei);
+      if (!clib_llist_elt_is_linked (elt, evt_list))
+       {
+         session_evt_ctrl_data_free (fwrk, elt);
+         clib_llist_put (fwrk->event_elts, elt);
+       }
+      ei = next_ei;
+    }
 
-  return n_tx_packets;
+  vlib_worker_thread_barrier_release (vm);
 }
 
-/* *INDENT-OFF* */
-VLIB_REGISTER_NODE (session_queue_node) =
+vlib_node_registration_t session_queue_node;
+
+typedef struct
+{
+  u32 session_index;
+  u32 server_thread_index;
+} session_queue_trace_t;
+
+/* packet trace format function */
+static u8 *
+format_session_queue_trace (u8 * s, va_list * args)
+{
+  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
+  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
+  session_queue_trace_t *t = va_arg (*args, session_queue_trace_t *);
+
+  s = format (s, "session index %d thread index %d",
+             t->session_index, t->server_thread_index);
+  return s;
+}
+
+#define foreach_session_queue_error                                           \
+  _ (TX, tx, INFO, "Packets transmitted")                                     \
+  _ (TIMER, timer, INFO, "Timer events")                                      \
+  _ (NO_BUFFER, no_buffer, ERROR, "Out of buffers")
+
+typedef enum
+{
+#define _(f, n, s, d) SESSION_QUEUE_ERROR_##f,
+  foreach_session_queue_error
+#undef _
+    SESSION_QUEUE_N_ERROR,
+} session_queue_error_t;
+
+static vlib_error_desc_t session_error_counters[] = {
+#define _(f, n, s, d) { #n, d, VL_COUNTER_SEVERITY_##s },
+  foreach_session_queue_error
+#undef _
+};
+
+enum
+{
+  SESSION_TX_NO_BUFFERS = -2,
+  SESSION_TX_NO_DATA,
+  SESSION_TX_OK
+};
+
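+/* Trace up to n_trace of the buffers just filled for session s */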
+static void
+session_tx_trace_frame (vlib_main_t *vm, vlib_node_runtime_t *node,
+                       u32 next_index, vlib_buffer_t **bufs, u16 n_segs,
+                       session_t *s, u32 n_trace)
+{
+  vlib_buffer_t **b = bufs;
+
+  while (n_trace && n_segs)
+    {
+      if (PREDICT_TRUE (vlib_trace_buffer (vm, node, next_index, b[0],
+                                          1 /* follow_chain */)))
+       {
+         session_queue_trace_t *t =
+           vlib_add_trace (vm, node, b[0], sizeof (*t));
+         t->session_index = s->session_index;
+         t->server_thread_index = s->thread_index;
+         n_trace--;
+       }
+      b++;
+      n_segs--;
+    }
+  vlib_set_trace_count (vm, node, n_trace);
+}
+
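+/* Gather the head of the tx fifo as (at most two) segments and queue dma
+ * copies into the buffer instead of copying inline */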
+always_inline int
+session_tx_fill_dma_transfers (session_worker_t *wrk,
+                              session_tx_context_t *ctx, vlib_buffer_t *b)
+{
+  vlib_main_t *vm = wrk->vm;
+  u32 len_to_deq;
+  u8 *data0 = NULL;
+  int n_bytes_read, len_write;
+  svm_fifo_seg_t data_fs[2];
+
+  u32 n_segs = 2;
+  u16 n_transfers = 0;
+  /*
+   * Start with the first buffer in chain
+   */
+  b->error = 0;
+  b->flags = VNET_BUFFER_F_LOCALLY_ORIGINATED;
+  b->current_data = 0;
+  data0 = vlib_buffer_make_headroom (b, TRANSPORT_MAX_HDRS_LEN);
+  len_to_deq = clib_min (ctx->left_to_snd, ctx->deq_per_first_buf);
+
+  n_bytes_read = svm_fifo_segments (ctx->s->tx_fifo, ctx->sp.tx_offset,
+                                   data_fs, &n_segs, len_to_deq);
+
+  len_write = n_bytes_read;
+  ASSERT (n_bytes_read == len_to_deq);
+
+  while (n_bytes_read)
+    {
+      wrk->batch_num++;
+      vlib_dma_batch_add (vm, wrk->batch, data0, data_fs[n_transfers].data,
+                         data_fs[n_transfers].len);
+      data0 += data_fs[n_transfers].len;
+      n_bytes_read -= data_fs[n_transfers].len;
+      n_transfers++;
+    }
+  return len_write;
+}
+
+always_inline int
+session_tx_fill_dma_transfers_tail (session_worker_t *wrk,
+                                   session_tx_context_t *ctx,
+                                   vlib_buffer_t *b, u32 len_to_deq, u8 *data)
+{
+  vlib_main_t *vm = wrk->vm;
+  int n_bytes_read, len_write;
+  svm_fifo_seg_t data_fs[2];
+  u32 n_segs = 2;
+  u16 n_transfers = 0;
+
+  n_bytes_read = svm_fifo_segments (ctx->s->tx_fifo, ctx->sp.tx_offset,
+                                   data_fs, &n_segs, len_to_deq);
+
+  len_write = n_bytes_read;
+
+  ASSERT (n_bytes_read == len_to_deq);
+
+  while (n_bytes_read)
+    {
+      wrk->batch_num++;
+      vlib_dma_batch_add (vm, wrk->batch, data, data_fs[n_transfers].data,
+                         data_fs[n_transfers].len);
+      data += data_fs[n_transfers].len;
+      n_bytes_read -= data_fs[n_transfers].len;
+      n_transfers++;
+    }
+
+  return len_write;
+}
+
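+/* Copy fifo data into the buffer: plain svm_fifo_peek by default, dma
+ * transfers when dma is enabled for the worker */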
+always_inline int
+session_tx_copy_data (session_worker_t *wrk, session_tx_context_t *ctx,
+                     vlib_buffer_t *b, u32 len_to_deq, u8 *data0)
+{
+  int n_bytes_read;
+  if (PREDICT_TRUE (!wrk->dma_enabled))
+    n_bytes_read =
+      svm_fifo_peek (ctx->s->tx_fifo, ctx->sp.tx_offset, len_to_deq, data0);
+  else
+    n_bytes_read = session_tx_fill_dma_transfers (wrk, ctx, b);
+  return n_bytes_read;
+}
+
+always_inline int
+session_tx_copy_data_tail (session_worker_t *wrk, session_tx_context_t *ctx,
+                          vlib_buffer_t *b, u32 len_to_deq, u8 *data)
+{
+  int n_bytes_read;
+  if (PREDICT_TRUE (!wrk->dma_enabled))
+    n_bytes_read =
+      svm_fifo_peek (ctx->s->tx_fifo, ctx->sp.tx_offset, len_to_deq, data);
+  else
+    n_bytes_read =
+      session_tx_fill_dma_transfers_tail (wrk, ctx, b, len_to_deq, data);
+  return n_bytes_read;
+}
+
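+/* Fill the rest of a segment that spans multiple buffers, chaining the
+ * new buffers onto b and keeping dgram headers consistent */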
+always_inline void
+session_tx_fifo_chain_tail (session_worker_t *wrk, session_tx_context_t *ctx,
+                           vlib_buffer_t *b, u16 *n_bufs, u8 peek_data)
+{
+  vlib_main_t *vm = wrk->vm;
+  vlib_buffer_t *chain_b, *prev_b;
+  u32 chain_bi0, to_deq, left_from_seg;
+  int len_to_deq, n_bytes_read;
+  u8 *data, j;
+
+  b->flags |= VLIB_BUFFER_TOTAL_LENGTH_VALID;
+  b->total_length_not_including_first_buffer = 0;
+
+  chain_b = b;
+  left_from_seg = clib_min (ctx->sp.snd_mss - b->current_length,
+                           ctx->left_to_snd);
+  to_deq = left_from_seg;
+  for (j = 1; j < ctx->n_bufs_per_seg; j++)
+    {
+      prev_b = chain_b;
+      len_to_deq = clib_min (to_deq, ctx->deq_per_buf);
+
+      *n_bufs -= 1;
+      chain_bi0 = ctx->tx_buffers[*n_bufs];
+      chain_b = vlib_get_buffer (vm, chain_bi0);
+      chain_b->current_data = 0;
+      data = vlib_buffer_get_current (chain_b);
+      if (peek_data)
+       {
+         n_bytes_read =
+           session_tx_copy_data_tail (wrk, ctx, b, len_to_deq, data);
+         ctx->sp.tx_offset += n_bytes_read;
+       }
+      else
+       {
+         if (ctx->transport_vft->transport_options.tx_type ==
+             TRANSPORT_TX_DGRAM)
+           {
+             svm_fifo_t *f = ctx->s->tx_fifo;
+             session_dgram_hdr_t *hdr = &ctx->hdr;
+             u16 deq_now;
+             u32 offset;
+
+             deq_now = clib_min (hdr->data_length - hdr->data_offset,
+                                 len_to_deq);
+             offset = hdr->data_offset + SESSION_CONN_HDR_LEN;
+             n_bytes_read = svm_fifo_peek (f, offset, deq_now, data);
+             ASSERT (n_bytes_read > 0);
+
+             hdr->data_offset += n_bytes_read;
+             if (hdr->data_offset == hdr->data_length)
+               {
+                 offset = hdr->data_length + SESSION_CONN_HDR_LEN;
+                 svm_fifo_dequeue_drop (f, offset);
+                 if (ctx->left_to_snd > n_bytes_read)
+                   svm_fifo_peek (ctx->s->tx_fifo, 0, sizeof (ctx->hdr),
+                                  (u8 *) & ctx->hdr);
+               }
+             else if (ctx->left_to_snd == n_bytes_read)
+               svm_fifo_overwrite_head (ctx->s->tx_fifo, (u8 *) & ctx->hdr,
+                                        sizeof (session_dgram_pre_hdr_t));
+           }
+         else
+           n_bytes_read = svm_fifo_dequeue (ctx->s->tx_fifo,
+                                            len_to_deq, data);
+       }
+      ASSERT (n_bytes_read == len_to_deq);
+      chain_b->current_length = n_bytes_read;
+      b->total_length_not_including_first_buffer += chain_b->current_length;
+
+      /* update previous buffer */
+      prev_b->next_buffer = chain_bi0;
+      prev_b->flags |= VLIB_BUFFER_NEXT_PRESENT;
+
+      /* update current buffer */
+      chain_b->next_buffer = 0;
+
+      to_deq -= n_bytes_read;
+      if (to_deq == 0)
+       break;
+    }
+  ASSERT (to_deq == 0
+         && b->total_length_not_including_first_buffer == left_from_seg);
+  ctx->left_to_snd -= left_from_seg;
+}
+
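+/* Fill the first buffer of a segment: make room for transport headers,
+ * peek or dequeue the payload, then chain extra buffers if needed */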
+always_inline void
+session_tx_fill_buffer (session_worker_t *wrk, session_tx_context_t *ctx,
+                       vlib_buffer_t *b, u16 *n_bufs, u8 peek_data)
+{
+  u32 len_to_deq;
+  u8 *data0;
+  int n_bytes_read;
+  /*
+   * Start with the first buffer in chain
+   */
+  b->error = 0;
+  b->flags = VNET_BUFFER_F_LOCALLY_ORIGINATED;
+  b->current_data = 0;
+
+  data0 = vlib_buffer_make_headroom (b, TRANSPORT_MAX_HDRS_LEN);
+  len_to_deq = clib_min (ctx->left_to_snd, ctx->deq_per_first_buf);
+
+  if (peek_data)
+    {
+      n_bytes_read = session_tx_copy_data (wrk, ctx, b, len_to_deq, data0);
+      ASSERT (n_bytes_read > 0);
+      /* Keep track of progress locally, transport is also supposed to
+       * increment it independently when pushing the header */
+      ctx->sp.tx_offset += n_bytes_read;
+    }
+  else
+    {
+      if (ctx->transport_vft->transport_options.tx_type == TRANSPORT_TX_DGRAM)
+       {
+         session_dgram_hdr_t *hdr = &ctx->hdr;
+         svm_fifo_t *f = ctx->s->tx_fifo;
+         u16 deq_now;
+         u32 offset;
+
+         ASSERT (hdr->data_length > hdr->data_offset);
+         deq_now = clib_min (hdr->data_length - hdr->data_offset,
+                             len_to_deq);
+         offset = hdr->data_offset + SESSION_CONN_HDR_LEN;
+         n_bytes_read = svm_fifo_peek (f, offset, deq_now, data0);
+         ASSERT (n_bytes_read > 0);
+
+         if (transport_connection_is_cless (ctx->tc))
+           {
+             ip_copy (&ctx->tc->rmt_ip, &hdr->rmt_ip, ctx->tc->is_ip4);
+             ip_copy (&ctx->tc->lcl_ip, &hdr->lcl_ip, ctx->tc->is_ip4);
+             /* Local port assumed to be bound, not overwriting it */
+             ctx->tc->rmt_port = hdr->rmt_port;
+           }
+         hdr->data_offset += n_bytes_read;
+         if (hdr->data_offset == hdr->data_length)
+           {
+             offset = hdr->data_length + SESSION_CONN_HDR_LEN;
+             svm_fifo_dequeue_drop (f, offset);
+             if (ctx->left_to_snd > n_bytes_read)
+               svm_fifo_peek (ctx->s->tx_fifo, 0, sizeof (ctx->hdr),
+                              (u8 *) & ctx->hdr);
+           }
+         else if (ctx->left_to_snd == n_bytes_read)
+           svm_fifo_overwrite_head (ctx->s->tx_fifo, (u8 *) & ctx->hdr,
+                                    sizeof (session_dgram_pre_hdr_t));
+       }
+      else
+       {
+         n_bytes_read = svm_fifo_dequeue (ctx->s->tx_fifo,
+                                          len_to_deq, data0);
+         ASSERT (n_bytes_read > 0);
+       }
+    }
+
+  b->current_length = n_bytes_read;
+  ctx->left_to_snd -= n_bytes_read;
+
+  /*
+   * Fill in the remaining buffers in the chain, if any
+   */
+  if (PREDICT_FALSE (ctx->n_bufs_per_seg > 1 && ctx->left_to_snd))
+    session_tx_fifo_chain_tail (wrk, ctx, b, n_bufs, peek_data);
+}
+
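+/* Returns 0 if the session can send, 1 if it is not yet ready and 2 if
+ * the transport is closed or deleted */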
+always_inline u8
+session_tx_not_ready (session_t * s, u8 peek_data)
+{
+  if (peek_data)
+    {
+      if (PREDICT_TRUE (s->session_state == SESSION_STATE_READY))
+       return 0;
+      /* Can retransmit for closed sessions but can't send new data if
+       * session is not ready or closed */
+      else if (s->session_state < SESSION_STATE_READY)
+       {
+         /* Allow sessions in accepting state to send custom packets.
+          * For instance, tcp wants to send acks in established, but
+          * the app has not called accept() yet */
+         if (s->session_state == SESSION_STATE_ACCEPTING &&
+             (s->flags & SESSION_F_CUSTOM_TX))
+           return 0;
+         return 1;
+       }
+      else if (s->session_state >= SESSION_STATE_TRANSPORT_CLOSED)
+       {
+         /* Allow closed transports to still send custom packets.
+          * For instance, tcp may want to send acks in time-wait. */
+         if (s->session_state != SESSION_STATE_TRANSPORT_DELETED
+             && (s->flags & SESSION_F_CUSTOM_TX))
+           return 0;
+         return 2;
+       }
+    }
+  else
+    {
+      if (s->session_state == SESSION_STATE_TRANSPORT_DELETED)
+       return 2;
+    }
+  return 0;
+}
+
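+/* Resolve the transport connection for the tx event. Listening sessions
+ * can tx because connection-less transports, e.g., udp, send through
+ * their listener. */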
+always_inline transport_connection_t *
+session_tx_get_transport (session_tx_context_t * ctx, u8 peek_data)
+{
+  if (peek_data)
+    {
+      return ctx->transport_vft->get_connection (ctx->s->connection_index,
+                                                ctx->s->thread_index);
+    }
+  else
+    {
+      if (ctx->s->session_state == SESSION_STATE_LISTENING)
+       return ctx->transport_vft->get_listener (ctx->s->connection_index);
+      else
+       {
+         return ctx->transport_vft->get_connection (ctx->s->connection_index,
+                                                    ctx->s->thread_index);
+       }
+    }
+}
+
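+/*
+ * Compute dequeue limits for this dispatch: how much the fifo holds,
+ * how much the transport's send space and mss allow and how many
+ * buffers are needed. For dgram transports, each dgram sits in the
+ * fifo as a session_dgram_hdr_t followed by its payload, so headers
+ * are peeked to discover payload lengths.
+ */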
+always_inline void
+session_tx_set_dequeue_params (vlib_main_t * vm, session_tx_context_t * ctx,
+                              u32 max_segs, u8 peek_data)
+{
+  u32 n_bytes_per_buf, n_bytes_per_seg;
+
+  n_bytes_per_buf = vlib_buffer_get_default_data_size (vm);
+  ctx->max_dequeue = svm_fifo_max_dequeue_cons (ctx->s->tx_fifo);
+
+  if (peek_data)
+    {
+      /* Offset in tx fifo from where to peek data */
+      if (PREDICT_FALSE (ctx->sp.tx_offset >= ctx->max_dequeue))
+       {
+         ctx->max_len_to_snd = 0;
+         return;
+       }
+      ctx->max_dequeue -= ctx->sp.tx_offset;
+    }
+  else
+    {
+      if (ctx->transport_vft->transport_options.tx_type == TRANSPORT_TX_DGRAM)
+       {
+         u32 len, chain_limit;
+
+         if (ctx->max_dequeue <= sizeof (ctx->hdr))
+           {
+             ctx->max_len_to_snd = 0;
+             return;
+           }
+
+         svm_fifo_peek (ctx->s->tx_fifo, 0, sizeof (ctx->hdr),
+                        (u8 *) & ctx->hdr);
+         /* Zero length dgrams not supported */
+         if (PREDICT_FALSE (ctx->hdr.data_length == 0))
+           {
+             svm_fifo_dequeue_drop (ctx->s->tx_fifo, sizeof (ctx->hdr));
+             ctx->max_len_to_snd = 0;
+             return;
+           }
+         ASSERT (ctx->hdr.data_length > ctx->hdr.data_offset);
+         len = ctx->hdr.data_length - ctx->hdr.data_offset;
+
+         if (ctx->hdr.gso_size)
+           {
+             ctx->sp.snd_mss = clib_min (ctx->sp.snd_mss, ctx->hdr.gso_size);
+           }
+
+         /* Process multiple dgrams in one pass only if each fits within
+          * min (buf_space, mss). This avoids building buffer chains
+          * while handling multiple dgrams */
+         chain_limit = clib_min (n_bytes_per_buf - TRANSPORT_MAX_HDRS_LEN,
+                                 ctx->sp.snd_mss);
+         if (ctx->hdr.data_length <= chain_limit)
+           {
+             u32 first_dgram_len, dgram_len, offset, max_offset;
+             session_dgram_hdr_t hdr;
+
+             ctx->sp.snd_mss = clib_min (ctx->sp.snd_mss, len);
+             offset = ctx->hdr.data_length + sizeof (session_dgram_hdr_t);
+             first_dgram_len = len;
+             max_offset = clib_min (ctx->max_dequeue, 16 << 10);
+
+             while (offset < max_offset)
+               {
+                 svm_fifo_peek (ctx->s->tx_fifo, offset, sizeof (ctx->hdr),
+                                (u8 *) & hdr);
+                 dgram_len = hdr.data_length - hdr.data_offset;
+                 if (offset + sizeof (hdr) + hdr.data_length >
+                       ctx->max_dequeue ||
+                     first_dgram_len != dgram_len)
+                   break;
+                 /* Assert only after the test above, which breaks out
+                  * of the loop on zero length dgrams */
+                 ASSERT (hdr.data_length > hdr.data_offset);
+                 len += dgram_len;
+                 offset += sizeof (hdr) + hdr.data_length;
+               }
+           }
+
+         ctx->max_dequeue = len;
+       }
+    }
+  ASSERT (ctx->max_dequeue > 0);
+
+  /* Ensure we're not writing more than transport window allows */
+  if (ctx->max_dequeue < ctx->sp.snd_space)
+    {
+      /* Constrained by tx queue. Try to send only fully formed segments */
+      ctx->max_len_to_snd = (ctx->max_dequeue > ctx->sp.snd_mss) ?
+       (ctx->max_dequeue - (ctx->max_dequeue % ctx->sp.snd_mss)) :
+       ctx->max_dequeue;
+      /* TODO Nagle ? */
+    }
+  else
+    {
+      /* Expectation is that snd_space is already a multiple of snd_mss */
+      ctx->max_len_to_snd = ctx->sp.snd_space;
+    }
+
+  /* Check if we're tx constrained by the node */
+  ctx->n_segs_per_evt = ceil ((f64) ctx->max_len_to_snd / ctx->sp.snd_mss);
+  if (ctx->n_segs_per_evt > max_segs)
+    {
+      ctx->n_segs_per_evt = max_segs;
+      ctx->max_len_to_snd = max_segs * ctx->sp.snd_mss;
+    }
+
+  ASSERT (n_bytes_per_buf > TRANSPORT_MAX_HDRS_LEN);
+  if (ctx->n_segs_per_evt > 1)
+    {
+      u32 n_bytes_last_seg, n_bufs_last_seg;
+
+      n_bytes_per_seg = TRANSPORT_MAX_HDRS_LEN + ctx->sp.snd_mss;
+      n_bytes_last_seg = TRANSPORT_MAX_HDRS_LEN + ctx->max_len_to_snd
+       - ((ctx->n_segs_per_evt - 1) * ctx->sp.snd_mss);
+      ctx->n_bufs_per_seg = ceil ((f64) n_bytes_per_seg / n_bytes_per_buf);
+      n_bufs_last_seg = ceil ((f64) n_bytes_last_seg / n_bytes_per_buf);
+      ctx->n_bufs_needed = ((ctx->n_segs_per_evt - 1) * ctx->n_bufs_per_seg)
+       + n_bufs_last_seg;
+    }
+  else
+    {
+      n_bytes_per_seg = TRANSPORT_MAX_HDRS_LEN + ctx->max_len_to_snd;
+      ctx->n_bufs_per_seg = ceil ((f64) n_bytes_per_seg / n_bytes_per_buf);
+      ctx->n_bufs_needed = ctx->n_bufs_per_seg;
+    }
+
+  ctx->deq_per_buf = clib_min (ctx->sp.snd_mss, n_bytes_per_buf);
+  ctx->deq_per_first_buf = clib_min (ctx->sp.snd_mss,
+                                    n_bytes_per_buf -
+                                    TRANSPORT_MAX_HDRS_LEN);
+}
+
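+/*
+ * Called when the session sent all it was allowed to. If the fifo still
+ * holds unsent data, re-take the fifo event and requeue the session at
+ * the head of the old flows list; otherwise deschedule the connection
+ * until a new enqueue generates a fresh tx event.
+ */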
+always_inline void
+session_tx_maybe_reschedule (session_worker_t * wrk,
+                            session_tx_context_t * ctx,
+                            session_evt_elt_t * elt)
+{
+  session_t *s = ctx->s;
+
+  svm_fifo_unset_event (s->tx_fifo);
+  if (svm_fifo_max_dequeue_cons (s->tx_fifo) > ctx->sp.tx_offset)
+    {
+      if (svm_fifo_set_event (s->tx_fifo))
+       session_evt_add_head_old (wrk, elt);
+    }
+  else
+    {
+      transport_connection_deschedule (ctx->tc);
+    }
+}
+
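+/* Buffer ready for tx. Queue it for the end-of-dispatch flush or, with
+ * DMA enabled, attach it to the current dma transfer so it is sent once
+ * the copy batch completes. */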
+always_inline void
+session_tx_add_pending_buffer (session_worker_t *wrk, u32 bi, u32 next_index)
+{
+  if (PREDICT_TRUE (!wrk->dma_enabled))
+    {
+      vec_add1 (wrk->pending_tx_buffers, bi);
+      vec_add1 (wrk->pending_tx_nexts, next_index);
+    }
+  else
+    {
+      session_dma_transfer *dma_transfer = &wrk->dma_trans[wrk->trans_tail];
+      vec_add1 (dma_transfer->pending_tx_buffers, bi);
+      vec_add1 (dma_transfer->pending_tx_nexts, next_index);
+    }
+}
+
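+/*
+ * Main tx dispatch routine. In order: verify the session can send, let
+ * the transport do any custom tx, negotiate send params (snd_space,
+ * snd_mss, pacer budget), size the dequeue, allocate and fill buffers
+ * from the fifo, ask the transport to push headers and finally requeue
+ * or deschedule the session.
+ */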
+always_inline int
+session_tx_fifo_read_and_snd_i (session_worker_t * wrk,
+                               vlib_node_runtime_t * node,
+                               session_evt_elt_t * elt,
+                               int *n_tx_packets, u8 peek_data)
+{
+  u32 n_trace, n_left, pbi, next_index, max_burst;
+  session_tx_context_t *ctx = &wrk->ctx;
+  session_main_t *smm = &session_main;
+  session_event_t *e = &elt->evt;
+  vlib_main_t *vm = wrk->vm;
+  transport_proto_t tp;
+  vlib_buffer_t *pb;
+  u16 n_bufs, rv;
+
+  if (PREDICT_FALSE ((rv = session_tx_not_ready (ctx->s, peek_data))))
+    {
+      if (rv < 2)
+       session_evt_add_old (wrk, elt);
+      return SESSION_TX_NO_DATA;
+    }
+
+  next_index = smm->session_type_to_next[ctx->s->session_type];
+  max_burst = SESSION_NODE_FRAME_SIZE - *n_tx_packets;
+
+  tp = session_get_transport_proto (ctx->s);
+  ctx->transport_vft = transport_protocol_get_vft (tp);
+  ctx->tc = session_tx_get_transport (ctx, peek_data);
+
+  if (PREDICT_FALSE (e->event_type == SESSION_IO_EVT_TX_FLUSH))
+    {
+      if (ctx->transport_vft->flush_data)
+       ctx->transport_vft->flush_data (ctx->tc);
+      e->event_type = SESSION_IO_EVT_TX;
+    }
+
+  if (ctx->s->flags & SESSION_F_CUSTOM_TX)
+    {
+      u32 n_custom_tx;
+      ctx->s->flags &= ~SESSION_F_CUSTOM_TX;
+      ctx->sp.max_burst_size = max_burst;
+      n_custom_tx = ctx->transport_vft->custom_tx (ctx->tc, &ctx->sp);
+      *n_tx_packets += n_custom_tx;
+      if (PREDICT_FALSE
+         (ctx->s->session_state >= SESSION_STATE_TRANSPORT_CLOSED))
+       return SESSION_TX_OK;
+      max_burst -= n_custom_tx;
+      if (!max_burst || (ctx->s->flags & SESSION_F_CUSTOM_TX))
+       {
+         session_evt_add_old (wrk, elt);
+         return SESSION_TX_OK;
+       }
+    }
+
+  /* Connection previously descheduled because it had no data to send.
+   * Clear descheduled flag and reset pacer if in use */
+  if (transport_connection_is_descheduled (ctx->tc))
+    transport_connection_clear_descheduled (ctx->tc);
+
+  transport_connection_snd_params (ctx->tc, &ctx->sp);
+
+  if (!ctx->sp.snd_space)
+    {
+      /* If the deschedule flag was set, remove session from scheduler.
+       * Transport is responsible for rescheduling this session. */
+      if (ctx->sp.flags & TRANSPORT_SND_F_DESCHED)
+       transport_connection_deschedule (ctx->tc);
+      /* Request to postpone the session, e.g., zero-wnd and transport
+       * is not currently probing */
+      else if (ctx->sp.flags & TRANSPORT_SND_F_POSTPONE)
+       session_evt_add_old (wrk, elt);
+      /* This flow queue is "empty" so it should be re-evaluated before
+       * the ones that have data to send. */
+      else
+       session_evt_add_head_old (wrk, elt);
+
+      return SESSION_TX_NO_DATA;
+    }
+
+  if (transport_connection_is_tx_paced (ctx->tc))
+    {
+      u32 snd_space = transport_connection_tx_pacer_burst (ctx->tc);
+      if (snd_space < TRANSPORT_PACER_MIN_BURST)
+       {
+         session_evt_add_head_old (wrk, elt);
+         return SESSION_TX_NO_DATA;
+       }
+      snd_space = clib_min (ctx->sp.snd_space, snd_space);
+      ctx->sp.snd_space = snd_space >= ctx->sp.snd_mss ?
+       snd_space - snd_space % ctx->sp.snd_mss : snd_space;
+    }
+
+  /* Check how much we can pull. */
+  session_tx_set_dequeue_params (vm, ctx, max_burst, peek_data);
+
+  if (PREDICT_FALSE (!ctx->max_len_to_snd))
+    {
+      transport_connection_tx_pacer_reset_bucket (ctx->tc, 0);
+      session_tx_maybe_reschedule (wrk, ctx, elt);
+      return SESSION_TX_NO_DATA;
+    }
+
+  vec_validate_aligned (ctx->tx_buffers, ctx->n_bufs_needed - 1,
+                       CLIB_CACHE_LINE_BYTES);
+  n_bufs = vlib_buffer_alloc (vm, ctx->tx_buffers, ctx->n_bufs_needed);
+  if (PREDICT_FALSE (n_bufs < ctx->n_bufs_needed))
+    {
+      if (n_bufs)
+       vlib_buffer_free (vm, ctx->tx_buffers, n_bufs);
+      session_evt_add_head_old (wrk, elt);
+      vlib_node_increment_counter (wrk->vm, node->node_index,
+                                  SESSION_QUEUE_ERROR_NO_BUFFER, 1);
+      return SESSION_TX_NO_BUFFERS;
+    }
+
+  if (transport_connection_is_tx_paced (ctx->tc))
+    transport_connection_tx_pacer_update_bytes (ctx->tc, ctx->max_len_to_snd);
+
+  ctx->left_to_snd = ctx->max_len_to_snd;
+  n_left = ctx->n_segs_per_evt;
+
+  vec_validate (ctx->transport_pending_bufs, n_left);
+
+  while (n_left >= 4)
+    {
+      vlib_buffer_t *b0, *b1;
+      u32 bi0, bi1;
+
+      pbi = ctx->tx_buffers[n_bufs - 3];
+      pb = vlib_get_buffer (vm, pbi);
+      vlib_prefetch_buffer_header (pb, STORE);
+      pbi = ctx->tx_buffers[n_bufs - 4];
+      pb = vlib_get_buffer (vm, pbi);
+      vlib_prefetch_buffer_header (pb, STORE);
+
+      bi0 = ctx->tx_buffers[--n_bufs];
+      bi1 = ctx->tx_buffers[--n_bufs];
+
+      b0 = vlib_get_buffer (vm, bi0);
+      b1 = vlib_get_buffer (vm, bi1);
+
+      session_tx_fill_buffer (wrk, ctx, b0, &n_bufs, peek_data);
+      session_tx_fill_buffer (wrk, ctx, b1, &n_bufs, peek_data);
+
+      ctx->transport_pending_bufs[ctx->n_segs_per_evt - n_left] = b0;
+      ctx->transport_pending_bufs[ctx->n_segs_per_evt - n_left + 1] = b1;
+      n_left -= 2;
+
+      session_tx_add_pending_buffer (wrk, bi0, next_index);
+      session_tx_add_pending_buffer (wrk, bi1, next_index);
+    }
+  while (n_left)
+    {
+      vlib_buffer_t *b0;
+      u32 bi0;
+
+      if (n_left > 1)
+       {
+         pbi = ctx->tx_buffers[n_bufs - 2];
+         pb = vlib_get_buffer (vm, pbi);
+         vlib_prefetch_buffer_header (pb, STORE);
+       }
+
+      bi0 = ctx->tx_buffers[--n_bufs];
+      b0 = vlib_get_buffer (vm, bi0);
+      session_tx_fill_buffer (wrk, ctx, b0, &n_bufs, peek_data);
+
+      ctx->transport_pending_bufs[ctx->n_segs_per_evt - n_left] = b0;
+      n_left -= 1;
+
+      session_tx_add_pending_buffer (wrk, bi0, next_index);
+    }
+
+  /* Ask transport to push headers */
+  ctx->transport_vft->push_header (ctx->tc, ctx->transport_pending_bufs,
+                                  ctx->n_segs_per_evt);
+
+  if (PREDICT_FALSE ((n_trace = vlib_get_trace_count (vm, node)) > 0))
+    session_tx_trace_frame (vm, node, next_index, ctx->transport_pending_bufs,
+                           ctx->n_segs_per_evt, ctx->s, n_trace);
+
+  if (PREDICT_FALSE (n_bufs))
+    vlib_buffer_free (vm, ctx->tx_buffers, n_bufs);
+
+  *n_tx_packets += ctx->n_segs_per_evt;
+
+  SESSION_EVT (SESSION_EVT_DEQ, ctx->s, ctx->max_len_to_snd, ctx->max_dequeue,
+              ctx->s->tx_fifo->shr->has_event, wrk->last_vlib_time);
+
+  ASSERT (ctx->left_to_snd == 0);
+
+  /* If we couldn't dequeue all bytes reschedule as old flow. Otherwise,
+   * check if application enqueued more data and reschedule accordingly */
+  if (ctx->max_len_to_snd < ctx->max_dequeue)
+    session_evt_add_old (wrk, elt);
+  else
+    session_tx_maybe_reschedule (wrk, ctx, elt);
+
+  if (!peek_data)
+    {
+      u32 n_dequeued = ctx->max_len_to_snd;
+      if (ctx->transport_vft->transport_options.tx_type == TRANSPORT_TX_DGRAM)
+       n_dequeued += ctx->n_segs_per_evt * SESSION_CONN_HDR_LEN;
+      if (svm_fifo_needs_deq_ntf (ctx->s->tx_fifo, n_dequeued))
+       session_dequeue_notify (ctx->s);
+    }
+  return SESSION_TX_OK;
+}
+
+int
+session_tx_fifo_peek_and_snd (session_worker_t * wrk,
+                             vlib_node_runtime_t * node,
+                             session_evt_elt_t * e, int *n_tx_packets)
+{
+  return session_tx_fifo_read_and_snd_i (wrk, node, e, n_tx_packets, 1);
+}
+
+int
+session_tx_fifo_dequeue_and_snd (session_worker_t * wrk,
+                                vlib_node_runtime_t * node,
+                                session_evt_elt_t * e, int *n_tx_packets)
+{
+  return session_tx_fifo_read_and_snd_i (wrk, node, e, n_tx_packets, 0);
+}
+
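+/*
+ * Tx function for transports that generate their own output through
+ * custom tx, e.g., cut-through sessions. The transport does the actual
+ * work; this only manages burst budgets, rescheduling and dequeue
+ * notifications.
+ */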
+int
+session_tx_fifo_dequeue_internal (session_worker_t * wrk,
+                                 vlib_node_runtime_t * node,
+                                 session_evt_elt_t * elt, int *n_tx_packets)
+{
+  transport_send_params_t *sp = &wrk->ctx.sp;
+  session_t *s = wrk->ctx.s;
+  u32 n_packets;
+
+  if (PREDICT_FALSE (s->session_state >= SESSION_STATE_TRANSPORT_CLOSED))
+    return 0;
+
+  /* Clear custom-tx flag used to request reschedule for tx */
+  s->flags &= ~SESSION_F_CUSTOM_TX;
+
+  sp->flags = 0;
+  sp->bytes_dequeued = 0;
+  sp->max_burst_size = clib_min (SESSION_NODE_FRAME_SIZE - *n_tx_packets,
+                                TRANSPORT_PACER_MAX_BURST_PKTS);
+
+  n_packets = transport_custom_tx (session_get_transport_proto (s), s, sp);
+  *n_tx_packets += n_packets;
+
+  if (s->flags & SESSION_F_CUSTOM_TX)
+    {
+      session_evt_add_old (wrk, elt);
+    }
+  else if (!(sp->flags & TRANSPORT_SND_F_DESCHED))
+    {
+      svm_fifo_unset_event (s->tx_fifo);
+      if (svm_fifo_max_dequeue_cons (s->tx_fifo))
+       if (svm_fifo_set_event (s->tx_fifo))
+         session_evt_add_head_old (wrk, elt);
+    }
+
+  if (sp->bytes_dequeued &&
+      svm_fifo_needs_deq_ntf (s->tx_fifo, sp->bytes_dequeued))
+    session_dequeue_notify (s);
+
+  return n_packets;
+}
+
+always_inline session_t *
+session_event_get_session (session_worker_t * wrk, session_event_t * e)
+{
+  if (PREDICT_FALSE (pool_is_free_index (wrk->sessions, e->session_index)))
+    return 0;
+
+  ASSERT (session_is_valid (e->session_index, wrk->vm->thread_index));
+  return pool_elt_at_index (wrk->sessions, e->session_index);
+}
+
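+/* Dispatch one control event: rpcs, session lifecycle requests and app
+ * messages taken off the mq. Unless the handler re-linked the element,
+ * its ctrl data is freed and the element returned to the pool. */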
+always_inline void
+session_event_dispatch_ctrl (session_worker_t * wrk, session_evt_elt_t * elt)
+{
+  clib_llist_index_t ei;
+  void (*fp) (void *);
+  session_event_t *e;
+  session_t *s;
+
+  ei = clib_llist_entry_index (wrk->event_elts, elt);
+  e = &elt->evt;
+
+  switch (e->event_type)
+    {
+    case SESSION_CTRL_EVT_RPC:
+      fp = e->rpc_args.fp;
+      (*fp) (e->rpc_args.arg);
+      break;
+    case SESSION_CTRL_EVT_HALF_CLOSE:
+      s = session_get_from_handle_if_valid (e->session_handle);
+      if (PREDICT_FALSE (!s))
+       break;
+      session_transport_half_close (s);
+      break;
+    case SESSION_CTRL_EVT_CLOSE:
+      s = session_get_from_handle_if_valid (e->session_handle);
+      if (PREDICT_FALSE (!s))
+       break;
+      session_transport_close (s);
+      break;
+    case SESSION_CTRL_EVT_RESET:
+      s = session_get_from_handle_if_valid (e->session_handle);
+      if (PREDICT_FALSE (!s))
+       break;
+      session_transport_reset (s);
+      break;
+    case SESSION_CTRL_EVT_LISTEN:
+      session_mq_listen_handler (wrk, elt);
+      break;
+    case SESSION_CTRL_EVT_LISTEN_URI:
+      session_mq_listen_uri_handler (wrk, elt);
+      break;
+    case SESSION_CTRL_EVT_UNLISTEN:
+      session_mq_unlisten_handler (wrk, elt);
+      break;
+    case SESSION_CTRL_EVT_CONNECT:
+      session_mq_connect_handler (wrk, elt);
+      break;
+    case SESSION_CTRL_EVT_CONNECT_URI:
+      session_mq_connect_uri_handler (wrk, elt);
+      break;
+    case SESSION_CTRL_EVT_SHUTDOWN:
+      session_mq_shutdown_handler (session_evt_ctrl_data (wrk, elt));
+      break;
+    case SESSION_CTRL_EVT_DISCONNECT:
+      session_mq_disconnect_handler (session_evt_ctrl_data (wrk, elt));
+      break;
+    case SESSION_CTRL_EVT_DISCONNECTED:
+      session_mq_disconnected_handler (session_evt_ctrl_data (wrk, elt));
+      break;
+    case SESSION_CTRL_EVT_ACCEPTED_REPLY:
+      session_mq_accepted_reply_handler (wrk, elt);
+      break;
+    case SESSION_CTRL_EVT_DISCONNECTED_REPLY:
+      session_mq_disconnected_reply_handler (session_evt_ctrl_data (wrk,
+                                                                   elt));
+      break;
+    case SESSION_CTRL_EVT_RESET_REPLY:
+      session_mq_reset_reply_handler (session_evt_ctrl_data (wrk, elt));
+      break;
+    case SESSION_CTRL_EVT_WORKER_UPDATE:
+      session_mq_worker_update_handler (session_evt_ctrl_data (wrk, elt));
+      break;
+    case SESSION_CTRL_EVT_APP_DETACH:
+      app_mq_detach_handler (wrk, elt);
+      break;
+    case SESSION_CTRL_EVT_APP_WRK_RPC:
+      session_mq_app_wrk_rpc_handler (session_evt_ctrl_data (wrk, elt));
+      break;
+    case SESSION_CTRL_EVT_TRANSPORT_ATTR:
+      session_mq_transport_attr_handler (session_evt_ctrl_data (wrk, elt));
+      break;
+    default:
+      clib_warning ("unhandled event type %d", e->event_type);
+    }
+
+  /* Regrab elements in case pool moved */
+  elt = clib_llist_elt (wrk->event_elts, ei);
+  if (!clib_llist_elt_is_linked (elt, evt_list))
+    {
+      e = &elt->evt;
+      if (e->event_type >= SESSION_CTRL_EVT_BOUND)
+       session_evt_ctrl_data_free (wrk, elt);
+      clib_llist_put (wrk->event_elts, elt);
+    }
+  SESSION_EVT (SESSION_EVT_COUNTS, CNT_CTRL_EVTS, 1, wrk);
+}
+
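+/* Dispatch one io event. Tx events go through the per session type tx
+ * function, rx events are forwarded to the transport and built-in rx
+ * events to the owning app worker's rx callback. */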
+always_inline void
+session_event_dispatch_io (session_worker_t * wrk, vlib_node_runtime_t * node,
+                          session_evt_elt_t * elt, int *n_tx_packets)
+{
+  session_main_t *smm = &session_main;
+  app_worker_t *app_wrk;
+  clib_llist_index_t ei;
+  session_event_t *e;
+  session_t *s;
+
+  ei = clib_llist_entry_index (wrk->event_elts, elt);
+  e = &elt->evt;
+
+  switch (e->event_type)
+    {
+    case SESSION_IO_EVT_TX_FLUSH:
+    case SESSION_IO_EVT_TX:
+      s = session_event_get_session (wrk, e);
+      if (PREDICT_FALSE (!s))
+       break;
+      CLIB_PREFETCH (s->tx_fifo, sizeof (*(s->tx_fifo)), LOAD);
+      wrk->ctx.s = s;
+      /* Spray packets in per session type frames, since they go to
+       * different nodes */
+      (smm->session_tx_fns[s->session_type]) (wrk, node, elt, n_tx_packets);
+      break;
+    case SESSION_IO_EVT_RX:
+      s = session_event_get_session (wrk, e);
+      if (!s)
+       break;
+      transport_app_rx_evt (session_get_transport_proto (s),
+                           s->connection_index, s->thread_index);
+      break;
+    case SESSION_IO_EVT_BUILTIN_RX:
+      s = session_event_get_session (wrk, e);
+      if (PREDICT_FALSE (!s || s->session_state >= SESSION_STATE_CLOSING))
+       break;
+      svm_fifo_unset_event (s->rx_fifo);
+      app_wrk = app_worker_get (s->app_wrk_index);
+      app_worker_builtin_rx (app_wrk, s);
+      break;
+    case SESSION_IO_EVT_TX_MAIN:
+      s = session_get_if_valid (e->session_index, 0 /* main thread */);
+      if (PREDICT_FALSE (!s))
+       break;
+      wrk->ctx.s = s;
+      (smm->session_tx_fns[s->session_type]) (wrk, node, elt, n_tx_packets);
+      break;
+    default:
+      clib_warning ("unhandled event type %d", e->event_type);
+    }
+
+  SESSION_EVT (SESSION_EVT_IO_EVT_COUNTS, e->event_type, 1, wrk);
+
+  /* Regrab elements in case pool moved */
+  elt = clib_llist_elt (wrk->event_elts, ei);
+  if (!clib_llist_elt_is_linked (elt, evt_list))
+    clib_llist_put (wrk->event_elts, elt);
+}
+
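+/* Size of the mq message for each ctrl event type, used to copy the
+ * payload out of the mq ring into per-worker ctrl data buffers */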
+/* *INDENT-OFF* */
+static const u32 session_evt_msg_sizes[] = {
+#define _(symc, sym)                                                   \
+  [SESSION_CTRL_EVT_ ## symc] = sizeof (session_ ## sym ##_msg_t),
+  foreach_session_ctrl_evt
+#undef _
+};
+/* *INDENT-ON* */
+
+always_inline void
+session_update_time_subscribers (session_main_t *smm, clib_time_type_t now,
+                                u32 thread_index)
+{
+  session_update_time_fn *fn;
+
+  vec_foreach (fn, smm->update_time_fns)
+    (*fn) (now, thread_index);
+}
+
+always_inline void
+session_evt_add_to_list (session_worker_t * wrk, session_event_t * evt)
+{
+  session_evt_elt_t *elt;
+
+  if (evt->event_type >= SESSION_CTRL_EVT_RPC)
+    {
+      elt = session_evt_alloc_ctrl (wrk);
+      if (evt->event_type >= SESSION_CTRL_EVT_BOUND)
+       {
+         elt->evt.ctrl_data_index = session_evt_ctrl_data_alloc (wrk);
+         elt->evt.event_type = evt->event_type;
+         clib_memcpy_fast (session_evt_ctrl_data (wrk, elt), evt->data,
+                           session_evt_msg_sizes[evt->event_type]);
+       }
+      else
+       {
+         /* Internal control events fit into io events footprint */
+         clib_memcpy_fast (&elt->evt, evt, sizeof (elt->evt));
+       }
+    }
+  else
+    {
+      elt = session_evt_alloc_new (wrk);
+      clib_memcpy_fast (&elt->evt, evt, sizeof (elt->evt));
+    }
+}
+
+static void
+session_flush_pending_tx_buffers (session_worker_t * wrk,
+                                 vlib_node_runtime_t * node)
+{
+  vlib_buffer_enqueue_to_next_vec (wrk->vm, node, &wrk->pending_tx_buffers,
+                                  &wrk->pending_tx_nexts,
+                                  vec_len (wrk->pending_tx_nexts));
+  vec_reset_length (wrk->pending_tx_buffers);
+  vec_reset_length (wrk->pending_tx_nexts);
+}
+
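+/* Drain the worker's message queue, turning each mq message into an
+ * event element queued for dispatch on this worker */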
+int
+session_wrk_handle_mq (session_worker_t *wrk, svm_msg_q_t *mq)
+{
+  svm_msg_q_msg_t _msg, *msg = &_msg;
+  u32 i, n_to_dequeue = 0;
+  session_event_t *evt;
+
+  n_to_dequeue = svm_msg_q_size (mq);
+  for (i = 0; i < n_to_dequeue; i++)
+    {
+      svm_msg_q_sub_raw (mq, msg);
+      evt = svm_msg_q_msg_data (mq, msg);
+      session_evt_add_to_list (wrk, evt);
+      svm_msg_q_free_msg (mq, msg);
+    }
+
+  return n_to_dequeue;
+}
+
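+/*
+ * Adaptive mode state machine. A polling worker drops to interrupt mode
+ * when only the permanent list head elements remain, i.e., no pending
+ * events, and the node is mostly idle. An interrupted worker returns to
+ * polling under load and goes fully idle once it owns no sessions.
+ */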
+static void
+session_wrk_update_state (session_worker_t *wrk)
+{
+  vlib_main_t *vm = wrk->vm;
+
+  if (wrk->state == SESSION_WRK_POLLING)
+    {
+      if (clib_llist_elts (wrk->event_elts) == 4 &&
+         vlib_last_vectors_per_main_loop (vm) < 1)
+       {
+         session_wrk_set_state (wrk, SESSION_WRK_INTERRUPT);
+         vlib_node_set_state (vm, session_queue_node.index,
+                              VLIB_NODE_STATE_INTERRUPT);
+       }
+    }
+  else if (wrk->state == SESSION_WRK_INTERRUPT)
+    {
+      if (clib_llist_elts (wrk->event_elts) > 4 ||
+         vlib_last_vectors_per_main_loop (vm) > 1)
+       {
+         session_wrk_set_state (wrk, SESSION_WRK_POLLING);
+         vlib_node_set_state (vm, session_queue_node.index,
+                              VLIB_NODE_STATE_POLLING);
+       }
+      else if (PREDICT_FALSE (!pool_elts (wrk->sessions)))
+       {
+         session_wrk_set_state (wrk, SESSION_WRK_IDLE);
+       }
+    }
+  else
+    {
+      if (clib_llist_elts (wrk->event_elts))
+       {
+         session_wrk_set_state (wrk, SESSION_WRK_INTERRUPT);
+       }
+    }
+}
+
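+/*
+ * Session queue node dispatch function. Updates worker and transport
+ * time, drains the mq, then dispatches control events, new io events
+ * and finally old io events left over from previous runs, all within a
+ * SESSION_NODE_FRAME_SIZE packet budget.
+ */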
+static uword
+session_queue_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
+                      vlib_frame_t * frame)
 {
+  u32 thread_index = vm->thread_index, __clib_unused n_evts;
+  session_evt_elt_t *elt, *ctrl_he, *new_he, *old_he;
+  session_main_t *smm = vnet_get_session_main ();
+  session_worker_t *wrk = &smm->wrk[thread_index];
+  clib_llist_index_t ei, next_ei, old_ti;
+  int n_tx_packets;
+
+  SESSION_EVT (SESSION_EVT_DISPATCH_START, wrk);
+
+  session_wrk_update_time (wrk, vlib_time_now (vm));
+
+  /*
+   *  Update transport time
+   */
+  session_update_time_subscribers (smm, wrk->last_vlib_time, thread_index);
+  n_tx_packets = vec_len (wrk->pending_tx_buffers);
+  SESSION_EVT (SESSION_EVT_DSP_CNTRS, UPDATE_TIME, wrk);
+
+  if (PREDICT_FALSE (wrk->dma_enabled))
+    {
+      if (wrk->trans_head == ((wrk->trans_tail + 1) & (wrk->trans_size - 1)))
+       return 0;
+      wrk->batch = vlib_dma_batch_new (vm, wrk->config_index);
+      if (!wrk->batch)
+       return 0;
+    }
+
+  /*
+   *  Dequeue new internal mq events
+   */
+
+  n_evts = session_wrk_handle_mq (wrk, wrk->vpp_event_queue);
+  SESSION_EVT (SESSION_EVT_DSP_CNTRS, MQ_DEQ, wrk, n_evts);
+
+  /*
+   * Handle control events
+   */
+
+  ei = wrk->ctrl_head;
+  ctrl_he = clib_llist_elt (wrk->event_elts, ei);
+  next_ei = clib_llist_next_index (ctrl_he, evt_list);
+  old_ti = clib_llist_prev_index (ctrl_he, evt_list);
+  while (ei != old_ti)
+    {
+      ei = next_ei;
+      elt = clib_llist_elt (wrk->event_elts, next_ei);
+      next_ei = clib_llist_next_index (elt, evt_list);
+      clib_llist_remove (wrk->event_elts, evt_list, elt);
+      session_event_dispatch_ctrl (wrk, elt);
+    }
+
+  SESSION_EVT (SESSION_EVT_DSP_CNTRS, CTRL_EVTS, wrk);
+
+  /*
+   * Handle the new io events.
+   */
+
+  new_he = clib_llist_elt (wrk->event_elts, wrk->new_head);
+  old_he = clib_llist_elt (wrk->event_elts, wrk->old_head);
+  old_ti = clib_llist_prev_index (old_he, evt_list);
+
+  ei = clib_llist_next_index (new_he, evt_list);
+  while (ei != wrk->new_head && n_tx_packets < SESSION_NODE_FRAME_SIZE)
+    {
+      elt = clib_llist_elt (wrk->event_elts, ei);
+      ei = clib_llist_next_index (elt, evt_list);
+      clib_llist_remove (wrk->event_elts, evt_list, elt);
+      session_event_dispatch_io (wrk, node, elt, &n_tx_packets);
+    }
+
+  SESSION_EVT (SESSION_EVT_DSP_CNTRS, NEW_IO_EVTS, wrk);
+
+  /*
+   * Handle the old io events, if we had any prior to processing the new ones
+   */
+
+  if (old_ti != wrk->old_head)
+    {
+      old_he = clib_llist_elt (wrk->event_elts, wrk->old_head);
+      ei = clib_llist_next_index (old_he, evt_list);
+
+      while (n_tx_packets < SESSION_NODE_FRAME_SIZE)
+       {
+         elt = clib_llist_elt (wrk->event_elts, ei);
+         next_ei = clib_llist_next_index (elt, evt_list);
+         clib_llist_remove (wrk->event_elts, evt_list, elt);
+
+         session_event_dispatch_io (wrk, node, elt, &n_tx_packets);
+
+         if (ei == old_ti)
+           break;
+
+         ei = next_ei;
+       }
+    }
+
+  if (PREDICT_FALSE (wrk->dma_enabled))
+    {
+      if (wrk->batch_num)
+       {
+         vlib_dma_batch_set_cookie (vm, wrk->batch, wrk->trans_tail);
+         wrk->batch_num = 0;
+         wrk->trans_tail++;
+         if (wrk->trans_tail == wrk->trans_size)
+           wrk->trans_tail = 0;
+       }
+
+      vlib_dma_batch_submit (vm, wrk->batch);
+    }
+
+  SESSION_EVT (SESSION_EVT_DSP_CNTRS, OLD_IO_EVTS, wrk);
+
+  if (vec_len (wrk->pending_tx_buffers))
+    session_flush_pending_tx_buffers (wrk, node);
+
+  vlib_node_increment_counter (vm, session_queue_node.index,
+                              SESSION_QUEUE_ERROR_TX, n_tx_packets);
+
+  SESSION_EVT (SESSION_EVT_DISPATCH_END, wrk, n_tx_packets);
+
+  if (wrk->flags & SESSION_WRK_F_ADAPTIVE)
+    session_wrk_update_state (wrk);
+
+  return n_tx_packets;
+}
+
+/* *INDENT-OFF* */
+VLIB_REGISTER_NODE (session_queue_node) = {
   .function = session_queue_node_fn,
+  .flags = VLIB_NODE_FLAG_TRACE_SUPPORTED,
   .name = "session-queue",
   .format_trace = format_session_queue_trace,
   .type = VLIB_NODE_TYPE_INPUT,
-  .n_errors = ARRAY_LEN (session_queue_error_strings),
-  .error_strings = session_queue_error_strings,
-  .n_next_nodes = SESSION_QUEUE_N_NEXT,
+  .n_errors = SESSION_QUEUE_N_ERROR,
+  .error_counters = session_error_counters,
+  .state = VLIB_NODE_STATE_DISABLED,
+};
+/* *INDENT-ON* */
+
+static clib_error_t *
+session_wrk_tfd_read_ready (clib_file_t *cf)
+{
+  session_worker_t *wrk = session_main_get_worker (cf->private_data);
+  u64 buf;
+  int rv;
+
+  vlib_node_set_interrupt_pending (wrk->vm, session_queue_node.index);
+  rv = read (wrk->timerfd, &buf, sizeof (buf));
+  if (rv < 0 && errno != EAGAIN)
+    clib_unix_warning ("failed");
+  return 0;
+}
+
+static clib_error_t *
+session_wrk_tfd_write_ready (clib_file_t *cf)
+{
+  return 0;
+}
+
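+/* Switch the worker to adaptive mode. A non-blocking timerfd, polled by
+ * the file poller, is used to wake up the session node while it is in
+ * interrupt or idle state. */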
+void
+session_wrk_enable_adaptive_mode (session_worker_t *wrk)
+{
+  u32 thread_index = wrk->vm->thread_index;
+  clib_file_t template = { 0 };
+
+  if ((wrk->timerfd = timerfd_create (CLOCK_MONOTONIC, TFD_NONBLOCK)) < 0)
+    clib_warning ("timerfd_create");
+
+  template.read_function = session_wrk_tfd_read_ready;
+  template.write_function = session_wrk_tfd_write_ready;
+  template.file_descriptor = wrk->timerfd;
+  template.private_data = thread_index;
+  template.polling_thread_index = thread_index;
+  template.description = format (0, "session-wrk-tfd-%u", thread_index);
+
+  wrk->timerfd_file = clib_file_add (&file_main, &template);
+  wrk->flags |= SESSION_WRK_F_ADAPTIVE;
+}
+
+static clib_error_t *
+session_queue_exit (vlib_main_t * vm)
+{
+  if (vlib_get_n_threads () < 2)
+    return 0;
+
+  /*
+   * Shut off (especially) worker-thread session nodes.
+   * Otherwise, vpp can crash as the main thread unmaps the
+   * API segment.
+   */
+  vlib_worker_thread_barrier_sync (vm);
+  session_node_enable_disable (0 /* is_enable */ );
+  vlib_worker_thread_barrier_release (vm);
+  return 0;
+}
+
+VLIB_MAIN_LOOP_EXIT_FUNCTION (session_queue_exit);
+
+static uword
+session_queue_run_on_main (vlib_main_t * vm)
+{
+  vlib_node_runtime_t *node;
+
+  node = vlib_node_get_runtime (vm, session_queue_node.index);
+  return session_queue_node_fn (vm, node, 0);
+}
+
+static uword
+session_queue_process (vlib_main_t * vm, vlib_node_runtime_t * rt,
+                      vlib_frame_t * f)
+{
+  uword *event_data = 0;
+  f64 timeout = 1.0;
+  uword event_type;
+
+  while (1)
+    {
+      vlib_process_wait_for_event_or_clock (vm, timeout);
+      event_type = vlib_process_get_events (vm, (uword **) & event_data);
+
+      switch (event_type)
+       {
+       case SESSION_Q_PROCESS_RUN_ON_MAIN:
+         /* Run session queue node on main thread */
+         session_queue_run_on_main (vm);
+         break;
+       case SESSION_Q_PROCESS_STOP:
+         vlib_node_set_state (vm, session_queue_process_node.index,
+                              VLIB_NODE_STATE_DISABLED);
+         timeout = 100000.0;
+         break;
+       case ~0:
+         /* Timed out. Run on main to ensure all events are handled */
+         session_queue_run_on_main (vm);
+         break;
+       }
+      vec_reset_length (event_data);
+    }
+  return 0;
+}
+
+/* *INDENT-OFF* */
+VLIB_REGISTER_NODE (session_queue_process_node) =
+{
+  .function = session_queue_process,
+  .type = VLIB_NODE_TYPE_PROCESS,
+  .name = "session-queue-process",
+  .state = VLIB_NODE_STATE_DISABLED,
+};
+/* *INDENT-ON* */
+
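+/* Pre-input incarnation of the session queue node. Runs the dispatch
+ * function before regular input nodes, once the session layer's event
+ * queues exist; the node is registered as "session-queue-main". */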
+static_always_inline uword
+session_queue_pre_input_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
+                               vlib_frame_t * frame)
+{
+  session_main_t *sm = &session_main;
+  if (!sm->wrk[0].vpp_event_queue)
+    return 0;
+  node = vlib_node_get_runtime (vm, session_queue_node.index);
+  return session_queue_node_fn (vm, node, frame);
+}
+
+/* *INDENT-OFF* */
+VLIB_REGISTER_NODE (session_queue_pre_input_node) =
+{
+  .function = session_queue_pre_input_inline,
+  .type = VLIB_NODE_TYPE_PRE_INPUT,
+  .name = "session-queue-main",
   .state = VLIB_NODE_STATE_DISABLED,
-  .next_nodes =
-  {
-      [SESSION_QUEUE_NEXT_DROP] = "error-drop",
-      [SESSION_QUEUE_NEXT_IP4_LOOKUP] = "ip4-lookup",
-      [SESSION_QUEUE_NEXT_IP6_LOOKUP] = "ip6-lookup",
-      [SESSION_QUEUE_NEXT_TCP_IP4_OUTPUT] = "tcp4-output",
-      [SESSION_QUEUE_NEXT_TCP_IP6_OUTPUT] = "tcp6-output",
-  },
 };
 /* *INDENT-ON* */