session: optimize ct fifo segment allocations
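
Instead of allocating a fifo segment per cut-through session, allocate
larger segments shared by all ct sessions of a segment manager, client
worker and server worker tuple. Per-segment session counts are tracked
and a segment is freed only once both client and server have detached.
Half-opens are now allocated on main thread, with their indices lazily
recycled, and established sessions are spread round-robin over vpp
workers.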
[vpp.git] / src / vnet / session / application_local.c
index b5cac56..9a8fe00 100644 (file)
 #include <vnet/session/application_local.h>
 #include <vnet/session/session.h>
 
-ct_connection_t *connections;
+typedef enum ct_segment_flags_
+{
+  CT_SEGMENT_F_CLIENT_DETACHED = 1 << 0,
+  CT_SEGMENT_F_SERVER_DETACHED = 1 << 1,
+} ct_segment_flags_t;
+
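+/* Per ct fifo segment state. Session counters are updated atomically by
+ * data-path threads; a detach flag is set once a side's last session on
+ * the segment is gone */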
+typedef struct ct_segment_
+{
+  u32 segment_index;
+  u32 client_n_sessions;
+  u32 server_n_sessions;
+  ct_segment_flags_t flags;
+} ct_segment_t;
+
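+/* All ct segments allocated for a (segment manager, client worker, server
+ * worker) tuple. Contexts are found via the app_segs_ctxs_table hash */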
+typedef struct ct_segments_
+{
+  u32 sm_index;
+  u32 server_wrk;
+  u32 client_wrk;
+  ct_segment_t *segments;
+} ct_segments_ctx_t;
 
-ct_connection_t *
-ct_connection_alloc (void)
+typedef struct ct_main_
+{
+  ct_connection_t **connections;       /**< Per-worker connection pools */
+  u32 n_workers;                       /**< Number of vpp workers */
+  u32 n_sessions;                      /**< Cumulative sessions counter */
+  u32 *ho_reusable;                    /**< Vector of reusable ho indices */
+  clib_spinlock_t ho_reusable_lock;    /**< Lock for reusable ho indices */
+  clib_rwlock_t app_segs_lock;         /**< RW lock for seg contexts */
+  uword *app_segs_ctxs_table;          /**< App handle to segment pool map */
+  ct_segments_ctx_t *app_seg_ctxs;     /**< Pool of ct segment contexts */
+} ct_main_t;
+
+static ct_main_t ct_main;
+
+static ct_connection_t *
+ct_connection_alloc (u32 thread_index)
 {
   ct_connection_t *ct;
 
-  pool_get_zero (connections, ct);
-  ct->c_c_index = ct - connections;
-  ct->c_thread_index = 0;
+  pool_get_zero (ct_main.connections[thread_index], ct);
+  ct->c_c_index = ct - ct_main.connections[thread_index];
+  ct->c_thread_index = thread_index;
   ct->client_wrk = ~0;
   ct->server_wrk = ~0;
+  ct->seg_ctx_index = ~0;
+  ct->ct_seg_index = ~0;
   return ct;
 }
 
-ct_connection_t *
-ct_connection_get (u32 ct_index)
+static ct_connection_t *
+ct_connection_get (u32 ct_index, u32 thread_index)
 {
-  if (pool_is_free_index (connections, ct_index))
+  if (pool_is_free_index (ct_main.connections[thread_index], ct_index))
     return 0;
-  return pool_elt_at_index (connections, ct_index);
+  return pool_elt_at_index (ct_main.connections[thread_index], ct_index);
 }
 
-void
+static void
 ct_connection_free (ct_connection_t * ct)
 {
   if (CLIB_DEBUG)
-    memset (ct, 0xfc, sizeof (*ct));
-  pool_put (connections, ct);
+    {
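+      /* Grab the thread index before poisoning the connection */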
+      u32 thread_index = ct->c_thread_index;
+      memset (ct, 0xfc, sizeof (*ct));
+      pool_put (ct_main.connections[thread_index], ct);
+      return;
+    }
+  pool_put (ct_main.connections[ct->c_thread_index], ct);
+}
+
+static ct_connection_t *
+ct_half_open_alloc (void)
+{
+  ct_main_t *cm = &ct_main;
+  u32 *hip;
+
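+  /* Half-opens always live in the main thread's pool. Recycle indices
+   * that workers marked as reusable here, under the lock, instead of
+   * freeing pool elements from worker threads */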
+  clib_spinlock_lock (&cm->ho_reusable_lock);
+  vec_foreach (hip, cm->ho_reusable)
+    pool_put_index (cm->connections[0], *hip);
+  vec_reset_length (cm->ho_reusable);
+  clib_spinlock_unlock (&cm->ho_reusable_lock);
+
+  return ct_connection_alloc (0);
+}
+
+void
+ct_half_open_add_reusable (u32 ho_index)
+{
+  ct_main_t *cm = &ct_main;
+
+  clib_spinlock_lock (&cm->ho_reusable_lock);
+  vec_add1 (cm->ho_reusable, ho_index);
+  clib_spinlock_unlock (&cm->ho_reusable_lock);
 }
 
 session_t *
 ct_session_get_peer (session_t * s)
 {
   ct_connection_t *ct, *peer_ct;
-  ct = ct_connection_get (s->connection_index);
-  peer_ct = ct_connection_get (ct->peer_index);
-  return session_get (peer_ct->c_s_index, 0);
+  ct = ct_connection_get (s->connection_index, s->thread_index);
+  peer_ct = ct_connection_get (ct->peer_index, s->thread_index);
+  return session_get (peer_ct->c_s_index, s->thread_index);
 }
 
 void
@@ -64,211 +130,468 @@ ct_session_endpoint (session_t * ll, session_endpoint_t * sep)
   sep->transport_proto = ct->actual_tp;
   sep->port = ct->c_lcl_port;
   sep->is_ip4 = ct->c_is_ip4;
+  ip_copy (&sep->ip, &ct->c_lcl_ip, ct->c_is_ip4);
+}
+
+static void
+ct_session_dealloc_fifos (ct_connection_t *ct, svm_fifo_t *rx_fifo,
+                         svm_fifo_t *tx_fifo)
+{
+  ct_segments_ctx_t *seg_ctx;
+  ct_main_t *cm = &ct_main;
+  ct_segment_flags_t flags;
+  segment_manager_t *sm;
+  app_worker_t *app_wrk;
+  ct_segment_t *ct_seg;
+  fifo_segment_t *fs;
+  u32 seg_index;
+  u32 cnt;
+
+  /*
+   * Cleanup fifos
+   */
+
+  sm = segment_manager_get (rx_fifo->segment_manager);
+  seg_index = rx_fifo->segment_index;
+
+  fs = segment_manager_get_segment_w_lock (sm, seg_index);
+  fifo_segment_free_fifo (fs, rx_fifo);
+  fifo_segment_free_fifo (fs, tx_fifo);
+  segment_manager_segment_reader_unlock (sm);
+
+  /*
+   * Update segment context
+   */
+
+  clib_rwlock_reader_lock (&cm->app_segs_lock);
+
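+  /* Reader lock suffices: only this side's atomic session counter and
+   * detach flag are updated; segments are not added or removed */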
+  seg_ctx = pool_elt_at_index (cm->app_seg_ctxs, ct->seg_ctx_index);
+  ct_seg = pool_elt_at_index (seg_ctx->segments, ct->ct_seg_index);
+
+  if (ct->flags & CT_CONN_F_CLIENT)
+    {
+      cnt =
+       __atomic_sub_fetch (&ct_seg->client_n_sessions, 1, __ATOMIC_RELAXED);
+      if (!cnt)
+       ct_seg->flags |= CT_SEGMENT_F_CLIENT_DETACHED;
+    }
+  else
+    {
+      cnt =
+       __atomic_sub_fetch (&ct_seg->server_n_sessions, 1, __ATOMIC_RELAXED);
+      if (!cnt)
+       ct_seg->flags |= CT_SEGMENT_F_SERVER_DETACHED;
+    }
+
+  flags = ct_seg->flags;
+
+  clib_rwlock_reader_unlock (&cm->app_segs_lock);
+
+  /*
+   * No need to do any app updates, return
+   */
+  if (cnt)
+    return;
+
+  if (ct->flags & CT_CONN_F_CLIENT)
+    {
+      app_wrk = app_worker_get_if_valid (ct->client_wrk);
+      /* Determine if the client app still needs the notification, i.e., if
+       * it is still attached. If the client detached and this is its last
+       * ct session on this segment, its connects segment manager is being
+       * detached as well, so do not send the notification */
+      if (app_wrk)
+       {
+         segment_manager_t *csm;
+         csm = app_worker_get_connect_segment_manager (app_wrk);
+         if (!segment_manager_app_detached (csm))
+           app_worker_del_segment_notify (app_wrk, ct->segment_handle);
+       }
+    }
+  else if (!segment_manager_app_detached (sm))
+    {
+      app_wrk = app_worker_get (ct->server_wrk);
+      app_worker_del_segment_notify (app_wrk, ct->segment_handle);
+    }
+
+  if (!(flags & CT_SEGMENT_F_CLIENT_DETACHED) ||
+      !(flags & CT_SEGMENT_F_SERVER_DETACHED))
+    return;
+
+  /*
+   * Remove segment context because both client and server detached
+   */
+
+  clib_rwlock_writer_lock (&cm->app_segs_lock);
+
+  seg_ctx = pool_elt_at_index (cm->app_seg_ctxs, ct->seg_ctx_index);
+  pool_put_index (seg_ctx->segments, ct->ct_seg_index);
+
+  /*
+   * No more segment indices left, remove the segments context
+   */
+  if (!pool_elts (seg_ctx->segments))
+    {
+      u64 table_handle = seg_ctx->client_wrk << 16 | seg_ctx->server_wrk;
+      table_handle = (u64) seg_ctx->sm_index << 32 | table_handle;
+      hash_unset (cm->app_segs_ctxs_table, table_handle);
+      pool_free (seg_ctx->segments);
+      pool_put_index (cm->app_seg_ctxs, ct->seg_ctx_index);
+    }
+
+  clib_rwlock_writer_unlock (&cm->app_segs_lock);
+
+  segment_manager_lock_and_del_segment (sm, seg_index);
+
+  /* Cleanup segment manager if needed. If the server detaches first, the
+   * client's sessions may hold up segment removal */
+  if (segment_manager_app_detached (sm) && !segment_manager_has_fifos (sm))
+    segment_manager_free_safe (sm);
 }
 
 int
-ct_session_connect_notify (session_t * ss)
+ct_session_connect_notify (session_t *ss)
 {
-  svm_fifo_segment_private_t *seg;
+  u32 ss_index, opaque, thread_index, cnt;
   ct_connection_t *sct, *cct;
+  ct_segments_ctx_t *seg_ctx;
   app_worker_t *client_wrk;
-  segment_manager_t *sm;
-  u64 segment_handle;
-  int is_fail = 0;
+  ct_main_t *cm = &ct_main;
+  ct_segment_t *ct_seg;
   session_t *cs;
-  u32 ss_index;
+  int err = 0;
 
   ss_index = ss->session_index;
+  thread_index = ss->thread_index;
   sct = (ct_connection_t *) session_get_transport (ss);
   client_wrk = app_worker_get (sct->client_wrk);
+  opaque = sct->client_opaque;
 
-  sm = segment_manager_get (ss->rx_fifo->segment_manager);
-  seg = segment_manager_get_segment_w_lock (sm, ss->rx_fifo->segment_index);
-  segment_handle = segment_manager_segment_handle (sm, seg);
+  cct = ct_connection_get (sct->peer_index, thread_index);
 
-  if (app_worker_add_segment_notify (client_wrk, segment_handle))
+  /* Client closed while waiting for reply from server */
+  if (!cct)
     {
-      clib_warning ("failed to notify client %u of new segment",
-                   sct->client_wrk);
-      segment_manager_segment_reader_unlock (sm);
-      session_close (ss);
-      is_fail = 1;
+      session_transport_closing_notify (&sct->connection);
+      session_transport_delete_notify (&sct->connection);
+      ct_connection_free (sct);
+      return 0;
     }
-  else
+
+  session_half_open_delete_notify (&cct->connection);
+  cct->flags &= ~CT_CONN_F_HALF_OPEN;
+
+  /*
+   * Update ct segment context
+   */
+
+  clib_rwlock_reader_lock (&cm->app_segs_lock);
+
+  seg_ctx = pool_elt_at_index (cm->app_seg_ctxs, sct->seg_ctx_index);
+  ct_seg = pool_elt_at_index (seg_ctx->segments, sct->ct_seg_index);
+
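+  /* If this is the first client session on the segment, notify the
+   * client app of the new segment before handing it the fifos */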
+  cnt = __atomic_add_fetch (&ct_seg->client_n_sessions, 1, __ATOMIC_RELAXED);
+  if (cnt == 1)
     {
-      segment_manager_segment_reader_unlock (sm);
+      err = app_worker_add_segment_notify (client_wrk, cct->segment_handle);
+      if (err)
+       {
+         clib_rwlock_reader_unlock (&cm->app_segs_lock);
+         session_close (ss);
+         goto error;
+       }
     }
 
-  /* Alloc client session */
-  cct = ct_connection_get (sct->peer_index);
+  clib_rwlock_reader_unlock (&cm->app_segs_lock);
+
+  /*
+   * Alloc client session
+   */
 
-  cs = session_alloc (0);
-  ss = session_get (ss_index, 0);
+  cs = session_alloc (thread_index);
+  ss = session_get (ss_index, thread_index);
   cs->session_type = ss->session_type;
-  cs->connection_index = sct->c_c_index;
-  cs->listener_index = SESSION_INVALID_INDEX;
+  cs->listener_handle = SESSION_INVALID_HANDLE;
   cs->session_state = SESSION_STATE_CONNECTING;
   cs->app_wrk_index = client_wrk->wrk_index;
   cs->connection_index = cct->c_c_index;
+  cct->seg_ctx_index = sct->seg_ctx_index;
+  cct->ct_seg_index = sct->ct_seg_index;
 
   cct->c_s_index = cs->session_index;
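+  /* Reuse the server session's fifos with directions swapped. Take refs
+   * so the fifos survive either side's cleanup */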
+  cct->client_rx_fifo = ss->tx_fifo;
+  cct->client_tx_fifo = ss->rx_fifo;
+
+  cct->client_rx_fifo->refcnt++;
+  cct->client_tx_fifo->refcnt++;
 
   /* This will allocate fifos for the session. They won't be used for
    * exchanging data but they will be used to close the connection if
    * the segment manager/worker is freed */
-  if (app_worker_init_connected (client_wrk, cs))
+  if ((err = app_worker_init_connected (client_wrk, cs)))
     {
       session_close (ss);
-      return -1;
+      session_free (cs);
+      goto error;
     }
 
-  if (app_worker_connect_notify (client_wrk, is_fail ? 0 : cs,
-                                sct->client_opaque))
+  cs->session_state = SESSION_STATE_CONNECTING;
+
+  if (app_worker_connect_notify (client_wrk, cs, err, opaque))
     {
       session_close (ss);
+      ct_session_dealloc_fifos (cct, cs->rx_fifo, cs->tx_fifo);
+      session_free (cs);
       return -1;
     }
 
-  cs = session_get (cct->c_s_index, 0);
+  cs = session_get (cct->c_s_index, cct->c_thread_index);
   cs->session_state = SESSION_STATE_READY;
 
   return 0;
+
+error:
+  app_worker_connect_notify (client_wrk, 0, err, opaque);
+  return -1;
 }
 
-static void
-ct_session_fix_eventds (svm_msg_q_t * sq, svm_msg_q_t * cq)
+static ct_segment_t *
+ct_lookup_free_segment (segment_manager_t *sm, ct_segments_ctx_t *seg_ctx,
+                       u32 pair_bytes)
 {
-  int fd;
+  uword free_bytes, max_free_bytes;
+  ct_segment_t *ct_seg, *res = 0;
+  fifo_segment_t *fs;
+  u32 max_fifos;
 
-  /*
-   * segment manager initializes only the producer eventds, since vpp is
-   * typically the producer. But for local sessions, we also pass to the
-   * apps the mqs they listen on for events from peer apps, so they are also
-   * consumer fds.
-   */
-  fd = svm_msg_q_get_producer_eventfd (sq);
-  svm_msg_q_set_consumer_eventfd (sq, fd);
-  fd = svm_msg_q_get_producer_eventfd (cq);
-  svm_msg_q_set_consumer_eventfd (cq, fd);
+  max_free_bytes = pair_bytes;
+  pool_foreach (ct_seg, seg_ctx->segments)
+    {
+      /* Client or server has detached so segment cannot be used */
+      if ((ct_seg->flags & CT_SEGMENT_F_SERVER_DETACHED) ||
+         (ct_seg->flags & CT_SEGMENT_F_CLIENT_DETACHED))
+       continue;
+      fs = segment_manager_get_segment (sm, ct_seg->segment_index);
+      free_bytes = fifo_segment_available_bytes (fs);
+      max_fifos = fifo_segment_size (fs) / pair_bytes;
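+      /* num_fifos counts rx and tx fifos individually, so halve it to
+       * compare allocated pairs against the segment's pair capacity */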
+      if (free_bytes > max_free_bytes &&
+         fifo_segment_num_fifos (fs) / 2 < max_fifos)
+       {
+         max_free_bytes = free_bytes;
+         res = ct_seg;
+       }
+    }
+
+  return res;
 }
 
-int
-ct_init_local_session (app_worker_t * client_wrk, app_worker_t * server_wrk,
-                      ct_connection_t * ct, session_t * ls, session_t * ll)
-{
-  u32 seg_size, evt_q_sz, evt_q_elts, margin = 16 << 10;
-  u32 round_rx_fifo_sz, round_tx_fifo_sz, sm_index;
-  segment_manager_properties_t *props, *cprops;
-  svm_fifo_segment_private_t *seg;
-  application_t *server, *client;
+static int
+ct_init_accepted_session (app_worker_t *server_wrk, ct_connection_t *ct,
+                         session_t *ls, session_t *ll)
+{
+  u32 sm_index, pair_bytes, seg_ctx_index = ~0, ct_seg_index = ~0;
+  u64 seg_handle, table_handle, seg_size;
+  segment_manager_props_t *props;
+  const u32 margin = 16 << 10;
+  ct_segments_ctx_t *seg_ctx;
+  ct_main_t *cm = &ct_main;
+  application_t *server;
   segment_manager_t *sm;
-  svm_msg_q_t *sq, *cq;
-  u64 segment_handle;
-  int seg_index, rv;
+  ct_segment_t *ct_seg;
+  fifo_segment_t *fs;
+  int rv, fs_index;
+  uword *spp;
 
+  sm = app_worker_get_listen_segment_manager (server_wrk, ll);
+  sm_index = segment_manager_index (sm);
   server = application_get (server_wrk->app_index);
-  client = application_get (client_wrk->app_index);
-
   props = application_segment_manager_properties (server);
-  cprops = application_segment_manager_properties (client);
-  evt_q_elts = props->evt_q_size + cprops->evt_q_size;
-  evt_q_sz = segment_manager_evt_q_expected_size (evt_q_elts);
-  round_rx_fifo_sz = 1 << max_log2 (props->rx_fifo_size);
-  round_tx_fifo_sz = 1 << max_log2 (props->tx_fifo_size);
-  seg_size = round_rx_fifo_sz + round_tx_fifo_sz + evt_q_sz + margin;
 
-  sm = app_worker_get_listen_segment_manager (server_wrk, ll);
-  seg_index = segment_manager_add_segment (sm, seg_size);
-  if (seg_index < 0)
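+  /* Table handle layout: sm_index (32) | client_wrk (16) | server_wrk (16) */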
+  table_handle = ct->client_wrk << 16 | server_wrk->wrk_index;
+  table_handle = (u64) segment_manager_index (sm) << 32 | table_handle;
+
+  /*
+   * Check if we already have a segment that can hold the fifos
+   */
+
+  clib_rwlock_reader_lock (&cm->app_segs_lock);
+
+  spp = hash_get (cm->app_segs_ctxs_table, table_handle);
+  if (spp)
     {
-      clib_warning ("failed to add new cut-through segment");
-      return seg_index;
+      seg_ctx_index = *spp;
+      seg_ctx = pool_elt_at_index (cm->app_seg_ctxs, seg_ctx_index);
+      pair_bytes = props->rx_fifo_size + props->tx_fifo_size + margin;
+      ct_seg = ct_lookup_free_segment (sm, seg_ctx, pair_bytes);
+      if (ct_seg)
+       {
+         ct_seg_index = ct_seg - seg_ctx->segments;
+         fs_index = ct_seg->segment_index;
+         __atomic_add_fetch (&ct_seg->server_n_sessions, 1, __ATOMIC_RELAXED);
+       }
+    }
+
+  clib_rwlock_reader_unlock (&cm->app_segs_lock);
+
+  /*
+   * No segment, try to alloc one and notify the server
+   */
+
+  if (ct_seg_index == ~0)
+    {
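+      /* Segment is shared by multiple sessions, so allocate generously,
+       * at least 128MB */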
+      seg_size = clib_max (props->segment_size, 128 << 20);
+      fs_index = segment_manager_add_segment (sm, seg_size, 0);
+      if (fs_index < 0)
+       {
+         rv = -1;
+         goto failed;
+       }
+
+      /* Make sure the segment is not used for other fifos */
+      fs = segment_manager_get_segment_w_lock (sm, fs_index);
+      fifo_segment_flags (fs) |= FIFO_SEGMENT_F_CUSTOM_USE;
+      segment_manager_segment_reader_unlock (sm);
+
+      clib_rwlock_writer_lock (&cm->app_segs_lock);
+
+      if (seg_ctx_index == ~0)
+       {
+         pool_get_zero (cm->app_seg_ctxs, seg_ctx);
+         seg_ctx_index = seg_ctx - cm->app_seg_ctxs;
+         hash_set (cm->app_segs_ctxs_table, table_handle, seg_ctx_index);
+         seg_ctx->server_wrk = server_wrk->wrk_index;
+         seg_ctx->client_wrk = ct->client_wrk;
+         seg_ctx->sm_index = sm_index;
+       }
+      else
+       seg_ctx = pool_elt_at_index (cm->app_seg_ctxs, seg_ctx_index);
+
+      pool_get_zero (seg_ctx->segments, ct_seg);
+      ct_seg->segment_index = fs_index;
+      ct_seg->server_n_sessions += 1;
+      ct_seg_index = ct_seg - seg_ctx->segments;
+
+      clib_rwlock_writer_unlock (&cm->app_segs_lock);
+
+      /* New segment, notify the server. Client notification sent after
+       * server accepts the connection */
+      seg_handle = segment_manager_make_segment_handle (sm_index, fs_index);
+      if ((rv = app_worker_add_segment_notify (server_wrk, seg_handle)))
+       {
+         segment_manager_lock_and_del_segment (sm, fs_index);
+
+         clib_rwlock_writer_lock (&cm->app_segs_lock);
+         pool_put_index (seg_ctx->segments, ct_seg_index);
+         clib_rwlock_writer_unlock (&cm->app_segs_lock);
+
+         goto failed_fix_count;
+       }
     }
-  seg = segment_manager_get_segment_w_lock (sm, seg_index);
-  sq = segment_manager_alloc_queue (seg, props);
-  cq = segment_manager_alloc_queue (seg, cprops);
-
-  if (props->use_mq_eventfd)
-    ct_session_fix_eventds (sq, cq);
-
-  ct->server_evt_q = pointer_to_uword (sq);
-  ct->client_evt_q = pointer_to_uword (cq);
-  rv = segment_manager_try_alloc_fifos (seg, props->rx_fifo_size,
-                                       props->tx_fifo_size, &ls->rx_fifo,
-                                       &ls->tx_fifo);
+
+  /*
+   * Allocate and initialize the fifos
+   */
+  fs = segment_manager_get_segment_w_lock (sm, fs_index);
+  rv = segment_manager_try_alloc_fifos (
+    fs, ls->thread_index, props->rx_fifo_size, props->tx_fifo_size,
+    &ls->rx_fifo, &ls->tx_fifo);
   if (rv)
     {
-      clib_warning ("failed to add fifos in cut-through segment");
       segment_manager_segment_reader_unlock (sm);
-      goto failed;
+      goto failed_fix_count;
     }
 
-  sm_index = segment_manager_index (sm);
-  ls->rx_fifo->ct_session_index = ls->session_index;
-  ls->tx_fifo->ct_session_index = ls->session_index;
+  ls->rx_fifo->shr->master_session_index = ls->session_index;
+  ls->tx_fifo->shr->master_session_index = ls->session_index;
+  ls->rx_fifo->master_thread_index = ls->thread_index;
+  ls->tx_fifo->master_thread_index = ls->thread_index;
   ls->rx_fifo->segment_manager = sm_index;
   ls->tx_fifo->segment_manager = sm_index;
-  ls->rx_fifo->segment_index = seg_index;
-  ls->tx_fifo->segment_index = seg_index;
-  ls->svm_segment_index = seg_index;
+  ls->rx_fifo->segment_index = fs_index;
+  ls->tx_fifo->segment_index = fs_index;
 
-  segment_handle = segment_manager_segment_handle (sm, seg);
-  if ((rv = app_worker_add_segment_notify (server_wrk, segment_handle)))
-    {
-      clib_warning ("failed to notify server of new segment");
-      segment_manager_segment_reader_unlock (sm);
-      goto failed;
-    }
+  seg_handle = segment_manager_segment_handle (sm, fs);
   segment_manager_segment_reader_unlock (sm);
-  ct->segment_handle = segment_handle;
+
+  ct->segment_handle = seg_handle;
+  ct->seg_ctx_index = seg_ctx_index;
+  ct->ct_seg_index = ct_seg_index;
 
   return 0;
 
+failed_fix_count:
+
+  clib_rwlock_reader_lock (&cm->app_segs_lock);
+
+  seg_ctx = pool_elt_at_index (cm->app_seg_ctxs, seg_ctx_index);
+  ct_seg = pool_elt_at_index (seg_ctx->segments, ct_seg_index);
+  __atomic_sub_fetch (&ct_seg->server_n_sessions, 1, __ATOMIC_RELAXED);
+
+  clib_rwlock_reader_unlock (&cm->app_segs_lock);
+
 failed:
-  segment_manager_del_segment (sm, seg);
   return rv;
 }
 
-int
-ct_connect (app_worker_t * client_wrk, session_t * ll,
-           session_endpoint_cfg_t * sep)
+static void
+ct_accept_rpc_wrk_handler (void *accept_args)
 {
-  u32 cct_index, ll_index, ll_ct_index;
-  ct_connection_t *sct, *cct, *ll_ct;
+  u32 cct_index, ho_index, thread_index, ll_index;
+  ct_connection_t *sct, *cct, *ho;
+  transport_connection_t *ll_ct;
   app_worker_t *server_wrk;
-  session_t *ss;
+  session_t *ss, *ll;
 
-  ll_index = ll->session_index;
-  ll_ct_index = ll->connection_index;
-
-  cct = ct_connection_alloc ();
+  /*
+   * Alloc client ct and initialize from ho
+   */
+  thread_index = vlib_get_thread_index ();
+  cct = ct_connection_alloc (thread_index);
   cct_index = cct->c_c_index;
-  sct = ct_connection_alloc ();
-  ll_ct = ct_connection_get (ll_ct_index);
 
-  /*
-   * Alloc and init client transport
+  ho_index = pointer_to_uword (accept_args);
+  ho = ct_connection_get (ho_index, 0);
+
+  /* Unlikely, but the half-open session and transport could have been freed */
+  if (PREDICT_FALSE (!ho))
+    {
+      ct_connection_free (cct);
+      return;
+    }
+
+  clib_memcpy (cct, ho, sizeof (*ho));
+  cct->c_c_index = cct_index;
+  cct->c_thread_index = thread_index;
+  cct->flags |= CT_CONN_F_HALF_OPEN;
+
+  /* Notify the session layer that the half-open is now on a different
+   * thread and mark the ho connection index as reusable. This avoids
+   * another rpc back to the main thread */
-  cct = ct_connection_get (cct_index);
-  cct->c_thread_index = 0;
-  cct->c_rmt_port = sep->port;
-  cct->c_lcl_port = 0;
-  cct->c_is_ip4 = sep->is_ip4;
-  clib_memcpy (&cct->c_rmt_ip, &sep->ip, sizeof (sep->ip));
-  cct->actual_tp = ll_ct->actual_tp;
+  session_half_open_migrate_notify (&cct->connection);
+  session_half_open_migrated_notify (&cct->connection);
+  ct_half_open_add_reusable (ho_index);
 
   /*
-   * Init server transport
+   * Alloc and init server transport
    */
-  sct->c_thread_index = 0;
+
+  ll_index = cct->peer_index;
+  ll = listen_session_get (ll_index);
+  sct = ct_connection_alloc (thread_index);
+  /* Listener transport is not necessarily ct, but if it is, the sct alloc
+   * above may have reallocated the pool, so grab the pointer only now */
+  ll_ct = listen_session_get_transport (ll);
+
+  /* Refetch cct in case the sct alloc above moved the connections pool */
+  cct = ct_connection_get (cct_index, thread_index);
+
   sct->c_rmt_port = 0;
-  sct->c_lcl_port = ll_ct->c_lcl_port;
-  sct->c_is_ip4 = sep->is_ip4;
-  clib_memcpy (&sct->c_lcl_ip, &ll_ct->c_lcl_ip, sizeof (ll_ct->c_lcl_ip));
-  sct->client_wrk = client_wrk->wrk_index;
+  sct->c_lcl_port = ll_ct->lcl_port;
+  sct->c_is_ip4 = cct->c_is_ip4;
+  clib_memcpy (&sct->c_lcl_ip, &ll_ct->lcl_ip, sizeof (ll_ct->lcl_ip));
+  sct->client_wrk = cct->client_wrk;
   sct->c_proto = TRANSPORT_PROTO_NONE;
-  sct->client_opaque = sep->opaque;
-  sct->actual_tp = ll_ct->actual_tp;
+  sct->client_opaque = cct->client_opaque;
+  sct->actual_tp = cct->actual_tp;
 
   sct->peer_index = cct->c_c_index;
   cct->peer_index = sct->c_c_index;
@@ -277,11 +600,12 @@ ct_connect (app_worker_t * client_wrk, session_t * ll,
    * Accept server session. Client session is created only after
    * server confirms accept.
    */
-  ss = session_alloc (0);
+  ss = session_alloc (thread_index);
   ll = listen_session_get (ll_index);
-  ss->session_type = ll->session_type;
+  ss->session_type = session_type_from_proto_and_ip (TRANSPORT_PROTO_NONE,
+                                                    sct->c_is_ip4);
   ss->connection_index = sct->c_c_index;
-  ss->listener_index = ll->session_index;
+  ss->listener_handle = listen_session_get_handle (ll);
   ss->session_state = SESSION_STATE_CREATED;
 
   server_wrk = application_listener_select_worker (ll);
@@ -290,66 +614,136 @@ ct_connect (app_worker_t * client_wrk, session_t * ll,
   sct->c_s_index = ss->session_index;
   sct->server_wrk = ss->app_wrk_index;
 
-  if (ct_init_local_session (client_wrk, server_wrk, sct, ss, ll))
+  if (ct_init_accepted_session (server_wrk, sct, ss, ll))
     {
-      clib_warning ("failed");
       ct_connection_free (sct);
       session_free (ss);
-      return -1;
+      return;
     }
 
   ss->session_state = SESSION_STATE_ACCEPTING;
   if (app_worker_accept_notify (server_wrk, ss))
     {
-      clib_warning ("failed");
+      ct_session_dealloc_fifos (sct, ss->rx_fifo, ss->tx_fifo);
       ct_connection_free (sct);
-      session_free_w_fifos (ss);
-      return -1;
+      session_free (ss);
+      return;
     }
 
-  cct->client_evt_q = sct->client_evt_q;
-  cct->server_evt_q = sct->server_evt_q;
   cct->segment_handle = sct->segment_handle;
+}
 
-  return 0;
+static int
+ct_connect (app_worker_t * client_wrk, session_t * ll,
+           session_endpoint_cfg_t * sep)
+{
+  u32 thread_index, ho_index;
+  ct_main_t *cm = &ct_main;
+  ct_connection_t *ho;
+
+  /* Simple round-robin policy for spreading sessions over workers. When
+   * workers are present, skip thread index 0, i.e., offset the index by 1,
+   * as it is dedicated to the main thread. Note that n_workers does not
+   * include the main thread */
+  cm->n_sessions += 1;
+  thread_index = cm->n_workers ? (cm->n_sessions % cm->n_workers) + 1 : 0;
+
+  /*
+   * Alloc and init client half-open transport
+   */
+
+  ho = ct_half_open_alloc ();
+  ho_index = ho->c_c_index;
+  ho->c_rmt_port = sep->port;
+  ho->c_lcl_port = 0;
+  ho->c_is_ip4 = sep->is_ip4;
+  ho->client_opaque = sep->opaque;
+  ho->client_wrk = client_wrk->wrk_index;
+  ho->peer_index = ll->session_index;
+  ho->c_proto = TRANSPORT_PROTO_NONE;
+  ho->c_flags |= TRANSPORT_CONNECTION_F_NO_LOOKUP;
+  clib_memcpy (&ho->c_rmt_ip, &sep->ip, sizeof (sep->ip));
+  ho->flags |= CT_CONN_F_CLIENT;
+  ho->c_s_index = ~0;
+  ho->actual_tp = sep->transport_proto;
+
+  /*
+   * Accept connection on thread selected above. Connected reply comes
+   * after server accepts the connection.
+   */
+
+  session_send_rpc_evt_to_thread_force (thread_index,
+                                       ct_accept_rpc_wrk_handler,
+                                       uword_to_pointer (ho_index, void *));
+
+  return ho_index;
 }
 
-u32
+static u32
 ct_start_listen (u32 app_listener_index, transport_endpoint_t * tep)
 {
   session_endpoint_cfg_t *sep;
   ct_connection_t *ct;
 
   sep = (session_endpoint_cfg_t *) tep;
-  ct = ct_connection_alloc ();
+  ct = ct_connection_alloc (0);
   ct->server_wrk = sep->app_wrk_index;
   ct->c_is_ip4 = sep->is_ip4;
   clib_memcpy (&ct->c_lcl_ip, &sep->ip, sizeof (sep->ip));
   ct->c_lcl_port = sep->port;
+  ct->c_s_index = app_listener_index;
   ct->actual_tp = sep->transport_proto;
   return ct->c_c_index;
 }
 
-u32
+static u32
 ct_stop_listen (u32 ct_index)
 {
   ct_connection_t *ct;
-  ct = ct_connection_get (ct_index);
+  ct = ct_connection_get (ct_index, 0);
   ct_connection_free (ct);
   return 0;
 }
 
-transport_connection_t *
+static transport_connection_t *
 ct_listener_get (u32 ct_index)
 {
-  return (transport_connection_t *) ct_connection_get (ct_index);
+  return (transport_connection_t *) ct_connection_get (ct_index, 0);
 }
 
-int
+static transport_connection_t *
+ct_half_open_get (u32 ct_index)
+{
+  return (transport_connection_t *) ct_connection_get (ct_index, 0);
+}
+
+static void
+ct_session_cleanup (u32 conn_index, u32 thread_index)
+{
+  ct_connection_t *ct, *peer_ct;
+
+  ct = ct_connection_get (conn_index, thread_index);
+  if (!ct)
+    return;
+
+  peer_ct = ct_connection_get (ct->peer_index, thread_index);
+  if (peer_ct)
+    peer_ct->peer_index = ~0;
+
+  ct_connection_free (ct);
+}
+
+static void
+ct_cleanup_ho (u32 ho_index)
+{
+  ct_connection_free (ct_connection_get (ho_index, 0));
+}
+
+static int
 ct_session_connect (transport_endpoint_cfg_t * tep)
 {
   session_endpoint_cfg_t *sep_ext;
-  session_endpoint_t *sep;
+  session_endpoint_t _sep, *sep = &_sep;
   app_worker_t *app_wrk;
   session_handle_t lh;
   application_t *app;
@@ -359,7 +753,7 @@ ct_session_connect (transport_endpoint_cfg_t * tep)
   u8 fib_proto;
 
   sep_ext = (session_endpoint_cfg_t *) tep;
-  sep = (session_endpoint_t *) tep;
+  _sep = *(session_endpoint_t *) tep;
   app_wrk = app_worker_get (sep_ext->app_wrk_index);
   app = application_get (app_wrk->app_index);
 
@@ -367,7 +761,7 @@ ct_session_connect (transport_endpoint_cfg_t * tep)
   table_index = application_local_session_table (app);
   lh = session_lookup_local_endpoint (table_index, sep);
   if (lh == SESSION_DROP_HANDLE)
-    return VNET_API_ERROR_APP_CONNECT_FILTERED;
+    return SESSION_E_FILTERED;
 
   if (lh == SESSION_INVALID_HANDLE)
     goto global_scope;
@@ -392,47 +786,78 @@ ct_session_connect (transport_endpoint_cfg_t * tep)
 
 global_scope:
   if (session_endpoint_is_local (sep))
-    return VNET_API_ERROR_SESSION_CONNECT;
+    return SESSION_E_NOROUTE;
 
   if (!application_has_global_scope (app))
-    return VNET_API_ERROR_APP_CONNECT_SCOPE;
+    return SESSION_E_SCOPE;
 
   fib_proto = session_endpoint_fib_proto (sep);
-  table_index = application_session_table (app, fib_proto);
-  ll = session_lookup_listener (table_index, sep);
+  table_index = session_lookup_get_index_for_fib (fib_proto, sep->fib_index);
+  ll = session_lookup_listener_wildcard (table_index, sep);
 
-  if (ll)
+  /* Avoid connecting app to own listener */
+  if (ll && ll->app_index != app->app_index)
     return ct_connect (app_wrk, ll, sep_ext);
 
   /* Failed to connect but no error */
-  return 1;
+  return SESSION_E_LOCAL_CONNECT;
 }
 
-void
+static void
 ct_session_close (u32 ct_index, u32 thread_index)
 {
   ct_connection_t *ct, *peer_ct;
+  app_worker_t *app_wrk;
   session_t *s;
 
-  ct = ct_connection_get (ct_index);
-  peer_ct = ct_connection_get (ct->peer_index);
+  ct = ct_connection_get (ct_index, thread_index);
+  peer_ct = ct_connection_get (ct->peer_index, thread_index);
   if (peer_ct)
     {
       peer_ct->peer_index = ~0;
-      session_transport_closing_notify (&peer_ct->connection);
+      /* Make sure session was allocated */
+      if (peer_ct->flags & CT_CONN_F_HALF_OPEN)
+       {
+         app_wrk = app_worker_get (peer_ct->client_wrk);
+         app_worker_connect_notify (app_wrk, 0, SESSION_E_REFUSED,
+                                    peer_ct->client_opaque);
+       }
+      else if (peer_ct->c_s_index != ~0)
+       session_transport_closing_notify (&peer_ct->connection);
+      else
+       ct_connection_free (peer_ct);
+    }
+
+  s = session_get (ct->c_s_index, ct->c_thread_index);
+
+  if (ct->flags & CT_CONN_F_CLIENT)
+    {
+      /* Normal free for client session as the fifos are allocated through
+       * the connects segment manager in a segment that's not shared with
+       * the server */
+      session_free_w_fifos (s);
+      ct_session_dealloc_fifos (ct, ct->client_rx_fifo, ct->client_tx_fifo);
+    }
+  else
+    {
+      /* Manual session and fifo segment cleanup to avoid implicit
+       * segment manager cleanups and notifications */
+      app_wrk = app_worker_get_if_valid (s->app_wrk_index);
+      if (app_wrk)
+       app_worker_cleanup_notify (app_wrk, s, SESSION_CLEANUP_SESSION);
+
+      ct_session_dealloc_fifos (ct, s->rx_fifo, s->tx_fifo);
+      session_free (s);
     }
 
-  s = session_get (ct->c_s_index, 0);
-  app_worker_del_segment_notify (app_worker_get (s->app_wrk_index),
-                                ct->segment_handle);
-  session_free_w_fifos (s);
   ct_connection_free (ct);
 }
 
-transport_connection_t *
+static transport_connection_t *
 ct_session_get (u32 ct_index, u32 thread_index)
 {
-  return (transport_connection_t *) ct_connection_get (ct_index);
+  return (transport_connection_t *) ct_connection_get (ct_index,
+                                                      thread_index);
 }
 
 static u8 *
@@ -461,19 +886,64 @@ format_ct_connection_id (u8 * s, va_list * args)
   return s;
 }
 
-u8 *
+static int
+ct_custom_tx (void *session, transport_send_params_t * sp)
+{
+  session_t *s = (session_t *) session;
+  if (session_has_transport (s))
+    return 0;
+  /* If the event was enqueued towards the peer, remove the session from
+   * the scheduler and clear its tx flag, i.e., accept new tx events. Unset
+   * the fifo event flag first to avoid missing events if the peer has not
+   * cleared it yet; a 0 return from ct_session_tx is interpreted as a
+   * successful notification and the session is descheduled */
+  svm_fifo_unset_event (s->tx_fifo);
+  if (!ct_session_tx (s))
+    sp->flags = TRANSPORT_SND_F_DESCHED;
+
+  /* The scheduler uses packet count as a means of upper bounding the amount
+   * of work done per dispatch. So make it look like we have sent something */
+  return 1;
+}
+
+static int
+ct_app_rx_evt (transport_connection_t * tc)
+{
+  ct_connection_t *ct = (ct_connection_t *) tc, *peer_ct;
+  session_t *ps;
+
+  peer_ct = ct_connection_get (ct->peer_index, tc->thread_index);
+  if (!peer_ct)
+    return -1;
+  ps = session_get (peer_ct->c_s_index, peer_ct->c_thread_index);
+  return session_dequeue_notify (ps);
+}
+
+static u8 *
 format_ct_listener (u8 * s, va_list * args)
 {
   u32 tc_index = va_arg (*args, u32);
+  u32 __clib_unused thread_index = va_arg (*args, u32);
   u32 __clib_unused verbose = va_arg (*args, u32);
-  ct_connection_t *ct = ct_connection_get (tc_index);
-  s = format (s, "%-50U", format_ct_connection_id, ct);
+  ct_connection_t *ct = ct_connection_get (tc_index, 0);
+  s = format (s, "%-" SESSION_CLI_ID_LEN "U", format_ct_connection_id, ct);
+  if (verbose)
+    s = format (s, "%-" SESSION_CLI_STATE_LEN "s", "LISTEN");
+  return s;
+}
+
+static u8 *
+format_ct_half_open (u8 *s, va_list *args)
+{
+  u32 ho_index = va_arg (*args, u32);
+  u32 verbose = va_arg (*args, u32);
+  ct_connection_t *ct = ct_connection_get (ho_index, 0);
+  s = format (s, "%-" SESSION_CLI_ID_LEN "U", format_ct_connection_id, ct);
   if (verbose)
-    s = format (s, "%-15s", "LISTEN");
+    s = format (s, "%-" SESSION_CLI_STATE_LEN "s", "HALF-OPEN");
   return s;
 }
 
-u8 *
+static u8 *
 format_ct_connection (u8 * s, va_list * args)
 {
   ct_connection_t *ct = va_arg (*args, ct_connection_t *);
@@ -481,10 +951,10 @@ format_ct_connection (u8 * s, va_list * args)
 
   if (!ct)
     return s;
-  s = format (s, "%-50U", format_ct_connection_id, ct);
+  s = format (s, "%-" SESSION_CLI_ID_LEN "U", format_ct_connection_id, ct);
   if (verbose)
     {
-      s = format (s, "%-15s", "ESTABLISHED");
+      s = format (s, "%-" SESSION_CLI_STATE_LEN "s", "ESTABLISHED");
       if (verbose > 1)
        {
          s = format (s, "\n");
@@ -493,15 +963,15 @@ format_ct_connection (u8 * s, va_list * args)
   return s;
 }
 
-u8 *
+static u8 *
 format_ct_session (u8 * s, va_list * args)
 {
   u32 ct_index = va_arg (*args, u32);
-  u32 __clib_unused thread_index = va_arg (*args, u32);
+  u32 thread_index = va_arg (*args, u32);
   u32 verbose = va_arg (*args, u32);
   ct_connection_t *ct;
 
-  ct = ct_connection_get (ct_index);
+  ct = ct_connection_get (ct_index, thread_index);
   if (!ct)
     {
       s = format (s, "empty\n");
@@ -512,26 +982,67 @@ format_ct_session (u8 * s, va_list * args)
   return s;
 }
 
+clib_error_t *
+ct_enable_disable (vlib_main_t * vm, u8 is_en)
+{
+  ct_main_t *cm = &ct_main;
+
+  cm->n_workers = vlib_num_workers ();
+  vec_validate (cm->connections, cm->n_workers);
+  clib_spinlock_init (&cm->ho_reusable_lock);
+  clib_rwlock_init (&cm->app_segs_lock);
+  return 0;
+}
+
 /* *INDENT-OFF* */
-const static transport_proto_vft_t cut_thru_proto = {
+static const transport_proto_vft_t cut_thru_proto = {
+  .enable = ct_enable_disable,
   .start_listen = ct_start_listen,
   .stop_listen = ct_stop_listen,
+  .get_connection = ct_session_get,
   .get_listener = ct_listener_get,
+  .get_half_open = ct_half_open_get,
+  .cleanup = ct_session_cleanup,
+  .cleanup_ho = ct_cleanup_ho,
   .connect = ct_session_connect,
   .close = ct_session_close,
-  .get_connection = ct_session_get,
-  .tx_type = TRANSPORT_TX_INTERNAL,
-  .service_type = TRANSPORT_SERVICE_APP,
+  .custom_tx = ct_custom_tx,
+  .app_rx_evt = ct_app_rx_evt,
   .format_listener = format_ct_listener,
+  .format_half_open = format_ct_half_open,
   .format_connection = format_ct_session,
+  .transport_options = {
+    .name = "ct",
+    .short_name = "C",
+    .tx_type = TRANSPORT_TX_INTERNAL,
+    .service_type = TRANSPORT_SERVICE_VC,
+  },
 };
 /* *INDENT-ON* */
 
+int
+ct_session_tx (session_t * s)
+{
+  ct_connection_t *ct, *peer_ct;
+  session_t *peer_s;
+
+  ct = (ct_connection_t *) session_get_transport (s);
+  peer_ct = ct_connection_get (ct->peer_index, ct->c_thread_index);
+  if (!peer_ct)
+    return 0;
+  peer_s = session_get (peer_ct->c_s_index, peer_ct->c_thread_index);
+  if (peer_s->session_state >= SESSION_STATE_TRANSPORT_CLOSING)
+    return 0;
+  return session_enqueue_notify (peer_s);
+}
+
 static clib_error_t *
 ct_transport_init (vlib_main_t * vm)
 {
   transport_register_protocol (TRANSPORT_PROTO_NONE, &cut_thru_proto,
                               FIB_PROTOCOL_IP4, ~0);
+  transport_register_protocol (TRANSPORT_PROTO_NONE, &cut_thru_proto,
+                              FIB_PROTOCOL_IP6, ~0);
   return 0;
 }