Cleanup URI code and TCP bugfixing 03/5603/7
authorFlorin Coras <fcoras@cisco.com>
Wed, 1 Mar 2017 16:17:34 +0000 (08:17 -0800)
committerDave Barach <openvpp@barachs.net>
Sat, 4 Mar 2017 01:22:36 +0000 (01:22 +0000)
- Add CLI/API to enable session layer, by default it's disabled
- Improve rcv wnd computation
- Improvements to tx path
- URI code cleanup
- Builtin test tcp server
- Improve src port allocation

Change-Id: I2ace498e76a0771d4c31a8075cc14fe33d7dfa38
Signed-off-by: Florin Coras <fcoras@cisco.com>
24 files changed:
src/scripts/vnet/uri/dummy_app.py [new file with mode: 0644]
src/scripts/vnet/uri/tcp_server
src/svm/svm_fifo.c
src/uri.am
src/uri/uri_tcp_test.c
src/uri/uri_udp_test.c
src/uri/uri_udp_test2.c [deleted file]
src/uri/uritest.c [deleted file]
src/vnet.am
src/vnet/api_errno.h
src/vnet/session/application.c
src/vnet/session/application.h
src/vnet/session/application_interface.c
src/vnet/session/node.c
src/vnet/session/session.api
src/vnet/session/session.c
src/vnet/session/session.h
src/vnet/session/session_api.c
src/vnet/session/session_cli.c
src/vnet/tcp/builtin_server.c [new file with mode: 0644]
src/vnet/tcp/tcp.c
src/vnet/tcp/tcp.h
src/vnet/tcp/tcp_input.c
src/vnet/tcp/tcp_output.c

diff --git a/src/scripts/vnet/uri/dummy_app.py b/src/scripts/vnet/uri/dummy_app.py
new file mode 100644 (file)
index 0000000..b80fbb2
--- /dev/null
@@ -0,0 +1,65 @@
+#!/usr/bin/env python
+
+import socket
+import sys
+import bitstring
+
+# action can be reflect or drop 
+action = "drop"
+
def handle_connection(connection, client_address):
    """Service one accepted TCP connection until the peer closes it.

    Reads up to 4096 bytes per recv().  When the module-level ``action``
    is anything other than "drop", each chunk is echoed back to the
    sender (reflect mode); in "drop" mode received data is discarded.
    The connection is always closed, even if recv/sendall raises.
    """
    print("Received connection from {}".format(repr(client_address)))
    try:
        while True:
            data = connection.recv(4096)
            if not data:
                # Peer performed an orderly shutdown.
                break
            if action != "drop":
                connection.sendall(data)
    finally:
        connection.close()
+        
def run_server(ip, port):
    """Bind to ip:port and service TCP connections one at a time.

    Single-threaded: each accepted connection is handled to completion
    by handle_connection() before the next accept().  Runs forever.
    """
    print("Starting server {}:{}".format(repr(ip), repr(port)))
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    # Allow immediate restart of the server without waiting for
    # lingering TIME_WAIT sockets to expire (EADDRINUSE otherwise).
    sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    server_address = (ip, int(port))
    sock.bind(server_address)
    sock.listen(1)

    while True:
        connection, client_address = sock.accept()
        handle_connection(connection, client_address)
+
def prepare_data():
    """Return a 64 KiB test payload as a bytearray.

    The payload is the byte pattern 0, 1, ..., 255 repeated 256 times
    (2**16 bytes total), so a receiver can verify every byte by its
    offset: payload[i] == i & 0xff.
    """
    return bytearray(i & 0xff for i in range(2 ** 16))
+
def run_client(ip, port):
    """Connect to ip:port and send the test payload once.

    The socket is closed unconditionally, even if sendall() raises.
    """
    print("Starting client {}:{}".format(repr(ip), repr(port)))
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    # Bug fix: the ip/port arguments were previously ignored and the
    # address was hard-coded to ("6.0.1.1", 1234).
    server_address = (ip, int(port))
    sock.connect(server_address)

    data = prepare_data()
    try:
        sock.sendall(data)
    finally:
        sock.close()
+    
def run(mode, ip, port):
    """Dispatch to the server or client entry point based on *mode*."""
    handler = {"server": run_server, "client": run_client}.get(mode)
    if handler is None:
        raise Exception("Unknown mode. Only client and server supported")
    handler(ip, port)
+
if __name__ == "__main__":
    # Usage: ./dummy_app <mode> <ip> <port> [<action>]
    if len(sys.argv) < 4:
        raise Exception("Usage: ./dummy_app <mode> <ip> <port> [<action>]")
    if len(sys.argv) == 5:
        # Optional fourth argument overrides the module-level action
        # ("reflect" or "drop").
        action = sys.argv[4]

    run(sys.argv[1], sys.argv[2], sys.argv[3])
index 7f5a86d..c29afc6 100644 (file)
@@ -2,3 +2,4 @@ create host-interface name vpp1
 set int state host-vpp1 up
 set int ip address host-vpp1 6.0.1.1/24
 trace add af-packet-input 10
+session enable
index 11f9019..e3f534b 100644 (file)
@@ -508,9 +508,9 @@ svm_fifo_peek (svm_fifo_t * f, int pid, u32 offset, u32 max_bytes,
     {
       /* Number of bytes in first copy segment */
       first_copy_bytes =
-       ((nitems - f->head) < total_copy_bytes) ?
-       (nitems - f->head) : total_copy_bytes;
-      clib_memcpy (copy_here, &f->data[f->head], first_copy_bytes);
+       ((nitems - f->head + offset) < total_copy_bytes) ?
+       (nitems - f->head + offset) : total_copy_bytes;
+      clib_memcpy (copy_here, &f->data[f->head + offset], first_copy_bytes);
 
       /* Number of bytes in second copy segment, if any */
       second_copy_bytes = total_copy_bytes - first_copy_bytes;
index 8cdd77c..09b5b15 100644 (file)
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-noinst_PROGRAMS += uri_udp_test2 uri_tcp_test
+noinst_PROGRAMS += uri_udp_test uri_tcp_test
 
-uri_udp_test2_SOURCES = uri/uri_udp_test2.c                    
-uri_udp_test2_LDADD = libvlibmemoryclient.la libvlibapi.la libsvm.la \
-       libvppinfra.la -lpthread -lm -lrt 
+uri_udp_test_SOURCES = uri/uri_udp_test.c
+uri_udp_test_LDADD = libvlibmemoryclient.la libvlibapi.la libsvm.la \
+       libvppinfra.la -lpthread -lm -lrt
 
 uri_tcp_test_SOURCES = uri/uri_tcp_test.c
 uri_tcp_test_LDADD = libvlibmemoryclient.la libvlibapi.la libsvm.la \
-       libvppinfra.la -lpthread -lm -lrt 
+       libvppinfra.la -lpthread -lm -lrt
index ed5a37d..6c9cf1d 100644 (file)
 #include <svm/svm_fifo_segment.h>
 #include <vlibmemory/api.h>
 #include <vpp/api/vpe_msg_enum.h>
+#include <vnet/session/application_interface.h>
 
-#include "../vnet/session/application_interface.h"
-
-#define vl_typedefs             /* define message structures */
+#define vl_typedefs            /* define message structures */
 #include <vpp/api/vpe_all_api_h.h>
 #undef vl_typedefs
 
 /* declare message handlers for each api */
 
-#define vl_endianfun            /* define message structures */
+#define vl_endianfun           /* define message structures */
 #include <vpp/api/vpe_all_api_h.h>
 #undef vl_endianfun
 
@@ -45,8 +44,8 @@ vlib_main_t **vlib_mains;
 
 typedef struct
 {
-  svm_fifo_t * server_rx_fifo;
-  svm_fifo_t * server_tx_fifo;
+  svm_fifo_t *server_rx_fifo;
+  svm_fifo_t *server_tx_fifo;
 
   u32 vpp_session_index;
   u32 vpp_session_thread;
@@ -69,19 +68,19 @@ typedef struct
   u32 my_client_index;
 
   /* The URI we're playing with */
-  u8 * uri;
+  u8 *uri;
 
   /* Session pool */
-  session_t * sessions;
+  session_t *sessions;
 
   /* Hash table for disconnect processing */
-  uword * session_index_by_vpp_handles;
+  uword *session_index_by_vpp_handles;
 
   /* intermediate rx buffer */
-  u8 * rx_buf;
+  u8 *rx_buf;
 
   /* URI for slave's connect */
-  u8 * connect_uri;
+  u8 *connect_uri;
 
   u32 connected_session_index;
 
@@ -91,10 +90,10 @@ typedef struct
   int drop_packets;
 
   /* Our event queue */
-  unix_shared_memory_queue_t * our_event_queue;
+  unix_shared_memory_queue_t *our_event_queue;
 
   /* $$$ single thread only for the moment */
-  unix_shared_memory_queue_t * vpp_event_queue;
+  unix_shared_memory_queue_t *vpp_event_queue;
 
   pid_t my_pid;
 
@@ -111,12 +110,15 @@ typedef struct
   u32 configured_segment_size;
 
   /* VNET_API_ERROR_FOO -> "Foo" hash table */
-  uword * error_string_by_error_number;
-
-  /* convenience */
-  svm_fifo_segment_main_t * segment_main;
+  uword *error_string_by_error_number;
 
   u8 *connect_test_data;
+  pthread_t client_rx_thread_handle;
+  u32 client_bytes_received;
+  u8 test_return_packets;
+
+  /* convenience */
+  svm_fifo_segment_main_t *segment_main;
 } uri_tcp_test_main_t;
 
 uri_tcp_test_main_t uri_tcp_test_main;
@@ -141,7 +143,7 @@ wait_for_state_change (uri_tcp_test_main_t * utm, connection_state_t state)
   while (clib_time_now (&utm->clib_time) < timeout)
     {
       if (utm->state == state)
-        return 0;
+       return 0;
       if (utm->state == STATE_FAILED)
        return -1;
     }
@@ -209,7 +211,7 @@ connect_to_vpp (char *name)
 }
 
 static void
-vl_api_map_another_segment_t_handler (vl_api_map_another_segment_t *mp)
+vl_api_map_another_segment_t_handler (vl_api_map_another_segment_t * mp)
 {
   svm_fifo_segment_create_args_t _a, *a = &_a;
   int rv;
@@ -221,24 +223,24 @@ vl_api_map_another_segment_t_handler (vl_api_map_another_segment_t *mp)
   if (rv)
     {
       clib_warning ("svm_fifo_segment_attach ('%s') failed",
-                    mp->segment_name);
+                   mp->segment_name);
       return;
     }
   clib_warning ("Mapped new segment '%s' size %d", mp->segment_name,
-                mp->segment_size);
+               mp->segment_size);
 }
 
 static void
 vl_api_disconnect_session_t_handler (vl_api_disconnect_session_t * mp)
 {
   uri_tcp_test_main_t *utm = &uri_tcp_test_main;
-  session_t * session;
-  vl_api_disconnect_session_reply_t * rmp;
-  uword * p;
+  session_t *session;
+  vl_api_disconnect_session_reply_t *rmp;
+  uword *p;
   int rv = 0;
   u64 key;
 
-  key = (((u64)mp->session_thread_index) << 32) | (u64)mp->session_index;
+  key = (((u64) mp->session_thread_index) << 32) | (u64) mp->session_index;
 
   p = hash_get (utm->session_index_by_vpp_handles, key);
 
@@ -254,6 +256,8 @@ vl_api_disconnect_session_t_handler (vl_api_disconnect_session_t * mp)
       rv = -11;
     }
 
+  utm->time_to_stop = 1;
+
   rmp = vl_msg_api_alloc (sizeof (*rmp));
   memset (rmp, 0, sizeof (*rmp));
 
@@ -261,32 +265,32 @@ vl_api_disconnect_session_t_handler (vl_api_disconnect_session_t * mp)
   rmp->retval = rv;
   rmp->session_index = mp->session_index;
   rmp->session_thread_index = mp->session_thread_index;
-  vl_msg_api_send_shmem (utm->vl_input_queue, (u8 *)&rmp);
+  vl_msg_api_send_shmem (utm->vl_input_queue, (u8 *) & rmp);
 }
 
 static void
 vl_api_reset_session_t_handler (vl_api_reset_session_t * mp)
 {
   uri_tcp_test_main_t *utm = &uri_tcp_test_main;
-  session_t * session;
-  vl_api_reset_session_reply_t * rmp;
-  uword * p;
+  session_t *session;
+  vl_api_reset_session_reply_t *rmp;
+  uword *p;
   int rv = 0;
   u64 key;
 
-  key = (((u64)mp->session_thread_index) << 32) | (u64)mp->session_index;
+  key = (((u64) mp->session_thread_index) << 32) | (u64) mp->session_index;
 
-  p = hash_get(utm->session_index_by_vpp_handles, key);
+  p = hash_get (utm->session_index_by_vpp_handles, key);
 
   if (p)
     {
-      session = pool_elt_at_index(utm->sessions, p[0]);
-      hash_unset(utm->session_index_by_vpp_handles, key);
-      pool_put(utm->sessions, session);
+      session = pool_elt_at_index (utm->sessions, p[0]);
+      hash_unset (utm->session_index_by_vpp_handles, key);
+      pool_put (utm->sessions, session);
     }
   else
     {
-      clib_warning("couldn't find session key %llx", key);
+      clib_warning ("couldn't find session key %llx", key);
       rv = -11;
     }
 
@@ -296,301 +300,95 @@ vl_api_reset_session_t_handler (vl_api_reset_session_t * mp)
   rmp->retval = rv;
   rmp->session_index = mp->session_index;
   rmp->session_thread_index = mp->session_thread_index;
-  vl_msg_api_send_shmem (utm->vl_input_queue, (u8 *)&rmp);
+  vl_msg_api_send_shmem (utm->vl_input_queue, (u8 *) & rmp);
 }
 
 void
-handle_fifo_event_connect_rx (uri_tcp_test_main_t *utm, session_fifo_event_t * e)
+client_handle_fifo_event_rx (uri_tcp_test_main_t * utm,
+                            session_fifo_event_t * e)
 {
-  svm_fifo_t * rx_fifo;
-  int n_read, bytes;
+  svm_fifo_t *rx_fifo;
+  int n_read, bytes, i;
 
   rx_fifo = e->fifo;
 
   bytes = e->enqueue_length;
   do
     {
-      n_read = svm_fifo_dequeue_nowait (rx_fifo, 0, vec_len(utm->rx_buf),
-                                         utm->rx_buf);
+      n_read = svm_fifo_dequeue_nowait (rx_fifo, 0, vec_len (utm->rx_buf),
+                                       utm->rx_buf);
       if (n_read > 0)
-        bytes -= n_read;
+       {
+         bytes -= n_read;
+         for (i = 0; i < n_read; i++)
+           {
+             if (utm->rx_buf[i] != ((utm->client_bytes_received + i) & 0xff))
+               {
+                 clib_warning ("error at byte %lld, 0x%x not 0x%x",
+                               utm->client_bytes_received + i,
+                               utm->rx_buf[i],
+                               ((utm->client_bytes_received + i) & 0xff));
+               }
+           }
+         utm->client_bytes_received += n_read;
+       }
+
     }
   while (n_read < 0 || bytes > 0);
-
-  //      bytes_to_read = svm_fifo_max_dequeue (rx_fifo);
-  //
-  //      bytes_to_read = vec_len(utm->rx_buf) > bytes_to_read ?
-  //        bytes_to_read : vec_len(utm->rx_buf);
-  //
-  //      buffer_offset = 0;
-  //      while (bytes_to_read > 0)
-  //        {
-  //          rv = svm_fifo_dequeue_nowait2 (rx_fifo, mypid,
-  //                                         bytes_to_read,
-  //                                         utm->rx_buf + buffer_offset);
-  //          if (rv > 0)
-  //            {
-  //              bytes_to_read -= rv;
-  //              buffer_offset += rv;
-  //              bytes_received += rv;
-  //            }
-  //        }
-
-
-  //  while (bytes_received < bytes_sent)
-  //    {
-  //      rv = svm_fifo_dequeue_nowait2 (rx_fifo, mypid,
-  //                                     vec_len (utm->rx_buf),
-  //                                     utm->rx_buf);
-  //      if (rv > 0)
-  //        {
-  //#if CLIB_DEBUG > 0
-  //          int j;
-  //          for (j = 0; j < rv; j++)
-  //            {
-  //              if (utm->rx_buf[j] != ((bytes_received + j) & 0xff))
-  //                {
-  //                  clib_warning ("error at byte %lld, 0x%x not 0x%x",
-  //                                bytes_received + j,
-  //                                utm->rx_buf[j],
-  //                                ((bytes_received + j )&0xff));
-  //                }
-  //            }
-  //#endif
-  //          bytes_received += (u64) rv;
-  //        }
-  //    }
 }
 
 void
-handle_connect_event_queue (uri_tcp_test_main_t * utm)
+client_handle_event_queue (uri_tcp_test_main_t * utm)
 {
   session_fifo_event_t _e, *e = &_e;;
 
-  unix_shared_memory_queue_sub (utm->our_event_queue, (u8 *) e, 0 /* nowait */);
+  unix_shared_memory_queue_sub (utm->our_event_queue, (u8 *) e,
+                               0 /* nowait */ );
   switch (e->event_type)
     {
     case FIFO_EVENT_SERVER_RX:
-      handle_fifo_event_connect_rx (utm, e);
+      client_handle_fifo_event_rx (utm, e);
       break;
 
     case FIFO_EVENT_SERVER_EXIT:
       return;
 
     default:
-      clib_warning("unknown event type %d", e->event_type);
+      clib_warning ("unknown event type %d", e->event_type);
       break;
     }
 }
 
-void
-uri_tcp_connect_send (uri_tcp_test_main_t *utm)
-{
-  u8 *test_data = utm->connect_test_data;
-  u64 bytes_sent = 0;
-  int rv;
-  int mypid = getpid();
-  session_t * session;
-  svm_fifo_t *tx_fifo;
-  int buffer_offset, bytes_to_send = 0;
-  session_fifo_event_t evt;
-  static int serial_number = 0;
-  int i;
-  u32 max_chunk = 64 << 10, write;
-
-  session = pool_elt_at_index (utm->sessions, utm->connected_session_index);
-  tx_fifo = session->server_tx_fifo;
-
-  vec_validate (utm->rx_buf, vec_len (test_data) - 1);
-
-  for (i = 0; i < 10; i++)
-    {
-      bytes_to_send = vec_len (test_data);
-      buffer_offset = 0;
-      while (bytes_to_send > 0)
-        {
-          write = bytes_to_send > max_chunk ? max_chunk : bytes_to_send;
-          rv = svm_fifo_enqueue_nowait (tx_fifo, mypid, write,
-                                         test_data + buffer_offset);
-
-          if (rv > 0)
-            {
-              bytes_to_send -= rv;
-              buffer_offset += rv;
-              bytes_sent += rv;
-
-              /* Fabricate TX event, send to vpp */
-              evt.fifo = tx_fifo;
-              evt.event_type = FIFO_EVENT_SERVER_TX;
-              /* $$$$ for event logging */
-              evt.enqueue_length = rv;
-              evt.event_id = serial_number++;
-
-              unix_shared_memory_queue_add (utm->vpp_event_queue, (u8 *) &evt,
-                                            0 /* do wait for mutex */);
-            }
-        }
-    }
-}
-
-static void
-uri_tcp_client_test (uri_tcp_test_main_t * utm)
-{
-  vl_api_connect_uri_t * cmp;
-  vl_api_disconnect_session_t *dmp;
-  session_t *connected_session;
-  int i;
-
-  cmp = vl_msg_api_alloc (sizeof (*cmp));
-  memset (cmp, 0, sizeof (*cmp));
-
-  cmp->_vl_msg_id = ntohs (VL_API_CONNECT_URI);
-  cmp->client_index = utm->my_client_index;
-  cmp->context = ntohl(0xfeedface);
-  memcpy (cmp->uri, utm->connect_uri, vec_len (utm->connect_uri));
-  vl_msg_api_send_shmem (utm->vl_input_queue, (u8 *)&cmp);
-
-  if (wait_for_state_change (utm, STATE_READY))
-    {
-      return;
-    }
-
-  /* Init test data */
-  vec_validate (utm->connect_test_data, 64 * 1024 - 1);
-  for (i = 0; i < vec_len (utm->connect_test_data); i++)
-    utm->connect_test_data[i] = i & 0xff;
-
-  /* Start reader thread */
-  /* handle_connect_event_queue (utm); */
-
-  /* Start send */
-  uri_tcp_connect_send (utm);
-
-  /* Disconnect */
-  connected_session = pool_elt_at_index(utm->sessions,
-                                       utm->connected_session_index);
-  dmp = vl_msg_api_alloc (sizeof (*dmp));
-  memset (dmp, 0, sizeof (*dmp));
-  dmp->_vl_msg_id = ntohs (VL_API_DISCONNECT_SESSION);
-  dmp->client_index = utm->my_client_index;
-  dmp->session_index = connected_session->vpp_session_index;
-  dmp->session_thread_index = connected_session->vpp_session_thread;
-  vl_msg_api_send_shmem (utm->vl_input_queue, (u8 *)&dmp);
-}
-
-void
-handle_fifo_event_server_rx (uri_tcp_test_main_t *utm, session_fifo_event_t * e)
-{
-  svm_fifo_t * rx_fifo, * tx_fifo;
-  int n_read;
-
-  session_fifo_event_t evt;
-  unix_shared_memory_queue_t *q;
-  int rv, bytes;
-
-  rx_fifo = e->fifo;
-  tx_fifo = utm->sessions[rx_fifo->client_session_index].server_tx_fifo;
-
-  bytes = e->enqueue_length;
-  do
-    {
-      n_read = svm_fifo_dequeue_nowait (rx_fifo, 0, vec_len(utm->rx_buf),
-                                         utm->rx_buf);
-
-      /* Reflect if a non-drop session */
-      if (!utm->drop_packets && n_read > 0)
-        {
-          do
-            {
-              rv = svm_fifo_enqueue_nowait (tx_fifo, 0, n_read, utm->rx_buf);
-            }
-          while (rv == -2);
-
-          /* Fabricate TX event, send to vpp */
-          evt.fifo = tx_fifo;
-          evt.event_type = FIFO_EVENT_SERVER_TX;
-          /* $$$$ for event logging */
-          evt.enqueue_length = n_read;
-          evt.event_id = e->event_id;
-          q = utm->vpp_event_queue;
-          unix_shared_memory_queue_add (q, (u8 *) &evt, 0 /* do wait for mutex */);
-        }
-
-      if (n_read > 0)
-        bytes -= n_read;
-    }
-  while (n_read < 0 || bytes > 0);
-}
-
-void
-handle_event_queue (uri_tcp_test_main_t * utm)
+static void *
+client_rx_thread_fn (void *arg)
 {
-  session_fifo_event_t _e, *e = &_e;;
+  session_fifo_event_t _e, *e = &_e;
+  uri_tcp_test_main_t *utm = &uri_tcp_test_main;
 
+  utm->client_bytes_received = 0;
   while (1)
     {
-      unix_shared_memory_queue_sub (utm->our_event_queue, (u8 *)e,
-                                    0 /* nowait */);
+      unix_shared_memory_queue_sub (utm->our_event_queue, (u8 *) e,
+                                   0 /* nowait */ );
       switch (e->event_type)
-        {
-        case FIFO_EVENT_SERVER_RX:
-          handle_fifo_event_server_rx (utm, e);
-          break;
-
-        case FIFO_EVENT_SERVER_EXIT:
-          return;
-
-        default:
-          clib_warning ("unknown event type %d", e->event_type);
-          break;
-        }
-      if (PREDICT_FALSE(utm->time_to_stop == 1))
-        break;
-      if (PREDICT_FALSE(utm->time_to_print_stats == 1))
-        {
-          utm->time_to_print_stats = 0;
-          fformat(stdout, "%d connections\n", pool_elts (utm->sessions));
-        }
+       {
+       case FIFO_EVENT_SERVER_RX:
+         client_handle_fifo_event_rx (utm, e);
+         break;
+
+       case FIFO_EVENT_SERVER_EXIT:
+         return 0;
+       default:
+         clib_warning ("unknown event type %d", e->event_type);
+         break;
+       }
+
+      if (PREDICT_FALSE (utm->time_to_stop == 1))
+       break;
     }
+  pthread_exit (0);
 }
 
-static void
-vl_api_bind_uri_reply_t_handler (vl_api_bind_uri_reply_t * mp)
-{
-  uri_tcp_test_main_t *utm = &uri_tcp_test_main;
-  svm_fifo_segment_create_args_t _a, *a = &_a;
-  int rv;
-
-  if (mp->retval)
-    {
-      clib_warning("bind failed: %d", mp->retval);
-      return;
-    }
-
-  if (mp->segment_name_length == 0)
-    {
-      clib_warning("segment_name_length zero");
-      return;
-    }
-
-  a->segment_name = (char *) mp->segment_name;
-  a->segment_size = mp->segment_size;
-
-  ASSERT(mp->server_event_queue_address);
-
-  /* Attach to the segment vpp created */
-  rv = svm_fifo_segment_attach (a);
-  if (rv)
-    {
-      clib_warning("svm_fifo_segment_attach ('%s') failed", mp->segment_name);
-      return;
-    }
-
-  utm->our_event_queue =
-      (unix_shared_memory_queue_t *) mp->server_event_queue_address;
-
-  utm->state = STATE_READY;
-}
 
 static void
 vl_api_connect_uri_reply_t_handler (vl_api_connect_uri_reply_t * mp)
@@ -601,6 +399,7 @@ vl_api_connect_uri_reply_t_handler (vl_api_connect_uri_reply_t * mp)
   u32 session_index;
   svm_fifo_t *rx_fifo, *tx_fifo;
   int rv;
+  u64 key;
 
   if (mp->retval)
     {
@@ -608,6 +407,7 @@ vl_api_connect_uri_reply_t_handler (vl_api_connect_uri_reply_t * mp)
       utm->state = STATE_FAILED;
       return;
     }
+
   /*
    * Attatch to segment
    */
@@ -622,14 +422,14 @@ vl_api_connect_uri_reply_t_handler (vl_api_connect_uri_reply_t * mp)
   a->segment_name = (char *) mp->segment_name;
   a->segment_size = mp->segment_size;
 
-  ASSERT(mp->client_event_queue_address);
+  ASSERT (mp->client_event_queue_address);
 
   /* Attach to the segment vpp created */
   rv = svm_fifo_segment_attach (a);
   if (rv)
     {
       clib_warning ("svm_fifo_segment_attach ('%s') failed",
-                    mp->segment_name);
+                   mp->segment_name);
       return;
     }
 
@@ -650,9 +450,9 @@ vl_api_connect_uri_reply_t_handler (vl_api_connect_uri_reply_t * mp)
   pool_get (utm->sessions, session);
   session_index = session - utm->sessions;
 
-  rx_fifo = (svm_fifo_t *)mp->server_rx_fifo;
+  rx_fifo = (svm_fifo_t *) mp->server_rx_fifo;
   rx_fifo->client_session_index = session_index;
-  tx_fifo = (svm_fifo_t *)mp->server_tx_fifo;
+  tx_fifo = (svm_fifo_t *) mp->server_tx_fifo;
   tx_fifo->client_session_index = session_index;
 
   session->server_rx_fifo = rx_fifo;
@@ -662,54 +462,193 @@ vl_api_connect_uri_reply_t_handler (vl_api_connect_uri_reply_t * mp)
 
   /* Save handle */
   utm->connected_session_index = session_index;
-
   utm->state = STATE_READY;
+
+  /* Add it to lookup table */
+  key = (((u64) mp->session_thread_index) << 32) | (u64) mp->session_index;
+  hash_set (utm->session_index_by_vpp_handles, key, session_index);
+
+  /* Start RX thread */
+  rv = pthread_create (&utm->client_rx_thread_handle,
+                      NULL /*attr */ , client_rx_thread_fn, 0);
+  if (rv)
+    {
+      clib_warning ("pthread_create returned %d", rv);
+      rv = VNET_API_ERROR_SYSCALL_ERROR_1;
+    }
 }
 
 void
-uri_tcp_bind (uri_tcp_test_main_t *utm)
+client_send_data (uri_tcp_test_main_t * utm)
 {
-  vl_api_bind_uri_t * bmp;
-  u32 fifo_size = 3 << 20;
-  bmp = vl_msg_api_alloc (sizeof (*bmp));
-  memset (bmp, 0, sizeof (*bmp));
+  u8 *test_data = utm->connect_test_data;
+  u64 bytes_sent = 0;
+  int rv;
+  int mypid = getpid ();
+  session_t *session;
+  svm_fifo_t *tx_fifo;
+  int buffer_offset, bytes_to_send = 0;
+  session_fifo_event_t evt;
+  static int serial_number = 0;
+  int i;
+  u32 max_chunk = 64 << 10, write;
 
-  bmp->_vl_msg_id = ntohs (VL_API_BIND_URI);
-  bmp->client_index = utm->my_client_index;
-  bmp->context = ntohl(0xfeedface);
-  bmp->initial_segment_size = 256<<20;    /* size of initial segment */
-  bmp->options[SESSION_OPTIONS_FLAGS] =
-    SESSION_OPTIONS_FLAGS_USE_FIFO | SESSION_OPTIONS_FLAGS_ADD_SEGMENT;
-  bmp->options[SESSION_OPTIONS_RX_FIFO_SIZE] = fifo_size;
-  bmp->options[SESSION_OPTIONS_TX_FIFO_SIZE] = fifo_size;
-  bmp->options[SESSION_OPTIONS_ADD_SEGMENT_SIZE] = 128<<20;
-  memcpy (bmp->uri, utm->uri, vec_len (utm->uri));
-  vl_msg_api_send_shmem (utm->vl_input_queue, (u8 *)&bmp);
+  session = pool_elt_at_index (utm->sessions, utm->connected_session_index);
+  tx_fifo = session->server_tx_fifo;
+
+  vec_validate (utm->rx_buf, vec_len (test_data) - 1);
+
+  for (i = 0; i < 1; i++)
+    {
+      bytes_to_send = vec_len (test_data);
+      buffer_offset = 0;
+      while (bytes_to_send > 0)
+       {
+         write = bytes_to_send > max_chunk ? max_chunk : bytes_to_send;
+         rv = svm_fifo_enqueue_nowait (tx_fifo, mypid, write,
+                                       test_data + buffer_offset);
+
+         if (rv > 0)
+           {
+             bytes_to_send -= rv;
+             buffer_offset += rv;
+             bytes_sent += rv;
+
+             /* Fabricate TX event, send to vpp */
+             evt.fifo = tx_fifo;
+             evt.event_type = FIFO_EVENT_SERVER_TX;
+             /* $$$$ for event logging */
+             evt.enqueue_length = rv;
+             evt.event_id = serial_number++;
+
+             unix_shared_memory_queue_add (utm->vpp_event_queue,
+                                           (u8 *) & evt,
+                                           0 /* do wait for mutex */ );
+           }
+       }
+    }
+
+  if (utm->test_return_packets)
+    {
+      f64 timeout = clib_time_now (&utm->clib_time) + 2;
+
+      /* Wait for the outstanding packets */
+      while (utm->client_bytes_received < vec_len (test_data))
+       {
+         if (clib_time_now (&utm->clib_time) > timeout)
+           {
+             clib_warning ("timed out waiting for the missing packets");
+             break;
+           }
+       }
+
+      utm->time_to_stop = 1;
+    }
+}
+
+void
+client_connect (uri_tcp_test_main_t * utm)
+{
+  vl_api_connect_uri_t *cmp;
+  cmp = vl_msg_api_alloc (sizeof (*cmp));
+  memset (cmp, 0, sizeof (*cmp));
+
+  cmp->_vl_msg_id = ntohs (VL_API_CONNECT_URI);
+  cmp->client_index = utm->my_client_index;
+  cmp->context = ntohl (0xfeedface);
+  memcpy (cmp->uri, utm->connect_uri, vec_len (utm->connect_uri));
+  vl_msg_api_send_shmem (utm->vl_input_queue, (u8 *) & cmp);
+}
+
+void
+client_disconnect (uri_tcp_test_main_t * utm)
+{
+  session_t *connected_session;
+  vl_api_disconnect_session_t *dmp;
+  connected_session = pool_elt_at_index (utm->sessions,
+                                        utm->connected_session_index);
+  dmp = vl_msg_api_alloc (sizeof (*dmp));
+  memset (dmp, 0, sizeof (*dmp));
+  dmp->_vl_msg_id = ntohs (VL_API_DISCONNECT_SESSION);
+  dmp->client_index = utm->my_client_index;
+  dmp->session_index = connected_session->vpp_session_index;
+  dmp->session_thread_index = connected_session->vpp_session_thread;
+  vl_msg_api_send_shmem (utm->vl_input_queue, (u8 *) & dmp);
+}
+
+static void
+client_test (uri_tcp_test_main_t * utm)
+{
+  int i;
+
+  client_connect (utm);
+
+  if (wait_for_state_change (utm, STATE_READY))
+    {
+      return;
+    }
+
+  /* Init test data */
+  vec_validate (utm->connect_test_data, 64 * 1024 - 1);
+  for (i = 0; i < vec_len (utm->connect_test_data); i++)
+    utm->connect_test_data[i] = i & 0xff;
+
+  /* Start send */
+  client_send_data (utm);
+
+  /* Disconnect */
+  client_disconnect (utm);
 }
 
 static void
-vl_api_unbind_uri_reply_t_handler (vl_api_unbind_uri_reply_t *mp)
+vl_api_bind_uri_reply_t_handler (vl_api_bind_uri_reply_t * mp)
 {
   uri_tcp_test_main_t *utm = &uri_tcp_test_main;
+  svm_fifo_segment_create_args_t _a, *a = &_a;
+  int rv;
 
-  if (mp->retval != 0)
-    clib_warning ("returned %d", ntohl(mp->retval));
+  if (mp->retval)
+    {
+      clib_warning ("bind failed: %d", mp->retval);
+      utm->state = STATE_FAILED;
+      return;
+    }
 
-  utm->state = STATE_START;
+  if (mp->segment_name_length == 0)
+    {
+      clib_warning ("segment_name_length zero");
+      return;
+    }
+
+  a->segment_name = (char *) mp->segment_name;
+  a->segment_size = mp->segment_size;
+
+  ASSERT (mp->server_event_queue_address);
+
+  /* Attach to the segment vpp created */
+  rv = svm_fifo_segment_attach (a);
+  if (rv)
+    {
+      clib_warning ("svm_fifo_segment_attach ('%s') failed",
+                   mp->segment_name);
+      return;
+    }
+
+  utm->our_event_queue =
+    (unix_shared_memory_queue_t *) mp->server_event_queue_address;
+
+  utm->state = STATE_READY;
 }
 
-void
-uri_tcp_unbind (uri_tcp_test_main_t *utm)
+static void
+vl_api_unbind_uri_reply_t_handler (vl_api_unbind_uri_reply_t * mp)
 {
-  vl_api_unbind_uri_t * ump;
+  uri_tcp_test_main_t *utm = &uri_tcp_test_main;
 
-  ump = vl_msg_api_alloc (sizeof (*ump));
-  memset (ump, 0, sizeof (*ump));
+  if (mp->retval != 0)
+    clib_warning ("returned %d", ntohl (mp->retval));
 
-  ump->_vl_msg_id = ntohs (VL_API_UNBIND_URI);
-  ump->client_index = utm->my_client_index;
-  memcpy (ump->uri, utm->uri, vec_len (utm->uri));
-  vl_msg_api_send_shmem (utm->vl_input_queue, (u8 *)&ump);
+  utm->state = STATE_START;
 }
 
 static void
@@ -717,14 +656,14 @@ vl_api_accept_session_t_handler (vl_api_accept_session_t * mp)
 {
   uri_tcp_test_main_t *utm = &uri_tcp_test_main;
   vl_api_accept_session_reply_t *rmp;
-  svm_fifo_t * rx_fifo, * tx_fifo;
-  session_t * session;
+  svm_fifo_t *rx_fifo, *tx_fifo;
+  session_t *session;
   static f64 start_time;
   u64 key;
   u32 session_index;
 
   if (start_time == 0.0)
-      start_time = clib_time_now (&utm->clib_time);
+    start_time = clib_time_now (&utm->clib_time);
 
   utm->vpp_event_queue = (unix_shared_memory_queue_t *)
     mp->vpp_event_queue_address;
@@ -733,45 +672,159 @@ vl_api_accept_session_t_handler (vl_api_accept_session_t * mp)
   pool_get (utm->sessions, session);
   session_index = session - utm->sessions;
 
-  rx_fifo = (svm_fifo_t *)mp->server_rx_fifo;
+  rx_fifo = (svm_fifo_t *) mp->server_rx_fifo;
   rx_fifo->client_session_index = session_index;
-  tx_fifo = (svm_fifo_t *)mp->server_tx_fifo;
+  tx_fifo = (svm_fifo_t *) mp->server_tx_fifo;
   tx_fifo->client_session_index = session_index;
 
   session->server_rx_fifo = rx_fifo;
   session->server_tx_fifo = tx_fifo;
 
   /* Add it to lookup table */
-  key = (((u64)mp->session_thread_index) << 32) | (u64)mp->session_index;
+  key = (((u64) mp->session_thread_index) << 32) | (u64) mp->session_index;
   hash_set (utm->session_index_by_vpp_handles, key, session_index);
 
   utm->state = STATE_READY;
 
   /* Stats printing */
-  if (pool_elts (utm->sessions) && (pool_elts(utm->sessions) % 20000) == 0)
+  if (pool_elts (utm->sessions) && (pool_elts (utm->sessions) % 20000) == 0)
     {
       f64 now = clib_time_now (&utm->clib_time);
       fformat (stdout, "%d active sessions in %.2f seconds, %.2f/sec...\n",
-               pool_elts(utm->sessions), now - start_time,
-               (f64)pool_elts(utm->sessions) / (now - start_time));
+              pool_elts (utm->sessions), now - start_time,
+              (f64) pool_elts (utm->sessions) / (now - start_time));
     }
 
-  /* Send accept reply to vpp */
+  /*
+   * Send accept reply to vpp
+   */
   rmp = vl_msg_api_alloc (sizeof (*rmp));
   memset (rmp, 0, sizeof (*rmp));
   rmp->_vl_msg_id = ntohs (VL_API_ACCEPT_SESSION_REPLY);
   rmp->session_type = mp->session_type;
   rmp->session_index = mp->session_index;
   rmp->session_thread_index = mp->session_thread_index;
-  vl_msg_api_send_shmem (utm->vl_input_queue, (u8 *)&rmp);
+  vl_msg_api_send_shmem (utm->vl_input_queue, (u8 *) & rmp);
 }
 
 void
-uri_tcp_server_test (uri_tcp_test_main_t * utm)
+server_handle_fifo_event_rx (uri_tcp_test_main_t * utm,
+                            session_fifo_event_t * e)
 {
+  svm_fifo_t *rx_fifo, *tx_fifo;
+  int n_read;
+
+  session_fifo_event_t evt;
+  unix_shared_memory_queue_t *q;
+  int rv, bytes;
+
+  rx_fifo = e->fifo;
+  tx_fifo = utm->sessions[rx_fifo->client_session_index].server_tx_fifo;
+
+  bytes = e->enqueue_length;
+  do
+    {
+      n_read = svm_fifo_dequeue_nowait (rx_fifo, 0, vec_len (utm->rx_buf),
+                                       utm->rx_buf);
+
+      /* Reflect if a non-drop session */
+      if (!utm->drop_packets && n_read > 0)
+       {
+         do
+           {
+             rv = svm_fifo_enqueue_nowait (tx_fifo, 0, n_read, utm->rx_buf);
+           }
+         while (rv == -2);
+
+         /* Fabricate TX event, send to vpp */
+         evt.fifo = tx_fifo;
+         evt.event_type = FIFO_EVENT_SERVER_TX;
+         /* $$$$ for event logging */
+         evt.enqueue_length = n_read;
+         evt.event_id = e->event_id;
+         q = utm->vpp_event_queue;
+         unix_shared_memory_queue_add (q, (u8 *) & evt,
+                                       0 /* do wait for mutex */ );
+       }
+
+      if (n_read > 0)
+       bytes -= n_read;
+    }
+  while (n_read < 0 || bytes > 0);
+}
+
+void
+server_handle_event_queue (uri_tcp_test_main_t * utm)
+{
+  session_fifo_event_t _e, *e = &_e;
 
+  while (1)
+    {
+      unix_shared_memory_queue_sub (utm->our_event_queue, (u8 *) e,
+                                   0 /* nowait */ );
+      switch (e->event_type)
+       {
+       case FIFO_EVENT_SERVER_RX:
+         server_handle_fifo_event_rx (utm, e);
+         break;
+
+       case FIFO_EVENT_SERVER_EXIT:
+         return;
+
+       default:
+         clib_warning ("unknown event type %d", e->event_type);
+         break;
+       }
+      if (PREDICT_FALSE (utm->time_to_stop == 1))
+       break;
+      if (PREDICT_FALSE (utm->time_to_print_stats == 1))
+       {
+         utm->time_to_print_stats = 0;
+         fformat (stdout, "%d connections\n", pool_elts (utm->sessions));
+       }
+    }
+}
+
+void
+server_bind (uri_tcp_test_main_t * utm)
+{
+  vl_api_bind_uri_t *bmp;
+  u32 fifo_size = 3 << 20;
+  bmp = vl_msg_api_alloc (sizeof (*bmp));
+  memset (bmp, 0, sizeof (*bmp));
+
+  bmp->_vl_msg_id = ntohs (VL_API_BIND_URI);
+  bmp->client_index = utm->my_client_index;
+  bmp->context = ntohl (0xfeedface);
+  bmp->initial_segment_size = 256 << 20;       /* size of initial segment */
+  bmp->options[SESSION_OPTIONS_FLAGS] =
+    SESSION_OPTIONS_FLAGS_USE_FIFO | SESSION_OPTIONS_FLAGS_ADD_SEGMENT;
+  bmp->options[SESSION_OPTIONS_RX_FIFO_SIZE] = fifo_size;
+  bmp->options[SESSION_OPTIONS_TX_FIFO_SIZE] = fifo_size;
+  bmp->options[SESSION_OPTIONS_ADD_SEGMENT_SIZE] = 128 << 20;
+  memcpy (bmp->uri, utm->uri, vec_len (utm->uri));
+  vl_msg_api_send_shmem (utm->vl_input_queue, (u8 *) & bmp);
+}
+
+void
+server_unbind (uri_tcp_test_main_t * utm)
+{
+  vl_api_unbind_uri_t *ump;
+
+  ump = vl_msg_api_alloc (sizeof (*ump));
+  memset (ump, 0, sizeof (*ump));
+
+  ump->_vl_msg_id = ntohs (VL_API_UNBIND_URI);
+  ump->client_index = utm->my_client_index;
+  memcpy (ump->uri, utm->uri, vec_len (utm->uri));
+  vl_msg_api_send_shmem (utm->vl_input_queue, (u8 *) & ump);
+}
+
+void
+server_test (uri_tcp_test_main_t * utm)
+{
   /* Bind to uri */
-  uri_tcp_bind (utm);
+  server_bind (utm);
 
   if (wait_for_state_change (utm, STATE_READY))
     {
@@ -780,10 +833,10 @@ uri_tcp_server_test (uri_tcp_test_main_t * utm)
     }
 
   /* Enter handle event loop */
-  handle_event_queue (utm);
+  server_handle_event_queue (utm);
 
   /* Cleanup */
-  uri_tcp_unbind (utm);
+  server_unbind (utm);
 
   if (wait_for_state_change (utm, STATE_START))
     {
@@ -824,12 +877,12 @@ main (int argc, char **argv)
   unformat_input_t _argv, *a = &_argv;
   u8 *chroot_prefix;
   u8 *heap;
-  u8 * bind_name = (u8 *) "tcp://0.0.0.0/1234";
+  u8 *bind_name = (u8 *) "tcp://0.0.0.0/1234";
   u32 tmp;
   mheap_t *h;
-  session_t * session;
+  session_t *session;
   int i;
-  int i_am_master = 1, drop_packets = 0;
+  int i_am_master = 1, drop_packets = 0, test_return_packets = 0;
 
   clib_mem_init (0, 256 << 20);
 
@@ -841,53 +894,54 @@ main (int argc, char **argv)
 
   vec_validate (utm->rx_buf, 65536);
 
-  utm->session_index_by_vpp_handles =
-    hash_create (0, sizeof(uword));
+  utm->session_index_by_vpp_handles = hash_create (0, sizeof (uword));
 
-  utm->my_pid = getpid();
-  utm->configured_segment_size = 1<<20;
+  utm->my_pid = getpid ();
+  utm->configured_segment_size = 1 << 20;
 
   clib_time_init (&utm->clib_time);
   init_error_string_table (utm);
-  svm_fifo_segment_init(0x200000000ULL, 20);
+  svm_fifo_segment_init (0x200000000ULL, 20);
   unformat_init_command_line (a, argv);
 
   while (unformat_check_input (a) != UNFORMAT_END_OF_INPUT)
     {
       if (unformat (a, "chroot prefix %s", &chroot_prefix))
-        {
-          vl_set_memory_root_path ((char *) chroot_prefix);
-        }
+       {
+         vl_set_memory_root_path ((char *) chroot_prefix);
+       }
       else if (unformat (a, "uri %s", &bind_name))
-        ;
+       ;
       else if (unformat (a, "segment-size %dM", &tmp))
-        utm->configured_segment_size = tmp<<20;
+       utm->configured_segment_size = tmp << 20;
       else if (unformat (a, "segment-size %dG", &tmp))
-        utm->configured_segment_size = tmp<<30;
+       utm->configured_segment_size = tmp << 30;
       else if (unformat (a, "master"))
-        i_am_master = 1;
+       i_am_master = 1;
       else if (unformat (a, "slave"))
-        i_am_master = 0;
+       i_am_master = 0;
       else if (unformat (a, "drop"))
-        drop_packets = 1;
+       drop_packets = 1;
+      else if (unformat (a, "test"))
+       test_return_packets = 1;
       else
-        {
-          fformat (stderr, "%s: usage [master|slave]\n");
-          exit (1);
-        }
+       {
+         fformat (stderr, "%s: usage [master|slave]\n", argv[0]);
+         exit (1);
+       }
     }
 
   utm->uri = format (0, "%s%c", bind_name, 0);
   utm->i_am_master = i_am_master;
   utm->segment_main = &svm_fifo_segment_main;
   utm->drop_packets = drop_packets;
-
+  utm->test_return_packets = test_return_packets;
   utm->connect_uri = format (0, "tcp://6.0.1.2/1234%c", 0);
 
-  setup_signal_handlers();
+  setup_signal_handlers ();
   uri_api_hookup (utm);
 
-  if (connect_to_vpp (i_am_master? "uri_tcp_server":"uri_tcp_client") < 0)
+  if (connect_to_vpp (i_am_master ? "uri_tcp_server" : "uri_tcp_client") < 0)
     {
       svm_region_exit ();
       fformat (stderr, "Couldn't connect to vpe, exiting...\n");
@@ -896,7 +950,7 @@ main (int argc, char **argv)
 
   if (i_am_master == 0)
     {
-      uri_tcp_client_test (utm);
+      client_test (utm);
       exit (0);
     }
 
@@ -909,8 +963,16 @@ main (int argc, char **argv)
   for (i = 0; i < 200000; i++)
     pool_put_index (utm->sessions, i);
 
-  uri_tcp_server_test (utm);
+  server_test (utm);
 
   vl_client_disconnect_from_vlib ();
   exit (0);
 }
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
index 6f5284c..54625d6 100644 (file)
 #include <vlib/unix/unix.h>
 #include <vlibapi/api.h>
 #include <vlibmemory/api.h>
-#include <vpp-api/vpe_msg_enum.h>
-#include <svm_fifo_segment.h>
-
-#include <vnet/uri/uri.h>
+#include <vpp/api/vpe_msg_enum.h>
+#include <svm/svm_fifo_segment.h>
+#include <pthread.h>
+#include <vnet/session/application_interface.h>
 
 #define vl_typedefs            /* define message structures */
-#include <vpp-api/vpe_all_api_h.h>
+#include <vpp/api/vpe_all_api_h.h>
 #undef vl_typedefs
 
 /* declare message handlers for each api */
 
 #define vl_endianfun           /* define message structures */
-#include <vpp-api/vpe_all_api_h.h>
+#include <vpp/api/vpe_all_api_h.h>
 #undef vl_endianfun
 
 /* instantiate all the print functions we know about */
 #define vl_print(handle, ...)
 #define vl_printfun
-#include <vpp-api/vpe_all_api_h.h>
+#include <vpp/api/vpe_all_api_h.h>
 #undef vl_printfun
 
 /* Satisfy external references when not linking with -lvlib */
@@ -87,12 +87,28 @@ typedef struct
   /* intermediate rx buffer */
   u8 *rx_buf;
 
+  /* URI for connect */
+  u8 *connect_uri;
+
+  int i_am_master;
+
   /* Our event queue */
   unix_shared_memory_queue_t *our_event_queue;
 
   /* $$$ single thread only for the moment */
   unix_shared_memory_queue_t *vpp_event_queue;
 
+  /* $$$$ hack: cut-through session index */
+  volatile u32 cut_through_session_index;
+
+  /* unique segment name counter */
+  u32 unique_segment_index;
+
+  pid_t my_pid;
+
+  /* pthread handle */
+  pthread_t cut_through_thread_handle;
+
   /* For deadman timers */
   clib_time_t clib_time;
 
@@ -102,14 +118,20 @@ typedef struct
   volatile int time_to_stop;
   volatile int time_to_print_stats;
 
+  u32 configured_segment_size;
+
   /* VNET_API_ERROR_FOO -> "Foo" hash table */
   uword *error_string_by_error_number;
+
+  /* convenience */
+  svm_fifo_segment_main_t *segment_main;
+
 } uri_udp_test_main_t;
 
 #if CLIB_DEBUG > 0
-#define NITER 1000
+#define NITER 10000
 #else
-#define NITER 1000000
+#define NITER 4000000
 #endif
 
 uri_udp_test_main_t uri_udp_test_main;
@@ -159,7 +181,13 @@ format_api_error (u8 * s, va_list * args)
 int
 wait_for_state_change (uri_udp_test_main_t * utm, connection_state_t state)
 {
-  f64 timeout = clib_time_now (&utm->clib_time) + 5.0;
+#if CLIB_DEBUG > 0
+#define TIMEOUT 600.0
+#else
+#define TIMEOUT 600.0
+#endif
+
+  f64 timeout = clib_time_now (&utm->clib_time) + TIMEOUT;
 
   while (clib_time_now (&utm->clib_time) < timeout)
     {
@@ -169,6 +197,183 @@ wait_for_state_change (uri_udp_test_main_t * utm, connection_state_t state)
   return -1;
 }
 
+u64 server_bytes_received, server_bytes_sent;
+
+static void *
+cut_through_thread_fn (void *arg)
+{
+  session_t *s;
+  svm_fifo_t *rx_fifo;
+  svm_fifo_t *tx_fifo;
+  u8 *my_copy_buffer = 0;
+  uri_udp_test_main_t *utm = &uri_udp_test_main;
+  i32 actual_transfer;
+  int rv;
+  u32 buffer_offset;
+
+  while (utm->cut_through_session_index == ~0)
+    ;
+
+  s = pool_elt_at_index (utm->sessions, utm->cut_through_session_index);
+
+  rx_fifo = s->server_rx_fifo;
+  tx_fifo = s->server_tx_fifo;
+
+  vec_validate (my_copy_buffer, 64 * 1024 - 1);
+
+  while (true)
+    {
+      /* We read from the tx fifo and write to the rx fifo */
+      do
+       {
+         actual_transfer = svm_fifo_dequeue_nowait (tx_fifo, 0,
+                                                    vec_len (my_copy_buffer),
+                                                    my_copy_buffer);
+       }
+      while (actual_transfer <= 0);
+
+      server_bytes_received += actual_transfer;
+
+      buffer_offset = 0;
+      while (actual_transfer > 0)
+       {
+         rv = svm_fifo_enqueue_nowait (rx_fifo, 0, actual_transfer,
+                                       my_copy_buffer + buffer_offset);
+         if (rv > 0)
+           {
+             actual_transfer -= rv;
+             buffer_offset += rv;
+             server_bytes_sent += rv;
+           }
+
+       }
+      if (PREDICT_FALSE (utm->time_to_stop))
+       break;
+    }
+
+  pthread_exit (0);
+}
+
+static void
+uri_udp_slave_test (uri_udp_test_main_t * utm)
+{
+  vl_api_connect_uri_t *cmp;
+  int i;
+  u8 *test_data = 0;
+  u64 bytes_received = 0, bytes_sent = 0;
+  i32 bytes_to_read;
+  int rv;
+  int mypid = getpid ();
+  f64 before, after, delta, bytes_per_second;
+  session_t *session;
+  svm_fifo_t *rx_fifo, *tx_fifo;
+  int buffer_offset, bytes_to_send = 0;
+
+  vec_validate (test_data, 64 * 1024 - 1);
+  for (i = 0; i < vec_len (test_data); i++)
+    test_data[i] = i & 0xff;
+
+  cmp = vl_msg_api_alloc (sizeof (*cmp));
+  memset (cmp, 0, sizeof (*cmp));
+
+  cmp->_vl_msg_id = ntohs (VL_API_CONNECT_URI);
+  cmp->client_index = utm->my_client_index;
+  cmp->context = ntohl (0xfeedface);
+  memcpy (cmp->uri, utm->connect_uri, vec_len (utm->connect_uri));
+  vl_msg_api_send_shmem (utm->vl_input_queue, (u8 *) & cmp);
+
+  if (wait_for_state_change (utm, STATE_READY))
+    {
+      clib_warning ("timeout waiting for STATE_READY");
+      return;
+    }
+
+  session = pool_elt_at_index (utm->sessions, utm->cut_through_session_index);
+  rx_fifo = session->server_rx_fifo;
+  tx_fifo = session->server_tx_fifo;
+
+  before = clib_time_now (&utm->clib_time);
+
+  vec_validate (utm->rx_buf, vec_len (test_data) - 1);
+
+  for (i = 0; i < NITER; i++)
+    {
+      bytes_to_send = vec_len (test_data);
+      buffer_offset = 0;
+      while (bytes_to_send > 0)
+       {
+         rv = svm_fifo_enqueue_nowait (tx_fifo, mypid,
+                                       bytes_to_send,
+                                       test_data + buffer_offset);
+
+         if (rv > 0)
+           {
+             bytes_to_send -= rv;
+             buffer_offset += rv;
+             bytes_sent += rv;
+           }
+       }
+
+      bytes_to_read = svm_fifo_max_dequeue (rx_fifo);
+
+      bytes_to_read = vec_len (utm->rx_buf) > bytes_to_read ?
+       bytes_to_read : vec_len (utm->rx_buf);
+
+      buffer_offset = 0;
+      while (bytes_to_read > 0)
+       {
+         rv = svm_fifo_dequeue_nowait (rx_fifo, mypid,
+                                       bytes_to_read,
+                                       utm->rx_buf + buffer_offset);
+         if (rv > 0)
+           {
+             bytes_to_read -= rv;
+             buffer_offset += rv;
+             bytes_received += rv;
+           }
+       }
+    }
+  while (bytes_received < bytes_sent)
+    {
+      rv = svm_fifo_dequeue_nowait (rx_fifo, mypid,
+                                   vec_len (utm->rx_buf), utm->rx_buf);
+      if (rv > 0)
+       {
+#if CLIB_DEBUG > 0
+         int j;
+         for (j = 0; j < rv; j++)
+           {
+             if (utm->rx_buf[j] != ((bytes_received + j) & 0xff))
+               {
+                 clib_warning ("error at byte %lld, 0x%x not 0x%x",
+                               bytes_received + j,
+                               utm->rx_buf[j],
+                               ((bytes_received + j) & 0xff));
+               }
+           }
+#endif
+         bytes_received += (u64) rv;
+       }
+    }
+
+  after = clib_time_now (&utm->clib_time);
+  delta = after - before;
+  bytes_per_second = 0.0;
+
+  if (delta > 0.0)
+    bytes_per_second = (f64) bytes_received / delta;
+
+  fformat (stdout,
+          "Done: %lld recv bytes in %.2f seconds, %.2f bytes/sec...\n\n",
+          bytes_received, delta, bytes_per_second);
+  fformat (stdout,
+          "Done: %lld sent bytes in %.2f seconds, %.2f bytes/sec...\n\n",
+          bytes_sent, delta, bytes_per_second);
+  fformat (stdout,
+          "client -> server -> client round trip: %.2f Gbit/sec \n\n",
+          (bytes_per_second * 8.0) / 1e9);
+}
+
 static void
 vl_api_bind_uri_reply_t_handler (vl_api_bind_uri_reply_t * mp)
 {
@@ -183,12 +388,16 @@ vl_api_bind_uri_reply_t_handler (vl_api_bind_uri_reply_t * mp)
     }
 
   a->segment_name = (char *) mp->segment_name;
+  a->segment_size = mp->segment_size;
+
+  ASSERT (mp->server_event_queue_address);
 
   /* Attach to the segment vpp created */
   rv = svm_fifo_segment_attach (a);
   if (rv)
     {
-      clib_warning ("sm_fifo_segment_create ('%s') failed", mp->segment_name);
+      clib_warning ("svm_fifo_segment_attach ('%s') failed",
+                   mp->segment_name);
       return;
     }
 
@@ -198,6 +407,101 @@ vl_api_bind_uri_reply_t_handler (vl_api_bind_uri_reply_t * mp)
   utm->state = STATE_READY;
 }
 
+static void
+vl_api_map_another_segment_t_handler (vl_api_map_another_segment_t * mp)
+{
+  svm_fifo_segment_create_args_t _a, *a = &_a;
+  int rv;
+
+  a->segment_name = (char *) mp->segment_name;
+  a->segment_size = mp->segment_size;
+  /* Attach to the segment vpp created */
+  rv = svm_fifo_segment_attach (a);
+  if (rv)
+    {
+      clib_warning ("svm_fifo_segment_attach ('%s') failed",
+                   mp->segment_name);
+      return;
+    }
+  clib_warning ("Mapped new segment '%s' size %d", mp->segment_name,
+               mp->segment_size);
+}
+
+static void
+vl_api_connect_uri_t_handler (vl_api_connect_uri_t * mp)
+{
+  u32 segment_index;
+  uri_udp_test_main_t *utm = &uri_udp_test_main;
+  svm_fifo_segment_main_t *sm = &svm_fifo_segment_main;
+  svm_fifo_segment_create_args_t _a, *a = &_a;
+  svm_fifo_segment_private_t *seg;
+  unix_shared_memory_queue_t *client_q;
+  vl_api_connect_uri_reply_t *rmp;
+  session_t *session;
+  int rv = 0;
+
+  /* Create the segment */
+  a->segment_name = (char *) format (0, "%d:segment%d%c", utm->my_pid,
+                                    utm->unique_segment_index++, 0);
+  a->segment_size = utm->configured_segment_size;
+
+  rv = svm_fifo_segment_create (a);
+  if (rv)
+    {
+      clib_warning ("sm_fifo_segment_create ('%s') failed", a->segment_name);
+      rv = VNET_API_ERROR_URI_FIFO_CREATE_FAILED;
+      goto send_reply;
+    }
+
+  vec_add2 (utm->seg, seg, 1);
+
+  segment_index = vec_len (sm->segments) - 1;
+
+  memcpy (seg, sm->segments + segment_index, sizeof (utm->seg[0]));
+
+  pool_get (utm->sessions, session);
+
+  /*
+   * By construction the master's idea of the rx fifo ends up in
+   * fsh->fifos[0], and the master's idea of the tx fifo ends up in
+   * fsh->fifos[1].
+   */
+  session->server_rx_fifo = svm_fifo_segment_alloc_fifo (utm->seg,
+                                                        128 * 1024);
+  ASSERT (session->server_rx_fifo);
+
+  session->server_tx_fifo = svm_fifo_segment_alloc_fifo (utm->seg,
+                                                        128 * 1024);
+  ASSERT (session->server_tx_fifo);
+
+  session->server_rx_fifo->server_session_index = session - utm->sessions;
+  session->server_tx_fifo->server_session_index = session - utm->sessions;
+  utm->cut_through_session_index = session - utm->sessions;
+
+  rv = pthread_create (&utm->cut_through_thread_handle,
+                      NULL /*attr */ , cut_through_thread_fn, 0);
+  if (rv)
+    {
+      clib_warning ("pthread_create returned %d", rv);
+      rv = VNET_API_ERROR_SYSCALL_ERROR_1;
+    }
+
+send_reply:
+  rmp = vl_msg_api_alloc (sizeof (*rmp));
+  memset (rmp, 0, sizeof (*rmp));
+
+  rmp->_vl_msg_id = ntohs (VL_API_CONNECT_URI_REPLY);
+  rmp->context = mp->context;
+  rmp->retval = ntohl (rv);
+  rmp->segment_name_length = vec_len (a->segment_name);
+  memcpy (rmp->segment_name, a->segment_name, vec_len (a->segment_name));
+
+  vec_free (a->segment_name);
+
+  client_q = (unix_shared_memory_queue_t *) mp->client_queue_address;
+  vl_msg_api_send_shmem (client_q, (u8 *) & rmp);
+}
+
 static void
 vl_api_unbind_uri_reply_t_handler (vl_api_unbind_uri_reply_t * mp)
 {
@@ -293,18 +597,79 @@ vl_api_disconnect_session_t_handler (vl_api_disconnect_session_t * mp)
   vl_msg_api_send_shmem (utm->vl_input_queue, (u8 *) & rmp);
 }
 
+static void
+vl_api_connect_uri_reply_t_handler (vl_api_connect_uri_reply_t * mp)
+{
+  svm_fifo_segment_main_t *sm = &svm_fifo_segment_main;
+  uri_udp_test_main_t *utm = &uri_udp_test_main;
+  svm_fifo_segment_create_args_t _a, *a = &_a;
+  ssvm_shared_header_t *sh;
+  svm_fifo_segment_private_t *seg;
+  svm_fifo_segment_header_t *fsh;
+  session_t *session;
+  u32 segment_index;
+  int rv;
+
+  ASSERT (utm->i_am_master == 0);
+
+  if (mp->segment_name_length == 0)
+    {
+      clib_warning ("segment_name_length zero");
+      return;
+    }
+
+  memset (a, 0, sizeof (*a));
+
+  a->segment_name = (char *) mp->segment_name;
+
+  sleep (1);
+
+  rv = svm_fifo_segment_attach (a);
+  if (rv)
+    {
+      clib_warning ("sm_fifo_segment_create ('%v') failed", mp->segment_name);
+      return;
+    }
+
+  segment_index = vec_len (sm->segments) - 1;
+
+  vec_add2 (utm->seg, seg, 1);
+
+  memcpy (seg, sm->segments + segment_index, sizeof (*seg));
+  sh = seg->ssvm.sh;
+  fsh = (svm_fifo_segment_header_t *) sh->opaque[0];
+
+  while (vec_len (fsh->fifos) < 2)
+    sleep (1);
+
+  pool_get (utm->sessions, session);
+  utm->cut_through_session_index = session - utm->sessions;
+
+  session->server_rx_fifo = (svm_fifo_t *) fsh->fifos[0];
+  ASSERT (session->server_rx_fifo);
+  session->server_tx_fifo = (svm_fifo_t *) fsh->fifos[1];
+  ASSERT (session->server_tx_fifo);
+
+  /* security: could unlink /dev/shm/<mp->segment_name> here, maybe */
+
+  utm->state = STATE_READY;
+}
+
 #define foreach_uri_msg                         \
 _(BIND_URI_REPLY, bind_uri_reply)               \
+_(CONNECT_URI, connect_uri)                     \
+_(CONNECT_URI_REPLY, connect_uri_reply)         \
 _(UNBIND_URI_REPLY, unbind_uri_reply)           \
 _(ACCEPT_SESSION, accept_session)              \
-_(DISCONNECT_SESSION, disconnect_session)
+_(DISCONNECT_SESSION, disconnect_session)      \
+_(MAP_ANOTHER_SEGMENT, map_another_segment)
 
 void
 uri_api_hookup (uri_udp_test_main_t * utm)
 {
 #define _(N,n)                                                  \
     vl_msg_api_set_handlers(VL_API_##N, #n,                     \
-                           vl_api_##n##_t_handler,             \
+                           vl_api_##n##_t_handler,              \
                            vl_noop_handler,                     \
                            vl_api_##n##_t_endian,               \
                            vl_api_##n##_t_print,                \
@@ -349,7 +714,7 @@ init_error_string_table (uri_udp_test_main_t * utm)
 }
 
 void
-handle_fifo_event_server_rx (uri_udp_test_main_t * utm,
+server_handle_fifo_event_rx (uri_udp_test_main_t * utm,
                             session_fifo_event_t * e)
 {
   svm_fifo_t *rx_fifo, *tx_fifo;
@@ -385,7 +750,7 @@ handle_fifo_event_server_rx (uri_udp_test_main_t * utm,
 }
 
 void
-handle_event_queue (uri_udp_test_main_t * utm)
+server_handle_event_queue (uri_udp_test_main_t * utm)
 {
   session_fifo_event_t _e, *e = &_e;;
 
@@ -396,7 +761,7 @@ handle_event_queue (uri_udp_test_main_t * utm)
       switch (e->event_type)
        {
        case FIFO_EVENT_SERVER_RX:
-         handle_fifo_event_server_rx (utm, e);
+         server_handle_fifo_event_rx (utm, e);
          break;
 
        case FIFO_EVENT_SERVER_EXIT:
@@ -428,7 +793,12 @@ uri_udp_test (uri_udp_test_main_t * utm)
   bmp->_vl_msg_id = ntohs (VL_API_BIND_URI);
   bmp->client_index = utm->my_client_index;
   bmp->context = ntohl (0xfeedface);
-  bmp->segment_size = 2 << 30;
+  bmp->initial_segment_size = 256 << 20;       /* size of initial segment */
+  bmp->options[SESSION_OPTIONS_FLAGS] =
+    SESSION_OPTIONS_FLAGS_USE_FIFO | SESSION_OPTIONS_FLAGS_ADD_SEGMENT;
+  bmp->options[SESSION_OPTIONS_RX_FIFO_SIZE] = 16 << 10;
+  bmp->options[SESSION_OPTIONS_TX_FIFO_SIZE] = 16 << 10;
+  bmp->options[SESSION_OPTIONS_ADD_SEGMENT_SIZE] = 128 << 20;
   memcpy (bmp->uri, utm->uri, vec_len (utm->uri));
   vl_msg_api_send_shmem (utm->vl_input_queue, (u8 *) & bmp);
 
@@ -438,7 +808,7 @@ uri_udp_test (uri_udp_test_main_t * utm)
       return;
     }
 
-  handle_event_queue (utm);
+  server_handle_event_queue (utm);
 
   ump = vl_msg_api_alloc (sizeof (*ump));
   memset (ump, 0, sizeof (*ump));
@@ -464,10 +834,12 @@ main (int argc, char **argv)
   unformat_input_t _argv, *a = &_argv;
   u8 *chroot_prefix;
   u8 *heap;
-  u8 *bind_name = (u8 *) "udp4:1234";
+  u8 *bind_name = (u8 *) "udp://0.0.0.0/1234";
+  u32 tmp;
   mheap_t *h;
   session_t *session;
   int i;
+  int i_am_master = 1;
 
   clib_mem_init (0, 256 << 20);
 
@@ -481,6 +853,9 @@ main (int argc, char **argv)
 
   utm->session_index_by_vpp_handles = hash_create (0, sizeof (uword));
 
+  utm->my_pid = getpid ();
+  utm->configured_segment_size = 1 << 20;
+
   clib_time_init (&utm->clib_time);
   init_error_string_table (utm);
   svm_fifo_segment_init (0x200000000ULL, 20);
@@ -494,6 +869,14 @@ main (int argc, char **argv)
        }
       else if (unformat (a, "uri %s", &bind_name))
        ;
+      else if (unformat (a, "segment-size %dM", &tmp))
+       utm->configured_segment_size = tmp << 20;
+      else if (unformat (a, "segment-size %dG", &tmp))
+       utm->configured_segment_size = tmp << 30;
+      else if (unformat (a, "master"))
+       i_am_master = 1;
+      else if (unformat (a, "slave"))
+       i_am_master = 0;
       else
        {
          fformat (stderr, "%s: usage [master|slave]\n");
@@ -501,19 +884,30 @@ main (int argc, char **argv)
        }
     }
 
+  utm->cut_through_session_index = ~0;
   utm->uri = format (0, "%s%c", bind_name, 0);
+  utm->i_am_master = i_am_master;
+  utm->segment_main = &svm_fifo_segment_main;
+
+  utm->connect_uri = format (0, "udp://10.0.0.1/1234%c", 0);
 
   setup_signal_handlers ();
 
   uri_api_hookup (utm);
 
-  if (connect_to_vpp ("uri_udp_test") < 0)
+  if (connect_to_vpp (i_am_master ? "uri_udp_master" : "uri_udp_slave") < 0)
     {
       svm_region_exit ();
       fformat (stderr, "Couldn't connect to vpe, exiting...\n");
       exit (1);
     }
 
+  if (i_am_master == 0)
+    {
+      uri_udp_slave_test (utm);
+      exit (0);
+    }
+
   /* $$$$ hack preallocation */
   for (i = 0; i < 200000; i++)
     {
@@ -531,7 +925,7 @@ main (int argc, char **argv)
 
 #undef vl_api_version
 #define vl_api_version(n,v) static u32 vpe_api_version = v;
-#include <vpp-api/vpe.api.h>
+#include <vpp/api/vpe.api.h>
 #undef vl_api_version
 
 void
@@ -544,6 +938,12 @@ vl_client_add_api_signatures (vl_api_memclnt_create_t * mp)
   mp->api_versions[0] = clib_host_to_net_u32 (vpe_api_version);
 }
 
+u32
+vl (void *p)
+{
+  return vec_len (p);
+}
+
 /*
  * fd.io coding-style-patch-verification: ON
  *
diff --git a/src/uri/uri_udp_test2.c b/src/uri/uri_udp_test2.c
deleted file mode 100644 (file)
index ddfffaa..0000000
+++ /dev/null
@@ -1,954 +0,0 @@
-/*
- * Copyright (c) 2016 Cisco and/or its affiliates.
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at:
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include <stdio.h>
-#include <setjmp.h>
-#include <signal.h>
-#include <vppinfra/clib.h>
-#include <vppinfra/format.h>
-#include <vppinfra/error.h>
-#include <vppinfra/time.h>
-#include <vppinfra/macros.h>
-#include <vnet/vnet.h>
-#include <vlib/vlib.h>
-#include <vlib/unix/unix.h>
-#include <vlibapi/api.h>
-#include <vlibmemory/api.h>
-#include <vpp/api/vpe_msg_enum.h>
-#include <svm/svm_fifo_segment.h>
-#include <pthread.h>
-
-#include "../vnet/session/application_interface.h"
-
-#define vl_typedefs            /* define message structures */
-#include <vpp/api/vpe_all_api_h.h>
-#undef vl_typedefs
-
-/* declare message handlers for each api */
-
-#define vl_endianfun           /* define message structures */
-#include <vpp/api/vpe_all_api_h.h>
-#undef vl_endianfun
-
-/* instantiate all the print functions we know about */
-#define vl_print(handle, ...)
-#define vl_printfun
-#include <vpp/api/vpe_all_api_h.h>
-#undef vl_printfun
-
-/* Satisfy external references when not linking with -lvlib */
-vlib_main_t vlib_global_main;
-vlib_main_t **vlib_mains;
-
-typedef enum
-{
-  STATE_START,
-  STATE_READY,
-  STATE_DISCONNECTING,
-} connection_state_t;
-
-typedef struct
-{
-  svm_fifo_t *server_rx_fifo;
-  svm_fifo_t *server_tx_fifo;
-} session_t;
-
-typedef struct
-{
-  /* vpe input queue */
-  unix_shared_memory_queue_t *vl_input_queue;
-
-  /* API client handle */
-  u32 my_client_index;
-
-  /* The URI we're playing with */
-  u8 *uri;
-
-  /* Session pool */
-  session_t *sessions;
-
-  /* Hash table for disconnect processing */
-  uword *session_index_by_vpp_handles;
-
-  /* fifo segment */
-  svm_fifo_segment_private_t *seg;
-
-  /* intermediate rx buffer */
-  u8 *rx_buf;
-
-  /* URI for connect */
-  u8 *connect_uri;
-
-  int i_am_master;
-
-  /* Our event queue */
-  unix_shared_memory_queue_t *our_event_queue;
-
-  /* $$$ single thread only for the moment */
-  unix_shared_memory_queue_t *vpp_event_queue;
-
-  /* $$$$ hack: cut-through session index */
-  volatile u32 cut_through_session_index;
-
-  /* unique segment name counter */
-  u32 unique_segment_index;
-
-  pid_t my_pid;
-
-  /* pthread handle */
-  pthread_t cut_through_thread_handle;
-
-  /* For deadman timers */
-  clib_time_t clib_time;
-
-  /* State of the connection, shared between msg RX thread and main thread */
-  volatile connection_state_t state;
-
-  volatile int time_to_stop;
-  volatile int time_to_print_stats;
-
-  u32 configured_segment_size;
-
-  /* VNET_API_ERROR_FOO -> "Foo" hash table */
-  uword *error_string_by_error_number;
-
-  /* convenience */
-  svm_fifo_segment_main_t *segment_main;
-
-} uri_udp_test_main_t;
-
-#if CLIB_DEBUG > 0
-#define NITER 10000
-#else
-#define NITER 4000000
-#endif
-
-uri_udp_test_main_t uri_udp_test_main;
-
-static void
-stop_signal (int signum)
-{
-  uri_udp_test_main_t *um = &uri_udp_test_main;
-
-  um->time_to_stop = 1;
-}
-
-static void
-stats_signal (int signum)
-{
-  uri_udp_test_main_t *um = &uri_udp_test_main;
-
-  um->time_to_print_stats = 1;
-}
-
-static clib_error_t *
-setup_signal_handlers (void)
-{
-  signal (SIGINT, stats_signal);
-  signal (SIGQUIT, stop_signal);
-  signal (SIGTERM, stop_signal);
-
-  return 0;
-}
-
-u8 *
-format_api_error (u8 * s, va_list * args)
-{
-  uri_udp_test_main_t *utm = va_arg (*args, uri_udp_test_main_t *);
-  i32 error = va_arg (*args, u32);
-  uword *p;
-
-  p = hash_get (utm->error_string_by_error_number, -error);
-
-  if (p)
-    s = format (s, "%s", p[0]);
-  else
-    s = format (s, "%d", error);
-  return s;
-}
-
-int
-wait_for_state_change (uri_udp_test_main_t * utm, connection_state_t state)
-{
-#if CLIB_DEBUG > 0
-#define TIMEOUT 600.0
-#else
-#define TIMEOUT 600.0
-#endif
-
-  f64 timeout = clib_time_now (&utm->clib_time) + TIMEOUT;
-
-  while (clib_time_now (&utm->clib_time) < timeout)
-    {
-      if (utm->state == state)
-       return 0;
-    }
-  return -1;
-}
-
-u64 server_bytes_received, server_bytes_sent;
-
-static void *
-cut_through_thread_fn (void *arg)
-{
-  session_t *s;
-  svm_fifo_t *rx_fifo;
-  svm_fifo_t *tx_fifo;
-  u8 *my_copy_buffer = 0;
-  uri_udp_test_main_t *utm = &uri_udp_test_main;
-  i32 actual_transfer;
-  int rv;
-  u32 buffer_offset;
-
-  while (utm->cut_through_session_index == ~0)
-    ;
-
-  s = pool_elt_at_index (utm->sessions, utm->cut_through_session_index);
-
-  rx_fifo = s->server_rx_fifo;
-  tx_fifo = s->server_tx_fifo;
-
-  vec_validate (my_copy_buffer, 64 * 1024 - 1);
-
-  while (true)
-    {
-      /* We read from the tx fifo and write to the rx fifo */
-      do
-       {
-         actual_transfer = svm_fifo_dequeue_nowait (tx_fifo, 0,
-                                                    vec_len (my_copy_buffer),
-                                                    my_copy_buffer);
-       }
-      while (actual_transfer <= 0);
-
-      server_bytes_received += actual_transfer;
-
-      buffer_offset = 0;
-      while (actual_transfer > 0)
-       {
-         rv = svm_fifo_enqueue_nowait (rx_fifo, 0, actual_transfer,
-                                       my_copy_buffer + buffer_offset);
-         if (rv > 0)
-           {
-             actual_transfer -= rv;
-             buffer_offset += rv;
-             server_bytes_sent += rv;
-           }
-
-       }
-      if (PREDICT_FALSE (utm->time_to_stop))
-       break;
-    }
-
-  pthread_exit (0);
-}
-
-static void
-uri_udp_slave_test (uri_udp_test_main_t * utm)
-{
-  vl_api_connect_uri_t *cmp;
-  int i;
-  u8 *test_data = 0;
-  u64 bytes_received = 0, bytes_sent = 0;
-  i32 bytes_to_read;
-  int rv;
-  int mypid = getpid ();
-  f64 before, after, delta, bytes_per_second;
-  session_t *session;
-  svm_fifo_t *rx_fifo, *tx_fifo;
-  int buffer_offset, bytes_to_send = 0;
-
-  vec_validate (test_data, 64 * 1024 - 1);
-  for (i = 0; i < vec_len (test_data); i++)
-    test_data[i] = i & 0xff;
-
-  cmp = vl_msg_api_alloc (sizeof (*cmp));
-  memset (cmp, 0, sizeof (*cmp));
-
-  cmp->_vl_msg_id = ntohs (VL_API_CONNECT_URI);
-  cmp->client_index = utm->my_client_index;
-  cmp->context = ntohl (0xfeedface);
-  memcpy (cmp->uri, utm->connect_uri, vec_len (utm->connect_uri));
-  vl_msg_api_send_shmem (utm->vl_input_queue, (u8 *) & cmp);
-
-  if (wait_for_state_change (utm, STATE_READY))
-    {
-      clib_warning ("timeout waiting for STATE_READY");
-      return;
-    }
-
-  session = pool_elt_at_index (utm->sessions, utm->cut_through_session_index);
-  rx_fifo = session->server_rx_fifo;
-  tx_fifo = session->server_tx_fifo;
-
-  before = clib_time_now (&utm->clib_time);
-
-  vec_validate (utm->rx_buf, vec_len (test_data) - 1);
-
-  for (i = 0; i < NITER; i++)
-    {
-      bytes_to_send = vec_len (test_data);
-      buffer_offset = 0;
-      while (bytes_to_send > 0)
-       {
-         rv = svm_fifo_enqueue_nowait (tx_fifo, mypid,
-                                       bytes_to_send,
-                                       test_data + buffer_offset);
-
-         if (rv > 0)
-           {
-             bytes_to_send -= rv;
-             buffer_offset += rv;
-             bytes_sent += rv;
-           }
-       }
-
-      bytes_to_read = svm_fifo_max_dequeue (rx_fifo);
-
-      bytes_to_read = vec_len (utm->rx_buf) > bytes_to_read ?
-       bytes_to_read : vec_len (utm->rx_buf);
-
-      buffer_offset = 0;
-      while (bytes_to_read > 0)
-       {
-         rv = svm_fifo_dequeue_nowait (rx_fifo, mypid,
-                                       bytes_to_read,
-                                       utm->rx_buf + buffer_offset);
-         if (rv > 0)
-           {
-             bytes_to_read -= rv;
-             buffer_offset += rv;
-             bytes_received += rv;
-           }
-       }
-    }
-  while (bytes_received < bytes_sent)
-    {
-      rv = svm_fifo_dequeue_nowait (rx_fifo, mypid,
-                                   vec_len (utm->rx_buf), utm->rx_buf);
-      if (rv > 0)
-       {
-#if CLIB_DEBUG > 0
-         int j;
-         for (j = 0; j < rv; j++)
-           {
-             if (utm->rx_buf[j] != ((bytes_received + j) & 0xff))
-               {
-                 clib_warning ("error at byte %lld, 0x%x not 0x%x",
-                               bytes_received + j,
-                               utm->rx_buf[j],
-                               ((bytes_received + j) & 0xff));
-               }
-           }
-#endif
-         bytes_received += (u64) rv;
-       }
-    }
-
-  after = clib_time_now (&utm->clib_time);
-  delta = after - before;
-  bytes_per_second = 0.0;
-
-  if (delta > 0.0)
-    bytes_per_second = (f64) bytes_received / delta;
-
-  fformat (stdout,
-          "Done: %lld recv bytes in %.2f seconds, %.2f bytes/sec...\n\n",
-          bytes_received, delta, bytes_per_second);
-  fformat (stdout,
-          "Done: %lld sent bytes in %.2f seconds, %.2f bytes/sec...\n\n",
-          bytes_sent, delta, bytes_per_second);
-  fformat (stdout,
-          "client -> server -> client round trip: %.2f Gbit/sec \n\n",
-          (bytes_per_second * 8.0) / 1e9);
-}
-
-static void
-vl_api_bind_uri_reply_t_handler (vl_api_bind_uri_reply_t * mp)
-{
-  uri_udp_test_main_t *utm = &uri_udp_test_main;
-  svm_fifo_segment_create_args_t _a, *a = &_a;
-  int rv;
-
-  if (mp->segment_name_length == 0)
-    {
-      clib_warning ("segment_name_length zero");
-      return;
-    }
-
-  a->segment_name = (char *) mp->segment_name;
-  a->segment_size = mp->segment_size;
-
-  ASSERT (mp->server_event_queue_address);
-
-  /* Attach to the segment vpp created */
-  rv = svm_fifo_segment_attach (a);
-  if (rv)
-    {
-      clib_warning ("svm_fifo_segment_attach ('%s') failed",
-                   mp->segment_name);
-      return;
-    }
-
-  utm->our_event_queue = (unix_shared_memory_queue_t *)
-    mp->server_event_queue_address;
-
-  utm->state = STATE_READY;
-}
-
-static void
-vl_api_map_another_segment_t_handler (vl_api_map_another_segment_t * mp)
-{
-  svm_fifo_segment_create_args_t _a, *a = &_a;
-  int rv;
-
-  a->segment_name = (char *) mp->segment_name;
-  a->segment_size = mp->segment_size;
-  /* Attach to the segment vpp created */
-  rv = svm_fifo_segment_attach (a);
-  if (rv)
-    {
-      clib_warning ("svm_fifo_segment_attach ('%s') failed",
-                   mp->segment_name);
-      return;
-    }
-  clib_warning ("Mapped new segment '%s' size %d", mp->segment_name,
-               mp->segment_size);
-}
-
-static void
-vl_api_connect_uri_t_handler (vl_api_connect_uri_t * mp)
-{
-  u32 segment_index;
-  uri_udp_test_main_t *utm = &uri_udp_test_main;
-  svm_fifo_segment_main_t *sm = &svm_fifo_segment_main;
-  svm_fifo_segment_create_args_t _a, *a = &_a;
-  svm_fifo_segment_private_t *seg;
-  unix_shared_memory_queue_t *client_q;
-  vl_api_connect_uri_reply_t *rmp;
-  session_t *session;
-  int rv = 0;
-
-  /* Create the segment */
-  a->segment_name = (char *) format (0, "%d:segment%d%c", utm->my_pid,
-                                    utm->unique_segment_index++, 0);
-  a->segment_size = utm->configured_segment_size;
-
-  rv = svm_fifo_segment_create (a);
-  if (rv)
-    {
-      clib_warning ("sm_fifo_segment_create ('%s') failed", a->segment_name);
-      rv = VNET_API_ERROR_URI_FIFO_CREATE_FAILED;
-      goto send_reply;
-    }
-
-  vec_add2 (utm->seg, seg, 1);
-
-  segment_index = vec_len (sm->segments) - 1;
-
-  memcpy (seg, sm->segments + segment_index, sizeof (utm->seg[0]));
-
-  pool_get (utm->sessions, session);
-
-  /*
-   * By construction the master's idea of the rx fifo ends up in
-   * fsh->fifos[0], and the master's idea of the tx fifo ends up in
-   * fsh->fifos[1].
-   */
-  session->server_rx_fifo = svm_fifo_segment_alloc_fifo (utm->seg,
-                                                        128 * 1024);
-  ASSERT (session->server_rx_fifo);
-
-  session->server_tx_fifo = svm_fifo_segment_alloc_fifo (utm->seg,
-                                                        128 * 1024);
-  ASSERT (session->server_tx_fifo);
-
-  session->server_rx_fifo->server_session_index = session - utm->sessions;
-  session->server_tx_fifo->server_session_index = session - utm->sessions;
-  utm->cut_through_session_index = session - utm->sessions;
-
-  rv = pthread_create (&utm->cut_through_thread_handle,
-                      NULL /*attr */ , cut_through_thread_fn, 0);
-  if (rv)
-    {
-      clib_warning ("pthread_create returned %d", rv);
-      rv = VNET_API_ERROR_SYSCALL_ERROR_1;
-    }
-
-send_reply:
-  rmp = vl_msg_api_alloc (sizeof (*rmp));
-  memset (rmp, 0, sizeof (*rmp));
-
-  rmp->_vl_msg_id = ntohs (VL_API_CONNECT_URI_REPLY);
-  rmp->context = mp->context;
-  rmp->retval = ntohl (rv);
-  rmp->segment_name_length = vec_len (a->segment_name);
-  memcpy (rmp->segment_name, a->segment_name, vec_len (a->segment_name));
-
-  vec_free (a->segment_name);
-
-  client_q = (unix_shared_memory_queue_t *) mp->client_queue_address;
-  vl_msg_api_send_shmem (client_q, (u8 *) & rmp);
-}
-
-static void
-vl_api_unbind_uri_reply_t_handler (vl_api_unbind_uri_reply_t * mp)
-{
-  uri_udp_test_main_t *utm = &uri_udp_test_main;
-
-  if (mp->retval != 0)
-    clib_warning ("returned %d", ntohl (mp->retval));
-
-  utm->state = STATE_START;
-}
-
-static void
-vl_api_accept_session_t_handler (vl_api_accept_session_t * mp)
-{
-  uri_udp_test_main_t *utm = &uri_udp_test_main;
-  vl_api_accept_session_reply_t *rmp;
-  svm_fifo_t *rx_fifo, *tx_fifo;
-  session_t *session;
-  static f64 start_time;
-  u64 key;
-
-  if (start_time == 0.0)
-    start_time = clib_time_now (&utm->clib_time);
-
-  utm->vpp_event_queue = (unix_shared_memory_queue_t *)
-    mp->vpp_event_queue_address;
-
-  pool_get (utm->sessions, session);
-
-  rx_fifo = (svm_fifo_t *) mp->server_rx_fifo;
-  rx_fifo->client_session_index = session - utm->sessions;
-  tx_fifo = (svm_fifo_t *) mp->server_tx_fifo;
-  tx_fifo->client_session_index = session - utm->sessions;
-
-  session->server_rx_fifo = rx_fifo;
-  session->server_tx_fifo = tx_fifo;
-
-  key = (((u64) mp->session_thread_index) << 32) | (u64) mp->session_index;
-
-  hash_set (utm->session_index_by_vpp_handles, key, session - utm->sessions);
-
-  utm->state = STATE_READY;
-
-  if (pool_elts (utm->sessions) && (pool_elts (utm->sessions) % 20000) == 0)
-    {
-      f64 now = clib_time_now (&utm->clib_time);
-      fformat (stdout, "%d active sessions in %.2f seconds, %.2f/sec...\n",
-              pool_elts (utm->sessions), now - start_time,
-              (f64) pool_elts (utm->sessions) / (now - start_time));
-    }
-
-  rmp = vl_msg_api_alloc (sizeof (*rmp));
-  memset (rmp, 0, sizeof (*rmp));
-  rmp->_vl_msg_id = ntohs (VL_API_ACCEPT_SESSION_REPLY);
-  rmp->session_type = mp->session_type;
-  rmp->session_index = mp->session_index;
-  rmp->session_thread_index = mp->session_thread_index;
-  vl_msg_api_send_shmem (utm->vl_input_queue, (u8 *) & rmp);
-}
-
-static void
-vl_api_disconnect_session_t_handler (vl_api_disconnect_session_t * mp)
-{
-  uri_udp_test_main_t *utm = &uri_udp_test_main;
-  session_t *session;
-  vl_api_disconnect_session_reply_t *rmp;
-  uword *p;
-  int rv = 0;
-  u64 key;
-
-  key = (((u64) mp->session_thread_index) << 32) | (u64) mp->session_index;
-
-  p = hash_get (utm->session_index_by_vpp_handles, key);
-
-  if (p)
-    {
-      session = pool_elt_at_index (utm->sessions, p[0]);
-      hash_unset (utm->session_index_by_vpp_handles, key);
-      pool_put (utm->sessions, session);
-    }
-  else
-    {
-      clib_warning ("couldn't find session key %llx", key);
-      rv = -11;
-    }
-
-  rmp = vl_msg_api_alloc (sizeof (*rmp));
-  memset (rmp, 0, sizeof (*rmp));
-  rmp->_vl_msg_id = ntohs (VL_API_DISCONNECT_SESSION_REPLY);
-  rmp->retval = rv;
-  rmp->session_index = mp->session_index;
-  rmp->session_thread_index = mp->session_thread_index;
-  vl_msg_api_send_shmem (utm->vl_input_queue, (u8 *) & rmp);
-}
-
-static void
-vl_api_connect_uri_reply_t_handler (vl_api_connect_uri_reply_t * mp)
-{
-  svm_fifo_segment_main_t *sm = &svm_fifo_segment_main;
-  uri_udp_test_main_t *utm = &uri_udp_test_main;
-  svm_fifo_segment_create_args_t _a, *a = &_a;
-  ssvm_shared_header_t *sh;
-  svm_fifo_segment_private_t *seg;
-  svm_fifo_segment_header_t *fsh;
-  session_t *session;
-  u32 segment_index;
-  int rv;
-
-  ASSERT (utm->i_am_master == 0);
-
-  if (mp->segment_name_length == 0)
-    {
-      clib_warning ("segment_name_length zero");
-      return;
-    }
-
-  memset (a, 0, sizeof (*a));
-
-  a->segment_name = (char *) mp->segment_name;
-
-  sleep (1);
-
-  rv = svm_fifo_segment_attach (a);
-  if (rv)
-    {
-      clib_warning ("sm_fifo_segment_create ('%v') failed", mp->segment_name);
-      return;
-    }
-
-  segment_index = vec_len (sm->segments) - 1;
-
-  vec_add2 (utm->seg, seg, 1);
-
-  memcpy (seg, sm->segments + segment_index, sizeof (*seg));
-  sh = seg->ssvm.sh;
-  fsh = (svm_fifo_segment_header_t *) sh->opaque[0];
-
-  while (vec_len (fsh->fifos) < 2)
-    sleep (1);
-
-  pool_get (utm->sessions, session);
-  utm->cut_through_session_index = session - utm->sessions;
-
-  session->server_rx_fifo = (svm_fifo_t *) fsh->fifos[0];
-  ASSERT (session->server_rx_fifo);
-  session->server_tx_fifo = (svm_fifo_t *) fsh->fifos[1];
-  ASSERT (session->server_tx_fifo);
-
-  /* security: could unlink /dev/shm/<mp->segment_name> here, maybe */
-
-  utm->state = STATE_READY;
-}
-
-#define foreach_uri_msg                         \
-_(BIND_URI_REPLY, bind_uri_reply)               \
-_(CONNECT_URI, connect_uri)                     \
-_(CONNECT_URI_REPLY, connect_uri_reply)         \
-_(UNBIND_URI_REPLY, unbind_uri_reply)           \
-_(ACCEPT_SESSION, accept_session)              \
-_(DISCONNECT_SESSION, disconnect_session)      \
-_(MAP_ANOTHER_SEGMENT, map_another_segment)
-
-void
-uri_api_hookup (uri_udp_test_main_t * utm)
-{
-#define _(N,n)                                                  \
-    vl_msg_api_set_handlers(VL_API_##N, #n,                     \
-                           vl_api_##n##_t_handler,              \
-                           vl_noop_handler,                     \
-                           vl_api_##n##_t_endian,               \
-                           vl_api_##n##_t_print,                \
-                           sizeof(vl_api_##n##_t), 1);
-  foreach_uri_msg;
-#undef _
-
-}
-
-
-int
-connect_to_vpp (char *name)
-{
-  uri_udp_test_main_t *utm = &uri_udp_test_main;
-  api_main_t *am = &api_main;
-
-  if (vl_client_connect_to_vlib ("/vpe-api", name, 32) < 0)
-    return -1;
-
-  utm->vl_input_queue = am->shmem_hdr->vl_input_queue;
-  utm->my_client_index = am->my_client_index;
-
-  return 0;
-}
-
-void
-vlib_cli_output (struct vlib_main_t *vm, char *fmt, ...)
-{
-  clib_warning ("BUG");
-}
-
-static void
-init_error_string_table (uri_udp_test_main_t * utm)
-{
-  utm->error_string_by_error_number = hash_create (0, sizeof (uword));
-
-#define _(n,v,s) hash_set (utm->error_string_by_error_number, -v, s);
-  foreach_vnet_api_error;
-#undef _
-
-  hash_set (utm->error_string_by_error_number, 99, "Misc");
-}
-
-void
-handle_fifo_event_server_rx (uri_udp_test_main_t * utm,
-                            session_fifo_event_t * e)
-{
-  svm_fifo_t *rx_fifo, *tx_fifo;
-  int nbytes;
-
-  session_fifo_event_t evt;
-  unix_shared_memory_queue_t *q;
-  int rv;
-
-  rx_fifo = e->fifo;
-  tx_fifo = utm->sessions[rx_fifo->client_session_index].server_tx_fifo;
-
-  do
-    {
-      nbytes = svm_fifo_dequeue_nowait (rx_fifo, 0,
-                                       vec_len (utm->rx_buf), utm->rx_buf);
-    }
-  while (nbytes <= 0);
-  do
-    {
-      rv = svm_fifo_enqueue_nowait (tx_fifo, 0, nbytes, utm->rx_buf);
-    }
-  while (rv == -2);
-
-  /* Fabricate TX event, send to vpp */
-  evt.fifo = tx_fifo;
-  evt.event_type = FIFO_EVENT_SERVER_TX;
-  /* $$$$ for event logging */
-  evt.enqueue_length = nbytes;
-  evt.event_id = e->event_id;
-  q = utm->vpp_event_queue;
-  unix_shared_memory_queue_add (q, (u8 *) & evt, 0 /* do wait for mutex */ );
-}
-
-void
-handle_event_queue (uri_udp_test_main_t * utm)
-{
-  session_fifo_event_t _e, *e = &_e;;
-
-  while (1)
-    {
-      unix_shared_memory_queue_sub (utm->our_event_queue, (u8 *) e,
-                                   0 /* nowait */ );
-      switch (e->event_type)
-       {
-       case FIFO_EVENT_SERVER_RX:
-         handle_fifo_event_server_rx (utm, e);
-         break;
-
-       case FIFO_EVENT_SERVER_EXIT:
-         return;
-
-       default:
-         clib_warning ("unknown event type %d", e->event_type);
-         break;
-       }
-      if (PREDICT_FALSE (utm->time_to_stop == 1))
-       break;
-      if (PREDICT_FALSE (utm->time_to_print_stats == 1))
-       {
-         utm->time_to_print_stats = 0;
-         fformat (stdout, "%d connections\n", pool_elts (utm->sessions));
-       }
-    }
-}
-
-void
-uri_udp_test (uri_udp_test_main_t * utm)
-{
-  vl_api_bind_uri_t *bmp;
-  vl_api_unbind_uri_t *ump;
-
-  bmp = vl_msg_api_alloc (sizeof (*bmp));
-  memset (bmp, 0, sizeof (*bmp));
-
-  bmp->_vl_msg_id = ntohs (VL_API_BIND_URI);
-  bmp->client_index = utm->my_client_index;
-  bmp->context = ntohl (0xfeedface);
-  bmp->initial_segment_size = 256 << 20;       /* size of initial segment */
-  bmp->options[SESSION_OPTIONS_FLAGS] =
-    SESSION_OPTIONS_FLAGS_USE_FIFO | SESSION_OPTIONS_FLAGS_ADD_SEGMENT;
-  bmp->options[SESSION_OPTIONS_RX_FIFO_SIZE] = 16 << 10;
-  bmp->options[SESSION_OPTIONS_TX_FIFO_SIZE] = 16 << 10;
-  bmp->options[SESSION_OPTIONS_ADD_SEGMENT_SIZE] = 128 << 20;
-  memcpy (bmp->uri, utm->uri, vec_len (utm->uri));
-  vl_msg_api_send_shmem (utm->vl_input_queue, (u8 *) & bmp);
-
-  if (wait_for_state_change (utm, STATE_READY))
-    {
-      clib_warning ("timeout waiting for STATE_READY");
-      return;
-    }
-
-  handle_event_queue (utm);
-
-  ump = vl_msg_api_alloc (sizeof (*ump));
-  memset (ump, 0, sizeof (*ump));
-
-  ump->_vl_msg_id = ntohs (VL_API_UNBIND_URI);
-  ump->client_index = utm->my_client_index;
-  memcpy (ump->uri, utm->uri, vec_len (utm->uri));
-  vl_msg_api_send_shmem (utm->vl_input_queue, (u8 *) & ump);
-
-  if (wait_for_state_change (utm, STATE_START))
-    {
-      clib_warning ("timeout waiting for STATE_START");
-      return;
-    }
-
-  fformat (stdout, "Test complete...\n");
-}
-
-int
-main (int argc, char **argv)
-{
-  uri_udp_test_main_t *utm = &uri_udp_test_main;
-  unformat_input_t _argv, *a = &_argv;
-  u8 *chroot_prefix;
-  u8 *heap;
-  u8 *bind_name = (u8 *) "udp://0.0.0.0/1234";
-  u32 tmp;
-  mheap_t *h;
-  session_t *session;
-  int i;
-  int i_am_master = 1;
-
-  clib_mem_init (0, 256 << 20);
-
-  heap = clib_mem_get_per_cpu_heap ();
-  h = mheap_header (heap);
-
-  /* make the main heap thread-safe */
-  h->flags |= MHEAP_FLAG_THREAD_SAFE;
-
-  vec_validate (utm->rx_buf, 8192);
-
-  utm->session_index_by_vpp_handles = hash_create (0, sizeof (uword));
-
-  utm->my_pid = getpid ();
-  utm->configured_segment_size = 1 << 20;
-
-  clib_time_init (&utm->clib_time);
-  init_error_string_table (utm);
-  svm_fifo_segment_init (0x200000000ULL, 20);
-  unformat_init_command_line (a, argv);
-
-  while (unformat_check_input (a) != UNFORMAT_END_OF_INPUT)
-    {
-      if (unformat (a, "chroot prefix %s", &chroot_prefix))
-       {
-         vl_set_memory_root_path ((char *) chroot_prefix);
-       }
-      else if (unformat (a, "uri %s", &bind_name))
-       ;
-      else if (unformat (a, "segment-size %dM", &tmp))
-       utm->configured_segment_size = tmp << 20;
-      else if (unformat (a, "segment-size %dG", &tmp))
-       utm->configured_segment_size = tmp << 30;
-      else if (unformat (a, "master"))
-       i_am_master = 1;
-      else if (unformat (a, "slave"))
-       i_am_master = 0;
-      else
-       {
-         fformat (stderr, "%s: usage [master|slave]\n");
-         exit (1);
-       }
-    }
-
-  utm->cut_through_session_index = ~0;
-  utm->uri = format (0, "%s%c", bind_name, 0);
-  utm->i_am_master = i_am_master;
-  utm->segment_main = &svm_fifo_segment_main;
-
-  utm->connect_uri = format (0, "udp://10.0.0.1/1234%c", 0);
-
-  setup_signal_handlers ();
-
-  uri_api_hookup (utm);
-
-  if (connect_to_vpp (i_am_master ? "uri_udp_master" : "uri_udp_slave") < 0)
-    {
-      svm_region_exit ();
-      fformat (stderr, "Couldn't connect to vpe, exiting...\n");
-      exit (1);
-    }
-
-  if (i_am_master == 0)
-    {
-      uri_udp_slave_test (utm);
-      exit (0);
-    }
-
-  /* $$$$ hack preallocation */
-  for (i = 0; i < 200000; i++)
-    {
-      pool_get (utm->sessions, session);
-      memset (session, 0, sizeof (*session));
-    }
-  for (i = 0; i < 200000; i++)
-    pool_put_index (utm->sessions, i);
-
-  uri_udp_test (utm);
-
-  vl_client_disconnect_from_vlib ();
-  exit (0);
-}
-
-#undef vl_api_version
-#define vl_api_version(n,v) static u32 vpe_api_version = v;
-#include <vpp/api/vpe.api.h>
-#undef vl_api_version
-
-void
-vl_client_add_api_signatures (vl_api_memclnt_create_t * mp)
-{
-  /*
-   * Send the main API signature in slot 0. This bit of code must
-   * match the checks in ../vpe/api/api.c: vl_msg_api_version_check().
-   */
-  mp->api_versions[0] = clib_host_to_net_u32 (vpe_api_version);
-}
-
-u32
-vl (void *p)
-{
-  return vec_len (p);
-}
-
-/*
- * fd.io coding-style-patch-verification: ON
- *
- * Local Variables:
- * eval: (c-set-style "gnu")
- * End:
- */
diff --git a/src/uri/uritest.c b/src/uri/uritest.c
deleted file mode 100644 (file)
index edcdb3a..0000000
+++ /dev/null
@@ -1,484 +0,0 @@
-/*
- * Copyright (c) 2016 Cisco and/or its affiliates.
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at:
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include <stdio.h>
-#include <setjmp.h>
-#include <vppinfra/clib.h>
-#include <vppinfra/format.h>
-#include <vppinfra/error.h>
-#include <vppinfra/time.h>
-#include <vppinfra/macros.h>
-#include <vnet/vnet.h>
-#include <vlib/vlib.h>
-#include <vlib/unix/unix.h>
-#include <vlibapi/api.h>
-#include <vlibmemory/api.h>
-#include <vpp-api/vpe_msg_enum.h>
-#include <svm_fifo_segment.h>
-
-#define vl_typedefs            /* define message structures */
-#include <vpp-api/vpe_all_api_h.h>
-#undef vl_typedefs
-
-/* declare message handlers for each api */
-
-#define vl_endianfun           /* define message structures */
-#include <vpp-api/vpe_all_api_h.h>
-#undef vl_endianfun
-
-/* instantiate all the print functions we know about */
-#define vl_print(handle, ...)
-#define vl_printfun
-#include <vpp-api/vpe_all_api_h.h>
-#undef vl_printfun
-
-typedef enum
-{
-  STATE_START,
-  STATE_READY,
-  STATE_DISCONNECTING,
-} connection_state_t;
-
-typedef struct
-{
-  /* vpe input queue */
-  unix_shared_memory_queue_t *vl_input_queue;
-
-  /* API client handle */
-  u32 my_client_index;
-
-  /* role */
-  int i_am_master;
-
-  /* The URI we're playing with */
-  u8 *uri;
-
-  /* fifo segment */
-  svm_fifo_segment_private_t *seg;
-
-  svm_fifo_t *rx_fifo;
-  svm_fifo_t *tx_fifo;
-
-  /* For deadman timers */
-  clib_time_t clib_time;
-
-  /* State of the connection, shared between msg RX thread and main thread */
-  volatile connection_state_t state;
-
-  /* VNET_API_ERROR_FOO -> "Foo" hash table */
-  uword *error_string_by_error_number;
-} uritest_main_t;
-
-#if CLIB_DEBUG > 0
-#define NITER 1000
-#else
-#define NITER 1000000
-#endif
-
-uritest_main_t uritest_main;
-
-u8 *
-format_api_error (u8 * s, va_list * args)
-{
-  uritest_main_t *utm = va_arg (*args, uritest_main_t *);
-  i32 error = va_arg (*args, u32);
-  uword *p;
-
-  p = hash_get (utm->error_string_by_error_number, -error);
-
-  if (p)
-    s = format (s, "%s", p[0]);
-  else
-    s = format (s, "%d", error);
-  return s;
-}
-
-int
-wait_for_state_change (uritest_main_t * utm, connection_state_t state)
-{
-  f64 timeout = clib_time_now (&utm->clib_time) + 1.0;
-
-  while (clib_time_now (&utm->clib_time) < timeout)
-    {
-      if (utm->state == state)
-       return 0;
-    }
-  return -1;
-}
-
-static void
-vl_api_bind_uri_reply_t_handler (vl_api_bind_uri_reply_t * mp)
-{
-  uritest_main_t *utm = &uritest_main;
-  svm_fifo_segment_create_args_t _a, *a = &_a;
-  int rv;
-
-  ASSERT (utm->i_am_master);
-
-  if (mp->segment_name_length == 0)
-    {
-      clib_warning ("segment_name_length zero");
-      return;
-    }
-
-  a->segment_name = (char *) mp->segment_name;
-  a->segment_size = mp->segment_size;
-
-  /* Create the segment */
-  rv = svm_fifo_segment_create (a);
-  if (rv)
-    {
-      clib_warning ("sm_fifo_segment_create ('%s') failed", mp->segment_name);
-      return;
-    }
-
-  vec_validate (utm->seg, 0);
-
-  memcpy (utm->seg, a->rv, sizeof (*utm->seg));
-
-  /*
-   * By construction the master's idea of the rx fifo ends up in
-   * fsh->fifos[0], and the master's idea of the tx fifo ends up in
-   * fsh->fifos[1].
-   */
-  utm->rx_fifo = svm_fifo_segment_alloc_fifo (utm->seg, 10240);
-  ASSERT (utm->rx_fifo);
-
-  utm->tx_fifo = svm_fifo_segment_alloc_fifo (utm->seg, 10240);
-  ASSERT (utm->tx_fifo);
-
-  utm->state = STATE_READY;
-}
-
-static void
-vl_api_connect_uri_reply_t_handler (vl_api_connect_uri_reply_t * mp)
-{
-  uritest_main_t *utm = &uritest_main;
-  svm_fifo_segment_create_args_t _a, *a = &_a;
-  ssvm_shared_header_t *sh;
-  svm_fifo_segment_header_t *fsh;
-  int rv;
-
-  ASSERT (utm->i_am_master == 0);
-
-  if (mp->segment_name_length == 0)
-    {
-      clib_warning ("segment_name_length zero");
-      return;
-    }
-
-  memset (a, 0, sizeof (*a));
-
-  a->segment_name = (char *) mp->segment_name;
-
-  rv = svm_fifo_segment_attach (a);
-  if (rv)
-    {
-      clib_warning ("sm_fifo_segment_create ('%s') failed", mp->segment_name);
-      return;
-    }
-
-  vec_validate (utm->seg, 0);
-
-  memcpy (utm->seg, a->rv, sizeof (*utm->seg));
-  sh = utm->seg->ssvm.sh;
-  fsh = (svm_fifo_segment_header_t *) sh->opaque[0];
-
-  while (vec_len (fsh->fifos) < 2)
-    sleep (1);
-
-  utm->rx_fifo = (svm_fifo_t *) fsh->fifos[1];
-  ASSERT (utm->rx_fifo);
-  utm->tx_fifo = (svm_fifo_t *) fsh->fifos[0];
-  ASSERT (utm->tx_fifo);
-
-  /* security: could unlink /dev/shm/<mp->segment_name> here, maybe */
-
-  utm->state = STATE_READY;
-}
-
-static void
-vl_api_unbind_uri_reply_t_handler (vl_api_unbind_uri_reply_t * mp)
-{
-  uritest_main_t *utm = &uritest_main;
-
-  if (mp->retval != 0)
-    clib_warning ("returned %d", ntohl (mp->retval));
-
-  utm->state = STATE_START;
-}
-
-#define foreach_uri_msg                         \
-_(BIND_URI_REPLY, bind_uri_reply)               \
-_(CONNECT_URI_REPLY, connect_uri_reply)         \
-_(UNBIND_URI_REPLY, unbind_uri_reply)
-
-void
-uri_api_hookup (uritest_main_t * utm)
-{
-#define _(N,n)                                                  \
-    vl_msg_api_set_handlers(VL_API_##N, #n,                     \
-                           vl_api_##n##_t_handler,             \
-                           vl_noop_handler,                     \
-                           vl_api_##n##_t_endian,               \
-                           vl_api_##n##_t_print,                \
-                           sizeof(vl_api_##n##_t), 1);
-  foreach_uri_msg;
-#undef _
-
-}
-
-
-int
-connect_to_vpp (char *name)
-{
-  uritest_main_t *utm = &uritest_main;
-  api_main_t *am = &api_main;
-
-  if (vl_client_connect_to_vlib ("/vpe-api", name, 32) < 0)
-    return -1;
-
-  utm->vl_input_queue = am->shmem_hdr->vl_input_queue;
-  utm->my_client_index = am->my_client_index;
-
-  return 0;
-}
-
-void
-vlib_cli_output (struct vlib_main_t *vm, char *fmt, ...)
-{
-  clib_warning ("BUG");
-}
-
-static void
-init_error_string_table (uritest_main_t * utm)
-{
-  utm->error_string_by_error_number = hash_create (0, sizeof (uword));
-
-#define _(n,v,s) hash_set (utm->error_string_by_error_number, -v, s);
-  foreach_vnet_api_error;
-#undef _
-
-  hash_set (utm->error_string_by_error_number, 99, "Misc");
-}
-
-void
-uritest_master (uritest_main_t * utm)
-{
-  vl_api_bind_uri_t *bmp;
-  vl_api_unbind_uri_t *ump;
-  int i;
-  u8 *test_data = 0;
-  u8 *reply = 0;
-  u32 reply_len;
-  int mypid = getpid ();
-
-  for (i = 0; i < 2048; i++)
-    vec_add1 (test_data, 'a' + (i % 32));
-
-  bmp = vl_msg_api_alloc (sizeof (*bmp));
-  memset (bmp, 0, sizeof (*bmp));
-
-  bmp->_vl_msg_id = ntohs (VL_API_BIND_URI);
-  bmp->client_index = utm->my_client_index;
-  bmp->context = ntohl (0xfeedface);
-  bmp->segment_size = 256 << 10;
-  memcpy (bmp->uri, utm->uri, vec_len (utm->uri));
-  vl_msg_api_send_shmem (utm->vl_input_queue, (u8 *) & bmp);
-
-  if (wait_for_state_change (utm, STATE_READY))
-    {
-      clib_warning ("timeout waiting for STATE_READY");
-      return;
-    }
-
-  for (i = 0; i < NITER; i++)
-    svm_fifo_enqueue (utm->tx_fifo, mypid, vec_len (test_data), test_data);
-
-  vec_validate (reply, 0);
-
-  reply_len = svm_fifo_dequeue (utm->rx_fifo, mypid, vec_len (reply), reply);
-
-  if (reply_len != 1)
-    clib_warning ("reply length %d", reply_len);
-
-  if (reply[0] == 1)
-    fformat (stdout, "Test OK...");
-
-  ump = vl_msg_api_alloc (sizeof (*ump));
-  memset (ump, 0, sizeof (*ump));
-
-  ump->_vl_msg_id = ntohs (VL_API_UNBIND_URI);
-  ump->client_index = utm->my_client_index;
-  memcpy (ump->uri, utm->uri, vec_len (utm->uri));
-  vl_msg_api_send_shmem (utm->vl_input_queue, (u8 *) & ump);
-
-  if (wait_for_state_change (utm, STATE_START))
-    {
-      clib_warning ("timeout waiting for STATE_READY");
-      return;
-    }
-
-  fformat (stdout, "Master done...\n");
-}
-
-void
-uritest_slave (uritest_main_t * utm)
-{
-  vl_api_connect_uri_t *cmp;
-  int i, j;
-  u8 *test_data = 0;
-  u8 *reply = 0;
-  u32 bytes_received = 0;
-  u32 actual_bytes;
-  int mypid = getpid ();
-  u8 ok;
-  f64 before, after, delta, bytes_per_second;
-
-  vec_validate (test_data, 4095);
-
-  cmp = vl_msg_api_alloc (sizeof (*cmp));
-  memset (cmp, 0, sizeof (*cmp));
-
-  cmp->_vl_msg_id = ntohs (VL_API_CONNECT_URI);
-  cmp->client_index = utm->my_client_index;
-  cmp->context = ntohl (0xfeedface);
-  memcpy (cmp->uri, utm->uri, vec_len (utm->uri));
-  vl_msg_api_send_shmem (utm->vl_input_queue, (u8 *) & cmp);
-
-  if (wait_for_state_change (utm, STATE_READY))
-    {
-      clib_warning ("timeout waiting for STATE_READY");
-      return;
-    }
-
-  ok = 1;
-  before = clib_time_now (&utm->clib_time);
-  for (i = 0; i < NITER; i++)
-    {
-      actual_bytes = svm_fifo_dequeue (utm->rx_fifo, mypid,
-                                      vec_len (test_data), test_data);
-      j = 0;
-      while (j < actual_bytes)
-       {
-         if (test_data[j] != ('a' + (bytes_received % 32)))
-           ok = 0;
-         bytes_received++;
-         j++;
-       }
-      if (bytes_received == NITER * 2048)
-       break;
-    }
-
-  vec_add1 (reply, ok);
-
-  svm_fifo_enqueue (utm->tx_fifo, mypid, vec_len (reply), reply);
-  after = clib_time_now (&utm->clib_time);
-  delta = after - before;
-  bytes_per_second = 0.0;
-
-  if (delta > 0.0)
-    bytes_per_second = (f64) bytes_received / delta;
-
-  fformat (stdout,
-          "Slave done, %d bytes in %.2f seconds, %.2f bytes/sec...\n",
-          bytes_received, delta, bytes_per_second);
-}
-
-int
-main (int argc, char **argv)
-{
-  uritest_main_t *utm = &uritest_main;
-  unformat_input_t _argv, *a = &_argv;
-  u8 *chroot_prefix;
-  u8 *heap;
-  char *bind_name = "fifo:uritest";
-  mheap_t *h;
-  int i_am_master = 0;
-
-  clib_mem_init (0, 128 << 20);
-
-  heap = clib_mem_get_per_cpu_heap ();
-  h = mheap_header (heap);
-
-  /* make the main heap thread-safe */
-  h->flags |= MHEAP_FLAG_THREAD_SAFE;
-
-  clib_time_init (&utm->clib_time);
-  init_error_string_table (utm);
-  svm_fifo_segment_init (0x200000000ULL, 20);
-  unformat_init_command_line (a, argv);
-
-  utm->uri = format (0, "%s%c", bind_name, 0);
-
-  while (unformat_check_input (a) != UNFORMAT_END_OF_INPUT)
-    {
-      if (unformat (a, "master"))
-       i_am_master = 1;
-      else if (unformat (a, "slave"))
-       i_am_master = 0;
-      else if (unformat (a, "chroot prefix %s", &chroot_prefix))
-       {
-         vl_set_memory_root_path ((char *) chroot_prefix);
-       }
-      else
-       {
-         fformat (stderr, "%s: usage [master|slave]\n");
-         exit (1);
-       }
-    }
-
-  uri_api_hookup (utm);
-
-  if (connect_to_vpp (i_am_master ? "uritest_master" : "uritest_slave") < 0)
-    {
-      svm_region_exit ();
-      fformat (stderr, "Couldn't connect to vpe, exiting...\n");
-      exit (1);
-    }
-
-  utm->i_am_master = i_am_master;
-
-  if (i_am_master)
-    uritest_master (utm);
-  else
-    uritest_slave (utm);
-
-  vl_client_disconnect_from_vlib ();
-  exit (0);
-}
-
-#undef vl_api_version
-#define vl_api_version(n,v) static u32 vpe_api_version = v;
-#include <vpp-api/vpe.api.h>
-#undef vl_api_version
-
-void
-vl_client_add_api_signatures (vl_api_memclnt_create_t * mp)
-{
-  /*
-   * Send the main API signature in slot 0. This bit of code must
-   * match the checks in ../vpe/api/api.c: vl_msg_api_version_check().
-   */
-  mp->api_versions[0] = clib_host_to_net_u32 (vpe_api_version);
-}
-
-/*
- * fd.io coding-style-patch-verification: ON
- *
- * Local Variables:
- * eval: (c-set-style "gnu")
- * End:
- */
index 7125a12..4e30ee9 100644 (file)
@@ -461,6 +461,7 @@ libvnet_la_SOURCES +=                               \
  vnet/tcp/tcp_output.c                         \
  vnet/tcp/tcp_input.c                          \
  vnet/tcp/tcp_newreno.c                                \
+ vnet/tcp/builtin_server.c                     \
  vnet/tcp/tcp.c
 
 nobase_include_HEADERS +=                      \
index 5e65ac7..74d39bd 100644 (file)
@@ -103,7 +103,8 @@ _(LISP_RLOC_LOCAL, -110, "RLOC address is local")                       \
 _(BFD_EAGAIN, -111, "BFD object cannot be manipulated at this time")   \
 _(INVALID_GPE_MODE, -112, "Invalid GPE mode")                           \
 _(LISP_GPE_ENTRIES_PRESENT, -113, "LISP GPE entries are present")       \
-_(ADDRESS_FOUND_FOR_INTERFACE, -114, "Address found for interface")
+_(ADDRESS_FOUND_FOR_INTERFACE, -114, "Address found for interface")    \
+_(SESSION_CONNECT_FAIL, -115, "Session failed to connect")
 
 typedef enum
 {
index a561e7d..a542eeb 100644 (file)
@@ -154,6 +154,15 @@ application_get (u32 index)
   return pool_elt_at_index (app_pool, index);
 }
 
+application_t *
+application_get_if_valid (u32 index)
+{
+  if (pool_is_free_index (app_pool, index))
+    return 0;
+
+  return pool_elt_at_index (app_pool, index);
+}
+
 u32
 application_get_index (application_t * app)
 {
@@ -209,7 +218,7 @@ format_application_server (u8 * s, va_list * args)
 
   regp = vl_api_client_index_to_registration (srv->api_client_index);
   if (!regp)
-    server_name = format (0, "%s%c", regp->name, 0);
+    server_name = format (0, "builtin-%d%c", srv->index, 0);
   else
     server_name = regp->name;
 
@@ -269,11 +278,17 @@ static clib_error_t *
 show_app_command_fn (vlib_main_t * vm, unformat_input_t * input,
                     vlib_cli_command_t * cmd)
 {
+  session_manager_main_t *smm = &session_manager_main;
   application_t *app;
   int do_server = 0;
   int do_client = 0;
   int verbose = 0;
 
+  if (!smm->is_enabled)
+    {
+      clib_error_return (0, "session layer is not enabled");
+    }
+
   while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
     {
       if (unformat (input, "server"))
@@ -323,16 +338,20 @@ show_app_command_fn (vlib_main_t * vm, unformat_input_t * input,
           /* *INDENT-ON* */
        }
       else
-       vlib_cli_output (vm, "No active server bindings");
+       vlib_cli_output (vm, "No active client bindings");
     }
 
   return 0;
 }
 
+/* *INDENT-OFF* */
 VLIB_CLI_COMMAND (show_app_command, static) =
 {
-.path = "show app",.short_help =
-    "show app [server|client] [verbose]",.function = show_app_command_fn,};
+  .path = "show app",
+  .short_help = "show app [server|client] [verbose]",
+  .function = show_app_command_fn,
+};
+/* *INDENT-ON* */
 
 /*
  * fd.io coding-style-patch-verification: ON
index 027d696..480828f 100644 (file)
@@ -100,6 +100,7 @@ application_t *application_new (application_type_t type, session_type_t sst,
                                session_cb_vft_t * cb_fns);
 void application_del (application_t * app);
 application_t *application_get (u32 index);
+application_t *application_get_if_valid (u32 index);
 application_t *application_lookup (u32 api_client_index);
 u32 application_get_index (application_t * app);
 
index 0ea77fd..6ddfb70 100644 (file)
@@ -51,7 +51,7 @@ ip_is_local (ip46_address_t * ip46_address, u8 is_ip4)
       prefix.fp_proto = FIB_PROTOCOL_IP6;
     }
 
-  clib_memcpy (&prefix.fp_addr, ip46_address, sizeof (ip46_address));
+  clib_memcpy (&prefix.fp_addr, ip46_address, sizeof (ip46_address_t));
   fei = fib_table_lookup (0, &prefix);
   flags = fib_entry_get_flags (fei);
 
@@ -186,9 +186,7 @@ vnet_connect_i (u32 api_client_index, u32 api_context, session_type_t sst,
   /*
    * Not connecting to a local server. Create regular session
    */
-  stream_session_open (sst, ip46, port, app->index);
-
-  return 0;
+  return stream_session_open (sst, ip46, port, app->index);
 }
 
 /**
index e467f4e..399077d 100644 (file)
@@ -104,9 +104,13 @@ session_fifo_rx_i (vlib_main_t * vm, vlib_node_runtime_t * node,
   snd_space0 = transport_vft->send_space (tc0);
   snd_mss0 = transport_vft->send_mss (tc0);
 
+  /* Can't make any progress */
   if (snd_space0 == 0 || svm_fifo_max_dequeue (s0->server_tx_fifo) == 0
       || snd_mss0 == 0)
-    return 0;
+    {
+      vec_add1 (smm->evts_partially_read[thread_index], *e0);
+      return 0;
+    }
 
   ASSERT (e0->enqueue_length > 0);
 
@@ -143,7 +147,12 @@ session_fifo_rx_i (vlib_main_t * vm, vlib_node_runtime_t * node,
          if (PREDICT_FALSE (n_bufs < 0.9 * VLIB_FRAME_SIZE))
            {
              /* Keep track of how much we've dequeued and exit */
-             e0->enqueue_length -= max_len_to_snd0 - left_to_snd0;
+             if (left_to_snd0 != max_len_to_snd0)
+               {
+                 e0->enqueue_length -= max_len_to_snd0 - left_to_snd0;
+                 vec_add1 (smm->evts_partially_read[thread_index], *e0);
+               }
+
              return -1;
            }
 
@@ -185,12 +194,13 @@ session_fifo_rx_i (vlib_main_t * vm, vlib_node_runtime_t * node,
              t0->server_thread_index = s0->thread_index;
            }
 
+         /* *INDENT-OFF* */
          if (1)
            {
-             ELOG_TYPE_DECLARE (e) =
-             {
-             .format = "evt-dequeue: id %d length %d",.format_args =
-                 "i4i4",};
+             ELOG_TYPE_DECLARE (e) = {
+                 .format = "evt-dequeue: id %d length %d",
+                 .format_args = "i4i4",
+             };
              struct
              {
                u32 data[2];
@@ -199,6 +209,7 @@ session_fifo_rx_i (vlib_main_t * vm, vlib_node_runtime_t * node,
              ed->data[0] = e0->event_id;
              ed->data[1] = e0->enqueue_length;
            }
+         /* *INDENT-ON* */
 
          len_to_deq0 = (left_to_snd0 < snd_mss0) ? left_to_snd0 : snd_mss0;
 
@@ -289,7 +300,7 @@ session_queue_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
 {
   session_manager_main_t *smm = vnet_get_session_manager_main ();
   session_fifo_event_t *my_fifo_events, *e;
-  u32 n_to_dequeue;
+  u32 n_to_dequeue, n_events;
   unix_shared_memory_queue_t *q;
   int n_tx_packets = 0;
   u32 my_thread_index = vm->cpu_index;
@@ -309,14 +320,16 @@ session_queue_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
 
   /* min number of events we can dequeue without blocking */
   n_to_dequeue = q->cursize;
-  if (n_to_dequeue == 0)
-    return 0;
-
   my_fifo_events = smm->fifo_events[my_thread_index];
 
-  /* If we didn't manage to process previous events try going
+  if (n_to_dequeue == 0 && vec_len (my_fifo_events) == 0)
+    return 0;
+
+  /*
+   * If we didn't manage to process previous events try going
    * over them again without dequeuing new ones.
-   * XXX: Block senders to sessions that can't keep up */
+   */
+  /* XXX: Block senders to sessions that can't keep up */
   if (vec_len (my_fifo_events) >= 100)
     goto skip_dequeue;
 
@@ -338,8 +351,8 @@ session_queue_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
   smm->fifo_events[my_thread_index] = my_fifo_events;
 
 skip_dequeue:
-
-  for (i = 0; i < n_to_dequeue; i++)
+  n_events = vec_len (my_fifo_events);
+  for (i = 0; i < n_events; i++)
     {
       svm_fifo_t *f0;          /* $$$ prefetch 1 ahead maybe */
       stream_session_t *s0;
@@ -354,8 +367,13 @@ skip_dequeue:
       /* $$$ add multiple event queues, per vpp worker thread */
       ASSERT (server_thread_index0 == my_thread_index);
 
-      s0 = pool_elt_at_index (smm->sessions[my_thread_index],
-                             server_session_index0);
+      s0 = stream_session_get_if_valid (server_session_index0,
+                                       my_thread_index);
+      if (!s0)
+       {
+         clib_warning ("It's dead Jim!");
+         continue;
+       }
 
       ASSERT (s0->thread_index == my_thread_index);
 
@@ -380,11 +398,11 @@ skip_dequeue:
 done:
 
   /* Couldn't process all events. Probably out of buffers */
-  if (PREDICT_FALSE (i < n_to_dequeue))
+  if (PREDICT_FALSE (i < n_events))
     {
       session_fifo_event_t *partially_read =
        smm->evts_partially_read[my_thread_index];
-      vec_add (partially_read, &my_fifo_events[i], n_to_dequeue - i);
+      vec_add (partially_read, &my_fifo_events[i], n_events - i);
       vec_free (my_fifo_events);
       smm->fifo_events[my_thread_index] = partially_read;
       smm->evts_partially_read[my_thread_index] = 0;
@@ -413,8 +431,7 @@ VLIB_REGISTER_NODE (session_queue_node) =
   .n_errors = ARRAY_LEN (session_queue_error_strings),
   .error_strings = session_queue_error_strings,
   .n_next_nodes = SESSION_QUEUE_N_NEXT,
-  /* .state = VLIB_NODE_STATE_DISABLED, enable on-demand? */
-  /* edit / add dispositions here */
+  .state = VLIB_NODE_STATE_DISABLED,
   .next_nodes =
   {
       [SESSION_QUEUE_NEXT_DROP] = "error-drop",
index a7b28c1..582765b 100644 (file)
@@ -422,6 +422,28 @@ define reset_sock_reply {
   i32 retval;
   u64 handle;
 };
+
+/** \brief enable/disable session layer
+    @param client_index - opaque cookie to identify the sender
+                          client to vpp direction only
+    @param context - sender context, to match reply w/ request
+    @param is_enable - disable session layer if 0, enable otherwise
+*/
+define session_enable_disable {
+  u32 client_index;
+  u32 context;
+  u8 is_enable;
+};
+
+/** \brief Reply for session enable/disable
+    @param context - returned sender context, to match reply w/ request
+    @param retval - return code
+*/
+define session_enable_disable_reply {
+  u32 context;
+  i32 retval;
+};
+
 /*
  * Local Variables:
  * eval: (c-set-style "gnu")
index 539da61..422527e 100644 (file)
@@ -311,11 +311,11 @@ stream_session_half_open_lookup (session_manager_main_t * smm,
 }
 
 transport_connection_t *
-stream_session_lookup_transport4 (session_manager_main_t * smm,
-                                 ip4_address_t * lcl, ip4_address_t * rmt,
+stream_session_lookup_transport4 (ip4_address_t * lcl, ip4_address_t * rmt,
                                  u16 lcl_port, u16 rmt_port, u8 proto,
                                  u32 my_thread_index)
 {
+  session_manager_main_t *smm = &session_manager_main;
   session_kv4_t kv4;
   stream_session_t *s;
   int rv;
@@ -345,11 +345,11 @@ stream_session_lookup_transport4 (session_manager_main_t * smm,
 }
 
 transport_connection_t *
-stream_session_lookup_transport6 (session_manager_main_t * smm,
-                                 ip6_address_t * lcl, ip6_address_t * rmt,
+stream_session_lookup_transport6 (ip6_address_t * lcl, ip6_address_t * rmt,
                                  u16 lcl_port, u16 rmt_port, u8 proto,
                                  u32 my_thread_index)
 {
+  session_manager_main_t *smm = &session_manager_main;
   stream_session_t *s;
   session_kv6_t kv6;
   int rv;
@@ -554,7 +554,7 @@ session_manager_allocate_session_fifos (session_manager_main_t * smm,
                                        u8 * added_a_segment)
 {
   svm_fifo_segment_private_t *fifo_segment;
-  u32 fifo_size, default_fifo_size = 8192 /* TODO config */ ;
+  u32 fifo_size, default_fifo_size = 128 << 10;        /* TODO config */
   int i;
 
   *added_a_segment = 0;
@@ -948,7 +948,7 @@ void
 connects_session_manager_init (session_manager_main_t * smm, u8 session_type)
 {
   session_manager_t *sm;
-  u32 connect_fifo_size = 8 << 10;     /* Config? */
+  u32 connect_fifo_size = 256 << 10;   /* Config? */
   u32 default_segment_size = 1 << 20;
 
   pool_get (smm->session_managers, sm);
@@ -1055,10 +1055,15 @@ stream_session_delete (stream_session_t * s)
   svm_fifo_segment_free_fifo (fifo_segment, s->server_rx_fifo);
   svm_fifo_segment_free_fifo (fifo_segment, s->server_tx_fifo);
 
-  /* Cleanup app if client */
-  app = application_get (s->app_index);
+  app = application_get_if_valid (s->app_index);
+
+  /* No app. A possibility: after disconnect application called unbind */
+  if (!app)
+    return;
+
   if (app->mode == APP_CLIENT)
     {
+      /* Cleanup app if client */
       application_del (app);
     }
   else if (app->mode == APP_SERVER)
@@ -1068,6 +1073,7 @@ stream_session_delete (stream_session_t * s)
       svm_fifo_t **fifos;
       u32 fifo_index;
 
+      /* For server, see if any segments can be removed */
       sm = session_manager_get (app->session_manager_index);
 
       /* Delete fifo */
@@ -1096,10 +1102,10 @@ stream_session_delete_notify (transport_connection_t * tc)
 {
   stream_session_t *s;
 
+  /* App might've been removed already */
   s = stream_session_get_if_valid (tc->s_index, tc->thread_index);
   if (!s)
     {
-      clib_warning ("Surprised!");
       return;
     }
   stream_session_delete (s);
@@ -1151,16 +1157,24 @@ stream_session_accept (transport_connection_t * tc, u32 listener_index,
   return 0;
 }
 
-void
+int
 stream_session_open (u8 sst, ip46_address_t * addr, u16 port_host_byte_order,
                     u32 app_index)
 {
   transport_connection_t *tc;
   u32 tci;
   u64 value;
+  int rv;
 
   /* Ask transport to open connection */
-  tci = tp_vfts[sst].open (addr, port_host_byte_order);
+  rv = tp_vfts[sst].open (addr, port_host_byte_order);
+  if (rv < 0)
+    {
+      clib_warning ("Transport failed to open connection.");
+      return VNET_API_ERROR_SESSION_CONNECT_FAIL;
+    }
+
+  tci = rv;
 
   /* Get transport connection */
   tc = tp_vfts[sst].get_half_open (tci);
@@ -1170,6 +1184,8 @@ stream_session_open (u8 sst, ip46_address_t * addr, u16 port_host_byte_order,
 
   /* Add to the half-open lookup table */
   stream_session_half_open_table_add (sst, tc, value);
+
+  return 0;
 }
 
 /**
@@ -1216,16 +1232,13 @@ session_get_transport_vft (u8 type)
 }
 
 static clib_error_t *
-session_manager_main_init (vlib_main_t * vm)
+session_manager_main_enable (vlib_main_t * vm)
 {
-  u32 num_threads;
-  vlib_thread_main_t *vtm = vlib_get_thread_main ();
   session_manager_main_t *smm = &session_manager_main;
+  vlib_thread_main_t *vtm = vlib_get_thread_main ();
+  u32 num_threads;
   int i;
 
-  smm->vlib_main = vm;
-  smm->vnet_main = vnet_get_main ();
-
   num_threads = 1 /* main thread */  + vtm->n_threads;
 
   if (num_threads < 1)
@@ -1272,11 +1285,48 @@ session_manager_main_init (vlib_main_t * vm)
   for (i = 0; i < SESSION_N_TYPES; i++)
     smm->connect_manager_index[i] = INVALID_INDEX;
 
+  smm->is_enabled = 1;
+
   return 0;
 }
 
-VLIB_INIT_FUNCTION (session_manager_main_init);
+clib_error_t *
+vnet_session_enable_disable (vlib_main_t * vm, u8 is_en)
+{
+  if (is_en)
+    {
+      if (session_manager_main.is_enabled)
+       return 0;
+
+      vlib_node_set_state (vm, session_queue_node.index,
+                          VLIB_NODE_STATE_POLLING);
+
+      return session_manager_main_enable (vm);
+    }
+  else
+    {
+      session_manager_main.is_enabled = 0;
+      vlib_node_set_state (vm, session_queue_node.index,
+                          VLIB_NODE_STATE_DISABLED);
+    }
+
+  return 0;
+}
+
+
+clib_error_t *
+session_manager_main_init (vlib_main_t * vm)
+{
+  session_manager_main_t *smm = &session_manager_main;
+
+  smm->vlib_main = vm;
+  smm->vnet_main = vnet_get_main ();
+  smm->is_enabled = 0;
+
+  return 0;
+}
 
+VLIB_INIT_FUNCTION (session_manager_main_init)
 /*
  * fd.io coding-style-patch-verification: ON
  *
index cf14cca..46e5ce2 100644 (file)
@@ -213,12 +213,15 @@ struct _session_manager_main
   /** Per transport rx function that can either dequeue or peek */
   session_fifo_rx_fn *session_rx_fns[SESSION_N_TYPES];
 
+  u8 is_enabled;
+
   /* Convenience */
   vlib_main_t *vlib_main;
   vnet_main_t *vnet_main;
 };
 
 extern session_manager_main_t session_manager_main;
+extern vlib_node_registration_t session_queue_node;
 
 /*
  * Session manager function
@@ -276,14 +279,12 @@ stream_session_t *stream_session_lookup6 (ip6_address_t * lcl,
                                          ip6_address_t * rmt, u16 lcl_port,
                                          u16 rmt_port, u8, u32 thread_index);
 transport_connection_t
-  * stream_session_lookup_transport4 (session_manager_main_t * smm,
-                                     ip4_address_t * lcl,
+  * stream_session_lookup_transport4 (ip4_address_t * lcl,
                                      ip4_address_t * rmt, u16 lcl_port,
                                      u16 rmt_port, u8 proto,
                                      u32 thread_index);
 transport_connection_t
-  * stream_session_lookup_transport6 (session_manager_main_t * smm,
-                                     ip6_address_t * lcl,
+  * stream_session_lookup_transport6 (ip6_address_t * lcl,
                                      ip6_address_t * rmt, u16 lcl_port,
                                      u16 rmt_port, u8 proto,
                                      u32 thread_index);
@@ -338,6 +339,14 @@ stream_session_max_enqueue (transport_connection_t * tc)
   return svm_fifo_max_enqueue (s->server_rx_fifo);
 }
 
+always_inline u32
+stream_session_fifo_size (transport_connection_t * tc)
+{
+  stream_session_t *s = stream_session_get (tc->s_index, tc->thread_index);
+  return s->server_rx_fifo->nitems;
+}
+
+
 int
 stream_session_enqueue_data (transport_connection_t * tc, u8 * data, u16 len,
                             u8 queue_event);
@@ -356,8 +365,8 @@ void stream_session_reset_notify (transport_connection_t * tc);
 int
 stream_session_accept (transport_connection_t * tc, u32 listener_index,
                       u8 sst, u8 notify);
-void stream_session_open (u8 sst, ip46_address_t * addr,
-                         u16 port_host_byte_order, u32 api_client_index);
+int stream_session_open (u8 sst, ip46_address_t * addr,
+                        u16 port_host_byte_order, u32 api_client_index);
 void stream_session_disconnect (stream_session_t * s);
 void stream_session_cleanup (stream_session_t * s);
 int
@@ -369,6 +378,8 @@ u8 *format_stream_session (u8 * s, va_list * args);
 void session_register_transport (u8 type, const transport_proto_vft_t * vft);
 transport_proto_vft_t *session_get_transport_vft (u8 type);
 
+clib_error_t *vnet_session_enable_disable (vlib_main_t * vm, u8 is_en);
+
 #endif /* __included_session_h__ */
 
 /*
index 9d06868..8852fc6 100644 (file)
@@ -52,6 +52,8 @@ _(DISCONNECT_SOCK, disconnect_sock)                                   \
 _(DISCONNECT_SOCK_REPLY, disconnect_sock_reply)                                \
 _(ACCEPT_SOCK_REPLY, accept_sock_reply)                                \
 _(RESET_SOCK_REPLY, reset_sock_reply)                                  \
+_(SESSION_ENABLE_DISABLE, session_enable_disable)                      \
+
 
 static int
 send_add_segment_callback (u32 api_client_index, const u8 * segment_name,
@@ -146,7 +148,6 @@ send_session_connected_uri_callback (u32 api_client_index,
   mp = vl_msg_api_alloc (sizeof (*mp));
   mp->_vl_msg_id = clib_host_to_net_u16 (VL_API_CONNECT_URI_REPLY);
   mp->context = app->api_context;
-  mp->retval = is_fail;
   if (!is_fail)
     {
       vpp_queue = session_manager_get_vpp_event_queue (s->thread_index);
@@ -157,6 +158,7 @@ send_session_connected_uri_callback (u32 api_client_index,
       mp->session_type = s->session_type;
       mp->vpp_event_queue_address = (u64) vpp_queue;
       mp->client_event_queue_address = (u64) app->event_queue;
+      mp->retval = 0;
 
       session_manager_get_segment_info (s->server_segment_index, &seg_name,
                                        &mp->segment_size);
@@ -164,12 +166,22 @@ send_session_connected_uri_callback (u32 api_client_index,
       if (mp->segment_name_length)
        clib_memcpy (mp->segment_name, seg_name, mp->segment_name_length);
     }
+  else
+    {
+      mp->retval = VNET_API_ERROR_SESSION_CONNECT_FAIL;
+    }
 
   vl_msg_api_send_shmem (q, (u8 *) & mp);
 
   /* Remove client if connect failed */
   if (is_fail)
-    application_del (app);
+    {
+      application_del (app);
+    }
+  else
+    {
+      s->session_state = SESSION_STATE_READY;
+    }
 
   return 0;
 }
@@ -431,6 +443,17 @@ api_session_not_valid (u32 session_index, u32 thread_index)
   return 0;
 }
 
+static void
+vl_api_session_enable_disable_t_handler (vl_api_session_enable_disable_t * mp)
+{
+  vl_api_session_enable_disable_reply_t *rmp;
+  vlib_main_t *vm = vlib_get_main ();
+  int rv = 0;
+
+  vnet_session_enable_disable (vm, mp->is_enable);
+  REPLY_MACRO (VL_API_SESSION_ENABLE_DISABLE_REPLY);
+}
+
 static void
 vl_api_bind_uri_t_handler (vl_api_bind_uri_t * mp)
 {
@@ -476,7 +499,6 @@ vl_api_bind_uri_t_handler (vl_api_bind_uri_t * mp)
       }
   }));
   /* *INDENT-ON* */
-
 }
 
 static void
@@ -493,7 +515,9 @@ vl_api_unbind_uri_t_handler (vl_api_unbind_uri_t * mp)
 static void
 vl_api_connect_uri_t_handler (vl_api_connect_uri_t * mp)
 {
+  vl_api_connect_uri_reply_t *rmp;
   vnet_connect_args_t _a, *a = &_a;
+  int rv;
 
   a->uri = (char *) mp->uri;
   a->api_client_index = mp->client_index;
@@ -501,7 +525,19 @@ vl_api_connect_uri_t_handler (vl_api_connect_uri_t * mp)
   a->options = mp->options;
   a->session_cb_vft = &uri_session_cb_vft;
   a->mp = mp;
-  vnet_connect_uri (a);
+
+  rv = vnet_connect_uri (a);
+
+  if (rv == 0 || rv == VNET_CONNECT_REDIRECTED)
+    return;
+
+  /* Got some error, relay it */
+
+  /* *INDENT-OFF* */
+  REPLY_MACRO2 (VL_API_CONNECT_URI_REPLY, ({
+    rmp->retval = rv;
+  }));
+  /* *INDENT-ON* */
 }
 
 static void
@@ -662,7 +698,9 @@ vl_api_unbind_sock_t_handler (vl_api_unbind_sock_t * mp)
 static void
 vl_api_connect_sock_t_handler (vl_api_connect_sock_t * mp)
 {
+  vl_api_connect_sock_reply_t *rmp;
   vnet_connect_args_t _a, *a = &_a;
+  int rv;
 
   clib_memcpy (&a->tep.ip, mp->ip,
               (mp->is_ip4 ? sizeof (ip4_address_t) :
@@ -675,7 +713,18 @@ vl_api_connect_sock_t_handler (vl_api_connect_sock_t * mp)
   a->api_context = mp->context;
   a->mp = mp;
 
-  vnet_connect (a);
+  rv = vnet_connect (a);
+
+  if (rv == 0 || rv == VNET_CONNECT_REDIRECTED)
+    return;
+
+  /* Got some error, relay it */
+
+  /* *INDENT-OFF* */
+  REPLY_MACRO2 (VL_API_CONNECT_URI_REPLY, ({
+    rmp->retval = rv;
+  }));
+  /* *INDENT-ON* */
 }
 
 static void
index b2943a1..b029ee6 100644 (file)
@@ -60,7 +60,7 @@ format_stream_session (u8 * s, va_list * args)
     }
   else
     {
-      clib_warning ("Session in unknown state!");
+      clib_warning ("Session in state: %d!", ss->session_state);
     }
 
   vec_free (str);
@@ -78,6 +78,11 @@ show_session_command_fn (vlib_main_t * vm, unformat_input_t * input,
   stream_session_t *s;
   u8 *str = 0;
 
+  if (!smm->is_enabled)
+    {
+      clib_error_return (0, "session layer is not enabled");
+    }
+
   while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
     {
       if (unformat (input, "verbose"))
@@ -126,11 +131,14 @@ show_session_command_fn (vlib_main_t * vm, unformat_input_t * input,
   return 0;
 }
 
-VLIB_CLI_COMMAND (show_uri_command, static) =
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (show_session_command, static) =
 {
-.path = "show session",.short_help = "show session [verbose]",.function =
-    show_session_command_fn,};
-
+  .path = "show session",
+  .short_help = "show session [verbose]",
+  .function = show_session_command_fn,
+};
+/* *INDENT-ON* */
 
 static clib_error_t *
 clear_session_command_fn (vlib_main_t * vm, unformat_input_t * input,
@@ -142,6 +150,11 @@ clear_session_command_fn (vlib_main_t * vm, unformat_input_t * input,
   stream_session_t *pool, *session;
   application_t *server;
 
+  if (!smm->is_enabled)
+    {
+      clib_error_return (0, "session layer is not enabled");
+    }
+
   while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
     {
       if (unformat (input, "thread %d", &thread_index))
@@ -174,11 +187,43 @@ clear_session_command_fn (vlib_main_t * vm, unformat_input_t * input,
   return 0;
 }
 
-VLIB_CLI_COMMAND (clear_uri_session_command, static) =
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (clear_session_command, static) =
+{
+  .path = "clear session",
+  .short_help = "clear session thread <thread> session <index>",
+  .function = clear_session_command_fn,
+};
+/* *INDENT-ON* */
+
+static clib_error_t *
+session_enable_disable_fn (vlib_main_t * vm, unformat_input_t * input,
+                          vlib_cli_command_t * cmd)
+{
+  u8 is_en = 1;
+
+  while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
+    {
+      if (unformat (input, "enable"))
+       is_en = 1;
+      else if (unformat (input, "disable"))
+       is_en = 0;
+      else
+       return clib_error_return (0, "unknown input `%U'",
+                                 format_unformat_error, input);
+    }
+
+  return vnet_session_enable_disable (vm, is_en);
+}
+
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (session_enable_disable_command, static) =
 {
-.path = "clear session",.short_help =
-    "clear session thread <thread> session <index>",.function =
-    clear_session_command_fn,};
+  .path = "session",
+  .short_help = "session [enable|disable]",
+  .function = session_enable_disable_fn,
+};
+/* *INDENT-ON* */
 
 /*
  * fd.io coding-style-patch-verification: ON
diff --git a/src/vnet/tcp/builtin_server.c b/src/vnet/tcp/builtin_server.c
new file mode 100644 (file)
index 0000000..be65642
--- /dev/null
@@ -0,0 +1,135 @@
+/*
+* Copyright (c) 2015-2017 Cisco and/or its affiliates.
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at:
+*
+*     http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+#include <vnet/vnet.h>
+#include <vlibmemory/api.h>
+#include <vnet/session/application.h>
+#include <vnet/session/application_interface.h>
+
+/* Session-accept callback: log and mark the new session ready for data. */
+int
+builtin_session_accept_callback (stream_session_t * s)
+{
+  clib_warning ("called...");
+  s->session_state = SESSION_STATE_READY;
+  return 0;
+}
+
+/* Disconnect callback: stub — only logs; no local per-session state here. */
+void
+builtin_session_disconnect_callback (stream_session_t * s)
+{
+  clib_warning ("called...");
+}
+
+/* Connected callback: this app never initiates connects; log, return -1. */
+int
+builtin_session_connected_callback (u32 client_index,
+                                   stream_session_t * s, u8 is_fail)
+{
+  clib_warning ("called...");
+  return -1;
+}
+
+/* Add-segment callback: unsupported by this test server; log, return -1. */
+int
+builtin_add_segment_callback (u32 client_index,
+                             const u8 * seg_name, u32 seg_size)
+{
+  clib_warning ("called...");
+  return -1;
+}
+
+/* Redirect-connect callback: unsupported by this test server; returns -1. */
+int
+builtin_redirect_connect_callback (u32 client_index, void *mp)
+{
+  clib_warning ("called...");
+  return -1;
+}
+
+/* RX callback: stub that only logs. NOTE(review): the rx fifo is never
+ * drained here, so it will fill and the window will close — confirm this
+ * is intended for the test server. */
+int
+builtin_server_rx_callback (stream_session_t * s)
+{
+  clib_warning ("called...");
+  return 0;
+}
+
+/* Callback table registered with the session layer at bind time. */
+static session_cb_vft_t builtin_session_cb_vft = {
+  .session_accept_callback = builtin_session_accept_callback,
+  .session_disconnect_callback = builtin_session_disconnect_callback,
+  .session_connected_callback = builtin_session_connected_callback,
+  .add_segment_callback = builtin_add_segment_callback,
+  .redirect_connect_callback = builtin_redirect_connect_callback,
+  .builtin_server_rx_callback = builtin_server_rx_callback
+};
+
+/* Bind the builtin test server on tcp://0.0.0.0/80 through the session
+ * layer. Returns vnet_bind_uri's return value (0 on success). */
+static int
+server_create (vlib_main_t * vm)
+{
+  vnet_bind_args_t _a, *a = &_a;
+  u64 options[SESSION_OPTIONS_N_OPTIONS];
+  char segment_name[128];
+
+  memset (a, 0, sizeof (*a));
+  memset (options, 0, sizeof (options));
+
+  a->uri = "tcp://0.0.0.0/80";
+  a->api_client_index = ~0;	/* builtin app: no external API client */
+  a->session_cb_vft = &builtin_session_cb_vft;
+  a->options = options;
+  a->options[SESSION_OPTIONS_SEGMENT_SIZE] = 256 << 10;	/* 256 KB */
+  a->options[SESSION_OPTIONS_RX_FIFO_SIZE] = 64 << 10;	/* 64 KB */
+  a->options[SESSION_OPTIONS_TX_FIFO_SIZE] = 64 << 10;	/* 64 KB */
+  /* NOTE(review): segment_name is a stack buffer — presumably only written
+   * during the bind call; confirm it is not retained past return. */
+  a->segment_name = segment_name;
+  a->segment_name_length = ARRAY_LEN (segment_name);
+
+  return vnet_bind_uri (a);
+}
+
+/* CLI handler for "test server": create the builtin test TCP server. */
+static clib_error_t *
+server_create_command_fn (vlib_main_t * vm,
+                         unformat_input_t * input, vlib_cli_command_t * cmd)
+{
+  int rv;
+#if 0
+  /* Argument-parsing scaffold, disabled until options are needed. */
+  while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
+    {
+      if (unformat (input, "whatever %d", &whatever))
+       ;
+      else
+       return clib_error_return (0, "unknown input `%U'",
+                                 format_unformat_error, input);
+    }
+#endif
+
+  rv = server_create (vm);
+  switch (rv)
+    {
+    case 0:
+      break;
+    default:
+      return clib_error_return (0, "server_create returned %d", rv);
+    }
+  return 0;
+}
+
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (server_create_command, static) =
+{
+  .path = "test server",
+  .short_help = "test server",
+  .function = server_create_command_fn,
+};
+/* *INDENT-ON* */
+
+/*
+* fd.io coding-style-patch-verification: ON
+*
+* Local Variables:
+* eval: (c-set-style "gnu")
+* End:
+*/
index 0f9b709..e5feaeb 100644 (file)
@@ -217,6 +217,7 @@ ip_interface_get_first_ip (u32 sw_if_index, u8 is_ip4)
   return 0;
 }
 
+#define PORT_MASK ((1 << 16)- 1)
 /**
  * Allocate local port and add if successful add entry to local endpoint
  * table to mark the pair as used.
@@ -224,7 +225,6 @@ ip_interface_get_first_ip (u32 sw_if_index, u8 is_ip4)
 u16
 tcp_allocate_local_port (tcp_main_t * tm, ip46_address_t * ip)
 {
-  u8 unique = 0;
   transport_endpoint_t *tep;
   u32 time_now, tei;
   u16 min = 1024, max = 65535, tries;  /* XXX configurable ? */
@@ -235,37 +235,34 @@ tcp_allocate_local_port (tcp_main_t * tm, ip46_address_t * ip)
   /* Start at random point or max */
   pool_get (tm->local_endpoints, tep);
   clib_memcpy (&tep->ip, ip, sizeof (*ip));
-  tep->port = random_u32 (&time_now) << 16;
-  tep->port = tep->port < min ? max : tep->port;
 
   /* Search for first free slot */
-  while (tries)
+  for (; tries >= 0; tries--)
     {
+      u16 port = 0;
+
+      /* Find a port in the specified range */
+      while (1)
+       {
+         port = random_u32 (&time_now) & PORT_MASK;
+         if (PREDICT_TRUE (port >= min && port < max))
+           break;
+       }
+
+      tep->port = port;
+
+      /* Look it up */
       tei = transport_endpoint_lookup (&tm->local_endpoints_table, &tep->ip,
                                       tep->port);
+      /* If not found, we're done */
       if (tei == TRANSPORT_ENDPOINT_INVALID_INDEX)
        {
-         unique = 1;
-         break;
+         transport_endpoint_table_add (&tm->local_endpoints_table, tep,
+                                       tep - tm->local_endpoints);
+         return tep->port;
        }
-
-      tep->port--;
-
-      if (tep->port < min)
-       tep->port = max;
-
-      tries--;
     }
-
-  if (unique)
-    {
-      transport_endpoint_table_add (&tm->local_endpoints_table, tep,
-                                   tep - tm->local_endpoints);
-
-      return tep->port;
-    }
-
-  /* Failed */
+  /* No free ports */
   pool_put (tm->local_endpoints, tep);
   return -1;
 }
@@ -360,7 +357,10 @@ tcp_connection_open (ip46_address_t * rmt_addr, u16 rmt_port, u8 is_ip4)
   /* Allocate source port */
   lcl_port = tcp_allocate_local_port (tm, &lcl_addr);
   if (lcl_port < 1)
-    return -1;
+    {
+      clib_warning ("Failed to allocate src port");
+      return -1;
+    }
 
   /*
    * Create connection and send SYN
index 22f00a6..3560509 100644 (file)
@@ -30,7 +30,8 @@
 #define TCP_MAX_OPTION_SPACE 40
 
 #define TCP_DUPACK_THRESHOLD 3
-#define TCP_DEFAULT_RX_FIFO_SIZE 64 << 10
+#define TCP_MAX_RX_FIFO_SIZE 2 << 20
+#define TCP_IW_N_SEGMENTS 10
 
 /** TCP FSM state definitions as per RFC793. */
 #define foreach_tcp_fsm_state   \
@@ -590,7 +591,6 @@ vlib_buffer_push_tcp_net_order (vlib_buffer_t * b, u16 sp, u16 dp, u32 seq,
 /**
  * Push TCP header to buffer
  *
- * @param vm - vlib_main
  * @param b - buffer to write the header to
  * @param sp_net - source port net order
  * @param dp_net - destination port net order
index daa0683..0a907d0 100644 (file)
@@ -711,7 +711,7 @@ tcp_rcv_ack (tcp_connection_t * tc, vlib_buffer_t * b,
   if (tcp_opts_sack_permitted (&tc->opt))
     tcp_rcv_sacks (tc, vnet_buffer (b)->tcp.ack_number);
 
-  new_snd_wnd = clib_net_to_host_u32 (th->window) << tc->snd_wscale;
+  new_snd_wnd = clib_net_to_host_u16 (th->window) << tc->snd_wscale;
 
   if (tcp_ack_is_dupack (tc, b, new_snd_wnd))
     {
@@ -1320,7 +1320,6 @@ tcp46_syn_sent_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
 
          /* Parse options */
          tcp_options_parse (tcp0, &new_tc0->opt);
-         tcp_connection_init_vars (new_tc0);
 
          if (tcp_opts_tstamp (&new_tc0->opt))
            {
@@ -1331,11 +1330,13 @@ tcp46_syn_sent_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
          if (tcp_opts_wscale (&new_tc0->opt))
            new_tc0->snd_wscale = new_tc0->opt.wscale;
 
-         new_tc0->snd_wnd = clib_net_to_host_u32 (tcp0->window)
-           << new_tc0->snd_wscale;
+         /* No scaling */
+         new_tc0->snd_wnd = clib_net_to_host_u16 (tcp0->window);
          new_tc0->snd_wl1 = seq0;
          new_tc0->snd_wl2 = ack0;
 
+         tcp_connection_init_vars (new_tc0);
+
          /* SYN-ACK: See if we can switch to ESTABLISHED state */
          if (tcp_ack (tcp0))
            {
@@ -1345,6 +1346,9 @@ tcp46_syn_sent_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
              new_tc0->snd_una = ack0;
              new_tc0->state = TCP_STATE_ESTABLISHED;
 
+             /* Make sure las is initialized for the wnd computation */
+             new_tc0->rcv_las = new_tc0->rcv_nxt;
+
              /* Notify app that we have connection */
              stream_session_connect_notify (&new_tc0->connection, sst, 0);
 
@@ -1575,7 +1579,7 @@ tcp46_rcv_process_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
 
              /* Initialize session variables */
              tc0->snd_una = vnet_buffer (b0)->tcp.ack_number;
-             tc0->snd_wnd = clib_net_to_host_u32 (tcp0->window)
+             tc0->snd_wnd = clib_net_to_host_u16 (tcp0->window)
                << tc0->opt.wscale;
              tc0->snd_wl1 = vnet_buffer (b0)->tcp.seq_number;
              tc0->snd_wl2 = vnet_buffer (b0)->tcp.ack_number;
@@ -1899,7 +1903,6 @@ tcp46_listen_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
            }
 
          tcp_options_parse (th0, &child0->opt);
-         tcp_connection_init_vars (child0);
 
          child0->irs = vnet_buffer (b0)->tcp.seq_number;
          child0->rcv_nxt = vnet_buffer (b0)->tcp.seq_number + 1;
@@ -1913,6 +1916,16 @@ tcp46_listen_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
              child0->tsval_recent_age = tcp_time_now ();
            }
 
+         if (tcp_opts_wscale (&child0->opt))
+           child0->snd_wscale = child0->opt.wscale;
+
+         /* No scaling */
+         child0->snd_wnd = clib_net_to_host_u16 (th0->window);
+         child0->snd_wl1 = vnet_buffer (b0)->tcp.seq_number;
+         child0->snd_wl2 = vnet_buffer (b0)->tcp.ack_number;
+
+         tcp_connection_init_vars (child0);
+
          /* Reuse buffer to make syn-ack and send */
          tcp_make_synack (child0, b0);
          next0 = tcp_next_output (is_ip4);
@@ -1923,7 +1936,7 @@ tcp46_listen_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
 
            }
 
-         b0->error = error0 ? node->errors[error0] : 0;
+         b0->error = node->errors[error0];
 
          vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
                                           n_left_to_next, bi0, next0);
@@ -2069,7 +2082,6 @@ tcp46_input_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
   u32 n_left_from, next_index, *from, *to_next;
   u32 my_thread_index = vm->cpu_index;
   tcp_main_t *tm = vnet_get_tcp_main ();
-  session_manager_main_t *ssm = vnet_get_session_manager_main ();
 
   from = vlib_frame_vector_args (from_frame);
   n_left_from = from_frame->n_vectors;
@@ -2109,26 +2121,26 @@ tcp46_input_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
 
              /* lookup session */
              tc0 =
-               (tcp_connection_t *) stream_session_lookup_transport4 (ssm,
-                                                                      &ip40->dst_address,
-                                                                      &ip40->src_address,
-                                                                      tcp0->dst_port,
-                                                                      tcp0->src_port,
-                                                                      SESSION_TYPE_IP4_TCP,
-                                                                      my_thread_index);
+               (tcp_connection_t *)
+               stream_session_lookup_transport4 (&ip40->dst_address,
+                                                 &ip40->src_address,
+                                                 tcp0->dst_port,
+                                                 tcp0->src_port,
+                                                 SESSION_TYPE_IP4_TCP,
+                                                 my_thread_index);
            }
          else
            {
              ip60 = vlib_buffer_get_current (b0);
              tcp0 = ip6_next_header (ip60);
              tc0 =
-               (tcp_connection_t *) stream_session_lookup_transport6 (ssm,
-                                                                      &ip60->src_address,
-                                                                      &ip60->dst_address,
-                                                                      tcp0->src_port,
-                                                                      tcp0->dst_port,
-                                                                      SESSION_TYPE_IP6_TCP,
-                                                                      my_thread_index);
+               (tcp_connection_t *)
+               stream_session_lookup_transport6 (&ip60->src_address,
+                                                 &ip60->dst_address,
+                                                 tcp0->src_port,
+                                                 tcp0->dst_port,
+                                                 SESSION_TYPE_IP6_TCP,
+                                                 my_thread_index);
            }
 
          /* Session exists */
index dbcf1f7..7e431cd 100644 (file)
@@ -90,6 +90,15 @@ tcp_window_compute_scale (u32 available_space)
   return wnd_scale;
 }
 
+/**
+ * TCP's initial window as recommended by RFC6928: 10 segments' worth.
+ *
+ * @param tc connection (currently unused)
+ * @return unscaled initial window, TCP_IW_N_SEGMENTS * dummy_mtu bytes
+ */
+always_inline u32
+tcp_initial_wnd_unscaled (tcp_connection_t * tc)
+{
+  return TCP_IW_N_SEGMENTS * dummy_mtu;
+}
+
 /**
  * Compute initial window and scale factor. As per RFC1323, window field in
  * SYN and SYN-ACK segments is never scaled.
@@ -97,18 +106,15 @@ tcp_window_compute_scale (u32 available_space)
 u32
 tcp_initial_window_to_advertise (tcp_connection_t * tc)
 {
-  u32 available_space;
+  u32 max_fifo;
 
   /* Initial wnd for SYN. Fifos are not allocated yet.
-   * Use some predefined value */
-  if (tc->state != TCP_STATE_SYN_RCVD)
-    {
-      return TCP_DEFAULT_RX_FIFO_SIZE;
-    }
+   * Use some predefined value. For SYN-ACK we still want the
+   * scale to be computed in the same way */
+  max_fifo = TCP_MAX_RX_FIFO_SIZE;
 
-  available_space = stream_session_max_enqueue (&tc->connection);
-  tc->rcv_wscale = tcp_window_compute_scale (available_space);
-  tc->rcv_wnd = clib_min (available_space, TCP_WND_MAX << tc->rcv_wscale);
+  tc->rcv_wscale = tcp_window_compute_scale (max_fifo);
+  tc->rcv_wnd = tcp_initial_wnd_unscaled (tc);
 
   return clib_min (tc->rcv_wnd, TCP_WND_MAX);
 }
@@ -119,23 +125,43 @@ tcp_initial_window_to_advertise (tcp_connection_t * tc)
 u32
 tcp_window_to_advertise (tcp_connection_t * tc, tcp_state_t state)
 {
-  u32 available_space, wnd, scaled_space;
+  u32 available_space, max_fifo, observed_wnd;
 
-  if (state != TCP_STATE_ESTABLISHED)
+  if (state < TCP_STATE_ESTABLISHED)
     return tcp_initial_window_to_advertise (tc);
 
+  /*
+   * Figure out how much space we have available
+   */
   available_space = stream_session_max_enqueue (&tc->connection);
-  scaled_space = available_space >> tc->rcv_wscale;
+  max_fifo = stream_session_fifo_size (&tc->connection);
+
+  ASSERT (tc->opt.mss < max_fifo);
+
+  if (available_space < tc->opt.mss && available_space < max_fifo / 8)
+    available_space = 0;
 
-  /* Need to update scale */
-  if (PREDICT_FALSE ((scaled_space == 0 && available_space != 0))
-      || (scaled_space >= TCP_WND_MAX))
-    tc->rcv_wscale = tcp_window_compute_scale (available_space);
+  /*
+   * Use the above and what we know about what we've previously advertised
+   * to compute the new window
+   */
+  observed_wnd = tc->rcv_wnd - (tc->rcv_nxt - tc->rcv_las);
 
-  wnd = clib_min (available_space, TCP_WND_MAX << tc->rcv_wscale);
-  tc->rcv_wnd = wnd;
+  /* Bad. Thou shalt not shrink */
+  if (available_space < observed_wnd)
+    {
+      if (available_space == 0)
+       clib_warning ("Didn't shrink rcv window despite not having space");
+    }
+
+  tc->rcv_wnd = clib_min (available_space, TCP_WND_MAX << tc->rcv_wscale);
+
+  if (tc->rcv_wnd == 0)
+    {
+      tc->flags |= TCP_CONN_SENT_RCV_WND0;
+    }
 
-  return wnd >> tc->rcv_wscale;
+  return tc->rcv_wnd >> tc->rcv_wscale;
 }
 
 /**
@@ -225,7 +251,7 @@ tcp_options_write (u8 * data, tcp_options_t * opts)
 }
 
 always_inline int
-tcp_make_syn_options (tcp_options_t * opts, u32 initial_wnd)
+tcp_make_syn_options (tcp_options_t * opts, u8 wnd_scale)
 {
   u8 len = 0;
 
@@ -234,7 +260,7 @@ tcp_make_syn_options (tcp_options_t * opts, u32 initial_wnd)
   len += TCP_OPTION_LEN_MSS;
 
   opts->flags |= TCP_OPTS_FLAG_WSCALE;
-  opts->wscale = tcp_window_compute_scale (initial_wnd);
+  opts->wscale = wnd_scale;
   len += TCP_OPTION_LEN_WINDOW_SCALE;
 
   opts->flags |= TCP_OPTS_FLAG_TSTAMP;
@@ -327,8 +353,7 @@ tcp_make_options (tcp_connection_t * tc, tcp_options_t * opts,
     case TCP_STATE_SYN_RCVD:
       return tcp_make_synack_options (tc, opts);
     case TCP_STATE_SYN_SENT:
-      return tcp_make_syn_options (opts,
-                                  tcp_initial_window_to_advertise (tc));
+      return tcp_make_syn_options (opts, tc->rcv_wscale);
     default:
       clib_warning ("Not handled!");
       return 0;
@@ -732,7 +757,7 @@ tcp_send_syn (tcp_connection_t * tc)
 
   /* Make and write options */
   memset (&snd_opts, 0, sizeof (snd_opts));
-  tcp_opts_len = tcp_make_syn_options (&snd_opts, initial_wnd);
+  tcp_opts_len = tcp_make_syn_options (&snd_opts, tc->rcv_wscale);
   tcp_hdr_opts_len = tcp_opts_len + sizeof (tcp_header_t);
 
   th = vlib_buffer_push_tcp (b, tc->c_lcl_port, tc->c_rmt_port, tc->iss,
@@ -900,7 +925,7 @@ tcp_prepare_retransmit_segment (tcp_connection_t * tc, vlib_buffer_t * b,
 
   tcp_reuse_buffer (vm, b);
 
-  ASSERT (tc->state == TCP_STATE_ESTABLISHED);
+  ASSERT (tc->state >= TCP_STATE_ESTABLISHED);
   ASSERT (max_bytes != 0);
 
   if (tcp_opts_sack_permitted (&tc->opt))
@@ -929,7 +954,6 @@ tcp_prepare_retransmit_segment (tcp_connection_t * tc, vlib_buffer_t * b,
                                       max_bytes);
   ASSERT (n_bytes != 0);
 
-  tc->snd_nxt += n_bytes;
   tcp_push_hdr_i (tc, b, tc->state);
 
   return n_bytes;
@@ -967,7 +991,7 @@ tcp_timer_retransmit_handler_i (u32 index, u8 is_syn)
   tcp_get_free_buffer_index (tm, &bi);
   b = vlib_get_buffer (vm, bi);
 
-  if (tc->state == TCP_STATE_ESTABLISHED)
+  if (tc->state >= TCP_STATE_ESTABLISHED)
     {
       tcp_fastrecovery_off (tc);
 
@@ -977,6 +1001,12 @@ tcp_timer_retransmit_handler_i (u32 index, u8 is_syn)
       /* Figure out what and how many bytes we can send */
       snd_space = tcp_available_snd_space (tc);
       max_bytes = clib_min (tc->snd_mss, snd_space);
+
+      if (max_bytes == 0)
+       {
+         clib_warning ("no wnd to retransmit");
+         return;
+       }
       tcp_prepare_retransmit_segment (tc, b, max_bytes);
 
       tc->rtx_bytes += max_bytes;
@@ -996,7 +1026,11 @@ tcp_timer_retransmit_handler_i (u32 index, u8 is_syn)
        tc->rto = clib_min (tc->rto << 1, TCP_RTO_MAX);
 
       vlib_buffer_make_headroom (b, MAX_HDRS_LEN);
+
       tcp_push_hdr_i (tc, b, tc->state);
+
+      /* Account for the SYN */
+      tc->snd_nxt += 1;
     }
 
   if (!is_syn)
@@ -1163,8 +1197,8 @@ tcp46_output_inline (vlib_main_t * vm,
          if (PREDICT_FALSE
              (vnet_buffer (b0)->tcp.flags & TCP_BUF_FLAG_DUPACK))
            {
+             ASSERT (tc0->snt_dupacks > 0);
              tc0->snt_dupacks--;
-             ASSERT (tc0->snt_dupacks >= 0);
              if (!tcp_session_has_ooo_data (tc0))
                {
                  error0 = TCP_ERROR_FILTERED_DUPACKS;