TCP/session improvements
diff --git a/src/vnet/session/session.c b/src/vnet/session/session.c
index 539da61..f10918a 100644
@@ -22,6 +22,8 @@
 #include <vnet/dpo/load_balance.h>
 #include <vnet/fib/ip4_fib.h>
 #include <vnet/session/application.h>
+#include <vnet/tcp/tcp.h>
+#include <vnet/session/session_debug.h>
 
 /**
  * Per-type vector of transport protocol virtual function tables
@@ -311,11 +313,11 @@ stream_session_half_open_lookup (session_manager_main_t * smm,
 }
 
 transport_connection_t *
-stream_session_lookup_transport4 (session_manager_main_t * smm,
-                                 ip4_address_t * lcl, ip4_address_t * rmt,
+stream_session_lookup_transport4 (ip4_address_t * lcl, ip4_address_t * rmt,
                                  u16 lcl_port, u16 rmt_port, u8 proto,
                                  u32 my_thread_index)
 {
+  session_manager_main_t *smm = &session_manager_main;
   session_kv4_t kv4;
   stream_session_t *s;
   int rv;
@@ -345,11 +347,11 @@ stream_session_lookup_transport4 (session_manager_main_t * smm,
 }
 
 transport_connection_t *
-stream_session_lookup_transport6 (session_manager_main_t * smm,
-                                 ip6_address_t * lcl, ip6_address_t * rmt,
+stream_session_lookup_transport6 (ip6_address_t * lcl, ip6_address_t * rmt,
                                  u16 lcl_port, u16 rmt_port, u8 proto,
                                  u32 my_thread_index)
 {
+  session_manager_main_t *smm = &session_manager_main;
   stream_session_t *s;
   session_kv6_t kv6;
   int rv;
@@ -372,7 +374,7 @@ stream_session_lookup_transport6 (session_manager_main_t * smm,
   /* Finally, try half-open connections */
   rv = clib_bihash_search_inline_48_8 (&smm->v6_half_open_hash, &kv6);
   if (rv == 0)
-    return tp_vfts[s->session_type].get_half_open (kv6.value & 0xFFFFFFFF);
+    return tp_vfts[proto].get_half_open (kv6.value & 0xFFFFFFFF);
 
   return 0;
 }
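
The one-line change in the half-open fallback fixes a real bug: on this path, s is never assigned (only the bihash value is valid), so indexing the vft array with s->session_type dereferenced a stale pointer. The protocol is already known from the caller's arguments, and only the low 32 bits of the stored value are consumed here. A minimal sketch of the unpacking this code relies on (the contents of the upper 32 bits are not shown in this hunk):

    /* Low 32 bits of the half-open table value: the half-open
     * connection index. Upper bits are ignored on this path. */
    u32 ho_index = kv6.value & 0xFFFFFFFF;
    return tp_vfts[proto].get_half_open (ho_index);
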
@@ -554,7 +556,7 @@ session_manager_allocate_session_fifos (session_manager_main_t * smm,
                                        u8 * added_a_segment)
 {
   svm_fifo_segment_private_t *fifo_segment;
-  u32 fifo_size, default_fifo_size = 8192 /* TODO config */ ;
+  u32 fifo_size, default_fifo_size = 128 << 10;        /* TODO config */
   int i;
 
   *added_a_segment = 0;
@@ -616,7 +618,10 @@ again:
          goto again;
        }
       else
-       return SESSION_ERROR_NO_SPACE;
+       {
+         clib_warning ("No space to allocate fifos!");
+         return SESSION_ERROR_NO_SPACE;
+       }
     }
   return 0;
 }
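
For reference, the new default is a sixteen-fold increase over the old one; the shift idiom works out as:

    /* 128 << 10 == 128 * 1024 == 131072 bytes (128 KB),
     * up from the previous 8192-byte (8 KB) default. */
    u32 default_fifo_size = 128 << 10;
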
@@ -799,36 +804,39 @@ stream_session_enqueue_notify (stream_session_t * s, u8 block)
   /* Get session's server */
   app = application_get (s->app_index);
 
-  /* Fabricate event */
-  evt.fifo = s->server_rx_fifo;
-  evt.event_type = FIFO_EVENT_SERVER_RX;
-  evt.event_id = serial_number++;
-  evt.enqueue_length = svm_fifo_max_dequeue (s->server_rx_fifo);
-
-  /* Add event to server's event queue */
-  q = app->event_queue;
+  /* Built-in server? Hand event to the callback... */
+  if (app->cb_fns.builtin_server_rx_callback)
+    return app->cb_fns.builtin_server_rx_callback (s);
 
-  /* Based on request block (or not) for lack of space */
-  if (block || PREDICT_TRUE (q->cursize < q->maxsize))
-    unix_shared_memory_queue_add (app->event_queue, (u8 *) & evt,
-                                 0 /* do wait for mutex */ );
-  else
-    return -1;
-
-  if (1)
+  /* If no event is already queued for this fifo, send one */
+  if (svm_fifo_set_event (s->server_rx_fifo))
     {
-      ELOG_TYPE_DECLARE (e) =
-      {
-      .format = "evt-enqueue: id %d length %d",.format_args = "i4i4",};
-      struct
-      {
-       u32 data[2];
-      } *ed;
-      ed = ELOG_DATA (&vlib_global_main.elog_main, e);
-      ed->data[0] = evt.event_id;
-      ed->data[1] = evt.enqueue_length;
+      /* Fabricate event */
+      evt.fifo = s->server_rx_fifo;
+      evt.event_type = FIFO_EVENT_SERVER_RX;
+      evt.event_id = serial_number++;
+
+      /* Add event to server's event queue */
+      q = app->event_queue;
+
+      /* Based on the request, block (or not) when out of queue space */
+      if (block || PREDICT_TRUE (q->cursize < q->maxsize))
+       unix_shared_memory_queue_add (app->event_queue, (u8 *) & evt,
+                                     0 /* do wait for mutex */ );
+      else
+       {
+         clib_warning ("fifo full");
+         return -1;
+       }
     }
 
+  /* *INDENT-OFF* */
+  SESSION_EVT_DBG(SESSION_EVT_ENQ, s, ({
+      ed->data[0] = evt.event_id;
+      ed->data[1] = svm_fifo_max_dequeue (s->server_rx_fifo);
+  }));
+  /* *INDENT-ON* */
+
   return 0;
 }
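
The rework above replaces the unconditional per-enqueue event with a coalesced one: svm_fifo_set_event returns nonzero only when it newly sets the fifo's has-event flag, so at most one FIFO_EVENT_SERVER_RX is outstanding per fifo. Below is a sketch of the app/consumer half of that contract, assuming svm_fifo_unset_event is the reader-side counterpart (the app side is not part of this diff). Note also that evt.event_id in the SESSION_EVT_DBG block is only meaningful when an event was actually fabricated on this pass; on the coalesced path it is left uninitialized, which only matters when session debugging is compiled in.

    /* App-side drain loop (sketch; rx_fifo is the app's handle to
     * s->server_rx_fifo). Clear the flag *before* draining so bytes
     * that arrive mid-drain re-arm a fresh notification rather than
     * being coalesced away. */
    svm_fifo_unset_event (rx_fifo);
    while (svm_fifo_max_dequeue (rx_fifo) > 0)
      {
        /* ... svm_fifo_dequeue_* the data and hand it to the app ... */
      }
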
 
@@ -900,8 +908,7 @@ stream_session_start_listen (u32 server_index, ip46_address_t * ip, u16 port)
   s->app_index = srv->index;
 
   /* Transport bind/listen  */
-  tci = tp_vfts[srv->session_type].bind (smm->vlib_main, s->session_index, ip,
-                                        port);
+  tci = tp_vfts[srv->session_type].bind (s->session_index, ip, port);
 
   /* Attach transport to session */
   s->connection_index = tci;
@@ -930,8 +937,7 @@ stream_session_stop_listen (u32 server_index)
   tc = tp_vfts[srv->session_type].get_listener (listener->connection_index);
   stream_session_table_del_for_tc (smm, listener->session_type, tc);
 
-  tp_vfts[srv->session_type].unbind (smm->vlib_main,
-                                    listener->connection_index);
+  tp_vfts[srv->session_type].unbind (listener->connection_index);
   pool_put (smm->listen_sessions[srv->session_type], listener);
 }
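
Dropping the vlib_main_t argument from bind and unbind implies the transport vft entries now look roughly like this (return types assumed; only the argument lists are visible from the call sites above):

    /* vft prototypes inferred from the new call sites: */
    u32 (*bind) (u32 session_index, ip46_address_t * ip, u16 port);
    u32 (*unbind) (u32 listener_index);

A transport that still needs the vlib main can fetch it itself via vlib_get_main ().
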
 
@@ -948,7 +954,7 @@ void
 connects_session_manager_init (session_manager_main_t * smm, u8 session_type)
 {
   session_manager_t *sm;
-  u32 connect_fifo_size = 8 << 10;     /* Config? */
+  u32 connect_fifo_size = 256 << 10;   /* Config? */
   u32 default_segment_size = 1 << 20;
 
   pool_get (smm->session_managers, sm);
@@ -1042,23 +1048,24 @@ stream_session_delete (stream_session_t * s)
   session_manager_main_t *smm = vnet_get_session_manager_main ();
   svm_fifo_segment_private_t *fifo_segment;
   application_t *app;
-  int rv;
 
-  /* delete from the main lookup table */
-  rv = stream_session_table_del (smm, s);
-
-  if (rv)
-    clib_warning ("hash delete error, rv %d", rv);
+  /* Delete from the main lookup table. */
+  stream_session_table_del (smm, s);
 
   /* Cleanup fifo segments */
   fifo_segment = svm_fifo_get_segment (s->server_segment_index);
   svm_fifo_segment_free_fifo (fifo_segment, s->server_rx_fifo);
   svm_fifo_segment_free_fifo (fifo_segment, s->server_tx_fifo);
 
-  /* Cleanup app if client */
-  app = application_get (s->app_index);
+  app = application_get_if_valid (s->app_index);
+
+  /* No app. One possibility: the app called unbind after the disconnect */
+  if (!app)
+    return;
+
   if (app->mode == APP_CLIENT)
     {
+      /* Cleanup app if client */
       application_del (app);
     }
   else if (app->mode == APP_SERVER)
@@ -1068,6 +1075,7 @@ stream_session_delete (stream_session_t * s)
       svm_fifo_t **fifos;
       u32 fifo_index;
 
+      /* For server, see if any segments can be removed */
       sm = session_manager_get (app->session_manager_index);
 
       /* Delete fifo */
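
application_get_if_valid is the app-side analogue of the stream_session_get_if_valid call used in the next hunk: it returns NULL instead of asserting when the index points at a freed pool slot. A plausible shape, assuming the application pool is named app_pool:

    application_t *
    application_get_if_valid (u32 index)
    {
      if (pool_is_free_index (app_pool, index))
        return 0;
      return pool_elt_at_index (app_pool, index);
    }
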
@@ -1096,10 +1104,10 @@ stream_session_delete_notify (transport_connection_t * tc)
 {
   stream_session_t *s;
 
+  /* App might've been removed already */
   s = stream_session_get_if_valid (tc->s_index, tc->thread_index);
   if (!s)
     {
-      clib_warning ("Surprised!");
       return;
     }
   stream_session_delete (s);
@@ -1151,16 +1159,24 @@ stream_session_accept (transport_connection_t * tc, u32 listener_index,
   return 0;
 }
 
-void
+int
 stream_session_open (u8 sst, ip46_address_t * addr, u16 port_host_byte_order,
                     u32 app_index)
 {
   transport_connection_t *tc;
   u32 tci;
   u64 value;
+  int rv;
 
   /* Ask transport to open connection */
-  tci = tp_vfts[sst].open (addr, port_host_byte_order);
+  rv = tp_vfts[sst].open (addr, port_host_byte_order);
+  if (rv < 0)
+    {
+      clib_warning ("Transport failed to open connection.");
+      return VNET_API_ERROR_SESSION_CONNECT_FAIL;
+    }
+
+  tci = rv;
 
   /* Get transport connection */
   tc = tp_vfts[sst].get_half_open (tci);
@@ -1170,6 +1186,8 @@ stream_session_open (u8 sst, ip46_address_t * addr, u16 port_host_byte_order,
 
   /* Add to the half-open lookup table */
   stream_session_half_open_table_add (sst, tc, value);
+
+  return 0;
 }
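
With stream_session_open now returning an error code instead of void, callers are expected to check it; a sketch of the assumed calling pattern (error formatting hypothetical):

    int rv;

    if ((rv = stream_session_open (sst, &ip46, port, app_index)))
      /* rv is VNET_API_ERROR_SESSION_CONNECT_FAIL */
      return clib_error_return (0, "session open failed: %d", rv);
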
 
 /**
@@ -1180,18 +1198,51 @@ stream_session_open (u8 sst, ip46_address_t * addr, u16 port_host_byte_order,
 void
 stream_session_disconnect (stream_session_t * s)
 {
-  tp_vfts[s->session_type].close (s->connection_index, s->thread_index);
   s->session_state = SESSION_STATE_CLOSED;
+  /* TODO: RPC to the vpp event queue on the session's owning thread */
+
+  tp_vfts[s->session_type].close (s->connection_index, s->thread_index);
 }
 
 /**
  * Cleanup transport and session state.
+ *
+ * Notify transport of the cleanup, wait for a delete notify to actually
+ * remove the session state.
  */
 void
 stream_session_cleanup (stream_session_t * s)
 {
+  session_manager_main_t *smm = &session_manager_main;
+  int rv;
+
+  s->session_state = SESSION_STATE_CLOSED;
+
+  /* Delete from the main lookup table to avoid more enqueues */
+  rv = stream_session_table_del (smm, s);
+  if (rv)
+    clib_warning ("hash delete error, rv %d", rv);
+
   tp_vfts[s->session_type].cleanup (s->connection_index, s->thread_index);
-  stream_session_delete (s);
 }
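
Taken together with the new doc comment, teardown now runs in two phases; a sketch of the implied sequence:

    /*
     * stream_session_cleanup (s)          initiator side
     *   -> stream_session_table_del ()    no further lookups/enqueues
     *   -> vft->cleanup ()                transport tears down (e.g. tcp)
     * ... transport signals completion ...
     * stream_session_delete_notify (tc)
     *   -> stream_session_delete (s)      frees fifos and session state
     */
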
 
 void
@@ -1203,8 +1254,9 @@ session_register_transport (u8 type, const transport_proto_vft_t * vft)
   tp_vfts[type] = *vft;
 
   /* If an offset function is provided, then peek instead of dequeue */
-  smm->session_rx_fns[type] =
-    (vft->rx_fifo_offset) ? session_fifo_rx_peek : session_fifo_rx_dequeue;
+  smm->session_tx_fns[type] =
+    (vft->tx_fifo_offset) ? session_tx_fifo_peek_and_snd :
+    session_tx_fifo_dequeue_and_snd;
 }
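
The rename from rx to tx fns matches what the dispatch actually selects: a transmit function. The natural reading is that a transport that must retransmit (tcp) provides tx_fifo_offset and gets session_tx_fifo_peek_and_snd, so data stays in the tx fifo until acked, while a transport with no retransmission omits it and gets session_tx_fifo_dequeue_and_snd. A sketch of a registration exercising the peek path (field values and function names assumed):

    const static transport_proto_vft_t tcp4_vft = {
      .bind = tcp_session_bind_ip4,                  /* names assumed */
      .unbind = tcp_session_unbind_ip4,
      .tx_fifo_offset = tcp_session_tx_fifo_offset,  /* => peek_and_snd */
      .cleanup = tcp_session_cleanup,
    };

    session_register_transport (SESSION_TYPE_IP4_TCP, &tcp4_vft);
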
 
 transport_proto_vft_t *
@@ -1216,16 +1268,13 @@ session_get_transport_vft (u8 type)
 }
 
 static clib_error_t *
-session_manager_main_init (vlib_main_t * vm)
+session_manager_main_enable (vlib_main_t * vm)
 {
-  u32 num_threads;
-  vlib_thread_main_t *vtm = vlib_get_thread_main ();
   session_manager_main_t *smm = &session_manager_main;
+  vlib_thread_main_t *vtm = vlib_get_thread_main ();
+  u32 num_threads;
   int i;
 
-  smm->vlib_main = vm;
-  smm->vnet_main = vnet_get_main ();
-
   num_threads = 1 /* main thread */  + vtm->n_threads;
 
   if (num_threads < 1)
@@ -1272,11 +1321,50 @@ session_manager_main_init (vlib_main_t * vm)
   for (i = 0; i < SESSION_N_TYPES; i++)
     smm->connect_manager_index[i] = INVALID_INDEX;
 
+  smm->is_enabled = 1;
+
+  /* Enable TCP transport */
+  vnet_tcp_enable_disable (vm, 1);
+
+  return 0;
+}
+
+clib_error_t *
+vnet_session_enable_disable (vlib_main_t * vm, u8 is_en)
+{
+  if (is_en)
+    {
+      if (session_manager_main.is_enabled)
+       return 0;
+
+      vlib_node_set_state (vm, session_queue_node.index,
+                          VLIB_NODE_STATE_POLLING);
+
+      return session_manager_main_enable (vm);
+    }
+  else
+    {
+      session_manager_main.is_enabled = 0;
+      vlib_node_set_state (vm, session_queue_node.index,
+                          VLIB_NODE_STATE_DISABLED);
+    }
+
   return 0;
 }
 
-VLIB_INIT_FUNCTION (session_manager_main_init);
+clib_error_t *
+session_manager_main_init (vlib_main_t * vm)
+{
+  session_manager_main_t *smm = &session_manager_main;
+
+  smm->vlib_main = vm;
+  smm->vnet_main = vnet_get_main ();
+  smm->is_enabled = 0;
+
+  return 0;
+}
 
+VLIB_INIT_FUNCTION (session_manager_main_init);
+
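
vnet_session_enable_disable is the new public switch; a minimal sketch of how it might be wired to a debug CLI (command path and wrapper name hypothetical, not part of this file):

    static clib_error_t *
    session_enable_disable_fn (vlib_main_t * vm, unformat_input_t * input,
                               vlib_cli_command_t * cmd)
    {
      u8 is_en = 1;

      if (unformat (input, "enable"))
        is_en = 1;
      else if (unformat (input, "disable"))
        is_en = 0;
      else
        return clib_error_return (0, "expected enable | disable");

      return vnet_session_enable_disable (vm, is_en);
    }

    VLIB_CLI_COMMAND (session_enable_disable_command, static) =
    {
      .path = "session",
      .short_help = "session [enable|disable]",
      .function = session_enable_disable_fn,
    };
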
 /*
  * fd.io coding-style-patch-verification: ON
  *