session: cleanup attach flags

diff --git a/src/vcl/vppcom.c b/src/vcl/vppcom.c
index 85bf1c2..ce222a7 100644
--- a/src/vcl/vppcom.c
+++ b/src/vcl/vppcom.c
@@ -132,6 +132,7 @@ typedef struct
 typedef struct vppcom_cfg_t_
 {
   u64 heapsize;
+  u32 vpp_api_q_length;
   u64 segment_baseva;
   u32 segment_size;
   u32 add_segment_size;
@@ -361,7 +362,8 @@ vppcom_connect_to_vpp (char *app_name)
 
   if (VPPCOM_DEBUG > 0)
     printf ("\nConnecting to VPP api...");
-  if (vl_client_connect_to_vlib ("/vpe-api", app_name, 32) < 0)
+  if (vl_client_connect_to_vlib ("/vpe-api", app_name,
+                                vcm->cfg.vpp_api_q_length) < 0)
     {
       clib_warning ("[%d] connect to vpp (%s) failed!", getpid (), app_name);
       return VPPCOM_ECONNREFUSED;
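
The binary-API receive queue for the VCL client was previously created with a
hard-coded depth of 32 slots; it is now taken from vcm->cfg.vpp_api_q_length.
For reference, a minimal standalone sketch of the same call with an explicit
depth (the application name and the 2048 value are illustrative only, not part
of this change; it assumes VPP's usual API-client and clib headers):

    /* Attach this process to VPP's binary API over the "/vpe-api"
     * shared-memory segment, asking for a 2048-slot input queue
     * instead of the old hard-coded 32. */
    static int
    example_connect_to_vpp (void)
    {
      if (vl_client_connect_to_vlib ("/vpe-api", "example-vcl-app", 2048) < 0)
        {
          clib_warning ("connect to vpp failed");
          return VPPCOM_ECONNREFUSED;
        }
      return VPPCOM_OK;
    }
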
@@ -551,10 +553,13 @@ vppcom_app_send_attach (void)
   bmp->options[APP_OPTIONS_PROXY_TRANSPORT] =
     (vcm->cfg.app_proxy_transport_tcp ? 1 << TRANSPORT_PROTO_TCP : 0) |
     (vcm->cfg.app_proxy_transport_udp ? 1 << TRANSPORT_PROTO_UDP : 0);
-  bmp->options[SESSION_OPTIONS_SEGMENT_SIZE] = vcm->cfg.segment_size;
-  bmp->options[SESSION_OPTIONS_ADD_SEGMENT_SIZE] = vcm->cfg.add_segment_size;
-  bmp->options[SESSION_OPTIONS_RX_FIFO_SIZE] = vcm->cfg.rx_fifo_size;
-  bmp->options[SESSION_OPTIONS_TX_FIFO_SIZE] = vcm->cfg.tx_fifo_size;
+  bmp->options[APP_OPTIONS_SEGMENT_SIZE] = vcm->cfg.segment_size;
+  bmp->options[APP_OPTIONS_ADD_SEGMENT_SIZE] = vcm->cfg.add_segment_size;
+  bmp->options[APP_OPTIONS_RX_FIFO_SIZE] = vcm->cfg.rx_fifo_size;
+  bmp->options[APP_OPTIONS_TX_FIFO_SIZE] = vcm->cfg.tx_fifo_size;
+  bmp->options[APP_OPTIONS_PREALLOC_FIFO_PAIRS] =
+    vcm->cfg.preallocated_fifo_pairs;
+  bmp->options[APP_OPTIONS_EVT_QUEUE_SIZE] = vcm->cfg.event_queue_size;
   if (nsid_len)
     {
       bmp->namespace_id_len = nsid_len;
@@ -1063,7 +1068,7 @@ format_ip46_address (u8 * s, va_list * args)
 }
 
 static inline void
-vppcom_send_accept_session_reply (u32 handle, int retval)
+vppcom_send_accept_session_reply (u64 handle, u32 context, int retval)
 {
   vl_api_accept_session_reply_t *rmp;
 
@@ -1071,6 +1076,7 @@ vppcom_send_accept_session_reply (u32 handle, int retval)
   memset (rmp, 0, sizeof (*rmp));
   rmp->_vl_msg_id = ntohs (VL_API_ACCEPT_SESSION_REPLY);
   rmp->retval = htonl (retval);
+  rmp->context = context;
   rmp->handle = handle;
   vl_msg_api_send_shmem (vcm->vl_input_queue, (u8 *) & rmp);
 }
@@ -1086,7 +1092,7 @@ vl_api_accept_session_t_handler (vl_api_accept_session_t * mp)
   if (!clib_fifo_free_elts (vcm->client_session_index_fifo))
     {
       clib_warning ("[%d] client session queue is full!", getpid ());
-      vppcom_send_accept_session_reply (mp->handle,
+      vppcom_send_accept_session_reply (mp->handle, mp->context,
                                        VNET_API_ERROR_QUEUE_FULL);
       clib_spinlock_unlock (&vcm->sessions_lockp);
       return;
@@ -1112,6 +1118,7 @@ vl_api_accept_session_t_handler (vl_api_accept_session_t * mp)
   tx_fifo->client_session_index = session_index;
 
   session->vpp_handle = mp->handle;
+  session->client_context = mp->context;
   session->server_rx_fifo = rx_fifo;
   session->server_tx_fifo = tx_fifo;
   session->vpp_event_queue = uword_to_pointer (mp->vpp_event_queue_address,
@@ -1135,18 +1142,16 @@ vl_api_accept_session_t_handler (vl_api_accept_session_t * mp)
   clib_spinlock_unlock (&vcm->sessions_lockp);
 
   if (VPPCOM_DEBUG > 1)
-    {
-      u8 *ip_str = format (0, "%U", format_ip46_address, &mp->ip, mp->is_ip4);
-      clib_warning ("[%d] received request to accept session (sid %d) "
-                   "from %s:%d", getpid (), session_index, ip_str,
-                   clib_net_to_host_u16 (mp->port));
-      vec_free (ip_str);
-    }
+    clib_warning ("[%d] vpp handle 0x%llx, sid %u: client accept "
+                 "request from %s address %U port %d queue %p!", getpid (),
+                 mp->handle, session_index, mp->is_ip4 ? "IPv4" : "IPv6",
+                 format_ip46_address, &mp->ip, mp->is_ip4,
+                 clib_net_to_host_u16 (mp->port), session->vpp_event_queue);
 }
 
 static void
 vppcom_send_connect_session_reply (session_t * session, u32 session_index,
-                                  int retval)
+                                  u64 vpp_handle, u32 context, int retval)
 {
   vl_api_connect_session_reply_t *rmp;
   u32 len;
@@ -1154,8 +1159,17 @@ vppcom_send_connect_session_reply (session_t * session, u32 session_index,
 
   rmp = vl_msg_api_alloc (sizeof (*rmp));
   memset (rmp, 0, sizeof (*rmp));
-
   rmp->_vl_msg_id = ntohs (VL_API_CONNECT_SESSION_REPLY);
+
+  if (!session)
+    {
+      rmp->context = context;
+      rmp->handle = vpp_handle;
+      rmp->retval = htonl (retval);
+      vl_msg_api_send_shmem (vcm->vl_input_queue, (u8 *) & rmp);
+      return;
+    }
+
   rmp->context = session->client_context;
   rmp->retval = htonl (retval);
   rmp->handle = session->vpp_handle;
@@ -1194,8 +1208,10 @@ vl_api_connect_sock_t_handler (vl_api_connect_sock_t * mp)
       if (VPPCOM_DEBUG > 1)
        clib_warning ("[%d] client session queue is full!", getpid ());
 
-      /* TBD: fix handle missing in api msg. */
-      vppcom_send_accept_session_reply (0, VNET_API_ERROR_QUEUE_FULL);
+      /* TBD: Fix api to include vpp handle */
+      vppcom_send_connect_session_reply (0 /* session */ , 0 /* sid */ ,
+                                        0 /* handle */ , mp->context,
+                                        VNET_API_ERROR_QUEUE_FULL);
       return;
     }
 
@@ -1438,6 +1454,7 @@ vppcom_cfg_init (vppcom_cfg_t * vcl_cfg)
   ASSERT (vcl_cfg);
 
   vcl_cfg->heapsize = (256ULL << 20);
+  vcl_cfg->vpp_api_q_length = 1024;
   vcl_cfg->segment_baseva = 0x200000000ULL;
   vcl_cfg->segment_size = (256 << 20);
   vcl_cfg->add_segment_size = (128 << 20);
@@ -1600,7 +1617,7 @@ vppcom_cfg_read (char *conf_fname)
   u8 vc_cfg_input = 0;
   u8 *chroot_path;
   struct stat s;
-  u32 uid, gid;
+  u32 uid, gid, q_len;
 
   fd = open (conf_fname, O_RDONLY);
   if (fd < 0)
@@ -1659,6 +1676,23 @@ vppcom_cfg_read (char *conf_fname)
                              getpid (), chroot_path);
              chroot_path = 0;  /* Don't vec_free() it! */
            }
+         else if (unformat (line_input, "vpp-api-q-length %d", &q_len))
+           {
+             if (q_len < vcl_cfg->vpp_api_q_length)
+               {
+                 clib_warning ("[%d] ERROR: configured vpp-api-q-length "
+                               "(%u) is too small! Using default: %u ",
+                               getpid (), q_len, vcl_cfg->vpp_api_q_length);
+               }
+             else
+               {
+                 vcl_cfg->vpp_api_q_length = q_len;
+
+                 if (VPPCOM_DEBUG > 0)
+                   clib_warning ("[%d] configured vpp-api-q-length %u",
+                                 getpid (), vcl_cfg->vpp_api_q_length);
+               }
+           }
          else if (unformat (line_input, "uid %d", &uid))
            {
              vl_set_memory_uid (uid);
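
With the new knob, the default 1024-entry API queue can be enlarged from the
configuration file parsed by vppcom_cfg_read. A hedged sketch of a config
fragment (the vppcom { ... } section name and the /etc/vpp/vppcom.conf path
are assumptions for illustration; the directive name itself comes from the
hunk above):

    # /etc/vpp/vppcom.conf  (path and section name assumed)
    vppcom {
      vpp-api-q-length 2048
    }

Per the parsing above, a value smaller than the built-in default is rejected
with a warning and the default of 1024 is kept.
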
@@ -2436,7 +2470,10 @@ vppcom_session_accept (uint32_t listen_session_index, vppcom_endpt_t * ep,
          vec_reset_length (a->new_segment_indices);
          rv = VNET_API_ERROR_URI_FIFO_CREATE_FAILED;
          vppcom_send_connect_session_reply (client_session,
-                                            client_session_index, rv);
+                                            client_session_index,
+                                            client_session->vpp_handle,
+                                            client_session->client_context,
+                                            rv);
          clib_spinlock_unlock (&vcm->sessions_lockp);
          rv = VPPCOM_ENOMEM;
          goto done;
@@ -2460,7 +2497,10 @@ vppcom_session_accept (uint32_t listen_session_index, vppcom_endpt_t * ep,
                        vcm->cfg.rx_fifo_size, vcm->cfg.rx_fifo_size);
          rv = VNET_API_ERROR_URI_FIFO_CREATE_FAILED;
          vppcom_send_connect_session_reply (client_session,
-                                            client_session_index, rv);
+                                            client_session_index,
+                                            client_session->vpp_handle,
+                                            client_session->client_context,
+                                            rv);
          clib_spinlock_unlock (&vcm->sessions_lockp);
          rv = VPPCOM_ENOMEM;
          goto done;
@@ -2481,7 +2521,10 @@ vppcom_session_accept (uint32_t listen_session_index, vppcom_endpt_t * ep,
                        vcm->cfg.tx_fifo_size, vcm->cfg.tx_fifo_size);
          rv = VNET_API_ERROR_URI_FIFO_CREATE_FAILED;
          vppcom_send_connect_session_reply (client_session,
-                                            client_session_index, rv);
+                                            client_session_index,
+                                            client_session->vpp_handle,
+                                            client_session->client_context,
+                                            rv);
          clib_spinlock_unlock (&vcm->sessions_lockp);
          rv = VPPCOM_ENOMEM;
          goto done;
@@ -2514,12 +2557,16 @@ vppcom_session_accept (uint32_t listen_session_index, vppcom_endpt_t * ep,
 #endif
       vppcom_send_connect_session_reply (client_session,
                                         client_session_index,
+                                        client_session->vpp_handle,
+                                        client_session->client_context,
                                         0 /* retval OK */ );
     }
   else
     {
       cut_thru_str = " ";
-      vppcom_send_accept_session_reply (client_session->vpp_handle, 0);
+      vppcom_send_accept_session_reply (client_session->vpp_handle,
+                                       client_session->client_context,
+                                       0 /* retval OK */ );
     }
 
   if (VPPCOM_DEBUG > 0)
@@ -2826,6 +2873,19 @@ vppcom_session_read_ready (session_t * session, u32 session_index)
        }
     }
   rv = ready;
+
+  if (vcm->app_event_queue->cursize &&
+      !pthread_mutex_trylock (&vcm->app_event_queue->mutex))
+    {
+      u32 i, n_to_dequeue = vcm->app_event_queue->cursize;
+      session_fifo_event_t e;
+
+      for (i = 0; i < n_to_dequeue; i++)
+       unix_shared_memory_queue_sub_raw (vcm->app_event_queue, (u8 *) & e);
+
+      pthread_mutex_unlock (&vcm->app_event_queue->mutex);
+    }
+
 done:
   return rv;
 }
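
The new block in vppcom_session_read_ready discards whatever events have piled
up on the application's event queue while the caller is polling for
readability, so stale notifications do not accumulate. It relies on the common
trylock-then-drain idiom: clean up only when the mutex can be taken without
blocking the polling path. A standalone sketch of that idiom in plain pthreads
(types and names invented for illustration, not VCL API):

    #include <pthread.h>

    typedef struct
    {
      pthread_mutex_t mutex;    /* protects cursize and the backing ring */
      unsigned cursize;         /* number of queued, not-yet-consumed events */
    } demo_event_queue_t;

    /* Discard everything currently queued, but never block the caller:
     * if another thread holds the mutex, skip the cleanup this time. */
    static void
    demo_event_queue_drain (demo_event_queue_t * q)
    {
      if (q->cursize && !pthread_mutex_trylock (&q->mutex))
        {
          unsigned i, n_to_dequeue = q->cursize;

          for (i = 0; i < n_to_dequeue; i++)
            q->cursize--;       /* dequeue-and-discard one element here */

          pthread_mutex_unlock (&q->mutex);
        }
    }
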