vls: multi-process and multi-threaded apps improvements
[vpp.git] / src / vcl / vcl_private.c
index ae4498e..5f9ce27 100644 (file)
@@ -15,7 +15,7 @@
 
 #include <vcl/vcl_private.h>
 
-pthread_key_t vcl_worker_stop_key;
+static pthread_key_t vcl_worker_stop_key;
 
 static const char *
 vppcom_app_state_str (app_state_t state)
@@ -211,6 +211,7 @@ vcl_worker_alloc (void)
   pool_get (vcm->workers, wrk);
   memset (wrk, 0, sizeof (*wrk));
   wrk->wrk_index = wrk - vcm->workers;
+  wrk->forked_child = ~0;
   return wrk;
 }
 
@@ -220,20 +221,36 @@ vcl_worker_free (vcl_worker_t * wrk)
   pool_put (vcm->workers, wrk);
 }
 
-static void
-vcl_worker_cleanup (void *arg)
+void
+vcl_worker_cleanup (vcl_worker_t * wrk, u8 notify_vpp)
 {
-  vcl_worker_t *wrk = vcl_worker_get_current ();
-  VDBG (0, "cleaning up worker %u", wrk->wrk_index);
-  vcl_send_app_worker_add_del (0 /* is_add */ );
-  close (wrk->mqs_epfd);
+  clib_spinlock_lock (&vcm->workers_lock);
+  if (notify_vpp)
+    {
+      if (wrk->wrk_index == vcl_get_worker_index ())
+       vcl_send_app_worker_add_del (0 /* is_add */ );
+      else
+       vcl_send_child_worker_del (wrk);
+    }
+  if (wrk->mqs_epfd > 0)
+    close (wrk->mqs_epfd);
   hash_free (wrk->session_index_by_vpp_handles);
   hash_free (wrk->ct_registration_by_mq);
   clib_spinlock_free (&wrk->ct_registration_lock);
   vec_free (wrk->mq_events);
   vec_free (wrk->mq_msg_vector);
-  vcl_set_worker_index (~0);
   vcl_worker_free (wrk);
+  clib_spinlock_unlock (&vcm->workers_lock);
+}
+
+static void
+vcl_worker_cleanup_cb (void *arg)
+{
+  vcl_worker_t *wrk = vcl_worker_get_current ();
+  u32 wrk_index = wrk->wrk_index;
+  vcl_worker_cleanup (wrk, 1 /* notify vpp */ );
+  vcl_set_worker_index (~0);
+  VDBG (0, "cleaned up worker %u", wrk_index);
 }
 
 vcl_worker_t *
@@ -254,6 +271,8 @@ vcl_worker_alloc_and_init ()
   clib_spinlock_lock (&vcm->workers_lock);
   wrk = vcl_worker_alloc ();
   vcl_set_worker_index (wrk->wrk_index);
+  wrk->thread_id = pthread_self ();
+  wrk->current_pid = getpid ();
 
   wrk->mqs_epfd = -1;
   if (vcm->cfg.use_mq_eventfd)
@@ -262,7 +281,7 @@ vcl_worker_alloc_and_init ()
       if (wrk->mqs_epfd < 0)
        {
          clib_unix_warning ("epoll_create() returned");
-         return 0;
+         clib_spinlock_unlock (&vcm->workers_lock);
+         goto done;
        }
     }
 
@@ -273,29 +292,167 @@ vcl_worker_alloc_and_init ()
   vec_validate (wrk->mq_events, 64);
   vec_validate (wrk->mq_msg_vector, 128);
   vec_reset_length (wrk->mq_msg_vector);
+  vec_validate (wrk->unhandled_evts_vector, 128);
+  vec_reset_length (wrk->unhandled_evts_vector);
+  clib_spinlock_unlock (&vcm->workers_lock);
 
-  if (wrk->wrk_index == 0)
-    {
-      clib_spinlock_unlock (&vcm->workers_lock);
-      return wrk;
-    }
+done:
+  return wrk;
+}
+
+int
+vcl_worker_register_with_vpp (void)
+{
+  vcl_worker_t *wrk = vcl_worker_get_current ();
+
+  clib_spinlock_lock (&vcm->workers_lock);
 
   vcm->app_state = STATE_APP_ADDING_WORKER;
   vcl_send_app_worker_add_del (1 /* is_add */ );
   if (vcl_wait_for_app_state_change (STATE_APP_READY))
     {
       clib_warning ("failed to add worker to vpp");
-      return 0;
+      clib_spinlock_unlock (&vcm->workers_lock);
+      return -1;
     }
-
-  if (pthread_key_create (&vcl_worker_stop_key, vcl_worker_cleanup))
+  if (pthread_key_create (&vcl_worker_stop_key, vcl_worker_cleanup_cb))
     clib_warning ("failed to add pthread cleanup function");
+  if (pthread_setspecific (vcl_worker_stop_key, &wrk->thread_id))
+    clib_warning ("failed to setup key value");
 
   clib_spinlock_unlock (&vcm->workers_lock);
 
   VDBG (0, "added worker %u", wrk->wrk_index);
+  return 0;
+}
 
-  return wrk;
+int
+vcl_worker_set_bapi (void)
+{
+  vcl_worker_t *wrk = vcl_worker_get_current ();
+  int i;
+
+  /* Find the first worker with the same pid */
+  for (i = 0; i < vec_len (vcm->workers); i++)
+    {
+      if (i == wrk->wrk_index)
+       continue;
+      if (vcm->workers[i].current_pid == wrk->current_pid)
+       {
+         wrk->vl_input_queue = vcm->workers[i].vl_input_queue;
+         wrk->my_client_index = vcm->workers[i].my_client_index;
+         return 0;
+       }
+    }
+  return -1;
+}
+
+void
+vcl_segment_table_add (u64 segment_handle, u32 svm_segment_index)
+{
+  clib_rwlock_writer_lock (&vcm->segment_table_lock);
+  hash_set (vcm->segment_table, segment_handle, svm_segment_index);
+  clib_rwlock_writer_unlock (&vcm->segment_table_lock);
+}
+
+u32
+vcl_segment_table_lookup (u64 segment_handle)
+{
+  uword *seg_indexp;
+
+  clib_rwlock_reader_lock (&vcm->segment_table_lock);
+  seg_indexp = hash_get (vcm->segment_table, segment_handle);
+  clib_rwlock_reader_unlock (&vcm->segment_table_lock);
+
+  if (!seg_indexp)
+    return VCL_INVALID_SEGMENT_INDEX;
+  return ((u32) * seg_indexp);
+}
+
+void
+vcl_segment_table_del (u64 segment_handle)
+{
+  clib_rwlock_writer_lock (&vcm->segment_table_lock);
+  hash_unset (vcm->segment_table, segment_handle);
+  clib_rwlock_writer_unlock (&vcm->segment_table_lock);
+}
+
+void
+vcl_cleanup_bapi (void)
+{
+  socket_client_main_t *scm = &socket_client_main;
+  api_main_t *am = &api_main;
+
+  am->my_client_index = ~0;
+  am->my_registration = 0;
+  am->vl_input_queue = 0;
+  am->msg_index_by_name_and_crc = 0;
+  scm->socket_fd = 0;
+
+  vl_client_api_unmap ();
+}
+
+int
+vcl_session_read_ready (vcl_session_t * session)
+{
+  /* Assumes caller has acquired spinlock: vcm->sessions_lockp */
+  if (PREDICT_FALSE (session->is_vep))
+    {
+      VDBG (0, "ERROR: session %u: cannot read from an epoll session!",
+           session->session_index);
+      return VPPCOM_EBADFD;
+    }
+
+  if (PREDICT_FALSE (!(session->session_state & (STATE_OPEN | STATE_LISTEN))))
+    {
+      session_state_t state = session->session_state;
+      int rv;
+
+      rv = ((state & STATE_DISCONNECT) ? VPPCOM_ECONNRESET : VPPCOM_ENOTCONN);
+
+      VDBG (1, "session %u [0x%llx]: not open! state 0x%x (%s), ret %d (%s)",
+           session->session_index, session->vpp_handle, state,
+           vppcom_session_state_str (state), rv, vppcom_retval_str (rv));
+      return rv;
+    }
+
+  if (session->session_state & STATE_LISTEN)
+    return clib_fifo_elts (session->accept_evts_fifo);
+
+  return svm_fifo_max_dequeue (session->rx_fifo);
+}
+
+int
+vcl_session_write_ready (vcl_session_t * session)
+{
+  /* Assumes caller has acquired spinlock: vcm->sessions_lockp */
+  if (PREDICT_FALSE (session->is_vep))
+    {
+      VDBG (0, "session %u [0x%llx]: cannot write to an epoll session!",
+           session->session_index, session->vpp_handle);
+      return VPPCOM_EBADFD;
+    }
+
+  if (PREDICT_FALSE (session->session_state & STATE_LISTEN))
+    {
+      if (session->tx_fifo)
+       return svm_fifo_max_enqueue (session->tx_fifo);
+      else
+       return VPPCOM_EBADFD;
+    }
+
+  if (PREDICT_FALSE (!(session->session_state & STATE_OPEN)))
+    {
+      session_state_t state = session->session_state;
+      int rv;
+
+      rv = ((state & STATE_DISCONNECT) ? VPPCOM_ECONNRESET : VPPCOM_ENOTCONN);
+      VDBG (0, "session %u [0x%llx]: not open! state 0x%x (%s), ret %d (%s)",
+           session->session_index, session->vpp_handle, state,
+           vppcom_session_state_str (state), rv, vppcom_retval_str (rv));
+      return rv;
+    }
+
+  return svm_fifo_max_enqueue (session->tx_fifo);
 }
 
 /*