ip: Use .api declared error counters

diff --git a/src/vlibmemory/memory_api.c b/src/vlibmemory/memory_api.c
index 9db27eb..80d8628 100644
--- a/src/vlibmemory/memory_api.c
+++ b/src/vlibmemory/memory_api.c
 #include <vlibmemory/vl_memory_api_h.h>
 #undef vl_endianfun
 
-static inline void *
-vl_api_memclnt_create_t_print (vl_api_memclnt_create_t * a, void *handle)
-{
-  vl_print (handle, "vl_api_memclnt_create_t:\n");
-  vl_print (handle, "name: %s\n", a->name);
-  vl_print (handle, "input_queue: 0x%wx\n", a->input_queue);
-  vl_print (handle, "context: %u\n", (unsigned) a->context);
-  vl_print (handle, "ctx_quota: %ld\n", (long) a->ctx_quota);
-  return handle;
-}
-
-static inline void *
-vl_api_memclnt_delete_t_print (vl_api_memclnt_delete_t * a, void *handle)
-{
-  vl_print (handle, "vl_api_memclnt_delete_t:\n");
-  vl_print (handle, "index: %u\n", (unsigned) a->index);
-  vl_print (handle, "handle: 0x%wx\n", a->handle);
-  return handle;
-}
-
 volatile int **vl_api_queue_cursizes;
 
 static void
@@ -212,6 +192,7 @@ vl_api_memclnt_create_t_handler (vl_api_memclnt_create_t * mp)
 
   regp->name = format (0, "%s", mp->name);
   vec_add1 (regp->name, 0);
+  regp->keepalive = true;
 
   if (am->serialized_message_table_in_shmem == 0)
     am->serialized_message_table_in_shmem =
@@ -237,6 +218,87 @@ vl_api_memclnt_create_t_handler (vl_api_memclnt_create_t * mp)
   vl_msg_api_send_shmem (q, (u8 *) & rp);
 }
 
+void
+vl_api_memclnt_create_v2_t_handler (vl_api_memclnt_create_v2_t *mp)
+{
+  vl_api_registration_t **regpp;
+  vl_api_registration_t *regp;
+  vl_api_memclnt_create_v2_reply_t *rp;
+  svm_queue_t *q;
+  int rv = 0;
+  void *oldheap;
+  api_main_t *am = vlibapi_get_main ();
+  u8 *msg_table;
+
+  /*
+   * This is tortured. Maintain a vlib-address-space private
+   * pool of client registrations. We use the shared-memory virtual
+   * address of client structure as a handle, to allow direct
+   * manipulation of context quota vbls from the client library.
+   *
+   * This scheme causes trouble w/ API message trace replay, since
+   * some random VA from clib_mem_alloc() certainly won't
+   * occur in the Linux sim. The (very) few places
+   * that care need to use the pool index.
+   *
+   * Putting the registration object(s) into a pool in shared memory and
+   * using the pool index as a handle seems like a great idea.
+   * Unfortunately, each and every reference to that pool would need
+   * to be protected by a mutex:
+   *
+   *     Client                      VLIB
+   *     ------                      ----
+   *     convert pool index to
+   *     pointer.
+   *     <deschedule>
+   *                                 expand pool
+   *                                 <deschedule>
+   *     kaboom!
+   */
+
+  pool_get (am->vl_clients, regpp);
+
+  oldheap = vl_msg_push_heap ();
+  *regpp = clib_mem_alloc (sizeof (vl_api_registration_t));
+
+  regp = *regpp;
+  clib_memset (regp, 0, sizeof (*regp));
+  regp->registration_type = REGISTRATION_TYPE_SHMEM;
+  regp->vl_api_registration_pool_index = regpp - am->vl_clients;
+  regp->vlib_rp = am->vlib_rp;
+  regp->shmem_hdr = am->shmem_hdr;
+  regp->clib_file_index = am->shmem_hdr->clib_file_index;
+
+  q = regp->vl_input_queue = (svm_queue_t *) (uword) mp->input_queue;
+  VL_MSG_API_SVM_QUEUE_UNPOISON (q);
+
+  regp->name = format (0, "%s", mp->name);
+  vec_add1 (regp->name, 0);
+  regp->keepalive = mp->keepalive;
+
+  if (am->serialized_message_table_in_shmem == 0)
+    am->serialized_message_table_in_shmem =
+      vl_api_serialize_message_table (am, 0);
+
+  if (am->vlib_rp != am->vlib_primary_rp)
+    msg_table = vl_api_serialize_message_table (am, 0);
+  else
+    msg_table = am->serialized_message_table_in_shmem;
+
+  vl_msg_pop_heap (oldheap);
+
+  rp = vl_msg_api_alloc (sizeof (*rp));
+  rp->_vl_msg_id = ntohs (VL_API_MEMCLNT_CREATE_V2_REPLY);
+  rp->handle = (uword) regp;
+  rp->index = vl_msg_api_handle_from_index_and_epoch (
+    regp->vl_api_registration_pool_index, am->shmem_hdr->application_restarts);
+  rp->context = mp->context;
+  rp->response = ntohl (rv);
+  rp->message_table = pointer_to_uword (msg_table);
+
+  vl_msg_api_send_shmem (q, (u8 *) &rp);
+}
+
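For orientation, here is a minimal sketch of the client-side counterpart: what
sending MEMCLNT_CREATE_V2 might look like from a client library. Only the
message fields (name, input_queue, context, keepalive) and the
vl_msg_api_alloc / vl_msg_api_send_shmem pattern come from the handler above;
the queue plumbing and the exact generated field types are assumptions, and
the real connect path lives in memory_client.c.

/* Hedged sketch, not the actual memory_client.c connect code. */
static void
send_memclnt_create_v2_sketch (api_main_t *am, const char *name,
                               int want_keepalive)
{
  vl_api_memclnt_create_v2_t *mp;
  vl_shmem_hdr_t *shmem_hdr = am->shmem_hdr;

  mp = vl_msg_api_alloc (sizeof (*mp));
  clib_memset (mp, 0, sizeof (*mp));
  mp->_vl_msg_id = ntohs (VL_API_MEMCLNT_CREATE_V2);
  mp->context = 0xdeadbeef;                     /* echoed back in the reply */
  mp->input_queue = (uword) am->vl_input_queue; /* client's own svm queue */
  strncpy ((char *) mp->name, name, sizeof (mp->name) - 1);
  /* false opts this client out of the dead-client keepalive scan */
  mp->keepalive = want_keepalive != 0;

  /* hand the message to vpp's main input queue */
  vl_msg_api_send_shmem (shmem_hdr->vl_input_queue, (u8 *) &mp);
}
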
 void
 vl_api_call_reaper_functions (u32 client_index)
 {
@@ -417,11 +479,12 @@ vl_api_memclnt_keepalive_t_handler (vl_api_memclnt_keepalive_t * mp)
  * don't trace memclnt_keepalive[_reply] msgs
  */
 
-#define foreach_vlib_api_msg                            \
-_(MEMCLNT_CREATE, memclnt_create, 1)                    \
-_(MEMCLNT_DELETE, memclnt_delete, 1)                    \
-_(MEMCLNT_KEEPALIVE, memclnt_keepalive, 0)              \
-_(MEMCLNT_KEEPALIVE_REPLY, memclnt_keepalive_reply, 0)
+#define foreach_vlib_api_msg                                                  \
+  _ (MEMCLNT_CREATE, memclnt_create, 0)                                       \
+  _ (MEMCLNT_CREATE_V2, memclnt_create_v2, 0)                                 \
+  _ (MEMCLNT_DELETE, memclnt_delete, 0)                                       \
+  _ (MEMCLNT_KEEPALIVE, memclnt_keepalive, 0)                                 \
+  _ (MEMCLNT_KEEPALIVE_REPLY, memclnt_keepalive_reply, 0)
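
For readers new to the X-macro idiom used here: each entry supplies the
VL_API_* id (N), the generated type/handler stem (n), and the traced flag (t,
now 0 for every built-in memclnt message). Below is a hedged sketch of how
vl_mem_api_init consumes the table; the real _() definition there fills in
more vl_msg_api_msg_config_t fields than shown.

/* Hedged sketch of the expansion in vl_mem_api_init. */
static void
register_vlib_api_msgs_sketch (void)
{
#define _(N, n, t)                                                            \
  do                                                                          \
    {                                                                         \
      vl_msg_api_msg_config_t c = {                                           \
        .id = VL_API_##N,        /* e.g. VL_API_MEMCLNT_CREATE_V2 */          \
        .name = #n,              /* "memclnt_create_v2" */                    \
        .handler = vl_api_##n##_t_handler,                                    \
        .endian = vl_api_##n##_t_endian,                                      \
        .size = sizeof (vl_api_##n##_t),                                      \
        .traced = t,             /* 0: keep out of the API trace */           \
      };                                                                      \
      vl_msg_api_config (&c);                                                 \
    }                                                                         \
  while (0);
  foreach_vlib_api_msg;
#undef _
}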
 
 /*
  * memory_api_init
@@ -469,9 +532,9 @@ vl_mem_api_init (const char *region_name)
    * special-case freeing of memclnt_delete messages, so we can
    * simply munmap pairwise / private API segments...
    */
-  am->message_bounce[VL_API_MEMCLNT_DELETE] = 1;
-  am->is_mp_safe[VL_API_MEMCLNT_KEEPALIVE_REPLY] = 1;
-  am->is_mp_safe[VL_API_MEMCLNT_KEEPALIVE] = 1;
+  am->msg_data[VL_API_MEMCLNT_DELETE].bounce = 1;
+  vl_api_set_msg_thread_safe (am, VL_API_MEMCLNT_KEEPALIVE_REPLY, 1);
+  vl_api_set_msg_thread_safe (am, VL_API_MEMCLNT_KEEPALIVE, 1);
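
The per-message bounce and mp-safe flags now live in the am->msg_data[] array
rather than in the old parallel message_bounce / is_mp_safe vectors. A minimal
sketch of what the vl_api_set_msg_thread_safe () call is assumed to boil down
to; the real helper is provided by vlibapi and is not reproduced here.

/* Hedged sketch only. */
static void
set_msg_thread_safe_sketch (api_main_t *am, u16 msg_id, int thread_safe)
{
  vl_api_msg_data_t *m = vl_api_get_msg_data (am, msg_id);
  if (m)                        /* only touch ids that are registered */
    m->is_mp_safe = thread_safe != 0;
}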
 
   vlib_set_queue_signal_callback (vm, memclnt_queue_callback);
 
@@ -598,8 +661,10 @@ vl_mem_api_dead_client_scan (api_main_t * am, vl_shmem_hdr_t * shm, f64 now)
 
   /* *INDENT-OFF* */
   pool_foreach (regpp, am->vl_clients)  {
+      if (!(*regpp)->keepalive)
+       continue;
       vl_mem_send_client_keepalive_w_reg (am, now, regpp, &dead_indices,
-                                          &confused_indices);
+                                         &confused_indices);
   }
   /* *INDENT-ON* */
 
@@ -694,6 +759,148 @@ vl_mem_api_dead_client_scan (api_main_t * am, vl_shmem_hdr_t * shm, f64 now)
     }
 }
 
+void (*vl_mem_api_fuzz_hook) (u16, void *);
+
+/* This is only to be called from a vlib/vnet app */
+static void
+vl_mem_api_handler_with_vm_node (api_main_t *am, svm_region_t *vlib_rp,
+                                void *the_msg, vlib_main_t *vm,
+                                vlib_node_runtime_t *node, u8 is_private)
+{
+  u16 id = clib_net_to_host_u16 (*((u16 *) the_msg));
+  vl_api_msg_data_t *m = vl_api_get_msg_data (am, id);
+  u8 *(*handler) (void *, void *, void *);
+  u8 *(*print_fp) (void *, void *);
+  svm_region_t *old_vlib_rp;
+  void *save_shmem_hdr;
+  int is_mp_safe = 1;
+
+  if (PREDICT_FALSE (am->elog_trace_api_messages))
+    {
+      ELOG_TYPE_DECLARE (e) = {
+       .format = "api-msg: %s",
+       .format_args = "T4",
+      };
+      struct
+      {
+       u32 c;
+      } * ed;
+      ed = ELOG_DATA (am->elog_main, e);
+      if (m && m->name)
+       ed->c = elog_string (am->elog_main, (char *) m->name);
+      else
+       ed->c = elog_string (am->elog_main, "BOGUS");
+    }
+
+  if (m && m->handler)
+    {
+      handler = (void *) m->handler;
+
+      if (PREDICT_FALSE (am->rx_trace && am->rx_trace->enabled))
+       vl_msg_api_trace (am, am->rx_trace, the_msg);
+
+      if (PREDICT_FALSE (am->msg_print_flag))
+       {
+         fformat (stdout, "[%d]: %s\n", id, m->name);
+         print_fp = (void *) am->msg_data[id].print_handler;
+         if (print_fp == 0)
+           {
+             fformat (stdout, "  [no registered print fn for msg %d]\n", id);
+           }
+         else
+           {
+             (*print_fp) (the_msg, vm);
+           }
+       }
+      is_mp_safe = am->msg_data[id].is_mp_safe;
+
+      if (!is_mp_safe)
+       {
+         vl_msg_api_barrier_trace_context (am->msg_data[id].name);
+         vl_msg_api_barrier_sync ();
+       }
+      if (is_private)
+       {
+         old_vlib_rp = am->vlib_rp;
+         save_shmem_hdr = am->shmem_hdr;
+         am->vlib_rp = vlib_rp;
+         am->shmem_hdr = (void *) vlib_rp->user_ctx;
+       }
+
+      if (PREDICT_FALSE (vl_mem_api_fuzz_hook != 0))
+       (*vl_mem_api_fuzz_hook) (id, the_msg);
+
+      if (m->is_autoendian)
+       {
+         void (*endian_fp) (void *);
+         endian_fp = am->msg_data[id].endian_handler;
+         (*endian_fp) (the_msg);
+       }
+      if (PREDICT_FALSE (vec_len (am->perf_counter_cbs) != 0))
+       clib_call_callbacks (am->perf_counter_cbs, am, id, 0 /* before */);
+
+      (*handler) (the_msg, vm, node);
+
+      if (PREDICT_FALSE (vec_len (am->perf_counter_cbs) != 0))
+       clib_call_callbacks (am->perf_counter_cbs, am, id, 1 /* after */);
+      if (is_private)
+       {
+         am->vlib_rp = old_vlib_rp;
+         am->shmem_hdr = save_shmem_hdr;
+       }
+      if (!is_mp_safe)
+       vl_msg_api_barrier_release ();
+    }
+  else
+    {
+      clib_warning ("no handler for msg id %d", id);
+    }
+
+  /*
+   * Special-case, so we can e.g. bounce messages off the vnet
+   * main thread without copying them...
+   */
+  if (!m || !m->bounce)
+    {
+      if (is_private)
+       {
+         old_vlib_rp = am->vlib_rp;
+         save_shmem_hdr = am->shmem_hdr;
+         am->vlib_rp = vlib_rp;
+         am->shmem_hdr = (void *) vlib_rp->user_ctx;
+       }
+      vl_msg_api_free (the_msg);
+      if (is_private)
+       {
+         am->vlib_rp = old_vlib_rp;
+         am->shmem_hdr = save_shmem_hdr;
+       }
+    }
+
+  if (PREDICT_FALSE (am->elog_trace_api_messages))
+    {
+      ELOG_TYPE_DECLARE (e) = { .format = "api-msg-done(%s): %s",
+                               .format_args = "t4T4",
+                               .n_enum_strings = 2,
+                               .enum_strings = {
+                                 "barrier",
+                                 "mp-safe",
+                               } };
+
+      struct
+      {
+       u32 barrier;
+       u32 c;
+      } * ed;
+      ed = ELOG_DATA (am->elog_main, e);
+      if (m && m->name)
+       ed->c = elog_string (am->elog_main, (char *) m->name);
+      else
+       ed->c = elog_string (am->elog_main, "BOGUS");
+      ed->barrier = is_mp_safe;
+    }
+}
+
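The vl_mem_api_fuzz_hook pointer above lets test code mutate a message in
place just before its handler runs (and before any autoendian swap). Only the
pointer and its (u16 id, void *msg) signature come from this file; the hook
body below is an illustrative stand-in for a real fuzzer, which would also
track per-message sizes.

/* Illustrative hook: flip one low-order bit just past the 2-byte message id.
 * Assumes every message body carries at least one byte of payload. */
static void
fuzz_hook_sketch (u16 id, void *the_msg)
{
  u8 *payload = (u8 *) the_msg + sizeof (u16); /* skip _vl_msg_id */
  (void) id;                                   /* sketch ignores the id */
  payload[0] ^= 0x01;
}

extern void (*vl_mem_api_fuzz_hook) (u16, void *);

static void
fuzz_hook_enable_sketch (void)
{
  vl_mem_api_fuzz_hook = fuzz_hook_sketch; /* set back to 0 to disable */
}
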
 static inline int
 void_mem_api_handle_msg_i (api_main_t * am, svm_region_t * vlib_rp,
                           vlib_main_t * vm, vlib_node_runtime_t * node,
@@ -707,7 +914,7 @@ void_mem_api_handle_msg_i (api_main_t * am, svm_region_t * vlib_rp,
   if (!svm_queue_sub2 (q, (u8 *) & mp))
     {
       VL_MSG_API_UNPOISON ((void *) mp);
-      vl_msg_api_handler_with_vm_node (am, vlib_rp, (void *) mp, vm, node,
+      vl_mem_api_handler_with_vm_node (am, vlib_rp, (void *) mp, vm, node,
                                       is_private);
       return 0;
     }
@@ -757,8 +964,8 @@ vl_mem_api_handle_rpc (vlib_main_t * vm, vlib_node_runtime_t * node)
       for (i = 0; i < vec_len (vm->processing_rpc_requests); i++)
        {
          mp = vm->processing_rpc_requests[i];
-         vl_msg_api_handler_with_vm_node (am, am->vlib_rp, (void *) mp, vm,
-                                          node, 0 /* is_private */ );
+         vl_mem_api_handler_with_vm_node (am, am->vlib_rp, (void *) mp, vm,
+                                          node, 0 /* is_private */);
        }
       vl_msg_api_barrier_release ();
     }
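
For completeness: the requests drained by the loop above are typically queued
from worker threads with vl_api_rpc_call_main_thread () (declared in
vlibmemory/api.h), which copies the argument bytes into an RPC message queued
for the main thread. The callback and argument struct below are illustrative
only.

typedef struct
{
  u32 sw_if_index;
} my_rpc_args_t;

/* Runs on the main thread, dispatched by vl_mem_api_handle_rpc (). */
static void
my_rpc_callback (my_rpc_args_t *a)
{
  clib_warning ("rpc for sw_if_index %u", a->sw_if_index);
}

/* Worker-thread side: hand work to the main thread. */
static void
queue_rpc_from_worker_sketch (u32 sw_if_index)
{
  my_rpc_args_t args = { .sw_if_index = sw_if_index };
  vl_api_rpc_call_main_thread (my_rpc_callback, (u8 *) &args, sizeof (args));
}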