api: fix handlers that explicitly depend on svm queue
[vpp.git] / src / vpp / stats / stats.c
index cd50142..fe9ff1c 100644 (file)
@@ -448,7 +448,7 @@ do_simple_interface_counters (stats_main_t * sm)
   vnet_interface_main_t *im = sm->interface_main;
   api_main_t *am = sm->api_main;
   vl_shmem_hdr_t *shmem_hdr = am->shmem_hdr;
-  unix_shared_memory_queue_t *q = shmem_hdr->vl_input_queue;
+  svm_queue_t *q = shmem_hdr->vl_input_queue;
   vlib_simple_counter_main_t *cm;
   u32 items_this_message = 0;
   u64 v, *vp = 0;
@@ -544,7 +544,7 @@ static void
   vl_api_want_interface_combined_stats_reply_t *rmp;
   uword *p;
   i32 retval = 0;
-  unix_shared_memory_queue_t *q;
+  vl_api_registration_t *reg;
   u32 swif;
 
   swif = ~0;                   //Using same mechanism as _per_interface_
@@ -555,9 +555,8 @@ static void
                              mp->enable_disable);
 
 reply:
-  q = vl_api_client_index_to_input_queue (mp->client_index);
-
-  if (!q)
+  reg = vl_api_client_index_to_registration (mp->client_index);
+  if (!reg)
     {
       sm->enable_poller =
        clear_client_for_stat (IDX_PER_INTERFACE_COMBINED_COUNTERS, swif,
@@ -570,7 +569,7 @@ reply:
   rmp->context = mp->context;
   rmp->retval = retval;
 
-  vl_msg_api_send_shmem (q, (u8 *) & rmp);
+  vl_api_send_msg (reg, (u8 *) rmp);
 }
 
 static void
@@ -579,7 +578,7 @@ static void
 {
   vpe_client_registration_t *clients, client;
   stats_main_t *sm = &stats_main;
-  unix_shared_memory_queue_t *q, *q_prev = NULL;
+  vl_api_registration_t *reg, *reg_prev = NULL;
   vl_api_vnet_interface_combined_counters_t *mp_copy = NULL;
   u32 mp_size;
   int i;
@@ -593,26 +592,26 @@ static void
   for (i = 0; i < vec_len (clients); i++)
     {
       client = clients[i];
-      q = vl_api_client_index_to_input_queue (client.client_index);
-      if (q)
+      reg = vl_api_client_index_to_registration (client.client_index);
+      if (reg)
        {
-         if (q_prev && (q_prev->cursize < q_prev->maxsize))
+         if (reg_prev && vl_api_can_send_msg (reg_prev))
            {
              mp_copy = vl_msg_api_alloc_as_if_client (mp_size);
              clib_memcpy (mp_copy, mp, mp_size);
-             vl_msg_api_send_shmem (q_prev, (u8 *) & mp);
+             vl_api_send_msg (reg_prev, (u8 *) mp);
              mp = mp_copy;
            }
-         q_prev = q;
+         reg_prev = reg;
        }
     }
 #if STATS_DEBUG > 0
   fformat (stdout, "%U\n", format_vnet_combined_counters, mp);
 #endif
 
-  if (q_prev && (q_prev->cursize < q_prev->maxsize))
+  if (reg_prev && vl_api_can_send_msg (reg_prev))
     {
-      vl_msg_api_send_shmem (q_prev, (u8 *) & mp);
+      vl_api_send_msg (reg_prev, (u8 *) mp);
     }
   else
     {
@@ -627,7 +626,7 @@ do_combined_interface_counters (stats_main_t * sm)
   vnet_interface_main_t *im = sm->interface_main;
   api_main_t *am = sm->api_main;
   vl_shmem_hdr_t *shmem_hdr = am->shmem_hdr;
-  unix_shared_memory_queue_t *q = shmem_hdr->vl_input_queue;
+  svm_queue_t *q = shmem_hdr->vl_input_queue;
   vlib_combined_counter_main_t *cm;
   u32 items_this_message = 0;
   vlib_counter_t v, *vp = 0;
@@ -687,7 +686,7 @@ static void
   vlib_combined_counter_main_t *cm;
   uword *p;
   i32 retval = 0;
-  unix_shared_memory_queue_t *q;
+  vl_api_registration_t *reg;
   int i;
   u32 swif;
 
@@ -718,9 +717,8 @@ static void
     }
 
 reply:
-  q = vl_api_client_index_to_input_queue (mp->client_index);
-
-  if (!q)
+  reg = vl_api_client_index_to_registration (mp->client_index);
+  if (!reg)
     {
       for (i = 0; i < mp->num; i++)
        {
@@ -737,7 +735,7 @@ reply:
   rmp->context = mp->context;
   rmp->retval = retval;
 
-  vl_msg_api_send_shmem (q, (u8 *) & rmp);
+  vl_api_send_msg (reg, (u8 *) rmp);
 }
 
 /* Per Interface Combined distribution to client */
@@ -748,7 +746,7 @@ do_combined_per_interface_counters (stats_main_t * sm)
   vnet_interface_main_t *im = sm->interface_main;
   api_main_t *am = sm->api_main;
   vl_shmem_hdr_t *shmem_hdr = am->shmem_hdr;
-  unix_shared_memory_queue_t *q = NULL;
+  vl_api_registration_t *vl_reg;
   vlib_combined_counter_main_t *cm;
   /*
    * items_this_message will eventually be used to optimise the batching
@@ -790,11 +788,14 @@ do_combined_per_interface_counters (stats_main_t * sm)
   timestamp = vlib_time_now (sm->vlib_main);
 
   vec_reset_length (sm->regs_tmp);
+
+  /* *INDENT-OFF* */
   pool_foreach (reg,
-               sm->stats_registrations[IDX_PER_INTERFACE_COMBINED_COUNTERS],
-               (
-                   {
-                   vec_add1 (sm->regs_tmp, reg);}));
+                sm->stats_registrations[IDX_PER_INTERFACE_COMBINED_COUNTERS],
+  ({
+    vec_add1 (sm->regs_tmp, reg);
+  }));
+  /* *INDENT-ON* */
 
   for (i = 0; i < vec_len (sm->regs_tmp); i++)
     {
@@ -807,21 +808,23 @@ do_combined_per_interface_counters (stats_main_t * sm)
          continue;
        }
       vec_reset_length (sm->clients_tmp);
-      pool_foreach (client, reg->clients, (
-                                           {
-                                           vec_add1 (sm->clients_tmp,
-                                                     client);}
-                   ));
+
+      /* *INDENT-OFF* */
+      pool_foreach (client, reg->clients, ({
+       vec_add1 (sm->clients_tmp, client);
+      }));
+      /* *INDENT-ON* */
 
       //FIXME - should be doing non-variant part of mp here and managing
       // any alloc per client in that vec_foreach
       for (j = 0; j < vec_len (sm->clients_tmp); j++)
        {
          client = sm->clients_tmp[j];
-         q = vl_api_client_index_to_input_queue (client->client_index);
+
+         vl_reg = vl_api_client_index_to_registration (client->client_index);
 
          //Client may have disconnected abrubtly, clean up so we don't poll nothing.
-         if (!q)
+         if (!vl_reg)
            {
              sm->enable_poller =
                clear_client_for_stat (IDX_PER_INTERFACE_COMBINED_COUNTERS,
@@ -860,7 +863,7 @@ do_combined_per_interface_counters (stats_main_t * sm)
          clib_mem_unaligned (&vp->tx_bytes, u64) =
            clib_host_to_net_u64 (v.bytes);
 
-         vl_msg_api_send_shmem (q, (u8 *) & mp);
+         vl_api_send_msg (vl_reg, (u8 *) mp);
        }
     }
 
@@ -882,7 +885,7 @@ static void
   vlib_simple_counter_main_t *cm;
   uword *p;
   i32 retval = 0;
-  unix_shared_memory_queue_t *q;
+  vl_api_registration_t *reg;
   int i;
   u32 swif;
 
@@ -912,10 +915,10 @@ static void
     }
 
 reply:
-  q = vl_api_client_index_to_input_queue (mp->client_index);
+  reg = vl_api_client_index_to_registration (mp->client_index);
 
-  //Client may have disconnected abrubtly, clean up so we don't poll nothing.
-  if (!q)
+  /* Client may have disconnected abruptly, clean up */
+  if (!reg)
     {
       for (i = 0; i < mp->num; i++)
        {
@@ -934,7 +937,7 @@ reply:
   rmp->context = mp->context;
   rmp->retval = retval;
 
-  vl_msg_api_send_shmem (q, (u8 *) & rmp);
+  vl_api_send_msg (reg, (u8 *) rmp);
 }
 
 /* Per Interface Simple distribution to client */
@@ -945,7 +948,7 @@ do_simple_per_interface_counters (stats_main_t * sm)
   vnet_interface_main_t *im = sm->interface_main;
   api_main_t *am = sm->api_main;
   vl_shmem_hdr_t *shmem_hdr = am->shmem_hdr;
-  unix_shared_memory_queue_t *q = NULL;
+  vl_api_registration_t *vl_reg;
   vlib_simple_counter_main_t *cm;
   /*
    * items_this_message will eventually be used to optimise the batching
@@ -987,12 +990,13 @@ do_simple_per_interface_counters (stats_main_t * sm)
   timestamp = vlib_time_now (sm->vlib_main);
 
   vec_reset_length (sm->regs_tmp);
+
+  /* *INDENT-OFF* */
   pool_foreach (reg,
-               sm->stats_registrations[IDX_PER_INTERFACE_SIMPLE_COUNTERS], (
-                                                                              {
-                                                                              vec_add1
-                                                                              (sm->regs_tmp,
-                                                                               reg);}));
+                sm->stats_registrations[IDX_PER_INTERFACE_SIMPLE_COUNTERS], ({
+    vec_add1 (sm->regs_tmp, reg);
+  }));
+  /* *INDENT-ON* */
 
   for (i = 0; i < vec_len (sm->regs_tmp); i++)
     {
@@ -1005,21 +1009,22 @@ do_simple_per_interface_counters (stats_main_t * sm)
          continue;
        }
       vec_reset_length (sm->clients_tmp);
-      pool_foreach (client, reg->clients, (
-                                           {
-                                           vec_add1 (sm->clients_tmp,
-                                                     client);}
-                   ));
+
+      /* *INDENT-OFF* */
+      pool_foreach (client, reg->clients, ({
+       vec_add1 (sm->clients_tmp, client);
+      }));
+      /* *INDENT-ON* */
 
       //FIXME - should be doing non-variant part of mp here and managing
       // any alloc per client in that vec_foreach
       for (j = 0; j < vec_len (sm->clients_tmp); j++)
        {
          client = sm->clients_tmp[j];
-         q = vl_api_client_index_to_input_queue (client->client_index);
+         vl_reg = vl_api_client_index_to_registration (client->client_index);
 
-         //Client may have disconnected abrubtly, clean up so we don't poll nothing.
-         if (!q)
+         /* Client may have disconnected abruptly, clean up */
+         if (!vl_reg)
            {
              sm->enable_poller =
                clear_client_for_stat (IDX_PER_INTERFACE_SIMPLE_COUNTERS,
@@ -1086,7 +1091,7 @@ do_simple_per_interface_counters (stats_main_t * sm)
          v = vlib_get_simple_counter (cm, reg->item);
          clib_mem_unaligned (&vp->rx_mpls, u64) = clib_host_to_net_u64 (v);
 
-         vl_msg_api_send_shmem (q, (u8 *) & mp);
+         vl_api_send_msg (vl_reg, (u8 *) mp);
        }
     }
 
@@ -1164,7 +1169,7 @@ ip4_nbr_ship (stats_main_t * sm, ip4_nbr_stats_ctx_t * ctx)
 {
   api_main_t *am = sm->api_main;
   vl_shmem_hdr_t *shmem_hdr = am->shmem_hdr;
-  unix_shared_memory_queue_t *q = shmem_hdr->vl_input_queue;
+  svm_queue_t *q = shmem_hdr->vl_input_queue;
   vl_api_vnet_ip4_nbr_counters_t *mp = 0;
   int first = 0;
 
@@ -1204,11 +1209,11 @@ ip4_nbr_ship (stats_main_t * sm, ip4_nbr_stats_ctx_t * ctx)
       /*
        * send to the shm q
        */
-      unix_shared_memory_queue_lock (q);
-      pause = unix_shared_memory_queue_is_full (q);
+      svm_queue_lock (q);
+      pause = svm_queue_is_full (q);
 
       vl_msg_api_send_shmem_nolock (q, (u8 *) & mp);
-      unix_shared_memory_queue_unlock (q);
+      svm_queue_unlock (q);
       dsunlock (sm);
 
       if (pause)
@@ -1319,7 +1324,7 @@ ip6_nbr_ship (stats_main_t * sm,
 {
   api_main_t *am = sm->api_main;
   vl_shmem_hdr_t *shmem_hdr = am->shmem_hdr;
-  unix_shared_memory_queue_t *q = shmem_hdr->vl_input_queue;
+  svm_queue_t *q = shmem_hdr->vl_input_queue;
   vl_api_vnet_ip6_nbr_counters_t *mp = 0;
   int first = 0;
 
@@ -1359,11 +1364,11 @@ ip6_nbr_ship (stats_main_t * sm,
       /*
        * send to the shm q
        */
-      unix_shared_memory_queue_lock (q);
-      pause = unix_shared_memory_queue_is_full (q);
+      svm_queue_lock (q);
+      pause = svm_queue_is_full (q);
 
       vl_msg_api_send_shmem_nolock (q, (u8 *) & mp);
-      unix_shared_memory_queue_unlock (q);
+      svm_queue_unlock (q);
       dsunlock (sm);
 
       if (pause)
@@ -1430,7 +1435,7 @@ do_ip4_fib_counters (stats_main_t * sm)
   ip4_main_t *im4 = &ip4_main;
   api_main_t *am = sm->api_main;
   vl_shmem_hdr_t *shmem_hdr = am->shmem_hdr;
-  unix_shared_memory_queue_t *q = shmem_hdr->vl_input_queue;
+  svm_queue_t *q = shmem_hdr->vl_input_queue;
   ip4_route_t *r;
   fib_table_t *fib;
   ip4_fib_t *v4_fib;
@@ -1551,19 +1556,19 @@ again:
                 * drop the data structure lock (which the main thread
                 * may want), and take a pause.
                 */
-               unix_shared_memory_queue_lock (q);
-               if (unix_shared_memory_queue_is_full (q))
+               svm_queue_lock (q);
+               if (svm_queue_is_full (q))
                  {
                    dsunlock (sm);
                    vl_msg_api_send_shmem_nolock (q, (u8 *) & mp);
-                   unix_shared_memory_queue_unlock (q);
+                   svm_queue_unlock (q);
                    mp = 0;
                    ip46_fib_stats_delay (sm, 0 /* sec */ ,
                                          STATS_RELEASE_DELAY_NS);
                    goto again;
                  }
                vl_msg_api_send_shmem_nolock (q, (u8 *) & mp);
-               unix_shared_memory_queue_unlock (q);
+               svm_queue_unlock (q);
 
                items_this_message = IP4_FIB_COUNTER_BATCH_SIZE;
                mp = vl_msg_api_alloc_as_if_client
@@ -1623,7 +1628,7 @@ do_ip4_mfib_counters (stats_main_t * sm)
   ip4_main_t *im4 = &ip4_main;
   api_main_t *am = sm->api_main;
   vl_shmem_hdr_t *shmem_hdr = am->shmem_hdr;
-  unix_shared_memory_queue_t *q = shmem_hdr->vl_input_queue;
+  svm_queue_t *q = shmem_hdr->vl_input_queue;
   mfib_prefix_t *pfx;
   mfib_table_t *mfib;
   do_ip46_fibs_t *do_fibs;
@@ -1718,17 +1723,17 @@ do_ip4_mfib_counters (stats_main_t * sm)
                 * drop the data structure lock (which the main thread
                 * may want), and take a pause.
                 */
-               unix_shared_memory_queue_lock (q);
+               svm_queue_lock (q);
 
-               while (unix_shared_memory_queue_is_full (q))
+               while (svm_queue_is_full (q))
                  {
-                   unix_shared_memory_queue_unlock (q);
+                   svm_queue_unlock (q);
                    ip46_fib_stats_delay (sm, 0 /* sec */ ,
                                          STATS_RELEASE_DELAY_NS);
-                   unix_shared_memory_queue_lock (q);
+                   svm_queue_lock (q);
                  }
                vl_msg_api_send_shmem_nolock (q, (u8 *) & mp);
-               unix_shared_memory_queue_unlock (q);
+               svm_queue_unlock (q);
 
                items_this_message = IP4_MFIB_COUNTER_BATCH_SIZE;
                mp = vl_msg_api_alloc_as_if_client
@@ -1762,7 +1767,7 @@ do_ip6_mfib_counters (stats_main_t * sm)
   ip6_main_t *im6 = &ip6_main;
   api_main_t *am = sm->api_main;
   vl_shmem_hdr_t *shmem_hdr = am->shmem_hdr;
-  unix_shared_memory_queue_t *q = shmem_hdr->vl_input_queue;
+  svm_queue_t *q = shmem_hdr->vl_input_queue;
   mfib_prefix_t *pfx;
   mfib_table_t *mfib;
   do_ip46_fibs_t *do_fibs;
@@ -1857,17 +1862,17 @@ do_ip6_mfib_counters (stats_main_t * sm)
                 * drop the data structure lock (which the main thread
                 * may want), and take a pause.
                 */
-               unix_shared_memory_queue_lock (q);
+               svm_queue_lock (q);
 
-               while (unix_shared_memory_queue_is_full (q))
+               while (svm_queue_is_full (q))
                  {
-                   unix_shared_memory_queue_unlock (q);
+                   svm_queue_unlock (q);
                    ip46_fib_stats_delay (sm, 0 /* sec */ ,
                                          STATS_RELEASE_DELAY_NS);
-                   unix_shared_memory_queue_lock (q);
+                   svm_queue_lock (q);
                  }
                vl_msg_api_send_shmem_nolock (q, (u8 *) & mp);
-               unix_shared_memory_queue_unlock (q);
+               svm_queue_unlock (q);
 
                items_this_message = IP6_MFIB_COUNTER_BATCH_SIZE;
                mp = vl_msg_api_alloc_as_if_client
@@ -1929,7 +1934,7 @@ do_ip6_fib_counters (stats_main_t * sm)
   ip6_main_t *im6 = &ip6_main;
   api_main_t *am = sm->api_main;
   vl_shmem_hdr_t *shmem_hdr = am->shmem_hdr;
-  unix_shared_memory_queue_t *q = shmem_hdr->vl_input_queue;
+  svm_queue_t *q = shmem_hdr->vl_input_queue;
   ip6_route_t *r;
   fib_table_t *fib;
   do_ip46_fibs_t *do_fibs;
@@ -2022,19 +2027,19 @@ again:
                 * drop the data structure lock (which the main thread
                 * may want), and take a pause.
                 */
-               unix_shared_memory_queue_lock (q);
-               if (unix_shared_memory_queue_is_full (q))
+               svm_queue_lock (q);
+               if (svm_queue_is_full (q))
                  {
                    dsunlock (sm);
                    vl_msg_api_send_shmem_nolock (q, (u8 *) & mp);
-                   unix_shared_memory_queue_unlock (q);
+                   svm_queue_unlock (q);
                    mp = 0;
                    ip46_fib_stats_delay (sm, 0 /* sec */ ,
                                          STATS_RELEASE_DELAY_NS);
                    goto again;
                  }
                vl_msg_api_send_shmem_nolock (q, (u8 *) & mp);
-               unix_shared_memory_queue_unlock (q);
+               svm_queue_unlock (q);
 
                items_this_message = IP6_FIB_COUNTER_BATCH_SIZE;
                mp = vl_msg_api_alloc_as_if_client
@@ -2137,7 +2142,7 @@ static void
 {
   vpe_client_registration_t *clients, client;
   stats_main_t *sm = &stats_main;
-  unix_shared_memory_queue_t *q, *q_prev = NULL;
+  vl_api_registration_t *reg, *reg_prev = NULL;
   vl_api_vnet_interface_simple_counters_t *mp_copy = NULL;
   u32 mp_size;
   int i;
@@ -2151,17 +2156,17 @@ static void
   for (i = 0; i < vec_len (clients); i++)
     {
       client = clients[i];
-      q = vl_api_client_index_to_input_queue (client.client_index);
-      if (q)
+      reg = vl_api_client_index_to_registration (client.client_index);
+      if (reg)
        {
-         if (q_prev && (q_prev->cursize < q_prev->maxsize))
+         if (reg_prev && vl_api_can_send_msg (reg_prev))
            {
              mp_copy = vl_msg_api_alloc_as_if_client (mp_size);
              clib_memcpy (mp_copy, mp, mp_size);
-             vl_msg_api_send_shmem (q_prev, (u8 *) & mp);
+             vl_api_send_msg (reg_prev, (u8 *) mp);
              mp = mp_copy;
            }
-         q_prev = q;
+         reg_prev = reg;
        }
       else
        {
@@ -2176,9 +2181,9 @@ static void
   fformat (stdout, "%U\n", format_vnet_simple_counters, mp);
 #endif
 
-  if (q_prev && (q_prev->cursize < q_prev->maxsize))
+  if (reg_prev && vl_api_can_send_msg (reg_prev))
     {
-      vl_msg_api_send_shmem (q_prev, (u8 *) & mp);
+      vl_api_send_msg (reg_prev, (u8 *) mp);
     }
   else
     {
@@ -2190,7 +2195,7 @@ static void
 vl_api_vnet_ip4_fib_counters_t_handler (vl_api_vnet_ip4_fib_counters_t * mp)
 {
   stats_main_t *sm = &stats_main;
-  unix_shared_memory_queue_t *q, *q_prev = NULL;
+  vl_api_registration_t *reg, *reg_prev = NULL;
   vl_api_vnet_ip4_fib_counters_t *mp_copy = NULL;
   u32 mp_size;
   vpe_client_registration_t *clients, client;
@@ -2205,17 +2210,17 @@ vl_api_vnet_ip4_fib_counters_t_handler (vl_api_vnet_ip4_fib_counters_t * mp)
   for (i = 0; i < vec_len (clients); i++)
     {
       client = clients[i];
-      q = vl_api_client_index_to_input_queue (client.client_index);
-      if (q)
+      reg = vl_api_client_index_to_registration (client.client_index);
+      if (reg)
        {
-         if (q_prev && (q_prev->cursize < q_prev->maxsize))
+         if (reg_prev && vl_api_can_send_msg (reg_prev))
            {
              mp_copy = vl_msg_api_alloc_as_if_client (mp_size);
              clib_memcpy (mp_copy, mp, mp_size);
-             vl_msg_api_send_shmem (q_prev, (u8 *) & mp);
+             vl_api_send_msg (reg_prev, (u8 *) mp);
              mp = mp_copy;
            }
-         q_prev = q;
+         reg_prev = reg;
        }
       else
        {
@@ -2225,9 +2230,9 @@ vl_api_vnet_ip4_fib_counters_t_handler (vl_api_vnet_ip4_fib_counters_t * mp)
        }
     }
 
-  if (q_prev && (q_prev->cursize < q_prev->maxsize))
+  if (reg_prev && vl_api_can_send_msg (reg_prev))
     {
-      vl_msg_api_send_shmem (q_prev, (u8 *) & mp);
+      vl_api_send_msg (reg_prev, (u8 *) mp);
     }
   else
     {
@@ -2239,7 +2244,7 @@ static void
 vl_api_vnet_ip4_nbr_counters_t_handler (vl_api_vnet_ip4_nbr_counters_t * mp)
 {
   stats_main_t *sm = &stats_main;
-  unix_shared_memory_queue_t *q, *q_prev = NULL;
+  vl_api_registration_t *reg, *reg_prev = NULL;
   vl_api_vnet_ip4_nbr_counters_t *mp_copy = NULL;
   u32 mp_size;
   vpe_client_registration_t *clients, client;
@@ -2254,17 +2259,17 @@ vl_api_vnet_ip4_nbr_counters_t_handler (vl_api_vnet_ip4_nbr_counters_t * mp)
   for (i = 0; i < vec_len (clients); i++)
     {
       client = clients[i];
-      q = vl_api_client_index_to_input_queue (client.client_index);
-      if (q)
+      reg = vl_api_client_index_to_registration (client.client_index);
+      if (reg)
        {
-         if (q_prev && (q_prev->cursize < q_prev->maxsize))
+         if (reg_prev && vl_api_can_send_msg (reg_prev))
            {
              mp_copy = vl_msg_api_alloc_as_if_client (mp_size);
              clib_memcpy (mp_copy, mp, mp_size);
-             vl_msg_api_send_shmem (q_prev, (u8 *) & mp);
+             vl_api_send_msg (reg_prev, (u8 *) mp);
              mp = mp_copy;
            }
-         q_prev = q;
+         reg_prev = reg;
        }
       else
        {
@@ -2275,9 +2280,9 @@ vl_api_vnet_ip4_nbr_counters_t_handler (vl_api_vnet_ip4_nbr_counters_t * mp)
     }
 
   /* *INDENT-ON* */
-  if (q_prev && (q_prev->cursize < q_prev->maxsize))
+  if (reg_prev && vl_api_can_send_msg (reg_prev))
     {
-      vl_msg_api_send_shmem (q_prev, (u8 *) & mp);
+      vl_api_send_msg (reg_prev, (u8 *) mp);
     }
   else
     {
@@ -2289,7 +2294,7 @@ static void
 vl_api_vnet_ip6_fib_counters_t_handler (vl_api_vnet_ip6_fib_counters_t * mp)
 {
   stats_main_t *sm = &stats_main;
-  unix_shared_memory_queue_t *q, *q_prev = NULL;
+  vl_api_registration_t *reg, *reg_prev = NULL;
   vl_api_vnet_ip6_fib_counters_t *mp_copy = NULL;
   u32 mp_size;
   vpe_client_registration_t *clients, client;
@@ -2304,17 +2309,17 @@ vl_api_vnet_ip6_fib_counters_t_handler (vl_api_vnet_ip6_fib_counters_t * mp)
   for (i = 0; i < vec_len (clients); i++)
     {
       client = clients[i];
-      q = vl_api_client_index_to_input_queue (client.client_index);
-      if (q)
+      reg = vl_api_client_index_to_registration (client.client_index);
+      if (reg)
        {
-         if (q_prev && (q_prev->cursize < q_prev->maxsize))
+         if (reg_prev && vl_api_can_send_msg (reg_prev))
            {
              mp_copy = vl_msg_api_alloc_as_if_client (mp_size);
              clib_memcpy (mp_copy, mp, mp_size);
-             vl_msg_api_send_shmem (q_prev, (u8 *) & mp);
+             vl_api_send_msg (reg_prev, (u8 *) mp);
              mp = mp_copy;
            }
-         q_prev = q;
+         reg_prev = reg;
        }
       else
        {
@@ -2324,9 +2329,9 @@ vl_api_vnet_ip6_fib_counters_t_handler (vl_api_vnet_ip6_fib_counters_t * mp)
        }
     }
   /* *INDENT-ON* */
-  if (q_prev && (q_prev->cursize < q_prev->maxsize))
+  if (reg_prev && vl_api_can_send_msg (reg_prev))
     {
-      vl_msg_api_send_shmem (q_prev, (u8 *) & mp);
+      vl_api_send_msg (reg_prev, (u8 *) mp);
     }
   else
     {
@@ -2338,7 +2343,7 @@ static void
 vl_api_vnet_ip6_nbr_counters_t_handler (vl_api_vnet_ip6_nbr_counters_t * mp)
 {
   stats_main_t *sm = &stats_main;
-  unix_shared_memory_queue_t *q, *q_prev = NULL;
+  vl_api_registration_t *reg, *reg_prev = NULL;
   vl_api_vnet_ip6_nbr_counters_t *mp_copy = NULL;
   u32 mp_size;
   vpe_client_registration_t *clients, client;
@@ -2353,17 +2358,17 @@ vl_api_vnet_ip6_nbr_counters_t_handler (vl_api_vnet_ip6_nbr_counters_t * mp)
   for (i = 0; i < vec_len (clients); i++)
     {
       client = clients[i];
-      q = vl_api_client_index_to_input_queue (client.client_index);
-      if (q)
+      reg = vl_api_client_index_to_registration (client.client_index);
+      if (reg)
        {
-         if (q_prev && (q_prev->cursize < q_prev->maxsize))
+         if (reg_prev && vl_api_can_send_msg (reg_prev))
            {
              mp_copy = vl_msg_api_alloc_as_if_client (mp_size);
              clib_memcpy (mp_copy, mp, mp_size);
-             vl_msg_api_send_shmem (q_prev, (u8 *) & mp);
+             vl_api_send_msg (reg_prev, (u8 *) mp);
              mp = mp_copy;
            }
-         q_prev = q;
+         reg_prev = reg;
        }
       else
        {
@@ -2373,9 +2378,9 @@ vl_api_vnet_ip6_nbr_counters_t_handler (vl_api_vnet_ip6_nbr_counters_t * mp)
        }
     }
   /* *INDENT-ON* */
-  if (q_prev && (q_prev->cursize < q_prev->maxsize))
+  if (reg_prev && vl_api_can_send_msg (reg_prev))
     {
-      vl_msg_api_send_shmem (q_prev, (u8 *) & mp);
+      vl_api_send_msg (reg_prev, (u8 *) mp);
     }
   else
     {
@@ -2392,7 +2397,7 @@ vl_api_want_stats_t_handler (vl_api_want_stats_t * mp)
   uword *p;
   i32 retval = 0;
   u32 item;
-  unix_shared_memory_queue_t *q;
+  vl_api_registration_t *reg;
 
   item = ~0;                   //"ALL THE THINGS IN THE THINGS
   rp.client_index = mp->client_index;
@@ -2417,9 +2422,8 @@ vl_api_want_stats_t_handler (vl_api_want_stats_t * mp)
                              item, mp->enable_disable);
 
 reply:
-  q = vl_api_client_index_to_input_queue (mp->client_index);
-
-  if (!q)
+  reg = vl_api_client_index_to_registration (mp->client_index);
+  if (!reg)
     return;
 
   rmp = vl_msg_api_alloc (sizeof (*rmp));
@@ -2427,7 +2431,7 @@ reply:
   rmp->context = mp->context;
   rmp->retval = retval;
 
-  vl_msg_api_send_shmem (q, (u8 *) & rmp);
+  vl_api_send_msg (reg, (u8 *) rmp);
 }
 
 static void
@@ -2440,7 +2444,7 @@ static void
   uword *p;
   i32 retval = 0;
   u32 swif;
-  unix_shared_memory_queue_t *q;
+  vl_api_registration_t *reg;
 
   swif = ~0;                   //Using same mechanism as _per_interface_
   rp.client_index = mp->client_index;
@@ -2450,9 +2454,9 @@ static void
                              mp->enable_disable);
 
 reply:
-  q = vl_api_client_index_to_input_queue (mp->client_index);
+  reg = vl_api_client_index_to_registration (mp->client_index);
 
-  if (!q)
+  if (!reg)
     {
       sm->enable_poller =
        clear_client_for_stat (IDX_PER_INTERFACE_SIMPLE_COUNTERS, swif,
@@ -2465,7 +2469,7 @@ reply:
   rmp->context = mp->context;
   rmp->retval = retval;
 
-  vl_msg_api_send_shmem (q, (u8 *) & rmp);
+  vl_api_send_msg (reg, (u8 *) rmp);
 }
 
 
@@ -2477,7 +2481,7 @@ vl_api_want_ip4_fib_stats_t_handler (vl_api_want_ip4_fib_stats_t * mp)
   vl_api_want_ip4_fib_stats_reply_t *rmp;
   uword *p;
   i32 retval = 0;
-  unix_shared_memory_queue_t *q;
+  vl_api_registration_t *reg;
   u32 fib;
 
   fib = ~0;                    //Using same mechanism as _per_interface_
@@ -2488,9 +2492,9 @@ vl_api_want_ip4_fib_stats_t_handler (vl_api_want_ip4_fib_stats_t * mp)
                              mp->enable_disable);
 
 reply:
-  q = vl_api_client_index_to_input_queue (mp->client_index);
+  reg = vl_api_client_index_to_registration (mp->client_index);
 
-  if (!q)
+  if (!reg)
     {
       sm->enable_poller = clear_client_for_stat (IDX_IP4_FIB_COUNTERS,
                                                 fib, mp->client_index);
@@ -2502,7 +2506,7 @@ reply:
   rmp->context = mp->context;
   rmp->retval = retval;
 
-  vl_msg_api_send_shmem (q, (u8 *) & rmp);
+  vl_api_send_msg (reg, (u8 *) rmp);
 }
 
 static void
@@ -2513,7 +2517,7 @@ vl_api_want_ip4_mfib_stats_t_handler (vl_api_want_ip4_mfib_stats_t * mp)
   vl_api_want_ip4_mfib_stats_reply_t *rmp;
   uword *p;
   i32 retval = 0;
-  unix_shared_memory_queue_t *q;
+  vl_api_registration_t *reg;
   u32 mfib;
 
   mfib = ~0;                   //Using same mechanism as _per_interface_
@@ -2524,9 +2528,8 @@ vl_api_want_ip4_mfib_stats_t_handler (vl_api_want_ip4_mfib_stats_t * mp)
                              mp->enable_disable);
 
 reply:
-  q = vl_api_client_index_to_input_queue (mp->client_index);
-
-  if (!q)
+  reg = vl_api_client_index_to_registration (mp->client_index);
+  if (!reg)
     {
       sm->enable_poller = clear_client_for_stat (IDX_IP4_MFIB_COUNTERS,
                                                 mfib, mp->client_index);
@@ -2538,7 +2541,7 @@ reply:
   rmp->context = mp->context;
   rmp->retval = retval;
 
-  vl_msg_api_send_shmem (q, (u8 *) & rmp);
+  vl_api_send_msg (reg, (u8 *) rmp);
 }
 
 static void
@@ -2549,7 +2552,7 @@ vl_api_want_ip6_fib_stats_t_handler (vl_api_want_ip6_fib_stats_t * mp)
   vl_api_want_ip4_fib_stats_reply_t *rmp;
   uword *p;
   i32 retval = 0;
-  unix_shared_memory_queue_t *q;
+  vl_api_registration_t *reg;
   u32 fib;
 
   fib = ~0;                    //Using same mechanism as _per_interface_
@@ -2560,9 +2563,8 @@ vl_api_want_ip6_fib_stats_t_handler (vl_api_want_ip6_fib_stats_t * mp)
                              mp->enable_disable);
 
 reply:
-  q = vl_api_client_index_to_input_queue (mp->client_index);
-
-  if (!q)
+  reg = vl_api_client_index_to_registration (mp->client_index);
+  if (!reg)
     {
       sm->enable_poller = clear_client_for_stat (IDX_IP6_FIB_COUNTERS,
                                                 fib, mp->client_index);
@@ -2574,7 +2576,7 @@ reply:
   rmp->context = mp->context;
   rmp->retval = retval;
 
-  vl_msg_api_send_shmem (q, (u8 *) & rmp);
+  vl_api_send_msg (reg, (u8 *) rmp);
 }
 
 static void
@@ -2585,7 +2587,7 @@ vl_api_want_ip6_mfib_stats_t_handler (vl_api_want_ip6_mfib_stats_t * mp)
   vl_api_want_ip4_mfib_stats_reply_t *rmp;
   uword *p;
   i32 retval = 0;
-  unix_shared_memory_queue_t *q;
+  vl_api_registration_t *reg;
   u32 mfib;
 
   mfib = ~0;                   //Using same mechanism as _per_interface_
@@ -2596,9 +2598,8 @@ vl_api_want_ip6_mfib_stats_t_handler (vl_api_want_ip6_mfib_stats_t * mp)
                              mp->enable_disable);
 
 reply:
-  q = vl_api_client_index_to_input_queue (mp->client_index);
-
-  if (!q)
+  reg = vl_api_client_index_to_registration (mp->client_index);
+  if (!reg)
     {
       sm->enable_poller = clear_client_for_stat (IDX_IP6_MFIB_COUNTERS,
                                                 mfib, mp->client_index);
@@ -2610,7 +2611,7 @@ reply:
   rmp->context = mp->context;
   rmp->retval = retval;
 
-  vl_msg_api_send_shmem (q, (u8 *) & rmp);
+  vl_api_send_msg (reg, (u8 *) rmp);
 }
 
 /* FIXME - NBR stats broken - this will be fixed in subsequent patch */
@@ -2635,14 +2636,11 @@ vl_api_vnet_get_summary_stats_t_handler (vl_api_vnet_get_summary_stats_t * mp)
   int i, which;
   u64 total_pkts[VLIB_N_RX_TX];
   u64 total_bytes[VLIB_N_RX_TX];
+  vl_api_registration_t *reg;
 
-  unix_shared_memory_queue_t *q =
-    vl_api_client_index_to_input_queue (mp->client_index);
-
-  if (!q)
-    {
-      return;
-    }
+  reg = vl_api_client_index_to_registration (mp->client_index);
+  if (!reg)
+    return;
 
   rmp = vl_msg_api_alloc (sizeof (*rmp));
   rmp->_vl_msg_id = ntohs (VL_API_VNET_GET_SUMMARY_STATS_REPLY);
@@ -2674,7 +2672,7 @@ vl_api_vnet_get_summary_stats_t_handler (vl_api_vnet_get_summary_stats_t * mp)
   rmp->vector_rate =
     clib_host_to_net_u64 (vlib_last_vector_length_per_node (sm->vlib_main));
 
-  vl_msg_api_send_shmem (q, (u8 *) & rmp);
+  vl_api_send_msg (reg, (u8 *) rmp);
 }
 
 int