UDP Encap counters
[vpp.git] / src/vpp/stats/stats.c
index b0fac73..3fe03e4 100644
  */
 #include <vpp/stats/stats.h>
 #include <signal.h>
-#include <vlib/threads.h>
-#include <vnet/fib/fib_entry.h>
-#include <vnet/fib/fib_table.h>
 #include <vnet/fib/ip4_fib.h>
+#include <vnet/fib/fib_entry.h>
+#include <vnet/mfib/mfib_entry.h>
 #include <vnet/dpo/load_balance.h>
+#include <vnet/udp/udp_encap.h>
 
 #define STATS_DEBUG 0
 
@@ -48,23 +48,156 @@ stats_main_t stats_main;
 #define foreach_stats_msg                                              \
 _(WANT_STATS, want_stats)                                              \
 _(VNET_INTERFACE_SIMPLE_COUNTERS, vnet_interface_simple_counters)      \
+_(WANT_INTERFACE_SIMPLE_STATS, want_interface_simple_stats)    \
 _(VNET_INTERFACE_COMBINED_COUNTERS, vnet_interface_combined_counters)  \
+_(WANT_INTERFACE_COMBINED_STATS, want_interface_combined_stats)        \
+_(WANT_PER_INTERFACE_COMBINED_STATS, want_per_interface_combined_stats)        \
+_(WANT_PER_INTERFACE_SIMPLE_STATS, want_per_interface_simple_stats) \
 _(VNET_IP4_FIB_COUNTERS, vnet_ip4_fib_counters)                                \
+_(WANT_IP4_FIB_STATS, want_ip4_fib_stats)            \
 _(VNET_IP6_FIB_COUNTERS, vnet_ip6_fib_counters)                                \
+_(WANT_IP6_FIB_STATS, want_ip6_fib_stats)        \
+_(WANT_IP4_MFIB_STATS, want_ip4_mfib_stats)                             \
+_(WANT_IP6_MFIB_STATS, want_ip6_mfib_stats)                             \
 _(VNET_IP4_NBR_COUNTERS, vnet_ip4_nbr_counters)                                \
-_(VNET_IP6_NBR_COUNTERS, vnet_ip6_nbr_counters)
+_(WANT_IP4_NBR_STATS, want_ip4_nbr_stats)            \
+_(VNET_IP6_NBR_COUNTERS, vnet_ip6_nbr_counters) \
+_(WANT_IP6_NBR_STATS, want_ip6_nbr_stats) \
+_(VNET_GET_SUMMARY_STATS, vnet_get_summary_stats) \
+_(STATS_GET_POLLER_DELAY, stats_get_poller_delay) \
+_(WANT_UDP_ENCAP_STATS, want_udp_encap_stats)
+
+#define vl_msg_name_crc_list
+#include <vpp/stats/stats.api.h>
+#undef vl_msg_name_crc_list
+
+static void
+setup_message_id_table (api_main_t * am)
+{
+#define _(id,n,crc) \
+  vl_msg_api_add_msg_name_crc (am, #n "_" #crc, id);
+  foreach_vl_msg_name_crc_stats;
+#undef _
+}
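
Both the foreach_stats_msg list above and the foreach_vl_msg_name_crc_stats expansion inside setup_message_id_table rely on the same X-macro idiom: each _() entry is expanded several times under different definitions of _ (handler setup, name/CRC registration, and so on). A minimal, self-contained sketch of the idiom, using made-up message names rather than real stats.api ones:

    /* X-macro sketch only; demo names, not part of this change. */
    #include <stdio.h>

    #define foreach_demo_msg                        \
    _(WANT_STATS, want_stats)                       \
    _(WANT_UDP_ENCAP_STATS, want_udp_encap_stats)

    /* Expansion 1: define one handler per listed message. */
    #define _(N, n) static void vl_api_##n##_t_handler (void) { puts (#N); }
    foreach_demo_msg
    #undef _

    int
    main (void)
    {
      /* Expansion 2: invoke each handler from the same list. */
    #define _(N, n) vl_api_##n##_t_handler ();
      foreach_demo_msg
    #undef _
      return 0;
    }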
 
 /* These constants ensure msg sizes <= 1024, aka ring allocation */
 #define SIMPLE_COUNTER_BATCH_SIZE      126
 #define COMBINED_COUNTER_BATCH_SIZE    63
 #define IP4_FIB_COUNTER_BATCH_SIZE     48
 #define IP6_FIB_COUNTER_BATCH_SIZE     30
+#define IP4_MFIB_COUNTER_BATCH_SIZE    24
+#define IP6_MFIB_COUNTER_BATCH_SIZE    15
+#define UDP_ENCAP_COUNTER_BATCH_SIZE   (1024 / sizeof(vl_api_udp_encap_counter_t))
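
As a rough, standalone illustration of the "msg sizes <= 1024" rule above (not part of the patch; the per-record size below is an assumption, not the real sizeof (vl_api_udp_encap_counter_t)), a batch size is simply however many counter records fit in one 1024-byte ring slot:

    /* Back-of-the-envelope check of the 1024-byte rule; record_bytes is an
     * assumed size, for illustration only. */
    #include <stdio.h>

    int
    main (void)
    {
      unsigned slot_bytes = 1024;
      unsigned record_bytes = 16;       /* assumed per-record size */

      printf ("udp-encap batch size ~= %u records\n",
              slot_bytes / record_bytes);
      return 0;
    }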
 
 /* 5ms */
 #define STATS_RELEASE_DELAY_NS (1000 * 1000 * 5)
 /*                              ns/us  us/ms        */
 
-void
+u8 *
+format_vnet_interface_combined_counters (u8 * s, va_list * args)
+{
+  stats_main_t *sm = &stats_main;
+  vl_api_vnet_interface_combined_counters_t *mp =
+    va_arg (*args, vl_api_vnet_interface_combined_counters_t *);
+
+  char *counter_name;
+  u32 count, sw_if_index;
+  int i;
+  count = ntohl (mp->count);
+  sw_if_index = ntohl (mp->first_sw_if_index);
+
+  vlib_counter_t *vp;
+  u64 packets, bytes;
+  vp = (vlib_counter_t *) mp->data;
+
+  switch (mp->vnet_counter_type)
+    {
+    case VNET_INTERFACE_COUNTER_RX:
+      counter_name = "rx";
+      break;
+    case VNET_INTERFACE_COUNTER_TX:
+      counter_name = "tx";
+      break;
+    default:
+      counter_name = "bogus";
+      break;
+    }
+  for (i = 0; i < count; i++)
+    {
+      packets = clib_mem_unaligned (&vp->packets, u64);
+      packets = clib_net_to_host_u64 (packets);
+      bytes = clib_mem_unaligned (&vp->bytes, u64);
+      bytes = clib_net_to_host_u64 (bytes);
+      vp++;
+      s = format (s, "%U.%s.packets %lld\n",
+                 format_vnet_sw_if_index_name,
+                 sm->vnet_main, sw_if_index, counter_name, packets);
+      s = format (s, "%U.%s.bytes %lld\n",
+                 format_vnet_sw_if_index_name,
+                 sm->vnet_main, sw_if_index, counter_name, bytes);
+      sw_if_index++;
+    }
+  return s;
+}
+
+u8 *
+format_vnet_interface_simple_counters (u8 * s, va_list * args)
+{
+  stats_main_t *sm = &stats_main;
+  vl_api_vnet_interface_simple_counters_t *mp =
+    va_arg (*args, vl_api_vnet_interface_simple_counters_t *);
+  char *counter_name;
+  u32 count, sw_if_index;
+  count = ntohl (mp->count);
+  sw_if_index = ntohl (mp->first_sw_if_index);
+  u64 *vp, v;
+  vp = (u64 *) mp->data;
+  int i;
+
+  switch (mp->vnet_counter_type)
+    {
+    case VNET_INTERFACE_COUNTER_DROP:
+      counter_name = "drop";
+      break;
+    case VNET_INTERFACE_COUNTER_PUNT:
+      counter_name = "punt";
+      break;
+    case VNET_INTERFACE_COUNTER_IP4:
+      counter_name = "ip4";
+      break;
+    case VNET_INTERFACE_COUNTER_IP6:
+      counter_name = "ip6";
+      break;
+    case VNET_INTERFACE_COUNTER_RX_NO_BUF:
+      counter_name = "rx-no-buff";
+      break;
+    case VNET_INTERFACE_COUNTER_RX_MISS:
+      counter_name = "rx-miss";
+      break;
+    case VNET_INTERFACE_COUNTER_RX_ERROR:
+      counter_name = "rx-error (fifo-full)";
+      break;
+    case VNET_INTERFACE_COUNTER_TX_ERROR:
+      counter_name = "tx-error (fifo-full)";
+      break;
+    default:
+      counter_name = "bogus";
+      break;
+    }
+  for (i = 0; i < count; i++)
+    {
+      v = clib_mem_unaligned (vp, u64);
+      v = clib_net_to_host_u64 (v);
+      vp++;
+      s = format (s, "%U.%s %lld\n", format_vnet_sw_if_index_name,
+                 sm->vnet_main, sw_if_index, counter_name, v);
+      sw_if_index++;
+    }
+
+  return s;
+}
+
+static void
 dslock (stats_main_t * sm, int release_hint, int tag)
 {
   u32 thread_index;
@@ -97,7 +230,7 @@ stats_dslock_with_hint (int hint, int tag)
   dslock (sm, hint, tag);
 }
 
-void
+static void
 dsunlock (stats_main_t * sm)
 {
   u32 thread_index;
@@ -125,6 +258,192 @@ stats_dsunlock (int hint, int tag)
   dsunlock (sm);
 }
 
+static vpe_client_registration_t *
+get_client_for_stat (u32 reg, u32 item, u32 client_index)
+{
+  stats_main_t *sm = &stats_main;
+  vpe_client_stats_registration_t *registration;
+  uword *p;
+
+  /* Is there anything listening for item in that reg */
+  p = hash_get (sm->stats_registration_hash[reg], item);
+
+  if (!p)
+    return 0;                  // Fail
+
+  /* If there is, is our client_index one of them */
+  registration = pool_elt_at_index (sm->stats_registrations[reg], p[0]);
+  p = hash_get (registration->client_hash, client_index);
+
+  if (!p)
+    return 0;                  // Fail
+
+  return pool_elt_at_index (registration->clients, p[0]);
+
+}
+
+static int
+set_client_for_stat (u32 reg, u32 item, vpe_client_registration_t * client)
+{
+  stats_main_t *sm = &stats_main;
+  vpe_client_stats_registration_t *registration;
+  vpe_client_registration_t *cr;
+  uword *p;
+
+  /* Is there anything listening for item in that reg */
+  p = hash_get (sm->stats_registration_hash[reg], item);
+
+  if (!p)
+    {
+      pool_get (sm->stats_registrations[reg], registration);
+      registration->item = item;
+      hash_set (sm->stats_registration_hash[reg], item,
+               registration - sm->stats_registrations[reg]);
+    }
+  else
+    {
+      registration = pool_elt_at_index (sm->stats_registrations[reg], p[0]);
+    }
+
+  p = hash_get (registration->client_hash, client->client_index);
+
+  if (!p)
+    {
+      pool_get (registration->clients, cr);
+      cr->client_index = client->client_index;
+      cr->client_pid = client->client_pid;
+      hash_set (registration->client_hash, cr->client_index,
+               cr - registration->clients);
+    }
+
+  return 1;                    //At least one client is doing something ... poll
+}
+
+int
+clear_client_for_stat (u32 reg, u32 item, u32 client_index)
+{
+  stats_main_t *sm = &stats_main;
+  vpe_client_stats_registration_t *registration;
+  vpe_client_registration_t *client;
+  uword *p;
+  int i, elts;
+
+  /* Clear the client first */
+  /* Is there anything listening for item in that reg */
+  p = hash_get (sm->stats_registration_hash[reg], item);
+
+  if (!p)
+    goto exit;
+
+  /* If there is, is our client_index one of them */
+  registration = pool_elt_at_index (sm->stats_registrations[reg], p[0]);
+  p = hash_get (registration->client_hash, client_index);
+
+  if (!p)
+    goto exit;
+
+  client = pool_elt_at_index (registration->clients, p[0]);
+  hash_unset (registration->client_hash, client->client_index);
+  pool_put (registration->clients, client);
+
+  /* Now check if that was the last client for that item */
+  if (0 == pool_elts (registration->clients))
+    {
+      hash_unset (sm->stats_registration_hash[reg], item);
+      pool_put (sm->stats_registrations[reg], registration);
+    }
+
+exit:
+  elts = 0;
+  /* Now check if that was the last item in any of the listened to stats */
+  for (i = 0; i < STATS_REG_N_IDX; i++)
+    {
+      elts += pool_elts (sm->stats_registrations[i]);
+    }
+  return elts;
+}
+
+vpe_client_registration_t *
+get_clients_for_stat (u32 reg, u32 item)
+{
+  stats_main_t *sm = &stats_main;
+  vpe_client_registration_t *client, *clients = 0;
+  vpe_client_stats_registration_t *registration;
+  uword *p;
+
+  /* Is there anything listening for item in that reg */
+  p = hash_get (sm->stats_registration_hash[reg], item);
+
+  if (!p)
+    return 0;                  // Fail
+
+  /* If there is, is our client_index one of them */
+  registration = pool_elt_at_index (sm->stats_registrations[reg], p[0]);
+
+  vec_reset_length (clients);
+  pool_foreach (client, registration->clients, (
+                                                {
+                                                vec_add1 (clients, *client);}
+               ));
+  return clients;
+}
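
The helpers above (get_client_for_stat, set_client_for_stat, clear_client_for_stat and get_clients_for_stat) all walk the same two-level registry: a per-stat hash from item (sw_if_index, fib index, or ~0 for "all") to a registration, and a per-registration hash from client_index to a client record. A simplified conceptual model of that layout, as a sketch only; the real vpe_client_stats_registration_t in the stats header uses vppinfra pools and hashes:

    /* Conceptual model of the per-stat registration state (sketch only). */
    typedef struct
    {
      unsigned client_index;
      unsigned client_pid;
    } demo_client_t;

    typedef struct
    {
      unsigned item;            /* sw_if_index, fib index, ... or ~0 for all */
      demo_client_t *clients;   /* pool of listening clients                 */
      void *client_hash;        /* client_index -> index into clients        */
    } demo_registration_t;

    typedef struct
    {
      demo_registration_t *registrations;  /* pool, one element per item   */
      void *registration_hash;             /* item -> index into the pool  */
    } demo_per_stat_registry_t;            /* one per IDX_... stat type    */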
+
+
+static void
+clear_client_reg (u32 ** registrations)
+{
+  /* When registrations[x] is a vector of pool indices,
+     here is a good place to clean up the pools.
+   */
+#define stats_reg(n) vec_free(registrations[IDX_##n]);
+#include <vpp/stats/stats.reg>
+#undef stats_reg
+
+  vec_free (registrations);
+}
+
+u32 **
+init_client_reg (u32 ** registrations)
+{
+
+  /*
+     Initialise the stats registrations for each
+     type of stat a client can register for as well as
+     a vector of "interested" indexes.
+     Initially this is a u32 of either sw_if_index or fib_index
+     but eventually this should migrate to a pool_index (u32)
+     with a type specific pool that can include more complex things
+     such as timing and structured events.
+   */
+  vec_validate (registrations, STATS_REG_N_IDX);
+#define stats_reg(n) \
+  vec_reset_length(registrations[IDX_##n]);
+#include <vpp/stats/stats.reg>
+#undef stats_reg
+
+  /*
+     When registrations[x] is a vector of pool indices, here
+     is a good place to init the pools.
+   */
+  return registrations;
+}
+
+u32 **
+enable_all_client_reg (u32 ** registrations)
+{
+
+  /*
+     Enable all stats known by adding
+     ~0 to the index vector. Eventually this
+     should be deprecated.
+   */
+#define stats_reg(n)                            \
+  vec_add1(registrations[IDX_##n], ~0);
+#include <vpp/stats/stats.reg>
+#undef stats_reg
+  return registrations;
+}
+
 static void
 do_simple_interface_counters (stats_main_t * sm)
 {
@@ -132,7 +451,7 @@ do_simple_interface_counters (stats_main_t * sm)
   vnet_interface_main_t *im = sm->interface_main;
   api_main_t *am = sm->api_main;
   vl_shmem_hdr_t *shmem_hdr = am->shmem_hdr;
-  unix_shared_memory_queue_t *q = shmem_hdr->vl_input_queue;
+  svm_queue_t *q = shmem_hdr->vl_input_queue;
   vlib_simple_counter_main_t *cm;
   u32 items_this_message = 0;
   u64 v, *vp = 0;
@@ -179,6 +498,130 @@ do_simple_interface_counters (stats_main_t * sm)
   vnet_interface_counter_unlock (im);
 }
 
+void
+handle_client_registration (vpe_client_registration_t * client, u32 stat,
+                           u32 item, int enable_disable)
+{
+  stats_main_t *sm = &stats_main;
+  vpe_client_registration_t *rp, _rp;
+
+  rp = get_client_for_stat (stat, item, client->client_index);
+
+  /* Disable case */
+  if (enable_disable == 0)
+    {
+      if (!rp)                 // No client to disable
+       {
+         clib_warning ("pid %d: already disabled for stats...",
+                       client->client_pid);
+         return;
+       }
+      sm->enable_poller =
+       clear_client_for_stat (stat, item, client->client_index);
+      return;
+    }
+  /* Enable case */
+  if (!rp)
+    {
+      rp = &_rp;
+      rp->client_index = client->client_index;
+      rp->client_pid = client->client_pid;
+      sm->enable_poller = set_client_for_stat (stat, item, rp);
+    }
+}
+
+
+/**********************************
+ * ALL Interface Combined stats - to be deprecated
+ **********************************/
+
+/*
+ * This API should be deprecated as _per_interface_ works with ~0 as sw_if_index.
+ */
+static void
+  vl_api_want_interface_combined_stats_t_handler
+  (vl_api_want_interface_combined_stats_t * mp)
+{
+  stats_main_t *sm = &stats_main;
+  vpe_client_registration_t rp;
+  vl_api_want_interface_combined_stats_reply_t *rmp;
+  uword *p;
+  i32 retval = 0;
+  vl_api_registration_t *reg;
+  u32 swif;
+
+  swif = ~0;                   //Using same mechanism as _per_interface_
+  rp.client_index = mp->client_index;
+  rp.client_pid = mp->pid;
+
+  handle_client_registration (&rp, IDX_PER_INTERFACE_COMBINED_COUNTERS, swif,
+                             mp->enable_disable);
+
+reply:
+  reg = vl_api_client_index_to_registration (mp->client_index);
+  if (!reg)
+    {
+      sm->enable_poller =
+       clear_client_for_stat (IDX_PER_INTERFACE_COMBINED_COUNTERS, swif,
+                              mp->client_index);
+      return;
+    }
+
+  rmp = vl_msg_api_alloc (sizeof (*rmp));
+  rmp->_vl_msg_id = ntohs (VL_API_WANT_INTERFACE_COMBINED_STATS_REPLY);
+  rmp->context = mp->context;
+  rmp->retval = retval;
+
+  vl_api_send_msg (reg, (u8 *) rmp);
+}
+
+static void
+  vl_api_vnet_interface_combined_counters_t_handler
+  (vl_api_vnet_interface_combined_counters_t * mp)
+{
+  vpe_client_registration_t *clients, client;
+  stats_main_t *sm = &stats_main;
+  vl_api_registration_t *reg, *reg_prev = NULL;
+  vl_api_vnet_interface_combined_counters_t *mp_copy = NULL;
+  u32 mp_size;
+  int i;
+
+  mp_size = sizeof (*mp) + (ntohl (mp->count) * sizeof (vlib_counter_t));
+
+  clients =
+    get_clients_for_stat (IDX_PER_INTERFACE_COMBINED_COUNTERS,
+                         ~0 /*flag for all */ );
+
+  for (i = 0; i < vec_len (clients); i++)
+    {
+      client = clients[i];
+      reg = vl_api_client_index_to_registration (client.client_index);
+      if (reg)
+       {
+         if (reg_prev && vl_api_can_send_msg (reg_prev))
+           {
+             mp_copy = vl_msg_api_alloc_as_if_client (mp_size);
+             clib_memcpy (mp_copy, mp, mp_size);
+             vl_api_send_msg (reg_prev, (u8 *) mp);
+             mp = mp_copy;
+           }
+         reg_prev = reg;
+       }
+    }
+#if STATS_DEBUG > 0
+  fformat (stdout, "%U\n", format_vnet_combined_counters, mp);
+#endif
+
+  if (reg_prev && vl_api_can_send_msg (reg_prev))
+    {
+      vl_api_send_msg (reg_prev, (u8 *) mp);
+    }
+  else
+    {
+      vl_msg_api_free (mp);
+    }
+}
+
 static void
 do_combined_interface_counters (stats_main_t * sm)
 {
@@ -186,7 +629,7 @@ do_combined_interface_counters (stats_main_t * sm)
   vnet_interface_main_t *im = sm->interface_main;
   api_main_t *am = sm->api_main;
   vl_shmem_hdr_t *shmem_hdr = am->shmem_hdr;
-  unix_shared_memory_queue_t *q = shmem_hdr->vl_input_queue;
+  svm_queue_t *q = shmem_hdr->vl_input_queue;
   vlib_combined_counter_main_t *cm;
   u32 items_this_message = 0;
   vlib_counter_t v, *vp = 0;
@@ -231,35 +674,447 @@ do_combined_interface_counters (stats_main_t * sm)
   vnet_interface_counter_unlock (im);
 }
 
-/* from .../vnet/vnet/ip/lookup.c. Yuck */
-typedef CLIB_PACKED (struct
-                    {
-                    ip4_address_t address;
-u32 address_length: 6;
-u32 index:          26;
-                    }) ip4_route_t;
+/**********************************
+ * Per Interface Combined stats
+ **********************************/
 
+/* Request from client registering interfaces it wants */
 static void
-ip46_fib_stats_delay (stats_main_t * sm, u32 sec, u32 nsec)
+  vl_api_want_per_interface_combined_stats_t_handler
+  (vl_api_want_per_interface_combined_stats_t * mp)
 {
-  struct timespec _req, *req = &_req;
-  struct timespec _rem, *rem = &_rem;
+  stats_main_t *sm = &stats_main;
+  vpe_client_registration_t rp;
+  vl_api_want_per_interface_combined_stats_reply_t *rmp;
+  vlib_combined_counter_main_t *cm;
+  uword *p;
+  i32 retval = 0;
+  vl_api_registration_t *reg;
+  u32 i, swif, num = 0;
 
-  req->tv_sec = sec;
-  req->tv_nsec = nsec;
-  while (1)
-    {
-      if (nanosleep (req, rem) == 0)
-       break;
-      *req = *rem;
-      if (errno == EINTR)
-       continue;
-      clib_unix_warning ("nanosleep");
-      break;
-    }
-}
+  num = ntohl (mp->num);
 
-/**
+  /*
+   * Validate sw_if_indexes before registering
+   */
+  for (i = 0; i < num; i++)
+    {
+      swif = ntohl (mp->sw_ifs[i]);
+
+      /*
+       * Check it's a real sw_if_index that the client is allowed to see
+       */
+      if (swif != ~0)
+       {
+         if (pool_is_free_index (sm->interface_main->sw_interfaces, swif))
+           {
+             retval = VNET_API_ERROR_INVALID_SW_IF_INDEX;
+             goto reply;
+           }
+       }
+    }
+
+  for (i = 0; i < num; i++)
+    {
+      swif = ntohl (mp->sw_ifs[i]);
+
+      rp.client_index = mp->client_index;
+      rp.client_pid = mp->pid;
+      handle_client_registration (&rp, IDX_PER_INTERFACE_COMBINED_COUNTERS,
+                                 swif, ntohl (mp->enable_disable));
+    }
+
+reply:
+  reg = vl_api_client_index_to_registration (mp->client_index);
+  if (!reg)
+    {
+      for (i = 0; i < num; i++)
+       {
+         swif = ntohl (mp->sw_ifs[i]);
+
+         sm->enable_poller =
+           clear_client_for_stat (IDX_PER_INTERFACE_COMBINED_COUNTERS, swif,
+                                  mp->client_index);
+       }
+      return;
+    }
+
+  rmp = vl_msg_api_alloc (sizeof (*rmp));
+  rmp->_vl_msg_id = ntohs (VL_API_WANT_PER_INTERFACE_COMBINED_STATS_REPLY);
+  rmp->context = mp->context;
+  rmp->retval = retval;
+
+  vl_api_send_msg (reg, (u8 *) rmp);
+}
+
+/* Per Interface Combined distribution to client */
+static void
+do_combined_per_interface_counters (stats_main_t * sm)
+{
+  vl_api_vnet_per_interface_combined_counters_t *mp = 0;
+  vnet_interface_main_t *im = sm->interface_main;
+  api_main_t *am = sm->api_main;
+  vl_shmem_hdr_t *shmem_hdr = am->shmem_hdr;
+  vl_api_registration_t *vl_reg;
+  vlib_combined_counter_main_t *cm;
+  vl_api_vnet_combined_counter_t *vp = 0;
+  vlib_counter_t v;
+  u32 i, j;
+  vpe_client_stats_registration_t *reg;
+  vpe_client_registration_t *client;
+  u32 *sw_if_index = 0;
+
+  vnet_interface_counter_lock (im);
+
+  vec_reset_length (sm->regs_tmp);
+
+  /* *INDENT-OFF* */
+  pool_foreach (reg,
+                sm->stats_registrations[IDX_PER_INTERFACE_COMBINED_COUNTERS],
+               ({ vec_add1 (sm->regs_tmp, reg); }));
+  /* *INDENT-ON* */
+
+  for (i = 0; i < vec_len (sm->regs_tmp); i++)
+    {
+      reg = sm->regs_tmp[i];
+      if (reg->item == ~0)
+       {
+         vnet_interface_counter_unlock (im);
+         do_combined_interface_counters (sm);
+         vnet_interface_counter_lock (im);
+         continue;
+       }
+      vec_reset_length (sm->clients_tmp);
+
+      /* *INDENT-OFF* */
+      pool_foreach (client, reg->clients, ({ vec_add1 (sm->clients_tmp,
+                                                     client);}));
+      /* *INDENT-ON* */
+
+      for (j = 0; j < vec_len (sm->clients_tmp); j++)
+       {
+         client = sm->clients_tmp[j];
+
+         vl_reg = vl_api_client_index_to_registration (client->client_index);
+
+         //Client may have disconnected abruptly, clean up so we don't keep polling for nothing.
+         if (!vl_reg)
+           {
+             sm->enable_poller =
+               clear_client_for_stat (IDX_PER_INTERFACE_COMBINED_COUNTERS,
+                                      reg->item, client->client_index);
+             continue;
+           }
+         mp = vl_msg_api_alloc_as_if_client (sizeof (*mp) + sizeof (*vp));
+         memset (mp, 0, sizeof (*mp));
+
+         mp->_vl_msg_id =
+           ntohs (VL_API_VNET_PER_INTERFACE_COMBINED_COUNTERS);
+
+         /*
+          * count will eventually be used to optimise the batching
+          * of per-client messages for each stat. For now it is set to 1 and
+          * we iterate. This does not affect the API.
+          *
+          * FIXME instead of enqueueing here, this should be sent to a batch
+          * storer for per-client transmission. Each "mp" sent would be a single entry
+          * and if a client is listening to other sw_if_indexes for same, it would be
+          * appended to that *mp
+          *
+          *
+          * FIXME(s):
+          * - capturing the timestamp of the counters "when VPP knew them" is important.
+          * Less so is that the timing of the delivery to the control plane be in the same
+          * timescale.
+
+          * i.e. As long as the control plane can delta messages from VPP and work out
+          * velocity etc based on the timestamp, it can do so in a more "batch mode".
+
+          * It would be beneficial to keep a "per-client" message queue, and then
+          * batch all the stat messages for a client into one message, with
+          * discrete timestamps.
+
+          * Given this particular API is for "per interface" one assumes that the scale
+          * is less than the ~0 case, which the prior API is suited for.
+          */
+
+         /*
+          * 1 message per api call for now
+          */
+         mp->count = htonl (1);
+         mp->timestamp = htonl (vlib_time_now (sm->vlib_main));
+
+         vp = (vl_api_vnet_combined_counter_t *) mp->data;
+         vp->sw_if_index = htonl (reg->item);
+
+         im = &vnet_get_main ()->interface_main;
+         cm = im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_RX;
+         vlib_get_combined_counter (cm, reg->item, &v);
+         clib_mem_unaligned (&vp->rx_packets, u64) =
+           clib_host_to_net_u64 (v.packets);
+         clib_mem_unaligned (&vp->rx_bytes, u64) =
+           clib_host_to_net_u64 (v.bytes);
+         cm = im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX;
+         vlib_get_combined_counter (cm, reg->item, &v);
+         clib_mem_unaligned (&vp->tx_packets, u64) =
+           clib_host_to_net_u64 (v.packets);
+         clib_mem_unaligned (&vp->tx_bytes, u64) =
+           clib_host_to_net_u64 (v.bytes);
+
+         vl_api_send_msg (vl_reg, (u8 *) mp);
+       }
+    }
+
+  vnet_interface_counter_unlock (im);
+}
+
+/**********************************
+ * Per Interface simple stats
+ **********************************/
+
+/* Request from client registering interfaces it wants */
+static void
+  vl_api_want_per_interface_simple_stats_t_handler
+  (vl_api_want_per_interface_simple_stats_t * mp)
+{
+  stats_main_t *sm = &stats_main;
+  vpe_client_registration_t rp;
+  vl_api_want_per_interface_simple_stats_reply_t *rmp;
+  vlib_simple_counter_main_t *cm;
+  uword *p;
+  i32 retval = 0;
+  vl_api_registration_t *reg;
+  u32 i, swif, num = 0;
+
+  num = ntohl (mp->num);
+
+  for (i = 0; i < num; i++)
+    {
+      swif = ntohl (mp->sw_ifs[i]);
+
+      /* Check it's a real sw_if_index that the client is allowed to see */
+      if (swif != ~0)
+       {
+         if (pool_is_free_index (sm->interface_main->sw_interfaces, swif))
+           {
+             retval = VNET_API_ERROR_INVALID_SW_IF_INDEX;
+             goto reply;
+           }
+       }
+    }
+
+  for (i = 0; i < num; i++)
+    {
+      swif = ntohl (mp->sw_ifs[i]);
+
+      rp.client_index = mp->client_index;
+      rp.client_pid = mp->pid;
+      handle_client_registration (&rp, IDX_PER_INTERFACE_SIMPLE_COUNTERS,
+                                 swif, ntohl (mp->enable_disable));
+    }
+
+reply:
+  reg = vl_api_client_index_to_registration (mp->client_index);
+
+  /* Client may have disconnected abruptly, clean up */
+  if (!reg)
+    {
+      for (i = 0; i < num; i++)
+       {
+         swif = ntohl (mp->sw_ifs[i]);
+         sm->enable_poller =
+           clear_client_for_stat (IDX_PER_INTERFACE_SIMPLE_COUNTERS, swif,
+                                  mp->client_index);
+       }
+
+      return;
+    }
+
+
+  rmp = vl_msg_api_alloc (sizeof (*rmp));
+  rmp->_vl_msg_id = ntohs (VL_API_WANT_PER_INTERFACE_SIMPLE_STATS_REPLY);
+  rmp->context = mp->context;
+  rmp->retval = retval;
+
+  vl_api_send_msg (reg, (u8 *) rmp);
+}
+
+/* Per Interface Simple distribution to client */
+static void
+do_simple_per_interface_counters (stats_main_t * sm)
+{
+  vl_api_vnet_per_interface_simple_counters_t *mp = 0;
+  vnet_interface_main_t *im = sm->interface_main;
+  api_main_t *am = sm->api_main;
+  vl_shmem_hdr_t *shmem_hdr = am->shmem_hdr;
+  vl_api_registration_t *vl_reg;
+  vlib_simple_counter_main_t *cm;
+  u32 i, j, size;
+  vpe_client_stats_registration_t *reg;
+  vpe_client_registration_t *client;
+  u32 timestamp, count;
+  vl_api_vnet_simple_counter_t *vp = 0;
+  counter_t v;
+
+  vnet_interface_counter_lock (im);
+
+  vec_reset_length (sm->regs_tmp);
+
+  /* *INDENT-OFF* */
+  pool_foreach (reg,
+               sm->stats_registrations[IDX_PER_INTERFACE_SIMPLE_COUNTERS],
+                ({ vec_add1 (sm->regs_tmp, reg); }));
+  /* *INDENT-ON* */
+
+  for (i = 0; i < vec_len (sm->regs_tmp); i++)
+    {
+      reg = sm->regs_tmp[i];
+      if (reg->item == ~0)
+       {
+         vnet_interface_counter_unlock (im);
+         do_simple_interface_counters (sm);
+         vnet_interface_counter_lock (im);
+         continue;
+       }
+      vec_reset_length (sm->clients_tmp);
+
+      /* *INDENT-OFF* */
+      pool_foreach (client, reg->clients, ({ vec_add1 (sm->clients_tmp,
+                                                     client);}));
+      /* *INDENT-ON* */
+
+      for (j = 0; j < vec_len (sm->clients_tmp); j++)
+       {
+         client = sm->clients_tmp[j];
+         vl_reg = vl_api_client_index_to_registration (client->client_index);
+
+         /* Client may have disconnected abruptly, clean up */
+         if (!vl_reg)
+           {
+             sm->enable_poller =
+               clear_client_for_stat (IDX_PER_INTERFACE_SIMPLE_COUNTERS,
+                                      reg->item, client->client_index);
+             continue;
+           }
+
+         mp = vl_msg_api_alloc_as_if_client (sizeof (*mp) + sizeof (*vp));
+         memset (mp, 0, sizeof (*mp));
+         mp->_vl_msg_id = ntohs (VL_API_VNET_PER_INTERFACE_SIMPLE_COUNTERS);
+
+         /*
+          * count will eventually be used to optimise the batching
+          * of per-client messages for each stat. For now it is set to 1 and
+          * we iterate. This does not affect the API.
+          *
+          * FIXME instead of enqueueing here, this should be sent to a batch
+          * storer for per-client transmission. Each "mp" sent would be a single entry
+          * and if a client is listening to other sw_if_indexes for same, it would be
+          * appended to that *mp
+          *
+          *
+          * FIXME(s):
+          * - capturing the timestamp of the counters "when VPP knew them" is important.
+          * Less so is that the timing of the delivery to the control plane be in the same
+          * timescale.
+
+          * i.e. As long as the control plane can delta messages from VPP and work out
+          * velocity etc based on the timestamp, it can do so in a more "batch mode".
+
+          * It would be beneficial to keep a "per-client" message queue, and then
+          * batch all the stat messages for a client into one message, with
+          * discrete timestamps.
+
+          * Given this particular API is for "per interface" one assumes that the scale
+          * is less than the ~0 case, which the prior API is suited for.
+          */
+
+         /*
+          * 1 message per api call for now
+          */
+         mp->count = htonl (1);
+         mp->timestamp = htonl (vlib_time_now (sm->vlib_main));
+         vp = (vl_api_vnet_simple_counter_t *) mp->data;
+
+         vp->sw_if_index = htonl (reg->item);
+
+         // VNET_INTERFACE_COUNTER_DROP
+         cm = im->sw_if_counters + VNET_INTERFACE_COUNTER_DROP;
+         v = vlib_get_simple_counter (cm, reg->item);
+         clib_mem_unaligned (&vp->drop, u64) = clib_host_to_net_u64 (v);
+
+         // VNET_INTERFACE_COUNTER_PUNT
+         cm = im->sw_if_counters + VNET_INTERFACE_COUNTER_PUNT;
+         v = vlib_get_simple_counter (cm, reg->item);
+         clib_mem_unaligned (&vp->punt, u64) = clib_host_to_net_u64 (v);
+
+         // VNET_INTERFACE_COUNTER_IP4
+         cm = im->sw_if_counters + VNET_INTERFACE_COUNTER_IP4;
+         v = vlib_get_simple_counter (cm, reg->item);
+         clib_mem_unaligned (&vp->rx_ip4, u64) = clib_host_to_net_u64 (v);
+
+         //VNET_INTERFACE_COUNTER_IP6
+         cm = im->sw_if_counters + VNET_INTERFACE_COUNTER_IP6;
+         v = vlib_get_simple_counter (cm, reg->item);
+         clib_mem_unaligned (&vp->rx_ip6, u64) = clib_host_to_net_u64 (v);
+
+         //VNET_INTERFACE_COUNTER_RX_NO_BUF
+         cm = im->sw_if_counters + VNET_INTERFACE_COUNTER_RX_NO_BUF;
+         v = vlib_get_simple_counter (cm, reg->item);
+         clib_mem_unaligned (&vp->rx_no_buffer, u64) =
+           clib_host_to_net_u64 (v);
+
+         //VNET_INTERFACE_COUNTER_RX_MISS
+         cm = im->sw_if_counters + VNET_INTERFACE_COUNTER_RX_MISS;
+         v = vlib_get_simple_counter (cm, reg->item);
+         clib_mem_unaligned (&vp->rx_miss, u64) = clib_host_to_net_u64 (v);
+
+         //VNET_INTERFACE_COUNTER_RX_ERROR
+         cm = im->sw_if_counters + VNET_INTERFACE_COUNTER_RX_ERROR;
+         v = vlib_get_simple_counter (cm, reg->item);
+         clib_mem_unaligned (&vp->rx_error, u64) = clib_host_to_net_u64 (v);
+
+         //VNET_INTERFACE_COUNTER_TX_ERROR
+         cm = im->sw_if_counters + VNET_INTERFACE_COUNTER_TX_ERROR;
+         v = vlib_get_simple_counter (cm, reg->item);
+         clib_mem_unaligned (&vp->tx_error, u64) = clib_host_to_net_u64 (v);
+
+         //VNET_INTERFACE_COUNTER_MPLS
+         cm = im->sw_if_counters + VNET_INTERFACE_COUNTER_MPLS;
+         v = vlib_get_simple_counter (cm, reg->item);
+         clib_mem_unaligned (&vp->rx_mpls, u64) = clib_host_to_net_u64 (v);
+
+         vl_api_send_msg (vl_reg, (u8 *) mp);
+       }
+    }
+
+  vnet_interface_counter_unlock (im);
+}
+
+/**********************************
+ * Per FIB IP4 stats
+ **********************************/
+
+static void
+ip46_fib_stats_delay (stats_main_t * sm, u32 sec, u32 nsec)
+{
+  struct timespec _req, *req = &_req;
+  struct timespec _rem, *rem = &_rem;
+
+  req->tv_sec = sec;
+  req->tv_nsec = nsec;
+  while (1)
+    {
+      if (nanosleep (req, rem) == 0)
+       break;
+      *req = *rem;
+      if (errno == EINTR)
+       continue;
+      clib_unix_warning ("nanosleep");
+      break;
+    }
+}
+
+/**
  * @brief The context passed when collecting adjacency counters
  */
 typedef struct ip4_nbr_stats_ctx_t_
@@ -306,7 +1161,7 @@ ip4_nbr_ship (stats_main_t * sm, ip4_nbr_stats_ctx_t * ctx)
 {
   api_main_t *am = sm->api_main;
   vl_shmem_hdr_t *shmem_hdr = am->shmem_hdr;
-  unix_shared_memory_queue_t *q = shmem_hdr->vl_input_queue;
+  svm_queue_t *q = shmem_hdr->vl_input_queue;
   vl_api_vnet_ip4_nbr_counters_t *mp = 0;
   int first = 0;
 
@@ -346,11 +1201,11 @@ ip4_nbr_ship (stats_main_t * sm, ip4_nbr_stats_ctx_t * ctx)
       /*
        * send to the shm q
        */
-      unix_shared_memory_queue_lock (q);
-      pause = unix_shared_memory_queue_is_full (q);
+      svm_queue_lock (q);
+      pause = svm_queue_is_full (q);
 
       vl_msg_api_send_shmem_nolock (q, (u8 *) & mp);
-      unix_shared_memory_queue_unlock (q);
+      svm_queue_unlock (q);
       dsunlock (sm);
 
       if (pause)
@@ -360,7 +1215,7 @@ ip4_nbr_ship (stats_main_t * sm, ip4_nbr_stats_ctx_t * ctx)
 }
 
 static void
-do_ip4_nbrs (stats_main_t * sm)
+do_ip4_nbr_counters (stats_main_t * sm)
 {
   vnet_main_t *vnm = vnet_get_main ();
   vnet_interface_main_t *im = &vnm->interface_main;
@@ -461,7 +1316,7 @@ ip6_nbr_ship (stats_main_t * sm,
 {
   api_main_t *am = sm->api_main;
   vl_shmem_hdr_t *shmem_hdr = am->shmem_hdr;
-  unix_shared_memory_queue_t *q = shmem_hdr->vl_input_queue;
+  svm_queue_t *q = shmem_hdr->vl_input_queue;
   vl_api_vnet_ip6_nbr_counters_t *mp = 0;
   int first = 0;
 
@@ -501,11 +1356,11 @@ ip6_nbr_ship (stats_main_t * sm,
       /*
        * send to the shm q
        */
-      unix_shared_memory_queue_lock (q);
-      pause = unix_shared_memory_queue_is_full (q);
+      svm_queue_lock (q);
+      pause = svm_queue_is_full (q);
 
       vl_msg_api_send_shmem_nolock (q, (u8 *) & mp);
-      unix_shared_memory_queue_unlock (q);
+      svm_queue_unlock (q);
       dsunlock (sm);
 
       if (pause)
@@ -515,7 +1370,7 @@ ip6_nbr_ship (stats_main_t * sm,
 }
 
 static void
-do_ip6_nbrs (stats_main_t * sm)
+do_ip6_nbr_counters (stats_main_t * sm)
 {
   vnet_main_t *vnm = vnet_get_main ();
   vnet_interface_main_t *im = &vnm->interface_main;
@@ -567,172 +1422,475 @@ do_ip6_nbrs (stats_main_t * sm)
 }
 
 static void
-do_ip4_fibs (stats_main_t * sm)
+do_ip4_fib_counters (stats_main_t * sm)
 {
   ip4_main_t *im4 = &ip4_main;
   api_main_t *am = sm->api_main;
   vl_shmem_hdr_t *shmem_hdr = am->shmem_hdr;
-  unix_shared_memory_queue_t *q = shmem_hdr->vl_input_queue;
-  static ip4_route_t *routes;
+  svm_queue_t *q = shmem_hdr->vl_input_queue;
   ip4_route_t *r;
   fib_table_t *fib;
   ip4_fib_t *v4_fib;
-  static uword *results;
+  do_ip46_fibs_t *do_fibs;
   vl_api_vnet_ip4_fib_counters_t *mp = 0;
   u32 items_this_message;
   vl_api_ip4_fib_counter_t *ctrp = 0;
   u32 start_at_fib_index = 0;
-  int i;
+  int i, j, k;
+
+  do_fibs = &sm->do_ip46_fibs;
 
 again:
+  vec_reset_length (do_fibs->fibs);
   /* *INDENT-OFF* */
   pool_foreach (fib, im4->fibs,
-  ({
-    /* We may have bailed out due to control-plane activity */
-    while ((fib - im4->fibs) < start_at_fib_index)
-      continue;
+                ({vec_add1(do_fibs->fibs,fib);}));
 
-    v4_fib = pool_elt_at_index (im4->v4_fibs, fib->ft_index);
+  /* *INDENT-ON* */
 
-    if (mp == 0)
-      {
-       items_this_message = IP4_FIB_COUNTER_BATCH_SIZE;
-       mp = vl_msg_api_alloc_as_if_client
-         (sizeof (*mp) +
-          items_this_message * sizeof (vl_api_ip4_fib_counter_t));
-       mp->_vl_msg_id = ntohs (VL_API_VNET_IP4_FIB_COUNTERS);
-       mp->count = 0;
-       mp->vrf_id = ntohl (fib->ft_table_id);
-       ctrp = (vl_api_ip4_fib_counter_t *) mp->c;
-      }
-    else
-      {
-       /* happens if the last FIB was empty... */
-       ASSERT (mp->count == 0);
-       mp->vrf_id = ntohl (fib->ft_table_id);
-      }
+  for (j = 0; j < vec_len (do_fibs->fibs); j++)
+    {
+      fib = do_fibs->fibs[j];
+      /* We may have bailed out due to control-plane activity */
+      while ((fib - im4->fibs) < start_at_fib_index)
+       continue;
 
-    dslock (sm, 0 /* release hint */ , 1 /* tag */ );
+      v4_fib = pool_elt_at_index (im4->v4_fibs, fib->ft_index);
 
-    vec_reset_length (routes);
-    vec_reset_length (results);
+      if (mp == 0)
+       {
+         items_this_message = IP4_FIB_COUNTER_BATCH_SIZE;
+         mp = vl_msg_api_alloc_as_if_client
+           (sizeof (*mp) +
+            items_this_message * sizeof (vl_api_ip4_fib_counter_t));
+         mp->_vl_msg_id = ntohs (VL_API_VNET_IP4_FIB_COUNTERS);
+         mp->count = 0;
+         mp->vrf_id = ntohl (fib->ft_table_id);
+         ctrp = (vl_api_ip4_fib_counter_t *) mp->c;
+       }
+      else
+       {
+         /* happens if the last FIB was empty... */
+         ASSERT (mp->count == 0);
+         mp->vrf_id = ntohl (fib->ft_table_id);
+       }
 
-    for (i = 0; i < ARRAY_LEN (v4_fib->fib_entry_by_dst_address); i++)
-      {
-       uword *hash = v4_fib->fib_entry_by_dst_address[i];
-       hash_pair_t *p;
-       ip4_route_t x;
-
-       x.address_length = i;
-
-        hash_foreach_pair (p, hash,
-        ({
-          x.address.data_u32 = p->key;
-          x.index = p->value[0];
-
-          vec_add1 (routes, x);
-          if (sm->data_structure_lock->release_hint)
-            {
-              start_at_fib_index = fib - im4->fibs;
-              dsunlock (sm);
-              ip46_fib_stats_delay (sm, 0 /* sec */,
-                                    STATS_RELEASE_DELAY_NS);
-              mp->count = 0;
-              ctrp = (vl_api_ip4_fib_counter_t *)mp->c;
-              goto again;
-            }
-        }));
-      }
+      dslock (sm, 0 /* release hint */ , 1 /* tag */ );
+
+      vec_reset_length (do_fibs->ip4routes);
+      vec_reset_length (do_fibs->results);
+
+      for (i = 0; i < ARRAY_LEN (v4_fib->fib_entry_by_dst_address); i++)
+       {
+         uword *hash = v4_fib->fib_entry_by_dst_address[i];
+         hash_pair_t *p;
+         ip4_route_t x;
+
+         vec_reset_length (do_fibs->pvec);
+
+         x.address_length = i;
+
+         hash_foreach_pair (p, hash, (
+                                       {
+                                       vec_add1 (do_fibs->pvec, p);}
+                            ));
+         for (k = 0; k < vec_len (do_fibs->pvec); k++)
+           {
+             p = do_fibs->pvec[k];
+             x.address.data_u32 = p->key;
+             x.index = p->value[0];
+
+             vec_add1 (do_fibs->ip4routes, x);
+             if (sm->data_structure_lock->release_hint)
+               {
+                 start_at_fib_index = fib - im4->fibs;
+                 dsunlock (sm);
+                 ip46_fib_stats_delay (sm, 0 /* sec */ ,
+                                       STATS_RELEASE_DELAY_NS);
+                 mp->count = 0;
+                 ctrp = (vl_api_ip4_fib_counter_t *) mp->c;
+                 goto again;
+               }
+           }
+       }
 
-    vec_foreach (r, routes)
+      vec_foreach (r, do_fibs->ip4routes)
       {
-        vlib_counter_t c;
-        const dpo_id_t *dpo_id;
-
-        dpo_id = fib_entry_contribute_ip_forwarding(r->index);
-        vlib_get_combined_counter (&load_balance_main.lbm_to_counters,
-                                   (u32)dpo_id->dpoi_index, &c);
-        /*
-         * If it has actually
-         * seen at least one packet, send it.
-         */
-        if (c.packets > 0)
-          {
-
-            /* already in net byte order */
-            ctrp->address = r->address.as_u32;
-            ctrp->address_length = r->address_length;
-            ctrp->packets = clib_host_to_net_u64 (c.packets);
-            ctrp->bytes = clib_host_to_net_u64 (c.bytes);
-            mp->count++;
-            ctrp++;
-
-            if (mp->count == items_this_message)
-              {
-                mp->count = htonl (items_this_message);
-                /*
-                 * If the main thread's input queue is stuffed,
-                 * drop the data structure lock (which the main thread
-                 * may want), and take a pause.
-                 */
-                unix_shared_memory_queue_lock (q);
-                if (unix_shared_memory_queue_is_full (q))
-                  {
-                    dsunlock (sm);
-                    vl_msg_api_send_shmem_nolock (q, (u8 *) & mp);
-                    unix_shared_memory_queue_unlock (q);
-                    mp = 0;
-                    ip46_fib_stats_delay (sm, 0 /* sec */ ,
-                                          STATS_RELEASE_DELAY_NS);
-                    goto again;
-                  }
-                vl_msg_api_send_shmem_nolock (q, (u8 *) & mp);
-                unix_shared_memory_queue_unlock (q);
-
-                items_this_message = IP4_FIB_COUNTER_BATCH_SIZE;
-                mp = vl_msg_api_alloc_as_if_client
-                  (sizeof (*mp) +
-                   items_this_message * sizeof (vl_api_ip4_fib_counter_t));
-                mp->_vl_msg_id = ntohs (VL_API_VNET_IP4_FIB_COUNTERS);
-                mp->count = 0;
-                mp->vrf_id = ntohl (fib->ft_table_id);
-                ctrp = (vl_api_ip4_fib_counter_t *) mp->c;
-              }
-          }                    /* for each (mp or single) adj */
-        if (sm->data_structure_lock->release_hint)
-          {
-            start_at_fib_index = fib - im4->fibs;
-            dsunlock (sm);
-            ip46_fib_stats_delay (sm, 0 /* sec */ , STATS_RELEASE_DELAY_NS);
-            mp->count = 0;
-            ctrp = (vl_api_ip4_fib_counter_t *) mp->c;
-            goto again;
-          }
+       vlib_counter_t c;
+       const dpo_id_t *dpo_id;
+       u32 index;
+
+       dpo_id = fib_entry_contribute_ip_forwarding (r->index);
+       index = (u32) dpo_id->dpoi_index;
+
+       vlib_get_combined_counter (&load_balance_main.lbm_to_counters,
+                                  index, &c);
+       /*
+        * If it has actually
+        * seen at least one packet, send it.
+        */
+       if (c.packets > 0)
+         {
+
+           /* already in net byte order */
+           ctrp->address = r->address.as_u32;
+           ctrp->address_length = r->address_length;
+           ctrp->packets = clib_host_to_net_u64 (c.packets);
+           ctrp->bytes = clib_host_to_net_u64 (c.bytes);
+           mp->count++;
+           ctrp++;
+
+           if (mp->count == items_this_message)
+             {
+               mp->count = htonl (items_this_message);
+               /*
+                * If the main thread's input queue is stuffed,
+                * drop the data structure lock (which the main thread
+                * may want), and take a pause.
+                */
+               svm_queue_lock (q);
+               if (svm_queue_is_full (q))
+                 {
+                   dsunlock (sm);
+                   vl_msg_api_send_shmem_nolock (q, (u8 *) & mp);
+                   svm_queue_unlock (q);
+                   mp = 0;
+                   ip46_fib_stats_delay (sm, 0 /* sec */ ,
+                                         STATS_RELEASE_DELAY_NS);
+                   goto again;
+                 }
+               vl_msg_api_send_shmem_nolock (q, (u8 *) & mp);
+               svm_queue_unlock (q);
+
+               items_this_message = IP4_FIB_COUNTER_BATCH_SIZE;
+               mp = vl_msg_api_alloc_as_if_client
+                 (sizeof (*mp) +
+                  items_this_message * sizeof (vl_api_ip4_fib_counter_t));
+               mp->_vl_msg_id = ntohs (VL_API_VNET_IP4_FIB_COUNTERS);
+               mp->count = 0;
+               mp->vrf_id = ntohl (fib->ft_table_id);
+               ctrp = (vl_api_ip4_fib_counter_t *) mp->c;
+             }
+         }                     /* for each (mp or single) adj */
+       if (sm->data_structure_lock->release_hint)
+         {
+           start_at_fib_index = fib - im4->fibs;
+           dsunlock (sm);
+           ip46_fib_stats_delay (sm, 0 /* sec */ , STATS_RELEASE_DELAY_NS);
+           mp->count = 0;
+           ctrp = (vl_api_ip4_fib_counter_t *) mp->c;
+           goto again;
+         }
       }                                /* vec_foreach (routes) */
 
-    dsunlock (sm);
+      dsunlock (sm);
+
+      /* Flush any data from this fib */
+      if (mp->count)
+       {
+         mp->count = htonl (mp->count);
+         vl_msg_api_send_shmem (q, (u8 *) & mp);
+         mp = 0;
+       }
+    }
+
+  /* If e.g. the last FIB had no reportable routes, free the buffer */
+  if (mp)
+    vl_msg_api_free (mp);
+}
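
do_ip4_fib_counters above, and the MFIB and IPv6 walkers that follow, share the same batch-and-flush shape: fill a fixed-size message, send it when it is full, and flush any partial batch once the walk ends. A stripped-down, standalone sketch of that control flow (send_batch here stands in for vl_msg_api_send_shmem and friends; no locking or shared-memory queue involved):

    /* Generic batch-and-flush loop mirroring the counter walkers (sketch). */
    #include <stdio.h>

    #define BATCH_SIZE 3

    static void
    send_batch (const int *batch, int n)
    {
      printf ("sending %d records starting at %d\n", n, batch[0]);
    }

    int
    main (void)
    {
      int batch[BATCH_SIZE], n = 0, i;

      for (i = 0; i < 10; i++)
        {
          batch[n++] = i;               /* ctrp++ / mp->count++ analogue */
          if (n == BATCH_SIZE)
            {
              send_batch (batch, n);    /* message full: ship it         */
              n = 0;                    /* start a fresh message         */
            }
        }
      if (n)
        send_batch (batch, n);          /* flush any partial final batch */
      return 0;
    }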
+
+static int
+mfib_table_stats_walk_cb (fib_node_index_t fei, void *ctx)
+{
+  stats_main_t *sm = ctx;
+  do_ip46_fibs_t *do_fibs;
+  mfib_entry_t *entry;
+
+  do_fibs = &sm->do_ip46_fibs;
+  entry = mfib_entry_get (fei);
+
+  vec_add1 (do_fibs->mroutes, entry->mfe_prefix);
+
+  return (1);
+}
+
+static void
+do_ip4_mfib_counters (stats_main_t * sm)
+{
+  ip4_main_t *im4 = &ip4_main;
+  api_main_t *am = sm->api_main;
+  vl_shmem_hdr_t *shmem_hdr = am->shmem_hdr;
+  svm_queue_t *q = shmem_hdr->vl_input_queue;
+  mfib_prefix_t *pfx;
+  mfib_table_t *mfib;
+  do_ip46_fibs_t *do_fibs;
+  vl_api_vnet_ip4_mfib_counters_t *mp = 0;
+  u32 items_this_message;
+  vl_api_ip4_mfib_counter_t *ctrp = 0;
+  u32 start_at_mfib_index = 0;
+  int i, j, k;
+
+  do_fibs = &sm->do_ip46_fibs;
+
+  vec_reset_length (do_fibs->mfibs);
+  /* *INDENT-OFF* */
+  pool_foreach (mfib, im4->mfibs, ({vec_add1(do_fibs->mfibs, mfib);}));
+  /* *INDENT-ON* */
+
+  for (j = 0; j < vec_len (do_fibs->mfibs); j++)
+    {
+      mfib = do_fibs->mfibs[j];
+      /* We may have bailed out due to control-plane activity */
+      while ((mfib - im4->mfibs) < start_at_mfib_index)
+       continue;
+
+      if (mp == 0)
+       {
+         items_this_message = IP4_MFIB_COUNTER_BATCH_SIZE;
+         mp = vl_msg_api_alloc_as_if_client
+           (sizeof (*mp) +
+            items_this_message * sizeof (vl_api_ip4_mfib_counter_t));
+         mp->_vl_msg_id = ntohs (VL_API_VNET_IP4_MFIB_COUNTERS);
+         mp->count = 0;
+         mp->vrf_id = ntohl (mfib->mft_table_id);
+         ctrp = (vl_api_ip4_mfib_counter_t *) mp->c;
+       }
+      else
+       {
+         /* happens if the last MFIB was empty... */
+         ASSERT (mp->count == 0);
+         mp->vrf_id = ntohl (mfib->mft_table_id);
+       }
 
-    /* Flush any data from this fib */
-    if (mp->count)
+      vec_reset_length (do_fibs->mroutes);
+
+      /*
+       * walk the table with table updates blocked
+       */
+      dslock (sm, 0 /* release hint */ , 1 /* tag */ );
+
+      mfib_table_walk (mfib->mft_index,
+                      FIB_PROTOCOL_IP4, mfib_table_stats_walk_cb, sm);
+      dsunlock (sm);
+
+      vec_foreach (pfx, do_fibs->mroutes)
       {
-       mp->count = htonl (mp->count);
-       vl_msg_api_send_shmem (q, (u8 *) & mp);
-       mp = 0;
+       const dpo_id_t *dpo_id;
+       fib_node_index_t mfei;
+       vlib_counter_t c;
+       u32 index;
+
+       /*
+        * re-lookup the entry, since we suspend during the collection
+        */
+       mfei = mfib_table_lookup (mfib->mft_index, pfx);
+
+       if (FIB_NODE_INDEX_INVALID == mfei)
+         continue;
+
+       dpo_id = mfib_entry_contribute_ip_forwarding (mfei);
+       index = (u32) dpo_id->dpoi_index;
+
+       vlib_get_combined_counter (&replicate_main.repm_counters,
+                                  dpo_id->dpoi_index, &c);
+       /*
+        * If it has seen at least one packet, send it.
+        */
+       if (c.packets > 0)
+         {
+           /* already in net byte order */
+           memcpy (ctrp->group, &pfx->fp_grp_addr.ip4, 4);
+           memcpy (ctrp->source, &pfx->fp_src_addr.ip4, 4);
+           ctrp->group_length = pfx->fp_len;
+           ctrp->packets = clib_host_to_net_u64 (c.packets);
+           ctrp->bytes = clib_host_to_net_u64 (c.bytes);
+           mp->count++;
+           ctrp++;
+
+           if (mp->count == items_this_message)
+             {
+               mp->count = htonl (items_this_message);
+               /*
+                * If the main thread's input queue is stuffed,
+                * drop the data structure lock (which the main thread
+                * may want), and take a pause.
+                */
+               svm_queue_lock (q);
+
+               while (svm_queue_is_full (q))
+                 {
+                   svm_queue_unlock (q);
+                   ip46_fib_stats_delay (sm, 0 /* sec */ ,
+                                         STATS_RELEASE_DELAY_NS);
+                   svm_queue_lock (q);
+                 }
+               vl_msg_api_send_shmem_nolock (q, (u8 *) & mp);
+               svm_queue_unlock (q);
+
+               items_this_message = IP4_MFIB_COUNTER_BATCH_SIZE;
+               mp = vl_msg_api_alloc_as_if_client
+                 (sizeof (*mp) +
+                  items_this_message * sizeof (vl_api_ip4_mfib_counter_t));
+               mp->_vl_msg_id = ntohs (VL_API_VNET_IP4_MFIB_COUNTERS);
+               mp->count = 0;
+               mp->vrf_id = ntohl (mfib->mft_table_id);
+               ctrp = (vl_api_ip4_mfib_counter_t *) mp->c;
+             }
+         }
       }
-  }));
-  /* *INDENT-ON* */
+
+      /* Flush any data from this mfib */
+      if (mp->count)
+       {
+         mp->count = htonl (mp->count);
+         vl_msg_api_send_shmem (q, (u8 *) & mp);
+         mp = 0;
+       }
+    }
 
   /* If e.g. the last FIB had no reportable routes, free the buffer */
   if (mp)
     vl_msg_api_free (mp);
 }
 
-typedef struct
+static void
+do_ip6_mfib_counters (stats_main_t * sm)
 {
-  ip6_address_t address;
-  u32 address_length;
-  u32 index;
-} ip6_route_t;
+  ip6_main_t *im6 = &ip6_main;
+  api_main_t *am = sm->api_main;
+  vl_shmem_hdr_t *shmem_hdr = am->shmem_hdr;
+  svm_queue_t *q = shmem_hdr->vl_input_queue;
+  mfib_prefix_t *pfx;
+  mfib_table_t *mfib;
+  do_ip46_fibs_t *do_fibs;
+  vl_api_vnet_ip6_mfib_counters_t *mp = 0;
+  u32 items_this_message;
+  vl_api_ip6_mfib_counter_t *ctrp = 0;
+  u32 start_at_mfib_index = 0;
+  int i, j, k;
+
+  do_fibs = &sm->do_ip46_fibs;
+
+  vec_reset_length (do_fibs->mfibs);
+  /* *INDENT-OFF* */
+  pool_foreach (mfib, im6->mfibs, ({vec_add1(do_fibs->mfibs, mfib);}));
+  /* *INDENT-ON* */
+
+  for (j = 0; j < vec_len (do_fibs->mfibs); j++)
+    {
+      mfib = do_fibs->mfibs[j];
+      /* We may have bailed out due to control-plane activity */
+      while ((mfib - im6->mfibs) < start_at_mfib_index)
+       continue;
+
+      if (mp == 0)
+       {
+         items_this_message = IP6_MFIB_COUNTER_BATCH_SIZE;
+         mp = vl_msg_api_alloc_as_if_client
+           (sizeof (*mp) +
+            items_this_message * sizeof (vl_api_ip6_mfib_counter_t));
+         mp->_vl_msg_id = ntohs (VL_API_VNET_IP6_MFIB_COUNTERS);
+         mp->count = 0;
+         mp->vrf_id = ntohl (mfib->mft_table_id);
+         ctrp = (vl_api_ip6_mfib_counter_t *) mp->c;
+       }
+      else
+       {
+         /* happens if the last MFIB was empty... */
+         ASSERT (mp->count == 0);
+         mp->vrf_id = ntohl (mfib->mft_table_id);
+       }
+
+      vec_reset_length (do_fibs->mroutes);
+
+      /*
+       * walk the table with table updates blocked
+       */
+      dslock (sm, 0 /* release hint */ , 1 /* tag */ );
+
+      mfib_table_walk (mfib->mft_index,
+                      FIB_PROTOCOL_IP6, mfib_table_stats_walk_cb, sm);
+      dsunlock (sm);
+
+      vec_foreach (pfx, do_fibs->mroutes)
+      {
+       const dpo_id_t *dpo_id;
+       fib_node_index_t mfei;
+       vlib_counter_t c;
+       u32 index;
+
+       /*
+        * re-lookup the entry, since we suspend during the collection
+        */
+       mfei = mfib_table_lookup (mfib->mft_index, pfx);
+
+       if (FIB_NODE_INDEX_INVALID == mfei)
+         continue;
+
+       dpo_id = mfib_entry_contribute_ip_forwarding (mfei);
+       index = (u32) dpo_id->dpoi_index;
+
+       vlib_get_combined_counter (&replicate_main.repm_counters,
+                                  dpo_id->dpoi_index, &c);
+       /*
+        * If it has seen at least one packet, send it.
+        */
+       if (c.packets > 0)
+         {
+           /* already in net byte order */
+           memcpy (ctrp->group, &pfx->fp_grp_addr.ip6, 16);
+           memcpy (ctrp->source, &pfx->fp_src_addr.ip6, 16);
+           ctrp->group_length = pfx->fp_len;
+           ctrp->packets = clib_host_to_net_u64 (c.packets);
+           ctrp->bytes = clib_host_to_net_u64 (c.bytes);
+           mp->count++;
+           ctrp++;
+
+           if (mp->count == items_this_message)
+             {
+               mp->count = htonl (items_this_message);
+               /*
+                * If the main thread's input queue is stuffed,
+                * drop the data structure lock (which the main thread
+                * may want), and take a pause.
+                */
+               svm_queue_lock (q);
+
+               while (svm_queue_is_full (q))
+                 {
+                   svm_queue_unlock (q);
+                   ip46_fib_stats_delay (sm, 0 /* sec */ ,
+                                         STATS_RELEASE_DELAY_NS);
+                   svm_queue_lock (q);
+                 }
+               vl_msg_api_send_shmem_nolock (q, (u8 *) & mp);
+               svm_queue_unlock (q);
+
+               items_this_message = IP6_MFIB_COUNTER_BATCH_SIZE;
+               mp = vl_msg_api_alloc_as_if_client
+                 (sizeof (*mp) +
+                  items_this_message * sizeof (vl_api_ip6_mfib_counter_t));
+               mp->_vl_msg_id = ntohs (VL_API_VNET_IP6_MFIB_COUNTERS);
+               mp->count = 0;
+               mp->vrf_id = ntohl (mfib->mft_table_id);
+               ctrp = (vl_api_ip6_mfib_counter_t *) mp->c;
+             }
+         }
+      }
+
+      /* Flush any data from this mfib */
+      if (mp->count)
+       {
+         mp->count = htonl (mp->count);
+         vl_msg_api_send_shmem (q, (u8 *) & mp);
+         mp = 0;
+       }
+    }
+
+  /* If e.g. the last FIB had no reportable routes, free the buffer */
+  if (mp)
+    vl_msg_api_free (mp);
+}
 
 typedef struct
 {
@@ -763,149 +1921,332 @@ add_routes_in_fib (BVT (clib_bihash_kv) * kvp, void *arg)
 }
 
 static void
-do_ip6_fibs (stats_main_t * sm)
+do_ip6_fib_counters (stats_main_t * sm)
 {
   ip6_main_t *im6 = &ip6_main;
   api_main_t *am = sm->api_main;
   vl_shmem_hdr_t *shmem_hdr = am->shmem_hdr;
-  unix_shared_memory_queue_t *q = shmem_hdr->vl_input_queue;
-  static ip6_route_t *routes;
+  svm_queue_t *q = shmem_hdr->vl_input_queue;
   ip6_route_t *r;
   fib_table_t *fib;
-  static uword *results;
+  do_ip46_fibs_t *do_fibs;
   vl_api_vnet_ip6_fib_counters_t *mp = 0;
   u32 items_this_message;
   vl_api_ip6_fib_counter_t *ctrp = 0;
   u32 start_at_fib_index = 0;
   BVT (clib_bihash) * h = &im6->ip6_table[IP6_FIB_TABLE_FWDING].ip6_hash;
   add_routes_in_fib_arg_t _a, *a = &_a;
+  int i;
 
+  do_fibs = &sm->do_ip46_fibs;
 again:
+  vec_reset_length (do_fibs->fibs);
   /* *INDENT-OFF* */
   pool_foreach (fib, im6->fibs,
-  ({
-    /* We may have bailed out due to control-plane activity */
-    while ((fib - im6->fibs) < start_at_fib_index)
-      continue;
+                ({vec_add1(do_fibs->fibs,fib);}));
+  /* *INDENT-ON* */
 
-    if (mp == 0)
-      {
-       items_this_message = IP6_FIB_COUNTER_BATCH_SIZE;
-       mp = vl_msg_api_alloc_as_if_client
-         (sizeof (*mp) +
-          items_this_message * sizeof (vl_api_ip6_fib_counter_t));
-       mp->_vl_msg_id = ntohs (VL_API_VNET_IP6_FIB_COUNTERS);
-       mp->count = 0;
-       mp->vrf_id = ntohl (fib->ft_table_id);
-       ctrp = (vl_api_ip6_fib_counter_t *) mp->c;
-      }
 
-    dslock (sm, 0 /* release hint */ , 1 /* tag */ );
+  for (i = 0; i < vec_len (do_fibs->fibs); i++)
+    {
+      fib = do_fibs->fibs[i];
+      /* We may have bailed out due to control-plane activity */
+      while ((fib - im6->fibs) < start_at_fib_index)
+       continue;
 
-    vec_reset_length (routes);
-    vec_reset_length (results);
+      if (mp == 0)
+       {
+         items_this_message = IP6_FIB_COUNTER_BATCH_SIZE;
+         mp = vl_msg_api_alloc_as_if_client
+           (sizeof (*mp) +
+            items_this_message * sizeof (vl_api_ip6_fib_counter_t));
+         mp->_vl_msg_id = ntohs (VL_API_VNET_IP6_FIB_COUNTERS);
+         mp->count = 0;
+         mp->vrf_id = ntohl (fib->ft_table_id);
+         ctrp = (vl_api_ip6_fib_counter_t *) mp->c;
+       }
 
-    a->fib_index = fib - im6->fibs;
-    a->routep = &routes;
-    a->sm = sm;
+      dslock (sm, 0 /* release hint */ , 1 /* tag */ );
 
-    if (clib_setjmp (&sm->jmp_buf, 0) == 0)
-      {
-       start_at_fib_index = fib - im6->fibs;
-       BV (clib_bihash_foreach_key_value_pair) (h, add_routes_in_fib, a);
-      }
-    else
-      {
-       dsunlock (sm);
-       ip46_fib_stats_delay (sm, 0 /* sec */ ,
-                             STATS_RELEASE_DELAY_NS);
-       mp->count = 0;
-       ctrp = (vl_api_ip6_fib_counter_t *) mp->c;
-       goto again;
-      }
+      vec_reset_length (do_fibs->ip6routes);
+      vec_reset_length (do_fibs->results);
 
-    vec_foreach (r, routes)
-    {
-        vlib_counter_t c;
-
-        vlib_get_combined_counter (&load_balance_main.lbm_to_counters,
-                                   r->index, &c);
-        /*
-         * If it has actually
-         * seen at least one packet, send it.
-         */
-        if (c.packets > 0)
-          {
-            /* already in net byte order */
-            ctrp->address[0] = r->address.as_u64[0];
-            ctrp->address[1] = r->address.as_u64[1];
-            ctrp->address_length = (u8) r->address_length;
-            ctrp->packets = clib_host_to_net_u64 (c.packets);
-            ctrp->bytes = clib_host_to_net_u64 (c.bytes);
-            mp->count++;
-            ctrp++;
-
-            if (mp->count == items_this_message)
-              {
-                mp->count = htonl (items_this_message);
-                /*
-                 * If the main thread's input queue is stuffed,
-                 * drop the data structure lock (which the main thread
-                 * may want), and take a pause.
-                 */
-                unix_shared_memory_queue_lock (q);
-                if (unix_shared_memory_queue_is_full (q))
-                  {
-                    dsunlock (sm);
-                    vl_msg_api_send_shmem_nolock (q, (u8 *) & mp);
-                    unix_shared_memory_queue_unlock (q);
-                    mp = 0;
-                    ip46_fib_stats_delay (sm, 0 /* sec */ ,
-                                          STATS_RELEASE_DELAY_NS);
-                    goto again;
-                  }
-                vl_msg_api_send_shmem_nolock (q, (u8 *) & mp);
-                unix_shared_memory_queue_unlock (q);
-
-                items_this_message = IP6_FIB_COUNTER_BATCH_SIZE;
-                mp = vl_msg_api_alloc_as_if_client
-                  (sizeof (*mp) +
-                   items_this_message * sizeof (vl_api_ip6_fib_counter_t));
-                mp->_vl_msg_id = ntohs (VL_API_VNET_IP6_FIB_COUNTERS);
-                mp->count = 0;
-                mp->vrf_id = ntohl (fib->ft_table_id);
-                ctrp = (vl_api_ip6_fib_counter_t *) mp->c;
-              }
-          }
-
-        if (sm->data_structure_lock->release_hint)
-          {
-            start_at_fib_index = fib - im6->fibs;
-            dsunlock (sm);
-            ip46_fib_stats_delay (sm, 0 /* sec */ , STATS_RELEASE_DELAY_NS);
-            mp->count = 0;
-            ctrp = (vl_api_ip6_fib_counter_t *) mp->c;
-            goto again;
-          }
-    }                          /* vec_foreach (routes) */
+      a->fib_index = fib - im6->fibs;
+      a->routep = &do_fibs->ip6routes;
+      a->sm = sm;
 
-    dsunlock (sm);
+      if (clib_setjmp (&sm->jmp_buf, 0) == 0)
+       {
+         start_at_fib_index = fib - im6->fibs;
+         BV (clib_bihash_foreach_key_value_pair) (h, add_routes_in_fib, a);
+       }
+      else
+       {
+         dsunlock (sm);
+         ip46_fib_stats_delay (sm, 0 /* sec */ ,
+                               STATS_RELEASE_DELAY_NS);
+         mp->count = 0;
+         ctrp = (vl_api_ip6_fib_counter_t *) mp->c;
+         goto again;
+       }
 
-    /* Flush any data from this fib */
-    if (mp->count)
+      vec_foreach (r, do_fibs->ip6routes)
       {
-       mp->count = htonl (mp->count);
-       vl_msg_api_send_shmem (q, (u8 *) & mp);
-       mp = 0;
-      }
-  }));
-  /* *INDENT-ON* */
+       vlib_counter_t c;
+
+       vlib_get_combined_counter (&load_balance_main.lbm_to_counters,
+                                  r->index, &c);
+       /*
+        * If it has actually
+        * seen at least one packet, send it.
+        */
+       if (c.packets > 0)
+         {
+           /* already in net byte order */
+           ctrp->address[0] = r->address.as_u64[0];
+           ctrp->address[1] = r->address.as_u64[1];
+           ctrp->address_length = (u8) r->address_length;
+           ctrp->packets = clib_host_to_net_u64 (c.packets);
+           ctrp->bytes = clib_host_to_net_u64 (c.bytes);
+           mp->count++;
+           ctrp++;
+
+           if (mp->count == items_this_message)
+             {
+               mp->count = htonl (items_this_message);
+               /*
+                * If the main thread's input queue is stuffed,
+                * drop the data structure lock (which the main thread
+                * may want), and take a pause.
+                */
+               svm_queue_lock (q);
+               if (svm_queue_is_full (q))
+                 {
+                   dsunlock (sm);
+                   vl_msg_api_send_shmem_nolock (q, (u8 *) & mp);
+                   svm_queue_unlock (q);
+                   mp = 0;
+                   ip46_fib_stats_delay (sm, 0 /* sec */ ,
+                                         STATS_RELEASE_DELAY_NS);
+                   goto again;
+                 }
+               vl_msg_api_send_shmem_nolock (q, (u8 *) & mp);
+               svm_queue_unlock (q);
+
+               items_this_message = IP6_FIB_COUNTER_BATCH_SIZE;
+               mp = vl_msg_api_alloc_as_if_client
+                 (sizeof (*mp) +
+                  items_this_message * sizeof (vl_api_ip6_fib_counter_t));
+               mp->_vl_msg_id = ntohs (VL_API_VNET_IP6_FIB_COUNTERS);
+               mp->count = 0;
+               mp->vrf_id = ntohl (fib->ft_table_id);
+               ctrp = (vl_api_ip6_fib_counter_t *) mp->c;
+             }
+         }
+
+       if (sm->data_structure_lock->release_hint)
+         {
+           start_at_fib_index = fib - im6->fibs;
+           dsunlock (sm);
+           ip46_fib_stats_delay (sm, 0 /* sec */ , STATS_RELEASE_DELAY_NS);
+           mp->count = 0;
+           ctrp = (vl_api_ip6_fib_counter_t *) mp->c;
+           goto again;
+         }
+      }                                /* vec_foreach (routes) */
+
+      dsunlock (sm);
+
+      /* Flush any data from this fib */
+      if (mp->count)
+       {
+         mp->count = htonl (mp->count);
+         vl_msg_api_send_shmem (q, (u8 *) & mp);
+         mp = 0;
+       }
+    }
 
   /* If e.g. the last FIB had no reportable routes, free the buffer */
   if (mp)
     vl_msg_api_free (mp);
 }
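
 The loop above implements a cooperative retry protocol: if the main thread signals that it wants the data-structure lock (release_hint) or the shared-memory queue fills up, the walker drops the lock, pauses for STATS_RELEASE_DELAY_NS, remembers which FIB it reached in start_at_fib_index, and restarts from the `again` label, discarding the partially built message for the interrupted FIB. A compressed, hypothetical sketch of that control flow follows; take_lock/drop_lock/lock_wanted/queue_full/sleep_briefly/report and item_t are illustrative stand-ins, not VPP functions.

 /* Hedged sketch of the yield-and-resume walk used by do_ip6_fib_counters(). */
 typedef struct { int id; } item_t;

 /* hypothetical helpers, declared only so the sketch is self-contained */
 void take_lock (void);
 void drop_lock (void);
 int lock_wanted (void);
 int queue_full (void);
 void sleep_briefly (void);
 void report (item_t * it);

 static void
 walk_with_yield (item_t * items, int n_items)
 {
   int i, start_at = 0;

 again:
   for (i = 0; i < n_items; i++)
     {
       if (i < start_at)
         continue;              /* finished before the last yield */

       take_lock ();
       if (lock_wanted () || queue_full ())
         {
           start_at = i;        /* redo this item after the pause */
           drop_lock ();
           sleep_briefly ();
           goto again;
         }
       report (&items[i]);
       drop_lock ();
     }
 }
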
 
+typedef struct udp_encap_stat_t_
+{
+  u32 ue_id;
+  u64 stats[2];
+} udp_encap_stat_t;
+
+typedef struct udp_encap_stats_walk_t_
+{
+  udp_encap_stat_t *stats;
+} udp_encap_stats_walk_t;
+
+static int
+udp_encap_stats_walk_cb (index_t uei, void *arg)
+{
+  udp_encap_stats_walk_t *ctx = arg;
+  udp_encap_stat_t *stat;
+  udp_encap_t *ue;
+
+  ue = udp_encap_get (uei);
+  vec_add2 (ctx->stats, stat, 1);
+
+  stat->ue_id = uei;
+  udp_encap_get_stats (ue->ue_id, &stat->stats[0], &stat->stats[1]);
+
+  return (1);
+}
+
+static void
+udp_encap_ship (udp_encap_stats_walk_t * ctx)
+{
+  vl_api_vnet_udp_encap_counters_t *mp;
+  vl_shmem_hdr_t *shmem_hdr;
+  stats_main_t *sm;
+  api_main_t *am;
+  svm_queue_t *q;
+
+  mp = NULL;
+  sm = &stats_main;
+  am = sm->api_main;
+  shmem_hdr = am->shmem_hdr;
+  q = shmem_hdr->vl_input_queue;
+
+  /*
+   * If the walk context has counters, which may be left over from the last
+   * suspend, then we continue from there.
+   */
+  while (0 != vec_len (ctx->stats))
+    {
+      u32 n_items = MIN (vec_len (ctx->stats),
+                        UDP_ENCAP_COUNTER_BATCH_SIZE);
+      u8 pause = 0;
+
+      dslock (sm, 0 /* release hint */ , 1 /* tag */ );
+
+      mp = vl_msg_api_alloc_as_if_client (sizeof (*mp) +
+                                         (n_items *
+                                          sizeof
+                                          (vl_api_udp_encap_counter_t)));
+      mp->_vl_msg_id = ntohs (VL_API_VNET_UDP_ENCAP_COUNTERS);
+      mp->count = ntohl (n_items);
+
+      /*
+       * copy the counters from the back of the context, then we can easily
+       * 'erase' them by resetting the vector length.
+       * The order we push the stats to the caller is not important.
+       */
+      clib_memcpy (mp->c,
+                  &ctx->stats[vec_len (ctx->stats) - n_items],
+                  n_items * sizeof (*ctx->stats));
+
+      _vec_len (ctx->stats) = vec_len (ctx->stats) - n_items;
+
+      /*
+       * send to the shm q
+       */
+      svm_queue_lock (q);
+      pause = svm_queue_is_full (q);
+
+      vl_msg_api_send_shmem_nolock (q, (u8 *) & mp);
+      svm_queue_unlock (q);
+      dsunlock (sm);
+
+      if (pause)
+       ip46_fib_stats_delay (sm, 0 /* sec */ ,
+                             STATS_RELEASE_DELAY_NS);
+    }
+}
+
+static void
+do_udp_encap_counters (stats_main_t * sm)
+{
+  udp_encap_stat_t *stat;
+
+  udp_encap_stats_walk_t ctx = {
+    .stats = NULL,
+  };
+
+  dslock (sm, 0 /* release hint */ , 1 /* tag */ );
+  udp_encap_walk (udp_encap_stats_walk_cb, &ctx);
+  dsunlock (sm);
+
+  udp_encap_ship (&ctx);
+}
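
 do_udp_encap_counters() splits collection from transmission: udp_encap_walk() runs once under the data-structure lock and only snapshots (index, packets, bytes) tuples into a vector with vec_add2(), and udp_encap_ship() then drains that vector from the back in UDP_ENCAP_COUNTER_BATCH_SIZE chunks, shrinking the vector length after each copy and pausing whenever the shared-memory queue is full. Below is a minimal stand-alone sketch of the drain-from-the-back batching, using plain C arrays instead of the VPP vector and message allocator; stat_t, BATCH_SIZE and the printf output are illustrative only.

 #include <stdint.h>
 #include <stdio.h>
 #include <string.h>

 #define BATCH_SIZE 4           /* stands in for UDP_ENCAP_COUNTER_BATCH_SIZE */

 typedef struct
 {
   uint32_t id;
   uint64_t packets, bytes;
 } stat_t;

 /* Drain n collected stats in batches, copying from the back of the array so
  * the remainder can be "erased" simply by shrinking the length, the same way
  * udp_encap_ship() shrinks its vector with _vec_len(). */
 static void
 ship (const stat_t * stats, size_t n)
 {
   stat_t batch[BATCH_SIZE];

   while (n > 0)
     {
       size_t this_batch = n < BATCH_SIZE ? n : BATCH_SIZE;

       memcpy (batch, &stats[n - this_batch], this_batch * sizeof (stat_t));
       n -= this_batch;

       for (size_t i = 0; i < this_batch; i++)
         printf ("udp-encap %u: %llu pkts, %llu bytes\n", batch[i].id,
                 (unsigned long long) batch[i].packets,
                 (unsigned long long) batch[i].bytes);
     }
 }

 int
 main (void)
 {
   stat_t stats[] = { {1, 10, 1000}, {2, 20, 2000}, {3, 0, 0},
                      {7, 5, 640}, {9, 1, 64} };
   ship (stats, sizeof (stats) / sizeof (stats[0]));
   return 0;
 }
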
+
+int
+stats_set_poller_delay (u32 poller_delay_sec)
+{
+  stats_main_t *sm = &stats_main;
+  if (!poller_delay_sec)
+    {
+      return VNET_API_ERROR_INVALID_ARGUMENT;
+    }
+  else
+    {
+      sm->stats_poll_interval_in_seconds = poller_delay_sec;
+      return 0;
+    }
+}
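
 stats_set_poller_delay() is the single validation point for the poll interval, shared by the configuration path below and by anything else that may set it later: zero is rejected, any other value is stored as-is. Illustrative expectations (a hedged example, not part of the patch):

 /* hedged example only; ASSERT and the error code come from vppinfra/vnet */
 ASSERT (stats_set_poller_delay (0) == VNET_API_ERROR_INVALID_ARGUMENT);
 ASSERT (stats_set_poller_delay (30) == 0);
 /* stats_main.stats_poll_interval_in_seconds is now 30 */
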
+
+static clib_error_t *
+stats_config (vlib_main_t * vm, unformat_input_t * input)
+{
+  u32 sec;
+
+  while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
+    {
+      if (unformat (input, "interval %u", &sec))
+       {
+         int rv = stats_set_poller_delay (sec);
+         if (rv)
+           {
+             return clib_error_return (0,
+                                       "`stats_set_poller_delay' API call failed, rv=%d:%U",
+                                       (int) rv, format_vnet_api_errno, rv);
+           }
+         return 0;
+       }
+      else
+       {
+         return clib_error_return (0, "unknown input '%U'",
+                                   format_unformat_error, input);
+       }
+    }
+  return 0;
+}
+
+/* stats { ... } configuration. */
+/*?
+ *
+ * @cfgcmd{interval, <seconds>}
+ * Configure stats poller delay to be @c seconds.
+ *
+?*/
+VLIB_CONFIG_FUNCTION (stats_config, "stats");
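
 With the configuration function registered, the poll interval becomes tunable from startup.conf instead of the hard-coded 10-second delay the thread loop used previously. For example, a stanza such as:

 stats {
   interval 30
 }

 makes the stats thread wake every 30 seconds; `interval 0` would be rejected by stats_set_poller_delay() and reported as a configuration error at startup.
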
+
+static void
+  vl_api_stats_get_poller_delay_t_handler
+  (vl_api_stats_get_poller_delay_t * mp)
+{
+  stats_main_t *sm = &stats_main;
+  vl_api_registration_t *reg;
+  reg = vl_api_client_index_to_registration (mp->client_index);
+  if (!reg)
+    return;
+  vl_api_stats_get_poller_delay_reply_t *rmp;
+
+  rmp = vl_msg_api_alloc (sizeof (*rmp));
+  rmp->_vl_msg_id = ntohs (VL_API_STATS_GET_POLLER_DELAY_REPLY);
+  rmp->context = mp->context;
+  rmp->retval = 0;
+  rmp->delay = clib_host_to_net_u32 (sm->stats_poll_interval_in_seconds);
+
+  vl_api_send_msg (reg, (u8 *) rmp);
+
+}
+
 static void
 stats_thread_fn (void *arg)
 {
@@ -928,188 +2269,92 @@ stats_thread_fn (void *arg)
 
   while (1)
     {
-      /* 10 second poll interval */
-      ip46_fib_stats_delay (sm, 10 /* secs */ , 0 /* nsec */ );
+      ip46_fib_stats_delay (sm, sm->stats_poll_interval_in_seconds,
+                           0 /* nsec */ );
 
       if (!(sm->enable_poller))
-       continue;
-      do_simple_interface_counters (sm);
-      do_combined_interface_counters (sm);
-      do_ip4_fibs (sm);
-      do_ip6_fibs (sm);
-      do_ip4_nbrs (sm);
-      do_ip6_nbrs (sm);
-    }
-}
+       {
+         continue;
+       }
+      if (pool_elts
+         (sm->stats_registrations[IDX_PER_INTERFACE_COMBINED_COUNTERS]))
+       do_combined_per_interface_counters (sm);
 
-static void
-  vl_api_vnet_interface_simple_counters_t_handler
-  (vl_api_vnet_interface_simple_counters_t * mp)
-{
-  vpe_client_registration_t *reg;
-  stats_main_t *sm = &stats_main;
-  unix_shared_memory_queue_t *q, *q_prev = NULL;
-  vl_api_vnet_interface_simple_counters_t *mp_copy = NULL;
-  u32 mp_size;
+      if (pool_elts
+         (sm->stats_registrations[IDX_PER_INTERFACE_SIMPLE_COUNTERS]))
+       do_simple_per_interface_counters (sm);
 
-#if STATS_DEBUG > 0
-  char *counter_name;
-  u32 count, sw_if_index;
-  int i;
-#endif
+      if (pool_elts (sm->stats_registrations[IDX_IP4_FIB_COUNTERS]))
+       do_ip4_fib_counters (sm);
 
-  mp_size = sizeof (*mp) + (ntohl (mp->count) * sizeof (u64));
+      if (pool_elts (sm->stats_registrations[IDX_IP6_FIB_COUNTERS]))
+       do_ip6_fib_counters (sm);
 
-  /* *INDENT-OFF* */
-  pool_foreach(reg, sm->stats_registrations,
-              ({
-                q = vl_api_client_index_to_input_queue (reg->client_index);
-                if (q)
-                  {
-                    if (q_prev && (q_prev->cursize < q_prev->maxsize))
-                      {
-                        mp_copy = vl_msg_api_alloc_as_if_client(mp_size);
-                        clib_memcpy(mp_copy, mp, mp_size);
-                        vl_msg_api_send_shmem (q_prev, (u8 *)&mp);
-                        mp = mp_copy;
-                      }
-                    q_prev = q;
-                  }
-              }));
-  /* *INDENT-ON* */
+      if (pool_elts (sm->stats_registrations[IDX_IP4_MFIB_COUNTERS]))
+       do_ip4_mfib_counters (sm);
 
-#if STATS_DEBUG > 0
-  count = ntohl (mp->count);
-  sw_if_index = ntohl (mp->first_sw_if_index);
-  u64 *vp, v;
-  vp = (u64 *) mp->data;
+      if (pool_elts (sm->stats_registrations[IDX_IP6_MFIB_COUNTERS]))
+       do_ip6_mfib_counters (sm);
 
-  switch (mp->vnet_counter_type)
-    {
-    case VNET_INTERFACE_COUNTER_DROP:
-      counter_name = "drop";
-      break;
-    case VNET_INTERFACE_COUNTER_PUNT:
-      counter_name = "punt";
-      break;
-    case VNET_INTERFACE_COUNTER_IP4:
-      counter_name = "ip4";
-      break;
-    case VNET_INTERFACE_COUNTER_IP6:
-      counter_name = "ip6";
-      break;
-    case VNET_INTERFACE_COUNTER_RX_NO_BUF:
-      counter_name = "rx-no-buff";
-      break;
-    case VNET_INTERFACE_COUNTER_RX_MISS:
-      , counter_name = "rx-miss";
-      break;
-    case VNET_INTERFACE_COUNTER_RX_ERROR:
-      , counter_name = "rx-error (fifo-full)";
-      break;
-    case VNET_INTERFACE_COUNTER_TX_ERROR:
-      , counter_name = "tx-error (fifo-full)";
-      break;
-    default:
-      counter_name = "bogus";
-      break;
-    }
-  for (i = 0; i < count; i++)
-    {
-      v = clib_mem_unaligned (vp, u64);
-      v = clib_net_to_host_u64 (v);
-      vp++;
-      fformat (stdout, "%U.%s %lld\n", format_vnet_sw_if_index_name,
-              sm->vnet_main, sw_if_index, counter_name, v);
-      sw_if_index++;
-    }
-#endif
-  if (q_prev && (q_prev->cursize < q_prev->maxsize))
-    {
-      vl_msg_api_send_shmem (q_prev, (u8 *) & mp);
-    }
-  else
-    {
-      vl_msg_api_free (mp);
+      if (pool_elts (sm->stats_registrations[IDX_IP4_NBR_COUNTERS]))
+       do_ip4_nbr_counters (sm);
+
+      if (pool_elts (sm->stats_registrations[IDX_IP6_NBR_COUNTERS]))
+       do_ip6_nbr_counters (sm);
+
+      if (pool_elts (sm->stats_registrations[IDX_UDP_ENCAP_COUNTERS]))
+       do_udp_encap_counters (sm);
     }
 }
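
 The rewritten poller loop replaces the single global registration pool with one pool per counter type, so each pass only collects the counter types that currently have at least one registered client. A hedged, table-driven restatement of that gating follows; collectors[] is hypothetical and the real loop calls each do_*_counters() explicitly.

 typedef void (*collector_fn_t) (stats_main_t * sm);

 static void
 poll_once (stats_main_t * sm, collector_fn_t * collectors, u32 n_idx)
 {
   u32 idx;

   /* collectors[] would map e.g. IDX_UDP_ENCAP_COUNTERS to do_udp_encap_counters */
   for (idx = 0; idx < n_idx; idx++)
     if (collectors[idx] && pool_elts (sm->stats_registrations[idx]))
       collectors[idx] (sm);
 }
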
 
 static void
-  vl_api_vnet_interface_combined_counters_t_handler
-  (vl_api_vnet_interface_combined_counters_t * mp)
+  vl_api_vnet_interface_simple_counters_t_handler
+  (vl_api_vnet_interface_simple_counters_t * mp)
 {
-  vpe_client_registration_t *reg;
+  vpe_client_registration_t *clients, client;
   stats_main_t *sm = &stats_main;
-  unix_shared_memory_queue_t *q, *q_prev = NULL;
-  vl_api_vnet_interface_combined_counters_t *mp_copy = NULL;
+  vl_api_registration_t *reg, *reg_prev = NULL;
+  vl_api_vnet_interface_simple_counters_t *mp_copy = NULL;
   u32 mp_size;
-
-#if STATS_DEBUG > 0
-  char *counter_name;
-  u32 count, sw_if_index;
   int i;
-#endif
 
-  mp_size = sizeof (*mp) + (ntohl (mp->count) * sizeof (vlib_counter_t));
-
-  /* *INDENT-OFF* */
-  pool_foreach(reg, sm->stats_registrations,
-              ({
-                q = vl_api_client_index_to_input_queue (reg->client_index);
-                if (q)
-                  {
-                    if (q_prev && (q_prev->cursize < q_prev->maxsize))
-                      {
-                        mp_copy = vl_msg_api_alloc_as_if_client(mp_size);
-                        clib_memcpy(mp_copy, mp, mp_size);
-                        vl_msg_api_send_shmem (q_prev, (u8 *)&mp);
-                        mp = mp_copy;
-                      }
-                    q_prev = q;
-                  }
-              }));
-  /* *INDENT-ON* */
+  mp_size = sizeof (*mp) + (ntohl (mp->count) * sizeof (u64));
 
-#if STATS_DEBUG > 0
-  count = ntohl (mp->count);
-  sw_if_index = ntohl (mp->first_sw_if_index);
+  clients =
+    get_clients_for_stat (IDX_PER_INTERFACE_SIMPLE_COUNTERS,
+                         ~0 /*flag for all */ );
 
-  vlib_counter_t *vp;
-  u64 packets, bytes;
-  vp = (vlib_counter_t *) mp->data;
-
-  switch (mp->vnet_counter_type)
-    {
-    case VNET_INTERFACE_COUNTER_RX:
-      counter_name = "rx";
-      break;
-    case VNET_INTERFACE_COUNTER_TX:
-      counter_name = "tx";
-      break;
-    default:
-      counter_name = "bogus";
-      break;
-    }
-  for (i = 0; i < count; i++)
+  for (i = 0; i < vec_len (clients); i++)
     {
-      packets = clib_mem_unaligned (&vp->packets, u64);
-      packets = clib_net_to_host_u64 (packets);
-      bytes = clib_mem_unaligned (&vp->bytes, u64);
-      bytes = clib_net_to_host_u64 (bytes);
-      vp++;
-      fformat (stdout, "%U.%s.packets %lld\n",
-              format_vnet_sw_if_index_name,
-              sm->vnet_main, sw_if_index, counter_name, packets);
-      fformat (stdout, "%U.%s.bytes %lld\n",
-              format_vnet_sw_if_index_name,
-              sm->vnet_main, sw_if_index, counter_name, bytes);
-      sw_if_index++;
+      client = clients[i];
+      reg = vl_api_client_index_to_registration (client.client_index);
+      if (reg)
+       {
+         if (reg_prev && vl_api_can_send_msg (reg_prev))
+           {
+             mp_copy = vl_msg_api_alloc_as_if_client (mp_size);
+             clib_memcpy (mp_copy, mp, mp_size);
+             vl_api_send_msg (reg_prev, (u8 *) mp);
+             mp = mp_copy;
+           }
+         reg_prev = reg;
+       }
+      else
+       {
+         sm->enable_poller =
+           clear_client_for_stat (IDX_PER_INTERFACE_SIMPLE_COUNTERS, ~0,
+                                  client.client_index);
+         continue;
+       }
     }
 
+#if STATS_DEBUG > 0
+  fformat (stdout, "%U\n", format_vnet_simple_counters, mp);
 #endif
-  if (q_prev && (q_prev->cursize < q_prev->maxsize))
+
+  if (reg_prev && vl_api_can_send_msg (reg_prev))
     {
-      vl_msg_api_send_shmem (q_prev, (u8 *) & mp);
+      vl_api_send_msg (reg_prev, (u8 *) mp);
     }
   else
     {
@@ -1120,35 +2365,45 @@ static void
 static void
 vl_api_vnet_ip4_fib_counters_t_handler (vl_api_vnet_ip4_fib_counters_t * mp)
 {
-  vpe_client_registration_t *reg;
   stats_main_t *sm = &stats_main;
-  unix_shared_memory_queue_t *q, *q_prev = NULL;
+  vl_api_registration_t *reg, *reg_prev = NULL;
   vl_api_vnet_ip4_fib_counters_t *mp_copy = NULL;
   u32 mp_size;
+  vpe_client_registration_t *clients, client;
+  int i;
 
   mp_size = sizeof (*mp_copy) +
     ntohl (mp->count) * sizeof (vl_api_ip4_fib_counter_t);
 
-  /* *INDENT-OFF* */
-  pool_foreach(reg, sm->stats_registrations,
-  ({
-    q = vl_api_client_index_to_input_queue (reg->client_index);
-    if (q)
-      {
-        if (q_prev && (q_prev->cursize < q_prev->maxsize))
-          {
-            mp_copy = vl_msg_api_alloc_as_if_client(mp_size);
-            clib_memcpy(mp_copy, mp, mp_size);
-            vl_msg_api_send_shmem (q_prev, (u8 *)&mp);
-            mp = mp_copy;
-          }
-        q_prev = q;
-      }
-  }));
-  /* *INDENT-ON* */
-  if (q_prev && (q_prev->cursize < q_prev->maxsize))
+  clients =
+    get_clients_for_stat (IDX_IP4_FIB_COUNTERS, ~0 /*flag for all */ );
+
+  for (i = 0; i < vec_len (clients); i++)
     {
-      vl_msg_api_send_shmem (q_prev, (u8 *) & mp);
+      client = clients[i];
+      reg = vl_api_client_index_to_registration (client.client_index);
+      if (reg)
+       {
+         if (reg_prev && vl_api_can_send_msg (reg_prev))
+           {
+             mp_copy = vl_msg_api_alloc_as_if_client (mp_size);
+             clib_memcpy (mp_copy, mp, mp_size);
+             vl_api_send_msg (reg_prev, (u8 *) mp);
+             mp = mp_copy;
+           }
+         reg_prev = reg;
+       }
+      else
+       {
+         sm->enable_poller = clear_client_for_stat (IDX_IP4_FIB_COUNTERS,
+                                                    ~0, client.client_index);
+         continue;
+       }
+    }
+
+  if (reg_prev && vl_api_can_send_msg (reg_prev))
+    {
+      vl_api_send_msg (reg_prev, (u8 *) mp);
     }
   else
     {
@@ -1159,35 +2414,46 @@ vl_api_vnet_ip4_fib_counters_t_handler (vl_api_vnet_ip4_fib_counters_t * mp)
 static void
 vl_api_vnet_ip4_nbr_counters_t_handler (vl_api_vnet_ip4_nbr_counters_t * mp)
 {
-  vpe_client_registration_t *reg;
   stats_main_t *sm = &stats_main;
-  unix_shared_memory_queue_t *q, *q_prev = NULL;
+  vl_api_registration_t *reg, *reg_prev = NULL;
   vl_api_vnet_ip4_nbr_counters_t *mp_copy = NULL;
   u32 mp_size;
+  vpe_client_registration_t *clients, client;
+  int i;
 
   mp_size = sizeof (*mp_copy) +
     ntohl (mp->count) * sizeof (vl_api_ip4_nbr_counter_t);
 
-  /* *INDENT-OFF* */
-  pool_foreach(reg, sm->stats_registrations,
-  ({
-    q = vl_api_client_index_to_input_queue (reg->client_index);
-    if (q)
-      {
-        if (q_prev && (q_prev->cursize < q_prev->maxsize))
-          {
-            mp_copy = vl_msg_api_alloc_as_if_client(mp_size);
-            clib_memcpy(mp_copy, mp, mp_size);
-            vl_msg_api_send_shmem (q_prev, (u8 *)&mp);
-            mp = mp_copy;
-          }
-        q_prev = q;
-      }
-  }));
+  clients =
+    get_clients_for_stat (IDX_IP4_NBR_COUNTERS, ~0 /*flag for all */ );
+
+  for (i = 0; i < vec_len (clients); i++)
+    {
+      client = clients[i];
+      reg = vl_api_client_index_to_registration (client.client_index);
+      if (reg)
+       {
+         if (reg_prev && vl_api_can_send_msg (reg_prev))
+           {
+             mp_copy = vl_msg_api_alloc_as_if_client (mp_size);
+             clib_memcpy (mp_copy, mp, mp_size);
+             vl_api_send_msg (reg_prev, (u8 *) mp);
+             mp = mp_copy;
+           }
+         reg_prev = reg;
+       }
+      else
+       {
+         sm->enable_poller = clear_client_for_stat (IDX_IP4_NBR_COUNTERS,
+                                                    ~0, client.client_index);
+         continue;
+       }
+    }
+
   /* *INDENT-ON* */
-  if (q_prev && (q_prev->cursize < q_prev->maxsize))
+  if (reg_prev && vl_api_can_send_msg (reg_prev))
     {
-      vl_msg_api_send_shmem (q_prev, (u8 *) & mp);
+      vl_api_send_msg (reg_prev, (u8 *) mp);
     }
   else
     {
@@ -1198,35 +2464,45 @@ vl_api_vnet_ip4_nbr_counters_t_handler (vl_api_vnet_ip4_nbr_counters_t * mp)
 static void
 vl_api_vnet_ip6_fib_counters_t_handler (vl_api_vnet_ip6_fib_counters_t * mp)
 {
-  vpe_client_registration_t *reg;
   stats_main_t *sm = &stats_main;
-  unix_shared_memory_queue_t *q, *q_prev = NULL;
+  vl_api_registration_t *reg, *reg_prev = NULL;
   vl_api_vnet_ip6_fib_counters_t *mp_copy = NULL;
   u32 mp_size;
+  vpe_client_registration_t *clients, client;
+  int i;
 
   mp_size = sizeof (*mp_copy) +
     ntohl (mp->count) * sizeof (vl_api_ip6_fib_counter_t);
 
-  /* *INDENT-OFF* */
-  pool_foreach(reg, sm->stats_registrations,
-  ({
-    q = vl_api_client_index_to_input_queue (reg->client_index);
-    if (q)
-      {
-        if (q_prev && (q_prev->cursize < q_prev->maxsize))
-          {
-            mp_copy = vl_msg_api_alloc_as_if_client(mp_size);
-            clib_memcpy(mp_copy, mp, mp_size);
-            vl_msg_api_send_shmem (q_prev, (u8 *)&mp);
-            mp = mp_copy;
-          }
-        q_prev = q;
-      }
-  }));
+  clients =
+    get_clients_for_stat (IDX_IP6_FIB_COUNTERS, ~0 /*flag for all */ );
+
+  for (i = 0; i < vec_len (clients); i++)
+    {
+      client = clients[i];
+      reg = vl_api_client_index_to_registration (client.client_index);
+      if (reg)
+       {
+         if (reg_prev && vl_api_can_send_msg (reg_prev))
+           {
+             mp_copy = vl_msg_api_alloc_as_if_client (mp_size);
+             clib_memcpy (mp_copy, mp, mp_size);
+             vl_api_send_msg (reg_prev, (u8 *) mp);
+             mp = mp_copy;
+           }
+         reg_prev = reg;
+       }
+      else
+       {
+         sm->enable_poller = clear_client_for_stat (IDX_IP6_FIB_COUNTERS,
+                                                    ~0, client.client_index);
+         continue;
+       }
+    }
   /* *INDENT-ON* */
-  if (q_prev && (q_prev->cursize < q_prev->maxsize))
+  if (reg_prev && vl_api_can_send_msg (reg_prev))
     {
-      vl_msg_api_send_shmem (q_prev, (u8 *) & mp);
+      vl_api_send_msg (reg_prev, (u8 *) mp);
     }
   else
     {
@@ -1237,35 +2513,45 @@ vl_api_vnet_ip6_fib_counters_t_handler (vl_api_vnet_ip6_fib_counters_t * mp)
 static void
 vl_api_vnet_ip6_nbr_counters_t_handler (vl_api_vnet_ip6_nbr_counters_t * mp)
 {
-  vpe_client_registration_t *reg;
   stats_main_t *sm = &stats_main;
-  unix_shared_memory_queue_t *q, *q_prev = NULL;
+  vl_api_registration_t *reg, *reg_prev = NULL;
   vl_api_vnet_ip6_nbr_counters_t *mp_copy = NULL;
   u32 mp_size;
+  vpe_client_registration_t *clients, client;
+  int i;
 
   mp_size = sizeof (*mp_copy) +
     ntohl (mp->count) * sizeof (vl_api_ip6_nbr_counter_t);
 
-  /* *INDENT-OFF* */
-  pool_foreach(reg, sm->stats_registrations,
-  ({
-    q = vl_api_client_index_to_input_queue (reg->client_index);
-    if (q)
-      {
-        if (q_prev && (q_prev->cursize < q_prev->maxsize))
-          {
-            mp_copy = vl_msg_api_alloc_as_if_client(mp_size);
-            clib_memcpy(mp_copy, mp, mp_size);
-            vl_msg_api_send_shmem (q_prev, (u8 *)&mp);
-            mp = mp_copy;
-          }
-        q_prev = q;
-      }
-  }));
+  clients =
+    get_clients_for_stat (IDX_IP6_NBR_COUNTERS, ~0 /*flag for all */ );
+
+  for (i = 0; i < vec_len (clients); i++)
+    {
+      client = clients[i];
+      reg = vl_api_client_index_to_registration (client.client_index);
+      if (reg)
+       {
+         if (reg_prev && vl_api_can_send_msg (reg_prev))
+           {
+             mp_copy = vl_msg_api_alloc_as_if_client (mp_size);
+             clib_memcpy (mp_copy, mp, mp_size);
+             vl_api_send_msg (reg_prev, (u8 *) mp);
+             mp = mp_copy;
+           }
+         reg_prev = reg;
+       }
+      else
+       {
+         sm->enable_poller = clear_client_for_stat (IDX_IP6_NBR_COUNTERS,
+                                                    ~0, client.client_index);
+         continue;
+       }
+    }
   /* *INDENT-ON* */
-  if (q_prev && (q_prev->cursize < q_prev->maxsize))
+  if (reg_prev && vl_api_can_send_msg (reg_prev))
     {
-      vl_msg_api_send_shmem (q_prev, (u8 *) & mp);
+      vl_api_send_msg (reg_prev, (u8 *) mp);
     }
   else
     {
@@ -1273,78 +2559,343 @@ vl_api_vnet_ip6_nbr_counters_t_handler (vl_api_vnet_ip6_nbr_counters_t * mp)
     }
 }
 
+static void
+vl_api_want_udp_encap_stats_t_handler (vl_api_want_udp_encap_stats_t * mp)
+{
+  stats_main_t *sm = &stats_main;
+  vpe_client_registration_t rp;
+  vl_api_want_udp_encap_stats_reply_t *rmp;
+  uword *p;
+  i32 retval = 0;
+  vl_api_registration_t *reg;
+  u32 fib;
+
+  fib = ~0;                    //Using same mechanism as _per_interface_
+  rp.client_index = mp->client_index;
+  rp.client_pid = mp->pid;
+
+  handle_client_registration (&rp, IDX_UDP_ENCAP_COUNTERS, fib, mp->enable);
+
+reply:
+  reg = vl_api_client_index_to_registration (mp->client_index);
+
+  if (!reg)
+    {
+      sm->enable_poller = clear_client_for_stat (IDX_UDP_ENCAP_COUNTERS,
+                                                fib, mp->client_index);
+      return;
+    }
+
+  rmp = vl_msg_api_alloc (sizeof (*rmp));
+  rmp->_vl_msg_id = ntohs (VL_API_WANT_UDP_ENCAP_STATS_REPLY);
+  rmp->context = mp->context;
+  rmp->retval = retval;
+
+  vl_api_send_msg (reg, (u8 *) rmp);
+}
+
 static void
 vl_api_want_stats_t_handler (vl_api_want_stats_t * mp)
 {
   stats_main_t *sm = &stats_main;
-  vpe_client_registration_t *rp;
+  vpe_client_registration_t rp;
   vl_api_want_stats_reply_t *rmp;
   uword *p;
   i32 retval = 0;
-  unix_shared_memory_queue_t *q;
+  u32 item;
+  vl_api_registration_t *reg;
+
+  item = ~0;                   /* register for all items */
+  rp.client_index = mp->client_index;
+  rp.client_pid = mp->pid;
+
+  handle_client_registration (&rp, IDX_PER_INTERFACE_SIMPLE_COUNTERS,
+                             item, mp->enable_disable);
+
+  handle_client_registration (&rp, IDX_PER_INTERFACE_COMBINED_COUNTERS,
+                             item, mp->enable_disable);
+
+  handle_client_registration (&rp, IDX_IP4_FIB_COUNTERS,
+                             item, mp->enable_disable);
+
+  handle_client_registration (&rp, IDX_IP4_NBR_COUNTERS,
+                             item, mp->enable_disable);
+
+  handle_client_registration (&rp, IDX_IP6_FIB_COUNTERS,
+                             item, mp->enable_disable);
+
+  handle_client_registration (&rp, IDX_IP6_NBR_COUNTERS,
+                             item, mp->enable_disable);
+
+reply:
+  reg = vl_api_client_index_to_registration (mp->client_index);
+  if (!reg)
+    return;
+
+  rmp = vl_msg_api_alloc (sizeof (*rmp));
+  rmp->_vl_msg_id = ntohs (VL_API_WANT_STATS_REPLY);
+  rmp->context = mp->context;
+  rmp->retval = retval;
+
+  vl_api_send_msg (reg, (u8 *) rmp);
+}
+
+static void
+  vl_api_want_interface_simple_stats_t_handler
+  (vl_api_want_interface_simple_stats_t * mp)
+{
+  stats_main_t *sm = &stats_main;
+  vpe_client_registration_t rp;
+  vl_api_want_interface_simple_stats_reply_t *rmp;
+  uword *p;
+  i32 retval = 0;
+  u32 swif;
+  vl_api_registration_t *reg;
+
+  swif = ~0;                   //Using same mechanism as _per_interface_
+  rp.client_index = mp->client_index;
+  rp.client_pid = mp->pid;
+
+  handle_client_registration (&rp, IDX_PER_INTERFACE_SIMPLE_COUNTERS, swif,
+                             mp->enable_disable);
+
+reply:
+  reg = vl_api_client_index_to_registration (mp->client_index);
+
+  if (!reg)
+    {
+      sm->enable_poller =
+       clear_client_for_stat (IDX_PER_INTERFACE_SIMPLE_COUNTERS, swif,
+                              mp->client_index);
+      return;
+    }
+
+  rmp = vl_msg_api_alloc (sizeof (*rmp));
+  rmp->_vl_msg_id = ntohs (VL_API_WANT_INTERFACE_SIMPLE_STATS_REPLY);
+  rmp->context = mp->context;
+  rmp->retval = retval;
+
+  vl_api_send_msg (reg, (u8 *) rmp);
+}
+
+
+static void
+vl_api_want_ip4_fib_stats_t_handler (vl_api_want_ip4_fib_stats_t * mp)
+{
+  stats_main_t *sm = &stats_main;
+  vpe_client_registration_t rp;
+  vl_api_want_ip4_fib_stats_reply_t *rmp;
+  uword *p;
+  i32 retval = 0;
+  vl_api_registration_t *reg;
+  u32 fib;
+
+  fib = ~0;                    //Using same mechanism as _per_interface_
+  rp.client_index = mp->client_index;
+  rp.client_pid = mp->pid;
+
+  handle_client_registration (&rp, IDX_IP4_FIB_COUNTERS, fib,
+                             mp->enable_disable);
 
-  p = hash_get (sm->stats_registration_hash, mp->client_index);
-  if (p)
+reply:
+  reg = vl_api_client_index_to_registration (mp->client_index);
+
+  if (!reg)
     {
-      if (mp->enable_disable)
-       {
-         clib_warning ("pid %d: already enabled...", mp->pid);
-         retval = -2;
-         goto reply;
-       }
-      else
-       {
-         rp = pool_elt_at_index (sm->stats_registrations, p[0]);
-         pool_put (sm->stats_registrations, rp);
-         hash_unset (sm->stats_registration_hash, mp->client_index);
-         goto reply;
-       }
+      sm->enable_poller = clear_client_for_stat (IDX_IP4_FIB_COUNTERS,
+                                                fib, mp->client_index);
+      return;
     }
-  if (mp->enable_disable == 0)
+
+  rmp = vl_msg_api_alloc (sizeof (*rmp));
+  rmp->_vl_msg_id = ntohs (VL_API_WANT_IP4_FIB_STATS_REPLY);
+  rmp->context = mp->context;
+  rmp->retval = retval;
+
+  vl_api_send_msg (reg, (u8 *) rmp);
+}
+
+static void
+vl_api_want_ip4_mfib_stats_t_handler (vl_api_want_ip4_mfib_stats_t * mp)
+{
+  stats_main_t *sm = &stats_main;
+  vpe_client_registration_t rp;
+  vl_api_want_ip4_mfib_stats_reply_t *rmp;
+  uword *p;
+  i32 retval = 0;
+  vl_api_registration_t *reg;
+  u32 mfib;
+
+  mfib = ~0;                   //Using same mechanism as _per_interface_
+  rp.client_index = mp->client_index;
+  rp.client_pid = mp->pid;
+
+  handle_client_registration (&rp, IDX_IP4_MFIB_COUNTERS, mfib,
+                             mp->enable_disable);
+
+reply:
+  reg = vl_api_client_index_to_registration (mp->client_index);
+  if (!reg)
     {
-      clib_warning ("pid %d: already disabled...", mp->pid);
-      retval = -3;
-      goto reply;
+      sm->enable_poller = clear_client_for_stat (IDX_IP4_MFIB_COUNTERS,
+                                                mfib, mp->client_index);
+      return;
     }
-  pool_get (sm->stats_registrations, rp);
-  rp->client_index = mp->client_index;
-  rp->client_pid = mp->pid;
-  hash_set (sm->stats_registration_hash, rp->client_index,
-           rp - sm->stats_registrations);
+
+  rmp = vl_msg_api_alloc (sizeof (*rmp));
+  rmp->_vl_msg_id = ntohs (VL_API_WANT_IP4_MFIB_STATS_REPLY);
+  rmp->context = mp->context;
+  rmp->retval = retval;
+
+  vl_api_send_msg (reg, (u8 *) rmp);
+}
+
+static void
+vl_api_want_ip6_fib_stats_t_handler (vl_api_want_ip6_fib_stats_t * mp)
+{
+  stats_main_t *sm = &stats_main;
+  vpe_client_registration_t rp;
+  vl_api_want_ip6_fib_stats_reply_t *rmp;
+  uword *p;
+  i32 retval = 0;
+  vl_api_registration_t *reg;
+  u32 fib;
+
+  fib = ~0;                    //Using same mechanism as _per_interface_
+  rp.client_index = mp->client_index;
+  rp.client_pid = mp->pid;
+
+  handle_client_registration (&rp, IDX_IP6_FIB_COUNTERS, fib,
+                             mp->enable_disable);
 
 reply:
-  if (pool_elts (sm->stats_registrations))
-    sm->enable_poller = 1;
-  else
-    sm->enable_poller = 0;
+  reg = vl_api_client_index_to_registration (mp->client_index);
+  if (!reg)
+    {
+      sm->enable_poller = clear_client_for_stat (IDX_IP6_FIB_COUNTERS,
+                                                fib, mp->client_index);
+      return;
+    }
+
+  rmp = vl_msg_api_alloc (sizeof (*rmp));
+  rmp->_vl_msg_id = ntohs (VL_API_WANT_IP6_FIB_STATS_REPLY);
+  rmp->context = mp->context;
+  rmp->retval = retval;
 
-  q = vl_api_client_index_to_input_queue (mp->client_index);
+  vl_api_send_msg (reg, (u8 *) rmp);
+}
 
-  if (!q)
-    return;
+static void
+vl_api_want_ip6_mfib_stats_t_handler (vl_api_want_ip6_mfib_stats_t * mp)
+{
+  stats_main_t *sm = &stats_main;
+  vpe_client_registration_t rp;
+  vl_api_want_ip6_mfib_stats_reply_t *rmp;
+  uword *p;
+  i32 retval = 0;
+  vl_api_registration_t *reg;
+  u32 mfib;
+
+  mfib = ~0;                   //Using same mechanism as _per_interface_
+  rp.client_index = mp->client_index;
+  rp.client_pid = mp->pid;
+
+  handle_client_registration (&rp, IDX_IP6_MFIB_COUNTERS, mfib,
+                             mp->enable_disable);
+
+reply:
+  reg = vl_api_client_index_to_registration (mp->client_index);
+  if (!reg)
+    {
+      sm->enable_poller = clear_client_for_stat (IDX_IP6_MFIB_COUNTERS,
+                                                mfib, mp->client_index);
+      return;
+    }
 
   rmp = vl_msg_api_alloc (sizeof (*rmp));
-  rmp->_vl_msg_id = ntohs (VL_API_WANT_STATS_REPLY);
+  rmp->_vl_msg_id = ntohs (VL_API_WANT_IP6_MFIB_STATS_REPLY);
   rmp->context = mp->context;
   rmp->retval = retval;
 
-  vl_msg_api_send_shmem (q, (u8 *) & rmp);
+  vl_api_send_msg (reg, (u8 *) rmp);
+}
+
+/* FIXME - NBR stats broken - this will be fixed in subsequent patch */
+static void
+vl_api_want_ip4_nbr_stats_t_handler (vl_api_want_ip4_nbr_stats_t * mp)
+{
+}
+
+static void
+vl_api_want_ip6_nbr_stats_t_handler (vl_api_want_ip6_nbr_stats_t * mp)
+{
+}
+
+static void
+vl_api_vnet_get_summary_stats_t_handler (vl_api_vnet_get_summary_stats_t * mp)
+{
+  stats_main_t *sm = &stats_main;
+  vnet_interface_main_t *im = sm->interface_main;
+  vl_api_vnet_get_summary_stats_reply_t *rmp;
+  vlib_combined_counter_main_t *cm;
+  vlib_counter_t v;
+  int i, which;
+  u64 total_pkts[VLIB_N_RX_TX];
+  u64 total_bytes[VLIB_N_RX_TX];
+  vl_api_registration_t *reg;
+
+  reg = vl_api_client_index_to_registration (mp->client_index);
+  if (!reg)
+    return;
+
+  rmp = vl_msg_api_alloc (sizeof (*rmp));
+  rmp->_vl_msg_id = ntohs (VL_API_VNET_GET_SUMMARY_STATS_REPLY);
+  rmp->context = mp->context;
+  rmp->retval = 0;
+
+  memset (total_pkts, 0, sizeof (total_pkts));
+  memset (total_bytes, 0, sizeof (total_bytes));
+
+  vnet_interface_counter_lock (im);
+
+  vec_foreach (cm, im->combined_sw_if_counters)
+  {
+    which = cm - im->combined_sw_if_counters;
+
+    for (i = 0; i < vlib_combined_counter_n_counters (cm); i++)
+      {
+       vlib_get_combined_counter (cm, i, &v);
+       total_pkts[which] += v.packets;
+       total_bytes[which] += v.bytes;
+      }
+  }
+  vnet_interface_counter_unlock (im);
+
+  rmp->total_pkts[VLIB_RX] = clib_host_to_net_u64 (total_pkts[VLIB_RX]);
+  rmp->total_bytes[VLIB_RX] = clib_host_to_net_u64 (total_bytes[VLIB_RX]);
+  rmp->total_pkts[VLIB_TX] = clib_host_to_net_u64 (total_pkts[VLIB_TX]);
+  rmp->total_bytes[VLIB_TX] = clib_host_to_net_u64 (total_bytes[VLIB_TX]);
+  rmp->vector_rate =
+    clib_host_to_net_u64 (vlib_last_vector_length_per_node (sm->vlib_main));
+
+  vl_api_send_msg (reg, (u8 *) rmp);
 }
 
 int
 stats_memclnt_delete_callback (u32 client_index)
 {
-  vpe_client_registration_t *rp;
+  vpe_client_stats_registration_t *rp;
   stats_main_t *sm = &stats_main;
   uword *p;
 
-  p = hash_get (sm->stats_registration_hash, client_index);
-  if (p)
-    {
-      rp = pool_elt_at_index (sm->stats_registrations, p[0]);
-      pool_put (sm->stats_registrations, rp);
-      hash_unset (sm->stats_registration_hash, client_index);
-    }
+  // FIXME
+  /* p = hash_get (sm->stats_registration_hash, client_index); */
+  /* if (p) */
+  /*   { */
+  /*     rp = pool_elt_at_index (sm->stats_registrations, p[0]); */
+  /*     pool_put (sm->stats_registrations, rp); */
+  /*     hash_unset (sm->stats_registration_hash, client_index); */
+  /*   } */
 
   return 0;
 }
@@ -1397,6 +2948,19 @@ stats_init (vlib_main_t * vm)
   am->message_bounce[VL_API_VNET_IP4_NBR_COUNTERS] = 1;
   am->message_bounce[VL_API_VNET_IP6_NBR_COUNTERS] = 1;
 
+  /*
+   * Set up the (msg_name, crc, message-id) table
+   */
+  setup_message_id_table (am);
+
+  vec_validate (sm->stats_registrations, STATS_REG_N_IDX);
+  vec_validate (sm->stats_registration_hash, STATS_REG_N_IDX);
+#define stats_reg(n)                            \
+  sm->stats_registrations[IDX_##n] = 0; \
+  sm->stats_registration_hash[IDX_##n] = 0;
+#include <vpp/stats/stats.reg>
+#undef stats_reg
+
   return 0;
 }