stats: adding symlinks for nodes and interfaces in the stat segment 36/31636/18
authorArthur de Kerhor <arthurdekerhor@gmail.com>
Thu, 11 Mar 2021 18:26:54 +0000 (10:26 -0800)
committerOle Troan <otroan@employees.org>
Tue, 4 May 2021 16:29:29 +0000 (16:29 +0000)
A given interface counter (e.g. rx) can be accessed via
/interfaces/<interface_name>/<counter_name>.
Same goes with nodes: /nodes/<node_name>/<counter_name>
As interfaces may contain '/' in their names,
these are replaced by '_' in symlinks
Also added 2 tests for the stat segment

Type: feature

Signed-off-by: Arthur de Kerhor <arthurdekerhor@gmail.com>
Change-Id: I27da252e7b3dc177815616ca46b5c200a456bf0f
Signed-off-by: Ole Troan <ot@cisco.com>
src/vpp-api/client/stat_client.c
src/vpp-api/python/vpp_papi/vpp_stats.py
src/vpp/stats/stat_segment.c
src/vpp/stats/stat_segment.h
src/vpp/stats/stat_segment_shared.h
test/framework.py
test/test_stats_client.py

index 018cce3..2c30be6 100644 (file)
@@ -174,7 +174,7 @@ stat_segment_heartbeat_r (stat_client_main_t * sm)
   stat_segment_access_t sa;
   stat_segment_directory_entry_t *ep;
 
-  /* Has directory been update? */
+  /* Has directory been updated? */
   if (sm->shared_header->epoch != sm->current_epoch)
     return 0;
   if (stat_segment_access_start (&sa, sm))
@@ -202,8 +202,29 @@ stat_segment_heartbeat (void)
    _v(v);                                             \
 })
 
+static counter_t *
+stat_vec_simple_init (counter_t c)
+{
+  counter_t *v = 0;
+  vec_add1 (v, c);
+  return v;
+}
+
+static vlib_counter_t *
+stat_vec_combined_init (vlib_counter_t c)
+{
+  vlib_counter_t *v = 0;
+  vec_add1 (v, c);
+  return v;
+}
+
+/*
+ * If index2 is specified copy out the column (the indexed value across all
+ * threads), otherwise copy out all values.
+ */
 static stat_segment_data_t
-copy_data (stat_segment_directory_entry_t * ep, stat_client_main_t * sm)
+copy_data (stat_segment_directory_entry_t *ep, u32 index2, char *name,
+          stat_client_main_t *sm)
 {
   stat_segment_data_t result = { 0 };
   int i;
@@ -214,7 +235,8 @@ copy_data (stat_segment_directory_entry_t * ep, stat_client_main_t * sm)
   assert (sm->shared_header);
 
   result.type = ep->type;
-  result.name = strdup (ep->name);
+  result.name = strdup (name ? name : ep->name);
+
   switch (ep->type)
     {
     case STAT_DIR_TYPE_SCALAR_INDEX:
@@ -227,7 +249,10 @@ copy_data (stat_segment_directory_entry_t * ep, stat_client_main_t * sm)
       for (i = 0; i < vec_len (simple_c); i++)
        {
          counter_t *cb = stat_segment_adjust (sm, simple_c[i]);
-         result.simple_counter_vec[i] = stat_vec_dup (sm, cb);
+         if (index2 != ~0)
+           result.simple_counter_vec[i] = stat_vec_simple_init (cb[index2]);
+         else
+           result.simple_counter_vec[i] = stat_vec_dup (sm, cb);
        }
       break;
 
@@ -237,7 +262,11 @@ copy_data (stat_segment_directory_entry_t * ep, stat_client_main_t * sm)
       for (i = 0; i < vec_len (combined_c); i++)
        {
          vlib_counter_t *cb = stat_segment_adjust (sm, combined_c[i]);
-         result.combined_counter_vec[i] = stat_vec_dup (sm, cb);
+         if (index2 != ~0)
+           result.combined_counter_vec[i] =
+             stat_vec_combined_init (cb[index2]);
+         else
+           result.combined_counter_vec[i] = stat_vec_dup (sm, cb);
        }
       break;
 
@@ -265,6 +294,14 @@ copy_data (stat_segment_directory_entry_t * ep, stat_client_main_t * sm)
       }
       break;
 
+    case STAT_DIR_TYPE_SYMLINK:
+      /* Gather info from all threads into a vector */
+      {
+       stat_segment_directory_entry_t *ep2;
+       ep2 = vec_elt_at_index (sm->directory_vector, ep->index1);
+       return copy_data (ep2, ep->index2, ep->name, sm);
+      }
+
     case STAT_DIR_TYPE_EMPTY:
       break;
 
@@ -390,7 +427,7 @@ stat_segment_dump_r (uint32_t * stats, stat_client_main_t * sm)
     {
       /* Collect counter */
       ep = vec_elt_at_index (sm->directory_vector, stats[i]);
-      vec_add1 (res, copy_data (ep, sm));
+      vec_add1 (res, copy_data (ep, ~0, 0, sm));
     }
 
   if (stat_segment_access_end (&sa, sm))
@@ -440,12 +477,16 @@ stat_segment_dump_entry_r (uint32_t index, stat_client_main_t * sm)
   stat_segment_data_t *res = 0;
   stat_segment_access_t sa;
 
+  /* Has directory been updated? */
+  if (sm->shared_header->epoch != sm->current_epoch)
+    return 0;
+
   if (stat_segment_access_start (&sa, sm))
     return 0;
 
   /* Collect counter */
   ep = vec_elt_at_index (sm->directory_vector, index);
-  vec_add1 (res, copy_data (ep, sm));
+  vec_add1 (res, copy_data (ep, ~0, 0, sm));
 
   if (stat_segment_access_end (&sa, sm))
     return res;
index 919df7e..821a413 100755 (executable)
@@ -180,67 +180,97 @@ class VPPStats():
 
     elementfmt = 'IQ128s'
 
-    def refresh(self):
+    def refresh(self, blocking=True):
         '''Refresh directory vector cache (epoch changed)'''
         directory = {}
-        with self.lock:
-            for direntry in StatsVector(self, self.directory_vector, self.elementfmt):
-                path_raw = direntry[2].find(b'\x00')
-                path = direntry[2][:path_raw].decode('ascii')
-                directory[path] = StatsEntry(direntry[0], direntry[1])
-            self.last_epoch = self.epoch
-            self.directory = directory
-
-            # Cache the error index vectors
-            self.error_vectors = []
-            for threads in StatsVector(self, self.error_vector, 'P'):
-                self.error_vectors.append(StatsVector(self, threads[0], 'Q'))
-
-    def __getitem__(self, item):
+        directory_by_idx = {}
+        while True:
+            try:
+                with self.lock:
+                    for i, direntry in enumerate(StatsVector(self, self.directory_vector, self.elementfmt)):
+                        path_raw = direntry[2].find(b'\x00')
+                        path = direntry[2][:path_raw].decode('ascii')
+                        directory[path] = StatsEntry(direntry[0], direntry[1])
+                        directory_by_idx[i] = path
+                    self.last_epoch = self.epoch
+                    self.directory = directory
+                    self.directory_by_idx = directory_by_idx
+
+                    # Cache the error index vectors
+                    self.error_vectors = []
+                    for threads in StatsVector(self, self.error_vector, 'P'):
+                        self.error_vectors.append(StatsVector(self, threads[0], 'Q'))
+                    return
+            except IOError:
+                if not blocking:
+                    raise
+
+    def __getitem__(self, item, blocking=True):
         if not self.connected:
             self.connect()
-        if self.last_epoch != self.epoch:
-            self.refresh()
-        with self.lock:
-            return self.directory[item].get_counter(self)
+        while True:
+            try:
+                if self.last_epoch != self.epoch:
+                    self.refresh(blocking)
+                with self.lock:
+                    return self.directory[item].get_counter(self)
+            except IOError:
+                if not blocking:
+                    raise
 
     def __iter__(self):
         return iter(self.directory.items())
 
-    def set_errors(self):
+    def set_errors(self, blocking=True):
         '''Return dictionary of error counters > 0'''
         if not self.connected:
             self.connect()
 
         errors = {k:v for k, v in self.directory.items() if k.startswith("/err/")}
         result = {}
-        with self.lock:
-            for k, entry in errors.items():
-                total = 0
-                i = entry.value
-                for per_thread in self.error_vectors:
-                    total += per_thread[i]
-                if total:
-                    result[k] = total
-        return result
-
-    def set_errors_str(self):
+        while True:
+            try:
+                if self.last_epoch != self.epoch:
+                    self.refresh(blocking)
+                with self.lock:
+                    for k, entry in errors.items():
+                        total = 0
+                        i = entry.value
+                        for per_thread in self.error_vectors:
+                            total += per_thread[i]
+                        if total:
+                            result[k] = total
+                return result
+            except IOError:
+                if not blocking:
+                    raise
+
+    def set_errors_str(self, blocking=True):
         '''Return all errors counters > 0 pretty printed'''
         error_string = ['ERRORS:']
-        error_counters = self.set_errors()
+        error_counters = self.set_errors(blocking)
         for k in sorted(error_counters):
             error_string.append('{:<60}{:>10}'.format(k, error_counters[k]))
         return '%s\n' % '\n'.join(error_string)
 
-    def get_counter(self, name):
+    def get_counter(self, name, blocking=True):
         '''Alternative call to __getitem__'''
-        return self.__getitem__(name)
+        return self.__getitem__(name, blocking)
 
-    def get_err_counter(self, name):
+    def get_err_counter(self, name, blocking=True):
         '''Return a single value (sum of all threads)'''
         if not self.connected:
             self.connect()
-        return sum(self.directory[name].get_counter(self))
+        if name.startswith("/err/"):
+            while True:
+                try:
+                    if self.last_epoch != self.epoch:
+                        self.refresh(blocking)
+                    with self.lock:
+                        return sum(self.directory[name].get_counter(self))
+                except IOError:
+                    if not blocking:
+                        raise
 
     def ls(self, patterns):
         '''Returns list of counters matching pattern'''
@@ -253,13 +283,13 @@ class VPPStats():
         return [k for k, v in self.directory.items()
                 if any(re.match(pattern, k) for pattern in regex)]
 
-    def dump(self, counters):
+    def dump(self, counters, blocking=True):
         '''Given a list of counters return a dictionary of results'''
         if not self.connected:
             self.connect()
         result = {}
         for cnt in counters:
-            result[cnt] = self.__getitem__(cnt)
+            result[cnt] = self.__getitem__(cnt,blocking)
         return result
 
 class StatsLock():
@@ -377,6 +407,8 @@ class StatsEntry():
             self.function = self.error
         elif stattype == 5:
             self.function = self.name
+        elif stattype == 7:
+            self.function = self.symlink
         else:
             self.function = self.illegal
 
@@ -415,12 +447,23 @@ class StatsEntry():
         '''Name counter'''
         counter = []
         for name in StatsVector(stats, self.value, 'P'):
-            counter.append(get_string(stats, name[0]))
+            if name[0]:
+                counter.append(get_string(stats, name[0]))
         return counter
 
+    SYMLINK_FMT1 = Struct('II')
+    SYMLINK_FMT2 = Struct('Q')
+    def symlink(self, stats):
+        '''Symlink counter'''
+        b = self.SYMLINK_FMT2.pack(self.value)
+        index1, index2 = self.SYMLINK_FMT1.unpack(b)
+        name = stats.directory_by_idx[index1]
+        return stats[name][:,index2]
+
     def get_counter(self, stats):
         '''Return a list of counters'''
-        return self.function(stats)
+        if stats:
+            return self.function(stats)
 
 class TestStats(unittest.TestCase):
     '''Basic statseg tests'''
@@ -508,6 +551,11 @@ class TestStats(unittest.TestCase):
         print('/sys/node', self.stat.dump(counters))
         print('/net/route/to', self.stat['/net/route/to'])
 
+    def test_symlink(self):
+        '''Symbolic links'''
+        print('/interfaces/local0/rx', self.stat['/interfaces/local0/rx'])
+        print('/sys/nodes/unix-epoll-input', self.stat['/nodes/unix-epoll-input/calls'])
+
 if __name__ == '__main__':
     import cProfile
     from pstats import Stats
index ddf6e19..c8445ba 100644 (file)
@@ -228,6 +228,80 @@ vlib_stats_pop_heap (void *cm_arg, void *oldheap, u32 cindex,
   clib_mem_set_heap (oldheap);
 }
 
+u8 *
+format_vlib_stats_symlink (u8 *s, va_list *args)
+{
+  char *input = va_arg (*args, char *);
+  char *modified_input = vec_dup (input);
+  int i;
+  u8 *result;
+
+  for (i = 0; i < strlen (modified_input); i++)
+    if (modified_input[i] == '/')
+      modified_input[i] = '_';
+
+  result = format (s, "%s", modified_input);
+  vec_free (modified_input);
+  return result;
+}
+
+void
+vlib_stats_register_symlink (void *oldheap, u8 *name, u32 index1, u32 index2,
+                            u8 lock)
+{
+  stat_segment_main_t *sm = &stat_segment_main;
+  stat_segment_shared_header_t *shared_header = sm->shared_header;
+  stat_segment_directory_entry_t e;
+
+  ASSERT (shared_header);
+
+  if (lock)
+    vlib_stat_segment_lock ();
+  clib_mem_set_heap (oldheap); /* Exit stats segment */
+  u32 vector_index = lookup_hash_index (name);
+  /* Back to stats segment */
+  clib_mem_set_heap (sm->heap); /* Re-enter stat segment */
+
+  if (vector_index == STAT_SEGMENT_INDEX_INVALID)
+    {
+      memcpy (e.name, name, vec_len (name));
+      e.name[vec_len (name)] = '\0';
+      e.type = STAT_DIR_TYPE_SYMLINK;
+      e.index1 = index1;
+      e.index2 = index2;
+      vector_index = vlib_stats_create_counter (&e, oldheap);
+
+      /* Warn clients to refresh any pointers they might be holding */
+      shared_header->directory_vector = sm->directory_vector;
+    }
+
+  if (lock)
+    vlib_stat_segment_unlock ();
+}
+
+void
+vlib_stats_rename_symlink (void *oldheap, u64 index, u8 *new_name)
+{
+  stat_segment_main_t *sm = &stat_segment_main;
+  stat_segment_directory_entry_t *e;
+
+  ASSERT (clib_mem_get_heap () == sm->heap);
+
+  if (index >= vec_len (sm->directory_vector))
+    return;
+
+  e = &sm->directory_vector[index];
+
+  clib_mem_set_heap (oldheap);
+  hash_unset (sm->directory_vector_by_name, &e->name);
+  clib_mem_set_heap (sm->heap);
+
+  strncpy (e->name, (char *) new_name, 128 - 1);
+  clib_mem_set_heap (oldheap);
+  hash_set (sm->directory_vector_by_name, &e->name, index);
+  clib_mem_set_heap (sm->heap);
+}
+
 void
 vlib_stats_register_error_index (void *oldheap, u8 * name, u64 * em_vec,
                                 u64 index)
@@ -507,13 +581,12 @@ update_node_counters (stat_segment_main_t * sm)
   int i, j;
   static u32 no_max_nodes = 0;
 
-  vlib_node_get_nodes (0 /* vm, for barrier sync */ ,
-                      (u32) ~ 0 /* all threads */ ,
-                      1 /* include stats */ ,
-                      0 /* barrier sync */ ,
-                      &node_dups, &stat_vms);
+  vlib_node_get_nodes (0 /* vm, for barrier sync */,
+                      (u32) ~0 /* all threads */, 1 /* include stats */,
+                      0 /* barrier sync */, &node_dups, &stat_vms);
 
   u32 l = vec_len (node_dups[0]);
+  u8 *symlink_name = 0;
 
   /*
    * Extend performance nodes if necessary
@@ -523,14 +596,14 @@ update_node_counters (stat_segment_main_t * sm)
       void *oldheap = clib_mem_set_heap (sm->heap);
       vlib_stat_segment_lock ();
 
-      stat_validate_counter_vector (&sm->directory_vector
-                                   [STAT_COUNTER_NODE_CLOCKS], l - 1);
-      stat_validate_counter_vector (&sm->directory_vector
-                                   [STAT_COUNTER_NODE_VECTORS], l - 1);
-      stat_validate_counter_vector (&sm->directory_vector
-                                   [STAT_COUNTER_NODE_CALLS], l - 1);
-      stat_validate_counter_vector (&sm->directory_vector
-                                   [STAT_COUNTER_NODE_SUSPENDS], l - 1);
+      stat_validate_counter_vector (
+       &sm->directory_vector[STAT_COUNTER_NODE_CLOCKS], l - 1);
+      stat_validate_counter_vector (
+       &sm->directory_vector[STAT_COUNTER_NODE_VECTORS], l - 1);
+      stat_validate_counter_vector (
+       &sm->directory_vector[STAT_COUNTER_NODE_CALLS], l - 1);
+      stat_validate_counter_vector (
+       &sm->directory_vector[STAT_COUNTER_NODE_SUSPENDS], l - 1);
 
       vec_validate (sm->nodes, l - 1);
       stat_segment_directory_entry_t *ep;
@@ -548,7 +621,17 @@ update_node_counters (stat_segment_main_t * sm)
          if (sm->nodes[n->index])
            vec_free (sm->nodes[n->index]);
          sm->nodes[n->index] = s;
+
+#define _(E, t, name, p)                                                      \
+  vec_reset_length (symlink_name);                                            \
+  symlink_name = format (symlink_name, "/nodes/%U/" #name "%c",               \
+                        format_vlib_stats_symlink, s, 0);                    \
+  vlib_stats_register_symlink (oldheap, symlink_name, STAT_COUNTER_##E,       \
+                              n->index, 0 /* don't lock */);
+         foreach_stat_segment_node_counter_name
+#undef _
        }
+      vec_free (symlink_name);
       vlib_stat_segment_unlock ();
       clib_mem_set_heap (oldheap);
       no_max_nodes = l;
@@ -564,6 +647,39 @@ update_node_counters (stat_segment_main_t * sm)
          counter_t *c;
          vlib_node_t *n = nodes[i];
 
+         if (j == 0)
+           {
+             if (strncmp ((char *) sm->nodes[n->index], (char *) n->name,
+                          strlen ((char *) sm->nodes[n->index])))
+               {
+                 u8 *s = 0;
+                 u32 vector_index;
+                 u8 *symlink_new_name = 0;
+                 void *oldheap = clib_mem_set_heap (sm->heap);
+                 vlib_stat_segment_lock ();
+                 s = format (s, "%v%c", n->name, 0);
+#define _(E, t, name, p)                                                      \
+  vec_reset_length (symlink_name);                                            \
+  symlink_name = format (symlink_name, "/nodes/%U/" #name "%c",               \
+                        format_vlib_stats_symlink, sm->nodes[n->index], 0);  \
+  clib_mem_set_heap (oldheap); /* Exit stats segment */                       \
+  vector_index = lookup_hash_index ((u8 *) symlink_name);                     \
+  clib_mem_set_heap (sm->heap); /* Re-enter stat segment */                   \
+  vec_reset_length (symlink_new_name);                                        \
+  symlink_new_name = format (symlink_new_name, "/nodes/%U/" #name "%c",       \
+                            format_vlib_stats_symlink, s, 0);                \
+  vlib_stats_rename_symlink (oldheap, vector_index, symlink_new_name);
+                 foreach_stat_segment_node_counter_name
+#undef _
+                   vec_free (symlink_name);
+                 vec_free (symlink_new_name);
+                 vec_free (sm->nodes[n->index]);
+                 sm->nodes[n->index] = s;
+                 vlib_stat_segment_unlock ();
+                 clib_mem_set_heap (oldheap);
+               }
+           }
+
          counters = sm->directory_vector[STAT_COUNTER_NODE_CLOCKS].data;
          c = counters[j];
          c[n->index] = n->stats_total.clocks - n->stats_last_clear.clocks;
@@ -578,8 +694,7 @@ update_node_counters (stat_segment_main_t * sm)
 
          counters = sm->directory_vector[STAT_COUNTER_NODE_SUSPENDS].data;
          c = counters[j];
-         c[n->index] =
-           n->stats_total.suspends - n->stats_last_clear.suspends;
+         c[n->index] = n->stats_total.suspends - n->stats_last_clear.suspends;
        }
       vec_free (node_dups[j]);
     }
@@ -939,32 +1054,62 @@ static clib_error_t *
 statseg_sw_interface_add_del (vnet_main_t * vnm, u32 sw_if_index, u32 is_add)
 {
   stat_segment_main_t *sm = &stat_segment_main;
+  vnet_sw_interface_t *si = vnet_get_sw_interface (vnm, sw_if_index);
+  vnet_sw_interface_t *si_sup =
+    vnet_get_sup_sw_interface (vnm, si->sw_if_index);
+  vnet_hw_interface_t *hi_sup;
+  u8 *s = 0;
+  u8 *symlink_name = 0;
+  u32 vector_index;
 
   void *oldheap = vlib_stats_push_heap (sm->interfaces);
   vlib_stat_segment_lock ();
 
   vec_validate (sm->interfaces, sw_if_index);
+
+  ASSERT (si_sup->type == VNET_SW_INTERFACE_TYPE_HARDWARE);
+  hi_sup = vnet_get_hw_interface (vnm, si_sup->hw_if_index);
+
+  s = format (s, "%v", hi_sup->name);
+  if (si->type != VNET_SW_INTERFACE_TYPE_HARDWARE)
+    s = format (s, ".%d", si->sub.id);
+  s = format (s, "%c", 0);
+
   if (is_add)
     {
-      vnet_sw_interface_t *si = vnet_get_sw_interface (vnm, sw_if_index);
-      vnet_sw_interface_t *si_sup =
-       vnet_get_sup_sw_interface (vnm, si->sw_if_index);
-      vnet_hw_interface_t *hi_sup;
-
-      ASSERT (si_sup->type == VNET_SW_INTERFACE_TYPE_HARDWARE);
-      hi_sup = vnet_get_hw_interface (vnm, si_sup->hw_if_index);
-
-      u8 *s = 0;
-      s = format (s, "%v", hi_sup->name);
-      if (si->type != VNET_SW_INTERFACE_TYPE_HARDWARE)
-       s = format (s, ".%d", si->sub.id);
-      s = format (s, "%c", 0);
       sm->interfaces[sw_if_index] = s;
+#define _(E, n, p)                                                            \
+  clib_mem_set_heap (oldheap); /* Exit stats segment */                       \
+  vector_index = lookup_hash_index ((u8 *) "/" #p "/" #n);                    \
+  clib_mem_set_heap (sm->heap); /* Re-enter stat segment */                   \
+  vec_reset_length (symlink_name);                                            \
+  symlink_name = format (symlink_name, "/interfaces/%U/" #n "%c",             \
+                        format_vlib_stats_symlink, s, 0);                    \
+  vlib_stats_register_symlink (oldheap, symlink_name, vector_index,           \
+                              sw_if_index, 0 /* don't lock */);
+      foreach_simple_interface_counter_name
+       foreach_combined_interface_counter_name
+#undef _
+
+         vec_free (symlink_name);
     }
   else
     {
       vec_free (sm->interfaces[sw_if_index]);
       sm->interfaces[sw_if_index] = 0;
+#define _(E, n, p)                                                            \
+  vec_reset_length (symlink_name);                                            \
+  symlink_name = format (symlink_name, "/interfaces/%U/" #n "%c",             \
+                        format_vlib_stats_symlink, s, 0);                    \
+  clib_mem_set_heap (oldheap); /* Exit stats segment */                       \
+  vector_index = lookup_hash_index ((u8 *) symlink_name);                     \
+  clib_mem_set_heap (sm->heap); /* Re-enter stat segment */                   \
+  vlib_stats_delete_counter (vector_index, oldheap);
+      foreach_simple_interface_counter_name
+       foreach_combined_interface_counter_name
+#undef _
+
+         vec_free (symlink_name);
     }
 
   stat_segment_directory_entry_t *ep;
index a048fa5..1d1ff31 100644 (file)
@@ -40,23 +40,28 @@ typedef enum
  STAT_COUNTERS
 } stat_segment_counter_t;
 
-#define foreach_stat_segment_counter_name                       \
-  _(VECTOR_RATE, SCALAR_INDEX, vector_rate, /sys)               \
-  _(VECTOR_RATE_PER_WORKER, COUNTER_VECTOR_SIMPLE,              \
-    vector_rate_per_worker, /sys)                               \
-  _(NUM_WORKER_THREADS, SCALAR_INDEX, num_worker_threads, /sys) \
-  _(INPUT_RATE, SCALAR_INDEX, input_rate, /sys)                 \
-  _(LAST_UPDATE, SCALAR_INDEX, last_update, /sys)               \
-  _(LAST_STATS_CLEAR, SCALAR_INDEX, last_stats_clear, /sys)     \
-  _(HEARTBEAT, SCALAR_INDEX, heartbeat, /sys)                   \
-  _(NODE_CLOCKS, COUNTER_VECTOR_SIMPLE, clocks, /sys/node)      \
-  _(NODE_VECTORS, COUNTER_VECTOR_SIMPLE, vectors, /sys/node)    \
-  _(NODE_CALLS, COUNTER_VECTOR_SIMPLE, calls, /sys/node)        \
-  _(NODE_SUSPENDS, COUNTER_VECTOR_SIMPLE, suspends, /sys/node)  \
-  _(INTERFACE_NAMES, NAME_VECTOR, names, /if)                   \
-  _(NODE_NAMES, NAME_VECTOR, names, /sys/node)                  \
-  _(MEM_STATSEG_TOTAL, SCALAR_INDEX, total, /mem/statseg)       \
-  _(MEM_STATSEG_USED, SCALAR_INDEX, used, /mem/statseg)
+/* clang-format off */
+#define foreach_stat_segment_node_counter_name                                \
+  _ (NODE_CLOCKS, COUNTER_VECTOR_SIMPLE, clocks, /sys/node)                   \
+  _ (NODE_VECTORS, COUNTER_VECTOR_SIMPLE, vectors, /sys/node)                 \
+  _ (NODE_CALLS, COUNTER_VECTOR_SIMPLE, calls, /sys/node)                     \
+  _ (NODE_SUSPENDS, COUNTER_VECTOR_SIMPLE, suspends, /sys/node)
+
+#define foreach_stat_segment_counter_name                                     \
+  _ (VECTOR_RATE, SCALAR_INDEX, vector_rate, /sys)                            \
+  _ (VECTOR_RATE_PER_WORKER, COUNTER_VECTOR_SIMPLE, vector_rate_per_worker,   \
+     /sys)                                                                    \
+  _ (NUM_WORKER_THREADS, SCALAR_INDEX, num_worker_threads, /sys)              \
+  _ (INPUT_RATE, SCALAR_INDEX, input_rate, /sys)                              \
+  _ (LAST_UPDATE, SCALAR_INDEX, last_update, /sys)                            \
+  _ (LAST_STATS_CLEAR, SCALAR_INDEX, last_stats_clear, /sys)                  \
+  _ (HEARTBEAT, SCALAR_INDEX, heartbeat, /sys)                                \
+  _ (INTERFACE_NAMES, NAME_VECTOR, names, /if)                                \
+  _ (NODE_NAMES, NAME_VECTOR, names, /sys/node)                               \
+  _ (MEM_STATSEG_TOTAL, SCALAR_INDEX, total, /mem/statseg)                    \
+  _ (MEM_STATSEG_USED, SCALAR_INDEX, used, /mem/statseg)                      \
+  foreach_stat_segment_node_counter_name
+/* clang-format on */
 
 /* Default stat segment 32m */
 #define STAT_SEGMENT_DEFAULT_SIZE      (32<<20)
index b09e2b5..6f26d4b 100644 (file)
@@ -25,12 +25,18 @@ typedef enum
   STAT_DIR_TYPE_ERROR_INDEX,
   STAT_DIR_TYPE_NAME_VECTOR,
   STAT_DIR_TYPE_EMPTY,
+  STAT_DIR_TYPE_SYMLINK,
 } stat_directory_type_t;
 
 typedef struct
 {
   stat_directory_type_t type;
   union {
+    struct
+    {
+      uint32_t index1;
+      uint32_t index2;
+    };
     uint64_t index;
     uint64_t value;
     void *data;
index a628207..dcea2e7 100644 (file)
@@ -330,6 +330,7 @@ class VppTestCase(CPUInterface, unittest.TestCase):
     classes. It provides methods to create and run test case.
     """
 
+    extra_vpp_statseg_config = ""
     extra_vpp_punt_config = []
     extra_vpp_plugin_config = []
     logger = null_logger
@@ -457,7 +458,8 @@ class VppTestCase(CPUInterface, unittest.TestCase):
         cls.vpp_cmdline.extend([
             "}",
             "physmem", "{", "max-size", "32m", "}",
-            "statseg", "{", "socket-name", cls.get_stats_sock_path(), "}",
+            "statseg", "{", "socket-name", cls.get_stats_sock_path(),
+            cls.extra_vpp_statseg_config, "}",
             "socksvr", "{", "socket-name", cls.get_api_sock_path(), "}",
             "node { ", default_variant, "}",
             "api-fuzz {", api_fuzzing, "}",
index bdc9811..7e17e2a 100644 (file)
@@ -6,6 +6,8 @@ from vpp_papi.vpp_stats import VPPStats
 
 from framework import tag_fixme_vpp_workers
 from framework import VppTestCase, VppTestRunner
+from scapy.layers.l2 import Ether
+from scapy.layers.inet import IP
 
 
 @tag_fixme_vpp_workers
@@ -20,6 +22,12 @@ class StatsClientTestCase(VppTestCase):
     def tearDownClass(cls):
         super(StatsClientTestCase, cls).tearDownClass()
 
+    @classmethod
+    def setUpConstants(cls):
+        cls.extra_vpp_statseg_config = "per-node-counters on"
+        cls.extra_vpp_statseg_config += " update-interval 0.05"
+        super(StatsClientTestCase, cls).setUpConstants()
+
     def test_set_errors(self):
         """Test set errors"""
         self.assertEqual(self.statistics.set_errors(), {})
@@ -44,6 +52,94 @@ class StatsClientTestCase(VppTestCase):
                          "ending client side file descriptor count: %s" % (
                              initial_fds, ending_fds))
 
+    def test_symlink_values(self):
+        """Test symlinks reported values"""
+        self.create_pg_interfaces(range(2))
+
+        for i in self.pg_interfaces:
+            i.admin_up()
+            i.config_ip4()
+            i.resolve_arp()
+
+        p = list()
+        for i in range(5):
+            packet = (Ether(src=self.pg0.remote_mac, dst=self.pg0.local_mac) /
+                      IP(src=self.pg0.remote_ip4, dst=self.pg1.remote_ip4))
+            p.append(packet)
+
+        self.send_and_expect(self.pg0, p, self.pg1)
+
+        pg1_tx = self.statistics.get_counter('/interfaces/pg1/tx')
+        if_tx = self.statistics.get_counter('/if/tx')
+
+        self.assertEqual(pg1_tx[0]['bytes'],
+                         if_tx[0][self.pg1.sw_if_index]['bytes'])
+        for i in self.pg_interfaces:
+            i.unconfig()
+            i.admin_down()
+
+    def test_symlink_add_del_interfaces(self):
+        """Test symlinks when adding and deleting interfaces"""
+        # We first create and delete interfaces
+        self.create_loopback_interfaces(1)
+        self.create_pg_interfaces(range(1))
+        self.loop0.remove_vpp_config()
+        self.create_pg_interfaces(range(2))
+
+        for i in self.pg_interfaces:
+            i.admin_up()
+            i.config_ip4()
+            i.resolve_arp()
+
+        p = list()
+        bytes_to_send = 0
+        for i in range(5):
+            packet = (Ether(src=self.pg0.remote_mac, dst=self.pg0.local_mac) /
+                      IP(src=self.pg0.remote_ip4, dst=self.pg1.remote_ip4))
+            bytes_to_send += len(packet)
+            p.append(packet)
+
+        tx_before_sending = self.statistics.get_counter('/interfaces/pg1/tx')
+        rx_before_sending = self.statistics.get_counter('/interfaces/pg0/rx')
+        self.send_and_expect(self.pg0, p, self.pg1)
+        tx = self.statistics.get_counter('/interfaces/pg1/tx')
+        rx = self.statistics.get_counter('/interfaces/pg0/rx')
+
+        # We wait for nodes symlinks to update (interfaces created/deleted).
+        # ... and packets to be sent
+        self.sleep(0.1)
+        vectors = self.statistics.get_counter('/nodes/pg1-tx/vectors')
+
+        self.assertEqual(tx[0]['bytes'] - tx_before_sending[0]['bytes'],
+                         bytes_to_send)
+        self.assertEqual(tx[0]['packets'] - tx_before_sending[0]['packets'],
+                         5)
+        self.assertEqual(rx[0]['bytes'] - rx_before_sending[0]['bytes'],
+                         bytes_to_send)
+        self.assertEqual(rx[0]['packets'] - rx_before_sending[0]['packets'],
+                         5)
+        self.assertEqual(vectors[0], rx[0]['packets'])
+
+        for i in self.pg_interfaces:
+            i.unconfig()
+            i.admin_down()
+
+    def test_index_consistency(self):
+        """Test index consistency despite changes in the stats"""
+        d = self.statistics.ls(['/if/names'])
+        self.create_loopback_interfaces(10)
+        for i in range(10):
+            try:
+                s = self.statistics.dump(d)
+                break
+            except:
+                pass
+        k, v = s.popitem()
+        self.assertEqual(len(v), 11)
+
+        for i in self.lo_interfaces:
+            i.remove_vpp_config()
+
     @unittest.skip("Manual only")
     def test_mem_leak(self):
         def loop():