+/*
+ * Snapshot the node vector of up to max_threads vlib_main_t's.
+ *
+ * On return, *node_dupsp is a vector (indexed by thread) of copies of each
+ * thread's nm->nodes pointer vector, and *stat_vmsp is the cached vector of
+ * per-thread vlib_main_t pointers.  Both are (re)allocated as needed, which
+ * is why they are passed by reference and written back at the end.
+ *
+ * include_stats: if non-zero, sync per-node counters into each node first.
+ * barrier_sync:  if non-zero, hold the worker barrier across the scrape so
+ *                the counts are coherent across threads.
+ */
+void
+vlib_node_get_nodes (vlib_main_t * vm, u32 max_threads, int include_stats,
+ int barrier_sync, vlib_node_t **** node_dupsp,
+ vlib_main_t *** stat_vmsp)
+{
+ /* NOTE(review): this initializer is a dead store -- nm is unconditionally
+  * reassigned per-thread inside the loop below before any use. */
+ vlib_node_main_t *nm = &vm->node_main;
+ vlib_node_t *n;
+ vlib_node_t ***node_dups = *node_dupsp;
+ vlib_node_t **nodes;
+ vlib_main_t **stat_vms = *stat_vmsp;
+ vlib_main_t *stat_vm;
+ uword i, j;
+ u32 threads_to_serialize;
+
+ /* First call: build the cached list of active thread vlib_main_t's
+  * (null slots in vlib_mains are skipped). Reused on subsequent calls. */
+ if (vec_len (stat_vms) == 0)
+ {
+ for (i = 0; i < vec_len (vlib_mains); i++)
+ {
+ stat_vm = vlib_mains[i];
+ if (stat_vm)
+ vec_add1 (stat_vms, stat_vm);
+ }
+ }
+
+ /* Clamp to the number of threads we actually have.
+  * NOTE(review): if a caller passes max_threads == 0, the u32 expression
+  * threads_to_serialize - 1 below underflows and vec_validate would try to
+  * allocate a huge vector -- confirm callers always pass >= 1. */
+ threads_to_serialize = clib_min (max_threads, vec_len (stat_vms));
+
+ /* Ensure one node-duplicate slot per serialized thread. */
+ vec_validate (node_dups, threads_to_serialize - 1);
+
+ /*
+ * Barrier sync across stats scraping.
+ * Otherwise, the counts will be grossly inaccurate.
+ */
+ if (barrier_sync)
+ vlib_worker_thread_barrier_sync (vm);
+
+ for (j = 0; j < threads_to_serialize; j++)
+ {
+ stat_vm = stat_vms[j];
+ nm = &stat_vm->node_main;
+
+ /* Fold each node's runtime counters into the node itself so the
+  * copied pointers below reference up-to-date stats. */
+ if (include_stats)
+ {
+ for (i = 0; i < vec_len (nm->nodes); i++)
+ {
+ n = nm->nodes[i];
+ vlib_node_sync_stats (stat_vm, n);
+ }
+ }
+
+ /* Shallow-copy this thread's node pointer vector, reusing any
+  * previously allocated duplicate vector.
+  * NOTE(review): vec_validate never shrinks, so if node_dups[j] was
+  * longer on a prior call, stale trailing entries survive past the
+  * memcpy -- presumably callers iterate by vec_len (nm->nodes); verify. */
+ nodes = node_dups[j];
+ vec_validate (nodes, vec_len (nm->nodes) - 1);
+ clib_memcpy (nodes, nm->nodes, vec_len (nm->nodes) * sizeof (nodes[0]));
+ node_dups[j] = nodes;
+ }
+
+ if (barrier_sync)
+ vlib_worker_thread_barrier_release (vm);
+
+ /* Write back: the vec_* macros may have reallocated either vector. */
+ *node_dupsp = node_dups;
+ *stat_vmsp = stat_vms;
+}
+