+ /* No selector flag given: nothing to display, so return a usage error
+ listing the accepted flags. */
+ if ((api_segment + stats_segment + main_heap + numa_heaps + map) == 0)
+ return clib_error_return
+ (0, "Need one of api-segment, stats-segment, main-heap, numa-heaps "
+ "or map");
+
+ if (api_segment)
+ {
+ /* Switch to the API message heap so the heap report is formatted
+ against that heap; format() allocates its result vector from the
+ currently active heap, so the string lands inside the API segment
+ (hence the name s_in_svm — presumably shared memory; verify). */
+ void *oldheap = vl_msg_push_heap ();
+ /* Suspend allocation tracing so the report's own allocations do not
+ pollute the trace being reported. */
+ was_enabled = clib_mem_trace_enable_disable (0);
+ u8 *s_in_svm = format (0, "%U\n", format_clib_mem_heap, 0, 1);
+ vl_msg_pop_heap (oldheap);
+ /* Duplicate the report into the process heap before printing, so we
+ do not hold onto API-segment memory longer than necessary. */
+ u8 *s = vec_dup (s_in_svm);
+
+ /* Re-enter the API heap only to free the original vector there, and
+ restore the trace flag to its prior state. */
+ oldheap = vl_msg_push_heap ();
+ vec_free (s_in_svm);
+ clib_mem_trace_enable_disable (was_enabled);
+ vl_msg_pop_heap (oldheap);
+ vlib_cli_output (vm, "API segment");
+ vlib_cli_output (vm, "%v", s);
+ vec_free (s);
+ }
+ if (stats_segment)
+ {
+ /* Same push-format-dup-free dance as the API segment, except
+ vlib_stats_push_heap() may return 0 (no separate stats heap), so
+ every heap restore is guarded on oldheap being non-NULL. */
+ void *oldheap = vlib_stats_push_heap (0);
+ /* Suspend tracing so the report's own allocations don't appear in it. */
+ was_enabled = clib_mem_trace_enable_disable (0);
+ u8 *s_in_svm = format (0, "%U\n", format_clib_mem_heap, 0, 1);
+ if (oldheap)
+ clib_mem_set_heap (oldheap);
+ /* Copy the report into the process heap before printing. */
+ u8 *s = vec_dup (s_in_svm);
+
+ oldheap = vlib_stats_push_heap (0);
+ vec_free (s_in_svm);
+ if (oldheap)
+ {
+ clib_mem_trace_enable_disable (was_enabled);
+ clib_mem_set_heap (oldheap);
+ }
+ vlib_cli_output (vm, "Stats segment");
+ vlib_cli_output (vm, "%v", s);
+ vec_free (s);
+ }
+
+
+ {
+ if (main_heap)
+ {
+ /*
+ * Note: the foreach_vlib_main causes allocator traffic,
+ * so shut off tracing before we go there...
+ */
+ was_enabled = clib_mem_trace_enable_disable (0);
+
+ foreach_vlib_main ()
+ {
+ vlib_cli_output (vm, "%sThread %d %s\n", index ? "\n" : "", index,
+ vlib_worker_threads[index].name);
+ vlib_cli_output (vm, " %U\n", format_clib_mem_heap,
+ mm->per_cpu_mheaps[index], verbose);
+ index++;
+ }
+
+ /* Restore the trace flag */
+ clib_mem_trace_enable_disable (was_enabled);
+ }
+ if (numa_heaps)
+ {
+ /* Report each per-numa heap that exists and is distinct from the
+ corresponding per-cpu (main) heap. */
+ for (i = 0; i < ARRAY_LEN (mm->per_numa_mheaps); i++)
+ {
+ if (mm->per_numa_mheaps[i] == 0)
+ continue;
+ if (mm->per_numa_mheaps[i] == mm->per_cpu_mheaps[i])
+ {
+ vlib_cli_output (vm, "Numa %d uses the main heap...", i);
+ continue;
+ }
+ /* Suspend tracing while formatting, as in the main-heap case. */
+ was_enabled = clib_mem_trace_enable_disable (0);
+
+ vlib_cli_output (vm, "Numa %d:", i);
+ /* BUG FIX: was per_numa_mheaps[index] — `index` is stale leftover
+ from the per-thread loop above (or unset if main-heap was not
+ requested); the heap for this iteration is indexed by `i`. */
+ vlib_cli_output (vm, " %U\n", format_clib_mem_heap,
+ mm->per_numa_mheaps[i], verbose);
+
+ /* BUG FIX: restore the trace flag; previously it was left
+ disabled after this command ran. */
+ clib_mem_trace_enable_disable (was_enabled);
+ }
+ }
+ if (map)
+ {
+ /* Dump one table row per virtual-memory mapping known to clib:
+ start address, total size, backing fd, page size, page count,
+ per-numa resident page counts, unmapped pages, and the name. */
+ clib_mem_page_stats_t stats = { };
+ clib_mem_vm_map_hdr_t *hdr = 0;
+ u8 *s = 0;
+ int numa = -1;
+
+ /* Header row: fixed columns, then one "NumaN" column per present
+ numa node. */
+ s = format (s, "\n%-16s%7s%5s%7s%7s",
+ "StartAddr", "size", "FD", "PageSz", "Pages");
+ while ((numa = vlib_mem_get_next_numa_node (numa)) != -1)
+ s = format (s, " Numa%u", numa);
+ s = format (s, " NotMap");
+ s = format (s, " Name");
+ vlib_cli_output (vm, "%v", s);
+ vec_reset_length (s);
+
+ /* One row per mapping; the vector is reset (not freed) each pass to
+ reuse its storage. */
+ while ((hdr = clib_mem_vm_get_next_map_hdr (hdr)))
+ {
+ clib_mem_get_page_stats ((void *) hdr->base_addr,
+ hdr->log2_page_sz, hdr->num_pages,
+ &stats);
+ /* Total size in bytes = num_pages << log2 page size. */
+ s = format (s, "%016lx%7U",
+ hdr->base_addr, format_memory_size,
+ hdr->num_pages << hdr->log2_page_sz);
+
+ /* fd == -1 means anonymous mapping: leave the column blank. */
+ if (hdr->fd != -1)
+ s = format (s, "%5d", hdr->fd);
+ else
+ s = format (s, "%5s", " ");
+
+ s = format (s, "%7U%7lu",
+ format_log2_page_size, hdr->log2_page_sz,
+ hdr->num_pages);
+ /* numa is -1 here: the previous walk (header or prior row) always
+ terminates with numa == -1, so each walk restarts cleanly. */
+ while ((numa = vlib_mem_get_next_numa_node (numa)) != -1)
+ s = format (s, "%6lu", stats.per_numa[numa]);
+ s = format (s, "%7lu", stats.not_mapped);
+ s = format (s, " %s", hdr->name);
+ vlib_cli_output (vm, "%v", s);
+ vec_reset_length (s);
+ }
+ vec_free (s);
+ }
+ }