uword page_size = clib_mem_get_page_size ();
if (pm->is_running)
- for (int i = 0; i < vec_len (vlib_mains); i++)
- vlib_node_set_dispatch_wrapper (vlib_mains[i], 0);
+ for (int i = 0; i < vlib_get_n_threads (); i++)
+ vlib_node_set_dispatch_wrapper (vlib_get_main_by_index (i), 0);
for (int i = 0; i < vec_len (pm->fds_to_close); i++)
close (pm->fds_to_close[i]);
vec_free (tr->node_stats);
for (int j = 0; j < PERF_MAX_EVENTS; j++)
if (tr->mmap_pages[j])
- munmap (tr->mmap_pages, page_size);
+ munmap (tr->mmap_pages[j], page_size);
}
vec_free (pm->thread_runtimes);
pm->active_bundle = 0;
}
-clib_error_t *
+static clib_error_t *
perfmon_set (vlib_main_t *vm, perfmon_bundle_t *b)
{
clib_error_t *err = 0;
s = b->src;
ASSERT (b->n_events);
- if (b->type == PERFMON_BUNDLE_TYPE_NODE)
+ if (b->active_type == PERFMON_BUNDLE_TYPE_NODE)
is_node = 1;
if (s->instances_by_type == 0)
{
vec_add2 (pm->default_instance_type, it, 1);
it->name = is_node ? "Thread/Node" : "Thread";
- for (int i = 0; i < vec_len (vlib_mains); i++)
+ for (int i = 0; i < vlib_get_n_threads (); i++)
{
vlib_worker_thread_t *w = vlib_worker_threads + i;
perfmon_instance_t *in;
in->name = (char *) format (0, "%s (%u)%c", w->name, i, 0);
}
if (is_node)
- vec_validate (pm->thread_runtimes, vec_len (vlib_mains) - 1);
+ vec_validate (pm->thread_runtimes, vlib_get_n_threads () - 1);
}
else
{
{
perfmon_thread_runtime_t *rt;
rt = vec_elt_at_index (pm->thread_runtimes, i);
+ rt->bundle = b;
rt->n_events = b->n_events;
rt->n_nodes = n_nodes;
vec_validate_aligned (rt->node_stats, n_nodes - 1,
}
clib_error_t *
-perfmon_start (vlib_main_t *vm)
+perfmon_start (vlib_main_t *vm, perfmon_bundle_t *b)
{
+ clib_error_t *err = 0;
perfmon_main_t *pm = &perfmon_main;
- int n_groups = vec_len (pm->group_fds);
-
- if (n_groups == 0)
- return clib_error_return (0, "no bundle configured");
+ int n_groups;
if (pm->is_running == 1)
return clib_error_return (0, "already running");
+ if ((err = perfmon_set (vm, b)) != 0)
+ return err;
+
+ n_groups = vec_len (pm->group_fds);
+
for (int i = 0; i < n_groups; i++)
{
if (ioctl (pm->group_fds[i], PERF_EVENT_IOC_ENABLE,
return clib_error_return_unix (0, "ioctl(PERF_EVENT_IOC_ENABLE)");
}
}
- if (pm->active_bundle->type == PERFMON_BUNDLE_TYPE_NODE)
+ if (b->active_type == PERFMON_BUNDLE_TYPE_NODE)
{
- for (int i = 0; i < vec_len (vlib_mains); i++)
- vlib_node_set_dispatch_wrapper (vlib_mains[i],
- perfmon_dispatch_wrapper);
+
+ vlib_node_function_t *funcs[PERFMON_OFFSET_TYPE_MAX];
+#define _(type, pfunc) funcs[type] = pfunc;
+
+ foreach_permon_offset_type
+#undef _
+
+ ASSERT (funcs[b->offset_type]);
+
+ for (int i = 0; i < vlib_get_n_threads (); i++)
+ vlib_node_set_dispatch_wrapper (vlib_get_main_by_index (i),
+ funcs[b->offset_type]);
}
+
+ pm->sample_time = vlib_time_now (vm);
pm->is_running = 1;
+
return 0;
}
if (pm->is_running != 1)
return clib_error_return (0, "not running");
- if (pm->active_bundle->type == PERFMON_BUNDLE_TYPE_NODE)
+ if (pm->active_bundle->active_type == PERFMON_BUNDLE_TYPE_NODE)
{
- for (int i = 0; i < vec_len (vlib_mains); i++)
- vlib_node_set_dispatch_wrapper (vlib_mains[i], 0);
+ for (int i = 0; i < vlib_get_n_threads (); i++)
+ vlib_node_set_dispatch_wrapper (vlib_get_main_by_index (i), 0);
}
for (int i = 0; i < n_groups; i++)
}
pm->is_running = 0;
+ pm->sample_time = vlib_time_now (vm) - pm->sample_time;
+ return 0;
+}
+
+/* Return 1 when bundle b can run on this host CPU, else 0.
+   A bundle that registers no cpu_supports constraint list is treated
+   as universally supported; otherwise the bundle is supported if ANY
+   of its cpu_supports[] predicate callbacks matches this CPU.
+   NOTE(review): returns u8 used as a boolean by the bundle
+   registration loop that calls this — TODO confirm no caller relies
+   on a value other than 0/1. */
+static_always_inline u8
+is_bundle_supported (perfmon_bundle_t *b)
+{
+ perfmon_cpu_supports_t *supports = b->cpu_supports;
+
+ /* No constraint list registered => supported everywhere. */
+ if (!b->cpu_supports)
+ return 1;
+
+ /* Supported as soon as one listed CPU predicate matches. */
+ for (int i = 0; i < b->n_cpu_supports; ++i)
+ if (supports[i].cpu_supports ())
+ return 1;
+
 return 0;
}
{
clib_error_t *err;
uword *p;
+
+ if (!is_bundle_supported (b))
+ {
+ log_warn ("skipping bundle '%s' - not supported", b->name);
+ b = b->next;
+ continue;
+ }
+
if (hash_get_mem (pm->bundle_by_name, b->name) != 0)
clib_panic ("duplicate bundle name '%s'", b->name);