/*
 * Copyright (c) 2018 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
16 #include <vppinfra/mem.h>
17 #include <vpp/stats/stats.h>
18 #undef HAVE_MEMFD_CREATE
19 #include <vppinfra/linux/syscall.h>
22 * Used only by VPP writers
25 vlib_stat_segment_lock (void)
27 stats_main_t *sm = &stats_main;
28 clib_spinlock_lock (sm->stat_segment_lockp);
29 sm->shared_header->in_progress = 1;
33 vlib_stat_segment_unlock (void)
35 stats_main_t *sm = &stats_main;
36 sm->shared_header->epoch++;
37 sm->shared_header->in_progress = 0;
38 clib_spinlock_unlock (sm->stat_segment_lockp);
42 * Change heap to the stats shared memory segment
45 vlib_stats_push_heap (void)
47 stats_main_t *sm = &stats_main;
49 ASSERT (sm && sm->shared_header);
50 return clib_mem_set_heap (sm->heap);
53 /* Name to vector index hash */
55 lookup_or_create_hash_index (void *oldheap, char *name, u32 next_vector_index)
57 stats_main_t *sm = &stats_main;
61 hp = hash_get_pair (sm->directory_vector_by_name, name);
64 hash_set (sm->directory_vector_by_name, name, next_vector_index);
65 index = next_vector_index;
76 vlib_stats_pop_heap (void *cm_arg, void *oldheap, stat_directory_type_t type)
78 vlib_simple_counter_main_t *cm = (vlib_simple_counter_main_t *) cm_arg;
79 stats_main_t *sm = &stats_main;
80 stat_segment_shared_header_t *shared_header = sm->shared_header;
81 char *stat_segment_name;
82 stat_segment_directory_entry_t e = { 0 };
84 /* Not all counters have names / hash-table entries */
85 if (!cm->name && !cm->stat_segment_name)
87 clib_mem_set_heap (oldheap);
91 ASSERT (shared_header);
93 vlib_stat_segment_lock ();
95 /* Lookup hash-table is on the main heap */
97 cm->stat_segment_name ? cm->stat_segment_name : cm->name;
98 u32 next_vector_index = vec_len (sm->directory_vector);
99 clib_mem_set_heap (oldheap); /* Exit stats segment */
100 u32 vector_index = lookup_or_create_hash_index (oldheap, stat_segment_name,
102 /* Back to stats segment */
103 clib_mem_set_heap (sm->heap); /* Re-enter stat segment */
106 /* Update the vector */
107 if (vector_index == next_vector_index)
109 strncpy (e.name, stat_segment_name, 128 - 1);
111 vec_add1 (sm->directory_vector, e);
115 stat_segment_directory_entry_t *ep = &sm->directory_vector[vector_index];
116 ep->offset = stat_segment_offset (shared_header, cm->counters); /* Vector of threads of vectors of counters */
118 ep->offset_vector ? stat_segment_pointer (shared_header,
119 ep->offset_vector) : 0;
121 /* Update the 2nd dimension offset vector */
123 vec_validate (offset_vector, vec_len (cm->counters) - 1);
124 for (i = 0; i < vec_len (cm->counters); i++)
125 offset_vector[i] = stat_segment_offset (shared_header, cm->counters[i]);
126 ep->offset_vector = stat_segment_offset (shared_header, offset_vector);
127 sm->directory_vector[vector_index].offset =
128 stat_segment_offset (shared_header, cm->counters);
130 /* Reset the client hash table pointer, since it WILL change! */
131 shared_header->directory_offset =
132 stat_segment_offset (shared_header, sm->directory_vector);
134 vlib_stat_segment_unlock ();
135 clib_mem_set_heap (oldheap);
139 vlib_stats_register_error_index (u8 * name, u64 * em_vec, u64 index)
141 stats_main_t *sm = &stats_main;
142 stat_segment_shared_header_t *shared_header = sm->shared_header;
143 stat_segment_directory_entry_t e;
146 ASSERT (shared_header);
148 vlib_stat_segment_lock ();
150 memcpy (e.name, name, vec_len (name));
151 e.name[vec_len (name)] = '\0';
152 e.type = STAT_DIR_TYPE_ERROR_INDEX;
154 vec_add1 (sm->directory_vector, e);
156 /* Warn clients to refresh any pointers they might be holding */
157 shared_header->directory_offset =
158 stat_segment_offset (shared_header, sm->directory_vector);
160 vlib_stat_segment_unlock ();
164 stat_validate_counter_vector (stat_segment_directory_entry_t * ep, u32 max)
166 stats_main_t *sm = &stats_main;
167 stat_segment_shared_header_t *shared_header = sm->shared_header;
168 counter_t **counters = 0;
169 vlib_thread_main_t *tm = vlib_get_thread_main ();
171 u64 *offset_vector = 0;
173 vec_validate_aligned (counters, tm->n_vlib_mains - 1,
174 CLIB_CACHE_LINE_BYTES);
175 for (i = 0; i < tm->n_vlib_mains; i++)
177 vec_validate_aligned (counters[i], max, CLIB_CACHE_LINE_BYTES);
178 vec_add1 (offset_vector,
179 stat_segment_offset (shared_header, counters[i]));
181 ep->offset = stat_segment_offset (shared_header, counters);
182 ep->offset_vector = stat_segment_offset (shared_header, offset_vector);
186 vlib_stats_pop_heap2 (u64 * error_vector, u32 thread_index, void *oldheap)
188 stats_main_t *sm = &stats_main;
189 stat_segment_shared_header_t *shared_header = sm->shared_header;
191 ASSERT (shared_header);
193 vlib_stat_segment_lock ();
195 /* Reset the client hash table pointer, since it WILL change! */
196 shared_header->error_offset =
197 stat_segment_offset (shared_header, error_vector);
198 shared_header->directory_offset =
199 stat_segment_offset (shared_header, sm->directory_vector);
201 vlib_stat_segment_unlock ();
202 clib_mem_set_heap (oldheap);
206 vlib_map_stat_segment_init (void)
208 stats_main_t *sm = &stats_main;
209 stat_segment_shared_header_t *shared_header;
210 stat_segment_directory_entry_t *ep;
221 char *mem_name = "stat_segment_test";
224 memory_size = sm->memory_size;
225 if (memory_size == 0)
226 memory_size = STAT_SEGMENT_DEFAULT_SIZE;
228 /* Create shared memory segment */
229 if ((mfd = memfd_create (mem_name, 0)) < 0)
230 return clib_error_return (0, "stat segment memfd_create failure");
233 if ((ftruncate (mfd, memory_size)) == -1)
234 return clib_error_return (0, "stat segment ftruncate failure");
237 mmap (NULL, memory_size, PROT_READ | PROT_WRITE, MAP_SHARED, mfd,
239 return clib_error_return (0, "stat segment mmap failure");
242 #if USE_DLMALLOC == 0
243 heap = mheap_alloc_with_flags (((u8 *) memaddr) + getpagesize (),
244 memory_size - getpagesize (),
245 MHEAP_FLAG_DISABLE_VM |
246 MHEAP_FLAG_THREAD_SAFE);
249 create_mspace_with_base (((u8 *) memaddr) + getpagesize (),
250 memory_size - getpagesize (), 1 /* locked */ );
251 mspace_disable_expand (heap);
257 sm->directory_vector_by_name = hash_create_string (0, sizeof (uword));
258 sm->shared_header = shared_header = memaddr;
259 sm->stat_segment_lockp = clib_mem_alloc (sizeof (clib_spinlock_t));
260 clib_spinlock_init (sm->stat_segment_lockp);
262 oldheap = clib_mem_set_heap (sm->heap);
264 /* Set up the name to counter-vector hash table */
265 sm->directory_vector = 0;
267 shared_header->epoch = 1;
269 /* Scalar stats and node counters */
270 vec_validate (sm->directory_vector, STAT_COUNTERS - 1);
272 strcpy(sm->directory_vector[STAT_COUNTER_##E].name, "/sys" #p "/" #n); \
273 sm->directory_vector[STAT_COUNTER_##E].type = STAT_DIR_TYPE_##t;
274 foreach_stat_segment_counter_name
276 /* Save the vector offset in the shared segment, for clients */
277 shared_header->directory_offset =
278 stat_segment_offset (shared_header, sm->directory_vector);
280 clib_mem_set_heap (oldheap);
286 name_sort_cmp (void *a1, void *a2)
288 stat_segment_directory_entry_t *n1 = a1;
289 stat_segment_directory_entry_t *n2 = a2;
291 return strcmp ((char *) n1->name, (char *) n2->name);
295 format_stat_dir_entry (u8 * s, va_list * args)
297 stat_segment_directory_entry_t *ep =
298 va_arg (*args, stat_segment_directory_entry_t *);
302 format_string = "%-74s %-10s %10lld";
306 case STAT_DIR_TYPE_SCALAR_INDEX:
307 type_name = "ScalarPtr";
310 case STAT_DIR_TYPE_COUNTER_VECTOR_SIMPLE:
311 case STAT_DIR_TYPE_COUNTER_VECTOR_COMBINED:
312 type_name = "CMainPtr";
315 case STAT_DIR_TYPE_ERROR_INDEX:
316 type_name = "ErrIndex";
320 type_name = "illegal!";
324 return format (s, format_string, ep->name, type_name, ep->offset);
327 static clib_error_t *
328 show_stat_segment_command_fn (vlib_main_t * vm,
329 unformat_input_t * input,
330 vlib_cli_command_t * cmd)
332 stats_main_t *sm = &stats_main;
335 stat_segment_directory_entry_t *show_data, *this;
341 if (unformat (input, "verbose"))
344 /* Lock even as reader, as this command doesn't handle epoch changes */
345 vlib_stat_segment_lock ();
346 show_data = vec_dup (sm->directory_vector);
347 vlib_stat_segment_unlock ();
349 vec_sort_with_function (show_data, name_sort_cmp);
351 vlib_cli_output (vm, "%-74s %10s %10s", "Name", "Type", "Value");
353 for (i = 0; i < vec_len (show_data); i++)
355 vlib_cli_output (vm, "%-100U", format_stat_dir_entry,
356 vec_elt_at_index (show_data, i));
362 vlib_cli_output (vm, "%U", format_mheap, sm->heap, 0 /* verbose */ );
369 VLIB_CLI_COMMAND (show_stat_segment_command, static) =
371 .path = "show statistics segment",
372 .short_help = "show statistics segment [verbose]",
373 .function = show_stat_segment_command_fn,
378 * Node performance counters:
379 * total_calls [threads][node-index]
386 update_node_counters (stats_main_t * sm)
388 vlib_main_t *vm = vlib_mains[0];
389 vlib_main_t **stat_vms = 0;
390 vlib_node_t ***node_dups = 0;
392 stat_segment_shared_header_t *shared_header = sm->shared_header;
393 static u32 no_max_nodes = 0;
395 vlib_node_get_nodes (0 /* vm, for barrier sync */ ,
396 (u32) ~ 0 /* all threads */ ,
397 1 /* include stats */ ,
398 0 /* barrier sync */ ,
399 &node_dups, &stat_vms);
401 u32 l = vec_len (node_dups[0]);
404 * Extend performance nodes if necessary
406 if (l > no_max_nodes)
408 void *oldheap = clib_mem_set_heap (sm->heap);
409 vlib_stat_segment_lock ();
411 stat_validate_counter_vector (&sm->directory_vector
412 [STAT_COUNTER_NODE_CLOCKS], l);
413 stat_validate_counter_vector (&sm->directory_vector
414 [STAT_COUNTER_NODE_VECTORS], l);
415 stat_validate_counter_vector (&sm->directory_vector
416 [STAT_COUNTER_NODE_CALLS], l);
417 stat_validate_counter_vector (&sm->directory_vector
418 [STAT_COUNTER_NODE_SUSPENDS], l);
420 vlib_stat_segment_unlock ();
421 clib_mem_set_heap (oldheap);
425 for (j = 0; j < vec_len (node_dups); j++)
427 vlib_node_t **nodes = node_dups[j];
428 u32 l = vec_len (nodes);
430 for (i = 0; i < vec_len (nodes); i++)
432 counter_t **counters;
434 vlib_node_t *n = nodes[i];
437 stat_segment_pointer (shared_header,
439 [STAT_COUNTER_NODE_CLOCKS].offset);
441 c[n->index] = n->stats_total.clocks - n->stats_last_clear.clocks;
444 stat_segment_pointer (shared_header,
446 [STAT_COUNTER_NODE_VECTORS].offset);
448 c[n->index] = n->stats_total.vectors - n->stats_last_clear.vectors;
451 stat_segment_pointer (shared_header,
453 [STAT_COUNTER_NODE_CALLS].offset);
455 c[n->index] = n->stats_total.calls - n->stats_last_clear.calls;
458 stat_segment_pointer (shared_header,
460 [STAT_COUNTER_NODE_SUSPENDS].offset);
463 n->stats_total.suspends - n->stats_last_clear.suspends;
469 * Called by stats_thread_fn, in stats.c, which runs in a
470 * separate pthread, which won't halt the parade
471 * in single-forwarding-core cases.
475 do_stat_segment_updates (stats_main_t * sm)
477 vlib_main_t *vm = vlib_mains[0];
479 u64 input_packets, last_input_packets;
481 vlib_main_t *this_vlib_main;
485 * Compute the average vector rate across all workers
489 start = vec_len (vlib_mains) > 1 ? 1 : 0;
491 for (i = start; i < vec_len (vlib_mains); i++)
493 this_vlib_main = vlib_mains[i];
494 vector_rate += vlib_last_vector_length_per_node (this_vlib_main);
496 vector_rate /= (f64) (i - start);
498 sm->directory_vector[STAT_COUNTER_VECTOR_RATE].value =
499 vector_rate / ((f64) (vec_len (vlib_mains) - start));
502 * Compute the aggregate input rate
504 now = vlib_time_now (vm);
505 dt = now - sm->directory_vector[STAT_COUNTER_LAST_UPDATE].value;
506 input_packets = vnet_get_aggregate_rx_packets ();
507 sm->directory_vector[STAT_COUNTER_INPUT_RATE].value =
508 (f64) (input_packets - sm->last_input_packets) / dt;
509 sm->directory_vector[STAT_COUNTER_LAST_UPDATE].value = now;
510 sm->last_input_packets = input_packets;
511 sm->directory_vector[STAT_COUNTER_LAST_STATS_CLEAR].value =
512 vm->node_main.time_last_runtime_stats_clear;
514 if (sm->node_counters_enabled)
515 update_node_counters (sm);
517 /* Heartbeat, so clients detect we're still here */
518 sm->directory_vector[STAT_COUNTER_HEARTBEAT].value++;
521 static clib_error_t *
522 statseg_config (vlib_main_t * vm, unformat_input_t * input)
524 stats_main_t *sm = &stats_main;
527 while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
529 if (unformat (input, "size %U", unformat_memory_size, &sm->memory_size))
531 else if (unformat (input, "per-node-counters on"))
532 sm->node_counters_enabled = 1;
533 else if (unformat (input, "per-node-counters off"))
534 sm->node_counters_enabled = 0;
536 return clib_error_return (0, "unknown input `%U'",
537 format_unformat_error, input);
543 VLIB_EARLY_CONFIG_FUNCTION (statseg_config, "statseg");
/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */