/*
 * Copyright (c) 2018 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
16 #include <vppinfra/mem.h>
17 #include <vlib/vlib.h>
18 #include <vlib/unix/unix.h>
19 #include "stat_segment.h"
20 #include <vnet/vnet.h>
21 #include <vnet/devices/devices.h> /* vnet_get_aggregate_rx_packets */
22 #undef HAVE_MEMFD_CREATE
23 #include <vppinfra/linux/syscall.h>
24 #include <vpp-api/client/stat_client.h>
25 stat_segment_main_t stat_segment_main;
28 * Used only by VPP writers
31 vlib_stat_segment_lock (void)
33 stat_segment_main_t *sm = &stat_segment_main;
34 clib_spinlock_lock (sm->stat_segment_lockp);
35 sm->shared_header->in_progress = 1;
39 vlib_stat_segment_unlock (void)
41 stat_segment_main_t *sm = &stat_segment_main;
42 sm->shared_header->epoch++;
43 sm->shared_header->in_progress = 0;
44 clib_spinlock_unlock (sm->stat_segment_lockp);
48 * Change heap to the stats shared memory segment
51 vlib_stats_push_heap (void)
53 stat_segment_main_t *sm = &stat_segment_main;
55 ASSERT (sm && sm->shared_header);
56 return clib_mem_set_heap (sm->heap);
59 /* Name to vector index hash */
61 lookup_or_create_hash_index (void *oldheap, char *name, u32 next_vector_index)
63 stat_segment_main_t *sm = &stat_segment_main;
67 hp = hash_get_pair (sm->directory_vector_by_name, name);
70 hash_set (sm->directory_vector_by_name, name, next_vector_index);
71 index = next_vector_index;
82 vlib_stats_pop_heap (void *cm_arg, void *oldheap, stat_directory_type_t type)
84 vlib_simple_counter_main_t *cm = (vlib_simple_counter_main_t *) cm_arg;
85 stat_segment_main_t *sm = &stat_segment_main;
86 stat_segment_shared_header_t *shared_header = sm->shared_header;
87 char *stat_segment_name;
88 stat_segment_directory_entry_t e = { 0 };
90 /* Not all counters have names / hash-table entries */
91 if (!cm->name && !cm->stat_segment_name)
93 clib_mem_set_heap (oldheap);
97 ASSERT (shared_header);
99 vlib_stat_segment_lock ();
101 /* Lookup hash-table is on the main heap */
103 cm->stat_segment_name ? cm->stat_segment_name : cm->name;
104 u32 next_vector_index = vec_len (sm->directory_vector);
105 clib_mem_set_heap (oldheap); /* Exit stats segment */
106 u32 vector_index = lookup_or_create_hash_index (oldheap, stat_segment_name,
108 /* Back to stats segment */
109 clib_mem_set_heap (sm->heap); /* Re-enter stat segment */
112 /* Update the vector */
113 if (vector_index == next_vector_index)
115 strncpy (e.name, stat_segment_name, 128 - 1);
117 vec_add1 (sm->directory_vector, e);
120 stat_segment_directory_entry_t *ep = &sm->directory_vector[vector_index];
121 ep->offset = stat_segment_offset (shared_header, cm->counters); /* Vector of threads of vectors of counters */
123 ep->offset_vector ? stat_segment_pointer (shared_header,
124 ep->offset_vector) : 0;
126 /* Update the 2nd dimension offset vector */
128 vec_validate (offset_vector, vec_len (cm->counters) - 1);
129 for (i = 0; i < vec_len (cm->counters); i++)
130 offset_vector[i] = stat_segment_offset (shared_header, cm->counters[i]);
131 ep->offset_vector = stat_segment_offset (shared_header, offset_vector);
132 sm->directory_vector[vector_index].offset =
133 stat_segment_offset (shared_header, cm->counters);
135 /* Reset the client hash table pointer, since it WILL change! */
136 shared_header->directory_offset =
137 stat_segment_offset (shared_header, sm->directory_vector);
139 vlib_stat_segment_unlock ();
140 clib_mem_set_heap (oldheap);
144 vlib_stats_register_error_index (u8 * name, u64 * em_vec, u64 index)
146 stat_segment_main_t *sm = &stat_segment_main;
147 stat_segment_shared_header_t *shared_header = sm->shared_header;
148 stat_segment_directory_entry_t e;
151 ASSERT (shared_header);
153 vlib_stat_segment_lock ();
155 memcpy (e.name, name, vec_len (name));
156 e.name[vec_len (name)] = '\0';
157 e.type = STAT_DIR_TYPE_ERROR_INDEX;
160 vec_add1 (sm->directory_vector, e);
162 /* Warn clients to refresh any pointers they might be holding */
163 shared_header->directory_offset =
164 stat_segment_offset (shared_header, sm->directory_vector);
166 vlib_stat_segment_unlock ();
170 stat_validate_counter_vector (stat_segment_directory_entry_t * ep, u32 max)
172 stat_segment_main_t *sm = &stat_segment_main;
173 stat_segment_shared_header_t *shared_header = sm->shared_header;
174 counter_t **counters = 0;
175 vlib_thread_main_t *tm = vlib_get_thread_main ();
177 u64 *offset_vector = 0;
179 vec_validate_aligned (counters, tm->n_vlib_mains - 1,
180 CLIB_CACHE_LINE_BYTES);
181 for (i = 0; i < tm->n_vlib_mains; i++)
183 vec_validate_aligned (counters[i], max, CLIB_CACHE_LINE_BYTES);
184 vec_add1 (offset_vector,
185 stat_segment_offset (shared_header, counters[i]));
187 ep->offset = stat_segment_offset (shared_header, counters);
188 ep->offset_vector = stat_segment_offset (shared_header, offset_vector);
192 vlib_stats_pop_heap2 (u64 * error_vector, u32 thread_index, void *oldheap)
194 stat_segment_main_t *sm = &stat_segment_main;
195 stat_segment_shared_header_t *shared_header = sm->shared_header;
197 ASSERT (shared_header);
199 vlib_stat_segment_lock ();
201 /* Reset the client hash table pointer, since it WILL change! */
202 shared_header->error_offset =
203 stat_segment_offset (shared_header, error_vector);
204 shared_header->directory_offset =
205 stat_segment_offset (shared_header, sm->directory_vector);
207 vlib_stat_segment_unlock ();
208 clib_mem_set_heap (oldheap);
212 vlib_map_stat_segment_init (void)
214 stat_segment_main_t *sm = &stat_segment_main;
215 stat_segment_shared_header_t *shared_header;
216 stat_segment_directory_entry_t *ep;
227 char *mem_name = "stat_segment_test";
230 memory_size = sm->memory_size;
231 if (memory_size == 0)
232 memory_size = STAT_SEGMENT_DEFAULT_SIZE;
234 /* Create shared memory segment */
235 if ((mfd = memfd_create (mem_name, 0)) < 0)
236 return clib_error_return (0, "stat segment memfd_create failure");
239 if ((ftruncate (mfd, memory_size)) == -1)
240 return clib_error_return (0, "stat segment ftruncate failure");
243 mmap (NULL, memory_size, PROT_READ | PROT_WRITE, MAP_SHARED, mfd,
245 return clib_error_return (0, "stat segment mmap failure");
248 #if USE_DLMALLOC == 0
249 heap = mheap_alloc_with_flags (((u8 *) memaddr) + getpagesize (),
250 memory_size - getpagesize (),
251 MHEAP_FLAG_DISABLE_VM |
252 MHEAP_FLAG_THREAD_SAFE);
255 create_mspace_with_base (((u8 *) memaddr) + getpagesize (),
256 memory_size - getpagesize (), 1 /* locked */ );
257 mspace_disable_expand (heap);
263 sm->directory_vector_by_name = hash_create_string (0, sizeof (uword));
264 sm->shared_header = shared_header = memaddr;
265 sm->stat_segment_lockp = clib_mem_alloc (sizeof (clib_spinlock_t));
266 clib_spinlock_init (sm->stat_segment_lockp);
268 oldheap = clib_mem_set_heap (sm->heap);
270 /* Set up the name to counter-vector hash table */
271 sm->directory_vector = 0;
273 shared_header->epoch = 1;
275 /* Scalar stats and node counters */
276 vec_validate (sm->directory_vector, STAT_COUNTERS - 1);
278 strcpy(sm->directory_vector[STAT_COUNTER_##E].name, "/sys" #p "/" #n); \
279 sm->directory_vector[STAT_COUNTER_##E].type = STAT_DIR_TYPE_##t;
280 foreach_stat_segment_counter_name
282 /* Save the vector offset in the shared segment, for clients */
283 shared_header->directory_offset =
284 stat_segment_offset (shared_header, sm->directory_vector);
286 clib_mem_set_heap (oldheap);
292 name_sort_cmp (void *a1, void *a2)
294 stat_segment_directory_entry_t *n1 = a1;
295 stat_segment_directory_entry_t *n2 = a2;
297 return strcmp ((char *) n1->name, (char *) n2->name);
301 format_stat_dir_entry (u8 * s, va_list * args)
303 stat_segment_directory_entry_t *ep =
304 va_arg (*args, stat_segment_directory_entry_t *);
308 format_string = "%-74s %-10s %10lld";
312 case STAT_DIR_TYPE_SCALAR_INDEX:
313 type_name = "ScalarPtr";
316 case STAT_DIR_TYPE_COUNTER_VECTOR_SIMPLE:
317 case STAT_DIR_TYPE_COUNTER_VECTOR_COMBINED:
318 type_name = "CMainPtr";
321 case STAT_DIR_TYPE_ERROR_INDEX:
322 type_name = "ErrIndex";
326 type_name = "illegal!";
330 return format (s, format_string, ep->name, type_name, ep->offset);
333 static clib_error_t *
334 show_stat_segment_command_fn (vlib_main_t * vm,
335 unformat_input_t * input,
336 vlib_cli_command_t * cmd)
338 stat_segment_main_t *sm = &stat_segment_main;
341 stat_segment_directory_entry_t *show_data, *this;
347 if (unformat (input, "verbose"))
350 /* Lock even as reader, as this command doesn't handle epoch changes */
351 vlib_stat_segment_lock ();
352 show_data = vec_dup (sm->directory_vector);
353 vlib_stat_segment_unlock ();
355 vec_sort_with_function (show_data, name_sort_cmp);
357 vlib_cli_output (vm, "%-74s %10s %10s", "Name", "Type", "Value");
359 for (i = 0; i < vec_len (show_data); i++)
361 vlib_cli_output (vm, "%-100U", format_stat_dir_entry,
362 vec_elt_at_index (show_data, i));
368 vlib_cli_output (vm, "%U", format_mheap, sm->heap, 0 /* verbose */ );
375 VLIB_CLI_COMMAND (show_stat_segment_command, static) =
377 .path = "show statistics segment",
378 .short_help = "show statistics segment [verbose]",
379 .function = show_stat_segment_command_fn,
384 * Node performance counters:
385 * total_calls [threads][node-index]
392 update_node_counters (stat_segment_main_t * sm)
394 vlib_main_t *vm = vlib_mains[0];
395 vlib_main_t **stat_vms = 0;
396 vlib_node_t ***node_dups = 0;
398 stat_segment_shared_header_t *shared_header = sm->shared_header;
399 static u32 no_max_nodes = 0;
401 vlib_node_get_nodes (0 /* vm, for barrier sync */ ,
402 (u32) ~ 0 /* all threads */ ,
403 1 /* include stats */ ,
404 0 /* barrier sync */ ,
405 &node_dups, &stat_vms);
407 u32 l = vec_len (node_dups[0]);
410 * Extend performance nodes if necessary
412 if (l > no_max_nodes)
414 void *oldheap = clib_mem_set_heap (sm->heap);
415 vlib_stat_segment_lock ();
417 stat_validate_counter_vector (&sm->directory_vector
418 [STAT_COUNTER_NODE_CLOCKS], l);
419 stat_validate_counter_vector (&sm->directory_vector
420 [STAT_COUNTER_NODE_VECTORS], l);
421 stat_validate_counter_vector (&sm->directory_vector
422 [STAT_COUNTER_NODE_CALLS], l);
423 stat_validate_counter_vector (&sm->directory_vector
424 [STAT_COUNTER_NODE_SUSPENDS], l);
426 vlib_stat_segment_unlock ();
427 clib_mem_set_heap (oldheap);
431 for (j = 0; j < vec_len (node_dups); j++)
433 vlib_node_t **nodes = node_dups[j];
434 u32 l = vec_len (nodes);
436 for (i = 0; i < vec_len (nodes); i++)
438 counter_t **counters;
440 vlib_node_t *n = nodes[i];
443 stat_segment_pointer (shared_header,
445 [STAT_COUNTER_NODE_CLOCKS].offset);
447 c[n->index] = n->stats_total.clocks - n->stats_last_clear.clocks;
450 stat_segment_pointer (shared_header,
452 [STAT_COUNTER_NODE_VECTORS].offset);
454 c[n->index] = n->stats_total.vectors - n->stats_last_clear.vectors;
457 stat_segment_pointer (shared_header,
459 [STAT_COUNTER_NODE_CALLS].offset);
461 c[n->index] = n->stats_total.calls - n->stats_last_clear.calls;
464 stat_segment_pointer (shared_header,
466 [STAT_COUNTER_NODE_SUSPENDS].offset);
469 n->stats_total.suspends - n->stats_last_clear.suspends;
475 do_stat_segment_updates (stat_segment_main_t * sm)
477 vlib_main_t *vm = vlib_mains[0];
479 u64 input_packets, last_input_packets;
481 vlib_main_t *this_vlib_main;
485 * Compute the average vector rate across all workers
489 start = vec_len (vlib_mains) > 1 ? 1 : 0;
491 for (i = start; i < vec_len (vlib_mains); i++)
493 this_vlib_main = vlib_mains[i];
494 vector_rate += vlib_last_vector_length_per_node (this_vlib_main);
496 vector_rate /= (f64) (i - start);
498 sm->directory_vector[STAT_COUNTER_VECTOR_RATE].value =
499 vector_rate / ((f64) (vec_len (vlib_mains) - start));
502 * Compute the aggregate input rate
504 now = vlib_time_now (vm);
505 dt = now - sm->directory_vector[STAT_COUNTER_LAST_UPDATE].value;
506 input_packets = vnet_get_aggregate_rx_packets ();
507 sm->directory_vector[STAT_COUNTER_INPUT_RATE].value =
508 (f64) (input_packets - sm->last_input_packets) / dt;
509 sm->directory_vector[STAT_COUNTER_LAST_UPDATE].value = now;
510 sm->last_input_packets = input_packets;
511 sm->directory_vector[STAT_COUNTER_LAST_STATS_CLEAR].value =
512 vm->node_main.time_last_runtime_stats_clear;
514 if (sm->node_counters_enabled)
515 update_node_counters (sm);
517 /* Heartbeat, so clients detect we're still here */
518 sm->directory_vector[STAT_COUNTER_HEARTBEAT].value++;
522 * Accept connection on the socket and exchange the fd for the shared
525 static clib_error_t *
526 stats_socket_accept_ready (clib_file_t * uf)
528 stat_segment_main_t *sm = &stat_segment_main;
530 clib_socket_t client = { 0 };
532 err = clib_socket_accept (sm->socket, &client);
535 clib_error_report (err);
539 /* Send the fd across and close */
540 err = clib_socket_sendmsg (&client, 0, 0, &sm->memfd, 1);
542 clib_error_report (err);
543 clib_socket_close (&client);
549 stats_segment_socket_init (void)
551 stat_segment_main_t *sm = &stat_segment_main;
553 clib_socket_t *s = clib_mem_alloc (sizeof (clib_socket_t));
555 memset (s, 0, sizeof (clib_socket_t));
556 s->config = (char *) sm->socket_name;
557 s->flags = CLIB_SOCKET_F_IS_SERVER | CLIB_SOCKET_F_SEQPACKET |
558 CLIB_SOCKET_F_ALLOW_GROUP_WRITE | CLIB_SOCKET_F_PASSCRED;
560 if ((error = clib_socket_init (s)))
562 clib_error_report (error);
566 clib_file_t template = { 0 };
567 template.read_function = stats_socket_accept_ready;
568 template.file_descriptor = s->fd;
569 template.description = format (0, "stats segment listener %s", s->config);
570 clib_file_add (&file_main, &template);
575 static clib_error_t *
576 stats_segment_socket_exit (vlib_main_t * vm)
579 * cleanup the listener socket on exit.
581 stat_segment_main_t *sm = &stat_segment_main;
582 unlink ((char *) sm->socket_name);
586 VLIB_MAIN_LOOP_EXIT_FUNCTION (stats_segment_socket_exit);
589 stat_segment_collector_process (vlib_main_t * vm, vlib_node_runtime_t * rt,
592 stat_segment_main_t *sm = &stat_segment_main;
594 /* Wait for Godot... */
595 f64 sleep_duration = 10;
599 do_stat_segment_updates (sm);
600 vlib_process_suspend (vm, sleep_duration);
602 return 0; /* or not */
605 static clib_error_t *
606 statseg_init (vlib_main_t * vm)
608 stat_segment_main_t *sm = &stat_segment_main;
611 /* dependent on unix_input_init */
612 if ((error = vlib_call_init_function (vm, unix_input_init)))
616 stats_segment_socket_init ();
621 static clib_error_t *
622 statseg_config (vlib_main_t * vm, unformat_input_t * input)
624 stat_segment_main_t *sm = &stat_segment_main;
626 /* set default socket file name when statseg config stanza is empty. */
627 sm->socket_name = format (0, "%s", STAT_SEGMENT_SOCKET_FILE);
629 while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
631 if (unformat (input, "socket-name %s", &sm->socket_name))
633 else if (unformat (input, "default"))
634 sm->socket_name = format (0, "%s", STAT_SEGMENT_SOCKET_FILE);
637 (input, "size %U", unformat_memory_size, &sm->memory_size))
639 else if (unformat (input, "per-node-counters on"))
640 sm->node_counters_enabled = 1;
641 else if (unformat (input, "per-node-counters off"))
642 sm->node_counters_enabled = 0;
644 return clib_error_return (0, "unknown input `%U'",
645 format_unformat_error, input);
650 VLIB_INIT_FUNCTION (statseg_init);
651 VLIB_EARLY_CONFIG_FUNCTION (statseg_config, "statseg");
654 VLIB_REGISTER_NODE (stat_segment_collector, static) =
656 .function = stat_segment_collector_process,
657 .name = "statseg-collector-process",
658 .type = VLIB_NODE_TYPE_PROCESS,
/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */