2 * Copyright (c) 2015 Cisco and/or its affiliates.
3 * Licensed under the Apache License, Version 2.0 (the "License");
4 * you may not use this file except in compliance with the License.
5 * You may obtain a copy of the License at:
7 * http://www.apache.org/licenses/LICENSE-2.0
9 * Unless required by applicable law or agreed to in writing, software
10 * distributed under the License is distributed on an "AS IS" BASIS,
11 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 * See the License for the specific language governing permissions and
13 * limitations under the License.
16 * node_cli.c: node CLI
18 * Copyright (c) 2008 Eliot Dresselhaus
20 * Permission is hereby granted, free of charge, to any person obtaining
21 * a copy of this software and associated documentation files (the
22 * "Software"), to deal in the Software without restriction, including
23 * without limitation the rights to use, copy, modify, merge, publish,
24 * distribute, sublicense, and/or sell copies of the Software, and to
25 * permit persons to whom the Software is furnished to do so, subject to
26 * the following conditions:
28 * The above copyright notice and this permission notice shall be
29 * included in all copies or substantial portions of the Software.
31 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
32 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
33 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
34 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
35 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
36 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
37 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
40 #include <vlib/vlib.h>
41 #include <vlib/threads.h>
/* Sort callback for vec_sort_with_function(): orders two elements of a
 * vlib_node_t * vector alphabetically by node name (vec_cmp on the
 * vppinfra name vectors).  Arguments point at vector elements, i.e.
 * each is really a vlib_node_t **. */
2 * Copyright (c) 2015 Cisco and/or its affiliates.
44 node_cmp (void *a1, void *a2)
46 vlib_node_t **n1 = a1;
47 vlib_node_t **n2 = a2;
49 return vec_cmp (n1[0]->name, n2[0]->name);
/* CLI handler for "show vlib graph".
 *
 * First prints the graph header (format_vlib_node_graph with a NULL node).
 * If the user named a specific node, prints just that node's graph entry;
 * otherwise duplicates the node vector, sorts it by name (node_cmp) and
 * prints every node.
 *
 * NOTE(review): declarations of node_index/n/i and the closing of the
 * if/else branches are elided in this extract. */
53 show_node_graph (vlib_main_t * vm,
54 unformat_input_t * input, vlib_cli_command_t * cmd)
56 vlib_node_main_t *nm = &vm->node_main;
/* Header line: NULL node argument selects the banner/legend output. */
60 vlib_cli_output (vm, "%U\n", format_vlib_node_graph, nm, 0);
62 if (unformat (input, "%U", unformat_vlib_node, vm, &node_index))
64 n = vlib_get_node (vm, node_index);
65 vlib_cli_output (vm, "%U\n", format_vlib_node_graph, nm, n);
/* No node named: dump all nodes, sorted alphabetically.  vec_dup so the
 * sort does not perturb nm->nodes (which is indexed by node index). */
69 vlib_node_t **nodes = vec_dup (nm->nodes);
72 vec_sort_with_function (nodes, node_cmp);
74 for (i = 0; i < vec_len (nodes); i++)
75 vlib_cli_output (vm, "%U\n\n", format_vlib_node_graph, nm, nodes[i]);
/* Register "show vlib graph" with the vlib CLI. */
84 VLIB_CLI_COMMAND (show_node_graph_command, static) = {
85 .path = "show vlib graph",
86 .short_help = "Show packet processing node graph",
87 .function = show_node_graph,
/* format() helper: emit one row of per-node runtime statistics.
 *
 * va_list arguments: vlib_main_t *vm, vlib_node_t *n, int max.
 * When n is NULL, emits the column-header row instead of data; the
 * 'max' flag selects the "max clocks" column layout over the normal
 * calls/vectors/suspends layout.
 *
 * NOTE(review): many lines are elided in this extract (header/data
 * branch scaffolding, ns/state/misc_info declarations, switch braces);
 * comments below describe only what is visible. */
92 format_vlib_node_stats (u8 * s, va_list * va)
94 vlib_main_t *vm = va_arg (*va, vlib_main_t *);
95 vlib_node_t *n = va_arg (*va, vlib_node_t *);
96 int max = va_arg (*va, int);
/* Header row, "max" layout. */
111 "%=30s%=17s%=16s%=16s%=16s%=16s",
112 "Name", "Max Node Clocks", "Vectors at Max",
113 "Max Clocks", "Avg Clocks", "Avg Vectors/Call");
/* Header row, normal layout. */
116 "%=30s%=12s%=16s%=16s%=16s%=16s%=16s",
117 "Name", "State", "Calls", "Vectors", "Suspends",
118 "Clocks", "Vectors/Call");
121 indent = format_get_indent (s);
/* Deltas since the last "clear runtime": clocks, calls, vectors,
 * suspends; plus the max-clock watermark and its vector count. */
123 l = n->stats_total.clocks - n->stats_last_clear.clocks;
124 c = n->stats_total.calls - n->stats_last_clear.calls;
125 p = n->stats_total.vectors - n->stats_last_clear.vectors;
126 d = n->stats_total.suspends - n->stats_last_clear.suspends;
127 maxc = (f64) n->stats_total.max_clock;
128 maxn = n->stats_total.max_clock_n;
129 if (n->stats_total.max_clock_n)
130 maxcn = (f64) n->stats_total.max_clock / (f64) maxn;
134 /* Clocks per packet, per call or per suspend. */
137 x = (f64) l / (f64) p;
139 x = (f64) l / (f64) c;
141 x = (f64) l / (f64) d;
/* Average vectors per call. */
144 v = (double) p / (double) c;
/* Process nodes get a human-readable state string derived from the
 * process flags, plus a note when events are still pending. */
149 if (n->type == VLIB_NODE_TYPE_PROCESS)
151 vlib_process_t *p = vlib_get_process_from_node (vm, n);
153 /* Show processes with events pending. This helps spot bugs where events are not
155 if (!clib_bitmap_is_zero (p->non_empty_event_type_bitmap))
156 misc_info = format (misc_info, "events pending, ");
158 switch (p->flags & (VLIB_PROCESS_IS_SUSPENDED_WAITING_FOR_CLOCK
159 | VLIB_PROCESS_IS_SUSPENDED_WAITING_FOR_EVENT))
162 if (!(p->flags & VLIB_PROCESS_IS_RUNNING))
166 case VLIB_PROCESS_IS_SUSPENDED_WAITING_FOR_CLOCK:
170 case VLIB_PROCESS_IS_SUSPENDED_WAITING_FOR_EVENT:
171 state = "event wait";
174 case (VLIB_PROCESS_IS_SUSPENDED_WAITING_FOR_EVENT | VLIB_PROCESS_IS_SUSPENDED_WAITING_FOR_CLOCK):
/* Non-process, non-internal nodes (e.g. input nodes) report their
 * dispatch state instead. */
180 else if (n->type != VLIB_NODE_TYPE_INTERNAL)
183 if (n->state == VLIB_NODE_STATE_DISABLED)
185 else if (n->state == VLIB_NODE_STATE_INTERRUPT)
186 state = "interrupt wait";
/* Data row: "max" layout ... */
192 s = format (s, "%-30v%=17.2e%=16d%=16.2e%=16.2e%=16.2e",
193 ns, maxc, maxn, maxcn, x, v);
/* ... or normal layout (calls/vectors/suspends/clocks/vectors-per-call). */
195 s = format (s, "%-30v%=12s%16Ld%16Ld%16Ld%16.2e%16.2f", ns, state,
/* Trailing line with any accumulated misc info (e.g. pending events). */
203 s = format (s, "\n%U%v", format_white_space, indent + 4, misc_info);
204 vec_free (misc_info);
/* CLI handler for "show runtime [brief|verbose|max] [<node-name>]".
 *
 * Single-node form: sync that node's stats and print a header plus its
 * row.  Full form: scrape per-node stats from every thread's vlib_main
 * under a worker-thread barrier (so counters are consistent), then print
 * a per-thread summary line and one row per node, sorted by name.
 *
 * NOTE(review): this extract elides many lines (variable declarations,
 * braces, the brief/verbose/max assignments, switch scaffolding);
 * comments describe only what is visible. */
210 static clib_error_t *
211 show_node_runtime (vlib_main_t * vm,
212 unformat_input_t * input, vlib_cli_command_t * cmd)
214 vlib_node_main_t *nm = &vm->node_main;
/* Per-thread snapshots collected under the barrier, consumed afterwards. */
218 vlib_node_t ***node_dups = 0;
219 f64 *vectors_per_main_loop = 0;
220 f64 *last_vector_length_per_node = 0;
222 time_now = vlib_time_now (vm);
/* Single-node form: header row (NULL node) then the node's data row. */
224 if (unformat (input, "%U", unformat_vlib_node, vm, &node_index))
226 n = vlib_get_node (vm, node_index);
227 vlib_node_sync_stats (vm, n);
228 vlib_cli_output (vm, "%U\n", format_vlib_node_stats, vm, 0, 0);
229 vlib_cli_output (vm, "%U\n", format_vlib_node_stats, vm, n, 0);
/* Full form: aggregate counters per thread. */
236 u64 n_input, n_output, n_drop, n_punt;
237 u64 n_internal_vectors, n_internal_calls;
238 u64 n_clocks, l, v, c, d;
241 vlib_main_t **stat_vms = 0, *stat_vm;
243 /* Suppress nodes with zero calls since last clear */
244 if (unformat (input, "brief") || unformat (input, "b"))
246 if (unformat (input, "verbose") || unformat (input, "v"))
248 if (unformat (input, "max") || unformat (input, "m"))
/* Collect every thread's vlib_main to scrape. */
251 for (i = 0; i < vec_len (vlib_mains); i++)
253 stat_vm = vlib_mains[i];
255 vec_add1 (stat_vms, stat_vm);
259 * Barrier sync across stats scraping.
260 * Otherwise, the counts will be grossly inaccurate.
262 vlib_worker_thread_barrier_sync (vm);
/* Under the barrier: sync and snapshot each thread's node vector plus
 * its main-loop vector-rate figures. */
264 for (j = 0; j < vec_len (stat_vms); j++)
266 stat_vm = stat_vms[j];
267 nm = &stat_vm->node_main;
269 for (i = 0; i < vec_len (nm->nodes); i++)
272 vlib_node_sync_stats (stat_vm, n);
275 nodes = vec_dup (nm->nodes);
277 vec_add1 (node_dups, nodes);
278 vec_add1 (vectors_per_main_loop,
279 vlib_last_vectors_per_main_loop_as_f64 (stat_vm));
280 vec_add1 (last_vector_length_per_node,
281 vlib_last_vector_length_per_node (stat_vm));
283 vlib_worker_thread_barrier_release (vm);
/* Barrier released: format and print from the snapshots. */
286 for (j = 0; j < vec_len (stat_vms); j++)
288 stat_vm = stat_vms[j];
289 nodes = node_dups[j];
291 vec_sort_with_function (nodes, node_cmp);
293 n_input = n_output = n_drop = n_punt = n_clocks = 0;
294 n_internal_vectors = n_internal_calls = 0;
/* Accumulate per-thread totals by node type/flags. */
295 for (i = 0; i < vec_len (nodes); i++)
299 l = n->stats_total.clocks - n->stats_last_clear.clocks;
302 v = n->stats_total.vectors - n->stats_last_clear.vectors;
303 c = n->stats_total.calls - n->stats_last_clear.calls;
310 case VLIB_NODE_TYPE_INTERNAL:
311 n_output += (n->flags & VLIB_NODE_FLAG_IS_OUTPUT) ? v : 0;
312 n_drop += (n->flags & VLIB_NODE_FLAG_IS_DROP) ? v : 0;
313 n_punt += (n->flags & VLIB_NODE_FLAG_IS_PUNT) ? v : 0;
314 if (!(n->flags & VLIB_NODE_FLAG_IS_OUTPUT))
316 n_internal_vectors += v;
317 n_internal_calls += c;
319 if (n->flags & VLIB_NODE_FLAG_IS_HANDOFF)
323 case VLIB_NODE_TYPE_INPUT:
/* With workers present, label each thread's section. */
329 if (vec_len (vlib_mains) > 1)
331 vlib_worker_thread_t *w = vlib_worker_threads + j;
333 vlib_cli_output (vm, "---------------");
335 if (w->lcore_id > -1)
336 vlib_cli_output (vm, "Thread %d %s (lcore %u)", j, w->name,
339 vlib_cli_output (vm, "Thread %d %s", j, w->name);
/* Elapsed time since the last "clear runtime" on this thread. */
342 dt = time_now - nm->time_last_runtime_stats_clear;
345 "Time %.1f, average vectors/node %.2f, last %d main loops %.2f per node %.2f"
346 "\n vector rates in %.4e, out %.4e, drop %.4e, punt %.4e",
348 (n_internal_calls > 0
349 ? (f64) n_internal_vectors / (f64) n_internal_calls
351 1 << VLIB_LOG2_MAIN_LOOPS_PER_STATS_UPDATE,
352 vectors_per_main_loop[j],
353 last_vector_length_per_node[j],
355 (f64) n_output / dt, (f64) n_drop / dt, (f64) n_punt / dt);
/* Column header (NULL node), then one row per node; "brief" skips
 * nodes with no calls and no suspends since the last clear. */
357 vlib_cli_output (vm, "%U", format_vlib_node_stats, stat_vm, 0, max);
358 for (i = 0; i < vec_len (nodes); i++)
361 nodes[i]->stats_total.calls -
362 nodes[i]->stats_last_clear.calls;
364 nodes[i]->stats_total.suspends -
365 nodes[i]->stats_last_clear.suspends;
366 if (c || d || !brief)
368 vlib_cli_output (vm, "%U", format_vlib_node_stats, stat_vm,
/* Release the snapshot vectors. */
375 vec_free (node_dups);
376 vec_free (vectors_per_main_loop);
377 vec_free (last_vector_length_per_node);
/* Register "show runtime" with the vlib CLI. */
384 VLIB_CLI_COMMAND (show_node_runtime_command, static) = {
385 .path = "show runtime",
386 .short_help = "Show packet processing runtime",
387 .function = show_node_runtime,
/* CLI handler for "clear runtime": under a worker-thread barrier, sync
 * each node's stats on every thread and snapshot stats_total into
 * stats_last_clear, so subsequent "show runtime" output reports deltas
 * from this point.  Also resets each thread's
 * time_last_runtime_stats_clear timestamp. */
392 static clib_error_t *
393 clear_node_runtime (vlib_main_t * vm,
394 unformat_input_t * input, vlib_cli_command_t * cmd)
396 vlib_node_main_t *nm;
399 vlib_main_t **stat_vms = 0, *stat_vm;
400 vlib_node_runtime_t *r;
/* Collect every thread's vlib_main. */
402 for (i = 0; i < vec_len (vlib_mains); i++)
404 stat_vm = vlib_mains[i];
406 vec_add1 (stat_vms, stat_vm);
/* Barrier so the clear is atomic with respect to the workers. */
409 vlib_worker_thread_barrier_sync (vm);
411 for (j = 0; j < vec_len (stat_vms); j++)
413 stat_vm = stat_vms[j];
414 nm = &stat_vm->node_main;
416 for (i = 0; i < vec_len (nm->nodes); i++)
419 vlib_node_sync_stats (stat_vm, n);
/* Snapshot: future deltas are measured from here. */
420 n->stats_last_clear = n->stats_total;
422 r = vlib_node_get_runtime (stat_vm, n->index);
425 /* Note: input/output rates computed using vlib_global_main */
426 nm->time_last_runtime_stats_clear = vlib_time_now (vm);
429 vlib_worker_thread_barrier_release (vm);
/* Register "clear runtime" with the vlib CLI. */
437 VLIB_CLI_COMMAND (clear_node_runtime_command, static) = {
438 .path = "clear runtime",
439 .short_help = "Clear packet processing runtime statistics",
440 .function = clear_node_runtime,
444 /* Dummy function to get us linked in. */
/* Referenced from elsewhere solely so the linker keeps this translation
 * unit (and its VLIB_CLI_COMMAND constructors). */
446 vlib_node_cli_reference (void)
451 * fd.io coding-style-patch-verification: ON
454 * eval: (c-set-style "gnu")