2 * Copyright (c) 2015 Cisco and/or its affiliates.
3 * Licensed under the Apache License, Version 2.0 (the "License");
4 * you may not use this file except in compliance with the License.
5 * You may obtain a copy of the License at:
7 * http://www.apache.org/licenses/LICENSE-2.0
9 * Unless required by applicable law or agreed to in writing, software
10 * distributed under the License is distributed on an "AS IS" BASIS,
11 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 * See the License for the specific language governing permissions and
13 * limitations under the License.
16 * node_cli.c: node CLI
18 * Copyright (c) 2008 Eliot Dresselhaus
20 * Permission is hereby granted, free of charge, to any person obtaining
21 * a copy of this software and associated documentation files (the
22 * "Software"), to deal in the Software without restriction, including
23 * without limitation the rights to use, copy, modify, merge, publish,
24 * distribute, sublicense, and/or sell copies of the Software, and to
25 * permit persons to whom the Software is furnished to do so, subject to
26 * the following conditions:
28 * The above copyright notice and this permission notice shall be
29 * included in all copies or substantial portions of the Software.
31 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
32 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
33 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
34 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
35 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
36 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
37 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
40 #include <vlib/vlib.h>
41 #include <vlib/threads.h>
44 node_cmp (void * a1, void *a2)
46 vlib_node_t ** n1 = a1;
47 vlib_node_t ** n2 = a2;
49 return vec_cmp (n1[0]->name, n2[0]->name);
53 show_node_graph (vlib_main_t * vm,
54 unformat_input_t * input,
55 vlib_cli_command_t * cmd)
57 vlib_node_main_t * nm = &vm->node_main;
61 vlib_cli_output (vm, "%U\n", format_vlib_node_graph, nm, 0);
63 if (unformat (input, "%U", unformat_vlib_node, vm, &node_index))
65 n = vlib_get_node (vm, node_index);
66 vlib_cli_output (vm, "%U\n", format_vlib_node_graph, nm, n);
70 vlib_node_t ** nodes = vec_dup (nm->nodes);
73 vec_sort_with_function (nodes, node_cmp);
75 for (i = 0; i < vec_len (nodes); i++)
76 vlib_cli_output (vm, "%U\n\n", format_vlib_node_graph, nm, nodes[i]);
84 VLIB_CLI_COMMAND (show_node_graph_command, static) = {
85 .path = "show vlib graph",
86 .short_help = "Show packet processing node graph",
87 .function = show_node_graph,
90 static u8 * format_vlib_node_stats (u8 * s, va_list * va)
92 vlib_main_t * vm = va_arg (*va, vlib_main_t *);
93 vlib_node_t * n = va_arg (*va, vlib_node_t *);
94 int max = va_arg (*va, int);
109 "%=30s%=17s%=16s%=16s%=16s%=16s",
110 "Name", "Max Node Clocks", "Vectors at Max", "Max Clocks", "Avg Clocks", "Avg Vectors/Call");
113 "%=30s%=12s%=16s%=16s%=16s%=16s%=16s",
114 "Name", "State", "Calls", "Vectors", "Suspends", "Clocks", "Vectors/Call");
117 indent = format_get_indent (s);
119 l = n->stats_total.clocks - n->stats_last_clear.clocks;
120 c = n->stats_total.calls - n->stats_last_clear.calls;
121 p = n->stats_total.vectors - n->stats_last_clear.vectors;
122 d = n->stats_total.suspends - n->stats_last_clear.suspends;
123 maxc = (f64)n->stats_total.max_clock;
124 maxn = n->stats_total.max_clock_n;
125 if (n->stats_total.max_clock_n)
126 maxcn = (f64)n->stats_total.max_clock / (f64)maxn;
130 /* Clocks per packet, per call or per suspend. */
133 x = (f64) l / (f64) p;
135 x = (f64) l / (f64) c;
137 x = (f64) l / (f64) d;
140 v = (double)p / (double)c;
145 if (n->type == VLIB_NODE_TYPE_PROCESS)
147 vlib_process_t * p = vlib_get_process_from_node (vm, n);
149 /* Show processes with events pending. This helps spot bugs where events are not
151 if (! clib_bitmap_is_zero (p->non_empty_event_type_bitmap))
152 misc_info = format (misc_info, "events pending, ");
154 switch (p->flags & (VLIB_PROCESS_IS_SUSPENDED_WAITING_FOR_CLOCK
155 | VLIB_PROCESS_IS_SUSPENDED_WAITING_FOR_EVENT))
158 if (! (p->flags & VLIB_PROCESS_IS_RUNNING))
162 case VLIB_PROCESS_IS_SUSPENDED_WAITING_FOR_CLOCK:
166 case VLIB_PROCESS_IS_SUSPENDED_WAITING_FOR_EVENT:
167 state = "event wait";
170 case (VLIB_PROCESS_IS_SUSPENDED_WAITING_FOR_EVENT
171 | VLIB_PROCESS_IS_SUSPENDED_WAITING_FOR_CLOCK):
176 else if (n->type != VLIB_NODE_TYPE_INTERNAL)
179 if (n->state == VLIB_NODE_STATE_DISABLED)
181 else if (n->state == VLIB_NODE_STATE_INTERRUPT)
182 state = "interrupt wait";
188 s = format (s, "%-30v%=17.2e%=16d%=16.2e%=16.2e%=16.2e",
189 ns, maxc, maxn, maxcn, x, v);
191 s = format (s, "%-30v%=12s%16Ld%16Ld%16Ld%16.2e%16.2f", ns, state,
199 s = format (s, "\n%U%v", format_white_space, indent + 4, misc_info);
200 vec_free (misc_info);
206 static clib_error_t *
207 show_node_runtime (vlib_main_t * vm,
208 unformat_input_t * input,
209 vlib_cli_command_t * cmd)
211 vlib_node_main_t * nm = &vm->node_main;
215 vlib_node_t *** node_dups = 0;
216 f64 * vectors_per_main_loop = 0;
217 f64 * last_vector_length_per_node = 0;
219 time_now = vlib_time_now (vm);
221 if (unformat (input, "%U", unformat_vlib_node, vm, &node_index))
223 n = vlib_get_node (vm, node_index);
224 vlib_node_sync_stats (vm, n);
225 vlib_cli_output (vm, "%U\n", format_vlib_node_stats, vm, 0, 0);
226 vlib_cli_output (vm, "%U\n", format_vlib_node_stats, vm, n, 0);
230 vlib_node_t ** nodes;
233 u64 n_input, n_output, n_drop, n_punt;
234 u64 n_internal_vectors, n_internal_calls;
235 u64 n_clocks, l, v, c, d;
238 vlib_main_t ** stat_vms = 0, *stat_vm;
240 /* Suppress nodes with zero calls since last clear */
241 if (unformat (input, "brief") || unformat (input, "b"))
243 if (unformat (input, "verbose") || unformat(input, "v"))
245 if (unformat (input, "max") || unformat(input, "m"))
248 if (vec_len(vlib_mains) == 0)
249 vec_add1 (stat_vms, vm);
252 for (i = 0; i < vec_len (vlib_mains); i++)
254 stat_vm = vlib_mains[i];
256 vec_add1 (stat_vms, stat_vm);
261 * Barrier sync across stats scraping.
262 * Otherwise, the counts will be grossly inaccurate.
264 vlib_worker_thread_barrier_sync(vm);
266 for (j = 0; j < vec_len (stat_vms); j++)
268 stat_vm = stat_vms[j];
269 nm = &stat_vm->node_main;
271 for (i = 0; i < vec_len (nm->nodes); i++)
274 vlib_node_sync_stats (stat_vm, n);
277 nodes = vec_dup (nm->nodes);
279 vec_add1(node_dups, nodes);
280 vec_add1 (vectors_per_main_loop,
281 vlib_last_vectors_per_main_loop_as_f64 (stat_vm));
282 vec_add1 (last_vector_length_per_node,
283 vlib_last_vector_length_per_node (stat_vm));
285 vlib_worker_thread_barrier_release(vm);
288 for (j = 0; j < vec_len (stat_vms); j++)
290 stat_vm = stat_vms[j];
291 nodes = node_dups[j];
293 vec_sort_with_function (nodes, node_cmp);
295 n_input = n_output = n_drop = n_punt = n_clocks = 0;
296 n_internal_vectors = n_internal_calls = 0;
297 for (i = 0; i < vec_len (nodes); i++)
301 l = n->stats_total.clocks - n->stats_last_clear.clocks;
304 v = n->stats_total.vectors - n->stats_last_clear.vectors;
305 c = n->stats_total.calls - n->stats_last_clear.calls;
312 case VLIB_NODE_TYPE_INTERNAL:
313 n_output += (n->flags & VLIB_NODE_FLAG_IS_OUTPUT) ? v : 0;
314 n_drop += (n->flags & VLIB_NODE_FLAG_IS_DROP) ? v : 0;
315 n_punt += (n->flags & VLIB_NODE_FLAG_IS_PUNT) ? v : 0;
316 if (! (n->flags & VLIB_NODE_FLAG_IS_OUTPUT))
318 n_internal_vectors += v;
319 n_internal_calls += c;
321 if (n->flags & VLIB_NODE_FLAG_IS_HANDOFF)
325 case VLIB_NODE_TYPE_INPUT:
331 if (vec_len (vlib_mains))
333 vlib_worker_thread_t *w = vlib_worker_threads + j;
335 vlib_cli_output (vm, "---------------");
337 if ( w->dpdk_lcore_id > -1)
338 vlib_cli_output (vm, "Thread %d %s (lcore %u)", j, w->name,
341 vlib_cli_output (vm, "Thread %d %s", j,
345 dt = time_now - nm->time_last_runtime_stats_clear;
348 "Time %.1f, average vectors/node %.2f, last %d main loops %.2f per node %.2f"
349 "\n vector rates in %.4e, out %.4e, drop %.4e, punt %.4e",
351 (n_internal_calls > 0
352 ? (f64) n_internal_vectors / (f64) n_internal_calls
354 1 << VLIB_LOG2_MAIN_LOOPS_PER_STATS_UPDATE,
355 vectors_per_main_loop [j],
356 last_vector_length_per_node [j],
362 vlib_cli_output (vm, "%U", format_vlib_node_stats, stat_vm, 0, max);
363 for (i = 0; i < vec_len (nodes); i++)
365 c = nodes[i]->stats_total.calls - nodes[i]->stats_last_clear.calls;
366 d = nodes[i]->stats_total.suspends - nodes[i]->stats_last_clear.suspends;
367 if (c || d || ! brief)
369 vlib_cli_output (vm, "%U", format_vlib_node_stats, stat_vm,
376 vec_free (node_dups);
377 vec_free (vectors_per_main_loop);
378 vec_free (last_vector_length_per_node);
384 VLIB_CLI_COMMAND (show_node_runtime_command, static) = {
385 .path = "show runtime",
386 .short_help = "Show packet processing runtime",
387 .function = show_node_runtime,
391 static clib_error_t *
392 clear_node_runtime (vlib_main_t * vm,
393 unformat_input_t * input,
394 vlib_cli_command_t * cmd)
396 vlib_node_main_t * nm;
399 vlib_main_t ** stat_vms = 0, *stat_vm;
400 vlib_node_runtime_t * r;
402 if (vec_len(vlib_mains) == 0)
403 vec_add1 (stat_vms, vm);
406 for (i = 0; i < vec_len (vlib_mains); i++)
408 stat_vm = vlib_mains[i];
410 vec_add1 (stat_vms, stat_vm);
414 vlib_worker_thread_barrier_sync(vm);
416 for (j = 0; j < vec_len (stat_vms); j++)
418 stat_vm = stat_vms[j];
419 nm = &stat_vm->node_main;
421 for (i = 0; i < vec_len (nm->nodes); i++)
424 vlib_node_sync_stats (stat_vm, n);
425 n->stats_last_clear = n->stats_total;
427 r = vlib_node_get_runtime (stat_vm, n->index);
430 /* Note: input/output rates computed using vlib_global_main */
431 nm->time_last_runtime_stats_clear = vlib_time_now (vm);
434 vlib_worker_thread_barrier_release(vm);
441 VLIB_CLI_COMMAND (clear_node_runtime_command, static) = {
442 .path = "clear runtime",
443 .short_help = "Clear packet processing runtime statistics",
444 .function = clear_node_runtime,
/* Dummy function to get us linked in: referencing this symbol forces the
   linker to include this translation unit (and its CLI registrations). */
void vlib_node_cli_reference (void) {}