/*
 * Copyright (c) 2015 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
/*
 * node_cli.c: node CLI
 *
 * Copyright (c) 2008 Eliot Dresselhaus
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */
40 #include <vlib/vlib.h>
41 #include <vlib/threads.h>
44 show_node_graph (vlib_main_t * vm,
45 unformat_input_t * input,
46 vlib_cli_command_t * cmd)
48 vlib_node_main_t * nm = &vm->node_main;
52 vlib_cli_output (vm, "%U\n", format_vlib_node_graph, nm, 0);
54 if (unformat (input, "%U", unformat_vlib_node, vm, &node_index))
56 n = vlib_get_node (vm, node_index);
57 vlib_cli_output (vm, "%U\n", format_vlib_node_graph, nm, n);
61 vlib_node_t ** nodes = vec_dup (nm->nodes);
64 vec_sort (nodes, n1, n2,
65 vec_cmp (n1[0]->name, n2[0]->name));
67 for (i = 0; i < vec_len (nodes); i++)
68 vlib_cli_output (vm, "%U\n\n", format_vlib_node_graph, nm, nodes[i]);
76 VLIB_CLI_COMMAND (show_node_graph_command, static) = {
77 .path = "show vlib graph",
78 .short_help = "Show packet processing node graph",
79 .function = show_node_graph,
/*
 * format_vlib_node_stats: format one "show runtime" row for a node, or
 * the column-header row.  va_args: vlib_main_t *vm, vlib_node_t *n,
 * int max (selects the max-clocks column layout vs. the normal
 * calls/vectors/suspends layout).  Returns the extended format string s.
 *
 * NOTE(review): this chunk is a mangled extraction -- each line carries a
 * fused original line number and several structural lines (braces, local
 * declarations, the "if (!n)" header branch, else-arms, trailing format
 * arguments, the return) are missing.  Comments below describe only what
 * the visible lines show; verify against the complete file.
 */
82 static u8 * format_vlib_node_stats (u8 * s, va_list * va)
84 vlib_main_t * vm = va_arg (*va, vlib_main_t *);
85 vlib_node_t * n = va_arg (*va, vlib_node_t *);
86 int max = va_arg (*va, int);
/* Header row, "max" layout (presumably emitted when n is null -- the
   guarding conditional is not visible here; confirm). */
101 "%=30s%=17s%=16s%=16s%=16s%=16s",
102 "Name", "Max Node Clocks", "Vectors at Max", "Max Clocks", "Avg Clocks", "Avg Vectors/Call");
/* Header row, normal layout. */
105 "%=30s%=12s%=16s%=16s%=16s%=16s%=16s",
106 "Name", "State", "Calls", "Vectors", "Suspends", "Clocks", "Vectors/Call");
109 indent = format_get_indent (s);
/* Deltas since the last "clear runtime": clocks, calls, vectors,
   suspends. */
111 l = n->stats_total.clocks - n->stats_last_clear.clocks;
112 c = n->stats_total.calls - n->stats_last_clear.calls;
113 p = n->stats_total.vectors - n->stats_last_clear.vectors;
114 d = n->stats_total.suspends - n->stats_last_clear.suspends;
/* Worst-case dispatch: max clocks, vectors present at that dispatch,
   and clocks-per-vector at the maximum (guarded against divide-by-0). */
115 maxc = (f64)n->stats_total.max_clock;
116 maxn = n->stats_total.max_clock_n;
117 if (n->stats_total.max_clock_n)
118 maxcn = (f64)n->stats_total.max_clock / (f64)maxn;
122 /* Clocks per packet, per call or per suspend. */
125 x = (f64) l / (f64) p;
127 x = (f64) l / (f64) c;
129 x = (f64) l / (f64) d;
/* Average vectors per call (divide-by-zero guard not visible here). */
132 v = (double)p / (double)c;
/* Derive the human-readable "State" column for process nodes from the
   process suspend flags. */
137 if (n->type == VLIB_NODE_TYPE_PROCESS)
139 vlib_process_t * p = vlib_get_process_from_node (vm, n);
141 /* Show processes with events pending. This helps spot bugs where events are not
143 if (! clib_bitmap_is_zero (p->non_empty_event_type_bitmap))
144 misc_info = format (misc_info, "events pending, ");
146 switch (p->flags & (VLIB_PROCESS_IS_SUSPENDED_WAITING_FOR_CLOCK
147 | VLIB_PROCESS_IS_SUSPENDED_WAITING_FOR_EVENT))
/* Not suspended at all: a process that is also not RUNNING has
   finished (state string set on a dropped line). */
150 if (! (p->flags & VLIB_PROCESS_IS_RUNNING))
154 case VLIB_PROCESS_IS_SUSPENDED_WAITING_FOR_CLOCK:
158 case VLIB_PROCESS_IS_SUSPENDED_WAITING_FOR_EVENT:
159 state = "event wait";
/* Suspended waiting for whichever comes first, event or clock. */
162 case (VLIB_PROCESS_IS_SUSPENDED_WAITING_FOR_EVENT
163 | VLIB_PROCESS_IS_SUSPENDED_WAITING_FOR_CLOCK):
/* Non-process, non-internal nodes (e.g. input nodes): state comes from
   the node's polling/interrupt/disabled setting. */
168 else if (n->type != VLIB_NODE_TYPE_INTERNAL)
171 if (n->state == VLIB_NODE_STATE_DISABLED)
173 else if (n->state == VLIB_NODE_STATE_INTERRUPT)
174 state = "interrupt wait";
/* Emit the row: "max" layout ... */
180 s = format (s, "%-30v%=17.2e%=16d%=16.2e%=16.2e%=16.2e",
181 ns, maxc, maxn, maxcn, x, v);
/* ... or the normal layout (remaining arguments on a dropped line). */
183 s = format (s, "%-30v%=12s%16Ld%16Ld%16Ld%16.2e%16.2f", ns, state,
/* Append accumulated misc info (e.g. "events pending") on its own
   indented line, then release the temporary vector. */
191 s = format (s, "\n%U%v", format_white_space, indent + 4, misc_info);
192 vec_free (misc_info);
/*
 * show_node_runtime: CLI handler for "show runtime".
 *
 * Single-node form: "show runtime <node>" syncs and prints stats for one
 * node.  Otherwise prints per-thread runtime statistics for every node,
 * with optional modifiers: brief/b (hide zero-call nodes), verbose/v,
 * max/m (worst-case clocks layout).
 *
 * NOTE(review): mangled extraction -- fused line numbers on every line and
 * many structural lines (braces, switch statement, declarations of n, i,
 * j, brief/verbose/max, dt, the summary format call's remaining
 * arguments, return) are missing.  Comments describe only the visible
 * lines; verify against the complete file.
 */
198 static clib_error_t *
199 show_node_runtime (vlib_main_t * vm,
200 unformat_input_t * input,
201 vlib_cli_command_t * cmd)
203 vlib_node_main_t * nm = &vm->node_main;
/* Per-thread snapshots gathered under the barrier, consumed after it. */
207 vlib_node_t *** node_dups = 0;
208 f64 * vectors_per_main_loop = 0;
209 f64 * last_vector_length_per_node = 0;
211 time_now = vlib_time_now (vm);
/* Single-node form: header row (n == 0), then the node's row. */
213 if (unformat (input, "%U", unformat_vlib_node, vm, &node_index))
215 n = vlib_get_node (vm, node_index);
216 vlib_node_sync_stats (vm, n);
217 vlib_cli_output (vm, "%U\n", format_vlib_node_stats, vm, 0, 0);
218 vlib_cli_output (vm, "%U\n", format_vlib_node_stats, vm, n, 0);
/* All-nodes form. */
222 vlib_node_t ** nodes;
225 u64 n_input, n_output, n_drop, n_punt;
226 u64 n_internal_vectors, n_internal_calls;
227 u64 n_clocks, l, v, c, d;
230 vlib_main_t ** stat_vms = 0, *stat_vm;
232 /* Suppress nodes with zero calls since last clear */
233 if (unformat (input, "brief") || unformat (input, "b"))
235 if (unformat (input, "verbose") || unformat(input, "v"))
237 if (unformat (input, "max") || unformat(input, "m"))
/* Build the list of vlib_main_t's to scrape: just vm when single
   threaded, otherwise every populated entry of vlib_mains. */
240 if (vec_len(vlib_mains) == 0)
241 vec_add1 (stat_vms, vm);
244 for (i = 0; i < vec_len (vlib_mains); i++)
246 stat_vm = vlib_mains[i];
248 vec_add1 (stat_vms, stat_vm);
253 * Barrier sync across stats scraping.
254 * Otherwise, the counts will be grossly inaccurate.
256 vlib_worker_thread_barrier_sync(vm);
/* Under the barrier: sync each node's stats and snapshot everything
   needed for printing, so the barrier can be released quickly. */
258 for (j = 0; j < vec_len (stat_vms); j++)
260 stat_vm = stat_vms[j];
261 nm = &stat_vm->node_main;
263 for (i = 0; i < vec_len (nm->nodes); i++)
266 vlib_node_sync_stats (stat_vm, n);
269 nodes = vec_dup (nm->nodes);
271 vec_add1(node_dups, nodes);
272 vec_add1 (vectors_per_main_loop,
273 vlib_last_vectors_per_main_loop_as_f64 (stat_vm));
274 vec_add1 (last_vector_length_per_node,
275 vlib_last_vector_length_per_node (stat_vm));
277 vlib_worker_thread_barrier_release(vm);
/* Print phase, per thread, outside the barrier. */
280 for (j = 0; j < vec_len (stat_vms); j++)
282 stat_vm = stat_vms[j];
283 nodes = node_dups[j];
285 vec_sort (nodes, n1, n2,
286 vec_cmp (n1[0]->name, n2[0]->name));
/* Aggregate per-thread totals for the summary line. */
288 n_input = n_output = n_drop = n_punt = n_clocks = 0;
289 n_internal_vectors = n_internal_calls = 0;
290 for (i = 0; i < vec_len (nodes); i++)
294 l = n->stats_total.clocks - n->stats_last_clear.clocks;
297 v = n->stats_total.vectors - n->stats_last_clear.vectors;
298 c = n->stats_total.calls - n->stats_last_clear.calls;
/* (switch on n->type; opening lines dropped in extraction) */
305 case VLIB_NODE_TYPE_INTERNAL:
306 n_output += (n->flags & VLIB_NODE_FLAG_IS_OUTPUT) ? v : 0;
307 n_drop += (n->flags & VLIB_NODE_FLAG_IS_DROP) ? v : 0;
308 n_punt += (n->flags & VLIB_NODE_FLAG_IS_PUNT) ? v : 0;
309 if (! (n->flags & VLIB_NODE_FLAG_IS_OUTPUT))
311 n_internal_vectors += v;
312 n_internal_calls += c;
/* Handoff nodes presumably count as input to the next thread --
   the statement under this condition was dropped; confirm. */
314 if (n->flags & VLIB_NODE_FLAG_IS_HANDOFF)
318 case VLIB_NODE_TYPE_INPUT:
/* Multi-threaded: print a per-thread banner (with DPDK lcore id
   when the worker has one). */
324 if (vec_len (vlib_mains))
326 vlib_worker_thread_t *w = vlib_worker_threads + j;
328 vlib_cli_output (vm, "---------------");
330 if ( w->dpdk_lcore_id > -1)
331 vlib_cli_output (vm, "Thread %d %v (lcore %u)", j, w->name,
334 vlib_cli_output (vm, "Thread %d %v", j,
/* Elapsed time since the last "clear runtime" on this thread. */
338 dt = time_now - nm->time_last_runtime_stats_clear;
341 "Time %.1f, average vectors/node %.2f, last %d main loops %.2f per node %.2f"
342 "\n vector rates in %.4e, out %.4e, drop %.4e, punt %.4e",
344 (n_internal_calls > 0
345 ? (f64) n_internal_vectors / (f64) n_internal_calls
347 1 << VLIB_LOG2_MAIN_LOOPS_PER_STATS_UPDATE,
348 vectors_per_main_loop [j],
349 last_vector_length_per_node [j],
/* Header row (node pointer 0), then one row per node; "brief"
   suppresses nodes with no calls and no suspends since clear. */
355 vlib_cli_output (vm, "%U", format_vlib_node_stats, stat_vm, 0, max);
356 for (i = 0; i < vec_len (nodes); i++)
358 c = nodes[i]->stats_total.calls - nodes[i]->stats_last_clear.calls;
359 d = nodes[i]->stats_total.suspends - nodes[i]->stats_last_clear.suspends;
360 if (c || d || ! brief)
362 vlib_cli_output (vm, "%U", format_vlib_node_stats, stat_vm,
/* Release the snapshots taken under the barrier. */
369 vec_free (node_dups);
370 vec_free (vectors_per_main_loop);
371 vec_free (last_vector_length_per_node);
377 VLIB_CLI_COMMAND (show_node_runtime_command, static) = {
378 .path = "show runtime",
379 .short_help = "Show packet processing runtime",
380 .function = show_node_runtime,
/*
 * clear_node_runtime: CLI handler for "clear runtime".
 *
 * For every thread's vlib_main, under a worker barrier: syncs each node's
 * stats, snapshots stats_total into stats_last_clear (so "show runtime"
 * reports deltas from this point), and resets the clear timestamp.
 *
 * NOTE(review): mangled extraction -- fused line numbers and missing
 * structural lines (braces, declarations of i/j/n, whatever is done with
 * the runtime pointer r after line 420, the return).  Comments describe
 * only the visible lines; verify against the complete file.
 */
384 static clib_error_t *
385 clear_node_runtime (vlib_main_t * vm,
386 unformat_input_t * input,
387 vlib_cli_command_t * cmd)
389 vlib_node_main_t * nm;
392 vlib_main_t ** stat_vms = 0, *stat_vm;
393 vlib_node_runtime_t * r;
/* Collect the vlib_main_t's to clear: vm alone when single threaded,
   else every populated vlib_mains entry (same pattern as show_node_runtime). */
395 if (vec_len(vlib_mains) == 0)
396 vec_add1 (stat_vms, vm);
399 for (i = 0; i < vec_len (vlib_mains); i++)
401 stat_vm = vlib_mains[i];
403 vec_add1 (stat_vms, stat_vm);
/* Stop the workers so the clear is atomic across threads. */
407 vlib_worker_thread_barrier_sync(vm);
409 for (j = 0; j < vec_len (stat_vms); j++)
411 stat_vm = stat_vms[j];
412 nm = &stat_vm->node_main;
414 for (i = 0; i < vec_len (nm->nodes); i++)
417 vlib_node_sync_stats (stat_vm, n);
/* Snapshot: future "show runtime" deltas are relative to now. */
418 n->stats_last_clear = n->stats_total;
/* Per-node runtime; what is cleared through r was on dropped lines. */
420 r = vlib_node_get_runtime (stat_vm, n->index);
423 /* Note: input/output rates computed using vlib_global_main */
424 nm->time_last_runtime_stats_clear = vlib_time_now (vm);
427 vlib_worker_thread_barrier_release(vm);
434 VLIB_CLI_COMMAND (clear_node_runtime_command, static) = {
435 .path = "clear runtime",
436 .short_help = "Clear packet processing runtime statistics",
437 .function = clear_node_runtime,
/* Dummy function to get us linked in: other translation units call this
   empty stub so the linker pulls in this file's static CLI command
   registrations.  (Removed fused line-number artifacts that broke
   compilation.) */
void
vlib_node_cli_reference (void)
{
}