/*
 * Copyright (c) 2015 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
/*
 * trace_funcs.h: VLIB trace buffer.
 *
 * Copyright (c) 2008 Eliot Dresselhaus
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */
40 #ifndef included_vlib_trace_funcs_h
41 #define included_vlib_trace_funcs_h
43 extern u8 *vnet_trace_dummy;
46 vlib_validate_trace (vlib_trace_main_t * tm, vlib_buffer_t * b)
48 ASSERT (!pool_is_free_index (tm->trace_buffer_pool,
49 vlib_buffer_get_trace_index (b)));
52 void vlib_add_handoff_trace (vlib_main_t * vm, vlib_buffer_t * b);
55 vlib_add_trace_inline (vlib_main_t * vm,
56 vlib_node_runtime_t * r, vlib_buffer_t * b,
59 vlib_trace_main_t *tm = &vm->trace_main;
60 vlib_trace_header_t *h;
63 ASSERT (vnet_trace_dummy);
65 if (PREDICT_FALSE ((b->flags & VLIB_BUFFER_IS_TRACED) == 0))
66 return vnet_trace_dummy;
68 if (PREDICT_FALSE (tm->add_trace_callback != 0))
70 return tm->add_trace_callback ((struct vlib_main_t *) vm,
71 (struct vlib_node_runtime_t *) r,
72 (struct vlib_buffer_t *) b,
75 else if (PREDICT_FALSE (tm->trace_enable == 0))
77 ASSERT (vec_len (vnet_trace_dummy) >= n_data_bytes + sizeof (*h));
78 return vnet_trace_dummy;
81 /* Are we trying to trace a handoff case? */
82 if (PREDICT_FALSE (vlib_buffer_get_trace_thread (b) != vm->thread_index))
83 vlib_add_handoff_trace (vm, b);
85 vlib_validate_trace (tm, b);
87 n_data_bytes = round_pow2 (n_data_bytes, sizeof (h[0]));
88 n_data_words = n_data_bytes / sizeof (h[0]);
89 vec_add2_aligned (tm->trace_buffer_pool[vlib_buffer_get_trace_index (b)], h,
90 1 + n_data_words, sizeof (h[0]));
92 h->time = vm->cpu_time_last_node_dispatch;
93 h->n_data = n_data_words;
94 h->node_index = r->node_index;
99 /* Non-inline (typical use-case) version of the above */
100 void *vlib_add_trace (vlib_main_t * vm,
101 vlib_node_runtime_t * r, vlib_buffer_t * b,
104 always_inline vlib_trace_header_t *
105 vlib_trace_header_next (vlib_trace_header_t * h)
107 return h + 1 + h->n_data;
111 vlib_free_trace (vlib_main_t * vm, vlib_buffer_t * b)
113 vlib_trace_main_t *tm = &vm->trace_main;
114 u32 trace_index = vlib_buffer_get_trace_index (b);
115 vlib_validate_trace (tm, b);
116 _vec_len (tm->trace_buffer_pool[trace_index]) = 0;
117 pool_put_index (tm->trace_buffer_pool, trace_index);
121 vlib_trace_next_frame (vlib_main_t * vm,
122 vlib_node_runtime_t * r, u32 next_index)
124 vlib_next_frame_t *nf;
125 nf = vlib_node_runtime_get_next_frame (vm, r, next_index);
126 nf->flags |= VLIB_FRAME_TRACE;
129 void trace_apply_filter (vlib_main_t * vm);
130 int vnet_is_packet_traced (vlib_buffer_t * b,
131 u32 classify_table_index, int func);
134 /* Mark buffer as traced and allocate trace buffer. */
136 vlib_trace_buffer (vlib_main_t * vm,
137 vlib_node_runtime_t * r,
138 u32 next_index, vlib_buffer_t * b, int follow_chain)
140 vlib_trace_main_t *tm = &vm->trace_main;
141 vlib_trace_header_t **h;
143 if (PREDICT_FALSE (tm->trace_enable == 0))
146 /* Classifier filter in use? */
147 if (PREDICT_FALSE (vlib_global_main.trace_filter.trace_filter_enable))
149 /* See if we're supposed to trace this packet... */
150 if (vnet_is_packet_traced
151 (b, vlib_global_main.trace_filter.trace_classify_table_index,
152 0 /* full classify */ ) != 1)
157 * Apply filter to existing traces to keep number of allocated traces low.
158 * Performed each time around the main loop.
160 if (tm->last_main_loop_count != vm->main_loop_count)
162 tm->last_main_loop_count = vm->main_loop_count;
163 trace_apply_filter (vm);
165 if (tm->trace_buffer_callback)
166 (tm->trace_buffer_callback) ((struct vlib_main_t *) vm,
167 (struct vlib_trace_main_t *) tm);
170 vlib_trace_next_frame (vm, r, next_index);
172 pool_get (tm->trace_buffer_pool, h);
176 b->flags |= VLIB_BUFFER_IS_TRACED;
177 b->trace_handle = vlib_buffer_make_trace_handle
178 (vm->thread_index, h - tm->trace_buffer_pool);
180 while (follow_chain && (b = vlib_get_next_buffer (vm, b)));
184 vlib_buffer_copy_trace_flag (vlib_main_t * vm, vlib_buffer_t * b,
187 vlib_buffer_t *b_target = vlib_get_buffer (vm, bi_target);
188 b_target->flags |= b->flags & VLIB_BUFFER_IS_TRACED;
189 b_target->trace_handle = b->trace_handle;
193 vlib_get_trace_count (vlib_main_t * vm, vlib_node_runtime_t * rt)
195 vlib_trace_main_t *tm = &vm->trace_main;
196 vlib_trace_node_t *tn;
199 if (rt->node_index >= vec_len (tm->nodes))
201 tn = tm->nodes + rt->node_index;
202 n = tn->limit - tn->count;
209 vlib_set_trace_count (vlib_main_t * vm, vlib_node_runtime_t * rt, u32 count)
211 vlib_trace_main_t *tm = &vm->trace_main;
212 vlib_trace_node_t *tn = vec_elt_at_index (tm->nodes, rt->node_index);
214 ASSERT (count <= tn->limit);
215 tn->count = tn->limit - count;
218 /* Helper function for nodes which only trace buffer data. */
220 vlib_trace_frame_buffers_only (vlib_main_t * vm,
221 vlib_node_runtime_t * node,
224 uword next_buffer_stride,
225 uword n_buffer_data_bytes_in_trace);
227 #endif /* included_vlib_trace_funcs_h */
/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */