X-Git-Url: https://gerrit.fd.io/r/gitweb?a=blobdiff_plain;f=src%2Fvlib%2Ftrace_funcs.h;h=9313d41eb7d177c92274d831eca5a0ccdb9bf014;hb=53d8d4fd625c21777d11172dac9ea3aa6602edd0;hp=eb06799305cda88f6113aee02e5196bed73cd601;hpb=f8b8586b699bae9e786726f2697c3e642d904c61;p=vpp.git

diff --git a/src/vlib/trace_funcs.h b/src/vlib/trace_funcs.h
index eb06799305c..9313d41eb7d 100644
--- a/src/vlib/trace_funcs.h
+++ b/src/vlib/trace_funcs.h
@@ -40,41 +40,66 @@
 #ifndef included_vlib_trace_funcs_h
 #define included_vlib_trace_funcs_h
 
-extern u8 *vnet_trace_dummy;
+extern u8 *vnet_trace_placeholder;
 
 always_inline void
 vlib_validate_trace (vlib_trace_main_t * tm, vlib_buffer_t * b)
 {
-  /*
-   * this assert seems right, but goes off constantly.
-   * disabling it appears to make the pain go away
-   */
-  ASSERT (1 || b->flags & VLIB_BUFFER_IS_TRACED);
-  ASSERT (!pool_is_free_index (tm->trace_buffer_pool, b->trace_index));
+  ASSERT (!pool_is_free_index (tm->trace_buffer_pool,
+                               vlib_buffer_get_trace_index (b)));
 }
 
+int vlib_add_handoff_trace (vlib_main_t * vm, vlib_buffer_t * b);
+
 always_inline void *
-vlib_add_trace (vlib_main_t * vm,
-                vlib_node_runtime_t * r, vlib_buffer_t * b, u32 n_data_bytes)
+vlib_add_trace_inline (vlib_main_t * vm,
+                       vlib_node_runtime_t * r, vlib_buffer_t * b,
+                       u32 n_data_bytes)
 {
   vlib_trace_main_t *tm = &vm->trace_main;
   vlib_trace_header_t *h;
-  u32 n_data_words;
+  u32 n_data_words, trace_index;
 
-  ASSERT (vnet_trace_dummy);
+  ASSERT (vnet_trace_placeholder);
 
-  if (PREDICT_FALSE (tm->trace_enable == 0))
+  if (PREDICT_FALSE ((b->flags & VLIB_BUFFER_IS_TRACED) == 0))
+    return vnet_trace_placeholder;
+
+  if (PREDICT_FALSE (tm->add_trace_callback != 0))
     {
-      ASSERT (vec_len (vnet_trace_dummy) >= n_data_bytes + sizeof (*h));
-      return vnet_trace_dummy;
+      return tm->add_trace_callback ((struct vlib_main_t *) vm,
+                                     (struct vlib_node_runtime_t *) r,
+                                     (struct vlib_buffer_t *) b,
+                                     n_data_bytes);
+    }
+  else if (PREDICT_FALSE (tm->trace_enable == 0))
+    {
+      ASSERT (vec_len (vnet_trace_placeholder) >= n_data_bytes + sizeof (*h));
+      return vnet_trace_placeholder;
     }
 
-  vlib_validate_trace (tm, b);
+  /* Are we trying to trace a handoff case? */
+  if (PREDICT_FALSE (vlib_buffer_get_trace_thread (b) != vm->thread_index))
+    if (PREDICT_FALSE (!vlib_add_handoff_trace (vm, b)))
+      return vnet_trace_placeholder;
+
+  /*
+   * There is a small chance of a race condition with 'clear trace' here: if a
+   * buffer was marked for tracing before the 'clear trace' and is still going
+   * through the graph after it, its trace_index is stale, because the pool
+   * entry it referenced was freed.
+   * The pool may have been re-allocated for a newly traced buffer, so the
+   * trace_index might be valid again by pure (bad) luck. In that case the
+   * trace will be a mix of both buffers' traces, but this should be
+   * acceptable.
+   */
+  trace_index = vlib_buffer_get_trace_index (b);
+  if (PREDICT_FALSE (pool_is_free_index (tm->trace_buffer_pool, trace_index)))
+    return vnet_trace_placeholder;
 
   n_data_bytes = round_pow2 (n_data_bytes, sizeof (h[0]));
   n_data_words = n_data_bytes / sizeof (h[0]);
-  vec_add2_aligned (tm->trace_buffer_pool[b->trace_index], h,
-                    1 + n_data_words, sizeof (h[0]));
+  vec_add2_aligned (tm->trace_buffer_pool[trace_index], h, 1 + n_data_words,
+                    sizeof (h[0]));
 
   h->time = vm->cpu_time_last_node_dispatch;
   h->n_data = n_data_words;
@@ -83,6 +108,11 @@ vlib_add_trace (vlib_main_t * vm,
   return h->data;
 }
 
+/* Non-inline (typical use-case) version of the above */
+void *vlib_add_trace (vlib_main_t * vm,
+                      vlib_node_runtime_t * r, vlib_buffer_t * b,
+                      u32 n_data_bytes);
+
 always_inline vlib_trace_header_t *
 vlib_trace_header_next (vlib_trace_header_t * h)
 {
@@ -93,9 +123,10 @@ always_inline void
 vlib_free_trace (vlib_main_t * vm, vlib_buffer_t * b)
 {
   vlib_trace_main_t *tm = &vm->trace_main;
+  u32 trace_index = vlib_buffer_get_trace_index (b);
   vlib_validate_trace (tm, b);
-  _vec_len (tm->trace_buffer_pool[b->trace_index]) = 0;
-  pool_put_index (tm->trace_buffer_pool, b->trace_index);
+  _vec_len (tm->trace_buffer_pool[trace_index]) = 0;
+  pool_put_index (tm->trace_buffer_pool, trace_index);
 }
 
 always_inline void
@@ -108,9 +139,17 @@ vlib_trace_next_frame (vlib_main_t * vm,
 }
 
 void trace_apply_filter (vlib_main_t * vm);
+int vnet_is_packet_traced (vlib_buffer_t * b,
+                           u32 classify_table_index, int func);
 
-/* Mark buffer as traced and allocate trace buffer. */
-always_inline void
+
+/*
+ * Mark buffer as traced and allocate trace buffer.
+ * Return 1 if the buffer was successfully traced, 0 if not.
+ * A buffer might not be traced if tracing is off or if the packet did not
+ * match the filter.
+ */
+always_inline __clib_warn_unused_result int
 vlib_trace_buffer (vlib_main_t * vm,
                    vlib_node_runtime_t * r,
                    u32 next_index, vlib_buffer_t * b, int follow_chain)
@@ -119,7 +158,17 @@ vlib_trace_buffer (vlib_main_t * vm,
   vlib_trace_header_t **h;
 
   if (PREDICT_FALSE (tm->trace_enable == 0))
-    return;
+    return 0;
+
+  /* Classifier filter in use? */
+  if (PREDICT_FALSE (vlib_global_main.trace_filter.trace_filter_enable))
+    {
+      /* See if we're supposed to trace this packet... */
+      if (vnet_is_packet_traced (
+            b, vlib_global_main.trace_filter.classify_table_index,
+            0 /* full classify */) != 1)
+        return 0;
+    }
 
   /*
    * Apply filter to existing traces to keep number of allocated traces low.
@@ -129,6 +178,10 @@ vlib_trace_buffer (vlib_main_t * vm,
     {
       tm->last_main_loop_count = vm->main_loop_count;
       trace_apply_filter (vm);
+
+      if (tm->trace_buffer_callback)
+        (tm->trace_buffer_callback) ((struct vlib_main_t *) vm,
+                                     (struct vlib_trace_main_t *) tm);
     }
 
   vlib_trace_next_frame (vm, r, next_index);
@@ -138,9 +191,12 @@ vlib_trace_buffer (vlib_main_t * vm,
   do
     {
       b->flags |= VLIB_BUFFER_IS_TRACED;
-      b->trace_index = h - tm->trace_buffer_pool;
+      b->trace_handle = vlib_buffer_make_trace_handle
+        (vm->thread_index, h - tm->trace_buffer_pool);
     }
   while (follow_chain && (b = vlib_get_next_buffer (vm, b)));
+
+  return 1;
 }
 
 always_inline void
@@ -149,7 +205,7 @@ vlib_buffer_copy_trace_flag (vlib_main_t * vm, vlib_buffer_t * b,
 {
   vlib_buffer_t *b_target = vlib_get_buffer (vm, bi_target);
   b_target->flags |= b->flags & VLIB_BUFFER_IS_TRACED;
-  b_target->trace_index = b->trace_index;
+  b_target->trace_handle = b->trace_handle;
 }
 
 always_inline u32
@@ -157,15 +213,13 @@ vlib_get_trace_count (vlib_main_t * vm, vlib_node_runtime_t * rt)
 {
   vlib_trace_main_t *tm = &vm->trace_main;
   vlib_trace_node_t *tn;
-  int n;
 
   if (rt->node_index >= vec_len (tm->nodes))
     return 0;
 
   tn = tm->nodes + rt->node_index;
-  n = tn->limit - tn->count;
-  ASSERT (n >= 0);
+  ASSERT (tn->count <= tn->limit);
 
-  return n;
+  return tn->limit - tn->count;
 }
 
 always_inline void
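
Usage sketch (illustrative, not part of the diff above): after this change,
vlib_trace_buffer () returns an int that callers must check, and buffers carry
a thread-qualified trace_handle instead of a bare trace_index. A graph node
that initiates tracing might look roughly like the following; the names
my_trace_t and my_node_maybe_trace are hypothetical, not definitions from
this header.

#include <vlib/vlib.h>

/* Example per-packet data to record in the trace (hypothetical). */
typedef struct
{
  u32 next_index;
} my_trace_t;

static inline void
my_node_maybe_trace (vlib_main_t * vm, vlib_node_runtime_t * node,
                     vlib_buffer_t * b, u32 next_index)
{
  /* The result must be checked: tracing may be globally disabled, or the
     classifier-based trace filter may reject this packet. The
     __clib_warn_unused_result attribute turns an ignored result into a
     compiler warning. */
  if (vlib_trace_buffer (vm, node, next_index, b, 0 /* follow_chain */))
    {
      /* b->trace_handle is valid only after vlib_trace_buffer ()
         succeeds. vlib_add_trace () decodes the handle back into a
         thread index and a trace_buffer_pool index, adding a handoff
         trace record first if the buffer changed threads. */
      my_trace_t *t = vlib_add_trace (vm, node, b, sizeof (*t));
      t->next_index = next_index;
    }
}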