diff --git a/src/vlib/trace_funcs.h b/src/vlib/trace_funcs.h
index eb06799305c..4261f675aec 100644
--- a/src/vlib/trace_funcs.h
+++ b/src/vlib/trace_funcs.h
@@ -45,14 +45,12 @@ extern u8 *vnet_trace_dummy;
 always_inline void
 vlib_validate_trace (vlib_trace_main_t * tm, vlib_buffer_t * b)
 {
-  /*
-   * this assert seems right, but goes off constantly.
-   * disabling it appears to make the pain go away
-   */
-  ASSERT (1 || b->flags & VLIB_BUFFER_IS_TRACED);
-  ASSERT (!pool_is_free_index (tm->trace_buffer_pool, b->trace_index));
+  ASSERT (!pool_is_free_index (tm->trace_buffer_pool,
+                               vlib_buffer_get_trace_index (b)));
 }
 
+void vlib_add_handoff_trace (vlib_main_t * vm, vlib_buffer_t * b);
+
 always_inline void *
 vlib_add_trace (vlib_main_t * vm,
                 vlib_node_runtime_t * r, vlib_buffer_t * b, u32 n_data_bytes)
@@ -63,17 +61,31 @@ vlib_add_trace (vlib_main_t * vm,
 
   ASSERT (vnet_trace_dummy);
 
-  if (PREDICT_FALSE (tm->trace_enable == 0))
+  if (PREDICT_FALSE ((b->flags & VLIB_BUFFER_IS_TRACED) == 0))
+    return vnet_trace_dummy;
+
+  if (PREDICT_FALSE (tm->add_trace_callback != 0))
+    {
+      return tm->add_trace_callback ((struct vlib_main_t *) vm,
+                                     (struct vlib_node_runtime_t *) r,
+                                     (struct vlib_buffer_t *) b,
+                                     n_data_bytes);
+    }
+  else if (PREDICT_FALSE (tm->trace_enable == 0))
     {
       ASSERT (vec_len (vnet_trace_dummy) >= n_data_bytes + sizeof (*h));
       return vnet_trace_dummy;
     }
 
+  /* Are we trying to trace a handoff case? */
+  if (PREDICT_FALSE (vlib_buffer_get_trace_thread (b) != vm->thread_index))
+    vlib_add_handoff_trace (vm, b);
+
   vlib_validate_trace (tm, b);
 
   n_data_bytes = round_pow2 (n_data_bytes, sizeof (h[0]));
   n_data_words = n_data_bytes / sizeof (h[0]);
-  vec_add2_aligned (tm->trace_buffer_pool[b->trace_index], h,
+  vec_add2_aligned (tm->trace_buffer_pool[vlib_buffer_get_trace_index (b)], h,
                     1 + n_data_words, sizeof (h[0]));
 
   h->time = vm->cpu_time_last_node_dispatch;
@@ -93,9 +105,10 @@ always_inline void
 vlib_free_trace (vlib_main_t * vm, vlib_buffer_t * b)
 {
   vlib_trace_main_t *tm = &vm->trace_main;
+  u32 trace_index = vlib_buffer_get_trace_index (b);
   vlib_validate_trace (tm, b);
-  _vec_len (tm->trace_buffer_pool[b->trace_index]) = 0;
-  pool_put_index (tm->trace_buffer_pool, b->trace_index);
+  _vec_len (tm->trace_buffer_pool[trace_index]) = 0;
+  pool_put_index (tm->trace_buffer_pool, trace_index);
 }
 
 always_inline void
@@ -108,6 +121,9 @@ vlib_trace_next_frame (vlib_main_t * vm,
 }
 
 void trace_apply_filter (vlib_main_t * vm);
+int vnet_is_packet_traced (vlib_buffer_t * b,
+                           u32 classify_table_index, int func);
+
 
 /* Mark buffer as traced and allocate trace buffer. */
 always_inline void
@@ -121,6 +137,16 @@ vlib_trace_buffer (vlib_main_t * vm,
   if (PREDICT_FALSE (tm->trace_enable == 0))
     return;
 
+  /* Classifier filter in use? */
+  if (PREDICT_FALSE (vlib_global_main.trace_filter.trace_filter_enable))
+    {
+      /* See if we're supposed to trace this packet... */
+      if (vnet_is_packet_traced
+          (b, vlib_global_main.trace_filter.trace_classify_table_index,
+           0 /* full classify */ ) != 1)
+        return;
+    }
+
   /*
    * Apply filter to existing traces to keep number of allocated traces low.
    * Performed each time around the main loop.
@@ -129,6 +155,10 @@ vlib_trace_buffer (vlib_main_t * vm,
     {
       tm->last_main_loop_count = vm->main_loop_count;
       trace_apply_filter (vm);
+
+      if (tm->trace_buffer_callback)
+        (tm->trace_buffer_callback) ((struct vlib_main_t *) vm,
+                                     (struct vlib_trace_main_t *) tm);
     }
 
   vlib_trace_next_frame (vm, r, next_index);
@@ -138,7 +168,8 @@ vlib_trace_buffer (vlib_main_t * vm,
   do
     {
       b->flags |= VLIB_BUFFER_IS_TRACED;
-      b->trace_index = h - tm->trace_buffer_pool;
+      b->trace_handle = vlib_buffer_make_trace_handle
+        (vm->thread_index, h - tm->trace_buffer_pool);
     }
   while (follow_chain && (b = vlib_get_next_buffer (vm, b)));
 }
@@ -149,7 +180,7 @@ vlib_buffer_copy_trace_flag (vlib_main_t * vm, vlib_buffer_t * b,
 {
   vlib_buffer_t *b_target = vlib_get_buffer (vm, bi_target);
   b_target->flags |= b->flags & VLIB_BUFFER_IS_TRACED;
-  b_target->trace_index = b->trace_index;
+  b_target->trace_handle = b->trace_handle;
 }
 
 always_inline u32
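
Notes on the change, with illustrative sketches. With this patch, vlib_add_trace() itself returns vnet_trace_dummy when VLIB_BUFFER_IS_TRACED is clear, so the usual flag check in callers becomes a fast-path optimization rather than a correctness requirement. A minimal caller-side sketch, assuming a node-private my_trace_t record type; vm, node, b0 and next0 are the usual locals of a node dispatch loop and are not part of this patch:

    /* Node-private trace record; the layout is up to the node. */
    typedef struct
    {
      u32 next_index;
      u32 sw_if_index;
    } my_trace_t;

    /* Inside the dispatch loop.  The flag check merely skips the
     * call on the common path; even without it, vlib_add_trace()
     * now hands back vnet_trace_dummy (writable scratch space) for
     * untraced buffers, so writing through t stays safe. */
    if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
      {
        my_trace_t *t = vlib_add_trace (vm, node, b0, sizeof (t[0]));
        t->next_index = next0;
        t->sw_if_index = vnet_buffer (b0)->sw_if_index[VLIB_RX];
      }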
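The b->trace_index field becomes b->trace_handle, which packs the owning thread together with the per-thread trace-pool index; vlib_buffer_make_trace_handle(), vlib_buffer_get_trace_thread() and vlib_buffer_get_trace_index() (defined outside this diff, in vlib/buffer.h) do the packing and unpacking. An illustrative sketch of one plausible encoding, assuming an 8-bit thread index above a 24-bit pool index; the definitions in vlib/buffer.h are authoritative:

    /* Illustrative only: one plausible trace_handle layout. */
    static inline u32
    example_make_trace_handle (u32 thread, u32 pool_index)
    {
      ASSERT (thread < 0x100);          /* fits in 8 bits */
      ASSERT (pool_index < 0x01000000); /* fits in 24 bits */
      return (thread << 24) | pool_index;
    }

    static inline u32
    example_trace_thread (u32 handle)
    {
      return handle >> 24;         /* which thread owns the trace */
    }

    static inline u32
    example_trace_index (u32 handle)
    {
      return handle & 0x00FFFFFF;  /* index into that thread's pool */
    }

Carrying the thread in the handle is what enables the new handoff check in vlib_add_trace(): a buffer traced on thread A and handed off to thread B still carries A's thread index, so B can detect the mismatch against vm->thread_index and record it via vlib_add_handoff_trace().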
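The patch also adds two hooks on vlib_trace_main_t: add_trace_callback intercepts trace-record allocation in vlib_add_trace(), and trace_buffer_callback fires after trace_apply_filter() runs each main-loop iteration. A hedged sketch of installing the allocation hook; the callback signature is inferred from the call site above, my_scratch_space() is a hypothetical helper that must return at least n_data_bytes of writable memory, and direct field assignment is shown only because the field is visible on tm in this diff:

    /* Hypothetical hook: divert trace records away from the
     * per-thread trace pool.  Signature inferred from the call
     * site in vlib_add_trace(). */
    static void *
    my_add_trace_cb (struct vlib_main_t *vm, struct vlib_node_runtime_t *r,
                     struct vlib_buffer_t *b, u32 n_data_bytes)
    {
      /* Callers write the trace record through this pointer. */
      return my_scratch_space (n_data_bytes);
    }

    /* Installed, e.g., from an init function: */
    vm->trace_main.add_trace_callback = my_add_trace_cb;

Because the hook is tested before tm->trace_enable, an installed callback sees every vlib_add_trace() call on a traced buffer, whether or not pool-based tracing is active.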