Packet-tracing performance doesn't justify inlining
vlib_add_trace(...) at more than 500 call sites.
Inlining makes a roughly 15% text-segment size difference in a
representative use case:
Inline:
$ size .../vnet_skx.dir/ipsec/ipsec_input.c.o
   text    data     bss     dec     hex filename
   6831      80       0    6911    1aff .../vnet_skx.dir/ipsec/ipsec_input.c.o
Not inline:
$ size .../vnet_skx.dir/ipsec/ipsec_input.c.o
   text    data     bss     dec     hex filename
   5776      80       0    5856    16e0 .../vnet_skx.dir/ipsec/ipsec_input.c.o
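That is, (6831 - 5776) / 6831, or about 15.4% of the inline text size.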
Retain the original code as vlib_add_trace_inline, and instantiate it
once as vlib_add_trace.
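
The general pattern, as a minimal sketch (foo.h, foo.c, and foo itself
are hypothetical names used for illustration, not part of this change):

  /* foo.h: keep the inline flavor for any genuinely hot callers */
  always_inline int
  foo_inline (int x)
  {
    return x + 1;
  }

  /* ... and declare a single out-of-line instantiation */
  int foo (int x);

  /* foo.c: instantiate the inline code exactly once */
  int
  foo (int x)
  {
    return foo_inline (x);
  }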
Type: refactor
Signed-off-by: Dave Barach <[email protected]>
Change-Id: Iaf431dbf00c4aad03663d86f9dd1322e84d03962
return 1;
}
+void *
+vlib_add_trace (vlib_main_t * vm,
+ vlib_node_runtime_t * r, vlib_buffer_t * b, u32 n_data_bytes)
+{
+ return vlib_add_trace_inline (vm, r, b, n_data_bytes);
+}
+
/*
* fd.io coding-style-patch-verification: ON
*
void vlib_add_handoff_trace (vlib_main_t * vm, vlib_buffer_t * b);
always_inline void *
-vlib_add_trace (vlib_main_t * vm,
- vlib_node_runtime_t * r, vlib_buffer_t * b, u32 n_data_bytes)
+vlib_add_trace_inline (vlib_main_t * vm,
+ vlib_node_runtime_t * r, vlib_buffer_t * b,
+ u32 n_data_bytes)
{
vlib_trace_main_t *tm = &vm->trace_main;
vlib_trace_header_t *h;
return h->data;
}
+/* Non-inline (typical use-case) version of the above */
+void *vlib_add_trace (vlib_main_t * vm,
+ vlib_node_runtime_t * r, vlib_buffer_t * b,
+ u32 n_data_bytes);
+
always_inline vlib_trace_header_t *
vlib_trace_header_next (vlib_trace_header_t * h)
{
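
For reference, a typical caller of the non-inline version looks like the
sketch below; my_trace_t, b0, next0, and the surrounding node dispatch
code are illustrative assumptions, not part of this change:

  typedef struct
  {
    u32 next_index;
  } my_trace_t;

  /* In a node dispatch function: only traced buffers pay for a
     trace record, so the out-of-line call stays off the fast path */
  if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
    {
      my_trace_t *t = vlib_add_trace (vm, node, b0, sizeof (*t));
      t->next_index = next0;
    }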