A deadly combination is clib_{set,long}jmp + lazy linking + the compiler's
tail-call optimization. On the first call to clib_setjmp, the dynamic linker
executes loader code which then calls clib_setjmp, so the stored stack
position contains dynamic loader data. Tail-call optimization simply jumps
back to the calling code when clib_longjmp is called, and that results in the
wrong return address being used from the stack.
Change-Id: Ia7d8dbd5b2c425cdd0449374aa07ab6b684a330e
Type: fix
Signed-off-by: Damjan Marion <damarion@cisco.com>
vat_main_t vat_main;
-void
-vat_suspend (vlib_main_t * vm, f64 interval)
+void __clib_no_tail_calls
+vat_suspend (vlib_main_t *vm, f64 interval)
{
vlib_process_suspend (vm, interval);
}
#define __clib_section(s) __attribute__ ((section(s)))
#define __clib_warn_unused_result __attribute__ ((warn_unused_result))
#define __clib_export __attribute__ ((visibility("default")))
+#ifdef __clang__
+#define __clib_no_tail_calls __attribute__ ((disable_tail_calls))
+#else
+#define __clib_no_tail_calls \
+ __attribute__ ((optimize ("no-optimize-sibling-calls")))
+#endif
#define never_inline __attribute__ ((__noinline__))