/* Pre-allocate interrupt runtime indices and lock. */
vec_alloc (nm->pending_local_interrupts, 32);
vec_alloc (nm->pending_remote_interrupts, 32);
+ vec_alloc_aligned (nm->pending_remote_interrupts_notify, 1,
+ CLIB_CACHE_LINE_BYTES);
clib_spinlock_init (&nm->pending_interrupt_lock);
/* Pre-allocate expired nodes. */
cpu_time_now = dispatch_pending_interrupts (vm, nm, cpu_time_now);
/* handle remote interrupts */
- if (_vec_len (nm->pending_remote_interrupts))
+ if (PREDICT_FALSE (_vec_len (nm->pending_remote_interrupts)))
{
vlib_node_interrupt_t *in;
in = nm->pending_local_interrupts;
nm->pending_local_interrupts = nm->pending_remote_interrupts;
nm->pending_remote_interrupts = in;
+ *nm->pending_remote_interrupts_notify = 0;
clib_spinlock_unlock (&nm->pending_interrupt_lock);
cpu_time_now = dispatch_pending_interrupts (vm, nm, cpu_time_now);
/* Node runtime indices for input nodes with pending interrupts. */
vlib_node_interrupt_t *pending_local_interrupts;
vlib_node_interrupt_t *pending_remote_interrupts;
+ volatile u32 *pending_remote_interrupts_notify;
clib_spinlock_t pending_interrupt_lock;
/* Input nodes are switched from/to interrupt to/from polling mode
vec_add2 (nm->pending_remote_interrupts, i, 1);
i->node_runtime_index = n->runtime_index;
i->data = data;
+ *nm->pending_remote_interrupts_notify = 1;
clib_spinlock_unlock (&nm->pending_interrupt_lock);
}
}
while (nanosleep (&ts, &tsrem) < 0)
ts = tsrem;
- if (*vlib_worker_threads->wait_at_barrier)
+ if (*vlib_worker_threads->wait_at_barrier
+ || *nm->pending_remote_interrupts_notify)
goto done;
}
}
#define _vec_resize_numa(V,L,DB,HB,A,S) \
({ \
__typeof__ ((V)) _V; \
- _V = _vec_resize_inline(V,L,DB,HB,clib_max((__alignof__((V)[0])),(A)),(S)); \
+ _V = _vec_resize_inline((void *)V,L,DB,HB,clib_max((__alignof__((V)[0])),(A)),(S)); \
_V; \
})