In tap tx routine, virtio_interface_tx_inline, there used to be an
interface spinlock to ensure packets are processed in an orderly fashion
clib_spinlock_lock_if_init (&vif->lockp);
When virtio code was introduced in 19.04, that line is changed to
clib_spinlock_lock_if_init (&vring->lockp);
to accommodate multi-queues.
Unfortunately, although the spinlock exists in the vring, it was never
initialized for tap, only for virtio. As a result, many nasty things can
happen when running a tap interface in a multi-thread environment. A crash
is inevitable.
The fix is to initialize vring->lockp for tap and remove vif->lockp as it
is not used anymore.
Change-Id: Ibc8f5c8192af550e3940597c06992dfdaccb4c49
Signed-off-by: Steven Luong <sluong@cisco.com>
tap_create_if (vlib_main_t * vm, tap_create_if_args_t * args)
{
vnet_main_t *vnm = vnet_get_main ();
tap_create_if (vlib_main_t * vm, tap_create_if_args_t * args)
{
vnet_main_t *vnm = vnet_get_main ();
- vlib_thread_main_t *thm = vlib_get_thread_main ();
virtio_main_t *vim = &virtio_main;
tap_main_t *tm = &tap_main;
vnet_sw_interface_t *sw;
virtio_main_t *vim = &virtio_main;
tap_main_t *tm = &tap_main;
vnet_sw_interface_t *sw;
VNET_HW_INTERFACE_FLAG_LINK_UP);
vif->cxq_vring = NULL;
VNET_HW_INTERFACE_FLAG_LINK_UP);
vif->cxq_vring = NULL;
- if (thm->n_vlib_mains > 1)
- clib_spinlock_init (&vif->lockp);
vec_free (vif->txq_vrings);
tm->tap_ids = clib_bitmap_set (tm->tap_ids, vif->id, 0);
vec_free (vif->txq_vrings);
tm->tap_ids = clib_bitmap_set (tm->tap_ids, vif->id, 0);
- clib_spinlock_free (&vif->lockp);
clib_memset (vif, 0, sizeof (*vif));
pool_put (mm->interfaces, vif);
clib_memset (vif, 0, sizeof (*vif));
pool_put (mm->interfaces, vif);
+ vlib_thread_main_t *thm = vlib_get_thread_main ();
vec_validate_aligned (vif->txq_vrings, TX_QUEUE_ACCESS (idx),
CLIB_CACHE_LINE_BYTES);
vring = vec_elt_at_index (vif->txq_vrings, TX_QUEUE_ACCESS (idx));
vec_validate_aligned (vif->txq_vrings, TX_QUEUE_ACCESS (idx),
CLIB_CACHE_LINE_BYTES);
vring = vec_elt_at_index (vif->txq_vrings, TX_QUEUE_ACCESS (idx));
+ if (thm->n_vlib_mains > 1)
+ clib_spinlock_init (&vring->lockp);
if (vring->avail)
clib_mem_free (vring->avail);
vec_free (vring->buffers);
if (vring->avail)
clib_mem_free (vring->avail);
vec_free (vring->buffers);
+ clib_spinlock_free (&vring->lockp);
{
CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
u32 flags;
{
CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
u32 flags;
u32 dev_instance;
u32 hw_if_index;
u32 dev_instance;
u32 hw_if_index;