u16 last = vring->last_used_idx;
u16 n_left = vring->used->idx - last;
- if (vif->packet_coalesce)
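+ /* flush coalesced (GRO) packets only if the txq lock can be taken
+    without blocking; on a failed trylock the flush is skipped this pass */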
+ if (vif->packet_coalesce
+ && clib_spinlock_trylock_if_init (&txq_vring->lockp))
{
vnet_gro_flow_table_schedule_node_on_dispatcher (vm,
txq_vring->flow_table);
+ clib_spinlock_unlock_if_init (&txq_vring->lockp);
}
if ((vring->used->flags & VRING_USED_F_NO_NOTIFY) == 0 &&

clib_error_t *
virtio_pci_vring_init (vlib_main_t * vm, virtio_if_t * vif, u16 queue_num)
{
+ vlib_thread_main_t *vtm = vlib_get_thread_main ();
clib_error_t *error = 0;
u16 queue_size = 0;
virtio_vring_t *vring;
vec_validate_aligned (vif->txq_vrings, TX_QUEUE_ACCESS (queue_num),
CLIB_CACHE_LINE_BYTES);
vring = vec_elt_at_index (vif->txq_vrings, TX_QUEUE_ACCESS (queue_num));
- clib_spinlock_init (&vring->lockp);
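+ /* a txq lock is only needed when tx queues are shared between threads,
+    i.e. there are fewer queue pairs than vlib mains */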
+ if (vif->max_queue_pairs < vtm->n_vlib_mains)
+ clib_spinlock_init (&vring->lockp);
}
+static_always_inline int
+clib_spinlock_trylock (clib_spinlock_t * p)
+{
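+ /* give up immediately and report failure if the lock is already held */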
+ if (PREDICT_FALSE (CLIB_SPINLOCK_IS_LOCKED (p)))
+ return 0;
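+ /* the lock looked free; clib_spinlock_lock () may still spin briefly
+    if another thread wins the race */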
+ clib_spinlock_lock (p);
+ return 1;
+}
+
static_always_inline void
clib_spinlock_lock_if_init (clib_spinlock_t * p)
{
  if (PREDICT_FALSE (*p != 0))
    clib_spinlock_lock (p);
}
+static_always_inline int
+clib_spinlock_trylock_if_init (clib_spinlock_t * p)
+{
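+ /* an uninitialized (null) lock reports success, matching the other
+    *_if_init helpers */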
+ if (PREDICT_FALSE (*p != 0))
+ return clib_spinlock_trylock (p);
+ return 1;
+}
+
static_always_inline void
clib_spinlock_unlock (clib_spinlock_t * p)
{