virtio: fix txq locking 42/28842/2
authorBenoît Ganne <bganne@cisco.com>
Tue, 15 Sep 2020 08:58:07 +0000 (10:58 +0200)
committerDamjan Marion <dmarion@me.com>
Fri, 18 Sep 2020 10:40:47 +0000 (10:40 +0000)
Initialize the txq lock only if some txqs are shared between workers, and
check whether another worker is already operating on the txq before
processing GRO timeouts in the input node.

Type: fix

Change-Id: I89dab6c0e6eb6a7aa621fa1548b0a2c76e6c7581
Signed-off-by: Benoît Ganne <bganne@cisco.com>
src/vnet/devices/virtio/node.c
src/vnet/devices/virtio/pci.c
src/vppinfra/lock.h

index 7fabe36..1c9cfd0 100644 (file)
@@ -279,10 +279,12 @@ virtio_device_input_gso_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
   u16 last = vring->last_used_idx;
   u16 n_left = vring->used->idx - last;
 
-  if (vif->packet_coalesce)
+  if (vif->packet_coalesce
+      && clib_spinlock_trylock_if_init (&txq_vring->lockp))
     {
       vnet_gro_flow_table_schedule_node_on_dispatcher (vm,
                                                       txq_vring->flow_table);
+      clib_spinlock_unlock_if_init (&txq_vring->lockp);
     }
 
   if ((vring->used->flags & VRING_USED_F_NO_NOTIFY) == 0 &&
index 5ba9f36..df8e2bd 100644 (file)
@@ -494,6 +494,7 @@ virtio_pci_control_vring_init (vlib_main_t * vm, virtio_if_t * vif,
 clib_error_t *
 virtio_pci_vring_init (vlib_main_t * vm, virtio_if_t * vif, u16 queue_num)
 {
+  vlib_thread_main_t *vtm = vlib_get_thread_main ();
   clib_error_t *error = 0;
   u16 queue_size = 0;
   virtio_vring_t *vring;
@@ -519,7 +520,8 @@ virtio_pci_vring_init (vlib_main_t * vm, virtio_if_t * vif, u16 queue_num)
       vec_validate_aligned (vif->txq_vrings, TX_QUEUE_ACCESS (queue_num),
                            CLIB_CACHE_LINE_BYTES);
       vring = vec_elt_at_index (vif->txq_vrings, TX_QUEUE_ACCESS (queue_num));
-      clib_spinlock_init (&vring->lockp);
+      if (vif->max_queue_pairs < vtm->n_vlib_mains)
+       clib_spinlock_init (&vring->lockp);
     }
   else
     {
index 3cfe11c..470890b 100644 (file)
@@ -91,6 +91,15 @@ clib_spinlock_lock (clib_spinlock_t * p)
   CLIB_LOCK_DBG (p);
 }
 
+static_always_inline int
+clib_spinlock_trylock (clib_spinlock_t * p)
+{
+  if (PREDICT_FALSE (CLIB_SPINLOCK_IS_LOCKED (p)))
+    return 0;
+  clib_spinlock_lock (p);
+  return 1;
+}
+
 static_always_inline void
 clib_spinlock_lock_if_init (clib_spinlock_t * p)
 {
@@ -98,6 +107,14 @@ clib_spinlock_lock_if_init (clib_spinlock_t * p)
     clib_spinlock_lock (p);
 }
 
+static_always_inline int
+clib_spinlock_trylock_if_init (clib_spinlock_t * p)
+{
+  if (PREDICT_FALSE (*p != 0))
+    return clib_spinlock_trylock (p);
+  return 1;
+}
+
 static_always_inline void
 clib_spinlock_unlock (clib_spinlock_t * p)
 {