virtio: move retry logic to outer function
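With this change the inner transmit path no longer retries on its own: virtio_interface_tx_gso_inline just returns the number of packets it could not enqueue, and the device-class TX function owns the vring selection, the spinlock, the GRO pass, the two-retry loop, packet buffering and the final drop. The program below is only a sketch of that caller-owned retry pattern (ring_t, ring_enqueue_burst and friends are made-up names, not VPP APIs); it mirrors the control flow visible in the hunks that follow.

    /* Sketch only: an inner enqueue helper that reports leftovers, and an
     * outer caller that owns the bounded retry loop, in the spirit of this
     * patch.  All names here are hypothetical, not part of VPP. */

    #include <stdint.h>
    #include <stdio.h>

    #define RING_SIZE 8

    typedef struct
    {
      uint32_t slots[RING_SIZE];
      uint32_t n_used;
    } ring_t;

    /* Inner helper: enqueue as many items as fit, return how many are left.
     * It does not retry (compare virtio_interface_tx_gso_inline, which now
     * simply returns n_left). */
    static uint16_t
    ring_enqueue_burst (ring_t *r, const uint32_t *items, uint16_t n_left)
    {
      while (n_left && r->n_used < RING_SIZE)
        {
          r->slots[r->n_used++] = *items++;
          n_left--;
        }
      return n_left;
    }

    /* Pretend the device consumed a couple of descriptors since last time,
     * standing in for virtio_free_used_device_desc (). */
    static void
    ring_reclaim_used (ring_t *r)
    {
      if (r->n_used >= 2)
        r->n_used -= 2;
    }

    /* Outer function: owns the retry count, reclaims used slots before each
     * attempt, and handles the leftovers exactly once. */
    static uint16_t
    tx_outer (ring_t *r, const uint32_t *items, uint16_t n)
    {
      uint16_t n_left = n;
      uint16_t retry_count = 2;

    retry:
      ring_reclaim_used (r);
      n_left = ring_enqueue_burst (r, items + (n - n_left), n_left);

      if (n_left && retry_count--)
        goto retry;

      if (n_left)
        printf ("dropping %u packets: no free slots\n", (unsigned) n_left);

      return n - n_left;	/* packets actually sent */
    }

    int
    main (void)
    {
      ring_t r = { .n_used = 6 };
      uint32_t pkts[12];
      for (uint32_t i = 0; i < 12; i++)
        pkts[i] = i;
      printf ("sent %u of 12\n", (unsigned) tx_outer (&r, pkts, 12));
      return 0;
    }

As in the patch, retry_count = 2 allows at most three enqueue attempts before the leftovers are buffered or dropped.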
diff --git a/src/vnet/devices/virtio/device.c b/src/vnet/devices/virtio/device.c
index a414f82..29b38a2 100644
--- a/src/vnet/devices/virtio/device.c
+++ b/src/vnet/devices/virtio/device.c
@@ -74,8 +74,8 @@ typedef struct
 {
   u32 buffer_index;
   u32 sw_if_index;
-  vlib_buffer_t buffer;
   generic_header_offset_t gho;
+  vlib_buffer_t buffer;
 } virtio_tx_trace_t;
 
 static u8 *
@@ -312,11 +312,11 @@ set_gso_offsets (vlib_buffer_t * b, virtio_net_hdr_v1_t * hdr, int is_l2)
 }
 
 static_always_inline u16
-add_buffer_to_slot (vlib_main_t * vm, virtio_if_t * vif,
-                   virtio_if_type_t type, virtio_vring_t * vring,
-                   u32 bi, u16 free_desc_count,
+add_buffer_to_slot (vlib_main_t * vm, vlib_node_runtime_t * node,
+                   virtio_if_t * vif, virtio_if_type_t type,
+                   virtio_vring_t * vring, u32 bi, u16 free_desc_count,
                    u16 avail, u16 next, u16 mask, int do_gso,
-                   int csum_offload, uword node_index)
+                   int csum_offload)
 {
   u16 n_added = 0;
   int hdr_sz = vif->virtio_net_hdr_sz;
@@ -334,7 +334,7 @@ add_buffer_to_slot (vlib_main_t * vm, virtio_if_t * vif,
        set_gso_offsets (b, hdr, is_l2);
       else
        {
-         virtio_interface_drop_inline (vm, node_index, &bi, 1,
+         virtio_interface_drop_inline (vm, node->node_index, &bi, 1,
                                        VIRTIO_TX_ERROR_GSO_PACKET_DROP);
          return n_added;
        }
@@ -346,12 +346,17 @@ add_buffer_to_slot (vlib_main_t * vm, virtio_if_t * vif,
        set_checksum_offsets (b, hdr, is_l2);
       else
        {
-         virtio_interface_drop_inline (vm, node_index, &bi, 1,
+         virtio_interface_drop_inline (vm, node->node_index, &bi, 1,
                                        VIRTIO_TX_ERROR_CSUM_OFFLOAD_PACKET_DROP);
          return n_added;
        }
     }
 
+  if (PREDICT_FALSE (b->flags & VLIB_BUFFER_IS_TRACED))
+    {
+      virtio_tx_trace (vm, node, type, b, bi);
+    }
+
   if (PREDICT_TRUE ((b->flags & VLIB_BUFFER_NEXT_PRESENT) == 0))
     {
       d->addr =
@@ -374,7 +379,7 @@ add_buffer_to_slot (vlib_main_t * vm, virtio_if_t * vif,
       u32 indirect_buffer = 0;
       if (PREDICT_FALSE (vlib_buffer_alloc (vm, &indirect_buffer, 1) == 0))
        {
-         virtio_interface_drop_inline (vm, node_index, &bi, 1,
+         virtio_interface_drop_inline (vm, node->node_index, &bi, 1,
                                        VIRTIO_TX_ERROR_INDIRECT_DESC_ALLOC_FAILED);
          return n_added;
        }
@@ -536,38 +541,17 @@ virtio_find_free_desc (virtio_vring_t * vring, u16 size, u16 mask,
     }
 }
 
-static_always_inline uword
+static_always_inline u16
 virtio_interface_tx_gso_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
-                               vlib_frame_t * frame, virtio_if_t * vif,
-                               virtio_if_type_t type, int do_gso,
-                               int csum_offload, int do_gro)
+                               virtio_if_t * vif,
+                               virtio_if_type_t type, virtio_vring_t * vring,
+                               u32 * buffers, u16 n_left, int do_gso,
+                               int csum_offload)
 {
-  u16 n_left = frame->n_vectors;
-  virtio_vring_t *vring;
-  u16 qid = vm->thread_index % vif->num_txqs;
-  vring = vec_elt_at_index (vif->txq_vrings, qid);
   u16 used, next, avail, n_buffers = 0, n_buffers_left = 0;
   u16 sz = vring->size;
   u16 mask = sz - 1;
-  u16 retry_count = 2;
-  u32 *buffers = vlib_frame_vector_args (frame);
-  u32 to[GRO_TO_VECTOR_SIZE (n_left)];
-
-  clib_spinlock_lock_if_init (&vring->lockp);
-
-  if ((vring->used->flags & VRING_USED_F_NO_NOTIFY) == 0 &&
-      (vring->last_kick_avail_idx != vring->avail->idx))
-    virtio_kick (vm, vring, vif);
-
-  if (do_gro)
-    {
-      n_left = vnet_gro_inline (vm, vring->flow_table, buffers, n_left, to);
-      buffers = to;
-    }
-
-retry:
-  /* free consumed buffers */
-  virtio_free_used_device_desc (vm, vring, node->node_index);
+  u16 n_vectors = n_left;
 
   used = vring->desc_in_use;
   next = vring->desc_next;
@@ -599,15 +583,11 @@ retry:
          u32 bi = virtio_vring_buffering_read_from_front (vring->buffering);
          if (bi == ~0)
            break;
-         vlib_buffer_t *b0 = vlib_get_buffer (vm, bi);
-         if (b0->flags & VLIB_BUFFER_IS_TRACED)
-           {
-             virtio_tx_trace (vm, node, type, b0, buffers[0]);
-           }
+
          n_added =
-           add_buffer_to_slot (vm, vif, type, vring, bi, free_desc_count,
-                               avail, next, mask, do_gso, csum_offload,
-                               node->node_index);
+           add_buffer_to_slot (vm, node, vif, type, vring, bi,
+                               free_desc_count, avail, next, mask, do_gso,
+                               csum_offload);
          if (PREDICT_FALSE (n_added == 0))
            {
              n_buffers_left--;
@@ -628,15 +608,10 @@ retry:
     {
       u16 n_added = 0;
 
-      vlib_buffer_t *b0 = vlib_get_buffer (vm, buffers[0]);
-      if (b0->flags & VLIB_BUFFER_IS_TRACED)
-       {
-         virtio_tx_trace (vm, node, type, b0, buffers[0]);
-       }
       n_added =
-       add_buffer_to_slot (vm, vif, type, vring, buffers[0], free_desc_count,
-                           avail, next, mask, do_gso, csum_offload,
-                           node->node_index);
+       add_buffer_to_slot (vm, node, vif, type, vring, buffers[0],
+                           free_desc_count, avail, next, mask, do_gso,
+                           csum_offload);
 
       if (PREDICT_FALSE (n_added == 0))
        {
@@ -655,7 +630,7 @@ retry:
       free_desc_count -= n_added;
     }
 
-  if (n_left != frame->n_vectors || n_buffers != n_buffers_left)
+  if (n_left != n_vectors || n_buffers != n_buffers_left)
     {
       CLIB_MEMORY_STORE_BARRIER ();
       vring->avail->idx = avail;
@@ -665,54 +640,32 @@ retry:
        virtio_kick (vm, vring, vif);
     }
 
-  if (n_left)
-    {
-      if (retry_count--)
-       goto retry;
-
-      if (vif->packet_buffering)
-       {
-
-         u16 n_buffered =
-           virtio_vring_buffering_store_packets (vring->buffering, buffers,
-                                                 n_left);
-         buffers += n_buffered;
-         n_left -= n_buffered;
-       }
-      if (n_left)
-       virtio_interface_drop_inline (vm, node->node_index,
-                                     buffers, n_left,
-                                     VIRTIO_TX_ERROR_NO_FREE_SLOTS);
-    }
-
-  clib_spinlock_unlock_if_init (&vring->lockp);
-
-  return frame->n_vectors - n_left;
+  return n_left;
 }
 
-static_always_inline uword
+static_always_inline u16
 virtio_interface_tx_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
-                           vlib_frame_t * frame, virtio_if_t * vif,
-                           virtio_if_type_t type)
+                           virtio_if_t * vif,
+                           virtio_vring_t * vring, virtio_if_type_t type,
+                           u32 * buffers, u16 n_left)
 {
   vnet_main_t *vnm = vnet_get_main ();
   vnet_hw_interface_t *hw = vnet_get_hw_interface (vnm, vif->hw_if_index);
 
   if (hw->flags & VNET_HW_INTERFACE_FLAG_SUPPORTS_GSO)
-    return virtio_interface_tx_gso_inline (vm, node, frame, vif, type,
-                                          1 /* do_gso */ ,
-                                          1 /* checksum offload */ ,
-                                          vif->packet_coalesce);
+    return virtio_interface_tx_gso_inline (vm, node, vif, type, vring,
+                                          buffers, n_left, 1 /* do_gso */ ,
+                                          1 /* checksum offload */ );
   else if (hw->flags & VNET_HW_INTERFACE_FLAG_SUPPORTS_TX_L4_CKSUM_OFFLOAD)
-    return virtio_interface_tx_gso_inline (vm, node, frame, vif, type,
+    return virtio_interface_tx_gso_inline (vm, node, vif, type, vring,
+                                          buffers, n_left,
                                           0 /* no do_gso */ ,
-                                          1 /* checksum offload */ ,
-                                          0 /* do_gro */ );
+                                          1 /* checksum offload */ );
   else
-    return virtio_interface_tx_gso_inline (vm, node, frame, vif, type,
+    return virtio_interface_tx_gso_inline (vm, node, vif, type, vring,
+                                          buffers, n_left,
                                           0 /* no do_gso */ ,
-                                          0 /* no checksum offload */ ,
-                                          0 /* do_gro */ );
+                                          0 /* no checksum offload */ );
 }
 
 VNET_DEVICE_CLASS_TX_FN (virtio_device_class) (vlib_main_t * vm,
@@ -722,20 +675,61 @@ VNET_DEVICE_CLASS_TX_FN (virtio_device_class) (vlib_main_t * vm,
   virtio_main_t *nm = &virtio_main;
   vnet_interface_output_runtime_t *rund = (void *) node->runtime_data;
   virtio_if_t *vif = pool_elt_at_index (nm->interfaces, rund->dev_instance);
+  u16 qid = vm->thread_index % vif->num_txqs;
+  virtio_vring_t *vring = vec_elt_at_index (vif->txq_vrings, qid);
+  u16 n_left = frame->n_vectors;
+  u32 *buffers = vlib_frame_vector_args (frame);
+  u32 to[GRO_TO_VECTOR_SIZE (n_left)];
+
+  clib_spinlock_lock_if_init (&vring->lockp);
+
+  if ((vring->used->flags & VRING_USED_F_NO_NOTIFY) == 0 &&
+      (vring->last_kick_avail_idx != vring->avail->idx))
+    virtio_kick (vm, vring, vif);
+
+  if (vif->packet_coalesce)
+    {
+      n_left = vnet_gro_inline (vm, vring->flow_table, buffers, n_left, to);
+      buffers = to;
+    }
+
+  u16 retry_count = 2;
+
+retry:
+  /* free consumed buffers */
+  virtio_free_used_device_desc (vm, vring, node->node_index);
 
   if (vif->type == VIRTIO_IF_TYPE_TAP)
-    return virtio_interface_tx_inline (vm, node, frame, vif,
-                                      VIRTIO_IF_TYPE_TAP);
+    n_left = virtio_interface_tx_inline (vm, node, vif, vring,
+                                        VIRTIO_IF_TYPE_TAP, buffers, n_left);
   else if (vif->type == VIRTIO_IF_TYPE_PCI)
-    return virtio_interface_tx_inline (vm, node, frame, vif,
-                                      VIRTIO_IF_TYPE_PCI);
+    n_left = virtio_interface_tx_inline (vm, node, vif, vring,
+                                        VIRTIO_IF_TYPE_PCI, buffers, n_left);
   else if (vif->type == VIRTIO_IF_TYPE_TUN)
-    return virtio_interface_tx_inline (vm, node, frame, vif,
-                                      VIRTIO_IF_TYPE_TUN);
+    n_left = virtio_interface_tx_inline (vm, node, vif, vring,
+                                        VIRTIO_IF_TYPE_TUN, buffers, n_left);
   else
     ASSERT (0);
 
-  return 0;
+  if (n_left && retry_count--)
+    goto retry;
+
+  if (vif->packet_buffering && n_left)
+    {
+      u16 n_buffered =
+       virtio_vring_buffering_store_packets (vring->buffering, buffers,
+                                             n_left);
+      buffers += n_buffered;
+      n_left -= n_buffered;
+    }
+  if (n_left)
+    virtio_interface_drop_inline (vm, node->node_index,
+                                 buffers + frame->n_vectors - n_left, n_left,
+                                 VIRTIO_TX_ERROR_NO_FREE_SLOTS);
+
+  clib_spinlock_unlock_if_init (&vring->lockp);
+
+  return frame->n_vectors - n_left;
 }
 
 static void
@@ -766,13 +760,13 @@ virtio_clear_hw_interface_counters (u32 instance)
 
 static clib_error_t *
 virtio_interface_rx_mode_change (vnet_main_t * vnm, u32 hw_if_index, u32 qid,
-                                vnet_hw_interface_rx_mode mode)
+                                vnet_hw_if_rx_mode mode)
 {
+  vlib_main_t *vm = vnm->vlib_main;
   virtio_main_t *mm = &virtio_main;
   vnet_hw_interface_t *hw = vnet_get_hw_interface (vnm, hw_if_index);
   virtio_if_t *vif = pool_elt_at_index (mm->interfaces, hw->dev_instance);
   virtio_vring_t *rx_vring = vec_elt_at_index (vif->rxq_vrings, qid);
-  virtio_vring_t *tx_vring = 0;
 
   if (vif->type == VIRTIO_IF_TYPE_PCI && !(vif->support_int_mode))
     {
@@ -780,34 +774,34 @@ virtio_interface_rx_mode_change (vnet_main_t * vnm, u32 hw_if_index, u32 qid,
       return clib_error_return (0, "interrupt mode is not supported");
     }
 
-  if (mode == VNET_HW_INTERFACE_RX_MODE_POLLING)
+  if (mode == VNET_HW_IF_RX_MODE_POLLING)
     {
-      vec_foreach (tx_vring, vif->txq_vrings)
-      {
-       /* only enable packet coalesce in poll mode */
-       gro_flow_table_set_is_enable (tx_vring->flow_table, 1);
-       /* only enable packet buffering in poll mode */
-       virtio_vring_buffering_set_is_enable (tx_vring->buffering, 1);
-      }
+      if (vif->packet_coalesce || vif->packet_buffering)
+       {
+         if (mm->interrupt_queues_count > 0)
+           mm->interrupt_queues_count--;
+         if (mm->interrupt_queues_count == 0)
+           vlib_process_signal_event (vm,
+                                      virtio_send_interrupt_node.index,
+                                      VIRTIO_EVENT_STOP_TIMER, 0);
+       }
       rx_vring->avail->flags |= VRING_AVAIL_F_NO_INTERRUPT;
     }
   else
     {
       if (vif->packet_coalesce || vif->packet_buffering)
        {
-         virtio_log_warning (vif,
-                             "interface %U is in interrupt mode, disabling packet coalescing or buffering",
-                             format_vnet_sw_if_index_name, vnet_get_main (),
-                             vif->sw_if_index);
-         vec_foreach (tx_vring, vif->txq_vrings)
-         {
-           gro_flow_table_set_is_enable (tx_vring->flow_table, 0);
-           virtio_vring_buffering_set_is_enable (tx_vring->buffering, 0);
-         }
+         mm->interrupt_queues_count++;
+         if (mm->interrupt_queues_count == 1)
+           vlib_process_signal_event (vm,
+                                      virtio_send_interrupt_node.index,
+                                      VIRTIO_EVENT_START_TIMER, 0);
        }
       rx_vring->avail->flags &= ~VRING_AVAIL_F_NO_INTERRUPT;
     }
 
+  rx_vring->mode = mode;
+
   return 0;
 }