virtio: add simple drop counter for interface
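
Count packets dropped on the virtio TX path in the per-interface simple
counters as well: virtio_interface_drop_inline () now takes the
virtio_if_t and bumps VNET_INTERFACE_COUNTER_DROP for the interface's
sw_if_index on the calling thread, in addition to the existing per-node
error counters, before freeing the buffers. The drops are then visible
in the interface drop counter (e.g. in "show interface" output).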
diff --git a/src/vnet/devices/virtio/device.c b/src/vnet/devices/virtio/device.c
index 6b57dd6..237e3d9 100644
--- a/src/vnet/devices/virtio/device.c
+++ b/src/vnet/devices/virtio/device.c
@@ -99,9 +99,9 @@ format_virtio_tx_trace (u8 * s, va_list * va)
   return s;
 }
 
-static_always_inline void
-virtio_tx_trace (vlib_main_t * vm, vlib_node_runtime_t * node,
-                vlib_buffer_t * b0, u32 bi, int is_tun)
+static void
+virtio_tx_trace (vlib_main_t *vm, vlib_node_runtime_t *node, vlib_buffer_t *b0,
+                u32 bi, int is_tun)
 {
   virtio_tx_trace_t *t;
   t = vlib_add_trace (vm, node, b0, sizeof (t[0]));
@@ -135,17 +135,20 @@ virtio_tx_trace (vlib_main_t * vm, vlib_node_runtime_t * node,
                    sizeof (t->buffer.pre_data));
 }
 
-static_always_inline void
-virtio_interface_drop_inline (vlib_main_t * vm, uword node_index,
-                             u32 * buffers, u16 n,
+static void
+virtio_interface_drop_inline (vlib_main_t *vm, virtio_if_t *vif,
+                             uword node_index, u32 *buffers, u16 n,
                              virtio_tx_func_error_t error)
 {
   vlib_error_count (vm, node_index, error, n);
+  vlib_increment_simple_counter (vnet_main.interface_main.sw_if_counters +
+                                  VNET_INTERFACE_COUNTER_DROP,
+                                vm->thread_index, vif->sw_if_index, n);
   vlib_buffer_free (vm, buffers, n);
 }
 
-static_always_inline void
-virtio_memset_ring_u32 (u32 * ring, u32 start, u32 ring_size, u32 n_buffers)
+static void
+virtio_memset_ring_u32 (u32 *ring, u32 start, u32 ring_size, u32 n_buffers)
 {
   ASSERT (n_buffers <= ring_size);
 
@@ -160,8 +163,8 @@ virtio_memset_ring_u32 (u32 * ring, u32 start, u32 ring_size, u32 n_buffers)
     }
 }
 
-static_always_inline void
-virtio_free_used_device_desc_split (vlib_main_t * vm, virtio_vring_t * vring,
+static void
+virtio_free_used_device_desc_split (vlib_main_t *vm, virtio_vring_t *vring,
                                    uword node_index)
 {
   u16 used = vring->desc_in_use;
@@ -226,8 +229,8 @@ virtio_free_used_device_desc_split (vlib_main_t * vm, virtio_vring_t * vring,
   vring->last_used_idx = last;
 }
 
-static_always_inline void
-virtio_free_used_device_desc_packed (vlib_main_t * vm, virtio_vring_t * vring,
+static void
+virtio_free_used_device_desc_packed (vlib_main_t *vm, virtio_vring_t *vring,
                                     uword node_index)
 {
   vring_packed_desc_t *d;
@@ -267,8 +270,8 @@ virtio_free_used_device_desc_packed (vlib_main_t * vm, virtio_vring_t * vring,
     }
 }
 
-static_always_inline void
-virtio_free_used_device_desc (vlib_main_t * vm, virtio_vring_t * vring,
+static void
+virtio_free_used_device_desc (vlib_main_t *vm, virtio_vring_t *vring,
                              uword node_index, int packed)
 {
   if (packed)
@@ -278,10 +281,12 @@ virtio_free_used_device_desc (vlib_main_t * vm, virtio_vring_t * vring,
 
 }
 
-static_always_inline void
-set_checksum_offsets (vlib_buffer_t * b, virtio_net_hdr_v1_t * hdr,
+static void
+set_checksum_offsets (vlib_buffer_t *b, virtio_net_hdr_v1_t *hdr,
                      const int is_l2)
 {
+  u32 oflags = vnet_buffer2 (b)->oflags;
+
   if (b->flags & VNET_BUFFER_F_IS_IP4)
     {
       ip4_header_t *ip4;
@@ -290,11 +295,11 @@ set_checksum_offsets (vlib_buffer_t * b, virtio_net_hdr_v1_t * hdr,
                                         0 /* ip6 */ );
       hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
       hdr->csum_start = gho.l4_hdr_offset;     // 0x22;
-      if (b->flags & VNET_BUFFER_F_OFFLOAD_TCP_CKSUM)
+      if (oflags & VNET_BUFFER_OFFLOAD_F_TCP_CKSUM)
        {
          hdr->csum_offset = STRUCT_OFFSET_OF (tcp_header_t, checksum);
        }
-      else if (b->flags & VNET_BUFFER_F_OFFLOAD_UDP_CKSUM)
+      else if (oflags & VNET_BUFFER_OFFLOAD_F_UDP_CKSUM)
        {
          hdr->csum_offset = STRUCT_OFFSET_OF (udp_header_t, checksum);
        }
@@ -305,7 +310,7 @@ set_checksum_offsets (vlib_buffer_t * b, virtio_net_hdr_v1_t * hdr,
        */
       ip4 =
        (ip4_header_t *) (vlib_buffer_get_current (b) + gho.l3_hdr_offset);
-      if (b->flags & VNET_BUFFER_F_OFFLOAD_IP_CKSUM)
+      if (oflags & VNET_BUFFER_OFFLOAD_F_IP_CKSUM)
        ip4->checksum = ip4_header_checksum (ip4);
     }
   else if (b->flags & VNET_BUFFER_F_IS_IP6)
@@ -315,21 +320,22 @@ set_checksum_offsets (vlib_buffer_t * b, virtio_net_hdr_v1_t * hdr,
                                         1 /* ip6 */ );
       hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
       hdr->csum_start = gho.l4_hdr_offset;     // 0x36;
-      if (b->flags & VNET_BUFFER_F_OFFLOAD_TCP_CKSUM)
+      if (oflags & VNET_BUFFER_OFFLOAD_F_TCP_CKSUM)
        {
          hdr->csum_offset = STRUCT_OFFSET_OF (tcp_header_t, checksum);
        }
-      else if (b->flags & VNET_BUFFER_F_OFFLOAD_UDP_CKSUM)
+      else if (oflags & VNET_BUFFER_OFFLOAD_F_UDP_CKSUM)
        {
          hdr->csum_offset = STRUCT_OFFSET_OF (udp_header_t, checksum);
        }
     }
 }
 
-static_always_inline void
-set_gso_offsets (vlib_buffer_t * b, virtio_net_hdr_v1_t * hdr,
-                const int is_l2)
+static void
+set_gso_offsets (vlib_buffer_t *b, virtio_net_hdr_v1_t *hdr, const int is_l2)
 {
+  u32 oflags = vnet_buffer2 (b)->oflags;
+
   if (b->flags & VNET_BUFFER_F_IS_IP4)
     {
       ip4_header_t *ip4;
@@ -348,7 +354,7 @@ set_gso_offsets (vlib_buffer_t * b, virtio_net_hdr_v1_t * hdr,
        * virtio devices do not support IP4 checksum offload. So driver takes care
        * of it while doing tx.
        */
-      if (b->flags & VNET_BUFFER_F_OFFLOAD_IP_CKSUM)
+      if (oflags & VNET_BUFFER_OFFLOAD_F_IP_CKSUM)
        ip4->checksum = ip4_header_checksum (ip4);
     }
   else if (b->flags & VNET_BUFFER_F_IS_IP6)
@@ -365,12 +371,12 @@ set_gso_offsets (vlib_buffer_t * b, virtio_net_hdr_v1_t * hdr,
     }
 }
 
-static_always_inline u16
-add_buffer_to_slot (vlib_main_t * vm, vlib_node_runtime_t * node,
-                   virtio_vring_t * vring, u32 bi, u16 free_desc_count,
-                   u16 avail, u16 next, u16 mask, int hdr_sz, int do_gso,
-                   int csum_offload, int is_pci, int is_tun, int is_indirect,
-                   int is_any_layout)
+static u16
+add_buffer_to_slot (vlib_main_t *vm, vlib_node_runtime_t *node,
+                   virtio_if_t *vif, virtio_vring_t *vring, u32 bi,
+                   u16 free_desc_count, u16 avail, u16 next, u16 mask,
+                   int hdr_sz, int do_gso, int csum_offload, int is_pci,
+                   int is_tun, int is_indirect, int is_any_layout)
 {
   u16 n_added = 0;
   vring_desc_t *d;
@@ -392,8 +398,7 @@ add_buffer_to_slot (vlib_main_t * vm, vlib_node_runtime_t * node,
          goto done;
        }
     }
-  else if (b->flags & (VNET_BUFFER_F_OFFLOAD_TCP_CKSUM |
-                      VNET_BUFFER_F_OFFLOAD_UDP_CKSUM))
+  else if (b->flags & VNET_BUFFER_F_OFFLOAD)
     {
       if (csum_offload)
        set_checksum_offsets (b, hdr, is_l2);
@@ -553,15 +558,16 @@ add_buffer_to_slot (vlib_main_t * vm, vlib_node_runtime_t * node,
 
 done:
   if (drop_inline != ~0)
-    virtio_interface_drop_inline (vm, node->node_index, &bi, 1, drop_inline);
+    virtio_interface_drop_inline (vm, vif, node->node_index, &bi, 1,
+                                 drop_inline);
 
   return n_added;
 }
 
-static_always_inline u16
-add_buffer_to_slot_packed (vlib_main_t * vm, vlib_node_runtime_t * node,
-                          virtio_vring_t * vring, u32 bi, u16 next,
-                          int hdr_sz, int do_gso, int csum_offload,
+static u16
+add_buffer_to_slot_packed (vlib_main_t *vm, vlib_node_runtime_t *node,
+                          virtio_if_t *vif, virtio_vring_t *vring, u32 bi,
+                          u16 next, int hdr_sz, int do_gso, int csum_offload,
                           int is_pci, int is_tun, int is_indirect,
                           int is_any_layout)
 {
@@ -584,8 +590,7 @@ add_buffer_to_slot_packed (vlib_main_t * vm, vlib_node_runtime_t * node,
          goto done;
        }
     }
-  else if (b->flags & (VNET_BUFFER_F_OFFLOAD_TCP_CKSUM |
-                      VNET_BUFFER_F_OFFLOAD_UDP_CKSUM))
+  else if (b->flags & VNET_BUFFER_F_OFFLOAD)
     {
       if (csum_offload)
        set_checksum_offsets (b, hdr, is_l2);
@@ -694,19 +699,18 @@ add_buffer_to_slot_packed (vlib_main_t * vm, vlib_node_runtime_t * node,
 
 done:
   if (drop_inline != ~0)
-    virtio_interface_drop_inline (vm, node->node_index, &bi, 1, drop_inline);
+    virtio_interface_drop_inline (vm, vif, node->node_index, &bi, 1,
+                                 drop_inline);
 
   return n_added;
 }
 
-static_always_inline uword
-virtio_interface_tx_packed_gso_inline (vlib_main_t * vm,
-                                      vlib_node_runtime_t * node,
-                                      virtio_if_t * vif,
-                                      virtio_if_type_t type,
-                                      virtio_vring_t * vring,
-                                      u32 * buffers, u16 n_left,
-                                      const int do_gso,
+static uword
+virtio_interface_tx_packed_gso_inline (vlib_main_t *vm,
+                                      vlib_node_runtime_t *node,
+                                      virtio_if_t *vif, virtio_if_type_t type,
+                                      virtio_vring_t *vring, u32 *buffers,
+                                      u16 n_left, const int do_gso,
                                       const int csum_offload)
 {
   int is_pci = (type == VIRTIO_IF_TYPE_PCI);
@@ -735,11 +739,9 @@ virtio_interface_tx_packed_gso_inline (vlib_main_t * vm,
          u32 bi = virtio_vring_buffering_read_from_front (vring->buffering);
          if (bi == ~0)
            break;
-         n_added = add_buffer_to_slot_packed (vm, node,
-                                              vring, bi, next,
-                                              hdr_sz, do_gso, csum_offload,
-                                              is_pci, is_tun, is_indirect,
-                                              is_any_layout);
+         n_added = add_buffer_to_slot_packed (
+           vm, node, vif, vring, bi, next, hdr_sz, do_gso, csum_offload,
+           is_pci, is_tun, is_indirect, is_any_layout);
          n_buffers_left--;
          if (PREDICT_FALSE (n_added == 0))
            continue;
@@ -758,11 +760,9 @@ virtio_interface_tx_packed_gso_inline (vlib_main_t * vm,
     {
       u16 n_added = 0;
 
-      n_added = add_buffer_to_slot_packed (vm, node,
-                                          vring, buffers[0], next,
-                                          hdr_sz, do_gso, csum_offload,
-                                          is_pci, is_tun, is_indirect,
-                                          is_any_layout);
+      n_added = add_buffer_to_slot_packed (
+       vm, node, vif, vring, buffers[0], next, hdr_sz, do_gso, csum_offload,
+       is_pci, is_tun, is_indirect, is_any_layout);
       buffers++;
       n_left--;
       if (PREDICT_FALSE (n_added == 0))
@@ -790,10 +790,10 @@ virtio_interface_tx_packed_gso_inline (vlib_main_t * vm,
   return n_left;
 }
 
-static_always_inline void
-virtio_find_free_desc (virtio_vring_t * vring, u16 size, u16 mask,
-                      u16 req, u16 next, u32 * first_free_desc_index,
-                      u16 * free_desc_count)
+static void
+virtio_find_free_desc (virtio_vring_t *vring, u16 size, u16 mask, u16 req,
+                      u16 next, u32 *first_free_desc_index,
+                      u16 *free_desc_count)
 {
   u16 start = 0;
   /* next is used as hint: from where to start looking */
@@ -826,14 +826,12 @@ virtio_find_free_desc (virtio_vring_t * vring, u16 size, u16 mask,
     }
 }
 
-static_always_inline u16
-virtio_interface_tx_split_gso_inline (vlib_main_t * vm,
-                                     vlib_node_runtime_t * node,
-                                     virtio_if_t * vif,
-                                     virtio_if_type_t type,
-                                     virtio_vring_t * vring, u32 * buffers,
-                                     u16 n_left, int do_gso,
-                                     int csum_offload)
+static u16
+virtio_interface_tx_split_gso_inline (vlib_main_t *vm,
+                                     vlib_node_runtime_t *node,
+                                     virtio_if_t *vif, virtio_if_type_t type,
+                                     virtio_vring_t *vring, u32 *buffers,
+                                     u16 n_left, int do_gso, int csum_offload)
 {
   u16 used, next, avail, n_buffers = 0, n_buffers_left = 0;
   int is_pci = (type == VIRTIO_IF_TYPE_PCI);
@@ -878,10 +876,10 @@ virtio_interface_tx_split_gso_inline (vlib_main_t * vm,
          if (bi == ~0)
            break;
 
-         n_added = add_buffer_to_slot (vm, node, vring, bi, free_desc_count,
-                                       avail, next, mask, hdr_sz, do_gso,
-                                       csum_offload, is_pci, is_tun,
-                                       is_indirect, is_any_layout);
+         n_added = add_buffer_to_slot (vm, node, vif, vring, bi,
+                                       free_desc_count, avail, next, mask,
+                                       hdr_sz, do_gso, csum_offload, is_pci,
+                                       is_tun, is_indirect, is_any_layout);
          if (PREDICT_FALSE (n_added == 0))
            {
              n_buffers_left--;
@@ -902,10 +900,10 @@ virtio_interface_tx_split_gso_inline (vlib_main_t * vm,
     {
       u16 n_added = 0;
 
-      n_added = add_buffer_to_slot (vm, node, vring, buffers[0],
-                                   free_desc_count, avail, next, mask,
-                                   hdr_sz, do_gso, csum_offload, is_pci,
-                                   is_tun, is_indirect, is_any_layout);
+      n_added =
+       add_buffer_to_slot (vm, node, vif, vring, buffers[0], free_desc_count,
+                           avail, next, mask, hdr_sz, do_gso, csum_offload,
+                           is_pci, is_tun, is_indirect, is_any_layout);
 
       if (PREDICT_FALSE (n_added == 0))
        {
@@ -926,51 +924,49 @@ virtio_interface_tx_split_gso_inline (vlib_main_t * vm,
 
   if (n_left != n_vectors || n_buffers != n_buffers_left)
     {
-      CLIB_MEMORY_STORE_BARRIER ();
-      vring->avail->idx = avail;
+      clib_atomic_store_seq_cst (&vring->avail->idx, avail);
       vring->desc_next = next;
       vring->desc_in_use = used;
-      if ((vring->used->flags & VRING_USED_F_NO_NOTIFY) == 0)
+      if ((clib_atomic_load_seq_cst (&vring->used->flags) &
+          VRING_USED_F_NO_NOTIFY) == 0)
        virtio_kick (vm, vring, vif);
     }
 
   return n_left;
 }
 
-static_always_inline u16
-virtio_interface_tx_gso_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
-                               virtio_if_t * vif,
-                               virtio_if_type_t type, virtio_vring_t * vring,
-                               u32 * buffers, u16 n_left, int packed,
-                               int do_gso, int csum_offload)
+static u16
+virtio_interface_tx_gso_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
+                               virtio_if_t *vif, virtio_if_type_t type,
+                               virtio_vring_t *vring, u32 *buffers,
+                               u16 n_left, int packed, int do_gso,
+                               int csum_offload)
 {
   if (packed)
     return virtio_interface_tx_packed_gso_inline (vm, node, vif, type, vring,
                                                  buffers, n_left,
-                                                 1 /* do_gso */ ,
-                                                 1 /* checksum offload */ );
+                                                 do_gso, csum_offload);
   else
     return virtio_interface_tx_split_gso_inline (vm, node, vif, type, vring,
                                                 buffers, n_left,
-                                                1 /* do_gso */ ,
-                                                1 /* checksum offload */ );
+                                                do_gso, csum_offload);
 }
 
-static_always_inline u16
-virtio_interface_tx_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
-                           virtio_if_t * vif,
-                           virtio_vring_t * vring, virtio_if_type_t type,
-                           u32 * buffers, u16 n_left, int packed)
+static u16
+virtio_interface_tx_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
+                           virtio_if_t *vif, virtio_vring_t *vring,
+                           virtio_if_type_t type, u32 *buffers, u16 n_left,
+                           int packed)
 {
   vnet_main_t *vnm = vnet_get_main ();
   vnet_hw_interface_t *hw = vnet_get_hw_interface (vnm, vif->hw_if_index);
 
-  if (hw->flags & VNET_HW_INTERFACE_FLAG_SUPPORTS_GSO)
+  if (hw->caps & VNET_HW_INTERFACE_CAP_SUPPORTS_TCP_GSO)
     return virtio_interface_tx_gso_inline (vm, node, vif, type, vring,
                                           buffers, n_left, packed,
                                           1 /* do_gso */ ,
                                           1 /* checksum offload */ );
-  else if (hw->flags & VNET_HW_INTERFACE_FLAG_SUPPORTS_TX_L4_CKSUM_OFFLOAD)
+  else if (hw->caps & VNET_HW_INTERFACE_CAP_SUPPORTS_L4_TX_CKSUM)
     return virtio_interface_tx_gso_inline (vm, node, vif, type, vring,
                                           buffers, n_left, packed,
                                          0 /* no do_gso */ ,
                                          1 /* checksum offload */ );
@@ -995,16 +991,14 @@ VNET_DEVICE_CLASS_TX_FN (virtio_device_class) (vlib_main_t * vm,
   u32 *buffers = vlib_frame_vector_args (frame);
   u32 to[GRO_TO_VECTOR_SIZE (n_left)];
   int packed = vif->is_packed;
+  u16 n_vectors = frame->n_vectors;
 
   clib_spinlock_lock_if_init (&vring->lockp);
 
-  if ((vring->used->flags & VRING_USED_F_NO_NOTIFY) == 0 &&
-      (vring->last_kick_avail_idx != vring->avail->idx))
-    virtio_kick (vm, vring, vif);
-
   if (vif->packet_coalesce)
     {
-      n_left = vnet_gro_inline (vm, vring->flow_table, buffers, n_left, to);
+      n_vectors = n_left =
+       vnet_gro_inline (vm, vring->flow_table, buffers, n_left, to);
       buffers = to;
     }
 
@@ -1017,17 +1011,17 @@ retry:
   if (vif->type == VIRTIO_IF_TYPE_TAP)
     n_left = virtio_interface_tx_inline (vm, node, vif, vring,
                                         VIRTIO_IF_TYPE_TAP,
-                                        &buffers[frame->n_vectors - n_left],
+                                        &buffers[n_vectors - n_left],
                                         n_left, packed);
   else if (vif->type == VIRTIO_IF_TYPE_PCI)
     n_left = virtio_interface_tx_inline (vm, node, vif, vring,
                                         VIRTIO_IF_TYPE_PCI,
-                                        &buffers[frame->n_vectors - n_left],
+                                        &buffers[n_vectors - n_left],
                                         n_left, packed);
   else if (vif->type == VIRTIO_IF_TYPE_TUN)
     n_left = virtio_interface_tx_inline (vm, node, vif, vring,
                                         VIRTIO_IF_TYPE_TUN,
-                                        &buffers[frame->n_vectors - n_left],
+                                        &buffers[n_vectors - n_left],
                                         n_left, packed);
   else
     ASSERT (0);
@@ -1039,14 +1033,14 @@ retry:
     {
       u16 n_buffered = virtio_vring_buffering_store_packets (vring->buffering,
                                                             &buffers
-                                                            [frame->n_vectors
+                                                            [n_vectors
                                                              - n_left],
                                                             n_left);
       n_left -= n_buffered;
     }
   if (n_left)
-    virtio_interface_drop_inline (vm, node->node_index,
-                                 &buffers[frame->n_vectors - n_left], n_left,
+    virtio_interface_drop_inline (vm, vif, node->node_index,
+                                 &buffers[n_vectors - n_left], n_left,
                                  VIRTIO_TX_ERROR_NO_FREE_SLOTS);
 
   clib_spinlock_unlock_if_init (&vring->lockp);
@@ -1080,6 +1074,24 @@ virtio_clear_hw_interface_counters (u32 instance)
   /* Nothing for now */
 }
 
+static void
+virtio_set_rx_interrupt (virtio_if_t *vif, virtio_vring_t *vring)
+{
+  if (vif->is_packed)
+    vring->driver_event->flags &= ~VRING_EVENT_F_DISABLE;
+  else
+    vring->avail->flags &= ~VRING_AVAIL_F_NO_INTERRUPT;
+}
+
+static void
+virtio_set_rx_polling (virtio_if_t *vif, virtio_vring_t *vring)
+{
+  if (vif->is_packed)
+    vring->driver_event->flags |= VRING_EVENT_F_DISABLE;
+  else
+    vring->avail->flags |= VRING_AVAIL_F_NO_INTERRUPT;
+}
+
 static clib_error_t *
 virtio_interface_rx_mode_change (vnet_main_t * vnm, u32 hw_if_index, u32 qid,
                                 vnet_hw_if_rx_mode mode)
@@ -1092,7 +1104,7 @@ virtio_interface_rx_mode_change (vnet_main_t * vnm, u32 hw_if_index, u32 qid,
 
   if (vif->type == VIRTIO_IF_TYPE_PCI && !(vif->support_int_mode))
     {
-      rx_vring->avail->flags |= VRING_AVAIL_F_NO_INTERRUPT;
+      virtio_set_rx_polling (vif, rx_vring);
       return clib_error_return (0, "interrupt mode is not supported");
     }
 
@@ -1107,7 +1119,7 @@ virtio_interface_rx_mode_change (vnet_main_t * vnm, u32 hw_if_index, u32 qid,
                                       virtio_send_interrupt_node.index,
                                       VIRTIO_EVENT_STOP_TIMER, 0);
        }
-      rx_vring->avail->flags |= VRING_AVAIL_F_NO_INTERRUPT;
+      virtio_set_rx_polling (vif, rx_vring);
     }
   else
     {
@@ -1119,7 +1131,7 @@ virtio_interface_rx_mode_change (vnet_main_t * vnm, u32 hw_if_index, u32 qid,
                                       virtio_send_interrupt_node.index,
                                       VIRTIO_EVENT_START_TIMER, 0);
        }
-      rx_vring->avail->flags &= ~VRING_AVAIL_F_NO_INTERRUPT;
+      virtio_set_rx_interrupt (vif, rx_vring);
     }
 
   rx_vring->mode = mode;