vlib: refactor checksum offload support
diff --git a/src/vnet/devices/virtio/device.c b/src/vnet/devices/virtio/device.c
index 6b57dd6..fb996d5 100644
--- a/src/vnet/devices/virtio/device.c
+++ b/src/vnet/devices/virtio/device.c
@@ -282,6 +282,8 @@ static_always_inline void
 set_checksum_offsets (vlib_buffer_t * b, virtio_net_hdr_v1_t * hdr,
                      const int is_l2)
 {
+  u32 oflags = vnet_buffer2 (b)->oflags;
+
   if (b->flags & VNET_BUFFER_F_IS_IP4)
     {
       ip4_header_t *ip4;
@@ -290,11 +292,11 @@ set_checksum_offsets (vlib_buffer_t * b, virtio_net_hdr_v1_t * hdr,
                                         0 /* ip6 */ );
       hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
       hdr->csum_start = gho.l4_hdr_offset;     // 0x22;
-      if (b->flags & VNET_BUFFER_F_OFFLOAD_TCP_CKSUM)
+      if (oflags & VNET_BUFFER_OFFLOAD_F_TCP_CKSUM)
        {
          hdr->csum_offset = STRUCT_OFFSET_OF (tcp_header_t, checksum);
        }
-      else if (b->flags & VNET_BUFFER_F_OFFLOAD_UDP_CKSUM)
+      else if (oflags & VNET_BUFFER_OFFLOAD_F_UDP_CKSUM)
        {
          hdr->csum_offset = STRUCT_OFFSET_OF (udp_header_t, checksum);
        }
@@ -305,7 +307,7 @@ set_checksum_offsets (vlib_buffer_t * b, virtio_net_hdr_v1_t * hdr,
        */
       ip4 =
        (ip4_header_t *) (vlib_buffer_get_current (b) + gho.l3_hdr_offset);
-      if (b->flags & VNET_BUFFER_F_OFFLOAD_IP_CKSUM)
+      if (oflags & VNET_BUFFER_OFFLOAD_F_IP_CKSUM)
        ip4->checksum = ip4_header_checksum (ip4);
     }
   else if (b->flags & VNET_BUFFER_F_IS_IP6)
@@ -315,11 +317,11 @@ set_checksum_offsets (vlib_buffer_t * b, virtio_net_hdr_v1_t * hdr,
                                         1 /* ip6 */ );
       hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
       hdr->csum_start = gho.l4_hdr_offset;     // 0x36;
-      if (b->flags & VNET_BUFFER_F_OFFLOAD_TCP_CKSUM)
+      if (oflags & VNET_BUFFER_OFFLOAD_F_TCP_CKSUM)
        {
          hdr->csum_offset = STRUCT_OFFSET_OF (tcp_header_t, checksum);
        }
-      else if (b->flags & VNET_BUFFER_F_OFFLOAD_UDP_CKSUM)
+      else if (oflags & VNET_BUFFER_OFFLOAD_F_UDP_CKSUM)
        {
          hdr->csum_offset = STRUCT_OFFSET_OF (udp_header_t, checksum);
        }
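
These checksum hunks are the core of the refactor: the per-protocol request
bits that used to live in b->flags (VNET_BUFFER_F_OFFLOAD_TCP_CKSUM,
_UDP_CKSUM, _IP_CKSUM) are replaced by a single VNET_BUFFER_F_OFFLOAD bit
plus a detail word, vnet_buffer2 (b)->oflags, read once at the top of the
helper. A minimal sketch of the producer side this layout assumes follows;
the helper name is illustrative, only the flag names appear in this diff.

#include <vnet/vnet.h>  /* vlib_buffer_t, vnet_buffer2, VNET_BUFFER_F_OFFLOAD */

/* Producer-side sketch, assuming the post-refactor metadata layout: one
 * generic bit in b->flags says "some offload was requested", and the
 * oflags word in the second opaque area records which one. */
static_always_inline void
request_csum_offload_sketch (vlib_buffer_t * b, u32 oflags)
{
  vnet_buffer2 (b)->oflags |= oflags;  /* e.g. VNET_BUFFER_OFFLOAD_F_TCP_CKSUM */
  b->flags |= VNET_BUFFER_F_OFFLOAD;   /* the only bit the tx path tests */
}
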
@@ -330,6 +332,8 @@ static_always_inline void
 set_gso_offsets (vlib_buffer_t * b, virtio_net_hdr_v1_t * hdr,
                 const int is_l2)
 {
+  u32 oflags = vnet_buffer2 (b)->oflags;
+
   if (b->flags & VNET_BUFFER_F_IS_IP4)
     {
       ip4_header_t *ip4;
@@ -348,7 +352,7 @@ set_gso_offsets (vlib_buffer_t * b, virtio_net_hdr_v1_t * hdr,
        * virtio devices do not support IP4 checksum offload. So driver takes care
        * of it while doing tx.
        */
-      if (b->flags & VNET_BUFFER_F_OFFLOAD_IP_CKSUM)
+      if (oflags & VNET_BUFFER_OFFLOAD_F_IP_CKSUM)
        ip4->checksum = ip4_header_checksum (ip4);
     }
   else if (b->flags & VNET_BUFFER_F_IS_IP6)
@@ -392,8 +396,7 @@ add_buffer_to_slot (vlib_main_t * vm, vlib_node_runtime_t * node,
          goto done;
        }
     }
-  else if (b->flags & (VNET_BUFFER_F_OFFLOAD_TCP_CKSUM |
-                      VNET_BUFFER_F_OFFLOAD_UDP_CKSUM))
+  else if (b->flags & VNET_BUFFER_F_OFFLOAD)
     {
       if (csum_offload)
        set_checksum_offsets (b, hdr, is_l2);
@@ -584,8 +587,7 @@ add_buffer_to_slot_packed (vlib_main_t * vm, vlib_node_runtime_t * node,
          goto done;
        }
     }
-  else if (b->flags & (VNET_BUFFER_F_OFFLOAD_TCP_CKSUM |
-                      VNET_BUFFER_F_OFFLOAD_UDP_CKSUM))
+  else if (b->flags & VNET_BUFFER_F_OFFLOAD)
     {
       if (csum_offload)
        set_checksum_offsets (b, hdr, is_l2);
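
In both descriptor-fill variants the hot-path test shrinks from an OR of two
flag bits to the single VNET_BUFFER_F_OFFLOAD check; set_checksum_offsets ()
then dispatches on the detailed oflags word. For readability, a hedged
sketch of what that bitmask looks like is given below; the real definition
lives in vnet/buffer.h and the bit positions here are an assumption, not a
quote.

/* Illustrative stand-in for the offload request bitmask used above. */
typedef enum
{
  VNET_BUFFER_OFFLOAD_F_SKETCH_IP_CKSUM = (1 << 0),
  VNET_BUFFER_OFFLOAD_F_SKETCH_UDP_CKSUM = (1 << 1),
  VNET_BUFFER_OFFLOAD_F_SKETCH_TCP_CKSUM = (1 << 2),
} vnet_buffer_oflags_sketch_t;
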
@@ -926,11 +928,11 @@ virtio_interface_tx_split_gso_inline (vlib_main_t * vm,
 
   if (n_left != n_vectors || n_buffers != n_buffers_left)
     {
-      CLIB_MEMORY_STORE_BARRIER ();
-      vring->avail->idx = avail;
+      clib_atomic_store_seq_cst (&vring->avail->idx, avail);
       vring->desc_next = next;
       vring->desc_in_use = used;
-      if ((vring->used->flags & VRING_USED_F_NO_NOTIFY) == 0)
+      if ((clib_atomic_load_seq_cst (&vring->used->flags) &
+          VRING_USED_F_NO_NOTIFY) == 0)
        virtio_kick (vm, vring, vif);
     }
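
The split-ring notification hunk replaces a hand-rolled store barrier plus
plain store of avail->idx, and a plain load of used->flags, with seq-cst
atomic accesses: publish the descriptors before the index becomes visible,
then read the device's notification-suppression flag before kicking. A
standalone C11 sketch of the same ordering, with the vring reduced to the
two fields involved (this is a model, not the VPP structures):

#include <stdatomic.h>
#include <stdint.h>

/* Reduced model of the split virtqueue fields touched above. */
struct avail_sketch { _Atomic uint16_t idx; };
struct used_sketch { _Atomic uint16_t flags; };
#define USED_F_NO_NOTIFY_SKETCH 1

/* Publish the new available index, then decide whether to notify the
 * device.  The seq-cst store guarantees every earlier descriptor write is
 * visible before the index update, which is the ordering the old
 * CLIB_MEMORY_STORE_BARRIER () + plain store spelled out by hand. */
static int
publish_and_check_notify (struct avail_sketch *avail,
                          struct used_sketch *used, uint16_t new_idx)
{
  atomic_store_explicit (&avail->idx, new_idx, memory_order_seq_cst);
  uint16_t f = atomic_load_explicit (&used->flags, memory_order_seq_cst);
  return (f & USED_F_NO_NOTIFY_SKETCH) == 0; /* non-zero => kick the device */
}
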
 
@@ -947,13 +949,11 @@ virtio_interface_tx_gso_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
   if (packed)
     return virtio_interface_tx_packed_gso_inline (vm, node, vif, type, vring,
                                                  buffers, n_left,
-                                                 1 /* do_gso */ ,
-                                                 1 /* checksum offload */ );
+                                                 do_gso, csum_offload);
   else
     return virtio_interface_tx_split_gso_inline (vm, node, vif, type, vring,
                                                 buffers, n_left,
-                                                1 /* do_gso */ ,
-                                                1 /* checksum offload */ );
+                                                do_gso, csum_offload);
 }
 
 static_always_inline u16
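
virtio_interface_tx_gso_inline () is meant to be instantiated with
compile-time constant do_gso / csum_offload arguments (its callers, not
shown here, pass literals chosen from the interface's offload
configuration), but the packed and split dispatch used to hard-code both to
1, silently ignoring the caller's choice, for example checksum offload
without GSO. Forwarding the parameters keeps the constant folding and
restores the intended feature selection. A reduced sketch of the pattern,
not the VPP code:

/* Sketch of the "inline template + constant flags" pattern: the outer
 * function passes literal constants so each instantiation is compiled with
 * the dead branches removed, and the inner dispatch must forward them
 * rather than pin its own values. */
static inline int
tx_inner_sketch (int n_pkts, const int do_gso, const int csum_offload)
{
  int work = 0;
  if (do_gso)
    work += n_pkts;  /* stand-in for GSO header handling */
  if (csum_offload)
    work += n_pkts;  /* stand-in for checksum offset fix-up */
  return work;
}

static inline int
tx_outer_sketch (int n_pkts, int gso_enabled, int csum_enabled)
{
  if (gso_enabled)
    return tx_inner_sketch (n_pkts, 1 /* do_gso */ , 1 /* csum_offload */ );
  if (csum_enabled)
    return tx_inner_sketch (n_pkts, 0 /* do_gso */ , 1 /* csum_offload */ );
  return tx_inner_sketch (n_pkts, 0 /* do_gso */ , 0 /* csum_offload */ );
}
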
@@ -995,16 +995,14 @@ VNET_DEVICE_CLASS_TX_FN (virtio_device_class) (vlib_main_t * vm,
   u32 *buffers = vlib_frame_vector_args (frame);
   u32 to[GRO_TO_VECTOR_SIZE (n_left)];
   int packed = vif->is_packed;
+  u16 n_vectors = frame->n_vectors;
 
   clib_spinlock_lock_if_init (&vring->lockp);
 
-  if ((vring->used->flags & VRING_USED_F_NO_NOTIFY) == 0 &&
-      (vring->last_kick_avail_idx != vring->avail->idx))
-    virtio_kick (vm, vring, vif);
-
   if (vif->packet_coalesce)
     {
-      n_left = vnet_gro_inline (vm, vring->flow_table, buffers, n_left, to);
+      n_vectors = n_left =
+       vnet_gro_inline (vm, vring->flow_table, buffers, n_left, to);
       buffers = to;
     }
 
@@ -1017,17 +1015,17 @@ retry:
   if (vif->type == VIRTIO_IF_TYPE_TAP)
     n_left = virtio_interface_tx_inline (vm, node, vif, vring,
                                         VIRTIO_IF_TYPE_TAP,
-                                        &buffers[frame->n_vectors - n_left],
+                                        &buffers[n_vectors - n_left],
                                         n_left, packed);
   else if (vif->type == VIRTIO_IF_TYPE_PCI)
     n_left = virtio_interface_tx_inline (vm, node, vif, vring,
                                         VIRTIO_IF_TYPE_PCI,
-                                        &buffers[frame->n_vectors - n_left],
+                                        &buffers[n_vectors - n_left],
                                         n_left, packed);
   else if (vif->type == VIRTIO_IF_TYPE_TUN)
     n_left = virtio_interface_tx_inline (vm, node, vif, vring,
                                         VIRTIO_IF_TYPE_TUN,
-                                        &buffers[frame->n_vectors - n_left],
+                                        &buffers[n_vectors - n_left],
                                         n_left, packed);
   else
     ASSERT (0);
@@ -1039,14 +1037,14 @@ retry:
     {
       u16 n_buffered = virtio_vring_buffering_store_packets (vring->buffering,
                                                             &buffers
-                                                            [frame->n_vectors
+                                                            [n_vectors
                                                              - n_left],
                                                             n_left);
       n_left -= n_buffered;
     }
   if (n_left)
     virtio_interface_drop_inline (vm, node->node_index,
-                                 &buffers[frame->n_vectors - n_left], n_left,
+                                 &buffers[n_vectors - n_left], n_left,
                                  VIRTIO_TX_ERROR_NO_FREE_SLOTS);
 
   clib_spinlock_unlock_if_init (&vring->lockp);
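
Two related changes run through the tx node. The pre-enqueue kick that used
to flush any not-yet-notified descriptors from a previous invocation is
removed, leaving notification to the post-enqueue check shown earlier. And
because vnet_gro_inline () can replace the frame's buffer list with a
shorter coalesced array in to[], frame->n_vectors no longer necessarily
describes the array being walked; caching the count in n_vectors, and
updating it after coalescing, keeps the &buffers[n_vectors - n_left]
arithmetic valid for the retry, buffering and drop paths. A tiny sketch of
that indexing invariant:

#include <stdint.h>

/* The tx helpers consume from the front of `buffers` and return how many
 * entries remain, so the unprocessed tail always starts at
 * buffers[n_vectors - n_left].  n_vectors must describe the array actually
 * in use: the coalesced one when GRO rewrote it, the frame otherwise. */
static inline const uint32_t *
remaining_buffers_sketch (const uint32_t * buffers, uint16_t n_vectors,
                          uint16_t n_left)
{
  return &buffers[n_vectors - n_left];
}
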
@@ -1080,6 +1078,24 @@ virtio_clear_hw_interface_counters (u32 instance)
   /* Nothing for now */
 }
 
+static_always_inline void
+virtio_set_rx_interrupt (virtio_if_t * vif, virtio_vring_t * vring)
+{
+  if (vif->is_packed)
+    vring->driver_event->flags &= ~VRING_EVENT_F_DISABLE;
+  else
+    vring->avail->flags &= ~VRING_AVAIL_F_NO_INTERRUPT;
+}
+
+static_always_inline void
+virtio_set_rx_polling (virtio_if_t * vif, virtio_vring_t * vring)
+{
+  if (vif->is_packed)
+    vring->driver_event->flags |= VRING_EVENT_F_DISABLE;
+  else
+    vring->avail->flags |= VRING_AVAIL_F_NO_INTERRUPT;
+}
+
 static clib_error_t *
 virtio_interface_rx_mode_change (vnet_main_t * vnm, u32 hw_if_index, u32 qid,
                                 vnet_hw_if_rx_mode mode)
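
The two helpers added above hide the layout difference in interrupt
suppression: a split ring toggles VRING_AVAIL_F_NO_INTERRUPT in the
driver-owned avail ring, while a packed ring toggles VRING_EVENT_F_DISABLE
in its driver event suppression structure. The rx mode change handler,
whose hunks follow, then switches between polling and interrupt mode
without caring which layout the interface uses. A hedged usage sketch; the
wrapper name and the want_polling parameter are illustrative, only the two
helpers are from this diff, and the sketch assumes it sits in device.c next
to them:

#include <vnet/devices/virtio/virtio.h>  /* virtio_if_t, virtio_vring_t */

/* Usage sketch: choose polling vs interrupt per rx ring without knowing
 * whether the interface uses split or packed rings. */
static_always_inline void
virtio_set_rx_mode_sketch (virtio_if_t * vif, virtio_vring_t * vring,
                           int want_polling)
{
  if (want_polling)
    virtio_set_rx_polling (vif, vring);    /* device interrupts suppressed */
  else
    virtio_set_rx_interrupt (vif, vring);  /* device interrupts re-enabled */
}
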
@@ -1092,7 +1108,7 @@ virtio_interface_rx_mode_change (vnet_main_t * vnm, u32 hw_if_index, u32 qid,
 
   if (vif->type == VIRTIO_IF_TYPE_PCI && !(vif->support_int_mode))
     {
-      rx_vring->avail->flags |= VRING_AVAIL_F_NO_INTERRUPT;
+      virtio_set_rx_polling (vif, rx_vring);
       return clib_error_return (0, "interrupt mode is not supported");
     }
 
@@ -1107,7 +1123,7 @@ virtio_interface_rx_mode_change (vnet_main_t * vnm, u32 hw_if_index, u32 qid,
                                       virtio_send_interrupt_node.index,
                                       VIRTIO_EVENT_STOP_TIMER, 0);
        }
-      rx_vring->avail->flags |= VRING_AVAIL_F_NO_INTERRUPT;
+      virtio_set_rx_polling (vif, rx_vring);
     }
   else
     {
@@ -1119,7 +1135,7 @@ virtio_interface_rx_mode_change (vnet_main_t * vnm, u32 hw_if_index, u32 qid,
                                       virtio_send_interrupt_node.index,
                                       VIRTIO_EVENT_START_TIMER, 0);
        }
-      rx_vring->avail->flags &= ~VRING_AVAIL_F_NO_INTERRUPT;
+      virtio_set_rx_interrupt (vif, rx_vring);
     }
 
   rx_vring->mode = mode;