vhost: Add event index for interrupt notification to driver
[vpp.git] src/vnet/devices/virtio/vhost_user_output.c
index f400f18..465c0ea 100644
@@ -17,6 +17,7 @@
  *------------------------------------------------------------------
  */
 
+#include <stddef.h>
 #include <fcntl.h>             /* for open */
 #include <sys/ioctl.h>
 #include <sys/socket.h>
@@ -33,8 +34,6 @@
 #include <vlib/vlib.h>
 #include <vlib/unix/unix.h>
 
-#include <vnet/ip/ip.h>
-
 #include <vnet/ethernet/ethernet.h>
 #include <vnet/devices/devices.h>
 #include <vnet/feature/feature.h>
@@ -42,6 +41,7 @@
 #include <vnet/devices/virtio/vhost_user.h>
 #include <vnet/devices/virtio/vhost_user_inline.h>
 
+#include <vnet/gso/hdr_offset_parser.h>
 /*
  * On the transmit side, we keep processing the buffers from vlib in the while
  * loop and prepare the copy order to be executed later. However, the static
  * array in which we keep the copy order is limited to VHOST_USER_COPY_ARRAY_N
  * entries. In order to not corrupt memory, we have to do the copy when the
  * static array reaches the copy threshold. We subtract 40 in case the code
  * goes into the inner loop for a maximum of 64k frames which may require
- * more array entries.
+ * more array entries. We subtract 200 because our default buffer size is
+ * 2048 and the default desc len is likely 1536. While a jumbo frame takes
+ * fewer than 40 vlib buffers, it may take twice as many descriptors for
+ * the same jumbo frame. Use 200 for the extra headroom.
  */
-#define VHOST_USER_TX_COPY_THRESHOLD (VHOST_USER_COPY_ARRAY_N - 40)
+#define VHOST_USER_TX_COPY_THRESHOLD (VHOST_USER_COPY_ARRAY_N - 200)
 
-vnet_device_class_t vhost_user_device_class;
+extern vnet_device_class_t vhost_user_device_class;
 
 #define foreach_vhost_user_tx_func_error      \
   _(NONE, "no error")  \
@@ -100,36 +103,28 @@ vhost_user_name_renumber (vnet_hw_interface_t * hi, u32 new_dev_instance)
 {
   // FIXME: check if the new dev instance is already used
   vhost_user_main_t *vum = &vhost_user_main;
+  vhost_user_intf_t *vui = pool_elt_at_index (vum->vhost_user_interfaces,
+                                             hi->dev_instance);
+
   vec_validate_init_empty (vum->show_dev_instance_by_real_dev_instance,
                           hi->dev_instance, ~0);
 
   vum->show_dev_instance_by_real_dev_instance[hi->dev_instance] =
     new_dev_instance;
 
-  DBG_SOCK ("renumbered vhost-user interface dev_instance %d to %d",
-           hi->dev_instance, new_dev_instance);
+  vu_log_debug (vui, "renumbered vhost-user interface dev_instance %d to %d",
+               hi->dev_instance, new_dev_instance);
 
   return 0;
 }
 
-/**
- * @brief Try once to lock the vring
- * @return 0 on success, non-zero on failure.
- */
-static_always_inline int
-vhost_user_vring_try_lock (vhost_user_intf_t * vui, u32 qid)
-{
-  return __sync_lock_test_and_set (vui->vring_locks[qid], 1);
-}
-
 /**
  * @brief Spin until the vring is successfully locked
  */
 static_always_inline void
 vhost_user_vring_lock (vhost_user_intf_t * vui, u32 qid)
 {
-  while (vhost_user_vring_try_lock (vui, qid))
-    ;
+  clib_spinlock_lock_if_init (&vui->vrings[qid].vring_lock);
 }
 
 /**
@@ -138,7 +133,7 @@ vhost_user_vring_lock (vhost_user_intf_t * vui, u32 qid)
 static_always_inline void
 vhost_user_vring_unlock (vhost_user_intf_t * vui, u32 qid)
 {
-  *vui->vring_locks[qid] = 0;
+  clib_spinlock_unlock_if_init (&vui->vrings[qid].vring_lock);
 }
 
 static_always_inline void
@@ -152,23 +147,23 @@ vhost_user_tx_trace (vhost_trace_t * t,
   vring_desc_t *hdr_desc = 0;
   u32 hint = 0;
 
-  memset (t, 0, sizeof (*t));
+  clib_memset (t, 0, sizeof (*t));
   t->device_index = vui - vum->vhost_user_interfaces;
   t->qid = qid;
 
   hdr_desc = &rxvq->desc[desc_current];
-  if (rxvq->desc[desc_current].flags & VIRTQ_DESC_F_INDIRECT)
+  if (rxvq->desc[desc_current].flags & VRING_DESC_F_INDIRECT)
     {
       t->virtio_ring_flags |= 1 << VIRTIO_TRACE_F_INDIRECT;
       /* Header is the first here */
       hdr_desc = map_guest_mem (vui, rxvq->desc[desc_current].addr, &hint);
     }
-  if (rxvq->desc[desc_current].flags & VIRTQ_DESC_F_NEXT)
+  if (rxvq->desc[desc_current].flags & VRING_DESC_F_NEXT)
     {
       t->virtio_ring_flags |= 1 << VIRTIO_TRACE_F_SIMPLE_CHAINED;
     }
-  if (!(rxvq->desc[desc_current].flags & VIRTQ_DESC_F_NEXT) &&
-      !(rxvq->desc[desc_current].flags & VIRTQ_DESC_F_INDIRECT))
+  if (!(rxvq->desc[desc_current].flags & VRING_DESC_F_NEXT) &&
+      !(rxvq->desc[desc_current].flags & VRING_DESC_F_INDIRECT))
     {
       t->virtio_ring_flags |= 1 << VIRTIO_TRACE_F_SINGLE_DESC;
     }
@@ -202,8 +197,8 @@ vhost_user_tx_copy (vhost_user_intf_t * vui, vhost_copy_t * cpy,
          CLIB_PREFETCH ((void *) cpy[2].src, 64, LOAD);
          CLIB_PREFETCH ((void *) cpy[3].src, 64, LOAD);
 
-         clib_memcpy (dst0, (void *) cpy[0].src, cpy[0].len);
-         clib_memcpy (dst1, (void *) cpy[1].src, cpy[1].len);
+         clib_memcpy_fast (dst0, (void *) cpy[0].src, cpy[0].len);
+         clib_memcpy_fast (dst1, (void *) cpy[1].src, cpy[1].len);
 
          vhost_user_log_dirty_pages_2 (vui, cpy[0].dst, cpy[0].len, 1);
          vhost_user_log_dirty_pages_2 (vui, cpy[1].dst, cpy[1].len, 1);
@@ -215,7 +210,7 @@ vhost_user_tx_copy (vhost_user_intf_t * vui, vhost_copy_t * cpy,
     {
       if (PREDICT_FALSE (!(dst0 = map_guest_mem (vui, cpy->dst, map_hint))))
        return 1;
-      clib_memcpy (dst0, (void *) cpy->src, cpy->len);
+      clib_memcpy_fast (dst0, (void *) cpy->src, cpy->len);
       vhost_user_log_dirty_pages_2 (vui, cpy->dst, cpy->len, 1);
       copy_len -= 1;
       cpy += 1;
@@ -223,13 +218,490 @@ vhost_user_tx_copy (vhost_user_intf_t * vui, vhost_copy_t * cpy,
   return 0;
 }
 
+static_always_inline void
+vhost_user_handle_tx_offload (vhost_user_intf_t * vui, vlib_buffer_t * b,
+                             virtio_net_hdr_t * hdr)
+{
+  generic_header_offset_t gho = { 0 };
+  int is_ip4 = b->flags & VNET_BUFFER_F_IS_IP4;
+  int is_ip6 = b->flags & VNET_BUFFER_F_IS_IP6;
+
+  ASSERT (!(is_ip4 && is_ip6));
+  vnet_generic_header_offset_parser (b, &gho, 1 /* l2 */ , is_ip4, is_ip6);
+  if (b->flags & VNET_BUFFER_F_OFFLOAD_IP_CKSUM)
+    {
+      ip4_header_t *ip4;
+
+      ip4 =
+       (ip4_header_t *) (vlib_buffer_get_current (b) + gho.l3_hdr_offset);
+      ip4->checksum = ip4_header_checksum (ip4);
+    }
+
+  /* checksum offload */
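+  /* csum_start marks where checksumming begins; the checksum itself is
+   * stored csum_offset bytes beyond that point, per the virtio spec */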
+  if (b->flags & VNET_BUFFER_F_OFFLOAD_UDP_CKSUM)
+    {
+      hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
+      hdr->csum_start = gho.l4_hdr_offset;
+      hdr->csum_offset = offsetof (udp_header_t, checksum);
+    }
+  else if (b->flags & VNET_BUFFER_F_OFFLOAD_TCP_CKSUM)
+    {
+      hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
+      hdr->csum_start = gho.l4_hdr_offset;
+      hdr->csum_offset = offsetof (tcp_header_t, checksum);
+    }
+
+  /* GSO offload */
+  if (b->flags & VNET_BUFFER_F_GSO)
+    {
+      if (b->flags & VNET_BUFFER_F_OFFLOAD_TCP_CKSUM)
+       {
+         if (is_ip4 &&
+             (vui->features & VIRTIO_FEATURE (VIRTIO_NET_F_GUEST_TSO4)))
+           {
+             hdr->gso_size = vnet_buffer2 (b)->gso_size;
+             hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
+           }
+         else if (is_ip6 &&
+                  (vui->features & VIRTIO_FEATURE (VIRTIO_NET_F_GUEST_TSO6)))
+           {
+             hdr->gso_size = vnet_buffer2 (b)->gso_size;
+             hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
+           }
+       }
+      else if ((vui->features & VIRTIO_FEATURE (VIRTIO_NET_F_GUEST_UFO)) &&
+              (b->flags & VNET_BUFFER_F_OFFLOAD_UDP_CKSUM))
+       {
+         hdr->gso_size = vnet_buffer2 (b)->gso_size;
+         hdr->gso_type = VIRTIO_NET_HDR_GSO_UDP;
+       }
+    }
+}
+
+static_always_inline void
+vhost_user_mark_desc_available (vlib_main_t * vm, vhost_user_intf_t * vui,
+                               vhost_user_vring_t * rxvq,
+                               u16 * n_descs_processed, u8 chained,
+                               vlib_frame_t * frame, u32 n_left)
+{
+  u16 desc_idx, flags;
+  vring_packed_desc_t *desc_table = rxvq->packed_desc;
+  u16 last_used_idx = rxvq->last_used_idx;
+
+  if (PREDICT_FALSE (*n_descs_processed == 0))
+    return;
+
+  if (rxvq->used_wrap_counter)
+    flags = desc_table[last_used_idx & rxvq->qsz_mask].flags |
+      (VRING_DESC_F_AVAIL | VRING_DESC_F_USED);
+  else
+    flags = desc_table[last_used_idx & rxvq->qsz_mask].flags &
+      ~(VRING_DESC_F_AVAIL | VRING_DESC_F_USED);
+
+  vhost_user_advance_last_used_idx (rxvq);
+
+  for (desc_idx = 1; desc_idx < *n_descs_processed; desc_idx++)
+    {
+      if (rxvq->used_wrap_counter)
+       desc_table[rxvq->last_used_idx & rxvq->qsz_mask].flags |=
+         (VRING_DESC_F_AVAIL | VRING_DESC_F_USED);
+      else
+       desc_table[rxvq->last_used_idx & rxvq->qsz_mask].flags &=
+         ~(VRING_DESC_F_AVAIL | VRING_DESC_F_USED);
+      vhost_user_advance_last_used_idx (rxvq);
+    }
+
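+  /* Store the head descriptor's flags last; the driver keys off this
+   * word, so writing it after the loop above publishes the whole batch
+   * at once */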
+  desc_table[last_used_idx & rxvq->qsz_mask].flags = flags;
+
+  *n_descs_processed = 0;
+
+  if (chained)
+    {
+      vring_packed_desc_t *desc_table = rxvq->packed_desc;
+
+      while (desc_table[rxvq->last_used_idx & rxvq->qsz_mask].flags &
+            VRING_DESC_F_NEXT)
+       vhost_user_advance_last_used_idx (rxvq);
+
+      /* Advance past the current chained table entries */
+      vhost_user_advance_last_used_idx (rxvq);
+    }
+
+  /* interrupt (call) handling */
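+  /* Skip the call if the driver disabled notifications through its
+   * event suppression area */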
+  if ((rxvq->callfd_idx != ~0) &&
+      (rxvq->avail_event->flags != VRING_EVENT_F_DISABLE))
+    {
+      vhost_user_main_t *vum = &vhost_user_main;
+
+      rxvq->n_since_last_int += frame->n_vectors - n_left;
+      if (rxvq->n_since_last_int > vum->coalesce_frames)
+       vhost_user_send_call (vm, vui, rxvq);
+    }
+}
+
+static_always_inline void
+vhost_user_tx_trace_packed (vhost_trace_t * t, vhost_user_intf_t * vui,
+                           u16 qid, vlib_buffer_t * b,
+                           vhost_user_vring_t * rxvq)
+{
+  vhost_user_main_t *vum = &vhost_user_main;
+  u32 last_avail_idx = rxvq->last_avail_idx;
+  u32 desc_current = last_avail_idx & rxvq->qsz_mask;
+  vring_packed_desc_t *hdr_desc = 0;
+  u32 hint = 0;
+
+  clib_memset (t, 0, sizeof (*t));
+  t->device_index = vui - vum->vhost_user_interfaces;
+  t->qid = qid;
+
+  hdr_desc = &rxvq->packed_desc[desc_current];
+  if (rxvq->packed_desc[desc_current].flags & VRING_DESC_F_INDIRECT)
+    {
+      t->virtio_ring_flags |= 1 << VIRTIO_TRACE_F_INDIRECT;
+      /* Header is the first here */
+      hdr_desc = map_guest_mem (vui, rxvq->packed_desc[desc_current].addr,
+                               &hint);
+    }
+  if (rxvq->packed_desc[desc_current].flags & VRING_DESC_F_NEXT)
+    {
+      t->virtio_ring_flags |= 1 << VIRTIO_TRACE_F_SIMPLE_CHAINED;
+    }
+  if (!(rxvq->packed_desc[desc_current].flags & VRING_DESC_F_NEXT) &&
+      !(rxvq->packed_desc[desc_current].flags & VRING_DESC_F_INDIRECT))
+    {
+      t->virtio_ring_flags |= 1 << VIRTIO_TRACE_F_SINGLE_DESC;
+    }
+
+  t->first_desc_len = hdr_desc ? hdr_desc->len : 0;
+}
 
-uword
-CLIB_MULTIARCH_FN (vhost_user_tx) (vlib_main_t * vm,
-                                  vlib_node_runtime_t * node,
-                                  vlib_frame_t * frame)
+static_always_inline uword
+vhost_user_device_class_packed (vlib_main_t * vm, vlib_node_runtime_t * node,
+                               vlib_frame_t * frame)
 {
-  u32 *buffers = vlib_frame_args (frame);
+  u32 *buffers = vlib_frame_vector_args (frame);
+  u32 n_left = frame->n_vectors;
+  vhost_user_main_t *vum = &vhost_user_main;
+  vnet_interface_output_runtime_t *rd = (void *) node->runtime_data;
+  vhost_user_intf_t *vui =
+    pool_elt_at_index (vum->vhost_user_interfaces, rd->dev_instance);
+  u32 qid;
+  vhost_user_vring_t *rxvq;
+  u8 error;
+  u32 thread_index = vm->thread_index;
+  vhost_cpu_t *cpu = &vum->cpus[thread_index];
+  u32 map_hint = 0;
+  u8 retry = 8;
+  u16 copy_len;
+  u16 tx_headers_len;
+  vring_packed_desc_t *desc_table;
+  u32 or_flags;
+  u16 desc_head, desc_index, desc_len;
+  u16 n_descs_processed;
+  u8 indirect, chained;
+
+  qid = VHOST_VRING_IDX_RX (*vec_elt_at_index (vui->per_cpu_tx_qid,
+                                              thread_index));
+  rxvq = &vui->vrings[qid];
+
+retry:
+  error = VHOST_USER_TX_FUNC_ERROR_NONE;
+  tx_headers_len = 0;
+  copy_len = 0;
+  n_descs_processed = 0;
+
+  while (n_left > 0)
+    {
+      vlib_buffer_t *b0, *current_b0;
+      uword buffer_map_addr;
+      u32 buffer_len;
+      u16 bytes_left;
+      u32 total_desc_len = 0;
+      u16 n_entries = 0;
+
+      indirect = 0;
+      chained = 0;
+      if (PREDICT_TRUE (n_left > 1))
+       vlib_prefetch_buffer_with_index (vm, buffers[1], LOAD);
+
+      b0 = vlib_get_buffer (vm, buffers[0]);
+      if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
+       {
+         cpu->current_trace = vlib_add_trace (vm, node, b0,
+                                              sizeof (*cpu->current_trace));
+         vhost_user_tx_trace_packed (cpu->current_trace, vui, qid / 2, b0,
+                                     rxvq);
+       }
+
+      desc_table = rxvq->packed_desc;
+      desc_head = desc_index = rxvq->last_avail_idx & rxvq->qsz_mask;
+      if (PREDICT_FALSE (!vhost_user_packed_desc_available (rxvq, desc_head)))
+       {
+         error = VHOST_USER_TX_FUNC_ERROR_PKT_DROP_NOBUF;
+         goto done;
+       }
+      /*
+       * Go deeper in case of indirect descriptor.
+       * To test it, turn off mrg_rxbuf.
+       */
+      if (desc_table[desc_head].flags & VRING_DESC_F_INDIRECT)
+       {
+         indirect = 1;
+         if (PREDICT_FALSE (desc_table[desc_head].len <
+                            sizeof (vring_packed_desc_t)))
+           {
+             error = VHOST_USER_TX_FUNC_ERROR_INDIRECT_OVERFLOW;
+             goto done;
+           }
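+         /* Each packed descriptor is 16 bytes, hence len >> 4 entries */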
+         n_entries = desc_table[desc_head].len >> 4;
+         desc_table = map_guest_mem (vui, desc_table[desc_index].addr,
+                                     &map_hint);
+         if (PREDICT_FALSE (desc_table == 0))
+           {
+             error = VHOST_USER_TX_FUNC_ERROR_MMAP_FAIL;
+             goto done;
+           }
+         desc_index = 0;
+       }
+      else if (rxvq->packed_desc[desc_head].flags & VRING_DESC_F_NEXT)
+       chained = 1;
+
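+      /* desc_len counts the bytes placed in the current descriptor,
+       * starting with the virtio-net header */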
+      desc_len = vui->virtio_net_hdr_sz;
+      buffer_map_addr = desc_table[desc_index].addr;
+      buffer_len = desc_table[desc_index].len;
+
+      /* Get a header from the header array */
+      virtio_net_hdr_mrg_rxbuf_t *hdr = &cpu->tx_headers[tx_headers_len];
+      tx_headers_len++;
+      hdr->hdr.flags = 0;
+      hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_NONE;
+      hdr->num_buffers = 1;
+
+      or_flags = (b0->flags & VNET_BUFFER_F_OFFLOAD_IP_CKSUM) ||
+       (b0->flags & VNET_BUFFER_F_OFFLOAD_UDP_CKSUM) ||
+       (b0->flags & VNET_BUFFER_F_OFFLOAD_TCP_CKSUM);
+
+      /* Guest supports csum offload and buffer requires checksum offload? */
+      if (or_flags &&
+         (vui->features & VIRTIO_FEATURE (VIRTIO_NET_F_GUEST_CSUM)))
+       vhost_user_handle_tx_offload (vui, b0, &hdr->hdr);
+
+      /* Prepare a copy order executed later for the header */
+      ASSERT (copy_len < VHOST_USER_COPY_ARRAY_N);
+      vhost_copy_t *cpy = &cpu->copy[copy_len];
+      copy_len++;
+      cpy->len = vui->virtio_net_hdr_sz;
+      cpy->dst = buffer_map_addr;
+      cpy->src = (uword) hdr;
+
+      buffer_map_addr += vui->virtio_net_hdr_sz;
+      buffer_len -= vui->virtio_net_hdr_sz;
+      bytes_left = b0->current_length;
+      current_b0 = b0;
+      while (1)
+       {
+         if (buffer_len == 0)
+           {
+             /* Get new output */
+             if (chained)
+               {
+                 /*
+                  * Next one is chained
+                  * Test it with both indirect and mrg_rxbuf off
+                  */
+                 if (PREDICT_FALSE (!(desc_table[desc_index].flags &
+                                      VRING_DESC_F_NEXT)))
+                   {
+                     /*
+                      * Last descriptor in chain.
+                      * Dequeue queued descriptors for this packet
+                      */
+                     vhost_user_dequeue_chained_descs (rxvq,
+                                                       &n_descs_processed);
+                     error = VHOST_USER_TX_FUNC_ERROR_PKT_DROP_NOBUF;
+                     goto done;
+                   }
+                 vhost_user_advance_last_avail_idx (rxvq);
+                 desc_index = rxvq->last_avail_idx & rxvq->qsz_mask;
+                 n_descs_processed++;
+                 buffer_map_addr = desc_table[desc_index].addr;
+                 buffer_len = desc_table[desc_index].len;
+                 total_desc_len += desc_len;
+                 desc_len = 0;
+               }
+             else if (indirect)
+               {
+                 /*
+                  * Indirect table
+                  * Test it with mrg_rxbuf off
+                  */
+                 if (PREDICT_TRUE (n_entries > 0))
+                   n_entries--;
+                 else
+                   {
+                     /* Dequeue queued descriptors for this packet */
+                     vhost_user_dequeue_chained_descs (rxvq,
+                                                       &n_descs_processed);
+                     error = VHOST_USER_TX_FUNC_ERROR_INDIRECT_OVERFLOW;
+                     goto done;
+                   }
+                 total_desc_len += desc_len;
+                 desc_index = (desc_index + 1) & rxvq->qsz_mask;
+                 buffer_map_addr = desc_table[desc_index].addr;
+                 buffer_len = desc_table[desc_index].len;
+                 desc_len = 0;
+               }
+             else if (vui->virtio_net_hdr_sz == 12)
+               {
+                 /*
+                  * MRG is available
+                  * This is the default setting for the guest VM
+                  */
+                 virtio_net_hdr_mrg_rxbuf_t *hdr =
+                   &cpu->tx_headers[tx_headers_len - 1];
+
+                 desc_table[desc_index].len = desc_len;
+                 vhost_user_advance_last_avail_idx (rxvq);
+                 desc_head = desc_index =
+                   rxvq->last_avail_idx & rxvq->qsz_mask;
+                 hdr->num_buffers++;
+                 n_descs_processed++;
+                 desc_len = 0;
+
+                 if (PREDICT_FALSE (!vhost_user_packed_desc_available
+                                    (rxvq, desc_index)))
+                   {
+                     /* Dequeue queued descriptors for this packet */
+                     vhost_user_dequeue_descs (rxvq, hdr,
+                                               &n_descs_processed);
+                     error = VHOST_USER_TX_FUNC_ERROR_PKT_DROP_NOBUF;
+                     goto done;
+                   }
+
+                 buffer_map_addr = desc_table[desc_index].addr;
+                 buffer_len = desc_table[desc_index].len;
+               }
+             else
+               {
+                 error = VHOST_USER_TX_FUNC_ERROR_PKT_DROP_NOMRG;
+                 goto done;
+               }
+           }
+
+         ASSERT (copy_len < VHOST_USER_COPY_ARRAY_N);
+         vhost_copy_t *cpy = &cpu->copy[copy_len];
+         copy_len++;
+         cpy->len = bytes_left;
+         cpy->len = (cpy->len > buffer_len) ? buffer_len : cpy->len;
+         cpy->dst = buffer_map_addr;
+         cpy->src = (uword) vlib_buffer_get_current (current_b0) +
+           current_b0->current_length - bytes_left;
+
+         bytes_left -= cpy->len;
+         buffer_len -= cpy->len;
+         buffer_map_addr += cpy->len;
+         desc_len += cpy->len;
+
+         CLIB_PREFETCH (&rxvq->packed_desc, CLIB_CACHE_LINE_BYTES, LOAD);
+
+         /* Check if vlib buffer has more data. If not, get more or break */
+         if (PREDICT_TRUE (!bytes_left))
+           {
+             if (PREDICT_FALSE
+                 (current_b0->flags & VLIB_BUFFER_NEXT_PRESENT))
+               {
+                 current_b0 = vlib_get_buffer (vm, current_b0->next_buffer);
+                 bytes_left = current_b0->current_length;
+               }
+             else
+               {
+                 /* End of packet */
+                 break;
+               }
+           }
+       }
+
+      /* Move from available to used ring */
+      total_desc_len += desc_len;
+      rxvq->packed_desc[desc_head].len = total_desc_len;
+
+      vhost_user_advance_last_avail_table_idx (vui, rxvq, chained);
+      n_descs_processed++;
+
+      if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
+       cpu->current_trace->hdr = cpu->tx_headers[tx_headers_len - 1];
+
+      n_left--;
+
+      /*
+       * Do the copy periodically to prevent
+       * cpu->copy array overflow and corrupt memory
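+       * Also flush when the packet used chained descriptors: those are
+       * handed back to the driver right away, and the copies must land
+       * in guest memory before that happens.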
+       */
+      if (PREDICT_FALSE (copy_len >= VHOST_USER_TX_COPY_THRESHOLD) || chained)
+       {
+         if (PREDICT_FALSE (vhost_user_tx_copy (vui, cpu->copy, copy_len,
+                                                &map_hint)))
+           vlib_error_count (vm, node->node_index,
+                             VHOST_USER_TX_FUNC_ERROR_MMAP_FAIL, 1);
+         copy_len = 0;
+
+         /* give buffers back to driver */
+         vhost_user_mark_desc_available (vm, vui, rxvq, &n_descs_processed,
+                                         chained, frame, n_left);
+       }
+
+      buffers++;
+    }
+
+done:
+  if (PREDICT_TRUE (copy_len))
+    {
+      if (PREDICT_FALSE (vhost_user_tx_copy (vui, cpu->copy, copy_len,
+                                            &map_hint)))
+       vlib_error_count (vm, node->node_index,
+                         VHOST_USER_TX_FUNC_ERROR_MMAP_FAIL, 1);
+
+      vhost_user_mark_desc_available (vm, vui, rxvq, &n_descs_processed,
+                                     chained, frame, n_left);
+    }
+
+  /*
+   * When n_left is set, error is always set to something too.
+   * In case error is due to lack of remaining buffers, we go back up and
+   * retry.
+   * The idea is that it is better to waste some time on packets
+   * that have been processed already than dropping them and get
+   * more fresh packets with a good likelihood that they will be dropped too.
+   * This technique also gives more time to VM driver to pick-up packets.
+   * In case the traffic flows from physical to virtual interfaces, this
+   * technique will end-up leveraging the physical NIC buffer in order to
+   * absorb the VM's CPU jitter.
+   */
+  if (n_left && (error == VHOST_USER_TX_FUNC_ERROR_PKT_DROP_NOBUF) && retry)
+    {
+      retry--;
+      goto retry;
+    }
+
+  vhost_user_vring_unlock (vui, qid);
+
+  if (PREDICT_FALSE (n_left && error != VHOST_USER_TX_FUNC_ERROR_NONE))
+    {
+      vlib_error_count (vm, node->node_index, error, n_left);
+      vlib_increment_simple_counter
+       (vnet_main.interface_main.sw_if_counters +
+        VNET_INTERFACE_COUNTER_DROP, thread_index, vui->sw_if_index, n_left);
+    }
+
+  vlib_buffer_free (vm, vlib_frame_vector_args (frame), frame->n_vectors);
+  return frame->n_vectors;
+}
+
+VNET_DEVICE_CLASS_TX_FN (vhost_user_device_class) (vlib_main_t * vm,
+                                                  vlib_node_runtime_t *
+                                                  node, vlib_frame_t * frame)
+{
+  u32 *buffers = vlib_frame_vector_args (frame);
   u32 n_left = frame->n_vectors;
   vhost_user_main_t *vum = &vhost_user_main;
   vnet_interface_output_runtime_t *rd = (void *) node->runtime_data;
@@ -238,11 +710,13 @@ CLIB_MULTIARCH_FN (vhost_user_tx) (vlib_main_t * vm,
   u32 qid = ~0;
   vhost_user_vring_t *rxvq;
   u8 error;
-  u32 thread_index = vlib_get_thread_index ();
+  u32 thread_index = vm->thread_index;
+  vhost_cpu_t *cpu = &vum->cpus[thread_index];
   u32 map_hint = 0;
   u8 retry = 8;
   u16 copy_len;
   u16 tx_headers_len;
+  u32 or_flags;
 
   if (PREDICT_FALSE (!vui->admin_up))
     {
@@ -250,19 +724,27 @@ CLIB_MULTIARCH_FN (vhost_user_tx) (vlib_main_t * vm,
       goto done3;
     }
 
-  if (PREDICT_FALSE (!vui->is_up))
+  if (PREDICT_FALSE (!vui->is_ready))
     {
       error = VHOST_USER_TX_FUNC_ERROR_NOT_READY;
       goto done3;
     }
 
-  qid =
-    VHOST_VRING_IDX_RX (*vec_elt_at_index
-                       (vui->per_cpu_tx_qid, thread_index));
+  qid = VHOST_VRING_IDX_RX (*vec_elt_at_index (vui->per_cpu_tx_qid,
+                                              thread_index));
   rxvq = &vui->vrings[qid];
+  if (PREDICT_FALSE (rxvq->avail == 0))
+    {
+      error = VHOST_USER_TX_FUNC_ERROR_MMAP_FAIL;
+      goto done3;
+    }
+
   if (PREDICT_FALSE (vui->use_tx_spinlock))
     vhost_user_vring_lock (vui, qid);
 
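+  /* The packed ring variant completes the frame on its own, including
+   * releasing the vring lock before returning */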
+  if (vhost_user_is_packed_ring_supported (vui))
+    return (vhost_user_device_class_packed (vm, node, frame));
+
 retry:
   error = VHOST_USER_TX_FUNC_ERROR_NONE;
   tx_headers_len = 0;
@@ -283,11 +765,9 @@ retry:
 
       if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
        {
-         vum->cpus[thread_index].current_trace =
-           vlib_add_trace (vm, node, b0,
-                           sizeof (*vum->cpus[thread_index].current_trace));
-         vhost_user_tx_trace (vum->cpus[thread_index].current_trace,
-                              vui, qid / 2, b0, rxvq);
+         cpu->current_trace = vlib_add_trace (vm, node, b0,
+                                              sizeof (*cpu->current_trace));
+         vhost_user_tx_trace (cpu->current_trace, vui, qid / 2, b0, rxvq);
        }
 
       if (PREDICT_FALSE (rxvq->last_avail_idx == rxvq->avail->idx))
@@ -302,7 +782,7 @@ retry:
 
       /* Go deeper in case of indirect descriptor
        * I don't know of any driver providing indirect for RX. */
-      if (PREDICT_FALSE (rxvq->desc[desc_head].flags & VIRTQ_DESC_F_INDIRECT))
+      if (PREDICT_FALSE (rxvq->desc[desc_head].flags & VRING_DESC_F_INDIRECT))
        {
          if (PREDICT_FALSE
              (rxvq->desc[desc_head].len < sizeof (vring_desc_t)))
@@ -327,15 +807,24 @@ retry:
 
       {
        // Get a header from the header array
-       virtio_net_hdr_mrg_rxbuf_t *hdr =
-         &vum->cpus[thread_index].tx_headers[tx_headers_len];
+       virtio_net_hdr_mrg_rxbuf_t *hdr = &cpu->tx_headers[tx_headers_len];
        tx_headers_len++;
        hdr->hdr.flags = 0;
-       hdr->hdr.gso_type = 0;
+       hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_NONE;
        hdr->num_buffers = 1;   //This is local, no need to check
 
+       or_flags = (b0->flags & VNET_BUFFER_F_OFFLOAD_IP_CKSUM) ||
+         (b0->flags & VNET_BUFFER_F_OFFLOAD_UDP_CKSUM) ||
+         (b0->flags & VNET_BUFFER_F_OFFLOAD_TCP_CKSUM);
+
+       /* Guest supports csum offload and buffer requires checksum offload? */
+       if (or_flags
+           && (vui->features & VIRTIO_FEATURE (VIRTIO_NET_F_GUEST_CSUM)))
+         vhost_user_handle_tx_offload (vui, b0, &hdr->hdr);
+
        // Prepare a copy order executed later for the header
-       vhost_copy_t *cpy = &vum->cpus[thread_index].copy[copy_len];
+       ASSERT (copy_len < VHOST_USER_COPY_ARRAY_N);
+       vhost_copy_t *cpy = &cpu->copy[copy_len];
        copy_len++;
        cpy->len = vui->virtio_net_hdr_sz;
        cpy->dst = buffer_map_addr;
@@ -350,7 +839,7 @@ retry:
        {
          if (buffer_len == 0)
            {                   //Get new output
-             if (desc_table[desc_index].flags & VIRTQ_DESC_F_NEXT)
+             if (desc_table[desc_index].flags & VRING_DESC_F_NEXT)
                {
                  //Next one is chained
                  desc_index = desc_table[desc_index].next;
@@ -360,7 +849,7 @@ retry:
              else if (vui->virtio_net_hdr_sz == 12)    //MRG is available
                {
                  virtio_net_hdr_mrg_rxbuf_t *hdr =
-                   &vum->cpus[thread_index].tx_headers[tx_headers_len - 1];
+                   &cpu->tx_headers[tx_headers_len - 1];
 
                  //Move from available to used buffer
                  rxvq->used->ring[rxvq->last_used_idx & rxvq->qsz_mask].id =
@@ -390,7 +879,7 @@ retry:
                  desc_head = desc_index =
                    rxvq->avail->ring[rxvq->last_avail_idx & rxvq->qsz_mask];
                  if (PREDICT_FALSE
-                     (rxvq->desc[desc_head].flags & VIRTQ_DESC_F_INDIRECT))
+                     (rxvq->desc[desc_head].flags & VRING_DESC_F_INDIRECT))
                    {
                      //It is seriously unlikely that a driver will put indirect descriptor
                      //after non-indirect descriptor.
@@ -422,7 +911,8 @@ retry:
            }
 
          {
-           vhost_copy_t *cpy = &vum->cpus[thread_index].copy[copy_len];
+           ASSERT (copy_len < VHOST_USER_COPY_ARRAY_N);
+           vhost_copy_t *cpy = &cpu->copy[copy_len];
            copy_len++;
            cpy->len = bytes_left;
            cpy->len = (cpy->len > buffer_len) ? buffer_len : cpy->len;
@@ -465,21 +955,19 @@ retry:
 
       if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
        {
-         vum->cpus[thread_index].current_trace->hdr =
-           vum->cpus[thread_index].tx_headers[tx_headers_len - 1];
+         cpu->current_trace->hdr = cpu->tx_headers[tx_headers_len - 1];
        }
 
       n_left--;                        //At the end for error counting when 'goto done' is invoked
 
       /*
        * Do the copy periodically to prevent
-       * vum->cpus[thread_index].copy array overflow and corrupt memory
+       * cpu->copy array overflow and corrupt memory
        */
       if (PREDICT_FALSE (copy_len >= VHOST_USER_TX_COPY_THRESHOLD))
        {
-         if (PREDICT_FALSE
-             (vhost_user_tx_copy (vui, vum->cpus[thread_index].copy,
-                                  copy_len, &map_hint)))
+         if (PREDICT_FALSE (vhost_user_tx_copy (vui, cpu->copy, copy_len,
+                                                &map_hint)))
            {
              vlib_error_count (vm, node->node_index,
                                VHOST_USER_TX_FUNC_ERROR_MMAP_FAIL, 1);
@@ -496,9 +984,8 @@ retry:
 
 done:
   //Do the memory copies
-  if (PREDICT_FALSE
-      (vhost_user_tx_copy (vui, vum->cpus[thread_index].copy,
-                          copy_len, &map_hint)))
+  if (PREDICT_FALSE (vhost_user_tx_copy (vui, cpu->copy, copy_len,
+                                        &map_hint)))
     {
       vlib_error_count (vm, node->node_index,
                        VHOST_USER_TX_FUNC_ERROR_MMAP_FAIL, 1);
@@ -514,7 +1001,7 @@ done:
    * retry.
    * The idea is that it is better to waste some time on packets
    * that have been processed already than dropping them and get
-   * more fresh packets with a good likelyhood that they will be dropped too.
+   * more fresh packets with a good likelihood that they will be dropped too.
    * This technique also gives more time to VM driver to pick-up packets.
    * In case the traffic flows from physical to virtual interfaces, this
    * technique will end-up leveraging the physical NIC buffer in order to
@@ -533,7 +1020,7 @@ done:
       rxvq->n_since_last_int += frame->n_vectors - n_left;
 
       if (rxvq->n_since_last_int > vum->coalesce_frames)
-       vhost_user_send_call (vm, rxvq);
+       vhost_user_send_call (vm, vui, rxvq);
     }
 
   vhost_user_vring_unlock (vui, qid);
@@ -548,13 +1035,13 @@ done3:
         thread_index, vui->sw_if_index, n_left);
     }
 
-  vlib_buffer_free (vm, vlib_frame_args (frame), frame->n_vectors);
+  vlib_buffer_free (vm, vlib_frame_vector_args (frame), frame->n_vectors);
   return frame->n_vectors;
 }
 
 static __clib_unused clib_error_t *
 vhost_user_interface_rx_mode_change (vnet_main_t * vnm, u32 hw_if_index,
-                                    u32 qid, vnet_hw_interface_rx_mode mode)
+                                    u32 qid, vnet_hw_if_rx_mode mode)
 {
   vlib_main_t *vm = vnm->vlib_main;
   vnet_hw_interface_t *hif = vnet_get_hw_interface (vnm, hw_if_index);
@@ -563,15 +1050,15 @@ vhost_user_interface_rx_mode_change (vnet_main_t * vnm, u32 hw_if_index,
     pool_elt_at_index (vum->vhost_user_interfaces, hif->dev_instance);
   vhost_user_vring_t *txvq = &vui->vrings[VHOST_VRING_IDX_TX (qid)];
 
-  if ((mode == VNET_HW_INTERFACE_RX_MODE_INTERRUPT) ||
-      (mode == VNET_HW_INTERFACE_RX_MODE_ADAPTIVE))
+  if ((mode == VNET_HW_IF_RX_MODE_INTERRUPT) ||
+      (mode == VNET_HW_IF_RX_MODE_ADAPTIVE))
     {
       if (txvq->kickfd_idx == ~0)
        {
          // We cannot support interrupt mode if the driver opts out
          return clib_error_return (0, "Driver does not support interrupt");
        }
-      if (txvq->mode == VNET_HW_INTERFACE_RX_MODE_POLLING)
+      if (txvq->mode == VNET_HW_IF_RX_MODE_POLLING)
        {
          vum->ifq_count++;
          // Start the timer if this is the first encounter on interrupt
@@ -583,11 +1070,10 @@ vhost_user_interface_rx_mode_change (vnet_main_t * vnm, u32 hw_if_index,
                                       VHOST_USER_EVENT_START_TIMER, 0);
        }
     }
-  else if (mode == VNET_HW_INTERFACE_RX_MODE_POLLING)
+  else if (mode == VNET_HW_IF_RX_MODE_POLLING)
     {
-      if (((txvq->mode == VNET_HW_INTERFACE_RX_MODE_INTERRUPT) ||
-          (txvq->mode == VNET_HW_INTERFACE_RX_MODE_ADAPTIVE)) &&
-         vum->ifq_count)
+      if (((txvq->mode == VNET_HW_IF_RX_MODE_INTERRUPT) ||
+          (txvq->mode == VNET_HW_IF_RX_MODE_ADAPTIVE)) && vum->ifq_count)
        {
          vum->ifq_count--;
          // Stop the timer if there is no more interrupt interface/queue
@@ -600,15 +1086,15 @@ vhost_user_interface_rx_mode_change (vnet_main_t * vnm, u32 hw_if_index,
     }
 
   txvq->mode = mode;
-  if (mode == VNET_HW_INTERFACE_RX_MODE_POLLING)
+  if (mode == VNET_HW_IF_RX_MODE_POLLING)
     txvq->used->flags = VRING_USED_F_NO_NOTIFY;
-  else if ((mode == VNET_HW_INTERFACE_RX_MODE_ADAPTIVE) ||
-          (mode == VNET_HW_INTERFACE_RX_MODE_INTERRUPT))
+  else if ((mode == VNET_HW_IF_RX_MODE_ADAPTIVE) ||
+          (mode == VNET_HW_IF_RX_MODE_INTERRUPT))
     txvq->used->flags = 0;
   else
     {
-      clib_warning ("BUG: unhandled mode %d changed for if %d queue %d", mode,
-                   hw_if_index, qid);
+      vu_log_err (vui, "unhandled mode %d changed for if %d queue %d", mode,
+                 hw_if_index, qid);
       return clib_error_return (0, "unsupported");
     }
 
@@ -623,20 +1109,24 @@ vhost_user_interface_admin_up_down (vnet_main_t * vnm, u32 hw_if_index,
   vhost_user_main_t *vum = &vhost_user_main;
   vhost_user_intf_t *vui =
     pool_elt_at_index (vum->vhost_user_interfaces, hif->dev_instance);
-  u32 hw_flags = 0;
+  u8 link_old, link_new;
+
+  link_old = vui_is_link_up (vui);
+
   vui->admin_up = (flags & VNET_SW_INTERFACE_FLAG_ADMIN_UP) != 0;
-  hw_flags = vui->admin_up ? VNET_HW_INTERFACE_FLAG_LINK_UP : 0;
 
-  vnet_hw_interface_set_flags (vnm, vui->hw_if_index, hw_flags);
+  link_new = vui_is_link_up (vui);
+
+  if (link_old != link_new)
+    vnet_hw_interface_set_flags (vnm, vui->hw_if_index, link_new ?
+                                VNET_HW_INTERFACE_FLAG_LINK_UP : 0);
 
   return /* no error */ 0;
 }
 
-#ifndef CLIB_MARCH_VARIANT
 /* *INDENT-OFF* */
 VNET_DEVICE_CLASS (vhost_user_device_class) = {
   .name = "vhost-user",
-  .tx_function = vhost_user_tx,
   .tx_function_n_errors = VHOST_USER_TX_FUNC_N_ERROR,
   .tx_function_error_strings = vhost_user_tx_func_error_strings,
   .format_device_name = format_vhost_user_interface_name,
@@ -646,20 +1136,6 @@ VNET_DEVICE_CLASS (vhost_user_device_class) = {
   .format_tx_trace = format_vhost_trace,
 };
 
-#if __x86_64__
-vlib_node_function_t __clib_weak vhost_user_tx_avx512;
-vlib_node_function_t __clib_weak vhost_user_tx_avx2;
-static void __clib_constructor
-vhost_user_tx_multiarch_select (void)
-{
-  if (vhost_user_tx_avx512 && clib_cpu_supports_avx512f ())
-    vhost_user_device_class.tx_function = vhost_user_tx_avx512;
-  else if (vhost_user_tx_avx2 && clib_cpu_supports_avx2 ())
-    vhost_user_device_class.tx_function = vhost_user_tx_avx2;
-}
-#endif
-#endif
-
 /* *INDENT-ON* */
 
 /*