+/* Enqueue up to n_packets single-segment buffers onto the AVF tx
+ * descriptor ring and ring the tail doorbell.  Returns the number of
+ * packets actually enqueued (may be less than n_packets if descriptor
+ * space runs out).  use_va_dma is a compile-time specialization flag:
+ * non-zero means the device can DMA from buffer virtual addresses,
+ * zero means physical addresses must be programmed instead. */
+static_always_inline u16
+avf_tx_enqueue (vlib_main_t * vm, avf_txq_t * txq, u32 * buffers,
+ u32 n_packets, int use_va_dma)
+{
+ u16 next = txq->next;
+ /* Command bits set on every descriptor: end-of-packet plus the
+  * reserved bit the AVF descriptor format requires. */
+ u64 bits = AVF_TXD_CMD_EOP | AVF_TXD_CMD_RSV;
+ u16 n_desc = 0;
+ u16 *slot, n_desc_left, n_packets_left = n_packets;
+ /* Ring size is a power of two; mask is used to wrap indices. */
+ u16 mask = txq->size - 1;
+ vlib_buffer_t *b[4];
+ avf_tx_desc_t *d = txq->descs + next;
+
+ /* avoid ring wrap */
+ /* Budget descriptors only up to the end of the ring so the batch
+  * below never has to wrap mid-burst; the "+ 8" presumably keeps
+  * headroom for in-flight descriptors.  NOTE(review): if
+  * clib_max(txq->next, txq->n_enqueued + 8) exceeds txq->size this
+  * u16 subtraction wraps to a huge value — confirm callers bound
+  * n_enqueued so that cannot happen. */
+ n_desc_left = txq->size - clib_max (txq->next, txq->n_enqueued + 8);
+
+ while (n_packets_left && n_desc_left)
+ {
+ u32 or_flags;
+ /* Fast path processes 4 packets per iteration; fall back to the
+  * scalar path near the end of the burst or the ring. */
+ if (n_packets_left < 8 || n_desc_left < 4)
+ goto one_by_one;
+
+ /* Prefetch the next quad of buffers while handling this one. */
+ vlib_prefetch_buffer_with_index (vm, buffers[4], LOAD);
+ vlib_prefetch_buffer_with_index (vm, buffers[5], LOAD);
+ vlib_prefetch_buffer_with_index (vm, buffers[6], LOAD);
+ vlib_prefetch_buffer_with_index (vm, buffers[7], LOAD);
+
+ b[0] = vlib_get_buffer (vm, buffers[0]);
+ b[1] = vlib_get_buffer (vm, buffers[1]);
+ b[2] = vlib_get_buffer (vm, buffers[2]);
+ b[3] = vlib_get_buffer (vm, buffers[3]);
+
+ or_flags = b[0]->flags | b[1]->flags | b[2]->flags | b[3]->flags;
+
+ /* Any chained (multi-segment) buffer in the quad disables the
+  * fast path.  NOTE(review): the one_by_one path below does not
+  * walk b->next either, so only the first segment gets programmed
+  * — confirm chained buffers are rejected or linearized upstream. */
+ if (or_flags & VLIB_BUFFER_NEXT_PRESENT)
+ goto one_by_one;
+
+ /* Remember buffer indices so the completion path can free them. */
+ clib_memcpy_fast (txq->bufs + next, buffers, sizeof (u32) * 4);
+
+ /* qword[0] of an AVF tx descriptor is the DMA buffer address. */
+ if (use_va_dma)
+ {
+ d[0].qword[0] = vlib_buffer_get_current_va (b[0]);
+ d[1].qword[0] = vlib_buffer_get_current_va (b[1]);
+ d[2].qword[0] = vlib_buffer_get_current_va (b[2]);
+ d[3].qword[0] = vlib_buffer_get_current_va (b[3]);
+ }
+ else
+ {
+ d[0].qword[0] = vlib_buffer_get_current_pa (vm, b[0]);
+ d[1].qword[0] = vlib_buffer_get_current_pa (vm, b[1]);
+ d[2].qword[0] = vlib_buffer_get_current_pa (vm, b[2]);
+ d[3].qword[0] = vlib_buffer_get_current_pa (vm, b[3]);
+ }
+
+ /* qword[1]: data length goes in the field starting at bit 34,
+  * OR'd with the per-descriptor command bits (EOP | RSV). */
+ d[0].qword[1] = ((u64) b[0]->current_length) << 34 | bits;
+ d[1].qword[1] = ((u64) b[1]->current_length) << 34 | bits;
+ d[2].qword[1] = ((u64) b[2]->current_length) << 34 | bits;
+ d[3].qword[1] = ((u64) b[3]->current_length) << 34 | bits;
+
+ next += 4;
+ n_desc += 4;
+ buffers += 4;
+ n_packets_left -= 4;
+ n_desc_left -= 4;
+ d += 4;
+ continue;
+
+ one_by_one:
+ /* Scalar path: one descriptor per packet, same layout as above. */
+ txq->bufs[next] = buffers[0];
+ b[0] = vlib_get_buffer (vm, buffers[0]);
+
+ if (use_va_dma)
+ d[0].qword[0] = vlib_buffer_get_current_va (b[0]);
+ else
+ d[0].qword[0] = vlib_buffer_get_current_pa (vm, b[0]);
+
+ d[0].qword[1] = (((u64) b[0]->current_length) << 34) | bits;
+
+ next += 1;
+ n_desc += 1;
+ buffers += 1;
+ n_packets_left -= 1;
+ n_desc_left -= 1;
+ d += 1;
+ }
+
+ /* Ask the NIC to report completion (RS bit) on the last descriptor
+  * of this burst, and record its slot so the completion handler
+  * knows which descriptor to poll.  Skipped silently if the rs_slots
+  * ring is full — an earlier outstanding RS descriptor then covers
+  * this burst's completions. */
+ if ((slot = clib_ring_enq (txq->rs_slots)))
+ {
+ u16 rs_slot = slot[0] = (next - 1) & mask;
+ d = txq->descs + rs_slot;
+ d[0].qword[1] |= AVF_TXD_CMD_RS;
+ }
+
+ /* Ensure all descriptor writes are globally visible before the
+  * device sees the new tail pointer. */
+ CLIB_MEMORY_BARRIER ();
+ /* next never exceeds txq->size (n_desc_left budget above), so the
+  * mask only folds the exact end-of-ring case back to 0. */
+ *(txq->qtx_tail) = txq->next = next & mask;
+ txq->n_enqueued += n_desc;
+ return n_packets - n_packets_left;
+}
+
+VNET_DEVICE_CLASS_TX_FN (avf_device_class) (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * frame)