#include <vlib/vlib.h>
#include <vlib/unix/unix.h>
#include <vlib/pci/pci.h>
+#include <vppinfra/ring.h>
#include <vnet/ethernet/ethernet.h>
#include <vnet/devices/devices.h>
}
static_always_inline u16
-avf_tx_enqueue (vlib_main_t * vm, avf_txq_t * txq, u32 * buffers,
- u32 n_packets, int use_va_dma)
+avf_tx_enqueue (vlib_main_t * vm, vlib_node_runtime_t * node, avf_txq_t * txq,
+ u32 * buffers, u32 n_packets, int use_va_dma)
{
u16 next = txq->next;
- u64 bits = (AVF_TXD_CMD_EOP | AVF_TXD_CMD_RS | AVF_TXD_CMD_RSV);
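+ /* RS (report status) is no longer requested on every packet; it is set once per
+ * batch on the last descriptor via the rs_slots ring below */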
+ u64 bits = AVF_TXD_CMD_EOP | AVF_TXD_CMD_RSV;
u16 n_desc = 0;
- u16 n_desc_left, n_packets_left = n_packets;
+ u16 *slot, n_desc_left, n_packets_left = n_packets;
u16 mask = txq->size - 1;
vlib_buffer_t *b[4];
avf_tx_desc_t *d = txq->descs + next;
+ u16 n_desc_needed;
+ vlib_buffer_t *b0;
/* avoid ring wrap */
n_desc_left = txq->size - clib_max (txq->next, txq->n_enqueued + 8);
+ if (n_desc_left == 0)
+ return 0;
+
+ /* Fast path, no ring wrap */
while (n_packets_left && n_desc_left)
{
u32 or_flags;
if (or_flags & VLIB_BUFFER_NEXT_PRESENT)
goto one_by_one;
- clib_memcpy_fast (txq->bufs + next, buffers, sizeof (u32) * 4);
+ vlib_buffer_copy_indices (txq->bufs + next, buffers, 4);
if (use_va_dma)
{
txq->bufs[next] = buffers[0];
b[0] = vlib_get_buffer (vm, buffers[0]);
+ /* Deal with a chained buffer if present */
+ if (b[0]->flags & VLIB_BUFFER_NEXT_PRESENT)
+ {
+ n_desc_needed = 1;
+ b0 = b[0];
+
+ /* No per-buffer segment count is available, so walk the chain to count descriptors */
+ while (b0->flags & VLIB_BUFFER_NEXT_PRESENT)
+ {
+ b0 = vlib_get_buffer (vm, b0->next_buffer);
+ n_desc_needed++;
+ }
+
+ /* Spec limits a packet to 8 data descriptors (segments) */
+ if (PREDICT_FALSE (n_desc_needed > 8))
+ {
+ vlib_buffer_free_one (vm, buffers[0]);
+ vlib_error_count (vm, node->node_index,
+ AVF_TX_ERROR_SEGMENT_SIZE_EXCEEDED, 1);
+ n_packets_left -= 1;
+ buffers += 1;
+ continue;
+ }
+
+ if (PREDICT_FALSE (n_desc_left < n_desc_needed))
+ /*
+ * Slow path may be able to deal with this since it can handle
+ * ring wrap
+ */
+ break;
+
+ while (b[0]->flags & VLIB_BUFFER_NEXT_PRESENT)
+ {
+ if (use_va_dma)
+ d[0].qword[0] = vlib_buffer_get_current_va (b[0]);
+ else
+ d[0].qword[0] = vlib_buffer_get_current_pa (vm, b[0]);
+
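+ /* Non-final segment: write its length into qword[1] but leave EOP clear, more segments follow */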
+ d[0].qword[1] = (((u64) b[0]->current_length) << 34) |
+ AVF_TXD_CMD_RSV;
+
+ next += 1;
+ n_desc += 1;
+ n_desc_left -= 1;
+ d += 1;
+
+ txq->bufs[next] = b[0]->next_buffer;
+ b[0] = vlib_get_buffer (vm, b[0]->next_buffer);
+ }
+ }
+
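+ /* b[0] now points at the last segment of the chain; its descriptor is written below with the EOP bit set */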
if (use_va_dma)
d[0].qword[0] = vlib_buffer_get_current_va (b[0]);
else
d += 1;
}
+ /* Slow path to support ring wrap */
+ if (PREDICT_FALSE (n_packets_left))
+ {
+ txq->n_enqueued += n_desc;
+
+ n_desc = 0;
+ d = txq->descs + (next & mask);
+
+ /* +8 to be consistent with fast path */
+ n_desc_left = txq->size - (txq->n_enqueued + 8);
+
+ while (n_packets_left && n_desc_left)
+ {
+ txq->bufs[next & mask] = buffers[0];
+ b[0] = vlib_get_buffer (vm, buffers[0]);
+
+ /* Deal with a chained buffer if present */
+ if (b[0]->flags & VLIB_BUFFER_NEXT_PRESENT)
+ {
+ n_desc_needed = 1;
+ b0 = b[0];
+
+ while (b0->flags & VLIB_BUFFER_NEXT_PRESENT)
+ {
+ b0 = vlib_get_buffer (vm, b0->next_buffer);
+ n_desc_needed++;
+ }
+
+ /* Spec limits a packet to 8 data descriptors (segments) */
+ if (PREDICT_FALSE (n_desc_needed > 8))
+ {
+ vlib_buffer_free_one (vm, buffers[0]);
+ vlib_error_count (vm, node->node_index,
+ AVF_TX_ERROR_SEGMENT_SIZE_EXCEEDED, 1);
+ n_packets_left -= 1;
+ buffers += 1;
+ continue;
+ }
+
+ if (PREDICT_FALSE (n_desc_left < n_desc_needed))
+ break;
+
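+ /* Write one descriptor per non-final segment, wrapping the slot index with the ring mask */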
+ while (b[0]->flags & VLIB_BUFFER_NEXT_PRESENT)
+ {
+ if (use_va_dma)
+ d[0].qword[0] = vlib_buffer_get_current_va (b[0]);
+ else
+ d[0].qword[0] = vlib_buffer_get_current_pa (vm, b[0]);
+
+ d[0].qword[1] = (((u64) b[0]->current_length) << 34) |
+ AVF_TXD_CMD_RSV;
+
+ next += 1;
+ n_desc += 1;
+ n_desc_left -= 1;
+ d = txq->descs + (next & mask);
+
+ txq->bufs[next & mask] = b[0]->next_buffer;
+ b[0] = vlib_get_buffer (vm, b[0]->next_buffer);
+ }
+ }
+
+ if (use_va_dma)
+ d[0].qword[0] = vlib_buffer_get_current_va (b[0]);
+ else
+ d[0].qword[0] = vlib_buffer_get_current_pa (vm, b[0]);
+
+ d[0].qword[1] = (((u64) b[0]->current_length) << 34) | bits;
+
+ next += 1;
+ n_desc += 1;
+ buffers += 1;
+ n_packets_left -= 1;
+ n_desc_left -= 1;
+ d = txq->descs + (next & mask);
+ }
+ }
+
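+ /* Request a completion report (RS) only on the last descriptor of this batch and
+ * remember its slot so the cleanup path can tell how far the NIC has progressed */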
+ if ((slot = clib_ring_enq (txq->rs_slots)))
+ {
+ u16 rs_slot = slot[0] = (next - 1) & mask;
+ d = txq->descs + rs_slot;
+ d[0].qword[1] |= AVF_TXD_CMD_RS;
+ }
+
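+ /* Make sure the descriptor writes are globally visible before the tail doorbell is rung */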
CLIB_MEMORY_BARRIER ();
*(txq->qtx_tail) = txq->next = next & mask;
txq->n_enqueued += n_desc;
avf_txq_t *txq = vec_elt_at_index (ad->txqs, qid % ad->num_queue_pairs);
u32 *buffers = vlib_frame_vector_args (frame);
u16 n_enq, n_left;
- u16 n_retry = 5;
+ u16 n_retry = 2;
clib_spinlock_lock_if_init (&txq->lock);
/* release consumed bufs */
if (txq->n_enqueued)
{
- avf_tx_desc_t *d0;
- u16 first, slot, n_free = 0, mask = txq->size - 1;
- first = slot = (txq->next - txq->n_enqueued) & mask;
- d0 = txq->descs + slot;
- while (n_free < txq->n_enqueued && avf_tx_desc_get_dtyp (d0) == 0x0F)
+ i32 complete_slot = -1;
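+ /* Walk the RS slots in submission order; a descriptor whose DTYP field reads 0x0F has
+ * been written back by the NIC, so everything up to and including that slot is complete */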
+ while (1)
{
- n_free++;
- slot = (slot + 1) & mask;
- d0 = txq->descs + slot;
+ u16 *slot = clib_ring_get_first (txq->rs_slots);
+
+ if (slot == 0)
+ break;
+
+ if (avf_tx_desc_get_dtyp (txq->descs + slot[0]) != 0x0F)
+ break;
+
+ complete_slot = slot[0];
+
+ clib_ring_deq (txq->rs_slots);
}
- if (n_free)
+ if (complete_slot >= 0)
{
+ u16 first, mask, n_free;
+ mask = txq->size - 1;
+ first = (txq->next - txq->n_enqueued) & mask;
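+ /* Everything from the oldest outstanding buffer up to and including the completed RS slot can be freed */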
+ n_free = (complete_slot + 1 - first) & mask;
+
txq->n_enqueued -= n_free;
- vlib_buffer_free_from_ring (vm, txq->bufs, first, txq->size,
- n_free);
+ vlib_buffer_free_from_ring_no_next (vm, txq->bufs, first, txq->size,
+ n_free);
}
}
if (ad->flags & AVF_DEVICE_F_VA_DMA)
- n_enq = avf_tx_enqueue (vm, txq, buffers, n_left, 1);
+ n_enq = avf_tx_enqueue (vm, node, txq, buffers, n_left, 1);
else
- n_enq = avf_tx_enqueue (vm, txq, buffers, n_left, 0);
+ n_enq = avf_tx_enqueue (vm, node, txq, buffers, n_left, 0);
n_left -= n_enq;