#include <avf/avf.h>
#define foreach_avf_input_error \
- _(BUFFER_ALLOC, "buffer alloc error") \
- _(RX_PACKET_ERROR, "Rx packet errors")
+ _(BUFFER_ALLOC, "buffer alloc error")
typedef enum
{
#define _(f,s) AVF_INPUT_ERROR_##f,
  foreach_avf_input_error
#undef _
    AVF_INPUT_N_ERROR,
} avf_input_error_t;
-#define AVF_RX_DESC_STATUS(x) (1 << x)
-#define AVF_RX_DESC_STATUS_DD AVF_RX_DESC_STATUS(0)
-#define AVF_RX_DESC_STATUS_EOP AVF_RX_DESC_STATUS(1)
-
#define AVF_INPUT_REFILL_TRESHOLD 32
+
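+/* Write a buffer address into a rx descriptor: the address goes into
+   qword 0 and qword 1 is zeroed, clearing stale DD/EOP status bits before
+   the descriptor is handed back to the NIC. With AVX2 a single 256-bit
+   store covers the whole 32-byte descriptor. */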
+static_always_inline void
+avf_rx_desc_write (avf_rx_desc_t * d, u64 addr)
+{
+#ifdef CLIB_HAVE_VEC256
+ u64x4 v = { addr, 0, 0, 0 };
+ u64x4_store_unaligned (v, (void *) d);
+#else
+ d->qword[0] = addr;
+ d->qword[1] = 0;
+#endif
+}
+
static_always_inline void
avf_rxq_refill (vlib_main_t * vm, vlib_node_runtime_t * node, avf_rxq_t * rxq,
int use_va_dma)
{
- u16 n_refill, mask, n_alloc, slot;
- u32 s0, s1, s2, s3;
- vlib_buffer_t *b[4];
- avf_rx_desc_t *d[4];
-
- n_refill = rxq->size - 1 - rxq->n_enqueued;
+ u16 n_refill, mask, n_alloc, slot, size;
+ vlib_buffer_t *b[8];
+ avf_rx_desc_t *d, *first_d;
+ void *p[8];
+
+ size = rxq->size;
+ mask = size - 1;
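+ /* one ring slot is always kept empty to tell a full ring from an empty
+ one, so at most size - 1 descriptors can be outstanding */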
+ n_refill = mask - rxq->n_enqueued;
if (PREDICT_TRUE (n_refill <= AVF_INPUT_REFILL_TRESHOLD))
return;
- mask = rxq->size - 1;
slot = (rxq->next - n_refill - 1) & mask;
n_refill &= ~7; /* round down to a multiple of 8 */
- n_alloc = vlib_buffer_alloc_to_ring (vm, rxq->bufs, slot, rxq->size,
- n_refill);
+ n_alloc = vlib_buffer_alloc_to_ring (vm, rxq->bufs, slot, size, n_refill);
if (PREDICT_FALSE (n_alloc != n_refill))
{
vlib_error_count (vm, node->node_index,
AVF_INPUT_ERROR_BUFFER_ALLOC, 1);
if (n_alloc)
- vlib_buffer_free_from_ring (vm, rxq->bufs, slot, rxq->size, n_alloc);
+ vlib_buffer_free_from_ring (vm, rxq->bufs, slot, size, n_alloc);
return;
}
rxq->n_enqueued += n_alloc;
+ first_d = rxq->descs;
- while (n_alloc >= 4)
- {
- if (PREDICT_TRUE (slot + 3 < rxq->size))
- {
- s0 = slot;
- s1 = slot + 1;
- s2 = slot + 2;
- s3 = slot + 3;
- }
- else
- {
- s0 = slot;
- s1 = (slot + 1) & mask;
- s2 = (slot + 2) & mask;
- s3 = (slot + 3) & mask;
- }
+ ASSERT (slot % 8 == 0);
- d[0] = ((avf_rx_desc_t *) rxq->descs) + s0;
- d[1] = ((avf_rx_desc_t *) rxq->descs) + s1;
- d[2] = ((avf_rx_desc_t *) rxq->descs) + s2;
- d[3] = ((avf_rx_desc_t *) rxq->descs) + s3;
- b[0] = vlib_get_buffer (vm, rxq->bufs[s0]);
- b[1] = vlib_get_buffer (vm, rxq->bufs[s1]);
- b[2] = vlib_get_buffer (vm, rxq->bufs[s2]);
- b[3] = vlib_get_buffer (vm, rxq->bufs[s3]);
+ while (n_alloc >= 8)
+ {
+ d = first_d + slot;
if (use_va_dma)
{
- d[0]->qword[0] = vlib_buffer_get_va (b[0]);
- d[1]->qword[0] = vlib_buffer_get_va (b[1]);
- d[2]->qword[0] = vlib_buffer_get_va (b[2]);
- d[3]->qword[0] = vlib_buffer_get_va (b[3]);
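+ /* with VA DMA, buffer data virtual addresses can be written directly;
+ data starts sizeof (vlib_buffer_t) bytes past the buffer header */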
+ vlib_get_buffers_with_offset (vm, rxq->bufs + slot, p, 8,
+ sizeof (vlib_buffer_t));
+ avf_rx_desc_write (d + 0, pointer_to_uword (p[0]));
+ avf_rx_desc_write (d + 1, pointer_to_uword (p[1]));
+ avf_rx_desc_write (d + 2, pointer_to_uword (p[2]));
+ avf_rx_desc_write (d + 3, pointer_to_uword (p[3]));
+ avf_rx_desc_write (d + 4, pointer_to_uword (p[4]));
+ avf_rx_desc_write (d + 5, pointer_to_uword (p[5]));
+ avf_rx_desc_write (d + 6, pointer_to_uword (p[6]));
+ avf_rx_desc_write (d + 7, pointer_to_uword (p[7]));
}
else
{
- d[0]->qword[0] = vlib_buffer_get_pa (vm, b[0]);
- d[1]->qword[0] = vlib_buffer_get_pa (vm, b[1]);
- d[2]->qword[0] = vlib_buffer_get_pa (vm, b[2]);
- d[3]->qword[0] = vlib_buffer_get_pa (vm, b[3]);
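+ /* without VA DMA the descriptor needs the physical address of each
+ buffer's data */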
+ vlib_get_buffers (vm, rxq->bufs + slot, b, 8);
+ avf_rx_desc_write (d + 0, vlib_buffer_get_pa (vm, b[0]));
+ avf_rx_desc_write (d + 1, vlib_buffer_get_pa (vm, b[1]));
+ avf_rx_desc_write (d + 2, vlib_buffer_get_pa (vm, b[2]));
+ avf_rx_desc_write (d + 3, vlib_buffer_get_pa (vm, b[3]));
+ avf_rx_desc_write (d + 4, vlib_buffer_get_pa (vm, b[4]));
+ avf_rx_desc_write (d + 5, vlib_buffer_get_pa (vm, b[5]));
+ avf_rx_desc_write (d + 6, vlib_buffer_get_pa (vm, b[6]));
+ avf_rx_desc_write (d + 7, vlib_buffer_get_pa (vm, b[7]));
}
- d[0]->qword[1] = 0;
- d[1]->qword[1] = 0;
- d[2]->qword[1] = 0;
- d[3]->qword[1] = 0;
-
- /* next */
- slot = (slot + 4) & mask;
- n_alloc -= 4;
- }
- while (n_alloc)
- {
- s0 = slot;
- d[0] = ((avf_rx_desc_t *) rxq->descs) + s0;
- b[0] = vlib_get_buffer (vm, rxq->bufs[s0]);
- if (use_va_dma)
- d[0]->qword[0] = vlib_buffer_get_va (b[0]);
- else
- d[0]->qword[0] = vlib_buffer_get_pa (vm, b[0]);
- d[0]->qword[1] = 0;
-
/* next */
- slot = (slot + 1) & mask;
- n_alloc -= 1;
+ slot = (slot + 8) & mask;
+ n_alloc -= 8;
}
- CLIB_MEMORY_BARRIER ();
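+ /* descriptor writes must be globally visible before the tail update
+ hands the new slots to the NIC; a store-only barrier suffices */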
+ CLIB_MEMORY_STORE_BARRIER ();
*(rxq->qrx_tail) = slot;
}
-static_always_inline void
-avf_check_for_error (vlib_node_runtime_t * node, avf_rx_vector_entry_t * rxve,
- vlib_buffer_t * b, u16 * next)
+
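+/* Attach the tail buffers of a multi-segment packet to its head buffer:
+   link each tail via next_buffer, initialize it from the buffer template,
+   set per-segment lengths from the saved qword 1 values and store the
+   total tail length on the head buffer. Returns that total so the caller
+   can add it to the rx byte count. */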
+static_always_inline uword
+avf_rx_attach_tail (vlib_main_t * vm, vlib_buffer_t * bt, vlib_buffer_t * b,
+ u64 qw1, avf_rx_tail_t * t)
{
- avf_main_t *am = &avf_main;
- avf_ptype_t *ptype;
- if (PREDICT_FALSE (rxve->error))
+ vlib_buffer_t *hb = b;
+ u32 tlnifb = 0, i = 0;
+
+ if (qw1 & AVF_RXD_STATUS_EOP)
+ return 0;
+
+ while ((qw1 & AVF_RXD_STATUS_EOP) == 0)
{
- b->error = node->errors[AVF_INPUT_ERROR_RX_PACKET_ERROR];
- ptype = am->ptypes + rxve->ptype;
- /* retract */
- vlib_buffer_advance (b, --ptype->buffer_advance);
- *next = VNET_DEVICE_INPUT_NEXT_DROP;
+ ASSERT (i < AVF_RX_MAX_DESC_IN_CHAIN - 1);
+ ASSERT (qw1 & AVF_RXD_STATUS_DD);
+ qw1 = t->qw1s[i];
+ b->next_buffer = t->buffers[i];
+ b->flags |= VLIB_BUFFER_NEXT_PRESENT;
+ b = vlib_get_buffer (vm, b->next_buffer);
+ clib_memcpy_fast (b, bt, sizeof (vlib_buffer_t));
+ tlnifb += b->current_length = qw1 >> AVF_RXD_LEN_SHIFT;
+ i++;
}
-}
-static_always_inline u32
-avf_find_next (avf_rx_vector_entry_t * rxve, vlib_buffer_t * b,
- int maybe_tagged)
-{
- avf_main_t *am = &avf_main;
- ethernet_header_t *e = (ethernet_header_t *) b->data;
- avf_ptype_t *ptype;
- if (maybe_tagged && ethernet_frame_is_tagged (e->type))
- return VNET_DEVICE_INPUT_NEXT_ETHERNET_INPUT;
- ptype = am->ptypes + rxve->ptype;
- vlib_buffer_advance (b, ptype->buffer_advance);
- b->flags |= ptype->flags;
- return ptype->next_node;
+ hb->total_length_not_including_first_buffer = tlnifb;
+ hb->flags |= VLIB_BUFFER_TOTAL_LENGTH_VALID;
+ return tlnifb;
}
-
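+/* Finish a burst: copy the buffer template into every buffer, set packet
+   lengths from the stashed qword 1 values and, when the burst contains
+   chained descriptors, attach the tails. maybe_multiseg is a compile-time
+   flag, so the single-segment path pays nothing for chaining support. */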
static_always_inline uword
avf_process_rx_burst (vlib_main_t * vm, vlib_node_runtime_t * node,
- vlib_buffer_t * bt, avf_rx_vector_entry_t * rxve,
- vlib_buffer_t ** b, u16 * next, u32 n_rxv,
- u8 maybe_error, int known_next)
+ avf_per_thread_data_t * ptd, u32 n_left,
+ int maybe_multiseg)
{
+ vlib_buffer_t *bt = &ptd->buffer_template;
+ vlib_buffer_t **b = ptd->bufs;
+ u64 *qw1 = ptd->qw1s;
+ avf_rx_tail_t *tail = ptd->tails;
uword n_rx_bytes = 0;
- while (n_rxv >= 4)
+ while (n_left >= 4)
{
- if (n_rxv >= 12)
+ if (n_left >= 12)
{
vlib_prefetch_buffer_header (b[8], LOAD);
vlib_prefetch_buffer_header (b[9], LOAD);
vlib_prefetch_buffer_header (b[10], LOAD);
vlib_prefetch_buffer_header (b[11], LOAD);
- if (!known_next)
- {
- CLIB_PREFETCH (b[8]->data, CLIB_CACHE_LINE_BYTES, LOAD);
- CLIB_PREFETCH (b[9]->data, CLIB_CACHE_LINE_BYTES, LOAD);
- CLIB_PREFETCH (b[10]->data, CLIB_CACHE_LINE_BYTES, LOAD);
- CLIB_PREFETCH (b[11]->data, CLIB_CACHE_LINE_BYTES, LOAD);
- }
}
- n_rx_bytes += b[0]->current_length = rxve[0].length;
- n_rx_bytes += b[1]->current_length = rxve[1].length;
- n_rx_bytes += b[2]->current_length = rxve[2].length;
- n_rx_bytes += b[3]->current_length = rxve[3].length;
-
- if (!known_next)
- {
- ethernet_header_t *e0, *e1, *e2, *e3;
+ clib_memcpy64_x4 (b[0], b[1], b[2], b[3], bt);
- e0 = (ethernet_header_t *) b[0]->data;
- e1 = (ethernet_header_t *) b[1]->data;
- e2 = (ethernet_header_t *) b[2]->data;
- e3 = (ethernet_header_t *) b[3]->data;
+ n_rx_bytes += b[0]->current_length = qw1[0] >> AVF_RXD_LEN_SHIFT;
+ n_rx_bytes += b[1]->current_length = qw1[1] >> AVF_RXD_LEN_SHIFT;
+ n_rx_bytes += b[2]->current_length = qw1[2] >> AVF_RXD_LEN_SHIFT;
+ n_rx_bytes += b[3]->current_length = qw1[3] >> AVF_RXD_LEN_SHIFT;
- if (ethernet_frame_is_any_tagged_x4 (e0->type, e1->type,
- e2->type, e3->type))
- {
- next[0] = avf_find_next (rxve, b[0], 1);
- next[1] = avf_find_next (rxve + 1, b[1], 1);
- next[2] = avf_find_next (rxve + 2, b[2], 1);
- next[3] = avf_find_next (rxve + 3, b[3], 1);
- }
- else
- {
- next[0] = avf_find_next (rxve, b[0], 0);
- next[1] = avf_find_next (rxve + 1, b[1], 0);
- next[2] = avf_find_next (rxve + 2, b[2], 0);
- next[3] = avf_find_next (rxve + 3, b[3], 0);
- }
-
- if (PREDICT_FALSE (maybe_error))
- {
- avf_check_for_error (node, rxve + 0, b[0], next);
- avf_check_for_error (node, rxve + 1, b[1], next + 1);
- avf_check_for_error (node, rxve + 2, b[2], next + 2);
- avf_check_for_error (node, rxve + 3, b[3], next + 3);
- }
- }
- else if (bt->current_config_index)
+ if (maybe_multiseg)
{
- b[0]->current_config_index = bt->current_config_index;
- b[1]->current_config_index = bt->current_config_index;
- b[2]->current_config_index = bt->current_config_index;
- b[3]->current_config_index = bt->current_config_index;
- vnet_buffer (b[0])->feature_arc_index =
- vnet_buffer (bt)->feature_arc_index;
- vnet_buffer (b[1])->feature_arc_index =
- vnet_buffer (bt)->feature_arc_index;
- vnet_buffer (b[2])->feature_arc_index =
- vnet_buffer (bt)->feature_arc_index;
- vnet_buffer (b[3])->feature_arc_index =
- vnet_buffer (bt)->feature_arc_index;
+ n_rx_bytes += avf_rx_attach_tail (vm, bt, b[0], qw1[0], tail + 0);
+ n_rx_bytes += avf_rx_attach_tail (vm, bt, b[1], qw1[1], tail + 1);
+ n_rx_bytes += avf_rx_attach_tail (vm, bt, b[2], qw1[2], tail + 2);
+ n_rx_bytes += avf_rx_attach_tail (vm, bt, b[3], qw1[3], tail + 3);
}
- clib_memcpy (vnet_buffer (b[0])->sw_if_index,
- vnet_buffer (bt)->sw_if_index, 2 * sizeof (u32));
- clib_memcpy (vnet_buffer (b[1])->sw_if_index,
- vnet_buffer (bt)->sw_if_index, 2 * sizeof (u32));
- clib_memcpy (vnet_buffer (b[2])->sw_if_index,
- vnet_buffer (bt)->sw_if_index, 2 * sizeof (u32));
- clib_memcpy (vnet_buffer (b[3])->sw_if_index,
- vnet_buffer (bt)->sw_if_index, 2 * sizeof (u32));
-
VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b[0]);
VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b[1]);
VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b[2]);
VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b[3]);
/* next */
- rxve += 4;
+ qw1 += 4;
+ tail += 4;
b += 4;
- next += 4;
- n_rxv -= 4;
+ n_left -= 4;
}
- while (n_rxv)
+ while (n_left)
{
- b[0]->current_length = rxve->length;
- n_rx_bytes += b[0]->current_length;
+ clib_memcpy_fast (b[0], bt, sizeof (vlib_buffer_t));
- if (!known_next)
- {
- next[0] = avf_find_next (rxve, b[0], 1);
- avf_check_for_error (node, rxve + 0, b[0], next);
- }
- else if (bt->current_config_index)
- {
- b[0]->current_config_index = bt->current_config_index;
- vnet_buffer (b[0])->feature_arc_index =
- vnet_buffer (bt)->feature_arc_index;
- }
+ n_rx_bytes += b[0]->current_length = qw1[0] >> AVF_RXD_LEN_SHIFT;
- clib_memcpy (vnet_buffer (b[0])->sw_if_index,
- vnet_buffer (bt)->sw_if_index, 2 * sizeof (u32));
+ if (maybe_multiseg)
+ n_rx_bytes += avf_rx_attach_tail (vm, bt, b[0], qw1[0], tail + 0);
VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b[0]);
/* next */
- rxve += 1;
+ qw1 += 1;
+ tail += 1;
b += 1;
- next += 1;
- n_rxv -= 1;
-
+ n_left -= 1;
}
return n_rx_bytes;
}
avf_per_thread_data_t *ptd =
vec_elt_at_index (am->per_thread_data, thr_idx);
avf_rxq_t *rxq = vec_elt_at_index (ad->rxqs, qid);
- avf_rx_vector_entry_t *rxve = 0;
- uword n_trace;
- avf_rx_desc_t *d;
- u32 n_rx_packets = 0, n_rx_bytes = 0;
- u16 mask = rxq->size - 1;
- u16 n_rxv = 0;
- u8 maybe_error = 0;
- u32 buffer_indices[AVF_RX_VECTOR_SZ], *bi;
- u16 nexts[AVF_RX_VECTOR_SZ], *next;
- vlib_buffer_t *bufs[AVF_RX_VECTOR_SZ];
+ u32 n_trace, n_rx_packets = 0, n_rx_bytes = 0;
+ u16 n_tail_desc = 0;
+ u64 or_qw1 = 0;
+ u32 *bi, *to_next, n_left_to_next;
vlib_buffer_t *bt = &ptd->buffer_template;
- int known_next = 0;
u32 next_index = VNET_DEVICE_INPUT_NEXT_ETHERNET_INPUT;
+ u16 next = rxq->next;
+ u16 size = rxq->size;
+ u16 mask = size - 1;
+ avf_rx_desc_t *d, *fd = rxq->descs;
+#ifdef CLIB_HAVE_VEC256
+ u64x4 q1x4, or_q1x4 = { 0 };
+ u64x4 dd_eop_mask4 = u64x4_splat (AVF_RXD_STATUS_DD | AVF_RXD_STATUS_EOP);
+#endif
+
+ /* is there anything on the ring */
+ d = fd + next;
+ if ((d->qword[1] & AVF_RXD_STATUS_DD) == 0)
+ goto done;
- STATIC_ASSERT_SIZEOF (avf_rx_vector_entry_t, 8);
- STATIC_ASSERT_OFFSET_OF (avf_rx_vector_entry_t, status, 0);
- STATIC_ASSERT_OFFSET_OF (avf_rx_vector_entry_t, length, 4);
- STATIC_ASSERT_OFFSET_OF (avf_rx_vector_entry_t, ptype, 6);
- STATIC_ASSERT_OFFSET_OF (avf_rx_vector_entry_t, error, 7);
+ if (PREDICT_FALSE (ad->per_interface_next_index != ~0))
+ next_index = ad->per_interface_next_index;
+ vlib_get_new_next_frame (vm, node, next_index, to_next, n_left_to_next);
/* fetch up to AVF_RX_VECTOR_SZ descriptors from the rx ring and copy the
needed data from each descriptor into the rx vector */
- d = rxq->descs + rxq->next;
- bi = buffer_indices;
- while (n_rxv < AVF_RX_VECTOR_SZ)
+ bi = to_next;
+
+ while (n_rx_packets < AVF_RX_VECTOR_SZ)
{
- if (rxq->next + 11 < rxq->size)
+ if (next + 11 < size)
{
int stride = 8;
- CLIB_PREFETCH ((void *) (rxq->descs + (rxq->next + stride)),
+ CLIB_PREFETCH ((void *) (fd + (next + stride)),
CLIB_CACHE_LINE_BYTES, LOAD);
- CLIB_PREFETCH ((void *) (rxq->descs + (rxq->next + stride + 1)),
+ CLIB_PREFETCH ((void *) (fd + (next + stride + 1)),
CLIB_CACHE_LINE_BYTES, LOAD);
- CLIB_PREFETCH ((void *) (rxq->descs + (rxq->next + stride + 2)),
+ CLIB_PREFETCH ((void *) (fd + (next + stride + 2)),
CLIB_CACHE_LINE_BYTES, LOAD);
- CLIB_PREFETCH ((void *) (rxq->descs + (rxq->next + stride + 3)),
+ CLIB_PREFETCH ((void *) (fd + (next + stride + 3)),
CLIB_CACHE_LINE_BYTES, LOAD);
}
#ifdef CLIB_HAVE_VEC256
- u64x4 q1x4, v, err4;
- u64x4 status_dd_eop_mask = u64x4_splat (0x3);
-
- if (n_rxv >= AVF_RX_VECTOR_SZ - 4)
+ if (n_rx_packets >= AVF_RX_VECTOR_SZ - 4 || next >= size - 4)
goto one_by_one;
- if (rxq->next >= rxq->size - 4)
- goto one_by_one;
-
- /* load quadword 1 of 4 descriptors into a 256-bit vector register */
- /* *INDENT-OFF* */
- q1x4 = (u64x4) {
- d[0].qword[1],
- d[1].qword[1],
- d[2].qword[1],
- d[3].qword[1]
- };
- /* *INDENT-ON* */
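+ /* gather qword 1 of 4 consecutive descriptors into one 256-bit register */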
+ q1x4 = u64x4_gather ((void *) &d[0].qword[1], (void *) &d[1].qword[1],
+ (void *) &d[2].qword[1], (void *) &d[3].qword[1]);
/* not all packets are ready or at least one of them is chained */
- if (!u64x4_is_equal (q1x4 & status_dd_eop_mask, status_dd_eop_mask))
+ if (!u64x4_is_equal (q1x4 & dd_eop_mask4, dd_eop_mask4))
goto one_by_one;
- /* shift and mask status, length, ptype and err */
- v = q1x4 & u64x4_splat ((u64) 0x3FFFFULL);
- v |= (q1x4 >> 6) & u64x4_splat ((u64) 0xFFFF << 32);
- v |= (q1x4 << 18) & u64x4_splat ((u64) 0xFF << 48);
- v |= err4 = (q1x4 << 37) & u64x4_splat ((u64) 0xFF << 56);
-
- u64x4_store_unaligned (v, ptd->rx_vector + n_rxv);
- maybe_error |= !u64x4_is_all_zero (err4);
-
- clib_memcpy (bi, rxq->bufs + rxq->next, 4 * sizeof (u32));
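+ /* accumulate status/error bits for the whole burst and stash the raw
+ qword 1 values; lengths are extracted later in avf_process_rx_burst */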
+ or_q1x4 |= q1x4;
+ u64x4_store_unaligned (q1x4, ptd->qw1s + n_rx_packets);
+ clib_memcpy_fast (bi, rxq->bufs + next, 4 * sizeof (u32));
/* next */
- rxq->next = (rxq->next + 4) & mask;
- d = rxq->descs + rxq->next;
- n_rxv += 4;
- rxq->n_enqueued -= 4;
+ next = (next + 4) & mask;
+ d = fd + next;
+ n_rx_packets += 4;
bi += 4;
continue;
one_by_one:
#endif
- CLIB_PREFETCH ((void *) (rxq->descs + ((rxq->next + 8) & mask)),
+ CLIB_PREFETCH ((void *) (fd + ((next + 8) & mask)),
CLIB_CACHE_LINE_BYTES, LOAD);
- if ((d->qword[1] & AVF_RX_DESC_STATUS_DD) == 0)
+
+ if (avf_rxd_is_not_dd (d))
break;
- rxve = ptd->rx_vector + n_rxv;
- bi[0] = rxq->bufs[rxq->next];
- rxve->status = avf_get_u64_bits ((void *) d, 8, 18, 0);
- rxve->error = avf_get_u64_bits ((void *) d, 8, 26, 19);
- rxve->ptype = avf_get_u64_bits ((void *) d, 8, 37, 30);
- rxve->length = avf_get_u64_bits ((void *) d, 8, 63, 38);
- maybe_error |= rxve->error;
+
+ bi[0] = rxq->bufs[next];
/* deal with chained buffers */
- while (PREDICT_FALSE ((d->qword[1] & AVF_RX_DESC_STATUS_EOP) == 0))
+ if (PREDICT_FALSE (avf_rxd_is_not_eop (d)))
{
- clib_error ("fixme");
+ u16 tail_desc = 0;
+ u16 tail_next = next;
+ avf_rx_tail_t *tail = ptd->tails + n_rx_packets;
+ avf_rx_desc_t *td;
+ do
+ {
+ tail_next = (tail_next + 1) & mask;
+ td = fd + tail_next;
+
+ /* bail out in case of incomplete transaction */
+ if (avf_rxd_is_not_dd (td))
+ goto no_more_desc;
+
+ or_qw1 |= tail->qw1s[tail_desc] = td[0].qword[1];
+ tail->buffers[tail_desc] = rxq->bufs[tail_next];
+ tail_desc++;
+ }
+ while (avf_rxd_is_not_eop (td));
+ next = tail_next;
+ n_tail_desc += tail_desc;
}
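+ /* OR qword 1 of each head descriptor into or_qw1 so error bits for the
+ whole burst can be tested once after the loop */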
+ or_qw1 |= ptd->qw1s[n_rx_packets] = d[0].qword[1];
+
/* next */
- rxq->next = (rxq->next + 1) & mask;
- d = rxq->descs + rxq->next;
- n_rxv++;
- rxq->n_enqueued--;
+ next = (next + 1) & mask;
+ d = fd + next;
+ n_rx_packets++;
bi++;
}
+no_more_desc:
- if (n_rxv == 0)
+ if (n_rx_packets == 0)
goto done;
+ rxq->next = next;
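+ /* tail descriptors consumed ring slots too, so account for both */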
+ rxq->n_enqueued -= n_rx_packets + n_tail_desc;
+
+#ifdef CLIB_HAVE_VEC256
+ or_qw1 |= or_q1x4[0] | or_q1x4[1] | or_q1x4[2] | or_q1x4[3];
+#endif
+
/* refill rx ring */
if (ad->flags & AVF_DEVICE_F_VA_DMA)
avf_rxq_refill (vm, node, rxq, 1 /* use_va_dma */ );
else
avf_rxq_refill (vm, node, rxq, 0 /* use_va_dma */ );
- vlib_get_buffers (vm, buffer_indices, bufs, n_rxv);
- n_rx_packets = n_rxv;
+ vlib_get_buffers (vm, to_next, ptd->bufs, n_rx_packets);
vnet_buffer (bt)->sw_if_index[VLIB_RX] = ad->sw_if_index;
vnet_buffer (bt)->sw_if_index[VLIB_TX] = ~0;
- /* receive burst of packets from DPDK PMD */
- if (PREDICT_FALSE (ad->per_interface_next_index != ~0))
- {
- known_next = 1;
- next_index = ad->per_interface_next_index;
- }
-
- /* as all packets belong to the same interface, the feature arc lookup
- can be done once and the result stored */
- if (PREDICT_FALSE (vnet_device_input_have_features (ad->sw_if_index)))
- {
- vnet_feature_start_device_input_x1 (ad->sw_if_index, &next_index, bt);
- known_next = 1;
- }
-
- if (known_next)
- {
- clib_memset_u16 (nexts, next_index, n_rxv);
- n_rx_bytes = avf_process_rx_burst (vm, node, bt, ptd->rx_vector, bufs,
- nexts, n_rxv, maybe_error, 1);
- vnet_buffer (bt)->feature_arc_index = 0;
- bt->current_config_index = 0;
- }
+ if (n_tail_desc)
+ n_rx_bytes = avf_process_rx_burst (vm, node, ptd, n_rx_packets, 1);
else
- n_rx_bytes = avf_process_rx_burst (vm, node, bt, ptd->rx_vector, bufs,
- nexts, n_rxv, maybe_error, 0);
+ n_rx_bytes = avf_process_rx_burst (vm, node, ptd, n_rx_packets, 0);
/* packet trace if enabled */
if (PREDICT_FALSE ((n_trace = vlib_get_trace_count (vm, node))))
{
- u32 n_left = n_rx_packets;
- bi = buffer_indices;
- next = nexts;
+ u32 n_left = n_rx_packets, i = 0, j;
+ bi = to_next;
+
while (n_trace && n_left)
{
vlib_buffer_t *b;
avf_input_trace_t *tr;
b = vlib_get_buffer (vm, bi[0]);
- vlib_trace_buffer (vm, node, next[0], b, /* follow_chain */ 0);
+ vlib_trace_buffer (vm, node, next_index, b, /* follow_chain */ 0);
tr = vlib_add_trace (vm, node, b, sizeof (*tr));
- tr->next_index = next[0];
+ tr->next_index = next_index;
tr->hw_if_index = ad->hw_if_index;
- clib_memcpy (&tr->rxve, rxve, sizeof (avf_rx_vector_entry_t));
+ tr->qw1s[0] = ptd->qw1s[i];
+ for (j = 1; j < AVF_RX_MAX_DESC_IN_CHAIN; j++)
+ tr->qw1s[j] = ptd->tails[i].qw1s[j - 1];
/* next */
n_trace--;
n_left--;
bi++;
- next++;
+ i++;
}
vlib_set_trace_count (vm, node, n_trace);
}
- vlib_buffer_enqueue_to_next (vm, node, buffer_indices, nexts, n_rx_packets);
+
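+ /* going straight to ethernet-input: mark the frame as coming from a
+ single interface and, if no descriptor in the burst reported an IPv4
+ checksum error, let ethernet-input skip checksum validation */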
+ if (PREDICT_TRUE (next_index == VNET_DEVICE_INPUT_NEXT_ETHERNET_INPUT))
+ {
+ vlib_next_frame_t *nf;
+ vlib_frame_t *f;
+ ethernet_input_frame_t *ef;
+ nf = vlib_node_runtime_get_next_frame (vm, node, next_index);
+ f = vlib_get_frame (vm, nf->frame_index);
+ f->flags = ETH_INPUT_FRAME_F_SINGLE_SW_IF_IDX;
+
+ ef = vlib_frame_scalar_args (f);
+ ef->sw_if_index = ad->sw_if_index;
+ ef->hw_if_index = ad->hw_if_index;
+
+ if ((or_qw1 & AVF_RXD_ERROR_IPE) == 0)
+ f->flags |= ETH_INPUT_FRAME_F_IP4_CKSUM_OK;
+ }
+
+ n_left_to_next -= n_rx_packets;
+ vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+
vlib_increment_combined_counter (vnm->interface_main.combined_sw_if_counters
+ VNET_INTERFACE_COUNTER_RX, thr_idx,
ad->hw_if_index, n_rx_packets, n_rx_bytes);