#include <avf/avf.h>
#define foreach_avf_input_error \
- _(BUFFER_ALLOC, "buffer alloc error") \
- _(RX_PACKET_ERROR, "Rx packet errors")
+ _(BUFFER_ALLOC, "buffer alloc error")
typedef enum
{
#define _(f,s) AVF_INPUT_ERROR_##f,
  foreach_avf_input_error
#undef _
    AVF_INPUT_N_ERROR,
} avf_input_error_t;
-#define AVF_RX_DESC_STATUS(x) (1 << x)
-#define AVF_RX_DESC_STATUS_DD AVF_RX_DESC_STATUS(0)
-#define AVF_RX_DESC_STATUS_EOP AVF_RX_DESC_STATUS(1)
+#define AVF_INPUT_REFILL_TRESHOLD 32
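+/* refill only when more than AVF_INPUT_REFILL_TRESHOLD slots are free, so
+   buffer allocation and the tail register write are amortized over batches
+   of descriptors instead of happening per packet */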
static_always_inline void
-avf_input_trace (vlib_main_t * vm, vlib_node_runtime_t * node, u32 next0,
- vlib_buffer_t * b0, uword * n_trace, avf_device_t * ad,
- avf_rx_vector_entry_t * rxve)
+avf_rx_desc_write (avf_rx_desc_t * d, u64 addr)
{
- avf_input_trace_t *tr;
- vlib_trace_buffer (vm, node, next0, b0, /* follow_chain */ 0);
- vlib_set_trace_count (vm, node, --(*n_trace));
- tr = vlib_add_trace (vm, node, b0, sizeof (*tr));
- tr->next_index = next0;
- tr->hw_if_index = ad->hw_if_index;
- clib_memcpy (&tr->rxve, rxve, sizeof (avf_rx_vector_entry_t));
+#ifdef CLIB_HAVE_VEC256
+ u64x4 v = { addr, 0, 0, 0 };
+ u64x4_store_unaligned (v, (void *) d);
+#else
+ d->qword[0] = addr;
+ d->qword[1] = 0;
+#endif
}
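+
+/* avf_rx_desc_t is a 32-byte descriptor; the AVX2 path above clears all
+   four qwords with a single 32-byte store, while the scalar path writes
+   only the two address qwords the device consumes */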
-#define AVF_INPUT_REFILL_TRESHOLD 32
static_always_inline void
avf_rxq_refill (vlib_main_t * vm, vlib_node_runtime_t * node, avf_rxq_t * rxq,
- int use_iova)
+ int use_va_dma)
{
- u16 n_refill, mask, n_alloc, slot;
- avf_rx_desc_t *d;
-
- n_refill = rxq->size - 1 - rxq->n_bufs;
+ u16 n_refill, mask, n_alloc, slot, size;
+ vlib_buffer_t *b[8];
+ avf_rx_desc_t *d, *first_d;
+ void *p[8];
+
+ size = rxq->size;
+ mask = size - 1;
+ n_refill = mask - rxq->n_enqueued;
if (PREDICT_TRUE (n_refill <= AVF_INPUT_REFILL_TRESHOLD))
return;
- mask = rxq->size - 1;
slot = (rxq->next - n_refill - 1) & mask;
  n_refill &= ~7; /* round down to a multiple of 8 */
- n_alloc = vlib_buffer_alloc_to_ring (vm, rxq->bufs, slot, rxq->size,
- n_refill);
+ n_alloc =
+ vlib_buffer_alloc_to_ring_from_pool (vm, rxq->bufs, slot, size, n_refill,
+ rxq->buffer_pool_index);
if (PREDICT_FALSE (n_alloc != n_refill))
{
vlib_error_count (vm, node->node_index,
AVF_INPUT_ERROR_BUFFER_ALLOC, 1);
if (n_alloc)
- vlib_buffer_free (vm, rxq->bufs + slot, n_alloc);
+ vlib_buffer_free_from_ring (vm, rxq->bufs, slot, size, n_alloc);
return;
}
- rxq->n_bufs += n_alloc;
+ rxq->n_enqueued += n_alloc;
+ first_d = rxq->descs;
+
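+  /* slot is 8-aligned here: rxq->next and rxq->n_enqueued move in lockstep
+     as descriptors are consumed, and refills always enqueue a multiple of
+     8 buffers */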
+ ASSERT (slot % 8 == 0);
- while (n_alloc--)
+ while (n_alloc >= 8)
{
- u64 addr;
- d = ((avf_rx_desc_t *) rxq->descs) + slot;
- if (use_iova)
+ d = first_d + slot;
+
+ if (use_va_dma)
{
- vlib_buffer_t *b = vlib_get_buffer (vm, rxq->bufs[slot]);
- addr = pointer_to_uword (b->data);
+ vlib_get_buffers_with_offset (vm, rxq->bufs + slot, p, 8,
+ sizeof (vlib_buffer_t));
+ avf_rx_desc_write (d + 0, pointer_to_uword (p[0]));
+ avf_rx_desc_write (d + 1, pointer_to_uword (p[1]));
+ avf_rx_desc_write (d + 2, pointer_to_uword (p[2]));
+ avf_rx_desc_write (d + 3, pointer_to_uword (p[3]));
+ avf_rx_desc_write (d + 4, pointer_to_uword (p[4]));
+ avf_rx_desc_write (d + 5, pointer_to_uword (p[5]));
+ avf_rx_desc_write (d + 6, pointer_to_uword (p[6]));
+ avf_rx_desc_write (d + 7, pointer_to_uword (p[7]));
}
else
- addr = vlib_get_buffer_data_physical_address (vm, rxq->bufs[slot]);
- d->qword[0] = addr;
- d->qword[1] = 0;
- slot = (slot + 1) & mask;
+ {
+ vlib_get_buffers (vm, rxq->bufs + slot, b, 8);
+ avf_rx_desc_write (d + 0, vlib_buffer_get_pa (vm, b[0]));
+ avf_rx_desc_write (d + 1, vlib_buffer_get_pa (vm, b[1]));
+ avf_rx_desc_write (d + 2, vlib_buffer_get_pa (vm, b[2]));
+ avf_rx_desc_write (d + 3, vlib_buffer_get_pa (vm, b[3]));
+ avf_rx_desc_write (d + 4, vlib_buffer_get_pa (vm, b[4]));
+ avf_rx_desc_write (d + 5, vlib_buffer_get_pa (vm, b[5]));
+ avf_rx_desc_write (d + 6, vlib_buffer_get_pa (vm, b[6]));
+ avf_rx_desc_write (d + 7, vlib_buffer_get_pa (vm, b[7]));
+ }
+
+ /* next */
+ slot = (slot + 8) & mask;
+ n_alloc -= 8;
}
- CLIB_MEMORY_BARRIER ();
- *(rxq->qrx_tail) = slot;
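+  /* release store: the descriptor writes above must be visible to the
+     device before the new tail value is */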
+ clib_atomic_store_rel_n (rxq->qrx_tail, slot);
}
-static_always_inline void
-avf_check_for_error (vlib_node_runtime_t * node, avf_rx_vector_entry_t * rxve,
- vlib_buffer_t * b, u32 * next)
+
+static_always_inline uword
+avf_rx_attach_tail (vlib_main_t * vm, vlib_buffer_t * bt, vlib_buffer_t * b,
+ u64 qw1, avf_rx_tail_t * t)
{
- avf_main_t *am = &avf_main;
- avf_ptype_t *ptype;
- if (PREDICT_FALSE (rxve->error))
+ vlib_buffer_t *hb = b;
+ u32 tlnifb = 0, i = 0;
+
+ if (qw1 & AVF_RXD_STATUS_EOP)
+ return 0;
+
+ while ((qw1 & AVF_RXD_STATUS_EOP) == 0)
{
- b->error = node->errors[AVF_INPUT_ERROR_RX_PACKET_ERROR];
- ptype = am->ptypes + rxve->ptype;
- /* retract */
- vlib_buffer_advance (b, --ptype->buffer_advance);
- *next = VNET_DEVICE_INPUT_NEXT_DROP;
+ ASSERT (i < AVF_RX_MAX_DESC_IN_CHAIN - 1);
+ ASSERT (qw1 & AVF_RXD_STATUS_DD);
+ qw1 = t->qw1s[i];
+ b->next_buffer = t->buffers[i];
+ b->flags |= VLIB_BUFFER_NEXT_PRESENT;
+ b = vlib_get_buffer (vm, b->next_buffer);
+ vlib_buffer_copy_template (b, bt);
+ tlnifb += b->current_length = qw1 >> AVF_RXD_LEN_SHIFT;
+ i++;
}
+
+ hb->total_length_not_including_first_buffer = tlnifb;
+ hb->flags |= VLIB_BUFFER_TOTAL_LENGTH_VALID;
+ return tlnifb;
}
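+
+/* e.g. for a 3-descriptor chain with segment lengths 2048, 2048 and 512,
+   the caller has already set current_length = 2048 on the head buffer;
+   avf_rx_attach_tail links the two tail buffers and returns 2560 */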
-static_always_inline u32
-avf_find_next (avf_rx_vector_entry_t * rxve, vlib_buffer_t * b,
- int maybe_tagged)
+static_always_inline uword
+avf_process_rx_burst (vlib_main_t * vm, vlib_node_runtime_t * node,
+ avf_per_thread_data_t * ptd, u32 n_left,
+ int maybe_multiseg)
{
- avf_main_t *am = &avf_main;
- ethernet_header_t *e = (ethernet_header_t *) b->data;
- avf_ptype_t *ptype;
- if (maybe_tagged && ethernet_frame_is_tagged (e->type))
- return VNET_DEVICE_INPUT_NEXT_ETHERNET_INPUT;
- ptype = am->ptypes + rxve->ptype;
- vlib_buffer_advance (b, ptype->buffer_advance);
- b->flags |= ptype->flags;
- return ptype->next_node;
-}
+ vlib_buffer_t bt;
+ vlib_buffer_t **b = ptd->bufs;
+ u64 *qw1 = ptd->qw1s;
+ avf_rx_tail_t *tail = ptd->tails;
+ uword n_rx_bytes = 0;
+
+ /* copy template into local variable - will save per packet load */
+ vlib_buffer_copy_template (&bt, &ptd->buffer_template);
+
+ while (n_left >= 4)
+ {
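+      /* prefetch buffer headers two quads ahead so they are warm in cache
+         by the time the template copies below touch them */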
+ if (n_left >= 12)
+ {
+ vlib_prefetch_buffer_header (b[8], LOAD);
+ vlib_prefetch_buffer_header (b[9], LOAD);
+ vlib_prefetch_buffer_header (b[10], LOAD);
+ vlib_prefetch_buffer_header (b[11], LOAD);
+ }
+
+ vlib_buffer_copy_template (b[0], &bt);
+ vlib_buffer_copy_template (b[1], &bt);
+ vlib_buffer_copy_template (b[2], &bt);
+ vlib_buffer_copy_template (b[3], &bt);
+
+ n_rx_bytes += b[0]->current_length = qw1[0] >> AVF_RXD_LEN_SHIFT;
+ n_rx_bytes += b[1]->current_length = qw1[1] >> AVF_RXD_LEN_SHIFT;
+ n_rx_bytes += b[2]->current_length = qw1[2] >> AVF_RXD_LEN_SHIFT;
+ n_rx_bytes += b[3]->current_length = qw1[3] >> AVF_RXD_LEN_SHIFT;
+
+ if (maybe_multiseg)
+ {
+ n_rx_bytes += avf_rx_attach_tail (vm, &bt, b[0], qw1[0], tail + 0);
+ n_rx_bytes += avf_rx_attach_tail (vm, &bt, b[1], qw1[1], tail + 1);
+ n_rx_bytes += avf_rx_attach_tail (vm, &bt, b[2], qw1[2], tail + 2);
+ n_rx_bytes += avf_rx_attach_tail (vm, &bt, b[3], qw1[3], tail + 3);
+ }
+
+ VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b[0]);
+ VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b[1]);
+ VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b[2]);
+ VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b[3]);
+ /* next */
+ qw1 += 4;
+ tail += 4;
+ b += 4;
+ n_left -= 4;
+ }
+ while (n_left)
+ {
+ vlib_buffer_copy_template (b[0], &bt);
+
+ n_rx_bytes += b[0]->current_length = qw1[0] >> AVF_RXD_LEN_SHIFT;
+
+ if (maybe_multiseg)
+ n_rx_bytes += avf_rx_attach_tail (vm, &bt, b[0], qw1[0], tail + 0);
+
+ VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b[0]);
+
+ /* next */
+ qw1 += 1;
+ tail += 1;
+ b += 1;
+ n_left -= 1;
+ }
+ return n_rx_bytes;
+}
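+
+/* maybe_multiseg above is a compile-time constant: the single-segment
+   caller gets a specialized variant with the tail-attach calls optimized
+   out */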
static_always_inline uword
avf_device_input_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
- vlib_frame_t * frame, avf_device_t * ad, u16 qid,
- int with_features_or_tracing)
+ vlib_frame_t * frame, avf_device_t * ad, u16 qid)
{
  avf_main_t *am = &avf_main;
  vnet_main_t *vnm = vnet_get_main ();
  u32 thr_idx = vlib_get_thread_index ();
  avf_per_thread_data_t *ptd =
    vec_elt_at_index (am->per_thread_data, thr_idx);
avf_rxq_t *rxq = vec_elt_at_index (ad->rxqs, qid);
- avf_rx_vector_entry_t *rxve;
- uword n_trace = vlib_get_trace_count (vm, node);
+ u32 n_trace, n_rx_packets = 0, n_rx_bytes = 0;
+ u16 n_tail_desc = 0;
+ u64 or_qw1 = 0;
+ u32 *bi, *to_next, n_left_to_next;
+ vlib_buffer_t *bt = &ptd->buffer_template;
u32 next_index = VNET_DEVICE_INPUT_NEXT_ETHERNET_INPUT;
- avf_rx_desc_t *d;
- u32 *to_next = 0;
- u32 n_rx_packets = 0;
- u32 n_rx_bytes = 0;
- u32 sw_if_idx[VLIB_N_RX_TX] = {[VLIB_RX] = ad->sw_if_index,[VLIB_TX] = ~0 };
- u16 mask = rxq->size - 1;
- u16 n_rxv = 0;
- u8 maybe_error = 0;
+ u16 next = rxq->next;
+ u16 size = rxq->size;
+ u16 mask = size - 1;
+ avf_rx_desc_t *d, *fd = rxq->descs;
+#ifdef CLIB_HAVE_VEC256
+ u64x4 q1x4, or_q1x4 = { 0 };
+ u64x4 dd_eop_mask4 = u64x4_splat (AVF_RXD_STATUS_DD | AVF_RXD_STATUS_EOP);
+#endif
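+  /* dd_eop_mask4 above tests the DD and EOP bits of four descriptors at
+     once; a quad is consumed in one step only when all four descriptors
+     are complete single-segment packets */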
+
+ /* is there anything on the ring */
+ d = fd + next;
+ if ((d->qword[1] & AVF_RXD_STATUS_DD) == 0)
+ goto done;
+
+ if (PREDICT_FALSE (ad->per_interface_next_index != ~0))
+ next_index = ad->per_interface_next_index;
+
+ if (PREDICT_FALSE (vnet_device_input_have_features (ad->sw_if_index)))
+ vnet_feature_start_device_input_x1 (ad->sw_if_index, &next_index, bt);
+
+ vlib_get_new_next_frame (vm, node, next_index, to_next, n_left_to_next);
  /* fetch up to AVF_RX_VECTOR_SZ completed descriptors from the rx ring,
     copying buffer indices into the frame and descriptor qword #1 into
     the per-thread data */
- d = rxq->descs + rxq->next;
- while ((d->qword[1] & AVF_RX_DESC_STATUS_DD) && n_rxv < AVF_RX_VECTOR_SZ)
- {
- u16 next_pf = (rxq->next + 8) & mask;
- CLIB_PREFETCH (rxq->descs + next_pf, CLIB_CACHE_LINE_BYTES, LOAD);
- rxve = ptd->rx_vector + n_rxv;
- rxve->bi = rxq->bufs[rxq->next];
- rxve->status = avf_get_u64_bits (d, 8, 18, 0);
- rxve->error = avf_get_u64_bits (d, 8, 26, 19);
- rxve->ptype = avf_get_u64_bits (d, 8, 37, 30);
- rxve->length = avf_get_u64_bits (d, 8, 63, 38);
- maybe_error |= rxve->error;
+ bi = to_next;
- /* deal with chained buffers */
- while (PREDICT_FALSE ((d->qword[1] & AVF_RX_DESC_STATUS_EOP) == 0))
+ while (n_rx_packets < AVF_RX_VECTOR_SZ)
+ {
+ if (next + 11 < size)
{
- clib_error ("fixme");
+ int stride = 8;
+ CLIB_PREFETCH ((void *) (fd + (next + stride)),
+ CLIB_CACHE_LINE_BYTES, LOAD);
+ CLIB_PREFETCH ((void *) (fd + (next + stride + 1)),
+ CLIB_CACHE_LINE_BYTES, LOAD);
+ CLIB_PREFETCH ((void *) (fd + (next + stride + 2)),
+ CLIB_CACHE_LINE_BYTES, LOAD);
+ CLIB_PREFETCH ((void *) (fd + (next + stride + 3)),
+ CLIB_CACHE_LINE_BYTES, LOAD);
}
- /* next */
- rxq->next = (rxq->next + 1) & mask;
- d = rxq->descs + rxq->next;
- n_rxv++;
- rxq->n_bufs--;
- }
+#ifdef CLIB_HAVE_VEC256
+ if (n_rx_packets >= AVF_RX_VECTOR_SZ - 4 || next >= size - 4)
+ goto one_by_one;
- if (n_rxv == 0)
- return 0;
+ q1x4 = u64x4_gather ((void *) &d[0].qword[1], (void *) &d[1].qword[1],
+ (void *) &d[2].qword[1], (void *) &d[3].qword[1]);
- /* refill rx ring */
- if (ad->flags & AVF_DEVICE_F_IOVA)
- avf_rxq_refill (vm, node, rxq, 1 /* use_iova */ );
- else
- avf_rxq_refill (vm, node, rxq, 0 /* use_iova */ );
+ /* not all packets are ready or at least one of them is chained */
+ if (!u64x4_is_equal (q1x4 & dd_eop_mask4, dd_eop_mask4))
+ goto one_by_one;
- n_rx_packets = n_rxv;
- rxve = ptd->rx_vector;
- while (n_rxv)
- {
- u32 n_left_to_next;
- u32 bi0, bi1, bi2, bi3;
- vlib_buffer_t *b0, *b1, *b2, *b3;
- u32 next0, next1, next2, next3;
+ or_q1x4 |= q1x4;
+ u64x4_store_unaligned (q1x4, ptd->qw1s + n_rx_packets);
+ vlib_buffer_copy_indices (bi, rxq->bufs + next, 4);
- vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+ /* next */
+ next = (next + 4) & mask;
+ d = fd + next;
+ n_rx_packets += 4;
+ bi += 4;
+ continue;
+ one_by_one:
+#endif
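+      /* scalar path: handles the end of the ring, descriptors that are
+         not yet complete, and multi-segment (chained) packets */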
+ CLIB_PREFETCH ((void *) (fd + ((next + 8) & mask)),
+ CLIB_CACHE_LINE_BYTES, LOAD);
+
+ if (avf_rxd_is_not_dd (d))
+ break;
+
+ bi[0] = rxq->bufs[next];
- while (n_rxv >= 12 && n_left_to_next >= 4)
+ /* deal with chained buffers */
+ if (PREDICT_FALSE (avf_rxd_is_not_eop (d)))
{
- vlib_buffer_t *p;
- p = vlib_get_buffer (vm, rxve[8].bi);
- vlib_prefetch_buffer_header (p, LOAD);
- CLIB_PREFETCH (p->data, CLIB_CACHE_LINE_BYTES, LOAD);
-
- p = vlib_get_buffer (vm, rxve[9].bi);
- vlib_prefetch_buffer_header (p, LOAD);
- CLIB_PREFETCH (p->data, CLIB_CACHE_LINE_BYTES, LOAD);
-
- p = vlib_get_buffer (vm, rxve[10].bi);
- vlib_prefetch_buffer_header (p, LOAD);
- CLIB_PREFETCH (p->data, CLIB_CACHE_LINE_BYTES, LOAD);
-
- p = vlib_get_buffer (vm, rxve[11].bi);
- vlib_prefetch_buffer_header (p, LOAD);
- CLIB_PREFETCH (p->data, CLIB_CACHE_LINE_BYTES, LOAD);
-
- to_next[0] = bi0 = rxve[0].bi;
- to_next[1] = bi1 = rxve[1].bi;
- to_next[2] = bi2 = rxve[2].bi;
- to_next[3] = bi3 = rxve[3].bi;
-
- b0 = vlib_get_buffer (vm, bi0);
- b1 = vlib_get_buffer (vm, bi1);
- b2 = vlib_get_buffer (vm, bi2);
- b3 = vlib_get_buffer (vm, bi3);
-
- b0->current_length = rxve[0].length;
- b1->current_length = rxve[1].length;
- b2->current_length = rxve[2].length;
- b3->current_length = rxve[3].length;
-
- n_rx_bytes += b0->current_length;
- n_rx_bytes += b1->current_length;
- n_rx_bytes += b2->current_length;
- n_rx_bytes += b3->current_length;
-
- if (PREDICT_TRUE (ad->per_interface_next_index == ~0))
- {
- ethernet_header_t *e0, *e1, *e2, *e3;
-
- e0 = (ethernet_header_t *) b0->data;
- e1 = (ethernet_header_t *) b1->data;
- e2 = (ethernet_header_t *) b2->data;
- e3 = (ethernet_header_t *) b3->data;
-
- if (ethernet_frame_is_any_tagged_x4 (e0->type, e1->type,
- e2->type, e3->type))
- {
- next0 = avf_find_next (rxve, b0, 1);
- next1 = avf_find_next (rxve + 1, b1, 1);
- next2 = avf_find_next (rxve + 2, b2, 1);
- next3 = avf_find_next (rxve + 3, b3, 1);
- }
- else
- {
- next0 = avf_find_next (rxve, b0, 0);
- next1 = avf_find_next (rxve + 1, b1, 0);
- next2 = avf_find_next (rxve + 2, b2, 0);
- next3 = avf_find_next (rxve + 3, b3, 0);
- }
-
- if (with_features_or_tracing)
- vnet_feature_start_device_input_x4 (ad->sw_if_index, &next0,
- &next1, &next2, &next3,
- b0, b1, b2, b3);
-
- if (PREDICT_FALSE (maybe_error))
- {
- avf_check_for_error (node, rxve + 0, b0, &next0);
- avf_check_for_error (node, rxve + 1, b1, &next1);
- avf_check_for_error (node, rxve + 2, b2, &next2);
- avf_check_for_error (node, rxve + 3, b3, &next3);
- }
- }
- else
- next0 = next1 = next2 = next3 = ad->per_interface_next_index;
-
- clib_memcpy (vnet_buffer (b0)->sw_if_index, sw_if_idx,
- sizeof (sw_if_idx));
- clib_memcpy (vnet_buffer (b1)->sw_if_index, sw_if_idx,
- sizeof (sw_if_idx));
- clib_memcpy (vnet_buffer (b2)->sw_if_index, sw_if_idx,
- sizeof (sw_if_idx));
- clib_memcpy (vnet_buffer (b3)->sw_if_index, sw_if_idx,
- sizeof (sw_if_idx));
-
- VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b0);
- VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b1);
- VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b2);
- VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b3);
-
- if (with_features_or_tracing && PREDICT_FALSE (n_trace))
+ u16 tail_desc = 0;
+ u16 tail_next = next;
+ avf_rx_tail_t *tail = ptd->tails + n_rx_packets;
+ avf_rx_desc_t *td;
+ do
{
- avf_input_trace (vm, node, next0, b0, &n_trace, ad, rxve);
- if (n_trace)
- avf_input_trace (vm, node, next1, b1, &n_trace, ad, rxve + 1);
- if (n_trace)
- avf_input_trace (vm, node, next2, b2, &n_trace, ad, rxve + 2);
- if (n_trace)
- avf_input_trace (vm, node, next3, b3, &n_trace, ad, rxve + 3);
- }
+ tail_next = (tail_next + 1) & mask;
+ td = fd + tail_next;
- /* next */
- to_next += 4;
- n_left_to_next -= 4;
- rxve += 4;
- n_rxv -= 4;
-
- /* enqueue */
- vlib_validate_buffer_enqueue_x4 (vm, node, next_index, to_next,
- n_left_to_next, bi0, bi1, bi2, bi3,
- next0, next1, next2, next3);
+ /* bail out in case of incomplete transaction */
+ if (avf_rxd_is_not_dd (td))
+ goto no_more_desc;
+
+ or_qw1 |= tail->qw1s[tail_desc] = td[0].qword[1];
+ tail->buffers[tail_desc] = rxq->bufs[tail_next];
+ tail_desc++;
+ }
+ while (avf_rxd_is_not_eop (td));
+ next = tail_next;
+ n_tail_desc += tail_desc;
}
- while (n_rxv && n_left_to_next)
- {
- bi0 = rxve[0].bi;
- to_next[0] = bi0;
- b0 = vlib_get_buffer (vm, bi0);
- b0->current_length = rxve->length;
- n_rx_bytes += b0->current_length;
+ or_qw1 |= ptd->qw1s[n_rx_packets] = d[0].qword[1];
- if (PREDICT_TRUE (ad->per_interface_next_index == ~0))
- {
- next0 = avf_find_next (rxve, b0, 1);
- if (with_features_or_tracing)
- vnet_feature_start_device_input_x1 (ad->sw_if_index, &next0,
- b0);
- avf_check_for_error (node, rxve + 0, b0, &next0);
- }
- else
- next0 = ad->per_interface_next_index;
+ /* next */
+ next = (next + 1) & mask;
+ d = fd + next;
+ n_rx_packets++;
+ bi++;
+ }
+no_more_desc:
+
+ if (n_rx_packets == 0)
+ goto done;
+
+ rxq->next = next;
+ rxq->n_enqueued -= n_rx_packets + n_tail_desc;
- clib_memcpy (vnet_buffer (b0)->sw_if_index, sw_if_idx,
- sizeof (sw_if_idx));
+#ifdef CLIB_HAVE_VEC256
+ or_qw1 |= or_q1x4[0] | or_q1x4[1] | or_q1x4[2] | or_q1x4[3];
+#endif
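+  /* or_qw1 now holds the OR of qword #1 across all received descriptors;
+     it is used below to decide if IPv4 checksum validation can be skipped */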
+
+ vlib_get_buffers (vm, to_next, ptd->bufs, n_rx_packets);
+
+ vnet_buffer (bt)->sw_if_index[VLIB_RX] = ad->sw_if_index;
+ vnet_buffer (bt)->sw_if_index[VLIB_TX] = ~0;
+ bt->buffer_pool_index = rxq->buffer_pool_index;
+ bt->ref_count = 1;
- VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b0);
- if (with_features_or_tracing && PREDICT_FALSE (n_trace > 0))
- avf_input_trace (vm, node, next0, b0, &n_trace, ad, rxve);
+ if (n_tail_desc)
+ n_rx_bytes = avf_process_rx_burst (vm, node, ptd, n_rx_packets, 1);
+ else
+ n_rx_bytes = avf_process_rx_burst (vm, node, ptd, n_rx_packets, 0);
+
+ /* packet trace if enabled */
+ if (PREDICT_FALSE ((n_trace = vlib_get_trace_count (vm, node))))
+ {
+ u32 n_left = n_rx_packets, i = 0, j;
+ bi = to_next;
+
+ while (n_trace && n_left)
+ {
+ vlib_buffer_t *b;
+ avf_input_trace_t *tr;
+ b = vlib_get_buffer (vm, bi[0]);
+ vlib_trace_buffer (vm, node, next_index, b, /* follow_chain */ 0);
+ tr = vlib_add_trace (vm, node, b, sizeof (*tr));
+ tr->next_index = next_index;
+ tr->qid = qid;
+ tr->hw_if_index = ad->hw_if_index;
+ tr->qw1s[0] = ptd->qw1s[i];
+ for (j = 1; j < AVF_RX_MAX_DESC_IN_CHAIN; j++)
+ tr->qw1s[j] = ptd->tails[i].qw1s[j - 1];
/* next */
- to_next += 1;
- n_left_to_next -= 1;
- rxve += 1;
- n_rxv -= 1;
-
- /* enqueue */
- vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
- n_left_to_next, bi0, next0);
+ n_trace--;
+ n_left--;
+ bi++;
+ i++;
}
- vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+ vlib_set_trace_count (vm, node, n_trace);
+ }
+
+ if (PREDICT_TRUE (next_index == VNET_DEVICE_INPUT_NEXT_ETHERNET_INPUT))
+ {
+ vlib_next_frame_t *nf;
+ vlib_frame_t *f;
+ ethernet_input_frame_t *ef;
+ nf = vlib_node_runtime_get_next_frame (vm, node, next_index);
+ f = vlib_get_frame (vm, nf->frame);
+ f->flags = ETH_INPUT_FRAME_F_SINGLE_SW_IF_IDX;
+
+ ef = vlib_frame_scalar_args (f);
+ ef->sw_if_index = ad->sw_if_index;
+ ef->hw_if_index = ad->hw_if_index;
+
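+      /* no packet in this burst had the IPE (IPv4 checksum error) bit
+         set, so ethernet-input may skip checksum validation */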
+ if ((or_qw1 & AVF_RXD_ERROR_IPE) == 0)
+ f->flags |= ETH_INPUT_FRAME_F_IP4_CKSUM_OK;
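+
+      /* scalar args are per-frame, so prevent other interfaces from
+         appending packets to this frame */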
+ vlib_frame_no_append (f);
}
+ n_left_to_next -= n_rx_packets;
+ vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+
vlib_increment_combined_counter (vnm->interface_main.combined_sw_if_counters
+ VNET_INTERFACE_COUNTER_RX, thr_idx,
ad->hw_if_index, n_rx_packets, n_rx_bytes);
+done:
+ /* refill rx ring */
+ if (ad->flags & AVF_DEVICE_F_VA_DMA)
+ avf_rxq_refill (vm, node, rxq, 1 /* use_va_dma */ );
+ else
+ avf_rxq_refill (vm, node, rxq, 0 /* use_va_dma */ );
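+  /* use_va_dma is a compile-time constant here, so each branch
+     instantiates a specialized copy of avf_rxq_refill */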
+
return n_rx_packets;
}
VLIB_NODE_FN (avf_input_node) (vlib_main_t * vm, vlib_node_runtime_t * node,
                               vlib_frame_t * frame)
{
u32 n_rx = 0;
- avf_main_t *am = &avf_main;
vnet_device_input_runtime_t *rt = (void *) node->runtime_data;
vnet_device_and_queue_t *dq;
foreach_device_and_queue (dq, rt->devices_and_queues)
{
avf_device_t *ad;
- ad = vec_elt_at_index (am->devices, dq->dev_instance);
+ ad = avf_get_device (dq->dev_instance);
if ((ad->flags & AVF_DEVICE_F_ADMIN_UP) == 0)
continue;
- if (vnet_device_input_have_features (ad->sw_if_index) ||
- vlib_get_trace_count (vm, node))
- n_rx += avf_device_input_inline (vm, node, frame, ad, dq->queue_id, 1);
- else
- n_rx += avf_device_input_inline (vm, node, frame, ad, dq->queue_id, 0);
+ n_rx += avf_device_input_inline (vm, node, frame, ad, dq->queue_id);
}
return n_rx;
}
-#ifndef CLIB_MARCH_VARIANT
/* *INDENT-OFF* */
VLIB_REGISTER_NODE (avf_input_node) = {
.name = "avf-input",
.state = VLIB_NODE_STATE_DISABLED,
.n_errors = AVF_INPUT_N_ERROR,
.error_strings = avf_input_error_strings,
+ .flags = VLIB_NODE_FLAG_TRACE_SUPPORTED,
};
-#endif
/* *INDENT-ON* */