slot = (rxq->next - n_refill - 1) & mask;
n_refill &= ~7; /* round to 8 */
- n_alloc = vlib_buffer_alloc_to_ring (vm, rxq->bufs, slot, size, n_refill);
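+ /* pull refill buffers from the buffer pool associated with this rx queue */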
+ n_alloc =
+ vlib_buffer_alloc_to_ring_from_pool (vm, rxq->bufs, slot, size, n_refill,
+ rxq->buffer_pool_index);
if (PREDICT_FALSE (n_alloc != n_refill))
{
vlib_error_count (vm, node->node_index, AVF_INPUT_ERROR_BUFFER_ALLOC, 1);
if (n_alloc)
vlib_buffer_free_from_ring (vm, rxq->bufs, slot, size, n_alloc);
return;
}
- CLIB_MEMORY_STORE_BARRIER ();
- *(rxq->qrx_tail) = slot;
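+ /* the release store orders descriptor writes before the tail register
+    update, so the explicit store barrier is no longer needed */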
+ clib_atomic_store_rel_n (rxq->qrx_tail, slot);
}
#ifdef CLIB_HAVE_VEC256
u64x4 q1x4, or_q1x4 = { 0 };
u64x4 dd_eop_mask4 = u64x4_splat (AVF_RXD_STATUS_DD | AVF_RXD_STATUS_EOP);
+#elif defined(CLIB_HAVE_VEC128)
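+ /* 128-bit SIMD variant: descriptor qword[1] is processed as two 32-bit halves */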
+ u32x4 q1x4_lo, q1x4_hi, or_q1x4 = { 0 };
+ u32x4 dd_eop_mask4 = u32x4_splat (AVF_RXD_STATUS_DD | AVF_RXD_STATUS_EOP);
#endif
/* is there anything on the ring */
if (PREDICT_FALSE (ad->per_interface_next_index != ~0))
next_index = ad->per_interface_next_index;
+
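+ /* divert packets into the device-input feature arc when input features
+    are enabled on this interface */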
+ if (PREDICT_FALSE (vnet_device_input_have_features (ad->sw_if_index)))
+ vnet_feature_start_device_input_x1 (ad->sw_if_index, &next_index, bt);
+
vlib_get_new_next_frame (vm, node, next_index, to_next, n_left_to_next);
/* fetch up to AVF_RX_VECTOR_SZ from the rx ring, unflatten them and
copy needed data from descriptor to rx vector */
or_q1x4 |= q1x4;
u64x4_store_unaligned (q1x4, ptd->qw1s + n_rx_packets);
- clib_memcpy_fast (bi, rxq->bufs + next, 4 * sizeof (u32));
+#elif defined(CLIB_HAVE_VEC128)
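+ /* not enough room left in the rx vector, or the ring is about to wrap;
+    fall back to one-by-one processing */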
+ if (n_rx_packets >= AVF_RX_VECTOR_SZ - 4 || next >= size - 4)
+ goto one_by_one;
+
+ q1x4_lo =
+ u32x4_gather ((void *) &d[0].qword[1], (void *) &d[1].qword[1],
+ (void *) &d[2].qword[1], (void *) &d[3].qword[1]);
+
+ /* not all packets are ready or at least one of them is chained */
+ if (!u32x4_is_equal (q1x4_lo & dd_eop_mask4, dd_eop_mask4))
+ goto one_by_one;
+
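+ /* all four descriptors are complete; also gather the upper 32 bits of qword[1] */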
+ q1x4_hi = u32x4_gather (
+ (void *) &d[0].qword[1] + 4, (void *) &d[1].qword[1] + 4,
+ (void *) &d[2].qword[1] + 4, (void *) &d[3].qword[1] + 4);
+
+ or_q1x4 |= q1x4_lo;
+ ptd->qw1s[n_rx_packets + 0] = (u64) q1x4_hi[0] << 32 | (u64) q1x4_lo[0];
+ ptd->qw1s[n_rx_packets + 1] = (u64) q1x4_hi[1] << 32 | (u64) q1x4_lo[1];
+ ptd->qw1s[n_rx_packets + 2] = (u64) q1x4_hi[2] << 32 | (u64) q1x4_lo[2];
+ ptd->qw1s[n_rx_packets + 3] = (u64) q1x4_hi[3] << 32 | (u64) q1x4_lo[3];
+#endif
+#if defined(CLIB_HAVE_VEC256) || defined(CLIB_HAVE_VEC128)
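+ /* copy the four buffer indices for this batch out of the rx ring */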
+ vlib_buffer_copy_indices (bi, rxq->bufs + next, 4);
/* next */
next = (next + 4) & mask;
rxq->next = next;
rxq->n_enqueued -= n_rx_packets + n_tail_desc;
-#ifdef CLIB_HAVE_VEC256
+#if defined(CLIB_HAVE_VEC256) || defined(CLIB_HAVE_VEC128)
or_qw1 |= or_q1x4[0] | or_q1x4[1] | or_q1x4[2] | or_q1x4[3];
#endif
- /* refill rx ring */
- if (ad->flags & AVF_DEVICE_F_VA_DMA)
- avf_rxq_refill (vm, node, rxq, 1 /* use_va_dma */ );
- else
- avf_rxq_refill (vm, node, rxq, 0 /* use_va_dma */ );
-
vlib_get_buffers (vm, to_next, ptd->bufs, n_rx_packets);
vnet_buffer (bt)->sw_if_index[VLIB_RX] = ad->sw_if_index;
vnet_buffer (bt)->sw_if_index[VLIB_TX] = ~0;
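+ /* initialize the buffer template with this queue's pool index and a ref_count of 1 */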
+ bt->buffer_pool_index = rxq->buffer_pool_index;
+ bt->ref_count = 1;
if (n_tail_desc)
n_rx_bytes = avf_process_rx_burst (vm, node, ptd, n_rx_packets, 1);
while (n_trace && n_left)
{
- vlib_buffer_t *b;
- avf_input_trace_t *tr;
- b = vlib_get_buffer (vm, bi[0]);
- vlib_trace_buffer (vm, node, next_index, b, /* follow_chain */ 0);
- tr = vlib_add_trace (vm, node, b, sizeof (*tr));
- tr->next_index = next_index;
- tr->hw_if_index = ad->hw_if_index;
- tr->qw1s[0] = ptd->qw1s[i];
- for (j = 1; j < AVF_RX_MAX_DESC_IN_CHAIN; j++)
- tr->qw1s[j] = ptd->tails[i].qw1s[j - 1];
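+ /* vlib_trace_buffer can decline to trace a packet (e.g. when a trace
+    filter rejects it); only count packets that were actually traced */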
+ vlib_buffer_t *b = vlib_get_buffer (vm, bi[0]);
+ if (PREDICT_TRUE
+ (vlib_trace_buffer
+ (vm, node, next_index, b, /* follow_chain */ 0)))
+ {
+ avf_input_trace_t *tr =
+ vlib_add_trace (vm, node, b, sizeof (*tr));
+ tr->next_index = next_index;
+ tr->qid = qid;
+ tr->hw_if_index = ad->hw_if_index;
+ tr->qw1s[0] = ptd->qw1s[i];
+ for (j = 1; j < AVF_RX_MAX_DESC_IN_CHAIN; j++)
+ tr->qw1s[j] = ptd->tails[i].qw1s[j - 1];
+
+ n_trace--;
+ }
/* next */
- n_trace--;
n_left--;
bi++;
i++;
vlib_frame_t *f;
ethernet_input_frame_t *ef;
nf = vlib_node_runtime_get_next_frame (vm, node, next_index);
- f = vlib_get_frame (vm, nf->frame_index);
+ f = vlib_get_frame (vm, nf->frame);
f->flags = ETH_INPUT_FRAME_F_SINGLE_SW_IF_IDX;
ef = vlib_frame_scalar_args (f);
if ((or_qw1 & AVF_RXD_ERROR_IPE) == 0)
f->flags |= ETH_INPUT_FRAME_F_IP4_CKSUM_OK;
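+ /* the frame flags set above apply only to these packets; prevent
+    further packets from being appended to this frame */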
+ vlib_frame_no_append (f);
}
n_left_to_next -= n_rx_packets;
ad->hw_if_index, n_rx_packets, n_rx_bytes);
done:
+ /* refill rx ring */
+ if (ad->flags & AVF_DEVICE_F_VA_DMA)
+ avf_rxq_refill (vm, node, rxq, 1 /* use_va_dma */ );
+ else
+ avf_rxq_refill (vm, node, rxq, 0 /* use_va_dma */ );
+
return n_rx_packets;
}
vlib_frame_t * frame)
{
u32 n_rx = 0;
- avf_main_t *am = &avf_main;
vnet_device_input_runtime_t *rt = (void *) node->runtime_data;
vnet_device_and_queue_t *dq;
foreach_device_and_queue (dq, rt->devices_and_queues)
{
avf_device_t *ad;
- ad = vec_elt_at_index (am->devices, dq->dev_instance);
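+ /* resolve the device directly from its instance index */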
+ ad = avf_get_device (dq->dev_instance);
if ((ad->flags & AVF_DEVICE_F_ADMIN_UP) == 0)
continue;
n_rx += avf_device_input_inline (vm, node, frame, ad, dq->queue_id);
.state = VLIB_NODE_STATE_DISABLED,
.n_errors = AVF_INPUT_N_ERROR,
.error_strings = avf_input_error_strings,
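+ /* this node supports packet tracing */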
+ .flags = VLIB_NODE_FLAG_TRACE_SUPPORTED,
};
/* *INDENT-ON* */