slot = (rxq->next - n_refill - 1) & mask;
n_refill &= ~7; /* round down to a multiple of 8 */
- n_alloc = vlib_buffer_alloc_to_ring (vm, rxq->bufs, slot, size, n_refill);
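+ /* allocate from the buffer pool bound to this rx queue, so new
+    buffers stay consistent with rxq->buffer_pool_index */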
+ n_alloc =
+ vlib_buffer_alloc_to_ring_from_pool (vm, rxq->bufs, slot, size, n_refill,
+ rxq->buffer_pool_index);
if (PREDICT_FALSE (n_alloc != n_refill))
{
if (PREDICT_FALSE (ad->per_interface_next_index != ~0))
next_index = ad->per_interface_next_index;
+
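+ /* if input features are configured on this interface, divert packets
+    into the device-input feature arc via the buffer template */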
+ if (PREDICT_FALSE (vnet_device_input_have_features (ad->sw_if_index)))
+ vnet_feature_start_device_input_x1 (ad->sw_if_index, &next_index, bt);
+
vlib_get_new_next_frame (vm, node, next_index, to_next, n_left_to_next);
/* fetch up to AVF_RX_VECTOR_SZ from the rx ring, unflatten them and
   copy needed data from descriptor to rx vector */
or_qw1 |= or_q1x4[0] | or_q1x4[1] | or_q1x4[2] | or_q1x4[3];
#endif
- /* refill rx ring */
- if (ad->flags & AVF_DEVICE_F_VA_DMA)
- avf_rxq_refill (vm, node, rxq, 1 /* use_va_dma */ );
- else
- avf_rxq_refill (vm, node, rxq, 0 /* use_va_dma */ );
-
vlib_get_buffers (vm, to_next, ptd->bufs, n_rx_packets);
vnet_buffer (bt)->sw_if_index[VLIB_RX] = ad->sw_if_index;
vnet_buffer (bt)->sw_if_index[VLIB_TX] = ~0;
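+ /* template fields set here are copied into every rx buffer during
+    burst processing */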
+ bt->buffer_pool_index = rxq->buffer_pool_index;
+ bt->ref_count = 1;
if (n_tail_desc)
n_rx_bytes = avf_process_rx_burst (vm, node, ptd, n_rx_packets, 1);
vlib_trace_buffer (vm, node, next_index, b, /* follow_chain */ 0);
tr = vlib_add_trace (vm, node, b, sizeof (*tr));
tr->next_index = next_index;
+ tr->qid = qid;
tr->hw_if_index = ad->hw_if_index;
tr->qw1s[0] = ptd->qw1s[i];
for (j = 1; j < AVF_RX_MAX_DESC_IN_CHAIN; j++)
vlib_frame_t *f;
ethernet_input_frame_t *ef;
nf = vlib_node_runtime_get_next_frame (vm, node, next_index);
- f = vlib_get_frame (vm, nf->frame_index);
+ f = vlib_get_frame (vm, nf->frame);
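+ /* every packet in this frame comes from the same interface, which
+    lets ethernet-input take its single-interface fast path */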
f->flags = ETH_INPUT_FRAME_F_SINGLE_SW_IF_IDX;
ef = vlib_frame_scalar_args (f);
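+ /* or_qw1 ORs qword1 of all descriptors; if no descriptor flagged an
+    IP checksum error, ethernet-input may skip checksum verification */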
if ((or_qw1 & AVF_RXD_ERROR_IPE) == 0)
f->flags |= ETH_INPUT_FRAME_F_IP4_CKSUM_OK;
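+ /* the flags above describe only packets already queued, so mark the
+    frame as not appendable */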
+ vlib_frame_no_append (f);
}
n_left_to_next -= n_rx_packets;
ad->hw_if_index, n_rx_packets, n_rx_bytes);
done:
+ /* refill rx ring; done here so descriptors consumed this iteration are
+    replenished and early-exit paths still refill */
+ if (ad->flags & AVF_DEVICE_F_VA_DMA)
+ avf_rxq_refill (vm, node, rxq, 1 /* use_va_dma */ );
+ else
+ avf_rxq_refill (vm, node, rxq, 0 /* use_va_dma */ );
+
return n_rx_packets;
}
.state = VLIB_NODE_STATE_DISABLED,
.n_errors = AVF_INPUT_N_ERROR,
.error_strings = avf_input_error_strings,
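+ /* advertise packet tracing support for this node */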
+ .flags = VLIB_NODE_FLAG_TRACE_SUPPORTED,
};
/* *INDENT-ON* */