X-Git-Url: https://gerrit.fd.io/r/gitweb?a=blobdiff_plain;f=src%2Fplugins%2Favf%2Finput.c;h=509b4970dadc982f7e803e72574b0ab527df33a9;hb=e4f849c137101871b1caa0d959c7ea794e7d77f7;hp=27c3683f57c563bb7b3d16d9408626392daa51a2;hpb=6e36351faf5b69a0bfb8235b3b06f8b2c24e5547;p=vpp.git

diff --git a/src/plugins/avf/input.c b/src/plugins/avf/input.c
index 27c3683f57c..509b4970dad 100644
--- a/src/plugins/avf/input.c
+++ b/src/plugins/avf/input.c
@@ -48,10 +48,11 @@ static __clib_unused char *avf_input_error_strings[] = {
 #define AVF_INPUT_REFILL_TRESHOLD 32
 static_always_inline void
 avf_rxq_refill (vlib_main_t * vm, vlib_node_runtime_t * node, avf_rxq_t * rxq,
-                int use_iova)
+                int use_va_dma)
 {
   u16 n_refill, mask, n_alloc, slot;
   u32 s0, s1, s2, s3;
+  vlib_buffer_t *b[4];
   avf_rx_desc_t *d[4];
 
   n_refill = rxq->size - 1 - rxq->n_enqueued;
@@ -70,7 +71,7 @@ avf_rxq_refill (vlib_main_t * vm, vlib_node_runtime_t * node, avf_rxq_t * rxq,
       vlib_error_count (vm, node->node_index,
                        AVF_INPUT_ERROR_BUFFER_ALLOC, 1);
       if (n_alloc)
-       vlib_buffer_free (vm, rxq->bufs + slot, n_alloc);
+       vlib_buffer_free_from_ring (vm, rxq->bufs, slot, rxq->size, n_alloc);
       return;
     }
 
@@ -97,28 +98,24 @@ avf_rxq_refill (vlib_main_t * vm, vlib_node_runtime_t * node, avf_rxq_t * rxq,
       d[1] = ((avf_rx_desc_t *) rxq->descs) + s1;
       d[2] = ((avf_rx_desc_t *) rxq->descs) + s2;
       d[3] = ((avf_rx_desc_t *) rxq->descs) + s3;
-      if (use_iova)
+      b[0] = vlib_get_buffer (vm, rxq->bufs[s0]);
+      b[1] = vlib_get_buffer (vm, rxq->bufs[s1]);
+      b[2] = vlib_get_buffer (vm, rxq->bufs[s2]);
+      b[3] = vlib_get_buffer (vm, rxq->bufs[s3]);
+
+      if (use_va_dma)
        {
-         vlib_buffer_t *b;
-         b = vlib_get_buffer (vm, rxq->bufs[s0]);
-         d[0]->qword[0] = pointer_to_uword (b->data);
-         b = vlib_get_buffer (vm, rxq->bufs[s1]);
-         d[1]->qword[0] = pointer_to_uword (b->data);
-         b = vlib_get_buffer (vm, rxq->bufs[s2]);
-         d[2]->qword[0] = pointer_to_uword (b->data);
-         b = vlib_get_buffer (vm, rxq->bufs[s3]);
-         d[3]->qword[0] = pointer_to_uword (b->data);
+         d[0]->qword[0] = vlib_buffer_get_va (b[0]);
+         d[1]->qword[0] = vlib_buffer_get_va (b[1]);
+         d[2]->qword[0] = vlib_buffer_get_va (b[2]);
+         d[3]->qword[0] = vlib_buffer_get_va (b[3]);
        }
       else
        {
-         d[0]->qword[0] =
-           vlib_get_buffer_data_physical_address (vm, rxq->bufs[s0]);
-         d[1]->qword[0] =
-           vlib_get_buffer_data_physical_address (vm, rxq->bufs[s1]);
-         d[2]->qword[0] =
-           vlib_get_buffer_data_physical_address (vm, rxq->bufs[s2]);
-         d[3]->qword[0] =
-           vlib_get_buffer_data_physical_address (vm, rxq->bufs[s3]);
+         d[0]->qword[0] = vlib_buffer_get_pa (vm, b[0]);
+         d[1]->qword[0] = vlib_buffer_get_pa (vm, b[1]);
+         d[2]->qword[0] = vlib_buffer_get_pa (vm, b[2]);
+         d[3]->qword[0] = vlib_buffer_get_pa (vm, b[3]);
        }
 
       d[0]->qword[1] = 0;
@@ -134,14 +131,11 @@ avf_rxq_refill (vlib_main_t * vm, vlib_node_runtime_t * node, avf_rxq_t * rxq,
     {
       s0 = slot;
      d[0] = ((avf_rx_desc_t *) rxq->descs) + s0;
-      if (use_iova)
-       {
-         vlib_buffer_t *b = vlib_get_buffer (vm, rxq->bufs[s0]);
-         d[0]->qword[0] = pointer_to_uword (b->data);
-       }
+      b[0] = vlib_get_buffer (vm, rxq->bufs[s0]);
+      if (use_va_dma)
+       d[0]->qword[0] = vlib_buffer_get_va (b[0]);
       else
-       d[0]->qword[0] =
-         vlib_get_buffer_data_physical_address (vm, rxq->bufs[s0]);
+       d[0]->qword[0] = vlib_buffer_get_pa (vm, b[0]);
       d[0]->qword[1] = 0;
 
       /* next */
@@ -264,14 +258,14 @@ avf_process_rx_burst (vlib_main_t * vm, vlib_node_runtime_t * node,
            vnet_buffer (bt)->feature_arc_index;
        }
 
-      clib_memcpy (vnet_buffer (b[0])->sw_if_index,
-                  vnet_buffer (bt)->sw_if_index, 2 * sizeof (u32));
-      clib_memcpy (vnet_buffer (b[1])->sw_if_index,
-                  vnet_buffer (bt)->sw_if_index, 2 * sizeof (u32));
-      clib_memcpy (vnet_buffer (b[2])->sw_if_index,
-                  vnet_buffer (bt)->sw_if_index, 2 * sizeof (u32));
-      clib_memcpy (vnet_buffer (b[3])->sw_if_index,
-                  vnet_buffer (bt)->sw_if_index, 2 * sizeof (u32));
+      clib_memcpy_fast (vnet_buffer (b[0])->sw_if_index,
+                       vnet_buffer (bt)->sw_if_index, 2 * sizeof (u32));
+      clib_memcpy_fast (vnet_buffer (b[1])->sw_if_index,
+                       vnet_buffer (bt)->sw_if_index, 2 * sizeof (u32));
+      clib_memcpy_fast (vnet_buffer (b[2])->sw_if_index,
+                       vnet_buffer (bt)->sw_if_index, 2 * sizeof (u32));
+      clib_memcpy_fast (vnet_buffer (b[3])->sw_if_index,
+                       vnet_buffer (bt)->sw_if_index, 2 * sizeof (u32));
 
       VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b[0]);
       VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b[1]);
@@ -301,8 +295,8 @@ avf_process_rx_burst (vlib_main_t * vm, vlib_node_runtime_t * node,
            vnet_buffer (bt)->feature_arc_index;
        }
 
-      clib_memcpy (vnet_buffer (b[0])->sw_if_index,
-                  vnet_buffer (bt)->sw_if_index, 2 * sizeof (u32));
+      clib_memcpy_fast (vnet_buffer (b[0])->sw_if_index,
+                       vnet_buffer (bt)->sw_if_index, 2 * sizeof (u32));
 
       VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b[0]);
 
@@ -398,7 +392,7 @@ avf_device_input_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
          u64x4_store_unaligned (v, ptd->rx_vector + n_rxv);
          maybe_error |= !u64x4_is_all_zero (err4);
 
-         clib_memcpy (bi, rxq->bufs + rxq->next, 4 * sizeof (u32));
+         clib_memcpy_fast (bi, rxq->bufs + rxq->next, 4 * sizeof (u32));
 
          /* next */
          rxq->next = (rxq->next + 4) & mask;
@@ -439,10 +433,10 @@ avf_device_input_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
     goto done;
 
   /* refill rx ring */
-  if (ad->flags & AVF_DEVICE_F_IOVA)
-    avf_rxq_refill (vm, node, rxq, 1 /* use_iova */ );
+  if (ad->flags & AVF_DEVICE_F_VA_DMA)
+    avf_rxq_refill (vm, node, rxq, 1 /* use_va_dma */ );
   else
-    avf_rxq_refill (vm, node, rxq, 0 /* use_iova */ );
+    avf_rxq_refill (vm, node, rxq, 0 /* use_va_dma */ );
 
   vlib_get_buffers (vm, buffer_indices, bufs, n_rxv);
   n_rx_packets = n_rxv;
@@ -492,7 +486,7 @@ avf_device_input_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
       tr = vlib_add_trace (vm, node, b, sizeof (*tr));
       tr->next_index = next[0];
       tr->hw_if_index = ad->hw_if_index;
-      clib_memcpy (&tr->rxve, rxve, sizeof (avf_rx_vector_entry_t));
+      clib_memcpy_fast (&tr->rxve, rxve, sizeof (avf_rx_vector_entry_t));
 
       /* next */
       n_trace--;
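
The functional core of the refill hunks is how a descriptor's DMA address (qword[0]) is now derived from a vlib buffer: when the device flag AVF_DEVICE_F_VA_DMA is set, the buffer's virtual address is written directly via vlib_buffer_get_va(); otherwise the physical address is looked up with vlib_buffer_get_pa(). The sketch below restates that per-descriptor pattern in isolation. It is illustrative only, not code from the patch: the helper name refill_one_desc and the include lines are assumptions, while vlib_get_buffer, vlib_buffer_get_va and vlib_buffer_get_pa are the helpers used in the hunks above.

  #include <vlib/vlib.h>
  #include <avf/avf.h>

  /* Hypothetical helper illustrating the refill pattern: write one buffer's
   * DMA address into one RX descriptor slot of the queue. */
  static_always_inline void
  refill_one_desc (vlib_main_t * vm, avf_rxq_t * rxq, u16 slot, int use_va_dma)
  {
    avf_rx_desc_t *d = ((avf_rx_desc_t *) rxq->descs) + slot;
    vlib_buffer_t *b = vlib_get_buffer (vm, rxq->bufs[slot]);

    if (use_va_dma)
      /* device can DMA to virtual addresses (AVF_DEVICE_F_VA_DMA is set) */
      d->qword[0] = vlib_buffer_get_va (b);
    else
      /* otherwise program the buffer's physical address */
      d->qword[0] = vlib_buffer_get_pa (vm, b);

    /* clear the writeback/status qword before handing the slot to the NIC */
    d->qword[1] = 0;
  }

In the patched avf_rxq_refill() the same selection is applied four descriptors at a time in the unrolled loop and once per descriptor in the tail loop, with the use_va_dma argument supplied by the AVF_DEVICE_F_VA_DMA check in avf_device_input_inline().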