/* SPDX-License-Identifier: Apache-2.0
 * Copyright (c) 2023 Cisco Systems, Inc.
 */

#include <vlib/vlib.h>
#include <vnet/dev/dev.h>
#include <vnet/ethernet/ethernet.h>
#include <dev_iavf/iavf.h>

/* refill the ring only when more than this many descriptor slots are free */
#define IAVF_RX_REFILL_TRESHOLD 32

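/* descriptor qword 1 bit masks used to test completion status: dd =
 * descriptor done, eop = end of packet, flm = flow director match, ipe =
 * IPv4 checksum error */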
static const iavf_rx_desc_qw1_t mask_eop = { .eop = 1 };
static const iavf_rx_desc_qw1_t mask_flm = { .flm = 1 };
static const iavf_rx_desc_qw1_t mask_dd = { .dd = 1 };
static const iavf_rx_desc_qw1_t mask_ipe = { .ipe = 1 };
static const iavf_rx_desc_qw1_t mask_dd_eop = { .dd = 1, .eop = 1 };

static_always_inline int
iavf_rxd_is_not_eop (iavf_rx_desc_t *d)
{
  return (d->qw1.as_u64 & mask_eop.as_u64) == 0;
}

static_always_inline int
iavf_rxd_is_not_dd (iavf_rx_desc_t *d)
{
  return (d->qw1.as_u64 & mask_dd.as_u64) == 0;
}

static_always_inline void
iavf_rx_desc_write (iavf_rx_desc_t *d, u64 addr)
{
#ifdef CLIB_HAVE_VEC256
  /* single 32-byte store: buffer address into qword 0, qwords 1-3 cleared */
  *(u64x4 *) d = (u64x4){ addr, 0, 0, 0 };
#else
  d->qword[0] = addr;
  d->qword[1] = 0;
#endif
}

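/* Refill the RX descriptor ring with fresh buffers, in multiples of 8.
 * Descriptors are rewritten with the new buffer addresses (virtual or
 * physical, depending on use_va_dma) and the queue tail register is
 * updated last. */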
static_always_inline void
iavf_rxq_refill (vlib_main_t *vm, vlib_node_runtime_t *node,
                 vnet_dev_rx_queue_t *rxq, int use_va_dma)
{
  u16 n_refill, mask, n_alloc, slot, size;
  iavf_rxq_t *arq = vnet_dev_get_rx_queue_data (rxq);
  vlib_buffer_t *b[8];
  iavf_rx_desc_t *d, *first_d;
  void *p[8];

  size = rxq->size;
  mask = size - 1;
  n_refill = mask - arq->n_enqueued;
  if (PREDICT_TRUE (n_refill <= IAVF_RX_REFILL_TRESHOLD))
    return;

  slot = (arq->next - n_refill - 1) & mask;

  n_refill &= ~7; /* round to 8 */
  n_alloc = vlib_buffer_alloc_to_ring_from_pool (
    vm, arq->buffer_indices, slot, size, n_refill,
    vnet_dev_get_rx_queue_buffer_pool_index (rxq));

  if (PREDICT_FALSE (n_alloc != n_refill))
    {
      vlib_error_count (vm, node->node_index, IAVF_RX_NODE_CTR_BUFFER_ALLOC,
                        1);
      if (n_alloc)
        vlib_buffer_free_from_ring (vm, arq->buffer_indices, slot, size,
                                    n_alloc);
      return;
    }

  arq->n_enqueued += n_alloc;
  first_d = arq->descs;

  ASSERT (slot % 8 == 0);

  while (n_alloc >= 8)
    {
      d = first_d + slot;

      if (use_va_dma)
        {
          vlib_get_buffers_with_offset (vm, arq->buffer_indices + slot, p, 8,
                                        sizeof (vlib_buffer_t));
          iavf_rx_desc_write (d + 0, pointer_to_uword (p[0]));
          iavf_rx_desc_write (d + 1, pointer_to_uword (p[1]));
          iavf_rx_desc_write (d + 2, pointer_to_uword (p[2]));
          iavf_rx_desc_write (d + 3, pointer_to_uword (p[3]));
          iavf_rx_desc_write (d + 4, pointer_to_uword (p[4]));
          iavf_rx_desc_write (d + 5, pointer_to_uword (p[5]));
          iavf_rx_desc_write (d + 6, pointer_to_uword (p[6]));
          iavf_rx_desc_write (d + 7, pointer_to_uword (p[7]));
        }
      else
        {
          vlib_get_buffers (vm, arq->buffer_indices + slot, b, 8);
          iavf_rx_desc_write (d + 0, vlib_buffer_get_pa (vm, b[0]));
          iavf_rx_desc_write (d + 1, vlib_buffer_get_pa (vm, b[1]));
          iavf_rx_desc_write (d + 2, vlib_buffer_get_pa (vm, b[2]));
          iavf_rx_desc_write (d + 3, vlib_buffer_get_pa (vm, b[3]));
          iavf_rx_desc_write (d + 4, vlib_buffer_get_pa (vm, b[4]));
          iavf_rx_desc_write (d + 5, vlib_buffer_get_pa (vm, b[5]));
          iavf_rx_desc_write (d + 6, vlib_buffer_get_pa (vm, b[6]));
          iavf_rx_desc_write (d + 7, vlib_buffer_get_pa (vm, b[7]));
        }

      /* next */
      slot = (slot + 8) & mask;
      n_alloc -= 8;
    }

  /* RXQ can be smaller than 256 packets, especially if jumbo. */
  arq->descs[slot].qword[1] = 0;

  __atomic_store_n (arq->qrx_tail, slot, __ATOMIC_RELEASE);
}

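/* For a multi-segment packet, chain the tail buffers to the head buffer and
 * return the number of bytes carried by the tail. Returns 0 when the first
 * descriptor already has eop set. */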
static_always_inline uword
iavf_rx_attach_tail (vlib_main_t *vm, vlib_buffer_template_t *bt,
                     vlib_buffer_t *b, u64 qw1, iavf_rx_tail_t *t)
{
  vlib_buffer_t *hb = b;
  u32 tlnifb = 0, i = 0;

  if (qw1 & mask_eop.as_u64)
    return 0;

  while ((qw1 & mask_eop.as_u64) == 0)
    {
      ASSERT (i < IAVF_RX_MAX_DESC_IN_CHAIN - 1);
      ASSERT (qw1 & mask_dd.as_u64);
      qw1 = t->qw1s[i];
      b->next_buffer = t->buffers[i];
      b->flags |= VLIB_BUFFER_NEXT_PRESENT;
      b = vlib_get_buffer (vm, b->next_buffer);
      b->template = *bt;
      tlnifb += b->current_length = ((iavf_rx_desc_qw1_t) qw1).length;
      i++;
    }

  hb->total_length_not_including_first_buffer = tlnifb;
  hb->flags |= VLIB_BUFFER_TOTAL_LENGTH_VALID;
  return tlnifb;
}

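/* Apply flow director results to packets whose descriptors have the flm bit
 * set: optionally override the next node, store the flow id in the buffer
 * and advance the payload pointer. */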
static_always_inline void
iavf_process_flow_offload (vnet_dev_port_t *port, iavf_rt_data_t *rtd,
                           uword n_rx_packets)
{
  uword n;
  iavf_flow_lookup_entry_t fle;
  iavf_port_t *ap = vnet_dev_get_port_data (port);

  for (n = 0; n < n_rx_packets; n++)
    {
      if ((rtd->qw1s[n] & mask_flm.as_u64) == 0)
        continue;

      fle = *pool_elt_at_index (ap->flow_lookup_entries, rtd->flow_ids[n]);

      if (fle.next_index != (u16) ~0)
        rtd->next[n] = fle.next_index;

      if (fle.flow_id != ~0)
        rtd->bufs[n]->flow_id = fle.flow_id;

      if (fle.buffer_advance != ~0)
        vlib_buffer_advance (rtd->bufs[n], fle.buffer_advance);
    }
}

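/* Copy the buffer template into every received buffer, set buffer lengths
 * from descriptor qword 1 and, when tail descriptors are present, attach
 * the buffer chains. Returns the total number of RX bytes. */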
static_always_inline uword
iavf_process_rx_burst (vlib_main_t *vm, vlib_node_runtime_t *node,
                       vnet_dev_rx_queue_t *rxq, iavf_rt_data_t *rtd,
                       vlib_buffer_template_t *bt, u32 n_left,
                       int maybe_multiseg)
{
  vlib_buffer_t **b = rtd->bufs;
  u64 *qw1 = rtd->qw1s;
  iavf_rx_tail_t *tail = rtd->tails;
  uword n_rx_bytes = 0;

  while (n_left >= 4)
    {
      if (n_left >= 12)
        {
          vlib_prefetch_buffer_header (b[8], LOAD);
          vlib_prefetch_buffer_header (b[9], LOAD);
          vlib_prefetch_buffer_header (b[10], LOAD);
          vlib_prefetch_buffer_header (b[11], LOAD);
        }

      b[0]->template = *bt;
      b[1]->template = *bt;
      b[2]->template = *bt;
      b[3]->template = *bt;

      n_rx_bytes += b[0]->current_length =
        ((iavf_rx_desc_qw1_t) qw1[0]).length;
      n_rx_bytes += b[1]->current_length =
        ((iavf_rx_desc_qw1_t) qw1[1]).length;
      n_rx_bytes += b[2]->current_length =
        ((iavf_rx_desc_qw1_t) qw1[2]).length;
      n_rx_bytes += b[3]->current_length =
        ((iavf_rx_desc_qw1_t) qw1[3]).length;

      if (maybe_multiseg)
        {
          n_rx_bytes += iavf_rx_attach_tail (vm, bt, b[0], qw1[0], tail + 0);
          n_rx_bytes += iavf_rx_attach_tail (vm, bt, b[1], qw1[1], tail + 1);
          n_rx_bytes += iavf_rx_attach_tail (vm, bt, b[2], qw1[2], tail + 2);
          n_rx_bytes += iavf_rx_attach_tail (vm, bt, b[3], qw1[3], tail + 3);
        }

      /* next */
      qw1 += 4;
      tail += 4;
      b += 4;
      n_left -= 4;
    }

  while (n_left)
    {
      b[0]->template = *bt;

      n_rx_bytes += b[0]->current_length =
        ((iavf_rx_desc_qw1_t) qw1[0]).length;

      if (maybe_multiseg)
        n_rx_bytes += iavf_rx_attach_tail (vm, bt, b[0], qw1[0], tail + 0);

      /* next */
      qw1 += 1;
      tail += 1;
      b += 1;
      n_left -= 1;
    }

  return n_rx_bytes;
}

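/* Main RX routine: drain completed descriptors from the ring (four at a
 * time with SIMD when possible, otherwise one by one), rebuild buffer
 * chains, optionally apply flow offload results, trace, and hand the
 * packets to the next node. Returns the number of packets received. */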
static_always_inline uword
iavf_device_input_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
                          vlib_frame_t *frame, vnet_dev_port_t *port,
                          vnet_dev_rx_queue_t *rxq, int with_flows)
{
  vnet_main_t *vnm = vnet_get_main ();
  u32 thr_idx = vlib_get_thread_index ();
  iavf_rt_data_t *rtd = vnet_dev_get_rt_temp_space (vm);
  iavf_rxq_t *arq = vnet_dev_get_rx_queue_data (rxq);
  vlib_buffer_template_t bt = rxq->buffer_template;
  u32 n_trace, n_rx_packets = 0, n_rx_bytes = 0;
  u16 n_tail_desc = 0;
  u64 or_qw1 = 0;
  u32 *bi, *to_next, n_left_to_next;
  u32 next_index = rxq->next_index;
  u32 sw_if_index = port->intf.sw_if_index;
  u32 hw_if_index = port->intf.hw_if_index;
  u16 next = arq->next;
  u16 size = rxq->size;
  u16 mask = size - 1;
  int single_next = 1;
  iavf_rx_desc_t *d, *descs = arq->descs;
#ifdef CLIB_HAVE_VEC256
  u64x4 q1x4, or_q1x4 = { 0 };
  u32x4 fdidx4;
  u64x4 dd_eop_mask4 = u64x4_splat (mask_dd_eop.as_u64);
#elif defined(CLIB_HAVE_VEC128)
  u32x4 q1x4_lo, q1x4_hi, or_q1x4 = { 0 };
  u32x4 fdidx4;
  u32x4 dd_eop_mask4 = u32x4_splat (mask_dd_eop.as_u64);
#endif

  /* is there anything on the ring */
  d = descs + next;
  if ((d->qword[1] & mask_dd.as_u64) == 0)
    goto done;

  vlib_get_new_next_frame (vm, node, next_index, to_next, n_left_to_next);

  /* fetch up to IAVF_RX_VECTOR_SZ from the rx ring, unflatten them and
     copy needed data from descriptor to rx vector */
  bi = to_next;

  while (n_rx_packets < IAVF_RX_VECTOR_SZ)
    {
      if (next + 11 < size)
        {
          int stride = 8;
          clib_prefetch_load ((void *) (descs + (next + stride)));
          clib_prefetch_load ((void *) (descs + (next + stride + 1)));
          clib_prefetch_load ((void *) (descs + (next + stride + 2)));
          clib_prefetch_load ((void *) (descs + (next + stride + 3)));
        }

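      /* fast path: gather qword 1 of four consecutive descriptors and accept
         them as a group only if all four are complete single-buffer packets
         (dd and eop set); otherwise fall back to the one-by-one path */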
#ifdef CLIB_HAVE_VEC256
      if (n_rx_packets >= IAVF_RX_VECTOR_SZ - 4 || next >= size - 4)
        goto one_by_one;

      q1x4 = u64x4_gather ((void *) &d[0].qword[1], (void *) &d[1].qword[1],
                           (void *) &d[2].qword[1], (void *) &d[3].qword[1]);

      /* not all packets are ready or at least one of them is chained */
      if (!u64x4_is_equal (q1x4 & dd_eop_mask4, dd_eop_mask4))
        goto one_by_one;

      or_q1x4 |= q1x4;
      u64x4_store_unaligned (q1x4, rtd->qw1s + n_rx_packets);
#elif defined(CLIB_HAVE_VEC128)
      if (n_rx_packets >= IAVF_RX_VECTOR_SZ - 4 || next >= size - 4)
        goto one_by_one;

      q1x4_lo =
        u32x4_gather ((void *) &d[0].qword[1], (void *) &d[1].qword[1],
                      (void *) &d[2].qword[1], (void *) &d[3].qword[1]);

      /* not all packets are ready or at least one of them is chained */
      if (!u32x4_is_equal (q1x4_lo & dd_eop_mask4, dd_eop_mask4))
        goto one_by_one;

      or_q1x4 |= q1x4_lo;

      q1x4_hi = u32x4_gather (
        (void *) &d[0].qword[1] + 4, (void *) &d[1].qword[1] + 4,
        (void *) &d[2].qword[1] + 4, (void *) &d[3].qword[1] + 4);

      rtd->qw1s[n_rx_packets + 0] = (u64) q1x4_hi[0] << 32 | (u64) q1x4_lo[0];
      rtd->qw1s[n_rx_packets + 1] = (u64) q1x4_hi[1] << 32 | (u64) q1x4_lo[1];
      rtd->qw1s[n_rx_packets + 2] = (u64) q1x4_hi[2] << 32 | (u64) q1x4_lo[2];
      rtd->qw1s[n_rx_packets + 3] = (u64) q1x4_hi[3] << 32 | (u64) q1x4_lo[3];
#endif
#if defined(CLIB_HAVE_VEC256) || defined(CLIB_HAVE_VEC128)

      if (with_flows)
        {
          fdidx4 = u32x4_gather (
            (void *) &d[0].fdid_flex_hi, (void *) &d[1].fdid_flex_hi,
            (void *) &d[2].fdid_flex_hi, (void *) &d[3].fdid_flex_hi);
          u32x4_store_unaligned (fdidx4, rtd->flow_ids + n_rx_packets);
        }

      vlib_buffer_copy_indices (bi, arq->buffer_indices + next, 4);

      /* next */
      next = (next + 4) & mask;
      d = descs + next;
      n_rx_packets += 4;
      bi += 4;
      continue;

    one_by_one:
#endif
      clib_prefetch_load ((void *) (descs + ((next + 8) & mask)));

      if (iavf_rxd_is_not_dd (d))
        break;

      bi[0] = arq->buffer_indices[next];

      /* deal with chained buffers */
      if (PREDICT_FALSE (iavf_rxd_is_not_eop (d)))
        {
          u16 tail_desc = 0;
          u16 tail_next = next;
          iavf_rx_tail_t *tail = rtd->tails + n_rx_packets;
          iavf_rx_desc_t *td;
          do
            {
              tail_next = (tail_next + 1) & mask;
              td = descs + tail_next;

              /* bail out in case of incomplete transaction */
              if (iavf_rxd_is_not_dd (td))
                goto no_more_desc;

              or_qw1 |= tail->qw1s[tail_desc] = td[0].qword[1];
              tail->buffers[tail_desc] = arq->buffer_indices[tail_next];
              tail_desc++;
            }
          while (iavf_rxd_is_not_eop (td));

          next = tail_next;
          n_tail_desc += tail_desc;
        }

      or_qw1 |= rtd->qw1s[n_rx_packets] = d[0].qword[1];
      if (PREDICT_FALSE (with_flows))
        {
          rtd->flow_ids[n_rx_packets] = d[0].fdid_flex_hi;
        }

      /* next */
      next = (next + 1) & mask;
      d = descs + next;
      n_rx_packets++;
      bi++;
    }

no_more_desc:
  if (n_rx_packets == 0)
    goto done;

  arq->next = next;
  arq->n_enqueued -= n_rx_packets + n_tail_desc;

#if defined(CLIB_HAVE_VEC256) || defined(CLIB_HAVE_VEC128)
  or_qw1 |= or_q1x4[0] | or_q1x4[1] | or_q1x4[2] | or_q1x4[3];
#endif

  vlib_get_buffers (vm, to_next, rtd->bufs, n_rx_packets);

  n_rx_bytes =
    n_tail_desc ?
      iavf_process_rx_burst (vm, node, rxq, rtd, &bt, n_rx_packets, 1) :
      iavf_process_rx_burst (vm, node, rxq, rtd, &bt, n_rx_packets, 0);

  /* the MARKed packets may have different next nodes */
  if (PREDICT_FALSE (with_flows && (or_qw1 & mask_flm.as_u64)))
    {
      u32 n;
      single_next = 0;
      for (n = 0; n < n_rx_packets; n++)
        rtd->next[n] = next_index;

      iavf_process_flow_offload (port, rtd, n_rx_packets);
    }

  /* packet trace if enabled */
  if (PREDICT_FALSE ((n_trace = vlib_get_trace_count (vm, node))))
    {
      u32 n_left = n_rx_packets;
      u32 i = 0, j;
      u16 *next_indices = rtd->next;

      while (n_trace && n_left)
        {
          vlib_buffer_t *b = rtd->bufs[i];
          if (PREDICT_FALSE (single_next == 0))
            next_index = next_indices[0];

          if (PREDICT_TRUE (vlib_trace_buffer (vm, node, next_index, b,
                                               /* follow_chain */ 0)))
            {
              iavf_rx_trace_t *tr = vlib_add_trace (vm, node, b, sizeof (*tr));
              tr->next_index = next_index;
              tr->qid = rxq->queue_id;
              tr->hw_if_index = hw_if_index;
              tr->qw1s[0] = rtd->qw1s[i];
              tr->flow_id =
                (tr->qw1s[0] & mask_flm.as_u64) ? rtd->flow_ids[i] : 0;
              for (j = 1; j < IAVF_RX_MAX_DESC_IN_CHAIN; j++)
                tr->qw1s[j] = rtd->tails[i].qw1s[j - 1];
              n_trace--;
            }

          /* next */
          n_left--;
          i++;
          next_indices++;
        }
      vlib_set_trace_count (vm, node, n_trace);
    }

  /* enqueue the packets to the next nodes */
  if (PREDICT_FALSE (with_flows && (or_qw1 & mask_flm.as_u64)))
    {
      /* release next node's frame vector; in this case we use
         vlib_buffer_enqueue_to_next to place the packets */
      vlib_put_next_frame (vm, node, next_index, n_left_to_next);

      /* enqueue buffers to the next node */
      vlib_buffer_enqueue_to_next (vm, node, to_next, rtd->next,
                                   n_rx_packets);
    }
  else
    {
      if (PREDICT_TRUE (next_index == VNET_DEV_ETH_RX_PORT_NEXT_ETH_INPUT))
        {
          vlib_next_frame_t *nf;
          vlib_frame_t *f;
          ethernet_input_frame_t *ef;
          nf = vlib_node_runtime_get_next_frame (vm, node, next_index);
          f = vlib_get_frame (vm, nf->frame);
          f->flags = ETH_INPUT_FRAME_F_SINGLE_SW_IF_IDX;

          ef = vlib_frame_scalar_args (f);
          ef->sw_if_index = sw_if_index;
          ef->hw_if_index = hw_if_index;

          if ((or_qw1 & mask_ipe.as_u64) == 0)
            f->flags |= ETH_INPUT_FRAME_F_IP4_CKSUM_OK;
          vlib_frame_no_append (f);
        }

      n_left_to_next -= n_rx_packets;
      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }

  vlib_increment_combined_counter (
    vnm->interface_main.combined_sw_if_counters + VNET_INTERFACE_COUNTER_RX,
    thr_idx, hw_if_index, n_rx_packets, n_rx_bytes);

done:
  return n_rx_packets;
}

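/* RX node: polls every RX queue assigned to this thread, then refills the
 * descriptor rings */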
VNET_DEV_NODE_FN (iavf_rx_node)
(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *frame)
{
  u32 n_rx = 0;

  foreach_vnet_dev_rx_queue_runtime (rxq, node)
    {
      vnet_dev_port_t *port = rxq->port;
      iavf_port_t *ap = vnet_dev_get_port_data (port);

      if (PREDICT_FALSE (ap->flow_offload))
        n_rx += iavf_device_input_inline (vm, node, frame, port, rxq, 1);
      else
        n_rx += iavf_device_input_inline (vm, node, frame, port, rxq, 0);

      /* refill the descriptor ring with fresh buffers */
      if (rxq->port->dev->va_dma)
        iavf_rxq_refill (vm, node, rxq, 1 /* use_va_dma */);
      else
        iavf_rxq_refill (vm, node, rxq, 0 /* use_va_dma */);
    }

  return n_rx;
}