/*
 *------------------------------------------------------------------
 * Copyright (c) 2018 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *------------------------------------------------------------------
 */
#include <vlib/vlib.h>
#include <vlib/unix/unix.h>
#include <vlib/pci/pci.h>
#include <vnet/ethernet/ethernet.h>
#include <vnet/interface/rx_queue_funcs.h>

#include <avf/avf.h>
#define foreach_avf_input_error \
  _(BUFFER_ALLOC, "buffer alloc error")
typedef enum
{
#define _(f,s) AVF_INPUT_ERROR_##f,
  foreach_avf_input_error
#undef _
    AVF_INPUT_N_ERROR,
} avf_input_error_t;
static __clib_unused char *avf_input_error_strings[] = {
#define _(n,s) s,
  foreach_avf_input_error
#undef _
};
#define AVF_INPUT_REFILL_TRESHOLD 32
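/* Write a single RX descriptor: buffer address goes in qword[0] and the
   status qword is cleared. On AVX2-capable builds one 32-byte store covers
   the whole descriptor. */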
static_always_inline void
avf_rx_desc_write (avf_rx_desc_t * d, u64 addr)
{
#ifdef CLIB_HAVE_VEC256
  u64x4 v = { addr, 0, 0, 0 };
  u64x4_store_unaligned (v, (void *) d);
#else
  d->qword[0] = addr;
  d->qword[1] = 0;
#endif
}
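/* Refill the RX ring with fresh buffers in batches of 8 once the number of
   free slots exceeds AVF_INPUT_REFILL_TRESHOLD, then bump the queue tail
   register so the device can start using them. */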
static_always_inline void
avf_rxq_refill (vlib_main_t * vm, vlib_node_runtime_t * node, avf_rxq_t * rxq,
		int use_va_dma)
{
  u16 n_refill, mask, n_alloc, slot, size;
  vlib_buffer_t *b[8];
  avf_rx_desc_t *d, *first_d;
  void *p[8];

  size = rxq->size;
  mask = size - 1;
  n_refill = mask - rxq->n_enqueued;
  if (PREDICT_TRUE (n_refill <= AVF_INPUT_REFILL_TRESHOLD))
    return;

  slot = (rxq->next - n_refill - 1) & mask;

  n_refill &= ~7;		/* round to 8 */
  n_alloc =
    vlib_buffer_alloc_to_ring_from_pool (vm, rxq->bufs, slot, size, n_refill,
					 rxq->buffer_pool_index);

  if (PREDICT_FALSE (n_alloc != n_refill))
    {
      vlib_error_count (vm, node->node_index,
			AVF_INPUT_ERROR_BUFFER_ALLOC, 1);
      if (n_alloc)
	vlib_buffer_free_from_ring (vm, rxq->bufs, slot, size, n_alloc);
      return;
    }

  rxq->n_enqueued += n_alloc;
  first_d = rxq->descs;
  ASSERT (slot % 8 == 0);

  while (n_alloc >= 8)
    {
      d = first_d + slot;

      if (use_va_dma)
	{
	  vlib_get_buffers_with_offset (vm, rxq->bufs + slot, p, 8,
					sizeof (vlib_buffer_t));
	  avf_rx_desc_write (d + 0, pointer_to_uword (p[0]));
	  avf_rx_desc_write (d + 1, pointer_to_uword (p[1]));
	  avf_rx_desc_write (d + 2, pointer_to_uword (p[2]));
	  avf_rx_desc_write (d + 3, pointer_to_uword (p[3]));
	  avf_rx_desc_write (d + 4, pointer_to_uword (p[4]));
	  avf_rx_desc_write (d + 5, pointer_to_uword (p[5]));
	  avf_rx_desc_write (d + 6, pointer_to_uword (p[6]));
	  avf_rx_desc_write (d + 7, pointer_to_uword (p[7]));
	}
      else
	{
	  vlib_get_buffers (vm, rxq->bufs + slot, b, 8);
	  avf_rx_desc_write (d + 0, vlib_buffer_get_pa (vm, b[0]));
	  avf_rx_desc_write (d + 1, vlib_buffer_get_pa (vm, b[1]));
	  avf_rx_desc_write (d + 2, vlib_buffer_get_pa (vm, b[2]));
	  avf_rx_desc_write (d + 3, vlib_buffer_get_pa (vm, b[3]));
	  avf_rx_desc_write (d + 4, vlib_buffer_get_pa (vm, b[4]));
	  avf_rx_desc_write (d + 5, vlib_buffer_get_pa (vm, b[5]));
	  avf_rx_desc_write (d + 6, vlib_buffer_get_pa (vm, b[6]));
	  avf_rx_desc_write (d + 7, vlib_buffer_get_pa (vm, b[7]));
	}

      /* next */
      slot = (slot + 8) & mask;
      n_alloc -= 8;
    }
  /* RXQ can be smaller than 256 packets, especially if jumbo. */
  rxq->descs[slot].qword[1] = 0;

  avf_tail_write (rxq->qrx_tail, slot);
}
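/* For a multi-descriptor (chained) packet, link the tail buffers collected
   in avf_rx_tail_t behind the head buffer and return the number of bytes
   they add (total_length_not_including_first_buffer). */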
static_always_inline uword
avf_rx_attach_tail (vlib_main_t * vm, vlib_buffer_t * bt, vlib_buffer_t * b,
		    u64 qw1, avf_rx_tail_t * t)
{
  vlib_buffer_t *hb = b;
  u32 tlnifb = 0, i = 0;

  if (qw1 & AVF_RXD_STATUS_EOP)
    return 0;
  while ((qw1 & AVF_RXD_STATUS_EOP) == 0)
    {
      ASSERT (i < AVF_RX_MAX_DESC_IN_CHAIN - 1);
      ASSERT (qw1 & AVF_RXD_STATUS_DD);
      qw1 = t->qw1s[i];
      b->next_buffer = t->buffers[i];
      b->flags |= VLIB_BUFFER_NEXT_PRESENT;
      b = vlib_get_buffer (vm, b->next_buffer);
      vlib_buffer_copy_template (b, bt);
      tlnifb += b->current_length = qw1 >> AVF_RXD_LEN_SHIFT;
      i++;
    }
  hb->total_length_not_including_first_buffer = tlnifb;
  hb->flags |= VLIB_BUFFER_TOTAL_LENGTH_VALID;
  return tlnifb;
}
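/* For packets marked by a hardware flow (FLM set in descriptor qword 1),
   look up the matching flow entry and apply its next node, flow id and
   buffer advance. */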
static_always_inline void
avf_process_flow_offload (avf_device_t *ad, avf_per_thread_data_t *ptd,
			  uword n_rx_packets)
{
  uword n;
  avf_flow_lookup_entry_t *fle;

  for (n = 0; n < n_rx_packets; n++)
    {
      if ((ptd->qw1s[n] & AVF_RXD_STATUS_FLM) == 0)
	continue;

      fle = pool_elt_at_index (ad->flow_lookup_entries, ptd->flow_ids[n]);

      if (fle->next_index != (u16) ~0)
	ptd->next[n] = fle->next_index;

      if (fle->flow_id != ~0)
	ptd->bufs[n]->flow_id = fle->flow_id;

      if (fle->buffer_advance != ~0)
	vlib_buffer_advance (ptd->bufs[n], fle->buffer_advance);
    }
}
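/* Finalize the received buffers: apply the buffer template, set lengths from
   descriptor qword 1, and attach tail buffers when the burst may contain
   multi-segment packets. Returns the total number of RX bytes. */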
static_always_inline uword
avf_process_rx_burst (vlib_main_t * vm, vlib_node_runtime_t * node,
		      avf_per_thread_data_t * ptd, u32 n_left,
		      int maybe_multiseg)
{
  vlib_buffer_t bt;
  vlib_buffer_t **b = ptd->bufs;
  u64 *qw1 = ptd->qw1s;
  avf_rx_tail_t *tail = ptd->tails;
  uword n_rx_bytes = 0;

  /* copy template into local variable - will save per packet load */
  vlib_buffer_copy_template (&bt, &ptd->buffer_template);
  while (n_left >= 4)
    {
      if (n_left >= 12)
	{
	  vlib_prefetch_buffer_header (b[8], LOAD);
	  vlib_prefetch_buffer_header (b[9], LOAD);
	  vlib_prefetch_buffer_header (b[10], LOAD);
	  vlib_prefetch_buffer_header (b[11], LOAD);
	}
      vlib_buffer_copy_template (b[0], &bt);
      vlib_buffer_copy_template (b[1], &bt);
      vlib_buffer_copy_template (b[2], &bt);
      vlib_buffer_copy_template (b[3], &bt);

      n_rx_bytes += b[0]->current_length = qw1[0] >> AVF_RXD_LEN_SHIFT;
      n_rx_bytes += b[1]->current_length = qw1[1] >> AVF_RXD_LEN_SHIFT;
      n_rx_bytes += b[2]->current_length = qw1[2] >> AVF_RXD_LEN_SHIFT;
      n_rx_bytes += b[3]->current_length = qw1[3] >> AVF_RXD_LEN_SHIFT;
      if (maybe_multiseg)
	{
	  n_rx_bytes += avf_rx_attach_tail (vm, &bt, b[0], qw1[0], tail + 0);
	  n_rx_bytes += avf_rx_attach_tail (vm, &bt, b[1], qw1[1], tail + 1);
	  n_rx_bytes += avf_rx_attach_tail (vm, &bt, b[2], qw1[2], tail + 2);
	  n_rx_bytes += avf_rx_attach_tail (vm, &bt, b[3], qw1[3], tail + 3);
	}

      /* next */
      qw1 += 4;
      tail += 4;
      b += 4;
      n_left -= 4;
    }
  while (n_left)
    {
      vlib_buffer_copy_template (b[0], &bt);

      n_rx_bytes += b[0]->current_length = qw1[0] >> AVF_RXD_LEN_SHIFT;

      if (maybe_multiseg)
	n_rx_bytes += avf_rx_attach_tail (vm, &bt, b[0], qw1[0], tail + 0);

      /* next */
      qw1 += 1;
      tail += 1;
      b += 1;
      n_left -= 1;
    }
  return n_rx_bytes;
}
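/* Per-queue input routine. Scans the descriptor ring (4 descriptors at a
   time when SIMD is available, falling back to a scalar loop for chained or
   not-yet-ready descriptors), builds the next frame, handles tracing and
   flow offload, updates counters and refills the ring. */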
static_always_inline uword
avf_device_input_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
			 vlib_frame_t *frame, avf_device_t *ad, u16 qid,
			 int with_flows)
{
  avf_main_t *am = &avf_main;
  vnet_main_t *vnm = vnet_get_main ();
  u32 thr_idx = vlib_get_thread_index ();
  avf_per_thread_data_t *ptd =
    vec_elt_at_index (am->per_thread_data, thr_idx);
  avf_rxq_t *rxq = vec_elt_at_index (ad->rxqs, qid);
  u32 n_trace, n_rx_packets = 0, n_rx_bytes = 0;
  u16 n_tail_desc = 0;
  u64 or_qw1 = 0;
  u32 *bi, *to_next, n_left_to_next;
  vlib_buffer_t *bt = &ptd->buffer_template;
  u32 next_index = VNET_DEVICE_INPUT_NEXT_ETHERNET_INPUT;
  u16 next = rxq->next;
  u16 size = rxq->size;
  u16 mask = size - 1;
  avf_rx_desc_t *d, *fd = rxq->descs;
#ifdef CLIB_HAVE_VEC256
  u64x4 q1x4, or_q1x4 = { 0 };
  u32x4 fdidx4;
  u64x4 dd_eop_mask4 = u64x4_splat (AVF_RXD_STATUS_DD | AVF_RXD_STATUS_EOP);
#elif defined(CLIB_HAVE_VEC128)
  u32x4 q1x4_lo, q1x4_hi, or_q1x4 = { 0 };
  u32x4 fdidx4;
  u32x4 dd_eop_mask4 = u32x4_splat (AVF_RXD_STATUS_DD | AVF_RXD_STATUS_EOP);
#endif
  int single_next = 1;	/* cleared when flow offload picks per-packet next nodes */
  /* is there anything on the ring */
  d = fd + next;
  if ((d->qword[1] & AVF_RXD_STATUS_DD) == 0)
    goto done;

  if (PREDICT_FALSE (ad->per_interface_next_index != ~0))
    next_index = ad->per_interface_next_index;

  if (PREDICT_FALSE (vnet_device_input_have_features (ad->sw_if_index)))
    vnet_feature_start_device_input (ad->sw_if_index, &next_index, bt);

  vlib_get_new_next_frame (vm, node, next_index, to_next, n_left_to_next);
  /* fetch up to AVF_RX_VECTOR_SZ from the rx ring, unflatten them and
     copy needed data from descriptor to rx vector */
  bi = to_next;

  while (n_rx_packets < AVF_RX_VECTOR_SZ)
    {
      if (next + 11 < size)
	{
	  int stride = 8;
	  clib_prefetch_load ((void *) (fd + (next + stride)));
	  clib_prefetch_load ((void *) (fd + (next + stride + 1)));
	  clib_prefetch_load ((void *) (fd + (next + stride + 2)));
	  clib_prefetch_load ((void *) (fd + (next + stride + 3)));
	}
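      /* fast path: test DD and EOP on 4 consecutive descriptors at once;
	 any chained or not-yet-ready descriptor drops us to the scalar
	 one_by_one path below */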
#ifdef CLIB_HAVE_VEC256
      if (n_rx_packets >= AVF_RX_VECTOR_SZ - 4 || next >= size - 4)
	goto one_by_one;

      q1x4 = u64x4_gather ((void *) &d[0].qword[1], (void *) &d[1].qword[1],
			   (void *) &d[2].qword[1], (void *) &d[3].qword[1]);

      /* not all packets are ready or at least one of them is chained */
      if (!u64x4_is_equal (q1x4 & dd_eop_mask4, dd_eop_mask4))
	goto one_by_one;

      or_q1x4 |= q1x4;

      u64x4_store_unaligned (q1x4, ptd->qw1s + n_rx_packets);
#elif defined(CLIB_HAVE_VEC128)
      if (n_rx_packets >= AVF_RX_VECTOR_SZ - 4 || next >= size - 4)
	goto one_by_one;

      q1x4_lo =
	u32x4_gather ((void *) &d[0].qword[1], (void *) &d[1].qword[1],
		      (void *) &d[2].qword[1], (void *) &d[3].qword[1]);

      /* not all packets are ready or at least one of them is chained */
      if (!u32x4_is_equal (q1x4_lo & dd_eop_mask4, dd_eop_mask4))
	goto one_by_one;

      or_q1x4 |= q1x4_lo;

      q1x4_hi = u32x4_gather (
	(void *) &d[0].qword[1] + 4, (void *) &d[1].qword[1] + 4,
	(void *) &d[2].qword[1] + 4, (void *) &d[3].qword[1] + 4);

      ptd->qw1s[n_rx_packets + 0] = (u64) q1x4_hi[0] << 32 | (u64) q1x4_lo[0];
      ptd->qw1s[n_rx_packets + 1] = (u64) q1x4_hi[1] << 32 | (u64) q1x4_lo[1];
      ptd->qw1s[n_rx_packets + 2] = (u64) q1x4_hi[2] << 32 | (u64) q1x4_lo[2];
      ptd->qw1s[n_rx_packets + 3] = (u64) q1x4_hi[3] << 32 | (u64) q1x4_lo[3];
#endif
#if defined(CLIB_HAVE_VEC256) || defined(CLIB_HAVE_VEC128)

      if (with_flows)
	{
	  fdidx4 = u32x4_gather (
	    (void *) &d[0].fdid_flex_hi, (void *) &d[1].fdid_flex_hi,
	    (void *) &d[2].fdid_flex_hi, (void *) &d[3].fdid_flex_hi);
	  u32x4_store_unaligned (fdidx4, ptd->flow_ids + n_rx_packets);
	}

      vlib_buffer_copy_indices (bi, rxq->bufs + next, 4);

      /* next */
      next = (next + 4) & mask;
      d = fd + next;
      n_rx_packets += 4;
      bi += 4;
      continue;

    one_by_one:
#endif
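      /* scalar path: handle one descriptor at a time, including packets
	 spanning multiple descriptors */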
      clib_prefetch_load ((void *) (fd + ((next + 8) & mask)));

      if (avf_rxd_is_not_dd (d))
	break;

      bi[0] = rxq->bufs[next];

      /* deal with chained buffers */
      if (PREDICT_FALSE (avf_rxd_is_not_eop (d)))
	{
	  u16 tail_desc = 0;
	  u16 tail_next = next;
	  avf_rx_tail_t *tail = ptd->tails + n_rx_packets;
	  avf_rx_desc_t *td;
	  do
	    {
	      /* next */
	      tail_next = (tail_next + 1) & mask;
	      td = fd + tail_next;

	      /* bail out in case of incomplete transaction */
	      if (avf_rxd_is_not_dd (td))
		goto no_more_desc;

	      or_qw1 |= tail->qw1s[tail_desc] = td[0].qword[1];
	      tail->buffers[tail_desc] = rxq->bufs[tail_next];
	      tail_desc++;
	    }
	  while (avf_rxd_is_not_eop (td));
	  next = tail_next;
	  n_tail_desc += tail_desc;
	}
      or_qw1 |= ptd->qw1s[n_rx_packets] = d[0].qword[1];
      if (PREDICT_FALSE (with_flows))
	{
	  ptd->flow_ids[n_rx_packets] = d[0].fdid_flex_hi;
	}

      /* next */
      next = (next + 1) & mask;
      d = fd + next;
      n_rx_packets++;
      bi++;
    }
no_more_desc:

  if (n_rx_packets == 0)
    goto done;
  rxq->next = next;
  rxq->n_enqueued -= n_rx_packets + n_tail_desc;

#if defined(CLIB_HAVE_VEC256) || defined(CLIB_HAVE_VEC128)
  or_qw1 |= or_q1x4[0] | or_q1x4[1] | or_q1x4[2] | or_q1x4[3];
#endif

  vlib_get_buffers (vm, to_next, ptd->bufs, n_rx_packets);
  vnet_buffer (bt)->sw_if_index[VLIB_RX] = ad->sw_if_index;
  vnet_buffer (bt)->sw_if_index[VLIB_TX] = ~0;
  bt->buffer_pool_index = rxq->buffer_pool_index;
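  /* use the multi-segment variant only when tail descriptors were seen */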
  if (n_tail_desc)
    n_rx_bytes = avf_process_rx_burst (vm, node, ptd, n_rx_packets, 1);
  else
    n_rx_bytes = avf_process_rx_burst (vm, node, ptd, n_rx_packets, 0);
  /* the MARKed packets may have different next nodes */
  if (PREDICT_FALSE (with_flows && (or_qw1 & AVF_RXD_STATUS_FLM)))
    {
      u32 n;
      single_next = 0;
      for (n = 0; n < n_rx_packets; n++)
	ptd->next[n] = next_index;

      avf_process_flow_offload (ad, ptd, n_rx_packets);
    }
  /* packet trace if enabled */
  if (PREDICT_FALSE ((n_trace = vlib_get_trace_count (vm, node))))
    {
      u32 n_left = n_rx_packets;
      u32 i, j;
      u16 *next_indices = ptd->next;

      i = 0;
      while (n_trace && n_left)
	{
	  vlib_buffer_t *b = ptd->bufs[i];
	  if (PREDICT_FALSE (single_next == 0))
	    next_index = next_indices[0];

	  if (PREDICT_TRUE
	      (vlib_trace_buffer
	       (vm, node, next_index, b, /* follow_chain */ 0)))
	    {
	      avf_input_trace_t *tr =
		vlib_add_trace (vm, node, b, sizeof (*tr));
	      tr->next_index = next_index;
	      tr->qid = qid;
	      tr->hw_if_index = ad->hw_if_index;
	      tr->qw1s[0] = ptd->qw1s[i];
	      tr->flow_id =
		(tr->qw1s[0] & AVF_RXD_STATUS_FLM) ? ptd->flow_ids[i] : 0;
	      for (j = 1; j < AVF_RX_MAX_DESC_IN_CHAIN; j++)
		tr->qw1s[j] = ptd->tails[i].qw1s[j - 1];

	      n_trace--;
	    }

	  /* next */
	  n_left--;
	  i++;
	  next_indices++;
	}
      vlib_set_trace_count (vm, node, n_trace);
    }
  /* enqueue the packets to the next nodes */
  if (PREDICT_FALSE (with_flows && (or_qw1 & AVF_RXD_STATUS_FLM)))
    {
      /* release next node's frame vector, in this case we use
	 vlib_buffer_enqueue_to_next to place the packets
       */
      vlib_put_next_frame (vm, node, next_index, n_left_to_next);

      /* enqueue buffers to the next node */
      vlib_buffer_enqueue_to_next (vm, node, to_next, ptd->next, n_rx_packets);
    }
  else
    {
      if (PREDICT_TRUE (next_index == VNET_DEVICE_INPUT_NEXT_ETHERNET_INPUT))
	{
	  vlib_next_frame_t *nf;
	  vlib_frame_t *f;
	  ethernet_input_frame_t *ef;
	  nf = vlib_node_runtime_get_next_frame (vm, node, next_index);
	  f = vlib_get_frame (vm, nf->frame);
	  f->flags = ETH_INPUT_FRAME_F_SINGLE_SW_IF_IDX;

	  ef = vlib_frame_scalar_args (f);
	  ef->sw_if_index = ad->sw_if_index;
	  ef->hw_if_index = ad->hw_if_index;

	  if ((or_qw1 & AVF_RXD_ERROR_IPE) == 0)
	    f->flags |= ETH_INPUT_FRAME_F_IP4_CKSUM_OK;
	  vlib_frame_no_append (f);
	}
      n_left_to_next -= n_rx_packets;
      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }
  vlib_increment_combined_counter (vnm->interface_main.combined_sw_if_counters
				   + VNET_INTERFACE_COUNTER_RX, thr_idx,
				   ad->hw_if_index, n_rx_packets, n_rx_bytes);
done:
  /* refill rx ring */
  if (ad->flags & AVF_DEVICE_F_VA_DMA)
    avf_rxq_refill (vm, node, rxq, 1 /* use_va_dma */ );
  else
    avf_rxq_refill (vm, node, rxq, 0 /* use_va_dma */ );

  return n_rx_packets;
}
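/* Input node dispatch function: iterate over the RX queues assigned to this
   thread and poll each admin-up device, selecting the flow-offload variant
   when RX flow offload is enabled. */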
VLIB_NODE_FN (avf_input_node) (vlib_main_t * vm, vlib_node_runtime_t * node,
			       vlib_frame_t * frame)
{
  u32 n_rx = 0;
  vnet_hw_if_rxq_poll_vector_t *pv;

  pv = vnet_hw_if_get_rxq_poll_vector (vm, node);

  for (int i = 0; i < vec_len (pv); i++)
    {
      avf_device_t *ad = avf_get_device (pv[i].dev_instance);
      if ((ad->flags & AVF_DEVICE_F_ADMIN_UP) == 0)
	continue;
      if (PREDICT_FALSE (ad->flags & AVF_DEVICE_F_RX_FLOW_OFFLOAD))
	n_rx +=
	  avf_device_input_inline (vm, node, frame, ad, pv[i].queue_id, 1);
      else
	n_rx +=
	  avf_device_input_inline (vm, node, frame, ad, pv[i].queue_id, 0);
    }

  return n_rx;
}
VLIB_REGISTER_NODE (avf_input_node) = {
  .name = "avf-input",
  .sibling_of = "device-input",
  .format_trace = format_avf_input_trace,
  .type = VLIB_NODE_TYPE_INPUT,
  .state = VLIB_NODE_STATE_DISABLED,
  .n_errors = AVF_INPUT_N_ERROR,
  .error_strings = avf_input_error_strings,
  .flags = VLIB_NODE_FLAG_TRACE_SUPPORTED,
};
/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */