/*
 *------------------------------------------------------------------
 * Copyright (c) 2018 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *------------------------------------------------------------------
 */
#include <vlib/vlib.h>
#include <vlib/unix/unix.h>
#include <vlib/pci/pci.h>
#include <vnet/ethernet/ethernet.h>
#include <vnet/devices/devices.h>

#include <avf/avf.h>
#define foreach_avf_input_error \
  _(BUFFER_ALLOC, "buffer alloc error") \
  _(RX_PACKET_ERROR, "Rx packet errors")

typedef enum
{
#define _(f,s) AVF_INPUT_ERROR_##f,
  foreach_avf_input_error
#undef _
    AVF_INPUT_N_ERROR,
} avf_input_error_t;

static __clib_unused char *avf_input_error_strings[] = {
#define _(n,s) s,
  foreach_avf_input_error
#undef _
};
#define AVF_RX_DESC_STATUS(x)		(1 << x)
#define AVF_RX_DESC_STATUS_DD		AVF_RX_DESC_STATUS(0)
#define AVF_RX_DESC_STATUS_EOP		AVF_RX_DESC_STATUS(1)

#define AVF_INPUT_REFILL_THRESHOLD	32
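
/*
 * Layout of the descriptor write-back qword[1], inferred from the
 * avf_get_u64_bits () extractions in avf_device_input_inline () below
 * (a sketch for orientation, not an authoritative datasheet reference):
 *
 *   bits  0..18  status (bit 0 = DD / descriptor done, bit 1 = EOP)
 *   bits 19..26  error
 *   bits 30..37  ptype
 *   bits 38..63  length
 *
 * Illustrative helper built on that assumption (not used by the driver):
 * test whether a descriptor has been written back and ends a packet.
 */
static_always_inline int
avf_rx_desc_is_complete_packet (avf_rx_desc_t * d)
{
  const u64 dd_eop = AVF_RX_DESC_STATUS_DD | AVF_RX_DESC_STATUS_EOP;
  return (d->qword[1] & dd_eop) == dd_eop;
}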
static_always_inline void
avf_rxq_refill (vlib_main_t * vm, vlib_node_runtime_t * node, avf_rxq_t * rxq,
		int use_iova)
{
  u16 n_refill, mask, n_alloc, slot;
  u32 s0, s1, s2, s3;
  avf_rx_desc_t *d[4];
  vlib_buffer_t *b;

  /* keep one slot empty so a full ring is distinguishable from an
     empty one */
  n_refill = rxq->size - 1 - rxq->n_bufs;
  if (PREDICT_TRUE (n_refill <= AVF_INPUT_REFILL_THRESHOLD))
    return;

  mask = rxq->size - 1;
  slot = (rxq->next - n_refill - 1) & mask;

  n_refill &= ~7;		/* round to 8 */
  n_alloc = vlib_buffer_alloc_to_ring (vm, rxq->bufs, slot, rxq->size,
				       n_refill);

  if (PREDICT_FALSE (n_alloc != n_refill))
    {
      vlib_error_count (vm, node->node_index,
			AVF_INPUT_ERROR_BUFFER_ALLOC, 1);
      if (n_alloc)
	vlib_buffer_free (vm, rxq->bufs + slot, n_alloc);
      return;
    }

  rxq->n_bufs += n_alloc;
  while (n_alloc >= 4)
    {
      if (PREDICT_TRUE (slot + 3 < rxq->size))
	{
	  s0 = slot;
	  s1 = slot + 1;
	  s2 = slot + 2;
	  s3 = slot + 3;
	}
      else
	{
	  s0 = slot;
	  s1 = (slot + 1) & mask;
	  s2 = (slot + 2) & mask;
	  s3 = (slot + 3) & mask;
	}

      d[0] = ((avf_rx_desc_t *) rxq->descs) + s0;
      d[1] = ((avf_rx_desc_t *) rxq->descs) + s1;
      d[2] = ((avf_rx_desc_t *) rxq->descs) + s2;
      d[3] = ((avf_rx_desc_t *) rxq->descs) + s3;

      if (use_iova)
	{
	  b = vlib_get_buffer (vm, rxq->bufs[s0]);
	  d[0]->qword[0] = pointer_to_uword (b->data);
	  b = vlib_get_buffer (vm, rxq->bufs[s1]);
	  d[1]->qword[0] = pointer_to_uword (b->data);
	  b = vlib_get_buffer (vm, rxq->bufs[s2]);
	  d[2]->qword[0] = pointer_to_uword (b->data);
	  b = vlib_get_buffer (vm, rxq->bufs[s3]);
	  d[3]->qword[0] = pointer_to_uword (b->data);
	}
      else
	{
	  d[0]->qword[0] =
	    vlib_get_buffer_data_physical_address (vm, rxq->bufs[s0]);
	  d[1]->qword[0] =
	    vlib_get_buffer_data_physical_address (vm, rxq->bufs[s1]);
	  d[2]->qword[0] =
	    vlib_get_buffer_data_physical_address (vm, rxq->bufs[s2]);
	  d[3]->qword[0] =
	    vlib_get_buffer_data_physical_address (vm, rxq->bufs[s3]);
	}

      /* clear the write-back qword so the DD bit reads as not-done */
      d[0]->qword[1] = d[1]->qword[1] = d[2]->qword[1] = d[3]->qword[1] = 0;

      n_alloc -= 4;
      slot = (slot + 4) & mask;
    }

  while (n_alloc)
    {
      s0 = slot;
      d[0] = ((avf_rx_desc_t *) rxq->descs) + s0;
      if (use_iova)
	{
	  b = vlib_get_buffer (vm, rxq->bufs[s0]);
	  d[0]->qword[0] = pointer_to_uword (b->data);
	}
      else
	d[0]->qword[0] =
	  vlib_get_buffer_data_physical_address (vm, rxq->bufs[s0]);
      d[0]->qword[1] = 0;
      n_alloc -= 1;
      slot = (slot + 1) & mask;
    }

  /* make descriptor writes visible to the device before bumping tail */
  CLIB_MEMORY_BARRIER ();
  *(rxq->qrx_tail) = slot;
}
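
/*
 * Worked example (illustrative only): the ring arithmetic above relies
 * on rxq->size being a power of two, so "slot & mask" replaces an
 * expensive modulo.  With size = 512, mask = 511, a slot of 510 plus 4
 * yields 514, and 514 & 511 == 2, i.e. the index wraps cleanly around
 * the ring.  Keeping one descriptor unused (size - 1 - n_bufs) is what
 * lets a completely full ring be told apart from an empty one.
 */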
static_always_inline void
avf_check_for_error (vlib_node_runtime_t * node, avf_rx_vector_entry_t * rxve,
		     vlib_buffer_t * b, u16 * next)
{
  avf_main_t *am = &avf_main;
  avf_ptype_t *ptype;
  if (PREDICT_FALSE (rxve->error))
    {
      b->error = node->errors[AVF_INPUT_ERROR_RX_PACKET_ERROR];
      ptype = am->ptypes + rxve->ptype;
      /* retract the advance applied in avf_find_next, without
         modifying the shared ptype table entry */
      vlib_buffer_advance (b, 0 - ptype->buffer_advance);
      *next = VNET_DEVICE_INPUT_NEXT_DROP;
    }
}
static_always_inline u32
avf_find_next (avf_rx_vector_entry_t * rxve, vlib_buffer_t * b,
	       int maybe_tagged)
{
  avf_main_t *am = &avf_main;
  avf_ptype_t *ptype;
  ethernet_header_t *e = (ethernet_header_t *) b->data;
  if (maybe_tagged && ethernet_frame_is_tagged (e->type))
    return VNET_DEVICE_INPUT_NEXT_ETHERNET_INPUT;
  ptype = am->ptypes + rxve->ptype;
  vlib_buffer_advance (b, ptype->buffer_advance);
  b->flags |= ptype->flags;
  return ptype->next_node;
}
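
/*
 * Note (assumed semantics, for orientation): am->ptypes is a table
 * indexed by the 8-bit hardware packet type.  Each avf_ptype_t entry
 * carries the graph next_node to dispatch to, buffer flags to set and
 * a buffer_advance telling how many bytes to skip so the next node
 * sees the header it expects.  Tagged ethernet frames bypass the table
 * and go to ethernet-input, which does its own VLAN handling.
 */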
static_always_inline uword
avf_process_rx_burst (vlib_main_t * vm, vlib_node_runtime_t * node,
		      vlib_buffer_t * bt, avf_rx_vector_entry_t * rxve,
		      vlib_buffer_t ** b, u16 * next, u32 n_rxv,
		      u8 maybe_error, int known_next)
{
  uword n_rx_bytes = 0;

  while (n_rxv >= 4)
    {
      if (n_rxv >= 12)
	{
	  vlib_prefetch_buffer_header (b[8], LOAD);
	  vlib_prefetch_buffer_header (b[9], LOAD);
	  vlib_prefetch_buffer_header (b[10], LOAD);
	  vlib_prefetch_buffer_header (b[11], LOAD);
	  if (!known_next)
	    {
	      /* data is only touched when we still have to parse the
	         ethertype, so prefetch it only in that case */
	      CLIB_PREFETCH (b[8]->data, CLIB_CACHE_LINE_BYTES, LOAD);
	      CLIB_PREFETCH (b[9]->data, CLIB_CACHE_LINE_BYTES, LOAD);
	      CLIB_PREFETCH (b[10]->data, CLIB_CACHE_LINE_BYTES, LOAD);
	      CLIB_PREFETCH (b[11]->data, CLIB_CACHE_LINE_BYTES, LOAD);
	    }
	}

      n_rx_bytes += b[0]->current_length = rxve[0].length;
      n_rx_bytes += b[1]->current_length = rxve[1].length;
      n_rx_bytes += b[2]->current_length = rxve[2].length;
      n_rx_bytes += b[3]->current_length = rxve[3].length;

      if (!known_next)
	{
	  ethernet_header_t *e0, *e1, *e2, *e3;

	  e0 = (ethernet_header_t *) b[0]->data;
	  e1 = (ethernet_header_t *) b[1]->data;
	  e2 = (ethernet_header_t *) b[2]->data;
	  e3 = (ethernet_header_t *) b[3]->data;

	  if (ethernet_frame_is_any_tagged_x4 (e0->type, e1->type,
					       e2->type, e3->type))
	    {
	      next[0] = avf_find_next (rxve, b[0], 1);
	      next[1] = avf_find_next (rxve + 1, b[1], 1);
	      next[2] = avf_find_next (rxve + 2, b[2], 1);
	      next[3] = avf_find_next (rxve + 3, b[3], 1);
	    }
	  else
	    {
	      next[0] = avf_find_next (rxve, b[0], 0);
	      next[1] = avf_find_next (rxve + 1, b[1], 0);
	      next[2] = avf_find_next (rxve + 2, b[2], 0);
	      next[3] = avf_find_next (rxve + 3, b[3], 0);
	    }

	  if (PREDICT_FALSE (maybe_error))
	    {
	      avf_check_for_error (node, rxve + 0, b[0], next);
	      avf_check_for_error (node, rxve + 1, b[1], next + 1);
	      avf_check_for_error (node, rxve + 2, b[2], next + 2);
	      avf_check_for_error (node, rxve + 3, b[3], next + 3);
	    }
	}
      else if (bt->current_config_index)
	{
	  b[0]->current_config_index = bt->current_config_index;
	  b[1]->current_config_index = bt->current_config_index;
	  b[2]->current_config_index = bt->current_config_index;
	  b[3]->current_config_index = bt->current_config_index;
	  vnet_buffer (b[0])->feature_arc_index =
	    vnet_buffer (bt)->feature_arc_index;
	  vnet_buffer (b[1])->feature_arc_index =
	    vnet_buffer (bt)->feature_arc_index;
	  vnet_buffer (b[2])->feature_arc_index =
	    vnet_buffer (bt)->feature_arc_index;
	  vnet_buffer (b[3])->feature_arc_index =
	    vnet_buffer (bt)->feature_arc_index;
	}

      clib_memcpy (vnet_buffer (b[0])->sw_if_index,
		   vnet_buffer (bt)->sw_if_index, 2 * sizeof (u32));
      clib_memcpy (vnet_buffer (b[1])->sw_if_index,
		   vnet_buffer (bt)->sw_if_index, 2 * sizeof (u32));
      clib_memcpy (vnet_buffer (b[2])->sw_if_index,
		   vnet_buffer (bt)->sw_if_index, 2 * sizeof (u32));
      clib_memcpy (vnet_buffer (b[3])->sw_if_index,
		   vnet_buffer (bt)->sw_if_index, 2 * sizeof (u32));

      VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b[0]);
      VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b[1]);
      VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b[2]);
      VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b[3]);

      /* next */
      rxve += 4;
      b += 4;
      next += 4;
      n_rxv -= 4;
    }

  while (n_rxv)
    {
      b[0]->current_length = rxve->length;
      n_rx_bytes += b[0]->current_length;

      if (!known_next)
	{
	  next[0] = avf_find_next (rxve, b[0], 1);
	  avf_check_for_error (node, rxve + 0, b[0], next);
	}
      else if (bt->current_config_index)
	{
	  b[0]->current_config_index = bt->current_config_index;
	  vnet_buffer (b[0])->feature_arc_index =
	    vnet_buffer (bt)->feature_arc_index;
	}

      clib_memcpy (vnet_buffer (b[0])->sw_if_index,
		   vnet_buffer (bt)->sw_if_index, 2 * sizeof (u32));

      VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b[0]);

      /* next */
      rxve += 1;
      b += 1;
      next += 1;
      n_rxv -= 1;
    }

  return n_rx_bytes;
}
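
/*
 * Design note: the quad loop above is software-pipelined -- while
 * packets 0..3 are processed, the buffer headers (and, when the next
 * node still has to be parsed from the ethertype, the first data cache
 * line) of packets 8..11 are prefetched, hiding memory latency behind
 * useful work.  The single-packet loop mops up the remainder when
 * n_rxv is not a multiple of 4.
 */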
static_always_inline uword
avf_device_input_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
			 vlib_frame_t * frame, avf_device_t * ad, u16 qid)
{
  avf_main_t *am = &avf_main;
  vnet_main_t *vnm = vnet_get_main ();
  u32 thr_idx = vlib_get_thread_index ();
  avf_per_thread_data_t *ptd =
    vec_elt_at_index (am->per_thread_data, thr_idx);
  avf_rxq_t *rxq = vec_elt_at_index (ad->rxqs, qid);
  avf_rx_vector_entry_t *rxve = 0;
  uword n_trace;
  avf_rx_desc_t *d;
  u32 n_rx_packets = 0, n_rx_bytes = 0;
  u16 mask = rxq->size - 1;
  u16 n_rxv = 0;
  u8 maybe_error = 0;
  u32 buffer_indices[AVF_RX_VECTOR_SZ], *bi;
  u16 nexts[AVF_RX_VECTOR_SZ], *next;
  vlib_buffer_t *bufs[AVF_RX_VECTOR_SZ];
  vlib_buffer_t *bt = &ptd->buffer_template;
  int known_next = 0;
  u32 next_index = VNET_DEVICE_INPUT_NEXT_ETHERNET_INPUT;

  STATIC_ASSERT_SIZEOF (avf_rx_vector_entry_t, 8);
  STATIC_ASSERT_OFFSET_OF (avf_rx_vector_entry_t, status, 0);
  STATIC_ASSERT_OFFSET_OF (avf_rx_vector_entry_t, length, 4);
  STATIC_ASSERT_OFFSET_OF (avf_rx_vector_entry_t, ptype, 6);
  STATIC_ASSERT_OFFSET_OF (avf_rx_vector_entry_t, error, 7);

  /* fetch up to AVF_RX_VECTOR_SZ descriptors from the rx ring and copy
     the data we need from each descriptor into the rx vector */
  d = rxq->descs + rxq->next;
  bi = buffer_indices;
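
  /*
   * The static asserts above matter because the AVX2 path below builds
   * four packed 8-byte entries in one 256-bit register and stores them
   * directly over the rx_vector array -- any change to the field
   * offsets would silently corrupt the extraction.
   */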
  while (n_rxv < AVF_RX_VECTOR_SZ)
    {
      if (rxq->next + 11 < rxq->size)
	{
	  int stride = 8;
	  CLIB_PREFETCH (rxq->descs + (rxq->next + stride),
			 CLIB_CACHE_LINE_BYTES, LOAD);
	  CLIB_PREFETCH (rxq->descs + (rxq->next + stride + 1),
			 CLIB_CACHE_LINE_BYTES, LOAD);
	  CLIB_PREFETCH (rxq->descs + (rxq->next + stride + 2),
			 CLIB_CACHE_LINE_BYTES, LOAD);
	  CLIB_PREFETCH (rxq->descs + (rxq->next + stride + 3),
			 CLIB_CACHE_LINE_BYTES, LOAD);
	}
#ifdef CLIB_HAVE_VEC256
      u64x4 q1x4, v, err4;
      u64x4 status_dd_eop_mask = u64x4_splat (0x3);

      if (n_rxv >= AVF_RX_VECTOR_SZ - 4)
	goto one_by_one;

      if (rxq->next >= rxq->size - 4)
	goto one_by_one;

      /* load write-back quadword (qword[1]) of 4 descriptors into a
         256-bit vector register */
      q1x4 = u64x4_gather ((void *) &d[0].qword[1], (void *) &d[1].qword[1],
			   (void *) &d[2].qword[1], (void *) &d[3].qword[1]);

      /* not all packets are ready or at least one of them is chained */
      if (!u64x4_is_equal (q1x4 & status_dd_eop_mask, status_dd_eop_mask))
	goto one_by_one;

      /* shift and mask status, length, ptype and err */
      v = q1x4 & u64x4_splat ((u64) 0x3FFFFULL);
      v |= (q1x4 >> 6) & u64x4_splat ((u64) 0xFFFF << 32);
      v |= (q1x4 << 18) & u64x4_splat ((u64) 0xFF << 48);
      v |= err4 = (q1x4 << 37) & u64x4_splat ((u64) 0xFF << 56);

      u64x4_store_unaligned (v, ptd->rx_vector + n_rxv);
      maybe_error |= !u64x4_is_all_zero (err4);

      clib_memcpy (bi, rxq->bufs + rxq->next, 4 * sizeof (u32));

      /* next */
      rxq->next = (rxq->next + 4) & mask;
      d = rxq->descs + rxq->next;
      n_rxv += 4;
      rxq->n_bufs -= 4;
      bi += 4;
      continue;

    one_by_one:
#endif
      CLIB_PREFETCH (rxq->descs + ((rxq->next + 8) & mask),
		     CLIB_CACHE_LINE_BYTES, LOAD);
      if ((d->qword[1] & AVF_RX_DESC_STATUS_DD) == 0)
	break;

      rxve = ptd->rx_vector + n_rxv;
      bi[0] = rxq->bufs[rxq->next];
      rxve->status = avf_get_u64_bits (d, 8, 18, 0);
      rxve->error = avf_get_u64_bits (d, 8, 26, 19);
      rxve->ptype = avf_get_u64_bits (d, 8, 37, 30);
      rxve->length = avf_get_u64_bits (d, 8, 63, 38);
      maybe_error |= rxve->error;

      /* deal with chained buffers */
      while (PREDICT_FALSE ((d->qword[1] & AVF_RX_DESC_STATUS_EOP) == 0))
	{
	  /* multi-segment packets are not supported yet */
	  clib_error ("fixme");
	}

      /* next */
      rxq->next = (rxq->next + 1) & mask;
      d = rxq->descs + rxq->next;
      n_rxv++;
      rxq->n_bufs--;
      bi++;
    }
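
  /*
   * Worked example of the AVX2 field packing above (illustrative): each
   * descriptor qword[1] becomes one 8-byte rx vector entry via three
   * shifted-and-masked copies, closely mirroring the scalar
   * avf_get_u64_bits () extraction (the scalar path keeps status bit 18
   * as well):
   *
   *   status: desc bits  0..17 kept in place       (mask 0x3FFFF)
   *   length: desc bits 38..53 shifted right by 6  -> entry bits 32..47
   *   ptype:  desc bits 30..37 shifted left by 18  -> entry bits 48..55
   *   error:  desc bits 19..26 shifted left by 37  -> entry bits 56..63
   */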
  /* refill rx ring */
  if (ad->flags & AVF_DEVICE_F_IOVA)
    avf_rxq_refill (vm, node, rxq, 1 /* use_iova */ );
  else
    avf_rxq_refill (vm, node, rxq, 0 /* use_iova */ );

  vlib_get_buffers (vm, buffer_indices, bufs, n_rxv);
  n_rx_packets = n_rxv;

  /* fill the buffer template shared by all packets in this burst */
  vnet_buffer (bt)->sw_if_index[VLIB_RX] = ad->sw_if_index;
  vnet_buffer (bt)->sw_if_index[VLIB_TX] = ~0;
  /* redirect packets if a per-interface next node is configured */
  if (PREDICT_FALSE (ad->per_interface_next_index != ~0))
    {
      known_next = 1;
      next_index = ad->per_interface_next_index;
    }

  /* as all packets belong to the same interface, the feature arc lookup
     can be done once and the result stored */
  if (PREDICT_FALSE (vnet_device_input_have_features (ad->sw_if_index)))
    {
      vnet_feature_start_device_input_x1 (ad->sw_if_index, &next_index, bt);
      known_next = 1;
    }

  if (known_next)
    {
      clib_memset_u16 (nexts, next_index, n_rxv);
      n_rx_bytes = avf_process_rx_burst (vm, node, bt, ptd->rx_vector, bufs,
					 nexts, n_rxv, maybe_error, 1);
      vnet_buffer (bt)->feature_arc_index = 0;
      bt->current_config_index = 0;
    }
  else
    n_rx_bytes = avf_process_rx_burst (vm, node, bt, ptd->rx_vector, bufs,
				       nexts, n_rxv, maybe_error, 0);
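
  /*
   * Design choice: when every packet is known to go to the same next
   * node (per-interface redirect or an active input feature arc),
   * nexts[] is filled with a single memset and the template buffer's
   * config/feature metadata is copied into each packet inside
   * avf_process_rx_burst (known_next = 1), skipping the per-packet
   * ethertype parse entirely.
   */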
  /* packet trace if enabled */
  if (PREDICT_FALSE ((n_trace = vlib_get_trace_count (vm, node))))
    {
      u32 n_left = n_rx_packets;
      bi = buffer_indices;
      next = nexts;
      rxve = ptd->rx_vector;
      while (n_trace && n_left)
	{
	  vlib_buffer_t *b;
	  avf_input_trace_t *tr;
	  b = vlib_get_buffer (vm, bi[0]);
	  vlib_trace_buffer (vm, node, next[0], b, /* follow_chain */ 0);
	  tr = vlib_add_trace (vm, node, b, sizeof (*tr));
	  tr->next_index = next[0];
	  tr->hw_if_index = ad->hw_if_index;
	  clib_memcpy (&tr->rxve, rxve, sizeof (avf_rx_vector_entry_t));

	  /* next */
	  n_trace--;
	  n_left--;
	  bi++;
	  next++;
	  rxve++;
	}
      vlib_set_trace_count (vm, node, n_trace);
    }
  vlib_buffer_enqueue_to_next (vm, node, buffer_indices, nexts, n_rx_packets);
  vlib_increment_combined_counter (vnm->interface_main.combined_sw_if_counters
				   + VNET_INTERFACE_COUNTER_RX, thr_idx,
				   ad->hw_if_index, n_rx_packets, n_rx_bytes);
  return n_rx_packets;
}
VLIB_NODE_FN (avf_input_node) (vlib_main_t * vm, vlib_node_runtime_t * node,
			       vlib_frame_t * frame)
{
  u32 n_rx = 0;
  avf_main_t *am = &avf_main;
  vnet_device_input_runtime_t *rt = (void *) node->runtime_data;
  vnet_device_and_queue_t *dq;

  foreach_device_and_queue (dq, rt->devices_and_queues)
  {
    avf_device_t *ad;
    ad = vec_elt_at_index (am->devices, dq->dev_instance);
    if ((ad->flags & AVF_DEVICE_F_ADMIN_UP) == 0)
      continue;
    n_rx += avf_device_input_inline (vm, node, frame, ad, dq->queue_id);
  }

  return n_rx;
}
#ifndef CLIB_MARCH_VARIANT
/* *INDENT-OFF* */
VLIB_REGISTER_NODE (avf_input_node) = {
  .name = "avf-input",
  .sibling_of = "device-input",
  .format_trace = format_avf_input_trace,
  .type = VLIB_NODE_TYPE_INPUT,
  .state = VLIB_NODE_STATE_DISABLED,
  .n_errors = AVF_INPUT_N_ERROR,
  .error_strings = avf_input_error_strings,
};
/* *INDENT-ON* */
#endif
/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */