/*
 * Copyright (c) 2015 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include <vnet/vnet.h>
#include <vppinfra/vec.h>
#include <vppinfra/error.h>
#include <vppinfra/format.h>
#include <vppinfra/xxhash.h>

#include <vnet/ethernet/ethernet.h>
#include <dpdk/device/dpdk.h>
#include <vnet/classify/vnet_classify.h>
#include <vnet/mpls/packet.h>
#include <vnet/handoff.h>
#include <vnet/devices/devices.h>
#include <vnet/feature/feature.h>

#include <dpdk/device/dpdk_priv.h>
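
/*
 * This file is compiled once per CPU variant (see the CLIB_MULTIARCH_FN
 * wrapper and the weak avx2/avx512 symbols near the bottom).  Symbols that
 * must have a single definition, such as the error strings and the node
 * registration, are emitted only in the default, non-multiarch build.
 */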
#ifndef CLIB_MULTIARCH_VARIANT
static char *dpdk_error_strings[] = {
#define _(n,s) s,
  foreach_dpdk_error
#undef _
};
#endif

static_always_inline u32
dpdk_rx_next_from_etype (struct rte_mbuf *mb)
{
  ethernet_header_t *h = rte_pktmbuf_mtod (mb, ethernet_header_t *);

  if (PREDICT_TRUE (h->type == clib_host_to_net_u16 (ETHERNET_TYPE_IP4)))
    {
      if (PREDICT_TRUE ((mb->ol_flags & PKT_RX_IP_CKSUM_GOOD) != 0))
	return VNET_DEVICE_INPUT_NEXT_IP4_NCS_INPUT;

      return VNET_DEVICE_INPUT_NEXT_IP4_INPUT;
    }
  else if (PREDICT_TRUE (h->type == clib_host_to_net_u16 (ETHERNET_TYPE_IP6)))
    return VNET_DEVICE_INPUT_NEXT_IP6_INPUT;

  if (PREDICT_TRUE (h->type == clib_host_to_net_u16 (ETHERNET_TYPE_MPLS)))
    return VNET_DEVICE_INPUT_NEXT_MPLS_INPUT;

  return VNET_DEVICE_INPUT_NEXT_ETHERNET_INPUT;
}
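
/*
 * Translate mbuf offload flags into a VPP error code; a packet flagged
 * with a bad IP checksum is redirected to the drop node.
 */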
static_always_inline void
dpdk_rx_error_from_mb (struct rte_mbuf *mb, u32 * next, u8 * error)
{
  if (mb->ol_flags & PKT_RX_IP_CKSUM_BAD)
    {
      *error = DPDK_ERROR_IP_CHECKSUM_ERROR;
      *next = VNET_DEVICE_INPUT_NEXT_DROP;
    }
  else
    *error = DPDK_ERROR_NONE;
}

static_always_inline void
dpdk_add_trace (vlib_main_t * vm, vlib_node_runtime_t * node, u32 next,
		dpdk_device_t * xd, u16 queue_id,
		vlib_buffer_t * b, struct rte_mbuf *mb)
{
  vlib_trace_buffer (vm, node, next, b, /* follow_chain */ 0);

  dpdk_rx_dma_trace_t *t0 = vlib_add_trace (vm, node, b, sizeof t0[0]);
  t0->queue_index = queue_id;
  t0->device_index = xd->device_index;
  t0->buffer_index = vlib_get_buffer_index (vm, b);

  clib_memcpy (&t0->mb, mb, sizeof t0->mb);
  clib_memcpy (&t0->buffer, b, sizeof b[0] - sizeof b->pre_data);
  clib_memcpy (t0->buffer.pre_data, b->data, sizeof t0->buffer.pre_data);
  clib_memcpy (&t0->data, mb->buf_addr + mb->data_off, sizeof t0->data);
}
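
/*
 * Pull a burst of mbufs from the PMD into the per-queue rx vector,
 * repeating until a full VLIB_FRAME_SIZE worth of slots is used or a short
 * burst indicates the queue has been drained; returns the number of mbufs
 * collected.
 */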
static_always_inline u32
dpdk_rx_burst (dpdk_main_t * dm, dpdk_device_t * xd, u16 queue_id)
{
  u32 n_buffers = 0, n_this_chunk = 0, n_left;

  n_left = VLIB_FRAME_SIZE;

  if (PREDICT_TRUE (xd->flags & DPDK_DEVICE_FLAG_PMD))
    while (n_left)
      {
	n_this_chunk = rte_eth_rx_burst (xd->device_index, queue_id,
					 xd->rx_vectors[queue_id] + n_buffers,
					 n_left);
	n_buffers += n_this_chunk;
	n_left -= n_this_chunk;

	/* Empirically, DPDK r1.8 produces vectors w/ 32 or fewer elts */
	if (n_this_chunk < 32)
	  break;
      }

  return n_buffers;
}
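
/*
 * For multi-segment packets, walk the remaining mbuf segments and chain
 * their vlib_buffer_t counterparts onto the head buffer via next_buffer /
 * VLIB_BUFFER_NEXT_PRESENT, accumulating
 * total_length_not_including_first_buffer along the way.
 */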
static_always_inline void
dpdk_process_subseq_segs (vlib_main_t * vm, vlib_buffer_t * b,
			  struct rte_mbuf *mb, vlib_buffer_free_list_t * fl)
{
  u8 nb_seg = 1;
  struct rte_mbuf *mb_seg = 0;
  vlib_buffer_t *b_seg, *b_chain = 0;

  mb_seg = mb->next;
  b_chain = b;

  b->flags |= VLIB_BUFFER_TOTAL_LENGTH_VALID;
  b->total_length_not_including_first_buffer = 0;

  while (nb_seg < mb->nb_segs)
    {
      ASSERT (mb_seg != 0);

      b_seg = vlib_buffer_from_rte_mbuf (mb_seg);
      vlib_buffer_init_for_free_list (b_seg, fl);

      ASSERT ((b_seg->flags & VLIB_BUFFER_NEXT_PRESENT) == 0);
      ASSERT (b_seg->current_data == 0);

      /*
       * The driver (e.g. virtio) may not put the packet data at the start
       * of the segment, so don't assume b_seg->current_data == 0 is correct.
       */
      b_seg->current_data =
	(mb_seg->buf_addr + mb_seg->data_off) - (void *) b_seg->data;

      b_seg->current_length = mb_seg->data_len;
      b->total_length_not_including_first_buffer += mb_seg->data_len;

      b_chain->flags |= VLIB_BUFFER_NEXT_PRESENT;
      b_chain->next_buffer = vlib_get_buffer_index (vm, b_seg);

      b_chain = b_seg;
      mb_seg = mb_seg->next;
      nb_seg++;
    }
}
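
/*
 * Prefetch helpers: warm the cache line holding the mbuf metadata and the
 * corresponding vlib_buffer_t header (dpdk_prefetch_buffer), or just the
 * line holding the ethertype (dpdk_prefetch_ethertype), a few packets
 * ahead of where the processing loops below are working.
 */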
static_always_inline void
dpdk_prefetch_buffer (struct rte_mbuf *mb)
{
  vlib_buffer_t *b = vlib_buffer_from_rte_mbuf (mb);
  CLIB_PREFETCH (mb, CLIB_CACHE_LINE_BYTES, LOAD);
  CLIB_PREFETCH (b, CLIB_CACHE_LINE_BYTES, STORE);
}

static_always_inline void
dpdk_prefetch_ethertype (struct rte_mbuf *mb)
{
  CLIB_PREFETCH (mb->buf_addr + mb->data_off +
		 STRUCT_OFFSET_OF (ethernet_header_t, type),
		 CLIB_CACHE_LINE_BYTES, LOAD);
}

/*
 * This function is used when there are no worker threads.
 * The main thread performs IO and forwards the packets.
 */
static_always_inline u32
dpdk_device_input (dpdk_main_t * dm, dpdk_device_t * xd,
		   vlib_node_runtime_t * node, u32 thread_index, u16 queue_id,
		   int maybe_multiseg, u32 n_trace)
{
  u32 n_buffers;
  u32 next_index = VNET_DEVICE_INPUT_NEXT_ETHERNET_INPUT;
  u32 n_left_to_next, *to_next;
  u32 mb_index = 0;
  vlib_main_t *vm = vlib_get_main ();
  uword n_rx_bytes = 0;
  vlib_buffer_free_list_t *fl;
  vlib_buffer_t *bt = vec_elt_at_index (dm->buffer_templates, thread_index);

  if ((xd->flags & DPDK_DEVICE_FLAG_ADMIN_UP) == 0)
    return 0;

  n_buffers = dpdk_rx_burst (dm, xd, queue_id);

  fl = vlib_buffer_get_free_list (vm, VLIB_BUFFER_DEFAULT_FREE_LIST_INDEX);

  /* Update buffer template */
  vnet_buffer (bt)->sw_if_index[VLIB_RX] = xd->vlib_sw_if_index;
  bt->error = node->errors[DPDK_ERROR_NONE];
  /* DPDK allocates the empty buffers for each queue from the mempool
     provided before the interface is started, so it is safe to store the
     pool index in the template */
  bt->buffer_pool_index = xd->buffer_pool_for_queue[queue_id];
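
  /* The template is copied verbatim into every received buffer (see
     clib_memcpy64_x4 / clib_memcpy below), so anything constant for the
     whole burst is set here once instead of once per packet. */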

  while (n_buffers > 0)
    {
      vlib_buffer_t *b0, *b1, *b2, *b3;
      u32 bi0, bi1, bi2, bi3;
      u32 next0, next1, next2, next3;
      u8 error0, error1, error2, error3;
      i16 offset0, offset1, offset2, offset3;
      u64 or_ol_flags;

      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

      while (n_buffers >= 12 && n_left_to_next >= 4)
        {
          struct rte_mbuf *mb0, *mb1, *mb2, *mb3;

          /* prefetches are interleaved with the rest of the code to reduce
             pressure on the L1 cache */
          dpdk_prefetch_buffer (xd->rx_vectors[queue_id][mb_index + 8]);
          dpdk_prefetch_ethertype (xd->rx_vectors[queue_id][mb_index + 4]);

          mb0 = xd->rx_vectors[queue_id][mb_index];
          mb1 = xd->rx_vectors[queue_id][mb_index + 1];
          mb2 = xd->rx_vectors[queue_id][mb_index + 2];
          mb3 = xd->rx_vectors[queue_id][mb_index + 3];

          if (PREDICT_FALSE (mb0->nb_segs > 1))
            dpdk_prefetch_buffer (mb0->next);
          if (PREDICT_FALSE (mb1->nb_segs > 1))
            dpdk_prefetch_buffer (mb1->next);
          if (PREDICT_FALSE (mb2->nb_segs > 1))
            dpdk_prefetch_buffer (mb2->next);
          if (PREDICT_FALSE (mb3->nb_segs > 1))
            dpdk_prefetch_buffer (mb3->next);
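
          /* Each vlib_buffer_t lives in the private data area of its mbuf,
             so vlib_buffer_from_rte_mbuf () is pure pointer arithmetic. */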
          b0 = vlib_buffer_from_rte_mbuf (mb0);
          b1 = vlib_buffer_from_rte_mbuf (mb1);
          b2 = vlib_buffer_from_rte_mbuf (mb2);
          b3 = vlib_buffer_from_rte_mbuf (mb3);

          dpdk_prefetch_buffer (xd->rx_vectors[queue_id][mb_index + 9]);
          dpdk_prefetch_ethertype (xd->rx_vectors[queue_id][mb_index + 5]);

          clib_memcpy64_x4 (b0, b1, b2, b3, bt);

          dpdk_prefetch_buffer (xd->rx_vectors[queue_id][mb_index + 10]);
          dpdk_prefetch_ethertype (xd->rx_vectors[queue_id][mb_index + 6]);

          bi0 = vlib_get_buffer_index (vm, b0);
          bi1 = vlib_get_buffer_index (vm, b1);
          bi2 = vlib_get_buffer_index (vm, b2);
          bi3 = vlib_get_buffer_index (vm, b3);

          to_next[0] = bi0;
          to_next[1] = bi1;
          to_next[2] = bi2;
          to_next[3] = bi3;
          to_next += 4;
          n_left_to_next -= 4;

          if (PREDICT_FALSE (xd->per_interface_next_index != ~0))
            next0 = next1 = next2 = next3 = xd->per_interface_next_index;
          else
            {
              next0 = dpdk_rx_next_from_etype (mb0);
              next1 = dpdk_rx_next_from_etype (mb1);
              next2 = dpdk_rx_next_from_etype (mb2);
              next3 = dpdk_rx_next_from_etype (mb3);
            }

          or_ol_flags = (mb0->ol_flags | mb1->ol_flags |
                         mb2->ol_flags | mb3->ol_flags);
          if (PREDICT_FALSE (or_ol_flags & PKT_RX_IP_CKSUM_BAD))
            {
              dpdk_rx_error_from_mb (mb0, &next0, &error0);
              dpdk_rx_error_from_mb (mb1, &next1, &error1);
              dpdk_rx_error_from_mb (mb2, &next2, &error2);
              dpdk_rx_error_from_mb (mb3, &next3, &error3);
              b0->error = node->errors[error0];
              b1->error = node->errors[error1];
              b2->error = node->errors[error2];
              b3->error = node->errors[error3];
            }

          dpdk_prefetch_buffer (xd->rx_vectors[queue_id][mb_index + 11]);
          dpdk_prefetch_ethertype (xd->rx_vectors[queue_id][mb_index + 7]);
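
          /* b->data corresponds to mb->buf_addr + RTE_PKTMBUF_HEADROOM, so
             current_data is the mbuf data_off adjusted for the headroom,
             plus the per-next-node advance that skips the ethernet header
             when dispatching straight to ip4/ip6/mpls input. */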
          offset0 = device_input_next_node_advance[next0];
          b0->current_data = mb0->data_off + offset0 - RTE_PKTMBUF_HEADROOM;
          b0->flags |= device_input_next_node_flags[next0];
          vnet_buffer (b0)->l3_hdr_offset = b0->current_data;
          vnet_buffer (b0)->l2_hdr_offset =
            mb0->data_off - RTE_PKTMBUF_HEADROOM;
          b0->current_length = mb0->data_len - offset0;
          n_rx_bytes += mb0->pkt_len;

          offset1 = device_input_next_node_advance[next1];
          b1->current_data = mb1->data_off + offset1 - RTE_PKTMBUF_HEADROOM;
          b1->flags |= device_input_next_node_flags[next1];
          vnet_buffer (b1)->l3_hdr_offset = b1->current_data;
          vnet_buffer (b1)->l2_hdr_offset =
            mb1->data_off - RTE_PKTMBUF_HEADROOM;
          b1->current_length = mb1->data_len - offset1;
          n_rx_bytes += mb1->pkt_len;

          offset2 = device_input_next_node_advance[next2];
          b2->current_data = mb2->data_off + offset2 - RTE_PKTMBUF_HEADROOM;
          b2->flags |= device_input_next_node_flags[next2];
          vnet_buffer (b2)->l3_hdr_offset = b2->current_data;
          vnet_buffer (b2)->l2_hdr_offset =
            mb2->data_off - RTE_PKTMBUF_HEADROOM;
          b2->current_length = mb2->data_len - offset2;
          n_rx_bytes += mb2->pkt_len;

          offset3 = device_input_next_node_advance[next3];
          b3->current_data = mb3->data_off + offset3 - RTE_PKTMBUF_HEADROOM;
          b3->flags |= device_input_next_node_flags[next3];
          vnet_buffer (b3)->l3_hdr_offset = b3->current_data;
          vnet_buffer (b3)->l2_hdr_offset =
            mb3->data_off - RTE_PKTMBUF_HEADROOM;
          b3->current_length = mb3->data_len - offset3;
          n_rx_bytes += mb3->pkt_len;

          /* Process subsequent segments of multi-segment packets */
          dpdk_process_subseq_segs (vm, b0, mb0, fl);
          dpdk_process_subseq_segs (vm, b1, mb1, fl);
          dpdk_process_subseq_segs (vm, b2, mb2, fl);
          dpdk_process_subseq_segs (vm, b3, mb3, fl);

          /*
           * Turn this on if you run into
           * "bad monkey" contexts, and you want to know exactly
           * which nodes they've visited... See main.c...
           */
          VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b0);
          VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b1);
          VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b2);
          VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b3);

          /* Do we have any driver RX features configured on the interface? */
          vnet_feature_start_device_input_x4 (xd->vlib_sw_if_index,
                                              &next0, &next1, &next2, &next3,
                                              b0, b1, b2, b3);

          vlib_validate_buffer_enqueue_x4 (vm, node, next_index,
                                           to_next, n_left_to_next,
                                           bi0, bi1, bi2, bi3,
                                           next0, next1, next2, next3);

          if (PREDICT_FALSE (n_trace > mb_index))
            dpdk_add_trace (vm, node, next0, xd, queue_id, b0, mb0);
          if (PREDICT_FALSE (n_trace > mb_index + 1))
            dpdk_add_trace (vm, node, next1, xd, queue_id, b1, mb1);
          if (PREDICT_FALSE (n_trace > mb_index + 2))
            dpdk_add_trace (vm, node, next2, xd, queue_id, b2, mb2);
          if (PREDICT_FALSE (n_trace > mb_index + 3))
            dpdk_add_trace (vm, node, next3, xd, queue_id, b3, mb3);

          n_buffers -= 4;
          mb_index += 4;
        }
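
      /* Handle the remainder (and frames smaller than twelve packets) one
         packet at a time */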
      while (n_buffers > 0 && n_left_to_next > 0)
        {
          struct rte_mbuf *mb0 = xd->rx_vectors[queue_id][mb_index];

          if (PREDICT_TRUE (n_buffers > 3))
            {
              dpdk_prefetch_buffer (xd->rx_vectors[queue_id][mb_index + 2]);
              dpdk_prefetch_ethertype (xd->rx_vectors[queue_id]
                                       [mb_index + 1]);
            }

          b0 = vlib_buffer_from_rte_mbuf (mb0);

          /* Prefetch one next segment if it exists. */
          if (PREDICT_FALSE (mb0->nb_segs > 1))
            dpdk_prefetch_buffer (mb0->next);

          clib_memcpy (b0, bt, CLIB_CACHE_LINE_BYTES);

          bi0 = vlib_get_buffer_index (vm, b0);

          to_next[0] = bi0;
          to_next++;
          n_left_to_next--;

          if (PREDICT_FALSE (xd->per_interface_next_index != ~0))
            next0 = xd->per_interface_next_index;
          else
            next0 = dpdk_rx_next_from_etype (mb0);

          dpdk_rx_error_from_mb (mb0, &next0, &error0);
          b0->error = node->errors[error0];

          offset0 = device_input_next_node_advance[next0];
          b0->current_data = mb0->data_off + offset0 - RTE_PKTMBUF_HEADROOM;
          b0->flags |= device_input_next_node_flags[next0];
          vnet_buffer (b0)->l3_hdr_offset = b0->current_data;
          vnet_buffer (b0)->l2_hdr_offset =
            mb0->data_off - RTE_PKTMBUF_HEADROOM;
          b0->current_length = mb0->data_len - offset0;
          n_rx_bytes += mb0->pkt_len;

          /* Process subsequent segments of multi-segment packets */
          dpdk_process_subseq_segs (vm, b0, mb0, fl);

          /*
           * Turn this on if you run into
           * "bad monkey" contexts, and you want to know exactly
           * which nodes they've visited... See main.c...
           */
          VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b0);

          /* Do we have any driver RX features configured on the interface? */
          vnet_feature_start_device_input_x1 (xd->vlib_sw_if_index, &next0,
                                              b0);

          vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
                                           to_next, n_left_to_next,
                                           bi0, next0);

          if (PREDICT_FALSE (n_trace > mb_index))
            dpdk_add_trace (vm, node, next0, xd, queue_id, b0, mb0);

          n_buffers--;
          mb_index++;
        }

      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }
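
  /* Account the burst on the interface rx counters: mb_index mbufs and
     n_rx_bytes bytes were consumed from this queue during this call. */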
  vlib_increment_combined_counter
    (vnet_get_main ()->interface_main.combined_sw_if_counters
     + VNET_INTERFACE_COUNTER_RX,
     thread_index, xd->vlib_sw_if_index, mb_index, n_rx_bytes);

  vnet_device_increment_rx_packets (thread_index, mb_index);

  return mb_index;
}

static_always_inline u32
dpdk_device_input_mseg (dpdk_main_t * dm, dpdk_device_t * xd,
			vlib_node_runtime_t * node, u32 thread_index,
			u16 queue_id, u32 n_trace)
{
  if (xd->flags & DPDK_DEVICE_FLAG_MAYBE_MULTISEG)
    return dpdk_device_input (dm, xd, node, thread_index, queue_id,
			      /* maybe_multiseg */ 1, n_trace);
  else
    return dpdk_device_input (dm, xd, node, thread_index, queue_id,
			      /* maybe_multiseg */ 0, n_trace);
}

static inline void
poll_rate_limit (dpdk_main_t * dm)
{
  /* Limit the poll rate by sleeping for N usec between polls */
  if (PREDICT_FALSE (dm->poll_sleep_usec != 0))
    {
      struct timespec ts, tsrem;

      ts.tv_sec = 0;
      ts.tv_nsec = 1000 * dm->poll_sleep_usec;

      while (nanosleep (&ts, &tsrem) < 0)
	ts = tsrem;		/* retry with the remaining time if interrupted */
    }
}

/** \brief Main DPDK input node

    This is the main DPDK input node: across each assigned interface,
    call rte_eth_rx_burst(...) or similar to obtain a vector of
    packets to process. Handle early packet discard. Derive
    @c vlib_buffer_t metadata from <code>struct rte_mbuf</code> metadata.
    Depending on the resulting metadata, adjust <code>b->current_data,
    b->current_length</code> and dispatch directly to
    ip4-input-no-checksum or ip6-input. Trace the packet if required.

    @param vm   vlib_main_t corresponding to the current thread
    @param node vlib_node_runtime_t
    @param f    vlib_frame_t input-node, not used.

    @par Graph mechanics: buffer metadata, next index usage

    @em Uses:
    - <code>struct rte_mbuf mb->ol_flags</code>
        - PKT_RX_IP_CKSUM_BAD
    - <code>RTE_ETH_IS_xxx_HDR(mb->packet_type)</code>
        - packet classification result

    @em Sets:
    - <code>b->error</code> if the packet is to be dropped immediately
    - <code>b->current_data, b->current_length</code>
        - adjusted as needed to skip the L2 header in direct-dispatch cases
    - <code>vnet_buffer(b)->sw_if_index[VLIB_RX]</code>
        - rx interface sw_if_index
    - <code>vnet_buffer(b)->sw_if_index[VLIB_TX] = ~0</code>
        - required by ipX-lookup
    - <code>b->flags</code>
        - to indicate multi-segment pkts (VLIB_BUFFER_NEXT_PRESENT), etc.

    @em Next Nodes:
    - Static arcs to: error-drop, ethernet-input,
      ip4-input-no-checksum, ip6-input, mpls-input
    - per-interface redirection, controlled by
      <code>xd->per_interface_next_index</code>
*/
uword
CLIB_MULTIARCH_FN (dpdk_input) (vlib_main_t * vm, vlib_node_runtime_t * node,
				vlib_frame_t * f)
{
  dpdk_main_t *dm = &dpdk_main;
  dpdk_device_t *xd;
  uword n_rx_packets = 0;
  vnet_device_input_runtime_t *rt = (void *) node->runtime_data;
  vnet_device_and_queue_t *dq;
  u32 thread_index = node->thread_index;

  /*
   * Poll all devices on this cpu for input/interrupts.
   */
  foreach_device_and_queue (dq, rt->devices_and_queues)
    {
      xd = vec_elt_at_index (dm->devices, dq->dev_instance);
      if (PREDICT_FALSE (xd->flags & DPDK_DEVICE_FLAG_BOND_SLAVE))
	continue;		/* Do not poll slave to a bonded interface */
      u32 n_trace = vlib_get_trace_count (vm, node);
      if (PREDICT_TRUE (n_trace == 0))
	n_rx_packets += dpdk_device_input_mseg (dm, xd, node, thread_index,
						dq->queue_id, 0);
      else
	{
	  u32 n_tr_packets = dpdk_device_input_mseg (dm, xd, node,
						     thread_index,
						     dq->queue_id, n_trace);
	  n_rx_packets += n_tr_packets;
	  vlib_set_trace_count (vm, node,
				n_trace - clib_min (n_trace, n_tr_packets));
	}
    }

  poll_rate_limit (dm);

  return n_rx_packets;
}

#ifndef CLIB_MULTIARCH_VARIANT
VLIB_REGISTER_NODE (dpdk_input_node) = {
  .function = dpdk_input,
  .type = VLIB_NODE_TYPE_INPUT,
  .name = "dpdk-input",
  .sibling_of = "device-input",

  /* Will be enabled if/when hardware is detected. */
  .state = VLIB_NODE_STATE_DISABLED,

  .format_buffer = format_ethernet_header_with_length,
  .format_trace = format_dpdk_rx_dma_trace,

  .n_errors = DPDK_N_ERROR,
  .error_strings = dpdk_error_strings,
};

vlib_node_function_t __clib_weak dpdk_input_avx512;
vlib_node_function_t __clib_weak dpdk_input_avx2;
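
/*
 * If this file was also built for AVX2/AVX-512, the strong definitions of
 * dpdk_input_avx2 / dpdk_input_avx512 override the weak symbols above, and
 * the constructor below points the node at the best variant the running
 * CPU supports.
 */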
static void __clib_constructor
dpdk_input_multiarch_select (void)
{
  if (dpdk_input_avx512 && clib_cpu_supports_avx512f ())
    dpdk_input_node.function = dpdk_input_avx512;
  else if (dpdk_input_avx2 && clib_cpu_supports_avx2 ())
    dpdk_input_node.function = dpdk_input_avx2;
}
#endif

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */