/*
 * Copyright (c) 2015 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include <vnet/vnet.h>
#include <vppinfra/vec.h>
#include <vppinfra/error.h>
#include <vppinfra/format.h>
#include <vppinfra/xxhash.h>

#include <vnet/ethernet/ethernet.h>
#include <dpdk/device/dpdk.h>
#include <vnet/classify/vnet_classify.h>
#include <vnet/mpls/packet.h>
#include <vnet/handoff.h>
#include <vnet/devices/devices.h>
#include <vnet/feature/feature.h>

#include <dpdk/device/dpdk_priv.h>
static char *dpdk_error_strings[] = {
#define _(n,s) s,
  foreach_dpdk_error
#undef _
};
always_inline int
vlib_buffer_is_ip4 (vlib_buffer_t * b)
{
  ethernet_header_t *h = (ethernet_header_t *) vlib_buffer_get_current (b);
  return (h->type == clib_host_to_net_u16 (ETHERNET_TYPE_IP4));
}

always_inline int
vlib_buffer_is_ip6 (vlib_buffer_t * b)
{
  ethernet_header_t *h = (ethernet_header_t *) vlib_buffer_get_current (b);
  return (h->type == clib_host_to_net_u16 (ETHERNET_TYPE_IP6));
}

always_inline int
vlib_buffer_is_mpls (vlib_buffer_t * b)
{
  ethernet_header_t *h = (ethernet_header_t *) vlib_buffer_get_current (b);
  return (h->type == clib_host_to_net_u16 (ETHERNET_TYPE_MPLS));
}
always_inline u32
dpdk_rx_next_from_etype (struct rte_mbuf * mb, vlib_buffer_t * b0)
{
  if (PREDICT_TRUE (vlib_buffer_is_ip4 (b0)))
    {
      if (PREDICT_TRUE ((mb->ol_flags & PKT_RX_IP_CKSUM_GOOD) != 0))
	return VNET_DEVICE_INPUT_NEXT_IP4_NCS_INPUT;
      else
	return VNET_DEVICE_INPUT_NEXT_IP4_INPUT;
    }
  else if (PREDICT_TRUE (vlib_buffer_is_ip6 (b0)))
    return VNET_DEVICE_INPUT_NEXT_IP6_INPUT;
  else if (PREDICT_TRUE (vlib_buffer_is_mpls (b0)))
    return VNET_DEVICE_INPUT_NEXT_MPLS_INPUT;
  else
    return VNET_DEVICE_INPUT_NEXT_ETHERNET_INPUT;
}
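/* For illustration: an IPv4 packet whose header checksum the NIC has
   already verified (PKT_RX_IP_CKSUM_GOOD) is dispatched straight to
   ip4-input-no-checksum, while an IPv4 packet without that flag goes to
   ip4-input, which re-verifies the checksum in software.  Anything that is
   not IPv4/IPv6/MPLS falls back to ethernet-input. */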
always_inline u32
dpdk_rx_next_from_packet_start (struct rte_mbuf * mb, vlib_buffer_t * b0)
{
  word start_delta;
  int rv;

  start_delta = b0->current_data -
    ((mb->buf_addr + mb->data_off) - (void *) b0->data);

  vlib_buffer_advance (b0, -start_delta);

  if (PREDICT_TRUE (vlib_buffer_is_ip4 (b0)))
    {
      if (PREDICT_TRUE ((mb->ol_flags & PKT_RX_IP_CKSUM_GOOD) != 0))
	rv = VNET_DEVICE_INPUT_NEXT_IP4_NCS_INPUT;
      else
	rv = VNET_DEVICE_INPUT_NEXT_IP4_INPUT;
    }
  else if (PREDICT_TRUE (vlib_buffer_is_ip6 (b0)))
    rv = VNET_DEVICE_INPUT_NEXT_IP6_INPUT;
  else if (PREDICT_TRUE (vlib_buffer_is_mpls (b0)))
    rv = VNET_DEVICE_INPUT_NEXT_MPLS_INPUT;
  else
    rv = VNET_DEVICE_INPUT_NEXT_ETHERNET_INPUT;

  vlib_buffer_advance (b0, start_delta);
  return rv;
}
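/* Note: unlike dpdk_rx_next_from_etype(), this variant first rewinds b0 to
   where the mbuf says the packet data starts, classifies, then restores the
   original position.  It is used on the trace path, where b0->current_data
   may already have been advanced past the L2 header. */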
always_inline void
dpdk_rx_error_from_mb (struct rte_mbuf *mb, u32 * next, u8 * error)
{
  if (mb->ol_flags & PKT_RX_IP_CKSUM_BAD)
    {
      *error = DPDK_ERROR_IP_CHECKSUM_ERROR;
      *next = VNET_DEVICE_INPUT_NEXT_DROP;
    }
  else
    *error = DPDK_ERROR_NONE;
}
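/* PKT_RX_IP_CKSUM_BAD is set by the PMD when the NIC's receive checksum
   offload flags a corrupt IP header, so such packets can be dropped here
   without the software datapath ever touching the payload. */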
static void
dpdk_rx_trace (dpdk_main_t * dm,
	       vlib_node_runtime_t * node,
	       dpdk_device_t * xd,
	       u16 queue_id, u32 * buffers, uword n_buffers)
{
  vlib_main_t *vm = vlib_get_main ();
  u32 *b = buffers;
  u32 n_left = n_buffers;

  while (n_left >= 1)
    {
      u32 bi0 = b[0], next0;
      u8 error0;
      vlib_buffer_t *b0;
      dpdk_rx_dma_trace_t *t0;
      struct rte_mbuf *mb;

      n_left -= 1;
      b0 = vlib_get_buffer (vm, bi0);
      mb = rte_mbuf_from_vlib_buffer (b0);

      if (PREDICT_FALSE (xd->per_interface_next_index != ~0))
	next0 = xd->per_interface_next_index;
      else
	next0 = dpdk_rx_next_from_packet_start (mb, b0);

      dpdk_rx_error_from_mb (mb, &next0, &error0);

      vlib_trace_buffer (vm, node, next0, b0, /* follow_chain */ 0);
      t0 = vlib_add_trace (vm, node, b0, sizeof (t0[0]));
      t0->queue_index = queue_id;
      t0->device_index = xd->device_index;
      t0->buffer_index = bi0;

      clib_memcpy (&t0->mb, mb, sizeof (t0->mb));
      clib_memcpy (&t0->buffer, b0, sizeof (b0[0]) - sizeof (b0->pre_data));
      clib_memcpy (t0->buffer.pre_data, b0->data,
		   sizeof (t0->buffer.pre_data));
      clib_memcpy (&t0->data, mb->buf_addr + mb->data_off, sizeof (t0->data));

      b += 1;
    }
}
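/* Pull up to one full vlib frame's worth of mbufs (VLIB_FRAME_SIZE, 256 by
   default) from a single NIC rx queue into xd->rx_vectors[queue_id].
   rte_eth_rx_burst() is called in a loop because a single call may return
   fewer packets than are actually available. */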
static inline u32
dpdk_rx_burst (dpdk_main_t * dm, dpdk_device_t * xd, u16 queue_id)
{
  u32 n_buffers = 0, n_left = VLIB_FRAME_SIZE, n_this_chunk;

  if (PREDICT_TRUE (xd->flags & DPDK_DEVICE_FLAG_PMD))
    while (n_left)
      {
	n_this_chunk = rte_eth_rx_burst (xd->device_index, queue_id,
					 xd->rx_vectors[queue_id] +
					 n_buffers, n_left);
	n_buffers += n_this_chunk;
	n_left -= n_this_chunk;

	/* Empirically, DPDK r1.8 produces vectors w/ 32 or fewer elts */
	if (n_this_chunk < 32)
	  break;
      }

  return n_buffers;
}
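/* A minimal sketch of what the function below builds for a 3-segment
   packet, given the usual 1:1 mbuf/vlib_buffer_t pairing:
 *
 *   b (seg 0) --next_buffer--> b_seg (seg 1) --next_buffer--> (seg 2)
 *
 * Each predecessor in the chain gets VLIB_BUFFER_NEXT_PRESENT set, and
 * b->total_length_not_including_first_buffer accumulates the data_len of
 * every segment after the first. */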
static_always_inline void
dpdk_process_subseq_segs (vlib_main_t * vm, vlib_buffer_t * b,
			  struct rte_mbuf *mb, vlib_buffer_free_list_t * fl)
{
  u8 nb_seg = 1;
  struct rte_mbuf *mb_seg = mb->next;
  vlib_buffer_t *b_seg, *b_chain = b;

  if (mb->nb_segs < 2)
    return;

  b->flags |= VLIB_BUFFER_TOTAL_LENGTH_VALID;
  b->total_length_not_including_first_buffer = 0;

  while (nb_seg < mb->nb_segs)
    {
      ASSERT (mb_seg != 0);

      b_seg = vlib_buffer_from_rte_mbuf (mb_seg);
      vlib_buffer_init_for_free_list (b_seg, fl);

      ASSERT ((b_seg->flags & VLIB_BUFFER_NEXT_PRESENT) == 0);
      ASSERT (b_seg->current_data == 0);

      /*
       * The driver (e.g. virtio) may not put the packet data at the start
       * of the segment, so don't assume b_seg->current_data == 0 is correct.
       */
      b_seg->current_data =
	(mb_seg->buf_addr + mb_seg->data_off) - (void *) b_seg->data;

      b_seg->current_length = mb_seg->data_len;
      b->total_length_not_including_first_buffer += mb_seg->data_len;

      b_chain->flags |= VLIB_BUFFER_NEXT_PRESENT;
      b_chain->next_buffer = vlib_get_buffer_index (vm, b_seg);

      b_chain = b_seg;
      mb_seg = mb_seg->next;
      nb_seg++;
    }
}
static_always_inline void
dpdk_prefetch_buffer (struct rte_mbuf *mb)
{
  vlib_buffer_t *b = vlib_buffer_from_rte_mbuf (mb);
  CLIB_PREFETCH (mb, CLIB_CACHE_LINE_BYTES, LOAD);
  CLIB_PREFETCH (b, CLIB_CACHE_LINE_BYTES, STORE);
}

static_always_inline void
dpdk_prefetch_ethertype (struct rte_mbuf *mb)
{
  CLIB_PREFETCH (mb->buf_addr + mb->data_off +
		 STRUCT_OFFSET_OF (ethernet_header_t, type),
		 CLIB_CACHE_LINE_BYTES, LOAD);
}
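/* In the quad loop below, dpdk_prefetch_buffer() runs 8-11 mbufs ahead of
   the packets currently being processed and dpdk_prefetch_ethertype() 4-7
   ahead, so both the buffer metadata and the ethertype word should be warm
   in L1 by the time dpdk_rx_next_from_etype() reads them. */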
/*
   This function fills the first cacheline of vlib_buffer_t metadata with
   data from the buffer template. Instead of filling it field by field, we
   construct the template once and then use 128/256-bit vector instructions
   to copy it. The code loads the whole cacheline into four 128-bit (xmm)
   or two 256-bit (ymm) registers and then stores it into all 4 buffers,
   effectively saving on register load operations.
*/
static_always_inline void
dpdk_buffer_init_from_template (void *d0, void *d1, void *d2, void *d3,
				void *s)
{
#if defined(CLIB_HAVE_VEC128)
  int i;
  for (i = 0; i < 2; i++)
    {
      *(u8x32 *) (((u8 *) d0) + i * 32) =
	*(u8x32 *) (((u8 *) d1) + i * 32) =
	*(u8x32 *) (((u8 *) d2) + i * 32) =
	*(u8x32 *) (((u8 *) d3) + i * 32) = *(u8x32 *) (((u8 *) s) + i * 32);
    }
#elif defined(CLIB_HAVE_VEC64)
  int i;
  for (i = 0; i < 4; i++)
    {
      *(u8x16 *) (((u8 *) d0) + i * 16) =
	*(u8x16 *) (((u8 *) d1) + i * 16) =
	*(u8x16 *) (((u8 *) d2) + i * 16) =
	*(u8x16 *) (((u8 *) d3) + i * 16) = *(u8x16 *) (((u8 *) s) + i * 16);
    }
#else
#error "Either CLIB_HAVE_VEC128 or CLIB_HAVE_VEC64 has to be defined"
#endif
}
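/* An observation, not from the original comment: u8x32 is a 256-bit vector
   type, so under the CLIB_HAVE_VEC128 branch the compiler's generic vector
   extensions are relied upon to synthesize each 32-byte copy from narrower
   loads/stores when AVX is not available. */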
/*
 * This function is used when there are no worker threads.
 * The main thread performs IO and forwards the packets.
 */
static_always_inline u32
dpdk_device_input (dpdk_main_t * dm, dpdk_device_t * xd,
		   vlib_node_runtime_t * node, u32 thread_index, u16 queue_id,
		   int maybe_multiseg)
{
  u32 n_buffers;
  u32 next_index = VNET_DEVICE_INPUT_NEXT_ETHERNET_INPUT;
  u32 n_left_to_next, *to_next;
  u32 mb_index;
  vlib_main_t *vm = vlib_get_main ();
  uword n_rx_bytes = 0;
  u32 n_trace, trace_cnt __attribute__ ((unused));
  vlib_buffer_free_list_t *fl;
  vlib_buffer_t *bt = vec_elt_at_index (dm->buffer_templates, thread_index);

  if ((xd->flags & DPDK_DEVICE_FLAG_ADMIN_UP) == 0)
    return 0;

  n_buffers = dpdk_rx_burst (dm, xd, queue_id);

  if (n_buffers == 0)
    return 0;

  vec_reset_length (xd->d_trace_buffers[thread_index]);
  trace_cnt = n_trace = vlib_get_trace_count (vm, node);

  if (n_trace > 0)
    {
      u32 n = clib_min (n_trace, n_buffers);
      mb_index = 0;

      while (n--)
	{
	  struct rte_mbuf *mb = xd->rx_vectors[queue_id][mb_index++];
	  vlib_buffer_t *b = vlib_buffer_from_rte_mbuf (mb);
	  vec_add1 (xd->d_trace_buffers[thread_index],
		    vlib_get_buffer_index (vm, b));
	}
    }

  fl = vlib_buffer_get_free_list (vm, VLIB_BUFFER_DEFAULT_FREE_LIST_INDEX);

  /* Update the buffer template */
  vnet_buffer (bt)->sw_if_index[VLIB_RX] = xd->vlib_sw_if_index;
  bt->error = node->errors[DPDK_ERROR_NONE];
  /* Since DPDK allocates empty buffers for each queue from a mempool
     provided before the interface starts, it is safe to store the pool
     index in the template. */
  bt->buffer_pool_index = xd->buffer_pool_for_queue[queue_id];

  mb_index = 0;
  while (n_buffers > 0)
    {
      vlib_buffer_t *b0, *b1, *b2, *b3;
      u32 bi0, next0;
      u32 bi1, next1;
      u32 bi2, next2;
      u32 bi3, next3;
      u8 error0, error1, error2, error3;
      u64 or_ol_flags;

      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

      while (n_buffers >= 12 && n_left_to_next >= 4)
	{
	  struct rte_mbuf *mb0, *mb1, *mb2, *mb3;

	  /* prefetches are interleaved with the rest of the code to reduce
	     pressure on L1 cache */
	  dpdk_prefetch_buffer (xd->rx_vectors[queue_id][mb_index + 8]);
	  dpdk_prefetch_ethertype (xd->rx_vectors[queue_id][mb_index + 4]);

	  mb0 = xd->rx_vectors[queue_id][mb_index];
	  mb1 = xd->rx_vectors[queue_id][mb_index + 1];
	  mb2 = xd->rx_vectors[queue_id][mb_index + 2];
	  mb3 = xd->rx_vectors[queue_id][mb_index + 3];

	  if (maybe_multiseg)
	    {
	      if (PREDICT_FALSE (mb0->nb_segs > 1))
		dpdk_prefetch_buffer (mb0->next);
	      if (PREDICT_FALSE (mb1->nb_segs > 1))
		dpdk_prefetch_buffer (mb1->next);
	      if (PREDICT_FALSE (mb2->nb_segs > 1))
		dpdk_prefetch_buffer (mb2->next);
	      if (PREDICT_FALSE (mb3->nb_segs > 1))
		dpdk_prefetch_buffer (mb3->next);
	    }

	  b0 = vlib_buffer_from_rte_mbuf (mb0);
	  b1 = vlib_buffer_from_rte_mbuf (mb1);
	  b2 = vlib_buffer_from_rte_mbuf (mb2);
	  b3 = vlib_buffer_from_rte_mbuf (mb3);

	  dpdk_buffer_init_from_template (b0, b1, b2, b3, bt);

	  dpdk_prefetch_buffer (xd->rx_vectors[queue_id][mb_index + 9]);
	  dpdk_prefetch_ethertype (xd->rx_vectors[queue_id][mb_index + 5]);

	  /* current_data must be set to -RTE_PKTMBUF_HEADROOM in template */
	  b0->current_data += mb0->data_off;
	  b1->current_data += mb1->data_off;
	  b2->current_data += mb2->data_off;
	  b3->current_data += mb3->data_off;
415 b0->current_length = mb0->data_len;
416 b1->current_length = mb1->data_len;
417 b2->current_length = mb2->data_len;
418 b3->current_length = mb3->data_len;
420 dpdk_prefetch_buffer (xd->rx_vectors[queue_id][mb_index + 10]);
421 dpdk_prefetch_ethertype (xd->rx_vectors[queue_id][mb_index + 7]);
423 bi0 = vlib_get_buffer_index (vm, b0);
424 bi1 = vlib_get_buffer_index (vm, b1);
425 bi2 = vlib_get_buffer_index (vm, b2);
426 bi3 = vlib_get_buffer_index (vm, b3);
435 if (PREDICT_FALSE (xd->per_interface_next_index != ~0))
437 next0 = next1 = next2 = next3 = xd->per_interface_next_index;
441 next0 = dpdk_rx_next_from_etype (mb0, b0);
442 next1 = dpdk_rx_next_from_etype (mb1, b1);
443 next2 = dpdk_rx_next_from_etype (mb2, b2);
444 next3 = dpdk_rx_next_from_etype (mb3, b3);
447 dpdk_prefetch_buffer (xd->rx_vectors[queue_id][mb_index + 11]);
448 dpdk_prefetch_ethertype (xd->rx_vectors[queue_id][mb_index + 6]);
450 or_ol_flags = (mb0->ol_flags | mb1->ol_flags |
451 mb2->ol_flags | mb3->ol_flags);
452 if (PREDICT_FALSE (or_ol_flags & PKT_RX_IP_CKSUM_BAD))
454 dpdk_rx_error_from_mb (mb0, &next0, &error0);
455 dpdk_rx_error_from_mb (mb1, &next1, &error1);
456 dpdk_rx_error_from_mb (mb2, &next2, &error2);
457 dpdk_rx_error_from_mb (mb3, &next3, &error3);
458 b0->error = node->errors[error0];
459 b1->error = node->errors[error1];
460 b2->error = node->errors[error2];
461 b3->error = node->errors[error3];
464 vlib_buffer_advance (b0, device_input_next_node_advance[next0]);
465 vlib_buffer_advance (b1, device_input_next_node_advance[next1]);
466 vlib_buffer_advance (b2, device_input_next_node_advance[next2]);
467 vlib_buffer_advance (b3, device_input_next_node_advance[next3]);
469 n_rx_bytes += mb0->pkt_len;
470 n_rx_bytes += mb1->pkt_len;
471 n_rx_bytes += mb2->pkt_len;
472 n_rx_bytes += mb3->pkt_len;
474 /* Process subsequent segments of multi-segment packets */
477 dpdk_process_subseq_segs (vm, b0, mb0, fl);
478 dpdk_process_subseq_segs (vm, b1, mb1, fl);
479 dpdk_process_subseq_segs (vm, b2, mb2, fl);
480 dpdk_process_subseq_segs (vm, b3, mb3, fl);
484 * Turn this on if you run into
485 * "bad monkey" contexts, and you want to know exactly
486 * which nodes they've visited... See main.c...
488 VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b0);
489 VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b1);
490 VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b2);
491 VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b3);
493 /* Do we have any driver RX features configured on the interface? */
494 vnet_feature_start_device_input_x4 (xd->vlib_sw_if_index,
495 &next0, &next1, &next2, &next3,
498 vlib_validate_buffer_enqueue_x4 (vm, node, next_index,
499 to_next, n_left_to_next,
501 next0, next1, next2, next3);
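      /* Any remainder (fewer than 12 packets left, or the frame nearly
	 full) is handled one packet at a time below, with shallower
	 prefetch. */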
      while (n_buffers > 0 && n_left_to_next > 0)
	{
	  struct rte_mbuf *mb0 = xd->rx_vectors[queue_id][mb_index];

	  if (PREDICT_TRUE (n_buffers > 3))
	    {
	      dpdk_prefetch_buffer (xd->rx_vectors[queue_id][mb_index + 2]);
	      dpdk_prefetch_ethertype (xd->rx_vectors[queue_id]
				       [mb_index + 1]);
	    }

	  ASSERT (mb0);

	  b0 = vlib_buffer_from_rte_mbuf (mb0);

	  /* Prefetch one next segment if it exists. */
	  if (PREDICT_FALSE (mb0->nb_segs > 1))
	    dpdk_prefetch_buffer (mb0->next);

	  clib_memcpy (b0, bt, CLIB_CACHE_LINE_BYTES);

	  ASSERT (b0->current_data == -RTE_PKTMBUF_HEADROOM);
	  b0->current_data += mb0->data_off;
	  b0->current_length = mb0->data_len;

	  bi0 = vlib_get_buffer_index (vm, b0);

	  to_next[0] = bi0;
	  to_next++;
	  n_left_to_next--;

	  if (PREDICT_FALSE (xd->per_interface_next_index != ~0))
	    next0 = xd->per_interface_next_index;
	  else
	    next0 = dpdk_rx_next_from_etype (mb0, b0);

	  dpdk_rx_error_from_mb (mb0, &next0, &error0);
	  b0->error = node->errors[error0];

	  vlib_buffer_advance (b0, device_input_next_node_advance[next0]);

	  n_rx_bytes += mb0->pkt_len;

	  /* Process subsequent segments of multi-segment packets */
	  dpdk_process_subseq_segs (vm, b0, mb0, fl);

	  /*
	   * Turn this on if you run into
	   * "bad monkey" contexts, and you want to know exactly
	   * which nodes they've visited... See main.c...
	   */
	  VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b0);

	  /* Do we have any driver RX features configured on the interface? */
	  vnet_feature_start_device_input_x1 (xd->vlib_sw_if_index, &next0,
					      b0);

	  vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
					   to_next, n_left_to_next,
					   bi0, next0);
	  n_buffers--;
	  mb_index++;
	}

      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }
  if (PREDICT_FALSE (vec_len (xd->d_trace_buffers[thread_index]) > 0))
    {
      dpdk_rx_trace (dm, node, xd, queue_id,
		     xd->d_trace_buffers[thread_index],
		     vec_len (xd->d_trace_buffers[thread_index]));
      vlib_set_trace_count (vm, node,
			    n_trace -
			    vec_len (xd->d_trace_buffers[thread_index]));
    }

  vlib_increment_combined_counter
    (vnet_get_main ()->interface_main.combined_sw_if_counters
     + VNET_INTERFACE_COUNTER_RX,
     thread_index, xd->vlib_sw_if_index, mb_index, n_rx_bytes);

  vnet_device_increment_rx_packets (thread_index, mb_index);

  return mb_index;
}
static inline void
poll_rate_limit (dpdk_main_t * dm)
{
  /* Limit the poll rate by sleeping for N usec between polls */
  if (PREDICT_FALSE (dm->poll_sleep_usec != 0))
    {
      struct timespec ts, tsrem;

      ts.tv_sec = 0;
      ts.tv_nsec = 1000 * dm->poll_sleep_usec;

      while (nanosleep (&ts, &tsrem) < 0)
	ts = tsrem;
    }
}
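/* For example, with dm->poll_sleep_usec = 100 the loop sleeps for roughly
   100 microseconds between poll sweeps, capping the poll rate at about 10k
   sweeps/sec and keeping an otherwise idle core from spinning at 100%. */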
/** \brief Main DPDK input node
    @node dpdk-input

    This is the main DPDK input node: across each assigned interface,
    call rte_eth_rx_burst(...) or similar to obtain a vector of
    packets to process. Handle early packet discard. Derive @c
    vlib_buffer_t metadata from <code>struct rte_mbuf</code> metadata.
    Depending on the resulting metadata, adjust <code>b->current_data,
    b->current_length</code> and dispatch directly to
    ip4-input-no-checksum, ip6-input, or mpls-input. Trace the packet
    if required.

    @param vm   vlib_main_t corresponding to the current thread
    @param node vlib_node_runtime_t
    @param f    vlib_frame_t input-node, not used.

    @par Graph mechanics: buffer metadata, next index usage

    @em Uses:
    - <code>struct rte_mbuf mb->ol_flags</code>
        - PKT_RX_IP_CKSUM_BAD
    - <code>RTE_ETH_IS_xxx_HDR(mb->packet_type)</code>
        - packet classification result

    @em Sets:
    - <code>b->error</code> if the packet is to be dropped immediately
    - <code>b->current_data, b->current_length</code>
        - adjusted as needed to skip the L2 header in direct-dispatch cases
    - <code>vnet_buffer(b)->sw_if_index[VLIB_RX]</code>
        - rx interface sw_if_index
    - <code>vnet_buffer(b)->sw_if_index[VLIB_TX] = ~0</code>
        - required by ipX-lookup
    - <code>b->flags</code>
        - to indicate multi-segment pkts (VLIB_BUFFER_NEXT_PRESENT), etc.

    <em>Next Nodes:</em>
    - Static arcs to: error-drop, ethernet-input,
      ip4-input-no-checksum, ip6-input, mpls-input
    - per-interface redirection, controlled by
      <code>xd->per_interface_next_index</code>
*/
uword
dpdk_input (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * f)
{
  dpdk_main_t *dm = &dpdk_main;
  dpdk_device_t *xd;
  uword n_rx_packets = 0;
  vnet_device_input_runtime_t *rt = (void *) node->runtime_data;
  vnet_device_and_queue_t *dq;
  u32 thread_index = node->thread_index;

  /*
   * Poll all devices on this cpu for input/interrupts.
   */
  foreach_device_and_queue (dq, rt->devices_and_queues)
    {
      xd = vec_elt_at_index(dm->devices, dq->dev_instance);
      if (PREDICT_FALSE (xd->flags & DPDK_DEVICE_FLAG_BOND_SLAVE))
	continue;	/* Do not poll slave to a bonded interface */
      if (xd->flags & DPDK_DEVICE_FLAG_MAYBE_MULTISEG)
	n_rx_packets += dpdk_device_input (dm, xd, node, thread_index,
					   dq->queue_id, /* maybe_multiseg */ 1);
      else
	n_rx_packets += dpdk_device_input (dm, xd, node, thread_index,
					   dq->queue_id, /* maybe_multiseg */ 0);
    }

  poll_rate_limit (dm);

  return n_rx_packets;
}
VLIB_REGISTER_NODE (dpdk_input_node) = {
  .function = dpdk_input,
  .type = VLIB_NODE_TYPE_INPUT,
  .name = "dpdk-input",
  .sibling_of = "device-input",

  /* Will be enabled if/when hardware is detected. */
  .state = VLIB_NODE_STATE_DISABLED,

  .format_buffer = format_ethernet_header_with_length,
  .format_trace = format_dpdk_rx_dma_trace,

  .n_errors = DPDK_N_ERROR,
  .error_strings = dpdk_error_strings,
};

VLIB_NODE_FUNCTION_MULTIARCH (dpdk_input_node, dpdk_input);
/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */