/*
 * Copyright (c) 2015 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
15 #ifndef __included_dpdk_h__
16 #define __included_dpdk_h__
18 /* $$$$ We should rename always_inline -> clib_always_inline */
21 #include <rte_config.h>
23 #include <rte_common.h>
26 #include <rte_memory.h>
27 #include <rte_memzone.h>
28 #include <rte_tailq.h>
30 #include <rte_per_lcore.h>
31 #include <rte_launch.h>
32 #include <rte_atomic.h>
33 #include <rte_cycles.h>
34 #include <rte_prefetch.h>
35 #include <rte_lcore.h>
36 #include <rte_per_lcore.h>
37 #include <rte_branch_prediction.h>
38 #include <rte_interrupts.h>
40 #include <rte_random.h>
41 #include <rte_debug.h>
42 #include <rte_ether.h>
43 #include <rte_ethdev.h>
45 #include <rte_mempool.h>
50 #include <rte_virtio_net.h>
51 #include <rte_pci_dev_ids.h>
52 #include <rte_version.h>
53 #include <rte_eth_bond.h>
55 #include <vnet/unix/pcap.h>
56 #include <vnet/devices/virtio/vhost-user.h>
59 #define always_inline static inline
61 #define always_inline static inline __attribute__ ((__always_inline__))
64 #include <vlib/pci/pci.h>
/* Number of rte_mbuf buffers to allocate in the DPDK packet mempool. */
#define NB_MBUF (32<<10)
68 extern vnet_device_class_t dpdk_device_class;
69 extern vlib_node_registration_t dpdk_input_node;
70 extern vlib_node_registration_t dpdk_io_input_node;
71 extern vlib_node_registration_t handoff_dispatch_node;
74 VNET_DPDK_DEV_ETH = 1, /* Standard DPDK PMD driver */
75 VNET_DPDK_DEV_KNI, /* Kernel NIC Interface */
76 VNET_DPDK_DEV_VHOST_USER,
77 VNET_DPDK_DEV_UNKNOWN, /* must be last */
/* X-macro table mapping a DPDK PMD driver-name string to the vnet PMD
 * enum tag; expand by defining _(name_string, TAG) before invoking. */
#define foreach_dpdk_pmd          \
  _ ("rte_nicvf_pmd", THUNDERX)   \
  _ ("rte_em_pmd", E1000EM)       \
  _ ("rte_igb_pmd", IGB)          \
  _ ("rte_igbvf_pmd", IGBVF)      \
  _ ("rte_ixgbe_pmd", IXGBE)      \
  _ ("rte_ixgbevf_pmd", IXGBEVF)  \
  _ ("rte_i40e_pmd", I40E)        \
  _ ("rte_i40evf_pmd", I40EVF)    \
  _ ("rte_virtio_pmd", VIRTIO)    \
  _ ("rte_vice_pmd", VICE)        \
  _ ("rte_enic_pmd", ENIC)        \
  _ ("rte_vmxnet3_pmd", VMXNET3)  \
  _ ("AF_PACKET PMD", AF_PACKET)  \
  _ ("rte_bond_pmd", BOND)        \
  _ ("rte_pmd_fm10k", FM10K)      \
  _ ("rte_cxgbe_pmd", CXGBE)
100 #define _(s,f) VNET_DPDK_PMD_##f,
104 VNET_DPDK_PMD_NETMAP,
106 VNET_DPDK_PMD_UNKNOWN, /* must be last */
110 VNET_DPDK_PORT_TYPE_ETH_1G,
111 VNET_DPDK_PORT_TYPE_ETH_10G,
112 VNET_DPDK_PORT_TYPE_ETH_40G,
113 VNET_DPDK_PORT_TYPE_ETH_BOND,
114 VNET_DPDK_PORT_TYPE_ETH_SWITCH,
116 VNET_DPDK_PORT_TYPE_NETMAP,
118 VNET_DPDK_PORT_TYPE_AF_PACKET,
119 VNET_DPDK_PORT_TYPE_UNKNOWN,
124 vlib_frame_t * frame;
/* Upper bound on the Early-Fast-Discard rate used by the EFD agent. */
#define DPDK_EFD_MAX_DISCARD_RATE 10
133 u32 consec_full_frames_cnt;
138 u32 total_packet_cnt;
145 #if RTE_VERSION >= RTE_VERSION_NUM(2, 2, 0, 0)
149 u32 n_since_last_int;
160 char sock_filename[256];
167 #if RTE_VERSION >= RTE_VERSION_NUM(2, 2, 0, 0)
168 dpdk_vu_vring vrings[VHOST_MAX_QUEUE_PAIRS * 2];
170 dpdk_vu_vring vrings[2];
172 u64 region_addr[VHOST_MEMORY_MAX_NREGIONS];
173 u32 region_fd[VHOST_MEMORY_MAX_NREGIONS];
176 typedef void (*dpdk_flowcontrol_callback_t) (vlib_main_t *vm,
181 * The header for the tx_vector in dpdk_device_t.
182 * Head and tail are indexes into the tx_vector and are of type
183 * u64 so they never overflow.
191 CLIB_CACHE_LINE_ALIGN_MARK(cacheline0);
192 volatile u32 **lockp;
197 u32 vlib_hw_if_index;
198 u32 vlib_sw_if_index;
200 /* next node index if we decide to steal the rx graph arc */
201 u32 per_interface_next_index;
203 /* dpdk rte_mbuf rx and tx vectors, VLIB_FRAME_SIZE */
204 struct rte_mbuf *** tx_vectors; /* one per worker thread */
205 struct rte_mbuf *** rx_vectors;
207 /* vector of traced contexts, per device */
208 u32 * d_trace_buffers;
210 /* per-worker destination frame queue */
211 dpdk_frame_t * frames;
213 dpdk_device_type_t dev_type:8;
220 CLIB_CACHE_LINE_ALIGN_MARK(cacheline1);
227 u16 * cpu_socket_id_by_queue;
228 struct rte_eth_conf port_conf;
229 struct rte_eth_txconf tx_conf;
235 /* vhost-user related */
237 struct virtio_net vu_vhost_dev;
239 dpdk_vu_intf_t *vu_intf;
242 u8 af_packet_port_id;
244 struct rte_eth_link link;
245 f64 time_last_link_update;
247 struct rte_eth_stats stats;
248 struct rte_eth_stats last_stats;
249 struct rte_eth_stats last_cleared_stats;
250 struct rte_eth_xstats * xstats;
251 struct rte_eth_xstats * last_cleared_xstats;
252 f64 time_last_stats_update;
253 dpdk_port_type_t port_type;
255 dpdk_efd_agent_t efd_agent;
256 u8 need_txlock; /* Used by VNET_DPDK_DEV_VHOST_USER */
261 CLIB_CACHE_LINE_ALIGN_MARK(cacheline0);
269 i32 n_vectors[MAX_NELTS];
270 } frame_queue_trace_t;
273 u64 count[MAX_NELTS];
274 } frame_queue_nelt_counter_t;
/* Number of slots in the per-device software tx ring. */
#define DPDK_TX_RING_SIZE (4 * 1024)

/* Default / minimum period (seconds) between device-counter polls. */
#define DPDK_STATS_POLL_INTERVAL (10.0)
#define DPDK_MIN_STATS_POLL_INTERVAL (0.001) /* 1msec */

/* Default / minimum period (seconds) between link-state polls. */
#define DPDK_LINK_POLL_INTERVAL (3.0)
#define DPDK_MIN_LINK_POLL_INTERVAL (0.001) /* 1msec */
285 CLIB_CACHE_LINE_ALIGN_MARK(cacheline0);
287 /* total input packet counter */
288 u64 aggregate_rx_packets;
294 } dpdk_device_and_queue_t;
/* Early-Fast-Discard (EFD) mode flag bits. */
#define DPDK_EFD_DISABLED 0
#define DPDK_EFD_DISCARD_ENABLED (1 << 0)
#define DPDK_EFD_MONITOR_ENABLED (1 << 1)
#define DPDK_EFD_DROPALL_ENABLED (1 << 2)

/* Default EFD congestion thresholds: device queue occupancy (percent)
 * and consecutive-full-frames count. */
#define DPDK_EFD_DEFAULT_DEVICE_QUEUE_HI_THRESH_PCT 90
#define DPDK_EFD_DEFAULT_CONSEC_FULL_FRAMES_HI_THRESH 6
305 typedef struct dpdk_efd_t {
308 u16 consec_full_frames_hi_thresh;
312 #define foreach_dpdk_device_config_item \
317 vlib_pci_addr_t pci_addr;
319 #define _(x) uword x;
320 foreach_dpdk_device_config_item
322 } dpdk_device_config_t;
328 u8 * eal_init_args_str;
329 u8 * uio_driver_name;
331 u8 enable_tcp_udp_checksum;
333 /* Required config parameters */
334 u8 coremask_set_manually;
335 u8 nchannels_set_manually;
339 u8 num_kni;/* while kni_init allows u32, port_id in callback fn is only u8 */
342 * format interface names ala xxxEthernet%d/%d/%d instead of
343 * xxxEthernet%x/%x/%x. For VIRL.
345 u8 interface_name_format_decimal;
347 /* virtio vhost-user switch */
350 /* vhost-user coalescence frames config */
351 u32 vhost_coalesce_frames;
352 f64 vhost_coalesce_time;
354 /* per-device config */
355 dpdk_device_config_t default_devconf;
356 dpdk_device_config_t * dev_confs;
357 uword * device_config_index_by_pci_addr;
359 } dpdk_config_main_t;
361 dpdk_config_main_t dpdk_config_main;
366 dpdk_device_t * devices;
367 dpdk_device_and_queue_t ** devices_by_cpu;
369 /* per-thread recycle lists */
372 /* buffer flags template, configurable to enable/disable tcp / udp cksum */
373 u32 buffer_flags_template;
375 /* flow control callback. If 0 then flow control is disabled */
376 dpdk_flowcontrol_callback_t flowcontrol_callback;
378 /* vlib buffer free list, must be same size as an rte_mbuf */
379 u32 vlib_buffer_free_list_index;
381 /* dpdk worker "threads" */
382 dpdk_worker_t * workers;
385 /* Ethernet input node index */
386 u32 ethernet_input_node_index;
388 /* dpdk i/o thread initialization barrier */
389 volatile u32 io_thread_release;
391 /* pcap tracing [only works if (CLIB_DEBUG > 0)] */
393 pcap_main_t pcap_main;
395 u32 pcap_sw_if_index;
396 u32 pcap_pkts_to_capture;
399 uword * dpdk_device_by_kni_port_id;
400 uword * vu_sw_if_index_by_listener_fd;
401 uword * vu_sw_if_index_by_sock_fd;
402 u32 * vu_inactive_interfaces_device_index;
406 /* efd (early-fast-discard) settings */
410 * flag indicating that a posted admin up/down
411 * (via post_sw_interface_set_flags) is in progress
413 u8 admin_up_down_in_progress;
418 /* which cpus are running dpdk-input */
419 int input_cpu_first_index;
422 /* control interval of dpdk link state and stat polling */
423 f64 link_state_poll_interval;
424 f64 stat_poll_interval;
426 /* for frame queue tracing */
427 frame_queue_trace_t *frame_queue_traces;
428 frame_queue_nelt_counter_t *frame_queue_histogram;
430 /* Sleep for this many MS after each device poll */
434 vlib_main_t * vlib_main;
435 vnet_main_t * vnet_main;
436 dpdk_config_main_t * conf;
439 dpdk_main_t dpdk_main;
442 DPDK_RX_NEXT_IP4_INPUT,
443 DPDK_RX_NEXT_IP6_INPUT,
444 DPDK_RX_NEXT_MPLS_INPUT,
445 DPDK_RX_NEXT_ETHERNET_INPUT,
455 /* Copy of VLIB buffer; packet data stored in pre_data. */
456 vlib_buffer_t buffer;
457 } dpdk_tx_dma_trace_t;
464 vlib_buffer_t buffer; /* Copy of VLIB buffer; pkt data stored in pre_data. */
465 } dpdk_rx_dma_trace_t;
467 void vnet_buffer_needs_dpdk_mb (vlib_buffer_t * b);
469 void dpdk_set_next_node (dpdk_rx_next_t, char *);
471 clib_error_t * dpdk_set_mac_address (vnet_hw_interface_t * hi, char * address);
473 clib_error_t * dpdk_set_mc_filter (vnet_hw_interface_t * hi,
474 struct ether_addr mc_addr_vec[], int naddr);
476 typedef void (*dpdk_io_thread_callback_t) (vlib_main_t *vm);
478 void dpdk_io_thread (vlib_worker_thread_t * w,
482 dpdk_io_thread_callback_t callback);
483 void dpdk_thread_input (dpdk_main_t * dm, dpdk_device_t * xd);
485 clib_error_t * dpdk_port_setup (dpdk_main_t * dm, dpdk_device_t * xd);
487 void dpdk_set_flowcontrol_callback (vlib_main_t *vm,
488 dpdk_flowcontrol_callback_t callback);
490 u32 dpdk_interface_tx_vector (vlib_main_t * vm, u32 dev_instance);
492 vlib_frame_queue_elt_t * vlib_get_handoff_queue_elt (u32 vlib_worker_index);
494 u32 dpdk_get_handoff_node_index (void);
496 void set_efd_bitmap (u8 *bitmap, u32 value, u32 op);
498 struct rte_mbuf * dpdk_replicate_packet_mb (vlib_buffer_t * b);
499 struct rte_mbuf * dpdk_zerocopy_replicate_packet_mb (vlib_buffer_t * b);
/* X-macro table of dpdk-input error counters: _(SYMBOL, "description").
 * Used to build the DPDK_ERROR_* enum and the error-string table. */
#define foreach_dpdk_error                                      \
  _(NONE, "no error")                                           \
  _(RX_PACKET_ERROR, "Rx packet errors")                        \
  _(RX_BAD_FCS, "Rx bad fcs")                                   \
  _(L4_CHECKSUM_ERROR, "Rx L4 checksum errors")                 \
  _(IP_CHECKSUM_ERROR, "Rx ip checksum errors")                 \
  _(RX_ALLOC_FAIL, "rx buf alloc from free list failed")        \
  _(RX_ALLOC_NO_PHYSMEM, "rx buf alloc failed no physmem")      \
  _(RX_ALLOC_DROP_PKTS, "rx packets dropped due to alloc error")\
  _(IPV4_EFD_DROP_PKTS, "IPV4 Early Fast Discard rx drops")     \
  _(IPV6_EFD_DROP_PKTS, "IPV6 Early Fast Discard rx drops")     \
  _(MPLS_EFD_DROP_PKTS, "MPLS Early Fast Discard rx drops")     \
  _(VLAN_EFD_DROP_PKTS, "VLAN Early Fast Discard rx drops")
516 #define _(f,s) DPDK_ERROR_##f,
523 * Increment EFD drop counter
526 void increment_efd_drop_counter (vlib_main_t * vm, u32 counter_index, u32 count)
530 my_n = vlib_get_node (vm, dpdk_input_node.index);
531 vm->error_main.counters[my_n->error_heap_index+counter_index] += count;
534 int dpdk_set_stat_poll_interval (f64 interval);
535 int dpdk_set_link_state_poll_interval (f64 interval);
536 void dpdk_update_link_state (dpdk_device_t * xd, f64 now);
537 void dpdk_device_lock_init(dpdk_device_t * xd);
538 void dpdk_device_lock_free(dpdk_device_t * xd);
539 void dpdk_efd_update_counters(dpdk_device_t *xd, u32 n_buffers, u16 enabled);
540 u32 is_efd_discardable(vlib_thread_main_t *tm,
542 struct rte_mbuf *mb);
544 /* dpdk vhost-user interrupt management */
545 u8 dpdk_vhost_user_want_interrupt (dpdk_device_t *xd, int idx);
546 void dpdk_vhost_user_send_interrupt (vlib_main_t * vm, dpdk_device_t * xd,
550 static inline u64 vnet_get_aggregate_rx_packets (void)
552 dpdk_main_t * dm = &dpdk_main;
556 vec_foreach(dw, dm->workers)
557 sum += dw->aggregate_rx_packets;
562 void dpdk_rx_trace (dpdk_main_t * dm,
563 vlib_node_runtime_t * node,
/* Comparison operators accepted by the EFD configuration (see efd_config
 * and set_efd_bitmap). */
#define EFD_OPERATION_LESS_THAN 0
#define EFD_OPERATION_GREATER_OR_EQUAL 1
572 void efd_config(u32 enabled,
573 u32 ip_prec, u32 ip_op,
574 u32 mpls_exp, u32 mpls_op,
575 u32 vlan_cos, u32 vlan_op);
577 void post_sw_interface_set_flags (vlib_main_t *vm, u32 sw_if_index, u32 flags);
579 typedef struct vhost_user_memory vhost_user_memory_t;
581 void dpdk_vhost_user_process_init (void **ctx);
582 void dpdk_vhost_user_process_cleanup (void *ctx);
583 uword dpdk_vhost_user_process_if (vlib_main_t *vm, dpdk_device_t *xd, void *ctx);
586 int dpdk_vhost_user_create_if (vnet_main_t * vnm, vlib_main_t * vm,
587 const char * sock_filename,
591 u8 renumber, u32 custom_dev_instance,
593 int dpdk_vhost_user_modify_if (vnet_main_t * vnm, vlib_main_t * vm,
594 const char * sock_filename,
598 u8 renumber, u32 custom_dev_instance);
599 int dpdk_vhost_user_delete_if (vnet_main_t * vnm, vlib_main_t * vm,
601 int dpdk_vhost_user_dump_ifs (vnet_main_t * vnm, vlib_main_t * vm,
602 vhost_user_intf_details_t **out_vuids);
604 u32 dpdk_get_admin_up_down_in_progress (void);
606 u32 dpdk_num_mbufs (void);
608 int dpdk_io_thread_release (void);
610 dpdk_pmd_t dpdk_get_pmd_type (vnet_hw_interface_t *hi);
612 i8 dpdk_get_cpu_socket (vnet_hw_interface_t *hi);
614 void * dpdk_input_multiarch_select();
615 void * dpdk_input_rss_multiarch_select();
616 void * dpdk_input_efd_multiarch_select();
619 dpdk_get_hw_interface_stats (u32 hw_if_index, struct rte_eth_stats* dest);
621 format_function_t format_dpdk_device_name;
622 format_function_t format_dpdk_device;
623 format_function_t format_dpdk_tx_dma_trace;
624 format_function_t format_dpdk_rx_dma_trace;
625 format_function_t format_dpdk_rte_mbuf;
626 format_function_t format_dpdk_rx_rte_mbuf;
627 unformat_function_t unformat_socket_mem;
631 dpdk_pmd_constructor_init()
633 /* Add references to DPDK Driver Constructor functions to get the dynamic
634 * loader to pull in the driver library & run the constructors.
638 void devinitfn_ ##d(void); \
639 __attribute__((unused)) void (* volatile pf)(void); \
640 pf = devinitfn_ ##d; \
643 #ifdef RTE_LIBRTE_EM_PMD
647 #ifdef RTE_LIBRTE_IGB_PMD
651 #ifdef RTE_LIBRTE_IXGBE_PMD
655 #ifdef RTE_LIBRTE_I40E_PMD
660 #ifdef RTE_LIBRTE_FM10K_PMD
664 #ifdef RTE_LIBRTE_VIRTIO_PMD
668 #ifdef RTE_LIBRTE_VMXNET3_PMD
669 _(rte_vmxnet3_driver)
672 #ifdef RTE_LIBRTE_VICE_PMD
676 #ifdef RTE_LIBRTE_ENIC_PMD
680 #ifdef RTE_LIBRTE_PMD_AF_PACKET
684 #ifdef RTE_LIBRTE_CXGBE_PMD
688 #ifdef RTE_LIBRTE_PMD_BOND
695 * At the moment, the ThunderX NIC driver doesn't have
696 * an entry point named "devinitfn_rte_xxx_driver"
701 __attribute__((unused)) void (* volatile pf)(void); \
705 #ifdef RTE_LIBRTE_THUNDERVNIC_PMD
706 _(rte_nicvf_pmd_init)
713 admin_up_down_process (vlib_main_t * vm,
714 vlib_node_runtime_t * rt,
717 #endif /* __included_dpdk_h__ */