2 * Copyright (c) 2015 Cisco and/or its affiliates.
3 * Licensed under the Apache License, Version 2.0 (the "License");
4 * you may not use this file except in compliance with the License.
5 * You may obtain a copy of the License at:
7 * http://www.apache.org/licenses/LICENSE-2.0
9 * Unless required by applicable law or agreed to in writing, software
10 * distributed under the License is distributed on an "AS IS" BASIS,
11 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 * See the License for the specific language governing permissions and
13 * limitations under the License.
15 #ifndef __included_dpdk_h__
16 #define __included_dpdk_h__
18 /* $$$$ We should rename always_inline -> clib_always_inline */
21 #include <rte_config.h>
23 #include <rte_common.h>
26 #include <rte_memory.h>
27 #include <rte_memzone.h>
28 #include <rte_tailq.h>
30 #include <rte_per_lcore.h>
31 #include <rte_launch.h>
32 #include <rte_atomic.h>
33 #include <rte_cycles.h>
34 #include <rte_prefetch.h>
35 #include <rte_lcore.h>
36 #include <rte_per_lcore.h>
37 #include <rte_branch_prediction.h>
38 #include <rte_interrupts.h>
40 #include <rte_random.h>
41 #include <rte_debug.h>
42 #include <rte_ether.h>
43 #include <rte_ethdev.h>
45 #include <rte_mempool.h>
50 #include <rte_virtio_net.h>
51 #include <rte_version.h>
52 #include <rte_eth_bond.h>
53 #include <rte_sched.h>
55 #include <vnet/unix/pcap.h>
56 #include <vnet/devices/virtio/vhost-user.h>
59 #define always_inline static inline
61 #define always_inline static inline __attribute__ ((__always_inline__))
64 #if RTE_VERSION < RTE_VERSION_NUM(16, 7, 0, 0)
65 #define DPDK_VHOST_USER 1
67 #define DPDK_VHOST_USER 0
70 #include <vlib/pci/pci.h>
72 #define NB_MBUF (16<<10)
74 extern vnet_device_class_t dpdk_device_class;
75 extern vlib_node_registration_t dpdk_input_node;
76 extern vlib_node_registration_t handoff_dispatch_node;
/*
 * X-macro table of recognized DPDK poll-mode drivers:
 *   _(driver-name-string, ENUM_TAG)
 * Expanded below with _(s,f) => VNET_DPDK_PMD_##f to build the PMD enum
 * (VNET_DPDK_PMD_UNKNOWN is appended last).  The expansion order defines
 * the enum values, so do NOT reorder entries; add new PMDs at the end.
 */
#define foreach_dpdk_pmd \
_ ("rte_nicvf_pmd", THUNDERX) \
_ ("rte_em_pmd", E1000EM) \
_ ("rte_igb_pmd", IGB) \
_ ("rte_igbvf_pmd", IGBVF) \
_ ("rte_ixgbe_pmd", IXGBE) \
_ ("rte_ixgbevf_pmd", IXGBEVF) \
_ ("rte_i40e_pmd", I40E) \
_ ("rte_i40evf_pmd", I40EVF) \
_ ("rte_virtio_pmd", VIRTIO) \
_ ("rte_enic_pmd", ENIC) \
_ ("rte_vmxnet3_pmd", VMXNET3) \
_ ("AF_PACKET PMD", AF_PACKET) \
_ ("rte_bond_pmd", BOND) \
_ ("rte_pmd_fm10k", FM10K) \
_ ("rte_cxgbe_pmd", CXGBE) \
_ ("rte_dpaa2_dpni", DPAA2)
99 #define _(s,f) VNET_DPDK_PMD_##f,
102 VNET_DPDK_PMD_UNKNOWN, /* must be last */
107 VNET_DPDK_PORT_TYPE_ETH_1G,
108 VNET_DPDK_PORT_TYPE_ETH_10G,
109 VNET_DPDK_PORT_TYPE_ETH_40G,
110 VNET_DPDK_PORT_TYPE_ETH_BOND,
111 VNET_DPDK_PORT_TYPE_ETH_SWITCH,
112 VNET_DPDK_PORT_TYPE_AF_PACKET,
113 VNET_DPDK_PORT_TYPE_UNKNOWN,
122 #define DPDK_EFD_MAX_DISCARD_RATE 10
129 u32 consec_full_frames_cnt;
134 u32 total_packet_cnt;
145 u32 n_since_last_int;
157 char sock_filename[256];
164 dpdk_vu_vring vrings[VHOST_MAX_QUEUE_PAIRS * 2];
165 u64 region_addr[VHOST_MEMORY_MAX_NREGIONS];
166 u32 region_fd[VHOST_MEMORY_MAX_NREGIONS];
167 u64 region_offset[VHOST_MEMORY_MAX_NREGIONS];
171 typedef void (*dpdk_flowcontrol_callback_t) (vlib_main_t * vm,
172 u32 hw_if_index, u32 n_packets);
175 * The header for the tx_vector in dpdk_device_t.
176 * Head and tail are indexes into the tx_vector and are of type
177 * u64 so they never overflow.
187 struct rte_ring *swq;
189 u64 hqos_field0_slabmask;
190 u32 hqos_field0_slabpos;
191 u32 hqos_field0_slabshr;
192 u64 hqos_field1_slabmask;
193 u32 hqos_field1_slabpos;
194 u32 hqos_field1_slabshr;
195 u64 hqos_field2_slabmask;
196 u32 hqos_field2_slabpos;
197 u32 hqos_field2_slabshr;
198 u32 hqos_tc_table[64];
199 } dpdk_device_hqos_per_worker_thread_t;
203 struct rte_ring **swq;
204 struct rte_mbuf **pkts_enq;
205 struct rte_mbuf **pkts_deq;
206 struct rte_sched_port *hqos;
211 } dpdk_device_hqos_per_hqos_thread_t;
215 CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
216 volatile u32 **lockp;
221 u32 vlib_hw_if_index;
222 u32 vlib_sw_if_index;
224 /* next node index if we decide to steal the rx graph arc */
225 u32 per_interface_next_index;
227 /* dpdk rte_mbuf rx and tx vectors, VLIB_FRAME_SIZE */
228 struct rte_mbuf ***tx_vectors; /* one per worker thread */
229 struct rte_mbuf ***rx_vectors;
231 /* vector of traced contexts, per device */
232 u32 *d_trace_buffers;
/* Per-device flag bits (stored in a dpdk_device_t flags word). */
#define DPDK_DEVICE_FLAG_ADMIN_UP (1 << 0)	/* interface is admin up */
#define DPDK_DEVICE_FLAG_PROMISC (1 << 1)	/* promiscuous mode enabled */
#define DPDK_DEVICE_FLAG_PMD (1 << 2)	/* device is driven by a DPDK PMD */
#define DPDK_DEVICE_FLAG_KNI (1 << 3)	/* kernel NIC interface (KNI) device */
#define DPDK_DEVICE_FLAG_VHOST_USER (1 << 4)	/* vhost-user device */
#define DPDK_DEVICE_FLAG_HAVE_SUBIF (1 << 5)	/* device has sub-interfaces */
#define DPDK_DEVICE_FLAG_HQOS (1 << 6)	/* hierarchical QoS enabled */
247 CLIB_CACHE_LINE_ALIGN_MARK (cacheline1);
249 u8 *interface_name_suffix;
251 /* number of sub-interfaces */
258 u16 *cpu_socket_id_by_queue;
259 struct rte_eth_conf port_conf;
260 struct rte_eth_txconf tx_conf;
263 dpdk_device_hqos_per_worker_thread_t *hqos_wt;
264 dpdk_device_hqos_per_hqos_thread_t *hqos_ht;
271 /* vhost-user related */
273 struct virtio_net vu_vhost_dev;
275 dpdk_vu_intf_t *vu_intf;
279 u8 af_packet_port_id;
281 struct rte_eth_link link;
282 f64 time_last_link_update;
284 struct rte_eth_stats stats;
285 struct rte_eth_stats last_stats;
286 struct rte_eth_stats last_cleared_stats;
287 #if RTE_VERSION >= RTE_VERSION_NUM(16, 7, 0, 0)
288 struct rte_eth_xstat *xstats;
289 struct rte_eth_xstat *last_cleared_xstats;
291 struct rte_eth_xstats *xstats;
292 struct rte_eth_xstats *last_cleared_xstats;
294 f64 time_last_stats_update;
295 dpdk_port_type_t port_type;
297 dpdk_efd_agent_t efd_agent;
298 u8 need_txlock; /* Used by VNET_DPDK_DEV_VHOST_USER */
/*
 * Default and minimum periods (in seconds) for the background processes
 * that poll device stats and link state; the active values live in the
 * stat_poll_interval / link_state_poll_interval fields and can be set
 * via dpdk_set_stat_poll_interval() / dpdk_set_link_state_poll_interval().
 */
#define DPDK_STATS_POLL_INTERVAL (10.0)
#define DPDK_MIN_STATS_POLL_INTERVAL (0.001)	/* 1msec */

#define DPDK_LINK_POLL_INTERVAL (3.0)
#define DPDK_MIN_LINK_POLL_INTERVAL (0.001)	/* 1msec */
309 CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
311 /* total input packet counter */
312 u64 aggregate_rx_packets;
317 CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
319 /* total input packet counter */
320 u64 aggregate_rx_packets;
321 } dpdk_hqos_thread_t;
327 } dpdk_device_and_queue_t;
/* Early-Fast-Discard (EFD) */
/* Mode bits: 0 disables EFD entirely; the remaining bits enable
 * discarding, monitoring, and drop-all behavior independently. */
#define DPDK_EFD_DISABLED 0
#define DPDK_EFD_DISCARD_ENABLED (1 << 0)
#define DPDK_EFD_MONITOR_ENABLED (1 << 1)
#define DPDK_EFD_DROPALL_ENABLED (1 << 2)

/* Default thresholds: queue-fill percentage and number of consecutive
 * full frames before EFD considers a device congested. */
#define DPDK_EFD_DEFAULT_DEVICE_QUEUE_HI_THRESH_PCT 90
#define DPDK_EFD_DEFAULT_CONSEC_FULL_FRAMES_HI_THRESH 6
338 typedef struct dpdk_efd_t
342 u16 consec_full_frames_hi_thresh;
346 #ifndef DPDK_HQOS_DBG_BYPASS
347 #define DPDK_HQOS_DBG_BYPASS 0
350 typedef struct dpdk_device_config_hqos_t
353 u32 hqos_thread_valid;
359 u32 pktfield0_slabpos;
360 u32 pktfield1_slabpos;
361 u32 pktfield2_slabpos;
362 u64 pktfield0_slabmask;
363 u64 pktfield1_slabmask;
364 u64 pktfield2_slabmask;
367 struct rte_sched_port_params port;
368 struct rte_sched_subport_params *subport;
369 struct rte_sched_pipe_params *pipe;
371 } dpdk_device_config_hqos_t;
373 int dpdk_hqos_validate_mask (u64 mask, u32 n);
374 void dpdk_device_config_hqos_pipe_profile_default (dpdk_device_config_hqos_t *
375 hqos, u32 pipe_profile_id);
376 void dpdk_device_config_hqos_default (dpdk_device_config_hqos_t * hqos);
377 clib_error_t *dpdk_port_setup_hqos (dpdk_device_t * xd,
378 dpdk_device_config_hqos_t * hqos);
379 void dpdk_hqos_metadata_set (dpdk_device_hqos_per_worker_thread_t * hqos,
380 struct rte_mbuf **pkts, u32 n_pkts);
382 #define foreach_dpdk_device_config_item \
391 vlib_pci_addr_t pci_addr;
393 u8 vlan_strip_offload;
394 #define DPDK_DEVICE_VLAN_STRIP_DEFAULT 0
395 #define DPDK_DEVICE_VLAN_STRIP_OFF 1
396 #define DPDK_DEVICE_VLAN_STRIP_ON 2
398 #define _(x) uword x;
399 foreach_dpdk_device_config_item
401 clib_bitmap_t * workers;
403 dpdk_device_config_hqos_t hqos;
404 } dpdk_device_config_t;
411 u8 *eal_init_args_str;
414 u8 enable_tcp_udp_checksum;
416 /* Required config parameters */
417 u8 coremask_set_manually;
418 u8 nchannels_set_manually;
422 u8 num_kni; /* while kni_init allows u32, port_id in callback fn is only u8 */
425 * format interface names ala xxxEthernet%d/%d/%d instead of
426 * xxxEthernet%x/%x/%x.
428 u8 interface_name_format_decimal;
430 /* virtio vhost-user switch */
433 /* vhost-user coalescence frames config */
434 u32 vhost_coalesce_frames;
435 f64 vhost_coalesce_time;
437 /* per-device config */
438 dpdk_device_config_t default_devconf;
439 dpdk_device_config_t *dev_confs;
440 uword *device_config_index_by_pci_addr;
442 } dpdk_config_main_t;
444 dpdk_config_main_t dpdk_config_main;
450 dpdk_device_t *devices;
451 dpdk_device_and_queue_t **devices_by_cpu;
452 dpdk_device_and_queue_t **devices_by_hqos_cpu;
454 /* per-thread recycle lists */
457 /* buffer flags template, configurable to enable/disable tcp / udp cksum */
458 u32 buffer_flags_template;
460 /* flow control callback. If 0 then flow control is disabled */
461 dpdk_flowcontrol_callback_t flowcontrol_callback;
463 /* vlib buffer free list, must be same size as an rte_mbuf */
464 u32 vlib_buffer_free_list_index;
466 /* dpdk worker "threads" */
467 dpdk_worker_t *workers;
469 /* dpdk HQoS "threads" */
470 dpdk_hqos_thread_t *hqos_threads;
472 /* Ethernet input node index */
473 u32 ethernet_input_node_index;
475 /* pcap tracing [only works if (CLIB_DEBUG > 0)] */
477 pcap_main_t pcap_main;
479 u32 pcap_sw_if_index;
480 u32 pcap_pkts_to_capture;
483 uword *dpdk_device_by_kni_port_id;
484 uword *vu_sw_if_index_by_listener_fd;
485 uword *vu_sw_if_index_by_sock_fd;
486 u32 *vu_inactive_interfaces_device_index;
490 /* efd (early-fast-discard) settings */
494 * flag indicating that a posted admin up/down
495 * (via post_sw_interface_set_flags) is in progress
497 u8 admin_up_down_in_progress;
501 /* which cpus are running dpdk-input */
502 int input_cpu_first_index;
505 /* which cpus are running I/O TX */
506 int hqos_cpu_first_index;
509 /* control interval of dpdk link state and stat polling */
510 f64 link_state_poll_interval;
511 f64 stat_poll_interval;
513 /* Sleep for this many MS after each device poll */
517 vlib_main_t *vlib_main;
518 vnet_main_t *vnet_main;
519 dpdk_config_main_t *conf;
522 dpdk_main_t dpdk_main;
526 DPDK_RX_NEXT_IP4_INPUT,
527 DPDK_RX_NEXT_IP6_INPUT,
528 DPDK_RX_NEXT_MPLS_INPUT,
529 DPDK_RX_NEXT_ETHERNET_INPUT,
540 /* Copy of VLIB buffer; packet data stored in pre_data. */
541 vlib_buffer_t buffer;
542 } dpdk_tx_dma_trace_t;
550 vlib_buffer_t buffer; /* Copy of VLIB buffer; pkt data stored in pre_data. */
551 u8 data[256]; /* First 256 data bytes, used for hexdump */
552 } dpdk_rx_dma_trace_t;
554 void vnet_buffer_needs_dpdk_mb (vlib_buffer_t * b);
556 void dpdk_set_next_node (dpdk_rx_next_t, char *);
558 clib_error_t *dpdk_set_mac_address (vnet_hw_interface_t * hi, char *address);
560 clib_error_t *dpdk_set_mc_filter (vnet_hw_interface_t * hi,
561 struct ether_addr mc_addr_vec[], int naddr);
563 void dpdk_thread_input (dpdk_main_t * dm, dpdk_device_t * xd);
565 clib_error_t *dpdk_port_setup (dpdk_main_t * dm, dpdk_device_t * xd);
567 void dpdk_set_flowcontrol_callback (vlib_main_t * vm,
568 dpdk_flowcontrol_callback_t callback);
570 u32 dpdk_interface_tx_vector (vlib_main_t * vm, u32 dev_instance);
572 void set_efd_bitmap (u8 * bitmap, u32 value, u32 op);
574 struct rte_mbuf *dpdk_replicate_packet_mb (vlib_buffer_t * b);
575 struct rte_mbuf *dpdk_zerocopy_replicate_packet_mb (vlib_buffer_t * b);
/*
 * X-macro table of dpdk-input error counters:
 *   _(SYMBOL, "human readable description")
 * Expanded below with _(f,s) => DPDK_ERROR_##f to build the error enum;
 * the descriptions become the per-counter strings.  Expansion order
 * defines the counter indices, so do not reorder entries.
 */
#define foreach_dpdk_error \
_(NONE, "no error") \
_(RX_PACKET_ERROR, "Rx packet errors") \
_(RX_BAD_FCS, "Rx bad fcs") \
_(L4_CHECKSUM_ERROR, "Rx L4 checksum errors") \
_(IP_CHECKSUM_ERROR, "Rx ip checksum errors") \
_(RX_ALLOC_FAIL, "rx buf alloc from free list failed") \
_(RX_ALLOC_NO_PHYSMEM, "rx buf alloc failed no physmem") \
_(RX_ALLOC_DROP_PKTS, "rx packets dropped due to alloc error") \
_(IPV4_EFD_DROP_PKTS, "IPV4 Early Fast Discard rx drops") \
_(IPV6_EFD_DROP_PKTS, "IPV6 Early Fast Discard rx drops") \
_(MPLS_EFD_DROP_PKTS, "MPLS Early Fast Discard rx drops") \
_(VLAN_EFD_DROP_PKTS, "VLAN Early Fast Discard rx drops")
593 #define _(f,s) DPDK_ERROR_##f,
600 * Increment EFD drop counter
602 static_always_inline void
603 increment_efd_drop_counter (vlib_main_t * vm, u32 counter_index, u32 count)
607 my_n = vlib_get_node (vm, dpdk_input_node.index);
608 vm->error_main.counters[my_n->error_heap_index + counter_index] += count;
611 int dpdk_set_stat_poll_interval (f64 interval);
612 int dpdk_set_link_state_poll_interval (f64 interval);
613 void dpdk_update_link_state (dpdk_device_t * xd, f64 now);
614 void dpdk_device_lock_init (dpdk_device_t * xd);
615 void dpdk_device_lock_free (dpdk_device_t * xd);
616 void dpdk_efd_update_counters (dpdk_device_t * xd, u32 n_buffers,
618 u32 is_efd_discardable (vlib_thread_main_t * tm, vlib_buffer_t * b0,
619 struct rte_mbuf *mb);
622 /* dpdk vhost-user interrupt management */
623 u8 dpdk_vhost_user_want_interrupt (dpdk_device_t * xd, int idx);
624 void dpdk_vhost_user_send_interrupt (vlib_main_t * vm, dpdk_device_t * xd,
630 vnet_get_aggregate_rx_packets (void)
632 dpdk_main_t *dm = &dpdk_main;
636 vec_foreach (dw, dm->workers) sum += dw->aggregate_rx_packets;
641 void dpdk_rx_trace (dpdk_main_t * dm,
642 vlib_node_runtime_t * node,
644 u16 queue_id, u32 * buffers, uword n_buffers);
/* Comparison-operator codes passed as the *_op arguments of efd_config()
 * and the 'op' argument of set_efd_bitmap(). */
#define EFD_OPERATION_LESS_THAN 0
#define EFD_OPERATION_GREATER_OR_EQUAL 1
649 void efd_config (u32 enabled,
650 u32 ip_prec, u32 ip_op,
651 u32 mpls_exp, u32 mpls_op, u32 vlan_cos, u32 vlan_op);
653 void post_sw_interface_set_flags (vlib_main_t * vm, u32 sw_if_index,
657 typedef struct vhost_user_memory vhost_user_memory_t;
659 void dpdk_vhost_user_process_init (void **ctx);
660 void dpdk_vhost_user_process_cleanup (void *ctx);
661 uword dpdk_vhost_user_process_if (vlib_main_t * vm, dpdk_device_t * xd,
665 int dpdk_vhost_user_create_if (vnet_main_t * vnm, vlib_main_t * vm,
666 const char *sock_filename,
670 u8 renumber, u32 custom_dev_instance,
672 int dpdk_vhost_user_modify_if (vnet_main_t * vnm, vlib_main_t * vm,
673 const char *sock_filename,
677 u8 renumber, u32 custom_dev_instance);
678 int dpdk_vhost_user_delete_if (vnet_main_t * vnm, vlib_main_t * vm,
680 int dpdk_vhost_user_dump_ifs (vnet_main_t * vnm, vlib_main_t * vm,
681 vhost_user_intf_details_t ** out_vuids);
684 u32 dpdk_get_admin_up_down_in_progress (void);
686 u32 dpdk_num_mbufs (void);
688 dpdk_pmd_t dpdk_get_pmd_type (vnet_hw_interface_t * hi);
690 i8 dpdk_get_cpu_socket (vnet_hw_interface_t * hi);
692 void *dpdk_input_multiarch_select ();
693 void *dpdk_input_rss_multiarch_select ();
694 void *dpdk_input_efd_multiarch_select ();
696 clib_error_t *dpdk_get_hw_interface_stats (u32 hw_if_index,
697 struct rte_eth_stats *dest);
699 format_function_t format_dpdk_device_name;
700 format_function_t format_dpdk_device;
701 format_function_t format_dpdk_tx_dma_trace;
702 format_function_t format_dpdk_rx_dma_trace;
703 format_function_t format_dpdk_rte_mbuf;
704 format_function_t format_dpdk_rx_rte_mbuf;
705 unformat_function_t unformat_socket_mem;
706 clib_error_t *unformat_rss_fn (unformat_input_t * input, uword * rss_fn);
707 clib_error_t *unformat_hqos (unformat_input_t * input,
708 dpdk_device_config_hqos_t * hqos);
711 admin_up_down_process (vlib_main_t * vm,
712 vlib_node_runtime_t * rt, vlib_frame_t * f);
714 #endif /* __included_dpdk_h__ */
717 * fd.io coding-style-patch-verification: ON
720 * eval: (c-set-style "gnu")