2 * Copyright (c) 2015 Cisco and/or its affiliates.
3 * Licensed under the Apache License, Version 2.0 (the "License");
4 * you may not use this file except in compliance with the License.
5 * You may obtain a copy of the License at:
7 * http://www.apache.org/licenses/LICENSE-2.0
9 * Unless required by applicable law or agreed to in writing, software
10 * distributed under the License is distributed on an "AS IS" BASIS,
11 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 * See the License for the specific language governing permissions and
13 * limitations under the License.
15 #ifndef __included_dpdk_h__
16 #define __included_dpdk_h__
18 /* $$$$ We should rename always_inline -> clib_always_inline */
21 #include <rte_config.h>
23 #include <rte_common.h>
26 #include <rte_memory.h>
27 #include <rte_memzone.h>
28 #include <rte_tailq.h>
30 #include <rte_per_lcore.h>
31 #include <rte_launch.h>
32 #include <rte_atomic.h>
33 #include <rte_cycles.h>
34 #include <rte_prefetch.h>
35 #include <rte_lcore.h>
36 #include <rte_per_lcore.h>
37 #include <rte_branch_prediction.h>
38 #include <rte_interrupts.h>
40 #include <rte_random.h>
41 #include <rte_debug.h>
42 #include <rte_ether.h>
43 #include <rte_ethdev.h>
45 #include <rte_mempool.h>
47 #include <rte_virtio_net.h>
48 #include <rte_version.h>
49 #include <rte_eth_bond.h>
50 #include <rte_sched.h>
52 #include <vnet/unix/pcap.h>
53 #include <vnet/devices/devices.h>
/* NOTE(review): two competing definitions of always_inline appear back to
   back here; the #if CLIB_DEBUG > 0 / #else conditional that should select
   between them is not visible in this view -- confirm against the full
   file before relying on either definition. */
#define always_inline static inline
#define always_inline static inline __attribute__ ((__always_inline__))
61 #include <vlib/pci/pci.h>
63 #define NB_MBUF (16<<10)
65 extern vnet_device_class_t dpdk_device_class;
66 extern vlib_node_registration_t dpdk_input_node;
67 extern vlib_node_registration_t handoff_dispatch_node;
69 #if RTE_VERSION >= RTE_VERSION_NUM(16, 11, 0, 0)
/* X-macro: map DPDK PMD driver-name strings to VNET_DPDK_PMD_* enum
   symbols (expanded via "#define _(s,f) VNET_DPDK_PMD_##f," below).
   This variant covers DPDK >= 16.11, where drivers were renamed to the
   "net_" prefix. */
#define foreach_dpdk_pmd \
_ ("net_thunderx", THUNDERX) \
_ ("net_e1000_em", E1000EM) \
_ ("net_e1000_igb", IGB) \
_ ("net_e1000_igb_vf", IGBVF) \
_ ("net_ixgbe", IXGBE) \
_ ("net_ixgbe_vf", IXGBEVF) \
_ ("net_i40e", I40E) \
_ ("net_i40e_vf", I40EVF) \
_ ("net_virtio", VIRTIO) \
_ ("net_enic", ENIC) \
_ ("net_vmxnet3", VMXNET3) \
_ ("AF_PACKET PMD", AF_PACKET) \
_ ("rte_bond_pmd", BOND) \
_ ("net_fm10k", FM10K) \
_ ("net_cxgbe", CXGBE) \
_ ("net_mlx5", MLX5) \
_ ("net_dpaa2", DPAA2)
/* NOTE(review): second definition of foreach_dpdk_pmd, using the
   pre-16.11 "rte_*_pmd" driver names. The #else directive that should
   separate this from the >= 16.11 definition above (under the
   RTE_VERSION conditional) is not visible in this view -- confirm
   against the full file. */
#define foreach_dpdk_pmd \
_ ("rte_nicvf_pmd", THUNDERX) \
_ ("rte_em_pmd", E1000EM) \
_ ("rte_igb_pmd", IGB) \
_ ("rte_igbvf_pmd", IGBVF) \
_ ("rte_ixgbe_pmd", IXGBE) \
_ ("rte_ixgbevf_pmd", IXGBEVF) \
_ ("rte_i40e_pmd", I40E) \
_ ("rte_i40evf_pmd", I40EVF) \
_ ("rte_virtio_pmd", VIRTIO) \
_ ("rte_enic_pmd", ENIC) \
_ ("rte_vmxnet3_pmd", VMXNET3) \
_ ("AF_PACKET PMD", AF_PACKET) \
_ ("rte_bond_pmd", BOND) \
_ ("rte_pmd_fm10k", FM10K) \
_ ("rte_cxgbe_pmd", CXGBE) \
_ ("rte_dpaa2_dpni", DPAA2)
111 #define _(s,f) VNET_DPDK_PMD_##f,
114 VNET_DPDK_PMD_UNKNOWN, /* must be last */
119 VNET_DPDK_PORT_TYPE_ETH_1G,
120 VNET_DPDK_PORT_TYPE_ETH_10G,
121 VNET_DPDK_PORT_TYPE_ETH_40G,
122 VNET_DPDK_PORT_TYPE_ETH_100G,
123 VNET_DPDK_PORT_TYPE_ETH_BOND,
124 VNET_DPDK_PORT_TYPE_ETH_SWITCH,
125 VNET_DPDK_PORT_TYPE_AF_PACKET,
126 VNET_DPDK_PORT_TYPE_UNKNOWN,
130 * The header for the tx_vector in dpdk_device_t.
131 * Head and tail are indexes into the tx_vector and are of type
132 * u64 so they never overflow.
/* NOTE(review): the typedef/struct opener for this type is not visible
   in this view; only the field list and closing typedef name are. */
struct rte_ring *swq;		/* presumably the SW queue handing packets to the HQoS thread -- confirm */
/* Per-field slab extraction parameters (mask / byte position / shift)
   used to derive HQoS classification from packet bytes; consumed by
   dpdk_hqos_metadata_set() (declared later in this header). */
u64 hqos_field0_slabmask;
u32 hqos_field0_slabpos;
u32 hqos_field0_slabshr;
u64 hqos_field1_slabmask;
u32 hqos_field1_slabpos;
u32 hqos_field1_slabshr;
u64 hqos_field2_slabmask;
u32 hqos_field2_slabpos;
u32 hqos_field2_slabshr;
u32 hqos_tc_table[64];		/* 64-entry lookup table, presumably mapping an extracted field to traffic class -- TODO confirm */
} dpdk_device_hqos_per_worker_thread_t;
158 struct rte_ring **swq;
159 struct rte_mbuf **pkts_enq;
160 struct rte_mbuf **pkts_deq;
161 struct rte_sched_port *hqos;
167 } dpdk_device_hqos_per_hqos_thread_t;
171 CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
172 volatile u32 **lockp;
177 u32 vlib_hw_if_index;
178 u32 vlib_sw_if_index;
180 /* next node index if we decide to steal the rx graph arc */
181 u32 per_interface_next_index;
183 /* dpdk rte_mbuf rx and tx vectors, VLIB_FRAME_SIZE */
184 struct rte_mbuf ***tx_vectors; /* one per worker thread */
185 struct rte_mbuf ***rx_vectors;
187 /* vector of traced contexts, per device */
188 u32 **d_trace_buffers;
/* Per-device flag bits (OR'd together in a dpdk_device_t flags word). */
#define DPDK_DEVICE_FLAG_ADMIN_UP (1 << 0)	/* interface is administratively up */
#define DPDK_DEVICE_FLAG_PROMISC (1 << 1)	/* promiscuous mode enabled */
#define DPDK_DEVICE_FLAG_PMD (1 << 2)	/* device backed by a DPDK PMD */
#define DPDK_DEVICE_FLAG_PMD_SUPPORTS_PTYPE (1 << 3)	/* presumably: PMD fills mbuf packet_type -- confirm */
#define DPDK_DEVICE_FLAG_MAYBE_MULTISEG (1 << 4)	/* rx may yield multi-segment mbufs */
#define DPDK_DEVICE_FLAG_HAVE_SUBIF (1 << 5)	/* sub-interface(s) configured */
#define DPDK_DEVICE_FLAG_HQOS (1 << 6)	/* hierarchical QoS enabled on this device */
203 CLIB_CACHE_LINE_ALIGN_MARK (cacheline1);
205 u8 *interface_name_suffix;
207 /* number of sub-interfaces */
214 u16 *cpu_socket_id_by_queue;
215 struct rte_eth_conf port_conf;
216 struct rte_eth_txconf tx_conf;
219 dpdk_device_hqos_per_worker_thread_t *hqos_wt;
220 dpdk_device_hqos_per_hqos_thread_t *hqos_ht;
223 u8 af_packet_port_id;
225 struct rte_eth_link link;
226 f64 time_last_link_update;
228 struct rte_eth_stats stats;
229 struct rte_eth_stats last_stats;
230 struct rte_eth_stats last_cleared_stats;
231 struct rte_eth_xstat *xstats;
232 struct rte_eth_xstat *last_cleared_xstats;
233 f64 time_last_stats_update;
234 dpdk_port_type_t port_type;
237 u8 *default_mac_address;
240 #define DPDK_STATS_POLL_INTERVAL (10.0)
241 #define DPDK_MIN_STATS_POLL_INTERVAL (0.001) /* 1msec */
243 #define DPDK_LINK_POLL_INTERVAL (3.0)
244 #define DPDK_MIN_LINK_POLL_INTERVAL (0.001) /* 1msec */
248 CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
250 /* total input packet counter */
251 u64 aggregate_rx_packets;
256 CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
258 /* total input packet counter */
259 u64 aggregate_rx_packets;
260 } dpdk_hqos_thread_t;
266 } dpdk_device_and_queue_t;
268 #ifndef DPDK_HQOS_DBG_BYPASS
269 #define DPDK_HQOS_DBG_BYPASS 0
272 #ifndef HQOS_FLUSH_COUNT_THRESHOLD
273 #define HQOS_FLUSH_COUNT_THRESHOLD 100000
276 typedef struct dpdk_device_config_hqos_t
279 u32 hqos_thread_valid;
285 u32 pktfield0_slabpos;
286 u32 pktfield1_slabpos;
287 u32 pktfield2_slabpos;
288 u64 pktfield0_slabmask;
289 u64 pktfield1_slabmask;
290 u64 pktfield2_slabmask;
293 struct rte_sched_port_params port;
294 struct rte_sched_subport_params *subport;
295 struct rte_sched_pipe_params *pipe;
297 } dpdk_device_config_hqos_t;
299 int dpdk_hqos_validate_mask (u64 mask, u32 n);
300 void dpdk_device_config_hqos_pipe_profile_default (dpdk_device_config_hqos_t *
301 hqos, u32 pipe_profile_id);
302 void dpdk_device_config_hqos_default (dpdk_device_config_hqos_t * hqos);
303 clib_error_t *dpdk_port_setup_hqos (dpdk_device_t * xd,
304 dpdk_device_config_hqos_t * hqos);
305 void dpdk_hqos_metadata_set (dpdk_device_hqos_per_worker_thread_t * hqos,
306 struct rte_mbuf **pkts, u32 n_pkts);
308 #define foreach_dpdk_device_config_item \
317 vlib_pci_addr_t pci_addr;
319 u8 vlan_strip_offload;
320 #define DPDK_DEVICE_VLAN_STRIP_DEFAULT 0
321 #define DPDK_DEVICE_VLAN_STRIP_OFF 1
322 #define DPDK_DEVICE_VLAN_STRIP_ON 2
324 #define _(x) uword x;
325 foreach_dpdk_device_config_item
327 clib_bitmap_t * workers;
329 dpdk_device_config_hqos_t hqos;
330 } dpdk_device_config_t;
337 u8 *eal_init_args_str;
340 u8 enable_tcp_udp_checksum;
342 /* Required config parameters */
343 u8 coremask_set_manually;
344 u8 nchannels_set_manually;
348 u8 num_kni; /* while kni_init allows u32, port_id in callback fn is only u8 */
351 * format interface names ala xxxEthernet%d/%d/%d instead of
352 * xxxEthernet%x/%x/%x.
354 u8 interface_name_format_decimal;
356 /* per-device config */
357 dpdk_device_config_t default_devconf;
358 dpdk_device_config_t *dev_confs;
359 uword *device_config_index_by_pci_addr;
361 } dpdk_config_main_t;
363 dpdk_config_main_t dpdk_config_main;
369 dpdk_device_t *devices;
370 dpdk_device_and_queue_t **devices_by_cpu;
371 dpdk_device_and_queue_t **devices_by_hqos_cpu;
373 /* per-thread recycle lists */
376 /* buffer flags template, configurable to enable/disable tcp / udp cksum */
377 u32 buffer_flags_template;
379 /* vlib buffer free list, must be same size as an rte_mbuf */
380 u32 vlib_buffer_free_list_index;
382 /* dpdk worker "threads" */
383 dpdk_worker_t *workers;
385 /* dpdk HQoS "threads" */
386 dpdk_hqos_thread_t *hqos_threads;
388 /* Ethernet input node index */
389 u32 ethernet_input_node_index;
391 /* pcap tracing [only works if (CLIB_DEBUG > 0)] */
393 pcap_main_t pcap_main;
395 u32 pcap_sw_if_index;
396 u32 pcap_pkts_to_capture;
399 uword *dpdk_device_by_kni_port_id;
400 uword *vu_sw_if_index_by_listener_fd;
401 uword *vu_sw_if_index_by_sock_fd;
402 u32 *vu_inactive_interfaces_device_index;
405 * flag indicating that a posted admin up/down
406 * (via post_sw_interface_set_flags) is in progress
408 u8 admin_up_down_in_progress;
412 /* which cpus are running dpdk-input */
413 int input_cpu_first_index;
416 /* which cpus are running I/O TX */
417 int hqos_cpu_first_index;
420 /* control interval of dpdk link state and stat polling */
421 f64 link_state_poll_interval;
422 f64 stat_poll_interval;
424 /* Sleep for this many MS after each device poll */
428 vlib_main_t *vlib_main;
429 vnet_main_t *vnet_main;
430 dpdk_config_main_t *conf;
433 struct rte_mempool **pktmbuf_pools;
436 dpdk_main_t dpdk_main;
444 /* Copy of VLIB buffer; packet data stored in pre_data. */
445 vlib_buffer_t buffer;
446 } dpdk_tx_dma_trace_t;
454 vlib_buffer_t buffer; /* Copy of VLIB buffer; pkt data stored in pre_data. */
455 u8 data[256]; /* First 256 data bytes, used for hexdump */
456 } dpdk_rx_dma_trace_t;
458 void vnet_buffer_needs_dpdk_mb (vlib_buffer_t * b);
460 clib_error_t *dpdk_set_mac_address (vnet_hw_interface_t * hi, char *address);
462 clib_error_t *dpdk_set_mc_filter (vnet_hw_interface_t * hi,
463 struct ether_addr mc_addr_vec[], int naddr);
465 void dpdk_thread_input (dpdk_main_t * dm, dpdk_device_t * xd);
467 clib_error_t *dpdk_port_setup (dpdk_main_t * dm, dpdk_device_t * xd);
469 u32 dpdk_interface_tx_vector (vlib_main_t * vm, u32 dev_instance);
471 struct rte_mbuf *dpdk_replicate_packet_mb (vlib_buffer_t * b);
472 struct rte_mbuf *dpdk_zerocopy_replicate_packet_mb (vlib_buffer_t * b);
/* X-macro list of dpdk-input error counters: _(symbol, description).
   Expanded below via "#define _(f,s) DPDK_ERROR_##f," into the
   DPDK_ERROR_* enum. */
#define foreach_dpdk_error \
_(NONE, "no error") \
_(RX_PACKET_ERROR, "Rx packet errors") \
_(RX_BAD_FCS, "Rx bad fcs") \
_(IP_CHECKSUM_ERROR, "Rx ip checksum errors") \
_(RX_ALLOC_FAIL, "rx buf alloc from free list failed") \
_(RX_ALLOC_NO_PHYSMEM, "rx buf alloc failed no physmem") \
_(RX_ALLOC_DROP_PKTS, "rx packets dropped due to alloc error")
485 #define _(f,s) DPDK_ERROR_##f,
491 int dpdk_set_stat_poll_interval (f64 interval);
492 int dpdk_set_link_state_poll_interval (f64 interval);
493 void dpdk_update_link_state (dpdk_device_t * xd, f64 now);
494 void dpdk_device_lock_init (dpdk_device_t * xd);
495 void dpdk_device_lock_free (dpdk_device_t * xd);
/* Sums the per-worker aggregate_rx_packets counters across dm->workers.
   NOTE(review): this definition appears truncated in this view -- the
   return type, opening/closing braces, and the declarations of the
   accumulator ("sum") and iterator ("dw") are not visible; confirm
   against the full file before editing. */
vnet_get_aggregate_rx_packets (void)
dpdk_main_t *dm = &dpdk_main;
vec_foreach (dw, dm->workers) sum += dw->aggregate_rx_packets;
509 void dpdk_rx_trace (dpdk_main_t * dm,
510 vlib_node_runtime_t * node,
512 u16 queue_id, u32 * buffers, uword n_buffers);
514 #define EFD_OPERATION_LESS_THAN 0
515 #define EFD_OPERATION_GREATER_OR_EQUAL 1
517 format_function_t format_dpdk_device_name;
518 format_function_t format_dpdk_device;
519 format_function_t format_dpdk_tx_dma_trace;
520 format_function_t format_dpdk_rx_dma_trace;
521 format_function_t format_dpdk_rte_mbuf;
522 format_function_t format_dpdk_rx_rte_mbuf;
523 unformat_function_t unformat_socket_mem;
524 clib_error_t *unformat_rss_fn (unformat_input_t * input, uword * rss_fn);
525 clib_error_t *unformat_hqos (unformat_input_t * input,
526 dpdk_device_config_hqos_t * hqos);
529 admin_up_down_process (vlib_main_t * vm,
530 vlib_node_runtime_t * rt, vlib_frame_t * f);
532 #endif /* __included_dpdk_h__ */
535 * fd.io coding-style-patch-verification: ON
538 * eval: (c-set-style "gnu")