/*
 * Copyright (c) 2015 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
15 #ifndef __included_dpdk_h__
16 #define __included_dpdk_h__
18 /* $$$$ We should rename always_inline -> clib_always_inline */
21 #include <rte_config.h>
23 #include <rte_common.h>
25 #include <rte_memory.h>
27 #include <rte_per_lcore.h>
28 #include <rte_cycles.h>
29 #include <rte_lcore.h>
30 #include <rte_per_lcore.h>
31 #include <rte_interrupts.h>
33 #include <rte_ether.h>
34 #include <rte_ethdev.h>
36 #include <rte_mempool.h>
38 #include <rte_version.h>
39 #include <rte_eth_bond.h>
40 #include <rte_sched.h>
42 #include <vnet/unix/pcap.h>
43 #include <vnet/devices/devices.h>
46 #define always_inline static inline
48 #define always_inline static inline __attribute__ ((__always_inline__))
51 #include <vlib/pci/pci.h>
53 #define NB_MBUF (16<<10)
55 extern vnet_device_class_t dpdk_device_class;
56 extern vlib_node_registration_t dpdk_input_node;
/*
 * X-macro list of poll-mode drivers this plugin recognizes: each entry
 * pairs the DPDK driver name string with a tag used to build the
 * VNET_DPDK_PMD_<tag> enumerators.
 */
#define foreach_dpdk_pmd               \
  _ ("net_thunderx", THUNDERX)         \
  _ ("net_e1000_em", E1000EM)          \
  _ ("net_e1000_igb", IGB)             \
  _ ("net_e1000_igb_vf", IGBVF)        \
  _ ("net_ixgbe", IXGBE)               \
  _ ("net_ixgbe_vf", IXGBEVF)          \
  _ ("net_i40e", I40E)                 \
  _ ("net_i40e_vf", I40EVF)            \
  _ ("net_virtio", VIRTIO)             \
  _ ("net_enic", ENIC)                 \
  _ ("net_vmxnet3", VMXNET3)           \
  _ ("AF_PACKET PMD", AF_PACKET)       \
  _ ("net_bonding", BOND)              \
  _ ("net_fm10k", FM10K)               \
  _ ("net_cxgbe", CXGBE)               \
  _ ("net_mlx4", MLX4)                 \
  _ ("net_mlx5", MLX5)                 \
  _ ("net_dpaa2", DPAA2)
81 #define _(s,f) VNET_DPDK_PMD_##f,
84 VNET_DPDK_PMD_UNKNOWN, /* must be last */
/*
 * Port speed / flavor classification (used for interface naming, display).
 * NOTE(review): the `typedef enum { ... } dpdk_port_type_t;` scaffolding
 * was lost in this copy of the file; reconstructed (the name is referenced
 * by dpdk_device_t below) -- confirm against upstream.
 */
typedef enum
{
  VNET_DPDK_PORT_TYPE_ETH_1G,
  VNET_DPDK_PORT_TYPE_ETH_10G,
  VNET_DPDK_PORT_TYPE_ETH_25G,
  VNET_DPDK_PORT_TYPE_ETH_40G,
  VNET_DPDK_PORT_TYPE_ETH_100G,
  VNET_DPDK_PORT_TYPE_ETH_BOND,
  VNET_DPDK_PORT_TYPE_ETH_SWITCH,
  VNET_DPDK_PORT_TYPE_AF_PACKET,
  VNET_DPDK_PORT_TYPE_ETH_VF,
  VNET_DPDK_PORT_TYPE_UNKNOWN,
} dpdk_port_type_t;
/*
 * The header for the tx_vector in dpdk_device_t.
 * Head and tail are indexes into the tx_vector and are of type
 * u64 so they never overflow.
 * NOTE(review): the struct this comment describes appears to have been
 * lost in this copy of the file.
 */
114 struct rte_ring *swq;
116 u64 hqos_field0_slabmask;
117 u32 hqos_field0_slabpos;
118 u32 hqos_field0_slabshr;
119 u64 hqos_field1_slabmask;
120 u32 hqos_field1_slabpos;
121 u32 hqos_field1_slabshr;
122 u64 hqos_field2_slabmask;
123 u32 hqos_field2_slabpos;
124 u32 hqos_field2_slabshr;
125 u32 hqos_tc_table[64];
126 } dpdk_device_hqos_per_worker_thread_t;
/* Per-HQoS-thread state for one device.
   NOTE(review): the struct opening and some trailing members were lost in
   this copy of the file; reconstructed minimally -- confirm layout against
   upstream. */
typedef struct
{
  /* Vector of software queues feeding this HQoS thread (presumably one
     per worker thread -- confirm). */
  struct rte_ring **swq;
  /* Staging arrays of mbufs for scheduler enqueue / dequeue. */
  struct rte_mbuf **pkts_enq;
  struct rte_mbuf **pkts_deq;
  /* The DPDK hierarchical scheduler instance for this device. */
  struct rte_sched_port *hqos;
} dpdk_device_hqos_per_hqos_thread_t;
143 CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
144 volatile u32 **lockp;
150 u32 vlib_sw_if_index;
152 /* next node index if we decide to steal the rx graph arc */
153 u32 per_interface_next_index;
155 /* dpdk rte_mbuf rx and tx vectors, VLIB_FRAME_SIZE */
156 struct rte_mbuf ***tx_vectors; /* one per worker thread */
157 struct rte_mbuf ***rx_vectors;
159 /* vector of traced contexts, per device */
160 u32 **d_trace_buffers;
166 #define DPDK_DEVICE_FLAG_ADMIN_UP (1 << 0)
167 #define DPDK_DEVICE_FLAG_PROMISC (1 << 1)
168 #define DPDK_DEVICE_FLAG_PMD (1 << 2)
169 #define DPDK_DEVICE_FLAG_PMD_INIT_FAIL (1 << 3)
170 #define DPDK_DEVICE_FLAG_MAYBE_MULTISEG (1 << 4)
171 #define DPDK_DEVICE_FLAG_HAVE_SUBIF (1 << 5)
172 #define DPDK_DEVICE_FLAG_HQOS (1 << 6)
175 CLIB_CACHE_LINE_ALIGN_MARK (cacheline1);
177 u8 *interface_name_suffix;
179 /* number of sub-interfaces */
186 u16 *cpu_socket_id_by_queue;
187 struct rte_eth_conf port_conf;
188 struct rte_eth_txconf tx_conf;
191 dpdk_device_hqos_per_worker_thread_t *hqos_wt;
192 dpdk_device_hqos_per_hqos_thread_t *hqos_ht;
194 /* af_packet or BondEthernet instance number */
197 struct rte_eth_link link;
198 f64 time_last_link_update;
200 struct rte_eth_stats stats;
201 struct rte_eth_stats last_stats;
202 struct rte_eth_stats last_cleared_stats;
203 struct rte_eth_xstat *xstats;
204 struct rte_eth_xstat *last_cleared_xstats;
205 f64 time_last_stats_update;
206 dpdk_port_type_t port_type;
209 u8 *default_mac_address;
212 clib_error_t *errors;
/* Default interval (seconds) between device counter polls, and the
   smallest interval a user may configure. */
#define DPDK_STATS_POLL_INTERVAL      (10.0)
#define DPDK_MIN_STATS_POLL_INTERVAL  (0.001)	/* 1msec */

/* Default interval (seconds) between link-state polls, and its floor. */
#define DPDK_LINK_POLL_INTERVAL       (3.0)
#define DPDK_MIN_LINK_POLL_INTERVAL   (0.001)	/* 1msec */
225 } dpdk_device_and_queue_t;
/* Debug knob: when non-zero the HQoS path is bypassed.
   NOTE(review): the closing #endif of both guards below was missing from
   this copy of the file; restored. */
#ifndef DPDK_HQOS_DBG_BYPASS
#define DPDK_HQOS_DBG_BYPASS 0
#endif

/* Threshold used by the HQoS flush logic -- presumably the number of
   empty polls before pending packets are force-flushed; confirm against
   the implementation. */
#ifndef HQOS_FLUSH_COUNT_THRESHOLD
#define HQOS_FLUSH_COUNT_THRESHOLD 100000
#endif
235 typedef struct dpdk_device_config_hqos_t
238 u32 hqos_thread_valid;
244 u32 pktfield0_slabpos;
245 u32 pktfield1_slabpos;
246 u32 pktfield2_slabpos;
247 u64 pktfield0_slabmask;
248 u64 pktfield1_slabmask;
249 u64 pktfield2_slabmask;
252 struct rte_sched_port_params port;
253 struct rte_sched_subport_params *subport;
254 struct rte_sched_pipe_params *pipe;
256 } dpdk_device_config_hqos_t;
258 int dpdk_hqos_validate_mask (u64 mask, u32 n);
259 void dpdk_device_config_hqos_pipe_profile_default (dpdk_device_config_hqos_t *
260 hqos, u32 pipe_profile_id);
261 void dpdk_device_config_hqos_default (dpdk_device_config_hqos_t * hqos);
262 clib_error_t *dpdk_port_setup_hqos (dpdk_device_t * xd,
263 dpdk_device_config_hqos_t * hqos);
264 void dpdk_hqos_metadata_set (dpdk_device_hqos_per_worker_thread_t * hqos,
265 struct rte_mbuf **pkts, u32 n_pkts);
/* NOTE(review): the body of this x-macro list (the per-device config
   item names) was lost in this copy of the file; only the #define line
   with its continuation backslash survives -- restore from upstream. */
267 #define foreach_dpdk_device_config_item \
276 vlib_pci_addr_t pci_addr;
278 u8 vlan_strip_offload;
279 #define DPDK_DEVICE_VLAN_STRIP_DEFAULT 0
280 #define DPDK_DEVICE_VLAN_STRIP_OFF 1
281 #define DPDK_DEVICE_VLAN_STRIP_ON 2
283 #define _(x) uword x;
284 foreach_dpdk_device_config_item
286 clib_bitmap_t * workers;
288 dpdk_device_config_hqos_t hqos;
289 } dpdk_device_config_t;
296 u8 *eal_init_args_str;
299 u8 enable_tcp_udp_checksum;
301 /* Required config parameters */
302 u8 coremask_set_manually;
303 u8 nchannels_set_manually;
309 * format interface names ala xxxEthernet%d/%d/%d instead of
310 * xxxEthernet%x/%x/%x.
312 u8 interface_name_format_decimal;
314 /* per-device config */
315 dpdk_device_config_t default_devconf;
316 dpdk_device_config_t *dev_confs;
317 uword *device_config_index_by_pci_addr;
319 } dpdk_config_main_t;
321 dpdk_config_main_t dpdk_config_main;
327 dpdk_device_t *devices;
328 dpdk_device_and_queue_t **devices_by_hqos_cpu;
330 /* per-thread recycle lists */
333 /* per-thread buffer templates */
334 vlib_buffer_t *buffer_templates;
336 /* buffer flags template, configurable to enable/disable tcp / udp cksum */
337 u32 buffer_flags_template;
339 /* vlib buffer free list, must be same size as an rte_mbuf */
340 u32 vlib_buffer_free_list_index;
342 /* Ethernet input node index */
343 u32 ethernet_input_node_index;
345 /* pcap tracing [only works if (CLIB_DEBUG > 0)] */
347 pcap_main_t pcap_main;
349 u32 pcap_sw_if_index;
350 u32 pcap_pkts_to_capture;
353 * flag indicating that a posted admin up/down
354 * (via post_sw_interface_set_flags) is in progress
356 u8 admin_up_down_in_progress;
360 /* which cpus are running I/O TX */
361 int hqos_cpu_first_index;
364 /* control interval of dpdk link state and stat polling */
365 f64 link_state_poll_interval;
366 f64 stat_poll_interval;
368 /* Sleep for this many usec after each device poll */
372 vlib_main_t *vlib_main;
373 vnet_main_t *vnet_main;
374 dpdk_config_main_t *conf;
377 struct rte_mempool **pktmbuf_pools;
379 /* API message ID base */
383 extern dpdk_main_t dpdk_main;
391 /* Copy of VLIB buffer; packet data stored in pre_data. */
392 vlib_buffer_t buffer;
393 } dpdk_tx_dma_trace_t;
401 vlib_buffer_t buffer; /* Copy of VLIB buffer; pkt data stored in pre_data. */
402 u8 data[256]; /* First 256 data bytes, used for hexdump */
403 } dpdk_rx_dma_trace_t;
405 void dpdk_device_setup (dpdk_device_t * xd);
406 void dpdk_device_start (dpdk_device_t * xd);
407 void dpdk_device_stop (dpdk_device_t * xd);
/* X-macro list of per-node error counters: (enum tag, display string). */
#define foreach_dpdk_error						\
  _(NONE, "no error")							\
  _(RX_PACKET_ERROR, "Rx packet errors")				\
  _(RX_BAD_FCS, "Rx bad fcs")						\
  _(IP_CHECKSUM_ERROR, "Rx ip checksum errors")				\
  _(RX_ALLOC_FAIL, "rx buf alloc from free list failed")		\
  _(RX_ALLOC_NO_PHYSMEM, "rx buf alloc failed no physmem")		\
  _(RX_ALLOC_DROP_PKTS, "rx packets dropped due to alloc error")
420 #define _(f,s) DPDK_ERROR_##f,
426 void dpdk_update_link_state (dpdk_device_t * xd, f64 now);
428 format_function_t format_dpdk_device_name;
429 format_function_t format_dpdk_device;
430 format_function_t format_dpdk_device_errors;
431 format_function_t format_dpdk_tx_dma_trace;
432 format_function_t format_dpdk_rx_dma_trace;
433 format_function_t format_dpdk_rte_mbuf;
434 format_function_t format_dpdk_rx_rte_mbuf;
435 unformat_function_t unformat_dpdk_log_level;
436 clib_error_t *unformat_rss_fn (unformat_input_t * input, uword * rss_fn);
437 clib_error_t *unformat_hqos (unformat_input_t * input,
438 dpdk_device_config_hqos_t * hqos);
441 admin_up_down_process (vlib_main_t * vm,
442 vlib_node_runtime_t * rt, vlib_frame_t * f);
444 #endif /* __included_dpdk_h__ */
/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */