/*
 * Copyright (c) 2015 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#ifndef __included_dpdk_h__
#define __included_dpdk_h__

/* $$$$ We should rename always_inline -> clib_always_inline */

/* Opt in to DPDK symbols tagged __rte_experimental before any DPDK
   header is included. */
#define ALLOW_EXPERIMENTAL_API
23 #include <rte_config.h>
25 #include <rte_common.h>
27 #include <rte_memory.h>
29 #include <rte_per_lcore.h>
30 #include <rte_cycles.h>
31 #include <rte_lcore.h>
32 #include <rte_per_lcore.h>
33 #include <rte_interrupts.h>
35 #include <rte_ether.h>
36 #include <rte_ethdev.h>
38 #include <rte_mempool.h>
40 #include <rte_version.h>
41 #include <rte_eth_bond.h>
42 #include <rte_sched.h>
44 #include <rte_bus_pci.h>
47 #include <vppinfra/pcap.h>
48 #include <vnet/devices/devices.h>
/* NOTE(review): in the full source these two definitions are selected by
   an #if CLIB_DEBUG / #else / #endif pair that is not visible in this
   chunk; as written here the second #define would redefine the first —
   confirm against the original file. */
#define always_inline static inline
#define always_inline static inline __attribute__ ((__always_inline__))
56 #include <vlib/pci/pci.h>
57 #include <vnet/flow/flow.h>
/* Device class and graph-node registrations defined elsewhere in the
   DPDK plugin. */
extern vnet_device_class_t dpdk_device_class;
extern vlib_node_registration_t dpdk_input_node;
extern vlib_node_registration_t admin_up_down_process_node;
/*
 * X-macro table of DPDK poll-mode drivers recognised by this plugin:
 * each entry pairs a DPDK driver name string with the VNET_DPDK_PMD_<f>
 * enum tag it expands to (see the #define _(s,f) expansion below).
 * NOTE(review): at least one entry appears to be missing from this chunk
 * between VHOST_ETHER and FAILSAFE — confirm against the full source.
 */
#define foreach_dpdk_pmd          \
  _ ("net_thunderx", THUNDERX)    \
  _ ("net_e1000_em", E1000EM)     \
  _ ("net_e1000_igb", IGB)        \
  _ ("net_e1000_igb_vf", IGBVF)   \
  _ ("net_ixgbe", IXGBE)          \
  _ ("net_ixgbe_vf", IXGBEVF)     \
  _ ("net_i40e", I40E)            \
  _ ("net_i40e_vf", I40EVF)       \
  _ ("net_virtio", VIRTIO)        \
  _ ("net_enic", ENIC)            \
  _ ("net_vmxnet3", VMXNET3)      \
  _ ("AF_PACKET PMD", AF_PACKET)  \
  _ ("net_bonding", BOND)         \
  _ ("net_fm10k", FM10K)          \
  _ ("net_cxgbe", CXGBE)          \
  _ ("net_mlx4", MLX4)            \
  _ ("net_mlx5", MLX5)            \
  _ ("net_dpaa2", DPAA2)          \
  _ ("net_virtio_user", VIRTIO_USER) \
  _ ("net_vhost", VHOST_ETHER)    \
  _ ("net_failsafe", FAILSAFE)    \
  _ ("net_liovf", LIOVF_ETHER)    \
  _ ("net_qede", QEDE)            \
  _ ("net_netvsc", NETVSC)
/* Expand the PMD table into the driver enum.  NOTE(review): the
   "typedef enum {" opener, the foreach_dpdk_pmd invocation, the #undef,
   and the enum closer are not visible in this chunk. */
#define _(s,f) VNET_DPDK_PMD_##f,
  VNET_DPDK_PMD_UNKNOWN,	/* must be last */

/* Port classification, mostly by link speed, plus special device kinds
   (bond, vswitch, af_packet, VF, ...).  NOTE(review): the "typedef
   enum {" opener is not visible in this chunk. */
  VNET_DPDK_PORT_TYPE_ETH_1G,
  VNET_DPDK_PORT_TYPE_ETH_2_5G,
  VNET_DPDK_PORT_TYPE_ETH_5G,
  VNET_DPDK_PORT_TYPE_ETH_10G,
  VNET_DPDK_PORT_TYPE_ETH_20G,
  VNET_DPDK_PORT_TYPE_ETH_25G,
  VNET_DPDK_PORT_TYPE_ETH_40G,
  VNET_DPDK_PORT_TYPE_ETH_50G,
  VNET_DPDK_PORT_TYPE_ETH_56G,
  VNET_DPDK_PORT_TYPE_ETH_100G,
  VNET_DPDK_PORT_TYPE_ETH_BOND,
  VNET_DPDK_PORT_TYPE_ETH_SWITCH,
  VNET_DPDK_PORT_TYPE_AF_PACKET,
  VNET_DPDK_PORT_TYPE_ETH_VF,
  VNET_DPDK_PORT_TYPE_VIRTIO_USER,
  VNET_DPDK_PORT_TYPE_VHOST_ETHER,
  VNET_DPDK_PORT_TYPE_FAILSAFE,
  VNET_DPDK_PORT_TYPE_NETVSC,
  VNET_DPDK_PORT_TYPE_UNKNOWN,

/* DPDK port ids are 16-bit (rte_ethdev port_id); alias used throughout
   this plugin instead of a bare u16. */
typedef uint16_t dpdk_portid_t;
/* Per-worker-thread HQoS (hierarchical QoS) state.  NOTE(review): the
   "typedef struct {" opener is not visible in this chunk. */
  /* Required for vec_validate_aligned */
  CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
  struct rte_ring *swq;		/* software queue toward the HQoS thread */
  /* slabmask/slabpos/slabshr triples: 64-bit mask, byte position and
     right-shift used to extract three packet-header fields for
     rte_sched classification — presumably consumed by
     dpdk_hqos_metadata_set(); confirm there. */
  u64 hqos_field0_slabmask;
  u32 hqos_field0_slabpos;
  u32 hqos_field0_slabshr;
  u64 hqos_field1_slabmask;
  u32 hqos_field1_slabpos;
  u32 hqos_field1_slabshr;
  u64 hqos_field2_slabmask;
  u32 hqos_field2_slabpos;
  u32 hqos_field2_slabshr;
  u32 hqos_tc_table[64];	/* presumably a traffic-class translation
				   table — confirm against rte_sched usage */
} dpdk_device_hqos_per_worker_thread_t;

/* Per-HQoS-thread state.  NOTE(review): the "typedef struct {" opener
   and several members are not visible in this chunk. */
  /* Required for vec_validate_aligned */
  CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
  struct rte_ring **swq;	/* vector of software queues drained here */
  struct rte_mbuf **pkts_enq;	/* staging arrays for scheduler enqueue */
  struct rte_mbuf **pkts_deq;	/* ... and dequeue */
  struct rte_sched_port *hqos;	/* DPDK hierarchical scheduler instance */
} dpdk_device_hqos_per_hqos_thread_t;
/* X-macro list of per-device state flags; expanded below into
   DPDK_DEVICE_FLAG_<name> single-bit masks.  NOTE(review): the entry
   for bit 2 is absent from this chunk — confirm against the full
   source. */
#define foreach_dpdk_device_flags \
  _( 0, ADMIN_UP, "admin-up") \
  _( 1, PROMISC, "promisc") \
  _( 3, PMD_INIT_FAIL, "pmd-init-fail") \
  _( 4, MAYBE_MULTISEG, "maybe-multiseg") \
  _( 5, HAVE_SUBIF, "subif") \
  _( 6, HQOS, "hqos") \
  _( 7, BOND_SLAVE, "bond-slave") \
  _( 8, BOND_SLAVE_UP, "bond-slave-up") \
  _( 9, TX_OFFLOAD, "tx-offload") \
  _(10, INTEL_PHDR_CKSUM, "intel-phdr-cksum") \
  _(11, RX_FLOW_OFFLOAD, "rx-flow-offload") \
  _(12, RX_IP4_CKSUM, "rx-ip4-cksum")

/* Expand each flag entry into a single-bit mask constant.
   NOTE(review): the enclosing "typedef enum {", the #undef and the enum
   closer are not visible in this chunk. */
#define _(a, b, c) DPDK_DEVICE_FLAG_##b = (1 << a),
  foreach_dpdk_device_flags

/* Lookup-table entry mapping to an installed rte_flow rule.
   NOTE(review): the struct opener and any remaining members are not
   visible in this chunk. */
  struct rte_flow *handle;
} dpdk_flow_lookup_entry_t;
/* Per-interface device descriptor (dpdk_device_t).  NOTE(review): the
   "typedef struct {" opener, the struct closer and a number of members
   are not visible in this chunk. */
  CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
  volatile u32 **lockp;		/* per-TX-queue locks; presumably used when
				   TX queues are shared across threads —
				   confirm in the TX path */

  /* Instance ID to access internal device array. */
  dpdk_portid_t device_index;

  /* DPDK device port number */
  dpdk_portid_t port_id;

  /* next node index if we decide to steal the rx graph arc */
  u32 per_interface_next_index;

  /* Second cache line: configuration / slow-path members. */
  CLIB_CACHE_LINE_ALIGN_MARK (cacheline1);

  u8 *interface_name_suffix;

  /* number of sub-interfaces */
  /* NOTE(review): the member the comment above describes is missing
     from this chunk. */
  u16 *cpu_socket_id_by_queue;	/* per-RX-queue NUMA socket id */
  u8 *buffer_pool_for_queue;	/* per-queue buffer pool index */
  struct rte_eth_conf port_conf;	/* config handed to rte_eth_dev_configure */
  struct rte_eth_txconf tx_conf;

  /* rte_flow offload state */
  u32 supported_flow_actions;
  dpdk_flow_entry_t *flow_entries;	/* pool */
  dpdk_flow_lookup_entry_t *flow_lookup_entries;	/* pool */
  u32 *parked_lookup_indexes;	/* vector */
  u32 parked_loop_count;
  struct rte_flow_error last_flow_error;

  /* HQoS handles for worker threads and HQoS threads */
  dpdk_device_hqos_per_worker_thread_t *hqos_wt;
  dpdk_device_hqos_per_hqos_thread_t *hqos_ht;

  /* af_packet or BondEthernet instance number */
  u16 af_packet_instance_num;
  u16 bond_instance_num;

  /* Bonded interface port# of a slave -
     only valid if DPDK_DEVICE_FLAG_BOND_SLAVE bit is set */
  dpdk_portid_t bond_port;

  struct rte_eth_link link;	/* last link state read from the PMD */
  f64 time_last_link_update;

  /* Basic and extended NIC counters plus snapshots, presumably used to
     compute deltas and implement "clear counters" — confirm in the
     stats code. */
  struct rte_eth_stats stats;
  struct rte_eth_stats last_stats;
  struct rte_eth_stats last_cleared_stats;
  struct rte_eth_xstat *xstats;
  struct rte_eth_xstat *last_cleared_xstats;
  f64 time_last_stats_update;
  dpdk_port_type_t port_type;

  u8 *default_mac_address;

  /* Errors accumulated for this device. */
  clib_error_t *errors;
/* Polling intervals (seconds) for NIC stats and link state, and the
   MIN_* values — presumably the lowest interval configuration may
   request; confirm in the poller. */
#define DPDK_STATS_POLL_INTERVAL (10.0)
#define DPDK_MIN_STATS_POLL_INTERVAL (0.001)	/* 1msec */

#define DPDK_LINK_POLL_INTERVAL (3.0)
#define DPDK_MIN_LINK_POLL_INTERVAL (0.001)	/* 1msec */

/* Closer of a (device, queue) pairing struct whose body is not visible
   in this chunk. */
} dpdk_device_and_queue_t;

/* HQoS debug/flush tunables, overridable at build time.
   NOTE(review): the matching #endif lines for both #ifndef guards are
   not visible in this chunk. */
#ifndef DPDK_HQOS_DBG_BYPASS
#define DPDK_HQOS_DBG_BYPASS 0
#ifndef HQOS_FLUSH_COUNT_THRESHOLD
#define HQOS_FLUSH_COUNT_THRESHOLD 100000
/* Per-device HQoS configuration.  NOTE(review): the opening brace and
   several members are missing from this chunk. */
typedef struct dpdk_device_config_hqos_t
  u32 hqos_thread_valid;	/* non-zero when an explicit HQoS thread
				   assignment is present */

  /* Packet-field extraction parameters (byte position + 64-bit mask per
     field) used to classify packets into the scheduler hierarchy. */
  u32 pktfield0_slabpos;
  u32 pktfield1_slabpos;
  u32 pktfield2_slabpos;
  u64 pktfield0_slabmask;
  u64 pktfield1_slabmask;
  u64 pktfield2_slabmask;

  /* rte_sched parameter blocks passed to the DPDK scheduler. */
  struct rte_sched_port_params port;
  struct rte_sched_subport_params *subport;
  struct rte_sched_pipe_params *pipe;
} dpdk_device_config_hqos_t;

/* HQoS helpers: mask validation, default-config fill-in, per-port
   scheduler setup, and the per-packet metadata classifier. */
int dpdk_hqos_validate_mask (u64 mask, u32 n);
void dpdk_device_config_hqos_pipe_profile_default (dpdk_device_config_hqos_t *
						   hqos, u32 pipe_profile_id);
void dpdk_device_config_hqos_default (dpdk_device_config_hqos_t * hqos);
clib_error_t *dpdk_port_setup_hqos (dpdk_device_t * xd,
				    dpdk_device_config_hqos_t * hqos);
void dpdk_hqos_metadata_set (dpdk_device_hqos_per_worker_thread_t * hqos,
			     struct rte_mbuf **pkts, u32 n_pkts);
/* X-macro list of per-device integer config items.  NOTE(review): the
   item list and the struct opener that should follow are missing from
   this chunk; as written, the stray trailing backslash makes the next
   line part of the macro — confirm against the full source. */
#define foreach_dpdk_device_config_item \
  vlib_pci_addr_t pci_addr;

  /* Tri-state VLAN-strip setting; values defined just below. */
  u8 vlan_strip_offload;
#define DPDK_DEVICE_VLAN_STRIP_DEFAULT 0
#define DPDK_DEVICE_VLAN_STRIP_OFF 1
#define DPDK_DEVICE_VLAN_STRIP_ON 2

  /* Expand each config item into a uword member. */
#define _(x) uword x;
  foreach_dpdk_device_config_item

  clib_bitmap_t * workers;	/* presumably the worker threads allowed to
				   poll this device — confirm in init code */

  dpdk_device_config_hqos_t hqos;
} dpdk_device_config_t;
/* Plugin-wide configuration (dpdk_config_main_t).  NOTE(review): the
   struct opener and several members are not visible in this chunk. */
  u8 *eal_init_args_str;	/* EAL arguments as a single string */
  u8 enable_tcp_udp_checksum;
  u8 no_tx_checksum_offload;

  /* Required config parameters */
  u8 coremask_set_manually;
  u8 nchannels_set_manually;

  u32 num_crypto_mbufs;

  /*
   * format interface names ala xxxEthernet%d/%d/%d instead of
   * xxxEthernet%x/%x/%x.
   */
  u8 interface_name_format_decimal;

  /* per-device config */
  dpdk_device_config_t default_devconf;	/* fallback when no match below */
  dpdk_device_config_t *dev_confs;	/* pool of per-device configs */
  uword *device_config_index_by_pci_addr;	/* hash: PCI addr -> config */

  /* devices blacklist by pci vendor_id, device_id */
  u32 *blacklist_by_pci_vendor_and_device;

} dpdk_config_main_t;

extern dpdk_config_main_t dpdk_config_main;

/* Max packets pulled from a device in one RX burst; sized to fill one
   vlib frame. */
#define DPDK_RX_BURST_SZ VLIB_FRAME_SIZE
/* Per-thread RX scratch state; arrays are sized for one full burst.
   NOTE(review): the "typedef struct {" opener is not visible in this
   chunk. */
  CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
  struct rte_mbuf *mbufs[DPDK_RX_BURST_SZ];
  u32 buffers[DPDK_RX_BURST_SZ];	/* vlib buffer indices */
  u16 next[DPDK_RX_BURST_SZ];		/* per-packet next values */
  u16 etype[DPDK_RX_BURST_SZ];		/* per-packet ethertypes */
  u8 flags[DPDK_RX_BURST_SZ];		/* per-packet flags */
  vlib_buffer_t buffer_template;	/* template for initializing buffers */
} dpdk_per_thread_data_t;

/* Packet-capture state (dpdk_pcap_t).  NOTE(review): the struct opener
   and closer are not visible in this chunk. */
  u32 pcap_sw_if_index;
  pcap_main_t pcap_main;
/* Main DPDK plugin state (dpdk_main_t).  NOTE(review): the struct
   opener, the struct closer and several members are not visible in this
   chunk. */
  dpdk_device_t *devices;	/* all DPDK devices */
  dpdk_device_and_queue_t **devices_by_hqos_cpu;
  dpdk_per_thread_data_t *per_thread_data;

  /* buffer flags template, configurable to enable/disable tcp / udp cksum */
  u32 buffer_flags_template;

  dpdk_pcap_t pcap[VLIB_N_RX_TX];	/* per-direction capture state */

  /* NOTE(review): the members below duplicate dpdk_pcap_t fields; in
     the full source they may belong to a different/legacy section of
     the struct — confirm. */
  pcap_main_t pcap_main;
  u32 pcap_sw_if_index;
  u32 pcap_pkts_to_capture;

  /*
   * flag indicating that a posted admin up/down
   * (via post_sw_interface_set_flags) is in progress
   */
  u8 admin_up_down_in_progress;

  /* which cpus are running I/O TX */
  int hqos_cpu_first_index;

  /* control interval of dpdk link state and stat polling */
  f64 link_state_poll_interval;
  f64 stat_poll_interval;

  /* Convenience back-pointers. */
  vlib_main_t *vlib_main;
  vnet_main_t *vnet_main;
  dpdk_config_main_t *conf;

  /* API message ID base */
  /* NOTE(review): the member the comment above describes is missing
     from this chunk. */

  vlib_log_class_t log_default;	/* log class used by the dpdk_log_* macros */

extern dpdk_main_t dpdk_main;

/* RX/TX trace records.  NOTE(review): the typedef openers/closers of
   both trace structs are not visible in this chunk. */
  /* Copy of VLIB buffer; packet data stored in pre_data. */
  vlib_buffer_t buffer;
  u8 data[256];			/* First 256 data bytes, used for hexdump */

  vlib_buffer_t buffer;		/* Copy of VLIB buffer; pkt data stored in pre_data. */
  u8 data[256];			/* First 256 data bytes, used for hexdump */
/* Device lifecycle entry points implemented elsewhere in the plugin. */
void dpdk_device_setup (dpdk_device_t * xd);
void dpdk_device_start (dpdk_device_t * xd);
void dpdk_device_stop (dpdk_device_t * xd);

/* Callback registered with DPDK for port events (see
   rte_eth_event_type). */
int dpdk_port_state_callback (dpdk_portid_t port_id,
			      enum rte_eth_event_type type,
			      void *param, void *ret_param);

/* X-macro list of error counters for the DPDK input node. */
#define foreach_dpdk_error                                      \
  _(NONE, "no error")                                           \
  _(RX_PACKET_ERROR, "Rx packet errors")                        \
  _(RX_BAD_FCS, "Rx bad fcs")                                   \
  _(IP_CHECKSUM_ERROR, "Rx ip checksum errors")                 \
  _(RX_ALLOC_FAIL, "rx buf alloc from free list failed")        \
  _(RX_ALLOC_NO_PHYSMEM, "rx buf alloc failed no physmem")      \
  _(RX_ALLOC_DROP_PKTS, "rx packets dropped due to alloc error")

/* Expand the error list into DPDK_ERROR_<f> enum values.
   NOTE(review): the enclosing "typedef enum {", the foreach invocation,
   the #undef and the enum closer are not visible in this chunk. */
#define _(f,s) DPDK_ERROR_##f,

/* Logging helpers; all log against the plugin's default log class. */
#define dpdk_log_err(...) \
  vlib_log(VLIB_LOG_LEVEL_ERR, dpdk_main.log_default, __VA_ARGS__)
#define dpdk_log_warn(...) \
  vlib_log(VLIB_LOG_LEVEL_WARNING, dpdk_main.log_default, __VA_ARGS__)
#define dpdk_log_notice(...) \
  vlib_log(VLIB_LOG_LEVEL_NOTICE, dpdk_main.log_default, __VA_ARGS__)
#define dpdk_log_info(...) \
  vlib_log(VLIB_LOG_LEVEL_INFO, dpdk_main.log_default, __VA_ARGS__)
/* Refresh the cached link state of a device ('now' is the current
   vlib time). */
void dpdk_update_link_state (dpdk_device_t * xd, f64 now);

/* vppinfra format/unformat helpers for DPDK objects; implemented
   elsewhere in the plugin. */
format_function_t format_dpdk_device_name;
format_function_t format_dpdk_device;
format_function_t format_dpdk_device_errors;
format_function_t format_dpdk_tx_trace;
format_function_t format_dpdk_rx_trace;
format_function_t format_dpdk_rte_mbuf;
format_function_t format_dpdk_rx_rte_mbuf;
format_function_t format_dpdk_flow;
format_function_t format_dpdk_rss_hf_name;
format_function_t format_dpdk_rx_offload_caps;
format_function_t format_dpdk_tx_offload_caps;
unformat_function_t unformat_dpdk_log_level;
vnet_flow_dev_ops_function_t dpdk_flow_ops_fn;

/* startup.conf parsers for RSS functions and HQoS parameters. */
clib_error_t *unformat_rss_fn (unformat_input_t * input, uword * rss_fn);
clib_error_t *unformat_hqos (unformat_input_t * input,
			     dpdk_device_config_hqos_t * hqos);

/* NOTE(review): this prototype's parameter list continues past what is
   visible in this chunk (the closing "*info);" line is missing). */
struct rte_pci_device *dpdk_get_pci_device (const struct rte_eth_dev_info

/* Buffer trajectory-tracing debug helpers. */
int dpdk_buffer_validate_trajectory_all (u32 * uninitialized);
void dpdk_buffer_poison_trajectory_all (void);
538 #endif /* __included_dpdk_h__ */
/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */