/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2015 6WIND S.A.
 * Copyright 2015 Mellanox Technologies, Ltd
 */

#include <stddef.h>
#include <unistd.h>
#include <string.h>
#include <assert.h>
#include <dlfcn.h>
#include <stdint.h>
#include <stdlib.h>
#include <errno.h>
#include <net/if.h>
#include <sys/mman.h>
#include <linux/netlink.h>
#include <linux/rtnetlink.h>

/* Verbs header. */
/* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
#ifdef PEDANTIC
#pragma GCC diagnostic ignored "-Wpedantic"
#endif
#include <infiniband/verbs.h>
#ifdef PEDANTIC
#pragma GCC diagnostic error "-Wpedantic"
#endif

#include <rte_malloc.h>
#include <rte_ethdev_driver.h>
#include <rte_ethdev_pci.h>
#include <rte_pci.h>
#include <rte_bus_pci.h>
#include <rte_common.h>
#include <rte_config.h>
#include <rte_eal_memconfig.h>
#include <rte_kvargs.h>
#include <rte_rwlock.h>
#include <rte_spinlock.h>
#include <rte_string_fns.h>

#include "mlx5.h"
#include "mlx5_utils.h"
#include "mlx5_rxtx.h"
#include "mlx5_autoconf.h"
#include "mlx5_defs.h"
#include "mlx5_glue.h"
#include "mlx5_mr.h"
#include "mlx5_flow.h"
/* Device parameter to enable RX completion queue compression. */
#define MLX5_RXQ_CQE_COMP_EN "rxq_cqe_comp_en"

/* Device parameter to enable RX completion entry padding to 128B. */
#define MLX5_RXQ_CQE_PAD_EN "rxq_cqe_pad_en"

/* Device parameter to enable Multi-Packet Rx queue. */
#define MLX5_RX_MPRQ_EN "mprq_en"

/* Device parameter to configure log 2 of the number of strides for MPRQ. */
#define MLX5_RX_MPRQ_LOG_STRIDE_NUM "mprq_log_stride_num"

/* Device parameter to limit the size of memcpy'd packet for MPRQ. */
#define MLX5_RX_MPRQ_MAX_MEMCPY_LEN "mprq_max_memcpy_len"

/* Device parameter to set the minimum number of Rx queues to enable MPRQ. */
#define MLX5_RXQS_MIN_MPRQ "rxqs_min_mprq"

/* Device parameter to configure inline send. */
#define MLX5_TXQ_INLINE "txq_inline"

/*
 * Device parameter to configure the number of TX queues threshold for
 * enabling inline send.
 */
#define MLX5_TXQS_MIN_INLINE "txqs_min_inline"

/*
 * Device parameter to configure the number of TX queues threshold for
 * enabling vectorized Tx.
 */
#define MLX5_TXQS_MAX_VEC "txqs_max_vec"

/* Device parameter to enable multi-packet send WQEs. */
#define MLX5_TXQ_MPW_EN "txq_mpw_en"

/* Device parameter to include 2 dsegs in the title WQEBB. */
#define MLX5_TXQ_MPW_HDR_DSEG_EN "txq_mpw_hdr_dseg_en"

/* Device parameter to limit the maximum packet size for inlining. */
#define MLX5_TXQ_MAX_INLINE_LEN "txq_max_inline_len"

/* Device parameter to enable hardware Tx vector. */
#define MLX5_TX_VEC_EN "tx_vec_en"

/* Device parameter to enable hardware Rx vector. */
#define MLX5_RX_VEC_EN "rx_vec_en"

/* Allow L3 VXLAN flow creation. */
#define MLX5_L3_VXLAN_EN "l3_vxlan_en"

/* Activate DV flow steering. */
#define MLX5_DV_FLOW_EN "dv_flow_en"

/* Activate Netlink support in VF mode. */
#define MLX5_VF_NL_EN "vf_nl_en"

/* Select port representors to instantiate. */
#define MLX5_REPRESENTOR "representor"
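
/*
 * Illustrative usage (device address and values are hypothetical): the
 * parameters above are passed to the PMD as a comma-separated list of
 * key=value pairs appended to the PCI device address, e.g. on a testpmd
 * command line:
 *
 *	testpmd -w 0000:05:00.0,rxq_cqe_comp_en=0,mprq_en=1,txqs_min_inline=8
 *
 * Each pair is validated and stored by mlx5_args_check() below.
 */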

#ifndef HAVE_IBV_MLX5_MOD_MPW
#define MLX5DV_CONTEXT_FLAGS_MPW_ALLOWED (1 << 2)
#define MLX5DV_CONTEXT_FLAGS_ENHANCED_MPW (1 << 3)
#endif

#ifndef HAVE_IBV_MLX5_MOD_CQE_128B_COMP
#define MLX5DV_CONTEXT_FLAGS_CQE_128B_COMP (1 << 4)
#endif

static const char *MZ_MLX5_PMD_SHARED_DATA = "mlx5_pmd_shared_data";

/* Shared memory between primary and secondary processes. */
struct mlx5_shared_data *mlx5_shared_data;

/* Spinlock for mlx5_shared_data allocation. */
static rte_spinlock_t mlx5_shared_data_lock = RTE_SPINLOCK_INITIALIZER;

/** Driver-specific log messages type. */
int mlx5_logtype;

/**
 * Prepare shared data between primary and secondary process.
 */
static void
mlx5_prepare_shared_data(void)
{
	const struct rte_memzone *mz;

	rte_spinlock_lock(&mlx5_shared_data_lock);
	if (mlx5_shared_data == NULL) {
		if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
			/* Allocate shared memory. */
			mz = rte_memzone_reserve(MZ_MLX5_PMD_SHARED_DATA,
						 sizeof(*mlx5_shared_data),
						 SOCKET_ID_ANY, 0);
		} else {
			/* Lookup allocated shared memory. */
			mz = rte_memzone_lookup(MZ_MLX5_PMD_SHARED_DATA);
		}
		if (mz == NULL)
			rte_panic("Cannot allocate mlx5 shared data\n");
		mlx5_shared_data = mz->addr;
		/* Initialize shared data. */
		if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
			LIST_INIT(&mlx5_shared_data->mem_event_cb_list);
			rte_rwlock_init(&mlx5_shared_data->mem_event_rwlock);
		}
		rte_mem_event_callback_register("MLX5_MEM_EVENT_CB",
						mlx5_mr_mem_event_cb, NULL);
	}
	rte_spinlock_unlock(&mlx5_shared_data_lock);
}

/**
 * Retrieve integer value from environment variable.
 *
 * @param[in] name
 *   Environment variable name.
 *
 * @return
 *   Integer value, 0 if the variable is not set.
 */
int
mlx5_getenv_int(const char *name)
{
	const char *val = getenv(name);

	if (val == NULL)
		return 0;
	return atoi(val);
}

/**
 * Verbs callback to allocate memory. This function should allocate the space
 * according to the size provided residing inside a huge page.
 * Please note that all allocation must respect the alignment from libmlx5
 * (i.e. currently sysconf(_SC_PAGESIZE)).
 *
 * @param[in] size
 *   The size in bytes of the memory to allocate.
 * @param[in] data
 *   A pointer to the callback data.
 *
 * @return
 *   Allocated buffer, NULL otherwise and rte_errno is set.
 */
static void *
mlx5_alloc_verbs_buf(size_t size, void *data)
{
	struct priv *priv = data;
	void *ret;
	size_t alignment = sysconf(_SC_PAGESIZE);
	unsigned int socket = SOCKET_ID_ANY;

	if (priv->verbs_alloc_ctx.type == MLX5_VERBS_ALLOC_TYPE_TX_QUEUE) {
		const struct mlx5_txq_ctrl *ctrl = priv->verbs_alloc_ctx.obj;

		socket = ctrl->socket;
	} else if (priv->verbs_alloc_ctx.type ==
		   MLX5_VERBS_ALLOC_TYPE_RX_QUEUE) {
		const struct mlx5_rxq_ctrl *ctrl = priv->verbs_alloc_ctx.obj;

		socket = ctrl->socket;
	}
	assert(data != NULL);
	ret = rte_malloc_socket(__func__, size, alignment, socket);
	if (!ret && size)
		rte_errno = ENOMEM;
	return ret;
}

/**
 * Verbs callback to free memory.
 *
 * @param[in] ptr
 *   A pointer to the memory to free.
 * @param[in] data
 *   A pointer to the callback data.
 */
static void
mlx5_free_verbs_buf(void *ptr, void *data __rte_unused)
{
	assert(data != NULL);
	rte_free(ptr);
}
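
/*
 * Note: the two callbacks above are not called directly by this PMD; they
 * are handed to rdma-core in mlx5_dev_spawn() below through
 * mlx5_glue->dv_set_context_attr(ctx, MLX5DV_CTX_ATTR_BUF_ALLOCATORS, ...),
 * so Verbs data plane allocations are served from DPDK hugepage memory.
 */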

/**
 * DPDK callback to close the device.
 *
 * Destroy all queues and objects, free memory.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 */
static void
mlx5_dev_close(struct rte_eth_dev *dev)
{
	struct priv *priv = dev->data->dev_private;
	unsigned int i;
	int ret;

	DRV_LOG(DEBUG, "port %u closing device \"%s\"",
		dev->data->port_id,
		((priv->ctx != NULL) ? priv->ctx->device->name : ""));
	/* In case mlx5_dev_stop() has not been called. */
	mlx5_dev_interrupt_handler_uninstall(dev);
	mlx5_traffic_disable(dev);
	mlx5_flow_flush(dev, NULL);
	/* Prevent crashes when queues are still in use. */
	dev->rx_pkt_burst = removed_rx_burst;
	dev->tx_pkt_burst = removed_tx_burst;
	if (priv->rxqs != NULL) {
		/* XXX race condition if mlx5_rx_burst() is still running. */
		usleep(1000);
		for (i = 0; (i != priv->rxqs_n); ++i)
			mlx5_rxq_release(dev, i);
		priv->rxqs_n = 0;
		priv->rxqs = NULL;
	}
	if (priv->txqs != NULL) {
		/* XXX race condition if mlx5_tx_burst() is still running. */
		usleep(1000);
		for (i = 0; (i != priv->txqs_n); ++i)
			mlx5_txq_release(dev, i);
		priv->txqs_n = 0;
		priv->txqs = NULL;
	}
	mlx5_mprq_free_mp(dev);
	mlx5_mr_release(dev);
	if (priv->pd != NULL) {
		assert(priv->ctx != NULL);
		claim_zero(mlx5_glue->dealloc_pd(priv->pd));
		claim_zero(mlx5_glue->close_device(priv->ctx));
	} else
		assert(priv->ctx == NULL);
	if (priv->rss_conf.rss_key != NULL)
		rte_free(priv->rss_conf.rss_key);
	if (priv->reta_idx != NULL)
		rte_free(priv->reta_idx);
	if (priv->primary_socket)
		mlx5_socket_uninit(dev);
	if (priv->config.vf)
		mlx5_nl_mac_addr_flush(dev);
	if (priv->nl_socket_route >= 0)
		close(priv->nl_socket_route);
	if (priv->nl_socket_rdma >= 0)
		close(priv->nl_socket_rdma);
	if (priv->tcf_context)
		mlx5_flow_tcf_context_destroy(priv->tcf_context);
	ret = mlx5_hrxq_ibv_verify(dev);
	if (ret)
		DRV_LOG(WARNING, "port %u some hash Rx queues still remain",
			dev->data->port_id);
	ret = mlx5_ind_table_ibv_verify(dev);
	if (ret)
		DRV_LOG(WARNING, "port %u some indirection tables still remain",
			dev->data->port_id);
	ret = mlx5_rxq_ibv_verify(dev);
	if (ret)
		DRV_LOG(WARNING, "port %u some Verbs Rx queues still remain",
			dev->data->port_id);
	ret = mlx5_rxq_verify(dev);
	if (ret)
		DRV_LOG(WARNING, "port %u some Rx queues still remain",
			dev->data->port_id);
	ret = mlx5_txq_ibv_verify(dev);
	if (ret)
		DRV_LOG(WARNING, "port %u some Verbs Tx queues still remain",
			dev->data->port_id);
	ret = mlx5_txq_verify(dev);
	if (ret)
		DRV_LOG(WARNING, "port %u some Tx queues still remain",
			dev->data->port_id);
	ret = mlx5_flow_verify(dev);
	if (ret)
		DRV_LOG(WARNING, "port %u some flows still remain",
			dev->data->port_id);
	if (priv->domain_id != RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID) {
		unsigned int c = 0;
		unsigned int i = mlx5_dev_to_port_id(dev->device, NULL, 0);
		uint16_t port_id[i];

		i = RTE_MIN(mlx5_dev_to_port_id(dev->device, port_id, i), i);
		while (i--) {
			struct priv *opriv =
				rte_eth_devices[port_id[i]].data->dev_private;

			if (!opriv ||
			    opriv->domain_id != priv->domain_id ||
			    &rte_eth_devices[port_id[i]] == dev)
				continue;
			++c;
		}
		/* Release the switch domain if this port is its last user. */
		if (!c)
			claim_zero(rte_eth_switch_domain_free(priv->domain_id));
	}
	memset(priv, 0, sizeof(*priv));
	priv->domain_id = RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID;
	/*
	 * Flag to rte_eth_dev_close() that it should release the port resources
	 * (calling rte_eth_dev_release_port()) in addition to closing it.
	 */
	dev->data->dev_flags |= RTE_ETH_DEV_CLOSE_REMOVE;
	/*
	 * Reset mac_addrs to NULL such that it is not freed as part of
	 * rte_eth_dev_release_port(). mac_addrs is part of dev_private so
	 * it is freed when dev_private is freed.
	 */
	dev->data->mac_addrs = NULL;
}

const struct eth_dev_ops mlx5_dev_ops = {
	.dev_configure = mlx5_dev_configure,
	.dev_start = mlx5_dev_start,
	.dev_stop = mlx5_dev_stop,
	.dev_set_link_down = mlx5_set_link_down,
	.dev_set_link_up = mlx5_set_link_up,
	.dev_close = mlx5_dev_close,
	.promiscuous_enable = mlx5_promiscuous_enable,
	.promiscuous_disable = mlx5_promiscuous_disable,
	.allmulticast_enable = mlx5_allmulticast_enable,
	.allmulticast_disable = mlx5_allmulticast_disable,
	.link_update = mlx5_link_update,
	.stats_get = mlx5_stats_get,
	.stats_reset = mlx5_stats_reset,
	.xstats_get = mlx5_xstats_get,
	.xstats_reset = mlx5_xstats_reset,
	.xstats_get_names = mlx5_xstats_get_names,
	.dev_infos_get = mlx5_dev_infos_get,
	.dev_supported_ptypes_get = mlx5_dev_supported_ptypes_get,
	.vlan_filter_set = mlx5_vlan_filter_set,
	.rx_queue_setup = mlx5_rx_queue_setup,
	.tx_queue_setup = mlx5_tx_queue_setup,
	.rx_queue_release = mlx5_rx_queue_release,
	.tx_queue_release = mlx5_tx_queue_release,
	.flow_ctrl_get = mlx5_dev_get_flow_ctrl,
	.flow_ctrl_set = mlx5_dev_set_flow_ctrl,
	.mac_addr_remove = mlx5_mac_addr_remove,
	.mac_addr_add = mlx5_mac_addr_add,
	.mac_addr_set = mlx5_mac_addr_set,
	.set_mc_addr_list = mlx5_set_mc_addr_list,
	.mtu_set = mlx5_dev_set_mtu,
	.vlan_strip_queue_set = mlx5_vlan_strip_queue_set,
	.vlan_offload_set = mlx5_vlan_offload_set,
	.reta_update = mlx5_dev_rss_reta_update,
	.reta_query = mlx5_dev_rss_reta_query,
	.rss_hash_update = mlx5_rss_hash_update,
	.rss_hash_conf_get = mlx5_rss_hash_conf_get,
	.filter_ctrl = mlx5_dev_filter_ctrl,
	.rx_descriptor_status = mlx5_rx_descriptor_status,
	.tx_descriptor_status = mlx5_tx_descriptor_status,
	.rx_queue_count = mlx5_rx_queue_count,
	.rx_queue_intr_enable = mlx5_rx_intr_enable,
	.rx_queue_intr_disable = mlx5_rx_intr_disable,
	.is_removed = mlx5_is_removed,
};

/* Available operations from secondary process. */
static const struct eth_dev_ops mlx5_dev_sec_ops = {
	.stats_get = mlx5_stats_get,
	.stats_reset = mlx5_stats_reset,
	.xstats_get = mlx5_xstats_get,
	.xstats_reset = mlx5_xstats_reset,
	.xstats_get_names = mlx5_xstats_get_names,
	.dev_infos_get = mlx5_dev_infos_get,
	.rx_descriptor_status = mlx5_rx_descriptor_status,
	.tx_descriptor_status = mlx5_tx_descriptor_status,
};

/* Available operations in flow isolated mode. */
const struct eth_dev_ops mlx5_dev_ops_isolate = {
	.dev_configure = mlx5_dev_configure,
	.dev_start = mlx5_dev_start,
	.dev_stop = mlx5_dev_stop,
	.dev_set_link_down = mlx5_set_link_down,
	.dev_set_link_up = mlx5_set_link_up,
	.dev_close = mlx5_dev_close,
	.promiscuous_enable = mlx5_promiscuous_enable,
	.promiscuous_disable = mlx5_promiscuous_disable,
	.allmulticast_enable = mlx5_allmulticast_enable,
	.allmulticast_disable = mlx5_allmulticast_disable,
	.link_update = mlx5_link_update,
	.stats_get = mlx5_stats_get,
	.stats_reset = mlx5_stats_reset,
	.xstats_get = mlx5_xstats_get,
	.xstats_reset = mlx5_xstats_reset,
	.xstats_get_names = mlx5_xstats_get_names,
	.dev_infos_get = mlx5_dev_infos_get,
	.dev_supported_ptypes_get = mlx5_dev_supported_ptypes_get,
	.vlan_filter_set = mlx5_vlan_filter_set,
	.rx_queue_setup = mlx5_rx_queue_setup,
	.tx_queue_setup = mlx5_tx_queue_setup,
	.rx_queue_release = mlx5_rx_queue_release,
	.tx_queue_release = mlx5_tx_queue_release,
	.flow_ctrl_get = mlx5_dev_get_flow_ctrl,
	.flow_ctrl_set = mlx5_dev_set_flow_ctrl,
	.mac_addr_remove = mlx5_mac_addr_remove,
	.mac_addr_add = mlx5_mac_addr_add,
	.mac_addr_set = mlx5_mac_addr_set,
	.set_mc_addr_list = mlx5_set_mc_addr_list,
	.mtu_set = mlx5_dev_set_mtu,
	.vlan_strip_queue_set = mlx5_vlan_strip_queue_set,
	.vlan_offload_set = mlx5_vlan_offload_set,
	.filter_ctrl = mlx5_dev_filter_ctrl,
	.rx_descriptor_status = mlx5_rx_descriptor_status,
	.tx_descriptor_status = mlx5_tx_descriptor_status,
	.rx_queue_intr_enable = mlx5_rx_intr_enable,
	.rx_queue_intr_disable = mlx5_rx_intr_disable,
	.is_removed = mlx5_is_removed,
};

/**
 * Verify and store value for device argument.
 *
 * @param[in] key
 *   Key argument to verify.
 * @param[in] val
 *   Value associated with key.
 * @param opaque
 *   User data.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_args_check(const char *key, const char *val, void *opaque)
{
	struct mlx5_dev_config *config = opaque;
	unsigned long tmp;

	/* No-op, port representors are processed in mlx5_dev_spawn(). */
	if (!strcmp(MLX5_REPRESENTOR, key))
		return 0;
	errno = 0;
	tmp = strtoul(val, NULL, 0);
	if (errno) {
		rte_errno = errno;
		DRV_LOG(WARNING, "%s: \"%s\" is not a valid integer", key, val);
		return -rte_errno;
	}
	if (strcmp(MLX5_RXQ_CQE_COMP_EN, key) == 0) {
		config->cqe_comp = !!tmp;
	} else if (strcmp(MLX5_RXQ_CQE_PAD_EN, key) == 0) {
		config->cqe_pad = !!tmp;
	} else if (strcmp(MLX5_RX_MPRQ_EN, key) == 0) {
		config->mprq.enabled = !!tmp;
	} else if (strcmp(MLX5_RX_MPRQ_LOG_STRIDE_NUM, key) == 0) {
		config->mprq.stride_num_n = tmp;
	} else if (strcmp(MLX5_RX_MPRQ_MAX_MEMCPY_LEN, key) == 0) {
		config->mprq.max_memcpy_len = tmp;
	} else if (strcmp(MLX5_RXQS_MIN_MPRQ, key) == 0) {
		config->mprq.min_rxqs_num = tmp;
	} else if (strcmp(MLX5_TXQ_INLINE, key) == 0) {
		config->txq_inline = tmp;
	} else if (strcmp(MLX5_TXQS_MIN_INLINE, key) == 0) {
		config->txqs_inline = tmp;
	} else if (strcmp(MLX5_TXQS_MAX_VEC, key) == 0) {
		config->txqs_vec = tmp;
	} else if (strcmp(MLX5_TXQ_MPW_EN, key) == 0) {
		config->mps = !!tmp;
	} else if (strcmp(MLX5_TXQ_MPW_HDR_DSEG_EN, key) == 0) {
		config->mpw_hdr_dseg = !!tmp;
	} else if (strcmp(MLX5_TXQ_MAX_INLINE_LEN, key) == 0) {
		config->inline_max_packet_sz = tmp;
	} else if (strcmp(MLX5_TX_VEC_EN, key) == 0) {
		config->tx_vec_en = !!tmp;
	} else if (strcmp(MLX5_RX_VEC_EN, key) == 0) {
		config->rx_vec_en = !!tmp;
	} else if (strcmp(MLX5_L3_VXLAN_EN, key) == 0) {
		config->l3_vxlan_en = !!tmp;
	} else if (strcmp(MLX5_VF_NL_EN, key) == 0) {
		config->vf_nl_en = !!tmp;
	} else if (strcmp(MLX5_DV_FLOW_EN, key) == 0) {
		config->dv_flow_en = !!tmp;
	} else {
		DRV_LOG(WARNING, "%s: unknown parameter", key);
		rte_errno = EINVAL;
		return -rte_errno;
	}
	return 0;
}

/**
 * Parse device parameters.
 *
 * @param config
 *   Pointer to device configuration structure.
 * @param devargs
 *   Device arguments structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_args(struct mlx5_dev_config *config, struct rte_devargs *devargs)
{
	const char **params = (const char *[]){
		MLX5_RXQ_CQE_COMP_EN,
		MLX5_RXQ_CQE_PAD_EN,
		MLX5_RX_MPRQ_EN,
		MLX5_RX_MPRQ_LOG_STRIDE_NUM,
		MLX5_RX_MPRQ_MAX_MEMCPY_LEN,
		MLX5_RXQS_MIN_MPRQ,
		MLX5_TXQ_INLINE,
		MLX5_TXQS_MIN_INLINE,
		MLX5_TXQS_MAX_VEC,
		MLX5_TXQ_MPW_EN,
		MLX5_TXQ_MPW_HDR_DSEG_EN,
		MLX5_TXQ_MAX_INLINE_LEN,
		MLX5_TX_VEC_EN,
		MLX5_RX_VEC_EN,
		MLX5_L3_VXLAN_EN,
		MLX5_VF_NL_EN,
		MLX5_DV_FLOW_EN,
		MLX5_REPRESENTOR,
		NULL,
	};
	struct rte_kvargs *kvlist;
	int ret = 0;
	int i;

	if (devargs == NULL)
		return 0;
	/* Following UGLY cast is done to pass checkpatch. */
	kvlist = rte_kvargs_parse(devargs->args, params);
	if (kvlist == NULL)
		return 0;
	/* Process parameters. */
	for (i = 0; (params[i] != NULL); ++i) {
		if (rte_kvargs_count(kvlist, params[i])) {
			ret = rte_kvargs_process(kvlist, params[i],
						 mlx5_args_check, config);
			if (ret) {
				rte_errno = -ret;
				rte_kvargs_free(kvlist);
				return -rte_errno;
			}
		}
	}
	rte_kvargs_free(kvlist);
	return 0;
}
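
/*
 * Illustrative sketch (not part of the driver): how a device argument
 * string is parsed into a configuration. The devargs string, values and
 * the default initializer shown are examples only.
 *
 *	struct mlx5_dev_config config = { .mps = MLX5_ARG_UNSET };
 *	struct rte_devargs devargs = { .args = "mprq_en=1,txq_inline=128" };
 *
 *	if (mlx5_args(&config, &devargs) == 0) {
 *		// config.mprq.enabled == 1, config.txq_inline == 128
 *	}
 */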

static struct rte_pci_driver mlx5_driver;

/*
 * Reserved UAR address space for TXQ UAR (hardware doorbell) mapping,
 * process local resource used by both primary and secondary to avoid
 * duplicate reservation.
 * The space has to be available on both primary and secondary process,
 * TXQ UAR maps to this area using fixed mmap w/o double check.
 */
static void *uar_base;

static int
find_lower_va_bound(const struct rte_memseg_list *msl,
		const struct rte_memseg *ms, void *arg)
{
	void **addr = arg;

	if (msl->external)
		return 0;
	if (*addr == NULL)
		*addr = ms->addr;
	else
		*addr = RTE_MIN(*addr, ms->addr);

	return 0;
}

/**
 * Reserve UAR address space for primary process.
 *
 * @param[in] dev
 *   Pointer to Ethernet device.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_uar_init_primary(struct rte_eth_dev *dev)
{
	struct priv *priv = dev->data->dev_private;
	void *addr = (void *)0;

	if (uar_base) { /* UAR address space mapped. */
		priv->uar_base = uar_base;
		return 0;
	}
	/* find out lower bound of hugepage segments */
	rte_memseg_walk(find_lower_va_bound, &addr);
	/* keep distance to hugepages to minimize potential conflicts. */
	addr = RTE_PTR_SUB(addr, (uintptr_t)(MLX5_UAR_OFFSET + MLX5_UAR_SIZE));
	/* anonymous mmap, no real memory consumption. */
	addr = mmap(addr, MLX5_UAR_SIZE,
		    PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (addr == MAP_FAILED) {
		DRV_LOG(ERR,
			"port %u failed to reserve UAR address space, please"
			" adjust MLX5_UAR_SIZE or try --base-virtaddr",
			dev->data->port_id);
		rte_errno = ENOMEM;
		return -rte_errno;
	}
	/* Accept either same addr or a new addr returned from mmap if target
	 * range occupied.
	 */
	DRV_LOG(INFO, "port %u reserved UAR address space: %p",
		dev->data->port_id, addr);
	priv->uar_base = addr; /* for primary and secondary UAR re-mmap. */
	uar_base = addr; /* process local, don't reserve again. */
	return 0;
}

/**
 * Reserve UAR address space for secondary process, align with
 * primary process.
 *
 * @param[in] dev
 *   Pointer to Ethernet device.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_uar_init_secondary(struct rte_eth_dev *dev)
{
	struct priv *priv = dev->data->dev_private;
	void *addr;

	assert(priv->uar_base);
	if (uar_base) { /* already reserved. */
		assert(uar_base == priv->uar_base);
		return 0;
	}
	/* anonymous mmap, no real memory consumption. */
	addr = mmap(priv->uar_base, MLX5_UAR_SIZE,
		    PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (addr == MAP_FAILED) {
		DRV_LOG(ERR, "port %u UAR mmap failed: %p size: %llu",
			dev->data->port_id, priv->uar_base, MLX5_UAR_SIZE);
		rte_errno = ENXIO;
		return -rte_errno;
	}
	if (priv->uar_base != addr) {
		DRV_LOG(ERR,
			"port %u UAR address %p size %llu occupied, please"
			" adjust MLX5_UAR_OFFSET or try EAL parameter"
			" --base-virtaddr",
			dev->data->port_id, priv->uar_base, MLX5_UAR_SIZE);
		rte_errno = ENXIO;
		return -rte_errno;
	}
	uar_base = addr; /* process local, don't reserve again */
	DRV_LOG(INFO, "port %u reserved UAR address space: %p",
		dev->data->port_id, addr);
	return 0;
}
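
/*
 * Note on the two functions above: both processes must end up with the
 * same virtual address range for UAR, since mlx5_tx_uar_remap() later maps
 * doorbell pages at fixed addresses within it. The reservation itself is a
 * PROT_NONE anonymous mapping, so no physical memory is committed.
 */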

/**
 * Spawn an Ethernet device from Verbs information.
 *
 * @param dpdk_dev
 *   Backing DPDK device.
 * @param ibv_dev
 *   Verbs device.
 * @param config
 *   Device configuration parameters.
 * @param[in] switch_info
 *   Switch properties of Ethernet device.
 *
 * @return
 *   A valid Ethernet device object on success, NULL otherwise and rte_errno
 *   is set. The following errors are defined:
 *
 *   EBUSY: device is not supposed to be spawned.
 *   EEXIST: device is already spawned.
 */
static struct rte_eth_dev *
mlx5_dev_spawn(struct rte_device *dpdk_dev,
	       struct ibv_device *ibv_dev,
	       struct mlx5_dev_config config,
	       const struct mlx5_switch_info *switch_info)
{
	struct ibv_context *ctx;
	struct ibv_device_attr_ex attr;
	struct ibv_port_attr port_attr;
	struct ibv_pd *pd = NULL;
	struct mlx5dv_context dv_attr = { .comp_mask = 0 };
	struct rte_eth_dev *eth_dev = NULL;
	struct priv *priv = NULL;
	int err = 0;
	unsigned int mps;
	unsigned int cqe_comp;
	unsigned int cqe_pad = 0;
	unsigned int tunnel_en = 0;
	unsigned int mpls_en = 0;
	unsigned int swp = 0;
	unsigned int mprq = 0;
	unsigned int mprq_min_stride_size_n = 0;
	unsigned int mprq_max_stride_size_n = 0;
	unsigned int mprq_min_stride_num_n = 0;
	unsigned int mprq_max_stride_num_n = 0;
	struct ether_addr mac;
	char name[RTE_ETH_NAME_MAX_LEN];
	int own_domain_id = 0;
	uint16_t port_id;
	unsigned int i;

	/* Determine if this port representor is supposed to be spawned. */
	if (switch_info->representor && dpdk_dev->devargs) {
		struct rte_eth_devargs eth_da;

		err = rte_eth_devargs_parse(dpdk_dev->devargs->args, &eth_da);
		if (err) {
			rte_errno = -err;
			DRV_LOG(ERR, "failed to process device arguments: %s",
				strerror(rte_errno));
			return NULL;
		}
		for (i = 0; i < eth_da.nb_representor_ports; ++i)
			if (eth_da.representor_ports[i] ==
			    (uint16_t)switch_info->port_name)
				break;
		if (i == eth_da.nb_representor_ports) {
			rte_errno = EBUSY;
			return NULL;
		}
	}
	/* Build device name. */
	if (!switch_info->representor)
		rte_strlcpy(name, dpdk_dev->name, sizeof(name));
	else
		snprintf(name, sizeof(name), "%s_representor_%u",
			 dpdk_dev->name, switch_info->port_name);
	/* Check if the device is already spawned. */
	if (rte_eth_dev_get_port_by_name(name, &port_id) == 0) {
		rte_errno = EEXIST;
		return NULL;
	}
	/* Prepare shared data between primary and secondary process. */
	mlx5_prepare_shared_data();
	errno = 0;
	ctx = mlx5_glue->open_device(ibv_dev);
	if (!ctx) {
		rte_errno = errno ? errno : ENODEV;
		return NULL;
	}
#ifdef HAVE_IBV_MLX5_MOD_SWP
	dv_attr.comp_mask |= MLX5DV_CONTEXT_MASK_SWP;
#endif
	/*
	 * Multi-packet send is supported by ConnectX-4 Lx PF as well
	 * as all ConnectX-5 devices.
	 */
#ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
	dv_attr.comp_mask |= MLX5DV_CONTEXT_MASK_TUNNEL_OFFLOADS;
#endif
#ifdef HAVE_IBV_DEVICE_STRIDING_RQ_SUPPORT
	dv_attr.comp_mask |= MLX5DV_CONTEXT_MASK_STRIDING_RQ;
#endif
	mlx5_glue->dv_query_device(ctx, &dv_attr);
	if (dv_attr.flags & MLX5DV_CONTEXT_FLAGS_MPW_ALLOWED) {
		if (dv_attr.flags & MLX5DV_CONTEXT_FLAGS_ENHANCED_MPW) {
			DRV_LOG(DEBUG, "enhanced MPW is supported");
			mps = MLX5_MPW_ENHANCED;
		} else {
			DRV_LOG(DEBUG, "MPW is supported");
			mps = MLX5_MPW;
		}
	} else {
		DRV_LOG(DEBUG, "MPW isn't supported");
		mps = MLX5_MPW_DISABLED;
	}
#ifdef HAVE_IBV_MLX5_MOD_SWP
	if (dv_attr.comp_mask & MLX5DV_CONTEXT_MASK_SWP)
		swp = dv_attr.sw_parsing_caps.sw_parsing_offloads;
	DRV_LOG(DEBUG, "SWP support: %u", swp);
#endif
	config.swp = !!swp;
#ifdef HAVE_IBV_DEVICE_STRIDING_RQ_SUPPORT
	if (dv_attr.comp_mask & MLX5DV_CONTEXT_MASK_STRIDING_RQ) {
		struct mlx5dv_striding_rq_caps mprq_caps =
			dv_attr.striding_rq_caps;

		DRV_LOG(DEBUG, "\tmin_single_stride_log_num_of_bytes: %d",
			mprq_caps.min_single_stride_log_num_of_bytes);
		DRV_LOG(DEBUG, "\tmax_single_stride_log_num_of_bytes: %d",
			mprq_caps.max_single_stride_log_num_of_bytes);
		DRV_LOG(DEBUG, "\tmin_single_wqe_log_num_of_strides: %d",
			mprq_caps.min_single_wqe_log_num_of_strides);
		DRV_LOG(DEBUG, "\tmax_single_wqe_log_num_of_strides: %d",
			mprq_caps.max_single_wqe_log_num_of_strides);
		DRV_LOG(DEBUG, "\tsupported_qpts: %d",
			mprq_caps.supported_qpts);
		DRV_LOG(DEBUG, "device supports Multi-Packet RQ");
		mprq = 1;
		mprq_min_stride_size_n =
			mprq_caps.min_single_stride_log_num_of_bytes;
		mprq_max_stride_size_n =
			mprq_caps.max_single_stride_log_num_of_bytes;
		mprq_min_stride_num_n =
			mprq_caps.min_single_wqe_log_num_of_strides;
		mprq_max_stride_num_n =
			mprq_caps.max_single_wqe_log_num_of_strides;
		config.mprq.stride_num_n = RTE_MAX(MLX5_MPRQ_STRIDE_NUM_N,
						   mprq_min_stride_num_n);
	}
#endif
	if (RTE_CACHE_LINE_SIZE == 128 &&
	    !(dv_attr.flags & MLX5DV_CONTEXT_FLAGS_CQE_128B_COMP))
		cqe_comp = 0;
	else
		cqe_comp = 1;
	config.cqe_comp = cqe_comp;
#ifdef HAVE_IBV_MLX5_MOD_CQE_128B_PAD
	/* Whether device supports 128B Rx CQE padding. */
	cqe_pad = RTE_CACHE_LINE_SIZE == 128 &&
		  (dv_attr.flags & MLX5DV_CONTEXT_FLAGS_CQE_128B_PAD);
#endif
#ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
	if (dv_attr.comp_mask & MLX5DV_CONTEXT_MASK_TUNNEL_OFFLOADS) {
		tunnel_en = ((dv_attr.tunnel_offloads_caps &
			      MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_VXLAN) &&
			     (dv_attr.tunnel_offloads_caps &
			      MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_GRE));
	}
	DRV_LOG(DEBUG, "tunnel offloading is %ssupported",
		tunnel_en ? "" : "not ");
#else
	DRV_LOG(WARNING,
		"tunnel offloading disabled due to old OFED/rdma-core version");
#endif
	config.tunnel_en = tunnel_en;
#ifdef HAVE_IBV_DEVICE_MPLS_SUPPORT
	mpls_en = ((dv_attr.tunnel_offloads_caps &
		    MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_CW_MPLS_OVER_GRE) &&
		   (dv_attr.tunnel_offloads_caps &
		    MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_CW_MPLS_OVER_UDP));
	DRV_LOG(DEBUG, "MPLS over GRE/UDP tunnel offloading is %ssupported",
		mpls_en ? "" : "not ");
#else
	DRV_LOG(WARNING, "MPLS over GRE/UDP tunnel offloading disabled due to"
		" old OFED/rdma-core version or firmware configuration");
#endif
	config.mpls_en = mpls_en;
	err = mlx5_glue->query_device_ex(ctx, NULL, &attr);
	if (err) {
		DEBUG("ibv_query_device_ex() failed");
		goto error;
	}
	DRV_LOG(DEBUG, "naming Ethernet device \"%s\"", name);
	if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
		eth_dev = rte_eth_dev_attach_secondary(name);
		if (eth_dev == NULL) {
			DRV_LOG(ERR, "can not attach rte ethdev");
			rte_errno = ENOMEM;
			err = rte_errno;
			goto error;
		}
		eth_dev->device = dpdk_dev;
		eth_dev->dev_ops = &mlx5_dev_sec_ops;
		err = mlx5_uar_init_secondary(eth_dev);
		if (err) {
			err = rte_errno;
			goto error;
		}
		/* Receive command fd from primary process. */
		err = mlx5_socket_connect(eth_dev);
		if (err < 0) {
			err = rte_errno;
			goto error;
		}
		/* Remap UAR for Tx queues. */
		err = mlx5_tx_uar_remap(eth_dev, err);
		if (err) {
			err = rte_errno;
			goto error;
		}
		/*
		 * Ethdev pointer is still required as input since
		 * the primary device is not accessible from the
		 * secondary process.
		 */
		eth_dev->rx_pkt_burst = mlx5_select_rx_function(eth_dev);
		eth_dev->tx_pkt_burst = mlx5_select_tx_function(eth_dev);
		claim_zero(mlx5_glue->close_device(ctx));
		return eth_dev;
	}
	/* Check port status. */
	err = mlx5_glue->query_port(ctx, 1, &port_attr);
	if (err) {
		DRV_LOG(ERR, "port query failed: %s", strerror(err));
		goto error;
	}
	if (port_attr.link_layer != IBV_LINK_LAYER_ETHERNET) {
		DRV_LOG(ERR, "port is not configured in Ethernet mode");
		err = EINVAL;
		goto error;
	}
	if (port_attr.state != IBV_PORT_ACTIVE)
		DRV_LOG(DEBUG, "port is not active: \"%s\" (%d)",
			mlx5_glue->port_state_str(port_attr.state),
			port_attr.state);
	/* Allocate protection domain. */
	pd = mlx5_glue->alloc_pd(ctx);
	if (pd == NULL) {
		DRV_LOG(ERR, "PD allocation failure");
		err = ENOMEM;
		goto error;
	}
	priv = rte_zmalloc("ethdev private structure",
			   sizeof(*priv),
			   RTE_CACHE_LINE_SIZE);
	if (priv == NULL) {
		DRV_LOG(ERR, "priv allocation failure");
		err = ENOMEM;
		goto error;
	}
	priv->ctx = ctx;
	strncpy(priv->ibdev_name, priv->ctx->device->name,
		sizeof(priv->ibdev_name));
	strncpy(priv->ibdev_path, priv->ctx->device->ibdev_path,
		sizeof(priv->ibdev_path));
	priv->device_attr = attr;
	priv->pd = pd;
	priv->mtu = ETHER_MTU;
#ifndef RTE_ARCH_64
	/* Initialize UAR access locks for 32bit implementations. */
	rte_spinlock_init(&priv->uar_lock_cq);
	for (i = 0; i < MLX5_UAR_PAGE_NUM_MAX; i++)
		rte_spinlock_init(&priv->uar_lock[i]);
#endif
	/* Some internal functions rely on Netlink sockets, open them now. */
	priv->nl_socket_rdma = mlx5_nl_init(NETLINK_RDMA);
	priv->nl_socket_route = mlx5_nl_init(NETLINK_ROUTE);
	priv->nl_sn = 0;
	priv->representor = !!switch_info->representor;
	priv->domain_id = RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID;
	priv->representor_id =
		switch_info->representor ? switch_info->port_name : -1;
	/*
	 * Look for sibling devices in order to reuse their switch domain
	 * if any, otherwise allocate one.
	 */
	i = mlx5_dev_to_port_id(dpdk_dev, NULL, 0);
	if (i > 0) {
		uint16_t port_id[i];

		i = RTE_MIN(mlx5_dev_to_port_id(dpdk_dev, port_id, i), i);
		while (i--) {
			const struct priv *opriv =
				rte_eth_devices[port_id[i]].data->dev_private;

			if (!opriv ||
			    opriv->domain_id ==
			    RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID)
				continue;
			priv->domain_id = opriv->domain_id;
			break;
		}
	}
	if (priv->domain_id == RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID) {
		err = rte_eth_switch_domain_alloc(&priv->domain_id);
		if (err) {
			err = rte_errno;
			DRV_LOG(ERR, "unable to allocate switch domain: %s",
				strerror(rte_errno));
			goto error;
		}
		own_domain_id = 1;
	}
	err = mlx5_args(&config, dpdk_dev->devargs);
	if (err) {
		err = rte_errno;
		DRV_LOG(ERR, "failed to process device arguments: %s",
			strerror(rte_errno));
		goto error;
	}
	config.hw_csum = !!(attr.device_cap_flags_ex & IBV_DEVICE_RAW_IP_CSUM);
	DRV_LOG(DEBUG, "checksum offloading is %ssupported",
		(config.hw_csum ? "" : "not "));
#if !defined(HAVE_IBV_DEVICE_COUNTERS_SET_V42) && \
	!defined(HAVE_IBV_DEVICE_COUNTERS_SET_V45)
	DRV_LOG(DEBUG, "counters are not supported");
#endif
#ifndef HAVE_IBV_FLOW_DV_SUPPORT
	if (config.dv_flow_en) {
		DRV_LOG(WARNING, "DV flow is not supported");
		config.dv_flow_en = 0;
	}
#endif
	config.ind_table_max_size =
		attr.rss_caps.max_rwq_indirection_table_size;
	/*
	 * Remove this check once DPDK supports larger/variable
	 * indirection tables.
	 */
	if (config.ind_table_max_size > (unsigned int)ETH_RSS_RETA_SIZE_512)
		config.ind_table_max_size = ETH_RSS_RETA_SIZE_512;
	DRV_LOG(DEBUG, "maximum Rx indirection table size is %u",
		config.ind_table_max_size);
	config.hw_vlan_strip = !!(attr.raw_packet_caps &
				  IBV_RAW_PACKET_CAP_CVLAN_STRIPPING);
	DRV_LOG(DEBUG, "VLAN stripping is %ssupported",
		(config.hw_vlan_strip ? "" : "not "));
	config.hw_fcs_strip = !!(attr.raw_packet_caps &
				 IBV_RAW_PACKET_CAP_SCATTER_FCS);
	DRV_LOG(DEBUG, "FCS stripping configuration is %ssupported",
		(config.hw_fcs_strip ? "" : "not "));
#ifdef HAVE_IBV_WQ_FLAG_RX_END_PADDING
	config.hw_padding = !!attr.rx_pad_end_addr_align;
#endif
	DRV_LOG(DEBUG, "hardware Rx end alignment padding is %ssupported",
		(config.hw_padding ? "" : "not "));
	config.tso = (attr.tso_caps.max_tso > 0 &&
		      (attr.tso_caps.supported_qpts &
		       (1 << IBV_QPT_RAW_PACKET)));
	if (config.tso)
		config.tso_max_payload_sz = attr.tso_caps.max_tso;
	/*
	 * MPW is disabled by default, while the Enhanced MPW is enabled
	 * by default.
	 */
	if (config.mps == MLX5_ARG_UNSET)
		config.mps = (mps == MLX5_MPW_ENHANCED) ? MLX5_MPW_ENHANCED :
							  MLX5_MPW_DISABLED;
	else
		config.mps = config.mps ? mps : MLX5_MPW_DISABLED;
	DRV_LOG(INFO, "%sMPS is %s",
		config.mps == MLX5_MPW_ENHANCED ? "enhanced " : "",
		config.mps != MLX5_MPW_DISABLED ? "enabled" : "disabled");
	if (config.cqe_comp && !cqe_comp) {
		DRV_LOG(WARNING, "Rx CQE compression isn't supported");
		config.cqe_comp = 0;
	}
	if (config.cqe_pad && !cqe_pad) {
		DRV_LOG(WARNING, "Rx CQE padding isn't supported");
		config.cqe_pad = 0;
	} else if (config.cqe_pad) {
		DRV_LOG(INFO, "Rx CQE padding is enabled");
	}
	if (config.mprq.enabled && mprq) {
		if (config.mprq.stride_num_n > mprq_max_stride_num_n ||
		    config.mprq.stride_num_n < mprq_min_stride_num_n) {
			config.mprq.stride_num_n =
				RTE_MAX(MLX5_MPRQ_STRIDE_NUM_N,
					mprq_min_stride_num_n);
			DRV_LOG(WARNING,
				"the number of strides"
				" for Multi-Packet RQ is out of range,"
				" setting default value (%u)",
				1 << config.mprq.stride_num_n);
		}
		config.mprq.min_stride_size_n = mprq_min_stride_size_n;
		config.mprq.max_stride_size_n = mprq_max_stride_size_n;
	} else if (config.mprq.enabled && !mprq) {
		DRV_LOG(WARNING, "Multi-Packet RQ isn't supported");
		config.mprq.enabled = 0;
	}
	eth_dev = rte_eth_dev_allocate(name);
	if (eth_dev == NULL) {
		DRV_LOG(ERR, "can not allocate rte ethdev");
		err = ENOMEM;
		goto error;
	}
	if (priv->representor) {
		eth_dev->data->dev_flags |= RTE_ETH_DEV_REPRESENTOR;
		eth_dev->data->representor_id = priv->representor_id;
	}
	eth_dev->data->dev_private = priv;
	priv->dev_data = eth_dev->data;
	eth_dev->data->mac_addrs = priv->mac;
	eth_dev->device = dpdk_dev;
	err = mlx5_uar_init_primary(eth_dev);
	if (err) {
		err = rte_errno;
		goto error;
	}
	/* Configure the first MAC address by default. */
	if (mlx5_get_mac(eth_dev, &mac.addr_bytes)) {
		DRV_LOG(ERR,
			"port %u cannot get MAC address, is mlx5_en"
			" loaded? (errno: %s)",
			eth_dev->data->port_id, strerror(rte_errno));
		err = ENODEV;
		goto error;
	}
	DRV_LOG(INFO,
		"port %u MAC address is %02x:%02x:%02x:%02x:%02x:%02x",
		eth_dev->data->port_id,
		mac.addr_bytes[0], mac.addr_bytes[1],
		mac.addr_bytes[2], mac.addr_bytes[3],
		mac.addr_bytes[4], mac.addr_bytes[5]);
#ifndef NDEBUG
	{
		char ifname[IF_NAMESIZE];

		if (mlx5_get_ifname(eth_dev, &ifname) == 0)
			DRV_LOG(DEBUG, "port %u ifname is \"%s\"",
				eth_dev->data->port_id, ifname);
		else
			DRV_LOG(DEBUG, "port %u ifname is unknown",
				eth_dev->data->port_id);
	}
#endif
	/* Get actual MTU if possible. */
	err = mlx5_get_mtu(eth_dev, &priv->mtu);
	if (err) {
		err = rte_errno;
		goto error;
	}
	DRV_LOG(DEBUG, "port %u MTU is %u", eth_dev->data->port_id,
		priv->mtu);
	/* Initialize burst functions to prevent crashes before link-up. */
	eth_dev->rx_pkt_burst = removed_rx_burst;
	eth_dev->tx_pkt_burst = removed_tx_burst;
	eth_dev->dev_ops = &mlx5_dev_ops;
	/* Register MAC address. */
	claim_zero(mlx5_mac_addr_add(eth_dev, &mac, 0, 0));
	if (config.vf && config.vf_nl_en)
		mlx5_nl_mac_addr_sync(eth_dev);
	priv->tcf_context = mlx5_flow_tcf_context_create();
	if (!priv->tcf_context) {
		err = -rte_errno;
		DRV_LOG(WARNING,
			"flow rules relying on switch offloads will not be"
			" supported: cannot open libmnl socket: %s",
			strerror(rte_errno));
	} else {
		struct rte_flow_error error;
		unsigned int ifindex = mlx5_ifindex(eth_dev);

		if (!ifindex) {
			err = -rte_errno;
			error.message =
				"cannot retrieve network interface index";
		} else {
			err = mlx5_flow_tcf_init(priv->tcf_context,
						 ifindex, &error);
		}
		if (err) {
			DRV_LOG(WARNING,
				"flow rules relying on switch offloads will"
				" not be supported: %s: %s",
				error.message, strerror(rte_errno));
			mlx5_flow_tcf_context_destroy(priv->tcf_context);
			priv->tcf_context = NULL;
		}
	}
	TAILQ_INIT(&priv->flows);
	TAILQ_INIT(&priv->ctrl_flows);
	/* Hint libmlx5 to use PMD allocator for data plane resources. */
	struct mlx5dv_ctx_allocators alctr = {
		.alloc = &mlx5_alloc_verbs_buf,
		.free = &mlx5_free_verbs_buf,
		.data = priv,
	};
	mlx5_glue->dv_set_context_attr(ctx, MLX5DV_CTX_ATTR_BUF_ALLOCATORS,
				       (void *)((uintptr_t)&alctr));
	/* Bring Ethernet device up. */
	DRV_LOG(DEBUG, "port %u forcing Ethernet interface up",
		eth_dev->data->port_id);
	mlx5_set_link_up(eth_dev);
	/*
	 * Even though the interrupt handler is not installed yet,
	 * interrupts will still trigger on the async fd from
	 * Verbs context returned by ibv_open_device().
	 */
	mlx5_link_update(eth_dev, 0);
	/* Store device configuration on private structure. */
	priv->config = config;
	/* Supported Verbs flow priority number detection. */
	err = mlx5_flow_discover_priorities(eth_dev);
	if (err < 0)
		goto error;
	priv->config.flow_prio = err;
	/*
	 * Once the device is added to the list of memory event
	 * callback, its global MR cache table cannot be expanded
	 * on the fly because of deadlock. If it overflows, lookup
	 * should be done by searching MR list linearly, which is slow.
	 */
	err = mlx5_mr_btree_init(&priv->mr.cache,
				 MLX5_MR_BTREE_CACHE_N * 2,
				 eth_dev->device->numa_node);
	if (err) {
		err = rte_errno;
		goto error;
	}
	/* Add device to memory callback list. */
	rte_rwlock_write_lock(&mlx5_shared_data->mem_event_rwlock);
	LIST_INSERT_HEAD(&mlx5_shared_data->mem_event_cb_list,
			 priv, mem_event_cb);
	rte_rwlock_write_unlock(&mlx5_shared_data->mem_event_rwlock);
	return eth_dev;
error:
	if (priv) {
		if (priv->nl_socket_route >= 0)
			close(priv->nl_socket_route);
		if (priv->nl_socket_rdma >= 0)
			close(priv->nl_socket_rdma);
		if (priv->tcf_context)
			mlx5_flow_tcf_context_destroy(priv->tcf_context);
		if (own_domain_id)
			claim_zero(rte_eth_switch_domain_free(priv->domain_id));
		rte_free(priv);
		if (eth_dev != NULL)
			eth_dev->data->dev_private = NULL;
	}
	if (pd)
		claim_zero(mlx5_glue->dealloc_pd(pd));
	if (eth_dev != NULL) {
		/* mac_addrs must not be freed alone because it is part of
		 * dev_private.
		 */
		eth_dev->data->mac_addrs = NULL;
		rte_eth_dev_release_port(eth_dev);
	}
	if (ctx)
		claim_zero(mlx5_glue->close_device(ctx));
	assert(err > 0);
	rte_errno = err;
	return NULL;
}

/** Data associated with devices to spawn. */
struct mlx5_dev_spawn_data {
	unsigned int ifindex; /**< Network interface index. */
	struct mlx5_switch_info info; /**< Switch information. */
	struct ibv_device *ibv_dev; /**< Associated IB device. */
	struct rte_eth_dev *eth_dev; /**< Associated Ethernet device. */
};

/**
 * Comparison callback to sort device data.
 *
 * This is meant to be used with qsort().
 *
 * @param a[in]
 *   Pointer to pointer to first data object.
 * @param b[in]
 *   Pointer to pointer to second data object.
 *
 * @return
 *   0 if both objects are equal, less than 0 if the first argument is less
 *   than the second, greater than 0 otherwise.
 */
static int
mlx5_dev_spawn_data_cmp(const void *a, const void *b)
{
	const struct mlx5_switch_info *si_a =
		&((const struct mlx5_dev_spawn_data *)a)->info;
	const struct mlx5_switch_info *si_b =
		&((const struct mlx5_dev_spawn_data *)b)->info;
	int ret;

	/* Master device first. */
	ret = si_b->master - si_a->master;
	if (ret)
		return ret;
	/* Then representor devices. */
	ret = si_b->representor - si_a->representor;
	if (ret)
		return ret;
	/* Unidentified devices come last in no specific order. */
	if (!si_a->representor)
		return 0;
	/* Order representors by name. */
	return si_a->port_name - si_b->port_name;
}
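
/*
 * Example (illustrative): qsort()'ing a spawn list with the callback above
 * when one master and two representors were matched yields the order
 * master, representor #0, representor #1, with any unidentified devices
 * last. This is the "natural order" relied upon by mlx5_pci_probe() below.
 */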

/**
 * DPDK callback to register a PCI device.
 *
 * This function spawns Ethernet devices out of a given PCI device.
 *
 * @param[in] pci_drv
 *   PCI driver structure (mlx5_driver).
 * @param[in] pci_dev
 *   PCI device information.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
	       struct rte_pci_device *pci_dev)
{
	struct ibv_device **ibv_list;
	unsigned int n = 0;
	struct mlx5_dev_config dev_config;
	int ret;

	assert(pci_drv == &mlx5_driver);
	errno = 0;
	ibv_list = mlx5_glue->get_device_list(&ret);
	if (!ibv_list) {
		rte_errno = errno ? errno : ENOSYS;
		DRV_LOG(ERR, "cannot list devices, is ib_uverbs loaded?");
		return -rte_errno;
	}

	struct ibv_device *ibv_match[ret + 1];

	while (ret-- > 0) {
		struct rte_pci_addr pci_addr;

		DRV_LOG(DEBUG, "checking device \"%s\"", ibv_list[ret]->name);
		if (mlx5_ibv_device_to_pci_addr(ibv_list[ret], &pci_addr))
			continue;
		if (pci_dev->addr.domain != pci_addr.domain ||
		    pci_dev->addr.bus != pci_addr.bus ||
		    pci_dev->addr.devid != pci_addr.devid ||
		    pci_dev->addr.function != pci_addr.function)
			continue;
		DRV_LOG(INFO, "PCI information matches for device \"%s\"",
			ibv_list[ret]->name);
		ibv_match[n++] = ibv_list[ret];
	}
	ibv_match[n] = NULL;

	struct mlx5_dev_spawn_data list[n];
	int nl_route = n ? mlx5_nl_init(NETLINK_ROUTE) : -1;
	int nl_rdma = n ? mlx5_nl_init(NETLINK_RDMA) : -1;
	unsigned int i;
	unsigned int u;

	/*
	 * The existence of several matching entries (n > 1) means port
	 * representors have been instantiated. No existing Verbs call nor
	 * /sys entries can tell them apart, this can only be done through
	 * Netlink calls assuming kernel drivers are recent enough to
	 * support them.
	 *
	 * In the event of identification failure through Netlink, try again
	 * through sysfs, then either:
	 *
	 * 1. No device matches (n == 0), complain and bail out.
	 * 2. A single IB device matches (n == 1) and is not a representor,
	 *    assume no switch support.
	 * 3. Otherwise no safe assumptions can be made; complain louder and
	 *    bail out.
	 */
	for (i = 0; i != n; ++i) {
		list[i].ibv_dev = ibv_match[i];
		list[i].eth_dev = NULL;
		if (nl_rdma < 0)
			list[i].ifindex = 0;
		else
			list[i].ifindex = mlx5_nl_ifindex
				(nl_rdma, list[i].ibv_dev->name);
		if (nl_route < 0 ||
		    !list[i].ifindex ||
		    mlx5_nl_switch_info(nl_route, list[i].ifindex,
					&list[i].info) ||
		    ((!list[i].info.representor && !list[i].info.master) &&
		     mlx5_sysfs_switch_info(list[i].ifindex, &list[i].info))) {
			list[i].ifindex = 0;
			memset(&list[i].info, 0, sizeof(list[i].info));
			continue;
		}
	}
	if (nl_rdma >= 0)
		close(nl_rdma);
	if (nl_route >= 0)
		close(nl_route);
	/* Count unidentified devices. */
	for (u = 0, i = 0; i != n; ++i)
		if (!list[i].info.master && !list[i].info.representor)
			++u;
	if (u) {
		if (n == 1 && u == 1) {
			/* Case #2. */
			DRV_LOG(INFO, "no switch support detected");
		} else {
			/* Case #3. */
			DRV_LOG(ERR,
				"unable to tell which of the matching devices"
				" is the master (lack of kernel support?)");
			n = 0;
		}
	}
	/*
	 * Sort list to probe devices in natural order for user's convenience
	 * (i.e. master first, then representors from lowest to highest ID).
	 */
	if (n)
		qsort(list, n, sizeof(*list), mlx5_dev_spawn_data_cmp);
	/* Default configuration. */
	dev_config = (struct mlx5_dev_config){
		.mps = MLX5_ARG_UNSET,
		.tx_vec_en = 1,
		.rx_vec_en = 1,
		.txq_inline = MLX5_ARG_UNSET,
		.txqs_inline = MLX5_ARG_UNSET,
		.txqs_vec = MLX5_ARG_UNSET,
		.inline_max_packet_sz = MLX5_ARG_UNSET,
		.vf_nl_en = 1,
		.mprq = {
			.enabled = 0, /* Disabled by default. */
			.stride_num_n = MLX5_MPRQ_STRIDE_NUM_N,
			.max_memcpy_len = MLX5_MPRQ_MEMCPY_DEFAULT_LEN,
			.min_rxqs_num = MLX5_MPRQ_MIN_RXQS,
		},
	};
	/* Device specific configuration. */
	switch (pci_dev->id.device_id) {
	case PCI_DEVICE_ID_MELLANOX_CONNECTX5BF:
		dev_config.txqs_vec = MLX5_VPMD_MAX_TXQS_BLUEFIELD;
		break;
	case PCI_DEVICE_ID_MELLANOX_CONNECTX4VF:
	case PCI_DEVICE_ID_MELLANOX_CONNECTX4LXVF:
	case PCI_DEVICE_ID_MELLANOX_CONNECTX5VF:
	case PCI_DEVICE_ID_MELLANOX_CONNECTX5EXVF:
		dev_config.vf = 1;
		break;
	default:
		break;
	}
	/* Set architecture-dependent default value if unset. */
	if (dev_config.txqs_vec == MLX5_ARG_UNSET)
		dev_config.txqs_vec = MLX5_VPMD_MAX_TXQS;
	for (i = 0; i != n; ++i) {
		uint32_t restore;

		list[i].eth_dev = mlx5_dev_spawn(&pci_dev->device,
						 list[i].ibv_dev, dev_config,
						 &list[i].info);
		if (!list[i].eth_dev) {
			if (rte_errno != EBUSY && rte_errno != EEXIST)
				break;
			/* Device is disabled or already spawned. Ignore it. */
			continue;
		}
		restore = list[i].eth_dev->data->dev_flags;
		rte_eth_copy_pci_info(list[i].eth_dev, pci_dev);
		/* Restore non-PCI flags cleared by the above call. */
		list[i].eth_dev->data->dev_flags |= restore;
		rte_eth_dev_probing_finish(list[i].eth_dev);
	}
	mlx5_glue->free_device_list(ibv_list);
	if (!n) {
		DRV_LOG(WARNING,
			"no Verbs device matches PCI device " PCI_PRI_FMT ","
			" are kernel drivers loaded?",
			pci_dev->addr.domain, pci_dev->addr.bus,
			pci_dev->addr.devid, pci_dev->addr.function);
		rte_errno = ENOENT;
		ret = -rte_errno;
	} else if (i != n) {
		DRV_LOG(ERR,
			"probe of PCI device " PCI_PRI_FMT " aborted after"
			" encountering an error: %s",
			pci_dev->addr.domain, pci_dev->addr.bus,
			pci_dev->addr.devid, pci_dev->addr.function,
			strerror(rte_errno));
		ret = -rte_errno;
		/* Roll back. */
		while (i--) {
			if (!list[i].eth_dev)
				continue;
			mlx5_dev_close(list[i].eth_dev);
			/* mac_addrs must not be freed because it is part of
			 * dev_private.
			 */
			list[i].eth_dev->data->mac_addrs = NULL;
			claim_zero(rte_eth_dev_release_port(list[i].eth_dev));
		}
		/* Restore original error. */
		rte_errno = -ret;
	} else {
		ret = 0;
	}
	return ret;
}

/**
 * DPDK callback to remove a PCI device.
 *
 * This function removes all Ethernet devices belonging to a given PCI device.
 *
 * @param[in] pci_dev
 *   Pointer to the PCI device.
 *
 * @return
 *   0 on success, the function cannot fail.
 */
static int
mlx5_pci_remove(struct rte_pci_device *pci_dev)
{
	uint16_t port_id;
	struct rte_eth_dev *port;

	for (port_id = 0; port_id < RTE_MAX_ETHPORTS; port_id++) {
		port = &rte_eth_devices[port_id];
		if (port->state != RTE_ETH_DEV_UNUSED &&
		    port->device == &pci_dev->device)
			rte_eth_dev_close(port_id);
	}
	return 0;
}

static const struct rte_pci_id mlx5_pci_id_map[] = {
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
			       PCI_DEVICE_ID_MELLANOX_CONNECTX4)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
			       PCI_DEVICE_ID_MELLANOX_CONNECTX4VF)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
			       PCI_DEVICE_ID_MELLANOX_CONNECTX4LX)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
			       PCI_DEVICE_ID_MELLANOX_CONNECTX4LXVF)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
			       PCI_DEVICE_ID_MELLANOX_CONNECTX5)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
			       PCI_DEVICE_ID_MELLANOX_CONNECTX5VF)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
			       PCI_DEVICE_ID_MELLANOX_CONNECTX5EX)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
			       PCI_DEVICE_ID_MELLANOX_CONNECTX5EXVF)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
			       PCI_DEVICE_ID_MELLANOX_CONNECTX5BF)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
			       PCI_DEVICE_ID_MELLANOX_CONNECTX5BFVF)
	},
	{
		.vendor_id = 0
	}
};

static struct rte_pci_driver mlx5_driver = {
	.driver = {
		.name = MLX5_DRIVER_NAME
	},
	.id_table = mlx5_pci_id_map,
	.probe = mlx5_pci_probe,
	.remove = mlx5_pci_remove,
	.drv_flags = (RTE_PCI_DRV_INTR_LSC | RTE_PCI_DRV_INTR_RMV |
		      RTE_PCI_DRV_PROBE_AGAIN),
};

#ifdef RTE_LIBRTE_MLX5_DLOPEN_DEPS

/**
 * Suffix RTE_EAL_PMD_PATH with "-glue".
 *
 * This function performs a sanity check on RTE_EAL_PMD_PATH before
 * suffixing its last component.
 *
 * @param buf[out]
 *   Output buffer, should be large enough otherwise NULL is returned.
 * @param size
 *   Size of @p buf.
 *
 * @return
 *   Pointer to @p buf or @p NULL in case suffix cannot be appended.
 */
static char *
mlx5_glue_path(char *buf, size_t size)
{
	static const char *const bad[] = { "/", ".", "..", NULL };
	const char *path = RTE_EAL_PMD_PATH;
	size_t len = strlen(path);
	size_t off;
	int i;

	while (len && path[len - 1] == '/')
		--len;
	for (off = len; off && path[off - 1] != '/'; --off)
		;
	for (i = 0; bad[i]; ++i)
		if (!strncmp(path + off, bad[i], (int)(len - off)))
			goto error;
	i = snprintf(buf, size, "%.*s-glue", (int)len, path);
	if (i == -1 || (size_t)i >= size)
		goto error;
	return buf;
error:
	DRV_LOG(ERR,
		"unable to append \"-glue\" to last component of"
		" RTE_EAL_PMD_PATH (\"" RTE_EAL_PMD_PATH "\"),"
		" please re-configure DPDK");
	return NULL;
}
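
/*
 * Example (hypothetical build configuration): with RTE_EAL_PMD_PATH set to
 * "/usr/lib64/dpdk-pmds", the function above yields
 * "/usr/lib64/dpdk-pmds-glue", where mlx5_glue_init() then looks for the
 * glue library.
 */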

/**
 * Initialization routine for run-time dependency on rdma-core.
 */
static int
mlx5_glue_init(void)
{
	char glue_path[sizeof(RTE_EAL_PMD_PATH) - 1 + sizeof("-glue")];
	const char *path[] = {
		/*
		 * A basic security check is necessary before trusting
		 * MLX5_GLUE_PATH, which may override RTE_EAL_PMD_PATH.
		 */
		(geteuid() == getuid() && getegid() == getgid() ?
		 getenv("MLX5_GLUE_PATH") : NULL),
		/*
		 * When RTE_EAL_PMD_PATH is set, use its glue-suffixed
		 * variant, otherwise let dlopen() look up libraries on its
		 * own.
		 */
		(*RTE_EAL_PMD_PATH ?
		 mlx5_glue_path(glue_path, sizeof(glue_path)) : ""),
	};
	unsigned int i = 0;
	void *handle = NULL;
	void **sym;
	const char *dlmsg;

	while (!handle && i != RTE_DIM(path)) {
		const char *end;
		size_t len;
		int ret;

		if (!path[i]) {
			++i;
			continue;
		}
		end = strpbrk(path[i], ":;");
		if (!end)
			end = path[i] + strlen(path[i]);
		len = end - path[i];
		ret = 0;
		do {
			/* VLA grown to snprintf()'s reported size on the
			 * second pass.
			 */
			char name[ret + 1];

			ret = snprintf(name, sizeof(name), "%.*s%s" MLX5_GLUE,
				       (int)len, path[i],
				       (!len || *(end - 1) == '/') ? "" : "/");
			if (ret == -1)
				break;
			if (sizeof(name) != (size_t)ret + 1)
				continue;
			DRV_LOG(DEBUG, "looking for rdma-core glue as \"%s\"",
				name);
			handle = dlopen(name, RTLD_LAZY);
			break;
		} while (1);
		path[i] = end + 1;
		if (!*end)
			++i;
	}
	if (!handle) {
		rte_errno = EINVAL;
		dlmsg = dlerror();
		if (dlmsg)
			DRV_LOG(WARNING, "cannot load glue library: %s", dlmsg);
		goto glue_error;
	}
	sym = dlsym(handle, "mlx5_glue");
	if (!sym || !*sym) {
		rte_errno = EINVAL;
		dlmsg = dlerror();
		if (dlmsg)
			DRV_LOG(ERR, "cannot resolve glue symbol: %s", dlmsg);
		goto glue_error;
	}
	mlx5_glue = *sym;
	return 0;
glue_error:
	if (handle)
		dlclose(handle);
	DRV_LOG(WARNING,
		"cannot initialize PMD due to missing run-time dependency on"
		" rdma-core libraries (libibverbs, libmlx5)");
	return -rte_errno;
}

#endif /* RTE_LIBRTE_MLX5_DLOPEN_DEPS */
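
/*
 * Illustrative override (path is hypothetical): a process whose effective
 * and real user/group IDs match may point the PMD at an alternate glue
 * library location at run time, e.g.:
 *
 *	MLX5_GLUE_PATH=/opt/dpdk/glue ./testpmd ...
 */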

/**
 * Driver initialization routine.
 */
RTE_INIT(rte_mlx5_pmd_init)
{
	/* Initialize driver log type. */
	mlx5_logtype = rte_log_register("pmd.net.mlx5");
	if (mlx5_logtype >= 0)
		rte_log_set_level(mlx5_logtype, RTE_LOG_NOTICE);
	/* Build the static tables for Verbs conversion. */
	mlx5_set_ptype_table();
	mlx5_set_cksum_table();
	mlx5_set_swp_types_table();
	/*
	 * RDMAV_HUGEPAGES_SAFE tells ibv_fork_init() we intend to use
	 * huge pages. Calling ibv_fork_init() during init allows
	 * applications to use fork() safely for purposes other than
	 * using this PMD, which is not supported in forked processes.
	 */
	setenv("RDMAV_HUGEPAGES_SAFE", "1", 1);
	/* Match the size of Rx completion entry to the size of a cacheline. */
	if (RTE_CACHE_LINE_SIZE == 128)
		setenv("MLX5_CQE_SIZE", "128", 0);
	/*
	 * MLX5_DEVICE_FATAL_CLEANUP tells ibv_destroy functions to
	 * cleanup all the Verbs resources even when the device was removed.
	 */
	setenv("MLX5_DEVICE_FATAL_CLEANUP", "1", 1);
#ifdef RTE_LIBRTE_MLX5_DLOPEN_DEPS
	if (mlx5_glue_init())
		return;
	assert(mlx5_glue);
#endif
#ifndef NDEBUG
	/* Glue structure must not contain any NULL pointers. */
	{
		unsigned int i;

		for (i = 0; i != sizeof(*mlx5_glue) / sizeof(void *); ++i)
			assert(((const void *const *)mlx5_glue)[i]);
	}
#endif
	if (strcmp(mlx5_glue->version, MLX5_GLUE_VERSION)) {
		DRV_LOG(ERR,
			"rdma-core glue \"%s\" mismatch: \"%s\" is required",
			mlx5_glue->version, MLX5_GLUE_VERSION);
		return;
	}
	mlx5_glue->fork_init();
	rte_pci_register(&mlx5_driver);
}

RTE_PMD_EXPORT_NAME(net_mlx5, __COUNTER__);
RTE_PMD_REGISTER_PCI_TABLE(net_mlx5, mlx5_pci_id_map);
RTE_PMD_REGISTER_KMOD_DEP(net_mlx5, "* ib_uverbs & mlx5_core & mlx5_ib");