/*-
 *   BSD LICENSE
 *
 *   Copyright 2015 6WIND S.A.
 *   Copyright 2015 Mellanox.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of 6WIND S.A. nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <stddef.h>
#include <unistd.h>
#include <string.h>
#include <stdio.h>
#include <assert.h>
#include <stdint.h>
#include <inttypes.h>
#include <stdlib.h>
#include <errno.h>
#include <net/if.h>
#include <sys/mman.h>

/* Verbs headers. */
/* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
#ifdef PEDANTIC
#pragma GCC diagnostic ignored "-Wpedantic"
#endif
#include <infiniband/verbs.h>
#include <infiniband/mlx5dv.h>
#ifdef PEDANTIC
#pragma GCC diagnostic error "-Wpedantic"
#endif

#include <rte_malloc.h>
#include <rte_ethdev.h>
#include <rte_ethdev_pci.h>
#include <rte_pci.h>
#include <rte_bus_pci.h>
#include <rte_common.h>
#include <rte_eal_memconfig.h>
#include <rte_kvargs.h>

#include "mlx5.h"
#include "mlx5_utils.h"
#include "mlx5_rxtx.h"
#include "mlx5_autoconf.h"
#include "mlx5_defs.h"

/* Device parameter to enable RX completion queue compression. */
#define MLX5_RXQ_CQE_COMP_EN "rxq_cqe_comp_en"

/* Device parameter to configure inline send. */
#define MLX5_TXQ_INLINE "txq_inline"

/*
 * Device parameter to configure the number of TX queues threshold for
 * enabling inline send.
 */
#define MLX5_TXQS_MIN_INLINE "txqs_min_inline"

/*
 * Device parameter to configure the number of TX queues threshold for
 * enabling vectorized Tx.
 */
#define MLX5_TXQS_MAX_VEC "txqs_max_vec"

/* Device parameter to enable multi-packet send WQEs. */
#define MLX5_TXQ_MPW_EN "txq_mpw_en"

/* Device parameter to include 2 dsegs in the title WQEBB. */
#define MLX5_TXQ_MPW_HDR_DSEG_EN "txq_mpw_hdr_dseg_en"

/* Device parameter to limit the size of inlining packet. */
#define MLX5_TXQ_MAX_INLINE_LEN "txq_max_inline_len"

/* Device parameter to enable hardware TSO offload. */
#define MLX5_TSO "tso"

/* Device parameter to enable hardware Tx vector. */
#define MLX5_TX_VEC_EN "tx_vec_en"

/* Device parameter to enable hardware Rx vector. */
#define MLX5_RX_VEC_EN "rx_vec_en"

/* Default PMD specific parameter value. */
#define MLX5_ARG_UNSET (-1)
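
/*
 * Illustrative example (not part of the original file): the keys above are
 * consumed from the EAL device arguments, e.g.
 *
 *   testpmd -w 0000:05:00.0,txq_inline=128,txqs_min_inline=4,txq_mpw_en=1
 *
 * Each key=value pair is routed through mlx5_args()/mlx5_args_check()
 * below; an unknown key makes device probing fail with -EINVAL.
 */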

#ifndef HAVE_IBV_MLX5_MOD_MPW
#define MLX5DV_CONTEXT_FLAGS_MPW_ALLOWED (1 << 2)
#define MLX5DV_CONTEXT_FLAGS_ENHANCED_MPW (1 << 3)
#endif

#ifndef HAVE_IBV_MLX5_MOD_CQE_128B_COMP
#define MLX5DV_CONTEXT_FLAGS_CQE_128B_COMP (1 << 4)
#endif

/** Storage for parsed device parameters (MLX5_ARG_UNSET means "not set"). */
struct mlx5_args {
	int cqe_comp;
	int txq_inline;
	int txqs_inline;
	int txqs_vec;
	int mps;
	int mpw_hdr_dseg;
	int inline_max_packet_sz;
	int tso;
	int tx_vec_en;
	int rx_vec_en;
};

/** Driver-specific log messages type. */
int mlx5_logtype;

/**
 * Retrieve integer value from environment variable.
 *
 * @param[in] name
 *   Environment variable name.
 *
 * @return
 *   Integer value, 0 if the variable is not set.
 */
int
mlx5_getenv_int(const char *name)
{
	const char *val = getenv(name);

	if (val == NULL)
		return 0;
	return atoi(val);
}
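
/*
 * Usage sketch (illustrative only, "MLX5_FOO" is a made-up name): with
 * MLX5_FOO=42 in the environment, mlx5_getenv_int("MLX5_FOO") returns 42.
 * An unset variable returns 0, so callers cannot distinguish "unset" from
 * an explicit 0.
 */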

/**
 * Verbs callback to allocate memory. The buffer is expected to reside
 * inside a huge page, hence allocation goes through rte_malloc_socket().
 * Please note that all allocations must respect the alignment from libmlx5
 * (i.e. currently sysconf(_SC_PAGESIZE)).
 *
 * @param[in] size
 *   The size in bytes of the memory to allocate.
 * @param[in] data
 *   A pointer to the callback data.
 *
 * @return
 *   Allocated buffer, NULL otherwise and rte_errno is set.
 */
static void *
mlx5_alloc_verbs_buf(size_t size, void *data)
{
	struct priv *priv = data;
	void *ret;
	size_t alignment = sysconf(_SC_PAGESIZE);
	unsigned int socket = SOCKET_ID_ANY;

	/* Allocate on the same NUMA socket as the queue being created. */
	if (priv->verbs_alloc_ctx.type == MLX5_VERBS_ALLOC_TYPE_TX_QUEUE) {
		const struct mlx5_txq_ctrl *ctrl = priv->verbs_alloc_ctx.obj;

		socket = ctrl->socket;
	} else if (priv->verbs_alloc_ctx.type ==
		   MLX5_VERBS_ALLOC_TYPE_RX_QUEUE) {
		const struct mlx5_rxq_ctrl *ctrl = priv->verbs_alloc_ctx.obj;

		socket = ctrl->socket;
	}
	assert(data != NULL);
	ret = rte_malloc_socket(__func__, size, alignment, socket);
	if (!ret && size)
		rte_errno = ENOMEM;
	return ret;
}
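
/*
 * This pair of callbacks is registered with libmlx5 through
 * mlx5dv_set_context_attr(MLX5DV_CTX_ATTR_BUF_ALLOCATORS) near the end of
 * mlx5_pci_probe() below, so Verbs control-path buffers for queue objects
 * land in DPDK hugepage memory on the proper NUMA socket.
 */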

/**
 * Verbs callback to free memory.
 *
 * @param[in] ptr
 *   A pointer to the memory to free.
 * @param[in] data
 *   A pointer to the callback data.
 */
static void
mlx5_free_verbs_buf(void *ptr, void *data __rte_unused)
{
	assert(data != NULL);
	rte_free(ptr);
}

/**
 * DPDK callback to close the device.
 *
 * Destroy all queues and objects, free memory.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 */
static void
mlx5_dev_close(struct rte_eth_dev *dev)
{
	struct priv *priv = dev->data->dev_private;
	unsigned int i;
	int ret;

	DRV_LOG(DEBUG, "port %u closing device \"%s\"",
		dev->data->port_id,
		((priv->ctx != NULL) ? priv->ctx->device->name : ""));
	/* In case mlx5_dev_stop() has not been called. */
	mlx5_dev_interrupt_handler_uninstall(dev);
	mlx5_traffic_disable(dev);
	/* Prevent crashes when queues are still in use. */
	dev->rx_pkt_burst = removed_rx_burst;
	dev->tx_pkt_burst = removed_tx_burst;
	if (priv->rxqs != NULL) {
		/* XXX race condition if mlx5_rx_burst() is still running. */
		usleep(1000);
		for (i = 0; (i != priv->rxqs_n); ++i)
			mlx5_rxq_release(dev, i);
		priv->rxqs_n = 0;
		priv->rxqs = NULL;
	}
	if (priv->txqs != NULL) {
		/* XXX race condition if mlx5_tx_burst() is still running. */
		usleep(1000);
		for (i = 0; (i != priv->txqs_n); ++i)
			mlx5_txq_release(dev, i);
		priv->txqs_n = 0;
		priv->txqs = NULL;
	}
	mlx5_mr_deregister_memseg(dev);
	if (priv->pd != NULL) {
		assert(priv->ctx != NULL);
		claim_zero(ibv_dealloc_pd(priv->pd));
		claim_zero(ibv_close_device(priv->ctx));
	} else
		assert(priv->ctx == NULL);
	if (priv->rss_conf.rss_key != NULL)
		rte_free(priv->rss_conf.rss_key);
	if (priv->reta_idx != NULL)
		rte_free(priv->reta_idx);
	if (priv->primary_socket)
		mlx5_socket_uninit(dev);
	ret = mlx5_hrxq_ibv_verify(dev);
	if (ret)
		DRV_LOG(WARNING, "port %u some hash Rx queues still remain",
			dev->data->port_id);
	ret = mlx5_ind_table_ibv_verify(dev);
	if (ret)
		DRV_LOG(WARNING, "port %u some indirection tables still remain",
			dev->data->port_id);
	ret = mlx5_rxq_ibv_verify(dev);
	if (ret)
		DRV_LOG(WARNING, "port %u some Verbs Rx queues still remain",
			dev->data->port_id);
	ret = mlx5_rxq_verify(dev);
	if (ret)
		DRV_LOG(WARNING, "port %u some Rx queues still remain",
			dev->data->port_id);
	ret = mlx5_txq_ibv_verify(dev);
	if (ret)
		DRV_LOG(WARNING, "port %u some Verbs Tx queues still remain",
			dev->data->port_id);
	ret = mlx5_txq_verify(dev);
	if (ret)
		DRV_LOG(WARNING, "port %u some Tx queues still remain",
			dev->data->port_id);
	ret = mlx5_flow_verify(dev);
	if (ret)
		DRV_LOG(WARNING, "port %u some flows still remain",
			dev->data->port_id);
	memset(priv, 0, sizeof(*priv));
}
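
/*
 * Note: mlx5_dev_close() is wired as the .dev_close operation in the
 * eth_dev_ops tables below, so it runs when the application calls
 * rte_eth_dev_close() on the port.
 */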

const struct eth_dev_ops mlx5_dev_ops = {
	.dev_configure = mlx5_dev_configure,
	.dev_start = mlx5_dev_start,
	.dev_stop = mlx5_dev_stop,
	.dev_set_link_down = mlx5_set_link_down,
	.dev_set_link_up = mlx5_set_link_up,
	.dev_close = mlx5_dev_close,
	.promiscuous_enable = mlx5_promiscuous_enable,
	.promiscuous_disable = mlx5_promiscuous_disable,
	.allmulticast_enable = mlx5_allmulticast_enable,
	.allmulticast_disable = mlx5_allmulticast_disable,
	.link_update = mlx5_link_update,
	.stats_get = mlx5_stats_get,
	.stats_reset = mlx5_stats_reset,
	.xstats_get = mlx5_xstats_get,
	.xstats_reset = mlx5_xstats_reset,
	.xstats_get_names = mlx5_xstats_get_names,
	.dev_infos_get = mlx5_dev_infos_get,
	.dev_supported_ptypes_get = mlx5_dev_supported_ptypes_get,
	.vlan_filter_set = mlx5_vlan_filter_set,
	.rx_queue_setup = mlx5_rx_queue_setup,
	.tx_queue_setup = mlx5_tx_queue_setup,
	.rx_queue_release = mlx5_rx_queue_release,
	.tx_queue_release = mlx5_tx_queue_release,
	.flow_ctrl_get = mlx5_dev_get_flow_ctrl,
	.flow_ctrl_set = mlx5_dev_set_flow_ctrl,
	.mac_addr_remove = mlx5_mac_addr_remove,
	.mac_addr_add = mlx5_mac_addr_add,
	.mac_addr_set = mlx5_mac_addr_set,
	.mtu_set = mlx5_dev_set_mtu,
	.vlan_strip_queue_set = mlx5_vlan_strip_queue_set,
	.vlan_offload_set = mlx5_vlan_offload_set,
	.reta_update = mlx5_dev_rss_reta_update,
	.reta_query = mlx5_dev_rss_reta_query,
	.rss_hash_update = mlx5_rss_hash_update,
	.rss_hash_conf_get = mlx5_rss_hash_conf_get,
	.filter_ctrl = mlx5_dev_filter_ctrl,
	.rx_descriptor_status = mlx5_rx_descriptor_status,
	.tx_descriptor_status = mlx5_tx_descriptor_status,
	.rx_queue_intr_enable = mlx5_rx_intr_enable,
	.rx_queue_intr_disable = mlx5_rx_intr_disable,
};

/* Operations available to a secondary process. */
static const struct eth_dev_ops mlx5_dev_sec_ops = {
	.stats_get = mlx5_stats_get,
	.stats_reset = mlx5_stats_reset,
	.xstats_get = mlx5_xstats_get,
	.xstats_reset = mlx5_xstats_reset,
	.xstats_get_names = mlx5_xstats_get_names,
	.dev_infos_get = mlx5_dev_infos_get,
	.rx_descriptor_status = mlx5_rx_descriptor_status,
	.tx_descriptor_status = mlx5_tx_descriptor_status,
};

/* Available operations in flow isolated mode. */
const struct eth_dev_ops mlx5_dev_ops_isolate = {
	.dev_configure = mlx5_dev_configure,
	.dev_start = mlx5_dev_start,
	.dev_stop = mlx5_dev_stop,
	.dev_set_link_down = mlx5_set_link_down,
	.dev_set_link_up = mlx5_set_link_up,
	.dev_close = mlx5_dev_close,
	.promiscuous_enable = mlx5_promiscuous_enable,
	.promiscuous_disable = mlx5_promiscuous_disable,
	.allmulticast_enable = mlx5_allmulticast_enable,
	.allmulticast_disable = mlx5_allmulticast_disable,
	.link_update = mlx5_link_update,
	.stats_get = mlx5_stats_get,
	.stats_reset = mlx5_stats_reset,
	.xstats_get = mlx5_xstats_get,
	.xstats_reset = mlx5_xstats_reset,
	.xstats_get_names = mlx5_xstats_get_names,
	.dev_infos_get = mlx5_dev_infos_get,
	.dev_supported_ptypes_get = mlx5_dev_supported_ptypes_get,
	.vlan_filter_set = mlx5_vlan_filter_set,
	.rx_queue_setup = mlx5_rx_queue_setup,
	.tx_queue_setup = mlx5_tx_queue_setup,
	.rx_queue_release = mlx5_rx_queue_release,
	.tx_queue_release = mlx5_tx_queue_release,
	.flow_ctrl_get = mlx5_dev_get_flow_ctrl,
	.flow_ctrl_set = mlx5_dev_set_flow_ctrl,
	.mac_addr_remove = mlx5_mac_addr_remove,
	.mac_addr_add = mlx5_mac_addr_add,
	.mac_addr_set = mlx5_mac_addr_set,
	.mtu_set = mlx5_dev_set_mtu,
	.vlan_strip_queue_set = mlx5_vlan_strip_queue_set,
	.vlan_offload_set = mlx5_vlan_offload_set,
	.filter_ctrl = mlx5_dev_filter_ctrl,
	.rx_descriptor_status = mlx5_rx_descriptor_status,
	.tx_descriptor_status = mlx5_tx_descriptor_status,
	.rx_queue_intr_enable = mlx5_rx_intr_enable,
	.rx_queue_intr_disable = mlx5_rx_intr_disable,
};
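
/*
 * Note: compared with mlx5_dev_ops, the isolated-mode table above omits
 * the RSS/RETA operations (reta_update, reta_query, rss_hash_update,
 * rss_hash_conf_get); in flow isolated mode traffic steering is driven
 * entirely by rte_flow rules, so global RSS configuration does not apply.
 */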

/** PCI <-> port association for probed mlx5 adapters. */
static struct {
	struct rte_pci_addr pci_addr; /* Associated PCI address. */
	uint32_t ports; /* Physical ports bitfield. */
} mlx5_dev[32];

/**
 * Get device index in mlx5_dev[] from PCI bus address.
 *
 * @param[in] pci_addr
 *   PCI bus address to look for.
 *
 * @return
 *   mlx5_dev[] index on success, -1 on failure.
 */
static int
mlx5_dev_idx(struct rte_pci_addr *pci_addr)
{
	unsigned int i;
	int ret = -1;

	assert(pci_addr != NULL);
	for (i = 0; (i != RTE_DIM(mlx5_dev)); ++i) {
		if ((mlx5_dev[i].pci_addr.domain == pci_addr->domain) &&
		    (mlx5_dev[i].pci_addr.bus == pci_addr->bus) &&
		    (mlx5_dev[i].pci_addr.devid == pci_addr->devid) &&
		    (mlx5_dev[i].pci_addr.function == pci_addr->function))
			return i;
		/* Remember the first free slot as a fallback. */
		if ((mlx5_dev[i].ports == 0) && (ret == -1))
			ret = i;
	}
	return ret;
}

/**
 * Verify and store value for device argument.
 *
 * @param[in] key
 *   Key argument to verify.
 * @param[in] val
 *   Value associated with key.
 * @param opaque
 *   User data.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_args_check(const char *key, const char *val, void *opaque)
{
	struct mlx5_args *args = opaque;
	unsigned long tmp;

	errno = 0;
	tmp = strtoul(val, NULL, 0);
	if (errno) {
		rte_errno = errno;
		DRV_LOG(WARNING, "%s: \"%s\" is not a valid integer", key, val);
		return -rte_errno;
	}
	if (strcmp(MLX5_RXQ_CQE_COMP_EN, key) == 0) {
		args->cqe_comp = !!tmp;
	} else if (strcmp(MLX5_TXQ_INLINE, key) == 0) {
		args->txq_inline = tmp;
	} else if (strcmp(MLX5_TXQS_MIN_INLINE, key) == 0) {
		args->txqs_inline = tmp;
	} else if (strcmp(MLX5_TXQS_MAX_VEC, key) == 0) {
		args->txqs_vec = tmp;
	} else if (strcmp(MLX5_TXQ_MPW_EN, key) == 0) {
		args->mps = !!tmp;
	} else if (strcmp(MLX5_TXQ_MPW_HDR_DSEG_EN, key) == 0) {
		args->mpw_hdr_dseg = !!tmp;
	} else if (strcmp(MLX5_TXQ_MAX_INLINE_LEN, key) == 0) {
		args->inline_max_packet_sz = tmp;
	} else if (strcmp(MLX5_TSO, key) == 0) {
		args->tso = !!tmp;
	} else if (strcmp(MLX5_TX_VEC_EN, key) == 0) {
		args->tx_vec_en = !!tmp;
	} else if (strcmp(MLX5_RX_VEC_EN, key) == 0) {
		args->rx_vec_en = !!tmp;
	} else {
		DRV_LOG(WARNING, "%s: unknown parameter", key);
		rte_errno = EINVAL;
		return -rte_errno;
	}
	return 0;
}

/**
 * Parse device parameters.
 *
 * @param args
 *   Device arguments structure to fill.
 * @param devargs
 *   Device arguments to parse.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_args(struct mlx5_args *args, struct rte_devargs *devargs)
{
	const char **params = (const char *[]){
		MLX5_RXQ_CQE_COMP_EN,
		MLX5_TXQ_INLINE,
		MLX5_TXQS_MIN_INLINE,
		MLX5_TXQS_MAX_VEC,
		MLX5_TXQ_MPW_EN,
		MLX5_TXQ_MPW_HDR_DSEG_EN,
		MLX5_TXQ_MAX_INLINE_LEN,
		MLX5_TSO,
		MLX5_TX_VEC_EN,
		MLX5_RX_VEC_EN,
		NULL,
	};
	struct rte_kvargs *kvlist;
	int ret = 0;
	int i;

	if (devargs == NULL)
		return 0;
	/* Following UGLY cast is done to pass checkpatch. */
	kvlist = rte_kvargs_parse(devargs->args, params);
	if (kvlist == NULL) {
		rte_errno = EINVAL;
		return -rte_errno;
	}
	/* Process parameters. */
	for (i = 0; (params[i] != NULL); ++i) {
		if (rte_kvargs_count(kvlist, params[i])) {
			ret = rte_kvargs_process(kvlist, params[i],
						 mlx5_args_check, args);
			if (ret) {
				rte_errno = EINVAL;
				rte_kvargs_free(kvlist);
				return -rte_errno;
			}
		}
	}
	rte_kvargs_free(kvlist);
	return 0;
}
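
/*
 * For reference (illustrative values): with devargs->args set to
 * "rxq_cqe_comp_en=0,txq_inline=200", rte_kvargs_parse() splits the string
 * into key/value pairs and rte_kvargs_process() calls mlx5_args_check()
 * once per recognized key, leaving args->cqe_comp == 0 and
 * args->txq_inline == 200.
 */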

static struct rte_pci_driver mlx5_driver;

/*
 * Reserved UAR address space for TXQ UAR (HW doorbell) mapping, a process
 * local resource used by both primary and secondary processes to avoid
 * duplicate reservation.
 * The space has to be available in both primary and secondary processes;
 * TXQ UAR maps to this area using fixed mmap without double checking.
 */
static void *uar_base;

/**
 * Reserve UAR address space for primary process.
 *
 * @param[in] dev
 *   Pointer to Ethernet device.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_uar_init_primary(struct rte_eth_dev *dev)
{
	struct priv *priv = dev->data->dev_private;
	void *addr = (void *)0;
	int i;
	const struct rte_mem_config *mcfg;

	if (uar_base) { /* UAR address space mapped. */
		priv->uar_base = uar_base;
		return 0;
	}
	/* Find out the lower bound of hugepage segments. */
	mcfg = rte_eal_get_configuration()->mem_config;
	for (i = 0; i < RTE_MAX_MEMSEG && mcfg->memseg[i].addr; i++) {
		if (addr)
			addr = RTE_MIN(addr, mcfg->memseg[i].addr);
		else
			addr = mcfg->memseg[i].addr;
	}
	/* Keep distance to hugepages to minimize potential conflicts. */
	addr = RTE_PTR_SUB(addr, MLX5_UAR_OFFSET + MLX5_UAR_SIZE);
	/* Anonymous mmap, no real memory consumption. */
	addr = mmap(addr, MLX5_UAR_SIZE,
		    PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (addr == MAP_FAILED) {
		DRV_LOG(ERR,
			"port %u failed to reserve UAR address space, please"
			" adjust MLX5_UAR_SIZE or try --base-virtaddr",
			dev->data->port_id);
		rte_errno = ENOMEM;
		return -rte_errno;
	}
	/*
	 * Accept either the same addr or a new addr returned from mmap if
	 * the target range is occupied.
	 */
	DRV_LOG(INFO, "port %u reserved UAR address space: %p",
		dev->data->port_id, addr);
	priv->uar_base = addr; /* For primary and secondary UAR re-mmap. */
	uar_base = addr; /* Process local, don't reserve again. */
	return 0;
}
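
/*
 * Worked example (illustrative numbers): if the lowest hugepage segment
 * starts at 0x7f8000000000, the reservation above is attempted at
 * 0x7f8000000000 - (MLX5_UAR_OFFSET + MLX5_UAR_SIZE). The MLX5_UAR_OFFSET
 * gap keeps the fixed UAR window away from the hugepage area so that later
 * mappings are unlikely to collide with it.
 */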

/**
 * Reserve UAR address space for secondary process, align with
 * primary process.
 *
 * @param[in] dev
 *   Pointer to Ethernet device.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_uar_init_secondary(struct rte_eth_dev *dev)
{
	struct priv *priv = dev->data->dev_private;
	void *addr;

	assert(priv->uar_base);
	if (uar_base) { /* Already reserved. */
		assert(uar_base == priv->uar_base);
		return 0;
	}
	/* Anonymous mmap, no real memory consumption. */
	addr = mmap(priv->uar_base, MLX5_UAR_SIZE,
		    PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (addr == MAP_FAILED) {
		DRV_LOG(ERR, "port %u UAR mmap failed: %p size: %llu",
			dev->data->port_id, priv->uar_base, MLX5_UAR_SIZE);
		rte_errno = ENXIO;
		return -rte_errno;
	}
	if (priv->uar_base != addr) {
		DRV_LOG(ERR,
			"port %u UAR address %p size %llu occupied, please"
			" adjust MLX5_UAR_OFFSET or try EAL parameter"
			" --base-virtaddr",
			dev->data->port_id, priv->uar_base, MLX5_UAR_SIZE);
		rte_errno = ENXIO;
		return -rte_errno;
	}
	uar_base = addr; /* Process local, don't reserve again. */
	DRV_LOG(INFO, "port %u reserved UAR address space: %p",
		dev->data->port_id, addr);
	return 0;
}

/**
 * Assign parameters from args into priv; only non-default values are
 * considered.
 *
 * @param[out] priv
 *   Pointer to private structure.
 * @param[in] args
 *   Pointer to args values.
 */
static void
mlx5_args_assign(struct priv *priv, struct mlx5_args *args)
{
	if (args->cqe_comp != MLX5_ARG_UNSET)
		priv->cqe_comp = args->cqe_comp;
	if (args->txq_inline != MLX5_ARG_UNSET)
		priv->txq_inline = args->txq_inline;
	if (args->txqs_inline != MLX5_ARG_UNSET)
		priv->txqs_inline = args->txqs_inline;
	if (args->txqs_vec != MLX5_ARG_UNSET)
		priv->txqs_vec = args->txqs_vec;
	if (args->mps != MLX5_ARG_UNSET) {
		/* MPW can only be disabled; it cannot be forced on when the
		 * device does not support it. */
		priv->mps = args->mps ? priv->mps : 0;
	} else if (priv->mps == MLX5_MPW) {
		/*
		 * MPW is disabled by default, while the Enhanced MPW is
		 * enabled by default.
		 */
		priv->mps = MLX5_MPW_DISABLED;
	}
	if (args->mpw_hdr_dseg != MLX5_ARG_UNSET)
		priv->mpw_hdr_dseg = args->mpw_hdr_dseg;
	if (args->inline_max_packet_sz != MLX5_ARG_UNSET)
		priv->inline_max_packet_sz = args->inline_max_packet_sz;
	if (args->tso != MLX5_ARG_UNSET)
		priv->tso = args->tso;
	if (args->tx_vec_en != MLX5_ARG_UNSET)
		priv->tx_vec_en = args->tx_vec_en;
	if (args->rx_vec_en != MLX5_ARG_UNSET)
		priv->rx_vec_en = args->rx_vec_en;
}

/**
 * DPDK callback to register a PCI device.
 *
 * This function creates an Ethernet device for each port of a given
 * PCI device.
 *
 * @param[in] pci_drv
 *   PCI driver structure (mlx5_driver).
 * @param[in] pci_dev
 *   PCI device information.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
	       struct rte_pci_device *pci_dev)
{
	struct ibv_device **list = NULL;
	struct ibv_device *ibv_dev;
	int err = 0;
	struct ibv_context *attr_ctx = NULL;
	struct ibv_device_attr_ex device_attr;
	unsigned int mps;
	unsigned int cqe_comp;
	unsigned int tunnel_en = 0;
	unsigned int txqs_vec = MLX5_VPMD_MAX_TXQS;
	int idx;
	int i;
	struct mlx5dv_context attrs_out;
#ifdef HAVE_IBV_DEVICE_COUNTERS_SET_SUPPORT
	struct ibv_counter_set_description cs_desc = { .counter_type = 0 };
#endif

	assert(pci_drv == &mlx5_driver);
	/* Get mlx5_dev[] index. */
	idx = mlx5_dev_idx(&pci_dev->addr);
	if (idx == -1) {
		DRV_LOG(ERR, "this driver cannot support any more adapters");
		rte_errno = ENOMEM;
		return -rte_errno;
	}
	DRV_LOG(DEBUG, "using driver device index %d", idx);
	/* Save PCI address. */
	mlx5_dev[idx].pci_addr = pci_dev->addr;
	list = ibv_get_device_list(&i);
	if (list == NULL) {
		assert(errno);
		rte_errno = errno;
		if (rte_errno == ENOSYS)
			DRV_LOG(ERR,
				"cannot list devices, is ib_uverbs loaded?");
		return -rte_errno;
	}
	assert(i >= 0);
	/*
	 * For each listed device, check related sysfs entry against
	 * the provided PCI ID.
	 */
	while (i != 0) {
		struct rte_pci_addr pci_addr;

		--i;
		DRV_LOG(DEBUG, "checking device \"%s\"", list[i]->name);
		if (mlx5_ibv_device_to_pci_addr(list[i], &pci_addr))
			continue;
		if ((pci_dev->addr.domain != pci_addr.domain) ||
		    (pci_dev->addr.bus != pci_addr.bus) ||
		    (pci_dev->addr.devid != pci_addr.devid) ||
		    (pci_dev->addr.function != pci_addr.function))
			continue;
		switch (pci_dev->id.device_id) {
		case PCI_DEVICE_ID_MELLANOX_CONNECTX4:
		case PCI_DEVICE_ID_MELLANOX_CONNECTX4LX:
		case PCI_DEVICE_ID_MELLANOX_CONNECTX5:
		case PCI_DEVICE_ID_MELLANOX_CONNECTX5VF:
		case PCI_DEVICE_ID_MELLANOX_CONNECTX5EX:
		case PCI_DEVICE_ID_MELLANOX_CONNECTX5EXVF:
			tunnel_en = 1;
			break;
		case PCI_DEVICE_ID_MELLANOX_CONNECTX5BF:
			tunnel_en = 1;
			txqs_vec = MLX5_VPMD_MAX_TXQS_BLUEFIELD;
			break;
		default:
			break;
		}
		DRV_LOG(INFO, "PCI information matches, using device \"%s\"",
			list[i]->name);
		attr_ctx = ibv_open_device(list[i]);
		err = errno;
		break;
	}
	if (attr_ctx == NULL) {
		ibv_free_device_list(list);
		switch (err) {
		case 0:
			DRV_LOG(ERR,
				"cannot access device, is mlx5_ib loaded?");
			err = ENODEV;
			break;
		case EINVAL:
			DRV_LOG(ERR,
				"cannot use device, are drivers up to date?");
			break;
		}
		rte_errno = err;
		return -rte_errno;
	}
	ibv_dev = list[i];
	DRV_LOG(DEBUG, "device opened");
	/*
	 * Multi-packet send is supported by ConnectX-4 Lx PF as well
	 * as all ConnectX-5 devices.
	 */
	mlx5dv_query_device(attr_ctx, &attrs_out);
	if (attrs_out.flags & MLX5DV_CONTEXT_FLAGS_MPW_ALLOWED) {
		if (attrs_out.flags & MLX5DV_CONTEXT_FLAGS_ENHANCED_MPW) {
			DRV_LOG(DEBUG, "enhanced MPW is supported");
			mps = MLX5_MPW_ENHANCED;
		} else {
			DRV_LOG(DEBUG, "MPW is supported");
			mps = MLX5_MPW;
		}
	} else {
		DRV_LOG(DEBUG, "MPW isn't supported");
		mps = MLX5_MPW_DISABLED;
	}
	if (RTE_CACHE_LINE_SIZE == 128 &&
	    !(attrs_out.flags & MLX5DV_CONTEXT_FLAGS_CQE_128B_COMP))
		cqe_comp = 0;
	else
		cqe_comp = 1;
	err = ibv_query_device_ex(attr_ctx, NULL, &device_attr);
	if (err) {
		DRV_LOG(DEBUG, "ibv_query_device_ex() failed");
		goto error;
	}
	DRV_LOG(INFO, "%u port(s) detected",
		device_attr.orig_attr.phys_port_cnt);
	for (i = 0; i < device_attr.orig_attr.phys_port_cnt; i++) {
		char name[RTE_ETH_NAME_MAX_LEN];
		uint32_t port = i + 1; /* ports are indexed from one */
		uint32_t test = (1 << i);
		struct ibv_context *ctx = NULL;
		struct ibv_port_attr port_attr;
		struct ibv_pd *pd = NULL;
		struct priv *priv = NULL;
		struct rte_eth_dev *eth_dev = NULL;
		struct ibv_device_attr_ex device_attr_ex;
		struct ether_addr mac;
		struct mlx5_args args = {
			.cqe_comp = MLX5_ARG_UNSET,
			.txq_inline = MLX5_ARG_UNSET,
			.txqs_inline = MLX5_ARG_UNSET,
			.txqs_vec = MLX5_ARG_UNSET,
			.mps = MLX5_ARG_UNSET,
			.mpw_hdr_dseg = MLX5_ARG_UNSET,
			.inline_max_packet_sz = MLX5_ARG_UNSET,
			.tso = MLX5_ARG_UNSET,
			.tx_vec_en = MLX5_ARG_UNSET,
			.rx_vec_en = MLX5_ARG_UNSET,
		};

		snprintf(name, sizeof(name), PCI_PRI_FMT,
			 pci_dev->addr.domain, pci_dev->addr.bus,
			 pci_dev->addr.devid, pci_dev->addr.function);
		mlx5_dev[idx].ports |= test;
		if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
			eth_dev = rte_eth_dev_attach_secondary(name);
			if (eth_dev == NULL) {
				DRV_LOG(ERR, "can not attach rte ethdev");
				rte_errno = ENOMEM;
				err = rte_errno;
				goto error;
			}
			eth_dev->device = &pci_dev->device;
			eth_dev->dev_ops = &mlx5_dev_sec_ops;
			err = mlx5_uar_init_secondary(eth_dev);
			if (err) {
				err = rte_errno;
				goto error;
			}
			/* Receive command fd from primary process. */
			err = mlx5_socket_connect(eth_dev);
			if (err < 0) {
				err = rte_errno;
				goto error;
			}
			/* Remap UAR for Tx queues. */
			err = mlx5_tx_uar_remap(eth_dev, err);
			if (err) {
				err = rte_errno;
				goto error;
			}
			/*
			 * Ethdev pointer is still required as input since
			 * the primary device is not accessible from the
			 * secondary process.
			 */
			eth_dev->rx_pkt_burst =
				mlx5_select_rx_function(eth_dev);
			eth_dev->tx_pkt_burst =
				mlx5_select_tx_function(eth_dev);
			continue;
		}
		DRV_LOG(DEBUG, "using port %u (%08" PRIx32 ")", port, test);
		ctx = ibv_open_device(ibv_dev);
		if (ctx == NULL) {
			err = ENODEV;
			goto port_error;
		}
		/* Check port status. */
		err = ibv_query_port(ctx, port, &port_attr);
		if (err) {
			DRV_LOG(ERR, "port query failed: %s", strerror(err));
			goto port_error;
		}
		if (port_attr.link_layer != IBV_LINK_LAYER_ETHERNET) {
			DRV_LOG(ERR,
				"port %d is not configured in Ethernet mode",
				port);
			err = EINVAL;
			goto port_error;
		}
		if (port_attr.state != IBV_PORT_ACTIVE)
			DRV_LOG(DEBUG, "port %d is not active: \"%s\" (%d)",
				port, ibv_port_state_str(port_attr.state),
				port_attr.state);
		/* Allocate protection domain. */
		pd = ibv_alloc_pd(ctx);
		if (pd == NULL) {
			DRV_LOG(ERR, "PD allocation failure");
			err = ENOMEM;
			goto port_error;
		}
		mlx5_dev[idx].ports |= test;
		/* from rte_ethdev.c */
		priv = rte_zmalloc("ethdev private structure",
				   sizeof(*priv), RTE_CACHE_LINE_SIZE);
		if (priv == NULL) {
			DRV_LOG(ERR, "priv allocation failure");
			err = ENOMEM;
			goto port_error;
		}
		priv->ctx = ctx;
		strncpy(priv->ibdev_path, priv->ctx->device->ibdev_path,
			sizeof(priv->ibdev_path));
		priv->device_attr = device_attr;
		priv->port = port;
		priv->pd = pd;
		priv->mtu = ETHER_MTU;
		priv->mps = mps; /* Enable MPW by default if supported. */
		priv->cqe_comp = cqe_comp;
		priv->tunnel_en = tunnel_en;
		/* Enable vector by default if supported. */
		priv->tx_vec_en = 1;
		priv->rx_vec_en = 1;
		priv->txqs_vec = txqs_vec;
		err = mlx5_args(&args, pci_dev->device.devargs);
		if (err) {
			err = rte_errno;
			DRV_LOG(ERR, "failed to process device arguments: %s",
				strerror(rte_errno));
			goto port_error;
		}
		mlx5_args_assign(priv, &args);
		err = ibv_query_device_ex(ctx, NULL, &device_attr_ex);
		if (err) {
			DRV_LOG(ERR, "ibv_query_device_ex() failed");
			goto port_error;
		}
		priv->hw_csum =
			!!(device_attr_ex.device_cap_flags_ex &
			   IBV_DEVICE_RAW_IP_CSUM);
		DRV_LOG(DEBUG, "checksum offloading is %ssupported",
			(priv->hw_csum ? "" : "not "));
#ifdef HAVE_IBV_DEVICE_VXLAN_SUPPORT
		priv->hw_csum_l2tun = !!(exp_device_attr.exp_device_cap_flags &
					 IBV_DEVICE_VXLAN_SUPPORT);
#endif
		DRV_LOG(DEBUG, "Rx L2 tunnel checksum offloads are %ssupported",
			(priv->hw_csum_l2tun ? "" : "not "));
#ifdef HAVE_IBV_DEVICE_COUNTERS_SET_SUPPORT
		priv->counter_set_supported = !!(device_attr.max_counter_sets);
		ibv_describe_counter_set(ctx, 0, &cs_desc);
		DRV_LOG(DEBUG,
			"counter type = %d, num of cs = %ld, attributes = %d",
			cs_desc.counter_type, cs_desc.num_of_cs,
			cs_desc.attributes);
#endif
		priv->ind_table_max_size =
			device_attr_ex.rss_caps.max_rwq_indirection_table_size;
		/* Remove this check once DPDK supports larger/variable
		 * indirection tables. */
		if (priv->ind_table_max_size >
		    (unsigned int)ETH_RSS_RETA_SIZE_512)
			priv->ind_table_max_size = ETH_RSS_RETA_SIZE_512;
		DRV_LOG(DEBUG, "maximum Rx indirection table size is %u",
			priv->ind_table_max_size);
		priv->hw_vlan_strip = !!(device_attr_ex.raw_packet_caps &
					 IBV_RAW_PACKET_CAP_CVLAN_STRIPPING);
		DRV_LOG(DEBUG, "VLAN stripping is %ssupported",
			(priv->hw_vlan_strip ? "" : "not "));
		priv->hw_fcs_strip = !!(device_attr_ex.raw_packet_caps &
					IBV_RAW_PACKET_CAP_SCATTER_FCS);
		DRV_LOG(DEBUG, "FCS stripping configuration is %ssupported",
			(priv->hw_fcs_strip ? "" : "not "));
#ifdef HAVE_IBV_WQ_FLAG_RX_END_PADDING
		priv->hw_padding = !!device_attr_ex.rx_pad_end_addr_align;
#endif
		DRV_LOG(DEBUG,
			"hardware Rx end alignment padding is %ssupported",
			(priv->hw_padding ? "" : "not "));
		priv->tso = ((priv->tso) &&
			     (device_attr_ex.tso_caps.max_tso > 0) &&
			     (device_attr_ex.tso_caps.supported_qpts &
			      (1 << IBV_QPT_RAW_PACKET)));
		if (priv->tso)
			priv->max_tso_payload_sz =
				device_attr_ex.tso_caps.max_tso;
		if (priv->mps && !mps) {
			DRV_LOG(ERR,
				"multi-packet send not supported on this device"
				" (" MLX5_TXQ_MPW_EN ")");
			err = ENOTSUP;
			goto port_error;
		} else if (priv->mps && priv->tso) {
			DRV_LOG(WARNING,
				"multi-packet send not supported in conjunction"
				" with TSO. MPS disabled");
			priv->mps = 0;
		}
		DRV_LOG(INFO, "%s MPS is %s",
			priv->mps == MLX5_MPW_ENHANCED ? "enhanced " : "",
			priv->mps != MLX5_MPW_DISABLED ? "enabled" :
			"disabled");
		/* Set default values for Enhanced MPW, a.k.a. MPWv2. */
		if (priv->mps == MLX5_MPW_ENHANCED) {
			if (args.txqs_inline == MLX5_ARG_UNSET)
				priv->txqs_inline = MLX5_EMPW_MIN_TXQS;
			if (args.inline_max_packet_sz == MLX5_ARG_UNSET)
				priv->inline_max_packet_sz =
					MLX5_EMPW_MAX_INLINE_LEN;
			if (args.txq_inline == MLX5_ARG_UNSET)
				priv->txq_inline = MLX5_WQE_SIZE_MAX -
						   MLX5_WQE_SIZE;
		}
		if (priv->cqe_comp && !cqe_comp) {
			DRV_LOG(WARNING, "Rx CQE compression isn't supported");
			priv->cqe_comp = 0;
		}
		eth_dev = rte_eth_dev_allocate(name);
		if (eth_dev == NULL) {
			DRV_LOG(ERR, "can not allocate rte ethdev");
			err = ENOMEM;
			goto port_error;
		}
		eth_dev->data->dev_private = priv;
		priv->dev_data = eth_dev->data;
		eth_dev->data->mac_addrs = priv->mac;
		eth_dev->device = &pci_dev->device;
		rte_eth_copy_pci_info(eth_dev, pci_dev);
		eth_dev->device->driver = &mlx5_driver.driver;
		err = mlx5_uar_init_primary(eth_dev);
		if (err) {
			err = rte_errno;
			goto port_error;
		}
		/* Configure the first MAC address by default. */
		if (mlx5_get_mac(eth_dev, &mac.addr_bytes)) {
			DRV_LOG(ERR,
				"port %u cannot get MAC address, is mlx5_en"
				" loaded? (errno: %s)",
				eth_dev->data->port_id, strerror(errno));
			err = ENODEV;
			goto port_error;
		}
		DRV_LOG(INFO,
			"port %u MAC address is %02x:%02x:%02x:%02x:%02x:%02x",
			eth_dev->data->port_id,
			mac.addr_bytes[0], mac.addr_bytes[1],
			mac.addr_bytes[2], mac.addr_bytes[3],
			mac.addr_bytes[4], mac.addr_bytes[5]);
#ifndef NDEBUG
		{
			char ifname[IF_NAMESIZE];

			if (mlx5_get_ifname(eth_dev, &ifname) == 0)
				DRV_LOG(DEBUG, "port %u ifname is \"%s\"",
					eth_dev->data->port_id, ifname);
			else
				DRV_LOG(DEBUG, "port %u ifname is unknown",
					eth_dev->data->port_id);
		}
#endif
		/* Get actual MTU if possible. */
		err = mlx5_get_mtu(eth_dev, &priv->mtu);
		if (err) {
			err = rte_errno;
			goto port_error;
		}
		DRV_LOG(DEBUG, "port %u MTU is %u", eth_dev->data->port_id,
			priv->mtu);
		/*
		 * Initialize burst functions to prevent crashes before
		 * link-up.
		 */
		eth_dev->rx_pkt_burst = removed_rx_burst;
		eth_dev->tx_pkt_burst = removed_tx_burst;
		eth_dev->dev_ops = &mlx5_dev_ops;
		/* Register MAC address. */
		claim_zero(mlx5_mac_addr_add(eth_dev, &mac, 0, 0));
		TAILQ_INIT(&priv->flows);
		TAILQ_INIT(&priv->ctrl_flows);
		/* Hint libmlx5 to use PMD allocator for data plane resources. */
		struct mlx5dv_ctx_allocators alctr = {
			.alloc = &mlx5_alloc_verbs_buf,
			.free = &mlx5_free_verbs_buf,
			.data = priv,
		};
		mlx5dv_set_context_attr(ctx, MLX5DV_CTX_ATTR_BUF_ALLOCATORS,
					(void *)((uintptr_t)&alctr));
		/* Bring Ethernet device up. */
		DRV_LOG(DEBUG, "port %u forcing Ethernet interface up",
			eth_dev->data->port_id);
		mlx5_set_link_up(eth_dev);
		/*
		 * Even though the interrupt handler is not installed yet,
		 * interrupts will still trigger on the async_fd from
		 * Verbs context returned by ibv_open_device().
		 */
		mlx5_link_update(eth_dev, 0);
		continue;
port_error:
		if (priv)
			rte_free(priv);
		if (pd)
			claim_zero(ibv_dealloc_pd(pd));
		if (ctx)
			claim_zero(ibv_close_device(ctx));
		if (eth_dev && rte_eal_process_type() == RTE_PROC_PRIMARY)
			rte_eth_dev_release_port(eth_dev);
		break;
	}
	/*
	 * XXX if something went wrong in the loop above, there is a resource
	 * leak (ctx, pd, priv, DPDK ethdev) but we can do nothing about it as
	 * long as DPDK does not provide a way to deallocate an ethdev and a
	 * way to enumerate the registered ethdevs to free the previous ones.
	 */
	/* No port found, complain. */
	if (!mlx5_dev[idx].ports) {
		rte_errno = ENODEV;
		err = rte_errno;
	}
error:
	if (attr_ctx)
		claim_zero(ibv_close_device(attr_ctx));
	if (list)
		ibv_free_device_list(list);
	if (err) {
		rte_errno = err;
		return -rte_errno;
	}
	return 0;
}

static const struct rte_pci_id mlx5_pci_id_map[] = {
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
			       PCI_DEVICE_ID_MELLANOX_CONNECTX4)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
			       PCI_DEVICE_ID_MELLANOX_CONNECTX4VF)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
			       PCI_DEVICE_ID_MELLANOX_CONNECTX4LX)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
			       PCI_DEVICE_ID_MELLANOX_CONNECTX4LXVF)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
			       PCI_DEVICE_ID_MELLANOX_CONNECTX5)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
			       PCI_DEVICE_ID_MELLANOX_CONNECTX5VF)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
			       PCI_DEVICE_ID_MELLANOX_CONNECTX5EX)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
			       PCI_DEVICE_ID_MELLANOX_CONNECTX5EXVF)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
			       PCI_DEVICE_ID_MELLANOX_CONNECTX5BF)
	},
	{
		.vendor_id = 0
	}
};

static struct rte_pci_driver mlx5_driver = {
	.driver = {
		.name = MLX5_DRIVER_NAME
	},
	.id_table = mlx5_pci_id_map,
	.probe = mlx5_pci_probe,
	.drv_flags = RTE_PCI_DRV_INTR_LSC | RTE_PCI_DRV_INTR_RMV,
};

/**
 * Driver initialization routine.
 */
RTE_INIT(rte_mlx5_pmd_init);
static void
rte_mlx5_pmd_init(void)
{
	/* Build the static table for ptype conversion. */
	mlx5_set_ptype_table();
	/*
	 * RDMAV_HUGEPAGES_SAFE tells ibv_fork_init() we intend to use
	 * huge pages. Calling ibv_fork_init() during init allows
	 * applications to use fork() safely for purposes other than
	 * using this PMD, which is not supported in forked processes.
	 */
	setenv("RDMAV_HUGEPAGES_SAFE", "1", 1);
	/* Match the size of Rx completion entry to the size of a cacheline. */
	if (RTE_CACHE_LINE_SIZE == 128)
		setenv("MLX5_CQE_SIZE", "128", 0);
	ibv_fork_init();
	rte_pci_register(&mlx5_driver);
}

RTE_PMD_EXPORT_NAME(net_mlx5, __COUNTER__);
RTE_PMD_REGISTER_PCI_TABLE(net_mlx5, mlx5_pci_id_map);
RTE_PMD_REGISTER_KMOD_DEP(net_mlx5, "* ib_uverbs & mlx5_core & mlx5_ib");

/** Initialize driver log type. */
RTE_INIT(mlx5_log_init);
static void
mlx5_log_init(void)
{
	mlx5_logtype = rte_log_register("pmd.net.mlx5");
	if (mlx5_logtype >= 0)
		rte_log_set_level(mlx5_logtype, RTE_LOG_NOTICE);
}
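
/*
 * Illustrative usage (exact syntax depends on the DPDK version): once
 * registered, the "pmd.net.mlx5" log type can be raised from its NOTICE
 * default on the command line, e.g.
 *
 *   testpmd --log-level=pmd.net.mlx5,8 ...
 *
 * which enables the DRV_LOG(DEBUG, ...) messages used throughout this file.
 */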