 * Copyright 2015 6WIND S.A.
 * Copyright 2015 Mellanox.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * * Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * * Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in
 *   the documentation and/or other materials provided with the
 *   distribution.
 * * Neither the name of 6WIND S.A. nor the names of its
 *   contributors may be used to endorse or promote products derived
 *   from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <assert.h>
#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <sys/mman.h>

/* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
#pragma GCC diagnostic ignored "-Wpedantic"
#include <infiniband/verbs.h>
#pragma GCC diagnostic error "-Wpedantic"

#include <rte_mbuf.h>
#include <rte_malloc.h>
#include <rte_ethdev.h>
#include <rte_common.h>

#include "mlx5_utils.h"
#include "mlx5_defs.h"
#include "mlx5.h"
#include "mlx5_rxtx.h"
#include "mlx5_autoconf.h"
 * Allocate TX queue elements.
 * Pointer to TX queue structure.
txq_alloc_elts(struct mlx5_txq_ctrl *txq_ctrl)
        const unsigned int elts_n = 1 << txq_ctrl->txq.elts_n;

        for (i = 0; (i != elts_n); ++i)
                (*txq_ctrl->txq.elts)[i] = NULL;
        DRV_LOG(DEBUG, "port %u Tx queue %u allocated and configured %u WRs",
                PORT_ID(txq_ctrl->priv), txq_ctrl->idx, elts_n);
        txq_ctrl->txq.elts_head = 0;
        txq_ctrl->txq.elts_tail = 0;
        txq_ctrl->txq.elts_comp = 0;

 * Free TX queue elements.
 * Pointer to TX queue structure.
txq_free_elts(struct mlx5_txq_ctrl *txq_ctrl)
        const uint16_t elts_n = 1 << txq_ctrl->txq.elts_n;
        const uint16_t elts_m = elts_n - 1;
        uint16_t elts_head = txq_ctrl->txq.elts_head;
        uint16_t elts_tail = txq_ctrl->txq.elts_tail;
        struct rte_mbuf *(*elts)[elts_n] = txq_ctrl->txq.elts;

        DRV_LOG(DEBUG, "port %u Tx queue %u freeing WRs",
                PORT_ID(txq_ctrl->priv), txq_ctrl->idx);
        txq_ctrl->txq.elts_head = 0;
        txq_ctrl->txq.elts_tail = 0;
        txq_ctrl->txq.elts_comp = 0;
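        /*
         * Note: elts_n is a power of two, so "index & elts_m" below is the
         * same as "index % elts_n". For example, with elts_n = 256 the mask
         * is 0xff and a free-running 16-bit elts_tail of 260 selects slot
         * 260 & 0xff = 4; head and tail can therefore wrap around without
         * an explicit modulo.
         */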
        while (elts_tail != elts_head) {
                struct rte_mbuf *elt = (*elts)[elts_tail & elts_m];

                rte_pktmbuf_free_seg(elt);
                memset(&(*elts)[elts_tail & elts_m],
                       sizeof((*elts)[elts_tail & elts_m]));
 * DPDK callback to configure a TX queue.
 * Pointer to Ethernet device structure.
 * Number of descriptors to configure in queue.
 * NUMA socket on which memory must be allocated.
 * Thresholds parameters.
 * 0 on success, a negative errno value otherwise and rte_errno is set.
mlx5_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
                    unsigned int socket, const struct rte_eth_txconf *conf)
        struct priv *priv = dev->data->dev_private;
        struct mlx5_txq_data *txq = (*priv->txqs)[idx];
        struct mlx5_txq_ctrl *txq_ctrl =
                container_of(txq, struct mlx5_txq_ctrl, txq);

        if (desc <= MLX5_TX_COMP_THRESH) {
                "port %u number of descriptors requested for Tx queue"
                " %u must be higher than MLX5_TX_COMP_THRESH, using %u"
                dev->data->port_id, idx, MLX5_TX_COMP_THRESH + 1, desc);
                desc = MLX5_TX_COMP_THRESH + 1;
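        /*
         * Likely rationale (assumption, not stated in this excerpt): the Tx
         * burst path only requests a completion about every
         * MLX5_TX_COMP_THRESH packets, so a ring that is not strictly
         * larger than the threshold could fill up before any completion is
         * ever requested; hence desc is bumped above it.
         */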
        if (!rte_is_power_of_2(desc)) {
                desc = 1 << log2above(desc);
                "port %u increased number of descriptors in Tx queue"
                " %u to the next power of two (%d)",
                dev->data->port_id, idx, desc);
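        /*
         * Example of the rounding above: a request for 500 descriptors
         * becomes 1 << log2above(500) = 512, keeping the ring size a power
         * of two so the index masks used by the data path remain valid.
         */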
        DRV_LOG(DEBUG, "port %u configuring queue %u for %u descriptors",
                dev->data->port_id, idx, desc);
        if (idx >= priv->txqs_n) {
                DRV_LOG(ERR, "port %u Tx queue index out of range (%u >= %u)",
                        dev->data->port_id, idx, priv->txqs_n);
                rte_errno = EOVERFLOW;
        if (!mlx5_txq_releasable(dev, idx)) {
                DRV_LOG(ERR, "port %u unable to release queue index %u",
                        dev->data->port_id, idx);
        mlx5_txq_release(dev, idx);
        txq_ctrl = mlx5_txq_new(dev, idx, desc, socket, conf);
                DRV_LOG(ERR, "port %u unable to allocate queue index %u",
                        dev->data->port_id, idx);
        DRV_LOG(DEBUG, "port %u adding Tx queue %u to list",
                dev->data->port_id, idx);
        (*priv->txqs)[idx] = &txq_ctrl->txq;

 * DPDK callback to release a TX queue.
 * Generic TX queue pointer.
mlx5_tx_queue_release(void *dpdk_txq)
        struct mlx5_txq_data *txq = (struct mlx5_txq_data *)dpdk_txq;
        struct mlx5_txq_ctrl *txq_ctrl;

        txq_ctrl = container_of(txq, struct mlx5_txq_ctrl, txq);
        priv = txq_ctrl->priv;
        for (i = 0; (i != priv->txqs_n); ++i)
                if ((*priv->txqs)[i] == txq) {
                        mlx5_txq_release(ETH_DEV(priv), i);
                        DRV_LOG(DEBUG, "port %u removing Tx queue %u from list",
                                PORT_ID(priv), txq_ctrl->idx);
 * Mmap Tx UAR (HW doorbell) pages into the reserved UAR address space.
 * Both the primary and secondary processes perform the mmap so that the
 * UAR address range is identical in both of them.
 * Pointer to Ethernet device.
 * Verbs file descriptor to map UAR pages.
 * 0 on success, a negative errno value otherwise and rte_errno is set.
mlx5_tx_uar_remap(struct rte_eth_dev *dev, int fd)
        struct priv *priv = dev->data->dev_private;
        uintptr_t pages[priv->txqs_n];
        unsigned int pages_n = 0;
        struct mlx5_txq_data *txq;
        struct mlx5_txq_ctrl *txq_ctrl;
        size_t page_size = sysconf(_SC_PAGESIZE);

        memset(pages, 0, priv->txqs_n * sizeof(uintptr_t));
        /*
         * As in rdma-core, UARs are mapped at OS page-size granularity.
         * Use the page-aligned address to avoid duplicate mmap() calls.
         * See the libmlx5 function mlx5_init_context().
         */
        for (i = 0; i != priv->txqs_n; ++i) {
                if (!(*priv->txqs)[i])
                txq = (*priv->txqs)[i];
                txq_ctrl = container_of(txq, struct mlx5_txq_ctrl, txq);
                assert(txq_ctrl->idx == (uint16_t)i);
                /* UAR address from Verbs, used to find duplicates and the
                 * offset within the page. */
                uar_va = (uintptr_t)txq_ctrl->bf_reg_orig;
                off = uar_va & (page_size - 1); /* Offset in page. */
                uar_va = RTE_ALIGN_FLOOR(uar_va, page_size); /* Page address. */
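                /*
                 * Example with illustrative values: for a 4096-byte page and
                 * bf_reg_orig = 0x7f2a61432c08, off is 0xc08 and the aligned
                 * uar_va is 0x7f2a61432000; queues whose doorbell registers
                 * live in the same page therefore compare equal in the loop
                 * below and share a single mmap().
                 */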
                for (j = 0; j != pages_n; ++j) {
                        if (pages[j] == uar_va) {
                /* New address in the reserved UAR address space. */
                addr = RTE_PTR_ADD(priv->uar_base,
                                   uar_va & (MLX5_UAR_SIZE - 1));
                if (!already_mapped) {
                        pages[pages_n++] = uar_va;
                        /* Fixed mmap to the specified address in the
                         * reserved address space. */
                        ret = mmap(addr, page_size,
                                   PROT_WRITE, MAP_FIXED | MAP_SHARED, fd,
                                   txq_ctrl->uar_mmap_offset);
                        /* A fixed mmap has to return the same address. */
                                "port %u call to mmap failed on UAR"
                                dev->data->port_id, txq_ctrl->idx);
                if (rte_eal_process_type() == RTE_PROC_PRIMARY) /* Save once. */
                        txq_ctrl->txq.bf_reg = RTE_PTR_ADD((void *)addr, off);
                        assert(txq_ctrl->txq.bf_reg ==
                               RTE_PTR_ADD((void *)addr, off));
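                /*
                 * The primary process records bf_reg once; a secondary
                 * process maps the same UAR page at the same reserved
                 * virtual address (MAP_FIXED above), so recomputing
                 * addr + off here must give the pointer the primary already
                 * stored, which is what the assertion checks.
                 */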
 * Create the Tx queue Verbs object.
 * Pointer to Ethernet device.
 * Queue index in the DPDK Tx queue array.
 * The Verbs object initialised, NULL otherwise and rte_errno is set.
struct mlx5_txq_ibv *
mlx5_txq_ibv_new(struct rte_eth_dev *dev, uint16_t idx)
        struct priv *priv = dev->data->dev_private;
        struct mlx5_txq_data *txq_data = (*priv->txqs)[idx];
        struct mlx5_txq_ctrl *txq_ctrl =
                container_of(txq_data, struct mlx5_txq_ctrl, txq);
        struct mlx5_txq_ibv tmpl;
        struct mlx5_txq_ibv *txq_ibv;
        struct ibv_qp_init_attr_ex init;
        struct ibv_cq_init_attr_ex cq;
        struct ibv_qp_attr mod;
        struct ibv_cq_ex cq_attr;
        struct mlx5dv_qp qp = { .comp_mask = MLX5DV_QP_MASK_UAR_MMAP_OFFSET };
        struct mlx5dv_cq cq_info;
        struct mlx5dv_obj obj;
        const int desc = 1 << txq_data->elts_n;

        priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_TX_QUEUE;
        priv->verbs_alloc_ctx.obj = txq_ctrl;
        if (mlx5_getenv_int("MLX5_ENABLE_CQE_COMPRESSION")) {
                "port %u MLX5_ENABLE_CQE_COMPRESSION must never be set",
        memset(&tmpl, 0, sizeof(struct mlx5_txq_ibv));
        attr.cq = (struct ibv_cq_init_attr_ex){
        cqe_n = ((desc / MLX5_TX_COMP_THRESH) - 1) ?
                ((desc / MLX5_TX_COMP_THRESH) - 1) : 1;
        if (priv->mps == MLX5_MPW_ENHANCED)
                cqe_n += MLX5_TX_COMP_THRESH_INLINE_DIV;
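        /*
         * Sizing example, assuming MLX5_TX_COMP_THRESH is 32 (its usual
         * default): with desc = 512 the queue asks for 512 / 32 - 1 = 15
         * CQEs, i.e. roughly one per completion request, and enhanced MPW
         * mode adds MLX5_TX_COMP_THRESH_INLINE_DIV extra entries on top.
         */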
        tmpl.cq = ibv_create_cq(priv->ctx, cqe_n, NULL, NULL, 0);
        if (tmpl.cq == NULL) {
                DRV_LOG(ERR, "port %u Tx queue %u CQ creation failure",
                        dev->data->port_id, idx);
        attr.init = (struct ibv_qp_init_attr_ex){
                /* CQ to be associated with the send queue. */
                /* CQ to be associated with the receive queue. */
                /* Max number of outstanding WRs. */
                        ((priv->device_attr.orig_attr.max_qp_wr <
                         priv->device_attr.orig_attr.max_qp_wr :
                /*
                 * Max number of scatter/gather elements in a WR, must be 1
                 * to prevent libmlx5 from trying to affect too much memory.
                 * TX gather is not impacted by the
                 * priv->device_attr.max_sge limit and will still work
                 * properly.
                 */
                .qp_type = IBV_QPT_RAW_PACKET,
                /*
                 * Do *NOT* enable this: completion events are managed per
                 * Tx burst.
                 */
                .comp_mask = IBV_QP_INIT_ATTR_PD,
        if (txq_data->inline_en)
                attr.init.cap.max_inline_data = txq_ctrl->max_inline_data;
        if (txq_data->tso_en) {
                attr.init.max_tso_header = txq_ctrl->max_tso_header;
                attr.init.comp_mask |= IBV_QP_INIT_ATTR_MAX_TSO_HEADER;
        tmpl.qp = ibv_create_qp_ex(priv->ctx, &attr.init);
        if (tmpl.qp == NULL) {
                DRV_LOG(ERR, "port %u Tx queue %u QP creation failure",
                        dev->data->port_id, idx);
        attr.mod = (struct ibv_qp_attr){
                /* Move the QP to this state. */
                .qp_state = IBV_QPS_INIT,
                /* Primary port number. */
                .port_num = priv->port
        ret = ibv_modify_qp(tmpl.qp, &attr.mod, (IBV_QP_STATE | IBV_QP_PORT));
                "port %u Tx queue %u QP state to IBV_QPS_INIT failed",
                dev->data->port_id, idx);
        attr.mod = (struct ibv_qp_attr){
                .qp_state = IBV_QPS_RTR
        ret = ibv_modify_qp(tmpl.qp, &attr.mod, IBV_QP_STATE);
                "port %u Tx queue %u QP state to IBV_QPS_RTR failed",
                dev->data->port_id, idx);
        attr.mod.qp_state = IBV_QPS_RTS;
        ret = ibv_modify_qp(tmpl.qp, &attr.mod, IBV_QP_STATE);
                "port %u Tx queue %u QP state to IBV_QPS_RTS failed",
                dev->data->port_id, idx);
        txq_ibv = rte_calloc_socket(__func__, 1, sizeof(struct mlx5_txq_ibv), 0,
                DRV_LOG(ERR, "port %u Tx queue %u cannot allocate memory",
                        dev->data->port_id, idx);
        obj.cq.out = &cq_info;
        ret = mlx5dv_init_obj(&obj, MLX5DV_OBJ_CQ | MLX5DV_OBJ_QP);
        if (cq_info.cqe_size != RTE_CACHE_LINE_SIZE) {
                "port %u wrong MLX5_CQE_SIZE environment variable"
                " value: it should be set to %u",
                dev->data->port_id, RTE_CACHE_LINE_SIZE);
        txq_data->cqe_n = log2above(cq_info.cqe_cnt);
        txq_data->qp_num_8s = tmpl.qp->qp_num << 8;
        txq_data->wqes = qp.sq.buf;
        txq_data->wqe_n = log2above(qp.sq.wqe_cnt);
        txq_data->qp_db = &qp.dbrec[MLX5_SND_DBR];
        txq_ctrl->bf_reg_orig = qp.bf.reg;
        txq_data->cq_db = cq_info.dbrec;
                (volatile struct mlx5_cqe (*)[])
                (uintptr_t)cq_info.buf;
        txq_data->wqe_ci = 0;
        txq_data->wqe_pi = 0;
        txq_ibv->qp = tmpl.qp;
        txq_ibv->cq = tmpl.cq;
        rte_atomic32_inc(&txq_ibv->refcnt);
        if (qp.comp_mask & MLX5DV_QP_MASK_UAR_MMAP_OFFSET) {
                txq_ctrl->uar_mmap_offset = qp.uar_mmap_offset;
                "port %u failed to retrieve UAR info, invalid"
        DRV_LOG(DEBUG, "port %u Verbs Tx queue %u: refcnt %d",
                dev->data->port_id, idx, rte_atomic32_read(&txq_ibv->refcnt));
        LIST_INSERT_HEAD(&priv->txqsibv, txq_ibv, next);
        txq_ibv->txq_ctrl = txq_ctrl;
        priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_NONE;
        ret = rte_errno; /* Save rte_errno before cleanup. */
        claim_zero(ibv_destroy_cq(tmpl.cq));
        claim_zero(ibv_destroy_qp(tmpl.qp));
        priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_NONE;
        rte_errno = ret; /* Restore rte_errno. */
 * Get a Tx queue Verbs object.
 * Pointer to Ethernet device.
 * Queue index in the DPDK Tx queue array.
 * The Verbs object if it exists.
struct mlx5_txq_ibv *
mlx5_txq_ibv_get(struct rte_eth_dev *dev, uint16_t idx)
        struct priv *priv = dev->data->dev_private;
        struct mlx5_txq_ctrl *txq_ctrl;

        if (idx >= priv->txqs_n)
        if (!(*priv->txqs)[idx])
        txq_ctrl = container_of((*priv->txqs)[idx], struct mlx5_txq_ctrl, txq);
        rte_atomic32_inc(&txq_ctrl->ibv->refcnt);
        DRV_LOG(DEBUG, "port %u Verbs Tx queue %u: refcnt %d",
                dev->data->port_id, txq_ctrl->idx,
                rte_atomic32_read(&txq_ctrl->ibv->refcnt));
        return txq_ctrl->ibv;
 * Release a Tx queue Verbs object.
 * Verbs Tx queue object.
 * 1 while a reference on it exists, 0 when freed.
mlx5_txq_ibv_release(struct mlx5_txq_ibv *txq_ibv)
        DRV_LOG(DEBUG, "port %u Verbs Tx queue %u: refcnt %d",
                PORT_ID(txq_ibv->txq_ctrl->priv),
                txq_ibv->txq_ctrl->idx, rte_atomic32_read(&txq_ibv->refcnt));
        if (rte_atomic32_dec_and_test(&txq_ibv->refcnt)) {
                claim_zero(ibv_destroy_qp(txq_ibv->qp));
                claim_zero(ibv_destroy_cq(txq_ibv->cq));
                LIST_REMOVE(txq_ibv, next);

 * Return true if a single reference exists on the object.
 * Verbs Tx queue object.
mlx5_txq_ibv_releasable(struct mlx5_txq_ibv *txq_ibv)
        return (rte_atomic32_read(&txq_ibv->refcnt) == 1);
 * Verify that the Verbs Tx queue list is empty.
 * Pointer to Ethernet device.
 * The number of objects not released.
mlx5_txq_ibv_verify(struct rte_eth_dev *dev)
        struct priv *priv = dev->data->dev_private;
        struct mlx5_txq_ibv *txq_ibv;

        LIST_FOREACH(txq_ibv, &priv->txqsibv, next) {
                DRV_LOG(DEBUG, "port %u Verbs Tx queue %u still referenced",
                        dev->data->port_id, txq_ibv->txq_ctrl->idx);
 * Create a DPDK Tx queue.
 * Pointer to Ethernet device.
 * Number of descriptors to configure in queue.
 * NUMA socket on which memory must be allocated.
 * Thresholds parameters.
 * A DPDK queue object on success, NULL otherwise and rte_errno is set.
struct mlx5_txq_ctrl *
mlx5_txq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
             unsigned int socket, const struct rte_eth_txconf *conf)
        struct priv *priv = dev->data->dev_private;
        const unsigned int max_tso_inline =
                ((MLX5_MAX_TSO_HEADER + (RTE_CACHE_LINE_SIZE - 1)) /
                 RTE_CACHE_LINE_SIZE);
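        /*
         * Illustration, assuming MLX5_MAX_TSO_HEADER is 192 bytes and
         * 64-byte cache lines: max_tso_inline = (192 + 63) / 64 = 3 cache
         * lines, i.e. the room reserved for inlined packet headers when TSO
         * is enabled further down.
         */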
        struct mlx5_txq_ctrl *tmpl;
        const unsigned int mr_n = MR_TABLE_SZ(priv->mr_n);

        tmpl = rte_calloc_socket("TXQ", 1,
                                 desc * sizeof(struct rte_mbuf *) +
                                 mr_n * sizeof(struct mlx5_mr_cache),
        assert(desc > MLX5_TX_COMP_THRESH);
        tmpl->txq.flags = conf->txq_flags;
        tmpl->socket = socket;
        tmpl->txq.elts_n = log2above(desc);
        if (priv->mps == MLX5_MPW_ENHANCED)
                tmpl->txq.mpw_hdr_dseg = priv->mpw_hdr_dseg;
        DRV_LOG(DEBUG, "port %u priv->device_attr.max_qp_wr is %d",
                dev->data->port_id, priv->device_attr.orig_attr.max_qp_wr);
        DRV_LOG(DEBUG, "port %u priv->device_attr.max_sge is %d",
                dev->data->port_id, priv->device_attr.orig_attr.max_sge);
        if (priv->txq_inline && (priv->txqs_n >= priv->txqs_inline)) {
                tmpl->txq.max_inline =
                        ((priv->txq_inline + (RTE_CACHE_LINE_SIZE - 1)) /
                         RTE_CACHE_LINE_SIZE);
                tmpl->txq.inline_en = 1;
                /* TSO and MPS can't be enabled concurrently. */
                assert(!priv->tso || !priv->mps);
                if (priv->mps == MLX5_MPW_ENHANCED) {
                        tmpl->txq.inline_max_packet_sz =
                                priv->inline_max_packet_sz;
                        /* To minimize the amount of inline data requested,
                         * avoid asking for a larger WQ than necessary. */
                        tmpl->max_inline_data =
                                ((RTE_MIN(priv->txq_inline,
                                          priv->inline_max_packet_sz) +
                                  (RTE_CACHE_LINE_SIZE - 1)) /
                                 RTE_CACHE_LINE_SIZE) * RTE_CACHE_LINE_SIZE;
                        tmpl->max_inline_data =
                                tmpl->txq.max_inline * RTE_CACHE_LINE_SIZE;
                /*
                 * Check whether the inline size is so large that it could
                 * make the DS count of the WQE overflow. The calculation
                 * considers the WQE CTRL and WQE ETH segments plus the
                 * inline part.
                 */
                ds_cnt = 2 + (tmpl->txq.max_inline / MLX5_WQE_DWORD_SIZE);
                if (ds_cnt > MLX5_DSEG_MAX) {
                        unsigned int max_inline = (MLX5_DSEG_MAX - 2) *
                                                  MLX5_WQE_DWORD_SIZE;

                        max_inline = max_inline - (max_inline %
                                                   RTE_CACHE_LINE_SIZE);
                        "port %u txq inline is too large (%d), setting it"
                        " to the maximum possible: %d",
                        PORT_ID(priv), priv->txq_inline, max_inline);
                        tmpl->txq.max_inline = max_inline / RTE_CACHE_LINE_SIZE;
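                        /*
                         * Worked example for the clamp above, assuming
                         * MLX5_DSEG_MAX = 63, MLX5_WQE_DWORD_SIZE = 16 and
                         * 64-byte cache lines: (63 - 2) * 16 = 976 bytes,
                         * rounded down to a cache-line multiple gives 960,
                         * so max_inline is stored back as 960 / 64 = 15
                         * cache lines.
                         */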
        tmpl->max_tso_header = max_tso_inline * RTE_CACHE_LINE_SIZE;
        tmpl->txq.max_inline = RTE_MAX(tmpl->txq.max_inline,
        tmpl->txq.tso_en = 1;
        tmpl->txq.tunnel_en = 1;
                (struct rte_mbuf *(*)[1 << tmpl->txq.elts_n])(tmpl + 1);
        tmpl->txq.mr_ctrl.cache_bh =
                (struct mlx5_mr_cache (*)[mr_n])
                &(*tmpl->txq.elts)[1 << tmpl->txq.elts_n];
        tmpl->txq.stats.idx = idx;
        rte_atomic32_inc(&tmpl->refcnt);
        DRV_LOG(DEBUG, "port %u Tx queue %u: refcnt %d", dev->data->port_id,
                idx, rte_atomic32_read(&tmpl->refcnt));
        LIST_INSERT_HEAD(&priv->txqsctrl, tmpl, next);

 * Pointer to Ethernet device.
 * A pointer to the queue if it exists.
struct mlx5_txq_ctrl *
mlx5_txq_get(struct rte_eth_dev *dev, uint16_t idx)
        struct priv *priv = dev->data->dev_private;
        struct mlx5_txq_ctrl *ctrl = NULL;

        if ((*priv->txqs)[idx]) {
                ctrl = container_of((*priv->txqs)[idx], struct mlx5_txq_ctrl,
                mlx5_txq_ibv_get(dev, idx);
                rte_atomic32_inc(&ctrl->refcnt);
                DRV_LOG(DEBUG, "port %u Tx queue %u refcnt %d",
                        ctrl->idx, rte_atomic32_read(&ctrl->refcnt));
 * Release a Tx queue.
 * Pointer to Ethernet device.
 * 1 while a reference on it exists, 0 when freed.
mlx5_txq_release(struct rte_eth_dev *dev, uint16_t idx)
        struct priv *priv = dev->data->dev_private;
        struct mlx5_txq_ctrl *txq;
        size_t page_size = sysconf(_SC_PAGESIZE);

        if (!(*priv->txqs)[idx])
        txq = container_of((*priv->txqs)[idx], struct mlx5_txq_ctrl, txq);
        DRV_LOG(DEBUG, "port %u Tx queue %u: refcnt %d", dev->data->port_id,
                txq->idx, rte_atomic32_read(&txq->refcnt));
        if (txq->ibv && !mlx5_txq_ibv_release(txq->ibv))
        munmap((void *)RTE_ALIGN_FLOOR((uintptr_t)txq->txq.bf_reg,
               page_size), page_size);
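        /*
         * The munmap() above undoes the MAP_FIXED mapping installed by
         * mlx5_tx_uar_remap(): rounding bf_reg down to the page boundary
         * recovers the page-aligned address and length that were passed to
         * mmap() for this queue's UAR page.
         */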
        if (rte_atomic32_dec_and_test(&txq->refcnt)) {
                LIST_REMOVE(txq, next);
                (*priv->txqs)[idx] = NULL;

 * Verify if the queue can be released.
 * Pointer to Ethernet device.
 * 1 if the queue can be released.
mlx5_txq_releasable(struct rte_eth_dev *dev, uint16_t idx)
        struct priv *priv = dev->data->dev_private;
        struct mlx5_txq_ctrl *txq;

        if (!(*priv->txqs)[idx])
        txq = container_of((*priv->txqs)[idx], struct mlx5_txq_ctrl, txq);
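        /*
         * The queue keeps the initial reference taken in mlx5_txq_new(), so
         * a refcnt of exactly 1 means no other user currently holds it and
         * it is safe to release.
         */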
        return (rte_atomic32_read(&txq->refcnt) == 1);
 * Verify that the Tx queue list is empty.
 * Pointer to Ethernet device.
 * The number of objects not released.
mlx5_txq_verify(struct rte_eth_dev *dev)
        struct priv *priv = dev->data->dev_private;
        struct mlx5_txq_ctrl *txq;

        LIST_FOREACH(txq, &priv->txqsctrl, next) {
                DRV_LOG(DEBUG, "port %u Tx queue %u still referenced",
                        dev->data->port_id, txq->idx);