X-Git-Url: https://gerrit.fd.io/r/gitweb?a=blobdiff_plain;f=drivers%2Fnet%2Fmlx5%2Fmlx5_txq.c;h=7ca99f5aa97fe7ddfa5fc21e5776b5efcf54c97f;hb=c3f15def2ebe9cc255cf0e5cf32aa171f5b4326d;hp=6fe61c4aef3f8f664c83bc1d057ed0dbe3d405c5;hpb=8b25d1ad5d2264bdfc2818c7bda74ee2697df6db;p=deb_dpdk.git

diff --git a/drivers/net/mlx5/mlx5_txq.c b/drivers/net/mlx5/mlx5_txq.c
index 6fe61c4a..7ca99f5a 100644
--- a/drivers/net/mlx5/mlx5_txq.c
+++ b/drivers/net/mlx5/mlx5_txq.c
@@ -36,56 +36,44 @@
 #include <errno.h>
 #include <string.h>
 #include <stdint.h>
+#include <unistd.h>
+#include <sys/mman.h>
 
 /* Verbs header. */
 /* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
 #ifdef PEDANTIC
-#pragma GCC diagnostic ignored "-pedantic"
+#pragma GCC diagnostic ignored "-Wpedantic"
 #endif
 #include <infiniband/verbs.h>
 #ifdef PEDANTIC
-#pragma GCC diagnostic error "-pedantic"
+#pragma GCC diagnostic error "-Wpedantic"
 #endif
 
-/* DPDK headers don't like -pedantic. */
-#ifdef PEDANTIC
-#pragma GCC diagnostic ignored "-pedantic"
-#endif
 #include <rte_mbuf.h>
 #include <rte_malloc.h>
 #include <rte_ethdev.h>
 #include <rte_common.h>
-#ifdef PEDANTIC
-#pragma GCC diagnostic error "-pedantic"
-#endif
 
 #include "mlx5_utils.h"
 #include "mlx5_defs.h"
 #include "mlx5.h"
 #include "mlx5_rxtx.h"
 #include "mlx5_autoconf.h"
-#include "mlx5_defs.h"
 
 /**
  * Allocate TX queue elements.
  *
  * @param txq_ctrl
  *   Pointer to TX queue structure.
- * @param elts_n
- *   Number of elements to allocate.
  */
-static void
-txq_alloc_elts(struct txq_ctrl *txq_ctrl, unsigned int elts_n)
+void
+txq_alloc_elts(struct mlx5_txq_ctrl *txq_ctrl)
 {
+	const unsigned int elts_n = 1 << txq_ctrl->txq.elts_n;
 	unsigned int i;
 
 	for (i = 0; (i != elts_n); ++i)
 		(*txq_ctrl->txq.elts)[i] = NULL;
-	for (i = 0; (i != txq_ctrl->txq.wqe_n); ++i) {
-		volatile union mlx5_wqe *wqe = &(*txq_ctrl->txq.wqes)[i];
-
-		memset((void *)(uintptr_t)wqe, 0x0, sizeof(*wqe));
-	}
 	DEBUG("%p: allocated and configured %u WRs", (void *)txq_ctrl, elts_n);
 	txq_ctrl->txq.elts_head = 0;
 	txq_ctrl->txq.elts_tail = 0;
@@ -99,11 +87,12 @@ txq_alloc_elts(struct txq_ctrl *txq_ctrl, unsigned int elts_n)
  *   Pointer to TX queue structure.
  */
 static void
-txq_free_elts(struct txq_ctrl *txq_ctrl)
+txq_free_elts(struct mlx5_txq_ctrl *txq_ctrl)
 {
-	unsigned int elts_n = txq_ctrl->txq.elts_n;
-	unsigned int elts_head = txq_ctrl->txq.elts_head;
-	unsigned int elts_tail = txq_ctrl->txq.elts_tail;
+	const uint16_t elts_n = 1 << txq_ctrl->txq.elts_n;
+	const uint16_t elts_m = elts_n - 1;
+	uint16_t elts_head = txq_ctrl->txq.elts_head;
+	uint16_t elts_tail = txq_ctrl->txq.elts_tail;
 	struct rte_mbuf *(*elts)[elts_n] = txq_ctrl->txq.elts;
 
 	DEBUG("%p: freeing WRs", (void *)txq_ctrl);
@@ -112,214 +101,242 @@ txq_free_elts(struct txq_ctrl *txq_ctrl)
 	txq_ctrl->txq.elts_comp = 0;
 
 	while (elts_tail != elts_head) {
-		struct rte_mbuf *elt = (*elts)[elts_tail];
+		struct rte_mbuf *elt = (*elts)[elts_tail & elts_m];
 
 		assert(elt != NULL);
-		rte_pktmbuf_free(elt);
+		rte_pktmbuf_free_seg(elt);
 #ifndef NDEBUG
 		/* Poisoning. */
-		memset(&(*elts)[elts_tail],
+		memset(&(*elts)[elts_tail & elts_m],
 		       0x77,
-		       sizeof((*elts)[elts_tail]));
+		       sizeof((*elts)[elts_tail & elts_m]));
 #endif
-		if (++elts_tail == elts_n)
-			elts_tail = 0;
+		++elts_tail;
 	}
 }
 
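The rewritten txq_free_elts() above switches from wrap-at-elts_n indices to free-running 16-bit ring indices: elts_head and elts_tail are never reset, and "index & elts_m" maps them into the ring, which only works because elts_n is a power of two. A minimal stand-alone sketch of that convention (illustrative values only, not driver code):

#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	const uint16_t elts_n = 1 << 4;       /* 16 slots, as in 1 << txq.elts_n. */
	const uint16_t elts_m = elts_n - 1;   /* Mask, valid for powers of two. */
	uint16_t elts_tail = 65534;           /* About to wrap past UINT16_MAX. */
	uint16_t elts_head = (uint16_t)(elts_tail + 8); /* 8 entries in flight. */

	while (elts_tail != elts_head) {
		/* The mask maps the free-running index into [0, elts_n). */
		printf("freeing slot %u\n", elts_tail & elts_m);
		++elts_tail;                  /* Unsigned overflow is defined. */
	}
	return 0;
}
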
 /**
- * Clean up a TX queue.
+ * DPDK callback to configure a TX queue.
  *
- * Destroy objects, free allocated memory and reset the structure for reuse.
+ * @param dev
+ *   Pointer to Ethernet device structure.
+ * @param idx
+ *   TX queue index.
+ * @param desc
+ *   Number of descriptors to configure in queue.
+ * @param socket
+ *   NUMA socket on which memory must be allocated.
+ * @param[in] conf
+ *   Thresholds parameters.
  *
- * @param txq_ctrl
- *   Pointer to TX queue structure.
+ * @return
+ *   0 on success, negative errno value on failure.
  */
-void
-txq_cleanup(struct txq_ctrl *txq_ctrl)
+int
+mlx5_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
+		    unsigned int socket, const struct rte_eth_txconf *conf)
 {
-	struct ibv_exp_release_intf_params params;
-	size_t i;
-
-	DEBUG("cleaning up %p", (void *)txq_ctrl);
-	txq_free_elts(txq_ctrl);
-	if (txq_ctrl->if_qp != NULL) {
-		assert(txq_ctrl->priv != NULL);
-		assert(txq_ctrl->priv->ctx != NULL);
-		assert(txq_ctrl->qp != NULL);
-		params = (struct ibv_exp_release_intf_params){
-			.comp_mask = 0,
-		};
-		claim_zero(ibv_exp_release_intf(txq_ctrl->priv->ctx,
-						txq_ctrl->if_qp,
-						&params));
+	struct priv *priv = dev->data->dev_private;
+	struct mlx5_txq_data *txq = (*priv->txqs)[idx];
+	struct mlx5_txq_ctrl *txq_ctrl =
+		container_of(txq, struct mlx5_txq_ctrl, txq);
+	int ret = 0;
+
+	priv_lock(priv);
+	if (desc <= MLX5_TX_COMP_THRESH) {
+		WARN("%p: number of descriptors requested for TX queue %u"
+		     " must be higher than MLX5_TX_COMP_THRESH, using"
+		     " %u instead of %u",
+		     (void *)dev, idx, MLX5_TX_COMP_THRESH + 1, desc);
+		desc = MLX5_TX_COMP_THRESH + 1;
 	}
-	if (txq_ctrl->if_cq != NULL) {
-		assert(txq_ctrl->priv != NULL);
-		assert(txq_ctrl->priv->ctx != NULL);
-		assert(txq_ctrl->cq != NULL);
-		params = (struct ibv_exp_release_intf_params){
-			.comp_mask = 0,
-		};
-		claim_zero(ibv_exp_release_intf(txq_ctrl->priv->ctx,
-						txq_ctrl->if_cq,
-						&params));
+	if (!rte_is_power_of_2(desc)) {
+		desc = 1 << log2above(desc);
+		WARN("%p: increased number of descriptors in TX queue %u"
+		     " to the next power of two (%d)",
+		     (void *)dev, idx, desc);
 	}
-	if (txq_ctrl->qp != NULL)
-		claim_zero(ibv_destroy_qp(txq_ctrl->qp));
-	if (txq_ctrl->cq != NULL)
-		claim_zero(ibv_destroy_cq(txq_ctrl->cq));
-	if (txq_ctrl->rd != NULL) {
-		struct ibv_exp_destroy_res_domain_attr attr = {
-			.comp_mask = 0,
-		};
-
-		assert(txq_ctrl->priv != NULL);
-		assert(txq_ctrl->priv->ctx != NULL);
-		claim_zero(ibv_exp_destroy_res_domain(txq_ctrl->priv->ctx,
-						      txq_ctrl->rd,
-						      &attr));
+	DEBUG("%p: configuring queue %u for %u descriptors",
+	      (void *)dev, idx, desc);
+	if (idx >= priv->txqs_n) {
+		ERROR("%p: queue index out of range (%u >= %u)",
+		      (void *)dev, idx, priv->txqs_n);
+		priv_unlock(priv);
+		return -EOVERFLOW;
 	}
-	for (i = 0; (i != RTE_DIM(txq_ctrl->txq.mp2mr)); ++i) {
-		if (txq_ctrl->txq.mp2mr[i].mp == NULL)
-			break;
-		assert(txq_ctrl->txq.mp2mr[i].mr != NULL);
-		claim_zero(ibv_dereg_mr(txq_ctrl->txq.mp2mr[i].mr));
+	if (!mlx5_priv_txq_releasable(priv, idx)) {
+		ret = EBUSY;
+		ERROR("%p: unable to release queue index %u",
+		      (void *)dev, idx);
+		goto out;
 	}
-	memset(txq_ctrl, 0, sizeof(*txq_ctrl));
+	mlx5_priv_txq_release(priv, idx);
+	txq_ctrl = mlx5_priv_txq_new(priv, idx, desc, socket, conf);
+	if (!txq_ctrl) {
+		ERROR("%p: unable to allocate queue index %u",
+		      (void *)dev, idx);
+		ret = ENOMEM;
+		goto out;
+	}
+	DEBUG("%p: adding TX queue %p to list",
+	      (void *)dev, (void *)txq_ctrl);
+	(*priv->txqs)[idx] = &txq_ctrl->txq;
+out:
+	priv_unlock(priv);
+	return -ret;
 }
 
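mlx5_tx_queue_setup() above sanitizes the requested descriptor count before (re)creating the queue: it is clamped above MLX5_TX_COMP_THRESH and rounded up to a power of two so that the masked ring indices work. A stand-alone sketch of the same arithmetic; log2above() is re-implemented here on the assumption it matches the helper in mlx5_utils.h, and the MLX5_TX_COMP_THRESH value is illustrative:

#include <stdint.h>
#include <stdio.h>

#define MLX5_TX_COMP_THRESH 32 /* Illustrative value only. */

static unsigned int
log2above(unsigned int v)
{
	unsigned int l;
	int r;

	for (l = 0, r = 0; (v >> 1); ++l, v >>= 1)
		r |= (v & 1);
	return l + r; /* ceil(log2(v)). */
}

int
main(void)
{
	uint16_t desc = 1000; /* e.g. from rte_eth_tx_queue_setup(). */

	if (desc <= MLX5_TX_COMP_THRESH)
		desc = MLX5_TX_COMP_THRESH + 1;
	if (desc & (desc - 1)) /* Not a power of two. */
		desc = 1 << log2above(desc);
	printf("using %u descriptors\n", desc); /* Prints 1024. */
	return 0;
}
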
 /**
- * Initialize TX queue.
+ * DPDK callback to release a TX queue.
  *
- * @param tmpl
- *   Pointer to TX queue control template.
- * @param txq_ctrl
- *   Pointer to TX queue control.
+ * @param dpdk_txq
+ *   Generic TX queue pointer.
+ */
+void
+mlx5_tx_queue_release(void *dpdk_txq)
+{
+	struct mlx5_txq_data *txq = (struct mlx5_txq_data *)dpdk_txq;
+	struct mlx5_txq_ctrl *txq_ctrl;
+	struct priv *priv;
+	unsigned int i;
+
+	if (txq == NULL)
+		return;
+	txq_ctrl = container_of(txq, struct mlx5_txq_ctrl, txq);
+	priv = txq_ctrl->priv;
+	priv_lock(priv);
+	for (i = 0; (i != priv->txqs_n); ++i)
+		if ((*priv->txqs)[i] == txq) {
+			DEBUG("%p: removing TX queue %p from list",
+			      (void *)priv->dev, (void *)txq_ctrl);
+			mlx5_priv_txq_release(priv, i);
+			break;
+		}
+	priv_unlock(priv);
+}
+
+/**
+ * Locally map the UARs used by Tx queues for the BlueFlame doorbell.
+ *
+ * @param[in] priv
+ *   Pointer to private structure.
+ * @param fd
+ *   Verbs file descriptor to map UAR pages.
+ *
+ * @return
+ *   0 on success, errno value on failure.
  */
-static inline int
-txq_setup(struct txq_ctrl *tmpl, struct txq_ctrl *txq_ctrl)
+int
+priv_tx_uar_remap(struct priv *priv, int fd)
 {
-	struct mlx5_qp *qp = to_mqp(tmpl->qp);
-	struct ibv_cq *ibcq = tmpl->cq;
-	struct mlx5_cq *cq = to_mxxx(cq, cq);
+	unsigned int i, j;
+	uintptr_t pages[priv->txqs_n];
+	unsigned int pages_n = 0;
+	uintptr_t uar_va;
+	void *addr;
+	struct mlx5_txq_data *txq;
+	struct mlx5_txq_ctrl *txq_ctrl;
+	int already_mapped;
+	size_t page_size = sysconf(_SC_PAGESIZE);
 
-	if (cq->cqe_sz != RTE_CACHE_LINE_SIZE) {
-		ERROR("Wrong MLX5_CQE_SIZE environment variable value: "
-		      "it should be set to %u", RTE_CACHE_LINE_SIZE);
-		return EINVAL;
+	memset(pages, 0, priv->txqs_n * sizeof(uintptr_t));
+	/*
+	 * As in rdma-core, UARs are mapped at OS page-size granularity.
+	 * Use the aligned address to avoid duplicate mmap() calls.
+	 * See the libmlx5 function mlx5_init_context().
+	 */
+	for (i = 0; i != priv->txqs_n; ++i) {
+		if (!(*priv->txqs)[i])
+			continue;
+		txq = (*priv->txqs)[i];
+		txq_ctrl = container_of(txq, struct mlx5_txq_ctrl, txq);
+		uar_va = (uintptr_t)txq_ctrl->txq.bf_reg;
+		uar_va = RTE_ALIGN_FLOOR(uar_va, page_size);
+		already_mapped = 0;
+		for (j = 0; j != pages_n; ++j) {
+			if (pages[j] == uar_va) {
+				already_mapped = 1;
+				break;
+			}
+		}
+		if (already_mapped)
+			continue;
+		pages[pages_n++] = uar_va;
+		addr = mmap((void *)uar_va, page_size,
+			    PROT_WRITE, MAP_FIXED | MAP_SHARED, fd,
+			    txq_ctrl->uar_mmap_offset);
+		if (addr != (void *)uar_va) {
+			ERROR("call to mmap failed on UAR for txq %d", i);
+			return -1;
+		}
 	}
-	tmpl->txq.cqe_n = ibcq->cqe + 1;
-	tmpl->txq.qp_num_8s = qp->ctrl_seg.qp_num << 8;
-	tmpl->txq.wqes =
-		(volatile union mlx5_wqe (*)[])
-		(uintptr_t)qp->gen_data.sqstart;
-	tmpl->txq.wqe_n = qp->sq.wqe_cnt;
-	tmpl->txq.qp_db = &qp->gen_data.db[MLX5_SND_DBR];
-	tmpl->txq.bf_reg = qp->gen_data.bf->reg;
-	tmpl->txq.bf_offset = qp->gen_data.bf->offset;
-	tmpl->txq.bf_buf_size = qp->gen_data.bf->buf_size;
-	tmpl->txq.cq_db = cq->dbrec;
-	tmpl->txq.cqes =
-		(volatile struct mlx5_cqe (*)[])
-		(uintptr_t)cq->active_buf->buf;
-	tmpl->txq.elts =
-		(struct rte_mbuf *(*)[tmpl->txq.elts_n])
-		((uintptr_t)txq_ctrl + sizeof(*txq_ctrl));
 	return 0;
 }
 
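priv_tx_uar_remap() deduplicates doorbell pages before calling mmap() with MAP_FIXED, since several queues may share one UAR page. The following stand-alone sketch reproduces only the page-alignment and deduplication step, using fake register addresses (with 4 KiB pages it keeps two of the three):

#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
	uintptr_t bf_regs[] = { 0x1000800, 0x1000900, 0x1002800 };
	uintptr_t pages[3];
	unsigned int pages_n = 0, i, j;
	size_t page_size = (size_t)sysconf(_SC_PAGESIZE);

	for (i = 0; i != 3; ++i) {
		/* Align down, as mmap() operates on whole pages. */
		uintptr_t uar_va = bf_regs[i] & ~(page_size - 1);
		int seen = 0;

		for (j = 0; j != pages_n; ++j)
			seen |= (pages[j] == uar_va);
		if (seen)
			continue;
		pages[pages_n++] = uar_va;
		printf("would mmap page 0x%lx\n", (unsigned long)uar_va);
	}
	return 0;
}
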
 /**
- * Configure a TX queue.
+ * Create the Tx queue Verbs object.
  *
- * @param dev
- *   Pointer to Ethernet device structure.
- * @param txq_ctrl
- *   Pointer to TX queue structure.
- * @param desc
- *   Number of descriptors to configure in queue.
- * @param socket
- *   NUMA socket on which memory must be allocated.
- * @param[in] conf
- *   Thresholds parameters.
+ * @param priv
+ *   Pointer to private structure.
+ * @param idx
+ *   Queue index in the DPDK Tx queue array.
  *
  * @return
- *   0 on success, errno value on failure.
+ *   The Verbs object initialised if it can be created.
  */
-int
-txq_ctrl_setup(struct rte_eth_dev *dev, struct txq_ctrl *txq_ctrl,
-	       uint16_t desc, unsigned int socket,
-	       const struct rte_eth_txconf *conf)
+struct mlx5_txq_ibv*
+mlx5_priv_txq_ibv_new(struct priv *priv, uint16_t idx)
 {
-	struct priv *priv = mlx5_get_priv(dev);
-	struct txq_ctrl tmpl = {
-		.priv = priv,
-		.socket = socket,
-	};
+	struct mlx5_txq_data *txq_data = (*priv->txqs)[idx];
+	struct mlx5_txq_ctrl *txq_ctrl =
+		container_of(txq_data, struct mlx5_txq_ctrl, txq);
+	struct mlx5_txq_ibv tmpl;
+	struct mlx5_txq_ibv *txq_ibv;
 	union {
-		struct ibv_exp_query_intf_params params;
-		struct ibv_exp_qp_init_attr init;
-		struct ibv_exp_res_domain_init_attr rd;
-		struct ibv_exp_cq_init_attr cq;
-		struct ibv_exp_qp_attr mod;
-		struct ibv_exp_cq_attr cq_attr;
+		struct ibv_qp_init_attr_ex init;
+		struct ibv_cq_init_attr_ex cq;
+		struct ibv_qp_attr mod;
+		struct ibv_cq_ex cq_attr;
 	} attr;
-	enum ibv_exp_query_intf_status status;
+	unsigned int cqe_n;
+	struct mlx5dv_qp qp = { .comp_mask = MLX5DV_QP_MASK_UAR_MMAP_OFFSET };
+	struct mlx5dv_cq cq_info;
+	struct mlx5dv_obj obj;
+	const int desc = 1 << txq_data->elts_n;
 	int ret = 0;
 
+	assert(txq_data);
+	/* Zero early so the error path never sees uninitialized handles. */
+	memset(&tmpl, 0, sizeof(struct mlx5_txq_ibv));
 	if (mlx5_getenv_int("MLX5_ENABLE_CQE_COMPRESSION")) {
-		ret = ENOTSUP;
 		ERROR("MLX5_ENABLE_CQE_COMPRESSION must never be set");
 		goto error;
 	}
-	(void)conf; /* Thresholds configuration (ignored). */
-	assert(desc > MLX5_TX_COMP_THRESH);
-	tmpl.txq.elts_n = desc;
 	/* MRs will be registered in mp2mr[] later. */
-	attr.rd = (struct ibv_exp_res_domain_init_attr){
-		.comp_mask = (IBV_EXP_RES_DOMAIN_THREAD_MODEL |
-			      IBV_EXP_RES_DOMAIN_MSG_MODEL),
-		.thread_model = IBV_EXP_THREAD_SINGLE,
-		.msg_model = IBV_EXP_MSG_HIGH_BW,
+	attr.cq = (struct ibv_cq_init_attr_ex){
+		.comp_mask = 0,
 	};
-	tmpl.rd = ibv_exp_create_res_domain(priv->ctx, &attr.rd);
-	if (tmpl.rd == NULL) {
-		ret = ENOMEM;
-		ERROR("%p: RD creation failure: %s",
-		      (void *)dev, strerror(ret));
-		goto error;
-	}
-	attr.cq = (struct ibv_exp_cq_init_attr){
-		.comp_mask = IBV_EXP_CQ_INIT_ATTR_RES_DOMAIN,
-		.res_domain = tmpl.rd,
-	};
-	tmpl.cq = ibv_exp_create_cq(priv->ctx,
-				    (((desc / MLX5_TX_COMP_THRESH) - 1) ?
-				     ((desc / MLX5_TX_COMP_THRESH) - 1) : 1),
-				    NULL, NULL, 0, &attr.cq);
+	cqe_n = ((desc / MLX5_TX_COMP_THRESH) - 1) ?
+		((desc / MLX5_TX_COMP_THRESH) - 1) : 1;
+	if (priv->mps == MLX5_MPW_ENHANCED)
+		cqe_n += MLX5_TX_COMP_THRESH_INLINE_DIV;
+	tmpl.cq = ibv_create_cq(priv->ctx, cqe_n, NULL, NULL, 0);
 	if (tmpl.cq == NULL) {
-		ret = ENOMEM;
-		ERROR("%p: CQ creation failure: %s",
-		      (void *)dev, strerror(ret));
+		ERROR("%p: CQ creation failure", (void *)txq_ctrl);
 		goto error;
 	}
-	DEBUG("priv->device_attr.max_qp_wr is %d",
-	      priv->device_attr.max_qp_wr);
-	DEBUG("priv->device_attr.max_sge is %d",
-	      priv->device_attr.max_sge);
-	attr.init = (struct ibv_exp_qp_init_attr){
+	attr.init = (struct ibv_qp_init_attr_ex){
 		/* CQ to be associated with the send queue. */
 		.send_cq = tmpl.cq,
 		/* CQ to be associated with the receive queue. */
 		.recv_cq = tmpl.cq,
 		.cap = {
 			/* Max number of outstanding WRs. */
-			.max_send_wr = ((priv->device_attr.max_qp_wr < desc) ?
-					priv->device_attr.max_qp_wr :
-					desc),
+			.max_send_wr =
+				((priv->device_attr.orig_attr.max_qp_wr <
+				  desc) ?
+				 priv->device_attr.orig_attr.max_qp_wr :
+				 desc),
 			/*
 			 * Max number of scatter/gather elements in a WR,
 			 * must be 1 to prevent libmlx5 from trying to affect
@@ -330,116 +347,204 @@ txq_ctrl_setup(struct rte_eth_dev *dev, struct txq_ctrl *txq_ctrl,
 			.max_send_sge = 1,
 		},
 		.qp_type = IBV_QPT_RAW_PACKET,
-		/* Do *NOT* enable this, completions events are managed per
-		 * TX burst. */
+		/*
+		 * Do *NOT* enable this, completion events are managed per
+		 * Tx burst.
+		 */
 		.sq_sig_all = 0,
 		.pd = priv->pd,
-		.res_domain = tmpl.rd,
-		.comp_mask = (IBV_EXP_QP_INIT_ATTR_PD |
-			      IBV_EXP_QP_INIT_ATTR_RES_DOMAIN),
+		.comp_mask = IBV_QP_INIT_ATTR_PD,
 	};
-	if (priv->txq_inline && priv->txqs_n >= priv->txqs_inline) {
-		tmpl.txq.max_inline = priv->txq_inline;
-		attr.init.cap.max_inline_data = tmpl.txq.max_inline;
+	if (txq_data->inline_en)
+		attr.init.cap.max_inline_data = txq_ctrl->max_inline_data;
+	if (txq_data->tso_en) {
+		attr.init.max_tso_header = txq_ctrl->max_tso_header;
+		attr.init.comp_mask |= IBV_QP_INIT_ATTR_MAX_TSO_HEADER;
 	}
-	tmpl.qp = ibv_exp_create_qp(priv->ctx, &attr.init);
+	tmpl.qp = ibv_create_qp_ex(priv->ctx, &attr.init);
 	if (tmpl.qp == NULL) {
-		ret = (errno ? errno : EINVAL);
-		ERROR("%p: QP creation failure: %s",
-		      (void *)dev, strerror(ret));
+		ERROR("%p: QP creation failure", (void *)txq_ctrl);
 		goto error;
 	}
-	DEBUG("TX queue capabilities: max_send_wr=%u, max_send_sge=%u,"
-	      " max_inline_data=%u",
-	      attr.init.cap.max_send_wr,
-	      attr.init.cap.max_send_sge,
-	      attr.init.cap.max_inline_data);
-	attr.mod = (struct ibv_exp_qp_attr){
+	attr.mod = (struct ibv_qp_attr){
 		/* Move the QP to this state. */
 		.qp_state = IBV_QPS_INIT,
 		/* Primary port number. */
 		.port_num = priv->port
 	};
-	ret = ibv_exp_modify_qp(tmpl.qp, &attr.mod,
-				(IBV_EXP_QP_STATE | IBV_EXP_QP_PORT));
+	ret = ibv_modify_qp(tmpl.qp, &attr.mod, (IBV_QP_STATE | IBV_QP_PORT));
 	if (ret) {
-		ERROR("%p: QP state to IBV_QPS_INIT failed: %s",
-		      (void *)dev, strerror(ret));
+		ERROR("%p: QP state to IBV_QPS_INIT failed", (void *)txq_ctrl);
 		goto error;
 	}
-	ret = txq_setup(&tmpl, txq_ctrl);
-	if (ret) {
-		ERROR("%p: cannot initialize TX queue structure: %s",
-		      (void *)dev, strerror(ret));
-		goto error;
-	}
-	txq_alloc_elts(&tmpl, desc);
-	attr.mod = (struct ibv_exp_qp_attr){
+	attr.mod = (struct ibv_qp_attr){
 		.qp_state = IBV_QPS_RTR
 	};
-	ret = ibv_exp_modify_qp(tmpl.qp, &attr.mod, IBV_EXP_QP_STATE);
+	ret = ibv_modify_qp(tmpl.qp, &attr.mod, IBV_QP_STATE);
 	if (ret) {
-		ERROR("%p: QP state to IBV_QPS_RTR failed: %s",
-		      (void *)dev, strerror(ret));
+		ERROR("%p: QP state to IBV_QPS_RTR failed", (void *)txq_ctrl);
 		goto error;
 	}
 	attr.mod.qp_state = IBV_QPS_RTS;
-	ret = ibv_exp_modify_qp(tmpl.qp, &attr.mod, IBV_EXP_QP_STATE);
+	ret = ibv_modify_qp(tmpl.qp, &attr.mod, IBV_QP_STATE);
 	if (ret) {
-		ERROR("%p: QP state to IBV_QPS_RTS failed: %s",
-		      (void *)dev, strerror(ret));
+		ERROR("%p: QP state to IBV_QPS_RTS failed", (void *)txq_ctrl);
 		goto error;
 	}
-	attr.params = (struct ibv_exp_query_intf_params){
-		.intf_scope = IBV_EXP_INTF_GLOBAL,
-		.intf = IBV_EXP_INTF_CQ,
-		.obj = tmpl.cq,
-	};
-	tmpl.if_cq = ibv_exp_query_intf(priv->ctx, &attr.params, &status);
-	if (tmpl.if_cq == NULL) {
-		ret = EINVAL;
-		ERROR("%p: CQ interface family query failed with status %d",
-		      (void *)dev, status);
+	txq_ibv = rte_calloc_socket(__func__, 1, sizeof(struct mlx5_txq_ibv), 0,
+				    txq_ctrl->socket);
+	if (!txq_ibv) {
+		ERROR("%p: cannot allocate memory", (void *)txq_ctrl);
 		goto error;
 	}
-	attr.params = (struct ibv_exp_query_intf_params){
-		.intf_scope = IBV_EXP_INTF_GLOBAL,
-		.intf = IBV_EXP_INTF_QP_BURST,
-		.intf_version = 1,
-		.obj = tmpl.qp,
-		/* Enable multi-packet send if supported. */
-		.family_flags =
-			((priv->mps && !priv->sriov) ?
-			 IBV_EXP_QP_BURST_CREATE_ENABLE_MULTI_PACKET_SEND_WR :
-			 0),
-	};
-	tmpl.if_qp = ibv_exp_query_intf(priv->ctx, &attr.params, &status);
-	if (tmpl.if_qp == NULL) {
-		ret = EINVAL;
-		ERROR("%p: QP interface family query failed with status %d",
-		      (void *)dev, status);
+	obj.cq.in = tmpl.cq;
+	obj.cq.out = &cq_info;
+	obj.qp.in = tmpl.qp;
+	obj.qp.out = &qp;
+	ret = mlx5dv_init_obj(&obj, MLX5DV_OBJ_CQ | MLX5DV_OBJ_QP);
+	if (ret != 0)
+		goto error;
+	if (cq_info.cqe_size != RTE_CACHE_LINE_SIZE) {
+		ERROR("Wrong MLX5_CQE_SIZE environment variable value: "
+		      "it should be set to %u", RTE_CACHE_LINE_SIZE);
 		goto error;
 	}
-	/* Clean up txq in case we're reinitializing it. */
-	DEBUG("%p: cleaning-up old txq just in case", (void *)txq_ctrl);
-	txq_cleanup(txq_ctrl);
-	*txq_ctrl = tmpl;
-	DEBUG("%p: txq updated with %p", (void *)txq_ctrl, (void *)&tmpl);
-	/* Pre-register known mempools. */
-	rte_mempool_walk(txq_mp2mr_iter, txq_ctrl);
-	assert(ret == 0);
-	return 0;
+	txq_data->cqe_n = log2above(cq_info.cqe_cnt);
+	txq_data->qp_num_8s = tmpl.qp->qp_num << 8;
+	txq_data->wqes = qp.sq.buf;
+	txq_data->wqe_n = log2above(qp.sq.wqe_cnt);
+	txq_data->qp_db = &qp.dbrec[MLX5_SND_DBR];
+	txq_data->bf_reg = qp.bf.reg;
+	txq_data->cq_db = cq_info.dbrec;
+	txq_data->cqes =
+		(volatile struct mlx5_cqe (*)[])
+		(uintptr_t)cq_info.buf;
+	txq_data->cq_ci = 0;
+	txq_data->cq_pi = 0;
+	txq_data->wqe_ci = 0;
+	txq_data->wqe_pi = 0;
+	txq_ibv->qp = tmpl.qp;
+	txq_ibv->cq = tmpl.cq;
+	rte_atomic32_inc(&txq_ibv->refcnt);
+	if (qp.comp_mask & MLX5DV_QP_MASK_UAR_MMAP_OFFSET) {
+		txq_ctrl->uar_mmap_offset = qp.uar_mmap_offset;
+	} else {
+		ERROR("Failed to retrieve UAR info, invalid libmlx5.so version");
+		goto error;
+	}
+	DEBUG("%p: Verbs Tx queue %p: refcnt %d", (void *)priv,
+	      (void *)txq_ibv, rte_atomic32_read(&txq_ibv->refcnt));
+	LIST_INSERT_HEAD(&priv->txqsibv, txq_ibv, next);
+	return txq_ibv;
 error:
-	txq_cleanup(&tmpl);
-	assert(ret > 0);
+	/* Destroy the QP first: the CQ cannot go while a QP references it. */
+	if (tmpl.qp)
+		claim_zero(ibv_destroy_qp(tmpl.qp));
+	if (tmpl.cq)
+		claim_zero(ibv_destroy_cq(tmpl.cq));
+	return NULL;
+}
+
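For reference, the Verbs bring-up performed by mlx5_priv_txq_ibv_new() boils down to walking the raw packet QP through the INIT, RTR and RTS states before any doorbell may be rung. A condensed sketch of just that state machine, mirroring the calls above (attribute values are illustrative):

#include <infiniband/verbs.h>

static int
bring_up_tx_qp(struct ibv_qp *qp, uint8_t port)
{
	struct ibv_qp_attr mod;
	int ret;

	/* RESET -> INIT: bind the QP to a physical port. */
	mod = (struct ibv_qp_attr){
		.qp_state = IBV_QPS_INIT,
		.port_num = port,
	};
	ret = ibv_modify_qp(qp, &mod, IBV_QP_STATE | IBV_QP_PORT);
	if (ret)
		return ret;
	/* INIT -> RTR: ready to receive (unused for Tx, still required). */
	mod = (struct ibv_qp_attr){ .qp_state = IBV_QPS_RTR };
	ret = ibv_modify_qp(qp, &mod, IBV_QP_STATE);
	if (ret)
		return ret;
	/* RTR -> RTS: ready to send, work requests may now be posted. */
	mod = (struct ibv_qp_attr){ .qp_state = IBV_QPS_RTS };
	return ibv_modify_qp(qp, &mod, IBV_QP_STATE);
}
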
+/**
+ * Get a Tx queue Verbs object.
+ *
+ * @param priv
+ *   Pointer to private structure.
+ * @param idx
+ *   Queue index in the DPDK Tx queue array.
+ *
+ * @return
+ *   The Verbs object if it exists.
+ */
+struct mlx5_txq_ibv*
+mlx5_priv_txq_ibv_get(struct priv *priv, uint16_t idx)
+{
+	struct mlx5_txq_ctrl *txq_ctrl;
+
+	if (idx >= priv->txqs_n)
+		return NULL;
+	if (!(*priv->txqs)[idx])
+		return NULL;
+	txq_ctrl = container_of((*priv->txqs)[idx], struct mlx5_txq_ctrl, txq);
+	if (txq_ctrl->ibv) {
+		rte_atomic32_inc(&txq_ctrl->ibv->refcnt);
+		DEBUG("%p: Verbs Tx queue %p: refcnt %d", (void *)priv,
+		      (void *)txq_ctrl->ibv,
+		      rte_atomic32_read(&txq_ctrl->ibv->refcnt));
+	}
+	return txq_ctrl->ibv;
+}
+
+/**
+ * Release a Tx queue Verbs object.
+ *
+ * @param priv
+ *   Pointer to private structure.
+ * @param txq_ibv
+ *   Verbs Tx queue object.
+ *
+ * @return
+ *   0 on success, errno on failure.
+ */
+int
+mlx5_priv_txq_ibv_release(struct priv *priv, struct mlx5_txq_ibv *txq_ibv)
+{
+	(void)priv;
+	assert(txq_ibv);
+	DEBUG("%p: Verbs Tx queue %p: refcnt %d", (void *)priv,
+	      (void *)txq_ibv, rte_atomic32_read(&txq_ibv->refcnt));
+	if (rte_atomic32_dec_and_test(&txq_ibv->refcnt)) {
+		claim_zero(ibv_destroy_qp(txq_ibv->qp));
+		claim_zero(ibv_destroy_cq(txq_ibv->cq));
+		LIST_REMOVE(txq_ibv, next);
+		rte_free(txq_ibv);
+		return 0;
+	}
+	return EBUSY;
+}
+
+/**
+ * Return true if a single reference exists on the object.
+ *
+ * @param priv
+ *   Pointer to private structure.
+ * @param txq_ibv
+ *   Verbs Tx queue object.
+ */
+int
+mlx5_priv_txq_ibv_releasable(struct priv *priv, struct mlx5_txq_ibv *txq_ibv)
+{
+	(void)priv;
+	assert(txq_ibv);
+	return (rte_atomic32_read(&txq_ibv->refcnt) == 1);
+}
+
+/**
+ * Verify that the Verbs Tx queue list is empty.
+ *
+ * @param priv
+ *   Pointer to private structure.
+ *
+ * @return
+ *   The number of objects not released.
+ */
+int
+mlx5_priv_txq_ibv_verify(struct priv *priv)
+{
+	int ret = 0;
+	struct mlx5_txq_ibv *txq_ibv;
+
+	LIST_FOREACH(txq_ibv, &priv->txqsibv, next) {
+		DEBUG("%p: Verbs Tx queue %p still referenced", (void *)priv,
+		      (void *)txq_ibv);
+		++ret;
+	}
 	return ret;
 }
 
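The get/release pair above is the reference-counting convention used for all of these control objects: get increments, release destroys only when the count drops to zero and returns EBUSY otherwise. A condensed, runnable sketch with a hypothetical struct standing in for mlx5_txq_ibv:

#include <errno.h>
#include <stdio.h>
#include <rte_atomic.h>

struct obj {
	rte_atomic32_t refcnt;
};

static struct obj *
obj_get(struct obj *o)
{
	rte_atomic32_inc(&o->refcnt); /* One more user of the object. */
	return o;
}

static int
obj_release(struct obj *o)
{
	/* Destroy resources only when the last reference is dropped. */
	if (rte_atomic32_dec_and_test(&o->refcnt)) {
		printf("last reference gone, destroying\n");
		return 0;
	}
	return EBUSY; /* Another holder still references the object. */
}

int
main(void)
{
	struct obj o;

	rte_atomic32_init(&o.refcnt);
	obj_get(&o);                     /* Creator's reference. */
	obj_get(&o);                     /* Second user. */
	printf("%d\n", obj_release(&o)); /* EBUSY. */
	printf("%d\n", obj_release(&o)); /* 0, destroyed. */
	return 0;
}
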
 /**
- * DPDK callback to configure a TX queue.
+ * Create a DPDK Tx queue.
  *
- * @param dev
- *   Pointer to Ethernet device structure.
+ * @param priv
+ *   Pointer to private structure.
  * @param idx
  *   TX queue index.
  * @param desc
@@ -447,153 +552,236 @@ error:
  *   Number of descriptors to configure in queue.
  * @param socket
  *   NUMA socket on which memory must be allocated.
  * @param[in] conf
- *   Thresholds parameters.
+ *   Thresholds parameters.
  *
  * @return
- *   0 on success, negative errno value on failure.
+ *   A DPDK queue object on success.
  */
-int
-mlx5_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
-		    unsigned int socket, const struct rte_eth_txconf *conf)
+struct mlx5_txq_ctrl*
+mlx5_priv_txq_new(struct priv *priv, uint16_t idx, uint16_t desc,
+		  unsigned int socket,
+		  const struct rte_eth_txconf *conf)
 {
-	struct priv *priv = dev->data->dev_private;
-	struct txq *txq = (*priv->txqs)[idx];
-	struct txq_ctrl *txq_ctrl = container_of(txq, struct txq_ctrl, txq);
-	int ret;
+	const unsigned int max_tso_inline =
+		((MLX5_MAX_TSO_HEADER + (RTE_CACHE_LINE_SIZE - 1)) /
+		 RTE_CACHE_LINE_SIZE);
+	struct mlx5_txq_ctrl *tmpl;
 
-	if (mlx5_is_secondary())
-		return -E_RTE_SECONDARY;
+	tmpl = rte_calloc_socket("TXQ", 1,
+				 sizeof(*tmpl) +
+				 desc * sizeof(struct rte_mbuf *),
+				 0, socket);
+	if (!tmpl)
+		return NULL;
+	assert(desc > MLX5_TX_COMP_THRESH);
+	tmpl->txq.flags = conf->txq_flags;
+	tmpl->priv = priv;
+	tmpl->socket = socket;
+	tmpl->txq.elts_n = log2above(desc);
+	if (priv->mps == MLX5_MPW_ENHANCED)
+		tmpl->txq.mpw_hdr_dseg = priv->mpw_hdr_dseg;
+	/* MRs will be registered in mp2mr[] later. */
+	DEBUG("priv->device_attr.max_qp_wr is %d",
+	      priv->device_attr.orig_attr.max_qp_wr);
+	DEBUG("priv->device_attr.max_sge is %d",
+	      priv->device_attr.orig_attr.max_sge);
+	if (priv->txq_inline && (priv->txqs_n >= priv->txqs_inline)) {
+		unsigned int ds_cnt;
 
-	priv_lock(priv);
-	if (desc <= MLX5_TX_COMP_THRESH) {
-		WARN("%p: number of descriptors requested for TX queue %u"
-		     " must be higher than MLX5_TX_COMP_THRESH, using"
-		     " %u instead of %u",
-		     (void *)dev, idx, MLX5_TX_COMP_THRESH + 1, desc);
-		desc = MLX5_TX_COMP_THRESH + 1;
-	}
-	if (!rte_is_power_of_2(desc)) {
-		desc = 1 << log2above(desc);
-		WARN("%p: increased number of descriptors in TX queue %u"
-		     " to the next power of two (%d)",
-		     (void *)dev, idx, desc);
-	}
-	DEBUG("%p: configuring queue %u for %u descriptors",
-	      (void *)dev, idx, desc);
-	if (idx >= priv->txqs_n) {
-		ERROR("%p: queue index out of range (%u >= %u)",
-		      (void *)dev, idx, priv->txqs_n);
-		priv_unlock(priv);
-		return -EOVERFLOW;
-	}
-	if (txq != NULL) {
-		DEBUG("%p: reusing already allocated queue index %u (%p)",
-		      (void *)dev, idx, (void *)txq);
-		if (priv->started) {
-			priv_unlock(priv);
-			return -EEXIST;
+		tmpl->txq.max_inline =
+			((priv->txq_inline + (RTE_CACHE_LINE_SIZE - 1)) /
+			 RTE_CACHE_LINE_SIZE);
+		tmpl->txq.inline_en = 1;
+		/* TSO and MPS can't be enabled concurrently. */
+		assert(!priv->tso || !priv->mps);
+		if (priv->mps == MLX5_MPW_ENHANCED) {
+			tmpl->txq.inline_max_packet_sz =
+				priv->inline_max_packet_sz;
+			/* To minimize the size of the data set, avoid
+			 * requesting too large a WQ.
+			 */
+			tmpl->max_inline_data =
+				((RTE_MIN(priv->txq_inline,
+					  priv->inline_max_packet_sz) +
+				  (RTE_CACHE_LINE_SIZE - 1)) /
+				 RTE_CACHE_LINE_SIZE) * RTE_CACHE_LINE_SIZE;
+		} else if (priv->tso) {
+			int inline_diff = tmpl->txq.max_inline - max_tso_inline;
+
+			/*
+			 * Adjust inline value as Verbs aggregates
+			 * tso_inline and txq_inline fields.
+			 */
+			tmpl->max_inline_data = inline_diff > 0 ?
+						inline_diff *
+						RTE_CACHE_LINE_SIZE :
+						0;
+		} else {
+			tmpl->max_inline_data =
+				tmpl->txq.max_inline * RTE_CACHE_LINE_SIZE;
 		}
-		(*priv->txqs)[idx] = NULL;
-		txq_cleanup(txq_ctrl);
-	} else {
-		txq_ctrl =
-			rte_calloc_socket("TXQ", 1,
-					  sizeof(*txq_ctrl) +
-					  desc * sizeof(struct rte_mbuf *),
-					  0, socket);
-		if (txq_ctrl == NULL) {
-			ERROR("%p: unable to allocate queue index %u",
-			      (void *)dev, idx);
-			priv_unlock(priv);
-			return -ENOMEM;
+		/*
+		 * Check if the inline size is so large that it would make
+		 * the WQE DS count overflow.
+		 * The calculation accounts for:
+		 *	WQE CTRL (1 DS)
+		 *	WQE ETH (1 DS)
+		 *	Inline part (N DS)
+		 */
+		ds_cnt = 2 + (tmpl->txq.max_inline / MLX5_WQE_DWORD_SIZE);
+		if (ds_cnt > MLX5_DSEG_MAX) {
+			unsigned int max_inline = (MLX5_DSEG_MAX - 2) *
+						  MLX5_WQE_DWORD_SIZE;
+
+			max_inline = max_inline - (max_inline %
						   RTE_CACHE_LINE_SIZE);
+			WARN("txq inline is too large (%d), setting it to "
+			     "the maximum possible: %d",
+			     priv->txq_inline, max_inline);
+			tmpl->txq.max_inline = max_inline / RTE_CACHE_LINE_SIZE;
 		}
 	}
-	ret = txq_ctrl_setup(dev, txq_ctrl, desc, socket, conf);
-	if (ret)
-		rte_free(txq_ctrl);
-	else {
-		txq_ctrl->txq.stats.idx = idx;
-		DEBUG("%p: adding TX queue %p to list",
-		      (void *)dev, (void *)txq_ctrl);
-		(*priv->txqs)[idx] = &txq_ctrl->txq;
-		/* Update send callback. */
-		priv_select_tx_function(priv);
+	if (priv->tso) {
+		tmpl->max_tso_header = max_tso_inline * RTE_CACHE_LINE_SIZE;
+		tmpl->txq.max_inline = RTE_MAX(tmpl->txq.max_inline,
+					       max_tso_inline);
+		tmpl->txq.tso_en = 1;
 	}
-	priv_unlock(priv);
-	return -ret;
+	if (priv->tunnel_en)
+		tmpl->txq.tunnel_en = 1;
+	tmpl->txq.elts =
+		(struct rte_mbuf *(*)[1 << tmpl->txq.elts_n])(tmpl + 1);
+	tmpl->txq.stats.idx = idx;
+	rte_atomic32_inc(&tmpl->refcnt);
+	DEBUG("%p: Tx queue %p: refcnt %d", (void *)priv,
+	      (void *)tmpl, rte_atomic32_read(&tmpl->refcnt));
+	LIST_INSERT_HEAD(&priv->txqsctrl, tmpl, next);
+	return tmpl;
 }
 
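The DS overflow check above caps the inline size so that one control segment (1 DS), one Ethernet segment (1 DS) and the inline part all fit in a WQE. A worked example of the same arithmetic; the MLX5_WQE_DWORD_SIZE (16) and MLX5_DSEG_MAX (63) values are assumed from the mlx5 PRM-derived defines and should be treated as illustrative:

#include <stdio.h>

#define MLX5_WQE_DWORD_SIZE 16 /* Bytes per DS; assumed value. */
#define MLX5_DSEG_MAX 63       /* Max DS per WQE; assumed value. */
#define CACHE_LINE 64

int
main(void)
{
	unsigned int txq_inline = 1024;        /* Requested inline bytes. */
	unsigned int max_inline = txq_inline;  /* Result after clamping. */
	unsigned int ds_cnt = 2 + txq_inline / MLX5_WQE_DWORD_SIZE; /* 66 */

	if (ds_cnt > MLX5_DSEG_MAX) {
		/* 61 DS remain for inline data: 61 * 16 = 976 bytes. */
		max_inline = (MLX5_DSEG_MAX - 2) * MLX5_WQE_DWORD_SIZE;
		/* Round down to a cache-line multiple: 976 -> 960. */
		max_inline -= max_inline % CACHE_LINE;
	}
	printf("inline clamped from %u to %u bytes\n", txq_inline, max_inline);
	return 0;
}
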
 /**
- * DPDK callback to release a TX queue.
+ * Get a Tx queue.
  *
- * @param dpdk_txq
- *   Generic TX queue pointer.
+ * @param priv
+ *   Pointer to private structure.
+ * @param idx
+ *   TX queue index.
+ *
+ * @return
+ *   A pointer to the queue if it exists.
  */
-void
-mlx5_tx_queue_release(void *dpdk_txq)
+struct mlx5_txq_ctrl*
+mlx5_priv_txq_get(struct priv *priv, uint16_t idx)
 {
-	struct txq *txq = (struct txq *)dpdk_txq;
-	struct txq_ctrl *txq_ctrl;
-	struct priv *priv;
-	unsigned int i;
+	struct mlx5_txq_ctrl *ctrl = NULL;
 
-	if (mlx5_is_secondary())
-		return;
+	if ((*priv->txqs)[idx]) {
+		ctrl = container_of((*priv->txqs)[idx], struct mlx5_txq_ctrl,
+				    txq);
+		unsigned int i;
 
-	if (txq == NULL)
-		return;
-	txq_ctrl = container_of(txq, struct txq_ctrl, txq);
-	priv = txq_ctrl->priv;
-	priv_lock(priv);
-	for (i = 0; (i != priv->txqs_n); ++i)
-		if ((*priv->txqs)[i] == txq) {
-			DEBUG("%p: removing TX queue %p from list",
-			      (void *)priv->dev, (void *)txq_ctrl);
-			(*priv->txqs)[i] = NULL;
-			break;
+		mlx5_priv_txq_ibv_get(priv, idx);
+		for (i = 0; i != MLX5_PMD_TX_MP_CACHE; ++i) {
+			struct mlx5_mr *mr = NULL;
+
+			(void)mr;
+			if (ctrl->txq.mp2mr[i]) {
+				mr = priv_mr_get(priv, ctrl->txq.mp2mr[i]->mp);
+				assert(mr);
+			}
 		}
-	txq_cleanup(txq_ctrl);
-	rte_free(txq_ctrl);
-	priv_unlock(priv);
+		rte_atomic32_inc(&ctrl->refcnt);
+		DEBUG("%p: Tx queue %p: refcnt %d", (void *)priv,
+		      (void *)ctrl, rte_atomic32_read(&ctrl->refcnt));
+	}
+	return ctrl;
 }
 
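mlx5_priv_txq_get() above recovers the control structure from the data-plane pointer stored in (*priv->txqs)[idx]. This works because mlx5_txq_data is embedded in mlx5_txq_ctrl and container_of() undoes the embedding with pointer arithmetic; a generic stand-alone sketch with stand-in types:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct txq_data { unsigned int elts_n; };
struct txq_ctrl {
	int socket;
	struct txq_data txq; /* Embedded, as in mlx5_txq_ctrl. */
};

int
main(void)
{
	struct txq_ctrl ctrl = { .socket = 0, .txq = { .elts_n = 10 } };
	struct txq_data *txq = &ctrl.txq; /* What (*priv->txqs)[i] holds. */
	struct txq_ctrl *back = container_of(txq, struct txq_ctrl, txq);

	printf("recovered ctrl: %d\n", back == &ctrl); /* Prints 1. */
	return 0;
}
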
 /**
- * DPDK callback for TX in secondary processes.
+ * Release a Tx queue.
  *
- * This function configures all queues from primary process information
- * if necessary before reverting to the normal TX burst callback.
- *
- * @param dpdk_txq
- *   Generic pointer to TX queue structure.
- * @param[in] pkts
- *   Packets to transmit.
- * @param pkts_n
- *   Number of packets in array.
+ * @param priv
+ *   Pointer to private structure.
+ * @param idx
+ *   TX queue index.
  *
  * @return
- *   Number of packets successfully transmitted (<= pkts_n).
+ *   0 on success, errno on failure.
  */
-uint16_t
-mlx5_tx_burst_secondary_setup(void *dpdk_txq, struct rte_mbuf **pkts,
-			      uint16_t pkts_n)
+int
+mlx5_priv_txq_release(struct priv *priv, uint16_t idx)
 {
-	struct txq *txq = dpdk_txq;
-	struct txq_ctrl *txq_ctrl = container_of(txq, struct txq_ctrl, txq);
-	struct priv *priv = mlx5_secondary_data_setup(txq_ctrl->priv);
-	struct priv *primary_priv;
-	unsigned int index;
+	unsigned int i;
+	struct mlx5_txq_ctrl *txq;
 
-	if (priv == NULL)
+	if (!(*priv->txqs)[idx])
 		return 0;
-	primary_priv =
-		mlx5_secondary_data[priv->dev->data->port_id].primary_priv;
-	/* Look for queue index in both private structures. */
-	for (index = 0; index != priv->txqs_n; ++index)
-		if (((*primary_priv->txqs)[index] == txq) ||
-		    ((*priv->txqs)[index] == txq))
-			break;
-	if (index == priv->txqs_n)
+	txq = container_of((*priv->txqs)[idx], struct mlx5_txq_ctrl, txq);
+	DEBUG("%p: Tx queue %p: refcnt %d", (void *)priv,
+	      (void *)txq, rte_atomic32_read(&txq->refcnt));
+	if (txq->ibv) {
+		int ret;
+
+		ret = mlx5_priv_txq_ibv_release(priv, txq->ibv);
+		if (!ret)
+			txq->ibv = NULL;
+	}
+	for (i = 0; i != MLX5_PMD_TX_MP_CACHE; ++i) {
+		if (txq->txq.mp2mr[i]) {
+			priv_mr_release(priv, txq->txq.mp2mr[i]);
+			txq->txq.mp2mr[i] = NULL;
+		}
+	}
+	if (rte_atomic32_dec_and_test(&txq->refcnt)) {
+		txq_free_elts(txq);
+		LIST_REMOVE(txq, next);
+		rte_free(txq);
+		(*priv->txqs)[idx] = NULL;
 		return 0;
-	txq = (*priv->txqs)[index];
-	return priv->dev->tx_pkt_burst(txq, pkts, pkts_n);
+	}
+	return EBUSY;
+}
+
+/**
+ * Verify if the queue can be released.
+ *
+ * @param priv
+ *   Pointer to private structure.
+ * @param idx
+ *   TX queue index.
+ *
+ * @return
+ *   1 if the queue can be released, 0 otherwise, -1 if it does not exist.
+ */
+int
+mlx5_priv_txq_releasable(struct priv *priv, uint16_t idx)
+{
+	struct mlx5_txq_ctrl *txq;
+
+	if (!(*priv->txqs)[idx])
+		return -1;
+	txq = container_of((*priv->txqs)[idx], struct mlx5_txq_ctrl, txq);
+	return (rte_atomic32_read(&txq->refcnt) == 1);
+}
+
+/**
+ * Verify that the Tx queue list is empty.
+ *
+ * @param priv
+ *   Pointer to private structure.
+ *
+ * @return
+ *   The number of objects not released.
+ */
+int
+mlx5_priv_txq_verify(struct priv *priv)
+{
+	struct mlx5_txq_ctrl *txq;
+	int ret = 0;
+
+	LIST_FOREACH(txq, &priv->txqsctrl, next) {
+		DEBUG("%p: Tx Queue %p still referenced", (void *)priv,
+		      (void *)txq);
+		++ret;
+	}
 	return ret;
 }
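Taken together, the two verify helpers act as leak detectors for device teardown: once every queue has been released, both lists must be empty. A hypothetical caller, in the spirit of what mlx5_dev_close() is expected to do (driver-context sketch, not code from this patch):

static void
tx_queues_cleanup(struct priv *priv)
{
	unsigned int i;

	for (i = 0; i != priv->txqs_n; ++i)
		mlx5_priv_txq_release(priv, i);
	/* Any non-zero count here means a reference was leaked. */
	if (mlx5_priv_txq_ibv_verify(priv))
		WARN("%p: some Verbs Tx queues still remain", (void *)priv);
	if (mlx5_priv_txq_verify(priv))
		WARN("%p: some Tx queues still remain", (void *)priv);
}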