 * Copyright 2015 6WIND S.A.
 * Copyright 2015 Mellanox.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in
 *     the documentation and/or other materials provided with the
 *     distribution.
 *   * Neither the name of 6WIND S.A. nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include <sys/queue.h>

/* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
#pragma GCC diagnostic ignored "-Wpedantic"
#include <infiniband/verbs.h>
#include <infiniband/mlx5dv.h>
#pragma GCC diagnostic error "-Wpedantic"

#include <rte_malloc.h>
#include <rte_ethdev.h>
#include <rte_common.h>
#include <rte_interrupts.h>
#include <rte_debug.h>

#include "mlx5_rxtx.h"
#include "mlx5_utils.h"
#include "mlx5_autoconf.h"
#include "mlx5_defs.h"
/* Default RSS hash key also used for ConnectX-3. */
uint8_t rss_hash_default_key[] = {
	0x2c, 0xc6, 0x81, 0xd1,
	0x5b, 0xdb, 0xf4, 0xf7,
	0xfc, 0xa2, 0x83, 0x19,
	0xdb, 0x1a, 0x3e, 0x94,
	0x6b, 0x9e, 0x38, 0xd9,
	0x2c, 0x9c, 0x03, 0xd1,
	0xad, 0x99, 0x44, 0xa7,
	0xd9, 0x56, 0x3d, 0x59,
	0x06, 0x3c, 0x25, 0xf3,
	0xfc, 0x1f, 0xdc, 0x2a,
};

/* Length of the default RSS hash key. */
const size_t rss_hash_default_key_len = sizeof(rss_hash_default_key);
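/*
 * Hypothetical usage sketch (the real callers are outside this excerpt):
 * the default key and its length are presumably handed to mlx5_hrxq_new()
 * further below when an RSS hash Rx queue is created, e.g.:
 *
 *   struct mlx5_hrxq *hrxq;
 *
 *   hrxq = mlx5_hrxq_new(dev, rss_hash_default_key,
 *                        rss_hash_default_key_len,
 *                        hash_fields, queues, queues_n);
 */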
 * Allocate RX queue elements.
 * Pointer to RX queue structure.
 * 0 on success, a negative errno value otherwise and rte_errno is set.
rxq_alloc_elts(struct mlx5_rxq_ctrl *rxq_ctrl)
const unsigned int sges_n = 1 << rxq_ctrl->rxq.sges_n;
unsigned int elts_n = 1 << rxq_ctrl->rxq.elts_n;
/* Iterate on segments. */
for (i = 0; (i != elts_n); ++i) {
struct rte_mbuf *buf;
buf = rte_pktmbuf_alloc(rxq_ctrl->rxq.mp);
DRV_LOG(ERR, "port %u empty mbuf pool",
PORT_ID(rxq_ctrl->priv));
/* Headroom is reserved by rte_pktmbuf_alloc(). */
assert(DATA_OFF(buf) == RTE_PKTMBUF_HEADROOM);
/* Buffer is supposed to be empty. */
assert(rte_pktmbuf_data_len(buf) == 0);
assert(rte_pktmbuf_pkt_len(buf) == 0);
/* Only the first segment keeps headroom. */
if (i % sges_n)
SET_DATA_OFF(buf, 0);
PORT(buf) = rxq_ctrl->rxq.port_id;
DATA_LEN(buf) = rte_pktmbuf_tailroom(buf);
PKT_LEN(buf) = DATA_LEN(buf);
(*rxq_ctrl->rxq.elts)[i] = buf;
/* If Rx vector is activated. */
if (mlx5_rxq_check_vec_support(&rxq_ctrl->rxq) > 0) {
struct mlx5_rxq_data *rxq = &rxq_ctrl->rxq;
struct rte_mbuf *mbuf_init = &rxq->fake_mbuf;
/* Initialize default rearm_data for vPMD. */
mbuf_init->data_off = RTE_PKTMBUF_HEADROOM;
rte_mbuf_refcnt_set(mbuf_init, 1);
mbuf_init->nb_segs = 1;
mbuf_init->port = rxq->port_id;
/* Prevent compiler reordering: rearm_data covers previous fields. */
rte_compiler_barrier();
rxq->mbuf_initializer =
*(uint64_t *)&mbuf_init->rearm_data;
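/*
 * mbuf_initializer is read back by the vectorized Rx path: rearm_data is
 * the 64-bit marker in struct rte_mbuf that overlays data_off, refcnt,
 * nb_segs and port, so a single 8-byte store rearms each mbuf (sketch of
 * intent; the consumer lives in the mlx5 vectorized Rx code, not shown
 * in this excerpt).
 */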
/* Padding with a fake mbuf for vectorized Rx. */
for (j = 0; j < MLX5_VPMD_DESCS_PER_LOOP; ++j)
(*rxq->elts)[elts_n + j] = &rxq->fake_mbuf;
"port %u Rx queue %u allocated and configured %u segments"
PORT_ID(rxq_ctrl->priv), rxq_ctrl->idx, elts_n,
elts_n / (1 << rxq_ctrl->rxq.sges_n));
err = rte_errno; /* Save rte_errno before cleanup. */
for (i = 0; (i != elts_n); ++i) {
if ((*rxq_ctrl->rxq.elts)[i] != NULL)
rte_pktmbuf_free_seg((*rxq_ctrl->rxq.elts)[i]);
(*rxq_ctrl->rxq.elts)[i] = NULL;
DRV_LOG(DEBUG, "port %u Rx queue %u failed, freed everything",
PORT_ID(rxq_ctrl->priv), rxq_ctrl->idx);
rte_errno = err; /* Restore rte_errno. */
 * Free RX queue elements.
 * Pointer to RX queue structure.
rxq_free_elts(struct mlx5_rxq_ctrl *rxq_ctrl)
struct mlx5_rxq_data *rxq = &rxq_ctrl->rxq;
const uint16_t q_n = (1 << rxq->elts_n);
const uint16_t q_mask = q_n - 1;
uint16_t used = q_n - (rxq->rq_ci - rxq->rq_pi);
DRV_LOG(DEBUG, "port %u Rx queue %u freeing WRs",
PORT_ID(rxq_ctrl->priv), rxq_ctrl->idx);
if (rxq->elts == NULL)
/* Some mbufs in the ring still belong to the application; they cannot be freed. */
if (mlx5_rxq_check_vec_support(rxq) > 0) {
for (i = 0; i < used; ++i)
(*rxq->elts)[(rxq->rq_ci + i) & q_mask] = NULL;
rxq->rq_pi = rxq->rq_ci;
for (i = 0; (i != (1u << rxq->elts_n)); ++i) {
if ((*rxq->elts)[i] != NULL)
rte_pktmbuf_free_seg((*rxq->elts)[i]);
(*rxq->elts)[i] = NULL;
 * Clean up an RX queue.
 * Destroy objects, free allocated memory and reset the structure for reuse.
 * Pointer to RX queue structure.
mlx5_rxq_cleanup(struct mlx5_rxq_ctrl *rxq_ctrl)
DRV_LOG(DEBUG, "port %u cleaning up Rx queue %u",
PORT_ID(rxq_ctrl->priv), rxq_ctrl->idx);
mlx5_rxq_ibv_release(rxq_ctrl->ibv);
memset(rxq_ctrl, 0, sizeof(*rxq_ctrl));
 * Pointer to Ethernet device structure.
 * Number of descriptors to configure in queue.
 * NUMA socket on which memory must be allocated.
 * Threshold parameters.
 * Memory pool for buffer allocations.
 * 0 on success, a negative errno value otherwise and rte_errno is set.
mlx5_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
const struct rte_eth_rxconf *conf __rte_unused,
struct rte_mempool *mp)
struct priv *priv = dev->data->dev_private;
struct mlx5_rxq_data *rxq = (*priv->rxqs)[idx];
struct mlx5_rxq_ctrl *rxq_ctrl =
container_of(rxq, struct mlx5_rxq_ctrl, rxq);
if (!rte_is_power_of_2(desc)) {
desc = 1 << log2above(desc);
"port %u increased number of descriptors in Rx queue %u"
" to the next power of two (%d)",
dev->data->port_id, idx, desc);
DRV_LOG(DEBUG, "port %u configuring Rx queue %u for %u descriptors",
dev->data->port_id, idx, desc);
if (idx >= priv->rxqs_n) {
DRV_LOG(ERR, "port %u Rx queue index out of range (%u >= %u)",
dev->data->port_id, idx, priv->rxqs_n);
rte_errno = EOVERFLOW;
if (!mlx5_rxq_releasable(dev, idx)) {
DRV_LOG(ERR, "port %u unable to release queue index %u",
dev->data->port_id, idx);
mlx5_rxq_release(dev, idx);
rxq_ctrl = mlx5_rxq_new(dev, idx, desc, socket, mp);
DRV_LOG(ERR, "port %u unable to allocate queue index %u",
dev->data->port_id, idx);
DRV_LOG(DEBUG, "port %u adding Rx queue %u to list",
dev->data->port_id, idx);
(*priv->rxqs)[idx] = &rxq_ctrl->rxq;
 * DPDK callback to release an RX queue.
 * Generic RX queue pointer.
mlx5_rx_queue_release(void *dpdk_rxq)
struct mlx5_rxq_data *rxq = (struct mlx5_rxq_data *)dpdk_rxq;
struct mlx5_rxq_ctrl *rxq_ctrl;
rxq_ctrl = container_of(rxq, struct mlx5_rxq_ctrl, rxq);
priv = rxq_ctrl->priv;
if (!mlx5_rxq_releasable(ETH_DEV(priv), rxq_ctrl->rxq.stats.idx))
rte_panic("port %u Rx queue %u is still used by a flow and"
" cannot be removed\n",
PORT_ID(priv), rxq_ctrl->idx);
mlx5_rxq_release(ETH_DEV(priv), rxq_ctrl->rxq.stats.idx);
 * Allocate queue vector and fill epoll fd list for Rx interrupts.
 * Pointer to Ethernet device.
 * 0 on success, a negative errno value otherwise and rte_errno is set.
mlx5_rx_intr_vec_enable(struct rte_eth_dev *dev)
struct priv *priv = dev->data->dev_private;
unsigned int rxqs_n = priv->rxqs_n;
unsigned int n = RTE_MIN(rxqs_n, (uint32_t)RTE_MAX_RXTX_INTR_VEC_ID);
unsigned int count = 0;
struct rte_intr_handle *intr_handle = dev->intr_handle;
if (!dev->data->dev_conf.intr_conf.rxq)
mlx5_rx_intr_vec_disable(dev);
intr_handle->intr_vec = malloc(n * sizeof(intr_handle->intr_vec[0]));
if (intr_handle->intr_vec == NULL) {
"port %u failed to allocate memory for interrupt"
" vector, Rx interrupts will not be supported",
intr_handle->type = RTE_INTR_HANDLE_EXT;
for (i = 0; i != n; ++i) {
/* This rxq ibv must not be released in this function. */
struct mlx5_rxq_ibv *rxq_ibv = mlx5_rxq_ibv_get(dev, i);
/* Skip queues that cannot request interrupts. */
if (!rxq_ibv || !rxq_ibv->channel) {
/* Use invalid intr_vec[] index to disable entry. */
intr_handle->intr_vec[i] =
RTE_INTR_VEC_RXTX_OFFSET +
RTE_MAX_RXTX_INTR_VEC_ID;
if (count >= RTE_MAX_RXTX_INTR_VEC_ID) {
"port %u too many Rx queues for interrupt"
" vector size (%d), Rx interrupts cannot be"
dev->data->port_id, RTE_MAX_RXTX_INTR_VEC_ID);
mlx5_rx_intr_vec_disable(dev);
fd = rxq_ibv->channel->fd;
flags = fcntl(fd, F_GETFL);
rc = fcntl(fd, F_SETFL, flags | O_NONBLOCK);
"port %u failed to make Rx interrupt file"
" descriptor %d non-blocking for queue index"
dev->data->port_id, fd, i);
mlx5_rx_intr_vec_disable(dev);
intr_handle->intr_vec[i] = RTE_INTR_VEC_RXTX_OFFSET + count;
intr_handle->efds[count] = fd;
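/*
 * At this point queue i is mapped to epoll slot "count":
 * intr_vec[i] - RTE_INTR_VEC_RXTX_OFFSET selects the slot and
 * efds[count] holds the (now non-blocking) completion channel fd that
 * rte_intr_rx_ctl() will wait on (description of the two assignments
 * above; the epoll wiring itself lives in the EAL).
 */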
mlx5_rx_intr_vec_disable(dev);
intr_handle->nb_efd = count;
 * Clean up Rx interrupt handler.
 * Pointer to Ethernet device.
mlx5_rx_intr_vec_disable(struct rte_eth_dev *dev)
struct priv *priv = dev->data->dev_private;
struct rte_intr_handle *intr_handle = dev->intr_handle;
unsigned int rxqs_n = priv->rxqs_n;
unsigned int n = RTE_MIN(rxqs_n, (uint32_t)RTE_MAX_RXTX_INTR_VEC_ID);
if (!dev->data->dev_conf.intr_conf.rxq)
if (!intr_handle->intr_vec)
for (i = 0; i != n; ++i) {
struct mlx5_rxq_ctrl *rxq_ctrl;
struct mlx5_rxq_data *rxq_data;
if (intr_handle->intr_vec[i] == RTE_INTR_VEC_RXTX_OFFSET +
RTE_MAX_RXTX_INTR_VEC_ID)
/* Access the queue directly to release the reference kept in mlx5_rx_intr_vec_enable(). */
rxq_data = (*priv->rxqs)[i];
rxq_ctrl = container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
mlx5_rxq_ibv_release(rxq_ctrl->ibv);
rte_intr_free_epoll_fd(intr_handle);
if (intr_handle->intr_vec)
free(intr_handle->intr_vec);
intr_handle->nb_efd = 0;
intr_handle->intr_vec = NULL;
 * MLX5 CQ notification.
 * Pointer to receive queue structure.
 * Sequence number per receive queue.
mlx5_arm_cq(struct mlx5_rxq_data *rxq, int sq_n_rxq)
uint32_t doorbell_hi;
void *cq_db_reg = (char *)rxq->cq_uar + MLX5_CQ_DOORBELL;
sq_n = sq_n_rxq & MLX5_CQ_SQN_MASK;
doorbell_hi = sq_n << MLX5_CQ_SQN_OFFSET | (rxq->cq_ci & MLX5_CI_MASK);
doorbell = (uint64_t)doorbell_hi << 32;
doorbell |= rxq->cqn;
rxq->cq_db[MLX5_CQ_ARM_DB] = rte_cpu_to_be_32(doorbell_hi);
rte_write64(rte_cpu_to_be_64(doorbell), cq_db_reg);
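/*
 * Arming is a two-step sequence: the arm sequence number and current CQ
 * consumer index are written to the CQ doorbell record (cq_db), then the
 * same 32 bits, combined with the CQ number in the low word, are written
 * as a 64-bit big-endian value to the arm doorbell register in the UAR
 * page so the HCA raises an event on the next completion (description of
 * the statements above).
 */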
 * DPDK callback for Rx queue interrupt enable.
 * Pointer to Ethernet device structure.
 * 0 on success, a negative errno value otherwise and rte_errno is set.
mlx5_rx_intr_enable(struct rte_eth_dev *dev, uint16_t rx_queue_id)
struct priv *priv = dev->data->dev_private;
struct mlx5_rxq_data *rxq_data;
struct mlx5_rxq_ctrl *rxq_ctrl;
rxq_data = (*priv->rxqs)[rx_queue_id];
rxq_ctrl = container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
struct mlx5_rxq_ibv *rxq_ibv;
rxq_ibv = mlx5_rxq_ibv_get(dev, rx_queue_id);
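/*
 * The CQ is armed with the queue's current arm sequence number and the
 * Verbs object reference taken just above is dropped right away, so
 * enabling the interrupt does not pin the object (the sequence number is
 * incremented in mlx5_rx_intr_disable() once the event is acknowledged).
 */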
mlx5_arm_cq(rxq_data, rxq_data->cq_arm_sn);
mlx5_rxq_ibv_release(rxq_ibv);
 * DPDK callback for Rx queue interrupt disable.
 * Pointer to Ethernet device structure.
 * 0 on success, a negative errno value otherwise and rte_errno is set.
mlx5_rx_intr_disable(struct rte_eth_dev *dev, uint16_t rx_queue_id)
struct priv *priv = dev->data->dev_private;
struct mlx5_rxq_data *rxq_data;
struct mlx5_rxq_ctrl *rxq_ctrl;
struct mlx5_rxq_ibv *rxq_ibv = NULL;
struct ibv_cq *ev_cq;
rxq_data = (*priv->rxqs)[rx_queue_id];
rxq_ctrl = container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
rxq_ibv = mlx5_rxq_ibv_get(dev, rx_queue_id);
ret = ibv_get_cq_event(rxq_ibv->channel, &ev_cq, &ev_ctx);
if (ret || ev_cq != rxq_ibv->cq) {
rxq_data->cq_arm_sn++;
ibv_ack_cq_events(rxq_ibv->cq, 1);
ret = rte_errno; /* Save rte_errno before cleanup. */
mlx5_rxq_ibv_release(rxq_ibv);
DRV_LOG(WARNING, "port %u unable to disable interrupt on Rx queue %d",
dev->data->port_id, rx_queue_id);
rte_errno = ret; /* Restore rte_errno. */
 * Create the Rx queue Verbs object.
 * Pointer to Ethernet device.
 * Queue index in DPDK Rx queue array.
 * The Verbs object initialised, NULL otherwise and rte_errno is set.
struct mlx5_rxq_ibv *
mlx5_rxq_ibv_new(struct rte_eth_dev *dev, uint16_t idx)
struct priv *priv = dev->data->dev_private;
struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx];
struct mlx5_rxq_ctrl *rxq_ctrl =
container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
struct ibv_wq_attr mod;
struct ibv_cq_init_attr_ex ibv;
struct mlx5dv_cq_init_attr mlx5;
struct ibv_wq_init_attr wq;
struct ibv_cq_ex cq_attr;
unsigned int cqe_n = (1 << rxq_data->elts_n) - 1;
struct mlx5_rxq_ibv *tmpl;
struct mlx5dv_cq cq_info;
struct mlx5dv_rwq rwq;
struct mlx5dv_obj obj;
assert(!rxq_ctrl->ibv);
priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_RX_QUEUE;
priv->verbs_alloc_ctx.obj = rxq_ctrl;
tmpl = rte_calloc_socket(__func__, 1, sizeof(*tmpl), 0,
"port %u Rx queue %u cannot allocate verbs resources",
dev->data->port_id, rxq_ctrl->idx);
tmpl->rxq_ctrl = rxq_ctrl;
/* Use the entire RX mempool as the memory region. */
tmpl->mr = mlx5_mr_get(dev, rxq_data->mp);
tmpl->mr = mlx5_mr_new(dev, rxq_data->mp);
DRV_LOG(ERR, "port %u: memory region creation failure",
tmpl->channel = ibv_create_comp_channel(priv->ctx);
if (!tmpl->channel) {
DRV_LOG(ERR, "port %u: comp channel creation failure",
attr.cq.ibv = (struct ibv_cq_init_attr_ex){
.channel = tmpl->channel,
attr.cq.mlx5 = (struct mlx5dv_cq_init_attr){
if (priv->cqe_comp && !rxq_data->hw_timestamp) {
attr.cq.mlx5.comp_mask |=
MLX5DV_CQ_INIT_ATTR_MASK_COMPRESSED_CQE;
attr.cq.mlx5.cqe_comp_res_format = MLX5DV_CQE_RES_FORMAT_HASH;
/* For vectorized Rx, the CQE count must not be doubled so that cq_ci and rq_ci stay aligned. */
if (mlx5_rxq_check_vec_support(rxq_data) < 0)
attr.cq.ibv.cqe *= 2;
} else if (priv->cqe_comp && rxq_data->hw_timestamp) {
"port %u Rx CQE compression is disabled for HW"
tmpl->cq = ibv_cq_ex_to_cq(mlx5dv_create_cq(priv->ctx, &attr.cq.ibv,
if (tmpl->cq == NULL) {
DRV_LOG(ERR, "port %u Rx queue %u CQ creation failure",
dev->data->port_id, idx);
DRV_LOG(DEBUG, "port %u priv->device_attr.max_qp_wr is %d",
dev->data->port_id, priv->device_attr.orig_attr.max_qp_wr);
DRV_LOG(DEBUG, "port %u priv->device_attr.max_sge is %d",
dev->data->port_id, priv->device_attr.orig_attr.max_sge);
attr.wq = (struct ibv_wq_init_attr){
.wq_context = NULL, /* Could be useful in the future. */
.wq_type = IBV_WQT_RQ,
/* Max number of outstanding WRs. */
.max_wr = (1 << rxq_data->elts_n) >> rxq_data->sges_n,
/* Max number of scatter/gather elements in a WR. */
.max_sge = 1 << rxq_data->sges_n,
IBV_WQ_FLAGS_CVLAN_STRIPPING |
.create_flags = (rxq_data->vlan_strip ?
IBV_WQ_FLAGS_CVLAN_STRIPPING :
/* By default, FCS (CRC) is stripped by hardware. */
if (rxq_data->crc_present) {
attr.wq.create_flags |= IBV_WQ_FLAGS_SCATTER_FCS;
attr.wq.comp_mask |= IBV_WQ_INIT_ATTR_FLAGS;
#ifdef HAVE_IBV_WQ_FLAG_RX_END_PADDING
if (priv->hw_padding) {
attr.wq.create_flags |= IBV_WQ_FLAG_RX_END_PADDING;
attr.wq.comp_mask |= IBV_WQ_INIT_ATTR_FLAGS;
tmpl->wq = ibv_create_wq(priv->ctx, &attr.wq);
if (tmpl->wq == NULL) {
DRV_LOG(ERR, "port %u Rx queue %u WQ creation failure",
dev->data->port_id, idx);
/* Make sure the number of WRs*SGEs matches expectations, since a queue cannot allocate more than "desc" buffers. */
if (((int)attr.wq.max_wr !=
((1 << rxq_data->elts_n) >> rxq_data->sges_n)) ||
((int)attr.wq.max_sge != (1 << rxq_data->sges_n))) {
"port %u Rx queue %u requested %u*%u but got %u*%u"
dev->data->port_id, idx,
((1 << rxq_data->elts_n) >> rxq_data->sges_n),
(1 << rxq_data->sges_n),
attr.wq.max_wr, attr.wq.max_sge);
/* Change queue state to ready. */
mod = (struct ibv_wq_attr){
.attr_mask = IBV_WQ_ATTR_STATE,
.wq_state = IBV_WQS_RDY,
ret = ibv_modify_wq(tmpl->wq, &mod);
"port %u Rx queue %u WQ state to IBV_WQS_RDY failed",
dev->data->port_id, idx);
obj.cq.in = tmpl->cq;
obj.cq.out = &cq_info;
obj.rwq.in = tmpl->wq;
ret = mlx5dv_init_obj(&obj, MLX5DV_OBJ_CQ | MLX5DV_OBJ_RWQ);
if (cq_info.cqe_size != RTE_CACHE_LINE_SIZE) {
"port %u wrong MLX5_CQE_SIZE environment variable"
" value: it should be set to %u",
dev->data->port_id, RTE_CACHE_LINE_SIZE);
/* Fill the rings. */
rxq_data->wqes = (volatile struct mlx5_wqe_data_seg (*)[])
for (i = 0; (i != (unsigned int)(1 << rxq_data->elts_n)); ++i) {
struct rte_mbuf *buf = (*rxq_data->elts)[i];
volatile struct mlx5_wqe_data_seg *scat = &(*rxq_data->wqes)[i];
/* scat->addr must be able to store a pointer. */
assert(sizeof(scat->addr) >= sizeof(uintptr_t));
*scat = (struct mlx5_wqe_data_seg){
.addr = rte_cpu_to_be_64(rte_pktmbuf_mtod(buf,
.byte_count = rte_cpu_to_be_32(DATA_LEN(buf)),
.lkey = tmpl->mr->lkey,
rxq_data->rq_db = rwq.dbrec;
rxq_data->cqe_n = log2above(cq_info.cqe_cnt);
rxq_data->zip = (struct rxq_zip){
rxq_data->cq_db = cq_info.dbrec;
rxq_data->cqes = (volatile struct mlx5_cqe (*)[])(uintptr_t)cq_info.buf;
rxq_data->cq_uar = cq_info.cq_uar;
rxq_data->cqn = cq_info.cqn;
rxq_data->cq_arm_sn = 0;
/* Update doorbell counter. */
rxq_data->rq_ci = (1 << rxq_data->elts_n) >> rxq_data->sges_n;
*rxq_data->rq_db = rte_cpu_to_be_32(rxq_data->rq_ci);
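/*
 * rq_ci is expressed in WQEs: each WQE carries (1 << sges_n) data
 * segments, so posting the whole ring amounts to elts_n >> sges_n WQEs.
 * Writing it to the RQ doorbell record publishes the freshly filled
 * buffers to the HCA (description of the two statements above).
 */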
767 DRV_LOG(DEBUG, "port %u rxq %u updated with %p", dev->data->port_id,
769 rte_atomic32_inc(&tmpl->refcnt);
770 DRV_LOG(DEBUG, "port %u Verbs Rx queue %u: refcnt %d",
771 dev->data->port_id, idx, rte_atomic32_read(&tmpl->refcnt));
772 LIST_INSERT_HEAD(&priv->rxqsibv, tmpl, next);
773 priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_NONE;
776 ret = rte_errno; /* Save rte_errno before cleanup. */
778 claim_zero(ibv_destroy_wq(tmpl->wq));
780 claim_zero(ibv_destroy_cq(tmpl->cq));
782 claim_zero(ibv_destroy_comp_channel(tmpl->channel));
784 mlx5_mr_release(tmpl->mr);
785 priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_NONE;
786 rte_errno = ret; /* Restore rte_errno. */
 * Get an Rx queue Verbs object.
 * Pointer to Ethernet device.
 * Queue index in DPDK Rx queue array.
 * The Verbs object if it exists.
struct mlx5_rxq_ibv *
mlx5_rxq_ibv_get(struct rte_eth_dev *dev, uint16_t idx)
struct priv *priv = dev->data->dev_private;
struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx];
struct mlx5_rxq_ctrl *rxq_ctrl;
if (idx >= priv->rxqs_n)
rxq_ctrl = container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
mlx5_mr_get(dev, rxq_data->mp);
rte_atomic32_inc(&rxq_ctrl->ibv->refcnt);
DRV_LOG(DEBUG, "port %u Verbs Rx queue %u: refcnt %d",
dev->data->port_id, rxq_ctrl->idx,
rte_atomic32_read(&rxq_ctrl->ibv->refcnt));
return rxq_ctrl->ibv;
 * Release a Verbs Rx queue object.
 * Verbs Rx queue object.
 * 1 while a reference on it exists, 0 when freed.
mlx5_rxq_ibv_release(struct mlx5_rxq_ibv *rxq_ibv)
ret = mlx5_mr_release(rxq_ibv->mr);
DRV_LOG(DEBUG, "port %u Verbs Rx queue %u: refcnt %d",
PORT_ID(rxq_ibv->rxq_ctrl->priv),
rxq_ibv->rxq_ctrl->idx, rte_atomic32_read(&rxq_ibv->refcnt));
if (rte_atomic32_dec_and_test(&rxq_ibv->refcnt)) {
rxq_free_elts(rxq_ibv->rxq_ctrl);
claim_zero(ibv_destroy_wq(rxq_ibv->wq));
claim_zero(ibv_destroy_cq(rxq_ibv->cq));
if (rxq_ibv->channel)
claim_zero(ibv_destroy_comp_channel(rxq_ibv->channel));
LIST_REMOVE(rxq_ibv, next);
 * Verify the Verbs Rx queue list is empty.
 * Pointer to Ethernet device.
 * The number of objects not released.
mlx5_rxq_ibv_verify(struct rte_eth_dev *dev)
struct priv *priv = dev->data->dev_private;
struct mlx5_rxq_ibv *rxq_ibv;
LIST_FOREACH(rxq_ibv, &priv->rxqsibv, next) {
DRV_LOG(DEBUG, "port %u Verbs Rx queue %u still referenced",
dev->data->port_id, rxq_ibv->rxq_ctrl->idx);
 * Return true if a single reference exists on the object.
 * Verbs Rx queue object.
mlx5_rxq_ibv_releasable(struct mlx5_rxq_ibv *rxq_ibv)
return (rte_atomic32_read(&rxq_ibv->refcnt) == 1);
 * Create a DPDK Rx queue.
 * Pointer to Ethernet device.
 * Number of descriptors to configure in queue.
 * NUMA socket on which memory must be allocated.
 * A DPDK queue object on success, NULL otherwise and rte_errno is set.
struct mlx5_rxq_ctrl *
mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
unsigned int socket, struct rte_mempool *mp)
struct priv *priv = dev->data->dev_private;
struct mlx5_rxq_ctrl *tmpl;
const uint16_t desc_n =
desc + priv->rx_vec_en * MLX5_VPMD_DESCS_PER_LOOP;
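/*
 * When the vectorized Rx path is enabled, MLX5_VPMD_DESCS_PER_LOOP extra
 * entries are reserved at the end of elts[]; rxq_alloc_elts() fills them
 * with the fake mbuf so the vPMD can always process a full loop worth of
 * descriptors (interpretation of the sizing above and of the padding loop
 * in rxq_alloc_elts()).
 */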
unsigned int mb_len = rte_pktmbuf_data_room_size(mp);
tmpl = rte_calloc_socket("RXQ", 1,
desc_n * sizeof(struct rte_mbuf *),
tmpl->socket = socket;
if (dev->data->dev_conf.intr_conf.rxq)
/* Enable scattered packet support for this queue if necessary. */
assert(mb_len >= RTE_PKTMBUF_HEADROOM);
if (dev->data->dev_conf.rxmode.max_rx_pkt_len <=
(mb_len - RTE_PKTMBUF_HEADROOM)) {
tmpl->rxq.sges_n = 0;
} else if (dev->data->dev_conf.rxmode.enable_scatter) {
RTE_PKTMBUF_HEADROOM +
dev->data->dev_conf.rxmode.max_rx_pkt_len;
/* Determine the number of SGEs needed for a full packet and round it to the next power of two. */
sges_n = log2above((size / mb_len) + !!(size % mb_len));
tmpl->rxq.sges_n = sges_n;
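/*
 * Illustrative example (not from the code): assuming the default
 * 128-byte RTE_PKTMBUF_HEADROOM and 2048-byte mbuf data rooms, a
 * 9000-byte max_rx_pkt_len gives size = 9128, 9128 / 2048 rounds up to
 * 5 segments, and log2above(5) = 3, i.e. 8 SGEs per packet.
 */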
/* Make sure rxq.sges_n did not overflow. */
size = mb_len * (1 << tmpl->rxq.sges_n);
size -= RTE_PKTMBUF_HEADROOM;
if (size < dev->data->dev_conf.rxmode.max_rx_pkt_len) {
"port %u too many SGEs (%u) needed to handle"
" requested maximum packet size %u",
dev->data->dev_conf.rxmode.max_rx_pkt_len);
rte_errno = EOVERFLOW;
"port %u the requested maximum Rx packet size (%u) is"
" larger than a single mbuf (%u) and scattered mode has"
" not been requested",
dev->data->dev_conf.rxmode.max_rx_pkt_len,
mb_len - RTE_PKTMBUF_HEADROOM);
DRV_LOG(DEBUG, "port %u maximum number of segments per packet: %u",
dev->data->port_id, 1 << tmpl->rxq.sges_n);
if (desc % (1 << tmpl->rxq.sges_n)) {
"port %u number of Rx queue descriptors (%u) is not a"
" multiple of SGEs per packet (%u)",
1 << tmpl->rxq.sges_n);
/* Toggle RX checksum offload if hardware supports it. */
tmpl->rxq.csum = !!dev->data->dev_conf.rxmode.hw_ip_checksum;
if (priv->hw_csum_l2tun)
tmpl->rxq.csum_l2tun =
!!dev->data->dev_conf.rxmode.hw_ip_checksum;
tmpl->rxq.hw_timestamp =
!!dev->data->dev_conf.rxmode.hw_timestamp;
/* Configure VLAN stripping. */
tmpl->rxq.vlan_strip = (priv->hw_vlan_strip &&
!!dev->data->dev_conf.rxmode.hw_vlan_strip);
/* By default, FCS (CRC) is stripped by hardware. */
if (dev->data->dev_conf.rxmode.hw_strip_crc) {
tmpl->rxq.crc_present = 0;
} else if (priv->hw_fcs_strip) {
tmpl->rxq.crc_present = 1;
"port %u CRC stripping has been disabled but will"
" still be performed by hardware, make sure MLNX_OFED"
" and firmware are up to date",
dev->data->port_id);
tmpl->rxq.crc_present = 0;
"port %u CRC stripping is %s, %u bytes will be subtracted from"
" incoming frames to hide it",
tmpl->rxq.crc_present ? "disabled" : "enabled",
tmpl->rxq.crc_present << 2);
tmpl->rxq.rss_hash = !!priv->rss_conf.rss_hf &&
(!!(dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS));
tmpl->rxq.port_id = dev->data->port_id;
tmpl->rxq.stats.idx = idx;
tmpl->rxq.elts_n = log2above(desc);
(struct rte_mbuf *(*)[1 << tmpl->rxq.elts_n])(tmpl + 1);
rte_atomic32_inc(&tmpl->refcnt);
DRV_LOG(DEBUG, "port %u Rx queue %u: refcnt %d", dev->data->port_id,
idx, rte_atomic32_read(&tmpl->refcnt));
LIST_INSERT_HEAD(&priv->rxqsctrl, tmpl, next);
 * Pointer to Ethernet device.
 * A pointer to the queue if it exists, NULL otherwise.
struct mlx5_rxq_ctrl *
mlx5_rxq_get(struct rte_eth_dev *dev, uint16_t idx)
struct priv *priv = dev->data->dev_private;
struct mlx5_rxq_ctrl *rxq_ctrl = NULL;
if ((*priv->rxqs)[idx]) {
rxq_ctrl = container_of((*priv->rxqs)[idx],
struct mlx5_rxq_ctrl,
mlx5_rxq_ibv_get(dev, idx);
rte_atomic32_inc(&rxq_ctrl->refcnt);
DRV_LOG(DEBUG, "port %u Rx queue %u: refcnt %d",
dev->data->port_id, rxq_ctrl->idx,
rte_atomic32_read(&rxq_ctrl->refcnt));
 * Release an Rx queue.
 * Pointer to Ethernet device.
 * 1 while a reference on it exists, 0 when freed.
mlx5_rxq_release(struct rte_eth_dev *dev, uint16_t idx)
struct priv *priv = dev->data->dev_private;
struct mlx5_rxq_ctrl *rxq_ctrl;
if (!(*priv->rxqs)[idx])
rxq_ctrl = container_of((*priv->rxqs)[idx], struct mlx5_rxq_ctrl, rxq);
assert(rxq_ctrl->priv);
if (rxq_ctrl->ibv && !mlx5_rxq_ibv_release(rxq_ctrl->ibv))
rxq_ctrl->ibv = NULL;
DRV_LOG(DEBUG, "port %u Rx queue %u: refcnt %d", dev->data->port_id,
rxq_ctrl->idx, rte_atomic32_read(&rxq_ctrl->refcnt));
if (rte_atomic32_dec_and_test(&rxq_ctrl->refcnt)) {
LIST_REMOVE(rxq_ctrl, next);
(*priv->rxqs)[idx] = NULL;
 * Verify if the queue can be released.
 * Pointer to Ethernet device.
 * 1 if the queue can be released, negative errno otherwise and rte_errno is set.
mlx5_rxq_releasable(struct rte_eth_dev *dev, uint16_t idx)
struct priv *priv = dev->data->dev_private;
struct mlx5_rxq_ctrl *rxq_ctrl;
if (!(*priv->rxqs)[idx]) {
rxq_ctrl = container_of((*priv->rxqs)[idx], struct mlx5_rxq_ctrl, rxq);
return (rte_atomic32_read(&rxq_ctrl->refcnt) == 1);
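/*
 * A queue is considered releasable when only the control structure
 * itself holds a reference (refcnt == 1), i.e. no flow, indirection
 * table or hash Rx queue still points to it (interpretation of the
 * refcnt check above).
 */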
 * Verify the Rx Queue list is empty.
 * Pointer to Ethernet device.
 * The number of objects not released.
mlx5_rxq_verify(struct rte_eth_dev *dev)
struct priv *priv = dev->data->dev_private;
struct mlx5_rxq_ctrl *rxq_ctrl;
LIST_FOREACH(rxq_ctrl, &priv->rxqsctrl, next) {
DRV_LOG(DEBUG, "port %u Rx Queue %u still referenced",
dev->data->port_id, rxq_ctrl->idx);
 * Create an indirection table.
 * Pointer to Ethernet device.
 * Queues entering the indirection table.
 * Number of queues in the array.
 * The Verbs object initialised, NULL otherwise and rte_errno is set.
struct mlx5_ind_table_ibv *
mlx5_ind_table_ibv_new(struct rte_eth_dev *dev, uint16_t queues[],
struct priv *priv = dev->data->dev_private;
struct mlx5_ind_table_ibv *ind_tbl;
const unsigned int wq_n = rte_is_power_of_2(queues_n) ?
log2above(queues_n) :
log2above(priv->ind_table_max_size);
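/*
 * The RSS indirection table must hold a power-of-two number of entries.
 * When queues_n already is one, log2above(queues_n) is used directly;
 * otherwise the table is sized to the device maximum and the queue list
 * is repeated to fill it (see the "Finalise indirection table" loop
 * below). This mirrors how the entries are replicated, as read from the
 * code.
 */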
struct ibv_wq *wq[1 << wq_n];
ind_tbl = rte_calloc(__func__, 1, sizeof(*ind_tbl) +
queues_n * sizeof(uint16_t), 0);
for (i = 0; i != queues_n; ++i) {
struct mlx5_rxq_ctrl *rxq = mlx5_rxq_get(dev, queues[i]);
wq[i] = rxq->ibv->wq;
ind_tbl->queues[i] = queues[i];
ind_tbl->queues_n = queues_n;
/* Finalise indirection table. */
for (j = 0; i != (unsigned int)(1 << wq_n); ++i, ++j)
wq[i] = wq[j];
ind_tbl->ind_table = ibv_create_rwq_ind_table(
&(struct ibv_rwq_ind_table_init_attr){
.log_ind_tbl_size = wq_n,
if (!ind_tbl->ind_table) {
rte_atomic32_inc(&ind_tbl->refcnt);
LIST_INSERT_HEAD(&priv->ind_tbls, ind_tbl, next);
DRV_LOG(DEBUG, "port %u indirection table %p: refcnt %d",
dev->data->port_id, (void *)ind_tbl,
rte_atomic32_read(&ind_tbl->refcnt));
DRV_LOG(DEBUG, "port %u cannot create indirection table",
dev->data->port_id);
 * Get an indirection table.
 * Pointer to Ethernet device.
 * Queues entering the indirection table.
 * Number of queues in the array.
 * An indirection table if found.
struct mlx5_ind_table_ibv *
mlx5_ind_table_ibv_get(struct rte_eth_dev *dev, uint16_t queues[],
struct priv *priv = dev->data->dev_private;
struct mlx5_ind_table_ibv *ind_tbl;
LIST_FOREACH(ind_tbl, &priv->ind_tbls, next) {
if ((ind_tbl->queues_n == queues_n) &&
(memcmp(ind_tbl->queues, queues,
ind_tbl->queues_n * sizeof(ind_tbl->queues[0]))
rte_atomic32_inc(&ind_tbl->refcnt);
DRV_LOG(DEBUG, "port %u indirection table %p: refcnt %d",
dev->data->port_id, (void *)ind_tbl,
rte_atomic32_read(&ind_tbl->refcnt));
for (i = 0; i != ind_tbl->queues_n; ++i)
mlx5_rxq_get(dev, ind_tbl->queues[i]);
 * Release an indirection table.
 * Pointer to Ethernet device.
 * Indirection table to release.
 * 1 while a reference on it exists, 0 when freed.
mlx5_ind_table_ibv_release(struct rte_eth_dev *dev,
struct mlx5_ind_table_ibv *ind_tbl)
DRV_LOG(DEBUG, "port %u indirection table %p: refcnt %d",
dev->data->port_id, (void *)ind_tbl,
rte_atomic32_read(&ind_tbl->refcnt));
if (rte_atomic32_dec_and_test(&ind_tbl->refcnt))
claim_zero(ibv_destroy_rwq_ind_table(ind_tbl->ind_table));
for (i = 0; i != ind_tbl->queues_n; ++i)
claim_nonzero(mlx5_rxq_release(dev, ind_tbl->queues[i]));
if (!rte_atomic32_read(&ind_tbl->refcnt)) {
LIST_REMOVE(ind_tbl, next);
 * Verify the Verbs indirection table list is empty.
 * Pointer to Ethernet device.
 * The number of objects not released.
mlx5_ind_table_ibv_verify(struct rte_eth_dev *dev)
struct priv *priv = dev->data->dev_private;
struct mlx5_ind_table_ibv *ind_tbl;
LIST_FOREACH(ind_tbl, &priv->ind_tbls, next) {
"port %u Verbs indirection table %p still referenced",
dev->data->port_id, (void *)ind_tbl);
 * Create an Rx Hash queue.
 * Pointer to Ethernet device.
 * RSS key for the Rx hash queue.
 * @param rss_key_len
 *   RSS key length.
 * @param hash_fields
 *   Verbs protocol hash fields on which to compute the RSS.
 * Queues entering the hash queue. If hash_fields is empty, only the
 * first queue index is used for the indirection table.
 * The Verbs object initialised, NULL otherwise and rte_errno is set.
mlx5_hrxq_new(struct rte_eth_dev *dev, uint8_t *rss_key, uint8_t rss_key_len,
uint64_t hash_fields, uint16_t queues[], uint16_t queues_n)
struct priv *priv = dev->data->dev_private;
struct mlx5_hrxq *hrxq;
struct mlx5_ind_table_ibv *ind_tbl;
queues_n = hash_fields ? queues_n : 1;
ind_tbl = mlx5_ind_table_ibv_get(dev, queues, queues_n);
ind_tbl = mlx5_ind_table_ibv_new(dev, queues, queues_n);
qp = ibv_create_qp_ex(
&(struct ibv_qp_init_attr_ex){
.qp_type = IBV_QPT_RAW_PACKET,
IBV_QP_INIT_ATTR_PD |
IBV_QP_INIT_ATTR_IND_TABLE |
IBV_QP_INIT_ATTR_RX_HASH,
.rx_hash_conf = (struct ibv_rx_hash_conf){
.rx_hash_function = IBV_RX_HASH_FUNC_TOEPLITZ,
.rx_hash_key_len = rss_key_len,
.rx_hash_key = rss_key,
.rx_hash_fields_mask = hash_fields,
.rwq_ind_tbl = ind_tbl->ind_table,
hrxq = rte_calloc(__func__, 1, sizeof(*hrxq) + rss_key_len, 0);
hrxq->ind_table = ind_tbl;
hrxq->rss_key_len = rss_key_len;
hrxq->hash_fields = hash_fields;
memcpy(hrxq->rss_key, rss_key, rss_key_len);
rte_atomic32_inc(&hrxq->refcnt);
LIST_INSERT_HEAD(&priv->hrxqs, hrxq, next);
DRV_LOG(DEBUG, "port %u hash Rx queue %p: refcnt %d",
dev->data->port_id, (void *)hrxq,
rte_atomic32_read(&hrxq->refcnt));
err = rte_errno; /* Save rte_errno before cleanup. */
mlx5_ind_table_ibv_release(dev, ind_tbl);
claim_zero(ibv_destroy_qp(qp));
rte_errno = err; /* Restore rte_errno. */
 * Get an Rx Hash queue.
 * Pointer to Ethernet device.
 * RSS configuration for the Rx hash queue.
 * Queues entering the hash queue. If hash_fields is empty, only the
 * first queue index is used for the indirection table.
 * A hash Rx queue on success.
mlx5_hrxq_get(struct rte_eth_dev *dev, uint8_t *rss_key, uint8_t rss_key_len,
uint64_t hash_fields, uint16_t queues[], uint16_t queues_n)
struct priv *priv = dev->data->dev_private;
struct mlx5_hrxq *hrxq;
queues_n = hash_fields ? queues_n : 1;
LIST_FOREACH(hrxq, &priv->hrxqs, next) {
struct mlx5_ind_table_ibv *ind_tbl;
if (hrxq->rss_key_len != rss_key_len)
if (memcmp(hrxq->rss_key, rss_key, rss_key_len))
if (hrxq->hash_fields != hash_fields)
ind_tbl = mlx5_ind_table_ibv_get(dev, queues, queues_n);
if (ind_tbl != hrxq->ind_table) {
mlx5_ind_table_ibv_release(dev, ind_tbl);
rte_atomic32_inc(&hrxq->refcnt);
DRV_LOG(DEBUG, "port %u hash Rx queue %p: refcnt %d",
dev->data->port_id, (void *)hrxq,
rte_atomic32_read(&hrxq->refcnt));
 * Release the hash Rx queue.
 * Pointer to Ethernet device.
 * Pointer to Hash Rx queue to release.
 * 1 while a reference on it exists, 0 when freed.
mlx5_hrxq_release(struct rte_eth_dev *dev, struct mlx5_hrxq *hrxq)
DRV_LOG(DEBUG, "port %u hash Rx queue %p: refcnt %d",
dev->data->port_id, (void *)hrxq,
rte_atomic32_read(&hrxq->refcnt));
if (rte_atomic32_dec_and_test(&hrxq->refcnt)) {
claim_zero(ibv_destroy_qp(hrxq->qp));
mlx5_ind_table_ibv_release(dev, hrxq->ind_table);
LIST_REMOVE(hrxq, next);
claim_nonzero(mlx5_ind_table_ibv_release(dev, hrxq->ind_table));
 * Verify the hash Rx queue list is empty.
 * Pointer to Ethernet device.
 * The number of objects not released.
mlx5_hrxq_ibv_verify(struct rte_eth_dev *dev)
struct priv *priv = dev->data->dev_private;
struct mlx5_hrxq *hrxq;
LIST_FOREACH(hrxq, &priv->hrxqs, next) {
"port %u Verbs hash Rx queue %p still referenced",
dev->data->port_id, (void *)hrxq);