[deb_dpdk.git] / drivers / net / mlx5 / mlx5_rxq.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright 2015 6WIND S.A.
3  * Copyright 2015 Mellanox Technologies, Ltd
4  */
5
6 #include <stddef.h>
7 #include <assert.h>
8 #include <errno.h>
9 #include <string.h>
10 #include <stdint.h>
11 #include <fcntl.h>
12 #include <sys/queue.h>
13
14 /* Verbs header. */
15 /* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
16 #ifdef PEDANTIC
17 #pragma GCC diagnostic ignored "-Wpedantic"
18 #endif
19 #include <infiniband/verbs.h>
20 #include <infiniband/mlx5dv.h>
21 #ifdef PEDANTIC
22 #pragma GCC diagnostic error "-Wpedantic"
23 #endif
24
25 #include <rte_mbuf.h>
26 #include <rte_malloc.h>
27 #include <rte_ethdev_driver.h>
28 #include <rte_common.h>
29 #include <rte_interrupts.h>
30 #include <rte_debug.h>
31 #include <rte_io.h>
32
33 #include "mlx5.h"
34 #include "mlx5_rxtx.h"
35 #include "mlx5_utils.h"
36 #include "mlx5_autoconf.h"
37 #include "mlx5_defs.h"
38 #include "mlx5_glue.h"
39
40 /* Default RSS hash key also used for ConnectX-3. */
41 uint8_t rss_hash_default_key[] = {
42         0x2c, 0xc6, 0x81, 0xd1,
43         0x5b, 0xdb, 0xf4, 0xf7,
44         0xfc, 0xa2, 0x83, 0x19,
45         0xdb, 0x1a, 0x3e, 0x94,
46         0x6b, 0x9e, 0x38, 0xd9,
47         0x2c, 0x9c, 0x03, 0xd1,
48         0xad, 0x99, 0x44, 0xa7,
49         0xd9, 0x56, 0x3d, 0x59,
50         0x06, 0x3c, 0x25, 0xf3,
51         0xfc, 0x1f, 0xdc, 0x2a,
52 };
53
54 /* Length of the default RSS hash key. */
55 static_assert(MLX5_RSS_HASH_KEY_LEN ==
56               (unsigned int)sizeof(rss_hash_default_key),
57               "wrong RSS default key size.");
58
59 /**
60  * Check whether Multi-Packet RQ can be enabled for the device.
61  *
62  * @param dev
63  *   Pointer to Ethernet device.
64  *
65  * @return
66  *   1 if supported, negative errno value if not.
67  */
68 inline int
69 mlx5_check_mprq_support(struct rte_eth_dev *dev)
70 {
71         struct priv *priv = dev->data->dev_private;
72
73         if (priv->config.mprq.enabled &&
74             priv->rxqs_n >= priv->config.mprq.min_rxqs_num)
75                 return 1;
76         return -ENOTSUP;
77 }
78
79 /**
80  * Check whether Multi-Packet RQ is enabled for the Rx queue.
81  *
82  * @param rxq
83  *   Pointer to receive queue structure.
84  *
85  * @return
86  *   0 if disabled, otherwise enabled.
87  */
88 inline int
89 mlx5_rxq_mprq_enabled(struct mlx5_rxq_data *rxq)
90 {
91         return rxq->strd_num_n > 0;
92 }
93
94 /**
95  * Check whether Multi-Packet RQ is enabled for the device.
96  *
97  * @param dev
98  *   Pointer to Ethernet device.
99  *
100  * @return
101  *   0 if disabled, otherwise enabled.
102  */
103 inline int
104 mlx5_mprq_enabled(struct rte_eth_dev *dev)
105 {
106         struct priv *priv = dev->data->dev_private;
107         uint16_t i;
108         uint16_t n = 0;
109
110         if (mlx5_check_mprq_support(dev) < 0)
111                 return 0;
112         /* All the configured queues should be enabled. */
113         for (i = 0; i < priv->rxqs_n; ++i) {
114                 struct mlx5_rxq_data *rxq = (*priv->rxqs)[i];
115
116                 if (!rxq)
117                         continue;
118                 if (mlx5_rxq_mprq_enabled(rxq))
119                         ++n;
120         }
121         /* Multi-Packet RQ can't be partially configured. */
122         assert(n == 0 || n == priv->rxqs_n);
123         return n == priv->rxqs_n;
124 }
125
126 /**
127  * Allocate RX queue elements for Multi-Packet RQ.
128  *
129  * @param rxq_ctrl
130  *   Pointer to RX queue structure.
131  *
132  * @return
133  *   0 on success, a negative errno value otherwise and rte_errno is set.
134  */
135 static int
136 rxq_alloc_elts_mprq(struct mlx5_rxq_ctrl *rxq_ctrl)
137 {
138         struct mlx5_rxq_data *rxq = &rxq_ctrl->rxq;
139         unsigned int wqe_n = 1 << rxq->elts_n;
140         unsigned int i;
141         int err;
142
143         /* Allocate one buffer per WQE plus a spare replacement buffer. */
144         for (i = 0; i <= wqe_n; ++i) {
145                 struct mlx5_mprq_buf *buf;
146
147                 if (rte_mempool_get(rxq->mprq_mp, (void **)&buf) < 0) {
148                         DRV_LOG(ERR, "port %u empty mbuf pool", rxq->port_id);
149                         rte_errno = ENOMEM;
150                         goto error;
151                 }
152                 if (i < wqe_n)
153                         (*rxq->mprq_bufs)[i] = buf;
154                 else
155                         rxq->mprq_repl = buf;
156         }
157         DRV_LOG(DEBUG,
158                 "port %u Rx queue %u allocated and configured %u segments",
159                 rxq->port_id, rxq_ctrl->idx, wqe_n);
160         return 0;
161 error:
162         err = rte_errno; /* Save rte_errno before cleanup. */
163         wqe_n = i;
164         for (i = 0; (i != wqe_n); ++i) {
165                 if ((*rxq->mprq_bufs)[i] != NULL)
166                         rte_mempool_put(rxq->mprq_mp,
167                                         (*rxq->mprq_bufs)[i]);
168                 (*rxq->mprq_bufs)[i] = NULL;
169         }
170         DRV_LOG(DEBUG, "port %u Rx queue %u failed, freed everything",
171                 rxq->port_id, rxq_ctrl->idx);
172         rte_errno = err; /* Restore rte_errno. */
173         return -rte_errno;
174 }
175
176 /**
177  * Allocate RX queue elements for Single-Packet RQ.
178  *
179  * @param rxq_ctrl
180  *   Pointer to RX queue structure.
181  *
182  * @return
183  *   0 on success, a negative errno value otherwise and rte_errno is set.
184  */
185 static int
186 rxq_alloc_elts_sprq(struct mlx5_rxq_ctrl *rxq_ctrl)
187 {
188         const unsigned int sges_n = 1 << rxq_ctrl->rxq.sges_n;
189         unsigned int elts_n = 1 << rxq_ctrl->rxq.elts_n;
190         unsigned int i;
191         int err;
192
193         /* Iterate on segments. */
194         for (i = 0; (i != elts_n); ++i) {
195                 struct rte_mbuf *buf;
196
197                 buf = rte_pktmbuf_alloc(rxq_ctrl->rxq.mp);
198                 if (buf == NULL) {
199                         DRV_LOG(ERR, "port %u empty mbuf pool",
200                                 PORT_ID(rxq_ctrl->priv));
201                         rte_errno = ENOMEM;
202                         goto error;
203                 }
204                 /* Headroom is reserved by rte_pktmbuf_alloc(). */
205                 assert(DATA_OFF(buf) == RTE_PKTMBUF_HEADROOM);
206                 /* Buffer is supposed to be empty. */
207                 assert(rte_pktmbuf_data_len(buf) == 0);
208                 assert(rte_pktmbuf_pkt_len(buf) == 0);
209                 assert(!buf->next);
210                 /* Only the first segment keeps headroom. */
211                 if (i % sges_n)
212                         SET_DATA_OFF(buf, 0);
213                 PORT(buf) = rxq_ctrl->rxq.port_id;
214                 DATA_LEN(buf) = rte_pktmbuf_tailroom(buf);
215                 PKT_LEN(buf) = DATA_LEN(buf);
216                 NB_SEGS(buf) = 1;
217                 (*rxq_ctrl->rxq.elts)[i] = buf;
218         }
219         /* If Rx vector is activated. */
220         if (mlx5_rxq_check_vec_support(&rxq_ctrl->rxq) > 0) {
221                 struct mlx5_rxq_data *rxq = &rxq_ctrl->rxq;
222                 struct rte_mbuf *mbuf_init = &rxq->fake_mbuf;
223                 int j;
224
225                 /* Initialize default rearm_data for vPMD. */
226                 mbuf_init->data_off = RTE_PKTMBUF_HEADROOM;
227                 rte_mbuf_refcnt_set(mbuf_init, 1);
228                 mbuf_init->nb_segs = 1;
229                 mbuf_init->port = rxq->port_id;
230                 /*
231                  * prevent compiler reordering:
232                  * rearm_data covers previous fields.
233                  */
234                 rte_compiler_barrier();
235                 rxq->mbuf_initializer =
236                         *(uint64_t *)&mbuf_init->rearm_data;
237                 /* Padding with a fake mbuf for vectorized Rx. */
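                /*
                 * The extra trailing entries let the vectorized Rx burst
                 * process MLX5_VPMD_DESCS_PER_LOOP descriptors per iteration
                 * without reading past the element ring.
                 */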
238                 for (j = 0; j < MLX5_VPMD_DESCS_PER_LOOP; ++j)
239                         (*rxq->elts)[elts_n + j] = &rxq->fake_mbuf;
240         }
241         DRV_LOG(DEBUG,
242                 "port %u Rx queue %u allocated and configured %u segments"
243                 " (max %u packets)",
244                 PORT_ID(rxq_ctrl->priv), rxq_ctrl->idx, elts_n,
245                 elts_n / (1 << rxq_ctrl->rxq.sges_n));
246         return 0;
247 error:
248         err = rte_errno; /* Save rte_errno before cleanup. */
249         elts_n = i;
250         for (i = 0; (i != elts_n); ++i) {
251                 if ((*rxq_ctrl->rxq.elts)[i] != NULL)
252                         rte_pktmbuf_free_seg((*rxq_ctrl->rxq.elts)[i]);
253                 (*rxq_ctrl->rxq.elts)[i] = NULL;
254         }
255         DRV_LOG(DEBUG, "port %u Rx queue %u failed, freed everything",
256                 PORT_ID(rxq_ctrl->priv), rxq_ctrl->idx);
257         rte_errno = err; /* Restore rte_errno. */
258         return -rte_errno;
259 }
260
261 /**
262  * Allocate RX queue elements.
263  *
264  * @param rxq_ctrl
265  *   Pointer to RX queue structure.
266  *
267  * @return
268  *   0 on success, a negative errno value otherwise and rte_errno is set.
269  */
270 int
271 rxq_alloc_elts(struct mlx5_rxq_ctrl *rxq_ctrl)
272 {
273         return mlx5_rxq_mprq_enabled(&rxq_ctrl->rxq) ?
274                rxq_alloc_elts_mprq(rxq_ctrl) : rxq_alloc_elts_sprq(rxq_ctrl);
275 }
276
277 /**
278  * Free RX queue elements for Multi-Packet RQ.
279  *
280  * @param rxq_ctrl
281  *   Pointer to RX queue structure.
282  */
283 static void
284 rxq_free_elts_mprq(struct mlx5_rxq_ctrl *rxq_ctrl)
285 {
286         struct mlx5_rxq_data *rxq = &rxq_ctrl->rxq;
287         uint16_t i;
288
289         DRV_LOG(DEBUG, "port %u Multi-Packet Rx queue %u freeing WRs",
290                 rxq->port_id, rxq_ctrl->idx);
291         if (rxq->mprq_bufs == NULL)
292                 return;
293         assert(mlx5_rxq_check_vec_support(rxq) < 0);
294         for (i = 0; (i != (1u << rxq->elts_n)); ++i) {
295                 if ((*rxq->mprq_bufs)[i] != NULL)
296                         mlx5_mprq_buf_free((*rxq->mprq_bufs)[i]);
297                 (*rxq->mprq_bufs)[i] = NULL;
298         }
299         if (rxq->mprq_repl != NULL) {
300                 mlx5_mprq_buf_free(rxq->mprq_repl);
301                 rxq->mprq_repl = NULL;
302         }
303 }
304
305 /**
306  * Free RX queue elements for Single-Packet RQ.
307  *
308  * @param rxq_ctrl
309  *   Pointer to RX queue structure.
310  */
311 static void
312 rxq_free_elts_sprq(struct mlx5_rxq_ctrl *rxq_ctrl)
313 {
314         struct mlx5_rxq_data *rxq = &rxq_ctrl->rxq;
315         const uint16_t q_n = (1 << rxq->elts_n);
316         const uint16_t q_mask = q_n - 1;
317         uint16_t used = q_n - (rxq->rq_ci - rxq->rq_pi);
318         uint16_t i;
319
320         DRV_LOG(DEBUG, "port %u Rx queue %u freeing WRs",
321                 PORT_ID(rxq_ctrl->priv), rxq_ctrl->idx);
322         if (rxq->elts == NULL)
323                 return;
324         /*
325          * Some mbufs in the ring still belong to the application; they
326          * cannot be freed.
327          */
328         if (mlx5_rxq_check_vec_support(rxq) > 0) {
329                 for (i = 0; i < used; ++i)
330                         (*rxq->elts)[(rxq->rq_ci + i) & q_mask] = NULL;
331                 rxq->rq_pi = rxq->rq_ci;
332         }
333         for (i = 0; (i != (1u << rxq->elts_n)); ++i) {
334                 if ((*rxq->elts)[i] != NULL)
335                         rte_pktmbuf_free_seg((*rxq->elts)[i]);
336                 (*rxq->elts)[i] = NULL;
337         }
338 }
339
340 /**
341  * Free RX queue elements.
342  *
343  * @param rxq_ctrl
344  *   Pointer to RX queue structure.
345  */
346 static void
347 rxq_free_elts(struct mlx5_rxq_ctrl *rxq_ctrl)
348 {
349         if (mlx5_rxq_mprq_enabled(&rxq_ctrl->rxq))
350                 rxq_free_elts_mprq(rxq_ctrl);
351         else
352                 rxq_free_elts_sprq(rxq_ctrl);
353 }
354
355 /**
356  * Clean up a RX queue.
357  *
358  * Destroy objects, free allocated memory and reset the structure for reuse.
359  *
360  * @param rxq_ctrl
361  *   Pointer to RX queue structure.
362  */
363 void
364 mlx5_rxq_cleanup(struct mlx5_rxq_ctrl *rxq_ctrl)
365 {
366         DRV_LOG(DEBUG, "port %u cleaning up Rx queue %u",
367                 PORT_ID(rxq_ctrl->priv), rxq_ctrl->idx);
368         if (rxq_ctrl->ibv)
369                 mlx5_rxq_ibv_release(rxq_ctrl->ibv);
370         memset(rxq_ctrl, 0, sizeof(*rxq_ctrl));
371 }
372
373 /**
374  * Returns the per-queue supported offloads.
375  *
376  * @param dev
377  *   Pointer to Ethernet device.
378  *
379  * @return
380  *   Supported Rx offloads.
381  */
382 uint64_t
383 mlx5_get_rx_queue_offloads(struct rte_eth_dev *dev)
384 {
385         struct priv *priv = dev->data->dev_private;
386         struct mlx5_dev_config *config = &priv->config;
387         uint64_t offloads = (DEV_RX_OFFLOAD_SCATTER |
388                              DEV_RX_OFFLOAD_TIMESTAMP |
389                              DEV_RX_OFFLOAD_JUMBO_FRAME);
390
391         if (config->hw_fcs_strip)
392                 offloads |= DEV_RX_OFFLOAD_KEEP_CRC;
393
394         if (config->hw_csum)
395                 offloads |= (DEV_RX_OFFLOAD_IPV4_CKSUM |
396                              DEV_RX_OFFLOAD_UDP_CKSUM |
397                              DEV_RX_OFFLOAD_TCP_CKSUM);
398         if (config->hw_vlan_strip)
399                 offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
400         return offloads;
401 }
402
403
404 /**
405  * Returns the per-port supported offloads.
406  *
407  * @return
408  *   Supported Rx offloads.
409  */
410 uint64_t
411 mlx5_get_rx_port_offloads(void)
412 {
413         uint64_t offloads = DEV_RX_OFFLOAD_VLAN_FILTER;
414
415         return offloads;
416 }
417
418 /**
419  * DPDK callback to configure a RX queue.
 *
420  * @param dev
421  *   Pointer to Ethernet device structure.
422  * @param idx
423  *   RX queue index.
424  * @param desc
425  *   Number of descriptors to configure in queue.
426  * @param socket
427  *   NUMA socket on which memory must be allocated.
428  * @param[in] conf
429  *   Thresholds parameters.
430  * @param mp
431  *   Memory pool for buffer allocations.
432  *
433  * @return
434  *   0 on success, a negative errno value otherwise and rte_errno is set.
435  */
436 int
437 mlx5_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
438                     unsigned int socket, const struct rte_eth_rxconf *conf,
439                     struct rte_mempool *mp)
440 {
441         struct priv *priv = dev->data->dev_private;
442         struct mlx5_rxq_data *rxq = (*priv->rxqs)[idx];
443         struct mlx5_rxq_ctrl *rxq_ctrl =
444                 container_of(rxq, struct mlx5_rxq_ctrl, rxq);
445
446         if (!rte_is_power_of_2(desc)) {
447                 desc = 1 << log2above(desc);
448                 DRV_LOG(WARNING,
449                         "port %u increased number of descriptors in Rx queue %u"
450                         " to the next power of two (%d)",
451                         dev->data->port_id, idx, desc);
452         }
453         DRV_LOG(DEBUG, "port %u configuring Rx queue %u for %u descriptors",
454                 dev->data->port_id, idx, desc);
455         if (idx >= priv->rxqs_n) {
456                 DRV_LOG(ERR, "port %u Rx queue index out of range (%u >= %u)",
457                         dev->data->port_id, idx, priv->rxqs_n);
458                 rte_errno = EOVERFLOW;
459                 return -rte_errno;
460         }
461         if (!mlx5_rxq_releasable(dev, idx)) {
462                 DRV_LOG(ERR, "port %u unable to release queue index %u",
463                         dev->data->port_id, idx);
464                 rte_errno = EBUSY;
465                 return -rte_errno;
466         }
467         mlx5_rxq_release(dev, idx);
468         rxq_ctrl = mlx5_rxq_new(dev, idx, desc, socket, conf, mp);
469         if (!rxq_ctrl) {
470                 DRV_LOG(ERR, "port %u unable to allocate queue index %u",
471                         dev->data->port_id, idx);
472                 rte_errno = ENOMEM;
473                 return -rte_errno;
474         }
475         DRV_LOG(DEBUG, "port %u adding Rx queue %u to list",
476                 dev->data->port_id, idx);
477         (*priv->rxqs)[idx] = &rxq_ctrl->rxq;
478         return 0;
479 }
480
481 /**
482  * DPDK callback to release a RX queue.
483  *
484  * @param dpdk_rxq
485  *   Generic RX queue pointer.
486  */
487 void
488 mlx5_rx_queue_release(void *dpdk_rxq)
489 {
490         struct mlx5_rxq_data *rxq = (struct mlx5_rxq_data *)dpdk_rxq;
491         struct mlx5_rxq_ctrl *rxq_ctrl;
492         struct priv *priv;
493
494         if (rxq == NULL)
495                 return;
496         rxq_ctrl = container_of(rxq, struct mlx5_rxq_ctrl, rxq);
497         priv = rxq_ctrl->priv;
498         if (!mlx5_rxq_releasable(ETH_DEV(priv), rxq_ctrl->rxq.stats.idx))
499                 rte_panic("port %u Rx queue %u is still used by a flow and"
500                           " cannot be removed\n",
501                           PORT_ID(priv), rxq_ctrl->idx);
502         mlx5_rxq_release(ETH_DEV(priv), rxq_ctrl->rxq.stats.idx);
503 }
504
505 /**
506  * Allocate queue vector and fill epoll fd list for Rx interrupts.
507  *
508  * @param dev
509  *   Pointer to Ethernet device.
510  *
511  * @return
512  *   0 on success, a negative errno value otherwise and rte_errno is set.
513  */
514 int
515 mlx5_rx_intr_vec_enable(struct rte_eth_dev *dev)
516 {
517         struct priv *priv = dev->data->dev_private;
518         unsigned int i;
519         unsigned int rxqs_n = priv->rxqs_n;
520         unsigned int n = RTE_MIN(rxqs_n, (uint32_t)RTE_MAX_RXTX_INTR_VEC_ID);
521         unsigned int count = 0;
522         struct rte_intr_handle *intr_handle = dev->intr_handle;
523
524         if (!dev->data->dev_conf.intr_conf.rxq)
525                 return 0;
526         mlx5_rx_intr_vec_disable(dev);
527         intr_handle->intr_vec = malloc(n * sizeof(intr_handle->intr_vec[0]));
528         if (intr_handle->intr_vec == NULL) {
529                 DRV_LOG(ERR,
530                         "port %u failed to allocate memory for interrupt"
531                         " vector, Rx interrupts will not be supported",
532                         dev->data->port_id);
533                 rte_errno = ENOMEM;
534                 return -rte_errno;
535         }
536         intr_handle->type = RTE_INTR_HANDLE_EXT;
537         for (i = 0; i != n; ++i) {
538                 /* This rxq ibv must not be released in this function. */
539                 struct mlx5_rxq_ibv *rxq_ibv = mlx5_rxq_ibv_get(dev, i);
540                 int fd;
541                 int flags;
542                 int rc;
543
544                 /* Skip queues that cannot request interrupts. */
545                 if (!rxq_ibv || !rxq_ibv->channel) {
546                         /* Use invalid intr_vec[] index to disable entry. */
547                         intr_handle->intr_vec[i] =
548                                 RTE_INTR_VEC_RXTX_OFFSET +
549                                 RTE_MAX_RXTX_INTR_VEC_ID;
550                         continue;
551                 }
552                 if (count >= RTE_MAX_RXTX_INTR_VEC_ID) {
553                         DRV_LOG(ERR,
554                                 "port %u too many Rx queues for interrupt"
555                                 " vector size (%d), Rx interrupts cannot be"
556                                 " enabled",
557                                 dev->data->port_id, RTE_MAX_RXTX_INTR_VEC_ID);
558                         mlx5_rx_intr_vec_disable(dev);
559                         rte_errno = ENOMEM;
560                         return -rte_errno;
561                 }
562                 fd = rxq_ibv->channel->fd;
563                 flags = fcntl(fd, F_GETFL);
564                 rc = fcntl(fd, F_SETFL, flags | O_NONBLOCK);
565                 if (rc < 0) {
566                         rte_errno = errno;
567                         DRV_LOG(ERR,
568                                 "port %u failed to make Rx interrupt file"
569                                 " descriptor %d non-blocking for queue index"
570                                 " %d",
571                                 dev->data->port_id, fd, i);
572                         mlx5_rx_intr_vec_disable(dev);
573                         return -rte_errno;
574                 }
575                 intr_handle->intr_vec[i] = RTE_INTR_VEC_RXTX_OFFSET + count;
576                 intr_handle->efds[count] = fd;
577                 count++;
578         }
579         if (!count)
580                 mlx5_rx_intr_vec_disable(dev);
581         else
582                 intr_handle->nb_efd = count;
583         return 0;
584 }
585
586 /**
587  * Clean up Rx interrupts handler.
588  *
589  * @param dev
590  *   Pointer to Ethernet device.
591  */
592 void
593 mlx5_rx_intr_vec_disable(struct rte_eth_dev *dev)
594 {
595         struct priv *priv = dev->data->dev_private;
596         struct rte_intr_handle *intr_handle = dev->intr_handle;
597         unsigned int i;
598         unsigned int rxqs_n = priv->rxqs_n;
599         unsigned int n = RTE_MIN(rxqs_n, (uint32_t)RTE_MAX_RXTX_INTR_VEC_ID);
600
601         if (!dev->data->dev_conf.intr_conf.rxq)
602                 return;
603         if (!intr_handle->intr_vec)
604                 goto free;
605         for (i = 0; i != n; ++i) {
606                 struct mlx5_rxq_ctrl *rxq_ctrl;
607                 struct mlx5_rxq_data *rxq_data;
608
609                 if (intr_handle->intr_vec[i] == RTE_INTR_VEC_RXTX_OFFSET +
610                     RTE_MAX_RXTX_INTR_VEC_ID)
611                         continue;
612                 /*
613                  * Need to access the queue directly to release the
614                  * reference kept in mlx5_rx_intr_vec_enable().
615                  */
616                 rxq_data = (*priv->rxqs)[i];
617                 rxq_ctrl = container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
618                 mlx5_rxq_ibv_release(rxq_ctrl->ibv);
619         }
620 free:
621         rte_intr_free_epoll_fd(intr_handle);
622         if (intr_handle->intr_vec)
623                 free(intr_handle->intr_vec);
624         intr_handle->nb_efd = 0;
625         intr_handle->intr_vec = NULL;
626 }
627
628 /**
629  * MLX5 CQ notification (arm the CQ for the next completion event).
630  *
631  * @param rxq
632  *   Pointer to receive queue structure.
633  * @param sq_n_rxq
634  *   Sequence number per receive queue.
635  */
636 static inline void
637 mlx5_arm_cq(struct mlx5_rxq_data *rxq, int sq_n_rxq)
638 {
639         int sq_n = 0;
640         uint32_t doorbell_hi;
641         uint64_t doorbell;
642         void *cq_db_reg = (char *)rxq->cq_uar + MLX5_CQ_DOORBELL;
643
644         sq_n = sq_n_rxq & MLX5_CQ_SQN_MASK;
645         doorbell_hi = sq_n << MLX5_CQ_SQN_OFFSET | (rxq->cq_ci & MLX5_CI_MASK);
646         doorbell = (uint64_t)doorbell_hi << 32;
647         doorbell |= rxq->cqn;
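        /*
         * Update the arm doorbell record first, then ring the UAR doorbell:
         * the 64-bit value carries the sequence number and consumer index in
         * its high word and the CQ number in its low word.
         */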
648         rxq->cq_db[MLX5_CQ_ARM_DB] = rte_cpu_to_be_32(doorbell_hi);
649         mlx5_uar_write64(rte_cpu_to_be_64(doorbell),
650                          cq_db_reg, rxq->uar_lock_cq);
651 }
652
653 /**
654  * DPDK callback for Rx queue interrupt enable.
655  *
656  * @param dev
657  *   Pointer to Ethernet device structure.
658  * @param rx_queue_id
659  *   Rx queue number.
660  *
661  * @return
662  *   0 on success, a negative errno value otherwise and rte_errno is set.
663  */
664 int
665 mlx5_rx_intr_enable(struct rte_eth_dev *dev, uint16_t rx_queue_id)
666 {
667         struct priv *priv = dev->data->dev_private;
668         struct mlx5_rxq_data *rxq_data;
669         struct mlx5_rxq_ctrl *rxq_ctrl;
670
671         rxq_data = (*priv->rxqs)[rx_queue_id];
672         if (!rxq_data) {
673                 rte_errno = EINVAL;
674                 return -rte_errno;
675         }
676         rxq_ctrl = container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
677         if (rxq_ctrl->irq) {
678                 struct mlx5_rxq_ibv *rxq_ibv;
679
680                 rxq_ibv = mlx5_rxq_ibv_get(dev, rx_queue_id);
681                 if (!rxq_ibv) {
682                         rte_errno = EINVAL;
683                         return -rte_errno;
684                 }
685                 mlx5_arm_cq(rxq_data, rxq_data->cq_arm_sn);
686                 mlx5_rxq_ibv_release(rxq_ibv);
687         }
688         return 0;
689 }
690
691 /**
692  * DPDK callback for Rx queue interrupt disable.
693  *
694  * @param dev
695  *   Pointer to Ethernet device structure.
696  * @param rx_queue_id
697  *   Rx queue number.
698  *
699  * @return
700  *   0 on success, a negative errno value otherwise and rte_errno is set.
701  */
702 int
703 mlx5_rx_intr_disable(struct rte_eth_dev *dev, uint16_t rx_queue_id)
704 {
705         struct priv *priv = dev->data->dev_private;
706         struct mlx5_rxq_data *rxq_data;
707         struct mlx5_rxq_ctrl *rxq_ctrl;
708         struct mlx5_rxq_ibv *rxq_ibv = NULL;
709         struct ibv_cq *ev_cq;
710         void *ev_ctx;
711         int ret;
712
713         rxq_data = (*priv->rxqs)[rx_queue_id];
714         if (!rxq_data) {
715                 rte_errno = EINVAL;
716                 return -rte_errno;
717         }
718         rxq_ctrl = container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
719         if (!rxq_ctrl->irq)
720                 return 0;
721         rxq_ibv = mlx5_rxq_ibv_get(dev, rx_queue_id);
722         if (!rxq_ibv) {
723                 rte_errno = EINVAL;
724                 return -rte_errno;
725         }
726         ret = mlx5_glue->get_cq_event(rxq_ibv->channel, &ev_cq, &ev_ctx);
727         if (ret || ev_cq != rxq_ibv->cq) {
728                 rte_errno = EINVAL;
729                 goto exit;
730         }
731         rxq_data->cq_arm_sn++;
732         mlx5_glue->ack_cq_events(rxq_ibv->cq, 1);
733         return 0;
734 exit:
735         ret = rte_errno; /* Save rte_errno before cleanup. */
736         if (rxq_ibv)
737                 mlx5_rxq_ibv_release(rxq_ibv);
738         DRV_LOG(WARNING, "port %u unable to disable interrupt on Rx queue %d",
739                 dev->data->port_id, rx_queue_id);
740         rte_errno = ret; /* Restore rte_errno. */
741         return -rte_errno;
742 }
743
744 /**
745  * Create the Rx queue Verbs object.
746  *
747  * @param dev
748  *   Pointer to Ethernet device.
749  * @param idx
750  *   Queue index in DPDK Rx queue array
751  *
752  * @return
753  *   The Verbs object initialised, NULL otherwise and rte_errno is set.
754  */
755 struct mlx5_rxq_ibv *
756 mlx5_rxq_ibv_new(struct rte_eth_dev *dev, uint16_t idx)
757 {
758         struct priv *priv = dev->data->dev_private;
759         struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx];
760         struct mlx5_rxq_ctrl *rxq_ctrl =
761                 container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
762         struct ibv_wq_attr mod;
763         union {
764                 struct {
765                         struct ibv_cq_init_attr_ex ibv;
766                         struct mlx5dv_cq_init_attr mlx5;
767                 } cq;
768                 struct {
769                         struct ibv_wq_init_attr ibv;
770 #ifdef HAVE_IBV_DEVICE_STRIDING_RQ_SUPPORT
771                         struct mlx5dv_wq_init_attr mlx5;
772 #endif
773                 } wq;
774                 struct ibv_cq_ex cq_attr;
775         } attr;
776         unsigned int cqe_n;
777         unsigned int wqe_n = 1 << rxq_data->elts_n;
778         struct mlx5_rxq_ibv *tmpl;
779         struct mlx5dv_cq cq_info;
780         struct mlx5dv_rwq rwq;
781         unsigned int i;
782         int ret = 0;
783         struct mlx5dv_obj obj;
784         struct mlx5_dev_config *config = &priv->config;
785         const int mprq_en = mlx5_rxq_mprq_enabled(rxq_data);
786
787         assert(rxq_data);
788         assert(!rxq_ctrl->ibv);
789         priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_RX_QUEUE;
790         priv->verbs_alloc_ctx.obj = rxq_ctrl;
791         tmpl = rte_calloc_socket(__func__, 1, sizeof(*tmpl), 0,
792                                  rxq_ctrl->socket);
793         if (!tmpl) {
794                 DRV_LOG(ERR,
795                         "port %u Rx queue %u cannot allocate verbs resources",
796                         dev->data->port_id, rxq_ctrl->idx);
797                 rte_errno = ENOMEM;
798                 goto error;
799         }
800         tmpl->rxq_ctrl = rxq_ctrl;
801         if (rxq_ctrl->irq) {
802                 tmpl->channel = mlx5_glue->create_comp_channel(priv->ctx);
803                 if (!tmpl->channel) {
804                         DRV_LOG(ERR, "port %u: comp channel creation failure",
805                                 dev->data->port_id);
806                         rte_errno = ENOMEM;
807                         goto error;
808                 }
809         }
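        /*
         * Size the CQ for one CQE per packet. With Multi-Packet RQ a single
         * WQE can receive up to one packet per stride, hence the larger CQ.
         */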
810         if (mprq_en)
811                 cqe_n = wqe_n * (1 << rxq_data->strd_num_n) - 1;
812         else
813                 cqe_n = wqe_n - 1;
814         attr.cq.ibv = (struct ibv_cq_init_attr_ex){
815                 .cqe = cqe_n,
816                 .channel = tmpl->channel,
817                 .comp_mask = 0,
818         };
819         attr.cq.mlx5 = (struct mlx5dv_cq_init_attr){
820                 .comp_mask = 0,
821         };
822         if (config->cqe_comp && !rxq_data->hw_timestamp) {
823                 attr.cq.mlx5.comp_mask |=
824                         MLX5DV_CQ_INIT_ATTR_MASK_COMPRESSED_CQE;
825 #ifdef HAVE_IBV_DEVICE_STRIDING_RQ_SUPPORT
826                 attr.cq.mlx5.cqe_comp_res_format =
827                         mprq_en ? MLX5DV_CQE_RES_FORMAT_CSUM_STRIDX :
828                                   MLX5DV_CQE_RES_FORMAT_HASH;
829 #else
830                 attr.cq.mlx5.cqe_comp_res_format = MLX5DV_CQE_RES_FORMAT_HASH;
831 #endif
832                 /*
833                  * For vectorized Rx, it must not be doubled in order to
834                  * make cq_ci and rq_ci aligned.
835                  */
836                 if (mlx5_rxq_check_vec_support(rxq_data) < 0)
837                         attr.cq.ibv.cqe *= 2;
838         } else if (config->cqe_comp && rxq_data->hw_timestamp) {
839                 DRV_LOG(DEBUG,
840                         "port %u Rx CQE compression is disabled for HW"
841                         " timestamp",
842                         dev->data->port_id);
843         }
844         tmpl->cq = mlx5_glue->cq_ex_to_cq
845                 (mlx5_glue->dv_create_cq(priv->ctx, &attr.cq.ibv,
846                                          &attr.cq.mlx5));
847         if (tmpl->cq == NULL) {
848                 DRV_LOG(ERR, "port %u Rx queue %u CQ creation failure",
849                         dev->data->port_id, idx);
850                 rte_errno = ENOMEM;
851                 goto error;
852         }
853         DRV_LOG(DEBUG, "port %u priv->device_attr.max_qp_wr is %d",
854                 dev->data->port_id, priv->device_attr.orig_attr.max_qp_wr);
855         DRV_LOG(DEBUG, "port %u priv->device_attr.max_sge is %d",
856                 dev->data->port_id, priv->device_attr.orig_attr.max_sge);
857         attr.wq.ibv = (struct ibv_wq_init_attr){
858                 .wq_context = NULL, /* Could be useful in the future. */
859                 .wq_type = IBV_WQT_RQ,
860                 /* Max number of outstanding WRs. */
861                 .max_wr = wqe_n >> rxq_data->sges_n,
862                 /* Max number of scatter/gather elements in a WR. */
863                 .max_sge = 1 << rxq_data->sges_n,
864                 .pd = priv->pd,
865                 .cq = tmpl->cq,
866                 .comp_mask =
867                         IBV_WQ_FLAGS_CVLAN_STRIPPING |
868                         0,
869                 .create_flags = (rxq_data->vlan_strip ?
870                                  IBV_WQ_FLAGS_CVLAN_STRIPPING :
871                                  0),
872         };
873         /* By default, FCS (CRC) is stripped by hardware. */
874         if (rxq_data->crc_present) {
875                 attr.wq.ibv.create_flags |= IBV_WQ_FLAGS_SCATTER_FCS;
876                 attr.wq.ibv.comp_mask |= IBV_WQ_INIT_ATTR_FLAGS;
877         }
878 #ifdef HAVE_IBV_WQ_FLAG_RX_END_PADDING
879         if (config->hw_padding) {
880                 attr.wq.ibv.create_flags |= IBV_WQ_FLAG_RX_END_PADDING;
881                 attr.wq.ibv.comp_mask |= IBV_WQ_INIT_ATTR_FLAGS;
882         }
883 #endif
884 #ifdef HAVE_IBV_DEVICE_STRIDING_RQ_SUPPORT
885         attr.wq.mlx5 = (struct mlx5dv_wq_init_attr){
886                 .comp_mask = 0,
887         };
888         if (mprq_en) {
889                 struct mlx5dv_striding_rq_init_attr *mprq_attr =
890                         &attr.wq.mlx5.striding_rq_attrs;
891
892                 attr.wq.mlx5.comp_mask |= MLX5DV_WQ_INIT_ATTR_MASK_STRIDING_RQ;
893                 *mprq_attr = (struct mlx5dv_striding_rq_init_attr){
894                         .single_stride_log_num_of_bytes = rxq_data->strd_sz_n,
895                         .single_wqe_log_num_of_strides = rxq_data->strd_num_n,
896                         .two_byte_shift_en = MLX5_MPRQ_TWO_BYTE_SHIFT,
897                 };
898         }
899         tmpl->wq = mlx5_glue->dv_create_wq(priv->ctx, &attr.wq.ibv,
900                                            &attr.wq.mlx5);
901 #else
902         tmpl->wq = mlx5_glue->create_wq(priv->ctx, &attr.wq.ibv);
903 #endif
904         if (tmpl->wq == NULL) {
905                 DRV_LOG(ERR, "port %u Rx queue %u WQ creation failure",
906                         dev->data->port_id, idx);
907                 rte_errno = ENOMEM;
908                 goto error;
909         }
910         /*
911          * Make sure the number of WRs*SGEs matches expectations since a
912          * queue cannot allocate more than "desc" buffers.
913          */
914         if (attr.wq.ibv.max_wr != (wqe_n >> rxq_data->sges_n) ||
915             attr.wq.ibv.max_sge != (1u << rxq_data->sges_n)) {
916                 DRV_LOG(ERR,
917                         "port %u Rx queue %u requested %u*%u but got %u*%u"
918                         " WRs*SGEs",
919                         dev->data->port_id, idx,
920                         wqe_n >> rxq_data->sges_n, (1 << rxq_data->sges_n),
921                         attr.wq.ibv.max_wr, attr.wq.ibv.max_sge);
922                 rte_errno = EINVAL;
923                 goto error;
924         }
925         /* Change queue state to ready. */
926         mod = (struct ibv_wq_attr){
927                 .attr_mask = IBV_WQ_ATTR_STATE,
928                 .wq_state = IBV_WQS_RDY,
929         };
930         ret = mlx5_glue->modify_wq(tmpl->wq, &mod);
931         if (ret) {
932                 DRV_LOG(ERR,
933                         "port %u Rx queue %u WQ state to IBV_WQS_RDY failed",
934                         dev->data->port_id, idx);
935                 rte_errno = ret;
936                 goto error;
937         }
938         obj.cq.in = tmpl->cq;
939         obj.cq.out = &cq_info;
940         obj.rwq.in = tmpl->wq;
941         obj.rwq.out = &rwq;
942         ret = mlx5_glue->dv_init_obj(&obj, MLX5DV_OBJ_CQ | MLX5DV_OBJ_RWQ);
943         if (ret) {
944                 rte_errno = ret;
945                 goto error;
946         }
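        /* The Rx datapath expects each CQE to be exactly one cache line. */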
947         if (cq_info.cqe_size != RTE_CACHE_LINE_SIZE) {
948                 DRV_LOG(ERR,
949                         "port %u wrong MLX5_CQE_SIZE environment variable"
950                         " value: it should be set to %u",
951                         dev->data->port_id, RTE_CACHE_LINE_SIZE);
952                 rte_errno = EINVAL;
953                 goto error;
954         }
955         /* Fill the rings. */
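        /*
         * With Multi-Packet RQ each WQE points at a whole multi-stride buffer;
         * otherwise each WQE is a single data segment pointing at one mbuf.
         */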
956         rxq_data->wqes = rwq.buf;
957         for (i = 0; (i != wqe_n); ++i) {
958                 volatile struct mlx5_wqe_data_seg *scat;
959                 uintptr_t addr;
960                 uint32_t byte_count;
961
962                 if (mprq_en) {
963                         struct mlx5_mprq_buf *buf = (*rxq_data->mprq_bufs)[i];
964
965                         scat = &((volatile struct mlx5_wqe_mprq *)
966                                  rxq_data->wqes)[i].dseg;
967                         addr = (uintptr_t)mlx5_mprq_buf_addr(buf);
968                         byte_count = (1 << rxq_data->strd_sz_n) *
969                                      (1 << rxq_data->strd_num_n);
970                 } else {
971                         struct rte_mbuf *buf = (*rxq_data->elts)[i];
972
973                         scat = &((volatile struct mlx5_wqe_data_seg *)
974                                  rxq_data->wqes)[i];
975                         addr = rte_pktmbuf_mtod(buf, uintptr_t);
976                         byte_count = DATA_LEN(buf);
977                 }
978                 /* scat->addr must be able to store a pointer. */
979                 assert(sizeof(scat->addr) >= sizeof(uintptr_t));
980                 *scat = (struct mlx5_wqe_data_seg){
981                         .addr = rte_cpu_to_be_64(addr),
982                         .byte_count = rte_cpu_to_be_32(byte_count),
983                         .lkey = mlx5_rx_addr2mr(rxq_data, addr),
984                 };
985         }
986         rxq_data->rq_db = rwq.dbrec;
987         rxq_data->cqe_n = log2above(cq_info.cqe_cnt);
988         rxq_data->cq_ci = 0;
989         rxq_data->consumed_strd = 0;
990         rxq_data->rq_pi = 0;
991         rxq_data->zip = (struct rxq_zip){
992                 .ai = 0,
993         };
994         rxq_data->cq_db = cq_info.dbrec;
995         rxq_data->cqes = (volatile struct mlx5_cqe (*)[])(uintptr_t)cq_info.buf;
996         rxq_data->cq_uar = cq_info.cq_uar;
997         rxq_data->cqn = cq_info.cqn;
998         rxq_data->cq_arm_sn = 0;
999         /* Update doorbell counter. */
1000         rxq_data->rq_ci = wqe_n >> rxq_data->sges_n;
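        /* Ensure WQE ring writes complete before the doorbell record update. */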
1001         rte_wmb();
1002         *rxq_data->rq_db = rte_cpu_to_be_32(rxq_data->rq_ci);
1003         DRV_LOG(DEBUG, "port %u rxq %u updated with %p", dev->data->port_id,
1004                 idx, (void *)tmpl);
1005         rte_atomic32_inc(&tmpl->refcnt);
1006         LIST_INSERT_HEAD(&priv->rxqsibv, tmpl, next);
1007         priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_NONE;
1008         return tmpl;
1009 error:
1010         ret = rte_errno; /* Save rte_errno before cleanup. */
1011         if (tmpl->wq)
1012                 claim_zero(mlx5_glue->destroy_wq(tmpl->wq));
1013         if (tmpl->cq)
1014                 claim_zero(mlx5_glue->destroy_cq(tmpl->cq));
1015         if (tmpl->channel)
1016                 claim_zero(mlx5_glue->destroy_comp_channel(tmpl->channel));
1017         priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_NONE;
1018         rte_errno = ret; /* Restore rte_errno. */
1019         return NULL;
1020 }
1021
1022 /**
1023  * Get an Rx queue Verbs object.
1024  *
1025  * @param dev
1026  *   Pointer to Ethernet device.
1027  * @param idx
1028  *   Queue index in DPDK Rx queue array
1029  *
1030  * @return
1031  *   The Verbs object if it exists.
1032  */
1033 struct mlx5_rxq_ibv *
1034 mlx5_rxq_ibv_get(struct rte_eth_dev *dev, uint16_t idx)
1035 {
1036         struct priv *priv = dev->data->dev_private;
1037         struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx];
1038         struct mlx5_rxq_ctrl *rxq_ctrl;
1039
1040         if (idx >= priv->rxqs_n)
1041                 return NULL;
1042         if (!rxq_data)
1043                 return NULL;
1044         rxq_ctrl = container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
1045         if (rxq_ctrl->ibv) {
1046                 rte_atomic32_inc(&rxq_ctrl->ibv->refcnt);
1047         }
1048         return rxq_ctrl->ibv;
1049 }
1050
1051 /**
1052  * Release an Rx verbs queue object.
1053  *
1054  * @param rxq_ibv
1055  *   Verbs Rx queue object.
1056  *
1057  * @return
1058  *   1 while a reference on it exists, 0 when freed.
1059  */
1060 int
1061 mlx5_rxq_ibv_release(struct mlx5_rxq_ibv *rxq_ibv)
1062 {
1063         assert(rxq_ibv);
1064         assert(rxq_ibv->wq);
1065         assert(rxq_ibv->cq);
1066         if (rte_atomic32_dec_and_test(&rxq_ibv->refcnt)) {
1067                 rxq_free_elts(rxq_ibv->rxq_ctrl);
1068                 claim_zero(mlx5_glue->destroy_wq(rxq_ibv->wq));
1069                 claim_zero(mlx5_glue->destroy_cq(rxq_ibv->cq));
1070                 if (rxq_ibv->channel)
1071                         claim_zero(mlx5_glue->destroy_comp_channel
1072                                    (rxq_ibv->channel));
1073                 LIST_REMOVE(rxq_ibv, next);
1074                 rte_free(rxq_ibv);
1075                 return 0;
1076         }
1077         return 1;
1078 }
1079
1080 /**
1081  * Verify that the Verbs Rx queue list is empty.
1082  *
1083  * @param dev
1084  *   Pointer to Ethernet device.
1085  *
1086  * @return
1087  *   The number of objects not released.
1088  */
1089 int
1090 mlx5_rxq_ibv_verify(struct rte_eth_dev *dev)
1091 {
1092         struct priv *priv = dev->data->dev_private;
1093         int ret = 0;
1094         struct mlx5_rxq_ibv *rxq_ibv;
1095
1096         LIST_FOREACH(rxq_ibv, &priv->rxqsibv, next) {
1097                 DRV_LOG(DEBUG, "port %u Verbs Rx queue %u still referenced",
1098                         dev->data->port_id, rxq_ibv->rxq_ctrl->idx);
1099                 ++ret;
1100         }
1101         return ret;
1102 }
1103
1104 /**
1105  * Return true if a single reference exists on the object.
1106  *
1107  * @param rxq_ibv
1108  *   Verbs Rx queue object.
1109  */
1110 int
1111 mlx5_rxq_ibv_releasable(struct mlx5_rxq_ibv *rxq_ibv)
1112 {
1113         assert(rxq_ibv);
1114         return (rte_atomic32_read(&rxq_ibv->refcnt) == 1);
1115 }
1116
1117 /**
1118  * Callback function to initialize mbufs for Multi-Packet RQ.
1119  */
1120 static inline void
1121 mlx5_mprq_buf_init(struct rte_mempool *mp, void *opaque_arg __rte_unused,
1122                     void *_m, unsigned int i __rte_unused)
1123 {
1124         struct mlx5_mprq_buf *buf = _m;
1125
1126         memset(_m, 0, sizeof(*buf));
1127         buf->mp = mp;
1128         rte_atomic16_set(&buf->refcnt, 1);
1129 }
1130
1131 /**
1132  * Free mempool of Multi-Packet RQ.
1133  *
1134  * @param dev
1135  *   Pointer to Ethernet device.
1136  *
1137  * @return
1138  *   0 on success, negative errno value on failure.
1139  */
1140 int
1141 mlx5_mprq_free_mp(struct rte_eth_dev *dev)
1142 {
1143         struct priv *priv = dev->data->dev_private;
1144         struct rte_mempool *mp = priv->mprq_mp;
1145         unsigned int i;
1146
1147         if (mp == NULL)
1148                 return 0;
1149         DRV_LOG(DEBUG, "port %u freeing mempool (%s) for Multi-Packet RQ",
1150                 dev->data->port_id, mp->name);
1151         /*
1152          * If a buffer in the pool has been externally attached to an mbuf and
1153          * is still in use by the application, destroying the Rx queue can spoil
1154          * the packet. It is unlikely to happen, but it can if the application
1155          * dynamically creates and destroys queues while holding Rx packets.
1156          *
1157          * TODO: This is unavoidable for now because the mempool for Multi-Packet
1158          * RQ isn't provided by the application but managed by the PMD.
1159          */
1160         if (!rte_mempool_full(mp)) {
1161                 DRV_LOG(ERR,
1162                         "port %u mempool for Multi-Packet RQ is still in use",
1163                         dev->data->port_id);
1164                 rte_errno = EBUSY;
1165                 return -rte_errno;
1166         }
1167         rte_mempool_free(mp);
1168         /* Unset mempool for each Rx queue. */
1169         for (i = 0; i != priv->rxqs_n; ++i) {
1170                 struct mlx5_rxq_data *rxq = (*priv->rxqs)[i];
1171
1172                 if (rxq == NULL)
1173                         continue;
1174                 rxq->mprq_mp = NULL;
1175         }
1176         return 0;
1177 }
1178
1179 /**
1180  * Allocate a mempool for Multi-Packet RQ. All configured Rx queues share the
1181  * mempool. If one is already allocated, reuse it if it holds enough
1182  * elements; otherwise, resize it.
1183  *
1184  * @param dev
1185  *   Pointer to Ethernet device.
1186  *
1187  * @return
1188  *   0 on success, negative errno value on failure.
1189  */
1190 int
1191 mlx5_mprq_alloc_mp(struct rte_eth_dev *dev)
1192 {
1193         struct priv *priv = dev->data->dev_private;
1194         struct rte_mempool *mp = priv->mprq_mp;
1195         char name[RTE_MEMPOOL_NAMESIZE];
1196         unsigned int desc = 0;
1197         unsigned int buf_len;
1198         unsigned int obj_num;
1199         unsigned int obj_size;
1200         unsigned int strd_num_n = 0;
1201         unsigned int strd_sz_n = 0;
1202         unsigned int i;
1203
1204         if (!mlx5_mprq_enabled(dev))
1205                 return 0;
1206         /* Count the total number of descriptors configured. */
1207         for (i = 0; i != priv->rxqs_n; ++i) {
1208                 struct mlx5_rxq_data *rxq = (*priv->rxqs)[i];
1209
1210                 if (rxq == NULL)
1211                         continue;
1212                 desc += 1 << rxq->elts_n;
1213                 /* Get the max number of strides. */
1214                 if (strd_num_n < rxq->strd_num_n)
1215                         strd_num_n = rxq->strd_num_n;
1216                 /* Get the max size of a stride. */
1217                 if (strd_sz_n < rxq->strd_sz_n)
1218                         strd_sz_n = rxq->strd_sz_n;
1219         }
1220         assert(strd_num_n && strd_sz_n);
1221         buf_len = (1 << strd_num_n) * (1 << strd_sz_n);
1222         obj_size = buf_len + sizeof(struct mlx5_mprq_buf);
1223         /*
1224          * Received packets can either be memcpy'd or externally referenced. When
1225          * a packet is attached to an mbuf as an external buffer, it isn't possible
1226          * to predict how the buffers will be queued by the application, so the
1227          * exact number of needed buffers cannot be pre-allocated in advance;
1228          * enough buffers have to be prepared speculatively.
1229          *
1230          * In the data path, if this mempool is depleted, the PMD will try to memcpy
1231          * received packets into buffers provided by the application (rxq->mp) until
1232          * this mempool has free buffers again.
1233          */
1234         desc *= 4;
1235         obj_num = desc + MLX5_MPRQ_MP_CACHE_SZ * priv->rxqs_n;
1236         /*
1237          * rte_mempool_create_empty() has a sanity check that refuses a cache
1238          * size that is too large compared to the number of elements.
1239          * CACHE_FLUSHTHRESH_MULTIPLIER is defined in a C file, so the constant
1240          * 2 is used instead.
1241          */
1242         obj_num = RTE_MAX(obj_num, MLX5_MPRQ_MP_CACHE_SZ * 2);
1243         /* Check whether a mempool is already allocated and if it can be reused. */
1244         if (mp != NULL && mp->elt_size >= obj_size && mp->size >= obj_num) {
1245                 DRV_LOG(DEBUG, "port %u mempool %s is being reused",
1246                         dev->data->port_id, mp->name);
1247                 /* Reuse. */
1248                 goto exit;
1249         } else if (mp != NULL) {
1250                 DRV_LOG(DEBUG, "port %u mempool %s should be resized, freeing it",
1251                         dev->data->port_id, mp->name);
1252                 /*
1253                  * If freeing fails, the mempool may still be in use; there is
1254                  * no choice but to keep using the existing one. On buffer underrun,
1255                  * packets will be memcpy'd instead of being attached as external
1256                  * buffers.
1257                  */
1258                 if (mlx5_mprq_free_mp(dev)) {
1259                         if (mp->elt_size >= obj_size)
1260                                 goto exit;
1261                         else
1262                                 return -rte_errno;
1263                 }
1264         }
1265         snprintf(name, sizeof(name), "%s-mprq", dev->device->name);
1266         mp = rte_mempool_create(name, obj_num, obj_size, MLX5_MPRQ_MP_CACHE_SZ,
1267                                 0, NULL, NULL, mlx5_mprq_buf_init, NULL,
1268                                 dev->device->numa_node, 0);
1269         if (mp == NULL) {
1270                 DRV_LOG(ERR,
1271                         "port %u failed to allocate a mempool for"
1272                         " Multi-Packet RQ, count=%u, size=%u",
1273                         dev->data->port_id, obj_num, obj_size);
1274                 rte_errno = ENOMEM;
1275                 return -rte_errno;
1276         }
1277         priv->mprq_mp = mp;
1278 exit:
1279         /* Set mempool for each Rx queue. */
1280         for (i = 0; i != priv->rxqs_n; ++i) {
1281                 struct mlx5_rxq_data *rxq = (*priv->rxqs)[i];
1282
1283                 if (rxq == NULL)
1284                         continue;
1285                 rxq->mprq_mp = mp;
1286         }
1287         DRV_LOG(INFO, "port %u Multi-Packet RQ is configured",
1288                 dev->data->port_id);
1289         return 0;
1290 }
1291
1292 /**
1293  * Create a DPDK Rx queue.
1294  *
1295  * @param dev
1296  *   Pointer to Ethernet device.
1297  * @param idx
1298  *   RX queue index.
1299  * @param desc
1300  *   Number of descriptors to configure in queue.
1301  * @param socket
1302  *   NUMA socket on which memory must be allocated.
 * @param[in] conf
 *   Thresholds parameters.
 * @param mp
 *   Memory pool for buffer allocations.
1303  *
1304  * @return
1305  *   A DPDK queue object on success, NULL otherwise and rte_errno is set.
1306  */
1307 struct mlx5_rxq_ctrl *
1308 mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
1309              unsigned int socket, const struct rte_eth_rxconf *conf,
1310              struct rte_mempool *mp)
1311 {
1312         struct priv *priv = dev->data->dev_private;
1313         struct mlx5_rxq_ctrl *tmpl;
1314         unsigned int mb_len = rte_pktmbuf_data_room_size(mp);
1315         unsigned int mprq_stride_size;
1316         struct mlx5_dev_config *config = &priv->config;
1317         /*
1318          * Always allocate extra slots, even if eventually
1319          * the vector Rx will not be used.
1320          */
1321         uint16_t desc_n =
1322                 desc + config->rx_vec_en * MLX5_VPMD_DESCS_PER_LOOP;
1323         uint64_t offloads = conf->offloads |
1324                            dev->data->dev_conf.rxmode.offloads;
1325         const int mprq_en = mlx5_check_mprq_support(dev) > 0;
1326
1327         tmpl = rte_calloc_socket("RXQ", 1,
1328                                  sizeof(*tmpl) +
1329                                  desc_n * sizeof(struct rte_mbuf *),
1330                                  0, socket);
1331         if (!tmpl) {
1332                 rte_errno = ENOMEM;
1333                 return NULL;
1334         }
1335         if (mlx5_mr_btree_init(&tmpl->rxq.mr_ctrl.cache_bh,
1336                                MLX5_MR_BTREE_CACHE_N, socket)) {
1337                 /* rte_errno is already set. */
1338                 goto error;
1339         }
1340         tmpl->socket = socket;
1341         if (dev->data->dev_conf.intr_conf.rxq)
1342                 tmpl->irq = 1;
1343         /*
1344          * This Rx queue can be configured as a Multi-Packet RQ if all of the
1345          * following conditions are met:
1346          *  - MPRQ is enabled.
1347          *  - The number of descs is more than the number of strides.
1348          *  - max_rx_pkt_len plus overhead is less than the max size of a
1349          *    stride.
1350          *  Otherwise, enable Rx scatter if necessary.
1351          */
1352         assert(mb_len >= RTE_PKTMBUF_HEADROOM);
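        /*
         * A stride has to hold the maximum packet length plus room for the
         * external-buffer shared info and the mbuf headroom, since strides can
         * be attached to mbufs as external buffers.
         */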
1353         mprq_stride_size =
1354                 dev->data->dev_conf.rxmode.max_rx_pkt_len +
1355                 sizeof(struct rte_mbuf_ext_shared_info) +
1356                 RTE_PKTMBUF_HEADROOM;
1357         if (mprq_en &&
1358             desc > (1U << config->mprq.stride_num_n) &&
1359             mprq_stride_size <= (1U << config->mprq.max_stride_size_n)) {
1360                 /* TODO: Rx scatter isn't supported yet. */
1361                 tmpl->rxq.sges_n = 0;
1362                 /* Trim the number of descs needed. */
1363                 desc >>= config->mprq.stride_num_n;
1364                 tmpl->rxq.strd_num_n = config->mprq.stride_num_n;
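                /*
                 * Stride size: the required size rounded up to a power of
                 * two, but at least the device minimum.
                 */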
1365                 tmpl->rxq.strd_sz_n = RTE_MAX(log2above(mprq_stride_size),
1366                                               config->mprq.min_stride_size_n);
1367                 tmpl->rxq.strd_shift_en = MLX5_MPRQ_TWO_BYTE_SHIFT;
1368                 tmpl->rxq.mprq_max_memcpy_len =
1369                         RTE_MIN(mb_len - RTE_PKTMBUF_HEADROOM,
1370                                 config->mprq.max_memcpy_len);
1371                 DRV_LOG(DEBUG,
1372                         "port %u Rx queue %u: Multi-Packet RQ is enabled"
1373                         " strd_num_n = %u, strd_sz_n = %u",
1374                         dev->data->port_id, idx,
1375                         tmpl->rxq.strd_num_n, tmpl->rxq.strd_sz_n);
1376         } else if (dev->data->dev_conf.rxmode.max_rx_pkt_len <=
1377                    (mb_len - RTE_PKTMBUF_HEADROOM)) {
1378                 tmpl->rxq.sges_n = 0;
1379         } else if (offloads & DEV_RX_OFFLOAD_SCATTER) {
1380                 unsigned int size =
1381                         RTE_PKTMBUF_HEADROOM +
1382                         dev->data->dev_conf.rxmode.max_rx_pkt_len;
1383                 unsigned int sges_n;
1384
1385                 /*
1386                  * Determine the number of SGEs needed for a full packet
1387                  * and round it to the next power of two.
1388                  */
1389                 sges_n = log2above((size / mb_len) + !!(size % mb_len));
1390                 tmpl->rxq.sges_n = sges_n;
1391                 /* Make sure rxq.sges_n did not overflow. */
1392                 size = mb_len * (1 << tmpl->rxq.sges_n);
1393                 size -= RTE_PKTMBUF_HEADROOM;
1394                 if (size < dev->data->dev_conf.rxmode.max_rx_pkt_len) {
1395                         DRV_LOG(ERR,
1396                                 "port %u too many SGEs (%u) needed to handle"
1397                                 " requested maximum packet size %u",
1398                                 dev->data->port_id,
1399                                 1 << sges_n,
1400                                 dev->data->dev_conf.rxmode.max_rx_pkt_len);
1401                         rte_errno = EOVERFLOW;
1402                         goto error;
1403                 }
1404         } else {
1405                 DRV_LOG(WARNING,
1406                         "port %u the requested maximum Rx packet size (%u) is"
1407                         " larger than a single mbuf (%u) and scattered mode has"
1408                         " not been requested",
1409                         dev->data->port_id,
1410                         dev->data->dev_conf.rxmode.max_rx_pkt_len,
1411                         mb_len - RTE_PKTMBUF_HEADROOM);
1412         }
1413         if (mprq_en && !mlx5_rxq_mprq_enabled(&tmpl->rxq))
1414                 DRV_LOG(WARNING,
1415                         "port %u MPRQ is requested but cannot be enabled"
1416                         " (requested: desc = %u, stride_sz = %u,"
1417                         " supported: min_stride_num = %u, max_stride_sz = %u).",
1418                         dev->data->port_id, desc, mprq_stride_size,
1419                         (1 << config->mprq.stride_num_n),
1420                         (1 << config->mprq.max_stride_size_n));
1421         DRV_LOG(DEBUG, "port %u maximum number of segments per packet: %u",
1422                 dev->data->port_id, 1 << tmpl->rxq.sges_n);
1423         if (desc % (1 << tmpl->rxq.sges_n)) {
1424                 DRV_LOG(ERR,
1425                         "port %u number of Rx queue descriptors (%u) is not a"
1426                         " multiple of SGEs per packet (%u)",
1427                         dev->data->port_id,
1428                         desc,
1429                         1 << tmpl->rxq.sges_n);
1430                 rte_errno = EINVAL;
1431                 goto error;
1432         }
1433         /* Toggle RX checksum offload if hardware supports it. */
1434         tmpl->rxq.csum = !!(offloads & DEV_RX_OFFLOAD_CHECKSUM);
1435         tmpl->rxq.hw_timestamp = !!(offloads & DEV_RX_OFFLOAD_TIMESTAMP);
1436         /* Configure VLAN stripping. */
1437         tmpl->rxq.vlan_strip = !!(offloads & DEV_RX_OFFLOAD_VLAN_STRIP);
1438         /* By default, FCS (CRC) is stripped by hardware. */
1439         tmpl->rxq.crc_present = 0;
1440         if (offloads & DEV_RX_OFFLOAD_KEEP_CRC) {
1441                 if (config->hw_fcs_strip) {
1442                         tmpl->rxq.crc_present = 1;
1443                 } else {
1444                         DRV_LOG(WARNING,
1445                                 "port %u CRC stripping has been disabled but will"
1446                                 " still be performed by hardware, make sure MLNX_OFED"
1447                                 " and firmware are up to date",
1448                                 dev->data->port_id);
1449                 }
1450         }
1451         DRV_LOG(DEBUG,
1452                 "port %u CRC stripping is %s, %u bytes will be subtracted from"
1453                 " incoming frames to hide it",
1454                 dev->data->port_id,
1455                 tmpl->rxq.crc_present ? "disabled" : "enabled",
1456                 tmpl->rxq.crc_present << 2);
1457         /* Configure RSS hashing and save port ID. */
1458         tmpl->rxq.rss_hash = !!priv->rss_conf.rss_hf &&
1459                 (!!(dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS));
1460         tmpl->rxq.port_id = dev->data->port_id;
1461         tmpl->priv = priv;
1462         tmpl->rxq.mp = mp;
1463         tmpl->rxq.stats.idx = idx;
1464         tmpl->rxq.elts_n = log2above(desc);
1465         tmpl->rxq.elts =
1466                 (struct rte_mbuf *(*)[1 << tmpl->rxq.elts_n])(tmpl + 1);
1467 #ifndef RTE_ARCH_64
1468         tmpl->rxq.uar_lock_cq = &priv->uar_lock_cq;
1469 #endif
1470         tmpl->idx = idx;
1471         rte_atomic32_inc(&tmpl->refcnt);
1472         LIST_INSERT_HEAD(&priv->rxqsctrl, tmpl, next);
1473         return tmpl;
1474 error:
1475         rte_free(tmpl);
1476         return NULL;
1477 }
1478
1479 /**
1480  * Get a Rx queue.
1481  *
1482  * @param dev
1483  *   Pointer to Ethernet device.
1484  * @param idx
1485  *   Rx queue index.
1486  *
1487  * @return
1488  *   A pointer to the queue if it exists, NULL otherwise.
1489  */
1490 struct mlx5_rxq_ctrl *
1491 mlx5_rxq_get(struct rte_eth_dev *dev, uint16_t idx)
1492 {
1493         struct priv *priv = dev->data->dev_private;
1494         struct mlx5_rxq_ctrl *rxq_ctrl = NULL;
1495
1496         if ((*priv->rxqs)[idx]) {
1497                 rxq_ctrl = container_of((*priv->rxqs)[idx],
1498                                         struct mlx5_rxq_ctrl,
1499                                         rxq);
1500                 mlx5_rxq_ibv_get(dev, idx);
1501                 rte_atomic32_inc(&rxq_ctrl->refcnt);
1502         }
1503         return rxq_ctrl;
1504 }
1505
1506 /**
1507  * Release a Rx queue.
1508  *
1509  * @param dev
1510  *   Pointer to Ethernet device.
1511  * @param idx
1512  *   Rx queue index.
1513  *
1514  * @return
1515  *   1 while a reference on it exists, 0 when freed.
1516  */
1517 int
1518 mlx5_rxq_release(struct rte_eth_dev *dev, uint16_t idx)
1519 {
1520         struct priv *priv = dev->data->dev_private;
1521         struct mlx5_rxq_ctrl *rxq_ctrl;
1522
1523         if (!(*priv->rxqs)[idx])
1524                 return 0;
1525         rxq_ctrl = container_of((*priv->rxqs)[idx], struct mlx5_rxq_ctrl, rxq);
1526         assert(rxq_ctrl->priv);
1527         if (rxq_ctrl->ibv && !mlx5_rxq_ibv_release(rxq_ctrl->ibv))
1528                 rxq_ctrl->ibv = NULL;
1529         if (rte_atomic32_dec_and_test(&rxq_ctrl->refcnt)) {
1530                 mlx5_mr_btree_free(&rxq_ctrl->rxq.mr_ctrl.cache_bh);
1531                 LIST_REMOVE(rxq_ctrl, next);
1532                 rte_free(rxq_ctrl);
1533                 (*priv->rxqs)[idx] = NULL;
1534                 return 0;
1535         }
1536         return 1;
1537 }
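/*
 * Usage sketch (illustrative only, not part of the driver): callers that need
 * a temporary reference on an Rx queue control structure pair mlx5_rxq_get()
 * with mlx5_rxq_release():
 *
 *	struct mlx5_rxq_ctrl *rxq_ctrl = mlx5_rxq_get(dev, idx);
 *
 *	if (rxq_ctrl) {
 *		unsigned int elts = 1 << rxq_ctrl->rxq.elts_n;
 *
 *		DRV_LOG(DEBUG, "queue %u holds %u elements", idx, elts);
 *		mlx5_rxq_release(dev, idx);
 *	}
 */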
1538
1539 /**
1540  * Verify if the queue can be released.
1541  *
1542  * @param dev
1543  *   Pointer to Ethernet device.
1544  * @param idx
1545  *   Rx queue index.
1546  *
1547  * @return
1548  *   1 if the queue can be released, 0 if it is still in use, negative errno
1549  *   otherwise and rte_errno is set.
1550  */
1551 int
1552 mlx5_rxq_releasable(struct rte_eth_dev *dev, uint16_t idx)
1553 {
1554         struct priv *priv = dev->data->dev_private;
1555         struct mlx5_rxq_ctrl *rxq_ctrl;
1556
1557         if (!(*priv->rxqs)[idx]) {
1558                 rte_errno = EINVAL;
1559                 return -rte_errno;
1560         }
1561         rxq_ctrl = container_of((*priv->rxqs)[idx], struct mlx5_rxq_ctrl, rxq);
1562         return (rte_atomic32_read(&rxq_ctrl->refcnt) == 1);
1563 }
1564
1565 /**
1566  * Verify the Rx queue list is empty.
1567  *
1568  * @param dev
1569  *   Pointer to Ethernet device.
1570  *
1571  * @return
1572  *   The number of objects not released.
1573  */
1574 int
1575 mlx5_rxq_verify(struct rte_eth_dev *dev)
1576 {
1577         struct priv *priv = dev->data->dev_private;
1578         struct mlx5_rxq_ctrl *rxq_ctrl;
1579         int ret = 0;
1580
1581         LIST_FOREACH(rxq_ctrl, &priv->rxqsctrl, next) {
1582                 DRV_LOG(DEBUG, "port %u Rx Queue %u still referenced",
1583                         dev->data->port_id, rxq_ctrl->idx);
1584                 ++ret;
1585         }
1586         return ret;
1587 }
1588
1589 /**
1590  * Create an indirection table.
1591  *
1592  * @param dev
1593  *   Pointer to Ethernet device.
1594  * @param queues
1595  *   Queues entering the indirection table.
1596  * @param queues_n
1597  *   Number of queues in the array.
1598  *
1599  * @return
1600  *   The Verbs object initialised, NULL otherwise and rte_errno is set.
1601  */
1602 struct mlx5_ind_table_ibv *
1603 mlx5_ind_table_ibv_new(struct rte_eth_dev *dev, const uint16_t *queues,
1604                        uint32_t queues_n)
1605 {
1606         struct priv *priv = dev->data->dev_private;
1607         struct mlx5_ind_table_ibv *ind_tbl;
1608         const unsigned int wq_n = rte_is_power_of_2(queues_n) ?
1609                 log2above(queues_n) :
1610                 log2above(priv->config.ind_table_max_size);
1611         struct ibv_wq *wq[1 << wq_n];
1612         unsigned int i;
1613         unsigned int j;
1614
1615         ind_tbl = rte_calloc(__func__, 1, sizeof(*ind_tbl) +
1616                              queues_n * sizeof(uint16_t), 0);
1617         if (!ind_tbl) {
1618                 rte_errno = ENOMEM;
1619                 return NULL;
1620         }
1621         for (i = 0; i != queues_n; ++i) {
1622                 struct mlx5_rxq_ctrl *rxq = mlx5_rxq_get(dev, queues[i]);
1623
1624                 if (!rxq)
1625                         goto error;
1626                 wq[i] = rxq->ibv->wq;
1627                 ind_tbl->queues[i] = queues[i];
1628         }
1629         ind_tbl->queues_n = queues_n;
1630         /* Finalise indirection table. */
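        /*
         * Example (assuming ind_table_max_size == 8 purely for illustration):
         * with 6 queues, wq_n = log2above(8) = 3, so the loop below replays
         * wq[0] and wq[1] into entries 6 and 7 to keep the table size a
         * power of two.
         */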
1631         for (j = 0; i != (unsigned int)(1 << wq_n); ++i, ++j)
1632                 wq[i] = wq[j];
1633         ind_tbl->ind_table = mlx5_glue->create_rwq_ind_table
1634                 (priv->ctx,
1635                  &(struct ibv_rwq_ind_table_init_attr){
1636                         .log_ind_tbl_size = wq_n,
1637                         .ind_tbl = wq,
1638                         .comp_mask = 0,
1639                  });
1640         if (!ind_tbl->ind_table) {
1641                 rte_errno = errno;
1642                 goto error;
1643         }
1644         rte_atomic32_inc(&ind_tbl->refcnt);
1645         LIST_INSERT_HEAD(&priv->ind_tbls, ind_tbl, next);
1646         return ind_tbl;
1647 error:
1648         rte_free(ind_tbl);
1649         DEBUG("port %u cannot create indirection table", dev->data->port_id);
1650         return NULL;
1651 }
1652
1653 /**
1654  * Get an indirection table.
1655  *
1656  * @param dev
1657  *   Pointer to Ethernet device.
1658  * @param queues
1659  *   Queues entering the indirection table.
1660  * @param queues_n
1661  *   Number of queues in the array.
1662  *
1663  * @return
1664  *   An indirection table if found, NULL otherwise.
1665  */
1666 struct mlx5_ind_table_ibv *
1667 mlx5_ind_table_ibv_get(struct rte_eth_dev *dev, const uint16_t *queues,
1668                        uint32_t queues_n)
1669 {
1670         struct priv *priv = dev->data->dev_private;
1671         struct mlx5_ind_table_ibv *ind_tbl;
1672
1673         LIST_FOREACH(ind_tbl, &priv->ind_tbls, next) {
1674                 if ((ind_tbl->queues_n == queues_n) &&
1675                     (memcmp(ind_tbl->queues, queues,
1676                             ind_tbl->queues_n * sizeof(ind_tbl->queues[0]))
1677                      == 0))
1678                         break;
1679         }
1680         if (ind_tbl) {
1681                 unsigned int i;
1682
1683                 rte_atomic32_inc(&ind_tbl->refcnt);
1684                 for (i = 0; i != ind_tbl->queues_n; ++i)
1685                         mlx5_rxq_get(dev, ind_tbl->queues[i]);
1686         }
1687         return ind_tbl;
1688 }
1689
1690 /**
1691  * Release an indirection table.
1692  *
1693  * @param dev
1694  *   Pointer to Ethernet device.
1695  * @param ind_table
1696  *   Indirection table to release.
1697  *
1698  * @return
1699  *   1 while a reference on it exists, 0 when freed.
1700  */
1701 int
1702 mlx5_ind_table_ibv_release(struct rte_eth_dev *dev,
1703                            struct mlx5_ind_table_ibv *ind_tbl)
1704 {
1705         unsigned int i;
1706
1707         if (rte_atomic32_dec_and_test(&ind_tbl->refcnt))
1708                 claim_zero(mlx5_glue->destroy_rwq_ind_table
1709                            (ind_tbl->ind_table));
1710         for (i = 0; i != ind_tbl->queues_n; ++i)
1711                 claim_nonzero(mlx5_rxq_release(dev, ind_tbl->queues[i]));
1712         if (!rte_atomic32_read(&ind_tbl->refcnt)) {
1713                 LIST_REMOVE(ind_tbl, next);
1714                 rte_free(ind_tbl);
1715                 return 0;
1716         }
1717         return 1;
1718 }
1719
1720 /**
1721  * Verify the Verbs indirection table list is empty.
1722  *
1723  * @param dev
1724  *   Pointer to Ethernet device.
1725  *
1726  * @return
1727  *   The number of objects not released.
1728  */
1729 int
1730 mlx5_ind_table_ibv_verify(struct rte_eth_dev *dev)
1731 {
1732         struct priv *priv = dev->data->dev_private;
1733         struct mlx5_ind_table_ibv *ind_tbl;
1734         int ret = 0;
1735
1736         LIST_FOREACH(ind_tbl, &priv->ind_tbls, next) {
1737                 DRV_LOG(DEBUG,
1738                         "port %u Verbs indirection table %p still referenced",
1739                         dev->data->port_id, (void *)ind_tbl);
1740                 ++ret;
1741         }
1742         return ret;
1743 }
1744
1745 /**
1746  * Create an Rx Hash queue.
1747  *
1748  * @param dev
1749  *   Pointer to Ethernet device.
1750  * @param rss_key
1751  *   RSS key for the Rx hash queue.
1752  * @param rss_key_len
1753  *   RSS key length.
1754  * @param hash_fields
1755  *   Verbs protocol hash field to make the RSS on.
1756  * @param queues
1757  *   Queues entering the hash queue. In case of empty hash_fields only the
1758  *   first queue index will be taken for the indirection table.
1759  * @param queues_n
1760  *   Number of queues.
1761  *
1762  * @return
1763  *   The Verbs object initialised, NULL otherwise and rte_errno is set.
1764  */
1765 struct mlx5_hrxq *
1766 mlx5_hrxq_new(struct rte_eth_dev *dev,
1767               const uint8_t *rss_key, uint32_t rss_key_len,
1768               uint64_t hash_fields,
1769               const uint16_t *queues, uint32_t queues_n,
1770               int tunnel __rte_unused)
1771 {
1772         struct priv *priv = dev->data->dev_private;
1773         struct mlx5_hrxq *hrxq;
1774         struct mlx5_ind_table_ibv *ind_tbl;
1775         struct ibv_qp *qp;
1776         int err;
1777
1778         queues_n = hash_fields ? queues_n : 1;
1779         ind_tbl = mlx5_ind_table_ibv_get(dev, queues, queues_n);
1780         if (!ind_tbl)
1781                 ind_tbl = mlx5_ind_table_ibv_new(dev, queues, queues_n);
1782         if (!ind_tbl) {
1783                 rte_errno = ENOMEM;
1784                 return NULL;
1785         }
1786         if (!rss_key_len) {
1787                 rss_key_len = MLX5_RSS_HASH_KEY_LEN;
1788                 rss_key = rss_hash_default_key;
1789         }
1790 #ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
1791         qp = mlx5_glue->dv_create_qp
1792                 (priv->ctx,
1793                  &(struct ibv_qp_init_attr_ex){
1794                         .qp_type = IBV_QPT_RAW_PACKET,
1795                         .comp_mask =
1796                                 IBV_QP_INIT_ATTR_PD |
1797                                 IBV_QP_INIT_ATTR_IND_TABLE |
1798                                 IBV_QP_INIT_ATTR_RX_HASH,
1799                         .rx_hash_conf = (struct ibv_rx_hash_conf){
1800                                 .rx_hash_function = IBV_RX_HASH_FUNC_TOEPLITZ,
1801                                 .rx_hash_key_len = rss_key_len ? rss_key_len :
1802                                                    MLX5_RSS_HASH_KEY_LEN,
1803                                 .rx_hash_key = rss_key ?
1804                                                (void *)(uintptr_t)rss_key :
1805                                                rss_hash_default_key,
1806                                 .rx_hash_fields_mask = hash_fields,
1807                         },
1808                         .rwq_ind_tbl = ind_tbl->ind_table,
1809                         .pd = priv->pd,
1810                  },
1811                  &(struct mlx5dv_qp_init_attr){
1812                         .comp_mask = tunnel ?
1813                                 MLX5DV_QP_INIT_ATTR_MASK_QP_CREATE_FLAGS : 0,
1814                         .create_flags = MLX5DV_QP_CREATE_TUNNEL_OFFLOADS,
1815                  });
1816 #else
1817         qp = mlx5_glue->create_qp_ex
1818                 (priv->ctx,
1819                  &(struct ibv_qp_init_attr_ex){
1820                         .qp_type = IBV_QPT_RAW_PACKET,
1821                         .comp_mask =
1822                                 IBV_QP_INIT_ATTR_PD |
1823                                 IBV_QP_INIT_ATTR_IND_TABLE |
1824                                 IBV_QP_INIT_ATTR_RX_HASH,
1825                         .rx_hash_conf = (struct ibv_rx_hash_conf){
1826                                 .rx_hash_function = IBV_RX_HASH_FUNC_TOEPLITZ,
1827                                 .rx_hash_key_len = rss_key_len ? rss_key_len :
1828                                                    MLX5_RSS_HASH_KEY_LEN,
1829                                 .rx_hash_key = rss_key ?
1830                                                (void *)(uintptr_t)rss_key :
1831                                                rss_hash_default_key,
1832                                 .rx_hash_fields_mask = hash_fields,
1833                         },
1834                         .rwq_ind_tbl = ind_tbl->ind_table,
1835                         .pd = priv->pd,
1836                  });
1837 #endif
1838         if (!qp) {
1839                 rte_errno = errno;
1840                 goto error;
1841         }
1842         hrxq = rte_calloc(__func__, 1, sizeof(*hrxq) + rss_key_len, 0);
1843         if (!hrxq)
1844                 goto error;
1845         hrxq->ind_table = ind_tbl;
1846         hrxq->qp = qp;
1847         hrxq->rss_key_len = rss_key_len;
1848         hrxq->hash_fields = hash_fields;
1849         memcpy(hrxq->rss_key, rss_key, rss_key_len);
1850         rte_atomic32_inc(&hrxq->refcnt);
1851         LIST_INSERT_HEAD(&priv->hrxqs, hrxq, next);
1852         return hrxq;
1853 error:
1854         err = rte_errno; /* Save rte_errno before cleanup. */
1855         mlx5_ind_table_ibv_release(dev, ind_tbl);
1856         if (qp)
1857                 claim_zero(mlx5_glue->destroy_qp(qp));
1858         rte_errno = err; /* Restore rte_errno. */
1859         return NULL;
1860 }
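/*
 * Usage sketch (illustrative only, not part of the driver): flow handling
 * code typically looks up an existing hash Rx queue before creating one:
 *
 *	struct mlx5_hrxq *hrxq;
 *
 *	hrxq = mlx5_hrxq_get(dev, rss_key, MLX5_RSS_HASH_KEY_LEN,
 *			     hash_fields, queues, queues_n);
 *	if (!hrxq)
 *		hrxq = mlx5_hrxq_new(dev, rss_key, MLX5_RSS_HASH_KEY_LEN,
 *				     hash_fields, queues, queues_n, 0);
 *	if (!hrxq)
 *		return -rte_errno;
 *
 * The reference taken here is dropped later with mlx5_hrxq_release().
 */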
1861
1862 /**
1863  * Get an Rx Hash queue.
1864  *
1865  * @param dev
1866  *   Pointer to Ethernet device.
1867  * @param rss_conf
1868  *   RSS configuration for the Rx hash queue.
1869  * @param queues
1870  *   Queues entering the hash queue. In case of empty hash_fields only the
1871  *   first queue index will be taken for the indirection table.
1872  * @param queues_n
1873  *   Number of queues.
1874  *
1875  * @return
1876  *   A hash Rx queue on success, NULL otherwise.
1877  */
1878 struct mlx5_hrxq *
1879 mlx5_hrxq_get(struct rte_eth_dev *dev,
1880               const uint8_t *rss_key, uint32_t rss_key_len,
1881               uint64_t hash_fields,
1882               const uint16_t *queues, uint32_t queues_n)
1883 {
1884         struct priv *priv = dev->data->dev_private;
1885         struct mlx5_hrxq *hrxq;
1886
1887         queues_n = hash_fields ? queues_n : 1;
1888         LIST_FOREACH(hrxq, &priv->hrxqs, next) {
1889                 struct mlx5_ind_table_ibv *ind_tbl;
1890
1891                 if (hrxq->rss_key_len != rss_key_len)
1892                         continue;
1893                 if (memcmp(hrxq->rss_key, rss_key, rss_key_len))
1894                         continue;
1895                 if (hrxq->hash_fields != hash_fields)
1896                         continue;
1897                 ind_tbl = mlx5_ind_table_ibv_get(dev, queues, queues_n);
1898                 if (!ind_tbl)
1899                         continue;
1900                 if (ind_tbl != hrxq->ind_table) {
1901                         mlx5_ind_table_ibv_release(dev, ind_tbl);
1902                         continue;
1903                 }
1904                 rte_atomic32_inc(&hrxq->refcnt);
1905                 return hrxq;
1906         }
1907         return NULL;
1908 }
1909
1910 /**
1911  * Release the hash Rx queue.
1912  *
1913  * @param dev
1914  *   Pointer to Ethernet device.
1915  * @param hrxq
1916  *   Pointer to Hash Rx queue to release.
1917  *
1918  * @return
1919  *   1 while a reference on it exists, 0 when freed.
1920  */
1921 int
1922 mlx5_hrxq_release(struct rte_eth_dev *dev, struct mlx5_hrxq *hrxq)
1923 {
1924         if (rte_atomic32_dec_and_test(&hrxq->refcnt)) {
1925                 claim_zero(mlx5_glue->destroy_qp(hrxq->qp));
1926                 mlx5_ind_table_ibv_release(dev, hrxq->ind_table);
1927                 LIST_REMOVE(hrxq, next);
1928                 rte_free(hrxq);
1929                 return 0;
1930         }
1931         claim_nonzero(mlx5_ind_table_ibv_release(dev, hrxq->ind_table));
1932         return 1;
1933 }
1934
1935 /**
1936  * Verify the Verbs hash Rx queue list is empty.
1937  *
1938  * @param dev
1939  *   Pointer to Ethernet device.
1940  *
1941  * @return
1942  *   The number of objects not released.
1943  */
1944 int
1945 mlx5_hrxq_ibv_verify(struct rte_eth_dev *dev)
1946 {
1947         struct priv *priv = dev->data->dev_private;
1948         struct mlx5_hrxq *hrxq;
1949         int ret = 0;
1950
1951         LIST_FOREACH(hrxq, &priv->hrxqs, next) {
1952                 DRV_LOG(DEBUG,
1953                         "port %u Verbs hash Rx queue %p still referenced",
1954                         dev->data->port_id, (void *)hrxq);
1955                 ++ret;
1956         }
1957         return ret;
1958 }
1959
1960 /**
1961  * Create a drop Rx queue Verbs object.
1962  *
1963  * @param dev
1964  *   Pointer to Ethernet device.
1965  *
1966  * @return
1967  *   The Verbs object initialised, NULL otherwise and rte_errno is set.
1968  */
1969 struct mlx5_rxq_ibv *
1970 mlx5_rxq_ibv_drop_new(struct rte_eth_dev *dev)
1971 {
1972         struct priv *priv = dev->data->dev_private;
1973         struct ibv_cq *cq;
1974         struct ibv_wq *wq = NULL;
1975         struct mlx5_rxq_ibv *rxq;
1976
1977         if (priv->drop_queue.rxq)
1978                 return priv->drop_queue.rxq;
1979         cq = mlx5_glue->create_cq(priv->ctx, 1, NULL, NULL, 0);
1980         if (!cq) {
1981                 DEBUG("port %u cannot allocate CQ for drop queue",
1982                       dev->data->port_id);
1983                 rte_errno = errno;
1984                 goto error;
1985         }
1986         wq = mlx5_glue->create_wq(priv->ctx,
1987                  &(struct ibv_wq_init_attr){
1988                         .wq_type = IBV_WQT_RQ,
1989                         .max_wr = 1,
1990                         .max_sge = 1,
1991                         .pd = priv->pd,
1992                         .cq = cq,
1993                  });
1994         if (!wq) {
1995                 DEBUG("port %u cannot allocate WQ for drop queue",
1996                       dev->data->port_id);
1997                 rte_errno = errno;
1998                 goto error;
1999         }
2000         rxq = rte_calloc(__func__, 1, sizeof(*rxq), 0);
2001         if (!rxq) {
2002                 DEBUG("port %u cannot allocate drop Rx queue memory",
2003                       dev->data->port_id);
2004                 rte_errno = ENOMEM;
2005                 goto error;
2006         }
2007         rxq->cq = cq;
2008         rxq->wq = wq;
2009         priv->drop_queue.rxq = rxq;
2010         return rxq;
2011 error:
2012         if (wq)
2013                 claim_zero(mlx5_glue->destroy_wq(wq));
2014         if (cq)
2015                 claim_zero(mlx5_glue->destroy_cq(cq));
2016         return NULL;
2017 }
2018
2019 /**
2020  * Release a drop Rx queue Verbs object.
2021  *
2022  * @param dev
2023  *   Pointer to Ethernet device.
2027  */
2028 void
2029 mlx5_rxq_ibv_drop_release(struct rte_eth_dev *dev)
2030 {
2031         struct priv *priv = dev->data->dev_private;
2032         struct mlx5_rxq_ibv *rxq = priv->drop_queue.rxq;
2033
2034         if (rxq->wq)
2035                 claim_zero(mlx5_glue->destroy_wq(rxq->wq));
2036         if (rxq->cq)
2037                 claim_zero(mlx5_glue->destroy_cq(rxq->cq));
2038         rte_free(rxq);
2039         priv->drop_queue.rxq = NULL;
2040 }
2041
2042 /**
2043  * Create a drop indirection table.
2044  *
2045  * @param dev
2046  *   Pointer to Ethernet device.
2047  *
2048  * @return
2049  *   The Verbs object initialised, NULL otherwise and rte_errno is set.
2050  */
2051 struct mlx5_ind_table_ibv *
2052 mlx5_ind_table_ibv_drop_new(struct rte_eth_dev *dev)
2053 {
2054         struct priv *priv = dev->data->dev_private;
2055         struct mlx5_ind_table_ibv *ind_tbl;
2056         struct mlx5_rxq_ibv *rxq;
2057         struct mlx5_ind_table_ibv tmpl;
2058
2059         rxq = mlx5_rxq_ibv_drop_new(dev);
2060         if (!rxq)
2061                 return NULL;
2062         tmpl.ind_table = mlx5_glue->create_rwq_ind_table
2063                 (priv->ctx,
2064                  &(struct ibv_rwq_ind_table_init_attr){
2065                         .log_ind_tbl_size = 0,
2066                         .ind_tbl = &rxq->wq,
2067                         .comp_mask = 0,
2068                  });
2069         if (!tmpl.ind_table) {
2070                 DEBUG("port %u cannot allocate indirection table for drop"
2071                       " queue",
2072                       dev->data->port_id);
2073                 rte_errno = errno;
2074                 goto error;
2075         }
2076         ind_tbl = rte_calloc(__func__, 1, sizeof(*ind_tbl), 0);
2077         if (!ind_tbl) {
2078                 rte_errno = ENOMEM;
2079                 goto error;
2080         }
2081         ind_tbl->ind_table = tmpl.ind_table;
2082         return ind_tbl;
2083 error:
2084         mlx5_rxq_ibv_drop_release(dev);
2085         return NULL;
2086 }
2087
2088 /**
2089  * Release a drop indirection table.
2090  *
2091  * @param dev
2092  *   Pointer to Ethernet device.
2093  */
2094 void
2095 mlx5_ind_table_ibv_drop_release(struct rte_eth_dev *dev)
2096 {
2097         struct priv *priv = dev->data->dev_private;
2098         struct mlx5_ind_table_ibv *ind_tbl = priv->drop_queue.hrxq->ind_table;
2099
2100         claim_zero(mlx5_glue->destroy_rwq_ind_table(ind_tbl->ind_table));
2101         mlx5_rxq_ibv_drop_release(dev);
2102         rte_free(ind_tbl);
2103         priv->drop_queue.hrxq->ind_table = NULL;
2104 }
2105
2106 /**
2107  * Create a drop Rx Hash queue.
2108  *
2109  * @param dev
2110  *   Pointer to Ethernet device.
2111  *
2112  * @return
2113  *   The Verbs object initialised, NULL otherwise and rte_errno is set.
2114  */
2115 struct mlx5_hrxq *
2116 mlx5_hrxq_drop_new(struct rte_eth_dev *dev)
2117 {
2118         struct priv *priv = dev->data->dev_private;
2119         struct mlx5_ind_table_ibv *ind_tbl;
2120         struct ibv_qp *qp;
2121         struct mlx5_hrxq *hrxq;
2122
2123         if (priv->drop_queue.hrxq) {
2124                 rte_atomic32_inc(&priv->drop_queue.hrxq->refcnt);
2125                 return priv->drop_queue.hrxq;
2126         }
2127         ind_tbl = mlx5_ind_table_ibv_drop_new(dev);
2128         if (!ind_tbl)
2129                 return NULL;
2130         qp = mlx5_glue->create_qp_ex(priv->ctx,
2131                  &(struct ibv_qp_init_attr_ex){
2132                         .qp_type = IBV_QPT_RAW_PACKET,
2133                         .comp_mask =
2134                                 IBV_QP_INIT_ATTR_PD |
2135                                 IBV_QP_INIT_ATTR_IND_TABLE |
2136                                 IBV_QP_INIT_ATTR_RX_HASH,
2137                         .rx_hash_conf = (struct ibv_rx_hash_conf){
2138                                 .rx_hash_function =
2139                                         IBV_RX_HASH_FUNC_TOEPLITZ,
2140                                 .rx_hash_key_len = MLX5_RSS_HASH_KEY_LEN,
2141                                 .rx_hash_key = rss_hash_default_key,
2142                                 .rx_hash_fields_mask = 0,
2143                                 },
2144                         .rwq_ind_tbl = ind_tbl->ind_table,
2145                         .pd = priv->pd
2146                  });
2147         if (!qp) {
2148                 DEBUG("port %u cannot allocate QP for drop queue",
2149                       dev->data->port_id);
2150                 rte_errno = errno;
2151                 goto error;
2152         }
2153         hrxq = rte_calloc(__func__, 1, sizeof(*hrxq), 0);
2154         if (!hrxq) {
2155                 DRV_LOG(WARNING,
2156                         "port %u cannot allocate memory for drop queue",
2157                         dev->data->port_id);
2158                 rte_errno = ENOMEM;
2159                 goto error;
2160         }
2161         hrxq->ind_table = ind_tbl;
2162         hrxq->qp = qp;
2163         priv->drop_queue.hrxq = hrxq;
2164         rte_atomic32_set(&hrxq->refcnt, 1);
2165         return hrxq;
2166 error:
2167         if (ind_tbl)
2168                 mlx5_ind_table_ibv_drop_release(dev);
2169         return NULL;
2170 }
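/*
 * Usage sketch (illustrative only, not part of the driver): the drop hash Rx
 * queue is reference counted, so each successful mlx5_hrxq_drop_new() call is
 * balanced by one mlx5_hrxq_drop_release() call:
 *
 *	struct mlx5_hrxq *drop = mlx5_hrxq_drop_new(dev);
 *
 *	if (!drop)
 *		return -rte_errno;
 *	(attach drop->qp to a drop flow rule here)
 *	mlx5_hrxq_drop_release(dev);
 */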
2171
2172 /**
2173  * Release a drop hash Rx queue.
2174  *
2175  * @param dev
2176  *   Pointer to Ethernet device.
2177  */
2178 void
2179 mlx5_hrxq_drop_release(struct rte_eth_dev *dev)
2180 {
2181         struct priv *priv = dev->data->dev_private;
2182         struct mlx5_hrxq *hrxq = priv->drop_queue.hrxq;
2183
2184         if (rte_atomic32_dec_and_test(&hrxq->refcnt)) {
2185                 claim_zero(mlx5_glue->destroy_qp(hrxq->qp));
2186                 mlx5_ind_table_ibv_drop_release(dev);
2187                 rte_free(hrxq);
2188                 priv->drop_queue.hrxq = NULL;
2189         }
2190 }