/*-
 *   BSD LICENSE
 *
 *   Copyright 2015 6WIND S.A.
 *   Copyright 2015 Mellanox.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of 6WIND S.A. nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef RTE_PMD_MLX5_RXTX_H_
#define RTE_PMD_MLX5_RXTX_H_

#include <stddef.h>
#include <stdint.h>
#include <sys/queue.h>

/* Verbs header. */
/* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
#ifdef PEDANTIC
#pragma GCC diagnostic ignored "-Wpedantic"
#endif
#include <infiniband/verbs.h>
#include <infiniband/mlx5dv.h>
#ifdef PEDANTIC
#pragma GCC diagnostic error "-Wpedantic"
#endif

#include <rte_mbuf.h>
#include <rte_mempool.h>
#include <rte_common.h>
#include <rte_hexdump.h>
#include <rte_atomic.h>

#include "mlx5_utils.h"
#include "mlx5.h"
#include "mlx5_autoconf.h"
#include "mlx5_defs.h"
#include "mlx5_prm.h"

struct mlx5_rxq_stats {
	unsigned int idx; /**< Mapping index. */
#ifdef MLX5_PMD_SOFT_COUNTERS
	uint64_t ipackets; /**< Total of successfully received packets. */
	uint64_t ibytes; /**< Total of successfully received bytes. */
#endif
	uint64_t idropped; /**< Total of packets dropped when RX ring full. */
	uint64_t rx_nombuf; /**< Total of RX mbuf allocation failures. */
};

struct mlx5_txq_stats {
	unsigned int idx; /**< Mapping index. */
#ifdef MLX5_PMD_SOFT_COUNTERS
	uint64_t opackets; /**< Total of successfully sent packets. */
	uint64_t obytes; /**< Total of successfully sent bytes. */
#endif
	uint64_t oerrors; /**< Total number of failed transmitted packets. */
};

/* Memory Region object. */
struct mlx5_mr {
	const struct rte_memseg *memseg;
	struct ibv_mr *ibv_mr; /* Verbs Memory Region. */
};

/* Cache entry for Memory Region. */
struct mlx5_mr_cache {
	uintptr_t start; /* Start address of MR. */
	uintptr_t end; /* End address of MR. */
	uint32_t lkey; /* rte_cpu_to_be_32(ibv_mr->lkey). */
};

/* Per-queue MR control descriptor. */
struct mlx5_mr_ctrl {
	uint16_t bh_n; /* Size of MR cache table for bottom-half. */
	uint16_t mru; /* Index of last hit entry. */
	uint16_t head; /* Index of the oldest entry. */
	struct mlx5_mr_cache cache[MLX5_MR_CACHE_N]; /* MR cache. */
	struct mlx5_mr_cache (*cache_bh)[]; /* MR cache for bottom-half. */
};

/* MR table size including padding at index 0. */
#define MR_TABLE_SZ(n) ((n) + MLX5_MR_LOOKUP_TABLE_PAD)

/* Actual table size excluding padding at index 0. */
#define MR_N(n) ((n) - MLX5_MR_LOOKUP_TABLE_PAD)

/* Whether there's only one entry in MR lookup table. */
#define IS_SINGLE_MR(n) (MR_N(n) <= 1)
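
/*
 * Illustrative note (not part of the original header): these macros convert
 * between the allocated table size and the number of usable entries,
 * assuming MLX5_MR_LOOKUP_TABLE_PAD reserves slot(s) at index 0. A sketch:
 *
 *	uint16_t tbl_sz = MR_TABLE_SZ(8);	// space for 8 usable entries
 *	uint16_t usable = MR_N(tbl_sz);		// back to 8
 *	if (IS_SINGLE_MR(tbl_sz))		// true when usable <= 1
 *		...;				// single-MR fast path
 */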

/* Compressed CQE context. */
struct rxq_zip {
	uint16_t ai; /* Array index. */
	uint16_t ca; /* Current array index. */
	uint16_t na; /* Next array index. */
	uint16_t cq_ci; /* The next CQE. */
	uint32_t cqe_cnt; /* Number of CQEs. */
};

/* RX queue descriptor. */
struct mlx5_rxq_data {
	unsigned int csum:1; /* Enable checksum offloading. */
	unsigned int csum_l2tun:1; /* Same for L2 tunnels. */
	unsigned int hw_timestamp:1; /* Enable HW timestamp. */
	unsigned int vlan_strip:1; /* Enable VLAN stripping. */
	unsigned int crc_present:1; /* CRC must be subtracted. */
	unsigned int sges_n:2; /* Log 2 of SGEs (max buffers per packet). */
	unsigned int cqe_n:4; /* Log 2 of CQ elements. */
	unsigned int elts_n:4; /* Log 2 of Mbufs. */
	unsigned int rss_hash:1; /* RSS hash result is enabled. */
	unsigned int mark:1; /* Marked flow available on the queue. */
	unsigned int :15; /* Remaining bits. */
	volatile uint32_t *rq_db;
	volatile uint32_t *cq_db;
	uint16_t port_id;
	uint16_t rq_ci;
	uint16_t rq_pi;
	uint16_t cq_ci;
	uint16_t rq_repl_thresh; /* Threshold for buffer replenishment. */
	struct mlx5_mr_ctrl mr_ctrl; /* MR control descriptor. */
	volatile struct mlx5_wqe_data_seg(*wqes)[];
	volatile struct mlx5_cqe(*cqes)[];
	struct rxq_zip zip; /* Compressed context. */
	struct rte_mbuf *(*elts)[];
	struct rte_mempool *mp;
	struct mlx5_rxq_stats stats;
	uint64_t mbuf_initializer; /* Default rearm_data for vectorized Rx. */
	struct rte_mbuf fake_mbuf; /* elts padding for vectorized Rx. */
	void *cq_uar; /* CQ user access region. */
	uint32_t cqn; /* CQ number. */
	uint8_t cq_arm_sn; /* CQ arm seq number. */
} __rte_cache_aligned;
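
/*
 * Illustrative note (not part of the original header): the *_n fields store
 * log2 sizes, so ring lengths and index masks derive from shifts; e.g. with
 * rxq->elts_n == 10:
 *
 *	const uint16_t q_n = 1 << rxq->elts_n;	// 1024 mbufs
 *	const uint16_t q_mask = q_n - 1;	// 0x3ff index mask
 */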

/* Verbs Rx queue elements. */
struct mlx5_rxq_ibv {
	LIST_ENTRY(mlx5_rxq_ibv) next; /* Pointer to the next element. */
	rte_atomic32_t refcnt; /* Reference counter. */
	struct mlx5_rxq_ctrl *rxq_ctrl; /* Back pointer to parent. */
	struct ibv_cq *cq; /* Completion Queue. */
	struct ibv_wq *wq; /* Work Queue. */
	struct ibv_comp_channel *channel;
};

/* RX queue control descriptor. */
struct mlx5_rxq_ctrl {
	LIST_ENTRY(mlx5_rxq_ctrl) next; /* Pointer to the next element. */
	rte_atomic32_t refcnt; /* Reference counter. */
	struct priv *priv; /* Back pointer to private data. */
	struct mlx5_rxq_ibv *ibv; /* Verbs elements. */
	struct mlx5_rxq_data rxq; /* Data path structure. */
	unsigned int socket; /* CPU socket ID for allocations. */
	unsigned int irq:1; /* Whether IRQ is enabled. */
	uint16_t idx; /* Queue index. */
};

/* Indirection table. */
struct mlx5_ind_table_ibv {
	LIST_ENTRY(mlx5_ind_table_ibv) next; /* Pointer to the next element. */
	rte_atomic32_t refcnt; /* Reference counter. */
	struct ibv_rwq_ind_table *ind_table; /**< Indirection table. */
	uint16_t queues_n; /**< Number of queues in the list. */
	uint16_t queues[]; /**< Queue list. */
};

/* Hash Rx queue. */
struct mlx5_hrxq {
	LIST_ENTRY(mlx5_hrxq) next; /* Pointer to the next element. */
	rte_atomic32_t refcnt; /* Reference counter. */
	struct mlx5_ind_table_ibv *ind_table; /* Indirection table. */
	struct ibv_qp *qp; /* Verbs queue pair. */
	uint64_t hash_fields; /* Verbs Hash fields. */
	uint8_t rss_key_len; /* Hash key length in bytes. */
	uint8_t rss_key[]; /* Hash key. */
};

/* TX queue descriptor. */
__extension__
struct mlx5_txq_data {
	uint16_t elts_head; /* Current counter in (*elts)[]. */
	uint16_t elts_tail; /* Counter of first element awaiting completion. */
	uint16_t elts_comp; /* Counter since last completion request. */
	uint16_t mpw_comp; /* WQ index since last completion request. */
	uint16_t cq_ci; /* Consumer index for completion queue. */
#ifndef NDEBUG
	uint16_t cq_pi; /* Producer index for completion queue. */
#endif
	uint16_t wqe_ci; /* Consumer index for work queue. */
	uint16_t wqe_pi; /* Producer index for work queue. */
	uint16_t elts_n:4; /* (*elts)[] length (in log2). */
	uint16_t cqe_n:4; /* Number of CQ elements (in log2). */
	uint16_t wqe_n:4; /* Number of WQ elements (in log2). */
	uint16_t inline_en:1; /* When set inline is enabled. */
	uint16_t tso_en:1; /* When set hardware TSO is enabled. */
	uint16_t tunnel_en:1;
	/* When set TX offloads for tunneled packets are supported. */
	uint16_t mpw_hdr_dseg:1; /* Enable DSEGs in the title WQEBB. */
	uint16_t max_inline; /* Multiple of RTE_CACHE_LINE_SIZE to inline. */
	uint16_t inline_max_packet_sz; /* Max packet size for inlining. */
	uint32_t qp_num_8s; /* QP number shifted by 8. */
	uint32_t flags; /* Flags for Tx Queue. */
	struct mlx5_mr_ctrl mr_ctrl; /* MR control descriptor. */
	volatile struct mlx5_cqe (*cqes)[]; /* Completion queue. */
	volatile void *wqes; /* Work queue (use volatile to write into). */
	volatile uint32_t *qp_db; /* Work queue doorbell. */
	volatile uint32_t *cq_db; /* Completion queue doorbell. */
	volatile void *bf_reg; /* Blueflame register remapped. */
	struct rte_mbuf *(*elts)[]; /* TX elements. */
	struct mlx5_txq_stats stats; /* TX queue counters. */
} __rte_cache_aligned;

/* Verbs Tx queue elements. */
struct mlx5_txq_ibv {
	LIST_ENTRY(mlx5_txq_ibv) next; /* Pointer to the next element. */
	rte_atomic32_t refcnt; /* Reference counter. */
	struct mlx5_txq_ctrl *txq_ctrl; /* Pointer to the control queue. */
	struct ibv_cq *cq; /* Completion Queue. */
	struct ibv_qp *qp; /* Queue Pair. */
};

/* TX queue control descriptor. */
struct mlx5_txq_ctrl {
	LIST_ENTRY(mlx5_txq_ctrl) next; /* Pointer to the next element. */
	rte_atomic32_t refcnt; /* Reference counter. */
	struct priv *priv; /* Back pointer to private data. */
	unsigned int socket; /* CPU socket ID for allocations. */
	unsigned int max_inline_data; /* Max inline data. */
	unsigned int max_tso_header; /* Max TSO header size. */
	struct mlx5_txq_ibv *ibv; /* Verbs queue object. */
	struct mlx5_txq_data txq; /* Data path structure. */
	off_t uar_mmap_offset; /* UAR mmap offset for non-primary process. */
	volatile void *bf_reg_orig; /* Blueflame register from Verbs. */
	uint16_t idx; /* Queue index. */
};

/* mlx5_rxq.c */

extern uint8_t rss_hash_default_key[];
extern const size_t rss_hash_default_key_len;

void mlx5_rxq_cleanup(struct mlx5_rxq_ctrl *rxq_ctrl);
int mlx5_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
			unsigned int socket, const struct rte_eth_rxconf *conf,
			struct rte_mempool *mp);
void mlx5_rx_queue_release(void *dpdk_rxq);
int mlx5_rx_intr_vec_enable(struct rte_eth_dev *dev);
void mlx5_rx_intr_vec_disable(struct rte_eth_dev *dev);
int mlx5_rx_intr_enable(struct rte_eth_dev *dev, uint16_t rx_queue_id);
int mlx5_rx_intr_disable(struct rte_eth_dev *dev, uint16_t rx_queue_id);
struct mlx5_rxq_ibv *mlx5_rxq_ibv_new(struct rte_eth_dev *dev, uint16_t idx);
struct mlx5_rxq_ibv *mlx5_rxq_ibv_get(struct rte_eth_dev *dev, uint16_t idx);
int mlx5_rxq_ibv_release(struct mlx5_rxq_ibv *rxq_ibv);
int mlx5_rxq_ibv_releasable(struct mlx5_rxq_ibv *rxq_ibv);
int mlx5_rxq_ibv_verify(struct rte_eth_dev *dev);
struct mlx5_rxq_ctrl *mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx,
				   uint16_t desc, unsigned int socket,
				   struct rte_mempool *mp);
struct mlx5_rxq_ctrl *mlx5_rxq_get(struct rte_eth_dev *dev, uint16_t idx);
int mlx5_rxq_release(struct rte_eth_dev *dev, uint16_t idx);
int mlx5_rxq_releasable(struct rte_eth_dev *dev, uint16_t idx);
int mlx5_rxq_verify(struct rte_eth_dev *dev);
int rxq_alloc_elts(struct mlx5_rxq_ctrl *rxq_ctrl);
struct mlx5_ind_table_ibv *mlx5_ind_table_ibv_new(struct rte_eth_dev *dev,
						  uint16_t queues[],
						  uint16_t queues_n);
struct mlx5_ind_table_ibv *mlx5_ind_table_ibv_get(struct rte_eth_dev *dev,
						  uint16_t queues[],
						  uint16_t queues_n);
int mlx5_ind_table_ibv_release(struct rte_eth_dev *dev,
			       struct mlx5_ind_table_ibv *ind_tbl);
int mlx5_ind_table_ibv_verify(struct rte_eth_dev *dev);
struct mlx5_hrxq *mlx5_hrxq_new(struct rte_eth_dev *dev, uint8_t *rss_key,
				uint8_t rss_key_len, uint64_t hash_fields,
				uint16_t queues[], uint16_t queues_n);
struct mlx5_hrxq *mlx5_hrxq_get(struct rte_eth_dev *dev, uint8_t *rss_key,
				uint8_t rss_key_len, uint64_t hash_fields,
				uint16_t queues[], uint16_t queues_n);
int mlx5_hrxq_release(struct rte_eth_dev *dev, struct mlx5_hrxq *hrxq);
int mlx5_hrxq_ibv_verify(struct rte_eth_dev *dev);

/* mlx5_txq.c */

int mlx5_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
			unsigned int socket, const struct rte_eth_txconf *conf);
void mlx5_tx_queue_release(void *dpdk_txq);
int mlx5_tx_uar_remap(struct rte_eth_dev *dev, int fd);
struct mlx5_txq_ibv *mlx5_txq_ibv_new(struct rte_eth_dev *dev, uint16_t idx);
struct mlx5_txq_ibv *mlx5_txq_ibv_get(struct rte_eth_dev *dev, uint16_t idx);
int mlx5_txq_ibv_release(struct mlx5_txq_ibv *txq_ibv);
int mlx5_txq_ibv_releasable(struct mlx5_txq_ibv *txq_ibv);
int mlx5_txq_ibv_verify(struct rte_eth_dev *dev);
struct mlx5_txq_ctrl *mlx5_txq_new(struct rte_eth_dev *dev, uint16_t idx,
				   uint16_t desc, unsigned int socket,
				   const struct rte_eth_txconf *conf);
struct mlx5_txq_ctrl *mlx5_txq_get(struct rte_eth_dev *dev, uint16_t idx);
int mlx5_txq_release(struct rte_eth_dev *dev, uint16_t idx);
int mlx5_txq_releasable(struct rte_eth_dev *dev, uint16_t idx);
int mlx5_txq_verify(struct rte_eth_dev *dev);
void txq_alloc_elts(struct mlx5_txq_ctrl *txq_ctrl);

/* mlx5_rxtx.c */

extern uint32_t mlx5_ptype_table[];

void mlx5_set_ptype_table(void);
uint16_t mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts,
		       uint16_t pkts_n);
uint16_t mlx5_tx_burst_mpw(void *dpdk_txq, struct rte_mbuf **pkts,
			   uint16_t pkts_n);
uint16_t mlx5_tx_burst_mpw_inline(void *dpdk_txq, struct rte_mbuf **pkts,
				  uint16_t pkts_n);
uint16_t mlx5_tx_burst_empw(void *dpdk_txq, struct rte_mbuf **pkts,
			    uint16_t pkts_n);
uint16_t mlx5_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n);
uint16_t removed_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts,
			  uint16_t pkts_n);
uint16_t removed_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts,
			  uint16_t pkts_n);
int mlx5_rx_descriptor_status(void *rx_queue, uint16_t offset);
int mlx5_tx_descriptor_status(void *tx_queue, uint16_t offset);

/* Vectorized version of mlx5_rxtx.c */

int mlx5_check_raw_vec_tx_support(struct rte_eth_dev *dev);
int mlx5_check_vec_tx_support(struct rte_eth_dev *dev);
int mlx5_rxq_check_vec_support(struct mlx5_rxq_data *rxq_data);
int mlx5_check_vec_rx_support(struct rte_eth_dev *dev);
uint16_t mlx5_tx_burst_raw_vec(void *dpdk_txq, struct rte_mbuf **pkts,
			       uint16_t pkts_n);
uint16_t mlx5_tx_burst_vec(void *dpdk_txq, struct rte_mbuf **pkts,
			   uint16_t pkts_n);
uint16_t mlx5_rx_burst_vec(void *dpdk_rxq, struct rte_mbuf **pkts,
			   uint16_t pkts_n);

/* mlx5_mr.c */

int mlx5_mr_update_mp(struct rte_eth_dev *dev, struct mlx5_mr_cache *lkp_tbl,
		      uint16_t n, struct rte_mempool *mp);
uint32_t mlx5_rx_mb2mr_bh(struct mlx5_rxq_data *rxq, uintptr_t addr);
uint32_t mlx5_tx_mb2mr_bh(struct mlx5_txq_data *txq, uintptr_t addr);

#ifndef NDEBUG

/**
 * Verify or set magic value in CQE.
 *
 * @param cqe
 *   Pointer to CQE.
 *
 * @return
 *   0 the first time, 1 if the magic value was already set.
 */
static inline int
check_cqe_seen(volatile struct mlx5_cqe *cqe)
{
	static const uint8_t magic[] = "seen";
	volatile uint8_t (*buf)[sizeof(cqe->rsvd0)] = &cqe->rsvd0;
	int ret = 1;
	unsigned int i;

	for (i = 0; i < sizeof(magic) && i < sizeof(*buf); ++i)
		if (!ret || (*buf)[i] != magic[i]) {
			ret = 0;
			(*buf)[i] = magic[i];
		}
	return ret;
}

#endif /* NDEBUG */

/**
 * Check whether CQE is valid.
 *
 * @param cqe
 *   Pointer to CQE.
 * @param cqes_n
 *   Size of completion queue.
 * @param ci
 *   Consumer index.
 *
 * @return
 *   0 on success, 1 on failure.
 */
static __rte_always_inline int
check_cqe(volatile struct mlx5_cqe *cqe,
	  unsigned int cqes_n, const uint16_t ci)
{
	uint16_t idx = ci & cqes_n;
	uint8_t op_own = cqe->op_own;
	uint8_t op_owner = MLX5_CQE_OWNER(op_own);
	uint8_t op_code = MLX5_CQE_OPCODE(op_own);

	if (unlikely((op_owner != (!!(idx))) || (op_code == MLX5_CQE_INVALID)))
		return 1; /* No CQE. */
#ifndef NDEBUG
	if ((op_code == MLX5_CQE_RESP_ERR) ||
	    (op_code == MLX5_CQE_REQ_ERR)) {
		volatile struct mlx5_err_cqe *err_cqe = (volatile void *)cqe;
		uint8_t syndrome = err_cqe->syndrome;

		if ((syndrome == MLX5_CQE_SYNDROME_LOCAL_LENGTH_ERR) ||
		    (syndrome == MLX5_CQE_SYNDROME_REMOTE_ABORTED_ERR))
			return 0;
		if (!check_cqe_seen(cqe)) {
			DRV_LOG(ERR,
				"unexpected CQE error %u (0x%02x) syndrome"
				" 0x%02x",
				op_code, op_code, syndrome);
			rte_hexdump(stderr, "MLX5 Error CQE:",
				    (const void *)((uintptr_t)err_cqe),
				    sizeof(*err_cqe));
		}
		return 1;
	} else if ((op_code != MLX5_CQE_RESP_SEND) &&
		   (op_code != MLX5_CQE_REQ)) {
		if (!check_cqe_seen(cqe)) {
			DRV_LOG(ERR, "unexpected CQE opcode %u (0x%02x)",
				op_code, op_code);
			rte_hexdump(stderr, "MLX5 CQE:",
				    (const void *)((uintptr_t)cqe),
				    sizeof(*cqe));
		}
		return 1;
	}
#endif /* NDEBUG */
	return 0;
}
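
/*
 * Illustrative usage sketch (not part of the original header): pollers pass
 * the CQ size and current consumer index; a nonzero return means no valid
 * CQE is owned by software yet. "rxq" here is a hypothetical
 * struct mlx5_rxq_data pointer.
 *
 *	const unsigned int cqe_n = 1 << rxq->cqe_n;
 *	volatile struct mlx5_cqe *cqe =
 *		&(*rxq->cqes)[rxq->cq_ci & (cqe_n - 1)];
 *
 *	if (check_cqe(cqe, cqe_n, rxq->cq_ci))
 *		return 0;	// nothing to consume
 *	++rxq->cq_ci;		// consume the CQE
 */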

/**
 * Return the address of the WQE.
 *
 * @param txq
 *   Pointer to TX queue structure.
 * @param ci
 *   WQE consumer index.
 *
 * @return
 *   WQE address.
 */
static inline uintptr_t *
tx_mlx5_wqe(struct mlx5_txq_data *txq, uint16_t ci)
{
	ci &= ((1 << txq->wqe_n) - 1);
	return (uintptr_t *)((uintptr_t)txq->wqes + ci * MLX5_WQE_SIZE);
}
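
/*
 * Illustrative note (not part of the original header): the WQ ring holds
 * (1 << txq->wqe_n) WQEs of MLX5_WQE_SIZE bytes each, so an address is a
 * masked index times a fixed stride; e.g. reading the control segment of
 * the WQE at index "ci":
 *
 *	volatile struct mlx5_wqe_ctrl *ctrl =
 *		(volatile struct mlx5_wqe_ctrl *)tx_mlx5_wqe(txq, ci);
 */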

/**
 * Manage TX completions.
 *
 * When sending a burst, mlx5_tx_burst() posts several WRs.
 *
 * @param txq
 *   Pointer to TX queue structure.
 */
static __rte_always_inline void
mlx5_tx_complete(struct mlx5_txq_data *txq)
{
	const uint16_t elts_n = 1 << txq->elts_n;
	const uint16_t elts_m = elts_n - 1;
	const unsigned int cqe_n = 1 << txq->cqe_n;
	const unsigned int cqe_cnt = cqe_n - 1;
	uint16_t elts_free = txq->elts_tail;
	uint16_t elts_tail;
	uint16_t cq_ci = txq->cq_ci;
	volatile struct mlx5_cqe *cqe = NULL;
	volatile struct mlx5_wqe_ctrl *ctrl;
	struct rte_mbuf *m, *free[elts_n];
	struct rte_mempool *pool = NULL;
	unsigned int blk_n = 0;

	cqe = &(*txq->cqes)[cq_ci & cqe_cnt];
	if (unlikely(check_cqe(cqe, cqe_n, cq_ci)))
		return;
#ifndef NDEBUG
	if ((MLX5_CQE_OPCODE(cqe->op_own) == MLX5_CQE_RESP_ERR) ||
	    (MLX5_CQE_OPCODE(cqe->op_own) == MLX5_CQE_REQ_ERR)) {
		if (!check_cqe_seen(cqe)) {
			DRV_LOG(ERR, "unexpected error CQE, Tx stopped");
			rte_hexdump(stderr, "MLX5 TXQ:",
				    (const void *)((uintptr_t)txq->wqes),
				    ((1 << txq->wqe_n) * MLX5_WQE_SIZE));
		}
		return;
	}
#endif /* NDEBUG */
	++cq_ci;
	txq->wqe_pi = rte_be_to_cpu_16(cqe->wqe_counter);
	ctrl = (volatile struct mlx5_wqe_ctrl *)
		tx_mlx5_wqe(txq, txq->wqe_pi);
	elts_tail = ctrl->ctrl3;
	assert((elts_tail & elts_m) < (1 << txq->wqe_n));
	/* Free all the mbufs. */
	while (elts_free != elts_tail) {
		m = rte_pktmbuf_prefree_seg((*txq->elts)[elts_free++ & elts_m]);
		if (likely(m != NULL)) {
			if (likely(m->pool == pool)) {
				free[blk_n++] = m;
			} else {
				if (likely(pool != NULL))
					rte_mempool_put_bulk(pool,
							     (void *)free,
							     blk_n);
				free[0] = m;
				pool = m->pool;
				blk_n = 1;
			}
		}
	}
	if (blk_n)
		rte_mempool_put_bulk(pool, (void *)free, blk_n);
#ifndef NDEBUG
	elts_free = txq->elts_tail;
	/* Poisoning. */
	while (elts_free != elts_tail) {
		memset(&(*txq->elts)[elts_free & elts_m],
		       0x66,
		       sizeof((*txq->elts)[elts_free & elts_m]));
		++elts_free;
	}
#endif /* NDEBUG */
	txq->cq_ci = cq_ci;
	txq->elts_tail = elts_tail;
	/* Update the consumer index. */
	rte_compiler_barrier();
	*txq->cq_db = rte_cpu_to_be_32(cq_ci);
}
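
/*
 * Illustrative usage sketch (not part of the original header): Tx burst
 * routines call this before building new WQEs so completed mbufs return to
 * their mempools and (*elts)[] slots are reclaimed:
 *
 *	mlx5_tx_complete(txq);
 *	uint16_t max_elts =
 *		elts_n - (uint16_t)(txq->elts_head - txq->elts_tail);
 */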

/**
 * Look up LKEY from given lookup table by linear search. The last-hit entry
 * is checked first; on a miss, the entire array is searched. If found,
 * update the last-hit index and return LKEY.
 *
 * @param lkp_tbl
 *   Pointer to lookup table.
 * @param[in,out] cached_idx
 *   Pointer to last-hit index.
 * @param n
 *   Size of lookup table.
 * @param addr
 *   Search key.
 *
 * @return
 *   Searched LKEY on success, UINT32_MAX on no match.
 */
static __rte_always_inline uint32_t
mlx5_mr_lookup_cache(struct mlx5_mr_cache *lkp_tbl, uint16_t *cached_idx,
		     uint16_t n, uintptr_t addr)
{
	uint16_t idx;

	if (likely(addr >= lkp_tbl[*cached_idx].start &&
		   addr < lkp_tbl[*cached_idx].end))
		return lkp_tbl[*cached_idx].lkey;
	for (idx = 0; idx < n && lkp_tbl[idx].start != 0; ++idx) {
		if (addr >= lkp_tbl[idx].start &&
		    addr < lkp_tbl[idx].end) {
			/* Found; update the last-hit index. */
			*cached_idx = idx;
			return lkp_tbl[idx].lkey;
		}
	}
	return UINT32_MAX;
}

/**
 * Query LKEY from address for Rx.
 *
 * @param rxq
 *   Pointer to Rx queue structure.
 * @param addr
 *   Address to search.
 *
 * @return
 *   LKEY on success.
 */
static __rte_always_inline uint32_t
mlx5_rx_addr2mr(struct mlx5_rxq_data *rxq, uintptr_t addr)
{
	uint32_t lkey;

	/* Linear search on MR cache array. */
	lkey = mlx5_mr_lookup_cache(rxq->mr_ctrl.cache,
				    &rxq->mr_ctrl.mru,
				    MLX5_MR_CACHE_N, addr);
	if (likely(lkey != UINT32_MAX))
		return lkey;
	DEBUG("not found in rxq->mr_ctrl.cache[] (last-hit = %u, head = %u)",
	      rxq->mr_ctrl.mru, rxq->mr_ctrl.head);
	/* Take slower bottom-half (binary search) on miss. */
	return mlx5_rx_mb2mr_bh(rxq, addr);
}

#define mlx5_rx_mb2mr(rxq, mb) mlx5_rx_addr2mr(rxq, (uintptr_t)((mb)->buf_addr))
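
/*
 * Illustrative usage sketch (not part of the original header): when
 * replenishing the Rx ring, each data segment takes the (big-endian) LKEY
 * of its mbuf's memory region:
 *
 *	volatile struct mlx5_wqe_data_seg *dseg = &(*rxq->wqes)[i];
 *	dseg->addr = rte_cpu_to_be_64(rte_pktmbuf_mtod(mbuf, uintptr_t));
 *	dseg->lkey = mlx5_rx_mb2mr(rxq, mbuf);
 */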

/**
 * Query LKEY from address for Tx.
 *
 * @param txq
 *   Pointer to Tx queue structure.
 * @param addr
 *   Address to search.
 *
 * @return
 *   LKEY on success.
 */
static __rte_always_inline uint32_t
mlx5_tx_addr2mr(struct mlx5_txq_data *txq, uintptr_t addr)
{
	uint32_t lkey;

	/* Linear search on MR cache array. */
	lkey = mlx5_mr_lookup_cache(txq->mr_ctrl.cache,
				    &txq->mr_ctrl.mru,
				    MLX5_MR_CACHE_N, addr);
	if (likely(lkey != UINT32_MAX))
		return lkey;
	DEBUG("not found in txq->mr_ctrl.cache[] (last-hit = %u, head = %u)",
	      txq->mr_ctrl.mru, txq->mr_ctrl.head);
	/* Take slower bottom-half (binary search) on miss. */
	return mlx5_tx_mb2mr_bh(txq, addr);
}

#define mlx5_tx_mb2mr(txq, mb) mlx5_tx_addr2mr(txq, (uintptr_t)((mb)->buf_addr))

/**
 * Ring TX queue doorbell and flush the update if requested.
 *
 * @param txq
 *   Pointer to TX queue structure.
 * @param wqe
 *   Pointer to the last WQE posted in the NIC.
 * @param cond
 *   Request for write memory barrier after BlueFlame update.
 */
static __rte_always_inline void
mlx5_tx_dbrec_cond_wmb(struct mlx5_txq_data *txq, volatile struct mlx5_wqe *wqe,
		       int cond)
{
	uint64_t *dst = (uint64_t *)((uintptr_t)txq->bf_reg);
	volatile uint64_t *src = ((volatile uint64_t *)wqe);

	rte_io_wmb();
	*txq->qp_db = rte_cpu_to_be_32(txq->wqe_ci);
	/* Ensure ordering between DB record and BF copy. */
	rte_wmb();
	*dst = *src;
	if (cond)
		rte_wmb();
}

/**
 * Ring TX queue doorbell and flush the update by write memory barrier.
 *
 * @param txq
 *   Pointer to TX queue structure.
 * @param wqe
 *   Pointer to the last WQE posted in the NIC.
 */
static __rte_always_inline void
mlx5_tx_dbrec(struct mlx5_txq_data *txq, volatile struct mlx5_wqe *wqe)
{
	mlx5_tx_dbrec_cond_wmb(txq, wqe, 1);
}
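
/*
 * Illustrative usage sketch (not part of the original header): after
 * building WQEs, a Tx burst advances wqe_ci and rings the doorbell with
 * the last WQE posted:
 *
 *	txq->wqe_ci += (ds + 3) / 4;	// WQEBBs consumed (hypothetical "ds"
 *					// counts 16-byte segments)
 *	mlx5_tx_dbrec(txq, (volatile struct mlx5_wqe *)wqe);
 */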

/**
 * Convert the Checksum offloads to Verbs.
 *
 * @param txq_data
 *   Pointer to the Tx queue.
 * @param buf
 *   Pointer to the mbuf.
 *
 * @return
 *   The converted cs_flags.
 */
static __rte_always_inline uint8_t
txq_ol_cksum_to_cs(struct mlx5_txq_data *txq_data, struct rte_mbuf *buf)
{
	uint8_t cs_flags = 0;

	/* Should we enable HW CKSUM offload? */
	if (buf->ol_flags &
	    (PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM | PKT_TX_UDP_CKSUM |
	     PKT_TX_OUTER_IP_CKSUM)) {
		if (txq_data->tunnel_en &&
		    (buf->ol_flags &
		     (PKT_TX_TUNNEL_GRE | PKT_TX_TUNNEL_VXLAN))) {
			cs_flags = MLX5_ETH_WQE_L3_INNER_CSUM |
				   MLX5_ETH_WQE_L4_INNER_CSUM;
			if (buf->ol_flags & PKT_TX_OUTER_IP_CKSUM)
				cs_flags |= MLX5_ETH_WQE_L3_CSUM;
		} else {
			cs_flags = MLX5_ETH_WQE_L3_CSUM |
				   MLX5_ETH_WQE_L4_CSUM;
		}
	}
	return cs_flags;
}
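
/*
 * Illustrative note (not part of the original header): e.g. a plain TCP
 * packet carrying PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM maps to
 * MLX5_ETH_WQE_L3_CSUM | MLX5_ETH_WQE_L4_CSUM, while a VXLAN-encapsulated
 * one (with tunnel_en set) requests the inner checksums instead:
 *
 *	uint8_t cs_flags = txq_ol_cksum_to_cs(txq, buf);
 *	// cs_flags is then written into the WQE Ethernet segment.
 */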

#endif /* RTE_PMD_MLX5_RXTX_H_ */