/*-
 *   BSD LICENSE
 *
 *   Copyright 2017 6WIND S.A.
 *   Copyright 2017 Mellanox
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of 6WIND S.A. nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/**
 * @file
 * Data plane functions for mlx4 driver.
 */

#include <assert.h>
#include <stdint.h>
#include <string.h>

/* Verbs headers do not support -pedantic. */
#ifdef PEDANTIC
#pragma GCC diagnostic ignored "-Wpedantic"
#endif
#include <infiniband/verbs.h>
#ifdef PEDANTIC
#pragma GCC diagnostic error "-Wpedantic"
#endif

#include <rte_branch_prediction.h>
#include <rte_common.h>
#include <rte_io.h>
#include <rte_mbuf.h>
#include <rte_mempool.h>
#include <rte_prefetch.h>

#include "mlx4.h"
#include "mlx4_prm.h"
#include "mlx4_rxtx.h"
#include "mlx4_utils.h"

#define WQE_ONE_DATA_SEG_SIZE \
        (sizeof(struct mlx4_wqe_ctrl_seg) + sizeof(struct mlx4_wqe_data_seg))
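
/*
 * WQE_ONE_DATA_SEG_SIZE is the size of the smallest send WQE (one control
 * segment plus one data segment); it is used on the single-segment path in
 * mlx4_tx_burst().
 */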

/**
 * Pointer-value pair structure used in tx_post_send for saving the first
 * DWORD (32 bits) of a TXBB.
 */
struct pv {
        volatile struct mlx4_wqe_data_seg *dseg;
        uint32_t val;
};

/** A table to translate Rx completion flags to packet type. */
uint32_t mlx4_ptype_table[0x100] __rte_cache_aligned = {
        /*
         * The index to the array should have:
         *  bit[7] - MLX4_CQE_L2_TUNNEL
         *  bit[6] - MLX4_CQE_L2_TUNNEL_IPV4
         *  bit[5] - MLX4_CQE_STATUS_UDP
         *  bit[4] - MLX4_CQE_STATUS_TCP
         *  bit[3] - MLX4_CQE_STATUS_IPV4OPT
         *  bit[2] - MLX4_CQE_STATUS_IPV6
         *  bit[1] - MLX4_CQE_STATUS_IPV4F
         *  bit[0] - MLX4_CQE_STATUS_IPV4
         * giving a total of up to 256 entries.
         */
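        /*
         * For example, a plain TCP over IPv4 completion sets
         * MLX4_CQE_STATUS_IPV4 (bit 0) and MLX4_CQE_STATUS_TCP (bit 4),
         * selecting entry [0x11] below.
         */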
        [0x00] = RTE_PTYPE_L2_ETHER,
        [0x01] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
                 RTE_PTYPE_L4_NONFRAG,
        [0x02] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
                 RTE_PTYPE_L4_FRAG,
        [0x03] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
                 RTE_PTYPE_L4_FRAG,
        [0x04] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN,
        [0x09] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT,
        [0x0a] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT |
                 RTE_PTYPE_L4_FRAG,
        [0x11] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
                 RTE_PTYPE_L4_TCP,
        [0x12] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
                 RTE_PTYPE_L4_FRAG | RTE_PTYPE_L4_TCP,
        [0x14] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
                 RTE_PTYPE_L4_TCP,
        [0x18] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT |
                 RTE_PTYPE_L4_TCP,
        [0x19] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT |
                 RTE_PTYPE_L4_TCP,
        [0x1a] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT |
                 RTE_PTYPE_L4_FRAG | RTE_PTYPE_L4_TCP,
        [0x21] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
                 RTE_PTYPE_L4_UDP,
        [0x22] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
                 RTE_PTYPE_L4_FRAG | RTE_PTYPE_L4_UDP,
        [0x24] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
                 RTE_PTYPE_L4_UDP,
        [0x28] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT |
                 RTE_PTYPE_L4_UDP,
        [0x29] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT |
                 RTE_PTYPE_L4_UDP,
        [0x2a] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT |
                 RTE_PTYPE_L4_FRAG | RTE_PTYPE_L4_UDP,
        /* Tunneled - L3 IPV6 */
        [0x80] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN,
        [0x81] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
                 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN,
        [0x82] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
                 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
                 RTE_PTYPE_INNER_L4_FRAG,
        [0x83] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
                 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
                 RTE_PTYPE_INNER_L4_FRAG,
        [0x84] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
                 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN,
        [0x88] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
                 RTE_PTYPE_INNER_L3_IPV4_EXT,
        [0x89] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
                 RTE_PTYPE_INNER_L3_IPV4_EXT,
        [0x8a] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
                 RTE_PTYPE_INNER_L3_IPV4_EXT | RTE_PTYPE_INNER_L4_FRAG,
        /* Tunneled - L3 IPV6, TCP */
        [0x91] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
                 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
                 RTE_PTYPE_INNER_L4_TCP,
        [0x92] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
                 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
                 RTE_PTYPE_INNER_L4_FRAG |
                 RTE_PTYPE_INNER_L4_TCP,
        [0x93] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
                 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
                 RTE_PTYPE_INNER_L4_FRAG |
                 RTE_PTYPE_INNER_L4_TCP,
        [0x94] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
                 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
                 RTE_PTYPE_INNER_L4_TCP,
        [0x98] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
                 RTE_PTYPE_INNER_L3_IPV4_EXT |
                 RTE_PTYPE_INNER_L4_TCP,
        [0x99] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
                 RTE_PTYPE_INNER_L3_IPV4_EXT |
                 RTE_PTYPE_INNER_L4_TCP,
        [0x9a] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
                 RTE_PTYPE_INNER_L3_IPV4_EXT | RTE_PTYPE_INNER_L4_FRAG |
                 RTE_PTYPE_INNER_L4_TCP,
        /* Tunneled - L3 IPV6, UDP */
        [0xa1] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
                 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
                 RTE_PTYPE_INNER_L4_UDP,
        [0xa2] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
                 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
                 RTE_PTYPE_INNER_L4_FRAG |
                 RTE_PTYPE_INNER_L4_UDP,
        [0xa3] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
                 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
                 RTE_PTYPE_INNER_L4_FRAG |
                 RTE_PTYPE_INNER_L4_UDP,
        [0xa4] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
                 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
                 RTE_PTYPE_INNER_L4_UDP,
        [0xa8] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
                 RTE_PTYPE_INNER_L3_IPV4_EXT |
                 RTE_PTYPE_INNER_L4_UDP,
        [0xa9] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
                 RTE_PTYPE_INNER_L3_IPV4_EXT |
                 RTE_PTYPE_INNER_L4_UDP,
        [0xaa] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
                 RTE_PTYPE_INNER_L3_IPV4_EXT | RTE_PTYPE_INNER_L4_FRAG |
                 RTE_PTYPE_INNER_L4_UDP,
        /* Tunneled - L3 IPV4 */
        [0xc0] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
        [0xc1] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
                 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN,
        [0xc2] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
                 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
                 RTE_PTYPE_INNER_L4_FRAG,
        [0xc3] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
                 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
                 RTE_PTYPE_INNER_L4_FRAG,
        [0xc4] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
                 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN,
        [0xc8] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
                 RTE_PTYPE_INNER_L3_IPV4_EXT,
        [0xc9] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
                 RTE_PTYPE_INNER_L3_IPV4_EXT,
        [0xca] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
                 RTE_PTYPE_INNER_L3_IPV4_EXT |
                 RTE_PTYPE_INNER_L4_FRAG,
        /* Tunneled - L3 IPV4, TCP */
        [0xd0] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
                 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
                 RTE_PTYPE_INNER_L4_TCP,
        [0xd1] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
                 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
                 RTE_PTYPE_INNER_L4_TCP,
        [0xd2] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
                 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
                 RTE_PTYPE_INNER_L4_FRAG |
                 RTE_PTYPE_INNER_L4_TCP,
        [0xd3] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
                 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
                 RTE_PTYPE_INNER_L4_FRAG |
                 RTE_PTYPE_INNER_L4_TCP,
        [0xd4] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
                 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
                 RTE_PTYPE_INNER_L4_TCP,
        [0xd8] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
                 RTE_PTYPE_INNER_L3_IPV4_EXT |
                 RTE_PTYPE_INNER_L4_TCP,
        [0xd9] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
                 RTE_PTYPE_INNER_L3_IPV4_EXT |
                 RTE_PTYPE_INNER_L4_TCP,
        [0xda] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
                 RTE_PTYPE_INNER_L3_IPV4_EXT | RTE_PTYPE_INNER_L4_FRAG |
                 RTE_PTYPE_INNER_L4_TCP,
        /* Tunneled - L3 IPV4, UDP */
        [0xe0] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
                 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
                 RTE_PTYPE_INNER_L4_UDP,
        [0xe1] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
                 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
                 RTE_PTYPE_INNER_L4_UDP,
        [0xe2] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
                 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
                 RTE_PTYPE_INNER_L4_FRAG |
                 RTE_PTYPE_INNER_L4_UDP,
        [0xe3] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
                 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
                 RTE_PTYPE_INNER_L4_FRAG |
                 RTE_PTYPE_INNER_L4_UDP,
        [0xe4] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
                 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
                 RTE_PTYPE_INNER_L4_UDP,
        [0xe8] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
                 RTE_PTYPE_INNER_L3_IPV4_EXT | RTE_PTYPE_INNER_L4_UDP,
        [0xe9] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
                 RTE_PTYPE_INNER_L3_IPV4_EXT | RTE_PTYPE_INNER_L4_UDP,
        [0xea] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
                 RTE_PTYPE_INNER_L3_IPV4_EXT | RTE_PTYPE_INNER_L4_FRAG |
                 RTE_PTYPE_INNER_L4_UDP,
};
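/* Indices not listed above keep their zero initializer, i.e. RTE_PTYPE_UNKNOWN. */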

/**
 * Stamp a WQE so it won't be reused by the HW.
 *
 * This routine is used when a WQE used by the chip is freed, or when building
 * a WQ entry has failed, leaving partial information on the queue. The number
 * of TXBBs to stamp is read back from the WQE control segment.
 *
 * @param sq
 *   Pointer to the SQ structure.
 * @param index
 *   Index of the freed WQE.
 * @param owner
 *   The value of the WQE owner bit to use in the stamp.
 *
 * @return
 *   The number of Tx basic blocks (TXBB) the WQE contained.
 */
static int
mlx4_txq_stamp_freed_wqe(struct mlx4_sq *sq, uint16_t index, uint8_t owner)
{
        uint32_t stamp = rte_cpu_to_be_32(MLX4_SQ_STAMP_VAL |
                                          (!!owner << MLX4_SQ_STAMP_SHIFT));
        volatile uint8_t *wqe = mlx4_get_send_wqe(sq,
                                                  (index & sq->txbb_cnt_mask));
        volatile uint32_t *ptr = (volatile uint32_t *)wqe;
        int i;
        int num_txbbs;
        int txbbs_size;

        /* Extract the size from the control segment of the WQE. */
        num_txbbs = MLX4_SIZE_TO_TXBBS((((volatile struct mlx4_wqe_ctrl_seg *)
                                         wqe)->fence_size & 0x3f) << 4);
        txbbs_size = num_txbbs * MLX4_TXBB_SIZE;
        /* Optimize the common case when there is no wrap-around. */
        if (wqe + txbbs_size <= sq->eob) {
                /* Stamp the freed descriptor. */
                for (i = 0; i < txbbs_size; i += MLX4_SQ_STAMP_STRIDE) {
                        *ptr = stamp;
                        ptr += MLX4_SQ_STAMP_DWORDS;
                }
        } else {
                /* Stamp the freed descriptor. */
                for (i = 0; i < txbbs_size; i += MLX4_SQ_STAMP_STRIDE) {
                        *ptr = stamp;
                        ptr += MLX4_SQ_STAMP_DWORDS;
                        if ((volatile uint8_t *)ptr >= sq->eob) {
                                ptr = (volatile uint32_t *)sq->buf;
                                /* Flip the stamp owner bit on wrap-around. */
                                stamp ^= RTE_BE32(0x80000000);
                        }
                }
        }
        return num_txbbs;
}

/**
 * Manage Tx completions.
 *
 * When sending a burst, mlx4_tx_burst() posts several WRs.
 * To improve performance, a completion event is only required once every
 * MLX4_PMD_TX_PER_COMP_REQ sends. Doing so discards completion information
 * for other WRs, but this information would not be used anyway.
 *
 * @param txq
 *   Pointer to Tx queue structure.
 * @param elts_n
 *   Number of elements in the Tx queue ring.
 * @param sq
 *   Pointer to the SQ structure.
 *
 * @return
 *   0 on success, -1 on failure.
 */
static int
mlx4_txq_complete(struct txq *txq, const unsigned int elts_n,
                  struct mlx4_sq *sq)
{
        unsigned int elts_comp = txq->elts_comp;
        unsigned int elts_tail = txq->elts_tail;
        unsigned int sq_tail = sq->tail;
        struct mlx4_cq *cq = &txq->mcq;
        volatile struct mlx4_cqe *cqe;
        uint32_t cons_index = cq->cons_index;
        uint16_t new_index;
        uint16_t nr_txbbs = 0;
        int pkts = 0;

        /*
         * Traverse over all CQ entries reported and handle each WQ entry
         * reported by them.
         */
        do {
                cqe = (volatile struct mlx4_cqe *)mlx4_get_cqe(cq, cons_index);
                if (unlikely(!!(cqe->owner_sr_opcode & MLX4_CQE_OWNER_MASK) ^
                    !!(cons_index & cq->cqe_cnt)))
                        break;
                /*
                 * Make sure we read the CQE after we read the ownership bit.
                 */
                rte_io_rmb();
                if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) ==
                             MLX4_CQE_OPCODE_ERROR)) {
                        volatile struct mlx4_err_cqe *cqe_err =
                                (volatile struct mlx4_err_cqe *)cqe;
                        ERROR("%p CQE error - vendor syndrome: 0x%x"
                              " syndrome: 0x%x\n",
                              (void *)txq, cqe_err->vendor_err,
                              cqe_err->syndrome);
                }
                /* Get WQE index reported in the CQE. */
                new_index =
                        rte_be_to_cpu_16(cqe->wqe_index) & sq->txbb_cnt_mask;
                do {
                        /* Free next descriptor. */
                        sq_tail += nr_txbbs;
                        nr_txbbs =
                                mlx4_txq_stamp_freed_wqe(sq,
                                        sq_tail & sq->txbb_cnt_mask,
                                        !!(sq_tail & sq->txbb_cnt));
                        pkts++;
                } while ((sq_tail & sq->txbb_cnt_mask) != new_index);
                cons_index++;
        } while (1);
        if (unlikely(pkts == 0))
                return 0;
        /* Update CQ consumer index. */
        cq->cons_index = cons_index;
        *cq->set_ci_db = rte_cpu_to_be_32(cq->cons_index & MLX4_CQ_DB_CI_MASK);
        sq->tail = sq_tail + nr_txbbs;
        /* Update the list of packets posted for transmission. */
        elts_comp -= pkts;
        assert(elts_comp <= txq->elts_comp);
        /*
         * Assume completion status is successful as nothing can be done about
         * it anyway.
         */
        elts_tail += pkts;
        if (elts_tail >= elts_n)
                elts_tail -= elts_n;
        txq->elts_tail = elts_tail;
        txq->elts_comp = elts_comp;
        return 0;
}

/**
 * Get memory pool (MP) from mbuf. If mbuf is indirect, the pool from which
 * the cloned mbuf is allocated is returned instead.
 *
 * @param buf
 *   Pointer to mbuf.
 *
 * @return
 *   Memory pool where data is located for given mbuf.
 */
static struct rte_mempool *
mlx4_txq_mb2mp(struct rte_mbuf *buf)
{
        if (unlikely(RTE_MBUF_INDIRECT(buf)))
                return rte_mbuf_from_indirect(buf)->pool;
        return buf->pool;
}

/**
 * Write a multi-segment packet to the Tx work queue.
 *
 * @param buf
 *   Packet mbuf (possibly chained).
 * @param txq
 *   Pointer to Tx queue structure.
 * @param[out] pctrl
 *   Updated with the address of the WQE control segment on success.
 *
 * @return
 *   Number of TXBBs used on success, -1 on failure.
 */
static int
mlx4_tx_burst_segs(struct rte_mbuf *buf, struct txq *txq,
                   volatile struct mlx4_wqe_ctrl_seg **pctrl)
{
        int wqe_real_size;
        int nr_txbbs;
        struct pv *pv = (struct pv *)txq->bounce_buf;
        struct mlx4_sq *sq = &txq->msq;
        uint32_t head_idx = sq->head & sq->txbb_cnt_mask;
        volatile struct mlx4_wqe_ctrl_seg *ctrl;
        volatile struct mlx4_wqe_data_seg *dseg;
        struct rte_mbuf *sbuf;
        uint32_t lkey;
        uintptr_t addr;
        uint32_t byte_count;
        int pv_counter = 0;

        /* Calculate the needed work queue entry size for this packet. */
        wqe_real_size = sizeof(struct mlx4_wqe_ctrl_seg) +
                buf->nb_segs * sizeof(struct mlx4_wqe_data_seg);
        nr_txbbs = MLX4_SIZE_TO_TXBBS(wqe_real_size);
        /*
         * Check that there is room for this WQE in the send queue and that
         * the WQE size is legal.
         */
        if (((sq->head - sq->tail) + nr_txbbs +
             sq->headroom_txbbs) >= sq->txbb_cnt ||
            nr_txbbs > MLX4_MAX_WQE_TXBBS) {
                return -1;
        }
        /* Get the control and data entries of the WQE. */
        ctrl = (volatile struct mlx4_wqe_ctrl_seg *)
                        mlx4_get_send_wqe(sq, head_idx);
        dseg = (volatile struct mlx4_wqe_data_seg *)
                        ((uintptr_t)ctrl + sizeof(struct mlx4_wqe_ctrl_seg));
        *pctrl = ctrl;
        /* Fill the data segments with buffer information. */
        for (sbuf = buf; sbuf != NULL; sbuf = sbuf->next, dseg++) {
                addr = rte_pktmbuf_mtod(sbuf, uintptr_t);
                rte_prefetch0((volatile void *)addr);
                /* Handle WQE wraparound. */
                if (dseg >= (volatile struct mlx4_wqe_data_seg *)sq->eob)
                        dseg = (volatile struct mlx4_wqe_data_seg *)sq->buf;
                dseg->addr = rte_cpu_to_be_64(addr);
                /* Memory region key (big endian) for this memory pool. */
                lkey = mlx4_txq_mp2mr(txq, mlx4_txq_mb2mp(sbuf));
                dseg->lkey = rte_cpu_to_be_32(lkey);
                /* Check for a missing memory region (invalid lkey). */
                if (unlikely(dseg->lkey == rte_cpu_to_be_32((uint32_t)-1))) {
                        /* MR does not exist. */
                        DEBUG("%p: unable to get MP <-> MR association",
                              (void *)txq);
                        /*
                         * Restamp entry in case of failure.
                         * Make sure that the size is written correctly.
                         * Note that we give ownership to the SW, not the HW.
                         */
                        wqe_real_size = sizeof(struct mlx4_wqe_ctrl_seg) +
                                buf->nb_segs *
                                sizeof(struct mlx4_wqe_data_seg);
                        ctrl->fence_size = (wqe_real_size >> 4) & 0x3f;
                        mlx4_txq_stamp_freed_wqe(sq, head_idx,
                                        (sq->head & sq->txbb_cnt) ? 0 : 1);
                        return -1;
                }
                if (likely(sbuf->data_len)) {
                        byte_count = rte_cpu_to_be_32(sbuf->data_len);
                } else {
                        /*
                         * Zero length segment is treated as inline segment
                         * with zero data.
                         */
                        byte_count = RTE_BE32(0x80000000);
                }
                /*
                 * If the data segment is not at the beginning of a
                 * Tx basic block (TXBB) then write the byte count,
                 * else postpone the writing to just before updating the
                 * control segment.
                 */
                if ((uintptr_t)dseg & (uintptr_t)(MLX4_TXBB_SIZE - 1)) {
#if RTE_CACHE_LINE_SIZE < 64
                        /*
                         * Need a barrier here before writing the byte_count
                         * fields to make sure that all the data is visible
                         * before the byte_count field is set.
                         * Otherwise, if the segment begins a new cacheline,
                         * the HCA prefetcher could grab the 64-byte chunk and
                         * get a valid (!= 0xffffffff) byte count but stale
                         * data, and end up sending the wrong data.
                         */
                        rte_io_wmb();
#endif /* RTE_CACHE_LINE_SIZE */
                        dseg->byte_count = byte_count;
                } else {
                        /*
                         * This data segment starts at the beginning of a new
                         * TXBB, so we need to postpone its byte_count writing
                         * for later.
                         */
                        pv[pv_counter].dseg = dseg;
                        pv[pv_counter++].val = byte_count;
                }
        }
        /* Write the first DWORD of each TXBB saved earlier. */
        if (pv_counter) {
                /* Need a barrier here before writing the byte_count. */
                rte_io_wmb();
                for (--pv_counter; pv_counter >= 0; pv_counter--)
                        pv[pv_counter].dseg->byte_count = pv[pv_counter].val;
        }
        /* Fill the control parameters for this packet. */
        ctrl->fence_size = (wqe_real_size >> 4) & 0x3f;
        return nr_txbbs;
}
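
/*
 * Note: mlx4_tx_burst_segs() only fills the data segments and fence size;
 * the caller (mlx4_tx_burst()) completes the control segment (flags,
 * immediate data and ownership/opcode) through the pointer returned in
 * *pctrl.
 */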

/**
 * DPDK callback for Tx.
 *
 * @param dpdk_txq
 *   Generic pointer to Tx queue structure.
 * @param[in] pkts
 *   Packets to transmit.
 * @param pkts_n
 *   Number of packets in array.
 *
 * @return
 *   Number of packets successfully transmitted (<= pkts_n).
 */
uint16_t
mlx4_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
{
        struct txq *txq = (struct txq *)dpdk_txq;
        unsigned int elts_head = txq->elts_head;
        const unsigned int elts_n = txq->elts_n;
        unsigned int bytes_sent = 0;
        unsigned int i;
        unsigned int max;
        struct mlx4_sq *sq = &txq->msq;
        int nr_txbbs;

        assert(txq->elts_comp_cd != 0);
        if (likely(txq->elts_comp != 0))
                mlx4_txq_complete(txq, elts_n, sq);
        max = (elts_n - (elts_head - txq->elts_tail));
        if (max > elts_n)
                max -= elts_n;
        assert(max >= 1);
        assert(max <= elts_n);
        /* Always leave one free entry in the ring. */
        --max;
        if (max > pkts_n)
                max = pkts_n;
        for (i = 0; (i != max); ++i) {
                struct rte_mbuf *buf = pkts[i];
                unsigned int elts_head_next =
                        (((elts_head + 1) == elts_n) ? 0 : elts_head + 1);
                struct txq_elt *elt_next = &(*txq->elts)[elts_head_next];
                struct txq_elt *elt = &(*txq->elts)[elts_head];
                uint32_t owner_opcode = MLX4_OPCODE_SEND;
                volatile struct mlx4_wqe_ctrl_seg *ctrl;
                volatile struct mlx4_wqe_data_seg *dseg;
                union {
                        uint32_t flags;
                        uint16_t flags16[2];
                } srcrb;
                uint32_t head_idx = sq->head & sq->txbb_cnt_mask;
                uint32_t lkey;
                uintptr_t addr;

                /* Clean up old buffer. */
                if (likely(elt->buf != NULL)) {
                        struct rte_mbuf *tmp = elt->buf;

#ifndef NDEBUG
                        /* Poisoning. */
                        memset(elt, 0x66, sizeof(*elt));
#endif
                        /* Faster than rte_pktmbuf_free(). */
                        do {
                                struct rte_mbuf *next = tmp->next;

                                rte_pktmbuf_free_seg(tmp);
                                tmp = next;
                        } while (tmp != NULL);
                }
                RTE_MBUF_PREFETCH_TO_FREE(elt_next->buf);
                if (buf->nb_segs == 1) {
                        /*
                         * Check that there is room for this WQE in the send
                         * queue and that the WQE size is legal.
                         */
                        if (((sq->head - sq->tail) + 1 + sq->headroom_txbbs) >=
                            sq->txbb_cnt || 1 > MLX4_MAX_WQE_TXBBS) {
                                elt->buf = NULL;
                                break;
                        }
                        /* Get the control and data entries of the WQE. */
                        ctrl = (volatile struct mlx4_wqe_ctrl_seg *)
                                        mlx4_get_send_wqe(sq, head_idx);
                        dseg = (volatile struct mlx4_wqe_data_seg *)
                                        ((uintptr_t)ctrl +
                                         sizeof(struct mlx4_wqe_ctrl_seg));
                        addr = rte_pktmbuf_mtod(buf, uintptr_t);
                        rte_prefetch0((volatile void *)addr);
                        /* Handle WQE wraparound. */
                        if (dseg >=
                            (volatile struct mlx4_wqe_data_seg *)sq->eob)
                                dseg = (volatile struct mlx4_wqe_data_seg *)
                                                sq->buf;
                        dseg->addr = rte_cpu_to_be_64(addr);
                        /* Memory region key (big endian). */
                        lkey = mlx4_txq_mp2mr(txq, mlx4_txq_mb2mp(buf));
                        dseg->lkey = rte_cpu_to_be_32(lkey);
                        if (unlikely(dseg->lkey ==
                                     rte_cpu_to_be_32((uint32_t)-1))) {
                                /* MR does not exist. */
                                DEBUG("%p: unable to get MP <-> MR association",
                                      (void *)txq);
                                /*
                                 * Restamp entry in case of failure.
                                 * Make sure that the size is written
                                 * correctly. Note that we give ownership
                                 * to the SW, not the HW.
                                 */
                                ctrl->fence_size =
                                        (WQE_ONE_DATA_SEG_SIZE >> 4) & 0x3f;
                                mlx4_txq_stamp_freed_wqe(sq, head_idx,
                                        (sq->head & sq->txbb_cnt) ? 0 : 1);
                                elt->buf = NULL;
                                break;
                        }
                        /* Never TXBB-aligned, no need for compiler barrier. */
                        dseg->byte_count = rte_cpu_to_be_32(buf->data_len);
                        /* Fill the control parameters for this packet. */
                        ctrl->fence_size = (WQE_ONE_DATA_SEG_SIZE >> 4) & 0x3f;
                        nr_txbbs = 1;
                } else {
                        nr_txbbs = mlx4_tx_burst_segs(buf, txq, &ctrl);
                        if (nr_txbbs < 0) {
                                elt->buf = NULL;
                                break;
                        }
                }
                /*
                 * For raw Ethernet, the SOLICIT flag is used to indicate
                 * that no ICRC should be calculated.
                 */
                txq->elts_comp_cd -= nr_txbbs;
                if (unlikely(txq->elts_comp_cd <= 0)) {
                        txq->elts_comp_cd = txq->elts_comp_cd_init;
                        srcrb.flags = RTE_BE32(MLX4_WQE_CTRL_SOLICIT |
                                               MLX4_WQE_CTRL_CQ_UPDATE);
                } else {
                        srcrb.flags = RTE_BE32(MLX4_WQE_CTRL_SOLICIT);
                }
                /* Enable HW checksum offload if requested. */
                if (txq->csum &&
                    (buf->ol_flags &
                     (PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM | PKT_TX_UDP_CKSUM))) {
                        const uint64_t is_tunneled = (buf->ol_flags &
                                                      (PKT_TX_TUNNEL_GRE |
                                                       PKT_TX_TUNNEL_VXLAN));

                        if (is_tunneled && txq->csum_l2tun) {
                                owner_opcode |= MLX4_WQE_CTRL_IIP_HDR_CSUM |
                                                MLX4_WQE_CTRL_IL4_HDR_CSUM;
                                if (buf->ol_flags & PKT_TX_OUTER_IP_CKSUM)
                                        srcrb.flags |=
                                            RTE_BE32(MLX4_WQE_CTRL_IP_HDR_CSUM);
                        } else {
                                srcrb.flags |=
                                    RTE_BE32(MLX4_WQE_CTRL_IP_HDR_CSUM |
                                             MLX4_WQE_CTRL_TCP_UDP_CSUM);
                        }
                }
                if (txq->lb) {
                        /*
                         * Copy destination MAC address to the WQE, this allows
                         * loopback in eSwitch, so that VFs and PF can
                         * communicate with each other.
                         */
                        srcrb.flags16[0] = *(rte_pktmbuf_mtod(buf, uint16_t *));
                        ctrl->imm = *(rte_pktmbuf_mtod_offset(buf, uint32_t *,
                                                sizeof(uint16_t)));
                } else {
                        ctrl->imm = 0;
                }
                ctrl->srcrb_flags = srcrb.flags;
                /*
                 * Make sure descriptor is fully written before
                 * setting ownership bit (because HW can start
                 * executing as soon as we do).
                 */
                rte_io_wmb();
                ctrl->owner_opcode = rte_cpu_to_be_32(owner_opcode |
                                              ((sq->head & sq->txbb_cnt) ?
                                               MLX4_BIT_WQE_OWN : 0));
                sq->head += nr_txbbs;
                elt->buf = buf;
                bytes_sent += buf->pkt_len;
                elts_head = elts_head_next;
        }
        /* Take a shortcut if nothing must be sent. */
        if (unlikely(i == 0))
                return 0;
        /* Increment send statistics counters. */
        txq->stats.opackets += i;
        txq->stats.obytes += bytes_sent;
        /* Make sure that descriptors are written before doorbell record. */
        rte_wmb();
        /* Ring QP doorbell. */
        rte_write32(txq->msq.doorbell_qpn, txq->msq.db);
        txq->elts_head = elts_head;
        txq->elts_comp += i;
        return i;
}

/**
 * Translate Rx completion flags to packet type.
 *
 * @param[in] cqe
 *   Pointer to CQE.
 * @param l2tun_offload
 *   Nonzero when L2 tunnel offloads are enabled.
 *
 * @return
 *   Packet type for struct rte_mbuf.
 */
static inline uint32_t
rxq_cq_to_pkt_type(volatile struct mlx4_cqe *cqe,
                   uint32_t l2tun_offload)
{
        uint8_t idx = 0;
        uint32_t pinfo = rte_be_to_cpu_32(cqe->vlan_my_qpn);
        uint32_t status = rte_be_to_cpu_32(cqe->status);

        /*
         * The index to the array should have:
         *  bit[7] - MLX4_CQE_L2_TUNNEL
         *  bit[6] - MLX4_CQE_L2_TUNNEL_IPV4
         */
        if (l2tun_offload && (pinfo & MLX4_CQE_L2_TUNNEL))
                idx |= ((pinfo & MLX4_CQE_L2_TUNNEL) >> 20) |
                       ((pinfo & MLX4_CQE_L2_TUNNEL_IPV4) >> 19);
        /*
         * The index to the array should have:
         *  bit[5] - MLX4_CQE_STATUS_UDP
         *  bit[4] - MLX4_CQE_STATUS_TCP
         *  bit[3] - MLX4_CQE_STATUS_IPV4OPT
         *  bit[2] - MLX4_CQE_STATUS_IPV6
         *  bit[1] - MLX4_CQE_STATUS_IPV4F
         *  bit[0] - MLX4_CQE_STATUS_IPV4
         * giving a total of up to 256 entries.
         */
        idx |= ((status & MLX4_CQE_STATUS_PTYPE_MASK) >> 22);
        return mlx4_ptype_table[idx];
}

/**
 * Translate Rx completion flags to offload flags.
 *
 * @param flags
 *   Rx completion flags returned by mlx4_cqe_flags().
 * @param csum
 *   Whether Rx checksums are enabled.
 * @param csum_l2tun
 *   Whether Rx L2 tunnel checksums are enabled.
 *
 * @return
 *   Offload flags (ol_flags) in mbuf format.
 */
static inline uint32_t
rxq_cq_to_ol_flags(uint32_t flags, int csum, int csum_l2tun)
{
        uint32_t ol_flags = 0;

        if (csum)
                ol_flags |=
                        mlx4_transpose(flags,
                                       MLX4_CQE_STATUS_IP_HDR_CSUM_OK,
                                       PKT_RX_IP_CKSUM_GOOD) |
                        mlx4_transpose(flags,
                                       MLX4_CQE_STATUS_TCP_UDP_CSUM_OK,
                                       PKT_RX_L4_CKSUM_GOOD);
        if ((flags & MLX4_CQE_L2_TUNNEL) && csum_l2tun)
                ol_flags |=
                        mlx4_transpose(flags,
                                       MLX4_CQE_L2_TUNNEL_IPOK,
                                       PKT_RX_IP_CKSUM_GOOD) |
                        mlx4_transpose(flags,
                                       MLX4_CQE_L2_TUNNEL_L4_CSUM,
                                       PKT_RX_L4_CKSUM_GOOD);
        return ol_flags;
}

/**
 * Extract checksum information from CQE flags.
 *
 * @param cqe
 *   Pointer to CQE structure.
 * @param csum
 *   Whether Rx checksums are enabled.
 * @param csum_l2tun
 *   Whether Rx L2 tunnel checksums are enabled.
 *
 * @return
 *   CQE checksum information.
 */
static inline uint32_t
mlx4_cqe_flags(volatile struct mlx4_cqe *cqe, int csum, int csum_l2tun)
{
        uint32_t flags = 0;

        /*
         * The relevant bits are in different locations in the CQE fields,
         * therefore they can be joined in a single 32-bit variable.
         */
        if (csum)
                flags = (rte_be_to_cpu_32(cqe->status) &
                         MLX4_CQE_STATUS_IPV4_CSUM_OK);
        if (csum_l2tun)
                flags |= (rte_be_to_cpu_32(cqe->vlan_my_qpn) &
                          (MLX4_CQE_L2_TUNNEL |
                           MLX4_CQE_L2_TUNNEL_IPOK |
                           MLX4_CQE_L2_TUNNEL_L4_CSUM |
                           MLX4_CQE_L2_TUNNEL_IPV4));
        return flags;
}

/**
 * Poll one CQE from CQ.
 *
 * @param rxq
 *   Pointer to the receive queue structure.
 * @param[out] out
 *   Just polled CQE.
 *
 * @return
 *   Number of bytes of the CQE, 0 in case there is no completion.
 */
static unsigned int
mlx4_cq_poll_one(struct rxq *rxq, volatile struct mlx4_cqe **out)
{
        int ret = 0;
        volatile struct mlx4_cqe *cqe = NULL;
        struct mlx4_cq *cq = &rxq->mcq;

        cqe = (volatile struct mlx4_cqe *)mlx4_get_cqe(cq, cq->cons_index);
        if (!!(cqe->owner_sr_opcode & MLX4_CQE_OWNER_MASK) ^
            !!(cq->cons_index & cq->cqe_cnt))
                goto out;
        /*
         * Make sure we read CQ entry contents after we've checked the
         * ownership bit.
         */
        rte_rmb();
        assert(!(cqe->owner_sr_opcode & MLX4_CQE_IS_SEND_MASK));
        assert((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) !=
               MLX4_CQE_OPCODE_ERROR);
        ret = rte_be_to_cpu_32(cqe->byte_cnt);
        ++cq->cons_index;
out:
        *out = cqe;
        return ret;
}

/**
 * DPDK callback for Rx with scattered packets support.
 *
 * @param dpdk_rxq
 *   Generic pointer to Rx queue structure.
 * @param[out] pkts
 *   Array to store received packets.
 * @param pkts_n
 *   Maximum number of packets in array.
 *
 * @return
 *   Number of packets successfully received (<= pkts_n).
 */
uint16_t
mlx4_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
{
        struct rxq *rxq = dpdk_rxq;
        const uint32_t wr_cnt = (1 << rxq->elts_n) - 1;
        const uint16_t sges_n = rxq->sges_n;
        struct rte_mbuf *pkt = NULL;
        struct rte_mbuf *seg = NULL;
        unsigned int i = 0;
        uint32_t rq_ci = rxq->rq_ci << sges_n;
        int len = 0;

        while (pkts_n) {
                volatile struct mlx4_cqe *cqe;
                uint32_t idx = rq_ci & wr_cnt;
                struct rte_mbuf *rep = (*rxq->elts)[idx];
                volatile struct mlx4_wqe_data_seg *scat = &(*rxq->wqes)[idx];

                /* Update the 'next' pointer of the previous segment. */
                if (pkt)
                        seg->next = rep;
                seg = rep;
                rte_prefetch0(seg);
                rte_prefetch0(scat);
                rep = rte_mbuf_raw_alloc(rxq->mp);
                if (unlikely(rep == NULL)) {
                        ++rxq->stats.rx_nombuf;
                        if (!pkt) {
                                /*
                                 * No buffers before we even started,
                                 * bail out silently.
                                 */
                                break;
                        }
                        while (pkt != seg) {
                                assert(pkt != (*rxq->elts)[idx]);
                                rep = pkt->next;
                                pkt->next = NULL;
                                pkt->nb_segs = 1;
                                rte_mbuf_raw_free(pkt);
                                pkt = rep;
                        }
                        break;
                }
                if (!pkt) {
                        /* Looking for the new packet. */
                        len = mlx4_cq_poll_one(rxq, &cqe);
                        if (!len) {
                                rte_mbuf_raw_free(rep);
                                break;
                        }
                        if (unlikely(len < 0)) {
                                /* Rx error, packet is likely too large. */
                                rte_mbuf_raw_free(rep);
                                ++rxq->stats.idropped;
                                goto skip;
                        }
                        pkt = seg;
                        /* Update packet information. */
                        pkt->packet_type =
                                rxq_cq_to_pkt_type(cqe, rxq->l2tun_offload);
                        pkt->ol_flags = 0;
                        pkt->pkt_len = len;
                        if (rxq->csum | rxq->csum_l2tun) {
                                uint32_t flags =
                                        mlx4_cqe_flags(cqe,
                                                       rxq->csum,
                                                       rxq->csum_l2tun);

                                pkt->ol_flags |=
                                        rxq_cq_to_ol_flags(flags,
                                                           rxq->csum,
                                                           rxq->csum_l2tun);
                        }
                }
                rep->nb_segs = 1;
                rep->port = rxq->port_id;
                rep->data_len = seg->data_len;
                rep->data_off = seg->data_off;
                (*rxq->elts)[idx] = rep;
                /*
                 * Fill NIC descriptor with the new buffer. The lkey and size
                 * of the buffers are already known, only the buffer address
                 * changes.
                 */
                scat->addr = rte_cpu_to_be_64(rte_pktmbuf_mtod(rep, uintptr_t));
                if (len > seg->data_len) {
                        len -= seg->data_len;
                        ++pkt->nb_segs;
                        ++rq_ci;
                        continue;
                }
                /* The last segment. */
                seg->data_len = len;
                /* Increment bytes counter. */
                rxq->stats.ibytes += pkt->pkt_len;
                /* Return packet. */
                *(pkts++) = pkt;
                pkt = NULL;
                --pkts_n;
                ++i;
skip:
                /* Align consumer index to the next stride. */
                rq_ci >>= sges_n;
                ++rq_ci;
                rq_ci <<= sges_n;
        }
        if (unlikely(i == 0 && (rq_ci >> sges_n) == rxq->rq_ci))
                return 0;
        /* Update the consumer index. */
        rxq->rq_ci = rq_ci >> sges_n;
        rte_wmb();
        *rxq->rq_db = rte_cpu_to_be_32(rxq->rq_ci);
        *rxq->mcq.set_ci_db =
                rte_cpu_to_be_32(rxq->mcq.cons_index & MLX4_CQ_DB_CI_MASK);
        /* Increment packets counter. */
        rxq->stats.ipackets += i;
        return i;
}

/**
 * Dummy DPDK callback for Tx.
 *
 * This function is used to temporarily replace the real callback during
 * unsafe control operations on the queue, or in case of error.
 *
 * @param dpdk_txq
 *   Generic pointer to Tx queue structure.
 * @param[in] pkts
 *   Packets to transmit.
 * @param pkts_n
 *   Number of packets in array.
 *
 * @return
 *   Number of packets successfully transmitted (<= pkts_n).
 */
uint16_t
mlx4_tx_burst_removed(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
{
        (void)dpdk_txq;
        (void)pkts;
        (void)pkts_n;
        return 0;
}

/**
 * Dummy DPDK callback for Rx.
 *
 * This function is used to temporarily replace the real callback during
 * unsafe control operations on the queue, or in case of error.
 *
 * @param dpdk_rxq
 *   Generic pointer to Rx queue structure.
 * @param[out] pkts
 *   Array to store received packets.
 * @param pkts_n
 *   Maximum number of packets in array.
 *
 * @return
 *   Number of packets successfully received (<= pkts_n).
 */
uint16_t
mlx4_rx_burst_removed(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
{
        (void)dpdk_rxq;
        (void)pkts;
        (void)pkts_n;
        return 0;
}