/*
 *------------------------------------------------------------------
 * Copyright (c) 2018 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *------------------------------------------------------------------
 */
#include <vlib/vlib.h>
#include <vlib/unix/unix.h>
#include <vlib/pci/pci.h>
#include <vppinfra/ring.h>
#include <vnet/ethernet/ethernet.h>
#include <vnet/devices/devices.h>

#include <rdma/rdma.h>
#ifndef MLX5_ETH_L2_INLINE_HEADER_SIZE
#define MLX5_ETH_L2_INLINE_HEADER_SIZE	18
#endif

#define RDMA_TX_RETRIES 5

#define RDMA_TXQ_DV_DSEG_SZ(txq)	(RDMA_MLX5_WQE_DS * RDMA_TXQ_DV_SQ_SZ(txq))
#define RDMA_TXQ_DV_DSEG2WQE(d)		(((d) + RDMA_MLX5_WQE_DS - 1) / RDMA_MLX5_WQE_DS)
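/*
 * Each WQE basic block (WQEBB) holds RDMA_MLX5_WQE_DS (4) data segments,
 * so RDMA_TXQ_DV_DSEG2WQE() rounds a dseg count up to WQEBBs:
 * e.g. RDMA_TXQ_DV_DSEG2WQE(5) = (5 + 4 - 1) / 4 = 2.
 */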
/*
 * MLX5 direct verbs tx/free functions
 */
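/*
 * Poll the tx completion queue: walk CQEs until we reach one still owned
 * by the NIC (the ownership bit alternates on each CQ wrap-around), then
 * free all buffers up to the WQE recorded by the last completion and
 * update the CQ doorbell record.
 */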
static_always_inline void
rdma_device_output_free_mlx5 (vlib_main_t * vm,
			      const vlib_node_runtime_t * node,
			      rdma_txq_t * txq)
{
  u16 idx = txq->dv_cq_idx;
  u32 cq_mask = pow2_mask (txq->dv_cq_log2sz);
  u32 sq_mask = pow2_mask (txq->dv_sq_log2sz);
  u32 mask = pow2_mask (txq->bufs_log2sz);
  u32 buf_sz = RDMA_TXQ_BUF_SZ (txq);
  u32 log2_cq_sz = txq->dv_cq_log2sz;
  struct mlx5_cqe64 *cqes = txq->dv_cq_cqes, *cur = cqes + (idx & cq_mask);
  u8 op_own;
  const rdma_mlx5_wqe_t *wqe;

  /* walk all CQEs currently owned by software */
  for (;;)
    {
      op_own = *(volatile u8 *) &cur->op_own;
      if (((idx >> log2_cq_sz) & MLX5_CQE_OWNER_MASK) !=
	  (op_own & MLX5_CQE_OWNER_MASK) || (op_own >> 4) == MLX5_CQE_INVALID)
	break;
      if (PREDICT_FALSE ((op_own >> 4) != MLX5_CQE_REQ))
	vlib_error_count (vm, node->node_index, RDMA_TX_ERROR_COMPLETION, 1);
      idx++;
      cur = cqes + (idx & cq_mask);
    }

  if (idx == txq->dv_cq_idx)
    return;			/* nothing to do */

  /* only the last CQE matters: it carries the highest completed WQE counter */
  cur = cqes + ((idx - 1) & cq_mask);
  txq->dv_cq_idx = idx;

  /* retrieve original WQE and get new tail counter */
  wqe = txq->dv_sq_wqes + (be16toh (cur->wqe_counter) & sq_mask);
  if (PREDICT_FALSE (wqe->ctrl.imm == RDMA_TXQ_DV_INVALID_ID))
    return;			/* can happen if CQE reports error for an intermediate WQE */

  ASSERT (RDMA_TXQ_USED_SZ (txq->head, wqe->ctrl.imm) <= buf_sz &&
	  RDMA_TXQ_USED_SZ (wqe->ctrl.imm, txq->tail) < buf_sz);

  /* free sent buffers and update txq head */
  vlib_buffer_free_from_ring (vm, txq->bufs, txq->head & mask, buf_sz,
			      RDMA_TXQ_USED_SZ (txq->head, wqe->ctrl.imm));
  txq->head = wqe->ctrl.imm;

  /* ring the completion queue doorbell */
  CLIB_MEMORY_STORE_BARRIER ();
  txq->dv_cq_dbrec[0] = htobe32 (idx);
}
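/*
 * Ring the send queue doorbell for the newly built WQEs: the last WQE is
 * tagged with the new tail (so the completion handler knows how many
 * buffers to free) and flagged to generate a CQE. The doorbell record is
 * made visible before the doorbell register write, hence the two barriers.
 */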
static_always_inline void
rdma_device_output_tx_mlx5_doorbell (rdma_txq_t * txq, rdma_mlx5_wqe_t * last,
				     const u16 tail, u32 sq_mask)
{
  last->ctrl.imm = tail;	/* register item to free */
  last->ctrl.fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE;	/* generate a CQE so we can free buffers */

  ASSERT (tail != txq->tail &&
	  RDMA_TXQ_AVAIL_SZ (txq, txq->head, txq->tail) >=
	  RDMA_TXQ_USED_SZ (txq->tail, tail));

  CLIB_MEMORY_STORE_BARRIER ();
  txq->dv_sq_dbrec[MLX5_SND_DBR] = htobe32 (tail);
  CLIB_COMPILER_BARRIER ();
  txq->dv_sq_db[0] = *(u64 *) (txq->dv_sq_wqes + (txq->tail & sq_mask));

  /* commit the new tail */
  txq->tail = tail;
}
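/*
 * Build a single-segment WQE from the per-txq template: the first
 * MLX5_ETH_L2_INLINE_HEADER_SIZE bytes of the packet are inlined in the
 * ethernet segment, the rest is referenced by the data segment.
 */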
static_always_inline void
rdma_mlx5_wqe_init (rdma_mlx5_wqe_t * wqe, const void *tmpl,
		    vlib_buffer_t * b, const u16 tail)
{
  u16 sz = b->current_length;
  u16 inline_sz = clib_min (sz, MLX5_ETH_L2_INLINE_HEADER_SIZE);

  clib_memcpy_fast (wqe, tmpl, RDMA_MLX5_WQE_SZ);

  wqe->ctrl.opmod_idx_opcode |= ((u32) htobe16 (tail)) << 8;

  /* speculatively copy at least MLX5_ETH_L2_INLINE_HEADER_SIZE (18 bytes) */
  const void *cur = vlib_buffer_get_current (b);
  clib_memcpy_fast (wqe->eseg.inline_hdr_start,
		    cur, MLX5_ETH_L2_INLINE_HEADER_SIZE);

  wqe->eseg.inline_hdr_sz = htobe16 (inline_sz);
  wqe->dseg.byte_count = htobe32 (sz - inline_sz);
  wqe->dseg.addr = htobe64 (pointer_to_uword (cur) + inline_sz);
}
/*
 * specific data path for chained buffers, supporting ring wrap-around
 * contrary to the normal path - otherwise we may fail to enqueue chained
 * buffers because we are close to the end of the ring while we still have
 * plenty of descriptors available
 */
static_always_inline u32
rdma_device_output_tx_mlx5_chained (vlib_main_t * vm,
				    const vlib_node_runtime_t * node,
				    const rdma_device_t * rd,
				    rdma_txq_t * txq, u32 n_left_from, u32 n,
				    u32 * bi, vlib_buffer_t ** b,
				    rdma_mlx5_wqe_t * wqe, u16 tail)
{
  rdma_mlx5_wqe_t *last = wqe;
  u32 wqe_n = RDMA_TXQ_AVAIL_SZ (txq, txq->head, tail);
  u32 sq_mask = pow2_mask (txq->dv_sq_log2sz);
  u32 mask = pow2_mask (txq->bufs_log2sz);
  u32 dseg_mask = RDMA_TXQ_DV_DSEG_SZ (txq) - 1;
  const u32 lkey = wqe[0].dseg.lkey;

  /* the caller already built the first n_left_from - n WQEs: copy their
   * buffer indices and catch up with them */
  vlib_buffer_copy_indices (txq->bufs + (txq->tail & mask), bi,
			    n_left_from - n);
  bi += n_left_from - n;
  while (n >= 1 && wqe_n >= 1)
    {
      u32 *bufs = txq->bufs + (tail & mask);
      rdma_mlx5_wqe_t *wqe = txq->dv_sq_wqes + (tail & sq_mask);

      /* setup the head WQE */
      rdma_mlx5_wqe_init (wqe, txq->dv_wqe_tmpl, b[0], tail);

      bufs[0] = bi[0];

      if (b[0]->flags & VLIB_BUFFER_NEXT_PRESENT)
	{
	  /*
	   * max number of available dseg:
	   *  - 4 dseg per WQEBB available
	   *  - max 32 dseg per WQE (5-bits length field in WQE ctrl)
	   */
#define RDMA_MLX5_WQE_DS_MAX	(1 << 5)
	  const u32 dseg_max =
	    clib_min (RDMA_MLX5_WQE_DS * (wqe_n - 1), RDMA_MLX5_WQE_DS_MAX);
	  vlib_buffer_t *chained_b = b[0];
	  u32 chained_n = 0;

	  /* there are exactly 4 dseg per WQEBB and we rely on that */
	  STATIC_ASSERT (RDMA_MLX5_WQE_DS *
			 sizeof (struct mlx5_wqe_data_seg) ==
			 MLX5_SEND_WQE_BB, "wrong size");

	  /*
	   * iterate over fragments, supporting ring wrap-around contrary to
	   * the normal path - otherwise we may fail to enqueue chained
	   * buffers because we are close to the end of the ring while we
	   * still have plenty of descriptors available
	   */
	  while (chained_n < dseg_max
		 && chained_b->flags & VLIB_BUFFER_NEXT_PRESENT)
	    {
	      struct mlx5_wqe_data_seg *dseg = (void *) txq->dv_sq_wqes;
	      dseg += ((tail + 1) * RDMA_MLX5_WQE_DS + chained_n) & dseg_mask;
	      if (((clib_address_t) dseg & (MLX5_SEND_WQE_BB - 1)) == 0)
		{
		  /*
		   * start of a new WQEBB:
		   * head/tail are shared between buffers and descriptors
		   * In order to maintain 1:1 correspondence between
		   * buffer index and descriptor index, we build
		   * 4-fragments chains and save the head
		   */
		  chained_b->flags &= ~(VLIB_BUFFER_NEXT_PRESENT |
					VLIB_BUFFER_TOTAL_LENGTH_VALID);
		  u32 idx = tail + 1 + RDMA_TXQ_DV_DSEG2WQE (chained_n);
		  idx &= mask;
		  txq->bufs[idx] = chained_b->next_buffer;
		}

	      chained_b = vlib_get_buffer (vm, chained_b->next_buffer);
	      dseg->byte_count = htobe32 (chained_b->current_length);
	      dseg->lkey = lkey;
	      dseg->addr = htobe64 (vlib_buffer_get_current_va (chained_b));

	      chained_n += 1;
	    }
	  if (chained_b->flags & VLIB_BUFFER_NEXT_PRESENT)
	    {
	      /*
	       * no descriptors left: drop the chain including 1st WQE
	       * skip the problematic packet and continue
	       */
	      vlib_buffer_free_from_ring (vm, txq->bufs, tail & mask,
					  RDMA_TXQ_BUF_SZ (txq), 1 +
					  RDMA_TXQ_DV_DSEG2WQE (chained_n));
	      vlib_error_count (vm, node->node_index,
				dseg_max == chained_n ?
				RDMA_TX_ERROR_SEGMENT_SIZE_EXCEEDED :
				RDMA_TX_ERROR_NO_FREE_SLOTS, 1);

	      /* fixup tail to overwrite wqe head with next packet */
	      tail -= 1;
	    }
	  else
	    {
	      /* update WQE descriptor with new dseg number */
	      ((u8 *) & wqe[0].ctrl.qpn_ds)[3] = RDMA_MLX5_WQE_DS + chained_n;

	      tail += RDMA_TXQ_DV_DSEG2WQE (chained_n);
	      wqe_n -= RDMA_TXQ_DV_DSEG2WQE (chained_n);

	      last = wqe;
	    }
	}
      else
	last = wqe;		/* single-segment packet: nothing else to do */

      tail += 1;
      bi += 1;
      b += 1;
      wqe_n -= 1;
      n -= 1;
    }
  if (n == n_left_from)
    return 0;			/* we failed to enqueue even a single packet */

  rdma_device_output_tx_mlx5_doorbell (txq, last, tail, sq_mask);
  return n_left_from - n;
}
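/*
 * mlx5 direct verbs fast path: build one single-segment WQE per packet,
 * 4 at a time, and branch to the chained-buffer slow path as soon as a
 * multi-segment buffer is seen. Buffer indices are copied in one shot at
 * the end because this path never wraps around the ring.
 */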
static_always_inline u32
rdma_device_output_tx_mlx5 (vlib_main_t * vm,
			    const vlib_node_runtime_t * node,
			    const rdma_device_t * rd, rdma_txq_t * txq,
			    const u32 n_left_from, u32 * bi,
			    vlib_buffer_t ** b)
{
  u32 sq_mask = pow2_mask (txq->dv_sq_log2sz);
  u32 mask = pow2_mask (txq->bufs_log2sz);
  rdma_mlx5_wqe_t *wqe = txq->dv_sq_wqes + (txq->tail & sq_mask);
  u32 n = n_left_from;
  u16 tail = txq->tail;

  ASSERT (RDMA_TXQ_BUF_SZ (txq) <= RDMA_TXQ_DV_SQ_SZ (txq));

  while (n >= 4)
    {
      u32 flags = b[0]->flags | b[1]->flags | b[2]->flags | b[3]->flags;
      if (PREDICT_FALSE (flags & VLIB_BUFFER_NEXT_PRESENT))
	return rdma_device_output_tx_mlx5_chained (vm, node, rd, txq,
						   n_left_from, n, bi, b,
						   wqe, tail);

      if (PREDICT_TRUE (n >= 8))
	{
	  vlib_prefetch_buffer_header (b[4 + 0], LOAD);
	  vlib_prefetch_buffer_header (b[4 + 1], LOAD);
	  vlib_prefetch_buffer_header (b[4 + 2], LOAD);
	  vlib_prefetch_buffer_header (b[4 + 3], LOAD);
	  clib_prefetch_load (wqe + 4);
	}

      rdma_mlx5_wqe_init (wqe + 0, txq->dv_wqe_tmpl, b[0], tail + 0);
      rdma_mlx5_wqe_init (wqe + 1, txq->dv_wqe_tmpl, b[1], tail + 1);
      rdma_mlx5_wqe_init (wqe + 2, txq->dv_wqe_tmpl, b[2], tail + 2);
      rdma_mlx5_wqe_init (wqe + 3, txq->dv_wqe_tmpl, b[3], tail + 3);

      b += 4;
      tail += 4;
      wqe += 4;
      n -= 4;
    }

  while (n >= 1)
    {
      if (PREDICT_FALSE (b[0]->flags & VLIB_BUFFER_NEXT_PRESENT))
	return rdma_device_output_tx_mlx5_chained (vm, node, rd, txq,
						   n_left_from, n, bi, b,
						   wqe, tail);

      rdma_mlx5_wqe_init (wqe, txq->dv_wqe_tmpl, b[0], tail);

      b += 1;
      tail += 1;
      wqe += 1;
      n -= 1;
    }

  vlib_buffer_copy_indices (txq->bufs + (txq->tail & mask), bi, n_left_from);

  rdma_device_output_tx_mlx5_doorbell (txq, &wqe[-1], tail, sq_mask);
  return n_left_from;
}
/*
 * standard ibverb tx/free functions
 */
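/*
 * Poll the ibverb completion queue and free all buffers up to the last
 * successfully completed work request (its wr_id carries the txq tail
 * recorded when it was posted).
 */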
static_always_inline void
rdma_device_output_free_ibverb (vlib_main_t * vm,
				const vlib_node_runtime_t * node,
				rdma_txq_t * txq)
{
  struct ibv_wc wc[VLIB_FRAME_SIZE];
  u32 mask = pow2_mask (txq->bufs_log2sz);
  u16 tail;
  int n;

  n = ibv_poll_cq (txq->ibv_cq, VLIB_FRAME_SIZE, wc);
  if (n <= 0)
    {
      if (PREDICT_FALSE (n < 0))
	vlib_error_count (vm, node->node_index, RDMA_TX_ERROR_COMPLETION, 1);
      return;
    }

  /* skip trailing errored completions, bail out if none succeeded */
  while (PREDICT_FALSE (IBV_WC_SUCCESS != wc[n - 1].status))
    {
      vlib_error_count (vm, node->node_index, RDMA_TX_ERROR_COMPLETION, 1);
      n--;
      if (0 == n)
	return;
    }

  tail = wc[n - 1].wr_id;
  vlib_buffer_free_from_ring (vm, txq->bufs, txq->head & mask,
			      RDMA_TXQ_BUF_SZ (txq),
			      RDMA_TXQ_USED_SZ (txq->head, tail));
  txq->head = tail;
}
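/*
 * Post packets as plain ibverb work requests: one scatter-gather entry per
 * packet, WRs chained through their next pointers, with only the last WR
 * signaled so a single CQE covers the whole batch.
 */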
static_always_inline u32
rdma_device_output_tx_ibverb (vlib_main_t * vm,
			      const vlib_node_runtime_t * node,
			      const rdma_device_t * rd, rdma_txq_t * txq,
			      u32 n_left_from, u32 * bi, vlib_buffer_t ** b)
{
  struct ibv_send_wr wr[VLIB_FRAME_SIZE], *w = wr;
  struct ibv_sge sge[VLIB_FRAME_SIZE], *s = sge;
  u32 mask = pow2_mask (txq->bufs_log2sz);
  u32 n = n_left_from;

  memset (w, 0, n_left_from * sizeof (w[0]));

  while (n >= 4)
    {
      if (PREDICT_TRUE (n >= 8))
	{
	  vlib_prefetch_buffer_header (b[4 + 0], LOAD);
	  vlib_prefetch_buffer_header (b[4 + 1], LOAD);
	  vlib_prefetch_buffer_header (b[4 + 2], LOAD);
	  vlib_prefetch_buffer_header (b[4 + 3], LOAD);

	  CLIB_PREFETCH (&s[4 + 0], 4 * sizeof (s[0]), STORE);

	  CLIB_PREFETCH (&w[4 + 0], CLIB_CACHE_LINE_BYTES, STORE);
	  CLIB_PREFETCH (&w[4 + 1], CLIB_CACHE_LINE_BYTES, STORE);
	  CLIB_PREFETCH (&w[4 + 2], CLIB_CACHE_LINE_BYTES, STORE);
	  CLIB_PREFETCH (&w[4 + 3], CLIB_CACHE_LINE_BYTES, STORE);
	}

      s[0].addr = vlib_buffer_get_current_va (b[0]);
      s[0].length = b[0]->current_length;
      s[0].lkey = rd->lkey;

      s[1].addr = vlib_buffer_get_current_va (b[1]);
      s[1].length = b[1]->current_length;
      s[1].lkey = rd->lkey;

      s[2].addr = vlib_buffer_get_current_va (b[2]);
      s[2].length = b[2]->current_length;
      s[2].lkey = rd->lkey;

      s[3].addr = vlib_buffer_get_current_va (b[3]);
      s[3].length = b[3]->current_length;
      s[3].lkey = rd->lkey;

      w[0].next = &w[0] + 1;
      w[0].sg_list = &s[0];
      w[0].num_sge = 1;
      w[0].opcode = IBV_WR_SEND;

      w[1].next = &w[1] + 1;
      w[1].sg_list = &s[1];
      w[1].num_sge = 1;
      w[1].opcode = IBV_WR_SEND;

      w[2].next = &w[2] + 1;
      w[2].sg_list = &s[2];
      w[2].num_sge = 1;
      w[2].opcode = IBV_WR_SEND;

      w[3].next = &w[3] + 1;
      w[3].sg_list = &s[3];
      w[3].num_sge = 1;
      w[3].opcode = IBV_WR_SEND;

      s += 4;
      w += 4;
      b += 4;
      n -= 4;
    }

  while (n >= 1)
    {
      s[0].addr = vlib_buffer_get_current_va (b[0]);
      s[0].length = b[0]->current_length;
      s[0].lkey = rd->lkey;

      w[0].next = &w[0] + 1;
      w[0].sg_list = &s[0];
      w[0].num_sge = 1;
      w[0].opcode = IBV_WR_SEND;

      s += 1;
      w += 1;
      b += 1;
      n -= 1;
    }
  w[-1].wr_id = txq->tail;	/* register item to free */
  w[-1].next = 0;		/* fix next pointer in WR linked-list */
  w[-1].send_flags = IBV_SEND_SIGNALED;	/* generate a CQE so we can free buffers */

  w = wr;
  if (PREDICT_FALSE (0 != ibv_post_send (txq->ibv_qp, w, &w)))
    {
      vlib_error_count (vm, node->node_index, RDMA_TX_ERROR_SUBMISSION,
			n_left_from - (w - wr));
      n_left_from = w - wr;
    }

  vlib_buffer_copy_indices (txq->bufs + (txq->tail & mask), bi, n_left_from);
  txq->tail += n_left_from;
  return n_left_from;
}
/*
 * common tx/free functions
 */
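/* poll tx completions and free transmitted buffers */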
static_always_inline void
rdma_device_output_free (vlib_main_t * vm, const vlib_node_runtime_t * node,
			 rdma_txq_t * txq, int is_mlx5dv)
{
  if (is_mlx5dv)
    rdma_device_output_free_mlx5 (vm, node, txq);
  else
    rdma_device_output_free_ibverb (vm, node, txq);
}
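/*
 * Try to enqueue up to n_left_from packets. The batch is clamped to the
 * available ring space and to the distance to the end of the ring so the
 * tx functions never have to handle wrap-around. Returns the number of
 * packets actually enqueued.
 */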
static_always_inline u32
rdma_device_output_tx_try (vlib_main_t * vm, const vlib_node_runtime_t * node,
			   const rdma_device_t * rd, rdma_txq_t * txq,
			   u32 n_left_from, u32 * bi, int is_mlx5dv)
{
  vlib_buffer_t *b[VLIB_FRAME_SIZE];
  u32 mask = pow2_mask (txq->bufs_log2sz);

  /* do not enqueue more packets than available ring space */
  n_left_from = clib_min (n_left_from, RDMA_TXQ_AVAIL_SZ (txq, txq->head,
							  txq->tail));
  /* avoid wrap-around logic in core loop */
  n_left_from = clib_min (n_left_from, RDMA_TXQ_BUF_SZ (txq) -
			  (txq->tail & mask));

  /* if ring is full, do nothing */
  if (PREDICT_FALSE (n_left_from == 0))
    return 0;

  vlib_get_buffers (vm, bi, b, n_left_from);

  return is_mlx5dv ?
    rdma_device_output_tx_mlx5 (vm, node, rd, txq, n_left_from, bi, b) :
    rdma_device_output_tx_ibverb (vm, node, rd, txq, n_left_from, bi, b);
}
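/*
 * Per-frame tx: under the (optional) per-txq spinlock, alternate between
 * freeing completed buffers and enqueuing pending packets, retrying up to
 * RDMA_TX_RETRIES times; anything left after that is dropped and counted
 * as RDMA_TX_ERROR_NO_FREE_SLOTS.
 */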
static_always_inline uword
rdma_device_output_tx (vlib_main_t * vm, vlib_node_runtime_t * node,
		       vlib_frame_t * frame, rdma_device_t * rd,
		       int is_mlx5dv)
{
  u32 thread_index = vm->thread_index;
  rdma_txq_t *txq =
    vec_elt_at_index (rd->txqs, thread_index % vec_len (rd->txqs));
  u32 *from, n_left_from;
  int i;

  ASSERT (RDMA_TXQ_BUF_SZ (txq) >= VLIB_FRAME_SIZE);

  from = vlib_frame_vector_args (frame);
  n_left_from = frame->n_vectors;

  clib_spinlock_lock_if_init (&txq->lock);

  for (i = 0; i < RDMA_TX_RETRIES && n_left_from > 0; i++)
    {
      u32 n_enq;
      rdma_device_output_free (vm, node, txq, is_mlx5dv);
      n_enq = rdma_device_output_tx_try (vm, node, rd, txq, n_left_from, from,
					 is_mlx5dv);
      n_left_from -= n_enq;
      from += n_enq;
    }

  clib_spinlock_unlock_if_init (&txq->lock);

  if (PREDICT_FALSE (n_left_from))
    {
      vlib_buffer_free (vm, from, n_left_from);
      vlib_error_count (vm, node->node_index,
			RDMA_TX_ERROR_NO_FREE_SLOTS, n_left_from);
    }

  return frame->n_vectors - n_left_from;
}
VNET_DEVICE_CLASS_TX_FN (rdma_device_class) (vlib_main_t * vm,
					     vlib_node_runtime_t * node,
					     vlib_frame_t * frame)
{
  rdma_main_t *rm = &rdma_main;
  vnet_interface_output_runtime_t *ord = (void *) node->runtime_data;
  rdma_device_t *rd = pool_elt_at_index (rm->devices, ord->dev_instance);

  if (PREDICT_TRUE (rd->flags & RDMA_DEVICE_F_MLX5DV))
    return rdma_device_output_tx (vm, node, frame, rd, 1 /* is_mlx5dv */ );

  return rdma_device_output_tx (vm, node, frame, rd, 0 /* is_mlx5dv */ );
}
/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */