/*
 *------------------------------------------------------------------
 * Copyright (c) 2018 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *------------------------------------------------------------------
 */
#include <vlib/vlib.h>
#include <vlib/unix/unix.h>
#include <vlib/pci/pci.h>
#include <vnet/ethernet/ethernet.h>
#include <vnet/devices/devices.h>

#include <rdma/rdma.h>
#define foreach_rdma_input_error \
  _(BUFFER_ALLOC, "buffer alloc error")

typedef enum
{
#define _(f,s) RDMA_INPUT_ERROR_##f,
  foreach_rdma_input_error
#undef _
    RDMA_INPUT_N_ERROR,
} rdma_input_error_t;

static __clib_unused char *rdma_input_error_strings[] = {
#define _(n,s) s,
  foreach_rdma_input_error
#undef _
};
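
/* Helper for the plain ibverbs receive path: fills one receive work request
   and its single scatter/gather entry so that it points at the packet data
   area of a vlib buffer. WRs are chained through the 'next' pointer so a
   whole batch can be posted in a single call; the last link is fixed up by
   the caller before posting. */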
static_always_inline void
ibv_set_recv_wr_and_sge (struct ibv_recv_wr *w, struct ibv_sge *s, u64 va,
                         u32 data_size, u32 lkey)
{
  s[0].addr = va;
  s[0].length = data_size;
  s[0].lkey = lkey;
  w[0].next = w + 1;
  w[0].sg_list = s;
  w[0].num_sge = 1;
}
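
/* Refill the RX ring with freshly allocated vlib buffers. In mlx5dv direct
   verbs mode the WQE data segments are written directly and the receive
   doorbell record is updated by hand; otherwise a chained list of
   ibv_recv_wr is built and handed to ibv_post_wq_recv(). With a striding RQ,
   several strides (and therefore several buffers) share a single WQE. */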
static_always_inline void
rdma_device_input_refill (vlib_main_t * vm, rdma_device_t * rd,
                          rdma_rxq_t * rxq, int is_mlx5dv, int is_striding)
{
  u32 n_alloc, n;
  u16 ring_space;
  struct ibv_recv_wr wr[VLIB_FRAME_SIZE], *w = wr;
  struct ibv_sge sge[VLIB_FRAME_SIZE], *s = sge;
  u32 mask = rxq->size - 1;
  u32 slot = rxq->tail & mask;
  u32 *bufs = rxq->bufs + slot;
  u32 data_size = rxq->buf_sz;
  u32 lkey = rd->lkey;
  int log_stride_per_wqe = rxq->log_stride_per_wqe;
  int log_wqe_sz = rxq->log_wqe_sz;

  /* refilled buffers must be a multiple of 8 and of strides per WQE */
  u32 alloc_multiple = 1 << (clib_max (3, log_stride_per_wqe));
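  /* e.g. a striding RQ with 32 strides per WQE (log_stride_per_wqe = 5)
     gives alloc_multiple = 32, while the non-striding case
     (log_stride_per_wqe = 0) falls back to the minimum of 8 required by the
     8-wide refill loops below. */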

  ring_space = rxq->size - (rxq->tail - rxq->head);

  n_alloc = clib_min (VLIB_FRAME_SIZE, ring_space);

  /* do not bother to allocate if too small */
  if (n_alloc < 2 * alloc_multiple)
    return;

  /* avoid wrap-around logic in core loop */
  n_alloc = clib_min (n_alloc, rxq->size - slot);

  n_alloc &= ~(alloc_multiple - 1);     /* round to alloc_multiple */

  n = vlib_buffer_alloc_to_ring_from_pool (vm, rxq->bufs, slot, rxq->size,
                                           n_alloc, rd->pool);

  if (PREDICT_FALSE (n != n_alloc))
    {
      u32 n_free;

      if (n < alloc_multiple)
        {
          if (n)
            vlib_buffer_free_from_ring (vm, rxq->bufs, slot, rxq->size, n);
          return;
        }

      /* partial allocation, round and return rest */
      n_free = n & (alloc_multiple - 1);
      n -= n_free;
      if (n_free)
        vlib_buffer_free_from_ring (vm, rxq->bufs, (slot + n) & mask,
                                    rxq->size, n_free);

      n_alloc = n;
    }

  if (is_mlx5dv)
    {
      u64 __clib_aligned (32) va[8];

      /* slot does not necessarily correspond to the slot
         in the wqes ring (in 16B words) */
      u32 wqes_slot = slot << (log_wqe_sz - log_stride_per_wqe);
      u32 wqe_cnt = rxq->wqe_cnt;
      mlx5dv_wqe_ds_t *wqe = rxq->wqes + wqes_slot;
      int wqe_sz = 1 << log_wqe_sz;
      int stride_per_wqe = 1 << log_stride_per_wqe;
      int current_data_seg = 0;

      while (n >= 1)
        {
          vlib_get_buffers_with_offset (vm, rxq->bufs + slot, (void **) va, 8,
                                        sizeof (vlib_buffer_t));
#ifdef CLIB_HAVE_VEC256
          *(u64x4 *) va = u64x4_byte_swap (*(u64x4 *) va);
          *(u64x4 *) (va + 4) = u64x4_byte_swap (*(u64x4 *) (va + 4));
#else
          for (int i = 0; i < 8; i++)
            va[i] = clib_host_to_net_u64 (va[i]);
#endif

          /* In striding RQ mode, the first 16B-word of the WQE is the SRQ
             header. It is initialised as if it were a LINKED_LIST, as we
             have no guarantee about what RDMA core does (CYCLIC_RQ or
             LINKED_LIST_RQ). In cyclic mode, the SRQ header is ignored
             anyways... */
          if (is_striding && !(current_data_seg & (wqe_sz - 1)))
            *(mlx5dv_wqe_srq_next_t *) wqe = (mlx5dv_wqe_srq_next_t)
            {
              .next_wqe_index = clib_host_to_net_u16 (((wqes_slot >> log_wqe_sz) + 1) & (wqe_cnt - 1)),
            };

          if (!is_striding || !(current_data_seg & ~(stride_per_wqe - 1)))
            {
              wqe[0 + is_striding].addr = va[0];
              wqe[1 + is_striding].addr = va[1];
              wqe[2 + is_striding].addr = va[2];
              wqe[3 + is_striding].addr = va[3];
              wqe[4 + is_striding].addr = va[4];
              wqe[5 + is_striding].addr = va[5];
              wqe[6 + is_striding].addr = va[6];
              wqe[7 + is_striding].addr = va[7];
              slot += 8;
              n -= 8;
            }
          wqe += 8;
          wqes_slot += 8;
          current_data_seg += 8;
          current_data_seg &= wqe_sz - 1;
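          /* current_data_seg wraps at wqe_sz: only the first stride_per_wqe
             data segments of a WQE carry buffers, the remaining 16B-words of
             an oversized WQE are padding and are skipped by the test above. */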
        }

      /* make sure the NIC sees the new descriptors only after they are fully
         written, then advertise them through the doorbell record */
      CLIB_MEMORY_STORE_BARRIER ();
      rxq->tail += n_alloc;
      if (is_striding)
        {
          rxq->striding_wqe_tail += n_alloc >> log_stride_per_wqe;
          rxq->wq_db[MLX5_RCV_DBR] =
            clib_host_to_net_u32 (rxq->striding_wqe_tail);
        }
      else
        rxq->wq_db[MLX5_RCV_DBR] = clib_host_to_net_u32 (rxq->tail);
      return;
    }

  while (n >= 8)
    {
      u64 va[8];

      if (PREDICT_TRUE (n >= 16))
        {
          clib_prefetch_store (s + 16);
          clib_prefetch_store (w + 16);
        }

      vlib_get_buffers_with_offset (vm, bufs, (void **) va, 8,
                                    sizeof (vlib_buffer_t));

      ibv_set_recv_wr_and_sge (w++, s++, va[0], data_size, lkey);
      ibv_set_recv_wr_and_sge (w++, s++, va[1], data_size, lkey);
      ibv_set_recv_wr_and_sge (w++, s++, va[2], data_size, lkey);
      ibv_set_recv_wr_and_sge (w++, s++, va[3], data_size, lkey);
      ibv_set_recv_wr_and_sge (w++, s++, va[4], data_size, lkey);
      ibv_set_recv_wr_and_sge (w++, s++, va[5], data_size, lkey);
      ibv_set_recv_wr_and_sge (w++, s++, va[6], data_size, lkey);
      ibv_set_recv_wr_and_sge (w++, s++, va[7], data_size, lkey);

      bufs += 8;
      n -= 8;
    }

  w[-1].next = 0;               /* fix next pointer in WR linked-list last item */

  n = n_alloc;
  if (ibv_post_wq_recv (rxq->wq, wr, &w) != 0)
    {
      n = w - wr;
      vlib_buffer_free_from_ring (vm, rxq->bufs, slot + n, rxq->size,
                                  n_alloc - n);
    }

  rxq->tail += n;
}
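
/* Add per-packet trace records when tracing is enabled on the node. For the
   mlx5dv path the raw CQE flags are recorded as well so they can be shown by
   format_rdma_input_trace. */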
static_always_inline void
rdma_device_input_trace (vlib_main_t * vm, vlib_node_runtime_t * node,
                         const rdma_device_t * rd, u32 n_left,
                         const u32 * bi, u32 next_index, u16 * cqe_flags,
                         int is_mlx5dv)
{
  u32 n_trace = vlib_get_trace_count (vm, node);

  if (PREDICT_TRUE (0 == n_trace))
    return;

  while (n_trace && n_left)
    {
      vlib_buffer_t *b = vlib_get_buffer (vm, bi[0]);
      if (PREDICT_TRUE
          (vlib_trace_buffer (vm, node, next_index, b, /* follow_chain */ 0)))
        {
          rdma_input_trace_t *tr = vlib_add_trace (vm, node, b, sizeof (*tr));
          tr->next_index = next_index;
          tr->hw_if_index = rd->hw_if_index;
          tr->cqe_flags = is_mlx5dv ? clib_net_to_host_u16 (cqe_flags[0]) : 0;
          n_trace--;
        }

      /* next */
      n_left--;
      cqe_flags++;
      bi++;
    }

  vlib_set_trace_count (vm, node, n_trace);
}
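
/* When the next node is ethernet-input, mark the frame as coming from a
   single sw_if_index and, if all IP4 packets were checksum-validated by the
   NIC, set ETH_INPUT_FRAME_F_IP4_CKSUM_OK so ethernet-input can skip the
   software checksum check. */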
static_always_inline void
rdma_device_input_ethernet (vlib_main_t * vm, vlib_node_runtime_t * node,
                            const rdma_device_t * rd, u32 next_index,
                            int skip_ip4_cksum)
{
  vlib_next_frame_t *nf;
  vlib_frame_t *f;
  ethernet_input_frame_t *ef;

  if (PREDICT_FALSE (VNET_DEVICE_INPUT_NEXT_ETHERNET_INPUT != next_index))
    return;

  nf =
    vlib_node_runtime_get_next_frame (vm, node,
                                      VNET_DEVICE_INPUT_NEXT_ETHERNET_INPUT);
  f = vlib_get_frame (vm, nf->frame);
  f->flags = ETH_INPUT_FRAME_F_SINGLE_SW_IF_IDX;
  if (skip_ip4_cksum)
    f->flags |= ETH_INPUT_FRAME_F_IP4_CKSUM_OK;

  ef = vlib_frame_scalar_args (f);
  ef->sw_if_index = rd->sw_if_index;
  ef->hw_if_index = rd->hw_if_index;
}
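
/* Plain ibverbs RX: copy the buffer template into each received buffer and
   set its length from the work completion, 4 packets at a time. Returns the
   total number of bytes received. */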
static_always_inline u32
rdma_device_input_bufs (vlib_main_t * vm, const rdma_device_t * rd,
                        vlib_buffer_t ** b, struct ibv_wc *wc,
                        u32 n_left_from, vlib_buffer_t * bt)
{
  u32 n_rx_bytes = 0;

  while (n_left_from >= 4)
    {
      if (PREDICT_TRUE (n_left_from >= 8))
        {
          CLIB_PREFETCH (&wc[4 + 0], CLIB_CACHE_LINE_BYTES, LOAD);
          CLIB_PREFETCH (&wc[4 + 1], CLIB_CACHE_LINE_BYTES, LOAD);
          CLIB_PREFETCH (&wc[4 + 2], CLIB_CACHE_LINE_BYTES, LOAD);
          CLIB_PREFETCH (&wc[4 + 3], CLIB_CACHE_LINE_BYTES, LOAD);
          vlib_prefetch_buffer_header (b[4 + 0], STORE);
          vlib_prefetch_buffer_header (b[4 + 1], STORE);
          vlib_prefetch_buffer_header (b[4 + 2], STORE);
          vlib_prefetch_buffer_header (b[4 + 3], STORE);
        }

      vlib_buffer_copy_template (b[0], bt);
      vlib_buffer_copy_template (b[1], bt);
      vlib_buffer_copy_template (b[2], bt);
      vlib_buffer_copy_template (b[3], bt);

      n_rx_bytes += b[0]->current_length = wc[0].byte_len;
      n_rx_bytes += b[1]->current_length = wc[1].byte_len;
      n_rx_bytes += b[2]->current_length = wc[2].byte_len;
      n_rx_bytes += b[3]->current_length = wc[3].byte_len;

      b += 4;
      wc += 4;
      n_left_from -= 4;
    }

  while (n_left_from >= 1)
    {
      vlib_buffer_copy_template (b[0], bt);
      n_rx_bytes += b[0]->current_length = wc[0].byte_len;

      b += 1;
      wc += 1;
      n_left_from -= 1;
    }

  return n_rx_bytes;
}
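
/* A compressed CQE is followed by an array of mini-CQEs; only their
   byte_count field is consumed here. This helper copies those byte counts
   into 'byte_cnt', handling CQ ring wrap-around and an initial 'skip' offset
   used when the array was only partially consumed by a previous call. */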
static_always_inline void
process_mini_cqes (rdma_rxq_t * rxq, u32 skip, u32 n_left, u32 cq_ci,
                   u32 mask, u32 * byte_cnt)
{
  mlx5dv_mini_cqe_t *mcqe;
  u32 mcqe_array_index = (cq_ci + 1) & mask;
  mcqe = (mlx5dv_mini_cqe_t *) (rxq->cqes + mcqe_array_index);

  mcqe_array_index = cq_ci;

  if (skip)
    {
      u32 n = skip & ~7;

      if (n)
        {
          mcqe_array_index = (mcqe_array_index + n) & mask;
          mcqe = (mlx5dv_mini_cqe_t *) (rxq->cqes + mcqe_array_index);
          skip -= n;
        }

      if (skip)
        {
          n = clib_min (8 - skip, n_left);
          for (int i = 0; i < n; i++)
            byte_cnt[i] = mcqe[skip + i].byte_count;
          mcqe_array_index = (mcqe_array_index + 8) & mask;
          mcqe = (mlx5dv_mini_cqe_t *) (rxq->cqes + mcqe_array_index);
          n_left -= n;
          byte_cnt += n;
        }
    }

  while (n_left >= 8)
    {
      for (int i = 0; i < 8; i++)
        byte_cnt[i] = mcqe[i].byte_count;

      n_left -= 8;
      byte_cnt += 8;
      mcqe_array_index = (mcqe_array_index + 8) & mask;
      mcqe = (mlx5dv_mini_cqe_t *) (rxq->cqes + mcqe_array_index);
    }

  if (n_left)
    {
      for (int i = 0; i < n_left; i++)
        byte_cnt[i] = mcqe[i].byte_count;
    }
}
static_always_inline void
cqe_set_owner (mlx5dv_cqe_t * cqe, u32 n_left, u8 owner)
{
  while (n_left >= 8)
    {
      cqe[0].opcode_cqefmt_se_owner = owner;
      cqe[1].opcode_cqefmt_se_owner = owner;
      cqe[2].opcode_cqefmt_se_owner = owner;
      cqe[3].opcode_cqefmt_se_owner = owner;
      cqe[4].opcode_cqefmt_se_owner = owner;
      cqe[5].opcode_cqefmt_se_owner = owner;
      cqe[6].opcode_cqefmt_se_owner = owner;
      cqe[7].opcode_cqefmt_se_owner = owner;
      n_left -= 8;
      cqe += 8;
    }
  while (n_left)
    {
      cqe[0].opcode_cqefmt_se_owner = owner;
      n_left--;
      cqe++;
    }
}
static_always_inline void
compressed_cqe_reset_owner (rdma_rxq_t * rxq, u32 n_mini_cqes, u32 cq_ci,
                            u32 mask, u32 log2_cq_size)
{
  u8 owner;
  u32 offset, cq_size = 1 << log2_cq_size;

  /* first CQE is reset by hardware */
  cq_ci++;
  n_mini_cqes--;

  offset = cq_ci & mask;
  owner = 0xf0 | ((cq_ci >> log2_cq_size) & 1);

  if (offset + n_mini_cqes < cq_size)
    {
      cqe_set_owner (rxq->cqes + offset, n_mini_cqes, owner);
    }
  else
    {
      u32 n = cq_size - offset;
      cqe_set_owner (rxq->cqes + offset, n, owner);
      cqe_set_owner (rxq->cqes, n_mini_cqes - n, owner ^ 1);
    }
}
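
/* Poll the completion queue in direct verbs mode. CQE ownership alternates
   on every pass over the ring, so a CQE whose owner bit does not match the
   current pass has not been written yet. Regular CQEs yield one packet each;
   compressed CQEs are expanded via process_mini_cqes(). Byte counts and CQE
   flags are written to the caller-provided arrays, one entry per packet. */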
static_always_inline uword
rdma_device_poll_cq_mlx5dv (rdma_device_t * rd, rdma_rxq_t * rxq,
                            u32 * byte_cnt, u16 * cqe_flags)
{
  u32 n_rx_packets = 0;
  u32 log2_cq_size = rxq->log2_cq_size;
  u32 mask = pow2_mask (log2_cq_size);
  u32 cq_ci = rxq->cq_ci;

  if (rxq->n_mini_cqes_left)
    {
      /* partially processed mini-cqe array */
      u32 n_mini_cqes = rxq->n_mini_cqes;
      u32 n_mini_cqes_left = rxq->n_mini_cqes_left;
      process_mini_cqes (rxq, n_mini_cqes - n_mini_cqes_left,
                         n_mini_cqes_left, cq_ci, mask, byte_cnt);
      compressed_cqe_reset_owner (rxq, n_mini_cqes, cq_ci, mask,
                                  log2_cq_size);
      clib_memset_u16 (cqe_flags, rxq->last_cqe_flags, n_mini_cqes_left);
      n_rx_packets = n_mini_cqes_left;
      byte_cnt += n_mini_cqes_left;
      cqe_flags += n_mini_cqes_left;
      rxq->n_mini_cqes_left = 0;
      rxq->cq_ci = cq_ci = cq_ci + n_mini_cqes;
    }

  while (n_rx_packets < VLIB_FRAME_SIZE)
    {
      u8 cqe_last_byte, owner;
      mlx5dv_cqe_t *cqe = rxq->cqes + (cq_ci & mask);

      clib_prefetch_load (rxq->cqes + ((cq_ci + 8) & mask));

      owner = (cq_ci >> log2_cq_size) & 1;
      cqe_last_byte = cqe->opcode_cqefmt_se_owner;

      if ((cqe_last_byte & 0x1) != owner)
        break;

      cqe_last_byte &= 0xfc;    /* remove owner and solicited bits */

      if (cqe_last_byte == 0x2c)        /* OPCODE = 0x2 (Responder Send), Format = 0x3 (Compressed CQE) */
        {
          u32 n_mini_cqes = clib_net_to_host_u32 (cqe->mini_cqe_num);
          u32 n_left = VLIB_FRAME_SIZE - n_rx_packets;
          u16 flags = cqe->flags;

          if (n_left >= n_mini_cqes)
            {
              process_mini_cqes (rxq, 0, n_mini_cqes, cq_ci, mask, byte_cnt);
              clib_memset_u16 (cqe_flags, flags, n_mini_cqes);
              compressed_cqe_reset_owner (rxq, n_mini_cqes, cq_ci, mask,
                                          log2_cq_size);
              n_rx_packets += n_mini_cqes;
              byte_cnt += n_mini_cqes;
              cqe_flags += n_mini_cqes;
              cq_ci += n_mini_cqes;
            }
          else
            {
              process_mini_cqes (rxq, 0, n_left, cq_ci, mask, byte_cnt);
              clib_memset_u16 (cqe_flags, flags, n_left);
              n_rx_packets = VLIB_FRAME_SIZE;
              rxq->n_mini_cqes = n_mini_cqes;
              rxq->n_mini_cqes_left = n_mini_cqes - n_left;
              rxq->last_cqe_flags = flags;
              goto done;
            }

          continue;
        }

      if (cqe_last_byte == 0x20)        /* OPCODE = 0x2 (Responder Send), Format = 0x0 (no inline data) */
        {
          byte_cnt[0] = cqe->byte_cnt;
          cqe_flags[0] = cqe->flags;
          n_rx_packets++;
          cq_ci++;
          byte_cnt++;
          cqe_flags++;
          continue;
        }

      rd->flags |= RDMA_DEVICE_F_ERROR;
      break;
    }

done:
  rxq->cq_db[0] = rxq->cq_ci = cq_ci;
  return n_rx_packets;
}
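
/* In striding RQ mode each CQE byte count also encodes the number of strides
   (i.e. buffers) consumed and a filler flag for padding strides. Sum up the
   segment count and report whether the multi-segment slow path is needed. */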
static_always_inline int
rdma_device_mlx5dv_striding_rq_parse_bc (int n_rx_packets, int *n_rx_segs,
                                         u32 * bc)
{
  /* Determine if slow path is needed */
  int filler = 0;
  for (int i = 0; i < n_rx_packets; i++)
    {
      *n_rx_segs +=
        (bc[i] & CQE_BC_CONSUMED_STRIDES_MASK) >>
        CQE_BC_CONSUMED_STRIDES_SHIFT;
      filler |= ! !(bc[i] & CQE_BC_FILLER_MASK);
    }
  return n_rx_packets != *n_rx_segs || filler;
}
static_always_inline int
rdma_device_mlx5dv_l3_validate_and_swap_bc (rdma_per_thread_data_t
                                            * ptd, int n_rx_packets, u32 * bc)
{
  u16 mask = CQE_FLAG_L3_HDR_TYPE_MASK | CQE_FLAG_L3_OK;
  u16 match = CQE_FLAG_L3_HDR_TYPE_IP4 << CQE_FLAG_L3_HDR_TYPE_SHIFT;

  /* verify that all ip4 packets have l3_ok flag set and convert packet
     length from network to host byte order */
  int skip_ip4_cksum = 1;

#if defined CLIB_HAVE_VEC256
  u16x16 mask16 = u16x16_splat (mask);
  u16x16 match16 = u16x16_splat (match);
  u16x16 r = { };

  for (int i = 0; i * 16 < n_rx_packets; i++)
    r |= (ptd->cqe_flags16[i] & mask16) != match16;

  if (!u16x16_is_all_zero (r))
    skip_ip4_cksum = 0;

  for (int i = 0; i < n_rx_packets; i += 8)
    *(u32x8 *) (bc + i) = u32x8_byte_swap (*(u32x8 *) (bc + i));
#elif defined CLIB_HAVE_VEC128
  u16x8 mask8 = u16x8_splat (mask);
  u16x8 match8 = u16x8_splat (match);
  u16x8 r = { };

  for (int i = 0; i * 8 < n_rx_packets; i++)
    r |= (ptd->cqe_flags8[i] & mask8) != match8;

  if (!u16x8_is_all_zero (r))
    skip_ip4_cksum = 0;

  for (int i = 0; i < n_rx_packets; i += 4)
    *(u32x4 *) (bc + i) = u32x4_byte_swap (*(u32x4 *) (bc + i));
#else
  for (int i = 0; i < n_rx_packets; i++)
    if ((ptd->cqe_flags[i] & mask) == match)
      skip_ip4_cksum = 0;

  for (int i = 0; i < n_rx_packets; i++)
    bc[i] = clib_net_to_host_u32 (bc[i]);
#endif

  return skip_ip4_cksum;
}
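
/* Fast path: every packet fits into a single buffer. Copy the buffer indices
   out of the RX ring, apply the buffer template and set each buffer length
   from the (masked) CQE byte count. Returns the number of bytes received. */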
static_always_inline u32
rdma_device_mlx5dv_fast_input (vlib_main_t * vm, rdma_rxq_t * rxq,
                               u32 qs_mask, vlib_buffer_t * bt,
                               u32 * to_next, u32 n_rx_segs, u32 * bc,
                               u32 bc_mask)
{
  vlib_buffer_t *bufs[VLIB_FRAME_SIZE];
  vlib_buffer_t **b = bufs;
  u32 n_left = n_rx_segs;
  u32 n_rx_bytes = 0;

  vlib_buffer_copy_indices_from_ring (to_next, rxq->bufs,
                                      rxq->head & qs_mask, rxq->size,
                                      n_rx_segs);
  rxq->head += n_rx_segs;
  vlib_get_buffers (vm, to_next, bufs, n_rx_segs);

  while (n_left >= 8)
    {
      clib_prefetch_store (b[4]);
      vlib_buffer_copy_template (b[0], bt);
      n_rx_bytes += b[0]->current_length = bc[0] & bc_mask;
      clib_prefetch_store (b[5]);
      vlib_buffer_copy_template (b[1], bt);
      n_rx_bytes += b[1]->current_length = bc[1] & bc_mask;
      clib_prefetch_store (b[6]);
      vlib_buffer_copy_template (b[2], bt);
      n_rx_bytes += b[2]->current_length = bc[2] & bc_mask;
      clib_prefetch_store (b[7]);
      vlib_buffer_copy_template (b[3], bt);
      n_rx_bytes += b[3]->current_length = bc[3] & bc_mask;

      /* next */
      bc += 4;
      b += 4;
      n_left -= 4;
    }

  while (n_left)
    {
      vlib_buffer_copy_template (b[0], bt);
      n_rx_bytes += b[0]->current_length = bc[0] & bc_mask;
      bc++;
      b++;
      n_left--;
    }

  return n_rx_bytes;
}
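
/* Striding RQ input. When no packet spans several strides the fast path
   above is used as-is; otherwise buffers belonging to the same packet are
   chained together, filler strides are returned to the buffer pool and the
   packet count is adjusted accordingly. */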
static_always_inline u32
rdma_device_mlx5dv_striding_rq_input (vlib_main_t * vm,
                                      rdma_per_thread_data_t * ptd,
                                      rdma_rxq_t * rxq,
                                      vlib_buffer_t * bt, u32 * to_next,
                                      int n_rx_segs, int *n_rx_packets,
                                      u32 * bc, int slow_path_needed)
{
  u32 mask = rxq->size - 1;
  u32 n_rx_bytes = 0;

  if (PREDICT_TRUE (!slow_path_needed))
    {
      n_rx_bytes +=
        rdma_device_mlx5dv_fast_input (vm, rxq, mask, bt, to_next,
                                       n_rx_segs, bc, CQE_BC_BYTE_COUNT_MASK);
    }
  else                          /* Slow path with multiseg */
    {
      vlib_buffer_t *pkt_head;  /*Current head buffer */
      vlib_buffer_t *pkt_prev; /* Buffer processed at the previous iteration */
      u32 pkt_head_idx;
      vlib_buffer_t **pkt;
      uword n_segs_remaining = 0;       /*Remaining strides in current buffer */
      u32 n_bytes_remaining = 0;        /*Remaining bytes in current buffer */
      u32 *next_in_frame = to_next;
      u32 *next_to_free = ptd->to_free_buffers;

      bt->current_length = vlib_buffer_get_default_data_size (vm);
      do
        {
          vlib_buffer_t *bufs[VLIB_FRAME_SIZE];
          u32 n_left = clib_min (n_rx_segs, VLIB_FRAME_SIZE);
          n_rx_segs -= n_left;
          vlib_buffer_copy_indices_from_ring (ptd->current_segs,
                                              rxq->bufs, rxq->head & mask,
                                              rxq->size, n_left);
          rxq->head += n_left;
          vlib_get_buffers (vm, ptd->current_segs, bufs, n_left);
          pkt = bufs;
          while (n_left > 0)
            {
              /* Initialize the current buffer as full size */
              vlib_buffer_copy_template (pkt[0], bt);
              if (!n_segs_remaining)    /* No pending chain */
                {
                  n_segs_remaining =
                    (bc[0] & CQE_BC_CONSUMED_STRIDES_MASK) >>
                    CQE_BC_CONSUMED_STRIDES_SHIFT;
                  pkt_head = pkt[0];
                  pkt_head_idx = ptd->current_segs[pkt - bufs];
                  n_bytes_remaining = bc[0] & CQE_BC_BYTE_COUNT_MASK;
                  pkt_head->total_length_not_including_first_buffer =
                    n_segs_remaining >
                    1 ? n_bytes_remaining - pkt[0]->current_length : 0;
                }
              else      /* Perform chaining if it's a continuation buffer */
                {
                  pkt_prev->next_buffer = ptd->current_segs[pkt - bufs];
                  pkt_prev->flags |= VLIB_BUFFER_NEXT_PRESENT;
                  pkt[0]->flags &= ~VLIB_BUFFER_TOTAL_LENGTH_VALID;
                }
              if (n_segs_remaining == 1)        /* Last buffer of the chain */
                {
                  pkt[0]->current_length = n_bytes_remaining;
                  if (bc[0] & CQE_BC_FILLER_MASK)
                    {
                      (next_to_free++)[0] = pkt_head_idx;
                      (*n_rx_packets)--;
                    }
                  else
                    {
                      (next_in_frame++)[0] = pkt_head_idx;
                      n_rx_bytes +=
                        pkt_head->current_length +
                        pkt_head->total_length_not_including_first_buffer;
                    }
                  /* Go to next CQE */
                  bc++;
                }
              else
                {
                  n_bytes_remaining -= pkt[0]->current_length;
                }
              pkt_prev = pkt[0];
              n_segs_remaining--;
              pkt++;
              n_left--;
            }
        }
      while (n_rx_segs > 0);

      vlib_buffer_free (vm, ptd->to_free_buffers,
                        next_to_free - ptd->to_free_buffers);
    }

  return n_rx_bytes;
}
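
/* Per-queue input routine shared by the ibverbs and mlx5dv flavours of the
   node: poll completions, initialise the buffer template, hand the buffers
   to the next node (usually ethernet-input), trace, update counters and
   finally refill the RX ring. */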
static_always_inline uword
rdma_device_input_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
                          vlib_frame_t * frame, rdma_device_t * rd,
                          u16 qid, int use_mlx5dv)
{
  rdma_main_t *rm = &rdma_main;
  vnet_main_t *vnm = vnet_get_main ();
  rdma_per_thread_data_t *ptd = vec_elt_at_index (rm->per_thread_data,
                                                  vm->thread_index);
  rdma_rxq_t *rxq = vec_elt_at_index (rd->rxqs, qid);
  struct ibv_wc wc[VLIB_FRAME_SIZE];
  u32 __clib_aligned (32) byte_cnts[VLIB_FRAME_SIZE];
  vlib_buffer_t bt;
  u32 next_index, *to_next, n_left_to_next, n_rx_bytes = 0;
  int n_rx_packets, skip_ip4_cksum = 0;
  u32 mask = rxq->size - 1;

  if (use_mlx5dv)
    n_rx_packets = rdma_device_poll_cq_mlx5dv (rd, rxq, byte_cnts,
                                               ptd->cqe_flags);
  else
    n_rx_packets = ibv_poll_cq (rxq->cq, VLIB_FRAME_SIZE, wc);

  if (PREDICT_FALSE (n_rx_packets <= 0))
    goto refill;

  /* init buffer template */
  vlib_buffer_copy_template (&bt, &ptd->buffer_template);
  vnet_buffer (&bt)->sw_if_index[VLIB_RX] = rd->sw_if_index;
  bt.buffer_pool_index = rd->pool;

  /* update buffer template for input feature arcs if any */
  next_index = rd->per_interface_next_index;
  if (PREDICT_FALSE (vnet_device_input_have_features (rd->sw_if_index)))
    vnet_feature_start_device_input_x1 (rd->sw_if_index, &next_index, &bt);

  vlib_get_new_next_frame (vm, node, next_index, to_next, n_left_to_next);

  if (use_mlx5dv)
    {
      u32 *bc = byte_cnts;
      int slow_path_needed;

      skip_ip4_cksum =
        rdma_device_mlx5dv_l3_validate_and_swap_bc (ptd, n_rx_packets, bc);
      if (rd->flags & RDMA_DEVICE_F_STRIDING_RQ)
        {
          int n_rx_segs = 0;
          slow_path_needed =
            rdma_device_mlx5dv_striding_rq_parse_bc (n_rx_packets,
                                                     &n_rx_segs, bc);
          n_rx_bytes =
            rdma_device_mlx5dv_striding_rq_input (vm, ptd, rxq, &bt,
                                                  to_next, n_rx_segs,
                                                  &n_rx_packets, bc,
                                                  slow_path_needed);
        }
      else
        {
          /*For now, legacy path doesn't support multiseg */
          n_rx_bytes =
            rdma_device_mlx5dv_fast_input (vm, rxq, mask, &bt, to_next,
                                           n_rx_packets, bc, ~1);
        }
    }
  else
    {
      vlib_buffer_t *bufs[VLIB_FRAME_SIZE];
      vlib_buffer_copy_indices_from_ring (to_next, rxq->bufs,
                                          rxq->head & mask,
                                          rxq->size, n_rx_packets);
      vlib_get_buffers (vm, to_next, bufs, n_rx_packets);
      rxq->head += n_rx_packets;
      n_rx_bytes =
        rdma_device_input_bufs (vm, rd, bufs, wc, n_rx_packets, &bt);
    }

  rdma_device_input_ethernet (vm, node, rd, next_index, skip_ip4_cksum);
  vlib_put_next_frame (vm, node, next_index, n_left_to_next - n_rx_packets);
  rdma_device_input_trace (vm, node, rd, n_rx_packets, to_next,
                           next_index, ptd->cqe_flags, use_mlx5dv);

  /* reset flags to zero for the next run */
  clib_memset_u16 (ptd->cqe_flags, 0, VLIB_FRAME_SIZE);

  vlib_increment_combined_counter (vnm->interface_main.
                                   combined_sw_if_counters +
                                   VNET_INTERFACE_COUNTER_RX,
                                   vm->thread_index, rd->hw_if_index,
                                   n_rx_packets, n_rx_bytes);

refill:
  rdma_device_input_refill (vm, rd, rxq, use_mlx5dv,
                            ! !(rd->flags & RDMA_DEVICE_F_STRIDING_RQ));

  return n_rx_packets;
}
VLIB_NODE_FN (rdma_input_node) (vlib_main_t * vm,
                                vlib_node_runtime_t * node,
                                vlib_frame_t * frame)
{
  u32 n_rx = 0;
  rdma_main_t *rm = &rdma_main;
  vnet_device_input_runtime_t *rt = (void *) node->runtime_data;
  vnet_device_and_queue_t *dq;

  foreach_device_and_queue (dq, rt->devices_and_queues)
  {
    rdma_device_t *rd;
    rd = vec_elt_at_index (rm->devices, dq->dev_instance);

    if (PREDICT_TRUE (rd->flags & RDMA_DEVICE_F_ADMIN_UP) == 0)
      continue;

    if (PREDICT_TRUE (rd->flags & RDMA_DEVICE_F_ERROR))
      continue;

    if (PREDICT_TRUE (rd->flags & RDMA_DEVICE_F_MLX5DV))
      n_rx += rdma_device_input_inline (vm, node, frame, rd, dq->queue_id, 1);
    else
      n_rx += rdma_device_input_inline (vm, node, frame, rd, dq->queue_id, 0);
  }

  return n_rx;
}
VLIB_REGISTER_NODE (rdma_input_node) = {
  .name = "rdma-input",
  .flags = VLIB_NODE_FLAG_TRACE_SUPPORTED,
  .sibling_of = "device-input",
  .format_trace = format_rdma_input_trace,
  .type = VLIB_NODE_TYPE_INPUT,
  .state = VLIB_NODE_STATE_DISABLED,
  .n_errors = RDMA_INPUT_N_ERROR,
  .error_strings = rdma_input_error_strings,
};

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */