+ /* refill only up to the end of the ring so the loops below can
+  * ignore wrap-around */
+ n_alloc = clib_min (n_alloc, rxq->size - slot);
+
+ n_alloc &= ~7; /* round down to a multiple of 8 */
+
+ n = vlib_buffer_alloc_to_ring_from_pool (vm, rxq->bufs, slot, rxq->size,
+ n_alloc, rd->pool);
+
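+ /* allocation may fall short under buffer pressure; anything that
+  * cannot fill a whole batch of 8 goes back to the pool */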
+ if (PREDICT_FALSE (n != n_alloc))
+ {
+ u32 n_free;
+ if (n < 8)
+ {
+ if (n)
+ vlib_buffer_free_from_ring (vm, rxq->bufs, slot, rxq->size, n);
+ return;
+ }
+
+ /* partial allocation: keep a multiple of 8 and return the rest */
+ n_free = n & 7;
+ n -= n_free;
+ if (n_free)
+ vlib_buffer_free_from_ring (vm, rxq->bufs, (slot + n) & mask,
+ rxq->size, n_free);
+ }
+
+ n_alloc = n;
+
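+ /* mlx5 direct verbs: write buffer addresses straight into the
+  * receive WQEs and publish them with a doorbell update */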
+ if (is_mlx5dv)
+ {
+ u64 va[8];
+ mlx5dv_rwq_t *wqe = rxq->wqes + slot;
+
+ /* n is a multiple of 8 here; fill 8 WQEs per iteration */
+ while (n >= 8)
+ {
+ vlib_get_buffers_with_offset (vm, rxq->bufs + slot, (void **) va, 8,
+ sizeof (vlib_buffer_t));
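+ /* the NIC expects big-endian addresses; swap 4 lanes at a time
+  * when 256-bit vectors are available */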
+#ifdef CLIB_HAVE_VEC256
+ *(u64x4 *) va = u64x4_byte_swap (*(u64x4 *) va);
+ *(u64x4 *) (va + 4) = u64x4_byte_swap (*(u64x4 *) (va + 4));
+#else
+ for (int i = 0; i < 8; i++)
+ va[i] = clib_host_to_net_u64 (va[i]);
+#endif
+ wqe[0].addr = va[0];
+ wqe[1].addr = va[1];
+ wqe[2].addr = va[2];
+ wqe[3].addr = va[3];
+ wqe[4].addr = va[4];
+ wqe[5].addr = va[5];
+ wqe[6].addr = va[6];
+ wqe[7].addr = va[7];
+ wqe += 8;
+ slot += 8;
+ n -= 8;
+ }
+
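+ /* make the WQE writes globally visible before handing the new
+  * tail to the NIC via the doorbell record */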
+ CLIB_MEMORY_STORE_BARRIER ();
+ rxq->tail += n_alloc;
+ rxq->wq_db[MLX5_RCV_DBR] = clib_host_to_net_u32 (rxq->tail);
+ return;
+ }
+
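+ /* plain ibverbs: chain receive work requests 8 at a time; wr, w,
+  * s, bufs, data_size and lkey are set up earlier in this function */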
+ while (n >= 8)
+ {
+ u64 va[8];
+ if (PREDICT_TRUE (n >= 16))
+ {
+ clib_prefetch_store (s + 16);
+ clib_prefetch_store (w + 16);
+ }
+
+ vlib_get_buffers_with_offset (vm, bufs, (void **) va, 8,
+ sizeof (vlib_buffer_t));
+
+ ibv_set_recv_wr_and_sge (w++, s++, va[0], data_size, lkey);
+ ibv_set_recv_wr_and_sge (w++, s++, va[1], data_size, lkey);
+ ibv_set_recv_wr_and_sge (w++, s++, va[2], data_size, lkey);
+ ibv_set_recv_wr_and_sge (w++, s++, va[3], data_size, lkey);
+ ibv_set_recv_wr_and_sge (w++, s++, va[4], data_size, lkey);
+ ibv_set_recv_wr_and_sge (w++, s++, va[5], data_size, lkey);
+ ibv_set_recv_wr_and_sge (w++, s++, va[6], data_size, lkey);
+ ibv_set_recv_wr_and_sge (w++, s++, va[7], data_size, lkey);
+
+ bufs += 8;
+ n -= 8;
+ }
+
+ w[-1].next = 0; /* terminate the WR chain at the last item */
+
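+ /* on failure, ibv_post_wq_recv points w at the offending WR; free
+  * the buffers whose WRs were never posted */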
+ n = n_alloc;
+ if (ibv_post_wq_recv (rxq->wq, wr, &w) != 0)
+ {
+ n = w - wr;
+ vlib_buffer_free_from_ring (vm, rxq->bufs, slot + n, rxq->size,
+ n_alloc - n);
+ }
+
+ rxq->tail += n;
+}
+
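+ /* add per-packet trace records while the trace buffer has room;
+  * cqe_flags is only meaningful on the mlx5dv path */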
+static_always_inline void
+rdma_device_input_trace (vlib_main_t * vm, vlib_node_runtime_t * node,
+ const rdma_device_t * rd, u32 n_left, const u32 * bi,
+ u32 next_index, u16 * cqe_flags, int is_mlx5dv)
+{
+ u32 n_trace;
+
+ if (PREDICT_TRUE (0 == (n_trace = vlib_get_trace_count (vm, node))))
+ return;
+
+ while (n_trace && n_left)
+ {
+ vlib_buffer_t *b;
+ rdma_input_trace_t *tr;
+ b = vlib_get_buffer (vm, bi[0]);
+ vlib_trace_buffer (vm, node, next_index, b,
+ /* follow_chain */ 0);
+ tr = vlib_add_trace (vm, node, b, sizeof (*tr));
+ tr->next_index = next_index;
+ tr->hw_if_index = rd->hw_if_index;
+ tr->cqe_flags = is_mlx5dv ? clib_net_to_host_u16 (cqe_flags[0]) : 0;
+
+ /* next */
+ n_trace--;
+ n_left--;
+ cqe_flags++;
+ bi++;
+ }
+ vlib_set_trace_count (vm, node, n_trace);
+}
+
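+ /* when packets go straight to ethernet-input, mark the frame for
+  * the single-interface fast path and, when the NIC verified them,
+  * advertise valid ip4 checksums */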
+static_always_inline void
+rdma_device_input_ethernet (vlib_main_t * vm, vlib_node_runtime_t * node,
+ const rdma_device_t * rd, u32 next_index,
+ int skip_ip4_cksum)
+{
+ vlib_next_frame_t *nf;
+ vlib_frame_t *f;
+ ethernet_input_frame_t *ef;
+
+ if (PREDICT_FALSE (VNET_DEVICE_INPUT_NEXT_ETHERNET_INPUT != next_index))
+ return;
+
+ nf =
+ vlib_node_runtime_get_next_frame (vm, node,
+ VNET_DEVICE_INPUT_NEXT_ETHERNET_INPUT);
+ f = vlib_get_frame (vm, nf->frame);
+ f->flags = ETH_INPUT_FRAME_F_SINGLE_SW_IF_IDX;
+ if (skip_ip4_cksum)
+ f->flags |= ETH_INPUT_FRAME_F_IP4_CKSUM_OK;
+
+ ef = vlib_frame_scalar_args (f);
+ ef->sw_if_index = rd->sw_if_index;
+ ef->hw_if_index = rd->hw_if_index;
+}
+
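+ /* initialize buffer metadata from the template and take packet
+  * lengths from the work-completion byte counts; returns the total
+  * number of bytes received */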
+static_always_inline u32
+rdma_device_input_bufs (vlib_main_t * vm, const rdma_device_t * rd,
+ vlib_buffer_t ** b, struct ibv_wc *wc,
+ u32 n_left_from, vlib_buffer_t * bt)
+{
+ u32 n_rx_bytes = 0;
+
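+ /* 4 packets per iteration, prefetching the next 4 completions and
+  * buffer headers one round ahead */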
+ while (n_left_from >= 4)
+ {
+ if (PREDICT_TRUE (n_left_from >= 8))
+ {
+ CLIB_PREFETCH (&wc[4 + 0], CLIB_CACHE_LINE_BYTES, LOAD);
+ CLIB_PREFETCH (&wc[4 + 1], CLIB_CACHE_LINE_BYTES, LOAD);
+ CLIB_PREFETCH (&wc[4 + 2], CLIB_CACHE_LINE_BYTES, LOAD);
+ CLIB_PREFETCH (&wc[4 + 3], CLIB_CACHE_LINE_BYTES, LOAD);
+ vlib_prefetch_buffer_header (b[4 + 0], STORE);
+ vlib_prefetch_buffer_header (b[4 + 1], STORE);
+ vlib_prefetch_buffer_header (b[4 + 2], STORE);
+ vlib_prefetch_buffer_header (b[4 + 3], STORE);
+ }
+
+ vlib_buffer_copy_template (b[0], bt);
+ vlib_buffer_copy_template (b[1], bt);
+ vlib_buffer_copy_template (b[2], bt);
+ vlib_buffer_copy_template (b[3], bt);
+
+ n_rx_bytes += b[0]->current_length = wc[0].byte_len;
+ n_rx_bytes += b[1]->current_length = wc[1].byte_len;
+ n_rx_bytes += b[2]->current_length = wc[2].byte_len;
+ n_rx_bytes += b[3]->current_length = wc[3].byte_len;
+
+ b += 4;
+ wc += 4;
+ n_left_from -= 4;
+ }
+
+ while (n_left_from >= 1)
+ {
+ vlib_buffer_copy_template (b[0], bt);
+ n_rx_bytes += b[0]->current_length = wc[0].byte_len;
+
+ b += 1;
+ wc += 1;
+ n_left_from -= 1;
+ }
+
+ return n_rx_bytes;
+}
+
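+ /* with CQE compression, the title CQE is followed by arrays of 8
+  * mini-CQEs carrying only byte counts; expand them into byte_cnt,
+  * handling ring wrap and an initial skip */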
+static_always_inline void
+process_mini_cqes (rdma_rxq_t * rxq, u32 skip, u32 n_left, u32 cq_ci,
+ u32 mask, u32 * byte_cnt)
+{
+ mlx5dv_mini_cqe_t *mcqe;
+ u32 mcqe_array_index = (cq_ci + 1) & mask;
+ mcqe = (mlx5dv_mini_cqe_t *) (rxq->cqes + mcqe_array_index);
+
+ mcqe_array_index = cq_ci;
+
+ if (skip)
+ {
+ u32 n = skip & ~7;
+
+ if (n)
+ {
+ mcqe_array_index = (mcqe_array_index + n) & mask;
+ mcqe = (mlx5dv_mini_cqe_t *) (rxq->cqes + mcqe_array_index);
+ skip -= n;
+ }
+
+ if (skip)
+ {
+ n = clib_min (8 - skip, n_left);
+ for (int i = 0; i < n; i++)
+ byte_cnt[i] = mcqe[skip + i].byte_count;
+ mcqe_array_index = (mcqe_array_index + 8) & mask;
+ mcqe = (mlx5dv_mini_cqe_t *) (rxq->cqes + mcqe_array_index);
+ n_left -= n;
+ byte_cnt += n;
+ }
+ }
+
+ while (n_left >= 8)
+ {
+ for (int i = 0; i < 8; i++)
+ byte_cnt[i] = mcqe[i].byte_count;
+
+ n_left -= 8;
+ byte_cnt += 8;
+ mcqe_array_index = (mcqe_array_index + 8) & mask;
+ mcqe = (mlx5dv_mini_cqe_t *) (rxq->cqes + mcqe_array_index);
+ }
+
+ if (n_left)
+ {
+ for (int i = 0; i < n_left; i++)
+ byte_cnt[i] = mcqe[i].byte_count;
+ }
+}
+
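+ /* rewrite the opcode/format/se/owner byte on a run of CQEs,
+  * unrolled by 8 */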
+static_always_inline void
+cqe_set_owner (mlx5dv_cqe_t * cqe, u32 n_left, u8 owner)
+{
+ while (n_left >= 8)
+ {
+ cqe[0].opcode_cqefmt_se_owner = owner;
+ cqe[1].opcode_cqefmt_se_owner = owner;
+ cqe[2].opcode_cqefmt_se_owner = owner;
+ cqe[3].opcode_cqefmt_se_owner = owner;
+ cqe[4].opcode_cqefmt_se_owner = owner;
+ cqe[5].opcode_cqefmt_se_owner = owner;
+ cqe[6].opcode_cqefmt_se_owner = owner;
+ cqe[7].opcode_cqefmt_se_owner = owner;
+ n_left -= 8;
+ cqe += 8;
+ }
+ while (n_left)
+ {
+ cqe[0].opcode_cqefmt_se_owner = owner;
+ n_left--;
+ cqe++;
+ }
+}
+
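+ /* a compressed session consumes CQE slots the NIC never wrote
+  * individually; hand their owner bits back to hardware */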
+static_always_inline void
+compressed_cqe_reset_owner (rdma_rxq_t * rxq, u32 n_mini_cqes, u32 cq_ci,
+ u32 mask, u32 log2_cq_size)
+{
+ u8 owner;
+ u32 offset, cq_size = 1 << log2_cq_size;
+