/* SPDX-License-Identifier: Apache-2.0
 * Copyright (c) 2023 Cisco Systems, Inc.
 */
#include <vlib/vlib.h>
#include <vnet/dev/dev.h>
#include <vnet/ethernet/ethernet.h>
#include <vppinfra/vector/mask_compare.h>
#include <vppinfra/vector/compress.h>

#include <dev_ena/ena.h>
#include <dev_ena/ena_inlines.h>

#define ENA_RX_REFILL_BATCH 32
typedef struct
{
  u16 size;
  u32 mask;
  ena_rx_cdesc_status_t st_or;
  ena_rx_cdesc_status_t st_and;
  u16 *comp_sqe_indices;
  u32 *sq_buffer_indices;
} ena_rx_ctx_t;
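/* Convert an array of RX completion descriptor status words into vlib
 * buffer flags. When chained (multi-descriptor) packets may be present,
 * the head descriptors are also marked in first_bmp so heads can later be
 * separated from tails. */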
static_always_inline void
ena_device_input_status_to_flags (ena_rx_cdesc_status_t *statuses, u32 *flags,
				  u32 n_desc, vlib_frame_bitmap_t first_bmp,
				  int maybe_chained)
{
  const ena_rx_cdesc_status_t mask_first = { .first = 1 },
			      match_first1 = { .first = 1 };

  const ena_rx_cdesc_status_t mask_last = { .last = 1 },
			      match_last0 = { .last = 0 };

  const ena_rx_cdesc_status_t mask_l4_csum = { .ipv4_frag = 1,
					       .l4_csum_err = 1,
					       .l4_csum_checked = 1 },
			      match_l4_csum_ok = { .l4_csum_checked = 1 };
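  /* zero-pad the status entries past n_desc; the vector loop below rounds
   * n_desc up to a full batch and may read past the last valid entry */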
  clib_memset_u32 (statuses + n_desc, 0, 8);

#if defined(CLIB_HAVE_VEC128)

#if defined(CLIB_HAVE_VEC512)
#define N	    16
#define u32xN	    u32x16
#define u32xNu	    u32x16u
#define u32xN_splat u32x16_splat
#elif defined(CLIB_HAVE_VEC256)
#define N	    8
#define u32xN	    u32x8
#define u32xNu	    u32x8u
#define u32xN_splat u32x8_splat
#else
#define N	    4
#define u32xN	    u32x4
#define u32xNu	    u32x4u
#define u32xN_splat u32x4_splat
#endif
  const u32xN st_mask_first = u32xN_splat (mask_first.as_u32);
  const u32xN st_match_first1 = u32xN_splat (match_first1.as_u32);
  const u32xN st_mask_last = u32xN_splat (mask_last.as_u32);
  const u32xN st_match_last0 = u32xN_splat (match_last0.as_u32);
  const u32xN st_mask_l4_csum = u32xN_splat (mask_l4_csum.as_u32);
  const u32xN st_match_l4_csum_ok = u32xN_splat (match_l4_csum_ok.as_u32);
  const u32xN f_total_len_valid = u32xN_splat (VLIB_BUFFER_TOTAL_LENGTH_VALID);
  const u32xN f_next_preset = u32xN_splat (VLIB_BUFFER_NEXT_PRESENT);
  const u32xN f_l4_csum = u32xN_splat (VNET_BUFFER_F_L4_CHECKSUM_CORRECT |
				       VNET_BUFFER_F_L4_CHECKSUM_COMPUTED);
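  /* process 2 x N statuses per iteration: mask-and-compare each status
   * word to derive the per-buffer flags without branches */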
  for (u32 i = 0; i < round_pow2 (n_desc, 2 * N); i += 2 * N)
    {
      uword msk = 0;
      u32xN f0, f1, r0, r1;
      u32xN s0 = ((u32xNu *) (statuses + i))[0];
      u32xN s1 = ((u32xNu *) (statuses + i))[1];

      r0 = (s0 & st_mask_first) == st_match_first1;
      r1 = (s1 & st_mask_first) == st_match_first1;
      f0 = r0 & f_total_len_valid;
      f1 = r1 & f_total_len_valid;
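      /* when chains are possible, collect the per-descriptor 'first' bits
       * (one msb per 32-bit lane) into the head-of-packet bitmap */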
      if (maybe_chained)
	{
#if defined(CLIB_HAVE_VEC512)
	  u64 msb_mask = 0x1111111111111111;
	  msk = bit_extract_u64 (u8x64_msb_mask ((u8x64) r0), msb_mask);
	  msk |= bit_extract_u64 (u8x64_msb_mask ((u8x64) r1), msb_mask) << 16;
#elif defined(CLIB_HAVE_VEC256)
	  msk = u8x32_msb_mask ((u8x32) r0);
	  msk |= (u64) u8x32_msb_mask ((u8x32) r1) << 32;
	  msk = bit_extract_u64 (msk, 0x1111111111111111);
#else
	  msk = u8x16_msb_mask ((u8x16) r0);
	  msk |= (u32) u8x16_msb_mask ((u8x16) r1) << 16;
	  msk = bit_extract_u32 (msk, 0x11111111);
#endif
	  first_bmp[i / uword_bits] |= msk << (i % uword_bits);
	}
      f0 |= ((s0 & st_mask_last) == st_match_last0) & f_next_preset;
      f1 |= ((s1 & st_mask_last) == st_match_last0) & f_next_preset;

      f0 |= ((s0 & st_mask_l4_csum) == st_match_l4_csum_ok) & f_l4_csum;
      f1 |= ((s1 & st_mask_l4_csum) == st_match_l4_csum_ok) & f_l4_csum;

      ((u32xNu *) (flags + i))[0] = f0;
      ((u32xNu *) (flags + i))[1] = f1;
    }
#else
  /* scalar fallback */
  while (n_desc)
    {
      u32 f = 0;
      ena_rx_cdesc_status_t st = statuses++[0];

      if ((st.as_u32 & mask_first.as_u32) == match_first1.as_u32)
	f |= VLIB_BUFFER_TOTAL_LENGTH_VALID;

      if ((st.as_u32 & mask_last.as_u32) == match_last0.as_u32)
	f |= VLIB_BUFFER_NEXT_PRESENT;

      if ((st.as_u32 & mask_l4_csum.as_u32) == match_l4_csum_ok.as_u32)
	f |= VNET_BUFFER_F_L4_CHECKSUM_COMPUTED |
	     VNET_BUFFER_F_L4_CHECKSUM_CORRECT;

      flags++[0] = f;
      n_desc--;
    }
#endif
}
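/* Dequeue up to one frame worth of completed descriptors from the
 * completion queue. Per-descriptor status and length are stored into the
 * caller-provided arrays, incomplete trailing chains are reverted, and the
 * number of dequeued descriptors is returned. */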
static_always_inline u16
ena_device_input_cq_dequeue_no_wrap (ena_rx_ctx_t *ctx, ena_rxq_t *q,
				     ena_rx_cdesc_status_t *statuses,
				     u16 *lengths, u16 *csi)
{
  u32 next = q->cq_next;
  ena_rx_cdesc_t *cqes = q->cqes;
  u32 phase = (next & ctx->size << 1) != 0;
  u16 index = next & ctx->mask;
  ena_rx_cdesc_t *cd = cqes + index;
  ena_rx_cdesc_status_t st;
  u32 n_to_check, i = 0;

  st = cd->status;
  if (st.phase == phase)
    return 0;
  n_to_check = clib_min (VLIB_FRAME_SIZE, ctx->size - index);

  /* consume the first completion, then continue in the loop below */
  ctx->st_or.as_u32 |= st.as_u32;
  ctx->st_and.as_u32 &= st.as_u32;
  statuses[i] = st;
  lengths[i] = cd->length;
  i++;
  cd++;
  for (st = cd->status; i < n_to_check && st.phase != phase;
       i++, st = (++cd)->status)
    {
      ctx->st_or.as_u32 |= st.as_u32;
      ctx->st_and.as_u32 &= st.as_u32;
      statuses[i] = st;
      lengths[i] = cd->length;
    }
  /* wrap-around: when the end of the ring is reached before a full frame
   * is collected, continue from the start of the ring with the remaining
   * budget */
  n_to_check = VLIB_FRAME_SIZE - n_to_check;
  /* revert incomplete */
  if (PREDICT_FALSE (statuses[i - 1].last == 0))
    while (i && statuses[i - 1].last == 0)
      i--;

  return i;
}
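/* Refill the RX submission queue: allocate buffers for the completed SQE
 * indices, rewrite those descriptors with the phase bit flipped, and ring
 * the doorbell. Indices that could not be refilled (buffer allocation
 * failure) are kept for the next run. */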
static_always_inline void
ena_device_input_refill (vlib_main_t *vm, ena_rx_ctx_t *ctx,
			 vnet_dev_rx_queue_t *rxq, int use_va)
{
  ena_rxq_t *q = vnet_dev_get_rx_queue_data (rxq);
  const u64x2 flip_phase = (ena_rx_desc_t){ .lo.phase = 1 }.as_u64x2;
  u32 buffer_indices[ENA_RX_REFILL_BATCH];
  uword dma_addr[ENA_RX_REFILL_BATCH];
  u32 n_alloc, n_compl_sqes = q->n_compl_sqes;
  u16 *csi = ctx->comp_sqe_indices;
  ena_rx_desc_t *sqes = q->sqes;
  while (n_compl_sqes > 0)
    {
      n_alloc = vlib_buffer_alloc_from_pool (
	vm, buffer_indices, clib_min (ENA_RX_REFILL_BATCH, n_compl_sqes),
	vnet_dev_get_rx_queue_buffer_pool_index (rxq));

      if (PREDICT_FALSE (n_alloc == 0))
	break;
      vlib_get_buffers_with_offset (vm, buffer_indices, (void **) dma_addr,
				    n_alloc,
				    STRUCT_OFFSET_OF (vlib_buffer_t, data));

      /* translate to physical addresses when the device cannot DMA to
       * virtual addresses */
      if (!use_va)
	for (u32 i = 0; i < n_alloc; i++)
	  dma_addr[i] = vlib_physmem_get_pa (vm, (void *) dma_addr[i]);
      for (u32 i = 0; i < n_alloc; i++)
	{
	  u16 slot = csi[i];
	  u64x2 r = sqes[slot].as_u64x2 ^ flip_phase;
	  ctx->sq_buffer_indices[slot] = buffer_indices[i];
	  r[1] = dma_addr[i]; /* buffer DMA address in the upper 8 bytes */
	  sqes[slot].as_u64x2 = r; /* write SQE as single 16-byte store */
	}

      csi += n_alloc;
      n_compl_sqes -= n_alloc;
    }
  /* nothing refilled, no doorbell needed */
  if (n_compl_sqes == q->n_compl_sqes)
    return;

  q->sq_next += q->n_compl_sqes - n_compl_sqes;
  __atomic_store_n (q->sq_db, q->sq_next, __ATOMIC_RELEASE);

  /* keep any SQE indices we failed to refill for the next run */
  if (PREDICT_FALSE (n_compl_sqes))
    clib_memmove (ctx->comp_sqe_indices, csi, n_compl_sqes * sizeof (csi[0]));

  q->n_compl_sqes = n_compl_sqes;
}
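/* Per-queue input routine: dequeue completions, turn them into a vector of
 * vlib buffers (linking chained buffers when needed), hand the frame to
 * the next node, update counters and refill the submission queue. */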
static_always_inline uword
ena_device_input_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
			 vnet_dev_rx_queue_t *rxq)
{
  ena_rxq_t *q = vnet_dev_get_rx_queue_data (rxq);
  vnet_dev_port_t *port = rxq->port;
  vnet_main_t *vnm = vnet_get_main ();
  vlib_buffer_t *buffers[VLIB_FRAME_SIZE], **b;
  ena_rx_cdesc_status_t statuses[VLIB_FRAME_SIZE + 8];
  u16 lengths[VLIB_FRAME_SIZE + 8], *l;
  u32 flags[VLIB_FRAME_SIZE + 8], *f;
  u32 *bi, maybe_chained;
  u16 *csi;
  uword n_rx_packets = 0, n_rx_bytes = 0;
  vlib_frame_bitmap_t head_bmp = {};
  u32 sw_if_index = port->intf.sw_if_index;
  u32 hw_if_index = port->intf.hw_if_index;
  u32 n_trace, n_deq, n_left;
  u32 cq_next = q->cq_next;
  u32 next_index = rxq->next_index;
  vlib_frame_t *next_frame;
  vlib_buffer_template_t bt = rxq->buffer_template;
  /* ring size must be a power of two */
  ASSERT (count_set_bits (rxq->size) == 1);

  ena_rx_ctx_t ctx = {
    .size = rxq->size,
    .mask = rxq->size - 1,
    .st_and.as_u32 = ~0, /* so the AND-accumulation below is meaningful */
    .comp_sqe_indices = q->compl_sqe_indices,
    .sq_buffer_indices = q->buffer_indices,
  };
  /* we may have completed SQE indices from previous run */
  csi = ctx.comp_sqe_indices + q->n_compl_sqes;

  n_deq =
    ena_device_input_cq_dequeue_no_wrap (&ctx, q, statuses, lengths, csi);

  q->n_compl_sqes += n_deq;

  maybe_chained = ctx.st_and.first && ctx.st_and.last ? 0 : 1;
  next_frame =
    vlib_get_next_frame_internal (vm, node, next_index, /* new frame */ 1);
  bi = vlib_frame_vector_args (next_frame);

  /* move buffer indices from the ring */
  for (u32 i = 0; i < n_deq; i++)
    {
      u16 slot = csi[i];
      bi[i] = ctx.sq_buffer_indices[slot];
      ctx.sq_buffer_indices[slot] = VLIB_BUFFER_INVALID_INDEX;
    }

  vlib_get_buffers (vm, bi, buffers, n_deq);
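  /* if every dequeued completion had both 'first' and 'last' set, no packet
   * spans multiple descriptors and the cheaper non-chained variant is used */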
  if (PREDICT_FALSE (maybe_chained))
    ena_device_input_status_to_flags (statuses, flags, n_deq, head_bmp, 1);
  else
    ena_device_input_status_to_flags (statuses, flags, n_deq, head_bmp, 0);
  /* apply buffer template, length and per-packet flags, 4 buffers at a
   * time, prefetching 4 buffers ahead */
  for (b = buffers, l = lengths, f = flags, n_left = n_deq; n_left >= 8;
       b += 4, f += 4, l += 4, n_left -= 4)
    {
      clib_prefetch_store (b[4]);
      clib_prefetch_store (b[5]);
      clib_prefetch_store (b[6]);
      clib_prefetch_store (b[7]);

      b[0]->template = bt;
      n_rx_bytes += b[0]->current_length = l[0];
      b[0]->flags |= f[0];

      b[1]->template = bt;
      n_rx_bytes += b[1]->current_length = l[1];
      b[1]->flags |= f[1];

      b[2]->template = bt;
      n_rx_bytes += b[2]->current_length = l[2];
      b[2]->flags |= f[2];

      b[3]->template = bt;
      n_rx_bytes += b[3]->current_length = l[3];
      b[3]->flags |= f[3];
    }

  for (; n_left > 0; b += 1, f += 1, l += 1, n_left -= 1)
    {
      b[0]->template = bt;
      n_rx_bytes += b[0]->current_length = l[0];
      b[0]->flags |= f[0];
    }
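  /* link chained buffers: tail descriptors follow their head, so walk the
   * tail bitmap, chain each tail to its predecessor and accumulate total
   * length and flags onto the head buffer */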
  if (PREDICT_FALSE (maybe_chained))
    {
      vlib_buffer_t *hb = 0;
      vlib_frame_bitmap_t tail_buf_bmp = {};
      u32 i, total_len = 0, head_flags = 0, tail_flags = 0;
      n_rx_packets = vlib_frame_bitmap_count_set_bits (head_bmp);

      vlib_frame_bitmap_init (tail_buf_bmp, n_deq);
      vlib_frame_bitmap_xor (tail_buf_bmp, head_bmp);

      foreach_vlib_frame_bitmap_set_bit_index (i, tail_buf_bmp)
	{
	  vlib_buffer_t *pb = buffers[i - 1];
	  /* only store operations here */
	  pb->next_buffer = bi[i];
	  if (vlib_frame_bitmap_is_bit_set (tail_buf_bmp, i - 1) == 0)
	    {
	      /* first tail of a new chain, close out the previous chain */
	      if (hb)
		{
		  hb->total_length_not_including_first_buffer = total_len;
		  /* tail descriptor contains protocol info so we need to
		   * combine head and tail buffer flags */
		  hb->flags = head_flags | tail_flags;
		}
	      head_flags = flags[i - 1];
	      hb = pb;
	      total_len = 0;
	    }
	  total_len += lengths[i];
	  tail_flags = flags[i];
	}

      /* close out the last chain */
      hb->total_length_not_including_first_buffer = total_len;
      hb->flags = head_flags | tail_flags;
    }
  else
    n_rx_packets = n_deq;
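  /* packet tracing, only on the head buffers */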
  if (PREDICT_FALSE ((n_trace = vlib_get_trace_count (vm, node))))
    {
      u32 i;
      if (!maybe_chained)
	vlib_frame_bitmap_init (head_bmp, n_deq);
      foreach_vlib_frame_bitmap_set_bit_index (i, head_bmp)
	{
	  vlib_buffer_t *b = buffers[i];
	  if (vlib_trace_buffer (vm, node, next_index, b, 0))
	    {
	      u32 j = i;
	      ena_rx_trace_t *tr = vlib_add_trace (vm, node, b, sizeof (*tr));
	      tr->next_index = next_index;
	      tr->qid = rxq->queue_id;
	      tr->hw_if_index = hw_if_index;
	      tr->length = lengths[i];
	      tr->status = statuses[i];
	      /* for chained packets, report combined length and the tail
	       * status which carries the protocol info */
	      while (statuses[j].last == 0)
		{
		  j++;
		  tr->length += lengths[j];
		  tr->status = statuses[j];
		}
	      n_trace--;
	      if (n_trace == 0)
		break;
	    }
	}
      vlib_set_trace_count (vm, node, n_trace);
    }
  if (PREDICT_FALSE (maybe_chained))
    clib_compress_u32 (bi, bi, head_bmp, n_deq);
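  /* when handing the frame directly to ethernet-input, provide hints so it
   * can skip per-packet interface lookup and ip4 checksum validation */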
  if (PREDICT_TRUE (next_index == VNET_DEVICE_INPUT_NEXT_ETHERNET_INPUT))
    {
      ethernet_input_frame_t *ef;
      next_frame->flags = ETH_INPUT_FRAME_F_SINGLE_SW_IF_IDX;

      ef = vlib_frame_scalar_args (next_frame);
      ef->sw_if_index = sw_if_index;
      ef->hw_if_index = hw_if_index;

      if (ctx.st_or.l3_csum_err == 0)
	next_frame->flags |= ETH_INPUT_FRAME_F_IP4_CKSUM_OK;
      vlib_frame_no_append (next_frame);
    }
  vlib_put_next_frame (vm, node, next_index, VLIB_FRAME_SIZE - n_rx_packets);

  vlib_increment_combined_counter (
    vnm->interface_main.combined_sw_if_counters + VNET_INTERFACE_COUNTER_RX,
    vm->thread_index, hw_if_index, n_rx_packets, n_rx_bytes);

  q->cq_next = cq_next + n_deq;
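  /* refill the submission queue, using virtual addresses when the device
   * supports VA DMA */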
  if (rxq->port->dev->va_dma)
    ena_device_input_refill (vm, &ctx, rxq, 1);
  else
    ena_device_input_refill (vm, &ctx, rxq, 0);

  return n_rx_packets;
}
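/* input node function, polls all RX queues assigned to this node */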
VNET_DEV_NODE_FN (ena_rx_node)
(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *frame)
{
  uword n_rx = 0;

  foreach_vnet_dev_rx_queue_runtime (rxq, node)
    n_rx += ena_device_input_inline (vm, node, rxq);

  return n_rx;
}