/* SPDX-License-Identifier: Apache-2.0
 * Copyright (c) 2023 Cisco Systems, Inc.
 */
#include <vppinfra/ring.h>
#include <vppinfra/vector/ip_csum.h>

#include <vnet/dev/dev.h>
#include <vnet/ethernet/ethernet.h>
#include <vnet/ip/ip4_packet.h>
#include <vnet/ip/ip6_packet.h>
#include <vnet/udp/udp_packet.h>
#include <vnet/tcp/tcp_packet.h>

#include <dev_octeon/octeon.h>
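
/* Per-dispatch TX context carried through the enqueue helpers below: the
 * pre-built NIX send header word, the trace node, buffers dropped because
 * their chains are too long, batch-alloc failure counters and the per-core
 * LMT line mapping used to submit descriptors. */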
typedef struct
{
  union nix_send_hdr_w0_u hdr_w0_template;
  vlib_node_runtime_t *node;
  u32 n_drop;
  vlib_buffer_t *drop[VLIB_FRAME_SIZE];
  u32 batch_alloc_not_ready;
  u32 batch_alloc_issue_fail;
  u32 lmt_id;
  u64 lmt_ioaddr;
  lmt_line_t *lmt_lines;
} oct_tx_ctx_t;
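
/*
 * oct_batch_free: recycle buffers whose transmission the NIX SQ has
 * completed. Such buffers come back to the NPA aura and are pulled out with
 * the batch-alloc mechanism: hardware fills each 128-byte cacheline of
 * ctq->ba_buffer with up to 16 pointers plus status words. The walk below
 * frees the corresponding vlib buffers, resets the status words and, when
 * enough pointers are available in the aura, issues the next batch-alloc
 * request. Returns the number of buffers freed.
 */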
static_always_inline u32
oct_batch_free (vlib_main_t *vm, oct_tx_ctx_t *ctx, vnet_dev_tx_queue_t *txq)
{
  oct_txq_t *ctq = vnet_dev_get_tx_queue_data (txq);
  u8 num_cl;
  u64 ah;
  u32 n_freed = 0, n;
  oct_npa_batch_alloc_cl128_t *cl;

  num_cl = ctq->ba_num_cl;
  if (num_cl)
    {
      u16 off = ctq->hdr_off;
      u32 *bi = (u32 *) ctq->ba_buffer;
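
      /* Hardware signals completion of a batch alloc by overwriting the
       * first 64-bit word of a result cacheline; while it still carries the
       * "invalid" completion code the line is not ready, so the walk stops
       * and remembers where to resume on the next call. */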
      for (cl = ctq->ba_buffer + ctq->ba_first_cl; num_cl > 0; num_cl--, cl++)
        {
          oct_npa_batch_alloc_status_t st;

          if ((st.as_u64 = __atomic_load_n (cl->iova, __ATOMIC_RELAXED)) ==
              OCT_BATCH_ALLOC_IOVA0_MASK + ALLOC_CCODE_INVAL)
            {
            cl_not_ready:
              ctx->batch_alloc_not_ready++;
              n_freed = bi - (u32 *) ctq->ba_buffer;
              if (n_freed > 0)
                vlib_buffer_free_no_next (vm, (u32 *) ctq->ba_buffer,
                                          n_freed);
              ctq->ba_num_cl = num_cl;
              ctq->ba_first_cl = cl - ctq->ba_buffer;
              return n_freed;
            }
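
          /* iova[0] and iova[8] are both pre-initialized to
           * OCT_BATCH_ALLOC_IOVA0_MASK (see the reset loop further down);
           * when the status reports more than 8 pointers, the 9th word must
           * have been overwritten as well, otherwise the upper half of the
           * cacheline has not landed yet. */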
          if (st.status.count > 8 &&
              __atomic_load_n (cl->iova + 8, __ATOMIC_RELAXED) ==
                OCT_BATCH_ALLOC_IOVA0_MASK)
            goto cl_not_ready;
          cl->iova[0] &= OCT_BATCH_ALLOC_IOVA0_MASK;

          if (PREDICT_TRUE (st.status.count == 16))
            {
              /* optimize for likely case where cacheline is full */
              vlib_get_buffer_indices_with_offset (vm, (void **) cl, bi, 16,
                                                   off);
              bi += 16;
            }
          else
            {
              vlib_get_buffer_indices_with_offset (vm, (void **) cl, bi,
                                                   st.status.count, off);
              bi += st.status.count;
            }
        }
      n_freed = bi - (u32 *) ctq->ba_buffer;
      if (n_freed > 0)
        vlib_buffer_free_no_next (vm, (u32 *) ctq->ba_buffer, n_freed);

      /* clear status bits in each cacheline */
      n = cl - ctq->ba_buffer;
      for (u32 i = 0; i < n; i++)
        ctq->ba_buffer[i].iova[0] = ctq->ba_buffer[i].iova[8] =
          OCT_BATCH_ALLOC_IOVA0_MASK;

      ctq->ba_num_cl = ctq->ba_first_cl = 0;
    }

  ah = ctq->aura_handle;
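
  /* Top up: when the aura holds at least 32 free pointers, issue a fresh
   * batch-alloc of up to ROC_CN10K_NPA_BATCH_ALLOC_MAX_PTRS pointers. The
   * request is written with roc_atomic64_casl () to the aura's
   * NPA_LF_AURA_BATCH_ALLOC address, with ctq->ba_buffer as the destination;
   * hardware later deposits the pointers there, 16 per cacheline, which is
   * why ba_num_cl is set to (n + 15) / 16. */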
  if ((n = roc_npa_aura_op_available (ah)) >= 32)
    {
      u64 addr, res;
      n = clib_min (n, ROC_CN10K_NPA_BATCH_ALLOC_MAX_PTRS);
      oct_npa_batch_alloc_compare_t cmp = {
        .compare_s = { .aura = roc_npa_aura_handle_to_aura (ah),
                       .stype = ALLOC_STYPE_STF,
                       .count = n }
      };

      addr = roc_npa_aura_handle_to_base (ah) + NPA_LF_AURA_BATCH_ALLOC;
      res = roc_atomic64_casl (cmp.as_u64, (uint64_t) ctq->ba_buffer,
                               (i64 *) addr);
      if (res == ALLOC_RESULT_ACCEPTED || res == ALLOC_RESULT_NOCORE)
        {
          ctq->ba_num_cl = (n + 15) / 16;
          ctq->ba_first_cl = 0;
        }
      else
        ctx->batch_alloc_issue_fail++;
    }

  return n_freed;
}
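
/*
 * oct_tx_enq1: build one NIX send descriptor for buffer b directly in an
 * LMT line. The descriptor is a send header followed by scatter/gather
 * subdescriptors: the SG word at sg[0] covers the head buffer and up to two
 * tail segments (pointers in sg[1..3]), the SG word at sg[4] covers up to
 * three more (pointers in sg[5..7]), so chains with more than five tail
 * segments cannot be described and are put on ctx->drop instead. Checksum
 * offload requests are translated into the ol3/ol4 fields of the second
 * header word, and the line's size in 16-byte dwords is reported back so
 * the caller can encode it into the LMTST submission.
 */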
static_always_inline u8
oct_tx_enq1 (vlib_main_t *vm, oct_tx_ctx_t *ctx, vlib_buffer_t *b,
             lmt_line_t *line, u32 flags, int simple, int trace, u32 *n,
             u8 *dpl)
{
  u8 n_dwords = 2;
  u32 total_len = 0;
  oct_tx_desc_t d = {
    .hdr_w0 = ctx->hdr_w0_template,
    .sg[0] = { .subdc = NIX_SUBDC_SG },
    .sg[4] = { .subdc = NIX_SUBDC_SG },
  };

  if (!simple && flags & VLIB_BUFFER_NEXT_PRESENT)
    {
      u8 n_tail_segs = 0;
      vlib_buffer_t *tail_segs[5], *t = b;
      while (t->flags & VLIB_BUFFER_NEXT_PRESENT)
        {
          t = vlib_get_buffer (vm, t->next_buffer);
          tail_segs[n_tail_segs++] = t;
          if (n_tail_segs > 5)
            {
              ctx->drop[ctx->n_drop++] = b;

          d.sg[7].u = (u64) vlib_buffer_get_current (tail_segs[4]);
          total_len += d.sg[4].seg3_size = tail_segs[4]->current_length;

          d.sg[6].u = (u64) vlib_buffer_get_current (tail_segs[3]);
          total_len += d.sg[4].seg2_size = tail_segs[3]->current_length;

          d.sg[5].u = (u64) vlib_buffer_get_current (tail_segs[2]);
          total_len += d.sg[4].seg1_size = tail_segs[2]->current_length;

          d.sg[3].u = (u64) vlib_buffer_get_current (tail_segs[1]);
          total_len += d.sg[0].seg3_size = tail_segs[1]->current_length;

          d.sg[2].u = (u64) vlib_buffer_get_current (tail_segs[0]);
          total_len += d.sg[0].seg2_size = tail_segs[0]->current_length;

      d.hdr_w0.sizem1 = n_dwords - 1;
    }
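
  /* Checksum offload: for the IPv4 header checksum, ol3ptr/ol4ptr delimit
   * the IP header (ol4ptr = l3 offset + sizeof (ip4_header_t)); for the L4
   * checksums, ol4ptr points at the UDP/TCP header. The type fields select
   * which checksums the NIX engine inserts. */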
  if (!simple && flags & VNET_BUFFER_F_OFFLOAD)
    {
      vnet_buffer_oflags_t oflags = vnet_buffer (b)->oflags;

      if (oflags & VNET_BUFFER_OFFLOAD_F_IP_CKSUM)
        {
          d.hdr_w1.ol3type = NIX_SENDL3TYPE_IP4_CKSUM;
          d.hdr_w1.ol3ptr = vnet_buffer (b)->l3_hdr_offset;
          d.hdr_w1.ol4ptr =
            vnet_buffer (b)->l3_hdr_offset + sizeof (ip4_header_t);
        }
      if (oflags & VNET_BUFFER_OFFLOAD_F_UDP_CKSUM)
        {
          d.hdr_w1.ol4type = NIX_SENDL4TYPE_UDP_CKSUM;
          d.hdr_w1.ol4ptr = vnet_buffer (b)->l4_hdr_offset;
        }
      else if (oflags & VNET_BUFFER_OFFLOAD_F_TCP_CKSUM)
        {
          d.hdr_w1.ol4type = NIX_SENDL4TYPE_TCP_CKSUM;
          d.hdr_w1.ol4ptr = vnet_buffer (b)->l4_hdr_offset;
        }
    }
  total_len += d.sg[0].seg1_size = b->current_length;
  d.hdr_w0.total = total_len;
  d.sg[1].u = (u64) vlib_buffer_get_current (b);

  if (trace && flags & VLIB_BUFFER_IS_TRACED)
    {
      oct_tx_trace_t *t = vlib_add_trace (vm, ctx->node, b, sizeof (*t));
      t->sw_if_index = vnet_buffer (b)->sw_if_index[VLIB_TX];
    }

  for (u32 i = 0; i < n_dwords; i++)
    line->dwords[i] = d.as_u128[i];
  *dpl = n_dwords;
  *n = *n + 1;
  return n_dwords;
}
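
/*
 * oct_tx_enq16: enqueue up to 16 packets. Each packet's descriptor is
 * written into its own LMT line and the whole group is pushed to the NIX SQ
 * with a single LMTST (steorl) doorbell at the end. Packets are examined
 * eight at a time; if none of the eight carries VLIB_BUFFER_NEXT_PRESENT or
 * VNET_BUFFER_F_OFFLOAD, the cheaper "simple" variant of oct_tx_enq1 is
 * used for the whole group.
 */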
static_always_inline u32
oct_tx_enq16 (vlib_main_t *vm, oct_tx_ctx_t *ctx, vnet_dev_tx_queue_t *txq,
              vlib_buffer_t **b, u32 n_pkts, int trace)
{
  u8 dwords_per_line[16], *dpl = dwords_per_line;
  u64 lmt_arg, ioaddr, n_lines;
  u32 n_left, or_flags_16 = 0, n = 0;
  const u32 not_simple_flags =
    VLIB_BUFFER_NEXT_PRESENT | VNET_BUFFER_F_OFFLOAD;
  lmt_line_t *l = ctx->lmt_lines;

  /* Data Store Memory Barrier - outer shareable domain */
  asm volatile("dmb oshst" ::: "memory");

  for (n_left = n_pkts; n_left >= 8; n_left -= 8, b += 8)
    {
      u32 f0, f1, f2, f3, f4, f5, f6, f7, or_f = 0;

      vlib_prefetch_buffer_header (b[8], LOAD);
      or_f |= f0 = b[0]->flags;
      or_f |= f1 = b[1]->flags;
      vlib_prefetch_buffer_header (b[9], LOAD);
      or_f |= f2 = b[2]->flags;
      or_f |= f3 = b[3]->flags;
      vlib_prefetch_buffer_header (b[10], LOAD);
      or_f |= f4 = b[4]->flags;
      or_f |= f5 = b[5]->flags;
      vlib_prefetch_buffer_header (b[11], LOAD);
      or_f |= f6 = b[6]->flags;
      or_f |= f7 = b[7]->flags;
      vlib_prefetch_buffer_header (b[12], LOAD);
      or_flags_16 |= or_f;

      if ((or_f & not_simple_flags) == 0)
        {
          const int simple = 1;

          oct_tx_enq1 (vm, ctx, b[0], l, f0, simple, trace, &n, &dpl[n]);
          oct_tx_enq1 (vm, ctx, b[1], l + n, f1, simple, trace, &n, &dpl[n]);
          vlib_prefetch_buffer_header (b[13], LOAD);
          oct_tx_enq1 (vm, ctx, b[2], l + n, f2, simple, trace, &n, &dpl[n]);
          oct_tx_enq1 (vm, ctx, b[3], l + n, f3, simple, trace, &n, &dpl[n]);
          vlib_prefetch_buffer_header (b[14], LOAD);
          oct_tx_enq1 (vm, ctx, b[4], l + n, f4, simple, trace, &n, &dpl[n]);
          oct_tx_enq1 (vm, ctx, b[5], l + n, f5, simple, trace, &n, &dpl[n]);
          vlib_prefetch_buffer_header (b[15], LOAD);
          oct_tx_enq1 (vm, ctx, b[6], l + n, f6, simple, trace, &n, &dpl[n]);
          oct_tx_enq1 (vm, ctx, b[7], l + n, f7, simple, trace, &n, &dpl[n]);
        }
      else
        {
          const int simple = 0;

          oct_tx_enq1 (vm, ctx, b[0], l, f0, simple, trace, &n, &dpl[n]);
          oct_tx_enq1 (vm, ctx, b[1], l + n, f1, simple, trace, &n, &dpl[n]);
          vlib_prefetch_buffer_header (b[13], LOAD);
          oct_tx_enq1 (vm, ctx, b[2], l + n, f2, simple, trace, &n, &dpl[n]);
          oct_tx_enq1 (vm, ctx, b[3], l + n, f3, simple, trace, &n, &dpl[n]);
          vlib_prefetch_buffer_header (b[14], LOAD);
          oct_tx_enq1 (vm, ctx, b[4], l + n, f4, simple, trace, &n, &dpl[n]);
          oct_tx_enq1 (vm, ctx, b[5], l + n, f5, simple, trace, &n, &dpl[n]);
          vlib_prefetch_buffer_header (b[15], LOAD);
          oct_tx_enq1 (vm, ctx, b[6], l + n, f6, simple, trace, &n, &dpl[n]);
          oct_tx_enq1 (vm, ctx, b[7], l + n, f7, simple, trace, &n, &dpl[n]);
        }
  for (; n_left > 0; n_left -= 1, b += 1)
    {
      u32 f0 = b[0]->flags;

      oct_tx_enq1 (vm, ctx, b[0], l, f0, 0, trace, &n, &dpl[n]);
    }

  lmt_arg = ctx->lmt_id;
  ioaddr = ctx->lmt_ioaddr;
  n_lines = dpl - dwords_per_line;
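
  /* LMTST argument encoding, as used below: lmt_arg starts out as the
   * per-core LMT id; the number of additional lines (n_lines - 1) is placed
   * at bit 12, and starting at bit 19 each additional line gets a 3-bit
   * field holding its size in 128-bit dwords minus one. The first line's
   * size (minus one) goes into bits 4+ of the I/O address. When no packet
   * in the batch was chained, every line is exactly two dwords, so a
   * constant size can be used. */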

  if (PREDICT_FALSE (!n_lines))
    return n_pkts;

  if (PREDICT_FALSE (or_flags_16 & VLIB_BUFFER_NEXT_PRESENT))
    {
      dpl = dwords_per_line;
      ioaddr |= (dpl[0] - 1) << 4;

      lmt_arg |= (--n_lines) << 12;
      for (u8 bit_off = 19; n_lines; n_lines--, bit_off += 3, dpl++)
        lmt_arg |= ((u64) dpl[1] - 1) << bit_off;
    }
  else
    {
      const u64 n_dwords = 2;
      ioaddr |= (n_dwords - 1) << 4;
      lmt_arg |= (--n_lines) << 12;
      for (u8 bit_off = 19; n_lines; n_lines--, bit_off += 3)
        lmt_arg |= (n_dwords - 1) << bit_off;
    }

  roc_lmt_submit_steorl (lmt_arg, ioaddr);

  return n_pkts;
}
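
/*
 * TX node function: takes the queue lock if the queue is shared, recycles
 * completed buffers via oct_batch_free, then enqueues the frame in blocks
 * of 16 packets, with separate traced and untraced loops. Packets that do
 * not fit into the ring are freed and counted as NO_FREE_SLOTS; packets
 * whose chains were too long were collected in ctx.drop by oct_tx_enq1 and
 * are counted as CHAIN_TOO_LONG and freed at the end.
 */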
VNET_DEV_NODE_FN (oct_tx_node)
(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *frame)
{
  vnet_dev_tx_node_runtime_t *rt = vnet_dev_get_tx_node_runtime (node);
  vnet_dev_tx_queue_t *txq = rt->tx_queue;
  oct_txq_t *ctq = vnet_dev_get_tx_queue_data (txq);
  u32 node_index = node->node_index;
  u32 *from = vlib_frame_vector_args (frame);
  u32 n, n_enq, n_left, n_pkts = frame->n_vectors;
  vlib_buffer_t *buffers[VLIB_FRAME_SIZE + 8], **b = buffers;
  u64 lmt_id = vm->thread_index << ROC_LMT_LINES_PER_CORE_LOG2;

  oct_tx_ctx_t ctx = {
    .node = node,
    .hdr_w0_template = {
      .aura = roc_npa_aura_handle_to_aura (ctq->aura_handle),
    },
    .lmt_id = lmt_id,
    .lmt_ioaddr = ctq->io_addr,
    .lmt_lines = ctq->lmt_addr + (lmt_id << ROC_LMT_LINE_SIZE_LOG2),
  };

  vlib_get_buffers (vm, vlib_frame_vector_args (frame), b, n_pkts);
  for (int i = 0; i < 8; i++)
    b[n_pkts + i] = b[n_pkts - 1];
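  /* The eight extra copies of the last buffer keep the b[8..15] prefetches
   * in oct_tx_enq16 within the array on the final iteration. */
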
  vnet_dev_tx_queue_lock_if_needed (txq);

  n_enq = ctq->n_enq;
  n_enq -= oct_batch_free (vm, &ctx, txq);
  if (PREDICT_FALSE (node->flags & VLIB_NODE_FLAG_TRACE))
    {
      for (n_left = clib_min (n_pkts, txq->size - n_enq), n = 0; n_left >= 16;
           n_left -= 16, b += 16)
        n += oct_tx_enq16 (vm, &ctx, txq, b, 16, /* trace */ 1);

      if (n_left)
        n += oct_tx_enq16 (vm, &ctx, txq, b, n_left, /* trace */ 1);
    }
  else
    {
      for (n_left = clib_min (n_pkts, txq->size - n_enq), n = 0; n_left >= 16;
           n_left -= 16, b += 16)
        n += oct_tx_enq16 (vm, &ctx, txq, b, 16, /* trace */ 0);

      if (n_left)
        n += oct_tx_enq16 (vm, &ctx, txq, b, n_left, /* trace */ 0);
    }
  ctq->n_enq = n_enq + n - ctx.n_drop;

  if (n < n_pkts)
    {
      u32 n_free = n_pkts - n;
      vlib_buffer_free (vm, from + n, n_free);
      vlib_error_count (vm, node->node_index, OCT_TX_NODE_CTR_NO_FREE_SLOTS,
                        n_free);
    }

  if (ctx.n_drop)
    vlib_error_count (vm, node->node_index, OCT_TX_NODE_CTR_CHAIN_TOO_LONG,
                      ctx.n_drop);

  if (ctx.batch_alloc_not_ready)
    vlib_error_count (vm, node_index,
                      OCT_TX_NODE_CTR_AURA_BATCH_ALLOC_NOT_READY,
                      ctx.batch_alloc_not_ready);

  if (ctx.batch_alloc_issue_fail)
    vlib_error_count (vm, node_index,
                      OCT_TX_NODE_CTR_AURA_BATCH_ALLOC_ISSUE_FAIL,
                      ctx.batch_alloc_issue_fail);

  vnet_dev_tx_queue_unlock_if_needed (txq);
  if (ctx.n_drop)
    {
      u32 bi[VLIB_FRAME_SIZE];
      vlib_get_buffer_indices (vm, ctx.drop, bi, ctx.n_drop);
      vlib_buffer_free (vm, bi, ctx.n_drop);
      n_pkts -= ctx.n_drop;
    }
  return n_pkts;
}