/* SPDX-License-Identifier: Apache-2.0
 * Copyright (c) 2023 Cisco Systems, Inc.
 */
#include <vlib/vlib.h>
#include <vppinfra/ring.h>
#include <vppinfra/vector/ip_csum.h>

#include <vnet/dev/dev.h>
#include <vnet/ethernet/ethernet.h>
#include <vnet/ip/ip4_packet.h>
#include <vnet/ip/ip6_packet.h>
#include <vnet/udp/udp_packet.h>
#include <vnet/tcp/tcp_packet.h>

#include <dev_octeon/octeon.h>
typedef struct
{
  union nix_send_hdr_w0_u hdr_w0_template;
  vlib_node_runtime_t *node;
  u32 n_drop;
  vlib_buffer_t *drop[VLIB_FRAME_SIZE];
  u32 batch_alloc_not_ready;
  u32 batch_alloc_issue_fail;
  u16 lmt_id;
  u64 lmt_ioaddr;
  lmt_line_t *lmt_lines;
} oct_tx_ctx_t;
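
/* Reclaim buffers whose transmission is complete and free them back to the
 * buffer pool. The NIX returns transmitted buffers to the NPA aura; a
 * previously issued batch alloc deposits their pointers into the cachelines
 * of ctq->ba_buffer, which are converted back to buffer indices and freed
 * here. Returns the number of buffers freed. */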
static_always_inline u32
oct_batch_free (vlib_main_t *vm, oct_tx_ctx_t *ctx, vnet_dev_tx_queue_t *txq)
{
  oct_txq_t *ctq = vnet_dev_get_tx_queue_data (txq);
  u8 num_cl;
  u64 ah;
  u32 n_freed = 0, n;
  oct_npa_batch_alloc_cl128_t *cl;

  num_cl = ctq->ba_num_cl;
  if (num_cl)
    {
      u16 off = ctq->hdr_off;
      u32 *bi = (u32 *) ctq->ba_buffer;

      for (cl = ctq->ba_buffer + ctq->ba_first_cl; num_cl > 0; num_cl--, cl++)
        {
          u8 count;

          if (cl->status.ccode == ALLOC_CCODE_INVAL)
            {
              /* hardware has not filled this cacheline yet - free what we
                 collected so far and resume from this cacheline next time */
              ctx->batch_alloc_not_ready++;
              n_freed = bi - (u32 *) ctq->ba_buffer;
              if (n_freed > 0)
                vlib_buffer_free_no_next (vm, (u32 *) ctq->ba_buffer,
                                          n_freed);

              ctq->ba_num_cl = num_cl;
              ctq->ba_first_cl = cl - ctq->ba_buffer;
              return n_freed;
            }

          count = cl->status.count;
          cl->status.count = cl->status.ccode = 0;

          if (PREDICT_TRUE (count == 16))
            {
              /* optimize for likely case where cacheline is full */
              vlib_get_buffer_indices_with_offset (vm, (void **) cl, bi, 16,
                                                   off);
              bi += 16;
            }
          else
            {
              vlib_get_buffer_indices_with_offset (vm, (void **) cl, bi,
                                                   count, off);
              bi += count;
            }
        }

      n_freed = bi - (u32 *) ctq->ba_buffer;
      if (n_freed > 0)
        vlib_buffer_free_no_next (vm, (u32 *) ctq->ba_buffer, n_freed);

      /* clear status bits in each cacheline */
      n = cl - ctq->ba_buffer;
      for (u32 i = 0; i < n; i++)
        ctq->ba_buffer[i].iova[0] = 0;

      ctq->ba_num_cl = ctq->ba_first_cl = 0;
    }
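
  /* issue a new batch alloc for the next call; hardware fills the
     ctq->ba_buffer cachelines asynchronously */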
  ah = ctq->aura_handle;

  if ((n = roc_npa_aura_op_available (ah)) >= 32)
    {
      u64 addr, res;

      n = clib_min (n, ROC_CN10K_NPA_BATCH_ALLOC_MAX_PTRS);

      oct_npa_batch_alloc_compare_t cmp = {
        .compare_s = { .aura = roc_npa_aura_handle_to_aura (ah),
                       .stype = ALLOC_STYPE_STF,
                       .count = n }
      };

      addr = roc_npa_aura_handle_to_base (ah) + NPA_LF_AURA_BATCH_ALLOC;
      res = roc_atomic64_casl (cmp.as_u64, (uint64_t) ctq->ba_buffer,
                               (i64 *) addr);
      if (res == ALLOC_RESULT_ACCEPTED || res == ALLOC_RESULT_NOCORE)
        {
          ctq->ba_num_cl = (n + 15) / 16;
          ctq->ba_first_cl = 0;
        }
      else
        ctx->batch_alloc_issue_fail++;
    }

  return n_freed;
}
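
/* Build a NIX send descriptor for one packet and write it into one LMT
 * line. The descriptor is laid out in 16-byte dwords: send header in dword
 * 0, first SG subdescriptor with up to 3 pointers in dwords 1-2, second SG
 * subdescriptor with up to 3 more pointers in dwords 3-4. Returns the
 * number of dwords written, or 0 if the chain was too long and the packet
 * was queued for drop. */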
static_always_inline u8
oct_tx_enq1 (vlib_main_t *vm, oct_tx_ctx_t *ctx, vlib_buffer_t *b,
             lmt_line_t *line, u32 flags, int simple, int trace)
{
  u8 n_dwords = 2;
  u32 total_len = 0;
  oct_tx_desc_t d = {
    .hdr_w0 = ctx->hdr_w0_template,
    .sg[0] = {
      .segs = 1,
      .subdc = NIX_SUBDC_SG,
    },
    .sg[4] = {
      .subdc = NIX_SUBDC_SG,
    },
  };

  if (!simple && flags & VLIB_BUFFER_NEXT_PRESENT)
    {
      u8 n_tail_segs = 0;
      vlib_buffer_t *tail_segs[5], *t = b;

      while (t->flags & VLIB_BUFFER_NEXT_PRESENT)
        {
          if (n_tail_segs >= 5)
            {
              /* descriptor fits at most 6 segments - queue the whole
                 chain for drop */
              ctx->drop[ctx->n_drop++] = b;
              return 0;
            }
          t = vlib_get_buffer (vm, t->next_buffer);
          tail_segs[n_tail_segs++] = t;
        }

      /* fall-through fills SG entries from the last tail segment down */
      switch (n_tail_segs)
        {
        case 5:
          d.sg[7].u = (u64) vlib_buffer_get_current (tail_segs[4]);
          total_len += d.sg[4].seg3_size = tail_segs[4]->current_length;
          d.sg[4].segs++;
        case 4:
          d.sg[6].u = (u64) vlib_buffer_get_current (tail_segs[3]);
          total_len += d.sg[4].seg2_size = tail_segs[3]->current_length;
          d.sg[4].segs++;
        case 3:
          d.sg[5].u = (u64) vlib_buffer_get_current (tail_segs[2]);
          total_len += d.sg[4].seg1_size = tail_segs[2]->current_length;
          d.sg[4].segs++;
        case 2:
          d.sg[3].u = (u64) vlib_buffer_get_current (tail_segs[1]);
          total_len += d.sg[0].seg3_size = tail_segs[1]->current_length;
          d.sg[0].segs++;
        case 1:
          d.sg[2].u = (u64) vlib_buffer_get_current (tail_segs[0]);
          total_len += d.sg[0].seg2_size = tail_segs[0]->current_length;
          d.sg[0].segs++;
        }

      /* 1-2 tail segments fit in dwords 1-2, 3 extends into dword 3,
         4-5 extend into dword 4 */
      n_dwords = n_tail_segs < 3 ? 3 : (n_tail_segs == 3 ? 4 : 5);
      d.hdr_w0.sizem1 = n_dwords - 1;
    }

  if (!simple && flags & VNET_BUFFER_F_OFFLOAD)
    {
      vnet_buffer_oflags_t oflags = vnet_buffer (b)->oflags;

      if (oflags & VNET_BUFFER_OFFLOAD_F_IP_CKSUM)
        {
          d.hdr_w1.ol3type = NIX_SENDL3TYPE_IP4_CKSUM;
          d.hdr_w1.ol3ptr = vnet_buffer (b)->l3_hdr_offset;
          d.hdr_w1.ol4ptr =
            vnet_buffer (b)->l3_hdr_offset + sizeof (ip4_header_t);
        }

      if (oflags & VNET_BUFFER_OFFLOAD_F_UDP_CKSUM)
        {
          d.hdr_w1.ol4type = NIX_SENDL4TYPE_UDP_CKSUM;
          d.hdr_w1.ol4ptr = vnet_buffer (b)->l4_hdr_offset;
        }
      else if (oflags & VNET_BUFFER_OFFLOAD_F_TCP_CKSUM)
        {
          d.hdr_w1.ol4type = NIX_SENDL4TYPE_TCP_CKSUM;
          d.hdr_w1.ol4ptr = vnet_buffer (b)->l4_hdr_offset;
        }
    }

  total_len += d.sg[0].seg1_size = b->current_length;
  d.hdr_w0.total = total_len;
  d.sg[1].u = (u64) vlib_buffer_get_current (b);

  if (trace && flags & VLIB_BUFFER_IS_TRACED)
    {
      oct_tx_trace_t *t = vlib_add_trace (vm, ctx->node, b, sizeof (*t));
      t->desc = d;
      t->sw_if_index = vnet_buffer (b)->sw_if_index[VLIB_TX];
    }

  for (u32 i = 0; i < n_dwords; i++)
    line->dwords[i] = d.as_u128[i];

  return n_dwords;
}
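
/* Enqueue up to 16 packets, one LMT line each, and kick the send queue with
 * a single LMTST submission. Packets are processed 8 at a time; a batch
 * with no chained buffers and no offload requests takes the fixed
 * 2-dwords-per-line fast path. */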
static_always_inline u32
oct_tx_enq16 (vlib_main_t *vm, oct_tx_ctx_t *ctx, vnet_dev_tx_queue_t *txq,
              vlib_buffer_t **b, u32 n_pkts, int trace)
{
  u8 dwords_per_line[16], *dpl = dwords_per_line;
  u64 lmt_arg, ioaddr, n_lines;
  u32 n_left, or_flags_16 = 0;
  const u32 not_simple_flags =
    VLIB_BUFFER_NEXT_PRESENT | VNET_BUFFER_F_OFFLOAD;
  lmt_line_t *l = ctx->lmt_lines;

  /* Data Store Memory Barrier - outer shareable domain */
  asm volatile("dmb oshst" ::: "memory");

  for (n_left = n_pkts; n_left >= 8; n_left -= 8, b += 8, l += 8)
    {
      u32 f0, f1, f2, f3, f4, f5, f6, f7, or_f = 0;
      vlib_prefetch_buffer_header (b[8], LOAD);
      or_f |= f0 = b[0]->flags;
      or_f |= f1 = b[1]->flags;
      vlib_prefetch_buffer_header (b[9], LOAD);
      or_f |= f2 = b[2]->flags;
      or_f |= f3 = b[3]->flags;
      vlib_prefetch_buffer_header (b[10], LOAD);
      or_f |= f4 = b[4]->flags;
      or_f |= f5 = b[5]->flags;
      vlib_prefetch_buffer_header (b[11], LOAD);
      or_f |= f6 = b[6]->flags;
      or_f |= f7 = b[7]->flags;
      vlib_prefetch_buffer_header (b[12], LOAD);
      or_flags_16 |= or_f;

      if ((or_f & not_simple_flags) == 0)
        {
          const int simple = 1;
          oct_tx_enq1 (vm, ctx, b[0], l, f0, simple, trace);
          oct_tx_enq1 (vm, ctx, b[1], l + 1, f1, simple, trace);
          vlib_prefetch_buffer_header (b[13], LOAD);
          oct_tx_enq1 (vm, ctx, b[2], l + 2, f2, simple, trace);
          oct_tx_enq1 (vm, ctx, b[3], l + 3, f3, simple, trace);
          vlib_prefetch_buffer_header (b[14], LOAD);
          oct_tx_enq1 (vm, ctx, b[4], l + 4, f4, simple, trace);
          oct_tx_enq1 (vm, ctx, b[5], l + 5, f5, simple, trace);
          vlib_prefetch_buffer_header (b[15], LOAD);
          oct_tx_enq1 (vm, ctx, b[6], l + 6, f6, simple, trace);
          oct_tx_enq1 (vm, ctx, b[7], l + 7, f7, simple, trace);
          dpl[0] = dpl[1] = dpl[2] = dpl[3] = 2;
          dpl[4] = dpl[5] = dpl[6] = dpl[7] = 2;
        }
      else
        {
          const int simple = 0;
          dpl[0] = oct_tx_enq1 (vm, ctx, b[0], l, f0, simple, trace);
          dpl[1] = oct_tx_enq1 (vm, ctx, b[1], l + 1, f1, simple, trace);
          vlib_prefetch_buffer_header (b[13], LOAD);
          dpl[2] = oct_tx_enq1 (vm, ctx, b[2], l + 2, f2, simple, trace);
          dpl[3] = oct_tx_enq1 (vm, ctx, b[3], l + 3, f3, simple, trace);
          vlib_prefetch_buffer_header (b[14], LOAD);
          dpl[4] = oct_tx_enq1 (vm, ctx, b[4], l + 4, f4, simple, trace);
          dpl[5] = oct_tx_enq1 (vm, ctx, b[5], l + 5, f5, simple, trace);
          vlib_prefetch_buffer_header (b[15], LOAD);
          dpl[6] = oct_tx_enq1 (vm, ctx, b[6], l + 6, f6, simple, trace);
          dpl[7] = oct_tx_enq1 (vm, ctx, b[7], l + 7, f7, simple, trace);
        }
      dpl += 8;
    }

  for (; n_left > 0; n_left -= 1, b += 1, l += 1)
    {
      u32 f0 = b[0]->flags;
      or_flags_16 |= f0;
      dpl++[0] = oct_tx_enq1 (vm, ctx, b[0], l, f0, 0, trace);
    }
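
  /* LMTST argument encoding: bits [11:0] carry the LMT id, bits [18:12]
     the number of additional lines, and starting at bit 19 each additional
     line gets a 3-bit (n_dwords - 1) size field. The first line's size
     goes into bits [7:4] of the I/O address. For example, 4 lines of 2
     dwords each: lmt_arg |= 3 << 12 | 1 << 19 | 1 << 22 | 1 << 25 and
     ioaddr |= 1 << 4. */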
  lmt_arg = ctx->lmt_id;
  ioaddr = ctx->lmt_ioaddr;
  n_lines = n_pkts;

  if (PREDICT_FALSE (or_flags_16 & VLIB_BUFFER_NEXT_PRESENT))
    {
      /* descriptor sizes vary - encode them line by line */
      dpl = dwords_per_line;
      ioaddr |= (dpl[0] - 1) << 4;

      if (n_lines > 1)
        {
          lmt_arg |= (--n_lines) << 12;

          for (u8 bit_off = 19; n_lines; n_lines--, bit_off += 3, dpl++)
            lmt_arg |= ((u64) dpl[1] - 1) << bit_off;
        }
    }
  else
    {
      /* all descriptors are 2 dwords */
      const u64 n_dwords = 2;
      ioaddr |= (n_dwords - 1) << 4;

      if (n_lines > 1)
        {
          lmt_arg |= (--n_lines) << 12;

          for (u8 bit_off = 19; n_lines; n_lines--, bit_off += 3)
            lmt_arg |= (n_dwords - 1) << bit_off;
        }
    }

  roc_lmt_submit_steorl (lmt_arg, ioaddr);

  return n_pkts;
}
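
/* TX node: reclaim completed buffers, then enqueue the frame in batches of
 * 16 packets, limited by the free slots in the send queue. Packets that do
 * not fit are freed and counted as NO_FREE_SLOTS. */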
VNET_DEV_NODE_FN (oct_tx_node)
(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *frame)
{
  vnet_dev_tx_node_runtime_t *rt = vnet_dev_get_tx_node_runtime (node);
  vnet_dev_tx_queue_t *txq = rt->tx_queue;
  oct_txq_t *ctq = vnet_dev_get_tx_queue_data (txq);
  u32 node_index = node->node_index;
  u32 *from = vlib_frame_vector_args (frame);
  u32 n, n_enq, n_left, n_pkts = frame->n_vectors;
  vlib_buffer_t *buffers[VLIB_FRAME_SIZE + 8], **b = buffers;
  u64 lmt_id = vm->thread_index << ROC_LMT_LINES_PER_CORE_LOG2;

  oct_tx_ctx_t ctx = {
    .node = node,
    .hdr_w0_template = {
      .aura = roc_npa_aura_handle_to_aura (ctq->aura_handle),
      .sq = ctq->sq.qid,
      .sizem1 = 1,
    },
    .lmt_id = lmt_id,
    .lmt_ioaddr = ctq->io_addr,
    .lmt_lines = ctq->lmt_addr + (lmt_id << ROC_LMT_LINE_SIZE_LOG2),
  };

  vlib_get_buffers (vm, vlib_frame_vector_args (frame), b, n_pkts);
  /* duplicate the last buffer pointer so batch-of-8 prefetches past the
     end of the frame stay valid */
  for (int i = 0; i < 8; i++)
    b[n_pkts + i] = b[n_pkts - 1];

  vnet_dev_tx_queue_lock_if_needed (txq);

  n_enq = ctq->n_enq;
  n_enq -= oct_batch_free (vm, &ctx, txq);

  if (PREDICT_FALSE (node->flags & VLIB_NODE_FLAG_TRACE))
    {
      for (n_left = clib_min (n_pkts, txq->size - n_enq), n = 0; n_left >= 16;
           n_left -= 16, b += 16)
        n += oct_tx_enq16 (vm, &ctx, txq, b, 16, /* trace */ 1);

      if (n_left)
        n += oct_tx_enq16 (vm, &ctx, txq, b, n_left, /* trace */ 1);
    }
  else
    {
      for (n_left = clib_min (n_pkts, txq->size - n_enq), n = 0; n_left >= 16;
           n_left -= 16, b += 16)
        n += oct_tx_enq16 (vm, &ctx, txq, b, 16, /* trace */ 0);

      if (n_left)
        n += oct_tx_enq16 (vm, &ctx, txq, b, n_left, /* trace */ 0);
    }

  ctq->n_enq = n_enq + n;

  if (n < n_pkts)
    {
      u32 n_free = n_pkts - n;
      vlib_buffer_free (vm, from + n, n_free);
      vlib_error_count (vm, node->node_index, OCT_TX_NODE_CTR_NO_FREE_SLOTS,
                        n_free);
      n_pkts -= n_free;
    }

  if (ctx.n_drop)
    vlib_error_count (vm, node->node_index, OCT_TX_NODE_CTR_CHAIN_TOO_LONG,
                      ctx.n_drop);

  if (ctx.batch_alloc_not_ready)
    vlib_error_count (vm, node_index,
                      OCT_TX_NODE_CTR_AURA_BATCH_ALLOC_NOT_READY,
                      ctx.batch_alloc_not_ready);

  if (ctx.batch_alloc_issue_fail)
    vlib_error_count (vm, node_index,
                      OCT_TX_NODE_CTR_AURA_BATCH_ALLOC_ISSUE_FAIL,
                      ctx.batch_alloc_issue_fail);

  vnet_dev_tx_queue_unlock_if_needed (txq);

  if (ctx.n_drop)
    {
      u32 bi[VLIB_FRAME_SIZE];
      vlib_get_buffer_indices (vm, ctx.drop, bi, ctx.n_drop);
      vlib_buffer_free (vm, bi, ctx.n_drop);
      n_pkts -= ctx.n_drop;
    }

  return n_pkts;
}