/* SPDX-License-Identifier: Apache-2.0
 * Copyright (c) 2023 Cisco Systems, Inc.
 */

#include <vlib/vlib.h>
#include <vnet/dev/dev.h>
#include <vnet/ethernet/ethernet.h>
#include <dev_octeon/octeon.h>
#include <dev_octeon/hw_defs.h>
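
/* Per-dispatch RX state shared by the helpers below: next-frame cursor,
 * per-call counters and the position in the completion queue. */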
typedef struct
{
  u32 next_index;
  u32 sw_if_index;
  u32 hw_if_index;
  u32 n_rx_pkts;
  u32 n_rx_bytes;
  u32 n_segs;
  u32 n_left_to_next;
  u32 *to_next;
  u32 n_traced;
  u32 trace_count;
  u64 parse_w0_or;
  oct_nix_rx_cqe_desc_t *next_desc;
} oct_rx_node_ctx_t;
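
/* Buffer metadata sits immediately in front of the packet data the NIX
 * hardware writes, so stepping back one vlib_buffer_t recovers the buffer
 * header from a segment pointer. */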
static_always_inline vlib_buffer_t *
oct_seg_to_bp (void *p)
{
  return (vlib_buffer_t *) p - 1;
}
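
/* Attach the tail segments of a multi-segment packet to head buffer 'h'.
 * One CQE carries up to two scatter-gather words (sg0, sg1), each
 * describing up to three segments. */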
static_always_inline void
oct_rx_attach_tail (vlib_main_t *vm, oct_rx_node_ctx_t *ctx, vlib_buffer_t *h,
		    oct_nix_rx_cqe_desc_t *d)
{
  u32 tail_sz = 0, n_tail_segs = 0;
  vlib_buffer_t *p, *b;
  u8 segs0 = d->sg0.segs, segs1 = 0;

  if (segs0 < 2)
    return;

  b = oct_seg_to_bp (d->segs0[1]);
  h->next_buffer = vlib_get_buffer_index (vm, b);
  tail_sz += b->current_length = d->sg0.seg2_size;
  n_tail_segs++;

  if (segs0 == 2)
    goto done;

  p = b;
  p->flags = VLIB_BUFFER_NEXT_PRESENT;
  b = oct_seg_to_bp (d->segs0[2]);
  p->next_buffer = vlib_get_buffer_index (vm, b);
  tail_sz += b->current_length = d->sg0.seg3_size;
  n_tail_segs++;

  if (d->sg1.subdc != NIX_SUBDC_SG)
    goto done;

  segs1 = d->sg1.segs;
  if (segs1 == 0)
    goto done;

  p = b;
  p->flags = VLIB_BUFFER_NEXT_PRESENT;
  b = oct_seg_to_bp (d->segs1[0]);
  p->next_buffer = vlib_get_buffer_index (vm, b);
  tail_sz += b->current_length = d->sg1.seg1_size;
  n_tail_segs++;

  if (segs1 == 1)
    goto done;

  p = b;
  p->flags = VLIB_BUFFER_NEXT_PRESENT;
  b = oct_seg_to_bp (d->segs1[1]);
  p->next_buffer = vlib_get_buffer_index (vm, b);
  tail_sz += b->current_length = d->sg1.seg2_size;
  n_tail_segs++;

  if (segs1 == 2)
    goto done;

  p = b;
  p->flags = VLIB_BUFFER_NEXT_PRESENT;
  b = oct_seg_to_bp (d->segs1[2]);
  p->next_buffer = vlib_get_buffer_index (vm, b);
  tail_sz += b->current_length = d->sg1.seg3_size;
  n_tail_segs++;

done:
  /* last buffer in the chain has no next; clear its flags */
  b->flags = 0;
  h->total_length_not_including_first_buffer = tail_sz;
  h->flags |= VLIB_BUFFER_NEXT_PRESENT | VLIB_BUFFER_TOTAL_LENGTH_VALID;
  ctx->n_rx_bytes += tail_sz;
  ctx->n_segs += n_tail_segs;
}
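
/* Translate up to 'n' completion queue entries into buffer indices on the
 * to-next vector, four descriptors per iteration, prefetching buffer
 * headers four descriptors ahead. Multi-segment packets take the slower
 * path through oct_rx_attach_tail. */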
static_always_inline u32
oct_rx_batch (vlib_main_t *vm, oct_rx_node_ctx_t *ctx,
	      vnet_dev_rx_queue_t *rxq, u32 n)
{
  oct_rxq_t *crq = vnet_dev_get_rx_queue_data (rxq);
  vlib_buffer_template_t bt = rxq->buffer_template;
  u32 n_left;
  oct_nix_rx_cqe_desc_t *d = ctx->next_desc;
  vlib_buffer_t *b[4];

  for (n_left = n; n_left >= 8; d += 4, n_left -= 4, ctx->to_next += 4)
    {
      u32 segs = 0;
      clib_prefetch_store (oct_seg_to_bp (d[4].segs0[0]));
      clib_prefetch_store (oct_seg_to_bp (d[5].segs0[0]));
      b[0] = oct_seg_to_bp (d[0].segs0[0]);
      clib_prefetch_store (oct_seg_to_bp (d[6].segs0[0]));
      b[1] = oct_seg_to_bp (d[1].segs0[0]);
      clib_prefetch_store (oct_seg_to_bp (d[7].segs0[0]));
      b[2] = oct_seg_to_bp (d[2].segs0[0]);
      b[3] = oct_seg_to_bp (d[3].segs0[0]);
      ctx->to_next[0] = vlib_get_buffer_index (vm, b[0]);
      ctx->to_next[1] = vlib_get_buffer_index (vm, b[1]);
      ctx->to_next[2] = vlib_get_buffer_index (vm, b[2]);
      ctx->to_next[3] = vlib_get_buffer_index (vm, b[3]);
      b[0]->template = bt;
      b[1]->template = bt;
      b[2]->template = bt;
      b[3]->template = bt;
      ctx->n_rx_bytes += b[0]->current_length = d[0].sg0.seg1_size;
      ctx->n_rx_bytes += b[1]->current_length = d[1].sg0.seg1_size;
      ctx->n_rx_bytes += b[2]->current_length = d[2].sg0.seg1_size;
      ctx->n_rx_bytes += b[3]->current_length = d[3].sg0.seg1_size;
      ctx->n_segs += 4;
      segs = d[0].sg0.segs + d[1].sg0.segs + d[2].sg0.segs + d[3].sg0.segs;

      if (PREDICT_FALSE (segs > 4))
	{
	  oct_rx_attach_tail (vm, ctx, b[0], d + 0);
	  oct_rx_attach_tail (vm, ctx, b[1], d + 1);
	  oct_rx_attach_tail (vm, ctx, b[2], d + 2);
	  oct_rx_attach_tail (vm, ctx, b[3], d + 3);
	}
    }

  for (; n_left; d += 1, n_left -= 1, ctx->to_next += 1)
    {
      b[0] = oct_seg_to_bp (d->segs0[0]);
      ctx->to_next[0] = vlib_get_buffer_index (vm, b[0]);
      b[0]->template = bt;
      ctx->n_rx_bytes += b[0]->current_length = d[0].sg0.seg1_size;
      ctx->n_segs += 1;

      if (d[0].sg0.segs > 1)
	oct_rx_attach_tail (vm, ctx, b[0], d + 0);
    }

  /* ring the CQ doorbell to release the consumed entries back to hw */
  plt_write64 ((crq->cq.wdata | n), crq->cq.door);
  ctx->n_rx_pkts += n;
  ctx->n_left_to_next -= n;
  return n;
}
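
/* Return buffers to the NPA aura with an LMTST batch-free: each LMT line
 * carries the command word w0 plus up to 15 buffer pointers, and a single
 * steorl to the aura I/O address submits all prepared lines. The data
 * word encodes the starting LMT id, the line count minus one, and the
 * per-line size bits. */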
static_always_inline void
oct_rxq_refill_batch (vlib_main_t *vm, u64 lmt_id, u64 addr,
		      oct_npa_lf_aura_batch_free_line_t *lines, u32 *bi,
		      oct_npa_lf_aura_batch_free0_t w0, u64 n_lines)
{
  u64 data;

  for (u32 i = 0; i < n_lines; i++, bi += 15)
    {
      lines[i].w0 = w0;
      vlib_get_buffers (vm, bi, (vlib_buffer_t **) lines[i].data, 15);
    }

  data = lmt_id | ((n_lines - 1) << 12) | ((1ULL << (n_lines * 3)) - 1) << 19;
  roc_lmt_submit_steorl (data, addr);

  /* Data Store Memory Barrier - outer shareable domain */
  asm volatile("dmb oshst" ::: "memory");
}
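
/* Refill the RX queue in multiples of 15 buffers (one LMT line each), at
 * most 16 lines per LMTST submission. On a short allocation, whatever
 * does not fill a whole line is returned to the buffer pool. */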
static_always_inline u32
oct_rxq_refill (vlib_main_t *vm, vnet_dev_rx_queue_t *rxq, u16 n_refill)
{
  const u32 batch_max_lines = 16;
  const u32 bufs_per_line = 15;
  const u32 batch_max_bufs = 15 * 16;

  u32 batch_bufs, n_lines, n_alloc;
  u32 buffer_indices[batch_max_bufs];
  u64 lmt_addr, lmt_id, addr, n_enq = 0;
  u8 bpi = vnet_dev_get_rx_queue_buffer_pool_index (rxq);
  oct_rxq_t *crq = vnet_dev_get_rx_queue_data (rxq);
  oct_npa_lf_aura_batch_free_line_t *lines;

  if (n_refill < bufs_per_line)
    return 0;

  n_lines = n_refill / bufs_per_line;

  addr = crq->aura_batch_free_ioaddr;
  lmt_addr = crq->lmt_base_addr;
  lmt_id = vm->thread_index << ROC_LMT_LINES_PER_CORE_LOG2;
  lmt_addr += lmt_id << ROC_LMT_LINE_SIZE_LOG2;
  lines = (oct_npa_lf_aura_batch_free_line_t *) lmt_addr;

  oct_npa_lf_aura_batch_free0_t w0 = {
    .aura = roc_npa_aura_handle_to_aura (crq->aura_handle),
    .count_eot = 1,
  };

  while (n_lines >= batch_max_lines)
    {
      /* always allocate from this queue's buffer pool so the aura only
       * ever holds buffers it is allowed to recycle */
      n_alloc = vlib_buffer_alloc_from_pool (vm, buffer_indices,
					     batch_max_bufs, bpi);
      if (PREDICT_FALSE (n_alloc < batch_max_bufs))
	goto alloc_fail;
      oct_rxq_refill_batch (vm, lmt_id, addr, lines, buffer_indices, w0,
			    batch_max_lines);
      n_lines -= batch_max_lines;
      n_enq += batch_max_bufs;
    }

  if (n_lines == 0)
    goto done;

  batch_bufs = n_lines * bufs_per_line;
  n_alloc = vlib_buffer_alloc_from_pool (vm, buffer_indices, batch_bufs, bpi);

  if (PREDICT_FALSE (n_alloc < batch_bufs))
    {
    alloc_fail:
      if (n_alloc >= bufs_per_line)
	{
	  u32 n_unalloc;
	  n_lines = n_alloc / bufs_per_line;
	  batch_bufs = n_lines * bufs_per_line;
	  n_unalloc = n_alloc - batch_bufs;

	  if (n_unalloc)
	    vlib_buffer_unalloc_to_pool (vm, buffer_indices + batch_bufs,
					 n_unalloc, bpi);
	}
      else
	{
	  if (n_alloc)
	    vlib_buffer_unalloc_to_pool (vm, buffer_indices, n_alloc, bpi);
	  goto done;
	}
    }

  oct_rxq_refill_batch (vm, lmt_id, addr, lines, buffer_indices, w0, n_lines);
  n_enq += batch_bufs;

done:
  return n_enq;
}
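
/* Add packet traces for up to 'trace_count' of the processed descriptors;
 * the raw CQE descriptor is copied into the trace record so the format
 * function can decode it later. */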
static_always_inline void
oct_rx_trace (vlib_main_t *vm, vlib_node_runtime_t *node,
	      oct_rx_node_ctx_t *ctx, oct_nix_rx_cqe_desc_t *d, u32 n_desc)
{
  u32 i = 0;
  if (PREDICT_TRUE (ctx->trace_count == 0))
    return;

  while (ctx->n_traced < ctx->trace_count && i < n_desc)
    {
      vlib_buffer_t *b = oct_seg_to_bp (d[i].segs0[0]);

      if (PREDICT_TRUE (vlib_trace_buffer (vm, node, ctx->next_index, b,
					   /* follow_chain */ 0)))
	{
	  oct_rx_trace_t *tr = vlib_add_trace (vm, node, b, sizeof (*tr));
	  tr->next_index = ctx->next_index;
	  tr->sw_if_index = ctx->sw_if_index;
	  tr->desc = d[i];
	  ctx->n_traced++;
	}
      i++;
    }
}
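
/* RX path for a single queue: snapshot CQ head/tail from
 * NIX_LF_CQ_OP_STATUS, convert descriptors in batches until the next
 * frame is full or the CQ is drained, then trace, refill and update the
 * interface counters. */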
static_always_inline uword
oct_rx_node_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
		    vlib_frame_t *frame, vnet_dev_port_t *port,
		    vnet_dev_rx_queue_t *rxq, int with_flows)
{
  vnet_main_t *vnm = vnet_get_main ();
  u32 thr_idx = vlib_get_thread_index ();
  oct_rxq_t *crq = vnet_dev_get_rx_queue_data (rxq);
  u32 n_desc, head, n, n_enq;
  u32 cq_size = crq->cq.nb_desc;
  u32 cq_mask = crq->cq.qmask;
  oct_nix_rx_cqe_desc_t *descs = crq->cq.desc_base;
  oct_nix_lf_cq_op_status_t status;
  oct_rx_node_ctx_t _ctx = {
    .next_index = rxq->next_index,
    .sw_if_index = port->intf.sw_if_index,
    .hw_if_index = port->intf.hw_if_index,
  }, *ctx = &_ctx;

  /* get head and tail from NIX_LF_CQ_OP_STATUS */
  status.as_u64 = roc_atomic64_add_sync (crq->cq.wdata, crq->cq.status);
  if (status.cq_err || status.op_err)
    return 0;

  head = status.head;
  n_desc = (status.tail - head) & cq_mask;

  if (n_desc == 0)
    return 0;

  vlib_get_new_next_frame (vm, node, ctx->next_index, ctx->to_next,
			   ctx->n_left_to_next);

  ctx->trace_count = vlib_get_trace_count (vm, node);

  while (1)
    {
      ctx->next_desc = descs + head;
      /* process at most up to the end of the descriptor ring per batch */
      n = clib_min (cq_size - head, clib_min (n_desc, ctx->n_left_to_next));
      n = oct_rx_batch (vm, ctx, rxq, n);
      oct_rx_trace (vm, node, ctx, descs + head, n);

      if (ctx->n_left_to_next == 0)
	break;

      status.as_u64 = roc_atomic64_add_sync (crq->cq.wdata, crq->cq.status);
      if (status.cq_err || status.op_err)
	break;

      head = status.head;
      n_desc = (status.tail - head) & cq_mask;
      if (n_desc == 0)
	break;
    }

  if (ctx->n_traced)
    vlib_set_trace_count (vm, node, ctx->trace_count - ctx->n_traced);

  if (PREDICT_TRUE (ctx->next_index == VNET_DEV_ETH_RX_PORT_NEXT_ETH_INPUT))
    {
      vlib_next_frame_t *nf;
      vlib_frame_t *f;
      ethernet_input_frame_t *ef;
      oct_nix_rx_parse_t p = { .w[0] = ctx->parse_w0_or };
      nf = vlib_node_runtime_get_next_frame (vm, node, ctx->next_index);
      f = vlib_get_frame (vm, nf->frame);
      f->flags = ETH_INPUT_FRAME_F_SINGLE_SW_IF_IDX;

      ef = vlib_frame_scalar_args (f);
      ef->sw_if_index = ctx->sw_if_index;
      ef->hw_if_index = ctx->hw_if_index;

      if (p.f.errcode == 0 && p.f.errlev == 0)
	f->flags |= ETH_INPUT_FRAME_F_IP4_CKSUM_OK;

      vlib_frame_no_append (f);
    }

  vlib_put_next_frame (vm, node, ctx->next_index, ctx->n_left_to_next);

  /* top up the queue with as many buffers as we just consumed */
  n_enq = crq->n_enq - ctx->n_segs;
  n_enq += oct_rxq_refill (vm, rxq, rxq->size - n_enq);
  crq->n_enq = n_enq;

  vlib_increment_combined_counter (
    vnm->interface_main.combined_sw_if_counters + VNET_INTERFACE_COUNTER_RX,
    thr_idx, ctx->hw_if_index, ctx->n_rx_pkts, ctx->n_rx_bytes);

  return ctx->n_rx_pkts;
}
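
/* Node function: poll every RX queue assigned to this node instance. */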
VNET_DEV_NODE_FN (oct_rx_node)
(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *frame)
{
  u32 n_rx = 0;
  foreach_vnet_dev_rx_queue_runtime (rxq, node)
    {
      vnet_dev_port_t *port = rxq->port;
      n_rx += oct_rx_node_inline (vm, node, frame, port, rxq, 0);
    }

  return n_rx;
}