/* SPDX-License-Identifier: Apache-2.0
 * Copyright (c) 2023 Cisco Systems, Inc.
 */

#include <vnet/dev/dev.h>
#include <vnet/dev/pci.h>
#include <vnet/dev/counters.h>
#include <dev_octeon/octeon.h>
#include <vnet/ethernet/ethernet.h>
#include <vnet/plugin/plugin.h>
#include <vpp/app/version.h>

VLIB_REGISTER_LOG_CLASS (oct_log, static) = {
  .class_name = "octeon",
  .subclass_name = "queue",
};
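
/* Format the caller-supplied message, log it together with the ROC error
 * string, and map any ROC failure to a generic vnet_dev internal error. */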
vnet_dev_rv_t
oct_roc_err (vnet_dev_t *dev, int rv, char *fmt, ...)
{
  u8 *s = 0;
  va_list va;

  va_start (va, fmt);
  s = va_format (s, fmt, &va);
  va_end (va);

  log_err (dev, "%v - ROC error %s (%d)", s, roc_error_msg_get (rv), rv);

  vec_free (s);
  return VNET_DEV_ERR_INTERNAL;
}
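
/* RX queue alloc/free hooks; no per-queue resources are needed here beyond
 * the queue data the dev infra already provides, so they only log. */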
vnet_dev_rv_t
oct_rx_queue_alloc (vlib_main_t *vm, vnet_dev_rx_queue_t *rxq)
{
  vnet_dev_port_t *port = rxq->port;
  vnet_dev_t *dev = port->dev;

  log_debug (dev, "rx_queue_alloc: queue %u allocated", rxq->queue_id);
  return VNET_DEV_OK;
}

void
oct_rx_queue_free (vlib_main_t *vm, vnet_dev_rx_queue_t *rxq)
{
  vnet_dev_port_t *port = rxq->port;
  vnet_dev_t *dev = port->dev;

  log_debug (dev, "rx_queue_free: queue %u", rxq->queue_id);
}
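
/* TX queue alloc: reserve a 128-byte aligned DMA buffer able to hold
 * ROC_CN10K_NPA_BATCH_ALLOC_MAX_PTRS pointers for NPA batch-alloc results,
 * pre-filled with OCT_BATCH_ALLOC_IOVA0_MASK. */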
vnet_dev_rv_t
oct_tx_queue_alloc (vlib_main_t *vm, vnet_dev_tx_queue_t *txq)
{
  oct_txq_t *ctq = vnet_dev_get_tx_queue_data (txq);
  vnet_dev_port_t *port = txq->port;
  vnet_dev_t *dev = port->dev;
  u32 sz = sizeof (void *) * ROC_CN10K_NPA_BATCH_ALLOC_MAX_PTRS;
  vnet_dev_rv_t rv;

  log_debug (dev, "tx_queue_alloc: queue %u allocated", txq->queue_id);

  rv = vnet_dev_dma_mem_alloc (vm, dev, sz, 128, (void **) &ctq->ba_buffer);

  if (rv != VNET_DEV_OK)
    return rv;

  clib_memset_u64 (ctq->ba_buffer, OCT_BATCH_ALLOC_IOVA0_MASK,
                   ROC_CN10K_NPA_BATCH_ALLOC_MAX_PTRS);

  return rv;
}

void
oct_tx_queue_free (vlib_main_t *vm, vnet_dev_tx_queue_t *txq)
{
  oct_txq_t *ctq = vnet_dev_get_tx_queue_data (txq);
  vnet_dev_port_t *port = txq->port;
  vnet_dev_t *dev = port->dev;

  log_debug (dev, "tx_queue_free: queue %u", txq->queue_id);

  vnet_dev_dma_mem_free (vm, dev, ctq->ba_buffer);
}
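
/* RX queue init: create an NPA pool/aura backed by the queue's vlib buffer
 * pool, then a completion queue (CQ) and a receive queue (RQ) that pulls
 * buffers from that aura. */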
vnet_dev_rv_t
oct_rxq_init (vlib_main_t *vm, vnet_dev_rx_queue_t *rxq)
{
  oct_rxq_t *crq = vnet_dev_get_rx_queue_data (rxq);
  vnet_dev_t *dev = rxq->port->dev;
  oct_device_t *cd = vnet_dev_get_data (dev);
  vlib_buffer_pool_t *bp =
    vlib_get_buffer_pool (vm, vnet_dev_get_rx_queue_buffer_pool_index (rxq));
  struct roc_nix *nix = cd->nix;
  int rrv;

  struct npa_aura_s aura = {};
  struct npa_pool_s npapool = { .nat_align = 1 };

  if ((rrv = roc_npa_pool_create (&crq->aura_handle, bp->alloc_size, rxq->size,
                                  &aura, &npapool, 0)))
    {
      oct_rxq_deinit (vm, rxq);
      return oct_roc_err (dev, rrv, "roc_npa_pool_create() failed");
    }

  crq->npa_pool_initialized = 1;
  log_notice (dev, "NPA pool created, aura_handle = 0x%lx", crq->aura_handle);
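
  /* Completion queue sized to the number of RX descriptors. */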
  crq->cq = (struct roc_nix_cq){
    .nb_desc = rxq->size,
    .qid = rxq->queue_id,
  };

  if ((rrv = roc_nix_cq_init (nix, &crq->cq)))
    {
      oct_rxq_deinit (vm, rxq);
      return oct_roc_err (dev, rrv,
                          "roc_nix_cq_init(qid = %u, nb_desc = %u) failed",
                          crq->cq.qid, crq->cq.nb_desc);
    }

  crq->cq_initialized = 1;
  log_debug (dev, "CQ %u initialised (qmask 0x%x wdata 0x%lx)", crq->cq.qid,
             crq->cq.qmask, crq->cq.wdata);
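
  /* Receive queue: buffers come from the aura created above; first_skip and
   * later_skip leave room for the buffer pool's external header and the
   * vlib_buffer_t metadata in front of the packet data. */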
  crq->hdr_off = vm->buffer_main->ext_hdr_size;

  crq->rq = (struct roc_nix_rq){
    .qid = rxq->queue_id,
    .aura_handle = crq->aura_handle,
    .first_skip = crq->hdr_off + sizeof (vlib_buffer_t),
    .later_skip = crq->hdr_off + sizeof (vlib_buffer_t),
    .lpb_size = bp->data_size + crq->hdr_off + sizeof (vlib_buffer_t),
    .flow_tag_width = 32,
  };

  if ((rrv = roc_nix_rq_init (nix, &crq->rq, 1 /* disable */)))
    {
      oct_rxq_deinit (vm, rxq);
      return oct_roc_err (dev, rrv, "roc_nix_rq_init(qid = %u) failed",
                          crq->rq.qid);
    }

  crq->rq_initialized = 1;
  crq->lmt_base_addr = roc_idev_lmt_base_addr_get ();
  crq->aura_batch_free_ioaddr =
    (roc_npa_aura_handle_to_base (crq->aura_handle) +
     NPA_LF_AURA_BATCH_FREE0) |
    (0x7 << 4); /* assumed: LMTST size field for a full 128-byte batch-free line */

  log_debug (dev, "RQ %u initialised", crq->rq.qid);

  return VNET_DEV_OK;
}
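
/* Tear down RX queue resources in reverse order of creation; the
 * *_initialized flags make this safe to call on a partially set up queue. */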
void
oct_rxq_deinit (vlib_main_t *vm, vnet_dev_rx_queue_t *rxq)
{
  oct_rxq_t *crq = vnet_dev_get_rx_queue_data (rxq);
  vnet_dev_t *dev = rxq->port->dev;
  int rrv;

  if (crq->rq_initialized)
    {
      rrv = roc_nix_rq_fini (&crq->rq);
      if (rrv)
        oct_roc_err (dev, rrv, "roc_nix_rq_fini() failed");
      crq->rq_initialized = 0;
    }

  if (crq->cq_initialized)
    {
      rrv = roc_nix_cq_fini (&crq->cq);
      if (rrv)
        oct_roc_err (dev, rrv, "roc_nix_cq_fini() failed");
      crq->cq_initialized = 0;
    }

  if (crq->npa_pool_initialized)
    {
      rrv = roc_npa_pool_destroy (crq->aura_handle);
      if (rrv)
        oct_roc_err (dev, rrv, "roc_npa_pool_destroy() failed");
      crq->npa_pool_initialized = 0;
    }
}
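
/* TX queue init: create an NPA pool/aura sized for the worst-case number of
 * outstanding buffers, then a send queue (SQ) using the 16-word SQE format. */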
vnet_dev_rv_t
oct_txq_init (vlib_main_t *vm, vnet_dev_tx_queue_t *txq)
{
  oct_txq_t *ctq = vnet_dev_get_tx_queue_data (txq);
  vnet_dev_t *dev = txq->port->dev;
  oct_device_t *cd = vnet_dev_get_data (dev);
  struct roc_nix *nix = cd->nix;
  struct npa_aura_s aura = {};
  struct npa_pool_s npapool = { .nat_align = 1 };
  int rrv;
  vlib_buffer_pool_t *bp = vlib_get_buffer_pool (vm, 0);

  if ((rrv = roc_npa_pool_create (
         &ctq->aura_handle, bp->alloc_size,
         txq->size * 6 /* worst case - two SG with 3 segs each = 6 */, &aura,
         &npapool, 0)))
    {
      oct_txq_deinit (vm, txq);
      return oct_roc_err (dev, rrv, "roc_npa_pool_create() failed");
    }

  ctq->npa_pool_initialized = 1;
  log_notice (dev, "NPA pool created, aura_handle = 0x%lx", ctq->aura_handle);
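
  /* Send queue sized to the number of TX descriptors. */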
  ctq->sq = (struct roc_nix_sq){
    .nb_desc = txq->size,
    .qid = txq->queue_id,
    .max_sqe_sz = NIX_MAXSQESZ_W16,
  };

  if ((rrv = roc_nix_sq_init (nix, &ctq->sq)))
    {
      oct_txq_deinit (vm, txq);
      return oct_roc_err (
        dev, rrv,
        "roc_nix_sq_init(qid = %u, nb_desc = %u, max_sqe_sz = %u) failed",
        ctq->sq.qid, ctq->sq.nb_desc, ctq->sq.max_sqe_sz);
    }

  ctq->sq_initialized = 1;
  log_debug (dev, "SQ initialised, qid %u, nb_desc %u, max_sqe_sz %u",
             ctq->sq.qid, ctq->sq.nb_desc, ctq->sq.max_sqe_sz);

  ctq->hdr_off = vm->buffer_main->ext_hdr_size;

  if (ctq->sq.lmt_addr == 0)
    ctq->sq.lmt_addr = (void *) nix->lmt_base;
  /* keep only the 128-byte aligned part of the SQ I/O address */
  ctq->io_addr = ctq->sq.io_addr & ~0x7fULL;
  ctq->lmt_addr = ctq->sq.lmt_addr;

  return VNET_DEV_OK;
}
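
/* Tear down TX queue resources; as on the RX side, the *_initialized flags
 * allow this to run against a partially initialised queue. */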
void
oct_txq_deinit (vlib_main_t *vm, vnet_dev_tx_queue_t *txq)
{
  oct_txq_t *ctq = vnet_dev_get_tx_queue_data (txq);
  vnet_dev_t *dev = txq->port->dev;
  int rrv;

  if (ctq->sq_initialized)
    {
      rrv = roc_nix_sq_fini (&ctq->sq);
      if (rrv)
        oct_roc_err (dev, rrv, "roc_nix_sq_fini() failed");
      ctq->sq_initialized = 0;
    }

  if (ctq->npa_pool_initialized)
    {
      rrv = roc_npa_pool_destroy (ctq->aura_handle);
      if (rrv)
        oct_roc_err (dev, rrv, "roc_npa_pool_destroy() failed");
      ctq->npa_pool_initialized = 0;
    }
}
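
/* Per-queue debug formatters: report the software enqueue count, descriptor
 * count and NPA aura state. */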
u8 *
format_oct_rxq_info (u8 *s, va_list *args)
{
  vnet_dev_format_args_t *a = va_arg (*args, vnet_dev_format_args_t *);
  vnet_dev_rx_queue_t *rxq = va_arg (*args, vnet_dev_rx_queue_t *);
  oct_rxq_t *crq = vnet_dev_get_rx_queue_data (rxq);
  u32 indent = format_get_indent (s);

  if (a->debug)
    {
      s = format (s, "n_enq %u cq_nb_desc %u", crq->n_enq, crq->cq.nb_desc);
      s = format (s, "\n%Uaura: id 0x%x count %u limit %u avail %u",
                  format_white_space, indent,
                  roc_npa_aura_handle_to_aura (crq->aura_handle),
                  roc_npa_aura_op_cnt_get (crq->aura_handle),
                  roc_npa_aura_op_limit_get (crq->aura_handle),
                  roc_npa_aura_op_available (crq->aura_handle));
    }
  return s;
}

u8 *
format_oct_txq_info (u8 *s, va_list *args)
{
  vnet_dev_format_args_t *a = va_arg (*args, vnet_dev_format_args_t *);
  vnet_dev_tx_queue_t *txq = va_arg (*args, vnet_dev_tx_queue_t *);
  oct_txq_t *ctq = vnet_dev_get_tx_queue_data (txq);
  u32 indent = format_get_indent (s);

  if (a->debug)
    {
      s = format (s, "n_enq %u sq_nb_desc %u io_addr %p lmt_addr %p",
                  ctq->n_enq, ctq->sq.nb_desc, ctq->io_addr, ctq->lmt_addr);
      s = format (s, "\n%Uaura: id 0x%x count %u limit %u avail %u",
                  format_white_space, indent,
                  roc_npa_aura_handle_to_aura (ctq->aura_handle),
                  roc_npa_aura_op_cnt_get (ctq->aura_handle),
                  roc_npa_aura_op_limit_get (ctq->aura_handle),
                  roc_npa_aura_op_available (ctq->aura_handle));
    }
  return s;
}