#include <base/roc_api.h>
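+/* Free buffers held by an aura. num_buffers bounds how many buffers are
+   freed; 0 means free everything the aura currently holds. */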
static_always_inline u32
-oct_aura_free_all_buffers (vlib_main_t *vm, u64 aura_handle, u16 hdr_off)
+oct_aura_free_all_buffers (vlib_main_t *vm, u64 aura_handle, u16 hdr_off,
+ u32 num_buffers)
{
u32 n = 0;
u64 iova;

while ((iova = roc_npa_aura_op_alloc (aura_handle, 0)))
{
vlib_buffer_t *b = (void *) iova + hdr_off;
vlib_buffer_free_one (vm, vlib_get_buffer_index (vm, b));
n++;
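+ /* stop once the requested number of buffers has been freed */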
+ if (num_buffers && n == num_buffers)
+ break;
}
return n;
}
vnet_dev_rv_t oct_rxq_init (vlib_main_t *, vnet_dev_rx_queue_t *);
vnet_dev_rv_t oct_txq_init (vlib_main_t *, vnet_dev_tx_queue_t *);
void oct_rxq_deinit (vlib_main_t *, vnet_dev_rx_queue_t *);
+int oct_drain_queue (vlib_main_t *vm, vnet_dev_rx_queue_t *rxq);
void oct_txq_deinit (vlib_main_t *, vnet_dev_tx_queue_t *);
format_function_t format_oct_rxq_info;
format_function_t format_oct_txq_info;
if ((rrv = roc_nix_rq_ena_dis (&crq->rq, 0)))
oct_roc_err (dev, rrv, "roc_nix_rq_ena_dis() failed");
- n = oct_aura_free_all_buffers (vm, crq->aura_handle, crq->hdr_off);
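+ /* drain packets still sitting in the CQ first, then free whatever
+ remains in the aura */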
+ n = oct_drain_queue (vm, rxq);
+ n += oct_aura_free_all_buffers (vm, crq->aura_handle, crq->hdr_off,
+ crq->n_enq - n);
if (crq->n_enq - n > 0)
log_err (dev, "%u buffers leaked on rx queue %u stop", crq->n_enq - n,
rxq->queue_id);
oct_npa_batch_alloc_cl128_t *cl;
u32 n, off = ctq->hdr_off;
- n = oct_aura_free_all_buffers (vm, ctq->aura_handle, off);
- ctq->n_enq -= n;
-
- if (ctq->n_enq > 0 && ctq->ba_num_cl > 0)
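+ /* free the buffers parked in the batch-alloc scratch area first */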
+ if (ctq->ba_num_cl > 0)
for (n = ctq->ba_num_cl, cl = ctq->ba_buffer + ctq->ba_first_cl; n;
cl++, n--)
{
oct_npa_batch_alloc_status_t st;

st.as_u64 = __atomic_load_n (cl->iova, __ATOMIC_ACQUIRE);
if (st.status.ccode != ALLOC_CCODE_INVAL)
for (u32 i = 0; i < st.status.count; i++)
{
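+ /* words 0 and 8 of the cacheline double as batch-alloc status words;
+ mask the status bits off before using them as buffer addresses */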
+#if (CLIB_DEBUG > 0)
+ if (!i || (i == 8))
+ cl->iova[i] &= OCT_BATCH_ALLOC_IOVA0_MASK;
+#endif
vlib_buffer_t *b = (vlib_buffer_t *) (cl->iova[i] + off);
vlib_buffer_free_one (vm, vlib_get_buffer_index (vm, b));
ctq->n_enq--;
}
}
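+ /* free everything still held by the aura */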
+ n = oct_aura_free_all_buffers (vm, ctq->aura_handle, off,
+ 0 /* To free all available buffers */);
+ ctq->n_enq -= n;
+
if (ctq->n_enq > 0)
log_err (dev, "%u buffers leaked on tx queue %u stop", ctq->n_enq,
txq->queue_id);
log_debug (dev, "%u buffers freed from tx queue %u", n, txq->queue_id);
ctq->n_enq = 0;
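+ /* reset the batch-alloc scratch area bookkeeping */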
+ ctq->ba_num_cl = ctq->ba_first_cl = 0;
}
vnet_dev_rv_t
return VNET_DEV_OK;
}
+static_always_inline vlib_buffer_t *
+oct_seg_to_bp (void *p)
+{
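+ /* the segment pointer points at buffer data, which immediately
+ follows the vlib_buffer_t header */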
+ return (vlib_buffer_t *) p - 1;
+}
+
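+/* free the trailing segments of a multi-segment packet; the first
+   segment (segs0[0]) is freed by the caller */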
+static void
+oct_multi_seg_free (vlib_main_t *vm, vnet_dev_rx_queue_t *rxq,
+ oct_nix_rx_cqe_desc_t *d)
+{
+ vlib_buffer_t *t;
+ u8 s0 = d->sg0.segs, s1;
+
+ t = oct_seg_to_bp (d->segs0[1]);
+ vlib_buffer_free_one (vm, vlib_get_buffer_index (vm, t));
+
+ if (s0 == 2)
+ return;
+ t = oct_seg_to_bp (d->segs0[2]);
+ vlib_buffer_free_one (vm, vlib_get_buffer_index (vm, t));
+
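+ /* packets with more than three segments carry a second SG
+ subdescriptor */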
+ if (d->sg1.subdc != NIX_SUBDC_SG)
+ return;
+
+ s1 = d->sg1.segs;
+ if (s1 == 0)
+ return;
+
+ t = oct_seg_to_bp (d->segs1[0]);
+ vlib_buffer_free_one (vm, vlib_get_buffer_index (vm, t));
+
+ if (s1 == 1)
+ return;
+ t = oct_seg_to_bp (d->segs1[1]);
+ vlib_buffer_free_one (vm, vlib_get_buffer_index (vm, t));
+
+ if (s1 == 2)
+ return;
+ t = oct_seg_to_bp (d->segs1[2]);
+ vlib_buffer_free_one (vm, vlib_get_buffer_index (vm, t));
+}
+
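+/* walk the completion queue, free the buffers attached to every pending
+   descriptor and release the entries back to hardware; returns the
+   number of descriptors processed */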
+int
+oct_drain_queue (vlib_main_t *vm, vnet_dev_rx_queue_t *rxq)
+{
+ oct_rxq_t *crq = vnet_dev_get_rx_queue_data (rxq);
+ oct_nix_rx_cqe_desc_t *descs = crq->cq.desc_base;
+ oct_nix_lf_cq_op_status_t status;
+ u32 cq_size = crq->cq.nb_desc;
+ u32 cq_mask = crq->cq.qmask;
+ vlib_buffer_t *b;
+ u32 i, head, n_desc, n, f_cnt = 0;
+
+ /* Free all CQ entries */
+ while (1)
+ {
+ /* get head and tail from NIX_LF_CQ_OP_STATUS */
+ status.as_u64 = roc_atomic64_add_sync (crq->cq.wdata, crq->cq.status);
+ if (status.cq_err || status.op_err)
+ return f_cnt;
+
+ head = status.head;
+ n_desc = (status.tail - head) & cq_mask;
+
+ if (n_desc == 0)
+ return f_cnt;
+
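+ /* process entries up to the end of the ring; wrap on the next pass */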
+ n = clib_min (cq_size - head, n_desc);
+ for (i = head; i < head + n; i++)
+ {
+ b = oct_seg_to_bp (descs[i].segs0[0]);
+ vlib_buffer_free_one (vm, vlib_get_buffer_index (vm, b));
+ if (descs[i].sg0.segs > 1)
+ oct_multi_seg_free (vm, rxq, &descs[i]);
+ }
+ f_cnt += n;
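+ /* ring the CQ doorbell to release the processed entries */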
+ plt_write64 ((crq->cq.wdata | n), crq->cq.door);
+ plt_wmb ();
+ }
+
+ return f_cnt;
+}
+
void
oct_rxq_deinit (vlib_main_t *vm, vnet_dev_rx_queue_t *rxq)
{
if (crq->cq_initialized)
{
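+ /* flush any packets still pending in the CQ before finalizing it */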
+ oct_drain_queue (vm, rxq);
rrv = roc_nix_cq_fini (&crq->cq);
if (rrv)
oct_roc_err (dev, rrv, "roc_nix_cq_fini() failed");
foreach_vnet_dev_rx_queue_runtime (rxq, node)
{
vnet_dev_port_t *port = rxq->port;
+
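+ /* skip rx queues that are not started */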
+ if (!rxq->started)
+ continue;
+
n_rx += oct_rx_node_inline (vm, node, frame, port, rxq, 0);
}