return n;
}
+#ifdef PLATFORM_OCTEON9
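+/* Octeon9 has no NPA batch-free LMT lines, so refill returns each buffer
+ * to the aura individually with a store-pair. */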
+static_always_inline u32
+oct_rxq_refill (vlib_main_t *vm, vnet_dev_rx_queue_t *rxq, u16 n_refill)
+{
+ u32 n_alloc, n_free;
+ u32 buffer_indices[n_refill];
+ vlib_buffer_t *buffers[n_refill];
+ u8 bpi = vnet_dev_get_rx_queue_buffer_pool_index (rxq);
+ oct_rxq_t *crq = vnet_dev_get_rx_queue_data (rxq);
+ u64 aura = roc_npa_aura_handle_to_aura (crq->aura_handle);
+  const u64 addr =
+    roc_npa_aura_handle_to_base (crq->aura_handle) + NPA_LF_AURA_OP_FREE0;
+
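+  /* Defer refill until a full batch of 256 buffers is needed */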
+ if (n_refill < 256)
+ return 0;
+
+  n_alloc = vlib_buffer_alloc_from_pool (vm, buffer_indices, n_refill, bpi);
+ if (PREDICT_FALSE (n_alloc < n_refill))
+ goto alloc_fail;
+
+ vlib_get_buffers (vm, buffer_indices, (vlib_buffer_t **) buffers, n_alloc);
+
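+  /* Store-pair writes (buffer address, aura) to the NPA free doorbell */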
+ for (n_free = 0; n_free < n_alloc; n_free++)
+ roc_store_pair ((u64) buffers[n_free], aura, addr);
+
+ return n_alloc;
+
+alloc_fail:
+ vlib_buffer_unalloc_to_pool (vm, buffer_indices, n_alloc, bpi);
+ return 0;
+}
+#else
static_always_inline void
oct_rxq_refill_batch (vlib_main_t *vm, u64 lmt_id, u64 addr,
oct_npa_lf_aura_batch_free_line_t *lines, u32 *bi,
return n_enq;
}
+#endif
static_always_inline void
oct_rx_trace (vlib_main_t *vm, vlib_node_runtime_t *node,
lmt_line_t *lmt_lines;
} oct_tx_ctx_t;
+#ifdef PLATFORM_OCTEON9
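+/* Octeon9 TX completion: hardware returns sent buffers to the NPA aura;
+ * pull them back in bulk and release them to the vlib buffer pool. */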
+static_always_inline u32
+oct_batch_free (vlib_main_t *vm, oct_tx_ctx_t *ctx, vnet_dev_tx_queue_t *txq)
+{
+ oct_txq_t *ctq = vnet_dev_get_tx_queue_data (txq);
+ u16 off = ctq->hdr_off;
+ u64 ah = ctq->aura_handle;
+ u32 n_freed = 0, n;
+
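+  /* Only drain once at least 32 completed buffers are available */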
+ if ((n = roc_npa_aura_op_available (ah)) >= 32)
+ {
+ u64 buffers[n];
+ u32 bi[n];
+
+ n_freed = roc_npa_aura_op_bulk_alloc (ah, buffers, n, 0, 1);
+ vlib_get_buffer_indices_with_offset (vm, (void **) &buffers, bi, n_freed,
+ off);
+ vlib_buffer_free_no_next (vm, bi, n_freed);
+ }
+
+ return n_freed;
+}
+
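+/* Copy one descriptor into the LMT line and submit it with LDEOR */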
+static_always_inline void
+oct_lmt_copy (void *lmt_addr, u64 io_addr, void *desc, u64 dwords)
+{
+ u64 lmt_status;
+
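+  /* A zero status means the submit was not accepted; retry */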
+ do
+ {
+ roc_lmt_mov_seg (lmt_addr, desc, dwords);
+ lmt_status = roc_lmt_submit_ldeor (io_addr);
+ }
+ while (lmt_status == 0);
+}
+#else
static_always_inline u32
oct_batch_free (vlib_main_t *vm, oct_tx_ctx_t *ctx, vnet_dev_tx_queue_t *txq)
{
return n_freed;
}
+#endif
static_always_inline u8
oct_tx_enq1 (vlib_main_t *vm, oct_tx_ctx_t *ctx, vlib_buffer_t *b,
return 0;
}
+#ifdef PLATFORM_OCTEON9
+  /* Octeon9: a single LMT line is reused for every descriptor */
+ line = ctx->lmt_lines;
+#endif
+
if (!simple && flags & VLIB_BUFFER_NEXT_PRESENT)
{
u8 n_tail_segs = 0;
t->sw_if_index = vnet_buffer (b)->sw_if_index[VLIB_TX];
}
+#ifdef PLATFORM_OCTEON9
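+  /* Octeon9: one LMTST per descriptor, copied and submitted immediately */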
+ oct_lmt_copy (line, ctx->lmt_ioaddr, &d, n_dwords);
+#else
for (u32 i = 0; i < n_dwords; i++)
line->dwords[i] = d.as_u128[i];
+#endif
*dpl = n_dwords;
*n = *n + 1;
vlib_buffer_t **b, u32 n_pkts, int trace)
{
u8 dwords_per_line[16], *dpl = dwords_per_line;
- u64 lmt_arg, ioaddr, n_lines;
+  u64 __clib_unused lmt_arg, ioaddr, n_lines;
u32 n_left, or_flags_16 = 0, n = 0;
const u32 not_simple_flags =
VLIB_BUFFER_NEXT_PRESENT | VNET_BUFFER_F_OFFLOAD;
if (PREDICT_FALSE (!n_lines))
return n_pkts;
+#ifndef PLATFORM_OCTEON9
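+  /* Non-Octeon9: descriptors were staged in LMT lines; submit them in one
+   * STEORL burst below */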
if (PREDICT_FALSE (or_flags_16 & VLIB_BUFFER_NEXT_PRESENT))
{
dpl = dwords_per_line;
}
roc_lmt_submit_steorl (lmt_arg, ioaddr);
+#endif
return n_pkts;
}
u32 *from = vlib_frame_vector_args (frame);
u32 n, n_enq, n_left, n_pkts = frame->n_vectors;
vlib_buffer_t *buffers[VLIB_FRAME_SIZE + 8], **b = buffers;
+#ifdef PLATFORM_OCTEON9
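+  /* Octeon9 uses a fixed LMT line, so no per-thread line id is needed */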
+ u64 lmt_id = 0;
+#else
u64 lmt_id = vm->thread_index << ROC_LMT_LINES_PER_CORE_LOG2;
+#endif
oct_tx_ctx_t ctx = {
.node = node,