#include <memif/memif.h>
#include <memif/private.h>
-#define foreach_memif_tx_func_error \
-_(NO_FREE_SLOTS, "no free tx slots") \
-_(ROLLBACK, "no enough space in tx buffers")
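+/* Each error is now a (symbol, counter name, severity, description)
+   tuple, so it can be exported as a per-node error counter with a
+   severity rather than a bare string. */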
+#define foreach_memif_tx_func_error \
+ _ (NO_FREE_SLOTS, no_free_slots, ERROR, "no free tx slots") \
+ _ (ROLLBACK, rollback, ERROR, "not enough space in tx buffers")
typedef enum
{
-#define _(f,s) MEMIF_TX_ERROR_##f,
+#define _(f, n, s, d) MEMIF_TX_ERROR_##f,
foreach_memif_tx_func_error
#undef _
MEMIF_TX_N_ERROR,
} memif_tx_func_error_t;
-static char *memif_tx_func_error_strings[] = {
-#define _(n,s) s,
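+/* Counter descriptors replace the plain string table: the stringified
+   counter name, the description and the severity fill each
+   vlib_error_desc_t entry. */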
+static vlib_error_desc_t memif_tx_func_error_counters[] = {
+#define _(f, n, s, d) { #n, d, VL_COUNTER_SEVERITY_##s },
foreach_memif_tx_func_error
#undef _
};
memif_per_thread_data_t * ptd)
{
memif_ring_t *ring;
- u32 *buffers = vlib_frame_args (frame);
+ u32 *buffers = vlib_frame_vector_args (frame);
u32 n_left = frame->n_vectors;
u32 n_copy_op;
u16 ring_size, mask, slot, free_slots;
memif_copy_op_t *co;
memif_region_index_t last_region = ~0;
void *last_region_shm = 0;
+ u16 head, tail;
ring = mq->ring;
ring_size = 1 << mq->log2_ring_size;
retry:
- free_slots = ring->tail - mq->last_tail;
- mq->last_tail += free_slots;
- slot = (type == MEMIF_RING_S2M) ? ring->head : ring->tail;
-
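+  /* Each side owns one ring pointer; the peer's pointer is loaded with
+     acquire semantics to pair with the peer's release store, while our
+     own pointer can be read plainly. */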
if (type == MEMIF_RING_S2M)
- free_slots = ring_size - ring->head + mq->last_tail;
+ {
+ slot = head = ring->head;
+ tail = __atomic_load_n (&ring->tail, __ATOMIC_ACQUIRE);
+ mq->last_tail += tail - mq->last_tail;
+ free_slots = ring_size - head + mq->last_tail;
+ }
else
- free_slots = ring->head - ring->tail;
+ {
+ slot = tail = ring->tail;
+ head = __atomic_load_n (&ring->head, __ATOMIC_ACQUIRE);
+ mq->last_tail += tail - mq->last_tail;
+ free_slots = head - tail;
+ }
while (n_left && free_slots)
{
b2 = vlib_get_buffer (vm, ptd->buffers[co[2].buffer_vec_index]);
b3 = vlib_get_buffer (vm, ptd->buffers[co[3].buffer_vec_index]);
- clib_memcpy (co[0].data, b0->data + co[0].buffer_offset,
- co[0].data_len);
- clib_memcpy (co[1].data, b1->data + co[1].buffer_offset,
- co[1].data_len);
- clib_memcpy (co[2].data, b2->data + co[2].buffer_offset,
- co[2].data_len);
- clib_memcpy (co[3].data, b3->data + co[3].buffer_offset,
- co[3].data_len);
+ clib_memcpy_fast (co[0].data, b0->data + co[0].buffer_offset,
+ co[0].data_len);
+ clib_memcpy_fast (co[1].data, b1->data + co[1].buffer_offset,
+ co[1].data_len);
+ clib_memcpy_fast (co[2].data, b2->data + co[2].buffer_offset,
+ co[2].data_len);
+ clib_memcpy_fast (co[3].data, b3->data + co[3].buffer_offset,
+ co[3].data_len);
co += 4;
n_copy_op -= 4;
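+      /* remainder loop: fewer than four copy operations left */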
while (n_copy_op)
{
b0 = vlib_get_buffer (vm, ptd->buffers[co[0].buffer_vec_index]);
- clib_memcpy (co[0].data, b0->data + co[0].buffer_offset,
- co[0].data_len);
+ clib_memcpy_fast (co[0].data, b0->data + co[0].buffer_offset,
+ co[0].data_len);
co += 1;
n_copy_op -= 1;
}
vec_reset_length (ptd->copy_ops);
vec_reset_length (ptd->buffers);
- CLIB_MEMORY_STORE_BARRIER ();
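+  /* A release store publishes the descriptor writes above together with
+     the new producer index, so the explicit store barrier goes away. */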
if (type == MEMIF_RING_S2M)
- ring->head = slot;
+ __atomic_store_n (&ring->head, slot, __ATOMIC_RELEASE);
else
- ring->tail = slot;
+ __atomic_store_n (&ring->tail, slot, __ATOMIC_RELEASE);
if (n_left && n_retries--)
goto retry;
mq->int_count++;
}
- vlib_buffer_free (vm, vlib_frame_args (frame), frame->n_vectors);
+ vlib_buffer_free (vm, vlib_frame_vector_args (frame), frame->n_vectors);
return frame->n_vectors;
}
memif_per_thread_data_t * ptd)
{
memif_ring_t *ring = mq->ring;
- u32 *buffers = vlib_frame_args (frame);
+ u32 *buffers = vlib_frame_vector_args (frame);
u32 n_left = frame->n_vectors;
u16 slot, free_slots, n_free;
u16 ring_size = 1 << mq->log2_ring_size;
u16 mask = ring_size - 1;
int n_retries = 5;
vlib_buffer_t *b0;
+ u16 head, tail;
retry:
- n_free = ring->tail - mq->last_tail;
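+  /* The peer advances tail as it consumes slots; an acquire load makes
+     its writes visible before the consumed buffers are recycled below. */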
+ slot = tail = __atomic_load_n (&ring->tail, __ATOMIC_ACQUIRE);
+ head = ring->head;
+
+ n_free = tail - mq->last_tail;
if (n_free >= 16)
{
vlib_buffer_free_from_ring_no_next (vm, mq->buffers,
mq->last_tail += n_free;
}
- slot = ring->head;
- free_slots = ring_size - ring->head + mq->last_tail;
+ free_slots = ring_size - head + mq->last_tail;
while (n_left && free_slots)
{
}
no_free_slots:
- CLIB_MEMORY_STORE_BARRIER ();
- ring->head = slot;
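+  /* Same pairing as the non-zero-copy path: release the head update so
+     the peer observes fully written descriptors first. */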
+ __atomic_store_n (&ring->head, slot, __ATOMIC_RELEASE);
if (n_left && n_retries--)
goto retry;
thread_index);
u8 tx_queues = vec_len (mif->tx_queues);
- if (tx_queues < vec_len (vlib_mains))
+ if (tx_queues < vlib_get_n_threads ())
{
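+      /* fewer tx queues than threads: queues are shared, selected
+         round-robin by thread index */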
ASSERT (tx_queues > 0);
mq = vec_elt_at_index (mif->tx_queues, thread_index % tx_queues);
static clib_error_t *
memif_interface_rx_mode_change (vnet_main_t * vnm, u32 hw_if_index, u32 qid,
- vnet_hw_interface_rx_mode mode)
+ vnet_hw_if_rx_mode mode)
{
memif_main_t *mm = &memif_main;
vnet_hw_interface_t *hw = vnet_get_hw_interface (vnm, hw_if_index);
memif_if_t *mif = pool_elt_at_index (mm->interfaces, hw->dev_instance);
memif_queue_t *mq = vec_elt_at_index (mif->rx_queues, qid);
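+  /* In polling mode the peer does not need to send interrupts, so mask
+     them; interrupt and adaptive modes leave them unmasked. */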
- if (mode == VNET_HW_INTERFACE_RX_MODE_POLLING)
+ if (mode == VNET_HW_IF_RX_MODE_POLLING)
mq->ring->flags |= MEMIF_RING_FLAG_MASK_INT;
else
mq->ring->flags &= ~MEMIF_RING_FLAG_MASK_INT;
return 0;
}
-static clib_error_t *
-memif_interface_admin_up_down (vnet_main_t * vnm, u32 hw_if_index, u32 flags)
-{
- memif_main_t *mm = &memif_main;
- vnet_hw_interface_t *hw = vnet_get_hw_interface (vnm, hw_if_index);
- memif_if_t *mif = pool_elt_at_index (mm->interfaces, hw->dev_instance);
- static clib_error_t *error = 0;
-
- if (flags & VNET_SW_INTERFACE_FLAG_ADMIN_UP)
- mif->flags |= MEMIF_IF_FLAG_ADMIN_UP;
- else
- mif->flags &= ~MEMIF_IF_FLAG_ADMIN_UP;
-
- return error;
-}
-
static clib_error_t *
memif_subif_add_del_function (vnet_main_t * vnm,
u32 hw_if_index,
.format_device = format_memif_device,
.format_tx_trace = format_memif_tx_trace,
.tx_function_n_errors = MEMIF_TX_N_ERROR,
- .tx_function_error_strings = memif_tx_func_error_strings,
+ .tx_function_error_counters = memif_tx_func_error_counters,
.rx_redirect_to_node = memif_set_interface_next_node,
.clear_counters = memif_clear_hw_interface_counters,
.admin_up_down_function = memif_interface_admin_up_down,