#include <memif/memif.h>
#include <memif/private.h>
-#define foreach_memif_input_error
+#define foreach_memif_input_error \
+ _(BUFFER_ALLOC_FAIL, "buffer allocation failed") \
+ _(NOT_IP, "not ip packet")
typedef enum
{
#define _(f,s) MEMIF_INPUT_ERROR_##f,
 foreach_memif_input_error
#undef _
 MEMIF_INPUT_N_ERROR,
} memif_input_error_t;
-static char *memif_input_error_strings[] = {
+static __clib_unused char *memif_input_error_strings[] = {
#define _(n,s) s,
foreach_memif_input_error
#undef _
};

typedef struct
{
 u32 next_index;
 u32 hw_if_index;
 u16 ring;
} memif_input_trace_t;
-static u8 *
+static __clib_unused u8 *
format_memif_input_trace (u8 * s, va_list * args)
{
CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
memif_input_trace_t *t = va_arg (*args, memif_input_trace_t *);
- uword indent = format_get_indent (s);
+ u32 indent = format_get_indent (s);
s = format (s, "memif: hw_if_index %d next-index %d",
t->hw_if_index, t->next_index);
s = format (s, "\n%Uslot: ring %u", format_white_space, indent + 2,
t->ring);
return s;
}
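+/* pick the next node by looking at the IP version nibble of the first
+   payload byte: 0x4x is IPv4, 0x6x is IPv6, anything else is dropped */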
+static_always_inline u32
+memif_next_from_ip_hdr (vlib_node_runtime_t * node, vlib_buffer_t * b)
+{
+ u8 *ptr = vlib_buffer_get_current (b);
+ u8 v = *ptr & 0xf0;
+
+ if (PREDICT_TRUE (v == 0x40))
+ return VNET_DEVICE_INPUT_NEXT_IP4_NCS_INPUT;
+ else if (PREDICT_TRUE (v == 0x60))
+ return VNET_DEVICE_INPUT_NEXT_IP6_INPUT;
+
+ b->error = node->errors[MEMIF_INPUT_ERROR_NOT_IP];
+ return VNET_DEVICE_INPUT_NEXT_DROP;
+}
+
static_always_inline void
-memif_prefetch (vlib_main_t * vm, u32 bi)
+memif_trace_buffer (vlib_main_t * vm, vlib_node_runtime_t * node,
+ memif_if_t * mif, vlib_buffer_t * b, u32 next, u16 qid,
+ uword * n_tracep)
+{
+ VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b);
+
+ if (PREDICT_TRUE (b != 0))
+ {
+ memif_input_trace_t *tr;
+ vlib_trace_buffer (vm, node, next, b, /* follow_chain */ 0);
+ vlib_set_trace_count (vm, node, --(*n_tracep));
+ tr = vlib_add_trace (vm, node, b, sizeof (*tr));
+ tr->next_index = next;
+ tr->hw_if_index = mif->hw_if_index;
+ tr->ring = qid;
+ }
+}
+
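+/* record a pending memcpy from shared memory into a vlib buffer; the
+   accumulated copy ops are executed later in one batch */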
+static_always_inline void
+memif_add_copy_op (memif_per_thread_data_t * ptd, void *data, u32 len,
+ u16 buffer_offset, u16 buffer_vec_index)
+{
+ memif_copy_op_t *co;
+ vec_add2_aligned (ptd->copy_ops, co, 1, CLIB_CACHE_LINE_BYTES);
+ co->data = data;
+ co->data_len = len;
+ co->buffer_offset = buffer_offset;
+ co->buffer_vec_index = buffer_vec_index;
+}
+
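+/* if a packet does not fit into the first vlib buffer, link the extra
+   buffers into a chain and spread the remaining bytes over them */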
+static_always_inline void
+memif_add_to_chain (vlib_main_t * vm, vlib_buffer_t * b, u32 * buffers,
+ u32 buffer_size)
+{
+ vlib_buffer_t *seg = b;
+ i32 bytes_left = b->current_length - buffer_size + b->current_data;
+
+ if (PREDICT_TRUE (bytes_left <= 0))
+ return;
+
+ b->current_length -= bytes_left;
+ b->total_length_not_including_first_buffer = bytes_left;
+
+ while (bytes_left)
+ {
+ seg->flags |= VLIB_BUFFER_NEXT_PRESENT;
+ seg->next_buffer = buffers[0];
+ seg = vlib_get_buffer (vm, buffers[0]);
+ buffers++;
+ seg->current_data = 0;
+ seg->current_length = clib_min (buffer_size, bytes_left);
+ bytes_left -= seg->current_length;
+ }
+}
+
+static_always_inline u32
+sat_sub (u32 x, u32 y)
+{
+ u32 res = x - y;
+ res &= -(res <= x);
+ return res;
+}
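+/* e.g. sat_sub (3, 5) wraps to 0xfffffffe, (res <= x) is 0 and the mask
+   -(0) clears the result to 0; sat_sub (5, 3) keeps res = 2 unchanged */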
+
+/* branchless validation of the descriptor - uses saturated subtraction */
+static_always_inline u32
+memif_desc_is_invalid (memif_if_t * mif, memif_desc_t * d, u32 buffer_length)
{
- vlib_buffer_t *b = vlib_get_buffer (vm, bi);
- vlib_prefetch_buffer_header (b, STORE);
- CLIB_PREFETCH (b->data, CLIB_CACHE_LINE_BYTES, STORE);
+ u32 rv;
+ u16 valid_flags = MEMIF_DESC_FLAG_NEXT;
+
+ rv = d->flags & (~valid_flags);
+ rv |= sat_sub (d->region + 1, vec_len (mif->regions));
+ rv |= sat_sub (d->length, buffer_length);
+ rv |= sat_sub (d->offset + d->length, mif->regions[d->region].region_size);
+
+ if (PREDICT_FALSE (rv))
+ {
+ mif->flags |= MEMIF_IF_FLAG_ERROR;
+ return 1;
+ }
+
+ return 0;
}
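+
+/* copy-mode RX: walk the ring once to build per-packet and per-copy-op
+   vectors, bulk-allocate vlib buffers, execute all copies in a batch,
+   release the ring slots and enqueue the packets to the next nodes */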
static_always_inline uword
memif_device_input_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
vlib_frame_t * frame, memif_if_t * mif,
- memif_ring_type_t type, u16 qid)
+ memif_ring_type_t type, u16 qid,
+ memif_interface_mode_t mode)
{
vnet_main_t *vnm = vnet_get_main ();
+ memif_main_t *mm = &memif_main;
memif_ring_t *ring;
memif_queue_t *mq;
- u16 head;
- u32 next_index = VNET_DEVICE_INPUT_NEXT_ETHERNET_INPUT;
+ u16 buffer_size = VLIB_BUFFER_DEFAULT_FREE_LIST_BYTES;
+ u32 next_index;
uword n_trace = vlib_get_trace_count (vm, node);
- memif_main_t *nm = &memif_main;
- u32 n_rx_packets = 0;
- u32 n_rx_bytes = 0;
- u32 *to_next = 0;
- u32 n_free_bufs;
- u32 thread_index = vlib_get_thread_index ();
- u32 bi0, bi1;
- vlib_buffer_t *b0, *b1;
- u16 ring_size, mask, num_slots;
- void *mb0, *mb1;
+ u32 n_rx_packets = 0, n_rx_bytes = 0;
+ u32 n_left, *to_next = 0;
+ u32 bi0, bi1, bi2, bi3;
+ vlib_buffer_t *b0, *b1, *b2, *b3;
+ u32 thread_index = vm->thread_index;
+ memif_per_thread_data_t *ptd = vec_elt_at_index (mm->per_thread_data,
+ thread_index);
+ vlib_buffer_t *bt = &ptd->buffer_template;
+ u16 cur_slot, last_slot, ring_size, n_slots, mask;
+ i16 start_offset;
+ u16 n_buffers = 0, n_alloc;
+ memif_copy_op_t *co;
+ memif_packet_op_t *po;
+ memif_region_index_t last_region = ~0;
+ void *last_region_shm = 0;
mq = vec_elt_at_index (mif->rx_queues, qid);
ring = mq->ring;
ring_size = 1 << mq->log2_ring_size;
mask = ring_size - 1;
- if (mif->per_interface_next_index != ~0)
- next_index = mif->per_interface_next_index;
+ next_index = (mode == MEMIF_INTERFACE_MODE_IP) ?
+ VNET_DEVICE_INPUT_NEXT_IP6_INPUT : VNET_DEVICE_INPUT_NEXT_ETHERNET_INPUT;
+
+  /* assume that somebody will want to add an ethernet header to the
+     packet, so start with the IP header at offset 14 */
+ start_offset = (mode == MEMIF_INTERFACE_MODE_IP) ? 14 : 0;
+
+ /* for S2M rings, we are consumers of packet buffers, and for M2S rings we
+ are producers of empty buffers */
+ cur_slot = (type == MEMIF_RING_S2M) ? mq->last_head : mq->last_tail;
+ last_slot = (type == MEMIF_RING_S2M) ? ring->head : ring->tail;
+ if (cur_slot == last_slot)
+ goto refill;
+ n_slots = last_slot - cur_slot;
- n_free_bufs = vec_len (nm->rx_buffers[thread_index]);
- if (PREDICT_FALSE (n_free_bufs < ring_size))
+ /* construct copy and packet vector out of ring slots */
+ while (n_slots && n_rx_packets < MEMIF_RX_VECTOR_SZ)
{
- vec_validate (nm->rx_buffers[thread_index],
- ring_size + n_free_bufs - 1);
- n_free_bufs +=
- vlib_buffer_alloc (vm, &nm->rx_buffers[thread_index][n_free_bufs],
- ring_size);
- _vec_len (nm->rx_buffers[thread_index]) = n_free_bufs;
+ u32 dst_off, src_off, n_bytes_left;
+ u16 s0;
+ memif_desc_t *d0;
+ void *mb0;
+ po = ptd->packet_ops + n_rx_packets;
+ n_rx_packets++;
+ po->first_buffer_vec_index = n_buffers++;
+ po->packet_len = 0;
+ src_off = 0;
+ dst_off = start_offset;
+
+ next_slot:
+ CLIB_PREFETCH (&ring->desc[(cur_slot + 8) & mask],
+ CLIB_CACHE_LINE_BYTES, LOAD);
+ s0 = cur_slot & mask;
+ d0 = &ring->desc[s0];
+ n_bytes_left = d0->length;
+
+      /* the slave resets the buffer length so it can hand a full-size
+       * buffer back to the master
+       */
+ if (type == MEMIF_RING_M2S)
+ d0->length = mif->run.buffer_size;
+
+ po->packet_len += n_bytes_left;
+ if (PREDICT_FALSE (last_region != d0->region))
+ {
+ last_region_shm = mif->regions[d0->region].shm;
+ last_region = d0->region;
+ }
+ mb0 = last_region_shm + d0->offset;
+
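+      /* split the descriptor payload into copy ops, opening a new
+         destination vlib buffer whenever the current one fills up */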
+ do
+ {
+ u32 dst_free = buffer_size - dst_off;
+ if (dst_free == 0)
+ {
+ dst_off = 0;
+ dst_free = buffer_size;
+ n_buffers++;
+ }
+ u32 bytes_to_copy = clib_min (dst_free, n_bytes_left);
+ memif_add_copy_op (ptd, mb0 + src_off, bytes_to_copy, dst_off,
+ n_buffers - 1);
+ n_bytes_left -= bytes_to_copy;
+ src_off += bytes_to_copy;
+ dst_off += bytes_to_copy;
+ }
+ while (PREDICT_FALSE (n_bytes_left));
+
+ cur_slot++;
+ n_slots--;
+ if ((d0->flags & MEMIF_DESC_FLAG_NEXT) && n_slots)
+ {
+ src_off = 0;
+ goto next_slot;
+ }
}
- head = ring->head;
- if (head == mq->last_head)
- return 0;
+ /* allocate free buffers */
+ vec_validate_aligned (ptd->buffers, n_buffers - 1, CLIB_CACHE_LINE_BYTES);
+ n_alloc = vlib_buffer_alloc (vm, ptd->buffers, n_buffers);
+ if (PREDICT_FALSE (n_alloc != n_buffers))
+ {
+ if (n_alloc)
+ vlib_buffer_free (vm, ptd->buffers, n_alloc);
+ vlib_error_count (vm, node->node_index,
+ MEMIF_INPUT_ERROR_BUFFER_ALLOC_FAIL, 1);
+ goto refill;
+ }
+
+ /* copy data */
+ n_left = vec_len (ptd->copy_ops);
+ co = ptd->copy_ops;
+ while (n_left >= 8)
+ {
+ CLIB_PREFETCH (co[4].data, CLIB_CACHE_LINE_BYTES, LOAD);
+ CLIB_PREFETCH (co[5].data, CLIB_CACHE_LINE_BYTES, LOAD);
+ CLIB_PREFETCH (co[6].data, CLIB_CACHE_LINE_BYTES, LOAD);
+ CLIB_PREFETCH (co[7].data, CLIB_CACHE_LINE_BYTES, LOAD);
+
+ b0 = vlib_get_buffer (vm, ptd->buffers[co[0].buffer_vec_index]);
+ b1 = vlib_get_buffer (vm, ptd->buffers[co[1].buffer_vec_index]);
+ b2 = vlib_get_buffer (vm, ptd->buffers[co[2].buffer_vec_index]);
+ b3 = vlib_get_buffer (vm, ptd->buffers[co[3].buffer_vec_index]);
+
+ clib_memcpy (b0->data + co[0].buffer_offset, co[0].data,
+ co[0].data_len);
+ clib_memcpy (b1->data + co[1].buffer_offset, co[1].data,
+ co[1].data_len);
+ clib_memcpy (b2->data + co[2].buffer_offset, co[2].data,
+ co[2].data_len);
+ clib_memcpy (b3->data + co[3].buffer_offset, co[3].data,
+ co[3].data_len);
+
+ co += 4;
+ n_left -= 4;
+ }
+ while (n_left)
+ {
+ b0 = vlib_get_buffer (vm, ptd->buffers[co[0].buffer_vec_index]);
+ clib_memcpy (b0->data + co[0].buffer_offset, co[0].data,
+ co[0].data_len);
+ co += 1;
+ n_left -= 1;
+ }
- if (head > mq->last_head)
- num_slots = head - mq->last_head;
+ /* release slots from the ring */
+ if (type == MEMIF_RING_S2M)
+ {
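+      /* ensure all reads of the descriptors complete before the tail
+         moves and the slave is free to recycle the slots */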
+ CLIB_MEMORY_STORE_BARRIER ();
+ ring->tail = mq->last_head = cur_slot;
+ }
else
- num_slots = ring_size - mq->last_head + head;
+ {
+ mq->last_tail = cur_slot;
+ }
- while (num_slots)
+ u32 n_from = n_rx_packets;
+ po = ptd->packet_ops;
+
+ vnet_buffer (bt)->sw_if_index[VLIB_RX] = mif->sw_if_index;
+ bt->current_data = start_offset;
+
+ while (n_from)
{
u32 n_left_to_next;
- u32 next0 = next_index;
- u32 next1 = next_index;
- vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+ u32 next0, next1, next2, next3;
- while (num_slots > 5 && n_left_to_next > 2)
+ vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+ while (n_from >= 8 && n_left_to_next >= 4)
{
- if (PREDICT_TRUE (mq->last_head + 5 < ring_size))
+	  b0 = vlib_get_buffer (vm, ptd->buffers[po[4].first_buffer_vec_index]);
+	  b1 = vlib_get_buffer (vm, ptd->buffers[po[5].first_buffer_vec_index]);
+	  b2 = vlib_get_buffer (vm, ptd->buffers[po[6].first_buffer_vec_index]);
+	  b3 = vlib_get_buffer (vm, ptd->buffers[po[7].first_buffer_vec_index]);
+ vlib_prefetch_buffer_header (b0, STORE);
+ vlib_prefetch_buffer_header (b1, STORE);
+ vlib_prefetch_buffer_header (b2, STORE);
+ vlib_prefetch_buffer_header (b3, STORE);
+
+ /* enqueue buffer */
+ u32 fbvi0 = po[0].first_buffer_vec_index;
+ u32 fbvi1 = po[1].first_buffer_vec_index;
+ u32 fbvi2 = po[2].first_buffer_vec_index;
+ u32 fbvi3 = po[3].first_buffer_vec_index;
+ to_next[0] = bi0 = ptd->buffers[fbvi0];
+ to_next[1] = bi1 = ptd->buffers[fbvi1];
+ to_next[2] = bi2 = ptd->buffers[fbvi2];
+ to_next[3] = bi3 = ptd->buffers[fbvi3];
+ to_next += 4;
+ n_left_to_next -= 4;
+
+ b0 = vlib_get_buffer (vm, bi0);
+ b1 = vlib_get_buffer (vm, bi1);
+ b2 = vlib_get_buffer (vm, bi2);
+ b3 = vlib_get_buffer (vm, bi3);
+
+ clib_memcpy64_x4 (b0, b1, b2, b3, bt);
+
+ b0->current_length = po[0].packet_len;
+ n_rx_bytes += b0->current_length;
+ b1->current_length = po[1].packet_len;
+ n_rx_bytes += b1->current_length;
+ b2->current_length = po[2].packet_len;
+ n_rx_bytes += b2->current_length;
+ b3->current_length = po[3].packet_len;
+ n_rx_bytes += b3->current_length;
+
+ memif_add_to_chain (vm, b0, ptd->buffers + fbvi0 + 1, buffer_size);
+ memif_add_to_chain (vm, b1, ptd->buffers + fbvi1 + 1, buffer_size);
+ memif_add_to_chain (vm, b2, ptd->buffers + fbvi2 + 1, buffer_size);
+ memif_add_to_chain (vm, b3, ptd->buffers + fbvi3 + 1, buffer_size);
+
+ if (mode == MEMIF_INTERFACE_MODE_IP)
{
- CLIB_PREFETCH (memif_get_buffer (mif, ring, mq->last_head + 2),
- CLIB_CACHE_LINE_BYTES, LOAD);
- CLIB_PREFETCH (memif_get_buffer (mif, ring, mq->last_head + 3),
- CLIB_CACHE_LINE_BYTES, LOAD);
- CLIB_PREFETCH (&ring->desc[mq->last_head + 4],
- CLIB_CACHE_LINE_BYTES, LOAD);
- CLIB_PREFETCH (&ring->desc[mq->last_head + 5],
- CLIB_CACHE_LINE_BYTES, LOAD);
+ next0 = memif_next_from_ip_hdr (node, b0);
+ next1 = memif_next_from_ip_hdr (node, b1);
+ next2 = memif_next_from_ip_hdr (node, b2);
+ next3 = memif_next_from_ip_hdr (node, b3);
}
- else
+ else if (mode == MEMIF_INTERFACE_MODE_ETHERNET)
{
- CLIB_PREFETCH (memif_get_buffer
- (mif, ring, (mq->last_head + 2) % mask),
- CLIB_CACHE_LINE_BYTES, LOAD);
- CLIB_PREFETCH (memif_get_buffer
- (mif, ring, (mq->last_head + 3) % mask),
- CLIB_CACHE_LINE_BYTES, LOAD);
- CLIB_PREFETCH (&ring->desc[(mq->last_head + 4) % mask],
- CLIB_CACHE_LINE_BYTES, LOAD);
- CLIB_PREFETCH (&ring->desc[(mq->last_head + 5) % mask],
- CLIB_CACHE_LINE_BYTES, LOAD);
+ if (PREDICT_FALSE (mif->per_interface_next_index != ~0))
+ {
+ next0 = mif->per_interface_next_index;
+ next1 = mif->per_interface_next_index;
+ next2 = mif->per_interface_next_index;
+ next3 = mif->per_interface_next_index;
+ }
+ else
+ {
+ next0 = next1 = next2 = next3 = next_index;
+ /* redirect if feature path enabled */
+ vnet_feature_start_device_input_x1 (mif->sw_if_index,
+ &next0, b0);
+ vnet_feature_start_device_input_x1 (mif->sw_if_index,
+ &next1, b1);
+ vnet_feature_start_device_input_x1 (mif->sw_if_index,
+ &next2, b2);
+ vnet_feature_start_device_input_x1 (mif->sw_if_index,
+ &next3, b3);
+ }
}
- /* get empty buffer */
- u32 last_buf = vec_len (nm->rx_buffers[thread_index]) - 1;
- bi0 = nm->rx_buffers[thread_index][last_buf];
- bi1 = nm->rx_buffers[thread_index][last_buf - 1];
- _vec_len (nm->rx_buffers[thread_index]) -= 2;
- if (last_buf > 4)
+ /* trace */
+ if (PREDICT_FALSE (n_trace > 0))
{
- memif_prefetch (vm, nm->rx_buffers[thread_index][last_buf - 2]);
- memif_prefetch (vm, nm->rx_buffers[thread_index][last_buf - 3]);
+ memif_trace_buffer (vm, node, mif, b0, next0, qid, &n_trace);
+ if (PREDICT_FALSE (n_trace > 0))
+ memif_trace_buffer (vm, node, mif, b1, next1, qid, &n_trace);
+ if (PREDICT_FALSE (n_trace > 0))
+ memif_trace_buffer (vm, node, mif, b2, next2, qid, &n_trace);
+ if (PREDICT_FALSE (n_trace > 0))
+ memif_trace_buffer (vm, node, mif, b3, next3, qid, &n_trace);
}
+ /* enqueue */
+ vlib_validate_buffer_enqueue_x4 (vm, node, next_index, to_next,
+ n_left_to_next, bi0, bi1, bi2, bi3,
+ next0, next1, next2, next3);
+
+ /* next */
+ n_from -= 4;
+ po += 4;
+ }
+ while (n_from && n_left_to_next)
+ {
/* enqueue buffer */
- to_next[0] = bi0;
- to_next[1] = bi1;
- to_next += 2;
- n_left_to_next -= 2;
+ u32 fbvi0 = po->first_buffer_vec_index;
+ to_next[0] = bi0 = ptd->buffers[fbvi0];
+ to_next += 1;
+ n_left_to_next--;
- /* fill buffer metadata */
b0 = vlib_get_buffer (vm, bi0);
- b1 = vlib_get_buffer (vm, bi1);
+ clib_memcpy (b0, bt, 64);
+ b0->current_length = po->packet_len;
+ n_rx_bytes += b0->current_length;
- vnet_buffer (b0)->sw_if_index[VLIB_RX] = mif->sw_if_index;
- vnet_buffer (b1)->sw_if_index[VLIB_RX] = mif->sw_if_index;
+ memif_add_to_chain (vm, b0, ptd->buffers + fbvi0 + 1, buffer_size);
+
+ if (mode == MEMIF_INTERFACE_MODE_IP)
+ {
+ next0 = memif_next_from_ip_hdr (node, b0);
+ }
+ else if (mode == MEMIF_INTERFACE_MODE_ETHERNET)
+ {
+ if (PREDICT_FALSE (mif->per_interface_next_index != ~0))
+ next0 = mif->per_interface_next_index;
+ else
+ {
+ next0 = next_index;
+ /* redirect if feature path enabled */
+ vnet_feature_start_device_input_x1 (mif->sw_if_index,
+ &next0, b0);
+ }
+
+ }
+
+ /* trace */
+ if (PREDICT_FALSE (n_trace > 0))
+ memif_trace_buffer (vm, node, mif, b0, next0, qid, &n_trace);
+
+ /* enqueue */
+ vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
+ n_left_to_next, bi0, next0);
- vnet_buffer (b0)->sw_if_index[VLIB_TX] = (u32) ~ 0;
- vnet_buffer (b1)->sw_if_index[VLIB_TX] = (u32) ~ 0;
+ /* next */
+ n_from--;
+ po++;
+ }
+ vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+ }
- /* copy buffer */
- mb0 = memif_get_buffer (mif, ring, mq->last_head);
- clib_memcpy (vlib_buffer_get_current (b0), mb0,
- CLIB_CACHE_LINE_BYTES);
- b0->current_length = ring->desc[mq->last_head].length;
- mq->last_head = (mq->last_head + 1) & mask;
+ vlib_increment_combined_counter (vnm->interface_main.combined_sw_if_counters
+ + VNET_INTERFACE_COUNTER_RX, thread_index,
+ mif->hw_if_index, n_rx_packets,
+ n_rx_bytes);
- mb1 = memif_get_buffer (mif, ring, mq->last_head);
- clib_memcpy (vlib_buffer_get_current (b1), mb1,
- CLIB_CACHE_LINE_BYTES);
- b1->current_length = ring->desc[mq->last_head].length;
- mq->last_head = (mq->last_head + 1) & mask;
+ /* refill ring with empty buffers */
+refill:
+ vec_reset_length (ptd->buffers);
+ vec_reset_length (ptd->copy_ops);
- if (b0->current_length > CLIB_CACHE_LINE_BYTES)
- clib_memcpy (vlib_buffer_get_current (b0) + CLIB_CACHE_LINE_BYTES,
- mb0 + CLIB_CACHE_LINE_BYTES,
- b0->current_length - CLIB_CACHE_LINE_BYTES);
+ if (type == MEMIF_RING_M2S)
+ {
+ u16 head = ring->head;
+ n_slots = ring_size - head + mq->last_tail;
- if (b1->current_length > CLIB_CACHE_LINE_BYTES)
- clib_memcpy (vlib_buffer_get_current (b1) + CLIB_CACHE_LINE_BYTES,
- mb1 + CLIB_CACHE_LINE_BYTES,
- b1->current_length - CLIB_CACHE_LINE_BYTES);
+ while (n_slots--)
+ {
+ u16 s = head++ & mask;
+ memif_desc_t *d = &ring->desc[s];
+ d->length = mif->run.buffer_size;
+ }
- /* trace */
- VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b0);
- VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b1);
+ CLIB_MEMORY_STORE_BARRIER ();
+ ring->head = head;
+ }
- if (PREDICT_FALSE (n_trace > 0))
+ return n_rx_packets;
+}
+
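+/* zero-copy RX: the slave exposes its own vlib buffers to the master
+   through the ring, so received packets already sit in vlib buffers;
+   no data copy is needed, only chaining and enqueueing, followed by a
+   refill of the ring with freshly allocated buffers */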
+static_always_inline uword
+memif_device_input_zc_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
+ vlib_frame_t * frame, memif_if_t * mif,
+ u16 qid, memif_interface_mode_t mode)
+{
+ vnet_main_t *vnm = vnet_get_main ();
+ memif_main_t *mm = &memif_main;
+ memif_ring_t *ring;
+ memif_queue_t *mq;
+ u32 next_index;
+ uword n_trace = vlib_get_trace_count (vm, node);
+ u32 n_rx_packets = 0, n_rx_bytes = 0;
+ u32 *to_next = 0, *buffers;
+ u32 bi0, bi1, bi2, bi3;
+ u16 s0, s1, s2, s3;
+ memif_desc_t *d0, *d1, *d2, *d3;
+ vlib_buffer_t *b0, *b1, *b2, *b3;
+ u32 thread_index = vm->thread_index;
+ memif_per_thread_data_t *ptd = vec_elt_at_index (mm->per_thread_data,
+ thread_index);
+ u16 cur_slot, last_slot, ring_size, n_slots, mask, head;
+ i16 start_offset;
+ u32 buffer_length;
+ u16 n_alloc, n_from;
+
+ mq = vec_elt_at_index (mif->rx_queues, qid);
+ ring = mq->ring;
+ ring_size = 1 << mq->log2_ring_size;
+ mask = ring_size - 1;
+
+ next_index = (mode == MEMIF_INTERFACE_MODE_IP) ?
+ VNET_DEVICE_INPUT_NEXT_IP6_INPUT : VNET_DEVICE_INPUT_NEXT_ETHERNET_INPUT;
+
+  /* assume that somebody will want to add an ethernet header to the
+     packet, so start with the IP header at offset 14 */
+ start_offset = (mode == MEMIF_INTERFACE_MODE_IP) ? 14 : 0;
+ buffer_length = VLIB_BUFFER_DEFAULT_FREE_LIST_BYTES - start_offset;
+
+ cur_slot = mq->last_tail;
+ last_slot = ring->tail;
+ if (cur_slot == last_slot)
+ goto refill;
+ n_slots = last_slot - cur_slot;
+
+ /* process ring slots */
+ vec_validate_aligned (ptd->buffers, MEMIF_RX_VECTOR_SZ,
+ CLIB_CACHE_LINE_BYTES);
+ while (n_slots && n_rx_packets < MEMIF_RX_VECTOR_SZ)
+ {
+ vlib_buffer_t *hb;
+
+ s0 = cur_slot & mask;
+ bi0 = mq->buffers[s0];
+ ptd->buffers[n_rx_packets++] = bi0;
+
+ CLIB_PREFETCH (&ring->desc[(cur_slot + 8) & mask],
+ CLIB_CACHE_LINE_BYTES, LOAD);
+ d0 = &ring->desc[s0];
+ hb = b0 = vlib_get_buffer (vm, bi0);
+ b0->current_data = start_offset;
+ b0->current_length = start_offset + d0->length;
+ n_rx_bytes += d0->length;
+
+      /* descriptor validation is currently disabled ('0 &&' short-circuits
+         the check) */
+      if (0 && memif_desc_is_invalid (mif, d0, buffer_length))
+	return 0;
+
+ cur_slot++;
+ n_slots--;
+ if (PREDICT_FALSE ((d0->flags & MEMIF_DESC_FLAG_NEXT) && n_slots))
+ {
+ hb->flags |= VLIB_BUFFER_TOTAL_LENGTH_VALID;
+ next_slot:
+ s0 = cur_slot & mask;
+ d0 = &ring->desc[s0];
+ bi0 = mq->buffers[s0];
+
+ /* previous buffer */
+ b0->next_buffer = bi0;
+ b0->flags |= VLIB_BUFFER_NEXT_PRESENT;
+
+ /* current buffer */
+ b0 = vlib_get_buffer (vm, bi0);
+ b0->current_data = start_offset;
+ b0->current_length = start_offset + d0->length;
+ hb->total_length_not_including_first_buffer += d0->length;
+ n_rx_bytes += d0->length;
+
+ cur_slot++;
+ n_slots--;
+ if ((d0->flags & MEMIF_DESC_FLAG_NEXT) && n_slots)
+ goto next_slot;
+ }
+ }
+
+ /* release slots from the ring */
+ mq->last_tail = cur_slot;
+
+ n_from = n_rx_packets;
+ buffers = ptd->buffers;
+
+ while (n_from)
+ {
+ u32 n_left_to_next;
+ u32 next0, next1, next2, next3;
+
+ vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+ while (n_from >= 8 && n_left_to_next >= 4)
+ {
+ b0 = vlib_get_buffer (vm, buffers[4]);
+ b1 = vlib_get_buffer (vm, buffers[5]);
+ b2 = vlib_get_buffer (vm, buffers[6]);
+ b3 = vlib_get_buffer (vm, buffers[7]);
+ vlib_prefetch_buffer_header (b0, STORE);
+ vlib_prefetch_buffer_header (b1, STORE);
+ vlib_prefetch_buffer_header (b2, STORE);
+ vlib_prefetch_buffer_header (b3, STORE);
+
+ /* enqueue buffer */
+ to_next[0] = bi0 = buffers[0];
+ to_next[1] = bi1 = buffers[1];
+ to_next[2] = bi2 = buffers[2];
+ to_next[3] = bi3 = buffers[3];
+ to_next += 4;
+ n_left_to_next -= 4;
+ buffers += 4;
+
+ b0 = vlib_get_buffer (vm, bi0);
+ b1 = vlib_get_buffer (vm, bi1);
+ b2 = vlib_get_buffer (vm, bi2);
+ b3 = vlib_get_buffer (vm, bi3);
+
+ vnet_buffer (b0)->sw_if_index[VLIB_RX] = mif->sw_if_index;
+ vnet_buffer (b0)->sw_if_index[VLIB_TX] = ~0;
+ vnet_buffer (b1)->sw_if_index[VLIB_RX] = mif->sw_if_index;
+ vnet_buffer (b1)->sw_if_index[VLIB_TX] = ~0;
+ vnet_buffer (b2)->sw_if_index[VLIB_RX] = mif->sw_if_index;
+ vnet_buffer (b2)->sw_if_index[VLIB_TX] = ~0;
+ vnet_buffer (b3)->sw_if_index[VLIB_RX] = mif->sw_if_index;
+ vnet_buffer (b3)->sw_if_index[VLIB_TX] = ~0;
+
+ if (mode == MEMIF_INTERFACE_MODE_IP)
+ {
+ next0 = memif_next_from_ip_hdr (node, b0);
+ next1 = memif_next_from_ip_hdr (node, b1);
+ next2 = memif_next_from_ip_hdr (node, b2);
+ next3 = memif_next_from_ip_hdr (node, b3);
+ }
+ else if (mode == MEMIF_INTERFACE_MODE_ETHERNET)
{
- /* b0 */
- memif_input_trace_t *tr;
- vlib_trace_buffer (vm, node, next0, b0,
- /* follow_chain */ 0);
- vlib_set_trace_count (vm, node, --n_trace);
- tr = vlib_add_trace (vm, node, b0, sizeof (*tr));
- tr->next_index = next0;
- tr->hw_if_index = mif->hw_if_index;
- tr->ring = qid;
-
- if (n_trace)
+ if (PREDICT_FALSE (mif->per_interface_next_index != ~0))
+ {
+ next0 = mif->per_interface_next_index;
+ next1 = mif->per_interface_next_index;
+ next2 = mif->per_interface_next_index;
+ next3 = mif->per_interface_next_index;
+ }
+ else
{
- /* b1 */
- memif_input_trace_t *tr;
- vlib_trace_buffer (vm, node, next1, b1,
- /* follow_chain */ 0);
- vlib_set_trace_count (vm, node, --n_trace);
- tr = vlib_add_trace (vm, node, b1, sizeof (*tr));
- tr->next_index = next1;
- tr->hw_if_index = mif->hw_if_index;
- tr->ring = qid;
+ next0 = next1 = next2 = next3 = next_index;
+ /* redirect if feature path enabled */
+ vnet_feature_start_device_input_x1 (mif->sw_if_index,
+ &next0, b0);
+ vnet_feature_start_device_input_x1 (mif->sw_if_index,
+ &next1, b1);
+ vnet_feature_start_device_input_x1 (mif->sw_if_index,
+ &next2, b2);
+ vnet_feature_start_device_input_x1 (mif->sw_if_index,
+ &next3, b3);
}
}
- /* redirect if feature path enabled */
- vnet_feature_start_device_input_x2 (mif->sw_if_index,
- &next0, &next1, b0, b1);
+ /* trace */
+ if (PREDICT_FALSE (n_trace > 0))
+ {
+ memif_trace_buffer (vm, node, mif, b0, next0, qid, &n_trace);
+ if (PREDICT_FALSE (n_trace > 0))
+ memif_trace_buffer (vm, node, mif, b1, next1, qid, &n_trace);
+ if (PREDICT_FALSE (n_trace > 0))
+ memif_trace_buffer (vm, node, mif, b2, next2, qid, &n_trace);
+ if (PREDICT_FALSE (n_trace > 0))
+ memif_trace_buffer (vm, node, mif, b3, next3, qid, &n_trace);
+ }
/* enqueue */
- vlib_validate_buffer_enqueue_x2 (vm, node, next_index, to_next,
- n_left_to_next,
- bi0, bi1, next0, next1);
+ vlib_validate_buffer_enqueue_x4 (vm, node, next_index, to_next,
+ n_left_to_next, bi0, bi1, bi2, bi3,
+ next0, next1, next2, next3);
- /* next packet */
- num_slots -= 2;
- n_rx_packets += 2;
- n_rx_bytes += b0->current_length;
- n_rx_bytes += b1->current_length;
+ /* next */
+ n_from -= 4;
}
- while (num_slots && n_left_to_next)
+ while (n_from && n_left_to_next)
{
- /* get empty buffer */
- u32 last_buf = vec_len (nm->rx_buffers[thread_index]) - 1;
- bi0 = nm->rx_buffers[thread_index][last_buf];
- _vec_len (nm->rx_buffers[thread_index]) = last_buf;
-
/* enqueue buffer */
- to_next[0] = bi0;
+ to_next[0] = bi0 = buffers[0];
to_next += 1;
n_left_to_next--;
+ buffers += 1;
- /* fill buffer metadata */
b0 = vlib_get_buffer (vm, bi0);
- b0->current_length = ring->desc[mq->last_head].length;
vnet_buffer (b0)->sw_if_index[VLIB_RX] = mif->sw_if_index;
- vnet_buffer (b0)->sw_if_index[VLIB_TX] = (u32) ~ 0;
-
- /* copy buffer */
- mb0 = memif_get_buffer (mif, ring, mq->last_head);
- clib_memcpy (vlib_buffer_get_current (b0), mb0,
- CLIB_CACHE_LINE_BYTES);
- if (b0->current_length > CLIB_CACHE_LINE_BYTES)
- clib_memcpy (vlib_buffer_get_current (b0) + CLIB_CACHE_LINE_BYTES,
- mb0 + CLIB_CACHE_LINE_BYTES,
- b0->current_length - CLIB_CACHE_LINE_BYTES);
-
- /* trace */
- VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b0);
+ vnet_buffer (b0)->sw_if_index[VLIB_TX] = ~0;
- if (PREDICT_FALSE (n_trace > 0))
+ if (mode == MEMIF_INTERFACE_MODE_IP)
{
- memif_input_trace_t *tr;
- vlib_trace_buffer (vm, node, next0, b0,
- /* follow_chain */ 0);
- vlib_set_trace_count (vm, node, --n_trace);
- tr = vlib_add_trace (vm, node, b0, sizeof (*tr));
- tr->next_index = next0;
- tr->hw_if_index = mif->hw_if_index;
- tr->ring = qid;
+ next0 = memif_next_from_ip_hdr (node, b0);
+ }
+ else if (mode == MEMIF_INTERFACE_MODE_ETHERNET)
+ {
+ if (PREDICT_FALSE (mif->per_interface_next_index != ~0))
+ next0 = mif->per_interface_next_index;
+ else
+ {
+ next0 = next_index;
+ /* redirect if feature path enabled */
+ vnet_feature_start_device_input_x1 (mif->sw_if_index,
+ &next0, b0);
+ }
}
-
- /* redirect if feature path enabled */
- vnet_feature_start_device_input_x1 (mif->sw_if_index, &next0, b0);
+ /* trace */
+ if (PREDICT_FALSE (n_trace > 0))
+ memif_trace_buffer (vm, node, mif, b0, next0, qid, &n_trace);
/* enqueue */
vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
n_left_to_next, bi0, next0);
- /* next packet */
- mq->last_head = (mq->last_head + 1) & mask;
- num_slots--;
- n_rx_packets++;
- n_rx_bytes += b0->current_length;
+ /* next */
+ n_from--;
}
vlib_put_next_frame (vm, node, next_index, n_left_to_next);
}
- CLIB_MEMORY_STORE_BARRIER ();
- ring->tail = head;
vlib_increment_combined_counter (vnm->interface_main.combined_sw_if_counters
+ VNET_INTERFACE_COUNTER_RX, thread_index,
mif->hw_if_index, n_rx_packets,
n_rx_bytes);
+ /* refill ring with empty buffers */
+refill:
+ vec_reset_length (ptd->buffers);
+
+ head = ring->head;
+ n_slots = ring_size - head + mq->last_tail;
+
+ if (n_slots < 32)
+ goto done;
+
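+  /* every refilled descriptor starts from the same template: an empty
+     buffer advertising the full usable length */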
+ memif_desc_t *dt = &ptd->desc_template;
+ memset (dt, 0, sizeof (memif_desc_t));
+ dt->length = buffer_length;
+
+ n_alloc = vlib_buffer_alloc_to_ring (vm, mq->buffers, head & mask,
+ ring_size, n_slots);
+
+ if (PREDICT_FALSE (n_alloc != n_slots))
+ {
+ vlib_error_count (vm, node->node_index,
+ MEMIF_INPUT_ERROR_BUFFER_ALLOC_FAIL, 1);
+ }
+
+ while (n_alloc >= 32)
+ {
+ bi0 = mq->buffers[(head + 4) & mask];
+ vlib_prefetch_buffer_with_index (vm, bi0, LOAD);
+ bi1 = mq->buffers[(head + 5) & mask];
+ vlib_prefetch_buffer_with_index (vm, bi1, LOAD);
+ bi2 = mq->buffers[(head + 6) & mask];
+ vlib_prefetch_buffer_with_index (vm, bi2, LOAD);
+ bi3 = mq->buffers[(head + 7) & mask];
+ vlib_prefetch_buffer_with_index (vm, bi3, LOAD);
+
+ s0 = head++ & mask;
+ s1 = head++ & mask;
+ s2 = head++ & mask;
+ s3 = head++ & mask;
+
+ d0 = &ring->desc[s0];
+ d1 = &ring->desc[s1];
+ d2 = &ring->desc[s2];
+ d3 = &ring->desc[s3];
+
+ clib_memcpy (d0, dt, sizeof (memif_desc_t));
+ clib_memcpy (d1, dt, sizeof (memif_desc_t));
+ clib_memcpy (d2, dt, sizeof (memif_desc_t));
+ clib_memcpy (d3, dt, sizeof (memif_desc_t));
+
+ b0 = vlib_get_buffer (vm, mq->buffers[s0]);
+ b1 = vlib_get_buffer (vm, mq->buffers[s1]);
+ b2 = vlib_get_buffer (vm, mq->buffers[s2]);
+ b3 = vlib_get_buffer (vm, mq->buffers[s3]);
+
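+      /* region 0 holds the rings themselves, so buffer pool n is
+         advertised to the peer as region n + 1 */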
+ d0->region = b0->buffer_pool_index + 1;
+ d1->region = b1->buffer_pool_index + 1;
+ d2->region = b2->buffer_pool_index + 1;
+ d3->region = b3->buffer_pool_index + 1;
+
+ d0->offset =
+ (void *) b0->data - mif->regions[d0->region].shm + start_offset;
+ d1->offset =
+ (void *) b1->data - mif->regions[d1->region].shm + start_offset;
+ d2->offset =
+ (void *) b2->data - mif->regions[d2->region].shm + start_offset;
+ d3->offset =
+ (void *) b3->data - mif->regions[d3->region].shm + start_offset;
+
+ n_alloc -= 4;
+ }
+ while (n_alloc)
+ {
+ s0 = head++ & mask;
+ d0 = &ring->desc[s0];
+ clib_memcpy (d0, dt, sizeof (memif_desc_t));
+ b0 = vlib_get_buffer (vm, mq->buffers[s0]);
+ d0->region = b0->buffer_pool_index + 1;
+ d0->offset =
+ (void *) b0->data - mif->regions[d0->region].shm + start_offset;
+
+ n_alloc -= 1;
+ }
+
+ CLIB_MEMORY_STORE_BARRIER ();
+ ring->head = head;
+
+done:
return n_rx_packets;
}
-static uword
-memif_input_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
- vlib_frame_t * frame)
+
+VLIB_NODE_FN (memif_input_node) (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * frame)
{
u32 n_rx = 0;
- memif_main_t *nm = &memif_main;
+ memif_main_t *mm = &memif_main;
vnet_device_input_runtime_t *rt = (void *) node->runtime_data;
vnet_device_and_queue_t *dq;
+ memif_interface_mode_t mode_ip = MEMIF_INTERFACE_MODE_IP;
+ memif_interface_mode_t mode_eth = MEMIF_INTERFACE_MODE_ETHERNET;
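+  /* mode is passed down as a compile-time constant so that the inlined
+     input functions specialize and per-packet mode branches vanish */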
foreach_device_and_queue (dq, rt->devices_and_queues)
{
memif_if_t *mif;
- mif = vec_elt_at_index (nm->interfaces, dq->dev_instance);
+ mif = vec_elt_at_index (mm->interfaces, dq->dev_instance);
if ((mif->flags & MEMIF_IF_FLAG_ADMIN_UP) &&
(mif->flags & MEMIF_IF_FLAG_CONNECTED))
{
- if (mif->flags & MEMIF_IF_FLAG_IS_SLAVE)
- n_rx += memif_device_input_inline (vm, node, frame, mif,
- MEMIF_RING_M2S, dq->queue_id);
+ if (mif->flags & MEMIF_IF_FLAG_ZERO_COPY)
+ {
+ if (mif->mode == MEMIF_INTERFACE_MODE_IP)
+ n_rx += memif_device_input_zc_inline (vm, node, frame, mif,
+ dq->queue_id, mode_ip);
+ else
+ n_rx += memif_device_input_zc_inline (vm, node, frame, mif,
+ dq->queue_id, mode_eth);
+ }
+ else if (mif->flags & MEMIF_IF_FLAG_IS_SLAVE)
+ {
+ if (mif->mode == MEMIF_INTERFACE_MODE_IP)
+ n_rx += memif_device_input_inline (vm, node, frame, mif,
+ MEMIF_RING_M2S, dq->queue_id,
+ mode_ip);
+ else
+ n_rx += memif_device_input_inline (vm, node, frame, mif,
+ MEMIF_RING_M2S, dq->queue_id,
+ mode_eth);
+ }
else
- n_rx += memif_device_input_inline (vm, node, frame, mif,
- MEMIF_RING_S2M, dq->queue_id);
+ {
+ if (mif->mode == MEMIF_INTERFACE_MODE_IP)
+ n_rx += memif_device_input_inline (vm, node, frame, mif,
+ MEMIF_RING_S2M, dq->queue_id,
+ mode_ip);
+ else
+ n_rx += memif_device_input_inline (vm, node, frame, mif,
+ MEMIF_RING_S2M, dq->queue_id,
+ mode_eth);
+ }
}
}
return n_rx;
}
+#ifndef CLIB_MARCH_VARIANT
/* *INDENT-OFF* */
VLIB_REGISTER_NODE (memif_input_node) = {
- .function = memif_input_fn,
.name = "memif-input",
.sibling_of = "device-input",
.format_trace = format_memif_input_trace,
.n_errors = MEMIF_INPUT_N_ERROR,
.error_strings = memif_input_error_strings,
};
+#endif
-VLIB_NODE_FUNCTION_MULTIARCH (memif_input_node, memif_input_fn)
/* *INDENT-ON* */