+}
+
+static_always_inline uword
+memif_device_input_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
+ memif_if_t *mif, memif_ring_type_t type, u16 qid,
+ memif_interface_mode_t mode)
+{
+ vnet_main_t *vnm = vnet_get_main ();
+ memif_main_t *mm = &memif_main;
+ memif_ring_t *ring;
+ memif_queue_t *mq;
+ u16 buffer_size = vlib_buffer_get_default_data_size (vm);
+ uword n_trace;
+ u16 nexts[MEMIF_RX_VECTOR_SZ], *next = nexts;
+ u32 _to_next_bufs[MEMIF_RX_VECTOR_SZ], *to_next_bufs = _to_next_bufs, *bi;
+ u32 n_left_to_next;
+ u32 next_index = VNET_DEVICE_INPUT_NEXT_ETHERNET_INPUT;
+ vlib_buffer_t *buffer_ptrs[MEMIF_RX_VECTOR_SZ];
+ u32 thread_index = vm->thread_index;
+ memif_per_thread_data_t *ptd =
+ vec_elt_at_index (mm->per_thread_data, thread_index);
+ u16 cur_slot, ring_size, n_slots, mask;
+ u16 n_buffers, n_alloc, n_desc;
+ i16 start_offset;
+ memif_copy_op_t *co;
+ int is_slave = (mif->flags & MEMIF_IF_FLAG_IS_SLAVE) != 0;
+ int is_simple = 1;
+ int i;
+
+ mq = vec_elt_at_index (mif->rx_queues, qid);
+ ring = mq->ring;
+ ring_size = 1 << mq->log2_ring_size;
+ mask = ring_size - 1;
+
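+ /* in IP mode packet data is stored at a fixed offset inside the buffer */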
+ start_offset = (mode == MEMIF_INTERFACE_MODE_IP) ? MEMIF_IP_OFFSET : 0;
+
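+ /* count fresh slots: a slave consumes completed buffers up to the
+  * master's tail, a master consumes packets produced by the slave up to
+  * its head; the acquire load pairs with the producer's release store */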
+ if (is_slave)
+ {
+ cur_slot = mq->last_tail;
+ n_slots = __atomic_load_n (&ring->tail, __ATOMIC_ACQUIRE) - cur_slot;
+ }
+ else
+ {
+ cur_slot = mq->last_head;
+ n_slots = __atomic_load_n (&ring->head, __ATOMIC_ACQUIRE) - cur_slot;
+ }
+
+ if (n_slots == 0)
+ goto refill;
+
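+ /* copy descriptor state into the per-thread scratch space */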
+ n_desc = memif_parse_desc (ptd, mif, mq, cur_slot, n_slots);
+
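+ /* more descriptors than packets means some descriptors are chained,
+  * which rules out the simple single-copy path */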
+ if (n_desc != ptd->n_packets)
+ is_simple = 0;
+
+ cur_slot += n_desc;
+
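+ /* validate descriptor data and track the largest descriptor length */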
+ if (mif->mode == MEMIF_INTERFACE_MODE_ETHERNET)
+ memif_validate_desc_data (ptd, mif, n_desc, /* is_ethernet */ 1);
+ else
+ memif_validate_desc_data (ptd, mif, n_desc, /* is_ethernet */ 0);
+
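+ /* packets that do not fit into a single buffer need buffer chaining */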
+ if (ptd->max_desc_len > buffer_size - start_offset)
+ is_simple = 0;
+
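+ /* a non-zero xor of the per-descriptor status bytes means at least one
+  * descriptor needs the slow path */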
+ if (ptd->xor_status != 0)
+ is_simple = 0;
+
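+ /* the slow path builds a list of copy operations and returns the number
+  * of buffers needed, including chained ones */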
+ if (is_simple)
+ n_buffers = ptd->n_packets;
+ else
+ n_buffers = memif_process_desc (vm, node, ptd, mif);
+
+ /* allocate free buffers */
+ vec_validate_aligned (ptd->buffers, n_buffers - 1, CLIB_CACHE_LINE_BYTES);
+ n_alloc = vlib_buffer_alloc_from_pool (vm, ptd->buffers, n_buffers,
+ mq->buffer_pool_index);
+ if (PREDICT_FALSE (n_alloc != n_buffers))
+ {
+ if (n_alloc)
+ vlib_buffer_free (vm, ptd->buffers, n_alloc);
+ vlib_error_count (vm, node->node_index,
+ MEMIF_INPUT_ERROR_BUFFER_ALLOC_FAIL, 1);
+ goto refill;
+ }
+
+ /* copy data */
+ if (is_simple)
+ {
+ int n_pkts = ptd->n_packets;
+ void **desc_data = ptd->desc_data;
+ u16 *desc_len = ptd->desc_len;
+
+ vlib_get_buffers (vm, ptd->buffers, buffer_ptrs, n_buffers);
+
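+ /* one descriptor per packet: a single memcpy each, prefetching source
+  * data and destination buffer 8 packets ahead */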
+ for (i = 0; i + 8 < n_pkts; i++)
+ {
+ clib_prefetch_load (desc_data[i + 8]);
+ clib_prefetch_store (buffer_ptrs[i + 8]->data);
+ clib_memcpy_fast (buffer_ptrs[i]->data + start_offset, desc_data[i],
+ desc_len[i]);
+ }
+ for (; i < n_pkts; i++)
+ clib_memcpy_fast (buffer_ptrs[i]->data + start_offset, desc_data[i],
+ desc_len[i]);
+ }
+ else
+ {
+ vlib_buffer_t *b;
+ u32 n_pkts = vec_len (ptd->copy_ops);
+ co = ptd->copy_ops;
+
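+ /* execute the prepared copy operations; a packet may be assembled from
+  * several descriptors */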
+ for (i = 0; i + 8 < n_pkts; i++)
+ {
+ clib_prefetch_load (co[i + 8].data);
+ b = vlib_get_buffer (vm, ptd->buffers[co[i].buffer_vec_index]);
+ clib_memcpy_fast (b->data + co[i].buffer_offset, co[i].data,
+ co[i].data_len);
+ }
+ for (; i < n_pkts; i++)
+ {
+ b = vlib_get_buffer (vm, ptd->buffers[co[i].buffer_vec_index]);
+ clib_memcpy_fast (b->data + co[i].buffer_offset, co[i].data,
+ co[i].data_len);
+ }
+ }
+
+ /* release slots: as master (S2M ring) advance the ring tail so the
+  * slave can reuse the slots; as slave only record progress, buffers are
+  * re-posted during refill */
+ if (type == MEMIF_RING_S2M)
+ {
+ __atomic_store_n (&ring->tail, cur_slot, __ATOMIC_RELEASE);
+ mq->last_head = cur_slot;
+ }
+ else
+ {
+ mq->last_tail = cur_slot;
+ }
+
+ /* prepare buffer template and next indices */
+ vnet_buffer (&ptd->buffer_template)->sw_if_index[VLIB_RX] = mif->sw_if_index;
+ vnet_buffer (&ptd->buffer_template)->feature_arc_index = 0;
+ ptd->buffer_template.current_data = start_offset;
+ ptd->buffer_template.current_config_index = 0;
+ ptd->buffer_template.buffer_pool_index = mq->buffer_pool_index;
+ ptd->buffer_template.ref_count = 1;
+
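+ /* in ethernet mode pick the next node: a per-interface override wins,
+  * otherwise start the device-input feature arc */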
+ if (mode == MEMIF_INTERFACE_MODE_ETHERNET)
+ {
+ next_index = VNET_DEVICE_INPUT_NEXT_ETHERNET_INPUT;
+ if (mif->per_interface_next_index != ~0)
+ next_index = mif->per_interface_next_index;
+ else
+ vnet_feature_start_device_input_x1 (mif->sw_if_index, &next_index,
+ &ptd->buffer_template);
+
+ vlib_get_new_next_frame (vm, node, next_index, to_next_bufs,
+ n_left_to_next);
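+ /* every buffer in this frame comes from the same interface, so mark the
+  * frame and let ethernet-input use its single-sw_if_index fast path */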
+ if (PREDICT_TRUE (next_index == VNET_DEVICE_INPUT_NEXT_ETHERNET_INPUT))
+ {
+ vlib_next_frame_t *nf;
+ vlib_frame_t *f;
+ ethernet_input_frame_t *ef;
+ nf = vlib_node_runtime_get_next_frame (vm, node, next_index);
+ f = vlib_get_frame (vm, nf->frame);
+ f->flags = ETH_INPUT_FRAME_F_SINGLE_SW_IF_IDX;
+
+ ef = vlib_frame_scalar_args (f);
+ ef->sw_if_index = mif->sw_if_index;
+ ef->hw_if_index = mif->hw_if_index;
+ vlib_frame_no_append (f);
+ }
+ }
+
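+ /* copy buffer indices into the frame and fill per-buffer metadata and
+  * next indices */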
+ if (is_simple)
+ {
+ vlib_buffer_copy_indices (to_next_bufs, ptd->buffers, ptd->n_packets);
+ if (mode == MEMIF_INTERFACE_MODE_IP)
+ memif_fill_buffer_mdata_simple (node, ptd, buffer_ptrs, nexts, 1);
+ else
+ memif_fill_buffer_mdata_simple (node, ptd, buffer_ptrs, nexts, 0);
+ }
+ else
+ {
+ if (mode == MEMIF_INTERFACE_MODE_IP)
+ memif_fill_buffer_mdata (vm, node, ptd, mif, to_next_bufs, nexts, 1);
+ else
+ memif_fill_buffer_mdata (vm, node, ptd, mif, to_next_bufs, nexts, 0);
+ }