+ /* prepare buffer template and next indices */
+ vnet_buffer (&ptd->buffer_template)->sw_if_index[VLIB_RX] = mif->sw_if_index;
+ vnet_buffer (&ptd->buffer_template)->feature_arc_index = 0;
+ ptd->buffer_template.current_data = start_offset;
+ ptd->buffer_template.current_config_index = 0;
+ ptd->buffer_template.buffer_pool_index = mq->buffer_pool_index;
+ ptd->buffer_template.ref_count = 1;
+
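+ /* in ethernet mode all packets share one next node: a per-interface
+    redirect takes precedence, otherwise the device-input feature arc
+    may override ethernet-input */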
+ if (mode == MEMIF_INTERFACE_MODE_ETHERNET)
+ {
+ next_index = VNET_DEVICE_INPUT_NEXT_ETHERNET_INPUT;
+ if (mif->per_interface_next_index != ~0)
+ next_index = mif->per_interface_next_index;
+ else
+ vnet_feature_start_device_input_x1 (mif->sw_if_index, &next_index,
+ &ptd->buffer_template);
+
+ vlib_get_new_next_frame (vm, node, next_index, to_next_bufs,
+ n_left_to_next);
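+ /* if the whole frame goes to ethernet-input, mark it as coming from a
+    single sw_if_index so ethernet-input can use its optimized path */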
+ if (PREDICT_TRUE (next_index == VNET_DEVICE_INPUT_NEXT_ETHERNET_INPUT))
+ {
+ vlib_next_frame_t *nf;
+ vlib_frame_t *f;
+ ethernet_input_frame_t *ef;
+ nf = vlib_node_runtime_get_next_frame (vm, node, next_index);
+ f = vlib_get_frame (vm, nf->frame);
+ f->flags = ETH_INPUT_FRAME_F_SINGLE_SW_IF_IDX;
+
+ ef = vlib_frame_scalar_args (f);
+ ef->sw_if_index = mif->sw_if_index;
+ ef->hw_if_index = mif->hw_if_index;
+ vlib_frame_no_append (f);
+ }
+ }
+
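+ /* fill buffer metadata; the simple path covers the common case where
+    no packet spans more than one descriptor */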
+ if (is_simple)
+ {
+ vlib_buffer_copy_indices (to_next_bufs, ptd->buffers, ptd->n_packets);
+ if (mode == MEMIF_INTERFACE_MODE_IP)
+ memif_fill_buffer_mdata_simple (node, ptd, buffer_ptrs, nexts, 1);
+ else
+ memif_fill_buffer_mdata_simple (node, ptd, buffer_ptrs, nexts, 0);
+ }
+ else
+ {
+ if (mode == MEMIF_INTERFACE_MODE_IP)
+ memif_fill_buffer_mdata (vm, node, ptd, mif, to_next_bufs, nexts, 1);
+ else
+ memif_fill_buffer_mdata (vm, node, ptd, mif, to_next_bufs, nexts, 0);
+ }
+
+ /* packet trace if enabled */
+ if (PREDICT_FALSE ((n_trace = vlib_get_trace_count (vm, node))))
+ {
+ u32 n_left = ptd->n_packets;
+ bi = to_next_bufs;
+ next = nexts;
+ u32 ni = next_index;
+ while (n_trace && n_left)
+ {
+ vlib_buffer_t *b;
+ memif_input_trace_t *tr;
+ if (mode != MEMIF_INTERFACE_MODE_ETHERNET)
+ ni = next[0];
+ b = vlib_get_buffer (vm, bi[0]);
+ if (PREDICT_TRUE
+ (vlib_trace_buffer (vm, node, ni, b, /* follow_chain */ 0)))
+ {
+ tr = vlib_add_trace (vm, node, b, sizeof (*tr));
+ tr->next_index = ni;
+ tr->hw_if_index = mif->hw_if_index;
+ tr->ring = qid;
+ n_trace--;
+ }
+
+ /* next */
+ n_left--;
+ bi++;
+ next++;
+ }
+ vlib_set_trace_count (vm, node, n_trace);
+ }
+
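+ /* hand packets to the next node(s): ethernet mode uses the single
+    frame allocated above, IP mode enqueues with per-packet nexts */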
+ if (mode == MEMIF_INTERFACE_MODE_ETHERNET)
+ {
+ n_left_to_next -= ptd->n_packets;
+ vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+ }
+ else
+ vlib_buffer_enqueue_to_next (vm, node, to_next_bufs, nexts,
+ ptd->n_packets);
+
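+ /* bump the per-thread rx counters for this interface */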
+ vlib_increment_combined_counter (
+ vnm->interface_main.combined_sw_if_counters + VNET_INTERFACE_COUNTER_RX,
+ thread_index, mif->sw_if_index, ptd->n_packets, ptd->n_rx_bytes);
+
+ /* refill ring with empty buffers */
+refill:
+ vec_reset_length (ptd->buffers);
+ vec_reset_length (ptd->copy_ops);
+
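+ /* on a master-to-slave ring the slave refills by advancing head over
+    the free slots, resetting each descriptor length to the full buffer */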
+ if (type == MEMIF_RING_M2S)
+ {
+ u16 head = ring->head;
+ n_slots = ring_size - head + mq->last_tail;
+
+ while (n_slots--)
+ {
+ u16 s = head++ & mask;
+ memif_desc_t *d = &ring->desc[s];
+ d->length = mif->run.buffer_size;
+ }
+
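+ /* publish the new head so the master can see the refilled slots */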
+ __atomic_store_n (&ring->head, head, __ATOMIC_RELEASE);
+ }
+
+ return ptd->n_packets;
+}
+
+static_always_inline uword
+memif_device_input_zc_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
+ memif_if_t *mif, u16 qid,
+ memif_interface_mode_t mode)
+{
+ vnet_main_t *vnm = vnet_get_main ();
+ memif_main_t *mm = &memif_main;
+ memif_ring_t *ring;
+ memif_queue_t *mq;
+ u32 next_index;
+ uword n_trace = vlib_get_trace_count (vm, node);
+ u32 n_rx_packets = 0, n_rx_bytes = 0;
+ u32 *to_next = 0, *buffers;
+ u32 bi0, bi1, bi2, bi3;
+ u16 slot, s0;
+ memif_desc_t *d0;
+ vlib_buffer_t *b0, *b1, *b2, *b3;
+ u32 thread_index = vm->thread_index;
+ memif_per_thread_data_t *ptd = vec_elt_at_index (mm->per_thread_data,
+ thread_index);
+ u16 cur_slot, last_slot, ring_size, n_slots, mask, head;
+ i16 start_offset;
+ u64 offset;
+ u32 buffer_length;
+ u16 n_alloc, n_from;
+
+ mq = vec_elt_at_index (mif->rx_queues, qid);
+ ring = mq->ring;
+ ring_size = 1 << mq->log2_ring_size;
+ mask = ring_size - 1;
+
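+ /* default next node; in IP mode the real next (ip4 vs ip6) is chosen
+    per packet later */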
+ next_index = (mode == MEMIF_INTERFACE_MODE_IP) ?
+ VNET_DEVICE_INPUT_NEXT_IP6_INPUT : VNET_DEVICE_INPUT_NEXT_ETHERNET_INPUT;
+
+ /* assume that somebody will want to prepend an ethernet header to the
+    packet, so start with the IP header at offset 14 */
+ start_offset = (mode == MEMIF_INTERFACE_MODE_IP) ? 14 : 0;
+ buffer_length = vlib_buffer_get_default_data_size (vm) - start_offset;
+
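+ /* slots between our last_tail and the producer's tail hold buffers
+    the peer has filled since the last pass */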
+ cur_slot = mq->last_tail;
+ last_slot = __atomic_load_n (&ring->tail, __ATOMIC_ACQUIRE);
+ if (cur_slot == last_slot)
+ goto refill;
+ n_slots = last_slot - cur_slot;
+
+ /* process ring slots */
+ vec_validate_aligned (ptd->buffers, MEMIF_RX_VECTOR_SZ,
+ CLIB_CACHE_LINE_BYTES);
+ while (n_slots && n_rx_packets < MEMIF_RX_VECTOR_SZ)
+ {
+ vlib_buffer_t *hb;
+
+ s0 = cur_slot & mask;
+ bi0 = mq->buffers[s0];
+ ptd->buffers[n_rx_packets++] = bi0;
+
+ clib_prefetch_load (&ring->desc[(cur_slot + 8) & mask]);
+ d0 = &ring->desc[s0];
+ hb = b0 = vlib_get_buffer (vm, bi0);
+ b0->current_data = start_offset;
+ b0->current_length = d0->length;
+ n_rx_bytes += d0->length;
+
+ cur_slot++;
+ n_slots--;
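+ /* descriptor chained to the next one: link the matching vlib buffers
+    into a buffer chain and accumulate the total length */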
+ if (PREDICT_FALSE ((d0->flags & MEMIF_DESC_FLAG_NEXT) && n_slots))
+ {
+ hb->flags |= VLIB_BUFFER_TOTAL_LENGTH_VALID;
+ hb->total_length_not_including_first_buffer = 0;
+ next_slot:
+ s0 = cur_slot & mask;
+ d0 = &ring->desc[s0];
+ bi0 = mq->buffers[s0];
+
+ /* previous buffer */
+ b0->next_buffer = bi0;
+ b0->flags |= VLIB_BUFFER_NEXT_PRESENT;
+
+ /* current buffer */
+ b0 = vlib_get_buffer (vm, bi0);
+ b0->current_data = start_offset;
+ b0->current_length = d0->length;
+ hb->total_length_not_including_first_buffer += d0->length;
+ n_rx_bytes += d0->length;
+
+ cur_slot++;
+ n_slots--;
+ if ((d0->flags & MEMIF_DESC_FLAG_NEXT) && n_slots)
+ goto next_slot;
+ }
+ }
+
+ /* release slots from the ring */
+ mq->last_tail = cur_slot;