+ return n_rx_packets;
+}
+
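+ /* zero-copy variant of the device input node: the slave exposes its own
+    vlib buffers to the master via shared memory regions, so the master
+    writes packet data directly into VPP buffers and no copy is needed on
+    receive */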
+static_always_inline uword
+memif_device_input_zc_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
+ vlib_frame_t * frame, memif_if_t * mif,
+ u16 qid, memif_interface_mode_t mode)
+{
+ vnet_main_t *vnm = vnet_get_main ();
+ memif_main_t *mm = &memif_main;
+ memif_ring_t *ring;
+ memif_queue_t *mq;
+ u32 next_index;
+ uword n_trace = vlib_get_trace_count (vm, node);
+ u32 n_rx_packets = 0, n_rx_bytes = 0;
+ u32 *to_next = 0, *buffers;
+ u32 bi0, bi1, bi2, bi3;
+ u16 s0, s1, s2, s3;
+ memif_desc_t *d0, *d1, *d2, *d3;
+ vlib_buffer_t *b0, *b1, *b2, *b3;
+ u32 thread_index = vm->thread_index;
+ memif_per_thread_data_t *ptd = vec_elt_at_index (mm->per_thread_data,
+ thread_index);
+ u16 cur_slot, last_slot, ring_size, n_slots, mask, head;
+ i16 start_offset;
+ u32 buffer_length;
+ u16 n_alloc, n_from;
+
+ mq = vec_elt_at_index (mif->rx_queues, qid);
+ ring = mq->ring;
+ ring_size = 1 << mq->log2_ring_size;
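+ /* ring size is a power of two, so a mask implements slot wrap-around */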
+ mask = ring_size - 1;
+
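+ /* in IP mode the next node is refined per packet from the IP header
+    below; ip6-input is only the speculative frame default */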
+ next_index = (mode == MEMIF_INTERFACE_MODE_IP) ?
+ VNET_DEVICE_INPUT_NEXT_IP6_INPUT : VNET_DEVICE_INPUT_NEXT_ETHERNET_INPUT;
+
+ /* assume that somebody will want to add an ethernet header on the packet,
+    so start with the IP header at offset 14 */
+ start_offset = (mode == MEMIF_INTERFACE_MODE_IP) ? 14 : 0;
+ buffer_length = VLIB_BUFFER_DEFAULT_FREE_LIST_BYTES - start_offset;
+
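+ /* slots between mq->last_tail and ring->tail have been filled by the
+    master and are ready to be processed */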
+ cur_slot = mq->last_tail;
+ last_slot = ring->tail;
+ if (cur_slot == last_slot)
+ goto refill;
+ n_slots = last_slot - cur_slot;
+
+ /* process ring slots */
+ vec_validate_aligned (ptd->buffers, MEMIF_RX_VECTOR_SZ,
+ CLIB_CACHE_LINE_BYTES);
+ while (n_slots && n_rx_packets < MEMIF_RX_VECTOR_SZ)
+ {
+ vlib_buffer_t *hb;
+
+ s0 = cur_slot & mask;
+ bi0 = mq->buffers[s0];
+ ptd->buffers[n_rx_packets++] = bi0;
+
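+ /* prefetch the descriptor 8 slots ahead of the one being processed */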
+ CLIB_PREFETCH (&ring->desc[(cur_slot + 8) & mask],
+ CLIB_CACHE_LINE_BYTES, LOAD);
+ d0 = &ring->desc[s0];
+ hb = b0 = vlib_get_buffer (vm, bi0);
+ b0->current_data = start_offset;
+ b0->current_length = d0->length;
+ n_rx_bytes += d0->length;
+
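+ /* note: the descriptor validity check below is disabled by the
+    constant 0 */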
+ if (0 && memif_desc_is_invalid (mif, d0, buffer_length))
+ return 0;
+
+ cur_slot++;
+ n_slots--;
+ if (PREDICT_FALSE ((d0->flags & MEMIF_DESC_FLAG_NEXT) && n_slots))
+ {
+ hb->flags |= VLIB_BUFFER_TOTAL_LENGTH_VALID;
+ hb->total_length_not_including_first_buffer = 0;
+ next_slot:
+ s0 = cur_slot & mask;
+ d0 = &ring->desc[s0];
+ bi0 = mq->buffers[s0];
+
+ /* previous buffer */
+ b0->next_buffer = bi0;
+ b0->flags |= VLIB_BUFFER_NEXT_PRESENT;
+
+ /* current buffer */
+ b0 = vlib_get_buffer (vm, bi0);
+ b0->current_data = start_offset;
+ b0->current_length = d0->length;
+ hb->total_length_not_including_first_buffer += d0->length;
+ n_rx_bytes += d0->length;
+
+ cur_slot++;
+ n_slots--;
+ if ((d0->flags & MEMIF_DESC_FLAG_NEXT) && n_slots)
+ goto next_slot;
+ }
+ }
+
+ /* release slots from the ring */
+ mq->last_tail = cur_slot;
+
+ n_from = n_rx_packets;
+ buffers = ptd->buffers;
+
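+ /* enqueue received packets to the next node, four at a time, with
+    buffer header prefetching four packets ahead */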
+ while (n_from)
+ {
+ u32 n_left_to_next;
+ u32 next0, next1, next2, next3;
+
+ vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+ while (n_from >= 8 && n_left_to_next >= 4)
+ {
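+ /* prefetch the headers of the next four buffers while the current
+    four are enqueued; n_from >= 8 guarantees they exist */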
+ b0 = vlib_get_buffer (vm, buffers[4]);
+ b1 = vlib_get_buffer (vm, buffers[5]);
+ b2 = vlib_get_buffer (vm, buffers[6]);
+ b3 = vlib_get_buffer (vm, buffers[7]);
+ vlib_prefetch_buffer_header (b0, STORE);
+ vlib_prefetch_buffer_header (b1, STORE);
+ vlib_prefetch_buffer_header (b2, STORE);
+ vlib_prefetch_buffer_header (b3, STORE);
+
+ /* enqueue four buffers */
+ to_next[0] = bi0 = buffers[0];
+ to_next[1] = bi1 = buffers[1];
+ to_next[2] = bi2 = buffers[2];
+ to_next[3] = bi3 = buffers[3];
+ to_next += 4;
+ n_left_to_next -= 4;
+ buffers += 4;
+
+ b0 = vlib_get_buffer (vm, bi0);
+ b1 = vlib_get_buffer (vm, bi1);
+ b2 = vlib_get_buffer (vm, bi2);
+ b3 = vlib_get_buffer (vm, bi3);
+
+ vnet_buffer (b0)->sw_if_index[VLIB_RX] = mif->sw_if_index;
+ vnet_buffer (b0)->sw_if_index[VLIB_TX] = ~0;
+ vnet_buffer (b1)->sw_if_index[VLIB_RX] = mif->sw_if_index;
+ vnet_buffer (b1)->sw_if_index[VLIB_TX] = ~0;
+ vnet_buffer (b2)->sw_if_index[VLIB_RX] = mif->sw_if_index;
+ vnet_buffer (b2)->sw_if_index[VLIB_TX] = ~0;
+ vnet_buffer (b3)->sw_if_index[VLIB_RX] = mif->sw_if_index;
+ vnet_buffer (b3)->sw_if_index[VLIB_TX] = ~0;
+
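+ /* in IP mode, select the next node per packet from the IP header
+    version (see memif_next_from_ip_hdr) */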
+ if (mode == MEMIF_INTERFACE_MODE_IP)
+ {
+ next0 = memif_next_from_ip_hdr (node, b0);
+ next1 = memif_next_from_ip_hdr (node, b1);
+ next2 = memif_next_from_ip_hdr (node, b2);
+ next3 = memif_next_from_ip_hdr (node, b3);
+ }
+ else if (mode == MEMIF_INTERFACE_MODE_ETHERNET)