/*
 *------------------------------------------------------------------
 * Copyright (c) 2016 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *------------------------------------------------------------------
 */
#include <sys/ioctl.h>

#include <vlib/vlib.h>
#include <vlib/unix/unix.h>
#include <vnet/ethernet/ethernet.h>
#include <vnet/devices/devices.h>
#include <vnet/feature/feature.h>

#include <memif/memif.h>
#include <memif/private.h>
#define foreach_memif_input_error \
  _(BUFFER_ALLOC_FAIL, "buffer allocation failed") \
  _(NOT_IP, "not ip packet")
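
/* Expanded twice via the _() X-macro: once into the MEMIF_INPUT_ERROR_*
 * enum below and once into the matching counter strings, so the error ids
 * and their descriptions cannot drift apart. */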
typedef enum
{
#define _(f,s) MEMIF_INPUT_ERROR_##f,
  foreach_memif_input_error
#undef _
    MEMIF_INPUT_N_ERROR,
} memif_input_error_t;
static __clib_unused char *memif_input_error_strings[] = {
#define _(n,s) s,
  foreach_memif_input_error
#undef _
};

typedef struct
{
  u32 next_index;
  u32 hw_if_index;
  u16 ring;
} memif_input_trace_t;
static __clib_unused u8 *
format_memif_input_trace (u8 * s, va_list * args)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
  memif_input_trace_t *t = va_arg (*args, memif_input_trace_t *);
  u32 indent = format_get_indent (s);

  s = format (s, "memif: hw_if_index %d next-index %d",
	      t->hw_if_index, t->next_index);
  s = format (s, "\n%Uslot: ring %u", format_white_space, indent + 2,
	      t->ring);
  return s;
}
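
/* Select the next node for a raw IP packet by peeking at the version
 * nibble of its first byte: 0x4x means IPv4, 0x6x means IPv6, anything
 * else is counted as an error and dropped. Used only in IP mode, where no
 * ethernet header carries an ethertype. */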
static_always_inline u32
memif_next_from_ip_hdr (vlib_node_runtime_t * node, vlib_buffer_t * b)
{
  u8 *ptr = vlib_buffer_get_current (b);
  u8 v = *ptr & 0xf0;

  if (PREDICT_TRUE (v == 0x40))
    return VNET_DEVICE_INPUT_NEXT_IP4_NCS_INPUT;
  else if (PREDICT_TRUE (v == 0x60))
    return VNET_DEVICE_INPUT_NEXT_IP6_INPUT;

  b->error = node->errors[MEMIF_INPUT_ERROR_NOT_IP];
  return VNET_DEVICE_INPUT_NEXT_DROP;
}
static_always_inline void
memif_trace_buffer (vlib_main_t * vm, vlib_node_runtime_t * node,
		    memif_if_t * mif, vlib_buffer_t * b, u32 next, u16 qid,
		    uword * n_tracep)
{
  VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b);

  if (PREDICT_TRUE (b != 0))
    {
      memif_input_trace_t *tr;
      vlib_trace_buffer (vm, node, next, b, /* follow_chain */ 0);
      vlib_set_trace_count (vm, node, --(*n_tracep));
      tr = vlib_add_trace (vm, node, b, sizeof (*tr));
      tr->next_index = next;
      tr->hw_if_index = mif->hw_if_index;
      tr->ring = qid;
    }
}
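
/* The copy-mode RX path is split into two passes: the first pass walks the
 * ring and records each pending copy as a (data, length, buffer offset,
 * buffer index) tuple, the second pass executes all copies back to back.
 * Keeping the descriptor walk and the data movement in separate loops is
 * friendlier to the cache and to the hardware prefetcher. */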
static_always_inline void
memif_add_copy_op (memif_per_thread_data_t * ptd, void *data, u32 len,
		   u16 buffer_offset, u16 buffer_vec_index)
{
  memif_copy_op_t *co;
  vec_add2_aligned (ptd->copy_ops, co, 1, CLIB_CACHE_LINE_BYTES);
  co->data = data;
  co->data_len = len;
  co->buffer_offset = buffer_offset;
  co->buffer_vec_index = buffer_vec_index;
}
static_always_inline void
memif_add_to_chain (vlib_main_t * vm, vlib_buffer_t * b, u32 * buffers,
		    u32 buffer_size)
{
  vlib_buffer_t *seg = b;
  i32 bytes_left = b->current_length - buffer_size + b->current_data;

  if (PREDICT_TRUE (bytes_left <= 0))
    return;

  b->current_length -= bytes_left;
  b->total_length_not_including_first_buffer = bytes_left;

  while (bytes_left)
    {
      seg->flags |= VLIB_BUFFER_NEXT_PRESENT;
      seg->next_buffer = buffers[0];
      seg = vlib_get_buffer (vm, buffers[0]);
      buffers++;
      seg->current_data = 0;
      seg->current_length = clib_min (buffer_size, bytes_left);
      bytes_left -= seg->current_length;
    }
}
static_always_inline u32
sat_sub (u32 x, u32 y)
{
  u32 res = x - y;
  res &= -(res <= x);
  return res;
}
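
/* Example: sat_sub (3, 5) computes res = 0xfffffffe; res <= x is then 0, so
 * the mask -(res <= x) is 0 and the result saturates to 0. For x >= y the
 * mask is all-ones and the true difference is returned, with no branch. */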
/* branchless validation of the descriptor - uses saturated subtraction */
static_always_inline u32
memif_desc_is_invalid (memif_if_t * mif, memif_desc_t * d, u32 buffer_length)
{
  u32 rv;
  u16 valid_flags = MEMIF_DESC_FLAG_NEXT;

  rv = d->flags & (~valid_flags);
  rv |= sat_sub (d->region + 1, vec_len (mif->regions));
  rv |= sat_sub (d->length, buffer_length);
  rv |= sat_sub (d->offset + d->length, mif->regions[d->region].region_size);

  if (PREDICT_FALSE (rv))
    {
      mif->flags |= MEMIF_IF_FLAG_ERROR;
      return 1;
    }

  return 0;
}
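
/* Each sat_sub above is non-zero exactly when a bound is violated: an
 * unsupported flag bit, a region index beyond vec_len (mif->regions), a
 * length larger than the target buffer, or offset + length running past
 * the end of the shared memory region. OR-ing the partial results lets one
 * well-predicted branch reject the descriptor. */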
static_always_inline uword
memif_device_input_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
			   vlib_frame_t * frame, memif_if_t * mif,
			   memif_ring_type_t type, u16 qid,
			   memif_interface_mode_t mode)
{
  vnet_main_t *vnm = vnet_get_main ();
  memif_main_t *mm = &memif_main;
  memif_ring_t *ring;
  memif_queue_t *mq;
  u16 buffer_size = VLIB_BUFFER_DEFAULT_FREE_LIST_BYTES;
  u32 next_index;
  uword n_trace = vlib_get_trace_count (vm, node);
  u32 n_rx_packets = 0, n_rx_bytes = 0;
  u32 n_left, *to_next = 0;
  u32 bi0, bi1, bi2, bi3;
  vlib_buffer_t *b0, *b1, *b2, *b3;
  u32 thread_index = vlib_get_thread_index ();
  memif_per_thread_data_t *ptd = vec_elt_at_index (mm->per_thread_data,
						   thread_index);
  vlib_buffer_t *bt = &ptd->buffer_template;
  u16 cur_slot, last_slot, ring_size, n_slots, mask;
  i16 start_offset;
  u16 n_buffers = 0, n_alloc;
  memif_copy_op_t *co;
  memif_packet_op_t *po;
  memif_region_index_t last_region = ~0;
  void *last_region_shm = 0;

  mq = vec_elt_at_index (mif->rx_queues, qid);
  ring = mq->ring;
  ring_size = 1 << mq->log2_ring_size;
  mask = ring_size - 1;
  next_index = (mode == MEMIF_INTERFACE_MODE_IP) ?
    VNET_DEVICE_INPUT_NEXT_IP6_INPUT : VNET_DEVICE_INPUT_NEXT_ETHERNET_INPUT;

  /* assume that somebody will want to add an ethernet header on the packet,
     so start with the IP header at offset 14 */
  start_offset = (mode == MEMIF_INTERFACE_MODE_IP) ? 14 : 0;
  /* for S2M rings, we are consumers of packet buffers, and for M2S rings we
     are producers of empty buffers */
  cur_slot = (type == MEMIF_RING_S2M) ? mq->last_head : mq->last_tail;
  last_slot = (type == MEMIF_RING_S2M) ? ring->head : ring->tail;
  if (cur_slot == last_slot)
    goto refill;
  n_slots = last_slot - cur_slot;
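
  /* head, tail and the last_* shadows are free-running 16-bit counters that
   * are masked only when indexing the descriptor array, so the subtraction
   * above gives the correct slot count even across counter wraparound. */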
  /* construct copy and packet vector out of ring slots */
  while (n_slots && n_rx_packets < MEMIF_RX_VECTOR_SZ)
    {
      u32 dst_off, src_off, n_bytes_left;
      u16 s0;
      memif_desc_t *d0;
      void *mb0;
      po = ptd->packet_ops + n_rx_packets;
      n_rx_packets++;
      po->first_buffer_vec_index = n_buffers++;
      po->packet_len = 0;
      src_off = 0;
      dst_off = start_offset;

    next_slot:
      CLIB_PREFETCH (&ring->desc[(cur_slot + 8) & mask],
		     CLIB_CACHE_LINE_BYTES, LOAD);
      s0 = cur_slot & mask;
      d0 = &ring->desc[s0];
      n_bytes_left = d0->length;

      /* slave resets buffer length,
       * so it can produce full size buffer for master
       */
      if (type == MEMIF_RING_M2S)
	d0->length = mif->run.buffer_size;

      po->packet_len += n_bytes_left;
      if (PREDICT_FALSE (last_region != d0->region))
	{
	  last_region_shm = mif->regions[d0->region].shm;
	  last_region = d0->region;
	}
      mb0 = last_region_shm + d0->offset;

      do
	{
	  u32 dst_free = buffer_size - dst_off;
	  if (dst_free == 0)
	    {
	      dst_off = 0;
	      dst_free = buffer_size;
	      n_buffers++;
	    }
	  u32 bytes_to_copy = clib_min (dst_free, n_bytes_left);
	  memif_add_copy_op (ptd, mb0 + src_off, bytes_to_copy, dst_off,
			     n_buffers - 1);
	  n_bytes_left -= bytes_to_copy;
	  src_off += bytes_to_copy;
	  dst_off += bytes_to_copy;
	}
      while (PREDICT_FALSE (n_bytes_left));

      cur_slot++;
      n_slots--;
      if ((d0->flags & MEMIF_DESC_FLAG_NEXT) && n_slots)
	{
	  src_off = 0;
	  goto next_slot;
	}
    }
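
  /* A descriptor with MEMIF_DESC_FLAG_NEXT set continues the same packet in
   * the following slot, so the walk above jumps back to next_slot without
   * opening a new packet op; dst_off is carried over, which makes chained
   * fragments land back to back in the destination buffers. */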
  /* allocate free buffers */
  vec_validate_aligned (ptd->buffers, n_buffers - 1, CLIB_CACHE_LINE_BYTES);
  n_alloc = vlib_buffer_alloc (vm, ptd->buffers, n_buffers);
  if (PREDICT_FALSE (n_alloc != n_buffers))
    {
      if (n_alloc)
	vlib_buffer_free (vm, ptd->buffers, n_alloc);
      vlib_error_count (vm, node->node_index,
			MEMIF_INPUT_ERROR_BUFFER_ALLOC_FAIL, 1);
      goto refill;
    }
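
  /* buffer allocation is all-or-nothing: on a partial allocation everything
   * is returned to the pool and the batch is left in the ring to be retried
   * on the next dispatch, rather than delivering a partial vector. */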
  /* copy data */
  n_left = vec_len (ptd->copy_ops);
  co = ptd->copy_ops;
  while (n_left >= 8)
    {
      CLIB_PREFETCH (co[4].data, CLIB_CACHE_LINE_BYTES, LOAD);
      CLIB_PREFETCH (co[5].data, CLIB_CACHE_LINE_BYTES, LOAD);
      CLIB_PREFETCH (co[6].data, CLIB_CACHE_LINE_BYTES, LOAD);
      CLIB_PREFETCH (co[7].data, CLIB_CACHE_LINE_BYTES, LOAD);

      b0 = vlib_get_buffer (vm, ptd->buffers[co[0].buffer_vec_index]);
      b1 = vlib_get_buffer (vm, ptd->buffers[co[1].buffer_vec_index]);
      b2 = vlib_get_buffer (vm, ptd->buffers[co[2].buffer_vec_index]);
      b3 = vlib_get_buffer (vm, ptd->buffers[co[3].buffer_vec_index]);

      clib_memcpy (b0->data + co[0].buffer_offset, co[0].data,
		   co[0].data_len);
      clib_memcpy (b1->data + co[1].buffer_offset, co[1].data,
		   co[1].data_len);
      clib_memcpy (b2->data + co[2].buffer_offset, co[2].data,
		   co[2].data_len);
      clib_memcpy (b3->data + co[3].buffer_offset, co[3].data,
		   co[3].data_len);

      co += 4;
      n_left -= 4;
    }
  while (n_left)
    {
      b0 = vlib_get_buffer (vm, ptd->buffers[co[0].buffer_vec_index]);
      clib_memcpy (b0->data + co[0].buffer_offset, co[0].data,
		   co[0].data_len);
      co += 1;
      n_left -= 1;
    }
  /* release slots from the ring */
  if (type == MEMIF_RING_S2M)
    {
      CLIB_MEMORY_STORE_BARRIER ();
      ring->tail = mq->last_head = cur_slot;
    }
  else
    {
      mq->last_tail = cur_slot;
    }
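
  /* for S2M rings the consumer hands slots back by advancing tail; the
   * store barrier keeps the descriptor reads ahead of the tail update, so
   * the producer cannot recycle a slot this node is still reading. M2S
   * slots are recycled later, in the refill stage below. */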
  u32 n_from = n_rx_packets;
  po = ptd->packet_ops;

  vnet_buffer (bt)->sw_if_index[VLIB_RX] = mif->sw_if_index;
  bt->current_data = start_offset;
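
  /* bt is a per-thread template buffer header; the metadata prepared above
   * is replicated into each packet's first buffer below with a single
   * 64-byte copy instead of a series of per-field stores. */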
  while (n_from)
    {
      u32 n_left_to_next;
      u32 next0, next1, next2, next3;

      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
      while (n_from >= 8 && n_left_to_next >= 4)
	{
	  /* prefetch the headers of the buffers we are about to enqueue */
	  b0 = vlib_get_buffer (vm,
				ptd->buffers[po[4].first_buffer_vec_index]);
	  b1 = vlib_get_buffer (vm,
				ptd->buffers[po[5].first_buffer_vec_index]);
	  b2 = vlib_get_buffer (vm,
				ptd->buffers[po[6].first_buffer_vec_index]);
	  b3 = vlib_get_buffer (vm,
				ptd->buffers[po[7].first_buffer_vec_index]);
	  vlib_prefetch_buffer_header (b0, STORE);
	  vlib_prefetch_buffer_header (b1, STORE);
	  vlib_prefetch_buffer_header (b2, STORE);
	  vlib_prefetch_buffer_header (b3, STORE);
	  /* enqueue buffer */
	  u32 fbvi0 = po[0].first_buffer_vec_index;
	  u32 fbvi1 = po[1].first_buffer_vec_index;
	  u32 fbvi2 = po[2].first_buffer_vec_index;
	  u32 fbvi3 = po[3].first_buffer_vec_index;
	  to_next[0] = bi0 = ptd->buffers[fbvi0];
	  to_next[1] = bi1 = ptd->buffers[fbvi1];
	  to_next[2] = bi2 = ptd->buffers[fbvi2];
	  to_next[3] = bi3 = ptd->buffers[fbvi3];
	  to_next += 4;
	  n_left_to_next -= 4;
	  b0 = vlib_get_buffer (vm, bi0);
	  b1 = vlib_get_buffer (vm, bi1);
	  b2 = vlib_get_buffer (vm, bi2);
	  b3 = vlib_get_buffer (vm, bi3);

	  /* copy the template into the first 64 bytes of buffer metadata */
	  clib_memcpy64_x4 (b0, b1, b2, b3, bt);

	  b0->current_length = po[0].packet_len;
	  b1->current_length = po[1].packet_len;
	  b2->current_length = po[2].packet_len;
	  b3->current_length = po[3].packet_len;
	  n_rx_bytes += po[0].packet_len + po[1].packet_len +
	    po[2].packet_len + po[3].packet_len;

	  memif_add_to_chain (vm, b0, ptd->buffers + fbvi0 + 1, buffer_size);
	  memif_add_to_chain (vm, b1, ptd->buffers + fbvi1 + 1, buffer_size);
	  memif_add_to_chain (vm, b2, ptd->buffers + fbvi2 + 1, buffer_size);
	  memif_add_to_chain (vm, b3, ptd->buffers + fbvi3 + 1, buffer_size);
	  if (mode == MEMIF_INTERFACE_MODE_IP)
	    {
	      next0 = memif_next_from_ip_hdr (node, b0);
	      next1 = memif_next_from_ip_hdr (node, b1);
	      next2 = memif_next_from_ip_hdr (node, b2);
	      next3 = memif_next_from_ip_hdr (node, b3);
	    }
	  else if (mode == MEMIF_INTERFACE_MODE_ETHERNET)
	    {
	      if (PREDICT_FALSE (mif->per_interface_next_index != ~0))
		{
		  next0 = mif->per_interface_next_index;
		  next1 = mif->per_interface_next_index;
		  next2 = mif->per_interface_next_index;
		  next3 = mif->per_interface_next_index;
		}
	      else
		{
		  next0 = next1 = next2 = next3 = next_index;
		  /* redirect if feature path enabled */
		  vnet_feature_start_device_input_x1 (mif->sw_if_index,
						      &next0, b0);
		  vnet_feature_start_device_input_x1 (mif->sw_if_index,
						      &next1, b1);
		  vnet_feature_start_device_input_x1 (mif->sw_if_index,
						      &next2, b2);
		  vnet_feature_start_device_input_x1 (mif->sw_if_index,
						      &next3, b3);
		}
	    }
	  /* trace */
	  if (PREDICT_FALSE (n_trace > 0))
	    {
	      memif_trace_buffer (vm, node, mif, b0, next0, qid, &n_trace);
	      if (PREDICT_FALSE (n_trace > 0))
		memif_trace_buffer (vm, node, mif, b1, next1, qid, &n_trace);
	      if (PREDICT_FALSE (n_trace > 0))
		memif_trace_buffer (vm, node, mif, b2, next2, qid, &n_trace);
	      if (PREDICT_FALSE (n_trace > 0))
		memif_trace_buffer (vm, node, mif, b3, next3, qid, &n_trace);
	    }
	  /* enqueue */
	  vlib_validate_buffer_enqueue_x4 (vm, node, next_index, to_next,
					   n_left_to_next, bi0, bi1, bi2, bi3,
					   next0, next1, next2, next3);

	  /* next */
	  n_from -= 4;
	  po += 4;
	}
      while (n_from && n_left_to_next)
	{
	  u32 next0 = next_index;
	  u32 fbvi0 = po->first_buffer_vec_index;
	  to_next[0] = bi0 = ptd->buffers[fbvi0];
	  to_next += 1;
	  n_left_to_next -= 1;

	  b0 = vlib_get_buffer (vm, bi0);
	  clib_memcpy (b0, bt, 64);
	  b0->current_length = po->packet_len;
	  n_rx_bytes += b0->current_length;

	  memif_add_to_chain (vm, b0, ptd->buffers + fbvi0 + 1, buffer_size);

	  if (mode == MEMIF_INTERFACE_MODE_IP)
	    {
	      next0 = memif_next_from_ip_hdr (node, b0);
	    }
	  else if (mode == MEMIF_INTERFACE_MODE_ETHERNET)
	    {
	      if (PREDICT_FALSE (mif->per_interface_next_index != ~0))
		next0 = mif->per_interface_next_index;
	      else
		/* redirect if feature path enabled */
		vnet_feature_start_device_input_x1 (mif->sw_if_index,
						    &next0, b0);
	    }

	  /* trace */
	  if (PREDICT_FALSE (n_trace > 0))
	    memif_trace_buffer (vm, node, mif, b0, next0, qid, &n_trace);

	  /* enqueue */
	  vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
					   n_left_to_next, bi0, next0);

	  /* next */
	  n_from -= 1;
	  po++;
	}
      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }
  vlib_increment_combined_counter (vnm->interface_main.combined_sw_if_counters
				   + VNET_INTERFACE_COUNTER_RX, thread_index,
				   mif->hw_if_index, n_rx_packets,
				   n_rx_bytes);
  /* refill ring with empty buffers */
refill:
  vec_reset_length (ptd->buffers);
  vec_reset_length (ptd->copy_ops);

  if (type == MEMIF_RING_M2S)
    {
      u16 head = ring->head;
      n_slots = ring_size - head + mq->last_tail;

      while (n_slots--)
	{
	  u16 s = head++ & mask;
	  memif_desc_t *d = &ring->desc[s];
	  d->length = mif->run.buffer_size;
	}

      CLIB_MEMORY_STORE_BARRIER ();
      ring->head = head;
    }

  return n_rx_packets;
}
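
/* Zero-copy variant: the VPP buffer memory itself is exported to the peer
 * as a memif region, so packet data already sits in VPP buffers when the
 * ring is polled and the copy pass of the function above disappears. */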
static_always_inline uword
memif_device_input_zc_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
			      vlib_frame_t * frame, memif_if_t * mif,
			      u16 qid, memif_interface_mode_t mode)
{
  vnet_main_t *vnm = vnet_get_main ();
  memif_main_t *mm = &memif_main;
  memif_ring_t *ring;
  memif_queue_t *mq;
  u32 next_index;
  uword n_trace = vlib_get_trace_count (vm, node);
  u32 n_rx_packets = 0, n_rx_bytes = 0;
  u32 *to_next = 0, *buffers;
  u32 bi0, bi1, bi2, bi3;
  vlib_buffer_t *b0, *b1, *b2, *b3;
  u32 thread_index = vlib_get_thread_index ();
  memif_per_thread_data_t *ptd = vec_elt_at_index (mm->per_thread_data,
						   thread_index);
  vlib_buffer_t *bt = &ptd->buffer_template;
  u16 cur_slot, last_slot, ring_size, n_slots, mask, head;
  i16 start_offset;
  u32 buffer_length;
  u16 n_alloc;

  mq = vec_elt_at_index (mif->rx_queues, qid);
  ring = mq->ring;
  ring_size = 1 << mq->log2_ring_size;
  mask = ring_size - 1;
  next_index = (mode == MEMIF_INTERFACE_MODE_IP) ?
    VNET_DEVICE_INPUT_NEXT_IP6_INPUT : VNET_DEVICE_INPUT_NEXT_ETHERNET_INPUT;

  /* assume that somebody will want to add an ethernet header on the packet,
     so start with the IP header at offset 14 */
  start_offset = (mode == MEMIF_INTERFACE_MODE_IP) ? 14 : 0;
  buffer_length = VLIB_BUFFER_DEFAULT_FREE_LIST_BYTES - start_offset;
  cur_slot = mq->last_tail;
  last_slot = ring->tail;
  if (cur_slot == last_slot)
    goto refill;
  n_slots = last_slot - cur_slot;
  /* process ring slots */
  vec_validate_aligned (ptd->buffers, MEMIF_RX_VECTOR_SZ,
			CLIB_CACHE_LINE_BYTES);
  while (n_slots && n_rx_packets < MEMIF_RX_VECTOR_SZ)
    {
      u16 s0;
      memif_desc_t *d0;
      vlib_buffer_t *hb;

      s0 = cur_slot & mask;
      bi0 = mq->buffers[s0];
      ptd->buffers[n_rx_packets++] = bi0;

      CLIB_PREFETCH (&ring->desc[(cur_slot + 8) & mask],
		     CLIB_CACHE_LINE_BYTES, LOAD);
      d0 = &ring->desc[s0];
      hb = b0 = vlib_get_buffer (vm, bi0);
      b0->current_data = start_offset;
      b0->current_length = start_offset + d0->length;
      n_rx_bytes += d0->length;

      /* descriptor validation is short-circuited by the constant 0 below,
       * so invalid descriptors are currently accepted on this path */
      if (0 && memif_desc_is_invalid (mif, d0, buffer_length))
	return 0;

      cur_slot++;
      n_slots--;
      if (PREDICT_FALSE ((d0->flags & MEMIF_DESC_FLAG_NEXT) && n_slots))
	{
	  hb->flags |= VLIB_BUFFER_TOTAL_LENGTH_VALID;
	next_slot:
	  s0 = cur_slot & mask;
	  d0 = &ring->desc[s0];
	  bi0 = mq->buffers[s0];

	  /* current buffer in chain */
	  b0->next_buffer = bi0;
	  b0->flags |= VLIB_BUFFER_NEXT_PRESENT;

	  /* next buffer in chain */
	  b0 = vlib_get_buffer (vm, bi0);
	  b0->current_data = start_offset;
	  b0->current_length = start_offset + d0->length;
	  hb->total_length_not_including_first_buffer += d0->length;
	  n_rx_bytes += d0->length;

	  cur_slot++;
	  n_slots--;
	  if ((d0->flags & MEMIF_DESC_FLAG_NEXT) && n_slots)
	    goto next_slot;
	}
    }

  /* release slots from the ring */
  mq->last_tail = cur_slot;
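
  /* in zero-copy mode the buffers referenced by the ring are ordinary VPP
   * buffers posted by the refill stage below, so consuming a packet is pure
   * slot bookkeeping - no data is touched and ownership simply returns to
   * VPP. */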
  u32 n_from = n_rx_packets;

  vnet_buffer (bt)->sw_if_index[VLIB_RX] = mif->sw_if_index;

  buffers = ptd->buffers;
  while (n_from)
    {
      u32 n_left_to_next;
      u32 next0, next1, next2, next3;

      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
      while (n_from >= 8 && n_left_to_next >= 4)
	{
	  b0 = vlib_get_buffer (vm, buffers[4]);
	  b1 = vlib_get_buffer (vm, buffers[5]);
	  b2 = vlib_get_buffer (vm, buffers[6]);
	  b3 = vlib_get_buffer (vm, buffers[7]);
	  vlib_prefetch_buffer_header (b0, STORE);
	  vlib_prefetch_buffer_header (b1, STORE);
	  vlib_prefetch_buffer_header (b2, STORE);
	  vlib_prefetch_buffer_header (b3, STORE);
	  /* enqueue buffer */
	  to_next[0] = bi0 = buffers[0];
	  to_next[1] = bi1 = buffers[1];
	  to_next[2] = bi2 = buffers[2];
	  to_next[3] = bi3 = buffers[3];
	  buffers += 4;
	  to_next += 4;
	  n_left_to_next -= 4;
	  b0 = vlib_get_buffer (vm, bi0);
	  b1 = vlib_get_buffer (vm, bi1);
	  b2 = vlib_get_buffer (vm, bi2);
	  b3 = vlib_get_buffer (vm, bi3);

	  vnet_buffer (b0)->sw_if_index[VLIB_RX] = mif->sw_if_index;
	  vnet_buffer (b0)->sw_if_index[VLIB_TX] = ~0;
	  vnet_buffer (b1)->sw_if_index[VLIB_RX] = mif->sw_if_index;
	  vnet_buffer (b1)->sw_if_index[VLIB_TX] = ~0;
	  vnet_buffer (b2)->sw_if_index[VLIB_RX] = mif->sw_if_index;
	  vnet_buffer (b2)->sw_if_index[VLIB_TX] = ~0;
	  vnet_buffer (b3)->sw_if_index[VLIB_RX] = mif->sw_if_index;
	  vnet_buffer (b3)->sw_if_index[VLIB_TX] = ~0;
	  if (mode == MEMIF_INTERFACE_MODE_IP)
	    {
	      next0 = memif_next_from_ip_hdr (node, b0);
	      next1 = memif_next_from_ip_hdr (node, b1);
	      next2 = memif_next_from_ip_hdr (node, b2);
	      next3 = memif_next_from_ip_hdr (node, b3);
	    }
	  else if (mode == MEMIF_INTERFACE_MODE_ETHERNET)
	    {
	      if (PREDICT_FALSE (mif->per_interface_next_index != ~0))
		{
		  next0 = mif->per_interface_next_index;
		  next1 = mif->per_interface_next_index;
		  next2 = mif->per_interface_next_index;
		  next3 = mif->per_interface_next_index;
		}
	      else
		{
		  next0 = next1 = next2 = next3 = next_index;
		  /* redirect if feature path enabled */
		  vnet_feature_start_device_input_x1 (mif->sw_if_index,
						      &next0, b0);
		  vnet_feature_start_device_input_x1 (mif->sw_if_index,
						      &next1, b1);
		  vnet_feature_start_device_input_x1 (mif->sw_if_index,
						      &next2, b2);
		  vnet_feature_start_device_input_x1 (mif->sw_if_index,
						      &next3, b3);
		}
	    }
	  /* trace */
	  if (PREDICT_FALSE (n_trace > 0))
	    {
	      memif_trace_buffer (vm, node, mif, b0, next0, qid, &n_trace);
	      if (PREDICT_FALSE (n_trace > 0))
		memif_trace_buffer (vm, node, mif, b1, next1, qid, &n_trace);
	      if (PREDICT_FALSE (n_trace > 0))
		memif_trace_buffer (vm, node, mif, b2, next2, qid, &n_trace);
	      if (PREDICT_FALSE (n_trace > 0))
		memif_trace_buffer (vm, node, mif, b3, next3, qid, &n_trace);
	    }
	  /* enqueue */
	  vlib_validate_buffer_enqueue_x4 (vm, node, next_index, to_next,
					   n_left_to_next, bi0, bi1, bi2, bi3,
					   next0, next1, next2, next3);

	  /* next */
	  n_from -= 4;
	}
      while (n_from && n_left_to_next)
	{
	  u32 next0 = next_index;

	  /* enqueue buffer */
	  to_next[0] = bi0 = buffers[0];
	  buffers += 1;
	  to_next += 1;
	  n_left_to_next -= 1;

	  b0 = vlib_get_buffer (vm, bi0);
	  vnet_buffer (b0)->sw_if_index[VLIB_RX] = mif->sw_if_index;
	  vnet_buffer (b0)->sw_if_index[VLIB_TX] = ~0;

	  if (mode == MEMIF_INTERFACE_MODE_IP)
	    {
	      next0 = memif_next_from_ip_hdr (node, b0);
	    }
	  else if (mode == MEMIF_INTERFACE_MODE_ETHERNET)
	    {
	      if (PREDICT_FALSE (mif->per_interface_next_index != ~0))
		next0 = mif->per_interface_next_index;
	      else
		/* redirect if feature path enabled */
		vnet_feature_start_device_input_x1 (mif->sw_if_index,
						    &next0, b0);
	    }

	  /* trace */
	  if (PREDICT_FALSE (n_trace > 0))
	    memif_trace_buffer (vm, node, mif, b0, next0, qid, &n_trace);

	  /* enqueue */
	  vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
					   n_left_to_next, bi0, next0);

	  /* next */
	  n_from -= 1;
	}
      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }
  vlib_increment_combined_counter (vnm->interface_main.combined_sw_if_counters
				   + VNET_INTERFACE_COUNTER_RX, thread_index,
				   mif->hw_if_index, n_rx_packets,
				   n_rx_bytes);
  /* refill ring with empty buffers */
refill:
  vec_reset_length (ptd->buffers);

  head = ring->head;
  n_slots = ring_size - head + mq->last_tail;

  memif_desc_t *dt = &ptd->desc_template;
  memset (dt, 0, sizeof (memif_desc_t));
  dt->length = VLIB_BUFFER_DEFAULT_FREE_LIST_BYTES - start_offset;

  n_alloc = vlib_buffer_alloc_to_ring (vm, mq->buffers, head & mask,
				       ring_size, n_slots);

  if (PREDICT_FALSE (n_alloc != n_slots))
    {
      vlib_error_count (vm, node->node_index,
			MEMIF_INPUT_ERROR_BUFFER_ALLOC_FAIL, 1);
    }

  while (n_alloc--)
    {
      u16 s = head++ & mask;
      memif_desc_t *d = &ring->desc[s];
      clib_memcpy (d, dt, sizeof (memif_desc_t));
      b0 = vlib_get_buffer (vm, mq->buffers[s]);
      d->region = b0->buffer_pool_index + 1;
      d->offset =
	(void *) b0->data - mif->regions[d->region].shm + start_offset;
    }

  CLIB_MEMORY_STORE_BARRIER ();
  ring->head = head;

  return n_rx_packets;
}
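
/* Top-level dispatch: poll every (interface, queue) pair assigned to this
 * thread and call the matching specialization. Passing the mode and ring
 * type as compile-time constants into the always-inlined workers lets the
 * compiler drop the branches for the variants that cannot occur. */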
uword
CLIB_MULTIARCH_FN (memif_input_fn) (vlib_main_t * vm,
				    vlib_node_runtime_t * node,
				    vlib_frame_t * frame)
{
  u32 n_rx = 0;
  memif_main_t *mm = &memif_main;
  vnet_device_input_runtime_t *rt = (void *) node->runtime_data;
  vnet_device_and_queue_t *dq;
  memif_interface_mode_t mode_ip = MEMIF_INTERFACE_MODE_IP;
  memif_interface_mode_t mode_eth = MEMIF_INTERFACE_MODE_ETHERNET;
  foreach_device_and_queue (dq, rt->devices_and_queues)
  {
    memif_if_t *mif;
    mif = vec_elt_at_index (mm->interfaces, dq->dev_instance);
    if ((mif->flags & MEMIF_IF_FLAG_ADMIN_UP) &&
	(mif->flags & MEMIF_IF_FLAG_CONNECTED))
      {
	if (mif->flags & MEMIF_IF_FLAG_ZERO_COPY)
	  {
	    if (mif->mode == MEMIF_INTERFACE_MODE_IP)
	      n_rx += memif_device_input_zc_inline (vm, node, frame, mif,
						    dq->queue_id, mode_ip);
	    else
	      n_rx += memif_device_input_zc_inline (vm, node, frame, mif,
						    dq->queue_id, mode_eth);
	  }
	else if (mif->flags & MEMIF_IF_FLAG_IS_SLAVE)
	  {
	    if (mif->mode == MEMIF_INTERFACE_MODE_IP)
	      n_rx += memif_device_input_inline (vm, node, frame, mif,
						 MEMIF_RING_M2S, dq->queue_id,
						 mode_ip);
	    else
	      n_rx += memif_device_input_inline (vm, node, frame, mif,
						 MEMIF_RING_M2S, dq->queue_id,
						 mode_eth);
	  }
	else
	  {
	    if (mif->mode == MEMIF_INTERFACE_MODE_IP)
	      n_rx += memif_device_input_inline (vm, node, frame, mif,
						 MEMIF_RING_S2M, dq->queue_id,
						 mode_ip);
	    else
	      n_rx += memif_device_input_inline (vm, node, frame, mif,
						 MEMIF_RING_S2M, dq->queue_id,
						 mode_eth);
	  }
      }
  }

  return n_rx;
}
#ifndef CLIB_MULTIARCH_VARIANT
/* *INDENT-OFF* */
VLIB_REGISTER_NODE (memif_input_node) = {
  .function = memif_input_fn,
  .name = "memif-input",
  .sibling_of = "device-input",
  .format_trace = format_memif_input_trace,
  .type = VLIB_NODE_TYPE_INPUT,
  .state = VLIB_NODE_STATE_INTERRUPT,
  .n_errors = MEMIF_INPUT_N_ERROR,
  .error_strings = memif_input_error_strings,
};
/* *INDENT-ON* */
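
/* The weak symbols below resolve against the AVX2/AVX-512 builds of this
 * file when they are linked in; the constructor then points the node at
 * the best variant the running CPU supports, so CPU dispatch happens once
 * at startup rather than per frame. */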
vlib_node_function_t __clib_weak memif_input_fn_avx512;
vlib_node_function_t __clib_weak memif_input_fn_avx2;

#if __x86_64__
static void __clib_constructor
memif_input_multiarch_select (void)
{
  if (memif_input_fn_avx512 && clib_cpu_supports_avx512f ())
    memif_input_node.function = memif_input_fn_avx512;
  else if (memif_input_fn_avx2 && clib_cpu_supports_avx2 ())
    memif_input_node.function = memif_input_fn_avx2;
}
#endif
#endif
/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */