/*
 *------------------------------------------------------------------
 * Copyright (c) 2016 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *------------------------------------------------------------------
 */
#include <sys/ioctl.h>

#include <vlib/vlib.h>
#include <vlib/unix/unix.h>
#include <vnet/ethernet/ethernet.h>
#include <vnet/interface/rx_queue_funcs.h>
#include <vnet/feature/feature.h>

#include <memif/memif.h>
#include <memif/private.h>
#define foreach_memif_input_error                                             \
  _ (BUFFER_ALLOC_FAIL, "buffer allocation failed")                           \
  _ (BAD_DESC, "bad descriptor")                                              \
  _ (NOT_IP, "not ip packet")

typedef enum
{
#define _(f,s) MEMIF_INPUT_ERROR_##f,
  foreach_memif_input_error
#undef _
    MEMIF_INPUT_N_ERROR,
} memif_input_error_t;
static __clib_unused char *memif_input_error_strings[] = {
#define _(n,s) s,
  foreach_memif_input_error
#undef _
};
typedef struct
{
  u32 next_index;
  u32 hw_if_index;
  u16 ring;
} memif_input_trace_t;
static __clib_unused u8 *
format_memif_input_trace (u8 * s, va_list * args)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
  memif_input_trace_t *t = va_arg (*args, memif_input_trace_t *);
  u32 indent = format_get_indent (s);

  s = format (s, "memif: hw_if_index %d next-index %d",
	      t->hw_if_index, t->next_index);
  s = format (s, "\n%Uslot: ring %u", format_white_space, indent + 2,
	      t->ring);
  return s;
}
static_always_inline u32
memif_next_from_ip_hdr (vlib_node_runtime_t * node, vlib_buffer_t * b)
{
  u8 *ptr = vlib_buffer_get_current (b);
  u8 v = *ptr & 0xf0;

  if (PREDICT_TRUE (v == 0x40))
    return VNET_DEVICE_INPUT_NEXT_IP4_NCS_INPUT;
  else if (PREDICT_TRUE (v == 0x60))
    return VNET_DEVICE_INPUT_NEXT_IP6_INPUT;

  b->error = node->errors[MEMIF_INPUT_ERROR_NOT_IP];
  return VNET_DEVICE_INPUT_NEXT_DROP;
}
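/* Note: the dispatch above keys on the IP version nibble, the top four
 * bits of the first header byte.  A typical IPv4 header starts with
 * 0x45 (version 4, IHL 5), so 0x45 & 0xf0 == 0x40; an IPv6 header
 * starts with 0x6X, giving 0x60.  Anything else is counted as NOT_IP
 * and sent to the drop next. */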
static_always_inline void
memif_trace_buffer (vlib_main_t * vm, vlib_node_runtime_t * node,
		    memif_if_t * mif, vlib_buffer_t * b, u32 next, u16 qid,
		    uword * n_tracep)
{
  VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b);

  if (PREDICT_TRUE
      (b != 0 && vlib_trace_buffer (vm, node, next, b, /* follow_chain */ 0)))
    {
      memif_input_trace_t *tr;
      vlib_set_trace_count (vm, node, --(*n_tracep));
      tr = vlib_add_trace (vm, node, b, sizeof (*tr));
      tr->next_index = next;
      tr->hw_if_index = mif->hw_if_index;
      tr->ring = qid;
    }
}
static_always_inline void
memif_add_copy_op (memif_per_thread_data_t * ptd, void *data, u32 len,
		   u16 buffer_offset, u16 buffer_vec_index)
{
  memif_copy_op_t *co;
  vec_add2_aligned (ptd->copy_ops, co, 1, CLIB_CACHE_LINE_BYTES);
  co->data = data;
  co->data_len = len;
  co->buffer_offset = buffer_offset;
  co->buffer_vec_index = buffer_vec_index;
}
static_always_inline void
memif_add_to_chain (vlib_main_t * vm, vlib_buffer_t * b, u32 * buffers,
		    u32 buffer_size)
{
  vlib_buffer_t *seg = b;
  i32 bytes_left = b->current_length - buffer_size + b->current_data;

  if (PREDICT_TRUE (bytes_left <= 0))
    return;

  b->current_length -= bytes_left;
  b->total_length_not_including_first_buffer = bytes_left;

  while (bytes_left)
    {
      seg->flags |= VLIB_BUFFER_NEXT_PRESENT;
      seg->next_buffer = buffers[0];
      seg = vlib_get_buffer (vm, buffers[0]);
      buffers++;
      seg->current_data = 0;
      seg->current_length = clib_min (buffer_size, bytes_left);
      bytes_left -= seg->current_length;
    }
}
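/* Worked example (assuming a 2048-byte default buffer size and
 * current_data == 0): a packet written in with current_length == 5000
 * gives bytes_left = 5000 - 2048 = 2952, so the head buffer keeps 2048
 * bytes and two buffers from buffers[] are chained on, carrying 2048
 * and 904 bytes respectively. */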
static_always_inline u32
sat_sub (u32 x, u32 y)
{
  u32 res = x - y;
  res &= -(res <= x);
  return res;
}
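/* sat_sub() clamps at zero instead of wrapping: -(res <= x) is an
 * all-ones mask when the subtraction did not underflow and zero when
 * it did.  E.g. sat_sub (5, 3) == 2, but sat_sub (3, 5) == 0 rather
 * than a huge wrapped value. */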
/* branchless validation of the descriptor - uses saturated subtraction */
static_always_inline u32
memif_desc_is_invalid (memif_if_t * mif, memif_desc_t * d, u32 buffer_length)
{
  u32 rv;
  u16 valid_flags = MEMIF_DESC_FLAG_NEXT;

  rv = d->flags & (~valid_flags);
  rv |= sat_sub (d->region + 1, vec_len (mif->regions));
  rv |= sat_sub (d->length, buffer_length);
  rv |= sat_sub (d->offset + d->length, mif->regions[d->region].region_size);

  if (PREDICT_FALSE (rv))
    {
      mif->flags |= MEMIF_IF_FLAG_ERROR;
      return 1;
    }
  return 0;
}
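/* Each sat_sub () term above is zero exactly when its bound holds (the
 * region index is in range, the length fits the buffer, and
 * offset + length fits the region), so rv goes nonzero on any
 * violation without a branch per check; e.g. a descriptor claiming
 * offset 2000 and length 100 in a 2048-byte region yields
 * sat_sub (2100, 2048) == 52.  A bad descriptor also latches
 * MEMIF_IF_FLAG_ERROR on the interface. */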
static_always_inline uword
memif_device_input_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
			   vlib_frame_t * frame, memif_if_t * mif,
			   memif_ring_type_t type, u16 qid,
			   memif_interface_mode_t mode)
{
  vnet_main_t *vnm = vnet_get_main ();
  memif_main_t *mm = &memif_main;
  memif_ring_t *ring;
  memif_queue_t *mq;
  u16 buffer_size = vlib_buffer_get_default_data_size (vm);
  uword n_trace = vlib_get_trace_count (vm, node);
  u16 nexts[MEMIF_RX_VECTOR_SZ], *next = nexts;
  u32 _to_next_bufs[MEMIF_RX_VECTOR_SZ], *to_next_bufs = _to_next_bufs, *bi;
  u32 n_rx_packets = 0, n_rx_bytes = 0;
  u32 n_left, n_left_to_next;
  u32 next_index = VNET_DEVICE_INPUT_NEXT_ETHERNET_INPUT;
  vlib_buffer_t *b0, *b1, *b2, *b3;
  u32 thread_index = vm->thread_index;
  memif_per_thread_data_t *ptd = vec_elt_at_index (mm->per_thread_data,
						   thread_index);
  vlib_buffer_t bt;
  u16 cur_slot, last_slot, ring_size, n_slots, mask;
  i16 start_offset;
  u16 n_buffers = 0, n_alloc;
  memif_copy_op_t *co;
  memif_packet_op_t *po;
  memif_region_index_t last_region = ~0;
  void *last_region_shm = 0;
  void *last_region_max = 0;

  mq = vec_elt_at_index (mif->rx_queues, qid);
  ring = mq->ring;
  ring_size = 1 << mq->log2_ring_size;
  mask = ring_size - 1;

  /* assume that somebody will want to add ethernet header on the packet
     so start with IP header at offset 14 */
  start_offset = (mode == MEMIF_INTERFACE_MODE_IP) ? 14 : 0;

  /* for S2M rings, we are consumers of packet buffers, and for M2S rings we
     are producers of empty buffers */
  cur_slot = (type == MEMIF_RING_S2M) ? mq->last_head : mq->last_tail;
  last_slot = (type == MEMIF_RING_S2M) ? ring->head : ring->tail;
  if (cur_slot == last_slot)
    goto refill;
  n_slots = last_slot - cur_slot;
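  /* head/tail are free-running u16 counters, so this subtraction is
     modulo 2^16 and stays correct across wrap; e.g. with cur_slot ==
     65534 and last_slot == 2, (u16) (2 - 65534) == 4 pending slots. */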
  /* construct copy and packet vector out of ring slots */
  while (n_slots && n_rx_packets < MEMIF_RX_VECTOR_SZ)
    {
      u32 dst_off, src_off, n_bytes_left;
      u16 s0;
      memif_desc_t *d0;
      void *mb0;
      po = ptd->packet_ops + n_rx_packets;
      n_rx_packets++;
      po->first_buffer_vec_index = n_buffers++;
      po->packet_len = 0;
      src_off = 0;
      dst_off = start_offset;

    next_slot:
      CLIB_PREFETCH (&ring->desc[(cur_slot + 8) & mask],
		     CLIB_CACHE_LINE_BYTES, LOAD);
      s0 = cur_slot & mask;
      d0 = &ring->desc[s0];
      n_bytes_left = d0->length;

      /* slave resets buffer length,
       * so it can produce full size buffer for master
       */
      if (type == MEMIF_RING_M2S)
	d0->length = mif->run.buffer_size;

      po->packet_len += n_bytes_left;
      if (PREDICT_FALSE (last_region != d0->region))
	{
	  last_region_shm = mif->regions[d0->region].shm;
	  last_region = d0->region;
	  last_region_max =
	    last_region_shm + mif->regions[last_region].region_size;
	}
      mb0 = last_region_shm + d0->offset;

      if (PREDICT_FALSE (mb0 + n_bytes_left > last_region_max))
	vlib_error_count (vm, node->node_index, MEMIF_INPUT_ERROR_BAD_DESC, 1);
      else
	do
	  {
	    u32 dst_free = buffer_size - dst_off;
	    if (dst_free == 0)
	      {
		dst_off = 0;
		dst_free = buffer_size;
		n_buffers++;
	      }
	    u32 bytes_to_copy = clib_min (dst_free, n_bytes_left);
	    memif_add_copy_op (ptd, mb0 + src_off, bytes_to_copy, dst_off,
			       n_buffers - 1);
	    n_bytes_left -= bytes_to_copy;
	    src_off += bytes_to_copy;
	    dst_off += bytes_to_copy;
	  }
	while (PREDICT_FALSE (n_bytes_left));
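      /* Example of the split above (assuming 2048-byte buffers and
         start_offset 0): a 3000-byte descriptor yields two copy ops,
         2048 bytes into the current buffer and 952 bytes at offset 0
         of a newly counted one. */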
      cur_slot++;
      n_slots--;
      if ((d0->flags & MEMIF_DESC_FLAG_NEXT) && n_slots)
	{
	  src_off = 0;
	  goto next_slot;
	}
    }

  /* allocate free buffers */
  vec_validate_aligned (ptd->buffers, n_buffers - 1, CLIB_CACHE_LINE_BYTES);
  n_alloc = vlib_buffer_alloc_from_pool (vm, ptd->buffers, n_buffers,
					 mq->buffer_pool_index);
  if (PREDICT_FALSE (n_alloc != n_buffers))
    {
      if (n_alloc)
	vlib_buffer_free (vm, ptd->buffers, n_alloc);
      vlib_error_count (vm, node->node_index,
			MEMIF_INPUT_ERROR_BUFFER_ALLOC_FAIL, 1);
      goto refill;
    }
  /* copy data */
  n_left = vec_len (ptd->copy_ops);
  co = ptd->copy_ops;
  while (n_left >= 8)
    {
      CLIB_PREFETCH (co[4].data, CLIB_CACHE_LINE_BYTES, LOAD);
      CLIB_PREFETCH (co[5].data, CLIB_CACHE_LINE_BYTES, LOAD);
      CLIB_PREFETCH (co[6].data, CLIB_CACHE_LINE_BYTES, LOAD);
      CLIB_PREFETCH (co[7].data, CLIB_CACHE_LINE_BYTES, LOAD);

      b0 = vlib_get_buffer (vm, ptd->buffers[co[0].buffer_vec_index]);
      b1 = vlib_get_buffer (vm, ptd->buffers[co[1].buffer_vec_index]);
      b2 = vlib_get_buffer (vm, ptd->buffers[co[2].buffer_vec_index]);
      b3 = vlib_get_buffer (vm, ptd->buffers[co[3].buffer_vec_index]);

      clib_memcpy_fast (b0->data + co[0].buffer_offset, co[0].data,
			co[0].data_len);
      clib_memcpy_fast (b1->data + co[1].buffer_offset, co[1].data,
			co[1].data_len);
      clib_memcpy_fast (b2->data + co[2].buffer_offset, co[2].data,
			co[2].data_len);
      clib_memcpy_fast (b3->data + co[3].buffer_offset, co[3].data,
			co[3].data_len);

      co += 4;
      n_left -= 4;
    }
  while (n_left)
    {
      b0 = vlib_get_buffer (vm, ptd->buffers[co[0].buffer_vec_index]);
      clib_memcpy_fast (b0->data + co[0].buffer_offset, co[0].data,
			co[0].data_len);
      co += 1;
      n_left -= 1;
    }
  /* release slots from the ring */
  if (type == MEMIF_RING_S2M)
    {
      CLIB_MEMORY_STORE_BARRIER ();
      ring->tail = mq->last_head = cur_slot;
    }
  else
    {
      mq->last_tail = cur_slot;
    }
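  /* For S2M rings VPP is the consumer: publishing ring->tail tells the
     peer which descriptors may be reused, and the store barrier keeps
     that update from being reordered ahead of the data copies above.
     For M2S rings only the private last_tail marker moves here;
     ring->head is advanced in the refill path below. */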
  /* prepare buffer template and next indices */
  vnet_buffer (&ptd->buffer_template)->sw_if_index[VLIB_RX] =
    mif->sw_if_index;
  vnet_buffer (&ptd->buffer_template)->feature_arc_index = 0;
  ptd->buffer_template.current_data = start_offset;
  ptd->buffer_template.current_config_index = 0;
  ptd->buffer_template.buffer_pool_index = mq->buffer_pool_index;
  ptd->buffer_template.ref_count = 1;

  if (mode == MEMIF_INTERFACE_MODE_ETHERNET)
    {
      next_index = VNET_DEVICE_INPUT_NEXT_ETHERNET_INPUT;
      if (mif->per_interface_next_index != ~0)
	next_index = mif->per_interface_next_index;
      else
	vnet_feature_start_device_input_x1 (mif->sw_if_index, &next_index,
					    &ptd->buffer_template);

      vlib_get_new_next_frame (vm, node, next_index, to_next_bufs,
			       n_left_to_next);
      if (PREDICT_TRUE (next_index == VNET_DEVICE_INPUT_NEXT_ETHERNET_INPUT))
	{
	  vlib_next_frame_t *nf;
	  vlib_frame_t *f;
	  ethernet_input_frame_t *ef;
	  nf = vlib_node_runtime_get_next_frame (vm, node, next_index);
	  f = vlib_get_frame (vm, nf->frame);
	  f->flags = ETH_INPUT_FRAME_F_SINGLE_SW_IF_IDX;

	  ef = vlib_frame_scalar_args (f);
	  ef->sw_if_index = mif->sw_if_index;
	  ef->hw_if_index = mif->hw_if_index;
	  vlib_frame_no_append (f);
	}
    }
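  /* Tagging the frame ETH_INPUT_FRAME_F_SINGLE_SW_IF_IDX above lets
     ethernet-input skip per-packet interface checks, since every buffer
     in this frame comes from the same sw_if_index;
     vlib_frame_no_append () keeps other producers from appending
     buffers that would break that invariant. */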
  /* process buffer metadata */
  u32 n_from = n_rx_packets;
  po = ptd->packet_ops;
  bi = to_next_bufs;

  /* copy template into local variable - will save per packet load */
  vlib_buffer_copy_template (&bt, &ptd->buffer_template);

  while (n_from >= 8)
    {
      /* prefetch the packets 4 ahead of the ones processed below */
      b0 = vlib_get_buffer (vm, ptd->buffers[po[4].first_buffer_vec_index]);
      b1 = vlib_get_buffer (vm, ptd->buffers[po[5].first_buffer_vec_index]);
      b2 = vlib_get_buffer (vm, ptd->buffers[po[6].first_buffer_vec_index]);
      b3 = vlib_get_buffer (vm, ptd->buffers[po[7].first_buffer_vec_index]);

      vlib_prefetch_buffer_header (b0, STORE);
      vlib_prefetch_buffer_header (b1, STORE);
      vlib_prefetch_buffer_header (b2, STORE);
      vlib_prefetch_buffer_header (b3, STORE);

      /* enqueue buffer */
      u32 fbvi[4];
      fbvi[0] = po[0].first_buffer_vec_index;
      fbvi[1] = po[1].first_buffer_vec_index;
      fbvi[2] = po[2].first_buffer_vec_index;
      fbvi[3] = po[3].first_buffer_vec_index;

      bi[0] = ptd->buffers[fbvi[0]];
      bi[1] = ptd->buffers[fbvi[1]];
      bi[2] = ptd->buffers[fbvi[2]];
      bi[3] = ptd->buffers[fbvi[3]];

      b0 = vlib_get_buffer (vm, bi[0]);
      b1 = vlib_get_buffer (vm, bi[1]);
      b2 = vlib_get_buffer (vm, bi[2]);
      b3 = vlib_get_buffer (vm, bi[3]);

      vlib_buffer_copy_template (b0, &bt);
      vlib_buffer_copy_template (b1, &bt);
      vlib_buffer_copy_template (b2, &bt);
      vlib_buffer_copy_template (b3, &bt);

      b0->current_length = po[0].packet_len;
      n_rx_bytes += b0->current_length;
      b1->current_length = po[1].packet_len;
      n_rx_bytes += b1->current_length;
      b2->current_length = po[2].packet_len;
      n_rx_bytes += b2->current_length;
      b3->current_length = po[3].packet_len;
      n_rx_bytes += b3->current_length;

      memif_add_to_chain (vm, b0, ptd->buffers + fbvi[0] + 1, buffer_size);
      memif_add_to_chain (vm, b1, ptd->buffers + fbvi[1] + 1, buffer_size);
      memif_add_to_chain (vm, b2, ptd->buffers + fbvi[2] + 1, buffer_size);
      memif_add_to_chain (vm, b3, ptd->buffers + fbvi[3] + 1, buffer_size);

      if (mode == MEMIF_INTERFACE_MODE_IP)
	{
	  next[0] = memif_next_from_ip_hdr (node, b0);
	  next[1] = memif_next_from_ip_hdr (node, b1);
	  next[2] = memif_next_from_ip_hdr (node, b2);
	  next[3] = memif_next_from_ip_hdr (node, b3);
	}

      /* next */
      n_from -= 4;
      po += 4;
      bi += 4;
      next += 4;
    }
  while (n_from)
    {
      u32 fbvi[4];
      /* enqueue buffer */
      fbvi[0] = po[0].first_buffer_vec_index;
      bi[0] = ptd->buffers[fbvi[0]];
      b0 = vlib_get_buffer (vm, bi[0]);
      vlib_buffer_copy_template (b0, &bt);
      b0->current_length = po->packet_len;
      n_rx_bytes += b0->current_length;

      memif_add_to_chain (vm, b0, ptd->buffers + fbvi[0] + 1, buffer_size);

      if (mode == MEMIF_INTERFACE_MODE_IP)
	{
	  next[0] = memif_next_from_ip_hdr (node, b0);
	}

      /* next */
      n_from -= 1;
      po += 1;
      bi += 1;
      next += 1;
    }
  /* packet trace if enabled */
  if (PREDICT_FALSE ((n_trace = vlib_get_trace_count (vm, node))))
    {
      u32 n_left = n_rx_packets;
      bi = to_next_bufs;
      next = nexts;
      u32 ni = next_index;
      while (n_trace && n_left)
	{
	  vlib_buffer_t *b;
	  memif_input_trace_t *tr;
	  if (mode != MEMIF_INTERFACE_MODE_ETHERNET)
	    ni = next[0];
	  b = vlib_get_buffer (vm, bi[0]);
	  if (PREDICT_TRUE
	      (vlib_trace_buffer (vm, node, ni, b, /* follow_chain */ 0)))
	    {
	      tr = vlib_add_trace (vm, node, b, sizeof (*tr));
	      tr->next_index = ni;
	      tr->hw_if_index = mif->hw_if_index;
	      tr->ring = qid;
	      n_trace--;
	    }

	  /* next */
	  n_left--;
	  bi++;
	  next++;
	}
      vlib_set_trace_count (vm, node, n_trace);
    }
  if (mode == MEMIF_INTERFACE_MODE_ETHERNET)
    {
      n_left_to_next -= n_rx_packets;
      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }
  else
    vlib_buffer_enqueue_to_next (vm, node, to_next_bufs, nexts, n_rx_packets);

  vlib_increment_combined_counter (vnm->interface_main.combined_sw_if_counters
				   + VNET_INTERFACE_COUNTER_RX, thread_index,
				   mif->sw_if_index, n_rx_packets,
				   n_rx_bytes);
  /* refill ring with empty buffers */
refill:
  vec_reset_length (ptd->buffers);
  vec_reset_length (ptd->copy_ops);

  if (type == MEMIF_RING_M2S)
    {
      u16 head = ring->head;
      n_slots = ring_size - head + mq->last_tail;
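      /* free-slot count in free-running u16 arithmetic: ring_size minus
         the descriptors still outstanding between last_tail and head */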
      while (n_slots--)
	{
	  u16 s = head++ & mask;
	  memif_desc_t *d = &ring->desc[s];
	  d->length = mif->run.buffer_size;
	}

      CLIB_MEMORY_STORE_BARRIER ();
      ring->head = head;
    }

  return n_rx_packets;
}
static_always_inline uword
memif_device_input_zc_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
			      vlib_frame_t * frame, memif_if_t * mif,
			      u16 qid, memif_interface_mode_t mode)
{
  vnet_main_t *vnm = vnet_get_main ();
  memif_main_t *mm = &memif_main;
  memif_ring_t *ring;
  memif_queue_t *mq;
  u32 next_index;
  uword n_trace = vlib_get_trace_count (vm, node);
  u32 n_rx_packets = 0, n_rx_bytes = 0;
  u32 *to_next = 0, *buffers;
  u32 bi0, bi1, bi2, bi3;
  u16 s0, s1, s2, s3;
  memif_desc_t *d0, *d1, *d2, *d3;
  vlib_buffer_t *b0, *b1, *b2, *b3;
  u32 thread_index = vm->thread_index;
  memif_per_thread_data_t *ptd = vec_elt_at_index (mm->per_thread_data,
						   thread_index);
  u16 cur_slot, last_slot, ring_size, n_slots, mask, head;
  i16 start_offset;
  u32 buffer_length;
  u16 n_alloc, n_from;

  mq = vec_elt_at_index (mif->rx_queues, qid);
  ring = mq->ring;
  ring_size = 1 << mq->log2_ring_size;
  mask = ring_size - 1;

  next_index = (mode == MEMIF_INTERFACE_MODE_IP) ?
    VNET_DEVICE_INPUT_NEXT_IP6_INPUT : VNET_DEVICE_INPUT_NEXT_ETHERNET_INPUT;
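  /* in IP mode this is only a seed value; the per-packet next node is
     derived from the IP version nibble further down */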
  /* assume that somebody will want to add ethernet header on the packet
     so start with IP header at offset 14 */
  start_offset = (mode == MEMIF_INTERFACE_MODE_IP) ? 14 : 0;
  buffer_length = vlib_buffer_get_default_data_size (vm) - start_offset;

  cur_slot = mq->last_tail;
  last_slot = ring->tail;
  if (cur_slot == last_slot)
    goto refill;
  n_slots = last_slot - cur_slot;
  /* process ring slots */
  vec_validate_aligned (ptd->buffers, MEMIF_RX_VECTOR_SZ,
			CLIB_CACHE_LINE_BYTES);
  while (n_slots && n_rx_packets < MEMIF_RX_VECTOR_SZ)
    {
      vlib_buffer_t *hb;

      s0 = cur_slot & mask;
      bi0 = mq->buffers[s0];
      ptd->buffers[n_rx_packets++] = bi0;

      CLIB_PREFETCH (&ring->desc[(cur_slot + 8) & mask],
		     CLIB_CACHE_LINE_BYTES, LOAD);
      d0 = &ring->desc[s0];
      hb = b0 = vlib_get_buffer (vm, bi0);
      b0->current_data = start_offset;
      b0->current_length = d0->length;
      n_rx_bytes += d0->length;

      /* descriptor validation is currently compiled out ("if (0 && ...)") */
      if (0 && memif_desc_is_invalid (mif, d0, buffer_length))
	return 0;
      cur_slot++;
      n_slots--;
      if (PREDICT_FALSE ((d0->flags & MEMIF_DESC_FLAG_NEXT) && n_slots))
	{
	  hb->flags |= VLIB_BUFFER_TOTAL_LENGTH_VALID;
	next_slot:
	  s0 = cur_slot & mask;
	  d0 = &ring->desc[s0];
	  bi0 = mq->buffers[s0];

	  /* previous buffer */
	  b0->next_buffer = bi0;
	  b0->flags |= VLIB_BUFFER_NEXT_PRESENT;

	  /* current buffer */
	  b0 = vlib_get_buffer (vm, bi0);
	  b0->current_data = start_offset;
	  b0->current_length = d0->length;
	  hb->total_length_not_including_first_buffer += d0->length;
	  n_rx_bytes += d0->length;

	  cur_slot++;
	  n_slots--;
	  if ((d0->flags & MEMIF_DESC_FLAG_NEXT) && n_slots)
	    goto next_slot;
	}
    }

  /* release slots from the ring */
  mq->last_tail = cur_slot;
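  /* zero-copy RX consumes descriptors the peer has completed: the peer
     advances ring->tail as it fills the buffers VPP lent it, while VPP
     only moves its private last_tail marker here; ring->head moves in
     the refill path once fresh buffers are attached */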
  n_from = n_rx_packets;
  buffers = ptd->buffers;

  while (n_from)
    {
      u32 n_left_to_next;
      u32 next0, next1, next2, next3;

      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
      while (n_from >= 8 && n_left_to_next >= 4)
	{
	  b0 = vlib_get_buffer (vm, buffers[4]);
	  b1 = vlib_get_buffer (vm, buffers[5]);
	  b2 = vlib_get_buffer (vm, buffers[6]);
	  b3 = vlib_get_buffer (vm, buffers[7]);
	  vlib_prefetch_buffer_header (b0, STORE);
	  vlib_prefetch_buffer_header (b1, STORE);
	  vlib_prefetch_buffer_header (b2, STORE);
	  vlib_prefetch_buffer_header (b3, STORE);

	  /* enqueue */
	  to_next[0] = bi0 = buffers[0];
	  to_next[1] = bi1 = buffers[1];
	  to_next[2] = bi2 = buffers[2];
	  to_next[3] = bi3 = buffers[3];
	  to_next += 4;
	  n_left_to_next -= 4;
	  buffers += 4;

	  b0 = vlib_get_buffer (vm, bi0);
	  b1 = vlib_get_buffer (vm, bi1);
	  b2 = vlib_get_buffer (vm, bi2);
	  b3 = vlib_get_buffer (vm, bi3);

	  vnet_buffer (b0)->sw_if_index[VLIB_RX] = mif->sw_if_index;
	  vnet_buffer (b0)->sw_if_index[VLIB_TX] = ~0;
	  vnet_buffer (b1)->sw_if_index[VLIB_RX] = mif->sw_if_index;
	  vnet_buffer (b1)->sw_if_index[VLIB_TX] = ~0;
	  vnet_buffer (b2)->sw_if_index[VLIB_RX] = mif->sw_if_index;
	  vnet_buffer (b2)->sw_if_index[VLIB_TX] = ~0;
	  vnet_buffer (b3)->sw_if_index[VLIB_RX] = mif->sw_if_index;
	  vnet_buffer (b3)->sw_if_index[VLIB_TX] = ~0;

	  if (mode == MEMIF_INTERFACE_MODE_IP)
	    {
	      next0 = memif_next_from_ip_hdr (node, b0);
	      next1 = memif_next_from_ip_hdr (node, b1);
	      next2 = memif_next_from_ip_hdr (node, b2);
	      next3 = memif_next_from_ip_hdr (node, b3);
	    }
	  else if (mode == MEMIF_INTERFACE_MODE_ETHERNET)
	    {
	      if (PREDICT_FALSE (mif->per_interface_next_index != ~0))
		{
		  next0 = mif->per_interface_next_index;
		  next1 = mif->per_interface_next_index;
		  next2 = mif->per_interface_next_index;
		  next3 = mif->per_interface_next_index;
		}
	      else
		{
		  next0 = next1 = next2 = next3 = next_index;
		  /* redirect if feature path enabled */
		  vnet_feature_start_device_input_x1 (mif->sw_if_index,
						      &next0, b0);
		  vnet_feature_start_device_input_x1 (mif->sw_if_index,
						      &next1, b1);
		  vnet_feature_start_device_input_x1 (mif->sw_if_index,
						      &next2, b2);
		  vnet_feature_start_device_input_x1 (mif->sw_if_index,
						      &next3, b3);
		}
	    }

	  /* trace */
	  if (PREDICT_FALSE (n_trace > 0))
	    memif_trace_buffer (vm, node, mif, b0, next0, qid, &n_trace);
	  if (PREDICT_FALSE (n_trace > 0))
	    memif_trace_buffer (vm, node, mif, b1, next1, qid, &n_trace);
	  if (PREDICT_FALSE (n_trace > 0))
	    memif_trace_buffer (vm, node, mif, b2, next2, qid, &n_trace);
	  if (PREDICT_FALSE (n_trace > 0))
	    memif_trace_buffer (vm, node, mif, b3, next3, qid, &n_trace);

	  /* enqueue */
	  vlib_validate_buffer_enqueue_x4 (vm, node, next_index, to_next,
					   n_left_to_next, bi0, bi1, bi2, bi3,
					   next0, next1, next2, next3);

	  /* next */
	  n_from -= 4;
	}
      while (n_from && n_left_to_next)
	{
	  /* enqueue */
	  to_next[0] = bi0 = buffers[0];
	  to_next += 1;
	  n_left_to_next--;
	  buffers += 1;

	  b0 = vlib_get_buffer (vm, bi0);
	  vnet_buffer (b0)->sw_if_index[VLIB_RX] = mif->sw_if_index;
	  vnet_buffer (b0)->sw_if_index[VLIB_TX] = ~0;

	  if (mode == MEMIF_INTERFACE_MODE_IP)
	    {
	      next0 = memif_next_from_ip_hdr (node, b0);
	    }
	  else if (mode == MEMIF_INTERFACE_MODE_ETHERNET)
	    {
	      if (PREDICT_FALSE (mif->per_interface_next_index != ~0))
		next0 = mif->per_interface_next_index;
	      else
		{
		  next0 = next_index;
		  /* redirect if feature path enabled */
		  vnet_feature_start_device_input_x1 (mif->sw_if_index,
						      &next0, b0);
		}
	    }

	  /* trace */
	  if (PREDICT_FALSE (n_trace > 0))
	    memif_trace_buffer (vm, node, mif, b0, next0, qid, &n_trace);

	  /* enqueue */
	  vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
					   n_left_to_next, bi0, next0);

	  /* next */
	  n_from--;
	}
      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }

  vlib_increment_combined_counter (vnm->interface_main.combined_sw_if_counters
				   + VNET_INTERFACE_COUNTER_RX, thread_index,
				   mif->sw_if_index, n_rx_packets,
				   n_rx_bytes);
  /* refill ring with empty buffers */
refill:
  vec_reset_length (ptd->buffers);

  head = ring->head;
  n_slots = ring_size - head + mq->last_tail;

  if (n_slots < 32)
    goto done;

  memif_desc_t *dt = &ptd->desc_template;
  clib_memset (dt, 0, sizeof (memif_desc_t));
  dt->length = buffer_length;

  n_alloc = vlib_buffer_alloc_to_ring_from_pool (vm, mq->buffers, head & mask,
						 ring_size, n_slots,
						 mq->buffer_pool_index);

  if (PREDICT_FALSE (n_alloc != n_slots))
    {
      vlib_error_count (vm, node->node_index,
			MEMIF_INPUT_ERROR_BUFFER_ALLOC_FAIL, 1);
    }
  while (n_alloc >= 32)
    {
      bi0 = mq->buffers[(head + 4) & mask];
      vlib_prefetch_buffer_with_index (vm, bi0, LOAD);
      bi1 = mq->buffers[(head + 5) & mask];
      vlib_prefetch_buffer_with_index (vm, bi1, LOAD);
      bi2 = mq->buffers[(head + 6) & mask];
      vlib_prefetch_buffer_with_index (vm, bi2, LOAD);
      bi3 = mq->buffers[(head + 7) & mask];
      vlib_prefetch_buffer_with_index (vm, bi3, LOAD);

      s0 = head++ & mask;
      s1 = head++ & mask;
      s2 = head++ & mask;
      s3 = head++ & mask;

      d0 = &ring->desc[s0];
      d1 = &ring->desc[s1];
      d2 = &ring->desc[s2];
      d3 = &ring->desc[s3];

      clib_memcpy_fast (d0, dt, sizeof (memif_desc_t));
      clib_memcpy_fast (d1, dt, sizeof (memif_desc_t));
      clib_memcpy_fast (d2, dt, sizeof (memif_desc_t));
      clib_memcpy_fast (d3, dt, sizeof (memif_desc_t));

      b0 = vlib_get_buffer (vm, mq->buffers[s0]);
      b1 = vlib_get_buffer (vm, mq->buffers[s1]);
      b2 = vlib_get_buffer (vm, mq->buffers[s2]);
      b3 = vlib_get_buffer (vm, mq->buffers[s3]);

      d0->region = b0->buffer_pool_index + 1;
      d1->region = b1->buffer_pool_index + 1;
      d2->region = b2->buffer_pool_index + 1;
      d3->region = b3->buffer_pool_index + 1;

      d0->offset =
	(void *) b0->data - mif->regions[d0->region].shm + start_offset;
      d1->offset =
	(void *) b1->data - mif->regions[d1->region].shm + start_offset;
      d2->offset =
	(void *) b2->data - mif->regions[d2->region].shm + start_offset;
      d3->offset =
	(void *) b3->data - mif->regions[d3->region].shm + start_offset;

      n_alloc -= 4;
    }
  while (n_alloc)
    {
      s0 = head++ & mask;
      d0 = &ring->desc[s0];
      clib_memcpy_fast (d0, dt, sizeof (memif_desc_t));
      b0 = vlib_get_buffer (vm, mq->buffers[s0]);
      d0->region = b0->buffer_pool_index + 1;
      d0->offset =
	(void *) b0->data - mif->regions[d0->region].shm + start_offset;

      n_alloc -= 1;
    }

  CLIB_MEMORY_STORE_BARRIER ();
  ring->head = head;

done:
  return n_rx_packets;
}
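/* Note on the refill above: region 0 holds the descriptor rings, and in
 * zero-copy mode each VPP buffer pool is exposed to the peer as its own
 * shared region, hence d->region = buffer_pool_index + 1 and d->offset
 * being the buffer's displacement within that region's mapping. */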
VLIB_NODE_FN (memif_input_node) (vlib_main_t * vm,
				 vlib_node_runtime_t * node,
				 vlib_frame_t * frame)
{
  u32 n_rx = 0;
  memif_main_t *mm = &memif_main;
  memif_interface_mode_t mode_ip = MEMIF_INTERFACE_MODE_IP;
  memif_interface_mode_t mode_eth = MEMIF_INTERFACE_MODE_ETHERNET;

  vnet_hw_if_rxq_poll_vector_t *pv;
  pv = vnet_hw_if_get_rxq_poll_vector (vm, node);
  for (int i = 0; i < vec_len (pv); i++)
    {
      memif_if_t *mif;
      u32 qid;
      mif = vec_elt_at_index (mm->interfaces, pv[i].dev_instance);
      qid = pv[i].queue_id;
      if ((mif->flags & MEMIF_IF_FLAG_ADMIN_UP) &&
	  (mif->flags & MEMIF_IF_FLAG_CONNECTED))
	{
	  if (mif->flags & MEMIF_IF_FLAG_ZERO_COPY)
	    {
	      if (mif->mode == MEMIF_INTERFACE_MODE_IP)
		n_rx += memif_device_input_zc_inline (vm, node, frame, mif,
						      qid, mode_ip);
	      else
		n_rx += memif_device_input_zc_inline (vm, node, frame, mif,
						      qid, mode_eth);
	    }
	  else if (mif->flags & MEMIF_IF_FLAG_IS_SLAVE)
	    {
	      if (mif->mode == MEMIF_INTERFACE_MODE_IP)
		n_rx += memif_device_input_inline (
		  vm, node, frame, mif, MEMIF_RING_M2S, qid, mode_ip);
	      else
		n_rx += memif_device_input_inline (
		  vm, node, frame, mif, MEMIF_RING_M2S, qid, mode_eth);
	    }
	  else
	    {
	      if (mif->mode == MEMIF_INTERFACE_MODE_IP)
		n_rx += memif_device_input_inline (
		  vm, node, frame, mif, MEMIF_RING_S2M, qid, mode_ip);
	      else
		n_rx += memif_device_input_inline (
		  vm, node, frame, mif, MEMIF_RING_S2M, qid, mode_eth);
	    }
	}
    }

  return n_rx;
}
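/* Dispatch summary for the node function above: zero-copy interfaces
 * take the shared-buffer path regardless of role; otherwise a slave
 * receives on master-to-slave (M2S) rings and a master on
 * slave-to-master (S2M) rings, with each variant specialized at compile
 * time for IP or ethernet mode via the inline arguments. */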
VLIB_REGISTER_NODE (memif_input_node) = {
  .name = "memif-input",
  .flags = VLIB_NODE_FLAG_TRACE_SUPPORTED,
  .sibling_of = "device-input",
  .format_trace = format_memif_input_trace,
  .type = VLIB_NODE_TYPE_INPUT,
  .state = VLIB_NODE_STATE_INTERRUPT,
  .n_errors = MEMIF_INPUT_N_ERROR,
  .error_strings = memif_input_error_strings,
};
/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */