/*
 *------------------------------------------------------------------
 * Copyright (c) 2016 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *------------------------------------------------------------------
 */
#include <sys/ioctl.h>

#include <vlib/vlib.h>
#include <vlib/unix/unix.h>
#include <vnet/ethernet/ethernet.h>
#include <vnet/interface/rx_queue_funcs.h>
#include <vnet/feature/feature.h>

#include <memif/memif.h>
#include <memif/private.h>
#define foreach_memif_input_error                                             \
  _ (BUFFER_ALLOC_FAIL, buffer_alloc, ERROR, "buffer allocation failed")      \
  _ (BAD_DESC, bad_desc, ERROR, "bad descriptor")                             \
  _ (NOT_IP, not_ip, INFO, "not ip packet")

typedef enum
{
#define _(f, n, s, d) MEMIF_INPUT_ERROR_##f,
  foreach_memif_input_error
#undef _
    MEMIF_INPUT_N_ERROR,
} memif_input_error_t;

static vlib_error_desc_t memif_input_error_counters[] = {
#define _(f, n, s, d) { #n, d, VL_COUNTER_SEVERITY_##s },
  foreach_memif_input_error
#undef _
};

typedef struct
{
  u32 next_index;
  u32 hw_if_index;
  u16 ring;
} memif_input_trace_t;
static __clib_unused u8 *
format_memif_input_trace (u8 * s, va_list * args)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
  memif_input_trace_t *t = va_arg (*args, memif_input_trace_t *);
  u32 indent = format_get_indent (s);

  s = format (s, "memif: hw_if_index %d next-index %d",
	      t->hw_if_index, t->next_index);
  s = format (s, "\n%Uslot: ring %u", format_white_space, indent + 2,
	      t->ring);
  return s;
}
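
/* in IP mode packets carry no ethernet header, so the next node is chosen
   by peeking at the IP version nibble of the first payload byte: 0x4x goes
   to the ip4 no-checksum input node, 0x6x to ip6 input, anything else is
   counted as "not ip" and dropped */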
static_always_inline u32
memif_next_from_ip_hdr (vlib_node_runtime_t * node, vlib_buffer_t * b)
{
  u8 *ptr = vlib_buffer_get_current (b);
  u8 v = *ptr & 0xf0;

  if (PREDICT_TRUE (v == 0x40))
    return VNET_DEVICE_INPUT_NEXT_IP4_NCS_INPUT;
  else if (PREDICT_TRUE (v == 0x60))
    return VNET_DEVICE_INPUT_NEXT_IP6_INPUT;

  b->error = node->errors[MEMIF_INPUT_ERROR_NOT_IP];
  return VNET_DEVICE_INPUT_NEXT_DROP;
}
static_always_inline void
memif_trace_buffer (vlib_main_t * vm, vlib_node_runtime_t * node,
		    memif_if_t * mif, vlib_buffer_t * b, u32 next, u16 qid,
		    uword * n_tracep)
{
  if (PREDICT_TRUE
      (b != 0 && vlib_trace_buffer (vm, node, next, b, /* follow_chain */ 0)))
    {
      memif_input_trace_t *tr;
      vlib_set_trace_count (vm, node, --(*n_tracep));
      tr = vlib_add_trace (vm, node, b, sizeof (*tr));
      tr->next_index = next;
      tr->hw_if_index = mif->hw_if_index;
      tr->ring = qid;
    }
}
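
/* copies out of shared memory are not executed immediately; each request is
   queued as a (src pointer, length, dst offset, dst buffer index) tuple so
   the actual memcpys can later be issued back to back over the whole batch */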
static_always_inline void
memif_add_copy_op (memif_per_thread_data_t * ptd, void *data, u32 len,
		   u16 buffer_offset, u16 buffer_vec_index)
{
  memif_copy_op_t *co;
  vec_add2_aligned (ptd->copy_ops, co, 1, CLIB_CACHE_LINE_BYTES);
  co->data = data;
  co->data_len = len;
  co->buffer_offset = buffer_offset;
  co->buffer_vec_index = buffer_vec_index;
}
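
/* a packet larger than one vlib buffer spills into the consecutively
   allocated buffers that follow it; walk the overflow, link the segments
   with VLIB_BUFFER_NEXT_PRESENT and cap each segment at buffer_size */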
static_always_inline void
memif_add_to_chain (vlib_main_t * vm, vlib_buffer_t * b, u32 * buffers,
		    u32 buffer_size)
{
  vlib_buffer_t *seg = b;
  i32 bytes_left = b->current_length - buffer_size + b->current_data;

  if (PREDICT_TRUE (bytes_left <= 0))
    return;

  b->current_length -= bytes_left;
  b->total_length_not_including_first_buffer = bytes_left;

  while (bytes_left)
    {
      seg->flags |= VLIB_BUFFER_NEXT_PRESENT;
      seg->next_buffer = buffers[0];
      seg = vlib_get_buffer (vm, buffers[0]);
      buffers++;
      seg->current_data = 0;
      seg->current_length = clib_min (buffer_size, bytes_left);
      bytes_left -= seg->current_length;
    }
}
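
/* copy-mode receive path, run in stages over a whole vector:
 *  1. walk the ring descriptors, building per-packet (packet_ops) and
 *     per-segment (copy_ops) work lists
 *  2. bulk-allocate all vlib buffers needed for the frame
 *  3. execute the queued copies, four at a time
 *  4. release the consumed slots back to the peer
 *  5. fill in buffer metadata from a template and enqueue to the graph */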
static_always_inline uword
memif_device_input_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
			   memif_if_t *mif, memif_ring_type_t type, u16 qid,
			   memif_interface_mode_t mode)
{
  vnet_main_t *vnm = vnet_get_main ();
  memif_main_t *mm = &memif_main;
  memif_ring_t *ring;
  memif_queue_t *mq;
  u16 buffer_size = vlib_buffer_get_default_data_size (vm);
  uword n_trace;
  u16 nexts[MEMIF_RX_VECTOR_SZ], *next = nexts;
  u32 _to_next_bufs[MEMIF_RX_VECTOR_SZ], *to_next_bufs = _to_next_bufs, *bi;
  u32 n_rx_packets = 0, n_rx_bytes = 0;
  u32 n_left, n_left_to_next;
  u32 next_index = VNET_DEVICE_INPUT_NEXT_ETHERNET_INPUT;
  vlib_buffer_t *b0, *b1, *b2, *b3;
  u32 thread_index = vm->thread_index;
  memif_per_thread_data_t *ptd = vec_elt_at_index (mm->per_thread_data,
						   thread_index);
  vlib_buffer_t bt;
  u16 cur_slot, last_slot, ring_size, n_slots, mask;
  i16 start_offset;
  u16 n_buffers = 0, n_alloc;
  memif_copy_op_t *co;
  memif_packet_op_t *po;
  memif_region_index_t last_region = ~0;
  void *last_region_shm = 0;
  void *last_region_max = 0;

  mq = vec_elt_at_index (mif->rx_queues, qid);
  ring = mq->ring;
  ring_size = 1 << mq->log2_ring_size;
  mask = ring_size - 1;
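  /* ring_size is always a power of two, so slot indices wrap with a cheap
     bitwise AND, e.g. log2_ring_size 10 -> ring_size 1024, mask 0x3ff */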
  /* assume that somebody will want to add an ethernet header on the packet
     so start with IP header at offset 14 */
  start_offset = (mode == MEMIF_INTERFACE_MODE_IP) ? 14 : 0;

  /* for S2M rings, we are consumers of packet buffers, and for M2S rings we
     are producers of empty buffers */
  cur_slot = (type == MEMIF_RING_S2M) ? mq->last_head : mq->last_tail;

  if (type == MEMIF_RING_S2M)
    last_slot = __atomic_load_n (&ring->head, __ATOMIC_ACQUIRE);
  else
    last_slot = __atomic_load_n (&ring->tail, __ATOMIC_ACQUIRE);

  if (cur_slot == last_slot)
    goto refill;
  n_slots = last_slot - cur_slot;
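
  /* one packet may span several ring slots chained via MEMIF_DESC_FLAG_NEXT,
     and may also be split across several vlib buffers when it exceeds
     buffer_size; packet_ops hold one entry per packet, copy_ops one entry
     per contiguous copy */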
  /* construct copy and packet vector out of ring slots */
  while (n_slots && n_rx_packets < MEMIF_RX_VECTOR_SZ)
    {
      u32 dst_off, src_off, n_bytes_left;
      u16 s0;
      memif_desc_t *d0;
      void *mb0;
      po = ptd->packet_ops + n_rx_packets;
      n_rx_packets++;
      po->first_buffer_vec_index = n_buffers++;
      po->packet_len = 0;
      src_off = 0;
      dst_off = start_offset;

    next_slot:
      CLIB_PREFETCH (&ring->desc[(cur_slot + 8) & mask],
		     CLIB_CACHE_LINE_BYTES, LOAD);
      s0 = cur_slot & mask;
      d0 = &ring->desc[s0];
      n_bytes_left = d0->length;

      /* slave resets buffer length,
       * so it can produce full size buffer for master
       */
      if (type == MEMIF_RING_M2S)
	d0->length = mif->run.buffer_size;

      po->packet_len += n_bytes_left;
      if (PREDICT_FALSE (last_region != d0->region))
	{
	  last_region_shm = mif->regions[d0->region].shm;
	  last_region = d0->region;
	  last_region_max =
	    last_region_shm + mif->regions[last_region].region_size;
	}
      mb0 = last_region_shm + d0->offset;

      if (PREDICT_FALSE (mb0 + n_bytes_left > last_region_max))
	vlib_error_count (vm, node->node_index, MEMIF_INPUT_ERROR_BAD_DESC, 1);
      else
	do
	  {
	    u32 dst_free = buffer_size - dst_off;
	    if (dst_free == 0)
	      {
		dst_off = 0;
		dst_free = buffer_size;
		n_buffers++;
	      }
	    u32 bytes_to_copy = clib_min (dst_free, n_bytes_left);
	    memif_add_copy_op (ptd, mb0 + src_off, bytes_to_copy, dst_off,
			       n_buffers - 1);
	    n_bytes_left -= bytes_to_copy;
	    src_off += bytes_to_copy;
	    dst_off += bytes_to_copy;
	  }
	while (PREDICT_FALSE (n_bytes_left));

      cur_slot++;
      n_slots--;
      if ((d0->flags & MEMIF_DESC_FLAG_NEXT) && n_slots)
	{
	  src_off = 0;
	  goto next_slot;
	}
    }
  /* allocate free buffers */
  vec_validate_aligned (ptd->buffers, n_buffers - 1, CLIB_CACHE_LINE_BYTES);
  n_alloc = vlib_buffer_alloc_from_pool (vm, ptd->buffers, n_buffers,
					 mq->buffer_pool_index);
  if (PREDICT_FALSE (n_alloc != n_buffers))
    {
      if (n_alloc)
	vlib_buffer_free (vm, ptd->buffers, n_alloc);
      vlib_error_count (vm, node->node_index,
			MEMIF_INPUT_ERROR_BUFFER_ALLOC_FAIL, 1);
      goto refill;
    }
  /* copy data */
  n_left = vec_len (ptd->copy_ops);
  co = ptd->copy_ops;
  while (n_left >= 8)
    {
      CLIB_PREFETCH (co[4].data, CLIB_CACHE_LINE_BYTES, LOAD);
      CLIB_PREFETCH (co[5].data, CLIB_CACHE_LINE_BYTES, LOAD);
      CLIB_PREFETCH (co[6].data, CLIB_CACHE_LINE_BYTES, LOAD);
      CLIB_PREFETCH (co[7].data, CLIB_CACHE_LINE_BYTES, LOAD);

      b0 = vlib_get_buffer (vm, ptd->buffers[co[0].buffer_vec_index]);
      b1 = vlib_get_buffer (vm, ptd->buffers[co[1].buffer_vec_index]);
      b2 = vlib_get_buffer (vm, ptd->buffers[co[2].buffer_vec_index]);
      b3 = vlib_get_buffer (vm, ptd->buffers[co[3].buffer_vec_index]);

      clib_memcpy_fast (b0->data + co[0].buffer_offset, co[0].data,
			co[0].data_len);
      clib_memcpy_fast (b1->data + co[1].buffer_offset, co[1].data,
			co[1].data_len);
      clib_memcpy_fast (b2->data + co[2].buffer_offset, co[2].data,
			co[2].data_len);
      clib_memcpy_fast (b3->data + co[3].buffer_offset, co[3].data,
			co[3].data_len);

      co += 4;
      n_left -= 4;
    }
  while (n_left)
    {
      b0 = vlib_get_buffer (vm, ptd->buffers[co[0].buffer_vec_index]);
      clib_memcpy_fast (b0->data + co[0].buffer_offset, co[0].data,
			co[0].data_len);
      co += 1;
      n_left -= 1;
    }
  /* release slots from the ring */
  if (type == MEMIF_RING_S2M)
    {
      __atomic_store_n (&ring->tail, cur_slot, __ATOMIC_RELEASE);
      mq->last_head = cur_slot;
    }
  else
    {
      mq->last_tail = cur_slot;
    }
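
  /* the RELEASE store pairs with the peer's ACQUIRE load of tail: all
     copies out of the shared region above must complete before the peer
     may recycle those slots; last_head/last_tail are purely local
     progress markers and are never shared */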
  /* prepare buffer template and next indices */
  vnet_buffer (&ptd->buffer_template)->sw_if_index[VLIB_RX] =
    mif->sw_if_index;
  vnet_buffer (&ptd->buffer_template)->feature_arc_index = 0;
  ptd->buffer_template.current_data = start_offset;
  ptd->buffer_template.current_config_index = 0;
  ptd->buffer_template.buffer_pool_index = mq->buffer_pool_index;
  ptd->buffer_template.ref_count = 1;

  if (mode == MEMIF_INTERFACE_MODE_ETHERNET)
    {
      next_index = VNET_DEVICE_INPUT_NEXT_ETHERNET_INPUT;
      if (mif->per_interface_next_index != ~0)
	next_index = mif->per_interface_next_index;
      else
	vnet_feature_start_device_input_x1 (mif->sw_if_index, &next_index,
					    &ptd->buffer_template);
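
      /* when every packet goes to ethernet-input, tag the frame so
	 ethernet-input can take its single-interface fast path and skip
	 per-packet sw_if_index checks */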
      vlib_get_new_next_frame (vm, node, next_index, to_next_bufs,
			       n_left_to_next);
      if (PREDICT_TRUE (next_index == VNET_DEVICE_INPUT_NEXT_ETHERNET_INPUT))
	{
	  vlib_next_frame_t *nf;
	  vlib_frame_t *f;
	  ethernet_input_frame_t *ef;
	  nf = vlib_node_runtime_get_next_frame (vm, node, next_index);
	  f = vlib_get_frame (vm, nf->frame);
	  f->flags = ETH_INPUT_FRAME_F_SINGLE_SW_IF_IDX;

	  ef = vlib_frame_scalar_args (f);
	  ef->sw_if_index = mif->sw_if_index;
	  ef->hw_if_index = mif->hw_if_index;
	  vlib_frame_no_append (f);
	}
    }
  /* process buffer metadata */
  u32 n_from = n_rx_packets;
  po = ptd->packet_ops;
  bi = to_next_bufs;

  /* copy template into local variable - will save per packet load */
  vlib_buffer_copy_template (&bt, &ptd->buffer_template);

  while (n_from >= 8)
    {
      b0 = vlib_get_buffer (vm, ptd->buffers[po[4].first_buffer_vec_index]);
      b1 = vlib_get_buffer (vm, ptd->buffers[po[5].first_buffer_vec_index]);
      b2 = vlib_get_buffer (vm, ptd->buffers[po[6].first_buffer_vec_index]);
      b3 = vlib_get_buffer (vm, ptd->buffers[po[7].first_buffer_vec_index]);

      vlib_prefetch_buffer_header (b0, STORE);
      vlib_prefetch_buffer_header (b1, STORE);
      vlib_prefetch_buffer_header (b2, STORE);
      vlib_prefetch_buffer_header (b3, STORE);

      /* enqueue buffer */
      u32 fbvi[4];
      fbvi[0] = po[0].first_buffer_vec_index;
      fbvi[1] = po[1].first_buffer_vec_index;
      fbvi[2] = po[2].first_buffer_vec_index;
      fbvi[3] = po[3].first_buffer_vec_index;

      bi[0] = ptd->buffers[fbvi[0]];
      bi[1] = ptd->buffers[fbvi[1]];
      bi[2] = ptd->buffers[fbvi[2]];
      bi[3] = ptd->buffers[fbvi[3]];

      b0 = vlib_get_buffer (vm, bi[0]);
      b1 = vlib_get_buffer (vm, bi[1]);
      b2 = vlib_get_buffer (vm, bi[2]);
      b3 = vlib_get_buffer (vm, bi[3]);

      vlib_buffer_copy_template (b0, &bt);
      vlib_buffer_copy_template (b1, &bt);
      vlib_buffer_copy_template (b2, &bt);
      vlib_buffer_copy_template (b3, &bt);

      b0->current_length = po[0].packet_len;
      n_rx_bytes += b0->current_length;
      b1->current_length = po[1].packet_len;
      n_rx_bytes += b1->current_length;
      b2->current_length = po[2].packet_len;
      n_rx_bytes += b2->current_length;
      b3->current_length = po[3].packet_len;
      n_rx_bytes += b3->current_length;

      memif_add_to_chain (vm, b0, ptd->buffers + fbvi[0] + 1, buffer_size);
      memif_add_to_chain (vm, b1, ptd->buffers + fbvi[1] + 1, buffer_size);
      memif_add_to_chain (vm, b2, ptd->buffers + fbvi[2] + 1, buffer_size);
      memif_add_to_chain (vm, b3, ptd->buffers + fbvi[3] + 1, buffer_size);

      if (mode == MEMIF_INTERFACE_MODE_IP)
	{
	  next[0] = memif_next_from_ip_hdr (node, b0);
	  next[1] = memif_next_from_ip_hdr (node, b1);
	  next[2] = memif_next_from_ip_hdr (node, b2);
	  next[3] = memif_next_from_ip_hdr (node, b3);
	}

      /* next */
      n_from -= 4;
      po += 4;
      bi += 4;
      next += 4;
    }
  while (n_from)
    {
      u32 fbvi[1];
      /* enqueue buffer */
      fbvi[0] = po[0].first_buffer_vec_index;
      bi[0] = ptd->buffers[fbvi[0]];
      b0 = vlib_get_buffer (vm, bi[0]);
      vlib_buffer_copy_template (b0, &bt);
      b0->current_length = po->packet_len;
      n_rx_bytes += b0->current_length;

      memif_add_to_chain (vm, b0, ptd->buffers + fbvi[0] + 1, buffer_size);

      if (mode == MEMIF_INTERFACE_MODE_IP)
	{
	  next[0] = memif_next_from_ip_hdr (node, b0);
	}

      /* next */
      n_from -= 1;
      po += 1;
      bi += 1;
      next += 1;
    }
  /* packet trace if enabled */
  if (PREDICT_FALSE ((n_trace = vlib_get_trace_count (vm, node))))
    {
      u32 n_left = n_rx_packets;
      bi = to_next_bufs;
      next = nexts;
      u32 ni = next_index;
      while (n_trace && n_left)
	{
	  vlib_buffer_t *b;
	  memif_input_trace_t *tr;
	  if (mode != MEMIF_INTERFACE_MODE_ETHERNET)
	    ni = next[0];
	  b = vlib_get_buffer (vm, bi[0]);
	  if (PREDICT_TRUE
	      (vlib_trace_buffer (vm, node, ni, b, /* follow_chain */ 0)))
	    {
	      tr = vlib_add_trace (vm, node, b, sizeof (*tr));
	      tr->next_index = ni;
	      tr->hw_if_index = mif->hw_if_index;
	      tr->ring = qid;
	      n_trace--;
	    }

	  /* next */
	  n_left--;
	  bi++;
	  next++;
	}
      vlib_set_trace_count (vm, node, n_trace);
    }
  if (mode == MEMIF_INTERFACE_MODE_ETHERNET)
    {
      n_left_to_next -= n_rx_packets;
      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }
  else
    vlib_buffer_enqueue_to_next (vm, node, to_next_bufs, nexts, n_rx_packets);

  vlib_increment_combined_counter (vnm->interface_main.combined_sw_if_counters
				   + VNET_INTERFACE_COUNTER_RX, thread_index,
				   mif->sw_if_index, n_rx_packets,
				   n_rx_bytes);
  /* refill ring with empty buffers */
refill:
  vec_reset_length (ptd->buffers);
  vec_reset_length (ptd->copy_ops);

  if (type == MEMIF_RING_M2S)
    {
      u16 head = ring->head;
      n_slots = ring_size - head + mq->last_tail;

      while (n_slots--)
	{
	  u16 s = head++ & mask;
	  memif_desc_t *d = &ring->desc[s];
	  d->length = mif->run.buffer_size;
	}

      __atomic_store_n (&ring->head, head, __ATOMIC_RELEASE);
    }

  return n_rx_packets;
}
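
/* zero-copy receive path. In this mode VPP's own buffer memory is exposed
   to the peer as memif regions, so ring descriptors point directly into
   vlib buffers: nothing is copied, consumed buffers are handed straight to
   the graph and fresh buffers are attached to the ring during refill */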
static_always_inline uword
memif_device_input_zc_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
			      memif_if_t *mif, u16 qid,
			      memif_interface_mode_t mode)
{
  vnet_main_t *vnm = vnet_get_main ();
  memif_main_t *mm = &memif_main;
  memif_ring_t *ring;
  memif_queue_t *mq;
  u32 next_index;
  uword n_trace = vlib_get_trace_count (vm, node);
  u32 n_rx_packets = 0, n_rx_bytes = 0;
  u32 *to_next = 0, *buffers;
  u32 bi0, bi1, bi2, bi3;
  u16 slot, s0;
  memif_desc_t *d0;
  vlib_buffer_t *b0, *b1, *b2, *b3;
  u32 thread_index = vm->thread_index;
  memif_per_thread_data_t *ptd = vec_elt_at_index (mm->per_thread_data,
						   thread_index);
  u16 cur_slot, last_slot, ring_size, n_slots, mask, head;
  i16 start_offset;
  u64 offset;
  u32 buffer_length;
  u16 n_alloc, n_from;

  mq = vec_elt_at_index (mif->rx_queues, qid);
  ring = mq->ring;
  ring_size = 1 << mq->log2_ring_size;
  mask = ring_size - 1;

  next_index = (mode == MEMIF_INTERFACE_MODE_IP) ?
    VNET_DEVICE_INPUT_NEXT_IP6_INPUT : VNET_DEVICE_INPUT_NEXT_ETHERNET_INPUT;

  /* assume that somebody will want to add an ethernet header on the packet
     so start with IP header at offset 14 */
  start_offset = (mode == MEMIF_INTERFACE_MODE_IP) ? 14 : 0;
  buffer_length = vlib_buffer_get_default_data_size (vm) - start_offset;

  cur_slot = mq->last_tail;
  last_slot = __atomic_load_n (&ring->tail, __ATOMIC_ACQUIRE);
  if (cur_slot == last_slot)
    goto refill;
  n_slots = last_slot - cur_slot;
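
  /* mq->buffers[] remembers which vlib buffer backs each ring slot, so a
     consumed descriptor maps straight back to a buffer index */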
  /* process ring slots */
  vec_validate_aligned (ptd->buffers, MEMIF_RX_VECTOR_SZ,
			CLIB_CACHE_LINE_BYTES);
  while (n_slots && n_rx_packets < MEMIF_RX_VECTOR_SZ)
    {
      vlib_buffer_t *hb;

      s0 = cur_slot & mask;
      bi0 = mq->buffers[s0];
      ptd->buffers[n_rx_packets++] = bi0;

      CLIB_PREFETCH (&ring->desc[(cur_slot + 8) & mask],
		     CLIB_CACHE_LINE_BYTES, LOAD);
      d0 = &ring->desc[s0];
      hb = b0 = vlib_get_buffer (vm, bi0);
      b0->current_data = start_offset;
      b0->current_length = d0->length;
      n_rx_bytes += d0->length;

      cur_slot++;
      n_slots--;
      if (PREDICT_FALSE ((d0->flags & MEMIF_DESC_FLAG_NEXT) && n_slots))
	{
	  hb->flags |= VLIB_BUFFER_TOTAL_LENGTH_VALID;
	next_slot:
	  s0 = cur_slot & mask;
	  d0 = &ring->desc[s0];
	  bi0 = mq->buffers[s0];

	  /* previous buffer */
	  b0->next_buffer = bi0;
	  b0->flags |= VLIB_BUFFER_NEXT_PRESENT;

	  /* current buffer */
	  b0 = vlib_get_buffer (vm, bi0);
	  b0->current_data = start_offset;
	  b0->current_length = d0->length;
	  hb->total_length_not_including_first_buffer += d0->length;
	  n_rx_bytes += d0->length;

	  cur_slot++;
	  n_slots--;
	  if ((d0->flags & MEMIF_DESC_FLAG_NEXT) && n_slots)
	    goto next_slot;
	}
    }

  /* release slots from the ring */
  mq->last_tail = cur_slot;
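
  /* the consumed buffers now belong to the graph; the slots themselves are
     repopulated with freshly allocated buffers in the refill stage below */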
  n_from = n_rx_packets;
  buffers = ptd->buffers;

  while (n_from)
    {
      u32 n_left_to_next;
      u32 next0, next1, next2, next3;

      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
      while (n_from >= 8 && n_left_to_next >= 4)
	{
	  b0 = vlib_get_buffer (vm, buffers[4]);
	  b1 = vlib_get_buffer (vm, buffers[5]);
	  b2 = vlib_get_buffer (vm, buffers[6]);
	  b3 = vlib_get_buffer (vm, buffers[7]);
	  vlib_prefetch_buffer_header (b0, STORE);
	  vlib_prefetch_buffer_header (b1, STORE);
	  vlib_prefetch_buffer_header (b2, STORE);
	  vlib_prefetch_buffer_header (b3, STORE);

	  /* enqueue buffer */
	  to_next[0] = bi0 = buffers[0];
	  to_next[1] = bi1 = buffers[1];
	  to_next[2] = bi2 = buffers[2];
	  to_next[3] = bi3 = buffers[3];
	  to_next += 4;
	  n_left_to_next -= 4;
	  buffers += 4;

	  b0 = vlib_get_buffer (vm, bi0);
	  b1 = vlib_get_buffer (vm, bi1);
	  b2 = vlib_get_buffer (vm, bi2);
	  b3 = vlib_get_buffer (vm, bi3);
	  vnet_buffer (b0)->sw_if_index[VLIB_RX] = mif->sw_if_index;
	  vnet_buffer (b0)->sw_if_index[VLIB_TX] = ~0;
	  vnet_buffer (b1)->sw_if_index[VLIB_RX] = mif->sw_if_index;
	  vnet_buffer (b1)->sw_if_index[VLIB_TX] = ~0;
	  vnet_buffer (b2)->sw_if_index[VLIB_RX] = mif->sw_if_index;
	  vnet_buffer (b2)->sw_if_index[VLIB_TX] = ~0;
	  vnet_buffer (b3)->sw_if_index[VLIB_RX] = mif->sw_if_index;
	  vnet_buffer (b3)->sw_if_index[VLIB_TX] = ~0;
	  if (mode == MEMIF_INTERFACE_MODE_IP)
	    {
	      next0 = memif_next_from_ip_hdr (node, b0);
	      next1 = memif_next_from_ip_hdr (node, b1);
	      next2 = memif_next_from_ip_hdr (node, b2);
	      next3 = memif_next_from_ip_hdr (node, b3);
	    }
	  else if (mode == MEMIF_INTERFACE_MODE_ETHERNET)
	    {
	      if (PREDICT_FALSE (mif->per_interface_next_index != ~0))
		{
		  next0 = mif->per_interface_next_index;
		  next1 = mif->per_interface_next_index;
		  next2 = mif->per_interface_next_index;
		  next3 = mif->per_interface_next_index;
		}
	      else
		{
		  next0 = next1 = next2 = next3 = next_index;
		  /* redirect if feature path enabled */
		  vnet_feature_start_device_input_x1 (mif->sw_if_index,
						      &next0, b0);
		  vnet_feature_start_device_input_x1 (mif->sw_if_index,
						      &next1, b1);
		  vnet_feature_start_device_input_x1 (mif->sw_if_index,
						      &next2, b2);
		  vnet_feature_start_device_input_x1 (mif->sw_if_index,
						      &next3, b3);
		}
	    }
	  /* trace */
	  if (PREDICT_FALSE (n_trace > 0))
	    memif_trace_buffer (vm, node, mif, b0, next0, qid, &n_trace);
	  if (PREDICT_FALSE (n_trace > 0))
	    memif_trace_buffer (vm, node, mif, b1, next1, qid, &n_trace);
	  if (PREDICT_FALSE (n_trace > 0))
	    memif_trace_buffer (vm, node, mif, b2, next2, qid, &n_trace);
	  if (PREDICT_FALSE (n_trace > 0))
	    memif_trace_buffer (vm, node, mif, b3, next3, qid, &n_trace);
	  /* enqueue */
	  vlib_validate_buffer_enqueue_x4 (vm, node, next_index, to_next,
					   n_left_to_next, bi0, bi1, bi2, bi3,
					   next0, next1, next2, next3);

	  /* next */
	  n_from -= 4;
	}
      while (n_from && n_left_to_next)
	{
	  next0 = next_index;

	  /* enqueue buffer */
	  to_next[0] = bi0 = buffers[0];
	  to_next += 1;
	  n_left_to_next--;
	  buffers += 1;

	  b0 = vlib_get_buffer (vm, bi0);
	  vnet_buffer (b0)->sw_if_index[VLIB_RX] = mif->sw_if_index;
	  vnet_buffer (b0)->sw_if_index[VLIB_TX] = ~0;

	  if (mode == MEMIF_INTERFACE_MODE_IP)
	    {
	      next0 = memif_next_from_ip_hdr (node, b0);
	    }
	  else if (mode == MEMIF_INTERFACE_MODE_ETHERNET)
	    {
	      if (PREDICT_FALSE (mif->per_interface_next_index != ~0))
		next0 = mif->per_interface_next_index;
	      else
		/* redirect if feature path enabled */
		vnet_feature_start_device_input_x1 (mif->sw_if_index,
						    &next0, b0);
	    }

	  /* trace */
	  if (PREDICT_FALSE (n_trace > 0))
	    memif_trace_buffer (vm, node, mif, b0, next0, qid, &n_trace);

	  /* enqueue */
	  vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
					   n_left_to_next, bi0, next0);

	  /* next */
	  n_from--;
	}
      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }
  vlib_increment_combined_counter (vnm->interface_main.combined_sw_if_counters
				   + VNET_INTERFACE_COUNTER_RX, thread_index,
				   mif->sw_if_index, n_rx_packets,
				   n_rx_bytes);
  /* refill ring with empty buffers */
refill:
  vec_reset_length (ptd->buffers);

  head = ring->head;
  n_slots = ring_size - head + mq->last_tail;
  slot = head & mask;

  n_slots &= ~7;

  if (n_slots < 8)
    goto done;

  memif_desc_t desc_template, *dt = &desc_template;
  clib_memset (dt, 0, sizeof (memif_desc_t));
  dt->length = buffer_length;

  n_alloc = vlib_buffer_alloc_to_ring_from_pool (
    vm, mq->buffers, slot, ring_size, n_slots, mq->buffer_pool_index);
  dt->region = mq->buffer_pool_index + 1;
  offset = (u64) mif->regions[dt->region].shm + start_offset;

  if (PREDICT_FALSE (n_alloc != n_slots))
    vlib_error_count (vm, node->node_index,
		      MEMIF_INPUT_ERROR_BUFFER_ALLOC_FAIL, 1);
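
  /* region 0 carries the descriptor rings; in zero-copy mode each vlib
     buffer pool is exposed to the peer as its own region (hence
     buffer_pool_index + 1), and offset rebases a buffer's virtual address
     into that region so descriptors stay position-independent */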
  head += n_alloc;

  while (n_alloc)
    {
      memif_desc_t *d = ring->desc + slot;
      u32 *bi = mq->buffers + slot;

      if (PREDICT_FALSE (((slot + 7 > mask) || (n_alloc < 8))))
	{
	  /* slow path - one descriptor at a time when the batch would wrap
	     the ring or fewer than 8 buffers remain */
	  clib_memcpy_fast (d, dt, sizeof (memif_desc_t));
	  d[0].offset = (u64) vlib_get_buffer (vm, bi[0])->data - offset;

	  slot = (slot + 1) & mask;
	  n_alloc -= 1;
	}
      else
	{
	  clib_memcpy_fast (d + 0, dt, sizeof (memif_desc_t));
	  clib_memcpy_fast (d + 1, dt, sizeof (memif_desc_t));
	  clib_memcpy_fast (d + 2, dt, sizeof (memif_desc_t));
	  clib_memcpy_fast (d + 3, dt, sizeof (memif_desc_t));
	  clib_memcpy_fast (d + 4, dt, sizeof (memif_desc_t));
	  clib_memcpy_fast (d + 5, dt, sizeof (memif_desc_t));
	  clib_memcpy_fast (d + 6, dt, sizeof (memif_desc_t));
	  clib_memcpy_fast (d + 7, dt, sizeof (memif_desc_t));

	  d[0].offset = (u64) vlib_get_buffer (vm, bi[0])->data - offset;
	  d[1].offset = (u64) vlib_get_buffer (vm, bi[1])->data - offset;
	  d[2].offset = (u64) vlib_get_buffer (vm, bi[2])->data - offset;
	  d[3].offset = (u64) vlib_get_buffer (vm, bi[3])->data - offset;
	  d[4].offset = (u64) vlib_get_buffer (vm, bi[4])->data - offset;
	  d[5].offset = (u64) vlib_get_buffer (vm, bi[5])->data - offset;
	  d[6].offset = (u64) vlib_get_buffer (vm, bi[6])->data - offset;
	  d[7].offset = (u64) vlib_get_buffer (vm, bi[7])->data - offset;

	  slot = (slot + 8) & mask;
	  n_alloc -= 8;
	}
    }
done:
  __atomic_store_n (&ring->head, head, __ATOMIC_RELEASE);

  return n_rx_packets;
}
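
/* input node dispatch: ask the rx-queue infra which (interface, queue)
   pairs this thread should poll, then run the matching specialization:
   zero-copy vs copy, slave (M2S ring) vs master (S2M ring), IP vs
   ethernet mode */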
VLIB_NODE_FN (memif_input_node) (vlib_main_t * vm,
				 vlib_node_runtime_t * node,
				 vlib_frame_t * frame)
{
  u32 n_rx = 0;
  memif_main_t *mm = &memif_main;
  memif_interface_mode_t mode_ip = MEMIF_INTERFACE_MODE_IP;
  memif_interface_mode_t mode_eth = MEMIF_INTERFACE_MODE_ETHERNET;

  vnet_hw_if_rxq_poll_vector_t *pv;
  pv = vnet_hw_if_get_rxq_poll_vector (vm, node);
  for (int i = 0; i < vec_len (pv); i++)
    {
      memif_if_t *mif;
      u32 qid;
      mif = vec_elt_at_index (mm->interfaces, pv[i].dev_instance);
      qid = pv[i].queue_id;
      if ((mif->flags & MEMIF_IF_FLAG_ADMIN_UP) &&
	  (mif->flags & MEMIF_IF_FLAG_CONNECTED))
	{
	  if (mif->flags & MEMIF_IF_FLAG_ZERO_COPY)
	    {
	      if (mif->mode == MEMIF_INTERFACE_MODE_IP)
		n_rx +=
		  memif_device_input_zc_inline (vm, node, mif, qid, mode_ip);
	      else
		n_rx +=
		  memif_device_input_zc_inline (vm, node, mif, qid, mode_eth);
	    }
	  else if (mif->flags & MEMIF_IF_FLAG_IS_SLAVE)
	    {
	      if (mif->mode == MEMIF_INTERFACE_MODE_IP)
		n_rx += memif_device_input_inline (
		  vm, node, mif, MEMIF_RING_M2S, qid, mode_ip);
	      else
		n_rx += memif_device_input_inline (
		  vm, node, mif, MEMIF_RING_M2S, qid, mode_eth);
	    }
	  else
	    {
	      if (mif->mode == MEMIF_INTERFACE_MODE_IP)
		n_rx += memif_device_input_inline (
		  vm, node, mif, MEMIF_RING_S2M, qid, mode_ip);
	      else
		n_rx += memif_device_input_inline (
		  vm, node, mif, MEMIF_RING_S2M, qid, mode_eth);
	    }
	}
    }

  return n_rx;
}
VLIB_REGISTER_NODE (memif_input_node) = {
  .name = "memif-input",
  .flags = VLIB_NODE_FLAG_TRACE_SUPPORTED,
  .sibling_of = "device-input",
  .format_trace = format_memif_input_trace,
  .type = VLIB_NODE_TYPE_INPUT,
  .state = VLIB_NODE_STATE_INTERRUPT,
  .n_errors = MEMIF_INPUT_N_ERROR,
  .error_counters = memif_input_error_counters,
};

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */