/*
 *------------------------------------------------------------------
 * Copyright (c) 2016 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *------------------------------------------------------------------
 */
#include <sys/ioctl.h>

#include <vlib/vlib.h>
#include <vlib/unix/unix.h>
#include <vnet/ethernet/ethernet.h>
#include <vnet/devices/devices.h>
#include <vnet/feature/feature.h>

#include <memif/memif.h>
#include <memif/private.h>
#define foreach_memif_input_error \
  _(BUFFER_ALLOC_FAIL, "buffer allocation failed") \
  _(NOT_IP, "not ip packet")

typedef enum
{
#define _(f,s) MEMIF_INPUT_ERROR_##f,
  foreach_memif_input_error
#undef _
    MEMIF_INPUT_N_ERROR,
} memif_input_error_t;

static __clib_unused char *memif_input_error_strings[] = {
#define _(n,s) s,
  foreach_memif_input_error
#undef _
};

typedef struct
{
  u32 next_index;
  u32 hw_if_index;
  u16 ring;
} memif_input_trace_t;
static __clib_unused u8 *
format_memif_input_trace (u8 * s, va_list * args)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
  memif_input_trace_t *t = va_arg (*args, memif_input_trace_t *);
  u32 indent = format_get_indent (s);

  s = format (s, "memif: hw_if_index %d next-index %d",
	      t->hw_if_index, t->next_index);
  s = format (s, "\n%Uslot: ring %u", format_white_space, indent + 2,
	      t->ring);
  return s;
}
static_always_inline u32
memif_next_from_ip_hdr (vlib_node_runtime_t * node, vlib_buffer_t * b)
{
  u8 *ptr = vlib_buffer_get_current (b);
  u8 v = *ptr & 0xf0;

  if (PREDICT_TRUE (v == 0x40))
    return VNET_DEVICE_INPUT_NEXT_IP4_NCS_INPUT;
  else if (PREDICT_TRUE (v == 0x60))
    return VNET_DEVICE_INPUT_NEXT_IP6_INPUT;

  b->error = node->errors[MEMIF_INPUT_ERROR_NOT_IP];
  return VNET_DEVICE_INPUT_NEXT_DROP;
}
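/* Example: a typical IPv4 header starts with the byte 0x45 (version 4,
 * IHL 5), so (0x45 & 0xf0) == 0x40 selects ip4 input; an IPv6 header
 * starts with 0x6X and selects ip6 input. Anything else is counted as
 * NOT_IP and dropped. */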
static_always_inline void
memif_trace_buffer (vlib_main_t * vm, vlib_node_runtime_t * node,
		    memif_if_t * mif, vlib_buffer_t * b, u32 next, u16 qid,
		    uword * n_tracep)
{
  VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b);

  if (PREDICT_TRUE (b != 0))
    {
      memif_input_trace_t *tr;
      vlib_trace_buffer (vm, node, next, b, /* follow_chain */ 0);
      vlib_set_trace_count (vm, node, --(*n_tracep));
      tr = vlib_add_trace (vm, node, b, sizeof (*tr));
      tr->next_index = next;
      tr->hw_if_index = mif->hw_if_index;
      tr->ring = qid;
    }
}
static_always_inline void
memif_add_copy_op (memif_per_thread_data_t * ptd, void *data, u32 len,
		   u16 buffer_offset, u16 buffer_vec_index)
{
  memif_copy_op_t *co;
  vec_add2_aligned (ptd->copy_ops, co, 1, CLIB_CACHE_LINE_BYTES);
  co->data = data;
  co->data_len = len;
  co->buffer_offset = buffer_offset;
  co->buffer_vec_index = buffer_vec_index;
}
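/* Copy operations are only recorded here; the actual memcpy happens later
 * in one tight batch loop in memif_device_input_inline. Separating
 * descriptor parsing from data movement keeps each loop small and lets the
 * source data of upcoming copies be prefetched. */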
static_always_inline void
memif_add_to_chain (vlib_main_t * vm, vlib_buffer_t * b, u32 * buffers,
		    u32 buffer_size)
{
  vlib_buffer_t *seg = b;
  i32 bytes_left = b->current_length - buffer_size + b->current_data;

  if (PREDICT_TRUE (bytes_left <= 0))
    return;

  b->current_length -= bytes_left;
  b->total_length_not_including_first_buffer = bytes_left;

  while (bytes_left)
    {
      seg->flags |= VLIB_BUFFER_NEXT_PRESENT;
      seg->next_buffer = buffers[0];
      seg = vlib_get_buffer (vm, buffers[0]);
      buffers++;
      seg->current_data = 0;
      seg->current_length = clib_min (buffer_size, bytes_left);
      bytes_left -= seg->current_length;
    }
}
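/* Worked example: with 2048-byte buffers, current_data of 14 and a
 * 5000-byte packet, bytes_left is 5000 - 2048 + 14 = 2966; the head buffer
 * keeps 2034 bytes and two more buffers are chained, holding 2048 and 918
 * bytes respectively. */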
static_always_inline u32
sat_sub (u32 x, u32 y)
{
  u32 res = x - y;
  res &= -(res <= x);
  return res;
}
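/* sat_sub (x, y) returns x - y clamped at zero: sat_sub (7, 5) == 2 but
 * sat_sub (5, 7) == 0. On underflow, res wraps above x, so (res <= x)
 * evaluates to 0 and the -(0) mask clears the result - no branch needed. */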
/* branchless validation of the descriptor - uses saturated subtraction */
static_always_inline u32
memif_desc_is_invalid (memif_if_t * mif, memif_desc_t * d, u32 buffer_length)
{
  u32 rv;
  u16 valid_flags = MEMIF_DESC_FLAG_NEXT;

  rv = d->flags & (~valid_flags);
  rv |= sat_sub (d->region + 1, vec_len (mif->regions));
  rv |= sat_sub (d->length, buffer_length);
  rv |= sat_sub (d->offset + d->length, mif->regions[d->region].region_size);

  if (PREDICT_FALSE (rv))
    {
      mif->flags |= MEMIF_IF_FLAG_ERROR;
      return 1;
    }

  return 0;
}
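/* Each term above is nonzero exactly when one bound is violated: e.g. a
 * descriptor with region == vec_len (mif->regions) yields
 * sat_sub (d->region + 1, vec_len (mif->regions)) == 1, and one whose
 * offset + length runs past region_size trips the last term. ORing the
 * terms lets all four checks share a single conditional at the end. */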
static_always_inline uword
memif_device_input_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
			   vlib_frame_t * frame, memif_if_t * mif,
			   memif_ring_type_t type, u16 qid,
			   memif_interface_mode_t mode)
{
  vnet_main_t *vnm = vnet_get_main ();
  memif_main_t *mm = &memif_main;
  memif_ring_t *ring;
  memif_queue_t *mq;
  u16 buffer_size = VLIB_BUFFER_DEFAULT_FREE_LIST_BYTES;
  uword n_trace = vlib_get_trace_count (vm, node);
  u32 n_rx_packets = 0, n_rx_bytes = 0;
  u32 n_left;
  vlib_buffer_t *b0, *b1, *b2, *b3;
  u32 thread_index = vm->thread_index;
  memif_per_thread_data_t *ptd = vec_elt_at_index (mm->per_thread_data,
						   thread_index);
  vlib_buffer_t *bt = &ptd->buffer_template;
  u16 cur_slot, last_slot, ring_size, n_slots, mask;
  i16 start_offset;
  u16 n_buffers = 0, n_alloc;
  memif_copy_op_t *co;
  memif_packet_op_t *po;
  memif_region_index_t last_region = ~0;
  void *last_region_shm = 0;

  mq = vec_elt_at_index (mif->rx_queues, qid);
  ring = mq->ring;
  ring_size = 1 << mq->log2_ring_size;
  mask = ring_size - 1;
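  /* ring_size is always a power of two, so "slot & mask" substitutes for a
     modulo: with log2_ring_size 10, ring_size is 1024 and mask is 0x3ff.
     The u16 slot counters may freely wrap; the difference of two wrapped
     counters still yields the correct number of pending slots. */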
  /* assume that somebody will want to add an ethernet header to the packet,
     so start with the IP header at offset 14 */
  start_offset = (mode == MEMIF_INTERFACE_MODE_IP) ? 14 : 0;
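  /* Example: in IP mode the payload is placed at offset 14, leaving exactly
     enough headroom for a downstream node to prepend a 14-byte ethernet
     (rewrite) header by moving current_data back, with no data copy. */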
  /* for S2M rings, we are consumers of packet buffers, and for M2S rings we
     are producers of empty buffers */
  cur_slot = (type == MEMIF_RING_S2M) ? mq->last_head : mq->last_tail;
  last_slot = (type == MEMIF_RING_S2M) ? ring->head : ring->tail;
  if (cur_slot == last_slot)
    goto refill;
  n_slots = last_slot - cur_slot;
  /* construct copy and packet vector out of ring slots */
  while (n_slots && n_rx_packets < MEMIF_RX_VECTOR_SZ)
    {
      u32 dst_off, src_off, n_bytes_left;
      u16 s0;
      memif_desc_t *d0;
      void *mb0;

      po = ptd->packet_ops + n_rx_packets;
      n_rx_packets++;
      po->first_buffer_vec_index = n_buffers++;
      po->packet_len = 0;
      src_off = 0;
      dst_off = start_offset;

    next_slot:
      CLIB_PREFETCH (&ring->desc[(cur_slot + 8) & mask],
		     CLIB_CACHE_LINE_BYTES, LOAD);
      s0 = cur_slot & mask;
      d0 = &ring->desc[s0];
      n_bytes_left = d0->length;

      /* slave resets buffer length,
       * so it can produce full size buffer for master */
      if (type == MEMIF_RING_M2S)
	d0->length = mif->run.buffer_size;

      po->packet_len += n_bytes_left;
      if (PREDICT_FALSE (last_region != d0->region))
	{
	  last_region_shm = mif->regions[d0->region].shm;
	  last_region = d0->region;
	}
      mb0 = last_region_shm + d0->offset;

      do
	{
	  u32 dst_free = buffer_size - dst_off;
	  if (dst_free == 0)
	    {
	      dst_off = 0;
	      dst_free = buffer_size;
	      n_buffers++;
	    }
	  u32 bytes_to_copy = clib_min (dst_free, n_bytes_left);
	  memif_add_copy_op (ptd, mb0 + src_off, bytes_to_copy, dst_off,
			     n_buffers - 1);
	  n_bytes_left -= bytes_to_copy;
	  src_off += bytes_to_copy;
	  dst_off += bytes_to_copy;
	}
      while (PREDICT_FALSE (n_bytes_left));

      cur_slot++;
      n_slots--;
      if ((d0->flags & MEMIF_DESC_FLAG_NEXT) && n_slots)
	{
	  src_off = 0;
	  goto next_slot;
	}
    }
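  /* A packet larger than one descriptor arrives as a chain of slots marked
     with MEMIF_DESC_FLAG_NEXT; the goto above folds every fragment into the
     same packet op, so po->packet_len accumulates the full length while the
     copy ops record where each fragment lands in the target buffers. */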
  /* allocate free buffers */
  vec_validate_aligned (ptd->buffers, n_buffers - 1, CLIB_CACHE_LINE_BYTES);
  n_alloc = vlib_buffer_alloc (vm, ptd->buffers, n_buffers);
  if (PREDICT_FALSE (n_alloc != n_buffers))
    {
      if (n_alloc)
	vlib_buffer_free (vm, ptd->buffers, n_alloc);
      vlib_error_count (vm, node->node_index,
			MEMIF_INPUT_ERROR_BUFFER_ALLOC_FAIL, 1);
      goto refill;
    }
  /* copy data */
  n_left = vec_len (ptd->copy_ops);
  co = ptd->copy_ops;
  while (n_left >= 8)
    {
      CLIB_PREFETCH (co[4].data, CLIB_CACHE_LINE_BYTES, LOAD);
      CLIB_PREFETCH (co[5].data, CLIB_CACHE_LINE_BYTES, LOAD);
      CLIB_PREFETCH (co[6].data, CLIB_CACHE_LINE_BYTES, LOAD);
      CLIB_PREFETCH (co[7].data, CLIB_CACHE_LINE_BYTES, LOAD);
      b0 = vlib_get_buffer (vm, ptd->buffers[co[0].buffer_vec_index]);
      b1 = vlib_get_buffer (vm, ptd->buffers[co[1].buffer_vec_index]);
      b2 = vlib_get_buffer (vm, ptd->buffers[co[2].buffer_vec_index]);
      b3 = vlib_get_buffer (vm, ptd->buffers[co[3].buffer_vec_index]);
      clib_memcpy (b0->data + co[0].buffer_offset, co[0].data,
		   co[0].data_len);
      clib_memcpy (b1->data + co[1].buffer_offset, co[1].data,
		   co[1].data_len);
      clib_memcpy (b2->data + co[2].buffer_offset, co[2].data,
		   co[2].data_len);
      clib_memcpy (b3->data + co[3].buffer_offset, co[3].data,
		   co[3].data_len);
      co += 4;
      n_left -= 4;
    }
  while (n_left)
    {
      b0 = vlib_get_buffer (vm, ptd->buffers[co[0].buffer_vec_index]);
      clib_memcpy (b0->data + co[0].buffer_offset, co[0].data,
		   co[0].data_len);
      co += 1;
      n_left -= 1;
    }
  /* release slots from the ring */
  if (type == MEMIF_RING_S2M)
    {
      CLIB_MEMORY_STORE_BARRIER ();
      ring->tail = mq->last_head = cur_slot;
    }
  else
    {
      mq->last_tail = cur_slot;
    }
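  /* For S2M rings the barrier keeps the tail update from being reordered
     ahead of the slot processing, so the producer can never observe the
     advanced tail and start overwriting slots we are still working on. The
     M2S branch only touches last_tail, which is private to this consumer,
     so no barrier is needed there. */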
  u16 nexts[MEMIF_RX_VECTOR_SZ], *next = nexts;
  u32 to_next_buffers[MEMIF_RX_VECTOR_SZ], *bi = to_next_buffers;

  /* prepare buffer template and next indices */
  vnet_buffer (bt)->sw_if_index[VLIB_RX] = mif->sw_if_index;
  vnet_buffer (bt)->feature_arc_index = 0;
  bt->current_data = start_offset;
  bt->current_config_index = 0;
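  /* bt is a fully initialized buffer header template kept in per-thread
     data; below it is stamped onto four headers at a time with
     clib_memcpy64_x4, which is considerably cheaper than setting every
     metadata field on each buffer individually. */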
  if (mode == MEMIF_INTERFACE_MODE_ETHERNET)
    {
      u32 next_index = VNET_DEVICE_INPUT_NEXT_ETHERNET_INPUT;
      if (mif->per_interface_next_index != ~0)
	next_index = mif->per_interface_next_index;
      else
	vnet_feature_start_device_input_x1 (mif->sw_if_index, &next_index,
					    bt);
      clib_memset_u16 (nexts, next_index, n_rx_packets);
    }
  /* process buffer metadata */
  u32 n_from = n_rx_packets;
  po = ptd->packet_ops;

  while (n_from >= 8)
    {
      /* prefetch the buffer headers we will touch four iterations ahead */
      b0 = vlib_get_buffer (vm, ptd->buffers[po[4].first_buffer_vec_index]);
      b1 = vlib_get_buffer (vm, ptd->buffers[po[5].first_buffer_vec_index]);
      b2 = vlib_get_buffer (vm, ptd->buffers[po[6].first_buffer_vec_index]);
      b3 = vlib_get_buffer (vm, ptd->buffers[po[7].first_buffer_vec_index]);
      vlib_prefetch_buffer_header (b0, STORE);
      vlib_prefetch_buffer_header (b1, STORE);
      vlib_prefetch_buffer_header (b2, STORE);
      vlib_prefetch_buffer_header (b3, STORE);

      /* enqueue buffer */
      u32 fbvi[4];
      fbvi[0] = po[0].first_buffer_vec_index;
      fbvi[1] = po[1].first_buffer_vec_index;
      fbvi[2] = po[2].first_buffer_vec_index;
      fbvi[3] = po[3].first_buffer_vec_index;

      bi[0] = ptd->buffers[fbvi[0]];
      bi[1] = ptd->buffers[fbvi[1]];
      bi[2] = ptd->buffers[fbvi[2]];
      bi[3] = ptd->buffers[fbvi[3]];

      b0 = vlib_get_buffer (vm, bi[0]);
      b1 = vlib_get_buffer (vm, bi[1]);
      b2 = vlib_get_buffer (vm, bi[2]);
      b3 = vlib_get_buffer (vm, bi[3]);

      clib_memcpy64_x4 (b0, b1, b2, b3, bt);

      b0->current_length = po[0].packet_len;
      n_rx_bytes += b0->current_length;
      b1->current_length = po[1].packet_len;
      n_rx_bytes += b1->current_length;
      b2->current_length = po[2].packet_len;
      n_rx_bytes += b2->current_length;
      b3->current_length = po[3].packet_len;
      n_rx_bytes += b3->current_length;

      memif_add_to_chain (vm, b0, ptd->buffers + fbvi[0] + 1, buffer_size);
      memif_add_to_chain (vm, b1, ptd->buffers + fbvi[1] + 1, buffer_size);
      memif_add_to_chain (vm, b2, ptd->buffers + fbvi[2] + 1, buffer_size);
      memif_add_to_chain (vm, b3, ptd->buffers + fbvi[3] + 1, buffer_size);

      if (mode == MEMIF_INTERFACE_MODE_IP)
	{
	  next[0] = memif_next_from_ip_hdr (node, b0);
	  next[1] = memif_next_from_ip_hdr (node, b1);
	  next[2] = memif_next_from_ip_hdr (node, b2);
	  next[3] = memif_next_from_ip_hdr (node, b3);
	}

      /* next */
      n_from -= 4;
      po += 4;
      bi += 4;
      next += 4;
    }
  while (n_from)
    {
      u32 fbvi[1];
      /* enqueue buffer */
      fbvi[0] = po[0].first_buffer_vec_index;
      bi[0] = ptd->buffers[fbvi[0]];
      b0 = vlib_get_buffer (vm, bi[0]);
      clib_memcpy (b0, bt, 64);
      b0->current_length = po->packet_len;
      n_rx_bytes += b0->current_length;

      memif_add_to_chain (vm, b0, ptd->buffers + fbvi[0] + 1, buffer_size);

      if (mode == MEMIF_INTERFACE_MODE_IP)
	{
	  next[0] = memif_next_from_ip_hdr (node, b0);
	}

      /* next */
      n_from -= 1;
      po += 1;
      bi += 1;
      next += 1;
    }
  /* packet trace if enabled */
  if (PREDICT_FALSE ((n_trace = vlib_get_trace_count (vm, node))))
    {
      u32 n_left = n_rx_packets;
      bi = to_next_buffers;
      next = nexts;
      while (n_trace && n_left)
	{
	  vlib_buffer_t *b;
	  memif_input_trace_t *tr;
	  b = vlib_get_buffer (vm, bi[0]);
	  vlib_trace_buffer (vm, node, next[0], b, /* follow_chain */ 0);
	  tr = vlib_add_trace (vm, node, b, sizeof (*tr));
	  tr->next_index = next[0];
	  tr->hw_if_index = mif->hw_if_index;
	  tr->ring = qid;

	  /* next */
	  n_trace--;
	  n_left--;
	  bi++;
	  next++;
	}
      vlib_set_trace_count (vm, node, n_trace);
    }
  /* enqueue */
  vlib_buffer_enqueue_to_next (vm, node, to_next_buffers, nexts,
			       n_rx_packets);

  vlib_increment_combined_counter (vnm->interface_main.combined_sw_if_counters
				   + VNET_INTERFACE_COUNTER_RX, thread_index,
				   mif->hw_if_index, n_rx_packets,
				   n_rx_bytes);

  /* refill ring with empty buffers */
refill:
  vec_reset_length (ptd->buffers);
  vec_reset_length (ptd->copy_ops);
  if (type == MEMIF_RING_M2S)
    {
      u16 head = ring->head;
      n_slots = ring_size - head + mq->last_tail;

      while (n_slots--)
	{
	  u16 s = head++ & mask;
	  memif_desc_t *d = &ring->desc[s];
	  d->length = mif->run.buffer_size;
	}

      CLIB_MEMORY_STORE_BARRIER ();
      ring->head = head;
    }

  return n_rx_packets;
}
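/* In the M2S refill path above, the store barrier guarantees that all
 * descriptor length updates are visible before the new head is published;
 * without it, the master could dequeue a half-initialized descriptor. */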
static_always_inline uword
memif_device_input_zc_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
			      vlib_frame_t * frame, memif_if_t * mif,
			      u16 qid, memif_interface_mode_t mode)
{
  vnet_main_t *vnm = vnet_get_main ();
  memif_main_t *mm = &memif_main;
  memif_ring_t *ring;
  memif_queue_t *mq;
  u32 next_index;
  uword n_trace = vlib_get_trace_count (vm, node);
  u32 n_rx_packets = 0, n_rx_bytes = 0;
  u32 *to_next = 0, *buffers;
  u32 bi0, bi1, bi2, bi3;
  u16 s0, s1, s2, s3;
  memif_desc_t *d0, *d1, *d2, *d3;
  vlib_buffer_t *b0, *b1, *b2, *b3;
  u32 thread_index = vm->thread_index;
  memif_per_thread_data_t *ptd = vec_elt_at_index (mm->per_thread_data,
						   thread_index);
  u16 cur_slot, last_slot, ring_size, n_slots, mask, head;
  i16 start_offset;
  u32 buffer_length;
  u16 n_alloc, n_from;

  mq = vec_elt_at_index (mif->rx_queues, qid);
  ring = mq->ring;
  ring_size = 1 << mq->log2_ring_size;
  mask = ring_size - 1;

  next_index = (mode == MEMIF_INTERFACE_MODE_IP) ?
    VNET_DEVICE_INPUT_NEXT_IP6_INPUT : VNET_DEVICE_INPUT_NEXT_ETHERNET_INPUT;

  /* assume that somebody will want to add an ethernet header to the packet,
     so start with the IP header at offset 14 */
  start_offset = (mode == MEMIF_INTERFACE_MODE_IP) ? 14 : 0;
  buffer_length = VLIB_BUFFER_DEFAULT_FREE_LIST_BYTES - start_offset;

  cur_slot = mq->last_tail;
  last_slot = ring->tail;
  if (cur_slot == last_slot)
    goto refill;
  n_slots = last_slot - cur_slot;
  /* process ring slots */
  vec_validate_aligned (ptd->buffers, MEMIF_RX_VECTOR_SZ,
			CLIB_CACHE_LINE_BYTES);
  while (n_slots && n_rx_packets < MEMIF_RX_VECTOR_SZ)
    {
      vlib_buffer_t *hb;

      s0 = cur_slot & mask;
      bi0 = mq->buffers[s0];
      ptd->buffers[n_rx_packets++] = bi0;

      CLIB_PREFETCH (&ring->desc[(cur_slot + 8) & mask],
		     CLIB_CACHE_LINE_BYTES, LOAD);
      d0 = &ring->desc[s0];
      hb = b0 = vlib_get_buffer (vm, bi0);
      b0->current_data = start_offset;
      b0->current_length = start_offset + d0->length;
      n_rx_bytes += d0->length;

      if (0 && memif_desc_is_invalid (mif, d0, buffer_length))
	return 0;

      cur_slot++;
      n_slots--;
      if (PREDICT_FALSE ((d0->flags & MEMIF_DESC_FLAG_NEXT) && n_slots))
	{
	  hb->flags |= VLIB_BUFFER_TOTAL_LENGTH_VALID;
	next_slot:
	  s0 = cur_slot & mask;
	  d0 = &ring->desc[s0];
	  bi0 = mq->buffers[s0];

	  /* previous buffer */
	  b0->next_buffer = bi0;
	  b0->flags |= VLIB_BUFFER_NEXT_PRESENT;

	  /* current buffer */
	  b0 = vlib_get_buffer (vm, bi0);
	  b0->current_data = start_offset;
	  b0->current_length = start_offset + d0->length;
	  hb->total_length_not_including_first_buffer += d0->length;
	  n_rx_bytes += d0->length;

	  cur_slot++;
	  n_slots--;
	  if ((d0->flags & MEMIF_DESC_FLAG_NEXT) && n_slots)
	    goto next_slot;
	}
    }

  /* release slots from the ring */
  mq->last_tail = cur_slot;
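  /* This is the zero-copy path: mq->buffers holds the VPP buffer indices
     posted to the ring during refill, and the peer wrote packet data
     straight into that shared buffer memory. The descriptors therefore
     already describe vlib buffers, and no memcpy is needed on receive. */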
  n_from = n_rx_packets;
  buffers = ptd->buffers;

  while (n_from)
    {
      u32 n_left_to_next;
      u32 next0, next1, next2, next3;

      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
      while (n_from >= 8 && n_left_to_next >= 4)
	{
	  b0 = vlib_get_buffer (vm, buffers[4]);
	  b1 = vlib_get_buffer (vm, buffers[5]);
	  b2 = vlib_get_buffer (vm, buffers[6]);
	  b3 = vlib_get_buffer (vm, buffers[7]);
	  vlib_prefetch_buffer_header (b0, STORE);
	  vlib_prefetch_buffer_header (b1, STORE);
	  vlib_prefetch_buffer_header (b2, STORE);
	  vlib_prefetch_buffer_header (b3, STORE);

	  /* enqueue buffer */
	  to_next[0] = bi0 = buffers[0];
	  to_next[1] = bi1 = buffers[1];
	  to_next[2] = bi2 = buffers[2];
	  to_next[3] = bi3 = buffers[3];
	  to_next += 4;
	  n_left_to_next -= 4;
	  buffers += 4;
	  b0 = vlib_get_buffer (vm, bi0);
	  b1 = vlib_get_buffer (vm, bi1);
	  b2 = vlib_get_buffer (vm, bi2);
	  b3 = vlib_get_buffer (vm, bi3);

	  vnet_buffer (b0)->sw_if_index[VLIB_RX] = mif->sw_if_index;
	  vnet_buffer (b0)->sw_if_index[VLIB_TX] = ~0;
	  vnet_buffer (b1)->sw_if_index[VLIB_RX] = mif->sw_if_index;
	  vnet_buffer (b1)->sw_if_index[VLIB_TX] = ~0;
	  vnet_buffer (b2)->sw_if_index[VLIB_RX] = mif->sw_if_index;
	  vnet_buffer (b2)->sw_if_index[VLIB_TX] = ~0;
	  vnet_buffer (b3)->sw_if_index[VLIB_RX] = mif->sw_if_index;
	  vnet_buffer (b3)->sw_if_index[VLIB_TX] = ~0;

	  if (mode == MEMIF_INTERFACE_MODE_IP)
	    {
	      next0 = memif_next_from_ip_hdr (node, b0);
	      next1 = memif_next_from_ip_hdr (node, b1);
	      next2 = memif_next_from_ip_hdr (node, b2);
	      next3 = memif_next_from_ip_hdr (node, b3);
	    }
	  else if (mode == MEMIF_INTERFACE_MODE_ETHERNET)
	    {
	      if (PREDICT_FALSE (mif->per_interface_next_index != ~0))
		{
		  next0 = mif->per_interface_next_index;
		  next1 = mif->per_interface_next_index;
		  next2 = mif->per_interface_next_index;
		  next3 = mif->per_interface_next_index;
		}
	      else
		{
		  next0 = next1 = next2 = next3 = next_index;
		  /* redirect if feature path enabled */
		  vnet_feature_start_device_input_x1 (mif->sw_if_index,
						      &next0, b0);
		  vnet_feature_start_device_input_x1 (mif->sw_if_index,
						      &next1, b1);
		  vnet_feature_start_device_input_x1 (mif->sw_if_index,
						      &next2, b2);
		  vnet_feature_start_device_input_x1 (mif->sw_if_index,
						      &next3, b3);
		}
	    }

	  /* trace */
	  if (PREDICT_FALSE (n_trace > 0))
	    {
	      memif_trace_buffer (vm, node, mif, b0, next0, qid, &n_trace);
	      if (PREDICT_FALSE (n_trace > 0))
		memif_trace_buffer (vm, node, mif, b1, next1, qid, &n_trace);
	      if (PREDICT_FALSE (n_trace > 0))
		memif_trace_buffer (vm, node, mif, b2, next2, qid, &n_trace);
	      if (PREDICT_FALSE (n_trace > 0))
		memif_trace_buffer (vm, node, mif, b3, next3, qid, &n_trace);
	    }

	  /* enqueue */
	  vlib_validate_buffer_enqueue_x4 (vm, node, next_index, to_next,
					   n_left_to_next, bi0, bi1, bi2, bi3,
					   next0, next1, next2, next3);

	  /* next */
	  n_from -= 4;
	}
      while (n_from && n_left_to_next)
	{
	  /* enqueue buffer */
	  to_next[0] = bi0 = buffers[0];
	  to_next += 1;
	  n_left_to_next--;
	  buffers += 1;

	  b0 = vlib_get_buffer (vm, bi0);
	  vnet_buffer (b0)->sw_if_index[VLIB_RX] = mif->sw_if_index;
	  vnet_buffer (b0)->sw_if_index[VLIB_TX] = ~0;

	  if (mode == MEMIF_INTERFACE_MODE_IP)
	    {
	      next0 = memif_next_from_ip_hdr (node, b0);
	    }
	  else if (mode == MEMIF_INTERFACE_MODE_ETHERNET)
	    {
	      if (PREDICT_FALSE (mif->per_interface_next_index != ~0))
		next0 = mif->per_interface_next_index;
	      else
		{
		  next0 = next_index;
		  /* redirect if feature path enabled */
		  vnet_feature_start_device_input_x1 (mif->sw_if_index,
						      &next0, b0);
		}
	    }

	  /* trace */
	  if (PREDICT_FALSE (n_trace > 0))
	    memif_trace_buffer (vm, node, mif, b0, next0, qid, &n_trace);

	  /* enqueue */
	  vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
					   n_left_to_next, bi0, next0);

	  /* next */
	  n_from--;
	}
      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }
  vlib_increment_combined_counter (vnm->interface_main.combined_sw_if_counters
				   + VNET_INTERFACE_COUNTER_RX, thread_index,
				   mif->hw_if_index, n_rx_packets,
				   n_rx_bytes);

  /* refill ring with empty buffers */
refill:
  vec_reset_length (ptd->buffers);

  head = ring->head;
  n_slots = ring_size - head + mq->last_tail;
  memif_desc_t *dt = &ptd->desc_template;
  memset (dt, 0, sizeof (memif_desc_t));
  dt->length = buffer_length;

  n_alloc = vlib_buffer_alloc_to_ring (vm, mq->buffers, head & mask,
				       ring_size, n_slots);

  if (PREDICT_FALSE (n_alloc != n_slots))
    {
      vlib_error_count (vm, node->node_index,
			MEMIF_INPUT_ERROR_BUFFER_ALLOC_FAIL, 1);
    }
  while (n_alloc >= 32)
    {
      bi0 = mq->buffers[(head + 4) & mask];
      vlib_prefetch_buffer_with_index (vm, bi0, LOAD);
      bi1 = mq->buffers[(head + 5) & mask];
      vlib_prefetch_buffer_with_index (vm, bi1, LOAD);
      bi2 = mq->buffers[(head + 6) & mask];
      vlib_prefetch_buffer_with_index (vm, bi2, LOAD);
      bi3 = mq->buffers[(head + 7) & mask];
      vlib_prefetch_buffer_with_index (vm, bi3, LOAD);

      s0 = head++ & mask;
      s1 = head++ & mask;
      s2 = head++ & mask;
      s3 = head++ & mask;

      d0 = &ring->desc[s0];
      d1 = &ring->desc[s1];
      d2 = &ring->desc[s2];
      d3 = &ring->desc[s3];

      clib_memcpy (d0, dt, sizeof (memif_desc_t));
      clib_memcpy (d1, dt, sizeof (memif_desc_t));
      clib_memcpy (d2, dt, sizeof (memif_desc_t));
      clib_memcpy (d3, dt, sizeof (memif_desc_t));

      b0 = vlib_get_buffer (vm, mq->buffers[s0]);
      b1 = vlib_get_buffer (vm, mq->buffers[s1]);
      b2 = vlib_get_buffer (vm, mq->buffers[s2]);
      b3 = vlib_get_buffer (vm, mq->buffers[s3]);

      d0->region = b0->buffer_pool_index + 1;
      d1->region = b1->buffer_pool_index + 1;
      d2->region = b2->buffer_pool_index + 1;
      d3->region = b3->buffer_pool_index + 1;

      d0->offset =
	(void *) b0->data - mif->regions[d0->region].shm + start_offset;
      d1->offset =
	(void *) b1->data - mif->regions[d1->region].shm + start_offset;
      d2->offset =
	(void *) b2->data - mif->regions[d2->region].shm + start_offset;
      d3->offset =
	(void *) b3->data - mif->regions[d3->region].shm + start_offset;

      n_alloc -= 4;
    }
  while (n_alloc)
    {
      s0 = head++ & mask;
      d0 = &ring->desc[s0];
      clib_memcpy (d0, dt, sizeof (memif_desc_t));
      b0 = vlib_get_buffer (vm, mq->buffers[s0]);
      d0->region = b0->buffer_pool_index + 1;
      d0->offset =
	(void *) b0->data - mif->regions[d0->region].shm + start_offset;

      n_alloc -= 1;
    }

  CLIB_MEMORY_STORE_BARRIER ();
  ring->head = head;

  return n_rx_packets;
}
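/* Region numbering in the refill path: region 0 is the shared memory that
 * carries the descriptor rings, so each VPP buffer pool is exported as
 * region buffer_pool_index + 1, and a descriptor's offset is the buffer's
 * data pointer expressed relative to that region's mapping. */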
VLIB_NODE_FN (memif_input_node) (vlib_main_t * vm,
				 vlib_node_runtime_t * node,
				 vlib_frame_t * frame)
{
  u32 n_rx = 0;
  memif_main_t *mm = &memif_main;
  vnet_device_input_runtime_t *rt = (void *) node->runtime_data;
  vnet_device_and_queue_t *dq;
  memif_interface_mode_t mode_ip = MEMIF_INTERFACE_MODE_IP;
  memif_interface_mode_t mode_eth = MEMIF_INTERFACE_MODE_ETHERNET;

  foreach_device_and_queue (dq, rt->devices_and_queues)
  {
    memif_if_t *mif;
    mif = vec_elt_at_index (mm->interfaces, dq->dev_instance);
    if ((mif->flags & MEMIF_IF_FLAG_ADMIN_UP) &&
	(mif->flags & MEMIF_IF_FLAG_CONNECTED))
      {
	if (mif->flags & MEMIF_IF_FLAG_ZERO_COPY)
	  {
	    if (mif->mode == MEMIF_INTERFACE_MODE_IP)
	      n_rx += memif_device_input_zc_inline (vm, node, frame, mif,
						    dq->queue_id, mode_ip);
	    else
	      n_rx += memif_device_input_zc_inline (vm, node, frame, mif,
						    dq->queue_id, mode_eth);
	  }
	else if (mif->flags & MEMIF_IF_FLAG_IS_SLAVE)
	  {
	    if (mif->mode == MEMIF_INTERFACE_MODE_IP)
	      n_rx += memif_device_input_inline (vm, node, frame, mif,
						 MEMIF_RING_M2S, dq->queue_id,
						 mode_ip);
	    else
	      n_rx += memif_device_input_inline (vm, node, frame, mif,
						 MEMIF_RING_M2S, dq->queue_id,
						 mode_eth);
	  }
	else
	  {
	    if (mif->mode == MEMIF_INTERFACE_MODE_IP)
	      n_rx += memif_device_input_inline (vm, node, frame, mif,
						 MEMIF_RING_S2M, dq->queue_id,
						 mode_ip);
	    else
	      n_rx += memif_device_input_inline (vm, node, frame, mif,
						 MEMIF_RING_S2M, dq->queue_id,
						 mode_eth);
	  }
      }
  }

  return n_rx;
}
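/* The ring type and mode arguments above are compile-time constants at
 * each call site, so the static_always_inline workers are specialized per
 * combination and the per-packet mode/type branches are resolved by the
 * compiler rather than at runtime. */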
VLIB_REGISTER_NODE (memif_input_node) = {
  .name = "memif-input",
  .sibling_of = "device-input",
  .format_trace = format_memif_input_trace,
  .type = VLIB_NODE_TYPE_INPUT,
  .state = VLIB_NODE_STATE_INTERRUPT,
  .n_errors = MEMIF_INPUT_N_ERROR,
  .error_strings = memif_input_error_strings,
};
/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */