/*
 *------------------------------------------------------------------
 * Copyright (c) 2016 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *------------------------------------------------------------------
 */
#include <sys/ioctl.h>

#include <vlib/vlib.h>
#include <vlib/unix/unix.h>
#include <vnet/ethernet/ethernet.h>
#include <vnet/interface/rx_queue_funcs.h>
#include <vnet/feature/feature.h>

#include <memif/memif.h>
#include <memif/private.h>
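/* In IP mode descriptors carry bare IP packets, so payload is placed at a
   14-byte offset inside the buffer - presumably to leave room for a
   downstream node to prepend an ethernet header without moving data. */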
#define MEMIF_IP_OFFSET 14

#define foreach_memif_input_error \
  _ (BUFFER_ALLOC_FAIL, buffer_alloc, ERROR, "buffer allocation failed") \
  _ (BAD_DESC, bad_desc, ERROR, "bad descriptor") \
  _ (NOT_IP, not_ip, INFO, "not ip packet")

typedef enum
{
#define _(f, n, s, d) MEMIF_INPUT_ERROR_##f,
  foreach_memif_input_error
#undef _
    MEMIF_INPUT_N_ERROR,
} memif_input_error_t;

static vlib_error_desc_t memif_input_error_counters[] = {
#define _(f, n, s, d) { #n, d, VL_COUNTER_SEVERITY_##s },
  foreach_memif_input_error
#undef _
};

typedef struct
{
  u32 next_index;
  u32 hw_if_index;
  u16 ring;
} memif_input_trace_t;
static __clib_unused u8 *
format_memif_input_trace (u8 * s, va_list * args)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
  memif_input_trace_t *t = va_arg (*args, memif_input_trace_t *);
  u32 indent = format_get_indent (s);

  s = format (s, "memif: hw_if_index %d next-index %d",
	      t->hw_if_index, t->next_index);
  s = format (s, "\n%Uslot: ring %u", format_white_space, indent + 2,
	      t->ring);
  return s;
}
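/* Select the next node from the first nibble of the packet: version 4 and
   6 map to the IPv4/IPv6 input nodes, anything else is counted as not-ip
   and dropped. */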
static_always_inline u32
memif_next_from_ip_hdr (vlib_node_runtime_t * node, vlib_buffer_t * b)
{
  u8 *ptr = vlib_buffer_get_current (b);
  u8 v = *ptr & 0xf0;

  if (PREDICT_TRUE (v == 0x40))
    return VNET_DEVICE_INPUT_NEXT_IP4_NCS_INPUT;
  else if (PREDICT_TRUE (v == 0x60))
    return VNET_DEVICE_INPUT_NEXT_IP6_INPUT;

  b->error = node->errors[MEMIF_INPUT_ERROR_NOT_IP];
  return VNET_DEVICE_INPUT_NEXT_DROP;
}
static_always_inline void
memif_trace_buffer (vlib_main_t * vm, vlib_node_runtime_t * node,
		    memif_if_t * mif, vlib_buffer_t * b, u32 next, u16 qid,
		    uword * n_tracep)
{
  if (PREDICT_TRUE
      (b != 0 && vlib_trace_buffer (vm, node, next, b, /* follow_chain */ 0)))
    {
      memif_input_trace_t *tr;
      vlib_set_trace_count (vm, node, --(*n_tracep));
      tr = vlib_add_trace (vm, node, b, sizeof (*tr));
      tr->next_index = next;
      tr->hw_if_index = mif->hw_if_index;
      tr->ring = qid;
    }
}
static_always_inline void
memif_add_copy_op (memif_per_thread_data_t * ptd, void *data, u32 len,
		   u16 buffer_offset, u16 buffer_vec_index)
{
  memif_copy_op_t *co;
  vec_add2_aligned (ptd->copy_ops, co, 1, CLIB_CACHE_LINE_BYTES);
  co->data = data;
  co->data_len = len;
  co->buffer_offset = buffer_offset;
  co->buffer_vec_index = buffer_vec_index;
}
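/* If a packet did not fit into its first buffer, spill the overflow into
   the pre-allocated buffers that follow it and link them into a chain. */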
static_always_inline void
memif_add_to_chain (vlib_main_t * vm, vlib_buffer_t * b, u32 * buffers,
		    u32 buffer_size)
{
  vlib_buffer_t *seg = b;
  i32 bytes_left = b->current_length - buffer_size + b->current_data;

  if (PREDICT_TRUE (bytes_left <= 0))
    return;

  b->current_length -= bytes_left;
  b->total_length_not_including_first_buffer = bytes_left;

  while (bytes_left)
    {
      seg->flags |= VLIB_BUFFER_NEXT_PRESENT;
      seg->next_buffer = buffers[0];
      seg = vlib_get_buffer (vm, buffers[0]);
      buffers++;
      seg->current_data = 0;
      seg->current_length = clib_min (buffer_size, bytes_left);
      bytes_left -= seg->current_length;
    }
}
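/* Snapshot up to MEMIF_RX_VECTOR_SZ packets worth of descriptors into the
   per-thread arrays; region and offset are packed into one pointer-sized
   value so they can be validated and resolved in a single later pass. */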
static_always_inline u16
memif_parse_desc (memif_per_thread_data_t *ptd, memif_if_t *mif,
		  memif_queue_t *mq, u16 next, u16 n_avail)
{
  memif_ring_t *ring = mq->ring;
  memif_desc_t *descs = ring->desc;
  void **desc_data = ptd->desc_data;
  u16 *desc_len = ptd->desc_len;
  memif_desc_status_t *desc_status = ptd->desc_status;
  u16 n_desc = 0, n_pkts = 0;
  u32 i = 0;
  u16 mask = pow2_mask (mq->log2_ring_size);
  memif_desc_t *d = 0;
  u32 slot = next;

  while (i < n_avail)
    {
      u8 flags;
      d = descs + (slot++ & mask);
      desc_data[i] = (void *) ((u64) d->region << 32 | d->offset);
      desc_len[i] = d->length;
      desc_status[i].as_u8 = flags = d->flags;
      i++;
      if (PREDICT_TRUE ((flags & MEMIF_DESC_FLAG_NEXT) == 0))
	{
	  n_desc = i;
	  if (++n_pkts == MEMIF_RX_VECTOR_SZ)
	    goto frame_full;
	}
    }
frame_full:
  ptd->n_packets = n_pkts;
  return n_desc;
}
static_always_inline void
memif_desc_status_set_err (memif_desc_status_t *p,
			   memif_desc_status_err_code_t e)
{
  memif_desc_status_t s = { .err = 1, .err_code = e };
  p->as_u8 |= s.as_u8;
}
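/* Validate each parsed descriptor against its region bounds and resolve
   region/offset into a shared-memory pointer. Error bits, the byte count
   and the longest descriptor are accumulated so the caller can cheaply
   decide whether the fast path is possible. */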
static_always_inline void
memif_validate_desc_data (memif_per_thread_data_t *ptd, memif_if_t *mif,
			  u16 n_desc, int is_ethernet)
{
  void **desc_data = ptd->desc_data;
  u16 *desc_len = ptd->desc_len;
  memif_desc_status_t *desc_status = ptd->desc_status;
  u16 n_regions = vec_len (mif->regions);
  u32 n_rx_bytes = 0;
  u16 max_len = 0;
  u8 xor_status = 0;

  for (u32 i = 0; i < n_desc; i++)
    {
      u16 region = ((u64) desc_data[i]) >> 32;
      u32 offset = (u64) desc_data[i];
      u16 len = desc_len[i];
      memif_region_t *r = mif->regions + region;

      if (region >= n_regions)
	memif_desc_status_set_err (desc_status + i,
				   MEMIF_DESC_STATUS_ERR_BAD_REGION);
      else if (offset + len > r->region_size)
	memif_desc_status_set_err (desc_status + i,
				   MEMIF_DESC_STATUS_ERR_REGION_OVERRUN);
      else if (is_ethernet && len > ETHERNET_MAX_PACKET_BYTES)
	memif_desc_status_set_err (desc_status + i,
				   MEMIF_DESC_STATUS_ERR_DATA_TOO_BIG);
      else if (len == 0)
	memif_desc_status_set_err (desc_status + i,
				   MEMIF_DESC_STATUS_ERR_ZERO_LENGTH);
      else
	{
	  desc_data[i] = r->shm + offset;
	  n_rx_bytes += len;
	  max_len = clib_max (max_len, len);
	}
      xor_status |= desc_status[i].as_u8;
    }

  ptd->max_desc_len = max_len;
  ptd->xor_status = xor_status;
  ptd->n_rx_bytes = n_rx_bytes;
}
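/* Expand the descriptor arrays into packet ops (one per packet) and copy
   ops (one per copy into a vlib buffer), splitting packets larger than a
   single buffer; packets with a bad descriptor are counted and dropped. */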
static_always_inline u32
memif_process_desc (vlib_main_t *vm, vlib_node_runtime_t *node,
		    memif_per_thread_data_t *ptd, memif_if_t *mif)
{
  u16 buffer_size = vlib_buffer_get_default_data_size (vm);
  int is_ip = mif->mode == MEMIF_INTERFACE_MODE_IP;
  i16 start_offset = (is_ip) ? MEMIF_IP_OFFSET : 0;
  memif_packet_op_t *po = ptd->packet_ops;
  void **desc_data = ptd->desc_data;
  u16 *desc_len = ptd->desc_len;
  memif_desc_status_t *desc_status = ptd->desc_status;
  u32 n_buffers = 0;
  u32 n_left = ptd->n_packets;
  u32 packet_len;
  int i = -1;
  int bad_packets = 0;

  /* construct copy and packet vector out of ring slots */
  while (n_left)
    {
      u32 dst_off, src_off, n_bytes_left;
      void *mb0;
      po->first_buffer_vec_index = n_buffers++;

      packet_len = 0;
      src_off = 0;
      dst_off = start_offset;

    next_slot:
      i++; /* next descriptor */
      n_bytes_left = desc_len[i];

      packet_len += n_bytes_left;
      mb0 = desc_data[i];

      if (PREDICT_FALSE (desc_status[i].err))
	{
	  vlib_error_count (vm, node->node_index, MEMIF_INPUT_ERROR_BAD_DESC,
			    1);
	  bad_packets++;
	  ASSERT (n_buffers > 0);
	  n_buffers--;
	  goto next_packet;
	}
      else
	do
	  {
	    u32 dst_free = buffer_size - dst_off;
	    if (dst_free == 0)
	      {
		dst_off = 0;
		dst_free = buffer_size;
		n_buffers++;
	      }
	    u32 bytes_to_copy = clib_min (dst_free, n_bytes_left);
	    memif_add_copy_op (ptd, mb0 + src_off, bytes_to_copy, dst_off,
			       n_buffers - 1);
	    n_bytes_left -= bytes_to_copy;
	    src_off += bytes_to_copy;
	    dst_off += bytes_to_copy;
	  }
	while (PREDICT_FALSE (n_bytes_left));

      if (desc_status[i].next)
	{
	  src_off = 0;
	  goto next_slot;
	}

      /* update packet op */
      po->packet_len = packet_len;
      po++;

    next_packet:
      /* next packet */
      n_left--;
    }
  ASSERT (ptd->n_packets >= bad_packets);
  ptd->n_packets -= bad_packets;
  return n_buffers;
}
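/* Simple case: one descriptor per packet and everything fits in the first
   buffer, so metadata is just a template copy plus a length, handled four
   packets per iteration. */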
static_always_inline void
memif_fill_buffer_mdata_simple (vlib_node_runtime_t *node,
				memif_per_thread_data_t *ptd,
				vlib_buffer_t **b, u16 *next, int is_ip)
{
  vlib_buffer_t bt;
  u16 *dl = ptd->desc_len;
  /* process buffer metadata */

  u32 n_left = ptd->n_packets;

  /* copy template into local variable - will save per packet load */
  vlib_buffer_copy_template (&bt, &ptd->buffer_template);

  while (n_left >= 8)
    {
      vlib_prefetch_buffer_header (b[4], STORE);
      vlib_prefetch_buffer_header (b[5], STORE);
      vlib_prefetch_buffer_header (b[6], STORE);
      vlib_prefetch_buffer_header (b[7], STORE);

      vlib_buffer_copy_template (b[0], &bt);
      vlib_buffer_copy_template (b[1], &bt);
      vlib_buffer_copy_template (b[2], &bt);
      vlib_buffer_copy_template (b[3], &bt);

      b[0]->current_length = dl[0];
      b[1]->current_length = dl[1];
      b[2]->current_length = dl[2];
      b[3]->current_length = dl[3];

      if (is_ip)
	{
	  next[0] = memif_next_from_ip_hdr (node, b[0]);
	  next[1] = memif_next_from_ip_hdr (node, b[1]);
	  next[2] = memif_next_from_ip_hdr (node, b[2]);
	  next[3] = memif_next_from_ip_hdr (node, b[3]);
	}

      /* next */
      n_left -= 4;
      b += 4;
      next += 4;
      dl += 4;
    }

  while (n_left)
    {
      vlib_buffer_copy_template (b[0], &bt);
      b[0]->current_length = dl[0];

      if (is_ip)
	next[0] = memif_next_from_ip_hdr (node, b[0]);

      /* next */
      n_left -= 1;
      b += 1;
      next += 1;
      dl += 1;
    }
}
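/* General case: packets may span several buffers; the first buffer of each
   packet gets the template and the total length, the rest are chained
   behind it. */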
static_always_inline void
memif_fill_buffer_mdata (vlib_main_t *vm, vlib_node_runtime_t *node,
			 memif_per_thread_data_t *ptd, memif_if_t *mif,
			 u32 *bi, u16 *next, int is_ip)
{
  u16 buffer_size = vlib_buffer_get_default_data_size (vm);
  vlib_buffer_t *b0, *b1, *b2, *b3, bt;
  memif_packet_op_t *po;
  /* process buffer metadata */

  u32 n_from = ptd->n_packets;
  po = ptd->packet_ops;

  /* copy template into local variable - will save per packet load */
  vlib_buffer_copy_template (&bt, &ptd->buffer_template);

  while (n_from >= 8)
    {
      b0 = vlib_get_buffer (vm, ptd->buffers[po[4].first_buffer_vec_index]);
      b1 = vlib_get_buffer (vm, ptd->buffers[po[5].first_buffer_vec_index]);
      b2 = vlib_get_buffer (vm, ptd->buffers[po[6].first_buffer_vec_index]);
      b3 = vlib_get_buffer (vm, ptd->buffers[po[7].first_buffer_vec_index]);

      vlib_prefetch_buffer_header (b0, STORE);
      vlib_prefetch_buffer_header (b1, STORE);
      vlib_prefetch_buffer_header (b2, STORE);
      vlib_prefetch_buffer_header (b3, STORE);

      u32 fbvi[4];
      fbvi[0] = po[0].first_buffer_vec_index;
      fbvi[1] = po[1].first_buffer_vec_index;
      fbvi[2] = po[2].first_buffer_vec_index;
      fbvi[3] = po[3].first_buffer_vec_index;

      bi[0] = ptd->buffers[fbvi[0]];
      bi[1] = ptd->buffers[fbvi[1]];
      bi[2] = ptd->buffers[fbvi[2]];
      bi[3] = ptd->buffers[fbvi[3]];

      b0 = vlib_get_buffer (vm, bi[0]);
      b1 = vlib_get_buffer (vm, bi[1]);
      b2 = vlib_get_buffer (vm, bi[2]);
      b3 = vlib_get_buffer (vm, bi[3]);

      vlib_buffer_copy_template (b0, &bt);
      vlib_buffer_copy_template (b1, &bt);
      vlib_buffer_copy_template (b2, &bt);
      vlib_buffer_copy_template (b3, &bt);

      b0->current_length = po[0].packet_len;
      b1->current_length = po[1].packet_len;
      b2->current_length = po[2].packet_len;
      b3->current_length = po[3].packet_len;

      memif_add_to_chain (vm, b0, ptd->buffers + fbvi[0] + 1, buffer_size);
      memif_add_to_chain (vm, b1, ptd->buffers + fbvi[1] + 1, buffer_size);
      memif_add_to_chain (vm, b2, ptd->buffers + fbvi[2] + 1, buffer_size);
      memif_add_to_chain (vm, b3, ptd->buffers + fbvi[3] + 1, buffer_size);

      if (is_ip)
	{
	  next[0] = memif_next_from_ip_hdr (node, b0);
	  next[1] = memif_next_from_ip_hdr (node, b1);
	  next[2] = memif_next_from_ip_hdr (node, b2);
	  next[3] = memif_next_from_ip_hdr (node, b3);
	}

      /* next */
      n_from -= 4;
      po += 4;
      bi += 4;
      next += 4;
    }
  while (n_from)
    {
      u32 fbvi[1];
      fbvi[0] = po[0].first_buffer_vec_index;
      bi[0] = ptd->buffers[fbvi[0]];
      b0 = vlib_get_buffer (vm, bi[0]);
      vlib_buffer_copy_template (b0, &bt);
      b0->current_length = po->packet_len;

      memif_add_to_chain (vm, b0, ptd->buffers + fbvi[0] + 1, buffer_size);

      if (is_ip)
	next[0] = memif_next_from_ip_hdr (node, b0);

      /* next */
      n_from -= 1;
      po += 1;
      bi += 1;
      next += 1;
    }
}
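/* Return consumed slots to the peer: on an S2M ring the consumer owns and
   advances tail; on an M2S ring tail belongs to the master, so only the
   local read marker moves (head is advanced when the ring is refilled). */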
static_always_inline void
memif_advance_ring (memif_ring_type_t type, memif_queue_t *mq,
		    memif_ring_t *ring, u16 cur_slot)
{
  if (type == MEMIF_RING_S2M)
    {
      __atomic_store_n (&ring->tail, cur_slot, __ATOMIC_RELEASE);
      mq->last_head = cur_slot;
    }
  else
    {
      mq->last_tail = cur_slot;
    }
}
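/* Copy-mode RX: parse and validate descriptors, copy payloads into freshly
   allocated vlib buffers, then enqueue the frame to ethernet-input or to
   the per-packet IP input nodes. */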
static_always_inline uword
memif_device_input_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
			   memif_if_t *mif, memif_ring_type_t type, u16 qid,
			   memif_interface_mode_t mode)
{
  vnet_main_t *vnm = vnet_get_main ();
  memif_main_t *mm = &memif_main;
  memif_ring_t *ring;
  memif_queue_t *mq;
  u16 buffer_size = vlib_buffer_get_default_data_size (vm);
  uword n_trace;
  u16 nexts[MEMIF_RX_VECTOR_SZ], *next = nexts;
  u32 _to_next_bufs[MEMIF_RX_VECTOR_SZ], *to_next_bufs = _to_next_bufs, *bi;
  u32 n_left_to_next;
  u32 next_index = VNET_DEVICE_INPUT_NEXT_ETHERNET_INPUT;
  vlib_buffer_t *buffer_ptrs[MEMIF_RX_VECTOR_SZ];
  u32 thread_index = vm->thread_index;
  memif_per_thread_data_t *ptd =
    vec_elt_at_index (mm->per_thread_data, thread_index);
  u16 cur_slot, ring_size, n_slots, mask;
  u16 n_buffers, n_alloc, n_desc;
  i16 start_offset;
  int is_simple = 1;
  int is_slave = (mif->flags & MEMIF_IF_FLAG_IS_SLAVE) != 0;

  mq = vec_elt_at_index (mif->rx_queues, qid);
  ring = mq->ring;
  ring_size = 1 << mq->log2_ring_size;
  mask = ring_size - 1;

  start_offset = (mode == MEMIF_INTERFACE_MODE_IP) ? MEMIF_IP_OFFSET : 0;

  if (type == MEMIF_RING_M2S)
    {
      cur_slot = mq->last_tail;
      n_slots = __atomic_load_n (&ring->tail, __ATOMIC_ACQUIRE) - cur_slot;
    }
  else
    {
      cur_slot = mq->last_head;
      n_slots = __atomic_load_n (&ring->head, __ATOMIC_ACQUIRE) - cur_slot;
    }

  if (n_slots == 0)
    {
      ptd->n_packets = 0;
      goto refill;
    }

  n_desc = memif_parse_desc (ptd, mif, mq, cur_slot, n_slots);

  if (n_desc != ptd->n_packets)
    is_simple = 0;

  cur_slot += n_desc;

  if (mif->mode == MEMIF_INTERFACE_MODE_ETHERNET)
    memif_validate_desc_data (ptd, mif, n_desc, /* is_ethernet */ 1);
  else
    memif_validate_desc_data (ptd, mif, n_desc, /* is_ethernet */ 0);

  if (ptd->max_desc_len > buffer_size - start_offset)
    is_simple = 0;

  if (ptd->xor_status != 0)
    is_simple = 0;

  if (is_simple)
    n_buffers = ptd->n_packets;
  else
    n_buffers = memif_process_desc (vm, node, ptd, mif);

  if (PREDICT_FALSE (n_buffers == 0))
    {
      /* All descriptors are bad. Release slots in the ring and bail */
      memif_advance_ring (type, mq, ring, cur_slot);
      goto refill;
    }

  /* allocate free buffers */
  vec_validate_aligned (ptd->buffers, n_buffers - 1, CLIB_CACHE_LINE_BYTES);
  n_alloc = vlib_buffer_alloc_from_pool (vm, ptd->buffers, n_buffers,
					 mq->buffer_pool_index);
  if (PREDICT_FALSE (n_alloc != n_buffers))
    {
      if (n_alloc)
	vlib_buffer_free (vm, ptd->buffers, n_alloc);
      vlib_error_count (vm, node->node_index,
			MEMIF_INPUT_ERROR_BUFFER_ALLOC_FAIL, 1);
      ptd->n_packets = 0;
      goto refill;
    }

  /* copy data */
  if (is_simple)
    {
      int i;
      int n_pkts = ptd->n_packets;
      void **desc_data = ptd->desc_data;
      u16 *desc_len = ptd->desc_len;

      vlib_get_buffers (vm, ptd->buffers, buffer_ptrs, n_buffers);

      for (i = 0; i + 8 < n_pkts; i++)
	{
	  clib_prefetch_load (desc_data[i + 8]);
	  clib_prefetch_store (buffer_ptrs[i + 8]->data);
	  clib_memcpy_fast (buffer_ptrs[i]->data + start_offset, desc_data[i],
			    desc_len[i]);
	}
      for (; i < n_pkts; i++)
	clib_memcpy_fast (buffer_ptrs[i]->data + start_offset, desc_data[i],
			  desc_len[i]);
    }
  else
    {
      int i;
      vlib_buffer_t *b;
      memif_copy_op_t *co = ptd->copy_ops;
      u32 n_pkts = vec_len (ptd->copy_ops);

      for (i = 0; i + 8 < n_pkts; i++)
	{
	  clib_prefetch_load (co[i + 8].data);
	  b = vlib_get_buffer (vm, ptd->buffers[co[i].buffer_vec_index]);
	  clib_memcpy_fast (b->data + co[i].buffer_offset, co[i].data,
			    co[i].data_len);
	}
      for (; i < n_pkts; i++)
	{
	  b = vlib_get_buffer (vm, ptd->buffers[co[i].buffer_vec_index]);
	  clib_memcpy_fast (b->data + co[i].buffer_offset, co[i].data,
			    co[i].data_len);
	}
    }

  /* release slots from the ring */
  memif_advance_ring (type, mq, ring, cur_slot);

  /* prepare buffer template and next indices */
  vnet_buffer (&ptd->buffer_template)->sw_if_index[VLIB_RX] = mif->sw_if_index;
  vnet_buffer (&ptd->buffer_template)->feature_arc_index = 0;
  ptd->buffer_template.current_data = start_offset;
  ptd->buffer_template.current_config_index = 0;
  ptd->buffer_template.buffer_pool_index = mq->buffer_pool_index;
  ptd->buffer_template.ref_count = 1;

  if (mode == MEMIF_INTERFACE_MODE_ETHERNET)
    {
      next_index = VNET_DEVICE_INPUT_NEXT_ETHERNET_INPUT;
      if (mif->per_interface_next_index != ~0)
	next_index = mif->per_interface_next_index;
      else
	vnet_feature_start_device_input (mif->sw_if_index, &next_index,
					 &ptd->buffer_template);

      vlib_get_new_next_frame (vm, node, next_index, to_next_bufs,
			       n_left_to_next);
      if (PREDICT_TRUE (next_index == VNET_DEVICE_INPUT_NEXT_ETHERNET_INPUT))
	{
	  vlib_next_frame_t *nf;
	  vlib_frame_t *f;
	  ethernet_input_frame_t *ef;
	  nf = vlib_node_runtime_get_next_frame (vm, node, next_index);
	  f = vlib_get_frame (vm, nf->frame);
	  f->flags = ETH_INPUT_FRAME_F_SINGLE_SW_IF_IDX;

	  ef = vlib_frame_scalar_args (f);
	  ef->sw_if_index = mif->sw_if_index;
	  ef->hw_if_index = mif->hw_if_index;
	  vlib_frame_no_append (f);
	}
    }

  /* fill buffer metadata */
  if (is_simple)
    {
      vlib_buffer_copy_indices (to_next_bufs, ptd->buffers, ptd->n_packets);
      if (mode == MEMIF_INTERFACE_MODE_IP)
	memif_fill_buffer_mdata_simple (node, ptd, buffer_ptrs, nexts, 1);
      else
	memif_fill_buffer_mdata_simple (node, ptd, buffer_ptrs, nexts, 0);
    }
  else
    {
      if (mode == MEMIF_INTERFACE_MODE_IP)
	memif_fill_buffer_mdata (vm, node, ptd, mif, to_next_bufs, nexts, 1);
      else
	memif_fill_buffer_mdata (vm, node, ptd, mif, to_next_bufs, nexts, 0);
    }

  /* packet trace if enabled */
  if (PREDICT_FALSE ((n_trace = vlib_get_trace_count (vm, node))))
    {
      u32 n_left = ptd->n_packets;
      bi = to_next_bufs;
      next = nexts;
      u32 ni = next_index;
      while (n_trace && n_left)
	{
	  vlib_buffer_t *b;
	  memif_input_trace_t *tr;
	  if (mode != MEMIF_INTERFACE_MODE_ETHERNET)
	    ni = next[0];
	  b = vlib_get_buffer (vm, bi[0]);
	  if (PREDICT_TRUE
	      (vlib_trace_buffer (vm, node, ni, b, /* follow_chain */ 0)))
	    {
	      tr = vlib_add_trace (vm, node, b, sizeof (*tr));
	      tr->next_index = ni;
	      tr->hw_if_index = mif->hw_if_index;
	      tr->ring = qid;
	      n_trace--;
	    }

	  /* next */
	  n_left--;
	  bi++;
	  next++;
	}
      vlib_set_trace_count (vm, node, n_trace);
    }

  if (mode == MEMIF_INTERFACE_MODE_ETHERNET)
    {
      n_left_to_next -= ptd->n_packets;
      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }
  else
    vlib_buffer_enqueue_to_next (vm, node, to_next_bufs, nexts,
				 ptd->n_packets);

  vlib_increment_combined_counter (
    vnm->interface_main.combined_sw_if_counters + VNET_INTERFACE_COUNTER_RX,
    thread_index, mif->sw_if_index, ptd->n_packets, ptd->n_rx_bytes);

  /* refill ring with empty buffers */
refill:
  vec_reset_length (ptd->buffers);
  vec_reset_length (ptd->copy_ops);

  if (type == MEMIF_RING_M2S)
    {
      u16 head = ring->head;
      n_slots = ring_size - head + mq->last_tail;

      while (n_slots--)
	{
	  u16 s = head++ & mask;
	  memif_desc_t *d = &ring->desc[s];
	  d->length = mif->run.buffer_size;
	}

      __atomic_store_n (&ring->head, head, __ATOMIC_RELEASE);
    }

  return ptd->n_packets;
}
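/* Zero-copy RX: descriptors point into VPP buffer memory that was lent to
   the peer, so payload is never copied - buffers are enqueued in place and
   only ring bookkeeping and buffer metadata are touched. */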
static_always_inline uword
memif_device_input_zc_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
			      memif_if_t *mif, u16 qid,
			      memif_interface_mode_t mode)
{
  vnet_main_t *vnm = vnet_get_main ();
  memif_main_t *mm = &memif_main;
  memif_ring_t *ring;
  memif_queue_t *mq;
  u32 next_index;
  uword n_trace = vlib_get_trace_count (vm, node);
  u32 n_rx_packets = 0, n_rx_bytes = 0;
  u32 *to_next = 0, *buffers;
  u32 bi0, bi1, bi2, bi3;
  u16 s0;
  memif_desc_t *d0;
  vlib_buffer_t *b0, *b1, *b2, *b3;
  u32 thread_index = vm->thread_index;
  memif_per_thread_data_t *ptd = vec_elt_at_index (mm->per_thread_data,
						   thread_index);
  u16 cur_slot, last_slot, ring_size, n_slots, mask, head;
  u16 slot, n_alloc, n_from;
  i16 start_offset;
  u32 buffer_length;
  u64 offset;

  mq = vec_elt_at_index (mif->rx_queues, qid);
  ring = mq->ring;
  ring_size = 1 << mq->log2_ring_size;
  mask = ring_size - 1;

  next_index = (mode == MEMIF_INTERFACE_MODE_IP) ?
    VNET_DEVICE_INPUT_NEXT_IP6_INPUT : VNET_DEVICE_INPUT_NEXT_ETHERNET_INPUT;

  /* assume that somebody will want to add an ethernet header on the packet,
     so start with the IP header at offset 14 */
  start_offset = (mode == MEMIF_INTERFACE_MODE_IP) ? MEMIF_IP_OFFSET : 0;
  buffer_length = vlib_buffer_get_default_data_size (vm) - start_offset;

  cur_slot = mq->last_tail;
  last_slot = __atomic_load_n (&ring->tail, __ATOMIC_ACQUIRE);
  if (cur_slot == last_slot)
    goto refill;
  n_slots = last_slot - cur_slot;

  /* process ring slots */
  vec_validate_aligned (ptd->buffers, MEMIF_RX_VECTOR_SZ,
			CLIB_CACHE_LINE_BYTES);
  while (n_slots && n_rx_packets < MEMIF_RX_VECTOR_SZ)
    {
      vlib_buffer_t *hb;

      s0 = cur_slot & mask;
      bi0 = mq->buffers[s0];
      ptd->buffers[n_rx_packets++] = bi0;

      clib_prefetch_load (&ring->desc[(cur_slot + 8) & mask]);
      d0 = &ring->desc[s0];
      hb = b0 = vlib_get_buffer (vm, bi0);
      b0->current_data = start_offset;
      b0->current_length = d0->length;
      n_rx_bytes += d0->length;

      cur_slot++;
      n_slots--;
      if (PREDICT_FALSE ((d0->flags & MEMIF_DESC_FLAG_NEXT) && n_slots))
	{
	  hb->flags |= VLIB_BUFFER_TOTAL_LENGTH_VALID;
	  hb->total_length_not_including_first_buffer = 0;
	next_slot:
	  s0 = cur_slot & mask;
	  d0 = &ring->desc[s0];
	  bi0 = mq->buffers[s0];

	  /* previous buffer */
	  b0->next_buffer = bi0;
	  b0->flags |= VLIB_BUFFER_NEXT_PRESENT;

	  /* current buffer */
	  b0 = vlib_get_buffer (vm, bi0);
	  b0->current_data = start_offset;
	  b0->current_length = d0->length;
	  hb->total_length_not_including_first_buffer += d0->length;
	  n_rx_bytes += d0->length;

	  cur_slot++;
	  n_slots--;
	  if ((d0->flags & MEMIF_DESC_FLAG_NEXT) && n_slots)
	    goto next_slot;
	}
    }

  /* release slots from the ring */
  mq->last_tail = cur_slot;
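  /* dual loop - enqueue four packets at a time, fix up RX metadata and
     pick next nodes, with a single-packet tail loop */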
  n_from = n_rx_packets;
  buffers = ptd->buffers;

  while (n_from)
    {
      u32 n_left_to_next;
      u32 next0, next1, next2, next3;

      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
      while (n_from >= 8 && n_left_to_next >= 4)
	{
	  b0 = vlib_get_buffer (vm, buffers[4]);
	  b1 = vlib_get_buffer (vm, buffers[5]);
	  b2 = vlib_get_buffer (vm, buffers[6]);
	  b3 = vlib_get_buffer (vm, buffers[7]);
	  vlib_prefetch_buffer_header (b0, STORE);
	  vlib_prefetch_buffer_header (b1, STORE);
	  vlib_prefetch_buffer_header (b2, STORE);
	  vlib_prefetch_buffer_header (b3, STORE);

	  /* enqueue */
	  to_next[0] = bi0 = buffers[0];
	  to_next[1] = bi1 = buffers[1];
	  to_next[2] = bi2 = buffers[2];
	  to_next[3] = bi3 = buffers[3];
	  buffers += 4;
	  to_next += 4;
	  n_from -= 4;
	  n_left_to_next -= 4;

	  b0 = vlib_get_buffer (vm, bi0);
	  b1 = vlib_get_buffer (vm, bi1);
	  b2 = vlib_get_buffer (vm, bi2);
	  b3 = vlib_get_buffer (vm, bi3);

	  vnet_buffer (b0)->sw_if_index[VLIB_RX] = mif->sw_if_index;
	  vnet_buffer (b0)->sw_if_index[VLIB_TX] = ~0;
	  vnet_buffer (b1)->sw_if_index[VLIB_RX] = mif->sw_if_index;
	  vnet_buffer (b1)->sw_if_index[VLIB_TX] = ~0;
	  vnet_buffer (b2)->sw_if_index[VLIB_RX] = mif->sw_if_index;
	  vnet_buffer (b2)->sw_if_index[VLIB_TX] = ~0;
	  vnet_buffer (b3)->sw_if_index[VLIB_RX] = mif->sw_if_index;
	  vnet_buffer (b3)->sw_if_index[VLIB_TX] = ~0;

	  if (mode == MEMIF_INTERFACE_MODE_IP)
	    {
	      next0 = memif_next_from_ip_hdr (node, b0);
	      next1 = memif_next_from_ip_hdr (node, b1);
	      next2 = memif_next_from_ip_hdr (node, b2);
	      next3 = memif_next_from_ip_hdr (node, b3);
	    }
	  else if (mode == MEMIF_INTERFACE_MODE_ETHERNET)
	    {
	      if (PREDICT_FALSE (mif->per_interface_next_index != ~0))
		{
		  next0 = mif->per_interface_next_index;
		  next1 = mif->per_interface_next_index;
		  next2 = mif->per_interface_next_index;
		  next3 = mif->per_interface_next_index;
		}
	      else
		{
		  next0 = next1 = next2 = next3 = next_index;
		  /* redirect if feature path enabled */
		  vnet_feature_start_device_input (mif->sw_if_index, &next0,
						   b0);
		  vnet_feature_start_device_input (mif->sw_if_index, &next1,
						   b1);
		  vnet_feature_start_device_input (mif->sw_if_index, &next2,
						   b2);
		  vnet_feature_start_device_input (mif->sw_if_index, &next3,
						   b3);
		}
	    }

	  /* trace */
	  if (PREDICT_FALSE (n_trace > 0))
	    memif_trace_buffer (vm, node, mif, b0, next0, qid, &n_trace);
	  if (PREDICT_FALSE (n_trace > 0))
	    memif_trace_buffer (vm, node, mif, b1, next1, qid, &n_trace);
	  if (PREDICT_FALSE (n_trace > 0))
	    memif_trace_buffer (vm, node, mif, b2, next2, qid, &n_trace);
	  if (PREDICT_FALSE (n_trace > 0))
	    memif_trace_buffer (vm, node, mif, b3, next3, qid, &n_trace);

	  /* enqueue */
	  vlib_validate_buffer_enqueue_x4 (vm, node, next_index, to_next,
					   n_left_to_next, bi0, bi1, bi2, bi3,
					   next0, next1, next2, next3);
	}
      while (n_from && n_left_to_next)
	{
	  /* enqueue */
	  to_next[0] = bi0 = buffers[0];
	  buffers += 1;
	  to_next += 1;
	  n_from -= 1;
	  n_left_to_next -= 1;

	  b0 = vlib_get_buffer (vm, bi0);
	  vnet_buffer (b0)->sw_if_index[VLIB_RX] = mif->sw_if_index;
	  vnet_buffer (b0)->sw_if_index[VLIB_TX] = ~0;

	  if (mode == MEMIF_INTERFACE_MODE_IP)
	    {
	      next0 = memif_next_from_ip_hdr (node, b0);
	    }
	  else if (mode == MEMIF_INTERFACE_MODE_ETHERNET)
	    {
	      if (PREDICT_FALSE (mif->per_interface_next_index != ~0))
		next0 = mif->per_interface_next_index;
	      else
		{
		  next0 = next_index;
		  /* redirect if feature path enabled */
		  vnet_feature_start_device_input (mif->sw_if_index, &next0,
						   b0);
		}
	    }

	  /* trace */
	  if (PREDICT_FALSE (n_trace > 0))
	    memif_trace_buffer (vm, node, mif, b0, next0, qid, &n_trace);

	  /* enqueue */
	  vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
					   n_left_to_next, bi0, next0);
	}

      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }

  vlib_increment_combined_counter (vnm->interface_main.combined_sw_if_counters
				   + VNET_INTERFACE_COUNTER_RX, thread_index,
				   mif->sw_if_index, n_rx_packets,
				   n_rx_bytes);
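  /* refill: lend fresh buffers to the peer by rebuilding descriptors from
     a template; offsets are relative to the buffer-pool region, shifted so
     the peer writes at start_offset inside each buffer */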
  /* refill ring with empty buffers */
refill:
  vec_reset_length (ptd->buffers);

  head = ring->head;
  n_slots = ring_size - head + mq->last_tail;
  slot = head & mask;

  if (n_slots == 0)
    goto done;

  memif_desc_t desc_template, *dt = &desc_template;
  clib_memset (dt, 0, sizeof (memif_desc_t));
  dt->length = buffer_length;

  n_alloc = vlib_buffer_alloc_to_ring_from_pool (
    vm, mq->buffers, slot, ring_size, n_slots, mq->buffer_pool_index);
  dt->region = mq->buffer_pool_index + 1;
  offset = (u64) mif->regions[dt->region].shm - start_offset;

  if (PREDICT_FALSE (n_alloc != n_slots))
    vlib_error_count (vm, node->node_index,
		      MEMIF_INPUT_ERROR_BUFFER_ALLOC_FAIL, 1);

  head += n_alloc;

  while (n_alloc)
    {
      memif_desc_t *d = ring->desc + slot;
      u32 *bi = mq->buffers + slot;

      if (PREDICT_FALSE (((slot + 7 > mask) || (n_alloc < 8))))
	goto one_by_one;

      clib_memcpy_fast (d + 0, dt, sizeof (memif_desc_t));
      clib_memcpy_fast (d + 1, dt, sizeof (memif_desc_t));
      clib_memcpy_fast (d + 2, dt, sizeof (memif_desc_t));
      clib_memcpy_fast (d + 3, dt, sizeof (memif_desc_t));
      clib_memcpy_fast (d + 4, dt, sizeof (memif_desc_t));
      clib_memcpy_fast (d + 5, dt, sizeof (memif_desc_t));
      clib_memcpy_fast (d + 6, dt, sizeof (memif_desc_t));
      clib_memcpy_fast (d + 7, dt, sizeof (memif_desc_t));

      d[0].offset = (u64) vlib_get_buffer (vm, bi[0])->data - offset;
      d[1].offset = (u64) vlib_get_buffer (vm, bi[1])->data - offset;
      d[2].offset = (u64) vlib_get_buffer (vm, bi[2])->data - offset;
      d[3].offset = (u64) vlib_get_buffer (vm, bi[3])->data - offset;
      d[4].offset = (u64) vlib_get_buffer (vm, bi[4])->data - offset;
      d[5].offset = (u64) vlib_get_buffer (vm, bi[5])->data - offset;
      d[6].offset = (u64) vlib_get_buffer (vm, bi[6])->data - offset;
      d[7].offset = (u64) vlib_get_buffer (vm, bi[7])->data - offset;

      slot = (slot + 8) & mask;
      n_alloc -= 8;
      continue;

    one_by_one:
      clib_memcpy_fast (d, dt, sizeof (memif_desc_t));
      d[0].offset = (u64) vlib_get_buffer (vm, bi[0])->data - offset;
      slot = (slot + 1) & mask;
      n_alloc -= 1;
    }

  __atomic_store_n (&ring->head, head, __ATOMIC_RELEASE);

done:
  return n_rx_packets;
}
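/* DMA completion callback: runs once the DMA engine has finished copying a
   batch queued by memif_device_input_inline_dma - publishes the new ring
   tail, fills buffer metadata and hands the frame to the next nodes. */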
CLIB_MARCH_FN (memif_dma_completion_cb, void, vlib_main_t *vm,
	       vlib_dma_batch_t *b)
{
  memif_main_t *mm = &memif_main;
  memif_if_t *mif = vec_elt_at_index (mm->interfaces, b->cookie >> 16);
  u32 thread_index = vm->thread_index;
  u32 n_left_to_next = 0;
  u16 nexts[MEMIF_RX_VECTOR_SZ], *next;
  u32 _to_next_bufs[MEMIF_RX_VECTOR_SZ], *to_next_bufs = _to_next_bufs, *bi;
  uword n_trace;
  memif_dma_info_t *dma_info;
  u16 qid = b->cookie & 0xffff;
  memif_queue_t *mq = vec_elt_at_index (mif->rx_queues, qid);
  dma_info = mq->dma_info + mq->dma_info_head;
  memif_per_thread_data_t *ptd = &dma_info->data;
  vnet_main_t *vnm = vnet_get_main ();

  u32 next_index = VNET_DEVICE_INPUT_NEXT_ETHERNET_INPUT;

  __atomic_store_n (&mq->ring->tail, dma_info->dma_tail, __ATOMIC_RELEASE);

  /* prepare buffer template and next indices */
  i16 start_offset =
    (dma_info->mode == MEMIF_INTERFACE_MODE_IP) ? MEMIF_IP_OFFSET : 0;
  vnet_buffer (&ptd->buffer_template)->sw_if_index[VLIB_RX] = mif->sw_if_index;
  vnet_buffer (&ptd->buffer_template)->feature_arc_index = 0;
  ptd->buffer_template.current_data = start_offset;
  ptd->buffer_template.current_config_index = 0;
  ptd->buffer_template.buffer_pool_index = mq->buffer_pool_index;
  ptd->buffer_template.ref_count = 1;

  if (dma_info->mode == MEMIF_INTERFACE_MODE_ETHERNET)
    {
      next_index = VNET_DEVICE_INPUT_NEXT_ETHERNET_INPUT;
      if (mif->per_interface_next_index != ~0)
	next_index = mif->per_interface_next_index;
      else
	vnet_feature_start_device_input (mif->sw_if_index, &next_index,
					 &ptd->buffer_template);

      vlib_get_new_next_frame (vm, dma_info->node, next_index, to_next_bufs,
			       n_left_to_next);
      if (PREDICT_TRUE (next_index == VNET_DEVICE_INPUT_NEXT_ETHERNET_INPUT))
	{
	  vlib_next_frame_t *nf;
	  vlib_frame_t *f;
	  ethernet_input_frame_t *ef;
	  nf =
	    vlib_node_runtime_get_next_frame (vm, dma_info->node, next_index);
	  f = vlib_get_frame (vm, nf->frame);
	  f->flags = ETH_INPUT_FRAME_F_SINGLE_SW_IF_IDX;

	  ef = vlib_frame_scalar_args (f);
	  ef->sw_if_index = mif->sw_if_index;
	  ef->hw_if_index = mif->hw_if_index;
	  vlib_frame_no_append (f);
	}
    }

  vec_reset_length (ptd->buffers);

  if (dma_info->mode == MEMIF_INTERFACE_MODE_IP)
    memif_fill_buffer_mdata (vm, dma_info->node, ptd, mif, to_next_bufs, nexts,
			     1);
  else
    memif_fill_buffer_mdata (vm, dma_info->node, ptd, mif, to_next_bufs, nexts,
			     0);

  /* packet trace if enabled */
  if (PREDICT_FALSE ((n_trace = vlib_get_trace_count (vm, dma_info->node))))
    {
      u32 n_left = ptd->n_packets;
      bi = to_next_bufs;
      next = nexts;
      u32 ni = next_index;
      while (n_trace && n_left)
	{
	  vlib_buffer_t *b;
	  memif_input_trace_t *tr;
	  if (dma_info->mode != MEMIF_INTERFACE_MODE_ETHERNET)
	    ni = next[0];
	  b = vlib_get_buffer (vm, bi[0]);
	  if (PREDICT_TRUE (vlib_trace_buffer (vm, dma_info->node, ni, b,
					       /* follow_chain */ 0)))
	    {
	      tr = vlib_add_trace (vm, dma_info->node, b, sizeof (*tr));
	      tr->next_index = ni;
	      tr->hw_if_index = mif->hw_if_index;
	      tr->ring = qid;
	      n_trace--;
	    }

	  /* next */
	  n_left--;
	  bi++;
	  next++;
	}
      vlib_set_trace_count (vm, dma_info->node, n_trace);
    }

  if (dma_info->mode == MEMIF_INTERFACE_MODE_ETHERNET)
    {
      n_left_to_next -= ptd->n_packets;
      vlib_put_next_frame (vm, dma_info->node, next_index, n_left_to_next);
    }
  else
    vlib_buffer_enqueue_to_next (vm, dma_info->node, to_next_bufs, nexts,
				 ptd->n_packets);

  vlib_increment_combined_counter (
    vnm->interface_main.combined_sw_if_counters + VNET_INTERFACE_COUNTER_RX,
    thread_index, mif->sw_if_index, ptd->n_packets, ptd->n_rx_bytes);

  mq->dma_info_head++;
  if (mq->dma_info_head == mq->dma_info_size)
    mq->dma_info_head = 0;
}
#ifndef CLIB_MARCH_VARIANT
void
memif_dma_completion_cb (vlib_main_t *vm, vlib_dma_batch_t *b)
{
  return CLIB_MARCH_FN_SELECT (memif_dma_completion_cb) (vm, b);
}
#endif
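/* DMA-assisted RX: same descriptor parsing and validation as the copy
   path, but payload copies are queued to a DMA engine and the frame is
   completed asynchronously in memif_dma_completion_cb. */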
static_always_inline uword
memif_device_input_inline_dma (vlib_main_t *vm, vlib_node_runtime_t *node,
			       memif_if_t *mif, memif_ring_type_t type,
			       u16 qid, memif_interface_mode_t mode)
{
  memif_main_t *mm = &memif_main;
  memif_ring_t *ring;
  memif_queue_t *mq;
  memif_per_thread_data_t *ptd;
  u16 cur_slot, n_slots;
  u16 n_buffers, n_alloc, n_desc;
  memif_copy_op_t *co;
  memif_dma_info_t *dma_info;
  u16 mif_id = mif - mm->interfaces;
  int i;

  mq = vec_elt_at_index (mif->rx_queues, qid);
  ring = mq->ring;

  cur_slot = mq->last_head;
  n_slots = __atomic_load_n (&ring->head, __ATOMIC_ACQUIRE) - cur_slot;

  if (n_slots == 0)
    return 0;

  /* bail out if the dma info ring is full */
  if ((mq->dma_info_tail + 1 == mq->dma_info_head) ||
      ((mq->dma_info_tail == mq->dma_info_size - 1) &&
       (mq->dma_info_head == 0)))
    return 0;

  vlib_dma_batch_t *db;
  db = vlib_dma_batch_new (vm, mif->dma_input_config);
  if (!db)
    return 0;

  dma_info = mq->dma_info + mq->dma_info_tail;
  dma_info->node = node;
  dma_info->mode = mode;
  ptd = &dma_info->data;
  vec_validate_aligned (dma_info->data.desc_len,
			pow2_mask (mq->log2_ring_size), CLIB_CACHE_LINE_BYTES);

  n_desc = memif_parse_desc (&dma_info->data, mif, mq, cur_slot, n_slots);
  cur_slot += n_desc;

  if (mif->mode == MEMIF_INTERFACE_MODE_ETHERNET)
    memif_validate_desc_data (&dma_info->data, mif, n_desc,
			      /* is_ethernet */ 1);
  else
    memif_validate_desc_data (&dma_info->data, mif, n_desc,
			      /* is_ethernet */ 0);

  n_buffers = memif_process_desc (vm, node, ptd, mif);

  if (PREDICT_FALSE (n_buffers == 0))
    {
      /* All descriptors are bad. Release slots in the ring and bail */
      memif_advance_ring (type, mq, ring, cur_slot);
      return 0;
    }

  /* allocate free buffers */
  vec_validate_aligned (dma_info->data.buffers, n_buffers - 1,
			CLIB_CACHE_LINE_BYTES);
  n_alloc = vlib_buffer_alloc_from_pool (vm, dma_info->data.buffers, n_buffers,
					 mq->buffer_pool_index);
  if (PREDICT_FALSE (n_alloc != n_buffers))
    {
      if (n_alloc)
	vlib_buffer_free (vm, dma_info->data.buffers, n_alloc);
      vlib_error_count (vm, node->node_index,
			MEMIF_INPUT_ERROR_BUFFER_ALLOC_FAIL, 1);
      return 0;
    }

  dma_info->data.n_rx_bytes = ptd->n_rx_bytes;
  dma_info->data.n_packets = ptd->n_packets;

  /* queue the copies to the dma engine */
  co = ptd->copy_ops;
  u32 n_pkts = clib_min (MEMIF_RX_VECTOR_SZ, vec_len (ptd->copy_ops));
  vlib_buffer_t *b;

  for (i = 0; i < n_pkts; i++)
    {
      b = vlib_get_buffer (vm, ptd->buffers[co[i].buffer_vec_index]);
      vlib_dma_batch_add (vm, db, b->data + co[i].buffer_offset, co[i].data,
			  co[i].data_len);
    }
  for (i = n_pkts; i < vec_len (ptd->copy_ops); i++)
    {
      b = vlib_get_buffer (vm, ptd->buffers[co[i].buffer_vec_index]);
      vlib_dma_batch_add (vm, db, b->data + co[i].buffer_offset, co[i].data,
			  co[i].data_len);
    }

  dma_info->dma_tail = cur_slot;
  mq->last_head = dma_info->dma_tail;
  mq->dma_info_tail++;
  if (mq->dma_info_tail == mq->dma_info_size)
    mq->dma_info_tail = 0;

  vlib_dma_batch_set_cookie (vm, db, ((u64) mif_id << 16) | qid);
  vlib_dma_batch_submit (vm, db);
  vec_reset_length (ptd->copy_ops);

  return ptd->n_packets;
}
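/* Input node: walk the rx-queue poll vector and dispatch every connected
   interface/queue to the zero-copy, DMA or plain copy variant. */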
VLIB_NODE_FN (memif_input_node) (vlib_main_t * vm,
				 vlib_node_runtime_t * node,
				 vlib_frame_t * frame)
{
  u32 n_rx = 0;
  memif_main_t *mm = &memif_main;
  memif_interface_mode_t mode_ip = MEMIF_INTERFACE_MODE_IP;
  memif_interface_mode_t mode_eth = MEMIF_INTERFACE_MODE_ETHERNET;

  vnet_hw_if_rxq_poll_vector_t *pv;
  pv = vnet_hw_if_get_rxq_poll_vector (vm, node);
  for (int i = 0; i < vec_len (pv); i++)
    {
      memif_if_t *mif;
      u32 qid;
      mif = vec_elt_at_index (mm->interfaces, pv[i].dev_instance);
      qid = pv[i].queue_id;
      if ((mif->flags & MEMIF_IF_FLAG_ADMIN_UP) &&
	  (mif->flags & MEMIF_IF_FLAG_CONNECTED))
	{
	  if (mif->flags & MEMIF_IF_FLAG_ZERO_COPY)
	    {
	      if (mif->mode == MEMIF_INTERFACE_MODE_IP)
		n_rx +=
		  memif_device_input_zc_inline (vm, node, mif, qid, mode_ip);
	      else
		n_rx +=
		  memif_device_input_zc_inline (vm, node, mif, qid, mode_eth);
	    }
	  else if (mif->flags & MEMIF_IF_FLAG_IS_SLAVE)
	    {
	      if (mif->mode == MEMIF_INTERFACE_MODE_IP)
		n_rx += memif_device_input_inline (
		  vm, node, mif, MEMIF_RING_M2S, qid, mode_ip);
	      else
		n_rx += memif_device_input_inline (
		  vm, node, mif, MEMIF_RING_M2S, qid, mode_eth);
	    }
	  else
	    {
	      if ((mif->flags & MEMIF_IF_FLAG_USE_DMA) &&
		  (mif->dma_input_config >= 0))
		{
		  if (mif->mode == MEMIF_INTERFACE_MODE_IP)
		    n_rx += memif_device_input_inline_dma (
		      vm, node, mif, MEMIF_RING_S2M, qid, mode_ip);
		  else
		    n_rx += memif_device_input_inline_dma (
		      vm, node, mif, MEMIF_RING_S2M, qid, mode_eth);
		}
	      else
		{
		  if (mif->mode == MEMIF_INTERFACE_MODE_IP)
		    n_rx += memif_device_input_inline (
		      vm, node, mif, MEMIF_RING_S2M, qid, mode_ip);
		  else
		    n_rx += memif_device_input_inline (
		      vm, node, mif, MEMIF_RING_S2M, qid, mode_eth);
		}
	    }
	}
    }

  return n_rx;
}
VLIB_REGISTER_NODE (memif_input_node) = {
  .name = "memif-input",
  .flags = VLIB_NODE_FLAG_TRACE_SUPPORTED,
  .sibling_of = "device-input",
  .format_trace = format_memif_input_trace,
  .type = VLIB_NODE_TYPE_INPUT,
  .state = VLIB_NODE_STATE_INTERRUPT,
  .n_errors = MEMIF_INPUT_N_ERROR,
  .error_counters = memif_input_error_counters,
};
/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */