/*
 *------------------------------------------------------------------
 * Copyright (c) 2016 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *------------------------------------------------------------------
 */
#define _GNU_SOURCE
#include <stdint.h>
#include <net/if.h>
#include <sys/ioctl.h>
#include <sys/uio.h>

#include <vlib/vlib.h>
#include <vlib/unix/unix.h>
#include <vnet/ethernet/ethernet.h>
#include <vnet/devices/devices.h>
#include <vnet/feature/feature.h>

#include <memif/memif.h>
#include <memif/private.h>
#define foreach_memif_input_error \
  _(NOT_IP, "not ip packet")

typedef enum
{
#define _(f,s) MEMIF_INPUT_ERROR_##f,
  foreach_memif_input_error
#undef _
    MEMIF_INPUT_N_ERROR,
} memif_input_error_t;

static __clib_unused char *memif_input_error_strings[] = {
#define _(n,s) s,
  foreach_memif_input_error
#undef _
};
typedef struct
{
  u32 next_index;
  u32 hw_if_index;
  u16 ring;
} memif_input_trace_t;

static __clib_unused u8 *
format_memif_input_trace (u8 * s, va_list * args)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
  memif_input_trace_t *t = va_arg (*args, memif_input_trace_t *);
  u32 indent = format_get_indent (s);

  s = format (s, "memif: hw_if_index %d next-index %d",
	      t->hw_if_index, t->next_index);
  s = format (s, "\n%Uslot: ring %u", format_white_space, indent + 2,
	      t->ring);
  return s;
}
static_always_inline void
memif_prefetch (vlib_main_t * vm, u32 bi)
{
  vlib_buffer_t *b = vlib_get_buffer (vm, bi);
  vlib_prefetch_buffer_header (b, STORE);
  CLIB_PREFETCH (b->data, CLIB_CACHE_LINE_BYTES, STORE);
}
static_always_inline void
memif_buffer_add_to_chain (vlib_main_t * vm, u32 bi, u32 first_bi,
			   u32 prev_bi)
{
  vlib_buffer_t *b = vlib_get_buffer (vm, bi);
  vlib_buffer_t *first_b = vlib_get_buffer (vm, first_bi);
  vlib_buffer_t *prev_b = vlib_get_buffer (vm, prev_bi);

  /* update first buffer */
  first_b->total_length_not_including_first_buffer += b->current_length;

  /* update previous buffer */
  prev_b->next_buffer = bi;
  prev_b->flags |= VLIB_BUFFER_NEXT_PRESENT;

  /* update current buffer */
  b->next_buffer = 0;
}
/**
 * @brief Copy buffer from rx ring
 *
 * @param * vm (in) pointer to vlib main
 * @param * mif (in) pointer to memif interface
 * @param * ring (in) pointer to memif ring
 * @param * mq (in) pointer to memif queue
 * @param ring_size (in) ring size
 * @param n_buffer_bytes (in) vlib buffer data size
 * @param * n_free_bufs (in/out) the number of free vlib buffers available
 * @param ** first_b (out) the first vlib buffer pointer
 * @param * first_bi (out) the first vlib buffer index
 * @param * bi (in/out) the current buffer index
 * @param * num_slots (in/out) the number of descriptors available to read
 *
 * @return total bytes read from rx ring also written to vlib buffers
 */
static_always_inline uword
memif_copy_buffer_from_rx_ring (vlib_main_t * vm, memif_if_t * mif,
				memif_ring_t * ring, memif_queue_t * mq,
				u16 ring_size, u32 n_buffer_bytes,
				u32 * n_free_bufs, vlib_buffer_t ** first_b,
				u32 * first_bi, u32 * bi, u16 * num_slots)
{
  memif_main_t *nm = &memif_main;
  u32 thread_index = vlib_get_thread_index ();
  u32 total_bytes = 0, offset = 0;
  u32 data_len;
  u32 bytes_to_copy;
  void *mb;
  vlib_buffer_t *b;
  u16 mask = ring_size - 1;
  u32 prev_bi;
  u16 last_head;
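
  /* Descriptor indices are free-running u16 values; because the ring size
     is a power of two (1 << log2_ring_size), masking with (ring_size - 1)
     maps an index onto a ring slot and wrap-around falls out of the u16
     arithmetic. */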
  while (*num_slots)
    {
      data_len = ring->desc[mq->last_head & mask].length;
      while (data_len && (*n_free_bufs))
	{
	  /* get empty buffer */
	  u32 last_buf = vec_len (nm->rx_buffers[thread_index]) - 1;
	  prev_bi = *bi;
	  *bi = nm->rx_buffers[thread_index][last_buf];
	  b = vlib_get_buffer (vm, *bi);
	  /* Clear any stale error first, so that a downstream node which
	     forgets to set it increments the null-node error counter
	     instead of risking a crash. */
	  b->error = 0;
	  _vec_len (nm->rx_buffers[thread_index]) = last_buf;
	  (*n_free_bufs)--;
	  if (PREDICT_FALSE (*n_free_bufs == 0))
	    {
	      *n_free_bufs +=
		vlib_buffer_alloc (vm,
				   &nm->rx_buffers[thread_index]
				   [*n_free_bufs], ring_size);
	      _vec_len (nm->rx_buffers[thread_index]) = *n_free_bufs;
	    }
	  if (last_buf > 4)
	    {
	      memif_prefetch (vm, nm->rx_buffers[thread_index][last_buf - 2]);
	      memif_prefetch (vm, nm->rx_buffers[thread_index][last_buf - 3]);
	    }

	  bytes_to_copy =
	    data_len > n_buffer_bytes ? n_buffer_bytes : data_len;
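	  /* Copy the descriptor data out of shared memory. The first cache
	     line is copied unconditionally, which may over-copy for tiny
	     packets but avoids a branch in the common case; b->current_length
	     is set from bytes_to_copy, so any extra bytes are ignored. */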
	  b->current_data = 0;
	  mb = memif_get_buffer (mif, ring, mq->last_head & mask);
	  clib_memcpy (vlib_buffer_get_current (b), mb + offset,
		       CLIB_CACHE_LINE_BYTES);
	  if (bytes_to_copy > CLIB_CACHE_LINE_BYTES)
	    clib_memcpy (vlib_buffer_get_current (b) + CLIB_CACHE_LINE_BYTES,
			 mb + CLIB_CACHE_LINE_BYTES + offset,
			 bytes_to_copy - CLIB_CACHE_LINE_BYTES);
	  /* fill buffer header */
	  b->current_length = bytes_to_copy;

	  if (total_bytes == 0)
	    {
	      /* fill buffer metadata */
	      b->total_length_not_including_first_buffer = 0;
	      b->flags = VLIB_BUFFER_TOTAL_LENGTH_VALID;
	      vnet_buffer (b)->sw_if_index[VLIB_RX] = mif->sw_if_index;
	      vnet_buffer (b)->sw_if_index[VLIB_TX] = (u32) ~ 0;
	      *first_bi = *bi;
	      *first_b = vlib_get_buffer (vm, *first_bi);
	    }
	  else
	    memif_buffer_add_to_chain (vm, *bi, *first_bi, prev_bi);

	  offset += bytes_to_copy;
	  total_bytes += bytes_to_copy;
	  data_len -= bytes_to_copy;
	}
      last_head = mq->last_head;
      /* Advance to next descriptor */
      mq->last_head++;
      offset = 0;
      (*num_slots)--;
      if ((ring->desc[last_head & mask].flags & MEMIF_DESC_FLAG_NEXT) == 0)
	break;
    }

  return (total_bytes);
}
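
/* In IP mode there is no ethernet header to parse; dispatch on the IP
   version nibble of the first byte: 0x4x goes to the no-checksum ip4 input
   node, 0x6x to ip6 input, and anything else is counted as "not ip" and
   dropped. */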
static_always_inline u32
memif_next_from_ip_hdr (vlib_node_runtime_t * node, vlib_buffer_t * b)
{
  u8 *ptr = vlib_buffer_get_current (b);
  u8 v = *ptr & 0xf0;

  if (PREDICT_TRUE (v == 0x40))
    return VNET_DEVICE_INPUT_NEXT_IP4_NCS_INPUT;
  else if (PREDICT_TRUE (v == 0x60))
    return VNET_DEVICE_INPUT_NEXT_IP6_INPUT;

  b->error = node->errors[MEMIF_INPUT_ERROR_NOT_IP];
  return VNET_DEVICE_INPUT_NEXT_DROP;
}
static_always_inline uword
memif_device_input_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
			   vlib_frame_t * frame, memif_if_t * mif,
			   memif_ring_type_t type, u16 qid,
			   memif_interface_mode_t mode)
{
  vnet_main_t *vnm = vnet_get_main ();
  memif_ring_t *ring;
  memif_queue_t *mq;
  u16 head;
  u32 next_index;
  uword n_trace = vlib_get_trace_count (vm, node);
  memif_main_t *nm = &memif_main;
  u32 n_rx_packets = 0;
  u32 n_rx_bytes = 0;
  u32 *to_next = 0;
  u32 n_free_bufs;
  u32 b0_total, b1_total;
  u32 thread_index = vlib_get_thread_index ();
  u16 ring_size, mask, num_slots;
  u32 n_buffer_bytes = vlib_buffer_free_list_buffer_size (vm,
							  VLIB_BUFFER_DEFAULT_FREE_LIST_INDEX);

  mq = vec_elt_at_index (mif->rx_queues, qid);
  ring = mq->ring;
  ring_size = 1 << mq->log2_ring_size;
  mask = ring_size - 1;
  if (mode == MEMIF_INTERFACE_MODE_IP)
    {
      next_index = VNET_DEVICE_INPUT_NEXT_IP6_INPUT;
    }
  else
    {
      next_index = VNET_DEVICE_INPUT_NEXT_ETHERNET_INPUT;
    }
  n_free_bufs = vec_len (nm->rx_buffers[thread_index]);
  if (PREDICT_FALSE (n_free_bufs < ring_size))
    {
      vec_validate (nm->rx_buffers[thread_index],
		    ring_size + n_free_bufs - 1);
      n_free_bufs +=
	vlib_buffer_alloc (vm, &nm->rx_buffers[thread_index][n_free_bufs],
			   ring_size);
      _vec_len (nm->rx_buffers[thread_index]) = n_free_bufs;
    }
  head = ring->head;
  mq->last_head = ring->tail;
  if (head == mq->last_head)
    return 0;

  num_slots = head - mq->last_head;
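
  /* As used here, the producer advances ring->head as it fills slots and
     this consumer owns the region between ring->tail and head. ring->tail
     is published back only after all copies below have finished, so the
     producer never reuses a slot that is still being read. */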
  while (num_slots)
    {
      u32 n_left_to_next;
      u32 next0 = next_index;
      u32 next1 = next_index;
      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

      while (num_slots > 11 && n_left_to_next > 2)
	{
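	  /* Work ahead of the dual-packet dequeue: prefetch the
	     shared-memory payloads for slots +2/+3 and the descriptors for
	     slots +4/+5 so they are warm by the time the copy loop reaches
	     them. */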
	  CLIB_PREFETCH (memif_get_buffer
			 (mif, ring, (mq->last_head + 2) & mask),
			 CLIB_CACHE_LINE_BYTES, LOAD);
	  CLIB_PREFETCH (memif_get_buffer
			 (mif, ring, (mq->last_head + 3) & mask),
			 CLIB_CACHE_LINE_BYTES, LOAD);
	  CLIB_PREFETCH (&ring->desc[(mq->last_head + 4) & mask],
			 CLIB_CACHE_LINE_BYTES, LOAD);
	  CLIB_PREFETCH (&ring->desc[(mq->last_head + 5) & mask],
			 CLIB_CACHE_LINE_BYTES, LOAD);
	  vlib_buffer_t *first_b0 = 0;
	  u32 bi0 = 0, first_bi0 = 0;
	  b0_total = memif_copy_buffer_from_rx_ring (vm, mif, ring, mq,
						     ring_size,
						     n_buffer_bytes,
						     &n_free_bufs, &first_b0,
						     &first_bi0, &bi0,
						     &num_slots);

	  vlib_buffer_t *first_b1 = 0;
	  u32 bi1 = 0, first_bi1 = 0;
	  b1_total = memif_copy_buffer_from_rx_ring (vm, mif, ring, mq,
						     ring_size,
						     n_buffer_bytes,
						     &n_free_bufs, &first_b1,
						     &first_bi1, &bi1,
						     &num_slots);

	  if (PREDICT_FALSE (!first_bi0 || !first_bi1))
	    {
	      goto _invalid_pkt01;
	    }
	  /* enqueue buffer */
	  to_next[0] = first_bi0;
	  to_next[1] = first_bi1;
	  to_next += 2;
	  n_left_to_next -= 2;
	  if (mode == MEMIF_INTERFACE_MODE_IP)
	    {
	      next0 = memif_next_from_ip_hdr (node, first_b0);
	      next1 = memif_next_from_ip_hdr (node, first_b1);
	    }
	  else if (mode == MEMIF_INTERFACE_MODE_ETHERNET)
	    {
	      if (PREDICT_FALSE (mif->per_interface_next_index != ~0))
		next0 = next1 = mif->per_interface_next_index;
	      else
		/* redirect if feature path enabled */
		vnet_feature_start_device_input_x2 (mif->sw_if_index,
						    &next0, &next1,
						    first_b0, first_b1);
	    }

	  /* trace */
	  VLIB_BUFFER_TRACE_TRAJECTORY_INIT (first_b0);
	  VLIB_BUFFER_TRACE_TRAJECTORY_INIT (first_b1);
	  if (PREDICT_FALSE (n_trace > 0))
	    {
	      /* b0 */
	      if (PREDICT_TRUE (first_b0 != 0))
		{
		  memif_input_trace_t *tr;
		  vlib_trace_buffer (vm, node, next0, first_b0,
				     /* follow_chain */ 0);
		  vlib_set_trace_count (vm, node, --n_trace);
		  tr = vlib_add_trace (vm, node, first_b0, sizeof (*tr));
		  tr->next_index = next0;
		  tr->hw_if_index = mif->hw_if_index;
		  tr->ring = qid;
		}
	      if (n_trace)
		{
		  /* b1 */
		  if (PREDICT_TRUE (first_b1 != 0))
		    {
		      memif_input_trace_t *tr;
		      vlib_trace_buffer (vm, node, next1, first_b1,
					 /* follow_chain */ 0);
		      vlib_set_trace_count (vm, node, --n_trace);
		      tr = vlib_add_trace (vm, node, first_b1, sizeof (*tr));
		      tr->next_index = next1;
		      tr->hw_if_index = mif->hw_if_index;
		      tr->ring = qid;
		    }
		}
	    }

	  /* enqueue */
	  vlib_validate_buffer_enqueue_x2 (vm, node, next_index, to_next,
					   n_left_to_next, first_bi0,
					   first_bi1, next0, next1);
	  /* next packet */
	  n_rx_packets += 2;
	  n_rx_bytes += b0_total + b1_total;

	  continue;
	_invalid_pkt01:
	  if (!first_bi0 && !first_bi1)
	    {
	      continue;
	    }
	  if (first_bi1)
	    {
	      first_bi0 = first_bi1;
	      first_b0 = first_b1;
	      bi0 = bi1;
	      b0_total = b1_total;
	    }
	  if (mode == MEMIF_INTERFACE_MODE_IP)
	    {
	      next0 = memif_next_from_ip_hdr (node, first_b0);
	    }
	  else if (mode == MEMIF_INTERFACE_MODE_ETHERNET)
	    {
	      if (PREDICT_FALSE (mif->per_interface_next_index != ~0))
		next0 = mif->per_interface_next_index;
	      else
		/* redirect if feature path enabled */
		vnet_feature_start_device_input_x1 (mif->sw_if_index, &next0,
						    first_b0);
	    }

	  /* trace */
	  VLIB_BUFFER_TRACE_TRAJECTORY_INIT (first_b0);
	  if (PREDICT_FALSE (n_trace > 0))
	    {
	      if (PREDICT_TRUE (first_b0 != 0))
		{
		  memif_input_trace_t *tr;
		  vlib_trace_buffer (vm, node, next0, first_b0,
				     /* follow_chain */ 0);
		  vlib_set_trace_count (vm, node, --n_trace);
		  tr = vlib_add_trace (vm, node, first_b0, sizeof (*tr));
		  tr->next_index = next0;
		  tr->hw_if_index = mif->hw_if_index;
		  tr->ring = qid;
		}
	    }
	  /* enqueue buffer */
	  to_next[0] = first_bi0;
	  to_next += 1;
	  n_left_to_next--;

	  /* enqueue */
	  vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
					   n_left_to_next, first_bi0, next0);

	  /* next packet */
	  n_rx_packets++;
	  n_rx_bytes += b0_total;
	}
      while (num_slots && n_left_to_next)
	{
	  vlib_buffer_t *first_b0 = 0;
	  u32 bi0 = 0, first_bi0 = 0;
	  b0_total = memif_copy_buffer_from_rx_ring (vm, mif, ring, mq,
						     ring_size,
						     n_buffer_bytes,
						     &n_free_bufs, &first_b0,
						     &first_bi0, &bi0,
						     &num_slots);
	  if (PREDICT_FALSE (!first_bi0))
	    {
	      goto _invalid_pkt0;
	    }
	  if (mode == MEMIF_INTERFACE_MODE_IP)
	    {
	      next0 = memif_next_from_ip_hdr (node, first_b0);
	    }
	  else if (mode == MEMIF_INTERFACE_MODE_ETHERNET)
	    {
	      if (PREDICT_FALSE (mif->per_interface_next_index != ~0))
		next0 = mif->per_interface_next_index;
	      else
		/* redirect if feature path enabled */
		vnet_feature_start_device_input_x1 (mif->sw_if_index,
						    &next0, first_b0);
	    }

	  /* trace */
	  VLIB_BUFFER_TRACE_TRAJECTORY_INIT (first_b0);
	  if (PREDICT_FALSE (n_trace > 0))
	    {
	      if (PREDICT_TRUE (first_b0 != 0))
		{
		  memif_input_trace_t *tr;
		  vlib_trace_buffer (vm, node, next0, first_b0,
				     /* follow_chain */ 0);
		  vlib_set_trace_count (vm, node, --n_trace);
		  tr = vlib_add_trace (vm, node, first_b0, sizeof (*tr));
		  tr->next_index = next0;
		  tr->hw_if_index = mif->hw_if_index;
		  tr->ring = qid;
		}
	    }
	  /* enqueue buffer */
	  to_next[0] = first_bi0;
	  to_next += 1;
	  n_left_to_next--;

	  /* enqueue */
	  vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
					   n_left_to_next, first_bi0, next0);

	  /* next packet */
	  n_rx_packets++;
	  n_rx_bytes += b0_total;
	_invalid_pkt0:
	  ;
	}
      if (PREDICT_TRUE (n_rx_packets != 0))
	{
	  vlib_put_next_frame (vm, node, next_index, n_left_to_next);
	}
    }
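
  /* Publish the consumed slots back to the producer. The store barrier
     keeps the buffer copies above from being reordered past the tail
     update, so the producer never sees a freed slot too early. */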
  CLIB_MEMORY_STORE_BARRIER ();
  ring->tail = mq->last_head;
  vlib_increment_combined_counter (vnm->interface_main.combined_sw_if_counters
				   + VNET_INTERFACE_COUNTER_RX, thread_index,
				   mif->hw_if_index, n_rx_packets,
				   n_rx_bytes);

  return n_rx_packets;
}
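
/* Input node dispatch: walk every (device, queue) pair assigned to this
   thread. A slave reads the master-to-slave (M2S) ring, a master reads the
   slave-to-master (S2M) ring, and the interface mode selects the
   compile-time specialization of the inline function above. */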
uword
CLIB_MULTIARCH_FN (memif_input_fn) (vlib_main_t * vm,
				    vlib_node_runtime_t * node,
				    vlib_frame_t * frame)
{
  u32 n_rx = 0;
  memif_main_t *nm = &memif_main;
  vnet_device_input_runtime_t *rt = (void *) node->runtime_data;
  vnet_device_and_queue_t *dq;

  foreach_device_and_queue (dq, rt->devices_and_queues)
  {
    memif_if_t *mif;
    mif = vec_elt_at_index (nm->interfaces, dq->dev_instance);
    if ((mif->flags & MEMIF_IF_FLAG_ADMIN_UP) &&
	(mif->flags & MEMIF_IF_FLAG_CONNECTED))
      {
	if (mif->flags & MEMIF_IF_FLAG_IS_SLAVE)
	  {
	    if (mif->mode == MEMIF_INTERFACE_MODE_IP)
	      n_rx += memif_device_input_inline (vm, node, frame, mif,
						 MEMIF_RING_M2S, dq->queue_id,
						 MEMIF_INTERFACE_MODE_IP);
	    else
	      n_rx += memif_device_input_inline (vm, node, frame, mif,
						 MEMIF_RING_M2S, dq->queue_id,
						 MEMIF_INTERFACE_MODE_ETHERNET);
	  }
	else
	  {
	    if (mif->mode == MEMIF_INTERFACE_MODE_IP)
	      n_rx += memif_device_input_inline (vm, node, frame, mif,
						 MEMIF_RING_S2M, dq->queue_id,
						 MEMIF_INTERFACE_MODE_IP);
	    else
	      n_rx += memif_device_input_inline (vm, node, frame, mif,
						 MEMIF_RING_S2M, dq->queue_id,
						 MEMIF_INTERFACE_MODE_ETHERNET);
	  }
      }
  }

  return n_rx;
}
#ifndef CLIB_MULTIARCH_VARIANT
VLIB_REGISTER_NODE (memif_input_node) = {
  .function = memif_input_fn,
  .name = "memif-input",
  .sibling_of = "device-input",
  .format_trace = format_memif_input_trace,
  .type = VLIB_NODE_TYPE_INPUT,
  .state = VLIB_NODE_STATE_INTERRUPT,
  .n_errors = MEMIF_INPUT_N_ERROR,
  .error_strings = memif_input_error_strings,
};
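
/* The weak symbols below resolve to the AVX-512/AVX2 multiarch variants
   when they are compiled in; the constructor patches the node to the best
   variant the running CPU supports. */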
vlib_node_function_t __clib_weak memif_input_fn_avx512;
vlib_node_function_t __clib_weak memif_input_fn_avx2;

#if __x86_64__
static void __clib_constructor
memif_input_multiarch_select (void)
{
  if (memif_input_fn_avx512 && clib_cpu_supports_avx512f ())
    memif_input_node.function = memif_input_fn_avx512;
  else if (memif_input_fn_avx2 && clib_cpu_supports_avx2 ())
    memif_input_node.function = memif_input_fn_avx2;
}
#endif
#endif
/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */