/*
 *------------------------------------------------------------------
 * Copyright (c) 2016 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *------------------------------------------------------------------
 */
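
/*
 * memif input node: reads packets from a shared-memory ring and feeds
 * them into the vlib graph (ethernet-input by default).
 */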

#include <sys/ioctl.h>

#include <vlib/vlib.h>
#include <vlib/unix/unix.h>
#include <vnet/ethernet/ethernet.h>
#include <vnet/devices/devices.h>
#include <vnet/feature/feature.h>

#include <memif/memif.h>
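
/* input-error counter scaffolding; the error list is currently empty
 * but keeps the usual foreach pattern so entries can be added later */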
#define foreach_memif_input_error

typedef enum
{
#define _(f,s) MEMIF_INPUT_ERROR_##f,
  foreach_memif_input_error
#undef _
    MEMIF_INPUT_N_ERROR,
} memif_input_error_t;

static char *memif_input_error_strings[] = {
#define _(n,s) s,
  foreach_memif_input_error
#undef _
};

typedef struct
{
  u32 next_index;
  u32 hw_if_index;
  u16 ring;
} memif_input_trace_t;

static u8 *
format_memif_input_trace (u8 * s, va_list * args)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
  memif_input_trace_t *t = va_arg (*args, memif_input_trace_t *);
  uword indent = format_get_indent (s);

  s = format (s, "memif: hw_if_index %d next-index %d",
              t->hw_if_index, t->next_index);
  s = format (s, "\n%Uslot: ring %u", format_white_space, indent + 2,
              t->ring);
  return s;
}

static_always_inline void
memif_prefetch (vlib_main_t * vm, u32 bi)
{
  vlib_buffer_t *b = vlib_get_buffer (vm, bi);
  vlib_prefetch_buffer_header (b, STORE);
  CLIB_PREFETCH (b->data, CLIB_CACHE_LINE_BYTES, STORE);
}
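
/*
 * Receive path for one ring: drains every slot between the consumer's
 * last_head and the producer's head, copying each packet into a fresh
 * vlib buffer. The inner loops handle two packets per iteration while
 * enough slots and frame space remain, then fall back to one.
 */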
static_always_inline uword
memif_device_input_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
                           vlib_frame_t * frame, memif_if_t * mif,
                           memif_ring_type_t type)
{
  vnet_main_t *vnm = vnet_get_main ();
  u8 rid = 0;                   /* Ring id */
  memif_ring_t *ring = memif_get_ring (mif, type, rid);
  /* per-ring private data; s2m rings are laid out first, then m2s */
  memif_ring_data_t *rd =
    vec_elt_at_index (mif->ring_data, rid + type * mif->num_s2m_rings);
  u16 head;

  u32 next_index = VNET_DEVICE_INPUT_NEXT_ETHERNET_INPUT;
  uword n_trace = vlib_get_trace_count (vm, node);
  memif_main_t *nm = &memif_main;
  u32 n_rx_packets = 0;
  u32 n_rx_bytes = 0;
  u32 *to_next = 0;
  u32 n_free_bufs;
  u32 cpu_index = os_get_cpu_number ();
  u32 bi0, bi1;
  vlib_buffer_t *b0, *b1;
  u16 ring_size = 1 << mif->log2_ring_size;
  u16 mask = ring_size - 1;
  u16 num_slots;
  void *mb0, *mb1;

  if (mif->per_interface_next_index != ~0)
    next_index = mif->per_interface_next_index;
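
  /* keep at least one ring's worth of free vlib buffers cached for
   * this thread, refilling the cache in bulk when it runs low */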
  n_free_bufs = vec_len (nm->rx_buffers[cpu_index]);
  if (PREDICT_FALSE (n_free_bufs < ring_size))
    {
      vec_validate (nm->rx_buffers[cpu_index], ring_size + n_free_bufs - 1);
      n_free_bufs +=
        vlib_buffer_alloc (vm, &nm->rx_buffers[cpu_index][n_free_bufs],
                           ring_size);
      _vec_len (nm->rx_buffers[cpu_index]) = n_free_bufs;
    }
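
  /* snapshot the producer's head once; if nothing new has arrived,
   * bail out early */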
  head = ring->head;
  if (head == rd->last_head)
    return 0;

  if (head > rd->last_head)
    num_slots = head - rd->last_head;
  else
    num_slots = ring_size - rd->last_head + head;

  while (num_slots)
    {
      u32 n_left_to_next;
      u32 next0 = next_index;
      u32 next1 = next_index;
      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
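
      /* two packets per iteration: prefetch shared-memory buffers and
       * descriptors a few slots ahead to hide memory latency */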
      while (num_slots > 5 && n_left_to_next > 2)
        {
          if (PREDICT_TRUE (rd->last_head + 5 < ring_size))
            {
              CLIB_PREFETCH (memif_get_buffer (mif, ring, rd->last_head + 2),
                             CLIB_CACHE_LINE_BYTES, LOAD);
              CLIB_PREFETCH (memif_get_buffer (mif, ring, rd->last_head + 3),
                             CLIB_CACHE_LINE_BYTES, LOAD);
              CLIB_PREFETCH (&ring->desc[rd->last_head + 4],
                             CLIB_CACHE_LINE_BYTES, LOAD);
              CLIB_PREFETCH (&ring->desc[rd->last_head + 5],
                             CLIB_CACHE_LINE_BYTES, LOAD);
            }
          else
            {
              /* indices wrap here; ring_size is a power of two, so
               * & mask wraps correctly */
              CLIB_PREFETCH (memif_get_buffer
                             (mif, ring, (rd->last_head + 2) & mask),
                             CLIB_CACHE_LINE_BYTES, LOAD);
              CLIB_PREFETCH (memif_get_buffer
                             (mif, ring, (rd->last_head + 3) & mask),
                             CLIB_CACHE_LINE_BYTES, LOAD);
              CLIB_PREFETCH (&ring->desc[(rd->last_head + 4) & mask],
                             CLIB_CACHE_LINE_BYTES, LOAD);
              CLIB_PREFETCH (&ring->desc[(rd->last_head + 5) & mask],
                             CLIB_CACHE_LINE_BYTES, LOAD);
            }
          /* get empty buffer */
          u32 last_buf = vec_len (nm->rx_buffers[cpu_index]) - 1;
          bi0 = nm->rx_buffers[cpu_index][last_buf];
          bi1 = nm->rx_buffers[cpu_index][last_buf - 1];
          _vec_len (nm->rx_buffers[cpu_index]) -= 2;

          if (last_buf > 4)
            {
              memif_prefetch (vm, nm->rx_buffers[cpu_index][last_buf - 2]);
              memif_prefetch (vm, nm->rx_buffers[cpu_index][last_buf - 3]);
            }

          /* enqueue buffer */
          to_next[0] = bi0;
          to_next[1] = bi1;
          to_next += 2;
          n_left_to_next -= 2;

          /* fill buffer metadata */
          b0 = vlib_get_buffer (vm, bi0);
          b1 = vlib_get_buffer (vm, bi1);

          vnet_buffer (b0)->sw_if_index[VLIB_RX] = mif->sw_if_index;
          vnet_buffer (b1)->sw_if_index[VLIB_RX] = mif->sw_if_index;

          vnet_buffer (b0)->sw_if_index[VLIB_TX] = (u32) ~ 0;
          vnet_buffer (b1)->sw_if_index[VLIB_TX] = (u32) ~ 0;

          /* copy buffer */
          mb0 = memif_get_buffer (mif, ring, rd->last_head);
          clib_memcpy (vlib_buffer_get_current (b0), mb0,
                       CLIB_CACHE_LINE_BYTES);
          b0->current_length = ring->desc[rd->last_head].length;
          rd->last_head = (rd->last_head + 1) & mask;

          mb1 = memif_get_buffer (mif, ring, rd->last_head);
          clib_memcpy (vlib_buffer_get_current (b1), mb1,
                       CLIB_CACHE_LINE_BYTES);
          b1->current_length = ring->desc[rd->last_head].length;
          rd->last_head = (rd->last_head + 1) & mask;
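
          /* only the first cache line was copied above; fetch the rest
           * of the payload when the packet is longer than that */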
          if (b0->current_length > CLIB_CACHE_LINE_BYTES)
            clib_memcpy (vlib_buffer_get_current (b0) + CLIB_CACHE_LINE_BYTES,
                         mb0 + CLIB_CACHE_LINE_BYTES,
                         b0->current_length - CLIB_CACHE_LINE_BYTES);

          if (b1->current_length > CLIB_CACHE_LINE_BYTES)
            clib_memcpy (vlib_buffer_get_current (b1) + CLIB_CACHE_LINE_BYTES,
                         mb1 + CLIB_CACHE_LINE_BYTES,
                         b1->current_length - CLIB_CACHE_LINE_BYTES);

          /* trace */
          VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b0);
          VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b1);

          if (PREDICT_FALSE (n_trace > 0))
            {
              /* b0 */
              memif_input_trace_t *tr;
              vlib_trace_buffer (vm, node, next0, b0,
                                 /* follow_chain */ 0);
              vlib_set_trace_count (vm, node, --n_trace);
              tr = vlib_add_trace (vm, node, b0, sizeof (*tr));
              tr->next_index = next0;
              tr->hw_if_index = mif->hw_if_index;
              tr->ring = rid;

              if (n_trace)
                {
                  /* b1 */
                  memif_input_trace_t *tr;
                  vlib_trace_buffer (vm, node, next1, b1,
                                     /* follow_chain */ 0);
                  vlib_set_trace_count (vm, node, --n_trace);
                  tr = vlib_add_trace (vm, node, b1, sizeof (*tr));
                  tr->next_index = next1;
                  tr->hw_if_index = mif->hw_if_index;
                  tr->ring = rid;
                }
            }

          /* redirect if feature path enabled */
          vnet_feature_start_device_input_x2 (mif->sw_if_index,
                                              &next0, &next1, b0, b1);

          /* enqueue */
          vlib_validate_buffer_enqueue_x2 (vm, node, next_index, to_next,
                                           n_left_to_next,
                                           bi0, bi1, next0, next1);

          /* next packet */
          num_slots -= 2;
          n_rx_packets += 2;
          n_rx_bytes += b0->current_length;
          n_rx_bytes += b1->current_length;
        }
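
      /* single-packet cleanup loop for whatever the two-at-a-time
       * loop left behind */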
      while (num_slots && n_left_to_next)
        {
          /* get empty buffer */
          u32 last_buf = vec_len (nm->rx_buffers[cpu_index]) - 1;
          bi0 = nm->rx_buffers[cpu_index][last_buf];
          _vec_len (nm->rx_buffers[cpu_index]) = last_buf;

          /* enqueue buffer */
          to_next[0] = bi0;
          to_next += 1;
          n_left_to_next--;

          /* fill buffer metadata */
          b0 = vlib_get_buffer (vm, bi0);
          b0->current_length = ring->desc[rd->last_head].length;
          vnet_buffer (b0)->sw_if_index[VLIB_RX] = mif->sw_if_index;
          vnet_buffer (b0)->sw_if_index[VLIB_TX] = (u32) ~ 0;

          /* copy buffer */
          mb0 = memif_get_buffer (mif, ring, rd->last_head);
          clib_memcpy (vlib_buffer_get_current (b0), mb0,
                       CLIB_CACHE_LINE_BYTES);
          if (b0->current_length > CLIB_CACHE_LINE_BYTES)
            clib_memcpy (vlib_buffer_get_current (b0) + CLIB_CACHE_LINE_BYTES,
                         mb0 + CLIB_CACHE_LINE_BYTES,
                         b0->current_length - CLIB_CACHE_LINE_BYTES);

          /* trace */
          VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b0);

          if (PREDICT_FALSE (n_trace > 0))
            {
              memif_input_trace_t *tr;
              vlib_trace_buffer (vm, node, next0, b0,
                                 /* follow_chain */ 0);
              vlib_set_trace_count (vm, node, --n_trace);
              tr = vlib_add_trace (vm, node, b0, sizeof (*tr));
              tr->next_index = next0;
              tr->hw_if_index = mif->hw_if_index;
              tr->ring = rid;
            }

          /* redirect if feature path enabled */
          vnet_feature_start_device_input_x1 (mif->sw_if_index, &next0, b0);

          /* enqueue */
          vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
                                           n_left_to_next, bi0, next0);

          /* next packet */
          rd->last_head = (rd->last_head + 1) & mask;
          num_slots--;
          n_rx_packets++;
          n_rx_bytes += b0->current_length;
        }
      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }
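
  /* make the buffer reads visible before the producer can observe the
   * advanced tail and start reusing those slots */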
  CLIB_MEMORY_STORE_BARRIER ();
  ring->tail = head;

  vlib_increment_combined_counter (vnm->interface_main.combined_sw_if_counters
                                   + VNET_INTERFACE_COUNTER_RX, cpu_index,
                                   mif->hw_if_index, n_rx_packets,
                                   n_rx_bytes);

  return n_rx_packets;
}
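
/*
 * Node dispatch: each input thread walks the interface pool and polls
 * only the interfaces statically assigned to it; a slave receives on
 * the master-to-slave ring, a master on the slave-to-master ring.
 */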
static uword
memif_input_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
                vlib_frame_t * frame)
{
  u32 n_rx_packets = 0;
  u32 cpu_index = os_get_cpu_number ();
  memif_main_t *nm = &memif_main;
  memif_if_t *mif;

  /* *INDENT-OFF* */
  pool_foreach (mif, nm->interfaces,
    ({
      if (mif->flags & MEMIF_IF_FLAG_ADMIN_UP &&
          mif->flags & MEMIF_IF_FLAG_CONNECTED &&
          (mif->if_index % nm->input_cpu_count) ==
          (cpu_index - nm->input_cpu_first_index))
        {
          if (mif->flags & MEMIF_IF_FLAG_IS_SLAVE)
            n_rx_packets +=
              memif_device_input_inline (vm, node, frame, mif,
                                         MEMIF_RING_M2S);
          else
            n_rx_packets +=
              memif_device_input_inline (vm, node, frame, mif,
                                         MEMIF_RING_S2M);
        }
    }));
  /* *INDENT-ON* */

  return n_rx_packets;
}
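
/* registered as an input node in interrupt state, so it is scheduled
 * on rx signaling from the peer rather than polled continuously */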
/* *INDENT-OFF* */
VLIB_REGISTER_NODE (memif_input_node) = {
  .function = memif_input_fn,
  .name = "memif-input",
  .sibling_of = "device-input",
  .format_trace = format_memif_input_trace,
  .type = VLIB_NODE_TYPE_INPUT,
  .state = VLIB_NODE_STATE_INTERRUPT,
  .n_errors = MEMIF_INPUT_N_ERROR,
  .error_strings = memif_input_error_strings,
};

VLIB_NODE_FUNCTION_MULTIARCH (memif_input_node, memif_input_fn)
/* *INDENT-ON* */

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */