2 *------------------------------------------------------------------
3 * Copyright (c) 2016 Cisco and/or its affiliates.
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at:
8 * http://www.apache.org/licenses/LICENSE-2.0
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 *------------------------------------------------------------------
21 #include <sys/ioctl.h>
24 #include <vlib/vlib.h>
25 #include <vlib/unix/unix.h>
26 #include <vnet/ethernet/ethernet.h>
27 #include <vnet/devices/devices.h>
28 #include <vnet/feature/feature.h>
30 #include <memif/memif.h>
/* Per-node error/counter definitions for the memif input node.
 * NOTE(review): this excerpt is elided — the foreach_memif_input_error
 * list body and the enum's opening "typedef enum {" line are not
 * visible here; confirm against the full file. */
32 #define foreach_memif_input_error
/* Expand the list into MEMIF_INPUT_ERROR_<f> enum members; the node
 * registration below references MEMIF_INPUT_N_ERROR as the count. */
36 #define _(f,s) MEMIF_INPUT_ERROR_##f,
37 foreach_memif_input_error
40 } memif_input_error_t;
/* Human-readable strings, index-matched to memif_input_error_t;
 * wired into the node via .error_strings in VLIB_REGISTER_NODE. */
42 static char *memif_input_error_strings[] = {
44 foreach_memif_input_error
/* End of the per-packet RX trace record. Fields are elided in this
 * excerpt; the code below reads/writes next_index, hw_if_index and a
 * ring slot value — presumably those are the struct members (verify
 * against the full file). */
53 } memif_input_trace_t;
/* Trace formatter for "show trace" output on memif-input packets.
 * Standard VLIB trace-formatter calling convention: the va_list
 * carries (vlib_main_t *, vlib_node_t *, trace record pointer).
 * NOTE(review): the "static u8 *" return-type line and the function's
 * opening/closing braces are elided from this excerpt. */
56 format_memif_input_trace (u8 * s, va_list * args)
58 CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
59 CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
60 memif_input_trace_t *t = va_arg (*args, memif_input_trace_t *);
/* Current indent of the partially-built string, used to align the
 * continuation line below. */
61 uword indent = format_get_indent (s);
63 s = format (s, "memif: hw_if_index %d next-index %d",
64 t->hw_if_index, t->next_index);
/* Second line, indented two columns past the first. */
65 s = format (s, "\n%Uslot: ring %u", format_white_space, indent + 2,
/* Warm the caches for buffer index bi before it is filled:
 * prefetch the vlib buffer header and the first cache line of its
 * data area, both with STORE intent since the RX loop below writes
 * metadata and copies packet bytes into them. */
70 static_always_inline void
71 memif_prefetch (vlib_main_t * vm, u32 bi)
73 vlib_buffer_t *b = vlib_get_buffer (vm, bi);
74 vlib_prefetch_buffer_header (b, STORE);
75 CLIB_PREFETCH (b->data, CLIB_CACHE_LINE_BYTES, STORE);
/* Poll one memif ring (type/rid) of interface mif and enqueue received
 * packets into the graph, defaulting to ethernet-input.
 *
 * Layout: classic VPP dual-loop (two packets per iteration with
 * prefetch of the next pair) followed by a single-packet cleanup loop.
 * Packet data is copied out of the shared-memory ring into freshly
 * allocated vlib buffers taken from a per-thread rx_buffers cache.
 *
 * NOTE(review): this excerpt is elided — several declarations
 * (n_free_bufs, head, num_slots, bi0/bi1, mb0/mb1, to_next,
 * n_left_to_next, n_rx_packets, n_rx_bytes), braces, and some else
 * branches are missing; comments below describe only what the visible
 * lines establish. */
78 static_always_inline uword
79 memif_device_input_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
80 vlib_frame_t * frame, memif_if_t * mif,
81 memif_ring_type_t type, u16 rid)
83 vnet_main_t *vnm = vnet_get_main ();
84 memif_ring_t *ring = memif_get_ring (mif, type, rid);
85 memif_ring_data_t *rd;
/* Default next node unless the interface has a per-interface redirect. */
87 u32 next_index = VNET_DEVICE_INPUT_NEXT_ETHERNET_INPUT;
88 uword n_trace = vlib_get_trace_count (vm, node);
89 memif_main_t *nm = &memif_main;
94 u32 thread_index = vlib_get_thread_index ();
96 vlib_buffer_t *b0, *b1;
/* Ring size is a power of two, so (index & mask) wraps an index. */
97 u16 ring_size = 1 << mif->log2_ring_size;
98 u16 mask = ring_size - 1;
/* Per-ring private data; S2M and M2S rings share one flat vector,
 * with M2S entries offset by num_s2m_rings. */
102 rd = vec_elt_at_index (mif->ring_data, rid + type * mif->num_s2m_rings);
103 if (mif->per_interface_next_index != ~0)
104 next_index = mif->per_interface_next_index;
/* Top up the per-thread buffer cache so a full ring's worth of
 * buffers is available before entering the loops. */
106 n_free_bufs = vec_len (nm->rx_buffers[thread_index]);
107 if (PREDICT_FALSE (n_free_bufs < ring_size))
109 vec_validate (nm->rx_buffers[thread_index],
110 ring_size + n_free_bufs - 1);
112 vlib_buffer_alloc (vm, &nm->rx_buffers[thread_index][n_free_bufs],
/* Shrink the vector back to the number actually allocated
 * (vlib_buffer_alloc may return fewer than requested). */
114 _vec_len (nm->rx_buffers[thread_index]) = n_free_bufs;
/* Nothing new on the ring: producer head equals our consumed mark. */
118 if (head == rd->last_head)
/* Number of filled slots, accounting for ring wrap-around. */
121 if (head > rd->last_head)
122 num_slots = head - rd->last_head;
124 num_slots = ring_size - rd->last_head + head;
129 u32 next0 = next_index;
130 u32 next1 = next_index;
131 vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
/* Dual loop: needs >5 slots so the +2..+5 prefetches stay in range. */
133 while (num_slots > 5 && n_left_to_next > 2)
/* Fast path: the prefetch window does not cross the ring end,
 * so no wrap arithmetic is needed. */
135 if (PREDICT_TRUE (rd->last_head + 5 < ring_size))
137 CLIB_PREFETCH (memif_get_buffer (mif, ring, rd->last_head + 2),
138 CLIB_CACHE_LINE_BYTES, LOAD);
139 CLIB_PREFETCH (memif_get_buffer (mif, ring, rd->last_head + 3),
140 CLIB_CACHE_LINE_BYTES, LOAD);
141 CLIB_PREFETCH (&ring->desc[rd->last_head + 4],
142 CLIB_CACHE_LINE_BYTES, LOAD);
143 CLIB_PREFETCH (&ring->desc[rd->last_head + 5],
144 CLIB_CACHE_LINE_BYTES, LOAD);
/* Wrapping path. NOTE(review): "% mask" wraps modulo
 * ring_size-1, not ring_size ("& mask" / "% ring_size" looks
 * intended) — it can prefetch one slot off near the wrap
 * point. Prefetch-only, so correctness is unaffected, but
 * worth confirming/fixing in the full file. */
148 CLIB_PREFETCH (memif_get_buffer
149 (mif, ring, (rd->last_head + 2) % mask),
150 CLIB_CACHE_LINE_BYTES, LOAD);
151 CLIB_PREFETCH (memif_get_buffer
152 (mif, ring, (rd->last_head + 3) % mask),
153 CLIB_CACHE_LINE_BYTES, LOAD);
154 CLIB_PREFETCH (&ring->desc[(rd->last_head + 4) % mask],
155 CLIB_CACHE_LINE_BYTES, LOAD);
156 CLIB_PREFETCH (&ring->desc[(rd->last_head + 5) % mask],
157 CLIB_CACHE_LINE_BYTES, LOAD);
/* get empty buffer */
/* Take two buffer indices from the tail of the per-thread cache. */
160 u32 last_buf = vec_len (nm->rx_buffers[thread_index]) - 1;
161 bi0 = nm->rx_buffers[thread_index][last_buf];
162 bi1 = nm->rx_buffers[thread_index][last_buf - 1];
163 _vec_len (nm->rx_buffers[thread_index]) -= 2;
/* Prefetch (STORE intent) the next pair of cached buffers. */
167 memif_prefetch (vm, nm->rx_buffers[thread_index][last_buf - 2]);
168 memif_prefetch (vm, nm->rx_buffers[thread_index][last_buf - 3]);
/* fill buffer metadata */
178 b0 = vlib_get_buffer (vm, bi0);
179 b1 = vlib_get_buffer (vm, bi1);
181 vnet_buffer (b0)->sw_if_index[VLIB_RX] = mif->sw_if_index;
182 vnet_buffer (b1)->sw_if_index[VLIB_RX] = mif->sw_if_index;
/* ~0 TX sw_if_index = "not yet determined" for downstream nodes. */
184 vnet_buffer (b0)->sw_if_index[VLIB_TX] = (u32) ~ 0;
185 vnet_buffer (b1)->sw_if_index[VLIB_TX] = (u32) ~ 0;
/* Copy the first cache line eagerly; the remainder (if any) is
 * copied below once the descriptor length is known. */
188 mb0 = memif_get_buffer (mif, ring, rd->last_head);
189 clib_memcpy (vlib_buffer_get_current (b0), mb0,
190 CLIB_CACHE_LINE_BYTES);
191 b0->current_length = ring->desc[rd->last_head].length;
192 rd->last_head = (rd->last_head + 1) & mask;
194 mb1 = memif_get_buffer (mif, ring, rd->last_head);
195 clib_memcpy (vlib_buffer_get_current (b1), mb1,
196 CLIB_CACHE_LINE_BYTES);
197 b1->current_length = ring->desc[rd->last_head].length;
198 rd->last_head = (rd->last_head + 1) & mask;
/* Copy the remainder of each packet past the first cache line. */
200 if (b0->current_length > CLIB_CACHE_LINE_BYTES)
201 clib_memcpy (vlib_buffer_get_current (b0) + CLIB_CACHE_LINE_BYTES,
202 mb0 + CLIB_CACHE_LINE_BYTES,
203 b0->current_length - CLIB_CACHE_LINE_BYTES);
205 if (b1->current_length > CLIB_CACHE_LINE_BYTES)
206 clib_memcpy (vlib_buffer_get_current (b1) + CLIB_CACHE_LINE_BYTES,
207 mb1 + CLIB_CACHE_LINE_BYTES,
208 b1->current_length - CLIB_CACHE_LINE_BYTES);
211 VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b0);
212 VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b1);
/* Record packet traces while the trace budget lasts (b0 first,
 * then b1 in the elided sibling branch below). */
214 if (PREDICT_FALSE (n_trace > 0))
217 memif_input_trace_t *tr;
218 vlib_trace_buffer (vm, node, next0, b0,
219 /* follow_chain */ 0);
220 vlib_set_trace_count (vm, node, --n_trace);
221 tr = vlib_add_trace (vm, node, b0, sizeof (*tr));
222 tr->next_index = next0;
223 tr->hw_if_index = mif->hw_if_index;
229 memif_input_trace_t *tr;
230 vlib_trace_buffer (vm, node, next1, b1,
231 /* follow_chain */ 0);
232 vlib_set_trace_count (vm, node, --n_trace);
233 tr = vlib_add_trace (vm, node, b1, sizeof (*tr));
234 tr->next_index = next1;
235 tr->hw_if_index = mif->hw_if_index;
/* redirect if feature path enabled */
241 vnet_feature_start_device_input_x2 (mif->sw_if_index,
242 &next0, &next1, b0, b1);
/* Enqueue both packets, fixing up the frame if next0/next1
 * diverged from next_index. */
245 vlib_validate_buffer_enqueue_x2 (vm, node, next_index, to_next,
247 bi0, bi1, next0, next1);
252 n_rx_bytes += b0->current_length;
253 n_rx_bytes += b1->current_length;
/* Single loop: drain the remaining slots one packet at a time. */
255 while (num_slots && n_left_to_next)
/* get empty buffer */
258 u32 last_buf = vec_len (nm->rx_buffers[thread_index]) - 1;
259 bi0 = nm->rx_buffers[thread_index][last_buf];
260 _vec_len (nm->rx_buffers[thread_index]) = last_buf;
/* fill buffer metadata */
268 b0 = vlib_get_buffer (vm, bi0);
269 b0->current_length = ring->desc[rd->last_head].length;
270 vnet_buffer (b0)->sw_if_index[VLIB_RX] = mif->sw_if_index;
271 vnet_buffer (b0)->sw_if_index[VLIB_TX] = (u32) ~ 0;
/* First cache line eagerly, remainder conditionally — same
 * copy scheme as the dual loop. */
274 mb0 = memif_get_buffer (mif, ring, rd->last_head);
275 clib_memcpy (vlib_buffer_get_current (b0), mb0,
276 CLIB_CACHE_LINE_BYTES);
277 if (b0->current_length > CLIB_CACHE_LINE_BYTES)
278 clib_memcpy (vlib_buffer_get_current (b0) + CLIB_CACHE_LINE_BYTES,
279 mb0 + CLIB_CACHE_LINE_BYTES,
280 b0->current_length - CLIB_CACHE_LINE_BYTES);
283 VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b0);
285 if (PREDICT_FALSE (n_trace > 0))
287 memif_input_trace_t *tr;
288 vlib_trace_buffer (vm, node, next0, b0,
289 /* follow_chain */ 0);
290 vlib_set_trace_count (vm, node, --n_trace);
291 tr = vlib_add_trace (vm, node, b0, sizeof (*tr));
292 tr->next_index = next0;
293 tr->hw_if_index = mif->hw_if_index;
/* redirect if feature path enabled */
299 vnet_feature_start_device_input_x1 (mif->sw_if_index, &next0, b0);
302 vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
303 n_left_to_next, bi0, next0);
/* Advance the consumed mark only after the copy is complete. */
306 rd->last_head = (rd->last_head + 1) & mask;
309 n_rx_bytes += b0->current_length;
311 vlib_put_next_frame (vm, node, next_index, n_left_to_next);
/* Ensure packet-data reads complete before the (elided) publish of
 * ring->tail to the peer process sharing this memory. */
313 CLIB_MEMORY_STORE_BARRIER ();
/* Bump the interface RX combined (packets+bytes) counter. */
316 vlib_increment_combined_counter (vnm->interface_main.combined_sw_if_counters
317 + VNET_INTERFACE_COUNTER_RX, thread_index,
318 mif->hw_if_index, n_rx_packets,
/* Input-node entry point: iterate this thread's assigned (device,
 * queue) pairs and poll each admin-up, connected memif interface.
 * A slave polls the master-to-slave ring, a master the slave-to-master
 * ring. NOTE(review): return-type line, braces, else branches, and the
 * accumulation of memif_device_input_inline's return value into
 * n_rx_packets are elided from this excerpt. */
325 memif_input_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
326 vlib_frame_t * frame)
328 u32 n_rx_packets = 0;
329 memif_main_t *nm = &memif_main;
/* Per-node runtime data holds the device/queue assignment list. */
331 vnet_device_input_runtime_t *rt = (void *) node->runtime_data;
332 vnet_device_and_queue_t *dq;
333 memif_ring_type_t type;
335 foreach_device_and_queue (dq, rt->devices_and_queues)
337 mif = vec_elt_at_index (nm->interfaces, dq->dev_instance);
/* Only poll interfaces that are both admin-up and connected. */
338 if ((mif->flags & MEMIF_IF_FLAG_ADMIN_UP) &&
339 (mif->flags & MEMIF_IF_FLAG_CONNECTED))
/* Ring direction depends on our role on this interface. */
341 if (mif->flags & MEMIF_IF_FLAG_IS_SLAVE)
342 type = MEMIF_RING_M2S;
344 type = MEMIF_RING_S2M;
346 memif_device_input_inline (vm, node, frame, mif, type,
/* Graph-node registration for memif-input: an INPUT node started in
 * INTERRUPT state, sharing next-node arcs with "device-input"
 * (.sibling_of), with the trace formatter and error strings defined
 * above. NOTE(review): the closing of the initializer and some fields
 * may be elided from this excerpt. */
355 VLIB_REGISTER_NODE (memif_input_node) = {
356 .function = memif_input_fn,
357 .name = "memif-input",
358 .sibling_of = "device-input",
359 .format_trace = format_memif_input_trace,
360 .type = VLIB_NODE_TYPE_INPUT,
361 .state = VLIB_NODE_STATE_INTERRUPT,
362 .n_errors = MEMIF_INPUT_N_ERROR,
363 .error_strings = memif_input_error_strings,
/* Generate per-CPU-architecture variants of the node function. */
366 VLIB_NODE_FUNCTION_MULTIARCH (memif_input_node, memif_input_fn)
371 * fd.io coding-style-patch-verification: ON
374 * eval: (c-set-style "gnu")