/*
 *------------------------------------------------------------------
 * Copyright (c) 2016 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *------------------------------------------------------------------
 */
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <unistd.h>

#include <vlib/vlib.h>
#include <vlib/unix/unix.h>
#include <vnet/ethernet/ethernet.h>

#include <memif/memif.h>
/* TX error counters: expanded once into the enum below and once into the
 * matching string table used by the device class error reporting. */
#define foreach_memif_tx_func_error \
  _(NO_FREE_SLOTS, "no free tx slots") \
  _(PENDING_MSGS, "pending msgs in tx ring")

typedef enum
{
#define _(f,s) MEMIF_TX_ERROR_##f,
  foreach_memif_tx_func_error
#undef _
    MEMIF_TX_N_ERROR,		/* number of tx errors */
} memif_tx_func_error_t;

static char *memif_tx_func_error_strings[] = {
#define _(n,s) s,
  foreach_memif_tx_func_error
#undef _
};
50 format_memif_device_name (u8 * s, va_list * args)
52 u32 i = va_arg (*args, u32);
54 s = format (s, "memif%u", i);
59 format_memif_device (u8 * s, va_list * args)
61 u32 dev_instance = va_arg (*args, u32);
62 int verbose = va_arg (*args, int);
63 uword indent = format_get_indent (s);
65 s = format (s, "MEMIF interface");
68 s = format (s, "\n%U instance %u", format_white_space, indent + 2,
75 format_memif_tx_trace (u8 * s, va_list * args)
77 s = format (s, "Unimplemented...");
81 static_always_inline void
82 memif_prefetch_buffer_and_data (vlib_main_t * vm, u32 bi)
84 vlib_buffer_t *b = vlib_get_buffer (vm, bi);
85 vlib_prefetch_buffer_header (b, LOAD);
86 CLIB_PREFETCH (b->data, CLIB_CACHE_LINE_BYTES, LOAD);
89 static_always_inline uword
90 memif_interface_tx_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
91 vlib_frame_t * frame, memif_if_t * mif,
92 memif_ring_type_t type)
96 u32 *buffers = vlib_frame_args (frame);
97 u32 n_left = frame->n_vectors;
98 u16 ring_size = 1 << mif->log2_ring_size;
99 u16 mask = ring_size - 1;
102 u32 thread_index = vlib_get_thread_index ();
103 u8 tx_queues = memif_get_tx_queues (mif);
105 if (tx_queues < vec_len (vlib_mains))
107 rid = thread_index % tx_queues;
108 clib_spinlock_lock_if_init (&mif->lockp);
114 ring = memif_get_ring (mif, type, rid);
116 /* free consumed buffers */
122 free_slots = tail - head;
124 free_slots = ring_size - head + tail;
126 while (n_left > 5 && free_slots > 1)
128 if (PREDICT_TRUE (head + 5 < ring_size))
130 CLIB_PREFETCH (memif_get_buffer (mif, ring, head + 2),
131 CLIB_CACHE_LINE_BYTES, STORE);
132 CLIB_PREFETCH (memif_get_buffer (mif, ring, head + 3),
133 CLIB_CACHE_LINE_BYTES, STORE);
134 CLIB_PREFETCH (&ring->desc[head + 4], CLIB_CACHE_LINE_BYTES, STORE);
135 CLIB_PREFETCH (&ring->desc[head + 5], CLIB_CACHE_LINE_BYTES, STORE);
139 CLIB_PREFETCH (memif_get_buffer (mif, ring, (head + 2) % mask),
140 CLIB_CACHE_LINE_BYTES, STORE);
141 CLIB_PREFETCH (memif_get_buffer (mif, ring, (head + 3) % mask),
142 CLIB_CACHE_LINE_BYTES, STORE);
143 CLIB_PREFETCH (&ring->desc[(head + 4) % mask],
144 CLIB_CACHE_LINE_BYTES, STORE);
145 CLIB_PREFETCH (&ring->desc[(head + 5) % mask],
146 CLIB_CACHE_LINE_BYTES, STORE);
149 memif_prefetch_buffer_and_data (vm, buffers[2]);
150 memif_prefetch_buffer_and_data (vm, buffers[3]);
152 vlib_buffer_t *b0 = vlib_get_buffer (vm, buffers[0]);
153 vlib_buffer_t *b1 = vlib_get_buffer (vm, buffers[1]);
155 void *mb0 = memif_get_buffer (mif, ring, head);
156 clib_memcpy (mb0, vlib_buffer_get_current (b0), CLIB_CACHE_LINE_BYTES);
157 ring->desc[head].length = b0->current_length;
158 head = (head + 1) & mask;
160 void *mb1 = memif_get_buffer (mif, ring, head);
161 clib_memcpy (mb1, vlib_buffer_get_current (b1), CLIB_CACHE_LINE_BYTES);
162 ring->desc[head].length = b1->current_length;
163 head = (head + 1) & mask;
165 if (b0->current_length > CLIB_CACHE_LINE_BYTES)
167 clib_memcpy (mb0 + CLIB_CACHE_LINE_BYTES,
168 vlib_buffer_get_current (b0) + CLIB_CACHE_LINE_BYTES,
169 b0->current_length - CLIB_CACHE_LINE_BYTES);
171 if (b1->current_length > CLIB_CACHE_LINE_BYTES)
173 clib_memcpy (mb1 + CLIB_CACHE_LINE_BYTES,
174 vlib_buffer_get_current (b1) + CLIB_CACHE_LINE_BYTES,
175 b1->current_length - CLIB_CACHE_LINE_BYTES);
184 while (n_left && free_slots)
186 vlib_buffer_t *b0 = vlib_get_buffer (vm, buffers[0]);
187 void *mb0 = memif_get_buffer (mif, ring, head);
188 clib_memcpy (mb0, vlib_buffer_get_current (b0), CLIB_CACHE_LINE_BYTES);
190 if (b0->current_length > CLIB_CACHE_LINE_BYTES)
192 clib_memcpy (mb0 + CLIB_CACHE_LINE_BYTES,
193 vlib_buffer_get_current (b0) + CLIB_CACHE_LINE_BYTES,
194 b0->current_length - CLIB_CACHE_LINE_BYTES);
196 ring->desc[head].length = b0->current_length;
197 head = (head + 1) & mask;
204 CLIB_MEMORY_STORE_BARRIER ();
207 clib_spinlock_unlock_if_init (&mif->lockp);
211 vlib_error_count (vm, node->node_index, MEMIF_TX_ERROR_NO_FREE_SLOTS,
213 vlib_buffer_free (vm, buffers, n_left);
216 vlib_buffer_free (vm, vlib_frame_args (frame), frame->n_vectors);
217 if (mif->interrupt_line.fd > 0)
220 CLIB_UNUSED (int r) = write (mif->interrupt_line.fd, &b, sizeof (b));
223 return frame->n_vectors;
227 memif_interface_tx (vlib_main_t * vm,
228 vlib_node_runtime_t * node, vlib_frame_t * frame)
230 memif_main_t *nm = &memif_main;
231 vnet_interface_output_runtime_t *rund = (void *) node->runtime_data;
232 memif_if_t *mif = pool_elt_at_index (nm->interfaces, rund->dev_instance);
234 if (mif->flags & MEMIF_IF_FLAG_IS_SLAVE)
235 return memif_interface_tx_inline (vm, node, frame, mif, MEMIF_RING_S2M);
237 return memif_interface_tx_inline (vm, node, frame, mif, MEMIF_RING_M2S);
241 memif_set_interface_next_node (vnet_main_t * vnm, u32 hw_if_index,
244 memif_main_t *apm = &memif_main;
245 vnet_hw_interface_t *hw = vnet_get_hw_interface (vnm, hw_if_index);
246 memif_if_t *mif = pool_elt_at_index (apm->interfaces, hw->dev_instance);
248 /* Shut off redirection */
249 if (node_index == ~0)
251 mif->per_interface_next_index = node_index;
255 mif->per_interface_next_index =
256 vlib_node_add_next (vlib_get_main (), memif_input_node.index, node_index);
260 memif_clear_hw_interface_counters (u32 instance)
262 /* Nothing for now */
265 static clib_error_t *
266 memif_interface_admin_up_down (vnet_main_t * vnm, u32 hw_if_index, u32 flags)
268 memif_main_t *apm = &memif_main;
269 vlib_main_t *vm = vlib_get_main ();
270 memif_msg_t msg = { 0 };
271 vnet_hw_interface_t *hw = vnet_get_hw_interface (vnm, hw_if_index);
272 memif_if_t *mif = pool_elt_at_index (apm->interfaces, hw->dev_instance);
273 static clib_error_t *error = 0;
275 if (flags & VNET_SW_INTERFACE_FLAG_ADMIN_UP)
276 mif->flags |= MEMIF_IF_FLAG_ADMIN_UP;
279 mif->flags &= ~MEMIF_IF_FLAG_ADMIN_UP;
280 if (!(mif->flags & MEMIF_IF_FLAG_DELETING)
281 && mif->connection.index != ~0)
283 msg.version = MEMIF_VERSION;
284 msg.type = MEMIF_MSG_TYPE_DISCONNECT;
285 if (send (mif->connection.fd, &msg, sizeof (msg), 0) < 0)
287 clib_unix_warning ("Failed to send disconnect request");
288 error = clib_error_return_unix (0, "send fd %d",
290 memif_disconnect (vm, mif);
298 static clib_error_t *
299 memif_subif_add_del_function (vnet_main_t * vnm,
301 struct vnet_sw_interface_t *st, int is_add)
303 /* Nothing for now */
308 VNET_DEVICE_CLASS (memif_device_class) = {
310 .tx_function = memif_interface_tx,
311 .format_device_name = format_memif_device_name,
312 .format_device = format_memif_device,
313 .format_tx_trace = format_memif_tx_trace,
314 .tx_function_n_errors = MEMIF_TX_N_ERROR,
315 .tx_function_error_strings = memif_tx_func_error_strings,
316 .rx_redirect_to_node = memif_set_interface_next_node,
317 .clear_counters = memif_clear_hw_interface_counters,
318 .admin_up_down_function = memif_interface_admin_up_down,
319 .subif_add_del_function = memif_subif_add_del_function,
322 VLIB_DEVICE_TX_FUNCTION_MULTIARCH(memif_device_class,
/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */