/*
 *------------------------------------------------------------------
 * Copyright (c) 2016 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *------------------------------------------------------------------
 */

#include <sys/ioctl.h>
#include <unistd.h>		/* write() */

#include <vlib/vlib.h>
#include <vlib/unix/unix.h>
#include <vnet/ethernet/ethernet.h>

#include <memif/memif.h>
#include <memif/private.h>

#define foreach_memif_tx_func_error \
_(NO_FREE_SLOTS, "no free tx slots") \
_(TRUNC_PACKET, "packet > buffer size -- truncated in tx ring") \
_(PENDING_MSGS, "pending msgs in tx ring")

typedef enum
{
#define _(f,s) MEMIF_TX_ERROR_##f,
  foreach_memif_tx_func_error
#undef _
    MEMIF_TX_N_ERROR,
} memif_tx_func_error_t;

static char *memif_tx_func_error_strings[] = {
#define _(n,s) s,
  foreach_memif_tx_func_error
#undef _
};

u8 *
format_memif_device_name (u8 * s, va_list * args)
{
  u32 dev_instance = va_arg (*args, u32);
  memif_main_t *mm = &memif_main;
  memif_if_t *mif = pool_elt_at_index (mm->interfaces, dev_instance);

  s = format (s, "memif%lu/%lu", mif->socket_file_index, mif->id);
  return s;
}

static u8 *
format_memif_device (u8 * s, va_list * args)
{
  u32 dev_instance = va_arg (*args, u32);
  int verbose = va_arg (*args, int);
  uword indent = format_get_indent (s);

  s = format (s, "MEMIF interface");
  if (verbose)
    s = format (s, "\n%U instance %u", format_white_space, indent + 2,
                dev_instance);
  return s;
}

static u8 *
format_memif_tx_trace (u8 * s, va_list * args)
{
  s = format (s, "Unimplemented...");
  return s;
}

static_always_inline void
memif_prefetch_buffer_and_data (vlib_main_t * vm, u32 bi)
{
  vlib_buffer_t *b = vlib_get_buffer (vm, bi);
  vlib_prefetch_buffer_header (b, LOAD);
  CLIB_PREFETCH (b->data, CLIB_CACHE_LINE_BYTES, LOAD);
}

/**
 * @brief Copy buffer to tx ring
 *
 * @param * vm (in)
 * @param * node (in)
 * @param * mif (in) pointer to memif interface
 * @param bi (in) vlib buffer index
 * @param * ring (in) pointer to memif ring
 * @param * head (in/out) ring head
 * @param mask (in) ring size - 1
 */
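/*
 * Note: a single vlib buffer chain may occupy several consecutive ring
 * descriptors; each descriptor that continues into the following slot is
 * marked with MEMIF_DESC_FLAG_NEXT.
 */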
static_always_inline void
memif_copy_buffer_to_tx_ring (vlib_main_t * vm, vlib_node_runtime_t * node,
                              memif_if_t * mif, u32 bi, memif_ring_t * ring,
                              u16 * head, u16 mask)
{
  vlib_buffer_t *b0;
  void *mb0;
  u32 total = 0, len;

  mb0 = memif_get_buffer (mif, ring, *head);
  ring->desc[*head].flags = 0;
  do
    {
      b0 = vlib_get_buffer (vm, bi);
      len = b0->current_length;
      if (PREDICT_FALSE (ring->desc[*head].buffer_length < (total + len)))
        {
          if (PREDICT_TRUE (total))
            {
              ring->desc[*head].length = total;
              total = 0;
              ring->desc[*head].flags |= MEMIF_DESC_FLAG_NEXT;
              *head = (*head + 1) & mask;
              mb0 = memif_get_buffer (mif, ring, *head);
              ring->desc[*head].flags = 0;
            }
        }
      if (PREDICT_TRUE (ring->desc[*head].buffer_length >= (total + len)))
        {
          clib_memcpy (mb0 + total, vlib_buffer_get_current (b0),
                       CLIB_CACHE_LINE_BYTES);
          if (len > CLIB_CACHE_LINE_BYTES)
            clib_memcpy (mb0 + CLIB_CACHE_LINE_BYTES + total,
                         vlib_buffer_get_current (b0) + CLIB_CACHE_LINE_BYTES,
                         len - CLIB_CACHE_LINE_BYTES);
          total += len;
        }
      else
        {
          vlib_error_count (vm, node->node_index, MEMIF_TX_ERROR_TRUNC_PACKET,
                            1);
          break;
        }
    }
  while ((bi = (b0->flags & VLIB_BUFFER_NEXT_PRESENT) ? b0->next_buffer : 0));

  if (PREDICT_TRUE (total))
    {
      ring->desc[*head].length = total;
      *head = (*head + 1) & mask;
    }
}
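
/*
 * Per-frame TX path: pick a tx queue for the calling thread, copy the
 * frame's buffers into free ring slots, publish the new head and, unless
 * the peer has masked interrupts, signal it over the queue's interrupt fd.
 */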
static_always_inline uword
memif_interface_tx_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
                           vlib_frame_t * frame, memif_if_t * mif,
                           memif_ring_type_t type)
{
  u8 qid;
  memif_ring_t *ring;
  u32 *buffers = vlib_frame_args (frame);
  u32 n_left = frame->n_vectors;
  u16 ring_size, mask, head, tail, free_slots;
  u32 thread_index = vlib_get_thread_index ();
  u8 tx_queues = vec_len (mif->tx_queues);
  memif_queue_t *mq;

  if (tx_queues < vec_len (vlib_mains))
    {
      qid = thread_index % tx_queues;
      clib_spinlock_lock_if_init (&mif->lockp);
    }
  else
    qid = thread_index;

  mq = vec_elt_at_index (mif->tx_queues, qid);
  ring = mq->ring;
  ring_size = 1 << mq->log2_ring_size;
  mask = ring_size - 1;

  /* free consumed buffers */

  head = ring->head;
  tail = ring->tail;
  if (tail > head)
    free_slots = tail - head;
  else
    free_slots = ring_size - head + tail;
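
  /* copy two packets per iteration while prefetching the next two chain
     buffers and the ring descriptors a few slots ahead */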
  while (n_left > 5 && free_slots > 1)
    {
      if (PREDICT_TRUE (head + 5 < ring_size))
        {
          CLIB_PREFETCH (memif_get_buffer (mif, ring, head + 2),
                         CLIB_CACHE_LINE_BYTES, STORE);
          CLIB_PREFETCH (memif_get_buffer (mif, ring, head + 3),
                         CLIB_CACHE_LINE_BYTES, STORE);
          CLIB_PREFETCH (&ring->desc[head + 4], CLIB_CACHE_LINE_BYTES, STORE);
          CLIB_PREFETCH (&ring->desc[head + 5], CLIB_CACHE_LINE_BYTES, STORE);
        }
      else
        {
          CLIB_PREFETCH (memif_get_buffer (mif, ring, (head + 2) % mask),
                         CLIB_CACHE_LINE_BYTES, STORE);
          CLIB_PREFETCH (memif_get_buffer (mif, ring, (head + 3) % mask),
                         CLIB_CACHE_LINE_BYTES, STORE);
          CLIB_PREFETCH (&ring->desc[(head + 4) % mask],
                         CLIB_CACHE_LINE_BYTES, STORE);
          CLIB_PREFETCH (&ring->desc[(head + 5) % mask],
                         CLIB_CACHE_LINE_BYTES, STORE);
        }
      memif_prefetch_buffer_and_data (vm, buffers[2]);
      memif_prefetch_buffer_and_data (vm, buffers[3]);

      memif_copy_buffer_to_tx_ring (vm, node, mif, buffers[0], ring, &head,
                                    mask);
      memif_copy_buffer_to_tx_ring (vm, node, mif, buffers[1], ring, &head,
                                    mask);

      buffers += 2;
      n_left -= 2;
      free_slots -= 2;
    }

  while (n_left && free_slots)
    {
      memif_copy_buffer_to_tx_ring (vm, node, mif, buffers[0], ring, &head,
                                    mask);
      buffers++;
      n_left--;
      free_slots--;
    }

  CLIB_MEMORY_STORE_BARRIER ();
  ring->head = head;

  clib_spinlock_unlock_if_init (&mif->lockp);

  if (n_left)
    vlib_error_count (vm, node->node_index, MEMIF_TX_ERROR_NO_FREE_SLOTS,
                      n_left);

  vlib_buffer_free (vm, vlib_frame_args (frame), frame->n_vectors);
  if ((ring->flags & MEMIF_RING_FLAG_MASK_INT) == 0 && mq->int_fd > -1)
    {
      u64 b = 1;
      CLIB_UNUSED (int r) = write (mq->int_fd, &b, sizeof (b));
    }

  return frame->n_vectors;
}
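
/*
 * A slave transmits on the slave-to-master (S2M) ring, a master on the
 * master-to-slave (M2S) ring.
 */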
static uword
memif_interface_tx (vlib_main_t * vm,
                    vlib_node_runtime_t * node, vlib_frame_t * frame)
{
  memif_main_t *nm = &memif_main;
  vnet_interface_output_runtime_t *rund = (void *) node->runtime_data;
  memif_if_t *mif = pool_elt_at_index (nm->interfaces, rund->dev_instance);

  if (mif->flags & MEMIF_IF_FLAG_IS_SLAVE)
    return memif_interface_tx_inline (vm, node, frame, mif, MEMIF_RING_S2M);
  else
    return memif_interface_tx_inline (vm, node, frame, mif, MEMIF_RING_M2S);
}
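
/*
 * Redirect packets coming from memif-input to an arbitrary next node;
 * a node_index of ~0 turns redirection off.
 */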
static void
memif_set_interface_next_node (vnet_main_t * vnm, u32 hw_if_index,
                               u32 node_index)
{
  memif_main_t *apm = &memif_main;
  vnet_hw_interface_t *hw = vnet_get_hw_interface (vnm, hw_if_index);
  memif_if_t *mif = pool_elt_at_index (apm->interfaces, hw->dev_instance);

  /* Shut off redirection */
  if (node_index == ~0)
    {
      mif->per_interface_next_index = node_index;
      return;
    }

  mif->per_interface_next_index =
    vlib_node_add_next (vlib_get_main (), memif_input_node.index, node_index);
}

static void
memif_clear_hw_interface_counters (u32 instance)
{
  /* Nothing for now */
}
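
/*
 * In polling mode the ring is marked with MEMIF_RING_FLAG_MASK_INT so the
 * peer does not raise rx interrupts; otherwise the flag is cleared.
 */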
static clib_error_t *
memif_interface_rx_mode_change (vnet_main_t * vnm, u32 hw_if_index, u32 qid,
                                vnet_hw_interface_rx_mode mode)
{
  memif_main_t *mm = &memif_main;
  vnet_hw_interface_t *hw = vnet_get_hw_interface (vnm, hw_if_index);
  memif_if_t *mif = pool_elt_at_index (mm->interfaces, hw->dev_instance);
  memif_queue_t *mq = vec_elt_at_index (mif->rx_queues, qid);

  if (mode == VNET_HW_INTERFACE_RX_MODE_POLLING)
    mq->ring->flags |= MEMIF_RING_FLAG_MASK_INT;
  else
    mq->ring->flags &= ~MEMIF_RING_FLAG_MASK_INT;

  return 0;
}

static clib_error_t *
memif_interface_admin_up_down (vnet_main_t * vnm, u32 hw_if_index, u32 flags)
{
  memif_main_t *mm = &memif_main;
  vnet_hw_interface_t *hw = vnet_get_hw_interface (vnm, hw_if_index);
  memif_if_t *mif = pool_elt_at_index (mm->interfaces, hw->dev_instance);
  static clib_error_t *error = 0;

  if (flags & VNET_SW_INTERFACE_FLAG_ADMIN_UP)
    mif->flags |= MEMIF_IF_FLAG_ADMIN_UP;
  else
    mif->flags &= ~MEMIF_IF_FLAG_ADMIN_UP;

  return error;
}

static clib_error_t *
memif_subif_add_del_function (vnet_main_t * vnm,
                              u32 hw_if_index,
                              struct vnet_sw_interface_t *st, int is_add)
{
  /* Nothing for now */
  return 0;
}
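
/*
 * Device class registration: ties the tx function, the formatters, the
 * error strings and the callbacks above to the memif device class.
 */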
/* *INDENT-OFF* */
VNET_DEVICE_CLASS (memif_device_class) = {
  .name = "memif",
  .tx_function = memif_interface_tx,
  .format_device_name = format_memif_device_name,
  .format_device = format_memif_device,
  .format_tx_trace = format_memif_tx_trace,
  .tx_function_n_errors = MEMIF_TX_N_ERROR,
  .tx_function_error_strings = memif_tx_func_error_strings,
  .rx_redirect_to_node = memif_set_interface_next_node,
  .clear_counters = memif_clear_hw_interface_counters,
  .admin_up_down_function = memif_interface_admin_up_down,
  .subif_add_del_function = memif_subif_add_del_function,
  .rx_mode_change_function = memif_interface_rx_mode_change,
};

VLIB_DEVICE_TX_FUNCTION_MULTIARCH(memif_device_class,
                                  memif_interface_tx);
/* *INDENT-ON* */

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */