X-Git-Url: https://gerrit.fd.io/r/gitweb?a=blobdiff_plain;f=src%2Fplugins%2Fmemif%2Fdevice.c;h=3809061e4346e22d75cbac0954352e01baaefec0;hb=f72212e00282712f5829a7f76a8cb6f486cdae60;hp=4faeb0554f3cc87d0ff9ef68bcd3866dc83dfe8f;hpb=1927da29ccbe1d4cc8e59ccfa197eb41c257814f;p=vpp.git

diff --git a/src/plugins/memif/device.c b/src/plugins/memif/device.c
index 4faeb0554f3..3809061e434 100644
--- a/src/plugins/memif/device.c
+++ b/src/plugins/memif/device.c
@@ -26,9 +26,11 @@
 #include
 #include
+#include
 
 #define foreach_memif_tx_func_error \
 _(NO_FREE_SLOTS, "no free tx slots") \
+_(TRUNC_PACKET, "packet > buffer size -- truncated in tx ring") \
 _(PENDING_MSGS, "pending msgs in tx ring")
 
 typedef enum
@@ -39,28 +41,31 @@ typedef enum
     MEMIF_TX_N_ERROR,
 } memif_tx_func_error_t;
 
-static char *memif_tx_func_error_strings[] = {
+static __clib_unused char *memif_tx_func_error_strings[] = {
 #define _(n,s) s,
   foreach_memif_tx_func_error
 #undef _
 };
-
-static u8 *
+#ifndef CLIB_MULTIARCH_VARIANT
+u8 *
 format_memif_device_name (u8 * s, va_list * args)
 {
-  u32 i = va_arg (*args, u32);
+  u32 dev_instance = va_arg (*args, u32);
+  memif_main_t *mm = &memif_main;
+  memif_if_t *mif = pool_elt_at_index (mm->interfaces, dev_instance);
 
-  s = format (s, "memif%u", i);
+  s = format (s, "memif%lu/%lu", mif->socket_file_index, mif->id);
   return s;
 }
+#endif
 
-static u8 *
+static __clib_unused u8 *
 format_memif_device (u8 * s, va_list * args)
 {
   u32 dev_instance = va_arg (*args, u32);
   int verbose = va_arg (*args, int);
-  uword indent = format_get_indent (s);
+  u32 indent = format_get_indent (s);
 
   s = format (s, "MEMIF interface");
   if (verbose)
@@ -71,7 +76,7 @@ format_memif_device (u8 * s, va_list * args)
   return s;
 }
 
-static u8 *
+static __clib_unused u8 *
 format_memif_tx_trace (u8 * s, va_list * args)
 {
   s = format (s, "Unimplemented...");
@@ -86,84 +91,128 @@ memif_prefetch_buffer_and_data (vlib_main_t * vm, u32 bi)
   CLIB_PREFETCH (b->data, CLIB_CACHE_LINE_BYTES, LOAD);
 }
 
+/**
+ * @brief Copy buffer to tx ring
+ *
+ * @param * vm (in)
+ * @param * node (in)
+ * @param * mif (in) pointer to memif interface
+ * @param bi (in) vlib buffer index
+ * @param * ring (in) pointer to memif ring
+ * @param * head (in/out) ring head
+ * @param mask (in) ring size - 1
+ */
+static_always_inline void
+memif_copy_buffer_to_tx_ring (vlib_main_t * vm, vlib_node_runtime_t * node,
+                              memif_if_t * mif, u32 bi, memif_ring_t * ring,
+                              u16 * head, u16 mask)
+{
+  vlib_buffer_t *b0;
+  void *mb0;
+  u32 total = 0, len;
+  u16 slot = (*head) & mask;
+
+  mb0 = memif_get_buffer (mif, ring, slot);
+  ring->desc[slot].flags = 0;
+  do
+    {
+      b0 = vlib_get_buffer (vm, bi);
+      len = b0->current_length;
+      if (PREDICT_FALSE (ring->desc[slot].buffer_length < (total + len)))
+        {
+          if (PREDICT_TRUE (total))
+            {
+              ring->desc[slot].length = total;
+              total = 0;
+              ring->desc[slot].flags |= MEMIF_DESC_FLAG_NEXT;
+              (*head)++;
+              slot = (*head) & mask;
+              mb0 = memif_get_buffer (mif, ring, slot);
+              ring->desc[slot].flags = 0;
+            }
+        }
+      if (PREDICT_TRUE (ring->desc[slot].buffer_length >= (total + len)))
+        {
+          clib_memcpy (mb0 + total, vlib_buffer_get_current (b0),
+                       CLIB_CACHE_LINE_BYTES);
+          if (len > CLIB_CACHE_LINE_BYTES)
+            clib_memcpy (mb0 + CLIB_CACHE_LINE_BYTES + total,
+                         vlib_buffer_get_current (b0) + CLIB_CACHE_LINE_BYTES,
+                         len - CLIB_CACHE_LINE_BYTES);
+          total += len;
+        }
+      else
+        {
+          vlib_error_count (vm, node->node_index, MEMIF_TX_ERROR_TRUNC_PACKET,
+                            1);
+          break;
+        }
+    }
+  while ((bi = (b0->flags & VLIB_BUFFER_NEXT_PRESENT) ? b0->next_buffer : 0));
+
+  if (PREDICT_TRUE (total))
+    {
+      ring->desc[slot].length = total;
+      (*head)++;
+    }
+}
+
 static_always_inline uword
 memif_interface_tx_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
                            vlib_frame_t * frame, memif_if_t * mif,
                            memif_ring_type_t type)
 {
-  u8 rid = 0;
-  memif_ring_t *ring = memif_get_ring (mif, type, rid);
+  u8 qid;
+  memif_ring_t *ring;
   u32 *buffers = vlib_frame_args (frame);
   u32 n_left = frame->n_vectors;
-  u16 ring_size = 1 << mif->log2_ring_size;
-  u16 mask = ring_size - 1;
+  u16 ring_size, mask;
   u16 head, tail;
   u16 free_slots;
+  u32 thread_index = vlib_get_thread_index ();
+  u8 tx_queues = vec_len (mif->tx_queues);
+  memif_queue_t *mq;
+  int n_retries = 5;
+
+  if (tx_queues < vec_len (vlib_mains))
+    {
+      ASSERT (tx_queues > 0);
+      qid = thread_index % tx_queues;
+      clib_spinlock_lock_if_init (&mif->lockp);
+    }
+  else
+    qid = thread_index;
 
-  clib_spinlock_lock_if_init (&mif->lockp);
+  mq = vec_elt_at_index (mif->tx_queues, qid);
+  ring = mq->ring;
+  ring_size = 1 << mq->log2_ring_size;
+  mask = ring_size - 1;
 
+retry:
   /* free consumed buffers */
 
   head = ring->head;
   tail = ring->tail;
 
-  if (tail > head)
-    free_slots = tail - head;
-  else
-    free_slots = ring_size - head + tail;
+  free_slots = ring_size - head + tail;
 
   while (n_left > 5 && free_slots > 1)
     {
-      if (PREDICT_TRUE (head + 5 < ring_size))
-        {
-          CLIB_PREFETCH (memif_get_buffer (mif, ring, head + 2),
-                         CLIB_CACHE_LINE_BYTES, STORE);
-          CLIB_PREFETCH (memif_get_buffer (mif, ring, head + 3),
-                         CLIB_CACHE_LINE_BYTES, STORE);
-          CLIB_PREFETCH (&ring->desc[head + 4], CLIB_CACHE_LINE_BYTES, STORE);
-          CLIB_PREFETCH (&ring->desc[head + 5], CLIB_CACHE_LINE_BYTES, STORE);
-        }
-      else
-        {
-          CLIB_PREFETCH (memif_get_buffer (mif, ring, (head + 2) % mask),
-                         CLIB_CACHE_LINE_BYTES, STORE);
-          CLIB_PREFETCH (memif_get_buffer (mif, ring, (head + 3) % mask),
-                         CLIB_CACHE_LINE_BYTES, STORE);
-          CLIB_PREFETCH (&ring->desc[(head + 4) % mask],
-                         CLIB_CACHE_LINE_BYTES, STORE);
-          CLIB_PREFETCH (&ring->desc[(head + 5) % mask],
-                         CLIB_CACHE_LINE_BYTES, STORE);
-        }
-
+      CLIB_PREFETCH (memif_get_buffer (mif, ring, (head + 2) & mask),
+                     CLIB_CACHE_LINE_BYTES, STORE);
+      CLIB_PREFETCH (memif_get_buffer (mif, ring, (head + 3) & mask),
+                     CLIB_CACHE_LINE_BYTES, STORE);
+      CLIB_PREFETCH (&ring->desc[(head + 4) & mask], CLIB_CACHE_LINE_BYTES,
+                     STORE);
+      CLIB_PREFETCH (&ring->desc[(head + 5) & mask], CLIB_CACHE_LINE_BYTES,
+                     STORE);
       memif_prefetch_buffer_and_data (vm, buffers[2]);
       memif_prefetch_buffer_and_data (vm, buffers[3]);
 
-      vlib_buffer_t *b0 = vlib_get_buffer (vm, buffers[0]);
-      vlib_buffer_t *b1 = vlib_get_buffer (vm, buffers[1]);
-
-      void *mb0 = memif_get_buffer (mif, ring, head);
-      clib_memcpy (mb0, vlib_buffer_get_current (b0), CLIB_CACHE_LINE_BYTES);
-      ring->desc[head].length = b0->current_length;
-      head = (head + 1) & mask;
-
-      void *mb1 = memif_get_buffer (mif, ring, head);
-      clib_memcpy (mb1, vlib_buffer_get_current (b1), CLIB_CACHE_LINE_BYTES);
-      ring->desc[head].length = b1->current_length;
-      head = (head + 1) & mask;
-
-      if (b0->current_length > CLIB_CACHE_LINE_BYTES)
-        {
-          clib_memcpy (mb0 + CLIB_CACHE_LINE_BYTES,
-                       vlib_buffer_get_current (b0) + CLIB_CACHE_LINE_BYTES,
-                       b0->current_length - CLIB_CACHE_LINE_BYTES);
-        }
-      if (b1->current_length > CLIB_CACHE_LINE_BYTES)
-        {
-          clib_memcpy (mb1 + CLIB_CACHE_LINE_BYTES,
-                       vlib_buffer_get_current (b1) + CLIB_CACHE_LINE_BYTES,
-                       b1->current_length - CLIB_CACHE_LINE_BYTES);
-        }
-
+      memif_copy_buffer_to_tx_ring (vm, node, mif, buffers[0], ring, &head,
+                                    mask);
+      memif_copy_buffer_to_tx_ring (vm, node, mif, buffers[1], ring, &head,
+                                    mask);
 
       buffers += 2;
       n_left -= 2;
@@ -172,19 +221,8 @@ memif_interface_tx_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
 
   while (n_left && free_slots)
     {
-      vlib_buffer_t *b0 = vlib_get_buffer (vm, buffers[0]);
-      void *mb0 = memif_get_buffer (mif, ring, head);
-      clib_memcpy (mb0, vlib_buffer_get_current (b0), CLIB_CACHE_LINE_BYTES);
-
-      if (b0->current_length > CLIB_CACHE_LINE_BYTES)
-        {
-          clib_memcpy (mb0 + CLIB_CACHE_LINE_BYTES,
-                       vlib_buffer_get_current (b0) + CLIB_CACHE_LINE_BYTES,
-                       b0->current_length - CLIB_CACHE_LINE_BYTES);
-        }
-      ring->desc[head].length = b0->current_length;
-      head = (head + 1) & mask;
-
+      memif_copy_buffer_to_tx_ring (vm, node, mif, buffers[0], ring, &head,
+                                    mask);
       buffers++;
       n_left--;
       free_slots--;
@@ -193,28 +231,33 @@ memif_interface_tx_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
   CLIB_MEMORY_STORE_BARRIER ();
   ring->head = head;
 
-  clib_spinlock_unlock (&mif->lockp);
+  if (n_left && n_retries--)
+    goto retry;
+
+  clib_spinlock_unlock_if_init (&mif->lockp);
 
   if (n_left)
     {
       vlib_error_count (vm, node->node_index, MEMIF_TX_ERROR_NO_FREE_SLOTS,
                         n_left);
-      vlib_buffer_free (vm, buffers, n_left);
     }
 
-  vlib_buffer_free (vm, vlib_frame_args (frame), frame->n_vectors);
-  if (mif->interrupt_line.fd > 0)
+  if ((ring->flags & MEMIF_RING_FLAG_MASK_INT) == 0 && mq->int_fd > -1)
     {
-      u8 b = rid;
-      CLIB_UNUSED (int r) = write (mif->interrupt_line.fd, &b, sizeof (b));
+      u64 b = 1;
+      CLIB_UNUSED (int r) = write (mq->int_fd, &b, sizeof (b));
+      mq->int_count++;
     }
 
+  vlib_buffer_free (vm, vlib_frame_args (frame), frame->n_vectors);
+
   return frame->n_vectors;
 }
 
-static uword
-memif_interface_tx (vlib_main_t * vm,
-                    vlib_node_runtime_t * node, vlib_frame_t * frame)
+uword
+CLIB_MULTIARCH_FN (memif_interface_tx) (vlib_main_t * vm,
                                        vlib_node_runtime_t * node,
+                                        vlib_frame_t * frame)
 {
   memif_main_t *nm = &memif_main;
   vnet_interface_output_runtime_t *rund = (void *) node->runtime_data;
@@ -226,7 +269,7 @@ memif_interface_tx (vlib_main_t * vm,
     return memif_interface_tx_inline (vm, node, frame, mif, MEMIF_RING_M2S);
 }
 
-static void
+static __clib_unused void
 memif_set_interface_next_node (vnet_main_t * vnm, u32 hw_if_index,
                               u32 node_index)
 {
@@ -245,38 +288,46 @@ memif_set_interface_next_node (vnet_main_t * vnm, u32 hw_if_index,
   vlib_node_add_next (vlib_get_main (), memif_input_node.index, node_index);
 }
 
-static void
+static __clib_unused void
 memif_clear_hw_interface_counters (u32 instance)
 {
   /* Nothing for now */
 }
 
-static clib_error_t *
+static __clib_unused clib_error_t *
+memif_interface_rx_mode_change (vnet_main_t * vnm, u32 hw_if_index, u32 qid,
+                                vnet_hw_interface_rx_mode mode)
+{
+  memif_main_t *mm = &memif_main;
+  vnet_hw_interface_t *hw = vnet_get_hw_interface (vnm, hw_if_index);
+  memif_if_t *mif = pool_elt_at_index (mm->interfaces, hw->dev_instance);
+  memif_queue_t *mq = vec_elt_at_index (mif->rx_queues, qid);
+
+  if (mode == VNET_HW_INTERFACE_RX_MODE_POLLING)
+    mq->ring->flags |= MEMIF_RING_FLAG_MASK_INT;
+  else
+    mq->ring->flags &= ~MEMIF_RING_FLAG_MASK_INT;
+
+  return 0;
+}
+
+static __clib_unused clib_error_t *
 memif_interface_admin_up_down (vnet_main_t * vnm, u32 hw_if_index, u32 flags)
 {
-  memif_main_t *apm = &memif_main;
-  memif_msg_t msg;
+  memif_main_t *mm = &memif_main;
   vnet_hw_interface_t *hw = vnet_get_hw_interface (vnm, hw_if_index);
-  memif_if_t *mif = pool_elt_at_index (apm->interfaces, hw->dev_instance);
+  memif_if_t *mif = pool_elt_at_index (mm->interfaces, hw->dev_instance);
+  static clib_error_t *error = 0;
 
   if (flags & VNET_SW_INTERFACE_FLAG_ADMIN_UP)
     mif->flags |= MEMIF_IF_FLAG_ADMIN_UP;
   else
-    {
-      mif->flags &= ~MEMIF_IF_FLAG_ADMIN_UP;
-      if (!(mif->flags & MEMIF_IF_FLAG_DELETING)
-          && mif->connection.index != ~0)
-        {
-          msg.version = MEMIF_VERSION;
-          msg.type = MEMIF_MSG_TYPE_DISCONNECT;
-          send (mif->connection.fd, &msg, sizeof (msg), 0);
-        }
-    }
+    mif->flags &= ~MEMIF_IF_FLAG_ADMIN_UP;
 
-  return 0;
+  return error;
 }
 
-static clib_error_t *
+static __clib_unused clib_error_t *
 memif_subif_add_del_function (vnet_main_t * vnm,
                              u32 hw_if_index,
                              struct vnet_sw_interface_t *st, int is_add)
@@ -285,6 +336,7 @@ memif_subif_add_del_function (vnet_main_t * vnm,
   return 0;
 }
 
+#ifndef CLIB_MULTIARCH_VARIANT
 /* *INDENT-OFF* */
 VNET_DEVICE_CLASS (memif_device_class) = {
   .name = "memif",
@@ -298,10 +350,23 @@ VNET_DEVICE_CLASS (memif_device_class) = {
   .clear_counters = memif_clear_hw_interface_counters,
   .admin_up_down_function = memif_interface_admin_up_down,
   .subif_add_del_function = memif_subif_add_del_function,
+  .rx_mode_change_function = memif_interface_rx_mode_change,
 };
 
-VLIB_DEVICE_TX_FUNCTION_MULTIARCH(memif_device_class,
-                                  memif_interface_tx)
+#if __x86_64__
+vlib_node_function_t __clib_weak memif_interface_tx_avx512;
+vlib_node_function_t __clib_weak memif_interface_tx_avx2;
+static void __clib_constructor
+dpdk_interface_tx_multiarch_select (void)
+{
+  if (memif_interface_tx_avx512 && clib_cpu_supports_avx512f ())
+    memif_device_class.tx_function = memif_interface_tx_avx512;
+  else if (memif_interface_tx_avx2 && clib_cpu_supports_avx2 ())
+    memif_device_class.tx_function = memif_interface_tx_avx2;
+}
+#endif
+#endif
+
 /* *INDENT-ON* */
 
 /*