X-Git-Url: https://gerrit.fd.io/r/gitweb?a=blobdiff_plain;f=src%2Fplugins%2Fdpdk%2Fbuffer.c;h=f3137a996d66c54f0b04113052de1598b6b65b2a;hb=HEAD;hp=ee63f76b0d4f57e6f952993891017bed042becb0;hpb=671e60e65635b8d030bf303c88411192c747b59e;p=vpp.git diff --git a/src/plugins/dpdk/buffer.c b/src/plugins/dpdk/buffer.c index ee63f76b0d4..f3137a996d6 100644 --- a/src/plugins/dpdk/buffer.c +++ b/src/plugins/dpdk/buffer.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017 Cisco and/or its affiliates. + * Copyright (c) 2017-2019 Cisco and/or its affiliates. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at: @@ -12,633 +12,463 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -/* - * buffer.c: allocate/free network buffers. - * - * Copyright (c) 2008 Eliot Dresselhaus - * - * Permission is hereby granted, free of charge, to any person obtaining - * a copy of this software and associated documentation files (the - * "Software"), to deal in the Software without restriction, including - * without limitation the rights to use, copy, modify, merge, publish, - * distribute, sublicense, and/or sell copies of the Software, and to - * permit persons to whom the Software is furnished to do so, subject to - * the following conditions: - * - * The above copyright notice and this permission notice shall be - * included in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE - * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION - * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION - * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - */ - -/** - * @file - * - * Allocate/free network buffers. 
- */ #include +#include #include - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include #include +#include +#include +#include #include #include -#include -#include -#include -#include +#include STATIC_ASSERT (VLIB_BUFFER_PRE_DATA_SIZE == RTE_PKTMBUF_HEADROOM, "VLIB_BUFFER_PRE_DATA_SIZE must be equal to RTE_PKTMBUF_HEADROOM"); -typedef struct -{ - CLIB_CACHE_LINE_ALIGN_MARK (cacheline0); - struct rte_mbuf **mbuf_alloc_list; -} dpdk_buffer_per_thread_data; - -typedef struct -{ - int vfio_container_fd; - dpdk_buffer_per_thread_data *ptd; -} dpdk_buffer_main_t; - -dpdk_buffer_main_t dpdk_buffer_main; +extern struct rte_mbuf *dpdk_mbuf_template_by_pool_index; +#ifndef CLIB_MARCH_VARIANT +struct rte_mempool **dpdk_mempool_by_buffer_pool_index = 0; +struct rte_mempool **dpdk_no_cache_mempool_by_buffer_pool_index = 0; +struct rte_mbuf *dpdk_mbuf_template_by_pool_index = 0; -static_always_inline void -dpdk_rte_pktmbuf_free (vlib_main_t * vm, u32 thread_index, vlib_buffer_t * b, - int maybe_next) +clib_error_t * +dpdk_buffer_pool_init (vlib_main_t * vm, vlib_buffer_pool_t * bp) { - struct rte_mbuf *mb; - u32 next, flags; - -next: - flags = b->flags; - next = b->next_buffer; - mb = rte_mbuf_from_vlib_buffer (b); + uword buffer_mem_start = vm->buffer_main->buffer_mem_start; + struct rte_mempool *mp, *nmp; + struct rte_pktmbuf_pool_private priv; + enum rte_iova_mode iova_mode; + u32 i; + u8 *name = 0; - if (PREDICT_FALSE (b->n_add_refs)) - { - rte_mbuf_refcnt_update (mb, b->n_add_refs); - b->n_add_refs = 0; - } + u32 elt_size = + sizeof (struct rte_mbuf) + sizeof (vlib_buffer_t) + bp->data_size; - if ((mb = rte_pktmbuf_prefree_seg (mb))) - rte_mempool_put (mb->pool, mb); + /* create empty mempools */ + vec_validate_aligned (dpdk_mempool_by_buffer_pool_index, bp->index, + CLIB_CACHE_LINE_BYTES); + vec_validate_aligned (dpdk_no_cache_mempool_by_buffer_pool_index, bp->index, + CLIB_CACHE_LINE_BYTES); - if (maybe_next && (flags & VLIB_BUFFER_NEXT_PRESENT)) + /* normal mempool */ + name = format (name, "vpp pool %u%c", bp->index, 0); + mp = rte_mempool_create_empty ((char *) name, bp->n_buffers, + elt_size, 512, sizeof (priv), + bp->numa_node, 0); + if (!mp) { - b = vlib_get_buffer (vm, next); - goto next; + vec_free (name); + return clib_error_return (0, + "failed to create normal mempool for numa node %u", + bp->index); } -} - -#ifndef CLIB_MARCH_VARIANT -static void -del_free_list (vlib_main_t * vm, vlib_buffer_free_list_t * f) -{ - u32 i; - vlib_buffer_t *b; - u32 thread_index = vlib_get_thread_index (); - - for (i = 0; i < vec_len (f->buffers); i++) + vec_reset_length (name); + + /* non-cached mempool */ + name = format (name, "vpp pool %u (no cache)%c", bp->index, 0); + nmp = rte_mempool_create_empty ((char *) name, bp->n_buffers, + elt_size, 0, sizeof (priv), + bp->numa_node, 0); + if (!nmp) { - b = vlib_get_buffer (vm, f->buffers[i]); - dpdk_rte_pktmbuf_free (vm, thread_index, b, 1); + rte_mempool_free (mp); + vec_free (name); + return clib_error_return (0, + "failed to create non-cache mempool for numa nude %u", + bp->index); } + vec_free (name); - vec_free (f->name); - vec_free (f->buffers); - /* Poison it. */ - clib_memset (f, 0xab, sizeof (f[0])); -} - -/* Add buffer free list. 
*/ -static void -dpdk_buffer_delete_free_list (vlib_main_t * vm, - vlib_buffer_free_list_index_t free_list_index) -{ - vlib_buffer_free_list_t *f; - int i; + dpdk_mempool_by_buffer_pool_index[bp->index] = mp; + dpdk_no_cache_mempool_by_buffer_pool_index[bp->index] = nmp; - ASSERT (vlib_get_thread_index () == 0); + mp->pool_id = nmp->pool_id = bp->index; - f = vlib_buffer_get_free_list (vm, free_list_index); + rte_mempool_set_ops_byname (mp, "vpp", NULL); + rte_mempool_set_ops_byname (nmp, "vpp-no-cache", NULL); - del_free_list (vm, f); + /* Call the mempool priv initializer */ + memset (&priv, 0, sizeof (priv)); + priv.mbuf_data_room_size = VLIB_BUFFER_PRE_DATA_SIZE + + vlib_buffer_get_default_data_size (vm); + priv.mbuf_priv_size = VLIB_BUFFER_HDR_SIZE; + rte_pktmbuf_pool_init (mp, &priv); + rte_pktmbuf_pool_init (nmp, &priv); - pool_put (vm->buffer_free_list_pool, f); + iova_mode = rte_eal_iova_mode (); - for (i = 1; i < vec_len (vlib_mains); i++) + /* populate mempool object buffer header */ + for (i = 0; i < bp->n_buffers; i++) { - vlib_main_t *wvm = vlib_mains[i]; - f = vlib_buffer_get_free_list (vlib_mains[i], free_list_index); - del_free_list (wvm, f); - pool_put (wvm->buffer_free_list_pool, f); + struct rte_mempool_objhdr *hdr; + vlib_buffer_t *b = vlib_get_buffer (vm, bp->buffers[i]); + struct rte_mbuf *mb = rte_mbuf_from_vlib_buffer (b); + hdr = (struct rte_mempool_objhdr *) RTE_PTR_SUB (mb, sizeof (*hdr)); + hdr->mp = mp; + hdr->iova = (iova_mode == RTE_IOVA_VA) ? + pointer_to_uword (mb) : vlib_physmem_get_pa (vm, mb); + STAILQ_INSERT_TAIL (&mp->elt_list, hdr, next); + STAILQ_INSERT_TAIL (&nmp->elt_list, hdr, next); + mp->populated_size++; + nmp->populated_size++; } -} +#if RTE_VERSION >= RTE_VERSION_NUM(22, 3, 0, 0) + mp->flags &= ~RTE_MEMPOOL_F_NON_IO; #endif -/* Make sure free list has at least given number of free buffers. */ -uword -CLIB_MULTIARCH_FN (dpdk_buffer_fill_free_list) (vlib_main_t * vm, - vlib_buffer_free_list_t * fl, - uword min_free_buffers) -{ - dpdk_main_t *dm = &dpdk_main; - dpdk_buffer_main_t *dbm = &dpdk_buffer_main; - struct rte_mbuf **mb; - uword n_left, first; - word n_alloc; - unsigned socket_id = rte_socket_id (); - u32 thread_index = vlib_get_thread_index (); - dpdk_buffer_per_thread_data *d = vec_elt_at_index (dbm->ptd, thread_index); - struct rte_mempool *rmp = dm->pktmbuf_pools[socket_id]; - dpdk_mempool_private_t *privp = rte_mempool_get_priv (rmp); - vlib_buffer_t bt; - u32 *bi; - - /* Too early? */ - if (PREDICT_FALSE (rmp == 0)) - return 0; - - /* Already have enough free buffers on free list? */ - n_alloc = min_free_buffers - vec_len (fl->buffers); - if (n_alloc <= 0) - return min_free_buffers; - - /* Always allocate round number of buffers. */ - n_alloc = round_pow2 (n_alloc, CLIB_CACHE_LINE_BYTES / sizeof (u32)); - - /* Always allocate new buffers in reasonably large sized chunks. 
*/ - n_alloc = clib_max (n_alloc, fl->min_n_buffers_each_alloc); - - vec_validate_aligned (d->mbuf_alloc_list, n_alloc - 1, - CLIB_CACHE_LINE_BYTES); - - if (rte_mempool_get_bulk (rmp, (void *) d->mbuf_alloc_list, n_alloc) < 0) - return 0; - - clib_memset (&bt, 0, sizeof (vlib_buffer_t)); - vlib_buffer_init_for_free_list (&bt, fl); - bt.buffer_pool_index = privp->buffer_pool_index; + /* call the object initializers */ + rte_mempool_obj_iter (mp, rte_pktmbuf_init, 0); - _vec_len (d->mbuf_alloc_list) = n_alloc; - - first = vec_len (fl->buffers); - vec_resize_aligned (fl->buffers, n_alloc, CLIB_CACHE_LINE_BYTES); - - n_left = n_alloc; - mb = d->mbuf_alloc_list; - bi = fl->buffers + first; - - ASSERT (n_left % 8 == 0); + /* create mbuf header tempate from the first buffer in the pool */ + vec_validate_aligned (dpdk_mbuf_template_by_pool_index, bp->index, + CLIB_CACHE_LINE_BYTES); + clib_memcpy (vec_elt_at_index (dpdk_mbuf_template_by_pool_index, bp->index), + rte_mbuf_from_vlib_buffer (vlib_buffer_ptr_from_index + (buffer_mem_start, *bp->buffers, + 0)), sizeof (struct rte_mbuf)); - while (n_left >= 8) + for (i = 0; i < bp->n_buffers; i++) { - if (PREDICT_FALSE (n_left < 24)) - goto no_prefetch; - - vlib_prefetch_buffer_header (vlib_buffer_from_rte_mbuf (mb[16]), STORE); - vlib_prefetch_buffer_header (vlib_buffer_from_rte_mbuf (mb[17]), STORE); - vlib_prefetch_buffer_header (vlib_buffer_from_rte_mbuf (mb[18]), STORE); - vlib_prefetch_buffer_header (vlib_buffer_from_rte_mbuf (mb[19]), STORE); - vlib_prefetch_buffer_header (vlib_buffer_from_rte_mbuf (mb[20]), STORE); - vlib_prefetch_buffer_header (vlib_buffer_from_rte_mbuf (mb[21]), STORE); - vlib_prefetch_buffer_header (vlib_buffer_from_rte_mbuf (mb[22]), STORE); - vlib_prefetch_buffer_header (vlib_buffer_from_rte_mbuf (mb[23]), STORE); - - no_prefetch: - vlib_get_buffer_indices_with_offset (vm, (void **) mb, bi, 8, - sizeof (struct rte_mbuf)); - clib_memcpy64_x4 (vlib_buffer_from_rte_mbuf (mb[0]), - vlib_buffer_from_rte_mbuf (mb[1]), - vlib_buffer_from_rte_mbuf (mb[2]), - vlib_buffer_from_rte_mbuf (mb[3]), &bt); - clib_memcpy64_x4 (vlib_buffer_from_rte_mbuf (mb[4]), - vlib_buffer_from_rte_mbuf (mb[5]), - vlib_buffer_from_rte_mbuf (mb[6]), - vlib_buffer_from_rte_mbuf (mb[7]), &bt); - - n_left -= 8; - mb += 8; - bi += 8; + vlib_buffer_t *b; + b = vlib_buffer_ptr_from_index (buffer_mem_start, bp->buffers[i], 0); + b->template = bp->buffer_template; } - if (fl->buffer_init_function) - fl->buffer_init_function (vm, fl, fl->buffers + first, n_alloc); + /* map DMA pages if at least one physical device exists */ + if (rte_eth_dev_count_avail () || rte_cryptodev_count ()) + { + uword i; + size_t page_sz; + vlib_physmem_map_t *pm; + int do_vfio_map = 1; + + pm = vlib_physmem_get_map (vm, bp->physmem_map_index); + page_sz = 1ULL << pm->log2_page_size; - fl->n_alloc += n_alloc; + for (i = 0; i < pm->n_pages; i++) + { + char *va = ((char *) pm->base) + i * page_sz; + uword pa = (iova_mode == RTE_IOVA_VA) ? 
+ pointer_to_uword (va) : pm->page_table[i]; + + if (do_vfio_map && +#if RTE_VERSION < RTE_VERSION_NUM(19, 11, 0, 0) + rte_vfio_dma_map (pointer_to_uword (va), pa, page_sz)) +#else + rte_vfio_container_dma_map (RTE_VFIO_DEFAULT_CONTAINER_FD, + pointer_to_uword (va), pa, page_sz)) +#endif + do_vfio_map = 0; + + struct rte_mempool_memhdr *memhdr; + memhdr = clib_mem_alloc (sizeof (*memhdr)); + memhdr->mp = mp; + memhdr->addr = va; + memhdr->iova = pa; + memhdr->len = page_sz; + memhdr->free_cb = 0; + memhdr->opaque = 0; + + STAILQ_INSERT_TAIL (&mp->mem_list, memhdr, next); + mp->nb_mem_chunks++; + } + } - return n_alloc; + return 0; } -static_always_inline void -dpdk_prefetch_buffer (vlib_buffer_t * b) +static int +dpdk_ops_vpp_alloc (struct rte_mempool *mp) { - struct rte_mbuf *mb; - mb = rte_mbuf_from_vlib_buffer (b); - CLIB_PREFETCH (mb, 2 * CLIB_CACHE_LINE_BYTES, LOAD); - CLIB_PREFETCH (b, CLIB_CACHE_LINE_BYTES, LOAD); + clib_warning (""); + return 0; } -static_always_inline void -recycle_or_free (vlib_main_t * vm, vlib_buffer_main_t * bm, u32 bi, - vlib_buffer_t * b) +static void +dpdk_ops_vpp_free (struct rte_mempool *mp) { - u32 thread_index = vlib_get_thread_index (); - - dpdk_rte_pktmbuf_free (vm, thread_index, b, 1); + clib_warning (""); } +#endif + static_always_inline void -vlib_buffer_free_inline (vlib_main_t * vm, - u32 * buffers, u32 n_buffers, u32 follow_buffer_next) +dpdk_ops_vpp_enqueue_one (vlib_buffer_template_t *bt, void *obj) { - vlib_buffer_main_t *bm = &buffer_main; - vlib_buffer_t *bufp[n_buffers], **b = bufp; - u32 thread_index = vlib_get_thread_index (); - int i = 0; - u32 simple_mask = (VLIB_BUFFER_NON_DEFAULT_FREELIST | - VLIB_BUFFER_NEXT_PRESENT); - u32 n_left, *bi; - u32 (*cb) (vlib_main_t * vm, u32 * buffers, u32 n_buffers, - u32 follow_buffer_next); + /* Only non-replicated packets (b->ref_count == 1) expected */ - cb = bm->buffer_free_callback; - - if (PREDICT_FALSE (cb != 0)) - n_buffers = (*cb) (vm, buffers, n_buffers, follow_buffer_next); + struct rte_mbuf *mb = obj; + vlib_buffer_t *b = vlib_buffer_from_rte_mbuf (mb); + ASSERT (b->ref_count == 1); + ASSERT (b->buffer_pool_index == bt->buffer_pool_index); + b->template = *bt; +} - if (!n_buffers) - return; +int +CLIB_MULTIARCH_FN (dpdk_ops_vpp_enqueue) (struct rte_mempool * mp, + void *const *obj_table, unsigned n) +{ + const int batch_size = 32; + vlib_main_t *vm = vlib_get_main (); + vlib_buffer_template_t bt; + u8 buffer_pool_index = mp->pool_id; + vlib_buffer_pool_t *bp = vlib_get_buffer_pool (vm, buffer_pool_index); + u32 bufs[batch_size]; + u32 n_left = n; + void *const *obj = obj_table; - n_left = n_buffers; - bi = buffers; - b = bufp; - vlib_get_buffers (vm, bi, b, n_buffers); + bt = bp->buffer_template; while (n_left >= 4) { - u32 or_flags; - vlib_buffer_t **p; - - if (n_left < 16) - goto no_prefetch; - - p = b + 12; - dpdk_prefetch_buffer (p[0]); - dpdk_prefetch_buffer (p[1]); - dpdk_prefetch_buffer (p[2]); - dpdk_prefetch_buffer (p[3]); - no_prefetch: - - for (i = 0; i < 4; i++) - VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b[i]); - - or_flags = b[0]->flags | b[1]->flags | b[2]->flags | b[3]->flags; - - if (or_flags & simple_mask) - { - recycle_or_free (vm, bm, bi[0], b[0]); - recycle_or_free (vm, bm, bi[1], b[1]); - recycle_or_free (vm, bm, bi[2], b[2]); - recycle_or_free (vm, bm, bi[3], b[3]); - } - else - { - dpdk_rte_pktmbuf_free (vm, thread_index, b[0], 0); - dpdk_rte_pktmbuf_free (vm, thread_index, b[1], 0); - dpdk_rte_pktmbuf_free (vm, thread_index, b[2], 0); - dpdk_rte_pktmbuf_free (vm, 
thread_index, b[3], 0); - } - bi += 4; - b += 4; + dpdk_ops_vpp_enqueue_one (&bt, obj[0]); + dpdk_ops_vpp_enqueue_one (&bt, obj[1]); + dpdk_ops_vpp_enqueue_one (&bt, obj[2]); + dpdk_ops_vpp_enqueue_one (&bt, obj[3]); + obj += 4; n_left -= 4; } + while (n_left) { - VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b[0]); - recycle_or_free (vm, bm, bi[0], b[0]); - bi += 1; - b += 1; + dpdk_ops_vpp_enqueue_one (&bt, obj[0]); + obj += 1; n_left -= 1; } -} -void -CLIB_MULTIARCH_FN (dpdk_buffer_free) (vlib_main_t * vm, u32 * buffers, - u32 n_buffers) -{ - vlib_buffer_free_inline (vm, buffers, n_buffers, /* follow_buffer_next */ - 1); + while (n >= batch_size) + { + vlib_get_buffer_indices_with_offset (vm, (void **) obj_table, bufs, + batch_size, + sizeof (struct rte_mbuf)); + vlib_buffer_pool_put (vm, buffer_pool_index, bufs, batch_size); + n -= batch_size; + obj_table += batch_size; + } + + if (n) + { + vlib_get_buffer_indices_with_offset (vm, (void **) obj_table, bufs, + n, sizeof (struct rte_mbuf)); + vlib_buffer_pool_put (vm, buffer_pool_index, bufs, n); + } + + return 0; } -void -CLIB_MULTIARCH_FN (dpdk_buffer_free_no_next) (vlib_main_t * vm, u32 * buffers, - u32 n_buffers) +CLIB_MARCH_FN_REGISTRATION (dpdk_ops_vpp_enqueue); + +static_always_inline void +dpdk_ops_vpp_enqueue_no_cache_one (vlib_main_t *vm, struct rte_mempool *old, + struct rte_mempool *new, void *obj, + vlib_buffer_template_t *bt) { - vlib_buffer_free_inline (vm, buffers, n_buffers, /* follow_buffer_next */ - 0); + struct rte_mbuf *mb = obj; + vlib_buffer_t *b = vlib_buffer_from_rte_mbuf (mb); + + if (clib_atomic_sub_fetch (&b->ref_count, 1) == 0) + { + u32 bi = vlib_get_buffer_index (vm, b); + b->template = *bt; + vlib_buffer_pool_put (vm, bt->buffer_pool_index, &bi, 1); + return; + } } -#ifndef CLIB_MARCH_VARIANT -clib_error_t * -dpdk_pool_create (vlib_main_t * vm, u8 * pool_name, u32 elt_size, - u32 num_elts, u32 pool_priv_size, u16 cache_size, u8 numa, - struct rte_mempool **_mp, u32 * map_index) +int +CLIB_MULTIARCH_FN (dpdk_ops_vpp_enqueue_no_cache) (struct rte_mempool * cmp, + void *const *obj_table, + unsigned n) { + vlib_main_t *vm = vlib_get_main (); + vlib_buffer_template_t bt; struct rte_mempool *mp; - enum rte_iova_mode iova_mode; - dpdk_mempool_private_t priv; - vlib_physmem_map_t *pm; - clib_error_t *error = 0; - size_t min_chunk_size, align; - int map_dma = 1; - u32 size; - i32 ret; - uword i; - - mp = rte_mempool_create_empty ((char *) pool_name, num_elts, elt_size, - 512, pool_priv_size, numa, 0); - if (!mp) - return clib_error_return (0, "failed to create %s", pool_name); - - rte_mempool_set_ops_byname (mp, RTE_MBUF_DEFAULT_MEMPOOL_OPS, NULL); + mp = dpdk_mempool_by_buffer_pool_index[cmp->pool_id]; + u8 buffer_pool_index = cmp->pool_id; + vlib_buffer_pool_t *bp = vlib_get_buffer_pool (vm, buffer_pool_index); + bt = bp->buffer_template; - size = rte_mempool_op_calc_mem_size_default (mp, num_elts, 21, - &min_chunk_size, &align); - - if ((error = vlib_physmem_shared_map_create (vm, (char *) pool_name, size, - 0, numa, map_index))) + while (n >= 4) { - rte_mempool_free (mp); - return error; + dpdk_ops_vpp_enqueue_no_cache_one (vm, cmp, mp, obj_table[0], &bt); + dpdk_ops_vpp_enqueue_no_cache_one (vm, cmp, mp, obj_table[1], &bt); + dpdk_ops_vpp_enqueue_no_cache_one (vm, cmp, mp, obj_table[2], &bt); + dpdk_ops_vpp_enqueue_no_cache_one (vm, cmp, mp, obj_table[3], &bt); + obj_table += 4; + n -= 4; } - pm = vlib_physmem_get_map (vm, *map_index); - - /* Call the mempool priv initializer */ - priv.mbp_priv.mbuf_data_room_size = 
VLIB_BUFFER_PRE_DATA_SIZE + - VLIB_BUFFER_DATA_SIZE; - priv.mbp_priv.mbuf_priv_size = VLIB_BUFFER_HDR_SIZE; - rte_pktmbuf_pool_init (mp, &priv); - if (rte_eth_dev_count_avail () == 0) - map_dma = 0; - - iova_mode = rte_eal_iova_mode (); - for (i = 0; i < pm->n_pages; i++) + while (n) { - size_t page_sz = 1ULL << pm->log2_page_size; - char *va = ((char *) pm->base) + i * page_sz; - uword pa = iova_mode == RTE_IOVA_VA ? - pointer_to_uword (va) : pm->page_table[i]; - ret = rte_mempool_populate_iova (mp, va, pa, page_sz, 0, 0); - if (ret < 0) - { - rte_mempool_free (mp); - return clib_error_return (0, "failed to populate %s", pool_name); - } - /* -1 likely means there is no PCI devices assigned to vfio - container or noiommu mode is used so we stop trying */ - if (map_dma && rte_vfio_dma_map (pointer_to_uword (va), pa, page_sz)) - map_dma = 0; + dpdk_ops_vpp_enqueue_no_cache_one (vm, cmp, mp, obj_table[0], &bt); + obj_table += 1; + n -= 1; } - _mp[0] = mp; - return 0; } -clib_error_t * -dpdk_buffer_pool_create (vlib_main_t * vm, unsigned num_mbufs, - unsigned socket_id) -{ - dpdk_main_t *dm = &dpdk_main; - struct rte_mempool *rmp; - clib_error_t *error = 0; - u8 *pool_name; - u32 elt_size, i; - u32 map_index; - - vec_validate_aligned (dm->pktmbuf_pools, socket_id, CLIB_CACHE_LINE_BYTES); - - /* pool already exists, nothing to do */ - if (dm->pktmbuf_pools[socket_id]) - return 0; - - pool_name = format (0, "dpdk_mbuf_pool_socket%u%c", socket_id, 0); - - elt_size = sizeof (struct rte_mbuf) + - VLIB_BUFFER_HDR_SIZE /* priv size */ + - VLIB_BUFFER_PRE_DATA_SIZE + VLIB_BUFFER_DATA_SIZE; /*data room size */ +CLIB_MARCH_FN_REGISTRATION (dpdk_ops_vpp_enqueue_no_cache); - error = dpdk_pool_create (vm, pool_name, elt_size, num_mbufs, - sizeof (dpdk_mempool_private_t), 512, socket_id, - &rmp, &map_index); - - vec_free (pool_name); +static_always_inline void +dpdk_mbuf_init_from_template (struct rte_mbuf **mba, struct rte_mbuf *mt, + int count) +{ + /* Assumptions about rte_mbuf layout */ + STATIC_ASSERT_OFFSET_OF (struct rte_mbuf, buf_addr, 0); + STATIC_ASSERT_OFFSET_OF (struct rte_mbuf, buf_iova, 8); + STATIC_ASSERT_SIZEOF_ELT (struct rte_mbuf, buf_iova, 8); + STATIC_ASSERT_SIZEOF_ELT (struct rte_mbuf, buf_iova, 8); + STATIC_ASSERT_SIZEOF (struct rte_mbuf, 128); + + while (count--) + { + struct rte_mbuf *mb = mba[0]; + int i; + /* bytes 0 .. 15 hold buf_addr and buf_iova which we need to preserve */ + /* copy bytes 16 .. 31 */ + *((u8x16 *) mb + 1) = *((u8x16 *) mt + 1); + + /* copy bytes 32 .. 
127 */ +#ifdef CLIB_HAVE_VEC256 + for (i = 1; i < 4; i++) + *((u8x32 *) mb + i) = *((u8x32 *) mt + i); +#else + for (i = 2; i < 8; i++) + *((u8x16 *) mb + i) = *((u8x16 *) mt + i); +#endif + mba++; + } +} - if (!error) +int +CLIB_MULTIARCH_FN (dpdk_ops_vpp_dequeue) (struct rte_mempool * mp, + void **obj_table, unsigned n) +{ + const int batch_size = 32; + vlib_main_t *vm = vlib_get_main (); + u32 bufs[batch_size], total = 0, n_alloc = 0; + u8 buffer_pool_index = mp->pool_id; + void **obj = obj_table; + struct rte_mbuf t = dpdk_mbuf_template_by_pool_index[buffer_pool_index]; + + while (n >= batch_size) { - /* call the object initializers */ - rte_mempool_obj_iter (rmp, rte_pktmbuf_init, 0); + n_alloc = vlib_buffer_alloc_from_pool (vm, bufs, batch_size, + buffer_pool_index); + if (n_alloc != batch_size) + goto alloc_fail; + + vlib_get_buffers_with_offset (vm, bufs, obj, batch_size, + -(i32) sizeof (struct rte_mbuf)); + dpdk_mbuf_init_from_template ((struct rte_mbuf **) obj, &t, batch_size); + total += batch_size; + obj += batch_size; + n -= batch_size; + } - dpdk_mempool_private_t *privp = rte_mempool_get_priv (rmp); - privp->buffer_pool_index = - vlib_buffer_register_physmem_map (vm, map_index); + if (n) + { + n_alloc = vlib_buffer_alloc_from_pool (vm, bufs, n, buffer_pool_index); - dm->pktmbuf_pools[socket_id] = rmp; + if (n_alloc != n) + goto alloc_fail; - return 0; + vlib_get_buffers_with_offset (vm, bufs, obj, n, + -(i32) sizeof (struct rte_mbuf)); + dpdk_mbuf_init_from_template ((struct rte_mbuf **) obj, &t, n); } - clib_error_report (error); + return 0; - /* no usable pool for this socket, try to use pool from another one */ - for (i = 0; i < vec_len (dm->pktmbuf_pools); i++) +alloc_fail: + /* dpdk doesn't support partial alloc, so we need to return what we + already got */ + if (n_alloc) + vlib_buffer_pool_put (vm, buffer_pool_index, bufs, n_alloc); + obj = obj_table; + while (total) { - if (dm->pktmbuf_pools[i]) - { - clib_warning ("WARNING: Failed to allocate mempool for CPU socket " - "%u. 
Threads running on socket %u will use socket %u " - "mempool.", socket_id, socket_id, i); - dm->pktmbuf_pools[socket_id] = dm->pktmbuf_pools[i]; - return 0; - } - } + vlib_get_buffer_indices_with_offset (vm, obj, bufs, batch_size, + sizeof (struct rte_mbuf)); + vlib_buffer_pool_put (vm, buffer_pool_index, bufs, batch_size); - return clib_error_return (0, "failed to allocate mempool on socket %u", - socket_id); + obj += batch_size; + total -= batch_size; + } + return -ENOENT; } -#if CLIB_DEBUG > 0 +CLIB_MARCH_FN_REGISTRATION (dpdk_ops_vpp_dequeue); -u32 *vlib_buffer_state_validation_lock; -uword *vlib_buffer_state_validation_hash; -void *vlib_buffer_state_heap; +#ifndef CLIB_MARCH_VARIANT -static clib_error_t * -buffer_state_validation_init (vlib_main_t * vm) +static int +dpdk_ops_vpp_dequeue_no_cache (struct rte_mempool *mp, void **obj_table, + unsigned n) { - void *oldheap; - - vlib_buffer_state_heap = - mheap_alloc_with_lock (0, 10 << 20, 0 /* locked */ ); - oldheap = clib_mem_set_heap (vlib_buffer_state_heap); - - vlib_buffer_state_validation_hash = hash_create (0, sizeof (uword)); - vec_validate_aligned (vlib_buffer_state_validation_lock, 0, - CLIB_CACHE_LINE_BYTES); - clib_mem_set_heap (oldheap); + clib_error ("bug"); return 0; } -VLIB_INIT_FUNCTION (buffer_state_validation_init); -#endif - -#if CLI_DEBUG -struct dpdk_validate_buf_result -{ - u32 invalid; - u32 uninitialized; -}; - -#define DPDK_TRAJECTORY_POISON 31 - -static void -dpdk_buffer_validate_trajectory (struct rte_mempool *mp, void *opaque, - void *obj, unsigned obj_idx) +static unsigned +dpdk_ops_vpp_get_count (const struct rte_mempool *mp) { - vlib_buffer_t *b; - struct dpdk_validate_buf_result *counter = opaque; - b = vlib_buffer_from_rte_mbuf ((struct rte_mbuf *) obj); - if (b->pre_data[0] != 0) + vlib_main_t *vm = vlib_get_main (); + if (mp) { - if (b->pre_data[0] == DPDK_TRAJECTORY_POISON) - counter->uninitialized++; - else - counter->invalid++; + vlib_buffer_pool_t *pool = vlib_get_buffer_pool (vm, mp->pool_id); + if (pool) + { + return pool->n_avail; + } } + return 0; } -int -dpdk_buffer_validate_trajectory_all (u32 * uninitialized) -{ - dpdk_main_t *dm = &dpdk_main; - struct dpdk_validate_buf_result counter = { 0 }; - int i; - - for (i = 0; i < vec_len (dm->pktmbuf_pools); i++) - rte_mempool_obj_iter (dm->pktmbuf_pools[i], - dpdk_buffer_validate_trajectory, &counter); - if (uninitialized) - *uninitialized = counter.uninitialized; - return counter.invalid; -} - -static void -dpdk_buffer_poison_trajectory (struct rte_mempool *mp, void *opaque, - void *obj, unsigned obj_idx) +static unsigned +dpdk_ops_vpp_get_count_no_cache (const struct rte_mempool *mp) { - vlib_buffer_t *b; - b = vlib_buffer_from_rte_mbuf ((struct rte_mbuf *) obj); - b->pre_data[0] = DPDK_TRAJECTORY_POISON; + struct rte_mempool *cmp; + cmp = dpdk_no_cache_mempool_by_buffer_pool_index[mp->pool_id]; + return dpdk_ops_vpp_get_count (cmp); } -void -dpdk_buffer_poison_trajectory_all (void) -{ - dpdk_main_t *dm = &dpdk_main; - int i; - - for (i = 0; i < vec_len (dm->pktmbuf_pools); i++) - rte_mempool_obj_iter (dm->pktmbuf_pools[i], dpdk_buffer_poison_trajectory, - 0); -} -#endif - -static clib_error_t * -dpdk_buffer_init (vlib_main_t * vm) +clib_error_t * +dpdk_buffer_pools_create (vlib_main_t * vm) { - dpdk_buffer_main_t *dbm = &dpdk_buffer_main; - vlib_thread_main_t *tm = vlib_get_thread_main (); - - vec_validate_aligned (dbm->ptd, tm->n_vlib_mains - 1, - CLIB_CACHE_LINE_BYTES); - - dbm->vfio_container_fd = -1; - + clib_error_t *err; + 
vlib_buffer_pool_t *bp; + + struct rte_mempool_ops ops = { }; + + strncpy (ops.name, "vpp", 4); + ops.alloc = dpdk_ops_vpp_alloc; + ops.free = dpdk_ops_vpp_free; + ops.get_count = dpdk_ops_vpp_get_count; + ops.enqueue = CLIB_MARCH_FN_POINTER (dpdk_ops_vpp_enqueue); + ops.dequeue = CLIB_MARCH_FN_POINTER (dpdk_ops_vpp_dequeue); + rte_mempool_register_ops (&ops); + + strncpy (ops.name, "vpp-no-cache", 13); + ops.get_count = dpdk_ops_vpp_get_count_no_cache; + ops.enqueue = CLIB_MARCH_FN_POINTER (dpdk_ops_vpp_enqueue_no_cache); + ops.dequeue = dpdk_ops_vpp_dequeue_no_cache; + rte_mempool_register_ops (&ops); + + vec_foreach (bp, vm->buffer_main->buffer_pools) + if (bp->start && (err = dpdk_buffer_pool_init (vm, bp))) + return err; return 0; } -VLIB_INIT_FUNCTION (dpdk_buffer_init); - -/* *INDENT-OFF* */ -VLIB_BUFFER_REGISTER_CALLBACKS (dpdk, static) = { - .vlib_buffer_fill_free_list_cb = &dpdk_buffer_fill_free_list, - .vlib_buffer_free_cb = &dpdk_buffer_free, - .vlib_buffer_free_no_next_cb = &dpdk_buffer_free_no_next, - .vlib_buffer_delete_free_list_cb = &dpdk_buffer_delete_free_list, -}; -/* *INDENT-ON* */ - -#if __x86_64__ -vlib_buffer_fill_free_list_cb_t __clib_weak dpdk_buffer_fill_free_list_avx512; -vlib_buffer_fill_free_list_cb_t __clib_weak dpdk_buffer_fill_free_list_avx2; -vlib_buffer_free_cb_t __clib_weak dpdk_buffer_free_avx512; -vlib_buffer_free_cb_t __clib_weak dpdk_buffer_free_avx2; -vlib_buffer_free_no_next_cb_t __clib_weak dpdk_buffer_free_no_next_avx512; -vlib_buffer_free_no_next_cb_t __clib_weak dpdk_buffer_free_no_next_avx2; - -static void __clib_constructor -dpdk_input_multiarch_select (void) -{ - vlib_buffer_callbacks_t *cb = &__dpdk_buffer_callbacks; - if (dpdk_buffer_fill_free_list_avx512 && clib_cpu_supports_avx512f ()) - { - cb->vlib_buffer_fill_free_list_cb = dpdk_buffer_fill_free_list_avx512; - cb->vlib_buffer_free_cb = dpdk_buffer_free_avx512; - cb->vlib_buffer_free_no_next_cb = dpdk_buffer_free_no_next_avx512; - } - else if (dpdk_buffer_fill_free_list_avx2 && clib_cpu_supports_avx2 ()) - { - cb->vlib_buffer_fill_free_list_cb = dpdk_buffer_fill_free_list_avx2; - cb->vlib_buffer_free_cb = dpdk_buffer_free_avx2; - cb->vlib_buffer_free_no_next_cb = dpdk_buffer_free_no_next_avx2; - } -} -#endif +VLIB_BUFFER_SET_EXT_HDR_SIZE (sizeof (struct rte_mempool_objhdr) + + sizeof (struct rte_mbuf)); + #endif /** @endcond */
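
Note on the mechanism (illustration only, not part of the patch): the change registers two custom mempool ops tables, "vpp" and "vpp-no-cache", and backs every VPP buffer pool with an rte_mempool bound to them, so mbuf allocation and free done by DPDK PMDs through the generic mempool API is ultimately served from VPP buffer memory via dpdk_ops_vpp_dequeue()/dpdk_ops_vpp_enqueue(). A minimal sketch of that round trip follows; it assumes 'mp' is a mempool already built by dpdk_buffer_pool_init(), and the function name example_mbuf_get_put() is hypothetical.

#include <rte_mbuf.h>
#include <rte_mempool.h>

/* Illustrative sketch only: exercise the "vpp" mempool ops through the
 * generic rte_mempool API, the same path a PMD's rx/tx code takes. */
static int
example_mbuf_get_put (struct rte_mempool *mp)
{
  struct rte_mbuf *mb;

  /* The pool's dequeue op (dpdk_ops_vpp_dequeue) allocates buffers from the
   * VPP pool and rewrites bytes 16..127 of each mbuf header from the
   * per-pool template, preserving buf_addr/buf_iova. */
  if (rte_mempool_get (mp, (void **) &mb) != 0)
    return -1;

  /* The enqueue op (dpdk_ops_vpp_enqueue) restores the vlib_buffer_t
   * template and returns the buffer to the VPP pool; the "vpp-no-cache"
   * ops are used instead when ref_count may still be shared. */
  rte_mempool_put (mp, mb);

  return 0;
}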