X-Git-Url: https://gerrit.fd.io/r/gitweb?a=blobdiff_plain;f=src%2Fplugins%2Fdpdk%2Fbuffer.c;h=d7a791613863a156fdead2e04c04a6a8138b0110;hb=b6e8b1a7c8bf9f9fbd05cdc3c90111d9e7a6897b;hp=11d8cd7962b7b3752153bb435e82a31dad87d1b8;hpb=336a00f7911c26da12dbeb9eefe0d38077938269;p=vpp.git diff --git a/src/plugins/dpdk/buffer.c b/src/plugins/dpdk/buffer.c index 11d8cd7962b..d7a79161386 100644 --- a/src/plugins/dpdk/buffer.c +++ b/src/plugins/dpdk/buffer.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017 Cisco and/or its affiliates. + * Copyright (c) 2017-2019 Cisco and/or its affiliates. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at: @@ -12,755 +12,446 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -/* - * buffer.c: allocate/free network buffers. - * - * Copyright (c) 2008 Eliot Dresselhaus - * - * Permission is hereby granted, free of charge, to any person obtaining - * a copy of this software and associated documentation files (the - * "Software"), to deal in the Software without restriction, including - * without limitation the rights to use, copy, modify, merge, publish, - * distribute, sublicense, and/or sell copies of the Software, and to - * permit persons to whom the Software is furnished to do so, subject to - * the following conditions: - * - * The above copyright notice and this permission notice shall be - * included in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE - * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION - * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION - * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - */ - -/** - * @file - * - * Allocate/free network buffers. 
- */
 
 #include <unistd.h>
-#include <linux/vfio.h>
-#include <sys/ioctl.h>
+#include <errno.h>
 
 #include <rte_config.h>
-
-#include <rte_common.h>
-#include <rte_log.h>
-#include <rte_memory.h>
-#include <rte_memzone.h>
-#include <rte_tailq.h>
-#include <rte_eal.h>
-#include <rte_per_lcore.h>
-#include <rte_launch.h>
-#include <rte_atomic.h>
-#include <rte_cycles.h>
-#include <rte_prefetch.h>
-#include <rte_lcore.h>
-#include <rte_per_lcore.h>
-#include <rte_branch_prediction.h>
-#include <rte_interrupts.h>
-#include <rte_pci.h>
-#include <rte_random.h>
-#include <rte_debug.h>
-#include <rte_ether.h>
-#include <rte_ethdev.h>
-#include <rte_ring.h>
-#include <rte_mempool.h>
 #include <rte_mbuf.h>
-#include <rte_version.h>
+#include <rte_ethdev.h>
+#include <rte_vfio.h>
 
 #include <vlib/vlib.h>
-#include <vnet/vnet.h>
-#include <dpdk/device/dpdk.h>
-#include <dpdk/device/dpdk_priv.h>
-#include <vlib/unix/unix.h>
-#include <vlib/pci/pci.h>
-#include <vlib/linux/vfio.h>
+#include <dpdk/buffer.h>
 
 STATIC_ASSERT (VLIB_BUFFER_PRE_DATA_SIZE == RTE_PKTMBUF_HEADROOM,
	       "VLIB_BUFFER_PRE_DATA_SIZE must be equal to RTE_PKTMBUF_HEADROOM");
 
-typedef struct
-{
-  CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
-  struct rte_mbuf ***mbuf_pending_free_list;
-
-  /* cached last pool */
-  struct rte_mempool *last_pool;
-  u8 last_buffer_pool_index;
-} dpdk_buffer_per_thread_data;
+extern struct rte_mbuf *dpdk_mbuf_template_by_pool_index;
+#ifndef CLIB_MARCH_VARIANT
+struct rte_mempool **dpdk_mempool_by_buffer_pool_index = 0;
+struct rte_mempool **dpdk_no_cache_mempool_by_buffer_pool_index = 0;
+struct rte_mbuf *dpdk_mbuf_template_by_pool_index = 0;
 
-typedef struct
+clib_error_t *
+dpdk_buffer_pool_init (vlib_main_t * vm, vlib_buffer_pool_t * bp)
 {
-  int vfio_container_fd;
-  dpdk_buffer_per_thread_data *ptd;
-} dpdk_buffer_main_t;
+  uword buffer_mem_start = vm->buffer_main->buffer_mem_start;
+  struct rte_mempool *mp, *nmp;
+  struct rte_pktmbuf_pool_private priv;
+  enum rte_iova_mode iova_mode;
+  u32 i;
+  u8 *name = 0;
 
-dpdk_buffer_main_t dpdk_buffer_main;
+  u32 elt_size =
+    sizeof (struct rte_mbuf) + sizeof (vlib_buffer_t) + bp->data_size;
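Each element of these pools carries the DPDK and VPP buffer headers back to back, which is what elt_size above accounts for and what VLIB_BUFFER_SET_EXT_HDR_SIZE at the bottom of this file advertises to vlib:

  | rte_mempool_objhdr | struct rte_mbuf | vlib_buffer_t | packet data ... |

A minimal sketch of the pointer arithmetic this layout permits; the example_* helpers are illustrative stand-ins for the real rte_mbuf_from_vlib_buffer() / vlib_buffer_from_rte_mbuf() conversions used throughout this file:

#include <rte_mbuf.h>
#include <vlib/vlib.h>

/* the mbuf header sits immediately before the vlib_buffer_t */
static inline struct rte_mbuf *
example_mbuf_from_vlib_buffer (vlib_buffer_t * b)
{
  return ((struct rte_mbuf *) b) - 1;
}

/* and the vlib_buffer_t sits immediately after the mbuf */
static inline vlib_buffer_t *
example_vlib_buffer_from_mbuf (struct rte_mbuf * mb)
{
  return (vlib_buffer_t *) (mb + 1);
}
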
-static_always_inline void
-dpdk_rte_pktmbuf_free (vlib_main_t * vm, u32 thread_index, vlib_buffer_t * b)
-{
-  vlib_buffer_t *hb = b;
-  dpdk_buffer_main_t *dbm = &dpdk_buffer_main;
-  dpdk_buffer_per_thread_data *d = vec_elt_at_index (dbm->ptd, thread_index);
-  struct rte_mbuf *mb;
-  u32 next, flags;
-  mb = rte_mbuf_from_vlib_buffer (hb);
-
-next:
-  flags = b->flags;
-  next = b->next_buffer;
-  mb = rte_mbuf_from_vlib_buffer (b);
-
-  if (PREDICT_FALSE (b->n_add_refs))
-    {
-      rte_mbuf_refcnt_update (mb, b->n_add_refs);
-      b->n_add_refs = 0;
-    }
-
-  mb = rte_pktmbuf_prefree_seg (mb);
-  if (mb)
-    {
-      if (mb->pool != d->last_pool)
-	{
-	  d->last_pool = mb->pool;
-	  dpdk_mempool_private_t *privp = rte_mempool_get_priv (d->last_pool);
-	  d->last_buffer_pool_index = privp->buffer_pool_index;
-	  vec_validate_aligned (d->mbuf_pending_free_list,
-				d->last_buffer_pool_index,
-				CLIB_CACHE_LINE_BYTES);
-	}
-      vec_add1 (d->mbuf_pending_free_list[d->last_buffer_pool_index], mb);
-    }
+  /* create empty mempools */
+  vec_validate_aligned (dpdk_mempool_by_buffer_pool_index, bp->index,
+			CLIB_CACHE_LINE_BYTES);
+  vec_validate_aligned (dpdk_no_cache_mempool_by_buffer_pool_index, bp->index,
+			CLIB_CACHE_LINE_BYTES);
 
-  if (flags & VLIB_BUFFER_NEXT_PRESENT)
+  /* normal mempool */
+  name = format (name, "vpp pool %u%c", bp->index, 0);
+  mp = rte_mempool_create_empty ((char *) name, bp->n_buffers,
+				 elt_size, 512, sizeof (priv),
+				 bp->numa_node, 0);
+  if (!mp)
     {
-      b = vlib_get_buffer (vm, next);
-      goto next;
+      vec_free (name);
+      return clib_error_return (0,
				"failed to create normal mempool for numa node %u",
				bp->index);
     }
-}
-
-#ifndef CLIB_MULTIARCH_VARIANT
-static void
-del_free_list (vlib_main_t * vm, vlib_buffer_free_list_t * f)
-{
-  u32 i;
-  vlib_buffer_t *b;
-  u32 thread_index = vlib_get_thread_index ();
-
-  for (i = 0; i < vec_len (f->buffers); i++)
+  vec_reset_length (name);
+
+  /* non-cached mempool */
+  name = format (name, "vpp pool %u (no cache)%c", bp->index, 0);
+  nmp = rte_mempool_create_empty ((char *) name, bp->n_buffers,
+				  elt_size, 0, sizeof (priv),
+				  bp->numa_node, 0);
+  if (!nmp)
     {
-      b = vlib_get_buffer (vm, f->buffers[i]);
-      dpdk_rte_pktmbuf_free (vm, thread_index, b);
+      rte_mempool_free (mp);
+      vec_free (name);
+      return clib_error_return (0,
				"failed to create non-cache mempool for numa node %u",
				bp->index);
     }
+  vec_free (name);
 
-  vec_free (f->name);
-  vec_free (f->buffers);
-  /* Poison it. */
-  memset (f, 0xab, sizeof (f[0]));
-}
-
-/* Add buffer free list. */
-static void
-dpdk_buffer_delete_free_list (vlib_main_t * vm,
-			      vlib_buffer_free_list_index_t free_list_index)
-{
-  vlib_buffer_free_list_t *f;
-  int i;
+  dpdk_mempool_by_buffer_pool_index[bp->index] = mp;
+  dpdk_no_cache_mempool_by_buffer_pool_index[bp->index] = nmp;
 
-  ASSERT (vlib_get_thread_index () == 0);
+  mp->pool_id = nmp->pool_id = bp->index;
 
-  f = vlib_buffer_get_free_list (vm, free_list_index);
+  rte_mempool_set_ops_byname (mp, "vpp", NULL);
+  rte_mempool_set_ops_byname (nmp, "vpp-no-cache", NULL);
 
-  del_free_list (vm, f);
+  /* Call the mempool priv initializer */
+  priv.mbuf_data_room_size = VLIB_BUFFER_PRE_DATA_SIZE +
+    vlib_buffer_get_default_data_size (vm);
+  priv.mbuf_priv_size = VLIB_BUFFER_HDR_SIZE;
+  rte_pktmbuf_pool_init (mp, &priv);
+  rte_pktmbuf_pool_init (nmp, &priv);
 
-  pool_put (vm->buffer_free_list_pool, f);
+  iova_mode = rte_eal_iova_mode ();
 
-  for (i = 1; i < vec_len (vlib_mains); i++)
+  /* populate mempool object buffer header */
+  for (i = 0; i < bp->n_buffers; i++)
     {
-      vlib_main_t *wvm = vlib_mains[i];
-      f = vlib_buffer_get_free_list (vlib_mains[i], free_list_index);
-      del_free_list (wvm, f);
-      pool_put (wvm->buffer_free_list_pool, f);
+      struct rte_mempool_objhdr *hdr;
+      vlib_buffer_t *b = vlib_get_buffer (vm, bp->buffers[i]);
+      struct rte_mbuf *mb = rte_mbuf_from_vlib_buffer (b);
+      hdr = (struct rte_mempool_objhdr *) RTE_PTR_SUB (mb, sizeof (*hdr));
+      hdr->mp = mp;
+      hdr->iova = (iova_mode == RTE_IOVA_VA) ?
+	pointer_to_uword (mb) : vlib_physmem_get_pa (vm, mb);
+      STAILQ_INSERT_TAIL (&mp->elt_list, hdr, next);
+      STAILQ_INSERT_TAIL (&nmp->elt_list, hdr, next);
+      mp->populated_size++;
+      nmp->populated_size++;
     }
-}
-#endif
-
-/* Make sure free list has at least given number of free buffers. */
-uword
-CLIB_MULTIARCH_FN (dpdk_buffer_fill_free_list) (vlib_main_t * vm,
-						vlib_buffer_free_list_t * fl,
-						uword min_free_buffers)
-{
-  dpdk_main_t *dm = &dpdk_main;
-  vlib_buffer_t *b0, *b1, *b2, *b3;
-  int n, i;
-  u32 bi0, bi1, bi2, bi3;
-  unsigned socket_id = rte_socket_id ();
-  struct rte_mempool *rmp = dm->pktmbuf_pools[socket_id];
-  dpdk_mempool_private_t *privp = rte_mempool_get_priv (rmp);
-  struct rte_mbuf *mb0, *mb1, *mb2, *mb3;
-  vlib_buffer_t bt;
-
-  /* Too early? */
-  if (PREDICT_FALSE (rmp == 0))
-    return 0;
+  /* call the object initializers */
+  rte_mempool_obj_iter (mp, rte_pktmbuf_init, 0);
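The sequence above — rte_mempool_create_empty(), rte_mempool_set_ops_byname(), rte_pktmbuf_pool_init(), then threading each element onto elt_list by hand — stands in for rte_mempool_populate_default(), because here the element memory belongs to a VPP physmem map rather than to DPDK. For contrast, a minimal sketch of the stock DPDK way to build an equivalent pktmbuf pool; the pool name, counts and sizes are illustrative only:

#include <rte_mbuf.h>
#include <rte_mempool.h>

static struct rte_mempool *
example_pktmbuf_pool (int numa_node)
{
  struct rte_pktmbuf_pool_private priv = {
    .mbuf_data_room_size = 2048 + RTE_PKTMBUF_HEADROOM,
    .mbuf_priv_size = 0,
  };
  unsigned elt_size = sizeof (struct rte_mbuf) + priv.mbuf_data_room_size;
  struct rte_mempool *mp;

  mp = rte_mempool_create_empty ("example pool", 8192, elt_size, 512,
				 sizeof (priv), numa_node, 0);
  if (!mp)
    return 0;

  /* stock ring-based ops instead of the "vpp" ops registered below */
  rte_mempool_set_ops_byname (mp, "ring_mp_mc", NULL);
  rte_pktmbuf_pool_init (mp, &priv);

  /* let DPDK allocate and register the element memory itself */
  if (rte_mempool_populate_default (mp) < 0)
    {
      rte_mempool_free (mp);
      return 0;
    }
  rte_mempool_obj_iter (mp, rte_pktmbuf_init, 0);
  return mp;
}
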
-  /* Already have enough free buffers on free list? */
-  n = min_free_buffers - vec_len (fl->buffers);
-  if (n <= 0)
-    return min_free_buffers;
-
-  /* Always allocate round number of buffers. */
-  n = round_pow2 (n, CLIB_CACHE_LINE_BYTES / sizeof (u32));
-
-  /* Always allocate new buffers in reasonably large sized chunks. */
-  n = clib_max (n, fl->min_n_buffers_each_alloc);
-
-  vec_validate_aligned (vm->mbuf_alloc_list, n - 1, CLIB_CACHE_LINE_BYTES);
-
-  if (rte_mempool_get_bulk (rmp, vm->mbuf_alloc_list, n) < 0)
-    return 0;
-
-  memset (&bt, 0, sizeof (vlib_buffer_t));
-  vlib_buffer_init_for_free_list (&bt, fl);
-  bt.buffer_pool_index = privp->buffer_pool_index;
-
-  _vec_len (vm->mbuf_alloc_list) = n;
-
-  i = 0;
-  int f = vec_len (fl->buffers);
-  vec_resize_aligned (fl->buffers, n, CLIB_CACHE_LINE_BYTES);
+  /* create mbuf header template from the first buffer in the pool */
+  vec_validate_aligned (dpdk_mbuf_template_by_pool_index, bp->index,
+			CLIB_CACHE_LINE_BYTES);
+  clib_memcpy (vec_elt_at_index (dpdk_mbuf_template_by_pool_index, bp->index),
+	       rte_mbuf_from_vlib_buffer (vlib_buffer_ptr_from_index
+					  (buffer_mem_start, *bp->buffers,
+					   0)), sizeof (struct rte_mbuf));
 
-  while (i < (n - 7))
+  for (i = 0; i < bp->n_buffers; i++)
     {
-      vlib_prefetch_buffer_header (vlib_buffer_from_rte_mbuf
-				   (vm->mbuf_alloc_list[i + 4]), STORE);
-      vlib_prefetch_buffer_header (vlib_buffer_from_rte_mbuf
-				   (vm->mbuf_alloc_list[i + 5]), STORE);
-      vlib_prefetch_buffer_header (vlib_buffer_from_rte_mbuf
-				   (vm->mbuf_alloc_list[i + 6]), STORE);
-      vlib_prefetch_buffer_header (vlib_buffer_from_rte_mbuf
-				   (vm->mbuf_alloc_list[i + 7]), STORE);
-
-      mb0 = vm->mbuf_alloc_list[i];
-      mb1 = vm->mbuf_alloc_list[i + 1];
-      mb2 = vm->mbuf_alloc_list[i + 2];
-      mb3 = vm->mbuf_alloc_list[i + 3];
-
-      b0 = vlib_buffer_from_rte_mbuf (mb0);
-      b1 = vlib_buffer_from_rte_mbuf (mb1);
-      b2 = vlib_buffer_from_rte_mbuf (mb2);
-      b3 = vlib_buffer_from_rte_mbuf (mb3);
-
-      bi0 = vlib_get_buffer_index (vm, b0);
-      bi1 = vlib_get_buffer_index (vm, b1);
-      bi2 = vlib_get_buffer_index (vm, b2);
-      bi3 = vlib_get_buffer_index (vm, b3);
-
-      fl->buffers[f++] = bi0;
-      fl->buffers[f++] = bi1;
-      fl->buffers[f++] = bi2;
-      fl->buffers[f++] = bi3;
-
-      clib_memcpy64_x4 (b0, b1, b2, b3, &bt);
-
-      if (fl->buffer_init_function)
-	{
-	  fl->buffer_init_function (vm, fl, &bi0, 1);
-	  fl->buffer_init_function (vm, fl, &bi1, 1);
-	  fl->buffer_init_function (vm, fl, &bi2, 1);
-	  fl->buffer_init_function (vm, fl, &bi3, 1);
-	}
-      i += 4;
+      vlib_buffer_t *b;
+      b = vlib_buffer_ptr_from_index (buffer_mem_start, bp->buffers[i], 0);
+      vlib_buffer_copy_template (b, &bp->buffer_template);
     }
-  while (i < n)
+  /* map DMA pages if at least one physical device exists */
+  if (rte_eth_dev_count_avail ())
     {
-      mb0 = vm->mbuf_alloc_list[i];
-
-      b0 = vlib_buffer_from_rte_mbuf (mb0);
-      bi0 = vlib_get_buffer_index (vm, b0);
+      uword i;
+      size_t page_sz;
+      vlib_physmem_map_t *pm;
+      int do_vfio_map = 1;
 
-      fl->buffers[f++] = bi0;
-      clib_memcpy (b0, &bt, sizeof (vlib_buffer_t));
+      pm = vlib_physmem_get_map (vm, bp->physmem_map_index);
+      page_sz = 1ULL << pm->log2_page_size;
 
-      if (fl->buffer_init_function)
-	fl->buffer_init_function (vm, fl, &bi0, 1);
-      i++;
+      for (i = 0; i < pm->n_pages; i++)
+	{
+	  char *va = ((char *) pm->base) + i * page_sz;
+	  uword pa = (iova_mode == RTE_IOVA_VA) ?
+	    pointer_to_uword (va) : pm->page_table[i];
+
+	  if (do_vfio_map &&
+	      rte_vfio_dma_map (pointer_to_uword (va), pa, page_sz))
+	    do_vfio_map = 0;
+
+	  struct rte_mempool_memhdr *memhdr;
+	  memhdr = clib_mem_alloc (sizeof (*memhdr));
+	  memhdr->mp = mp;
+	  memhdr->addr = va;
+	  memhdr->iova = pa;
+	  memhdr->len = page_sz;
+	  memhdr->free_cb = 0;
+	  memhdr->opaque = 0;
+
+	  STAILQ_INSERT_TAIL (&mp->mem_list, memhdr, next);
+	  mp->nb_mem_chunks++;
+	}
     }
 
-  fl->n_alloc += n;
-
-  return n;
+  return 0;
 }
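Whether pa above is the virtual address itself or a translated physical address depends on the EAL IOVA mode: with a working IOMMU (RTE_IOVA_VA) devices can DMA straight to virtual addresses, otherwise the IOVA must be the physical address taken from the physmem page table. A self-contained sketch of the same decision for a single page; pa_from_page_table is a hypothetical stand-in for a lookup such as pm->page_table[i]:

#include <stdint.h>
#include <rte_eal.h>
#include <rte_vfio.h>

static int
example_dma_map_page (void *va, size_t page_sz, uint64_t pa_from_page_table)
{
  uint64_t iova;

  if (rte_eal_iova_mode () == RTE_IOVA_VA)
    iova = (uintptr_t) va;	/* IOMMU remaps: IOVA == VA */
  else
    iova = pa_from_page_table;	/* no remapping: IOVA == PA */

  /* as in the loop above, a failure can be treated as non-fatal:
     the caller simply stops trying to VFIO-map further pages */
  return rte_vfio_dma_map ((uintptr_t) va, iova, page_sz);
}
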
 
-static_always_inline void
-dpdk_prefetch_buffer_by_index (vlib_main_t * vm, u32 bi)
+static int
+dpdk_ops_vpp_alloc (struct rte_mempool *mp)
 {
-  vlib_buffer_t *b;
-  struct rte_mbuf *mb;
-  b = vlib_get_buffer (vm, bi);
-  mb = rte_mbuf_from_vlib_buffer (b);
-  CLIB_PREFETCH (mb, 2 * CLIB_CACHE_LINE_BYTES, STORE);
-  CLIB_PREFETCH (b, CLIB_CACHE_LINE_BYTES, LOAD);
+  clib_warning ("");
+  return 0;
 }
 
-static_always_inline void
-recycle_or_free (vlib_main_t * vm, vlib_buffer_main_t * bm, u32 bi,
-		 vlib_buffer_t * b)
+static void
+dpdk_ops_vpp_free (struct rte_mempool *mp)
 {
-  vlib_buffer_free_list_t *fl;
-  u32 thread_index = vlib_get_thread_index ();
-  vlib_buffer_free_list_index_t fi;
-  fl = vlib_buffer_get_buffer_free_list (vm, b, &fi);
-
-  /* The only current use of this callback: multicast recycle */
-  if (PREDICT_FALSE (fl->buffers_added_to_freelist_function != 0))
-    {
-      int j;
-
-      vlib_buffer_add_to_free_list (vm, fl, bi,
-				    (b->flags & VLIB_BUFFER_RECYCLE) == 0);
-
-      for (j = 0; j < vec_len (vm->buffer_announce_list); j++)
-	{
-	  if (fl == vm->buffer_announce_list[j])
-	    goto already_announced;
-	}
-      vec_add1 (vm->buffer_announce_list, fl);
-    already_announced:
-      ;
-    }
-  else
-    {
-      if (PREDICT_TRUE ((b->flags & VLIB_BUFFER_RECYCLE) == 0))
-	dpdk_rte_pktmbuf_free (vm, thread_index, b);
-    }
+  clib_warning ("");
 }
+#endif
+
 static_always_inline void
-vlib_buffer_free_inline (vlib_main_t * vm,
-			 u32 * buffers, u32 n_buffers, u32 follow_buffer_next)
+dpdk_ops_vpp_enqueue_one (vlib_buffer_t * bt, void *obj)
 {
-  vlib_buffer_main_t *bm = &buffer_main;
-  dpdk_buffer_main_t *dbm = &dpdk_buffer_main;
-  vlib_buffer_t *b0, *b1, *b2, *b3;
-  u32 thread_index = vlib_get_thread_index ();
-  dpdk_buffer_per_thread_data *d = vec_elt_at_index (dbm->ptd, thread_index);
-  int i = 0;
-  u32 (*cb) (vlib_main_t * vm, u32 * buffers, u32 n_buffers,
-	     u32 follow_buffer_next);
+  /* Only non-replicated packets (b->ref_count == 1) expected */
 
-  cb = bm->buffer_free_callback;
+  struct rte_mbuf *mb = obj;
+  vlib_buffer_t *b = vlib_buffer_from_rte_mbuf (mb);
+  ASSERT (b->ref_count == 1);
+  ASSERT (b->buffer_pool_index == bt->buffer_pool_index);
+  vlib_buffer_copy_template (b, bt);
+}
 
-  if (PREDICT_FALSE (cb != 0))
-    n_buffers = (*cb) (vm, buffers, n_buffers, follow_buffer_next);
+int
+CLIB_MULTIARCH_FN (dpdk_ops_vpp_enqueue) (struct rte_mempool * mp,
+					  void *const *obj_table, unsigned n)
+{
+  const int batch_size = 32;
+  vlib_main_t *vm = vlib_get_main ();
+  vlib_buffer_t bt;
+  u8 buffer_pool_index = mp->pool_id;
+  vlib_buffer_pool_t *bp = vlib_get_buffer_pool (vm, buffer_pool_index);
+  u32 bufs[batch_size];
+  u32 n_left = n;
+  void *const *obj = obj_table;
 
-  if (!n_buffers)
-    return;
+  vlib_buffer_copy_template (&bt, &bp->buffer_template);
 
-  while (i + 7 < n_buffers)
+  while (n_left >= 4)
     {
-      dpdk_prefetch_buffer_by_index (vm, buffers[i + 4]);
-      dpdk_prefetch_buffer_by_index (vm, buffers[i + 5]);
-      dpdk_prefetch_buffer_by_index (vm, buffers[i + 6]);
-      dpdk_prefetch_buffer_by_index (vm, buffers[i + 7]);
-
-      b0 = vlib_get_buffer (vm, buffers[i]);
- b1 = vlib_get_buffer (vm, buffers[i + 1]); - b2 = vlib_get_buffer (vm, buffers[i + 2]); - b3 = vlib_get_buffer (vm, buffers[i + 3]); - - VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b0); - VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b1); - VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b2); - VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b3); - - recycle_or_free (vm, bm, buffers[i], b0); - recycle_or_free (vm, bm, buffers[i + 1], b1); - recycle_or_free (vm, bm, buffers[i + 2], b2); - recycle_or_free (vm, bm, buffers[i + 3], b3); - - i += 4; + dpdk_ops_vpp_enqueue_one (&bt, obj[0]); + dpdk_ops_vpp_enqueue_one (&bt, obj[1]); + dpdk_ops_vpp_enqueue_one (&bt, obj[2]); + dpdk_ops_vpp_enqueue_one (&bt, obj[3]); + obj += 4; + n_left -= 4; } - while (i < n_buffers) + + while (n_left) { - b0 = vlib_get_buffer (vm, buffers[i]); - VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b0); - recycle_or_free (vm, bm, buffers[i], b0); - i++; + dpdk_ops_vpp_enqueue_one (&bt, obj[0]); + obj += 1; + n_left -= 1; } - if (vec_len (vm->buffer_announce_list)) + + while (n >= batch_size) { - vlib_buffer_free_list_t *fl; - for (i = 0; i < vec_len (vm->buffer_announce_list); i++) - { - fl = vm->buffer_announce_list[i]; - fl->buffers_added_to_freelist_function (vm, fl); - } - _vec_len (vm->buffer_announce_list) = 0; + vlib_get_buffer_indices_with_offset (vm, (void **) obj_table, bufs, + batch_size, + sizeof (struct rte_mbuf)); + vlib_buffer_pool_put (vm, buffer_pool_index, bufs, batch_size); + n -= batch_size; + obj_table += batch_size; } - vec_foreach_index (i, d->mbuf_pending_free_list) - { - int len = vec_len (d->mbuf_pending_free_list[i]); - if (len) - { - rte_mempool_put_bulk (d->mbuf_pending_free_list[i][len - 1]->pool, - (void *) d->mbuf_pending_free_list[i], len); - vec_reset_length (d->mbuf_pending_free_list[i]); - } - } -} - -void -CLIB_MULTIARCH_FN (dpdk_buffer_free) (vlib_main_t * vm, u32 * buffers, - u32 n_buffers) -{ - vlib_buffer_free_inline (vm, buffers, n_buffers, /* follow_buffer_next */ - 1); -} + if (n) + { + vlib_get_buffer_indices_with_offset (vm, (void **) obj_table, bufs, + n, sizeof (struct rte_mbuf)); + vlib_buffer_pool_put (vm, buffer_pool_index, bufs, n); + } -void -CLIB_MULTIARCH_FN (dpdk_buffer_free_no_next) (vlib_main_t * vm, u32 * buffers, - u32 n_buffers) -{ - vlib_buffer_free_inline (vm, buffers, n_buffers, /* follow_buffer_next */ - 0); + return 0; } -#ifndef CLIB_MULTIARCH_VARIANT -static void -dpdk_packet_template_init (vlib_main_t * vm, - void *vt, - void *packet_data, - uword n_packet_data_bytes, - uword min_n_buffers_each_alloc, u8 * name) -{ - vlib_packet_template_t *t = (vlib_packet_template_t *) vt; - - vlib_worker_thread_barrier_sync (vm); - memset (t, 0, sizeof (t[0])); +CLIB_MARCH_FN_REGISTRATION (dpdk_ops_vpp_enqueue); - vec_add (t->packet_data, packet_data, n_packet_data_bytes); - - vlib_worker_thread_barrier_release (vm); -} - -static clib_error_t * -scan_vfio_fd (void *arg, u8 * path_name, u8 * file_name) +static_always_inline void +dpdk_ops_vpp_enqueue_no_cache_one (vlib_main_t * vm, struct rte_mempool *old, + struct rte_mempool *new, void *obj, + vlib_buffer_t * bt) { - dpdk_buffer_main_t *dbm = &dpdk_buffer_main; - linux_vfio_main_t *lvm = &vfio_main; - const char fn[] = "/dev/vfio/vfio"; - char buff[sizeof (fn)] = { 0 }; - int fd; - u8 *path = format (0, "%v%c", path_name, 0); - - if (readlink ((char *) path, buff, sizeof (fn)) + 1 != sizeof (fn)) - goto done; + struct rte_mbuf *mb = obj; + vlib_buffer_t *b = vlib_buffer_from_rte_mbuf (mb); - if (strncmp (fn, buff, sizeof (fn))) - goto done; - - fd = atoi ((char *) 
file_name); - if (fd != lvm->container_fd) - dbm->vfio_container_fd = fd; - -done: - vec_free (path); - return 0; + if (clib_atomic_sub_fetch (&b->ref_count, 1) == 0) + { + u32 bi = vlib_get_buffer_index (vm, b); + vlib_buffer_copy_template (b, bt); + vlib_buffer_pool_put (vm, bt->buffer_pool_index, &bi, 1); + return; + } } -clib_error_t * -dpdk_pool_create (vlib_main_t * vm, u8 * pool_name, u32 elt_size, - u32 num_elts, u32 pool_priv_size, u16 cache_size, u8 numa, - struct rte_mempool ** _mp, - vlib_physmem_region_index_t * pri) +int +CLIB_MULTIARCH_FN (dpdk_ops_vpp_enqueue_no_cache) (struct rte_mempool * cmp, + void *const *obj_table, + unsigned n) { - dpdk_buffer_main_t *dbm = &dpdk_buffer_main; + vlib_main_t *vm = vlib_get_main (); + vlib_buffer_t bt; struct rte_mempool *mp; - vlib_physmem_region_t *pr; - clib_error_t *error = 0; - u32 size, obj_size; - i32 ret; - - obj_size = rte_mempool_calc_obj_size (elt_size, 0, 0); - size = rte_mempool_xmem_size (num_elts, obj_size, 21, 0); - - error = - vlib_physmem_region_alloc (vm, (i8 *) pool_name, size, numa, - VLIB_PHYSMEM_F_HUGETLB | VLIB_PHYSMEM_F_SHARED, - pri); - if (error) - return error; - - pr = vlib_physmem_get_region (vm, pri[0]); - - mp = - rte_mempool_create_empty ((i8 *) pool_name, num_elts, elt_size, - 512, pool_priv_size, numa, 0); - if (!mp) - return clib_error_return (0, "failed to create %s", pool_name); + mp = dpdk_mempool_by_buffer_pool_index[cmp->pool_id]; + u8 buffer_pool_index = cmp->pool_id; + vlib_buffer_pool_t *bp = vlib_get_buffer_pool (vm, buffer_pool_index); + vlib_buffer_copy_template (&bt, &bp->buffer_template); - rte_mempool_set_ops_byname (mp, RTE_MBUF_DEFAULT_MEMPOOL_OPS, NULL); - - ret = - rte_mempool_populate_iova_tab (mp, pr->mem, pr->page_table, pr->n_pages, - pr->log2_page_size, NULL, NULL); - if (ret != (i32) mp->size) + while (n >= 4) { - rte_mempool_free (mp); - return clib_error_return (0, "failed to populate %s", pool_name); + dpdk_ops_vpp_enqueue_no_cache_one (vm, cmp, mp, obj_table[0], &bt); + dpdk_ops_vpp_enqueue_no_cache_one (vm, cmp, mp, obj_table[1], &bt); + dpdk_ops_vpp_enqueue_no_cache_one (vm, cmp, mp, obj_table[2], &bt); + dpdk_ops_vpp_enqueue_no_cache_one (vm, cmp, mp, obj_table[3], &bt); + obj_table += 4; + n -= 4; } - _mp[0] = mp; - - /* DPDK currently doesn't provide API to map DMA memory for empty mempool - so we are using this hack, will be nice to have at least API to get - VFIO container FD */ - if (dbm->vfio_container_fd == -1) - foreach_directory_file ("/proc/self/fd", scan_vfio_fd, 0, 0); - - if (dbm->vfio_container_fd != -1) + while (n) { - struct vfio_iommu_type1_dma_map dm = { 0 }; - int i, rv = 0; - dm.argsz = sizeof (struct vfio_iommu_type1_dma_map); - dm.flags = VFIO_DMA_MAP_FLAG_READ | VFIO_DMA_MAP_FLAG_WRITE; - - /* *INDENT-OFF* */ - vec_foreach_index (i, pr->page_table) - { - dm.vaddr = pointer_to_uword (pr->mem) + (i << pr->log2_page_size); - dm.size = 1 << pr->log2_page_size; - dm.iova = pr->page_table[i]; - if ((rv = ioctl (dbm->vfio_container_fd, VFIO_IOMMU_MAP_DMA, &dm))) - break; - } - /* *INDENT-ON* */ - if (rv != 0 && errno != EINVAL) - clib_unix_warning ("ioctl(VFIO_IOMMU_MAP_DMA) pool '%s'", pool_name); + dpdk_ops_vpp_enqueue_no_cache_one (vm, cmp, mp, obj_table[0], &bt); + obj_table += 1; + n -= 1; } return 0; } -clib_error_t * -dpdk_buffer_pool_create (vlib_main_t * vm, unsigned num_mbufs, - unsigned socket_id) -{ - dpdk_main_t *dm = &dpdk_main; - struct rte_mempool *rmp; - dpdk_mempool_private_t priv; - vlib_physmem_region_index_t pri; - clib_error_t *error 
= 0;
-  u8 *pool_name;
-  u32 elt_size, i;
-
-  vec_validate_aligned (dm->pktmbuf_pools, socket_id, CLIB_CACHE_LINE_BYTES);
-
-  /* pool already exists, nothing to do */
-  if (dm->pktmbuf_pools[socket_id])
-    return 0;
-
-  pool_name = format (0, "dpdk_mbuf_pool_socket%u%c", socket_id, 0);
-
-  elt_size = sizeof (struct rte_mbuf) +
-    VLIB_BUFFER_HDR_SIZE /* priv size */ +
-    VLIB_BUFFER_PRE_DATA_SIZE + VLIB_BUFFER_DATA_SIZE;	/*data room size */
+CLIB_MARCH_FN_REGISTRATION (dpdk_ops_vpp_enqueue_no_cache);
 
-  error =
-    dpdk_pool_create (vm, pool_name, elt_size, num_mbufs,
-		      sizeof (dpdk_mempool_private_t), 512, socket_id,
-		      &rmp, &pri);
-
-  vec_free (pool_name);
-
-  if (!error)
+static_always_inline void
+dpdk_mbuf_init_from_template (struct rte_mbuf **mba, struct rte_mbuf *mt,
+			      int count)
+{
+  /* Assumptions about rte_mbuf layout */
+  STATIC_ASSERT_OFFSET_OF (struct rte_mbuf, buf_addr, 0);
+  STATIC_ASSERT_OFFSET_OF (struct rte_mbuf, buf_iova, 8);
+  STATIC_ASSERT_SIZEOF_ELT (struct rte_mbuf, buf_iova, 8);
+  STATIC_ASSERT_SIZEOF (struct rte_mbuf, 128);
+
+  while (count--)
     {
-      priv.mbp_priv.mbuf_data_room_size = VLIB_BUFFER_PRE_DATA_SIZE +
-	VLIB_BUFFER_DATA_SIZE;
-      priv.mbp_priv.mbuf_priv_size = VLIB_BUFFER_HDR_SIZE;
-
-      /* call the mempool priv initializer */
-      rte_pktmbuf_pool_init (rmp, &priv);
-
-      /* call the object initializers */
-      rte_mempool_obj_iter (rmp, rte_pktmbuf_init, 0);
-
-      dpdk_mempool_private_t *privp = rte_mempool_get_priv (rmp);
-      privp->buffer_pool_index = vlib_buffer_pool_create (vm, pri, 0);
-
-      dm->pktmbuf_pools[socket_id] = rmp;
-
-      return 0;
+      struct rte_mbuf *mb = mba[0];
+      int i;
+      /* bytes 0 .. 15 hold buf_addr and buf_iova which we need to preserve */
+      /* copy bytes 16 .. 31 */
+      *((u8x16 *) mb + 1) = *((u8x16 *) mt + 1);
+
+      /* copy bytes 32 .. 127 */
+#ifdef CLIB_HAVE_VEC256
+      for (i = 1; i < 4; i++)
+	*((u8x32 *) mb + i) = *((u8x32 *) mt + i);
+#else
+      for (i = 2; i < 8; i++)
+	*((u8x16 *) mb + i) = *((u8x16 *) mt + i);
+#endif
+      mba++;
     }
+}
 
-  clib_error_report (error);
-
-  /* no usable pool for this socket, try to use pool from another one */
-  for (i = 0; i < vec_len (dm->pktmbuf_pools); i++)
+int
+CLIB_MULTIARCH_FN (dpdk_ops_vpp_dequeue) (struct rte_mempool * mp,
+					  void **obj_table, unsigned n)
+{
+  const int batch_size = 32;
+  vlib_main_t *vm = vlib_get_main ();
+  u32 bufs[batch_size], total = 0, n_alloc = 0;
+  u8 buffer_pool_index = mp->pool_id;
+  void **obj = obj_table;
+  struct rte_mbuf t = dpdk_mbuf_template_by_pool_index[buffer_pool_index];
+
+  while (n >= batch_size)
     {
-      if (dm->pktmbuf_pools[i])
-	{
-	  clib_warning ("WARNING: Failed to allocate mempool for CPU socket "
-			"%u. 
Threads running on socket %u will use socket %u " - "mempool.", socket_id, socket_id, i); - dm->pktmbuf_pools[socket_id] = dm->pktmbuf_pools[i]; - return 0; - } + n_alloc = vlib_buffer_alloc_from_pool (vm, bufs, batch_size, + buffer_pool_index); + if (n_alloc != batch_size) + goto alloc_fail; + + vlib_get_buffers_with_offset (vm, bufs, obj, batch_size, + -(i32) sizeof (struct rte_mbuf)); + dpdk_mbuf_init_from_template ((struct rte_mbuf **) obj, &t, batch_size); + total += batch_size; + obj += batch_size; + n -= batch_size; } - return clib_error_return (0, "failed to allocate mempool on socket %u", - socket_id); -} - -#if CLIB_DEBUG > 0 - -u32 *vlib_buffer_state_validation_lock; -uword *vlib_buffer_state_validation_hash; -void *vlib_buffer_state_heap; - -static clib_error_t * -buffer_state_validation_init (vlib_main_t * vm) -{ - void *oldheap; + if (n) + { + n_alloc = vlib_buffer_alloc_from_pool (vm, bufs, n, buffer_pool_index); - vlib_buffer_state_heap = mheap_alloc (0, 10 << 20); + if (n_alloc != n) + goto alloc_fail; - oldheap = clib_mem_set_heap (vlib_buffer_state_heap); + vlib_get_buffers_with_offset (vm, bufs, obj, n, + -(i32) sizeof (struct rte_mbuf)); + dpdk_mbuf_init_from_template ((struct rte_mbuf **) obj, &t, n); + } - vlib_buffer_state_validation_hash = hash_create (0, sizeof (uword)); - vec_validate_aligned (vlib_buffer_state_validation_lock, 0, - CLIB_CACHE_LINE_BYTES); - clib_mem_set_heap (oldheap); return 0; -} -VLIB_INIT_FUNCTION (buffer_state_validation_init); -#endif - -#if CLI_DEBUG -struct dpdk_validate_buf_result -{ - u32 invalid; - u32 uninitialized; -}; - -#define DPDK_TRAJECTORY_POISON 31 - -static void -dpdk_buffer_validate_trajectory (struct rte_mempool *mp, void *opaque, - void *obj, unsigned obj_idx) -{ - vlib_buffer_t *b; - struct dpdk_validate_buf_result *counter = opaque; - b = vlib_buffer_from_rte_mbuf ((struct rte_mbuf *) obj); - if (b->pre_data[0] != 0) +alloc_fail: + /* dpdk doesn't support partial alloc, so we need to return what we + already got */ + if (n_alloc) + vlib_buffer_pool_put (vm, buffer_pool_index, bufs, n_alloc); + obj = obj_table; + while (total) { - if (b->pre_data[0] == DPDK_TRAJECTORY_POISON) - counter->uninitialized++; - else - counter->invalid++; + vlib_get_buffer_indices_with_offset (vm, obj, bufs, batch_size, + sizeof (struct rte_mbuf)); + vlib_buffer_pool_put (vm, buffer_pool_index, bufs, batch_size); + + obj += batch_size; + total -= batch_size; } + return -ENOENT; } -int -dpdk_buffer_validate_trajectory_all (u32 * uninitialized) +CLIB_MARCH_FN_REGISTRATION (dpdk_ops_vpp_dequeue); + +#ifndef CLIB_MARCH_VARIANT + +static int +dpdk_ops_vpp_dequeue_no_cache (struct rte_mempool *mp, void **obj_table, + unsigned n) { - dpdk_main_t *dm = &dpdk_main; - struct dpdk_validate_buf_result counter = { 0 }; - int i; - - for (i = 0; i < vec_len (dm->pktmbuf_pools); i++) - rte_mempool_obj_iter (dm->pktmbuf_pools[i], - dpdk_buffer_validate_trajectory, &counter); - if (uninitialized) - *uninitialized = counter.uninitialized; - return counter.invalid; + clib_error ("bug"); + return 0; } -static void -dpdk_buffer_poison_trajectory (struct rte_mempool *mp, void *opaque, - void *obj, unsigned obj_idx) +static unsigned +dpdk_ops_vpp_get_count (const struct rte_mempool *mp) { - vlib_buffer_t *b; - b = vlib_buffer_from_rte_mbuf ((struct rte_mbuf *) obj); - b->pre_data[0] = DPDK_TRAJECTORY_POISON; + clib_warning (""); + return 0; } -void -dpdk_buffer_poison_trajectory_all (void) +static unsigned +dpdk_ops_vpp_get_count_no_cache (const struct rte_mempool 
*mp) { - dpdk_main_t *dm = &dpdk_main; - int i; - - for (i = 0; i < vec_len (dm->pktmbuf_pools); i++) - rte_mempool_obj_iter (dm->pktmbuf_pools[i], dpdk_buffer_poison_trajectory, - 0); + struct rte_mempool *cmp; + cmp = dpdk_no_cache_mempool_by_buffer_pool_index[mp->pool_id]; + return dpdk_ops_vpp_get_count (cmp); } -#endif -static clib_error_t * -dpdk_buffer_init (vlib_main_t * vm) +clib_error_t * +dpdk_buffer_pools_create (vlib_main_t * vm) { - dpdk_buffer_main_t *dbm = &dpdk_buffer_main; - vlib_thread_main_t *tm = vlib_get_thread_main (); - - vec_validate_aligned (dbm->ptd, tm->n_vlib_mains - 1, - CLIB_CACHE_LINE_BYTES); - - dbm->vfio_container_fd = -1; - + clib_error_t *err; + vlib_buffer_pool_t *bp; + + struct rte_mempool_ops ops = { }; + + strncpy (ops.name, "vpp", 4); + ops.alloc = dpdk_ops_vpp_alloc; + ops.free = dpdk_ops_vpp_free; + ops.get_count = dpdk_ops_vpp_get_count; + ops.enqueue = CLIB_MARCH_FN_POINTER (dpdk_ops_vpp_enqueue); + ops.dequeue = CLIB_MARCH_FN_POINTER (dpdk_ops_vpp_dequeue); + rte_mempool_register_ops (&ops); + + strncpy (ops.name, "vpp-no-cache", 13); + ops.get_count = dpdk_ops_vpp_get_count_no_cache; + ops.enqueue = CLIB_MARCH_FN_POINTER (dpdk_ops_vpp_enqueue_no_cache); + ops.dequeue = dpdk_ops_vpp_dequeue_no_cache; + rte_mempool_register_ops (&ops); + + /* *INDENT-OFF* */ + vec_foreach (bp, vm->buffer_main->buffer_pools) + if (bp->start && (err = dpdk_buffer_pool_init (vm, bp))) + return err; + /* *INDENT-ON* */ return 0; } -VLIB_INIT_FUNCTION (dpdk_buffer_init); - -/* *INDENT-OFF* */ -VLIB_BUFFER_REGISTER_CALLBACKS (dpdk, static) = { - .vlib_buffer_fill_free_list_cb = &dpdk_buffer_fill_free_list, - .vlib_buffer_free_cb = &dpdk_buffer_free, - .vlib_buffer_free_no_next_cb = &dpdk_buffer_free_no_next, - .vlib_packet_template_init_cb = &dpdk_packet_template_init, - .vlib_buffer_delete_free_list_cb = &dpdk_buffer_delete_free_list, -}; -/* *INDENT-ON* */ - -#if __x86_64__ -vlib_buffer_fill_free_list_cb_t __clib_weak dpdk_buffer_fill_free_list_avx512; -vlib_buffer_fill_free_list_cb_t __clib_weak dpdk_buffer_fill_free_list_avx2; -vlib_buffer_free_cb_t __clib_weak dpdk_buffer_free_avx512; -vlib_buffer_free_cb_t __clib_weak dpdk_buffer_free_avx2; -vlib_buffer_free_no_next_cb_t __clib_weak dpdk_buffer_free_no_next_avx512; -vlib_buffer_free_no_next_cb_t __clib_weak dpdk_buffer_free_no_next_avx2; - -static void __clib_constructor -dpdk_input_multiarch_select (void) -{ - vlib_buffer_callbacks_t *cb = &__dpdk_buffer_callbacks; - if (dpdk_buffer_fill_free_list_avx512 && clib_cpu_supports_avx512f ()) - { - cb->vlib_buffer_fill_free_list_cb = dpdk_buffer_fill_free_list_avx512; - cb->vlib_buffer_free_cb = dpdk_buffer_free_avx512; - cb->vlib_buffer_free_no_next_cb = dpdk_buffer_free_no_next_avx512; - } - else if (dpdk_buffer_fill_free_list_avx2 && clib_cpu_supports_avx2 ()) - { - cb->vlib_buffer_fill_free_list_cb = dpdk_buffer_fill_free_list_avx2; - cb->vlib_buffer_free_cb = dpdk_buffer_free_avx2; - cb->vlib_buffer_free_no_next_cb = dpdk_buffer_free_no_next_avx2; - } -} -#endif +VLIB_BUFFER_SET_EXT_HDR_SIZE (sizeof (struct rte_mempool_objhdr) + + sizeof (struct rte_mbuf)); + #endif /** @endcond */
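
For reference, the shape of a custom mempool ops table like the two registered above; DPDK routes rte_mempool_get_bulk()/rte_mempool_put_bulk() through these hooks, which is how poll-mode drivers end up allocating and freeing VPP buffers without knowing anything about vlib. Everything below is an illustrative sketch with hypothetical example_* names, not part of the patch:

#include <errno.h>
#include <rte_mempool.h>

static int
example_ops_alloc (struct rte_mempool *mp)
{
  return 0;			/* backing store is created elsewhere */
}

static void
example_ops_free (struct rte_mempool *mp)
{
}

static int
example_ops_enqueue (struct rte_mempool *mp, void *const *obj_table,
		     unsigned n)
{
  /* hand the n objects back to the external allocator; 0 on success */
  return 0;
}

static int
example_ops_dequeue (struct rte_mempool *mp, void **obj_table, unsigned n)
{
  /* fill obj_table[0 .. n-1] completely, or fail the whole request the
     way dpdk_ops_vpp_dequeue does - partial alloc is not supported */
  return -ENOENT;
}

static unsigned
example_ops_get_count (const struct rte_mempool *mp)
{
  return 0;			/* used for statistics and full/empty checks */
}

static const struct rte_mempool_ops example_ops = {
  .name = "example-ops",
  .alloc = example_ops_alloc,
  .free = example_ops_free,
  .enqueue = example_ops_enqueue,
  .dequeue = example_ops_dequeue,
  .get_count = example_ops_get_count,
};

/* same registration call the patch uses; afterwards a pool selects the
   table with rte_mempool_set_ops_byname (mp, "example-ops", NULL) */
static void
example_ops_register (void)
{
  rte_mempool_register_ops (&example_ops);
}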