/*
 * Copyright (c) 2017 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
/*
 * buffer.c: allocate/free network buffers.
 *
 * Copyright (c) 2008 Eliot Dresselhaus
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

/**
 * @file
 *
 * Allocate/free network buffers.
 */

#include <rte_config.h>

#include <rte_common.h>
#include <rte_log.h>
#include <rte_memory.h>
#include <rte_memzone.h>
#include <rte_tailq.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_launch.h>
#include <rte_atomic.h>
#include <rte_cycles.h>
#include <rte_prefetch.h>
#include <rte_lcore.h>
#include <rte_per_lcore.h>
#include <rte_branch_prediction.h>
#include <rte_interrupts.h>
#include <rte_pci.h>
#include <rte_random.h>
#include <rte_debug.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_ring.h>
#include <rte_mempool.h>
#include <rte_mbuf.h>
#include <rte_version.h>

#include <vlib/vlib.h>
#include <vnet/vnet.h>
#include <dpdk/device/dpdk.h>
#include <dpdk/device/dpdk_priv.h>

STATIC_ASSERT (VLIB_BUFFER_PRE_DATA_SIZE == RTE_PKTMBUF_HEADROOM,
	       "VLIB_BUFFER_PRE_DATA_SIZE must be equal to"
	       " RTE_PKTMBUF_HEADROOM");

#define BUFFERS_PER_COPY (sizeof (vlib_copy_unit_t) / sizeof (u32))
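
/*
 * Layout sketch (assuming the standard dpdk plugin arrangement, in which
 * the vlib_buffer_t lives in the mbuf private area):
 *
 *   | struct rte_mbuf | vlib_buffer_t | pre-data / headroom | data |
 *
 * rte_mbuf_from_vlib_buffer() and vlib_buffer_from_rte_mbuf() convert
 * between the two views by stepping one struct rte_mbuf backward or
 * forward. The STATIC_ASSERT above keeps the vlib pre-data area and the
 * mbuf headroom in agreement, so both views see the same packet bytes.
 */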
static void
del_free_list (vlib_main_t * vm, vlib_buffer_free_list_t * f)
{
  u32 i;
  struct rte_mbuf *mb;
  vlib_buffer_t *b;

  for (i = 0; i < vec_len (f->unaligned_buffers); i++)
    {
      b = vlib_get_buffer (vm, f->unaligned_buffers[i]);
      mb = rte_mbuf_from_vlib_buffer (b);
      ASSERT (rte_mbuf_refcnt_read (mb) == 1);
      rte_pktmbuf_free (mb);
    }
  for (i = 0; i < vec_len (f->aligned_buffers); i++)
    {
      b = vlib_get_buffer (vm, f->aligned_buffers[i]);
      mb = rte_mbuf_from_vlib_buffer (b);
      ASSERT (rte_mbuf_refcnt_read (mb) == 1);
      rte_pktmbuf_free (mb);
    }
  vec_free (f->name);
  vec_free (f->unaligned_buffers);
  vec_free (f->aligned_buffers);
}

/* Delete buffer free list. */
static void
dpdk_buffer_delete_free_list (vlib_main_t * vm, u32 free_list_index)
{
  vlib_buffer_main_t *bm = vm->buffer_main;
  vlib_buffer_free_list_t *f;
  u32 merge_index;
  int i;

  ASSERT (os_get_cpu_number () == 0);

  f = vlib_buffer_get_free_list (vm, free_list_index);

  merge_index = vlib_buffer_get_free_list_with_size (vm, f->n_data_bytes);
  if (merge_index != ~0 && merge_index != free_list_index)
    {
      vlib_buffer_merge_free_lists (pool_elt_at_index
				    (bm->buffer_free_list_pool, merge_index),
				    f);
    }

  del_free_list (vm, f);

  /* Poison it. */
  memset (f, 0xab, sizeof (f[0]));

  pool_put (bm->buffer_free_list_pool, f);

  for (i = 1; i < vec_len (vlib_mains); i++)
    {
      bm = vlib_mains[i]->buffer_main;
      f = vlib_buffer_get_free_list (vlib_mains[i], free_list_index);
      memset (f, 0xab, sizeof (f[0]));
      pool_put (bm->buffer_free_list_pool, f);
    }
}

/* Make sure free list has at least given number of free buffers. */
static uword
fill_free_list (vlib_main_t * vm,
		vlib_buffer_free_list_t * fl, uword min_free_buffers)
{
  dpdk_main_t *dm = &dpdk_main;
  vlib_buffer_t *b;
  int n, i;
  u32 bi;
  unsigned socket_id = rte_socket_id ();
  struct rte_mempool *rmp = dm->pktmbuf_pools[socket_id];
  struct rte_mbuf *mb;

  /* Too early? */
  if (PREDICT_FALSE (rmp == 0))
    return 0;

  vlib_buffer_free_list_trim_aligned (fl);

  /* Already have enough free buffers on free list? */
  n = min_free_buffers - vec_len (fl->aligned_buffers);
  if (n <= 0)
    return min_free_buffers;

  /* Always allocate round number of buffers. */
  n = round_pow2 (n, BUFFERS_PER_COPY);

  /* Always allocate new buffers in reasonably large sized chunks. */
  n = clib_max (n, fl->min_n_buffers_each_physmem_alloc);

  vec_validate (vm->mbuf_alloc_list, n - 1);

  if (rte_mempool_get_bulk (rmp, vm->mbuf_alloc_list, n) < 0)
    return 0;

  _vec_len (vm->mbuf_alloc_list) = n;

  for (i = 0; i < n; i++)
    {
      mb = vm->mbuf_alloc_list[i];

      ASSERT (rte_mbuf_refcnt_read (mb) == 0);
      rte_mbuf_refcnt_set (mb, 1);

      b = vlib_buffer_from_rte_mbuf (mb);
      bi = vlib_get_buffer_index (vm, b);

      vec_add1_aligned (fl->aligned_buffers, bi, sizeof (vlib_copy_unit_t));

      vlib_buffer_init_for_free_list (b, fl);

      if (fl->buffer_init_function)
	fl->buffer_init_function (vm, fl, &bi, 1);
    }

  fl->n_alloc += n;

  return n;
}

always_inline uword
copy_alignment (u32 * x)
{
  return (pointer_to_uword (x) / sizeof (x[0])) % BUFFERS_PER_COPY;
}

static u32
alloc_from_free_list (vlib_main_t * vm,
		      vlib_buffer_free_list_t * free_list,
		      u32 * alloc_buffers, u32 n_alloc_buffers)
{
  u32 *dst, *u_src;
  uword u_len, n_left;
  uword n_unaligned_start, n_unaligned_end, n_filled;

  n_left = n_alloc_buffers;
  dst = alloc_buffers;
  n_unaligned_start = ((BUFFERS_PER_COPY - copy_alignment (dst))
		       & (BUFFERS_PER_COPY - 1));

  n_filled = fill_free_list (vm, free_list, n_alloc_buffers);
  if (n_filled == 0)
    return 0;

  n_left = n_filled < n_left ? n_filled : n_left;
  n_alloc_buffers = n_left;

  if (n_unaligned_start >= n_left)
    {
      n_unaligned_start = n_left;
      n_unaligned_end = 0;
    }
  else
    n_unaligned_end = copy_alignment (dst + n_alloc_buffers);

  vlib_buffer_free_list_fill_unaligned (vm, free_list,
					n_unaligned_start + n_unaligned_end);

  u_len = vec_len (free_list->unaligned_buffers);
  u_src = free_list->unaligned_buffers + u_len - 1;

  if (n_unaligned_start)
    {
      uword n_copy = n_unaligned_start;
      if (n_copy > n_left)
	n_copy = n_left;
      n_left -= n_copy;

      while (n_copy > 0)
	{
	  *dst++ = *u_src--;
	  n_copy--;
	  u_len--;
	}

      /* Now dst should be aligned. */
      if (n_left > 0)
	ASSERT (pointer_to_uword (dst) % sizeof (vlib_copy_unit_t) == 0);
    }

  /* Aligned copy. */
  {
    vlib_copy_unit_t *d, *s;
    uword n_copy;

    if (vec_len (free_list->aligned_buffers)
	< ((n_left / BUFFERS_PER_COPY) * BUFFERS_PER_COPY))
      abort ();

    n_copy = n_left / BUFFERS_PER_COPY;
    n_left = n_left % BUFFERS_PER_COPY;

    /* Remove buffers from aligned free list. */
    _vec_len (free_list->aligned_buffers) -= n_copy * BUFFERS_PER_COPY;

    s = (vlib_copy_unit_t *) vec_end (free_list->aligned_buffers);
    d = (vlib_copy_unit_t *) dst;

    /* Fast path loop. */
    while (n_copy >= 4)
      {
	d[0] = s[0];
	d[1] = s[1];
	d[2] = s[2];
	d[3] = s[3];
	n_copy -= 4;
	s += 4;
	d += 4;
      }

    while (n_copy >= 1)
      {
	d[0] = s[0];
	n_copy -= 1;
	s += 1;
	d += 1;
      }

    dst = (void *) d;
  }

  /* Unaligned copy. */
  ASSERT (n_unaligned_end == n_left);
  while (n_left > 0)
    {
      *dst++ = *u_src--;
      n_left--;
      u_len--;
    }

  if (!free_list->unaligned_buffers)
    ASSERT (u_len == 0);
  else
    _vec_len (free_list->unaligned_buffers) = u_len;

  return n_alloc_buffers;
}
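
/*
 * Worked example of the copy alignment above (illustrative; assumes a
 * 64-bit platform, where vlib_copy_unit_t is 8 bytes and therefore
 * BUFFERS_PER_COPY is 8 / 4 = 2): for a request of 5 indices into a dst
 * whose u32 index is odd, n_unaligned_start = (2 - 1) & 1 = 1, so one
 * index is copied from the unaligned list to align dst; the next 4
 * indices move as two vlib_copy_unit_t stores from the aligned list;
 * and n_unaligned_end = 0 because dst + 5 lands on an even index again.
 */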
/* Allocate a given number of buffers into given array.
   Returns number actually allocated which will be either zero or
   number requested. */
u32
dpdk_buffer_alloc (vlib_main_t * vm, u32 * buffers, u32 n_buffers)
{
  vlib_buffer_main_t *bm = vm->buffer_main;

  return alloc_from_free_list
    (vm,
     pool_elt_at_index (bm->buffer_free_list_pool,
			VLIB_BUFFER_DEFAULT_FREE_LIST_INDEX),
     buffers, n_buffers);
}


u32
dpdk_buffer_alloc_from_free_list (vlib_main_t * vm,
				  u32 * buffers,
				  u32 n_buffers, u32 free_list_index)
{
  vlib_buffer_main_t *bm = vm->buffer_main;
  vlib_buffer_free_list_t *f;
  f = pool_elt_at_index (bm->buffer_free_list_pool, free_list_index);
  return alloc_from_free_list (vm, f, buffers, n_buffers);
}

static_always_inline void
vlib_buffer_free_inline (vlib_main_t * vm,
			 u32 * buffers, u32 n_buffers,
			 u32 follow_buffer_next)
{
  vlib_buffer_main_t *bm = vm->buffer_main;
  vlib_buffer_free_list_t *fl;
  u32 fi;
  int i;
  u32 (*cb) (vlib_main_t * vm, u32 * buffers, u32 n_buffers,
	     u32 follow_buffer_next);

  cb = bm->buffer_free_callback;

  if (PREDICT_FALSE (cb != 0))
    n_buffers = (*cb) (vm, buffers, n_buffers, follow_buffer_next);

  if (!n_buffers)
    return;

  for (i = 0; i < n_buffers; i++)
    {
      vlib_buffer_t *b;
      struct rte_mbuf *mb;

      b = vlib_get_buffer (vm, buffers[i]);

      fl = vlib_buffer_get_buffer_free_list (vm, b, &fi);

      /* The only current use of this callback: multicast recycle */
      if (PREDICT_FALSE (fl->buffers_added_to_freelist_function != 0))
	{
	  int j;

	  vlib_buffer_add_to_free_list
	    (vm, fl, buffers[i], (b->flags & VLIB_BUFFER_RECYCLE) == 0);

	  for (j = 0; j < vec_len (bm->announce_list); j++)
	    {
	      if (fl == bm->announce_list[j])
		goto already_announced;
	    }
	  vec_add1 (bm->announce_list, fl);
	already_announced:
	  ;
	}
      else
	{
	  if (PREDICT_TRUE ((b->flags & VLIB_BUFFER_RECYCLE) == 0))
	    {
	      mb = rte_mbuf_from_vlib_buffer (b);
	      ASSERT (rte_mbuf_refcnt_read (mb) == 1);
	      rte_pktmbuf_free (mb);
	    }
	}
    }
  if (vec_len (bm->announce_list))
    {
      vlib_buffer_free_list_t *fl;
      for (i = 0; i < vec_len (bm->announce_list); i++)
	{
	  fl = bm->announce_list[i];
	  fl->buffers_added_to_freelist_function (vm, fl);
	}
      _vec_len (bm->announce_list) = 0;
    }
}

static void
dpdk_buffer_free (vlib_main_t * vm, u32 * buffers, u32 n_buffers)
{
  vlib_buffer_free_inline (vm, buffers, n_buffers,
			   /* follow_buffer_next */ 1);
}

static void
dpdk_buffer_free_no_next (vlib_main_t * vm, u32 * buffers, u32 n_buffers)
{
  vlib_buffer_free_inline (vm, buffers, n_buffers,
			   /* follow_buffer_next */ 0);
}

static void
dpdk_packet_template_init (vlib_main_t * vm,
			   void *vt,
			   void *packet_data,
			   uword n_packet_data_bytes,
			   uword min_n_buffers_each_physmem_alloc,
			   u8 * name)
{
  vlib_packet_template_t *t = (vlib_packet_template_t *) vt;

  vlib_worker_thread_barrier_sync (vm);
  memset (t, 0, sizeof (t[0]));

  vec_add (t->packet_data, packet_data, n_packet_data_bytes);

  vlib_worker_thread_barrier_release (vm);
}
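
/*
 * Caller's-eye view (a sketch, not part of this file): graph nodes do
 * not call dpdk_buffer_alloc() directly. They use the generic vlib
 * buffer API, which dispatches through the callbacks registered by
 * dpdk_buffer_init() at the bottom of this file:
 *
 *   u32 bi[32];
 *   u32 n = vlib_buffer_alloc (vm, bi, 32);  // n is 32 or 0, see above
 *   ...
 *   vlib_buffer_free (vm, bi, n);            // lands in dpdk_buffer_free()
 */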
clib_error_t *
vlib_buffer_pool_create (vlib_main_t * vm, unsigned num_mbufs,
			 unsigned socket_id)
{
  dpdk_main_t *dm = &dpdk_main;
  vlib_physmem_main_t *vpm = &vm->physmem_main;
  struct rte_mempool *rmp;
  int i;

  vec_validate_aligned (dm->pktmbuf_pools, socket_id, CLIB_CACHE_LINE_BYTES);

  /* pool already exists, nothing to do */
  if (dm->pktmbuf_pools[socket_id])
    return 0;

  u8 *pool_name = format (0, "mbuf_pool_socket%u%c", socket_id, 0);

  rmp = rte_pktmbuf_pool_create ((char *) pool_name,	/* pool name */
				 num_mbufs,	/* number of mbufs */
				 512,	/* cache size */
				 VLIB_BUFFER_HDR_SIZE,	/* priv size */
				 VLIB_BUFFER_PRE_DATA_SIZE
				 + VLIB_BUFFER_DATA_SIZE,	/* dataroom size */
				 socket_id);	/* cpu socket */

  if (rmp)
    {
      {
	uword this_pool_end;
	uword this_pool_start;
	uword this_pool_size;
	uword save_vpm_start, save_vpm_end, save_vpm_size;
	struct rte_mempool_memhdr *memhdr;

	this_pool_start = ~0ULL;
	this_pool_end = 0LL;

	STAILQ_FOREACH (memhdr, &rmp->mem_list, next)
	{
	  if (((uword) (memhdr->addr + memhdr->len)) > this_pool_end)
	    this_pool_end = (uword) (memhdr->addr + memhdr->len);
	  if (((uword) memhdr->addr) < this_pool_start)
	    this_pool_start = (uword) (memhdr->addr);
	}
	ASSERT (this_pool_start < ~0ULL && this_pool_end > 0);
	this_pool_size = this_pool_end - this_pool_start;

	if (CLIB_DEBUG > 1)
	  {
	    clib_warning ("%s: pool start %llx pool end %llx pool size %lld",
			  pool_name, this_pool_start, this_pool_end,
			  this_pool_size);
	    clib_warning
	      ("before: virtual.start %llx virtual.end %llx virtual.size %lld",
	       vpm->virtual.start, vpm->virtual.end, vpm->virtual.size);
	  }

	save_vpm_start = vpm->virtual.start;
	save_vpm_end = vpm->virtual.end;
	save_vpm_size = vpm->virtual.size;

	if ((this_pool_start < vpm->virtual.start)
	    || vpm->virtual.start == 0)
	  vpm->virtual.start = this_pool_start;
	if (this_pool_end > vpm->virtual.end)
	  vpm->virtual.end = this_pool_end;

	vpm->virtual.size = vpm->virtual.end - vpm->virtual.start;

	if (CLIB_DEBUG > 1)
	  {
	    clib_warning
	      ("after: virtual.start %llx virtual.end %llx virtual.size %lld",
	       vpm->virtual.start, vpm->virtual.end, vpm->virtual.size);
	  }

	/* check if fits into buffer index range */
	if ((u64) vpm->virtual.size >
	    ((u64) 1 << (32 + CLIB_LOG2_CACHE_LINE_BYTES)))
	  {
	    clib_warning ("physmem: virtual size out of range!");
	    vpm->virtual.start = save_vpm_start;
	    vpm->virtual.end = save_vpm_end;
	    vpm->virtual.size = save_vpm_size;
	    rmp = 0;
	  }
      }
      if (rmp)
	{
	  dm->pktmbuf_pools[socket_id] = rmp;
	  vec_free (pool_name);
	  return 0;
	}
    }

  vec_free (pool_name);

  /* no usable pool for this socket, try to use pool from another one */
  for (i = 0; i < vec_len (dm->pktmbuf_pools); i++)
    {
      if (dm->pktmbuf_pools[i])
	{
	  clib_warning ("WARNING: Failed to allocate mempool for CPU socket "
			"%u. Threads running on socket %u will use socket %u "
			"mempool.", socket_id, socket_id, i);
	  dm->pktmbuf_pools[socket_id] = dm->pktmbuf_pools[i];
	  return 0;
	}
    }

  return clib_error_return (0, "failed to allocate mempool on socket %u",
			    socket_id);
}
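
/*
 * Why the size check above: a vlib buffer index is a 32-bit offset from
 * vpm->virtual.start in units of CLIB_CACHE_LINE_BYTES, so the widest
 * virtual region it can address is
 *
 *   2^32 * CLIB_CACHE_LINE_BYTES == 2^(32 + CLIB_LOG2_CACHE_LINE_BYTES)
 *
 * bytes, e.g. 256 GB with 64-byte cache lines. A mempool that would
 * grow the tracked region beyond that span cannot be indexed, so the
 * saved bounds are restored and the pool is discarded.
 */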
" "Threads running on socket %u will use socket %u mempool.", socket_id, socket_id, i); dm->pktmbuf_pools[socket_id] = dm->pktmbuf_pools[i]; return 0; } } return clib_error_return (0, "failed to allocate mempool on socket %u", socket_id); } #if CLIB_DEBUG > 0 u32 *vlib_buffer_state_validation_lock; uword *vlib_buffer_state_validation_hash; void *vlib_buffer_state_heap; static clib_error_t * buffer_state_validation_init (vlib_main_t * vm) { void *oldheap; vlib_buffer_state_heap = mheap_alloc (0, 10 << 20); oldheap = clib_mem_set_heap (vlib_buffer_state_heap); vlib_buffer_state_validation_hash = hash_create (0, sizeof (uword)); vec_validate_aligned (vlib_buffer_state_validation_lock, 0, CLIB_CACHE_LINE_BYTES); clib_mem_set_heap (oldheap); return 0; } VLIB_INIT_FUNCTION (buffer_state_validation_init); #endif static vlib_buffer_callbacks_t callbacks = { .vlib_buffer_alloc_cb = &dpdk_buffer_alloc, .vlib_buffer_alloc_from_free_list_cb = &dpdk_buffer_alloc_from_free_list, .vlib_buffer_free_cb = &dpdk_buffer_free, .vlib_buffer_free_no_next_cb = &dpdk_buffer_free_no_next, .vlib_packet_template_init_cb = &dpdk_packet_template_init, .vlib_buffer_delete_free_list_cb = &dpdk_buffer_delete_free_list, }; static clib_error_t * dpdk_buffer_init (vlib_main_t * vm) { vlib_buffer_cb_register (vm, &callbacks); return 0; } VLIB_INIT_FUNCTION (dpdk_buffer_init); /** @endcond */ /* * fd.io coding-style-patch-verification: ON * * Local Variables: * eval: (c-set-style "gnu") * End: */