/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation
 */

#include <errno.h>
#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>
#include <stdarg.h>
#include <string.h>
#include <unistd.h>
#include <inttypes.h>
#include <sys/mman.h>
#include <sys/queue.h>

#include <rte_fbarray.h>
#include <rte_memory.h>
#include <rte_eal.h>
#include <rte_eal_memconfig.h>
#include <rte_errno.h>
#include <rte_log.h>

#include "eal_memalloc.h"
#include "eal_private.h"
#include "eal_internal_cfg.h"

/*
 * Try to reserve *size bytes of virtual address space with an anonymous
 * mmap(). If it is successful, return the pointer to the mmap'd area and
 * keep *size unmodified. Else, retry with a smaller zone: decrease *size
 * by page_sz until it reaches 0, in which case return NULL. Note: this
 * function returns an address which is a multiple of the requested page
 * size.
 */

#define MEMSEG_LIST_FMT "memseg-%" PRIu64 "k-%i-%i"

static void *next_baseaddr;
static uint64_t system_page_sz;

#ifdef RTE_ARCH_64
/*
 * Linux kernel uses a really high address as starting address for serving
 * mmap calls. If there are addressing limitations and IOVA mode is VA,
 * this starting address is likely too high for those devices. However, it
 * is possible to use a lower address in the process virtual address space
 * as with 64 bits there is a lot of available space.
 *
 * Current known limitations are 39 or 40 bits. Setting the starting address
 * at 4GB implies there are 508GB or 1020GB for mapping the available
 * hugepages. This is likely enough for most systems, although a device with
 * addressing limitations should call rte_mem_check_dma_mask to ensure all
 * memory is within the supported range.
 */
static uint64_t baseaddr = 0x100000000;
#endif

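/*
 * Worked example for the numbers above (illustrative): a device limited
 * to 39 bits can address 2^39 bytes = 512GB; starting the mappings at the
 * 4GB mark leaves 512GB - 4GB = 508GB for hugepages. Likewise, 40 bits
 * gives 1024GB - 4GB = 1020GB.
 */
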
#define MAX_MMAP_WITH_DEFINED_ADDR_TRIES 5
void *
eal_get_virtual_area(void *requested_addr, size_t *size,
		size_t page_sz, int flags, int mmap_flags)
{
	bool addr_is_hint, allow_shrink, unmap, no_align;
	uint64_t map_sz;
	void *mapped_addr, *aligned_addr;
	uint8_t try = 0;

	if (system_page_sz == 0)
		system_page_sz = sysconf(_SC_PAGESIZE);

	mmap_flags |= MAP_PRIVATE | MAP_ANONYMOUS;

	RTE_LOG(DEBUG, EAL, "Ask a virtual area of 0x%zx bytes\n", *size);

	addr_is_hint = (flags & EAL_VIRTUAL_AREA_ADDR_IS_HINT) > 0;
	allow_shrink = (flags & EAL_VIRTUAL_AREA_ALLOW_SHRINK) > 0;
	unmap = (flags & EAL_VIRTUAL_AREA_UNMAP) > 0;

	if (next_baseaddr == NULL && internal_config.base_virtaddr != 0 &&
			rte_eal_process_type() == RTE_PROC_PRIMARY)
		next_baseaddr = (void *) internal_config.base_virtaddr;

#ifdef RTE_ARCH_64
	if (next_baseaddr == NULL && internal_config.base_virtaddr == 0 &&
			rte_eal_process_type() == RTE_PROC_PRIMARY)
		next_baseaddr = (void *) baseaddr;
#endif
	if (requested_addr == NULL && next_baseaddr != NULL) {
		requested_addr = next_baseaddr;
		requested_addr = RTE_PTR_ALIGN(requested_addr, page_sz);
		addr_is_hint = true;
	}

	/* we don't need alignment of resulting pointer in the following cases:
	 *
	 * 1. page size is equal to system page size
	 * 2. we have a requested address, and it is page-aligned, and we will
	 *    be discarding the address if we get a different one.
	 *
	 * for all other cases, alignment is potentially necessary.
	 */
	no_align = (requested_addr != NULL &&
		requested_addr == RTE_PTR_ALIGN(requested_addr, page_sz) &&
		!addr_is_hint) ||
		page_sz == system_page_sz;

	do {
		map_sz = no_align ? *size : *size + page_sz;
		if (map_sz > SIZE_MAX) {
			RTE_LOG(ERR, EAL, "Map size too big\n");
			rte_errno = E2BIG;
			return NULL;
		}

		mapped_addr = mmap(requested_addr, (size_t)map_sz, PROT_READ,
				mmap_flags, -1, 0);
		if (mapped_addr == MAP_FAILED && allow_shrink)
			*size -= page_sz;

		if (mapped_addr != MAP_FAILED && addr_is_hint &&
				mapped_addr != requested_addr) {
			try++;
			next_baseaddr = RTE_PTR_ADD(next_baseaddr, page_sz);
			if (try <= MAX_MMAP_WITH_DEFINED_ADDR_TRIES) {
				/* hint was not used. Try with another offset */
				munmap(mapped_addr, map_sz);
				mapped_addr = MAP_FAILED;
				requested_addr = next_baseaddr;
			}
		}
	} while ((allow_shrink || addr_is_hint) &&
		mapped_addr == MAP_FAILED && *size > 0);

	/* align resulting address - if map failed, we will ignore the value
	 * anyway, so no need to add additional checks.
	 */
	aligned_addr = no_align ? mapped_addr :
			RTE_PTR_ALIGN(mapped_addr, page_sz);

	if (*size == 0) {
		RTE_LOG(ERR, EAL, "Cannot get a virtual area of any size: %s\n",
			strerror(errno));
		rte_errno = errno;
		return NULL;
	} else if (mapped_addr == MAP_FAILED) {
		RTE_LOG(ERR, EAL, "Cannot get a virtual area: %s\n",
			strerror(errno));
		/* pass errno up the call chain */
		rte_errno = errno;
		return NULL;
	} else if (requested_addr != NULL && !addr_is_hint &&
			aligned_addr != requested_addr) {
		RTE_LOG(ERR, EAL, "Cannot get a virtual area at requested address: %p (got %p)\n",
			requested_addr, aligned_addr);
		munmap(mapped_addr, map_sz);
		rte_errno = EADDRNOTAVAIL;
		return NULL;
	} else if (requested_addr != NULL && addr_is_hint &&
			aligned_addr != requested_addr) {
		RTE_LOG(WARNING, EAL, "WARNING! Base virtual address hint (%p != %p) not respected!\n",
			requested_addr, aligned_addr);
		RTE_LOG(WARNING, EAL, "   This may cause issues with mapping memory into secondary processes\n");
	} else if (next_baseaddr != NULL) {
		next_baseaddr = RTE_PTR_ADD(aligned_addr, *size);
	}

	RTE_LOG(DEBUG, EAL, "Virtual area found at %p (size = 0x%zx)\n",
		aligned_addr, *size);

	if (unmap) {
		munmap(mapped_addr, map_sz);
	} else if (!no_align) {
		void *map_end, *aligned_end;
		size_t before_len, after_len;

		/* when we reserve space with alignment, we add alignment to
		 * mapping size. On 32-bit, if 1GB alignment was requested, this
		 * would waste 1GB of address space, which is a luxury we cannot
		 * afford. So, if alignment was performed, check if any unneeded
		 * address space can be unmapped back.
		 */

		map_end = RTE_PTR_ADD(mapped_addr, (size_t)map_sz);
		aligned_end = RTE_PTR_ADD(aligned_addr, *size);

		/* unmap space before aligned mmap address */
		before_len = RTE_PTR_DIFF(aligned_addr, mapped_addr);
		if (before_len > 0)
			munmap(mapped_addr, before_len);

		/* unmap space after aligned end mmap address */
		after_len = RTE_PTR_DIFF(map_end, aligned_end);
		if (after_len > 0)
			munmap(aligned_end, after_len);
	}

	return aligned_addr;
}

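/*
 * Usage sketch for eal_get_virtual_area() (illustrative only; the size
 * and page size are hypothetical): reserve 1GB of 2MB-page-aligned
 * address space near the configured base address, keeping the
 * reservation mapped for later use.
 *
 *	size_t size = RTE_PGSIZE_1G;
 *	void *addr = eal_get_virtual_area(NULL, &size, RTE_PGSIZE_2M,
 *			EAL_VIRTUAL_AREA_ALLOW_SHRINK, 0);
 *	if (addr == NULL)
 *		return -1; // rte_errno holds the mmap failure reason
 */
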
static struct rte_memseg *
virt2memseg(const void *addr, const struct rte_memseg_list *msl)
{
	const struct rte_fbarray *arr;
	void *start, *end;
	int ms_idx;

	if (msl == NULL)
		return NULL;

	/* a memseg list was specified, check if it's the right one */
	start = msl->base_va;
	end = RTE_PTR_ADD(start, msl->len);

	if (addr < start || addr >= end)
		return NULL;

	/* now, calculate index */
	arr = &msl->memseg_arr;
	ms_idx = RTE_PTR_DIFF(addr, msl->base_va) / msl->page_sz;
	return rte_fbarray_get(arr, ms_idx);
}

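/*
 * Worked example for the index calculation above (illustrative): with
 * base_va = 0x100000000 and page_sz = 2MB (0x200000), an address of
 * 0x100400000 lies 0x400000 bytes into the list, so
 * ms_idx = 0x400000 / 0x200000 = 2.
 */
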
static struct rte_memseg_list *
virt2memseg_list(const void *addr)
{
	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
	struct rte_memseg_list *msl;
	int msl_idx;

	for (msl_idx = 0; msl_idx < RTE_MAX_MEMSEG_LISTS; msl_idx++) {
		void *start, *end;
		msl = &mcfg->memsegs[msl_idx];

		start = msl->base_va;
		end = RTE_PTR_ADD(start, msl->len);
		if (addr >= start && addr < end)
			break;
	}
	/* if we didn't find our memseg list */
	if (msl_idx == RTE_MAX_MEMSEG_LISTS)
		return NULL;
	return msl;
}

__rte_experimental struct rte_memseg_list *
rte_mem_virt2memseg_list(const void *addr)
{
	return virt2memseg_list(addr);
}

struct virtiova {
	rte_iova_t iova;
	void *virt;
};
static int
find_virt(const struct rte_memseg_list *msl __rte_unused,
		const struct rte_memseg *ms, void *arg)
{
	struct virtiova *vi = arg;
	if (vi->iova >= ms->iova && vi->iova < (ms->iova + ms->len)) {
		size_t offset = vi->iova - ms->iova;
		vi->virt = RTE_PTR_ADD(ms->addr, offset);
		/* stop the walk */
		return 1;
	}
	return 0;
}
static int
find_virt_legacy(const struct rte_memseg_list *msl __rte_unused,
		const struct rte_memseg *ms, size_t len, void *arg)
{
	struct virtiova *vi = arg;
	if (vi->iova >= ms->iova && vi->iova < (ms->iova + len)) {
		size_t offset = vi->iova - ms->iova;
		vi->virt = RTE_PTR_ADD(ms->addr, offset);
		/* stop the walk */
		return 1;
	}
	return 0;
}

__rte_experimental void *
rte_mem_iova2virt(rte_iova_t iova)
{
	struct virtiova vi;

	memset(&vi, 0, sizeof(vi));

	vi.iova = iova;
	/* for legacy mem, we can get away with scanning VA-contiguous segments,
	 * as we know they are PA-contiguous as well
	 */
	if (internal_config.legacy_mem)
		rte_memseg_contig_walk(find_virt_legacy, &vi);
	else
		rte_memseg_walk(find_virt, &vi);

	return vi.virt;
}

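/*
 * Usage sketch (illustrative; "buf" is a hypothetical pointer into
 * DPDK-managed memory): translating an IOVA back to a virtual address,
 * e.g. when decoding a completion written by hardware.
 *
 *	rte_iova_t iova = rte_mem_virt2iova(buf);
 *	void *virt = rte_mem_iova2virt(iova);
 *	// virt == buf for any address inside a registered memseg
 */
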
__rte_experimental struct rte_memseg *
rte_mem_virt2memseg(const void *addr, const struct rte_memseg_list *msl)
{
	return virt2memseg(addr, msl != NULL ? msl :
			rte_mem_virt2memseg_list(addr));
}

static int
physmem_size(const struct rte_memseg_list *msl, void *arg)
{
	uint64_t *total_len = arg;

	if (msl->external)
		return 0;

	*total_len += msl->memseg_arr.count * msl->page_sz;

	return 0;
}

/* get the total size of memory */
uint64_t
rte_eal_get_physmem_size(void)
{
	uint64_t total_len = 0;

	rte_memseg_list_walk(physmem_size, &total_len);

	return total_len;
}

static int
dump_memseg(const struct rte_memseg_list *msl, const struct rte_memseg *ms,
		void *arg)
{
	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
	int msl_idx, ms_idx, fd;
	FILE *f = arg;

	msl_idx = msl - mcfg->memsegs;
	if (msl_idx < 0 || msl_idx >= RTE_MAX_MEMSEG_LISTS)
		return -1;

	ms_idx = rte_fbarray_find_idx(&msl->memseg_arr, ms);
	if (ms_idx < 0)
		return -1;

	fd = eal_memalloc_get_seg_fd(msl_idx, ms_idx);
	fprintf(f, "Segment %i-%i: IOVA:0x%"PRIx64", len:%zu, "
			"virt:%p, socket_id:%"PRId32", "
			"hugepage_sz:%"PRIu64", nchannel:%"PRIx32", "
			"nrank:%"PRIx32" fd:%i\n",
			msl_idx, ms_idx,
			ms->iova, ms->len, ms->addr, ms->socket_id,
			ms->hugepage_sz, ms->nchannel, ms->nrank, fd);

	return 0;
}

/*
 * Defined here because it is declared in rte_memory.h, but the actual
 * implementation is in eal_common_memalloc.c, like all other memalloc
 * internals.
 */
int __rte_experimental
rte_mem_event_callback_register(const char *name, rte_mem_event_callback_t clb,
		void *arg)
{
	/* FreeBSD boots with legacy mem enabled by default */
	if (internal_config.legacy_mem) {
		RTE_LOG(DEBUG, EAL, "Registering mem event callbacks not supported\n");
		rte_errno = ENOTSUP;
		return -1;
	}
	return eal_memalloc_mem_event_callback_register(name, clb, arg);
}

int __rte_experimental
rte_mem_event_callback_unregister(const char *name, void *arg)
{
	/* FreeBSD boots with legacy mem enabled by default */
	if (internal_config.legacy_mem) {
		RTE_LOG(DEBUG, EAL, "Unregistering mem event callbacks not supported\n");
		rte_errno = ENOTSUP;
		return -1;
	}
	return eal_memalloc_mem_event_callback_unregister(name, arg);
}

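/*
 * Usage sketch (illustrative; "my_mem_event_cb" is a hypothetical
 * callback name): reacting to hotplug allocations, e.g. to set up DMA
 * mappings for newly allocated segments.
 *
 *	static void
 *	my_mem_event_cb(enum rte_mem_event event_type, const void *addr,
 *			size_t len, void *arg __rte_unused)
 *	{
 *		if (event_type == RTE_MEM_EVENT_ALLOC)
 *			printf("new memory at %p, len %zu\n", addr, len);
 *	}
 *
 *	rte_mem_event_callback_register("my-cb", my_mem_event_cb, NULL);
 *	...
 *	rte_mem_event_callback_unregister("my-cb", NULL);
 */
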
int __rte_experimental
rte_mem_alloc_validator_register(const char *name,
		rte_mem_alloc_validator_t clb, int socket_id, size_t limit)
{
	/* FreeBSD boots with legacy mem enabled by default */
	if (internal_config.legacy_mem) {
		RTE_LOG(DEBUG, EAL, "Registering mem alloc validators not supported\n");
		rte_errno = ENOTSUP;
		return -1;
	}
	return eal_memalloc_mem_alloc_validator_register(name, clb, socket_id,
			limit);
}

int __rte_experimental
rte_mem_alloc_validator_unregister(const char *name, int socket_id)
{
	/* FreeBSD boots with legacy mem enabled by default */
	if (internal_config.legacy_mem) {
		RTE_LOG(DEBUG, EAL, "Unregistering mem alloc validators not supported\n");
		rte_errno = ENOTSUP;
		return -1;
	}
	return eal_memalloc_mem_alloc_validator_unregister(name, socket_id);
}

/* Dump the physical memory layout on console */
void
rte_dump_physmem_layout(FILE *f)
{
	rte_memseg_walk(dump_memseg, f);
}

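/* Usage sketch (illustrative): rte_dump_physmem_layout(stdout); */
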
static int
check_iova(const struct rte_memseg_list *msl __rte_unused,
		const struct rte_memseg *ms, void *arg)
{
	uint64_t *mask = arg;
	rte_iova_t iova;

	/* higher address within segment */
	iova = (ms->iova + ms->len) - 1;
	if (!(iova & *mask))
		return 0;

	RTE_LOG(DEBUG, EAL, "memseg iova %"PRIx64", len %zx, out of range\n",
			ms->iova, ms->len);

	RTE_LOG(DEBUG, EAL, "\tusing dma mask %"PRIx64"\n", *mask);

	return 1;
}

#define MAX_DMA_MASK_BITS 63

/* check memseg iovas are within the required range based on dma mask */
static int __rte_experimental
check_dma_mask(uint8_t maskbits, bool thread_unsafe)
{
	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
	uint64_t mask;
	int ret;

	/* Sanity check: we only accept widths that can be managed with
	 * 64-bit variables; any higher value is almost certainly wrong.
	 */
	if (maskbits > MAX_DMA_MASK_BITS) {
		RTE_LOG(ERR, EAL, "wrong dma mask size %u (Max: %u)\n",
				maskbits, MAX_DMA_MASK_BITS);
		return -1;
	}

	/* create dma mask */
	mask = ~((1ULL << maskbits) - 1);

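	/*
	 * Worked example (illustrative): maskbits = 40 gives
	 * mask = ~((1ULL << 40) - 1) = 0xFFFFFF0000000000, so check_iova()
	 * flags any segment whose last byte has a bit set above bit 39.
	 */
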
	if (thread_unsafe)
		ret = rte_memseg_walk_thread_unsafe(check_iova, &mask);
	else
		ret = rte_memseg_walk(check_iova, &mask);

	if (ret)
		/*
		 * The DMA mask precludes hugepage usage: this device cannot
		 * be used, and there is no need to keep the mask.
		 */
		return 1;

	/*
	 * we need to keep the more restrictive maskbits for checking
	 * potential dynamic memory allocation in the future.
	 */
	mcfg->dma_maskbits = mcfg->dma_maskbits == 0 ? maskbits :
			RTE_MIN(mcfg->dma_maskbits, maskbits);

	return 0;
}

int __rte_experimental
rte_mem_check_dma_mask(uint8_t maskbits)
{
	return check_dma_mask(maskbits, false);
}

int __rte_experimental
rte_mem_check_dma_mask_thread_unsafe(uint8_t maskbits)
{
	return check_dma_mask(maskbits, true);
}

/*
 * Set dma mask to use when memory initialization is done.
 *
 * This function should ONLY be used by code executed before the memory
 * initialization. PMDs should use rte_mem_check_dma_mask if the device
 * has addressing limitations.
 */
void __rte_experimental
rte_mem_set_dma_mask(uint8_t maskbits)
{
	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;

	mcfg->dma_maskbits = mcfg->dma_maskbits == 0 ? maskbits :
			RTE_MIN(mcfg->dma_maskbits, maskbits);
}

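/*
 * Usage sketch (illustrative): a driver limited to 40-bit IOVAs checking
 * at probe time that all configured memory is reachable.
 *
 *	if (rte_mem_check_dma_mask(40) != 0) {
 *		// some hugepage lies above the 2^40 boundary; fail the
 *		// probe, or fall back to a smaller working set
 *	}
 */
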
/* return the number of memory channels */
unsigned rte_memory_get_nchannel(void)
{
	return rte_eal_get_configuration()->mem_config->nchannel;
}

/* return the number of memory ranks */
unsigned rte_memory_get_nrank(void)
{
	return rte_eal_get_configuration()->mem_config->nrank;
}

static int
rte_eal_memdevice_init(void)
{
	struct rte_config *config;

	if (rte_eal_process_type() == RTE_PROC_SECONDARY)
		return 0;

	config = rte_eal_get_configuration();
	config->mem_config->nchannel = internal_config.force_nchannel;
	config->mem_config->nrank = internal_config.force_nrank;

	return 0;
}

/* Lock page in physical memory and prevent it from being swapped. */
int
rte_mem_lock_page(const void *virt)
{
	unsigned long virtual = (unsigned long)virt;
	int page_size = getpagesize();
	unsigned long aligned = (virtual & ~(page_size - 1));
	return mlock((void *)aligned, page_size);
}

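/*
 * Worked example for the masking above (illustrative): with a 4KB page
 * size, virt = 0x7f0012345678 is masked with ~0xFFF, yielding
 * 0x7f0012345000 (the base of the page containing virt), and mlock()
 * then pins that single page.
 */
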
int __rte_experimental
rte_memseg_contig_walk_thread_unsafe(rte_memseg_contig_walk_t func, void *arg)
{
	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
	int i, ms_idx, ret = 0;

	for (i = 0; i < RTE_MAX_MEMSEG_LISTS; i++) {
		struct rte_memseg_list *msl = &mcfg->memsegs[i];
		const struct rte_memseg *ms;
		struct rte_fbarray *arr;

		if (msl->memseg_arr.count == 0)
			continue;

		arr = &msl->memseg_arr;

		ms_idx = rte_fbarray_find_next_used(arr, 0);
		while (ms_idx >= 0) {
			int n_segs;
			size_t len;

			ms = rte_fbarray_get(arr, ms_idx);

			/* find how many more segments there are, starting with
			 * this one.
			 */
			n_segs = rte_fbarray_find_contig_used(arr, ms_idx);
			len = n_segs * msl->page_sz;

			ret = func(msl, ms, len, arg);
			if (ret)
				return ret;

			ms_idx = rte_fbarray_find_next_used(arr,
					ms_idx + n_segs);
		}
	}
	return 0;
}

int __rte_experimental
rte_memseg_contig_walk(rte_memseg_contig_walk_t func, void *arg)
{
	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
	int ret = 0;

	/* do not allow allocations/frees/init while we iterate */
	rte_rwlock_read_lock(&mcfg->memory_hotplug_lock);
	ret = rte_memseg_contig_walk_thread_unsafe(func, arg);
	rte_rwlock_read_unlock(&mcfg->memory_hotplug_lock);

	return ret;
}

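/*
 * Usage sketch (illustrative; "sum_contig" is a hypothetical callback):
 * totalling the length of all VA-contiguous runs of segments.
 *
 *	static int
 *	sum_contig(const struct rte_memseg_list *msl __rte_unused,
 *			const struct rte_memseg *ms __rte_unused,
 *			size_t len, void *arg)
 *	{
 *		size_t *total = arg;
 *		*total += len;
 *		return 0; // zero continues the walk, nonzero stops it
 *	}
 *
 *	size_t total = 0;
 *	rte_memseg_contig_walk(sum_contig, &total);
 */
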
int __rte_experimental
rte_memseg_walk_thread_unsafe(rte_memseg_walk_t func, void *arg)
{
	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
	int i, ms_idx, ret = 0;

	for (i = 0; i < RTE_MAX_MEMSEG_LISTS; i++) {
		struct rte_memseg_list *msl = &mcfg->memsegs[i];
		const struct rte_memseg *ms;
		struct rte_fbarray *arr;

		if (msl->memseg_arr.count == 0)
			continue;

		arr = &msl->memseg_arr;

		ms_idx = rte_fbarray_find_next_used(arr, 0);
		while (ms_idx >= 0) {
			ms = rte_fbarray_get(arr, ms_idx);
			ret = func(msl, ms, arg);
			if (ret)
				return ret;
			ms_idx = rte_fbarray_find_next_used(arr, ms_idx + 1);
		}
	}
	return 0;
}

int __rte_experimental
rte_memseg_walk(rte_memseg_walk_t func, void *arg)
{
	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
	int ret = 0;

	/* do not allow allocations/frees/init while we iterate */
	rte_rwlock_read_lock(&mcfg->memory_hotplug_lock);
	ret = rte_memseg_walk_thread_unsafe(func, arg);
	rte_rwlock_read_unlock(&mcfg->memory_hotplug_lock);

	return ret;
}

int __rte_experimental
rte_memseg_list_walk_thread_unsafe(rte_memseg_list_walk_t func, void *arg)
{
	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
	int i, ret = 0;

	for (i = 0; i < RTE_MAX_MEMSEG_LISTS; i++) {
		struct rte_memseg_list *msl = &mcfg->memsegs[i];

		if (msl->base_va == NULL)
			continue;

		ret = func(msl, arg);
		if (ret)
			return ret;
	}
	return 0;
}

int __rte_experimental
rte_memseg_list_walk(rte_memseg_list_walk_t func, void *arg)
{
	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
	int ret = 0;

	/* do not allow allocations/frees/init while we iterate */
	rte_rwlock_read_lock(&mcfg->memory_hotplug_lock);
	ret = rte_memseg_list_walk_thread_unsafe(func, arg);
	rte_rwlock_read_unlock(&mcfg->memory_hotplug_lock);

	return ret;
}

int __rte_experimental
rte_memseg_get_fd_thread_unsafe(const struct rte_memseg *ms)
{
	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
	struct rte_memseg_list *msl;
	struct rte_fbarray *arr;
	int msl_idx, seg_idx, ret;

	if (ms == NULL) {
		rte_errno = EINVAL;
		return -1;
	}

	msl = rte_mem_virt2memseg_list(ms->addr);
	if (msl == NULL) {
		rte_errno = EINVAL;
		return -1;
	}
	arr = &msl->memseg_arr;

	msl_idx = msl - mcfg->memsegs;
	seg_idx = rte_fbarray_find_idx(arr, ms);

	if (!rte_fbarray_is_used(arr, seg_idx)) {
		rte_errno = ENOENT;
		return -1;
	}

	/* segment fd API is not supported for external segments */
	if (msl->external) {
		rte_errno = ENOTSUP;
		return -1;
	}

	ret = eal_memalloc_get_seg_fd(msl_idx, seg_idx);
	if (ret < 0) {
		rte_errno = -ret;
		ret = -1;
	}
	return ret;
}

int __rte_experimental
rte_memseg_get_fd(const struct rte_memseg *ms)
{
	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
	int ret;

	rte_rwlock_read_lock(&mcfg->memory_hotplug_lock);
	ret = rte_memseg_get_fd_thread_unsafe(ms);
	rte_rwlock_read_unlock(&mcfg->memory_hotplug_lock);

	return ret;
}

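/*
 * Usage sketch (illustrative; "addr" is a hypothetical pointer into
 * DPDK-managed memory): retrieving the file descriptor backing a
 * segment, e.g. to share the mapping with another process.
 *
 *	const struct rte_memseg *ms = rte_mem_virt2memseg(addr, NULL);
 *	int fd = rte_memseg_get_fd(ms);
 *	if (fd < 0)
 *		// rte_errno explains why, e.g. ENOTSUP for external memory
 */
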
int __rte_experimental
rte_memseg_get_fd_offset_thread_unsafe(const struct rte_memseg *ms,
		size_t *offset)
{
	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
	struct rte_memseg_list *msl;
	struct rte_fbarray *arr;
	int msl_idx, seg_idx, ret;

	if (ms == NULL || offset == NULL) {
		rte_errno = EINVAL;
		return -1;
	}

	msl = rte_mem_virt2memseg_list(ms->addr);
	if (msl == NULL) {
		rte_errno = EINVAL;
		return -1;
	}
	arr = &msl->memseg_arr;

	msl_idx = msl - mcfg->memsegs;
	seg_idx = rte_fbarray_find_idx(arr, ms);

	if (!rte_fbarray_is_used(arr, seg_idx)) {
		rte_errno = ENOENT;
		return -1;
	}

	/* segment fd API is not supported for external segments */
	if (msl->external) {
		rte_errno = ENOTSUP;
		return -1;
	}

	ret = eal_memalloc_get_seg_fd_offset(msl_idx, seg_idx, offset);
	if (ret < 0) {
		rte_errno = -ret;
		ret = -1;
	}
	return ret;
}

int __rte_experimental
rte_memseg_get_fd_offset(const struct rte_memseg *ms, size_t *offset)
{
	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
	int ret;

	rte_rwlock_read_lock(&mcfg->memory_hotplug_lock);
	ret = rte_memseg_get_fd_offset_thread_unsafe(ms, offset);
	rte_rwlock_read_unlock(&mcfg->memory_hotplug_lock);

	return ret;
}

/* init memory subsystem */
int
rte_eal_memory_init(void)
{
	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
	int retval;

	RTE_LOG(DEBUG, EAL, "Setting up physically contiguous memory...\n");

	if (!mcfg)
		return -1;

	/* lock mem hotplug here, to prevent races while we init */
	rte_rwlock_read_lock(&mcfg->memory_hotplug_lock);

	if (rte_eal_memseg_init() < 0)
		goto fail;

	if (eal_memalloc_init() < 0)
		goto fail;

	retval = rte_eal_process_type() == RTE_PROC_PRIMARY ?
			rte_eal_hugepage_init() :
			rte_eal_hugepage_attach();
	if (retval < 0)
		goto fail;

	if (internal_config.no_shconf == 0 && rte_eal_memdevice_init() < 0)
		goto fail;

	return 0;
fail:
	rte_rwlock_read_unlock(&mcfg->memory_hotplug_lock);
	return -1;
}