 * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 6WIND.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in
 *     the documentation and/or other materials provided with the
 *   * Neither the name of Intel Corporation nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#define _FILE_OFFSET_BITS 64

#include <sys/types.h>
#include <sys/queue.h>
#include <sys/ioctl.h>

#ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
#include <numa.h>
#include <numaif.h>
#endif

#include <rte_memory.h>
#include <rte_launch.h>
#include <rte_eal_memconfig.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_common.h>
#include <rte_string_fns.h>

#include "eal_private.h"
#include "eal_internal_cfg.h"
#include "eal_filesystem.h"
#include "eal_hugepages.h"

#define PFN_MASK_SIZE 8
 * Huge page mapping under linux
 *
 * To reserve a big contiguous amount of memory, we use the hugepage
 * feature of linux. For that, we need to have hugetlbfs mounted. This
 * code will create many files in this directory (one per page) and
 * map them in virtual memory. For each page, we will retrieve its
 * physical address and remap it in order to have a virtual contiguous
 * zone as well as a physical contiguous zone.
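 *
 * As an illustrative sketch only (not part of the EAL; the mount point
 * "/mnt/huge" and the 2MB size are hypothetical), the basic mechanism this
 * file builds on is simply opening a file under a hugetlbfs mount and
 * mmap()ing one hugepage of it:
 *
 *   int fd = open("/mnt/huge/rtemap_0", O_CREAT | O_RDWR, 0600);
 *   void *va = mmap(NULL, 2 * 1024 * 1024, PROT_READ | PROT_WRITE,
 *                   MAP_SHARED | MAP_POPULATE, fd, 0);
 *
 * Each such per-page mapping is later re-mapped so that pages which are
 * physically contiguous also become virtually contiguous.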
static uint64_t baseaddr_offset;
 * The Linux kernel uses a very high address as the starting address for
 * serving mmap calls. If there are addressing limitations and IOVA mode is
 * VA, this starting address is likely too high for those devices. However,
 * it is possible to use a lower address in the process virtual address
 * space, as with 64 bits there is a lot of available space.
 *
 * Currently known limitations are 39 or 40 bits. Setting the starting
 * address at 4GB implies there are 508GB or 1020GB available for mapping
 * the hugepages. This is likely enough for most systems, although a device
 * with addressing limitations should call rte_dev_check_dma_mask to ensure
 * all memory is within the supported range.
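 *
 * As a quick sanity check of those figures: a 39-bit device can address
 * 2^39 bytes = 512GB, so starting at 4GB leaves 512 - 4 = 508GB; a 40-bit
 * device can address 2^40 bytes = 1024GB, leaving 1024 - 4 = 1020GB.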
static uint64_t baseaddr = 0x100000000;

static bool phys_addrs_available = true;

#define RANDOMIZE_VA_SPACE_FILE "/proc/sys/kernel/randomize_va_space"
test_phys_addrs_available(void)
    phys_addr_t physaddr;

    if (!rte_eal_has_hugepages()) {
            "Started without hugepages support, physical addresses not available\n");
        phys_addrs_available = false;

    physaddr = rte_mem_virt2phy(&tmp);
    if (physaddr == RTE_BAD_PHYS_ADDR) {
        if (rte_eal_iova_mode() == RTE_IOVA_PA)
                "Cannot obtain physical addresses: %s. "
                "Only vfio will function.\n",
        phys_addrs_available = false;

 * Get physical address of any mapped virtual address in the current process.
rte_mem_virt2phy(const void *virtaddr)
    uint64_t page, physaddr;
    unsigned long virt_pfn;

    /* Cannot parse /proc/self/pagemap, no need to log errors everywhere */
    if (!phys_addrs_available)

    /* standard page size */
    page_size = getpagesize();

    fd = open("/proc/self/pagemap", O_RDONLY);
        RTE_LOG(ERR, EAL, "%s(): cannot open /proc/self/pagemap: %s\n",
            __func__, strerror(errno));

    virt_pfn = (unsigned long)virtaddr / page_size;
    offset = sizeof(uint64_t) * virt_pfn;
    if (lseek(fd, offset, SEEK_SET) == (off_t) -1) {
        RTE_LOG(ERR, EAL, "%s(): seek error in /proc/self/pagemap: %s\n",
            __func__, strerror(errno));

    retval = read(fd, &page, PFN_MASK_SIZE);
        RTE_LOG(ERR, EAL, "%s(): cannot read /proc/self/pagemap: %s\n",
            __func__, strerror(errno));
    } else if (retval != PFN_MASK_SIZE) {
        RTE_LOG(ERR, EAL, "%s(): read %d bytes from /proc/self/pagemap "
            "but expected %d:\n",
            __func__, retval, PFN_MASK_SIZE);

     * the pfn (page frame number) are bits 0-54 (see
     * pagemap.txt in linux Documentation)
    if ((page & 0x7fffffffffffffULL) == 0)

    physaddr = ((page & 0x7fffffffffffffULL) * page_size)
        + ((unsigned long)virtaddr % page_size);

rte_mem_virt2iova(const void *virtaddr)
    if (rte_eal_iova_mode() == RTE_IOVA_VA)
        return (uintptr_t)virtaddr;
    return rte_mem_virt2phy(virtaddr);
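/*
 * Illustrative usage sketch (not part of this file): code that needs a DMA
 * address for a buffer would typically do something like the following;
 * "buf" is hypothetical and assumed to live in hugepage-backed memory.
 *
 *   rte_iova_t iova = rte_mem_virt2iova(buf);
 *   if (iova == RTE_BAD_IOVA)
 *       // handle the error: the address cannot be used for DMA
 *
 * In IOVA-as-VA mode this simply returns the virtual address; otherwise it
 * falls back to the /proc/self/pagemap lookup implemented above.
 */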
 * For each hugepage in hugepg_tbl, fill the physaddr value. We find
 * it by browsing the /proc/self/pagemap special file.
find_physaddrs(struct hugepage_file *hugepg_tbl, struct hugepage_info *hpi)
    for (i = 0; i < hpi->num_pages[0]; i++) {
        addr = rte_mem_virt2phy(hugepg_tbl[i].orig_va);
        if (addr == RTE_BAD_PHYS_ADDR)
        hugepg_tbl[i].physaddr = addr;

 * For each hugepage in hugepg_tbl, fill the physaddr value sequentially.
set_physaddrs(struct hugepage_file *hugepg_tbl, struct hugepage_info *hpi)
    static phys_addr_t addr;

    for (i = 0; i < hpi->num_pages[0]; i++) {
        hugepg_tbl[i].physaddr = addr;
        addr += hugepg_tbl[i].size;
 * Check whether address-space layout randomization is enabled in
 * the kernel. This is important for multi-process as it can prevent
 * two processes mapping data to the same virtual address
 * 0 - address space randomization disabled
 * 1/2 - address space randomization enabled
 * negative error code on error
    int retval, fd = open(RANDOMIZE_VA_SPACE_FILE, O_RDONLY);

    retval = read(fd, &c, 1);
    default: return -EINVAL;

    if (internal_config.base_virtaddr != 0) {
        return (void *) (uintptr_t)
            (internal_config.base_virtaddr +
        return (void *) (uintptr_t) (baseaddr +
 * Try to mmap *size bytes in /dev/zero. If it is successful, return the
 * pointer to the mmap'd area and keep *size unmodified. Else, retry
 * with a smaller zone: decrease *size by hugepage_sz until it reaches
 * 0. In this case, return NULL. Note: this function returns an address
 * which is a multiple of hugepage size.
get_virtual_area(size_t *size, size_t hugepage_sz)
    void *addr, *addr_hint;

    RTE_LOG(DEBUG, EAL, "Ask a virtual area of 0x%zx bytes\n", *size);

    fd = open("/dev/zero", O_RDONLY);
        RTE_LOG(ERR, EAL, "Cannot open /dev/zero\n");

    addr_hint = get_addr_hint();

        addr = mmap(addr_hint,
            (*size) + hugepage_sz, PROT_READ,
#ifdef RTE_ARCH_PPC_64
            MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB,

        if (addr == MAP_FAILED) {
            /* map failed. Let's try with less memory */
            *size -= hugepage_sz;
        } else if (addr_hint && addr != addr_hint) {
            /* hint was not used. Try with another offset */
            munmap(addr, (*size) + hugepage_sz);
            baseaddr_offset += 0x100000000;
    } while (addr == MAP_FAILED && *size > 0);

    if (addr == MAP_FAILED) {
        RTE_LOG(ERR, EAL, "Cannot get a virtual area: %s\n",

    munmap(addr, (*size) + hugepage_sz);

    /* align addr to a huge page size boundary */
    aligned_addr = (long)addr;
    aligned_addr += (hugepage_sz - 1);
    aligned_addr &= (~(hugepage_sz - 1));
    addr = (void *)(aligned_addr);
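    /* e.g. with 2MB pages a returned address of 0x7f43b5601000 rounds up
     * to 0x7f43b5800000 (hypothetical addresses, shown only to make the
     * rounding concrete) */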
    RTE_LOG(DEBUG, EAL, "Virtual area found at %p (size = 0x%zx)\n",

    /* increment offset */
    baseaddr_offset += *size;

static sigjmp_buf huge_jmpenv;

static void huge_sigbus_handler(int signo __rte_unused)
    siglongjmp(huge_jmpenv, 1);
/* Wrap sigsetjmp in its own function to avoid a compiler warning: any
 * non-volatile, non-static local variable in the stack frame calling
 * sigsetjmp might be clobbered by a call to longjmp.
static int huge_wrap_sigsetjmp(void)
    return sigsetjmp(huge_jmpenv, 1);

#ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
/* Callback for numa library. */
void numa_error(char *where)
    RTE_LOG(ERR, EAL, "%s failed: %s\n", where, strerror(errno));
 * Mmap all hugepages of the hugepage table: it first opens a file in
 * hugetlbfs, then mmap()s hugepage_sz bytes of it. If orig is set, the
 * virtual address is stored in hugepg_tbl[i].orig_va, else it is stored
 * in hugepg_tbl[i].final_va. The second mapping (when orig is 0) tries to
 * map contiguous physical blocks in contiguous virtual blocks.
map_all_hugepages(struct hugepage_file *hugepg_tbl, struct hugepage_info *hpi,
        uint64_t *essential_memory __rte_unused, int orig)
    void *vma_addr = NULL;
#ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
    int essential_prev = 0;
    struct bitmask *oldmask = NULL;
    bool have_numa = true;
    unsigned long maxnode = 0;

    /* Check if kernel supports NUMA. */
    if (numa_available() != 0) {
        RTE_LOG(DEBUG, EAL, "NUMA is not supported.\n");

    if (orig && have_numa) {
        RTE_LOG(DEBUG, EAL, "Trying to obtain current memory policy.\n");
        oldmask = numa_allocate_nodemask();
        if (get_mempolicy(&oldpolicy, oldmask->maskp,
                oldmask->size + 1, 0, 0) < 0) {
                "Failed to get current mempolicy: %s. "
                "Assuming MPOL_DEFAULT.\n", strerror(errno));
            oldpolicy = MPOL_DEFAULT;
        for (i = 0; i < RTE_MAX_NUMA_NODES; i++)
            if (internal_config.socket_mem[i])
     * Hugepages are first mmapped individually and then re-mmapped to
     * another region to get physically contiguous pages at virtually
     * contiguous addresses. Here we set vma_addr for the first hugepage
     * to a virtual address that will not collide with the second
     * mapping later. The next hugepages will use increments of this address.
     * The final virtual address will be based on baseaddr, which is
     * 0x100000000. We use a hint here starting at 0x200000000, leaving
     * another 4GB just in case, plus the total available hugepage memory.
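     *
     * For example (hypothetical numbers): with 4GB worth of 2MB hugepages,
     * this first-pass hint starts at 0x200000000 + 4GB = 0x300000000, well
     * clear of the final mappings made above baseaddr (0x100000000).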
    vma_addr = (char *)0x200000000 + (hpi->hugepage_sz * hpi->num_pages[0]);

    for (i = 0; i < hpi->num_pages[0]; i++) {
        uint64_t hugepage_sz = hpi->hugepage_sz;

#ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
            for (j = 0; j < maxnode; j++)
                if (essential_memory[j])

                node_id = (node_id + 1) % maxnode;
                while (!internal_config.socket_mem[node_id]) {

                essential_prev = essential_memory[j];
                if (essential_memory[j] < hugepage_sz)
                    essential_memory[j] = 0;
                    essential_memory[j] -= hugepage_sz;

                "Setting policy MPOL_PREFERRED for socket %d\n",
            numa_set_preferred(node_id);

        hugepg_tbl[i].file_id = i;
        hugepg_tbl[i].size = hugepage_sz;
        eal_get_hugefile_path(hugepg_tbl[i].filepath,
                sizeof(hugepg_tbl[i].filepath), hpi->hugedir,
                hugepg_tbl[i].file_id);
        hugepg_tbl[i].filepath[sizeof(hugepg_tbl[i].filepath) - 1] = '\0';

        /* for 32-bit systems, don't remap 1G and 16G pages, just reuse
         * original map address as final map address.
        else if ((hugepage_sz == RTE_PGSIZE_1G)
                || (hugepage_sz == RTE_PGSIZE_16G)) {
            hugepg_tbl[i].final_va = hugepg_tbl[i].orig_va;
            hugepg_tbl[i].orig_va = NULL;
        else if (vma_len == 0) {
            unsigned j, num_pages;

            /* reserve a virtual area for next contiguous
             * physical block: count the number of
             * contiguous physical pages. */
            for (j = i+1; j < hpi->num_pages[0]; j++) {
#ifdef RTE_ARCH_PPC_64
                /* The physical addresses are sorted in
                 * descending order on PPC64 */
                if (hugepg_tbl[j].physaddr !=
                        hugepg_tbl[j-1].physaddr - hugepage_sz)
                if (hugepg_tbl[j].physaddr !=
                        hugepg_tbl[j-1].physaddr + hugepage_sz)
            vma_len = num_pages * hugepage_sz;

            /* get the biggest virtual memory area up to
             * vma_len. If it fails, vma_addr is NULL, so
             * let the kernel provide the address. */
            vma_addr = get_virtual_area(&vma_len, hpi->hugepage_sz);
            if (vma_addr == NULL)
                vma_len = hugepage_sz;
        /* try to create hugepage file */
        fd = open(hugepg_tbl[i].filepath, O_CREAT | O_RDWR, 0600);
            RTE_LOG(DEBUG, EAL, "%s(): open failed: %s\n", __func__,

        /* map the segment, and populate page tables,
         * the kernel fills this segment with zeros */
        virtaddr = mmap(vma_addr, hugepage_sz, PROT_READ | PROT_WRITE,
                MAP_SHARED | MAP_POPULATE, fd, 0);
        if (virtaddr == MAP_FAILED) {
            RTE_LOG(DEBUG, EAL, "%s(): mmap failed: %s\n", __func__,

            hugepg_tbl[i].orig_va = virtaddr;

            /* rewrite physical addresses in IOVA as VA mode */
            if (rte_eal_iova_mode() == RTE_IOVA_VA)
                hugepg_tbl[i].physaddr = (uintptr_t)virtaddr;
            hugepg_tbl[i].final_va = virtaddr;
            /* In Linux, hugetlb limitations, like cgroups, are
             * enforced at fault time instead of at mmap(), even
             * with the MAP_POPULATE option. The kernel will send
             * a SIGBUS signal. To avoid being killed, save the stack
             * environment here; if SIGBUS happens, we can jump
            if (huge_wrap_sigsetjmp()) {
                RTE_LOG(DEBUG, EAL, "SIGBUS: Cannot mmap more "
                    "hugepages of size %u MB\n",
                    (unsigned)(hugepage_sz / 0x100000));
                munmap(virtaddr, hugepage_sz);
                unlink(hugepg_tbl[i].filepath);
#ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
                    essential_memory[node_id] =
            *(int *)virtaddr = 0;
        /* set shared flock on the file. */
        if (flock(fd, LOCK_SH | LOCK_NB) == -1) {
            RTE_LOG(DEBUG, EAL, "%s(): Locking file failed: %s\n",
                __func__, strerror(errno));
        vma_addr = (char *)vma_addr + hugepage_sz;
        vma_len -= hugepage_sz;

#ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
            "Restoring previous memory policy: %d\n", oldpolicy);
        if (oldpolicy == MPOL_DEFAULT) {
            numa_set_localalloc();
        } else if (set_mempolicy(oldpolicy, oldmask->maskp,
                oldmask->size + 1) < 0) {
            RTE_LOG(ERR, EAL, "Failed to restore mempolicy: %s\n",
            numa_set_localalloc();
        numa_free_cpumask(oldmask);
/* Unmap all hugepages from original mapping */
unmap_all_hugepages_orig(struct hugepage_file *hugepg_tbl, struct hugepage_info *hpi)
    for (i = 0; i < hpi->num_pages[0]; i++) {
        if (hugepg_tbl[i].orig_va) {
            munmap(hugepg_tbl[i].orig_va, hpi->hugepage_sz);
            hugepg_tbl[i].orig_va = NULL;
 * Parse /proc/self/numa_maps to get the NUMA socket ID for each hugepage.
find_numasocket(struct hugepage_file *hugepg_tbl, struct hugepage_info *hpi)
    unsigned i, hp_count = 0;
    char hugedir_str[PATH_MAX];

    f = fopen("/proc/self/numa_maps", "r");
        RTE_LOG(NOTICE, EAL, "NUMA support not available"
            " consider that all memory is in socket_id 0\n");

    snprintf(hugedir_str, sizeof(hugedir_str),
        "%s/%s", hpi->hugedir, internal_config.hugefile_prefix);

    while (fgets(buf, sizeof(buf), f) != NULL) {
        /* ignore non huge page */
        if (strstr(buf, " huge ") == NULL &&
                strstr(buf, hugedir_str) == NULL)

        virt_addr = strtoull(buf, &end, 16);
        if (virt_addr == 0 || end == buf) {
            RTE_LOG(ERR, EAL, "%s(): error in numa_maps parsing\n", __func__);

        /* get node id (socket id) */
        nodestr = strstr(buf, " N");
        if (nodestr == NULL) {
            RTE_LOG(ERR, EAL, "%s(): error in numa_maps parsing\n", __func__);

        end = strstr(nodestr, "=");
            RTE_LOG(ERR, EAL, "%s(): error in numa_maps parsing\n", __func__);

        socket_id = strtoul(nodestr, &end, 0);
        if ((nodestr[0] == '\0') || (end == NULL) || (*end != '\0')) {
            RTE_LOG(ERR, EAL, "%s(): error in numa_maps parsing\n", __func__);

        /* if we find this page in our mappings, set socket_id */
        for (i = 0; i < hpi->num_pages[0]; i++) {
            void *va = (void *)(unsigned long)virt_addr;
            if (hugepg_tbl[i].orig_va == va) {
                hugepg_tbl[i].socket_id = socket_id;
#ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
                    "Hugepage %s is on socket %d\n",
                    hugepg_tbl[i].filepath, socket_id);

    if (hp_count < hpi->num_pages[0])
cmp_physaddr(const void *a, const void *b)
#ifndef RTE_ARCH_PPC_64
    const struct hugepage_file *p1 = a;
    const struct hugepage_file *p2 = b;
    /* PowerPC needs memory sorted in reverse order from x86 */
    const struct hugepage_file *p1 = b;
    const struct hugepage_file *p2 = a;

    if (p1->physaddr < p2->physaddr)
    else if (p1->physaddr > p2->physaddr)
 * Uses mmap to create a shared memory area for storage of data
 * Used in this file to store the hugepage file map on disk
create_shared_memory(const char *filename, const size_t mem_size)
    int fd = open(filename, O_CREAT | O_RDWR, 0666);

    if (ftruncate(fd, mem_size) < 0) {

    retval = mmap(NULL, mem_size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);

    if (retval == MAP_FAILED)
 * this copies *active* hugepages from one hugepage table to another.
 * destination is typically the shared memory.
copy_hugepages_to_shared_mem(struct hugepage_file *dst, int dest_size,
        const struct hugepage_file *src, int src_size)
    int src_pos, dst_pos = 0;

    for (src_pos = 0; src_pos < src_size; src_pos++) {
        if (src[src_pos].final_va != NULL) {
            /* error on overflow attempt */
            if (dst_pos == dest_size)
            memcpy(&dst[dst_pos], &src[src_pos], sizeof(struct hugepage_file));
unlink_hugepage_files(struct hugepage_file *hugepg_tbl,
        unsigned num_hp_info)
    unsigned socket, size;
    int page, nrpages = 0;

    /* get total number of hugepages */
    for (size = 0; size < num_hp_info; size++)
        for (socket = 0; socket < RTE_MAX_NUMA_NODES; socket++)
                internal_config.hugepage_info[size].num_pages[socket];

    for (page = 0; page < nrpages; page++) {
        struct hugepage_file *hp = &hugepg_tbl[page];

        if (hp->final_va != NULL && unlink(hp->filepath)) {
            RTE_LOG(WARNING, EAL, "%s(): Removing %s failed: %s\n",
                __func__, hp->filepath, strerror(errno));
 * unmaps hugepages that are not going to be used. since we originally allocate
 * ALL hugepages (not just those we need), additional unmapping needs to be done.
unmap_unneeded_hugepages(struct hugepage_file *hugepg_tbl,
        struct hugepage_info *hpi,
        unsigned num_hp_info)
    unsigned socket, size;
    int page, nrpages = 0;

    /* get total number of hugepages */
    for (size = 0; size < num_hp_info; size++)
        for (socket = 0; socket < RTE_MAX_NUMA_NODES; socket++)
            nrpages += internal_config.hugepage_info[size].num_pages[socket];

    for (size = 0; size < num_hp_info; size++) {
        for (socket = 0; socket < RTE_MAX_NUMA_NODES; socket++) {
            unsigned pages_found = 0;

            /* traverse until we have unmapped all the unused pages */
            for (page = 0; page < nrpages; page++) {
                struct hugepage_file *hp = &hugepg_tbl[page];

                /* find a page that matches the criteria */
                if ((hp->size == hpi[size].hugepage_sz) &&
                        (hp->socket_id == (int) socket)) {

                    /* if we skipped enough pages, unmap the rest */
                    if (pages_found == hpi[size].num_pages[socket]) {
                        unmap_len = hp->size;

                        /* get start addr and len of the remaining segment */
                        munmap(hp->final_va, (size_t) unmap_len);

                        if (unlink(hp->filepath) == -1) {
                            RTE_LOG(ERR, EAL, "%s(): Removing %s failed: %s\n",
                                __func__, hp->filepath, strerror(errno));

                    /* lock the page and skip */
        } /* foreach socket */
    } /* foreach pagesize */
static inline uint64_t
get_socket_mem_size(int socket)
    for (i = 0; i < internal_config.num_hugepage_sizes; i++) {
        struct hugepage_info *hpi = &internal_config.hugepage_info[i];
        if (hpi->hugedir != NULL)
            size += hpi->hugepage_sz * hpi->num_pages[socket];
 * This function is a NUMA-aware equivalent of calc_num_pages.
 * It takes in the list of hugepage sizes and the
 * number of pages thereof, and calculates the best number of
 * pages of each size to fulfill the request for <memory> RAM
calc_num_pages_per_socket(uint64_t *memory,
        struct hugepage_info *hp_info,
        struct hugepage_info *hp_used,
        unsigned num_hp_info)
    unsigned socket, j, i = 0;
    unsigned requested, available;
    int total_num_pages = 0;
    uint64_t remaining_mem, cur_mem;
    uint64_t total_mem = internal_config.memory;

    if (num_hp_info == 0)

    /* if specific memory amounts per socket weren't requested */
    if (internal_config.force_sockets == 0) {
        int cpu_per_socket[RTE_MAX_NUMA_NODES];
        size_t default_size, total_size;

        /* Compute number of cores per socket */
        memset(cpu_per_socket, 0, sizeof(cpu_per_socket));
        RTE_LCORE_FOREACH(lcore_id) {
            cpu_per_socket[rte_lcore_to_socket_id(lcore_id)]++;

         * Automatically spread requested memory amongst detected sockets according
         * to number of cores from cpu mask present on each socket
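         *
         * For example (hypothetical numbers): with -m 2048 and a core mask
         * spanning 12 cores on socket 0 and 4 cores on socket 1, socket 0
         * is assigned 2048 * 12/16 = 1536MB and socket 1 the remaining
         * 512MB, each amount then capped by the hugepage memory actually
         * present on that socket.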
        total_size = internal_config.memory;
        for (socket = 0; socket < RTE_MAX_NUMA_NODES && total_size != 0; socket++) {

            /* Set memory amount per socket */
            default_size = (internal_config.memory * cpu_per_socket[socket])

            /* Limit to maximum available memory on socket */
            default_size = RTE_MIN(default_size, get_socket_mem_size(socket));

            memory[socket] = default_size;
            total_size -= default_size;

         * If some memory is remaining, try to allocate it by getting all
         * available memory from sockets, one after the other
        for (socket = 0; socket < RTE_MAX_NUMA_NODES && total_size != 0; socket++) {
            /* take whatever is available */
            default_size = RTE_MIN(get_socket_mem_size(socket) - memory[socket],

            memory[socket] += default_size;
            total_size -= default_size;
    for (socket = 0; socket < RTE_MAX_NUMA_NODES && total_mem != 0; socket++) {
        /* skips if the memory on specific socket wasn't requested */
        for (i = 0; i < num_hp_info && memory[socket] != 0; i++) {
            hp_used[i].hugedir = hp_info[i].hugedir;
            hp_used[i].num_pages[socket] = RTE_MIN(
                    memory[socket] / hp_info[i].hugepage_sz,
                    hp_info[i].num_pages[socket]);

            cur_mem = hp_used[i].num_pages[socket] *
                    hp_used[i].hugepage_sz;

            memory[socket] -= cur_mem;
            total_mem -= cur_mem;

            total_num_pages += hp_used[i].num_pages[socket];

            /* check if we have met all memory requests */
            if (memory[socket] == 0)

            /* check if we have any more pages left at this size, if so
             * move on to next size */
            if (hp_used[i].num_pages[socket] == hp_info[i].num_pages[socket])
            /* At this point we know that there are more pages available that are
             * bigger than the memory we want, so let's see if we can get enough
             * from other page sizes.
            for (j = i+1; j < num_hp_info; j++)
                remaining_mem += hp_info[j].hugepage_sz *
                        hp_info[j].num_pages[socket];

            /* is there enough other memory, if not allocate another page and quit */
            if (remaining_mem < memory[socket]) {
                cur_mem = RTE_MIN(memory[socket],
                        hp_info[i].hugepage_sz);
                memory[socket] -= cur_mem;
                total_mem -= cur_mem;
                hp_used[i].num_pages[socket]++;
                break; /* we are done with this socket */

        /* if we didn't satisfy all memory requirements per socket */
        if (memory[socket] > 0) {
            /* to prevent icc errors */
            requested = (unsigned) (internal_config.socket_mem[socket] /
            available = requested -
                ((unsigned) (memory[socket] / 0x100000));
            RTE_LOG(ERR, EAL, "Not enough memory available on socket %u! "
                "Requested: %uMB, available: %uMB\n", socket,
                requested, available);

    /* if we didn't satisfy total memory requirements */
        requested = (unsigned) (internal_config.memory / 0x100000);
        available = requested - (unsigned) (total_mem / 0x100000);
        RTE_LOG(ERR, EAL, "Not enough memory available! Requested: %uMB,"
            " available: %uMB\n", requested, available);

    return total_num_pages;
static inline size_t
eal_get_hugepage_mem_size(void)
    for (i = 0; i < internal_config.num_hugepage_sizes; i++) {
        struct hugepage_info *hpi = &internal_config.hugepage_info[i];
        if (hpi->hugedir != NULL) {
            for (j = 0; j < RTE_MAX_NUMA_NODES; j++) {
                size += hpi->hugepage_sz * hpi->num_pages[j];

    return (size < SIZE_MAX) ? (size_t)(size) : SIZE_MAX;
static struct sigaction huge_action_old;
static int huge_need_recover;

huge_register_sigbus(void)
    struct sigaction action;

    sigaddset(&mask, SIGBUS);
    action.sa_flags = 0;
    action.sa_mask = mask;
    action.sa_handler = huge_sigbus_handler;

    huge_need_recover = !sigaction(SIGBUS, &action, &huge_action_old);

huge_recover_sigbus(void)
    if (huge_need_recover) {
        sigaction(SIGBUS, &huge_action_old, NULL);
        huge_need_recover = 0;
 * Prepare physical memory mapping: fill the configuration structure with
 * this info; return 0 on success.
 *  1. map N huge pages in separate files in hugetlbfs
 *  2. find associated physical addr
 *  3. find associated NUMA socket ID
 *  4. sort all huge pages by physical address
 *  5. remap these N huge pages in the correct order
 *  6. unmap the first mapping
 *  7. fill memsegs in configuration with contiguous zones
rte_eal_hugepage_init(void)
    struct rte_mem_config *mcfg;
    struct hugepage_file *hugepage = NULL, *tmp_hp = NULL;
    struct hugepage_info used_hp[MAX_HUGEPAGE_SIZES];
    uint64_t memory[RTE_MAX_NUMA_NODES];
    int i, j, new_memseg;
    int nr_hugefiles, nr_hugepages = 0;

    test_phys_addrs_available();

    memset(used_hp, 0, sizeof(used_hp));

    /* get pointer to global configuration */
    mcfg = rte_eal_get_configuration()->mem_config;

    /* hugetlbfs can be disabled */
    if (internal_config.no_hugetlbfs) {
        addr = mmap(NULL, internal_config.memory, PROT_READ | PROT_WRITE,
                MAP_PRIVATE | MAP_ANONYMOUS, 0, 0);
        if (addr == MAP_FAILED) {
            RTE_LOG(ERR, EAL, "%s: mmap() failed: %s\n", __func__,
        if (rte_eal_iova_mode() == RTE_IOVA_VA)
            mcfg->memseg[0].iova = (uintptr_t)addr;
            mcfg->memseg[0].iova = RTE_BAD_IOVA;
        mcfg->memseg[0].addr = addr;
        mcfg->memseg[0].hugepage_sz = RTE_PGSIZE_4K;
        mcfg->memseg[0].len = internal_config.memory;
        mcfg->memseg[0].socket_id = 0;

    /* calculate total number of hugepages available. at this point we haven't
     * yet started sorting them so they all are on socket 0 */
    for (i = 0; i < (int) internal_config.num_hugepage_sizes; i++) {
        /* meanwhile, also initialize used_hp hugepage sizes in used_hp */
        used_hp[i].hugepage_sz = internal_config.hugepage_info[i].hugepage_sz;

        nr_hugepages += internal_config.hugepage_info[i].num_pages[0];

     * allocate a memory area for hugepage table.
     * this isn't shared memory yet. due to the fact that we need some
     * processing done on these pages, shared memory will be created
    tmp_hp = malloc(nr_hugepages * sizeof(struct hugepage_file));

    memset(tmp_hp, 0, nr_hugepages * sizeof(struct hugepage_file));

    hp_offset = 0; /* where we start the current page size entries */

    huge_register_sigbus();

    /* make a copy of socket_mem, needed for balanced allocation. */
    for (i = 0; i < RTE_MAX_NUMA_NODES; i++)
        memory[i] = internal_config.socket_mem[i];

    /* map all hugepages and sort them */
    for (i = 0; i < (int)internal_config.num_hugepage_sizes; i++) {
        unsigned pages_old, pages_new;
        struct hugepage_info *hpi;

         * we don't yet mark hugepages as used at this stage, so
         * we just map all hugepages available to the system
         * all hugepages are still located on socket 0
        hpi = &internal_config.hugepage_info[i];

        if (hpi->num_pages[0] == 0)

        /* map all hugepages available */
        pages_old = hpi->num_pages[0];
        pages_new = map_all_hugepages(&tmp_hp[hp_offset], hpi,
        if (pages_new < pages_old) {
                "%d not %d hugepages of size %u MB allocated\n",
                pages_new, pages_old,
                (unsigned)(hpi->hugepage_sz / 0x100000));

            int pages = pages_old - pages_new;

            nr_hugepages -= pages;
            hpi->num_pages[0] = pages_new;

        if (phys_addrs_available &&
                rte_eal_iova_mode() != RTE_IOVA_VA) {
            /* find physical addresses for each hugepage */
            if (find_physaddrs(&tmp_hp[hp_offset], hpi) < 0) {
                RTE_LOG(DEBUG, EAL, "Failed to find phys addr "
                    "for %u MB pages\n",
                    (unsigned int)(hpi->hugepage_sz / 0x100000));
            /* set physical addresses for each hugepage */
            if (set_physaddrs(&tmp_hp[hp_offset], hpi) < 0) {
                RTE_LOG(DEBUG, EAL, "Failed to set phys addr "
                    "for %u MB pages\n",
                    (unsigned int)(hpi->hugepage_sz / 0x100000));

        if (find_numasocket(&tmp_hp[hp_offset], hpi) < 0) {
            RTE_LOG(DEBUG, EAL, "Failed to find NUMA socket for %u MB pages\n",
                (unsigned)(hpi->hugepage_sz / 0x100000));

        qsort(&tmp_hp[hp_offset], hpi->num_pages[0],
              sizeof(struct hugepage_file), cmp_physaddr);

        /* remap all hugepages */
        if (map_all_hugepages(&tmp_hp[hp_offset], hpi, NULL, 0) !=
                hpi->num_pages[0]) {
            RTE_LOG(ERR, EAL, "Failed to remap %u MB pages\n",
                (unsigned)(hpi->hugepage_sz / 0x100000));

        /* unmap original mappings */
        if (unmap_all_hugepages_orig(&tmp_hp[hp_offset], hpi) < 0)

        /* we have processed a num of hugepages of this size, so inc offset */
        hp_offset += hpi->num_pages[0];

    huge_recover_sigbus();
    if (internal_config.memory == 0 && internal_config.force_sockets == 0)
        internal_config.memory = eal_get_hugepage_mem_size();

    nr_hugefiles = nr_hugepages;

    /* clean out the numbers of pages */
    for (i = 0; i < (int) internal_config.num_hugepage_sizes; i++)
        for (j = 0; j < RTE_MAX_NUMA_NODES; j++)
            internal_config.hugepage_info[i].num_pages[j] = 0;

    /* get hugepages for each socket */
    for (i = 0; i < nr_hugefiles; i++) {
        int socket = tmp_hp[i].socket_id;

        /* find a hugepage info with right size and increment num_pages */
        const int nb_hpsizes = RTE_MIN(MAX_HUGEPAGE_SIZES,
                (int)internal_config.num_hugepage_sizes);
        for (j = 0; j < nb_hpsizes; j++) {
            if (tmp_hp[i].size ==
                    internal_config.hugepage_info[j].hugepage_sz) {
                internal_config.hugepage_info[j].num_pages[socket]++;

    /* make a copy of socket_mem, needed for number of pages calculation */
    for (i = 0; i < RTE_MAX_NUMA_NODES; i++)
        memory[i] = internal_config.socket_mem[i];

    /* calculate final number of pages */
    nr_hugepages = calc_num_pages_per_socket(memory,
            internal_config.hugepage_info, used_hp,
            internal_config.num_hugepage_sizes);

    /* error if not enough memory available */
    if (nr_hugepages < 0)

    for (i = 0; i < (int) internal_config.num_hugepage_sizes; i++) {
        for (j = 0; j < RTE_MAX_NUMA_NODES; j++) {
            if (used_hp[i].num_pages[j] > 0) {
                    "Requesting %u pages of size %uMB"
                    " from socket %i\n",
                    used_hp[i].num_pages[j],
                    (used_hp[i].hugepage_sz / 0x100000),

    /* create shared memory */
    hugepage = create_shared_memory(eal_hugepage_info_path(),
            nr_hugefiles * sizeof(struct hugepage_file));

    if (hugepage == NULL) {
        RTE_LOG(ERR, EAL, "Failed to create shared memory!\n");
    memset(hugepage, 0, nr_hugefiles * sizeof(struct hugepage_file));

     * unmap pages that we won't need (looks at used_hp).
     * also, sets final_va to NULL on pages that were unmapped.
    if (unmap_unneeded_hugepages(tmp_hp, used_hp,
            internal_config.num_hugepage_sizes) < 0) {
        RTE_LOG(ERR, EAL, "Unmapping and locking hugepages failed!\n");

     * copy stuff from malloc'd hugepage* to the actual shared memory.
     * this procedure only copies those hugepages that have final_va
     * not NULL. has overflow protection.
    if (copy_hugepages_to_shared_mem(hugepage, nr_hugefiles,
            tmp_hp, nr_hugefiles) < 0) {
        RTE_LOG(ERR, EAL, "Copying tables to shared memory failed!\n");

    /* free the hugepage backing files */
    if (internal_config.hugepage_unlink &&
            unlink_hugepage_files(tmp_hp, internal_config.num_hugepage_sizes) < 0) {
        RTE_LOG(ERR, EAL, "Unlinking hugepage files failed!\n");

    /* free the temporary hugepage table */

    /* first memseg index shall be 0 after incrementing it below */
    for (i = 0; i < nr_hugefiles; i++) {

        /* if this is a new section, create a new memseg */
        else if (hugepage[i].socket_id != hugepage[i-1].socket_id)
        else if (hugepage[i].size != hugepage[i-1].size)
#ifdef RTE_ARCH_PPC_64
        /* On the PPC64 architecture, mmap always starts from a higher
         * virtual address and works down towards lower addresses. Here,
         * both the physical and the virtual addresses are in descending
         * order */
        else if ((hugepage[i-1].physaddr - hugepage[i].physaddr) !=
        else if (((unsigned long)hugepage[i-1].final_va -
                (unsigned long)hugepage[i].final_va) != hugepage[i].size)
        else if ((hugepage[i].physaddr - hugepage[i-1].physaddr) !=
        else if (((unsigned long)hugepage[i].final_va -
                (unsigned long)hugepage[i-1].final_va) != hugepage[i].size)

            if (j == RTE_MAX_MEMSEG)

            mcfg->memseg[j].iova = hugepage[i].physaddr;
            mcfg->memseg[j].addr = hugepage[i].final_va;
            mcfg->memseg[j].len = hugepage[i].size;
            mcfg->memseg[j].socket_id = hugepage[i].socket_id;
            mcfg->memseg[j].hugepage_sz = hugepage[i].size;

        /* continuation of previous memseg */
#ifdef RTE_ARCH_PPC_64
            /* Use the phy and virt address of the last page as segment
             * address for IBM Power architecture */
            mcfg->memseg[j].iova = hugepage[i].physaddr;
            mcfg->memseg[j].addr = hugepage[i].final_va;
            mcfg->memseg[j].len += mcfg->memseg[j].hugepage_sz;

        hugepage[i].memseg_id = j;

    if (i < nr_hugefiles) {
        RTE_LOG(ERR, EAL, "Can only reserve %d pages "
            "from %d requested\n"
            "Current %s=%d is not enough\n"
            "Please either increase it or request less amount "
            i, nr_hugefiles, RTE_STR(CONFIG_RTE_MAX_MEMSEG),

        munmap(hugepage, nr_hugefiles * sizeof(struct hugepage_file));

    huge_recover_sigbus();
    if (hugepage != NULL)
        munmap(hugepage, nr_hugefiles * sizeof(struct hugepage_file));
 * uses fstat to report the size of a file on disk
    if (fstat(fd, &st) < 0)

 * This creates the memory mappings in the secondary process to match that of
 * the server process. It goes through each memory segment in the DPDK runtime
 * configuration and finds the hugepages which form that segment, mapping them
 * in order to form a contiguous block in the virtual memory space
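 *
 * Illustrative invocation sketch (not part of this file): a secondary
 * process is typically started as "./app --proc-type=secondary"; if ASLR or
 * conflicting mappings get in the way, both primary and secondary can be
 * pinned with --base-virtaddr (e.g. --base-virtaddr=0x100000000) so that
 * they agree on segment addresses.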
rte_eal_hugepage_attach(void)
    const struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
    struct hugepage_file *hp = NULL;
    unsigned num_hp = 0;
    unsigned i, s = 0; /* s used to track the segment number */
    unsigned max_seg = RTE_MAX_MEMSEG;
    int fd, fd_zero = -1, fd_hugepage = -1;

    if (aslr_enabled() > 0) {
        RTE_LOG(WARNING, EAL, "WARNING: Address Space Layout Randomization "
            "(ASLR) is enabled in the kernel.\n");
        RTE_LOG(WARNING, EAL, " This may cause issues with mapping memory "
            "into secondary processes\n");

    test_phys_addrs_available();

    fd_zero = open("/dev/zero", O_RDONLY);
        RTE_LOG(ERR, EAL, "Could not open /dev/zero\n");
    fd_hugepage = open(eal_hugepage_info_path(), O_RDONLY);
    if (fd_hugepage < 0) {
        RTE_LOG(ERR, EAL, "Could not open %s\n", eal_hugepage_info_path());

    /* map all segments into memory to make sure we get the addrs */
    for (s = 0; s < RTE_MAX_MEMSEG; ++s) {

         * the first memory segment with len==0 is the one that
         * follows the last valid segment.
        if (mcfg->memseg[s].len == 0)

         * fdzero is mmapped to get a contiguous block of virtual
         * addresses of the appropriate memseg size.
         * use mmap to get identical addresses as the primary process.
        base_addr = mmap(mcfg->memseg[s].addr, mcfg->memseg[s].len,
#ifdef RTE_ARCH_PPC_64
                MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB,
        if (base_addr == MAP_FAILED ||
                base_addr != mcfg->memseg[s].addr) {
            if (base_addr != MAP_FAILED) {
                /* errno is stale, don't use */
                RTE_LOG(ERR, EAL, "Could not mmap %llu bytes "
                    "in /dev/zero at [%p], got [%p] - "
                    "please use '--base-virtaddr' option\n",
                    (unsigned long long)mcfg->memseg[s].len,
                    mcfg->memseg[s].addr, base_addr);
                munmap(base_addr, mcfg->memseg[s].len);
                RTE_LOG(ERR, EAL, "Could not mmap %llu bytes "
                    "in /dev/zero at [%p]: '%s'\n",
                    (unsigned long long)mcfg->memseg[s].len,
                    mcfg->memseg[s].addr, strerror(errno));
            if (aslr_enabled() > 0) {
                RTE_LOG(ERR, EAL, "It is recommended to "
                    "disable ASLR in the kernel "
                    "and retry running both primary "
                    "and secondary processes\n");

    size = getFileSize(fd_hugepage);
    hp = mmap(NULL, size, PROT_READ, MAP_PRIVATE, fd_hugepage, 0);
    if (hp == MAP_FAILED) {
        RTE_LOG(ERR, EAL, "Could not mmap %s\n", eal_hugepage_info_path());

    num_hp = size / sizeof(struct hugepage_file);
    RTE_LOG(DEBUG, EAL, "Analysing %u files\n", num_hp);

    while (s < RTE_MAX_MEMSEG && mcfg->memseg[s].len > 0) {
        void *addr, *base_addr;
        uintptr_t offset = 0;
        size_t mapping_size;
         * free previously mapped memory so we can map the
         * hugepages into the space
        base_addr = mcfg->memseg[s].addr;
        munmap(base_addr, mcfg->memseg[s].len);

        /* find the hugepages for this segment and map them
         * we don't need to worry about order, as the server sorted the
         * entries before it did the second mmap of them */
        for (i = 0; i < num_hp && offset < mcfg->memseg[s].len; i++) {
            if (hp[i].memseg_id == (int)s) {
                fd = open(hp[i].filepath, O_RDWR);
                    RTE_LOG(ERR, EAL, "Could not open %s\n",
                mapping_size = hp[i].size;
                addr = mmap(RTE_PTR_ADD(base_addr, offset),
                        mapping_size, PROT_READ | PROT_WRITE,
                close(fd); /* close file both on success and on failure */
                if (addr == MAP_FAILED ||
                        addr != RTE_PTR_ADD(base_addr, offset)) {
                    RTE_LOG(ERR, EAL, "Could not mmap %s\n",
                offset += mapping_size;

        RTE_LOG(DEBUG, EAL, "Mapped segment %u of size 0x%llx\n", s,
            (unsigned long long)mcfg->memseg[s].len);

    /* unmap the hugepage config file, since we are done using it */

    for (i = 0; i < max_seg && mcfg->memseg[i].len > 0; i++)
        munmap(mcfg->memseg[i].addr, mcfg->memseg[i].len);
    if (hp != NULL && hp != MAP_FAILED)
    if (fd_hugepage >= 0)

rte_eal_using_phys_addrs(void)
    return phys_addrs_available;