X-Git-Url: https://gerrit.fd.io/r/gitweb?a=blobdiff_plain;f=lib%2Flibrte_eal%2Flinuxapp%2Feal%2Feal_memory.c;h=bac969a1285a1cb6f51b45a79fb26a9ab2583b02;hb=6e7cbd63706f3435b9d9a2057a37db1da01db9a7;hp=a956bb226e19f79733b799d4546f17dd1c323e0b;hpb=6b3e017e5d25f15da73f7700f7f2ac553ef1a2e9;p=deb_dpdk.git

diff --git a/lib/librte_eal/linuxapp/eal/eal_memory.c b/lib/librte_eal/linuxapp/eal/eal_memory.c
index a956bb22..bac969a1 100644
--- a/lib/librte_eal/linuxapp/eal/eal_memory.c
+++ b/lib/librte_eal/linuxapp/eal/eal_memory.c
@@ -2,6 +2,7 @@
  * BSD LICENSE
  *
  * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 6WIND.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -30,46 +31,16 @@
  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */
-/* BSD LICENSE
- *
- * Copyright(c) 2013 6WIND.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- *   * Redistributions of source code must retain the above copyright
- *     notice, this list of conditions and the following disclaimer.
- *   * Redistributions in binary form must reproduce the above copyright
- *     notice, this list of conditions and the following disclaimer in
- *     the documentation and/or other materials provided with the
- *     distribution.
- *   * Neither the name of 6WIND S.A. nor the names of its
- *     contributors may be used to endorse or promote products derived
- *     from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
 
 #define _FILE_OFFSET_BITS 64
 #include 
 #include 
+#include 
 #include 
 #include 
 #include 
 #include 
 #include 
-#include 
 #include 
 #include 
 #include 
@@ -77,15 +48,17 @@
 #include 
 #include 
 #include 
-#include 
 #include 
 #include 
 #include 
 #include 
+#ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
+#include 
+#include 
+#endif
 #include 
 #include 
-#include 
 #include 
 #include 
 #include 
@@ -101,13 +74,6 @@
 
 #define PFN_MASK_SIZE 8
 
-#ifdef RTE_LIBRTE_XEN_DOM0
-int rte_xen_dom0_supported(void)
-{
-        return internal_config.xen_dom0_support;
-}
-#endif
-
 /**
  * @file
  * Huge page mapping under linux
@@ -122,36 +88,49 @@ int rte_xen_dom0_supported(void)
 
 static uint64_t baseaddr_offset;
 
-static unsigned proc_pagemap_readable;
+#ifdef RTE_ARCH_64
+/*
+ * Linux kernel uses a really high address as starting address for serving
+ * mmap calls. If there are addressing limitations and IOVA mode is VA,
+ * this starting address is likely too high for those devices. However, it
+ * is possible to use a lower address in the process virtual address space
+ * as with 64 bits there is a lot of available space.
+ *
+ * Current known limitations are 39 or 40 bits. Setting the starting address
+ * at 4GB implies there are 508GB or 1020GB for mapping the available
+ * hugepages. This is likely enough for most systems, although a device with
+ * addressing limitations should call rte_dev_check_dma_mask to ensure all
+ * memory is within the supported range.
+ */
+static uint64_t baseaddr = 0x100000000;
+#endif
+
+static bool phys_addrs_available = true;
 
 #define RANDOMIZE_VA_SPACE_FILE "/proc/sys/kernel/randomize_va_space"
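The trade-off described in the new comment above can be seen with a few lines of standalone code. The sketch below is illustrative only and is not part of the patch: on a 64-bit Linux host it requests an anonymous mapping at the same 4GB hint that baseaddr uses and reports whether the kernel honoured the hint, which is exactly the condition get_virtual_area() has to detect later in this file.

/* Standalone illustration of mapping at a low hint address; not DPDK code. */
#include <stdio.h>
#include <stdint.h>
#include <sys/mman.h>

int main(void)
{
        void *hint = (void *)(uintptr_t)0x100000000ULL; /* 4GB, like baseaddr */
        size_t len = 2 * 1024 * 1024;
        void *addr = mmap(hint, len, PROT_READ | PROT_WRITE,
                          MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

        if (addr == MAP_FAILED) {
                perror("mmap");
                return 1;
        }
        /* Without MAP_FIXED the kernel is free to ignore the hint; callers
         * that need low addresses must check the result and retry. */
        printf("hint %p, got %p (%s)\n", hint, addr,
               addr == hint ? "hint honoured" : "hint ignored");
        munmap(addr, len);
        return 0;
}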
 
 static void
-test_proc_pagemap_readable(void)
+test_phys_addrs_available(void)
 {
-        int fd = open("/proc/self/pagemap", O_RDONLY);
+        uint64_t tmp = 0;
+        phys_addr_t physaddr;
 
-        if (fd < 0) {
+        if (!rte_eal_has_hugepages()) {
                 RTE_LOG(ERR, EAL,
-                        "Cannot open /proc/self/pagemap: %s. "
-                        "virt2phys address translation will not work\n",
-                        strerror(errno));
+                        "Started without hugepages support, physical addresses not available\n");
+                phys_addrs_available = false;
                 return;
         }
 
-        /* Is readable */
-        close(fd);
-        proc_pagemap_readable = 1;
-}
-
-/* Lock page in physical memory and prevent from swapping. */
-int
-rte_mem_lock_page(const void *virt)
-{
-        unsigned long virtual = (unsigned long)virt;
-        int page_size = getpagesize();
-        unsigned long aligned = (virtual & ~ (page_size - 1));
-        return mlock((void*)aligned, page_size);
+        physaddr = rte_mem_virt2phy(&tmp);
+        if (physaddr == RTE_BAD_PHYS_ADDR) {
+                if (rte_eal_iova_mode() == RTE_IOVA_PA)
+                        RTE_LOG(ERR, EAL,
+                                "Cannot obtain physical addresses: %s. "
+                                "Only vfio will function.\n",
+                                strerror(errno));
+                phys_addrs_available = false;
+        }
 }
 
 /*
@@ -166,32 +145,9 @@ rte_mem_virt2phy(const void *virtaddr)
         int page_size;
         off_t offset;
 
-        /* when using dom0, /proc/self/pagemap always returns 0, check in
-         * dpdk memory by browsing the memsegs */
-        if (rte_xen_dom0_supported()) {
-                struct rte_mem_config *mcfg;
-                struct rte_memseg *memseg;
-                unsigned i;
-
-                mcfg = rte_eal_get_configuration()->mem_config;
-                for (i = 0; i < RTE_MAX_MEMSEG; i++) {
-                        memseg = &mcfg->memseg[i];
-                        if (memseg->addr == NULL)
-                                break;
-                        if (virtaddr > memseg->addr &&
-                                        virtaddr < RTE_PTR_ADD(memseg->addr,
-                                                memseg->len)) {
-                                return memseg->phys_addr +
-                                        RTE_PTR_DIFF(virtaddr, memseg->addr);
-                        }
-                }
-
-                return RTE_BAD_PHYS_ADDR;
-        }
-
         /* Cannot parse /proc/self/pagemap, no need to log errors everywhere */
-        if (!proc_pagemap_readable)
-                return RTE_BAD_PHYS_ADDR;
+        if (!phys_addrs_available)
+                return RTE_BAD_IOVA;
 
         /* standard page size */
         page_size = getpagesize();
@@ -200,7 +156,7 @@ rte_mem_virt2phy(const void *virtaddr)
         if (fd < 0) {
                 RTE_LOG(ERR, EAL, "%s(): cannot open /proc/self/pagemap: %s\n",
                         __func__, strerror(errno));
-                return RTE_BAD_PHYS_ADDR;
+                return RTE_BAD_IOVA;
         }
 
         virt_pfn = (unsigned long)virtaddr / page_size;
@@ -209,7 +165,7 @@
                 RTE_LOG(ERR, EAL, "%s(): seek error in /proc/self/pagemap: %s\n",
                                 __func__, strerror(errno));
                 close(fd);
-                return RTE_BAD_PHYS_ADDR;
+                return RTE_BAD_IOVA;
         }
 
         retval = read(fd, &page, PFN_MASK_SIZE);
@@ -217,24 +173,35 @@
         if (retval < 0) {
                 RTE_LOG(ERR, EAL, "%s(): cannot read /proc/self/pagemap: %s\n",
                                 __func__, strerror(errno));
-                return RTE_BAD_PHYS_ADDR;
+                return RTE_BAD_IOVA;
         } else if (retval != PFN_MASK_SIZE) {
                 RTE_LOG(ERR, EAL, "%s(): read %d bytes from /proc/self/pagemap "
                                 "but expected %d:\n",
                                 __func__, retval, PFN_MASK_SIZE);
-                return RTE_BAD_PHYS_ADDR;
+                return RTE_BAD_IOVA;
         }
 
         /*
          * the pfn (page frame number) are bits 0-54 (see
          * pagemap.txt in linux Documentation)
          */
+        if ((page & 0x7fffffffffffffULL) == 0)
+                return RTE_BAD_IOVA;
+
         physaddr = ((page & 0x7fffffffffffffULL) * page_size)
                 + ((unsigned long)virtaddr % page_size);
 
         return physaddr;
 }
 
+rte_iova_t
+rte_mem_virt2iova(const void *virtaddr)
+{
+        if (rte_eal_iova_mode() == RTE_IOVA_VA)
+                return (uintptr_t)virtaddr;
+        return rte_mem_virt2phy(virtaddr);
+}
+
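rte_mem_virt2phy() above relies on the /proc/self/pagemap format documented in the kernel tree (one 64-bit entry per virtual page, PFN in bits 0-54). A minimal self-contained sketch of the same lookup, using local names (virt_to_phys, PAGEMAP_PFN_MASK) that are not DPDK symbols and with error handling reduced to returning 0:

#include <fcntl.h>
#include <stdint.h>
#include <sys/types.h>
#include <unistd.h>

#define PAGEMAP_PFN_MASK 0x7fffffffffffffULL   /* bits 0-54 hold the PFN */

/* Translate a virtual address of this process into a physical address. */
static uint64_t virt_to_phys(const void *virt)
{
        long page_size = sysconf(_SC_PAGESIZE);
        uint64_t entry = 0;
        int fd = open("/proc/self/pagemap", O_RDONLY);

        if (fd < 0)
                return 0;
        /* one 64-bit entry per virtual page, indexed by virtual page number */
        off_t offset = (uintptr_t)virt / page_size * sizeof(entry);
        if (pread(fd, &entry, sizeof(entry), offset) != sizeof(entry))
                entry = 0;
        close(fd);
        if ((entry & PAGEMAP_PFN_MASK) == 0)
                return 0; /* not present, or PFN hidden from unprivileged readers */
        return (entry & PAGEMAP_PFN_MASK) * page_size
                + (uintptr_t)virt % page_size;
}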
 /*
  * For each hugepage in hugepg_tbl, fill the physaddr value. We find
  * it by browsing the /proc/self/pagemap special file.
@@ -242,7 +209,7 @@
 static int
 find_physaddrs(struct hugepage_file *hugepg_tbl, struct hugepage_info *hpi)
 {
-        unsigned i;
+        unsigned int i;
         phys_addr_t addr;
 
         for (i = 0; i < hpi->num_pages[0]; i++) {
@@ -254,6 +221,22 @@ find_physaddrs(struct hugepage_file *hugepg_tbl, struct hugepage_info *hpi)
         return 0;
 }
 
+/*
+ * For each hugepage in hugepg_tbl, fill the physaddr value sequentially.
+ */
+static int
+set_physaddrs(struct hugepage_file *hugepg_tbl, struct hugepage_info *hpi)
+{
+        unsigned int i;
+        static phys_addr_t addr;
+
+        for (i = 0; i < hpi->num_pages[0]; i++) {
+                hugepg_tbl[i].physaddr = addr;
+                addr += hugepg_tbl[i].size;
+        }
+        return 0;
+}
+
 /*
  * Check whether address-space layout randomization is enabled in
  * the kernel. This is important for multi-process as it can prevent
@@ -284,6 +267,23 @@ aslr_enabled(void)
         }
 }
 
+static void *
+get_addr_hint(void)
+{
+        if (internal_config.base_virtaddr != 0) {
+                return (void *) (uintptr_t)
+                        (internal_config.base_virtaddr +
+                         baseaddr_offset);
+        } else {
+#ifdef RTE_ARCH_64
+                return (void *) (uintptr_t) (baseaddr +
+                                baseaddr_offset);
+#else
+                return NULL;
+#endif
+        }
+}
+
 /*
  * Try to mmap *size bytes in /dev/zero. If it is successful, return the
  * pointer to the mmap'd area and keep *size unmodified. Else, retry
@@ -294,16 +294,10 @@
 static void *
 get_virtual_area(size_t *size, size_t hugepage_sz)
 {
-        void *addr;
+        void *addr, *addr_hint;
         int fd;
         long aligned_addr;
 
-        if (internal_config.base_virtaddr != 0) {
-                addr = (void*) (uintptr_t) (internal_config.base_virtaddr +
-                                baseaddr_offset);
-        }
-        else addr = NULL;
-
         RTE_LOG(DEBUG, EAL, "Ask a virtual area of 0x%zx bytes\n", *size);
 
         fd = open("/dev/zero", O_RDONLY);
@@ -312,10 +306,25 @@
                 return NULL;
         }
         do {
-                addr = mmap(addr,
-                                (*size) + hugepage_sz, PROT_READ, MAP_PRIVATE, fd, 0);
-                if (addr == MAP_FAILED)
+                addr_hint = get_addr_hint();
+
+                addr = mmap(addr_hint,
+                        (*size) + hugepage_sz, PROT_READ,
+#ifdef RTE_ARCH_PPC_64
+                        MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB,
+#else
+                        MAP_PRIVATE,
+#endif
+                        fd, 0);
+                if (addr == MAP_FAILED) {
+                        /* map failed. Let's try with less memory */
                         *size -= hugepage_sz;
+                } else if (addr_hint && addr != addr_hint) {
+                        /* hint was not used. Try with another offset */
+                        munmap(addr, (*size) + hugepage_sz);
+                        addr = MAP_FAILED;
+                        baseaddr_offset += 0x100000000;
+                }
         } while (addr == MAP_FAILED && *size > 0);
 
         if (addr == MAP_FAILED) {
@@ -359,26 +368,94 @@ static int huge_wrap_sigsetjmp(void)
 {
         return sigsetjmp(huge_jmpenv, 1);
 }
 
+#ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
+/* Callback for numa library. */
+void numa_error(char *where)
+{
+        RTE_LOG(ERR, EAL, "%s failed: %s\n", where, strerror(errno));
+}
+#endif
+
 /*
  * Mmap all hugepages of hugepage table: it first open a file in
  * hugetlbfs, then mmap() hugepage_sz data in it. If orig is set, the
  * virtual address is stored in hugepg_tbl[i].orig_va, else it is stored
  * in hugepg_tbl[i].final_va. The second mapping (when orig is 0) tries to
- * map continguous physical blocks in contiguous virtual blocks.
+ * map contiguous physical blocks in contiguous virtual blocks.
  */
 static unsigned
-map_all_hugepages(struct hugepage_file *hugepg_tbl,
-                struct hugepage_info *hpi, int orig)
+map_all_hugepages(struct hugepage_file *hugepg_tbl, struct hugepage_info *hpi,
+                uint64_t *essential_memory __rte_unused, int orig)
 {
         int fd;
         unsigned i;
         void *virtaddr;
         void *vma_addr = NULL;
         size_t vma_len = 0;
+#ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
+        int node_id = -1;
+        int essential_prev = 0;
+        int oldpolicy;
+        struct bitmask *oldmask = numa_allocate_nodemask();
+        bool have_numa = true;
+        unsigned long maxnode = 0;
+
+        /* Check if kernel supports NUMA. */
+        if (numa_available() != 0) {
+                RTE_LOG(DEBUG, EAL, "NUMA is not supported.\n");
+                have_numa = false;
+        }
+
+        if (orig && have_numa) {
+                RTE_LOG(DEBUG, EAL, "Trying to obtain current memory policy.\n");
+                if (get_mempolicy(&oldpolicy, oldmask->maskp,
+                                  oldmask->size + 1, 0, 0) < 0) {
+                        RTE_LOG(ERR, EAL,
+                                "Failed to get current mempolicy: %s. "
+                                "Assuming MPOL_DEFAULT.\n", strerror(errno));
+                        oldpolicy = MPOL_DEFAULT;
+                }
+                for (i = 0; i < RTE_MAX_NUMA_NODES; i++)
+                        if (internal_config.socket_mem[i])
+                                maxnode = i + 1;
+        }
+#endif
 
         for (i = 0; i < hpi->num_pages[0]; i++) {
                 uint64_t hugepage_sz = hpi->hugepage_sz;
 
+#ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
+                if (maxnode) {
+                        unsigned int j;
+
+                        for (j = 0; j < maxnode; j++)
+                                if (essential_memory[j])
+                                        break;
+
+                        if (j == maxnode) {
+                                node_id = (node_id + 1) % maxnode;
+                                while (!internal_config.socket_mem[node_id]) {
+                                        node_id++;
+                                        node_id %= maxnode;
+                                }
+                                essential_prev = 0;
+                        } else {
+                                node_id = j;
+                                essential_prev = essential_memory[j];
+
+                                if (essential_memory[j] < hugepage_sz)
+                                        essential_memory[j] = 0;
+                                else
+                                        essential_memory[j] -= hugepage_sz;
+                        }
+
+                        RTE_LOG(DEBUG, EAL,
+                                "Setting policy MPOL_PREFERRED for socket %d\n",
+                                node_id);
+                        numa_set_preferred(node_id);
+                }
+#endif
+
                 if (orig) {
                         hugepg_tbl[i].file_id = i;
                         hugepg_tbl[i].size = hugepage_sz;
@@ -433,7 +510,7 @@
                 if (fd < 0) {
                         RTE_LOG(DEBUG, EAL, "%s(): open failed: %s\n", __func__,
                                         strerror(errno));
-                        return i;
+                        goto out;
                 }
 
                 /* map the segment, and populate page tables,
@@ -444,13 +521,16 @@
                         RTE_LOG(DEBUG, EAL, "%s(): mmap failed: %s\n", __func__,
                                         strerror(errno));
                         close(fd);
-                        return i;
+                        goto out;
                 }
 
                 if (orig) {
                         hugepg_tbl[i].orig_va = virtaddr;
                 }
                 else {
+                        /* rewrite physical addresses in IOVA as VA mode */
+                        if (rte_eal_iova_mode() == RTE_IOVA_VA)
+                                hugepg_tbl[i].physaddr = (uintptr_t)virtaddr;
                         hugepg_tbl[i].final_va = virtaddr;
                 }
 
@@ -469,7 +549,12 @@
                                 munmap(virtaddr, hugepage_sz);
                                 close(fd);
                                 unlink(hugepg_tbl[i].filepath);
-                                return i;
+#ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
+                                if (maxnode)
+                                        essential_memory[node_id] =
+                                                essential_prev;
+#endif
+                                goto out;
                         }
                         *(int *)virtaddr = 0;
                 }
@@ -480,7 +565,7 @@
                         RTE_LOG(DEBUG, EAL, "%s(): Locking file failed:%s \n",
                                 __func__, strerror(errno));
                         close(fd);
-                        return i;
+                        goto out;
                 }
 
                 close(fd);
@@ -489,6 +574,22 @@
                 vma_addr = (char *)vma_addr + hugepage_sz;
                 vma_len -= hugepage_sz;
         }
 
+out:
+#ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
+        if (maxnode) {
+                RTE_LOG(DEBUG, EAL,
+                        "Restoring previous memory policy: %d\n", oldpolicy);
+                if (oldpolicy == MPOL_DEFAULT) {
+                        numa_set_localalloc();
+                } else if (set_mempolicy(oldpolicy, oldmask->maskp,
+                                         oldmask->size + 1) < 0) {
+                        RTE_LOG(ERR, EAL, "Failed to restore mempolicy: %s\n",
+                                strerror(errno));
+                        numa_set_localalloc();
+                }
+        }
+        numa_free_cpumask(oldmask);
+#endif
         return i;
 }
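The memory-policy handling that map_all_hugepages() interleaves with its mapping loop - save the current policy with get_mempolicy(), steer page faults with numa_set_preferred(), then restore or fall back to numa_set_localalloc() - is easier to read in isolation. A sketch of that libnuma pattern, assuming libnuma is installed and `node` is a valid node id on the machine (link with -lnuma); it illustrates the calls, it is not the DPDK implementation:

#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <numa.h>
#include <numaif.h>

/* Prefer allocations from `node` for a while, then restore the old policy. */
static void run_preferring_node(int node)
{
        struct bitmask *oldmask = numa_allocate_nodemask();
        int oldpolicy;

        if (numa_available() != 0) {
                fprintf(stderr, "NUMA not supported\n");
                numa_free_nodemask(oldmask);
                return;
        }
        /* remember the current policy so it can be restored afterwards */
        if (get_mempolicy(&oldpolicy, oldmask->maskp,
                          oldmask->size + 1, 0, 0) < 0) {
                fprintf(stderr, "get_mempolicy: %s\n", strerror(errno));
                oldpolicy = MPOL_DEFAULT;
        }

        numa_set_preferred(node);       /* new pages now prefer `node` */
        /* ... allocate and touch memory here ... */

        if (oldpolicy == MPOL_DEFAULT)
                numa_set_localalloc();
        else if (set_mempolicy(oldpolicy, oldmask->maskp,
                               oldmask->size + 1) < 0)
                numa_set_localalloc();  /* same fallback the patch uses */

        numa_free_nodemask(oldmask);
}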
@@ -523,8 +624,8 @@ find_numasocket(struct hugepage_file *hugepg_tbl, struct hugepage_info *hpi)
 
         f = fopen("/proc/self/numa_maps", "r");
         if (f == NULL) {
-                RTE_LOG(NOTICE, EAL, "cannot open /proc/self/numa_maps,"
-                        " consider that all memory is in socket_id 0\n");
+                RTE_LOG(NOTICE, EAL, "NUMA support not available,"
+                        " consider that all memory is in socket_id 0\n");
                 return 0;
         }
 
@@ -573,6 +674,11 @@ find_numasocket(struct hugepage_file *hugepg_tbl, struct hugepage_info *hpi)
                         if (hugepg_tbl[i].orig_va == va) {
                                 hugepg_tbl[i].socket_id = socket_id;
                                 hp_count++;
+#ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
+                                RTE_LOG(DEBUG, EAL,
+                                        "Hugepage %s is on socket %d\n",
+                                        hugepg_tbl[i].filepath, socket_id);
+#endif
                         }
                 }
         }
@@ -592,12 +698,12 @@
 static int
 cmp_physaddr(const void *a, const void *b)
 {
 #ifndef RTE_ARCH_PPC_64
-        const struct hugepage_file *p1 = (const struct hugepage_file *)a;
-        const struct hugepage_file *p2 = (const struct hugepage_file *)b;
+        const struct hugepage_file *p1 = a;
+        const struct hugepage_file *p2 = b;
 #else
         /* PowerPC needs memory sorted in reverse order from x86 */
-        const struct hugepage_file *p1 = (const struct hugepage_file *)b;
-        const struct hugepage_file *p2 = (const struct hugepage_file *)a;
+        const struct hugepage_file *p1 = b;
+        const struct hugepage_file *p2 = a;
 #endif
         if (p1->physaddr < p2->physaddr)
                 return -1;
@@ -624,6 +730,8 @@
         }
         retval = mmap(NULL, mem_size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
         close(fd);
+        if (retval == MAP_FAILED)
+                return NULL;
         return retval;
 }
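The two lines added to create_shared_memory() above matter because mmap() reports failure as MAP_FAILED, not NULL, so a caller that only checks for NULL would use a bad pointer. A standalone sketch of the same create/truncate/map pattern with that check in place (path, permissions and size are arbitrary example values):

#include <fcntl.h>
#include <stddef.h>
#include <sys/mman.h>
#include <unistd.h>

/* Create (or truncate) a file and map it shared; NULL on any failure. */
static void *create_shared_file_map(const char *path, size_t size)
{
        void *map;
        int fd = open(path, O_CREAT | O_RDWR, 0600);

        if (fd < 0)
                return NULL;
        if (ftruncate(fd, size) < 0) {
                close(fd);
                return NULL;
        }
        map = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
        close(fd);      /* the mapping keeps the contents reachable */
        if (map == MAP_FAILED)
                return NULL;    /* the case the added check covers */
        return map;
}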
@@ -951,7 +1059,7 @@ rte_eal_hugepage_init(void)
         int nr_hugefiles, nr_hugepages = 0;
         void *addr;
 
-        test_proc_pagemap_readable();
+        test_phys_addrs_available();
 
         memset(used_hp, 0, sizeof(used_hp));
 
@@ -967,7 +1075,10 @@
                                 strerror(errno));
                         return -1;
                 }
-                mcfg->memseg[0].phys_addr = (phys_addr_t)(uintptr_t)addr;
+                if (rte_eal_iova_mode() == RTE_IOVA_VA)
+                        mcfg->memseg[0].iova = (uintptr_t)addr;
+                else
+                        mcfg->memseg[0].iova = RTE_BAD_IOVA;
                 mcfg->memseg[0].addr = addr;
                 mcfg->memseg[0].hugepage_sz = RTE_PGSIZE_4K;
                 mcfg->memseg[0].len = internal_config.memory;
@@ -975,17 +1086,6 @@
                 return 0;
         }
 
-        /* check if app runs on Xen Dom0 */
-        if (internal_config.xen_dom0_support) {
-#ifdef RTE_LIBRTE_XEN_DOM0
-                /* use dom0_mm kernel driver to init memory */
-                if (rte_xen_dom0_memory_init() < 0)
-                        return -1;
-                else
-                        return 0;
-#endif
-        }
-
         /* calculate total number of hugepages available. at this point we haven't
          * yet started sorting them so they all are on socket 0 */
         for (i = 0; i < (int) internal_config.num_hugepage_sizes; i++) {
@@ -1011,6 +1111,11 @@
 
         huge_register_sigbus();
 
+        /* make a copy of socket_mem, needed for balanced allocation. */
+        for (i = 0; i < RTE_MAX_NUMA_NODES; i++)
+                memory[i] = internal_config.socket_mem[i];
+
+        /* map all hugepages and sort them */
         for (i = 0; i < (int)internal_config.num_hugepage_sizes; i ++){
                 unsigned pages_old, pages_new;
 
@@ -1028,7 +1133,8 @@
                 /* map all hugepages available */
                 pages_old = hpi->num_pages[0];
-                pages_new = map_all_hugepages(&tmp_hp[hp_offset], hpi, 1);
+                pages_new = map_all_hugepages(&tmp_hp[hp_offset], hpi,
+                                              memory, 1);
                 if (pages_new < pages_old) {
                         RTE_LOG(DEBUG, EAL,
                                 "%d not %d hugepages of size %u MB allocated\n",
@@ -1043,11 +1149,23 @@
                         continue;
                 }
 
-                /* find physical addresses and sockets for each hugepage */
-                if (find_physaddrs(&tmp_hp[hp_offset], hpi) < 0){
-                        RTE_LOG(DEBUG, EAL, "Failed to find phys addr for %u MB pages\n",
-                                        (unsigned)(hpi->hugepage_sz / 0x100000));
-                        goto fail;
+                if (phys_addrs_available &&
+                                rte_eal_iova_mode() != RTE_IOVA_VA) {
+                        /* find physical addresses for each hugepage */
+                        if (find_physaddrs(&tmp_hp[hp_offset], hpi) < 0) {
+                                RTE_LOG(DEBUG, EAL, "Failed to find phys addr "
+                                        "for %u MB pages\n",
+                                        (unsigned int)(hpi->hugepage_sz / 0x100000));
+                                goto fail;
+                        }
+                } else {
+                        /* set physical addresses for each hugepage */
+                        if (set_physaddrs(&tmp_hp[hp_offset], hpi) < 0) {
+                                RTE_LOG(DEBUG, EAL, "Failed to set phys addr "
+                                        "for %u MB pages\n",
+                                        (unsigned int)(hpi->hugepage_sz / 0x100000));
+                                goto fail;
+                        }
                 }
 
                 if (find_numasocket(&tmp_hp[hp_offset], hpi) < 0){
@@ -1060,7 +1178,7 @@
                                 sizeof(struct hugepage_file), cmp_physaddr);
 
                 /* remap all hugepages */
-                if (map_all_hugepages(&tmp_hp[hp_offset], hpi, 0) !=
+                if (map_all_hugepages(&tmp_hp[hp_offset], hpi, NULL, 0) !=
                     hpi->num_pages[0]) {
                         RTE_LOG(ERR, EAL, "Failed to remap %u MB pages\n",
                                         (unsigned)(hpi->hugepage_sz / 0x100000));
@@ -1210,7 +1328,7 @@
                         if (j == RTE_MAX_MEMSEG)
                                 break;
 
-                        mcfg->memseg[j].phys_addr = hugepage[i].physaddr;
+                        mcfg->memseg[j].iova = hugepage[i].physaddr;
                         mcfg->memseg[j].addr = hugepage[i].final_va;
                         mcfg->memseg[j].len = hugepage[i].size;
                         mcfg->memseg[j].socket_id = hugepage[i].socket_id;
@@ -1221,7 +1339,7 @@
 #ifdef RTE_ARCH_PPC_64
                         /* Use the phy and virt address of the last page as segment
                          * address for IBM Power architecture */
-                        mcfg->memseg[j].phys_addr = hugepage[i].physaddr;
+                        mcfg->memseg[j].iova = hugepage[i].physaddr;
                         mcfg->memseg[j].addr = hugepage[i].final_va;
 #endif
                         mcfg->memseg[j].len += mcfg->memseg[j].hugepage_sz;
@@ -1289,18 +1407,7 @@
                                 "into secondary processes\n");
         }
 
-        test_proc_pagemap_readable();
-
-        if (internal_config.xen_dom0_support) {
-#ifdef RTE_LIBRTE_XEN_DOM0
-                if (rte_xen_dom0_memory_attach() < 0) {
-                        RTE_LOG(ERR, EAL, "Failed to attach memory segments of primary "
-                                        "process\n");
-                        return -1;
-                }
-                return 0;
-#endif
-        }
+        test_phys_addrs_available();
 
         fd_zero = open("/dev/zero", O_RDONLY);
         if (fd_zero < 0) {
@@ -1330,7 +1437,13 @@
                  * use mmap to get identical addresses as the primary process.
                  */
                 base_addr = mmap(mcfg->memseg[s].addr, mcfg->memseg[s].len,
-                                 PROT_READ, MAP_PRIVATE, fd_zero, 0);
+                                 PROT_READ,
+#ifdef RTE_ARCH_PPC_64
+                                 MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB,
+#else
+                                 MAP_PRIVATE,
+#endif
+                                 fd_zero, 0);
                 if (base_addr == MAP_FAILED ||
                         base_addr != mcfg->memseg[s].addr) {
                         max_seg = s;
@@ -1426,3 +1539,9 @@ error:
         close(fd_hugepage);
         return -1;
 }
+
+int
+rte_eal_using_phys_addrs(void)
+{
+        return phys_addrs_available;
+}
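To round the patch off, a short usage sketch of the interfaces it introduces or redirects - rte_mem_virt2iova() and rte_eal_using_phys_addrs() - as an application built against this (17.11-era) EAL might call them. It assumes both declarations are visible through rte_memory.h and trims initialization arguments and cleanup; treat it as an illustration, not a reference:

#include <inttypes.h>
#include <stdio.h>
#include <rte_eal.h>
#include <rte_malloc.h>
#include <rte_memory.h>

int main(int argc, char **argv)
{
        if (rte_eal_init(argc, argv) < 0)
                return 1;

        void *buf = rte_malloc(NULL, 4096, 4096);
        if (buf == NULL)
                return 1;

        /* In IOVA-as-VA mode this is simply the virtual address; otherwise
         * it falls back to the pagemap-based rte_mem_virt2phy(). */
        rte_iova_t iova = rte_mem_virt2iova(buf);

        printf("virt=%p iova=0x%" PRIx64 " physical addresses %savailable\n",
               buf, (uint64_t)iova,
               rte_eal_using_phys_addrs() ? "" : "not ");

        rte_free(buf);
        return 0;
}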