New upstream version 18.02
deb_dpdk.git: lib/librte_eal/bsdapp/eal/eal_memory.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation
 */
#include <sys/mman.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/sysctl.h>
#include <inttypes.h>
#include <stdio.h>
#include <stdlib.h>
#include <fcntl.h>

#include <rte_eal.h>
#include <rte_eal_memconfig.h>
#include <rte_log.h>
#include <rte_string_fns.h>
#include "eal_private.h"
#include "eal_internal_cfg.h"
#include "eal_filesystem.h"

#define EAL_PAGE_SIZE (sysconf(_SC_PAGESIZE))
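
/*
 * Memory initialization for the FreeBSD EAL. Physically contiguous buffers
 * are reserved by the contigmem kernel module; this file maps them into the
 * process and looks up their physical addresses through the hw.contigmem
 * sysctl tree (see rte_eal_hugepage_init() below).
 */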
/*
 * Get physical address of any mapped virtual address in the current process.
 */
phys_addr_t
rte_mem_virt2phy(const void *virtaddr)
{
        /* XXX not implemented. This function is only used by
         * rte_mempool_virt2iova() when hugepages are disabled. */
        (void)virtaddr;
        return RTE_BAD_IOVA;
}
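
/*
 * The IOVA of a virtual address is obtained the same way as its physical
 * address, so this simply forwards to rte_mem_virt2phy() and likewise
 * returns RTE_BAD_IOVA while that lookup is not implemented.
 */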
rte_iova_t
rte_mem_virt2iova(const void *virtaddr)
{
        return rte_mem_virt2phy(virtaddr);
}
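
/*
 * Called in the primary process: map each contigmem buffer, query its
 * physical address and record both in the shared memseg table so that
 * secondary processes can attach to the same memory later.
 */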
int
rte_eal_hugepage_init(void)
{
        struct rte_mem_config *mcfg;
        uint64_t total_mem = 0;
        void *addr;
        unsigned i, j, seg_idx = 0;

        /* get pointer to global configuration */
        mcfg = rte_eal_get_configuration()->mem_config;

        /* for debug purposes, hugetlbfs can be disabled */
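        /*
         * Note: the malloc()'d virtual address below also serves as the
         * segment's IOVA, so this fallback is only usable for tests that
         * never hand the memory to hardware.
         */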
        if (internal_config.no_hugetlbfs) {
                addr = malloc(internal_config.memory);
                if (addr == NULL) {
                        RTE_LOG(ERR, EAL, "Failed to allocate %" PRIu64 " bytes\n",
                                        internal_config.memory);
                        return -1;
                }
                mcfg->memseg[0].iova = (rte_iova_t)(uintptr_t)addr;
                mcfg->memseg[0].addr = addr;
                mcfg->memseg[0].hugepage_sz = RTE_PGSIZE_4K;
                mcfg->memseg[0].len = internal_config.memory;
                mcfg->memseg[0].socket_id = 0;
                return 0;
        }

        /* map all contigmem buffers and record them as memory segments */
        for (i = 0; i < internal_config.num_hugepage_sizes; i++) {
                struct hugepage_info *hpi;

                hpi = &internal_config.hugepage_info[i];
                for (j = 0; j < hpi->num_pages[0]; j++) {
                        struct rte_memseg *seg;
                        rte_iova_t physaddr;
                        int error;
                        size_t sysctl_size = sizeof(physaddr);
                        char physaddr_str[64];
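
                        /*
                         * Each contigmem buffer is selected through the
                         * mmap() offset: buffer j is requested at an offset
                         * of j system pages (EAL_PAGE_SIZE).
                         */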
                        addr = mmap(NULL, hpi->hugepage_sz, PROT_READ|PROT_WRITE,
                                    MAP_SHARED, hpi->lock_descriptor,
                                    j * EAL_PAGE_SIZE);
                        if (addr == MAP_FAILED) {
                                RTE_LOG(ERR, EAL, "Failed to mmap buffer %u from %s\n",
                                                j, hpi->hugedir);
                                return -1;
                        }
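
                        /*
                         * The contigmem module exports the physical address
                         * of each buffer as the read-only sysctl
                         * hw.contigmem.physaddr.<index>.
                         */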
                        snprintf(physaddr_str, sizeof(physaddr_str), "hw.contigmem"
                                        ".physaddr.%d", j);
                        error = sysctlbyname(physaddr_str, &physaddr, &sysctl_size,
                                        NULL, 0);
                        if (error < 0) {
                                RTE_LOG(ERR, EAL, "Failed to get physical addr for buffer %u "
                                                "from %s\n", j, hpi->hugedir);
                                return -1;
                        }
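
                        /*
                         * Record the mapping in the shared memseg table; the
                         * virtual address stored here is what secondary
                         * processes re-map with MAP_FIXED in
                         * rte_eal_hugepage_attach().
                         */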
                        seg = &mcfg->memseg[seg_idx++];
                        seg->addr = addr;
                        seg->iova = physaddr;
                        seg->hugepage_sz = hpi->hugepage_sz;
                        seg->len = hpi->hugepage_sz;
                        seg->nchannel = mcfg->nchannel;
                        seg->nrank = mcfg->nrank;
                        seg->socket_id = 0;
                        /* keep track of mapped memory for the limit check below */
                        total_mem += seg->len;

                        RTE_LOG(INFO, EAL, "Mapped memory segment %u @ %p: physaddr:0x%"
                                        PRIx64", len %zu\n",
                                        seg_idx, addr, physaddr, hpi->hugepage_sz);
                        if (total_mem >= internal_config.memory ||
                                        seg_idx >= RTE_MAX_MEMSEG)
                                break;
                }
        }
        return 0;
}
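
/*
 * Called in secondary processes: re-create the primary process's mappings by
 * mapping the same contigmem buffers at the virtual addresses recorded in
 * the shared memseg table.
 */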
int
rte_eal_hugepage_attach(void)
{
        const struct hugepage_info *hpi = MAP_FAILED;
        int fd_hugepage_info, fd_hugepage = -1;
        unsigned i = 0;
        struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;

        /* Obtain a file descriptor for hugepage_info */
        fd_hugepage_info = open(eal_hugepage_info_path(), O_RDONLY);
        if (fd_hugepage_info < 0) {
                RTE_LOG(ERR, EAL, "Could not open %s\n", eal_hugepage_info_path());
                return -1;
        }

        /* Map the shared hugepage_info into the process address space */
        hpi = mmap(NULL, sizeof(struct hugepage_info), PROT_READ, MAP_PRIVATE,
                        fd_hugepage_info, 0);
        if (hpi == MAP_FAILED) {
                RTE_LOG(ERR, EAL, "Could not mmap %s\n", eal_hugepage_info_path());
                goto error;
        }

        /* Obtain a file descriptor for contiguous memory */
        fd_hugepage = open(hpi->hugedir, O_RDWR);
        if (fd_hugepage < 0) {
                RTE_LOG(ERR, EAL, "Could not open %s\n", hpi->hugedir);
                goto error;
        }

        /* Map the contiguous memory into each memory segment */
        for (i = 0; i < hpi->num_pages[0]; i++) {

                void *addr;
                struct rte_memseg *seg = &mcfg->memseg[i];
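
                /*
                 * MAP_FIXED re-creates the mapping at the exact virtual
                 * address recorded by the primary process; any other address
                 * would leave the shared memseg table pointing at the wrong
                 * place, so a mismatch is treated as a fatal error.
                 */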
                addr = mmap(seg->addr, hpi->hugepage_sz, PROT_READ|PROT_WRITE,
                            MAP_SHARED|MAP_FIXED, fd_hugepage,
                            i * EAL_PAGE_SIZE);
                if (addr == MAP_FAILED || addr != seg->addr) {
                        RTE_LOG(ERR, EAL, "Failed to mmap buffer %u from %s\n",
                                i, hpi->hugedir);
                        goto error;
                }

        }

        /* hugepage_info is no longer required */
        munmap((void *)(uintptr_t)hpi, sizeof(struct hugepage_info));
        close(fd_hugepage_info);
        close(fd_hugepage);
        return 0;

error:
        /* unmap hugepage_info if it was successfully mapped above */
        if (hpi != MAP_FAILED)
                munmap((void *)(uintptr_t)hpi, sizeof(struct hugepage_info));
        if (fd_hugepage_info >= 0)
                close(fd_hugepage_info);
        if (fd_hugepage >= 0)
                close(fd_hugepage);
        return -1;
}
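
/*
 * rte_mem_virt2phy() is not implemented on FreeBSD (see above), so report
 * that physical-address translation of arbitrary virtual addresses is not
 * in use.
 */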
int
rte_eal_using_phys_addrs(void)
{
        return 0;
}