/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation
 */
#include <sys/mman.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/sysctl.h>
#include <inttypes.h>
#include <errno.h>
#include <string.h>
#include <fcntl.h>

#include <rte_eal.h>
#include <rte_eal_memconfig.h>
#include <rte_errno.h>
#include <rte_log.h>
#include <rte_string_fns.h>
#include "eal_private.h"
#include "eal_internal_cfg.h"
#include "eal_filesystem.h"

#define EAL_PAGE_SIZE (sysconf(_SC_PAGESIZE))

/*
 * Get physical address of any mapped virtual address in the current process.
 */
phys_addr_t
rte_mem_virt2phy(const void *virtaddr)
{
	/* XXX not implemented. This function is only used by
	 * rte_mempool_virt2iova() when hugepages are disabled. */
	(void)virtaddr;
	return RTE_BAD_IOVA;
}
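
/*
 * Get IO virtual address of any mapped virtual address in the current
 * process. On FreeBSD this simply defers to rte_mem_virt2phy().
 */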
rte_iova_t
rte_mem_virt2iova(const void *virtaddr)
{
	return rte_mem_virt2phy(virtaddr);
}

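/*
 * Back the requested memory either with an anonymous 4K-page mapping (when
 * hugetlbfs is disabled) or with the pre-reserved contigmem buffers, placing
 * each page into a memseg list and recording its physical address.
 */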
int
rte_eal_hugepage_init(void)
{
	struct rte_mem_config *mcfg;
	uint64_t total_mem = 0;
	void *addr;
	unsigned int i, j, seg_idx = 0;

	/* get pointer to global configuration */
	mcfg = rte_eal_get_configuration()->mem_config;

	/* for debug purposes, hugetlbfs can be disabled */
	if (internal_config.no_hugetlbfs) {
		struct rte_memseg_list *msl;
		struct rte_fbarray *arr;
		struct rte_memseg *ms;
		uint64_t page_sz;
		int n_segs, cur_seg;

		/* create a memseg list */
		msl = &mcfg->memsegs[0];

		page_sz = RTE_PGSIZE_4K;
		n_segs = internal_config.memory / page_sz;

		if (rte_fbarray_init(&msl->memseg_arr, "nohugemem", n_segs,
				sizeof(struct rte_memseg))) {
			RTE_LOG(ERR, EAL, "Cannot allocate memseg list\n");
			return -1;
		}

		addr = mmap(NULL, internal_config.memory,
				PROT_READ | PROT_WRITE,
				MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
		if (addr == MAP_FAILED) {
			RTE_LOG(ERR, EAL, "%s: mmap() failed: %s\n", __func__,
					strerror(errno));
			return -1;
		}
		msl->base_va = addr;
		msl->page_sz = page_sz;
		msl->len = internal_config.memory;
		msl->socket_id = 0;

		/* populate memsegs. each memseg is 1 page long */
		for (cur_seg = 0; cur_seg < n_segs; cur_seg++) {
			arr = &msl->memseg_arr;

			ms = rte_fbarray_get(arr, cur_seg);
			if (rte_eal_iova_mode() == RTE_IOVA_VA)
				ms->iova = (uintptr_t)addr;
			else
				ms->iova = RTE_BAD_IOVA;
			ms->addr = addr;
			ms->hugepage_sz = page_sz;
			ms->len = page_sz;
			ms->socket_id = 0;

			rte_fbarray_set_used(arr, cur_seg);

			addr = RTE_PTR_ADD(addr, page_sz);
		}
		return 0;
	}

	/* map all hugepages and sort them */
	for (i = 0; i < internal_config.num_hugepage_sizes; i++) {
		struct hugepage_info *hpi;
		rte_iova_t prev_end = 0;
		int prev_ms_idx = -1;
		uint64_t page_sz, mem_needed;
		unsigned int n_pages, max_pages;

		hpi = &internal_config.hugepage_info[i];
		page_sz = hpi->hugepage_sz;
		max_pages = hpi->num_pages[0];
		mem_needed = RTE_ALIGN_CEIL(internal_config.memory - total_mem,
				page_sz);

		n_pages = RTE_MIN(mem_needed / page_sz, max_pages);

		for (j = 0; j < n_pages; j++) {
			struct rte_memseg_list *msl;
			struct rte_fbarray *arr;
			struct rte_memseg *seg;
			int msl_idx, ms_idx;
			rte_iova_t physaddr;
			int error;
			size_t sysctl_size = sizeof(physaddr);
			char physaddr_str[64];
			bool is_adjacent;

			/* first, check if this segment is IOVA-adjacent to
			 * the previous one.
			 */
			snprintf(physaddr_str, sizeof(physaddr_str),
					"hw.contigmem.physaddr.%d", j);
			error = sysctlbyname(physaddr_str, &physaddr,
					&sysctl_size, NULL, 0);
			if (error < 0) {
				RTE_LOG(ERR, EAL, "Failed to get physical addr for buffer %u "
						"from %s\n", j, hpi->hugedir);
				return -1;
			}

			is_adjacent = prev_end != 0 && physaddr == prev_end;
			prev_end = physaddr + hpi->hugepage_sz;

			for (msl_idx = 0; msl_idx < RTE_MAX_MEMSEG_LISTS;
					msl_idx++) {
				bool empty, need_hole;
				msl = &mcfg->memsegs[msl_idx];
				arr = &msl->memseg_arr;

				if (msl->page_sz != page_sz)
					continue;

				empty = arr->count == 0;

				/* we need a hole if this isn't an empty memseg
				 * list, and if previous segment was not
				 * adjacent to current one.
				 */
				need_hole = !empty && !is_adjacent;

				/* we need 1, plus hole if not adjacent */
				ms_idx = rte_fbarray_find_next_n_free(arr,
						0, 1 + (need_hole ? 1 : 0));

				/* memseg list is full? */
				if (ms_idx < 0)
					continue;

				if (need_hole && prev_ms_idx == ms_idx - 1)
					ms_idx++;
				prev_ms_idx = ms_idx;

				break;
			}
			if (msl_idx == RTE_MAX_MEMSEG_LISTS) {
				RTE_LOG(ERR, EAL, "Could not find space for memseg. Please increase %s and/or %s in configuration.\n",
					RTE_STR(CONFIG_RTE_MAX_MEMSEG_PER_TYPE),
					RTE_STR(CONFIG_RTE_MAX_MEM_PER_TYPE));
				return -1;
			}
			arr = &msl->memseg_arr;
			seg = rte_fbarray_get(arr, ms_idx);

			addr = RTE_PTR_ADD(msl->base_va,
					(size_t)msl->page_sz * ms_idx);

			/* address is already mapped in memseg list, so using
			 * MAP_FIXED here is safe.
			 */
			addr = mmap(addr, page_sz, PROT_READ|PROT_WRITE,
					MAP_SHARED | MAP_FIXED,
					hpi->lock_descriptor,
					j * EAL_PAGE_SIZE);
			if (addr == MAP_FAILED) {
				RTE_LOG(ERR, EAL, "Failed to mmap buffer %u from %s\n",
						j, hpi->hugedir);
				return -1;
			}

			seg->addr = addr;
			seg->iova = physaddr;
			seg->hugepage_sz = page_sz;
			seg->len = page_sz;
			seg->nchannel = mcfg->nchannel;
			seg->nrank = mcfg->nrank;
			seg->socket_id = 0;

			rte_fbarray_set_used(arr, ms_idx);

			RTE_LOG(INFO, EAL, "Mapped memory segment %u @ %p: physaddr:0x%"
					PRIx64", len %zu\n",
					seg_idx++, addr, physaddr, page_sz);

			total_mem += seg->len;
		}
		if (total_mem >= internal_config.memory)
			break;
	}
	if (total_mem < internal_config.memory) {
		RTE_LOG(ERR, EAL, "Couldn't reserve requested memory, "
				"requested: %" PRIu64 "M "
				"available: %" PRIu64 "M\n",
				internal_config.memory >> 20, total_mem >> 20);
		return -1;
	}
	return 0;
}

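/*
 * Walk callback used by rte_eal_hugepage_attach() below: map each memseg
 * from the contigmem file descriptor at the exact address recorded in the
 * shared memseg list (hence MAP_FIXED and the check against ms->addr).
 */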
struct attach_walk_args {
	int fd_hugepage;
	int seg_idx;
};
static int
attach_segment(const struct rte_memseg_list *msl, const struct rte_memseg *ms,
		void *arg)
{
	struct attach_walk_args *wa = arg;
	void *addr;

	if (msl->external)
		return 0;

	addr = mmap(ms->addr, ms->len, PROT_READ | PROT_WRITE,
			MAP_SHARED | MAP_FIXED, wa->fd_hugepage,
			wa->seg_idx * EAL_PAGE_SIZE);
	if (addr == MAP_FAILED || addr != ms->addr)
		return -1;
	wa->seg_idx++;

	return 0;
}

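/*
 * Attach to hugepage memory in a secondary process: re-open the contiguous
 * memory file for each hugepage size and map its buffers into the memseg
 * layout created by the primary process.
 */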
int
rte_eal_hugepage_attach(void)
{
	const struct hugepage_info *hpi;
	int fd_hugepage = -1;
	unsigned int i;

	hpi = &internal_config.hugepage_info[0];

	for (i = 0; i < internal_config.num_hugepage_sizes; i++) {
		const struct hugepage_info *cur_hpi = &hpi[i];
		struct attach_walk_args wa;

		memset(&wa, 0, sizeof(wa));

		/* Obtain a file descriptor for contiguous memory */
		fd_hugepage = open(cur_hpi->hugedir, O_RDWR);
		if (fd_hugepage < 0) {
			RTE_LOG(ERR, EAL, "Could not open %s\n",
					cur_hpi->hugedir);
			goto error;
		}
		wa.fd_hugepage = fd_hugepage;
		wa.seg_idx = 0;

		/* Map the contiguous memory into each memory segment */
		if (rte_memseg_walk(attach_segment, &wa) < 0) {
			RTE_LOG(ERR, EAL, "Failed to mmap buffer %u from %s\n",
				wa.seg_idx, cur_hpi->hugedir);
			goto error;
		}

		close(fd_hugepage);
		fd_hugepage = -1;
	}

	/* hugepage_info is no longer required */
	return 0;

error:
	if (fd_hugepage >= 0)
		close(fd_hugepage);
	return -1;
}

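/* Always false on FreeBSD: rte_mem_virt2phy() is not implemented here. */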
int
rte_eal_using_phys_addrs(void)
{
	return 0;
}

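/*
 * Calculate how much memory a single memseg list should cover, given the
 * page size and the remaining memory budget, rounded to the page size.
 */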
static uint64_t
get_mem_amount(uint64_t page_sz, uint64_t max_mem)
{
	uint64_t area_sz, max_pages;

	/* limit to RTE_MAX_MEMSEG_PER_LIST pages or RTE_MAX_MEM_MB_PER_LIST */
	max_pages = RTE_MAX_MEMSEG_PER_LIST;
	max_mem = RTE_MIN((uint64_t)RTE_MAX_MEM_MB_PER_LIST << 20, max_mem);

	area_sz = RTE_MIN(page_sz * max_pages, max_mem);

	/* make sure the list isn't smaller than the page size */
	area_sz = RTE_MAX(area_sz, page_sz);

	return RTE_ALIGN(area_sz, page_sz);
}

#define MEMSEG_LIST_FMT "memseg-%" PRIu64 "k-%i-%i"
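/* Initialize a memseg list and its backing fbarray for one page size. */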
static int
alloc_memseg_list(struct rte_memseg_list *msl, uint64_t page_sz,
		int n_segs, int socket_id, int type_msl_idx)
{
	char name[RTE_FBARRAY_NAME_LEN];

	snprintf(name, sizeof(name), MEMSEG_LIST_FMT, page_sz >> 10, socket_id,
		 type_msl_idx);
	if (rte_fbarray_init(&msl->memseg_arr, name, n_segs,
			sizeof(struct rte_memseg))) {
		RTE_LOG(ERR, EAL, "Cannot allocate memseg list: %s\n",
			rte_strerror(rte_errno));
		return -1;
	}

	msl->page_sz = page_sz;
	msl->socket_id = socket_id;
	msl->base_va = NULL;

	RTE_LOG(DEBUG, EAL, "Memseg list allocated: 0x%zxkB at socket %i\n",
			(size_t)page_sz >> 10, socket_id);

	return 0;
}

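/* Reserve contiguous virtual address space for an entire memseg list. */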
static int
alloc_va_space(struct rte_memseg_list *msl)
{
	uint64_t page_sz;
	size_t mem_sz;
	void *addr;
	int flags = 0;

#ifdef RTE_ARCH_PPC_64
	flags |= MAP_HUGETLB;
#endif

	page_sz = msl->page_sz;
	mem_sz = page_sz * msl->memseg_arr.len;

	addr = eal_get_virtual_area(msl->base_va, &mem_sz, page_sz, 0, flags);
	if (addr == NULL) {
		if (rte_errno == EADDRNOTAVAIL)
			RTE_LOG(ERR, EAL, "Could not mmap %llu bytes at [%p] - please use '--base-virtaddr' option\n",
				(unsigned long long)mem_sz, msl->base_va);
		else
			RTE_LOG(ERR, EAL, "Cannot reserve memory\n");
		return -1;
	}
	msl->base_va = addr;
	msl->len = mem_sz;

	return 0;
}

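/*
 * Pre-create memseg lists in the primary process for every supported
 * hugepage size, sized by both the build-time limits and the amount of
 * contigmem actually available.
 */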
static int
memseg_primary_init(void)
{
	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
	int hpi_idx, msl_idx = 0;
	struct rte_memseg_list *msl;
	uint64_t max_mem, total_mem;

	/* no-huge does not need this at all */
	if (internal_config.no_hugetlbfs)
		return 0;

	/* FreeBSD has an issue where a core dump will dump the entire memory
	 * contents, including anonymous zero-page memory. Therefore, while we
	 * will be limiting the total amount of memory to RTE_MAX_MEM_MB, we
	 * will also further limit it to whatever memory is available to us
	 * through the contigmem driver (plus spacing blocks).
	 *
	 * So, at each stage, we will be checking how much memory we are
	 * preallocating, and adjusting all the values accordingly.
	 */

	max_mem = (uint64_t)RTE_MAX_MEM_MB << 20;
	total_mem = 0;

	/* create memseg lists */
	for (hpi_idx = 0; hpi_idx < (int) internal_config.num_hugepage_sizes;
			hpi_idx++) {
		uint64_t max_type_mem, total_type_mem = 0;
		uint64_t avail_mem;
		int type_msl_idx, max_segs, avail_segs, total_segs = 0;
		struct hugepage_info *hpi;
		uint64_t hugepage_sz;

		hpi = &internal_config.hugepage_info[hpi_idx];
		hugepage_sz = hpi->hugepage_sz;

		/* no NUMA support on FreeBSD */

		/* check if we've already exceeded total memory amount */
		if (total_mem >= max_mem)
			break;

		/* first, calculate theoretical limits according to config */
		max_type_mem = RTE_MIN(max_mem - total_mem,
			(uint64_t)RTE_MAX_MEM_MB_PER_TYPE << 20);
		max_segs = RTE_MAX_MEMSEG_PER_TYPE;

		/* Now, limit all of that to whatever will actually be
		 * available to us, because without dynamic allocation support,
		 * all of that extra memory will be sitting there being useless
		 * and slowing down core dumps in case of a crash.
		 *
		 * We need (N*2)-1 segments because we cannot guarantee that
		 * each segment will be IOVA-contiguous with the previous one,
		 * so we will allocate more and put spaces in between segments
		 * that are non-contiguous.
		 */
		avail_segs = (hpi->num_pages[0] * 2) - 1;
		avail_mem = avail_segs * hugepage_sz;

		max_type_mem = RTE_MIN(avail_mem, max_type_mem);
		max_segs = RTE_MIN(avail_segs, max_segs);

		type_msl_idx = 0;
		while (total_type_mem < max_type_mem &&
				total_segs < max_segs) {
			uint64_t cur_max_mem, cur_mem;
			unsigned int n_segs;

			if (msl_idx >= RTE_MAX_MEMSEG_LISTS) {
				RTE_LOG(ERR, EAL,
					"No more space in memseg lists, please increase %s\n",
					RTE_STR(CONFIG_RTE_MAX_MEMSEG_LISTS));
				return -1;
			}

			msl = &mcfg->memsegs[msl_idx++];

			cur_max_mem = max_type_mem - total_type_mem;

			cur_mem = get_mem_amount(hugepage_sz,
					cur_max_mem);
			n_segs = cur_mem / hugepage_sz;

			if (alloc_memseg_list(msl, hugepage_sz, n_segs,
					0, type_msl_idx))
				return -1;

			total_segs += msl->memseg_arr.len;
			total_type_mem = total_segs * hugepage_sz;
			type_msl_idx++;

			if (alloc_va_space(msl)) {
				RTE_LOG(ERR, EAL, "Cannot allocate VA space for memseg list\n");
				return -1;
			}
		}
		total_mem += total_type_mem;
	}
	return 0;
}

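/*
 * In a secondary process, attach to the fbarrays created by the primary and
 * reserve matching virtual address space for each non-empty memseg list.
 */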
static int
memseg_secondary_init(void)
{
	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
	int msl_idx = 0;
	struct rte_memseg_list *msl;

	for (msl_idx = 0; msl_idx < RTE_MAX_MEMSEG_LISTS; msl_idx++) {

		msl = &mcfg->memsegs[msl_idx];

		/* skip empty memseg lists */
		if (msl->memseg_arr.len == 0)
			continue;

		if (rte_fbarray_attach(&msl->memseg_arr)) {
			RTE_LOG(ERR, EAL, "Cannot attach to primary process memseg lists\n");
			return -1;
		}

		/* preallocate VA space */
		if (alloc_va_space(msl)) {
			RTE_LOG(ERR, EAL, "Cannot preallocate VA space for hugepage memory\n");
			return -1;
		}
	}

	return 0;
}

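/* Set up memseg lists, dispatching on process type. */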
int
rte_eal_memseg_init(void)
{
	return rte_eal_process_type() == RTE_PROC_PRIMARY ?
			memseg_primary_init() :
			memseg_secondary_init();
}