2 * Copyright (c) 2017 Cisco and/or its affiliates.
3 * Licensed under the Apache License, Version 2.0 (the "License");
4 * you may not use this file except in compliance with the License.
5 * You may obtain a copy of the License at:
7 * http://www.apache.org/licenses/LICENSE-2.0
9 * Unless required by applicable law or agreed to in writing, software
10 * distributed under the License is distributed on an "AS IS" BASIS,
11 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 * See the License for the specific language governing permissions and
13 * limitations under the License.
18 #include <sys/types.h>
21 #include <sys/mount.h>
24 #include <linux/mempolicy.h>
25 #include <linux/memfd.h>
27 #include <vppinfra/clib.h>
28 #include <vppinfra/mem.h>
29 #include <vppinfra/time.h>
30 #include <vppinfra/format.h>
31 #include <vppinfra/clib_error.h>
32 #include <vppinfra/linux/syscall.h>
33 #include <vppinfra/linux/sysfs.h>
/* Linux-specific fcntl sealing and memfd/mmap constants, defined locally
 * so the file builds against libc headers that predate them.
 * NOTE(review): the matching #endif lines are not visible in this listing. */
35 #ifndef F_LINUX_SPECIFIC_BASE
36 #define F_LINUX_SPECIFIC_BASE 1024
40 #define F_ADD_SEALS (F_LINUX_SPECIFIC_BASE + 9)
41 #define F_GET_SEALS (F_LINUX_SPECIFIC_BASE + 10)
43 #define F_SEAL_SEAL 0x0001 /* prevent further seals from being set */
44 #define F_SEAL_SHRINK 0x0002 /* prevent file from shrinking */
45 #define F_SEAL_GROW 0x0004 /* prevent file from growing */
46 #define F_SEAL_WRITE 0x0008 /* prevent writes */
/* memfd_create() flag requesting hugetlb backing (kernel >= 4.14) */
50 #define MFD_HUGETLB 0x0004U
/* shift for encoding log2 hugepage size into mmap() flags */
53 #ifndef MAP_HUGE_SHIFT
54 #define MAP_HUGE_SHIFT 26
/* MAP_FIXED variant that fails instead of clobbering an existing mapping */
57 #ifndef MAP_FIXED_NOREPLACE
58 #define MAP_FIXED_NOREPLACE 0x100000
/* Return the system's default hugepage size in bytes.
 * Parses the "Hugepagesize:" field of /proc/meminfo; the size is kept in
 * kB until the final multiply. Falls back to the regular page size when
 * /proc/meminfo cannot be opened.
 * NOTE(review): return type, braces and some local declarations are
 * missing from this listing. */
62 clib_mem_get_default_hugepage_size (void)
64 unformat_input_t input;
72 * If the kernel doesn't support hugepages, /proc/meminfo won't
73 * say anything about it. Use the regular page size as a default.
/* default: regular page size, expressed in kB to match meminfo units */
75 size = clib_mem_get_page_size () / 1024;
77 if ((fd = open ("/proc/meminfo", 0)) == -1)
80 unformat_init_clib_file (&input, fd);
82 while (unformat_check_input (&input) != UNFORMAT_END_OF_INPUT)
84 if (unformat (&input, "Hugepagesize:%_%u kB", &size))
87 unformat_skip_line (&input);
89 unformat_free (&input);
/* size was accumulated in kB; convert to bytes */
92 return 1024ULL * size;
/* Fallback for kernels where memfd_create(MFD_HUGETLB) is unavailable
 * (older than 4.14): scan /proc/meminfo for "Hugepagesize:" and return
 * log2 of the default hugepage size, or CLIB_MEM_PAGE_SZ_UNKNOWN on
 * failure. */
95 static clib_mem_page_sz_t
96 legacy_get_log2_default_hugepage_size (void)
98 clib_mem_page_sz_t log2_page_size = CLIB_MEM_PAGE_SZ_UNKNOWN;
102 if ((fp = fopen ("/proc/meminfo", "r")) == NULL)
103 return CLIB_MEM_PAGE_SZ_UNKNOWN;
/* scan whitespace-delimited tokens until the Hugepagesize label */
105 while (fscanf (fp, "%32s", tmp) > 0)
106 if (strncmp ("Hugepagesize:", tmp, 13) == 0)
109 if (fscanf (fp, "%u", &size) > 0)
/* meminfo reports kB, hence +10 (= log2 of 1024) */
110 log2_page_size = 10 + min_log2 (size);
115 return log2_page_size;
/* One-time initialization of the global clib_mem_main: record the system
 * page size, the default hugepage size, and a bitmap of present NUMA
 * nodes (probed by mlock'ing a scratch page and asking move_pages() which
 * node it can report for each candidate node id). Returns early if
 * log2_page_sz is already initialized, making the call idempotent. */
119 clib_mem_main_init ()
121 clib_mem_main_t *mm = &clib_mem_main;
126 if (mm->log2_page_sz != CLIB_MEM_PAGE_SZ_UNKNOWN)
129 /* system page size */
130 page_size = sysconf (_SC_PAGESIZE);
131 mm->log2_page_sz = min_log2 (page_size);
133 /* default system hugepage size */
134 if ((fd = memfd_create ("test", MFD_HUGETLB)) != -1)
136 mm->log2_default_hugepage_sz = clib_mem_get_fd_log2_page_size (fd);
139 else /* likely kernel older than 4.14 */
140 mm->log2_default_hugepage_sz = legacy_get_log2_default_hugepage_size ();
/* scratch page used only for the NUMA-node probe below */
143 va = mmap (0, page_size, PROT_READ | PROT_WRITE, MAP_PRIVATE |
144 MAP_ANONYMOUS, -1, 0);
145 if (va == MAP_FAILED)
/* lock the page so it is resident and has a definite NUMA placement */
148 if (mlock (va, page_size))
151 for (int i = 0; i < CLIB_MAX_NUMAS; i++)
154 if (move_pages (0, 1, &va, &i, &status, 0) == 0)
155 mm->numa_node_bitmap |= 1ULL << i;
159 munmap (va, page_size);
/* Return the page size backing the file descriptor, taken from
 * fstat()'s st_blksize (for hugetlbfs/memfd files this is the page size).
 * NOTE(review): the error-return statement after the fstat check is
 * missing from this listing. */
163 clib_mem_get_fd_page_size (int fd)
165 struct stat st = { 0 };
166 if (fstat (fd, &st) == -1)
168 return st.st_blksize;
/* Return log2 of the fd's backing page size, or CLIB_MEM_PAGE_SZ_UNKNOWN
 * when the page size cannot be determined (clib_mem_get_fd_page_size
 * returned 0). */
172 clib_mem_get_fd_log2_page_size (int fd)
174 uword page_size = clib_mem_get_fd_page_size (fd);
175 return page_size ? min_log2 (page_size) : CLIB_MEM_PAGE_SZ_UNKNOWN;
/* Add a pseudo-random, page-aligned offset (derived from the CPU
 * timestamp counter) to *requested_va as a lightweight ASLR measure.
 * NOTE(review): the bit_mask assignments selected by the page-size
 * branches are missing from this listing — the mask width presumably
 * shrinks as the page size grows; confirm against the full file. */
179 clib_mem_vm_randomize_va (uword * requested_va,
180 clib_mem_page_sz_t log2_page_size)
184 if (log2_page_size <= 12)
186 else if (log2_page_size > 12 && log2_page_size <= 16)
192 (clib_cpu_time_now () & bit_mask) * (1ull << log2_page_size);
/* Create an anonymous sealable memfd named 'name' and seal it against
 * shrinking (F_SEAL_SHRINK), so mappings cannot be invalidated by a
 * truncate. On success the fd is presumably stored via *fdp (the
 * assignment/return lines are missing from this listing); on failure a
 * clib error is returned. */
196 clib_mem_create_fd (char *name, int *fdp)
202 if ((fd = memfd_create (name, MFD_ALLOW_SEALING)) == -1)
203 return clib_error_return_unix (0, "memfd_create");
205 if ((fcntl (fd, F_ADD_SEALS, F_SEAL_SHRINK)) == -1)
208 return clib_error_return_unix (0, "fcntl (F_ADD_SEALS)");
/* Obtain a hugepage-backed file descriptor. Fast path: memfd_create with
 * MFD_HUGETLB. Fallback (old kernels): create a throwaway hugetlbfs
 * mount under /tmp, open a file inside it, then lazily unmount
 * (MNT_DETACH) and remove the directory — the open fd keeps the file
 * alive. A static flag remembers when MFD_HUGETLB is unsupported so the
 * fast path is only probed once. */
216 clib_mem_create_hugetlb_fd (char *name, int *fdp)
218 clib_error_t *err = 0;
220 static int memfd_hugetlb_supported = 1;
222 char template[] = "/tmp/hugepage_mount.XXXXXX";
227 if (memfd_hugetlb_supported)
229 if ((fd = memfd_create (name, MFD_HUGETLB)) != -1)
232 /* avoid further tries if memfd MFD_HUGETLB is not supported */
/* name-length check distinguishes "flag unsupported" EINVAL from
 * "name too long" EINVAL */
233 if (errno == EINVAL && strnlen (name, 256) <= 249)
234 memfd_hugetlb_supported = 0;
/* fallback: transient private hugetlbfs mount */
237 mount_dir = mkdtemp (template);
239 return clib_error_return_unix (0, "mkdtemp \'%s\'", template);
241 if (mount ("none", (char *) mount_dir, "hugetlbfs", 0, NULL))
243 rmdir ((char *) mount_dir);
244 err = clib_error_return_unix (0, "mount hugetlb directory '%s'",
248 filename = format (0, "%s/%s%c", mount_dir, name, 0);
249 fd = open ((char *) filename, O_CREAT | O_RDWR, 0755);
/* detach-unmount immediately; the open fd pins the backing file */
250 umount2 ((char *) mount_dir, MNT_DETACH);
251 rmdir ((char *) mount_dir);
254 err = clib_error_return_unix (0, "open");
/* Allocate virtual memory according to the flags in 'a': optionally
 * shared (fd-backed via memfd or hugetlbfs), hugepage-backed, locked,
 * and/or bound to a NUMA node. Saves the caller's NUMA memory policy,
 * binds to a->numa_node for the mmap, then restores the old policy.
 * Results (address, page size, page count) are written back into 'a'.
 * NOTE(review): many interior lines (declarations, braces, error 'goto'
 * cleanup paths, final return) are missing from this listing. */
263 clib_mem_vm_ext_alloc (clib_mem_vm_alloc_t * a)
266 clib_error_t *err = 0;
273 long unsigned int old_mask[16] = { 0 };
275 /* save old numa mem policy if needed */
276 if (a->flags & (CLIB_MEM_VM_F_NUMA_PREFER | CLIB_MEM_VM_F_NUMA_FORCE))
279 rv = get_mempolicy (&old_mpol, old_mask, sizeof (old_mask) * 8 + 1,
/* policy-save failure is only fatal when binding is forced to a
 * non-default node */
284 if (a->numa_node != 0 && (a->flags & CLIB_MEM_VM_F_NUMA_FORCE) != 0)
286 err = clib_error_return_unix (0, "get_mempolicy");
294 if (a->flags & CLIB_MEM_VM_F_LOCKED)
295 mmap_flags |= MAP_LOCKED;
297 /* if we are creating shared segment, we need file descriptor */
298 if (a->flags & CLIB_MEM_VM_F_SHARED)
300 mmap_flags |= MAP_SHARED;
301 /* if hugepages are needed we need to create mount point */
302 if (a->flags & CLIB_MEM_VM_F_HUGETLB)
304 if ((err = clib_mem_create_hugetlb_fd (a->name, &fd)))
307 mmap_flags |= MAP_LOCKED;
311 if ((err = clib_mem_create_fd (a->name, &fd)))
315 log2_page_size = clib_mem_get_fd_log2_page_size (fd);
316 if (log2_page_size == 0)
318 err = clib_error_return_unix (0, "cannot determine page size");
/* when a VA hint exists, randomize it and pin the mapping there */
324 clib_mem_vm_randomize_va (&a->requested_va, log2_page_size);
325 mmap_flags |= MAP_FIXED;
328 else /* not CLIB_MEM_VM_F_SHARED */
330 mmap_flags |= MAP_PRIVATE | MAP_ANONYMOUS;
331 if (a->flags & CLIB_MEM_VM_F_HUGETLB)
333 mmap_flags |= MAP_HUGETLB;
338 log2_page_size = min_log2 (sysconf (_SC_PAGESIZE));
/* round the request up to whole pages */
342 n_pages = ((a->size - 1) >> log2_page_size) + 1;
344 if (a->flags & CLIB_MEM_VM_F_HUGETLB_PREALLOC)
346 err = clib_sysfs_prealloc_hugepages (a->numa_node, log2_page_size,
/* size the backing file before mapping it */
354 if ((ftruncate (fd, (u64) n_pages * (1 << log2_page_size))) == -1)
356 err = clib_error_return_unix (0, "ftruncate");
363 long unsigned int mask[16] = { 0 };
364 mask[0] = 1 << a->numa_node;
365 rv = set_mempolicy (MPOL_BIND, mask, sizeof (mask) * 8 + 1);
366 if (rv == -1 && a->numa_node != 0 &&
367 (a->flags & CLIB_MEM_VM_F_NUMA_FORCE) != 0)
369 err = clib_error_return_unix (0, "set_mempolicy");
374 addr = mmap (uword_to_pointer (a->requested_va, void *), a->size,
375 (PROT_READ | PROT_WRITE), mmap_flags, fd, 0);
376 if (addr == MAP_FAILED)
378 err = clib_error_return_unix (0, "mmap");
382 /* re-apply old numa memory policy */
383 if (old_mpol != -1 &&
384 set_mempolicy (old_mpol, old_mask, sizeof (old_mask) * 8 + 1) == -1)
386 err = clib_error_return_unix (0, "set_mempolicy");
390 a->log2_page_size = log2_page_size;
391 a->n_pages = n_pages;
394 CLIB_MEM_UNPOISON (addr, a->size);
/* Free memory previously allocated by clib_mem_vm_ext_alloc.
 * NOTE(review): only this single free call is visible; the size argument
 * here covers one page — presumably additional lines (fd close, etc.)
 * are missing from this listing; confirm against the full file. */
407 clib_mem_vm_ext_free (clib_mem_vm_alloc_t * a)
411 clib_mem_vm_free (a->addr, 1ull << a->log2_page_size);
/* Reserve (PROT_NONE) an address range of 'size' bytes aligned to
 * 1 << log2_page_sz, plus one extra system page immediately BEFORE the
 * returned address (later used to hold the map header). If 'start' is
 * non-zero the reservation is placed exactly there via
 * MAP_FIXED_NOREPLACE; otherwise the kernel picks an address and the
 * over-allocation is trimmed to achieve the requested alignment.
 * Returns the aligned start address, or ~0 on failure. */
418 clib_mem_vm_reserve (uword start, uword size, clib_mem_page_sz_t log2_page_sz)
420 clib_mem_main_t *mm = &clib_mem_main;
421 uword pagesize = 1ULL << log2_page_sz;
422 uword sys_page_sz = 1ULL << mm->log2_page_sz;
426 size = round_pow2 (size, pagesize);
428 /* in addition to the requested reservation, we also reserve one system
429 * page (typically 4K) adjacent to the start of the reservation */
433 /* start address is provided, so we just need to make sure we are not
434 * replacing existing map */
/* caller-provided start must already be page-size aligned */
435 if (start & pow2_mask (log2_page_sz))
438 base = (void *) start - sys_page_sz;
439 base = mmap (base, size + sys_page_sz, PROT_NONE,
440 MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED_NOREPLACE, -1, 0);
441 return (base == MAP_FAILED) ? ~0 : start;
444 /* to make sure that we get reservation aligned to page_size we need to
445 * request one additional page as mmap will return us address which is
446 * aligned only to system page size */
447 base = mmap (0, size + pagesize, PROT_NONE,
448 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
450 if (base == MAP_FAILED)
453 /* return additional space at the end of allocation */
454 p = base + size + pagesize;
455 n_bytes = (uword) p & pow2_mask (log2_page_sz);
462 /* return additional space at the start of allocation */
463 n_bytes = pagesize - sys_page_sz - n_bytes;
466 munmap (base, n_bytes);
/* skip past the extra system page kept for the map header */
470 return (uword) base + sys_page_sz;
/* Iterate the linked list of map headers: given NULL, presumably returns
 * the first header; given a header, returns the next one (missing lines
 * would show this — confirm against the full file). Headers live in
 * PROT_NONE guard pages, so each is briefly made readable with mprotect
 * while being accessed and re-protected afterwards. */
473 clib_mem_vm_map_hdr_t *
474 clib_mem_vm_get_next_map_hdr (clib_mem_vm_map_hdr_t * hdr)
476 clib_mem_main_t *mm = &clib_mem_main;
477 uword sys_page_sz = 1 << mm->log2_page_sz;
478 clib_mem_vm_map_hdr_t *next;
483 mprotect (hdr, sys_page_sz, PROT_READ);
487 mprotect (hdr, sys_page_sz, PROT_NONE);
489 mprotect (next, sys_page_sz, PROT_READ);
/* Core mapping routine: reserve an aligned range (plus one preceding
 * system page), mmap the memory over it (fd-backed shared or anonymous
 * private, possibly hugetlb), then place a clib_mem_vm_map_hdr_t in the
 * preceding guard page and link it into clib_mem_main's header list.
 * Headers are kept PROT_NONE except while being updated. Returns the
 * mapped base or CLIB_MEM_VM_MAP_FAILED. */
494 clib_mem_vm_map_internal (void *base, clib_mem_page_sz_t log2_page_sz,
495 uword size, int fd, uword offset, char *name)
497 clib_mem_main_t *mm = &clib_mem_main;
498 clib_mem_vm_map_hdr_t *hdr;
499 uword sys_page_sz = 1 << mm->log2_page_sz;
500 int mmap_flags = MAP_FIXED, is_huge = 0;
/* fd-backed mapping: page size comes from the fd itself */
504 mmap_flags |= MAP_SHARED;
505 log2_page_sz = clib_mem_get_fd_log2_page_size (fd);
506 if (log2_page_sz > mm->log2_page_sz)
511 mmap_flags |= MAP_PRIVATE | MAP_ANONYMOUS;
513 if (log2_page_sz == mm->log2_page_sz)
514 log2_page_sz = CLIB_MEM_PAGE_SZ_DEFAULT;
516 switch (log2_page_sz)
518 case CLIB_MEM_PAGE_SZ_UNKNOWN:
519 /* will fail later */
521 case CLIB_MEM_PAGE_SZ_DEFAULT:
522 log2_page_sz = mm->log2_page_sz;
524 case CLIB_MEM_PAGE_SZ_DEFAULT_HUGE:
525 mmap_flags |= MAP_HUGETLB;
526 log2_page_sz = mm->log2_default_hugepage_sz;
/* explicit hugepage size: encode it into the mmap flags */
530 mmap_flags |= MAP_HUGETLB;
531 mmap_flags |= log2_page_sz << MAP_HUGE_SHIFT;
536 if (log2_page_sz == CLIB_MEM_PAGE_SZ_UNKNOWN)
537 return CLIB_MEM_VM_MAP_FAILED;
539 size = round_pow2 (size, 1 << log2_page_sz);
/* reserve address space first, then map over it with MAP_FIXED */
541 base = (void *) clib_mem_vm_reserve ((uword) base, size, log2_page_sz);
543 if (base == (void *) ~0)
544 return CLIB_MEM_VM_MAP_FAILED;
546 base = mmap (base, size, PROT_READ | PROT_WRITE, mmap_flags, fd, offset);
548 if (base == MAP_FAILED)
549 return CLIB_MEM_VM_MAP_FAILED;
/* for hugepages, fault everything in now so failure is detected here */
551 if (is_huge && (mlock (base, size) != 0))
554 return CLIB_MEM_VM_MAP_FAILED;
/* map header goes into the system page reserved just below 'base' */
557 hdr = mmap (base - sys_page_sz, sys_page_sz, PROT_READ | PROT_WRITE,
558 MAP_ANONYMOUS | MAP_PRIVATE | MAP_FIXED, -1, 0);
560 if (hdr != base - sys_page_sz)
563 return CLIB_MEM_VM_MAP_FAILED;
/* append to the doubly-linked header list; unprotect the previous
 * tail just long enough to update its 'next' pointer */
568 mprotect (mm->last_map, sys_page_sz, PROT_READ | PROT_WRITE);
569 mm->last_map->next = hdr;
570 mprotect (mm->last_map, sys_page_sz, PROT_NONE);
576 hdr->prev = mm->last_map;
579 hdr->base_addr = (uword) base;
580 hdr->log2_page_sz = log2_page_sz;
581 hdr->num_pages = size >> log2_page_sz;
582 snprintf (hdr->name, CLIB_VM_MAP_HDR_NAME_MAX_LEN - 1, "%s", (char *) name);
583 hdr->name[CLIB_VM_MAP_HDR_NAME_MAX_LEN - 1] = 0;
584 mprotect (hdr, sys_page_sz, PROT_NONE);
586 CLIB_MEM_UNPOISON (base, size);
/* Unmap a region previously created by clib_mem_vm_map_internal: locate
 * the header in the system page just below 'base', munmap the region,
 * unlink the header from the doubly-linked list (updating first_map /
 * last_map as needed), and finally unmap the header page itself.
 * NOTE(review): stray double ';' on the hdr declaration below — harmless
 * but worth cleaning up when editing the full file. */
591 clib_mem_vm_unmap (void *base)
593 clib_mem_main_t *mm = &clib_mem_main;
594 uword size, sys_page_sz = 1 << mm->log2_page_sz;
595 clib_mem_vm_map_hdr_t *hdr = base - sys_page_sz;;
/* header pages are kept PROT_NONE; unprotect before reading */
597 if (mprotect (hdr, sys_page_sz, PROT_READ | PROT_WRITE) != 0)
600 size = hdr->num_pages << hdr->log2_page_sz;
601 if (munmap ((void *) hdr->base_addr, size) != 0)
606 mprotect (hdr->next, sys_page_sz, PROT_READ | PROT_WRITE);
607 hdr->next->prev = hdr->prev;
608 mprotect (hdr->next, sys_page_sz, PROT_NONE);
611 mm->last_map = hdr->prev;
615 mprotect (hdr->prev, sys_page_sz, PROT_READ | PROT_WRITE);
616 hdr->prev->next = hdr->next;
617 mprotect (hdr->prev, sys_page_sz, PROT_NONE);
620 mm->first_map = hdr->next;
622 if (munmap (hdr, sys_page_sz) != 0)
/* Populate 'stats' with the NUMA placement of n_pages pages starting at
 * 'start', using move_pages() with a NULL nodes argument (query mode).
 * Pages reporting a valid node increment per_numa[node]; -EFAULT status
 * (not mapped/faulted) presumably increments a not-mapped counter on the
 * missing line. If the syscall itself fails, all pages are counted as
 * unknown. */
629 clib_mem_get_page_stats (void *start, clib_mem_page_sz_t log2_page_size,
630 uword n_pages, clib_mem_page_stats_t * stats)
635 log2_page_size = clib_mem_log2_page_size_validate (log2_page_size);
637 vec_validate (status, n_pages - 1);
638 vec_validate (ptr, n_pages - 1);
640 for (i = 0; i < n_pages; i++)
641 ptr[i] = start + (i << log2_page_size);
643 clib_memset (stats, 0, sizeof (clib_mem_page_stats_t));
645 if (move_pages (0, n_pages, ptr, 0, status, 0) != 0)
647 stats->unknown = n_pages;
651 for (i = 0; i < n_pages; i++)
653 if (status[i] >= 0 && status[i] < CLIB_MAX_NUMAS)
656 stats->per_numa[status[i]]++;
658 else if (status[i] == -EFAULT)
/* Translate virtual pages to physical addresses via /proc/self/pagemap.
 * For each of n_pages pages starting at 'mem', seek to the page's 8-byte
 * pagemap entry, check the "page present" bit (63), mask the PFN (low 55
 * bits) and convert to a physical address. Builds a vector of physical
 * addresses; the translation loop stops early on any failure, and the
 * partial-result handling after the length check is on missing lines. */
667 clib_mem_vm_get_paddr (void *mem, clib_mem_page_sz_t log2_page_size,
670 int pagesize = sysconf (_SC_PAGESIZE);
675 log2_page_size = clib_mem_log2_page_size_validate (log2_page_size);
677 if ((fd = open ((char *) "/proc/self/pagemap", O_RDONLY)) == -1)
680 for (i = 0; i < n_pages; i++)
682 u64 seek, pagemap = 0;
683 uword vaddr = pointer_to_uword (mem) + (((u64) i) << log2_page_size);
/* pagemap holds one u64 entry per system page */
684 seek = ((u64) vaddr / pagesize) * sizeof (u64);
685 if (lseek (fd, seek, SEEK_SET) != seek)
688 if (read (fd, &pagemap, sizeof (pagemap)) != (sizeof (pagemap)))
/* bit 63 = page present; skip pages not in RAM */
691 if ((pagemap & (1ULL << 63)) == 0)
694 pagemap &= pow2_mask (55);
695 vec_add1 (r, pagemap * pagesize);
700 if (vec_len (r) != n_pages)
/* Map an existing fd (a->fd) as shared memory, optionally at a fixed
 * requested VA and optionally NUMA-bound: saves the current memory
 * policy, binds to a->numa_node with MPOL_BIND for the duration of the
 * mmap, then restores the previous policy. The mapped address is
 * presumably stored into 'a' on a missing line. Returns a clib error on
 * failure, 0-ish on success (final return not visible). */
709 clib_mem_vm_ext_map (clib_mem_vm_map_t * a)
711 long unsigned int old_mask[16] = { 0 };
712 int mmap_flags = MAP_SHARED;
713 clib_error_t *err = 0;
720 rv = get_mempolicy (&old_mpol, old_mask, sizeof (old_mask) * 8 + 1, 0,
725 err = clib_error_return_unix (0, "get_mempolicy");
/* honor the caller's VA hint exactly */
731 mmap_flags |= MAP_FIXED;
735 long unsigned int mask[16] = { 0 };
736 mask[0] = 1 << a->numa_node;
737 rv = set_mempolicy (MPOL_BIND, mask, sizeof (mask) * 8 + 1);
740 err = clib_error_return_unix (0, "set_mempolicy");
745 addr = (void *) mmap (uword_to_pointer (a->requested_va, void *), a->size,
746 PROT_READ | PROT_WRITE, mmap_flags, a->fd, 0);
748 if (addr == MAP_FAILED)
749 return clib_error_return_unix (0, "mmap");
751 /* re-apply old numa memory policy */
752 if (old_mpol != -1 &&
753 set_mempolicy (old_mpol, old_mask, sizeof (old_mask) * 8 + 1) == -1)
755 err = clib_error_return_unix (0, "set_mempolicy");
760 CLIB_MEM_UNPOISON (addr, a->size);
767 * fd.io coding-style-patch-verification: ON
770 * eval: (c-set-style "gnu")