/*
 * Copyright (c) 2017 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include <stdio.h>
#include <string.h>
#include <errno.h>
#include <unistd.h>
#include <fcntl.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/mman.h>
#include <sys/mount.h>
#include <linux/mempolicy.h>
#include <linux/memfd.h>

#include <vppinfra/clib.h>
#include <vppinfra/mem.h>
#include <vppinfra/time.h>
#include <vppinfra/format.h>
#include <vppinfra/clib_error.h>
#include <vppinfra/linux/syscall.h>
#include <vppinfra/linux/sysfs.h>
/* Compatibility definitions for builds against older kernel / libc headers:
 * provide the memfd sealing fcntl commands, the memfd/mmap hugepage flags
 * and MAP_FIXED_NOREPLACE when the system headers do not define them.
 * Values match the Linux UAPI definitions. */

#ifndef F_LINUX_SPECIFIC_BASE
#define F_LINUX_SPECIFIC_BASE 1024
#endif

#ifndef F_ADD_SEALS
#define F_ADD_SEALS (F_LINUX_SPECIFIC_BASE + 9)
#define F_GET_SEALS (F_LINUX_SPECIFIC_BASE + 10)

#define F_SEAL_SEAL   0x0001	/* prevent further seals from being set */
#define F_SEAL_SHRINK 0x0002	/* prevent file from shrinking */
#define F_SEAL_GROW   0x0004	/* prevent file from growing */
#define F_SEAL_WRITE  0x0008	/* prevent writes */
#endif

#ifndef MFD_HUGETLB
#define MFD_HUGETLB 0x0004U
#endif

#ifndef MAP_HUGE_SHIFT
#define MAP_HUGE_SHIFT 26
#endif

#ifndef MFD_HUGE_SHIFT
#define MFD_HUGE_SHIFT 26
#endif

#ifndef MAP_FIXED_NOREPLACE
#define MAP_FIXED_NOREPLACE 0x100000
#endif
66 clib_mem_get_default_hugepage_size (void)
68 unformat_input_t input;
76 * If the kernel doesn't support hugepages, /proc/meminfo won't
77 * say anything about it. Use the regular page size as a default.
79 size = clib_mem_get_page_size () / 1024;
81 if ((fd = open ("/proc/meminfo", 0)) == -1)
84 unformat_init_clib_file (&input, fd);
86 while (unformat_check_input (&input) != UNFORMAT_END_OF_INPUT)
88 if (unformat (&input, "Hugepagesize:%_%u kB", &size))
91 unformat_skip_line (&input);
93 unformat_free (&input);
96 return 1024ULL * size;
99 static clib_mem_page_sz_t
100 legacy_get_log2_default_hugepage_size (void)
102 clib_mem_page_sz_t log2_page_size = CLIB_MEM_PAGE_SZ_UNKNOWN;
106 if ((fp = fopen ("/proc/meminfo", "r")) == NULL)
107 return CLIB_MEM_PAGE_SZ_UNKNOWN;
109 while (fscanf (fp, "%32s", tmp) > 0)
110 if (strncmp ("Hugepagesize:", tmp, 13) == 0)
113 if (fscanf (fp, "%u", &size) > 0)
114 log2_page_size = 10 + min_log2 (size);
119 return log2_page_size;
123 clib_mem_main_init ()
125 clib_mem_main_t *mm = &clib_mem_main;
130 if (mm->log2_page_sz != CLIB_MEM_PAGE_SZ_UNKNOWN)
133 /* system page size */
134 page_size = sysconf (_SC_PAGESIZE);
135 mm->log2_page_sz = min_log2 (page_size);
137 /* default system hugeppage size */
138 if ((fd = memfd_create ("test", MFD_HUGETLB)) != -1)
140 mm->log2_default_hugepage_sz = clib_mem_get_fd_log2_page_size (fd);
143 else /* likely kernel older than 4.14 */
144 mm->log2_default_hugepage_sz = legacy_get_log2_default_hugepage_size ();
147 va = mmap (0, page_size, PROT_READ | PROT_WRITE, MAP_PRIVATE |
148 MAP_ANONYMOUS, -1, 0);
149 if (va == MAP_FAILED)
152 if (mlock (va, page_size))
155 for (int i = 0; i < CLIB_MAX_NUMAS; i++)
158 if (move_pages (0, 1, &va, &i, &status, 0) == 0)
159 mm->numa_node_bitmap |= 1ULL << i;
163 munmap (va, page_size);
167 clib_mem_get_fd_page_size (int fd)
169 struct stat st = { 0 };
170 if (fstat (fd, &st) == -1)
172 return st.st_blksize;
176 clib_mem_get_fd_log2_page_size (int fd)
178 uword page_size = clib_mem_get_fd_page_size (fd);
179 return page_size ? min_log2 (page_size) : CLIB_MEM_PAGE_SZ_UNKNOWN;
183 clib_mem_vm_randomize_va (uword * requested_va,
184 clib_mem_page_sz_t log2_page_size)
188 if (log2_page_size <= 12)
190 else if (log2_page_size > 12 && log2_page_size <= 16)
196 (clib_cpu_time_now () & bit_mask) * (1ull << log2_page_size);
200 clib_mem_vm_ext_alloc (clib_mem_vm_alloc_t * a)
202 clib_mem_main_t *mm = &clib_mem_main;
204 clib_error_t *err = 0;
211 long unsigned int old_mask[16] = { 0 };
213 /* save old numa mem policy if needed */
214 if (a->flags & (CLIB_MEM_VM_F_NUMA_PREFER | CLIB_MEM_VM_F_NUMA_FORCE))
217 rv = get_mempolicy (&old_mpol, old_mask, sizeof (old_mask) * 8 + 1,
222 if (a->numa_node != 0 && (a->flags & CLIB_MEM_VM_F_NUMA_FORCE) != 0)
224 err = clib_error_return_unix (0, "get_mempolicy");
232 if (a->flags & CLIB_MEM_VM_F_LOCKED)
233 mmap_flags |= MAP_LOCKED;
235 /* if we are creating shared segment, we need file descriptor */
236 if (a->flags & CLIB_MEM_VM_F_SHARED)
238 mmap_flags |= MAP_SHARED;
239 /* if hugepages are needed we need to create mount point */
240 if (a->flags & CLIB_MEM_VM_F_HUGETLB)
242 log2_page_size = CLIB_MEM_PAGE_SZ_DEFAULT_HUGE;
243 mmap_flags |= MAP_LOCKED;
246 log2_page_size = CLIB_MEM_PAGE_SZ_DEFAULT;
248 if ((fd = clib_mem_vm_create_fd (log2_page_size, "%s", a->name)) == -1)
250 err = clib_error_return (0, "%U", format_clib_error, mm->error);
254 log2_page_size = clib_mem_get_fd_log2_page_size (fd);
255 if (log2_page_size == 0)
257 err = clib_error_return_unix (0, "cannot determine page size");
263 clib_mem_vm_randomize_va (&a->requested_va, log2_page_size);
264 mmap_flags |= MAP_FIXED;
267 else /* not CLIB_MEM_VM_F_SHARED */
269 mmap_flags |= MAP_PRIVATE | MAP_ANONYMOUS;
270 if (a->flags & CLIB_MEM_VM_F_HUGETLB)
272 mmap_flags |= MAP_HUGETLB;
277 log2_page_size = min_log2 (sysconf (_SC_PAGESIZE));
281 n_pages = ((a->size - 1) >> log2_page_size) + 1;
283 if (a->flags & CLIB_MEM_VM_F_HUGETLB_PREALLOC)
285 err = clib_sysfs_prealloc_hugepages (a->numa_node, log2_page_size,
293 if ((ftruncate (fd, (u64) n_pages * (1 << log2_page_size))) == -1)
295 err = clib_error_return_unix (0, "ftruncate");
302 long unsigned int mask[16] = { 0 };
303 mask[0] = 1 << a->numa_node;
304 rv = set_mempolicy (MPOL_BIND, mask, sizeof (mask) * 8 + 1);
305 if (rv == -1 && a->numa_node != 0 &&
306 (a->flags & CLIB_MEM_VM_F_NUMA_FORCE) != 0)
308 err = clib_error_return_unix (0, "set_mempolicy");
313 addr = mmap (uword_to_pointer (a->requested_va, void *), a->size,
314 (PROT_READ | PROT_WRITE), mmap_flags, fd, 0);
315 if (addr == MAP_FAILED)
317 err = clib_error_return_unix (0, "mmap");
321 /* re-apply old numa memory policy */
322 if (old_mpol != -1 &&
323 set_mempolicy (old_mpol, old_mask, sizeof (old_mask) * 8 + 1) == -1)
325 err = clib_error_return_unix (0, "set_mempolicy");
329 a->log2_page_size = log2_page_size;
330 a->n_pages = n_pages;
333 CLIB_MEM_UNPOISON (addr, a->size);
346 clib_mem_vm_ext_free (clib_mem_vm_alloc_t * a)
350 clib_mem_vm_free (a->addr, 1ull << a->log2_page_size);
357 legacy_memfd_create (u8 * name)
359 clib_mem_main_t *mm = &clib_mem_main;
366 * Since mkdtemp will modify template string "/tmp/hugepage_mount.XXXXXX",
367 * it must not be a string constant, but should be declared as
370 temp = format (0, "/tmp/hugepage_mount.XXXXXX%c", 0);
372 /* create mount directory */
373 if ((mount_dir = mkdtemp ((char *) temp)) == 0)
376 vec_reset_length (mm->error);
377 mm->error = clib_error_return_unix (mm->error, "mkdtemp");
378 return CLIB_MEM_ERROR;
381 if (mount ("none", mount_dir, "hugetlbfs", 0, NULL))
384 rmdir ((char *) mount_dir);
385 vec_reset_length (mm->error);
386 mm->error = clib_error_return_unix (mm->error, "mount");
387 return CLIB_MEM_ERROR;
390 filename = format (0, "%s/%s%c", mount_dir, name, 0);
392 if ((fd = open ((char *) filename, O_CREAT | O_RDWR, 0755)) == -1)
394 vec_reset_length (mm->error);
395 mm->error = clib_error_return_unix (mm->error, "mkdtemp");
398 umount2 ((char *) mount_dir, MNT_DETACH);
399 rmdir ((char *) mount_dir);
407 clib_mem_vm_create_fd (clib_mem_page_sz_t log2_page_size, char *fmt, ...)
409 clib_mem_main_t *mm = &clib_mem_main;
411 unsigned int memfd_flags;
415 if (log2_page_size == mm->log2_page_sz)
416 log2_page_size = CLIB_MEM_PAGE_SZ_DEFAULT;
417 else if (log2_page_size == mm->log2_default_hugepage_sz)
418 log2_page_size = CLIB_MEM_PAGE_SZ_DEFAULT_HUGE;
420 switch (log2_page_size)
422 case CLIB_MEM_PAGE_SZ_UNKNOWN:
423 return CLIB_MEM_ERROR;
424 case CLIB_MEM_PAGE_SZ_DEFAULT:
425 memfd_flags = MFD_ALLOW_SEALING;
427 case CLIB_MEM_PAGE_SZ_DEFAULT_HUGE:
428 memfd_flags = MFD_HUGETLB;
431 memfd_flags = MFD_HUGETLB | log2_page_size << MFD_HUGE_SHIFT;
435 s = va_format (0, fmt, &va);
438 /* memfd_create maximum string size is 249 chars without trailing zero */
439 if (vec_len (s) > 249)
443 /* memfd_create introduced in kernel 3.17, we don't support older kernels */
444 fd = memfd_create ((char *) s, memfd_flags);
446 /* kernel versions < 4.14 does not support memfd_create for huge pages */
447 if (fd == -1 && errno == EINVAL &&
448 log2_page_size == CLIB_MEM_PAGE_SZ_DEFAULT_HUGE)
450 fd = legacy_memfd_create (s);
454 vec_reset_length (mm->error);
455 mm->error = clib_error_return_unix (mm->error, "memfd_create");
457 return CLIB_MEM_ERROR;
462 if ((memfd_flags & MFD_ALLOW_SEALING) &&
463 ((fcntl (fd, F_ADD_SEALS, F_SEAL_SHRINK)) == -1))
465 vec_reset_length (mm->error);
466 mm->error = clib_error_return_unix (mm->error, "fcntl (F_ADD_SEALS)");
468 return CLIB_MEM_ERROR;
475 clib_mem_vm_reserve (uword start, uword size, clib_mem_page_sz_t log2_page_sz)
477 clib_mem_main_t *mm = &clib_mem_main;
478 uword pagesize = 1ULL << log2_page_sz;
479 uword sys_page_sz = 1ULL << mm->log2_page_sz;
483 size = round_pow2 (size, pagesize);
485 /* in adition of requested reservation, we also rserve one system page
486 * (typically 4K) adjacent to the start off reservation */
490 /* start address is provided, so we just need to make sure we are not
491 * replacing existing map */
492 if (start & pow2_mask (log2_page_sz))
495 base = (void *) start - sys_page_sz;
496 base = mmap (base, size + sys_page_sz, PROT_NONE,
497 MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED_NOREPLACE, -1, 0);
498 return (base == MAP_FAILED) ? ~0 : start;
501 /* to make sure that we get reservation aligned to page_size we need to
502 * request one additional page as mmap will return us address which is
503 * aligned only to system page size */
504 base = mmap (0, size + pagesize, PROT_NONE,
505 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
507 if (base == MAP_FAILED)
510 /* return additional space at the end of allocation */
511 p = base + size + pagesize;
512 n_bytes = (uword) p & pow2_mask (log2_page_sz);
519 /* return additional space at the start of allocation */
520 n_bytes = pagesize - sys_page_sz - n_bytes;
523 munmap (base, n_bytes);
527 return (uword) base + sys_page_sz;
530 clib_mem_vm_map_hdr_t *
531 clib_mem_vm_get_next_map_hdr (clib_mem_vm_map_hdr_t * hdr)
533 clib_mem_main_t *mm = &clib_mem_main;
534 uword sys_page_sz = 1 << mm->log2_page_sz;
535 clib_mem_vm_map_hdr_t *next;
540 mprotect (hdr, sys_page_sz, PROT_READ);
544 mprotect (hdr, sys_page_sz, PROT_NONE);
546 mprotect (next, sys_page_sz, PROT_READ);
551 clib_mem_vm_map_internal (void *base, clib_mem_page_sz_t log2_page_sz,
552 uword size, int fd, uword offset, char *name)
554 clib_mem_main_t *mm = &clib_mem_main;
555 clib_mem_vm_map_hdr_t *hdr;
556 uword sys_page_sz = 1 << mm->log2_page_sz;
557 int mmap_flags = MAP_FIXED, is_huge = 0;
561 mmap_flags |= MAP_SHARED;
562 log2_page_sz = clib_mem_get_fd_log2_page_size (fd);
563 if (log2_page_sz > mm->log2_page_sz)
568 mmap_flags |= MAP_PRIVATE | MAP_ANONYMOUS;
570 if (log2_page_sz == mm->log2_page_sz)
571 log2_page_sz = CLIB_MEM_PAGE_SZ_DEFAULT;
573 switch (log2_page_sz)
575 case CLIB_MEM_PAGE_SZ_UNKNOWN:
576 /* will fail later */
578 case CLIB_MEM_PAGE_SZ_DEFAULT:
579 log2_page_sz = mm->log2_page_sz;
581 case CLIB_MEM_PAGE_SZ_DEFAULT_HUGE:
582 mmap_flags |= MAP_HUGETLB;
583 log2_page_sz = mm->log2_default_hugepage_sz;
587 mmap_flags |= MAP_HUGETLB;
588 mmap_flags |= log2_page_sz << MAP_HUGE_SHIFT;
593 if (log2_page_sz == CLIB_MEM_PAGE_SZ_UNKNOWN)
594 return CLIB_MEM_VM_MAP_FAILED;
596 size = round_pow2 (size, 1 << log2_page_sz);
598 base = (void *) clib_mem_vm_reserve ((uword) base, size, log2_page_sz);
600 if (base == (void *) ~0)
601 return CLIB_MEM_VM_MAP_FAILED;
603 base = mmap (base, size, PROT_READ | PROT_WRITE, mmap_flags, fd, offset);
605 if (base == MAP_FAILED)
606 return CLIB_MEM_VM_MAP_FAILED;
608 if (is_huge && (mlock (base, size) != 0))
611 return CLIB_MEM_VM_MAP_FAILED;
614 hdr = mmap (base - sys_page_sz, sys_page_sz, PROT_READ | PROT_WRITE,
615 MAP_ANONYMOUS | MAP_PRIVATE | MAP_FIXED, -1, 0);
617 if (hdr != base - sys_page_sz)
620 return CLIB_MEM_VM_MAP_FAILED;
625 mprotect (mm->last_map, sys_page_sz, PROT_READ | PROT_WRITE);
626 mm->last_map->next = hdr;
627 mprotect (mm->last_map, sys_page_sz, PROT_NONE);
633 hdr->prev = mm->last_map;
636 hdr->base_addr = (uword) base;
637 hdr->log2_page_sz = log2_page_sz;
638 hdr->num_pages = size >> log2_page_sz;
640 snprintf (hdr->name, CLIB_VM_MAP_HDR_NAME_MAX_LEN - 1, "%s", (char *) name);
641 hdr->name[CLIB_VM_MAP_HDR_NAME_MAX_LEN - 1] = 0;
642 mprotect (hdr, sys_page_sz, PROT_NONE);
644 CLIB_MEM_UNPOISON (base, size);
649 clib_mem_vm_unmap (void *base)
651 clib_mem_main_t *mm = &clib_mem_main;
652 uword size, sys_page_sz = 1 << mm->log2_page_sz;
653 clib_mem_vm_map_hdr_t *hdr = base - sys_page_sz;;
655 if (mprotect (hdr, sys_page_sz, PROT_READ | PROT_WRITE) != 0)
656 return CLIB_MEM_ERROR;
658 size = hdr->num_pages << hdr->log2_page_sz;
659 if (munmap ((void *) hdr->base_addr, size) != 0)
660 return CLIB_MEM_ERROR;
664 mprotect (hdr->next, sys_page_sz, PROT_READ | PROT_WRITE);
665 hdr->next->prev = hdr->prev;
666 mprotect (hdr->next, sys_page_sz, PROT_NONE);
669 mm->last_map = hdr->prev;
673 mprotect (hdr->prev, sys_page_sz, PROT_READ | PROT_WRITE);
674 hdr->prev->next = hdr->next;
675 mprotect (hdr->prev, sys_page_sz, PROT_NONE);
678 mm->first_map = hdr->next;
680 if (munmap (hdr, sys_page_sz) != 0)
681 return CLIB_MEM_ERROR;
687 clib_mem_get_page_stats (void *start, clib_mem_page_sz_t log2_page_size,
688 uword n_pages, clib_mem_page_stats_t * stats)
693 log2_page_size = clib_mem_log2_page_size_validate (log2_page_size);
695 vec_validate (status, n_pages - 1);
696 vec_validate (ptr, n_pages - 1);
698 for (i = 0; i < n_pages; i++)
699 ptr[i] = start + (i << log2_page_size);
701 clib_memset (stats, 0, sizeof (clib_mem_page_stats_t));
703 if (move_pages (0, n_pages, ptr, 0, status, 0) != 0)
705 stats->unknown = n_pages;
709 for (i = 0; i < n_pages; i++)
711 if (status[i] >= 0 && status[i] < CLIB_MAX_NUMAS)
714 stats->per_numa[status[i]]++;
716 else if (status[i] == -EFAULT)
725 clib_mem_vm_get_paddr (void *mem, clib_mem_page_sz_t log2_page_size,
728 int pagesize = sysconf (_SC_PAGESIZE);
733 log2_page_size = clib_mem_log2_page_size_validate (log2_page_size);
735 if ((fd = open ((char *) "/proc/self/pagemap", O_RDONLY)) == -1)
738 for (i = 0; i < n_pages; i++)
740 u64 seek, pagemap = 0;
741 uword vaddr = pointer_to_uword (mem) + (((u64) i) << log2_page_size);
742 seek = ((u64) vaddr / pagesize) * sizeof (u64);
743 if (lseek (fd, seek, SEEK_SET) != seek)
746 if (read (fd, &pagemap, sizeof (pagemap)) != (sizeof (pagemap)))
749 if ((pagemap & (1ULL << 63)) == 0)
752 pagemap &= pow2_mask (55);
753 vec_add1 (r, pagemap * pagesize);
758 if (vec_len (r) != n_pages)
767 clib_mem_set_numa_affinity (u8 numa_node, int force)
769 clib_mem_main_t *mm = &clib_mem_main;
770 long unsigned int mask[16] = { 0 };
771 int mask_len = sizeof (mask) * 8 + 1;
773 /* no numa support */
774 if (mm->numa_node_bitmap == 0)
778 vec_reset_length (mm->error);
779 mm->error = clib_error_return (mm->error, "%s: numa not supported",
781 return CLIB_MEM_ERROR;
787 mask[0] = 1 << numa_node;
789 if (set_mempolicy (force ? MPOL_BIND : MPOL_PREFERRED, mask, mask_len))
792 vec_reset_length (mm->error);
796 vec_reset_length (mm->error);
797 mm->error = clib_error_return_unix (mm->error, (char *) __func__);
798 return CLIB_MEM_ERROR;
802 clib_mem_set_default_numa_affinity ()
804 clib_mem_main_t *mm = &clib_mem_main;
806 if (set_mempolicy (MPOL_DEFAULT, 0, 0))
808 vec_reset_length (mm->error);
809 mm->error = clib_error_return_unix (mm->error, (char *) __func__);
810 return CLIB_MEM_ERROR;
/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */