/*
 * Copyright (c) 2017 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
18 #include <sys/types.h>
21 #include <sys/mount.h>
24 #include <linux/mempolicy.h>
25 #include <linux/memfd.h>
27 #include <vppinfra/clib.h>
28 #include <vppinfra/mem.h>
29 #include <vppinfra/lock.h>
30 #include <vppinfra/time.h>
31 #include <vppinfra/bitmap.h>
32 #include <vppinfra/format.h>
33 #include <vppinfra/clib_error.h>
/* Compatibility definitions for fcntl() file seals and memfd/mmap hugepage
 * flags that may be absent from older kernel/libc headers.
 * NOTE(review): the matching #endif lines (and possibly some #ifndef
 * guards) are not visible in this extracted listing — confirm against the
 * original file before building. */
35 #ifndef F_LINUX_SPECIFIC_BASE
36 #define F_LINUX_SPECIFIC_BASE 1024
/* fcntl commands for file sealing (Linux-specific range) */
40 #define F_ADD_SEALS (F_LINUX_SPECIFIC_BASE + 9)
41 #define F_GET_SEALS (F_LINUX_SPECIFIC_BASE + 10)
43 #define F_SEAL_SEAL 0x0001 /* prevent further seals from being set */
44 #define F_SEAL_SHRINK 0x0002 /* prevent file from shrinking */
45 #define F_SEAL_GROW 0x0004 /* prevent file from growing */
46 #define F_SEAL_WRITE 0x0008 /* prevent writes */
/* memfd_create() flag requesting hugetlb (hugepage) backing */
50 #define MFD_HUGETLB 0x0004U
/* shift positions for encoding an explicit hugepage size in flags */
53 #ifndef MAP_HUGE_SHIFT
54 #define MAP_HUGE_SHIFT 26
57 #ifndef MFD_HUGE_SHIFT
58 #define MFD_HUGE_SHIFT 26
/* MAP_FIXED_NOREPLACE: fixed-address mmap that fails instead of
 * clobbering an existing mapping (kernel >= 4.17) */
61 #ifndef MAP_FIXED_NOREPLACE
62 #define MAP_FIXED_NOREPLACE 0x100000
/* NOTE(review): the two statements below are the surviving bodies of the
 * internal map_lock()/map_unlock() helpers — a test-and-set spin acquire
 * and the matching release on clib_mem_main.map_lock.  The enclosing
 * function definitions were lost when this listing was extracted;
 * restore them from the original file. */
68 while (clib_atomic_test_and_set (&clib_mem_main.map_lock))
/* release side of the same spin lock */
75 clib_atomic_release (&clib_mem_main.map_lock);
78 static clib_mem_page_sz_t
79 legacy_get_log2_default_hugepage_size (void)
/* Determine the kernel's default hugepage size by parsing the
 * "Hugepagesize:" field (reported in kB) out of /proc/meminfo.
 * Returns CLIB_MEM_PAGE_SZ_UNKNOWN if the file cannot be opened or the
 * field is not found.  Fallback path for kernels whose memfd_create()
 * lacks MFD_HUGETLB support (< 4.14).
 * NOTE(review): the declarations of fp/tmp/size, the braces and the
 * closing fclose() are missing from this extracted listing. */
81 clib_mem_page_sz_t log2_page_size = CLIB_MEM_PAGE_SZ_UNKNOWN;
85 if ((fp = fopen ("/proc/meminfo", "r")) == NULL)
86 return CLIB_MEM_PAGE_SZ_UNKNOWN;
/* scan whitespace-separated tokens; %32s bounds the read into tmp */
88 while (fscanf (fp, "%32s", tmp) > 0)
89 if (strncmp ("Hugepagesize:", tmp, 13) == 0)
92 if (fscanf (fp, "%u", &size) > 0)
/* size is in kB, so log2(bytes) = 10 + log2(size) */
93 log2_page_size = 10 + min_log2 (size);
98 return log2_page_size;
102 clib_mem_main_init (void)
/* One-time initialization of clib_mem_main: records the system page
 * size, the default hugepage size and the bitmap of NUMA nodes present.
 * Idempotent — exits early once log2_page_sz is known.
 * NOTE(review): several lines (braces, the early return, and the local
 * declarations of page_size/va/fd/status) are missing from this
 * extracted listing. */
104 clib_mem_main_t *mm = &clib_mem_main;
105 long sysconf_page_size;
110 if (mm->log2_page_sz != CLIB_MEM_PAGE_SZ_UNKNOWN)
113 /* system page size */
114 sysconf_page_size = sysconf (_SC_PAGESIZE);
115 if (sysconf_page_size < 0)
117 clib_panic ("Could not determine the page size");
119 page_size = sysconf_page_size;
120 mm->log2_page_sz = min_log2 (page_size);
122 /* default system hugepage size: probe by creating a hugetlb memfd */
123 if ((fd = syscall (__NR_memfd_create, "test", MFD_HUGETLB)) != -1)
125 mm->log2_default_hugepage_sz = clib_mem_get_fd_log2_page_size (fd);
/* NOTE(review): the close(fd) that should follow is not visible in
 * this listing — confirm it exists in the original file */
128 else /* likely kernel older than 4.14 */
129 mm->log2_default_hugepage_sz = legacy_get_log2_default_hugepage_size ();
131 mm->log2_sys_default_hugepage_sz = mm->log2_default_hugepage_sz;
/* discover populated NUMA nodes by asking move_pages() to place one
 * locked probe page on each candidate node */
134 va = mmap (0, page_size, PROT_READ | PROT_WRITE, MAP_PRIVATE |
135 MAP_ANONYMOUS, -1, 0);
136 if (va == MAP_FAILED)
139 if (mlock (va, page_size))
142 for (int i = 0; i < CLIB_MAX_NUMAS; i++)
/* move_pages() succeeding for node i implies node i exists */
145 if (syscall (__NR_move_pages, 0, 1, &va, &i, &status, 0) == 0)
146 mm->numa_node_bitmap |= 1ULL << i;
150 munmap (va, page_size);
/* Return the page size backing fd — st_blksize from fstat(), which for
 * a hugetlbfs/memfd-hugetlb descriptor is the hugepage size.
 * NOTE(review): the return-type line, braces and the fstat-failure
 * return are missing from this extracted listing. */
154 clib_mem_get_fd_page_size (int fd)
156 struct stat st = { 0 };
157 if (fstat (fd, &st) == -1)
159 return st.st_blksize;
162 __clib_export clib_mem_page_sz_t
163 clib_mem_get_fd_log2_page_size (int fd)
165 uword page_size = clib_mem_get_fd_page_size (fd);
166 return page_size ? min_log2 (page_size) : CLIB_MEM_PAGE_SZ_UNKNOWN;
/* Apply ASLR-style randomization to *requested_va, keeping it aligned
 * to log2_page_size; the entropy source is clib_cpu_time_now().
 * NOTE(review): the signature's return-type line, the bit_mask setup
 * per page-size class and the final branches are missing from this
 * extracted listing. */
170 clib_mem_vm_randomize_va (uword * requested_va,
171 clib_mem_page_sz_t log2_page_size)
/* different page-size classes (<= 4K, 4K..64K, larger — elided) get
 * different numbers of random bits via bit_mask */
175 if (log2_page_size <= 12)
177 else if (log2_page_size > 12 && log2_page_size <= 16)
183 (clib_cpu_time_now () & bit_mask) * (1ull << log2_page_size);
187 legacy_memfd_create (u8 * name)
/* Emulate memfd_create(MFD_HUGETLB) on kernels older than 4.14:
 * mount a private hugetlbfs instance under /tmp, create a file on it,
 * then lazily unmount and remove the directory so only the open fd
 * keeps the file alive.  On failure sets mm->error and returns
 * CLIB_MEM_ERROR.
 * NOTE(review): the return-type line, braces, vec_free() cleanup and
 * the final return are missing from this extracted listing. */
189 clib_mem_main_t *mm = &clib_mem_main;
196 * Since mkdtemp will modify template string "/tmp/hugepage_mount.XXXXXX",
197 * it must not be a string constant, but should be declared as
200 temp = format (0, "/tmp/hugepage_mount.XXXXXX%c", 0);
202 /* create mount directory */
203 if ((mount_dir = mkdtemp ((char *) temp)) == 0)
206 vec_reset_length (mm->error);
207 mm->error = clib_error_return_unix (mm->error, "mkdtemp");
208 return CLIB_MEM_ERROR;
211 if (mount ("none", mount_dir, "hugetlbfs", 0, NULL))
214 rmdir ((char *) mount_dir);
215 vec_reset_length (mm->error);
216 mm->error = clib_error_return_unix (mm->error, "mount");
217 return CLIB_MEM_ERROR;
/* trailing %c with 0 NUL-terminates the vector for use as a C string */
220 filename = format (0, "%s/%s%c", mount_dir, name, 0);
222 if ((fd = open ((char *) filename, O_CREAT | O_RDWR, 0755)) == -1)
224 vec_reset_length (mm->error);
/* NOTE(review): this error tag says "mkdtemp" but the failing call is
 * open() — copy/paste slip; should read "open" */
225 mm->error = clib_error_return_unix (mm->error, "mkdtemp");
/* detach-unmount and remove the directory; the open fd (if any) keeps
 * the hugetlbfs file alive until closed */
228 umount2 ((char *) mount_dir, MNT_DETACH);
229 rmdir ((char *) mount_dir);
/* Create a memory file descriptor (memfd) backed by pages of
 * log2_page_size; the printf-style fmt/... arguments name the fd.
 * Returns the fd, or CLIB_MEM_ERROR with mm->error set.
 * NOTE(review): the return-type line, local declarations (s, fd, va),
 * braces, switch break statements and the final return are missing
 * from this extracted listing. */
237 clib_mem_vm_create_fd (clib_mem_page_sz_t log2_page_size, char *fmt, ...)
239 clib_mem_main_t *mm = &clib_mem_main;
241 unsigned int memfd_flags;
/* normalize explicit sizes that happen to match the system defaults */
245 if (log2_page_size == mm->log2_page_sz)
246 log2_page_size = CLIB_MEM_PAGE_SZ_DEFAULT;
247 else if (log2_page_size == mm->log2_sys_default_hugepage_sz)
248 log2_page_size = CLIB_MEM_PAGE_SZ_DEFAULT_HUGE;
250 switch (log2_page_size)
252 case CLIB_MEM_PAGE_SZ_UNKNOWN:
253 return CLIB_MEM_ERROR;
254 case CLIB_MEM_PAGE_SZ_DEFAULT:
/* only normal pages support file sealing */
255 memfd_flags = MFD_ALLOW_SEALING;
257 case CLIB_MEM_PAGE_SZ_DEFAULT_HUGE:
258 memfd_flags = MFD_HUGETLB;
/* an explicit hugepage size is encoded in the upper flag bits */
261 memfd_flags = MFD_HUGETLB | log2_page_size << MFD_HUGE_SHIFT;
265 s = va_format (0, fmt, &va);
268 /* memfd_create maximum string size is 249 chars without trailing zero */
269 if (vec_len (s) > 249)
270 vec_set_len (s, 249);
273 /* memfd_create introduced in kernel 3.17, we don't support older kernels */
274 fd = syscall (__NR_memfd_create, (char *) s, memfd_flags);
276 /* kernel versions < 4.14 do not support memfd_create for huge pages */
277 if (fd == -1 && errno == EINVAL &&
278 log2_page_size == CLIB_MEM_PAGE_SZ_DEFAULT_HUGE)
280 fd = legacy_memfd_create (s);
284 vec_reset_length (mm->error);
285 mm->error = clib_error_return_unix (mm->error, "memfd_create");
287 return CLIB_MEM_ERROR;
/* seal against shrinking so mapped regions cannot be invalidated */
292 if ((memfd_flags & MFD_ALLOW_SEALING) &&
293 ((fcntl (fd, F_ADD_SEALS, F_SEAL_SHRINK)) == -1))
295 vec_reset_length (mm->error);
296 mm->error = clib_error_return_unix (mm->error, "fcntl (F_ADD_SEALS)");
298 return CLIB_MEM_ERROR;
305 clib_mem_vm_reserve (uword start, uword size, clib_mem_page_sz_t log2_page_sz)
/* Reserve (PROT_NONE) a size-byte region aligned to 1<<log2_page_sz,
 * plus one extra system page immediately in front of it (used for the
 * map header).  Returns the usable start address, or ~0 on failure.
 * NOTE(review): the return-type line, local declarations (base, p,
 * n_bytes), braces and some early-return lines are missing from this
 * extracted listing. */
307 clib_mem_main_t *mm = &clib_mem_main;
308 uword pagesize = 1ULL << log2_page_sz;
309 uword sys_page_sz = 1ULL << mm->log2_page_sz;
313 size = round_pow2 (size, pagesize);
315 /* in addition to the requested reservation, we also reserve one system
316 * page (typically 4K) adjacent to the start of the reservation */
320 /* start address is provided, so we just need to make sure we are not
321 * replacing existing map */
/* a caller-supplied start must itself be aligned to the page size */
322 if (start & pow2_mask (log2_page_sz))
325 base = (void *) start - sys_page_sz;
326 base = mmap (base, size + sys_page_sz, PROT_NONE,
327 MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED_NOREPLACE, -1, 0);
328 return (base == MAP_FAILED) ? ~0 : start;
331 /* to make sure that we get reservation aligned to page_size we need to
332 * request one additional page as mmap will return us address which is
333 * aligned only to system page size */
334 base = mmap (0, size + pagesize, PROT_NONE,
335 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
337 if (base == MAP_FAILED)
340 /* return additional space at the end of allocation */
341 p = base + size + pagesize;
342 n_bytes = (uword) p & pow2_mask (log2_page_sz);
349 /* return additional space at the start of allocation */
350 n_bytes = pagesize - sys_page_sz - n_bytes;
353 munmap (base, n_bytes);
/* the usable region begins one system page past the reservation base */
357 return (uword) base + sys_page_sz;
360 __clib_export clib_mem_vm_map_hdr_t *
361 clib_mem_vm_get_next_map_hdr (clib_mem_vm_map_hdr_t * hdr)
/* Iterate the linked list of map headers: the headers live in
 * PROT_NONE-protected pages, so each one is made readable while it is
 * current and re-protected before moving on.
 * NOTE(review): braces, the NULL/first_map handling, any locking, and
 * the return statements are missing from this extracted listing. */
363 clib_mem_main_t *mm = &clib_mem_main;
364 uword sys_page_sz = 1ULL << mm->log2_page_sz;
365 clib_mem_vm_map_hdr_t *next;
/* open the current header just long enough to read its next pointer */
370 mprotect (hdr, sys_page_sz, PROT_READ);
374 mprotect (hdr, sys_page_sz, PROT_NONE);
376 mprotect (next, sys_page_sz, PROT_READ);
381 clib_mem_vm_map_internal (void *base, clib_mem_page_sz_t log2_page_sz,
382 uword size, int fd, uword offset, char *name)
/* Core mapping routine: reserve an aligned region, mmap the requested
 * memory into it (anonymous or fd-backed, optionally hugetlb), and
 * record a clib_mem_vm_map_hdr_t in the hidden system page just below
 * the mapping, linked into the mm->first_map/last_map list.
 * Returns the mapped base or CLIB_MEM_VM_MAP_FAILED.
 * NOTE(review): the return-type line, braces, switch break statements,
 * locking and the error-path munmap cleanup are missing from this
 * extracted listing. */
384 clib_mem_main_t *mm = &clib_mem_main;
385 clib_mem_vm_map_hdr_t *hdr;
386 uword sys_page_sz = 1ULL << mm->log2_page_sz;
387 int mmap_flags = MAP_FIXED, is_huge = 0;
/* fd-backed mappings are shared and adopt the fd's own page size */
391 mmap_flags |= MAP_SHARED;
392 log2_page_sz = clib_mem_get_fd_log2_page_size (fd);
393 if (log2_page_sz > mm->log2_page_sz)
398 mmap_flags |= MAP_PRIVATE | MAP_ANONYMOUS;
400 if (log2_page_sz == mm->log2_page_sz)
401 log2_page_sz = CLIB_MEM_PAGE_SZ_DEFAULT;
403 switch (log2_page_sz)
405 case CLIB_MEM_PAGE_SZ_UNKNOWN:
406 /* will fail later */
408 case CLIB_MEM_PAGE_SZ_DEFAULT:
409 log2_page_sz = mm->log2_page_sz;
411 case CLIB_MEM_PAGE_SZ_DEFAULT_HUGE:
412 mmap_flags |= MAP_HUGETLB;
413 log2_page_sz = mm->log2_default_hugepage_sz;
/* explicit hugepage size is encoded in the upper mmap flag bits */
417 mmap_flags |= MAP_HUGETLB;
418 mmap_flags |= log2_page_sz << MAP_HUGE_SHIFT;
423 if (log2_page_sz == CLIB_MEM_PAGE_SZ_UNKNOWN)
424 return CLIB_MEM_VM_MAP_FAILED;
426 size = round_pow2 (size, 1ULL << log2_page_sz);
/* reserve aligned address space (plus the hidden header page) first */
428 base = (void *) clib_mem_vm_reserve ((uword) base, size, log2_page_sz);
430 if (base == (void *) ~0)
431 return CLIB_MEM_VM_MAP_FAILED;
433 base = mmap (base, size, PROT_READ | PROT_WRITE, mmap_flags, fd, offset);
435 if (base == MAP_FAILED)
436 return CLIB_MEM_VM_MAP_FAILED;
/* hugepages are mlock()ed so allocation failure surfaces immediately */
438 if (is_huge && (mlock (base, size) != 0))
441 return CLIB_MEM_VM_MAP_FAILED;
/* place the map header in the reserved page just below the mapping */
444 hdr = mmap (base - sys_page_sz, sys_page_sz, PROT_READ | PROT_WRITE,
445 MAP_ANONYMOUS | MAP_PRIVATE | MAP_FIXED, -1, 0);
447 if (hdr != base - sys_page_sz)
450 return CLIB_MEM_VM_MAP_FAILED;
/* link into the global header list; header pages stay PROT_NONE and
 * are opened only for the duration of each pointer update */
457 mprotect (mm->last_map, sys_page_sz, PROT_READ | PROT_WRITE);
458 mm->last_map->next = hdr;
459 mprotect (mm->last_map, sys_page_sz, PROT_NONE);
464 clib_mem_unpoison (hdr, sys_page_sz);
466 hdr->prev = mm->last_map;
467 snprintf (hdr->name, CLIB_VM_MAP_HDR_NAME_MAX_LEN - 1, "%s", (char *) name);
470 hdr->base_addr = (uword) base;
471 hdr->log2_page_sz = log2_page_sz;
472 hdr->num_pages = size >> log2_page_sz;
/* snprintf already terminates, but be explicit for the max-length case */
474 hdr->name[CLIB_VM_MAP_HDR_NAME_MAX_LEN - 1] = 0;
475 mprotect (hdr, sys_page_sz, PROT_NONE);
479 clib_mem_unpoison (base, size);
484 clib_mem_vm_unmap (void *base)
/* Unmap a region created by clib_mem_vm_map_internal(): find the
 * hidden header one system page below base, munmap the user region,
 * unlink the header from the first_map/last_map list, then unmap the
 * header page itself.  Returns 0 on success, CLIB_MEM_ERROR on
 * failure.
 * NOTE(review): the return-type line, braces, locking, the if/else
 * structure around next/prev and the success return are missing from
 * this extracted listing. */
486 clib_mem_main_t *mm = &clib_mem_main;
487 uword size, sys_page_sz = 1ULL << mm->log2_page_sz;
/* NOTE(review): stray double semicolon at the end of the next line —
 * harmless (empty statement) but should be cleaned up */
488 clib_mem_vm_map_hdr_t *hdr = base - sys_page_sz;;
/* header pages are PROT_NONE; open this one before reading it */
491 if (mprotect (hdr, sys_page_sz, PROT_READ | PROT_WRITE) != 0)
494 size = hdr->num_pages << hdr->log2_page_sz;
495 if (munmap ((void *) hdr->base_addr, size) != 0)
/* unlink: open each neighbour header page just long enough to patch
 * its next/prev pointer */
500 mprotect (hdr->next, sys_page_sz, PROT_READ | PROT_WRITE);
501 hdr->next->prev = hdr->prev;
502 mprotect (hdr->next, sys_page_sz, PROT_NONE);
505 mm->last_map = hdr->prev;
509 mprotect (hdr->prev, sys_page_sz, PROT_READ | PROT_WRITE);
510 hdr->prev->next = hdr->next;
511 mprotect (hdr->prev, sys_page_sz, PROT_NONE);
514 mm->first_map = hdr->next;
518 if (munmap (hdr, sys_page_sz) != 0)
519 return CLIB_MEM_ERROR;
524 return CLIB_MEM_ERROR;
528 clib_mem_get_page_stats (void *start, clib_mem_page_sz_t log2_page_size,
529 uword n_pages, clib_mem_page_stats_t * stats)
/* Fill *stats with the per-NUMA-node placement of the n_pages pages
 * starting at start, using move_pages() in query mode (NULL node
 * list).  If the syscall fails, all pages are counted as unknown.
 * NOTE(review): local declarations (status, ptr, i), braces, the
 * not-mapped/unknown counter updates and the vec_free() cleanup are
 * missing from this extracted listing. */
534 log2_page_size = clib_mem_log2_page_size_validate (log2_page_size);
536 vec_validate (status, n_pages - 1);
537 vec_validate (ptr, n_pages - 1);
539 for (i = 0; i < n_pages; i++)
540 ptr[i] = start + (i << log2_page_size);
542 clib_memset (stats, 0, sizeof (clib_mem_page_stats_t));
543 stats->total = n_pages;
544 stats->log2_page_sz = log2_page_size;
/* NULL node list => query-only: report current placement per page */
546 if (syscall (__NR_move_pages, 0, n_pages, ptr, 0, status, 0) != 0)
548 stats->unknown = n_pages;
552 for (i = 0; i < n_pages; i++)
/* non-negative status is the NUMA node the page resides on */
554 if (status[i] >= 0 && status[i] < CLIB_MAX_NUMAS)
557 stats->per_numa[status[i]]++;
559 else if (status[i] == -EFAULT)
572 clib_mem_vm_get_paddr (void *mem, clib_mem_page_sz_t log2_page_size,
/* Translate the virtual pages starting at mem into physical addresses
 * by reading /proc/self/pagemap.  Builds a vector of physical
 * addresses, stopping at the first non-present page (bit 63 clear) or
 * on any seek/read error.
 * NOTE(review): the n_pages parameter line, local declarations (fd, i,
 * r), braces, the done label, close(fd) and the final return are
 * missing from this extracted listing. */
575 int pagesize = sysconf (_SC_PAGESIZE);
580 log2_page_size = clib_mem_log2_page_size_validate (log2_page_size);
582 if ((fd = open ((char *) "/proc/self/pagemap", O_RDONLY)) == -1)
585 for (i = 0; i < n_pages; i++)
587 u64 seek, pagemap = 0;
588 uword vaddr = pointer_to_uword (mem) + (((u64) i) << log2_page_size);
/* pagemap holds one u64 entry per system page */
589 seek = ((u64) vaddr / pagesize) * sizeof (u64);
590 if (lseek (fd, seek, SEEK_SET) != seek)
593 if (read (fd, &pagemap, sizeof (pagemap)) != (sizeof (pagemap)))
/* bit 63 = page present in RAM */
596 if ((pagemap & (1ULL << 63)) == 0)
/* bits 0..54 hold the page frame number */
599 pagemap &= pow2_mask (55);
600 vec_add1 (r, pagemap * pagesize);
/* a partial translation counts as failure */
605 if (vec_len (r) != n_pages)
614 clib_mem_set_numa_affinity (u8 numa_node, int force)
/* Bind (force != 0) or prefer (force == 0) memory allocation for the
 * calling thread to numa_node via set_mempolicy().  Fails immediately
 * when no NUMA support was detected at init time (empty
 * numa_node_bitmap).  Returns 0 on success or CLIB_MEM_ERROR with
 * mm->error set.
 * NOTE(review): the return-type line, braces, the rv declaration, the
 * __func__ argument of the first error return and the success path
 * are missing from this extracted listing. */
616 clib_mem_main_t *mm = &clib_mem_main;
617 clib_bitmap_t *bmp = 0;
620 /* no numa support */
621 if (mm->numa_node_bitmap == 0)
625 vec_reset_length (mm->error);
626 mm->error = clib_error_return (mm->error, "%s: numa not supported",
628 return CLIB_MEM_ERROR;
634 bmp = clib_bitmap_set (bmp, numa_node, 1);
/* maxnode argument must exceed the highest bit index in the mask */
636 rv = syscall (__NR_set_mempolicy, force ? MPOL_BIND : MPOL_PREFERRED, bmp,
637 vec_len (bmp) * sizeof (bmp[0]) * 8 + 1);
639 clib_bitmap_free (bmp);
640 vec_reset_length (mm->error);
644 mm->error = clib_error_return_unix (mm->error, (char *) __func__);
645 return CLIB_MEM_ERROR;
652 clib_mem_set_default_numa_affinity ()
/* Restore the default (MPOL_DEFAULT) memory policy for the calling
 * thread.  Returns CLIB_MEM_ERROR with mm->error set on failure.
 * NOTE(review): the return-type line, braces and the success return
 * are missing from this extracted listing. */
654 clib_mem_main_t *mm = &clib_mem_main;
656 if (syscall (__NR_set_mempolicy, MPOL_DEFAULT, 0, 0))
658 vec_reset_length (mm->error);
659 mm->error = clib_error_return_unix (mm->error, (char *) __func__);
660 return CLIB_MEM_ERROR;
/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */