2 * Copyright (c) 2015 Cisco and/or its affiliates.
3 * Licensed under the Apache License, Version 2.0 (the "License");
4 * you may not use this file except in compliance with the License.
5 * You may obtain a copy of the License at:
7 * http://www.apache.org/licenses/LICENSE-2.0
9 * Unless required by applicable law or agreed to in writing, software
10 * distributed under the License is distributed on an "AS IS" BASIS,
11 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 * See the License for the specific language governing permissions and
13 * limitations under the License.
16 * physmem.c: Unix physical memory
18 * Copyright (c) 2008 Eliot Dresselhaus
20 * Permission is hereby granted, free of charge, to any person obtaining
21 * a copy of this software and associated documentation files (the
22 * "Software"), to deal in the Software without restriction, including
23 * without limitation the rights to use, copy, modify, merge, publish,
24 * distribute, sublicense, and/or sell copies of the Software, and to
25 * permit persons to whom the Software is furnished to do so, subject to
26 * the following conditions:
28 * The above copyright notice and this permission notice shall be
29 * included in all copies or substantial portions of the Software.
31 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
32 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
33 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
34 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
35 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
36 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
37 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
41 #include <sys/types.h>
42 #include <sys/mount.h>
44 #include <sys/fcntl.h>
49 #include <vlib/vlib.h>
50 #include <vlib/physmem.h>
51 #include <vlib/unix/unix.h>
/* Fallback definitions of the memfd_create(2) syscall number for kernels /
 * libc headers that predate it, plus a thin syscall wrapper (glibc gained a
 * wrapper only in 2.27).  NOTE(review): this listing appears to be missing
 * lines — at least one `#elif` architecture guard (the 385 value is likely
 * ppc64), the closing `#endif`s, and the wrapper's return-type line. */
54 #ifndef __NR_memfd_create
55 #if defined __x86_64__
56 #define __NR_memfd_create 319
/* NOTE(review): guard line for this 385 value is missing from the listing;
 * presumably `#elif defined __PPC64__` — confirm against upstream. */
57 #define __NR_memfd_create 385
58 #elif defined __aarch64__
59 #define __NR_memfd_create 279
61 #error "__NR_memfd_create unknown for this architecture"
/* Invoke memfd_create directly via syscall(2). */
66 memfd_create (const char *name, unsigned int flags)
68 return syscall (__NR_memfd_create, name, flags);
/* File-sealing fcntl constants, defined locally when the libc headers are
 * too old to provide them (they live above F_LINUX_SPECIFIC_BASE). */
71 #ifndef F_LINUX_SPECIFIC_BASE
72 #define F_LINUX_SPECIFIC_BASE 1024
74 #define MFD_ALLOW_SEALING 0x0002U
75 #define F_ADD_SEALS (F_LINUX_SPECIFIC_BASE + 9)
76 #define F_GET_SEALS (F_LINUX_SPECIFIC_BASE + 10)
78 #define F_SEAL_SEAL 0x0001 /* prevent further seals from being set */
79 #define F_SEAL_SHRINK 0x0002 /* prevent file from shrinking */
80 #define F_SEAL_GROW 0x0004 /* prevent file from growing */
81 #define F_SEAL_WRITE 0x0008 /* prevent writes */
/* Allocate n_bytes from region idx's mheap with the requested alignment.
 * For real (non-fake) DMA regions, re-tries allocations that would straddle
 * a physical page boundary, queueing the rejected offsets and releasing
 * them once a suitable chunk is found.  Returns a pointer into the region
 * heap, or 0 on failure.
 * NOTE(review): listing is missing lines — declarations of `to_free` and
 * `i`, the retry loop structure, and several braces are absent. */
84 unix_physmem_alloc_aligned (vlib_main_t * vm, vlib_physmem_region_index_t idx,
85 uword n_bytes, uword alignment)
87 vlib_physmem_region_t *pr = vlib_physmem_get_region (vm, idx);
88 uword lo_offset, hi_offset;
94 /* IO memory is always at least cache aligned. */
95 alignment = clib_max (alignment, CLIB_CACHE_LINE_BYTES);
/* Attempt allocation from the region heap; lo_offset receives the
 * resulting heap offset (presumably via an out-parameter line missing
 * from this listing — confirm against upstream). */
99 mheap_get_aligned (pr->heap, n_bytes,
100 /* align */ alignment,
101 /* align offset */ 0,
104 /* Allocation failed? */
/* Fake regions are plain memory: no DMA boundary constraint applies. */
108 if (pr->flags & VLIB_PHYSMEM_F_FAKE)
111 /* Make sure allocation does not span DMA physical chunk boundary. */
112 hi_offset = lo_offset + n_bytes - 1;
/* Accept when first and last byte fall within the same physical page. */
114 if ((lo_offset >> pr->log2_page_size) ==
115 (hi_offset >> pr->log2_page_size))
118 /* Allocation would span chunk boundary, queue it to be freed as soon as
119 we find suitable chunk. */
120 vec_add1 (to_free, lo_offset);
/* Release all allocations rejected during the retry loop. */
126 for (i = 0; i < vec_len (to_free); i++)
127 mheap_put (pr->heap, to_free[i]);
/* ~0 is the sentinel for "no allocation found". */
131 return lo_offset != ~0 ? pr->heap + lo_offset : 0;
/* Free an object previously obtained from unix_physmem_alloc_aligned by
 * returning its heap offset (pointer minus heap base) to the region mheap. */
135 unix_physmem_free (vlib_main_t * vm, vlib_physmem_region_index_t idx, void *x)
137 vlib_physmem_region_t *pr = vlib_physmem_get_region (vm, idx);
138 /* Return object to region's heap. */
139 mheap_put (pr->heap, x - pr->heap);
/* Translate a virtual address to its physical address by reading the
 * kernel's /proc/self/pagemap (fd must be an open descriptor on it).
 * Each pagemap entry is a u64; bit 63 = page present, low 55 bits = PFN.
 * NOTE(review): the error-return lines after each warning are missing from
 * this listing — presumably each failure path returns 0. */
143 get_page_paddr (int fd, uword addr)
145 int pagesize = sysconf (_SC_PAGESIZE);
146 u64 seek, pagemap = 0;
/* Entry offset = virtual page number * sizeof(u64). */
148 seek = ((u64) addr / pagesize) * sizeof (u64);
149 if (lseek (fd, seek, SEEK_SET) != seek)
151 clib_unix_warning ("lseek to 0x%llx", seek);
154 if (read (fd, &pagemap, sizeof (pagemap)) != (sizeof (pagemap)))
156 clib_unix_warning ("read ptbits");
/* Bit 63 clear means the page is not present in RAM. */
159 if ((pagemap & (1ULL << 63)) == 0)
/* Low 55 bits hold the page frame number. */
162 pagemap &= pow2_mask (55);
/* Physical address = PFN * page size. */
164 return pagemap * pagesize;
/* Create a physmem region of `size` bytes on `numa_node`.
 * Real regions (VLIB_PHYSMEM_F_FAKE clear) are backed by a per-region
 * hugetlbfs mount + file, require root, and get a virtual-to-physical page
 * table built from /proc/self/pagemap.  Fake regions are backed by a sealed
 * memfd.  Optionally seeds an mheap (VLIB_PHYSMEM_F_INIT_MHEAP) and/or
 * registers the range with the buffer subsystem (VLIB_PHYSMEM_F_HAVE_BUFFERS).
 * On success *idx receives the new region index; returns a clib error
 * otherwise.
 * NOTE(review): this listing is missing many lines — local declarations
 * (mmap_flags, pagemap_fd, filename, old_mpol, i, node, vaddr, st), several
 * braces/else lines, the `error:`/`done:` cleanup labels, and the final
 * return — so the cleanup ordering below cannot be fully verified here. */
167 static clib_error_t *
168 unix_physmem_region_alloc (vlib_main_t * vm, char *name, u32 size,
169 u8 numa_node, u32 flags,
170 vlib_physmem_region_index_t * idx)
172 vlib_physmem_main_t *vpm = &vm->physmem_main;
173 vlib_physmem_region_t *pr;
174 clib_error_t *error = 0;
/* Save the caller's NUMA memory policy so it can be restored after mmap. */
181 struct bitmask *old_mask = numa_allocate_nodemask ();
/* Real (hugepage) regions need root for mount(2)/mempolicy. */
183 if (geteuid () != 0 && (flags & VLIB_PHYSMEM_F_FAKE) == 0)
184 return clib_error_return (0, "not allowed");
186 pool_get (vpm->regions, pr);
/* Region index must fit the (presumably 8-bit) region index type. */
188 if ((pr - vpm->regions) >= 256)
190 error = clib_error_return (0, "maximum number of regions reached");
194 pr->index = pr - vpm->regions;
198 if (get_mempolicy (&old_mpol, old_mask->maskp, old_mask->size + 1, NULL, 0)
201 error = clib_error_return_unix (0, "get_mempolicy");
/* --- Real region: hugetlbfs-backed file --- */
205 if ((flags & VLIB_PHYSMEM_F_FAKE) == 0)
/* pagemap is needed later to discover physical page addresses. */
207 if ((pagemap_fd = open ((char *) "/proc/self/pagemap", O_RDONLY)) == -1)
209 error = clib_error_return_unix (0, "open '/proc/self/pagemap'");
/* Per-region hugetlbfs mount point under the runtime directory. */
213 mount_dir = format (0, "%s/physmem_region%d%c",
214 vlib_unix_get_runtime_dir (), pr->index, 0);
215 filename = format (0, "%s/mem%c", mount_dir, 0);
/* Remove any stale entry before (re)creating the directory. */
217 unlink ((char *) mount_dir);
219 error = vlib_unix_recursive_mkdir ((char *) mount_dir);
223 if (mount ("none", (char *) mount_dir, "hugetlbfs", 0, NULL))
225 error = clib_error_return_unix (0, "mount hugetlb directory '%s'",
230 if ((pr->fd = open ((char *) filename, O_CREAT | O_RDWR, 0755)) == -1)
232 error = clib_error_return_unix (0, "open")
236 mmap_flags = MAP_SHARED | MAP_HUGETLB | MAP_LOCKED;
/* --- Fake region: anonymous memfd, sealed against shrinking --- */
240 if ((pr->fd = memfd_create (name, MFD_ALLOW_SEALING)) == -1)
241 return clib_error_return_unix (0, "memfd_create");
243 if ((fcntl (pr->fd, F_ADD_SEALS, F_SEAL_SHRINK)) == -1)
246 clib_error_return_unix (0, "fcntl (F_ADD_SEALS, F_SEAL_SHRINK)");
249 mmap_flags = MAP_SHARED;
/* st_blksize reveals the backing page size (huge or normal). */
252 if (fstat (pr->fd, &st))
254 error = clib_error_return_unix (0, "fstat");
258 pr->log2_page_size = min_log2 (st.st_blksize);
/* Round the requested size up to a whole number of pages. */
259 pr->n_pages = ((size - 1) >> pr->log2_page_size) + 1;
260 size = pr->n_pages * (1 << pr->log2_page_size);
262 if ((ftruncate (pr->fd, size)) == -1)
264 error = clib_error_return_unix (0, "ftruncate length: %d", size);
/* Ensure the kernel has enough hugepages reserved on the target node
 * (page size converted to KB via the -10 shift). */
268 if ((flags & VLIB_PHYSMEM_F_FAKE) == 0)
270 error = vlib_sysfs_prealloc_hugepages (numa_node,
271 1 << (pr->log2_page_size - 10),
/* Bias page placement toward the requested node for the mmap below. */
277 numa_set_preferred (numa_node);
279 pr->mem = mmap (0, size, (PROT_READ | PROT_WRITE), mmap_flags, pr->fd, 0);
281 if (pr->mem == MAP_FAILED)
284 error = clib_error_return_unix (0, "mmap");
/* Restore the memory policy saved at entry. */
288 if (set_mempolicy (old_mpol, old_mask->maskp, old_mask->size + 1) == -1)
290 error = clib_error_return_unix (0, "set_mempolicy");
294 pr->size = pr->n_pages << pr->log2_page_size;
295 pr->page_mask = (1 << pr->log2_page_size) - 1;
296 pr->numa_node = numa_node;
297 pr->name = format (0, "%s", name);
/* Verify each page actually landed on the requested node. */
299 if ((flags & VLIB_PHYSMEM_F_FAKE) == 0)
302 for (i = 0; i < pr->n_pages; i++)
304 void *ptr = pr->mem + (i << pr->log2_page_size);
306 move_pages (0, 1, &ptr, 0, &node, 0);
307 if (numa_node != node)
310 ("physmem page for region \'%s\' allocated on the wrong"
311 " numa node (requested %u actual %u)", pr->name,
312 pr->numa_node, node, i);
/* Optionally carve an mheap out of the region memory. */
318 if (flags & VLIB_PHYSMEM_F_INIT_MHEAP)
320 pr->heap = mheap_alloc_with_flags (pr->mem, pr->size,
321 /* Don't want mheap mmap/munmap with IO memory. */
322 MHEAP_FLAG_DISABLE_VM |
323 MHEAP_FLAG_THREAD_SAFE);
/* NOTE(review): printing the heap to stdout here looks like leftover
 * debug output — consider removing upstream. */
324 fformat (stdout, "%U", format_mheap, pr->heap, /* verbose */ 1);
327 if (flags & VLIB_PHYSMEM_F_HAVE_BUFFERS)
329 vlib_buffer_add_mem_range (vm, pointer_to_uword (pr->mem), pr->size);
/* Build the virtual -> physical page table via /proc/self/pagemap. */
334 if ((flags & VLIB_PHYSMEM_F_FAKE) == 0)
337 for (i = 0; i < pr->n_pages; i++)
340 pointer_to_uword (pr->mem) + (((u64) i) << pr->log2_page_size);
341 u64 page_paddr = get_page_paddr (pagemap_fd, vaddr);
342 vec_add1 (pr->page_table, page_paddr);
/* Error path: unwind the mapping and release the region slot. */
353 munmap (pr->mem, size);
355 memset (pr, 0, sizeof (*pr));
356 pool_put (vpm->regions, pr);
/* Common cleanup: lazily unmount the per-region hugetlbfs (it stays
 * alive while the region's fd/mapping hold references). */
361 umount2 ((char *) mount_dir, MNT_DETACH);
362 rmdir ((char *) mount_dir);
363 vec_free (mount_dir);
365 numa_free_cpumask (old_mask);
/* Destroy a physmem region: unmap its memory and return the region slot to
 * the pool.  NOTE(review): listing shows no close(pr->fd) or page_table
 * cleanup — lines may be missing here; confirm against upstream. */
373 unix_physmem_region_free (vlib_main_t * vm, vlib_physmem_region_index_t idx)
375 vlib_physmem_main_t *vpm = &vm->physmem_main;
376 vlib_physmem_region_t *pr = vlib_physmem_get_region (vm, idx);
380 munmap (pr->mem, pr->size);
382 pool_put (vpm->regions, pr);
/* Install the Unix physmem backend into the vlib main structure.
 * Idempotent: bails out if the alloc hook is already set. */
386 unix_physmem_init (vlib_main_t * vm)
388 clib_error_t *error = 0;
390 /* Avoid multiple calls. */
391 if (vm->os_physmem_alloc_aligned)
/* Wire up the per-OS physmem function pointers. */
394 vm->os_physmem_alloc_aligned = unix_physmem_alloc_aligned;
395 vm->os_physmem_free = unix_physmem_free;
396 vm->os_physmem_region_alloc = unix_physmem_region_alloc;
397 vm->os_physmem_region_free = unix_physmem_region_free;
/* CLI handler for "show physmem": print one line per region (index, name,
 * page size in KB, page count, NUMA node, fd) plus its heap stats when an
 * mheap is attached. */
402 static clib_error_t *
403 show_physmem (vlib_main_t * vm,
404 unformat_input_t * input, vlib_cli_command_t * cmd)
406 vlib_physmem_main_t *vpm = &vm->physmem_main;
407 vlib_physmem_region_t *pr;
410 pool_foreach (pr, vpm->regions, (
/* -10 shift converts the page size from bytes to KB. */
412 vlib_cli_output (vm, "index %u name '%s' page-size %uKB num-pages %d "
413 "numa-node %u fd %d\n",
414 pr->index, pr->name, (1 << (pr->log2_page_size -10)),
415 pr->n_pages, pr->numa_node, pr->fd);
417 vlib_cli_output (vm, " %U", format_mheap, pr->heap, /* verbose */ 1);
419 vlib_cli_output (vm, " no heap\n");
/* Register the "show physmem" debug CLI command. */
426 VLIB_CLI_COMMAND (show_physmem_command, static) = {
427 .path = "show physmem",
428 .short_help = "Show physical memory allocation",
429 .function = show_physmem,
434 * fd.io coding-style-patch-verification: ON
437 * eval: (c-set-style "gnu")