/*
 * Copyright (c) 2018 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
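
/*
 * pmalloc: simple, NUMA-aware allocator for page-backed memory.  A large
 * virtual region is reserved at init time, individual (huge)pages are
 * mapped on a requested NUMA node on demand, and a per-page lookup table
 * records the VA-to-PA offset for callers that need physical addresses.
 */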
#include <sys/types.h>
#include <sys/mman.h>		/* mmap, munmap */
#include <fcntl.h>		/* open */
#include <unistd.h>		/* sysconf, lseek, read, ftruncate */
#include <linux/mempolicy.h>
#include <linux/memfd.h>

#include <vppinfra/format.h>
#include <vppinfra/linux/syscall.h>
#include <vppinfra/linux/sysfs.h>
#include <vppinfra/mem.h>
#include <vppinfra/hash.h>
#include <vppinfra/pmalloc.h>

#if __SIZEOF_POINTER__ >= 8
#define DEFAULT_RESERVED_MB 16384
#else
#define DEFAULT_RESERVED_MB 256
#endif

static inline clib_pmalloc_chunk_t *
get_chunk (clib_pmalloc_page_t * pp, u32 index)
{
  return pool_elt_at_index (pp->chunks, index);
}
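
/* Number of pages of size 2^log2_page_sz needed to hold `size' bytes,
   e.g. 9 MB with 2 MB pages rounds up to 5 pages. */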
pmalloc_size2pages (uword size, u32 log2_page_sz)
{
  return round_pow2 (size, 1ULL << log2_page_sz) >> log2_page_sz;
}
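
/* Resolve CLIB_PMALLOC_NUMA_LOCAL to the NUMA node of the CPU the caller
   is currently running on (via getcpu); a non-zero return means the node
   could not be determined. */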
pmalloc_validate_numa_node (u32 * numa_node)
  if (*numa_node == CLIB_PMALLOC_NUMA_LOCAL)
      if (getcpu (&cpu, numa_node, 0) != 0)
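
/*
 * One-time initialization: record the default (hugepage) and system page
 * sizes, check whether physical addresses can be read back (pagemap), and
 * reserve a PROT_NONE virtual range big enough for pm->max_pages
 * default-sized pages.  Physical memory is mapped in later, page by page,
 * by pmalloc_map_pages().
 */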
clib_pmalloc_init (clib_pmalloc_main_t * pm, uword base_addr, uword size)
  ASSERT (pm->error == 0);

  pagesize = clib_mem_get_default_hugepage_size ();
  pm->def_log2_page_sz = min_log2 (pagesize);
  pm->sys_log2_page_sz = min_log2 (sysconf (_SC_PAGESIZE));
  pm->lookup_log2_page_sz = pm->def_log2_page_sz;

  /* check if pagemap is accessible */
  pt = clib_mem_vm_get_paddr (&pt, pm->sys_log2_page_sz, 1);
  if (pt == 0 || pt[0] == 0)
    pm->flags |= CLIB_PMALLOC_F_NO_PAGEMAP;

  size = size ? size : ((u64) DEFAULT_RESERVED_MB) << 20;
  size = round_pow2 (size, pagesize);

  pm->max_pages = size >> pm->def_log2_page_sz;

  /* reserve VA space for future growth */
  mmap_flags = MAP_PRIVATE | MAP_ANONYMOUS;
    mmap_flags |= MAP_FIXED;

  pm->base = mmap (uword_to_pointer (base_addr, void *), size + pagesize,
                   PROT_NONE, mmap_flags, -1, 0);

  if (pm->base == MAP_FAILED)
      pm->error = clib_error_return_unix (0, "failed to reserve %u pages",
                                          pm->max_pages);

  off = round_pow2 (pointer_to_uword (pm->base), pagesize) -
    pointer_to_uword (pm->base);

  /* trim start and end of reservation to be page aligned */
    munmap (pm->base, off);

    munmap (pm->base + ((uword) pm->max_pages * pagesize), pagesize - off);
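
/*
 * Carve an n_blocks chunk out of one page: walk the page's chunk list
 * first-fit, split off a leading chunk when block_align forces an offset
 * and a trailing chunk when the match is bigger than requested, then
 * remember the chunk's VA in chunk_index_by_va so it can be freed later.
 */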
alloc_chunk_from_page (clib_pmalloc_main_t * pm, clib_pmalloc_page_t * pp,
                       u32 n_blocks, u32 block_align, u32 numa_node)
  clib_pmalloc_chunk_t *c = 0;
  clib_pmalloc_arena_t *a;
  u32 alloc_chunk_index;

  a = pool_elt_at_index (pm->arenas, pp->arena_index);

      u32 i, start = 0, prev = ~0;

      for (i = 0; i < a->subpages_per_page; i++)
          pool_get (pp->chunks, c);
          c->size = pp->n_free_blocks / a->subpages_per_page;
            pp->first_chunk_index = c - pp->chunks;
            pp->chunks[prev].next = c - pp->chunks;
          prev = c - pp->chunks;

      pp->n_free_chunks = a->subpages_per_page;

  if (pp->n_free_blocks < n_blocks)

  alloc_chunk_index = pp->first_chunk_index;

  c = pool_elt_at_index (pp->chunks, alloc_chunk_index);
  off = (block_align - (c->start & (block_align - 1))) & (block_align - 1);

  if (c->used || n_blocks + off > c->size)
      alloc_chunk_index = c->next;

  /* if alignment is needed, create a new empty chunk */
      u32 offset_chunk_index;
      clib_pmalloc_chunk_t *co;
      pool_get (pp->chunks, c);

      offset_chunk_index = alloc_chunk_index;
      alloc_chunk_index = c - pp->chunks;

      co = pool_elt_at_index (pp->chunks, offset_chunk_index);
      c->size = co->size - off;
      c->start = co->start + off;
      c->prev = offset_chunk_index;
      co->next = alloc_chunk_index;

  if (c->size > n_blocks)
      u32 tail_chunk_index;
      clib_pmalloc_chunk_t *ct;
      pool_get (pp->chunks, ct);

      tail_chunk_index = ct - pp->chunks;
      c = pool_elt_at_index (pp->chunks, alloc_chunk_index);
      ct->size = c->size - n_blocks;
      ct->prev = alloc_chunk_index;
      ct->start = c->start + n_blocks;

      c->next = tail_chunk_index;
        pool_elt_at_index (pp->chunks, ct->next)->prev = tail_chunk_index;
  else if (c->next != ~0)
    pool_elt_at_index (pp->chunks, c->next)->prev = alloc_chunk_index;

  c = get_chunk (pp, alloc_chunk_index);
  va = pm->base + ((pp - pm->pages) << pm->def_log2_page_sz) +
    (c->start << PMALLOC_LOG2_BLOCK_SZ);
  hash_set (pm->chunk_index_by_va, pointer_to_uword (va), alloc_chunk_index);
  pp->n_free_blocks -= n_blocks;
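
/*
 * Refresh the VA-to-PA lookup table.  Every entry covers one lookup-sized
 * page and stores (va - pa) as read from /proc/self/pagemap; with
 * CLIB_PMALLOC_F_NO_PAGEMAP set the entry degenerates to the VA itself.
 */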
pmalloc_update_lookup_table (clib_pmalloc_main_t * pm, u32 first, u32 count)
  uword seek, va, pa, p;
  u32 elts_per_page = 1U << (pm->def_log2_page_sz - pm->lookup_log2_page_sz);

  vec_validate_aligned (pm->lookup_table, vec_len (pm->pages) *
                        elts_per_page - 1, CLIB_CACHE_LINE_BYTES);

  p = (uword) first * elts_per_page;
  if (pm->flags & CLIB_PMALLOC_F_NO_PAGEMAP)
      while (p < (uword) elts_per_page * count)
          pm->lookup_table[p] = pointer_to_uword (pm->base) +
            (p << pm->lookup_log2_page_sz);

  fd = open ((char *) "/proc/self/pagemap", O_RDONLY);
  while (p < (uword) elts_per_page * count)
      va = pointer_to_uword (pm->base) + (p << pm->lookup_log2_page_sz);
      seek = (va >> pm->sys_log2_page_sz) * sizeof (pa);
      if (fd != -1 && lseek (fd, seek, SEEK_SET) == seek &&
          read (fd, &pa, sizeof (pa)) == (sizeof (pa)) &&
          pa & (1ULL << 63) /* page present bit */ )
          pa = (pa & pow2_mask (55)) << pm->sys_log2_page_sz;

      pm->lookup_table[p] = va - pa;
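
/*
 * Back n_pages of the reserved VA range with real memory for an arena:
 * optionally preallocate hugepages through sysfs, temporarily bind the
 * mempolicy to the requested NUMA node, mmap over the reservation (shared
 * fd-backed or private anonymous), verify residency and NUMA placement,
 * then register the pages and update the lookup table.
 */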
static inline clib_pmalloc_page_t *
pmalloc_map_pages (clib_pmalloc_main_t * pm, clib_pmalloc_arena_t * a,
                   u32 numa_node, u32 n_pages)
  clib_pmalloc_page_t *pp = 0;
  int status, rv, i, mmap_flags;
  long unsigned int mask[16] = { 0 };
  long unsigned int old_mask[16] = { 0 };
  uword page_size = 1ULL << a->log2_subpage_sz;
  uword size = (uword) n_pages << pm->def_log2_page_sz;

  clib_error_free (pm->error);

  if (pm->max_pages <= vec_len (pm->pages))
      pm->error = clib_error_return (0, "maximum number of pages reached");

  if (a->log2_subpage_sz != pm->sys_log2_page_sz)
      pm->error = clib_sysfs_prealloc_hugepages (numa_node,
                                                 a->log2_subpage_sz, n_pages);

  rv = get_mempolicy (&old_mpol, old_mask, sizeof (old_mask) * 8 + 1, 0, 0);
  /* failure to get mempolicy means we can only proceed with numa 0 maps */
  if (rv == -1 && numa_node != 0)
      pm->error = clib_error_return_unix (0, "failed to get mempolicy");

  mask[0] = 1 << numa_node;
  rv = set_mempolicy (MPOL_BIND, mask, sizeof (mask) * 8 + 1);
  if (rv == -1 && numa_node != 0)
      pm->error = clib_error_return_unix (0, "failed to set mempolicy for "
                                          "numa node %u", numa_node);

  mmap_flags = MAP_FIXED;

  if ((pm->flags & CLIB_PMALLOC_F_NO_PAGEMAP) == 0)
    mmap_flags |= MAP_LOCKED;

  if (a->flags & CLIB_PMALLOC_ARENA_F_SHARED_MEM)
      mmap_flags |= MAP_SHARED;
      if (a->log2_subpage_sz != pm->sys_log2_page_sz)
        pm->error = clib_mem_create_hugetlb_fd ((char *) a->name, &a->fd);
        pm->error = clib_mem_create_fd ((char *) a->name, &a->fd);
      if ((ftruncate (a->fd, size)) == -1)

      if (a->log2_subpage_sz != pm->sys_log2_page_sz)
        mmap_flags |= MAP_HUGETLB;
      mmap_flags |= MAP_PRIVATE | MAP_ANONYMOUS;

  va = pm->base + (((uword) vec_len (pm->pages)) << pm->def_log2_page_sz);
  if (mmap (va, size, PROT_READ | PROT_WRITE, mmap_flags, a->fd, 0) ==
      pm->error = clib_error_return_unix (0, "failed to mmap %u pages at %p "
                                          "fd %d numa %d flags 0x%x", n_pages,
                                          va, a->fd, numa_node, mmap_flags);

  /* verify the huge pages were actually allocated; touching an unbacked
     huge page would generate SIGBUS */
  if (a->log2_subpage_sz != pm->sys_log2_page_sz)
      for (int i = 0; i < n_pages; i++)
          mincore (va + i * page_size, 1, &flag);
          /* flag is 1 if the page was successfully allocated and is in memory */
            clib_error_return_unix (0,
                                    "Unable to fulfill huge page allocation request");

  clib_memset (va, 0, size);

  rv = set_mempolicy (old_mpol, old_mask, sizeof (old_mask) * 8 + 1);
  if (rv == -1 && numa_node != 0)
      pm->error = clib_error_return_unix (0, "failed to restore mempolicy");

  /* we tolerate move_pages failure only if the request is for numa node 0,
     to support non-numa kernels */
  rv = move_pages (0, 1, &va, 0, &status, 0);
  if ((rv == 0 && status != numa_node) || (rv != 0 && numa_node != 0))
      pm->error = rv == -1 ?
        clib_error_return_unix (0, "page allocated on wrong node, numa node "
                                "%u status %d", numa_node, status) :
        clib_error_return (0, "page allocated on wrong node, numa node "
                           "%u status %d", numa_node, status);

  /* unmap & reserve */
  mmap (va, size, PROT_NONE, MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS,

  for (i = 0; i < n_pages; i++)
      vec_add2 (pm->pages, pp, 1);
      pp->n_free_blocks = 1 << (pm->def_log2_page_sz - PMALLOC_LOG2_BLOCK_SZ);
      pp->index = pp - pm->pages;
      pp->arena_index = a->index;
      vec_add1 (a->page_indices, pp->index);
  /* if new arena is using smaller page size, we need to rebuild whole
     lookup table */
  if (a->log2_subpage_sz < pm->lookup_log2_page_sz)
      pm->lookup_log2_page_sz = a->log2_subpage_sz;
      pmalloc_update_lookup_table (pm, vec_len (pm->pages) - n_pages,
    pmalloc_update_lookup_table (pm, 0, vec_len (pm->pages));

  /* return pointer to 1st page */
  return pp - (n_pages - 1);
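
/*
 * Create a named, fd-backed (shareable) arena of `size' bytes on
 * `numa_node' and return its base VA.  Illustrative sketch only -- the
 * `pm' instance and the arena name are hypothetical, error handling is
 * omitted, and log2_page_sz 0 selects the default page size:
 *
 *   void *base = clib_pmalloc_create_shared_arena (pm, "pkt-pool",
 *                                                  64 << 20, 0, 0);
 *   void *slot = clib_pmalloc_alloc_from_arena (pm, base, 2048, 64);
 */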
clib_pmalloc_create_shared_arena (clib_pmalloc_main_t * pm, char *name,
                                  uword size, u32 log2_page_sz, u32 numa_node)
  clib_pmalloc_arena_t *a;
  clib_pmalloc_page_t *pp;

  clib_error_free (pm->error);

  if (log2_page_sz == 0)
    log2_page_sz = pm->def_log2_page_sz;
  else if (log2_page_sz != pm->def_log2_page_sz &&
           log2_page_sz != pm->sys_log2_page_sz)
      pm->error = clib_error_create ("unsupported page size (%uKB)",
                                     1 << (log2_page_sz - 10));

  n_pages = pmalloc_size2pages (size, pm->def_log2_page_sz);

  if (n_pages + vec_len (pm->pages) > pm->max_pages)

  if (pmalloc_validate_numa_node (&numa_node))

  pool_get (pm->arenas, a);
  a->index = a - pm->arenas;
  a->name = format (0, "%s%c", name, 0);
  a->numa_node = numa_node;
  a->flags = CLIB_PMALLOC_ARENA_F_SHARED_MEM;
  a->log2_subpage_sz = log2_page_sz;
  a->subpages_per_page = 1U << (pm->def_log2_page_sz - log2_page_sz);

  if ((pp = pmalloc_map_pages (pm, a, numa_node, n_pages)) == 0)
      memset (a, 0, sizeof (*a));
      pool_put (pm->arenas, a);

  return pm->base + ((uword) pp->index << pm->def_log2_page_sz);
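
/*
 * Common allocation path: round the request up to PMALLOC_BLOCK_SZ blocks,
 * try every existing page of the target arena, and for non-shared arenas
 * map one more page on demand.  With no arena given, a per-NUMA-node
 * default arena is created lazily.
 */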
clib_pmalloc_alloc_inline (clib_pmalloc_main_t * pm, clib_pmalloc_arena_t * a,
                           uword size, uword align, u32 numa_node)
  clib_pmalloc_page_t *pp;
  u32 n_blocks, block_align, *page_index;

  ASSERT (is_pow2 (align));

  if (pmalloc_validate_numa_node (&numa_node))

      if (size > 1ULL << pm->def_log2_page_sz)

      vec_validate_init_empty (pm->default_arena_for_numa_node,
      if (pm->default_arena_for_numa_node[numa_node] == ~0)
          pool_get (pm->arenas, a);
          pm->default_arena_for_numa_node[numa_node] = a - pm->arenas;
          a->name = format (0, "default-numa-%u%c", numa_node, 0);
          a->numa_node = numa_node;
          a->log2_subpage_sz = pm->def_log2_page_sz;
          a->subpages_per_page = 1;
        a = pool_elt_at_index (pm->arenas,
                               pm->default_arena_for_numa_node[numa_node]);
  else if (size > 1ULL << a->log2_subpage_sz)

  n_blocks = round_pow2 (size, PMALLOC_BLOCK_SZ) / PMALLOC_BLOCK_SZ;
  block_align = align >> PMALLOC_LOG2_BLOCK_SZ;

  vec_foreach (page_index, a->page_indices)
      pp = vec_elt_at_index (pm->pages, *page_index);
      void *rv = alloc_chunk_from_page (pm, pp, n_blocks, block_align,

  if ((a->flags & CLIB_PMALLOC_ARENA_F_SHARED_MEM) == 0 &&
      (pp = pmalloc_map_pages (pm, a, numa_node, 1)))
    return alloc_chunk_from_page (pm, pp, n_blocks, block_align, numa_node);
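
/*
 * Public wrappers around clib_pmalloc_alloc_inline().  A minimal usage
 * sketch, assuming a zero-initialized clib_pmalloc_main_t and ignoring
 * error handling (variable names are illustrative):
 *
 *   static clib_pmalloc_main_t pmalloc_main;
 *   clib_pmalloc_main_t *pm = &pmalloc_main;
 *
 *   clib_pmalloc_init (pm, 0, 0);   (0, 0 = default base address and size)
 *   u8 *buf = clib_pmalloc_alloc_aligned_on_numa (pm, 8192, 64, 1);
 *   ...
 *   clib_pmalloc_free (pm, buf);
 */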
clib_pmalloc_alloc_aligned_on_numa (clib_pmalloc_main_t * pm, uword size,
                                    uword align, u32 numa_node)
{
  return clib_pmalloc_alloc_inline (pm, 0, size, align, numa_node);
}

clib_pmalloc_alloc_aligned (clib_pmalloc_main_t * pm, uword size, uword align)
{
  return clib_pmalloc_alloc_inline (pm, 0, size, align,
                                    CLIB_PMALLOC_NUMA_LOCAL);
}

clib_pmalloc_alloc_from_arena (clib_pmalloc_main_t * pm, void *arena_va,
                               uword size, uword align)
{
  clib_pmalloc_arena_t *a = clib_pmalloc_get_arena (pm, arena_va);
  return clib_pmalloc_alloc_inline (pm, a, size, align, 0);
}
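
/* Two chunks can merge only when both exist, both are free, and both lie
   within the same subpage, so merged chunks never straddle a physical
   page boundary. */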
pmalloc_chunks_mergeable (clib_pmalloc_arena_t * a, clib_pmalloc_page_t * pp,
  clib_pmalloc_chunk_t *c1, *c2;

  if (ci1 == ~0 || ci2 == ~0)

  c1 = get_chunk (pp, ci1);
  c2 = get_chunk (pp, ci2);

  if (c1->used || c2->used)

  if (c1->start >> (a->log2_subpage_sz - PMALLOC_LOG2_BLOCK_SZ) !=
      c2->start >> (a->log2_subpage_sz - PMALLOC_LOG2_BLOCK_SZ))
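
/* Return a chunk to its page: look it up by VA, credit its blocks back to
   the page, and coalesce with a free neighbour on either side. */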
clib_pmalloc_free (clib_pmalloc_main_t * pm, void *va)
  clib_pmalloc_page_t *pp;
  clib_pmalloc_chunk_t *c;
  clib_pmalloc_arena_t *a;
  u32 chunk_index, page_index;

  p = hash_get (pm->chunk_index_by_va, pointer_to_uword (va));

  page_index = clib_pmalloc_get_page_index (pm, va);
  hash_unset (pm->chunk_index_by_va, pointer_to_uword (va));

  pp = vec_elt_at_index (pm->pages, page_index);
  c = pool_elt_at_index (pp->chunks, chunk_index);
  a = pool_elt_at_index (pm->arenas, pp->arena_index);

  pp->n_free_blocks += c->size;

  /* merge with next if free */
  if (pmalloc_chunks_mergeable (a, pp, chunk_index, c->next))
      clib_pmalloc_chunk_t *next = get_chunk (pp, c->next);
      c->size += next->size;
      c->next = next->next;
      if (next->next != ~0)
        get_chunk (pp, next->next)->prev = chunk_index;
      memset (next, 0, sizeof (*next));
      pool_put (pp->chunks, next);

  /* merge with prev if free */
  if (pmalloc_chunks_mergeable (a, pp, c->prev, chunk_index))
      clib_pmalloc_chunk_t *prev = get_chunk (pp, c->prev);
      prev->size += c->size;
      prev->next = c->next;
        get_chunk (pp, c->next)->prev = c->prev;
      memset (c, 0, sizeof (*c));
      pool_put (pp->chunks, c);
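
/* Human-readable page size, e.g. log2_page_sz = 21 prints as "2MB". */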
format_log2_page_size (u8 * s, va_list * va)
  u32 log2_page_sz = va_arg (*va, u32);

  if (log2_page_sz >= 30)
    return format (s, "%uGB", 1 << (log2_page_sz - 30));

  if (log2_page_sz >= 20)
    return format (s, "%uMB", 1 << (log2_page_sz - 20));

  if (log2_page_sz >= 10)
    return format (s, "%uKB", 1 << (log2_page_sz - 10));

  return format (s, "%uB", 1 << log2_page_sz);

format_pmalloc_page (u8 * s, va_list * va)
  clib_pmalloc_page_t *pp = va_arg (*va, clib_pmalloc_page_t *);
  int verbose = va_arg (*va, int);
  u32 indent = format_get_indent (s);

  s = format (s, "free %u chunks %u free-chunks %d ",
              (pp->n_free_blocks) << PMALLOC_LOG2_BLOCK_SZ,
              pool_elts (pp->chunks), pp->n_free_chunks);

      clib_pmalloc_chunk_t *c;
      c = pool_elt_at_index (pp->chunks, pp->first_chunk_index);
      s = format (s, "\n%U%12s%12s%8s%8s%8s%8s",
                  format_white_space, indent + 2,
                  "chunk offset", "size", "used", "index", "prev", "next");
          s = format (s, "\n%U%12u%12u%8s%8d%8d%8d",
                      format_white_space, indent + 2,
                      c->start << PMALLOC_LOG2_BLOCK_SZ,
                      c->size << PMALLOC_LOG2_BLOCK_SZ,
                      c->used ? "yes" : "no",
                      c - pp->chunks, c->prev, c->next);
          c = pool_elt_at_index (pp->chunks, c->next);

format_pmalloc (u8 * s, va_list * va)
  clib_pmalloc_main_t *pm = va_arg (*va, clib_pmalloc_main_t *);
  int verbose = va_arg (*va, int);
  u32 indent = format_get_indent (s);

  clib_pmalloc_page_t *pp;
  clib_pmalloc_arena_t *a;

  s = format (s, "used-pages %u reserved-pages %u default-page-size %U "
              "lookup-page-size %U%s", vec_len (pm->pages), pm->max_pages,
              format_log2_page_size, pm->def_log2_page_sz,
              format_log2_page_size, pm->lookup_log2_page_sz,
              pm->flags & CLIB_PMALLOC_F_NO_PAGEMAP ? " no-pagemap" : "");

      s = format (s, " va-start %p", pm->base);

      s = format (s, "\n%Ulast-error: %U", format_white_space, indent + 2,
                  format_clib_error, pm->error);

  pool_foreach (a, pm->arenas,
      s = format (s, "\n%Uarena '%s' pages %u subpage-size %U numa-node %u",
                  format_white_space, indent + 2, a->name,
                  vec_len (a->page_indices), format_log2_page_size,
                  a->log2_subpage_sz, a->numa_node);
        s = format (s, " shared fd %d", a->fd);
          vec_foreach (page_index, a->page_indices)
              pp = vec_elt_at_index (pm->pages, *page_index);
              s = format (s, "\n%U%U", format_white_space, indent + 4,
                          format_pmalloc_page, pp, verbose);

format_pmalloc_map (u8 * s, va_list * va)
  clib_pmalloc_main_t *pm = va_arg (*va, clib_pmalloc_main_t *);

  s = format (s, "%16s %13s %8s", "virtual-addr", "physical-addr", "size");
  vec_foreach_index (index, pm->lookup_table)
      uword *lookup_val, pa, va;
      lookup_val = vec_elt_at_index (pm->lookup_table, index);
        pointer_to_uword (pm->base) +
        ((uword) index << pm->lookup_log2_page_sz);
      pa = va - *lookup_val;
        format (s, "\n %16p %13p %8U", uword_to_pointer (va, u64),
                uword_to_pointer (pa, u64), format_log2_page_size,
                pm->lookup_log2_page_sz);

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */