/*
 * Copyright (c) 2018 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include <sys/types.h>
#include <sys/mman.h>
#include <unistd.h>
#include <fcntl.h>
#include <linux/mempolicy.h>
#include <linux/memfd.h>

#include <vppinfra/format.h>
#include <vppinfra/linux/syscall.h>
#include <vppinfra/linux/sysfs.h>
#include <vppinfra/mem.h>
#include <vppinfra/hash.h>
#include <vppinfra/pmalloc.h>
#if __SIZEOF_POINTER__ >= 8
#define DEFAULT_RESERVED_MB 16384
#else
#define DEFAULT_RESERVED_MB 256
#endif
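/* return the chunk with given pool index belonging to page 'pp' */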
static inline clib_pmalloc_chunk_t *
get_chunk (clib_pmalloc_page_t * pp, u32 index)
{
  return pool_elt_at_index (pp->chunks, index);
}

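/* number of pages of size 2^log2_page_sz needed to hold 'size' bytes */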
static inline uword
pmalloc_size2pages (uword size, u32 log2_page_sz)
{
  return round_pow2 (size, 1ULL << log2_page_sz) >> log2_page_sz;
}

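/* resolve CLIB_PMALLOC_NUMA_LOCAL to the numa node of the calling CPU;
   returns non-zero if the node cannot be determined */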
static inline int
pmalloc_validate_numa_node (u32 * numa_node)
{
  if (*numa_node == CLIB_PMALLOC_NUMA_LOCAL)
    {
      u32 cpu;
      if (getcpu (&cpu, numa_node, 0) != 0)
        return 1;
    }
  return 0;
}

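/*
 * Initialize the allocator: reserve a page-aligned, PROT_NONE virtual
 * address range sized in default (huge) pages.  Physical memory is mapped
 * into this range later, page by page.  size == 0 reserves
 * DEFAULT_RESERVED_MB megabytes.  Returns 0 on success, -1 on failure
 * (pm->error is set).
 */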
int
clib_pmalloc_init (clib_pmalloc_main_t * pm, uword size)
{
  uword off, pagesize;

  ASSERT (pm->error == 0);

  pagesize = clib_mem_get_default_hugepage_size ();
  pm->def_log2_page_sz = min_log2 (pagesize);
  pm->sys_log2_page_sz = min_log2 (sysconf (_SC_PAGESIZE));
  pm->lookup_log2_page_sz = pm->def_log2_page_sz;

  size = size ? size : ((u64) DEFAULT_RESERVED_MB) << 20;
  size = round_pow2 (size, pagesize);
  pm->max_pages = size >> pm->def_log2_page_sz;

  /* reserve VA space for future growth */
  pm->base = mmap (0, size + pagesize, PROT_NONE,
                   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (pm->base == MAP_FAILED)
    {
      pm->error = clib_error_return_unix (0, "failed to reserve %u pages",
                                          pm->max_pages);
      return -1;
    }

  off = round_pow2 (pointer_to_uword (pm->base), pagesize) -
    pointer_to_uword (pm->base);

  /* trim start and end of reservation to be page aligned */
  if (off)
    {
      munmap (pm->base, off);
      pm->base += off;
    }

  munmap (pm->base + (pm->max_pages * pagesize), pagesize - off);
  return 0;
}

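/*
 * Allocate 'n_blocks' blocks (each 1 << PMALLOC_LOG2_BLOCK_SZ bytes) from
 * page 'pp', aligned to 'block_align' blocks.  Free space is tracked as a
 * doubly-linked list of chunks per page.  Returns the chunk VA, or 0 if
 * the page cannot satisfy the request.
 */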
static void *
alloc_chunk_from_page (clib_pmalloc_main_t * pm, clib_pmalloc_page_t * pp,
                       u32 n_blocks, u32 block_align, u32 numa_node)
{
  clib_pmalloc_chunk_t *c = 0;
  clib_pmalloc_arena_t *a;
  void *va;
  u32 off;
  u32 alloc_chunk_index;

  a = pool_elt_at_index (pm->arenas, pp->arena_index);

  /* on first use split the page into one free chunk per subpage */
  if (pp->chunks == 0)
    {
      u32 i, start = 0, prev = ~0;

      for (i = 0; i < a->subpages_per_page; i++)
        {
          pool_get (pp->chunks, c);
          c->start = start;
          c->prev = prev;
          c->size = pp->n_free_blocks / a->subpages_per_page;
          start += c->size;
          if (prev == ~0)
            pp->first_chunk_index = c - pp->chunks;
          else
            pp->chunks[prev].next = c - pp->chunks;
          prev = c - pp->chunks;
        }
      c->next = ~0;
      pp->n_free_chunks = a->subpages_per_page;
    }

  /* first-fit walk over the chunk list */
  alloc_chunk_index = pp->first_chunk_index;

next_chunk:
  c = pool_elt_at_index (pp->chunks, alloc_chunk_index);
  off = (block_align - (c->start & (block_align - 1))) & (block_align - 1);

  if (c->used || n_blocks + off > c->size)
    {
      if (c->next == ~0)
        return 0;
      alloc_chunk_index = c->next;
      goto next_chunk;
    }

  /* if alignment is needed create new empty chunk */
  if (off)
    {
      u32 offset_chunk_index;
      clib_pmalloc_chunk_t *co;
      pool_get (pp->chunks, c);
      pp->n_free_chunks++;
      offset_chunk_index = alloc_chunk_index;
      alloc_chunk_index = c - pp->chunks;

      co = pool_elt_at_index (pp->chunks, offset_chunk_index);
      c->size = co->size - off;
      c->next = co->next;
      c->start = co->start + off;
      c->prev = offset_chunk_index;
      co->size = off;
      co->next = alloc_chunk_index;
    }

  c->used = 1;
  /* split off a free tail chunk if the allocation does not use it all */
  if (c->size > n_blocks)
    {
      u32 tail_chunk_index;
      clib_pmalloc_chunk_t *ct;
      pool_get (pp->chunks, ct);
      pp->n_free_chunks++;
      tail_chunk_index = ct - pp->chunks;
      c = pool_elt_at_index (pp->chunks, alloc_chunk_index);
      ct->size = c->size - n_blocks;
      ct->next = c->next;
      ct->prev = alloc_chunk_index;
      ct->start = c->start + n_blocks;

      c->size = n_blocks;
      c->next = tail_chunk_index;
      if (ct->next != ~0)
        pool_elt_at_index (pp->chunks, ct->next)->prev = tail_chunk_index;
    }
  else if (c->next != ~0)
    pool_elt_at_index (pp->chunks, c->next)->prev = alloc_chunk_index;

  c = get_chunk (pp, alloc_chunk_index);
  va = pm->base + ((pp - pm->pages) << pm->def_log2_page_sz) +
    (c->start << PMALLOC_LOG2_BLOCK_SZ);
  hash_set (pm->chunk_index_by_va, pointer_to_uword (va), alloc_chunk_index);
  pp->n_free_blocks -= n_blocks;
  pp->n_free_chunks--;
  return va;
}

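/*
 * (Re)build the VA->PA lookup table for lookup-size pages starting at page
 * 'first'.  Physical addresses are read from /proc/self/pagemap; each table
 * entry stores (va - pa) so a physical address can be derived by a single
 * subtraction.  If pagemap is unavailable the entry degrades to va (pa 0).
 */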
static void
pmalloc_update_lookup_table (clib_pmalloc_main_t * pm, u32 first, u32 count)
{
  uword seek, va, pa, p;
  int fd;
  u32 elts_per_page = 1U << (pm->def_log2_page_sz - pm->lookup_log2_page_sz);

  vec_validate_aligned (pm->lookup_table, vec_len (pm->pages) *
                        elts_per_page - 1, CLIB_CACHE_LINE_BYTES);

  fd = open ((char *) "/proc/self/pagemap", O_RDONLY);

  p = first * elts_per_page;
  while (p < elts_per_page * count)
    {
      va = pointer_to_uword (pm->base) + (p << pm->lookup_log2_page_sz);
      seek = (va >> pm->sys_log2_page_sz) * sizeof (pa);
      if (fd != -1 && lseek (fd, seek, SEEK_SET) == seek &&
          read (fd, &pa, sizeof (pa)) == (sizeof (pa)) &&
          pa & (1ULL << 63) /* page present bit */ )
        {
          pa = (pa & pow2_mask (55)) << pm->sys_log2_page_sz;
        }
      else
        pa = 0;             /* unreadable or not present - fall back to va-only entry */
      pm->lookup_table[p] = va - pa;
      p++;
    }

  if (fd != -1)
    close (fd);
}

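/*
 * Map 'n_pages' new pages for arena 'a' at the next free spot in the
 * reserved VA range, bound to 'numa_node' via a temporary MPOL_BIND
 * mempolicy.  Hugepages are preallocated through sysfs when the arena page
 * size differs from the system page size, and final placement is verified
 * with move_pages().  Returns a pointer to the first new page, or 0 on
 * error with pm->error set.
 */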
static inline clib_pmalloc_page_t *
pmalloc_map_pages (clib_pmalloc_main_t * pm, clib_pmalloc_arena_t * a,
                   u32 numa_node, u32 n_pages)
{
  clib_pmalloc_page_t *pp = 0;
  int status, rv, i, mmap_flags;
  void *va;
  int old_mpol = -1;
  long unsigned int mask[16] = { 0 };
  long unsigned int old_mask[16] = { 0 };
  uword size = (uword) n_pages << pm->def_log2_page_sz;

  clib_error_free (pm->error);

  if (pm->max_pages <= vec_len (pm->pages))
    {
      pm->error = clib_error_return (0, "maximum number of pages reached");
      return 0;
    }

  if (a->log2_subpage_sz != pm->sys_log2_page_sz)
    {
      pm->error = clib_sysfs_prealloc_hugepages (numa_node,
                                                 a->log2_subpage_sz, n_pages);
      if (pm->error)
        return 0;
    }

  rv = get_mempolicy (&old_mpol, old_mask, sizeof (old_mask) * 8 + 1, 0, 0);
  /* failure to get mempolicy means we can only proceed with numa 0 maps */
  if (rv == -1 && numa_node != 0)
    {
      pm->error = clib_error_return_unix (0, "failed to get mempolicy");
      return 0;
    }

  mask[0] = 1 << numa_node;
  rv = set_mempolicy (MPOL_BIND, mask, sizeof (mask) * 8 + 1);
  if (rv == -1 && numa_node != 0)
    {
      pm->error = clib_error_return_unix (0, "failed to set mempolicy for "
                                          "numa node %u", numa_node);
      return 0;
    }

  mmap_flags = MAP_FIXED | MAP_ANONYMOUS | MAP_LOCKED;

  if (a->log2_subpage_sz != pm->sys_log2_page_sz)
    mmap_flags |= MAP_HUGETLB;

  if (a->flags & CLIB_PMALLOC_ARENA_F_SHARED_MEM)
    {
      mmap_flags |= MAP_SHARED;
      if (mmap_flags & MAP_HUGETLB)
        pm->error = clib_mem_create_hugetlb_fd ((char *) a->name, &a->fd);
      else
        pm->error = clib_mem_create_fd ((char *) a->name, &a->fd);
      if (a->fd == -1)
        goto error;
    }
  else
    {
      mmap_flags |= MAP_PRIVATE;
      a->fd = -1;
    }

  va = pm->base + (((uword) vec_len (pm->pages)) << pm->def_log2_page_sz);
  if (mmap (va, size, PROT_READ | PROT_WRITE, mmap_flags, a->fd, 0) ==
      MAP_FAILED)
    {
      pm->error = clib_error_return_unix (0, "failed to mmap %u pages at %p "
                                          "fd %d numa %d flags 0x%x", n_pages,
                                          va, a->fd, numa_node, mmap_flags);
      goto error;
    }

  clib_memset (va, 0, size);

  rv = set_mempolicy (old_mpol, old_mask, sizeof (old_mask) * 8 + 1);
  if (rv == -1 && numa_node != 0)
    {
      pm->error = clib_error_return_unix (0, "failed to restore mempolicy");
      goto error;
    }

  /* we tolerate move_pages failure only if request is for numa node 0
     to support non-numa kernels */
  rv = move_pages (0, 1, &va, 0, &status, 0);
  if ((rv == 0 && status != numa_node) || (rv != 0 && numa_node != 0))
    {
      pm->error = rv == -1 ?
        clib_error_return_unix (0, "page allocated on wrong node, numa node "
                                "%u status %d", numa_node, status) :
        clib_error_return (0, "page allocated on wrong node, numa node "
                           "%u status %d", numa_node, status);

      /* unmap & reserve */
      mmap (va, size, PROT_NONE, MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS,
            -1, 0);
      goto error;
    }

  for (i = 0; i < n_pages; i++)
    {
      vec_add2 (pm->pages, pp, 1);
      pp->n_free_blocks = 1 << (pm->def_log2_page_sz - PMALLOC_LOG2_BLOCK_SZ);
      pp->index = pp - pm->pages;
      pp->arena_index = a->index;
      vec_add1 (a->page_indices, pp->index);
    }

  /* if new arena is using smaller page size, we need to rebuild whole
     lookup table */
  if (a->log2_subpage_sz < pm->lookup_log2_page_sz)
    {
      pm->lookup_log2_page_sz = a->log2_subpage_sz;
      pmalloc_update_lookup_table (pm, vec_len (pm->pages) - n_pages,
                                   n_pages);
    }
  else
    pmalloc_update_lookup_table (pm, 0, vec_len (pm->pages));

  /* return pointer to 1st page */
  return pp - (n_pages - 1);

error:
  if (a->fd != -1)
    close (a->fd);
  return 0;
}

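/*
 * Create a named, fd-backed shared arena of 'size' bytes (rounded up to
 * whole default pages) and map its pages immediately.  log2_page_sz may be
 * 0 to use the default hugepage size, or the system page size.  Returns the
 * VA of the first page, or 0 on error.  Illustrative use (arena name and
 * size below are just examples):
 *
 *   va = clib_pmalloc_create_shared_arena (pm, "my-arena", 2 << 20,
 *                                          0, CLIB_PMALLOC_NUMA_LOCAL);
 */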
void *
clib_pmalloc_create_shared_arena (clib_pmalloc_main_t * pm, char *name,
                                  uword size, u32 log2_page_sz, u32 numa_node)
{
  clib_pmalloc_arena_t *a;
  clib_pmalloc_page_t *pp;
  u32 n_pages;

  clib_error_free (pm->error);

  if (log2_page_sz == 0)
    log2_page_sz = pm->def_log2_page_sz;
  else if (log2_page_sz != pm->def_log2_page_sz &&
           log2_page_sz != pm->sys_log2_page_sz)
    {
      pm->error = clib_error_create ("unsupported page size (%uKB)",
                                     1 << (log2_page_sz - 10));
      return 0;
    }

  n_pages = pmalloc_size2pages (size, pm->def_log2_page_sz);

  if (n_pages + vec_len (pm->pages) > pm->max_pages)
    return 0;

  if (pmalloc_validate_numa_node (&numa_node))
    return 0;

  pool_get (pm->arenas, a);
  a->index = a - pm->arenas;
  a->name = format (0, "%s%c", name, 0);
  a->numa_node = numa_node;
  a->flags = CLIB_PMALLOC_ARENA_F_SHARED_MEM;
  a->log2_subpage_sz = log2_page_sz;
  a->subpages_per_page = 1U << (pm->def_log2_page_sz - log2_page_sz);

  if ((pp = pmalloc_map_pages (pm, a, numa_node, n_pages)) == 0)
    {
      vec_free (a->name);
      memset (a, 0, sizeof (*a));
      pool_put (pm->arenas, a);
      return 0;
    }

  return pm->base + (pp->index << pm->def_log2_page_sz);
}

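/*
 * Common allocation path: when 'a' is 0 the per-numa default arena is used
 * (created on first use), otherwise allocation is confined to the given
 * arena.  The request is converted to blocks and tried against every page;
 * non-shared arenas may grow by one more page on demand.
 */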
static inline void *
clib_pmalloc_alloc_inline (clib_pmalloc_main_t * pm, clib_pmalloc_arena_t * a,
                           uword size, uword align, u32 numa_node)
{
  clib_pmalloc_page_t *pp;
  u32 n_blocks, block_align, *page_index;

  ASSERT (is_pow2 (align));

  if (pmalloc_validate_numa_node (&numa_node))
    return 0;

  if (a == 0)
    {
      if (size > 1ULL << pm->def_log2_page_sz)
        return 0;

      vec_validate_init_empty (pm->default_arena_for_numa_node,
                               numa_node, ~0);
      if (pm->default_arena_for_numa_node[numa_node] == ~0)
        {
          pool_get (pm->arenas, a);
          pm->default_arena_for_numa_node[numa_node] = a - pm->arenas;
          a->name = format (0, "default-numa-%u%c", numa_node, 0);
          a->numa_node = numa_node;
          a->log2_subpage_sz = pm->def_log2_page_sz;
          a->subpages_per_page = 1;
        }
      else
        a = pool_elt_at_index (pm->arenas,
                               pm->default_arena_for_numa_node[numa_node]);
    }
  else if (size > 1ULL << a->log2_subpage_sz)
    return 0;

  n_blocks = round_pow2 (size, PMALLOC_BLOCK_SZ) / PMALLOC_BLOCK_SZ;
  block_align = align >> PMALLOC_LOG2_BLOCK_SZ;

  vec_foreach (page_index, a->page_indices)
    {
      pp = vec_elt_at_index (pm->pages, *page_index);
      void *rv = alloc_chunk_from_page (pm, pp, n_blocks, block_align,
                                        numa_node);
      if (rv)
        return rv;
    }

  if ((a->flags & CLIB_PMALLOC_ARENA_F_SHARED_MEM) == 0 &&
      (pp = pmalloc_map_pages (pm, a, numa_node, 1)))
    return alloc_chunk_from_page (pm, pp, n_blocks, block_align, numa_node);

  return 0;
}

void *
clib_pmalloc_alloc_aligned_on_numa (clib_pmalloc_main_t * pm, uword size,
                                    uword align, u32 numa_node)
{
  return clib_pmalloc_alloc_inline (pm, 0, size, align, numa_node);
}

void *
clib_pmalloc_alloc_aligned (clib_pmalloc_main_t * pm, uword size, uword align)
{
  return clib_pmalloc_alloc_inline (pm, 0, size, align,
                                    CLIB_PMALLOC_NUMA_LOCAL);
}

void *
clib_pmalloc_alloc_from_arena (clib_pmalloc_main_t * pm, void *arena_va,
                               uword size, uword align)
{
  clib_pmalloc_arena_t *a = clib_pmalloc_get_arena (pm, arena_va);
  return clib_pmalloc_alloc_inline (pm, a, size, align, 0);
}

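/*
 * Two chunks may be coalesced only if both exist, both are free and both
 * lie within the same subpage (a chunk never spans a subpage boundary).
 */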
static inline int
pmalloc_chunks_mergeable (clib_pmalloc_arena_t * a, clib_pmalloc_page_t * pp,
                          u32 ci1, u32 ci2)
{
  clib_pmalloc_chunk_t *c1, *c2;

  if (ci1 == ~0 || ci2 == ~0)
    return 0;

  c1 = get_chunk (pp, ci1);
  c2 = get_chunk (pp, ci2);

  if (c1->used || c2->used)
    return 0;

  if (c1->start >> (a->log2_subpage_sz - PMALLOC_LOG2_BLOCK_SZ) !=
      c2->start >> (a->log2_subpage_sz - PMALLOC_LOG2_BLOCK_SZ))
    return 0;

  return 1;
}

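/*
 * Return a chunk to its page: look the chunk up by VA, mark it free and
 * coalesce it with free neighbours.
 */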
void
clib_pmalloc_free (clib_pmalloc_main_t * pm, void *va)
{
  clib_pmalloc_page_t *pp;
  clib_pmalloc_chunk_t *c;
  clib_pmalloc_arena_t *a;
  uword *p;
  u32 chunk_index, page_index;

  p = hash_get (pm->chunk_index_by_va, pointer_to_uword (va));
  ASSERT (p != 0);
  chunk_index = p[0];
  page_index = clib_pmalloc_get_page_index (pm, va);
  hash_unset (pm->chunk_index_by_va, pointer_to_uword (va));

  pp = vec_elt_at_index (pm->pages, page_index);
  c = pool_elt_at_index (pp->chunks, chunk_index);
  a = pool_elt_at_index (pm->arenas, pp->arena_index);
  c->used = 0;
  pp->n_free_blocks += c->size;
  pp->n_free_chunks++;

  /* merge with next if free */
  if (pmalloc_chunks_mergeable (a, pp, chunk_index, c->next))
    {
      clib_pmalloc_chunk_t *next = get_chunk (pp, c->next);
      c->size += next->size;
      c->next = next->next;
      if (next->next != ~0)
        get_chunk (pp, next->next)->prev = chunk_index;
      memset (next, 0, sizeof (*next));
      pool_put (pp->chunks, next);
      pp->n_free_chunks--;
    }

  /* merge with prev if free */
  if (pmalloc_chunks_mergeable (a, pp, c->prev, chunk_index))
    {
      clib_pmalloc_chunk_t *prev = get_chunk (pp, c->prev);
      prev->size += c->size;
      prev->next = c->next;
      if (c->next != ~0)
        get_chunk (pp, c->next)->prev = c->prev;
      memset (c, 0, sizeof (*c));
      pool_put (pp->chunks, c);
      pp->n_free_chunks--;
    }
}

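/* print a log2 page size as B/KB/MB/GB */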
u8 *
format_log2_page_size (u8 * s, va_list * va)
{
  u32 log2_page_sz = va_arg (*va, u32);

  if (log2_page_sz >= 30)
    return format (s, "%uGB", 1 << (log2_page_sz - 30));

  if (log2_page_sz >= 20)
    return format (s, "%uMB", 1 << (log2_page_sz - 20));

  if (log2_page_sz >= 10)
    return format (s, "%uKB", 1 << (log2_page_sz - 10));

  return format (s, "%uB", 1 << log2_page_sz);
}

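/* format a single page; verbose >= 2 also dumps the per-chunk table */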
static u8 *
format_pmalloc_page (u8 * s, va_list * va)
{
  clib_pmalloc_page_t *pp = va_arg (*va, clib_pmalloc_page_t *);
  int verbose = va_arg (*va, int);
  u32 indent = format_get_indent (s);

  s = format (s, "page %u: phys-addr %p ", pp->index, pp->pa);

  if (pp->chunks == 0)
    return s;

  s = format (s, "free %u chunks %u free-chunks %d ",
              (pp->n_free_blocks) << PMALLOC_LOG2_BLOCK_SZ,
              pool_elts (pp->chunks), pp->n_free_chunks);

  if (verbose >= 2)
    {
      clib_pmalloc_chunk_t *c;
      c = pool_elt_at_index (pp->chunks, pp->first_chunk_index);
      s = format (s, "\n%U%12s%12s%8s%8s%8s%8s",
                  format_white_space, indent + 2,
                  "chunk offset", "size", "used", "index", "prev", "next");
      while (1)
        {
          s = format (s, "\n%U%12u%12u%8s%8d%8d%8d",
                      format_white_space, indent + 2,
                      c->start << PMALLOC_LOG2_BLOCK_SZ,
                      c->size << PMALLOC_LOG2_BLOCK_SZ,
                      c->used ? "yes" : "no",
                      c - pp->chunks, c->prev, c->next);
          if (c->next == ~0)
            break;
          c = pool_elt_at_index (pp->chunks, c->next);
        }
    }

  return s;
}

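/* top-level formatter: global counters followed by per-arena and, with
   verbose set, per-page detail */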
u8 *
format_pmalloc (u8 * s, va_list * va)
{
  clib_pmalloc_main_t *pm = va_arg (*va, clib_pmalloc_main_t *);
  int verbose = va_arg (*va, int);
  u32 indent = format_get_indent (s);
  u32 *page_index;
  clib_pmalloc_page_t *pp;
  clib_pmalloc_arena_t *a;

  s = format (s, "used-pages %u reserved-pages %u default-page-size %U "
              "lookup-page-size %U", vec_len (pm->pages), pm->max_pages,
              format_log2_page_size, pm->def_log2_page_sz,
              format_log2_page_size, pm->lookup_log2_page_sz);

  if (verbose >= 2)
    s = format (s, " va-start %p", pm->base);

  if (pm->error)
    s = format (s, "\n%Ulast-error: %U", format_white_space, indent + 2,
                format_clib_error, pm->error);

  /* *INDENT-OFF* */
  pool_foreach (a, pm->arenas,
    {
      s = format (s, "\n%Uarena '%s' pages %u subpage-size %U numa-node %u",
                  format_white_space, indent + 2, a->name,
                  vec_len (a->page_indices), format_log2_page_size,
                  a->log2_subpage_sz, a->numa_node);
      if (a->fd != -1)
        s = format (s, " shared fd %d", a->fd);
      if (verbose >= 1)
        vec_foreach (page_index, a->page_indices)
          {
            pp = vec_elt_at_index (pm->pages, *page_index);
            s = format (s, "\n%U%U", format_white_space, indent + 4,
                        format_pmalloc_page, pp, verbose);
          }
    });
  /* *INDENT-ON* */

  return s;
}

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */