/*
 * Copyright (c) 2018 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/mman.h>

#include <vppinfra/format.h>
#include <vppinfra/linux/sysfs.h>
#include <vppinfra/mem.h>
#include <vppinfra/hash.h>
#include <vppinfra/pmalloc.h>
#include <vppinfra/cpu.h>

#if __SIZEOF_POINTER__ >= 8
#define DEFAULT_RESERVED_MB 16384
#else
#define DEFAULT_RESERVED_MB 256
#endif

static inline clib_pmalloc_chunk_t *
get_chunk (clib_pmalloc_page_t * pp, u32 index)
{
  return pool_elt_at_index (pp->chunks, index);
}

static inline uword
pmalloc_size2pages (uword size, u32 log2_page_sz)
{
  return round_pow2 (size, 1ULL << log2_page_sz) >> log2_page_sz;
}
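
/* Initialize the allocator: record the default (huge)page size, check
   whether /proc/self/pagemap is accessible for VA->PA translation and
   reserve a contiguous virtual address range for all future mappings.
   Returns 0 on success, -1 on failure. */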
int
clib_pmalloc_init (clib_pmalloc_main_t * pm, uword base_addr, uword size)
{
  uword base, pagesize;
  u64 *pt = 0;

  ASSERT (pm->error == 0);

  pagesize = clib_mem_get_default_hugepage_size ();
  pm->def_log2_page_sz = min_log2 (pagesize);
  pm->lookup_log2_page_sz = pm->def_log2_page_sz;

  /* check if pagemap is accessible */
  pt = clib_mem_vm_get_paddr (&pt, CLIB_MEM_PAGE_SZ_DEFAULT, 1);
  if (pt == 0 || pt[0] == 0)
    pm->flags |= CLIB_PMALLOC_F_NO_PAGEMAP;

  size = size ? size : ((u64) DEFAULT_RESERVED_MB) << 20;
  size = round_pow2 (size, pagesize);

  pm->max_pages = size >> pm->def_log2_page_sz;

  base = clib_mem_vm_reserve (base_addr, size, pm->def_log2_page_sz);
  if (base == ~0)
    {
      pm->error = clib_error_return (0, "failed to reserve %u pages",
				     pm->max_pages);
      return -1;
    }

  pm->base = uword_to_pointer (base, void *);
  return 0;
}
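
/* Carve a chunk of n_blocks blocks (with the requested block alignment)
   out of the free chunk list of a single page; returns the chunk VA or 0
   if this page cannot satisfy the request. */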
static inline void *
alloc_chunk_from_page (clib_pmalloc_main_t * pm, clib_pmalloc_page_t * pp,
		       u32 n_blocks, u32 block_align, u32 numa_node)
{
  clib_pmalloc_chunk_t *c = 0;
  clib_pmalloc_arena_t *a;
  void *va;
  u32 off;
  u32 alloc_chunk_index;

  a = pool_elt_at_index (pm->arenas, pp->arena_index);

  if (pp->chunks == 0)
    {
      u32 i, start = 0, prev = ~0;

      for (i = 0; i < a->subpages_per_page; i++)
	{
	  pool_get (pp->chunks, c);
	  c->start = start;
	  c->prev = prev;
	  c->size = pp->n_free_blocks / a->subpages_per_page;
	  start += c->size;
	  if (prev == ~0)
	    pp->first_chunk_index = c - pp->chunks;
	  else
	    pp->chunks[prev].next = c - pp->chunks;
	  prev = c - pp->chunks;
	}
      c->next = ~0;
      pp->n_free_chunks = a->subpages_per_page;
    }

  if (pp->n_free_blocks < n_blocks)
    return 0;

  alloc_chunk_index = pp->first_chunk_index;

next_chunk:
  c = pool_elt_at_index (pp->chunks, alloc_chunk_index);
  off = (block_align - (c->start & (block_align - 1))) & (block_align - 1);

  if (c->used || n_blocks + off > c->size)
    {
      if (c->next == ~0)
	return 0;
      alloc_chunk_index = c->next;
      goto next_chunk;
    }

  /* if alignment is needed create new empty chunk */
  if (off)
    {
      u32 offset_chunk_index;
      clib_pmalloc_chunk_t *co;
      pool_get (pp->chunks, c);
      pp->n_free_chunks++;
      offset_chunk_index = alloc_chunk_index;
      alloc_chunk_index = c - pp->chunks;

      co = pool_elt_at_index (pp->chunks, offset_chunk_index);
      c->size = co->size - off;
      c->next = co->next;
      c->start = co->start + off;
      c->prev = offset_chunk_index;
      co->size = off;
      co->next = alloc_chunk_index;
    }

  c->used = 1;
  if (c->size > n_blocks)
    {
      u32 tail_chunk_index;
      clib_pmalloc_chunk_t *ct;
      pool_get (pp->chunks, ct);
      pp->n_free_chunks++;
      tail_chunk_index = ct - pp->chunks;
      c = pool_elt_at_index (pp->chunks, alloc_chunk_index);
      ct->size = c->size - n_blocks;
      ct->next = c->next;
      ct->prev = alloc_chunk_index;
      ct->start = c->start + n_blocks;

      c->size = n_blocks;
      c->next = tail_chunk_index;
      if (ct->next != ~0)
	pool_elt_at_index (pp->chunks, ct->next)->prev = tail_chunk_index;
    }
  else if (c->next != ~0)
    pool_elt_at_index (pp->chunks, c->next)->prev = alloc_chunk_index;

  c = get_chunk (pp, alloc_chunk_index);
  va = pm->base + ((pp - pm->pages) << pm->def_log2_page_sz) +
    (c->start << PMALLOC_LOG2_BLOCK_SZ);
  hash_set (pm->chunk_index_by_va, pointer_to_uword (va), alloc_chunk_index);
  pp->n_free_blocks -= n_blocks;
  pp->n_free_chunks--;
  return va;
}
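
/* Refresh the VA->PA lookup table for pages added so far by reading
   /proc/self/pagemap; when the pagemap is not accessible the table simply
   stores the VA, so physical addresses are reported as 0. */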
static void
pmalloc_update_lookup_table (clib_pmalloc_main_t * pm, u32 first, u32 count)
{
  uword seek, va, pa, p;
  int fd;
  u32 elts_per_page = 1U << (pm->def_log2_page_sz - pm->lookup_log2_page_sz);

  vec_validate_aligned (pm->lookup_table, vec_len (pm->pages) *
			elts_per_page - 1, CLIB_CACHE_LINE_BYTES);

  p = (uword) first * elts_per_page;
  if (pm->flags & CLIB_PMALLOC_F_NO_PAGEMAP)
    {
      while (p < (uword) elts_per_page * count)
	{
	  pm->lookup_table[p] = pointer_to_uword (pm->base) +
	    (p << pm->lookup_log2_page_sz);
	  p++;
	}
      return;
    }

  fd = open ((char *) "/proc/self/pagemap", O_RDONLY);
  while (p < (uword) elts_per_page * count)
    {
      va = pointer_to_uword (pm->base) + (p << pm->lookup_log2_page_sz);
      pa = 0;
      seek = (va >> clib_mem_get_log2_page_size ()) * sizeof (pa);
      if (fd != -1 && lseek (fd, seek, SEEK_SET) == seek &&
	  read (fd, &pa, sizeof (pa)) == (sizeof (pa)) &&
	  pa & (1ULL << 63) /* page present bit */ )
	{
	  pa = (pa & pow2_mask (55)) << clib_mem_get_log2_page_size ();
	}
      pm->lookup_table[p] = va - pa;
      p++;
    }

  if (fd != -1)
    close (fd);
}
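
/* Map n_pages pages for an arena on the given numa node: optionally
   preallocate hugepages, set the numa memory policy, mmap() into the
   reserved VA range (fd-backed for shared arenas, anonymous otherwise),
   verify numa placement and append the pages to pm->pages. */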
static inline clib_pmalloc_page_t *
pmalloc_map_pages (clib_pmalloc_main_t * pm, clib_pmalloc_arena_t * a,
		   u32 numa_node, u32 n_pages)
{
  clib_mem_page_stats_t stats = {};
  clib_pmalloc_page_t *pp = 0;
  int rv, i, mmap_flags;
  void *va = MAP_FAILED;
  uword size = (uword) n_pages << pm->def_log2_page_sz;

  clib_error_free (pm->error);

  if (pm->max_pages <= vec_len (pm->pages))
    {
      pm->error = clib_error_return (0, "maximum number of pages reached");
      return 0;
    }

#ifdef __linux__
  if (a->log2_subpage_sz != clib_mem_get_log2_page_size ())
    {
      pm->error = clib_sysfs_prealloc_hugepages (numa_node,
						 a->log2_subpage_sz, n_pages);
      if (pm->error)
	return 0;
    }
#endif /* __linux__ */

  rv = clib_mem_set_numa_affinity (numa_node, /* force */ 1);
  if (rv == CLIB_MEM_ERROR && numa_node != 0)
    {
      pm->error = clib_error_return_unix (0, "failed to set mempolicy for "
					  "numa node %u", numa_node);
      return 0;
    }

  mmap_flags = MAP_FIXED;

  if (a->flags & CLIB_PMALLOC_ARENA_F_SHARED_MEM)
    {
      mmap_flags |= MAP_SHARED;
      a->fd = clib_mem_vm_create_fd (a->log2_subpage_sz, "%s", a->name);
      if (a->fd == -1)
	goto error;
      if ((ftruncate (a->fd, size)) == -1)
	goto error;
    }
  else
    {
      if (a->log2_subpage_sz != clib_mem_get_log2_page_size ())
	mmap_flags |= MAP_HUGETLB;

      mmap_flags |= MAP_PRIVATE | MAP_ANONYMOUS;
      a->fd = -1;
    }

  va = pm->base + (((uword) vec_len (pm->pages)) << pm->def_log2_page_sz);
  if (mmap (va, size, PROT_READ | PROT_WRITE, mmap_flags, a->fd, 0) ==
      MAP_FAILED)
    {
      pm->error = clib_error_return_unix (0, "failed to mmap %u pages at %p "
					  "fd %d numa %d flags 0x%x", n_pages,
					  va, a->fd, numa_node, mmap_flags);
      va = MAP_FAILED;
      goto error;
    }

  if (a->log2_subpage_sz != clib_mem_get_log2_page_size () &&
      mlock (va, size) != 0)
    {
      pm->error = clib_error_return_unix (0, "Unable to lock pages");
      goto error;
    }

  clib_memset (va, 0, size);

  rv = clib_mem_set_default_numa_affinity ();
  if (rv == CLIB_MEM_ERROR && numa_node != 0)
    {
      pm->error = clib_error_return_unix (0, "failed to restore mempolicy");
      goto error;
    }

  /* we tolerate move_pages failure only if request is for numa node 0
     to support non-numa kernels */
  clib_mem_get_page_stats (va, CLIB_MEM_PAGE_SZ_DEFAULT, 1, &stats);

  if (stats.per_numa[numa_node] != 1 &&
      !(numa_node == 0 && stats.unknown == 1))
    {
      u16 allocated_at = ~0;
      if (stats.unknown)
	{
	  pm->error = clib_error_return (0, "unable to get information "
					 "about numa allocation");
	  goto error;
	}

      for (u16 i = 0; i < CLIB_MAX_NUMAS; i++)
	if (stats.per_numa[i] == 1)
	  allocated_at = i;

      pm->error = clib_error_return (0,
				     "page allocated on the wrong numa node "
				     "(%u), expected %u",
				     allocated_at, numa_node);
      goto error;
    }

  for (i = 0; i < n_pages; i++)
    {
      vec_add2 (pm->pages, pp, 1);
      pp->n_free_blocks = 1 << (pm->def_log2_page_sz - PMALLOC_LOG2_BLOCK_SZ);
      pp->index = pp - pm->pages;
      pp->arena_index = a->index;
      vec_add1 (a->page_indices, pp->index);
    }

  /* if new arena is using smaller page size, we need to rebuild whole
     lookup table */
  if (a->log2_subpage_sz < pm->lookup_log2_page_sz)
    {
      pm->lookup_log2_page_sz = a->log2_subpage_sz;
      pmalloc_update_lookup_table (pm, vec_len (pm->pages) - n_pages,
				   n_pages);
    }
  else
    pmalloc_update_lookup_table (pm, 0, vec_len (pm->pages));

  /* return pointer to 1st page */
  return pp - (n_pages - 1);

error:
  if (va != MAP_FAILED)
    {
      /* unmap & reserve */
      mmap (va, size, PROT_NONE, MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS,
	    -1, 0);
    }
  if (a->fd != -1)
    close (a->fd);
  return 0;
}
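
/* Create a named, fd-backed shared-memory arena of 'size' bytes on the
   requested numa node and map its pages; returns the VA of the first page
   or 0 on failure. */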
void *
clib_pmalloc_create_shared_arena (clib_pmalloc_main_t * pm, char *name,
				  uword size, u32 log2_page_sz, u32 numa_node)
{
  clib_pmalloc_arena_t *a;
  clib_pmalloc_page_t *pp;
  u32 n_pages;

  clib_error_free (pm->error);

  if (log2_page_sz == 0)
    log2_page_sz = pm->def_log2_page_sz;
  else if (log2_page_sz != pm->def_log2_page_sz &&
	   log2_page_sz != clib_mem_get_log2_page_size ())
    {
      pm->error = clib_error_create ("unsupported page size (%uKB)",
				     1 << (log2_page_sz - 10));
      return 0;
    }

  n_pages = pmalloc_size2pages (size, pm->def_log2_page_sz);

  if (n_pages + vec_len (pm->pages) > pm->max_pages)
    return 0;

  if (numa_node == CLIB_PMALLOC_NUMA_LOCAL)
    numa_node = clib_get_current_numa_node ();

  pool_get (pm->arenas, a);
  a->index = a - pm->arenas;
  a->name = format (0, "%s%c", name, 0);
  a->numa_node = numa_node;
  a->flags = CLIB_PMALLOC_ARENA_F_SHARED_MEM;
  a->log2_subpage_sz = log2_page_sz;
  a->subpages_per_page = 1U << (pm->def_log2_page_sz - log2_page_sz);

  if ((pp = pmalloc_map_pages (pm, a, numa_node, n_pages)) == 0)
    {
      vec_free (a->name);
      memset (a, 0, sizeof (*a));
      pool_put (pm->arenas, a);
      return 0;
    }

  return pm->base + ((uword) pp->index << pm->def_log2_page_sz);
}
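
/* Common allocation path: when no arena is given use (or lazily create)
   the per-numa default arena, convert size and alignment to blocks, try
   each existing page of the arena and, for private arenas, map one more
   page if needed. */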
static inline void *
clib_pmalloc_alloc_inline (clib_pmalloc_main_t * pm, clib_pmalloc_arena_t * a,
			   uword size, uword align, u32 numa_node)
{
  clib_pmalloc_page_t *pp;
  u32 n_blocks, block_align, *page_index;

  ASSERT (is_pow2 (align));

  if (numa_node == CLIB_PMALLOC_NUMA_LOCAL)
    numa_node = clib_get_current_numa_node ();

  if (a == 0)
    {
      if (size > 1ULL << pm->def_log2_page_sz)
	return 0;

      vec_validate_init_empty (pm->default_arena_for_numa_node,
			       numa_node, ~0);
      if (pm->default_arena_for_numa_node[numa_node] == ~0)
	{
	  pool_get (pm->arenas, a);
	  pm->default_arena_for_numa_node[numa_node] = a - pm->arenas;
	  a->name = format (0, "default-numa-%u%c", numa_node, 0);
	  a->numa_node = numa_node;
	  a->log2_subpage_sz = pm->def_log2_page_sz;
	  a->subpages_per_page = 1;
	}
      else
	a = pool_elt_at_index (pm->arenas,
			       pm->default_arena_for_numa_node[numa_node]);
    }
  else if (size > 1ULL << a->log2_subpage_sz)
    return 0;

  n_blocks = round_pow2 (size, PMALLOC_BLOCK_SZ) / PMALLOC_BLOCK_SZ;
  block_align = align >> PMALLOC_LOG2_BLOCK_SZ;

  vec_foreach (page_index, a->page_indices)
    {
      pp = vec_elt_at_index (pm->pages, *page_index);
      void *rv = alloc_chunk_from_page (pm, pp, n_blocks, block_align,
					numa_node);

      if (rv)
	return rv;
    }

  if ((a->flags & CLIB_PMALLOC_ARENA_F_SHARED_MEM) == 0 &&
      (pp = pmalloc_map_pages (pm, a, numa_node, 1)))
    return alloc_chunk_from_page (pm, pp, n_blocks, block_align, numa_node);

  return 0;
}
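
/* Public allocation wrappers around clib_pmalloc_alloc_inline(). */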
void *
clib_pmalloc_alloc_aligned_on_numa (clib_pmalloc_main_t * pm, uword size,
				    uword align, u32 numa_node)
{
  return clib_pmalloc_alloc_inline (pm, 0, size, align, numa_node);
}

void *
clib_pmalloc_alloc_aligned (clib_pmalloc_main_t *pm, uword size, uword align)
{
  return clib_pmalloc_alloc_inline (pm, 0, size, align,
				    CLIB_PMALLOC_NUMA_LOCAL);
}

void *
clib_pmalloc_alloc_from_arena (clib_pmalloc_main_t *pm, void *arena_va,
			       uword size, uword align)
{
  clib_pmalloc_arena_t *a = clib_pmalloc_get_arena (pm, arena_va);
  return clib_pmalloc_alloc_inline (pm, a, size, align, 0);
}
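
/* Two chunks can be merged only when both indices are valid, both chunks
   are free and both start in the same subpage, so a merged chunk never
   crosses a physical page boundary. */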
static inline int
pmalloc_chunks_mergeable (clib_pmalloc_arena_t * a, clib_pmalloc_page_t * pp,
			  u32 ci1, u32 ci2)
{
  clib_pmalloc_chunk_t *c1, *c2;

  if (ci1 == ~0 || ci2 == ~0)
    return 0;

  c1 = get_chunk (pp, ci1);
  c2 = get_chunk (pp, ci2);

  if (c1->used || c2->used)
    return 0;

  if (c1->start >> (a->log2_subpage_sz - PMALLOC_LOG2_BLOCK_SZ) !=
      c2->start >> (a->log2_subpage_sz - PMALLOC_LOG2_BLOCK_SZ))
    return 0;

  return 1;
}
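
/* Free a chunk by VA: mark it unused, return its blocks to the page and
   coalesce it with free neighbours. */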
void
clib_pmalloc_free (clib_pmalloc_main_t * pm, void *va)
{
  clib_pmalloc_page_t *pp;
  clib_pmalloc_chunk_t *c;
  clib_pmalloc_arena_t *a;
  uword *p;
  u32 chunk_index, page_index;

  p = hash_get (pm->chunk_index_by_va, pointer_to_uword (va));
  ASSERT (p != 0);

  chunk_index = p[0];
  page_index = clib_pmalloc_get_page_index (pm, va);
  hash_unset (pm->chunk_index_by_va, pointer_to_uword (va));

  pp = vec_elt_at_index (pm->pages, page_index);
  c = pool_elt_at_index (pp->chunks, chunk_index);
  a = pool_elt_at_index (pm->arenas, pp->arena_index);
  c->used = 0;
  pp->n_free_blocks += c->size;
  pp->n_free_chunks++;

  /* merge with next if free */
  if (pmalloc_chunks_mergeable (a, pp, chunk_index, c->next))
    {
      clib_pmalloc_chunk_t *next = get_chunk (pp, c->next);
      c->size += next->size;
      c->next = next->next;
      if (next->next != ~0)
	get_chunk (pp, next->next)->prev = chunk_index;
      memset (next, 0, sizeof (*next));
      pool_put (pp->chunks, next);
      pp->n_free_chunks--;
    }

  /* merge with prev if free */
  if (pmalloc_chunks_mergeable (a, pp, c->prev, chunk_index))
    {
      clib_pmalloc_chunk_t *prev = get_chunk (pp, c->prev);
      prev->size += c->size;
      prev->next = c->next;
      if (c->next != ~0)
	get_chunk (pp, c->next)->prev = c->prev;
      memset (c, 0, sizeof (*c));
      pool_put (pp->chunks, c);
      pp->n_free_chunks--;
    }
}
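
/* format() helper: one-line summary of a page's free space; with
   verbose >= 2 also dumps the per-chunk table. */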
static u8 *
format_pmalloc_page (u8 * s, va_list * va)
{
  clib_pmalloc_page_t *pp = va_arg (*va, clib_pmalloc_page_t *);
  int verbose = va_arg (*va, int);
  u32 indent = format_get_indent (s);

  if (pp->chunks == 0)
    return format (s, "no chunks");

  s = format (s, "free %u chunks %u free-chunks %d ",
	      (pp->n_free_blocks) << PMALLOC_LOG2_BLOCK_SZ,
	      pool_elts (pp->chunks), pp->n_free_chunks);

  if (verbose >= 2)
    {
      clib_pmalloc_chunk_t *c;
      c = pool_elt_at_index (pp->chunks, pp->first_chunk_index);
      s = format (s, "\n%U%12s%12s%8s%8s%8s%8s",
		  format_white_space, indent + 2,
		  "chunk offset", "size", "used", "index", "prev", "next");
      while (1)
	{
	  s = format (s, "\n%U%12u%12u%8s%8d%8d%8d",
		      format_white_space, indent + 2,
		      c->start << PMALLOC_LOG2_BLOCK_SZ,
		      c->size << PMALLOC_LOG2_BLOCK_SZ,
		      c->used ? "yes" : "no",
		      c - pp->chunks, c->prev, c->next);
	  if (c->next == ~0)
	    break;
	  c = pool_elt_at_index (pp->chunks, c->next);
	}
    }
  return s;
}
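
/* format() helper: allocator-wide summary followed by per-arena and
   (when verbose) per-page details. */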
u8 *
format_pmalloc (u8 * s, va_list * va)
{
  clib_pmalloc_main_t *pm = va_arg (*va, clib_pmalloc_main_t *);
  int verbose = va_arg (*va, int);
  u32 indent = format_get_indent (s);

  clib_pmalloc_page_t *pp;
  clib_pmalloc_arena_t *a;

  s = format (s, "used-pages %u reserved-pages %u default-page-size %U "
	      "lookup-page-size %U%s", vec_len (pm->pages), pm->max_pages,
	      format_log2_page_size, pm->def_log2_page_sz,
	      format_log2_page_size, pm->lookup_log2_page_sz,
	      pm->flags & CLIB_PMALLOC_F_NO_PAGEMAP ? " no-pagemap" : "");

  if (verbose >= 2)
    s = format (s, " va-start %p", pm->base);

  if (pm->error)
    s = format (s, "\n%Ulast-error: %U", format_white_space, indent + 2,
		format_clib_error, pm->error);

  pool_foreach (a, pm->arenas)
    {
      u32 *page_index;
      s = format (s, "\n%Uarena '%s' pages %u subpage-size %U numa-node %u",
		  format_white_space, indent + 2, a->name,
		  vec_len (a->page_indices), format_log2_page_size,
		  a->log2_subpage_sz, a->numa_node);
      if (a->fd != -1)
	s = format (s, " shared fd %d", a->fd);
      if (verbose >= 1)
	vec_foreach (page_index, a->page_indices)
	  {
	    pp = vec_elt_at_index (pm->pages, *page_index);
	    s = format (s, "\n%U%U", format_white_space, indent + 4,
			format_pmalloc_page, pp, verbose);
	  }
    }

  return s;
}
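
/* format() helper: dump the VA->PA lookup table, one line per lookup
   page. */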
u8 *
format_pmalloc_map (u8 * s, va_list * va)
{
  clib_pmalloc_main_t *pm = va_arg (*va, clib_pmalloc_main_t *);

  u32 index;
  s = format (s, "%16s %13s %8s", "virtual-addr", "physical-addr", "size");
  vec_foreach_index (index, pm->lookup_table)
    {
      uword *lookup_val, pa, va;
      lookup_val = vec_elt_at_index (pm->lookup_table, index);
      va = pointer_to_uword (pm->base) +
	((uword) index << pm->lookup_log2_page_sz);
      pa = va - *lookup_val;
      s = format (s, "\n %16p %13p %8U", uword_to_pointer (va, u64),
		  uword_to_pointer (pa, u64), format_log2_page_size,
		  pm->lookup_log2_page_sz);
    }
  return s;
}

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */