2 * Copyright (c) 2015 Cisco and/or its affiliates.
3 * Licensed under the Apache License, Version 2.0 (the "License");
4 * you may not use this file except in compliance with the License.
5 * You may obtain a copy of the License at:
7 * http://www.apache.org/licenses/LICENSE-2.0
9 * Unless required by applicable law or agreed to in writing, software
10 * distributed under the License is distributed on an "AS IS" BASIS,
11 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 * See the License for the specific language governing permissions and
13 * limitations under the License.
16 Copyright (c) 2001, 2002, 2003 Eliot Dresselhaus
18 Permission is hereby granted, free of charge, to any person obtaining
19 a copy of this software and associated documentation files (the
20 "Software"), to deal in the Software without restriction, including
21 without limitation the rights to use, copy, modify, merge, publish,
22 distribute, sublicense, and/or sell copies of the Software, and to
23 permit persons to whom the Software is furnished to do so, subject to
24 the following conditions:
26 The above copyright notice and this permission notice shall be
27 included in all copies or substantial portions of the Software.
29 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
30 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
31 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
32 NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
33 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
34 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
35 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
38 #ifndef _included_clib_mem_h
39 #define _included_clib_mem_h
45 #include <vppinfra/clib.h> /* uword, etc */
46 #include <vppinfra/clib_error.h>
48 #include <vppinfra/dlmalloc.h>
50 #include <vppinfra/os.h>
51 #include <vppinfra/string.h> /* memcpy, clib_memset */
52 #include <vppinfra/sanitizer.h>
/* Maximum number of per-thread (per-"cpu") heaps; one slot per thread. */
54 #define CLIB_MAX_MHEAPS 256
/* Maximum number of NUMA nodes for which a dedicated heap may exist. */
55 #define CLIB_MAX_NUMAS 16
/* Sentinel (all-ones pointer) returned by the VM map functions on failure. */
56 #define CLIB_MEM_VM_MAP_FAILED ((void *) ~0)
/* Page sizes expressed as log2 of the size in bytes (e.g. 12 -> 4 KiB).
   Values below CLIB_MEM_PAGE_SZ_4K are symbolic markers, not real sizes:
   UNKNOWN means "not yet determined", DEFAULT means "use the system page
   size", DEFAULT_HUGE means "use the system default hugepage size". */
60   CLIB_MEM_PAGE_SZ_UNKNOWN = 0,
61   CLIB_MEM_PAGE_SZ_DEFAULT = 1,
62   CLIB_MEM_PAGE_SZ_DEFAULT_HUGE = 2,
63   CLIB_MEM_PAGE_SZ_4K = 12,
64   CLIB_MEM_PAGE_SZ_16K = 14,
65   CLIB_MEM_PAGE_SZ_64K = 16,
66   CLIB_MEM_PAGE_SZ_1M = 20,
67   CLIB_MEM_PAGE_SZ_2M = 21,
68   CLIB_MEM_PAGE_SZ_16M = 24,
69   CLIB_MEM_PAGE_SZ_32M = 25,
70   CLIB_MEM_PAGE_SZ_512M = 29,
71   CLIB_MEM_PAGE_SZ_1G = 30,
72   CLIB_MEM_PAGE_SZ_16G = 34,
/* Per-mapping header describing one virtual-memory mapping created through
   the clib_mem_vm_map* API.  Headers form a doubly-linked list (see the
   first_map/last_map pointers in clib_mem_main_t). */
75 typedef struct _clib_mem_vm_map_hdr
83   /* page size (log2) */
84   clib_mem_page_sz_t log2_page_sz;
87 #define CLIB_VM_MAP_HDR_NAME_MAX_LEN 64
  /* Human-readable name of the mapping, set by the creator. */
88   char name[CLIB_VM_MAP_HDR_NAME_MAX_LEN];
  /* Links in the global list of mappings. */
91   struct _clib_mem_vm_map_hdr *prev, *next;
92 } clib_mem_vm_map_hdr_t;
96   /* log2 system page size */
97   clib_mem_page_sz_t log2_page_sz;
99   /* log2 system default hugepage size */
100   clib_mem_page_sz_t log2_default_hugepage_sz;
102   /* bitmap of available numa nodes */
103   u32 numa_node_bitmap;
  /* Per-thread heaps; indexed by os_get_thread_index(). */
106   void *per_cpu_mheaps[CLIB_MAX_MHEAPS];
  /* Per-NUMA-node heaps; indexed by numa node id. */
109   void *per_numa_mheaps[CLIB_MAX_NUMAS];
  /* Head and tail of the linked list of VM map headers. */
112   clib_mem_vm_map_hdr_t *first_map, *last_map;
/* Single global instance; defined in the corresponding .c file. */
115 extern clib_mem_main_t clib_mem_main;
117 /* Unspecified NUMA socket */
118 #define VEC_NUMA_UNSPECIFIED (0xFF)
/* Return the heap belonging to the calling thread. */
121 clib_mem_get_per_cpu_heap (void)
123   int cpu = os_get_thread_index ();
124   return clib_mem_main.per_cpu_mheaps[cpu];
/* Install new_heap as the calling thread's heap; the previous heap is
   captured in 'old' (presumably returned to the caller so the heap can be
   restored later — the return statement is outside this fragment). */
128 clib_mem_set_per_cpu_heap (u8 * new_heap)
130   int cpu = os_get_thread_index ();
131   void *old = clib_mem_main.per_cpu_mheaps[cpu];
132   clib_mem_main.per_cpu_mheaps[cpu] = new_heap;
/* Return the heap associated with the given NUMA node id. */
137 clib_mem_get_per_numa_heap (u32 numa_id)
139   ASSERT (numa_id < ARRAY_LEN (clib_mem_main.per_numa_mheaps));
140   return clib_mem_main.per_numa_mheaps[numa_id];
/* Install new_heap as the heap of the calling thread's NUMA node; the
   previous heap is captured in 'old' (return presumably outside this
   fragment, mirroring clib_mem_set_per_cpu_heap). */
144 clib_mem_set_per_numa_heap (u8 * new_heap)
146   int numa = os_get_numa_index ();
147   void *old = clib_mem_main.per_numa_mheaps[numa];
148   clib_mem_main.per_numa_mheaps[numa] = new_heap;
/* Claim a thread index for the calling thread by atomically grabbing the
   first free slot in the per-cpu heap array.  The claimed slot is seeded
   with the main heap (slot 0) so the thread can push/pop its own heap
   without disturbing other threads. */
153 clib_mem_set_thread_index (void)
156    * Find an unused slot in the per-cpu-mheaps array,
157    * and grab it for this thread. We need to be able to
158    * push/pop the thread heap without affecting other thread(s).
  /* Already have a non-zero index: nothing to do (guard condition —
     surrounding control flow is partially outside this fragment). */
161   if (__os_thread_index != 0)
163   for (i = 0; i < ARRAY_LEN (clib_mem_main.per_cpu_mheaps); i++)
      /* CAS from NULL to the main heap pointer claims slot i exactly once. */
164     if (clib_atomic_bool_cmp_and_swap (&clib_mem_main.per_cpu_mheaps[i],
165 					0, clib_mem_main.per_cpu_mheaps[0]))
167 	os_set_thread_index (i);
  /* Slot 0 is reserved for the main thread, so workers must end up > 0. */
170   ASSERT (__os_thread_index > 0);
/* Usable size of an allocation, without asserting that p belongs to the
   current heap (cf. clib_mem_size which does check). */
174 clib_mem_size_nocheck (void *p)
176   return mspace_usable_size_with_delta (p);
179 /* Memory allocator which may call os_out_of_memory() if it fails */
/* Core allocator: all clib_mem_alloc* wrappers funnel here.  Allocates
   'size' bytes from the calling thread's heap such that the address at
   'align_offset' bytes into the object is aligned to 'align'. */
181 clib_mem_alloc_aligned_at_offset (uword size, uword align, uword align_offset,
182 				  int os_out_of_memory_on_failure)
  /* Normalize align_offset into [0, align]; exact branch bodies are
     partially outside this fragment. */
187   if (align_offset > align)
190 	align_offset %= align;
192 	align_offset = align;
  /* Allocate from this thread's heap (dlmalloc mspace). */
195   cpu = os_get_thread_index ();
196   heap = clib_mem_main.per_cpu_mheaps[cpu];
198   p = mspace_get_aligned (heap, size, align, align_offset);
200   if (PREDICT_FALSE (0 == p))
      /* Caller chooses between fatal OOM handling and returning NULL. */
202       if (os_out_of_memory_on_failure)
  /* Tell AddressSanitizer the new object is valid to access. */
207   CLIB_MEM_UNPOISON (p, size);
211 /* Memory allocator which calls os_out_of_memory() when it fails */
/* Unaligned allocation; fatal on failure. */
213 clib_mem_alloc (uword size)
215   return clib_mem_alloc_aligned_at_offset (size, /* align */ 1,
216 					   /* align_offset */ 0,
217 					   /* os_out_of_memory */ 1);
/* Aligned allocation; fatal on failure. */
221 clib_mem_alloc_aligned (uword size, uword align)
223   return clib_mem_alloc_aligned_at_offset (size, align, /* align_offset */ 0,
224 					   /* os_out_of_memory */ 1);
227 /* Memory allocator which calls os_out_of_memory() when it fails */
/* Unaligned allocation; returns NULL on failure instead of dying.
   NOTE(review): the comment above appears copy-pasted from the fatal
   variant — this one passes os_out_of_memory = 0. */
229 clib_mem_alloc_or_null (uword size)
231   return clib_mem_alloc_aligned_at_offset (size, /* align */ 1,
232 					   /* align_offset */ 0,
233 					   /* os_out_of_memory */ 0);
/* Aligned allocation; returns NULL on failure instead of dying. */
237 clib_mem_alloc_aligned_or_null (uword size, uword align)
239   return clib_mem_alloc_aligned_at_offset (size, align, /* align_offset */ 0,
240 					   /* os_out_of_memory */ 0);
245 /* Memory allocator which panics when it fails.
246    Use macro so that clib_panic macro can expand __FUNCTION__ and __LINE__. */
247 #define clib_mem_alloc_aligned_no_fail(size,align)				\
249   uword _clib_mem_alloc_size = (size);					\
250   void * _clib_mem_alloc_p;						\
251   _clib_mem_alloc_p = clib_mem_alloc_aligned (_clib_mem_alloc_size, (align)); \
252   if (! _clib_mem_alloc_p)						\
253     clib_panic ("failed to allocate %d bytes", _clib_mem_alloc_size);	\
/* Unaligned variant of the panic-on-failure allocator. */
257 #define clib_mem_alloc_no_fail(size) clib_mem_alloc_aligned_no_fail(size,1)
259 /* Alias to stack allocator for naming consistency. */
260 #define clib_mem_alloc_stack(bytes) __builtin_alloca(bytes)
/* Return non-zero if p was allocated from the calling thread's heap. */
263 clib_mem_is_heap_object (void *p)
265   void *heap = clib_mem_get_per_cpu_heap ();
267   return mspace_is_heap_object (heap, p);
/* Free an object previously allocated with clib_mem_alloc*.  The object
   must belong to the calling thread's heap. */
271 clib_mem_free (void *p)
273   u8 *heap = clib_mem_get_per_cpu_heap ();
275   /* Make sure object is in the correct heap. */
276   ASSERT (clib_mem_is_heap_object (p));
  /* Mark the whole object inaccessible for AddressSanitizer before
     returning it to the heap. */
278   CLIB_MEM_POISON (p, clib_mem_size_nocheck (p));
280   mspace_put (heap, p);
/* Resize an allocation.  Implemented as alloc + copy (+ presumably free of
   the old object outside this fragment); caller supplies both sizes since
   the heap is not queried for the old size. */
284 clib_mem_realloc (void *p, uword new_size, uword old_size)
286   /* By default use alloc, copy and free to emulate realloc. */
287   void *q = clib_mem_alloc (new_size);
  /* Copy min (old_size, new_size) bytes. */
291   if (old_size < new_size)
292     copy_size = old_size;
294     copy_size = new_size;
295   clib_memcpy_fast (q, p, copy_size);
/* Usable size of a live heap object; asserts it belongs to this heap. */
302 clib_mem_size (void *p)
304   ASSERT (clib_mem_is_heap_object (p));
305   return clib_mem_size_nocheck (p);
/* Secure free: zero the object before releasing it (for secrets).
   memset_s_inline is used so the clear cannot be optimized away. */
309 clib_mem_free_s (void *p)
311   uword size = clib_mem_size (p);
312   CLIB_MEM_UNPOISON (p, size);
313   memset_s_inline (p, size, 0, size);
/* Convenience aliases for the per-thread heap accessors. */
318 clib_mem_get_heap (void)
320   return clib_mem_get_per_cpu_heap ();
/* Set the calling thread's heap; forwards to the per-cpu setter. */
324 clib_mem_set_heap (void *heap)
326   return clib_mem_set_per_cpu_heap (heap);
/* Heap/memory subsystem lifecycle and tracing entry points
   (implemented in the corresponding .c file). */
329 void clib_mem_main_init ();
330 void *clib_mem_init (void *heap, uword size);
331 void *clib_mem_init_with_page_size (uword memory_size,
332 				    clib_mem_page_sz_t log2_page_sz);
333 void *clib_mem_init_thread_safe (void *memory, uword memory_size);
334 void *clib_mem_init_thread_safe_numa (void *memory, uword memory_size,
337 void clib_mem_exit (void);
339 void clib_mem_validate (void);
341 void clib_mem_trace (int enable);
343 int clib_mem_is_traced (void);
347   /* Total number of objects allocated. */
350   /* Total allocated bytes.  Bytes used and free.
351      used + free = total */
352   uword bytes_total, bytes_used, bytes_free;
354   /* Number of bytes used by mheap data structure overhead
355      (e.g. free lists, mheap header). */
356   uword bytes_overhead;
358   /* Amount of free space returned to operating system. */
359   uword bytes_free_reclaimed;
361   /* For malloc which puts small objects in sbrk region and
362      large objects in mmap'ed regions. */
363   uword bytes_used_sbrk;
364   uword bytes_used_mmap;
366   /* Max. number of bytes in this heap. */
/* Fill *usage with statistics for the current heap. */
370 void clib_mem_usage (clib_mem_usage_t * usage);
/* Format heap usage statistics for vppinfra's format() machinery. */
372 u8 *format_clib_mem_usage (u8 * s, va_list * args);
374 /* Allocate virtual address space. */
/* Anonymous private mmap of 'size' bytes, read/write.  Returns the mapped
   address (error-return path is partially outside this fragment). */
376 clib_mem_vm_alloc (uword size)
379   uword flags = MAP_PRIVATE;
382   flags |= MAP_ANONYMOUS;
385   mmap_addr = mmap (0, size, PROT_READ | PROT_WRITE, flags, -1, 0);
386   if (mmap_addr == (void *) -1)
  /* New pages are valid to access as far as ASan is concerned. */
389     CLIB_MEM_UNPOISON (mmap_addr, size);
/* Release address space obtained from clib_mem_vm_alloc. */
395 clib_mem_vm_free (void *addr, uword size)
/* Managed VM mapping API; mappings are tracked via clib_mem_vm_map_hdr_t. */
400 void *clib_mem_vm_map_internal (void *base, clib_mem_page_sz_t log2_page_sz,
401 				uword size, int fd, uword offset, char *name);
403 void *clib_mem_vm_map (void *start, uword size,
404 		       clib_mem_page_sz_t log2_page_size, char *fmt, ...);
405 void *clib_mem_vm_map_stack (uword size, clib_mem_page_sz_t log2_page_size,
407 void *clib_mem_vm_map_shared (void *start, uword size, int fd, uword offset,
409 int clib_mem_vm_unmap (void *base);
410 clib_mem_vm_map_hdr_t *clib_mem_vm_get_next_map_hdr (clib_mem_vm_map_hdr_t *
/* Request/result descriptor for the extended VM allocator
   (clib_mem_vm_ext_alloc / clib_mem_vm_ext_free). */
415 #define CLIB_MEM_VM_F_SHARED (1 << 0)
416 #define CLIB_MEM_VM_F_HUGETLB (1 << 1)
417 #define CLIB_MEM_VM_F_NUMA_PREFER (1 << 2)
418 #define CLIB_MEM_VM_F_NUMA_FORCE (1 << 3)
419 #define CLIB_MEM_VM_F_HUGETLB_PREALLOC (1 << 4)
420 #define CLIB_MEM_VM_F_LOCKED (1 << 5)
421   u32 flags; /**< vm allocation flags:
422                 <br> CLIB_MEM_VM_F_SHARED: request shared memory, file
423 		descriptor will be provided on successful allocation.
424 		<br> CLIB_MEM_VM_F_HUGETLB: request hugepages.
425 		<br> CLIB_MEM_VM_F_NUMA_PREFER: numa_node field contains valid
426 		numa node preference.
427 		<br> CLIB_MEM_VM_F_NUMA_FORCE: fail if setting numa policy fails.
428 		<br> CLIB_MEM_VM_F_HUGETLB_PREALLOC: pre-allocate hugepages if
429 		number of available pages is not sufficient.
430 		<br> CLIB_MEM_VM_F_LOCKED: request locked memory.
432   char *name; /**< Name for memory allocation, set by caller. */
433   uword size; /**< Allocation size, set by caller. */
434   int numa_node; /**< numa node preference. Valid if CLIB_MEM_VM_F_NUMA_PREFER set. */
435   void *addr; /**< Pointer to allocated memory, set on successful allocation. */
436   int fd; /**< File descriptor, set on successful allocation if CLIB_MEM_VM_F_SHARED is set. */
437   int log2_page_size;		/* Page size in log2 format, set on successful allocation. */
438   int n_pages;			/* Number of pages. */
439   uword requested_va;		/**< Request fixed position mapping */
440 } clib_mem_vm_alloc_t;
/* System page size as log2 (cached in clib_mem_main at init time). */
443 static_always_inline clib_mem_page_sz_t
444 clib_mem_get_log2_page_size (void)
446   return clib_mem_main.log2_page_sz;
/* System page size in bytes. */
449 static_always_inline uword
450 clib_mem_get_page_size (void)
452   return 1ULL << clib_mem_main.log2_page_sz;
/* System default hugepage size as log2. */
455 static_always_inline clib_mem_page_sz_t
456 clib_mem_get_log2_default_hugepage_size ()
458   return clib_mem_main.log2_default_hugepage_sz;
/* Shared-memory / hugepage fd helpers and extended VM allocation API. */
461 clib_error_t *clib_mem_create_fd (char *name, int *fdp);
462 clib_error_t *clib_mem_create_hugetlb_fd (char *name, int *fdp);
463 clib_error_t *clib_mem_vm_ext_alloc (clib_mem_vm_alloc_t * a);
464 void clib_mem_vm_ext_free (clib_mem_vm_alloc_t * a);
465 uword clib_mem_get_fd_page_size (int fd);
466 uword clib_mem_get_default_hugepage_size (void);
467 clib_mem_page_sz_t clib_mem_get_fd_log2_page_size (int fd);
468 uword clib_mem_vm_reserve (uword start, uword size,
469 			   clib_mem_page_sz_t log2_page_sz);
470 u64 *clib_mem_vm_get_paddr (void *mem, clib_mem_page_sz_t log2_page_size,
472 void clib_mem_destroy_mspace (void *mspace);
473 void clib_mem_destroy (void);
/* Request/result descriptor for clib_mem_vm_ext_map. */
477   uword size; /**< Map size */
478   int fd; /**< File descriptor to be mapped */
479   uword requested_va; /**< Request fixed position mapping */
480   void *addr; /**< Pointer to mapped memory, if successful */
484 clib_error_t *clib_mem_vm_ext_map (clib_mem_vm_map_t * a);
485 void clib_mem_vm_randomize_va (uword * requested_va,
486 			       clib_mem_page_sz_t log2_page_size);
/* Heap tracing controls (mheap_trace operates on an explicit heap). */
487 void mheap_trace (void *v, int enable);
488 uword clib_mem_trace_enable_disable (uword enable);
489 void clib_mem_trace (int enable);
/* Round 'size' up to a whole number of pages of the given (log2) page
   size, resolving the DEFAULT / DEFAULT_HUGE markers to real sizes first. */
492 clib_mem_round_to_page_size (uword size, clib_mem_page_sz_t log2_page_size)
494   ASSERT (log2_page_size != CLIB_MEM_PAGE_SZ_UNKNOWN);
496   if (log2_page_size == CLIB_MEM_PAGE_SZ_DEFAULT)
497     log2_page_size = clib_mem_get_log2_page_size ();
498   else if (log2_page_size == CLIB_MEM_PAGE_SZ_DEFAULT_HUGE)
499     log2_page_size = clib_mem_get_log2_default_hugepage_size ();
501   return round_pow2 (size, 1ULL << log2_page_size);
  /* Per-NUMA-node page counts (part of clib_mem_page_stats_t). */
508   uword per_numa[CLIB_MAX_NUMAS];
510 } clib_mem_page_stats_t;
/* Collect residency/NUMA statistics for n_pages pages starting at 'start'. */
512 void clib_mem_get_page_stats (void *start, clib_mem_page_sz_t log2_page_size,
513 			      uword n_pages, clib_mem_page_stats_t * stats);
/* Return the next available NUMA node after 'numa', scanning the node
   bitmap upward (wrap/none-found handling is outside this fragment). */
515 static_always_inline int
516 vlib_mem_get_next_numa_node (int numa)
518   clib_mem_main_t *mm = &clib_mem_main;
519   u32 bitmap = mm->numa_node_bitmap;
  /* Clear node 'numa' and everything below it, then take the lowest
     remaining set bit. */
522     bitmap &= ~pow2_mask (numa + 1);
526   return count_trailing_zeros (bitmap);
/* Resolve the symbolic DEFAULT / DEFAULT_HUGE page-size markers to the
   actual system values; real log2 sizes pass through unchanged. */
529 static_always_inline clib_mem_page_sz_t
530 clib_mem_log2_page_size_validate (clib_mem_page_sz_t log2_page_size)
532   if (log2_page_size == CLIB_MEM_PAGE_SZ_DEFAULT)
533     return clib_mem_get_log2_page_size ();
534   if (log2_page_size == CLIB_MEM_PAGE_SZ_DEFAULT_HUGE)
535     return clib_mem_get_log2_default_hugepage_size ();
536   return log2_page_size;
/* Page size in bytes for the given (possibly symbolic) log2 page size.
   NOTE(review): uses '1 <<' (int) rather than '1ULL <<' as elsewhere —
   could overflow for log2 sizes >= 31 (e.g. 16G pages); verify upstream. */
539 static_always_inline uword
540 clib_mem_page_bytes (clib_mem_page_sz_t log2_page_size)
542   return 1 << clib_mem_log2_page_size_validate (log2_page_size);
546 #include <vppinfra/error.h> /* clib_panic */
548 #endif /* _included_clib_mem_h */
551 * fd.io coding-style-patch-verification: ON
554 * eval: (c-set-style "gnu")