2 * Copyright (c) 2015 Cisco and/or its affiliates.
3 * Licensed under the Apache License, Version 2.0 (the "License");
4 * you may not use this file except in compliance with the License.
5 * You may obtain a copy of the License at:
7 * http://www.apache.org/licenses/LICENSE-2.0
9 * Unless required by applicable law or agreed to in writing, software
10 * distributed under the License is distributed on an "AS IS" BASIS,
11 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 * See the License for the specific language governing permissions and
13 * limitations under the License.
16 Copyright (c) 2001, 2002, 2003 Eliot Dresselhaus
18 Permission is hereby granted, free of charge, to any person obtaining
19 a copy of this software and associated documentation files (the
20 "Software"), to deal in the Software without restriction, including
21 without limitation the rights to use, copy, modify, merge, publish,
22 distribute, sublicense, and/or sell copies of the Software, and to
23 permit persons to whom the Software is furnished to do so, subject to
24 the following conditions:
26 The above copyright notice and this permission notice shall be
27 included in all copies or substantial portions of the Software.
29 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
30 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
31 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
32 NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
33 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
34 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
35 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
38 #ifndef _included_clib_mem_h
39 #define _included_clib_mem_h
45 #include <vppinfra/clib.h> /* uword, etc */
46 #include <vppinfra/clib_error.h>
48 #include <vppinfra/dlmalloc.h>
50 #include <vppinfra/os.h>
51 #include <vppinfra/string.h> /* memcpy, clib_memset */
52 #include <vppinfra/sanitizer.h>
/* Upper bounds for the per-thread and per-NUMA heap tables below. */
#define CLIB_MAX_MHEAPS 256
#define CLIB_MAX_NUMAS 16

/* Sentinel returned by VM map functions on failure (mmap-style ~0). */
#define CLIB_MEM_VM_MAP_FAILED ((void *) ~0)
/* Page sizes expressed as log2 of the size in bytes; the three small
   values are symbolic (resolved at runtime via clib_mem_main). */
typedef enum
{
  CLIB_MEM_PAGE_SZ_UNKNOWN = 0,
  CLIB_MEM_PAGE_SZ_DEFAULT = 1,	     /* system default page size */
  CLIB_MEM_PAGE_SZ_DEFAULT_HUGE = 2, /* system default hugepage size */
  CLIB_MEM_PAGE_SZ_4K = 12,
  CLIB_MEM_PAGE_SZ_16K = 14,
  CLIB_MEM_PAGE_SZ_64K = 16,
  CLIB_MEM_PAGE_SZ_1M = 20,
  CLIB_MEM_PAGE_SZ_2M = 21,
  CLIB_MEM_PAGE_SZ_16M = 24,
  CLIB_MEM_PAGE_SZ_32M = 25,
  CLIB_MEM_PAGE_SZ_512M = 29,
  CLIB_MEM_PAGE_SZ_1G = 30,
  CLIB_MEM_PAGE_SZ_16G = 34,
} clib_mem_page_sz_t;
75 typedef struct _clib_mem_vm_map_hdr
83 /* page size (log2) */
84 clib_mem_page_sz_t log2_page_sz;
86 /* file descriptor, -1 if memory is not shared */
90 #define CLIB_VM_MAP_HDR_NAME_MAX_LEN 64
91 char name[CLIB_VM_MAP_HDR_NAME_MAX_LEN];
94 struct _clib_mem_vm_map_hdr *prev, *next;
95 } clib_mem_vm_map_hdr_t;
99 /* log2 system page size */
100 clib_mem_page_sz_t log2_page_sz;
102 /* log2 system default hugepage size */
103 clib_mem_page_sz_t log2_default_hugepage_sz;
105 /* bitmap of available numa nodes */
106 u32 numa_node_bitmap;
109 void *per_cpu_mheaps[CLIB_MAX_MHEAPS];
112 void *per_numa_mheaps[CLIB_MAX_NUMAS];
115 clib_mem_vm_map_hdr_t *first_map, *last_map;
118 extern clib_mem_main_t clib_mem_main;
/* Unspecified NUMA socket */
#define VEC_NUMA_UNSPECIFIED (0xFF)
124 clib_mem_get_per_cpu_heap (void)
126 int cpu = os_get_thread_index ();
127 return clib_mem_main.per_cpu_mheaps[cpu];
131 clib_mem_set_per_cpu_heap (u8 * new_heap)
133 int cpu = os_get_thread_index ();
134 void *old = clib_mem_main.per_cpu_mheaps[cpu];
135 clib_mem_main.per_cpu_mheaps[cpu] = new_heap;
140 clib_mem_get_per_numa_heap (u32 numa_id)
142 ASSERT (numa_id < ARRAY_LEN (clib_mem_main.per_numa_mheaps));
143 return clib_mem_main.per_numa_mheaps[numa_id];
147 clib_mem_set_per_numa_heap (u8 * new_heap)
149 int numa = os_get_numa_index ();
150 void *old = clib_mem_main.per_numa_mheaps[numa];
151 clib_mem_main.per_numa_mheaps[numa] = new_heap;
156 clib_mem_set_thread_index (void)
159 * Find an unused slot in the per-cpu-mheaps array,
160 * and grab it for this thread. We need to be able to
161 * push/pop the thread heap without affecting other thread(s).
164 if (__os_thread_index != 0)
166 for (i = 0; i < ARRAY_LEN (clib_mem_main.per_cpu_mheaps); i++)
167 if (clib_atomic_bool_cmp_and_swap (&clib_mem_main.per_cpu_mheaps[i],
168 0, clib_mem_main.per_cpu_mheaps[0]))
170 os_set_thread_index (i);
173 ASSERT (__os_thread_index > 0);
177 clib_mem_size_nocheck (void *p)
179 return mspace_usable_size_with_delta (p);
182 /* Memory allocator which may call os_out_of_memory() if it fails */
184 clib_mem_alloc_aligned_at_offset (uword size, uword align, uword align_offset,
185 int os_out_of_memory_on_failure)
190 if (align_offset > align)
193 align_offset %= align;
195 align_offset = align;
198 cpu = os_get_thread_index ();
199 heap = clib_mem_main.per_cpu_mheaps[cpu];
201 p = mspace_get_aligned (heap, size, align, align_offset);
203 if (PREDICT_FALSE (0 == p))
205 if (os_out_of_memory_on_failure)
210 CLIB_MEM_UNPOISON (p, size);
214 /* Memory allocator which calls os_out_of_memory() when it fails */
216 clib_mem_alloc (uword size)
218 return clib_mem_alloc_aligned_at_offset (size, /* align */ 1,
219 /* align_offset */ 0,
220 /* os_out_of_memory */ 1);
224 clib_mem_alloc_aligned (uword size, uword align)
226 return clib_mem_alloc_aligned_at_offset (size, align, /* align_offset */ 0,
227 /* os_out_of_memory */ 1);
230 /* Memory allocator which calls os_out_of_memory() when it fails */
232 clib_mem_alloc_or_null (uword size)
234 return clib_mem_alloc_aligned_at_offset (size, /* align */ 1,
235 /* align_offset */ 0,
236 /* os_out_of_memory */ 0);
240 clib_mem_alloc_aligned_or_null (uword size, uword align)
242 return clib_mem_alloc_aligned_at_offset (size, align, /* align_offset */ 0,
243 /* os_out_of_memory */ 0);
/* Memory allocator which panics when it fails.
   Use macro so that clib_panic macro can expand __FUNCTION__ and __LINE__.
   Implemented as a GCC statement expression so it yields the pointer. */
#define clib_mem_alloc_aligned_no_fail(size,align)				\
({										\
  uword _clib_mem_alloc_size = (size);						\
  void * _clib_mem_alloc_p;							\
  _clib_mem_alloc_p = clib_mem_alloc_aligned (_clib_mem_alloc_size, (align));	\
  if (! _clib_mem_alloc_p)							\
    clib_panic ("failed to allocate %d bytes", _clib_mem_alloc_size);		\
  _clib_mem_alloc_p;								\
})

#define clib_mem_alloc_no_fail(size) clib_mem_alloc_aligned_no_fail(size,1)
/* Alias to stack allocator for naming consistency.
   Caution: storage is released when the *calling function* returns. */
#define clib_mem_alloc_stack(bytes) __builtin_alloca(bytes)
266 clib_mem_is_heap_object (void *p)
268 void *heap = clib_mem_get_per_cpu_heap ();
270 return mspace_is_heap_object (heap, p);
274 clib_mem_free (void *p)
276 u8 *heap = clib_mem_get_per_cpu_heap ();
278 /* Make sure object is in the correct heap. */
279 ASSERT (clib_mem_is_heap_object (p));
281 CLIB_MEM_POISON (p, clib_mem_size_nocheck (p));
283 mspace_put (heap, p);
287 clib_mem_realloc (void *p, uword new_size, uword old_size)
289 /* By default use alloc, copy and free to emulate realloc. */
290 void *q = clib_mem_alloc (new_size);
294 if (old_size < new_size)
295 copy_size = old_size;
297 copy_size = new_size;
298 clib_memcpy_fast (q, p, copy_size);
305 clib_mem_size (void *p)
307 ASSERT (clib_mem_is_heap_object (p));
308 return clib_mem_size_nocheck (p);
312 clib_mem_free_s (void *p)
314 uword size = clib_mem_size (p);
315 CLIB_MEM_UNPOISON (p, size);
316 memset_s_inline (p, size, 0, size);
321 clib_mem_get_heap (void)
323 return clib_mem_get_per_cpu_heap ();
327 clib_mem_set_heap (void *heap)
329 return clib_mem_set_per_cpu_heap (heap);
332 void clib_mem_main_init ();
333 void *clib_mem_init (void *heap, uword size);
334 void *clib_mem_init_with_page_size (uword memory_size,
335 clib_mem_page_sz_t log2_page_sz);
336 void *clib_mem_init_thread_safe (void *memory, uword memory_size);
337 void *clib_mem_init_thread_safe_numa (void *memory, uword memory_size,
340 void clib_mem_exit (void);
342 void clib_mem_validate (void);
344 void clib_mem_trace (int enable);
346 int clib_mem_is_traced (void);
350 /* Total number of objects allocated. */
353 /* Total allocated bytes. Bytes used and free.
354 used + free = total */
355 uword bytes_total, bytes_used, bytes_free;
357 /* Number of bytes used by mheap data structure overhead
358 (e.g. free lists, mheap header). */
359 uword bytes_overhead;
361 /* Amount of free space returned to operating system. */
362 uword bytes_free_reclaimed;
364 /* For malloc which puts small objects in sbrk region and
365 large objects in mmap'ed regions. */
366 uword bytes_used_sbrk;
367 uword bytes_used_mmap;
369 /* Max. number of bytes in this heap. */
373 void clib_mem_usage (clib_mem_usage_t * usage);
375 u8 *format_clib_mem_usage (u8 * s, va_list * args);
377 /* Allocate virtual address space. */
379 clib_mem_vm_alloc (uword size)
382 uword flags = MAP_PRIVATE;
385 flags |= MAP_ANONYMOUS;
388 mmap_addr = mmap (0, size, PROT_READ | PROT_WRITE, flags, -1, 0);
389 if (mmap_addr == (void *) -1)
392 CLIB_MEM_UNPOISON (mmap_addr, size);
398 clib_mem_vm_free (void *addr, uword size)
403 void *clib_mem_vm_map_internal (void *base, clib_mem_page_sz_t log2_page_sz,
404 uword size, int fd, uword offset, char *name);
406 void *clib_mem_vm_map (void *start, uword size,
407 clib_mem_page_sz_t log2_page_size, char *fmt, ...);
408 void *clib_mem_vm_map_stack (uword size, clib_mem_page_sz_t log2_page_size,
410 void *clib_mem_vm_map_shared (void *start, uword size, int fd, uword offset,
412 int clib_mem_vm_unmap (void *base);
413 clib_mem_vm_map_hdr_t *clib_mem_vm_get_next_map_hdr (clib_mem_vm_map_hdr_t *
418 #define CLIB_MEM_VM_F_SHARED (1 << 0)
419 #define CLIB_MEM_VM_F_HUGETLB (1 << 1)
420 #define CLIB_MEM_VM_F_NUMA_PREFER (1 << 2)
421 #define CLIB_MEM_VM_F_NUMA_FORCE (1 << 3)
422 #define CLIB_MEM_VM_F_HUGETLB_PREALLOC (1 << 4)
423 #define CLIB_MEM_VM_F_LOCKED (1 << 5)
424 u32 flags; /**< vm allocation flags:
425 <br> CLIB_MEM_VM_F_SHARED: request shared memory, file
426 descriptor will be provided on successful allocation.
427 <br> CLIB_MEM_VM_F_HUGETLB: request hugepages.
428 <br> CLIB_MEM_VM_F_NUMA_PREFER: numa_node field contains valid
429 numa node preference.
430 <br> CLIB_MEM_VM_F_NUMA_FORCE: fail if setting numa policy fails.
431 <br> CLIB_MEM_VM_F_HUGETLB_PREALLOC: pre-allocate hugepages if
432 number of available pages is not sufficient.
433 <br> CLIB_MEM_VM_F_LOCKED: request locked memory.
435 char *name; /**< Name for memory allocation, set by caller. */
436 uword size; /**< Allocation size, set by caller. */
437 int numa_node; /**< numa node preference. Valid if CLIB_MEM_VM_F_NUMA_PREFER set. */
438 void *addr; /**< Pointer to allocated memory, set on successful allocation. */
439 int fd; /**< File descriptor, set on successful allocation if CLIB_MEM_VM_F_SHARED is set. */
440 int log2_page_size; /* Page size in log2 format, set on successful allocation. */
441 int n_pages; /* Number of pages. */
442 uword requested_va; /**< Request fixed position mapping */
443 } clib_mem_vm_alloc_t;
446 static_always_inline clib_mem_page_sz_t
447 clib_mem_get_log2_page_size (void)
449 return clib_mem_main.log2_page_sz;
452 static_always_inline uword
453 clib_mem_get_page_size (void)
455 return 1ULL << clib_mem_main.log2_page_sz;
458 static_always_inline clib_mem_page_sz_t
459 clib_mem_get_log2_default_hugepage_size ()
461 return clib_mem_main.log2_default_hugepage_sz;
464 clib_error_t *clib_mem_create_fd (char *name, int *fdp);
465 clib_error_t *clib_mem_create_hugetlb_fd (char *name, int *fdp);
466 clib_error_t *clib_mem_vm_ext_alloc (clib_mem_vm_alloc_t * a);
467 void clib_mem_vm_ext_free (clib_mem_vm_alloc_t * a);
468 uword clib_mem_get_fd_page_size (int fd);
469 uword clib_mem_get_default_hugepage_size (void);
470 clib_mem_page_sz_t clib_mem_get_fd_log2_page_size (int fd);
471 uword clib_mem_vm_reserve (uword start, uword size,
472 clib_mem_page_sz_t log2_page_sz);
473 u64 *clib_mem_vm_get_paddr (void *mem, clib_mem_page_sz_t log2_page_size,
475 void clib_mem_destroy_mspace (void *mspace);
476 void clib_mem_destroy (void);
480 uword size; /**< Map size */
481 int fd; /**< File descriptor to be mapped */
482 uword requested_va; /**< Request fixed position mapping */
483 void *addr; /**< Pointer to mapped memory, if successful */
487 clib_error_t *clib_mem_vm_ext_map (clib_mem_vm_map_t * a);
488 void clib_mem_vm_randomize_va (uword * requested_va,
489 clib_mem_page_sz_t log2_page_size);
490 void mheap_trace (void *v, int enable);
491 uword clib_mem_trace_enable_disable (uword enable);
492 void clib_mem_trace (int enable);
495 clib_mem_round_to_page_size (uword size, clib_mem_page_sz_t log2_page_size)
497 ASSERT (log2_page_size != CLIB_MEM_PAGE_SZ_UNKNOWN);
499 if (log2_page_size == CLIB_MEM_PAGE_SZ_DEFAULT)
500 log2_page_size = clib_mem_get_log2_page_size ();
501 else if (log2_page_size == CLIB_MEM_PAGE_SZ_DEFAULT_HUGE)
502 log2_page_size = clib_mem_get_log2_default_hugepage_size ();
504 return round_pow2 (size, 1ULL << log2_page_size);
511 uword per_numa[CLIB_MAX_NUMAS];
513 } clib_mem_page_stats_t;
515 void clib_mem_get_page_stats (void *start, clib_mem_page_sz_t log2_page_size,
516 uword n_pages, clib_mem_page_stats_t * stats);
518 static_always_inline int
519 vlib_mem_get_next_numa_node (int numa)
521 clib_mem_main_t *mm = &clib_mem_main;
522 u32 bitmap = mm->numa_node_bitmap;
525 bitmap &= ~pow2_mask (numa + 1);
529 return count_trailing_zeros (bitmap);
532 static_always_inline clib_mem_page_sz_t
533 clib_mem_log2_page_size_validate (clib_mem_page_sz_t log2_page_size)
535 if (log2_page_size == CLIB_MEM_PAGE_SZ_DEFAULT)
536 return clib_mem_get_log2_page_size ();
537 if (log2_page_size == CLIB_MEM_PAGE_SZ_DEFAULT_HUGE)
538 return clib_mem_get_log2_default_hugepage_size ();
539 return log2_page_size;
542 static_always_inline uword
543 clib_mem_page_bytes (clib_mem_page_sz_t log2_page_size)
545 return 1 << clib_mem_log2_page_size_validate (log2_page_size);
549 #include <vppinfra/error.h> /* clib_panic */
551 #endif /* _included_clib_mem_h */
554 * fd.io coding-style-patch-verification: ON
557 * eval: (c-set-style "gnu")