/*
 * Copyright (c) 2015 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
/*
  Copyright (c) 2001, 2002, 2003 Eliot Dresselhaus

  Permission is hereby granted, free of charge, to any person obtaining
  a copy of this software and associated documentation files (the
  "Software"), to deal in the Software without restriction, including
  without limitation the rights to use, copy, modify, merge, publish,
  distribute, sublicense, and/or sell copies of the Software, and to
  permit persons to whom the Software is furnished to do so, subject to
  the following conditions:

  The above copyright notice and this permission notice shall be
  included in all copies or substantial portions of the Software.

  THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
  NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
  LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
  OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
  WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
38 #ifndef _included_clib_mem_h
39 #define _included_clib_mem_h
45 #include <vppinfra/clib.h> /* uword, etc */
46 #include <vppinfra/clib_error.h>
48 #include <vppinfra/dlmalloc.h>
50 #include <vppinfra/os.h>
51 #include <vppinfra/string.h> /* memcpy, clib_memset */
52 #include <vppinfra/sanitizer.h>
54 #define CLIB_MAX_MHEAPS 256
55 #define CLIB_MAX_NUMAS 16
56 #define CLIB_MEM_VM_MAP_FAILED ((void *) ~0)
/* Page sizes, expressed as log2 of the size in bytes.  The first three
   values are symbolic: unknown, the system default page size, and the
   system default hugepage size. */
typedef enum
{
  CLIB_MEM_PAGE_SZ_UNKNOWN = 0,
  CLIB_MEM_PAGE_SZ_DEFAULT = 1,
  CLIB_MEM_PAGE_SZ_DEFAULT_HUGE = 2,
  CLIB_MEM_PAGE_SZ_4K = 12,
  CLIB_MEM_PAGE_SZ_16K = 14,
  CLIB_MEM_PAGE_SZ_64K = 16,
  CLIB_MEM_PAGE_SZ_1M = 20,
  CLIB_MEM_PAGE_SZ_2M = 21,
  CLIB_MEM_PAGE_SZ_16M = 24,
  CLIB_MEM_PAGE_SZ_32M = 25,
  CLIB_MEM_PAGE_SZ_512M = 29,
  CLIB_MEM_PAGE_SZ_1G = 30,
  CLIB_MEM_PAGE_SZ_16G = 34,
} clib_mem_page_sz_t;
75 typedef struct _clib_mem_vm_map_hdr
83 /* page size (log2) */
84 clib_mem_page_sz_t log2_page_sz;
86 /* file descriptor, -1 if memory is not shared */
90 #define CLIB_VM_MAP_HDR_NAME_MAX_LEN 64
91 char name[CLIB_VM_MAP_HDR_NAME_MAX_LEN];
94 struct _clib_mem_vm_map_hdr *prev, *next;
95 } clib_mem_vm_map_hdr_t;
99 /* log2 system page size */
100 clib_mem_page_sz_t log2_page_sz;
102 /* log2 system default hugepage size */
103 clib_mem_page_sz_t log2_default_hugepage_sz;
105 /* bitmap of available numa nodes */
106 u32 numa_node_bitmap;
109 void *per_cpu_mheaps[CLIB_MAX_MHEAPS];
112 void *per_numa_mheaps[CLIB_MAX_NUMAS];
115 clib_mem_vm_map_hdr_t *first_map, *last_map;
121 extern clib_mem_main_t clib_mem_main;
/* Unspecified NUMA socket */
#define VEC_NUMA_UNSPECIFIED (0xFF)
127 clib_mem_get_per_cpu_heap (void)
129 int cpu = os_get_thread_index ();
130 return clib_mem_main.per_cpu_mheaps[cpu];
134 clib_mem_set_per_cpu_heap (u8 * new_heap)
136 int cpu = os_get_thread_index ();
137 void *old = clib_mem_main.per_cpu_mheaps[cpu];
138 clib_mem_main.per_cpu_mheaps[cpu] = new_heap;
143 clib_mem_get_per_numa_heap (u32 numa_id)
145 ASSERT (numa_id < ARRAY_LEN (clib_mem_main.per_numa_mheaps));
146 return clib_mem_main.per_numa_mheaps[numa_id];
150 clib_mem_set_per_numa_heap (u8 * new_heap)
152 int numa = os_get_numa_index ();
153 void *old = clib_mem_main.per_numa_mheaps[numa];
154 clib_mem_main.per_numa_mheaps[numa] = new_heap;
159 clib_mem_set_thread_index (void)
162 * Find an unused slot in the per-cpu-mheaps array,
163 * and grab it for this thread. We need to be able to
164 * push/pop the thread heap without affecting other thread(s).
167 if (__os_thread_index != 0)
169 for (i = 0; i < ARRAY_LEN (clib_mem_main.per_cpu_mheaps); i++)
170 if (clib_atomic_bool_cmp_and_swap (&clib_mem_main.per_cpu_mheaps[i],
171 0, clib_mem_main.per_cpu_mheaps[0]))
173 os_set_thread_index (i);
176 ASSERT (__os_thread_index > 0);
180 clib_mem_size_nocheck (void *p)
182 return mspace_usable_size_with_delta (p);
185 /* Memory allocator which may call os_out_of_memory() if it fails */
187 clib_mem_alloc_aligned_at_offset (uword size, uword align, uword align_offset,
188 int os_out_of_memory_on_failure)
193 if (align_offset > align)
196 align_offset %= align;
198 align_offset = align;
201 cpu = os_get_thread_index ();
202 heap = clib_mem_main.per_cpu_mheaps[cpu];
204 p = mspace_get_aligned (heap, size, align, align_offset);
206 if (PREDICT_FALSE (0 == p))
208 if (os_out_of_memory_on_failure)
213 CLIB_MEM_UNPOISON (p, size);
217 /* Memory allocator which calls os_out_of_memory() when it fails */
219 clib_mem_alloc (uword size)
221 return clib_mem_alloc_aligned_at_offset (size, /* align */ 1,
222 /* align_offset */ 0,
223 /* os_out_of_memory */ 1);
227 clib_mem_alloc_aligned (uword size, uword align)
229 return clib_mem_alloc_aligned_at_offset (size, align, /* align_offset */ 0,
230 /* os_out_of_memory */ 1);
233 /* Memory allocator which calls os_out_of_memory() when it fails */
235 clib_mem_alloc_or_null (uword size)
237 return clib_mem_alloc_aligned_at_offset (size, /* align */ 1,
238 /* align_offset */ 0,
239 /* os_out_of_memory */ 0);
243 clib_mem_alloc_aligned_or_null (uword size, uword align)
245 return clib_mem_alloc_aligned_at_offset (size, align, /* align_offset */ 0,
246 /* os_out_of_memory */ 0);
/* Memory allocator which panics when it fails.
   Use macro so that clib_panic macro can expand __FUNCTION__ and __LINE__. */
#define clib_mem_alloc_aligned_no_fail(size,align)				\
({										\
  uword _clib_mem_alloc_size = (size);						\
  void * _clib_mem_alloc_p;							\
  _clib_mem_alloc_p = clib_mem_alloc_aligned (_clib_mem_alloc_size, (align));	\
  if (! _clib_mem_alloc_p)							\
    clib_panic ("failed to allocate %d bytes", _clib_mem_alloc_size);		\
  _clib_mem_alloc_p;								\
})

#define clib_mem_alloc_no_fail(size) clib_mem_alloc_aligned_no_fail(size,1)
/* Alias to stack allocator for naming consistency. */
#define clib_mem_alloc_stack(bytes) __builtin_alloca(bytes)
269 clib_mem_is_heap_object (void *p)
271 void *heap = clib_mem_get_per_cpu_heap ();
273 return mspace_is_heap_object (heap, p);
277 clib_mem_free (void *p)
279 u8 *heap = clib_mem_get_per_cpu_heap ();
281 /* Make sure object is in the correct heap. */
282 ASSERT (clib_mem_is_heap_object (p));
284 CLIB_MEM_POISON (p, clib_mem_size_nocheck (p));
286 mspace_put (heap, p);
290 clib_mem_realloc (void *p, uword new_size, uword old_size)
292 /* By default use alloc, copy and free to emulate realloc. */
293 void *q = clib_mem_alloc (new_size);
297 if (old_size < new_size)
298 copy_size = old_size;
300 copy_size = new_size;
301 clib_memcpy_fast (q, p, copy_size);
308 clib_mem_size (void *p)
310 ASSERT (clib_mem_is_heap_object (p));
311 return clib_mem_size_nocheck (p);
315 clib_mem_free_s (void *p)
317 uword size = clib_mem_size (p);
318 CLIB_MEM_UNPOISON (p, size);
319 memset_s_inline (p, size, 0, size);
324 clib_mem_get_heap (void)
326 return clib_mem_get_per_cpu_heap ();
330 clib_mem_set_heap (void *heap)
332 return clib_mem_set_per_cpu_heap (heap);
335 void clib_mem_main_init ();
336 void *clib_mem_init (void *heap, uword size);
337 void *clib_mem_init_with_page_size (uword memory_size,
338 clib_mem_page_sz_t log2_page_sz);
339 void *clib_mem_init_thread_safe (void *memory, uword memory_size);
340 void *clib_mem_init_thread_safe_numa (void *memory, uword memory_size,
343 void clib_mem_exit (void);
345 void clib_mem_validate (void);
347 void clib_mem_trace (int enable);
349 int clib_mem_is_traced (void);
353 /* Total number of objects allocated. */
356 /* Total allocated bytes. Bytes used and free.
357 used + free = total */
358 uword bytes_total, bytes_used, bytes_free;
360 /* Number of bytes used by mheap data structure overhead
361 (e.g. free lists, mheap header). */
362 uword bytes_overhead;
364 /* Amount of free space returned to operating system. */
365 uword bytes_free_reclaimed;
367 /* For malloc which puts small objects in sbrk region and
368 large objects in mmap'ed regions. */
369 uword bytes_used_sbrk;
370 uword bytes_used_mmap;
372 /* Max. number of bytes in this heap. */
376 void clib_mem_usage (clib_mem_usage_t * usage);
378 u8 *format_clib_mem_usage (u8 * s, va_list * args);
380 /* Allocate virtual address space. */
382 clib_mem_vm_alloc (uword size)
385 uword flags = MAP_PRIVATE;
388 flags |= MAP_ANONYMOUS;
391 mmap_addr = mmap (0, size, PROT_READ | PROT_WRITE, flags, -1, 0);
392 if (mmap_addr == (void *) -1)
395 CLIB_MEM_UNPOISON (mmap_addr, size);
401 clib_mem_vm_free (void *addr, uword size)
406 void *clib_mem_vm_map_internal (void *base, clib_mem_page_sz_t log2_page_sz,
407 uword size, int fd, uword offset, char *name);
409 void *clib_mem_vm_map (void *start, uword size,
410 clib_mem_page_sz_t log2_page_size, char *fmt, ...);
411 void *clib_mem_vm_map_stack (uword size, clib_mem_page_sz_t log2_page_size,
413 void *clib_mem_vm_map_shared (void *start, uword size, int fd, uword offset,
415 int clib_mem_vm_unmap (void *base);
416 clib_mem_vm_map_hdr_t *clib_mem_vm_get_next_map_hdr (clib_mem_vm_map_hdr_t *
421 #define CLIB_MEM_VM_F_SHARED (1 << 0)
422 #define CLIB_MEM_VM_F_HUGETLB (1 << 1)
423 #define CLIB_MEM_VM_F_NUMA_PREFER (1 << 2)
424 #define CLIB_MEM_VM_F_NUMA_FORCE (1 << 3)
425 #define CLIB_MEM_VM_F_HUGETLB_PREALLOC (1 << 4)
426 #define CLIB_MEM_VM_F_LOCKED (1 << 5)
427 u32 flags; /**< vm allocation flags:
428 <br> CLIB_MEM_VM_F_SHARED: request shared memory, file
429 descriptor will be provided on successful allocation.
430 <br> CLIB_MEM_VM_F_HUGETLB: request hugepages.
431 <br> CLIB_MEM_VM_F_NUMA_PREFER: numa_node field contains valid
432 numa node preference.
433 <br> CLIB_MEM_VM_F_NUMA_FORCE: fail if setting numa policy fails.
434 <br> CLIB_MEM_VM_F_HUGETLB_PREALLOC: pre-allocate hugepages if
435 number of available pages is not sufficient.
436 <br> CLIB_MEM_VM_F_LOCKED: request locked memory.
438 char *name; /**< Name for memory allocation, set by caller. */
439 uword size; /**< Allocation size, set by caller. */
440 int numa_node; /**< numa node preference. Valid if CLIB_MEM_VM_F_NUMA_PREFER set. */
441 void *addr; /**< Pointer to allocated memory, set on successful allocation. */
442 int fd; /**< File descriptor, set on successful allocation if CLIB_MEM_VM_F_SHARED is set. */
443 int log2_page_size; /* Page size in log2 format, set on successful allocation. */
444 int n_pages; /* Number of pages. */
445 uword requested_va; /**< Request fixed position mapping */
446 } clib_mem_vm_alloc_t;
449 static_always_inline clib_mem_page_sz_t
450 clib_mem_get_log2_page_size (void)
452 return clib_mem_main.log2_page_sz;
455 static_always_inline uword
456 clib_mem_get_page_size (void)
458 return 1ULL << clib_mem_main.log2_page_sz;
461 static_always_inline clib_mem_page_sz_t
462 clib_mem_get_log2_default_hugepage_size ()
464 return clib_mem_main.log2_default_hugepage_sz;
467 int clib_mem_vm_create_fd (clib_mem_page_sz_t log2_page_size, char *fmt, ...);
468 clib_error_t *clib_mem_vm_ext_alloc (clib_mem_vm_alloc_t * a);
469 void clib_mem_vm_ext_free (clib_mem_vm_alloc_t * a);
470 uword clib_mem_get_fd_page_size (int fd);
471 uword clib_mem_get_default_hugepage_size (void);
472 clib_mem_page_sz_t clib_mem_get_fd_log2_page_size (int fd);
473 uword clib_mem_vm_reserve (uword start, uword size,
474 clib_mem_page_sz_t log2_page_sz);
475 u64 *clib_mem_vm_get_paddr (void *mem, clib_mem_page_sz_t log2_page_size,
477 void clib_mem_destroy_mspace (void *mspace);
478 void clib_mem_destroy (void);
482 uword size; /**< Map size */
483 int fd; /**< File descriptor to be mapped */
484 uword requested_va; /**< Request fixed position mapping */
485 void *addr; /**< Pointer to mapped memory, if successful */
489 clib_error_t *clib_mem_vm_ext_map (clib_mem_vm_map_t * a);
490 void clib_mem_vm_randomize_va (uword * requested_va,
491 clib_mem_page_sz_t log2_page_size);
492 void mheap_trace (void *v, int enable);
493 uword clib_mem_trace_enable_disable (uword enable);
494 void clib_mem_trace (int enable);
497 clib_mem_round_to_page_size (uword size, clib_mem_page_sz_t log2_page_size)
499 ASSERT (log2_page_size != CLIB_MEM_PAGE_SZ_UNKNOWN);
501 if (log2_page_size == CLIB_MEM_PAGE_SZ_DEFAULT)
502 log2_page_size = clib_mem_get_log2_page_size ();
503 else if (log2_page_size == CLIB_MEM_PAGE_SZ_DEFAULT_HUGE)
504 log2_page_size = clib_mem_get_log2_default_hugepage_size ();
506 return round_pow2 (size, 1ULL << log2_page_size);
513 uword per_numa[CLIB_MAX_NUMAS];
515 } clib_mem_page_stats_t;
517 void clib_mem_get_page_stats (void *start, clib_mem_page_sz_t log2_page_size,
518 uword n_pages, clib_mem_page_stats_t * stats);
520 static_always_inline int
521 vlib_mem_get_next_numa_node (int numa)
523 clib_mem_main_t *mm = &clib_mem_main;
524 u32 bitmap = mm->numa_node_bitmap;
527 bitmap &= ~pow2_mask (numa + 1);
531 return count_trailing_zeros (bitmap);
534 static_always_inline clib_mem_page_sz_t
535 clib_mem_log2_page_size_validate (clib_mem_page_sz_t log2_page_size)
537 if (log2_page_size == CLIB_MEM_PAGE_SZ_DEFAULT)
538 return clib_mem_get_log2_page_size ();
539 if (log2_page_size == CLIB_MEM_PAGE_SZ_DEFAULT_HUGE)
540 return clib_mem_get_log2_default_hugepage_size ();
541 return log2_page_size;
544 static_always_inline uword
545 clib_mem_page_bytes (clib_mem_page_sz_t log2_page_size)
547 return 1 << clib_mem_log2_page_size_validate (log2_page_size);
550 static_always_inline clib_error_t *
551 clib_mem_get_last_error (void)
553 return clib_mem_main.error;
557 #include <vppinfra/error.h> /* clib_panic */
559 #endif /* _included_clib_mem_h */
/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */