/*
 * Copyright (c) 2015 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
/*
  Copyright (c) 2001, 2002, 2003 Eliot Dresselhaus

  Permission is hereby granted, free of charge, to any person obtaining
  a copy of this software and associated documentation files (the
  "Software"), to deal in the Software without restriction, including
  without limitation the rights to use, copy, modify, merge, publish,
  distribute, sublicense, and/or sell copies of the Software, and to
  permit persons to whom the Software is furnished to do so, subject to
  the following conditions:

  The above copyright notice and this permission notice shall be
  included in all copies or substantial portions of the Software.

  THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
  NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
  LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
  OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
  WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
38 #ifndef _included_clib_mem_h
39 #define _included_clib_mem_h
45 #include <vppinfra/clib.h> /* uword, etc */
46 #include <vppinfra/clib_error.h>
48 #include <vppinfra/os.h>
49 #include <vppinfra/string.h> /* memcpy, clib_memset */
50 #ifdef CLIB_SANITIZE_ADDR
51 #include <sanitizer/asan_interface.h>
54 #define CLIB_MAX_MHEAPS 256
55 #define CLIB_MAX_NUMAS 16
56 #define CLIB_MEM_VM_MAP_FAILED ((void *) ~0)
57 #define CLIB_MEM_ERROR (-1)
58 #define CLIB_MEM_LOG2_MIN_ALIGN (3)
59 #define CLIB_MEM_MIN_ALIGN (1 << CLIB_MEM_LOG2_MIN_ALIGN)
/* Page sizes expressed as log2 of the size in bytes; values < 12 are
   symbolic (unknown / platform default / platform default hugepage). */
typedef enum
{
  CLIB_MEM_PAGE_SZ_UNKNOWN = 0,
  CLIB_MEM_PAGE_SZ_DEFAULT = 1,
  CLIB_MEM_PAGE_SZ_DEFAULT_HUGE = 2,
  CLIB_MEM_PAGE_SZ_4K = 12,
  CLIB_MEM_PAGE_SZ_16K = 14,
  CLIB_MEM_PAGE_SZ_64K = 16,
  CLIB_MEM_PAGE_SZ_1M = 20,
  CLIB_MEM_PAGE_SZ_2M = 21,
  CLIB_MEM_PAGE_SZ_16M = 24,
  CLIB_MEM_PAGE_SZ_32M = 25,
  CLIB_MEM_PAGE_SZ_512M = 29,
  CLIB_MEM_PAGE_SZ_1G = 30,
  CLIB_MEM_PAGE_SZ_16G = 34,
} clib_mem_page_sz_t;
78 typedef struct _clib_mem_vm_map_hdr
86 /* page size (log2) */
87 clib_mem_page_sz_t log2_page_sz;
89 /* file descriptor, -1 if memory is not shared */
93 #define CLIB_VM_MAP_HDR_NAME_MAX_LEN 64
94 char name[CLIB_VM_MAP_HDR_NAME_MAX_LEN];
97 struct _clib_mem_vm_map_hdr *prev, *next;
98 } clib_mem_vm_map_hdr_t;
/* X-macro list of heap flags: (bit, enum suffix, printable name). */
#define foreach_clib_mem_heap_flag                                            \
  _ (0, LOCKED, "locked")                                                     \
  _ (1, UNMAP_ON_DESTROY, "unmap-on-destroy")                                 \
  _ (2, TRACED, "traced")

typedef enum
{
#define _(i, v, s) CLIB_MEM_HEAP_F_##v = (1 << i),
  foreach_clib_mem_heap_flag
#undef _
} clib_mem_heap_flag_t;
117 /* dlmalloc mspace */
123 /* page size (log2) */
124 clib_mem_page_sz_t log2_page_sz:8;
127 clib_mem_heap_flag_t flags:8;
129 /* name - _MUST_ be last */
135 /* log2 system page size */
136 clib_mem_page_sz_t log2_page_sz;
138 /* log2 default hugepage size */
139 clib_mem_page_sz_t log2_default_hugepage_sz;
141 /* log2 system default hugepage size */
142 clib_mem_page_sz_t log2_sys_default_hugepage_sz;
144 /* bitmap of available numa nodes */
145 u32 numa_node_bitmap;
148 void *per_cpu_mheaps[CLIB_MAX_MHEAPS];
151 void *per_numa_mheaps[CLIB_MAX_NUMAS];
154 clib_mem_vm_map_hdr_t *first_map, *last_map;
163 extern clib_mem_main_t clib_mem_main;
165 /* Unspecified NUMA socket */
166 #define VEC_NUMA_UNSPECIFIED (0xFF)
168 static_always_inline void
169 clib_mem_poison (const void volatile *p, uword s)
171 #ifdef CLIB_SANITIZE_ADDR
172 ASAN_POISON_MEMORY_REGION (p, s);
176 static_always_inline void
177 clib_mem_unpoison (const void volatile *p, uword s)
179 #ifdef CLIB_SANITIZE_ADDR
180 ASAN_UNPOISON_MEMORY_REGION (p, s);
184 always_inline clib_mem_heap_t *
185 clib_mem_get_per_cpu_heap (void)
187 int cpu = os_get_thread_index ();
188 return clib_mem_main.per_cpu_mheaps[cpu];
192 clib_mem_set_per_cpu_heap (void *new_heap)
194 int cpu = os_get_thread_index ();
195 void *old = clib_mem_main.per_cpu_mheaps[cpu];
196 clib_mem_main.per_cpu_mheaps[cpu] = new_heap;
201 clib_mem_get_per_numa_heap (u32 numa_id)
203 ASSERT (numa_id < ARRAY_LEN (clib_mem_main.per_numa_mheaps));
204 return clib_mem_main.per_numa_mheaps[numa_id];
208 clib_mem_set_per_numa_heap (void *new_heap)
210 int numa = os_get_numa_index ();
211 void *old = clib_mem_main.per_numa_mheaps[numa];
212 clib_mem_main.per_numa_mheaps[numa] = new_heap;
217 clib_mem_set_thread_index (void)
220 * Find an unused slot in the per-cpu-mheaps array,
221 * and grab it for this thread. We need to be able to
222 * push/pop the thread heap without affecting other thread(s).
225 if (__os_thread_index != 0)
227 for (i = 0; i < ARRAY_LEN (clib_mem_main.per_cpu_mheaps); i++)
228 if (clib_atomic_bool_cmp_and_swap (&clib_mem_main.per_cpu_mheaps[i],
229 0, clib_mem_main.per_cpu_mheaps[0]))
231 os_set_thread_index (i);
234 ASSERT (__os_thread_index > 0);
237 /* Memory allocator which calls os_out_of_memory() when it fails */
238 void *clib_mem_alloc (uword size);
239 void *clib_mem_alloc_aligned (uword size, uword align);
240 void *clib_mem_alloc_or_null (uword size);
241 void *clib_mem_alloc_aligned_or_null (uword size, uword align);
242 void *clib_mem_realloc (void *p, uword new_size);
243 void *clib_mem_realloc_aligned (void *p, uword new_size, uword align);
244 uword clib_mem_is_heap_object (void *p);
245 void clib_mem_free (void *p);
247 void *clib_mem_heap_alloc (void *heap, uword size);
248 void *clib_mem_heap_alloc_aligned (void *heap, uword size, uword align);
249 void *clib_mem_heap_alloc_or_null (void *heap, uword size);
250 void *clib_mem_heap_alloc_aligned_or_null (void *heap, uword size,
252 void *clib_mem_heap_realloc (void *heap, void *p, uword new_size);
253 void *clib_mem_heap_realloc_aligned (void *heap, void *p, uword new_size,
255 uword clib_mem_heap_is_heap_object (void *heap, void *p);
256 void clib_mem_heap_free (void *heap, void *p);
258 uword clib_mem_size (void *p);
259 void clib_mem_free_s (void *p);
/* Memory allocator which panics when it fails.
   Use macro so that clib_panic macro can expand __FUNCTION__ and __LINE__. */
#define clib_mem_alloc_aligned_no_fail(size,align)				\
({										\
  uword _clib_mem_alloc_size = (size);						\
  void * _clib_mem_alloc_p;							\
  _clib_mem_alloc_p = clib_mem_alloc_aligned (_clib_mem_alloc_size, (align)); \
  if (! _clib_mem_alloc_p)							\
    clib_panic ("failed to allocate %d bytes", _clib_mem_alloc_size);		\
  _clib_mem_alloc_p;								\
})

#define clib_mem_alloc_no_fail(size) clib_mem_alloc_aligned_no_fail(size,1)

/* Alias to stack allocator for naming consistency. */
#define clib_mem_alloc_stack(bytes) __builtin_alloca(bytes)
278 always_inline clib_mem_heap_t *
279 clib_mem_get_heap (void)
281 return clib_mem_get_per_cpu_heap ();
284 always_inline clib_mem_heap_t *
285 clib_mem_set_heap (clib_mem_heap_t * heap)
287 return clib_mem_set_per_cpu_heap (heap);
290 void clib_mem_destroy_heap (clib_mem_heap_t * heap);
291 clib_mem_heap_t *clib_mem_create_heap (void *base, uword size, int is_locked,
294 void clib_mem_main_init ();
295 void *clib_mem_init (void *base, uword size);
296 void *clib_mem_init_with_page_size (uword memory_size,
297 clib_mem_page_sz_t log2_page_sz);
298 void *clib_mem_init_thread_safe (void *memory, uword memory_size);
300 void clib_mem_exit (void);
302 void clib_mem_trace (int enable);
304 int clib_mem_is_traced (void);
308 /* Total number of objects allocated. */
311 /* Total allocated bytes. Bytes used and free.
312 used + free = total */
313 uword bytes_total, bytes_used, bytes_free;
315 /* Number of bytes used by mheap data structure overhead
316 (e.g. free lists, mheap header). */
317 uword bytes_overhead;
319 /* Amount of free space returned to operating system. */
320 uword bytes_free_reclaimed;
322 /* For malloc which puts small objects in sbrk region and
323 large objects in mmap'ed regions. */
324 uword bytes_used_sbrk;
325 uword bytes_used_mmap;
327 /* Max. number of bytes in this heap. */
331 void clib_mem_get_heap_usage (clib_mem_heap_t * heap,
332 clib_mem_usage_t * usage);
334 void *clib_mem_get_heap_base (clib_mem_heap_t * heap);
335 uword clib_mem_get_heap_size (clib_mem_heap_t * heap);
336 uword clib_mem_get_heap_free_space (clib_mem_heap_t * heap);
338 u8 *format_clib_mem_usage (u8 * s, va_list * args);
339 u8 *format_clib_mem_heap (u8 * s, va_list * va);
340 u8 *format_clib_mem_page_stats (u8 * s, va_list * va);
342 /* Allocate virtual address space. */
344 clib_mem_vm_alloc (uword size)
347 uword flags = MAP_PRIVATE;
350 flags |= MAP_ANONYMOUS;
353 mmap_addr = mmap (0, size, PROT_READ | PROT_WRITE, flags, -1, 0);
354 if (mmap_addr == (void *) -1)
357 clib_mem_unpoison (mmap_addr, size);
363 clib_mem_vm_free (void *addr, uword size)
368 void *clib_mem_vm_map_internal (void *base, clib_mem_page_sz_t log2_page_sz,
369 uword size, int fd, uword offset, char *name);
371 void *clib_mem_vm_map (void *start, uword size,
372 clib_mem_page_sz_t log2_page_size, char *fmt, ...);
373 void *clib_mem_vm_map_stack (uword size, clib_mem_page_sz_t log2_page_size,
375 void *clib_mem_vm_map_shared (void *start, uword size, int fd, uword offset,
377 int clib_mem_vm_unmap (void *base);
378 clib_mem_vm_map_hdr_t *clib_mem_vm_get_next_map_hdr (clib_mem_vm_map_hdr_t *
381 static_always_inline clib_mem_page_sz_t
382 clib_mem_get_log2_page_size (void)
384 return clib_mem_main.log2_page_sz;
387 static_always_inline uword
388 clib_mem_get_page_size (void)
390 return 1ULL << clib_mem_main.log2_page_sz;
393 static_always_inline void
394 clib_mem_set_log2_default_hugepage_size (clib_mem_page_sz_t log2_page_sz)
396 clib_mem_main.log2_default_hugepage_sz = log2_page_sz;
399 static_always_inline clib_mem_page_sz_t
400 clib_mem_get_log2_default_hugepage_size ()
402 return clib_mem_main.log2_default_hugepage_sz;
405 static_always_inline uword
406 clib_mem_get_default_hugepage_size (void)
408 return 1ULL << clib_mem_main.log2_default_hugepage_sz;
411 int clib_mem_vm_create_fd (clib_mem_page_sz_t log2_page_size, char *fmt, ...);
412 uword clib_mem_get_fd_page_size (int fd);
413 clib_mem_page_sz_t clib_mem_get_fd_log2_page_size (int fd);
414 uword clib_mem_vm_reserve (uword start, uword size,
415 clib_mem_page_sz_t log2_page_sz);
416 u64 *clib_mem_vm_get_paddr (void *mem, clib_mem_page_sz_t log2_page_size,
418 void clib_mem_destroy (void);
419 int clib_mem_set_numa_affinity (u8 numa_node, int force);
420 int clib_mem_set_default_numa_affinity ();
421 void clib_mem_vm_randomize_va (uword * requested_va,
422 clib_mem_page_sz_t log2_page_size);
423 void mheap_trace (clib_mem_heap_t * v, int enable);
424 uword clib_mem_trace_enable_disable (uword enable);
425 void clib_mem_trace (int enable);
428 clib_mem_round_to_page_size (uword size, clib_mem_page_sz_t log2_page_size)
430 ASSERT (log2_page_size != CLIB_MEM_PAGE_SZ_UNKNOWN);
432 if (log2_page_size == CLIB_MEM_PAGE_SZ_DEFAULT)
433 log2_page_size = clib_mem_get_log2_page_size ();
434 else if (log2_page_size == CLIB_MEM_PAGE_SZ_DEFAULT_HUGE)
435 log2_page_size = clib_mem_get_log2_default_hugepage_size ();
437 return round_pow2 (size, 1ULL << log2_page_size);
442 clib_mem_page_sz_t log2_page_sz;
446 uword per_numa[CLIB_MAX_NUMAS];
448 } clib_mem_page_stats_t;
450 void clib_mem_get_page_stats (void *start, clib_mem_page_sz_t log2_page_size,
451 uword n_pages, clib_mem_page_stats_t * stats);
453 static_always_inline int
454 vlib_mem_get_next_numa_node (int numa)
456 clib_mem_main_t *mm = &clib_mem_main;
457 u32 bitmap = mm->numa_node_bitmap;
460 bitmap &= ~pow2_mask (numa + 1);
464 return count_trailing_zeros (bitmap);
467 static_always_inline clib_mem_page_sz_t
468 clib_mem_log2_page_size_validate (clib_mem_page_sz_t log2_page_size)
470 if (log2_page_size == CLIB_MEM_PAGE_SZ_DEFAULT)
471 return clib_mem_get_log2_page_size ();
472 if (log2_page_size == CLIB_MEM_PAGE_SZ_DEFAULT_HUGE)
473 return clib_mem_get_log2_default_hugepage_size ();
474 return log2_page_size;
477 static_always_inline uword
478 clib_mem_page_bytes (clib_mem_page_sz_t log2_page_size)
480 return 1ULL << clib_mem_log2_page_size_validate (log2_page_size);
483 static_always_inline clib_error_t *
484 clib_mem_get_last_error (void)
486 return clib_mem_main.error;
491 typedef void *clib_mem_bulk_handle_t;
492 clib_mem_bulk_handle_t clib_mem_bulk_init (u32 elt_sz, u32 align,
493 u32 min_elts_per_chunk);
494 void clib_mem_bulk_destroy (clib_mem_bulk_handle_t h);
495 void *clib_mem_bulk_alloc (clib_mem_bulk_handle_t h);
496 void clib_mem_bulk_free (clib_mem_bulk_handle_t h, void *p);
497 u8 *format_clib_mem_bulk (u8 *s, va_list *args);
499 #include <vppinfra/error.h> /* clib_panic */
501 #endif /* _included_clib_mem_h */
/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */