2 * Copyright (c) 2015 Cisco and/or its affiliates.
3 * Licensed under the Apache License, Version 2.0 (the "License");
4 * you may not use this file except in compliance with the License.
5 * You may obtain a copy of the License at:
7 * http://www.apache.org/licenses/LICENSE-2.0
9 * Unless required by applicable law or agreed to in writing, software
10 * distributed under the License is distributed on an "AS IS" BASIS,
11 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 * See the License for the specific language governing permissions and
13 * limitations under the License.
16 Copyright (c) 2001, 2002, 2003 Eliot Dresselhaus
18 Permission is hereby granted, free of charge, to any person obtaining
19 a copy of this software and associated documentation files (the
20 "Software"), to deal in the Software without restriction, including
21 without limitation the rights to use, copy, modify, merge, publish,
22 distribute, sublicense, and/or sell copies of the Software, and to
23 permit persons to whom the Software is furnished to do so, subject to
24 the following conditions:
26 The above copyright notice and this permission notice shall be
27 included in all copies or substantial portions of the Software.
29 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
30 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
31 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
32 NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
33 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
34 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
35 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
38 #ifndef _included_clib_mem_h
39 #define _included_clib_mem_h
45 #include <vppinfra/clib.h> /* uword, etc */
46 #include <vppinfra/clib_error.h>
48 #include <vppinfra/os.h>
49 #include <vppinfra/string.h> /* memcpy, clib_memset */
50 #include <vppinfra/sanitizer.h>
52 #define CLIB_MAX_MHEAPS 256
53 #define CLIB_MAX_NUMAS 16
54 #define CLIB_MEM_VM_MAP_FAILED ((void *) ~0)
55 #define CLIB_MEM_ERROR (-1)
/* Page-size identifiers: values >= 12 are log2 of the page size in bytes
   (12 = 4K ... 34 = 16G).  Values 0..2 are symbolic markers that are
   resolved to a concrete size at runtime (see
   clib_mem_log2_page_size_validate). */
59 CLIB_MEM_PAGE_SZ_UNKNOWN = 0,
60 CLIB_MEM_PAGE_SZ_DEFAULT = 1, /* resolved to system page size */
61 CLIB_MEM_PAGE_SZ_DEFAULT_HUGE = 2, /* resolved to default hugepage size */
62 CLIB_MEM_PAGE_SZ_4K = 12,
63 CLIB_MEM_PAGE_SZ_16K = 14,
64 CLIB_MEM_PAGE_SZ_64K = 16,
65 CLIB_MEM_PAGE_SZ_1M = 20,
66 CLIB_MEM_PAGE_SZ_2M = 21,
67 CLIB_MEM_PAGE_SZ_16M = 24,
68 CLIB_MEM_PAGE_SZ_32M = 25,
69 CLIB_MEM_PAGE_SZ_512M = 29,
70 CLIB_MEM_PAGE_SZ_1G = 30,
71 CLIB_MEM_PAGE_SZ_16G = 34,
/* Bookkeeping record for one VM mapping created by clib_mem_vm_map*();
   kept on the doubly-linked list rooted at clib_mem_main first_map /
   last_map.  NOTE(review): some members of this struct are not visible
   in this view of the file. */
74 typedef struct _clib_mem_vm_map_hdr
82 /* page size (log2) */
83 clib_mem_page_sz_t log2_page_sz;
85 /* file descriptor, -1 if memory is not shared */
89 #define CLIB_VM_MAP_HDR_NAME_MAX_LEN 64
/* human-readable name of the mapping */
90 char name[CLIB_VM_MAP_HDR_NAME_MAX_LEN];
/* linkage for the global list of map headers */
93 struct _clib_mem_vm_map_hdr *prev, *next;
94 } clib_mem_vm_map_hdr_t;
/* X-macro list of heap flags: _(bit, NAME, "string") */
96 #define foreach_clib_mem_heap_flag \
97 _(0, LOCKED, "locked") \
98 _(1, UNMAP_ON_DESTROY, "unmap-on-destroy")
/* expand the list into CLIB_MEM_HEAP_F_* single-bit mask values */
102 #define _(i, v, s) CLIB_MEM_HEAP_F_##v = (1 << i),
103 foreach_clib_mem_heap_flag
105 } clib_mem_heap_flag_t;
/* NOTE(review): interior of the heap descriptor struct; the opening
   "typedef struct" and several members (e.g. the mspace pointer, name
   field) are not visible in this view of the file. */
112 /* dlmalloc mspace */
118 /* page size (log2) */
119 clib_mem_page_sz_t log2_page_sz:8;
/* CLIB_MEM_HEAP_F_* flags, packed into 8 bits */
122 clib_mem_heap_flag_t flags:8;
124 /* name - _MUST_ be last */
/* NOTE(review): interior of the global memory-main struct
   (clib_mem_main_t); the struct opener is not visible in this view. */
130 /* log2 system page size */
131 clib_mem_page_sz_t log2_page_sz;
133 /* log2 default hugepage size */
134 clib_mem_page_sz_t log2_default_hugepage_sz;
136 /* log2 system default hugepage size */
137 clib_mem_page_sz_t log2_sys_default_hugepage_sz;
139 /* bitmap of available numa nodes */
140 u32 numa_node_bitmap;
/* per-thread heaps, indexed by os thread index */
143 void *per_cpu_mheaps[CLIB_MAX_MHEAPS];
/* per-numa-node heaps, indexed by numa node id */
146 void *per_numa_mheaps[CLIB_MAX_NUMAS];
/* doubly-linked list of VM map headers (see clib_mem_vm_map_hdr_t) */
149 clib_mem_vm_map_hdr_t *first_map, *last_map;
158 extern clib_mem_main_t clib_mem_main;
160 /* Unspecified NUMA socket */
161 #define VEC_NUMA_UNSPECIFIED (0xFF)
/* Return the heap used by the calling thread, selected by its os
   thread index. */
163 always_inline clib_mem_heap_t *
164 clib_mem_get_per_cpu_heap (void)
166 int cpu = os_get_thread_index ();
167 return clib_mem_main.per_cpu_mheaps[cpu];
/* Install new_heap as the calling thread's heap; the previous heap is
   captured in `old` (presumably returned to the caller -- the return
   statement is not visible in this view). */
171 clib_mem_set_per_cpu_heap (void *new_heap)
173 int cpu = os_get_thread_index ();
174 void *old = clib_mem_main.per_cpu_mheaps[cpu];
175 clib_mem_main.per_cpu_mheaps[cpu] = new_heap;
/* Return the heap assigned to the given numa node (asserts the id is
   in range). */
180 clib_mem_get_per_numa_heap (u32 numa_id)
182 ASSERT (numa_id < ARRAY_LEN (clib_mem_main.per_numa_mheaps));
183 return clib_mem_main.per_numa_mheaps[numa_id];
/* Install new_heap for the calling thread's numa node; the previous
   heap is captured in `old` (return not visible in this view). */
187 clib_mem_set_per_numa_heap (void *new_heap)
189 int numa = os_get_numa_index ();
190 void *old = clib_mem_main.per_numa_mheaps[numa];
191 clib_mem_main.per_numa_mheaps[numa] = new_heap;
/* Claim a free slot in per_cpu_mheaps for this thread (seeding it with
   slot 0's heap pointer) and record the slot as the thread index. */
196 clib_mem_set_thread_index (void)
199 * Find an unused slot in the per-cpu-mheaps array,
200 * and grab it for this thread. We need to be able to
201 * push/pop the thread heap without affecting other thread(s).
204 if (__os_thread_index != 0)
206 for (i = 0; i < ARRAY_LEN (clib_mem_main.per_cpu_mheaps); i++)
207 if (clib_atomic_bool_cmp_and_swap (&clib_mem_main.per_cpu_mheaps[i],
208 0, clib_mem_main.per_cpu_mheaps[0]))
210 os_set_thread_index (i);
213 ASSERT (__os_thread_index > 0);
/* Usable size of heap object p, without asserting that p is a valid
   heap object (cf. clib_mem_size). */
217 clib_mem_size_nocheck (void *p)
/* local prototype for the dlmalloc helper */
219 size_t mspace_usable_size_with_delta (const void *p);
220 return mspace_usable_size_with_delta (p);
223 /* Memory allocator which may call os_out_of_memory() if it fails */
225 clib_mem_alloc_aligned_at_offset (uword size, uword align, uword align_offset,
226 int os_out_of_memory_on_failure)
/* local prototype for the dlmalloc aligned allocator */
228 void *mspace_get_aligned (void *msp, unsigned long n_user_data_bytes,
229 unsigned long align, unsigned long align_offset);
230 clib_mem_heap_t *h = clib_mem_get_per_cpu_heap ();
/* normalize align_offset into range; NOTE(review): the branch
   structure around the two assignments below is only partly visible
   in this view */
233 if (align_offset > align)
236 align_offset %= align;
238 align_offset = align;
241 p = mspace_get_aligned (h->mspace, size, align, align_offset);
/* allocation failed: optionally invoke the fatal OOM handler */
243 if (PREDICT_FALSE (0 == p))
245 if (os_out_of_memory_on_failure)
/* tell AddressSanitizer the user region is now valid */
250 CLIB_MEM_UNPOISON (p, size);
254 /* Memory allocator which calls os_out_of_memory() when it fails */
256 clib_mem_alloc (uword size)
258 return clib_mem_alloc_aligned_at_offset (size, /* align */ 1,
259 /* align_offset */ 0,
260 /* os_out_of_memory */ 1);
/* As clib_mem_alloc, but with caller-specified alignment. */
264 clib_mem_alloc_aligned (uword size, uword align)
266 return clib_mem_alloc_aligned_at_offset (size, align, /* align_offset */ 0,
267 /* os_out_of_memory */ 1);
270 /* Memory allocator which returns 0 instead of calling
   os_out_of_memory() when it fails */
272 clib_mem_alloc_or_null (uword size)
274 return clib_mem_alloc_aligned_at_offset (size, /* align */ 1,
275 /* align_offset */ 0,
276 /* os_out_of_memory */ 0);
/* As clib_mem_alloc_or_null, but with caller-specified alignment. */
280 clib_mem_alloc_aligned_or_null (uword size, uword align)
282 return clib_mem_alloc_aligned_at_offset (size, align, /* align_offset */ 0,
283 /* os_out_of_memory */ 0);
/* Fail-hard allocation helpers (panic on failure) and an alloca alias.
   No comments are inserted inside the macro bodies below because the
   backslash line continuations must remain contiguous. */
288 /* Memory allocator which panics when it fails.
289 Use macro so that clib_panic macro can expand __FUNCTION__ and __LINE__. */
290 #define clib_mem_alloc_aligned_no_fail(size,align) \
292 uword _clib_mem_alloc_size = (size); \
293 void * _clib_mem_alloc_p; \
294 _clib_mem_alloc_p = clib_mem_alloc_aligned (_clib_mem_alloc_size, (align)); \
295 if (! _clib_mem_alloc_p) \
296 clib_panic ("failed to allocate %d bytes", _clib_mem_alloc_size); \
300 #define clib_mem_alloc_no_fail(size) clib_mem_alloc_aligned_no_fail(size,1)
302 /* Alias to stack allocator for naming consistency. */
303 #define clib_mem_alloc_stack(bytes) __builtin_alloca(bytes)
/* True if p is a live object in the calling thread's heap. */
306 clib_mem_is_heap_object (void *p)
/* local prototype for the dlmalloc helper */
308 int mspace_is_heap_object (void *msp, void *p);
309 clib_mem_heap_t *h = clib_mem_get_per_cpu_heap ();
310 return mspace_is_heap_object (h->mspace, p);
/* Return p to the calling thread's heap. */
314 clib_mem_free (void *p)
316 void mspace_put (void *msp, void *p_arg);
317 clib_mem_heap_t *h = clib_mem_get_per_cpu_heap ();
319 /* Make sure object is in the correct heap. */
320 ASSERT (clib_mem_is_heap_object (p));
/* mark the region invalid for AddressSanitizer before releasing it */
322 CLIB_MEM_POISON (p, clib_mem_size_nocheck (p));
324 mspace_put (h->mspace, p);
/* Resize p to new_size via allocate-and-copy; old_size is the
   caller-tracked size of p.  NOTE(review): the free of the old object
   and the return of the new pointer are not visible in this view. */
328 clib_mem_realloc (void *p, uword new_size, uword old_size)
330 /* By default use alloc, copy and free to emulate realloc. */
331 void *q = clib_mem_alloc (new_size);
/* copy min (old_size, new_size) bytes */
335 if (old_size < new_size)
336 copy_size = old_size;
338 copy_size = new_size;
339 clib_memcpy_fast (q, p, copy_size);
/* Usable size of p; asserts that p is a valid heap object first. */
346 clib_mem_size (void *p)
348 ASSERT (clib_mem_is_heap_object (p));
349 return clib_mem_size_nocheck (p);
/* Secure free: zeroize the object before releasing it.
   memset_s_inline is used so the clear cannot be optimized away. */
353 clib_mem_free_s (void *p)
355 uword size = clib_mem_size (p);
356 CLIB_MEM_UNPOISON (p, size);
357 memset_s_inline (p, size, 0, size);
/* Current thread's heap (alias for clib_mem_get_per_cpu_heap). */
361 always_inline clib_mem_heap_t *
362 clib_mem_get_heap (void)
364 return clib_mem_get_per_cpu_heap ();
/* Install `heap` for the current thread, returning whatever
   clib_mem_set_per_cpu_heap yields (the previous heap, per its
   definition above). */
367 always_inline clib_mem_heap_t *
368 clib_mem_set_heap (clib_mem_heap_t * heap)
370 return clib_mem_set_per_cpu_heap (heap);
373 void clib_mem_destroy_heap (clib_mem_heap_t * heap);
374 clib_mem_heap_t *clib_mem_create_heap (void *base, uword size, int is_locked,
377 void clib_mem_main_init ();
378 void *clib_mem_init (void *base, uword size);
379 void *clib_mem_init_with_page_size (uword memory_size,
380 clib_mem_page_sz_t log2_page_sz);
381 void *clib_mem_init_thread_safe (void *memory, uword memory_size);
383 void clib_mem_exit (void);
385 void clib_mem_trace (int enable);
387 int clib_mem_is_traced (void);
391 /* Total number of objects allocated. */
394 /* Total allocated bytes. Bytes used and free.
395 used + free = total */
396 uword bytes_total, bytes_used, bytes_free;
398 /* Number of bytes used by mheap data structure overhead
399 (e.g. free lists, mheap header). */
400 uword bytes_overhead;
402 /* Amount of free space returned to operating system. */
403 uword bytes_free_reclaimed;
405 /* For malloc which puts small objects in sbrk region and
406 large objects in mmap'ed regions. */
407 uword bytes_used_sbrk;
408 uword bytes_used_mmap;
410 /* Max. number of bytes in this heap. */
414 void clib_mem_get_heap_usage (clib_mem_heap_t * heap,
415 clib_mem_usage_t * usage);
417 void *clib_mem_get_heap_base (clib_mem_heap_t * heap);
418 uword clib_mem_get_heap_size (clib_mem_heap_t * heap);
419 uword clib_mem_get_heap_free_space (clib_mem_heap_t * heap);
421 u8 *format_clib_mem_usage (u8 * s, va_list * args);
422 u8 *format_clib_mem_heap (u8 * s, va_list * va);
423 u8 *format_clib_mem_page_stats (u8 * s, va_list * va);
425 /* Allocate virtual address space. */
427 clib_mem_vm_alloc (uword size)
430 uword flags = MAP_PRIVATE;
/* anonymous mapping: not backed by a file */
433 flags |= MAP_ANONYMOUS;
436 mmap_addr = mmap (0, size, PROT_READ | PROT_WRITE, flags, -1, 0);
/* mmap reports failure as (void *) -1 (MAP_FAILED); NOTE(review): the
   error-path body and the success return are not visible in this view */
437 if (mmap_addr == (void *) -1)
440 CLIB_MEM_UNPOISON (mmap_addr, size);
446 clib_mem_vm_free (void *addr, uword size)
451 void *clib_mem_vm_map_internal (void *base, clib_mem_page_sz_t log2_page_sz,
452 uword size, int fd, uword offset, char *name);
454 void *clib_mem_vm_map (void *start, uword size,
455 clib_mem_page_sz_t log2_page_size, char *fmt, ...);
456 void *clib_mem_vm_map_stack (uword size, clib_mem_page_sz_t log2_page_size,
458 void *clib_mem_vm_map_shared (void *start, uword size, int fd, uword offset,
460 int clib_mem_vm_unmap (void *base);
461 clib_mem_vm_map_hdr_t *clib_mem_vm_get_next_map_hdr (clib_mem_vm_map_hdr_t *
/* log2 of the system page size recorded in clib_mem_main. */
464 static_always_inline clib_mem_page_sz_t
465 clib_mem_get_log2_page_size (void)
467 return clib_mem_main.log2_page_sz;
/* System page size in bytes. */
470 static_always_inline uword
471 clib_mem_get_page_size (void)
473 return 1ULL << clib_mem_main.log2_page_sz;
/* Override the hugepage size used when CLIB_MEM_PAGE_SZ_DEFAULT_HUGE
   is requested. */
476 static_always_inline void
477 clib_mem_set_log2_default_hugepage_size (clib_mem_page_sz_t log2_page_sz)
479 clib_mem_main.log2_default_hugepage_sz = log2_page_sz;
/* log2 of the current default hugepage size. */
482 static_always_inline clib_mem_page_sz_t
483 clib_mem_get_log2_default_hugepage_size ()
485 return clib_mem_main.log2_default_hugepage_sz;
/* Default hugepage size in bytes. */
488 static_always_inline uword
489 clib_mem_get_default_hugepage_size (void)
491 return 1ULL << clib_mem_main.log2_default_hugepage_sz;
494 int clib_mem_vm_create_fd (clib_mem_page_sz_t log2_page_size, char *fmt, ...);
495 uword clib_mem_get_fd_page_size (int fd);
496 clib_mem_page_sz_t clib_mem_get_fd_log2_page_size (int fd);
497 uword clib_mem_vm_reserve (uword start, uword size,
498 clib_mem_page_sz_t log2_page_sz);
499 u64 *clib_mem_vm_get_paddr (void *mem, clib_mem_page_sz_t log2_page_size,
501 void clib_mem_destroy (void);
502 int clib_mem_set_numa_affinity (u8 numa_node, int force);
503 int clib_mem_set_default_numa_affinity ();
504 void clib_mem_vm_randomize_va (uword * requested_va,
505 clib_mem_page_sz_t log2_page_size);
506 void mheap_trace (clib_mem_heap_t * v, int enable);
507 uword clib_mem_trace_enable_disable (uword enable);
508 void clib_mem_trace (int enable);
/* Round size up to a multiple of the given page size; the DEFAULT and
   DEFAULT_HUGE markers are first resolved to concrete sizes.
   UNKNOWN is rejected by assertion. */
511 clib_mem_round_to_page_size (uword size, clib_mem_page_sz_t log2_page_size)
513 ASSERT (log2_page_size != CLIB_MEM_PAGE_SZ_UNKNOWN);
515 if (log2_page_size == CLIB_MEM_PAGE_SZ_DEFAULT)
516 log2_page_size = clib_mem_get_log2_page_size ();
517 else if (log2_page_size == CLIB_MEM_PAGE_SZ_DEFAULT_HUGE)
518 log2_page_size = clib_mem_get_log2_default_hugepage_size ();
520 return round_pow2 (size, 1ULL << log2_page_size);
/* NOTE(review): interior of clib_mem_page_stats_t; the struct opener
   and some members are not visible in this view of the file. */
525 clib_mem_page_sz_t log2_page_sz;
/* per-numa-node page counts */
529 uword per_numa[CLIB_MAX_NUMAS];
531 } clib_mem_page_stats_t;
533 void clib_mem_get_page_stats (void *start, clib_mem_page_sz_t log2_page_size,
534 uword n_pages, clib_mem_page_stats_t * stats);
/* Next numa node after `numa` that is set in numa_node_bitmap.
   NOTE(review): the guard(s) around the mask step and the
   no-remaining-node return path are not visible in this view. */
536 static_always_inline int
537 vlib_mem_get_next_numa_node (int numa)
539 clib_mem_main_t *mm = &clib_mem_main;
540 u32 bitmap = mm->numa_node_bitmap;
/* clear bits for nodes <= numa, then take the lowest remaining bit */
543 bitmap &= ~pow2_mask (numa + 1);
547 return count_trailing_zeros (bitmap);
/* Resolve the DEFAULT / DEFAULT_HUGE markers to a concrete log2 page
   size; concrete values pass through unchanged. */
550 static_always_inline clib_mem_page_sz_t
551 clib_mem_log2_page_size_validate (clib_mem_page_sz_t log2_page_size)
553 if (log2_page_size == CLIB_MEM_PAGE_SZ_DEFAULT)
554 return clib_mem_get_log2_page_size ();
555 if (log2_page_size == CLIB_MEM_PAGE_SZ_DEFAULT_HUGE)
556 return clib_mem_get_log2_default_hugepage_size ();
557 return log2_page_size;
/* Page size in bytes for a (possibly symbolic) log2 page size. */
560 static_always_inline uword
561 clib_mem_page_bytes (clib_mem_page_sz_t log2_page_size)
563 return 1ULL << clib_mem_log2_page_size_validate (log2_page_size);
/* Last error recorded in clib_mem_main.error.  NOTE(review): the
   `error` member itself is not visible in this view of
   clib_mem_main_t. */
566 static_always_inline clib_error_t *
567 clib_mem_get_last_error (void)
569 return clib_mem_main.error;
574 typedef void *clib_mem_bulk_handle_t;
575 clib_mem_bulk_handle_t clib_mem_bulk_init (u32 elt_sz, u32 align,
576 u32 min_elts_per_chunk);
577 void clib_mem_bulk_destroy (clib_mem_bulk_handle_t h);
578 void *clib_mem_bulk_alloc (clib_mem_bulk_handle_t h);
579 void clib_mem_bulk_free (clib_mem_bulk_handle_t h, void *p);
580 u8 *format_clib_mem_bulk (u8 *s, va_list *args);
582 #include <vppinfra/error.h> /* clib_panic */
584 #endif /* _included_clib_mem_h */
587 * fd.io coding-style-patch-verification: ON
590 * eval: (c-set-style "gnu")