2 * Copyright (c) 2015 Cisco and/or its affiliates.
3 * Licensed under the Apache License, Version 2.0 (the "License");
4 * you may not use this file except in compliance with the License.
5 * You may obtain a copy of the License at:
7 * http://www.apache.org/licenses/LICENSE-2.0
9 * Unless required by applicable law or agreed to in writing, software
10 * distributed under the License is distributed on an "AS IS" BASIS,
11 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 * See the License for the specific language governing permissions and
13 * limitations under the License.
16 Copyright (c) 2001, 2002, 2003 Eliot Dresselhaus
18 Permission is hereby granted, free of charge, to any person obtaining
19 a copy of this software and associated documentation files (the
20 "Software"), to deal in the Software without restriction, including
21 without limitation the rights to use, copy, modify, merge, publish,
22 distribute, sublicense, and/or sell copies of the Software, and to
23 permit persons to whom the Software is furnished to do so, subject to
24 the following conditions:
26 The above copyright notice and this permission notice shall be
27 included in all copies or substantial portions of the Software.
29 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
30 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
31 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
32 NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
33 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
34 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
35 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
38 #ifndef _included_clib_mem_h
39 #define _included_clib_mem_h
45 #include <vppinfra/clib.h> /* uword, etc */
46 #include <vppinfra/clib_error.h>
48 #include <vppinfra/os.h>
49 #include <vppinfra/string.h> /* memcpy, clib_memset */
50 #include <vppinfra/sanitizer.h>
/* Upper bound on per-thread heaps; sizes the per_cpu_mheaps[] array below. */
52 #define CLIB_MAX_MHEAPS 256
/* Upper bound on NUMA nodes; sizes the per_numa_mheaps[] array below. */
53 #define CLIB_MAX_NUMAS 16
/* Sentinel (all-ones pointer) returned by the VM mapping API on failure. */
54 #define CLIB_MEM_VM_MAP_FAILED ((void *) ~0)
/* Generic error return code for this API. */
55 #define CLIB_MEM_ERROR (-1)
/* Minimum allocation alignment, expressed as log2: 1 << 3 = 8 bytes. */
56 #define CLIB_MEM_LOG2_MIN_ALIGN (3)
57 #define CLIB_MEM_MIN_ALIGN (1 << CLIB_MEM_LOG2_MIN_ALIGN)
/* Page-size selector enumeration (the typedef/enum opener is elided in this
   listing — presumably `typedef enum { ... } clib_mem_page_sz_t;`).
   Values 0-2 are symbolic; values >= 12 are the log2 of the page size in
   bytes (e.g. 4K => 12 since 1 << 12 == 4096, 2M => 21, 1G => 30). */
61 CLIB_MEM_PAGE_SZ_UNKNOWN = 0,
/* Use the system default (small) page size. */
62 CLIB_MEM_PAGE_SZ_DEFAULT = 1,
/* Use the configured default hugepage size. */
63 CLIB_MEM_PAGE_SZ_DEFAULT_HUGE = 2,
64 CLIB_MEM_PAGE_SZ_4K = 12,
65 CLIB_MEM_PAGE_SZ_16K = 14,
66 CLIB_MEM_PAGE_SZ_64K = 16,
67 CLIB_MEM_PAGE_SZ_1M = 20,
68 CLIB_MEM_PAGE_SZ_2M = 21,
69 CLIB_MEM_PAGE_SZ_16M = 24,
70 CLIB_MEM_PAGE_SZ_32M = 25,
71 CLIB_MEM_PAGE_SZ_512M = 29,
72 CLIB_MEM_PAGE_SZ_1G = 30,
73 CLIB_MEM_PAGE_SZ_16G = 34,
/* Per-mapping header describing one virtual-memory mapping created by the
   clib_mem_vm_map* API.  Headers are kept on a doubly-linked list (prev/next;
   list heads first_map/last_map live in clib_mem_main_t below).
   NOTE(review): several members are elided in this listing — e.g. the file
   descriptor the comment at original line 87 refers to is not shown. */
76 typedef struct _clib_mem_vm_map_hdr
84 /* page size (log2) */
85 clib_mem_page_sz_t log2_page_sz;
87 /* file descriptor, -1 if memory is not shared */
/* Fixed-size, human-readable mapping name (for display / debugging). */
91 #define CLIB_VM_MAP_HDR_NAME_MAX_LEN 64
92 char name[CLIB_VM_MAP_HDR_NAME_MAX_LEN];
/* Doubly-linked list linkage over all mappings. */
95 struct _clib_mem_vm_map_hdr *prev, *next;
96 } clib_mem_vm_map_hdr_t;
/* X-macro table of heap flags: _(bit, NAME, "print-name").  Expanded below
   to build the enum, and presumably again elsewhere for format functions. */
98 #define foreach_clib_mem_heap_flag \
99 _ (0, LOCKED, "locked") \
100 _ (1, UNMAP_ON_DESTROY, "unmap-on-destroy") \
101 _ (2, TRACED, "traced")
/* Each table row becomes CLIB_MEM_HEAP_F_<NAME> = (1 << bit).
   (The enum opener is elided in this listing.) */
105 #define _(i, v, s) CLIB_MEM_HEAP_F_##v = (1 << i),
106 foreach_clib_mem_heap_flag
108 } clib_mem_heap_flag_t;
/* NOTE(review): two struct definitions follow, both with their openers and
   some members elided in this listing — presumably `clib_mem_heap_t`
   (original lines ~115-127) and `clib_mem_main_t` (from ~133). */
/* --- heap descriptor fragment: backed by a dlmalloc mspace --- */
115 /* dlmalloc mspace */
121 /* page size (log2) */
122 clib_mem_page_sz_t log2_page_sz:8;
/* CLIB_MEM_HEAP_F_* bits, packed into 8 bits. */
125 clib_mem_heap_flag_t flags:8;
127 /* name - _MUST_ be last */
/* --- global memory-main fragment --- */
133 /* log2 system page size */
134 clib_mem_page_sz_t log2_page_sz;
136 /* log2 default hugepage size */
137 clib_mem_page_sz_t log2_default_hugepage_sz;
139 /* log2 system default hugepage size */
140 clib_mem_page_sz_t log2_sys_default_hugepage_sz;
142 /* bitmap of available numa nodes */
143 u32 numa_node_bitmap;
/* Per-thread heaps, indexed by os_get_thread_index(). */
146 void *per_cpu_mheaps[CLIB_MAX_MHEAPS];
/* Per-NUMA-node heaps, indexed by numa id. */
149 void *per_numa_mheaps[CLIB_MAX_NUMAS];
/* Head/tail of the clib_mem_vm_map_hdr_t mapping list. */
152 clib_mem_vm_map_hdr_t *first_map, *last_map;
/* Single global instance; defined in the corresponding .c file. */
161 extern clib_mem_main_t clib_mem_main;
163 /* Unspecified NUMA socket */
164 #define VEC_NUMA_UNSPECIFIED (0xFF)
/* Return the calling thread's heap (per_cpu_mheaps slot for this thread).
   NOTE(review): function-body braces are elided throughout this listing. */
166 always_inline clib_mem_heap_t *
167 clib_mem_get_per_cpu_heap (void)
169 int cpu = os_get_thread_index ();
170 return clib_mem_main.per_cpu_mheaps[cpu];
/* Install new_heap as the calling thread's heap; `old` captures the
   previous heap — presumably returned by the (elided) return statement. */
174 clib_mem_set_per_cpu_heap (void *new_heap)
176 int cpu = os_get_thread_index ();
177 void *old = clib_mem_main.per_cpu_mheaps[cpu];
178 clib_mem_main.per_cpu_mheaps[cpu] = new_heap;
/* Return the heap bound to the given NUMA node (bounds-checked). */
183 clib_mem_get_per_numa_heap (u32 numa_id)
185 ASSERT (numa_id < ARRAY_LEN (clib_mem_main.per_numa_mheaps));
186 return clib_mem_main.per_numa_mheaps[numa_id];
/* Install new_heap for the caller's current NUMA node; `old` captures the
   previous heap — presumably returned by the (elided) return statement. */
190 clib_mem_set_per_numa_heap (void *new_heap)
192 int numa = os_get_numa_index ();
193 void *old = clib_mem_main.per_numa_mheaps[numa];
194 clib_mem_main.per_numa_mheaps[numa] = new_heap;
/* Claim a free per-cpu-mheaps slot for this thread via atomic CAS,
   seeding it with the main thread's heap (slot 0). */
199 clib_mem_set_thread_index (void)
202 * Find an unused slot in the per-cpu-mheaps array,
203 * and grab it for this thread. We need to be able to
204 * push/pop the thread heap without affecting other thread(s).
/* Already assigned a slot — nothing to do (early-out is elided). */
207 if (__os_thread_index != 0)
209 for (i = 0; i < ARRAY_LEN (clib_mem_main.per_cpu_mheaps); i++)
210 if (clib_atomic_bool_cmp_and_swap (&clib_mem_main.per_cpu_mheaps[i],
211 0, clib_mem_main.per_cpu_mheaps[0]))
213 os_set_thread_index (i);
/* Slot 0 is reserved for the main thread; every other thread must win
   a non-zero slot. */
216 ASSERT (__os_thread_index > 0);
219 /* Memory allocator which calls os_out_of_memory() when it fails */
220 void *clib_mem_alloc (uword size);
221 void *clib_mem_alloc_aligned (uword size, uword align);
/* _or_null variants return NULL on failure instead of calling
   os_out_of_memory(). */
222 void *clib_mem_alloc_or_null (uword size);
223 void *clib_mem_alloc_aligned_or_null (uword size, uword align);
224 void *clib_mem_realloc (void *p, uword new_size);
225 void *clib_mem_realloc_aligned (void *p, uword new_size, uword align);
/* Non-zero iff p was allocated from the current heap. */
226 uword clib_mem_is_heap_object (void *p);
227 void clib_mem_free (void *p);
/* Heap-explicit variants of the allocators above: operate on the given
   heap rather than the calling thread's heap.
   NOTE(review): some continuation lines of these prototypes are elided. */
229 void *clib_mem_heap_alloc (void *heap, uword size);
230 void *clib_mem_heap_alloc_aligned (void *heap, uword size, uword align);
231 void *clib_mem_heap_alloc_or_null (void *heap, uword size);
232 void *clib_mem_heap_alloc_aligned_or_null (void *heap, uword size,
234 void *clib_mem_heap_realloc (void *heap, void *p, uword new_size);
235 void *clib_mem_heap_realloc_aligned (void *heap, void *p, uword new_size,
237 uword clib_mem_heap_is_heap_object (void *heap, void *p);
238 void clib_mem_heap_free (void *heap, void *p);
/* Usable size of allocation p. */
240 uword clib_mem_size (void *p);
/* Secure free: presumably scrubs the memory before freeing — confirm in
   the implementation. */
241 void clib_mem_free_s (void *p);
243 /* Memory allocator which panics when it fails.
244 Use macro so that clib_panic macro can expand __FUNCTION__ and __LINE__. */
245 #define clib_mem_alloc_aligned_no_fail(size,align) \
247 uword _clib_mem_alloc_size = (size); \
248 void * _clib_mem_alloc_p; \
249 _clib_mem_alloc_p = clib_mem_alloc_aligned (_clib_mem_alloc_size, (align)); \
250 if (! _clib_mem_alloc_p) \
251 clib_panic ("failed to allocate %d bytes", _clib_mem_alloc_size); \
255 #define clib_mem_alloc_no_fail(size) clib_mem_alloc_aligned_no_fail(size,1)
257 /* Alias to stack allocator for naming consistency. */
258 #define clib_mem_alloc_stack(bytes) __builtin_alloca(bytes)
/* Convenience wrappers: "the heap" is the calling thread's per-cpu heap. */
260 always_inline clib_mem_heap_t *
261 clib_mem_get_heap (void)
263 return clib_mem_get_per_cpu_heap ();
/* Swap in a new heap for this thread; returns the previous heap so the
   caller can restore it (push/pop pattern). */
266 always_inline clib_mem_heap_t *
267 clib_mem_set_heap (clib_mem_heap_t * heap)
269 return clib_mem_set_per_cpu_heap (heap);
/* Heap lifecycle and global initialization.
   NOTE(review): trailing parameters of clib_mem_create_heap are elided. */
272 void clib_mem_destroy_heap (clib_mem_heap_t * heap);
273 clib_mem_heap_t *clib_mem_create_heap (void *base, uword size, int is_locked,
276 void clib_mem_main_init ();
277 void *clib_mem_init (void *base, uword size);
278 void *clib_mem_init_with_page_size (uword memory_size,
279 clib_mem_page_sz_t log2_page_sz);
280 void *clib_mem_init_thread_safe (void *memory, uword memory_size);
282 void clib_mem_exit (void);
/* Enable/disable allocation tracing on the current heap. */
284 void clib_mem_trace (int enable);
286 int clib_mem_is_traced (void);
/* Heap usage statistics (the `typedef struct` opener and the object-count
   member are elided in this listing; closer at original line ~311). */
290 /* Total number of objects allocated. */
293 /* Total allocated bytes. Bytes used and free.
294 used + free = total */
295 uword bytes_total, bytes_used, bytes_free;
297 /* Number of bytes used by mheap data structure overhead
298 (e.g. free lists, mheap header). */
299 uword bytes_overhead;
301 /* Amount of free space returned to operating system. */
302 uword bytes_free_reclaimed;
304 /* For malloc which puts small objects in sbrk region and
305 large objects in mmap'ed regions. */
306 uword bytes_used_sbrk;
307 uword bytes_used_mmap;
309 /* Max. number of bytes in this heap. */
/* Fill *usage with statistics for the given heap. */
313 void clib_mem_get_heap_usage (clib_mem_heap_t * heap,
314 clib_mem_usage_t * usage);
/* Heap introspection accessors. */
316 void *clib_mem_get_heap_base (clib_mem_heap_t * heap);
317 uword clib_mem_get_heap_size (clib_mem_heap_t * heap);
318 uword clib_mem_get_heap_free_space (clib_mem_heap_t * heap);
/* vppinfra format() helpers for printing usage / heap / page stats. */
320 u8 *format_clib_mem_usage (u8 * s, va_list * args);
321 u8 *format_clib_mem_heap (u8 * s, va_list * va);
322 u8 *format_clib_mem_page_stats (u8 * s, va_list * va);
324 /* Allocate virtual address space. */
/* Inline mmap wrapper: private anonymous read/write mapping.
   NOTE(review): body braces, declarations and the failure-path return are
   elided in this listing. */
326 clib_mem_vm_alloc (uword size)
329 uword flags = MAP_PRIVATE;
332 flags |= MAP_ANONYMOUS;
335 mmap_addr = mmap (0, size, PROT_READ | PROT_WRITE, flags, -1, 0);
336 if (mmap_addr == (void *) -1)
/* Tell the sanitizer the fresh mapping is addressable. */
339 CLIB_MEM_UNPOISON (mmap_addr, size);
/* Release address space obtained from clib_mem_vm_alloc (munmap —
   body elided). */
345 clib_mem_vm_free (void *addr, uword size)
/* Tracked mapping API: mappings are recorded as clib_mem_vm_map_hdr_t
   entries.  fmt/... variants take a printf-style mapping name.
   NOTE(review): trailing parameters of several prototypes are elided. */
350 void *clib_mem_vm_map_internal (void *base, clib_mem_page_sz_t log2_page_sz,
351 uword size, int fd, uword offset, char *name);
353 void *clib_mem_vm_map (void *start, uword size,
354 clib_mem_page_sz_t log2_page_size, char *fmt, ...);
355 void *clib_mem_vm_map_stack (uword size, clib_mem_page_sz_t log2_page_size,
357 void *clib_mem_vm_map_shared (void *start, uword size, int fd, uword offset,
359 int clib_mem_vm_unmap (void *base);
/* Iterate the mapping list; presumably pass NULL to get the first entry. */
360 clib_mem_vm_map_hdr_t *clib_mem_vm_get_next_map_hdr (clib_mem_vm_map_hdr_t *
/* System page size, as log2 (cached in clib_mem_main). */
363 static_always_inline clib_mem_page_sz_t
364 clib_mem_get_log2_page_size (void)
366 return clib_mem_main.log2_page_sz;
/* System page size in bytes. */
369 static_always_inline uword
370 clib_mem_get_page_size (void)
372 return 1ULL << clib_mem_main.log2_page_sz;
/* Override the default hugepage size used by CLIB_MEM_PAGE_SZ_DEFAULT_HUGE. */
375 static_always_inline void
376 clib_mem_set_log2_default_hugepage_size (clib_mem_page_sz_t log2_page_sz)
378 clib_mem_main.log2_default_hugepage_sz = log2_page_sz;
381 static_always_inline clib_mem_page_sz_t
382 clib_mem_get_log2_default_hugepage_size ()
384 return clib_mem_main.log2_default_hugepage_sz;
/* Default hugepage size in bytes. */
387 static_always_inline uword
388 clib_mem_get_default_hugepage_size (void)
390 return 1ULL << clib_mem_main.log2_default_hugepage_sz;
/* Create a memory fd (presumably memfd/hugetlbfs-backed) with a
   printf-style name. */
393 int clib_mem_vm_create_fd (clib_mem_page_sz_t log2_page_size, char *fmt, ...);
394 uword clib_mem_get_fd_page_size (int fd);
395 clib_mem_page_sz_t clib_mem_get_fd_log2_page_size (int fd);
/* Reserve (but do not commit) a virtual address range. */
396 uword clib_mem_vm_reserve (uword start, uword size,
397 clib_mem_page_sz_t log2_page_sz);
/* Translate virtual pages to physical addresses (trailing parameter is
   elided in this listing). */
398 u64 *clib_mem_vm_get_paddr (void *mem, clib_mem_page_sz_t log2_page_size,
400 void clib_mem_destroy (void);
/* NUMA affinity control; `force` presumably overrides an existing policy. */
401 int clib_mem_set_numa_affinity (u8 numa_node, int force);
402 int clib_mem_set_default_numa_affinity ();
403 void clib_mem_vm_randomize_va (uword * requested_va,
404 clib_mem_page_sz_t log2_page_size);
/* Allocation tracing hooks (mheap_trace targets a specific heap). */
405 void mheap_trace (clib_mem_heap_t * v, int enable);
406 uword clib_mem_trace_enable_disable (uword enable);
407 void clib_mem_trace (int enable);
/* Round `size` up to a multiple of the page size selected by
   log2_page_size, resolving the symbolic DEFAULT / DEFAULT_HUGE selectors
   first.  (Return type line and body braces are elided in this listing.) */
410 clib_mem_round_to_page_size (uword size, clib_mem_page_sz_t log2_page_size)
412 ASSERT (log2_page_size != CLIB_MEM_PAGE_SZ_UNKNOWN);
414 if (log2_page_size == CLIB_MEM_PAGE_SZ_DEFAULT)
415 log2_page_size = clib_mem_get_log2_page_size ();
416 else if (log2_page_size == CLIB_MEM_PAGE_SZ_DEFAULT_HUGE)
417 log2_page_size = clib_mem_get_log2_default_hugepage_size ();
419 return round_pow2 (size, 1ULL << log2_page_size);
/* Per-mapping page statistics (struct opener and some members elided;
   per_numa[] counts pages per NUMA node). */
424 clib_mem_page_sz_t log2_page_sz;
428 uword per_numa[CLIB_MAX_NUMAS];
430 } clib_mem_page_stats_t;
/* Fill *stats for n_pages pages starting at `start`. */
432 void clib_mem_get_page_stats (void *start, clib_mem_page_sz_t log2_page_size,
433 uword n_pages, clib_mem_page_stats_t * stats);
/* Next NUMA node after `numa` present in numa_node_bitmap: mask off bits
   0..numa, then take the lowest set bit.  NOTE(review): the wrap-around /
   empty-bitmap branch is elided in this listing. */
435 static_always_inline int
436 vlib_mem_get_next_numa_node (int numa)
438 clib_mem_main_t *mm = &clib_mem_main;
439 u32 bitmap = mm->numa_node_bitmap;
442 bitmap &= ~pow2_mask (numa + 1);
446 return count_trailing_zeros (bitmap);
/* Resolve the symbolic DEFAULT / DEFAULT_HUGE selectors to a concrete
   log2 page size; pass through anything else unchanged. */
449 static_always_inline clib_mem_page_sz_t
450 clib_mem_log2_page_size_validate (clib_mem_page_sz_t log2_page_size)
452 if (log2_page_size == CLIB_MEM_PAGE_SZ_DEFAULT)
453 return clib_mem_get_log2_page_size ();
454 if (log2_page_size == CLIB_MEM_PAGE_SZ_DEFAULT_HUGE)
455 return clib_mem_get_log2_default_hugepage_size ();
456 return log2_page_size;
/* Page size in bytes for a (possibly symbolic) selector. */
459 static_always_inline uword
460 clib_mem_page_bytes (clib_mem_page_sz_t log2_page_size)
462 return 1ULL << clib_mem_log2_page_size_validate (log2_page_size);
/* Last error recorded by this module (the `error` member of
   clib_mem_main_t is elided in the struct listing above). */
465 static_always_inline clib_error_t *
466 clib_mem_get_last_error (void)
468 return clib_mem_main.error;
/* Bulk allocator: fixed-size elements carved from larger chunks.
   The handle is opaque; init parameters fix element size/alignment and
   the minimum number of elements per chunk. */
473 typedef void *clib_mem_bulk_handle_t;
474 clib_mem_bulk_handle_t clib_mem_bulk_init (u32 elt_sz, u32 align,
475 u32 min_elts_per_chunk);
476 void clib_mem_bulk_destroy (clib_mem_bulk_handle_t h);
477 void *clib_mem_bulk_alloc (clib_mem_bulk_handle_t h);
478 void clib_mem_bulk_free (clib_mem_bulk_handle_t h, void *p);
479 u8 *format_clib_mem_bulk (u8 *s, va_list *args);
481 #include <vppinfra/error.h> /* clib_panic */
483 #endif /* _included_clib_mem_h */
486 * fd.io coding-style-patch-verification: ON
489 * eval: (c-set-style "gnu")