/*
 * Copyright (c) 2015 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
/*
  Copyright (c) 2001, 2002, 2003 Eliot Dresselhaus

  Permission is hereby granted, free of charge, to any person obtaining
  a copy of this software and associated documentation files (the
  "Software"), to deal in the Software without restriction, including
  without limitation the rights to use, copy, modify, merge, publish,
  distribute, sublicense, and/or sell copies of the Software, and to
  permit persons to whom the Software is furnished to do so, subject to
  the following conditions:

  The above copyright notice and this permission notice shall be
  included in all copies or substantial portions of the Software.

  THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
  NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
  LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
  OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
  WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
38 #ifndef _included_clib_mem_h
39 #define _included_clib_mem_h
45 #include <vppinfra/clib.h> /* uword, etc */
46 #include <vppinfra/clib_error.h>
48 #include <vppinfra/os.h>
49 #include <vppinfra/string.h> /* memcpy, clib_memset */
50 #include <vppinfra/sanitizer.h>
/* Upper bounds on per-thread and per-NUMA heap slots, and common sentinels. */
#define CLIB_MAX_MHEAPS 256
#define CLIB_MAX_NUMAS 16
#define CLIB_MEM_VM_MAP_FAILED ((void *) ~0)
#define CLIB_MEM_ERROR (-1)
/* Page sizes expressed as log2 of the size in bytes; the first three
   values are symbolic (resolved at runtime against the system). */
typedef enum
{
  CLIB_MEM_PAGE_SZ_UNKNOWN = 0,
  CLIB_MEM_PAGE_SZ_DEFAULT = 1,
  CLIB_MEM_PAGE_SZ_DEFAULT_HUGE = 2,
  CLIB_MEM_PAGE_SZ_4K = 12,
  CLIB_MEM_PAGE_SZ_16K = 14,
  CLIB_MEM_PAGE_SZ_64K = 16,
  CLIB_MEM_PAGE_SZ_1M = 20,
  CLIB_MEM_PAGE_SZ_2M = 21,
  CLIB_MEM_PAGE_SZ_16M = 24,
  CLIB_MEM_PAGE_SZ_32M = 25,
  CLIB_MEM_PAGE_SZ_512M = 29,
  CLIB_MEM_PAGE_SZ_1G = 30,
  CLIB_MEM_PAGE_SZ_16G = 34,
} clib_mem_page_sz_t;
74 typedef struct _clib_mem_vm_map_hdr
82 /* page size (log2) */
83 clib_mem_page_sz_t log2_page_sz;
85 /* file descriptor, -1 if memory is not shared */
89 #define CLIB_VM_MAP_HDR_NAME_MAX_LEN 64
90 char name[CLIB_VM_MAP_HDR_NAME_MAX_LEN];
93 struct _clib_mem_vm_map_hdr *prev, *next;
94 } clib_mem_vm_map_hdr_t;
/* Heap flags: x-macro list expands to CLIB_MEM_HEAP_F_* bit values. */
#define foreach_clib_mem_heap_flag                                            \
  _ (0, LOCKED, "locked")                                                     \
  _ (1, UNMAP_ON_DESTROY, "unmap-on-destroy")

typedef enum
{
#define _(i, v, s) CLIB_MEM_HEAP_F_##v = (1 << i),
  foreach_clib_mem_heap_flag
#undef _
} clib_mem_heap_flag_t;
112 /* dlmalloc mspace */
118 /* page size (log2) */
119 clib_mem_page_sz_t log2_page_sz:8;
122 clib_mem_heap_flag_t flags:8;
124 /* name - _MUST_ be last */
130 /* log2 system page size */
131 clib_mem_page_sz_t log2_page_sz;
133 /* log2 system default hugepage size */
134 clib_mem_page_sz_t log2_default_hugepage_sz;
136 /* bitmap of available numa nodes */
137 u32 numa_node_bitmap;
140 void *per_cpu_mheaps[CLIB_MAX_MHEAPS];
143 void *per_numa_mheaps[CLIB_MAX_NUMAS];
146 clib_mem_vm_map_hdr_t *first_map, *last_map;
155 extern clib_mem_main_t clib_mem_main;
/* Unspecified NUMA socket */
#define VEC_NUMA_UNSPECIFIED (0xFF)
160 always_inline clib_mem_heap_t *
161 clib_mem_get_per_cpu_heap (void)
163 int cpu = os_get_thread_index ();
164 return clib_mem_main.per_cpu_mheaps[cpu];
168 clib_mem_set_per_cpu_heap (void *new_heap)
170 int cpu = os_get_thread_index ();
171 void *old = clib_mem_main.per_cpu_mheaps[cpu];
172 clib_mem_main.per_cpu_mheaps[cpu] = new_heap;
177 clib_mem_get_per_numa_heap (u32 numa_id)
179 ASSERT (numa_id < ARRAY_LEN (clib_mem_main.per_numa_mheaps));
180 return clib_mem_main.per_numa_mheaps[numa_id];
184 clib_mem_set_per_numa_heap (void *new_heap)
186 int numa = os_get_numa_index ();
187 void *old = clib_mem_main.per_numa_mheaps[numa];
188 clib_mem_main.per_numa_mheaps[numa] = new_heap;
193 clib_mem_set_thread_index (void)
196 * Find an unused slot in the per-cpu-mheaps array,
197 * and grab it for this thread. We need to be able to
198 * push/pop the thread heap without affecting other thread(s).
201 if (__os_thread_index != 0)
203 for (i = 0; i < ARRAY_LEN (clib_mem_main.per_cpu_mheaps); i++)
204 if (clib_atomic_bool_cmp_and_swap (&clib_mem_main.per_cpu_mheaps[i],
205 0, clib_mem_main.per_cpu_mheaps[0]))
207 os_set_thread_index (i);
210 ASSERT (__os_thread_index > 0);
214 clib_mem_size_nocheck (void *p)
216 size_t mspace_usable_size_with_delta (const void *p);
217 return mspace_usable_size_with_delta (p);
220 /* Memory allocator which may call os_out_of_memory() if it fails */
222 clib_mem_alloc_aligned_at_offset (uword size, uword align, uword align_offset,
223 int os_out_of_memory_on_failure)
225 void *mspace_get_aligned (void *msp, unsigned long n_user_data_bytes,
226 unsigned long align, unsigned long align_offset);
227 clib_mem_heap_t *h = clib_mem_get_per_cpu_heap ();
230 if (align_offset > align)
233 align_offset %= align;
235 align_offset = align;
238 p = mspace_get_aligned (h->mspace, size, align, align_offset);
240 if (PREDICT_FALSE (0 == p))
242 if (os_out_of_memory_on_failure)
247 CLIB_MEM_UNPOISON (p, size);
251 /* Memory allocator which calls os_out_of_memory() when it fails */
253 clib_mem_alloc (uword size)
255 return clib_mem_alloc_aligned_at_offset (size, /* align */ 1,
256 /* align_offset */ 0,
257 /* os_out_of_memory */ 1);
261 clib_mem_alloc_aligned (uword size, uword align)
263 return clib_mem_alloc_aligned_at_offset (size, align, /* align_offset */ 0,
264 /* os_out_of_memory */ 1);
267 /* Memory allocator which calls os_out_of_memory() when it fails */
269 clib_mem_alloc_or_null (uword size)
271 return clib_mem_alloc_aligned_at_offset (size, /* align */ 1,
272 /* align_offset */ 0,
273 /* os_out_of_memory */ 0);
277 clib_mem_alloc_aligned_or_null (uword size, uword align)
279 return clib_mem_alloc_aligned_at_offset (size, align, /* align_offset */ 0,
280 /* os_out_of_memory */ 0);
/* Memory allocator which panics when it fails.
   Use macro so that clib_panic macro can expand __FUNCTION__ and __LINE__. */
#define clib_mem_alloc_aligned_no_fail(size,align)				\
({										\
  uword _clib_mem_alloc_size = (size);						\
  void * _clib_mem_alloc_p;							\
  _clib_mem_alloc_p = clib_mem_alloc_aligned (_clib_mem_alloc_size, (align));	\
  if (! _clib_mem_alloc_p)							\
    clib_panic ("failed to allocate %d bytes", _clib_mem_alloc_size);		\
  _clib_mem_alloc_p;								\
})

#define clib_mem_alloc_no_fail(size) clib_mem_alloc_aligned_no_fail(size,1)
/* Alias to stack allocator for naming consistency. */
#define clib_mem_alloc_stack(bytes) __builtin_alloca(bytes)
303 clib_mem_is_heap_object (void *p)
305 int mspace_is_heap_object (void *msp, void *p);
306 clib_mem_heap_t *h = clib_mem_get_per_cpu_heap ();
307 return mspace_is_heap_object (h->mspace, p);
311 clib_mem_free (void *p)
313 void mspace_put (void *msp, void *p_arg);
314 clib_mem_heap_t *h = clib_mem_get_per_cpu_heap ();
316 /* Make sure object is in the correct heap. */
317 ASSERT (clib_mem_is_heap_object (p));
319 CLIB_MEM_POISON (p, clib_mem_size_nocheck (p));
321 mspace_put (h->mspace, p);
325 clib_mem_realloc (void *p, uword new_size, uword old_size)
327 /* By default use alloc, copy and free to emulate realloc. */
328 void *q = clib_mem_alloc (new_size);
332 if (old_size < new_size)
333 copy_size = old_size;
335 copy_size = new_size;
336 clib_memcpy_fast (q, p, copy_size);
343 clib_mem_size (void *p)
345 ASSERT (clib_mem_is_heap_object (p));
346 return clib_mem_size_nocheck (p);
350 clib_mem_free_s (void *p)
352 uword size = clib_mem_size (p);
353 CLIB_MEM_UNPOISON (p, size);
354 memset_s_inline (p, size, 0, size);
358 always_inline clib_mem_heap_t *
359 clib_mem_get_heap (void)
361 return clib_mem_get_per_cpu_heap ();
364 always_inline clib_mem_heap_t *
365 clib_mem_set_heap (clib_mem_heap_t * heap)
367 return clib_mem_set_per_cpu_heap (heap);
370 void clib_mem_destroy_heap (clib_mem_heap_t * heap);
371 clib_mem_heap_t *clib_mem_create_heap (void *base, uword size, int is_locked,
374 void clib_mem_main_init ();
375 void *clib_mem_init (void *base, uword size);
376 void *clib_mem_init_with_page_size (uword memory_size,
377 clib_mem_page_sz_t log2_page_sz);
378 void *clib_mem_init_thread_safe (void *memory, uword memory_size);
380 void clib_mem_exit (void);
382 void clib_mem_trace (int enable);
384 int clib_mem_is_traced (void);
388 /* Total number of objects allocated. */
391 /* Total allocated bytes. Bytes used and free.
392 used + free = total */
393 uword bytes_total, bytes_used, bytes_free;
395 /* Number of bytes used by mheap data structure overhead
396 (e.g. free lists, mheap header). */
397 uword bytes_overhead;
399 /* Amount of free space returned to operating system. */
400 uword bytes_free_reclaimed;
402 /* For malloc which puts small objects in sbrk region and
403 large objects in mmap'ed regions. */
404 uword bytes_used_sbrk;
405 uword bytes_used_mmap;
407 /* Max. number of bytes in this heap. */
411 void clib_mem_get_heap_usage (clib_mem_heap_t * heap,
412 clib_mem_usage_t * usage);
414 void *clib_mem_get_heap_base (clib_mem_heap_t * heap);
415 uword clib_mem_get_heap_size (clib_mem_heap_t * heap);
416 uword clib_mem_get_heap_free_space (clib_mem_heap_t * heap);
418 u8 *format_clib_mem_usage (u8 * s, va_list * args);
419 u8 *format_clib_mem_heap (u8 * s, va_list * va);
420 u8 *format_clib_mem_page_stats (u8 * s, va_list * va);
422 /* Allocate virtual address space. */
424 clib_mem_vm_alloc (uword size)
427 uword flags = MAP_PRIVATE;
430 flags |= MAP_ANONYMOUS;
433 mmap_addr = mmap (0, size, PROT_READ | PROT_WRITE, flags, -1, 0);
434 if (mmap_addr == (void *) -1)
437 CLIB_MEM_UNPOISON (mmap_addr, size);
443 clib_mem_vm_free (void *addr, uword size)
448 void *clib_mem_vm_map_internal (void *base, clib_mem_page_sz_t log2_page_sz,
449 uword size, int fd, uword offset, char *name);
451 void *clib_mem_vm_map (void *start, uword size,
452 clib_mem_page_sz_t log2_page_size, char *fmt, ...);
453 void *clib_mem_vm_map_stack (uword size, clib_mem_page_sz_t log2_page_size,
455 void *clib_mem_vm_map_shared (void *start, uword size, int fd, uword offset,
457 int clib_mem_vm_unmap (void *base);
458 clib_mem_vm_map_hdr_t *clib_mem_vm_get_next_map_hdr (clib_mem_vm_map_hdr_t *
461 static_always_inline clib_mem_page_sz_t
462 clib_mem_get_log2_page_size (void)
464 return clib_mem_main.log2_page_sz;
467 static_always_inline uword
468 clib_mem_get_page_size (void)
470 return 1ULL << clib_mem_main.log2_page_sz;
473 static_always_inline clib_mem_page_sz_t
474 clib_mem_get_log2_default_hugepage_size ()
476 return clib_mem_main.log2_default_hugepage_sz;
479 int clib_mem_vm_create_fd (clib_mem_page_sz_t log2_page_size, char *fmt, ...);
480 uword clib_mem_get_fd_page_size (int fd);
481 uword clib_mem_get_default_hugepage_size (void);
482 clib_mem_page_sz_t clib_mem_get_fd_log2_page_size (int fd);
483 uword clib_mem_vm_reserve (uword start, uword size,
484 clib_mem_page_sz_t log2_page_sz);
485 u64 *clib_mem_vm_get_paddr (void *mem, clib_mem_page_sz_t log2_page_size,
487 void clib_mem_destroy (void);
488 int clib_mem_set_numa_affinity (u8 numa_node, int force);
489 int clib_mem_set_default_numa_affinity ();
490 void clib_mem_vm_randomize_va (uword * requested_va,
491 clib_mem_page_sz_t log2_page_size);
492 void mheap_trace (clib_mem_heap_t * v, int enable);
493 uword clib_mem_trace_enable_disable (uword enable);
494 void clib_mem_trace (int enable);
497 clib_mem_round_to_page_size (uword size, clib_mem_page_sz_t log2_page_size)
499 ASSERT (log2_page_size != CLIB_MEM_PAGE_SZ_UNKNOWN);
501 if (log2_page_size == CLIB_MEM_PAGE_SZ_DEFAULT)
502 log2_page_size = clib_mem_get_log2_page_size ();
503 else if (log2_page_size == CLIB_MEM_PAGE_SZ_DEFAULT_HUGE)
504 log2_page_size = clib_mem_get_log2_default_hugepage_size ();
506 return round_pow2 (size, 1ULL << log2_page_size);
511 clib_mem_page_sz_t log2_page_sz;
515 uword per_numa[CLIB_MAX_NUMAS];
517 } clib_mem_page_stats_t;
519 void clib_mem_get_page_stats (void *start, clib_mem_page_sz_t log2_page_size,
520 uword n_pages, clib_mem_page_stats_t * stats);
522 static_always_inline int
523 vlib_mem_get_next_numa_node (int numa)
525 clib_mem_main_t *mm = &clib_mem_main;
526 u32 bitmap = mm->numa_node_bitmap;
529 bitmap &= ~pow2_mask (numa + 1);
533 return count_trailing_zeros (bitmap);
536 static_always_inline clib_mem_page_sz_t
537 clib_mem_log2_page_size_validate (clib_mem_page_sz_t log2_page_size)
539 if (log2_page_size == CLIB_MEM_PAGE_SZ_DEFAULT)
540 return clib_mem_get_log2_page_size ();
541 if (log2_page_size == CLIB_MEM_PAGE_SZ_DEFAULT_HUGE)
542 return clib_mem_get_log2_default_hugepage_size ();
543 return log2_page_size;
546 static_always_inline uword
547 clib_mem_page_bytes (clib_mem_page_sz_t log2_page_size)
549 return 1ULL << clib_mem_log2_page_size_validate (log2_page_size);
552 static_always_inline clib_error_t *
553 clib_mem_get_last_error (void)
555 return clib_mem_main.error;
560 typedef void *clib_mem_bulk_handle_t;
561 clib_mem_bulk_handle_t clib_mem_bulk_init (u32 elt_sz, u32 align,
562 u32 min_elts_per_chunk);
563 void clib_mem_bulk_destroy (clib_mem_bulk_handle_t h);
564 void *clib_mem_bulk_alloc (clib_mem_bulk_handle_t h);
565 void clib_mem_bulk_free (clib_mem_bulk_handle_t h, void *p);
566 u8 *format_clib_mem_bulk (u8 *s, va_list *args);
568 #include <vppinfra/error.h> /* clib_panic */
570 #endif /* _included_clib_mem_h */
/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */