2 * Copyright (c) 2015 Cisco and/or its affiliates.
3 * Licensed under the Apache License, Version 2.0 (the "License");
4 * you may not use this file except in compliance with the License.
5 * You may obtain a copy of the License at:
7 * http://www.apache.org/licenses/LICENSE-2.0
9 * Unless required by applicable law or agreed to in writing, software
10 * distributed under the License is distributed on an "AS IS" BASIS,
11 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 * See the License for the specific language governing permissions and
13 * limitations under the License.
16 Copyright (c) 2001, 2002, 2003 Eliot Dresselhaus
18 Permission is hereby granted, free of charge, to any person obtaining
19 a copy of this software and associated documentation files (the
20 "Software"), to deal in the Software without restriction, including
21 without limitation the rights to use, copy, modify, merge, publish,
22 distribute, sublicense, and/or sell copies of the Software, and to
23 permit persons to whom the Software is furnished to do so, subject to
24 the following conditions:
26 The above copyright notice and this permission notice shall be
27 included in all copies or substantial portions of the Software.
29 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
30 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
31 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
32 NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
33 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
34 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
35 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
38 #ifndef _included_clib_mem_h
39 #define _included_clib_mem_h
45 #include <vppinfra/clib.h> /* uword, etc */
46 #include <vppinfra/clib_error.h>
48 #include <vppinfra/os.h>
49 #include <vppinfra/string.h> /* memcpy, clib_memset */
50 #include <vppinfra/sanitizer.h>
/* Maximum number of per-thread heaps (one slot per thread index). */
#define CLIB_MAX_MHEAPS 256
/* Maximum number of per-NUMA-node heaps. */
#define CLIB_MAX_NUMAS 16
/* Sentinel returned by the vm map functions on failure. */
#define CLIB_MEM_VM_MAP_FAILED ((void *) ~0)
/* Generic error return code for clib mem functions. */
#define CLIB_MEM_ERROR (-1)
/* Log2 page sizes.  Values >= 12 are the literal log2 of the page size in
   bytes (e.g. 12 -> 4KB, 21 -> 2MB, 30 -> 1GB); the first three values are
   symbolic and are resolved at runtime against clib_mem_main.
   NOTE(review): the `typedef enum` opener and the closing
   `} clib_mem_page_sz_t;` fall outside this view of the file. */
  CLIB_MEM_PAGE_SZ_UNKNOWN = 0,	     /* not yet determined */
  CLIB_MEM_PAGE_SZ_DEFAULT = 1,	     /* system default page size */
  CLIB_MEM_PAGE_SZ_DEFAULT_HUGE = 2, /* system default hugepage size */
  CLIB_MEM_PAGE_SZ_4K = 12,
  CLIB_MEM_PAGE_SZ_16K = 14,
  CLIB_MEM_PAGE_SZ_64K = 16,
  CLIB_MEM_PAGE_SZ_1M = 20,
  CLIB_MEM_PAGE_SZ_2M = 21,
  CLIB_MEM_PAGE_SZ_16M = 24,
  CLIB_MEM_PAGE_SZ_32M = 25,
  CLIB_MEM_PAGE_SZ_512M = 29,
  CLIB_MEM_PAGE_SZ_1G = 30,
  CLIB_MEM_PAGE_SZ_16G = 34,
/* Descriptor for one virtual-memory mapping created by the clib_mem_vm_map*()
   family; kept in a doubly-linked list rooted in clib_mem_main.
   NOTE(review): several members (base address, page count, the fd itself)
   fall outside this view of the file — only their comments survive. */
typedef struct _clib_mem_vm_map_hdr

  /* page size (log2) */
  clib_mem_page_sz_t log2_page_sz;

  /* file descriptor, -1 if memory is not shared */

#define CLIB_VM_MAP_HDR_NAME_MAX_LEN 64
  /* human-readable mapping name, NUL terminated */
  char name[CLIB_VM_MAP_HDR_NAME_MAX_LEN];

  /* intrusive doubly-linked list links */
  struct _clib_mem_vm_map_hdr *prev, *next;
} clib_mem_vm_map_hdr_t;
/* X-macro list of heap flags: _(bit, NAME, "string") */
#define foreach_clib_mem_heap_flag \
  _(0, LOCKED, "locked") \
  _(1, UNMAP_ON_DESTROY, "unmap-on-destroy")

/* Expand the list into CLIB_MEM_HEAP_F_* bit-flag constants.
   NOTE(review): the `typedef enum` opener is outside this view. */
#define _(i, v, s) CLIB_MEM_HEAP_F_##v = (1 << i),
  foreach_clib_mem_heap_flag
} clib_mem_heap_flag_t;
/* Fragment of the clib_mem_heap_t definition.
   NOTE(review): the struct opener, the mspace pointer, base/size members and
   the trailing name array are outside this view — only comments and two
   bit-field members survive here. */

  /* dlmalloc mspace */

  /* page size (log2) */
  clib_mem_page_sz_t log2_page_sz:8;

  /* CLIB_MEM_HEAP_F_* flags */
  clib_mem_heap_flag_t flags:8;

  /* name - _MUST_ be last */
/* Fragment of the clib_mem_main_t definition (global memory-subsystem state).
   NOTE(review): the typedef opener, the error pointer and the closing
   `} clib_mem_main_t;` are outside this view of the file. */

  /* log2 system page size */
  clib_mem_page_sz_t log2_page_sz;

  /* log2 system default hugepage size */
  clib_mem_page_sz_t log2_default_hugepage_sz;

  /* bitmap of available numa nodes */
  u32 numa_node_bitmap;

  /* per-thread heaps, indexed by thread index */
  void *per_cpu_mheaps[CLIB_MAX_MHEAPS];

  /* per-NUMA-node heaps, indexed by numa node id */
  void *per_numa_mheaps[CLIB_MAX_NUMAS];

  /* doubly-linked list of vm map headers */
  clib_mem_vm_map_hdr_t *first_map, *last_map;

/* single global instance, defined in the vppinfra mem implementation */
extern clib_mem_main_t clib_mem_main;

/* Unspecified NUMA socket */
#define VEC_NUMA_UNSPECIFIED (0xFF)
157 always_inline clib_mem_heap_t *
158 clib_mem_get_per_cpu_heap (void)
160 int cpu = os_get_thread_index ();
161 return clib_mem_main.per_cpu_mheaps[cpu];
165 clib_mem_set_per_cpu_heap (void *new_heap)
167 int cpu = os_get_thread_index ();
168 void *old = clib_mem_main.per_cpu_mheaps[cpu];
169 clib_mem_main.per_cpu_mheaps[cpu] = new_heap;
174 clib_mem_get_per_numa_heap (u32 numa_id)
176 ASSERT (numa_id < ARRAY_LEN (clib_mem_main.per_numa_mheaps));
177 return clib_mem_main.per_numa_mheaps[numa_id];
181 clib_mem_set_per_numa_heap (void *new_heap)
183 int numa = os_get_numa_index ();
184 void *old = clib_mem_main.per_numa_mheaps[numa];
185 clib_mem_main.per_numa_mheaps[numa] = new_heap;
190 clib_mem_set_thread_index (void)
193 * Find an unused slot in the per-cpu-mheaps array,
194 * and grab it for this thread. We need to be able to
195 * push/pop the thread heap without affecting other thread(s).
198 if (__os_thread_index != 0)
200 for (i = 0; i < ARRAY_LEN (clib_mem_main.per_cpu_mheaps); i++)
201 if (clib_atomic_bool_cmp_and_swap (&clib_mem_main.per_cpu_mheaps[i],
202 0, clib_mem_main.per_cpu_mheaps[0]))
204 os_set_thread_index (i);
207 ASSERT (__os_thread_index > 0);
211 clib_mem_size_nocheck (void *p)
213 size_t mspace_usable_size_with_delta (const void *p);
214 return mspace_usable_size_with_delta (p);
217 /* Memory allocator which may call os_out_of_memory() if it fails */
219 clib_mem_alloc_aligned_at_offset (uword size, uword align, uword align_offset,
220 int os_out_of_memory_on_failure)
222 void *mspace_get_aligned (void *msp, unsigned long n_user_data_bytes,
223 unsigned long align, unsigned long align_offset);
224 clib_mem_heap_t *h = clib_mem_get_per_cpu_heap ();
227 if (align_offset > align)
230 align_offset %= align;
232 align_offset = align;
235 p = mspace_get_aligned (h->mspace, size, align, align_offset);
237 if (PREDICT_FALSE (0 == p))
239 if (os_out_of_memory_on_failure)
244 CLIB_MEM_UNPOISON (p, size);
248 /* Memory allocator which calls os_out_of_memory() when it fails */
250 clib_mem_alloc (uword size)
252 return clib_mem_alloc_aligned_at_offset (size, /* align */ 1,
253 /* align_offset */ 0,
254 /* os_out_of_memory */ 1);
258 clib_mem_alloc_aligned (uword size, uword align)
260 return clib_mem_alloc_aligned_at_offset (size, align, /* align_offset */ 0,
261 /* os_out_of_memory */ 1);
264 /* Memory allocator which calls os_out_of_memory() when it fails */
266 clib_mem_alloc_or_null (uword size)
268 return clib_mem_alloc_aligned_at_offset (size, /* align */ 1,
269 /* align_offset */ 0,
270 /* os_out_of_memory */ 0);
274 clib_mem_alloc_aligned_or_null (uword size, uword align)
276 return clib_mem_alloc_aligned_at_offset (size, align, /* align_offset */ 0,
277 /* os_out_of_memory */ 0);
/* Memory allocator which panics when it fails.
   Use macro so that clib_panic macro can expand __FUNCTION__ and __LINE__.
   Implemented as a GCC statement expression so it yields the pointer. */
#define clib_mem_alloc_aligned_no_fail(size,align)                            \
({                                                                            \
  uword _clib_mem_alloc_size = (size);                                        \
  void * _clib_mem_alloc_p;                                                   \
  _clib_mem_alloc_p = clib_mem_alloc_aligned (_clib_mem_alloc_size, (align)); \
  if (! _clib_mem_alloc_p)                                                    \
    clib_panic ("failed to allocate %d bytes", _clib_mem_alloc_size);         \
  _clib_mem_alloc_p;                                                          \
})

/* Unaligned variant of the panicking allocator. */
#define clib_mem_alloc_no_fail(size) clib_mem_alloc_aligned_no_fail(size,1)
/* Alias to stack allocator for naming consistency.
   NOTE: storage lifetime ends when the calling function returns. */
#define clib_mem_alloc_stack(bytes) __builtin_alloca(bytes)
300 clib_mem_is_heap_object (void *p)
302 int mspace_is_heap_object (void *msp, void *p);
303 clib_mem_heap_t *h = clib_mem_get_per_cpu_heap ();
304 return mspace_is_heap_object (h->mspace, p);
308 clib_mem_free (void *p)
310 void mspace_put (void *msp, void *p_arg);
311 clib_mem_heap_t *h = clib_mem_get_per_cpu_heap ();
313 /* Make sure object is in the correct heap. */
314 ASSERT (clib_mem_is_heap_object (p));
316 CLIB_MEM_POISON (p, clib_mem_size_nocheck (p));
318 mspace_put (h->mspace, p);
322 clib_mem_realloc (void *p, uword new_size, uword old_size)
324 /* By default use alloc, copy and free to emulate realloc. */
325 void *q = clib_mem_alloc (new_size);
329 if (old_size < new_size)
330 copy_size = old_size;
332 copy_size = new_size;
333 clib_memcpy_fast (q, p, copy_size);
340 clib_mem_size (void *p)
342 ASSERT (clib_mem_is_heap_object (p));
343 return clib_mem_size_nocheck (p);
347 clib_mem_free_s (void *p)
349 uword size = clib_mem_size (p);
350 CLIB_MEM_UNPOISON (p, size);
351 memset_s_inline (p, size, 0, size);
355 always_inline clib_mem_heap_t *
356 clib_mem_get_heap (void)
358 return clib_mem_get_per_cpu_heap ();
361 always_inline clib_mem_heap_t *
362 clib_mem_set_heap (clib_mem_heap_t * heap)
364 return clib_mem_set_per_cpu_heap (heap);
/* Heap lifecycle, init and tracing API (implemented in the vppinfra mem
   backend).  NOTE(review): some prototypes below are truncated in this view
   of the file — continuation lines are missing. */
void clib_mem_destroy_heap (clib_mem_heap_t * heap);
clib_mem_heap_t *clib_mem_create_heap (void *base, uword size, int is_locked,
void clib_mem_main_init ();
void *clib_mem_init (void *base, uword size);
void *clib_mem_init_with_page_size (uword memory_size,
				    clib_mem_page_sz_t log2_page_sz);
void *clib_mem_init_thread_safe (void *memory, uword memory_size);
void clib_mem_exit (void);
void clib_mem_trace (int enable);
int clib_mem_is_traced (void);
/* Heap usage statistics fragment.
   NOTE(review): the typedef opener, the object counter fields and the
   closing `} clib_mem_usage_t;` are outside this view of the file. */

  /* Total number of objects allocated. */

  /* Total allocated bytes.  Bytes used and free.
     used + free = total */
  uword bytes_total, bytes_used, bytes_free;

  /* Number of bytes used by mheap data structure overhead
     (e.g. free lists, mheap header). */
  uword bytes_overhead;

  /* Amount of free space returned to operating system. */
  uword bytes_free_reclaimed;

  /* For malloc which puts small objects in sbrk region and
     large objects in mmap'ed regions. */
  uword bytes_used_sbrk;
  uword bytes_used_mmap;

  /* Max. number of bytes in this heap. */
/* Heap introspection and format helpers (format_* follow the vppinfra
   format() convention: append to vector s, args via va_list). */
void clib_mem_get_heap_usage (clib_mem_heap_t * heap,
			      clib_mem_usage_t * usage);
void *clib_mem_get_heap_base (clib_mem_heap_t * heap);
uword clib_mem_get_heap_size (clib_mem_heap_t * heap);
uword clib_mem_get_heap_free_space (clib_mem_heap_t * heap);
u8 *format_clib_mem_usage (u8 * s, va_list * args);
u8 *format_clib_mem_heap (u8 * s, va_list * va);
u8 *format_clib_mem_page_stats (u8 * s, va_list * va);
419 /* Allocate virtual address space. */
421 clib_mem_vm_alloc (uword size)
424 uword flags = MAP_PRIVATE;
427 flags |= MAP_ANONYMOUS;
430 mmap_addr = mmap (0, size, PROT_READ | PROT_WRITE, flags, -1, 0);
431 if (mmap_addr == (void *) -1)
434 CLIB_MEM_UNPOISON (mmap_addr, size);
440 clib_mem_vm_free (void *addr, uword size)
/* Named virtual-memory mapping API; mappings are tracked via
   clib_mem_vm_map_hdr_t.  Map functions return CLIB_MEM_VM_MAP_FAILED on
   error.  NOTE(review): some prototypes below are truncated in this view
   of the file — continuation lines are missing. */
void *clib_mem_vm_map_internal (void *base, clib_mem_page_sz_t log2_page_sz,
				uword size, int fd, uword offset, char *name);
void *clib_mem_vm_map (void *start, uword size,
		       clib_mem_page_sz_t log2_page_size, char *fmt, ...);
void *clib_mem_vm_map_stack (uword size, clib_mem_page_sz_t log2_page_size,
void *clib_mem_vm_map_shared (void *start, uword size, int fd, uword offset,
int clib_mem_vm_unmap (void *base);
clib_mem_vm_map_hdr_t *clib_mem_vm_get_next_map_hdr (clib_mem_vm_map_hdr_t *
458 static_always_inline clib_mem_page_sz_t
459 clib_mem_get_log2_page_size (void)
461 return clib_mem_main.log2_page_sz;
464 static_always_inline uword
465 clib_mem_get_page_size (void)
467 return 1ULL << clib_mem_main.log2_page_sz;
470 static_always_inline clib_mem_page_sz_t
471 clib_mem_get_log2_default_hugepage_size ()
473 return clib_mem_main.log2_default_hugepage_sz;
/* Page-size/fd helpers, VA reservation, NUMA affinity and heap tracing.
   NOTE(review): clib_mem_vm_get_paddr's prototype is truncated in this
   view of the file. */
int clib_mem_vm_create_fd (clib_mem_page_sz_t log2_page_size, char *fmt, ...);
uword clib_mem_get_fd_page_size (int fd);
uword clib_mem_get_default_hugepage_size (void);
clib_mem_page_sz_t clib_mem_get_fd_log2_page_size (int fd);
uword clib_mem_vm_reserve (uword start, uword size,
			   clib_mem_page_sz_t log2_page_sz);
u64 *clib_mem_vm_get_paddr (void *mem, clib_mem_page_sz_t log2_page_size,
void clib_mem_destroy (void);
int clib_mem_set_numa_affinity (u8 numa_node, int force);
int clib_mem_set_default_numa_affinity ();
void clib_mem_vm_randomize_va (uword * requested_va,
			       clib_mem_page_sz_t log2_page_size);
void mheap_trace (clib_mem_heap_t * v, int enable);
uword clib_mem_trace_enable_disable (uword enable);
void clib_mem_trace (int enable);
494 clib_mem_round_to_page_size (uword size, clib_mem_page_sz_t log2_page_size)
496 ASSERT (log2_page_size != CLIB_MEM_PAGE_SZ_UNKNOWN);
498 if (log2_page_size == CLIB_MEM_PAGE_SZ_DEFAULT)
499 log2_page_size = clib_mem_get_log2_page_size ();
500 else if (log2_page_size == CLIB_MEM_PAGE_SZ_DEFAULT_HUGE)
501 log2_page_size = clib_mem_get_log2_default_hugepage_size ();
503 return round_pow2 (size, 1ULL << log2_page_size);
/* Per-page residency statistics fragment.
   NOTE(review): the typedef opener and the total/mapped/unknown counters
   are outside this view of the file. */
  /* page size these stats were collected for */
  clib_mem_page_sz_t log2_page_sz;

  /* number of pages resident on each NUMA node */
  uword per_numa[CLIB_MAX_NUMAS];

} clib_mem_page_stats_t;

/* Collect residency stats for @n_pages pages starting at @start. */
void clib_mem_get_page_stats (void *start, clib_mem_page_sz_t log2_page_size,
			      uword n_pages, clib_mem_page_stats_t * stats);
519 static_always_inline int
520 vlib_mem_get_next_numa_node (int numa)
522 clib_mem_main_t *mm = &clib_mem_main;
523 u32 bitmap = mm->numa_node_bitmap;
526 bitmap &= ~pow2_mask (numa + 1);
530 return count_trailing_zeros (bitmap);
533 static_always_inline clib_mem_page_sz_t
534 clib_mem_log2_page_size_validate (clib_mem_page_sz_t log2_page_size)
536 if (log2_page_size == CLIB_MEM_PAGE_SZ_DEFAULT)
537 return clib_mem_get_log2_page_size ();
538 if (log2_page_size == CLIB_MEM_PAGE_SZ_DEFAULT_HUGE)
539 return clib_mem_get_log2_default_hugepage_size ();
540 return log2_page_size;
543 static_always_inline uword
544 clib_mem_page_bytes (clib_mem_page_sz_t log2_page_size)
546 return 1ULL << clib_mem_log2_page_size_validate (log2_page_size);
549 static_always_inline clib_error_t *
550 clib_mem_get_last_error (void)
552 return clib_mem_main.error;
556 #include <vppinfra/error.h> /* clib_panic */
558 #endif /* _included_clib_mem_h */
561 * fd.io coding-style-patch-verification: ON
564 * eval: (c-set-style "gnu")