/*
 * Copyright (c) 2015 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
/*
  Copyright (c) 2001, 2002, 2003 Eliot Dresselhaus

  Permission is hereby granted, free of charge, to any person obtaining
  a copy of this software and associated documentation files (the
  "Software"), to deal in the Software without restriction, including
  without limitation the rights to use, copy, modify, merge, publish,
  distribute, sublicense, and/or sell copies of the Software, and to
  permit persons to whom the Software is furnished to do so, subject to
  the following conditions:

  The above copyright notice and this permission notice shall be
  included in all copies or substantial portions of the Software.

  THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
  NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
  LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
  OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
  WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
#ifndef _included_clib_mem_h
#define _included_clib_mem_h

/* system headers needed by mmap()/munmap() and va_list below */
#include <stdarg.h>
#include <unistd.h>
#include <sys/mman.h>

#include <vppinfra/clib.h>	/* uword, etc */
#include <vppinfra/clib_error.h>
#include <vppinfra/dlmalloc.h>
#include <vppinfra/os.h>
#include <vppinfra/string.h>	/* memcpy, clib_memset */
#include <vppinfra/sanitizer.h>
#define CLIB_MAX_MHEAPS 256
#define CLIB_MAX_NUMAS 16
#define CLIB_MEM_VM_MAP_FAILED ((void *) ~0)
#define CLIB_MEM_ERROR (-1)
/* Page size is stored as log2 of the size in bytes; values below 12 (4K)
   are placeholders resolved at runtime. */
typedef enum
{
  CLIB_MEM_PAGE_SZ_UNKNOWN = 0,
  CLIB_MEM_PAGE_SZ_DEFAULT = 1,
  CLIB_MEM_PAGE_SZ_DEFAULT_HUGE = 2,
  CLIB_MEM_PAGE_SZ_4K = 12,
  CLIB_MEM_PAGE_SZ_16K = 14,
  CLIB_MEM_PAGE_SZ_64K = 16,
  CLIB_MEM_PAGE_SZ_1M = 20,
  CLIB_MEM_PAGE_SZ_2M = 21,
  CLIB_MEM_PAGE_SZ_16M = 24,
  CLIB_MEM_PAGE_SZ_32M = 25,
  CLIB_MEM_PAGE_SZ_512M = 29,
  CLIB_MEM_PAGE_SZ_1G = 30,
  CLIB_MEM_PAGE_SZ_16G = 34,
} clib_mem_page_sz_t;
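
/*
 * Example (illustrative): since the enum value is the log2 of the page
 * size in bytes, converting to bytes is a shift:
 *
 *   uword bytes = 1ULL << CLIB_MEM_PAGE_SZ_2M;   // 2097152
 *
 * For the DEFAULT / DEFAULT_HUGE placeholders, resolve the actual size
 * first with clib_mem_log2_page_size_validate() below.
 */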
typedef struct _clib_mem_vm_map_hdr
{
  /* base address */
  uword base_addr;

  /* number of pages */
  uword num_pages;

  /* page size (log2) */
  clib_mem_page_sz_t log2_page_sz;

  /* file descriptor, -1 if memory is not shared */
  int fd;

  /* allocation name */
#define CLIB_VM_MAP_HDR_NAME_MAX_LEN 64
  char name[CLIB_VM_MAP_HDR_NAME_MAX_LEN];

  /* doubly-linked list of mappings */
  struct _clib_mem_vm_map_hdr *prev, *next;
} clib_mem_vm_map_hdr_t;
typedef struct
{
  /* log2 system page size */
  clib_mem_page_sz_t log2_page_sz;

  /* log2 system default hugepage size */
  clib_mem_page_sz_t log2_default_hugepage_sz;

  /* bitmap of available numa nodes */
  u32 numa_node_bitmap;

  /* per CPU heaps */
  void *per_cpu_mheaps[CLIB_MAX_MHEAPS];

  /* per NUMA heaps */
  void *per_numa_mheaps[CLIB_MAX_NUMAS];

  /* memory maps */
  clib_mem_vm_map_hdr_t *first_map, *last_map;

  /* last error */
  clib_error_t *error;
} clib_mem_main_t;

extern clib_mem_main_t clib_mem_main;
/* Unspecified NUMA socket */
#define VEC_NUMA_UNSPECIFIED (0xFF)

always_inline void *
clib_mem_get_per_cpu_heap (void)
{
  int cpu = os_get_thread_index ();
  return clib_mem_main.per_cpu_mheaps[cpu];
}
always_inline void *
clib_mem_set_per_cpu_heap (u8 * new_heap)
{
  int cpu = os_get_thread_index ();
  void *old = clib_mem_main.per_cpu_mheaps[cpu];
  clib_mem_main.per_cpu_mheaps[cpu] = new_heap;
  return old;
}
always_inline void *
clib_mem_get_per_numa_heap (u32 numa_id)
{
  ASSERT (numa_id < ARRAY_LEN (clib_mem_main.per_numa_mheaps));
  return clib_mem_main.per_numa_mheaps[numa_id];
}
always_inline void *
clib_mem_set_per_numa_heap (u8 * new_heap)
{
  int numa = os_get_numa_index ();
  void *old = clib_mem_main.per_numa_mheaps[numa];
  clib_mem_main.per_numa_mheaps[numa] = new_heap;
  return old;
}
always_inline void
clib_mem_set_thread_index (void)
{
  /*
   * Find an unused slot in the per-cpu-mheaps array,
   * and grab it for this thread. We need to be able to
   * push/pop the thread heap without affecting other thread(s).
   */
  int i;

  if (__os_thread_index != 0)
    return;

  for (i = 0; i < ARRAY_LEN (clib_mem_main.per_cpu_mheaps); i++)
    if (clib_atomic_bool_cmp_and_swap (&clib_mem_main.per_cpu_mheaps[i],
				       0, clib_mem_main.per_cpu_mheaps[0]))
      {
	os_set_thread_index (i);
	break;
      }
  ASSERT (__os_thread_index > 0);
}
always_inline uword
clib_mem_size_nocheck (void *p)
{
  return mspace_usable_size_with_delta (p);
}
/* Memory allocator which may call os_out_of_memory() if it fails */
always_inline void *
clib_mem_alloc_aligned_at_offset (uword size, uword align, uword align_offset,
				  int os_out_of_memory_on_failure)
{
  void *heap, *p;
  uword cpu;

  if (align_offset > align)
    {
      if (align > 0)
	align_offset %= align;
      else
	align_offset = align;
    }

  cpu = os_get_thread_index ();
  heap = clib_mem_main.per_cpu_mheaps[cpu];

  p = mspace_get_aligned (heap, size, align, align_offset);

  if (PREDICT_FALSE (0 == p))
    {
      if (os_out_of_memory_on_failure)
	os_out_of_memory ();
      return 0;
    }

  CLIB_MEM_UNPOISON (p, size);
  return p;
}
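
/*
 * Example (illustrative): align_offset asks for the byte at that offset,
 * not the base pointer, to land on the alignment boundary. E.g. to make
 * the byte at offset 8 of a 256-byte object 64-byte aligned:
 *
 *   void *p = clib_mem_alloc_aligned_at_offset (256, 64, 8, 1);
 */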
/* Memory allocator which calls os_out_of_memory() when it fails */
always_inline void *
clib_mem_alloc (uword size)
{
  return clib_mem_alloc_aligned_at_offset (size, /* align */ 1,
					   /* align_offset */ 0,
					   /* os_out_of_memory */ 1);
}

always_inline void *
clib_mem_alloc_aligned (uword size, uword align)
{
  return clib_mem_alloc_aligned_at_offset (size, align, /* align_offset */ 0,
					   /* os_out_of_memory */ 1);
}
/* Memory allocator which returns null when it fails */
always_inline void *
clib_mem_alloc_or_null (uword size)
{
  return clib_mem_alloc_aligned_at_offset (size, /* align */ 1,
					   /* align_offset */ 0,
					   /* os_out_of_memory */ 0);
}

always_inline void *
clib_mem_alloc_aligned_or_null (uword size, uword align)
{
  return clib_mem_alloc_aligned_at_offset (size, align, /* align_offset */ 0,
					   /* os_out_of_memory */ 0);
}
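
/*
 * Example (illustrative): the _or_null variants let the caller handle
 * allocation failure instead of invoking os_out_of_memory():
 *
 *   u8 *buf = clib_mem_alloc_or_null (1 << 20);
 *   if (buf == 0)
 *     return clib_error_return (0, "out of memory");
 */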
/* Memory allocator which panics when it fails.
   Use macro so that clib_panic macro can expand __FUNCTION__ and __LINE__. */
#define clib_mem_alloc_aligned_no_fail(size,align)				\
({										\
  uword _clib_mem_alloc_size = (size);						\
  void * _clib_mem_alloc_p;							\
  _clib_mem_alloc_p = clib_mem_alloc_aligned (_clib_mem_alloc_size, (align));	\
  if (! _clib_mem_alloc_p)							\
    clib_panic ("failed to allocate %d bytes", _clib_mem_alloc_size);		\
  _clib_mem_alloc_p;								\
})

#define clib_mem_alloc_no_fail(size) clib_mem_alloc_aligned_no_fail(size,1)
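
/*
 * Example (illustrative): since the macro expands to a statement
 * expression yielding the pointer, it can be used in an assignment;
 * my_table and my_entry_t below are hypothetical names:
 *
 *   my_table = clib_mem_alloc_no_fail (n_entries * sizeof (my_entry_t));
 */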
/* Alias to stack allocator for naming consistency. */
#define clib_mem_alloc_stack(bytes) __builtin_alloca(bytes)
always_inline uword
clib_mem_is_heap_object (void *p)
{
  void *heap = clib_mem_get_per_cpu_heap ();

  return mspace_is_heap_object (heap, p);
}
always_inline void
clib_mem_free (void *p)
{
  u8 *heap = clib_mem_get_per_cpu_heap ();

  /* Make sure object is in the correct heap. */
  ASSERT (clib_mem_is_heap_object (p));

  CLIB_MEM_POISON (p, clib_mem_size_nocheck (p));

  mspace_put (heap, p);
}
always_inline void *
clib_mem_realloc (void *p, uword new_size, uword old_size)
{
  /* By default use alloc, copy and free to emulate realloc. */
  void *q = clib_mem_alloc (new_size);
  if (q)
    {
      uword copy_size;
      if (old_size < new_size)
	copy_size = old_size;
      else
	copy_size = new_size;
      clib_memcpy_fast (q, p, copy_size);
      clib_mem_free (p);
    }
  return q;
}
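
/*
 * Example (illustrative): unlike realloc(3), the caller must supply the
 * current size, since only min(old_size, new_size) bytes are copied:
 *
 *   buf = clib_mem_realloc (buf, 2 * len, len);
 */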
always_inline uword
clib_mem_size (void *p)
{
  ASSERT (clib_mem_is_heap_object (p));
  return clib_mem_size_nocheck (p);
}
always_inline void
clib_mem_free_s (void *p)
{
  uword size = clib_mem_size (p);
  CLIB_MEM_UNPOISON (p, size);
  memset_s_inline (p, size, 0, size);
  clib_mem_free (p);
}
always_inline void *
clib_mem_get_heap (void)
{
  return clib_mem_get_per_cpu_heap ();
}

always_inline void *
clib_mem_set_heap (void *heap)
{
  return clib_mem_set_per_cpu_heap (heap);
}
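
/*
 * Example (illustrative): the usual push/pop pattern for allocating from
 * a private heap; clib_mem_set_heap() returns the previous heap so it
 * can be restored afterwards (my_heap is a hypothetical name):
 *
 *   void *old = clib_mem_set_heap (my_heap);
 *   p = clib_mem_alloc (64);
 *   clib_mem_set_heap (old);
 */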
void clib_mem_main_init (void);
void *clib_mem_init (void *heap, uword size);
void *clib_mem_init_with_page_size (uword memory_size,
				    clib_mem_page_sz_t log2_page_sz);
void *clib_mem_init_thread_safe (void *memory, uword memory_size);
void *clib_mem_init_thread_safe_numa (void *memory, uword memory_size,
				      u8 numa);

void clib_mem_exit (void);

void clib_mem_validate (void);

void clib_mem_trace (int enable);

int clib_mem_is_traced (void);
typedef struct
{
  /* Total number of objects allocated. */
  uword object_count;

  /* Total allocated bytes.  Bytes used and free.
     used + free = total */
  uword bytes_total, bytes_used, bytes_free;

  /* Number of bytes used by mheap data structure overhead
     (e.g. free lists, mheap header). */
  uword bytes_overhead;

  /* Amount of free space returned to operating system. */
  uword bytes_free_reclaimed;

  /* For malloc which puts small objects in sbrk region and
     large objects in mmap'ed regions. */
  uword bytes_used_sbrk;
  uword bytes_used_mmap;

  /* Max. number of bytes in this heap. */
  uword bytes_max;
} clib_mem_usage_t;

void clib_mem_usage (clib_mem_usage_t * usage);

u8 *format_clib_mem_usage (u8 * s, va_list * args);
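
/*
 * Example (illustrative): query current heap statistics; per the struct
 * comment above, used + free = total:
 *
 *   clib_mem_usage_t u;
 *   clib_mem_usage (&u);
 *   ASSERT (u.bytes_total == u.bytes_used + u.bytes_free);
 */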
/* Allocate virtual address space. */
always_inline void *
clib_mem_vm_alloc (uword size)
{
  void *mmap_addr;
  uword flags = MAP_PRIVATE;

#ifdef MAP_ANONYMOUS
  flags |= MAP_ANONYMOUS;
#endif

  mmap_addr = mmap (0, size, PROT_READ | PROT_WRITE, flags, -1, 0);
  if (mmap_addr == (void *) -1)
    mmap_addr = 0;
  else
    CLIB_MEM_UNPOISON (mmap_addr, size);

  return mmap_addr;
}

always_inline void
clib_mem_vm_free (void *addr, uword size)
{
  munmap (addr, size);
}
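
/*
 * Example (illustrative): reserve and release anonymous VA space;
 * a null return indicates mmap() failure:
 *
 *   void *va = clib_mem_vm_alloc (1ULL << 21);
 *   if (va)
 *     clib_mem_vm_free (va, 1ULL << 21);
 */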
void *clib_mem_vm_map_internal (void *base, clib_mem_page_sz_t log2_page_sz,
				uword size, int fd, uword offset, char *name);

void *clib_mem_vm_map (void *start, uword size,
		       clib_mem_page_sz_t log2_page_size, char *fmt, ...);
void *clib_mem_vm_map_stack (uword size, clib_mem_page_sz_t log2_page_size,
			     char *fmt, ...);
void *clib_mem_vm_map_shared (void *start, uword size, int fd, uword offset,
			      char *fmt, ...);
int clib_mem_vm_unmap (void *base);
clib_mem_vm_map_hdr_t *clib_mem_vm_get_next_map_hdr (clib_mem_vm_map_hdr_t *
						     hdr);
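
/*
 * Example (illustrative sketch, assuming a null argument yields the first
 * header and null is returned past the last): walk all registered maps:
 *
 *   clib_mem_vm_map_hdr_t *hdr = 0;
 *   while ((hdr = clib_mem_vm_get_next_map_hdr (hdr)))
 *     fformat (stdout, "%s\n", hdr->name);
 */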
typedef struct
{
#define CLIB_MEM_VM_F_SHARED (1 << 0)
#define CLIB_MEM_VM_F_HUGETLB (1 << 1)
#define CLIB_MEM_VM_F_NUMA_PREFER (1 << 2)
#define CLIB_MEM_VM_F_NUMA_FORCE (1 << 3)
#define CLIB_MEM_VM_F_HUGETLB_PREALLOC (1 << 4)
#define CLIB_MEM_VM_F_LOCKED (1 << 5)
  u32 flags; /**< vm allocation flags:
                <br> CLIB_MEM_VM_F_SHARED: request shared memory, file
		descriptor will be provided on successful allocation.
                <br> CLIB_MEM_VM_F_HUGETLB: request hugepages.
		<br> CLIB_MEM_VM_F_NUMA_PREFER: numa_node field contains valid
		numa node preference.
		<br> CLIB_MEM_VM_F_NUMA_FORCE: fail if setting numa policy fails.
		<br> CLIB_MEM_VM_F_HUGETLB_PREALLOC: pre-allocate hugepages if
		number of available pages is not sufficient.
		<br> CLIB_MEM_VM_F_LOCKED: request locked memory. */
  char *name; /**< Name for memory allocation, set by caller. */
  uword size; /**< Allocation size, set by caller. */
  int numa_node; /**< numa node preference. Valid if CLIB_MEM_VM_F_NUMA_PREFER set. */
  void *addr; /**< Pointer to allocated memory, set on successful allocation. */
  int fd; /**< File descriptor, set on successful allocation if CLIB_MEM_VM_F_SHARED is set. */
  int log2_page_size; /**< Page size in log2 format, set on successful allocation. */
  int n_pages; /**< Number of pages. */
  uword requested_va; /**< Request fixed position mapping */
} clib_mem_vm_alloc_t;
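
/*
 * Example (illustrative): request a 32 MB shared, hugepage-backed
 * allocation via clib_mem_vm_ext_alloc(), declared below; the region
 * name is hypothetical and a null return indicates success:
 *
 *   clib_mem_vm_alloc_t a = { 0 };
 *   a.name = "my-region";
 *   a.size = 32 << 20;
 *   a.flags = CLIB_MEM_VM_F_SHARED | CLIB_MEM_VM_F_HUGETLB;
 *   if (clib_mem_vm_ext_alloc (&a) == 0)
 *     ... use a.addr and a.fd ...
 */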
static_always_inline clib_mem_page_sz_t
clib_mem_get_log2_page_size (void)
{
  return clib_mem_main.log2_page_sz;
}

static_always_inline uword
clib_mem_get_page_size (void)
{
  return 1ULL << clib_mem_main.log2_page_sz;
}

static_always_inline clib_mem_page_sz_t
clib_mem_get_log2_default_hugepage_size (void)
{
  return clib_mem_main.log2_default_hugepage_sz;
}
int clib_mem_vm_create_fd (clib_mem_page_sz_t log2_page_size, char *fmt, ...);
clib_error_t *clib_mem_vm_ext_alloc (clib_mem_vm_alloc_t * a);
void clib_mem_vm_ext_free (clib_mem_vm_alloc_t * a);
uword clib_mem_get_fd_page_size (int fd);
uword clib_mem_get_default_hugepage_size (void);
clib_mem_page_sz_t clib_mem_get_fd_log2_page_size (int fd);
uword clib_mem_vm_reserve (uword start, uword size,
			   clib_mem_page_sz_t log2_page_sz);
u64 *clib_mem_vm_get_paddr (void *mem, clib_mem_page_sz_t log2_page_size,
			    int n_pages);
void clib_mem_destroy_mspace (void *mspace);
void clib_mem_destroy (void);
int clib_mem_set_numa_affinity (u8 numa_node, int force);
int clib_mem_set_default_numa_affinity (void);
typedef struct
{
  uword size; /**< Map size */
  int fd; /**< File descriptor to be mapped */
  uword requested_va; /**< Request fixed position mapping */
  void *addr; /**< Pointer to mapped memory, if successful */
} clib_mem_vm_map_t;

clib_error_t *clib_mem_vm_ext_map (clib_mem_vm_map_t * a);
void clib_mem_vm_randomize_va (uword * requested_va,
			       clib_mem_page_sz_t log2_page_size);
void mheap_trace (void *v, int enable);
uword clib_mem_trace_enable_disable (uword enable);
static_always_inline uword
clib_mem_round_to_page_size (uword size, clib_mem_page_sz_t log2_page_size)
{
  ASSERT (log2_page_size != CLIB_MEM_PAGE_SZ_UNKNOWN);

  if (log2_page_size == CLIB_MEM_PAGE_SZ_DEFAULT)
    log2_page_size = clib_mem_get_log2_page_size ();
  else if (log2_page_size == CLIB_MEM_PAGE_SZ_DEFAULT_HUGE)
    log2_page_size = clib_mem_get_log2_default_hugepage_size ();

  return round_pow2 (size, 1ULL << log2_page_size);
}
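
/*
 * Example (illustrative): rounding up to a page multiple; with 4K pages,
 * 5000 bytes round up to two pages:
 *
 *   uword sz = clib_mem_round_to_page_size (5000, CLIB_MEM_PAGE_SZ_4K);
 *   ASSERT (sz == 8192);
 */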
typedef struct
{
  /* number of pages per numa node */
  uword per_numa[CLIB_MAX_NUMAS];
} clib_mem_page_stats_t;

void clib_mem_get_page_stats (void *start, clib_mem_page_sz_t log2_page_size,
			      uword n_pages, clib_mem_page_stats_t * stats);
static_always_inline int
vlib_mem_get_next_numa_node (int numa)
{
  clib_mem_main_t *mm = &clib_mem_main;
  u32 bitmap = mm->numa_node_bitmap;

  if (numa >= 0)
    bitmap &= ~pow2_mask (numa + 1);
  if (bitmap == 0)
    return -1;

  return count_trailing_zeros (bitmap);
}
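
/*
 * Example (illustrative): walk all available numa nodes in ascending
 * order; start from -1 and stop when -1 comes back:
 *
 *   int numa = -1;
 *   while ((numa = vlib_mem_get_next_numa_node (numa)) != -1)
 *     ... per-numa setup ...
 */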
static_always_inline clib_mem_page_sz_t
clib_mem_log2_page_size_validate (clib_mem_page_sz_t log2_page_size)
{
  if (log2_page_size == CLIB_MEM_PAGE_SZ_DEFAULT)
    return clib_mem_get_log2_page_size ();
  if (log2_page_size == CLIB_MEM_PAGE_SZ_DEFAULT_HUGE)
    return clib_mem_get_log2_default_hugepage_size ();
  return log2_page_size;
}
static_always_inline uword
clib_mem_page_bytes (clib_mem_page_sz_t log2_page_size)
{
  /* use a 64-bit shift; page sizes up to CLIB_MEM_PAGE_SZ_16G would
     overflow a 32-bit int */
  return 1ULL << clib_mem_log2_page_size_validate (log2_page_size);
}
static_always_inline clib_error_t *
clib_mem_get_last_error (void)
{
  return clib_mem_main.error;
}
#include <vppinfra/error.h>	/* clib_panic */

#endif /* _included_clib_mem_h */
/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */