2 * Copyright (c) 2015 Cisco and/or its affiliates.
3 * Licensed under the Apache License, Version 2.0 (the "License");
4 * you may not use this file except in compliance with the License.
5 * You may obtain a copy of the License at:
7 * http://www.apache.org/licenses/LICENSE-2.0
9 * Unless required by applicable law or agreed to in writing, software
10 * distributed under the License is distributed on an "AS IS" BASIS,
11 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 * See the License for the specific language governing permissions and
13 * limitations under the License.
16 Copyright (c) 2001, 2002, 2003 Eliot Dresselhaus
18 Permission is hereby granted, free of charge, to any person obtaining
19 a copy of this software and associated documentation files (the
20 "Software"), to deal in the Software without restriction, including
21 without limitation the rights to use, copy, modify, merge, publish,
22 distribute, sublicense, and/or sell copies of the Software, and to
23 permit persons to whom the Software is furnished to do so, subject to
24 the following conditions:
26 The above copyright notice and this permission notice shall be
27 included in all copies or substantial portions of the Software.
29 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
30 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
31 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
32 NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
33 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
34 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
35 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
38 #ifndef _included_clib_mem_h
39 #define _included_clib_mem_h
45 #include <vppinfra/clib.h> /* uword, etc */
46 #include <vppinfra/clib_error.h>
48 #include <vppinfra/os.h>
49 #include <vppinfra/string.h> /* memcpy, clib_memset */
50 #include <vppinfra/sanitizer.h>
/* Maximum number of per-thread heap slots (indexed by thread index). */
52 #define CLIB_MAX_MHEAPS 256
/* Maximum number of per-NUMA-node heap slots. */
53 #define CLIB_MAX_NUMAS 16
/* Sentinel returned by VM map functions on failure (all-ones pointer). */
54 #define CLIB_MEM_VM_MAP_FAILED ((void *) ~0)
/* Generic error return code used by clib memory functions. */
55 #define CLIB_MEM_ERROR (-1)
/* Page-size enumerators.  Values >= 12 are literally log2 of the page
   size in bytes (12 -> 4 KiB, 21 -> 2 MiB, 30 -> 1 GiB, ...); the first
   three are symbolic: unknown, the system default page size, and the
   system default hugepage size.
   NOTE(review): the enclosing `typedef enum { ... }` lines are not
   visible in this listing. */
59 CLIB_MEM_PAGE_SZ_UNKNOWN = 0,
60 CLIB_MEM_PAGE_SZ_DEFAULT = 1,
61 CLIB_MEM_PAGE_SZ_DEFAULT_HUGE = 2,
62 CLIB_MEM_PAGE_SZ_4K = 12,
63 CLIB_MEM_PAGE_SZ_16K = 14,
64 CLIB_MEM_PAGE_SZ_64K = 16,
65 CLIB_MEM_PAGE_SZ_1M = 20,
66 CLIB_MEM_PAGE_SZ_2M = 21,
67 CLIB_MEM_PAGE_SZ_16M = 24,
68 CLIB_MEM_PAGE_SZ_32M = 25,
69 CLIB_MEM_PAGE_SZ_512M = 29,
70 CLIB_MEM_PAGE_SZ_1G = 30,
71 CLIB_MEM_PAGE_SZ_16G = 34,
/* Header describing one virtual-memory mapping.  Headers form a doubly
   linked list via prev/next; the list endpoints live in clib_mem_main
   (first_map / last_map).
   NOTE(review): several struct fields and the opening brace are missing
   from this listing — confirm against the original source. */
74 typedef struct _clib_mem_vm_map_hdr
82 /* page size (log2) */
83 clib_mem_page_sz_t log2_page_sz;
85 /* file descriptor, -1 if memory is not shared */
89 #define CLIB_VM_MAP_HDR_NAME_MAX_LEN 64
/* human-readable mapping name, fixed-size buffer */
90 char name[CLIB_VM_MAP_HDR_NAME_MAX_LEN];
/* doubly-linked list of all map headers */
93 struct _clib_mem_vm_map_hdr *prev, *next;
94 } clib_mem_vm_map_hdr_t;
/* Global memory-subsystem state.
   NOTE(review): the `typedef struct` opener and closing
   `} clib_mem_main_t;` lines are not visible in this listing. */
98 /* log2 system page size */
99 clib_mem_page_sz_t log2_page_sz;
101 /* log2 system default hugepage size */
102 clib_mem_page_sz_t log2_default_hugepage_sz;
104 /* bitmap of available numa nodes */
105 u32 numa_node_bitmap;
/* per-thread heaps, indexed by os_get_thread_index () */
108 void *per_cpu_mheaps[CLIB_MAX_MHEAPS];
/* per-NUMA-node heaps, indexed by numa node id */
111 void *per_numa_mheaps[CLIB_MAX_NUMAS];
/* endpoints of the doubly linked list of VM map headers */
114 clib_mem_vm_map_hdr_t *first_map, *last_map;
/* single global instance, defined in the corresponding .c file */
120 extern clib_mem_main_t clib_mem_main;
122 /* Unspecified NUMA socket */
123 #define VEC_NUMA_UNSPECIFIED (0xFF)
/* Return the calling thread's heap pointer. */
126 clib_mem_get_per_cpu_heap (void)
128 int cpu = os_get_thread_index ();
129 return clib_mem_main.per_cpu_mheaps[cpu];
/* Install new_heap as the calling thread's heap.
   NOTE(review): the return of the previous heap pointer (`old`) is not
   visible in this listing — confirm against the original source. */
133 clib_mem_set_per_cpu_heap (u8 * new_heap)
135 int cpu = os_get_thread_index ();
136 void *old = clib_mem_main.per_cpu_mheaps[cpu];
137 clib_mem_main.per_cpu_mheaps[cpu] = new_heap;
/* Return the heap registered for the given NUMA node id;
   asserts the id is in range. */
142 clib_mem_get_per_numa_heap (u32 numa_id)
144 ASSERT (numa_id < ARRAY_LEN (clib_mem_main.per_numa_mheaps));
145 return clib_mem_main.per_numa_mheaps[numa_id];
/* Install new_heap as the heap for the caller's current NUMA node.
   NOTE(review): return of `old` not visible in this listing. */
149 clib_mem_set_per_numa_heap (u8 * new_heap)
151 int numa = os_get_numa_index ();
152 void *old = clib_mem_main.per_numa_mheaps[numa];
153 clib_mem_main.per_numa_mheaps[numa] = new_heap;
/* Claim an unused per-cpu-mheap slot for the calling thread: atomically
   CAS a zero slot to the main (index 0) heap pointer, then record the
   winning index as this thread's index.  Slot 0 is reserved for the main
   thread, so a thread that already has a nonzero index keeps it. */
158 clib_mem_set_thread_index (void)
161 * Find an unused slot in the per-cpu-mheaps array,
162 * and grab it for this thread. We need to be able to
163 * push/pop the thread heap without affecting other thread(s).
166 if (__os_thread_index != 0)
168 for (i = 0; i < ARRAY_LEN (clib_mem_main.per_cpu_mheaps); i++)
169 if (clib_atomic_bool_cmp_and_swap (&clib_mem_main.per_cpu_mheaps[i],
170 0, clib_mem_main.per_cpu_mheaps[0]))
172 os_set_thread_index (i);
/* must have found a slot above */
175 ASSERT (__os_thread_index > 0);
/* Usable size of allocation p, WITHOUT verifying that p belongs to the
   current heap (see clib_mem_size for the checked variant).  The local
   prototype avoids pulling in the dlmalloc header here. */
179 clib_mem_size_nocheck (void *p)
181 size_t mspace_usable_size_with_delta (const void *p);
182 return mspace_usable_size_with_delta (p);
185 /* Memory allocator which may call os_out_of_memory() if it fails */
/* Allocate `size` bytes from the calling thread's heap such that
   (p + align_offset) is aligned to `align`.  On failure, calls
   os_out_of_memory() when requested; unpoisons the new region for the
   memory sanitizer before returning. */
187 clib_mem_alloc_aligned_at_offset (uword size, uword align, uword align_offset,
188 int os_out_of_memory_on_failure)
/* local prototype for the dlmalloc mspace allocator */
192 void *mspace_get_aligned (void *msp, unsigned long n_user_data_bytes,
193 unsigned long align, unsigned long align_offset);
/* normalize align_offset into [0, align] */
195 if (align_offset > align)
198 align_offset %= align;
200 align_offset = align;
/* allocate from this thread's heap */
203 cpu = os_get_thread_index ();
204 heap = clib_mem_main.per_cpu_mheaps[cpu];
206 p = mspace_get_aligned (heap, size, align, align_offset);
208 if (PREDICT_FALSE (0 == p))
210 if (os_out_of_memory_on_failure)
215 CLIB_MEM_UNPOISON (p, size);
219 /* Memory allocator which calls os_out_of_memory() when it fails */
221 clib_mem_alloc (uword size)
223 return clib_mem_alloc_aligned_at_offset (size, /* align */ 1,
224 /* align_offset */ 0,
225 /* os_out_of_memory */ 1);
/* As clib_mem_alloc, with a caller-specified alignment. */
229 clib_mem_alloc_aligned (uword size, uword align)
231 return clib_mem_alloc_aligned_at_offset (size, align, /* align_offset */ 0,
232 /* os_out_of_memory */ 1);
235 /* Memory allocator which calls os_out_of_memory() when it fails */
/* NOTE(review): despite the comment above, this variant passes
   os_out_of_memory = 0, i.e. it returns NULL on failure instead. */
237 clib_mem_alloc_or_null (uword size)
239 return clib_mem_alloc_aligned_at_offset (size, /* align */ 1,
240 /* align_offset */ 0,
241 /* os_out_of_memory */ 0);
/* Aligned variant that returns NULL on failure. */
245 clib_mem_alloc_aligned_or_null (uword size, uword align)
247 return clib_mem_alloc_aligned_at_offset (size, align, /* align_offset */ 0,
248 /* os_out_of_memory */ 0);
253 /* Memory allocator which panics when it fails.
254 Use macro so that clib_panic macro can expand __FUNCTION__ and __LINE__. */
255 #define clib_mem_alloc_aligned_no_fail(size,align) \
257 uword _clib_mem_alloc_size = (size); \
258 void * _clib_mem_alloc_p; \
259 _clib_mem_alloc_p = clib_mem_alloc_aligned (_clib_mem_alloc_size, (align)); \
260 if (! _clib_mem_alloc_p) \
261 clib_panic ("failed to allocate %d bytes", _clib_mem_alloc_size); \
265 #define clib_mem_alloc_no_fail(size) clib_mem_alloc_aligned_no_fail(size,1)
267 /* Alias to stack allocator for naming consistency. */
268 #define clib_mem_alloc_stack(bytes) __builtin_alloca(bytes)
/* Return nonzero if p is an object in the calling thread's heap. */
271 clib_mem_is_heap_object (void *p)
273 void *heap = clib_mem_get_per_cpu_heap ();
/* local prototype for the dlmalloc helper */
274 int mspace_is_heap_object (void *msp, void *p);
276 return mspace_is_heap_object (heap, p);
/* Free p back to the calling thread's heap; poisons the freed bytes for
   the memory sanitizer first. */
280 clib_mem_free (void *p)
282 u8 *heap = clib_mem_get_per_cpu_heap ();
284 void mspace_put (void *msp, void *p_arg);
285 /* Make sure object is in the correct heap. */
286 ASSERT (clib_mem_is_heap_object (p));
288 CLIB_MEM_POISON (p, clib_mem_size_nocheck (p));
290 mspace_put (heap, p);
/* Grow/shrink allocation p from old_size to new_size by allocating a new
   block, copying min(old_size, new_size) bytes, then (presumably) freeing
   p and returning the new block — the tail of the function is not visible
   in this listing. */
294 clib_mem_realloc (void *p, uword new_size, uword old_size)
296 /* By default use alloc, copy and free to emulate realloc. */
297 void *q = clib_mem_alloc (new_size);
301 if (old_size < new_size)
302 copy_size = old_size;
304 copy_size = new_size;
305 clib_memcpy_fast (q, p, copy_size);
/* Usable size of p, asserting p is a valid heap object. */
312 clib_mem_size (void *p)
314 ASSERT (clib_mem_is_heap_object (p));
315 return clib_mem_size_nocheck (p);
/* Secure free: zero the allocation (memset_s_inline resists being
   optimized away) before releasing it. */
319 clib_mem_free_s (void *p)
321 uword size = clib_mem_size (p);
322 CLIB_MEM_UNPOISON (p, size);
323 memset_s_inline (p, size, 0, size);
/* Convenience aliases for the per-cpu heap accessors. */
328 clib_mem_get_heap (void)
330 return clib_mem_get_per_cpu_heap ();
334 clib_mem_set_heap (void *heap)
336 return clib_mem_set_per_cpu_heap (heap);
/* Heap lifecycle / initialization entry points (defined in mem.c /
   mem_dlmalloc.c). */
339 void clib_mem_destroy_heap (void *heap);
340 void *clib_mem_create_heap (void *base, uword size, int is_locked, char *fmt,
343 void clib_mem_main_init ();
344 void *clib_mem_init (void *heap, uword size);
345 void *clib_mem_init_with_page_size (uword memory_size,
346 clib_mem_page_sz_t log2_page_sz);
347 void *clib_mem_init_thread_safe (void *memory, uword memory_size);
348 void *clib_mem_init_thread_safe_numa (void *memory, uword memory_size,
351 void clib_mem_exit (void);
/* Enable/disable and query allocation tracing on the current heap. */
353 void clib_mem_trace (int enable);
355 int clib_mem_is_traced (void);
/* Heap usage statistics, filled in by clib_mem_get_heap_usage().
   NOTE(review): the `typedef struct` opener, some fields and the closing
   `} clib_mem_usage_t;` are not visible in this listing. */
359 /* Total number of objects allocated. */
362 /* Total allocated bytes. Bytes used and free.
363 used + free = total */
364 uword bytes_total, bytes_used, bytes_free;
366 /* Number of bytes used by mheap data structure overhead
367 (e.g. free lists, mheap header). */
368 uword bytes_overhead;
370 /* Amount of free space returned to operating system. */
371 uword bytes_free_reclaimed;
373 /* For malloc which puts small objects in sbrk region and
374 large objects in mmap'ed regions. */
375 uword bytes_used_sbrk;
376 uword bytes_used_mmap;
378 /* Max. number of bytes in this heap. */
/* Heap introspection helpers. */
382 void clib_mem_get_heap_usage (void *heap, clib_mem_usage_t * usage);
384 void *clib_mem_get_heap_base (void *heap);
385 uword clib_mem_get_heap_size (void *heap);
386 uword clib_mem_get_heap_free_space (void *heap);
/* format() helpers for printing usage / heap contents. */
388 u8 *format_clib_mem_usage (u8 * s, va_list * args);
389 u8 *format_clib_mem_heap (u8 * s, va_list * va);
391 /* Allocate virtual address space. */
/* Anonymous private mmap of `size` bytes, read/write; unpoisons the
   region on success.  The failure-return path is not visible in this
   listing — presumably NULL or CLIB_MEM_VM_MAP_FAILED; confirm in the
   original source. */
393 clib_mem_vm_alloc (uword size)
396 uword flags = MAP_PRIVATE;
399 flags |= MAP_ANONYMOUS;
402 mmap_addr = mmap (0, size, PROT_READ | PROT_WRITE, flags, -1, 0);
403 if (mmap_addr == (void *) -1)
406 CLIB_MEM_UNPOISON (mmap_addr, size);
/* Release address space previously obtained from clib_mem_vm_alloc;
   body (munmap call) not visible in this listing. */
412 clib_mem_vm_free (void *addr, uword size)
/* Tracked VM mapping API: these create/destroy mappings recorded in the
   clib_mem_vm_map_hdr_t list. */
417 void *clib_mem_vm_map_internal (void *base, clib_mem_page_sz_t log2_page_sz,
418 uword size, int fd, uword offset, char *name);
420 void *clib_mem_vm_map (void *start, uword size,
421 clib_mem_page_sz_t log2_page_size, char *fmt, ...);
422 void *clib_mem_vm_map_stack (uword size, clib_mem_page_sz_t log2_page_size,
424 void *clib_mem_vm_map_shared (void *start, uword size, int fd, uword offset,
426 int clib_mem_vm_unmap (void *base);
/* Iterate the map-header list; pass NULL to get the first header. */
427 clib_mem_vm_map_hdr_t *clib_mem_vm_get_next_map_hdr (clib_mem_vm_map_hdr_t *
/* Request/result descriptor for clib_mem_vm_ext_alloc().
   NOTE(review): the `typedef struct` opener is not visible in this
   listing. */
432 #define CLIB_MEM_VM_F_SHARED (1 << 0)
433 #define CLIB_MEM_VM_F_HUGETLB (1 << 1)
434 #define CLIB_MEM_VM_F_NUMA_PREFER (1 << 2)
435 #define CLIB_MEM_VM_F_NUMA_FORCE (1 << 3)
436 #define CLIB_MEM_VM_F_HUGETLB_PREALLOC (1 << 4)
437 #define CLIB_MEM_VM_F_LOCKED (1 << 5)
438 u32 flags; /**< vm allocation flags:
439 <br> CLIB_MEM_VM_F_SHARED: request shared memory, file
440 descriptor will be provided on successful allocation.
441 <br> CLIB_MEM_VM_F_HUGETLB: request hugepages.
442 <br> CLIB_MEM_VM_F_NUMA_PREFER: numa_node field contains valid
443 numa node preference.
444 <br> CLIB_MEM_VM_F_NUMA_FORCE: fail if setting numa policy fails.
445 <br> CLIB_MEM_VM_F_HUGETLB_PREALLOC: pre-allocate hugepages if
446 number of available pages is not sufficient.
447 <br> CLIB_MEM_VM_F_LOCKED: request locked memory.
449 char *name; /**< Name for memory allocation, set by caller. */
450 uword size; /**< Allocation size, set by caller. */
451 int numa_node; /**< numa node preference. Valid if CLIB_MEM_VM_F_NUMA_PREFER set. */
452 void *addr; /**< Pointer to allocated memory, set on successful allocation. */
453 int fd; /**< File descriptor, set on successful allocation if CLIB_MEM_VM_F_SHARED is set. */
454 int log2_page_size; /* Page size in log2 format, set on successful allocation. */
455 int n_pages; /* Number of pages. */
456 uword requested_va; /**< Request fixed position mapping */
457 } clib_mem_vm_alloc_t;
/* Log2 of the system page size, cached in clib_mem_main. */
460 static_always_inline clib_mem_page_sz_t
461 clib_mem_get_log2_page_size (void)
463 return clib_mem_main.log2_page_sz;
/* System page size in bytes. */
466 static_always_inline uword
467 clib_mem_get_page_size (void)
469 return 1ULL << clib_mem_main.log2_page_sz;
/* Log2 of the system default hugepage size, cached in clib_mem_main. */
472 static_always_inline clib_mem_page_sz_t
473 clib_mem_get_log2_default_hugepage_size ()
475 return clib_mem_main.log2_default_hugepage_sz;
/* Miscellaneous VM helpers (defined elsewhere). */
478 int clib_mem_vm_create_fd (clib_mem_page_sz_t log2_page_size, char *fmt, ...);
479 clib_error_t *clib_mem_vm_ext_alloc (clib_mem_vm_alloc_t * a);
480 void clib_mem_vm_ext_free (clib_mem_vm_alloc_t * a);
481 uword clib_mem_get_fd_page_size (int fd);
482 uword clib_mem_get_default_hugepage_size (void);
483 clib_mem_page_sz_t clib_mem_get_fd_log2_page_size (int fd);
484 uword clib_mem_vm_reserve (uword start, uword size,
485 clib_mem_page_sz_t log2_page_sz);
/* Translate virtual pages to physical addresses. */
486 u64 *clib_mem_vm_get_paddr (void *mem, clib_mem_page_sz_t log2_page_size,
488 void clib_mem_destroy (void);
/* NUMA affinity controls for the calling thread/process. */
489 int clib_mem_set_numa_affinity (u8 numa_node, int force);
490 int clib_mem_set_default_numa_affinity ();
/* Request/result descriptor for clib_mem_vm_ext_map().
   NOTE(review): the `typedef struct` opener and the closing
   `} clib_mem_vm_map_t;` are not visible in this listing. */
494 uword size; /**< Map size */
495 int fd; /**< File descriptor to be mapped */
496 uword requested_va; /**< Request fixed position mapping */
497 void *addr; /**< Pointer to mapped memory, if successful */
501 clib_error_t *clib_mem_vm_ext_map (clib_mem_vm_map_t * a);
/* ASLR-style randomization of a requested VA, page-size aware. */
502 void clib_mem_vm_randomize_va (uword * requested_va,
503 clib_mem_page_sz_t log2_page_size);
/* Legacy mheap trace entry points. */
504 void mheap_trace (void *v, int enable);
505 uword clib_mem_trace_enable_disable (uword enable);
506 void clib_mem_trace (int enable);
/* Round size up to a multiple of the given page size, resolving the
   DEFAULT / DEFAULT_HUGE symbolic values first.  UNKNOWN is not allowed. */
509 clib_mem_round_to_page_size (uword size, clib_mem_page_sz_t log2_page_size)
511 ASSERT (log2_page_size != CLIB_MEM_PAGE_SZ_UNKNOWN);
513 if (log2_page_size == CLIB_MEM_PAGE_SZ_DEFAULT)
514 log2_page_size = clib_mem_get_log2_page_size ();
515 else if (log2_page_size == CLIB_MEM_PAGE_SZ_DEFAULT_HUGE)
516 log2_page_size = clib_mem_get_log2_default_hugepage_size ();
518 return round_pow2 (size, 1ULL << log2_page_size);
/* Per-NUMA page residency counters, filled by clib_mem_get_page_stats().
   NOTE(review): preceding struct fields and opener are missing from this
   listing. */
525 uword per_numa[CLIB_MAX_NUMAS];
527 } clib_mem_page_stats_t;
529 void clib_mem_get_page_stats (void *start, clib_mem_page_sz_t log2_page_size,
530 uword n_pages, clib_mem_page_stats_t * stats);
/* Next available NUMA node after `numa`, scanning the availability
   bitmap; wrap-around / not-found handling is in lines not visible in
   this listing. */
532 static_always_inline int
533 vlib_mem_get_next_numa_node (int numa)
535 clib_mem_main_t *mm = &clib_mem_main;
536 u32 bitmap = mm->numa_node_bitmap;
/* clear node `numa` and everything below it */
539 bitmap &= ~pow2_mask (numa + 1);
543 return count_trailing_zeros (bitmap);
/* Resolve the symbolic DEFAULT / DEFAULT_HUGE page-size values to the
   concrete log2 sizes; concrete values pass through unchanged. */
546 static_always_inline clib_mem_page_sz_t
547 clib_mem_log2_page_size_validate (clib_mem_page_sz_t log2_page_size)
549 if (log2_page_size == CLIB_MEM_PAGE_SZ_DEFAULT)
550 return clib_mem_get_log2_page_size ();
551 if (log2_page_size == CLIB_MEM_PAGE_SZ_DEFAULT_HUGE)
552 return clib_mem_get_log2_default_hugepage_size ();
553 return log2_page_size;
/* Page size in bytes for a (possibly symbolic) log2 page size.
   NOTE(review): uses `1 <<` (int), unlike the 1ULL shifts above —
   potential overflow for log2 >= 31 on LP64; confirm upstream. */
556 static_always_inline uword
557 clib_mem_page_bytes (clib_mem_page_sz_t log2_page_size)
559 return 1 << clib_mem_log2_page_size_validate (log2_page_size);
/* Most recent error recorded by the memory subsystem. */
562 static_always_inline clib_error_t *
563 clib_mem_get_last_error (void)
565 return clib_mem_main.error;
569 #include <vppinfra/error.h> /* clib_panic */
571 #endif /* _included_clib_mem_h */
574 * fd.io coding-style-patch-verification: ON
577 * eval: (c-set-style "gnu")