/*
 * Copyright (c) 2015 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
/*
  Copyright (c) 2001, 2002, 2003 Eliot Dresselhaus

  Permission is hereby granted, free of charge, to any person obtaining
  a copy of this software and associated documentation files (the
  "Software"), to deal in the Software without restriction, including
  without limitation the rights to use, copy, modify, merge, publish,
  distribute, sublicense, and/or sell copies of the Software, and to
  permit persons to whom the Software is furnished to do so, subject to
  the following conditions:

  The above copyright notice and this permission notice shall be
  included in all copies or substantial portions of the Software.

  THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
  NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
  LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
  OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
  WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
#ifndef _included_clib_mem_h
#define _included_clib_mem_h
#include <stdarg.h>		/* va_list */
#include <unistd.h>
#include <sys/mman.h>		/* mmap, munmap */

#include <vppinfra/clib.h>	/* uword, etc */
#include <vppinfra/clib_error.h>

#include <vppinfra/dlmalloc.h>

#include <vppinfra/os.h>
#include <vppinfra/string.h>	/* memcpy, clib_memset */
#include <vppinfra/sanitizer.h>
#define CLIB_MAX_MHEAPS 256
#define CLIB_MAX_NUMAS 8
typedef enum
{
  CLIB_MEM_PAGE_SZ_UNKNOWN = 0,
  CLIB_MEM_PAGE_SZ_DEFAULT = 1,
  CLIB_MEM_PAGE_SZ_DEFAULT_HUGE = 2,
  CLIB_MEM_PAGE_SZ_4K = 12,
  CLIB_MEM_PAGE_SZ_16K = 14,
  CLIB_MEM_PAGE_SZ_64K = 16,
  CLIB_MEM_PAGE_SZ_1M = 20,
  CLIB_MEM_PAGE_SZ_2M = 21,
  CLIB_MEM_PAGE_SZ_16M = 24,
  CLIB_MEM_PAGE_SZ_32M = 25,
  CLIB_MEM_PAGE_SZ_512M = 29,
  CLIB_MEM_PAGE_SZ_1G = 30,
  CLIB_MEM_PAGE_SZ_16G = 34,
} clib_mem_page_sz_t;
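
/* Values above CLIB_MEM_PAGE_SZ_DEFAULT_HUGE are the log2 of the page size
   in bytes, e.g. CLIB_MEM_PAGE_SZ_2M is 21 because 1 << 21 == 2 MiB.
   Illustrative sketch (not part of the original header) of converting an
   enum value to bytes:

     uword page_bytes = (uword) 1 << (uword) CLIB_MEM_PAGE_SZ_1G;   // 1 << 30

   The _DEFAULT / _DEFAULT_HUGE values are placeholders resolved at runtime
   from clib_mem_main.log2_page_sz / log2_default_hugepage_sz below. */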
typedef struct
{
  /* log2 system page size */
  clib_mem_page_sz_t log2_page_sz;

  /* log2 system default hugepage size */
  clib_mem_page_sz_t log2_default_hugepage_sz;

  /* bitmap of available numa nodes */
  u32 numa_node_bitmap;

  /* per CPU mheaps */
  void *per_cpu_mheaps[CLIB_MAX_MHEAPS];

  /* per NUMA mheaps */
  void *per_numa_mheaps[CLIB_MAX_NUMAS];
} clib_mem_main_t;

extern clib_mem_main_t clib_mem_main;
/* Unspecified NUMA socket */
#define VEC_NUMA_UNSPECIFIED (0xFF)
always_inline void *
clib_mem_get_per_cpu_heap (void)
{
  int cpu = os_get_thread_index ();
  return clib_mem_main.per_cpu_mheaps[cpu];
}

always_inline void *
clib_mem_set_per_cpu_heap (u8 * new_heap)
{
  int cpu = os_get_thread_index ();
  void *old = clib_mem_main.per_cpu_mheaps[cpu];
  clib_mem_main.per_cpu_mheaps[cpu] = new_heap;
  return old;
}

always_inline void *
clib_mem_get_per_numa_heap (u32 numa_id)
{
  ASSERT (numa_id < ARRAY_LEN (clib_mem_main.per_numa_mheaps));
  return clib_mem_main.per_numa_mheaps[numa_id];
}

always_inline void *
clib_mem_set_per_numa_heap (u8 * new_heap)
{
  int numa = os_get_numa_index ();
  void *old = clib_mem_main.per_numa_mheaps[numa];
  clib_mem_main.per_numa_mheaps[numa] = new_heap;
  return old;
}
static_always_inline void
clib_mem_set_thread_index (void)
{
  /*
   * Find an unused slot in the per-cpu-mheaps array,
   * and grab it for this thread. We need to be able to
   * push/pop the thread heap without affecting other thread(s).
   */
  int i;
  if (__os_thread_index != 0)
    return;
  for (i = 0; i < ARRAY_LEN (clib_mem_main.per_cpu_mheaps); i++)
    if (clib_atomic_bool_cmp_and_swap (&clib_mem_main.per_cpu_mheaps[i],
				       0, clib_mem_main.per_cpu_mheaps[0]))
      {
	os_set_thread_index (i);
	break;
      }
  ASSERT (__os_thread_index > 0);
}
always_inline uword
clib_mem_size_nocheck (void *p)
{
  return mspace_usable_size_with_delta (p);
}
/* Memory allocator which may call os_out_of_memory() if it fails */
always_inline void *
clib_mem_alloc_aligned_at_offset (uword size, uword align, uword align_offset,
				  int os_out_of_memory_on_failure)
{
  void *heap, *p;
  uword cpu;

  if (align_offset > align)
    {
      if (align > 0)
	align_offset %= align;
      else
	align_offset = align;
    }

  cpu = os_get_thread_index ();
  heap = clib_mem_main.per_cpu_mheaps[cpu];

  p = mspace_get_aligned (heap, size, align, align_offset);

  if (PREDICT_FALSE (0 == p))
    {
      if (os_out_of_memory_on_failure)
	os_out_of_memory ();
      return 0;
    }

  CLIB_MEM_UNPOISON (p, size);
  return p;
}
/* Memory allocator which calls os_out_of_memory() when it fails */
always_inline void *
clib_mem_alloc (uword size)
{
  return clib_mem_alloc_aligned_at_offset (size, /* align */ 1,
					   /* align_offset */ 0,
					   /* os_out_of_memory */ 1);
}

always_inline void *
clib_mem_alloc_aligned (uword size, uword align)
{
  return clib_mem_alloc_aligned_at_offset (size, align, /* align_offset */ 0,
					   /* os_out_of_memory */ 1);
}

/* Memory allocator which returns null when it fails */
always_inline void *
clib_mem_alloc_or_null (uword size)
{
  return clib_mem_alloc_aligned_at_offset (size, /* align */ 1,
					   /* align_offset */ 0,
					   /* os_out_of_memory */ 0);
}

always_inline void *
clib_mem_alloc_aligned_or_null (uword size, uword align)
{
  return clib_mem_alloc_aligned_at_offset (size, align, /* align_offset */ 0,
					   /* os_out_of_memory */ 0);
}
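
/* Usage sketch (illustrative, not part of the original header): the _or_null
   variants return 0 on failure instead of invoking os_out_of_memory (),
   leaving recovery to the caller.

     u8 *p = clib_mem_alloc_aligned_or_null (1024, 64);
     if (p == 0)
       return 0;    // caller handles the failure
     ...
     clib_mem_free (p);
*/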
/* Memory allocator which panics when it fails.
   Use macro so that clib_panic macro can expand __FUNCTION__ and __LINE__. */
#define clib_mem_alloc_aligned_no_fail(size,align)				\
({										\
  uword _clib_mem_alloc_size = (size);						\
  void * _clib_mem_alloc_p;							\
  _clib_mem_alloc_p = clib_mem_alloc_aligned (_clib_mem_alloc_size, (align));	\
  if (! _clib_mem_alloc_p)							\
    clib_panic ("failed to allocate %d bytes", _clib_mem_alloc_size);		\
  _clib_mem_alloc_p;								\
})

#define clib_mem_alloc_no_fail(size) clib_mem_alloc_aligned_no_fail(size,1)
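
/* Usage sketch (illustrative): the no-fail variants never return null; on
   failure they panic, with __FUNCTION__ / __LINE__ expanding at the caller:

     void *p = clib_mem_alloc_no_fail (128);
*/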
/* Alias to stack allocator for naming consistency. */
#define clib_mem_alloc_stack(bytes) __builtin_alloca(bytes)
always_inline uword
clib_mem_is_heap_object (void *p)
{
  void *heap = clib_mem_get_per_cpu_heap ();

  return mspace_is_heap_object (heap, p);
}
always_inline void
clib_mem_free (void *p)
{
  u8 *heap = clib_mem_get_per_cpu_heap ();

  /* Make sure object is in the correct heap. */
  ASSERT (clib_mem_is_heap_object (p));

  CLIB_MEM_POISON (p, clib_mem_size_nocheck (p));

  mspace_put (heap, p);
}
always_inline void *
clib_mem_realloc (void *p, uword new_size, uword old_size)
{
  /* By default use alloc, copy and free to emulate realloc. */
  void *q = clib_mem_alloc (new_size);
  if (q)
    {
      uword copy_size;
      if (old_size < new_size)
	copy_size = old_size;
      else
	copy_size = new_size;
      clib_memcpy_fast (q, p, copy_size);
      clib_mem_free (p);
    }
  return q;
}
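
/* Usage sketch (illustrative): the caller supplies old_size so the
   emulation knows how many bytes to copy into the new allocation:

     p = clib_mem_realloc (p, 2 * n, n);   // grow p from n to 2*n bytes
*/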
always_inline uword
clib_mem_size (void *p)
{
  ASSERT (clib_mem_is_heap_object (p));
  return clib_mem_size_nocheck (p);
}
always_inline void
clib_mem_free_s (void *p)
{
  uword size = clib_mem_size (p);
  CLIB_MEM_UNPOISON (p, size);
  memset_s_inline (p, size, 0, size);
  clib_mem_free (p);
}
always_inline void *
clib_mem_get_heap (void)
{
  return clib_mem_get_per_cpu_heap ();
}

always_inline void *
clib_mem_set_heap (void *heap)
{
  return clib_mem_set_per_cpu_heap (heap);
}
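
/* Usage sketch (illustrative): clib_mem_set_heap returns the previous heap,
   enabling the save/switch/restore pattern:

     void *oldheap = clib_mem_set_heap (other_heap);
     p = clib_mem_alloc (size);    // allocated from other_heap
     clib_mem_set_heap (oldheap);
*/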
void clib_mem_main_init ();
void *clib_mem_init (void *heap, uword size);
void *clib_mem_init_thread_safe (void *memory, uword memory_size);
void *clib_mem_init_thread_safe_numa (void *memory, uword memory_size,
				      u8 numa);

void clib_mem_exit (void);

uword clib_mem_get_page_size (void);

void clib_mem_validate (void);

void clib_mem_trace (int enable);

int clib_mem_is_traced (void);
typedef struct
{
  /* Total number of objects allocated. */
  uword object_count;

  /* Total allocated bytes.  Bytes used and free.
     used + free = total */
  uword bytes_total, bytes_used, bytes_free;

  /* Number of bytes used by mheap data structure overhead
     (e.g. free lists, mheap header). */
  uword bytes_overhead;

  /* Amount of free space returned to operating system. */
  uword bytes_free_reclaimed;

  /* For malloc which puts small objects in sbrk region and
     large objects in mmap'ed regions. */
  uword bytes_used_sbrk;
  uword bytes_used_mmap;

  /* Max. number of bytes in this heap. */
  uword bytes_max;
} clib_mem_usage_t;
void clib_mem_usage (clib_mem_usage_t * usage);

u8 *format_clib_mem_usage (u8 * s, va_list * args);
/* Allocate virtual address space. */
always_inline void *
clib_mem_vm_alloc (uword size)
{
  void *mmap_addr;
  uword flags = MAP_PRIVATE;

#ifdef MAP_ANONYMOUS
  flags |= MAP_ANONYMOUS;
#endif

  mmap_addr = mmap (0, size, PROT_READ | PROT_WRITE, flags, -1, 0);
  if (mmap_addr == (void *) -1)
    mmap_addr = 0;
  else
    CLIB_MEM_UNPOISON (mmap_addr, size);

  return mmap_addr;
}
always_inline void
clib_mem_vm_free (void *addr, uword size)
{
  munmap (addr, size);
}
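
/* Usage sketch (illustrative): clib_mem_vm_alloc / clib_mem_vm_free pair up
   like mmap / munmap:

     void *va = clib_mem_vm_alloc (1 << 20);
     if (va)
       {
	 ...
	 clib_mem_vm_free (va, 1 << 20);
       }
*/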
always_inline void *
clib_mem_vm_unmap (void *addr, uword size)
{
  void *mmap_addr;
  uword flags = MAP_PRIVATE | MAP_FIXED;

  /* To unmap we "map" with no protection.  If we actually called
     munmap then other callers could steal the address space.  By
     changing to PROT_NONE the kernel can free up the pages which is
     really what we want "unmap" to mean. */
  mmap_addr = mmap (addr, size, PROT_NONE, flags, -1, 0);
  if (mmap_addr == (void *) -1)
    mmap_addr = 0;
  else
    CLIB_MEM_UNPOISON (mmap_addr, size);

  return mmap_addr;
}
always_inline void *
clib_mem_vm_map (void *addr, uword size)
{
  void *mmap_addr;
  uword flags = MAP_PRIVATE | MAP_FIXED | MAP_ANONYMOUS;

  mmap_addr = mmap (addr, size, (PROT_READ | PROT_WRITE), flags, -1, 0);
  if (mmap_addr == (void *) -1)
    mmap_addr = 0;
  else
    CLIB_MEM_UNPOISON (mmap_addr, size);

  return mmap_addr;
}
typedef struct
{
#define CLIB_MEM_VM_F_SHARED (1 << 0)
#define CLIB_MEM_VM_F_HUGETLB (1 << 1)
#define CLIB_MEM_VM_F_NUMA_PREFER (1 << 2)
#define CLIB_MEM_VM_F_NUMA_FORCE (1 << 3)
#define CLIB_MEM_VM_F_HUGETLB_PREALLOC (1 << 4)
#define CLIB_MEM_VM_F_LOCKED (1 << 5)
  u32 flags; /**< vm allocation flags:
                <br> CLIB_MEM_VM_F_SHARED: request shared memory, file
		descriptor will be provided on successful allocation.
                <br> CLIB_MEM_VM_F_HUGETLB: request hugepages.
		<br> CLIB_MEM_VM_F_NUMA_PREFER: numa_node field contains valid
		numa node preference.
		<br> CLIB_MEM_VM_F_NUMA_FORCE: fail if setting numa policy fails.
		<br> CLIB_MEM_VM_F_HUGETLB_PREALLOC: pre-allocate hugepages if
		number of available pages is not sufficient.
		<br> CLIB_MEM_VM_F_LOCKED: request locked memory. */
  char *name; /**< Name for memory allocation, set by caller. */
  uword size; /**< Allocation size, set by caller. */
  int numa_node; /**< numa node preference. Valid if CLIB_MEM_VM_F_NUMA_PREFER set. */
  void *addr; /**< Pointer to allocated memory, set on successful allocation. */
  int fd; /**< File descriptor, set on successful allocation if CLIB_MEM_VM_F_SHARED is set. */
  int log2_page_size; /**< Page size in log2 format, set on successful allocation. */
  int n_pages; /**< Number of pages. */
  uword requested_va; /**< Request fixed position mapping */
} clib_mem_vm_alloc_t;
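
/* Usage sketch (illustrative): fill in the caller-set fields, then call
   clib_mem_vm_ext_alloc (declared below), which returns 0 on success and
   sets addr (and fd, when CLIB_MEM_VM_F_SHARED is requested):

     clib_mem_vm_alloc_t a = { 0 };
     a.name = "example";
     a.size = 4 << 20;
     a.flags = CLIB_MEM_VM_F_SHARED | CLIB_MEM_VM_F_HUGETLB;
     if (clib_mem_vm_ext_alloc (&a) == 0)
       {
	 ...   // use a.addr, a.fd
	 clib_mem_vm_ext_free (&a);
       }
*/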
clib_error_t *clib_mem_create_fd (char *name, int *fdp);
clib_error_t *clib_mem_create_hugetlb_fd (char *name, int *fdp);
clib_error_t *clib_mem_vm_ext_alloc (clib_mem_vm_alloc_t * a);
void clib_mem_vm_ext_free (clib_mem_vm_alloc_t * a);
u64 clib_mem_get_fd_page_size (int fd);
uword clib_mem_get_default_hugepage_size (void);
int clib_mem_get_fd_log2_page_size (int fd);
uword clib_mem_vm_reserve (uword start, uword size,
			   clib_mem_page_sz_t log2_page_sz);
u64 *clib_mem_vm_get_paddr (void *mem, int log2_page_size, int n_pages);
void clib_mem_destroy_mspace (void *mspace);
void clib_mem_destroy (void);
typedef struct
{
  uword size;		/**< Map size */
  int fd;		/**< File descriptor to be mapped */
  uword requested_va;	/**< Request fixed position mapping */
  void *addr;		/**< Pointer to mapped memory, if successful */
} clib_mem_vm_map_t;
clib_error_t *clib_mem_vm_ext_map (clib_mem_vm_map_t * a);
void clib_mem_vm_randomize_va (uword * requested_va,
			       clib_mem_page_sz_t log2_page_size);
void mheap_trace (void *v, int enable);
uword clib_mem_trace_enable_disable (uword enable);
void clib_mem_trace (int enable);
#include <vppinfra/error.h>	/* clib_panic */

#endif /* _included_clib_mem_h */
/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */