/*
 * Copyright (c) 2015 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
/*
  Copyright (c) 2001, 2002, 2003 Eliot Dresselhaus

  Permission is hereby granted, free of charge, to any person obtaining
  a copy of this software and associated documentation files (the
  "Software"), to deal in the Software without restriction, including
  without limitation the rights to use, copy, modify, merge, publish,
  distribute, sublicense, and/or sell copies of the Software, and to
  permit persons to whom the Software is furnished to do so, subject to
  the following conditions:

  The above copyright notice and this permission notice shall be
  included in all copies or substantial portions of the Software.

  THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
  NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
  LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
  OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
  WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
#ifndef _included_clib_mem_h
#define _included_clib_mem_h

#include <stdarg.h>
#include <unistd.h>
#include <sys/mman.h>		/* mmap, munmap, MAP_* flags */

#include <vppinfra/clib.h>	/* uword, etc */
#include <vppinfra/clib_error.h>

#include <vppinfra/dlmalloc.h>

#include <vppinfra/os.h>
#include <vppinfra/string.h>	/* memcpy, clib_memset */
#include <vppinfra/sanitizer.h>
#define CLIB_MAX_MHEAPS 256
#define CLIB_MAX_NUMAS 8
/* Log2 page sizes; the byte size of a page is 1 << value. */
typedef enum
{
  CLIB_MEM_PAGE_SZ_UNKNOWN = 0,
  CLIB_MEM_PAGE_SZ_DEFAULT = 1,
  CLIB_MEM_PAGE_SZ_DEFAULT_HUGE = 2,
  CLIB_MEM_PAGE_SZ_4K = 12,
  CLIB_MEM_PAGE_SZ_16K = 14,
  CLIB_MEM_PAGE_SZ_64K = 16,
  CLIB_MEM_PAGE_SZ_1M = 20,
  CLIB_MEM_PAGE_SZ_2M = 21,
  CLIB_MEM_PAGE_SZ_16M = 24,
  CLIB_MEM_PAGE_SZ_32M = 25,
  CLIB_MEM_PAGE_SZ_512M = 29,
  CLIB_MEM_PAGE_SZ_1G = 30,
  CLIB_MEM_PAGE_SZ_16G = 34,
} clib_mem_page_sz_t;
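
/* A minimal sketch of the log2 encoding above (illustrative, not part
   of the original header):

     uword log2_sz = CLIB_MEM_PAGE_SZ_2M;   // 21
     uword nbytes = 1ULL << log2_sz;        // 2097152 bytes == 2 MB
*/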
typedef struct _clib_mem_main
{
  /* per CPU heaps */
  void *per_cpu_mheaps[CLIB_MAX_MHEAPS];

  /* per NUMA heaps */
  void *per_numa_mheaps[CLIB_MAX_NUMAS];
} clib_mem_main_t;

extern clib_mem_main_t clib_mem_main;
/* Unspecified NUMA socket */
#define VEC_NUMA_UNSPECIFIED (0xFF)
always_inline void *
clib_mem_get_per_cpu_heap (void)
{
  int cpu = os_get_thread_index ();
  return clib_mem_main.per_cpu_mheaps[cpu];
}
always_inline void *
clib_mem_set_per_cpu_heap (u8 * new_heap)
{
  int cpu = os_get_thread_index ();
  void *old = clib_mem_main.per_cpu_mheaps[cpu];
  clib_mem_main.per_cpu_mheaps[cpu] = new_heap;
  return old;
}
always_inline void *
clib_mem_get_per_numa_heap (u32 numa_id)
{
  ASSERT (numa_id < ARRAY_LEN (clib_mem_main.per_numa_mheaps));
  return clib_mem_main.per_numa_mheaps[numa_id];
}
always_inline void *
clib_mem_set_per_numa_heap (u8 * new_heap)
{
  int numa = os_get_numa_index ();
  void *old = clib_mem_main.per_numa_mheaps[numa];
  clib_mem_main.per_numa_mheaps[numa] = new_heap;
  return old;
}
always_inline void
clib_mem_set_thread_index (void)
{
  /*
   * Find an unused slot in the per-cpu-mheaps array,
   * and grab it for this thread. We need to be able to
   * push/pop the thread heap without affecting other thread(s).
   */
  int i;
  if (__os_thread_index != 0)
    return;
  for (i = 0; i < ARRAY_LEN (clib_mem_main.per_cpu_mheaps); i++)
    if (clib_atomic_bool_cmp_and_swap (&clib_mem_main.per_cpu_mheaps[i],
				       0, clib_mem_main.per_cpu_mheaps[0]))
      {
	os_set_thread_index (i);
	break;
      }
  ASSERT (__os_thread_index > 0);
}
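
/* Usage sketch (an assumption about the caller, not from this header):
   a newly spawned worker claims its mheap slot once, at thread startup,
   before any per-thread heap operations:

     static void *
     my_worker_fn (void *arg)          // hypothetical thread entry point
     {
       clib_mem_set_thread_index ();   // grab a per-cpu mheap slot
       return 0;
     }
*/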
always_inline uword
clib_mem_size_nocheck (void *p)
{
  return mspace_usable_size_with_delta (p);
}
/* Memory allocator which may call os_out_of_memory() if it fails */
always_inline void *
clib_mem_alloc_aligned_at_offset (uword size, uword align, uword align_offset,
				  int os_out_of_memory_on_failure)
{
  void *heap, *p;
  uword cpu;

  if (align_offset > align)
    {
      if (align > 0)
	align_offset %= align;
      else
	align_offset = align;
    }

  cpu = os_get_thread_index ();
  heap = clib_mem_main.per_cpu_mheaps[cpu];

  p = mspace_get_aligned (heap, size, align, align_offset);

  if (PREDICT_FALSE (0 == p))
    {
      if (os_out_of_memory_on_failure)
	os_out_of_memory ();
      return 0;
    }

  CLIB_MEM_UNPOISON (p, size);
  return p;
}
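
/* A sketch of the align_offset mechanics (illustrative; my_obj_t is a
   hypothetical type): the allocator aligns p + align_offset rather than
   p itself, so passing a member's offset aligns that member:

     typedef struct { u32 hdr; u8 data[64]; } my_obj_t;
     my_obj_t *o = clib_mem_alloc_aligned_at_offset
       (sizeof (my_obj_t), 64, STRUCT_OFFSET_OF (my_obj_t, data), 1);
     // o->data is now 64-byte aligned; the final 1 requests
     // os_out_of_memory () on failure.
*/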
/* Memory allocator which calls os_out_of_memory() when it fails */
always_inline void *
clib_mem_alloc (uword size)
{
  return clib_mem_alloc_aligned_at_offset (size, /* align */ 1,
					   /* align_offset */ 0,
					   /* os_out_of_memory */ 1);
}

always_inline void *
clib_mem_alloc_aligned (uword size, uword align)
{
  return clib_mem_alloc_aligned_at_offset (size, align, /* align_offset */ 0,
					   /* os_out_of_memory */ 1);
}
/* Memory allocator which returns null when it fails */
always_inline void *
clib_mem_alloc_or_null (uword size)
{
  return clib_mem_alloc_aligned_at_offset (size, /* align */ 1,
					   /* align_offset */ 0,
					   /* os_out_of_memory */ 0);
}

always_inline void *
clib_mem_alloc_aligned_or_null (uword size, uword align)
{
  return clib_mem_alloc_aligned_at_offset (size, align, /* align_offset */ 0,
					   /* os_out_of_memory */ 0);
}
/* Memory allocator which panics when it fails.
   Use macro so that clib_panic macro can expand __FUNCTION__ and __LINE__. */
#define clib_mem_alloc_aligned_no_fail(size,align)				\
({										\
  uword _clib_mem_alloc_size = (size);						\
  void * _clib_mem_alloc_p;							\
  _clib_mem_alloc_p = clib_mem_alloc_aligned (_clib_mem_alloc_size, (align));	\
  if (! _clib_mem_alloc_p)							\
    clib_panic ("failed to allocate %d bytes", _clib_mem_alloc_size);		\
  _clib_mem_alloc_p;								\
})

#define clib_mem_alloc_no_fail(size) clib_mem_alloc_aligned_no_fail(size,1)
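
/* Usage sketch (illustrative): the no-fail variants panic instead of
   returning 0, and the panic message carries this call site's
   __FUNCTION__ and __LINE__:

     u8 *table = clib_mem_alloc_aligned_no_fail (1 << 16, 64);
*/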
/* Alias to stack allocator for naming consistency. */
#define clib_mem_alloc_stack(bytes) __builtin_alloca(bytes)
always_inline uword
clib_mem_is_heap_object (void *p)
{
  void *heap = clib_mem_get_per_cpu_heap ();

  return mspace_is_heap_object (heap, p);
}
always_inline void
clib_mem_free (void *p)
{
  u8 *heap = clib_mem_get_per_cpu_heap ();

  /* Make sure object is in the correct heap. */
  ASSERT (clib_mem_is_heap_object (p));

  CLIB_MEM_POISON (p, clib_mem_size_nocheck (p));

  mspace_put (heap, p);
}
always_inline void *
clib_mem_realloc (void *p, uword new_size, uword old_size)
{
  /* By default use alloc, copy and free to emulate realloc. */
  void *q = clib_mem_alloc (new_size);
  if (q)
    {
      uword copy_size;
      if (old_size < new_size)
	copy_size = old_size;
      else
	copy_size = new_size;
      clib_memcpy_fast (q, p, copy_size);
      clib_mem_free (p);
    }
  return q;
}
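
/* Usage sketch (illustrative): because realloc is emulated with alloc,
   copy and free, the caller must supply the old size; the first
   min (old_size, new_size) bytes are preserved:

     u8 *buf = clib_mem_alloc (64);
     buf = clib_mem_realloc (buf, 128, 64);   // grow; first 64 bytes kept
*/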
always_inline uword
clib_mem_size (void *p)
{
  ASSERT (clib_mem_is_heap_object (p));
  return clib_mem_size_nocheck (p);
}
always_inline void
clib_mem_free_s (void *p)
{
  uword size = clib_mem_size (p);
  CLIB_MEM_UNPOISON (p, size);
  memset_s_inline (p, size, 0, size);
  clib_mem_free (p);
}
always_inline void *
clib_mem_get_heap (void)
{
  return clib_mem_get_per_cpu_heap ();
}

always_inline void *
clib_mem_set_heap (void *heap)
{
  return clib_mem_set_per_cpu_heap (heap);
}
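
/* Usage sketch (illustrative; my_private_heap is a hypothetical heap
   pointer): temporarily push a private heap, then restore the old one:

     void *old_heap = clib_mem_set_heap (my_private_heap);
     void *obj = clib_mem_alloc (128);   // served from my_private_heap
     clib_mem_set_heap (old_heap);       // later allocs use the old heap
*/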
void *clib_mem_init (void *heap, uword size);
void *clib_mem_init_thread_safe (void *memory, uword memory_size);
void *clib_mem_init_thread_safe_numa (void *memory, uword memory_size,
				      u8 numa);

void clib_mem_exit (void);
uword clib_mem_get_page_size (void);

void clib_mem_validate (void);

void clib_mem_trace (int enable);

int clib_mem_is_traced (void);
typedef struct
{
  /* Total number of objects allocated. */
  uword object_count;

  /* Total allocated bytes.  Bytes used and free.
     used + free = total */
  uword bytes_total, bytes_used, bytes_free;

  /* Number of bytes used by mheap data structure overhead
     (e.g. free lists, mheap header). */
  uword bytes_overhead;

  /* Amount of free space returned to operating system. */
  uword bytes_free_reclaimed;

  /* For malloc which puts small objects in sbrk region and
     large objects in mmap'ed regions. */
  uword bytes_used_sbrk;
  uword bytes_used_mmap;

  /* Max. number of bytes in this heap. */
  uword bytes_max;
} clib_mem_usage_t;
void clib_mem_usage (clib_mem_usage_t * usage);

u8 *format_clib_mem_usage (u8 * s, va_list * args);
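
/* Usage sketch (illustrative): collect statistics for the current heap;
   per the field comments above, bytes_used + bytes_free == bytes_total:

     clib_mem_usage_t u;
     clib_mem_usage (&u);
*/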
/* Allocate virtual address space. */
always_inline void *
clib_mem_vm_alloc (uword size)
{
  void *mmap_addr;
  uword flags = MAP_PRIVATE;

#ifdef MAP_ANONYMOUS
  flags |= MAP_ANONYMOUS;
#endif

  mmap_addr = mmap (0, size, PROT_READ | PROT_WRITE, flags, -1, 0);
  if (mmap_addr == (void *) -1)
    mmap_addr = 0;
  else
    CLIB_MEM_UNPOISON (mmap_addr, size);

  return mmap_addr;
}
always_inline void
clib_mem_vm_free (void *addr, uword size)
{
  munmap (addr, size);
}
always_inline void *
clib_mem_vm_unmap (void *addr, uword size)
{
  void *mmap_addr;
  uword flags = MAP_PRIVATE | MAP_FIXED;

  /* To unmap we "map" with no protection.  If we actually called
     munmap then other callers could steal the address space.  By
     changing to PROT_NONE the kernel can free up the pages which is
     really what we want "unmap" to mean. */
  mmap_addr = mmap (addr, size, PROT_NONE, flags, -1, 0);
  if (mmap_addr == (void *) -1)
    mmap_addr = 0;
  else
    CLIB_MEM_UNPOISON (mmap_addr, size);

  return mmap_addr;
}
always_inline void *
clib_mem_vm_map (void *addr, uword size)
{
  void *mmap_addr;
  uword flags = MAP_PRIVATE | MAP_FIXED | MAP_ANONYMOUS;

  mmap_addr = mmap (addr, size, (PROT_READ | PROT_WRITE), flags, -1, 0);
  if (mmap_addr == (void *) -1)
    mmap_addr = 0;
  else
    CLIB_MEM_UNPOISON (mmap_addr, size);

  return mmap_addr;
}
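
/* Usage sketch (illustrative) of the reserve/decommit/commit pattern
   the comment above describes: clib_mem_vm_unmap keeps ownership of the
   range via PROT_NONE, and clib_mem_vm_map commits pages back in place:

     void *base = clib_mem_vm_alloc (1 << 20);   // get 1 MB of VA space
     if (base)
       {
         clib_mem_vm_unmap (base, 1 << 20);      // release pages, keep VA
         clib_mem_vm_map (base, 4096);           // re-commit first page
       }
*/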
typedef struct
{
#define CLIB_MEM_VM_F_SHARED (1 << 0)
#define CLIB_MEM_VM_F_HUGETLB (1 << 1)
#define CLIB_MEM_VM_F_NUMA_PREFER (1 << 2)
#define CLIB_MEM_VM_F_NUMA_FORCE (1 << 3)
#define CLIB_MEM_VM_F_HUGETLB_PREALLOC (1 << 4)
#define CLIB_MEM_VM_F_LOCKED (1 << 5)
  u32 flags; /**< vm allocation flags:
                <br> CLIB_MEM_VM_F_SHARED: request shared memory, file
                descriptor will be provided on successful allocation.
                <br> CLIB_MEM_VM_F_HUGETLB: request hugepages.
                <br> CLIB_MEM_VM_F_NUMA_PREFER: numa_node field contains valid
                numa node preference.
                <br> CLIB_MEM_VM_F_NUMA_FORCE: fail if setting numa policy fails.
                <br> CLIB_MEM_VM_F_HUGETLB_PREALLOC: pre-allocate hugepages if
                number of available pages is not sufficient.
                <br> CLIB_MEM_VM_F_LOCKED: request locked memory.
             */
  char *name; /**< Name for memory allocation, set by caller. */
  uword size; /**< Allocation size, set by caller. */
  int numa_node; /**< numa node preference. Valid if CLIB_MEM_VM_F_NUMA_PREFER set. */
  void *addr; /**< Pointer to allocated memory, set on successful allocation. */
  int fd; /**< File descriptor, set on successful allocation if CLIB_MEM_VM_F_SHARED is set. */
  int log2_page_size; /**< Page size in log2 format, set on successful allocation. */
  int n_pages; /**< Number of pages. */
  uword requested_va; /**< Request fixed position mapping */
} clib_mem_vm_alloc_t;
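
/* Usage sketch (illustrative; the name and sizes are assumptions):
   request 16 MB of shared, hugepage-backed memory, preferably on NUMA
   node 0:

     clib_mem_vm_alloc_t a = { 0 };
     a.name = "my-shared-mem";
     a.size = 16 << 20;
     a.numa_node = 0;
     a.flags = CLIB_MEM_VM_F_SHARED | CLIB_MEM_VM_F_HUGETLB
	       | CLIB_MEM_VM_F_NUMA_PREFER;
     clib_error_t *err = clib_mem_vm_ext_alloc (&a);
     if (err == 0)
       {
         // a.addr, a.fd, a.log2_page_size and a.n_pages are now set
       }
*/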
clib_error_t *clib_mem_create_fd (char *name, int *fdp);
clib_error_t *clib_mem_create_hugetlb_fd (char *name, int *fdp);
clib_error_t *clib_mem_vm_ext_alloc (clib_mem_vm_alloc_t * a);
void clib_mem_vm_ext_free (clib_mem_vm_alloc_t * a);
u64 clib_mem_get_fd_page_size (int fd);
uword clib_mem_get_default_hugepage_size (void);
int clib_mem_get_fd_log2_page_size (int fd);
uword clib_mem_vm_reserve (uword start, uword size,
			   clib_mem_page_sz_t log2_page_sz);
u64 *clib_mem_vm_get_paddr (void *mem, int log2_page_size, int n_pages);
void clib_mem_destroy_mspace (void *mspace);
void clib_mem_destroy (void);
typedef struct
{
  uword size;		/**< Map size */
  int fd;		/**< File descriptor to be mapped */
  uword requested_va;	/**< Request fixed position mapping */
  void *addr;		/**< Pointer to mapped memory, if successful */
} clib_mem_vm_map_t;

clib_error_t *clib_mem_vm_ext_map (clib_mem_vm_map_t * a);
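
/* Usage sketch (illustrative): map a file descriptor obtained from a
   CLIB_MEM_VM_F_SHARED allocation; shared_fd and the size here are
   assumed to come from a previous clib_mem_vm_ext_alloc call:

     clib_mem_vm_map_t m = { 0 };
     m.fd = shared_fd;     // hypothetical fd from a shared allocation
     m.size = 16 << 20;
     clib_error_t *err = clib_mem_vm_ext_map (&m);
     // on success, m.addr points to the new mapping
*/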
void clib_mem_vm_randomize_va (uword * requested_va,
			       clib_mem_page_sz_t log2_page_size);
void mheap_trace (void *v, int enable);
uword clib_mem_trace_enable_disable (uword enable);
#include <vppinfra/error.h>	/* clib_panic */

#endif /* _included_clib_mem_h */
/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */