2 * Copyright (c) 2015 Cisco and/or its affiliates.
3 * Licensed under the Apache License, Version 2.0 (the "License");
4 * you may not use this file except in compliance with the License.
5 * You may obtain a copy of the License at:
7 * http://www.apache.org/licenses/LICENSE-2.0
9 * Unless required by applicable law or agreed to in writing, software
10 * distributed under the License is distributed on an "AS IS" BASIS,
11 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 * See the License for the specific language governing permissions and
13 * limitations under the License.
16 Copyright (c) 2001, 2002, 2003 Eliot Dresselhaus
18 Permission is hereby granted, free of charge, to any person obtaining
19 a copy of this software and associated documentation files (the
20 "Software"), to deal in the Software without restriction, including
21 without limitation the rights to use, copy, modify, merge, publish,
22 distribute, sublicense, and/or sell copies of the Software, and to
23 permit persons to whom the Software is furnished to do so, subject to
24 the following conditions:
26 The above copyright notice and this permission notice shall be
27 included in all copies or substantial portions of the Software.
29 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
30 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
31 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
32 NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
33 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
34 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
35 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
38 #ifndef _included_clib_mem_h
39 #define _included_clib_mem_h
45 #include <vppinfra/clib.h> /* uword, etc */
46 #include <vppinfra/clib_error.h>
49 #include <vppinfra/mheap_bootstrap.h>
51 #include <vppinfra/dlmalloc.h>
54 #include <vppinfra/os.h>
55 #include <vppinfra/string.h> /* memcpy, clib_memset */
56 #include <vppinfra/valgrind.h>
58 #define CLIB_MAX_MHEAPS 256
61 extern void *clib_per_cpu_mheaps[CLIB_MAX_MHEAPS];
/* Claim a private slot in clib_per_cpu_mheaps[] for the calling thread.
   Slot 0 belongs to the main thread; workers race via CAS for a free slot.
   NOTE(review): braces, the loop index declaration and an early return are
   elided from this view; code lines below are kept verbatim. */
clib_mem_set_thread_index (void)
  /*
   * Find an unused slot in the per-cpu-mheaps array,
   * and grab it for this thread. We need to be able to
   * push/pop the thread heap without affecting other thread(s).
   */
  /* Thread index already assigned — nothing to do. */
  if (__os_thread_index != 0)
  /* CAS publishes the main heap pointer (slot 0) into the claimed slot,
     so a new thread starts out using the main heap. */
  for (i = 0; i < ARRAY_LEN (clib_per_cpu_mheaps); i++)
    if (clib_atomic_bool_cmp_and_swap (&clib_per_cpu_mheaps[i],
				       0, clib_per_cpu_mheaps[0]))
      os_set_thread_index (i);
  /* A worker thread must have landed in a slot other than 0. */
  ASSERT (__os_thread_index > 0);
/* Return the heap currently installed for the calling thread. */
clib_mem_get_per_cpu_heap (void)
  int cpu = os_get_thread_index ();
  return clib_per_cpu_mheaps[cpu];
/* Install new_heap as the calling thread's heap.
   NOTE(review): 'old' is presumably returned by an elided 'return old;'
   line (clib_mem_set_heap below returns this function's result) — verify
   against the full source. */
clib_mem_set_per_cpu_heap (u8 * new_heap)
  int cpu = os_get_thread_index ();
  /* Remember the previous heap so callers can push/pop heaps. */
  void *old = clib_per_cpu_mheaps[cpu];
  clib_per_cpu_mheaps[cpu] = new_heap;
/* Memory allocator which may call os_out_of_memory() if it fails */
/* Core allocator: allocate 'size' bytes from the calling thread's heap such
   that (result + align_offset) is 'align'-aligned.  When
   os_out_of_memory_on_failure is set, failure escalates to
   os_out_of_memory(); otherwise failure yields 0 (via elided paths).
   NOTE(review): local declarations, braces, '#else' and the return/retry
   logic are elided from this view; code lines are kept verbatim. */
clib_mem_alloc_aligned_at_offset (uword size, uword align, uword align_offset,
				  int os_out_of_memory_on_failure)
  /* Normalize align_offset so it falls inside one alignment period. */
  if (align_offset > align)
      align_offset %= align;
      /* ('else' branch, elided) */
      align_offset = align;
  cpu = os_get_thread_index ();
  heap = clib_per_cpu_mheaps[cpu];
#if USE_DLMALLOC == 0
  /* mheap may grow/relocate; write the possibly-new heap pointer back. */
  heap = mheap_get_aligned (heap, size, align, align_offset, &offset);
  clib_per_cpu_mheaps[cpu] = heap;
  /* Annotate the user-visible bytes of this allocation for valgrind. */
  VALGRIND_MALLOCLIKE_BLOCK (p, mheap_data_bytes (heap, offset), 0, 0);
  /* (failure path, partially elided) */
  if (os_out_of_memory_on_failure)
  /* ('#else' — dlmalloc/mspace path, marker elided) */
  p = mspace_get_aligned (heap, size, align, align_offset);
  if (PREDICT_FALSE (p == 0))
    if (os_out_of_memory_on_failure)
#endif /* USE_DLMALLOC */
/* Memory allocator which calls os_out_of_memory() when it fails */
/* Unaligned (1-byte-aligned) allocation of 'size' bytes. */
clib_mem_alloc (uword size)
  return clib_mem_alloc_aligned_at_offset (size, /* align */ 1,
					   /* align_offset */ 0,
					   /* os_out_of_memory */ 1);
/* Allocate 'size' bytes aligned to 'align'; calls os_out_of_memory()
   on failure. */
clib_mem_alloc_aligned (uword size, uword align)
  return clib_mem_alloc_aligned_at_offset (size, align, /* align_offset */ 0,
					   /* os_out_of_memory */ 1);
/* Memory allocator which returns 0 when it fails — does NOT call
   os_out_of_memory() (note the final argument below). */
clib_mem_alloc_or_null (uword size)
  return clib_mem_alloc_aligned_at_offset (size, /* align */ 1,
					   /* align_offset */ 0,
					   /* os_out_of_memory */ 0);
/* Aligned allocation that returns 0 on failure instead of calling
   os_out_of_memory(). */
clib_mem_alloc_aligned_or_null (uword size, uword align)
  return clib_mem_alloc_aligned_at_offset (size, align, /* align_offset */ 0,
					   /* os_out_of_memory */ 0);
/* Memory allocator which panics when it fails.
   Use macro so that clib_panic macro can expand __FUNCTION__ and __LINE__. */
/* NOTE(review): the enclosing statement-expression delimiters '({' / '})'
   and the final '_clib_mem_alloc_p;' result line are elided from this
   view; the continuation lines below are kept byte-identical. */
#define clib_mem_alloc_aligned_no_fail(size,align) \
  uword _clib_mem_alloc_size = (size); \
  void * _clib_mem_alloc_p; \
  _clib_mem_alloc_p = clib_mem_alloc_aligned (_clib_mem_alloc_size, (align)); \
  if (! _clib_mem_alloc_p) \
    clib_panic ("failed to allocate %d bytes", _clib_mem_alloc_size); \

/* 1-byte-aligned variant of the panic-on-failure allocator. */
#define clib_mem_alloc_no_fail(size) clib_mem_alloc_aligned_no_fail(size,1)
/* Alias to stack allocator for naming consistency. */
/* NOTE: alloca storage is released when the calling function returns;
   the pointer must not escape the caller's frame. */
#define clib_mem_alloc_stack(bytes) __builtin_alloca(bytes)
/* Predicate: does 'p' point at a live object in the calling thread's heap?
   NOTE(review): braces, element declarations, an early 'return 0;' and the
   '#else' marker are elided from this view; code lines are kept verbatim. */
clib_mem_is_heap_object (void *p)
#if USE_DLMALLOC == 0
  void *heap = clib_mem_get_per_cpu_heap ();
  uword offset = (uword) p - (uword) heap;
  /* A pointer past the end of the heap cannot be a heap object. */
  if (offset >= vec_len (heap))
  e = mheap_elt_at_uoffset (heap, offset);
  n = mheap_next_elt (e);
  /* Check that heap forward and reverse pointers agree. */
  return e->n_user_data == n->prev_n_user_data;
  /* ('#else' — dlmalloc path, marker elided) */
  void *heap = clib_mem_get_per_cpu_heap ();
  return mspace_is_heap_object (heap, p);
#endif /* USE_DLMALLOC */
/* Return object 'p' to the calling thread's heap.
   NOTE(review): braces and the '#else'/'#endif' markers are elided from
   this view; code lines are kept verbatim. */
clib_mem_free (void *p)
  u8 *heap = clib_mem_get_per_cpu_heap ();
  /* Make sure object is in the correct heap. */
  ASSERT (clib_mem_is_heap_object (p));
#if USE_DLMALLOC == 0
  /* mheap takes a heap-relative byte offset, not a pointer. */
  mheap_put (heap, (u8 *) p - heap);
  /* ('#else' — dlmalloc path, marker elided) */
  mspace_put (heap, p);
  /* Pair with the MALLOCLIKE annotation made at allocation time. */
  VALGRIND_FREELIKE_BLOCK (p, 0);
/* Resize 'p' (of size 'old_size') to 'new_size'.
   NOTE(review): the 'else' keyword, the free of the original block and the
   return of 'q' are elided from this view — presumably the elided code
   frees 'p' and returns 'q'; verify against the full source. */
clib_mem_realloc (void *p, uword new_size, uword old_size)
  /* By default use alloc, copy and free to emulate realloc. */
  void *q = clib_mem_alloc (new_size);
  /* Copy min (old_size, new_size) bytes into the new block. */
  if (old_size < new_size)
    copy_size = old_size;
  /* ('else' branch, keyword elided) */
    copy_size = new_size;
  clib_memcpy_fast (q, p, copy_size);
/* Return the usable size in bytes of heap object 'p'.
   NOTE(review): the '#else'/'#endif' markers are elided from this view. */
clib_mem_size (void *p)
#if USE_DLMALLOC == 0
  mheap_elt_t *e = mheap_user_pointer_to_elt (p);
  ASSERT (clib_mem_is_heap_object (p));
  return mheap_elt_data_bytes (e);
  /* ('#else' — dlmalloc path, marker elided) */
  ASSERT (clib_mem_is_heap_object (p));
  return mspace_usable_size_with_delta (p);
/* Scrub then free: zero the object's bytes before releasing it, for data
   that must not linger in freed memory.  memset_s_inline presumably
   guarantees the store is not optimized away — confirm its contract.
   NOTE(review): the actual clib_mem_free (p) call is elided from this
   view. */
clib_mem_free_s (void *p)
  uword size = clib_mem_size (p);
  memset_s_inline (p, size, 0, size);
/* Public accessor: the calling thread's current heap. */
clib_mem_get_heap (void)
  return clib_mem_get_per_cpu_heap ();
/* Public mutator: install 'heap' for the calling thread, returning the
   previously installed heap so it can be restored (push/pop pattern). */
clib_mem_set_heap (void *heap)
  return clib_mem_set_per_cpu_heap (heap);
295 void *clib_mem_init (void *heap, uword size);
296 void *clib_mem_init_thread_safe (void *memory, uword memory_size);
298 void clib_mem_exit (void);
300 uword clib_mem_get_page_size (void);
302 void clib_mem_validate (void);
304 void clib_mem_trace (int enable);
306 int clib_mem_is_traced (void);
/* Heap usage statistics, filled in by clib_mem_usage() below.
   NOTE(review): the enclosing 'typedef struct' opener/closer and at least
   one field (the object count) are elided from this view; visible field
   declarations are kept verbatim. */
  /* Total number of objects allocated. */
  /* Total allocated bytes.  Bytes used and free.
     used + free = total */
  uword bytes_total, bytes_used, bytes_free;
  /* Number of bytes used by mheap data structure overhead
     (e.g. free lists, mheap header). */
  uword bytes_overhead;
  /* Amount of free space returned to operating system. */
  uword bytes_free_reclaimed;
  /* For malloc which puts small objects in sbrk region and
     large objects in mmap'ed regions. */
  uword bytes_used_sbrk;
  uword bytes_used_mmap;
  /* Max. number of bytes in this heap. */
333 void clib_mem_usage (clib_mem_usage_t * usage);
335 u8 *format_clib_mem_usage (u8 * s, va_list * args);
/* Allocate virtual address space. */
/* Anonymous, private, read/write mapping of 'size' bytes at a
   kernel-chosen address.
   NOTE(review): the mmap_addr declaration and both return paths are
   elided from this view; code lines are kept verbatim. */
clib_mem_vm_alloc (uword size)
  uword flags = MAP_PRIVATE;
  flags |= MAP_ANONYMOUS;
  mmap_addr = mmap (0, size, PROT_READ | PROT_WRITE, flags, -1, 0);
  /* MAP_FAILED check; failure path elided. */
  if (mmap_addr == (void *) -1)
/* Release address space obtained from clib_mem_vm_alloc.
   NOTE(review): the body (presumably a munmap call) is elided from this
   view. */
clib_mem_vm_free (void *addr, uword size)
/* Logically unmap [addr, addr+size) while keeping the address range
   reserved for this process.
   NOTE(review): fd is -1 but MAP_ANONYMOUS is not set on this mmap —
   some kernels reject that combination; verify against the full source.
   The mmap_addr declaration and failure path are elided from this view. */
clib_mem_vm_unmap (void *addr, uword size)
  uword flags = MAP_PRIVATE | MAP_FIXED;
  /* To unmap we "map" with no protection. If we actually called
     munmap then other callers could steal the address space. By
     changing to PROT_NONE the kernel can free up the pages which is
     really what we want "unmap" to mean. */
  mmap_addr = mmap (addr, size, PROT_NONE, flags, -1, 0);
  /* MAP_FAILED check; failure path elided. */
  if (mmap_addr == (void *) -1)
/* (Re-)map 'size' bytes read/write at the fixed address 'addr' —
   the inverse of clib_mem_vm_unmap above.
   NOTE(review): the mmap_addr declaration and both return paths are
   elided from this view; code lines are kept verbatim. */
clib_mem_vm_map (void *addr, uword size)
  /* MAP_FIXED: the kernel must place the mapping exactly at 'addr'. */
  uword flags = MAP_PRIVATE | MAP_FIXED | MAP_ANONYMOUS;
  mmap_addr = mmap (addr, size, (PROT_READ | PROT_WRITE), flags, -1, 0);
  /* MAP_FAILED check; failure path elided. */
  if (mmap_addr == (void *) -1)
/* Request/result descriptor for clib_mem_vm_ext_alloc().
   NOTE(review): the enclosing 'typedef struct' opener is elided from this
   view; field declarations are kept verbatim. */
#define CLIB_MEM_VM_F_SHARED (1 << 0)
#define CLIB_MEM_VM_F_HUGETLB (1 << 1)
#define CLIB_MEM_VM_F_NUMA_PREFER (1 << 2)
#define CLIB_MEM_VM_F_NUMA_FORCE (1 << 3)
#define CLIB_MEM_VM_F_HUGETLB_PREALLOC (1 << 4)
#define CLIB_MEM_VM_F_LOCKED (1 << 5)
  u32 flags; /**< vm allocation flags:
                <br> CLIB_MEM_VM_F_SHARED: request shared memory, file
                descriptor will be provided on successful allocation.
                <br> CLIB_MEM_VM_F_HUGETLB: request hugepages.
                <br> CLIB_MEM_VM_F_NUMA_PREFER: numa_node field contains valid
                numa node preference.
                <br> CLIB_MEM_VM_F_NUMA_FORCE: fail if setting numa policy fails.
                <br> CLIB_MEM_VM_F_HUGETLB_PREALLOC: pre-allocate hugepages if
                number of available pages is not sufficient.
                <br> CLIB_MEM_VM_F_LOCKED: request locked memory.
             */
  char *name; /**< Name for memory allocation, set by caller. */
  uword size; /**< Allocation size, set by caller. */
  int numa_node; /**< numa node preference. Valid if CLIB_MEM_VM_F_NUMA_PREFER set. */
  void *addr; /**< Pointer to allocated memory, set on successful allocation. */
  int fd; /**< File descriptor, set on successful allocation if CLIB_MEM_VM_F_SHARED is set. */
  int log2_page_size; /**< Page size in log2 format, set on successful allocation. */
  int n_pages; /**< Number of pages. */
  uword requested_va; /**< Request fixed position mapping */
} clib_mem_vm_alloc_t;
420 clib_error_t *clib_mem_create_fd (char *name, int *fdp);
421 clib_error_t *clib_mem_create_hugetlb_fd (char *name, int *fdp);
422 clib_error_t *clib_mem_vm_ext_alloc (clib_mem_vm_alloc_t * a);
423 void clib_mem_vm_ext_free (clib_mem_vm_alloc_t * a);
424 u64 clib_mem_get_fd_page_size (int fd);
425 uword clib_mem_get_default_hugepage_size (void);
426 int clib_mem_get_fd_log2_page_size (int fd);
427 u64 *clib_mem_vm_get_paddr (void *mem, int log2_page_size, int n_pages);
431 uword size; /**< Map size */
432 int fd; /**< File descriptor to be mapped */
433 uword requested_va; /**< Request fixed position mapping */
434 void *addr; /**< Pointer to mapped memory, if successful */
437 clib_error_t *clib_mem_vm_ext_map (clib_mem_vm_map_t * a);
438 void clib_mem_vm_randomize_va (uword * requested_va, u32 log2_page_size);
439 void mheap_trace (void *v, int enable);
440 uword clib_mem_trace_enable_disable (uword enable);
441 void clib_mem_trace (int enable);
443 #include <vppinfra/error.h> /* clib_panic */
445 #endif /* _included_clib_mem_h */
448 * fd.io coding-style-patch-verification: ON
451 * eval: (c-set-style "gnu")