/*
 * Copyright (c) 2015 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
/*
  Copyright (c) 2001, 2002, 2003 Eliot Dresselhaus

  Permission is hereby granted, free of charge, to any person obtaining
  a copy of this software and associated documentation files (the
  "Software"), to deal in the Software without restriction, including
  without limitation the rights to use, copy, modify, merge, publish,
  distribute, sublicense, and/or sell copies of the Software, and to
  permit persons to whom the Software is furnished to do so, subject to
  the following conditions:

  The above copyright notice and this permission notice shall be
  included in all copies or substantial portions of the Software.

  THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
  NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
  LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
  OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
  WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
#ifndef _included_clib_mem_h
#define _included_clib_mem_h

#include <stdarg.h>
#include <unistd.h>
#include <sys/mman.h>

#include <vppinfra/clib.h>	/* uword, etc */
#include <vppinfra/clib_error.h>

#if USE_DLMALLOC == 0
#include <vppinfra/mheap_bootstrap.h>
#else
#include <vppinfra/dlmalloc.h>
#endif

#include <vppinfra/os.h>
#include <vppinfra/string.h>	/* memcpy, clib_memset */
#include <vppinfra/sanitizer.h>
#define CLIB_MAX_MHEAPS 256
#define CLIB_MAX_NUMAS 8

/* Unspecified NUMA socket */
#define VEC_NUMA_UNSPECIFIED (0xFF)

extern void *clib_per_cpu_mheaps[CLIB_MAX_MHEAPS];
extern void *clib_per_numa_mheaps[CLIB_MAX_NUMAS];
always_inline void *
clib_mem_get_per_cpu_heap (void)
{
  int cpu = os_get_thread_index ();
  return clib_per_cpu_mheaps[cpu];
}

always_inline void *
clib_mem_set_per_cpu_heap (u8 * new_heap)
{
  int cpu = os_get_thread_index ();
  void *old = clib_per_cpu_mheaps[cpu];
  clib_per_cpu_mheaps[cpu] = new_heap;
  return old;
}
always_inline void *
clib_mem_get_per_numa_heap (u32 numa_id)
{
  ASSERT (numa_id < ARRAY_LEN (clib_per_numa_mheaps));
  return clib_per_numa_mheaps[numa_id];
}

always_inline void *
clib_mem_set_per_numa_heap (u8 * new_heap)
{
  int numa = os_get_numa_index ();
  void *old = clib_per_numa_mheaps[numa];
  clib_per_numa_mheaps[numa] = new_heap;
  return old;
}
static inline void
clib_mem_set_thread_index (void)
{
  /*
   * Find an unused slot in the per-cpu-mheaps array,
   * and grab it for this thread. We need to be able to
   * push/pop the thread heap without affecting other thread(s).
   */
  int i;
  if (__os_thread_index != 0)
    return;
  for (i = 0; i < ARRAY_LEN (clib_per_cpu_mheaps); i++)
    if (clib_atomic_bool_cmp_and_swap (&clib_per_cpu_mheaps[i],
				       0, clib_per_cpu_mheaps[0]))
      {
	os_set_thread_index (i);
	break;
      }
  ASSERT (__os_thread_index > 0);
}
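
/*
 * Illustrative sketch (not part of this header's API): a pthread created
 * outside the normal thread bootstrap would typically claim a heap slot
 * before touching the allocator. The thread function name below is
 * hypothetical.
 *
 *   static void *
 *   my_worker_fn (void *arg)
 *   {
 *     clib_mem_set_thread_index ();     claim a per-cpu mheap slot
 *     void *p = clib_mem_alloc (64);    now safe to allocate
 *     clib_mem_free (p);
 *     return 0;
 *   }
 */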
always_inline uword
clib_mem_size_nocheck (void *p)
{
#if USE_DLMALLOC == 0
  mheap_elt_t *e = mheap_user_pointer_to_elt (p);
  return mheap_elt_data_bytes (e);
#else
  return mspace_usable_size_with_delta (p);
#endif
}
/* Memory allocator which may call os_out_of_memory() if it fails */
always_inline void *
clib_mem_alloc_aligned_at_offset (uword size, uword align, uword align_offset,
				  int os_out_of_memory_on_failure)
{
  void *heap, *p;
  uword cpu;

  if (align_offset > align)
    {
      if (align > 0)
	align_offset %= align;
      else
	align_offset = align;
    }

  cpu = os_get_thread_index ();
  heap = clib_per_cpu_mheaps[cpu];

#if USE_DLMALLOC == 0
  uword offset;
  heap = mheap_get_aligned (heap, size, align, align_offset, &offset);
  clib_per_cpu_mheaps[cpu] = heap;
  if (PREDICT_TRUE (offset != ~0))
    p = heap + offset;
  else
    p = 0;
#else
  p = mspace_get_aligned (heap, size, align, align_offset);
#endif /* USE_DLMALLOC */

  if (PREDICT_FALSE (0 == p))
    {
      if (os_out_of_memory_on_failure)
	os_out_of_memory ();
      return 0;
    }

  CLIB_MEM_UNPOISON (p, size);
  return p;
}
/* Memory allocator which calls os_out_of_memory() when it fails */
always_inline void *
clib_mem_alloc (uword size)
{
  return clib_mem_alloc_aligned_at_offset (size, /* align */ 1,
					   /* align_offset */ 0,
					   /* os_out_of_memory */ 1);
}

always_inline void *
clib_mem_alloc_aligned (uword size, uword align)
{
  return clib_mem_alloc_aligned_at_offset (size, align, /* align_offset */ 0,
					   /* os_out_of_memory */ 1);
}
/* Memory allocator which returns NULL when it fails */
always_inline void *
clib_mem_alloc_or_null (uword size)
{
  return clib_mem_alloc_aligned_at_offset (size, /* align */ 1,
					   /* align_offset */ 0,
					   /* os_out_of_memory */ 0);
}

always_inline void *
clib_mem_alloc_aligned_or_null (uword size, uword align)
{
  return clib_mem_alloc_aligned_at_offset (size, align, /* align_offset */ 0,
					   /* os_out_of_memory */ 0);
}
/* Memory allocator which panics when it fails.
   Use macro so that clib_panic macro can expand __FUNCTION__ and __LINE__. */
#define clib_mem_alloc_aligned_no_fail(size,align)                            \
({                                                                             \
  uword _clib_mem_alloc_size = (size);                                         \
  void * _clib_mem_alloc_p;                                                    \
  _clib_mem_alloc_p = clib_mem_alloc_aligned (_clib_mem_alloc_size, (align));  \
  if (! _clib_mem_alloc_p)                                                     \
    clib_panic ("failed to allocate %d bytes", _clib_mem_alloc_size);          \
  _clib_mem_alloc_p;                                                           \
})

#define clib_mem_alloc_no_fail(size) clib_mem_alloc_aligned_no_fail(size,1)
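
/*
 * Allocation sketch (illustrative only): the *_or_null variants return
 * NULL on failure and must be checked; the plain variants take the
 * out-of-memory path, and the _no_fail macros panic instead.
 *
 *   u8 *a = clib_mem_alloc (128);                      never NULL
 *   u8 *b = clib_mem_alloc_aligned (256, 64);          64-byte aligned
 *   u8 *c = clib_mem_alloc_aligned_or_null (1 << 20, 4096);
 *   if (c == 0)
 *     ... handle the failure, e.g. shrink the request ...
 *   clib_mem_free (b);
 *   clib_mem_free (a);
 */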
/* Alias to stack allocator for naming consistency. */
#define clib_mem_alloc_stack(bytes) __builtin_alloca(bytes)
always_inline uword
clib_mem_is_heap_object (void *p)
{
#if USE_DLMALLOC == 0
  void *heap = clib_mem_get_per_cpu_heap ();
  uword offset = (uword) p - (uword) heap;
  mheap_elt_t *e, *n;

  if (offset >= vec_len (heap))
    return 0;

  e = mheap_elt_at_uoffset (heap, offset);
  n = mheap_next_elt (e);

  /* Check that heap forward and reverse pointers agree. */
  return e->n_user_data == n->prev_n_user_data;
#else
  void *heap = clib_mem_get_per_cpu_heap ();
  return mspace_is_heap_object (heap, p);
#endif /* USE_DLMALLOC */
}
always_inline void
clib_mem_free (void *p)
{
  u8 *heap = clib_mem_get_per_cpu_heap ();

  /* Make sure object is in the correct heap. */
  ASSERT (clib_mem_is_heap_object (p));

  CLIB_MEM_POISON (p, clib_mem_size_nocheck (p));

#if USE_DLMALLOC == 0
  mheap_put (heap, (u8 *) p - heap);
#else
  mspace_put (heap, p);
#endif
}
always_inline void *
clib_mem_realloc (void *p, uword new_size, uword old_size)
{
  /* By default use alloc, copy and free to emulate realloc. */
  void *q = clib_mem_alloc (new_size);
  uword copy_size;
  if (q)
    {
      if (old_size < new_size)
	copy_size = old_size;
      else
	copy_size = new_size;
      clib_memcpy_fast (q, p, copy_size);
      clib_mem_free (p);
    }
  return q;
}
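
/*
 * Illustrative note: unlike realloc(3), the caller supplies old_size, so
 * only min(old_size, new_size) bytes are preserved. A minimal sketch:
 *
 *   u8 *p = clib_mem_alloc (64);
 *   p = clib_mem_realloc (p, 128, 64);    old 64 bytes copied, old block freed
 */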
always_inline uword
clib_mem_size (void *p)
{
  ASSERT (clib_mem_is_heap_object (p));
  return clib_mem_size_nocheck (p);
}
always_inline void
clib_mem_free_s (void *p)
{
  uword size = clib_mem_size (p);
  CLIB_MEM_UNPOISON (p, size);
  memset_s_inline (p, size, 0, size);
  clib_mem_free (p);
}
always_inline void *
clib_mem_get_heap (void)
{
  return clib_mem_get_per_cpu_heap ();
}

always_inline void *
clib_mem_set_heap (void *heap)
{
  return clib_mem_set_per_cpu_heap (heap);
}
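
/*
 * Common push/pop pattern (illustrative): temporarily switch the calling
 * thread to another heap, allocate, then restore the previous heap.
 * "other_heap" is a hypothetical heap pointer obtained elsewhere.
 *
 *   void *oldheap = clib_mem_set_heap (other_heap);
 *   u8 *p = clib_mem_alloc_aligned (512, 64);
 *   clib_mem_set_heap (oldheap);
 */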
void *clib_mem_init (void *heap, uword size);
void *clib_mem_init_thread_safe (void *memory, uword memory_size);
void *clib_mem_init_thread_safe_numa (void *memory, uword memory_size);

void clib_mem_exit (void);

uword clib_mem_get_page_size (void);

void clib_mem_validate (void);

void clib_mem_trace (int enable);

int clib_mem_is_traced (void);
typedef struct
{
  /* Total number of objects allocated. */
  uword object_count;

  /* Total allocated bytes.  Bytes used and free.
     used + free = total */
  uword bytes_total, bytes_used, bytes_free;

  /* Number of bytes used by mheap data structure overhead
     (e.g. free lists, mheap header). */
  uword bytes_overhead;

  /* Amount of free space returned to operating system. */
  uword bytes_free_reclaimed;

  /* For malloc which puts small objects in sbrk region and
     large objects in mmap'ed regions. */
  uword bytes_used_sbrk;
  uword bytes_used_mmap;

  /* Max. number of bytes in this heap. */
  uword bytes_max;
} clib_mem_usage_t;
void clib_mem_usage (clib_mem_usage_t * usage);

u8 *format_clib_mem_usage (u8 * s, va_list * args);
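
/*
 * Illustrative sketch: snapshot the current heap statistics.
 *
 *   clib_mem_usage_t u;
 *   clib_mem_usage (&u);
 *   ... u.bytes_used and u.bytes_free now describe the current heap ...
 */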
/* Allocate virtual address space. */
always_inline void *
clib_mem_vm_alloc (uword size)
{
  void *mmap_addr;
  uword flags = MAP_PRIVATE;

#ifdef MAP_ANONYMOUS
  flags |= MAP_ANONYMOUS;
#endif

  mmap_addr = mmap (0, size, PROT_READ | PROT_WRITE, flags, -1, 0);
  if (mmap_addr == (void *) -1)
    mmap_addr = 0;

  return mmap_addr;
}

always_inline void
clib_mem_vm_free (void *addr, uword size)
{
  munmap (addr, size);
}

always_inline void *
clib_mem_vm_unmap (void *addr, uword size)
{
  void *mmap_addr;
  uword flags = MAP_PRIVATE | MAP_FIXED;

  /* To unmap we "map" with no protection. If we actually called
     munmap then other callers could steal the address space. By
     changing to PROT_NONE the kernel can free up the pages which is
     really what we want "unmap" to mean. */
  mmap_addr = mmap (addr, size, PROT_NONE, flags, -1, 0);
  if (mmap_addr == (void *) -1)
    mmap_addr = 0;

  return mmap_addr;
}

always_inline void *
clib_mem_vm_map (void *addr, uword size)
{
  void *mmap_addr;
  uword flags = MAP_PRIVATE | MAP_FIXED | MAP_ANONYMOUS;

  mmap_addr = mmap (addr, size, (PROT_READ | PROT_WRITE), flags, -1, 0);
  if (mmap_addr == (void *) -1)
    mmap_addr = 0;

  return mmap_addr;
}
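
/*
 * Illustrative sketch: map a region, release its pages while keeping the
 * address range reserved, then remap and finally free it.
 *
 *   uword sz = 1ULL << 20;
 *   void *base = clib_mem_vm_alloc (sz);     anonymous r/w mapping
 *   clib_mem_vm_unmap (base, sz);            pages freed, VA range kept
 *   clib_mem_vm_map (base, sz);              r/w again at the same VA
 *   clib_mem_vm_free (base, sz);             really unmap
 */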
typedef struct
{
#define CLIB_MEM_VM_F_SHARED (1 << 0)
#define CLIB_MEM_VM_F_HUGETLB (1 << 1)
#define CLIB_MEM_VM_F_NUMA_PREFER (1 << 2)
#define CLIB_MEM_VM_F_NUMA_FORCE (1 << 3)
#define CLIB_MEM_VM_F_HUGETLB_PREALLOC (1 << 4)
#define CLIB_MEM_VM_F_LOCKED (1 << 5)
  u32 flags; /**< vm allocation flags:
		<br> CLIB_MEM_VM_F_SHARED: request shared memory, file
		descriptor will be provided on successful allocation.
		<br> CLIB_MEM_VM_F_HUGETLB: request hugepages.
		<br> CLIB_MEM_VM_F_NUMA_PREFER: numa_node field contains valid
		numa node preference.
		<br> CLIB_MEM_VM_F_NUMA_FORCE: fail if setting numa policy fails.
		<br> CLIB_MEM_VM_F_HUGETLB_PREALLOC: pre-allocate hugepages if
		number of available pages is not sufficient.
		<br> CLIB_MEM_VM_F_LOCKED: request locked memory.
	     */
  char *name; /**< Name for memory allocation, set by caller. */
  uword size; /**< Allocation size, set by caller. */
  int numa_node; /**< numa node preference. Valid if CLIB_MEM_VM_F_NUMA_PREFER set. */
  void *addr; /**< Pointer to allocated memory, set on successful allocation. */
  int fd; /**< File descriptor, set on successful allocation if CLIB_MEM_VM_F_SHARED is set. */
  int log2_page_size; /**< Page size in log2 format, set on successful allocation. */
  int n_pages; /**< Number of pages. */
  uword requested_va; /**< Request fixed position mapping */
} clib_mem_vm_alloc_t;
clib_error_t *clib_mem_create_fd (char *name, int *fdp);
clib_error_t *clib_mem_create_hugetlb_fd (char *name, int *fdp);
clib_error_t *clib_mem_vm_ext_alloc (clib_mem_vm_alloc_t * a);
void clib_mem_vm_ext_free (clib_mem_vm_alloc_t * a);
u64 clib_mem_get_fd_page_size (int fd);
uword clib_mem_get_default_hugepage_size (void);
int clib_mem_get_fd_log2_page_size (int fd);
u64 *clib_mem_vm_get_paddr (void *mem, int log2_page_size, int n_pages);
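
/*
 * Illustrative sketch (not a complete program): request shared,
 * hugepage-backed memory preferring numa node 0 via the extended
 * allocator; assumes 2 MB default hugepages.
 *
 *   clib_mem_vm_alloc_t a = { 0 };
 *   clib_error_t *err;
 *   a.name = "example-region";
 *   a.size = 2ULL << 21;
 *   a.flags = CLIB_MEM_VM_F_SHARED | CLIB_MEM_VM_F_HUGETLB
 *	       | CLIB_MEM_VM_F_NUMA_PREFER;
 *   a.numa_node = 0;
 *   err = clib_mem_vm_ext_alloc (&a);
 *   if (err == 0)
 *     ... use a.addr and a.fd, then clib_mem_vm_ext_free (&a) ...
 */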
typedef struct
{
  uword size;		/**< Map size */
  int fd;		/**< File descriptor to be mapped */
  uword requested_va;	/**< Request fixed position mapping */
  void *addr;		/**< Pointer to mapped memory, if successful */
} clib_mem_vm_map_t;
clib_error_t *clib_mem_vm_ext_map (clib_mem_vm_map_t * a);
void clib_mem_vm_randomize_va (uword * requested_va, u32 log2_page_size);
void mheap_trace (void *v, int enable);
uword clib_mem_trace_enable_disable (uword enable);
void clib_mem_trace (int enable);
#include <vppinfra/error.h>	/* clib_panic */

#endif /* _included_clib_mem_h */
/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */