/*
 * Copyright (c) 2015 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
/*
  Copyright (c) 2001, 2002, 2003 Eliot Dresselhaus

  Permission is hereby granted, free of charge, to any person obtaining
  a copy of this software and associated documentation files (the
  "Software"), to deal in the Software without restriction, including
  without limitation the rights to use, copy, modify, merge, publish,
  distribute, sublicense, and/or sell copies of the Software, and to
  permit persons to whom the Software is furnished to do so, subject to
  the following conditions:

  The above copyright notice and this permission notice shall be
  included in all copies or substantial portions of the Software.

  THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
  NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
  LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
  OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
  WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
#ifndef _included_clib_mem_h
#define _included_clib_mem_h

#include <stdarg.h>
#include <unistd.h>
#include <sys/mman.h>

#include <vppinfra/clib.h>	/* uword, etc */
#include <vppinfra/clib_error.h>
#include <vppinfra/mheap_bootstrap.h>
#include <vppinfra/os.h>
#include <vppinfra/string.h>	/* memcpy, memset */
#include <vppinfra/valgrind.h>
52 #define CLIB_MAX_MHEAPS 256
55 extern void *clib_per_cpu_mheaps[CLIB_MAX_MHEAPS];
58 clib_mem_get_per_cpu_heap (void)
60 int cpu = os_get_thread_index ();
61 return clib_per_cpu_mheaps[cpu];
65 clib_mem_set_per_cpu_heap (u8 * new_heap)
67 int cpu = os_get_thread_index ();
68 void *old = clib_per_cpu_mheaps[cpu];
69 clib_per_cpu_mheaps[cpu] = new_heap;
73 /* Memory allocator which may call os_out_of_memory() if it fails */
75 clib_mem_alloc_aligned_at_offset (uword size, uword align, uword align_offset,
76 int os_out_of_memory_on_failure)
81 if (align_offset > align)
84 align_offset %= align;
89 cpu = os_get_thread_index ();
90 heap = clib_per_cpu_mheaps[cpu];
91 heap = mheap_get_aligned (heap, size, align, align_offset, &offset);
92 clib_per_cpu_mheaps[cpu] = heap;
98 VALGRIND_MALLOCLIKE_BLOCK (p, mheap_data_bytes (heap, offset), 0, 0);
104 if (os_out_of_memory_on_failure)
110 /* Memory allocator which calls os_out_of_memory() when it fails */
112 clib_mem_alloc (uword size)
114 return clib_mem_alloc_aligned_at_offset (size, /* align */ 1,
115 /* align_offset */ 0,
116 /* os_out_of_memory */ 1);
120 clib_mem_alloc_aligned (uword size, uword align)
122 return clib_mem_alloc_aligned_at_offset (size, align, /* align_offset */ 0,
123 /* os_out_of_memory */ 1);
126 /* Memory allocator which calls os_out_of_memory() when it fails */
128 clib_mem_alloc_or_null (uword size)
130 return clib_mem_alloc_aligned_at_offset (size, /* align */ 1,
131 /* align_offset */ 0,
132 /* os_out_of_memory */ 0);
136 clib_mem_alloc_aligned_or_null (uword size, uword align)
138 return clib_mem_alloc_aligned_at_offset (size, align, /* align_offset */ 0,
139 /* os_out_of_memory */ 0);
144 /* Memory allocator which panics when it fails.
145 Use macro so that clib_panic macro can expand __FUNCTION__ and __LINE__. */
146 #define clib_mem_alloc_aligned_no_fail(size,align) \
148 uword _clib_mem_alloc_size = (size); \
149 void * _clib_mem_alloc_p; \
150 _clib_mem_alloc_p = clib_mem_alloc_aligned (_clib_mem_alloc_size, (align)); \
151 if (! _clib_mem_alloc_p) \
152 clib_panic ("failed to allocate %d bytes", _clib_mem_alloc_size); \
156 #define clib_mem_alloc_no_fail(size) clib_mem_alloc_aligned_no_fail(size,1)
158 /* Alias to stack allocator for naming consistency. */
159 #define clib_mem_alloc_stack(bytes) __builtin_alloca(bytes)
162 clib_mem_is_heap_object (void *p)
164 void *heap = clib_mem_get_per_cpu_heap ();
165 uword offset = (uword) p - (uword) heap;
168 if (offset >= vec_len (heap))
171 e = mheap_elt_at_uoffset (heap, offset);
172 n = mheap_next_elt (e);
174 /* Check that heap forward and reverse pointers agree. */
175 return e->n_user_data == n->prev_n_user_data;
179 clib_mem_free (void *p)
181 u8 *heap = clib_mem_get_per_cpu_heap ();
183 /* Make sure object is in the correct heap. */
184 ASSERT (clib_mem_is_heap_object (p));
186 mheap_put (heap, (u8 *) p - heap);
189 VALGRIND_FREELIKE_BLOCK (p, 0);
194 clib_mem_realloc (void *p, uword new_size, uword old_size)
196 /* By default use alloc, copy and free to emulate realloc. */
197 void *q = clib_mem_alloc (new_size);
201 if (old_size < new_size)
202 copy_size = old_size;
204 copy_size = new_size;
205 clib_memcpy (q, p, copy_size);
212 clib_mem_size (void *p)
214 ASSERT (clib_mem_is_heap_object (p));
215 mheap_elt_t *e = mheap_user_pointer_to_elt (p);
216 return mheap_elt_data_bytes (e);
220 clib_mem_get_heap (void)
222 return clib_mem_get_per_cpu_heap ();
226 clib_mem_set_heap (void *heap)
228 return clib_mem_set_per_cpu_heap (heap);
231 void *clib_mem_init (void *heap, uword size);
233 void clib_mem_exit (void);
235 uword clib_mem_get_page_size (void);
237 void clib_mem_validate (void);
239 void clib_mem_trace (int enable);
243 /* Total number of objects allocated. */
246 /* Total allocated bytes. Bytes used and free.
247 used + free = total */
248 uword bytes_total, bytes_used, bytes_free;
250 /* Number of bytes used by mheap data structure overhead
251 (e.g. free lists, mheap header). */
252 uword bytes_overhead;
254 /* Amount of free space returned to operating system. */
255 uword bytes_free_reclaimed;
257 /* For malloc which puts small objects in sbrk region and
258 large objects in mmap'ed regions. */
259 uword bytes_used_sbrk;
260 uword bytes_used_mmap;
262 /* Max. number of bytes in this heap. */
266 void clib_mem_usage (clib_mem_usage_t * usage);
268 u8 *format_clib_mem_usage (u8 * s, va_list * args);
270 /* Allocate virtual address space. */
272 clib_mem_vm_alloc (uword size)
275 uword flags = MAP_PRIVATE;
278 flags |= MAP_ANONYMOUS;
281 mmap_addr = mmap (0, size, PROT_READ | PROT_WRITE, flags, -1, 0);
282 if (mmap_addr == (void *) -1)
289 clib_mem_vm_free (void *addr, uword size)
295 clib_mem_vm_unmap (void *addr, uword size)
298 uword flags = MAP_PRIVATE | MAP_FIXED;
300 /* To unmap we "map" with no protection. If we actually called
301 munmap then other callers could steal the address space. By
302 changing to PROT_NONE the kernel can free up the pages which is
303 really what we want "unmap" to mean. */
304 mmap_addr = mmap (addr, size, PROT_NONE, flags, -1, 0);
305 if (mmap_addr == (void *) -1)
312 clib_mem_vm_map (void *addr, uword size)
315 uword flags = MAP_PRIVATE | MAP_FIXED;
317 mmap_addr = mmap (addr, size, (PROT_READ | PROT_WRITE), flags, -1, 0);
318 if (mmap_addr == (void *) -1)
326 #define CLIB_MEM_VM_F_SHARED (1 << 0)
327 #define CLIB_MEM_VM_F_HUGETLB (1 << 1)
328 #define CLIB_MEM_VM_F_NUMA_PREFER (1 << 2)
329 #define CLIB_MEM_VM_F_NUMA_FORCE (1 << 3)
330 #define CLIB_MEM_VM_F_HUGETLB_PREALLOC (1 << 4)
331 u32 flags; /**< vm allocation flags:
332 <br> CLIB_MEM_VM_F_SHARED: request shared memory, file
333 descriptor will be provided on successful allocation.
334 <br> CLIB_MEM_VM_F_HUGETLB: request hugepages.
335 <br> CLIB_MEM_VM_F_NUMA_PREFER: numa_node field contains valid
336 numa node preference.
337 <br> CLIB_MEM_VM_F_NUMA_FORCE: fail if setting numa policy fails.
338 <br> CLIB_MEM_VM_F_HUGETLB_PREALLOC: pre-allocate hugepages if
339 number of available pages is not sufficient.
341 char *name; /**< Name for memory allocation, set by caller. */
342 uword size; /**< Allocation size, set by caller. */
343 int numa_node; /**< numa node preference. Valid if CLIB_MEM_VM_F_NUMA_PREFER set. */
344 void *addr; /**< Pointer to allocated memory, set on successful allocation. */
345 int fd; /**< File descriptor, set on successful allocation if CLIB_MEM_VM_F_SHARED is set. */
346 int log2_page_size; /* Page size in log2 format, set on successful allocation. */
347 int n_pages; /* Number of pages. */
348 uword requested_va; /**< Request fixed position mapping */
349 } clib_mem_vm_alloc_t;
351 clib_error_t *clib_mem_vm_ext_alloc (clib_mem_vm_alloc_t * a);
352 u64 clib_mem_vm_get_page_size (int fd);
353 int clib_mem_vm_get_log2_page_size (int fd);
354 u64 *clib_mem_vm_get_paddr (void *mem, int log2_page_size, int n_pages);
358 uword size; /**< Map size */
359 int fd; /**< File descriptor to be mapped */
360 uword requested_va; /**< Request fixed position mapping */
361 void *addr; /**< Pointer to mapped memory, if successful */
363 clib_error_t *clib_mem_vm_ext_map (clib_mem_vm_map_t * a);
366 #include <vppinfra/error.h> /* clib_panic */
368 #endif /* _included_clib_mem_h */
/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */