/*
 * Copyright (c) 2018 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
16 #include <vppinfra/valloc.h>
/** @file
    @brief Simple first-fit virtual space allocator
 */
22 /** Add a chunk of memory to a virtual allocation arena
23 @param vam - clib_valloc_main_t * pointer to the allocation arena
24 @param template - clib_valloc_chunk_t * pointer to a template chunk which
25 describes the virtual address range to add
27 @note only the baseva and size member of the template chunk are significant
28 It's perfectly OK for the new chunk to be discontinuous with previous
29 chunks, the chunk fusion algorithm won't merge them.
33 clib_valloc_add_chunk (clib_valloc_main_t *vam, clib_valloc_chunk_t *template)
35 clib_valloc_chunk_t *ch, *new_ch;
38 ASSERT (vam->flags & CLIB_VALLOC_INITIALIZED);
40 clib_spinlock_lock_if_init (&vam->lock);
42 /* Add at the beginning, or at the end... */
43 index = vam->first_index;
46 * Make sure we're not trying to add an overlapping chunk..
47 * It's worth checking, because someone will eventually do that.
49 if (CLIB_DEBUG > 0 && index != ~0)
53 ch = pool_elt_at_index (vam->chunks, index);
54 ASSERT (template->baseva < ch->baseva || template->baseva >=
55 (ch->baseva + ch->size));
56 ASSERT (template->baseva + template->size < ch->baseva ||
57 template->baseva + template->size >=
58 (ch->baseva + ch->size));
61 index = vam->first_index;
65 ch = pool_elt_at_index (vam->chunks, index);
67 if (index == ~0 || template->baseva < ch->baseva)
69 pool_get (vam->chunks, new_ch);
70 clib_memset (new_ch, 0, sizeof (*new_ch));
74 ch = pool_elt_at_index (vam->chunks, index);
78 ch->prev = new_ch - vam->chunks;
82 new_ch->next = new_ch->prev = ~0;
85 new_ch->baseva = template->baseva;
86 new_ch->size = template->size;
88 vam->first_index = new_ch - vam->chunks;
90 hash_set (vam->chunk_index_by_baseva, new_ch->baseva, vam->first_index);
94 /* Walk to the end of the chunk chain */
97 ch = pool_elt_at_index (vam->chunks, index);
100 /* we want the last chunk index */
101 index = ch - vam->chunks;
103 pool_get (vam->chunks, new_ch);
104 clib_memset (new_ch, 0, sizeof (*new_ch));
106 ch = pool_elt_at_index (vam->chunks, index);
109 new_ch->prev = index;
110 ch->next = new_ch - vam->chunks;
112 new_ch->baseva = template->baseva;
113 new_ch->size = template->size;
115 hash_set (vam->chunk_index_by_baseva, new_ch->baseva,
116 new_ch - vam->chunks);
119 clib_spinlock_unlock_if_init (&vam->lock);
122 /** Initialize a virtual memory allocation arena
123 @param vam - clib_valloc_main_t * pointer to the arena to initialize
124 @param template - clib_valloc_chunk_t * pointer to a template chunk which
125 describes the initial virtual address range
128 clib_valloc_init (clib_valloc_main_t * vam, clib_valloc_chunk_t * template,
131 ASSERT (template && template->baseva && template->size);
132 clib_memset (vam, 0, sizeof (*vam));
134 clib_spinlock_init (&vam->lock);
136 vam->chunk_index_by_baseva = hash_create (0, sizeof (uword));
137 vam->first_index = ~0;
138 vam->flags |= CLIB_VALLOC_INITIALIZED;
140 clib_valloc_add_chunk (vam, template);
143 /** Allocate virtual space
144 @param vam - clib_valloc_main_t * pointer to the allocation arena
145 @param size - u64 number of bytes to allocate
146 @os_out_of_memory_on_failure - 1=> panic on allocation failure
147 @return uword allocated space, 0=> failure
150 clib_valloc_alloc (clib_valloc_main_t * vam, uword size,
151 int os_out_of_memory_on_failure)
153 clib_valloc_chunk_t *ch, *new_ch, *next_ch;
156 clib_spinlock_lock_if_init (&vam->lock);
158 index = vam->first_index;
162 ch = pool_elt_at_index (vam->chunks, index);
163 /* If the chunk is free... */
164 if ((ch->flags & CLIB_VALLOC_BUSY) == 0)
170 if (ch->size == size)
172 ch->flags |= CLIB_VALLOC_BUSY;
173 clib_spinlock_unlock_if_init (&vam->lock);
177 * The current free chunk is larger than necessary, split the block.
179 pool_get (vam->chunks, new_ch);
180 /* ch might have just moved */
181 ch = pool_elt_at_index (vam->chunks, index);
182 clib_memset (new_ch, 0, sizeof (*new_ch));
183 new_ch->next = new_ch->prev = ~0;
184 new_ch->baseva = ch->baseva + size;
185 new_ch->size = ch->size - size;
188 /* Insert into doubly-linked list */
189 new_ch->next = ch->next;
190 new_ch->prev = ch - vam->chunks;
194 next_ch = pool_elt_at_index (vam->chunks, ch->next);
195 next_ch->prev = new_ch - vam->chunks;
197 ch->next = new_ch - vam->chunks;
199 hash_set (vam->chunk_index_by_baseva, new_ch->baseva,
200 new_ch - vam->chunks);
202 ch->flags |= CLIB_VALLOC_BUSY;
203 clib_spinlock_unlock_if_init (&vam->lock);
210 clib_spinlock_unlock_if_init (&vam->lock);
212 if (os_out_of_memory_on_failure)
219 /** Free virtual space
220 @param vam - clib_valloc_main_t * pointer to the allocation arena
221 @param baseva - uword base virtual address of the returned space
222 @return uword - size of the freed virtual chunk
223 @note the size is returned since we know it / in case the caller
224 doesn't memorize chunk sizes
227 clib_valloc_free (clib_valloc_main_t * vam, uword baseva)
229 clib_valloc_chunk_t *ch, *prev_ch, *next_ch, *n2_ch;
231 uword return_size = 0;
234 clib_spinlock_lock_if_init (&vam->lock);
236 p = hash_get (vam->chunk_index_by_baseva, baseva);
238 /* Check even in production images */
244 ch = pool_elt_at_index (vam->chunks, index);
246 return_size = ch->size;
248 ASSERT (ch->baseva == baseva);
249 ASSERT ((ch->flags & CLIB_VALLOC_BUSY) != 0);
251 ch->flags &= ~CLIB_VALLOC_BUSY;
253 /* combine with previous entry? */
256 prev_ch = pool_elt_at_index (vam->chunks, ch->prev);
258 * Previous item must be free as well, and
259 * tangent to this block.
261 if ((prev_ch->flags & CLIB_VALLOC_BUSY) == 0
262 && ((prev_ch->baseva + prev_ch->size) == ch->baseva))
264 hash_unset (vam->chunk_index_by_baseva, baseva);
265 prev_ch->size += ch->size;
266 prev_ch->next = ch->next;
269 next_ch = pool_elt_at_index (vam->chunks, ch->next);
270 next_ch->prev = ch->prev;
272 ASSERT (ch - vam->chunks != vam->first_index);
273 clib_memset (ch, 0xfe, sizeof (*ch));
274 pool_put (vam->chunks, ch);
275 /* See about combining with next elt */
280 /* Combine with next entry? */
283 next_ch = pool_elt_at_index (vam->chunks, ch->next);
285 if ((next_ch->flags & CLIB_VALLOC_BUSY) == 0
286 && ((ch->baseva + ch->size) == next_ch->baseva))
288 hash_unset (vam->chunk_index_by_baseva, next_ch->baseva);
289 ch->size += next_ch->size;
290 ch->next = next_ch->next;
293 n2_ch = pool_elt_at_index (vam->chunks, next_ch->next);
294 n2_ch->prev = ch - vam->chunks;
296 ASSERT (next_ch - vam->chunks != vam->first_index);
297 clib_memset (next_ch, 0xfe, sizeof (*ch));
298 pool_put (vam->chunks, next_ch);
302 clib_spinlock_unlock_if_init (&vam->lock);
306 /** format a virtual allocation arena (varargs)
307 @param vam - clib_valloc_main_t pointer to the arena to format
308 @param verbose - int - verbosity level
312 format_valloc (u8 *s, va_list *va)
314 clib_valloc_main_t *vam = va_arg (*va, clib_valloc_main_t *);
315 int verbose = va_arg (*va, int);
316 clib_valloc_chunk_t *ch;
320 clib_spinlock_lock_if_init (&vam->lock);
322 s = format (s, "%d chunks, first index %d\n",
323 pool_elts (vam->chunks), vam->first_index);
327 index = vam->first_index;
330 ch = pool_elt_at_index (vam->chunks, index);
332 s = format (s, "[%d] base %llx size %llx (%lld) prev %d %s\n",
333 index, ch->baseva, ch->size, ch->size, ch->prev,
334 (ch->flags & CLIB_VALLOC_BUSY) ? "busy" : "free");
336 p = hash_get (vam->chunk_index_by_baseva, ch->baseva);
339 s = format (s, " BUG: baseva not in hash table!\n");
341 else if (p[0] != index)
343 s = format (s, " BUG: baseva in hash table %d not %d!\n",
350 clib_spinlock_unlock_if_init (&vam->lock);
/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */