/*
 * Copyright (c) 2018 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <vppinfra/valloc.h>

/** @file
    @brief Simple first-fit virtual space allocator
*/
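
/*
 * Data-structure note (inferred from the code below): chunks live in
 * vam->chunks (a pool) and are chained into a doubly-linked list via
 * pool indices, with ~0 as the end-of-list sentinel. The
 * chunk_index_by_baseva hash maps each chunk's base virtual address to
 * its pool index, so clib_valloc_free can find a chunk without walking
 * the list.
 */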

/** Add a chunk of memory to a virtual allocation arena
    @param vam - clib_valloc_main_t * pointer to the allocation arena
    @param template - clib_valloc_chunk_t * pointer to a template chunk which
    describes the virtual address range to add

    @note only the baseva and size members of the template chunk are
    significant. It's perfectly OK for the new chunk to be discontinuous
    with previous chunks; the chunk-fusion algorithm won't merge
    discontiguous chunks.
*/
void
clib_valloc_add_chunk (clib_valloc_main_t * vam,
                       clib_valloc_chunk_t * template)
{
  clib_valloc_chunk_t *ch, *new_ch;
  u32 index;

  ASSERT (vam->flags & CLIB_VALLOC_INITIALIZED);

  clib_spinlock_lock_if_init (&vam->lock);

  /* Add at the beginning, or at the end... */
  index = vam->first_index;

  /*
   * Make sure we're not trying to add an overlapping chunk..
   * It's worth checking, because someone will eventually do that.
   */
  if (CLIB_DEBUG > 0 && index != ~0)
    {
      while (index != ~0)
        {
          ch = pool_elt_at_index (vam->chunks, index);
          ASSERT (template->baseva < ch->baseva || template->baseva >=
                  (ch->baseva + ch->size));
          ASSERT (template->baseva + template->size < ch->baseva ||
                  template->baseva + template->size >=
                  (ch->baseva + ch->size));
          index = ch->next;
        }
      index = vam->first_index;
    }

  if (index != ~0)
    ch = pool_elt_at_index (vam->chunks, index);

  /* Empty arena, or the new chunk goes before the current head chunk? */
  if (index == ~0 || template->baseva < ch->baseva)
    {
      pool_get (vam->chunks, new_ch);
      clib_memset (new_ch, 0, sizeof (*new_ch));

      if (index != ~0)
        {
          /* Link the new chunk ahead of the current head */
          ch = pool_elt_at_index (vam->chunks, index);
          new_ch->next = index;
          new_ch->prev = ~0;
          ch->prev = new_ch - vam->chunks;
        }
      else
        {
          new_ch->next = new_ch->prev = ~0;
        }

      new_ch->baseva = template->baseva;
      new_ch->size = template->size;

      vam->first_index = new_ch - vam->chunks;

      hash_set (vam->chunk_index_by_baseva, new_ch->baseva, vam->first_index);
    }
  else
    {
      /* Walk to the end of the chunk chain */
      while (index != ~0)
        {
          ch = pool_elt_at_index (vam->chunks, index);
          index = ch->next;
        }
      /* we want the last chunk index */
      index = ch - vam->chunks;

      pool_get (vam->chunks, new_ch);
      clib_memset (new_ch, 0, sizeof (*new_ch));

      /* ch might have just moved */
      ch = pool_elt_at_index (vam->chunks, index);

      new_ch->next = ~0;
      new_ch->prev = index;
      ch->next = new_ch - vam->chunks;

      new_ch->baseva = template->baseva;
      new_ch->size = template->size;

      hash_set (vam->chunk_index_by_baseva, new_ch->baseva,
                new_ch - vam->chunks);
    }

  clib_spinlock_unlock_if_init (&vam->lock);
}
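
/*
 * Illustrative sketch (not part of the original file): growing an
 * existing arena "vam" with a second, discontiguous range. The address
 * and size are arbitrary assumptions chosen for the example.
 *
 *   clib_valloc_chunk_t _ch, *ch = &_ch;
 *
 *   clib_memset (ch, 0, sizeof (*ch));
 *   ch->baseva = 0x400000000ULL;   // need not abut the existing range
 *   ch->size = 1ULL << 20;
 *   clib_valloc_add_chunk (&vam, ch);
 */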

/** Initialize a virtual memory allocation arena
    @param vam - clib_valloc_main_t * pointer to the arena to initialize
    @param template - clib_valloc_chunk_t * pointer to a template chunk which
    describes the initial virtual address range
    @param need_lock - int - 1 => protect the arena with a spinlock
*/
void
clib_valloc_init (clib_valloc_main_t * vam, clib_valloc_chunk_t * template,
                  int need_lock)
{
  ASSERT (template && template->baseva && template->size);
  clib_memset (vam, 0, sizeof (*vam));

  if (need_lock)
    clib_spinlock_init (&vam->lock);

  vam->chunk_index_by_baseva = hash_create (0, sizeof (uword));
  vam->first_index = ~0;
  vam->flags |= CLIB_VALLOC_INITIALIZED;

  clib_valloc_add_chunk (vam, template);
}
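
/*
 * Usage sketch (illustrative; the base address and size below are
 * assumptions chosen for the example):
 *
 *   clib_valloc_main_t vam;
 *   clib_valloc_chunk_t _template, *template = &_template;
 *
 *   clib_memset (template, 0, sizeof (*template));
 *   template->baseva = 0x200000000ULL;
 *   template->size = 1ULL << 30;            // 1 GB arena
 *   clib_valloc_init (&vam, template, 1);   // need_lock = 1
 */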

/** Allocate virtual space
    @param vam - clib_valloc_main_t * pointer to the allocation arena
    @param size - u64 number of bytes to allocate
    @param os_out_of_memory_on_failure - 1 => panic on allocation failure
    @return uword allocated space, 0 => failure
*/
uword
clib_valloc_alloc (clib_valloc_main_t * vam, uword size,
                   int os_out_of_memory_on_failure)
{
  clib_valloc_chunk_t *ch, *new_ch, *next_ch;
  u32 index;

  clib_spinlock_lock_if_init (&vam->lock);

  index = vam->first_index;

  while (index != ~0)
    {
      ch = pool_elt_at_index (vam->chunks, index);
      /* If the chunk is free... */
      if ((ch->flags & CLIB_VALLOC_BUSY) == 0)
        {
          /* Too small? */
          if (ch->size < size)
            goto next_chunk;

          /* Exact match? */
          if (ch->size == size)
            {
              ch->flags |= CLIB_VALLOC_BUSY;
              clib_spinlock_unlock_if_init (&vam->lock);
              return ch->baseva;
            }

          /*
           * The current free chunk is larger than necessary;
           * split the block.
           */
          pool_get (vam->chunks, new_ch);
          /* ch might have just moved */
          ch = pool_elt_at_index (vam->chunks, index);
          clib_memset (new_ch, 0, sizeof (*new_ch));
          new_ch->next = new_ch->prev = ~0;
          new_ch->baseva = ch->baseva + size;
          new_ch->size = ch->size - size;
          ch->size = size;

          /* Insert into doubly-linked list */
          new_ch->next = ch->next;
          new_ch->prev = ch - vam->chunks;

          if (ch->next != ~0)
            {
              next_ch = pool_elt_at_index (vam->chunks, ch->next);
              next_ch->prev = new_ch - vam->chunks;
            }
          ch->next = new_ch - vam->chunks;

          hash_set (vam->chunk_index_by_baseva, new_ch->baseva,
                    new_ch - vam->chunks);

          ch->flags |= CLIB_VALLOC_BUSY;
          clib_spinlock_unlock_if_init (&vam->lock);
          return ch->baseva;
        }

    next_chunk:
      index = ch->next;
    }

  clib_spinlock_unlock_if_init (&vam->lock);

  if (os_out_of_memory_on_failure)
    os_out_of_memory ();

  return 0;
}
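
/*
 * Illustrative sketch (continuing the earlier example arena): carve out
 * 4 KB without panicking on failure. If the chosen free chunk is larger
 * than the request, it is split and the remainder stays free.
 *
 *   uword va = clib_valloc_alloc (&vam, 4 << 10, 0);
 *   if (va == 0)
 *     clib_warning ("arena exhausted");
 */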

/** Free virtual space
    @param vam - clib_valloc_main_t * pointer to the allocation arena
    @param baseva - uword base virtual address of the returned space
    @return uword - size of the freed virtual chunk
    @note the size is returned since we know it, in case the caller
    doesn't keep track of chunk sizes
*/
uword
clib_valloc_free (clib_valloc_main_t * vam, uword baseva)
{
  clib_valloc_chunk_t *ch, *prev_ch, *next_ch, *n2_ch;
  u32 index;
  uword return_size = 0;
  uword *p;

  clib_spinlock_lock_if_init (&vam->lock);

  p = hash_get (vam->chunk_index_by_baseva, baseva);

  /* Check even in production images */
  if (p == 0)
    os_panic ();

  index = p[0];

  ch = pool_elt_at_index (vam->chunks, index);

  return_size = ch->size;

  ASSERT (ch->baseva == baseva);
  ASSERT ((ch->flags & CLIB_VALLOC_BUSY) != 0);

  ch->flags &= ~CLIB_VALLOC_BUSY;

  /* Combine with the previous entry? */
  if (ch->prev != ~0)
    {
      prev_ch = pool_elt_at_index (vam->chunks, ch->prev);
      /*
       * Previous item must be free as well, and
       * tangent to this block.
       */
      if ((prev_ch->flags & CLIB_VALLOC_BUSY) == 0
          && ((prev_ch->baseva + prev_ch->size) == ch->baseva))
        {
          hash_unset (vam->chunk_index_by_baseva, baseva);
          prev_ch->size += ch->size;
          prev_ch->next = ch->next;
          if (ch->next != ~0)
            {
              next_ch = pool_elt_at_index (vam->chunks, ch->next);
              next_ch->prev = ch->prev;
            }
          ASSERT (ch - vam->chunks != vam->first_index);
          clib_memset (ch, 0xfe, sizeof (*ch));
          pool_put (vam->chunks, ch);
          /* See about combining with the next elt */
          ch = prev_ch;
        }
    }

  /* Combine with the next entry? */
  if (ch->next != ~0)
    {
      next_ch = pool_elt_at_index (vam->chunks, ch->next);

      if ((next_ch->flags & CLIB_VALLOC_BUSY) == 0
          && ((ch->baseva + ch->size) == next_ch->baseva))
        {
          hash_unset (vam->chunk_index_by_baseva, next_ch->baseva);
          ch->size += next_ch->size;
          ch->next = next_ch->next;
          if (ch->next != ~0)
            {
              n2_ch = pool_elt_at_index (vam->chunks, next_ch->next);
              n2_ch->prev = ch - vam->chunks;
            }
          ASSERT (next_ch - vam->chunks != vam->first_index);
          clib_memset (next_ch, 0xfe, sizeof (*next_ch));
          pool_put (vam->chunks, next_ch);
        }
    }

  clib_spinlock_unlock_if_init (&vam->lock);

  return return_size;
}
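
/*
 * Illustrative sketch (continuing the earlier example): freeing returns
 * the chunk size, and the freed chunk is coalesced with any tangent
 * free neighbors.
 *
 *   uword size = clib_valloc_free (&vam, va);
 *   ASSERT (size == 4 << 10);
 */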

/** Format a virtual allocation arena (varargs)
    @param vam - clib_valloc_main_t pointer to the arena to format
    @param verbose - int - verbosity level
*/
u8 *
format_valloc (u8 * s, va_list * va)
{
  clib_valloc_main_t *vam = va_arg (*va, clib_valloc_main_t *);
  int verbose = va_arg (*va, int);
  clib_valloc_chunk_t *ch;
  u32 index;
  uword *p;

  clib_spinlock_lock_if_init (&vam->lock);

  s = format (s, "%d chunks, first index %d\n",
              pool_elts (vam->chunks), vam->first_index);

  if (verbose)
    {
      index = vam->first_index;
      while (index != ~0)
        {
          ch = pool_elt_at_index (vam->chunks, index);

          s = format (s, "[%d] base %llx size %llx (%lld) prev %d %s\n",
                      index, ch->baseva, ch->size, ch->size, ch->prev,
                      (ch->flags & CLIB_VALLOC_BUSY) ? "busy" : "free");

          p = hash_get (vam->chunk_index_by_baseva, ch->baseva);
          if (p == 0)
            {
              s = format (s, "   BUG: baseva not in hash table!\n");
            }
          else if (p[0] != index)
            {
              s = format (s, "   BUG: baseva in hash table %d not %d!\n",
                          p[0], index);
            }
          index = ch->next;
        }
    }

  clib_spinlock_unlock_if_init (&vam->lock);

  return s;
}
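
/*
 * Usage sketch: format_valloc follows the vppinfra formatter convention,
 * so it is normally invoked via the %U directive:
 *
 *   fformat (stdout, "%U", format_valloc, &vam, 1);   // verbose = 1
 */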

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */