/*
 * Copyright (c) 2015 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
/*
  Copyright (c) 2001, 2002, 2003 Eliot Dresselhaus

  Permission is hereby granted, free of charge, to any person obtaining
  a copy of this software and associated documentation files (the
  "Software"), to deal in the Software without restriction, including
  without limitation the rights to use, copy, modify, merge, publish,
  distribute, sublicense, and/or sell copies of the Software, and to
  permit persons to whom the Software is furnished to do so, subject to
  the following conditions:

  The above copyright notice and this permission notice shall be
  included in all copies or substantial portions of the Software.

  THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
  NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
  LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
  OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
  WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
38 #include <vppinfra/cache.h> /* for CLIB_CACHE_LINE_BYTES */
39 #include <vppinfra/mem.h>
40 #include <vppinfra/hash.h>
41 #include <vppinfra/vec.h>
42 #include <vppinfra/heap.h>
43 #include <vppinfra/error.h>
45 always_inline heap_elt_t *
46 elt_at (heap_header_t * h, uword i)
48 ASSERT (i < vec_len (h->elts));
52 always_inline heap_elt_t *
53 last (heap_header_t * h)
55 return elt_at (h, h->tail);
58 always_inline heap_elt_t *
59 first (heap_header_t * h)
61 return elt_at (h, h->head);
/* Object sizes are binned into N_BINS bins.
   Objects with size <= SMALL_BINS have their own bins.
   Larger objects are grouped together in power of 2 sized
   bins.

   Sizes are in units of elt_bytes bytes. */
71 /* Convert size to bin. */
73 size_to_bin (uword size)
79 if (size <= HEAP_SMALL_BINS)
87 bin = HEAP_SMALL_BINS + max_log2 (size) - (HEAP_LOG2_SMALL_BINS + 1);
88 if (bin >= HEAP_N_BINS)
89 bin = HEAP_N_BINS - 1;
95 /* Convert bin to size. */
96 always_inline __attribute__ ((unused))
97 uword bin_to_size (uword bin)
101 if (bin <= HEAP_SMALL_BINS - 1)
104 size = (uword) 1 << ((bin - HEAP_SMALL_BINS) + HEAP_LOG2_SMALL_BINS + 1);
110 elt_delete (heap_header_t * h, heap_elt_t * e)
112 heap_elt_t *l = vec_end (h->elts) - 1;
114 ASSERT (e >= h->elts && e <= l);
116 /* Update doubly linked pointers. */
118 heap_elt_t *p = heap_prev (e);
119 heap_elt_t *n = heap_next (e);
124 h->head = n - h->elts;
129 h->tail = p - h->elts;
138 /* Add to index free list or delete from end. */
140 vec_add1 (h->free_elts, e - h->elts);
142 _vec_len (h->elts)--;
147 After : P ... NEW ... E
150 elt_insert_before (heap_header_t * h, heap_elt_t * e, heap_elt_t * new)
152 heap_elt_t *p = heap_prev (e);
159 h->head = new - h->elts;
172 After : E ... NEW ... N
175 elt_insert_after (heap_header_t * h, heap_elt_t * e, heap_elt_t * new)
177 heap_elt_t *n = heap_next (e);
184 h->tail = new - h->elts;
195 always_inline heap_elt_t *
196 elt_new (heap_header_t * h)
200 if ((l = vec_len (h->free_elts)) > 0)
202 e = elt_at (h, h->free_elts[l - 1]);
203 _vec_len (h->free_elts) -= 1;
206 vec_add2 (h->elts, e, 1);
210 /* Return pointer to object at given offset.
211 Used to write free list index of free objects. */
213 elt_data (void *v, heap_elt_t * e)
215 heap_header_t *h = heap_header (v);
216 return v + heap_offset (e) * h->elt_bytes;
220 set_free_elt (void *v, heap_elt_t * e, uword fi)
222 heap_header_t *h = heap_header (v);
224 e->offset |= HEAP_ELT_FREE_BIT;
225 if (h->elt_bytes >= sizeof (u32))
227 *elt_data (v, e) = fi;
231 /* For elt_bytes < 4 we must store free index in separate
233 uword elt_index = e - h->elts;
234 vec_validate (h->small_free_elt_free_index, elt_index);
235 h->small_free_elt_free_index[elt_index] = fi;
240 get_free_elt (void *v, heap_elt_t * e, uword * bin_result)
242 heap_header_t *h = heap_header (v);
245 ASSERT (heap_is_free (e));
246 fb = size_to_bin (heap_elt_size (v, e));
248 if (h->elt_bytes >= sizeof (u32))
250 fi = *elt_data (v, e);
254 uword elt_index = e - h->elts;
255 fi = vec_elt (h->small_free_elt_free_index, elt_index);
263 remove_free_block (void *v, uword b, uword i)
265 heap_header_t *h = heap_header (v);
268 ASSERT (b < vec_len (h->free_lists));
269 ASSERT (i < vec_len (h->free_lists[b]));
271 l = vec_len (h->free_lists[b]);
275 uword t = h->free_lists[b][l - 1];
276 h->free_lists[b][i] = t;
277 set_free_elt (v, elt_at (h, t), i);
279 _vec_len (h->free_lists[b]) = l - 1;
/* search_free_list: find a free block of at least SIZE units on the
   binned free lists, split off the unused remainder back onto a free
   bin, and return the used element.
   NOTE(review): this extract is garbled — the original file's line
   numbers are fused into the text and many lines are missing (loop
   bodies, declarations, split logic).  Code below is kept byte-for-byte;
   restore from upstream vppinfra/heap.c before building. */
283 search_free_list (void *v, uword size)
285 heap_header_t *h = heap_header (v);
287 uword b, fb, f_size, f_index;
293 /* Search free lists for bins >= given size. */
294 for (b = size_to_bin (size); b < vec_len (h->free_lists); b++)
295 if ((l = vec_len (h->free_lists[b])) > 0)
297 /* Find an object that is large enough.
298 Search list in reverse so that more recently freed objects will be
299 allocated again sooner. */
304 f_index = h->free_lists[b][l];
305 f = elt_at (h, f_index);
306 f_size = heap_elt_size (v, f);
307 if ((s = f_size - size) >= 0)
315 /* If we fail to find a large enough object, try the next larger size. */
319 ASSERT (heap_is_free (f));
321 /* Link in used object (u) after free object (f). */
330 f = elt_at (h, f_index);
331 elt_insert_after (h, f, u);
332 fb = size_to_bin (s);
/* NOTE(review): remainder S of the split block goes back on bin FB below;
   presumably the found block F keeps the low offsets and U the high ones. */
335 u->offset = heap_offset (f) + s;
339 if (fb < HEAP_N_BINS)
342 vec_validate (h->free_lists, fb);
343 i = vec_len (h->free_lists[fb]);
344 vec_add1 (h->free_lists[fb], f - h->elts);
345 set_free_elt (v, f, i);
348 remove_free_block (v, b, l);
/* Forward declaration: coalesce adjacent free blocks (defined below). */
357 static void combine_free_blocks (void *v, heap_elt_t * e0, heap_elt_t * e1);
/* dealloc_elt: append element E to the free list for its size bin,
   then try to coalesce it with free neighbors via combine_free_blocks.
   NOTE(review): garbled extract — fused line numbers, missing lines
   (neighbor lookup of p/n not visible).  Code kept byte-for-byte. */
360 dealloc_elt (void *v, heap_elt_t * e)
362 heap_header_t *h = heap_header (v);
366 b = size_to_bin (heap_elt_size (v, e));
367 vec_validate (h->free_lists, b);
368 l = vec_len (h->free_lists[b]);
369 vec_add1 (h->free_lists[b], e - h->elts);
370 set_free_elt (v, e, l);
372 /* See if we can combine the block we just freed with neighboring free blocks. */
/* NOTE(review): p/n presumably come from heap_prev (e) / heap_next (e)
   on the missing lines — verify against upstream. */
374 if (!heap_is_free (p))
378 if (!heap_is_free (n))
382 combine_free_blocks (v, p, n);
/* _heap_alloc: core allocator.  Satisfies a request of SIZE units with
   ALIGN alignment either from the binned free lists or by growing the
   underlying vector; returns the object offset and element handle via
   *offset_return / *handle_return (~0 on failure).
   NOTE(review): garbled extract — fused line numbers, many missing
   lines (declarations, braces, failure path).  Code kept byte-for-byte;
   restore from upstream before building. */
386 _heap_alloc (void *v,
389 uword elt_bytes, uword * offset_return, uword * handle_return)
391 uword offset = 0, align_size;
398 /* Round up alignment to power of 2. */
406 align = max_pow2 (align);
/* Over-allocate so an aligned sub-range of SIZE always fits. */
407 align_size = size + align - 1;
410 e = search_free_list (v, align_size);
412 /* If nothing found on free list, allocate object from end of vector. */
417 offset = vec_len (v);
418 max_len = heap_get_max_len (v);
/* Respect a fixed maximum heap length, when one is configured. */
420 if (max_len && offset + align_size > max_len)
424 if (!v || !(h->flags & HEAP_IS_STATIC))
425 v = _vec_realloc (v, offset + align_size, elt_bytes, sizeof (h[0]),
428 _vec_len (v) += align_size;
433 h->elt_bytes = elt_bytes;
439 /* Add new element to doubly linked chain of elements. */
444 elt_insert_after (h, last (h), e);
450 uword new_offset, old_offset;
/* Align the object within the over-allocated range. */
452 old_offset = e->offset;
453 new_offset = (old_offset + align - 1) & ~(align - 1);
454 e->offset = new_offset;
/* NOTE(review): e is re-derived from e_index below because elt_new ()
   may reallocate h->elts and invalidate the pointer. */
455 e_index = e - h->elts;
457 /* Free fragments before and after aligned object. */
458 if (new_offset > old_offset)
460 heap_elt_t *before_e = elt_new (h);
461 before_e->offset = old_offset;
462 elt_insert_before (h, h->elts + e_index, before_e);
463 dealloc_elt (v, before_e);
466 if (new_offset + size < old_offset + align_size)
468 heap_elt_t *after_e = elt_new (h);
469 after_e->offset = new_offset + size;
470 elt_insert_after (h, h->elts + e_index, after_e);
471 dealloc_elt (v, after_e);
474 e = h->elts + e_index;
479 /* Keep track of used elements when debugging.
480 This allows deallocation to check that passed objects are valid. */
483 uword handle = e - h->elts;
484 ASSERT (!clib_bitmap_get (h->used_elt_bitmap, handle));
485 h->used_elt_bitmap = clib_bitmap_ori (h->used_elt_bitmap, handle);
488 *offset_return = e->offset;
489 *handle_return = e - h->elts;
/* Failure path: report ~0 for both offset and handle. */
493 *offset_return = *handle_return = ~0;
/* heap_dealloc: free the object identified by HANDLE.  Validates the
   handle (in debug builds via the used-element bitmap), then hands the
   element to dealloc_elt for free-list insertion and coalescing.
   NOTE(review): garbled extract — fused line numbers; the trailing
   dealloc_elt call is missing.  Code kept byte-for-byte. */
498 heap_dealloc (void *v, uword handle)
500 heap_header_t *h = heap_header (v);
503 ASSERT (handle < vec_len (h->elts));
505 /* For debugging we keep track of indices for valid objects.
506 We make sure user is not trying to free object with an invalid index. */
509 ASSERT (clib_bitmap_get (h->used_elt_bitmap, handle));
510 h->used_elt_bitmap = clib_bitmap_andnoti (h->used_elt_bitmap, handle);
515 e = h->elts + handle;
/* Double-free guard: element must still be marked in-use. */
516 ASSERT (!heap_is_free (e));
521 /* While freeing objects at INDEX we noticed free blocks i0 <= index and
522 i1 >= index. We combine these two or three blocks into one big free block. */
/* NOTE(review): garbled extract — fused line numbers, missing lines
   (declarations of e/f/g, loop exits, reuse-existing-bin branch).
   Code kept byte-for-byte; restore from upstream before building. */
524 combine_free_blocks (void *v, heap_elt_t * e0, heap_elt_t * e1)
526 heap_header_t *h = heap_header (v);
527 uword total_size, i, b, tb, ti, i_last, g_offset;
537 /* Compute total size of free objects i0 through i1. */
539 for (i = 0, e = e0; 1; e = heap_next (e), i++)
541 ASSERT (i < ARRAY_LEN (f));
543 ti = get_free_elt (v, e, &tb);
545 ASSERT (tb < vec_len (h->free_lists));
546 ASSERT (ti < vec_len (h->free_lists[tb]));
548 f[i].index = h->free_lists[tb][ti];
552 total_size += heap_elt_size (v, elt_at (h, f[i].index));
561 /* Compute combined bin. See if all objects can be
562 combined into existing bin. */
563 b = size_to_bin (total_size);
564 g.index = g.bin_index = 0;
565 for (i = 0; i <= i_last; i++)
572 /* Make sure we found a bin. */
/* No participating block already lives in bin B: create a new element. */
575 g.index = elt_new (h) - h->elts;
576 vec_validate (h->free_lists, b);
577 g.bin_index = vec_len (h->free_lists[b]);
578 vec_add1 (h->free_lists[b], g.index);
579 elt_insert_before (h, elt_at (h, f[0].index), elt_at (h, g.index));
/* Combined block starts at the first (lowest-offset) block's offset. */
582 g_offset = elt_at (h, f[0].index)->offset;
584 /* Delete unused bins. */
585 for (i = 0; i <= i_last; i++)
586 if (g.index != f[i].index)
588 ti = get_free_elt (v, elt_at (h, f[i].index), &tb);
589 remove_free_block (v, tb, ti);
590 elt_delete (h, elt_at (h, f[i].index));
593 /* Initialize new element. */
594 elt_at (h, g.index)->offset = g_offset;
595 set_free_elt (v, elt_at (h, g.index), g.bin_index);
599 heap_len (void *v, word handle)
601 heap_header_t *h = heap_header (v);
604 ASSERT (clib_bitmap_get (h->used_elt_bitmap, handle));
605 return heap_elt_size (v, elt_at (h, handle));
/* NOTE(review): function signature line is missing from this extract —
   presumably `void heap_free (void *v)` given the body; verify.
   Frees every auxiliary vector owned by the heap header, then (on the
   missing tail lines) presumably frees the vector itself unless the
   heap is static.  Garbled extract; code kept byte-for-byte. */
611 heap_header_t *h = heap_header (v);
617 clib_bitmap_free (h->used_elt_bitmap);
618 for (b = 0; b < vec_len (h->free_lists); b++)
619 vec_free (h->free_lists[b]);
620 vec_free (h->free_lists);
622 vec_free (h->free_elts);
623 vec_free (h->small_free_elt_free_index);
624 if (!(h->flags & HEAP_IS_STATIC))
/* NOTE(review): function signature line is missing from this extract —
   presumably `uword heap_bytes (void *v)` given the accumulator; verify.
   Sums the memory footprint of the heap: header, element payload, and
   all bookkeeping vectors/bitmaps.  Garbled extract; byte-for-byte. */
632 heap_header_t *h = heap_header (v);
638 bytes = sizeof (h[0]);
639 bytes += vec_len (v) * sizeof (h->elt_bytes);
640 for (b = 0; b < vec_len (h->free_lists); b++)
641 bytes += vec_mem_size (h->free_lists[b]);
642 bytes += vec_bytes (h->free_lists);
643 bytes += vec_mem_size (h->elts);
644 bytes += vec_mem_size (h->free_elts);
645 bytes += vec_bytes (h->used_elt_bitmap);
/* debug_elt: format (into vector S) a window of N elements around
   element I of heap V, one line per element, marking each as free or
   used (using the heap's format_elt callback when one is set).
   NOTE(review): garbled extract — fused line numbers, missing lines
   (window clamping, loop exits, return).  Code kept byte-for-byte. */
651 debug_elt (u8 * s, void *v, word i, word n)
653 heap_elt_t *e, *e0, *e1;
654 heap_header_t *h = heap_header (v);
657 if (vec_len (h->elts) == 0)
/* Walk backwards/forwards n/2 elements to find the window [e0, e1]. */
665 for (j = 0; j < n / 2; j++)
670 e1 = h->elts + h->tail;
674 for (j = 0; j < n / 2; j++)
679 for (e = e0; 1; e = heap_next (e))
681 if (heap_is_free (e))
682 s = format (s, "index %4d, free\n", e - h->elts);
683 else if (h->format_elt)
684 s = format (s, "%U", h->format_elt, v, elt_data (v, e));
686 s = format (s, "index %4d, used\n", e - h->elts);
/* format_heap: clib format () callback.  Takes (void *v, uword verbose)
   from the va_list and appends a one-line heap summary to S, plus a
   per-element dump via debug_elt when verbose.
   NOTE(review): garbled extract — fused line numbers, missing lines
   (zero-header fallback, return).  Code kept byte-for-byte. */
696 format_heap (u8 *s, va_list *va)
698 void *v = va_arg (*va, void *);
699 uword verbose = va_arg (*va, uword);
700 heap_header_t *h = heap_header (v);
/* Zeroed header presumably used as a stand-in when v is NULL — verify. */
703 clib_memset (&zero, 0, sizeof (zero));
709 f64 elt_bytes = vec_len (v) * h->elt_bytes;
710 f64 overhead_bytes = heap_bytes (v);
712 s = format (s, "heap %p, %6d objects, size %.1fk + overhead %.1fk\n",
713 v, h->used_count, elt_bytes / 1024,
714 (overhead_bytes - elt_bytes) / 1024);
718 s = debug_elt (s, v, -1, -1);
/* heap_validate: exhaustively check heap invariants — bitmap vs.
   used_count, chain endpoints, free-list membership/bin assignment,
   element counts and sizes, offset contiguity, and that no two free
   elements are adjacent.  Debug-only consistency checker.
   NOTE(review): garbled extract — fused line numbers, many missing
   lines (declarations, loop bodies/exits).  Code kept byte-for-byte;
   restore from upstream before building. */
724 heap_validate (void *v)
726 heap_header_t *h = heap_header (v);
731 uword used_count, total_size;
732 uword free_count, free_size;
734 ASSERT (h->used_count == clib_bitmap_count_set_bits (h->used_elt_bitmap));
/* Chain endpoints terminate with relative link 0 (self-pointing). */
736 ASSERT (first (h)->prev == 0);
737 ASSERT (last (h)->next == 0);
739 /* Validate number of elements and size. */
740 free_size = free_count = 0;
741 for (i = 0; i < vec_len (h->free_lists); i++)
743 free_count += vec_len (h->free_lists[i]);
744 for (o = 0; o < vec_len (h->free_lists[i]); o++)
746 e = h->elts + h->free_lists[i][o];
747 s = heap_elt_size (v, e);
/* Every free-list entry must sit in the bin matching its size. */
748 ASSERT (size_to_bin (s) == i);
749 ASSERT (heap_is_free (e));
755 uword elt_free_size, elt_free_count;
757 used_count = total_size = elt_free_size = elt_free_count = 0;
758 for (e = first (h); 1; e = n)
760 int is_free = heap_is_free (e);
762 s = heap_elt_size (v, e);
765 !clib_bitmap_get (h->used_elt_bitmap, e - h->elts));
774 ASSERT (last (h) == n);
778 /* We should never have two free adjacent elements. */
779 ASSERT (!(heap_is_free (e) && heap_is_free (n)));
/* Totals gathered by walking the chain must match the free lists. */
782 ASSERT (free_count == elt_free_count);
783 ASSERT (free_size == elt_free_size);
784 ASSERT (used_count == h->used_count + free_count);
785 ASSERT (total_size == vec_len (v));
788 free_map = vec_new (u8, used_count);
791 for (i = o = 0; 1; i++)
/* Offsets must be contiguous: each element starts where the last ended. */
793 ASSERT (heap_offset (e) == o);
794 s = heap_elt_size (v, e);
796 if (heap_is_free (e))
800 fi = get_free_elt (v, e, &fb);
802 ASSERT (fb < vec_len (h->free_lists));
803 ASSERT (fi < vec_len (h->free_lists[fb]));
/* Free-list back-pointer must round-trip to this element. */
804 ASSERT (h->free_lists[fb][fi] == e - h->elts);
806 ASSERT (!free_map[i]);
815 ASSERT (heap_prev (n) == e);
/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */