2 * Copyright (c) 2015 Cisco and/or its affiliates.
3 * Licensed under the Apache License, Version 2.0 (the "License");
4 * you may not use this file except in compliance with the License.
5 * You may obtain a copy of the License at:
7 * http://www.apache.org/licenses/LICENSE-2.0
9 * Unless required by applicable law or agreed to in writing, software
10 * distributed under the License is distributed on an "AS IS" BASIS,
11 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 * See the License for the specific language governing permissions and
13 * limitations under the License.
16 Copyright (c) 2001, 2002, 2003 Eliot Dresselhaus
18 Permission is hereby granted, free of charge, to any person obtaining
19 a copy of this software and associated documentation files (the
20 "Software"), to deal in the Software without restriction, including
21 without limitation the rights to use, copy, modify, merge, publish,
22 distribute, sublicense, and/or sell copies of the Software, and to
23 permit persons to whom the Software is furnished to do so, subject to
24 the following conditions:
26 The above copyright notice and this permission notice shall be
27 included in all copies or substantial portions of the Software.
29 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
30 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
31 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
32 NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
33 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
34 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
35 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
38 #include <vppinfra/cache.h> /* for CLIB_CACHE_LINE_BYTES */
39 #include <vppinfra/mem.h>
40 #include <vppinfra/hash.h>
41 #include <vppinfra/vec.h>
42 #include <vppinfra/heap.h>
43 #include <vppinfra/error.h>
45 always_inline heap_elt_t *
46 elt_at (heap_header_t * h, uword i)
48 ASSERT (i < vec_len (h->elts));
52 always_inline heap_elt_t *
53 last (heap_header_t * h)
55 return elt_at (h, h->tail);
58 always_inline heap_elt_t *
59 first (heap_header_t * h)
61 return elt_at (h, h->head);
64 /* Object sizes are binned into N_BINS bins.
65 Objects with size <= SMALL_BINS have their own bins.
66 Larger objects are grouped together in power of 2 sized
69 Sizes are in units of elt_bytes bytes. */
71 /* Convert size to bin. */
73 size_to_bin (uword size)
79 if (size <= HEAP_SMALL_BINS)
87 bin = HEAP_SMALL_BINS + max_log2 (size) - (HEAP_LOG2_SMALL_BINS + 1);
88 if (bin >= HEAP_N_BINS)
89 bin = HEAP_N_BINS - 1;
95 /* Convert bin to size. */
96 always_inline __attribute__ ((unused))
97 uword bin_to_size (uword bin)
101 if (bin <= HEAP_SMALL_BINS - 1)
104 size = (uword) 1 << ((bin - HEAP_SMALL_BINS) + HEAP_LOG2_SMALL_BINS + 1);
110 elt_delete (heap_header_t * h, heap_elt_t * e)
112 heap_elt_t *l = vec_end (h->elts) - 1;
114 ASSERT (e >= h->elts && e <= l);
116 /* Update doubly linked pointers. */
118 heap_elt_t *p = heap_prev (e);
119 heap_elt_t *n = heap_next (e);
124 h->head = n - h->elts;
129 h->tail = p - h->elts;
138 /* Add to index free list or delete from end. */
140 vec_add1 (h->free_elts, e - h->elts);
142 _vec_len (h->elts)--;
147 After : P ... NEW ... E
150 elt_insert_before (heap_header_t * h, heap_elt_t * e, heap_elt_t * new)
152 heap_elt_t *p = heap_prev (e);
159 h->head = new - h->elts;
172 After : E ... NEW ... N
175 elt_insert_after (heap_header_t * h, heap_elt_t * e, heap_elt_t * new)
177 heap_elt_t *n = heap_next (e);
184 h->tail = new - h->elts;
195 always_inline heap_elt_t *
196 elt_new (heap_header_t * h)
200 if ((l = vec_len (h->free_elts)) > 0)
202 e = elt_at (h, h->free_elts[l - 1]);
203 _vec_len (h->free_elts) -= 1;
206 vec_add2 (h->elts, e, 1);
210 /* Return pointer to object at given offset.
211 Used to write free list index of free objects. */
213 elt_data (void *v, heap_elt_t * e)
215 heap_header_t *h = heap_header (v);
216 return v + heap_offset (e) * h->elt_bytes;
220 set_free_elt (void *v, heap_elt_t * e, uword fi)
222 heap_header_t *h = heap_header (v);
224 e->offset |= HEAP_ELT_FREE_BIT;
225 if (h->elt_bytes >= sizeof (u32))
227 *elt_data (v, e) = fi;
231 /* For elt_bytes < 4 we must store free index in separate
233 uword elt_index = e - h->elts;
234 vec_validate (h->small_free_elt_free_index, elt_index);
235 h->small_free_elt_free_index[elt_index] = fi;
240 get_free_elt (void *v, heap_elt_t * e, uword * bin_result)
242 heap_header_t *h = heap_header (v);
245 ASSERT (heap_is_free (e));
246 fb = size_to_bin (heap_elt_size (v, e));
248 if (h->elt_bytes >= sizeof (u32))
250 fi = *elt_data (v, e);
254 uword elt_index = e - h->elts;
255 fi = vec_elt (h->small_free_elt_free_index, elt_index);
263 remove_free_block (void *v, uword b, uword i)
265 heap_header_t *h = heap_header (v);
268 ASSERT (b < vec_len (h->free_lists));
269 ASSERT (i < vec_len (h->free_lists[b]));
271 l = vec_len (h->free_lists[b]);
275 uword t = h->free_lists[b][l - 1];
276 h->free_lists[b][i] = t;
277 set_free_elt (v, elt_at (h, t), i);
279 _vec_len (h->free_lists[b]) = l - 1;
283 search_free_list (void *v, uword size)
285 heap_header_t *h = heap_header (v);
287 uword b, fb, f_size, f_index;
293 /* Search free lists for bins >= given size. */
294 for (b = size_to_bin (size); b < vec_len (h->free_lists); b++)
295 if ((l = vec_len (h->free_lists[b])) > 0)
297 /* Find an object that is large enough.
298 Search list in reverse so that more recently freed objects will be
299 allocated again sooner. */
304 f_index = h->free_lists[b][l];
305 f = elt_at (h, f_index);
306 f_size = heap_elt_size (v, f);
307 if ((s = f_size - size) >= 0)
315 /* If we fail to find a large enough object, try the next larger size. */
319 ASSERT (heap_is_free (f));
321 /* Link in used object (u) after free object (f). */
330 f = elt_at (h, f_index);
331 elt_insert_after (h, f, u);
332 fb = size_to_bin (s);
335 u->offset = heap_offset (f) + s;
339 if (fb < HEAP_N_BINS)
342 vec_validate (h->free_lists, fb);
343 i = vec_len (h->free_lists[fb]);
344 vec_add1 (h->free_lists[fb], f - h->elts);
345 set_free_elt (v, f, i);
348 remove_free_block (v, b, l);
357 static void combine_free_blocks (void *v, heap_elt_t * e0, heap_elt_t * e1);
360 dealloc_elt (void *v, heap_elt_t * e)
362 heap_header_t *h = heap_header (v);
366 b = size_to_bin (heap_elt_size (v, e));
367 vec_validate (h->free_lists, b);
368 l = vec_len (h->free_lists[b]);
369 vec_add1 (h->free_lists[b], e - h->elts);
370 set_free_elt (v, e, l);
372 /* See if we can combine the block we just freed with neighboring free blocks. */
374 if (!heap_is_free (p))
378 if (!heap_is_free (n))
382 combine_free_blocks (v, p, n);
386 _heap_alloc (void *v,
389 uword elt_bytes, uword * offset_return, uword * handle_return)
391 uword offset = 0, align_size;
398 /* Round up alignment to power of 2. */
406 align = max_pow2 (align);
407 align_size = size + align - 1;
410 e = search_free_list (v, align_size);
412 /* If nothing found on free list, allocate object from end of vector. */
417 offset = vec_len (v);
418 max_len = heap_get_max_len (v);
420 if (max_len && offset + align_size > max_len)
424 if (!v || !(h->flags & HEAP_IS_STATIC))
427 (offset + align_size) * elt_bytes,
428 sizeof (h[0]), HEAP_DATA_ALIGN);
430 _vec_len (v) += align_size;
435 h->elt_bytes = elt_bytes;
441 /* Add new element to doubly linked chain of elements. */
446 elt_insert_after (h, last (h), e);
452 uword new_offset, old_offset;
454 old_offset = e->offset;
455 new_offset = (old_offset + align - 1) & ~(align - 1);
456 e->offset = new_offset;
457 e_index = e - h->elts;
459 /* Free fragments before and after aligned object. */
460 if (new_offset > old_offset)
462 heap_elt_t *before_e = elt_new (h);
463 before_e->offset = old_offset;
464 elt_insert_before (h, h->elts + e_index, before_e);
465 dealloc_elt (v, before_e);
468 if (new_offset + size < old_offset + align_size)
470 heap_elt_t *after_e = elt_new (h);
471 after_e->offset = new_offset + size;
472 elt_insert_after (h, h->elts + e_index, after_e);
473 dealloc_elt (v, after_e);
476 e = h->elts + e_index;
481 /* Keep track of used elements when debugging.
482 This allows deallocation to check that passed objects are valid. */
485 uword handle = e - h->elts;
486 ASSERT (!clib_bitmap_get (h->used_elt_bitmap, handle));
487 h->used_elt_bitmap = clib_bitmap_ori (h->used_elt_bitmap, handle);
490 *offset_return = e->offset;
491 *handle_return = e - h->elts;
495 *offset_return = *handle_return = ~0;
500 heap_dealloc (void *v, uword handle)
502 heap_header_t *h = heap_header (v);
505 ASSERT (handle < vec_len (h->elts));
507 /* For debugging we keep track of indices for valid objects.
508 We make sure user is not trying to free object with an invalid index. */
511 ASSERT (clib_bitmap_get (h->used_elt_bitmap, handle));
512 h->used_elt_bitmap = clib_bitmap_andnoti (h->used_elt_bitmap, handle);
517 e = h->elts + handle;
518 ASSERT (!heap_is_free (e));
523 /* While freeing objects at INDEX we noticed free blocks i0 <= index and
524 i1 >= index. We combine these two or three blocks into one big free block. */
526 combine_free_blocks (void *v, heap_elt_t * e0, heap_elt_t * e1)
528 heap_header_t *h = heap_header (v);
529 uword total_size, i, b, tb, ti, i_last, g_offset;
539 /* Compute total size of free objects i0 through i1. */
541 for (i = 0, e = e0; 1; e = heap_next (e), i++)
543 ASSERT (i < ARRAY_LEN (f));
545 ti = get_free_elt (v, e, &tb);
547 ASSERT (tb < vec_len (h->free_lists));
548 ASSERT (ti < vec_len (h->free_lists[tb]));
550 f[i].index = h->free_lists[tb][ti];
554 total_size += heap_elt_size (v, elt_at (h, f[i].index));
563 /* Compute combined bin. See if all objects can be
564 combined into existing bin. */
565 b = size_to_bin (total_size);
566 g.index = g.bin_index = 0;
567 for (i = 0; i <= i_last; i++)
574 /* Make sure we found a bin. */
577 g.index = elt_new (h) - h->elts;
578 vec_validate (h->free_lists, b);
579 g.bin_index = vec_len (h->free_lists[b]);
580 vec_add1 (h->free_lists[b], g.index);
581 elt_insert_before (h, elt_at (h, f[0].index), elt_at (h, g.index));
584 g_offset = elt_at (h, f[0].index)->offset;
586 /* Delete unused bins. */
587 for (i = 0; i <= i_last; i++)
588 if (g.index != f[i].index)
590 ti = get_free_elt (v, elt_at (h, f[i].index), &tb);
591 remove_free_block (v, tb, ti);
592 elt_delete (h, elt_at (h, f[i].index));
595 /* Initialize new element. */
596 elt_at (h, g.index)->offset = g_offset;
597 set_free_elt (v, elt_at (h, g.index), g.bin_index);
601 heap_len (void *v, word handle)
603 heap_header_t *h = heap_header (v);
606 ASSERT (clib_bitmap_get (h->used_elt_bitmap, handle));
607 return heap_elt_size (v, elt_at (h, handle));
613 heap_header_t *h = heap_header (v);
619 clib_bitmap_free (h->used_elt_bitmap);
620 for (b = 0; b < vec_len (h->free_lists); b++)
621 vec_free (h->free_lists[b]);
622 vec_free (h->free_lists);
624 vec_free (h->free_elts);
625 vec_free (h->small_free_elt_free_index);
626 if (!(h->flags & HEAP_IS_STATIC))
627 vec_free_h (v, sizeof (h[0]));
634 heap_header_t *h = heap_header (v);
640 bytes = sizeof (h[0]);
641 bytes += vec_len (v) * sizeof (h->elt_bytes);
642 for (b = 0; b < vec_len (h->free_lists); b++)
643 bytes += vec_capacity (h->free_lists[b], 0);
644 bytes += vec_bytes (h->free_lists);
645 bytes += vec_capacity (h->elts, 0);
646 bytes += vec_capacity (h->free_elts, 0);
647 bytes += vec_bytes (h->used_elt_bitmap);
653 debug_elt (u8 * s, void *v, word i, word n)
655 heap_elt_t *e, *e0, *e1;
656 heap_header_t *h = heap_header (v);
659 if (vec_len (h->elts) == 0)
667 for (j = 0; j < n / 2; j++)
672 e1 = h->elts + h->tail;
676 for (j = 0; j < n / 2; j++)
681 for (e = e0; 1; e = heap_next (e))
683 if (heap_is_free (e))
684 s = format (s, "index %4d, free\n", e - h->elts);
685 else if (h->format_elt)
686 s = format (s, "%U", h->format_elt, v, elt_data (v, e));
688 s = format (s, "index %4d, used\n", e - h->elts);
698 format_heap (u8 *s, va_list *va)
700 void *v = va_arg (*va, void *);
701 uword verbose = va_arg (*va, uword);
702 heap_header_t *h = heap_header (v);
705 clib_memset (&zero, 0, sizeof (zero));
711 f64 elt_bytes = vec_len (v) * h->elt_bytes;
712 f64 overhead_bytes = heap_bytes (v);
714 s = format (s, "heap %p, %6d objects, size %.1fk + overhead %.1fk\n",
715 v, h->used_count, elt_bytes / 1024,
716 (overhead_bytes - elt_bytes) / 1024);
720 s = debug_elt (s, v, -1, -1);
726 heap_validate (void *v)
728 heap_header_t *h = heap_header (v);
733 uword used_count, total_size;
734 uword free_count, free_size;
736 ASSERT (h->used_count == clib_bitmap_count_set_bits (h->used_elt_bitmap));
738 ASSERT (first (h)->prev == 0);
739 ASSERT (last (h)->next == 0);
741 /* Validate number of elements and size. */
742 free_size = free_count = 0;
743 for (i = 0; i < vec_len (h->free_lists); i++)
745 free_count += vec_len (h->free_lists[i]);
746 for (o = 0; o < vec_len (h->free_lists[i]); o++)
748 e = h->elts + h->free_lists[i][o];
749 s = heap_elt_size (v, e);
750 ASSERT (size_to_bin (s) == i);
751 ASSERT (heap_is_free (e));
757 uword elt_free_size, elt_free_count;
759 used_count = total_size = elt_free_size = elt_free_count = 0;
760 for (e = first (h); 1; e = n)
762 int is_free = heap_is_free (e);
764 s = heap_elt_size (v, e);
767 !clib_bitmap_get (h->used_elt_bitmap, e - h->elts));
776 ASSERT (last (h) == n);
780 /* We should never have two free adjacent elements. */
781 ASSERT (!(heap_is_free (e) && heap_is_free (n)));
784 ASSERT (free_count == elt_free_count);
785 ASSERT (free_size == elt_free_size);
786 ASSERT (used_count == h->used_count + free_count);
787 ASSERT (total_size == vec_len (v));
790 free_map = vec_new (u8, used_count);
793 for (i = o = 0; 1; i++)
795 ASSERT (heap_offset (e) == o);
796 s = heap_elt_size (v, e);
798 if (heap_is_free (e))
802 fi = get_free_elt (v, e, &fb);
804 ASSERT (fb < vec_len (h->free_lists));
805 ASSERT (fi < vec_len (h->free_lists[fb]));
806 ASSERT (h->free_lists[fb][fi] == e - h->elts);
808 ASSERT (!free_map[i]);
817 ASSERT (heap_prev (n) == e);
827 * fd.io coding-style-patch-verification: ON
830 * eval: (c-set-style "gnu")