2 * Copyright (c) 2015 Cisco and/or its affiliates.
3 * Licensed under the Apache License, Version 2.0 (the "License");
4 * you may not use this file except in compliance with the License.
5 * You may obtain a copy of the License at:
7 * http://www.apache.org/licenses/LICENSE-2.0
9 * Unless required by applicable law or agreed to in writing, software
10 * distributed under the License is distributed on an "AS IS" BASIS,
11 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 * See the License for the specific language governing permissions and
13 * limitations under the License.
16 Copyright (c) 2001, 2002, 2003 Eliot Dresselhaus
18 Permission is hereby granted, free of charge, to any person obtaining
19 a copy of this software and associated documentation files (the
20 "Software"), to deal in the Software without restriction, including
21 without limitation the rights to use, copy, modify, merge, publish,
22 distribute, sublicense, and/or sell copies of the Software, and to
23 permit persons to whom the Software is furnished to do so, subject to
24 the following conditions:
26 The above copyright notice and this permission notice shall be
27 included in all copies or substantial portions of the Software.
29 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
30 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
31 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
32 NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
33 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
34 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
35 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
38 #include <vppinfra/cache.h> /* for CLIB_CACHE_LINE_BYTES */
39 #include <vppinfra/mem.h>
40 #include <vppinfra/hash.h>
41 #include <vppinfra/vec.h>
42 #include <vppinfra/heap.h>
43 #include <vppinfra/error.h>
/* Bounds-checked access to the i'th element descriptor in h->elts.
   NOTE(review): this extraction elides lines (no opening brace or return
   statement is visible); presumably returns h->elts + i -- confirm against
   the full source. */
45 always_inline heap_elt_t *
46 elt_at (heap_header_t * h, uword i)
48 ASSERT (i < vec_len (h->elts));
/* Last element in the heap's doubly-linked element chain (index h->tail). */
52 always_inline heap_elt_t *
53 last (heap_header_t * h)
55 return elt_at (h, h->tail);
/* First element in the heap's doubly-linked element chain (index h->head). */
58 always_inline heap_elt_t *
59 first (heap_header_t * h)
61 return elt_at (h, h->head);
64 /* Objects sizes are binned into N_BINS bins.
65 Objects with size <= SMALL_BINS have their own bins.
66 Larger objects are grouped together in power or 2 sized
69 Sizes are in units of elt_bytes bytes. */
71 /* Convert size to bin. */
/* Maps an object size (in elt_bytes units) to a free-list bin index:
   sizes <= HEAP_SMALL_BINS get dedicated bins, larger sizes share
   power-of-2 bins, clamped to the last bin.
   NOTE(review): the return type line, small-size branch body, and return
   statement are elided in this extraction -- confirm against full source. */
73 size_to_bin (uword size)
79 if (size <= HEAP_SMALL_BINS)
/* Large sizes: log2 bucket offset past the small bins, clamped below. */
87 bin = HEAP_SMALL_BINS + max_log2 (size) - (HEAP_LOG2_SMALL_BINS + 1);
88 if (bin >= HEAP_N_BINS)
89 bin = HEAP_N_BINS - 1;
95 /* Convert bin to size. */
/* Inverse of size_to_bin: smallest object size (in elt_bytes units) that
   maps to the given bin. Marked unused as it is kept for debugging/symmetry.
   NOTE(review): the small-bin branch body and return are elided here. */
96 always_inline __attribute__ ((unused))
97 uword bin_to_size (uword bin)
101 if (bin <= HEAP_SMALL_BINS - 1)
/* Large bins: power-of-2 size past the small-bin range. */
104 size = (uword) 1 << ((bin - HEAP_SMALL_BINS) + HEAP_LOG2_SMALL_BINS + 1);
/* Unlink element E from the doubly-linked element chain and recycle its
   slot: either push its index on h->free_elts or, when E is the vector's
   last element, shrink h->elts by one.
   NOTE(review): several lines (return type, braces, the conditionals
   guarding the head/tail updates and the free-vs-shrink choice) are elided
   in this extraction -- confirm against full source. */
110 elt_delete (heap_header_t * h, heap_elt_t * e)
112 heap_elt_t *l = vec_end (h->elts) - 1;
114 ASSERT (e >= h->elts && e <= l);
116 /* Update doubly linked pointers. */
118 heap_elt_t *p = heap_prev (e);
119 heap_elt_t *n = heap_next (e);
/* Presumably executed when E was the chain head/tail respectively. */
124 h->head = n - h->elts;
129 h->tail = p - h->elts;
138 /* Add to index free list or delete from end. */
140 vec_add1 (h->free_elts, e - h->elts);
142 _vec_len (h->elts)--;
/* Insert NEW into the element chain immediately before E:
   (comment fragment below is part of the original Before/After diagram). */
147 After : P ... NEW ... E
150 elt_insert_before (heap_header_t * h, heap_elt_t * e, heap_elt_t * new)
152 heap_elt_t *p = heap_prev (e);
/* Presumably executed when E was the chain head, making NEW the new head.
   NOTE(review): pointer-relinking lines are elided in this extraction. */
159 h->head = new - h->elts;
/* Insert NEW into the element chain immediately after E:
   (comment fragment below is part of the original Before/After diagram). */
172 After : E ... NEW ... N
175 elt_insert_after (heap_header_t * h, heap_elt_t * e, heap_elt_t * new)
177 heap_elt_t *n = heap_next (e);
/* Presumably executed when E was the chain tail, making NEW the new tail.
   NOTE(review): pointer-relinking lines are elided in this extraction. */
184 h->tail = new - h->elts;
/* Allocate a fresh element descriptor: pop one off h->free_elts when
   available, otherwise grow h->elts by one slot.
   NOTE(review): braces, the else branch marker, and the return statement
   are elided in this extraction. */
195 always_inline heap_elt_t *
196 elt_new (heap_header_t * h)
200 if ((l = vec_len (h->free_elts)) > 0)
202 e = elt_at (h, h->free_elts[l - 1]);
203 _vec_len (h->free_elts) -= 1;
206 vec_add2 (h->elts, e, 1);
210 /* Return pointer to object at given offset.
211 Used to write free list index of free objects. */
/* Computes the user-data address for element E: base of the heap vector
   plus the element's offset scaled by the per-element byte size. */
213 elt_data (void *v, heap_elt_t * e)
215 heap_header_t *h = heap_header (v);
216 return v + heap_offset (e) * h->elt_bytes;
/* Mark element E free and record its free-list index FI. For elements of
   at least 4 bytes the index is stored inline in the (now unused) object
   data; smaller elements use the side vector small_free_elt_free_index.
   NOTE(review): braces, the else keyword, and the cast on the elt_data
   store are elided in this extraction. */
220 set_free_elt (void *v, heap_elt_t * e, uword fi)
222 heap_header_t *h = heap_header (v);
/* Free bit lives in the offset field. */
224 e->offset |= HEAP_ELT_FREE_BIT;
225 if (h->elt_bytes >= sizeof (u32))
227 *elt_data (v, e) = fi;
231 /* For elt_bytes < 4 we must store free index in separate
233 uword elt_index = e - h->elts;
234 vec_validate (h->small_free_elt_free_index, elt_index);
235 h->small_free_elt_free_index[elt_index] = fi;
/* Recover the free-list bin (via *bin_result) and free-list index of a
   free element E -- the inverse of set_free_elt. Reads the index from the
   object data for elt_bytes >= 4, else from small_free_elt_free_index.
   NOTE(review): the declarations of fb/fi, the *bin_result store, and the
   return statement are elided in this extraction. */
240 get_free_elt (void *v, heap_elt_t * e, uword * bin_result)
242 heap_header_t *h = heap_header (v);
245 ASSERT (heap_is_free (e));
246 fb = size_to_bin (heap_elt_size (v, e));
248 if (h->elt_bytes >= sizeof (u32))
250 fi = *elt_data (v, e);
254 uword elt_index = e - h->elts;
255 fi = vec_elt (h->small_free_elt_free_index, elt_index);
/* Remove slot I from free list B using swap-with-last: move the tail
   entry into position I, fix that entry's stored free index, then shrink
   the list by one.
   NOTE(review): the guard around the swap (only needed when I is not the
   last slot) appears elided in this extraction. */
263 remove_free_block (void *v, uword b, uword i)
265 heap_header_t *h = heap_header (v);
268 ASSERT (b < vec_len (h->free_lists));
269 ASSERT (i < vec_len (h->free_lists[b]));
271 l = vec_len (h->free_lists[b]);
275 uword t = h->free_lists[b][l - 1];
276 h->free_lists[b][i] = t;
/* The moved element must learn its new position in the free list. */
277 set_free_elt (v, elt_at (h, t), i);
279 _vec_len (h->free_lists[b]) = l - 1;
/* First-fit search over the binned free lists for a block of at least
   SIZE units. On success the found free block F is split: a used element
   U takes the high end, the remainder stays free (re-binned if its size
   class changed), and F's old free-list entry is removed.
   NOTE(review): many lines are elided in this extraction -- the reverse
   list-walk loop header, the signed declaration of s, the allocation of
   the used element u, and the return. The split/re-bin logic below should
   be read against the full source. */
283 search_free_list (void *v, uword size)
285 heap_header_t *h = heap_header (v);
287 uword b, fb, f_size, f_index;
293 /* Search free lists for bins >= given size. */
294 for (b = size_to_bin (size); b < vec_len (h->free_lists); b++)
295 if ((l = vec_len (h->free_lists[b])) > 0)
297 /* Find an object that is large enough.
298 Search list in reverse so that more recently freed objects will be
299 allocated again sooner. */
303 f_index = h->free_lists[b][l];
304 f = elt_at (h, f_index);
305 f_size = heap_elt_size (v, f);
/* s = leftover size after carving SIZE out of this free block. */
306 if ((s = f_size - size) >= 0)
311 /* If we fail to find a large enough object, try the next larger size. */
315 ASSERT (heap_is_free (f));
317 /* Link in used object (u) after free object (f). */
/* Re-fetch f: elt_new above may have reallocated h->elts. */
326 f = elt_at (h, f_index);
327 elt_insert_after (h, f, u);
328 fb = size_to_bin (s);
/* Used object takes the tail end; free remainder keeps its offset. */
331 u->offset = heap_offset (f) + s;
335 if (fb < HEAP_N_BINS)
/* Remainder changed size class: move it to its new bin. */
338 vec_validate (h->free_lists, fb);
339 i = vec_len (h->free_lists[fb]);
340 vec_add1 (h->free_lists[fb], f - h->elts);
341 set_free_elt (v, f, i);
/* Drop the old entry from bin b in either case. */
344 remove_free_block (v, b, l);
353 static void combine_free_blocks (void *v, heap_elt_t * e0, heap_elt_t * e1);
/* Put element E on the free list for its size bin, then try to coalesce
   it with free neighbors via combine_free_blocks.
   NOTE(review): the declarations of p/n (presumably heap_prev/heap_next
   of E), the neighbor-selection logic, and surrounding braces are elided
   in this extraction. */
356 dealloc_elt (void *v, heap_elt_t * e)
358 heap_header_t *h = heap_header (v);
362 b = size_to_bin (heap_elt_size (v, e));
363 vec_validate (h->free_lists, b);
364 l = vec_len (h->free_lists[b]);
365 vec_add1 (h->free_lists[b], e - h->elts);
366 set_free_elt (v, e, l);
368 /* See if we can combine the block we just freed with neighboring free blocks. */
/* Presumably p/n are clamped back to E when the neighbor is not free. */
370 if (!heap_is_free (p))
374 if (!heap_is_free (n))
378 combine_free_blocks (v, p, n);
/* Core allocator: allocate SIZE units with the given alignment from heap
   vector V. Returns (via out-params) the object's offset in elt_bytes
   units and its element handle. Tries the free lists first; otherwise
   grows the vector (respecting a configured max length). Over-allocates
   by align-1 and frees the fragments before/after the aligned region.
   NOTE(review): this extraction elides many lines -- the full parameter
   list (size/align presumably among them), the zero-size early-out, the
   vector-resize call's first arguments, the success/failure returns, and
   the braces. Read against the full source before trusting details. */
382 _heap_alloc (void *v,
385 uword elt_bytes, uword * offset_return, uword * handle_return)
387 uword offset = 0, align_size;
394 /* Round up alignment to power of 2. */
402 align = max_pow2 (align);
/* Worst-case size so an aligned sub-range always fits. */
403 align_size = size + align - 1;
406 e = search_free_list (v, align_size);
408 /* If nothing found on free list, allocate object from end of vector. */
413 offset = vec_len (v);
414 max_len = heap_get_max_len (v);
/* Presumably fails the allocation when the fixed-size heap is full. */
416 if (max_len && offset + align_size > max_len)
420 if (!v || !(h->flags & HEAP_IS_STATIC))
423 (offset + align_size) * elt_bytes,
424 sizeof (h[0]), HEAP_DATA_ALIGN);
426 _vec_len (v) += align_size;
431 h->elt_bytes = elt_bytes;
437 /* Add new element to doubly linked chain of elements. */
442 elt_insert_after (h, last (h), e);
/* Alignment fix-up: carve the aligned region out of the raw block. */
448 uword new_offset, old_offset;
450 old_offset = e->offset;
451 new_offset = (old_offset + align - 1) & ~(align - 1);
452 e->offset = new_offset;
/* Keep the index, not the pointer: elt_new below may move h->elts. */
453 e_index = e - h->elts;
455 /* Free fragments before and after aligned object. */
456 if (new_offset > old_offset)
458 heap_elt_t *before_e = elt_new (h);
459 before_e->offset = old_offset;
460 elt_insert_before (h, h->elts + e_index, before_e);
461 dealloc_elt (v, before_e);
464 if (new_offset + size < old_offset + align_size)
466 heap_elt_t *after_e = elt_new (h);
467 after_e->offset = new_offset + size;
468 elt_insert_after (h, h->elts + e_index, after_e);
469 dealloc_elt (v, after_e);
472 e = h->elts + e_index;
477 /* Keep track of used elements when debugging.
478 This allows deallocation to check that passed objects are valid. */
481 uword handle = e - h->elts;
482 ASSERT (!clib_bitmap_get (h->used_elt_bitmap, handle));
483 h->used_elt_bitmap = clib_bitmap_ori (h->used_elt_bitmap, handle);
486 *offset_return = e->offset;
487 *handle_return = e - h->elts;
/* Failure path: both out-params get ~0. */
491 *offset_return = *handle_return = ~0;
/* Free the object identified by HANDLE: validate it (debug builds track
   live handles in used_elt_bitmap), then hand its element to dealloc_elt.
   NOTE(review): the return type, the CLIB_DEBUG guard around the bitmap
   check, and the trailing dealloc_elt call are elided in this extraction. */
496 heap_dealloc (void *v, uword handle)
498 heap_header_t *h = heap_header (v);
501 ASSERT (handle < vec_len (h->elts));
503 /* For debugging we keep track of indices for valid objects.
504 We make sure user is not trying to free object with an invalid index. */
507 ASSERT (clib_bitmap_get (h->used_elt_bitmap, handle));
508 h->used_elt_bitmap = clib_bitmap_andnoti (h->used_elt_bitmap, handle);
513 e = h->elts + handle;
/* Double-free guard. */
514 ASSERT (!heap_is_free (e));
519 /* While freeing objects at INDEX we noticed free blocks i0 <= index and
520 i1 >= index. We combine these two or three blocks into one big free block. */
/* Walks the chain e0..e1 (at most a few free elements, bounded by the
   local array f[]), sums their sizes, reuses one element whose bin
   already matches the combined size (or allocates a new one), deletes
   the rest, and rewrites the survivor's offset/free index.
   NOTE(review): heavily elided in this extraction -- the static return
   type, the f[]/g struct declarations, the loop-exit condition, the
   "found existing bin" break, and several braces are missing. */
522 combine_free_blocks (void *v, heap_elt_t * e0, heap_elt_t * e1)
524 heap_header_t *h = heap_header (v);
525 uword total_size, i, b, tb, ti, i_last, g_offset;
535 /* Compute total size of free objects i0 through i1. */
537 for (i = 0, e = e0; 1; e = heap_next (e), i++)
539 ASSERT (i < ARRAY_LEN (f));
541 ti = get_free_elt (v, e, &tb);
543 ASSERT (tb < vec_len (h->free_lists));
544 ASSERT (ti < vec_len (h->free_lists[tb]));
546 f[i].index = h->free_lists[tb][ti];
550 total_size += heap_elt_size (v, elt_at (h, f[i].index));
559 /* Compute combined bin. See if all objects can be
560 combined into existing bin. */
561 b = size_to_bin (total_size);
562 g.index = g.bin_index = 0;
563 for (i = 0; i <= i_last; i++)
570 /* Make sure we found a bin. */
/* No reusable element: create a fresh one and link it before the run. */
573 g.index = elt_new (h) - h->elts;
574 vec_validate (h->free_lists, b);
575 g.bin_index = vec_len (h->free_lists[b]);
576 vec_add1 (h->free_lists[b], g.index);
577 elt_insert_before (h, elt_at (h, f[0].index), elt_at (h, g.index));
/* Combined block starts where the first free block started. */
580 g_offset = elt_at (h, f[0].index)->offset;
582 /* Delete unused bins. */
583 for (i = 0; i <= i_last; i++)
584 if (g.index != f[i].index)
586 ti = get_free_elt (v, elt_at (h, f[i].index), &tb);
587 remove_free_block (v, tb, ti);
588 elt_delete (h, elt_at (h, f[i].index));
591 /* Initialize new element. */
592 elt_at (h, g.index)->offset = g_offset;
593 set_free_elt (v, elt_at (h, g.index), g.bin_index);
/* Size (in elt_bytes units) of the live object identified by HANDLE.
   NOTE(review): the return type and the debug guard around the bitmap
   assertion are elided in this extraction. */
597 heap_len (void *v, word handle)
599 heap_header_t *h = heap_header (v);
602 ASSERT (clib_bitmap_get (h->used_elt_bitmap, handle));
603 return heap_elt_size (v, elt_at (h, handle));
/* Tear down all heap bookkeeping: the used-element bitmap, every
   per-bin free list, the element and free-element vectors, the small
   free-index side vector, and finally the heap vector itself (unless it
   is statically allocated).
   NOTE(review): the function signature line is elided in this extraction
   (presumably the heap free entry point) -- confirm name and return type
   against the full source. */
609 heap_header_t *h = heap_header (v);
615 clib_bitmap_free (h->used_elt_bitmap);
616 for (b = 0; b < vec_len (h->free_lists); b++)
617 vec_free (h->free_lists[b]);
618 vec_free (h->free_lists);
620 vec_free (h->free_elts);
621 vec_free (h->small_free_elt_free_index);
622 if (!(h->flags & HEAP_IS_STATIC))
623 vec_free_h (v, sizeof (h[0]));
/* Total memory footprint of the heap: header, data vector, all free
   lists (capacity, not just length), element vectors, and the used
   bitmap.
   NOTE(review): the function signature line and the return statement are
   elided in this extraction (presumably the heap byte-accounting entry
   point) -- confirm against the full source. */
630 heap_header_t *h = heap_header (v);
636 bytes = sizeof (h[0]);
637 bytes += vec_len (v) * sizeof (h->elt_bytes);
638 for (b = 0; b < vec_len (h->free_lists); b++)
639 bytes += vec_capacity (h->free_lists[b], 0);
640 bytes += vec_bytes (h->free_lists);
641 bytes += vec_capacity (h->elts, 0);
642 bytes += vec_capacity (h->free_elts, 0);
643 bytes += vec_bytes (h->used_elt_bitmap);
/* Format a window of N elements around element I into string S: walks
   backward/forward N/2 steps from I (or shows the whole chain), printing
   each element as free or used (via the heap's format_elt callback when
   one is registered).
   NOTE(review): the return type, the i<0 whole-chain special case, the
   loop bodies that step e0/e1, and the loop-exit condition are elided in
   this extraction. */
649 debug_elt (u8 * s, void *v, word i, word n)
651 heap_elt_t *e, *e0, *e1;
652 heap_header_t *h = heap_header (v);
655 if (vec_len (h->elts) == 0)
/* Step e0 backward n/2 elements from i. */
663 for (j = 0; j < n / 2; j++)
668 e1 = h->elts + h->tail;
/* Step e1 forward n/2 elements. */
672 for (j = 0; j < n / 2; j++)
677 for (e = e0; 1; e = heap_next (e))
679 if (heap_is_free (e))
680 s = format (s, "index %4d, free\n", e - h->elts);
681 else if (h->format_elt)
682 s = format (s, "%U", h->format_elt, v, elt_data (v, e));
684 s = format (s, "index %4d, used\n", e - h->elts);
/* vppinfra %U formatter for a heap: prints a one-line summary (object
   count, data size, overhead) and, when VERBOSE, a dump of all elements
   via debug_elt.
   NOTE(review): the return type, the use of the zero header for a NULL
   heap, and the return statement are elided in this extraction. */
694 format_heap (u8 * s, va_list * va)
696 void *v = va_arg (*va, void *);
697 uword verbose = va_arg (*va, uword);
698 heap_header_t *h = heap_header (v);
/* Zeroed stand-in header, presumably used when v is NULL. */
701 memset (&zero, 0, sizeof (zero));
707 f64 elt_bytes = vec_len (v) * h->elt_bytes;
708 f64 overhead_bytes = heap_bytes (v);
710 s = format (s, "heap %p, %6d objects, size %.1fk + overhead %.1fk\n",
711 v, h->used_count, elt_bytes / 1024,
712 (overhead_bytes - elt_bytes) / 1024);
/* -1, -1 => dump every element. */
716 s = debug_elt (s, v, -1, -1);
/* Exhaustive consistency check of the heap's invariants: used-count vs.
   bitmap, chain endpoints, per-bin free-list membership and sizing,
   element counts/sizes from a chain walk, no two adjacent free blocks,
   and contiguous offsets covering the whole data vector.
   NOTE(review): substantially elided in this extraction -- the return
   type, accumulation statements inside the walks, the free_map bookkeeping,
   loop-exit conditions, and closing braces are missing. Treat the visible
   assertions as the authoritative list of checked invariants. */
722 heap_validate (void *v)
724 heap_header_t *h = heap_header (v);
729 uword used_count, total_size;
730 uword free_count, free_size;
732 ASSERT (h->used_count == clib_bitmap_count_set_bits (h->used_elt_bitmap));
/* Chain endpoints must terminate the doubly-linked list. */
734 ASSERT (first (h)->prev == 0);
735 ASSERT (last (h)->next == 0);
737 /* Validate number of elements and size. */
738 free_size = free_count = 0;
739 for (i = 0; i < vec_len (h->free_lists); i++)
741 free_count += vec_len (h->free_lists[i]);
742 for (o = 0; o < vec_len (h->free_lists[i]); o++)
744 e = h->elts + h->free_lists[i][o];
745 s = heap_elt_size (v, e);
/* Every listed element must be free and binned by its own size. */
746 ASSERT (size_to_bin (s) == i);
747 ASSERT (heap_is_free (e));
753 uword elt_free_size, elt_free_count;
755 used_count = total_size = elt_free_size = elt_free_count = 0;
756 for (e = first (h); 1; e = n)
758 int is_free = heap_is_free (e);
760 s = heap_elt_size (v, e);
/* Free elements must not be marked used in the debug bitmap. */
763 !clib_bitmap_get (h->used_elt_bitmap, e - h->elts));
772 ASSERT (last (h) == n);
776 /* We should never have two free adjacent elements. */
777 ASSERT (!(heap_is_free (e) && heap_is_free (n)));
/* Chain-walk totals must agree with the free-list totals. */
780 ASSERT (free_count == elt_free_count);
781 ASSERT (free_size == elt_free_size);
782 ASSERT (used_count == h->used_count + free_count);
783 ASSERT (total_size == vec_len (v));
786 free_map = vec_new (u8, used_count);
/* Final pass: offsets must be contiguous and free indices consistent. */
789 for (i = o = 0; 1; i++)
791 ASSERT (heap_offset (e) == o);
792 s = heap_elt_size (v, e);
794 if (heap_is_free (e))
798 fi = get_free_elt (v, e, &fb);
800 ASSERT (fb < vec_len (h->free_lists));
801 ASSERT (fi < vec_len (h->free_lists[fb]));
802 ASSERT (h->free_lists[fb][fi] == e - h->elts);
/* Each free element may appear only once. */
804 ASSERT (!free_map[i]);
813 ASSERT (heap_prev (n) == e);
823 * fd.io coding-style-patch-verification: ON
826 * eval: (c-set-style "gnu")