2 * Copyright (c) 2020 Cisco and/or its affiliates.
3 * Licensed under the Apache License, Version 2.0 (the "License");
4 * you may not use this file except in compliance with the License.
5 * You may obtain a copy of the License at:
7 * http://www.apache.org/licenses/LICENSE-2.0
9 * Unless required by applicable law or agreed to in writing, software
10 * distributed under the License is distributed on an "AS IS" BASIS,
11 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 * See the License for the specific language governing permissions and
13 * limitations under the License.
16 #include <vppinfra/clib.h>
17 #include <vppinfra/mem.h>
18 #include <vppinfra/time.h>
19 #include <vppinfra/format.h>
20 #include <vppinfra/clib_error.h>
22 /* while usage of dlmalloc APIs is generally discouraged, in this particular
23 * case there is significant benefit of calling them directly due to
24 * smaller memory consumption (no wwp and headroom space) */
25 #include <vppinfra/dlmalloc.h>
27 #define CLIB_MEM_BULK_DEFAULT_MIN_ELTS_PER_CHUNK 32
29 typedef struct clib_mem_bulk_chunk_hdr
/* doubly-linked list pointers chaining this chunk into the allocator's
 * full_chunks / avail_chunks lists (see clib_mem_bulk_t).
 * NOTE(review): further fields (n_free, freelist — referenced by the
 * alloc/free paths below) are elided from this view of the file. */
33 struct clib_mem_bulk_chunk_hdr *prev, *next;
34 } clib_mem_bulk_chunk_hdr_t;
44 clib_mem_bulk_chunk_hdr_t *full_chunks, *avail_chunks;
/* Total byte size of one chunk: aligned header plus elts_per_chunk
 * elements of elt_sz bytes each. */
48 bulk_chunk_size (clib_mem_bulk_t *b)
50 return (uword) b->elts_per_chunk * b->elt_sz + b->chunk_hdr_sz;
53 __clib_export clib_mem_bulk_handle_t
/* Create a bulk allocator that hands out fixed-size elements of elt_sz
 * bytes, each aligned to 'align'.  Elements are carved out of
 * power-of-2-aligned chunks allocated directly from the current heap's
 * dlmalloc mspace.  min_elts_per_chunk == 0 selects the default (32).
 * NOTE(review): several lines (error path, return statement) are elided
 * from this view of the file. */
54 clib_mem_bulk_init (u32 elt_sz, u32 align, u32 min_elts_per_chunk)
56 clib_mem_heap_t *heap = clib_mem_get_heap ();
/* allocate the bulk descriptor itself from the mspace, 16-byte aligned */
60 if ((b = mspace_memalign (heap->mspace, 16, sizeof (clib_mem_bulk_t))) == 0)
66 if (min_elts_per_chunk == 0)
67 min_elts_per_chunk = CLIB_MEM_BULK_DEFAULT_MIN_ELTS_PER_CHUNK;
/* unpoison before the first touch (sanitizer instrumentation), then zero */
69 clib_mem_unpoison (b, sizeof (clib_mem_bulk_t));
70 clib_memset (b, 0, sizeof (clib_mem_bulk_t));
71 b->mspace = heap->mspace;
/* round element size up so every element keeps the requested alignment */
73 b->elt_sz = round_pow2 (elt_sz, align);
74 b->chunk_hdr_sz = round_pow2 (sizeof (clib_mem_bulk_chunk_hdr_t), align);
75 b->elts_per_chunk = min_elts_per_chunk;
76 sz = bulk_chunk_size (b);
/* chunk_align is a power of 2 >= chunk size; clib_mem_bulk_free relies
 * on this to recover the chunk header by masking an element pointer */
77 b->chunk_align = max_pow2 (sz);
/* use the slack between sz and chunk_align for extra free elements */
78 b->elts_per_chunk += (b->chunk_align - sz) / b->elt_sz;
83 clib_mem_bulk_destroy (clib_mem_bulk_handle_t h)
/* Free every chunk and the bulk descriptor itself; any outstanding element
 * pointers become invalid.  NOTE(review): the loops walking full_chunks /
 * avail_chunks and the mspace_free calls are elided from this view. */
85 clib_mem_bulk_t *b = h;
86 clib_mem_bulk_chunk_hdr_t *c, *next;
/* re-poison chunk memory before handing it back to the mspace */
95 clib_mem_poison (c, bulk_chunk_size (b));
/* finally poison the descriptor itself */
107 clib_mem_poison (b, sizeof (clib_mem_bulk_t));
/* Address of element 'index' inside chunk c: chunk base + aligned header
 * + index * element size. */
112 get_chunk_elt_ptr (clib_mem_bulk_t *b, clib_mem_bulk_chunk_hdr_t *c, u32 index)
114 return (u8 *) c + b->chunk_hdr_sz + index * b->elt_sz;
/* Link chunk c into the doubly-linked list headed by *first.
 * NOTE(review): the body is elided from this view of the file. */
118 add_to_chunk_list (clib_mem_bulk_chunk_hdr_t **first,
119 clib_mem_bulk_chunk_hdr_t *c)
/* Unlink chunk c from the doubly-linked list headed by *first.
 * NOTE(review): the NULL guards and the *first head update surrounding
 * these two assignments are elided from this view of the file. */
129 remove_from_chunk_list (clib_mem_bulk_chunk_hdr_t **first,
130 clib_mem_bulk_chunk_hdr_t *c)
133 c->next->prev = c->prev;
135 c->prev->next = c->next;
141 clib_mem_bulk_alloc (clib_mem_bulk_handle_t h)
/* Pop one element from the bulk allocator.  If no chunk has free slots,
 * allocate a fresh chunk_align-aligned chunk and build its intrusive
 * freelist: each free slot stores the u32 index of the next free slot,
 * ~0 terminates the list.  A chunk whose freelist empties migrates from
 * avail_chunks to full_chunks.  NOTE(review): some lines (braces, list
 * insertion of the new chunk, elt_idx declaration) are elided here. */
143 clib_mem_bulk_t *b = h;
144 clib_mem_bulk_chunk_hdr_t *c = b->avail_chunks;
147 if (b->avail_chunks == 0)
149 u32 i, sz = bulk_chunk_size (b);
/* aligned allocation lets clib_mem_bulk_free recover the chunk header by
 * masking the element pointer with (chunk_align - 1).
 * NOTE(review): mspace_memalign result is not visibly NULL-checked —
 * confirm the elided lines handle allocation failure */
150 c = mspace_memalign (b->mspace, b->chunk_align, sz);
151 clib_mem_unpoison (c, sz);
/* only the header needs zeroing; element slots are overwritten below */
152 clib_memset (c, 0, sizeof (clib_mem_bulk_chunk_hdr_t));
154 c->n_free = b->elts_per_chunk;
156 /* populate freelist */
157 for (i = 0; i < b->elts_per_chunk - 1; i++)
158 *((u32 *) get_chunk_elt_ptr (b, c, i)) = i + 1;
/* last slot terminates the freelist */
159 *((u32 *) get_chunk_elt_ptr (b, c, i)) = ~0;
162 ASSERT (c->freelist != ~0);
163 elt_idx = c->freelist;
/* advance the freelist head to the index stored in the taken slot */
164 c->freelist = *((u32 *) get_chunk_elt_ptr (b, c, elt_idx));
/* chunk exhausted: move it to the full list */
170 ASSERT (c->freelist == ~0);
171 remove_from_chunk_list (&b->avail_chunks, c);
172 add_to_chunk_list (&b->full_chunks, c);
175 return get_chunk_elt_ptr (b, c, elt_idx);
179 clib_mem_bulk_free (clib_mem_bulk_handle_t h, void *p)
/* Return element p to its chunk.  The chunk header is recovered by masking
 * p with (chunk_align - 1) — valid because chunks are chunk_align-aligned
 * and chunk size <= chunk_align.  A chunk that becomes entirely free is
 * handed back to the mspace; a previously-full chunk moves back onto
 * avail_chunks.  NOTE(review): some lines (the n_free increment and the
 * exact branch structure) are elided from this view of the file. */
181 clib_mem_bulk_t *b = h;
182 uword offset = (uword) p & (b->chunk_align - 1);
183 clib_mem_bulk_chunk_hdr_t *c = (void *) ((u8 *) p - offset);
184 u32 elt_idx = (offset - b->chunk_hdr_sz) / b->elt_sz;
186 ASSERT (elt_idx < b->elts_per_chunk);
/* sanity: p must sit exactly on an element boundary of this chunk */
187 ASSERT (get_chunk_elt_ptr (b, c, elt_idx) == p);
191 if (c->n_free == b->elts_per_chunk)
193 /* chunk is empty - give it back */
194 remove_from_chunk_list (&b->avail_chunks, c);
195 clib_mem_poison (c, bulk_chunk_size (b));
196 mspace_free (b->mspace, c);
202 /* move chunk to avail chunks */
203 remove_from_chunk_list (&b->full_chunks, c);
204 add_to_chunk_list (&b->avail_chunks, c);
207 /* add elt to freelist */
208 *(u32 *) p = c->freelist;
209 c->freelist = elt_idx;
213 format_clib_mem_bulk (u8 *s, va_list *args)
/* vppinfra format() callback: render element geometry and usage statistics
 * for a clib_mem_bulk_handle_t argument.  NOTE(review): the loops walking
 * the chunk lists to accumulate n_chunks / n_free_elts, and the branch
 * leading to the "\nempty" return, are elided from this view. */
215 clib_mem_bulk_t *b = va_arg (*args, clib_mem_bulk_handle_t);
216 clib_mem_bulk_chunk_hdr_t *c;
217 uword n_chunks = 0, n_free_elts = 0, n_elts, chunk_sz;
/* per-chunk accumulation (loop header elided) */
230 n_free_elts += c->n_free;
234 n_elts = n_chunks * b->elts_per_chunk;
235 chunk_sz = b->chunk_hdr_sz + (uword) b->elts_per_chunk * b->elt_sz;
237 s = format (s, "%u bytes/elt, align %u, chunk-align %u, ", b->elt_sz,
238 b->align, b->chunk_align);
239 s = format (s, "%u elts-per-chunk, chunk size %lu bytes", b->elts_per_chunk,
/* no chunks allocated yet */
243 return format (s, "\nempty");
245 s = format (s, "\n%lu chunks allocated, ", n_chunks);
246 s = format (s, "%lu / %lu free elts (%.1f%%), ", n_free_elts, n_elts,
247 (f64) n_free_elts * 100 / n_elts);
248 s = format (s, "%lu bytes of memory consumed", n_chunks * chunk_sz);