/*
 * Copyright (c) 2019 Intel Corporation.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <stddef.h>
#include <stdalign.h>
#include <string.h>
#include <sys/queue.h>

#include <rte_common.h>
#include <rte_debug.h>
#include <rte_atomic.h>
#include <rte_spinlock.h>
#include <rte_errno.h>

#include <tle_memtank.h>

struct memobj {
	struct memchunk *chunk;                /* ptr to the chunk it belongs to */
	uint8_t buf[] __rte_cache_min_aligned; /* memory buffer */
} __rte_cache_min_aligned;

struct memchunk {
	TAILQ_ENTRY(memchunk) link; /* link to the next chunk in the tank */
	void *raw;                  /* un-aligned ptr returned by alloc() */
	uint32_t nb_total;          /* total number of objects in the chunk */
	uint32_t nb_free;           /* number of free objects in the chunk */
	void *free[];               /* array of free objects */
} __rte_cache_aligned;

TAILQ_HEAD(mchunk_head, memchunk);

struct mchunk_list {
	rte_spinlock_t lock;      /* protects the chunk list below */
	struct mchunk_head chunk; /* list of chunks */
} __rte_cache_aligned;

enum {
	MC_FULL, /* all objects in the memchunk are free */
	MC_USED, /* some of the memchunk objects are allocated */
	MC_NUM,
};

struct memtank {
	/* user provided data */
	struct tle_memtank_prm prm;

	/* internal data */
	void *raw;                /* un-aligned ptr returned by alloc() */
	size_t chunk_size;        /* full size of each memchunk */
	uint32_t obj_size;        /* full size of each memobj */
	rte_atomic32_t nb_chunks; /* number of allocated chunks */

	struct mchunk_list chl[MC_NUM]; /* lists of memchunks */

	struct tle_memtank pub;
};
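
/*
 * Note on layout: 'pub' (the user-visible struct tle_memtank) is kept as
 * the last member of struct memtank, so tank_pub_full() below can convert
 * a public handle back to the private struct with plain offsetof()
 * arithmetic, and the pub.free[] cache of object pointers can extend past
 * the end of struct memtank (see memtank_meta_size()).
 */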

#define ALIGN_MUL_CEIL(v, mul) \
	(((v) + (typeof(v))(mul) - 1) / ((typeof(v))(mul)))
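
/*
 * ALIGN_MUL_CEIL() is a plain ceiling division, e.g.
 * ALIGN_MUL_CEIL(10, 4) == 3: ten objects require three 4-object chunks.
 */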

/*
 * Obtain pointer to internal memtank struct from public one
 */
static inline struct memtank *
tank_pub_full(void *p)
{
	uintptr_t v;

	v = (uintptr_t)p - offsetof(struct memtank, pub);
	return (struct memtank *)v;
}

/*
 * Obtain pointer to internal memobj struct from public one
 */
static inline struct memobj *
obj_pub_full(void *p)
{
	uintptr_t v;

	v = (uintptr_t)p - offsetof(struct memobj, buf);
	return (struct memobj *)v;
}

/*
 * Obtain public buffer pointer from internal memobj struct
 */
static inline void *
obj_full_pub(struct memobj *obj)
{
	uintptr_t v;

	v = (uintptr_t)obj + offsetof(struct memobj, buf);
	return (void *)v;
}
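
/*
 * The pointer handed out to users is &obj->buf, i.e. the memory right past
 * the struct memobj header, so the two helpers above are exact inverses:
 * for any object pointer p obtained from this tank,
 * obj_full_pub(obj_pub_full(p)) == p.
 */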

static inline size_t
memtank_meta_size(uint32_t nb_free)
{
	size_t sz;
	static const struct memtank *mt;

	sz = sizeof(*mt) + nb_free * sizeof(mt->pub.free[0]);
	sz = RTE_ALIGN_CEIL(sz, alignof(*mt));
	return sz;
}

static inline size_t
memchunk_meta_size(uint32_t nb_obj)
{
	size_t sz;
	static const struct memchunk *ch;

	sz = sizeof(*ch) + nb_obj * sizeof(ch->free[0]);
	sz = RTE_ALIGN_CEIL(sz, alignof(*ch));
	return sz;
}

static inline size_t
memobj_size(uint32_t obj_sz)
{
	size_t sz;
	static const struct memobj *obj;

	sz = sizeof(*obj) + obj_sz;
	sz = RTE_ALIGN_CEIL(sz, alignof(*obj));
	return sz;
}

static inline size_t
memchunk_size(uint32_t nb_obj, uint32_t obj_sz)
{
	size_t sz;

	sz = memchunk_meta_size(nb_obj);
	sz += nb_obj * memobj_size(obj_sz);
	return sz;
}
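
/*
 * Resulting layout of a single chunk allocation (sizes per the helpers
 * above):
 *
 *	| memchunk header + free[nb_obj] | memobj 0 | memobj 1 | ... |
 *
 * where each memobj is a struct memobj header followed by the user-visible
 * buf[] of prm.obj_size bytes, rounded up to the memobj alignment.
 */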

init_chunk(struct memtank *mt, struct memchunk *ch)

	n = mt->prm.nb_obj_chunk;

	/* get start of memobj array */
	obj = (struct memobj *)((uintptr_t)ch + memchunk_meta_size(n));

	for (i = 0; i != n; i++) {
		ch->free[i] = obj_full_pub(obj);
		obj = (struct memobj *)((uintptr_t)obj + sz);

put_chunk(struct memtank *mt, struct memchunk *ch, void * const obj[],
	uint32_t num)

	struct mchunk_list *ls;

	/* chunk should be in the *used* list */

	rte_spinlock_lock(&ls->lock);

	RTE_ASSERT(n + num <= ch->nb_total);

	_copy_objs(ch->free + n, obj, num);
	ch->nb_free = n + num;

	/* chunk is full now */
	if (ch->nb_free == ch->nb_total) {
		TAILQ_REMOVE(&ls->chunk, ch, link);

	/* chunk is not empty anymore, move it to the head */
		TAILQ_REMOVE(&ls->chunk, ch, link);
		TAILQ_INSERT_HEAD(&ls->chunk, ch, link);

	rte_spinlock_unlock(&ls->lock);

	/* insert this chunk into the *full* list */

	rte_spinlock_lock(&ls->lock);
	TAILQ_INSERT_HEAD(&ls->chunk, ch, link);
	rte_spinlock_unlock(&ls->lock);

shrink_chunk(struct memtank *mt, uint32_t num)

	struct mchunk_list *ls;
	struct memchunk *ch[num];

	ls = &mt->chl[MC_FULL];
	rte_spinlock_lock(&ls->lock);

	for (k = 0; k != num; k++) {
		ch[k] = TAILQ_LAST(&ls->chunk, mchunk_head);

		TAILQ_REMOVE(&ls->chunk, ch[k], link);

	rte_spinlock_unlock(&ls->lock);

	rte_atomic32_sub(&mt->nb_chunks, k);

	for (i = 0; i != k; i++)
		mt->prm.free(ch[i]->raw, mt->prm.udata);

static struct memchunk *
alloc_chunk(struct memtank *mt)

	sz = mt->chunk_size + alignof(*ch);
	p = mt->prm.alloc(sz, mt->prm.udata);

	ch = RTE_PTR_ALIGN_CEIL(p, alignof(*ch));

/* Determine by how many chunks we can actually grow */
static inline uint32_t
grow_num(struct memtank *mt, uint32_t num)

	max = mt->prm.max_chunk;
	n = rte_atomic32_add_return(&mt->nb_chunks, num);

	return (k >= num) ? 0 : num - k;
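
/*
 * Illustrative note on grow_num(): it pre-reserves 'num' chunks in
 * mt->nb_chunks and returns how many of them the prm.max_chunk limit
 * actually allows.  For example, assuming max_chunk caps the total number
 * of chunks: with max_chunk = 10 and 8 chunks already allocated, a request
 * for 4 more yields 2.
 */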

grow_chunk(struct memtank *mt, uint32_t num)

	struct mchunk_list *fls;
	struct mchunk_head ls;
	struct memchunk *ch[num];

	/* check whether we can grow further */
	k = grow_num(mt, num);

	for (n = 0; n != k; n++) {
		ch[n] = alloc_chunk(mt);

	for (i = 0; i != n; i++) {
		init_chunk(mt, ch[i]);
		TAILQ_INSERT_HEAD(&ls, ch[i], link);

	fls = &mt->chl[MC_FULL];
	rte_spinlock_lock(&fls->lock);
	TAILQ_CONCAT(&fls->chunk, &ls, link);
	rte_spinlock_unlock(&fls->lock);

	rte_atomic32_sub(&mt->nb_chunks, num - n);
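
/*
 * Note: grow_num() bumps mt->nb_chunks by the full request before any
 * allocation happens; if fewer chunks could actually be allocated, the
 * rte_atomic32_sub() above hands the unused part of that reservation back.
 */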

tle_memtank_chunk_free(struct tle_memtank *t, void * const obj[],
	uint32_t nb_obj, uint32_t flags)

	struct memchunk *ch[nb_obj];

	mt = tank_pub_full(t);

	for (i = 0; i != nb_obj; i++) {
		mo = obj_pub_full(obj[i]);

	for (i = 0; i != nb_obj; i += j) {

		/* find number of consecutive objs from the same chunk */
		for (j = i + 1; j != nb_obj && ch[j] == ch[i]; j++)
			;

		put_chunk(mt, ch[i], obj + i, j - i);

	if (flags & TLE_MTANK_FREE_SHRINK)

get_chunk(struct mchunk_list *ls, struct mchunk_head *els,
	struct mchunk_head *uls, void *obj[], uint32_t nb_obj)

	struct memchunk *ch, *nch;

	rte_spinlock_lock(&ls->lock);

	for (ch = TAILQ_FIRST(&ls->chunk);
			n != nb_obj && ch != NULL && ch->nb_free != 0;
			ch = nch) {

		k = RTE_MIN(nb_obj - n, ch->nb_free);

		_copy_objs(obj + n, ch->free + l, k);

		nch = TAILQ_NEXT(ch, link);

		/* chunk is empty now */

			TAILQ_REMOVE(&ls->chunk, ch, link);
			TAILQ_INSERT_TAIL(els, ch, link);
		} else if (uls != NULL) {
			TAILQ_REMOVE(&ls->chunk, ch, link);
			TAILQ_INSERT_HEAD(uls, ch, link);

	rte_spinlock_unlock(&ls->lock);
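
/*
 * Note: get_chunk() only ever holds one list lock.  Chunks drained from the
 * *full* list are collected on the caller-provided els/uls heads and are
 * spliced into the *used* list by the caller afterwards, under that list's
 * own lock.
 */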

tle_memtank_chunk_alloc(struct tle_memtank *t, void *obj[], uint32_t nb_obj,
	uint32_t flags)

	struct mchunk_head els, uls;

	mt = tank_pub_full(t);

	/* walk through the *used* list first */
	n = get_chunk(&mt->chl[MC_USED], &mt->chl[MC_USED].chunk, NULL,
		obj, nb_obj);

	/* walk through the *full* list */
	n += get_chunk(&mt->chl[MC_FULL], &els, &uls,
		obj + n, nb_obj - n);

	if (n != nb_obj && (flags & TLE_MTANK_ALLOC_GROW) != 0) {

		/* try to allocate extra memchunks */
		k = ALIGN_MUL_CEIL(nb_obj - n,
			mt->prm.nb_obj_chunk);
		k = grow_chunk(mt, k);

		/* walk through the *full* list again */

		n += get_chunk(&mt->chl[MC_FULL], &els, &uls,
			obj + n, nb_obj - n);

	/* concatenate our temporary lists with the *used* list */
	rte_spinlock_lock(&mt->chl[MC_USED].lock);

	/* put new non-empty elems at head of the *used* list */
	TAILQ_CONCAT(&uls, &mt->chl[MC_USED].chunk, link);
	TAILQ_CONCAT(&mt->chl[MC_USED].chunk, &uls, link);

	/* put new empty elems at tail of the *used* list */
	TAILQ_CONCAT(&mt->chl[MC_USED].chunk, &els, link);

	rte_spinlock_unlock(&mt->chl[MC_USED].lock);
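
/*
 * Allocation order above: objects are taken from partially used chunks
 * first (MC_USED), then from completely free chunks (MC_FULL); only if the
 * request is still not satisfied and TLE_MTANK_ALLOC_GROW is set does the
 * tank ask the user-provided alloc() callback for more chunks.
 */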

tle_memtank_grow(struct tle_memtank *t)

	mt = tank_pub_full(t);

	/* how many objects are we short of? */
	k = t->min_free - t->nb_free;

	num = ALIGN_MUL_CEIL(k, mt->prm.nb_obj_chunk);

	/* try to grow and refill the *free* array */
	n = grow_chunk(mt, num);

tle_memtank_shrink(struct tle_memtank *t)

	mt = tank_pub_full(t);

	if (t->nb_free < t->max_free)
		return 0;

	/* how many chunks we need to free */
	num = ALIGN_MUL_CEIL(t->min_free, mt->prm.nb_obj_chunk);

	/* free up to *num* chunks */
	return shrink_chunk(mt, num);

check_param(const struct tle_memtank_prm *prm)

	if (prm->alloc == NULL || prm->free == NULL ||
			prm->min_free > prm->max_free)
		return -EINVAL;

tle_memtank_create(const struct tle_memtank_prm *prm)

	rc = check_param(prm);

	sz = memtank_meta_size(prm->max_free);
	p = prm->alloc(sz, prm->udata);

	mt = RTE_PTR_ALIGN_CEIL(p, alignof(*mt));

	memset(mt, 0, sizeof(*mt));

	mt->chunk_size = memchunk_size(prm->nb_obj_chunk, prm->obj_size);
	mt->obj_size = memobj_size(prm->obj_size);

	mt->pub.min_free = prm->min_free;
	mt->pub.max_free = prm->max_free;

	TAILQ_INIT(&mt->chl[MC_FULL].chunk);
	TAILQ_INIT(&mt->chl[MC_USED].chunk);

free_mchunk_list(struct memtank *mt, struct mchunk_list *ls)

	for (ch = TAILQ_FIRST(&ls->chunk); ch != NULL;
			ch = TAILQ_FIRST(&ls->chunk)) {
		TAILQ_REMOVE(&ls->chunk, ch, link);
		mt->prm.free(ch->raw, mt->prm.udata);

tle_memtank_destroy(struct tle_memtank *t)

	mt = tank_pub_full(t);
	free_mchunk_list(mt, &mt->chl[MC_FULL]);
	free_mchunk_list(mt, &mt->chl[MC_USED]);
	mt->prm.free(mt->raw, mt->prm.udata);
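
/*
 * Illustrative usage sketch (not part of this file): a tank backed by the
 * libc heap.  Only names visible above are used; any tle_memtank_prm fields
 * not listed here are assumed to be optional and left zero-initialized, and
 * tle_memtank_chunk_alloc() is assumed to return the number of objects it
 * actually allocated.  Needs <stdlib.h> for malloc()/free().
 *
 *	static void *
 *	sys_alloc(size_t sz, void *udata)
 *	{
 *		(void)udata;
 *		return malloc(sz);
 *	}
 *
 *	static void
 *	sys_free(void *p, void *udata)
 *	{
 *		(void)udata;
 *		free(p);
 *	}
 *
 *	void
 *	example(void)
 *	{
 *		struct tle_memtank_prm prm = {
 *			.obj_size = 128,
 *			.nb_obj_chunk = 64,
 *			.min_free = 64,
 *			.max_free = 512,
 *			.alloc = sys_alloc,
 *			.free = sys_free,
 *		};
 *		struct tle_memtank *t;
 *		void *obj[8];
 *		uint32_t n;
 *
 *		t = tle_memtank_create(&prm);
 *		if (t == NULL)
 *			return;
 *
 *		n = tle_memtank_chunk_alloc(t, obj, RTE_DIM(obj),
 *			TLE_MTANK_ALLOC_GROW);
 *		tle_memtank_chunk_free(t, obj, n, TLE_MTANK_FREE_SHRINK);
 *		tle_memtank_destroy(t);
 *	}
 */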