2 * Copyright (c) 2017 Cisco and/or its affiliates.
3 * Licensed under the Apache License, Version 2.0 (the "License");
4 * you may not use this file except in compliance with the License.
5 * You may obtain a copy of the License at:
7 * http://www.apache.org/licenses/LICENSE-2.0
9 * Unless required by applicable law or agreed to in writing, software
10 * distributed under the License is distributed on an "AS IS" BASIS,
11 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 * See the License for the specific language governing permissions and
13 * limitations under the License.
16 * buffer.c: allocate/free network buffers.
18 * Copyright (c) 2008 Eliot Dresselhaus
20 * Permission is hereby granted, free of charge, to any person obtaining
21 * a copy of this software and associated documentation files (the
22 * "Software"), to deal in the Software without restriction, including
23 * without limitation the rights to use, copy, modify, merge, publish,
24 * distribute, sublicense, and/or sell copies of the Software, and to
25 * permit persons to whom the Software is furnished to do so, subject to
26 * the following conditions:
28 * The above copyright notice and this permission notice shall be
29 * included in all copies or substantial portions of the Software.
31 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
32 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
33 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
34 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
35 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
36 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
37 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
43 * Allocate/free network buffers.
47 #include <linux/vfio.h>
48 #include <sys/ioctl.h>
50 #include <rte_config.h>
52 #include <rte_common.h>
54 #include <rte_memory.h>
55 #include <rte_memzone.h>
56 #include <rte_tailq.h>
58 #include <rte_per_lcore.h>
59 #include <rte_launch.h>
60 #include <rte_atomic.h>
61 #include <rte_cycles.h>
62 #include <rte_prefetch.h>
63 #include <rte_lcore.h>
64 #include <rte_per_lcore.h>
65 #include <rte_branch_prediction.h>
66 #include <rte_interrupts.h>
68 #include <rte_random.h>
69 #include <rte_debug.h>
70 #include <rte_ether.h>
71 #include <rte_ethdev.h>
73 #include <rte_mempool.h>
75 #include <rte_version.h>
77 #include <vlib/vlib.h>
78 #include <vlib/unix/unix.h>
79 #include <vlib/pci/pci.h>
80 #include <vlib/linux/vfio.h>
81 #include <vnet/vnet.h>
82 #include <dpdk/device/dpdk.h>
83 #include <dpdk/device/dpdk_priv.h>
/* The vlib pre-data area must be exactly the DPDK mbuf headroom so a
   vlib_buffer_t and an rte_mbuf can overlay the same memory with their
   data regions lining up. */
85 STATIC_ASSERT (VLIB_BUFFER_PRE_DATA_SIZE == RTE_PKTMBUF_HEADROOM,
86 "VLIB_BUFFER_PRE_DATA_SIZE must be equal to RTE_PKTMBUF_HEADROOM");
/* Per-worker-thread scratch state for buffer alloc/free.  Cache-line
   aligned so threads never false-share.  NOTE(review): the opening
   `typedef struct` line is not visible in this chunk. */
90 CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
/* reusable scratch vector for mbufs pulled from a mempool in bulk */
91 struct rte_mbuf **mbuf_alloc_list;
/* vector (indexed by buffer pool index) of vectors of mbufs queued for
   bulk return to their owning mempool */
92 struct rte_mbuf ***mbuf_pending_free_list;
94 /* cached last pool */
95 struct rte_mempool *last_pool;
96 u8 last_buffer_pool_index;
97 } dpdk_buffer_per_thread_data;
/* Module-wide state.  NOTE(review): the opening `typedef struct` line is
   not visible in this chunk. */
/* fd of the VFIO container, or -1 until discovered (see scan_vfio_fd) */
101 int vfio_container_fd;
/* per-thread data, one element per vlib main (see dpdk_buffer_init) */
102 dpdk_buffer_per_thread_data *ptd;
103 } dpdk_buffer_main_t;
105 dpdk_buffer_main_t dpdk_buffer_main;
107 static_always_inline void
108 dpdk_rte_pktmbuf_free (vlib_main_t * vm, u32 thread_index, vlib_buffer_t * b)
/* Queue the mbuf(s) backing buffer b — following the next-buffer chain —
   onto the calling thread's per-pool pending-free list; the actual bulk
   rte_mempool_put happens later (see vlib_buffer_free_inline). */
110 vlib_buffer_t *hb = b;
111 dpdk_buffer_main_t *dbm = &dpdk_buffer_main;
112 dpdk_buffer_per_thread_data *d = vec_elt_at_index (dbm->ptd, thread_index);
115 mb = rte_mbuf_from_vlib_buffer (hb);
119 next = b->next_buffer;
120 mb = rte_mbuf_from_vlib_buffer (b);
/* extra references taken on this buffer must be transferred to the mbuf
   refcount before freeing */
122 if (PREDICT_FALSE (b->n_add_refs))
124 rte_mbuf_refcnt_update (mb, b->n_add_refs);
/* prefree_seg returns non-NULL only when the segment is actually to be
   returned to its pool (refcount reached zero) */
128 mb = rte_pktmbuf_prefree_seg (mb);
/* cache the pool -> buffer_pool_index lookup; mempool_get_priv is only
   hit when the pool changes from the previous segment */
131 if (mb->pool != d->last_pool)
133 d->last_pool = mb->pool;
134 dpdk_mempool_private_t *privp = rte_mempool_get_priv (d->last_pool);
135 d->last_buffer_pool_index = privp->buffer_pool_index;
136 vec_validate_aligned (d->mbuf_pending_free_list,
137 d->last_buffer_pool_index,
138 CLIB_CACHE_LINE_BYTES);
140 vec_add1 (d->mbuf_pending_free_list[d->last_buffer_pool_index], mb);
/* walk the chain while the NEXT_PRESENT flag says there is another
   segment to free */
143 if (flags & VLIB_BUFFER_NEXT_PRESENT)
145 b = vlib_get_buffer (vm, next);
150 #ifndef CLIB_MARCH_VARIANT
/* Release every buffer held on free list f back to DPDK, free the list's
   index vector, then poison the struct to catch use-after-delete. */
152 del_free_list (vlib_main_t * vm, vlib_buffer_free_list_t * f)
156 u32 thread_index = vlib_get_thread_index ();
158 for (i = 0; i < vec_len (f->buffers); i++)
160 b = vlib_get_buffer (vm, f->buffers[i]);
161 dpdk_rte_pktmbuf_free (vm, thread_index, b);
165 vec_free (f->buffers);
/* 0xab poison: stale references to a deleted list fail loudly */
167 memset (f, 0xab, sizeof (f[0]));
170 /* Add buffer free list. */
/* Delete free_list_index from the main thread and from every worker's
   vlib_main.  Main-thread only (asserted). */
172 dpdk_buffer_delete_free_list (vlib_main_t * vm,
173 vlib_buffer_free_list_index_t free_list_index)
175 vlib_buffer_free_list_t *f;
178 ASSERT (vlib_get_thread_index () == 0);
180 f = vlib_buffer_get_free_list (vm, free_list_index);
182 del_free_list (vm, f);
184 pool_put (vm->buffer_free_list_pool, f);
/* repeat for each worker thread's private copy of the free list
   (index 0 is the main thread, handled above) */
186 for (i = 1; i < vec_len (vlib_mains); i++)
188 vlib_main_t *wvm = vlib_mains[i];
189 f = vlib_buffer_get_free_list (vlib_mains[i], free_list_index);
190 del_free_list (wvm, f);
191 pool_put (wvm->buffer_free_list_pool, f);
196 /* Make sure free list has at least given number of free buffers. */
/* Pull mbufs in bulk from this NUMA socket's mempool, initialize their
   vlib_buffer_t headers from a template, and append their buffer indices
   to fl->buffers.  Returns the number of free buffers available (the
   return statements are not visible in this chunk). */
198 CLIB_MULTIARCH_FN (dpdk_buffer_fill_free_list) (vlib_main_t * vm,
199 vlib_buffer_free_list_t * fl,
200 uword min_free_buffers)
202 dpdk_main_t *dm = &dpdk_main;
203 dpdk_buffer_main_t *dbm = &dpdk_buffer_main;
204 struct rte_mbuf **mb;
207 unsigned socket_id = rte_socket_id ();
208 u32 thread_index = vlib_get_thread_index ();
209 dpdk_buffer_per_thread_data *d = vec_elt_at_index (dbm->ptd, thread_index);
210 struct rte_mempool *rmp = dm->pktmbuf_pools[socket_id];
211 dpdk_mempool_private_t *privp = rte_mempool_get_priv (rmp);
216 if (PREDICT_FALSE (rmp == 0))
219 /* Already have enough free buffers on free list? */
220 n_alloc = min_free_buffers - vec_len (fl->buffers);
222 return min_free_buffers;
224 /* Always allocate round number of buffers. */
225 n_alloc = round_pow2 (n_alloc, CLIB_CACHE_LINE_BYTES / sizeof (u32));
227 /* Always allocate new buffers in reasonably large sized chunks. */
228 n_alloc = clib_max (n_alloc, fl->min_n_buffers_each_alloc);
230 vec_validate_aligned (d->mbuf_alloc_list, n_alloc - 1,
231 CLIB_CACHE_LINE_BYTES);
/* bulk get is all-or-nothing; on failure the (not visible) error path
   runs without touching fl */
233 if (rte_mempool_get_bulk (rmp, (void *) d->mbuf_alloc_list, n_alloc) < 0)
/* build a template buffer header once, then copy it onto every mbuf */
236 memset (&bt, 0, sizeof (vlib_buffer_t));
237 vlib_buffer_init_for_free_list (&bt, fl);
238 bt.buffer_pool_index = privp->buffer_pool_index;
240 _vec_len (d->mbuf_alloc_list) = n_alloc;
242 first = vec_len (fl->buffers);
243 vec_resize_aligned (fl->buffers, n_alloc, CLIB_CACHE_LINE_BYTES);
246 mb = d->mbuf_alloc_list;
247 bi = fl->buffers + first;
/* main loop (header not visible) processes 8 mbufs per iteration */
249 ASSERT (n_left % 8 == 0);
/* stop prefetching near the tail; prefetch 2 iterations (16..23) ahead */
253 if (PREDICT_FALSE (n_left < 24))
256 vlib_prefetch_buffer_header (vlib_buffer_from_rte_mbuf (mb[16]), STORE);
257 vlib_prefetch_buffer_header (vlib_buffer_from_rte_mbuf (mb[17]), STORE);
258 vlib_prefetch_buffer_header (vlib_buffer_from_rte_mbuf (mb[18]), STORE);
259 vlib_prefetch_buffer_header (vlib_buffer_from_rte_mbuf (mb[19]), STORE);
260 vlib_prefetch_buffer_header (vlib_buffer_from_rte_mbuf (mb[20]), STORE);
261 vlib_prefetch_buffer_header (vlib_buffer_from_rte_mbuf (mb[21]), STORE);
262 vlib_prefetch_buffer_header (vlib_buffer_from_rte_mbuf (mb[22]), STORE);
263 vlib_prefetch_buffer_header (vlib_buffer_from_rte_mbuf (mb[23]), STORE);
/* record the 8 buffer indices, then stamp the 64-byte template header
   onto each of the 8 buffers, 4 at a time */
266 vlib_get_buffer_indices_with_offset (vm, (void **) mb, bi, 8,
267 sizeof (struct rte_mbuf));
268 clib_memcpy64_x4 (vlib_buffer_from_rte_mbuf (mb[0]),
269 vlib_buffer_from_rte_mbuf (mb[1]),
270 vlib_buffer_from_rte_mbuf (mb[2]),
271 vlib_buffer_from_rte_mbuf (mb[3]), &bt);
272 clib_memcpy64_x4 (vlib_buffer_from_rte_mbuf (mb[4]),
273 vlib_buffer_from_rte_mbuf (mb[5]),
274 vlib_buffer_from_rte_mbuf (mb[6]),
275 vlib_buffer_from_rte_mbuf (mb[7]), &bt);
/* optional per-free-list initializer runs over the newly added range */
282 if (fl->buffer_init_function)
283 fl->buffer_init_function (vm, fl, fl->buffers + first, n_alloc);
285 fl->n_alloc += n_alloc;
290 static_always_inline void
/* Prefetch both halves of buffer bi: the rte_mbuf (two cache lines, for
   write — the free path updates it) and the vlib header (read-only). */
291 dpdk_prefetch_buffer_by_index (vlib_main_t * vm, u32 bi)
295 b = vlib_get_buffer (vm, bi);
296 mb = rte_mbuf_from_vlib_buffer (b);
297 CLIB_PREFETCH (mb, 2 * CLIB_CACHE_LINE_BYTES, STORE);
298 CLIB_PREFETCH (b, CLIB_CACHE_LINE_BYTES, LOAD);
301 static_always_inline void
/* Either recycle buffer bi onto its free list (multicast-recycle case)
   or hand it to dpdk_rte_pktmbuf_free for return to DPDK. */
302 recycle_or_free (vlib_main_t * vm, vlib_buffer_main_t * bm, u32 bi,
305 vlib_buffer_free_list_t *fl;
306 u32 thread_index = vlib_get_thread_index ();
307 vlib_buffer_free_list_index_t fi;
308 fl = vlib_buffer_get_buffer_free_list (vm, b, &fi);
310 /* The only current use of this callback: multicast recycle */
311 if (PREDICT_FALSE (fl->buffers_added_to_freelist_function != 0))
/* add to the list; RECYCLE flag decides whether the buffer is reset */
315 vlib_buffer_add_to_free_list (vm, fl, bi,
316 (b->flags & VLIB_BUFFER_RECYCLE) == 0);
/* remember this free list for the post-loop announce callbacks, but
   only once per list */
318 for (j = 0; j < vec_len (vm->buffer_announce_list); j++)
320 if (fl == vm->buffer_announce_list[j])
321 goto already_announced;
323 vec_add1 (vm->buffer_announce_list, fl);
/* common path: non-recycled buffers go straight back to DPDK */
329 if (PREDICT_TRUE ((b->flags & VLIB_BUFFER_RECYCLE) == 0))
330 dpdk_rte_pktmbuf_free (vm, thread_index, b);
334 static_always_inline void
/* Core free path: recycle-or-free n_buffers buffers (quad-unrolled with
   prefetch), fire any pending free-list announce callbacks, then flush
   each per-pool pending-free vector back to its mempool in one bulk put. */
335 vlib_buffer_free_inline (vlib_main_t * vm,
336 u32 * buffers, u32 n_buffers, u32 follow_buffer_next)
338 vlib_buffer_main_t *bm = &buffer_main;
339 dpdk_buffer_main_t *dbm = &dpdk_buffer_main;
340 vlib_buffer_t *b0, *b1, *b2, *b3;
341 u32 thread_index = vlib_get_thread_index ();
342 dpdk_buffer_per_thread_data *d = vec_elt_at_index (dbm->ptd, thread_index);
344 u32 (*cb) (vlib_main_t * vm, u32 * buffers, u32 n_buffers,
345 u32 follow_buffer_next);
/* an installed callback may filter the set and return a new count */
347 cb = bm->buffer_free_callback;
349 if (PREDICT_FALSE (cb != 0))
350 n_buffers = (*cb) (vm, buffers, n_buffers, follow_buffer_next);
/* 4-wide unrolled loop, prefetching 4 buffers ahead */
355 while (i + 7 < n_buffers)
357 dpdk_prefetch_buffer_by_index (vm, buffers[i + 4]);
358 dpdk_prefetch_buffer_by_index (vm, buffers[i + 5]);
359 dpdk_prefetch_buffer_by_index (vm, buffers[i + 6]);
360 dpdk_prefetch_buffer_by_index (vm, buffers[i + 7]);
362 b0 = vlib_get_buffer (vm, buffers[i]);
363 b1 = vlib_get_buffer (vm, buffers[i + 1]);
364 b2 = vlib_get_buffer (vm, buffers[i + 2]);
365 b3 = vlib_get_buffer (vm, buffers[i + 3]);
367 VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b0);
368 VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b1);
369 VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b2);
370 VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b3);
372 recycle_or_free (vm, bm, buffers[i], b0);
373 recycle_or_free (vm, bm, buffers[i + 1], b1);
374 recycle_or_free (vm, bm, buffers[i + 2], b2);
375 recycle_or_free (vm, bm, buffers[i + 3], b3);
/* scalar cleanup for the remaining (< 8) buffers */
379 while (i < n_buffers)
381 b0 = vlib_get_buffer (vm, buffers[i]);
382 VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b0);
383 recycle_or_free (vm, bm, buffers[i], b0);
/* notify each free list that gained recycled buffers (collected by
   recycle_or_free), then reset the announce list */
386 if (vec_len (vm->buffer_announce_list))
388 vlib_buffer_free_list_t *fl;
389 for (i = 0; i < vec_len (vm->buffer_announce_list); i++)
391 fl = vm->buffer_announce_list[i];
392 fl->buffers_added_to_freelist_function (vm, fl);
394 _vec_len (vm->buffer_announce_list) = 0;
/* bulk-return pending mbufs pool by pool; the pool pointer is taken
   from the last queued mbuf of each vector */
397 vec_foreach_index (i, d->mbuf_pending_free_list)
399 int len = vec_len (d->mbuf_pending_free_list[i]);
402 rte_mempool_put_bulk (d->mbuf_pending_free_list[i][len - 1]->pool,
403 (void *) d->mbuf_pending_free_list[i], len);
404 vec_reset_length (d->mbuf_pending_free_list[i]);
/* Public free callback: frees whole chains (follows next_buffer). */
410 CLIB_MULTIARCH_FN (dpdk_buffer_free) (vlib_main_t * vm, u32 * buffers,
413 vlib_buffer_free_inline (vm, buffers, n_buffers, /* follow_buffer_next */
/* Public free callback: frees only the head buffers, never following
   the next_buffer chain. */
418 CLIB_MULTIARCH_FN (dpdk_buffer_free_no_next) (vlib_main_t * vm, u32 * buffers,
421 vlib_buffer_free_inline (vm, buffers, n_buffers, /* follow_buffer_next */
425 #ifndef CLIB_MARCH_VARIANT
/* Packet-template init callback: (re)initialize template vt with the
   given packet data, under the worker thread barrier so no worker
   observes a half-built template. */
427 dpdk_packet_template_init (vlib_main_t * vm,
430 uword n_packet_data_bytes,
431 uword min_n_buffers_each_alloc, u8 * name)
433 vlib_packet_template_t *t = (vlib_packet_template_t *) vt;
435 vlib_worker_thread_barrier_sync (vm);
436 memset (t, 0, sizeof (t[0]));
438 vec_add (t->packet_data, packet_data, n_packet_data_bytes);
440 vlib_worker_thread_barrier_release (vm);
443 static clib_error_t *
/* Directory-scan callback over /proc/self/fd: find an already-open fd
   whose symlink target is exactly "/dev/vfio/vfio" and record it as the
   VFIO container fd (skipping the one vfio_main already owns). */
444 scan_vfio_fd (void *arg, u8 * path_name, u8 * file_name)
446 dpdk_buffer_main_t *dbm = &dpdk_buffer_main;
447 linux_vfio_main_t *lvm = &vfio_main;
448 const char fn[] = "/dev/vfio/vfio";
449 char buff[sizeof (fn)] = { 0 };
/* NUL-terminate the vector-backed path for the readlink() C-string API */
451 u8 *path = format (0, "%v%c", path_name, 0);
/* readlink does not NUL-terminate; the +1 makes the length comparison
   line up with sizeof (fn), which counts the terminator */
453 if (readlink ((char *) path, buff, sizeof (fn)) + 1 != sizeof (fn))
456 if (strncmp (fn, buff, sizeof (fn)))
/* the fd number is the directory entry's file name */
459 fd = atoi ((char *) file_name);
460 if (fd != lvm->container_fd)
461 dbm->vfio_container_fd = fd;
/* Create a DPDK mempool of num_elts elements of elt_size bytes on NUMA
   node `numa`, backed by a vlib physmem region (hugepages, shared), and
   DMA-map its pages through VFIO when a container fd is available.
   Outputs the mempool via _mp and the region index via pri; returns a
   clib error on failure. */
469 dpdk_pool_create (vlib_main_t * vm, u8 * pool_name, u32 elt_size,
470 u32 num_elts, u32 pool_priv_size, u16 cache_size, u8 numa,
471 struct rte_mempool ** _mp,
472 vlib_physmem_region_index_t * pri)
474 dpdk_buffer_main_t *dbm = &dpdk_buffer_main;
475 struct rte_mempool *mp;
476 vlib_physmem_region_t *pr;
477 dpdk_mempool_private_t priv;
478 clib_error_t *error = 0;
/* size the backing memory: per-object size, then total bytes assuming
   2MB (2^21) pages */
482 obj_size = rte_mempool_calc_obj_size (elt_size, 0, 0);
483 size = rte_mempool_xmem_size (num_elts, obj_size, 21, 0);
486 vlib_physmem_region_alloc (vm, (char *) pool_name, size, numa,
487 VLIB_PHYSMEM_F_HUGETLB | VLIB_PHYSMEM_F_SHARED,
492 pr = vlib_physmem_get_region (vm, pri[0]);
/* create the mempool shell first; memory is populated explicitly below */
495 rte_mempool_create_empty ((char *) pool_name, num_elts, elt_size,
496 512, pool_priv_size, numa, 0);
498 return clib_error_return (0, "failed to create %s", pool_name);
500 rte_mempool_set_ops_byname (mp, RTE_MBUF_DEFAULT_MEMPOOL_OPS, NULL);
502 /* Call the mempool priv initializer */
503 priv.mbp_priv.mbuf_data_room_size = VLIB_BUFFER_PRE_DATA_SIZE +
504 VLIB_BUFFER_DATA_SIZE;
505 priv.mbp_priv.mbuf_priv_size = VLIB_BUFFER_HDR_SIZE;
506 rte_pktmbuf_pool_init (mp, &priv);
/* hand the physmem region's pages to the mempool; must account for
   every object or the pool is unusable */
509 rte_mempool_populate_iova_tab (mp, pr->mem, pr->page_table, pr->n_pages,
510 pr->log2_page_size, NULL, NULL);
511 if (ret != (i32) mp->size)
513 rte_mempool_free (mp);
514 return clib_error_return (0, "failed to populate %s", pool_name);
519 /* DPDK currently doesn't provide API to map DMA memory for empty mempool
520 so we are using this hack, will be nice to have at least API to get
/* lazily discover the container fd by scanning our own open fds */
522 if (dbm->vfio_container_fd == -1)
523 foreach_directory_file ("/proc/self/fd", scan_vfio_fd, 0, 0);
525 if (dbm->vfio_container_fd != -1)
527 struct vfio_iommu_type1_dma_map dm = { 0 };
529 dm.argsz = sizeof (struct vfio_iommu_type1_dma_map);
530 dm.flags = VFIO_DMA_MAP_FLAG_READ | VFIO_DMA_MAP_FLAG_WRITE;
/* map each hugepage: iova taken from the physmem page table */
533 vec_foreach_index (i, pr->page_table)
535 dm.vaddr = pointer_to_uword (pr->mem) + (i << pr->log2_page_size);
536 dm.size = 1 << pr->log2_page_size;
537 dm.iova = pr->page_table[i];
538 if ((rv = ioctl (dbm->vfio_container_fd, VFIO_IOMMU_MAP_DMA, &dm)))
/* EINVAL is tolerated (page may already be mapped); anything else
   is worth a warning */
542 if (rv != 0 && errno != EINVAL)
543 clib_unix_warning ("ioctl(VFIO_IOMMU_MAP_DMA) pool '%s'", pool_name);
/* Create (or reuse) the pktmbuf pool for a CPU socket: build the pool
   via dpdk_pool_create, run the per-mbuf initializers, and register a
   matching vlib buffer pool.  On failure, fall back to any existing
   pool from another socket with a warning. */
550 dpdk_buffer_pool_create (vlib_main_t * vm, unsigned num_mbufs,
553 dpdk_main_t *dm = &dpdk_main;
554 struct rte_mempool *rmp;
555 vlib_physmem_region_index_t pri;
556 clib_error_t *error = 0;
560 vec_validate_aligned (dm->pktmbuf_pools, socket_id, CLIB_CACHE_LINE_BYTES);
562 /* pool already exists, nothing to do */
563 if (dm->pktmbuf_pools[socket_id])
566 pool_name = format (0, "dpdk_mbuf_pool_socket%u%c", socket_id, 0);
/* element = mbuf struct + vlib header (as mbuf priv) + data room */
568 elt_size = sizeof (struct rte_mbuf) +
569 VLIB_BUFFER_HDR_SIZE /* priv size */ +
570 VLIB_BUFFER_PRE_DATA_SIZE + VLIB_BUFFER_DATA_SIZE; /*data room size */
573 dpdk_pool_create (vm, pool_name, elt_size, num_mbufs,
574 sizeof (dpdk_mempool_private_t), 512, socket_id,
577 vec_free (pool_name);
581 /* call the object initializers */
582 rte_mempool_obj_iter (rmp, rte_pktmbuf_init, 0);
/* tie the mempool to a new vlib buffer pool via its private area */
584 dpdk_mempool_private_t *privp = rte_mempool_get_priv (rmp);
585 privp->buffer_pool_index = vlib_buffer_pool_create (vm, pri, 0);
587 dm->pktmbuf_pools[socket_id] = rmp;
592 clib_error_report (error);
594 /* no usable pool for this socket, try to use pool from another one */
595 for (i = 0; i < vec_len (dm->pktmbuf_pools); i++)
597 if (dm->pktmbuf_pools[i])
599 clib_warning ("WARNING: Failed to allocate mempool for CPU socket "
600 "%u. Threads running on socket %u will use socket %u "
601 "mempool.", socket_id, socket_id, i);
602 dm->pktmbuf_pools[socket_id] = dm->pktmbuf_pools[i];
607 return clib_error_return (0, "failed to allocate mempool on socket %u",
/* Global state for the (debug) buffer state validation machinery:
   a spinlock word, a hash of tracked buffers, and a dedicated heap. */
613 u32 *vlib_buffer_state_validation_lock;
614 uword *vlib_buffer_state_validation_hash;
615 void *vlib_buffer_state_heap;
617 static clib_error_t *
/* Init function: carve a private 10MB mheap and allocate the validation
   hash and lock on it, so validation bookkeeping never contends with
   the main heap. */
618 buffer_state_validation_init (vlib_main_t * vm)
622 vlib_buffer_state_heap = mheap_alloc (0, 10 << 20);
624 oldheap = clib_mem_set_heap (vlib_buffer_state_heap);
626 vlib_buffer_state_validation_hash = hash_create (0, sizeof (uword));
627 vec_validate_aligned (vlib_buffer_state_validation_lock, 0,
628 CLIB_CACHE_LINE_BYTES);
/* restore the caller's heap before returning */
629 clib_mem_set_heap (oldheap);
633 VLIB_INIT_FUNCTION (buffer_state_validation_init);
/* Counters accumulated while iterating mbuf pools (field lines are not
   visible in this chunk; see dpdk_buffer_validate_trajectory_all for
   the `invalid` / `uninitialized` members it reads). */
637 struct dpdk_validate_buf_result
/* sentinel written into pre_data[0] to mark a buffer as "never used" */
643 #define DPDK_TRAJECTORY_POISON 31
/* rte_mempool_obj_iter callback: classify one mbuf's buffer by its
   pre_data[0] trajectory byte — 0 means traced/ok, the poison value
   means never initialized, anything else counts as invalid (that
   counter update is not visible in this chunk). */
646 dpdk_buffer_validate_trajectory (struct rte_mempool *mp, void *opaque,
647 void *obj, unsigned obj_idx)
650 struct dpdk_validate_buf_result *counter = opaque;
651 b = vlib_buffer_from_rte_mbuf ((struct rte_mbuf *) obj);
652 if (b->pre_data[0] != 0)
654 if (b->pre_data[0] == DPDK_TRAJECTORY_POISON)
655 counter->uninitialized++;
/* Walk every pktmbuf pool counting uninitialized (poisoned) and invalid
   trajectory bytes.  Writes the uninitialized count through the out
   parameter and returns the invalid count. */
662 dpdk_buffer_validate_trajectory_all (u32 * uninitialized)
664 dpdk_main_t *dm = &dpdk_main;
665 struct dpdk_validate_buf_result counter = { 0 };
668 for (i = 0; i < vec_len (dm->pktmbuf_pools); i++)
669 rte_mempool_obj_iter (dm->pktmbuf_pools[i],
670 dpdk_buffer_validate_trajectory, &counter);
672 *uninitialized = counter.uninitialized;
673 return counter.invalid;
/* rte_mempool_obj_iter callback: stamp the poison sentinel into one
   mbuf's trajectory byte so later validation can spot unused buffers. */
677 dpdk_buffer_poison_trajectory (struct rte_mempool *mp, void *opaque,
678 void *obj, unsigned obj_idx)
681 b = vlib_buffer_from_rte_mbuf ((struct rte_mbuf *) obj);
682 b->pre_data[0] = DPDK_TRAJECTORY_POISON;
/* Poison the trajectory byte of every mbuf in every pktmbuf pool. */
686 dpdk_buffer_poison_trajectory_all (void)
688 dpdk_main_t *dm = &dpdk_main;
691 for (i = 0; i < vec_len (dm->pktmbuf_pools); i++)
692 rte_mempool_obj_iter (dm->pktmbuf_pools[i], dpdk_buffer_poison_trajectory,
697 static clib_error_t *
/* Module init: size the per-thread data vector to the number of vlib
   mains and mark the VFIO container fd as not-yet-discovered. */
698 dpdk_buffer_init (vlib_main_t * vm)
700 dpdk_buffer_main_t *dbm = &dpdk_buffer_main;
701 vlib_thread_main_t *tm = vlib_get_thread_main ();
703 vec_validate_aligned (dbm->ptd, tm->n_vlib_mains - 1,
704 CLIB_CACHE_LINE_BYTES);
706 dbm->vfio_container_fd = -1;
711 VLIB_INIT_FUNCTION (dpdk_buffer_init);
/* Register this plugin's implementations as vlib's buffer-management
   callbacks (default variants; see dpdk_input_multiarch_select for the
   AVX2/AVX512 overrides). */
714 VLIB_BUFFER_REGISTER_CALLBACKS (dpdk, static) = {
715 .vlib_buffer_fill_free_list_cb = &dpdk_buffer_fill_free_list,
716 .vlib_buffer_free_cb = &dpdk_buffer_free,
717 .vlib_buffer_free_no_next_cb = &dpdk_buffer_free_no_next,
718 .vlib_packet_template_init_cb = &dpdk_packet_template_init,
719 .vlib_buffer_delete_free_list_cb = &dpdk_buffer_delete_free_list,
/* Weak references to the arch-specific variants; they resolve to NULL
   when the corresponding march object was not built, which the selector
   below checks at load time. */
724 vlib_buffer_fill_free_list_cb_t __clib_weak dpdk_buffer_fill_free_list_avx512;
725 vlib_buffer_fill_free_list_cb_t __clib_weak dpdk_buffer_fill_free_list_avx2;
726 vlib_buffer_free_cb_t __clib_weak dpdk_buffer_free_avx512;
727 vlib_buffer_free_cb_t __clib_weak dpdk_buffer_free_avx2;
728 vlib_buffer_free_no_next_cb_t __clib_weak dpdk_buffer_free_no_next_avx512;
729 vlib_buffer_free_no_next_cb_t __clib_weak dpdk_buffer_free_no_next_avx2;
731 static void __clib_constructor
/* Runs at load time (constructor): if the CPU supports AVX512F or AVX2
   and the matching variant was compiled in (weak symbol non-NULL),
   repoint the registered buffer callbacks at the faster variants. */
732 dpdk_input_multiarch_select (void)
734 vlib_buffer_callbacks_t *cb = &__dpdk_buffer_callbacks;
735 if (dpdk_buffer_fill_free_list_avx512 && clib_cpu_supports_avx512f ())
737 cb->vlib_buffer_fill_free_list_cb = dpdk_buffer_fill_free_list_avx512;
738 cb->vlib_buffer_free_cb = dpdk_buffer_free_avx512;
739 cb->vlib_buffer_free_no_next_cb = dpdk_buffer_free_no_next_avx512;
741 else if (dpdk_buffer_fill_free_list_avx2 && clib_cpu_supports_avx2 ())
743 cb->vlib_buffer_fill_free_list_cb = dpdk_buffer_fill_free_list_avx2;
744 cb->vlib_buffer_free_cb = dpdk_buffer_free_avx2;
745 cb->vlib_buffer_free_no_next_cb = dpdk_buffer_free_no_next_avx2;
753 * fd.io coding-style-patch-verification: ON
756 * eval: (c-set-style "gnu")