/*
 * Copyright (c) 2017 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
/*
 * buffer.c: allocate/free network buffers.
 *
 * Copyright (c) 2008 Eliot Dresselhaus
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

/**
 * @file
 *
 * Allocate/free network buffers.
 */
#include <errno.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

#include <linux/vfio.h>
#include <sys/ioctl.h>

#include <rte_config.h>

#include <rte_common.h>
#include <rte_memory.h>
#include <rte_memzone.h>
#include <rte_tailq.h>
#include <rte_per_lcore.h>
#include <rte_launch.h>
#include <rte_atomic.h>
#include <rte_cycles.h>
#include <rte_prefetch.h>
#include <rte_lcore.h>
#include <rte_per_lcore.h>
#include <rte_branch_prediction.h>
#include <rte_interrupts.h>
#include <rte_random.h>
#include <rte_debug.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_mempool.h>
#include <rte_version.h>

#include <vlib/vlib.h>
#include <vlib/unix/unix.h>
#include <vlib/pci/pci.h>
#include <vlib/linux/vfio.h>
#include <vnet/vnet.h>
#include <dpdk/device/dpdk.h>
#include <dpdk/device/dpdk_priv.h>
85 STATIC_ASSERT (VLIB_BUFFER_PRE_DATA_SIZE == RTE_PKTMBUF_HEADROOM,
86 "VLIB_BUFFER_PRE_DATA_SIZE must be equal to RTE_PKTMBUF_HEADROOM");
90 CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
91 struct rte_mbuf ***mbuf_pending_free_list;
93 /* cached last pool */
94 struct rte_mempool *last_pool;
95 u8 last_buffer_pool_index;
96 } dpdk_buffer_per_thread_data;
100 int vfio_container_fd;
101 dpdk_buffer_per_thread_data *ptd;
102 } dpdk_buffer_main_t;
104 dpdk_buffer_main_t dpdk_buffer_main;
106 static_always_inline void
107 dpdk_rte_pktmbuf_free (vlib_main_t * vm, u32 thread_index, vlib_buffer_t * b)
109 vlib_buffer_t *hb = b;
110 dpdk_buffer_main_t *dbm = &dpdk_buffer_main;
111 dpdk_buffer_per_thread_data *d = vec_elt_at_index (dbm->ptd, thread_index);
114 mb = rte_mbuf_from_vlib_buffer (hb);
118 next = b->next_buffer;
119 mb = rte_mbuf_from_vlib_buffer (b);
121 if (PREDICT_FALSE (b->n_add_refs))
123 rte_mbuf_refcnt_update (mb, b->n_add_refs);
127 mb = rte_pktmbuf_prefree_seg (mb);
130 if (mb->pool != d->last_pool)
132 d->last_pool = mb->pool;
133 dpdk_mempool_private_t *privp = rte_mempool_get_priv (d->last_pool);
134 d->last_buffer_pool_index = privp->buffer_pool_index;
135 vec_validate_aligned (d->mbuf_pending_free_list,
136 d->last_buffer_pool_index,
137 CLIB_CACHE_LINE_BYTES);
139 vec_add1 (d->mbuf_pending_free_list[d->last_buffer_pool_index], mb);
142 if (flags & VLIB_BUFFER_NEXT_PRESENT)
144 b = vlib_get_buffer (vm, next);
149 #ifndef CLIB_MULTIARCH_VARIANT
151 del_free_list (vlib_main_t * vm, vlib_buffer_free_list_t * f)
155 u32 thread_index = vlib_get_thread_index ();
157 for (i = 0; i < vec_len (f->buffers); i++)
159 b = vlib_get_buffer (vm, f->buffers[i]);
160 dpdk_rte_pktmbuf_free (vm, thread_index, b);
164 vec_free (f->buffers);
166 memset (f, 0xab, sizeof (f[0]));
169 /* Add buffer free list. */
171 dpdk_buffer_delete_free_list (vlib_main_t * vm,
172 vlib_buffer_free_list_index_t free_list_index)
174 vlib_buffer_free_list_t *f;
177 ASSERT (vlib_get_thread_index () == 0);
179 f = vlib_buffer_get_free_list (vm, free_list_index);
181 del_free_list (vm, f);
183 pool_put (vm->buffer_free_list_pool, f);
185 for (i = 1; i < vec_len (vlib_mains); i++)
187 vlib_main_t *wvm = vlib_mains[i];
188 f = vlib_buffer_get_free_list (vlib_mains[i], free_list_index);
189 del_free_list (wvm, f);
190 pool_put (wvm->buffer_free_list_pool, f);
195 /* Make sure free list has at least given number of free buffers. */
197 CLIB_MULTIARCH_FN (dpdk_buffer_fill_free_list) (vlib_main_t * vm,
198 vlib_buffer_free_list_t * fl,
199 uword min_free_buffers)
201 dpdk_main_t *dm = &dpdk_main;
202 vlib_buffer_t *b0, *b1, *b2, *b3;
204 u32 bi0, bi1, bi2, bi3;
205 unsigned socket_id = rte_socket_id ();
206 struct rte_mempool *rmp = dm->pktmbuf_pools[socket_id];
207 dpdk_mempool_private_t *privp = rte_mempool_get_priv (rmp);
208 struct rte_mbuf *mb0, *mb1, *mb2, *mb3;
212 if (PREDICT_FALSE (rmp == 0))
215 /* Already have enough free buffers on free list? */
216 n = min_free_buffers - vec_len (fl->buffers);
218 return min_free_buffers;
220 /* Always allocate round number of buffers. */
221 n = round_pow2 (n, CLIB_CACHE_LINE_BYTES / sizeof (u32));
223 /* Always allocate new buffers in reasonably large sized chunks. */
224 n = clib_max (n, fl->min_n_buffers_each_alloc);
226 vec_validate_aligned (vm->mbuf_alloc_list, n - 1, CLIB_CACHE_LINE_BYTES);
228 if (rte_mempool_get_bulk (rmp, vm->mbuf_alloc_list, n) < 0)
231 memset (&bt, 0, sizeof (vlib_buffer_t));
232 vlib_buffer_init_for_free_list (&bt, fl);
233 bt.buffer_pool_index = privp->buffer_pool_index;
235 _vec_len (vm->mbuf_alloc_list) = n;
238 int f = vec_len (fl->buffers);
239 vec_resize_aligned (fl->buffers, n, CLIB_CACHE_LINE_BYTES);
243 vlib_prefetch_buffer_header (vlib_buffer_from_rte_mbuf
244 (vm->mbuf_alloc_list[i + 4]), STORE);
245 vlib_prefetch_buffer_header (vlib_buffer_from_rte_mbuf
246 (vm->mbuf_alloc_list[i + 5]), STORE);
247 vlib_prefetch_buffer_header (vlib_buffer_from_rte_mbuf
248 (vm->mbuf_alloc_list[i + 6]), STORE);
249 vlib_prefetch_buffer_header (vlib_buffer_from_rte_mbuf
250 (vm->mbuf_alloc_list[i + 7]), STORE);
252 mb0 = vm->mbuf_alloc_list[i];
253 mb1 = vm->mbuf_alloc_list[i + 1];
254 mb2 = vm->mbuf_alloc_list[i + 2];
255 mb3 = vm->mbuf_alloc_list[i + 3];
257 b0 = vlib_buffer_from_rte_mbuf (mb0);
258 b1 = vlib_buffer_from_rte_mbuf (mb1);
259 b2 = vlib_buffer_from_rte_mbuf (mb2);
260 b3 = vlib_buffer_from_rte_mbuf (mb3);
262 bi0 = vlib_get_buffer_index (vm, b0);
263 bi1 = vlib_get_buffer_index (vm, b1);
264 bi2 = vlib_get_buffer_index (vm, b2);
265 bi3 = vlib_get_buffer_index (vm, b3);
267 fl->buffers[f++] = bi0;
268 fl->buffers[f++] = bi1;
269 fl->buffers[f++] = bi2;
270 fl->buffers[f++] = bi3;
272 clib_memcpy64_x4 (b0, b1, b2, b3, &bt);
274 if (fl->buffer_init_function)
276 fl->buffer_init_function (vm, fl, &bi0, 1);
277 fl->buffer_init_function (vm, fl, &bi1, 1);
278 fl->buffer_init_function (vm, fl, &bi2, 1);
279 fl->buffer_init_function (vm, fl, &bi3, 1);
286 mb0 = vm->mbuf_alloc_list[i];
288 b0 = vlib_buffer_from_rte_mbuf (mb0);
289 bi0 = vlib_get_buffer_index (vm, b0);
291 fl->buffers[f++] = bi0;
292 clib_memcpy (b0, &bt, sizeof (vlib_buffer_t));
294 if (fl->buffer_init_function)
295 fl->buffer_init_function (vm, fl, &bi0, 1);
304 static_always_inline void
305 dpdk_prefetch_buffer_by_index (vlib_main_t * vm, u32 bi)
309 b = vlib_get_buffer (vm, bi);
310 mb = rte_mbuf_from_vlib_buffer (b);
311 CLIB_PREFETCH (mb, 2 * CLIB_CACHE_LINE_BYTES, STORE);
312 CLIB_PREFETCH (b, CLIB_CACHE_LINE_BYTES, LOAD);
315 static_always_inline void
316 recycle_or_free (vlib_main_t * vm, vlib_buffer_main_t * bm, u32 bi,
319 vlib_buffer_free_list_t *fl;
320 u32 thread_index = vlib_get_thread_index ();
321 vlib_buffer_free_list_index_t fi;
322 fl = vlib_buffer_get_buffer_free_list (vm, b, &fi);
324 /* The only current use of this callback: multicast recycle */
325 if (PREDICT_FALSE (fl->buffers_added_to_freelist_function != 0))
329 vlib_buffer_add_to_free_list (vm, fl, bi,
330 (b->flags & VLIB_BUFFER_RECYCLE) == 0);
332 for (j = 0; j < vec_len (vm->buffer_announce_list); j++)
334 if (fl == vm->buffer_announce_list[j])
335 goto already_announced;
337 vec_add1 (vm->buffer_announce_list, fl);
343 if (PREDICT_TRUE ((b->flags & VLIB_BUFFER_RECYCLE) == 0))
344 dpdk_rte_pktmbuf_free (vm, thread_index, b);
348 static_always_inline void
349 vlib_buffer_free_inline (vlib_main_t * vm,
350 u32 * buffers, u32 n_buffers, u32 follow_buffer_next)
352 vlib_buffer_main_t *bm = &buffer_main;
353 dpdk_buffer_main_t *dbm = &dpdk_buffer_main;
354 vlib_buffer_t *b0, *b1, *b2, *b3;
355 u32 thread_index = vlib_get_thread_index ();
356 dpdk_buffer_per_thread_data *d = vec_elt_at_index (dbm->ptd, thread_index);
358 u32 (*cb) (vlib_main_t * vm, u32 * buffers, u32 n_buffers,
359 u32 follow_buffer_next);
361 cb = bm->buffer_free_callback;
363 if (PREDICT_FALSE (cb != 0))
364 n_buffers = (*cb) (vm, buffers, n_buffers, follow_buffer_next);
369 while (i + 7 < n_buffers)
371 dpdk_prefetch_buffer_by_index (vm, buffers[i + 4]);
372 dpdk_prefetch_buffer_by_index (vm, buffers[i + 5]);
373 dpdk_prefetch_buffer_by_index (vm, buffers[i + 6]);
374 dpdk_prefetch_buffer_by_index (vm, buffers[i + 7]);
376 b0 = vlib_get_buffer (vm, buffers[i]);
377 b1 = vlib_get_buffer (vm, buffers[i + 1]);
378 b2 = vlib_get_buffer (vm, buffers[i + 2]);
379 b3 = vlib_get_buffer (vm, buffers[i + 3]);
381 VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b0);
382 VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b1);
383 VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b2);
384 VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b3);
386 recycle_or_free (vm, bm, buffers[i], b0);
387 recycle_or_free (vm, bm, buffers[i + 1], b1);
388 recycle_or_free (vm, bm, buffers[i + 2], b2);
389 recycle_or_free (vm, bm, buffers[i + 3], b3);
393 while (i < n_buffers)
395 b0 = vlib_get_buffer (vm, buffers[i]);
396 VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b0);
397 recycle_or_free (vm, bm, buffers[i], b0);
400 if (vec_len (vm->buffer_announce_list))
402 vlib_buffer_free_list_t *fl;
403 for (i = 0; i < vec_len (vm->buffer_announce_list); i++)
405 fl = vm->buffer_announce_list[i];
406 fl->buffers_added_to_freelist_function (vm, fl);
408 _vec_len (vm->buffer_announce_list) = 0;
411 vec_foreach_index (i, d->mbuf_pending_free_list)
413 int len = vec_len (d->mbuf_pending_free_list[i]);
416 rte_mempool_put_bulk (d->mbuf_pending_free_list[i][len - 1]->pool,
417 (void *) d->mbuf_pending_free_list[i], len);
418 vec_reset_length (d->mbuf_pending_free_list[i]);
424 CLIB_MULTIARCH_FN (dpdk_buffer_free) (vlib_main_t * vm, u32 * buffers,
427 vlib_buffer_free_inline (vm, buffers, n_buffers, /* follow_buffer_next */
432 CLIB_MULTIARCH_FN (dpdk_buffer_free_no_next) (vlib_main_t * vm, u32 * buffers,
435 vlib_buffer_free_inline (vm, buffers, n_buffers, /* follow_buffer_next */
439 #ifndef CLIB_MULTIARCH_VARIANT
441 dpdk_packet_template_init (vlib_main_t * vm,
444 uword n_packet_data_bytes,
445 uword min_n_buffers_each_alloc, u8 * name)
447 vlib_packet_template_t *t = (vlib_packet_template_t *) vt;
449 vlib_worker_thread_barrier_sync (vm);
450 memset (t, 0, sizeof (t[0]));
452 vec_add (t->packet_data, packet_data, n_packet_data_bytes);
454 vlib_worker_thread_barrier_release (vm);
457 static clib_error_t *
458 scan_vfio_fd (void *arg, u8 * path_name, u8 * file_name)
460 dpdk_buffer_main_t *dbm = &dpdk_buffer_main;
461 linux_vfio_main_t *lvm = &vfio_main;
462 const char fn[] = "/dev/vfio/vfio";
463 char buff[sizeof (fn)] = { 0 };
465 u8 *path = format (0, "%v%c", path_name, 0);
467 if (readlink ((char *) path, buff, sizeof (fn)) + 1 != sizeof (fn))
470 if (strncmp (fn, buff, sizeof (fn)))
473 fd = atoi ((char *) file_name);
474 if (fd != lvm->container_fd)
475 dbm->vfio_container_fd = fd;
483 dpdk_pool_create (vlib_main_t * vm, u8 * pool_name, u32 elt_size,
484 u32 num_elts, u32 pool_priv_size, u16 cache_size, u8 numa,
485 struct rte_mempool ** _mp,
486 vlib_physmem_region_index_t * pri)
488 dpdk_buffer_main_t *dbm = &dpdk_buffer_main;
489 struct rte_mempool *mp;
490 vlib_physmem_region_t *pr;
491 dpdk_mempool_private_t priv;
492 clib_error_t *error = 0;
496 obj_size = rte_mempool_calc_obj_size (elt_size, 0, 0);
497 size = rte_mempool_xmem_size (num_elts, obj_size, 21, 0);
500 vlib_physmem_region_alloc (vm, (i8 *) pool_name, size, numa,
501 VLIB_PHYSMEM_F_HUGETLB | VLIB_PHYSMEM_F_SHARED,
506 pr = vlib_physmem_get_region (vm, pri[0]);
509 rte_mempool_create_empty ((i8 *) pool_name, num_elts, elt_size,
510 512, pool_priv_size, numa, 0);
512 return clib_error_return (0, "failed to create %s", pool_name);
514 rte_mempool_set_ops_byname (mp, RTE_MBUF_DEFAULT_MEMPOOL_OPS, NULL);
516 /* Call the mempool priv initializer */
517 priv.mbp_priv.mbuf_data_room_size = VLIB_BUFFER_PRE_DATA_SIZE +
518 VLIB_BUFFER_DATA_SIZE;
519 priv.mbp_priv.mbuf_priv_size = VLIB_BUFFER_HDR_SIZE;
520 rte_pktmbuf_pool_init (mp, &priv);
523 rte_mempool_populate_iova_tab (mp, pr->mem, pr->page_table, pr->n_pages,
524 pr->log2_page_size, NULL, NULL);
525 if (ret != (i32) mp->size)
527 rte_mempool_free (mp);
528 return clib_error_return (0, "failed to populate %s", pool_name);
533 /* DPDK currently doesn't provide API to map DMA memory for empty mempool
534 so we are using this hack, will be nice to have at least API to get
536 if (dbm->vfio_container_fd == -1)
537 foreach_directory_file ("/proc/self/fd", scan_vfio_fd, 0, 0);
539 if (dbm->vfio_container_fd != -1)
541 struct vfio_iommu_type1_dma_map dm = { 0 };
543 dm.argsz = sizeof (struct vfio_iommu_type1_dma_map);
544 dm.flags = VFIO_DMA_MAP_FLAG_READ | VFIO_DMA_MAP_FLAG_WRITE;
547 vec_foreach_index (i, pr->page_table)
549 dm.vaddr = pointer_to_uword (pr->mem) + (i << pr->log2_page_size);
550 dm.size = 1 << pr->log2_page_size;
551 if (rte_eal_iova_mode() == RTE_IOVA_PA)
552 dm.iova = pr->page_table[i];
555 if ((rv = ioctl (dbm->vfio_container_fd, VFIO_IOMMU_MAP_DMA, &dm)))
559 if (rv != 0 && errno != EINVAL)
560 clib_unix_warning ("ioctl(VFIO_IOMMU_MAP_DMA) pool '%s'", pool_name);
567 dpdk_buffer_pool_create (vlib_main_t * vm, unsigned num_mbufs,
570 dpdk_main_t *dm = &dpdk_main;
571 struct rte_mempool *rmp;
572 vlib_physmem_region_index_t pri;
573 clib_error_t *error = 0;
577 vec_validate_aligned (dm->pktmbuf_pools, socket_id, CLIB_CACHE_LINE_BYTES);
579 /* pool already exists, nothing to do */
580 if (dm->pktmbuf_pools[socket_id])
583 pool_name = format (0, "dpdk_mbuf_pool_socket%u%c", socket_id, 0);
585 elt_size = sizeof (struct rte_mbuf) +
586 VLIB_BUFFER_HDR_SIZE /* priv size */ +
587 VLIB_BUFFER_PRE_DATA_SIZE + VLIB_BUFFER_DATA_SIZE; /*data room size */
590 dpdk_pool_create (vm, pool_name, elt_size, num_mbufs,
591 sizeof (dpdk_mempool_private_t), 512, socket_id,
594 vec_free (pool_name);
598 /* call the object initializers */
599 rte_mempool_obj_iter (rmp, rte_pktmbuf_init, 0);
601 dpdk_mempool_private_t *privp = rte_mempool_get_priv (rmp);
602 privp->buffer_pool_index = vlib_buffer_pool_create (vm, pri, 0);
604 dm->pktmbuf_pools[socket_id] = rmp;
609 clib_error_report (error);
611 /* no usable pool for this socket, try to use pool from another one */
612 for (i = 0; i < vec_len (dm->pktmbuf_pools); i++)
614 if (dm->pktmbuf_pools[i])
616 clib_warning ("WARNING: Failed to allocate mempool for CPU socket "
617 "%u. Threads running on socket %u will use socket %u "
618 "mempool.", socket_id, socket_id, i);
619 dm->pktmbuf_pools[socket_id] = dm->pktmbuf_pools[i];
624 return clib_error_return (0, "failed to allocate mempool on socket %u",
630 u32 *vlib_buffer_state_validation_lock;
631 uword *vlib_buffer_state_validation_hash;
632 void *vlib_buffer_state_heap;
634 static clib_error_t *
635 buffer_state_validation_init (vlib_main_t * vm)
639 vlib_buffer_state_heap = mheap_alloc (0, 10 << 20);
641 oldheap = clib_mem_set_heap (vlib_buffer_state_heap);
643 vlib_buffer_state_validation_hash = hash_create (0, sizeof (uword));
644 vec_validate_aligned (vlib_buffer_state_validation_lock, 0,
645 CLIB_CACHE_LINE_BYTES);
646 clib_mem_set_heap (oldheap);
650 VLIB_INIT_FUNCTION (buffer_state_validation_init);
654 struct dpdk_validate_buf_result
660 #define DPDK_TRAJECTORY_POISON 31
663 dpdk_buffer_validate_trajectory (struct rte_mempool *mp, void *opaque,
664 void *obj, unsigned obj_idx)
667 struct dpdk_validate_buf_result *counter = opaque;
668 b = vlib_buffer_from_rte_mbuf ((struct rte_mbuf *) obj);
669 if (b->pre_data[0] != 0)
671 if (b->pre_data[0] == DPDK_TRAJECTORY_POISON)
672 counter->uninitialized++;
679 dpdk_buffer_validate_trajectory_all (u32 * uninitialized)
681 dpdk_main_t *dm = &dpdk_main;
682 struct dpdk_validate_buf_result counter = { 0 };
685 for (i = 0; i < vec_len (dm->pktmbuf_pools); i++)
686 rte_mempool_obj_iter (dm->pktmbuf_pools[i],
687 dpdk_buffer_validate_trajectory, &counter);
689 *uninitialized = counter.uninitialized;
690 return counter.invalid;
694 dpdk_buffer_poison_trajectory (struct rte_mempool *mp, void *opaque,
695 void *obj, unsigned obj_idx)
698 b = vlib_buffer_from_rte_mbuf ((struct rte_mbuf *) obj);
699 b->pre_data[0] = DPDK_TRAJECTORY_POISON;
703 dpdk_buffer_poison_trajectory_all (void)
705 dpdk_main_t *dm = &dpdk_main;
708 for (i = 0; i < vec_len (dm->pktmbuf_pools); i++)
709 rte_mempool_obj_iter (dm->pktmbuf_pools[i], dpdk_buffer_poison_trajectory,
714 static clib_error_t *
715 dpdk_buffer_init (vlib_main_t * vm)
717 dpdk_buffer_main_t *dbm = &dpdk_buffer_main;
718 vlib_thread_main_t *tm = vlib_get_thread_main ();
720 vec_validate_aligned (dbm->ptd, tm->n_vlib_mains - 1,
721 CLIB_CACHE_LINE_BYTES);
723 dbm->vfio_container_fd = -1;
728 VLIB_INIT_FUNCTION (dpdk_buffer_init);
731 VLIB_BUFFER_REGISTER_CALLBACKS (dpdk, static) = {
732 .vlib_buffer_fill_free_list_cb = &dpdk_buffer_fill_free_list,
733 .vlib_buffer_free_cb = &dpdk_buffer_free,
734 .vlib_buffer_free_no_next_cb = &dpdk_buffer_free_no_next,
735 .vlib_packet_template_init_cb = &dpdk_packet_template_init,
736 .vlib_buffer_delete_free_list_cb = &dpdk_buffer_delete_free_list,
741 vlib_buffer_fill_free_list_cb_t __clib_weak dpdk_buffer_fill_free_list_avx512;
742 vlib_buffer_fill_free_list_cb_t __clib_weak dpdk_buffer_fill_free_list_avx2;
743 vlib_buffer_free_cb_t __clib_weak dpdk_buffer_free_avx512;
744 vlib_buffer_free_cb_t __clib_weak dpdk_buffer_free_avx2;
745 vlib_buffer_free_no_next_cb_t __clib_weak dpdk_buffer_free_no_next_avx512;
746 vlib_buffer_free_no_next_cb_t __clib_weak dpdk_buffer_free_no_next_avx2;
748 static void __clib_constructor
749 dpdk_input_multiarch_select (void)
751 vlib_buffer_callbacks_t *cb = &__dpdk_buffer_callbacks;
752 if (dpdk_buffer_fill_free_list_avx512 && clib_cpu_supports_avx512f ())
754 cb->vlib_buffer_fill_free_list_cb = dpdk_buffer_fill_free_list_avx512;
755 cb->vlib_buffer_free_cb = dpdk_buffer_free_avx512;
756 cb->vlib_buffer_free_no_next_cb = dpdk_buffer_free_no_next_avx512;
758 else if (dpdk_buffer_fill_free_list_avx2 && clib_cpu_supports_avx2 ())
760 cb->vlib_buffer_fill_free_list_cb = dpdk_buffer_fill_free_list_avx2;
761 cb->vlib_buffer_free_cb = dpdk_buffer_free_avx2;
762 cb->vlib_buffer_free_no_next_cb = dpdk_buffer_free_no_next_avx2;
/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */