2 * Copyright (c) 2017 Cisco and/or its affiliates.
3 * Licensed under the Apache License, Version 2.0 (the "License");
4 * you may not use this file except in compliance with the License.
5 * You may obtain a copy of the License at:
7 * http://www.apache.org/licenses/LICENSE-2.0
9 * Unless required by applicable law or agreed to in writing, software
10 * distributed under the License is distributed on an "AS IS" BASIS,
11 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 * See the License for the specific language governing permissions and
13 * limitations under the License.
16 * buffer.c: allocate/free network buffers.
18 * Copyright (c) 2008 Eliot Dresselhaus
20 * Permission is hereby granted, free of charge, to any person obtaining
21 * a copy of this software and associated documentation files (the
22 * "Software"), to deal in the Software without restriction, including
23 * without limitation the rights to use, copy, modify, merge, publish,
24 * distribute, sublicense, and/or sell copies of the Software, and to
25 * permit persons to whom the Software is furnished to do so, subject to
26 * the following conditions:
28 * The above copyright notice and this permission notice shall be
29 * included in all copies or substantial portions of the Software.
31 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
32 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
33 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
34 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
35 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
36 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
37 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
43 * Allocate/free network buffers.
47 #include <linux/vfio.h>
48 #include <sys/ioctl.h>
50 #include <rte_config.h>
52 #include <rte_common.h>
54 #include <rte_memory.h>
55 #include <rte_memzone.h>
56 #include <rte_tailq.h>
58 #include <rte_per_lcore.h>
59 #include <rte_launch.h>
60 #include <rte_atomic.h>
61 #include <rte_cycles.h>
62 #include <rte_prefetch.h>
63 #include <rte_lcore.h>
64 #include <rte_per_lcore.h>
65 #include <rte_branch_prediction.h>
66 #include <rte_interrupts.h>
68 #include <rte_random.h>
69 #include <rte_debug.h>
70 #include <rte_ether.h>
71 #include <rte_ethdev.h>
73 #include <rte_mempool.h>
75 #include <rte_version.h>
77 #include <vlib/vlib.h>
78 #include <vlib/unix/unix.h>
79 #include <vlib/pci/pci.h>
80 #include <vlib/linux/vfio.h>
81 #include <vnet/vnet.h>
82 #include <dpdk/device/dpdk.h>
83 #include <dpdk/device/dpdk_priv.h>
85 STATIC_ASSERT (VLIB_BUFFER_PRE_DATA_SIZE == RTE_PKTMBUF_HEADROOM,
86 "VLIB_BUFFER_PRE_DATA_SIZE must be equal to RTE_PKTMBUF_HEADROOM");
/* Per-worker-thread scratch state used while freeing buffers.
   NOTE(review): the opening `typedef struct {` line appears to have been
   dropped by extraction (original-line-number gap 86->90) — restore from
   upstream before compiling. */
90 CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
/* One pending-free vector of mbufs per VLIB buffer pool index; flushed in
   bulk back to the owning rte_mempool at the end of a free burst. */
91 struct rte_mbuf ***mbuf_pending_free_list;
93 /* cached last pool */
94 struct rte_mempool *last_pool;
/* Buffer-pool index cached alongside last_pool to avoid re-reading the
   mempool private area on every freed mbuf. */
95 u8 last_buffer_pool_index;
96 } dpdk_buffer_per_thread_data;
/* Plugin-global state.
   NOTE(review): the `typedef struct {` opener for this type is missing from
   the visible text (number gap 96->100) — restore from upstream. */
/* VFIO container fd discovered via scan_vfio_fd(); -1 when unavailable. */
100 int vfio_container_fd;
/* Vector of per-thread data, indexed by vlib thread index. */
101 dpdk_buffer_per_thread_data *ptd;
102 } dpdk_buffer_main_t;
104 dpdk_buffer_main_t dpdk_buffer_main;
/* Free a vlib buffer (and, when VLIB_BUFFER_NEXT_PRESENT is set, its chain)
   back to DPDK by queueing the underlying mbufs on the per-thread
   pending-free list; the actual rte_mempool_put_bulk happens later in
   vlib_buffer_free_inline().
   NOTE(review): extraction dropped lines here (braces, `struct rte_mbuf *mb`,
   `u32 next, flags` declarations, and the chain-walk loop header) — this body
   is not compilable as shown; restore from upstream VPP. */
106 static_always_inline void
107 dpdk_rte_pktmbuf_free (vlib_main_t * vm, u32 thread_index, vlib_buffer_t * b)
109 vlib_buffer_t *hb = b;
110 dpdk_buffer_main_t *dbm = &dpdk_buffer_main;
111 dpdk_buffer_per_thread_data *d = vec_elt_at_index (dbm->ptd, thread_index);
114 mb = rte_mbuf_from_vlib_buffer (hb);
118 next = b->next_buffer;
119 mb = rte_mbuf_from_vlib_buffer (b);
/* Extra references taken by VPP are pushed into the mbuf refcount so
   rte_pktmbuf_prefree_seg() sees the true count. */
121 if (PREDICT_FALSE (b->n_add_refs))
123 rte_mbuf_refcnt_update (mb, b->n_add_refs);
/* Returns NULL when other references remain; only a non-NULL result may be
   returned to the pool (presumably guarded by a dropped `if` — verify). */
127 mb = rte_pktmbuf_prefree_seg (mb);
/* Cache the pool -> buffer_pool_index mapping; avoids touching the mempool
   private area when consecutive mbufs come from the same pool. */
130 if (mb->pool != d->last_pool)
132 d->last_pool = mb->pool;
133 dpdk_mempool_private_t *privp = rte_mempool_get_priv (d->last_pool);
134 d->last_buffer_pool_index = privp->buffer_pool_index;
135 vec_validate_aligned (d->mbuf_pending_free_list,
136 d->last_buffer_pool_index,
137 CLIB_CACHE_LINE_BYTES);
/* Defer the actual mempool put: batch into the per-pool pending list. */
139 vec_add1 (d->mbuf_pending_free_list[d->last_buffer_pool_index], mb);
/* Follow the buffer chain when present. */
142 if (flags & VLIB_BUFFER_NEXT_PRESENT)
144 b = vlib_get_buffer (vm, next);
149 #ifndef CLIB_MULTIARCH_VARIANT
/* Release every buffer held on free list `f` back to DPDK, then destroy the
   list's backing vector and poison the struct (0xab) to catch use-after-free.
   NOTE(review): the return-type line, braces and local declarations were
   dropped by extraction. */
151 del_free_list (vlib_main_t * vm, vlib_buffer_free_list_t * f)
155 u32 thread_index = vlib_get_thread_index ();
157 for (i = 0; i < vec_len (f->buffers); i++)
159 b = vlib_get_buffer (vm, f->buffers[i]);
160 dpdk_rte_pktmbuf_free (vm, thread_index, b);
164 vec_free (f->buffers);
/* Poison rather than zero so stale references fault loudly in debug. */
166 memset (f, 0xab, sizeof (f[0]));
169 /* Add buffer free list. */
/* Delete the free list identified by `free_list_index` on the main thread
   and on every worker (vlib_mains[1..]). Must run on thread 0 (asserted). */
171 dpdk_buffer_delete_free_list (vlib_main_t * vm,
172 vlib_buffer_free_list_index_t free_list_index)
174 vlib_buffer_free_list_t *f;
177 ASSERT (vlib_get_thread_index () == 0);
179 f = vlib_buffer_get_free_list (vm, free_list_index);
181 del_free_list (vm, f);
183 pool_put (vm->buffer_free_list_pool, f);
/* Workers each keep their own copy of the free list; delete those too. */
185 for (i = 1; i < vec_len (vlib_mains); i++)
187 vlib_main_t *wvm = vlib_mains[i];
188 f = vlib_buffer_get_free_list (vlib_mains[i], free_list_index);
189 del_free_list (wvm, f);
190 pool_put (wvm->buffer_free_list_pool, f);
195 /* Make sure free list has at least given number of free buffers. */
/* Allocates mbufs in bulk from this socket's pktmbuf pool, initializes the
   vlib buffer headers from a template, and appends the buffer indices to
   `fl->buffers`. Returns the number of free buffers available (visible
   early-return path returns `min_free_buffers` when already satisfied).
   NOTE(review): extraction dropped the 4-wide `while` loop header, the
   scalar remainder loop header, braces, and several declarations (`n`, `i`,
   `bt`) — restore from upstream before compiling. */
197 CLIB_MULTIARCH_FN (dpdk_buffer_fill_free_list) (vlib_main_t * vm,
198 vlib_buffer_free_list_t * fl,
199 uword min_free_buffers)
201 dpdk_main_t *dm = &dpdk_main;
202 vlib_buffer_t *b0, *b1, *b2, *b3;
204 u32 bi0, bi1, bi2, bi3;
205 unsigned socket_id = rte_socket_id ();
206 struct rte_mempool *rmp = dm->pktmbuf_pools[socket_id];
207 dpdk_mempool_private_t *privp = rte_mempool_get_priv (rmp);
208 struct rte_mbuf *mb0, *mb1, *mb2, *mb3;
/* No pool configured for this socket: nothing we can allocate from. */
212 if (PREDICT_FALSE (rmp == 0))
215 /* Already have enough free buffers on free list? */
216 n = min_free_buffers - vec_len (fl->buffers);
218 return min_free_buffers;
220 /* Always allocate round number of buffers. */
221 n = round_pow2 (n, CLIB_CACHE_LINE_BYTES / sizeof (u32));
223 /* Always allocate new buffers in reasonably large sized chunks. */
224 n = clib_max (n, fl->min_n_buffers_each_alloc);
226 vec_validate_aligned (vm->mbuf_alloc_list, n - 1, CLIB_CACHE_LINE_BYTES);
/* Bulk get is all-or-nothing; on failure (presumably) bail out. */
228 if (rte_mempool_get_bulk (rmp, vm->mbuf_alloc_list, n) < 0)
/* Build a template buffer header once; copied onto every new buffer. */
231 memset (&bt, 0, sizeof (vlib_buffer_t));
232 vlib_buffer_init_for_free_list (&bt, fl);
233 bt.buffer_pool_index = privp->buffer_pool_index;
235 _vec_len (vm->mbuf_alloc_list) = n;
238 int f = vec_len (fl->buffers);
239 vec_resize_aligned (fl->buffers, n, CLIB_CACHE_LINE_BYTES);
/* 4-wide unrolled loop: prefetch headers 4..7 ahead while initializing
   buffers i..i+3 (loop header dropped by extraction). */
243 vlib_prefetch_buffer_header (vlib_buffer_from_rte_mbuf
244 (vm->mbuf_alloc_list[i + 4]), STORE);
245 vlib_prefetch_buffer_header (vlib_buffer_from_rte_mbuf
246 (vm->mbuf_alloc_list[i + 5]), STORE);
247 vlib_prefetch_buffer_header (vlib_buffer_from_rte_mbuf
248 (vm->mbuf_alloc_list[i + 6]), STORE);
249 vlib_prefetch_buffer_header (vlib_buffer_from_rte_mbuf
250 (vm->mbuf_alloc_list[i + 7]), STORE);
252 mb0 = vm->mbuf_alloc_list[i];
253 mb1 = vm->mbuf_alloc_list[i + 1];
254 mb2 = vm->mbuf_alloc_list[i + 2];
255 mb3 = vm->mbuf_alloc_list[i + 3];
257 b0 = vlib_buffer_from_rte_mbuf (mb0);
258 b1 = vlib_buffer_from_rte_mbuf (mb1);
259 b2 = vlib_buffer_from_rte_mbuf (mb2);
260 b3 = vlib_buffer_from_rte_mbuf (mb3);
262 bi0 = vlib_get_buffer_index (vm, b0);
263 bi1 = vlib_get_buffer_index (vm, b1);
264 bi2 = vlib_get_buffer_index (vm, b2);
265 bi3 = vlib_get_buffer_index (vm, b3);
267 fl->buffers[f++] = bi0;
268 fl->buffers[f++] = bi1;
269 fl->buffers[f++] = bi2;
270 fl->buffers[f++] = bi3;
/* Stamp all four headers from the template in one wide copy. */
272 clib_memcpy64_x4 (b0, b1, b2, b3, &bt);
274 if (fl->buffer_init_function)
276 fl->buffer_init_function (vm, fl, &bi0, 1);
277 fl->buffer_init_function (vm, fl, &bi1, 1);
278 fl->buffer_init_function (vm, fl, &bi2, 1);
279 fl->buffer_init_function (vm, fl, &bi3, 1);
/* Scalar remainder loop (header dropped by extraction). */
286 mb0 = vm->mbuf_alloc_list[i];
288 b0 = vlib_buffer_from_rte_mbuf (mb0);
289 bi0 = vlib_get_buffer_index (vm, b0);
291 fl->buffers[f++] = bi0;
292 clib_memcpy (b0, &bt, sizeof (vlib_buffer_t));
294 if (fl->buffer_init_function)
295 fl->buffer_init_function (vm, fl, &bi0, 1);
/* Prefetch both the rte_mbuf (two cache lines, for write — refcnt/next are
   updated during free) and the vlib buffer header (read) for buffer `bi`.
   NOTE(review): local declarations for `b` and `mb` were dropped by
   extraction. */
304 static_always_inline void
305 dpdk_prefetch_buffer_by_index (vlib_main_t * vm, u32 bi)
309 b = vlib_get_buffer (vm, bi);
310 mb = rte_mbuf_from_vlib_buffer (b);
311 CLIB_PREFETCH (mb, 2 * CLIB_CACHE_LINE_BYTES, STORE);
312 CLIB_PREFETCH (b, CLIB_CACHE_LINE_BYTES, LOAD);
/* Either recycle buffer `bi` onto its free list (multicast-recycle path,
   when the list has a buffers_added_to_freelist_function callback) or hand
   it to dpdk_rte_pktmbuf_free(). Free lists touched via the callback path
   are remembered in vm->buffer_announce_list for a single announcement per
   burst. NOTE(review): braces and the `b` parameter / `j` declaration lines
   were dropped by extraction. */
315 static_always_inline void
316 recycle_or_free (vlib_main_t * vm, vlib_buffer_main_t * bm, u32 bi,
319 vlib_buffer_free_list_t *fl;
320 u32 thread_index = vlib_get_thread_index ();
321 vlib_buffer_free_list_index_t fi;
322 fl = vlib_buffer_get_buffer_free_list (vm, b, &fi);
324 /* The only current use of this callback: multicast recycle */
325 if (PREDICT_FALSE (fl->buffers_added_to_freelist_function != 0))
/* Non-recycled buffers get their header re-initialized on the way in. */
329 vlib_buffer_add_to_free_list (vm, fl, bi,
330 (b->flags & VLIB_BUFFER_RECYCLE) == 0);
/* Announce each free list at most once per burst. */
332 for (j = 0; j < vec_len (vm->buffer_announce_list); j++)
334 if (fl == vm->buffer_announce_list[j])
335 goto already_announced;
337 vec_add1 (vm->buffer_announce_list, fl);
/* Fast path: not recycled, release the mbuf chain to DPDK. */
343 if (PREDICT_TRUE ((b->flags & VLIB_BUFFER_RECYCLE) == 0))
344 dpdk_rte_pktmbuf_free (vm, thread_index, b);
/* Core free path shared by dpdk_buffer_free / dpdk_buffer_free_no_next:
   optionally filters the buffer list through bm->buffer_free_callback,
   recycles-or-frees buffers 4 at a time with prefetching, fires the
   announce callbacks, then flushes each per-pool pending-free mbuf vector
   back to its rte_mempool in one bulk put.
   NOTE(review): braces and the `i` declaration were dropped by extraction;
   `follow_buffer_next` is not referenced in the visible lines — presumably
   consumed by dropped code. */
348 static_always_inline void
349 vlib_buffer_free_inline (vlib_main_t * vm,
350 u32 * buffers, u32 n_buffers, u32 follow_buffer_next)
352 vlib_buffer_main_t *bm = &buffer_main;
353 dpdk_buffer_main_t *dbm = &dpdk_buffer_main;
354 vlib_buffer_t *b0, *b1, *b2, *b3;
355 u32 thread_index = vlib_get_thread_index ();
356 dpdk_buffer_per_thread_data *d = vec_elt_at_index (dbm->ptd, thread_index);
358 u32 (*cb) (vlib_main_t * vm, u32 * buffers, u32 n_buffers,
359 u32 follow_buffer_next);
361 cb = bm->buffer_free_callback;
/* Callback may filter/consume buffers; it returns the new count. */
363 if (PREDICT_FALSE (cb != 0))
364 n_buffers = (*cb) (vm, buffers, n_buffers, follow_buffer_next);
/* 4-wide main loop, prefetching 4 buffers ahead. */
369 while (i + 7 < n_buffers)
371 dpdk_prefetch_buffer_by_index (vm, buffers[i + 4]);
372 dpdk_prefetch_buffer_by_index (vm, buffers[i + 5]);
373 dpdk_prefetch_buffer_by_index (vm, buffers[i + 6]);
374 dpdk_prefetch_buffer_by_index (vm, buffers[i + 7]);
376 b0 = vlib_get_buffer (vm, buffers[i]);
377 b1 = vlib_get_buffer (vm, buffers[i + 1]);
378 b2 = vlib_get_buffer (vm, buffers[i + 2]);
379 b3 = vlib_get_buffer (vm, buffers[i + 3]);
381 VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b0);
382 VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b1);
383 VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b2);
384 VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b3);
386 recycle_or_free (vm, bm, buffers[i], b0);
387 recycle_or_free (vm, bm, buffers[i + 1], b1);
388 recycle_or_free (vm, bm, buffers[i + 2], b2);
389 recycle_or_free (vm, bm, buffers[i + 3], b3);
/* Scalar remainder. */
393 while (i < n_buffers)
395 b0 = vlib_get_buffer (vm, buffers[i]);
396 VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b0);
397 recycle_or_free (vm, bm, buffers[i], b0);
/* Notify each free list that gained buffers during this burst. */
400 if (vec_len (vm->buffer_announce_list))
402 vlib_buffer_free_list_t *fl;
403 for (i = 0; i < vec_len (vm->buffer_announce_list); i++)
405 fl = vm->buffer_announce_list[i];
406 fl->buffers_added_to_freelist_function (vm, fl);
408 _vec_len (vm->buffer_announce_list) = 0;
/* Flush deferred mbufs back to their pools in bulk; pool pointer is taken
   from the last queued mbuf (all entries of one vector share a pool). */
411 vec_foreach_index (i, d->mbuf_pending_free_list)
413 int len = vec_len (d->mbuf_pending_free_list[i]);
416 rte_mempool_put_bulk (d->mbuf_pending_free_list[i][len - 1]->pool,
417 (void *) d->mbuf_pending_free_list[i], len);
418 vec_reset_length (d->mbuf_pending_free_list[i]);
/* Public free callback: frees `buffers`, following buffer chains
   (follow_buffer_next = 1; literal argument presumably on a dropped line). */
424 CLIB_MULTIARCH_FN (dpdk_buffer_free) (vlib_main_t * vm, u32 * buffers,
427 vlib_buffer_free_inline (vm, buffers, n_buffers, /* follow_buffer_next */
/* Public free callback variant that does NOT follow buffer chains
   (follow_buffer_next = 0; literal argument presumably on a dropped line). */
432 CLIB_MULTIARCH_FN (dpdk_buffer_free_no_next) (vlib_main_t * vm, u32 * buffers,
435 vlib_buffer_free_inline (vm, buffers, n_buffers, /* follow_buffer_next */
439 #ifndef CLIB_MULTIARCH_VARIANT
/* Packet-template init callback: zeroes the template and copies in the
   packet data, under a worker-thread barrier so no worker observes a
   half-built template. NOTE(review): the return-type line, braces, and the
   `vt`/`packet_data` parameter lines were dropped by extraction. */
441 dpdk_packet_template_init (vlib_main_t * vm,
444 uword n_packet_data_bytes,
445 uword min_n_buffers_each_alloc, u8 * name)
447 vlib_packet_template_t *t = (vlib_packet_template_t *) vt;
449 vlib_worker_thread_barrier_sync (vm);
450 memset (t, 0, sizeof (t[0]));
452 vec_add (t->packet_data, packet_data, n_packet_data_bytes);
454 vlib_worker_thread_barrier_release (vm);
457 static clib_error_t *
/* Directory-scan callback for /proc/self/fd: finds a file descriptor whose
   symlink target is "/dev/vfio/vfio" (i.e. a VFIO container fd opened by
   DPDK) and records it in dbm->vfio_container_fd, skipping the one VPP
   itself owns (lvm->container_fd). NOTE(review): braces, the `fd`
   declaration, and the early-return lines were dropped by extraction. */
458 scan_vfio_fd (void *arg, u8 * path_name, u8 * file_name)
460 dpdk_buffer_main_t *dbm = &dpdk_buffer_main;
461 linux_vfio_main_t *lvm = &vfio_main;
462 const char fn[] = "/dev/vfio/vfio";
463 char buff[sizeof (fn)] = { 0 };
/* NUL-terminate the vector path for readlink(). */
465 u8 *path = format (0, "%v%c", path_name, 0);
/* readlink does not NUL-terminate; length must match the target exactly. */
467 if (readlink ((char *) path, buff, sizeof (fn)) + 1 != sizeof (fn))
470 if (strncmp (fn, buff, sizeof (fn)))
473 fd = atoi ((char *) file_name);
474 if (fd != lvm->container_fd)
475 dbm->vfio_container_fd = fd;
/* Create an rte_mempool backed by VPP physmem (hugepage, shared) rather than
   DPDK's own memory: allocate a physmem region sized for the pool, create an
   empty mempool, populate it from the region's page table, then map the
   region for DMA through the discovered VFIO container (hack noted below).
   Outputs: *_mp = the mempool, *pri = the physmem region index.
   NOTE(review): the return-type line, braces, several declarations
   (`obj_size`, `size`, `i`, `rv`, `ret`) and the error-check lines around
   vlib_physmem_region_alloc / rte_mempool_create_empty were dropped by
   extraction. */
483 dpdk_pool_create (vlib_main_t * vm, u8 * pool_name, u32 elt_size,
484 u32 num_elts, u32 pool_priv_size, u16 cache_size, u8 numa,
485 struct rte_mempool ** _mp,
486 vlib_physmem_region_index_t * pri)
488 dpdk_buffer_main_t *dbm = &dpdk_buffer_main;
489 struct rte_mempool *mp;
490 vlib_physmem_region_t *pr;
491 clib_error_t *error = 0;
495 obj_size = rte_mempool_calc_obj_size (elt_size, 0, 0);
/* 21 = log2 of a 2MB hugepage — TODO confirm against region page size. */
496 size = rte_mempool_xmem_size (num_elts, obj_size, 21, 0);
499 vlib_physmem_region_alloc (vm, (i8 *) pool_name, size, numa,
500 VLIB_PHYSMEM_F_HUGETLB | VLIB_PHYSMEM_F_SHARED,
505 pr = vlib_physmem_get_region (vm, pri[0]);
508 rte_mempool_create_empty ((i8 *) pool_name, num_elts, elt_size,
509 512, pool_priv_size, numa, 0);
511 return clib_error_return (0, "failed to create %s", pool_name);
513 rte_mempool_set_ops_byname (mp, RTE_MBUF_DEFAULT_MEMPOOL_OPS, NULL);
/* Populate from the physmem page table; must cover every pool element. */
516 rte_mempool_populate_iova_tab (mp, pr->mem, pr->page_table, pr->n_pages,
517 pr->log2_page_size, NULL, NULL);
518 if (ret != (i32) mp->size)
520 rte_mempool_free (mp);
521 return clib_error_return (0, "failed to populate %s", pool_name);
526 /* DPDK currently doesn't provide API to map DMA memory for empty mempool
527 so we are using this hack, will be nice to have at least API to get
/* Lazily discover DPDK's VFIO container fd by scanning /proc/self/fd. */
529 if (dbm->vfio_container_fd == -1)
530 foreach_directory_file ("/proc/self/fd", scan_vfio_fd, 0, 0);
532 if (dbm->vfio_container_fd != -1)
534 struct vfio_iommu_type1_dma_map dm = { 0 };
536 dm.argsz = sizeof (struct vfio_iommu_type1_dma_map);
537 dm.flags = VFIO_DMA_MAP_FLAG_READ | VFIO_DMA_MAP_FLAG_WRITE;
/* Map each physmem page for DMA: vaddr -> iova (physical) per page. */
540 vec_foreach_index (i, pr->page_table)
542 dm.vaddr = pointer_to_uword (pr->mem) + (i << pr->log2_page_size);
543 dm.size = 1 << pr->log2_page_size;
544 dm.iova = pr->page_table[i];
545 if ((rv = ioctl (dbm->vfio_container_fd, VFIO_IOMMU_MAP_DMA, &dm)))
/* EINVAL is tolerated (page may already be mapped) — warn on the rest. */
549 if (rv != 0 && rv != EINVAL)
550 clib_unix_warning ("ioctl(VFIO_IOMMU_MAP_DMA) pool '%s'", pool_name);
/* Create (or reuse) the pktmbuf pool for a CPU socket: builds the pool via
   dpdk_pool_create(), runs the standard rte_pktmbuf pool/object
   initializers, registers a matching VLIB buffer pool, and records the
   result in dm->pktmbuf_pools[socket_id]. On failure, falls back to any
   existing pool from another socket with a warning.
   NOTE(review): the return-type line, braces, `socket_id` parameter,
   declarations (`pool_name`, `elt_size`, `i`) and some error-path lines were
   dropped by extraction. */
557 dpdk_buffer_pool_create (vlib_main_t * vm, unsigned num_mbufs,
560 dpdk_main_t *dm = &dpdk_main;
561 struct rte_mempool *rmp;
562 dpdk_mempool_private_t priv;
563 vlib_physmem_region_index_t pri;
564 clib_error_t *error = 0;
568 vec_validate_aligned (dm->pktmbuf_pools, socket_id, CLIB_CACHE_LINE_BYTES);
570 /* pool already exists, nothing to do */
571 if (dm->pktmbuf_pools[socket_id])
574 pool_name = format (0, "dpdk_mbuf_pool_socket%u%c", socket_id, 0);
/* Element = mbuf struct + vlib header (as mbuf priv) + headroom + data. */
576 elt_size = sizeof (struct rte_mbuf) +
577 VLIB_BUFFER_HDR_SIZE /* priv size */ +
578 VLIB_BUFFER_PRE_DATA_SIZE + VLIB_BUFFER_DATA_SIZE; /*data room size */
581 dpdk_pool_create (vm, pool_name, elt_size, num_mbufs,
582 sizeof (dpdk_mempool_private_t), 512, socket_id,
585 vec_free (pool_name);
589 priv.mbp_priv.mbuf_data_room_size = VLIB_BUFFER_PRE_DATA_SIZE +
590 VLIB_BUFFER_DATA_SIZE;
591 priv.mbp_priv.mbuf_priv_size = VLIB_BUFFER_HDR_SIZE;
593 /* call the mempool priv initializer */
594 rte_pktmbuf_pool_init (rmp, &priv);
596 /* call the object initializers */
597 rte_mempool_obj_iter (rmp, rte_pktmbuf_init, 0);
/* Pair the DPDK mempool with a VLIB buffer pool over the same region. */
599 dpdk_mempool_private_t *privp = rte_mempool_get_priv (rmp);
600 privp->buffer_pool_index = vlib_buffer_pool_create (vm, pri, 0);
602 dm->pktmbuf_pools[socket_id] = rmp;
607 clib_error_report (error);
609 /* no usable pool for this socket, try to use pool from another one */
610 for (i = 0; i < vec_len (dm->pktmbuf_pools); i++)
612 if (dm->pktmbuf_pools[i])
614 clib_warning ("WARNING: Failed to allocate mempool for CPU socket "
615 "%u. Threads running on socket %u will use socket %u "
616 "mempool.", socket_id, socket_id, i);
617 dm->pktmbuf_pools[socket_id] = dm->pktmbuf_pools[i];
622 return clib_error_return (0, "failed to allocate mempool on socket %u",
/* Globals backing the (debug) buffer-state validation machinery: a spinlock
   word, an allocated/free-state hash, and a private mheap they live on. */
628 u32 *vlib_buffer_state_validation_lock;
629 uword *vlib_buffer_state_validation_hash;
630 void *vlib_buffer_state_heap;
632 static clib_error_t *
/* Init function: allocates a 10MB private heap and creates the validation
   hash and lock on it, restoring the previous heap afterwards.
   NOTE(review): braces, the `oldheap` declaration, and the return statement
   were dropped by extraction. */
633 buffer_state_validation_init (vlib_main_t * vm)
637 vlib_buffer_state_heap = mheap_alloc (0, 10 << 20);
639 oldheap = clib_mem_set_heap (vlib_buffer_state_heap);
641 vlib_buffer_state_validation_hash = hash_create (0, sizeof (uword));
642 vec_validate_aligned (vlib_buffer_state_validation_lock, 0,
643 CLIB_CACHE_LINE_BYTES);
644 clib_mem_set_heap (oldheap);
652 struct dpdk_validate_buf_result
658 #define DPDK_TRAJECTORY_POISON 31
/* rte_mempool_obj_iter callback: inspects one mbuf's vlib pre_data[0] byte.
   Non-zero means the buffer was not returned clean; the poison value counts
   as "uninitialized" (never sent through the graph), anything else
   presumably increments an `invalid` counter on a dropped line.
   NOTE(review): the return-type line, braces and the `b` declaration were
   dropped by extraction. */
661 dpdk_buffer_validate_trajectory (struct rte_mempool *mp, void *opaque,
662 void *obj, unsigned obj_idx)
665 struct dpdk_validate_buf_result *counter = opaque;
666 b = vlib_buffer_from_rte_mbuf ((struct rte_mbuf *) obj);
667 if (b->pre_data[0] != 0)
669 if (b->pre_data[0] == DPDK_TRAJECTORY_POISON)
670 counter->uninitialized++;
/* Walk every mbuf in every configured pktmbuf pool, tallying trajectory
   state; stores the uninitialized count through the out-param and returns
   the invalid count. NOTE(review): the return-type line, braces and the `i`
   declaration were dropped by extraction. */
677 dpdk_buffer_validate_trajectory_all (u32 * uninitialized)
679 dpdk_main_t *dm = &dpdk_main;
680 struct dpdk_validate_buf_result counter = { 0 };
683 for (i = 0; i < vec_len (dm->pktmbuf_pools); i++)
684 rte_mempool_obj_iter (dm->pktmbuf_pools[i],
685 dpdk_buffer_validate_trajectory, &counter);
687 *uninitialized = counter.uninitialized;
688 return counter.invalid;
/* rte_mempool_obj_iter callback: stamps DPDK_TRAJECTORY_POISON into one
   mbuf's vlib pre_data[0] so later validation can tell "never used" from
   "used and returned dirty". NOTE(review): the return-type line, braces and
   the `b` declaration were dropped by extraction. */
692 dpdk_buffer_poison_trajectory (struct rte_mempool *mp, void *opaque,
693 void *obj, unsigned obj_idx)
696 b = vlib_buffer_from_rte_mbuf ((struct rte_mbuf *) obj);
697 b->pre_data[0] = DPDK_TRAJECTORY_POISON;
/* Poison the trajectory byte of every mbuf in every configured pool.
   NOTE(review): the return-type line, braces, the `i` declaration and the
   final iterator argument were dropped by extraction. */
701 dpdk_buffer_poison_trajectory_all (void)
703 dpdk_main_t *dm = &dpdk_main;
706 for (i = 0; i < vec_len (dm->pktmbuf_pools); i++)
707 rte_mempool_obj_iter (dm->pktmbuf_pools[i], dpdk_buffer_poison_trajectory,
712 static clib_error_t *
/* Plugin init: size the per-thread data vector to the number of vlib mains
   and mark the VFIO container fd as not-yet-discovered (-1).
   NOTE(review): braces and the return statement were dropped by
   extraction. */
713 dpdk_buffer_init (vlib_main_t * vm)
715 dpdk_buffer_main_t *dbm = &dpdk_buffer_main;
716 vlib_thread_main_t *tm = vlib_get_thread_main ();
718 vec_validate_aligned (dbm->ptd, tm->n_vlib_mains - 1,
719 CLIB_CACHE_LINE_BYTES);
/* -1 = sentinel: scan_vfio_fd() has not found DPDK's container fd yet. */
721 dbm->vfio_container_fd = -1;
729 VLIB_BUFFER_REGISTER_CALLBACKS (dpdk, static) = {
730 .vlib_buffer_fill_free_list_cb = &dpdk_buffer_fill_free_list,
731 .vlib_buffer_free_cb = &dpdk_buffer_free,
732 .vlib_buffer_free_no_next_cb = &dpdk_buffer_free_no_next,
733 .vlib_packet_template_init_cb = &dpdk_packet_template_init,
734 .vlib_buffer_delete_free_list_cb = &dpdk_buffer_delete_free_list,
739 vlib_buffer_fill_free_list_cb_t __clib_weak dpdk_buffer_fill_free_list_avx512;
740 vlib_buffer_fill_free_list_cb_t __clib_weak dpdk_buffer_fill_free_list_avx2;
741 vlib_buffer_free_cb_t __clib_weak dpdk_buffer_free_avx512;
742 vlib_buffer_free_cb_t __clib_weak dpdk_buffer_free_avx2;
743 vlib_buffer_free_no_next_cb_t __clib_weak dpdk_buffer_free_no_next_avx512;
744 vlib_buffer_free_no_next_cb_t __clib_weak dpdk_buffer_free_no_next_avx2;
746 static void __clib_constructor
/* Runs before main() (constructor): patches the registered buffer callbacks
   to the best available ISA variant — AVX-512 if both the weak symbol is
   linked in and the CPU supports it, else AVX2, else the defaults stand.
   NOTE(review): the closing brace was dropped by extraction. */
747 dpdk_input_multiarch_select (void)
749 vlib_buffer_callbacks_t *cb = &__dpdk_buffer_callbacks;
750 if (dpdk_buffer_fill_free_list_avx512 && clib_cpu_supports_avx512f ())
752 cb->vlib_buffer_fill_free_list_cb = dpdk_buffer_fill_free_list_avx512;
753 cb->vlib_buffer_free_cb = dpdk_buffer_free_avx512;
754 cb->vlib_buffer_free_no_next_cb = dpdk_buffer_free_no_next_avx512;
756 else if (dpdk_buffer_fill_free_list_avx2 && clib_cpu_supports_avx2 ())
758 cb->vlib_buffer_fill_free_list_cb = dpdk_buffer_fill_free_list_avx2;
759 cb->vlib_buffer_free_cb = dpdk_buffer_free_avx2;
760 cb->vlib_buffer_free_no_next_cb = dpdk_buffer_free_no_next_avx2;
768 * fd.io coding-style-patch-verification: ON
771 * eval: (c-set-style "gnu")