2 * Copyright (c) 2017 Cisco and/or its affiliates.
3 * Licensed under the Apache License, Version 2.0 (the "License");
4 * you may not use this file except in compliance with the License.
5 * You may obtain a copy of the License at:
7 * http://www.apache.org/licenses/LICENSE-2.0
9 * Unless required by applicable law or agreed to in writing, software
10 * distributed under the License is distributed on an "AS IS" BASIS,
11 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 * See the License for the specific language governing permissions and
13 * limitations under the License.
16 * buffer.c: allocate/free network buffers.
18 * Copyright (c) 2008 Eliot Dresselhaus
20 * Permission is hereby granted, free of charge, to any person obtaining
21 * a copy of this software and associated documentation files (the
22 * "Software"), to deal in the Software without restriction, including
23 * without limitation the rights to use, copy, modify, merge, publish,
24 * distribute, sublicense, and/or sell copies of the Software, and to
25 * permit persons to whom the Software is furnished to do so, subject to
26 * the following conditions:
28 * The above copyright notice and this permission notice shall be
29 * included in all copies or substantial portions of the Software.
31 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
32 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
33 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
34 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
35 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
36 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
37 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
43 * Allocate/free network buffers.
47 #include <linux/vfio.h>
48 #include <sys/ioctl.h>
50 #include <rte_config.h>
52 #include <rte_common.h>
54 #include <rte_memory.h>
55 #include <rte_memzone.h>
56 #include <rte_tailq.h>
58 #include <rte_per_lcore.h>
59 #include <rte_launch.h>
60 #include <rte_atomic.h>
61 #include <rte_cycles.h>
62 #include <rte_prefetch.h>
63 #include <rte_lcore.h>
64 #include <rte_per_lcore.h>
65 #include <rte_branch_prediction.h>
66 #include <rte_interrupts.h>
68 #include <rte_random.h>
69 #include <rte_debug.h>
70 #include <rte_ether.h>
71 #include <rte_ethdev.h>
73 #include <rte_mempool.h>
75 #include <rte_version.h>
77 #include <vlib/vlib.h>
78 #include <vlib/unix/unix.h>
79 #include <vlib/pci/pci.h>
80 #include <vlib/linux/vfio.h>
81 #include <vnet/vnet.h>
82 #include <dpdk/device/dpdk.h>
83 #include <dpdk/device/dpdk_priv.h>
/* vlib pre-data area must exactly match the DPDK mbuf headroom so a
 * vlib_buffer_t and an rte_mbuf can share the same memory layout. */
85 STATIC_ASSERT (VLIB_BUFFER_PRE_DATA_SIZE == RTE_PKTMBUF_HEADROOM,
86 "VLIB_BUFFER_PRE_DATA_SIZE must be equal to RTE_PKTMBUF_HEADROOM");
/* Per-worker-thread state; cache-line aligned to avoid false sharing.
 * NOTE(review): the opening "typedef struct" line appears to be missing
 * from this extract. */
90 CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
/* per-pool vectors of mbufs queued for a later bulk rte_mempool_put */
91 struct rte_mbuf ***mbuf_pending_free_list;
93 /* cached last pool */
94 struct rte_mempool *last_pool;
95 u8 last_buffer_pool_index;
96 } dpdk_buffer_per_thread_data;
/* Module-wide state: VFIO container fd (for DMA mapping) and the vector
 * of per-thread data, indexed by thread index. */
100 int vfio_container_fd;
101 dpdk_buffer_per_thread_data *ptd;
102 } dpdk_buffer_main_t;
104 dpdk_buffer_main_t dpdk_buffer_main;
/* Return a (possibly chained) vlib buffer to its DPDK mempool.
 * Rather than freeing immediately, segments are appended to the calling
 * thread's mbuf_pending_free_list (one list per buffer pool) so they can
 * be released in bulk later by the caller.
 * NOTE(review): several body lines (declarations of mb/next/flags, the
 * per-segment loop structure and braces) are elided in this extract. */
106 static_always_inline void
107 dpdk_rte_pktmbuf_free (vlib_main_t * vm, u32 thread_index, vlib_buffer_t * b)
109 vlib_buffer_t *hb = b;
110 dpdk_buffer_main_t *dbm = &dpdk_buffer_main;
/* look up this thread's pending-free state */
111 dpdk_buffer_per_thread_data *d = vec_elt_at_index (dbm->ptd, thread_index);
114 mb = rte_mbuf_from_vlib_buffer (hb);
/* walk the buffer chain: capture the next index before freeing */
118 next = b->next_buffer;
119 mb = rte_mbuf_from_vlib_buffer (b);
/* apply any extra references taken on this buffer to the mbuf refcount */
121 if (PREDICT_FALSE (b->n_add_refs))
123 rte_mbuf_refcnt_update (mb, b->n_add_refs);
/* prefree returns non-NULL only when the refcount drops to zero */
127 mb = rte_pktmbuf_prefree_seg (mb);
/* cache the last-seen mempool -> buffer_pool_index translation to avoid
 * re-reading the mempool private area for every segment */
130 if (mb->pool != d->last_pool)
132 d->last_pool = mb->pool;
133 dpdk_mempool_private_t *privp = rte_mempool_get_priv (d->last_pool);
134 d->last_buffer_pool_index = privp->buffer_pool_index;
135 vec_validate_aligned (d->mbuf_pending_free_list,
136 d->last_buffer_pool_index,
137 CLIB_CACHE_LINE_BYTES);
/* queue the segment for a later bulk mempool put */
139 vec_add1 (d->mbuf_pending_free_list[d->last_buffer_pool_index], mb);
/* continue with the next segment of the chain, if any */
142 if (flags & VLIB_BUFFER_NEXT_PRESENT)
144 b = vlib_get_buffer (vm, next);
#ifndef CLIB_MULTIARCH_VARIANT
/* Release every buffer held on a free list back to DPDK, then free the
 * list's buffer-index vector itself. */
151 del_free_list (vlib_main_t * vm, vlib_buffer_free_list_t * f)
155 u32 thread_index = vlib_get_thread_index ();
157 for (i = 0; i < vec_len (f->buffers); i++)
159 b = vlib_get_buffer (vm, f->buffers[i]);
160 dpdk_rte_pktmbuf_free (vm, thread_index, b);
164 vec_free (f->buffers);
167 /* Add buffer free list. */
/* Delete a buffer free list: first merge it into any existing list of
 * the same data size, then release its buffers, poison the pool entry
 * (0xab) to catch use-after-free, and remove the per-worker mirrors. */
169 dpdk_buffer_delete_free_list (vlib_main_t * vm,
170 vlib_buffer_free_list_index_t free_list_index)
172 vlib_buffer_main_t *bm = vm->buffer_main;
173 vlib_buffer_free_list_t *f;
174 vlib_buffer_free_list_index_t merge_index;
/* free-list deletion may only be done from the main thread */
177 ASSERT (vlib_get_thread_index () == 0);
179 f = vlib_buffer_get_free_list (vm, free_list_index);
/* if another list with the same buffer size exists, merge into it */
181 merge_index = vlib_buffer_get_free_list_with_size (vm, f->n_data_bytes);
182 if (merge_index != (vlib_buffer_free_list_index_t) ~ 0 &&
183 merge_index != free_list_index)
185 vlib_buffer_merge_free_lists (pool_elt_at_index
186 (bm->buffer_free_list_pool, merge_index),
190 del_free_list (vm, f);
/* poison the freed element so stale references are detectable */
193 memset (f, 0xab, sizeof (f[0]));
195 pool_put (bm->buffer_free_list_pool, f);
/* repeat poison/put for each worker thread's mirror of this list */
197 for (i = 1; i < vec_len (vlib_mains); i++)
199 bm = vlib_mains[i]->buffer_main;
/* NOTE(review): stray double semicolon on the next line (harmless) */
200 f = vlib_buffer_get_free_list (vlib_mains[i], free_list_index);;
201 memset (f, 0xab, sizeof (f[0]));
202 pool_put (bm->buffer_free_list_pool, f);
207 /* Make sure free list has at least given number of free buffers. */
/* Bulk-allocate mbufs from the local socket's pktmbuf pool, initialize
 * them from a template buffer, and append their buffer indices to the
 * free list. Processes four buffers per iteration with prefetch of the
 * next four, then a scalar tail loop.
 * NOTE(review): return statements and some declarations (n, i, bt) are
 * elided in this extract. */
209 CLIB_MULTIARCH_FN (dpdk_buffer_fill_free_list) (vlib_main_t * vm,
210 vlib_buffer_free_list_t * fl,
211 uword min_free_buffers)
213 dpdk_main_t *dm = &dpdk_main;
214 vlib_buffer_t *b0, *b1, *b2, *b3;
216 u32 bi0, bi1, bi2, bi3;
/* use the mempool for the NUMA socket this thread runs on */
217 unsigned socket_id = rte_socket_id ();
218 struct rte_mempool *rmp = dm->pktmbuf_pools[socket_id];
219 dpdk_mempool_private_t *privp = rte_mempool_get_priv (rmp);
220 struct rte_mbuf *mb0, *mb1, *mb2, *mb3;
/* no pool for this socket -> nothing we can do */
224 if (PREDICT_FALSE (rmp == 0))
227 /* Already have enough free buffers on free list? */
228 n = min_free_buffers - vec_len (fl->buffers);
230 return min_free_buffers;
232 /* Always allocate round number of buffers. */
233 n = round_pow2 (n, CLIB_CACHE_LINE_BYTES / sizeof (u32));
235 /* Always allocate new buffers in reasonably large sized chunks. */
236 n = clib_max (n, fl->min_n_buffers_each_physmem_alloc);
238 vec_validate_aligned (vm->mbuf_alloc_list, n - 1, CLIB_CACHE_LINE_BYTES);
/* grab n mbufs in one mempool operation; bail out on failure */
240 if (rte_mempool_get_bulk (rmp, vm->mbuf_alloc_list, n) < 0)
/* build a template vlib_buffer_t copied into every new buffer */
243 memset (&bt, 0, sizeof (vlib_buffer_t));
244 vlib_buffer_init_for_free_list (&bt, fl);
245 bt.buffer_pool_index = privp->buffer_pool_index;
247 _vec_len (vm->mbuf_alloc_list) = n;
/* f = write cursor into the (grown) free-list buffer vector */
250 int f = vec_len (fl->buffers);
251 vec_resize_aligned (fl->buffers, n, CLIB_CACHE_LINE_BYTES);
/* quad loop: prefetch headers 4 ahead of the ones being initialized */
255 vlib_prefetch_buffer_header (vlib_buffer_from_rte_mbuf
256 (vm->mbuf_alloc_list[i + 4]), STORE);
257 vlib_prefetch_buffer_header (vlib_buffer_from_rte_mbuf
258 (vm->mbuf_alloc_list[i + 5]), STORE);
259 vlib_prefetch_buffer_header (vlib_buffer_from_rte_mbuf
260 (vm->mbuf_alloc_list[i + 6]), STORE);
261 vlib_prefetch_buffer_header (vlib_buffer_from_rte_mbuf
262 (vm->mbuf_alloc_list[i + 7]), STORE);
264 mb0 = vm->mbuf_alloc_list[i];
265 mb1 = vm->mbuf_alloc_list[i + 1];
266 mb2 = vm->mbuf_alloc_list[i + 2];
267 mb3 = vm->mbuf_alloc_list[i + 3];
269 b0 = vlib_buffer_from_rte_mbuf (mb0);
270 b1 = vlib_buffer_from_rte_mbuf (mb1);
271 b2 = vlib_buffer_from_rte_mbuf (mb2);
272 b3 = vlib_buffer_from_rte_mbuf (mb3);
274 bi0 = vlib_get_buffer_index (vm, b0);
275 bi1 = vlib_get_buffer_index (vm, b1);
276 bi2 = vlib_get_buffer_index (vm, b2);
277 bi3 = vlib_get_buffer_index (vm, b3);
279 fl->buffers[f++] = bi0;
280 fl->buffers[f++] = bi1;
281 fl->buffers[f++] = bi2;
282 fl->buffers[f++] = bi3;
/* copy the template into all four buffer headers at once */
284 clib_memcpy64_x4 (b0, b1, b2, b3, &bt);
286 if (fl->buffer_init_function)
288 fl->buffer_init_function (vm, fl, &bi0, 1);
289 fl->buffer_init_function (vm, fl, &bi1, 1);
290 fl->buffer_init_function (vm, fl, &bi2, 1);
291 fl->buffer_init_function (vm, fl, &bi3, 1);
/* scalar tail: same steps, one buffer at a time */
298 mb0 = vm->mbuf_alloc_list[i];
300 b0 = vlib_buffer_from_rte_mbuf (mb0);
301 bi0 = vlib_get_buffer_index (vm, b0);
303 fl->buffers[f++] = bi0;
304 clib_memcpy (b0, &bt, sizeof (vlib_buffer_t));
306 if (fl->buffer_init_function)
307 fl->buffer_init_function (vm, fl, &bi0, 1);
316 static_always_inline void
/* Prefetch both the rte_mbuf (for writing, freed soon) and the
 * vlib_buffer_t header (for reading) of buffer index bi. */
317 dpdk_prefetch_buffer_by_index (vlib_main_t * vm, u32 bi)
321 b = vlib_get_buffer (vm, bi);
322 mb = rte_mbuf_from_vlib_buffer (b);
323 CLIB_PREFETCH (mb, 2 * CLIB_CACHE_LINE_BYTES, STORE);
324 CLIB_PREFETCH (b, CLIB_CACHE_LINE_BYTES, LOAD);
327 static_always_inline void
/* Either recycle buffer bi onto its free list (multicast-recycle path,
 * when the list registered a buffers_added_to_freelist_function) or
 * hand it back to DPDK via dpdk_rte_pktmbuf_free. */
328 recycle_or_free (vlib_main_t * vm, vlib_buffer_main_t * bm, u32 bi,
331 vlib_buffer_free_list_t *fl;
332 u32 thread_index = vlib_get_thread_index ();
333 vlib_buffer_free_list_index_t fi;
334 fl = vlib_buffer_get_buffer_free_list (vm, b, &fi);
336 /* The only current use of this callback: multicast recycle */
337 if (PREDICT_FALSE (fl->buffers_added_to_freelist_function != 0))
/* add to the free list; do not reinit when the RECYCLE flag is set */
341 vlib_buffer_add_to_free_list (vm, fl, bi,
342 (b->flags & VLIB_BUFFER_RECYCLE) == 0);
/* record fl in announce_list exactly once so the callback fires later */
344 for (j = 0; j < vec_len (bm->announce_list); j++)
346 if (fl == bm->announce_list[j])
347 goto already_announced;
349 vec_add1 (bm->announce_list, fl);
/* non-recycled buffers go straight back to the DPDK pool */
355 if (PREDICT_TRUE ((b->flags & VLIB_BUFFER_RECYCLE) == 0))
356 dpdk_rte_pktmbuf_free (vm, thread_index, b);
360 static_always_inline void
/* Core free path shared by dpdk_buffer_free / dpdk_buffer_free_no_next:
 * optionally runs the registered free callback, recycles or frees each
 * buffer (4 at a time with prefetch), fires pending free-list announce
 * callbacks, then flushes this thread's pending mbufs back to their
 * mempools in bulk. */
361 vlib_buffer_free_inline (vlib_main_t * vm,
362 u32 * buffers, u32 n_buffers, u32 follow_buffer_next)
364 vlib_buffer_main_t *bm = vm->buffer_main;
365 dpdk_buffer_main_t *dbm = &dpdk_buffer_main;
366 vlib_buffer_t *b0, *b1, *b2, *b3;
367 u32 thread_index = vlib_get_thread_index ();
368 dpdk_buffer_per_thread_data *d = vec_elt_at_index (dbm->ptd, thread_index);
370 u32 (*cb) (vlib_main_t * vm, u32 * buffers, u32 n_buffers,
371 u32 follow_buffer_next);
/* give the registered callback first crack; it may shrink n_buffers */
373 cb = bm->buffer_free_callback;
375 if (PREDICT_FALSE (cb != 0))
376 n_buffers = (*cb) (vm, buffers, n_buffers, follow_buffer_next);
/* quad loop with prefetch of the next four buffers */
381 while (i + 7 < n_buffers)
383 dpdk_prefetch_buffer_by_index (vm, buffers[i + 4]);
384 dpdk_prefetch_buffer_by_index (vm, buffers[i + 5]);
385 dpdk_prefetch_buffer_by_index (vm, buffers[i + 6]);
386 dpdk_prefetch_buffer_by_index (vm, buffers[i + 7]);
388 b0 = vlib_get_buffer (vm, buffers[i]);
389 b1 = vlib_get_buffer (vm, buffers[i + 1]);
390 b2 = vlib_get_buffer (vm, buffers[i + 2]);
391 b3 = vlib_get_buffer (vm, buffers[i + 3]);
393 VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b0);
394 VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b1);
395 VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b2);
396 VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b3);
398 recycle_or_free (vm, bm, buffers[i], b0);
399 recycle_or_free (vm, bm, buffers[i + 1], b1);
400 recycle_or_free (vm, bm, buffers[i + 2], b2);
401 recycle_or_free (vm, bm, buffers[i + 3], b3);
/* scalar tail loop */
405 while (i < n_buffers)
407 b0 = vlib_get_buffer (vm, buffers[i]);
408 VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b0);
409 recycle_or_free (vm, bm, buffers[i], b0);
/* notify every free list recycle_or_free queued on announce_list */
412 if (vec_len (bm->announce_list))
414 vlib_buffer_free_list_t *fl;
415 for (i = 0; i < vec_len (bm->announce_list); i++)
417 fl = bm->announce_list[i];
418 fl->buffers_added_to_freelist_function (vm, fl);
420 _vec_len (bm->announce_list) = 0;
/* bulk-return the mbufs accumulated by dpdk_rte_pktmbuf_free; the pool
 * is taken from the last queued mbuf (all entries share one pool) */
423 vec_foreach_index (i, d->mbuf_pending_free_list)
425 int len = vec_len (d->mbuf_pending_free_list[i]);
428 rte_mempool_put_bulk (d->mbuf_pending_free_list[i][len - 1]->pool,
429 (void *) d->mbuf_pending_free_list[i], len);
430 vec_reset_length (d->mbuf_pending_free_list[i]);
/* Public free entry point: follow buffer chains (next_buffer). */
436 CLIB_MULTIARCH_FN (dpdk_buffer_free) (vlib_main_t * vm, u32 * buffers,
439 vlib_buffer_free_inline (vm, buffers, n_buffers, /* follow_buffer_next */
/* Public free entry point: free only the head buffers, not chains. */
444 CLIB_MULTIARCH_FN (dpdk_buffer_free_no_next) (vlib_main_t * vm, u32 * buffers,
447 vlib_buffer_free_inline (vm, buffers, n_buffers, /* follow_buffer_next */
#ifndef CLIB_MULTIARCH_VARIANT
/* Initialize a vlib packet template under the worker barrier: zero the
 * template and copy in the caller-supplied packet data. */
453 dpdk_packet_template_init (vlib_main_t * vm,
456 uword n_packet_data_bytes,
457 uword min_n_buffers_each_physmem_alloc, u8 * name)
459 vlib_packet_template_t *t = (vlib_packet_template_t *) vt;
/* stop workers while the shared template is (re)written */
461 vlib_worker_thread_barrier_sync (vm);
462 memset (t, 0, sizeof (t[0]));
464 vec_add (t->packet_data, packet_data, n_packet_data_bytes);
466 vlib_worker_thread_barrier_release (vm);
469 static clib_error_t *
/* Directory-scan callback for /proc/self/fd: find a descriptor whose
 * symlink target is /dev/vfio/vfio (i.e. a VFIO container opened by
 * DPDK) and remember it in dbm->vfio_container_fd, skipping the one
 * VPP itself already owns. */
470 scan_vfio_fd (void *arg, u8 * path_name, u8 * file_name)
472 dpdk_buffer_main_t *dbm = &dpdk_buffer_main;
473 linux_vfio_main_t *lvm = &vfio_main;
474 const char fn[] = "/dev/vfio/vfio";
475 char buff[sizeof (fn)] = { 0 };
/* build a NUL-terminated C string from the vec path */
477 u8 *path = format (0, "%v%c", path_name, 0);
/* the link target must be exactly as long as "/dev/vfio/vfio" */
479 if (readlink ((char *) path, buff, sizeof (fn)) + 1 != sizeof (fn))
482 if (strncmp (fn, buff, sizeof (fn)))
/* fd number is the directory entry name */
485 fd = atoi ((char *) file_name);
486 if (fd != lvm->container_fd)
487 dbm->vfio_container_fd = fd;
/* Create a DPDK mempool backed by a VPP physmem region:
 * allocate hugepage-backed shared memory, create an empty mempool over
 * it, populate it page by page, and DMA-map the pages into the VFIO
 * container found via scan_vfio_fd. Outputs the mempool (_mp) and the
 * physmem region index (pri).
 * NOTE(review): error-check/return lines after several calls are elided
 * in this extract. */
495 dpdk_pool_create (vlib_main_t * vm, u8 * pool_name, u32 elt_size,
496 u32 num_elts, u32 pool_priv_size, u16 cache_size, u8 numa,
497 struct rte_mempool ** _mp,
498 vlib_physmem_region_index_t * pri)
500 dpdk_buffer_main_t *dbm = &dpdk_buffer_main;
501 struct rte_mempool *mp;
502 vlib_physmem_region_t *pr;
503 clib_error_t *error = 0;
/* size the region for num_elts objects on 2MB (2^21) pages */
507 obj_size = rte_mempool_calc_obj_size (elt_size, 0, 0);
508 size = rte_mempool_xmem_size (num_elts, obj_size, 21, 0);
511 vlib_physmem_region_alloc (vm, (i8 *) pool_name, size, numa,
512 VLIB_PHYSMEM_F_HUGETLB | VLIB_PHYSMEM_F_SHARED,
517 pr = vlib_physmem_get_region (vm, pri[0]);
/* cache_size 512; no flags */
520 rte_mempool_create_empty ((i8 *) pool_name, num_elts, elt_size,
521 512, pool_priv_size, numa, 0);
523 return clib_error_return (0, "failed to create %s", pool_name);
525 rte_mempool_set_ops_byname (mp, RTE_MBUF_DEFAULT_MEMPOOL_OPS, NULL);
/* populate from the physmem region's page table (IOVA per page) */
528 rte_mempool_populate_iova_tab (mp, pr->mem, pr->page_table, pr->n_pages,
529 pr->log2_page_size, NULL, NULL);
530 if (ret != (i32) mp->size)
532 rte_mempool_free (mp);
533 return clib_error_return (0, "failed to populate %s", pool_name);
538 /* DPDK currently doesn't provide API to map DMA memory for empty mempool
539 so we are using this hack, will be nice to have at least API to get
/* lazily discover DPDK's VFIO container fd by scanning our own fds */
541 if (dbm->vfio_container_fd == -1)
542 foreach_directory_file ("/proc/self/fd", scan_vfio_fd, 0, 0);
544 if (dbm->vfio_container_fd != -1)
546 struct vfio_iommu_type1_dma_map dm = { 0 };
548 dm.argsz = sizeof (struct vfio_iommu_type1_dma_map);
549 dm.flags = VFIO_DMA_MAP_FLAG_READ | VFIO_DMA_MAP_FLAG_WRITE;
/* map each physmem page for device DMA: vaddr -> iova from page table */
552 vec_foreach_index (i, pr->page_table)
554 dm.vaddr = pointer_to_uword (pr->mem) + (i << pr->log2_page_size);
555 dm.size = 1 << pr->log2_page_size;
556 dm.iova = pr->page_table[i];
557 if ((rv = ioctl (dbm->vfio_container_fd, VFIO_IOMMU_MAP_DMA, &dm)))
/* EINVAL is tolerated (page may already be mapped); warn otherwise */
561 if (rv != 0 && rv != EINVAL)
562 clib_unix_warning ("ioctl(VFIO_IOMMU_MAP_DMA) pool '%s'", pool_name);
/* Create (or reuse) the pktmbuf pool for one NUMA socket: sizes each
 * element as rte_mbuf + vlib buffer header + pre-data + data room,
 * creates it via dpdk_pool_create, runs the standard pktmbuf pool and
 * object initializers, and registers the physmem region as a vlib
 * buffer pool. On failure, falls back to another socket's pool. */
569 dpdk_buffer_pool_create (vlib_main_t * vm, unsigned num_mbufs,
572 dpdk_main_t *dm = &dpdk_main;
573 struct rte_mempool *rmp;
574 dpdk_mempool_private_t priv;
575 vlib_physmem_region_index_t pri;
576 clib_error_t *error = 0;
580 vec_validate_aligned (dm->pktmbuf_pools, socket_id, CLIB_CACHE_LINE_BYTES);
582 /* pool already exists, nothing to do */
583 if (dm->pktmbuf_pools[socket_id])
586 pool_name = format (0, "dpdk_mbuf_pool_socket%u%c", socket_id, 0);
588 elt_size = sizeof (struct rte_mbuf) +
589 VLIB_BUFFER_HDR_SIZE /* priv size */ +
590 VLIB_BUFFER_PRE_DATA_SIZE + VLIB_BUFFER_DATA_SIZE; /*data room size */
593 dpdk_pool_create (vm, pool_name, elt_size, num_mbufs,
594 sizeof (dpdk_mempool_private_t), 512, socket_id,
597 vec_free (pool_name);
/* fill in the private area consumed by rte_pktmbuf_pool_init */
601 priv.mbp_priv.mbuf_data_room_size = VLIB_BUFFER_PRE_DATA_SIZE +
602 VLIB_BUFFER_DATA_SIZE;
603 priv.mbp_priv.mbuf_priv_size = VLIB_BUFFER_HDR_SIZE;
605 /* call the mempool priv initializer */
606 rte_pktmbuf_pool_init (rmp, &priv);
608 /* call the object initializers */
609 rte_mempool_obj_iter (rmp, rte_pktmbuf_init, 0);
/* expose the backing physmem region as a vlib buffer pool and remember
 * its index in the mempool private data for the free path */
611 dpdk_mempool_private_t *privp = rte_mempool_get_priv (rmp);
612 privp->buffer_pool_index = vlib_buffer_add_physmem_region (vm, pri);
614 dm->pktmbuf_pools[socket_id] = rmp;
/* creation failed: report, then borrow the first existing pool */
619 clib_error_report (error);
621 /* no usable pool for this socket, try to use pool from another one */
622 for (i = 0; i < vec_len (dm->pktmbuf_pools); i++)
624 if (dm->pktmbuf_pools[i])
626 clib_warning ("WARNING: Failed to allocate mempool for CPU socket "
627 "%u. Threads running on socket %u will use socket %u "
628 "mempool.", socket_id, socket_id, i);
629 dm->pktmbuf_pools[socket_id] = dm->pktmbuf_pools[i];
634 return clib_error_return (0, "failed to allocate mempool on socket %u",
/* Globals for the optional buffer-state validation machinery:
 * a lock, a hash of buffer states, and a dedicated mheap they live on. */
640 u32 *vlib_buffer_state_validation_lock;
641 uword *vlib_buffer_state_validation_hash;
642 void *vlib_buffer_state_heap;
644 static clib_error_t *
/* Init function: carve out a 10MB private heap and create the state
 * hash and lock on it, restoring the previous heap afterwards. */
645 buffer_state_validation_init (vlib_main_t * vm)
649 vlib_buffer_state_heap = mheap_alloc (0, 10 << 20);
651 oldheap = clib_mem_set_heap (vlib_buffer_state_heap);
653 vlib_buffer_state_validation_hash = hash_create (0, sizeof (uword));
654 vec_validate_aligned (vlib_buffer_state_validation_lock, 0,
655 CLIB_CACHE_LINE_BYTES);
656 clib_mem_set_heap (oldheap);
660 VLIB_INIT_FUNCTION (buffer_state_validation_init);
/* Counters accumulated while iterating over all mbufs in a pool. */
664 struct dpdk_validate_buf_result
/* Sentinel written into pre_data[0]; a buffer still carrying it was
 * never touched by the trajectory tracer. */
670 #define DPDK_TRAJECTORY_POISON 31
/* rte_mempool_obj_iter callback: classify one mbuf's trajectory state
 * by inspecting pre_data[0] (0 = initialized, poison = uninitialized). */
673 dpdk_buffer_validate_trajectory (struct rte_mempool *mp, void *opaque,
674 void *obj, unsigned obj_idx)
677 struct dpdk_validate_buf_result *counter = opaque;
678 b = vlib_buffer_from_rte_mbuf ((struct rte_mbuf *) obj);
679 if (b->pre_data[0] != 0)
681 if (b->pre_data[0] == DPDK_TRAJECTORY_POISON)
682 counter->uninitialized++;
/* Run the trajectory validator over every pktmbuf pool; writes the
 * uninitialized-buffer count through the out parameter and returns the
 * invalid count. */
689 dpdk_buffer_validate_trajectory_all (u32 * uninitialized)
691 dpdk_main_t *dm = &dpdk_main;
692 struct dpdk_validate_buf_result counter = { 0 };
695 for (i = 0; i < vec_len (dm->pktmbuf_pools); i++)
696 rte_mempool_obj_iter (dm->pktmbuf_pools[i],
697 dpdk_buffer_validate_trajectory, &counter);
699 *uninitialized = counter.uninitialized;
700 return counter.invalid;
/* rte_mempool_obj_iter callback: stamp one mbuf's pre_data[0] with the
 * trajectory poison value. */
704 dpdk_buffer_poison_trajectory (struct rte_mempool *mp, void *opaque,
705 void *obj, unsigned obj_idx)
708 b = vlib_buffer_from_rte_mbuf ((struct rte_mbuf *) obj);
709 b->pre_data[0] = DPDK_TRAJECTORY_POISON;
/* Poison every buffer in every pktmbuf pool so later validation can
 * distinguish touched from untouched buffers. */
713 dpdk_buffer_poison_trajectory_all (void)
715 dpdk_main_t *dm = &dpdk_main;
718 for (i = 0; i < vec_len (dm->pktmbuf_pools); i++)
719 rte_mempool_obj_iter (dm->pktmbuf_pools[i], dpdk_buffer_poison_trajectory,
724 static clib_error_t *
/* Plugin init: size the per-thread data vector to the number of vlib
 * mains and mark the VFIO container fd as not-yet-discovered (-1). */
725 dpdk_buffer_init (vlib_main_t * vm)
727 dpdk_buffer_main_t *dbm = &dpdk_buffer_main;
728 vlib_thread_main_t *tm = vlib_get_thread_main ();
730 vec_validate_aligned (dbm->ptd, tm->n_vlib_mains - 1,
731 CLIB_CACHE_LINE_BYTES);
733 dbm->vfio_container_fd = -1;
738 VLIB_INIT_FUNCTION (dpdk_buffer_init);
/* Register the DPDK implementations of the vlib buffer-manager
 * callbacks (fill, free, free-no-next, template init, list delete). */
741 VLIB_BUFFER_REGISTER_CALLBACKS (dpdk, static) = {
742 .vlib_buffer_fill_free_list_cb = &dpdk_buffer_fill_free_list,
743 .vlib_buffer_free_cb = &dpdk_buffer_free,
744 .vlib_buffer_free_no_next_cb = &dpdk_buffer_free_no_next,
745 .vlib_packet_template_init_cb = &dpdk_packet_template_init,
746 .vlib_buffer_delete_free_list_cb = &dpdk_buffer_delete_free_list,
/* Weak references to the AVX512/AVX2 multiarch variants; NULL when the
 * corresponding variant was not compiled in. */
751 vlib_buffer_fill_free_list_cb_t __clib_weak dpdk_buffer_fill_free_list_avx512;
752 vlib_buffer_fill_free_list_cb_t __clib_weak dpdk_buffer_fill_free_list_avx2;
753 vlib_buffer_free_cb_t __clib_weak dpdk_buffer_free_avx512;
754 vlib_buffer_free_cb_t __clib_weak dpdk_buffer_free_avx2;
755 vlib_buffer_free_no_next_cb_t __clib_weak dpdk_buffer_free_no_next_avx512;
756 vlib_buffer_free_no_next_cb_t __clib_weak dpdk_buffer_free_no_next_avx2;
758 static void __clib_constructor
/* Constructor: at load time, patch the registered callbacks to the best
 * variant the running CPU supports (AVX512 > AVX2 > baseline). */
759 dpdk_input_multiarch_select (void)
761 vlib_buffer_callbacks_t *cb = &__dpdk_buffer_callbacks;
762 if (dpdk_buffer_fill_free_list_avx512 && clib_cpu_supports_avx512f ())
764 cb->vlib_buffer_fill_free_list_cb = dpdk_buffer_fill_free_list_avx512;
765 cb->vlib_buffer_free_cb = dpdk_buffer_free_avx512;
766 cb->vlib_buffer_free_no_next_cb = dpdk_buffer_free_no_next_avx512;
768 else if (dpdk_buffer_fill_free_list_avx2 && clib_cpu_supports_avx2 ())
770 cb->vlib_buffer_fill_free_list_cb = dpdk_buffer_fill_free_list_avx2;
771 cb->vlib_buffer_free_cb = dpdk_buffer_free_avx2;
772 cb->vlib_buffer_free_no_next_cb = dpdk_buffer_free_no_next_avx2;
780 * fd.io coding-style-patch-verification: ON
783 * eval: (c-set-style "gnu")