2 *------------------------------------------------------------------
3 * Copyright (c) 2018 Cisco and/or its affiliates.
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at:
8 * http://www.apache.org/licenses/LICENSE-2.0
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 *------------------------------------------------------------------
20 #include <sys/ioctl.h>
21 #include <linux/ethtool.h>
22 #include <linux/if_link.h>
23 #include <linux/sockios.h>
24 #include <linux/limits.h>
26 #include <vlib/vlib.h>
27 #include <vlib/unix/unix.h>
28 #include <vlib/pci/pci.h>
29 #include <vppinfra/linux/netns.h>
30 #include <vppinfra/linux/sysfs.h>
31 #include <vppinfra/unix.h>
32 #include <vnet/ethernet/ethernet.h>
33 #include <vnet/interface/rx_queue_funcs.h>
34 #include <vnet/interface/tx_queue_funcs.h>
/* Fallback for older kernel uapi headers: 2048 is the kernel's minimum
 * AF_XDP umem chunk size.  NOTE(review): the matching #endif is on an
 * elided line of this extract. */
37 #ifndef XDP_UMEM_MIN_CHUNK_SIZE
38 #define XDP_UMEM_MIN_CHUNK_SIZE 2048
/* Global plugin state (device pool, log class) shared by all AF_XDP code. */
41 af_xdp_main_t af_xdp_main;
/* Debugger helper: snapshot the producer/consumer cursors of an XSK
 * producer ring into a pair.  Intended to be called from gdb, not VPP. */
50 gdb_af_xdp_get_prod (const struct xsk_ring_prod *prod)
52   gdb_af_xdp_pair_t pair = { *prod->producer, *prod->consumer };
/* Debugger helper: same as gdb_af_xdp_get_prod but for a consumer ring. */
57 gdb_af_xdp_get_cons (const struct xsk_ring_cons * cons)
59   gdb_af_xdp_pair_t pair = { *cons->producer, *cons->consumer };
/* Device-class MAC-change callback: copy the 6-byte new address into the
 * device's cached hwaddr.  The memcpy_s errno_t is converted into a clib
 * error on failure. */
64 af_xdp_mac_change (vnet_hw_interface_t * hw, const u8 * old, const u8 * new)
66   af_xdp_main_t *am = &af_xdp_main;
67   af_xdp_device_t *ad = vec_elt_at_index (am->devices, hw->dev_instance);
68   errno_t err = memcpy_s (ad->hwaddr, sizeof (ad->hwaddr), new, 6);
/* NOTE(review): presumably guarded by `if (err)` on an elided line, so the
 * error return below only fires on memcpy_s failure — confirm upstream. */
70     return clib_error_return_code (0, -err, CLIB_ERROR_ERRNO_VALID,
/* Device-class max-frame-size (MTU) callback: not implemented for AF_XDP;
 * logs an error and reports VNET_ERR_UNSUPPORTED to the caller. */
76 af_xdp_set_max_frame_size (vnet_main_t *vnm, vnet_hw_interface_t *hw,
79   af_xdp_main_t *am = &af_xdp_main;
80   af_xdp_device_t *ad = vec_elt_at_index (am->devices, hw->dev_instance);
81   af_xdp_log (VLIB_LOG_LEVEL_ERR, ad, "set mtu not supported yet");
82   return vnet_error (VNET_ERR_UNSUPPORTED, 0);
/* Device-class flag-change callback.  Unicast filtering and promiscuous
 * mode are both unsupported: each case only logs an error.  NOTE(review):
 * the switch head and case/break lines are elided in this extract. */
86 af_xdp_flag_change (vnet_main_t * vnm, vnet_hw_interface_t * hw, u32 flags)
88   af_xdp_main_t *am = &af_xdp_main;
89   af_xdp_device_t *ad = vec_elt_at_index (am->devices, hw->dev_instance);
94       af_xdp_log (VLIB_LOG_LEVEL_ERR, ad, "set unicast not supported yet");
96     case ETHERNET_INTERFACE_FLAG_ACCEPT_ALL:
97       af_xdp_log (VLIB_LOG_LEVEL_ERR, ad,
98                   "set promiscuous not supported yet");
/* default: unrecognized flag value */
102       af_xdp_log (VLIB_LOG_LEVEL_ERR, ad, "unknown flag %x requested", flags);
/* Switch the current thread into network namespace `netns`.
 * fds[0] receives an fd for the current (self) netns so the caller can
 * return later via af_xdp_exit_netns(); fds[1] receives an fd for the
 * target netns.  Both are initialized to -1 first so cleanup is safe on
 * partial failure.  Returns 0 or a VNET_API_ERROR_SYSCALL_ERROR_* code. */
107 af_xdp_enter_netns (char *netns, int *fds)
109   *fds = *(fds + 1) = -1;
/* save a handle on the current netns before switching away */
112       *fds = clib_netns_open (NULL /* self */);
113       if ((*(fds + 1) = clib_netns_open ((u8 *) netns)) == -1)
114 	return VNET_API_ERROR_SYSCALL_ERROR_8;
115       if (clib_setns (*(fds + 1)) == -1)
116 	return VNET_API_ERROR_SYSCALL_ERROR_9;
/* Release the two netns fds saved by af_xdp_enter_netns() and reset both
 * slots to -1 (safe to call on already-cleaned state).  NOTE(review): the
 * close() calls themselves are on elided lines. */
122 af_xdp_cleanup_netns (int *fds)
127   if (*(fds + 1) != -1)
130   *fds = *(fds + 1) = -1;
/* Counterpart of af_xdp_enter_netns(): switch back to the saved original
 * netns (fds[0]), then close both saved fds. */
134 af_xdp_exit_netns (char *netns, int *fds)
140     ret = clib_setns (*fds);
142   af_xdp_cleanup_netns (fds);
/* Detach the XDP program from the device's linux netdev and close the
 * loaded bpf object.  The query/detach are performed inside the device's
 * netns (entered/exited around the libbpf calls).  NOTE(review): the
 * error-path control flow between these calls is elided in this extract. */
149 af_xdp_remove_program (af_xdp_device_t *ad)
151   u32 curr_prog_id = 0;
155   af_xdp_enter_netns (ad->netns, ns_fds);
/* look up the currently attached program id, if any */
156   ret = bpf_xdp_query_id (ad->linux_ifindex, XDP_FLAGS_UPDATE_IF_NOEXIST,
160       af_xdp_log (VLIB_LOG_LEVEL_ERR, ad, "bpf_xdp_query_id failed\n");
164   ret = bpf_xdp_detach (ad->linux_ifindex, XDP_FLAGS_UPDATE_IF_NOEXIST, NULL);
167       af_xdp_log (VLIB_LOG_LEVEL_ERR, ad, "bpf_xdp_detach failed\n");
170   af_xdp_exit_netns (ad->netns, ns_fds);
172     bpf_object__close (ad->bpf_obj);
/* NOTE(review): second exit call — presumably on a separate error path. */
177   af_xdp_exit_netns (ad->netns, ns_fds);
/* Tear down an AF_XDP interface: bring the hw interface down and delete
 * it, free per-txq spinlocks, delete all XSK sockets and umems, remove the
 * per-rxq clib file handlers, detach the XDP program, free the device's
 * vectors/strings, and return the device to the pool. */
182 af_xdp_delete_if (vlib_main_t * vm, af_xdp_device_t * ad)
184   vnet_main_t *vnm = vnet_get_main ();
185   af_xdp_main_t *axm = &af_xdp_main;
186   struct xsk_socket **xsk;
187   struct xsk_umem **umem;
/* remove the vnet interface first so no more traffic is scheduled */
192       vnet_hw_interface_set_flags (vnm, ad->hw_if_index, 0);
193       ethernet_delete_interface (vnm, ad->hw_if_index);
196   for (i = 0; i < ad->txq_num; i++)
197     clib_spinlock_free (&vec_elt (ad->txqs, i).lock);
/* sockets must go before their umems */
199   vec_foreach (xsk, ad->xsk)
200     xsk_socket__delete (*xsk);
202   vec_foreach (umem, ad->umem)
203     xsk_umem__delete (*umem);
205   for (i = 0; i < ad->rxq_num; i++)
206     clib_file_del_by_index (&file_main, vec_elt (ad->rxqs, i).file_index);
208   if (af_xdp_remove_program (ad) != 0)
209     af_xdp_log (VLIB_LOG_LEVEL_ERR, ad, "Error while removing XDP program.\n");
213   vec_free (ad->buffer_template);
217   vec_free (ad->linux_ifname);
218   vec_free (ad->netns);
219   clib_error_free (ad->error);
220   pool_put (axm->devices, ad);
/* Load and attach the user-supplied XDP program (args->prog) onto the
 * device's linux netdev: raise RLIMIT_MEMLOCK (bpf maps are mlocked),
 * open and load the bpf object, then attach the first program found in
 * it.  On failure, sets args->rv/args->error and closes the bpf object. */
224 af_xdp_load_program (af_xdp_create_if_args_t * args, af_xdp_device_t * ad)
227   struct bpf_program *bpf_prog;
228   struct rlimit r = { RLIM_INFINITY, RLIM_INFINITY };
/* non-fatal: only warn if we cannot raise the memlock limit */
230   if (setrlimit (RLIMIT_MEMLOCK, &r))
231     af_xdp_log (VLIB_LOG_LEVEL_WARNING, ad,
232 		"setrlimit(%s) failed: %s (errno %d)", ad->linux_ifname,
233 		strerror (errno), errno);
235   ad->bpf_obj = bpf_object__open_file (args->prog, NULL);
236   if (libbpf_get_error (ad->bpf_obj))
238       args->rv = VNET_API_ERROR_SYSCALL_ERROR_5;
239       args->error = clib_error_return_unix (
240 	0, "bpf_object__open_file(%s) failed", args->prog);
/* use the first (and expectedly only) program in the object */
244   bpf_prog = bpf_object__next_program (ad->bpf_obj, NULL);
248   bpf_program__set_type (bpf_prog, BPF_PROG_TYPE_XDP);
250   if (bpf_object__load (ad->bpf_obj))
253   fd = bpf_program__fd (bpf_prog);
255   if (bpf_xdp_attach (ad->linux_ifindex, fd, XDP_FLAGS_UPDATE_IF_NOEXIST,
258       args->rv = VNET_API_ERROR_SYSCALL_ERROR_6;
259       args->error = clib_error_return_unix (0, "bpf_xdp_attach(%s) failed",
/* error path: release the partially-loaded object */
267   bpf_object__close (ad->bpf_obj);
/* Create one queue pair (umem + XSK socket) for queue id `qid`.
 * A qid below rxq_num/txq_num means the rx/tx side respectively is
 * actually used; fq/cq rings are always allocated regardless.  The umem
 * is mapped directly over VPP's buffer memory so that rx/tx are
 * zero-copy with respect to vlib buffers.  On failure, sets
 * args->rv/args->error and deletes whatever was created. */
274 af_xdp_create_queue (vlib_main_t *vm, af_xdp_create_if_args_t *args,
275 		     af_xdp_device_t *ad, int qid)
277   struct xsk_umem **umem;
278   struct xsk_socket **xsk;
281   struct xsk_umem_config umem_config;
282   struct xsk_socket_config sock_config;
283   struct xdp_options opt;
285   const int is_rx = qid < ad->rxq_num;
286   const int is_tx = qid < ad->txq_num;
288   umem = vec_elt_at_index (ad->umem, qid);
289   xsk = vec_elt_at_index (ad->xsk, qid);
290   rxq = vec_elt_at_index (ad->rxqs, qid);
291   txq = vec_elt_at_index (ad->txqs, qid);
294    * fq and cq must always be allocated even if unused
295    * whereas rx and tx indicates whether we want rxq, txq, or both
297   struct xsk_ring_cons *rx = is_rx ? &rxq->rx : 0;
298   struct xsk_ring_prod *fq = &rxq->fq;
299   struct xsk_ring_prod *tx = is_tx ? &txq->tx : 0;
300   struct xsk_ring_cons *cq = &txq->cq;
/* umem frames are laid out exactly like vlib buffers: metadata as
 * headroom, then the packet data area */
303   memset (&umem_config, 0, sizeof (umem_config));
304   umem_config.fill_size = args->rxq_size;
305   umem_config.comp_size = args->txq_size;
306   umem_config.frame_size =
307     sizeof (vlib_buffer_t) + vlib_buffer_get_default_data_size (vm);
308   umem_config.frame_headroom = sizeof (vlib_buffer_t);
309   umem_config.flags = XDP_UMEM_UNALIGNED_CHUNK_FLAG;
/* NOTE(review): the xsk_umem__create() call head is on an elided line;
 * these are its trailing arguments. */
311       (umem, uword_to_pointer (vm->buffer_main->buffer_mem_start, void *),
312        vm->buffer_main->buffer_mem_size, fq, cq, &umem_config))
314       uword sys_page_size = clib_mem_get_page_size ();
315       args->rv = VNET_API_ERROR_SYSCALL_ERROR_1;
316       args->error = clib_error_return_unix (0, "xsk_umem__create() failed");
317       /* this should mimic the Linux kernel net/xdp/xdp_umem.c:xdp_umem_reg()
/* refine the error when the frame size is outside the kernel's
 * accepted [XDP_UMEM_MIN_CHUNK_SIZE, page size] range */
319       if (umem_config.frame_size < XDP_UMEM_MIN_CHUNK_SIZE ||
320 	  umem_config.frame_size > sys_page_size)
321 	args->error = clib_error_return (
323 	  "(unsupported data-size? (should be between %d and %d))",
324 	  XDP_UMEM_MIN_CHUNK_SIZE - sizeof (vlib_buffer_t),
325 	  sys_page_size - sizeof (vlib_buffer_t));
329   memset (&sock_config, 0, sizeof (sock_config));
330   sock_config.rx_size = args->rxq_size;
331   sock_config.tx_size = args->txq_size;
332   sock_config.bind_flags = XDP_USE_NEED_WAKEUP;
/* bind mode: AUTO adds no extra flag, COPY/ZERO_COPY force the mode */
335     case AF_XDP_MODE_AUTO:
337     case AF_XDP_MODE_COPY:
338       sock_config.bind_flags |= XDP_COPY;
340     case AF_XDP_MODE_ZERO_COPY:
341       sock_config.bind_flags |= XDP_ZEROCOPY;
/* a custom program was loaded: keep libbpf from loading its default one */
345     sock_config.libbpf_flags = XSK_LIBBPF_FLAGS__INHIBIT_PROG_LOAD;
346   if (xsk_socket__create
347       (xsk, ad->linux_ifname, qid, *umem, rx, tx, &sock_config))
349       args->rv = VNET_API_ERROR_SYSCALL_ERROR_2;
351 	clib_error_return_unix (0,
352 				"xsk_socket__create() failed (is linux netdev %s up?)",
357   fd = xsk_socket__fd (*xsk);
/* with a custom program, the socket must be inserted into its xsks_map
 * so the program can redirect packets to it */
360       struct bpf_map *map =
361 	bpf_object__find_map_by_name (ad->bpf_obj, "xsks_map");
362       int ret = xsk_socket__update_xskmap (*xsk, bpf_map__fd (map));
365 	  args->rv = VNET_API_ERROR_SYSCALL_ERROR_3;
366 	  args->error = clib_error_return_unix (
367 	    0, "xsk_socket__update_xskmap %s qid %d return %d",
368 	    ad->linux_ifname, qid, ret);
372   optlen = sizeof (opt);
376   if (getsockopt (fd, SOL_XDP, XDP_OPTIONS, &opt, &optlen))
378       args->rv = VNET_API_ERROR_SYSCALL_ERROR_4;
380 	clib_error_return_unix (0, "getsockopt(XDP_OPTIONS) failed");
/* record whether the kernel actually granted zero-copy */
383   if (opt.flags & XDP_OPTIONS_ZEROCOPY)
384     ad->flags |= AF_XDP_DEVICE_F_ZEROCOPY;
386   rxq->xsk_fd = is_rx ? fd : -1;
391       clib_spinlock_init (&txq->lock);
392       if (is_rx && (ad->flags & AF_XDP_DEVICE_F_SYSCALL_LOCK))
394 	  /* This is a shared rx+tx queue and we need to lock before syscalls.
395 	   * Prior to Linux 5.6 there is a race condition preventing to call
396 	   * poll() and sendto() concurrently on AF_XDP sockets. This was
397 	   * fixed with commit 11cc2d21499cabe7e7964389634ed1de3ee91d33
398 	   * to workaround this issue, we protect the syscalls with a
399 	   * spinlock. Note that it also prevents to use interrupt mode in
400 	   * multi workers setup, because in this case the poll() is done in
401 	   * the framework w/o any possibility to protect it.
403 	   * https://lore.kernel.org/bpf/BYAPR11MB365382C5DB1E5FCC53242609C1549@BYAPR11MB3653.namprd11.prod.outlook.com/
405 	  clib_spinlock_init (&rxq->syscall_lock);
406 	  txq->syscall_lock = rxq->syscall_lock;
/* error unwind: delete in reverse creation order (socket, then umem) */
417   xsk_socket__delete (*xsk);
419   xsk_umem__delete (*umem);
/* Read the NUMA node of a linux netdev from sysfs
 * (/sys/class/net/<ifname>/device/numa_node).  Errors are swallowed:
 * the clib error is simply freed, presumably falling back to a default
 * numa value set on an elided line. */
427 af_xdp_get_numa (const char *ifname)
434     (char *) format (0, "/sys/class/net/%s/device/numa_node%c", ifname, 0);
435   err = clib_sysfs_read (path, "%d", &numa);
439     clib_error_free (err);
/* Query the number of rx/tx channels of a linux netdev via the
 * ETHTOOL_GCHANNELS ioctl.  Defaults both counts to 1 so callers get a
 * sane value when the ioctl is unsupported or fails.  A channel may be
 * reported as "combined" or as separate rx/tx counts, hence the max. */
445 af_xdp_get_q_count (const char *ifname, int *rxq_num, int *txq_num)
447   struct ethtool_channels ec = { .cmd = ETHTOOL_GCHANNELS };
448   struct ifreq ifr = { .ifr_data = (void *) &ec };
451   *rxq_num = *txq_num = 1;
/* any AF_INET dgram socket works as an ioctl handle for ethtool */
453   fd = socket (AF_INET, SOCK_DGRAM, 0);
457   snprintf (ifr.ifr_name, sizeof (ifr.ifr_name), "%s", ifname);
458   err = ioctl (fd, SIOCETHTOOL, &ifr);
465   *rxq_num = clib_max (ec.combined_count, ec.rx_count);
466   *txq_num = clib_max (ec.combined_count, ec.tx_count);
/* clib_file read-ready callback for an rxq's XSK fd (interrupt mode):
 * mark the rx queue (stored in f->private_data) as having pending input. */
469 static clib_error_t *
470 af_xdp_device_rxq_read_ready (clib_file_t * f)
472   vnet_hw_if_rx_queue_set_int_pending (vnet_get_main (), f->private_data);
/* Switch an rxq between polling and interrupt mode by adding/removing its
 * fd from the clib file poller.  No-op if already in the requested mode.
 * Interrupt mode is refused when the syscall-lock kernel workaround is
 * active (see the comment in af_xdp_create_queue). */
476 static clib_error_t *
477 af_xdp_device_set_rxq_mode (const af_xdp_device_t *ad, af_xdp_rxq_t *rxq,
478 			    const af_xdp_rxq_mode_t mode)
480   clib_file_main_t *fm = &file_main;
481   clib_file_update_type_t update;
484   if (rxq->mode == mode)
489     case AF_XDP_RXQ_MODE_POLLING:
490       update = UNIX_FILE_UPDATE_DELETE;
492     case AF_XDP_RXQ_MODE_INTERRUPT:
493       if (ad->flags & AF_XDP_DEVICE_F_SYSCALL_LOCK)
494 	return clib_error_create (
495 	  "kernel workaround incompatible with interrupt mode");
496       update = UNIX_FILE_UPDATE_ADD;
500       return clib_error_create ("unknown rxq mode %i", mode);
503   f = clib_file_get (fm, rxq->file_index);
504   fm->file_update (f, update);
/* Find the first rxq of this device whose rx thread is `thread` (the
 * comparison/return are on elided lines; the loop below walks all rxqs
 * and resolves each queue's assigned thread index). */
510 af_xdp_find_rxq_for_thread (vnet_main_t *vnm, const af_xdp_device_t *ad,
514   for (i = 0; i < ad->rxq_num; i++)
516       const u32 qid = vec_elt (ad->rxqs, i).queue_index;
517       const u32 tid = vnet_hw_if_get_rx_queue (vnm, qid)->thread_index;
/* Register all rx/tx queues with vnet, hook each rxq fd into the clib
 * file poller (starting in polling mode), and assign one txq per vlib
 * thread — preferring the txq paired with the thread's rxq to avoid
 * taking the syscall lock, otherwise round-robin over remaining txqs. */
524 static clib_error_t *
525 af_xdp_finalize_queues (vnet_main_t *vnm, af_xdp_device_t *ad,
526 			const int n_vlib_mains)
528   clib_error_t *err = 0;
531   for (i = 0; i < ad->rxq_num; i++)
533       af_xdp_rxq_t *rxq = vec_elt_at_index (ad->rxqs, i);
534       rxq->queue_index = vnet_hw_if_register_rx_queue (
535 	vnm, ad->hw_if_index, i, VNET_HW_IF_RXQ_THREAD_ANY);
536       u8 *desc = format (0, "%U rxq %d", format_af_xdp_device_name,
537 			 ad->dev_instance, i);
539 	.file_descriptor = rxq->xsk_fd,
540 	.private_data = rxq->queue_index,
541 	.read_function = af_xdp_device_rxq_read_ready,
544       rxq->file_index = clib_file_add (&file_main, &f);
545       vnet_hw_if_set_rx_queue_file_index (vnm, rxq->queue_index,
547       err = af_xdp_device_set_rxq_mode (ad, rxq, AF_XDP_RXQ_MODE_POLLING);
552   for (i = 0; i < ad->txq_num; i++)
553     vec_elt (ad->txqs, i).queue_index =
554       vnet_hw_if_register_tx_queue (vnm, ad->hw_if_index, i);
556   /* We set the rxq and txq of the same queue pair on the same thread
557    * by default to avoid locking because of the syscall lock. */
558   int last_qid = clib_min (ad->rxq_num, ad->txq_num - 1);
559   for (i = 0; i < n_vlib_mains; i++)
561       /* search for the 1st rxq assigned on this thread, if any */
562       u32 qid = af_xdp_find_rxq_for_thread (vnm, ad, i);
563       /* if this rxq is combined with a txq, use it. Otherwise, we'll
564        * assign txq in a round-robin fashion. We start from the 1st txq
565        * not shared with a rxq if possible... */
566       qid = qid < ad->txq_num ? qid : (last_qid++ % ad->txq_num);
567       vnet_hw_if_tx_queue_assign_thread (
568 	vnm, vec_elt (ad->txqs, qid).queue_index, i);
571   vnet_hw_if_update_runtime_data (vnm, ad->hw_if_index);
/* Create an AF_XDP interface from `args`.
 * Flow: validate/default arguments, enter the target netns, probe the
 * netdev's queue counts, allocate the device, optionally load a custom
 * XDP program, create one umem+XSK socket per queue, leave the netns,
 * register the ethernet interface with vnet, finalize queue/thread
 * assignment, and build the rx buffer template.  On any failure,
 * args->rv/args->error are set and everything created so far is torn
 * down (elided goto labels err2/err1/err0 handle the unwind). */
576 af_xdp_create_if (vlib_main_t * vm, af_xdp_create_if_args_t * args)
578   vnet_main_t *vnm = vnet_get_main ();
579   vlib_thread_main_t *tm = vlib_get_thread_main ();
580   vnet_eth_interface_registration_t eir = {};
581   af_xdp_main_t *am = &af_xdp_main;
583   vnet_sw_interface_t *sw;
584   int rxq_num, txq_num, q_num;
/* apply defaults: 2 frames per ring, a single rxq */
588   args->rxq_size = args->rxq_size ? args->rxq_size : 2 * VLIB_FRAME_SIZE;
589   args->txq_size = args->txq_size ? args->txq_size : 2 * VLIB_FRAME_SIZE;
590   args->rxq_num = args->rxq_num ? args->rxq_num : 1;
592   if (!args->linux_ifname)
594       args->rv = VNET_API_ERROR_INVALID_VALUE;
595       args->error = clib_error_return (0, "missing host interface");
/* ring sizes must be powers of two within [VLIB_FRAME_SIZE, 65535] */
599   if (args->rxq_size < VLIB_FRAME_SIZE || args->txq_size < VLIB_FRAME_SIZE ||
600       args->rxq_size > 65535 || args->txq_size > 65535 ||
601       !is_pow2 (args->rxq_size) || !is_pow2 (args->txq_size))
603       args->rv = VNET_API_ERROR_INVALID_VALUE;
605 	clib_error_return (0,
606 			   "queue size must be a power of two between %i and 65535",
611   ret = af_xdp_enter_netns (args->netns, ns_fds);
615       args->error = clib_error_return (0, "enter netns %s failed, ret %d",
616 				       args->netns, args->rv);
620   af_xdp_get_q_count (args->linux_ifname, &rxq_num, &txq_num);
/* AF_XDP_NUM_RX_QUEUES_ALL means "use all available rxqs" */
621   if (args->rxq_num > rxq_num && AF_XDP_NUM_RX_QUEUES_ALL != args->rxq_num)
623       args->rv = VNET_API_ERROR_INVALID_VALUE;
624       args->error = clib_error_create ("too many rxq requested (%d > %d)",
625 				       args->rxq_num, rxq_num);
628   rxq_num = clib_min (rxq_num, args->rxq_num);
629   txq_num = clib_min (txq_num, tm->n_vlib_mains);
631   pool_get_zero (am->devices, ad);
/* multi-worker setups need the pre-5.6 kernel syscall-lock workaround
 * unless the user explicitly disabled it */
633   if (tm->n_vlib_mains > 1 &&
634       0 == (args->flags & AF_XDP_CREATE_FLAGS_NO_SYSCALL_LOCK))
635     ad->flags |= AF_XDP_DEVICE_F_SYSCALL_LOCK;
637   ad->linux_ifname = (char *) format (0, "%s", args->linux_ifname);
638   vec_validate (ad->linux_ifname, IFNAMSIZ - 1); /* libbpf expects ifname to be at least IFNAMSIZ */
641     ad->netns = (char *) format (0, "%s%c", args->netns, 0);
643   ad->linux_ifindex = if_nametoindex (ad->linux_ifname);
644   if (!ad->linux_ifindex)
646       args->rv = VNET_API_ERROR_INVALID_VALUE;
647       args->error = clib_error_return_unix (0, "if_nametoindex(%s) failed",
649       ad->linux_ifindex = ~0;
653   if (args->prog && af_xdp_load_program (args, ad))
656   q_num = clib_max (rxq_num, txq_num);
657   ad->rxq_num = rxq_num;
658   ad->txq_num = txq_num;
660   vec_validate_aligned (ad->umem, q_num - 1, CLIB_CACHE_LINE_BYTES);
661   vec_validate_aligned (ad->xsk, q_num - 1, CLIB_CACHE_LINE_BYTES);
662   vec_validate_aligned (ad->rxqs, q_num - 1, CLIB_CACHE_LINE_BYTES);
663   vec_validate_aligned (ad->txqs, q_num - 1, CLIB_CACHE_LINE_BYTES);
665   for (i = 0; i < q_num; i++)
667       if (af_xdp_create_queue (vm, args, ad, i))
670 	   * queue creation failed
671 	   * it is only a fatal error if we could not create the number of rx
672 	   * queues requested explicitely by the user and the user did not
674 	   * we might create less tx queues than workers but this is ok
676 	  af_xdp_log (VLIB_LOG_LEVEL_DEBUG, ad,
677 		      "create interface failed to create queue qid=%d", i);
679 	  /* fixup vectors length */
680 	  vec_set_len (ad->umem, i);
681 	  vec_set_len (ad->xsk, i);
682 	  vec_set_len (ad->rxqs, i);
683 	  vec_set_len (ad->txqs, i);
685 	  ad->rxq_num = clib_min (i, rxq_num);
686 	  ad->txq_num = clib_min (i, txq_num);
689 	      (i < rxq_num && AF_XDP_NUM_RX_QUEUES_ALL != args->rxq_num))
691 	      ad->rxq_num = ad->txq_num = 0;
692 	      goto err2; /* failed creating requested rxq: fatal error, bailing
/* partial success is acceptable: clear the queue-creation error */
698 	  clib_error_free (args->error);
703   if (af_xdp_exit_netns (args->netns, ns_fds))
705       args->rv = VNET_API_ERROR_SYSCALL_ERROR_10;
706       args->error = clib_error_return (0, "exit netns failed");
710   ad->dev_instance = ad - am->devices;
711   ad->per_interface_next_index = VNET_DEVICE_INPUT_NEXT_ETHERNET_INPUT;
713     vlib_buffer_pool_get_default_for_numa (vm,
/* build a display name; for pid-based netns, append the pid to
 * disambiguate identical ifnames across namespaces */
718       char *ifname = ad->linux_ifname;
719       if (args->netns != NULL && strncmp (args->netns, "pid:", 4) == 0)
722 	  (char *) format (0, "%s/%u", ifname, atoi (args->netns + 4));
725       ad->name = (char *) format (0, "%s/%d", ifname, ad->dev_instance);
728     ad->name = (char *) format (0, "%s", args->name);
730   ethernet_mac_address_generate (ad->hwaddr);
732   /* create interface */
733   eir.dev_class_index = af_xdp_device_class.index;
734   eir.dev_instance = ad->dev_instance;
735   eir.address = ad->hwaddr;
736   eir.cb.flag_change = af_xdp_flag_change;
737   eir.cb.set_max_frame_size = af_xdp_set_max_frame_size;
738   ad->hw_if_index = vnet_eth_register_interface (vnm, &eir);
740   sw = vnet_get_hw_sw_interface (vnm, ad->hw_if_index);
741   args->sw_if_index = ad->sw_if_index = sw->sw_if_index;
743   vnet_hw_if_set_caps (vnm, ad->hw_if_index, VNET_HW_IF_CAP_INT_MODE);
745   vnet_hw_if_set_input_node (vnm, ad->hw_if_index, af_xdp_input_node.index);
747   args->error = af_xdp_finalize_queues (vnm, ad, tm->n_vlib_mains);
750       args->rv = VNET_API_ERROR_SYSCALL_ERROR_7;
754   /* buffer template */
755   vec_validate_aligned (ad->buffer_template, 1, CLIB_CACHE_LINE_BYTES);
756   ad->buffer_template->flags = VLIB_BUFFER_TOTAL_LENGTH_VALID;
757   ad->buffer_template->ref_count = 1;
758   vnet_buffer (ad->buffer_template)->sw_if_index[VLIB_RX] = ad->sw_if_index;
759   vnet_buffer (ad->buffer_template)->sw_if_index[VLIB_TX] = (u32) ~ 0;
760   ad->buffer_template->buffer_pool_index = ad->pool;
/* error unwind labels (elided): delete the half-built interface, clean
 * up netns fds, log the error */
765   af_xdp_delete_if (vm, ad);
767   af_xdp_cleanup_netns (ns_fds);
769   vlib_log_err (am->log_class, "%U", format_clib_error, args->error);
/* Device-class admin up/down callback: refuse when the device is in error
 * state; on up, raise the link, set ADMIN_UP, and prime the rx rings via
 * the input refill; on down, clear flags. */
772 static clib_error_t *
773 af_xdp_interface_admin_up_down (vnet_main_t * vnm, u32 hw_if_index, u32 flags)
775   vnet_hw_interface_t *hi = vnet_get_hw_interface (vnm, hw_if_index);
776   af_xdp_main_t *am = &af_xdp_main;
777   af_xdp_device_t *ad = vec_elt_at_index (am->devices, hi->dev_instance);
778   uword is_up = (flags & VNET_SW_INTERFACE_FLAG_ADMIN_UP) != 0;
780   if (ad->flags & AF_XDP_DEVICE_F_ERROR)
781     return clib_error_return (0, "device is in error state");
785       vnet_hw_interface_set_flags (vnm, ad->hw_if_index,
786 				   VNET_HW_INTERFACE_FLAG_LINK_UP);
787       ad->flags |= AF_XDP_DEVICE_F_ADMIN_UP;
/* pre-fill the fill rings so rx can start immediately */
788       af_xdp_device_input_refill (ad);
792       vnet_hw_interface_set_flags (vnm, ad->hw_if_index, 0);
793       ad->flags &= ~AF_XDP_DEVICE_F_ADMIN_UP;
/* Device-class rx-mode-change callback: map vnet rx modes onto the two
 * AF_XDP rxq modes (polling / interrupt); adaptive is treated as
 * interrupt.  NOTE(review): "uknown" in the error string below is a typo
 * in a runtime message — worth fixing upstream (cannot be changed in a
 * comments-only edit). */
798 static clib_error_t *
799 af_xdp_interface_rx_mode_change (vnet_main_t *vnm, u32 hw_if_index, u32 qid,
800 				 vnet_hw_if_rx_mode mode)
802   af_xdp_main_t *am = &af_xdp_main;
803   vnet_hw_interface_t *hw = vnet_get_hw_interface (vnm, hw_if_index);
804   af_xdp_device_t *ad = pool_elt_at_index (am->devices, hw->dev_instance);
805   af_xdp_rxq_t *rxq = vec_elt_at_index (ad->rxqs, qid);
809     default:			   /* fallthrough */
810     case VNET_HW_IF_RX_MODE_UNKNOWN: /* fallthrough */
811     case VNET_HW_IF_NUM_RX_MODES:
812       return clib_error_create ("uknown rx mode - doing nothing");
813     case VNET_HW_IF_RX_MODE_DEFAULT: /* fallthrough */
814     case VNET_HW_IF_RX_MODE_POLLING:
815       return af_xdp_device_set_rxq_mode (ad, rxq, AF_XDP_RXQ_MODE_POLLING);
816     case VNET_HW_IF_RX_MODE_INTERRUPT: /* fallthrough */
817     case VNET_HW_IF_RX_MODE_ADAPTIVE:
818       return af_xdp_device_set_rxq_mode (ad, rxq, AF_XDP_RXQ_MODE_INTERRUPT);
/* all cases above return; this point is never reached */
821   ASSERT (0 && "unreachable");
822   return clib_error_create ("unreachable");
/* Device-class rx-redirect callback: point this interface's rx path at an
 * arbitrary node.  ~0 restores the default (no redirection); otherwise
 * the node is added as a next of the af_xdp input node. */
826 af_xdp_set_interface_next_node (vnet_main_t * vnm, u32 hw_if_index,
829   af_xdp_main_t *am = &af_xdp_main;
830   vnet_hw_interface_t *hw = vnet_get_hw_interface (vnm, hw_if_index);
831   af_xdp_device_t *ad = pool_elt_at_index (am->devices, hw->dev_instance);
833   /* Shut off redirection */
834   if (node_index == ~0)
836       ad->per_interface_next_index = node_index;
840   ad->per_interface_next_index =
841     vlib_node_add_next (vlib_get_main (), af_xdp_input_node.index,
/* Human-readable counterparts of the AF_XDP tx error counters, generated
 * from the foreach_af_xdp_tx_func_error X-macro. */
845 static char *af_xdp_tx_func_error_strings[] = {
847   foreach_af_xdp_tx_func_error
/* Device-class clear-counters callback: drop the device's cached error. */
852 af_xdp_clear (u32 dev_instance)
854   af_xdp_main_t *am = &af_xdp_main;
855   af_xdp_device_t *ad = pool_elt_at_index (am->devices, dev_instance);
856   clib_error_free (ad->error);
/* vnet device-class registration: wires the callbacks defined above into
 * the interface framework (admin up/down, rx mode, redirect, MAC change,
 * counter clearing, formatting, and tx error strings). */
860 VNET_DEVICE_CLASS (af_xdp_device_class) = {
861   .name = "AF_XDP interface",
862   .format_device = format_af_xdp_device,
863   .format_device_name = format_af_xdp_device_name,
864   .admin_up_down_function = af_xdp_interface_admin_up_down,
865   .rx_mode_change_function = af_xdp_interface_rx_mode_change,
866   .rx_redirect_to_node = af_xdp_set_interface_next_node,
867   .tx_function_n_errors = AF_XDP_TX_N_ERROR,
868   .tx_function_error_strings = af_xdp_tx_func_error_strings,
869   .mac_addr_change_function = af_xdp_mac_change,
870   .clear_counters = af_xdp_clear,
/* Plugin init: register the "af_xdp" log class.  Runs once at startup
 * via VLIB_INIT_FUNCTION. */
875 af_xdp_init (vlib_main_t * vm)
877   af_xdp_main_t *am = &af_xdp_main;
879   am->log_class = vlib_log_register_class ("af_xdp", 0);
884 VLIB_INIT_FUNCTION (af_xdp_init);
887 * fd.io coding-style-patch-verification: ON
890 * eval: (c-set-style "gnu")