/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2016-2017 Intel Corporation
 */

#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_mbuf.h>
#include <rte_ethdev_driver.h>
#include <rte_ethdev_vdev.h>
#include <rte_malloc.h>
#include <rte_bus_vdev.h>
#include <rte_kvargs.h>
#include <rte_net.h>
#include <rte_debug.h>
#include <rte_ip.h>
#include <rte_string_fns.h>
#include <rte_ethdev.h>
#include <rte_errno.h>

#include <assert.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/utsname.h>
#include <errno.h>
#include <signal.h>
#include <stdint.h>
#include <sys/uio.h>
#include <unistd.h>
#include <arpa/inet.h>
#include <net/if.h>
#include <linux/if_tun.h>
#include <linux/if_ether.h>
#include <fcntl.h>

#include <tap_rss.h>
#include <rte_eth_tap.h>
#include <tap_flow.h>
#include <tap_netlink.h>
#include <tap_tcmsgs.h>

/* Linux based path to the TUN device */
#define TUN_TAP_DEV_PATH "/dev/net/tun"
#define DEFAULT_TAP_NAME "dtap"
#define DEFAULT_TUN_NAME "dtun"

#define ETH_TAP_IFACE_ARG "iface"
#define ETH_TAP_REMOTE_ARG "remote"
#define ETH_TAP_MAC_ARG "mac"
#define ETH_TAP_MAC_FIXED "fixed"

#define ETH_TAP_USR_MAC_FMT "xx:xx:xx:xx:xx:xx"
#define ETH_TAP_CMP_MAC_FMT "0123456789ABCDEFabcdef"
#define ETH_TAP_MAC_ARG_FMT ETH_TAP_MAC_FIXED "|" ETH_TAP_USR_MAC_FMT

#define TAP_GSO_MBUFS_PER_CORE 128
#define TAP_GSO_MBUF_SEG_SIZE 128
#define TAP_GSO_MBUF_CACHE_SIZE 4
#define TAP_GSO_MBUFS_NUM \
	(TAP_GSO_MBUFS_PER_CORE * TAP_GSO_MBUF_CACHE_SIZE)

/* IPC key for queue fds sync */
#define TAP_MP_KEY "tap_mp_sync_queues"
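
/*
 * Queue fds cannot be shared through shared memory, so a secondary
 * process requests them from the primary over the rte_mp channel; the
 * descriptors themselves travel as ancillary data (SCM_RIGHTS) in the
 * underlying Unix socket message and are duplicated by the kernel.
 */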

#define TAP_IOV_DEFAULT_MAX 1024

static int tap_devices_count;
static struct rte_vdev_driver pmd_tap_drv;
static struct rte_vdev_driver pmd_tun_drv;

static const char *valid_arguments[] = {
	ETH_TAP_IFACE_ARG,
	ETH_TAP_REMOTE_ARG,
	ETH_TAP_MAC_ARG,
	NULL
};

static char tuntap_name[8];

static volatile uint32_t tap_trigger;	/* Rx trigger */

static struct rte_eth_link pmd_link = {
	.link_speed = ETH_SPEED_NUM_10G,
	.link_duplex = ETH_LINK_FULL_DUPLEX,
	.link_status = ETH_LINK_DOWN,
	.link_autoneg = ETH_LINK_FIXED,
};

static void
tap_trigger_cb(int sig __rte_unused)
{
	/* Valid trigger values are nonzero */
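	/* OR-ing the MSB keeps the value nonzero even if the counter wraps. */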
	tap_trigger = (tap_trigger + 1) | 0x80000000;
}

/* Specifies on what netdevices the ioctl should be applied */
enum ioctl_mode {
	LOCAL_AND_REMOTE,
	LOCAL_ONLY,
	REMOTE_ONLY,
};

/* Message header to synchronize queues via IPC */
struct ipc_queues {
	char port_name[RTE_DEV_NAME_MAX_LEN];
	int rxq_count;
	int txq_count;
	/*
	 * The file descriptors are in the dedicated part
	 * of the Unix message to be translated by the kernel.
	 */
};

static int tap_intr_handle_set(struct rte_eth_dev *dev, int set);

/**
 * Tun/Tap allocation routine
 *
 * @param[in] pmd
 *   Pointer to private structure.
 *
 * @param[in] is_keepalive
 *   Keepalive flag
 *
 * @return
 *   -1 on failure, fd on success
 */
static int
tun_alloc(struct pmd_internals *pmd, int is_keepalive)
{
	struct ifreq ifr;
#ifdef IFF_MULTI_QUEUE
	unsigned int features;
#endif
	int fd;

	memset(&ifr, 0, sizeof(struct ifreq));

	/*
	 * Do not set IFF_NO_PI as packet information header will be needed
	 * to check if a received packet has been truncated.
	 */
	ifr.ifr_flags = (pmd->type == ETH_TUNTAP_TYPE_TAP) ?
		IFF_TAP : IFF_TUN | IFF_POINTOPOINT;
	snprintf(ifr.ifr_name, IFNAMSIZ, "%s", pmd->name);

	fd = open(TUN_TAP_DEV_PATH, O_RDWR);
	if (fd < 0) {
		TAP_LOG(ERR, "Unable to create %s interface", tuntap_name);
		goto error;
	}

#ifdef IFF_MULTI_QUEUE
	/* Grab the TUN features to verify we can work multi-queue */
	if (ioctl(fd, TUNGETFEATURES, &features) < 0) {
		TAP_LOG(ERR, "%s unable to get TUN/TAP features",
			tuntap_name);
		goto error;
	}
	TAP_LOG(DEBUG, "%s Features %08x", tuntap_name, features);

	if (features & IFF_MULTI_QUEUE) {
		TAP_LOG(DEBUG, "  Multi-queue support for %d queues",
			RTE_PMD_TAP_MAX_QUEUES);
		ifr.ifr_flags |= IFF_MULTI_QUEUE;
	} else
#endif
	{
		ifr.ifr_flags |= IFF_ONE_QUEUE;
		TAP_LOG(DEBUG, "  Single queue support only");
	}

	/* Set the TUN/TAP configuration and set the name if needed */
	if (ioctl(fd, TUNSETIFF, (void *)&ifr) < 0) {
		TAP_LOG(WARNING, "Unable to set TUNSETIFF for %s: %s",
			ifr.ifr_name, strerror(errno));
		goto error;
	}

	/*
	 * The name passed to the kernel might be a wildcard like dtun%d,
	 * so fetch the resulting device name back from ifr.
	 */
	TAP_LOG(DEBUG, "Device name is '%s'", ifr.ifr_name);
	strlcpy(pmd->name, ifr.ifr_name, RTE_ETH_NAME_MAX_LEN);

	if (is_keepalive) {
		/*
		 * Detach the TUN/TAP keep-alive queue
		 * to avoid traffic through it
		 */
		ifr.ifr_flags = IFF_DETACH_QUEUE;
		if (ioctl(fd, TUNSETQUEUE, (void *)&ifr) < 0) {
			TAP_LOG(WARNING,
				"Unable to detach keep-alive queue for %s: %s",
				ifr.ifr_name, strerror(errno));
			goto error;
		}
	}

	/* Always set the file descriptor to non-blocking */
	if (fcntl(fd, F_SETFL, O_NONBLOCK) < 0) {
		TAP_LOG(WARNING,
			"Unable to set %s to nonblocking: %s",
			ifr.ifr_name, strerror(errno));
		goto error;
	}

	/* Set up trigger to optimize empty Rx bursts */
	errno = 0;
	do {
		struct sigaction sa;
		int flags = fcntl(fd, F_GETFL);

		if (flags == -1 || sigaction(SIGIO, NULL, &sa) == -1)
			break;
		if (sa.sa_handler != tap_trigger_cb) {
			/*
			 * Make sure SIGIO is not already taken. This is done
			 * as late as possible to leave the application a
			 * chance to set up its own signal handler first.
			 */
			if (sa.sa_handler != SIG_IGN &&
			    sa.sa_handler != SIG_DFL) {
				errno = EBUSY;
				break;
			}
			sa = (struct sigaction){
				.sa_flags = SA_RESTART,
				.sa_handler = tap_trigger_cb,
			};
			if (sigaction(SIGIO, &sa, NULL) == -1)
				break;
		}
		/* Enable SIGIO on file descriptor */
		fcntl(fd, F_SETFL, flags | O_ASYNC);
		fcntl(fd, F_SETOWN, getpid());
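		/*
		 * O_ASYNC above makes the kernel raise SIGIO when the fd
		 * becomes readable; F_SETOWN directs that signal to this
		 * process. Both are required for the trigger to fire.
		 */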
	} while (0);

	if (errno) {
		/* Disable trigger globally in case of error */
		tap_trigger = 0;
		TAP_LOG(WARNING, "Rx trigger disabled: %s",
			strerror(errno));
	}

	return fd;

error:
	if (fd > 0)
		close(fd);
	return -1;
}

static void
tap_verify_csum(struct rte_mbuf *mbuf)
{
	uint32_t l2 = mbuf->packet_type & RTE_PTYPE_L2_MASK;
	uint32_t l3 = mbuf->packet_type & RTE_PTYPE_L3_MASK;
	uint32_t l4 = mbuf->packet_type & RTE_PTYPE_L4_MASK;
	unsigned int l2_len = sizeof(struct ether_hdr);
	unsigned int l3_len;
	uint16_t cksum = 0;
	void *l3_hdr;
	void *l4_hdr;

	if (l2 == RTE_PTYPE_L2_ETHER_VLAN)
		l2_len += 4;
	else if (l2 == RTE_PTYPE_L2_ETHER_QINQ)
		l2_len += 8;
	/* Don't verify checksum for packets with discontinuous L2 header */
	if (unlikely(l2_len + sizeof(struct ipv4_hdr) >
		     rte_pktmbuf_data_len(mbuf)))
		return;
	l3_hdr = rte_pktmbuf_mtod_offset(mbuf, void *, l2_len);
	if (l3 == RTE_PTYPE_L3_IPV4 || l3 == RTE_PTYPE_L3_IPV4_EXT) {
		struct ipv4_hdr *iph = l3_hdr;

		/* ihl contains the number of 4-byte words in the header */
		l3_len = 4 * (iph->version_ihl & 0xf);
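		/* e.g. version_ihl 0x45 (IPv4, IHL 5) gives l3_len = 20 */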
		if (unlikely(l2_len + l3_len > rte_pktmbuf_data_len(mbuf)))
			return;
		/* check that the total length reported by the header is not
		 * greater than the total received size
		 */
		if (l2_len + rte_be_to_cpu_16(iph->total_length) >
				rte_pktmbuf_data_len(mbuf))
			return;

		cksum = ~rte_raw_cksum(iph, l3_len);
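		/* A valid IPv4 header sums to 0xffff, so a nonzero
		 * complement means the checksum is bad.
		 */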
		mbuf->ol_flags |= cksum ?
			PKT_RX_IP_CKSUM_BAD :
			PKT_RX_IP_CKSUM_GOOD;
	} else if (l3 == RTE_PTYPE_L3_IPV6) {
		struct ipv6_hdr *iph = l3_hdr;

		l3_len = sizeof(struct ipv6_hdr);
		/* check that the total length reported by the header is not
		 * greater than the total received size
		 */
		if (l2_len + l3_len + rte_be_to_cpu_16(iph->payload_len) >
				rte_pktmbuf_data_len(mbuf))
			return;
	} else {
		/* IPv6 extensions are not supported */
		return;
	}
	if (l4 == RTE_PTYPE_L4_UDP || l4 == RTE_PTYPE_L4_TCP) {
		l4_hdr = rte_pktmbuf_mtod_offset(mbuf, void *, l2_len + l3_len);
		/* Don't verify checksum for multi-segment packets. */
		if (mbuf->nb_segs > 1)
			return;
		if (l3 == RTE_PTYPE_L3_IPV4)
			cksum = ~rte_ipv4_udptcp_cksum(l3_hdr, l4_hdr);
		else if (l3 == RTE_PTYPE_L3_IPV6)
			cksum = ~rte_ipv6_udptcp_cksum(l3_hdr, l4_hdr);
		mbuf->ol_flags |= cksum ?
			PKT_RX_L4_CKSUM_BAD :
			PKT_RX_L4_CKSUM_GOOD;
	}
}

static uint64_t
tap_rx_offload_get_port_capa(void)
{
	/*
	 * No specific port Rx offload capabilities.
	 */
	return 0;
}

static uint64_t
tap_rx_offload_get_queue_capa(void)
{
	return DEV_RX_OFFLOAD_SCATTER |
	       DEV_RX_OFFLOAD_IPV4_CKSUM |
	       DEV_RX_OFFLOAD_UDP_CKSUM |
	       DEV_RX_OFFLOAD_TCP_CKSUM;
}

/* Callback to handle the rx burst of packets to the correct interface and
 * file descriptor(s) in a multi-queue setup.
 */
static uint16_t
pmd_rx_burst(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
{
	struct rx_queue *rxq = queue;
	struct pmd_process_private *process_private;
	uint16_t num_rx;
	unsigned long num_rx_bytes = 0;
	uint32_t trigger = tap_trigger;

	if (trigger == rxq->trigger_seen)
		return 0;
	if (trigger)
		rxq->trigger_seen = trigger;
	process_private = rte_eth_devices[rxq->in_port].process_private;
	rte_compiler_barrier();
	for (num_rx = 0; num_rx < nb_pkts; ) {
		struct rte_mbuf *mbuf = rxq->pool;
		struct rte_mbuf *seg = NULL;
		struct rte_mbuf *new_tail = NULL;
		uint16_t data_off = rte_pktmbuf_headroom(mbuf);
		int len;

		len = readv(process_private->rxq_fds[rxq->queue_id],
			*rxq->iovecs,
			1 + (rxq->rxmode->offloads & DEV_RX_OFFLOAD_SCATTER ?
			     rxq->nb_rx_desc : 1));
		if (len < (int)sizeof(struct tun_pi))
			break;

		/* Packet couldn't fit in the provided mbuf */
		if (unlikely(rxq->pi.flags & TUN_PKT_STRIP)) {
			rxq->stats.ierrors++;
			continue;
		}

		len -= sizeof(struct tun_pi);

		mbuf->pkt_len = len;
		mbuf->port = rxq->in_port;
		while (1) {
			struct rte_mbuf *buf = rte_pktmbuf_alloc(rxq->mp);

			if (unlikely(!buf)) {
				rxq->stats.rx_nombuf++;
				/* No new buf has been allocated: do nothing */
				if (!new_tail || !seg)
					goto end;

				seg->next = NULL;
				rte_pktmbuf_free(mbuf);

				goto end;
			}
			seg = seg ? seg->next : mbuf;
			if (rxq->pool == mbuf)
				rxq->pool = buf;
			if (new_tail)
				new_tail->next = buf;
			new_tail = buf;
			new_tail->next = seg->next;

			/* iovecs[0] is reserved for packet info (pi) */
			(*rxq->iovecs)[mbuf->nb_segs].iov_len =
				buf->buf_len - data_off;
			(*rxq->iovecs)[mbuf->nb_segs].iov_base =
				(char *)buf->buf_addr + data_off;

			seg->data_len = RTE_MIN(seg->buf_len - data_off, len);
			seg->data_off = data_off;

			len -= seg->data_len;
			if (len <= 0)
				break;
			mbuf->nb_segs++;
			/* First segment has headroom, not the others */
			data_off = 0;
		}
		seg->next = NULL;
		mbuf->packet_type = rte_net_get_ptype(mbuf, NULL,
						      RTE_PTYPE_ALL_MASK);
		if (rxq->rxmode->offloads & DEV_RX_OFFLOAD_CHECKSUM)
			tap_verify_csum(mbuf);

		/* account for the receive frame */
		bufs[num_rx++] = mbuf;
		num_rx_bytes += mbuf->pkt_len;
	}
end:
	rxq->stats.ipackets += num_rx;
	rxq->stats.ibytes += num_rx_bytes;

	return num_rx;
}

static uint64_t
tap_tx_offload_get_port_capa(void)
{
	/*
	 * No specific port Tx offload capabilities.
	 */
	return 0;
}

static uint64_t
tap_tx_offload_get_queue_capa(void)
{
	return DEV_TX_OFFLOAD_MULTI_SEGS |
	       DEV_TX_OFFLOAD_IPV4_CKSUM |
	       DEV_TX_OFFLOAD_UDP_CKSUM |
	       DEV_TX_OFFLOAD_TCP_CKSUM |
	       DEV_TX_OFFLOAD_TCP_TSO;
}

/* Finalize l4 checksum calculation */
static void
tap_tx_l4_cksum(uint16_t *l4_cksum, uint16_t l4_phdr_cksum,
		uint32_t l4_raw_cksum)
{
	if (l4_cksum) {
		uint32_t cksum;

		cksum = __rte_raw_cksum_reduce(l4_raw_cksum);
		cksum += l4_phdr_cksum;
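
		/*
		 * One's-complement arithmetic: fold the carry out of the
		 * low 16 bits back in before taking the final complement.
		 */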
		cksum = ((cksum & 0xffff0000) >> 16) + (cksum & 0xffff);
		cksum = (~cksum) & 0xffff;
		if (cksum == 0)
			cksum = 0xffff;
		*l4_cksum = cksum;
	}
}

/* Accumulate L4 raw checksums */
static void
tap_tx_l4_add_rcksum(char *l4_data, unsigned int l4_len, uint16_t *l4_cksum,
		     uint32_t *l4_raw_cksum)
{
	if (l4_cksum == NULL)
		return;

	*l4_raw_cksum = __rte_raw_cksum(l4_data, l4_len, *l4_raw_cksum);
}

/* L3 and L4 pseudo headers checksum offloads */
static void
tap_tx_l3_cksum(char *packet, uint64_t ol_flags, unsigned int l2_len,
		unsigned int l3_len, unsigned int l4_len, uint16_t **l4_cksum,
		uint16_t *l4_phdr_cksum, uint32_t *l4_raw_cksum)
{
	void *l3_hdr = packet + l2_len;

	if (ol_flags & (PKT_TX_IP_CKSUM | PKT_TX_IPV4)) {
		struct ipv4_hdr *iph = l3_hdr;
		uint16_t cksum;

		iph->hdr_checksum = 0;
		cksum = rte_raw_cksum(iph, l3_len);
		iph->hdr_checksum = (cksum == 0xffff) ? cksum : ~cksum;
	}
	if (ol_flags & PKT_TX_L4_MASK) {
		void *l4_hdr;

		l4_hdr = packet + l2_len + l3_len;
		if ((ol_flags & PKT_TX_L4_MASK) == PKT_TX_UDP_CKSUM)
			*l4_cksum = &((struct udp_hdr *)l4_hdr)->dgram_cksum;
		else if ((ol_flags & PKT_TX_L4_MASK) == PKT_TX_TCP_CKSUM)
			*l4_cksum = &((struct tcp_hdr *)l4_hdr)->cksum;
		else
			return;
		**l4_cksum = 0;
		if (ol_flags & PKT_TX_IPV4)
			*l4_phdr_cksum = rte_ipv4_phdr_cksum(l3_hdr, 0);
		else
			*l4_phdr_cksum = rte_ipv6_phdr_cksum(l3_hdr, 0);
		*l4_raw_cksum = __rte_raw_cksum(l4_hdr, l4_len, 0);
	}
}

static void
tap_write_mbufs(struct tx_queue *txq, uint16_t num_mbufs,
		struct rte_mbuf **pmbufs,
		uint16_t *num_packets, unsigned long *num_tx_bytes)
{
	int i;
	uint16_t l234_hlen;
	struct pmd_process_private *process_private;

	process_private = rte_eth_devices[txq->out_port].process_private;

	for (i = 0; i < num_mbufs; i++) {
		struct rte_mbuf *mbuf = pmbufs[i];
		struct iovec iovecs[mbuf->nb_segs + 2];
		struct tun_pi pi = { .flags = 0, .proto = 0x00 };
		struct rte_mbuf *seg = mbuf;
		char m_copy[mbuf->data_len];
		int proto;
		int n;
		int j;
		int k; /* current index in iovecs for copying segments */
		uint16_t seg_len; /* length of first segment */
		uint16_t nb_segs;
		uint16_t *l4_cksum; /* l4 checksum (pseudo header + payload) */
		uint32_t l4_raw_cksum = 0; /* TCP/UDP payload raw checksum */
		uint16_t l4_phdr_cksum = 0; /* TCP/UDP pseudo header checksum */
		uint16_t is_cksum = 0; /* whether cksum should be offloaded */

		l4_cksum = NULL;
		if (txq->type == ETH_TUNTAP_TYPE_TUN) {
			/*
			 * TUN and TAP are created with IFF_NO_PI disabled.
			 * For the TUN PMD this is mandatory, as the fields
			 * are used by the kernel's tun.c to determine whether
			 * the packet is IP or non-IP.
			 *
			 * The logic fetches the first byte of data from the
			 * mbuf and checks the version nibble: if it is 4 or
			 * 6, the protocol field is updated accordingly.
			 */
			char *buff_data = rte_pktmbuf_mtod(seg, void *);
			proto = (*buff_data & 0xf0);
			pi.proto = (proto == 0x40) ?
				rte_cpu_to_be_16(ETHER_TYPE_IPv4) :
				((proto == 0x60) ?
					rte_cpu_to_be_16(ETHER_TYPE_IPv6) :
					0x00);
		}

		k = 0;
		iovecs[k].iov_base = &pi;
		iovecs[k].iov_len = sizeof(pi);
		k++;

		nb_segs = mbuf->nb_segs;
		if (txq->csum &&
		    ((mbuf->ol_flags & (PKT_TX_IP_CKSUM | PKT_TX_IPV4) ||
		     (mbuf->ol_flags & PKT_TX_L4_MASK) == PKT_TX_UDP_CKSUM ||
		     (mbuf->ol_flags & PKT_TX_L4_MASK) == PKT_TX_TCP_CKSUM))) {
			is_cksum = 1;

			/* Support only packets with at least layer 4
			 * header included in the first segment
			 */
			seg_len = rte_pktmbuf_data_len(mbuf);
			l234_hlen = mbuf->l2_len + mbuf->l3_len + mbuf->l4_len;
			if (seg_len < l234_hlen)
				break;

			/* To change checksums, work on a copy of l2, l3
			 * headers + l4 pseudo header
			 */
			rte_memcpy(m_copy, rte_pktmbuf_mtod(mbuf, void *),
					l234_hlen);
			tap_tx_l3_cksum(m_copy, mbuf->ol_flags,
					mbuf->l2_len, mbuf->l3_len, mbuf->l4_len,
					&l4_cksum, &l4_phdr_cksum,
					&l4_raw_cksum);
			iovecs[k].iov_base = m_copy;
			iovecs[k].iov_len = l234_hlen;
			k++;

			/* Update next iovecs[] beyond l2, l3, l4 headers */
			if (seg_len > l234_hlen) {
				iovecs[k].iov_len = seg_len - l234_hlen;
				iovecs[k].iov_base =
					rte_pktmbuf_mtod(seg, char *) +
					l234_hlen;
				tap_tx_l4_add_rcksum(iovecs[k].iov_base,
					iovecs[k].iov_len, l4_cksum,
					&l4_raw_cksum);
				k++;
				nb_segs++;
			}
			seg = seg->next;
		}

		for (j = k; j <= nb_segs; j++) {
			iovecs[j].iov_len = rte_pktmbuf_data_len(seg);
			iovecs[j].iov_base = rte_pktmbuf_mtod(seg, void *);
			if (is_cksum)
				tap_tx_l4_add_rcksum(iovecs[j].iov_base,
					iovecs[j].iov_len, l4_cksum,
					&l4_raw_cksum);
			seg = seg->next;
		}

		if (is_cksum)
			tap_tx_l4_cksum(l4_cksum, l4_phdr_cksum, l4_raw_cksum);

		/* copy the tx frame data */
		n = writev(process_private->txq_fds[txq->queue_id], iovecs, j);
		if (n <= 0)
			break;
		(*num_packets)++;
		(*num_tx_bytes) += rte_pktmbuf_pkt_len(mbuf);
	}
}

/* Callback to handle sending packets from the tap interface
 */
static uint16_t
pmd_tx_burst(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
{
	struct tx_queue *txq = queue;
	uint16_t num_tx = 0;
	uint16_t num_packets = 0;
	unsigned long num_tx_bytes = 0;
	uint32_t max_size;
	int i;

	if (unlikely(nb_pkts == 0))
		return 0;

	struct rte_mbuf *gso_mbufs[MAX_GSO_MBUFS];
	max_size = *txq->mtu + (ETHER_HDR_LEN + ETHER_CRC_LEN + 4);
	for (i = 0; i < nb_pkts; i++) {
		struct rte_mbuf *mbuf_in = bufs[num_tx];
		struct rte_mbuf **mbuf;
		uint16_t num_mbufs = 0;
		uint16_t tso_segsz = 0;
		int ret;
		uint16_t hdrs_len;
		int j;
		uint64_t tso;

		tso = mbuf_in->ol_flags & PKT_TX_TCP_SEG;
		if (tso) {
			struct rte_gso_ctx *gso_ctx = &txq->gso_ctx;

			assert(gso_ctx != NULL);

			/* TCP segmentation implies TCP checksum offload */
			mbuf_in->ol_flags |= PKT_TX_TCP_CKSUM;

			/* gso size is calculated without ETHER_CRC_LEN */
			hdrs_len = mbuf_in->l2_len + mbuf_in->l3_len +
					mbuf_in->l4_len;
			tso_segsz = mbuf_in->tso_segsz + hdrs_len;
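			/*
			 * rte_gso_segment() expects gso_size to be the full
			 * segment size, headers included, hence hdrs_len is
			 * added to the MSS.
			 */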
			if (unlikely(tso_segsz == hdrs_len) ||
					tso_segsz > *txq->mtu) {
				txq->stats.errs++;
				break;
			}
			gso_ctx->gso_size = tso_segsz;
			ret = rte_gso_segment(mbuf_in, /* packet to segment */
				gso_ctx, /* gso control block */
				(struct rte_mbuf **)&gso_mbufs, /* out mbufs */
				RTE_DIM(gso_mbufs)); /* max tso mbufs */

			/* ret contains the number of new created mbufs */
			if (ret < 0)
				break;

			mbuf = gso_mbufs;
			num_mbufs = ret;
		} else {
			/* stats.errs will be incremented */
			if (rte_pktmbuf_pkt_len(mbuf_in) > max_size)
				break;

			/* ret 0 indicates no new mbufs were created */
			ret = 0;
			mbuf = &mbuf_in;
			num_mbufs = 1;
		}

		tap_write_mbufs(txq, num_mbufs, mbuf,
				&num_packets, &num_tx_bytes);
		num_tx++;
		/* free original mbuf */
		rte_pktmbuf_free(mbuf_in);
		/* free tso mbufs */
		for (j = 0; j < ret; j++)
			rte_pktmbuf_free(mbuf[j]);
	}

	txq->stats.opackets += num_packets;
	txq->stats.errs += nb_pkts - num_tx;
	txq->stats.obytes += num_tx_bytes;

	return num_packets;
}

static const char *
tap_ioctl_req2str(unsigned long request)
{
	switch (request) {
	case SIOCSIFFLAGS:
		return "SIOCSIFFLAGS";
	case SIOCGIFFLAGS:
		return "SIOCGIFFLAGS";
	case SIOCGIFHWADDR:
		return "SIOCGIFHWADDR";
	case SIOCSIFHWADDR:
		return "SIOCSIFHWADDR";
	case SIOCSIFMTU:
		return "SIOCSIFMTU";
	}
	return "UNKNOWN";
}

static int
tap_ioctl(struct pmd_internals *pmd, unsigned long request,
	  struct ifreq *ifr, int set, enum ioctl_mode mode)
{
	short req_flags = ifr->ifr_flags;
	int remote = pmd->remote_if_index &&
		(mode == REMOTE_ONLY || mode == LOCAL_AND_REMOTE);

	if (!pmd->remote_if_index && mode == REMOTE_ONLY)
		return 0;
	/*
	 * If there is a remote netdevice, apply ioctl on it, then apply it on
	 * the tap netdevice.
	 */
apply:
	if (remote)
		snprintf(ifr->ifr_name, IFNAMSIZ, "%s", pmd->remote_iface);
	else if (mode == LOCAL_ONLY || mode == LOCAL_AND_REMOTE)
		snprintf(ifr->ifr_name, IFNAMSIZ, "%s", pmd->name);
	switch (request) {
	case SIOCSIFFLAGS:
		/* fetch current flags to leave other flags untouched */
		if (ioctl(pmd->ioctl_sock, SIOCGIFFLAGS, ifr) < 0)
			goto error;
		if (set)
			ifr->ifr_flags |= req_flags;
		else
			ifr->ifr_flags &= ~req_flags;
		break;
	case SIOCGIFFLAGS:
	case SIOCGIFHWADDR:
	case SIOCSIFHWADDR:
	case SIOCSIFMTU:
		break;
	default:
		RTE_LOG(WARNING, PMD, "%s: ioctl() called with wrong arg\n",
			pmd->name);
		return -EINVAL;
	}
	if (ioctl(pmd->ioctl_sock, request, ifr) < 0)
		goto error;
	if (remote-- && mode == LOCAL_AND_REMOTE)
		goto apply;
	return 0;

error:
	TAP_LOG(DEBUG, "%s(%s) failed: %s(%d)", ifr->ifr_name,
		tap_ioctl_req2str(request), strerror(errno), errno);
	return -errno;
}

static int
tap_link_set_down(struct rte_eth_dev *dev)
{
	struct pmd_internals *pmd = dev->data->dev_private;
	struct ifreq ifr = { .ifr_flags = IFF_UP };

	dev->data->dev_link.link_status = ETH_LINK_DOWN;
	return tap_ioctl(pmd, SIOCSIFFLAGS, &ifr, 0, LOCAL_ONLY);
}

static int
tap_link_set_up(struct rte_eth_dev *dev)
{
	struct pmd_internals *pmd = dev->data->dev_private;
	struct ifreq ifr = { .ifr_flags = IFF_UP };

	dev->data->dev_link.link_status = ETH_LINK_UP;
	return tap_ioctl(pmd, SIOCSIFFLAGS, &ifr, 1, LOCAL_AND_REMOTE);
}

static int
tap_dev_start(struct rte_eth_dev *dev)
{
	int err, i;

	err = tap_intr_handle_set(dev, 1);
	if (err)
		return err;

	err = tap_link_set_up(dev);
	if (err)
		return err;

	for (i = 0; i < dev->data->nb_tx_queues; i++)
		dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED;
	for (i = 0; i < dev->data->nb_rx_queues; i++)
		dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED;

	return err;
}

/* This function gets called when the current port gets stopped. */
static void
tap_dev_stop(struct rte_eth_dev *dev)
{
	int i;

	for (i = 0; i < dev->data->nb_tx_queues; i++)
		dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
	for (i = 0; i < dev->data->nb_rx_queues; i++)
		dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;

	tap_intr_handle_set(dev, 0);
	tap_link_set_down(dev);
}

static int
tap_dev_configure(struct rte_eth_dev *dev)
{
	if (dev->data->nb_rx_queues > RTE_PMD_TAP_MAX_QUEUES) {
		TAP_LOG(ERR,
			"%s: number of rx queues %d exceeds max num of queues %d",
			dev->device->name,
			dev->data->nb_rx_queues,
			RTE_PMD_TAP_MAX_QUEUES);
		return -1;
	}
	if (dev->data->nb_tx_queues > RTE_PMD_TAP_MAX_QUEUES) {
		TAP_LOG(ERR,
			"%s: number of tx queues %d exceeds max num of queues %d",
			dev->device->name,
			dev->data->nb_tx_queues,
			RTE_PMD_TAP_MAX_QUEUES);
		return -1;
	}

	TAP_LOG(INFO, "%s: %p: TX configured queues number: %u",
		dev->device->name, (void *)dev, dev->data->nb_tx_queues);

	TAP_LOG(INFO, "%s: %p: RX configured queues number: %u",
		dev->device->name, (void *)dev, dev->data->nb_rx_queues);

	return 0;
}

static uint32_t
tap_dev_speed_capa(void)
{
	uint32_t speed = pmd_link.link_speed;
	uint32_t capa = 0;

	if (speed >= ETH_SPEED_NUM_10M)
		capa |= ETH_LINK_SPEED_10M;
	if (speed >= ETH_SPEED_NUM_100M)
		capa |= ETH_LINK_SPEED_100M;
	if (speed >= ETH_SPEED_NUM_1G)
		capa |= ETH_LINK_SPEED_1G;
	if (speed >= ETH_SPEED_NUM_2_5G)
		capa |= ETH_LINK_SPEED_2_5G;
	if (speed >= ETH_SPEED_NUM_5G)
		capa |= ETH_LINK_SPEED_5G;
	if (speed >= ETH_SPEED_NUM_10G)
		capa |= ETH_LINK_SPEED_10G;
	if (speed >= ETH_SPEED_NUM_20G)
		capa |= ETH_LINK_SPEED_20G;
	if (speed >= ETH_SPEED_NUM_25G)
		capa |= ETH_LINK_SPEED_25G;
	if (speed >= ETH_SPEED_NUM_40G)
		capa |= ETH_LINK_SPEED_40G;
	if (speed >= ETH_SPEED_NUM_50G)
		capa |= ETH_LINK_SPEED_50G;
	if (speed >= ETH_SPEED_NUM_56G)
		capa |= ETH_LINK_SPEED_56G;
	if (speed >= ETH_SPEED_NUM_100G)
		capa |= ETH_LINK_SPEED_100G;

	return capa;
}

static void
tap_dev_info(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
	struct pmd_internals *internals = dev->data->dev_private;

	dev_info->if_index = internals->if_index;
	dev_info->max_mac_addrs = 1;
	dev_info->max_rx_pktlen = (uint32_t)ETHER_MAX_VLAN_FRAME_LEN;
	dev_info->max_rx_queues = RTE_PMD_TAP_MAX_QUEUES;
	dev_info->max_tx_queues = RTE_PMD_TAP_MAX_QUEUES;
	dev_info->min_rx_bufsize = 0;
	dev_info->speed_capa = tap_dev_speed_capa();
	dev_info->rx_queue_offload_capa = tap_rx_offload_get_queue_capa();
	dev_info->rx_offload_capa = tap_rx_offload_get_port_capa() |
				    dev_info->rx_queue_offload_capa;
	dev_info->tx_queue_offload_capa = tap_tx_offload_get_queue_capa();
	dev_info->tx_offload_capa = tap_tx_offload_get_port_capa() |
				    dev_info->tx_queue_offload_capa;
	dev_info->hash_key_size = TAP_RSS_HASH_KEY_SIZE;
	/*
	 * limitation: TAP supports all of IP, UDP and TCP hash
	 * functions together and not in partial combinations
	 */
	dev_info->flow_type_rss_offloads = ~TAP_RSS_HF_MASK;
}

static int
tap_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *tap_stats)
{
	unsigned int i, imax;
	unsigned long rx_total = 0, tx_total = 0, tx_err_total = 0;
	unsigned long rx_bytes_total = 0, tx_bytes_total = 0;
	unsigned long rx_nombuf = 0, ierrors = 0;
	const struct pmd_internals *pmd = dev->data->dev_private;

	/* rx queue statistics */
	imax = (dev->data->nb_rx_queues < RTE_ETHDEV_QUEUE_STAT_CNTRS) ?
		dev->data->nb_rx_queues : RTE_ETHDEV_QUEUE_STAT_CNTRS;
	for (i = 0; i < imax; i++) {
		tap_stats->q_ipackets[i] = pmd->rxq[i].stats.ipackets;
		tap_stats->q_ibytes[i] = pmd->rxq[i].stats.ibytes;
		rx_total += tap_stats->q_ipackets[i];
		rx_bytes_total += tap_stats->q_ibytes[i];
		rx_nombuf += pmd->rxq[i].stats.rx_nombuf;
		ierrors += pmd->rxq[i].stats.ierrors;
	}

	/* tx queue statistics */
	imax = (dev->data->nb_tx_queues < RTE_ETHDEV_QUEUE_STAT_CNTRS) ?
		dev->data->nb_tx_queues : RTE_ETHDEV_QUEUE_STAT_CNTRS;

	for (i = 0; i < imax; i++) {
		tap_stats->q_opackets[i] = pmd->txq[i].stats.opackets;
		tap_stats->q_errors[i] = pmd->txq[i].stats.errs;
		tap_stats->q_obytes[i] = pmd->txq[i].stats.obytes;
		tx_total += tap_stats->q_opackets[i];
		tx_err_total += tap_stats->q_errors[i];
		tx_bytes_total += tap_stats->q_obytes[i];
	}

	tap_stats->ipackets = rx_total;
	tap_stats->ibytes = rx_bytes_total;
	tap_stats->ierrors = ierrors;
	tap_stats->rx_nombuf = rx_nombuf;
	tap_stats->opackets = tx_total;
	tap_stats->oerrors = tx_err_total;
	tap_stats->obytes = tx_bytes_total;
	return 0;
}

static void
tap_stats_reset(struct rte_eth_dev *dev)
{
	int i;
	struct pmd_internals *pmd = dev->data->dev_private;

	for (i = 0; i < RTE_PMD_TAP_MAX_QUEUES; i++) {
		pmd->rxq[i].stats.ipackets = 0;
		pmd->rxq[i].stats.ibytes = 0;
		pmd->rxq[i].stats.ierrors = 0;
		pmd->rxq[i].stats.rx_nombuf = 0;

		pmd->txq[i].stats.opackets = 0;
		pmd->txq[i].stats.errs = 0;
		pmd->txq[i].stats.obytes = 0;
	}
}

static void
tap_dev_close(struct rte_eth_dev *dev)
{
	int i;
	struct pmd_internals *internals = dev->data->dev_private;
	struct pmd_process_private *process_private = dev->process_private;

	tap_link_set_down(dev);
	tap_flow_flush(dev, NULL);
	tap_flow_implicit_flush(internals, NULL);

	for (i = 0; i < RTE_PMD_TAP_MAX_QUEUES; i++) {
		if (process_private->rxq_fds[i] != -1) {
			close(process_private->rxq_fds[i]);
			process_private->rxq_fds[i] = -1;
		}
		if (process_private->txq_fds[i] != -1) {
			close(process_private->txq_fds[i]);
			process_private->txq_fds[i] = -1;
		}
	}

	if (internals->remote_if_index) {
		/* Restore initial remote state */
		ioctl(internals->ioctl_sock, SIOCSIFFLAGS,
				&internals->remote_initial_flags);
	}

	if (internals->ka_fd != -1) {
		close(internals->ka_fd);
		internals->ka_fd = -1;
	}
	/*
	 * Since the TUN device has no more open file descriptors,
	 * it will be removed by the kernel.
	 */
}

static void
tap_rx_queue_release(void *queue)
{
	struct rx_queue *rxq = queue;
	struct pmd_process_private *process_private;

	if (!rxq)
		return;
	process_private = rte_eth_devices[rxq->in_port].process_private;
	if (process_private->rxq_fds[rxq->queue_id] > 0) {
		close(process_private->rxq_fds[rxq->queue_id]);
		process_private->rxq_fds[rxq->queue_id] = -1;
		rte_pktmbuf_free(rxq->pool);
		rte_free(rxq->iovecs);
		rxq->pool = NULL;
		rxq->iovecs = NULL;
	}
}

static void
tap_tx_queue_release(void *queue)
{
	struct tx_queue *txq = queue;
	struct pmd_process_private *process_private;

	if (!txq)
		return;
	process_private = rte_eth_devices[txq->out_port].process_private;

	if (process_private->txq_fds[txq->queue_id] > 0) {
		close(process_private->txq_fds[txq->queue_id]);
		process_private->txq_fds[txq->queue_id] = -1;
	}
}

static int
tap_link_update(struct rte_eth_dev *dev, int wait_to_complete __rte_unused)
{
	struct rte_eth_link *dev_link = &dev->data->dev_link;
	struct pmd_internals *pmd = dev->data->dev_private;
	struct ifreq ifr = { .ifr_flags = 0 };

	if (pmd->remote_if_index) {
		tap_ioctl(pmd, SIOCGIFFLAGS, &ifr, 0, REMOTE_ONLY);
		if (!(ifr.ifr_flags & IFF_UP) ||
		    !(ifr.ifr_flags & IFF_RUNNING)) {
			dev_link->link_status = ETH_LINK_DOWN;
			return 0;
		}
	}
	tap_ioctl(pmd, SIOCGIFFLAGS, &ifr, 0, LOCAL_ONLY);
	dev_link->link_status =
		((ifr.ifr_flags & IFF_UP) && (ifr.ifr_flags & IFF_RUNNING) ?
		 ETH_LINK_UP :
		 ETH_LINK_DOWN);
	return 0;
}

static void
tap_promisc_enable(struct rte_eth_dev *dev)
{
	struct pmd_internals *pmd = dev->data->dev_private;
	struct ifreq ifr = { .ifr_flags = IFF_PROMISC };

	dev->data->promiscuous = 1;
	tap_ioctl(pmd, SIOCSIFFLAGS, &ifr, 1, LOCAL_AND_REMOTE);
	if (pmd->remote_if_index && !pmd->flow_isolate)
		tap_flow_implicit_create(pmd, TAP_REMOTE_PROMISC);
}

static void
tap_promisc_disable(struct rte_eth_dev *dev)
{
	struct pmd_internals *pmd = dev->data->dev_private;
	struct ifreq ifr = { .ifr_flags = IFF_PROMISC };

	dev->data->promiscuous = 0;
	tap_ioctl(pmd, SIOCSIFFLAGS, &ifr, 0, LOCAL_AND_REMOTE);
	if (pmd->remote_if_index && !pmd->flow_isolate)
		tap_flow_implicit_destroy(pmd, TAP_REMOTE_PROMISC);
}

static void
tap_allmulti_enable(struct rte_eth_dev *dev)
{
	struct pmd_internals *pmd = dev->data->dev_private;
	struct ifreq ifr = { .ifr_flags = IFF_ALLMULTI };

	dev->data->all_multicast = 1;
	tap_ioctl(pmd, SIOCSIFFLAGS, &ifr, 1, LOCAL_AND_REMOTE);
	if (pmd->remote_if_index && !pmd->flow_isolate)
		tap_flow_implicit_create(pmd, TAP_REMOTE_ALLMULTI);
}

static void
tap_allmulti_disable(struct rte_eth_dev *dev)
{
	struct pmd_internals *pmd = dev->data->dev_private;
	struct ifreq ifr = { .ifr_flags = IFF_ALLMULTI };

	dev->data->all_multicast = 0;
	tap_ioctl(pmd, SIOCSIFFLAGS, &ifr, 0, LOCAL_AND_REMOTE);
	if (pmd->remote_if_index && !pmd->flow_isolate)
		tap_flow_implicit_destroy(pmd, TAP_REMOTE_ALLMULTI);
}

static int
tap_mac_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr)
{
	struct pmd_internals *pmd = dev->data->dev_private;
	enum ioctl_mode mode = LOCAL_ONLY;
	struct ifreq ifr;
	int ret;

	if (pmd->type == ETH_TUNTAP_TYPE_TUN) {
		TAP_LOG(ERR, "%s: can't set MAC address for TUN",
			dev->device->name);
		return -ENOTSUP;
	}

	if (is_zero_ether_addr(mac_addr)) {
		TAP_LOG(ERR, "%s: can't set an empty MAC address",
			dev->device->name);
		return -EINVAL;
	}
	/* Check the actual current MAC address on the tap netdevice */
	ret = tap_ioctl(pmd, SIOCGIFHWADDR, &ifr, 0, LOCAL_ONLY);
	if (ret < 0)
		return ret;
	if (is_same_ether_addr((struct ether_addr *)&ifr.ifr_hwaddr.sa_data,
			       mac_addr))
		return 0;
	/* Check the current MAC address on the remote */
	ret = tap_ioctl(pmd, SIOCGIFHWADDR, &ifr, 0, REMOTE_ONLY);
	if (ret < 0)
		return ret;
	if (!is_same_ether_addr((struct ether_addr *)&ifr.ifr_hwaddr.sa_data,
				mac_addr))
		mode = LOCAL_AND_REMOTE;
	ifr.ifr_hwaddr.sa_family = AF_LOCAL;
	rte_memcpy(ifr.ifr_hwaddr.sa_data, mac_addr, ETHER_ADDR_LEN);
	ret = tap_ioctl(pmd, SIOCSIFHWADDR, &ifr, 1, mode);
	if (ret < 0)
		return ret;
	rte_memcpy(&pmd->eth_addr, mac_addr, ETHER_ADDR_LEN);
	if (pmd->remote_if_index && !pmd->flow_isolate) {
		/* Replace MAC redirection rule after a MAC change */
		ret = tap_flow_implicit_destroy(pmd, TAP_REMOTE_LOCAL_MAC);
		if (ret < 0) {
			TAP_LOG(ERR,
				"%s: Couldn't delete MAC redirection rule",
				dev->device->name);
			return ret;
		}
		ret = tap_flow_implicit_create(pmd, TAP_REMOTE_LOCAL_MAC);
		if (ret < 0) {
			TAP_LOG(ERR,
				"%s: Couldn't add MAC redirection rule",
				dev->device->name);
			return ret;
		}
	}

	return 0;
}
1219 * Create private mbuf pool with TAP_GSO_MBUF_SEG_SIZE bytes
1220 * size per mbuf use this pool for both direct and indirect mbufs
1223 struct rte_mempool *mp; /* Mempool for GSO packets */
1225 /* initialize GSO context */
1226 gso_types = DEV_TX_OFFLOAD_TCP_TSO;
1227 snprintf(pool_name, sizeof(pool_name), "mp_%s", dev->device->name);
1228 mp = rte_mempool_lookup((const char *)pool_name);
1230 mp = rte_pktmbuf_pool_create(pool_name, TAP_GSO_MBUFS_NUM,
1231 TAP_GSO_MBUF_CACHE_SIZE, 0,
1232 RTE_PKTMBUF_HEADROOM + TAP_GSO_MBUF_SEG_SIZE,
1235 struct pmd_internals *pmd = dev->data->dev_private;
1236 RTE_LOG(DEBUG, PMD, "%s: failed to create mbuf pool for device %s\n",
1237 pmd->name, dev->device->name);
1242 gso_ctx->direct_pool = mp;
1243 gso_ctx->indirect_pool = mp;
1244 gso_ctx->gso_types = gso_types;
1245 gso_ctx->gso_size = 0; /* gso_size is set in tx_burst() per packet */

static int
tap_setup_queue(struct rte_eth_dev *dev,
		struct pmd_internals *internals,
		uint16_t qid,
		int is_rx)
{
	int ret;
	int *fd;
	int *other_fd;
	const char *dir;
	struct pmd_internals *pmd = dev->data->dev_private;
	struct pmd_process_private *process_private = dev->process_private;
	struct rx_queue *rx = &internals->rxq[qid];
	struct tx_queue *tx = &internals->txq[qid];
	struct rte_gso_ctx *gso_ctx;

	if (is_rx) {
		fd = &process_private->rxq_fds[qid];
		other_fd = &process_private->txq_fds[qid];
		dir = "rx";
		gso_ctx = NULL;
	} else {
		fd = &process_private->txq_fds[qid];
		other_fd = &process_private->rxq_fds[qid];
		dir = "tx";
		gso_ctx = &tx->gso_ctx;
	}
	if (*fd != -1) {
		/* fd for this queue already exists */
		TAP_LOG(DEBUG, "%s: fd %d for %s queue qid %d exists",
			pmd->name, *fd, dir, qid);
		gso_ctx = NULL;
	} else if (*other_fd != -1) {
		/* Only other_fd exists. dup it */
		*fd = dup(*other_fd);
		if (*fd < 0) {
			*fd = -1;
			TAP_LOG(ERR, "%s: dup() failed.", pmd->name);
			return -1;
		}
		TAP_LOG(DEBUG, "%s: dup fd %d for %s queue qid %d (%d)",
			pmd->name, *other_fd, dir, qid, *fd);
	} else {
		/* Both RX and TX fds do not exist (equal -1). Create fd */
		*fd = tun_alloc(pmd, 0);
		if (*fd < 0) {
			*fd = -1; /* restore original value */
			TAP_LOG(ERR, "%s: tun_alloc() failed.", pmd->name);
			return -1;
		}
		TAP_LOG(DEBUG, "%s: add %s queue for qid %d fd %d",
			pmd->name, dir, qid, *fd);
	}

	tx->mtu = &dev->data->mtu;
	rx->rxmode = &dev->data->dev_conf.rxmode;
	if (gso_ctx) {
		ret = tap_gso_ctx_setup(gso_ctx, dev);
		if (ret)
			return -1;
	}

	tx->type = pmd->type;

	return *fd;
}

static int
tap_rx_queue_setup(struct rte_eth_dev *dev,
		   uint16_t rx_queue_id,
		   uint16_t nb_rx_desc,
		   unsigned int socket_id,
		   const struct rte_eth_rxconf *rx_conf __rte_unused,
		   struct rte_mempool *mp)
{
	struct pmd_internals *internals = dev->data->dev_private;
	struct pmd_process_private *process_private = dev->process_private;
	struct rx_queue *rxq = &internals->rxq[rx_queue_id];
	struct rte_mbuf **tmp = &rxq->pool;
	long iov_max = sysconf(_SC_IOV_MAX);

	if (iov_max <= 0) {
		TAP_LOG(WARNING,
			"_SC_IOV_MAX is not defined. Using %d as default",
			TAP_IOV_DEFAULT_MAX);
		iov_max = TAP_IOV_DEFAULT_MAX;
	}
	uint16_t nb_desc = RTE_MIN(nb_rx_desc, iov_max - 1);
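	/* one iovec per RX descriptor, plus iovecs[0] for the tun_pi header */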
	struct iovec (*iovecs)[nb_desc + 1];
	int data_off = RTE_PKTMBUF_HEADROOM;
	int ret = 0;
	int fd;
	int i;

	if (rx_queue_id >= dev->data->nb_rx_queues || !mp) {
		TAP_LOG(WARNING,
			"nb_rx_queues %d too small or mempool NULL",
			dev->data->nb_rx_queues);
		return -1;
	}

	rxq->mp = mp;
	rxq->trigger_seen = 1; /* force initial burst */
	rxq->in_port = dev->data->port_id;
	rxq->queue_id = rx_queue_id;
	rxq->nb_rx_desc = nb_desc;
	iovecs = rte_zmalloc_socket(dev->device->name, sizeof(*iovecs), 0,
				    socket_id);
	if (!iovecs) {
		TAP_LOG(WARNING,
			"%s: Couldn't allocate %d RX descriptors",
			dev->device->name, nb_desc);
		return -ENOMEM;
	}
	rxq->iovecs = iovecs;

	dev->data->rx_queues[rx_queue_id] = rxq;
	fd = tap_setup_queue(dev, internals, rx_queue_id, 1);
	if (fd == -1) {
		ret = fd;
		goto error;
	}

	(*rxq->iovecs)[0].iov_len = sizeof(struct tun_pi);
	(*rxq->iovecs)[0].iov_base = &rxq->pi;

	for (i = 1; i <= nb_desc; i++) {
		*tmp = rte_pktmbuf_alloc(rxq->mp);
		if (!*tmp) {
			TAP_LOG(WARNING,
				"%s: couldn't allocate memory for queue %d",
				dev->device->name, rx_queue_id);
			ret = -ENOMEM;
			goto error;
		}
		(*rxq->iovecs)[i].iov_len = (*tmp)->buf_len - data_off;
		(*rxq->iovecs)[i].iov_base =
			(char *)(*tmp)->buf_addr + data_off;
		data_off = 0;
		tmp = &(*tmp)->next;
	}

	TAP_LOG(DEBUG, "  RX TUNTAP device name %s, qid %d on fd %d",
		internals->name, rx_queue_id,
		process_private->rxq_fds[rx_queue_id]);

	return 0;

error:
	rte_pktmbuf_free(rxq->pool);
	rxq->pool = NULL;
	rte_free(rxq->iovecs);
	rxq->iovecs = NULL;
	return ret;
}

static int
tap_tx_queue_setup(struct rte_eth_dev *dev,
		   uint16_t tx_queue_id,
		   uint16_t nb_tx_desc __rte_unused,
		   unsigned int socket_id __rte_unused,
		   const struct rte_eth_txconf *tx_conf)
{
	struct pmd_internals *internals = dev->data->dev_private;
	struct pmd_process_private *process_private = dev->process_private;
	struct tx_queue *txq;
	int ret;
	uint64_t offloads;

	if (tx_queue_id >= dev->data->nb_tx_queues)
		return -1;
	dev->data->tx_queues[tx_queue_id] = &internals->txq[tx_queue_id];
	txq = dev->data->tx_queues[tx_queue_id];
	txq->out_port = dev->data->port_id;
	txq->queue_id = tx_queue_id;

	offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads;
	txq->csum = !!(offloads &
			(DEV_TX_OFFLOAD_IPV4_CKSUM |
			 DEV_TX_OFFLOAD_UDP_CKSUM |
			 DEV_TX_OFFLOAD_TCP_CKSUM));

	ret = tap_setup_queue(dev, internals, tx_queue_id, 0);
	if (ret == -1)
		return -1;
	TAP_LOG(DEBUG,
		"  TX TUNTAP device name %s, qid %d on fd %d csum %s",
		internals->name, tx_queue_id,
		process_private->txq_fds[tx_queue_id],
		txq->csum ? "on" : "off");

	return 0;
}

static int
tap_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
{
	struct pmd_internals *pmd = dev->data->dev_private;
	struct ifreq ifr = { .ifr_mtu = mtu };
	int err = 0;

	err = tap_ioctl(pmd, SIOCSIFMTU, &ifr, 1, LOCAL_AND_REMOTE);
	if (!err)
		dev->data->mtu = mtu;

	return err;
}

static int
tap_set_mc_addr_list(struct rte_eth_dev *dev __rte_unused,
		     struct ether_addr *mc_addr_set __rte_unused,
		     uint32_t nb_mc_addr __rte_unused)
{
	/*
	 * Nothing to do actually: the tap has no filtering whatsoever, every
	 * packet is received.
	 */
	return 0;
}

static int
tap_nl_msg_handler(struct nlmsghdr *nh, void *arg)
{
	struct rte_eth_dev *dev = arg;
	struct pmd_internals *pmd = dev->data->dev_private;
	struct ifinfomsg *info = NLMSG_DATA(nh);

	if (nh->nlmsg_type != RTM_NEWLINK ||
	    (info->ifi_index != pmd->if_index &&
	     info->ifi_index != pmd->remote_if_index))
		return 0;
	return tap_link_update(dev, 0);
}

static void
tap_dev_intr_handler(void *cb_arg)
{
	struct rte_eth_dev *dev = cb_arg;
	struct pmd_internals *pmd = dev->data->dev_private;

	tap_nl_recv(pmd->intr_handle.fd, tap_nl_msg_handler, dev);
}

static int
tap_lsc_intr_handle_set(struct rte_eth_dev *dev, int set)
{
	struct pmd_internals *pmd = dev->data->dev_private;

	/* In any case, disable interrupt if the conf is no longer there. */
	if (!dev->data->dev_conf.intr_conf.lsc) {
		if (pmd->intr_handle.fd != -1) {
			tap_nl_final(pmd->intr_handle.fd);
			rte_intr_callback_unregister(&pmd->intr_handle,
				tap_dev_intr_handler, dev);
		}
		return 0;
	}

	if (set) {
		pmd->intr_handle.fd = tap_nl_init(RTMGRP_LINK);
		if (unlikely(pmd->intr_handle.fd == -1))
			return -EBADF;
		return rte_intr_callback_register(
			&pmd->intr_handle, tap_dev_intr_handler, dev);
	}

	tap_nl_final(pmd->intr_handle.fd);
	return rte_intr_callback_unregister(&pmd->intr_handle,
					    tap_dev_intr_handler, dev);
}

static int
tap_intr_handle_set(struct rte_eth_dev *dev, int set)
{
	int err;

	err = tap_lsc_intr_handle_set(dev, set);
	if (err)
		return err;
	err = tap_rx_intr_vec_set(dev, set);
	if (err && set)
		tap_lsc_intr_handle_set(dev, 0);
	return err;
}

static const uint32_t*
tap_dev_supported_ptypes_get(struct rte_eth_dev *dev __rte_unused)
{
	static const uint32_t ptypes[] = {
		RTE_PTYPE_INNER_L2_ETHER,
		RTE_PTYPE_INNER_L2_ETHER_VLAN,
		RTE_PTYPE_INNER_L2_ETHER_QINQ,
		RTE_PTYPE_INNER_L3_IPV4,
		RTE_PTYPE_INNER_L3_IPV4_EXT,
		RTE_PTYPE_INNER_L3_IPV6,
		RTE_PTYPE_INNER_L3_IPV6_EXT,
		RTE_PTYPE_INNER_L4_FRAG,
		RTE_PTYPE_INNER_L4_UDP,
		RTE_PTYPE_INNER_L4_TCP,
		RTE_PTYPE_INNER_L4_SCTP,
		RTE_PTYPE_L2_ETHER,
		RTE_PTYPE_L2_ETHER_VLAN,
		RTE_PTYPE_L2_ETHER_QINQ,
		RTE_PTYPE_L3_IPV4,
		RTE_PTYPE_L3_IPV4_EXT,
		RTE_PTYPE_L3_IPV6,
		RTE_PTYPE_L3_IPV6_EXT,
		RTE_PTYPE_L4_FRAG,
		RTE_PTYPE_L4_UDP,
		RTE_PTYPE_L4_TCP,
		RTE_PTYPE_L4_SCTP,
	};

	return ptypes;
}

static int
tap_flow_ctrl_get(struct rte_eth_dev *dev __rte_unused,
		  struct rte_eth_fc_conf *fc_conf)
{
	fc_conf->mode = RTE_FC_NONE;
	return 0;
}

static int
tap_flow_ctrl_set(struct rte_eth_dev *dev __rte_unused,
		  struct rte_eth_fc_conf *fc_conf)
{
	if (fc_conf->mode != RTE_FC_NONE)
		return -ENOTSUP;
	return 0;
}

/**
 * DPDK callback to update the RSS hash configuration.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param[in] rss_conf
 *   RSS configuration data.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
tap_rss_hash_update(struct rte_eth_dev *dev,
		struct rte_eth_rss_conf *rss_conf)
{
	if (rss_conf->rss_hf & TAP_RSS_HF_MASK) {
		rte_errno = EINVAL;
		return -rte_errno;
	}
	if (rss_conf->rss_key && rss_conf->rss_key_len) {
		/*
		 * Currently TAP RSS key is hard coded
		 * and cannot be updated
		 */
		TAP_LOG(ERR,
			"port %u RSS key cannot be updated",
			dev->data->port_id);
		rte_errno = EINVAL;
		return -rte_errno;
	}
	return 0;
}

static int
tap_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
	dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;

	return 0;
}

static int
tap_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
	dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;

	return 0;
}

static int
tap_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
	dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;

	return 0;
}

static int
tap_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
	dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;

	return 0;
}

static const struct eth_dev_ops ops = {
	.dev_start = tap_dev_start,
	.dev_stop = tap_dev_stop,
	.dev_close = tap_dev_close,
	.dev_configure = tap_dev_configure,
	.dev_infos_get = tap_dev_info,
	.rx_queue_setup = tap_rx_queue_setup,
	.tx_queue_setup = tap_tx_queue_setup,
	.rx_queue_start = tap_rx_queue_start,
	.tx_queue_start = tap_tx_queue_start,
	.rx_queue_stop = tap_rx_queue_stop,
	.tx_queue_stop = tap_tx_queue_stop,
	.rx_queue_release = tap_rx_queue_release,
	.tx_queue_release = tap_tx_queue_release,
	.flow_ctrl_get = tap_flow_ctrl_get,
	.flow_ctrl_set = tap_flow_ctrl_set,
	.link_update = tap_link_update,
	.dev_set_link_up = tap_link_set_up,
	.dev_set_link_down = tap_link_set_down,
	.promiscuous_enable = tap_promisc_enable,
	.promiscuous_disable = tap_promisc_disable,
	.allmulticast_enable = tap_allmulti_enable,
	.allmulticast_disable = tap_allmulti_disable,
	.mac_addr_set = tap_mac_set,
	.mtu_set = tap_mtu_set,
	.set_mc_addr_list = tap_set_mc_addr_list,
	.stats_get = tap_stats_get,
	.stats_reset = tap_stats_reset,
	.dev_supported_ptypes_get = tap_dev_supported_ptypes_get,
	.rss_hash_update = tap_rss_hash_update,
	.filter_ctrl = tap_dev_filter_ctrl,
};

static int
eth_dev_tap_create(struct rte_vdev_device *vdev, char *tap_name,
		   char *remote_iface, struct ether_addr *mac_addr,
		   enum rte_tuntap_type type)
{
	int numa_node = rte_socket_id();
	struct rte_eth_dev *dev;
	struct pmd_internals *pmd;
	struct pmd_process_private *process_private;
	struct rte_eth_dev_data *data;
	struct ifreq ifr;
	int i;

	TAP_LOG(DEBUG, "%s device on numa %u",
		tuntap_name, rte_socket_id());

	dev = rte_eth_vdev_allocate(vdev, sizeof(*pmd));
	if (!dev) {
		TAP_LOG(ERR, "%s Unable to allocate device struct",
			tuntap_name);
		goto error_exit_nodev;
	}

	process_private = (struct pmd_process_private *)
		rte_zmalloc_socket(tap_name, sizeof(struct pmd_process_private),
			RTE_CACHE_LINE_SIZE, dev->device->numa_node);

	if (process_private == NULL) {
		TAP_LOG(ERR, "Failed to alloc memory for process private");
		return -1;
	}
	pmd = dev->data->dev_private;
	dev->process_private = process_private;
	pmd->dev = dev;
	snprintf(pmd->name, sizeof(pmd->name), "%s", tap_name);
	pmd->type = type;

	pmd->ioctl_sock = socket(AF_INET, SOCK_DGRAM, 0);
	if (pmd->ioctl_sock == -1) {
		TAP_LOG(ERR,
			"%s Unable to get a socket for management: %s",
			tuntap_name, strerror(errno));
		goto error_exit;
	}

	/* Setup some default values */
	data = dev->data;
	data->dev_private = pmd;
	data->dev_flags = RTE_ETH_DEV_INTR_LSC;
	data->numa_node = numa_node;

	data->dev_link = pmd_link;
	data->mac_addrs = &pmd->eth_addr;
	/* Set the number of RX and TX queues */
	data->nb_rx_queues = 0;
	data->nb_tx_queues = 0;

	dev->dev_ops = &ops;
	dev->rx_pkt_burst = pmd_rx_burst;
	dev->tx_pkt_burst = pmd_tx_burst;

	pmd->intr_handle.type = RTE_INTR_HANDLE_EXT;
	pmd->intr_handle.fd = -1;
	dev->intr_handle = &pmd->intr_handle;

	/* Presetup the fds to -1 as being not valid */
	pmd->ka_fd = -1;
	for (i = 0; i < RTE_PMD_TAP_MAX_QUEUES; i++) {
		process_private->rxq_fds[i] = -1;
		process_private->txq_fds[i] = -1;
	}

	if (pmd->type == ETH_TUNTAP_TYPE_TAP) {
		if (is_zero_ether_addr(mac_addr))
			eth_random_addr((uint8_t *)&pmd->eth_addr);
		else
			rte_memcpy(&pmd->eth_addr, mac_addr, sizeof(*mac_addr));
	}

	/*
	 * Allocate a TUN device keep-alive file descriptor that will only be
	 * closed when the TUN device itself is closed or removed.
	 * This keep-alive file descriptor will guarantee that the TUN device
	 * exists even when all of its queues are closed
	 */
	pmd->ka_fd = tun_alloc(pmd, 1);
	if (pmd->ka_fd == -1) {
		TAP_LOG(ERR, "Unable to create %s interface", tuntap_name);
		goto error_exit;
	}
	TAP_LOG(DEBUG, "allocated %s", pmd->name);

	ifr.ifr_mtu = dev->data->mtu;
	if (tap_ioctl(pmd, SIOCSIFMTU, &ifr, 1, LOCAL_AND_REMOTE) < 0)
		goto error_exit;

	if (pmd->type == ETH_TUNTAP_TYPE_TAP) {
		memset(&ifr, 0, sizeof(struct ifreq));
		ifr.ifr_hwaddr.sa_family = AF_LOCAL;
		rte_memcpy(ifr.ifr_hwaddr.sa_data, &pmd->eth_addr,
			ETHER_ADDR_LEN);
		if (tap_ioctl(pmd, SIOCSIFHWADDR, &ifr, 0, LOCAL_ONLY) < 0)
			goto error_exit;
	}

	/*
	 * Set up everything related to rte_flow:
	 * - netlink socket
	 * - tap / remote if_index
	 * - mandatory QDISCs
	 * - rte_flow actual/implicit lists
	 * - implicit rules
	 */
	pmd->nlsk_fd = tap_nl_init(0);
	if (pmd->nlsk_fd == -1) {
		TAP_LOG(WARNING, "%s: failed to create netlink socket.",
			pmd->name);
		goto disable_rte_flow;
	}
	pmd->if_index = if_nametoindex(pmd->name);
	if (!pmd->if_index) {
		TAP_LOG(ERR, "%s: failed to get if_index.", pmd->name);
		goto disable_rte_flow;
	}
	if (qdisc_create_multiq(pmd->nlsk_fd, pmd->if_index) < 0) {
		TAP_LOG(ERR, "%s: failed to create multiq qdisc.",
			pmd->name);
		goto disable_rte_flow;
	}
	if (qdisc_create_ingress(pmd->nlsk_fd, pmd->if_index) < 0) {
		TAP_LOG(ERR, "%s: failed to create ingress qdisc.",
			pmd->name);
		goto disable_rte_flow;
	}
	LIST_INIT(&pmd->flows);

	if (strlen(remote_iface)) {
		pmd->remote_if_index = if_nametoindex(remote_iface);
		if (!pmd->remote_if_index) {
			TAP_LOG(ERR, "%s: failed to get %s if_index.",
				pmd->name, remote_iface);
			goto error_remote;
		}
		snprintf(pmd->remote_iface, RTE_ETH_NAME_MAX_LEN,
			 "%s", remote_iface);

		/* Save state of remote device */
		tap_ioctl(pmd, SIOCGIFFLAGS,
			  &pmd->remote_initial_flags, 0, REMOTE_ONLY);

		/* Replicate remote MAC address */
		if (tap_ioctl(pmd, SIOCGIFHWADDR, &ifr, 0, REMOTE_ONLY) < 0) {
			TAP_LOG(ERR, "%s: failed to get %s MAC address.",
				pmd->name, pmd->remote_iface);
			goto error_remote;
		}
		rte_memcpy(&pmd->eth_addr, ifr.ifr_hwaddr.sa_data,
			   ETHER_ADDR_LEN);
		/* The desired MAC is already in ifreq after SIOCGIFHWADDR. */
		if (tap_ioctl(pmd, SIOCSIFHWADDR, &ifr, 0, LOCAL_ONLY) < 0) {
			TAP_LOG(ERR, "%s: failed to set %s MAC address.",
				pmd->name, remote_iface);
			goto error_remote;
		}

		/*
		 * Flush usually returns negative value because it tries to
		 * delete every QDISC (and on a running device, one QDISC at
		 * least is needed). Ignore negative return value.
		 */
		qdisc_flush(pmd->nlsk_fd, pmd->remote_if_index);
		if (qdisc_create_ingress(pmd->nlsk_fd,
					 pmd->remote_if_index) < 0) {
			TAP_LOG(ERR, "%s: failed to create ingress qdisc.",
				pmd->remote_iface);
			goto error_remote;
		}
		LIST_INIT(&pmd->implicit_flows);
		if (tap_flow_implicit_create(pmd, TAP_REMOTE_TX) < 0 ||
		    tap_flow_implicit_create(pmd, TAP_REMOTE_LOCAL_MAC) < 0 ||
		    tap_flow_implicit_create(pmd, TAP_REMOTE_BROADCAST) < 0 ||
		    tap_flow_implicit_create(pmd, TAP_REMOTE_BROADCASTV6) < 0) {
			TAP_LOG(ERR,
				"%s: failed to create implicit rules.",
				pmd->name);
			goto error_remote;
		}
	}

	rte_eth_dev_probing_finish(dev);
	return 0;

disable_rte_flow:
	TAP_LOG(ERR, " Disabling rte flow support: %s(%d)",
		strerror(errno), errno);
	if (strlen(remote_iface)) {
		TAP_LOG(ERR, "Remote feature requires flow support.");
		goto error_exit;
	}
	rte_eth_dev_probing_finish(dev);
	return 0;

error_remote:
	TAP_LOG(ERR, " Can't set up remote feature: %s(%d)",
		strerror(errno), errno);
	tap_flow_implicit_flush(pmd, NULL);

error_exit:
	if (pmd->ioctl_sock > 0)
		close(pmd->ioctl_sock);
	/* mac_addrs must not be freed alone because part of dev_private */
	dev->data->mac_addrs = NULL;
	rte_eth_dev_release_port(dev);

error_exit_nodev:
	TAP_LOG(ERR, "%s Unable to initialize %s",
		tuntap_name, rte_vdev_device_name(vdev));

	return -EINVAL;
}

static int
set_interface_name(const char *key __rte_unused,
		   const char *value,
		   void *extra_args)
{
	char *name = (char *)extra_args;

	if (value)
		strlcpy(name, value, RTE_ETH_NAME_MAX_LEN);
	else
		/* use tap%d which causes kernel to choose next available */
		strlcpy(name, DEFAULT_TAP_NAME "%d", RTE_ETH_NAME_MAX_LEN);

	return 0;
}

static int
set_remote_iface(const char *key __rte_unused,
		 const char *value,
		 void *extra_args)
{
	char *name = (char *)extra_args;

	if (value)
		strlcpy(name, value, RTE_ETH_NAME_MAX_LEN);

	return 0;
}

static int parse_user_mac(struct ether_addr *user_mac,
			  const char *value)
{
	unsigned int index = 0;
	char mac_temp[strlen(ETH_TAP_USR_MAC_FMT) + 1], *mac_byte = NULL;

	if (user_mac == NULL || value == NULL)
		return 0;

	strlcpy(mac_temp, value, sizeof(mac_temp));
	mac_byte = strtok(mac_temp, ":");

	while ((mac_byte != NULL) &&
	       (strlen(mac_byte) <= 2) &&
	       (strlen(mac_byte) == strspn(mac_byte,
					   ETH_TAP_CMP_MAC_FMT))) {
		user_mac->addr_bytes[index++] = strtoul(mac_byte, NULL, 16);
		mac_byte = strtok(NULL, ":");
	}

	return index;
}

static int
set_mac_type(const char *key __rte_unused,
	     const char *value,
	     void *extra_args)
{
	struct ether_addr *user_mac = extra_args;

	if (!value)
		return 0;

	if (!strncasecmp(ETH_TAP_MAC_FIXED, value, strlen(ETH_TAP_MAC_FIXED))) {
		static int iface_idx;

		/* fixed mac = 00:64:74:61:70:<iface_idx> */
		memcpy((char *)user_mac->addr_bytes, "\0dtap",
			ETHER_ADDR_LEN);
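		/* "\0dtap" is 0x00, 'd' = 0x64, 't' = 0x74, 'a' = 0x61, 'p' = 0x70 */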
		user_mac->addr_bytes[ETHER_ADDR_LEN - 1] = iface_idx++ + '0';
		goto success;
	}

	if (parse_user_mac(user_mac, value) != 6)
		goto error;
success:
	TAP_LOG(DEBUG, "TAP user MAC param (%s)", value);
	return 0;

error:
	TAP_LOG(ERR, "TAP user MAC (%s) is not in format (%s|%s)",
		value, ETH_TAP_MAC_FIXED, ETH_TAP_USR_MAC_FMT);
	return -1;
}

/*
 * Open a TUN interface device. The TUN PMD
 * 1) sets tap_type as false
 * 2) takes the iface as an argument.
 * 3) sets the speed to 10G, as the interface is virtual.
 */
static int
rte_pmd_tun_probe(struct rte_vdev_device *dev)
{
	const char *name, *params;
	int ret;
	struct rte_kvargs *kvlist = NULL;
	char tun_name[RTE_ETH_NAME_MAX_LEN];
	char remote_iface[RTE_ETH_NAME_MAX_LEN];
	struct rte_eth_dev *eth_dev;

	strcpy(tuntap_name, "TUN");

	name = rte_vdev_device_name(dev);
	params = rte_vdev_device_args(dev);
	memset(remote_iface, 0, RTE_ETH_NAME_MAX_LEN);

	if (rte_eal_process_type() == RTE_PROC_SECONDARY &&
	    strlen(params) == 0) {
		eth_dev = rte_eth_dev_attach_secondary(name);
		if (!eth_dev) {
			TAP_LOG(ERR, "Failed to probe %s", name);
			return -1;
		}
		eth_dev->dev_ops = &ops;
		eth_dev->device = &dev->device;
		rte_eth_dev_probing_finish(eth_dev);
		return 0;
	}

	/* use tun%d which causes kernel to choose next available */
	strlcpy(tun_name, DEFAULT_TUN_NAME "%d", RTE_ETH_NAME_MAX_LEN);

	if (params && (params[0] != '\0')) {
		TAP_LOG(DEBUG, "parameters (%s)", params);

		kvlist = rte_kvargs_parse(params, valid_arguments);
		if (kvlist) {
			if (rte_kvargs_count(kvlist, ETH_TAP_IFACE_ARG) == 1) {
				ret = rte_kvargs_process(kvlist,
					ETH_TAP_IFACE_ARG,
					&set_interface_name,
					tun_name);

				if (ret == -1)
					goto leave;
			}
		}
	}
	pmd_link.link_speed = ETH_SPEED_NUM_10G;

	TAP_LOG(NOTICE, "Initializing pmd_tun for %s", name);

	ret = eth_dev_tap_create(dev, tun_name, remote_iface, 0,
				 ETH_TUNTAP_TYPE_TUN);

leave:
	if (ret == -1) {
		TAP_LOG(ERR, "Failed to create pmd for %s as %s",
			name, tun_name);
	}
	rte_kvargs_free(kvlist);

	return ret;
}

/* Secondary process requests the queue file descriptors from the primary. */
static int
tap_mp_attach_queues(const char *port_name, struct rte_eth_dev *dev)
{
	int ret;
	struct timespec timeout = {.tv_sec = 1, .tv_nsec = 0};
	struct rte_mp_msg request, *reply;
	struct rte_mp_reply replies;
	struct ipc_queues *request_param = (struct ipc_queues *)request.param;
	struct ipc_queues *reply_param;
	struct pmd_process_private *process_private = dev->process_private;
	int queue, fd_iterator;

	/* Prepare the request */
	memset(&request, 0, sizeof(request));
	strlcpy(request.name, TAP_MP_KEY, sizeof(request.name));
	strlcpy(request_param->port_name, port_name,
		sizeof(request_param->port_name));
	request.len_param = sizeof(*request_param);
	/* Send request and receive reply */
	ret = rte_mp_request_sync(&request, &replies, &timeout);
	if (ret < 0 || replies.nb_received != 1) {
		TAP_LOG(ERR, "Failed to request queues from primary: %d",
			rte_errno);
		return -1;
	}
	reply = &replies.msgs[0];
	reply_param = (struct ipc_queues *)reply->param;
	TAP_LOG(DEBUG, "Received IPC reply for %s", reply_param->port_name);

	/* Attach the queues from received file descriptors */
	if (reply_param->rxq_count + reply_param->txq_count != reply->num_fds) {
		TAP_LOG(ERR, "Unexpected number of fds received");
		return -1;
	}

	dev->data->nb_rx_queues = reply_param->rxq_count;
	dev->data->nb_tx_queues = reply_param->txq_count;
	fd_iterator = 0;
	for (queue = 0; queue < reply_param->rxq_count; queue++)
		process_private->rxq_fds[queue] = reply->fds[fd_iterator++];
	for (queue = 0; queue < reply_param->txq_count; queue++)
		process_private->txq_fds[queue] = reply->fds[fd_iterator++];

	return 0;
}

/* Send the queue file descriptors from the primary process to the secondary. */
static int
tap_mp_sync_queues(const struct rte_mp_msg *request, const void *peer)
{
	struct rte_eth_dev *dev;
	struct pmd_process_private *process_private;
	struct rte_mp_msg reply;
	const struct ipc_queues *request_param =
		(const struct ipc_queues *)request->param;
	struct ipc_queues *reply_param =
		(struct ipc_queues *)reply.param;
	uint16_t port_id;
	int queue;
	int ret;

	/* Get requested port */
	TAP_LOG(DEBUG, "Received IPC request for %s", request_param->port_name);
	ret = rte_eth_dev_get_port_by_name(request_param->port_name, &port_id);
	if (ret) {
		TAP_LOG(ERR, "Failed to get port id for %s",
			request_param->port_name);
		return -1;
	}
	dev = &rte_eth_devices[port_id];
	process_private = dev->process_private;

	/* Fill file descriptors for all queues */
	reply.num_fds = 0;
	reply_param->rxq_count = 0;
	if (dev->data->nb_rx_queues + dev->data->nb_tx_queues >
			RTE_MP_MAX_FD_NUM) {
		TAP_LOG(ERR, "Number of rx/tx queues exceeds max number of fds");
		return -1;
	}

	for (queue = 0; queue < dev->data->nb_rx_queues; queue++) {
		reply.fds[reply.num_fds++] = process_private->rxq_fds[queue];
		reply_param->rxq_count++;
	}
	RTE_ASSERT(reply_param->rxq_count == dev->data->nb_rx_queues);

	reply_param->txq_count = 0;
	for (queue = 0; queue < dev->data->nb_tx_queues; queue++) {
		reply.fds[reply.num_fds++] = process_private->txq_fds[queue];
		reply_param->txq_count++;
	}
	RTE_ASSERT(reply_param->txq_count == dev->data->nb_tx_queues);

	/* Send reply */
	strlcpy(reply.name, request->name, sizeof(reply.name));
	strlcpy(reply_param->port_name, request_param->port_name,
		sizeof(reply_param->port_name));
	reply.len_param = sizeof(*reply_param);
	if (rte_mp_reply(&reply, peer) < 0) {
		TAP_LOG(ERR, "Failed to reply an IPC request to sync queues");
		return -1;
	}
	return 0;
}

/* Open a TAP interface device.
 */
static int
rte_pmd_tap_probe(struct rte_vdev_device *dev)
{
	const char *name, *params;
	int ret;
	struct rte_kvargs *kvlist = NULL;
	int speed;
	char tap_name[RTE_ETH_NAME_MAX_LEN];
	char remote_iface[RTE_ETH_NAME_MAX_LEN];
	struct ether_addr user_mac = { .addr_bytes = {0} };
	struct rte_eth_dev *eth_dev;
	int tap_devices_count_increased = 0;

	strcpy(tuntap_name, "TAP");

	name = rte_vdev_device_name(dev);
	params = rte_vdev_device_args(dev);

	if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
		eth_dev = rte_eth_dev_attach_secondary(name);
		if (!eth_dev) {
			TAP_LOG(ERR, "Failed to probe %s", name);
			return -1;
		}
		eth_dev->dev_ops = &ops;
		eth_dev->device = &dev->device;
		eth_dev->rx_pkt_burst = pmd_rx_burst;
		eth_dev->tx_pkt_burst = pmd_tx_burst;
		if (!rte_eal_primary_proc_alive(NULL)) {
			TAP_LOG(ERR, "Primary process is missing");
			return -1;
		}
		eth_dev->process_private = (struct pmd_process_private *)
			rte_zmalloc_socket(name,
				sizeof(struct pmd_process_private),
				RTE_CACHE_LINE_SIZE,
				eth_dev->device->numa_node);
		if (eth_dev->process_private == NULL) {
			TAP_LOG(ERR,
				"Failed to alloc memory for process private");
			return -1;
		}

		ret = tap_mp_attach_queues(name, eth_dev);
		if (ret != 0)
			return -1;
		rte_eth_dev_probing_finish(eth_dev);
		return 0;
	}

	speed = ETH_SPEED_NUM_10G;

	/* use tap%d which causes kernel to choose next available */
	strlcpy(tap_name, DEFAULT_TAP_NAME "%d", RTE_ETH_NAME_MAX_LEN);
	memset(remote_iface, 0, RTE_ETH_NAME_MAX_LEN);

	if (params && (params[0] != '\0')) {
		TAP_LOG(DEBUG, "parameters (%s)", params);

		kvlist = rte_kvargs_parse(params, valid_arguments);
		if (kvlist) {
			if (rte_kvargs_count(kvlist, ETH_TAP_IFACE_ARG) == 1) {
				ret = rte_kvargs_process(kvlist,
					ETH_TAP_IFACE_ARG,
					&set_interface_name,
					tap_name);
				if (ret == -1)
					goto leave;
			}

			if (rte_kvargs_count(kvlist, ETH_TAP_REMOTE_ARG) == 1) {
				ret = rte_kvargs_process(kvlist,
					ETH_TAP_REMOTE_ARG,
					&set_remote_iface,
					remote_iface);
				if (ret == -1)
					goto leave;
			}

			if (rte_kvargs_count(kvlist, ETH_TAP_MAC_ARG) == 1) {
				ret = rte_kvargs_process(kvlist,
					ETH_TAP_MAC_ARG,
					&set_mac_type,
					&user_mac);
				if (ret == -1)
					goto leave;
			}
		}
	}
	pmd_link.link_speed = speed;

	TAP_LOG(NOTICE, "Initializing pmd_tap for %s as %s",
		name, tap_name);

	/* Register IPC feed callback */
	if (!tap_devices_count) {
		ret = rte_mp_action_register(TAP_MP_KEY, tap_mp_sync_queues);
		if (ret < 0) {
			TAP_LOG(ERR, "%s: Failed to register IPC callback: %s",
				tuntap_name, strerror(rte_errno));
			goto leave;
		}
	}
	tap_devices_count++;
	tap_devices_count_increased = 1;
	ret = eth_dev_tap_create(dev, tap_name, remote_iface, &user_mac,
		ETH_TUNTAP_TYPE_TAP);

leave:
	if (ret == -1) {
		TAP_LOG(ERR, "Failed to create pmd for %s as %s",
			name, tap_name);
		if (tap_devices_count_increased == 1) {
			if (tap_devices_count == 1)
				rte_mp_action_unregister(TAP_MP_KEY);
			tap_devices_count--;
		}
	}
	rte_kvargs_free(kvlist);

	return ret;
}

/* detach a TUNTAP device.
 */
static int
rte_pmd_tap_remove(struct rte_vdev_device *dev)
{
	struct rte_eth_dev *eth_dev = NULL;
	struct pmd_internals *internals;
	struct pmd_process_private *process_private;
	int i;

	/* find the ethdev entry */
	eth_dev = rte_eth_dev_allocated(rte_vdev_device_name(dev));
	if (!eth_dev)
		return -ENODEV;

	/* mac_addrs must not be freed alone because part of dev_private */
	eth_dev->data->mac_addrs = NULL;

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return rte_eth_dev_release_port(eth_dev);

	internals = eth_dev->data->dev_private;
	process_private = eth_dev->process_private;

	TAP_LOG(DEBUG, "Closing %s Ethernet device on numa %u",
		(internals->type == ETH_TUNTAP_TYPE_TAP) ? "TAP" : "TUN",
		rte_socket_id());

	if (internals->nlsk_fd) {
		tap_flow_flush(eth_dev, NULL);
		tap_flow_implicit_flush(internals, NULL);
		tap_nl_final(internals->nlsk_fd);
	}
	for (i = 0; i < RTE_PMD_TAP_MAX_QUEUES; i++) {
		if (process_private->rxq_fds[i] != -1) {
			close(process_private->rxq_fds[i]);
			process_private->rxq_fds[i] = -1;
		}
		if (process_private->txq_fds[i] != -1) {
			close(process_private->txq_fds[i]);
			process_private->txq_fds[i] = -1;
		}
	}

	close(internals->ioctl_sock);
	rte_free(eth_dev->process_private);
	if (tap_devices_count == 1)
		rte_mp_action_unregister(TAP_MP_KEY);
	tap_devices_count--;
	rte_eth_dev_release_port(eth_dev);

	if (internals->ka_fd != -1) {
		close(internals->ka_fd);
		internals->ka_fd = -1;
	}

	return 0;
}

static struct rte_vdev_driver pmd_tun_drv = {
	.probe = rte_pmd_tun_probe,
	.remove = rte_pmd_tap_remove,
};

static struct rte_vdev_driver pmd_tap_drv = {
	.probe = rte_pmd_tap_probe,
	.remove = rte_pmd_tap_remove,
};

RTE_PMD_REGISTER_VDEV(net_tap, pmd_tap_drv);
RTE_PMD_REGISTER_VDEV(net_tun, pmd_tun_drv);
RTE_PMD_REGISTER_ALIAS(net_tap, eth_tap);
RTE_PMD_REGISTER_PARAM_STRING(net_tun,
			      ETH_TAP_IFACE_ARG "=<string> ");
RTE_PMD_REGISTER_PARAM_STRING(net_tap,
			      ETH_TAP_IFACE_ARG "=<string> "
			      ETH_TAP_MAC_ARG "=" ETH_TAP_MAC_ARG_FMT " "
			      ETH_TAP_REMOTE_ARG "=<string>");
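
/*
 * Example usage (illustrative): a TAP port can be created at EAL init with
 *   --vdev=net_tap0,iface=dtap0,mac=fixed
 * and a TUN port with
 *   --vdev=net_tun0,iface=dtun0
 */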
int tap_logtype;

RTE_INIT(tap_init_log)
{
	tap_logtype = rte_log_register("pmd.net.tap");
	if (tap_logtype >= 0)
		rte_log_set_level(tap_logtype, RTE_LOG_NOTICE);
}