/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2016-2017 Intel Corporation. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_mbuf.h>
#include <rte_ethdev.h>
#include <rte_ethdev_vdev.h>
#include <rte_malloc.h>
#include <rte_bus_vdev.h>
#include <rte_kvargs.h>
#include <rte_net.h>
#include <rte_debug.h>
#include <rte_ip.h>

#include <sys/types.h>
#include <sys/stat.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/utsname.h>
#include <sys/uio.h>
#include <errno.h>
#include <fcntl.h>
#include <signal.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <strings.h>
#include <unistd.h>
#include <arpa/inet.h>
#include <net/if.h>
#include <linux/if_tun.h>
#include <linux/if_ether.h>

#include <rte_eth_tap.h>
#include <tap_flow.h>
#include <tap_netlink.h>
#include <tap_tcmsgs.h>
/* Linux based path to the TUN device */
#define TUN_TAP_DEV_PATH        "/dev/net/tun"
#define DEFAULT_TAP_NAME        "dtap"

#define ETH_TAP_IFACE_ARG       "iface"
#define ETH_TAP_SPEED_ARG       "speed"
#define ETH_TAP_REMOTE_ARG      "remote"
#define ETH_TAP_MAC_ARG         "mac"
#define ETH_TAP_MAC_FIXED       "fixed"

static struct rte_vdev_driver pmd_tap_drv;

static const char *valid_arguments[] = {
        ETH_TAP_IFACE_ARG,
        ETH_TAP_SPEED_ARG,
        ETH_TAP_REMOTE_ARG,
        ETH_TAP_MAC_ARG,
        NULL
};

static int tap_unit;
static volatile uint32_t tap_trigger;   /* Rx trigger */

static struct rte_eth_link pmd_link = {
        .link_speed = ETH_SPEED_NUM_10G,
        .link_duplex = ETH_LINK_FULL_DUPLEX,
        .link_status = ETH_LINK_DOWN,
        .link_autoneg = ETH_LINK_FIXED,
};
static void
tap_trigger_cb(int sig __rte_unused)
{
        /* Valid trigger values are nonzero */
        tap_trigger = (tap_trigger + 1) | 0x80000000;
}
/* Specifies on what netdevices the ioctl should be applied */
enum ioctl_mode {
        LOCAL_AND_REMOTE,
        LOCAL_ONLY,
        REMOTE_ONLY,
};

static int tap_intr_handle_set(struct rte_eth_dev *dev, int set);
/* Tun/Tap allocation routine
 *
 * name is the name of the interface to use, unless NULL to take the host
 * supplied name.
 */
static int
tun_alloc(struct pmd_internals *pmd)
{
        struct ifreq ifr;
#ifdef IFF_MULTI_QUEUE
        unsigned int features;
#endif
        int fd;

        memset(&ifr, 0, sizeof(struct ifreq));

        /*
         * Do not set IFF_NO_PI as packet information header will be needed
         * to check if a received packet has been truncated.
         */
        ifr.ifr_flags = IFF_TAP;
        snprintf(ifr.ifr_name, IFNAMSIZ, "%s", pmd->name);

        RTE_LOG(DEBUG, PMD, "ifr_name '%s'\n", ifr.ifr_name);

        fd = open(TUN_TAP_DEV_PATH, O_RDWR);
        if (fd < 0) {
                RTE_LOG(ERR, PMD, "Unable to create TAP interface\n");
                goto error;
        }

#ifdef IFF_MULTI_QUEUE
        /* Grab the TUN features to verify we can work multi-queue */
        if (ioctl(fd, TUNGETFEATURES, &features) < 0) {
                RTE_LOG(ERR, PMD, "TAP unable to get TUN/TAP features\n");
                goto error;
        }
        RTE_LOG(DEBUG, PMD, "  TAP Features %08x\n", features);

        if (features & IFF_MULTI_QUEUE) {
                RTE_LOG(DEBUG, PMD, "  Multi-queue support for %d queues\n",
                        RTE_PMD_TAP_MAX_QUEUES);
                ifr.ifr_flags |= IFF_MULTI_QUEUE;
        } else
#endif
        {
                ifr.ifr_flags |= IFF_ONE_QUEUE;
                RTE_LOG(DEBUG, PMD, "  Single queue only support\n");
        }

        /* Set the TUN/TAP configuration and set the name if needed */
        if (ioctl(fd, TUNSETIFF, (void *)&ifr) < 0) {
                RTE_LOG(WARNING, PMD,
                        "Unable to set TUNSETIFF for %s\n",
                        ifr.ifr_name);
                perror("TUNSETIFF");
                goto error;
        }

        /* Always set the file descriptor to non-blocking */
        if (fcntl(fd, F_SETFL, O_NONBLOCK) < 0) {
                RTE_LOG(WARNING, PMD,
                        "Unable to set %s to nonblocking\n",
                        ifr.ifr_name);
                perror("F_SETFL, NONBLOCK");
                goto error;
        }

        /* Set up trigger to optimize empty Rx bursts */
        errno = 0;
        do {
                struct sigaction sa;
                int flags = fcntl(fd, F_GETFL);

                if (flags == -1 || sigaction(SIGIO, NULL, &sa) == -1)
                        break;
                if (sa.sa_handler != tap_trigger_cb) {
                        /*
                         * Make sure SIGIO is not already taken. This is done
                         * as late as possible to leave the application a
                         * chance to set up its own signal handler first.
                         */
                        if (sa.sa_handler != SIG_IGN &&
                            sa.sa_handler != SIG_DFL) {
                                errno = EBUSY;
                                break;
                        }
                        sa = (struct sigaction){
                                .sa_flags = SA_RESTART,
                                .sa_handler = tap_trigger_cb,
                        };
                        if (sigaction(SIGIO, &sa, NULL) == -1)
                                break;
                }
                /* Enable SIGIO on file descriptor */
                fcntl(fd, F_SETFL, flags | O_ASYNC);
                fcntl(fd, F_SETOWN, getpid());
        } while (0);

        if (errno) {
                /* Disable trigger globally in case of error */
                tap_trigger = 0;
                RTE_LOG(WARNING, PMD, "Rx trigger disabled: %s\n",
                        strerror(errno));
        }

        return fd;

error:
        if (fd > 0)
                close(fd);
        return -1;
}
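/*
 * Note on the Rx trigger above (a sketch of the intent, inferred from this
 * file): with O_ASYNC and F_SETOWN armed, the kernel raises SIGIO when the
 * fd becomes readable; tap_trigger_cb() then bumps the global tap_trigger
 * counter. pmd_rx_burst() below compares that counter against the queue's
 * trigger_seen snapshot and skips the readv() syscall entirely while
 * nothing is pending, so an idle poll loop costs one integer compare
 * instead of one syscall per burst.
 */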
static void
tap_verify_csum(struct rte_mbuf *mbuf)
{
        uint32_t l2 = mbuf->packet_type & RTE_PTYPE_L2_MASK;
        uint32_t l3 = mbuf->packet_type & RTE_PTYPE_L3_MASK;
        uint32_t l4 = mbuf->packet_type & RTE_PTYPE_L4_MASK;
        unsigned int l2_len = sizeof(struct ether_hdr);
        unsigned int l3_len;
        uint16_t cksum = 0;
        void *l3_hdr;
        void *l4_hdr;

        if (l2 == RTE_PTYPE_L2_ETHER_VLAN)
                l2_len += 4;
        else if (l2 == RTE_PTYPE_L2_ETHER_QINQ)
                l2_len += 8;
        /* Don't verify checksum for packets with discontinuous L2 header */
        if (unlikely(l2_len + sizeof(struct ipv4_hdr) >
                     rte_pktmbuf_data_len(mbuf)))
                return;
        l3_hdr = rte_pktmbuf_mtod_offset(mbuf, void *, l2_len);
        if (l3 == RTE_PTYPE_L3_IPV4 || l3 == RTE_PTYPE_L3_IPV4_EXT) {
                struct ipv4_hdr *iph = l3_hdr;

                /* ihl contains the number of 4-byte words in the header */
                l3_len = 4 * (iph->version_ihl & 0xf);
                if (unlikely(l2_len + l3_len > rte_pktmbuf_data_len(mbuf)))
                        return;

                cksum = ~rte_raw_cksum(iph, l3_len);
                mbuf->ol_flags |= cksum ?
                        PKT_RX_IP_CKSUM_BAD :
                        PKT_RX_IP_CKSUM_GOOD;
        } else if (l3 == RTE_PTYPE_L3_IPV6) {
                l3_len = sizeof(struct ipv6_hdr);
        } else {
                /* IPv6 extensions are not supported */
                return;
        }
        if (l4 == RTE_PTYPE_L4_UDP || l4 == RTE_PTYPE_L4_TCP) {
                l4_hdr = rte_pktmbuf_mtod_offset(mbuf, void *,
                                                 l2_len + l3_len);
                /* Don't verify checksum for multi-segment packets. */
                if (mbuf->nb_segs > 1)
                        return;
                if (l3 == RTE_PTYPE_L3_IPV4)
                        cksum = ~rte_ipv4_udptcp_cksum(l3_hdr, l4_hdr);
                else if (l3 == RTE_PTYPE_L3_IPV6)
                        cksum = ~rte_ipv6_udptcp_cksum(l3_hdr, l4_hdr);
                mbuf->ol_flags |= cksum ?
                        PKT_RX_L4_CKSUM_BAD :
                        PKT_RX_L4_CKSUM_GOOD;
        }
}
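/*
 * The checks above rely on a standard one's-complement property: the 16-bit
 * sum of a header that already embeds a correct checksum is 0xffff, so
 * ~rte_raw_cksum() == 0 means "good". For a toy two-word header
 * { 0x1234, 0xedcb } (0xedcb == ~0x1234), rte_raw_cksum() yields 0xffff
 * and its negation is 0.
 */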
/* Callback to handle the rx burst of packets to the correct interface and
 * file descriptor(s) in a multi-queue setup.
 */
static uint16_t
pmd_rx_burst(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
{
        struct rx_queue *rxq = queue;
        uint16_t num_rx;
        unsigned long num_rx_bytes = 0;
        uint32_t trigger = tap_trigger;

        if (trigger == rxq->trigger_seen)
                return 0;
        if (trigger)
                rxq->trigger_seen = trigger;
        rte_compiler_barrier();
        for (num_rx = 0; num_rx < nb_pkts; ) {
                struct rte_mbuf *mbuf = rxq->pool;
                struct rte_mbuf *seg = NULL;
                struct rte_mbuf *new_tail = NULL;
                uint16_t data_off = rte_pktmbuf_headroom(mbuf);
                int len;

                len = readv(rxq->fd, *rxq->iovecs,
                            1 + (rxq->rxmode->enable_scatter ?
                                 rxq->nb_rx_desc : 1));
                if (len < (int)sizeof(struct tun_pi))
                        break;

                /* Packet couldn't fit in the provided mbuf */
                if (unlikely(rxq->pi.flags & TUN_PKT_STRIP)) {
                        rxq->stats.ierrors++;
                        continue;
                }

                len -= sizeof(struct tun_pi);

                mbuf->pkt_len = len;
                mbuf->port = rxq->in_port;
                while (1) {
                        struct rte_mbuf *buf = rte_pktmbuf_alloc(rxq->mp);

                        if (unlikely(!buf)) {
                                rxq->stats.rx_nombuf++;
                                /* No new buf has been allocated: do nothing */
                                if (!new_tail || !seg)
                                        goto end;

                                seg->next = NULL;
                                rte_pktmbuf_free(mbuf);

                                goto end;
                        }
                        seg = seg ? seg->next : mbuf;
                        if (rxq->pool == mbuf)
                                rxq->pool = buf;
                        if (new_tail)
                                new_tail->next = buf;
                        new_tail = buf;
                        new_tail->next = seg->next;

                        /* iovecs[0] is reserved for packet info (pi) */
                        (*rxq->iovecs)[mbuf->nb_segs].iov_len =
                                buf->buf_len - data_off;
                        (*rxq->iovecs)[mbuf->nb_segs].iov_base =
                                (char *)buf->buf_addr + data_off;

                        seg->data_len = RTE_MIN(seg->buf_len - data_off, len);
                        seg->data_off = data_off;

                        len -= seg->data_len;
                        if (len <= 0)
                                break;
                        mbuf->nb_segs++;
                        /* First segment has headroom, not the others */
                        data_off = 0;
                }
                seg->next = NULL;
                mbuf->packet_type = rte_net_get_ptype(mbuf, NULL,
                                                      RTE_PTYPE_ALL_MASK);
                if (rxq->rxmode->hw_ip_checksum)
                        tap_verify_csum(mbuf);

                /* account for the receive frame */
                bufs[num_rx++] = mbuf;
                num_rx_bytes += mbuf->pkt_len;
        }
end:
        rxq->stats.ipackets += num_rx;
        rxq->stats.ibytes += num_rx_bytes;

        return num_rx;
}
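/*
 * Rx design note (inferred from the function above): rxq->pool is a chain
 * of pre-allocated mbufs whose data rooms are registered in
 * (*rxq->iovecs)[1..nb_rx_desc], while iovecs[0] always targets rxq->pi
 * (struct tun_pi). After each readv(), every consumed segment is swapped
 * for a freshly allocated mbuf, so the iovec array stays primed for the
 * next read without re-walking the whole chain.
 */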
static void
tap_tx_offload(char *packet, uint64_t ol_flags, unsigned int l2_len,
               unsigned int l3_len)
{
        void *l3_hdr = packet + l2_len;

        if (ol_flags & (PKT_TX_IP_CKSUM | PKT_TX_IPV4)) {
                struct ipv4_hdr *iph = l3_hdr;
                uint16_t cksum;

                iph->hdr_checksum = 0;
                cksum = rte_raw_cksum(iph, l3_len);
                iph->hdr_checksum = (cksum == 0xffff) ? cksum : ~cksum;
        }
        if (ol_flags & PKT_TX_L4_MASK) {
                uint16_t l4_len;
                uint32_t cksum;
                uint16_t *l4_cksum;
                void *l4_hdr;

                l4_hdr = packet + l2_len + l3_len;
                if ((ol_flags & PKT_TX_L4_MASK) == PKT_TX_UDP_CKSUM)
                        l4_cksum = &((struct udp_hdr *)l4_hdr)->dgram_cksum;
                else if ((ol_flags & PKT_TX_L4_MASK) == PKT_TX_TCP_CKSUM)
                        l4_cksum = &((struct tcp_hdr *)l4_hdr)->cksum;
                else
                        return;
                *l4_cksum = 0;
                if (ol_flags & PKT_TX_IPV4) {
                        struct ipv4_hdr *iph = l3_hdr;

                        l4_len = rte_be_to_cpu_16(iph->total_length) - l3_len;
                        cksum = rte_ipv4_phdr_cksum(l3_hdr, 0);
                } else {
                        struct ipv6_hdr *ip6h = l3_hdr;

                        /* payload_len does not include ext headers */
                        l4_len = rte_be_to_cpu_16(ip6h->payload_len) -
                                 l3_len + sizeof(struct ipv6_hdr);
                        cksum = rte_ipv6_phdr_cksum(l3_hdr, 0);
                }
                cksum += rte_raw_cksum(l4_hdr, l4_len);
                cksum = ((cksum & 0xffff0000) >> 16) + (cksum & 0xffff);
                cksum = (~cksum) & 0xffff;
                if (cksum == 0)
                        cksum = 0xffff;
                *l4_cksum = cksum;
        }
}
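/*
 * The folding above is plain RFC 1071 arithmetic: pseudo-header checksum
 * plus raw L4 sum, carries folded back into the low 16 bits, then
 * complemented. A computed value of 0 is transmitted as 0xffff, which
 * matters for UDP where a zero checksum field means "no checksum".
 */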
/* Callback to handle sending packets from the tap interface
 */
static uint16_t
pmd_tx_burst(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
{
        struct tx_queue *txq = queue;
        uint16_t num_tx = 0;
        unsigned long num_tx_bytes = 0;
        uint32_t max_size;
        int i;

        if (unlikely(nb_pkts == 0))
                return 0;

        max_size = *txq->mtu + (ETHER_HDR_LEN + ETHER_CRC_LEN + 4);
        for (i = 0; i < nb_pkts; i++) {
                struct rte_mbuf *mbuf = bufs[num_tx];
                struct iovec iovecs[mbuf->nb_segs + 1];
                struct tun_pi pi = { .flags = 0 };
                struct rte_mbuf *seg = mbuf;
                char m_copy[mbuf->data_len];
                int n;
                int j;

                /* stats.errs will be incremented */
                if (rte_pktmbuf_pkt_len(mbuf) > max_size)
                        break;

                iovecs[0].iov_base = &pi;
                iovecs[0].iov_len = sizeof(pi);
                for (j = 1; j <= mbuf->nb_segs; j++) {
                        iovecs[j].iov_len = rte_pktmbuf_data_len(seg);
                        iovecs[j].iov_base =
                                rte_pktmbuf_mtod(seg, void *);
                        seg = seg->next;
                }
                if (mbuf->ol_flags & (PKT_TX_IP_CKSUM | PKT_TX_IPV4) ||
                    (mbuf->ol_flags & PKT_TX_L4_MASK) == PKT_TX_UDP_CKSUM ||
                    (mbuf->ol_flags & PKT_TX_L4_MASK) == PKT_TX_TCP_CKSUM) {
                        /* Support only packets with all data in the same seg */
                        if (mbuf->nb_segs > 1)
                                break;
                        /* To change checksums, work on a copy of data. */
                        rte_memcpy(m_copy, rte_pktmbuf_mtod(mbuf, void *),
                                   rte_pktmbuf_data_len(mbuf));
                        tap_tx_offload(m_copy, mbuf->ol_flags,
                                       mbuf->l2_len, mbuf->l3_len);
                        iovecs[1].iov_base = m_copy;
                }
                /* copy the tx frame data */
                n = writev(txq->fd, iovecs, mbuf->nb_segs + 1);
                if (n <= 0)
                        break;

                num_tx++;
                num_tx_bytes += mbuf->pkt_len;
                rte_pktmbuf_free(mbuf);
        }

        txq->stats.opackets += num_tx;
        txq->stats.errs += nb_pkts - num_tx;
        txq->stats.obytes += num_tx_bytes;

        return num_tx;
}
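/*
 * Tx note: iovecs[0] always carries a zeroed struct tun_pi (4 bytes)
 * because the fd was opened without IFF_NO_PI in tun_alloc(); the kernel
 * consumes that header on write, and it is never counted in num_tx_bytes.
 */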
static const char *
tap_ioctl_req2str(unsigned long request)
{
        switch (request) {
        case SIOCSIFFLAGS:
                return "SIOCSIFFLAGS";
        case SIOCGIFFLAGS:
                return "SIOCGIFFLAGS";
        case SIOCGIFHWADDR:
                return "SIOCGIFHWADDR";
        case SIOCSIFHWADDR:
                return "SIOCSIFHWADDR";
        case SIOCSIFMTU:
                return "SIOCSIFMTU";
        }
        return "UNKNOWN";
}

static int
tap_ioctl(struct pmd_internals *pmd, unsigned long request,
          struct ifreq *ifr, int set, enum ioctl_mode mode)
{
        short req_flags = ifr->ifr_flags;
        int remote = pmd->remote_if_index &&
                     (mode == REMOTE_ONLY || mode == LOCAL_AND_REMOTE);

        if (!pmd->remote_if_index && mode == REMOTE_ONLY)
                return 0;
        /*
         * If there is a remote netdevice, apply ioctl on it, then apply it on
         * the tap netdevice.
         */
apply:
        if (remote)
                snprintf(ifr->ifr_name, IFNAMSIZ, "%s", pmd->remote_iface);
        else if (mode == LOCAL_ONLY || mode == LOCAL_AND_REMOTE)
                snprintf(ifr->ifr_name, IFNAMSIZ, "%s", pmd->name);
        switch (request) {
        case SIOCSIFFLAGS:
                /* fetch current flags to leave other flags untouched */
                if (ioctl(pmd->ioctl_sock, SIOCGIFFLAGS, ifr) < 0)
                        goto error;
                if (set)
                        ifr->ifr_flags |= req_flags;
                else
                        ifr->ifr_flags &= ~req_flags;
                break;
        case SIOCGIFFLAGS:
        case SIOCGIFHWADDR:
        case SIOCSIFHWADDR:
        case SIOCSIFMTU:
                break;
        default:
                RTE_LOG(WARNING, PMD, "%s: ioctl() called with wrong arg\n",
                        pmd->name);
                return -EINVAL;
        }
        if (ioctl(pmd->ioctl_sock, request, ifr) < 0)
                goto error;
        if (remote-- && mode == LOCAL_AND_REMOTE)
                goto apply;
        return 0;

error:
        RTE_LOG(DEBUG, PMD, "%s: %s(%s) failed: %s(%d)\n", ifr->ifr_name,
                __func__, tap_ioctl_req2str(request), strerror(errno), errno);
        return -errno;
}
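/*
 * Usage sketch: callers pass a template ifreq plus an ioctl_mode. For
 * instance tap_link_set_up() below requests IFF_UP with set = 1 and
 * LOCAL_AND_REMOTE, which applies the ioctl to the remote netdevice first
 * (when one is configured) and then loops back via "goto apply" for the
 * tap netdevice itself.
 */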
static int
tap_link_set_down(struct rte_eth_dev *dev)
{
        struct pmd_internals *pmd = dev->data->dev_private;
        struct ifreq ifr = { .ifr_flags = IFF_UP };

        dev->data->dev_link.link_status = ETH_LINK_DOWN;
        return tap_ioctl(pmd, SIOCSIFFLAGS, &ifr, 0, LOCAL_ONLY);
}

static int
tap_link_set_up(struct rte_eth_dev *dev)
{
        struct pmd_internals *pmd = dev->data->dev_private;
        struct ifreq ifr = { .ifr_flags = IFF_UP };

        dev->data->dev_link.link_status = ETH_LINK_UP;
        return tap_ioctl(pmd, SIOCSIFFLAGS, &ifr, 1, LOCAL_AND_REMOTE);
}
static int
tap_dev_start(struct rte_eth_dev *dev)
{
        int err;

        err = tap_intr_handle_set(dev, 1);
        if (err)
                return err;
        return tap_link_set_up(dev);
}

/* This function gets called when the current port gets stopped.
 */
static void
tap_dev_stop(struct rte_eth_dev *dev)
{
        tap_intr_handle_set(dev, 0);
        tap_link_set_down(dev);
}
static int
tap_dev_configure(struct rte_eth_dev *dev)
{
        if (dev->data->nb_rx_queues > RTE_PMD_TAP_MAX_QUEUES) {
                RTE_LOG(ERR, PMD,
                        "%s: number of rx queues %d exceeds max num of queues %d\n",
                        dev->device->name,
                        dev->data->nb_rx_queues,
                        RTE_PMD_TAP_MAX_QUEUES);
                return -1;
        }
        if (dev->data->nb_tx_queues > RTE_PMD_TAP_MAX_QUEUES) {
                RTE_LOG(ERR, PMD,
                        "%s: number of tx queues %d exceeds max num of queues %d\n",
                        dev->device->name,
                        dev->data->nb_tx_queues,
                        RTE_PMD_TAP_MAX_QUEUES);
                return -1;
        }

        RTE_LOG(INFO, PMD, "%s: %p: TX configured queues number: %u\n",
                dev->device->name, (void *)dev, dev->data->nb_tx_queues);

        RTE_LOG(INFO, PMD, "%s: %p: RX configured queues number: %u\n",
                dev->device->name, (void *)dev, dev->data->nb_rx_queues);

        return 0;
}
static uint32_t
tap_dev_speed_capa(void)
{
        uint32_t speed = pmd_link.link_speed;
        uint32_t capa = 0;

        if (speed >= ETH_SPEED_NUM_10M)
                capa |= ETH_LINK_SPEED_10M;
        if (speed >= ETH_SPEED_NUM_100M)
                capa |= ETH_LINK_SPEED_100M;
        if (speed >= ETH_SPEED_NUM_1G)
                capa |= ETH_LINK_SPEED_1G;
        if (speed >= ETH_SPEED_NUM_2_5G)
                capa |= ETH_LINK_SPEED_2_5G;
        if (speed >= ETH_SPEED_NUM_5G)
                capa |= ETH_LINK_SPEED_5G;
        if (speed >= ETH_SPEED_NUM_10G)
                capa |= ETH_LINK_SPEED_10G;
        if (speed >= ETH_SPEED_NUM_20G)
                capa |= ETH_LINK_SPEED_20G;
        if (speed >= ETH_SPEED_NUM_25G)
                capa |= ETH_LINK_SPEED_25G;
        if (speed >= ETH_SPEED_NUM_40G)
                capa |= ETH_LINK_SPEED_40G;
        if (speed >= ETH_SPEED_NUM_50G)
                capa |= ETH_LINK_SPEED_50G;
        if (speed >= ETH_SPEED_NUM_56G)
                capa |= ETH_LINK_SPEED_56G;
        if (speed >= ETH_SPEED_NUM_100G)
                capa |= ETH_LINK_SPEED_100G;

        return capa;
}
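/*
 * The capability mask is cumulative: a configured link speed of N
 * advertises every ETH_LINK_SPEED_* flag at or below N, so the default
 * 10G setting reports 10M through 10G.
 */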
static void
tap_dev_info(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
        struct pmd_internals *internals = dev->data->dev_private;

        dev_info->if_index = internals->if_index;
        dev_info->max_mac_addrs = 1;
        dev_info->max_rx_pktlen = (uint32_t)ETHER_MAX_VLAN_FRAME_LEN;
        dev_info->max_rx_queues = RTE_PMD_TAP_MAX_QUEUES;
        dev_info->max_tx_queues = RTE_PMD_TAP_MAX_QUEUES;
        dev_info->min_rx_bufsize = 0;
        dev_info->pci_dev = NULL;
        dev_info->speed_capa = tap_dev_speed_capa();
        dev_info->rx_offload_capa = (DEV_RX_OFFLOAD_IPV4_CKSUM |
                                     DEV_RX_OFFLOAD_UDP_CKSUM |
                                     DEV_RX_OFFLOAD_TCP_CKSUM);
        dev_info->tx_offload_capa =
                (DEV_TX_OFFLOAD_IPV4_CKSUM |
                 DEV_TX_OFFLOAD_UDP_CKSUM |
                 DEV_TX_OFFLOAD_TCP_CKSUM);
}
static int
tap_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *tap_stats)
{
        unsigned int i, imax;
        unsigned long rx_total = 0, tx_total = 0, tx_err_total = 0;
        unsigned long rx_bytes_total = 0, tx_bytes_total = 0;
        unsigned long rx_nombuf = 0, ierrors = 0;
        const struct pmd_internals *pmd = dev->data->dev_private;

        /* rx queue statistics */
        imax = (dev->data->nb_rx_queues < RTE_ETHDEV_QUEUE_STAT_CNTRS) ?
                dev->data->nb_rx_queues : RTE_ETHDEV_QUEUE_STAT_CNTRS;
        for (i = 0; i < imax; i++) {
                tap_stats->q_ipackets[i] = pmd->rxq[i].stats.ipackets;
                tap_stats->q_ibytes[i] = pmd->rxq[i].stats.ibytes;
                rx_total += tap_stats->q_ipackets[i];
                rx_bytes_total += tap_stats->q_ibytes[i];
                rx_nombuf += pmd->rxq[i].stats.rx_nombuf;
                ierrors += pmd->rxq[i].stats.ierrors;
        }

        /* tx queue statistics */
        imax = (dev->data->nb_tx_queues < RTE_ETHDEV_QUEUE_STAT_CNTRS) ?
                dev->data->nb_tx_queues : RTE_ETHDEV_QUEUE_STAT_CNTRS;

        for (i = 0; i < imax; i++) {
                tap_stats->q_opackets[i] = pmd->txq[i].stats.opackets;
                tap_stats->q_errors[i] = pmd->txq[i].stats.errs;
                tap_stats->q_obytes[i] = pmd->txq[i].stats.obytes;
                tx_total += tap_stats->q_opackets[i];
                tx_err_total += tap_stats->q_errors[i];
                tx_bytes_total += tap_stats->q_obytes[i];
        }

        tap_stats->ipackets = rx_total;
        tap_stats->ibytes = rx_bytes_total;
        tap_stats->ierrors = ierrors;
        tap_stats->rx_nombuf = rx_nombuf;
        tap_stats->opackets = tx_total;
        tap_stats->oerrors = tx_err_total;
        tap_stats->obytes = tx_bytes_total;
        return 0;
}
static void
tap_stats_reset(struct rte_eth_dev *dev)
{
        int i;
        struct pmd_internals *pmd = dev->data->dev_private;

        for (i = 0; i < RTE_PMD_TAP_MAX_QUEUES; i++) {
                pmd->rxq[i].stats.ipackets = 0;
                pmd->rxq[i].stats.ibytes = 0;
                pmd->rxq[i].stats.ierrors = 0;
                pmd->rxq[i].stats.rx_nombuf = 0;

                pmd->txq[i].stats.opackets = 0;
                pmd->txq[i].stats.errs = 0;
                pmd->txq[i].stats.obytes = 0;
        }
}
static void
tap_dev_close(struct rte_eth_dev *dev)
{
        int i;
        struct pmd_internals *internals = dev->data->dev_private;

        tap_link_set_down(dev);
        tap_flow_flush(dev, NULL);
        tap_flow_implicit_flush(internals, NULL);

        for (i = 0; i < RTE_PMD_TAP_MAX_QUEUES; i++) {
                if (internals->rxq[i].fd != -1) {
                        close(internals->rxq[i].fd);
                        internals->rxq[i].fd = -1;
                }
                if (internals->txq[i].fd != -1) {
                        close(internals->txq[i].fd);
                        internals->txq[i].fd = -1;
                }
        }

        if (internals->remote_if_index) {
                /* Restore initial remote state */
                ioctl(internals->ioctl_sock, SIOCSIFFLAGS,
                      &internals->remote_initial_flags);
        }
}
static void
tap_rx_queue_release(void *queue)
{
        struct rx_queue *rxq = queue;

        if (rxq && (rxq->fd > 0)) {
                close(rxq->fd);
                rxq->fd = -1;
                rte_pktmbuf_free(rxq->pool);
                rte_free(rxq->iovecs);
                rxq->pool = NULL;
                rxq->iovecs = NULL;
        }
}

static void
tap_tx_queue_release(void *queue)
{
        struct tx_queue *txq = queue;

        if (txq && (txq->fd > 0)) {
                close(txq->fd);
                txq->fd = -1;
        }
}
static int
tap_link_update(struct rte_eth_dev *dev, int wait_to_complete __rte_unused)
{
        struct rte_eth_link *dev_link = &dev->data->dev_link;
        struct pmd_internals *pmd = dev->data->dev_private;
        struct ifreq ifr = { .ifr_flags = 0 };

        if (pmd->remote_if_index) {
                tap_ioctl(pmd, SIOCGIFFLAGS, &ifr, 0, REMOTE_ONLY);
                if (!(ifr.ifr_flags & IFF_UP) ||
                    !(ifr.ifr_flags & IFF_RUNNING)) {
                        dev_link->link_status = ETH_LINK_DOWN;
                        return 0;
                }
        }
        tap_ioctl(pmd, SIOCGIFFLAGS, &ifr, 0, LOCAL_ONLY);
        dev_link->link_status =
                ((ifr.ifr_flags & IFF_UP) && (ifr.ifr_flags & IFF_RUNNING) ?
                 ETH_LINK_UP :
                 ETH_LINK_DOWN);
        return 0;
}
static void
tap_promisc_enable(struct rte_eth_dev *dev)
{
        struct pmd_internals *pmd = dev->data->dev_private;
        struct ifreq ifr = { .ifr_flags = IFF_PROMISC };

        dev->data->promiscuous = 1;
        tap_ioctl(pmd, SIOCSIFFLAGS, &ifr, 1, LOCAL_AND_REMOTE);
        if (pmd->remote_if_index && !pmd->flow_isolate)
                tap_flow_implicit_create(pmd, TAP_REMOTE_PROMISC);
}

static void
tap_promisc_disable(struct rte_eth_dev *dev)
{
        struct pmd_internals *pmd = dev->data->dev_private;
        struct ifreq ifr = { .ifr_flags = IFF_PROMISC };

        dev->data->promiscuous = 0;
        tap_ioctl(pmd, SIOCSIFFLAGS, &ifr, 0, LOCAL_AND_REMOTE);
        if (pmd->remote_if_index && !pmd->flow_isolate)
                tap_flow_implicit_destroy(pmd, TAP_REMOTE_PROMISC);
}

static void
tap_allmulti_enable(struct rte_eth_dev *dev)
{
        struct pmd_internals *pmd = dev->data->dev_private;
        struct ifreq ifr = { .ifr_flags = IFF_ALLMULTI };

        dev->data->all_multicast = 1;
        tap_ioctl(pmd, SIOCSIFFLAGS, &ifr, 1, LOCAL_AND_REMOTE);
        if (pmd->remote_if_index && !pmd->flow_isolate)
                tap_flow_implicit_create(pmd, TAP_REMOTE_ALLMULTI);
}

static void
tap_allmulti_disable(struct rte_eth_dev *dev)
{
        struct pmd_internals *pmd = dev->data->dev_private;
        struct ifreq ifr = { .ifr_flags = IFF_ALLMULTI };

        dev->data->all_multicast = 0;
        tap_ioctl(pmd, SIOCSIFFLAGS, &ifr, 0, LOCAL_AND_REMOTE);
        if (pmd->remote_if_index && !pmd->flow_isolate)
                tap_flow_implicit_destroy(pmd, TAP_REMOTE_ALLMULTI);
}
static void
tap_mac_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr)
{
        struct pmd_internals *pmd = dev->data->dev_private;
        enum ioctl_mode mode = LOCAL_ONLY;
        struct ifreq ifr;

        if (is_zero_ether_addr(mac_addr)) {
                RTE_LOG(ERR, PMD, "%s: can't set an empty MAC address\n",
                        dev->device->name);
                return;
        }
        /* Check the actual current MAC address on the tap netdevice */
        if (tap_ioctl(pmd, SIOCGIFHWADDR, &ifr, 0, LOCAL_ONLY) < 0)
                return;
        if (is_same_ether_addr((struct ether_addr *)&ifr.ifr_hwaddr.sa_data,
                               mac_addr))
                return;
        /* Check the current MAC address on the remote */
        if (tap_ioctl(pmd, SIOCGIFHWADDR, &ifr, 0, REMOTE_ONLY) < 0)
                return;
        if (!is_same_ether_addr((struct ether_addr *)&ifr.ifr_hwaddr.sa_data,
                                mac_addr))
                mode = LOCAL_AND_REMOTE;
        ifr.ifr_hwaddr.sa_family = AF_LOCAL;
        rte_memcpy(ifr.ifr_hwaddr.sa_data, mac_addr, ETHER_ADDR_LEN);
        if (tap_ioctl(pmd, SIOCSIFHWADDR, &ifr, 1, mode) < 0)
                return;
        rte_memcpy(&pmd->eth_addr, mac_addr, ETHER_ADDR_LEN);
        if (pmd->remote_if_index && !pmd->flow_isolate) {
                /* Replace MAC redirection rule after a MAC change */
                if (tap_flow_implicit_destroy(pmd, TAP_REMOTE_LOCAL_MAC) < 0) {
                        RTE_LOG(ERR, PMD,
                                "%s: Couldn't delete MAC redirection rule\n",
                                dev->device->name);
                        return;
                }
                if (tap_flow_implicit_create(pmd, TAP_REMOTE_LOCAL_MAC) < 0)
                        RTE_LOG(ERR, PMD,
                                "%s: Couldn't add MAC redirection rule\n",
                                dev->device->name);
        }
}
static int
tap_setup_queue(struct rte_eth_dev *dev,
                struct pmd_internals *internals,
                uint16_t qid,
                int is_rx)
{
        int *fd;
        int *other_fd;
        const char *dir;
        struct pmd_internals *pmd = dev->data->dev_private;
        struct rx_queue *rx = &internals->rxq[qid];
        struct tx_queue *tx = &internals->txq[qid];

        if (is_rx) {
                fd = &rx->fd;
                other_fd = &tx->fd;
                dir = "rx";
        } else {
                fd = &tx->fd;
                other_fd = &rx->fd;
                dir = "tx";
        }
        if (*fd != -1) {
                /* fd for this queue already exists */
                RTE_LOG(DEBUG, PMD, "%s: fd %d for %s queue qid %d exists\n",
                        pmd->name, *fd, dir, qid);
        } else if (*other_fd != -1) {
                /* Only other_fd exists. dup it */
                *fd = dup(*other_fd);
                if (*fd < 0) {
                        *fd = -1;
                        RTE_LOG(ERR, PMD, "%s: dup() failed.\n",
                                pmd->name);
                        return -1;
                }
                RTE_LOG(DEBUG, PMD, "%s: dup fd %d for %s queue qid %d (%d)\n",
                        pmd->name, *other_fd, dir, qid, *fd);
        } else {
                /* Both RX and TX fds do not exist (equal -1). Create fd */
                *fd = tun_alloc(pmd);
                if (*fd < 0) {
                        *fd = -1; /* restore original value */
                        RTE_LOG(ERR, PMD, "%s: tun_alloc() failed.\n",
                                pmd->name);
                        return -1;
                }
                RTE_LOG(DEBUG, PMD, "%s: add %s queue for qid %d fd %d\n",
                        pmd->name, dir, qid, *fd);
        }

        tx->mtu = &dev->data->mtu;
        rx->rxmode = &dev->data->dev_conf.rxmode;

        return *fd;
}
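/*
 * Queue fd sharing, as implemented above: Rx and Tx queues with the same
 * qid share one TUN fd. Whichever side is set up first creates the fd via
 * tun_alloc(); the second side dup()s it so each copy can later be closed
 * independently by its release callback.
 */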
static int
tap_rx_queue_setup(struct rte_eth_dev *dev,
                   uint16_t rx_queue_id,
                   uint16_t nb_rx_desc,
                   unsigned int socket_id,
                   const struct rte_eth_rxconf *rx_conf __rte_unused,
                   struct rte_mempool *mp)
{
        struct pmd_internals *internals = dev->data->dev_private;
        struct rx_queue *rxq = &internals->rxq[rx_queue_id];
        struct rte_mbuf **tmp = &rxq->pool;
        long iov_max = sysconf(_SC_IOV_MAX);
        uint16_t nb_desc = RTE_MIN(nb_rx_desc, iov_max - 1);
        struct iovec (*iovecs)[nb_desc + 1];
        int data_off = RTE_PKTMBUF_HEADROOM;
        int ret = 0;
        int fd;
        int i;

        if (rx_queue_id >= dev->data->nb_rx_queues || !mp) {
                RTE_LOG(WARNING, PMD,
                        "nb_rx_queues %d too small or mempool NULL\n",
                        dev->data->nb_rx_queues);
                return -1;
        }

        rxq->mp = mp;
        rxq->trigger_seen = 1; /* force initial burst */
        rxq->in_port = dev->data->port_id;
        rxq->nb_rx_desc = nb_desc;
        iovecs = rte_zmalloc_socket(dev->device->name, sizeof(*iovecs), 0,
                                    socket_id);
        if (!iovecs) {
                RTE_LOG(WARNING, PMD,
                        "%s: Couldn't allocate %d RX descriptors\n",
                        dev->device->name, nb_desc);
                return -ENOMEM;
        }
        rxq->iovecs = iovecs;

        dev->data->rx_queues[rx_queue_id] = rxq;
        fd = tap_setup_queue(dev, internals, rx_queue_id, 1);
        if (fd == -1) {
                ret = fd;
                goto error;
        }

        (*rxq->iovecs)[0].iov_len = sizeof(struct tun_pi);
        (*rxq->iovecs)[0].iov_base = &rxq->pi;

        for (i = 1; i <= nb_desc; i++) {
                *tmp = rte_pktmbuf_alloc(rxq->mp);
                if (!*tmp) {
                        RTE_LOG(WARNING, PMD,
                                "%s: couldn't allocate memory for queue %d\n",
                                dev->device->name, rx_queue_id);
                        ret = -ENOMEM;
                        goto error;
                }
                (*rxq->iovecs)[i].iov_len = (*tmp)->buf_len - data_off;
                (*rxq->iovecs)[i].iov_base =
                        (char *)(*tmp)->buf_addr + data_off;
                data_off = 0;
                tmp = &(*tmp)->next;
        }

        RTE_LOG(DEBUG, PMD, "  RX TAP device name %s, qid %d on fd %d\n",
                internals->name, rx_queue_id, internals->rxq[rx_queue_id].fd);

        return 0;

error:
        rte_pktmbuf_free(rxq->pool);
        rxq->pool = NULL;
        rte_free(rxq->iovecs);
        rxq->iovecs = NULL;
        return ret;
}
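/*
 * Resulting iovec layout for a queue with nb_desc descriptors:
 *   (*rxq->iovecs)[0]          -> rxq->pi (struct tun_pi)
 *   (*rxq->iovecs)[1..nb_desc] -> data rooms of the mbufs chained in
 *                                 rxq->pool
 * Only the first data segment keeps RTE_PKTMBUF_HEADROOM; data_off is
 * zeroed after the first iteration so later segments start at offset 0.
 */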
static int
tap_tx_queue_setup(struct rte_eth_dev *dev,
                   uint16_t tx_queue_id,
                   uint16_t nb_tx_desc __rte_unused,
                   unsigned int socket_id __rte_unused,
                   const struct rte_eth_txconf *tx_conf __rte_unused)
{
        struct pmd_internals *internals = dev->data->dev_private;
        int ret;

        if (tx_queue_id >= dev->data->nb_tx_queues)
                return -1;

        dev->data->tx_queues[tx_queue_id] = &internals->txq[tx_queue_id];
        ret = tap_setup_queue(dev, internals, tx_queue_id, 0);
        if (ret == -1)
                return -1;

        RTE_LOG(DEBUG, PMD, "  TX TAP device name %s, qid %d on fd %d\n",
                internals->name, tx_queue_id, internals->txq[tx_queue_id].fd);

        return 0;
}
static int
tap_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
{
        struct pmd_internals *pmd = dev->data->dev_private;
        struct ifreq ifr = { .ifr_mtu = mtu };
        int err = 0;

        err = tap_ioctl(pmd, SIOCSIFMTU, &ifr, 1, LOCAL_AND_REMOTE);
        if (!err)
                dev->data->mtu = mtu;

        return err;
}
static int
tap_set_mc_addr_list(struct rte_eth_dev *dev __rte_unused,
                     struct ether_addr *mc_addr_set __rte_unused,
                     uint32_t nb_mc_addr __rte_unused)
{
        /*
         * Nothing to do actually: the tap has no filtering whatsoever, every
         * packet is received.
         */
        return 0;
}
static int
tap_nl_msg_handler(struct nlmsghdr *nh, void *arg)
{
        struct rte_eth_dev *dev = arg;
        struct pmd_internals *pmd = dev->data->dev_private;
        struct ifinfomsg *info = NLMSG_DATA(nh);

        if (nh->nlmsg_type != RTM_NEWLINK ||
            (info->ifi_index != pmd->if_index &&
             info->ifi_index != pmd->remote_if_index))
                return 0;
        return tap_link_update(dev, 0);
}
static void
tap_dev_intr_handler(void *cb_arg)
{
        struct rte_eth_dev *dev = cb_arg;
        struct pmd_internals *pmd = dev->data->dev_private;

        nl_recv(pmd->intr_handle.fd, tap_nl_msg_handler, dev);
}
static int
tap_intr_handle_set(struct rte_eth_dev *dev, int set)
{
        struct pmd_internals *pmd = dev->data->dev_private;

        /* In any case, disable interrupt if the conf is no longer there. */
        if (!dev->data->dev_conf.intr_conf.lsc) {
                if (pmd->intr_handle.fd != -1) {
                        nl_final(pmd->intr_handle.fd);
                        rte_intr_callback_unregister(&pmd->intr_handle,
                                                     tap_dev_intr_handler,
                                                     dev);
                        pmd->intr_handle.fd = -1;
                }
                return 0;
        }
        if (set) {
                pmd->intr_handle.fd = nl_init(RTMGRP_LINK);
                if (unlikely(pmd->intr_handle.fd == -1))
                        return -EBADF;
                return rte_intr_callback_register(
                        &pmd->intr_handle, tap_dev_intr_handler, dev);
        }
        nl_final(pmd->intr_handle.fd);
        return rte_intr_callback_unregister(&pmd->intr_handle,
                                            tap_dev_intr_handler, dev);
}
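/*
 * LSC interrupt path, tying the three functions above together: a netlink
 * socket subscribed to RTMGRP_LINK is registered as an external interrupt
 * handle; RTM_NEWLINK messages matching the tap or remote if_index are
 * funneled into tap_link_update(), so applications get link-state-change
 * events without polling SIOCGIFFLAGS.
 */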
static const uint32_t*
tap_dev_supported_ptypes_get(struct rte_eth_dev *dev __rte_unused)
{
        static const uint32_t ptypes[] = {
                RTE_PTYPE_INNER_L2_ETHER,
                RTE_PTYPE_INNER_L2_ETHER_VLAN,
                RTE_PTYPE_INNER_L2_ETHER_QINQ,
                RTE_PTYPE_INNER_L3_IPV4,
                RTE_PTYPE_INNER_L3_IPV4_EXT,
                RTE_PTYPE_INNER_L3_IPV6,
                RTE_PTYPE_INNER_L3_IPV6_EXT,
                RTE_PTYPE_INNER_L4_FRAG,
                RTE_PTYPE_INNER_L4_UDP,
                RTE_PTYPE_INNER_L4_TCP,
                RTE_PTYPE_INNER_L4_SCTP,
                RTE_PTYPE_L2_ETHER,
                RTE_PTYPE_L2_ETHER_VLAN,
                RTE_PTYPE_L2_ETHER_QINQ,
                RTE_PTYPE_L3_IPV4,
                RTE_PTYPE_L3_IPV4_EXT,
                RTE_PTYPE_L3_IPV6,
                RTE_PTYPE_L3_IPV6_EXT,
                RTE_PTYPE_L4_FRAG,
                RTE_PTYPE_L4_UDP,
                RTE_PTYPE_L4_TCP,
                RTE_PTYPE_L4_SCTP,
        };

        return ptypes;
}
static int
tap_flow_ctrl_get(struct rte_eth_dev *dev __rte_unused,
                  struct rte_eth_fc_conf *fc_conf)
{
        fc_conf->mode = RTE_FC_NONE;
        return 0;
}

static int
tap_flow_ctrl_set(struct rte_eth_dev *dev __rte_unused,
                  struct rte_eth_fc_conf *fc_conf)
{
        if (fc_conf->mode != RTE_FC_NONE)
                return -ENOTSUP;
        return 0;
}
static const struct eth_dev_ops ops = {
        .dev_start = tap_dev_start,
        .dev_stop = tap_dev_stop,
        .dev_close = tap_dev_close,
        .dev_configure = tap_dev_configure,
        .dev_infos_get = tap_dev_info,
        .rx_queue_setup = tap_rx_queue_setup,
        .tx_queue_setup = tap_tx_queue_setup,
        .rx_queue_release = tap_rx_queue_release,
        .tx_queue_release = tap_tx_queue_release,
        .flow_ctrl_get = tap_flow_ctrl_get,
        .flow_ctrl_set = tap_flow_ctrl_set,
        .link_update = tap_link_update,
        .dev_set_link_up = tap_link_set_up,
        .dev_set_link_down = tap_link_set_down,
        .promiscuous_enable = tap_promisc_enable,
        .promiscuous_disable = tap_promisc_disable,
        .allmulticast_enable = tap_allmulti_enable,
        .allmulticast_disable = tap_allmulti_disable,
        .mac_addr_set = tap_mac_set,
        .mtu_set = tap_mtu_set,
        .set_mc_addr_list = tap_set_mc_addr_list,
        .stats_get = tap_stats_get,
        .stats_reset = tap_stats_reset,
        .dev_supported_ptypes_get = tap_dev_supported_ptypes_get,
        .filter_ctrl = tap_dev_filter_ctrl,
};
static int
eth_dev_tap_create(struct rte_vdev_device *vdev, char *tap_name,
                   char *remote_iface, int fixed_mac_type)
{
        int numa_node = rte_socket_id();
        struct rte_eth_dev *dev;
        struct pmd_internals *pmd;
        struct rte_eth_dev_data *data;
        struct ifreq ifr;
        int i;

        RTE_LOG(DEBUG, PMD, "  TAP device on numa %u\n", rte_socket_id());

        data = rte_zmalloc_socket(tap_name, sizeof(*data), 0, numa_node);
        if (!data) {
                RTE_LOG(ERR, PMD, "TAP Failed to allocate data\n");
                goto error_exit_nodev;
        }

        dev = rte_eth_vdev_allocate(vdev, sizeof(*pmd));
        if (!dev) {
                RTE_LOG(ERR, PMD, "TAP Unable to allocate device struct\n");
                goto error_exit_nodev;
        }

        pmd = dev->data->dev_private;
        snprintf(pmd->name, sizeof(pmd->name), "%s", tap_name);

        pmd->ioctl_sock = socket(AF_INET, SOCK_DGRAM, 0);
        if (pmd->ioctl_sock == -1) {
                RTE_LOG(ERR, PMD,
                        "TAP Unable to get a socket for management: %s\n",
                        strerror(errno));
                goto error_exit;
        }

        /* Setup some default values */
        rte_memcpy(data, dev->data, sizeof(*data));
        data->dev_private = pmd;
        data->dev_flags = RTE_ETH_DEV_INTR_LSC;
        data->numa_node = numa_node;

        data->dev_link = pmd_link;
        data->mac_addrs = &pmd->eth_addr;
        /* Set the number of RX and TX queues */
        data->nb_rx_queues = 0;
        data->nb_tx_queues = 0;

        dev->data = data;
        dev->dev_ops = &ops;
        dev->rx_pkt_burst = pmd_rx_burst;
        dev->tx_pkt_burst = pmd_tx_burst;

        pmd->intr_handle.type = RTE_INTR_HANDLE_EXT;
        pmd->intr_handle.fd = -1;

        /* Presetup the fds to -1 as being not valid */
        for (i = 0; i < RTE_PMD_TAP_MAX_QUEUES; i++) {
                pmd->rxq[i].fd = -1;
                pmd->txq[i].fd = -1;
        }

        if (fixed_mac_type) {
                /* fixed mac = 00:64:74:61:70:<iface_idx> ("\0dtap") */
                static int iface_idx;
                char mac[ETHER_ADDR_LEN] = "\0dtap";

                mac[ETHER_ADDR_LEN - 1] = iface_idx++;
                rte_memcpy(&pmd->eth_addr, mac, ETHER_ADDR_LEN);
        } else {
                eth_random_addr((uint8_t *)&pmd->eth_addr);
        }

        /* Immediately create the netdevice (this will create the 1st queue). */
        /* rx queue */
        if (tap_setup_queue(dev, pmd, 0, 1) == -1)
                goto error_exit;
        /* tx queue */
        if (tap_setup_queue(dev, pmd, 0, 0) == -1)
                goto error_exit;

        ifr.ifr_mtu = dev->data->mtu;
        if (tap_ioctl(pmd, SIOCSIFMTU, &ifr, 1, LOCAL_AND_REMOTE) < 0)
                goto error_exit;

        memset(&ifr, 0, sizeof(struct ifreq));
        ifr.ifr_hwaddr.sa_family = AF_LOCAL;
        rte_memcpy(ifr.ifr_hwaddr.sa_data, &pmd->eth_addr, ETHER_ADDR_LEN);
        if (tap_ioctl(pmd, SIOCSIFHWADDR, &ifr, 0, LOCAL_ONLY) < 0)
                goto error_exit;

        /*
         * Set up everything related to rte_flow:
         * - netlink socket
         * - tap / remote if_index
         * - mandatory QDISCs
         * - rte_flow actual/implicit lists
         * - implicit rules
         */
        pmd->nlsk_fd = nl_init(0);
        if (pmd->nlsk_fd == -1) {
                RTE_LOG(WARNING, PMD, "%s: failed to create netlink socket.\n",
                        pmd->name);
                goto disable_rte_flow;
        }
        pmd->if_index = if_nametoindex(pmd->name);
        if (!pmd->if_index) {
                RTE_LOG(ERR, PMD, "%s: failed to get if_index.\n", pmd->name);
                goto disable_rte_flow;
        }
        if (qdisc_create_multiq(pmd->nlsk_fd, pmd->if_index) < 0) {
                RTE_LOG(ERR, PMD, "%s: failed to create multiq qdisc.\n",
                        pmd->name);
                goto disable_rte_flow;
        }
        if (qdisc_create_ingress(pmd->nlsk_fd, pmd->if_index) < 0) {
                RTE_LOG(ERR, PMD, "%s: failed to create ingress qdisc.\n",
                        pmd->name);
                goto disable_rte_flow;
        }
        LIST_INIT(&pmd->flows);

        if (strlen(remote_iface)) {
                pmd->remote_if_index = if_nametoindex(remote_iface);
                if (!pmd->remote_if_index) {
                        RTE_LOG(ERR, PMD, "%s: failed to get %s if_index.\n",
                                pmd->name, remote_iface);
                        goto error_remote;
                }
                snprintf(pmd->remote_iface, RTE_ETH_NAME_MAX_LEN,
                         "%s", remote_iface);

                /* Save state of remote device */
                tap_ioctl(pmd, SIOCGIFFLAGS,
                          &pmd->remote_initial_flags, 0, REMOTE_ONLY);

                /* Replicate remote MAC address */
                if (tap_ioctl(pmd, SIOCGIFHWADDR, &ifr, 0, REMOTE_ONLY) < 0) {
                        RTE_LOG(ERR, PMD, "%s: failed to get %s MAC address.\n",
                                pmd->name, pmd->remote_iface);
                        goto error_remote;
                }
                rte_memcpy(&pmd->eth_addr, ifr.ifr_hwaddr.sa_data,
                           ETHER_ADDR_LEN);
                /* The desired MAC is already in ifreq after SIOCGIFHWADDR. */
                if (tap_ioctl(pmd, SIOCSIFHWADDR, &ifr, 0, LOCAL_ONLY) < 0) {
                        RTE_LOG(ERR, PMD, "%s: failed to set MAC address.\n",
                                pmd->name);
                        goto error_remote;
                }

                /*
                 * Flush usually returns negative value because it tries to
                 * delete every QDISC (and on a running device, one QDISC at
                 * least is needed). Ignore negative return value.
                 */
                qdisc_flush(pmd->nlsk_fd, pmd->remote_if_index);
                if (qdisc_create_ingress(pmd->nlsk_fd,
                                         pmd->remote_if_index) < 0) {
                        RTE_LOG(ERR, PMD,
                                "%s: failed to create ingress qdisc.\n",
                                pmd->remote_iface);
                        goto error_remote;
                }
                LIST_INIT(&pmd->implicit_flows);
                if (tap_flow_implicit_create(pmd, TAP_REMOTE_TX) < 0 ||
                    tap_flow_implicit_create(pmd, TAP_REMOTE_LOCAL_MAC) < 0 ||
                    tap_flow_implicit_create(pmd, TAP_REMOTE_BROADCAST) < 0 ||
                    tap_flow_implicit_create(pmd, TAP_REMOTE_BROADCASTV6) < 0) {
                        RTE_LOG(ERR, PMD,
                                "%s: failed to create implicit rules.\n",
                                pmd->name);
                        goto error_remote;
                }
        }

        return 0;

disable_rte_flow:
        RTE_LOG(ERR, PMD, " Disabling rte flow support: %s(%d)\n",
                strerror(errno), errno);
        if (strlen(remote_iface)) {
                RTE_LOG(ERR, PMD, "Remote feature requires flow support.\n");
                goto error_exit;
        }
        return 0;

error_remote:
        RTE_LOG(ERR, PMD, " Can't set up remote feature: %s(%d)\n",
                strerror(errno), errno);
        tap_flow_implicit_flush(pmd, NULL);

error_exit:
        if (pmd->ioctl_sock > 0)
                close(pmd->ioctl_sock);
        rte_eth_dev_release_port(dev);

error_exit_nodev:
        RTE_LOG(ERR, PMD, "TAP Unable to initialize %s\n",
                rte_vdev_device_name(vdev));

        rte_free(data);
        return -EINVAL;
}
static int
set_interface_name(const char *key __rte_unused,
                   const char *value,
                   void *extra_args)
{
        char *name = (char *)extra_args;

        if (value)
                snprintf(name, RTE_ETH_NAME_MAX_LEN - 1, "%s", value);
        else
                snprintf(name, RTE_ETH_NAME_MAX_LEN - 1, "%s%d",
                         DEFAULT_TAP_NAME, (tap_unit - 1));

        return 0;
}

static int
set_interface_speed(const char *key __rte_unused,
                    const char *value,
                    void *extra_args)
{
        *(int *)extra_args = (value) ? atoi(value) : ETH_SPEED_NUM_10G;

        return 0;
}

static int
set_remote_iface(const char *key __rte_unused,
                 const char *value,
                 void *extra_args)
{
        char *name = (char *)extra_args;

        if (value)
                snprintf(name, RTE_ETH_NAME_MAX_LEN, "%s", value);

        return 0;
}

static int
set_mac_type(const char *key __rte_unused,
             const char *value,
             void *extra_args)
{
        if (value &&
            !strncasecmp(ETH_TAP_MAC_FIXED, value, strlen(ETH_TAP_MAC_FIXED)))
                *(int *)extra_args = 1;
        return 0;
}
/* Open a TAP interface device.
 */
static int
rte_pmd_tap_probe(struct rte_vdev_device *dev)
{
        const char *name, *params;
        int ret;
        struct rte_kvargs *kvlist = NULL;
        int speed;
        char tap_name[RTE_ETH_NAME_MAX_LEN];
        char remote_iface[RTE_ETH_NAME_MAX_LEN];
        int fixed_mac_type = 0;

        name = rte_vdev_device_name(dev);
        params = rte_vdev_device_args(dev);

        speed = ETH_SPEED_NUM_10G;
        snprintf(tap_name, sizeof(tap_name), "%s%d",
                 DEFAULT_TAP_NAME, tap_unit++);
        memset(remote_iface, 0, RTE_ETH_NAME_MAX_LEN);

        if (params && (params[0] != '\0')) {
                RTE_LOG(DEBUG, PMD, "parameters (%s)\n", params);

                kvlist = rte_kvargs_parse(params, valid_arguments);
                if (kvlist) {
                        if (rte_kvargs_count(kvlist, ETH_TAP_SPEED_ARG) == 1) {
                                ret = rte_kvargs_process(kvlist,
                                                         ETH_TAP_SPEED_ARG,
                                                         &set_interface_speed,
                                                         &speed);
                                if (ret == -1)
                                        goto leave;
                        }

                        if (rte_kvargs_count(kvlist, ETH_TAP_IFACE_ARG) == 1) {
                                ret = rte_kvargs_process(kvlist,
                                                         ETH_TAP_IFACE_ARG,
                                                         &set_interface_name,
                                                         tap_name);
                                if (ret == -1)
                                        goto leave;
                        }

                        if (rte_kvargs_count(kvlist, ETH_TAP_REMOTE_ARG) == 1) {
                                ret = rte_kvargs_process(kvlist,
                                                         ETH_TAP_REMOTE_ARG,
                                                         &set_remote_iface,
                                                         remote_iface);
                                if (ret == -1)
                                        goto leave;
                        }

                        if (rte_kvargs_count(kvlist, ETH_TAP_MAC_ARG) == 1) {
                                ret = rte_kvargs_process(kvlist,
                                                         ETH_TAP_MAC_ARG,
                                                         &set_mac_type,
                                                         &fixed_mac_type);
                                if (ret == -1)
                                        goto leave;
                        }
                }
        }
        pmd_link.link_speed = speed;

        RTE_LOG(NOTICE, PMD, "Initializing pmd_tap for %s as %s\n",
                name, tap_name);

        ret = eth_dev_tap_create(dev, tap_name, remote_iface, fixed_mac_type);

leave:
        if (ret == -1) {
                RTE_LOG(ERR, PMD, "Failed to create pmd for %s as %s\n",
                        name, tap_name);
                tap_unit--;             /* Restore the unit number */
        }
        rte_kvargs_free(kvlist);

        return ret;
}
/* detach a TAP device.
 */
static int
rte_pmd_tap_remove(struct rte_vdev_device *dev)
{
        struct rte_eth_dev *eth_dev = NULL;
        struct pmd_internals *internals;
        int i;

        RTE_LOG(DEBUG, PMD, "Closing TUN/TAP Ethernet device on numa %u\n",
                rte_socket_id());

        /* find the ethdev entry */
        eth_dev = rte_eth_dev_allocated(rte_vdev_device_name(dev));
        if (!eth_dev)
                return 0;

        internals = eth_dev->data->dev_private;
        if (internals->nlsk_fd) {
                tap_flow_flush(eth_dev, NULL);
                tap_flow_implicit_flush(internals, NULL);
                nl_final(internals->nlsk_fd);
        }
        for (i = 0; i < RTE_PMD_TAP_MAX_QUEUES; i++) {
                if (internals->rxq[i].fd != -1) {
                        close(internals->rxq[i].fd);
                        internals->rxq[i].fd = -1;
                }
                if (internals->txq[i].fd != -1) {
                        close(internals->txq[i].fd);
                        internals->txq[i].fd = -1;
                }
        }

        close(internals->ioctl_sock);
        rte_free(eth_dev->data->dev_private);
        rte_free(eth_dev->data);

        rte_eth_dev_release_port(eth_dev);

        return 0;
}
static struct rte_vdev_driver pmd_tap_drv = {
        .probe = rte_pmd_tap_probe,
        .remove = rte_pmd_tap_remove,
};
RTE_PMD_REGISTER_VDEV(net_tap, pmd_tap_drv);
RTE_PMD_REGISTER_ALIAS(net_tap, eth_tap);
RTE_PMD_REGISTER_PARAM_STRING(net_tap,
                              ETH_TAP_IFACE_ARG "=<string> "
                              ETH_TAP_SPEED_ARG "=<int> "
                              ETH_TAP_MAC_ARG "=" ETH_TAP_MAC_FIXED " "
                              ETH_TAP_REMOTE_ARG "=<string>");
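/*
 * Example usage (a sketch, not part of the driver): the PMD is typically
 * instantiated from the EAL command line, e.g. with testpmd:
 *
 *   testpmd -l 0-3 -n 4 \
 *       --vdev=net_tap0,iface=tap0,speed=10000,remote=eth1,mac=fixed -- -i
 *
 * "iface" names the tap netdevice to create, "remote" mirrors an existing
 * netdevice through the implicit flow rules set up in eth_dev_tap_create(),
 * and "mac=fixed" selects the deterministic 00:64:74:61:70:XX address
 * instead of a random one. The interface names here are illustrative.
 */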