New upstream version 18.08
diff --git a/drivers/net/vmxnet3/vmxnet3_ethdev.c b/drivers/net/vmxnet3/vmxnet3_ethdev.c
index 3910991..2613cd1 100644
--- a/drivers/net/vmxnet3/vmxnet3_ethdev.c
+++ b/drivers/net/vmxnet3/vmxnet3_ethdev.c
@@ -1,34 +1,5 @@
-/*-
- *   BSD LICENSE
- *
- *   Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
- *   All rights reserved.
- *
- *   Redistribution and use in source and binary forms, with or without
- *   modification, are permitted provided that the following conditions
- *   are met:
- *
- *     * Redistributions of source code must retain the above copyright
- *       notice, this list of conditions and the following disclaimer.
- *     * Redistributions in binary form must reproduce the above copyright
- *       notice, this list of conditions and the following disclaimer in
- *       the documentation and/or other materials provided with the
- *       distribution.
- *     * Neither the name of Intel Corporation nor the names of its
- *       contributors may be used to endorse or promote products derived
- *       from this software without specific prior written permission.
- *
- *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2015 Intel Corporation
  */
 
 #include <sys/queue.h>
 #include <rte_log.h>
 #include <rte_debug.h>
 #include <rte_pci.h>
-#include <rte_atomic.h>
+#include <rte_bus_pci.h>
 #include <rte_branch_prediction.h>
 #include <rte_memory.h>
 #include <rte_memzone.h>
 #include <rte_eal.h>
 #include <rte_alarm.h>
 #include <rte_ether.h>
-#include <rte_ethdev.h>
+#include <rte_ethdev_driver.h>
 #include <rte_ethdev_pci.h>
 #include <rte_string_fns.h>
 #include <rte_malloc.h>
 
 #define        VMXNET3_TX_MAX_SEG      UINT8_MAX
 
+#define VMXNET3_TX_OFFLOAD_CAP         \
+       (DEV_TX_OFFLOAD_VLAN_INSERT |   \
+        DEV_TX_OFFLOAD_IPV4_CKSUM |    \
+        DEV_TX_OFFLOAD_TCP_CKSUM |     \
+        DEV_TX_OFFLOAD_UDP_CKSUM |     \
+        DEV_TX_OFFLOAD_TCP_TSO |       \
+        DEV_TX_OFFLOAD_MULTI_SEGS)
+
+#define VMXNET3_RX_OFFLOAD_CAP         \
+       (DEV_RX_OFFLOAD_VLAN_STRIP |    \
+        DEV_RX_OFFLOAD_SCATTER |       \
+        DEV_RX_OFFLOAD_IPV4_CKSUM |    \
+        DEV_RX_OFFLOAD_UDP_CKSUM |     \
+        DEV_RX_OFFLOAD_TCP_CKSUM |     \
+        DEV_RX_OFFLOAD_TCP_LRO |       \
+        DEV_RX_OFFLOAD_JUMBO_FRAME |   \
+        DEV_RX_OFFLOAD_CRC_STRIP)
+
 static int eth_vmxnet3_dev_init(struct rte_eth_dev *eth_dev);
 static int eth_vmxnet3_dev_uninit(struct rte_eth_dev *eth_dev);
 static int vmxnet3_dev_configure(struct rte_eth_dev *dev);
@@ -87,7 +76,7 @@ static int __vmxnet3_dev_link_update(struct rte_eth_dev *dev,
 static int vmxnet3_dev_link_update(struct rte_eth_dev *dev,
                                   int wait_to_complete);
 static void vmxnet3_hw_stats_save(struct vmxnet3_hw *hw);
-static void vmxnet3_dev_stats_get(struct rte_eth_dev *dev,
+static int vmxnet3_dev_stats_get(struct rte_eth_dev *dev,
                                  struct rte_eth_stats *stats);
 static int vmxnet3_dev_xstats_get_names(struct rte_eth_dev *dev,
                                        struct rte_eth_xstat_name *xstats,
@@ -100,11 +89,14 @@ static const uint32_t *
 vmxnet3_dev_supported_ptypes_get(struct rte_eth_dev *dev);
 static int vmxnet3_dev_vlan_filter_set(struct rte_eth_dev *dev,
                                       uint16_t vid, int on);
-static void vmxnet3_dev_vlan_offload_set(struct rte_eth_dev *dev, int mask);
-static void vmxnet3_mac_addr_set(struct rte_eth_dev *dev,
+static int vmxnet3_dev_vlan_offload_set(struct rte_eth_dev *dev, int mask);
+static int vmxnet3_mac_addr_set(struct rte_eth_dev *dev,
                                 struct ether_addr *mac_addr);
 static void vmxnet3_interrupt_handler(void *param);
 
+int vmxnet3_logtype_init;
+int vmxnet3_logtype_driver;
+
 /*
  * The set of PCI devices this driver supports
  */
@@ -176,66 +168,14 @@ gpa_zone_reserve(struct rte_eth_dev *dev, uint32_t size,
                if (mz)
                        rte_memzone_free(mz);
                return rte_memzone_reserve_aligned(z_name, size, socket_id,
-                                                  0, align);
+                               RTE_MEMZONE_IOVA_CONTIG, align);
        }
 
        if (mz)
                return mz;
 
-       return rte_memzone_reserve_aligned(z_name, size, socket_id, 0, align);
-}
-
-/**
- * Atomically reads the link status information from global
- * structure rte_eth_dev.
- *
- * @param dev
- *   - Pointer to the structure rte_eth_dev to read from.
- *   - Pointer to the buffer to be saved with the link status.
- *
- * @return
- *   - On success, zero.
- *   - On failure, negative value.
- */
-
-static int
-vmxnet3_dev_atomic_read_link_status(struct rte_eth_dev *dev,
-                                   struct rte_eth_link *link)
-{
-       struct rte_eth_link *dst = link;
-       struct rte_eth_link *src = &(dev->data->dev_link);
-
-       if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
-                               *(uint64_t *)src) == 0)
-               return -1;
-
-       return 0;
-}
-
-/**
- * Atomically writes the link status information into global
- * structure rte_eth_dev.
- *
- * @param dev
- *   - Pointer to the structure rte_eth_dev to write to.
- *   - Pointer to the buffer to be saved with the link status.
- *
- * @return
- *   - On success, zero.
- *   - On failure, negative value.
- */
-static int
-vmxnet3_dev_atomic_write_link_status(struct rte_eth_dev *dev,
-                                    struct rte_eth_link *link)
-{
-       struct rte_eth_link *dst = &(dev->data->dev_link);
-       struct rte_eth_link *src = link;
-
-       if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
-                               *(uint64_t *)src) == 0)
-               return -1;
-
-       return 0;
+       return rte_memzone_reserve_aligned(z_name, size, socket_id,
+                       RTE_MEMZONE_IOVA_CONTIG, align);
 }
 
 /*
@@ -292,6 +232,7 @@ eth_vmxnet3_dev_init(struct rte_eth_dev *eth_dev)
        struct rte_pci_device *pci_dev;
        struct vmxnet3_hw *hw = eth_dev->data->dev_private;
        uint32_t mac_hi, mac_lo, ver;
+       struct rte_eth_link link;
 
        PMD_INIT_FUNC_TRACE();
 
@@ -309,7 +250,6 @@ eth_vmxnet3_dev_init(struct rte_eth_dev *eth_dev)
                return 0;
 
        rte_eth_copy_pci_info(eth_dev, pci_dev);
-       eth_dev->data->dev_flags |= RTE_ETH_DEV_DETACHABLE;
 
        /* Vendor and Device ID need to be set before init of shared code */
        hw->device_id = pci_dev->id.device_id;
@@ -395,6 +335,13 @@ eth_vmxnet3_dev_init(struct rte_eth_dev *eth_dev)
        memset(hw->saved_tx_stats, 0, sizeof(hw->saved_tx_stats));
        memset(hw->saved_rx_stats, 0, sizeof(hw->saved_rx_stats));
 
+       /* set the initial link status */
+       memset(&link, 0, sizeof(link));
+       link.link_duplex = ETH_LINK_FULL_DUPLEX;
+       link.link_speed = ETH_SPEED_NUM_10G;
+       link.link_autoneg = ETH_LINK_FIXED;
+       rte_eth_linkstatus_set(eth_dev, &link);
+
        return 0;
 }
 
@@ -484,7 +431,7 @@ vmxnet3_dev_configure(struct rte_eth_dev *dev)
        memset(mz->addr, 0, mz->len);
 
        hw->shared = mz->addr;
-       hw->sharedPA = mz->phys_addr;
+       hw->sharedPA = mz->iova;
 
        /*
         * Allocate a memzone for Vmxnet3_RxQueueDesc - Vmxnet3_TxQueueDesc
@@ -505,7 +452,7 @@ vmxnet3_dev_configure(struct rte_eth_dev *dev)
        hw->tqd_start = (Vmxnet3_TxQueueDesc *)mz->addr;
        hw->rqd_start = (Vmxnet3_RxQueueDesc *)(hw->tqd_start + hw->num_tx_queues);
 
-       hw->queueDescPA = mz->phys_addr;
+       hw->queueDescPA = mz->iova;
        hw->queue_desc_len = (uint16_t)size;
 
        if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_RSS) {
@@ -521,7 +468,7 @@ vmxnet3_dev_configure(struct rte_eth_dev *dev)
                memset(mz->addr, 0, mz->len);
 
                hw->rss_conf = mz->addr;
-               hw->rss_confPA = mz->phys_addr;
+               hw->rss_confPA = mz->iova;
        }
 
        return 0;
@@ -537,10 +484,10 @@ vmxnet3_write_mac(struct vmxnet3_hw *hw, const uint8_t *addr)
                     addr[0], addr[1], addr[2],
                     addr[3], addr[4], addr[5]);
 
-       val = *(const uint32_t *)addr;
+       memcpy(&val, addr, 4);
        VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_MACL, val);
 
-       val = (addr[5] << 8) | addr[4];
+       memcpy(&val, addr + 4, 2);
        VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_MACH, val);
 }
 
@@ -569,7 +516,7 @@ vmxnet3_dev_setup_memreg(struct rte_eth_dev *dev)
                }
                memset(mz->addr, 0, mz->len);
                hw->memRegs = mz->addr;
-               hw->memRegsPA = mz->phys_addr;
+               hw->memRegsPA = mz->iova;
        }
 
        num = hw->num_rx_queues;
@@ -604,7 +551,7 @@ vmxnet3_dev_setup_memreg(struct rte_eth_dev *dev)
                Vmxnet3_MemoryRegion *mr = &hw->memRegs->memRegs[j];
 
                mr->startPA =
-                       (uintptr_t)STAILQ_FIRST(&mp[i]->mem_list)->phys_addr;
+                       (uintptr_t)STAILQ_FIRST(&mp[i]->mem_list)->iova;
                mr->length = STAILQ_FIRST(&mp[i]->mem_list)->len <= INT32_MAX ?
                        STAILQ_FIRST(&mp[i]->mem_list)->len : INT32_MAX;
                mr->txQueueBits = index[i];
@@ -638,9 +585,12 @@ vmxnet3_setup_driver_shared(struct rte_eth_dev *dev)
        uint32_t mtu = dev->data->mtu;
        Vmxnet3_DriverShared *shared = hw->shared;
        Vmxnet3_DSDevRead *devRead = &shared->devRead;
+       uint64_t rx_offloads = dev->data->dev_conf.rxmode.offloads;
        uint32_t i;
        int ret;
 
+       hw->mtu = mtu;
+
        shared->magic = VMXNET3_REV1_MAGIC;
        devRead->misc.driverInfo.version = VMXNET3_DRIVER_VERSION_NUM;
 
@@ -670,6 +620,8 @@ vmxnet3_setup_driver_shared(struct rte_eth_dev *dev)
                Vmxnet3_TxQueueDesc *tqd = &hw->tqd_start[i];
                vmxnet3_tx_queue_t *txq  = dev->data->tx_queues[i];
 
+               txq->shared = &hw->tqd_start[i];
+
                tqd->ctrl.txNumDeferred  = 0;
                tqd->ctrl.txThreshold    = 1;
                tqd->conf.txRingBasePA   = txq->cmd_ring.basePA;
@@ -690,6 +642,8 @@ vmxnet3_setup_driver_shared(struct rte_eth_dev *dev)
                Vmxnet3_RxQueueDesc *rqd  = &hw->rqd_start[i];
                vmxnet3_rx_queue_t *rxq   = dev->data->rx_queues[i];
 
+               rxq->shared = &hw->rqd_start[i];
+
                rqd->conf.rxRingBasePA[0] = rxq->cmd_ring[0].basePA;
                rqd->conf.rxRingBasePA[1] = rxq->cmd_ring[1].basePA;
                rqd->conf.compRingBasePA  = rxq->comp_ring.basePA;
@@ -711,10 +665,10 @@ vmxnet3_setup_driver_shared(struct rte_eth_dev *dev)
        devRead->rxFilterConf.rxMode = 0;
 
        /* Setting up feature flags */
-       if (dev->data->dev_conf.rxmode.hw_ip_checksum)
+       if (rx_offloads & DEV_RX_OFFLOAD_CHECKSUM)
                devRead->misc.uptFeatures |= VMXNET3_F_RXCSUM;
 
-       if (dev->data->dev_conf.rxmode.enable_lro) {
+       if (rx_offloads & DEV_RX_OFFLOAD_TCP_LRO) {
                devRead->misc.uptFeatures |= VMXNET3_F_LRO;
                devRead->misc.maxNumRxSG = 0;
        }
@@ -730,8 +684,10 @@ vmxnet3_setup_driver_shared(struct rte_eth_dev *dev)
                devRead->rssConfDesc.confPA  = hw->rss_confPA;
        }
 
-       vmxnet3_dev_vlan_offload_set(dev,
-                                    ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK);
+       ret = vmxnet3_dev_vlan_offload_set(dev,
+                       ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK);
+       if (ret)
+               return ret;
 
        vmxnet3_write_mac(hw, dev->data->mac_addrs->addr_bytes);
 
@@ -877,7 +833,10 @@ vmxnet3_dev_stop(struct rte_eth_dev *dev)
 
        /* Clear recorded link status */
        memset(&link, 0, sizeof(link));
-       vmxnet3_dev_atomic_write_link_status(dev, &link);
+       link.link_duplex = ETH_LINK_FULL_DUPLEX;
+       link.link_speed = ETH_SPEED_NUM_10G;
+       link.link_autoneg = ETH_LINK_FIXED;
+       rte_eth_linkstatus_set(dev, &link);
 }
 
 /*
@@ -1034,7 +993,7 @@ vmxnet3_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
        return count;
 }
 
-static void
+static int
 vmxnet3_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
 {
        unsigned int i;
@@ -1078,16 +1037,16 @@ vmxnet3_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
 
                stats->q_errors[i] = rxStats.pktsRxError;
                stats->ierrors += rxStats.pktsRxError;
-               stats->rx_nombuf += rxStats.pktsRxOutOfBuf;
+               stats->imissed += rxStats.pktsRxOutOfBuf;
        }
+
+       return 0;
 }
 
 static void
-vmxnet3_dev_info_get(struct rte_eth_dev *dev,
+vmxnet3_dev_info_get(struct rte_eth_dev *dev __rte_unused,
                     struct rte_eth_dev_info *dev_info)
 {
-       dev_info->pci_dev = RTE_ETH_DEV_TO_PCI(dev);
-
        dev_info->max_rx_queues = VMXNET3_MAX_RX_QUEUES;
        dev_info->max_tx_queues = VMXNET3_MAX_TX_QUEUES;
        dev_info->min_rx_bufsize = 1518 + RTE_PKTMBUF_HEADROOM;
@@ -1095,7 +1054,6 @@ vmxnet3_dev_info_get(struct rte_eth_dev *dev,
        dev_info->speed_capa = ETH_LINK_SPEED_10G;
        dev_info->max_mac_addrs = VMXNET3_MAX_MAC_ADDRS;
 
-       dev_info->default_txconf.txq_flags = ETH_TXQ_FLAGS_NOXSUMSCTP;
        dev_info->flow_type_rss_offloads = VMXNET3_RSS_OFFLOAD_ALL;
 
        dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
@@ -1112,17 +1070,10 @@ vmxnet3_dev_info_get(struct rte_eth_dev *dev,
                .nb_mtu_seg_max = VMXNET3_MAX_TXD_PER_PKT,
        };
 
-       dev_info->rx_offload_capa =
-               DEV_RX_OFFLOAD_VLAN_STRIP |
-               DEV_RX_OFFLOAD_UDP_CKSUM |
-               DEV_RX_OFFLOAD_TCP_CKSUM |
-               DEV_RX_OFFLOAD_TCP_LRO;
-
-       dev_info->tx_offload_capa =
-               DEV_TX_OFFLOAD_VLAN_INSERT |
-               DEV_TX_OFFLOAD_TCP_CKSUM |
-               DEV_TX_OFFLOAD_UDP_CKSUM |
-               DEV_TX_OFFLOAD_TCP_TSO;
+       dev_info->rx_offload_capa = VMXNET3_RX_OFFLOAD_CAP;
+       dev_info->rx_queue_offload_capa = 0;
+       dev_info->tx_offload_capa = VMXNET3_TX_OFFLOAD_CAP;
+       dev_info->tx_queue_offload_capa = 0;
 }
 
 static const uint32_t *
@@ -1139,12 +1090,14 @@ vmxnet3_dev_supported_ptypes_get(struct rte_eth_dev *dev)
        return NULL;
 }
 
-static void
+static int
 vmxnet3_mac_addr_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr)
 {
        struct vmxnet3_hw *hw = dev->data->dev_private;
 
+       ether_addr_copy(mac_addr, (struct ether_addr *)(hw->perm_addr));
        vmxnet3_write_mac(hw, mac_addr->addr_bytes);
+       return 0;
 }
 
 /* return 0 means link status changed, -1 means not changed */
@@ -1153,25 +1106,21 @@ __vmxnet3_dev_link_update(struct rte_eth_dev *dev,
                          __rte_unused int wait_to_complete)
 {
        struct vmxnet3_hw *hw = dev->data->dev_private;
-       struct rte_eth_link old = { 0 }, link;
+       struct rte_eth_link link;
        uint32_t ret;
 
        memset(&link, 0, sizeof(link));
-       vmxnet3_dev_atomic_read_link_status(dev, &old);
 
        VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD, VMXNET3_CMD_GET_LINK);
        ret = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_CMD);
 
-       if (ret & 0x1) {
+       if (ret & 0x1)
                link.link_status = ETH_LINK_UP;
-               link.link_duplex = ETH_LINK_FULL_DUPLEX;
-               link.link_speed = ETH_SPEED_NUM_10G;
-               link.link_autoneg = ETH_LINK_SPEED_FIXED;
-       }
+       link.link_duplex = ETH_LINK_FULL_DUPLEX;
+       link.link_speed = ETH_SPEED_NUM_10G;
+       link.link_autoneg = ETH_LINK_FIXED;
 
-       vmxnet3_dev_atomic_write_link_status(dev, &link);
-
-       return (old.link_status == link.link_status) ? -1 : 0;
+       return rte_eth_linkstatus_set(dev, &link);
 }
 
 static int
@@ -1218,8 +1167,9 @@ vmxnet3_dev_promiscuous_disable(struct rte_eth_dev *dev)
 {
        struct vmxnet3_hw *hw = dev->data->dev_private;
        uint32_t *vf_table = hw->shared->devRead.rxFilterConf.vfTable;
+       uint64_t rx_offloads = dev->data->dev_conf.rxmode.offloads;
 
-       if (dev->data->dev_conf.rxmode.hw_vlan_filter)
+       if (rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
                memcpy(vf_table, hw->shadow_vfta, VMXNET3_VFT_TABLE_SIZE);
        else
                memset(vf_table, 0xff, VMXNET3_VFT_TABLE_SIZE);
@@ -1275,15 +1225,16 @@ vmxnet3_dev_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vid, int on)
        return 0;
 }
 
-static void
+static int
 vmxnet3_dev_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 {
        struct vmxnet3_hw *hw = dev->data->dev_private;
        Vmxnet3_DSDevRead *devRead = &hw->shared->devRead;
        uint32_t *vf_table = devRead->rxFilterConf.vfTable;
+       uint64_t rx_offloads = dev->data->dev_conf.rxmode.offloads;
 
        if (mask & ETH_VLAN_STRIP_MASK) {
-               if (dev->data->dev_conf.rxmode.hw_vlan_strip)
+               if (rx_offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
                        devRead->misc.uptFeatures |= UPT1_F_RXVLAN;
                else
                        devRead->misc.uptFeatures &= ~UPT1_F_RXVLAN;
@@ -1293,7 +1244,7 @@ vmxnet3_dev_vlan_offload_set(struct rte_eth_dev *dev, int mask)
        }
 
        if (mask & ETH_VLAN_FILTER_MASK) {
-               if (dev->data->dev_conf.rxmode.hw_vlan_filter)
+               if (rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
                        memcpy(vf_table, hw->shadow_vfta, VMXNET3_VFT_TABLE_SIZE);
                else
                        memset(vf_table, 0xff, VMXNET3_VFT_TABLE_SIZE);
@@ -1301,6 +1252,8 @@ vmxnet3_dev_vlan_offload_set(struct rte_eth_dev *dev, int mask)
                VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD,
                                       VMXNET3_CMD_UPDATE_VLAN_FILTERS);
        }
+
+       return 0;
 }
 
 static void
@@ -1324,7 +1277,7 @@ vmxnet3_process_events(struct rte_eth_dev *dev)
                if (vmxnet3_dev_link_update(dev, 0) == 0)
                        _rte_eth_dev_callback_process(dev,
                                                      RTE_ETH_EVENT_INTR_LSC,
-                                                     NULL, NULL);
+                                                     NULL);
        }
 
        /* Check if there is an error on xmit/recv queues */
@@ -1366,3 +1319,13 @@ vmxnet3_interrupt_handler(void *param)
 RTE_PMD_REGISTER_PCI(net_vmxnet3, rte_vmxnet3_pmd);
 RTE_PMD_REGISTER_PCI_TABLE(net_vmxnet3, pci_id_vmxnet3_map);
 RTE_PMD_REGISTER_KMOD_DEP(net_vmxnet3, "* igb_uio | uio_pci_generic | vfio-pci");
+
+RTE_INIT(vmxnet3_init_log)
+{
+       vmxnet3_logtype_init = rte_log_register("pmd.net.vmxnet3.init");
+       if (vmxnet3_logtype_init >= 0)
+               rte_log_set_level(vmxnet3_logtype_init, RTE_LOG_NOTICE);
+       vmxnet3_logtype_driver = rte_log_register("pmd.net.vmxnet3.driver");
+       if (vmxnet3_logtype_driver >= 0)
+               rte_log_set_level(vmxnet3_logtype_driver, RTE_LOG_NOTICE);
+}