/*
 * Copyright 2008-2014 Cisco Systems, Inc.  All rights reserved.
 * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
 *
 * Copyright (c) 2014, Cisco Systems, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in
 * the documentation and/or other materials provided with the
 * distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
#include <stdio.h>

#include <rte_dev.h>
#include <rte_pci.h>
#include <rte_bus_pci.h>
#include <rte_ethdev.h>
#include <rte_ethdev_pci.h>
#include <rte_kvargs.h>
#include <rte_string_fns.h>

#include "vnic_intr.h"
#include "vnic_cq.h"
#include "vnic_wq.h"
#include "vnic_rq.h"
#include "vnic_enet.h"
#include "enic.h"
#ifdef RTE_LIBRTE_ENIC_DEBUG
#define ENICPMD_FUNC_TRACE() \
	RTE_LOG(DEBUG, PMD, "ENICPMD trace: %s\n", __func__)
#else
#define ENICPMD_FUNC_TRACE() (void)0
#endif
/*
 * The set of PCI devices this driver supports
 */
#define CISCO_PCI_VENDOR_ID 0x1137
static const struct rte_pci_id pci_id_enic_map[] = {
	{ RTE_PCI_DEVICE(CISCO_PCI_VENDOR_ID, PCI_DEVICE_ID_CISCO_VIC_ENET) },
	{ RTE_PCI_DEVICE(CISCO_PCI_VENDOR_ID, PCI_DEVICE_ID_CISCO_VIC_ENET_VF) },
	{.vendor_id = 0, /* sentinel */},
};
#define ENIC_DEVARG_IG_VLAN_REWRITE "ig-vlan-rewrite"
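/*
 * Usage sketch (the PCI address below is hypothetical): the devarg is
 * appended to the EAL PCI device option to select the ingress VLAN
 * rewrite mode at startup, e.g.
 *   -w 0000:0b:00.0,ig-vlan-rewrite=untag
 * See enic_parse_ig_vlan_rewrite() below for the accepted values.
 */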
static int
enicpmd_fdir_ctrl_func(struct rte_eth_dev *eth_dev,
			enum rte_filter_op filter_op, void *arg)
{
	struct enic *enic = pmd_priv(eth_dev);
	int ret = 0;

	ENICPMD_FUNC_TRACE();
	if (filter_op == RTE_ETH_FILTER_NOP)
		return 0;
	if (arg == NULL && filter_op != RTE_ETH_FILTER_FLUSH)
		return -EINVAL;

	switch (filter_op) {
	case RTE_ETH_FILTER_ADD:
	case RTE_ETH_FILTER_UPDATE:
		ret = enic_fdir_add_fltr(enic,
			(struct rte_eth_fdir_filter *)arg);
		break;
	case RTE_ETH_FILTER_DELETE:
		ret = enic_fdir_del_fltr(enic,
			(struct rte_eth_fdir_filter *)arg);
		break;
	case RTE_ETH_FILTER_STATS:
		enic_fdir_stats_get(enic, (struct rte_eth_fdir_stats *)arg);
		break;
	case RTE_ETH_FILTER_FLUSH:
		dev_warning(enic, "unsupported operation %u", filter_op);
		ret = -ENOTSUP;
		break;
	case RTE_ETH_FILTER_INFO:
		enic_fdir_info_get(enic, (struct rte_eth_fdir_info *)arg);
		break;
	default:
		dev_err(enic, "unknown operation %u", filter_op);
		ret = -EINVAL;
		break;
	}
	return ret;
}
static int
enicpmd_dev_filter_ctrl(struct rte_eth_dev *dev,
		     enum rte_filter_type filter_type,
		     enum rte_filter_op filter_op,
		     void *arg)
{
	int ret = 0;

	ENICPMD_FUNC_TRACE();

	switch (filter_type) {
	case RTE_ETH_FILTER_GENERIC:
		if (filter_op != RTE_ETH_FILTER_GET)
			return -EINVAL;
		*(const void **)arg = &enic_flow_ops;
		break;
	case RTE_ETH_FILTER_FDIR:
		ret = enicpmd_fdir_ctrl_func(dev, filter_op, arg);
		break;
	default:
		dev_warning(enic, "Filter type (%d) not supported",
			filter_type);
		ret = -EINVAL;
		break;
	}
	return ret;
}
static void enicpmd_dev_tx_queue_release(void *txq)
{
	ENICPMD_FUNC_TRACE();

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return;

	enic_free_wq(txq);
}
static int enicpmd_dev_setup_intr(struct enic *enic)
{
	int ret;
	unsigned int index;

	ENICPMD_FUNC_TRACE();

	/* Are we done with the init of all the queues? */
	for (index = 0; index < enic->cq_count; index++) {
		if (!enic->cq[index].ctrl)
			break;
	}
	if (enic->cq_count != index)
		return 0;
	for (index = 0; index < enic->wq_count; index++) {
		if (!enic->wq[index].ctrl)
			break;
	}
	if (enic->wq_count != index)
		return 0;
	/* check start of packet (SOP) RQs only in case scatter is disabled. */
	for (index = 0; index < enic->rq_count; index++) {
		if (!enic->rq[enic_rte_rq_idx_to_sop_idx(index)].ctrl)
			break;
	}
	if (enic->rq_count != index)
		return 0;

	ret = enic_alloc_intr_resources(enic);
	if (ret) {
		dev_err(enic, "alloc intr failed\n");
		return ret;
	}
	enic_init_vnic_resources(enic);

	ret = enic_setup_finish(enic);
	if (ret)
		dev_err(enic, "setup could not be finished\n");

	return ret;
}
static int enicpmd_dev_tx_queue_setup(struct rte_eth_dev *eth_dev,
	uint16_t queue_idx,
	uint16_t nb_desc,
	unsigned int socket_id,
	__rte_unused const struct rte_eth_txconf *tx_conf)
{
	int ret;
	struct enic *enic = pmd_priv(eth_dev);

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return -E_RTE_SECONDARY;

	ENICPMD_FUNC_TRACE();
	RTE_ASSERT(queue_idx < enic->conf_wq_count);
	eth_dev->data->tx_queues[queue_idx] = (void *)&enic->wq[queue_idx];

	ret = enic_alloc_wq(enic, queue_idx, socket_id, nb_desc);
	if (ret) {
		dev_err(enic, "error in allocating wq\n");
		return ret;
	}

	return enicpmd_dev_setup_intr(enic);
}
static int enicpmd_dev_tx_queue_start(struct rte_eth_dev *eth_dev,
	uint16_t queue_idx)
{
	struct enic *enic = pmd_priv(eth_dev);

	ENICPMD_FUNC_TRACE();

	enic_start_wq(enic, queue_idx);

	return 0;
}
static int enicpmd_dev_tx_queue_stop(struct rte_eth_dev *eth_dev,
	uint16_t queue_idx)
{
	int ret;
	struct enic *enic = pmd_priv(eth_dev);

	ENICPMD_FUNC_TRACE();

	ret = enic_stop_wq(enic, queue_idx);
	if (ret)
		dev_err(enic, "error in stopping wq %d\n", queue_idx);

	return ret;
}
static int enicpmd_dev_rx_queue_start(struct rte_eth_dev *eth_dev,
	uint16_t queue_idx)
{
	struct enic *enic = pmd_priv(eth_dev);

	ENICPMD_FUNC_TRACE();

	enic_start_rq(enic, queue_idx);

	return 0;
}
static int enicpmd_dev_rx_queue_stop(struct rte_eth_dev *eth_dev,
	uint16_t queue_idx)
{
	int ret;
	struct enic *enic = pmd_priv(eth_dev);

	ENICPMD_FUNC_TRACE();

	ret = enic_stop_rq(enic, queue_idx);
	if (ret)
		dev_err(enic, "error in stopping rq %d\n", queue_idx);

	return ret;
}
static void enicpmd_dev_rx_queue_release(void *rxq)
{
	ENICPMD_FUNC_TRACE();

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return;

	enic_free_rq(rxq);
}
static uint32_t enicpmd_dev_rx_queue_count(struct rte_eth_dev *dev,
					   uint16_t rx_queue_id)
{
	struct enic *enic = pmd_priv(dev);
	uint32_t queue_count = 0;
	struct vnic_cq *cq;
	uint32_t cq_tail;
	uint16_t cq_idx;
	int rq_num;

	rq_num = enic_rte_rq_idx_to_sop_idx(rx_queue_id);
	cq = &enic->cq[enic_cq_rq(enic, rq_num)];
	cq_idx = cq->to_clean;
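	/*
	 * The count of completed descriptors is the distance from the
	 * driver's clean index to the hardware completion-queue tail,
	 * modulo the ring size. Worked example (illustrative numbers,
	 * not from the hardware spec): with desc_count = 64,
	 * to_clean = 60 and cq_tail = 3, the count is
	 * 3 + 64 - 60 = 7 descriptors pending on the queue.
	 */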
	cq_tail = ioread32(&cq->ctrl->cq_tail);

	if (cq_tail < cq_idx)
		cq_tail += cq->ring.desc_count;

	queue_count = cq_tail - cq_idx;

	return queue_count;
}
static int enicpmd_dev_rx_queue_setup(struct rte_eth_dev *eth_dev,
	uint16_t queue_idx,
	uint16_t nb_desc,
	unsigned int socket_id,
	const struct rte_eth_rxconf *rx_conf,
	struct rte_mempool *mp)
{
	int ret;
	struct enic *enic = pmd_priv(eth_dev);

	ENICPMD_FUNC_TRACE();

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return -E_RTE_SECONDARY;
	RTE_ASSERT(enic_rte_rq_idx_to_sop_idx(queue_idx) < enic->conf_rq_count);
	eth_dev->data->rx_queues[queue_idx] =
		(void *)&enic->rq[enic_rte_rq_idx_to_sop_idx(queue_idx)];

	ret = enic_alloc_rq(enic, queue_idx, socket_id, mp, nb_desc,
			    rx_conf->rx_free_thresh);
	if (ret) {
		dev_err(enic, "error in allocating rq\n");
		return ret;
	}

	return enicpmd_dev_setup_intr(enic);
}
static int enicpmd_vlan_filter_set(struct rte_eth_dev *eth_dev,
	uint16_t vlan_id, int on)
{
	struct enic *enic = pmd_priv(eth_dev);
	int err;

	ENICPMD_FUNC_TRACE();
	if (on)
		err = enic_add_vlan(enic, vlan_id);
	else
		err = enic_del_vlan(enic, vlan_id);
	return err;
}
static int enicpmd_vlan_offload_set(struct rte_eth_dev *eth_dev, int mask)
{
	struct enic *enic = pmd_priv(eth_dev);

	ENICPMD_FUNC_TRACE();

	if (mask & ETH_VLAN_STRIP_MASK) {
		if (eth_dev->data->dev_conf.rxmode.hw_vlan_strip)
			enic->ig_vlan_strip_en = 1;
		else
			enic->ig_vlan_strip_en = 0;
	}
	enic_set_rss_nic_cfg(enic);

	if (mask & ETH_VLAN_FILTER_MASK) {
		dev_warning(enic,
			"Configuration of VLAN filter is not supported\n");
	}

	if (mask & ETH_VLAN_EXTEND_MASK) {
		dev_warning(enic,
			"Configuration of extended VLAN is not supported\n");
	}

	return 0;
}
static int enicpmd_dev_configure(struct rte_eth_dev *eth_dev)
{
	int ret;
	struct enic *enic = pmd_priv(eth_dev);

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return -E_RTE_SECONDARY;

	ENICPMD_FUNC_TRACE();
	ret = enic_set_vnic_res(enic);
	if (ret) {
		dev_err(enic, "Set vNIC resource num failed, aborting\n");
		return ret;
	}

	if (eth_dev->data->dev_conf.rxmode.split_hdr_size &&
	    eth_dev->data->dev_conf.rxmode.header_split) {
		/* Enable header-data-split */
		enic_set_hdr_split_size(enic,
			eth_dev->data->dev_conf.rxmode.split_hdr_size);
	}

	enic->hw_ip_checksum = eth_dev->data->dev_conf.rxmode.hw_ip_checksum;
	ret = enicpmd_vlan_offload_set(eth_dev, ETH_VLAN_STRIP_MASK);

	return ret;
}
/*
 * It returns 0 on success.
 */
static int enicpmd_dev_start(struct rte_eth_dev *eth_dev)
{
	struct enic *enic = pmd_priv(eth_dev);

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return -E_RTE_SECONDARY;

	ENICPMD_FUNC_TRACE();
	return enic_enable(enic);
}
/*
 * Stop device: disable rx and tx functions to allow for reconfiguring.
 */
static void enicpmd_dev_stop(struct rte_eth_dev *eth_dev)
{
	struct rte_eth_link link;
	struct enic *enic = pmd_priv(eth_dev);

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return;

	ENICPMD_FUNC_TRACE();
	enic_disable(enic);

	memset(&link, 0, sizeof(link));
	rte_atomic64_cmpset((uint64_t *)&eth_dev->data->dev_link,
		*(uint64_t *)&eth_dev->data->dev_link,
		*(uint64_t *)&link);
}
/*
 * Stop device.
 */
static void enicpmd_dev_close(struct rte_eth_dev *eth_dev)
{
	struct enic *enic = pmd_priv(eth_dev);

	ENICPMD_FUNC_TRACE();
	enic_remove(enic);
}
static int enicpmd_dev_link_update(struct rte_eth_dev *eth_dev,
	__rte_unused int wait_to_complete)
{
	struct enic *enic = pmd_priv(eth_dev);

	ENICPMD_FUNC_TRACE();
	return enic_link_update(enic);
}
static int enicpmd_dev_stats_get(struct rte_eth_dev *eth_dev,
	struct rte_eth_stats *stats)
{
	struct enic *enic = pmd_priv(eth_dev);

	ENICPMD_FUNC_TRACE();
	return enic_dev_stats_get(enic, stats);
}
static void enicpmd_dev_stats_reset(struct rte_eth_dev *eth_dev)
{
	struct enic *enic = pmd_priv(eth_dev);

	ENICPMD_FUNC_TRACE();
	enic_dev_stats_clear(enic);
}
static void enicpmd_dev_info_get(struct rte_eth_dev *eth_dev,
	struct rte_eth_dev_info *device_info)
{
	struct enic *enic = pmd_priv(eth_dev);

	ENICPMD_FUNC_TRACE();
	device_info->pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	/* Scattered Rx uses two receive queues per rx queue exposed to dpdk */
	device_info->max_rx_queues = enic->conf_rq_count / 2;
	device_info->max_tx_queues = enic->conf_wq_count;
	device_info->min_rx_bufsize = ENIC_MIN_MTU;
	/* max MTU + Ethernet header + 4B VLAN tag */
	device_info->max_rx_pktlen = enic->max_mtu + ETHER_HDR_LEN + 4;
	device_info->max_mac_addrs = ENIC_MAX_MAC_ADDR;
	device_info->rx_offload_capa =
		DEV_RX_OFFLOAD_VLAN_STRIP |
		DEV_RX_OFFLOAD_IPV4_CKSUM |
		DEV_RX_OFFLOAD_UDP_CKSUM |
		DEV_RX_OFFLOAD_TCP_CKSUM;
	device_info->tx_offload_capa =
		DEV_TX_OFFLOAD_VLAN_INSERT |
		DEV_TX_OFFLOAD_IPV4_CKSUM |
		DEV_TX_OFFLOAD_UDP_CKSUM |
		DEV_TX_OFFLOAD_TCP_CKSUM |
		DEV_TX_OFFLOAD_TCP_TSO;
	device_info->default_rxconf = (struct rte_eth_rxconf) {
		.rx_free_thresh = ENIC_DEFAULT_RX_FREE_THRESH
	};
}
static const uint32_t *enicpmd_dev_supported_ptypes_get(struct rte_eth_dev *dev)
{
	static const uint32_t ptypes[] = {
		RTE_PTYPE_L2_ETHER,
		RTE_PTYPE_L2_ETHER_VLAN,
		RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
		RTE_PTYPE_L3_IPV6_EXT_UNKNOWN,
		RTE_PTYPE_L4_TCP,
		RTE_PTYPE_L4_UDP,
		RTE_PTYPE_L4_FRAG,
		RTE_PTYPE_L4_NONFRAG,
		RTE_PTYPE_UNKNOWN
	};

	if (dev->rx_pkt_burst == enic_recv_pkts)
		return ptypes;
	return NULL;
}
static void enicpmd_dev_promiscuous_enable(struct rte_eth_dev *eth_dev)
{
	struct enic *enic = pmd_priv(eth_dev);

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return;

	ENICPMD_FUNC_TRACE();

	enic->promisc = 1;
	enic_add_packet_filter(enic);
}
static void enicpmd_dev_promiscuous_disable(struct rte_eth_dev *eth_dev)
{
	struct enic *enic = pmd_priv(eth_dev);

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return;

	ENICPMD_FUNC_TRACE();

	enic->promisc = 0;
	enic_add_packet_filter(enic);
}
static void enicpmd_dev_allmulticast_enable(struct rte_eth_dev *eth_dev)
{
	struct enic *enic = pmd_priv(eth_dev);

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return;

	ENICPMD_FUNC_TRACE();

	enic->allmulti = 1;
	enic_add_packet_filter(enic);
}
static void enicpmd_dev_allmulticast_disable(struct rte_eth_dev *eth_dev)
{
	struct enic *enic = pmd_priv(eth_dev);

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return;

	ENICPMD_FUNC_TRACE();

	enic->allmulti = 0;
	enic_add_packet_filter(enic);
}
static int enicpmd_add_mac_addr(struct rte_eth_dev *eth_dev,
	struct ether_addr *mac_addr,
	__rte_unused uint32_t index, __rte_unused uint32_t pool)
{
	struct enic *enic = pmd_priv(eth_dev);

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return -E_RTE_SECONDARY;

	ENICPMD_FUNC_TRACE();
	return enic_set_mac_address(enic, mac_addr->addr_bytes);
}
static void enicpmd_remove_mac_addr(struct rte_eth_dev *eth_dev, uint32_t index)
{
	struct enic *enic = pmd_priv(eth_dev);

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return;

	ENICPMD_FUNC_TRACE();
	enic_del_mac_address(enic, index);
}
static int enicpmd_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu)
{
	struct enic *enic = pmd_priv(eth_dev);

	ENICPMD_FUNC_TRACE();
	return enic_set_mtu(enic, mtu);
}
static const struct eth_dev_ops enicpmd_eth_dev_ops = {
	.dev_configure = enicpmd_dev_configure,
	.dev_start = enicpmd_dev_start,
	.dev_stop = enicpmd_dev_stop,
	.dev_set_link_up = NULL,
	.dev_set_link_down = NULL,
	.dev_close = enicpmd_dev_close,
	.promiscuous_enable = enicpmd_dev_promiscuous_enable,
	.promiscuous_disable = enicpmd_dev_promiscuous_disable,
	.allmulticast_enable = enicpmd_dev_allmulticast_enable,
	.allmulticast_disable = enicpmd_dev_allmulticast_disable,
	.link_update = enicpmd_dev_link_update,
	.stats_get = enicpmd_dev_stats_get,
	.stats_reset = enicpmd_dev_stats_reset,
	.queue_stats_mapping_set = NULL,
	.dev_infos_get = enicpmd_dev_info_get,
	.dev_supported_ptypes_get = enicpmd_dev_supported_ptypes_get,
	.mtu_set = enicpmd_mtu_set,
	.vlan_filter_set = enicpmd_vlan_filter_set,
	.vlan_tpid_set = NULL,
	.vlan_offload_set = enicpmd_vlan_offload_set,
	.vlan_strip_queue_set = NULL,
	.rx_queue_start = enicpmd_dev_rx_queue_start,
	.rx_queue_stop = enicpmd_dev_rx_queue_stop,
	.tx_queue_start = enicpmd_dev_tx_queue_start,
	.tx_queue_stop = enicpmd_dev_tx_queue_stop,
	.rx_queue_setup = enicpmd_dev_rx_queue_setup,
	.rx_queue_release = enicpmd_dev_rx_queue_release,
	.rx_queue_count = enicpmd_dev_rx_queue_count,
	.rx_descriptor_done = NULL,
	.tx_queue_setup = enicpmd_dev_tx_queue_setup,
	.tx_queue_release = enicpmd_dev_tx_queue_release,
	.dev_led_on = NULL,
	.dev_led_off = NULL,
	.flow_ctrl_get = NULL,
	.flow_ctrl_set = NULL,
	.priority_flow_ctrl_set = NULL,
	.mac_addr_add = enicpmd_add_mac_addr,
	.mac_addr_remove = enicpmd_remove_mac_addr,
	.filter_ctrl = enicpmd_dev_filter_ctrl,
};
static int enic_parse_ig_vlan_rewrite(__rte_unused const char *key,
				      const char *value,
				      void *opaque)
{
	struct enic *enic;

	enic = (struct enic *)opaque;
	if (strcmp(value, "trunk") == 0) {
		/* Trunk mode: always tag */
		enic->ig_vlan_rewrite_mode = IG_VLAN_REWRITE_MODE_DEFAULT_TRUNK;
	} else if (strcmp(value, "untag") == 0) {
		/* Untag default VLAN mode: untag if VLAN = default VLAN */
		enic->ig_vlan_rewrite_mode =
			IG_VLAN_REWRITE_MODE_UNTAG_DEFAULT_VLAN;
	} else if (strcmp(value, "priority") == 0) {
		/*
		 * Priority-tag default VLAN mode: priority tag (VLAN header
		 * with ID=0) if VLAN = default
		 */
		enic->ig_vlan_rewrite_mode =
			IG_VLAN_REWRITE_MODE_PRIORITY_TAG_DEFAULT_VLAN;
	} else if (strcmp(value, "pass") == 0) {
		/* Pass through mode: do not touch tags */
		enic->ig_vlan_rewrite_mode = IG_VLAN_REWRITE_MODE_PASS_THRU;
	} else {
		dev_err(enic, "Invalid value for " ENIC_DEVARG_IG_VLAN_REWRITE
			": expected=trunk|untag|priority|pass given=%s\n",
			value);
		return -EINVAL;
	}
	return 0;
}
static int enic_check_devargs(struct rte_eth_dev *dev)
{
	static const char *const valid_keys[] = {
		ENIC_DEVARG_IG_VLAN_REWRITE,
		NULL};
	struct enic *enic = pmd_priv(dev);
	struct rte_kvargs *kvlist;

	ENICPMD_FUNC_TRACE();

	/* Default to pass-through when no devargs are given */
	enic->ig_vlan_rewrite_mode = IG_VLAN_REWRITE_MODE_PASS_THRU;
	if (!dev->device->devargs)
		return 0;
	kvlist = rte_kvargs_parse(dev->device->devargs->args, valid_keys);
	if (!kvlist)
		return -EINVAL;
	if (rte_kvargs_process(kvlist, ENIC_DEVARG_IG_VLAN_REWRITE,
			       enic_parse_ig_vlan_rewrite, enic) < 0) {
		rte_kvargs_free(kvlist);
		return -EINVAL;
	}
	rte_kvargs_free(kvlist);
	return 0;
}
struct enic *enicpmd_list_head = NULL;

/* Initialize the driver
 * It returns 0 on success.
 */
static int eth_enicpmd_dev_init(struct rte_eth_dev *eth_dev)
{
	struct rte_pci_device *pdev;
	struct rte_pci_addr *addr;
	struct enic *enic = pmd_priv(eth_dev);
	int err;

	ENICPMD_FUNC_TRACE();

	enic->port_id = eth_dev->data->port_id;
	enic->rte_dev = eth_dev;
	eth_dev->dev_ops = &enicpmd_eth_dev_ops;
	eth_dev->rx_pkt_burst = &enic_recv_pkts;
	eth_dev->tx_pkt_burst = &enic_xmit_pkts;

	pdev = RTE_ETH_DEV_TO_PCI(eth_dev);
	rte_eth_copy_pci_info(eth_dev, pdev);
	enic->pdev = pdev;
	addr = &pdev->addr;

	snprintf(enic->bdf_name, ENICPMD_BDF_LENGTH, "%04x:%02x:%02x.%x",
		addr->domain, addr->bus, addr->devid, addr->function);

	err = enic_check_devargs(eth_dev);
	if (err)
		return err;
	return enic_probe(enic);
}
static int eth_enic_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
	struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_probe(pci_dev, sizeof(struct enic),
		eth_enicpmd_dev_init);
}

static int eth_enic_pci_remove(struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_remove(pci_dev, NULL);
}
static struct rte_pci_driver rte_enic_pmd = {
	.id_table = pci_id_enic_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
	.probe = eth_enic_pci_probe,
	.remove = eth_enic_pci_remove,
};
RTE_PMD_REGISTER_PCI(net_enic, rte_enic_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_enic, pci_id_enic_map);
RTE_PMD_REGISTER_KMOD_DEP(net_enic, "* igb_uio | uio_pci_generic | vfio-pci");
RTE_PMD_REGISTER_PARAM_STRING(net_enic,
	ENIC_DEVARG_IG_VLAN_REWRITE "=trunk|untag|priority|pass");