X-Git-Url: https://gerrit.fd.io/r/gitweb?a=blobdiff_plain;f=lib%2Flibrte_ether%2Frte_ethdev.c;h=0590f0c10e6474a02ca4861466925ac252219a5a;hb=ca33590b6af032bff57d9cc70455660466a654b2;hp=5a3175942b8d916a8109b5d9c903a7e4d8928abe;hpb=ce3d555e43e3795b5d9507fcfc76b7a0a92fd0d6;p=deb_dpdk.git diff --git a/lib/librte_ether/rte_ethdev.c b/lib/librte_ether/rte_ethdev.c index 5a317594..0590f0c1 100644 --- a/lib/librte_ether/rte_ethdev.c +++ b/lib/librte_ether/rte_ethdev.c @@ -1,34 +1,5 @@ -/*- - * BSD LICENSE - * - * Copyright(c) 2010-2016 Intel Corporation. All rights reserved. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2010-2017 Intel Corporation */ #include @@ -47,7 +18,6 @@ #include #include #include -#include #include #include #include @@ -64,15 +34,16 @@ #include #include #include +#include #include "rte_ether.h" #include "rte_ethdev.h" +#include "rte_ethdev_driver.h" +#include "ethdev_profile.h" static const char *MZ_RTE_ETH_DEV_DATA = "rte_eth_dev_data"; struct rte_eth_dev rte_eth_devices[RTE_MAX_ETHPORTS]; -static struct rte_eth_dev_data *rte_eth_dev_data; static uint8_t eth_dev_last_created_port; -static uint8_t nb_ports; /* spinlock for eth device callbacks */ static rte_spinlock_t rte_eth_dev_cb_lock = RTE_SPINLOCK_INITIALIZER; @@ -83,17 +54,28 @@ static rte_spinlock_t rte_eth_rx_cb_lock = RTE_SPINLOCK_INITIALIZER; /* spinlock for add/remove tx callbacks */ static rte_spinlock_t rte_eth_tx_cb_lock = RTE_SPINLOCK_INITIALIZER; +/* spinlock for shared data allocation */ +static rte_spinlock_t rte_eth_shared_data_lock = RTE_SPINLOCK_INITIALIZER; + /* store statistics names and its offset in stats structure */ struct rte_eth_xstats_name_off { char name[RTE_ETH_XSTATS_NAME_SIZE]; unsigned offset; }; +/* Shared memory between primary and secondary processes. 
*/ +static struct { + uint64_t next_owner_id; + rte_spinlock_t ownership_lock; + struct rte_eth_dev_data data[RTE_MAX_ETHPORTS]; +} *rte_eth_dev_shared_data; + static const struct rte_eth_xstats_name_off rte_stats_strings[] = { {"rx_good_packets", offsetof(struct rte_eth_stats, ipackets)}, {"tx_good_packets", offsetof(struct rte_eth_stats, opackets)}, {"rx_good_bytes", offsetof(struct rte_eth_stats, ibytes)}, {"tx_good_bytes", offsetof(struct rte_eth_stats, obytes)}, + {"rx_missed_errors", offsetof(struct rte_eth_stats, imissed)}, {"rx_errors", offsetof(struct rte_eth_stats, ierrors)}, {"tx_errors", offsetof(struct rte_eth_stats, oerrors)}, {"rx_mbuf_allocation_errors", offsetof(struct rte_eth_stats, @@ -118,6 +100,61 @@ static const struct rte_eth_xstats_name_off rte_txq_stats_strings[] = { #define RTE_NB_TXQ_STATS (sizeof(rte_txq_stats_strings) / \ sizeof(rte_txq_stats_strings[0])) +#define RTE_RX_OFFLOAD_BIT2STR(_name) \ + { DEV_RX_OFFLOAD_##_name, #_name } + +static const struct { + uint64_t offload; + const char *name; +} rte_rx_offload_names[] = { + RTE_RX_OFFLOAD_BIT2STR(VLAN_STRIP), + RTE_RX_OFFLOAD_BIT2STR(IPV4_CKSUM), + RTE_RX_OFFLOAD_BIT2STR(UDP_CKSUM), + RTE_RX_OFFLOAD_BIT2STR(TCP_CKSUM), + RTE_RX_OFFLOAD_BIT2STR(TCP_LRO), + RTE_RX_OFFLOAD_BIT2STR(QINQ_STRIP), + RTE_RX_OFFLOAD_BIT2STR(OUTER_IPV4_CKSUM), + RTE_RX_OFFLOAD_BIT2STR(MACSEC_STRIP), + RTE_RX_OFFLOAD_BIT2STR(HEADER_SPLIT), + RTE_RX_OFFLOAD_BIT2STR(VLAN_FILTER), + RTE_RX_OFFLOAD_BIT2STR(VLAN_EXTEND), + RTE_RX_OFFLOAD_BIT2STR(JUMBO_FRAME), + RTE_RX_OFFLOAD_BIT2STR(CRC_STRIP), + RTE_RX_OFFLOAD_BIT2STR(SCATTER), + RTE_RX_OFFLOAD_BIT2STR(TIMESTAMP), + RTE_RX_OFFLOAD_BIT2STR(SECURITY), +}; + +#undef RTE_RX_OFFLOAD_BIT2STR + +#define RTE_TX_OFFLOAD_BIT2STR(_name) \ + { DEV_TX_OFFLOAD_##_name, #_name } + +static const struct { + uint64_t offload; + const char *name; +} rte_tx_offload_names[] = { + RTE_TX_OFFLOAD_BIT2STR(VLAN_INSERT), + RTE_TX_OFFLOAD_BIT2STR(IPV4_CKSUM), + RTE_TX_OFFLOAD_BIT2STR(UDP_CKSUM), + RTE_TX_OFFLOAD_BIT2STR(TCP_CKSUM), + RTE_TX_OFFLOAD_BIT2STR(SCTP_CKSUM), + RTE_TX_OFFLOAD_BIT2STR(TCP_TSO), + RTE_TX_OFFLOAD_BIT2STR(UDP_TSO), + RTE_TX_OFFLOAD_BIT2STR(OUTER_IPV4_CKSUM), + RTE_TX_OFFLOAD_BIT2STR(QINQ_INSERT), + RTE_TX_OFFLOAD_BIT2STR(VXLAN_TNL_TSO), + RTE_TX_OFFLOAD_BIT2STR(GRE_TNL_TSO), + RTE_TX_OFFLOAD_BIT2STR(IPIP_TNL_TSO), + RTE_TX_OFFLOAD_BIT2STR(GENEVE_TNL_TSO), + RTE_TX_OFFLOAD_BIT2STR(MACSEC_INSERT), + RTE_TX_OFFLOAD_BIT2STR(MT_LOCKFREE), + RTE_TX_OFFLOAD_BIT2STR(MULTI_SEGS), + RTE_TX_OFFLOAD_BIT2STR(MBUF_FAST_FREE), + RTE_TX_OFFLOAD_BIT2STR(SECURITY), +}; + +#undef RTE_TX_OFFLOAD_BIT2STR /** * The user application callback description. 
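(Usage sketch, illustrative only and not part of the patch: the bit-to-string tables added above back the rte_eth_dev_rx_offload_name() and rte_eth_dev_tx_offload_name() helpers introduced further down in this diff, both tagged __rte_experimental. Assuming dev_info.rx_offload_capa carries the 64-bit RX capability mask, and with print_rx_offload_capa() as a hypothetical application helper, a port's supported RX offloads could be decoded like so:

	/* Illustrative example, not part of rte_ethdev.c. */
	#include <stdio.h>
	#include <stdint.h>
	#include <rte_ethdev.h>

	static void
	print_rx_offload_capa(uint16_t port_id)
	{
		struct rte_eth_dev_info dev_info;
		uint64_t bit;

		rte_eth_dev_info_get(port_id, &dev_info);

		/* Walk the capability mask bit by bit and name each flag. */
		for (bit = 1; bit != 0; bit <<= 1) {
			if (dev_info.rx_offload_capa & bit)
				printf("port %u supports RX offload %s\n",
					port_id,
					rte_eth_dev_rx_offload_name(bit));
		}
	}

Flags not present in the tables come back as "UNKNOWN", matching the helper's fallback below.)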
@@ -129,6 +166,7 @@ struct rte_eth_dev_callback { TAILQ_ENTRY(rte_eth_dev_callback) next; /**< Callbacks list */ rte_eth_dev_cb_fn cb_fn; /**< Callback address */ void *cb_arg; /**< Parameter for callback */ + void *ret_param; /**< Return parameter */ enum rte_eth_event_type event; /**< Interrupt event type */ uint32_t active; /**< Callback is executing */ }; @@ -138,30 +176,50 @@ enum { STAT_QMAP_RX }; -enum { - DEV_DETACHED = 0, - DEV_ATTACHED -}; +uint16_t +rte_eth_find_next(uint16_t port_id) +{ + while (port_id < RTE_MAX_ETHPORTS && + rte_eth_devices[port_id].state != RTE_ETH_DEV_ATTACHED && + rte_eth_devices[port_id].state != RTE_ETH_DEV_REMOVED) + port_id++; + + if (port_id >= RTE_MAX_ETHPORTS) + return RTE_MAX_ETHPORTS; + + return port_id; +} static void -rte_eth_dev_data_alloc(void) +rte_eth_dev_shared_data_prepare(void) { const unsigned flags = 0; const struct rte_memzone *mz; - if (rte_eal_process_type() == RTE_PROC_PRIMARY) { - mz = rte_memzone_reserve(MZ_RTE_ETH_DEV_DATA, - RTE_MAX_ETHPORTS * sizeof(*rte_eth_dev_data), - rte_socket_id(), flags); - } else - mz = rte_memzone_lookup(MZ_RTE_ETH_DEV_DATA); - if (mz == NULL) - rte_panic("Cannot allocate memzone for ethernet port data\n"); + rte_spinlock_lock(&rte_eth_shared_data_lock); + + if (rte_eth_dev_shared_data == NULL) { + if (rte_eal_process_type() == RTE_PROC_PRIMARY) { + /* Allocate port data and ownership shared memory. */ + mz = rte_memzone_reserve(MZ_RTE_ETH_DEV_DATA, + sizeof(*rte_eth_dev_shared_data), + rte_socket_id(), flags); + } else + mz = rte_memzone_lookup(MZ_RTE_ETH_DEV_DATA); + if (mz == NULL) + rte_panic("Cannot allocate ethdev shared data\n"); + + rte_eth_dev_shared_data = mz->addr; + if (rte_eal_process_type() == RTE_PROC_PRIMARY) { + rte_eth_dev_shared_data->next_owner_id = + RTE_ETH_DEV_NO_OWNER + 1; + rte_spinlock_init(&rte_eth_dev_shared_data->ownership_lock); + memset(rte_eth_dev_shared_data->data, 0, + sizeof(rte_eth_dev_shared_data->data)); + } + } - rte_eth_dev_data = mz->addr; - if (rte_eal_process_type() == RTE_PROC_PRIMARY) - memset(rte_eth_dev_data, 0, - RTE_MAX_ETHPORTS * sizeof(*rte_eth_dev_data)); + rte_spinlock_unlock(&rte_eth_shared_data_lock); } struct rte_eth_dev * @@ -170,35 +228,38 @@ rte_eth_dev_allocated(const char *name) unsigned i; for (i = 0; i < RTE_MAX_ETHPORTS; i++) { - if ((rte_eth_devices[i].attached == DEV_ATTACHED) && + if ((rte_eth_devices[i].state == RTE_ETH_DEV_ATTACHED) && strcmp(rte_eth_devices[i].data->name, name) == 0) return &rte_eth_devices[i]; } return NULL; } -static uint8_t +static uint16_t rte_eth_dev_find_free_port(void) { unsigned i; for (i = 0; i < RTE_MAX_ETHPORTS; i++) { - if (rte_eth_devices[i].attached == DEV_DETACHED) + /* Using shared name field to find a free port. 
*/ + if (rte_eth_dev_shared_data->data[i].name[0] == '\0') { + RTE_ASSERT(rte_eth_devices[i].state == + RTE_ETH_DEV_UNUSED); return i; + } } return RTE_MAX_ETHPORTS; } static struct rte_eth_dev * -eth_dev_get(uint8_t port_id) +eth_dev_get(uint16_t port_id) { struct rte_eth_dev *eth_dev = &rte_eth_devices[port_id]; - eth_dev->data = &rte_eth_dev_data[port_id]; - eth_dev->attached = DEV_ATTACHED; + eth_dev->data = &rte_eth_dev_shared_data->data[port_id]; + eth_dev->state = RTE_ETH_DEV_ATTACHED; eth_dev_last_created_port = port_id; - nb_ports++; return eth_dev; } @@ -206,27 +267,36 @@ eth_dev_get(uint8_t port_id) struct rte_eth_dev * rte_eth_dev_allocate(const char *name) { - uint8_t port_id; - struct rte_eth_dev *eth_dev; + uint16_t port_id; + struct rte_eth_dev *eth_dev = NULL; + + rte_eth_dev_shared_data_prepare(); + + /* Synchronize port creation between primary and secondary threads. */ + rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock); port_id = rte_eth_dev_find_free_port(); if (port_id == RTE_MAX_ETHPORTS) { - RTE_PMD_DEBUG_TRACE("Reached maximum number of Ethernet ports\n"); - return NULL; + RTE_LOG(ERR, EAL, "Reached maximum number of Ethernet ports\n"); + goto unlock; } - if (rte_eth_dev_data == NULL) - rte_eth_dev_data_alloc(); - if (rte_eth_dev_allocated(name) != NULL) { - RTE_PMD_DEBUG_TRACE("Ethernet Device with name %s already allocated!\n", + RTE_LOG(ERR, EAL, "Ethernet Device with name %s already allocated!\n", name); - return NULL; + goto unlock; } eth_dev = eth_dev_get(port_id); snprintf(eth_dev->data->name, sizeof(eth_dev->data->name), "%s", name); eth_dev->data->port_id = port_id; + eth_dev->data->mtu = ETHER_MTU; + +unlock: + rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock); + + if (eth_dev != NULL) + _rte_eth_dev_callback_process(eth_dev, RTE_ETH_EVENT_NEW, NULL); return eth_dev; } @@ -236,29 +306,31 @@ rte_eth_dev_allocate(const char *name) * makes sure that the same device would have the same port id both * in the primary and secondary process. */ -static struct rte_eth_dev * -eth_dev_attach_secondary(const char *name) +struct rte_eth_dev * +rte_eth_dev_attach_secondary(const char *name) { - uint8_t i; - struct rte_eth_dev *eth_dev; + uint16_t i; + struct rte_eth_dev *eth_dev = NULL; + + rte_eth_dev_shared_data_prepare(); - if (rte_eth_dev_data == NULL) - rte_eth_dev_data_alloc(); + /* Synchronize port attachment to primary port creation and release. 
*/ + rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock); for (i = 0; i < RTE_MAX_ETHPORTS; i++) { - if (strcmp(rte_eth_dev_data[i].name, name) == 0) + if (strcmp(rte_eth_dev_shared_data->data[i].name, name) == 0) break; } if (i == RTE_MAX_ETHPORTS) { RTE_PMD_DEBUG_TRACE( "device %s is not driven by the primary process\n", name); - return NULL; + } else { + eth_dev = eth_dev_get(i); + RTE_ASSERT(eth_dev->data->port_id == i); } - eth_dev = eth_dev_get(i); - RTE_ASSERT(eth_dev->data->port_id == i); - + rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock); return eth_dev; } @@ -268,141 +340,209 @@ rte_eth_dev_release_port(struct rte_eth_dev *eth_dev) if (eth_dev == NULL) return -EINVAL; - eth_dev->attached = DEV_DETACHED; - nb_ports--; + rte_eth_dev_shared_data_prepare(); + + rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock); + + eth_dev->state = RTE_ETH_DEV_UNUSED; + + memset(eth_dev->data, 0, sizeof(struct rte_eth_dev_data)); + + rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock); + + _rte_eth_dev_callback_process(eth_dev, RTE_ETH_EVENT_DESTROY, NULL); + return 0; } int -rte_eth_dev_pci_probe(struct rte_pci_driver *pci_drv, - struct rte_pci_device *pci_dev) +rte_eth_dev_is_valid_port(uint16_t port_id) { - struct eth_driver *eth_drv; - struct rte_eth_dev *eth_dev; - char ethdev_name[RTE_ETH_NAME_MAX_LEN]; + if (port_id >= RTE_MAX_ETHPORTS || + (rte_eth_devices[port_id].state == RTE_ETH_DEV_UNUSED)) + return 0; + else + return 1; +} - int diag; +static int +rte_eth_is_valid_owner_id(uint64_t owner_id) +{ + if (owner_id == RTE_ETH_DEV_NO_OWNER || + rte_eth_dev_shared_data->next_owner_id <= owner_id) { + RTE_PMD_DEBUG_TRACE("Invalid owner_id=%016lX.\n", owner_id); + return 0; + } + return 1; +} - eth_drv = (struct eth_driver *)pci_drv; +uint64_t __rte_experimental +rte_eth_find_next_owned_by(uint16_t port_id, const uint64_t owner_id) +{ + while (port_id < RTE_MAX_ETHPORTS && + ((rte_eth_devices[port_id].state != RTE_ETH_DEV_ATTACHED && + rte_eth_devices[port_id].state != RTE_ETH_DEV_REMOVED) || + rte_eth_devices[port_id].data->owner.id != owner_id)) + port_id++; - rte_eal_pci_device_name(&pci_dev->addr, ethdev_name, - sizeof(ethdev_name)); + if (port_id >= RTE_MAX_ETHPORTS) + return RTE_MAX_ETHPORTS; - if (rte_eal_process_type() == RTE_PROC_PRIMARY) { - eth_dev = rte_eth_dev_allocate(ethdev_name); - if (eth_dev == NULL) - return -ENOMEM; + return port_id; +} - eth_dev->data->dev_private = rte_zmalloc("ethdev private structure", - eth_drv->dev_private_size, - RTE_CACHE_LINE_SIZE); - if (eth_dev->data->dev_private == NULL) - rte_panic("Cannot allocate memzone for private port data\n"); - } else { - eth_dev = eth_dev_attach_secondary(ethdev_name); - if (eth_dev == NULL) { - /* - * if we failed to attach a device, it means the - * device is skipped in primary process, due to - * some errors. If so, we return a positive value, - * to let EAL skip it for the secondary process - * as well. 
- */ - return 1; - } +int __rte_experimental +rte_eth_dev_owner_new(uint64_t *owner_id) +{ + rte_eth_dev_shared_data_prepare(); + + rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock); + + *owner_id = rte_eth_dev_shared_data->next_owner_id++; + + rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock); + return 0; +} + +static int +_rte_eth_dev_owner_set(const uint16_t port_id, const uint64_t old_owner_id, + const struct rte_eth_dev_owner *new_owner) +{ + struct rte_eth_dev_owner *port_owner; + int sret; + + RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); + + if (!rte_eth_is_valid_owner_id(new_owner->id) && + !rte_eth_is_valid_owner_id(old_owner_id)) + return -EINVAL; + + port_owner = &rte_eth_devices[port_id].data->owner; + if (port_owner->id != old_owner_id) { + RTE_PMD_DEBUG_TRACE("Cannot set owner to port %d already owned" + " by %s_%016lX.\n", port_id, + port_owner->name, port_owner->id); + return -EPERM; } - eth_dev->pci_dev = pci_dev; - eth_dev->driver = eth_drv; - eth_dev->data->rx_mbuf_alloc_failed = 0; - /* init user callbacks */ - TAILQ_INIT(&(eth_dev->link_intr_cbs)); + sret = snprintf(port_owner->name, RTE_ETH_MAX_OWNER_NAME_LEN, "%s", + new_owner->name); + if (sret < 0 || sret >= RTE_ETH_MAX_OWNER_NAME_LEN) + RTE_PMD_DEBUG_TRACE("Port %d owner name was truncated.\n", + port_id); - /* - * Set the default MTU. - */ - eth_dev->data->mtu = ETHER_MTU; + port_owner->id = new_owner->id; - /* Invoke PMD device initialization function */ - diag = (*eth_drv->eth_dev_init)(eth_dev); - if (diag == 0) - return 0; + RTE_PMD_DEBUG_TRACE("Port %d owner is %s_%016lX.\n", port_id, + new_owner->name, new_owner->id); - RTE_PMD_DEBUG_TRACE("driver %s: eth_dev_init(vendor_id=0x%x device_id=0x%x) failed\n", - pci_drv->driver.name, - (unsigned) pci_dev->id.vendor_id, - (unsigned) pci_dev->id.device_id); - if (rte_eal_process_type() == RTE_PROC_PRIMARY) - rte_free(eth_dev->data->dev_private); - rte_eth_dev_release_port(eth_dev); - return diag; + return 0; } -int -rte_eth_dev_pci_remove(struct rte_pci_device *pci_dev) +int __rte_experimental +rte_eth_dev_owner_set(const uint16_t port_id, + const struct rte_eth_dev_owner *owner) { - const struct eth_driver *eth_drv; - struct rte_eth_dev *eth_dev; - char ethdev_name[RTE_ETH_NAME_MAX_LEN]; int ret; - if (pci_dev == NULL) - return -EINVAL; + rte_eth_dev_shared_data_prepare(); - rte_eal_pci_device_name(&pci_dev->addr, ethdev_name, - sizeof(ethdev_name)); + rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock); - eth_dev = rte_eth_dev_allocated(ethdev_name); - if (eth_dev == NULL) - return -ENODEV; + ret = _rte_eth_dev_owner_set(port_id, RTE_ETH_DEV_NO_OWNER, owner); - eth_drv = (const struct eth_driver *)pci_dev->driver; + rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock); + return ret; +} - /* Invoke PMD device uninit function */ - if (*eth_drv->eth_dev_uninit) { - ret = (*eth_drv->eth_dev_uninit)(eth_dev); - if (ret) - return ret; - } +int __rte_experimental +rte_eth_dev_owner_unset(const uint16_t port_id, const uint64_t owner_id) +{ + const struct rte_eth_dev_owner new_owner = (struct rte_eth_dev_owner) + {.id = RTE_ETH_DEV_NO_OWNER, .name = ""}; + int ret; - /* free ether device */ - rte_eth_dev_release_port(eth_dev); + rte_eth_dev_shared_data_prepare(); - if (rte_eal_process_type() == RTE_PROC_PRIMARY) - rte_free(eth_dev->data->dev_private); + rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock); - eth_dev->pci_dev = NULL; - eth_dev->driver = NULL; - eth_dev->data = NULL; + ret = 
_rte_eth_dev_owner_set(port_id, owner_id, &new_owner); - return 0; + rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock); + return ret; } -int -rte_eth_dev_is_valid_port(uint8_t port_id) +void __rte_experimental +rte_eth_dev_owner_delete(const uint64_t owner_id) { - if (port_id >= RTE_MAX_ETHPORTS || - rte_eth_devices[port_id].attached != DEV_ATTACHED) - return 0; - else - return 1; + uint16_t port_id; + + rte_eth_dev_shared_data_prepare(); + + rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock); + + if (rte_eth_is_valid_owner_id(owner_id)) { + RTE_ETH_FOREACH_DEV_OWNED_BY(port_id, owner_id) + memset(&rte_eth_devices[port_id].data->owner, 0, + sizeof(struct rte_eth_dev_owner)); + RTE_PMD_DEBUG_TRACE("All port owners owned by %016X identifier" + " have removed.\n", owner_id); + } + + rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock); +} + +int __rte_experimental +rte_eth_dev_owner_get(const uint16_t port_id, struct rte_eth_dev_owner *owner) +{ + int ret = 0; + + rte_eth_dev_shared_data_prepare(); + + rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock); + + if (!rte_eth_dev_is_valid_port(port_id)) { + RTE_PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id); + ret = -ENODEV; + } else { + rte_memcpy(owner, &rte_eth_devices[port_id].data->owner, + sizeof(*owner)); + } + + rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock); + return ret; } int -rte_eth_dev_socket_id(uint8_t port_id) +rte_eth_dev_socket_id(uint16_t port_id) { RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -1); return rte_eth_devices[port_id].data->numa_node; } -uint8_t +void * +rte_eth_dev_get_sec_ctx(uint8_t port_id) +{ + RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, NULL); + return rte_eth_devices[port_id].security_ctx; +} + +uint16_t rte_eth_dev_count(void) { - return nb_ports; + uint16_t p; + uint16_t count; + + count = 0; + + RTE_ETH_FOREACH_DEV(p) + count++; + + return count; } int -rte_eth_dev_get_name_by_port(uint8_t port_id, char *name) +rte_eth_dev_get_name_by_port(uint16_t port_id, char *name) { char *tmp; @@ -415,67 +555,46 @@ rte_eth_dev_get_name_by_port(uint8_t port_id, char *name) /* shouldn't check 'rte_eth_devices[i].data', * because it might be overwritten by VDEV PMD */ - tmp = rte_eth_dev_data[port_id].name; + tmp = rte_eth_dev_shared_data->data[port_id].name; strcpy(name, tmp); return 0; } int -rte_eth_dev_get_port_by_name(const char *name, uint8_t *port_id) +rte_eth_dev_get_port_by_name(const char *name, uint16_t *port_id) { - int i; + uint32_t pid; if (name == NULL) { RTE_PMD_DEBUG_TRACE("Null pointer is specified\n"); return -EINVAL; } - if (!nb_ports) - return -ENODEV; - - *port_id = RTE_MAX_ETHPORTS; - - for (i = 0; i < RTE_MAX_ETHPORTS; i++) { - - if (!strncmp(name, - rte_eth_dev_data[i].name, strlen(name))) { - - *port_id = i; - + for (pid = 0; pid < RTE_MAX_ETHPORTS; pid++) { + if (rte_eth_devices[pid].state != RTE_ETH_DEV_UNUSED && + !strncmp(name, rte_eth_dev_shared_data->data[pid].name, + strlen(name))) { + *port_id = pid; return 0; } } + return -ENODEV; } static int -rte_eth_dev_is_detachable(uint8_t port_id) +eth_err(uint16_t port_id, int ret) { - uint32_t dev_flags; - - RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL); - - switch (rte_eth_devices[port_id].data->kdrv) { - case RTE_KDRV_IGB_UIO: - case RTE_KDRV_UIO_GENERIC: - case RTE_KDRV_NIC_UIO: - case RTE_KDRV_NONE: - break; - case RTE_KDRV_VFIO: - default: - return -ENOTSUP; - } - dev_flags = rte_eth_devices[port_id].data->dev_flags; - if ((dev_flags & RTE_ETH_DEV_DETACHABLE) && - (!(dev_flags & 
RTE_ETH_DEV_BONDED_SLAVE))) + if (ret == 0) return 0; - else - return 1; + if (rte_eth_dev_is_removed(port_id)) + return -EIO; + return ret; } /* attach the new device, then store port_id of the device */ int -rte_eth_dev_attach(const char *devargs, uint8_t *port_id) +rte_eth_dev_attach(const char *devargs, uint16_t *port_id) { int ret = -1; int current = rte_eth_dev_count(); @@ -521,25 +640,34 @@ err: /* detach the device, then store the name of the device */ int -rte_eth_dev_detach(uint8_t port_id, char *name) +rte_eth_dev_detach(uint16_t port_id, char *name) { + uint32_t dev_flags; int ret = -1; + RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL); + if (name == NULL) { ret = -EINVAL; goto err; } - /* FIXME: move this to eal, once device flags are relocated there */ - if (rte_eth_dev_is_detachable(port_id)) + dev_flags = rte_eth_devices[port_id].data->dev_flags; + if (dev_flags & RTE_ETH_DEV_BONDED_SLAVE) { + RTE_LOG(ERR, EAL, "Port %" PRIu16 " is bonded, cannot detach\n", + port_id); + ret = -ENOTSUP; goto err; + } snprintf(name, sizeof(rte_eth_devices[port_id].data->name), "%s", rte_eth_devices[port_id].data->name); - ret = rte_eal_dev_detach(name); + + ret = rte_eal_dev_detach(rte_eth_devices[port_id].device); if (ret < 0) goto err; + rte_eth_dev_release_port(&rte_eth_devices[port_id]); return 0; err: @@ -588,13 +716,16 @@ rte_eth_dev_rx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues) for (i = nb_queues; i < old_nb_queues; i++) (*dev->dev_ops->rx_queue_release)(rxq[i]); + + rte_free(dev->data->rx_queues); + dev->data->rx_queues = NULL; } dev->data->nb_rx_queues = nb_queues; return 0; } int -rte_eth_dev_rx_queue_start(uint8_t port_id, uint16_t rx_queue_id) +rte_eth_dev_rx_queue_start(uint16_t port_id, uint16_t rx_queue_id) { struct rte_eth_dev *dev; @@ -615,12 +746,13 @@ rte_eth_dev_rx_queue_start(uint8_t port_id, uint16_t rx_queue_id) return 0; } - return dev->dev_ops->rx_queue_start(dev, rx_queue_id); + return eth_err(port_id, dev->dev_ops->rx_queue_start(dev, + rx_queue_id)); } int -rte_eth_dev_rx_queue_stop(uint8_t port_id, uint16_t rx_queue_id) +rte_eth_dev_rx_queue_stop(uint16_t port_id, uint16_t rx_queue_id) { struct rte_eth_dev *dev; @@ -641,12 +773,12 @@ rte_eth_dev_rx_queue_stop(uint8_t port_id, uint16_t rx_queue_id) return 0; } - return dev->dev_ops->rx_queue_stop(dev, rx_queue_id); + return eth_err(port_id, dev->dev_ops->rx_queue_stop(dev, rx_queue_id)); } int -rte_eth_dev_tx_queue_start(uint8_t port_id, uint16_t tx_queue_id) +rte_eth_dev_tx_queue_start(uint16_t port_id, uint16_t tx_queue_id) { struct rte_eth_dev *dev; @@ -667,12 +799,13 @@ rte_eth_dev_tx_queue_start(uint8_t port_id, uint16_t tx_queue_id) return 0; } - return dev->dev_ops->tx_queue_start(dev, tx_queue_id); + return eth_err(port_id, dev->dev_ops->tx_queue_start(dev, + tx_queue_id)); } int -rte_eth_dev_tx_queue_stop(uint8_t port_id, uint16_t tx_queue_id) +rte_eth_dev_tx_queue_stop(uint16_t port_id, uint16_t tx_queue_id) { struct rte_eth_dev *dev; @@ -693,7 +826,7 @@ rte_eth_dev_tx_queue_stop(uint8_t port_id, uint16_t tx_queue_id) return 0; } - return dev->dev_ops->tx_queue_stop(dev, tx_queue_id); + return eth_err(port_id, dev->dev_ops->tx_queue_stop(dev, tx_queue_id)); } @@ -739,6 +872,9 @@ rte_eth_dev_tx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues) for (i = nb_queues; i < old_nb_queues; i++) (*dev->dev_ops->tx_queue_release)(txq[i]); + + rte_free(dev->data->tx_queues); + dev->data->tx_queues = NULL; } dev->data->nb_tx_queues = nb_queues; return 0; @@ -777,12 +913,134 @@ 
rte_eth_speed_bitflag(uint32_t speed, int duplex) } } +/** + * A conversion function from rxmode bitfield API. + */ +static void +rte_eth_convert_rx_offload_bitfield(const struct rte_eth_rxmode *rxmode, + uint64_t *rx_offloads) +{ + uint64_t offloads = 0; + + if (rxmode->header_split == 1) + offloads |= DEV_RX_OFFLOAD_HEADER_SPLIT; + if (rxmode->hw_ip_checksum == 1) + offloads |= DEV_RX_OFFLOAD_CHECKSUM; + if (rxmode->hw_vlan_filter == 1) + offloads |= DEV_RX_OFFLOAD_VLAN_FILTER; + if (rxmode->hw_vlan_strip == 1) + offloads |= DEV_RX_OFFLOAD_VLAN_STRIP; + if (rxmode->hw_vlan_extend == 1) + offloads |= DEV_RX_OFFLOAD_VLAN_EXTEND; + if (rxmode->jumbo_frame == 1) + offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME; + if (rxmode->hw_strip_crc == 1) + offloads |= DEV_RX_OFFLOAD_CRC_STRIP; + if (rxmode->enable_scatter == 1) + offloads |= DEV_RX_OFFLOAD_SCATTER; + if (rxmode->enable_lro == 1) + offloads |= DEV_RX_OFFLOAD_TCP_LRO; + if (rxmode->hw_timestamp == 1) + offloads |= DEV_RX_OFFLOAD_TIMESTAMP; + if (rxmode->security == 1) + offloads |= DEV_RX_OFFLOAD_SECURITY; + + *rx_offloads = offloads; +} + +/** + * A conversion function from rxmode offloads API. + */ +static void +rte_eth_convert_rx_offloads(const uint64_t rx_offloads, + struct rte_eth_rxmode *rxmode) +{ + + if (rx_offloads & DEV_RX_OFFLOAD_HEADER_SPLIT) + rxmode->header_split = 1; + else + rxmode->header_split = 0; + if (rx_offloads & DEV_RX_OFFLOAD_CHECKSUM) + rxmode->hw_ip_checksum = 1; + else + rxmode->hw_ip_checksum = 0; + if (rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER) + rxmode->hw_vlan_filter = 1; + else + rxmode->hw_vlan_filter = 0; + if (rx_offloads & DEV_RX_OFFLOAD_VLAN_STRIP) + rxmode->hw_vlan_strip = 1; + else + rxmode->hw_vlan_strip = 0; + if (rx_offloads & DEV_RX_OFFLOAD_VLAN_EXTEND) + rxmode->hw_vlan_extend = 1; + else + rxmode->hw_vlan_extend = 0; + if (rx_offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) + rxmode->jumbo_frame = 1; + else + rxmode->jumbo_frame = 0; + if (rx_offloads & DEV_RX_OFFLOAD_CRC_STRIP) + rxmode->hw_strip_crc = 1; + else + rxmode->hw_strip_crc = 0; + if (rx_offloads & DEV_RX_OFFLOAD_SCATTER) + rxmode->enable_scatter = 1; + else + rxmode->enable_scatter = 0; + if (rx_offloads & DEV_RX_OFFLOAD_TCP_LRO) + rxmode->enable_lro = 1; + else + rxmode->enable_lro = 0; + if (rx_offloads & DEV_RX_OFFLOAD_TIMESTAMP) + rxmode->hw_timestamp = 1; + else + rxmode->hw_timestamp = 0; + if (rx_offloads & DEV_RX_OFFLOAD_SECURITY) + rxmode->security = 1; + else + rxmode->security = 0; +} + +const char * __rte_experimental +rte_eth_dev_rx_offload_name(uint64_t offload) +{ + const char *name = "UNKNOWN"; + unsigned int i; + + for (i = 0; i < RTE_DIM(rte_rx_offload_names); ++i) { + if (offload == rte_rx_offload_names[i].offload) { + name = rte_rx_offload_names[i].name; + break; + } + } + + return name; +} + +const char * __rte_experimental +rte_eth_dev_tx_offload_name(uint64_t offload) +{ + const char *name = "UNKNOWN"; + unsigned int i; + + for (i = 0; i < RTE_DIM(rte_tx_offload_names); ++i) { + if (offload == rte_tx_offload_names[i].offload) { + name = rte_tx_offload_names[i].name; + break; + } + } + + return name; +} + int -rte_eth_dev_configure(uint8_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q, +rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q, const struct rte_eth_conf *dev_conf) { struct rte_eth_dev *dev; struct rte_eth_dev_info dev_info; + struct rte_eth_conf local_conf = *dev_conf; int diag; RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL); @@ -812,8 +1070,20 @@ rte_eth_dev_configure(uint8_t port_id, 
uint16_t nb_rx_q, uint16_t nb_tx_q, return -EBUSY; } + /* + * Convert between the offloads API to enable PMDs to support + * only one of them. + */ + if (dev_conf->rxmode.ignore_offload_bitfield == 0) { + rte_eth_convert_rx_offload_bitfield( + &dev_conf->rxmode, &local_conf.rxmode.offloads); + } else { + rte_eth_convert_rx_offloads(dev_conf->rxmode.offloads, + &local_conf.rxmode); + } + /* Copy the dev_conf parameter into the dev structure */ - memcpy(&dev->data->dev_conf, dev_conf, sizeof(dev->data->dev_conf)); + memcpy(&dev->data->dev_conf, &local_conf, sizeof(dev->data->dev_conf)); /* * Check that the numbers of RX and TX queues are not greater @@ -839,22 +1109,25 @@ rte_eth_dev_configure(uint8_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q, return -EINVAL; } - /* - * If link state interrupt is enabled, check that the - * device supports it. - */ + /* Check that the device supports requested interrupts */ if ((dev_conf->intr_conf.lsc == 1) && (!(dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC))) { RTE_PMD_DEBUG_TRACE("driver %s does not support lsc\n", - dev->data->drv_name); + dev->device->driver->name); return -EINVAL; } + if ((dev_conf->intr_conf.rmv == 1) && + (!(dev->data->dev_flags & RTE_ETH_DEV_INTR_RMV))) { + RTE_PMD_DEBUG_TRACE("driver %s does not support rmv\n", + dev->device->driver->name); + return -EINVAL; + } /* * If jumbo frames are enabled, check that the maximum RX packet * length is supported by the configured device. */ - if (dev_conf->rxmode.jumbo_frame == 1) { + if (local_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) { if (dev_conf->rxmode.max_rx_pkt_len > dev_info.max_rx_pktlen) { RTE_PMD_DEBUG_TRACE("ethdev port_id=%d max_rx_pkt_len %u" @@ -903,45 +1176,77 @@ rte_eth_dev_configure(uint8_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q, port_id, diag); rte_eth_dev_rx_queue_config(dev, 0); rte_eth_dev_tx_queue_config(dev, 0); - return diag; + return eth_err(port_id, diag); + } + + /* Initialize Rx profiling if enabled at compilation time. 
*/ + diag = __rte_eth_profile_rx_init(port_id, dev); + if (diag != 0) { + RTE_PMD_DEBUG_TRACE("port%d __rte_eth_profile_rx_init = %d\n", + port_id, diag); + rte_eth_dev_rx_queue_config(dev, 0); + rte_eth_dev_tx_queue_config(dev, 0); + return eth_err(port_id, diag); } return 0; } +void +_rte_eth_dev_reset(struct rte_eth_dev *dev) +{ + if (dev->data->dev_started) { + RTE_PMD_DEBUG_TRACE( + "port %d must be stopped to allow reset\n", + dev->data->port_id); + return; + } + + rte_eth_dev_rx_queue_config(dev, 0); + rte_eth_dev_tx_queue_config(dev, 0); + + memset(&dev->data->dev_conf, 0, sizeof(dev->data->dev_conf)); +} + static void -rte_eth_dev_config_restore(uint8_t port_id) +rte_eth_dev_config_restore(uint16_t port_id) { struct rte_eth_dev *dev; struct rte_eth_dev_info dev_info; - struct ether_addr addr; + struct ether_addr *addr; uint16_t i; uint32_t pool = 0; + uint64_t pool_mask; dev = &rte_eth_devices[port_id]; rte_eth_dev_info_get(port_id, &dev_info); - if (RTE_ETH_DEV_SRIOV(dev).active) - pool = RTE_ETH_DEV_SRIOV(dev).def_vmdq_idx; - - /* replay MAC address configuration */ - for (i = 0; i < dev_info.max_mac_addrs; i++) { - addr = dev->data->mac_addrs[i]; - - /* skip zero address */ - if (is_zero_ether_addr(&addr)) - continue; - - /* add address to the hardware */ - if (*dev->dev_ops->mac_addr_add && - (dev->data->mac_pool_sel[i] & (1ULL << pool))) - (*dev->dev_ops->mac_addr_add)(dev, &addr, i, pool); - else { - RTE_PMD_DEBUG_TRACE("port %d: MAC address array not supported\n", - port_id); - /* exit the loop but not return an error */ - break; + /* replay MAC address configuration including default MAC */ + addr = &dev->data->mac_addrs[0]; + if (*dev->dev_ops->mac_addr_set != NULL) + (*dev->dev_ops->mac_addr_set)(dev, addr); + else if (*dev->dev_ops->mac_addr_add != NULL) + (*dev->dev_ops->mac_addr_add)(dev, addr, 0, pool); + + if (*dev->dev_ops->mac_addr_add != NULL) { + for (i = 1; i < dev_info.max_mac_addrs; i++) { + addr = &dev->data->mac_addrs[i]; + + /* skip zero address */ + if (is_zero_ether_addr(addr)) + continue; + + pool = 0; + pool_mask = dev->data->mac_pool_sel[i]; + + do { + if (pool_mask & 1ULL) + (*dev->dev_ops->mac_addr_add)(dev, + addr, i, pool); + pool_mask >>= 1; + pool++; + } while (pool_mask); } } @@ -959,7 +1264,7 @@ rte_eth_dev_config_restore(uint8_t port_id) } int -rte_eth_dev_start(uint8_t port_id) +rte_eth_dev_start(uint16_t port_id) { struct rte_eth_dev *dev; int diag; @@ -971,7 +1276,7 @@ rte_eth_dev_start(uint8_t port_id) RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_start, -ENOTSUP); if (dev->data->dev_started != 0) { - RTE_PMD_DEBUG_TRACE("Device with port_id=%" PRIu8 + RTE_PMD_DEBUG_TRACE("Device with port_id=%" PRIu16 " already started\n", port_id); return 0; @@ -981,7 +1286,7 @@ rte_eth_dev_start(uint8_t port_id) if (diag == 0) dev->data->dev_started = 1; else - return diag; + return eth_err(port_id, diag); rte_eth_dev_config_restore(port_id); @@ -993,7 +1298,7 @@ rte_eth_dev_start(uint8_t port_id) } void -rte_eth_dev_stop(uint8_t port_id) +rte_eth_dev_stop(uint16_t port_id) { struct rte_eth_dev *dev; @@ -1003,7 +1308,7 @@ rte_eth_dev_stop(uint8_t port_id) RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_stop); if (dev->data->dev_started == 0) { - RTE_PMD_DEBUG_TRACE("Device with port_id=%" PRIu8 + RTE_PMD_DEBUG_TRACE("Device with port_id=%" PRIu16 " already stopped\n", port_id); return; @@ -1014,7 +1319,7 @@ rte_eth_dev_stop(uint8_t port_id) } int -rte_eth_dev_set_link_up(uint8_t port_id) +rte_eth_dev_set_link_up(uint16_t port_id) { struct rte_eth_dev *dev; @@ 
-1023,11 +1328,11 @@ rte_eth_dev_set_link_up(uint8_t port_id) dev = &rte_eth_devices[port_id]; RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_set_link_up, -ENOTSUP); - return (*dev->dev_ops->dev_set_link_up)(dev); + return eth_err(port_id, (*dev->dev_ops->dev_set_link_up)(dev)); } int -rte_eth_dev_set_link_down(uint8_t port_id) +rte_eth_dev_set_link_down(uint16_t port_id) { struct rte_eth_dev *dev; @@ -1036,11 +1341,11 @@ rte_eth_dev_set_link_down(uint8_t port_id) dev = &rte_eth_devices[port_id]; RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_set_link_down, -ENOTSUP); - return (*dev->dev_ops->dev_set_link_down)(dev); + return eth_err(port_id, (*dev->dev_ops->dev_set_link_down)(dev)); } void -rte_eth_dev_close(uint8_t port_id) +rte_eth_dev_close(uint16_t port_id) { struct rte_eth_dev *dev; @@ -1051,14 +1356,56 @@ rte_eth_dev_close(uint8_t port_id) dev->data->dev_started = 0; (*dev->dev_ops->dev_close)(dev); + dev->data->nb_rx_queues = 0; rte_free(dev->data->rx_queues); dev->data->rx_queues = NULL; + dev->data->nb_tx_queues = 0; rte_free(dev->data->tx_queues); dev->data->tx_queues = NULL; } int -rte_eth_rx_queue_setup(uint8_t port_id, uint16_t rx_queue_id, +rte_eth_dev_reset(uint16_t port_id) +{ + struct rte_eth_dev *dev; + int ret; + + RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL); + dev = &rte_eth_devices[port_id]; + + RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_reset, -ENOTSUP); + + rte_eth_dev_stop(port_id); + ret = dev->dev_ops->dev_reset(dev); + + return eth_err(port_id, ret); +} + +int __rte_experimental +rte_eth_dev_is_removed(uint16_t port_id) +{ + struct rte_eth_dev *dev; + int ret; + + RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, 0); + + dev = &rte_eth_devices[port_id]; + + if (dev->state == RTE_ETH_DEV_REMOVED) + return 1; + + RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->is_removed, 0); + + ret = dev->dev_ops->is_removed(dev); + if (ret != 0) + /* Device is physically removed. */ + dev->state = RTE_ETH_DEV_REMOVED; + + return ret; +} + +int +rte_eth_rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id, uint16_t nb_rx_desc, unsigned int socket_id, const struct rte_eth_rxconf *rx_conf, struct rte_mempool *mp) @@ -1067,6 +1414,8 @@ rte_eth_rx_queue_setup(uint8_t port_id, uint16_t rx_queue_id, uint32_t mbp_buf_size; struct rte_eth_dev *dev; struct rte_eth_dev_info dev_info; + struct rte_eth_rxconf local_conf; + void **rxq; RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL); @@ -1125,27 +1474,96 @@ rte_eth_rx_queue_setup(uint8_t port_id, uint16_t rx_queue_id, return -EINVAL; } + rxq = dev->data->rx_queues; + if (rxq[rx_queue_id]) { + RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release, + -ENOTSUP); + (*dev->dev_ops->rx_queue_release)(rxq[rx_queue_id]); + rxq[rx_queue_id] = NULL; + } + if (rx_conf == NULL) rx_conf = &dev_info.default_rxconf; - ret = (*dev->dev_ops->rx_queue_setup)(dev, rx_queue_id, nb_rx_desc, - socket_id, rx_conf, mp); - if (!ret) { + local_conf = *rx_conf; + if (dev->data->dev_conf.rxmode.ignore_offload_bitfield == 0) { + /** + * Reflect port offloads to queue offloads in order for + * offloads to not be discarded. + */ + rte_eth_convert_rx_offload_bitfield(&dev->data->dev_conf.rxmode, + &local_conf.offloads); + } + + ret = (*dev->dev_ops->rx_queue_setup)(dev, rx_queue_id, nb_rx_desc, + socket_id, &local_conf, mp); + if (!ret) { if (!dev->data->min_rx_buf_size || dev->data->min_rx_buf_size > mbp_buf_size) dev->data->min_rx_buf_size = mbp_buf_size; } - return ret; + return eth_err(port_id, ret); +} + +/** + * A conversion function from txq_flags API. 
+ */ +static void +rte_eth_convert_txq_flags(const uint32_t txq_flags, uint64_t *tx_offloads) +{ + uint64_t offloads = 0; + + if (!(txq_flags & ETH_TXQ_FLAGS_NOMULTSEGS)) + offloads |= DEV_TX_OFFLOAD_MULTI_SEGS; + if (!(txq_flags & ETH_TXQ_FLAGS_NOVLANOFFL)) + offloads |= DEV_TX_OFFLOAD_VLAN_INSERT; + if (!(txq_flags & ETH_TXQ_FLAGS_NOXSUMSCTP)) + offloads |= DEV_TX_OFFLOAD_SCTP_CKSUM; + if (!(txq_flags & ETH_TXQ_FLAGS_NOXSUMUDP)) + offloads |= DEV_TX_OFFLOAD_UDP_CKSUM; + if (!(txq_flags & ETH_TXQ_FLAGS_NOXSUMTCP)) + offloads |= DEV_TX_OFFLOAD_TCP_CKSUM; + if ((txq_flags & ETH_TXQ_FLAGS_NOREFCOUNT) && + (txq_flags & ETH_TXQ_FLAGS_NOMULTMEMP)) + offloads |= DEV_TX_OFFLOAD_MBUF_FAST_FREE; + + *tx_offloads = offloads; +} + +/** + * A conversion function from offloads API. + */ +static void +rte_eth_convert_txq_offloads(const uint64_t tx_offloads, uint32_t *txq_flags) +{ + uint32_t flags = 0; + + if (!(tx_offloads & DEV_TX_OFFLOAD_MULTI_SEGS)) + flags |= ETH_TXQ_FLAGS_NOMULTSEGS; + if (!(tx_offloads & DEV_TX_OFFLOAD_VLAN_INSERT)) + flags |= ETH_TXQ_FLAGS_NOVLANOFFL; + if (!(tx_offloads & DEV_TX_OFFLOAD_SCTP_CKSUM)) + flags |= ETH_TXQ_FLAGS_NOXSUMSCTP; + if (!(tx_offloads & DEV_TX_OFFLOAD_UDP_CKSUM)) + flags |= ETH_TXQ_FLAGS_NOXSUMUDP; + if (!(tx_offloads & DEV_TX_OFFLOAD_TCP_CKSUM)) + flags |= ETH_TXQ_FLAGS_NOXSUMTCP; + if (tx_offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE) + flags |= (ETH_TXQ_FLAGS_NOREFCOUNT | ETH_TXQ_FLAGS_NOMULTMEMP); + + *txq_flags = flags; } int -rte_eth_tx_queue_setup(uint8_t port_id, uint16_t tx_queue_id, +rte_eth_tx_queue_setup(uint16_t port_id, uint16_t tx_queue_id, uint16_t nb_tx_desc, unsigned int socket_id, const struct rte_eth_txconf *tx_conf) { struct rte_eth_dev *dev; struct rte_eth_dev_info dev_info; + struct rte_eth_txconf local_conf; + void **txq; RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL); @@ -1178,11 +1596,34 @@ rte_eth_tx_queue_setup(uint8_t port_id, uint16_t tx_queue_id, return -EINVAL; } + txq = dev->data->tx_queues; + if (txq[tx_queue_id]) { + RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release, + -ENOTSUP); + (*dev->dev_ops->tx_queue_release)(txq[tx_queue_id]); + txq[tx_queue_id] = NULL; + } + if (tx_conf == NULL) tx_conf = &dev_info.default_txconf; - return (*dev->dev_ops->tx_queue_setup)(dev, tx_queue_id, nb_tx_desc, - socket_id, tx_conf); + /* + * Convert between the offloads API to enable PMDs to support + * only one of them. + */ + local_conf = *tx_conf; + if (tx_conf->txq_flags & ETH_TXQ_FLAGS_IGNORE) { + rte_eth_convert_txq_offloads(tx_conf->offloads, + &local_conf.txq_flags); + /* Keep the ignore flag. */ + local_conf.txq_flags |= ETH_TXQ_FLAGS_IGNORE; + } else { + rte_eth_convert_txq_flags(tx_conf->txq_flags, + &local_conf.offloads); + } + + return eth_err(port_id, (*dev->dev_ops->tx_queue_setup)(dev, + tx_queue_id, nb_tx_desc, socket_id, &local_conf)); } void @@ -1234,8 +1675,24 @@ rte_eth_tx_buffer_init(struct rte_eth_dev_tx_buffer *buffer, uint16_t size) return ret; } +int +rte_eth_tx_done_cleanup(uint16_t port_id, uint16_t queue_id, uint32_t free_cnt) +{ + struct rte_eth_dev *dev = &rte_eth_devices[port_id]; + int ret; + + /* Validate Input Data. Bail if not valid or not supported. */ + RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); + RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_done_cleanup, -ENOTSUP); + + /* Call driver to free pending mbufs. 
*/ + ret = (*dev->dev_ops->tx_done_cleanup)(dev->data->tx_queues[queue_id], + free_cnt); + return eth_err(port_id, ret); +} + void -rte_eth_promiscuous_enable(uint8_t port_id) +rte_eth_promiscuous_enable(uint16_t port_id) { struct rte_eth_dev *dev; @@ -1248,7 +1705,7 @@ rte_eth_promiscuous_enable(uint8_t port_id) } void -rte_eth_promiscuous_disable(uint8_t port_id) +rte_eth_promiscuous_disable(uint16_t port_id) { struct rte_eth_dev *dev; @@ -1261,7 +1718,7 @@ rte_eth_promiscuous_disable(uint8_t port_id) } int -rte_eth_promiscuous_get(uint8_t port_id) +rte_eth_promiscuous_get(uint16_t port_id) { struct rte_eth_dev *dev; @@ -1272,7 +1729,7 @@ rte_eth_promiscuous_get(uint8_t port_id) } void -rte_eth_allmulticast_enable(uint8_t port_id) +rte_eth_allmulticast_enable(uint16_t port_id) { struct rte_eth_dev *dev; @@ -1285,7 +1742,7 @@ rte_eth_allmulticast_enable(uint8_t port_id) } void -rte_eth_allmulticast_disable(uint8_t port_id) +rte_eth_allmulticast_disable(uint16_t port_id) { struct rte_eth_dev *dev; @@ -1298,7 +1755,7 @@ rte_eth_allmulticast_disable(uint8_t port_id) } int -rte_eth_allmulticast_get(uint8_t port_id) +rte_eth_allmulticast_get(uint16_t port_id) { struct rte_eth_dev *dev; @@ -1323,7 +1780,7 @@ rte_eth_dev_atomic_read_link_status(struct rte_eth_dev *dev, } void -rte_eth_link_get(uint8_t port_id, struct rte_eth_link *eth_link) +rte_eth_link_get(uint16_t port_id, struct rte_eth_link *eth_link) { struct rte_eth_dev *dev; @@ -1340,7 +1797,7 @@ rte_eth_link_get(uint8_t port_id, struct rte_eth_link *eth_link) } void -rte_eth_link_get_nowait(uint8_t port_id, struct rte_eth_link *eth_link) +rte_eth_link_get_nowait(uint16_t port_id, struct rte_eth_link *eth_link) { struct rte_eth_dev *dev; @@ -1357,7 +1814,7 @@ rte_eth_link_get_nowait(uint8_t port_id, struct rte_eth_link *eth_link) } int -rte_eth_stats_get(uint8_t port_id, struct rte_eth_stats *stats) +rte_eth_stats_get(uint16_t port_id, struct rte_eth_stats *stats) { struct rte_eth_dev *dev; @@ -1368,65 +1825,119 @@ rte_eth_stats_get(uint8_t port_id, struct rte_eth_stats *stats) RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_get, -ENOTSUP); stats->rx_nombuf = dev->data->rx_mbuf_alloc_failed; - (*dev->dev_ops->stats_get)(dev, stats); - return 0; + return eth_err(port_id, (*dev->dev_ops->stats_get)(dev, stats)); } -void -rte_eth_stats_reset(uint8_t port_id) +int +rte_eth_stats_reset(uint16_t port_id) { struct rte_eth_dev *dev; - RTE_ETH_VALID_PORTID_OR_RET(port_id); + RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); dev = &rte_eth_devices[port_id]; - RTE_FUNC_PTR_OR_RET(*dev->dev_ops->stats_reset); + RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_reset, -ENOTSUP); (*dev->dev_ops->stats_reset)(dev); dev->data->rx_mbuf_alloc_failed = 0; + + return 0; +} + +static inline int +get_xstats_basic_count(struct rte_eth_dev *dev) +{ + uint16_t nb_rxqs, nb_txqs; + int count; + + nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS); + nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS); + + count = RTE_NB_STATS; + count += nb_rxqs * RTE_NB_RXQ_STATS; + count += nb_txqs * RTE_NB_TXQ_STATS; + + return count; } static int -get_xstats_count(uint8_t port_id) +get_xstats_count(uint16_t port_id) { struct rte_eth_dev *dev; int count; RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL); dev = &rte_eth_devices[port_id]; + if (dev->dev_ops->xstats_get_names_by_id != NULL) { + count = (*dev->dev_ops->xstats_get_names_by_id)(dev, NULL, + NULL, 0); + if (count < 0) + return eth_err(port_id, count); + } if 
(dev->dev_ops->xstats_get_names != NULL) { count = (*dev->dev_ops->xstats_get_names)(dev, NULL, 0); if (count < 0) - return count; + return eth_err(port_id, count); } else count = 0; - count += RTE_NB_STATS; - count += RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS) * - RTE_NB_RXQ_STATS; - count += RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS) * - RTE_NB_TXQ_STATS; + + + count += get_xstats_basic_count(dev); + return count; } int -rte_eth_xstats_get_names(uint8_t port_id, - struct rte_eth_xstat_name *xstats_names, - unsigned size) +rte_eth_xstats_get_id_by_name(uint16_t port_id, const char *xstat_name, + uint64_t *id) { - struct rte_eth_dev *dev; - int cnt_used_entries; - int cnt_expected_entries; - int cnt_driver_entries; - uint32_t idx, id_queue; - uint16_t num_q; + int cnt_xstats, idx_xstat; - cnt_expected_entries = get_xstats_count(port_id); - if (xstats_names == NULL || cnt_expected_entries < 0 || - (int)size < cnt_expected_entries) - return cnt_expected_entries; + RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); - /* port_id checked in get_xstats_count() */ - dev = &rte_eth_devices[port_id]; - cnt_used_entries = 0; + if (!id) { + RTE_PMD_DEBUG_TRACE("Error: id pointer is NULL\n"); + return -ENOMEM; + } + + if (!xstat_name) { + RTE_PMD_DEBUG_TRACE("Error: xstat_name pointer is NULL\n"); + return -ENOMEM; + } + + /* Get count */ + cnt_xstats = rte_eth_xstats_get_names_by_id(port_id, NULL, 0, NULL); + if (cnt_xstats < 0) { + RTE_PMD_DEBUG_TRACE("Error: Cannot get count of xstats\n"); + return -ENODEV; + } + + /* Get id-name lookup table */ + struct rte_eth_xstat_name xstats_names[cnt_xstats]; + + if (cnt_xstats != rte_eth_xstats_get_names_by_id( + port_id, xstats_names, cnt_xstats, NULL)) { + RTE_PMD_DEBUG_TRACE("Error: Cannot get xstats lookup\n"); + return -1; + } + + for (idx_xstat = 0; idx_xstat < cnt_xstats; idx_xstat++) { + if (!strcmp(xstats_names[idx_xstat].name, xstat_name)) { + *id = idx_xstat; + return 0; + }; + } + + return -EINVAL; +} + +/* retrieve basic stats names */ +static int +rte_eth_basic_stats_get_names(struct rte_eth_dev *dev, + struct rte_eth_xstat_name *xstats_names) +{ + int cnt_used_entries = 0; + uint32_t idx, id_queue; + uint16_t num_q; for (idx = 0; idx < RTE_NB_STATS; idx++) { snprintf(xstats_names[cnt_used_entries].name, @@ -1455,6 +1966,138 @@ rte_eth_xstats_get_names(uint8_t port_id, cnt_used_entries++; } } + return cnt_used_entries; +} + +/* retrieve ethdev extended statistics names */ +int +rte_eth_xstats_get_names_by_id(uint16_t port_id, + struct rte_eth_xstat_name *xstats_names, unsigned int size, + uint64_t *ids) +{ + struct rte_eth_xstat_name *xstats_names_copy; + unsigned int no_basic_stat_requested = 1; + unsigned int no_ext_stat_requested = 1; + unsigned int expected_entries; + unsigned int basic_count; + struct rte_eth_dev *dev; + unsigned int i; + int ret; + + RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); + dev = &rte_eth_devices[port_id]; + + basic_count = get_xstats_basic_count(dev); + ret = get_xstats_count(port_id); + if (ret < 0) + return ret; + expected_entries = (unsigned int)ret; + + /* Return max number of stats if no ids given */ + if (!ids) { + if (!xstats_names) + return expected_entries; + else if (xstats_names && size < expected_entries) + return expected_entries; + } + + if (ids && !xstats_names) + return -EINVAL; + + if (ids && dev->dev_ops->xstats_get_names_by_id != NULL && size > 0) { + uint64_t ids_copy[size]; + + for (i = 0; i < size; i++) { + if (ids[i] < basic_count) { + 
no_basic_stat_requested = 0; + break; + } + + /* + * Convert ids to xstats ids that PMD knows. + * ids known by user are basic + extended stats. + */ + ids_copy[i] = ids[i] - basic_count; + } + + if (no_basic_stat_requested) + return (*dev->dev_ops->xstats_get_names_by_id)(dev, + xstats_names, ids_copy, size); + } + + /* Retrieve all stats */ + if (!ids) { + int num_stats = rte_eth_xstats_get_names(port_id, xstats_names, + expected_entries); + if (num_stats < 0 || num_stats > (int)expected_entries) + return num_stats; + else + return expected_entries; + } + + xstats_names_copy = calloc(expected_entries, + sizeof(struct rte_eth_xstat_name)); + + if (!xstats_names_copy) { + RTE_PMD_DEBUG_TRACE("ERROR: can't allocate memory"); + return -ENOMEM; + } + + if (ids) { + for (i = 0; i < size; i++) { + if (ids[i] >= basic_count) { + no_ext_stat_requested = 0; + break; + } + } + } + + /* Fill xstats_names_copy structure */ + if (ids && no_ext_stat_requested) { + rte_eth_basic_stats_get_names(dev, xstats_names_copy); + } else { + ret = rte_eth_xstats_get_names(port_id, xstats_names_copy, + expected_entries); + if (ret < 0) { + free(xstats_names_copy); + return ret; + } + } + + /* Filter stats */ + for (i = 0; i < size; i++) { + if (ids[i] >= expected_entries) { + RTE_PMD_DEBUG_TRACE("ERROR: id value isn't valid\n"); + free(xstats_names_copy); + return -1; + } + xstats_names[i] = xstats_names_copy[ids[i]]; + } + + free(xstats_names_copy); + return size; +} + +int +rte_eth_xstats_get_names(uint16_t port_id, + struct rte_eth_xstat_name *xstats_names, + unsigned int size) +{ + struct rte_eth_dev *dev; + int cnt_used_entries; + int cnt_expected_entries; + int cnt_driver_entries; + + cnt_expected_entries = get_xstats_count(port_id); + if (xstats_names == NULL || cnt_expected_entries < 0 || + (int)size < cnt_expected_entries) + return cnt_expected_entries; + + /* port_id checked in get_xstats_count() */ + dev = &rte_eth_devices[port_id]; + + cnt_used_entries = rte_eth_basic_stats_get_names( + dev, xstats_names); if (dev->dev_ops->xstats_get_names != NULL) { /* If there are any driver-specific xstats, append them @@ -1465,56 +2108,33 @@ rte_eth_xstats_get_names(uint8_t port_id, xstats_names + cnt_used_entries, size - cnt_used_entries); if (cnt_driver_entries < 0) - return cnt_driver_entries; + return eth_err(port_id, cnt_driver_entries); cnt_used_entries += cnt_driver_entries; } return cnt_used_entries; } -/* retrieve ethdev extended statistics */ -int -rte_eth_xstats_get(uint8_t port_id, struct rte_eth_xstat *xstats, - unsigned n) + +static int +rte_eth_basic_stats_get(uint16_t port_id, struct rte_eth_xstat *xstats) { - struct rte_eth_stats eth_stats; struct rte_eth_dev *dev; - unsigned count = 0, i, q; - signed xcount = 0; + struct rte_eth_stats eth_stats; + unsigned int count = 0, i, q; uint64_t val, *stats_ptr; uint16_t nb_rxqs, nb_txqs; + int ret; - RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL); + ret = rte_eth_stats_get(port_id, ð_stats); + if (ret < 0) + return ret; dev = &rte_eth_devices[port_id]; nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS); nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS); - /* Return generic statistics */ - count = RTE_NB_STATS + (nb_rxqs * RTE_NB_RXQ_STATS) + - (nb_txqs * RTE_NB_TXQ_STATS); - - /* implemented by the driver */ - if (dev->dev_ops->xstats_get != NULL) { - /* Retrieve the xstats from the driver at the end of the - * xstats struct. - */ - xcount = (*dev->dev_ops->xstats_get)(dev, - xstats ? 
xstats + count : NULL, - (n > count) ? n - count : 0); - - if (xcount < 0) - return xcount; - } - - if (n < count + xcount || xstats == NULL) - return count + xcount; - - /* now fill the xstats structure */ - count = 0; - rte_eth_stats_get(port_id, ð_stats); - /* global stats */ for (i = 0; i < RTE_NB_STATS; i++) { stats_ptr = RTE_PTR_ADD(ð_stats, @@ -1544,6 +2164,144 @@ rte_eth_xstats_get(uint8_t port_id, struct rte_eth_xstat *xstats, xstats[count++].value = val; } } + return count; +} + +/* retrieve ethdev extended statistics */ +int +rte_eth_xstats_get_by_id(uint16_t port_id, const uint64_t *ids, + uint64_t *values, unsigned int size) +{ + unsigned int no_basic_stat_requested = 1; + unsigned int no_ext_stat_requested = 1; + unsigned int num_xstats_filled; + unsigned int basic_count; + uint16_t expected_entries; + struct rte_eth_dev *dev; + unsigned int i; + int ret; + + RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); + ret = get_xstats_count(port_id); + if (ret < 0) + return ret; + expected_entries = (uint16_t)ret; + struct rte_eth_xstat xstats[expected_entries]; + dev = &rte_eth_devices[port_id]; + basic_count = get_xstats_basic_count(dev); + + /* Return max number of stats if no ids given */ + if (!ids) { + if (!values) + return expected_entries; + else if (values && size < expected_entries) + return expected_entries; + } + + if (ids && !values) + return -EINVAL; + + if (ids && dev->dev_ops->xstats_get_by_id != NULL && size) { + unsigned int basic_count = get_xstats_basic_count(dev); + uint64_t ids_copy[size]; + + for (i = 0; i < size; i++) { + if (ids[i] < basic_count) { + no_basic_stat_requested = 0; + break; + } + + /* + * Convert ids to xstats ids that PMD knows. + * ids known by user are basic + extended stats. + */ + ids_copy[i] = ids[i] - basic_count; + } + + if (no_basic_stat_requested) + return (*dev->dev_ops->xstats_get_by_id)(dev, ids_copy, + values, size); + } + + if (ids) { + for (i = 0; i < size; i++) { + if (ids[i] >= basic_count) { + no_ext_stat_requested = 0; + break; + } + } + } + + /* Fill the xstats structure */ + if (ids && no_ext_stat_requested) + ret = rte_eth_basic_stats_get(port_id, xstats); + else + ret = rte_eth_xstats_get(port_id, xstats, expected_entries); + + if (ret < 0) + return ret; + num_xstats_filled = (unsigned int)ret; + + /* Return all stats */ + if (!ids) { + for (i = 0; i < num_xstats_filled; i++) + values[i] = xstats[i].value; + return expected_entries; + } + + /* Filter stats */ + for (i = 0; i < size; i++) { + if (ids[i] >= expected_entries) { + RTE_PMD_DEBUG_TRACE("ERROR: id value isn't valid\n"); + return -1; + } + values[i] = xstats[ids[i]].value; + } + return size; +} + +int +rte_eth_xstats_get(uint16_t port_id, struct rte_eth_xstat *xstats, + unsigned int n) +{ + struct rte_eth_dev *dev; + unsigned int count = 0, i; + signed int xcount = 0; + uint16_t nb_rxqs, nb_txqs; + int ret; + + RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL); + + dev = &rte_eth_devices[port_id]; + + nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS); + nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS); + + /* Return generic statistics */ + count = RTE_NB_STATS + (nb_rxqs * RTE_NB_RXQ_STATS) + + (nb_txqs * RTE_NB_TXQ_STATS); + + /* implemented by the driver */ + if (dev->dev_ops->xstats_get != NULL) { + /* Retrieve the xstats from the driver at the end of the + * xstats struct. + */ + xcount = (*dev->dev_ops->xstats_get)(dev, + xstats ? xstats + count : NULL, + (n > count) ? 
n - count : 0); + + if (xcount < 0) + return eth_err(port_id, xcount); + } + + if (n < count + xcount || xstats == NULL) + return count + xcount; + + /* now fill the xstats structure */ + ret = rte_eth_basic_stats_get(port_id, xstats); + if (ret < 0) + return ret; + count = ret; for (i = 0; i < count; i++) xstats[i].id = i; @@ -1556,7 +2314,7 @@ rte_eth_xstats_get(uint8_t port_id, struct rte_eth_xstat *xstats, /* reset ethdev extended statistics */ void -rte_eth_xstats_reset(uint8_t port_id) +rte_eth_xstats_reset(uint16_t port_id) { struct rte_eth_dev *dev; @@ -1574,7 +2332,7 @@ rte_eth_xstats_reset(uint8_t port_id) } static int -set_queue_stats_mapping(uint8_t port_id, uint16_t queue_id, uint8_t stat_idx, +set_queue_stats_mapping(uint16_t port_id, uint16_t queue_id, uint8_t stat_idx, uint8_t is_rx) { struct rte_eth_dev *dev; @@ -1590,24 +2348,37 @@ set_queue_stats_mapping(uint8_t port_id, uint16_t queue_id, uint8_t stat_idx, int -rte_eth_dev_set_tx_queue_stats_mapping(uint8_t port_id, uint16_t tx_queue_id, +rte_eth_dev_set_tx_queue_stats_mapping(uint16_t port_id, uint16_t tx_queue_id, uint8_t stat_idx) { - return set_queue_stats_mapping(port_id, tx_queue_id, stat_idx, - STAT_QMAP_TX); + return eth_err(port_id, set_queue_stats_mapping(port_id, tx_queue_id, + stat_idx, STAT_QMAP_TX)); } int -rte_eth_dev_set_rx_queue_stats_mapping(uint8_t port_id, uint16_t rx_queue_id, +rte_eth_dev_set_rx_queue_stats_mapping(uint16_t port_id, uint16_t rx_queue_id, uint8_t stat_idx) { - return set_queue_stats_mapping(port_id, rx_queue_id, stat_idx, - STAT_QMAP_RX); + return eth_err(port_id, set_queue_stats_mapping(port_id, rx_queue_id, + stat_idx, STAT_QMAP_RX)); +} + +int +rte_eth_dev_fw_version_get(uint16_t port_id, char *fw_version, size_t fw_size) +{ + struct rte_eth_dev *dev; + + RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); + dev = &rte_eth_devices[port_id]; + + RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fw_version_get, -ENOTSUP); + return eth_err(port_id, (*dev->dev_ops->fw_version_get)(dev, + fw_version, fw_size)); } void -rte_eth_dev_info_get(uint8_t port_id, struct rte_eth_dev_info *dev_info) +rte_eth_dev_info_get(uint16_t port_id, struct rte_eth_dev_info *dev_info) { struct rte_eth_dev *dev; const struct rte_eth_desc_lim lim = { @@ -1625,14 +2396,13 @@ rte_eth_dev_info_get(uint8_t port_id, struct rte_eth_dev_info *dev_info) RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_infos_get); (*dev->dev_ops->dev_infos_get)(dev, dev_info); - dev_info->pci_dev = dev->pci_dev; - dev_info->driver_name = dev->data->drv_name; + dev_info->driver_name = dev->device->driver->name; dev_info->nb_rx_queues = dev->data->nb_rx_queues; dev_info->nb_tx_queues = dev->data->nb_tx_queues; } int -rte_eth_dev_get_supported_ptypes(uint8_t port_id, uint32_t ptype_mask, +rte_eth_dev_get_supported_ptypes(uint16_t port_id, uint32_t ptype_mask, uint32_t *ptypes, int num) { int i, j; @@ -1658,7 +2428,7 @@ rte_eth_dev_get_supported_ptypes(uint8_t port_id, uint32_t ptype_mask, } void -rte_eth_macaddr_get(uint8_t port_id, struct ether_addr *mac_addr) +rte_eth_macaddr_get(uint16_t port_id, struct ether_addr *mac_addr) { struct rte_eth_dev *dev; @@ -1669,7 +2439,7 @@ rte_eth_macaddr_get(uint8_t port_id, struct ether_addr *mac_addr) int -rte_eth_dev_get_mtu(uint8_t port_id, uint16_t *mtu) +rte_eth_dev_get_mtu(uint16_t port_id, uint16_t *mtu) { struct rte_eth_dev *dev; @@ -1681,7 +2451,7 @@ rte_eth_dev_get_mtu(uint8_t port_id, uint16_t *mtu) } int -rte_eth_dev_set_mtu(uint8_t port_id, uint16_t mtu) +rte_eth_dev_set_mtu(uint16_t port_id, uint16_t mtu) 
{ int ret; struct rte_eth_dev *dev; @@ -1694,17 +2464,19 @@ rte_eth_dev_set_mtu(uint8_t port_id, uint16_t mtu) if (!ret) dev->data->mtu = mtu; - return ret; + return eth_err(port_id, ret); } int -rte_eth_dev_vlan_filter(uint8_t port_id, uint16_t vlan_id, int on) +rte_eth_dev_vlan_filter(uint16_t port_id, uint16_t vlan_id, int on) { struct rte_eth_dev *dev; + int ret; RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); dev = &rte_eth_devices[port_id]; - if (!(dev->data->dev_conf.rxmode.hw_vlan_filter)) { + if (!(dev->data->dev_conf.rxmode.offloads & + DEV_RX_OFFLOAD_VLAN_FILTER)) { RTE_PMD_DEBUG_TRACE("port %d: vlan-filtering disabled\n", port_id); return -ENOSYS; } @@ -1716,11 +2488,28 @@ rte_eth_dev_vlan_filter(uint8_t port_id, uint16_t vlan_id, int on) } RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_filter_set, -ENOTSUP); - return (*dev->dev_ops->vlan_filter_set)(dev, vlan_id, on); + ret = (*dev->dev_ops->vlan_filter_set)(dev, vlan_id, on); + if (ret == 0) { + struct rte_vlan_filter_conf *vfc; + int vidx; + int vbit; + + vfc = &dev->data->vlan_filter_conf; + vidx = vlan_id / 64; + vbit = vlan_id % 64; + + if (on) + vfc->ids[vidx] |= UINT64_C(1) << vbit; + else + vfc->ids[vidx] &= ~(UINT64_C(1) << vbit); + } + + return eth_err(port_id, ret); } int -rte_eth_dev_set_vlan_strip_on_queue(uint8_t port_id, uint16_t rx_queue_id, int on) +rte_eth_dev_set_vlan_strip_on_queue(uint16_t port_id, uint16_t rx_queue_id, + int on) { struct rte_eth_dev *dev; @@ -1738,7 +2527,7 @@ rte_eth_dev_set_vlan_strip_on_queue(uint8_t port_id, uint16_t rx_queue_id, int o } int -rte_eth_dev_set_vlan_ether_type(uint8_t port_id, +rte_eth_dev_set_vlan_ether_type(uint16_t port_id, enum rte_vlan_type vlan_type, uint16_t tpid) { @@ -1748,39 +2537,62 @@ rte_eth_dev_set_vlan_ether_type(uint8_t port_id, dev = &rte_eth_devices[port_id]; RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_tpid_set, -ENOTSUP); - return (*dev->dev_ops->vlan_tpid_set)(dev, vlan_type, tpid); + return eth_err(port_id, (*dev->dev_ops->vlan_tpid_set)(dev, vlan_type, + tpid)); } int -rte_eth_dev_set_vlan_offload(uint8_t port_id, int offload_mask) +rte_eth_dev_set_vlan_offload(uint16_t port_id, int offload_mask) { struct rte_eth_dev *dev; int ret = 0; int mask = 0; int cur, org = 0; + uint64_t orig_offloads; RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); dev = &rte_eth_devices[port_id]; + /* save original values in case of failure */ + orig_offloads = dev->data->dev_conf.rxmode.offloads; + /*check which option changed by application*/ cur = !!(offload_mask & ETH_VLAN_STRIP_OFFLOAD); - org = !!(dev->data->dev_conf.rxmode.hw_vlan_strip); + org = !!(dev->data->dev_conf.rxmode.offloads & + DEV_RX_OFFLOAD_VLAN_STRIP); if (cur != org) { - dev->data->dev_conf.rxmode.hw_vlan_strip = (uint8_t)cur; + if (cur) + dev->data->dev_conf.rxmode.offloads |= + DEV_RX_OFFLOAD_VLAN_STRIP; + else + dev->data->dev_conf.rxmode.offloads &= + ~DEV_RX_OFFLOAD_VLAN_STRIP; mask |= ETH_VLAN_STRIP_MASK; } cur = !!(offload_mask & ETH_VLAN_FILTER_OFFLOAD); - org = !!(dev->data->dev_conf.rxmode.hw_vlan_filter); + org = !!(dev->data->dev_conf.rxmode.offloads & + DEV_RX_OFFLOAD_VLAN_FILTER); if (cur != org) { - dev->data->dev_conf.rxmode.hw_vlan_filter = (uint8_t)cur; + if (cur) + dev->data->dev_conf.rxmode.offloads |= + DEV_RX_OFFLOAD_VLAN_FILTER; + else + dev->data->dev_conf.rxmode.offloads &= + ~DEV_RX_OFFLOAD_VLAN_FILTER; mask |= ETH_VLAN_FILTER_MASK; } cur = !!(offload_mask & ETH_VLAN_EXTEND_OFFLOAD); - org = !!(dev->data->dev_conf.rxmode.hw_vlan_extend); + org = 
!!(dev->data->dev_conf.rxmode.offloads &
+			DEV_RX_OFFLOAD_VLAN_EXTEND);
 	if (cur != org) {
-		dev->data->dev_conf.rxmode.hw_vlan_extend = (uint8_t)cur;
+		if (cur)
+			dev->data->dev_conf.rxmode.offloads |=
+				DEV_RX_OFFLOAD_VLAN_EXTEND;
+		else
+			dev->data->dev_conf.rxmode.offloads &=
+				~DEV_RX_OFFLOAD_VLAN_EXTEND;
 		mask |= ETH_VLAN_EXTEND_MASK;
 	}
@@ -1789,13 +2601,26 @@ rte_eth_dev_set_vlan_offload(uint8_t port_id, int offload_mask)
 		return ret;
 
 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_offload_set, -ENOTSUP);
-	(*dev->dev_ops->vlan_offload_set)(dev, mask);
-	return ret;
+	/*
+	 * Convert to the offload bitfield API just in case the underlying PMD
+	 * still supports it.
+	 */
+	rte_eth_convert_rx_offloads(dev->data->dev_conf.rxmode.offloads,
+				    &dev->data->dev_conf.rxmode);
+	ret = (*dev->dev_ops->vlan_offload_set)(dev, mask);
+	if (ret) {
+		/* hit an error, restore original values */
+		dev->data->dev_conf.rxmode.offloads = orig_offloads;
+		rte_eth_convert_rx_offloads(dev->data->dev_conf.rxmode.offloads,
+					    &dev->data->dev_conf.rxmode);
+	}
+
+	return eth_err(port_id, ret);
 }
 
 int
-rte_eth_dev_get_vlan_offload(uint8_t port_id)
+rte_eth_dev_get_vlan_offload(uint16_t port_id)
 {
 	struct rte_eth_dev *dev;
 	int ret = 0;
@@ -1803,33 +2628,35 @@ rte_eth_dev_get_vlan_offload(uint8_t port_id)
 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
 	dev = &rte_eth_devices[port_id];
 
-	if (dev->data->dev_conf.rxmode.hw_vlan_strip)
+	if (dev->data->dev_conf.rxmode.offloads &
+	    DEV_RX_OFFLOAD_VLAN_STRIP)
 		ret |= ETH_VLAN_STRIP_OFFLOAD;
 
-	if (dev->data->dev_conf.rxmode.hw_vlan_filter)
+	if (dev->data->dev_conf.rxmode.offloads &
+	    DEV_RX_OFFLOAD_VLAN_FILTER)
 		ret |= ETH_VLAN_FILTER_OFFLOAD;
 
-	if (dev->data->dev_conf.rxmode.hw_vlan_extend)
+	if (dev->data->dev_conf.rxmode.offloads &
+	    DEV_RX_OFFLOAD_VLAN_EXTEND)
 		ret |= ETH_VLAN_EXTEND_OFFLOAD;
 
 	return ret;
 }
 
 int
-rte_eth_dev_set_vlan_pvid(uint8_t port_id, uint16_t pvid, int on)
+rte_eth_dev_set_vlan_pvid(uint16_t port_id, uint16_t pvid, int on)
 {
 	struct rte_eth_dev *dev;
 
 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
 	dev = &rte_eth_devices[port_id];
 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_pvid_set, -ENOTSUP);
-	(*dev->dev_ops->vlan_pvid_set)(dev, pvid, on);
-	return 0;
+	return eth_err(port_id, (*dev->dev_ops->vlan_pvid_set)(dev, pvid, on));
 }
 
 int
-rte_eth_dev_flow_ctrl_get(uint8_t port_id, struct rte_eth_fc_conf *fc_conf)
+rte_eth_dev_flow_ctrl_get(uint16_t port_id, struct rte_eth_fc_conf *fc_conf)
 {
 	struct rte_eth_dev *dev;
 
@@ -1837,11 +2664,11 @@ rte_eth_dev_flow_ctrl_get(uint8_t port_id, struct rte_eth_fc_conf *fc_conf)
 	dev = &rte_eth_devices[port_id];
 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->flow_ctrl_get, -ENOTSUP);
 	memset(fc_conf, 0, sizeof(*fc_conf));
-	return (*dev->dev_ops->flow_ctrl_get)(dev, fc_conf);
+	return eth_err(port_id, (*dev->dev_ops->flow_ctrl_get)(dev, fc_conf));
 }
 
 int
-rte_eth_dev_flow_ctrl_set(uint8_t port_id, struct rte_eth_fc_conf *fc_conf)
+rte_eth_dev_flow_ctrl_set(uint16_t port_id, struct rte_eth_fc_conf *fc_conf)
 {
 	struct rte_eth_dev *dev;
 
@@ -1853,11 +2680,12 @@ rte_eth_dev_flow_ctrl_set(uint8_t port_id, struct rte_eth_fc_conf *fc_conf)
 	dev = &rte_eth_devices[port_id];
 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->flow_ctrl_set, -ENOTSUP);
-	return (*dev->dev_ops->flow_ctrl_set)(dev, fc_conf);
+	return eth_err(port_id, (*dev->dev_ops->flow_ctrl_set)(dev, fc_conf));
 }
 
 int
-rte_eth_dev_priority_flow_ctrl_set(uint8_t port_id, struct rte_eth_pfc_conf *pfc_conf)
+rte_eth_dev_priority_flow_ctrl_set(uint16_t port_id,
+				   struct rte_eth_pfc_conf *pfc_conf)
{ struct rte_eth_dev *dev; @@ -1870,7 +2698,8 @@ rte_eth_dev_priority_flow_ctrl_set(uint8_t port_id, struct rte_eth_pfc_conf *pfc dev = &rte_eth_devices[port_id]; /* High water, low water validation are device specific */ if (*dev->dev_ops->priority_flow_ctrl_set) - return (*dev->dev_ops->priority_flow_ctrl_set)(dev, pfc_conf); + return eth_err(port_id, (*dev->dev_ops->priority_flow_ctrl_set) + (dev, pfc_conf)); return -ENOTSUP; } @@ -1883,13 +2712,7 @@ rte_eth_check_reta_mask(struct rte_eth_rss_reta_entry64 *reta_conf, if (!reta_conf) return -EINVAL; - if (reta_size != RTE_ALIGN(reta_size, RTE_RETA_GROUP_SIZE)) { - RTE_PMD_DEBUG_TRACE("Invalid reta size, should be %u aligned\n", - RTE_RETA_GROUP_SIZE); - return -EINVAL; - } - - num = reta_size / RTE_RETA_GROUP_SIZE; + num = (reta_size + RTE_RETA_GROUP_SIZE - 1) / RTE_RETA_GROUP_SIZE; for (i = 0; i < num; i++) { if (reta_conf[i].mask) return 0; @@ -1929,7 +2752,7 @@ rte_eth_check_reta_entry(struct rte_eth_rss_reta_entry64 *reta_conf, } int -rte_eth_dev_rss_reta_update(uint8_t port_id, +rte_eth_dev_rss_reta_update(uint16_t port_id, struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size) { @@ -1951,11 +2774,12 @@ rte_eth_dev_rss_reta_update(uint8_t port_id, return ret; RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->reta_update, -ENOTSUP); - return (*dev->dev_ops->reta_update)(dev, reta_conf, reta_size); + return eth_err(port_id, (*dev->dev_ops->reta_update)(dev, reta_conf, + reta_size)); } int -rte_eth_dev_rss_reta_query(uint8_t port_id, +rte_eth_dev_rss_reta_query(uint16_t port_id, struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size) { @@ -1971,30 +2795,25 @@ rte_eth_dev_rss_reta_query(uint8_t port_id, dev = &rte_eth_devices[port_id]; RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->reta_query, -ENOTSUP); - return (*dev->dev_ops->reta_query)(dev, reta_conf, reta_size); + return eth_err(port_id, (*dev->dev_ops->reta_query)(dev, reta_conf, + reta_size)); } int -rte_eth_dev_rss_hash_update(uint8_t port_id, struct rte_eth_rss_conf *rss_conf) +rte_eth_dev_rss_hash_update(uint16_t port_id, + struct rte_eth_rss_conf *rss_conf) { struct rte_eth_dev *dev; - uint16_t rss_hash_protos; RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); - rss_hash_protos = rss_conf->rss_hf; - if ((rss_hash_protos != 0) && - ((rss_hash_protos & ETH_RSS_PROTO_MASK) == 0)) { - RTE_PMD_DEBUG_TRACE("Invalid rss_hash_protos=0x%x\n", - rss_hash_protos); - return -EINVAL; - } dev = &rte_eth_devices[port_id]; RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rss_hash_update, -ENOTSUP); - return (*dev->dev_ops->rss_hash_update)(dev, rss_conf); + return eth_err(port_id, (*dev->dev_ops->rss_hash_update)(dev, + rss_conf)); } int -rte_eth_dev_rss_hash_conf_get(uint8_t port_id, +rte_eth_dev_rss_hash_conf_get(uint16_t port_id, struct rte_eth_rss_conf *rss_conf) { struct rte_eth_dev *dev; @@ -2002,11 +2821,12 @@ rte_eth_dev_rss_hash_conf_get(uint8_t port_id, RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); dev = &rte_eth_devices[port_id]; RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rss_hash_conf_get, -ENOTSUP); - return (*dev->dev_ops->rss_hash_conf_get)(dev, rss_conf); + return eth_err(port_id, (*dev->dev_ops->rss_hash_conf_get)(dev, + rss_conf)); } int -rte_eth_dev_udp_tunnel_port_add(uint8_t port_id, +rte_eth_dev_udp_tunnel_port_add(uint16_t port_id, struct rte_eth_udp_tunnel *udp_tunnel) { struct rte_eth_dev *dev; @@ -2024,11 +2844,12 @@ rte_eth_dev_udp_tunnel_port_add(uint8_t port_id, dev = &rte_eth_devices[port_id]; RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->udp_tunnel_port_add, -ENOTSUP); - 
return (*dev->dev_ops->udp_tunnel_port_add)(dev, udp_tunnel); + return eth_err(port_id, (*dev->dev_ops->udp_tunnel_port_add)(dev, + udp_tunnel)); } int -rte_eth_dev_udp_tunnel_port_delete(uint8_t port_id, +rte_eth_dev_udp_tunnel_port_delete(uint16_t port_id, struct rte_eth_udp_tunnel *udp_tunnel) { struct rte_eth_dev *dev; @@ -2047,29 +2868,30 @@ rte_eth_dev_udp_tunnel_port_delete(uint8_t port_id, } RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->udp_tunnel_port_del, -ENOTSUP); - return (*dev->dev_ops->udp_tunnel_port_del)(dev, udp_tunnel); + return eth_err(port_id, (*dev->dev_ops->udp_tunnel_port_del)(dev, + udp_tunnel)); } int -rte_eth_led_on(uint8_t port_id) +rte_eth_led_on(uint16_t port_id) { struct rte_eth_dev *dev; RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); dev = &rte_eth_devices[port_id]; RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_led_on, -ENOTSUP); - return (*dev->dev_ops->dev_led_on)(dev); + return eth_err(port_id, (*dev->dev_ops->dev_led_on)(dev)); } int -rte_eth_led_off(uint8_t port_id) +rte_eth_led_off(uint16_t port_id) { struct rte_eth_dev *dev; RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); dev = &rte_eth_devices[port_id]; RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_led_off, -ENOTSUP); - return (*dev->dev_ops->dev_led_off)(dev); + return eth_err(port_id, (*dev->dev_ops->dev_led_off)(dev)); } /* @@ -2077,12 +2899,13 @@ rte_eth_led_off(uint8_t port_id) * an empty spot. */ static int -get_mac_addr_index(uint8_t port_id, const struct ether_addr *addr) +get_mac_addr_index(uint16_t port_id, const struct ether_addr *addr) { struct rte_eth_dev_info dev_info; struct rte_eth_dev *dev = &rte_eth_devices[port_id]; unsigned i; + RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); rte_eth_dev_info_get(port_id, &dev_info); for (i = 0; i < dev_info.max_mac_addrs; i++) @@ -2095,12 +2918,13 @@ get_mac_addr_index(uint8_t port_id, const struct ether_addr *addr) static const struct ether_addr null_mac_addr; int -rte_eth_dev_mac_addr_add(uint8_t port_id, struct ether_addr *addr, +rte_eth_dev_mac_addr_add(uint16_t port_id, struct ether_addr *addr, uint32_t pool) { struct rte_eth_dev *dev; int index; uint64_t pool_mask; + int ret; RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); dev = &rte_eth_devices[port_id]; @@ -2133,19 +2957,21 @@ rte_eth_dev_mac_addr_add(uint8_t port_id, struct ether_addr *addr, } /* Update NIC */ - (*dev->dev_ops->mac_addr_add)(dev, addr, index, pool); + ret = (*dev->dev_ops->mac_addr_add)(dev, addr, index, pool); - /* Update address in NIC data structure */ - ether_addr_copy(addr, &dev->data->mac_addrs[index]); + if (ret == 0) { + /* Update address in NIC data structure */ + ether_addr_copy(addr, &dev->data->mac_addrs[index]); - /* Update pool bitmap in NIC data structure */ - dev->data->mac_pool_sel[index] |= (1ULL << pool); + /* Update pool bitmap in NIC data structure */ + dev->data->mac_pool_sel[index] |= (1ULL << pool); + } - return 0; + return eth_err(port_id, ret); } int -rte_eth_dev_mac_addr_remove(uint8_t port_id, struct ether_addr *addr) +rte_eth_dev_mac_addr_remove(uint16_t port_id, struct ether_addr *addr) { struct rte_eth_dev *dev; int index; @@ -2174,7 +3000,7 @@ rte_eth_dev_mac_addr_remove(uint8_t port_id, struct ether_addr *addr) } int -rte_eth_dev_default_mac_addr_set(uint8_t port_id, struct ether_addr *addr) +rte_eth_dev_default_mac_addr_set(uint16_t port_id, struct ether_addr *addr) { struct rte_eth_dev *dev; @@ -2194,39 +3020,13 @@ rte_eth_dev_default_mac_addr_set(uint8_t port_id, struct ether_addr *addr) return 0; } -int 
-rte_eth_dev_set_vf_rxmode(uint8_t port_id, uint16_t vf, - uint16_t rx_mode, uint8_t on) -{ - uint16_t num_vfs; - struct rte_eth_dev *dev; - struct rte_eth_dev_info dev_info; - - RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); - - dev = &rte_eth_devices[port_id]; - rte_eth_dev_info_get(port_id, &dev_info); - - num_vfs = dev_info.max_vfs; - if (vf > num_vfs) { - RTE_PMD_DEBUG_TRACE("set VF RX mode:invalid VF id %d\n", vf); - return -EINVAL; - } - - if (rx_mode == 0) { - RTE_PMD_DEBUG_TRACE("set VF RX mode:mode mask ca not be zero\n"); - return -EINVAL; - } - RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_vf_rx_mode, -ENOTSUP); - return (*dev->dev_ops->set_vf_rx_mode)(dev, vf, rx_mode, on); -} /* * Returns index into MAC address array of addr. Use 00:00:00:00:00:00 to find * an empty spot. */ static int -get_hash_mac_addr_index(uint8_t port_id, const struct ether_addr *addr) +get_hash_mac_addr_index(uint16_t port_id, const struct ether_addr *addr) { struct rte_eth_dev_info dev_info; struct rte_eth_dev *dev = &rte_eth_devices[port_id]; @@ -2245,7 +3045,7 @@ get_hash_mac_addr_index(uint8_t port_id, const struct ether_addr *addr) } int -rte_eth_dev_uc_hash_table_set(uint8_t port_id, struct ether_addr *addr, +rte_eth_dev_uc_hash_table_set(uint16_t port_id, struct ether_addr *addr, uint8_t on) { int index; @@ -2255,107 +3055,49 @@ rte_eth_dev_uc_hash_table_set(uint8_t port_id, struct ether_addr *addr, RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); dev = &rte_eth_devices[port_id]; - if (is_zero_ether_addr(addr)) { - RTE_PMD_DEBUG_TRACE("port %d: Cannot add NULL MAC address\n", - port_id); - return -EINVAL; - } - - index = get_hash_mac_addr_index(port_id, addr); - /* Check if it's already there, and do nothing */ - if ((index >= 0) && (on)) - return 0; - - if (index < 0) { - if (!on) { - RTE_PMD_DEBUG_TRACE("port %d: the MAC address was not " - "set in UTA\n", port_id); - return -EINVAL; - } - - index = get_hash_mac_addr_index(port_id, &null_mac_addr); - if (index < 0) { - RTE_PMD_DEBUG_TRACE("port %d: MAC address array full\n", - port_id); - return -ENOSPC; - } - } - - RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->uc_hash_table_set, -ENOTSUP); - ret = (*dev->dev_ops->uc_hash_table_set)(dev, addr, on); - if (ret == 0) { - /* Update address in NIC data structure */ - if (on) - ether_addr_copy(addr, - &dev->data->hash_mac_addrs[index]); - else - ether_addr_copy(&null_mac_addr, - &dev->data->hash_mac_addrs[index]); - } - - return ret; -} - -int -rte_eth_dev_uc_all_hash_table_set(uint8_t port_id, uint8_t on) -{ - struct rte_eth_dev *dev; - - RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); - - dev = &rte_eth_devices[port_id]; - - RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->uc_all_hash_table_set, -ENOTSUP); - return (*dev->dev_ops->uc_all_hash_table_set)(dev, on); -} - -int -rte_eth_dev_set_vf_rx(uint8_t port_id, uint16_t vf, uint8_t on) -{ - uint16_t num_vfs; - struct rte_eth_dev *dev; - struct rte_eth_dev_info dev_info; - - RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); - - dev = &rte_eth_devices[port_id]; - rte_eth_dev_info_get(port_id, &dev_info); - - num_vfs = dev_info.max_vfs; - if (vf > num_vfs) { - RTE_PMD_DEBUG_TRACE("port %d: invalid vf id\n", port_id); - return -EINVAL; - } - - RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_vf_rx, -ENOTSUP); - return (*dev->dev_ops->set_vf_rx)(dev, vf, on); -} - -int -rte_eth_dev_set_vf_tx(uint8_t port_id, uint16_t vf, uint8_t on) -{ - uint16_t num_vfs; - struct rte_eth_dev *dev; - struct rte_eth_dev_info dev_info; - - RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, 
-ENODEV); - - dev = &rte_eth_devices[port_id]; - rte_eth_dev_info_get(port_id, &dev_info); - - num_vfs = dev_info.max_vfs; - if (vf > num_vfs) { - RTE_PMD_DEBUG_TRACE("set pool tx:invalid pool id=%d\n", vf); + if (is_zero_ether_addr(addr)) { + RTE_PMD_DEBUG_TRACE("port %d: Cannot add NULL MAC address\n", + port_id); return -EINVAL; } - RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_vf_tx, -ENOTSUP); - return (*dev->dev_ops->set_vf_tx)(dev, vf, on); + index = get_hash_mac_addr_index(port_id, addr); + /* Check if it's already there, and do nothing */ + if ((index >= 0) && on) + return 0; + + if (index < 0) { + if (!on) { + RTE_PMD_DEBUG_TRACE("port %d: the MAC address was not " + "set in UTA\n", port_id); + return -EINVAL; + } + + index = get_hash_mac_addr_index(port_id, &null_mac_addr); + if (index < 0) { + RTE_PMD_DEBUG_TRACE("port %d: MAC address array full\n", + port_id); + return -ENOSPC; + } + } + + RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->uc_hash_table_set, -ENOTSUP); + ret = (*dev->dev_ops->uc_hash_table_set)(dev, addr, on); + if (ret == 0) { + /* Update address in NIC data structure */ + if (on) + ether_addr_copy(addr, + &dev->data->hash_mac_addrs[index]); + else + ether_addr_copy(&null_mac_addr, + &dev->data->hash_mac_addrs[index]); + } + + return eth_err(port_id, ret); } int -rte_eth_dev_set_vf_vlan_filter(uint8_t port_id, uint16_t vlan_id, - uint64_t vf_mask, uint8_t vlan_on) +rte_eth_dev_uc_all_hash_table_set(uint16_t port_id, uint8_t on) { struct rte_eth_dev *dev; @@ -2363,23 +3105,12 @@ rte_eth_dev_set_vf_vlan_filter(uint8_t port_id, uint16_t vlan_id, dev = &rte_eth_devices[port_id]; - if (vlan_id > ETHER_MAX_VLAN_ID) { - RTE_PMD_DEBUG_TRACE("VF VLAN filter:invalid VLAN id=%d\n", - vlan_id); - return -EINVAL; - } - - if (vf_mask == 0) { - RTE_PMD_DEBUG_TRACE("VF VLAN filter:pool_mask can not be 0\n"); - return -EINVAL; - } - - RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_vf_vlan_filter, -ENOTSUP); - return (*dev->dev_ops->set_vf_vlan_filter)(dev, vlan_id, - vf_mask, vlan_on); + RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->uc_all_hash_table_set, -ENOTSUP); + return eth_err(port_id, (*dev->dev_ops->uc_all_hash_table_set)(dev, + on)); } -int rte_eth_set_queue_rate_limit(uint8_t port_id, uint16_t queue_idx, +int rte_eth_set_queue_rate_limit(uint16_t port_id, uint16_t queue_idx, uint16_t tx_rate) { struct rte_eth_dev *dev; @@ -2406,48 +3137,16 @@ int rte_eth_set_queue_rate_limit(uint8_t port_id, uint16_t queue_idx, } RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_queue_rate_limit, -ENOTSUP); - return (*dev->dev_ops->set_queue_rate_limit)(dev, queue_idx, tx_rate); -} - -int rte_eth_set_vf_rate_limit(uint8_t port_id, uint16_t vf, uint16_t tx_rate, - uint64_t q_msk) -{ - struct rte_eth_dev *dev; - struct rte_eth_dev_info dev_info; - struct rte_eth_link link; - - if (q_msk == 0) - return 0; - - RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); - - dev = &rte_eth_devices[port_id]; - rte_eth_dev_info_get(port_id, &dev_info); - link = dev->data->dev_link; - - if (vf > dev_info.max_vfs) { - RTE_PMD_DEBUG_TRACE("set VF rate limit:port %d: " - "invalid vf id=%d\n", port_id, vf); - return -EINVAL; - } - - if (tx_rate > link.link_speed) { - RTE_PMD_DEBUG_TRACE("set VF rate limit:invalid tx_rate=%d, " - "bigger than link speed= %d\n", - tx_rate, link.link_speed); - return -EINVAL; - } - - RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_vf_rate_limit, -ENOTSUP); - return (*dev->dev_ops->set_vf_rate_limit)(dev, vf, tx_rate, q_msk); + return eth_err(port_id, (*dev->dev_ops->set_queue_rate_limit)(dev, + queue_idx, 
tx_rate)); } int -rte_eth_mirror_rule_set(uint8_t port_id, +rte_eth_mirror_rule_set(uint16_t port_id, struct rte_eth_mirror_conf *mirror_conf, uint8_t rule_id, uint8_t on) { - struct rte_eth_dev *dev = &rte_eth_devices[port_id]; + struct rte_eth_dev *dev; RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); if (mirror_conf->rule_type == 0) { @@ -2477,111 +3176,158 @@ rte_eth_mirror_rule_set(uint8_t port_id, dev = &rte_eth_devices[port_id]; RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mirror_rule_set, -ENOTSUP); - return (*dev->dev_ops->mirror_rule_set)(dev, mirror_conf, rule_id, on); + return eth_err(port_id, (*dev->dev_ops->mirror_rule_set)(dev, + mirror_conf, rule_id, on)); } int -rte_eth_mirror_rule_reset(uint8_t port_id, uint8_t rule_id) +rte_eth_mirror_rule_reset(uint16_t port_id, uint8_t rule_id) { - struct rte_eth_dev *dev = &rte_eth_devices[port_id]; + struct rte_eth_dev *dev; RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); dev = &rte_eth_devices[port_id]; RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mirror_rule_reset, -ENOTSUP); - return (*dev->dev_ops->mirror_rule_reset)(dev, rule_id); + return eth_err(port_id, (*dev->dev_ops->mirror_rule_reset)(dev, + rule_id)); +} + +RTE_INIT(eth_dev_init_cb_lists) +{ + int i; + + for (i = 0; i < RTE_MAX_ETHPORTS; i++) + TAILQ_INIT(&rte_eth_devices[i].link_intr_cbs); } int -rte_eth_dev_callback_register(uint8_t port_id, +rte_eth_dev_callback_register(uint16_t port_id, enum rte_eth_event_type event, rte_eth_dev_cb_fn cb_fn, void *cb_arg) { struct rte_eth_dev *dev; struct rte_eth_dev_callback *user_cb; + uint32_t next_port; /* size is 32-bit to prevent loop wrap-around */ + uint16_t last_port; if (!cb_fn) return -EINVAL; - RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL); + if (!rte_eth_dev_is_valid_port(port_id) && port_id != RTE_ETH_ALL) { + RTE_LOG(ERR, EAL, "Invalid port_id=%d\n", port_id); + return -EINVAL; + } + + if (port_id == RTE_ETH_ALL) { + next_port = 0; + last_port = RTE_MAX_ETHPORTS - 1; + } else { + next_port = last_port = port_id; + } - dev = &rte_eth_devices[port_id]; rte_spinlock_lock(&rte_eth_dev_cb_lock); - TAILQ_FOREACH(user_cb, &(dev->link_intr_cbs), next) { - if (user_cb->cb_fn == cb_fn && - user_cb->cb_arg == cb_arg && - user_cb->event == event) { - break; + do { + dev = &rte_eth_devices[next_port]; + + TAILQ_FOREACH(user_cb, &(dev->link_intr_cbs), next) { + if (user_cb->cb_fn == cb_fn && + user_cb->cb_arg == cb_arg && + user_cb->event == event) { + break; + } } - } - /* create a new callback. */ - if (user_cb == NULL) { - user_cb = rte_zmalloc("INTR_USER_CALLBACK", - sizeof(struct rte_eth_dev_callback), 0); - if (user_cb != NULL) { - user_cb->cb_fn = cb_fn; - user_cb->cb_arg = cb_arg; - user_cb->event = event; - TAILQ_INSERT_TAIL(&(dev->link_intr_cbs), user_cb, next); + /* create a new callback. */ + if (user_cb == NULL) { + user_cb = rte_zmalloc("INTR_USER_CALLBACK", + sizeof(struct rte_eth_dev_callback), 0); + if (user_cb != NULL) { + user_cb->cb_fn = cb_fn; + user_cb->cb_arg = cb_arg; + user_cb->event = event; + TAILQ_INSERT_TAIL(&(dev->link_intr_cbs), + user_cb, next); + } else { + rte_spinlock_unlock(&rte_eth_dev_cb_lock); + rte_eth_dev_callback_unregister(port_id, event, + cb_fn, cb_arg); + return -ENOMEM; + } + } - } + } while (++next_port <= last_port); rte_spinlock_unlock(&rte_eth_dev_cb_lock); - return (user_cb == NULL) ? 
-ENOMEM : 0; + return 0; } int -rte_eth_dev_callback_unregister(uint8_t port_id, +rte_eth_dev_callback_unregister(uint16_t port_id, enum rte_eth_event_type event, rte_eth_dev_cb_fn cb_fn, void *cb_arg) { int ret; struct rte_eth_dev *dev; struct rte_eth_dev_callback *cb, *next; + uint32_t next_port; /* size is 32-bit to prevent loop wrap-around */ + uint16_t last_port; if (!cb_fn) return -EINVAL; - RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL); + if (!rte_eth_dev_is_valid_port(port_id) && port_id != RTE_ETH_ALL) { + RTE_LOG(ERR, EAL, "Invalid port_id=%d\n", port_id); + return -EINVAL; + } + + if (port_id == RTE_ETH_ALL) { + next_port = 0; + last_port = RTE_MAX_ETHPORTS - 1; + } else { + next_port = last_port = port_id; + } - dev = &rte_eth_devices[port_id]; rte_spinlock_lock(&rte_eth_dev_cb_lock); - ret = 0; - for (cb = TAILQ_FIRST(&dev->link_intr_cbs); cb != NULL; cb = next) { + do { + dev = &rte_eth_devices[next_port]; + ret = 0; + for (cb = TAILQ_FIRST(&dev->link_intr_cbs); cb != NULL; + cb = next) { - next = TAILQ_NEXT(cb, next); + next = TAILQ_NEXT(cb, next); - if (cb->cb_fn != cb_fn || cb->event != event || - (cb->cb_arg != (void *)-1 && - cb->cb_arg != cb_arg)) - continue; + if (cb->cb_fn != cb_fn || cb->event != event || + (cb->cb_arg != (void *)-1 && cb->cb_arg != cb_arg)) + continue; - /* - * if this callback is not executing right now, - * then remove it. - */ - if (cb->active == 0) { - TAILQ_REMOVE(&(dev->link_intr_cbs), cb, next); - rte_free(cb); - } else { - ret = -EAGAIN; + /* + * if this callback is not executing right now, + * then remove it. + */ + if (cb->active == 0) { + TAILQ_REMOVE(&(dev->link_intr_cbs), cb, next); + rte_free(cb); + } else { + ret = -EAGAIN; + } } - } + } while (++next_port <= last_port); rte_spinlock_unlock(&rte_eth_dev_cb_lock); return ret; } -void +int _rte_eth_dev_callback_process(struct rte_eth_dev *dev, - enum rte_eth_event_type event, void *cb_arg) + enum rte_eth_event_type event, void *ret_param) { struct rte_eth_dev_callback *cb_lst; struct rte_eth_dev_callback dev_cb; + int rc = 0; rte_spinlock_lock(&rte_eth_dev_cb_lock); TAILQ_FOREACH(cb_lst, &(dev->link_intr_cbs), next) { @@ -2589,20 +3335,21 @@ _rte_eth_dev_callback_process(struct rte_eth_dev *dev, continue; dev_cb = *cb_lst; cb_lst->active = 1; - if (cb_arg != NULL) - dev_cb.cb_arg = (void *) cb_arg; + if (ret_param != NULL) + dev_cb.ret_param = ret_param; rte_spinlock_unlock(&rte_eth_dev_cb_lock); - dev_cb.cb_fn(dev->data->port_id, dev_cb.event, - dev_cb.cb_arg); + rc = dev_cb.cb_fn(dev->data->port_id, dev_cb.event, + dev_cb.cb_arg, dev_cb.ret_param); rte_spinlock_lock(&rte_eth_dev_cb_lock); cb_lst->active = 0; } rte_spinlock_unlock(&rte_eth_dev_cb_lock); + return rc; } int -rte_eth_dev_rx_intr_ctl(uint8_t port_id, int epfd, int op, void *data) +rte_eth_dev_rx_intr_ctl(uint16_t port_id, int epfd, int op, void *data) { uint32_t vec; struct rte_eth_dev *dev; @@ -2613,7 +3360,13 @@ rte_eth_dev_rx_intr_ctl(uint8_t port_id, int epfd, int op, void *data) RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); dev = &rte_eth_devices[port_id]; - intr_handle = &dev->pci_dev->intr_handle; + + if (!dev->intr_handle) { + RTE_PMD_DEBUG_TRACE("RX Intr handle unset\n"); + return -ENOTSUP; + } + + intr_handle = dev->intr_handle; if (!intr_handle->intr_vec) { RTE_PMD_DEBUG_TRACE("RX Intr vector unset\n"); return -EPERM; @@ -2641,23 +3394,18 @@ rte_eth_dma_zone_reserve(const struct rte_eth_dev *dev, const char *ring_name, const struct rte_memzone *mz; snprintf(z_name, sizeof(z_name), "%s_%s_%d_%d", - 
dev->driver->pci_drv.driver.name, ring_name, + dev->device->driver->name, ring_name, dev->data->port_id, queue_id); mz = rte_memzone_lookup(z_name); if (mz) return mz; - if (rte_xen_dom0_supported()) - return rte_memzone_reserve_bounded(z_name, size, socket_id, - 0, align, RTE_PGSIZE_2M); - else - return rte_memzone_reserve_aligned(z_name, size, socket_id, - 0, align); + return rte_memzone_reserve_aligned(z_name, size, socket_id, 0, align); } int -rte_eth_dev_rx_intr_ctl_q(uint8_t port_id, uint16_t queue_id, +rte_eth_dev_rx_intr_ctl_q(uint16_t port_id, uint16_t queue_id, int epfd, int op, void *data) { uint32_t vec; @@ -2673,7 +3421,12 @@ rte_eth_dev_rx_intr_ctl_q(uint8_t port_id, uint16_t queue_id, return -EINVAL; } - intr_handle = &dev->pci_dev->intr_handle; + if (!dev->intr_handle) { + RTE_PMD_DEBUG_TRACE("RX Intr handle unset\n"); + return -ENOTSUP; + } + + intr_handle = dev->intr_handle; if (!intr_handle->intr_vec) { RTE_PMD_DEBUG_TRACE("RX Intr vector unset\n"); return -EPERM; @@ -2692,7 +3445,7 @@ rte_eth_dev_rx_intr_ctl_q(uint8_t port_id, uint16_t queue_id, } int -rte_eth_dev_rx_intr_enable(uint8_t port_id, +rte_eth_dev_rx_intr_enable(uint16_t port_id, uint16_t queue_id) { struct rte_eth_dev *dev; @@ -2702,11 +3455,12 @@ rte_eth_dev_rx_intr_enable(uint8_t port_id, dev = &rte_eth_devices[port_id]; RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_intr_enable, -ENOTSUP); - return (*dev->dev_ops->rx_queue_intr_enable)(dev, queue_id); + return eth_err(port_id, (*dev->dev_ops->rx_queue_intr_enable)(dev, + queue_id)); } int -rte_eth_dev_rx_intr_disable(uint8_t port_id, +rte_eth_dev_rx_intr_disable(uint16_t port_id, uint16_t queue_id) { struct rte_eth_dev *dev; @@ -2716,148 +3470,173 @@ rte_eth_dev_rx_intr_disable(uint8_t port_id, dev = &rte_eth_devices[port_id]; RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_intr_disable, -ENOTSUP); - return (*dev->dev_ops->rx_queue_intr_disable)(dev, queue_id); -} - -#ifdef RTE_NIC_BYPASS -int rte_eth_dev_bypass_init(uint8_t port_id) -{ - struct rte_eth_dev *dev; - - RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); - - dev = &rte_eth_devices[port_id]; - RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_init, -ENOTSUP); - (*dev->dev_ops->bypass_init)(dev); - return 0; -} - -int -rte_eth_dev_bypass_state_show(uint8_t port_id, uint32_t *state) -{ - struct rte_eth_dev *dev; - - RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); - - dev = &rte_eth_devices[port_id]; - RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_state_show, -ENOTSUP); - (*dev->dev_ops->bypass_state_show)(dev, state); - return 0; -} - -int -rte_eth_dev_bypass_state_set(uint8_t port_id, uint32_t *new_state) -{ - struct rte_eth_dev *dev; - - RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); - - dev = &rte_eth_devices[port_id]; - RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_state_set, -ENOTSUP); - (*dev->dev_ops->bypass_state_set)(dev, new_state); - return 0; -} - -int -rte_eth_dev_bypass_event_show(uint8_t port_id, uint32_t event, uint32_t *state) -{ - struct rte_eth_dev *dev; - - RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); - - dev = &rte_eth_devices[port_id]; - RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_state_show, -ENOTSUP); - (*dev->dev_ops->bypass_event_show)(dev, event, state); - return 0; + return eth_err(port_id, (*dev->dev_ops->rx_queue_intr_disable)(dev, + queue_id)); } -int -rte_eth_dev_bypass_event_store(uint8_t port_id, uint32_t event, uint32_t state) -{ - struct rte_eth_dev *dev; - - RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); - - dev = &rte_eth_devices[port_id]; - 
- RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_event_set, -ENOTSUP); - (*dev->dev_ops->bypass_event_set)(dev, event, state); - return 0; -} int -rte_eth_dev_wd_timeout_store(uint8_t port_id, uint32_t timeout) +rte_eth_dev_filter_supported(uint16_t port_id, + enum rte_filter_type filter_type) { struct rte_eth_dev *dev; RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); dev = &rte_eth_devices[port_id]; - - RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_wd_timeout_set, -ENOTSUP); - (*dev->dev_ops->bypass_wd_timeout_set)(dev, timeout); - return 0; + RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->filter_ctrl, -ENOTSUP); + return (*dev->dev_ops->filter_ctrl)(dev, filter_type, + RTE_ETH_FILTER_NOP, NULL); } int -rte_eth_dev_bypass_ver_show(uint8_t port_id, uint32_t *ver) -{ - struct rte_eth_dev *dev; - - RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); - - dev = &rte_eth_devices[port_id]; - - RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_ver_show, -ENOTSUP); - (*dev->dev_ops->bypass_ver_show)(dev, ver); - return 0; -} +rte_eth_dev_filter_ctrl_v22(uint16_t port_id, + enum rte_filter_type filter_type, + enum rte_filter_op filter_op, void *arg); int -rte_eth_dev_bypass_wd_timeout_show(uint8_t port_id, uint32_t *wd_timeout) -{ - struct rte_eth_dev *dev; - - RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); +rte_eth_dev_filter_ctrl_v22(uint16_t port_id, + enum rte_filter_type filter_type, + enum rte_filter_op filter_op, void *arg) +{ + struct rte_eth_fdir_info_v22 { + enum rte_fdir_mode mode; + struct rte_eth_fdir_masks mask; + struct rte_eth_fdir_flex_conf flex_conf; + uint32_t guarant_spc; + uint32_t best_spc; + uint32_t flow_types_mask[1]; + uint32_t max_flexpayload; + uint32_t flex_payload_unit; + uint32_t max_flex_payload_segment_num; + uint16_t flex_payload_limit; + uint32_t flex_bitmask_unit; + uint32_t max_flex_bitmask_num; + }; - dev = &rte_eth_devices[port_id]; + struct rte_eth_hash_global_conf_v22 { + enum rte_eth_hash_function hash_func; + uint32_t sym_hash_enable_mask[1]; + uint32_t valid_bit_mask[1]; + }; - RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_wd_timeout_show, -ENOTSUP); - (*dev->dev_ops->bypass_wd_timeout_show)(dev, wd_timeout); - return 0; -} + struct rte_eth_hash_filter_info_v22 { + enum rte_eth_hash_filter_info_type info_type; + union { + uint8_t enable; + struct rte_eth_hash_global_conf_v22 global_conf; + struct rte_eth_input_set_conf input_set_conf; + } info; + }; -int -rte_eth_dev_bypass_wd_reset(uint8_t port_id) -{ struct rte_eth_dev *dev; RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); dev = &rte_eth_devices[port_id]; - - RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_wd_reset, -ENOTSUP); - (*dev->dev_ops->bypass_wd_reset)(dev); - return 0; + RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->filter_ctrl, -ENOTSUP); + if (filter_op == RTE_ETH_FILTER_INFO) { + int retval; + struct rte_eth_fdir_info_v22 *fdir_info_v22; + struct rte_eth_fdir_info fdir_info; + + fdir_info_v22 = (struct rte_eth_fdir_info_v22 *)arg; + + retval = (*dev->dev_ops->filter_ctrl)(dev, filter_type, + filter_op, (void *)&fdir_info); + fdir_info_v22->mode = fdir_info.mode; + fdir_info_v22->mask = fdir_info.mask; + fdir_info_v22->flex_conf = fdir_info.flex_conf; + fdir_info_v22->guarant_spc = fdir_info.guarant_spc; + fdir_info_v22->best_spc = fdir_info.best_spc; + fdir_info_v22->flow_types_mask[0] = + (uint32_t)fdir_info.flow_types_mask[0]; + fdir_info_v22->max_flexpayload = fdir_info.max_flexpayload; + fdir_info_v22->flex_payload_unit = fdir_info.flex_payload_unit; + fdir_info_v22->max_flex_payload_segment_num = + 
fdir_info.max_flex_payload_segment_num; + fdir_info_v22->flex_payload_limit = + fdir_info.flex_payload_limit; + fdir_info_v22->flex_bitmask_unit = fdir_info.flex_bitmask_unit; + fdir_info_v22->max_flex_bitmask_num = + fdir_info.max_flex_bitmask_num; + return retval; + } else if (filter_op == RTE_ETH_FILTER_GET) { + int retval; + struct rte_eth_hash_filter_info f_info; + struct rte_eth_hash_filter_info_v22 *f_info_v22 = + (struct rte_eth_hash_filter_info_v22 *)arg; + + f_info.info_type = f_info_v22->info_type; + retval = (*dev->dev_ops->filter_ctrl)(dev, filter_type, + filter_op, (void *)&f_info); + + switch (f_info_v22->info_type) { + case RTE_ETH_HASH_FILTER_SYM_HASH_ENA_PER_PORT: + f_info_v22->info.enable = f_info.info.enable; + break; + case RTE_ETH_HASH_FILTER_GLOBAL_CONFIG: + f_info_v22->info.global_conf.hash_func = + f_info.info.global_conf.hash_func; + f_info_v22->info.global_conf.sym_hash_enable_mask[0] = + (uint32_t) + f_info.info.global_conf.sym_hash_enable_mask[0]; + f_info_v22->info.global_conf.valid_bit_mask[0] = + (uint32_t) + f_info.info.global_conf.valid_bit_mask[0]; + break; + case RTE_ETH_HASH_FILTER_INPUT_SET_SELECT: + f_info_v22->info.input_set_conf = + f_info.info.input_set_conf; + break; + default: + break; + } + return retval; + } else if (filter_op == RTE_ETH_FILTER_SET) { + struct rte_eth_hash_filter_info f_info; + struct rte_eth_hash_filter_info_v22 *f_v22 = + (struct rte_eth_hash_filter_info_v22 *)arg; + + f_info.info_type = f_v22->info_type; + switch (f_v22->info_type) { + case RTE_ETH_HASH_FILTER_SYM_HASH_ENA_PER_PORT: + f_info.info.enable = f_v22->info.enable; + break; + case RTE_ETH_HASH_FILTER_GLOBAL_CONFIG: + f_info.info.global_conf.hash_func = + f_v22->info.global_conf.hash_func; + f_info.info.global_conf.sym_hash_enable_mask[0] = + (uint32_t) + f_v22->info.global_conf.sym_hash_enable_mask[0]; + f_info.info.global_conf.valid_bit_mask[0] = + (uint32_t) + f_v22->info.global_conf.valid_bit_mask[0]; + break; + case RTE_ETH_HASH_FILTER_INPUT_SET_SELECT: + f_info.info.input_set_conf = + f_v22->info.input_set_conf; + break; + default: + break; + } + return (*dev->dev_ops->filter_ctrl)(dev, filter_type, filter_op, + (void *)&f_info); + } else + return (*dev->dev_ops->filter_ctrl)(dev, filter_type, filter_op, + arg); } -#endif +VERSION_SYMBOL(rte_eth_dev_filter_ctrl, _v22, 2.2); int -rte_eth_dev_filter_supported(uint8_t port_id, enum rte_filter_type filter_type) -{ - struct rte_eth_dev *dev; - - RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); - - dev = &rte_eth_devices[port_id]; - RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->filter_ctrl, -ENOTSUP); - return (*dev->dev_ops->filter_ctrl)(dev, filter_type, - RTE_ETH_FILTER_NOP, NULL); -} +rte_eth_dev_filter_ctrl_v1802(uint16_t port_id, + enum rte_filter_type filter_type, + enum rte_filter_op filter_op, void *arg); int -rte_eth_dev_filter_ctrl(uint8_t port_id, enum rte_filter_type filter_type, - enum rte_filter_op filter_op, void *arg) +rte_eth_dev_filter_ctrl_v1802(uint16_t port_id, + enum rte_filter_type filter_type, + enum rte_filter_op filter_op, void *arg) { struct rte_eth_dev *dev; @@ -2865,11 +3644,17 @@ rte_eth_dev_filter_ctrl(uint8_t port_id, enum rte_filter_type filter_type, dev = &rte_eth_devices[port_id]; RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->filter_ctrl, -ENOTSUP); - return (*dev->dev_ops->filter_ctrl)(dev, filter_type, filter_op, arg); + return eth_err(port_id, (*dev->dev_ops->filter_ctrl)(dev, filter_type, + filter_op, arg)); } +BIND_DEFAULT_SYMBOL(rte_eth_dev_filter_ctrl, _v1802, 18.02); 
+MAP_STATIC_SYMBOL(int rte_eth_dev_filter_ctrl(uint16_t port_id, + enum rte_filter_type filter_type, + enum rte_filter_op filter_op, void *arg), + rte_eth_dev_filter_ctrl_v1802); void * -rte_eth_add_rx_callback(uint8_t port_id, uint16_t queue_id, +rte_eth_add_rx_callback(uint16_t port_id, uint16_t queue_id, rte_rx_callback_fn fn, void *user_param) { #ifndef RTE_ETHDEV_RXTX_CALLBACKS @@ -2911,7 +3696,7 @@ rte_eth_add_rx_callback(uint8_t port_id, uint16_t queue_id, } void * -rte_eth_add_first_rx_callback(uint8_t port_id, uint16_t queue_id, +rte_eth_add_first_rx_callback(uint16_t port_id, uint16_t queue_id, rte_rx_callback_fn fn, void *user_param) { #ifndef RTE_ETHDEV_RXTX_CALLBACKS @@ -2946,7 +3731,7 @@ rte_eth_add_first_rx_callback(uint8_t port_id, uint16_t queue_id, } void * -rte_eth_add_tx_callback(uint8_t port_id, uint16_t queue_id, +rte_eth_add_tx_callback(uint16_t port_id, uint16_t queue_id, rte_tx_callback_fn fn, void *user_param) { #ifndef RTE_ETHDEV_RXTX_CALLBACKS @@ -2989,7 +3774,7 @@ rte_eth_add_tx_callback(uint8_t port_id, uint16_t queue_id, } int -rte_eth_remove_rx_callback(uint8_t port_id, uint16_t queue_id, +rte_eth_remove_rx_callback(uint16_t port_id, uint16_t queue_id, struct rte_eth_rxtx_callback *user_cb) { #ifndef RTE_ETHDEV_RXTX_CALLBACKS @@ -3023,7 +3808,7 @@ rte_eth_remove_rx_callback(uint8_t port_id, uint16_t queue_id, } int -rte_eth_remove_tx_callback(uint8_t port_id, uint16_t queue_id, +rte_eth_remove_tx_callback(uint16_t port_id, uint16_t queue_id, struct rte_eth_rxtx_callback *user_cb) { #ifndef RTE_ETHDEV_RXTX_CALLBACKS @@ -3057,7 +3842,7 @@ rte_eth_remove_tx_callback(uint8_t port_id, uint16_t queue_id, } int -rte_eth_rx_queue_info_get(uint8_t port_id, uint16_t queue_id, +rte_eth_rx_queue_info_get(uint16_t port_id, uint16_t queue_id, struct rte_eth_rxq_info *qinfo) { struct rte_eth_dev *dev; @@ -3081,7 +3866,7 @@ rte_eth_rx_queue_info_get(uint8_t port_id, uint16_t queue_id, } int -rte_eth_tx_queue_info_get(uint8_t port_id, uint16_t queue_id, +rte_eth_tx_queue_info_get(uint16_t port_id, uint16_t queue_id, struct rte_eth_txq_info *qinfo) { struct rte_eth_dev *dev; @@ -3105,7 +3890,7 @@ rte_eth_tx_queue_info_get(uint8_t port_id, uint16_t queue_id, } int -rte_eth_dev_set_mc_addr_list(uint8_t port_id, +rte_eth_dev_set_mc_addr_list(uint16_t port_id, struct ether_addr *mc_addr_set, uint32_t nb_mc_addr) { @@ -3115,11 +3900,12 @@ rte_eth_dev_set_mc_addr_list(uint8_t port_id, dev = &rte_eth_devices[port_id]; RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_mc_addr_list, -ENOTSUP); - return dev->dev_ops->set_mc_addr_list(dev, mc_addr_set, nb_mc_addr); + return eth_err(port_id, dev->dev_ops->set_mc_addr_list(dev, + mc_addr_set, nb_mc_addr)); } int -rte_eth_timesync_enable(uint8_t port_id) +rte_eth_timesync_enable(uint16_t port_id) { struct rte_eth_dev *dev; @@ -3127,11 +3913,11 @@ rte_eth_timesync_enable(uint8_t port_id) dev = &rte_eth_devices[port_id]; RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_enable, -ENOTSUP); - return (*dev->dev_ops->timesync_enable)(dev); + return eth_err(port_id, (*dev->dev_ops->timesync_enable)(dev)); } int -rte_eth_timesync_disable(uint8_t port_id) +rte_eth_timesync_disable(uint16_t port_id) { struct rte_eth_dev *dev; @@ -3139,11 +3925,11 @@ rte_eth_timesync_disable(uint8_t port_id) dev = &rte_eth_devices[port_id]; RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_disable, -ENOTSUP); - return (*dev->dev_ops->timesync_disable)(dev); + return eth_err(port_id, (*dev->dev_ops->timesync_disable)(dev)); } int -rte_eth_timesync_read_rx_timestamp(uint8_t 
port_id, struct timespec *timestamp, +rte_eth_timesync_read_rx_timestamp(uint16_t port_id, struct timespec *timestamp, uint32_t flags) { struct rte_eth_dev *dev; @@ -3152,11 +3938,13 @@ rte_eth_timesync_read_rx_timestamp(uint8_t port_id, struct timespec *timestamp, dev = &rte_eth_devices[port_id]; RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_rx_timestamp, -ENOTSUP); - return (*dev->dev_ops->timesync_read_rx_timestamp)(dev, timestamp, flags); + return eth_err(port_id, (*dev->dev_ops->timesync_read_rx_timestamp) + (dev, timestamp, flags)); } int -rte_eth_timesync_read_tx_timestamp(uint8_t port_id, struct timespec *timestamp) +rte_eth_timesync_read_tx_timestamp(uint16_t port_id, + struct timespec *timestamp) { struct rte_eth_dev *dev; @@ -3164,11 +3952,12 @@ rte_eth_timesync_read_tx_timestamp(uint8_t port_id, struct timespec *timestamp) dev = &rte_eth_devices[port_id]; RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_tx_timestamp, -ENOTSUP); - return (*dev->dev_ops->timesync_read_tx_timestamp)(dev, timestamp); + return eth_err(port_id, (*dev->dev_ops->timesync_read_tx_timestamp) + (dev, timestamp)); } int -rte_eth_timesync_adjust_time(uint8_t port_id, int64_t delta) +rte_eth_timesync_adjust_time(uint16_t port_id, int64_t delta) { struct rte_eth_dev *dev; @@ -3176,11 +3965,12 @@ rte_eth_timesync_adjust_time(uint8_t port_id, int64_t delta) dev = &rte_eth_devices[port_id]; RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_adjust_time, -ENOTSUP); - return (*dev->dev_ops->timesync_adjust_time)(dev, delta); + return eth_err(port_id, (*dev->dev_ops->timesync_adjust_time)(dev, + delta)); } int -rte_eth_timesync_read_time(uint8_t port_id, struct timespec *timestamp) +rte_eth_timesync_read_time(uint16_t port_id, struct timespec *timestamp) { struct rte_eth_dev *dev; @@ -3188,11 +3978,12 @@ rte_eth_timesync_read_time(uint8_t port_id, struct timespec *timestamp) dev = &rte_eth_devices[port_id]; RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_time, -ENOTSUP); - return (*dev->dev_ops->timesync_read_time)(dev, timestamp); + return eth_err(port_id, (*dev->dev_ops->timesync_read_time)(dev, + timestamp)); } int -rte_eth_timesync_write_time(uint8_t port_id, const struct timespec *timestamp) +rte_eth_timesync_write_time(uint16_t port_id, const struct timespec *timestamp) { struct rte_eth_dev *dev; @@ -3200,11 +3991,12 @@ rte_eth_timesync_write_time(uint8_t port_id, const struct timespec *timestamp) dev = &rte_eth_devices[port_id]; RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_write_time, -ENOTSUP); - return (*dev->dev_ops->timesync_write_time)(dev, timestamp); + return eth_err(port_id, (*dev->dev_ops->timesync_write_time)(dev, + timestamp)); } int -rte_eth_dev_get_reg_info(uint8_t port_id, struct rte_dev_reg_info *info) +rte_eth_dev_get_reg_info(uint16_t port_id, struct rte_dev_reg_info *info) { struct rte_eth_dev *dev; @@ -3212,11 +4004,11 @@ rte_eth_dev_get_reg_info(uint8_t port_id, struct rte_dev_reg_info *info) dev = &rte_eth_devices[port_id]; RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_reg, -ENOTSUP); - return (*dev->dev_ops->get_reg)(dev, info); + return eth_err(port_id, (*dev->dev_ops->get_reg)(dev, info)); } int -rte_eth_dev_get_eeprom_length(uint8_t port_id) +rte_eth_dev_get_eeprom_length(uint16_t port_id) { struct rte_eth_dev *dev; @@ -3224,11 +4016,11 @@ rte_eth_dev_get_eeprom_length(uint8_t port_id) dev = &rte_eth_devices[port_id]; RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_eeprom_length, -ENOTSUP); - return (*dev->dev_ops->get_eeprom_length)(dev); + return eth_err(port_id, 
(*dev->dev_ops->get_eeprom_length)(dev)); } int -rte_eth_dev_get_eeprom(uint8_t port_id, struct rte_dev_eeprom_info *info) +rte_eth_dev_get_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info) { struct rte_eth_dev *dev; @@ -3236,11 +4028,11 @@ rte_eth_dev_get_eeprom(uint8_t port_id, struct rte_dev_eeprom_info *info) dev = &rte_eth_devices[port_id]; RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_eeprom, -ENOTSUP); - return (*dev->dev_ops->get_eeprom)(dev, info); + return eth_err(port_id, (*dev->dev_ops->get_eeprom)(dev, info)); } int -rte_eth_dev_set_eeprom(uint8_t port_id, struct rte_dev_eeprom_info *info) +rte_eth_dev_set_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info) { struct rte_eth_dev *dev; @@ -3248,11 +4040,11 @@ rte_eth_dev_set_eeprom(uint8_t port_id, struct rte_dev_eeprom_info *info) dev = &rte_eth_devices[port_id]; RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_eeprom, -ENOTSUP); - return (*dev->dev_ops->set_eeprom)(dev, info); + return eth_err(port_id, (*dev->dev_ops->set_eeprom)(dev, info)); } int -rte_eth_dev_get_dcb_info(uint8_t port_id, +rte_eth_dev_get_dcb_info(uint16_t port_id, struct rte_eth_dcb_info *dcb_info) { struct rte_eth_dev *dev; @@ -3263,31 +4055,11 @@ rte_eth_dev_get_dcb_info(uint8_t port_id, memset(dcb_info, 0, sizeof(struct rte_eth_dcb_info)); RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_dcb_info, -ENOTSUP); - return (*dev->dev_ops->get_dcb_info)(dev, dcb_info); -} - -void -rte_eth_copy_pci_info(struct rte_eth_dev *eth_dev, struct rte_pci_device *pci_dev) -{ - if ((eth_dev == NULL) || (pci_dev == NULL)) { - RTE_PMD_DEBUG_TRACE("NULL pointer eth_dev=%p pci_dev=%p\n", - eth_dev, pci_dev); - return; - } - - eth_dev->data->dev_flags = 0; - if (pci_dev->driver->drv_flags & RTE_PCI_DRV_INTR_LSC) - eth_dev->data->dev_flags |= RTE_ETH_DEV_INTR_LSC; - if (pci_dev->driver->drv_flags & RTE_PCI_DRV_DETACHABLE) - eth_dev->data->dev_flags |= RTE_ETH_DEV_DETACHABLE; - - eth_dev->data->kdrv = pci_dev->kdrv; - eth_dev->data->numa_node = pci_dev->device.numa_node; - eth_dev->data->drv_name = pci_dev->driver->driver.name; + return eth_err(port_id, (*dev->dev_ops->get_dcb_info)(dev, dcb_info)); } int -rte_eth_dev_l2_tunnel_eth_type_conf(uint8_t port_id, +rte_eth_dev_l2_tunnel_eth_type_conf(uint16_t port_id, struct rte_eth_l2_tunnel_conf *l2_tunnel) { struct rte_eth_dev *dev; @@ -3306,11 +4078,12 @@ rte_eth_dev_l2_tunnel_eth_type_conf(uint8_t port_id, dev = &rte_eth_devices[port_id]; RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->l2_tunnel_eth_type_conf, -ENOTSUP); - return (*dev->dev_ops->l2_tunnel_eth_type_conf)(dev, l2_tunnel); + return eth_err(port_id, (*dev->dev_ops->l2_tunnel_eth_type_conf)(dev, + l2_tunnel)); } int -rte_eth_dev_l2_tunnel_offload_set(uint8_t port_id, +rte_eth_dev_l2_tunnel_offload_set(uint16_t port_id, struct rte_eth_l2_tunnel_conf *l2_tunnel, uint32_t mask, uint8_t en) @@ -3337,5 +4110,61 @@ rte_eth_dev_l2_tunnel_offload_set(uint8_t port_id, dev = &rte_eth_devices[port_id]; RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->l2_tunnel_offload_set, -ENOTSUP); - return (*dev->dev_ops->l2_tunnel_offload_set)(dev, l2_tunnel, mask, en); + return eth_err(port_id, (*dev->dev_ops->l2_tunnel_offload_set)(dev, + l2_tunnel, mask, en)); +} + +static void +rte_eth_dev_adjust_nb_desc(uint16_t *nb_desc, + const struct rte_eth_desc_lim *desc_lim) +{ + if (desc_lim->nb_align != 0) + *nb_desc = RTE_ALIGN_CEIL(*nb_desc, desc_lim->nb_align); + + if (desc_lim->nb_max != 0) + *nb_desc = RTE_MIN(*nb_desc, desc_lim->nb_max); + + *nb_desc = RTE_MAX(*nb_desc, desc_lim->nb_min); +} + +int 
+rte_eth_dev_adjust_nb_rx_tx_desc(uint16_t port_id, + uint16_t *nb_rx_desc, + uint16_t *nb_tx_desc) +{ + struct rte_eth_dev *dev; + struct rte_eth_dev_info dev_info; + + RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); + + dev = &rte_eth_devices[port_id]; + RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP); + + rte_eth_dev_info_get(port_id, &dev_info); + + if (nb_rx_desc != NULL) + rte_eth_dev_adjust_nb_desc(nb_rx_desc, &dev_info.rx_desc_lim); + + if (nb_tx_desc != NULL) + rte_eth_dev_adjust_nb_desc(nb_tx_desc, &dev_info.tx_desc_lim); + + return 0; +} + +int +rte_eth_dev_pool_ops_supported(uint16_t port_id, const char *pool) +{ + struct rte_eth_dev *dev; + + RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); + + if (pool == NULL) + return -EINVAL; + + dev = &rte_eth_devices[port_id]; + + if (*dev->dev_ops->pool_ops_supported == NULL) + return 1; /* all pools are supported */ + + return (*dev->dev_ops->pool_ops_supported)(dev, pool); }
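
Usage note: the callback rework above widens port_id to uint16_t, makes rte_eth_dev_cb_fn return int and carry a ret_param pointer, and accepts RTE_ETH_ALL to register one callback across every port slot. A minimal sketch of application-side usage against this patched rte_ethdev.h; the names on_link_change/register_link_callback and the printf logging are illustrative only:

#include <stdio.h>
#include <rte_ethdev.h>

/* New-style callback: returns int and receives a ret_param pointer. */
static int
on_link_change(uint16_t port_id, enum rte_eth_event_type event,
	       void *cb_arg, void *ret_param)
{
	(void)cb_arg;
	(void)ret_param;
	if (event == RTE_ETH_EVENT_INTR_LSC)
		printf("port %u: link status changed\n", port_id);
	return 0;
}

static int
register_link_callback(void)
{
	/* RTE_ETH_ALL attaches the callback to every port slot in one call,
	 * under rte_eth_dev_cb_lock, as the loop in the patch shows. */
	return rte_eth_dev_callback_register(RTE_ETH_ALL,
			RTE_ETH_EVENT_INTR_LSC, on_link_change, NULL);
}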
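rte_eth_dev_fw_version_get() is new in this patch. A sketch of how an application might query it; the 64-byte buffer is an arbitrary choice, and the "positive return means required buffer size" convention is taken from the API documentation rather than from the hunk above:

#include <stdio.h>
#include <rte_ethdev.h>

static void
print_fw_version(uint16_t port_id)
{
	char fw_version[64];
	int ret;

	ret = rte_eth_dev_fw_version_get(port_id, fw_version,
					 sizeof(fw_version));
	if (ret == 0)
		printf("port %u firmware: %s\n", port_id, fw_version);
	else if (ret > 0)
		/* per the API docs, a positive value is the size needed */
		printf("port %u: need %d bytes for the version string\n",
		       port_id, ret);
	else
		printf("port %u: firmware version unavailable (%d)\n",
		       port_id, ret);
}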
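The two helpers added at the end of the file pair naturally during port setup: rte_eth_dev_adjust_nb_rx_tx_desc() aligns and clamps requested ring sizes to the rx_desc_lim/tx_desc_lim the PMD reports, and rte_eth_dev_pool_ops_supported() checks whether a mempool ops name suits the port (per the API docs: 0 = best choice, 1 = supported, negative = unsupported; the hunk above returns 1 when the PMD leaves the op unimplemented). A hedged sketch; setup_rings, queue index 0, the 1024 defaults, and the "ring_mp_mc" ops name are illustrative assumptions:

#include <errno.h>
#include <rte_ethdev.h>

static int
setup_rings(uint16_t port_id, unsigned int socket_id,
	    struct rte_mempool *mb_pool)
{
	uint16_t nb_rxd = 1024, nb_txd = 1024;
	int ret;

	/* Align/clamp the requested sizes to the device's limits. */
	ret = rte_eth_dev_adjust_nb_rx_tx_desc(port_id, &nb_rxd, &nb_txd);
	if (ret != 0)
		return ret;

	/* Refuse mempool ops the PMD says it cannot use. */
	if (rte_eth_dev_pool_ops_supported(port_id, "ring_mp_mc") < 0)
		return -ENOTSUP;

	ret = rte_eth_rx_queue_setup(port_id, 0, nb_rxd, socket_id,
				     NULL, mb_pool);
	if (ret != 0)
		return ret;
	return rte_eth_tx_queue_setup(port_id, 0, nb_txd, socket_id, NULL);
}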