X-Git-Url: https://gerrit.fd.io/r/gitweb?a=blobdiff_plain;f=lib%2Flibrte_ether%2Frte_ethdev.c;h=318af286974336589b6295fc9c9b2743dc45c581;hb=055c52583a2794da8ba1e85a48cce3832372b12f;hp=a31018e88de831d04011f4cfe2019140e5f4d8a0;hpb=97f17497d162afdb82c8704bf097f0fee3724b2e;p=deb_dpdk.git diff --git a/lib/librte_ether/rte_ethdev.c b/lib/librte_ether/rte_ethdev.c index a31018e8..318af286 100644 --- a/lib/librte_ether/rte_ethdev.c +++ b/lib/librte_ether/rte_ethdev.c @@ -1,7 +1,7 @@ /*- * BSD LICENSE * - * Copyright(c) 2010-2016 Intel Corporation. All rights reserved. + * Copyright(c) 2010-2017 Intel Corporation. All rights reserved. * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -47,7 +47,6 @@ #include #include #include -#include #include #include #include @@ -58,7 +57,6 @@ #include #include #include -#include #include #include #include @@ -68,15 +66,22 @@ #include "rte_ether.h" #include "rte_ethdev.h" +#include "ethdev_profile.h" static const char *MZ_RTE_ETH_DEV_DATA = "rte_eth_dev_data"; struct rte_eth_dev rte_eth_devices[RTE_MAX_ETHPORTS]; static struct rte_eth_dev_data *rte_eth_dev_data; -static uint8_t nb_ports; +static uint8_t eth_dev_last_created_port; /* spinlock for eth device callbacks */ static rte_spinlock_t rte_eth_dev_cb_lock = RTE_SPINLOCK_INITIALIZER; +/* spinlock for add/remove rx callbacks */ +static rte_spinlock_t rte_eth_rx_cb_lock = RTE_SPINLOCK_INITIALIZER; + +/* spinlock for add/remove tx callbacks */ +static rte_spinlock_t rte_eth_tx_cb_lock = RTE_SPINLOCK_INITIALIZER; + /* store statistics names and its offset in stats structure */ struct rte_eth_xstats_name_off { char name[RTE_ETH_XSTATS_NAME_SIZE]; @@ -123,6 +128,7 @@ struct rte_eth_dev_callback { TAILQ_ENTRY(rte_eth_dev_callback) next; /**< Callbacks list */ rte_eth_dev_cb_fn cb_fn; /**< Callback address */ void *cb_arg; /**< Parameter for callback */ + void *ret_param; /**< Return parameter */ enum rte_eth_event_type event; /**< Interrupt event type */ uint32_t active; /**< Callback is executing */ }; @@ -132,10 +138,18 @@ enum { STAT_QMAP_RX }; -enum { - DEV_DETACHED = 0, - DEV_ATTACHED -}; +uint16_t +rte_eth_find_next(uint16_t port_id) +{ + while (port_id < RTE_MAX_ETHPORTS && + rte_eth_devices[port_id].state != RTE_ETH_DEV_ATTACHED) + port_id++; + + if (port_id >= RTE_MAX_ETHPORTS) + return RTE_MAX_ETHPORTS; + + return port_id; +} static void rte_eth_dev_data_alloc(void) @@ -164,29 +178,43 @@ rte_eth_dev_allocated(const char *name) unsigned i; for (i = 0; i < RTE_MAX_ETHPORTS; i++) { - if ((rte_eth_devices[i].attached == DEV_ATTACHED) && + if ((rte_eth_devices[i].state == RTE_ETH_DEV_ATTACHED) && strcmp(rte_eth_devices[i].data->name, name) == 0) return &rte_eth_devices[i]; } return NULL; } -static uint8_t +static uint16_t rte_eth_dev_find_free_port(void) { unsigned i; for (i = 0; i < RTE_MAX_ETHPORTS; i++) { - if (rte_eth_devices[i].attached == DEV_DETACHED) + if (rte_eth_devices[i].state == RTE_ETH_DEV_UNUSED) return i; } return RTE_MAX_ETHPORTS; } +static struct rte_eth_dev * +eth_dev_get(uint16_t port_id) +{ + struct rte_eth_dev *eth_dev = &rte_eth_devices[port_id]; + + eth_dev->data = &rte_eth_dev_data[port_id]; + eth_dev->state = RTE_ETH_DEV_ATTACHED; + TAILQ_INIT(&(eth_dev->link_intr_cbs)); + + eth_dev_last_created_port = port_id; + + return eth_dev; +} + struct rte_eth_dev * -rte_eth_dev_allocate(const char *name, enum rte_eth_dev_type type) +rte_eth_dev_allocate(const char *name) { - uint8_t port_id; + uint16_t port_id; struct rte_eth_dev *eth_dev; port_id 
= rte_eth_dev_find_free_port(); @@ -204,206 +232,97 @@ rte_eth_dev_allocate(const char *name, enum rte_eth_dev_type type) return NULL; } - eth_dev = &rte_eth_devices[port_id]; - eth_dev->data = &rte_eth_dev_data[port_id]; + memset(&rte_eth_dev_data[port_id], 0, sizeof(struct rte_eth_dev_data)); + eth_dev = eth_dev_get(port_id); snprintf(eth_dev->data->name, sizeof(eth_dev->data->name), "%s", name); eth_dev->data->port_id = port_id; - eth_dev->attached = DEV_ATTACHED; - eth_dev->dev_type = type; - nb_ports++; - return eth_dev; -} - -static int -rte_eth_dev_create_unique_device_name(char *name, size_t size, - struct rte_pci_device *pci_dev) -{ - int ret; - - ret = snprintf(name, size, "%d:%d.%d", - pci_dev->addr.bus, pci_dev->addr.devid, - pci_dev->addr.function); - if (ret < 0) - return ret; - return 0; -} - -int -rte_eth_dev_release_port(struct rte_eth_dev *eth_dev) -{ - if (eth_dev == NULL) - return -EINVAL; + eth_dev->data->mtu = ETHER_MTU; - eth_dev->attached = DEV_DETACHED; - nb_ports--; - return 0; + return eth_dev; } -static int -rte_eth_dev_init(struct rte_pci_driver *pci_drv, - struct rte_pci_device *pci_dev) +/* + * Attach to a port already registered by the primary process, which + * makes sure that the same device would have the same port id both + * in the primary and secondary process. + */ +struct rte_eth_dev * +rte_eth_dev_attach_secondary(const char *name) { - struct eth_driver *eth_drv; + uint16_t i; struct rte_eth_dev *eth_dev; - char ethdev_name[RTE_ETH_NAME_MAX_LEN]; - - int diag; - - eth_drv = (struct eth_driver *)pci_drv; - /* Create unique Ethernet device name using PCI address */ - rte_eth_dev_create_unique_device_name(ethdev_name, - sizeof(ethdev_name), pci_dev); - - eth_dev = rte_eth_dev_allocate(ethdev_name, RTE_ETH_DEV_PCI); - if (eth_dev == NULL) - return -ENOMEM; + if (rte_eth_dev_data == NULL) + rte_eth_dev_data_alloc(); - if (rte_eal_process_type() == RTE_PROC_PRIMARY) { - eth_dev->data->dev_private = rte_zmalloc("ethdev private structure", - eth_drv->dev_private_size, - RTE_CACHE_LINE_SIZE); - if (eth_dev->data->dev_private == NULL) - rte_panic("Cannot allocate memzone for private port data\n"); + for (i = 0; i < RTE_MAX_ETHPORTS; i++) { + if (strcmp(rte_eth_dev_data[i].name, name) == 0) + break; + } + if (i == RTE_MAX_ETHPORTS) { + RTE_PMD_DEBUG_TRACE( + "device %s is not driven by the primary process\n", + name); + return NULL; } - eth_dev->pci_dev = pci_dev; - eth_dev->driver = eth_drv; - eth_dev->data->rx_mbuf_alloc_failed = 0; - - /* init user callbacks */ - TAILQ_INIT(&(eth_dev->link_intr_cbs)); - - /* - * Set the default MTU. 
- */ - eth_dev->data->mtu = ETHER_MTU; - /* Invoke PMD device initialization function */ - diag = (*eth_drv->eth_dev_init)(eth_dev); - if (diag == 0) - return 0; + eth_dev = eth_dev_get(i); + RTE_ASSERT(eth_dev->data->port_id == i); - RTE_PMD_DEBUG_TRACE("driver %s: eth_dev_init(vendor_id=0x%u device_id=0x%x) failed\n", - pci_drv->name, - (unsigned) pci_dev->id.vendor_id, - (unsigned) pci_dev->id.device_id); - if (rte_eal_process_type() == RTE_PROC_PRIMARY) - rte_free(eth_dev->data->dev_private); - rte_eth_dev_release_port(eth_dev); - return diag; + return eth_dev; } -static int -rte_eth_dev_uninit(struct rte_pci_device *pci_dev) +int +rte_eth_dev_release_port(struct rte_eth_dev *eth_dev) { - const struct eth_driver *eth_drv; - struct rte_eth_dev *eth_dev; - char ethdev_name[RTE_ETH_NAME_MAX_LEN]; - int ret; - - if (pci_dev == NULL) - return -EINVAL; - - /* Create unique Ethernet device name using PCI address */ - rte_eth_dev_create_unique_device_name(ethdev_name, - sizeof(ethdev_name), pci_dev); - - eth_dev = rte_eth_dev_allocated(ethdev_name); if (eth_dev == NULL) - return -ENODEV; - - eth_drv = (const struct eth_driver *)pci_dev->driver; - - /* Invoke PMD device uninit function */ - if (*eth_drv->eth_dev_uninit) { - ret = (*eth_drv->eth_dev_uninit)(eth_dev); - if (ret) - return ret; - } - - /* free ether device */ - rte_eth_dev_release_port(eth_dev); - - if (rte_eal_process_type() == RTE_PROC_PRIMARY) - rte_free(eth_dev->data->dev_private); - - eth_dev->pci_dev = NULL; - eth_dev->driver = NULL; - eth_dev->data = NULL; + return -EINVAL; + eth_dev->state = RTE_ETH_DEV_UNUSED; return 0; } -/** - * Register an Ethernet [Poll Mode] driver. - * - * Function invoked by the initialization function of an Ethernet driver - * to simultaneously register itself as a PCI driver and as an Ethernet - * Poll Mode Driver. - * Invokes the rte_eal_pci_register() function to register the *pci_drv* - * structure embedded in the *eth_drv* structure, after having stored the - * address of the rte_eth_dev_init() function in the *devinit* field of - * the *pci_drv* structure. - * During the PCI probing phase, the rte_eth_dev_init() function is - * invoked for each PCI [Ethernet device] matching the embedded PCI - * identifiers provided by the driver. 
- */
-void
-rte_eth_driver_register(struct eth_driver *eth_drv)
-{
-	eth_drv->pci_drv.devinit = rte_eth_dev_init;
-	eth_drv->pci_drv.devuninit = rte_eth_dev_uninit;
-	rte_eal_pci_register(&eth_drv->pci_drv);
-}
-
 int
-rte_eth_dev_is_valid_port(uint8_t port_id)
+rte_eth_dev_is_valid_port(uint16_t port_id)
 {
 	if (port_id >= RTE_MAX_ETHPORTS ||
-	    rte_eth_devices[port_id].attached != DEV_ATTACHED)
+	    (rte_eth_devices[port_id].state != RTE_ETH_DEV_ATTACHED &&
+	     rte_eth_devices[port_id].state != RTE_ETH_DEV_DEFERRED))
 		return 0;
 	else
 		return 1;
 }
 
 int
-rte_eth_dev_socket_id(uint8_t port_id)
+rte_eth_dev_socket_id(uint16_t port_id)
 {
-	if (!rte_eth_dev_is_valid_port(port_id))
-		return -1;
+	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -1);
 	return rte_eth_devices[port_id].data->numa_node;
 }
 
-uint8_t
-rte_eth_dev_count(void)
+void *
+rte_eth_dev_get_sec_ctx(uint8_t port_id)
 {
-	return nb_ports;
+	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, NULL);
+	return rte_eth_devices[port_id].security_ctx;
 }
 
-static enum rte_eth_dev_type
-rte_eth_dev_get_device_type(uint8_t port_id)
+uint16_t
+rte_eth_dev_count(void)
 {
-	if (!rte_eth_dev_is_valid_port(port_id))
-		return RTE_ETH_DEV_UNKNOWN;
-	return rte_eth_devices[port_id].dev_type;
-}
+	uint16_t p;
+	uint16_t count;
 
-static int
-rte_eth_dev_get_addr_by_port(uint8_t port_id, struct rte_pci_addr *addr)
-{
-	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
+	count = 0;
 
-	if (addr == NULL) {
-		RTE_PMD_DEBUG_TRACE("Null pointer is specified\n");
-		return -EINVAL;
-	}
+	RTE_ETH_FOREACH_DEV(p)
+		count++;
 
-	*addr = rte_eth_devices[port_id].pci_dev->addr;
-	return 0;
+	return count;
 }
 
-static int
-rte_eth_dev_get_name_by_port(uint8_t port_id, char *name)
+int
+rte_eth_dev_get_name_by_port(uint16_t port_id, char *name)
 {
 	char *tmp;
 
@@ -421,8 +340,8 @@ rte_eth_dev_get_name_by_port(uint8_t port_id, char *name)
 	return 0;
 }
 
-static int
-rte_eth_dev_get_port_by_name(const char *name, uint8_t *port_id)
+int
+rte_eth_dev_get_port_by_name(const char *name, uint16_t *port_id)
 {
 	int i;
 
@@ -431,10 +350,7 @@ rte_eth_dev_get_port_by_name(const char *name, uint8_t *port_id)
 		return -EINVAL;
 	}
 
-	*port_id = RTE_MAX_ETHPORTS;
-
-	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
-
+	RTE_ETH_FOREACH_DEV(i) {
 		if (!strncmp(name,
 			rte_eth_dev_data[i].name, strlen(name))) {
 
@@ -446,225 +362,85 @@ rte_eth_dev_get_port_by_name(const char *name, uint8_t *port_id)
 	return -ENODEV;
 }
 
-static int
-rte_eth_dev_get_port_by_addr(const struct rte_pci_addr *addr, uint8_t *port_id)
-{
-	int i;
-	struct rte_pci_device *pci_dev = NULL;
-
-	if (addr == NULL) {
-		RTE_PMD_DEBUG_TRACE("Null pointer is specified\n");
-		return -EINVAL;
-	}
-
-	*port_id = RTE_MAX_ETHPORTS;
-
-	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
-
-		pci_dev = rte_eth_devices[i].pci_dev;
-
-		if (pci_dev &&
-			!rte_eal_compare_pci_addr(&pci_dev->addr, addr)) {
-
-			*port_id = i;
-
-			return 0;
-		}
-	}
-	return -ENODEV;
-}
-
-static int
-rte_eth_dev_is_detachable(uint8_t port_id)
-{
-	uint32_t dev_flags;
-
-	if (!rte_eth_dev_is_valid_port(port_id)) {
-		RTE_PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
-		return -EINVAL;
-	}
-
-	switch (rte_eth_devices[port_id].data->kdrv) {
-	case RTE_KDRV_IGB_UIO:
-	case RTE_KDRV_UIO_GENERIC:
-	case RTE_KDRV_NIC_UIO:
-	case RTE_KDRV_NONE:
-		break;
-	case RTE_KDRV_VFIO:
-	default:
-		return -ENOTSUP;
-	}
-	dev_flags = rte_eth_devices[port_id].data->dev_flags;
-	if ((dev_flags & RTE_ETH_DEV_DETACHABLE) &&
-		(!(dev_flags & RTE_ETH_DEV_BONDED_SLAVE)))
-		return 0;
-	else
-		return 1;
-}
-
-/* attach the new physical device, then store 
port_id of the device */ -static int -rte_eth_dev_attach_pdev(struct rte_pci_addr *addr, uint8_t *port_id) +/* attach the new device, then store port_id of the device */ +int +rte_eth_dev_attach(const char *devargs, uint16_t *port_id) { - /* re-construct pci_device_list */ - if (rte_eal_pci_scan()) - goto err; - /* Invoke probe func of the driver can handle the new device. */ - if (rte_eal_pci_probe_one(addr)) - goto err; + int ret = -1; + int current = rte_eth_dev_count(); + char *name = NULL; + char *args = NULL; - if (rte_eth_dev_get_port_by_addr(addr, port_id)) + if ((devargs == NULL) || (port_id == NULL)) { + ret = -EINVAL; goto err; + } - return 0; -err: - return -1; -} - -/* detach the new physical device, then store pci_addr of the device */ -static int -rte_eth_dev_detach_pdev(uint8_t port_id, struct rte_pci_addr *addr) -{ - struct rte_pci_addr freed_addr; - struct rte_pci_addr vp; - - /* get pci address by port id */ - if (rte_eth_dev_get_addr_by_port(port_id, &freed_addr)) + /* parse devargs, then retrieve device name and args */ + if (rte_eal_parse_devargs_str(devargs, &name, &args)) goto err; - /* Zeroed pci addr means the port comes from virtual device */ - vp.domain = vp.bus = vp.devid = vp.function = 0; - if (rte_eal_compare_pci_addr(&vp, &freed_addr) == 0) + ret = rte_eal_dev_attach(name, args); + if (ret < 0) goto err; - /* invoke devuninit func of the pci driver, - * also remove the device from pci_device_list */ - if (rte_eal_pci_detach(&freed_addr)) + /* no point looking at the port count if no port exists */ + if (!rte_eth_dev_count()) { + RTE_LOG(ERR, EAL, "No port found for device (%s)\n", name); + ret = -1; goto err; + } - *addr = freed_addr; - return 0; -err: - return -1; -} - -/* attach the new virtual device, then store port_id of the device */ -static int -rte_eth_dev_attach_vdev(const char *vdevargs, uint8_t *port_id) -{ - char *name = NULL, *args = NULL; - int ret = -1; - - /* parse vdevargs, then retrieve device name and args */ - if (rte_eal_parse_devargs_str(vdevargs, &name, &args)) - goto end; - - /* walk around dev_driver_list to find the driver of the device, - * then invoke probe function of the driver. - * rte_eal_vdev_init() updates port_id allocated after - * initialization. + /* if nothing happened, there is a bug here, since some driver told us + * it did attach a device, but did not create a port. 
*/ - if (rte_eal_vdev_init(name, args)) - goto end; - - if (rte_eth_dev_get_port_by_name(name, port_id)) - goto end; - - ret = 0; -end: - free(name); - free(args); - - return ret; -} - -/* detach the new virtual device, then store the name of the device */ -static int -rte_eth_dev_detach_vdev(uint8_t port_id, char *vdevname) -{ - char name[RTE_ETH_NAME_MAX_LEN]; - - /* get device name by port id */ - if (rte_eth_dev_get_name_by_port(port_id, name)) - goto err; - /* walk around dev_driver_list to find the driver of the device, - * then invoke uninit function of the driver */ - if (rte_eal_vdev_uninit(name)) - goto err; - - strncpy(vdevname, name, sizeof(name)); - return 0; -err: - return -1; -} - -/* attach the new device, then store port_id of the device */ -int -rte_eth_dev_attach(const char *devargs, uint8_t *port_id) -{ - struct rte_pci_addr addr; - int ret = -1; - - if ((devargs == NULL) || (port_id == NULL)) { - ret = -EINVAL; + if (current == rte_eth_dev_count()) { + ret = -1; goto err; } - if (eal_parse_pci_DomBDF(devargs, &addr) == 0) { - ret = rte_eth_dev_attach_pdev(&addr, port_id); - if (ret < 0) - goto err; - } else { - ret = rte_eth_dev_attach_vdev(devargs, port_id); - if (ret < 0) - goto err; - } + *port_id = eth_dev_last_created_port; + ret = 0; - return 0; err: - RTE_LOG(ERR, EAL, "Driver, cannot attach the device\n"); + free(name); + free(args); return ret; } /* detach the device, then store the name of the device */ int -rte_eth_dev_detach(uint8_t port_id, char *name) +rte_eth_dev_detach(uint16_t port_id, char *name) { - struct rte_pci_addr addr; + uint32_t dev_flags; int ret = -1; + RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL); + if (name == NULL) { ret = -EINVAL; goto err; } - /* check whether the driver supports detach feature, or not */ - if (rte_eth_dev_is_detachable(port_id)) + dev_flags = rte_eth_devices[port_id].data->dev_flags; + if (dev_flags & RTE_ETH_DEV_BONDED_SLAVE) { + RTE_LOG(ERR, EAL, "Port %" PRIu16 " is bonded, cannot detach\n", + port_id); + ret = -ENOTSUP; goto err; + } - if (rte_eth_dev_get_device_type(port_id) == RTE_ETH_DEV_PCI) { - ret = rte_eth_dev_get_addr_by_port(port_id, &addr); - if (ret < 0) - goto err; - - ret = rte_eth_dev_detach_pdev(port_id, &addr); - if (ret < 0) - goto err; + snprintf(name, sizeof(rte_eth_devices[port_id].data->name), + "%s", rte_eth_devices[port_id].data->name); - snprintf(name, RTE_ETH_NAME_MAX_LEN, - "%04x:%02x:%02x.%d", - addr.domain, addr.bus, - addr.devid, addr.function); - } else { - ret = rte_eth_dev_detach_vdev(port_id, name); - if (ret < 0) - goto err; - } + ret = rte_eal_dev_detach(rte_eth_devices[port_id].device); + if (ret < 0) + goto err; + rte_eth_devices[port_id].state = RTE_ETH_DEV_UNUSED; return 0; err: - RTE_LOG(ERR, EAL, "Driver, cannot detach the device\n"); return ret; } @@ -710,13 +486,16 @@ rte_eth_dev_rx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues) for (i = nb_queues; i < old_nb_queues; i++) (*dev->dev_ops->rx_queue_release)(rxq[i]); + + rte_free(dev->data->rx_queues); + dev->data->rx_queues = NULL; } dev->data->nb_rx_queues = nb_queues; return 0; } int -rte_eth_dev_rx_queue_start(uint8_t port_id, uint16_t rx_queue_id) +rte_eth_dev_rx_queue_start(uint16_t port_id, uint16_t rx_queue_id) { struct rte_eth_dev *dev; @@ -742,7 +521,7 @@ rte_eth_dev_rx_queue_start(uint8_t port_id, uint16_t rx_queue_id) } int -rte_eth_dev_rx_queue_stop(uint8_t port_id, uint16_t rx_queue_id) +rte_eth_dev_rx_queue_stop(uint16_t port_id, uint16_t rx_queue_id) { struct rte_eth_dev *dev; @@ -768,7 +547,7 
@@ rte_eth_dev_rx_queue_stop(uint8_t port_id, uint16_t rx_queue_id) } int -rte_eth_dev_tx_queue_start(uint8_t port_id, uint16_t tx_queue_id) +rte_eth_dev_tx_queue_start(uint16_t port_id, uint16_t tx_queue_id) { struct rte_eth_dev *dev; @@ -794,7 +573,7 @@ rte_eth_dev_tx_queue_start(uint8_t port_id, uint16_t tx_queue_id) } int -rte_eth_dev_tx_queue_stop(uint8_t port_id, uint16_t tx_queue_id) +rte_eth_dev_tx_queue_stop(uint16_t port_id, uint16_t tx_queue_id) { struct rte_eth_dev *dev; @@ -861,6 +640,9 @@ rte_eth_dev_tx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues) for (i = nb_queues; i < old_nb_queues; i++) (*dev->dev_ops->tx_queue_release)(txq[i]); + + rte_free(dev->data->tx_queues); + dev->data->tx_queues = NULL; } dev->data->nb_tx_queues = nb_queues; return 0; @@ -899,12 +681,102 @@ rte_eth_speed_bitflag(uint32_t speed, int duplex) } } +/** + * A conversion function from rxmode bitfield API. + */ +static void +rte_eth_convert_rx_offload_bitfield(const struct rte_eth_rxmode *rxmode, + uint64_t *rx_offloads) +{ + uint64_t offloads = 0; + + if (rxmode->header_split == 1) + offloads |= DEV_RX_OFFLOAD_HEADER_SPLIT; + if (rxmode->hw_ip_checksum == 1) + offloads |= DEV_RX_OFFLOAD_CHECKSUM; + if (rxmode->hw_vlan_filter == 1) + offloads |= DEV_RX_OFFLOAD_VLAN_FILTER; + if (rxmode->hw_vlan_strip == 1) + offloads |= DEV_RX_OFFLOAD_VLAN_STRIP; + if (rxmode->hw_vlan_extend == 1) + offloads |= DEV_RX_OFFLOAD_VLAN_EXTEND; + if (rxmode->jumbo_frame == 1) + offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME; + if (rxmode->hw_strip_crc == 1) + offloads |= DEV_RX_OFFLOAD_CRC_STRIP; + if (rxmode->enable_scatter == 1) + offloads |= DEV_RX_OFFLOAD_SCATTER; + if (rxmode->enable_lro == 1) + offloads |= DEV_RX_OFFLOAD_TCP_LRO; + if (rxmode->hw_timestamp == 1) + offloads |= DEV_RX_OFFLOAD_TIMESTAMP; + if (rxmode->security == 1) + offloads |= DEV_RX_OFFLOAD_SECURITY; + + *rx_offloads = offloads; +} + +/** + * A conversion function from rxmode offloads API. 
+ */ +static void +rte_eth_convert_rx_offloads(const uint64_t rx_offloads, + struct rte_eth_rxmode *rxmode) +{ + + if (rx_offloads & DEV_RX_OFFLOAD_HEADER_SPLIT) + rxmode->header_split = 1; + else + rxmode->header_split = 0; + if (rx_offloads & DEV_RX_OFFLOAD_CHECKSUM) + rxmode->hw_ip_checksum = 1; + else + rxmode->hw_ip_checksum = 0; + if (rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER) + rxmode->hw_vlan_filter = 1; + else + rxmode->hw_vlan_filter = 0; + if (rx_offloads & DEV_RX_OFFLOAD_VLAN_STRIP) + rxmode->hw_vlan_strip = 1; + else + rxmode->hw_vlan_strip = 0; + if (rx_offloads & DEV_RX_OFFLOAD_VLAN_EXTEND) + rxmode->hw_vlan_extend = 1; + else + rxmode->hw_vlan_extend = 0; + if (rx_offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) + rxmode->jumbo_frame = 1; + else + rxmode->jumbo_frame = 0; + if (rx_offloads & DEV_RX_OFFLOAD_CRC_STRIP) + rxmode->hw_strip_crc = 1; + else + rxmode->hw_strip_crc = 0; + if (rx_offloads & DEV_RX_OFFLOAD_SCATTER) + rxmode->enable_scatter = 1; + else + rxmode->enable_scatter = 0; + if (rx_offloads & DEV_RX_OFFLOAD_TCP_LRO) + rxmode->enable_lro = 1; + else + rxmode->enable_lro = 0; + if (rx_offloads & DEV_RX_OFFLOAD_TIMESTAMP) + rxmode->hw_timestamp = 1; + else + rxmode->hw_timestamp = 0; + if (rx_offloads & DEV_RX_OFFLOAD_SECURITY) + rxmode->security = 1; + else + rxmode->security = 0; +} + int -rte_eth_dev_configure(uint8_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q, +rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q, const struct rte_eth_conf *dev_conf) { struct rte_eth_dev *dev; struct rte_eth_dev_info dev_info; + struct rte_eth_conf local_conf = *dev_conf; int diag; RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL); @@ -934,8 +806,20 @@ rte_eth_dev_configure(uint8_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q, return -EBUSY; } + /* + * Convert between the offloads API to enable PMDs to support + * only one of them. + */ + if ((dev_conf->rxmode.ignore_offload_bitfield == 0)) { + rte_eth_convert_rx_offload_bitfield( + &dev_conf->rxmode, &local_conf.rxmode.offloads); + } else { + rte_eth_convert_rx_offloads(dev_conf->rxmode.offloads, + &local_conf.rxmode); + } + /* Copy the dev_conf parameter into the dev structure */ - memcpy(&dev->data->dev_conf, dev_conf, sizeof(dev->data->dev_conf)); + memcpy(&dev->data->dev_conf, &local_conf, sizeof(dev->data->dev_conf)); /* * Check that the numbers of RX and TX queues are not greater @@ -961,22 +845,25 @@ rte_eth_dev_configure(uint8_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q, return -EINVAL; } - /* - * If link state interrupt is enabled, check that the - * device supports it. - */ + /* Check that the device supports requested interrupts */ if ((dev_conf->intr_conf.lsc == 1) && (!(dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC))) { RTE_PMD_DEBUG_TRACE("driver %s does not support lsc\n", - dev->data->drv_name); + dev->device->driver->name); return -EINVAL; } + if ((dev_conf->intr_conf.rmv == 1) && + (!(dev->data->dev_flags & RTE_ETH_DEV_INTR_RMV))) { + RTE_PMD_DEBUG_TRACE("driver %s does not support rmv\n", + dev->device->driver->name); + return -EINVAL; + } /* * If jumbo frames are enabled, check that the maximum RX packet * length is supported by the configured device. 
*/ - if (dev_conf->rxmode.jumbo_frame == 1) { + if (local_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) { if (dev_conf->rxmode.max_rx_pkt_len > dev_info.max_rx_pktlen) { RTE_PMD_DEBUG_TRACE("ethdev port_id=%d max_rx_pkt_len %u" @@ -1028,42 +915,74 @@ rte_eth_dev_configure(uint8_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q, return diag; } + /* Initialize Rx profiling if enabled at compilation time. */ + diag = __rte_eth_profile_rx_init(port_id, dev); + if (diag != 0) { + RTE_PMD_DEBUG_TRACE("port%d __rte_eth_profile_rx_init = %d\n", + port_id, diag); + rte_eth_dev_rx_queue_config(dev, 0); + rte_eth_dev_tx_queue_config(dev, 0); + return diag; + } + return 0; } +void +_rte_eth_dev_reset(struct rte_eth_dev *dev) +{ + if (dev->data->dev_started) { + RTE_PMD_DEBUG_TRACE( + "port %d must be stopped to allow reset\n", + dev->data->port_id); + return; + } + + rte_eth_dev_rx_queue_config(dev, 0); + rte_eth_dev_tx_queue_config(dev, 0); + + memset(&dev->data->dev_conf, 0, sizeof(dev->data->dev_conf)); +} + static void -rte_eth_dev_config_restore(uint8_t port_id) +rte_eth_dev_config_restore(uint16_t port_id) { struct rte_eth_dev *dev; struct rte_eth_dev_info dev_info; - struct ether_addr addr; + struct ether_addr *addr; uint16_t i; uint32_t pool = 0; + uint64_t pool_mask; dev = &rte_eth_devices[port_id]; rte_eth_dev_info_get(port_id, &dev_info); - if (RTE_ETH_DEV_SRIOV(dev).active) - pool = RTE_ETH_DEV_SRIOV(dev).def_vmdq_idx; - - /* replay MAC address configuration */ - for (i = 0; i < dev_info.max_mac_addrs; i++) { - addr = dev->data->mac_addrs[i]; - - /* skip zero address */ - if (is_zero_ether_addr(&addr)) - continue; - - /* add address to the hardware */ - if (*dev->dev_ops->mac_addr_add && - (dev->data->mac_pool_sel[i] & (1ULL << pool))) - (*dev->dev_ops->mac_addr_add)(dev, &addr, i, pool); - else { - RTE_PMD_DEBUG_TRACE("port %d: MAC address array not supported\n", - port_id); - /* exit the loop but not return an error */ - break; + /* replay MAC address configuration including default MAC */ + addr = &dev->data->mac_addrs[0]; + if (*dev->dev_ops->mac_addr_set != NULL) + (*dev->dev_ops->mac_addr_set)(dev, addr); + else if (*dev->dev_ops->mac_addr_add != NULL) + (*dev->dev_ops->mac_addr_add)(dev, addr, 0, pool); + + if (*dev->dev_ops->mac_addr_add != NULL) { + for (i = 1; i < dev_info.max_mac_addrs; i++) { + addr = &dev->data->mac_addrs[i]; + + /* skip zero address */ + if (is_zero_ether_addr(addr)) + continue; + + pool = 0; + pool_mask = dev->data->mac_pool_sel[i]; + + do { + if (pool_mask & 1ULL) + (*dev->dev_ops->mac_addr_add)(dev, + addr, i, pool); + pool_mask >>= 1; + pool++; + } while (pool_mask); } } @@ -1081,7 +1000,7 @@ rte_eth_dev_config_restore(uint8_t port_id) } int -rte_eth_dev_start(uint8_t port_id) +rte_eth_dev_start(uint16_t port_id) { struct rte_eth_dev *dev; int diag; @@ -1093,7 +1012,7 @@ rte_eth_dev_start(uint8_t port_id) RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_start, -ENOTSUP); if (dev->data->dev_started != 0) { - RTE_PMD_DEBUG_TRACE("Device with port_id=%" PRIu8 + RTE_PMD_DEBUG_TRACE("Device with port_id=%" PRIu16 " already started\n", port_id); return 0; @@ -1115,7 +1034,7 @@ rte_eth_dev_start(uint8_t port_id) } void -rte_eth_dev_stop(uint8_t port_id) +rte_eth_dev_stop(uint16_t port_id) { struct rte_eth_dev *dev; @@ -1125,7 +1044,7 @@ rte_eth_dev_stop(uint8_t port_id) RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_stop); if (dev->data->dev_started == 0) { - RTE_PMD_DEBUG_TRACE("Device with port_id=%" PRIu8 + RTE_PMD_DEBUG_TRACE("Device with port_id=%" PRIu16 " already 
stopped\n", port_id); return; @@ -1136,7 +1055,7 @@ rte_eth_dev_stop(uint8_t port_id) } int -rte_eth_dev_set_link_up(uint8_t port_id) +rte_eth_dev_set_link_up(uint16_t port_id) { struct rte_eth_dev *dev; @@ -1149,7 +1068,7 @@ rte_eth_dev_set_link_up(uint8_t port_id) } int -rte_eth_dev_set_link_down(uint8_t port_id) +rte_eth_dev_set_link_down(uint16_t port_id) { struct rte_eth_dev *dev; @@ -1162,7 +1081,7 @@ rte_eth_dev_set_link_down(uint8_t port_id) } void -rte_eth_dev_close(uint8_t port_id) +rte_eth_dev_close(uint16_t port_id) { struct rte_eth_dev *dev; @@ -1173,14 +1092,33 @@ rte_eth_dev_close(uint8_t port_id) dev->data->dev_started = 0; (*dev->dev_ops->dev_close)(dev); + dev->data->nb_rx_queues = 0; rte_free(dev->data->rx_queues); dev->data->rx_queues = NULL; + dev->data->nb_tx_queues = 0; rte_free(dev->data->tx_queues); dev->data->tx_queues = NULL; } int -rte_eth_rx_queue_setup(uint8_t port_id, uint16_t rx_queue_id, +rte_eth_dev_reset(uint16_t port_id) +{ + struct rte_eth_dev *dev; + int ret; + + RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL); + dev = &rte_eth_devices[port_id]; + + RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_reset, -ENOTSUP); + + rte_eth_dev_stop(port_id); + ret = dev->dev_ops->dev_reset(dev); + + return ret; +} + +int +rte_eth_rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id, uint16_t nb_rx_desc, unsigned int socket_id, const struct rte_eth_rxconf *rx_conf, struct rte_mempool *mp) @@ -1189,6 +1127,8 @@ rte_eth_rx_queue_setup(uint8_t port_id, uint16_t rx_queue_id, uint32_t mbp_buf_size; struct rte_eth_dev *dev; struct rte_eth_dev_info dev_info; + struct rte_eth_rxconf local_conf; + void **rxq; RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL); @@ -1247,11 +1187,29 @@ rte_eth_rx_queue_setup(uint8_t port_id, uint16_t rx_queue_id, return -EINVAL; } + rxq = dev->data->rx_queues; + if (rxq[rx_queue_id]) { + RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release, + -ENOTSUP); + (*dev->dev_ops->rx_queue_release)(rxq[rx_queue_id]); + rxq[rx_queue_id] = NULL; + } + if (rx_conf == NULL) rx_conf = &dev_info.default_rxconf; + local_conf = *rx_conf; + if (dev->data->dev_conf.rxmode.ignore_offload_bitfield == 0) { + /** + * Reflect port offloads to queue offloads in order for + * offloads to not be discarded. + */ + rte_eth_convert_rx_offload_bitfield(&dev->data->dev_conf.rxmode, + &local_conf.offloads); + } + ret = (*dev->dev_ops->rx_queue_setup)(dev, rx_queue_id, nb_rx_desc, - socket_id, rx_conf, mp); + socket_id, &local_conf, mp); if (!ret) { if (!dev->data->min_rx_buf_size || dev->data->min_rx_buf_size > mbp_buf_size) @@ -1261,13 +1219,64 @@ rte_eth_rx_queue_setup(uint8_t port_id, uint16_t rx_queue_id, return ret; } +/** + * A conversion function from txq_flags API. + */ +static void +rte_eth_convert_txq_flags(const uint32_t txq_flags, uint64_t *tx_offloads) +{ + uint64_t offloads = 0; + + if (!(txq_flags & ETH_TXQ_FLAGS_NOMULTSEGS)) + offloads |= DEV_TX_OFFLOAD_MULTI_SEGS; + if (!(txq_flags & ETH_TXQ_FLAGS_NOVLANOFFL)) + offloads |= DEV_TX_OFFLOAD_VLAN_INSERT; + if (!(txq_flags & ETH_TXQ_FLAGS_NOXSUMSCTP)) + offloads |= DEV_TX_OFFLOAD_SCTP_CKSUM; + if (!(txq_flags & ETH_TXQ_FLAGS_NOXSUMUDP)) + offloads |= DEV_TX_OFFLOAD_UDP_CKSUM; + if (!(txq_flags & ETH_TXQ_FLAGS_NOXSUMTCP)) + offloads |= DEV_TX_OFFLOAD_TCP_CKSUM; + if ((txq_flags & ETH_TXQ_FLAGS_NOREFCOUNT) && + (txq_flags & ETH_TXQ_FLAGS_NOMULTMEMP)) + offloads |= DEV_TX_OFFLOAD_MBUF_FAST_FREE; + + *tx_offloads = offloads; +} + +/** + * A conversion function from offloads API. 
+ */ +static void +rte_eth_convert_txq_offloads(const uint64_t tx_offloads, uint32_t *txq_flags) +{ + uint32_t flags = 0; + + if (!(tx_offloads & DEV_TX_OFFLOAD_MULTI_SEGS)) + flags |= ETH_TXQ_FLAGS_NOMULTSEGS; + if (!(tx_offloads & DEV_TX_OFFLOAD_VLAN_INSERT)) + flags |= ETH_TXQ_FLAGS_NOVLANOFFL; + if (!(tx_offloads & DEV_TX_OFFLOAD_SCTP_CKSUM)) + flags |= ETH_TXQ_FLAGS_NOXSUMSCTP; + if (!(tx_offloads & DEV_TX_OFFLOAD_UDP_CKSUM)) + flags |= ETH_TXQ_FLAGS_NOXSUMUDP; + if (!(tx_offloads & DEV_TX_OFFLOAD_TCP_CKSUM)) + flags |= ETH_TXQ_FLAGS_NOXSUMTCP; + if (tx_offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE) + flags |= (ETH_TXQ_FLAGS_NOREFCOUNT | ETH_TXQ_FLAGS_NOMULTMEMP); + + *txq_flags = flags; +} + int -rte_eth_tx_queue_setup(uint8_t port_id, uint16_t tx_queue_id, +rte_eth_tx_queue_setup(uint16_t port_id, uint16_t tx_queue_id, uint16_t nb_tx_desc, unsigned int socket_id, const struct rte_eth_txconf *tx_conf) { struct rte_eth_dev *dev; struct rte_eth_dev_info dev_info; + struct rte_eth_txconf local_conf; + void **txq; RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL); @@ -1300,11 +1309,34 @@ rte_eth_tx_queue_setup(uint8_t port_id, uint16_t tx_queue_id, return -EINVAL; } + txq = dev->data->tx_queues; + if (txq[tx_queue_id]) { + RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release, + -ENOTSUP); + (*dev->dev_ops->tx_queue_release)(txq[tx_queue_id]); + txq[tx_queue_id] = NULL; + } + if (tx_conf == NULL) tx_conf = &dev_info.default_txconf; + /* + * Convert between the offloads API to enable PMDs to support + * only one of them. + */ + local_conf = *tx_conf; + if (tx_conf->txq_flags & ETH_TXQ_FLAGS_IGNORE) { + rte_eth_convert_txq_offloads(tx_conf->offloads, + &local_conf.txq_flags); + /* Keep the ignore flag. */ + local_conf.txq_flags |= ETH_TXQ_FLAGS_IGNORE; + } else { + rte_eth_convert_txq_flags(tx_conf->txq_flags, + &local_conf.offloads); + } + return (*dev->dev_ops->tx_queue_setup)(dev, tx_queue_id, nb_tx_desc, - socket_id, tx_conf); + socket_id, &local_conf); } void @@ -1356,8 +1388,22 @@ rte_eth_tx_buffer_init(struct rte_eth_dev_tx_buffer *buffer, uint16_t size) return ret; } +int +rte_eth_tx_done_cleanup(uint16_t port_id, uint16_t queue_id, uint32_t free_cnt) +{ + struct rte_eth_dev *dev = &rte_eth_devices[port_id]; + + /* Validate Input Data. Bail if not valid or not supported. */ + RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); + RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_done_cleanup, -ENOTSUP); + + /* Call driver to free pending mbufs. 
*/ + return (*dev->dev_ops->tx_done_cleanup)(dev->data->tx_queues[queue_id], + free_cnt); +} + void -rte_eth_promiscuous_enable(uint8_t port_id) +rte_eth_promiscuous_enable(uint16_t port_id) { struct rte_eth_dev *dev; @@ -1370,7 +1416,7 @@ rte_eth_promiscuous_enable(uint8_t port_id) } void -rte_eth_promiscuous_disable(uint8_t port_id) +rte_eth_promiscuous_disable(uint16_t port_id) { struct rte_eth_dev *dev; @@ -1383,7 +1429,7 @@ rte_eth_promiscuous_disable(uint8_t port_id) } int -rte_eth_promiscuous_get(uint8_t port_id) +rte_eth_promiscuous_get(uint16_t port_id) { struct rte_eth_dev *dev; @@ -1394,137 +1440,456 @@ rte_eth_promiscuous_get(uint8_t port_id) } void -rte_eth_allmulticast_enable(uint8_t port_id) +rte_eth_allmulticast_enable(uint16_t port_id) +{ + struct rte_eth_dev *dev; + + RTE_ETH_VALID_PORTID_OR_RET(port_id); + dev = &rte_eth_devices[port_id]; + + RTE_FUNC_PTR_OR_RET(*dev->dev_ops->allmulticast_enable); + (*dev->dev_ops->allmulticast_enable)(dev); + dev->data->all_multicast = 1; +} + +void +rte_eth_allmulticast_disable(uint16_t port_id) +{ + struct rte_eth_dev *dev; + + RTE_ETH_VALID_PORTID_OR_RET(port_id); + dev = &rte_eth_devices[port_id]; + + RTE_FUNC_PTR_OR_RET(*dev->dev_ops->allmulticast_disable); + dev->data->all_multicast = 0; + (*dev->dev_ops->allmulticast_disable)(dev); +} + +int +rte_eth_allmulticast_get(uint16_t port_id) +{ + struct rte_eth_dev *dev; + + RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL); + + dev = &rte_eth_devices[port_id]; + return dev->data->all_multicast; +} + +static inline int +rte_eth_dev_atomic_read_link_status(struct rte_eth_dev *dev, + struct rte_eth_link *link) +{ + struct rte_eth_link *dst = link; + struct rte_eth_link *src = &(dev->data->dev_link); + + if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst, + *(uint64_t *)src) == 0) + return -1; + + return 0; +} + +void +rte_eth_link_get(uint16_t port_id, struct rte_eth_link *eth_link) +{ + struct rte_eth_dev *dev; + + RTE_ETH_VALID_PORTID_OR_RET(port_id); + dev = &rte_eth_devices[port_id]; + + if (dev->data->dev_conf.intr_conf.lsc != 0) + rte_eth_dev_atomic_read_link_status(dev, eth_link); + else { + RTE_FUNC_PTR_OR_RET(*dev->dev_ops->link_update); + (*dev->dev_ops->link_update)(dev, 1); + *eth_link = dev->data->dev_link; + } +} + +void +rte_eth_link_get_nowait(uint16_t port_id, struct rte_eth_link *eth_link) { struct rte_eth_dev *dev; RTE_ETH_VALID_PORTID_OR_RET(port_id); dev = &rte_eth_devices[port_id]; - RTE_FUNC_PTR_OR_RET(*dev->dev_ops->allmulticast_enable); - (*dev->dev_ops->allmulticast_enable)(dev); - dev->data->all_multicast = 1; + if (dev->data->dev_conf.intr_conf.lsc != 0) + rte_eth_dev_atomic_read_link_status(dev, eth_link); + else { + RTE_FUNC_PTR_OR_RET(*dev->dev_ops->link_update); + (*dev->dev_ops->link_update)(dev, 0); + *eth_link = dev->data->dev_link; + } +} + +int +rte_eth_stats_get(uint16_t port_id, struct rte_eth_stats *stats) +{ + struct rte_eth_dev *dev; + + RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL); + + dev = &rte_eth_devices[port_id]; + memset(stats, 0, sizeof(*stats)); + + RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_get, -ENOTSUP); + stats->rx_nombuf = dev->data->rx_mbuf_alloc_failed; + return (*dev->dev_ops->stats_get)(dev, stats); +} + +int +rte_eth_stats_reset(uint16_t port_id) +{ + struct rte_eth_dev *dev; + + RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); + dev = &rte_eth_devices[port_id]; + + RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_reset, -ENOTSUP); + (*dev->dev_ops->stats_reset)(dev); + dev->data->rx_mbuf_alloc_failed = 0; + + 
return 0; +} + +static inline int +get_xstats_basic_count(struct rte_eth_dev *dev) +{ + uint16_t nb_rxqs, nb_txqs; + int count; + + nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS); + nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS); + + count = RTE_NB_STATS; + count += nb_rxqs * RTE_NB_RXQ_STATS; + count += nb_txqs * RTE_NB_TXQ_STATS; + + return count; } -void -rte_eth_allmulticast_disable(uint8_t port_id) +static int +get_xstats_count(uint16_t port_id) { struct rte_eth_dev *dev; + int count; - RTE_ETH_VALID_PORTID_OR_RET(port_id); + RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL); dev = &rte_eth_devices[port_id]; + if (dev->dev_ops->xstats_get_names_by_id != NULL) { + count = (*dev->dev_ops->xstats_get_names_by_id)(dev, NULL, + NULL, 0); + if (count < 0) + return count; + } + if (dev->dev_ops->xstats_get_names != NULL) { + count = (*dev->dev_ops->xstats_get_names)(dev, NULL, 0); + if (count < 0) + return count; + } else + count = 0; - RTE_FUNC_PTR_OR_RET(*dev->dev_ops->allmulticast_disable); - dev->data->all_multicast = 0; - (*dev->dev_ops->allmulticast_disable)(dev); + + count += get_xstats_basic_count(dev); + + return count; } int -rte_eth_allmulticast_get(uint8_t port_id) +rte_eth_xstats_get_id_by_name(uint16_t port_id, const char *xstat_name, + uint64_t *id) { - struct rte_eth_dev *dev; + int cnt_xstats, idx_xstat; - RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL); + RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); - dev = &rte_eth_devices[port_id]; - return dev->data->all_multicast; -} + if (!id) { + RTE_PMD_DEBUG_TRACE("Error: id pointer is NULL\n"); + return -ENOMEM; + } -static inline int -rte_eth_dev_atomic_read_link_status(struct rte_eth_dev *dev, - struct rte_eth_link *link) -{ - struct rte_eth_link *dst = link; - struct rte_eth_link *src = &(dev->data->dev_link); + if (!xstat_name) { + RTE_PMD_DEBUG_TRACE("Error: xstat_name pointer is NULL\n"); + return -ENOMEM; + } - if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst, - *(uint64_t *)src) == 0) + /* Get count */ + cnt_xstats = rte_eth_xstats_get_names_by_id(port_id, NULL, 0, NULL); + if (cnt_xstats < 0) { + RTE_PMD_DEBUG_TRACE("Error: Cannot get count of xstats\n"); + return -ENODEV; + } + + /* Get id-name lookup table */ + struct rte_eth_xstat_name xstats_names[cnt_xstats]; + + if (cnt_xstats != rte_eth_xstats_get_names_by_id( + port_id, xstats_names, cnt_xstats, NULL)) { + RTE_PMD_DEBUG_TRACE("Error: Cannot get xstats lookup\n"); return -1; + } - return 0; + for (idx_xstat = 0; idx_xstat < cnt_xstats; idx_xstat++) { + if (!strcmp(xstats_names[idx_xstat].name, xstat_name)) { + *id = idx_xstat; + return 0; + }; + } + + return -EINVAL; } -void -rte_eth_link_get(uint8_t port_id, struct rte_eth_link *eth_link) +/* retrieve ethdev extended statistics names */ +int +rte_eth_xstats_get_names_by_id(uint16_t port_id, + struct rte_eth_xstat_name *xstats_names, unsigned int size, + uint64_t *ids) { + struct rte_eth_xstat_name *xstats_names_copy; + unsigned int no_basic_stat_requested = 1; + unsigned int expected_entries; struct rte_eth_dev *dev; + unsigned int i; + int ret; - RTE_ETH_VALID_PORTID_OR_RET(port_id); + RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); dev = &rte_eth_devices[port_id]; - if (dev->data->dev_conf.intr_conf.lsc != 0) - rte_eth_dev_atomic_read_link_status(dev, eth_link); - else { - RTE_FUNC_PTR_OR_RET(*dev->dev_ops->link_update); - (*dev->dev_ops->link_update)(dev, 1); - *eth_link = dev->data->dev_link; + ret = get_xstats_count(port_id); + if (ret < 0) 
+ return ret; + expected_entries = (unsigned int)ret; + + /* Return max number of stats if no ids given */ + if (!ids) { + if (!xstats_names) + return expected_entries; + else if (xstats_names && size < expected_entries) + return expected_entries; } -} -void -rte_eth_link_get_nowait(uint8_t port_id, struct rte_eth_link *eth_link) -{ - struct rte_eth_dev *dev; + if (ids && !xstats_names) + return -EINVAL; - RTE_ETH_VALID_PORTID_OR_RET(port_id); - dev = &rte_eth_devices[port_id]; + if (ids && dev->dev_ops->xstats_get_names_by_id != NULL && size > 0) { + unsigned int basic_count = get_xstats_basic_count(dev); + uint64_t ids_copy[size]; - if (dev->data->dev_conf.intr_conf.lsc != 0) - rte_eth_dev_atomic_read_link_status(dev, eth_link); - else { - RTE_FUNC_PTR_OR_RET(*dev->dev_ops->link_update); - (*dev->dev_ops->link_update)(dev, 0); - *eth_link = dev->data->dev_link; + for (i = 0; i < size; i++) { + if (ids[i] < basic_count) { + no_basic_stat_requested = 0; + break; + } + + /* + * Convert ids to xstats ids that PMD knows. + * ids known by user are basic + extended stats. + */ + ids_copy[i] = ids[i] - basic_count; + } + + if (no_basic_stat_requested) + return (*dev->dev_ops->xstats_get_names_by_id)(dev, + xstats_names, ids_copy, size); + } + + /* Retrieve all stats */ + if (!ids) { + int num_stats = rte_eth_xstats_get_names(port_id, xstats_names, + expected_entries); + if (num_stats < 0 || num_stats > (int)expected_entries) + return num_stats; + else + return expected_entries; + } + + xstats_names_copy = calloc(expected_entries, + sizeof(struct rte_eth_xstat_name)); + + if (!xstats_names_copy) { + RTE_PMD_DEBUG_TRACE("ERROR: can't allocate memory"); + return -ENOMEM; } + + /* Fill xstats_names_copy structure */ + rte_eth_xstats_get_names(port_id, xstats_names_copy, expected_entries); + + /* Filter stats */ + for (i = 0; i < size; i++) { + if (ids[i] >= expected_entries) { + RTE_PMD_DEBUG_TRACE("ERROR: id value isn't valid\n"); + free(xstats_names_copy); + return -1; + } + xstats_names[i] = xstats_names_copy[ids[i]]; + } + + free(xstats_names_copy); + return size; } int -rte_eth_stats_get(uint8_t port_id, struct rte_eth_stats *stats) +rte_eth_xstats_get_names(uint16_t port_id, + struct rte_eth_xstat_name *xstats_names, + unsigned int size) { struct rte_eth_dev *dev; + int cnt_used_entries; + int cnt_expected_entries; + int cnt_driver_entries; + uint32_t idx, id_queue; + uint16_t num_q; - RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL); + cnt_expected_entries = get_xstats_count(port_id); + if (xstats_names == NULL || cnt_expected_entries < 0 || + (int)size < cnt_expected_entries) + return cnt_expected_entries; + /* port_id checked in get_xstats_count() */ dev = &rte_eth_devices[port_id]; - memset(stats, 0, sizeof(*stats)); + cnt_used_entries = 0; - RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_get, -ENOTSUP); - (*dev->dev_ops->stats_get)(dev, stats); - stats->rx_nombuf = dev->data->rx_mbuf_alloc_failed; - return 0; + for (idx = 0; idx < RTE_NB_STATS; idx++) { + snprintf(xstats_names[cnt_used_entries].name, + sizeof(xstats_names[0].name), + "%s", rte_stats_strings[idx].name); + cnt_used_entries++; + } + num_q = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS); + for (id_queue = 0; id_queue < num_q; id_queue++) { + for (idx = 0; idx < RTE_NB_RXQ_STATS; idx++) { + snprintf(xstats_names[cnt_used_entries].name, + sizeof(xstats_names[0].name), + "rx_q%u%s", + id_queue, rte_rxq_stats_strings[idx].name); + cnt_used_entries++; + } + + } + num_q = RTE_MIN(dev->data->nb_tx_queues, 
RTE_ETHDEV_QUEUE_STAT_CNTRS); + for (id_queue = 0; id_queue < num_q; id_queue++) { + for (idx = 0; idx < RTE_NB_TXQ_STATS; idx++) { + snprintf(xstats_names[cnt_used_entries].name, + sizeof(xstats_names[0].name), + "tx_q%u%s", + id_queue, rte_txq_stats_strings[idx].name); + cnt_used_entries++; + } + } + + if (dev->dev_ops->xstats_get_names != NULL) { + /* If there are any driver-specific xstats, append them + * to end of list. + */ + cnt_driver_entries = (*dev->dev_ops->xstats_get_names)( + dev, + xstats_names + cnt_used_entries, + size - cnt_used_entries); + if (cnt_driver_entries < 0) + return cnt_driver_entries; + cnt_used_entries += cnt_driver_entries; + } + + return cnt_used_entries; } -void -rte_eth_stats_reset(uint8_t port_id) +/* retrieve ethdev extended statistics */ +int +rte_eth_xstats_get_by_id(uint16_t port_id, const uint64_t *ids, + uint64_t *values, unsigned int size) { + unsigned int no_basic_stat_requested = 1; + unsigned int num_xstats_filled; + uint16_t expected_entries; struct rte_eth_dev *dev; + unsigned int i; + int ret; - RTE_ETH_VALID_PORTID_OR_RET(port_id); + RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); + expected_entries = get_xstats_count(port_id); + struct rte_eth_xstat xstats[expected_entries]; dev = &rte_eth_devices[port_id]; - RTE_FUNC_PTR_OR_RET(*dev->dev_ops->stats_reset); - (*dev->dev_ops->stats_reset)(dev); - dev->data->rx_mbuf_alloc_failed = 0; + /* Return max number of stats if no ids given */ + if (!ids) { + if (!values) + return expected_entries; + else if (values && size < expected_entries) + return expected_entries; + } + + if (ids && !values) + return -EINVAL; + + if (ids && dev->dev_ops->xstats_get_by_id != NULL && size) { + unsigned int basic_count = get_xstats_basic_count(dev); + uint64_t ids_copy[size]; + + for (i = 0; i < size; i++) { + if (ids[i] < basic_count) { + no_basic_stat_requested = 0; + break; + } + + /* + * Convert ids to xstats ids that PMD knows. + * ids known by user are basic + extended stats. 
+			 */
+			ids_copy[i] = ids[i] - basic_count;
+		}
+
+		if (no_basic_stat_requested)
+			return (*dev->dev_ops->xstats_get_by_id)(dev, ids_copy,
+					values, size);
+	}
+
+	/* Fill the xstats structure */
+	ret = rte_eth_xstats_get(port_id, xstats, expected_entries);
+	if (ret < 0)
+		return ret;
+	num_xstats_filled = (unsigned int)ret;
+
+	/* Return all stats */
+	if (!ids) {
+		for (i = 0; i < num_xstats_filled; i++)
+			values[i] = xstats[i].value;
+		return expected_entries;
+	}
+
+	/* Filter stats */
+	for (i = 0; i < size; i++) {
+		if (ids[i] >= expected_entries) {
+			RTE_PMD_DEBUG_TRACE("ERROR: id value isn't valid\n");
+			return -1;
+		}
+		values[i] = xstats[ids[i]].value;
+	}
+	return size;
 }
 
-/* retrieve ethdev extended statistics */
 int
-rte_eth_xstats_get(uint8_t port_id, struct rte_eth_xstats *xstats,
-	unsigned n)
+rte_eth_xstats_get(uint16_t port_id, struct rte_eth_xstat *xstats,
+	unsigned int n)
 {
 	struct rte_eth_stats eth_stats;
 	struct rte_eth_dev *dev;
-	unsigned count = 0, i, q;
-	signed xcount = 0;
+	unsigned int count = 0, i, q;
+	signed int xcount = 0;
 	uint64_t val, *stats_ptr;
+	uint16_t nb_rxqs, nb_txqs;
 
 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
 
 	dev = &rte_eth_devices[port_id];
 
+	nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
+	nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
+
 	/* Return generic statistics */
-	count = RTE_NB_STATS + (dev->data->nb_rx_queues * RTE_NB_RXQ_STATS) +
-		(dev->data->nb_tx_queues * RTE_NB_TXQ_STATS);
+	count = RTE_NB_STATS + (nb_rxqs * RTE_NB_RXQ_STATS) +
+		(nb_txqs * RTE_NB_TXQ_STATS);
 
 	/* implemented by the driver */
 	if (dev->dev_ops->xstats_get != NULL) {
@@ -1551,45 +1916,43 @@ rte_eth_xstats_get(uint8_t port_id, struct rte_eth_xstats *xstats,
 		stats_ptr = RTE_PTR_ADD(&eth_stats,
 					rte_stats_strings[i].offset);
 		val = *stats_ptr;
-		snprintf(xstats[count].name, sizeof(xstats[count].name),
-			"%s", rte_stats_strings[i].name);
 		xstats[count++].value = val;
 	}
 
 	/* per-rxq stats */
-	for (q = 0; q < dev->data->nb_rx_queues; q++) {
+	for (q = 0; q < nb_rxqs; q++) {
 		for (i = 0; i < RTE_NB_RXQ_STATS; i++) {
 			stats_ptr = RTE_PTR_ADD(&eth_stats,
 					rte_rxq_stats_strings[i].offset +
 					q * sizeof(uint64_t));
 			val = *stats_ptr;
-			snprintf(xstats[count].name, sizeof(xstats[count].name),
-				"rx_q%u_%s", q,
-				rte_rxq_stats_strings[i].name);
 			xstats[count++].value = val;
 		}
 	}
 
 	/* per-txq stats */
-	for (q = 0; q < dev->data->nb_tx_queues; q++) {
+	for (q = 0; q < nb_txqs; q++) {
 		for (i = 0; i < RTE_NB_TXQ_STATS; i++) {
 			stats_ptr = RTE_PTR_ADD(&eth_stats,
 					rte_txq_stats_strings[i].offset +
 					q * sizeof(uint64_t));
 			val = *stats_ptr;
-			snprintf(xstats[count].name, sizeof(xstats[count].name),
-				"tx_q%u_%s", q,
-				rte_txq_stats_strings[i].name);
 			xstats[count++].value = val;
 		}
 	}
 
+	for (i = 0; i < count; i++)
+		xstats[i].id = i;
+	/* add an offset to driver-specific stats */
+	for ( ; i < count + xcount; i++)
+		xstats[i].id += count;
+
 	return count + xcount;
 }
 
 /* reset ethdev extended statistics */
 void
-rte_eth_xstats_reset(uint8_t port_id)
+rte_eth_xstats_reset(uint16_t port_id)
 {
 	struct rte_eth_dev *dev;
 
@@ -1607,7 +1970,7 @@ rte_eth_xstats_reset(uint8_t port_id)
 }
 
 static int
-set_queue_stats_mapping(uint8_t port_id, uint16_t queue_id, uint8_t stat_idx,
+set_queue_stats_mapping(uint16_t port_id, uint16_t queue_id, uint8_t stat_idx,
 		uint8_t is_rx)
 {
 	struct rte_eth_dev *dev;
@@ -1623,7 +1986,7 @@ set_queue_stats_mapping(uint8_t port_id, uint16_t queue_id, uint8_t stat_idx,
 
 int
-rte_eth_dev_set_tx_queue_stats_mapping(uint8_t port_id, uint16_t 
tx_queue_id, +rte_eth_dev_set_tx_queue_stats_mapping(uint16_t port_id, uint16_t tx_queue_id, uint8_t stat_idx) { return set_queue_stats_mapping(port_id, tx_queue_id, stat_idx, @@ -1632,16 +1995,27 @@ rte_eth_dev_set_tx_queue_stats_mapping(uint8_t port_id, uint16_t tx_queue_id, int -rte_eth_dev_set_rx_queue_stats_mapping(uint8_t port_id, uint16_t rx_queue_id, +rte_eth_dev_set_rx_queue_stats_mapping(uint16_t port_id, uint16_t rx_queue_id, uint8_t stat_idx) { return set_queue_stats_mapping(port_id, rx_queue_id, stat_idx, STAT_QMAP_RX); } +int +rte_eth_dev_fw_version_get(uint16_t port_id, char *fw_version, size_t fw_size) +{ + struct rte_eth_dev *dev; + + RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); + dev = &rte_eth_devices[port_id]; + + RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fw_version_get, -ENOTSUP); + return (*dev->dev_ops->fw_version_get)(dev, fw_version, fw_size); +} void -rte_eth_dev_info_get(uint8_t port_id, struct rte_eth_dev_info *dev_info) +rte_eth_dev_info_get(uint16_t port_id, struct rte_eth_dev_info *dev_info) { struct rte_eth_dev *dev; const struct rte_eth_desc_lim lim = { @@ -1659,12 +2033,13 @@ rte_eth_dev_info_get(uint8_t port_id, struct rte_eth_dev_info *dev_info) RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_infos_get); (*dev->dev_ops->dev_infos_get)(dev, dev_info); - dev_info->pci_dev = dev->pci_dev; - dev_info->driver_name = dev->data->drv_name; + dev_info->driver_name = dev->device->driver->name; + dev_info->nb_rx_queues = dev->data->nb_rx_queues; + dev_info->nb_tx_queues = dev->data->nb_tx_queues; } int -rte_eth_dev_get_supported_ptypes(uint8_t port_id, uint32_t ptype_mask, +rte_eth_dev_get_supported_ptypes(uint16_t port_id, uint32_t ptype_mask, uint32_t *ptypes, int num) { int i, j; @@ -1690,7 +2065,7 @@ rte_eth_dev_get_supported_ptypes(uint8_t port_id, uint32_t ptype_mask, } void -rte_eth_macaddr_get(uint8_t port_id, struct ether_addr *mac_addr) +rte_eth_macaddr_get(uint16_t port_id, struct ether_addr *mac_addr) { struct rte_eth_dev *dev; @@ -1701,7 +2076,7 @@ rte_eth_macaddr_get(uint8_t port_id, struct ether_addr *mac_addr) int -rte_eth_dev_get_mtu(uint8_t port_id, uint16_t *mtu) +rte_eth_dev_get_mtu(uint16_t port_id, uint16_t *mtu) { struct rte_eth_dev *dev; @@ -1713,7 +2088,7 @@ rte_eth_dev_get_mtu(uint8_t port_id, uint16_t *mtu) } int -rte_eth_dev_set_mtu(uint8_t port_id, uint16_t mtu) +rte_eth_dev_set_mtu(uint16_t port_id, uint16_t mtu) { int ret; struct rte_eth_dev *dev; @@ -1730,13 +2105,15 @@ rte_eth_dev_set_mtu(uint8_t port_id, uint16_t mtu) } int -rte_eth_dev_vlan_filter(uint8_t port_id, uint16_t vlan_id, int on) +rte_eth_dev_vlan_filter(uint16_t port_id, uint16_t vlan_id, int on) { struct rte_eth_dev *dev; + int ret; RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); dev = &rte_eth_devices[port_id]; - if (!(dev->data->dev_conf.rxmode.hw_vlan_filter)) { + if (!(dev->data->dev_conf.rxmode.offloads & + DEV_RX_OFFLOAD_VLAN_FILTER)) { RTE_PMD_DEBUG_TRACE("port %d: vlan-filtering disabled\n", port_id); return -ENOSYS; } @@ -1748,11 +2125,28 @@ rte_eth_dev_vlan_filter(uint8_t port_id, uint16_t vlan_id, int on) } RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_filter_set, -ENOTSUP); - return (*dev->dev_ops->vlan_filter_set)(dev, vlan_id, on); + ret = (*dev->dev_ops->vlan_filter_set)(dev, vlan_id, on); + if (ret == 0) { + struct rte_vlan_filter_conf *vfc; + int vidx; + int vbit; + + vfc = &dev->data->vlan_filter_conf; + vidx = vlan_id / 64; + vbit = vlan_id % 64; + + if (on) + vfc->ids[vidx] |= UINT64_C(1) << vbit; + else + vfc->ids[vidx] &= ~(UINT64_C(1) << vbit); + } + 
+ return ret; } int -rte_eth_dev_set_vlan_strip_on_queue(uint8_t port_id, uint16_t rx_queue_id, int on) +rte_eth_dev_set_vlan_strip_on_queue(uint16_t port_id, uint16_t rx_queue_id, + int on) { struct rte_eth_dev *dev; @@ -1770,7 +2164,7 @@ rte_eth_dev_set_vlan_strip_on_queue(uint8_t port_id, uint16_t rx_queue_id, int o } int -rte_eth_dev_set_vlan_ether_type(uint8_t port_id, +rte_eth_dev_set_vlan_ether_type(uint16_t port_id, enum rte_vlan_type vlan_type, uint16_t tpid) { @@ -1784,35 +2178,57 @@ rte_eth_dev_set_vlan_ether_type(uint8_t port_id, } int -rte_eth_dev_set_vlan_offload(uint8_t port_id, int offload_mask) +rte_eth_dev_set_vlan_offload(uint16_t port_id, int offload_mask) { struct rte_eth_dev *dev; int ret = 0; int mask = 0; int cur, org = 0; + uint64_t orig_offloads; RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); dev = &rte_eth_devices[port_id]; + /* save original values in case of failure */ + orig_offloads = dev->data->dev_conf.rxmode.offloads; + /*check which option changed by application*/ cur = !!(offload_mask & ETH_VLAN_STRIP_OFFLOAD); - org = !!(dev->data->dev_conf.rxmode.hw_vlan_strip); + org = !!(dev->data->dev_conf.rxmode.offloads & + DEV_RX_OFFLOAD_VLAN_STRIP); if (cur != org) { - dev->data->dev_conf.rxmode.hw_vlan_strip = (uint8_t)cur; + if (cur) + dev->data->dev_conf.rxmode.offloads |= + DEV_RX_OFFLOAD_VLAN_STRIP; + else + dev->data->dev_conf.rxmode.offloads &= + ~DEV_RX_OFFLOAD_VLAN_STRIP; mask |= ETH_VLAN_STRIP_MASK; } cur = !!(offload_mask & ETH_VLAN_FILTER_OFFLOAD); - org = !!(dev->data->dev_conf.rxmode.hw_vlan_filter); + org = !!(dev->data->dev_conf.rxmode.offloads & + DEV_RX_OFFLOAD_VLAN_FILTER); if (cur != org) { - dev->data->dev_conf.rxmode.hw_vlan_filter = (uint8_t)cur; + if (cur) + dev->data->dev_conf.rxmode.offloads |= + DEV_RX_OFFLOAD_VLAN_FILTER; + else + dev->data->dev_conf.rxmode.offloads &= + ~DEV_RX_OFFLOAD_VLAN_FILTER; mask |= ETH_VLAN_FILTER_MASK; } cur = !!(offload_mask & ETH_VLAN_EXTEND_OFFLOAD); - org = !!(dev->data->dev_conf.rxmode.hw_vlan_extend); + org = !!(dev->data->dev_conf.rxmode.offloads & + DEV_RX_OFFLOAD_VLAN_EXTEND); if (cur != org) { - dev->data->dev_conf.rxmode.hw_vlan_extend = (uint8_t)cur; + if (cur) + dev->data->dev_conf.rxmode.offloads |= + DEV_RX_OFFLOAD_VLAN_EXTEND; + else + dev->data->dev_conf.rxmode.offloads &= + ~DEV_RX_OFFLOAD_VLAN_EXTEND; mask |= ETH_VLAN_EXTEND_MASK; } @@ -1821,13 +2237,26 @@ rte_eth_dev_set_vlan_offload(uint8_t port_id, int offload_mask) return ret; RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_offload_set, -ENOTSUP); - (*dev->dev_ops->vlan_offload_set)(dev, mask); + + /* + * Convert to the offload bitfield API just in case the underlying PMD + * still supporting it. 
+ */ + rte_eth_convert_rx_offloads(dev->data->dev_conf.rxmode.offloads, + &dev->data->dev_conf.rxmode); + ret = (*dev->dev_ops->vlan_offload_set)(dev, mask); + if (ret) { + /* hit an error restore original values */ + dev->data->dev_conf.rxmode.offloads = orig_offloads; + rte_eth_convert_rx_offloads(dev->data->dev_conf.rxmode.offloads, + &dev->data->dev_conf.rxmode); + } return ret; } int -rte_eth_dev_get_vlan_offload(uint8_t port_id) +rte_eth_dev_get_vlan_offload(uint16_t port_id) { struct rte_eth_dev *dev; int ret = 0; @@ -1835,20 +2264,23 @@ rte_eth_dev_get_vlan_offload(uint8_t port_id) RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); dev = &rte_eth_devices[port_id]; - if (dev->data->dev_conf.rxmode.hw_vlan_strip) + if (dev->data->dev_conf.rxmode.offloads & + DEV_RX_OFFLOAD_VLAN_STRIP) ret |= ETH_VLAN_STRIP_OFFLOAD; - if (dev->data->dev_conf.rxmode.hw_vlan_filter) + if (dev->data->dev_conf.rxmode.offloads & + DEV_RX_OFFLOAD_VLAN_FILTER) ret |= ETH_VLAN_FILTER_OFFLOAD; - if (dev->data->dev_conf.rxmode.hw_vlan_extend) + if (dev->data->dev_conf.rxmode.offloads & + DEV_RX_OFFLOAD_VLAN_EXTEND) ret |= ETH_VLAN_EXTEND_OFFLOAD; return ret; } int -rte_eth_dev_set_vlan_pvid(uint8_t port_id, uint16_t pvid, int on) +rte_eth_dev_set_vlan_pvid(uint16_t port_id, uint16_t pvid, int on) { struct rte_eth_dev *dev; @@ -1861,7 +2293,7 @@ rte_eth_dev_set_vlan_pvid(uint8_t port_id, uint16_t pvid, int on) } int -rte_eth_dev_flow_ctrl_get(uint8_t port_id, struct rte_eth_fc_conf *fc_conf) +rte_eth_dev_flow_ctrl_get(uint16_t port_id, struct rte_eth_fc_conf *fc_conf) { struct rte_eth_dev *dev; @@ -1873,7 +2305,7 @@ rte_eth_dev_flow_ctrl_get(uint8_t port_id, struct rte_eth_fc_conf *fc_conf) } int -rte_eth_dev_flow_ctrl_set(uint8_t port_id, struct rte_eth_fc_conf *fc_conf) +rte_eth_dev_flow_ctrl_set(uint16_t port_id, struct rte_eth_fc_conf *fc_conf) { struct rte_eth_dev *dev; @@ -1889,7 +2321,8 @@ rte_eth_dev_flow_ctrl_set(uint8_t port_id, struct rte_eth_fc_conf *fc_conf) } int -rte_eth_dev_priority_flow_ctrl_set(uint8_t port_id, struct rte_eth_pfc_conf *pfc_conf) +rte_eth_dev_priority_flow_ctrl_set(uint16_t port_id, + struct rte_eth_pfc_conf *pfc_conf) { struct rte_eth_dev *dev; @@ -1915,13 +2348,7 @@ rte_eth_check_reta_mask(struct rte_eth_rss_reta_entry64 *reta_conf, if (!reta_conf) return -EINVAL; - if (reta_size != RTE_ALIGN(reta_size, RTE_RETA_GROUP_SIZE)) { - RTE_PMD_DEBUG_TRACE("Invalid reta size, should be %u aligned\n", - RTE_RETA_GROUP_SIZE); - return -EINVAL; - } - - num = reta_size / RTE_RETA_GROUP_SIZE; + num = (reta_size + RTE_RETA_GROUP_SIZE - 1) / RTE_RETA_GROUP_SIZE; for (i = 0; i < num; i++) { if (reta_conf[i].mask) return 0; @@ -1961,7 +2388,7 @@ rte_eth_check_reta_entry(struct rte_eth_rss_reta_entry64 *reta_conf, } int -rte_eth_dev_rss_reta_update(uint8_t port_id, +rte_eth_dev_rss_reta_update(uint16_t port_id, struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size) { @@ -1987,17 +2414,14 @@ rte_eth_dev_rss_reta_update(uint8_t port_id, } int -rte_eth_dev_rss_reta_query(uint8_t port_id, +rte_eth_dev_rss_reta_query(uint16_t port_id, struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size) { struct rte_eth_dev *dev; int ret; - if (port_id >= nb_ports) { - RTE_PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id); - return -ENODEV; - } + RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); /* Check mask bits */ ret = rte_eth_check_reta_mask(reta_conf, reta_size); @@ -2010,26 +2434,19 @@ rte_eth_dev_rss_reta_query(uint8_t port_id, } int -rte_eth_dev_rss_hash_update(uint8_t port_id, struct 
rte_eth_rss_conf *rss_conf) +rte_eth_dev_rss_hash_update(uint16_t port_id, + struct rte_eth_rss_conf *rss_conf) { struct rte_eth_dev *dev; - uint16_t rss_hash_protos; RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); - rss_hash_protos = rss_conf->rss_hf; - if ((rss_hash_protos != 0) && - ((rss_hash_protos & ETH_RSS_PROTO_MASK) == 0)) { - RTE_PMD_DEBUG_TRACE("Invalid rss_hash_protos=0x%x\n", - rss_hash_protos); - return -EINVAL; - } dev = &rte_eth_devices[port_id]; RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rss_hash_update, -ENOTSUP); return (*dev->dev_ops->rss_hash_update)(dev, rss_conf); } int -rte_eth_dev_rss_hash_conf_get(uint8_t port_id, +rte_eth_dev_rss_hash_conf_get(uint16_t port_id, struct rte_eth_rss_conf *rss_conf) { struct rte_eth_dev *dev; @@ -2041,7 +2458,7 @@ rte_eth_dev_rss_hash_conf_get(uint8_t port_id, } int -rte_eth_dev_udp_tunnel_port_add(uint8_t port_id, +rte_eth_dev_udp_tunnel_port_add(uint16_t port_id, struct rte_eth_udp_tunnel *udp_tunnel) { struct rte_eth_dev *dev; @@ -2063,7 +2480,7 @@ rte_eth_dev_udp_tunnel_port_add(uint8_t port_id, } int -rte_eth_dev_udp_tunnel_port_delete(uint8_t port_id, +rte_eth_dev_udp_tunnel_port_delete(uint16_t port_id, struct rte_eth_udp_tunnel *udp_tunnel) { struct rte_eth_dev *dev; @@ -2086,7 +2503,7 @@ rte_eth_dev_udp_tunnel_port_delete(uint8_t port_id, } int -rte_eth_led_on(uint8_t port_id) +rte_eth_led_on(uint16_t port_id) { struct rte_eth_dev *dev; @@ -2097,7 +2514,7 @@ rte_eth_led_on(uint8_t port_id) } int -rte_eth_led_off(uint8_t port_id) +rte_eth_led_off(uint16_t port_id) { struct rte_eth_dev *dev; @@ -2112,12 +2529,13 @@ rte_eth_led_off(uint8_t port_id) * an empty spot. */ static int -get_mac_addr_index(uint8_t port_id, const struct ether_addr *addr) +get_mac_addr_index(uint16_t port_id, const struct ether_addr *addr) { struct rte_eth_dev_info dev_info; struct rte_eth_dev *dev = &rte_eth_devices[port_id]; unsigned i; + RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); rte_eth_dev_info_get(port_id, &dev_info); for (i = 0; i < dev_info.max_mac_addrs; i++) @@ -2130,12 +2548,13 @@ get_mac_addr_index(uint8_t port_id, const struct ether_addr *addr) static const struct ether_addr null_mac_addr; int -rte_eth_dev_mac_addr_add(uint8_t port_id, struct ether_addr *addr, +rte_eth_dev_mac_addr_add(uint16_t port_id, struct ether_addr *addr, uint32_t pool) { struct rte_eth_dev *dev; int index; uint64_t pool_mask; + int ret; RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); dev = &rte_eth_devices[port_id]; @@ -2168,19 +2587,21 @@ rte_eth_dev_mac_addr_add(uint8_t port_id, struct ether_addr *addr, } /* Update NIC */ - (*dev->dev_ops->mac_addr_add)(dev, addr, index, pool); + ret = (*dev->dev_ops->mac_addr_add)(dev, addr, index, pool); - /* Update address in NIC data structure */ - ether_addr_copy(addr, &dev->data->mac_addrs[index]); + if (ret == 0) { + /* Update address in NIC data structure */ + ether_addr_copy(addr, &dev->data->mac_addrs[index]); - /* Update pool bitmap in NIC data structure */ - dev->data->mac_pool_sel[index] |= (1ULL << pool); + /* Update pool bitmap in NIC data structure */ + dev->data->mac_pool_sel[index] |= (1ULL << pool); + } - return 0; + return ret; } int -rte_eth_dev_mac_addr_remove(uint8_t port_id, struct ether_addr *addr) +rte_eth_dev_mac_addr_remove(uint16_t port_id, struct ether_addr *addr) { struct rte_eth_dev *dev; int index; @@ -2209,7 +2630,7 @@ rte_eth_dev_mac_addr_remove(uint8_t port_id, struct ether_addr *addr) } int -rte_eth_dev_default_mac_addr_set(uint8_t port_id, struct ether_addr *addr) 
+rte_eth_dev_default_mac_addr_set(uint16_t port_id, struct ether_addr *addr) { struct rte_eth_dev *dev; @@ -2229,39 +2650,13 @@ rte_eth_dev_default_mac_addr_set(uint8_t port_id, struct ether_addr *addr) return 0; } -int -rte_eth_dev_set_vf_rxmode(uint8_t port_id, uint16_t vf, - uint16_t rx_mode, uint8_t on) -{ - uint16_t num_vfs; - struct rte_eth_dev *dev; - struct rte_eth_dev_info dev_info; - - RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); - - dev = &rte_eth_devices[port_id]; - rte_eth_dev_info_get(port_id, &dev_info); - - num_vfs = dev_info.max_vfs; - if (vf > num_vfs) { - RTE_PMD_DEBUG_TRACE("set VF RX mode:invalid VF id %d\n", vf); - return -EINVAL; - } - - if (rx_mode == 0) { - RTE_PMD_DEBUG_TRACE("set VF RX mode:mode mask ca not be zero\n"); - return -EINVAL; - } - RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_vf_rx_mode, -ENOTSUP); - return (*dev->dev_ops->set_vf_rx_mode)(dev, vf, rx_mode, on); -} /* * Returns index into MAC address array of addr. Use 00:00:00:00:00:00 to find * an empty spot. */ static int -get_hash_mac_addr_index(uint8_t port_id, const struct ether_addr *addr) +get_hash_mac_addr_index(uint16_t port_id, const struct ether_addr *addr) { struct rte_eth_dev_info dev_info; struct rte_eth_dev *dev = &rte_eth_devices[port_id]; @@ -2280,7 +2675,7 @@ get_hash_mac_addr_index(uint8_t port_id, const struct ether_addr *addr) } int -rte_eth_dev_uc_hash_table_set(uint8_t port_id, struct ether_addr *addr, +rte_eth_dev_uc_hash_table_set(uint16_t port_id, struct ether_addr *addr, uint8_t on) { int index; @@ -2332,157 +2727,54 @@ rte_eth_dev_uc_hash_table_set(uint8_t port_id, struct ether_addr *addr, } int -rte_eth_dev_uc_all_hash_table_set(uint8_t port_id, uint8_t on) -{ - struct rte_eth_dev *dev; - - RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); - - dev = &rte_eth_devices[port_id]; - - RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->uc_all_hash_table_set, -ENOTSUP); - return (*dev->dev_ops->uc_all_hash_table_set)(dev, on); -} - -int -rte_eth_dev_set_vf_rx(uint8_t port_id, uint16_t vf, uint8_t on) -{ - uint16_t num_vfs; - struct rte_eth_dev *dev; - struct rte_eth_dev_info dev_info; - - RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); - - dev = &rte_eth_devices[port_id]; - rte_eth_dev_info_get(port_id, &dev_info); - - num_vfs = dev_info.max_vfs; - if (vf > num_vfs) { - RTE_PMD_DEBUG_TRACE("port %d: invalid vf id\n", port_id); - return -EINVAL; - } - - RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_vf_rx, -ENOTSUP); - return (*dev->dev_ops->set_vf_rx)(dev, vf, on); -} - -int -rte_eth_dev_set_vf_tx(uint8_t port_id, uint16_t vf, uint8_t on) -{ - uint16_t num_vfs; - struct rte_eth_dev *dev; - struct rte_eth_dev_info dev_info; - - RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); - - dev = &rte_eth_devices[port_id]; - rte_eth_dev_info_get(port_id, &dev_info); - - num_vfs = dev_info.max_vfs; - if (vf > num_vfs) { - RTE_PMD_DEBUG_TRACE("set pool tx:invalid pool id=%d\n", vf); - return -EINVAL; - } - - RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_vf_tx, -ENOTSUP); - return (*dev->dev_ops->set_vf_tx)(dev, vf, on); -} - -int -rte_eth_dev_set_vf_vlan_filter(uint8_t port_id, uint16_t vlan_id, - uint64_t vf_mask, uint8_t vlan_on) -{ - struct rte_eth_dev *dev; - - RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); - - dev = &rte_eth_devices[port_id]; - - if (vlan_id > ETHER_MAX_VLAN_ID) { - RTE_PMD_DEBUG_TRACE("VF VLAN filter:invalid VLAN id=%d\n", - vlan_id); - return -EINVAL; - } - - if (vf_mask == 0) { - RTE_PMD_DEBUG_TRACE("VF VLAN filter:pool_mask can not be 0\n"); - return -EINVAL; - } - - 
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_vf_vlan_filter, -ENOTSUP); - return (*dev->dev_ops->set_vf_vlan_filter)(dev, vlan_id, - vf_mask, vlan_on); -} - -int rte_eth_set_queue_rate_limit(uint8_t port_id, uint16_t queue_idx, - uint16_t tx_rate) +rte_eth_dev_uc_all_hash_table_set(uint16_t port_id, uint8_t on) { struct rte_eth_dev *dev; - struct rte_eth_dev_info dev_info; - struct rte_eth_link link; - - RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); - - dev = &rte_eth_devices[port_id]; - rte_eth_dev_info_get(port_id, &dev_info); - link = dev->data->dev_link; - if (queue_idx > dev_info.max_tx_queues) { - RTE_PMD_DEBUG_TRACE("set queue rate limit:port %d: " - "invalid queue id=%d\n", port_id, queue_idx); - return -EINVAL; - } + RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); - if (tx_rate > link.link_speed) { - RTE_PMD_DEBUG_TRACE("set queue rate limit:invalid tx_rate=%d, " - "bigger than link speed= %d\n", - tx_rate, link.link_speed); - return -EINVAL; - } + dev = &rte_eth_devices[port_id]; - RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_queue_rate_limit, -ENOTSUP); - return (*dev->dev_ops->set_queue_rate_limit)(dev, queue_idx, tx_rate); + RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->uc_all_hash_table_set, -ENOTSUP); + return (*dev->dev_ops->uc_all_hash_table_set)(dev, on); } -int rte_eth_set_vf_rate_limit(uint8_t port_id, uint16_t vf, uint16_t tx_rate, - uint64_t q_msk) +int rte_eth_set_queue_rate_limit(uint16_t port_id, uint16_t queue_idx, + uint16_t tx_rate) { struct rte_eth_dev *dev; struct rte_eth_dev_info dev_info; struct rte_eth_link link; - if (q_msk == 0) - return 0; - RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); dev = &rte_eth_devices[port_id]; rte_eth_dev_info_get(port_id, &dev_info); link = dev->data->dev_link; - if (vf > dev_info.max_vfs) { - RTE_PMD_DEBUG_TRACE("set VF rate limit:port %d: " - "invalid vf id=%d\n", port_id, vf); + if (queue_idx > dev_info.max_tx_queues) { + RTE_PMD_DEBUG_TRACE("set queue rate limit:port %d: " + "invalid queue id=%d\n", port_id, queue_idx); return -EINVAL; } if (tx_rate > link.link_speed) { - RTE_PMD_DEBUG_TRACE("set VF rate limit:invalid tx_rate=%d, " + RTE_PMD_DEBUG_TRACE("set queue rate limit:invalid tx_rate=%d, " "bigger than link speed= %d\n", - tx_rate, link.link_speed); + tx_rate, link.link_speed); return -EINVAL; } - RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_vf_rate_limit, -ENOTSUP); - return (*dev->dev_ops->set_vf_rate_limit)(dev, vf, tx_rate, q_msk); + RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_queue_rate_limit, -ENOTSUP); + return (*dev->dev_ops->set_queue_rate_limit)(dev, queue_idx, tx_rate); } int -rte_eth_mirror_rule_set(uint8_t port_id, +rte_eth_mirror_rule_set(uint16_t port_id, struct rte_eth_mirror_conf *mirror_conf, uint8_t rule_id, uint8_t on) { - struct rte_eth_dev *dev = &rte_eth_devices[port_id]; + struct rte_eth_dev *dev; RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); if (mirror_conf->rule_type == 0) { @@ -2516,9 +2808,9 @@ rte_eth_mirror_rule_set(uint8_t port_id, } int -rte_eth_mirror_rule_reset(uint8_t port_id, uint8_t rule_id) +rte_eth_mirror_rule_reset(uint16_t port_id, uint8_t rule_id) { - struct rte_eth_dev *dev = &rte_eth_devices[port_id]; + struct rte_eth_dev *dev; RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); @@ -2529,7 +2821,7 @@ rte_eth_mirror_rule_reset(uint8_t port_id, uint8_t rule_id) } int -rte_eth_dev_callback_register(uint8_t port_id, +rte_eth_dev_callback_register(uint16_t port_id, enum rte_eth_event_type event, rte_eth_dev_cb_fn cb_fn, void *cb_arg) { @@ -2553,14 +2845,15 @@ 
rte_eth_dev_callback_register(uint8_t port_id, } /* create a new callback. */ - if (user_cb == NULL) + if (user_cb == NULL) { user_cb = rte_zmalloc("INTR_USER_CALLBACK", sizeof(struct rte_eth_dev_callback), 0); - if (user_cb != NULL) { - user_cb->cb_fn = cb_fn; - user_cb->cb_arg = cb_arg; - user_cb->event = event; - TAILQ_INSERT_TAIL(&(dev->link_intr_cbs), user_cb, next); + if (user_cb != NULL) { + user_cb->cb_fn = cb_fn; + user_cb->cb_arg = cb_arg; + user_cb->event = event; + TAILQ_INSERT_TAIL(&(dev->link_intr_cbs), user_cb, next); + } } rte_spinlock_unlock(&rte_eth_dev_cb_lock); @@ -2568,7 +2861,7 @@ rte_eth_dev_callback_register(uint8_t port_id, } int -rte_eth_dev_callback_unregister(uint8_t port_id, +rte_eth_dev_callback_unregister(uint16_t port_id, enum rte_eth_event_type event, rte_eth_dev_cb_fn cb_fn, void *cb_arg) { @@ -2610,12 +2903,13 @@ rte_eth_dev_callback_unregister(uint8_t port_id, return ret; } -void +int _rte_eth_dev_callback_process(struct rte_eth_dev *dev, - enum rte_eth_event_type event) + enum rte_eth_event_type event, void *cb_arg, void *ret_param) { struct rte_eth_dev_callback *cb_lst; struct rte_eth_dev_callback dev_cb; + int rc = 0; rte_spinlock_lock(&rte_eth_dev_cb_lock); TAILQ_FOREACH(cb_lst, &(dev->link_intr_cbs), next) { @@ -2623,17 +2917,23 @@ _rte_eth_dev_callback_process(struct rte_eth_dev *dev, continue; dev_cb = *cb_lst; cb_lst->active = 1; + if (cb_arg != NULL) + dev_cb.cb_arg = cb_arg; + if (ret_param != NULL) + dev_cb.ret_param = ret_param; + rte_spinlock_unlock(&rte_eth_dev_cb_lock); - dev_cb.cb_fn(dev->data->port_id, dev_cb.event, - dev_cb.cb_arg); + rc = dev_cb.cb_fn(dev->data->port_id, dev_cb.event, + dev_cb.cb_arg, dev_cb.ret_param); rte_spinlock_lock(&rte_eth_dev_cb_lock); cb_lst->active = 0; } rte_spinlock_unlock(&rte_eth_dev_cb_lock); + return rc; } int -rte_eth_dev_rx_intr_ctl(uint8_t port_id, int epfd, int op, void *data) +rte_eth_dev_rx_intr_ctl(uint16_t port_id, int epfd, int op, void *data) { uint32_t vec; struct rte_eth_dev *dev; @@ -2641,13 +2941,16 @@ rte_eth_dev_rx_intr_ctl(uint8_t port_id, int epfd, int op, void *data) uint16_t qid; int rc; - if (!rte_eth_dev_is_valid_port(port_id)) { - RTE_PMD_DEBUG_TRACE("Invalid port_id=%u\n", port_id); - return -ENODEV; - } + RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); dev = &rte_eth_devices[port_id]; - intr_handle = &dev->pci_dev->intr_handle; + + if (!dev->intr_handle) { + RTE_PMD_DEBUG_TRACE("RX Intr handle unset\n"); + return -ENOTSUP; + } + + intr_handle = dev->intr_handle; if (!intr_handle->intr_vec) { RTE_PMD_DEBUG_TRACE("RX Intr vector unset\n"); return -EPERM; @@ -2675,23 +2978,18 @@ rte_eth_dma_zone_reserve(const struct rte_eth_dev *dev, const char *ring_name, const struct rte_memzone *mz; snprintf(z_name, sizeof(z_name), "%s_%s_%d_%d", - dev->driver->pci_drv.name, ring_name, + dev->device->driver->name, ring_name, dev->data->port_id, queue_id); mz = rte_memzone_lookup(z_name); if (mz) return mz; - if (rte_xen_dom0_supported()) - return rte_memzone_reserve_bounded(z_name, size, socket_id, - 0, align, RTE_PGSIZE_2M); - else - return rte_memzone_reserve_aligned(z_name, size, socket_id, - 0, align); + return rte_memzone_reserve_aligned(z_name, size, socket_id, 0, align); } int -rte_eth_dev_rx_intr_ctl_q(uint8_t port_id, uint16_t queue_id, +rte_eth_dev_rx_intr_ctl_q(uint16_t port_id, uint16_t queue_id, int epfd, int op, void *data) { uint32_t vec; @@ -2699,10 +2997,7 @@ rte_eth_dev_rx_intr_ctl_q(uint8_t port_id, uint16_t queue_id, struct rte_intr_handle *intr_handle; int rc; - if 
(!rte_eth_dev_is_valid_port(port_id)) { - RTE_PMD_DEBUG_TRACE("Invalid port_id=%u\n", port_id); - return -ENODEV; - } + RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); dev = &rte_eth_devices[port_id]; if (queue_id >= dev->data->nb_rx_queues) { @@ -2710,7 +3005,12 @@ rte_eth_dev_rx_intr_ctl_q(uint8_t port_id, uint16_t queue_id, return -EINVAL; } - intr_handle = &dev->pci_dev->intr_handle; + if (!dev->intr_handle) { + RTE_PMD_DEBUG_TRACE("RX Intr handle unset\n"); + return -ENOTSUP; + } + + intr_handle = dev->intr_handle; if (!intr_handle->intr_vec) { RTE_PMD_DEBUG_TRACE("RX Intr vector unset\n"); return -EPERM; @@ -2729,15 +3029,12 @@ rte_eth_dev_rx_intr_ctl_q(uint8_t port_id, uint16_t queue_id, } int -rte_eth_dev_rx_intr_enable(uint8_t port_id, +rte_eth_dev_rx_intr_enable(uint16_t port_id, uint16_t queue_id) { struct rte_eth_dev *dev; - if (!rte_eth_dev_is_valid_port(port_id)) { - RTE_PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id); - return -ENODEV; - } + RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); dev = &rte_eth_devices[port_id]; @@ -2746,15 +3043,12 @@ rte_eth_dev_rx_intr_enable(uint8_t port_id, } int -rte_eth_dev_rx_intr_disable(uint8_t port_id, +rte_eth_dev_rx_intr_disable(uint16_t port_id, uint16_t queue_id) { struct rte_eth_dev *dev; - if (!rte_eth_dev_is_valid_port(port_id)) { - RTE_PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id); - return -ENODEV; - } + RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); dev = &rte_eth_devices[port_id]; @@ -2762,131 +3056,10 @@ rte_eth_dev_rx_intr_disable(uint8_t port_id, return (*dev->dev_ops->rx_queue_intr_disable)(dev, queue_id); } -#ifdef RTE_NIC_BYPASS -int rte_eth_dev_bypass_init(uint8_t port_id) -{ - struct rte_eth_dev *dev; - - RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); - - dev = &rte_eth_devices[port_id]; - RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_init, -ENOTSUP); - (*dev->dev_ops->bypass_init)(dev); - return 0; -} - -int -rte_eth_dev_bypass_state_show(uint8_t port_id, uint32_t *state) -{ - struct rte_eth_dev *dev; - - RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); - - dev = &rte_eth_devices[port_id]; - RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_state_show, -ENOTSUP); - (*dev->dev_ops->bypass_state_show)(dev, state); - return 0; -} - -int -rte_eth_dev_bypass_state_set(uint8_t port_id, uint32_t *new_state) -{ - struct rte_eth_dev *dev; - - RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); - - dev = &rte_eth_devices[port_id]; - RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_state_set, -ENOTSUP); - (*dev->dev_ops->bypass_state_set)(dev, new_state); - return 0; -} - -int -rte_eth_dev_bypass_event_show(uint8_t port_id, uint32_t event, uint32_t *state) -{ - struct rte_eth_dev *dev; - - RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); - - dev = &rte_eth_devices[port_id]; - RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_state_show, -ENOTSUP); - (*dev->dev_ops->bypass_event_show)(dev, event, state); - return 0; -} - -int -rte_eth_dev_bypass_event_store(uint8_t port_id, uint32_t event, uint32_t state) -{ - struct rte_eth_dev *dev; - - RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); - - dev = &rte_eth_devices[port_id]; - - RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_event_set, -ENOTSUP); - (*dev->dev_ops->bypass_event_set)(dev, event, state); - return 0; -} - -int -rte_eth_dev_wd_timeout_store(uint8_t port_id, uint32_t timeout) -{ - struct rte_eth_dev *dev; - - RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); - - dev = &rte_eth_devices[port_id]; - - RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_wd_timeout_set, 
-ENOTSUP); - (*dev->dev_ops->bypass_wd_timeout_set)(dev, timeout); - return 0; -} - -int -rte_eth_dev_bypass_ver_show(uint8_t port_id, uint32_t *ver) -{ - struct rte_eth_dev *dev; - - RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); - - dev = &rte_eth_devices[port_id]; - - RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_ver_show, -ENOTSUP); - (*dev->dev_ops->bypass_ver_show)(dev, ver); - return 0; -} - -int -rte_eth_dev_bypass_wd_timeout_show(uint8_t port_id, uint32_t *wd_timeout) -{ - struct rte_eth_dev *dev; - - RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); - - dev = &rte_eth_devices[port_id]; - - RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_wd_timeout_show, -ENOTSUP); - (*dev->dev_ops->bypass_wd_timeout_show)(dev, wd_timeout); - return 0; -} - -int -rte_eth_dev_bypass_wd_reset(uint8_t port_id) -{ - struct rte_eth_dev *dev; - - RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); - - dev = &rte_eth_devices[port_id]; - - RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_wd_reset, -ENOTSUP); - (*dev->dev_ops->bypass_wd_reset)(dev); - return 0; -} -#endif int -rte_eth_dev_filter_supported(uint8_t port_id, enum rte_filter_type filter_type) +rte_eth_dev_filter_supported(uint16_t port_id, + enum rte_filter_type filter_type) { struct rte_eth_dev *dev; @@ -2899,7 +3072,7 @@ rte_eth_dev_filter_supported(uint8_t port_id, enum rte_filter_type filter_type) } int -rte_eth_dev_filter_ctrl(uint8_t port_id, enum rte_filter_type filter_type, +rte_eth_dev_filter_ctrl(uint16_t port_id, enum rte_filter_type filter_type, enum rte_filter_op filter_op, void *arg) { struct rte_eth_dev *dev; @@ -2912,7 +3085,7 @@ rte_eth_dev_filter_ctrl(uint8_t port_id, enum rte_filter_type filter_type, } void * -rte_eth_add_rx_callback(uint8_t port_id, uint16_t queue_id, +rte_eth_add_rx_callback(uint16_t port_id, uint16_t queue_id, rte_rx_callback_fn fn, void *user_param) { #ifndef RTE_ETHDEV_RXTX_CALLBACKS @@ -2925,7 +3098,6 @@ rte_eth_add_rx_callback(uint8_t port_id, uint16_t queue_id, rte_errno = EINVAL; return NULL; } - struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0); if (cb == NULL) { @@ -2936,6 +3108,7 @@ rte_eth_add_rx_callback(uint8_t port_id, uint16_t queue_id, cb->fn.rx = fn; cb->param = user_param; + rte_spinlock_lock(&rte_eth_rx_cb_lock); /* Add the callbacks in fifo order. 
*/
 	struct rte_eth_rxtx_callback *tail =
 		rte_eth_devices[port_id].post_rx_burst_cbs[queue_id];
@@ -2948,12 +3121,48 @@ rte_eth_add_rx_callback(uint8_t port_id, uint16_t queue_id,
 		tail = tail->next;
 		tail->next = cb;
 	}
+	rte_spinlock_unlock(&rte_eth_rx_cb_lock);
+
+	return cb;
+}
+
+void *
+rte_eth_add_first_rx_callback(uint16_t port_id, uint16_t queue_id,
+		rte_rx_callback_fn fn, void *user_param)
+{
+#ifndef RTE_ETHDEV_RXTX_CALLBACKS
+	rte_errno = ENOTSUP;
+	return NULL;
+#endif
+	/* check input parameters */
+	if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
+		queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) {
+		rte_errno = EINVAL;
+		return NULL;
+	}
+
+	struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);
+
+	if (cb == NULL) {
+		rte_errno = ENOMEM;
+		return NULL;
+	}
+
+	cb->fn.rx = fn;
+	cb->param = user_param;
+
+	rte_spinlock_lock(&rte_eth_rx_cb_lock);
+	/* Add the callback at the first position */
+	cb->next = rte_eth_devices[port_id].post_rx_burst_cbs[queue_id];
+	rte_smp_wmb();
+	rte_eth_devices[port_id].post_rx_burst_cbs[queue_id] = cb;
+	rte_spinlock_unlock(&rte_eth_rx_cb_lock);
 
 	return cb;
 }
 
 void *
-rte_eth_add_tx_callback(uint8_t port_id, uint16_t queue_id,
+rte_eth_add_tx_callback(uint16_t port_id, uint16_t queue_id,
 		rte_tx_callback_fn fn, void *user_param)
 {
 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
@@ -2977,6 +3186,7 @@ rte_eth_add_tx_callback(uint8_t port_id, uint16_t queue_id,
 	cb->fn.tx = fn;
 	cb->param = user_param;
 
+	rte_spinlock_lock(&rte_eth_tx_cb_lock);
 	/* Add the callbacks in fifo order. */
 	struct rte_eth_rxtx_callback *tail =
 		rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id];
@@ -2989,90 +3199,81 @@ rte_eth_add_tx_callback(uint8_t port_id, uint16_t queue_id,
 		tail = tail->next;
 		tail->next = cb;
 	}
+	rte_spinlock_unlock(&rte_eth_tx_cb_lock);
 
 	return cb;
 }
 
 int
-rte_eth_remove_rx_callback(uint8_t port_id, uint16_t queue_id,
+rte_eth_remove_rx_callback(uint16_t port_id, uint16_t queue_id,
 		struct rte_eth_rxtx_callback *user_cb)
 {
 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
 	return -ENOTSUP;
 #endif
 	/* Check input parameters. */
-	if (!rte_eth_dev_is_valid_port(port_id) || user_cb == NULL ||
-		    queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) {
+	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
+	if (user_cb == NULL ||
+			queue_id >= rte_eth_devices[port_id].data->nb_rx_queues)
 		return -EINVAL;
-	}
 
 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
-	struct rte_eth_rxtx_callback *cb = dev->post_rx_burst_cbs[queue_id];
-	struct rte_eth_rxtx_callback *prev_cb;
-
-	/* Reset head pointer and remove user cb if first in the list. */
-	if (cb == user_cb) {
-		dev->post_rx_burst_cbs[queue_id] = user_cb->next;
-		return 0;
-	}
-
-	/* Remove the user cb from the callback list. */
-	do {
-		prev_cb = cb;
-		cb = cb->next;
-
+	struct rte_eth_rxtx_callback *cb;
+	struct rte_eth_rxtx_callback **prev_cb;
+	int ret = -EINVAL;
+
+	rte_spinlock_lock(&rte_eth_rx_cb_lock);
+	prev_cb = &dev->post_rx_burst_cbs[queue_id];
+	for (; *prev_cb != NULL; prev_cb = &cb->next) {
+		cb = *prev_cb;
 		if (cb == user_cb) {
-			prev_cb->next = user_cb->next;
-			return 0;
+			/* Remove the user cb from the callback list. */
+			*prev_cb = cb->next;
+			ret = 0;
+			break;
 		}
+	}
+	rte_spinlock_unlock(&rte_eth_rx_cb_lock);
 
-	} while (cb != NULL);
-
-	/* Callback wasn't found. 
*/ - return -EINVAL; + return ret; } int -rte_eth_remove_tx_callback(uint8_t port_id, uint16_t queue_id, +rte_eth_remove_tx_callback(uint16_t port_id, uint16_t queue_id, struct rte_eth_rxtx_callback *user_cb) { #ifndef RTE_ETHDEV_RXTX_CALLBACKS return -ENOTSUP; #endif /* Check input parameters. */ - if (!rte_eth_dev_is_valid_port(port_id) || user_cb == NULL || - queue_id >= rte_eth_devices[port_id].data->nb_tx_queues) { + RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL); + if (user_cb == NULL || + queue_id >= rte_eth_devices[port_id].data->nb_tx_queues) return -EINVAL; - } struct rte_eth_dev *dev = &rte_eth_devices[port_id]; - struct rte_eth_rxtx_callback *cb = dev->pre_tx_burst_cbs[queue_id]; - struct rte_eth_rxtx_callback *prev_cb; - - /* Reset head pointer and remove user cb if first in the list. */ - if (cb == user_cb) { - dev->pre_tx_burst_cbs[queue_id] = user_cb->next; - return 0; - } - - /* Remove the user cb from the callback list. */ - do { - prev_cb = cb; - cb = cb->next; - + int ret = -EINVAL; + struct rte_eth_rxtx_callback *cb; + struct rte_eth_rxtx_callback **prev_cb; + + rte_spinlock_lock(&rte_eth_tx_cb_lock); + prev_cb = &dev->pre_tx_burst_cbs[queue_id]; + for (; *prev_cb != NULL; prev_cb = &cb->next) { + cb = *prev_cb; if (cb == user_cb) { - prev_cb->next = user_cb->next; - return 0; + /* Remove the user cb from the callback list. */ + *prev_cb = cb->next; + ret = 0; + break; } + } + rte_spinlock_unlock(&rte_eth_tx_cb_lock); - } while (cb != NULL); - - /* Callback wasn't found. */ - return -EINVAL; + return ret; } int -rte_eth_rx_queue_info_get(uint8_t port_id, uint16_t queue_id, +rte_eth_rx_queue_info_get(uint16_t port_id, uint16_t queue_id, struct rte_eth_rxq_info *qinfo) { struct rte_eth_dev *dev; @@ -3096,7 +3297,7 @@ rte_eth_rx_queue_info_get(uint8_t port_id, uint16_t queue_id, } int -rte_eth_tx_queue_info_get(uint8_t port_id, uint16_t queue_id, +rte_eth_tx_queue_info_get(uint16_t port_id, uint16_t queue_id, struct rte_eth_txq_info *qinfo) { struct rte_eth_dev *dev; @@ -3120,7 +3321,7 @@ rte_eth_tx_queue_info_get(uint8_t port_id, uint16_t queue_id, } int -rte_eth_dev_set_mc_addr_list(uint8_t port_id, +rte_eth_dev_set_mc_addr_list(uint16_t port_id, struct ether_addr *mc_addr_set, uint32_t nb_mc_addr) { @@ -3134,7 +3335,7 @@ rte_eth_dev_set_mc_addr_list(uint8_t port_id, } int -rte_eth_timesync_enable(uint8_t port_id) +rte_eth_timesync_enable(uint16_t port_id) { struct rte_eth_dev *dev; @@ -3146,7 +3347,7 @@ rte_eth_timesync_enable(uint8_t port_id) } int -rte_eth_timesync_disable(uint8_t port_id) +rte_eth_timesync_disable(uint16_t port_id) { struct rte_eth_dev *dev; @@ -3158,7 +3359,7 @@ rte_eth_timesync_disable(uint8_t port_id) } int -rte_eth_timesync_read_rx_timestamp(uint8_t port_id, struct timespec *timestamp, +rte_eth_timesync_read_rx_timestamp(uint16_t port_id, struct timespec *timestamp, uint32_t flags) { struct rte_eth_dev *dev; @@ -3171,7 +3372,8 @@ rte_eth_timesync_read_rx_timestamp(uint8_t port_id, struct timespec *timestamp, } int -rte_eth_timesync_read_tx_timestamp(uint8_t port_id, struct timespec *timestamp) +rte_eth_timesync_read_tx_timestamp(uint16_t port_id, + struct timespec *timestamp) { struct rte_eth_dev *dev; @@ -3183,7 +3385,7 @@ rte_eth_timesync_read_tx_timestamp(uint8_t port_id, struct timespec *timestamp) } int -rte_eth_timesync_adjust_time(uint8_t port_id, int64_t delta) +rte_eth_timesync_adjust_time(uint16_t port_id, int64_t delta) { struct rte_eth_dev *dev; @@ -3195,7 +3397,7 @@ rte_eth_timesync_adjust_time(uint8_t port_id, int64_t delta) } 
int -rte_eth_timesync_read_time(uint8_t port_id, struct timespec *timestamp) +rte_eth_timesync_read_time(uint16_t port_id, struct timespec *timestamp) { struct rte_eth_dev *dev; @@ -3207,7 +3409,7 @@ rte_eth_timesync_read_time(uint8_t port_id, struct timespec *timestamp) } int -rte_eth_timesync_write_time(uint8_t port_id, const struct timespec *timestamp) +rte_eth_timesync_write_time(uint16_t port_id, const struct timespec *timestamp) { struct rte_eth_dev *dev; @@ -3219,19 +3421,7 @@ rte_eth_timesync_write_time(uint8_t port_id, const struct timespec *timestamp) } int -rte_eth_dev_get_reg_length(uint8_t port_id) -{ - struct rte_eth_dev *dev; - - RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); - - dev = &rte_eth_devices[port_id]; - RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_reg_length, -ENOTSUP); - return (*dev->dev_ops->get_reg_length)(dev); -} - -int -rte_eth_dev_get_reg_info(uint8_t port_id, struct rte_dev_reg_info *info) +rte_eth_dev_get_reg_info(uint16_t port_id, struct rte_dev_reg_info *info) { struct rte_eth_dev *dev; @@ -3243,7 +3433,7 @@ rte_eth_dev_get_reg_info(uint8_t port_id, struct rte_dev_reg_info *info) } int -rte_eth_dev_get_eeprom_length(uint8_t port_id) +rte_eth_dev_get_eeprom_length(uint16_t port_id) { struct rte_eth_dev *dev; @@ -3255,7 +3445,7 @@ rte_eth_dev_get_eeprom_length(uint8_t port_id) } int -rte_eth_dev_get_eeprom(uint8_t port_id, struct rte_dev_eeprom_info *info) +rte_eth_dev_get_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info) { struct rte_eth_dev *dev; @@ -3267,7 +3457,7 @@ rte_eth_dev_get_eeprom(uint8_t port_id, struct rte_dev_eeprom_info *info) } int -rte_eth_dev_set_eeprom(uint8_t port_id, struct rte_dev_eeprom_info *info) +rte_eth_dev_set_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info) { struct rte_eth_dev *dev; @@ -3279,15 +3469,12 @@ rte_eth_dev_set_eeprom(uint8_t port_id, struct rte_dev_eeprom_info *info) } int -rte_eth_dev_get_dcb_info(uint8_t port_id, +rte_eth_dev_get_dcb_info(uint16_t port_id, struct rte_eth_dcb_info *dcb_info) { struct rte_eth_dev *dev; - if (!rte_eth_dev_is_valid_port(port_id)) { - RTE_PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id); - return -ENODEV; - } + RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); dev = &rte_eth_devices[port_id]; memset(dcb_info, 0, sizeof(struct rte_eth_dcb_info)); @@ -3296,28 +3483,8 @@ rte_eth_dev_get_dcb_info(uint8_t port_id, return (*dev->dev_ops->get_dcb_info)(dev, dcb_info); } -void -rte_eth_copy_pci_info(struct rte_eth_dev *eth_dev, struct rte_pci_device *pci_dev) -{ - if ((eth_dev == NULL) || (pci_dev == NULL)) { - RTE_PMD_DEBUG_TRACE("NULL pointer eth_dev=%p pci_dev=%p\n", - eth_dev, pci_dev); - return; - } - - eth_dev->data->dev_flags = 0; - if (pci_dev->driver->drv_flags & RTE_PCI_DRV_INTR_LSC) - eth_dev->data->dev_flags |= RTE_ETH_DEV_INTR_LSC; - if (pci_dev->driver->drv_flags & RTE_PCI_DRV_DETACHABLE) - eth_dev->data->dev_flags |= RTE_ETH_DEV_DETACHABLE; - - eth_dev->data->kdrv = pci_dev->kdrv; - eth_dev->data->numa_node = pci_dev->numa_node; - eth_dev->data->drv_name = pci_dev->driver->name; -} - int -rte_eth_dev_l2_tunnel_eth_type_conf(uint8_t port_id, +rte_eth_dev_l2_tunnel_eth_type_conf(uint16_t port_id, struct rte_eth_l2_tunnel_conf *l2_tunnel) { struct rte_eth_dev *dev; @@ -3340,7 +3507,7 @@ rte_eth_dev_l2_tunnel_eth_type_conf(uint8_t port_id, } int -rte_eth_dev_l2_tunnel_offload_set(uint8_t port_id, +rte_eth_dev_l2_tunnel_offload_set(uint16_t port_id, struct rte_eth_l2_tunnel_conf *l2_tunnel, uint32_t mask, uint8_t en) @@ -3369,3 +3536,58 @@ 
rte_eth_dev_l2_tunnel_offload_set(uint8_t port_id, -ENOTSUP); return (*dev->dev_ops->l2_tunnel_offload_set)(dev, l2_tunnel, mask, en); } + +static void +rte_eth_dev_adjust_nb_desc(uint16_t *nb_desc, + const struct rte_eth_desc_lim *desc_lim) +{ + if (desc_lim->nb_align != 0) + *nb_desc = RTE_ALIGN_CEIL(*nb_desc, desc_lim->nb_align); + + if (desc_lim->nb_max != 0) + *nb_desc = RTE_MIN(*nb_desc, desc_lim->nb_max); + + *nb_desc = RTE_MAX(*nb_desc, desc_lim->nb_min); +} + +int +rte_eth_dev_adjust_nb_rx_tx_desc(uint16_t port_id, + uint16_t *nb_rx_desc, + uint16_t *nb_tx_desc) +{ + struct rte_eth_dev *dev; + struct rte_eth_dev_info dev_info; + + RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); + + dev = &rte_eth_devices[port_id]; + RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP); + + rte_eth_dev_info_get(port_id, &dev_info); + + if (nb_rx_desc != NULL) + rte_eth_dev_adjust_nb_desc(nb_rx_desc, &dev_info.rx_desc_lim); + + if (nb_tx_desc != NULL) + rte_eth_dev_adjust_nb_desc(nb_tx_desc, &dev_info.tx_desc_lim); + + return 0; +} + +int +rte_eth_dev_pool_ops_supported(uint16_t port_id, const char *pool) +{ + struct rte_eth_dev *dev; + + RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); + + if (pool == NULL) + return -EINVAL; + + dev = &rte_eth_devices[port_id]; + + if (*dev->dev_ops->pool_ops_supported == NULL) + return 1; /* all pools are supported */ + + return (*dev->dev_ops->pool_ops_supported)(dev, pool); +}
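
With the rxmode flag bitfields replaced by DEV_RX_OFFLOAD_* bits in the diff above, rte_eth_dev_set_vlan_offload() still takes the same ETH_VLAN_*_OFFLOAD mask, and a failed PMD call now leaves rxmode.offloads rolled back to its previous value. A minimal sketch of driving that path, assuming an already-configured port whose PMD supports VLAN stripping; enable_vlan_strip() is an illustrative name, not a DPDK API:

#include <rte_ethdev.h>

/* Illustrative helper (not part of DPDK): request RX VLAN stripping
 * on top of whatever VLAN offloads are already enabled. */
static int
enable_vlan_strip(uint16_t port_id)
{
	/* Returns the current ETH_VLAN_*_OFFLOAD mask, or -ENODEV. */
	int cur = rte_eth_dev_get_vlan_offload(port_id);

	if (cur < 0)
		return cur;

	/* If the PMD rejects the change, rxmode.offloads is restored. */
	return rte_eth_dev_set_vlan_offload(port_id,
					    cur | ETH_VLAN_STRIP_OFFLOAD);
}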
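The relaxed rte_eth_check_reta_mask() rounds the group count up instead of rejecting redirection-table sizes that are not a multiple of RTE_RETA_GROUP_SIZE. A sketch of querying such a table; the 72-entry size is a made-up device value and dump_reta_72() is an illustrative name:

#include <stdint.h>
#include <string.h>
#include <rte_ethdev.h>

/* Illustrative only: query a 72-entry RETA, which the previous
 * alignment check would have rejected outright. */
static int
dump_reta_72(uint16_t port_id)
{
	/* ceil(72 / RTE_RETA_GROUP_SIZE) = 2 groups of 64 entries. */
	struct rte_eth_rss_reta_entry64 reta_conf[2];

	memset(reta_conf, 0, sizeof(reta_conf));
	reta_conf[0].mask = UINT64_MAX;              /* entries 0..63  */
	reta_conf[1].mask = (1ULL << (72 - 64)) - 1; /* entries 64..71 */

	return rte_eth_dev_rss_reta_query(port_id, reta_conf, 72);
}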
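The event-callback rework gives rte_eth_dev_cb_fn an int return value and a ret_param argument that _rte_eth_dev_callback_process() may substitute per invocation. A sketch of a handler under the new prototype; lsc_event_cb is a hypothetical name:

#include <stdio.h>
#include <rte_common.h>
#include <rte_ethdev.h>

/* Hypothetical link-status handler matching the extended signature. */
static int
lsc_event_cb(uint16_t port_id, enum rte_eth_event_type event,
	     void *cb_arg, void *ret_param)
{
	RTE_SET_USED(cb_arg);
	RTE_SET_USED(ret_param);	/* may be overridden by the caller */
	printf("port %u: event %d\n", port_id, (int)event);
	return 0;	/* propagated back by _rte_eth_dev_callback_process */
}

/* Registration is unchanged apart from the 16-bit port id:
 *	rte_eth_dev_callback_register(port_id, RTE_ETH_EVENT_INTR_LSC,
 *				      lsc_event_cb, NULL);
 */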
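The RX/TX burst-callback lists are now guarded by rte_eth_rx_cb_lock and rte_eth_tx_cb_lock, and rte_eth_add_first_rx_callback() prepends a hook ahead of any existing ones. A sketch assuming RTE_ETHDEV_RXTX_CALLBACKS is enabled; count_rx_cb and rx_count are illustrative names:

#include <rte_common.h>
#include <rte_ethdev.h>
#include <rte_mbuf.h>

static uint64_t rx_count;	/* illustrative per-process counter */

/* Hypothetical hook that runs before the application sees each burst. */
static uint16_t
count_rx_cb(uint16_t port_id, uint16_t queue, struct rte_mbuf *pkts[],
	    uint16_t nb_pkts, uint16_t max_pkts, void *user_param)
{
	RTE_SET_USED(port_id);
	RTE_SET_USED(queue);
	RTE_SET_USED(max_pkts);
	RTE_SET_USED(pkts);
	*(uint64_t *)user_param += nb_pkts;
	return nb_pkts;	/* count of packets handed on to the caller */
}

/* Typical lifetime:
 *	void *cb = rte_eth_add_first_rx_callback(port_id, 0,
 *						 count_rx_cb, &rx_count);
 *	...
 *	rte_eth_remove_rx_callback(port_id, 0, cb);
 * Removal only unlinks the entry under the spinlock; freeing it must
 * wait until no rte_eth_rx_burst() can still be walking the list.
 */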
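Finally, the new rte_eth_dev_adjust_nb_rx_tx_desc() aligns and clamps requested ring sizes against the PMD's rx_desc_lim/tx_desc_lim before queue setup, and rte_eth_dev_pool_ops_supported() returns 1 when a PMD does not implement the check. A sketch with made-up descriptor counts; setup_one_queue_pair() is not a DPDK function:

#include <rte_ethdev.h>
#include <rte_mempool.h>

/* Illustrative: let the PMD fix up the 1024/1024 request in place,
 * then configure one RX and one TX queue with the adjusted sizes. */
static int
setup_one_queue_pair(uint16_t port_id, struct rte_mempool *mb_pool)
{
	uint16_t nb_rxd = 1024;	/* assumed request, adjusted in place */
	uint16_t nb_txd = 1024;
	int ret;

	ret = rte_eth_dev_adjust_nb_rx_tx_desc(port_id, &nb_rxd, &nb_txd);
	if (ret != 0)
		return ret;

	ret = rte_eth_rx_queue_setup(port_id, 0, nb_rxd,
				     rte_eth_dev_socket_id(port_id),
				     NULL, mb_pool);
	if (ret != 0)
		return ret;

	return rte_eth_tx_queue_setup(port_id, 0, nb_txd,
				      rte_eth_dev_socket_id(port_id), NULL);
}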