#include <string.h>
#include <stdarg.h>
#include <errno.h>
+#include <stdbool.h>
#include <stdint.h>
#include <inttypes.h>
#include <netinet/in.h>
#include <rte_errno.h>
#include <rte_spinlock.h>
#include <rte_string_fns.h>
-#include <rte_compat.h>
+#include <rte_kvargs.h>
#include "rte_ether.h"
#include "rte_ethdev.h"
#include "rte_ethdev_driver.h"
#include "ethdev_profile.h"
+static int ethdev_logtype;
+
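+/* ethdev library log helper; note that it appends a newline to every message. */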
+#define ethdev_log(level, fmt, ...) \
+ rte_log(RTE_LOG_ ## level, ethdev_logtype, fmt "\n", ## __VA_ARGS__)
+
static const char *MZ_RTE_ETH_DEV_DATA = "rte_eth_dev_data";
struct rte_eth_dev rte_eth_devices[RTE_MAX_ETHPORTS];
-static uint8_t eth_dev_last_created_port;
+static uint16_t eth_dev_last_created_port;
/* spinlock for eth device callbacks */
static rte_spinlock_t rte_eth_dev_cb_lock = RTE_SPINLOCK_INITIALIZER;
rte_spinlock_unlock(&rte_eth_shared_data_lock);
}
-struct rte_eth_dev *
-rte_eth_dev_allocated(const char *name)
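+/* A port is considered allocated once its shared data entry carries a name. */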
+static bool
+is_allocated(const struct rte_eth_dev *ethdev)
+{
+ return ethdev->data->name[0] != '\0';
+}
+
+static struct rte_eth_dev *
+_rte_eth_dev_allocated(const char *name)
{
unsigned i;
for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
- if ((rte_eth_devices[i].state == RTE_ETH_DEV_ATTACHED) &&
+ if (rte_eth_devices[i].data != NULL &&
strcmp(rte_eth_devices[i].data->name, name) == 0)
return &rte_eth_devices[i];
}
return NULL;
}
+struct rte_eth_dev *
+rte_eth_dev_allocated(const char *name)
+{
+ struct rte_eth_dev *ethdev;
+
+ rte_eth_dev_shared_data_prepare();
+
+ rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);
+
+ ethdev = _rte_eth_dev_allocated(name);
+
+ rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);
+
+ return ethdev;
+}
+
static uint16_t
rte_eth_dev_find_free_port(void)
{
struct rte_eth_dev *eth_dev = &rte_eth_devices[port_id];
eth_dev->data = &rte_eth_dev_shared_data->data[port_id];
- eth_dev->state = RTE_ETH_DEV_ATTACHED;
eth_dev_last_created_port = port_id;
/* Synchronize port creation between primary and secondary threads. */
rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);
- port_id = rte_eth_dev_find_free_port();
- if (port_id == RTE_MAX_ETHPORTS) {
- RTE_LOG(ERR, EAL, "Reached maximum number of Ethernet ports\n");
+ if (_rte_eth_dev_allocated(name) != NULL) {
+ ethdev_log(ERR, "Ethernet device with name %s already allocated",
+ name);
goto unlock;
}
- if (rte_eth_dev_allocated(name) != NULL) {
- RTE_LOG(ERR, EAL, "Ethernet Device with name %s already allocated!\n",
- name);
+ port_id = rte_eth_dev_find_free_port();
+ if (port_id == RTE_MAX_ETHPORTS) {
+ ethdev_log(ERR, "Reached maximum number of Ethernet ports");
goto unlock;
}
unlock:
rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);
- if (eth_dev != NULL)
- _rte_eth_dev_callback_process(eth_dev, RTE_ETH_EVENT_NEW, NULL);
-
return eth_dev;
}
rte_eth_dev_shared_data_prepare();
+ _rte_eth_dev_callback_process(eth_dev, RTE_ETH_EVENT_DESTROY, NULL);
+
rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);
eth_dev->state = RTE_ETH_DEV_UNUSED;
rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);
- _rte_eth_dev_callback_process(eth_dev, RTE_ETH_EVENT_DESTROY, NULL);
-
return 0;
}
{
if (owner_id == RTE_ETH_DEV_NO_OWNER ||
rte_eth_dev_shared_data->next_owner_id <= owner_id) {
- RTE_PMD_DEBUG_TRACE("Invalid owner_id=%016lX.\n", owner_id);
+ RTE_PMD_DEBUG_TRACE("Invalid owner_id=%016"PRIX64".\n", owner_id);
return 0;
}
return 1;
}
-uint64_t __rte_experimental
+uint64_t
rte_eth_find_next_owned_by(uint16_t port_id, const uint64_t owner_id)
{
while (port_id < RTE_MAX_ETHPORTS &&
_rte_eth_dev_owner_set(const uint16_t port_id, const uint64_t old_owner_id,
const struct rte_eth_dev_owner *new_owner)
{
+ struct rte_eth_dev *ethdev = &rte_eth_devices[port_id];
struct rte_eth_dev_owner *port_owner;
int sret;
- RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
+ if (port_id >= RTE_MAX_ETHPORTS || !is_allocated(ethdev)) {
+ RTE_PMD_DEBUG_TRACE("Port id %"PRIu16" is not allocated.\n", port_id);
+ return -ENODEV;
+ }
if (!rte_eth_is_valid_owner_id(new_owner->id) &&
!rte_eth_is_valid_owner_id(old_owner_id))
port_owner = &rte_eth_devices[port_id].data->owner;
if (port_owner->id != old_owner_id) {
RTE_PMD_DEBUG_TRACE("Cannot set owner to port %d already owned"
- " by %s_%016lX.\n", port_id,
+ " by %s_%016"PRIX64".\n", port_id,
port_owner->name, port_owner->id);
return -EPERM;
}
port_owner->id = new_owner->id;
- RTE_PMD_DEBUG_TRACE("Port %d owner is %s_%016lX.\n", port_id,
+ RTE_PMD_DEBUG_TRACE("Port %d owner is %s_%016"PRIX64".\n", port_id,
new_owner->name, new_owner->id);
return 0;
rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);
if (rte_eth_is_valid_owner_id(owner_id)) {
- RTE_ETH_FOREACH_DEV_OWNED_BY(port_id, owner_id)
- memset(&rte_eth_devices[port_id].data->owner, 0,
- sizeof(struct rte_eth_dev_owner));
- RTE_PMD_DEBUG_TRACE("All port owners owned by %016X identifier"
- " have removed.\n", owner_id);
+ for (port_id = 0; port_id < RTE_MAX_ETHPORTS; port_id++)
+ if (rte_eth_devices[port_id].data->owner.id == owner_id)
+ memset(&rte_eth_devices[port_id].data->owner, 0,
+ sizeof(struct rte_eth_dev_owner));
+ RTE_PMD_DEBUG_TRACE("All port owners owned by %016"PRIX64
+ " identifier have removed.\n", owner_id);
}
rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);
rte_eth_dev_owner_get(const uint16_t port_id, struct rte_eth_dev_owner *owner)
{
int ret = 0;
+ struct rte_eth_dev *ethdev = &rte_eth_devices[port_id];
rte_eth_dev_shared_data_prepare();
rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);
- if (!rte_eth_dev_is_valid_port(port_id)) {
- RTE_PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
+ if (port_id >= RTE_MAX_ETHPORTS || !is_allocated(ethdev)) {
+ RTE_PMD_DEBUG_TRACE("Port id %"PRIu16" is not allocated.\n", port_id);
ret = -ENODEV;
} else {
- rte_memcpy(owner, &rte_eth_devices[port_id].data->owner,
- sizeof(*owner));
+ rte_memcpy(owner, &ethdev->data->owner, sizeof(*owner));
}
rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);
}
void *
-rte_eth_dev_get_sec_ctx(uint8_t port_id)
+rte_eth_dev_get_sec_ctx(uint16_t port_id)
{
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, NULL);
return rte_eth_devices[port_id].security_ctx;
uint16_t
rte_eth_dev_count(void)
+{
+ return rte_eth_dev_count_avail();
+}
+
+uint16_t
+rte_eth_dev_count_avail(void)
{
uint16_t p;
uint16_t count;
return count;
}
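+/* Count every allocated port, i.e. any port whose state is not UNUSED. */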
+uint16_t __rte_experimental
+rte_eth_dev_count_total(void)
+{
+ uint16_t port, count = 0;
+
+ for (port = 0; port < RTE_MAX_ETHPORTS; port++)
+ if (rte_eth_devices[port].state != RTE_ETH_DEV_UNUSED)
+ count++;
+
+ return count;
+}
+
int
rte_eth_dev_get_name_by_port(uint16_t port_id, char *name)
{
for (pid = 0; pid < RTE_MAX_ETHPORTS; pid++) {
if (rte_eth_devices[pid].state != RTE_ETH_DEV_UNUSED &&
- !strncmp(name, rte_eth_dev_shared_data->data[pid].name,
- strlen(name))) {
+ !strcmp(name, rte_eth_dev_shared_data->data[pid].name)) {
*port_id = pid;
return 0;
}
int
rte_eth_dev_attach(const char *devargs, uint16_t *port_id)
{
+ int current = rte_eth_dev_count_total();
+ struct rte_devargs da;
int ret = -1;
- int current = rte_eth_dev_count();
- char *name = NULL;
- char *args = NULL;
+
+ memset(&da, 0, sizeof(da));
if ((devargs == NULL) || (port_id == NULL)) {
ret = -EINVAL;
goto err;
}
- /* parse devargs, then retrieve device name and args */
- if (rte_eal_parse_devargs_str(devargs, &name, &args))
+ /* parse devargs */
+ if (rte_devargs_parse(&da, "%s", devargs))
goto err;
- ret = rte_eal_dev_attach(name, args);
+ ret = rte_eal_hotplug_add(da.bus->name, da.name, da.args);
if (ret < 0)
goto err;
/* no point looking at the port count if no port exists */
- if (!rte_eth_dev_count()) {
- RTE_LOG(ERR, EAL, "No port found for device (%s)\n", name);
+ if (!rte_eth_dev_count_total()) {
+ ethdev_log(ERR, "No port found for device (%s)", da.name);
ret = -1;
goto err;
}
/* if nothing happened, there is a bug here, since some driver told us
* it did attach a device, but did not create a port.
+ * FIXME: race condition in case of plug-out of another device
*/
- if (current == rte_eth_dev_count()) {
+ if (current == rte_eth_dev_count_total()) {
ret = -1;
goto err;
}
ret = 0;
err:
- free(name);
- free(args);
+ free(da.args);
return ret;
}
/* detach the device, then store the name of the device */
int
-rte_eth_dev_detach(uint16_t port_id, char *name)
+rte_eth_dev_detach(uint16_t port_id, char *name __rte_unused)
{
+ struct rte_device *dev;
+ struct rte_bus *bus;
uint32_t dev_flags;
int ret = -1;
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
- if (name == NULL) {
- ret = -EINVAL;
- goto err;
- }
-
dev_flags = rte_eth_devices[port_id].data->dev_flags;
if (dev_flags & RTE_ETH_DEV_BONDED_SLAVE) {
- RTE_LOG(ERR, EAL, "Port %" PRIu16 " is bonded, cannot detach\n",
- port_id);
- ret = -ENOTSUP;
- goto err;
+ ethdev_log(ERR,
+ "Port %" PRIu16 " is bonded, cannot detach", port_id);
+ return -ENOTSUP;
}
- snprintf(name, sizeof(rte_eth_devices[port_id].data->name),
- "%s", rte_eth_devices[port_id].data->name);
+ dev = rte_eth_devices[port_id].device;
+ if (dev == NULL)
+ return -EINVAL;
+
+ bus = rte_bus_find_by_device(dev);
+ if (bus == NULL)
+ return -ENOENT;
- ret = rte_eal_dev_detach(rte_eth_devices[port_id].device);
+ ret = rte_eal_hotplug_remove(bus->name, dev->name);
if (ret < 0)
- goto err;
+ return ret;
rte_eth_dev_release_port(&rte_eth_devices[port_id]);
return 0;
-
-err:
- return ret;
}
static int
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
dev = &rte_eth_devices[port_id];
+ if (!dev->data->dev_started) {
+ RTE_PMD_DEBUG_TRACE(
+ "port %d must be started before start any queue\n", port_id);
+ return -EINVAL;
+ }
+
if (rx_queue_id >= dev->data->nb_rx_queues) {
RTE_PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", rx_queue_id);
return -EINVAL;
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
dev = &rte_eth_devices[port_id];
+ if (!dev->data->dev_started) {
+ RTE_PMD_DEBUG_TRACE(
+ "port %d must be started before start any queue\n", port_id);
+ return -EINVAL;
+ }
+
if (tx_queue_id >= dev->data->nb_tx_queues) {
RTE_PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", tx_queue_id);
return -EINVAL;
*rx_offloads = offloads;
}
-/**
- * A conversion function from rxmode offloads API.
- */
-static void
-rte_eth_convert_rx_offloads(const uint64_t rx_offloads,
- struct rte_eth_rxmode *rxmode)
-{
-
- if (rx_offloads & DEV_RX_OFFLOAD_HEADER_SPLIT)
- rxmode->header_split = 1;
- else
- rxmode->header_split = 0;
- if (rx_offloads & DEV_RX_OFFLOAD_CHECKSUM)
- rxmode->hw_ip_checksum = 1;
- else
- rxmode->hw_ip_checksum = 0;
- if (rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
- rxmode->hw_vlan_filter = 1;
- else
- rxmode->hw_vlan_filter = 0;
- if (rx_offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
- rxmode->hw_vlan_strip = 1;
- else
- rxmode->hw_vlan_strip = 0;
- if (rx_offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)
- rxmode->hw_vlan_extend = 1;
- else
- rxmode->hw_vlan_extend = 0;
- if (rx_offloads & DEV_RX_OFFLOAD_JUMBO_FRAME)
- rxmode->jumbo_frame = 1;
- else
- rxmode->jumbo_frame = 0;
- if (rx_offloads & DEV_RX_OFFLOAD_CRC_STRIP)
- rxmode->hw_strip_crc = 1;
- else
- rxmode->hw_strip_crc = 0;
- if (rx_offloads & DEV_RX_OFFLOAD_SCATTER)
- rxmode->enable_scatter = 1;
- else
- rxmode->enable_scatter = 0;
- if (rx_offloads & DEV_RX_OFFLOAD_TCP_LRO)
- rxmode->enable_lro = 1;
- else
- rxmode->enable_lro = 0;
- if (rx_offloads & DEV_RX_OFFLOAD_TIMESTAMP)
- rxmode->hw_timestamp = 1;
- else
- rxmode->hw_timestamp = 0;
- if (rx_offloads & DEV_RX_OFFLOAD_SECURITY)
- rxmode->security = 1;
- else
- rxmode->security = 0;
-}
-
const char * __rte_experimental
rte_eth_dev_rx_offload_name(uint64_t offload)
{
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
+ dev = &rte_eth_devices[port_id];
+
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);
+
+ rte_eth_dev_info_get(port_id, &dev_info);
+
+ /* If number of queues specified by application for both Rx and Tx is
+ * zero, use driver preferred values. This cannot be done individually
+ * as it is valid for either Tx or Rx (but not both) to be zero.
+ * If driver does not provide any preferred valued, fall back on
+ * EAL defaults.
+ */
+ if (nb_rx_q == 0 && nb_tx_q == 0) {
+ nb_rx_q = dev_info.default_rxportconf.nb_queues;
+ if (nb_rx_q == 0)
+ nb_rx_q = RTE_ETH_DEV_FALLBACK_RX_NBQUEUES;
+ nb_tx_q = dev_info.default_txportconf.nb_queues;
+ if (nb_tx_q == 0)
+ nb_tx_q = RTE_ETH_DEV_FALLBACK_TX_NBQUEUES;
+ }
+
if (nb_rx_q > RTE_MAX_QUEUES_PER_PORT) {
RTE_PMD_DEBUG_TRACE(
"Number of RX queues requested (%u) is greater than max supported(%d)\n",
return -EINVAL;
}
- dev = &rte_eth_devices[port_id];
-
- RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
- RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);
-
if (dev->data->dev_started) {
RTE_PMD_DEBUG_TRACE(
"port %d must be stopped to allow configuration\n", port_id);
* Convert between the offloads API to enable PMDs to support
* only one of them.
*/
- if (dev_conf->rxmode.ignore_offload_bitfield == 0) {
+ if (dev_conf->rxmode.ignore_offload_bitfield == 0)
rte_eth_convert_rx_offload_bitfield(
&dev_conf->rxmode, &local_conf.rxmode.offloads);
- } else {
- rte_eth_convert_rx_offloads(dev_conf->rxmode.offloads,
- &local_conf.rxmode);
- }
/* Copy the dev_conf parameter into the dev structure */
memcpy(&dev->data->dev_conf, &local_conf, sizeof(dev->data->dev_conf));
* than the maximum number of RX and TX queues supported by the
* configured device.
*/
- (*dev->dev_ops->dev_infos_get)(dev, &dev_info);
-
- if (nb_rx_q == 0 && nb_tx_q == 0) {
- RTE_PMD_DEBUG_TRACE("ethdev port_id=%d both rx and tx queue cannot be 0\n", port_id);
- return -EINVAL;
- }
-
if (nb_rx_q > dev_info.max_rx_queues) {
RTE_PMD_DEBUG_TRACE("ethdev port_id=%d nb_rx_queues=%d > %d\n",
port_id, nb_rx_q, dev_info.max_rx_queues);
ETHER_MAX_LEN;
}
+ /* Any requested offloading must be within its device capabilities */
+ if ((local_conf.rxmode.offloads & dev_info.rx_offload_capa) !=
+ local_conf.rxmode.offloads) {
+ ethdev_log(ERR, "ethdev port_id=%d requested Rx offloads "
+ "0x%" PRIx64 " doesn't match Rx offloads "
+ "capabilities 0x%" PRIx64 " in %s()\n",
+ port_id,
+ local_conf.rxmode.offloads,
+ dev_info.rx_offload_capa,
+ __func__);
+ /* Will return -EINVAL in the next release */
+ }
+ if ((local_conf.txmode.offloads & dev_info.tx_offload_capa) !=
+ local_conf.txmode.offloads) {
+ ethdev_log(ERR, "ethdev port_id=%d requested Tx offloads "
+ "0x%" PRIx64 " doesn't match Tx offloads "
+ "capabilities 0x%" PRIx64 " in %s()\n",
+ port_id,
+ local_conf.txmode.offloads,
+ dev_info.tx_offload_capa,
+ __func__);
+ /* Will return -EINVAL in the next release */
+ }
+
+ /* Check that device supports requested rss hash functions. */
+ if ((dev_info.flow_type_rss_offloads |
+ dev_conf->rx_adv_conf.rss_conf.rss_hf) !=
+ dev_info.flow_type_rss_offloads) {
+ RTE_PMD_DEBUG_TRACE("ethdev port_id=%d invalid rss_hf: "
+ "0x%"PRIx64", valid value: 0x%"PRIx64"\n",
+ port_id,
+ dev_conf->rx_adv_conf.rss_conf.rss_hf,
+ dev_info.flow_type_rss_offloads);
+ }
+
/*
* Setup new number of RX/TX queues and reconfigure device.
*/
return -EINVAL;
}
- if (dev->data->dev_started) {
- RTE_PMD_DEBUG_TRACE(
- "port %d must be stopped to allow configuration\n", port_id);
- return -EBUSY;
- }
-
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_setup, -ENOTSUP);
return -EINVAL;
}
+ /* Use default specified by driver, if nb_rx_desc is zero */
+ if (nb_rx_desc == 0) {
+ nb_rx_desc = dev_info.default_rxportconf.ring_size;
+ /* If driver default is also zero, fall back on EAL default */
+ if (nb_rx_desc == 0)
+ nb_rx_desc = RTE_ETH_DEV_FALLBACK_RX_RINGSIZE;
+ }
+
if (nb_rx_desc > dev_info.rx_desc_lim.nb_max ||
nb_rx_desc < dev_info.rx_desc_lim.nb_min ||
nb_rx_desc % dev_info.rx_desc_lim.nb_align != 0) {
return -EINVAL;
}
+ if (dev->data->dev_started &&
+ !(dev_info.dev_capa &
+ RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP))
+ return -EBUSY;
+
+ if (dev->data->dev_started &&
+ (dev->data->rx_queue_state[rx_queue_id] !=
+ RTE_ETH_QUEUE_STATE_STOPPED))
+ return -EBUSY;
+
rxq = dev->data->rx_queues;
if (rxq[rx_queue_id]) {
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release,
&local_conf.offloads);
}
+ /*
+ * If an offload has already been enabled in
+ * rte_eth_dev_configure(), it has been enabled on all queues,
+ * so there is no need to enable it for this queue again.
+ * The local_conf.offloads input to the underlying PMD only carries
+ * those offloads which are enabled solely on this queue and
+ * not on all queues.
+ */
+ local_conf.offloads &= ~dev->data->dev_conf.rxmode.offloads;
+
+ /*
+ * Offloads newly added for this queue are those not enabled in
+ * rte_eth_dev_configure(), and they must be per-queue offloads.
+ * A pure per-port offload can't be enabled on one queue while
+ * disabled on another, nor can it be newly enabled on a single
+ * queue if it hasn't been enabled in rte_eth_dev_configure().
+ */
+ if ((local_conf.offloads & dev_info.rx_queue_offload_capa) !=
+ local_conf.offloads) {
+ ethdev_log(ERR, "Ethdev port_id=%d rx_queue_id=%d, new "
+ "added offloads 0x%" PRIx64 " must be "
+ "within pre-queue offload capabilities 0x%"
+ PRIx64 " in %s()\n",
+ port_id,
+ rx_queue_id,
+ local_conf.offloads,
+ dev_info.rx_queue_offload_capa,
+ __func__);
+ /* Will return -EINVAL in the next release */
+ }
+
ret = (*dev->dev_ops->rx_queue_setup)(dev, rx_queue_id, nb_rx_desc,
socket_id, &local_conf, mp);
if (!ret) {
return eth_err(port_id, ret);
}
+/**
+ * Convert from tx offloads to txq_flags.
+ */
+static void
+rte_eth_convert_tx_offload(const uint64_t tx_offloads, uint32_t *txq_flags)
+{
+ uint32_t flags = 0;
+
+ if (!(tx_offloads & DEV_TX_OFFLOAD_MULTI_SEGS))
+ flags |= ETH_TXQ_FLAGS_NOMULTSEGS;
+ if (!(tx_offloads & DEV_TX_OFFLOAD_VLAN_INSERT))
+ flags |= ETH_TXQ_FLAGS_NOVLANOFFL;
+ if (!(tx_offloads & DEV_TX_OFFLOAD_SCTP_CKSUM))
+ flags |= ETH_TXQ_FLAGS_NOXSUMSCTP;
+ if (!(tx_offloads & DEV_TX_OFFLOAD_UDP_CKSUM))
+ flags |= ETH_TXQ_FLAGS_NOXSUMUDP;
+ if (!(tx_offloads & DEV_TX_OFFLOAD_TCP_CKSUM))
+ flags |= ETH_TXQ_FLAGS_NOXSUMTCP;
+ if (tx_offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+ flags |= ETH_TXQ_FLAGS_NOREFCOUNT | ETH_TXQ_FLAGS_NOMULTMEMP;
+
+ *txq_flags = flags;
+}
+
/**
* A conversion function from txq_flags API.
*/
*tx_offloads = offloads;
}
-/**
- * A conversion function from offloads API.
- */
-static void
-rte_eth_convert_txq_offloads(const uint64_t tx_offloads, uint32_t *txq_flags)
-{
- uint32_t flags = 0;
-
- if (!(tx_offloads & DEV_TX_OFFLOAD_MULTI_SEGS))
- flags |= ETH_TXQ_FLAGS_NOMULTSEGS;
- if (!(tx_offloads & DEV_TX_OFFLOAD_VLAN_INSERT))
- flags |= ETH_TXQ_FLAGS_NOVLANOFFL;
- if (!(tx_offloads & DEV_TX_OFFLOAD_SCTP_CKSUM))
- flags |= ETH_TXQ_FLAGS_NOXSUMSCTP;
- if (!(tx_offloads & DEV_TX_OFFLOAD_UDP_CKSUM))
- flags |= ETH_TXQ_FLAGS_NOXSUMUDP;
- if (!(tx_offloads & DEV_TX_OFFLOAD_TCP_CKSUM))
- flags |= ETH_TXQ_FLAGS_NOXSUMTCP;
- if (tx_offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
- flags |= (ETH_TXQ_FLAGS_NOREFCOUNT | ETH_TXQ_FLAGS_NOMULTMEMP);
-
- *txq_flags = flags;
-}
-
int
rte_eth_tx_queue_setup(uint16_t port_id, uint16_t tx_queue_id,
uint16_t nb_tx_desc, unsigned int socket_id,
return -EINVAL;
}
- if (dev->data->dev_started) {
- RTE_PMD_DEBUG_TRACE(
- "port %d must be stopped to allow configuration\n", port_id);
- return -EBUSY;
- }
-
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_setup, -ENOTSUP);
rte_eth_dev_info_get(port_id, &dev_info);
+ /* Use default specified by driver, if nb_tx_desc is zero */
+ if (nb_tx_desc == 0) {
+ nb_tx_desc = dev_info.default_txportconf.ring_size;
+ /* If driver default is zero, fall back on EAL default */
+ if (nb_tx_desc == 0)
+ nb_tx_desc = RTE_ETH_DEV_FALLBACK_TX_RINGSIZE;
+ }
if (nb_tx_desc > dev_info.tx_desc_lim.nb_max ||
nb_tx_desc < dev_info.tx_desc_lim.nb_min ||
nb_tx_desc % dev_info.tx_desc_lim.nb_align != 0) {
return -EINVAL;
}
+ if (dev->data->dev_started &&
+ !(dev_info.dev_capa &
+ RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP))
+ return -EBUSY;
+
+ if (dev->data->dev_started &&
+ (dev->data->tx_queue_state[tx_queue_id] !=
+ RTE_ETH_QUEUE_STATE_STOPPED))
+ return -EBUSY;
+
txq = dev->data->tx_queues;
if (txq[tx_queue_id]) {
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release,
* only one of them.
*/
local_conf = *tx_conf;
- if (tx_conf->txq_flags & ETH_TXQ_FLAGS_IGNORE) {
- rte_eth_convert_txq_offloads(tx_conf->offloads,
- &local_conf.txq_flags);
- /* Keep the ignore flag. */
- local_conf.txq_flags |= ETH_TXQ_FLAGS_IGNORE;
- } else {
+ if (!(tx_conf->txq_flags & ETH_TXQ_FLAGS_IGNORE)) {
rte_eth_convert_txq_flags(tx_conf->txq_flags,
&local_conf.offloads);
}
+ /*
+ * If an offload has already been enabled in
+ * rte_eth_dev_configure(), it has been enabled on all queues,
+ * so there is no need to enable it for this queue again.
+ * The local_conf.offloads input to the underlying PMD only carries
+ * those offloads which are enabled solely on this queue and
+ * not on all queues.
+ */
+ local_conf.offloads &= ~dev->data->dev_conf.txmode.offloads;
+
+ /*
+ * Offloads newly added for this queue are those not enabled in
+ * rte_eth_dev_configure(), and they must be per-queue offloads.
+ * A pure per-port offload can't be enabled on one queue while
+ * disabled on another, nor can it be newly enabled on a single
+ * queue if it hasn't been enabled in rte_eth_dev_configure().
+ */
+ if ((local_conf.offloads & dev_info.tx_queue_offload_capa) !=
+ local_conf.offloads) {
+ ethdev_log(ERR, "Ethdev port_id=%d tx_queue_id=%d, new "
+ "added offloads 0x%" PRIx64 " must be "
+ "within pre-queue offload capabilities 0x%"
+ PRIx64 " in %s()\n",
+ port_id,
+ tx_queue_id,
+ local_conf.offloads,
+ dev_info.tx_queue_offload_capa,
+ __func__);
+ /* Will return -EINVAL in the next release */
+ }
+
return eth_err(port_id, (*dev->dev_ops->tx_queue_setup)(dev,
tx_queue_id, nb_tx_desc, socket_id, &local_conf));
}
return dev->data->all_multicast;
}
-static inline int
-rte_eth_dev_atomic_read_link_status(struct rte_eth_dev *dev,
- struct rte_eth_link *link)
-{
- struct rte_eth_link *dst = link;
- struct rte_eth_link *src = &(dev->data->dev_link);
-
- if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
- *(uint64_t *)src) == 0)
- return -1;
-
- return 0;
-}
-
void
rte_eth_link_get(uint16_t port_id, struct rte_eth_link *eth_link)
{
RTE_ETH_VALID_PORTID_OR_RET(port_id);
dev = &rte_eth_devices[port_id];
- if (dev->data->dev_conf.intr_conf.lsc != 0)
- rte_eth_dev_atomic_read_link_status(dev, eth_link);
+ if (dev->data->dev_conf.intr_conf.lsc &&
+ dev->data->dev_started)
+ rte_eth_linkstatus_get(dev, eth_link);
else {
RTE_FUNC_PTR_OR_RET(*dev->dev_ops->link_update);
(*dev->dev_ops->link_update)(dev, 1);
RTE_ETH_VALID_PORTID_OR_RET(port_id);
dev = &rte_eth_devices[port_id];
- if (dev->data->dev_conf.intr_conf.lsc != 0)
- rte_eth_dev_atomic_read_link_status(dev, eth_link);
+ if (dev->data->dev_conf.intr_conf.lsc &&
+ dev->data->dev_started)
+ rte_eth_linkstatus_get(dev, eth_link);
else {
RTE_FUNC_PTR_OR_RET(*dev->dev_ops->link_update);
(*dev->dev_ops->link_update)(dev, 0);
rte_eth_dev_info_get(uint16_t port_id, struct rte_eth_dev_info *dev_info)
{
struct rte_eth_dev *dev;
+ struct rte_eth_txconf *txconf;
const struct rte_eth_desc_lim lim = {
.nb_max = UINT16_MAX,
.nb_min = 0,
memset(dev_info, 0, sizeof(struct rte_eth_dev_info));
dev_info->rx_desc_lim = lim;
dev_info->tx_desc_lim = lim;
+ dev_info->device = dev->device;
RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_infos_get);
(*dev->dev_ops->dev_infos_get)(dev, dev_info);
dev_info->driver_name = dev->device->driver->name;
dev_info->nb_rx_queues = dev->data->nb_rx_queues;
dev_info->nb_tx_queues = dev->data->nb_tx_queues;
+
+ dev_info->dev_flags = &dev->data->dev_flags;
+ txconf = &dev_info->default_txconf;
+ /* convert offload to txq_flags to support legacy app */
+ rte_eth_convert_tx_offload(txconf->offloads, &txconf->txq_flags);
}
int
return ret;
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_offload_set, -ENOTSUP);
-
- /*
- * Convert to the offload bitfield API just in case the underlying PMD
- * still supporting it.
- */
- rte_eth_convert_rx_offloads(dev->data->dev_conf.rxmode.offloads,
- &dev->data->dev_conf.rxmode);
ret = (*dev->dev_ops->vlan_offload_set)(dev, mask);
if (ret) {
/* hit an error restore original values */
dev->data->dev_conf.rxmode.offloads = orig_offloads;
- rte_eth_convert_rx_offloads(dev->data->dev_conf.rxmode.offloads,
- &dev->data->dev_conf.rxmode);
}
return eth_err(port_id, ret);
struct rte_eth_rss_conf *rss_conf)
{
struct rte_eth_dev *dev;
+ struct rte_eth_dev_info dev_info = { .flow_type_rss_offloads = 0, };
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
dev = &rte_eth_devices[port_id];
+ rte_eth_dev_info_get(port_id, &dev_info);
+ if ((dev_info.flow_type_rss_offloads | rss_conf->rss_hf) !=
+ dev_info.flow_type_rss_offloads) {
+ RTE_PMD_DEBUG_TRACE("ethdev port_id=%d invalid rss_hf: "
+ "0x%"PRIx64", valid value: 0x%"PRIx64"\n",
+ port_id,
+ rss_conf->rss_hf,
+ dev_info.flow_type_rss_offloads);
+ }
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rss_hash_update, -ENOTSUP);
return eth_err(port_id, (*dev->dev_ops->rss_hash_update)(dev,
rss_conf));
rte_eth_dev_default_mac_addr_set(uint16_t port_id, struct ether_addr *addr)
{
struct rte_eth_dev *dev;
+ int ret;
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
dev = &rte_eth_devices[port_id];
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_set, -ENOTSUP);
+ ret = (*dev->dev_ops->mac_addr_set)(dev, addr);
+ if (ret < 0)
+ return ret;
+
/* Update default address in NIC data structure */
ether_addr_copy(addr, &dev->data->mac_addrs[0]);
- (*dev->dev_ops->mac_addr_set)(dev, addr);
-
return 0;
}
return -EINVAL;
if (!rte_eth_dev_is_valid_port(port_id) && port_id != RTE_ETH_ALL) {
- RTE_LOG(ERR, EAL, "Invalid port_id=%d\n", port_id);
+ ethdev_log(ERR, "Invalid port_id=%d", port_id);
return -EINVAL;
}
return -EINVAL;
if (!rte_eth_dev_is_valid_port(port_id) && port_id != RTE_ETH_ALL) {
- RTE_LOG(ERR, EAL, "Invalid port_id=%d\n", port_id);
+ ethdev_log(ERR, "Invalid port_id=%d", port_id);
return -EINVAL;
}
return rc;
}
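+/*
+ * Last step of device probing: notify the registered NEW event callbacks,
+ * then mark the port as ATTACHED.
+ */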
+void
+rte_eth_dev_probing_finish(struct rte_eth_dev *dev)
+{
+ if (dev == NULL)
+ return;
+
+ _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_NEW, NULL);
+
+ dev->state = RTE_ETH_DEV_ATTACHED;
+}
+
int
rte_eth_dev_rx_intr_ctl(uint16_t port_id, int epfd, int op, void *data)
{
if (mz)
return mz;
- return rte_memzone_reserve_aligned(z_name, size, socket_id, 0, align);
+ return rte_memzone_reserve_aligned(z_name, size, socket_id,
+ RTE_MEMZONE_IOVA_CONTIG, align);
+}
+
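+/*
+ * Generic helper for port creation: allocate the ethdev and its private data
+ * in the primary process (or attach to the existing ethdev in a secondary
+ * process), run the optional bus-specific init and the driver init callback,
+ * and finish probing on success.
+ */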
+int __rte_experimental
+rte_eth_dev_create(struct rte_device *device, const char *name,
+ size_t priv_data_size,
+ ethdev_bus_specific_init ethdev_bus_specific_init,
+ void *bus_init_params,
+ ethdev_init_t ethdev_init, void *init_params)
+{
+ struct rte_eth_dev *ethdev;
+ int retval;
+
+ RTE_FUNC_PTR_OR_ERR_RET(*ethdev_init, -EINVAL);
+
+ if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+ ethdev = rte_eth_dev_allocate(name);
+ if (!ethdev) {
+ /* Nothing has been allocated yet, nothing to clean up */
+ return -ENODEV;
+ }
+
+ if (priv_data_size) {
+ ethdev->data->dev_private = rte_zmalloc_socket(
+ name, priv_data_size, RTE_CACHE_LINE_SIZE,
+ device->numa_node);
+
+ if (!ethdev->data->dev_private) {
+ RTE_LOG(ERR, EAL, "failed to allocate private data");
+ retval = -ENOMEM;
+ goto probe_failed;
+ }
+ }
+ } else {
+ ethdev = rte_eth_dev_attach_secondary(name);
+ if (!ethdev) {
+ RTE_LOG(ERR, EAL, "secondary process attach failed, "
+ "ethdev doesn't exist");
+ retval = -ENODEV;
+ goto probe_failed;
+ }
+ }
+
+ ethdev->device = device;
+
+ if (ethdev_bus_specific_init) {
+ retval = ethdev_bus_specific_init(ethdev, bus_init_params);
+ if (retval) {
+ RTE_LOG(ERR, EAL,
+ "ethdev bus specific initialisation failed");
+ goto probe_failed;
+ }
+ }
+
+ retval = ethdev_init(ethdev, init_params);
+ if (retval) {
+ RTE_LOG(ERR, EAL, "ethdev initialisation failed");
+ goto probe_failed;
+ }
+
+ rte_eth_dev_probing_finish(ethdev);
+
+ return retval;
+probe_failed:
+ /* free ports private data if primary process */
+ if (rte_eal_process_type() == RTE_PROC_PRIMARY)
+ rte_free(ethdev->data->dev_private);
+
+ rte_eth_dev_release_port(ethdev);
+
+ return retval;
+}
+
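+/*
+ * Generic helper for port removal: run the driver uninit callback, free the
+ * private data in the primary process and release the port.
+ */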
+int __rte_experimental
+rte_eth_dev_destroy(struct rte_eth_dev *ethdev,
+ ethdev_uninit_t ethdev_uninit)
+{
+ int ret;
+
+ ethdev = rte_eth_dev_allocated(ethdev->data->name);
+ if (!ethdev)
+ return -ENODEV;
+
+ RTE_FUNC_PTR_OR_ERR_RET(*ethdev_uninit, -EINVAL);
+ if (ethdev_uninit) {
+ ret = ethdev_uninit(ethdev);
+ if (ret)
+ return ret;
+ }
+
+ if (rte_eal_process_type() == RTE_PROC_PRIMARY)
+ rte_free(ethdev->data->dev_private);
+
+ ethdev->data->dev_private = NULL;
+
+ return rte_eth_dev_release_port(ethdev);
}
int
}
int
-rte_eth_dev_filter_ctrl_v22(uint16_t port_id,
- enum rte_filter_type filter_type,
- enum rte_filter_op filter_op, void *arg);
-
-int
-rte_eth_dev_filter_ctrl_v22(uint16_t port_id,
- enum rte_filter_type filter_type,
- enum rte_filter_op filter_op, void *arg)
-{
- struct rte_eth_fdir_info_v22 {
- enum rte_fdir_mode mode;
- struct rte_eth_fdir_masks mask;
- struct rte_eth_fdir_flex_conf flex_conf;
- uint32_t guarant_spc;
- uint32_t best_spc;
- uint32_t flow_types_mask[1];
- uint32_t max_flexpayload;
- uint32_t flex_payload_unit;
- uint32_t max_flex_payload_segment_num;
- uint16_t flex_payload_limit;
- uint32_t flex_bitmask_unit;
- uint32_t max_flex_bitmask_num;
- };
-
- struct rte_eth_hash_global_conf_v22 {
- enum rte_eth_hash_function hash_func;
- uint32_t sym_hash_enable_mask[1];
- uint32_t valid_bit_mask[1];
- };
-
- struct rte_eth_hash_filter_info_v22 {
- enum rte_eth_hash_filter_info_type info_type;
- union {
- uint8_t enable;
- struct rte_eth_hash_global_conf_v22 global_conf;
- struct rte_eth_input_set_conf input_set_conf;
- } info;
- };
-
- struct rte_eth_dev *dev;
-
- RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
-
- dev = &rte_eth_devices[port_id];
- RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->filter_ctrl, -ENOTSUP);
- if (filter_op == RTE_ETH_FILTER_INFO) {
- int retval;
- struct rte_eth_fdir_info_v22 *fdir_info_v22;
- struct rte_eth_fdir_info fdir_info;
-
- fdir_info_v22 = (struct rte_eth_fdir_info_v22 *)arg;
-
- retval = (*dev->dev_ops->filter_ctrl)(dev, filter_type,
- filter_op, (void *)&fdir_info);
- fdir_info_v22->mode = fdir_info.mode;
- fdir_info_v22->mask = fdir_info.mask;
- fdir_info_v22->flex_conf = fdir_info.flex_conf;
- fdir_info_v22->guarant_spc = fdir_info.guarant_spc;
- fdir_info_v22->best_spc = fdir_info.best_spc;
- fdir_info_v22->flow_types_mask[0] =
- (uint32_t)fdir_info.flow_types_mask[0];
- fdir_info_v22->max_flexpayload = fdir_info.max_flexpayload;
- fdir_info_v22->flex_payload_unit = fdir_info.flex_payload_unit;
- fdir_info_v22->max_flex_payload_segment_num =
- fdir_info.max_flex_payload_segment_num;
- fdir_info_v22->flex_payload_limit =
- fdir_info.flex_payload_limit;
- fdir_info_v22->flex_bitmask_unit = fdir_info.flex_bitmask_unit;
- fdir_info_v22->max_flex_bitmask_num =
- fdir_info.max_flex_bitmask_num;
- return retval;
- } else if (filter_op == RTE_ETH_FILTER_GET) {
- int retval;
- struct rte_eth_hash_filter_info f_info;
- struct rte_eth_hash_filter_info_v22 *f_info_v22 =
- (struct rte_eth_hash_filter_info_v22 *)arg;
-
- f_info.info_type = f_info_v22->info_type;
- retval = (*dev->dev_ops->filter_ctrl)(dev, filter_type,
- filter_op, (void *)&f_info);
-
- switch (f_info_v22->info_type) {
- case RTE_ETH_HASH_FILTER_SYM_HASH_ENA_PER_PORT:
- f_info_v22->info.enable = f_info.info.enable;
- break;
- case RTE_ETH_HASH_FILTER_GLOBAL_CONFIG:
- f_info_v22->info.global_conf.hash_func =
- f_info.info.global_conf.hash_func;
- f_info_v22->info.global_conf.sym_hash_enable_mask[0] =
- (uint32_t)
- f_info.info.global_conf.sym_hash_enable_mask[0];
- f_info_v22->info.global_conf.valid_bit_mask[0] =
- (uint32_t)
- f_info.info.global_conf.valid_bit_mask[0];
- break;
- case RTE_ETH_HASH_FILTER_INPUT_SET_SELECT:
- f_info_v22->info.input_set_conf =
- f_info.info.input_set_conf;
- break;
- default:
- break;
- }
- return retval;
- } else if (filter_op == RTE_ETH_FILTER_SET) {
- struct rte_eth_hash_filter_info f_info;
- struct rte_eth_hash_filter_info_v22 *f_v22 =
- (struct rte_eth_hash_filter_info_v22 *)arg;
-
- f_info.info_type = f_v22->info_type;
- switch (f_v22->info_type) {
- case RTE_ETH_HASH_FILTER_SYM_HASH_ENA_PER_PORT:
- f_info.info.enable = f_v22->info.enable;
- break;
- case RTE_ETH_HASH_FILTER_GLOBAL_CONFIG:
- f_info.info.global_conf.hash_func =
- f_v22->info.global_conf.hash_func;
- f_info.info.global_conf.sym_hash_enable_mask[0] =
- (uint32_t)
- f_v22->info.global_conf.sym_hash_enable_mask[0];
- f_info.info.global_conf.valid_bit_mask[0] =
- (uint32_t)
- f_v22->info.global_conf.valid_bit_mask[0];
- break;
- case RTE_ETH_HASH_FILTER_INPUT_SET_SELECT:
- f_info.info.input_set_conf =
- f_v22->info.input_set_conf;
- break;
- default:
- break;
- }
- return (*dev->dev_ops->filter_ctrl)(dev, filter_type, filter_op,
- (void *)&f_info);
- } else
- return (*dev->dev_ops->filter_ctrl)(dev, filter_type, filter_op,
- arg);
-}
-VERSION_SYMBOL(rte_eth_dev_filter_ctrl, _v22, 2.2);
-
-int
-rte_eth_dev_filter_ctrl_v1802(uint16_t port_id,
- enum rte_filter_type filter_type,
- enum rte_filter_op filter_op, void *arg);
-
-int
-rte_eth_dev_filter_ctrl_v1802(uint16_t port_id,
- enum rte_filter_type filter_type,
- enum rte_filter_op filter_op, void *arg)
+rte_eth_dev_filter_ctrl(uint16_t port_id, enum rte_filter_type filter_type,
+ enum rte_filter_op filter_op, void *arg)
{
struct rte_eth_dev *dev;
return eth_err(port_id, (*dev->dev_ops->filter_ctrl)(dev, filter_type,
filter_op, arg));
}
-BIND_DEFAULT_SYMBOL(rte_eth_dev_filter_ctrl, _v1802, 18.02);
-MAP_STATIC_SYMBOL(int rte_eth_dev_filter_ctrl(uint16_t port_id,
- enum rte_filter_type filter_type,
- enum rte_filter_op filter_op, void *arg),
- rte_eth_dev_filter_ctrl_v1802);
-void *
+const struct rte_eth_rxtx_callback *
rte_eth_add_rx_callback(uint16_t port_id, uint16_t queue_id,
rte_rx_callback_fn fn, void *user_param)
{
return cb;
}
-void *
+const struct rte_eth_rxtx_callback *
rte_eth_add_first_rx_callback(uint16_t port_id, uint16_t queue_id,
rte_rx_callback_fn fn, void *user_param)
{
return cb;
}
-void *
+const struct rte_eth_rxtx_callback *
rte_eth_add_tx_callback(uint16_t port_id, uint16_t queue_id,
rte_tx_callback_fn fn, void *user_param)
{
int
rte_eth_remove_rx_callback(uint16_t port_id, uint16_t queue_id,
- struct rte_eth_rxtx_callback *user_cb)
+ const struct rte_eth_rxtx_callback *user_cb)
{
#ifndef RTE_ETHDEV_RXTX_CALLBACKS
return -ENOTSUP;
int
rte_eth_remove_tx_callback(uint16_t port_id, uint16_t queue_id,
- struct rte_eth_rxtx_callback *user_cb)
+ const struct rte_eth_rxtx_callback *user_cb)
{
#ifndef RTE_ETHDEV_RXTX_CALLBACKS
return -ENOTSUP;
struct rte_eth_txq_info *qinfo)
{
struct rte_eth_dev *dev;
+ struct rte_eth_txconf *txconf = &qinfo->conf;
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
memset(qinfo, 0, sizeof(*qinfo));
dev->dev_ops->txq_info_get(dev, queue_id, qinfo);
+ /* convert offload to txq_flags to support legacy app */
+ rte_eth_convert_tx_offload(txconf->offloads, &txconf->txq_flags);
+
return 0;
}
return eth_err(port_id, (*dev->dev_ops->set_eeprom)(dev, info));
}
+int __rte_experimental
+rte_eth_dev_get_module_info(uint16_t port_id,
+ struct rte_eth_dev_module_info *modinfo)
+{
+ struct rte_eth_dev *dev;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
+
+ dev = &rte_eth_devices[port_id];
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_module_info, -ENOTSUP);
+ return (*dev->dev_ops->get_module_info)(dev, modinfo);
+}
+
+int __rte_experimental
+rte_eth_dev_get_module_eeprom(uint16_t port_id,
+ struct rte_dev_eeprom_info *info)
+{
+ struct rte_eth_dev *dev;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
+
+ dev = &rte_eth_devices[port_id];
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_module_eeprom, -ENOTSUP);
+ return (*dev->dev_ops->get_module_eeprom)(dev, info);
+}
+
int
rte_eth_dev_get_dcb_info(uint16_t port_id,
struct rte_eth_dcb_info *dcb_info)
return (*dev->dev_ops->pool_ops_supported)(dev, pool);
}
+
+/**
+ * A set of values to describe the possible states of a switch domain.
+ */
+enum rte_eth_switch_domain_state {
+ RTE_ETH_SWITCH_DOMAIN_UNUSED = 0,
+ RTE_ETH_SWITCH_DOMAIN_ALLOCATED
+};
+
+/**
+ * Array of switch domains available for allocation. Array is sized to
+ * RTE_MAX_ETHPORTS elements as there cannot be more active switch domains than
+ * ethdev ports in a single process.
+ */
+struct rte_eth_dev_switch {
+ enum rte_eth_switch_domain_state state;
+} rte_eth_switch_domains[RTE_MAX_ETHPORTS];
+
+int __rte_experimental
+rte_eth_switch_domain_alloc(uint16_t *domain_id)
+{
+ unsigned int i;
+
+ *domain_id = RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID;
+
+ for (i = RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID + 1;
+ i < RTE_MAX_ETHPORTS; i++) {
+ if (rte_eth_switch_domains[i].state ==
+ RTE_ETH_SWITCH_DOMAIN_UNUSED) {
+ rte_eth_switch_domains[i].state =
+ RTE_ETH_SWITCH_DOMAIN_ALLOCATED;
+ *domain_id = i;
+ return 0;
+ }
+ }
+
+ return -ENOSPC;
+}
+
+int __rte_experimental
+rte_eth_switch_domain_free(uint16_t domain_id)
+{
+ if (domain_id == RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID ||
+ domain_id >= RTE_MAX_ETHPORTS)
+ return -EINVAL;
+
+ if (rte_eth_switch_domains[domain_id].state !=
+ RTE_ETH_SWITCH_DOMAIN_ALLOCATED)
+ return -EINVAL;
+
+ rte_eth_switch_domains[domain_id].state = RTE_ETH_SWITCH_DOMAIN_UNUSED;
+
+ return 0;
+}
+
+typedef int (*rte_eth_devargs_callback_t)(char *str, void *data);
+
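+/*
+ * Split a devargs string of the form "key=value,key=[v1,v2-v3]" into
+ * key/value pairs. Bracketed values are kept whole so that the caller can
+ * parse them as lists.
+ */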
+static int
+rte_eth_devargs_tokenise(struct rte_kvargs *arglist, const char *str_in)
+{
+ int state;
+ struct rte_kvargs_pair *pair;
+ char *letter;
+
+ arglist->str = strdup(str_in);
+ if (arglist->str == NULL)
+ return -ENOMEM;
+
+ letter = arglist->str;
+ state = 0;
+ arglist->count = 0;
+ pair = &arglist->pairs[0];
+ while (1) {
+ switch (state) {
+ case 0: /* Initial */
+ if (*letter == '=')
+ return -EINVAL;
+ else if (*letter == '\0')
+ return 0;
+
+ state = 1;
+ pair->key = letter;
+ /* fall-thru */
+
+ case 1: /* Parsing key */
+ if (*letter == '=') {
+ *letter = '\0';
+ pair->value = letter + 1;
+ state = 2;
+ } else if (*letter == ',' || *letter == '\0')
+ return -EINVAL;
+ break;
+
+ case 2: /* Parsing value */
+ if (*letter == '[')
+ state = 3;
+ else if (*letter == ',') {
+ *letter = '\0';
+ arglist->count++;
+ pair = &arglist->pairs[arglist->count];
+ state = 0;
+ } else if (*letter == '\0') {
+ letter--;
+ arglist->count++;
+ pair = &arglist->pairs[arglist->count];
+ state = 0;
+ }
+ break;
+
+ case 3: /* Parsing list */
+ if (*letter == ']')
+ state = 2;
+ else if (*letter == '\0')
+ return -EINVAL;
+ break;
+ }
+ letter++;
+ }
+}
+
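+/*
+ * Run the callback on a single value, or on each comma-separated element of
+ * a bracketed list such as "[1,3-5]".
+ */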
+static int
+rte_eth_devargs_parse_list(char *str, rte_eth_devargs_callback_t callback,
+ void *data)
+{
+ char *str_start;
+ int state;
+ int result;
+
+ if (*str != '[')
+ /* Single element, not a list */
+ return callback(str, data);
+
+ /* Sanity check, then strip the brackets */
+ str_start = &str[strlen(str) - 1];
+ if (*str_start != ']') {
+ RTE_LOG(ERR, EAL, "(%s): List does not end with ']'", str);
+ return -EINVAL;
+ }
+ str++;
+ *str_start = '\0';
+
+ /* Process list elements */
+ state = 0;
+ while (1) {
+ if (state == 0) {
+ if (*str == '\0')
+ break;
+ if (*str != ',') {
+ str_start = str;
+ state = 1;
+ }
+ } else if (state == 1) {
+ if (*str == ',' || *str == '\0') {
+ if (str > str_start) {
+ /* Non-empty string fragment */
+ *str = '\0';
+ result = callback(str_start, data);
+ if (result < 0)
+ return result;
+ }
+ state = 0;
+ }
+ }
+ str++;
+ }
+ return 0;
+}
+
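+/*
+ * Parse a single value or an inclusive "lo-hi" range and append the
+ * resulting values to the supplied list.
+ */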
+static int
+rte_eth_devargs_process_range(char *str, uint16_t *list, uint16_t *len_list,
+ const uint16_t max_list)
+{
+ uint16_t lo, hi, val;
+ int result;
+
+ result = sscanf(str, "%hu-%hu", &lo, &hi);
+ if (result == 1) {
+ if (*len_list >= max_list)
+ return -ENOMEM;
+ list[(*len_list)++] = lo;
+ } else if (result == 2) {
+ if (lo >= hi || lo > RTE_MAX_ETHPORTS || hi > RTE_MAX_ETHPORTS)
+ return -EINVAL;
+ for (val = lo; val <= hi; val++) {
+ if (*len_list >= max_list)
+ return -ENOMEM;
+ list[(*len_list)++] = val;
+ }
+ } else
+ return -EINVAL;
+ return 0;
+}
+
+static int
+rte_eth_devargs_parse_representor_ports(char *str, void *data)
+{
+ struct rte_eth_devargs *eth_da = data;
+
+ return rte_eth_devargs_process_range(str, eth_da->representor_ports,
+ &eth_da->nb_representor_ports, RTE_MAX_ETHPORTS);
+}
+
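+/*
+ * Parse the ethdev-specific devargs; currently only the "representor" key,
+ * carrying a port or a list of ports, is recognised.
+ */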
+int __rte_experimental
+rte_eth_devargs_parse(const char *dargs, struct rte_eth_devargs *eth_da)
+{
+ struct rte_kvargs args;
+ struct rte_kvargs_pair *pair;
+ unsigned int i;
+ int result = 0;
+
+ memset(eth_da, 0, sizeof(*eth_da));
+
+ result = rte_eth_devargs_tokenise(&args, dargs);
+ if (result < 0)
+ goto parse_cleanup;
+
+ for (i = 0; i < args.count; i++) {
+ pair = &args.pairs[i];
+ if (strcmp("representor", pair->key) == 0) {
+ result = rte_eth_devargs_parse_list(pair->value,
+ rte_eth_devargs_parse_representor_ports,
+ eth_da);
+ if (result < 0)
+ goto parse_cleanup;
+ }
+ }
+
+parse_cleanup:
+ if (args.str)
+ free(args.str);
+
+ return result;
+}
+
+RTE_INIT(ethdev_init_log);
+static void
+ethdev_init_log(void)
+{
+ ethdev_logtype = rte_log_register("lib.ethdev");
+ if (ethdev_logtype >= 0)
+ rte_log_set_level(ethdev_logtype, RTE_LOG_INFO);
+}