#include <rte_ethdev.h>
#include <rte_malloc.h>
#include <rte_flow.h>
+#include <rte_cycles.h>
#include "failsafe_private.h"
.flow_type_rss_offloads = 0x0,
};
-/**
- * Check whether a specific offloading capability
- * is supported by a sub_device.
- *
- * @return
- * 0: all requested capabilities are supported by the sub_device
- * positive value: This flag at least is not supported by the sub_device
- */
-static int
-fs_port_offload_validate(struct rte_eth_dev *dev,
- struct sub_device *sdev)
-{
- struct rte_eth_dev_info infos = {0};
- struct rte_eth_conf *cf;
- uint32_t cap;
-
- cf = &dev->data->dev_conf;
- SUBOPS(sdev, dev_infos_get)(ETH(sdev), &infos);
- /* RX capabilities */
- cap = infos.rx_offload_capa;
- if (cf->rxmode.hw_vlan_strip &&
- ((cap & DEV_RX_OFFLOAD_VLAN_STRIP) == 0)) {
- WARN("VLAN stripping offload requested but not supported by sub_device %d",
- SUB_ID(sdev));
- return DEV_RX_OFFLOAD_VLAN_STRIP;
- }
- if (cf->rxmode.hw_ip_checksum &&
- ((cap & (DEV_RX_OFFLOAD_IPV4_CKSUM |
- DEV_RX_OFFLOAD_UDP_CKSUM |
- DEV_RX_OFFLOAD_TCP_CKSUM)) !=
- (DEV_RX_OFFLOAD_IPV4_CKSUM |
- DEV_RX_OFFLOAD_UDP_CKSUM |
- DEV_RX_OFFLOAD_TCP_CKSUM))) {
- WARN("IP checksum offload requested but not supported by sub_device %d",
- SUB_ID(sdev));
- return DEV_RX_OFFLOAD_IPV4_CKSUM |
- DEV_RX_OFFLOAD_UDP_CKSUM |
- DEV_RX_OFFLOAD_TCP_CKSUM;
- }
- if (cf->rxmode.enable_lro &&
- ((cap & DEV_RX_OFFLOAD_TCP_LRO) == 0)) {
- WARN("TCP LRO offload requested but not supported by sub_device %d",
- SUB_ID(sdev));
- return DEV_RX_OFFLOAD_TCP_LRO;
- }
- if (cf->rxmode.hw_vlan_extend &&
- ((cap & DEV_RX_OFFLOAD_QINQ_STRIP) == 0)) {
- WARN("Stacked VLAN stripping offload requested but not supported by sub_device %d",
- SUB_ID(sdev));
- return DEV_RX_OFFLOAD_QINQ_STRIP;
- }
- /* TX capabilities */
- /* Nothing to do, no tx capa supported */
- return 0;
-}
-
-/*
- * Disable the dev_conf flag related to an offload capability flag
- * within an ethdev configuration.
- */
-static int
-fs_port_disable_offload(struct rte_eth_conf *cf,
- uint32_t ol_cap)
-{
- switch (ol_cap) {
- case DEV_RX_OFFLOAD_VLAN_STRIP:
- INFO("Disabling VLAN stripping offload");
- cf->rxmode.hw_vlan_strip = 0;
- break;
- case DEV_RX_OFFLOAD_IPV4_CKSUM:
- case DEV_RX_OFFLOAD_UDP_CKSUM:
- case DEV_RX_OFFLOAD_TCP_CKSUM:
- case (DEV_RX_OFFLOAD_IPV4_CKSUM |
- DEV_RX_OFFLOAD_UDP_CKSUM |
- DEV_RX_OFFLOAD_TCP_CKSUM):
- INFO("Disabling IP checksum offload");
- cf->rxmode.hw_ip_checksum = 0;
- break;
- case DEV_RX_OFFLOAD_TCP_LRO:
- INFO("Disabling TCP LRO offload");
- cf->rxmode.enable_lro = 0;
- break;
- case DEV_RX_OFFLOAD_QINQ_STRIP:
- INFO("Disabling stacked VLAN stripping offload");
- cf->rxmode.hw_vlan_extend = 0;
- break;
- default:
- DEBUG("Unable to disable offload capability: %" PRIx32,
- ol_cap);
- return -1;
- }
- return 0;
-}
-
static int
fs_dev_configure(struct rte_eth_dev *dev)
{
struct sub_device *sdev;
uint8_t i;
- int capa_flag;
int ret;
+ /* NOTE(review): this change removes the offload-capability
+  * validation pass (fs_port_offload_validate /
+  * fs_port_disable_offload); the requested dev_conf is no longer
+  * checked or down-graded here before configuring sub-devices —
+  * confirm validation now happens elsewhere. */
- FOREACH_SUBDEV(sdev, i, dev) {
- if (sdev->state != DEV_PROBED)
- continue;
- DEBUG("Checking capabilities for sub_device %d", i);
- while ((capa_flag = fs_port_offload_validate(dev, sdev))) {
- /*
- * Refuse to change configuration if multiple devices
- * are present and we already have configured at least
- * some of them.
- */
- if (PRIV(dev)->state >= DEV_ACTIVE &&
- PRIV(dev)->subs_tail > 1) {
- ERROR("device already configured, cannot fix live configuration");
- return -1;
- }
- ret = fs_port_disable_offload(&dev->data->dev_conf,
- capa_flag);
- if (ret) {
- ERROR("Unable to disable offload capability");
- return ret;
- }
- }
- }
FOREACH_SUBDEV(sdev, i, dev) {
int rmv_interrupt = 0;
int lsc_interrupt = 0;
ERROR("Could not configure sub_device %d", i);
return ret;
}
- if (rmv_interrupt) {
+ /* Register the RMV callback at most once per sub-device:
+  * sdev->rmv_callback records a successful registration so a
+  * later reconfiguration does not register a duplicate. */
+ if (rmv_interrupt && sdev->rmv_callback == 0) {
ret = rte_eth_dev_callback_register(PORT_ID(sdev),
RTE_ETH_EVENT_INTR_RMV,
failsafe_eth_rmv_event_callback,
if (ret)
WARN("Failed to register RMV callback for sub_device %d",
SUB_ID(sdev));
+ /* Remember the registration only when it succeeded. */
+ else
+ sdev->rmv_callback = 1;
}
dev->data->dev_conf.intr_conf.rmv = 0;
- if (lsc_interrupt) {
+ /* Same once-only guard for the LSC callback, tracked in
+  * sdev->lsc_callback. */
+ if (lsc_interrupt && sdev->lsc_callback == 0) {
ret = rte_eth_dev_callback_register(PORT_ID(sdev),
RTE_ETH_EVENT_INTR_LSC,
failsafe_eth_lsc_event_callback,
if (ret)
WARN("Failed to register LSC callback for sub_device %d",
SUB_ID(sdev));
+ else
+ sdev->lsc_callback = 1;
}
dev->data->dev_conf.intr_conf.lsc = lsc_enabled;
sdev->state = DEV_ACTIVE;
PRIV(dev)->state = DEV_ACTIVE - 1;
FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
DEBUG("Closing sub_device %d", i);
+ /* Unregister the failsafe event callbacks from this sub-device
+  * before closing its port — presumably so no RMV/LSC event can
+  * fire against a closed port (confirm against the callee). */
+ failsafe_eth_dev_unregister_callbacks(sdev);
rte_eth_dev_close(PORT_ID(sdev));
sdev->state = DEV_ACTIVE - 1;
}
return;
rxq = queue;
dev = rxq->priv->dev;
- FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE)
- SUBOPS(sdev, rx_queue_release)
- (ETH(sdev)->data->rx_queues[rxq->qid]);
+ FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
+ /* Only invoke the sub-PMD release op when the queue actually
+  * exists on this sub-device: the rx_queues array may be NULL
+  * or the slot unpopulated. */
+ if (ETH(sdev)->data->rx_queues != NULL &&
+ ETH(sdev)->data->rx_queues[rxq->qid] != NULL) {
+ SUBOPS(sdev, rx_queue_release)
+ (ETH(sdev)->data->rx_queues[rxq->qid]);
+ }
+ }
dev->data->rx_queues[rxq->qid] = NULL;
rte_free(rxq);
}
uint8_t i;
int ret;
+ /* Deferred Rx queue start is not supported by this PMD:
+  * reject it up front with -EINVAL. */
+ if (rx_conf->rx_deferred_start) {
+ ERROR("Rx queue deferred start is not supported");
+ return -EINVAL;
+ }
+
rxq = dev->data->rx_queues[rx_queue_id];
if (rxq != NULL) {
fs_rx_queue_release(rxq);
return;
txq = queue;
dev = txq->priv->dev;
- FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE)
- SUBOPS(sdev, tx_queue_release)
- (ETH(sdev)->data->tx_queues[txq->qid]);
+ FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
+ /* Mirror of the Rx release guard: skip sub-devices on which
+  * this Tx queue does not exist (NULL array or empty slot). */
+ if (ETH(sdev)->data->tx_queues != NULL &&
+ ETH(sdev)->data->tx_queues[txq->qid] != NULL) {
+ SUBOPS(sdev, tx_queue_release)
+ (ETH(sdev)->data->tx_queues[txq->qid]);
+ }
+ }
dev->data->tx_queues[txq->qid] = NULL;
rte_free(txq);
}
uint8_t i;
int ret;
+ /* Deferred Tx queue start is not supported by this PMD:
+  * reject it up front with -EINVAL. */
+ if (tx_conf->tx_deferred_start) {
+ ERROR("Tx queue deferred start is not supported");
+ return -EINVAL;
+ }
+
txq = dev->data->tx_queues[tx_queue_id];
if (txq != NULL) {
fs_tx_queue_release(txq);
return -1;
}
-static void
+static int
fs_stats_get(struct rte_eth_dev *dev,
struct rte_eth_stats *stats)
{
- if (TX_SUBDEV(dev) == NULL)
- return;
- rte_eth_stats_get(PORT_ID(TX_SUBDEV(dev)), stats);
+ struct sub_device *sdev;
+ uint8_t i;
+ int ret;
+
+ /* Start from the counters accumulated in the failsafe private
+  * data, then add a fresh snapshot from every active sub-device
+  * (previously only the Tx sub-device's stats were reported). */
+ rte_memcpy(stats, &PRIV(dev)->stats_accumulator, sizeof(*stats));
+ FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
+ struct rte_eth_stats *snapshot = &sdev->stats_snapshot.stats;
+ uint64_t *timestamp = &sdev->stats_snapshot.timestamp;
+
+ ret = rte_eth_stats_get(PORT_ID(sdev), snapshot);
+ if (ret) {
+ ERROR("Operation rte_eth_stats_get failed for sub_device %d with error %d",
+ i, ret);
+ /* Zero the timestamp so this failed snapshot is
+  * marked invalid, then propagate the error. */
+ *timestamp = 0;
+ return ret;
+ }
+ /* TSC timestamp of when this snapshot was taken —
+  * rte_rdtsc() is why rte_cycles.h is now included. */
+ *timestamp = rte_rdtsc();
+ failsafe_stats_increment(stats, snapshot);
+ }
+ return 0;
}
static void
struct sub_device *sdev;
uint8_t i;
- FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE)
+ FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
rte_eth_stats_reset(PORT_ID(sdev));
+ /* NOTE(review): the memset size is sizeof(struct rte_eth_stats)
+  * while the target is the whole stats_snapshot aggregate
+  * (stats + timestamp), so only the leading stats member is
+  * cleared and the timestamp survives — confirm intentional. */
+ memset(&sdev->stats_snapshot, 0, sizeof(struct rte_eth_stats));
+ }
+ /* Also clear the counters accumulated from removed sub-devices. */
+ memset(&PRIV(dev)->stats_accumulator, 0, sizeof(struct rte_eth_stats));
}
/**