-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2017 Marvell International Ltd.
- * Copyright(c) 2017 Semihalf.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Semihalf nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Marvell International Ltd.
+ * Copyright(c) 2017 Semihalf.
+ * All rights reserved.
*/
#include <rte_ethdev_driver.h>
int mrvl_port_bpool_size[PP2_NUM_PKT_PROC][PP2_BPOOL_NUM_POOLS][RTE_MAX_LCORE];
uint64_t cookie_addr_high = MRVL_COOKIE_ADDR_INVALID;
+int mrvl_logtype;
+
struct mrvl_ifnames {
const char *names[PP2_NUM_ETH_PPIO * PP2_NUM_PKT_PROC];
int idx;
int port_id;
uint64_t bytes_sent;
struct mrvl_shadow_txq shadow_txqs[RTE_MAX_LCORE];
+ int tx_deferred_start;
};
static int mrvl_lcore_first;
struct pp2_hif *hif, unsigned int core_id,
struct mrvl_shadow_txq *sq, int qid, int force);
+/* Builds one xstats table row: stat name string, byte offset and width of
+ * the corresponding field inside struct pp2_ppio_statistics.
+ */
+#define MRVL_XSTATS_TBL_ENTRY(name) { \
+	#name, offsetof(struct pp2_ppio_statistics, name), \
+	sizeof(((struct pp2_ppio_statistics *)0)->name) \
+}
+
+/* Table with xstats data */
+static struct {
+	const char *name;	/* xstat name exposed to the application */
+	unsigned int offset;	/* field offset in pp2_ppio_statistics */
+	unsigned int size;	/* field width in bytes */
+} mrvl_xstats_tbl[] = {
+	MRVL_XSTATS_TBL_ENTRY(rx_bytes),
+	MRVL_XSTATS_TBL_ENTRY(rx_packets),
+	MRVL_XSTATS_TBL_ENTRY(rx_unicast_packets),
+	MRVL_XSTATS_TBL_ENTRY(rx_errors),
+	MRVL_XSTATS_TBL_ENTRY(rx_fullq_dropped),
+	MRVL_XSTATS_TBL_ENTRY(rx_bm_dropped),
+	MRVL_XSTATS_TBL_ENTRY(rx_early_dropped),
+	MRVL_XSTATS_TBL_ENTRY(rx_fifo_dropped),
+	MRVL_XSTATS_TBL_ENTRY(rx_cls_dropped),
+	MRVL_XSTATS_TBL_ENTRY(tx_bytes),
+	MRVL_XSTATS_TBL_ENTRY(tx_packets),
+	MRVL_XSTATS_TBL_ENTRY(tx_unicast_packets),
+	MRVL_XSTATS_TBL_ENTRY(tx_errors)
+};
+
static inline int
mrvl_get_bpool_size(int pp2_id, int pool_id)
{
ret = mrvl_reserve_bit(&used_hifs, MRVL_MUSDK_HIFS_MAX);
if (ret < 0) {
- RTE_LOG(ERR, PMD, "Failed to allocate hif %d\n", core_id);
+ MRVL_LOG(ERR, "Failed to allocate hif %d", core_id);
return ret;
}
params.out_size = MRVL_PP2_AGGR_TXQD_MAX;
ret = pp2_hif_init(¶ms, &hifs[core_id]);
if (ret) {
- RTE_LOG(ERR, PMD, "Failed to initialize hif %d\n", core_id);
+ MRVL_LOG(ERR, "Failed to initialize hif %d", core_id);
return ret;
}
ret = mrvl_init_hif(core_id);
if (ret < 0) {
- RTE_LOG(ERR, PMD, "Failed to allocate hif %d\n", core_id);
+ MRVL_LOG(ERR, "Failed to allocate hif %d", core_id);
goto out;
}
mrvl_configure_rss(struct mrvl_priv *priv, struct rte_eth_rss_conf *rss_conf)
{
if (rss_conf->rss_key)
- RTE_LOG(WARNING, PMD, "Changing hash key is not supported\n");
+ MRVL_LOG(WARNING, "Changing hash key is not supported");
if (rss_conf->rss_hf == 0) {
priv->ppio_params.inqs_params.hash_type = PP2_PPIO_HASH_T_NONE;
if (dev->data->dev_conf.rxmode.mq_mode != ETH_MQ_RX_NONE &&
dev->data->dev_conf.rxmode.mq_mode != ETH_MQ_RX_RSS) {
- RTE_LOG(INFO, PMD, "Unsupported rx multi queue mode %d\n",
+ MRVL_LOG(INFO, "Unsupported rx multi queue mode %d",
dev->data->dev_conf.rxmode.mq_mode);
return -EINVAL;
}
- if (!(dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_CRC_STRIP)) {
- RTE_LOG(INFO, PMD,
- "L2 CRC stripping is always enabled in hw\n");
+ /* KEEP_CRC offload flag is not supported by PMD
+ * can remove the below block when DEV_RX_OFFLOAD_CRC_STRIP removed
+ */
+ if (rte_eth_dev_must_keep_crc(dev->data->dev_conf.rxmode.offloads)) {
+ MRVL_LOG(INFO, "L2 CRC stripping is always enabled in hw");
dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_CRC_STRIP;
}
- if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_VLAN_STRIP) {
- RTE_LOG(INFO, PMD, "VLAN stripping not supported\n");
- return -EINVAL;
- }
-
if (dev->data->dev_conf.rxmode.split_hdr_size) {
- RTE_LOG(INFO, PMD, "Split headers not supported\n");
- return -EINVAL;
- }
-
- if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SCATTER) {
- RTE_LOG(INFO, PMD, "RX Scatter/Gather not supported\n");
- return -EINVAL;
- }
-
- if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_TCP_LRO) {
- RTE_LOG(INFO, PMD, "LRO not supported\n");
+ MRVL_LOG(INFO, "Split headers not supported");
return -EINVAL;
}
if (ret < 0)
return ret;
+ ret = mrvl_configure_txqs(priv, dev->data->port_id,
+ dev->data->nb_tx_queues);
+ if (ret < 0)
+ return ret;
+
priv->ppio_params.outqs_params.num_outqs = dev->data->nb_tx_queues;
priv->ppio_params.maintain_stats = 1;
priv->nb_rx_queues = dev->data->nb_rx_queues;
if (dev->data->nb_rx_queues == 1 &&
dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_RSS) {
- RTE_LOG(WARNING, PMD, "Disabling hash for 1 rx queue\n");
+ MRVL_LOG(WARNING, "Disabling hash for 1 rx queue");
priv->ppio_params.inqs_params.hash_type = PP2_PPIO_HASH_T_NONE;
return 0;
return pp2_ppio_disable(priv->ppio);
}
+/**
+ * DPDK callback to start tx queue.
+ *
+ * @param dev
+ *   Pointer to Ethernet device structure.
+ * @param queue_id
+ *   Transmit queue index.
+ *
+ * @return
+ *   0 on success, negative error value otherwise.
+ */
+static int
+mrvl_tx_queue_start(struct rte_eth_dev *dev, uint16_t queue_id)
+{
+	struct mrvl_priv *priv = dev->data->dev_private;
+	int ret;
+
+	/* Guard against a not-yet-initialized ppio as well, consistently with
+	 * mrvl_tx_queue_stop(); pp2_ppio_set_outq_state() must not be called
+	 * with a NULL ppio.
+	 */
+	if (!priv || !priv->ppio)
+		return -EPERM;
+
+	/* passing 1 enables given tx queue */
+	ret = pp2_ppio_set_outq_state(priv->ppio, queue_id, 1);
+	if (ret) {
+		MRVL_LOG(ERR, "Failed to start txq %d", queue_id);
+		return ret;
+	}
+
+	dev->data->tx_queue_state[queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
+
+	return 0;
+}
+
+/**
+ * DPDK callback to stop tx queue.
+ *
+ * @param dev
+ *   Pointer to Ethernet device structure.
+ * @param queue_id
+ *   Transmit queue index.
+ *
+ * @return
+ *   0 on success, negative error value otherwise.
+ */
+static int
+mrvl_tx_queue_stop(struct rte_eth_dev *dev, uint16_t queue_id)
+{
+	struct mrvl_priv *priv = dev->data->dev_private;
+	int err;
+
+	/* Nothing to disable before the ppio has been initialized */
+	if (priv->ppio == NULL)
+		return -EPERM;
+
+	/* passing 0 disables given tx queue */
+	err = pp2_ppio_set_outq_state(priv->ppio, queue_id, 0);
+	if (err != 0) {
+		MRVL_LOG(ERR, "Failed to stop txq %d", queue_id);
+		return err;
+	}
+
+	dev->data->tx_queue_state[queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
+
+	return 0;
+}
+
/**
* DPDK callback to start the device.
*
{
struct mrvl_priv *priv = dev->data->dev_private;
char match[MRVL_MATCH_LEN];
- int ret = 0, def_init_size;
+ int ret = 0, i, def_init_size;
snprintf(match, sizeof(match), "ppio-%d:%d",
priv->pp_id, priv->ppio_id);
priv->bpool_init_size += buffs_to_add;
ret = mrvl_fill_bpool(dev->data->rx_queues[0], buffs_to_add);
if (ret)
- RTE_LOG(ERR, PMD, "Failed to add buffers to bpool\n");
+ MRVL_LOG(ERR, "Failed to add buffers to bpool");
}
/*
ret = pp2_ppio_init(&priv->ppio_params, &priv->ppio);
if (ret) {
- RTE_LOG(ERR, PMD, "Failed to init ppio\n");
+ MRVL_LOG(ERR, "Failed to init ppio");
return ret;
}
if (!priv->uc_mc_flushed) {
ret = pp2_ppio_flush_mac_addrs(priv->ppio, 1, 1);
if (ret) {
- RTE_LOG(ERR, PMD,
- "Failed to flush uc/mc filter list\n");
+ MRVL_LOG(ERR,
+ "Failed to flush uc/mc filter list");
goto out;
}
priv->uc_mc_flushed = 1;
if (!priv->vlan_flushed) {
ret = pp2_ppio_flush_vlan(priv->ppio);
if (ret) {
- RTE_LOG(ERR, PMD, "Failed to flush vlan list\n");
+ MRVL_LOG(ERR, "Failed to flush vlan list");
/*
* TODO
* once pp2_ppio_flush_vlan() is supported jump to out
if (mrvl_qos_cfg) {
ret = mrvl_start_qos_mapping(priv);
if (ret) {
- RTE_LOG(ERR, PMD, "Failed to setup QoS mapping\n");
+ MRVL_LOG(ERR, "Failed to setup QoS mapping");
goto out;
}
}
ret = mrvl_dev_set_link_up(dev);
if (ret) {
- RTE_LOG(ERR, PMD, "Failed to set link up\n");
+ MRVL_LOG(ERR, "Failed to set link up");
goto out;
}
+ /* start tx queues */
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ struct mrvl_txq *txq = dev->data->tx_queues[i];
+
+ dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED;
+
+ if (!txq->tx_deferred_start)
+ continue;
+
+ /*
+ * All txqs are started by default. Stop them
+ * so that tx_deferred_start works as expected.
+ */
+ ret = mrvl_tx_queue_stop(dev, i);
+ if (ret)
+ goto out;
+ }
+
return 0;
out:
- RTE_LOG(ERR, PMD, "Failed to start device\n");
+ MRVL_LOG(ERR, "Failed to start device");
pp2_ppio_deinit(priv->ppio);
return ret;
}
{
int i;
- RTE_LOG(INFO, PMD, "Flushing rx queues\n");
+ MRVL_LOG(INFO, "Flushing rx queues");
for (i = 0; i < dev->data->nb_rx_queues; i++) {
int ret, num;
int i, j;
struct mrvl_txq *txq;
- RTE_LOG(INFO, PMD, "Flushing tx shadow queues\n");
+ MRVL_LOG(INFO, "Flushing tx shadow queues");
for (i = 0; i < dev->data->nb_tx_queues; i++) {
txq = (struct mrvl_txq *)dev->data->tx_queues[i];
ret = pp2_bpool_get_num_buffs(priv->bpool, &num);
if (ret) {
- RTE_LOG(ERR, PMD, "Failed to get bpool buffers number\n");
+ MRVL_LOG(ERR, "Failed to get bpool buffers number");
return;
}
mrvl_dev_set_link_down(dev);
mrvl_flush_rx_queues(dev);
mrvl_flush_tx_shadow_queues(dev);
+ if (priv->cls_tbl) {
+ pp2_cls_tbl_deinit(priv->cls_tbl);
+ priv->cls_tbl = NULL;
+ }
if (priv->qos_tbl) {
pp2_cls_qos_tbl_deinit(priv->qos_tbl);
priv->qos_tbl = NULL;
}
- pp2_ppio_deinit(priv->ppio);
+ if (priv->ppio)
+ pp2_ppio_deinit(priv->ppio);
priv->ppio = NULL;
+
+ /* policer must be released after ppio deinitialization */
+ if (priv->policer) {
+ pp2_cls_plcr_deinit(priv->policer);
+ priv->policer = NULL;
+ }
}
/**
if (!priv->ppio)
return;
+ if (priv->isolated)
+ return;
+
ret = pp2_ppio_set_promisc(priv->ppio, 1);
if (ret)
- RTE_LOG(ERR, PMD, "Failed to enable promiscuous mode\n");
+ MRVL_LOG(ERR, "Failed to enable promiscuous mode");
}
/**
if (!priv->ppio)
return;
+ if (priv->isolated)
+ return;
+
ret = pp2_ppio_set_mc_promisc(priv->ppio, 1);
if (ret)
- RTE_LOG(ERR, PMD, "Failed enable all-multicast mode\n");
+ MRVL_LOG(ERR, "Failed enable all-multicast mode");
}
/**
ret = pp2_ppio_set_promisc(priv->ppio, 0);
if (ret)
- RTE_LOG(ERR, PMD, "Failed to disable promiscuous mode\n");
+ MRVL_LOG(ERR, "Failed to disable promiscuous mode");
}
/**
ret = pp2_ppio_set_mc_promisc(priv->ppio, 0);
if (ret)
- RTE_LOG(ERR, PMD, "Failed to disable all-multicast mode\n");
+ MRVL_LOG(ERR, "Failed to disable all-multicast mode");
}
/**
if (!priv->ppio)
return;
+ if (priv->isolated)
+ return;
+
ret = pp2_ppio_remove_mac_addr(priv->ppio,
dev->data->mac_addrs[index].addr_bytes);
if (ret) {
ether_format_addr(buf, sizeof(buf),
&dev->data->mac_addrs[index]);
- RTE_LOG(ERR, PMD, "Failed to remove mac %s\n", buf);
+ MRVL_LOG(ERR, "Failed to remove mac %s", buf);
}
}
char buf[ETHER_ADDR_FMT_SIZE];
int ret;
+ if (priv->isolated)
+ return -ENOTSUP;
+
if (index == 0)
/* For setting index 0, mrvl_mac_addr_set() should be used.*/
return -1;
ret = pp2_ppio_add_mac_addr(priv->ppio, mac_addr->addr_bytes);
if (ret) {
ether_format_addr(buf, sizeof(buf), mac_addr);
- RTE_LOG(ERR, PMD, "Failed to add mac %s\n", buf);
+ MRVL_LOG(ERR, "Failed to add mac %s", buf);
return -1;
}
* Pointer to Ethernet device structure.
* @param mac_addr
* MAC address to register.
+ *
+ * @return
+ * 0 on success, negative error value otherwise.
*/
-static void
+static int
mrvl_mac_addr_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr)
{
struct mrvl_priv *priv = dev->data->dev_private;
int ret;
if (!priv->ppio)
- return;
+ return 0;
+
+ if (priv->isolated)
+ return -ENOTSUP;
ret = pp2_ppio_set_mac_addr(priv->ppio, mac_addr->addr_bytes);
if (ret) {
char buf[ETHER_ADDR_FMT_SIZE];
ether_format_addr(buf, sizeof(buf), mac_addr);
- RTE_LOG(ERR, PMD, "Failed to set mac to %s\n", buf);
+ MRVL_LOG(ERR, "Failed to set mac to %s", buf);
}
+
+ return ret;
}
/**
idx = rxq->queue_id;
if (unlikely(idx >= RTE_ETHDEV_QUEUE_STAT_CNTRS)) {
- RTE_LOG(ERR, PMD,
- "rx queue %d stats out of range (0 - %d)\n",
+ MRVL_LOG(ERR,
+ "rx queue %d stats out of range (0 - %d)",
idx, RTE_ETHDEV_QUEUE_STAT_CNTRS - 1);
continue;
}
priv->rxq_map[idx].inq,
&rx_stats, 0);
if (unlikely(ret)) {
- RTE_LOG(ERR, PMD,
- "Failed to update rx queue %d stats\n", idx);
+ MRVL_LOG(ERR,
+ "Failed to update rx queue %d stats", idx);
break;
}
idx = txq->queue_id;
if (unlikely(idx >= RTE_ETHDEV_QUEUE_STAT_CNTRS)) {
- RTE_LOG(ERR, PMD,
- "tx queue %d stats out of range (0 - %d)\n",
+ MRVL_LOG(ERR,
+ "tx queue %d stats out of range (0 - %d)",
idx, RTE_ETHDEV_QUEUE_STAT_CNTRS - 1);
}
ret = pp2_ppio_outq_get_statistics(priv->ppio, idx,
&tx_stats, 0);
if (unlikely(ret)) {
- RTE_LOG(ERR, PMD,
- "Failed to update tx queue %d stats\n", idx);
+ MRVL_LOG(ERR,
+ "Failed to update tx queue %d stats", idx);
break;
}
ret = pp2_ppio_get_statistics(priv->ppio, &ppio_stats, 0);
if (unlikely(ret)) {
- RTE_LOG(ERR, PMD, "Failed to update port statistics\n");
+ MRVL_LOG(ERR, "Failed to update port statistics");
return ret;
}
pp2_ppio_get_statistics(priv->ppio, NULL, 1);
}
+/**
+ * DPDK callback to get extended statistics.
+ *
+ * @param dev
+ *   Pointer to Ethernet device structure.
+ * @param stats
+ *   Pointer to xstats table.
+ * @param n
+ *   Number of entries in xstats table.
+ * @return
+ *   Negative value on error, number of xstats filled in otherwise.
+ */
+static int
+mrvl_xstats_get(struct rte_eth_dev *dev,
+		struct rte_eth_xstat *stats, unsigned int n)
+{
+	struct mrvl_priv *priv = dev->data->dev_private;
+	struct pp2_ppio_statistics ppio_stats;
+	unsigned int i;
+	int ret;
+
+	if (!stats)
+		return 0;
+
+	/* Propagate a hardware read failure instead of exposing
+	 * an uninitialized ppio_stats.
+	 */
+	ret = pp2_ppio_get_statistics(priv->ppio, &ppio_stats, 0);
+	if (ret)
+		return ret;
+
+	for (i = 0; i < n && i < RTE_DIM(mrvl_xstats_tbl); i++) {
+		uint64_t val;
+
+		/* Table entries record each field's offset and width in
+		 * struct pp2_ppio_statistics; any width other than 32 or
+		 * 64 bits is a table bug.
+		 */
+		if (mrvl_xstats_tbl[i].size == sizeof(uint32_t))
+			val = *(uint32_t *)((uint8_t *)&ppio_stats +
+					    mrvl_xstats_tbl[i].offset);
+		else if (mrvl_xstats_tbl[i].size == sizeof(uint64_t))
+			val = *(uint64_t *)((uint8_t *)&ppio_stats +
+					    mrvl_xstats_tbl[i].offset);
+		else
+			return -EINVAL;
+
+		stats[i].id = i;
+		stats[i].value = val;
+	}
+
+	/* Report only the entries actually filled in; returning n would
+	 * claim uninitialized entries as valid when n exceeds the number
+	 * of supported xstats.
+	 */
+	return i;
+}
+
+/**
+ * DPDK callback to reset extended statistics.
+ *
+ * @param dev
+ *   Pointer to Ethernet device structure.
+ */
+static void
+mrvl_xstats_reset(struct rte_eth_dev *dev)
+{
+	/* Base stats and xstats are read from the same hardware counters
+	 * (pp2_ppio_get_statistics), so resetting the base stats clears
+	 * the xstats as well.
+	 */
+	mrvl_stats_reset(dev);
+}
+
+/**
+ * DPDK callback to get extended statistics names.
+ *
+ * @param dev (unused)
+ *   Pointer to Ethernet device structure.
+ * @param xstats_names
+ *   Pointer to xstats names table.
+ * @param size
+ *   Size of the xstats names table.
+ * @return
+ *   Number of names filled in, or the total number of available xstats
+ *   when xstats_names is NULL.
+ */
+static int
+mrvl_xstats_get_names(struct rte_eth_dev *dev __rte_unused,
+		      struct rte_eth_xstat_name *xstats_names,
+		      unsigned int size)
+{
+	unsigned int i;
+
+	/* NULL table is a query for the number of available xstats */
+	if (!xstats_names)
+		return RTE_DIM(mrvl_xstats_tbl);
+
+	for (i = 0; i < size && i < RTE_DIM(mrvl_xstats_tbl); i++)
+		snprintf(xstats_names[i].name, RTE_ETH_XSTATS_NAME_SIZE, "%s",
+			 mrvl_xstats_tbl[i].name);
+
+	/* Return the number of entries actually written; returning size
+	 * overstated the result whenever size exceeded the number of
+	 * supported xstats, letting callers read garbage names.
+	 */
+	return i;
+}
+
/**
* DPDK callback to get information about the device.
*
struct rte_eth_txq_info *qinfo)
{
struct mrvl_priv *priv = dev->data->dev_private;
+ struct mrvl_txq *txq = dev->data->tx_queues[tx_queue_id];
qinfo->nb_desc =
priv->ppio_params.outqs_params.outqs_params[tx_queue_id].size;
+ qinfo->conf.tx_deferred_start = txq->tx_deferred_start;
}
/**
if (!priv->ppio)
return -EPERM;
+ if (priv->isolated)
+ return -ENOTSUP;
+
return on ? pp2_ppio_add_vlan(priv->ppio, vlan_id) :
pp2_ppio_remove_vlan(priv->ppio, vlan_id);
}
static int
mrvl_fill_bpool(struct mrvl_rxq *rxq, int num)
{
- struct buff_release_entry entries[MRVL_PP2_TXD_MAX];
- struct rte_mbuf *mbufs[MRVL_PP2_TXD_MAX];
+ struct buff_release_entry entries[MRVL_PP2_RXD_MAX];
+ struct rte_mbuf *mbufs[MRVL_PP2_RXD_MAX];
int i, ret;
unsigned int core_id;
struct pp2_hif *hif;
for (i = 0; i < num; i++) {
if (((uint64_t)mbufs[i] & MRVL_COOKIE_HIGH_ADDR_MASK)
!= cookie_addr_high) {
- RTE_LOG(ERR, PMD,
- "mbuf virtual addr high 0x%lx out of range\n",
+ MRVL_LOG(ERR,
+ "mbuf virtual addr high 0x%lx out of range",
(uint64_t)mbufs[i] >> 32);
goto out;
}
return -1;
}
-/**
- * Check whether requested rx queue offloads match port offloads.
- *
- * @param
- * dev Pointer to the device.
- * @param
- * requested Bitmap of the requested offloads.
- *
- * @return
- * 1 if requested offloads are okay, 0 otherwise.
- */
-static int
-mrvl_rx_queue_offloads_okay(struct rte_eth_dev *dev, uint64_t requested)
-{
- uint64_t mandatory = dev->data->dev_conf.rxmode.offloads;
- uint64_t supported = MRVL_RX_OFFLOADS;
- uint64_t unsupported = requested & ~supported;
- uint64_t missing = mandatory & ~requested;
-
- if (unsupported) {
- RTE_LOG(ERR, PMD, "Some Rx offloads are not supported. "
- "Requested 0x%" PRIx64 " supported 0x%" PRIx64 ".\n",
- requested, supported);
- return 0;
- }
-
- if (missing) {
- RTE_LOG(ERR, PMD, "Some Rx offloads are missing. "
- "Requested 0x%" PRIx64 " missing 0x%" PRIx64 ".\n",
- requested, missing);
- return 0;
- }
-
- return 1;
-}
-
/**
* DPDK callback to configure the receive queue.
*
uint32_t min_size,
max_rx_pkt_len = dev->data->dev_conf.rxmode.max_rx_pkt_len;
int ret, tc, inq;
+ uint64_t offloads;
- if (!mrvl_rx_queue_offloads_okay(dev, conf->offloads))
- return -ENOTSUP;
+ offloads = conf->offloads | dev->data->dev_conf.rxmode.offloads;
if (priv->rxq_map[idx].tc == MRVL_UNKNOWN_TC) {
/*
* Unknown TC mapping, mapping will not have a correct queue.
*/
- RTE_LOG(ERR, PMD, "Unknown TC mapping for queue %hu eth%hhu\n",
+ MRVL_LOG(ERR, "Unknown TC mapping for queue %hu eth%hhu",
idx, priv->ppio_id);
return -EFAULT;
}
min_size = rte_pktmbuf_data_room_size(mp) - RTE_PKTMBUF_HEADROOM -
MRVL_PKT_EFFEC_OFFS;
if (min_size < max_rx_pkt_len) {
- RTE_LOG(ERR, PMD,
- "Mbuf size must be increased to %u bytes to hold up to %u bytes of data.\n",
+ MRVL_LOG(ERR,
+ "Mbuf size must be increased to %u bytes to hold up to %u bytes of data.",
max_rx_pkt_len + RTE_PKTMBUF_HEADROOM +
MRVL_PKT_EFFEC_OFFS,
max_rx_pkt_len);
rxq->priv = priv;
rxq->mp = mp;
- rxq->cksum_enabled =
- dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_IPV4_CKSUM;
+ rxq->cksum_enabled = offloads & DEV_RX_OFFLOAD_IPV4_CKSUM;
rxq->queue_id = idx;
rxq->port_id = dev->data->port_id;
mrvl_port_to_bpool_lookup[rxq->port_id] = priv->bpool;
if (core_id == LCORE_ID_ANY)
core_id = 0;
+ if (!q)
+ return;
+
hif = mrvl_get_hif(q->priv, core_id);
- if (!q || !hif)
+ if (!hif)
return;
tc = q->priv->rxq_map[q->queue_id].tc;
rte_free(q);
}
-/**
- * Check whether requested tx queue offloads match port offloads.
- *
- * @param
- * dev Pointer to the device.
- * @param
- * requested Bitmap of the requested offloads.
- *
- * @return
- * 1 if requested offloads are okay, 0 otherwise.
- */
-static int
-mrvl_tx_queue_offloads_okay(struct rte_eth_dev *dev, uint64_t requested)
-{
- uint64_t mandatory = dev->data->dev_conf.txmode.offloads;
- uint64_t supported = MRVL_TX_OFFLOADS;
- uint64_t unsupported = requested & ~supported;
- uint64_t missing = mandatory & ~requested;
-
- if (unsupported) {
- RTE_LOG(ERR, PMD, "Some Rx offloads are not supported. "
- "Requested 0x%" PRIx64 " supported 0x%" PRIx64 ".\n",
- requested, supported);
- return 0;
- }
-
- if (missing) {
- RTE_LOG(ERR, PMD, "Some Rx offloads are missing. "
- "Requested 0x%" PRIx64 " missing 0x%" PRIx64 ".\n",
- requested, missing);
- return 0;
- }
-
- return 1;
-}
-
/**
* DPDK callback to configure the transmit queue.
*
* @param socket
* NUMA socket on which memory must be allocated.
* @param conf
- * Thresholds parameters.
+ * Tx queue configuration parameters.
*
* @return
* 0 on success, negative error value otherwise.
struct mrvl_priv *priv = dev->data->dev_private;
struct mrvl_txq *txq;
- if (!mrvl_tx_queue_offloads_okay(dev, conf->offloads))
- return -ENOTSUP;
-
if (dev->data->tx_queues[idx]) {
rte_free(dev->data->tx_queues[idx]);
dev->data->tx_queues[idx] = NULL;
txq->priv = priv;
txq->queue_id = idx;
txq->port_id = dev->data->port_id;
+ txq->tx_deferred_start = conf->tx_deferred_start;
dev->data->tx_queues[idx] = txq;
priv->ppio_params.outqs_params.outqs_params[idx].size = desc;
- priv->ppio_params.outqs_params.outqs_params[idx].weight = 1;
return 0;
}
rte_free(q);
}
+/**
+ * DPDK callback to get flow control configuration.
+ *
+ * @param dev
+ *   Pointer to Ethernet device structure.
+ * @param fc_conf
+ *   Pointer to the flow control configuration.
+ *
+ * @return
+ *   0 on success, negative error value otherwise.
+ */
+static int
+mrvl_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
+{
+	struct mrvl_priv *priv = dev->data->dev_private;
+	int ret, en;
+
+	/* NOTE(review): only priv is checked here, yet priv->ppio is
+	 * dereferenced below while the tx queue start/stop callbacks guard
+	 * against a NULL ppio — confirm this cannot be reached before the
+	 * device is started.
+	 */
+	if (!priv)
+		return -EPERM;
+
+	ret = pp2_ppio_get_rx_pause(priv->ppio, &en);
+	if (ret) {
+		MRVL_LOG(ERR, "Failed to read rx pause state");
+		return ret;
+	}
+
+	/* Only RX-side pause is readable from the hardware here */
+	fc_conf->mode = en ? RTE_FC_RX_PAUSE : RTE_FC_NONE;
+
+	return 0;
+}
+
+/**
+ * DPDK callback to set flow control configuration.
+ *
+ * Only RTE_FC_NONE and RTE_FC_RX_PAUSE modes are supported; the
+ * high/low water marks, pause time, MAC control frame forwarding and
+ * autonegotiation parameters are not configurable.
+ *
+ * @param dev
+ *   Pointer to Ethernet device structure.
+ * @param fc_conf
+ *   Pointer to the flow control configuration.
+ *
+ * @return
+ *   0 on success, negative error value otherwise.
+ */
+static int
+mrvl_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
+{
+	struct mrvl_priv *priv = dev->data->dev_private;
+	int ret, en;
+
+	if (!priv)
+		return -EPERM;
+
+	if (fc_conf->high_water ||
+	    fc_conf->low_water ||
+	    fc_conf->pause_time ||
+	    fc_conf->mac_ctrl_frame_fwd ||
+	    fc_conf->autoneg) {
+		MRVL_LOG(ERR, "Flowctrl parameter is not supported");
+
+		return -EINVAL;
+	}
+
+	/* Reject unsupported modes explicitly; the original silently
+	 * returned success for RTE_FC_TX_PAUSE and RTE_FC_FULL without
+	 * configuring anything.
+	 */
+	if (fc_conf->mode != RTE_FC_NONE &&
+	    fc_conf->mode != RTE_FC_RX_PAUSE) {
+		MRVL_LOG(ERR, "Only RX pause flowctrl mode is supported");
+
+		return -ENOTSUP;
+	}
+
+	en = fc_conf->mode == RTE_FC_NONE ? 0 : 1;
+	ret = pp2_ppio_set_rx_pause(priv->ppio, en);
+	if (ret)
+		MRVL_LOG(ERR,
+			"Failed to change flowctrl on RX side");
+
+	return ret;
+}
+
/**
* Update RSS hash configuration
*
{
struct mrvl_priv *priv = dev->data->dev_private;
+ if (priv->isolated)
+ return -ENOTSUP;
+
return mrvl_configure_rss(priv, rss_conf);
}
return 0;
}
+/**
+ * DPDK callback to get rte_flow callbacks.
+ *
+ * @param dev
+ *   Pointer to the device structure.
+ * @param filter_type
+ *   Flow filter type.
+ * @param filter_op
+ *   Flow filter operation.
+ * @param arg
+ *   Pointer to pass the flow ops.
+ *
+ * @return
+ *   0 on success, negative error value otherwise.
+ */
+static int
+mrvl_eth_filter_ctrl(struct rte_eth_dev *dev __rte_unused,
+		     enum rte_filter_type filter_type,
+		     enum rte_filter_op filter_op, void *arg)
+{
+	/* Only the generic (rte_flow) filter type is handled */
+	if (filter_type != RTE_ETH_FILTER_GENERIC) {
+		MRVL_LOG(WARNING, "Filter type (%d) not supported",
+			 filter_type);
+		return -EINVAL;
+	}
+
+	if (filter_op != RTE_ETH_FILTER_GET)
+		return -EINVAL;
+
+	*(const void **)arg = &mrvl_flow_ops;
+
+	return 0;
+}
+
static const struct eth_dev_ops mrvl_ops = {
.dev_configure = mrvl_dev_configure,
.dev_start = mrvl_dev_start,
.mtu_set = mrvl_mtu_set,
.stats_get = mrvl_stats_get,
.stats_reset = mrvl_stats_reset,
+ .xstats_get = mrvl_xstats_get,
+ .xstats_reset = mrvl_xstats_reset,
+ .xstats_get_names = mrvl_xstats_get_names,
.dev_infos_get = mrvl_dev_infos_get,
.dev_supported_ptypes_get = mrvl_dev_supported_ptypes_get,
.rxq_info_get = mrvl_rxq_info_get,
.txq_info_get = mrvl_txq_info_get,
.vlan_filter_set = mrvl_vlan_filter_set,
+ .tx_queue_start = mrvl_tx_queue_start,
+ .tx_queue_stop = mrvl_tx_queue_stop,
.rx_queue_setup = mrvl_rx_queue_setup,
.rx_queue_release = mrvl_rx_queue_release,
.tx_queue_setup = mrvl_tx_queue_setup,
.tx_queue_release = mrvl_tx_queue_release,
+ .flow_ctrl_get = mrvl_flow_ctrl_get,
+ .flow_ctrl_set = mrvl_flow_ctrl_set,
.rss_hash_update = mrvl_rss_hash_update,
.rss_hash_conf_get = mrvl_rss_hash_conf_get,
+ .filter_ctrl = mrvl_eth_filter_ctrl,
};
/**
*l4_offset = *l3_offset + MRVL_ARP_LENGTH;
break;
default:
- RTE_LOG(DEBUG, PMD, "Failed to recognise l3 packet type\n");
+ MRVL_LOG(DEBUG, "Failed to recognise l3 packet type");
break;
}
packet_type |= RTE_PTYPE_L4_UDP;
break;
default:
- RTE_LOG(DEBUG, PMD, "Failed to recognise l4 packet type\n");
+ MRVL_LOG(DEBUG, "Failed to recognise l4 packet type");
break;
}
ret = pp2_ppio_recv(q->priv->ppio, q->priv->rxq_map[q->queue_id].tc,
q->priv->rxq_map[q->queue_id].inq, descs, &nb_pkts);
if (unlikely(ret < 0)) {
- RTE_LOG(ERR, PMD, "Failed to receive packets\n");
+ MRVL_LOG(ERR, "Failed to receive packets");
return 0;
}
mrvl_port_bpool_size[bpool->pp2_id][bpool->id][core_id] -= nb_pkts;
(!rx_done && num < q->priv->bpool_init_size))) {
ret = mrvl_fill_bpool(q, MRVL_BURST_SIZE);
if (ret)
- RTE_LOG(ERR, PMD, "Failed to fill bpool\n");
+ MRVL_LOG(ERR, "Failed to fill bpool");
} else if (unlikely(num > q->priv->bpool_max_size)) {
int i;
int pkt_to_remove = num - q->priv->bpool_init_size;
struct rte_mbuf *mbuf;
struct pp2_buff_inf buff;
- RTE_LOG(DEBUG, PMD,
- "\nport-%d:%d: bpool %d oversize - remove %d buffers (pool size: %d -> %d)\n",
+ MRVL_LOG(DEBUG,
+ "port-%d:%d: bpool %d oversize - remove %d buffers (pool size: %d -> %d)",
bpool->pp2_id, q->priv->ppio->port_id,
bpool->id, pkt_to_remove, num,
q->priv->bpool_init_size);
for (i = 0; i < nb_done; i++) {
entry = &sq->ent[sq->tail + num];
if (unlikely(!entry->buff.addr)) {
- RTE_LOG(ERR, PMD,
- "Shadow memory @%d: cookie(%lx), pa(%lx)!\n",
+ MRVL_LOG(ERR,
+ "Shadow memory @%d: cookie(%lx), pa(%lx)!",
sq->tail, (u64)entry->buff.cookie,
(u64)entry->buff.addr);
skip_bufs = 1;
sq_free_size = MRVL_PP2_TX_SHADOWQ_SIZE - sq->size - 1;
if (unlikely(nb_pkts > sq_free_size)) {
- RTE_LOG(DEBUG, PMD,
- "No room in shadow queue for %d packets! %d packets will be sent.\n",
+ MRVL_LOG(DEBUG,
+ "No room in shadow queue for %d packets! %d packets will be sent.",
nb_pkts, sq_free_size);
nb_pkts = sq_free_size;
}
rte_zmalloc("mac_addrs",
ETHER_ADDR_LEN * MRVL_MAC_ADDRS_MAX, 0);
if (!eth_dev->data->mac_addrs) {
- RTE_LOG(ERR, PMD, "Failed to allocate space for eth addrs\n");
+ MRVL_LOG(ERR, "Failed to allocate space for eth addrs");
ret = -ENOMEM;
goto out_free_priv;
}
eth_dev->device = &vdev->device;
eth_dev->dev_ops = &mrvl_ops;
+ rte_eth_dev_probing_finish(eth_dev);
return 0;
out_free_mac:
rte_free(eth_dev->data->mac_addrs);
*/
if (!mrvl_qos_cfg) {
cfgnum = rte_kvargs_count(kvlist, MRVL_CFG_ARG);
- RTE_LOG(INFO, PMD, "Parsing config file!\n");
+ MRVL_LOG(INFO, "Parsing config file!");
if (cfgnum > 1) {
- RTE_LOG(ERR, PMD, "Cannot handle more than one config file!\n");
+ MRVL_LOG(ERR, "Cannot handle more than one config file!");
goto out_free_kvlist;
} else if (cfgnum == 1) {
rte_kvargs_process(kvlist, MRVL_CFG_ARG,
if (mrvl_dev_num)
goto init_devices;
- RTE_LOG(INFO, PMD, "Perform MUSDK initializations\n");
+ MRVL_LOG(INFO, "Perform MUSDK initializations");
/*
* ret == -EEXIST is correct, it means DMA
* has been already initialized (by another PMD).
if (ret != -EEXIST)
goto out_free_kvlist;
else
- RTE_LOG(INFO, PMD,
- "DMA memory has been already initialized by a different driver.\n");
+ MRVL_LOG(INFO,
+ "DMA memory has been already initialized by a different driver.");
}
ret = mrvl_init_pp2();
if (ret) {
- RTE_LOG(ERR, PMD, "Failed to init PP!\n");
+ MRVL_LOG(ERR, "Failed to init PP!");
goto out_deinit_dma;
}
init_devices:
for (i = 0; i < ifnum; i++) {
- RTE_LOG(INFO, PMD, "Creating %s\n", ifnames.names[i]);
+ MRVL_LOG(INFO, "Creating %s", ifnames.names[i]);
ret = mrvl_eth_dev_create(vdev, ifnames.names[i]);
if (ret)
goto out_cleanup;
if (!name)
return -EINVAL;
- RTE_LOG(INFO, PMD, "Removing %s\n", name);
+ MRVL_LOG(INFO, "Removing %s", name);
- for (i = 0; i < rte_eth_dev_count(); i++) {
+ RTE_ETH_FOREACH_DEV(i) { /* FIXME: removing all devices! */
char ifname[RTE_ETH_NAME_MAX_LEN];
rte_eth_dev_get_name_by_port(i, ifname);
}
if (mrvl_dev_num == 0) {
- RTE_LOG(INFO, PMD, "Perform MUSDK deinit\n");
+ MRVL_LOG(INFO, "Perform MUSDK deinit");
mrvl_deinit_hifs();
mrvl_deinit_pp2();
mv_sys_dma_mem_destroy();
.remove = rte_pmd_mrvl_remove,
};
-RTE_PMD_REGISTER_VDEV(net_mrvl, pmd_mrvl_drv);
-RTE_PMD_REGISTER_ALIAS(net_mrvl, eth_mrvl);
+RTE_PMD_REGISTER_VDEV(net_mvpp2, pmd_mrvl_drv);
+RTE_PMD_REGISTER_ALIAS(net_mvpp2, eth_mvpp2);
+
+/* Constructor: register the driver's dynamic log type and default its
+ * verbosity to NOTICE.
+ */
+RTE_INIT(mrvl_init_log)
+{
+	mrvl_logtype = rte_log_register("pmd.net.mvpp2");
+	if (mrvl_logtype >= 0)
+		rte_log_set_level(mrvl_logtype, RTE_LOG_NOTICE);
+}