#include "qede_ethdev.h"
#include <rte_alarm.h>
#include <rte_version.h>
+#include <rte_kvargs.h>
/* Globals */
static const struct qed_eth_ops *qed_ops;
}
#endif
+static void qede_reset_queue_stats(struct qede_dev *qdev, bool xstats)
+{
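+ /* edev is used only by DP_VERBOSE(), which compiles away unless
+ * RTE_LIBRTE_QEDE_DEBUG_DRIVER is set, hence the matching guard.
+ */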
+#ifdef RTE_LIBRTE_QEDE_DEBUG_DRIVER
+ struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+#endif
+ unsigned int i = 0, j = 0, qid;
+ unsigned int rxq_stat_cntrs, txq_stat_cntrs;
+ struct qede_tx_queue *txq;
+
+ DP_VERBOSE(edev, ECORE_MSG_DEBUG, "Clearing queue stats\n");
+
+ rxq_stat_cntrs = RTE_MIN(QEDE_RSS_COUNT(qdev),
+ RTE_ETHDEV_QUEUE_STAT_CNTRS);
+ txq_stat_cntrs = RTE_MIN(QEDE_TSS_COUNT(qdev),
+ RTE_ETHDEV_QUEUE_STAT_CNTRS);
+
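+ /* Zero each 64-bit counter in place via its offset within the
+ * queue structure, leaving ring state and configuration untouched.
+ */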
+ for_each_rss(qid) {
+ OSAL_MEMSET(((char *)(qdev->fp_array[qid].rxq)) +
+ offsetof(struct qede_rx_queue, rcv_pkts), 0,
+ sizeof(uint64_t));
+ OSAL_MEMSET(((char *)(qdev->fp_array[qid].rxq)) +
+ offsetof(struct qede_rx_queue, rx_hw_errors), 0,
+ sizeof(uint64_t));
+ OSAL_MEMSET(((char *)(qdev->fp_array[qid].rxq)) +
+ offsetof(struct qede_rx_queue, rx_alloc_errors), 0,
+ sizeof(uint64_t));
+
+ if (xstats)
+ for (j = 0; j < RTE_DIM(qede_rxq_xstats_strings); j++)
+ OSAL_MEMSET((((char *)
+ (qdev->fp_array[qid].rxq)) +
+ qede_rxq_xstats_strings[j].offset),
+ 0,
+ sizeof(uint64_t));
+
+ i++;
+ if (i == rxq_stat_cntrs)
+ break;
+ }
+
+ i = 0;
+
+ for_each_tss(qid) {
+ txq = qdev->fp_array[qid].txq;
+
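+ /* Clear the Tx packet counter via the same offset-based reset */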
+ OSAL_MEMSET(((char *)txq) +
+ offsetof(struct qede_tx_queue, xmit_pkts), 0,
+ sizeof(uint64_t));
+
+ i++;
+ if (i == txq_stat_cntrs)
+ break;
+ }
+}
+
static int
qede_start_vport(struct qede_dev *qdev, uint16_t mtu)
{
}
}
ecore_reset_vport_stats(edev);
+ if (IS_PF(edev))
+ qede_reset_queue_stats(qdev, true);
DP_INFO(edev, "VPORT started with MTU = %u\n", mtu);
return 0;
params.update_vport_active_tx_flg = 1;
params.vport_active_rx_flg = flg;
params.vport_active_tx_flg = flg;
+ if (!qdev->enable_tx_switching) {
+ if (IS_VF(edev)) {
+ params.update_tx_switching_flg = 1;
+ params.tx_switching_flg = !flg;
+ DP_INFO(edev, "VF tx-switching is disabled\n");
+ }
+ }
for_each_hwfn(edev, i) {
p_hwfn = &edev->hwfns[i];
params.opaque_fid = p_hwfn->hw_info.opaque_fid;
break;
}
}
- DP_INFO(edev, "vport %s\n", flg ? "activated" : "deactivated");
+ DP_INFO(edev, "vport is %s\n", flg ? "activated" : "deactivated");
+
return rc;
}
/* Enable LRO in split mode */
sge_tpa_params->tpa_ipv4_en_flg = enable;
sge_tpa_params->tpa_ipv6_en_flg = enable;
- sge_tpa_params->tpa_ipv4_tunn_en_flg = false;
- sge_tpa_params->tpa_ipv6_tunn_en_flg = false;
+ sge_tpa_params->tpa_ipv4_tunn_en_flg = enable;
+ sge_tpa_params->tpa_ipv6_tunn_en_flg = enable;
/* set if tpa enable changes */
sge_tpa_params->update_tpa_en_flg = 1;
/* set if tpa parameters should be handled */
return -1;
}
}
-
+ qdev->enable_lro = flg;
DP_INFO(edev, "LRO is %s\n", flg ? "enabled" : "disabled");
return 0;
return ecore_filter_accept_cmd(edev, 0, flags, false, false,
ECORE_SPQ_MODE_CB, NULL);
}
-static void qede_set_cmn_tunn_param(struct ecore_tunnel_info *p_tunn,
- uint8_t clss, bool mode, bool mask)
+
+static int
+qede_vxlan_enable(struct rte_eth_dev *eth_dev, uint8_t clss,
+ bool enable, bool mask)
{
- memset(p_tunn, 0, sizeof(struct ecore_tunnel_info));
- p_tunn->vxlan.b_update_mode = mode;
- p_tunn->vxlan.b_mode_enabled = mask;
- p_tunn->b_update_rx_cls = true;
- p_tunn->b_update_tx_cls = true;
- p_tunn->vxlan.tun_cls = clss;
+ struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+ struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+ enum _ecore_status_t rc = ECORE_INVAL;
+ struct ecore_ptt *p_ptt;
+ struct ecore_tunnel_info tunn;
+ struct ecore_hwfn *p_hwfn;
+ int i;
+
+ memset(&tunn, 0, sizeof(struct ecore_tunnel_info));
+ tunn.vxlan.b_update_mode = enable;
+ tunn.vxlan.b_mode_enabled = mask;
+ tunn.b_update_rx_cls = true;
+ tunn.b_update_tx_cls = true;
+ tunn.vxlan.tun_cls = clss;
+
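+ /* Apply the tunnel configuration on every HW function. A PTT
+ * window is needed only on the PF; VFs pass a NULL PTT.
+ */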
+ for_each_hwfn(edev, i) {
+ p_hwfn = &edev->hwfns[i];
+ if (IS_PF(edev)) {
+ p_ptt = ecore_ptt_acquire(p_hwfn);
+ if (!p_ptt)
+ return -EAGAIN;
+ } else {
+ p_ptt = NULL;
+ }
+ rc = ecore_sp_pf_update_tunn_cfg(p_hwfn, p_ptt,
+ &tunn, ECORE_SPQ_MODE_CB, NULL);
+ if (rc != ECORE_SUCCESS) {
+ DP_ERR(edev, "Failed to update tunn_clss %u\n",
+ tunn.vxlan.tun_cls);
+ if (IS_PF(edev))
+ ecore_ptt_release(p_hwfn, p_ptt);
+ break;
+ }
+ /* Also release the PTT window on success; otherwise the limited
+ * per-hwfn PTT entries would leak on every call.
+ */
+ if (IS_PF(edev))
+ ecore_ptt_release(p_hwfn, p_ptt);
+ }
+
+ if (rc == ECORE_SUCCESS) {
+ qdev->vxlan.enable = enable;
+ qdev->vxlan.udp_port = (enable) ? QEDE_VXLAN_DEF_PORT : 0;
+ DP_INFO(edev, "vxlan is %s\n", enable ? "enabled" : "disabled");
+ }
+
+ return rc;
}
static int
return rc;
}
-static void qede_vlan_offload_set(struct rte_eth_dev *eth_dev, int mask)
+static int qede_vlan_offload_set(struct rte_eth_dev *eth_dev, int mask)
{
struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
DP_INFO(edev, "vlan offload mask %d vlan-strip %d vlan-filter %d\n",
mask, rxmode->hw_vlan_strip, rxmode->hw_vlan_filter);
+
+ return 0;
}
static void qede_prandom_bytes(uint32_t *buff)
static int qede_dev_start(struct rte_eth_dev *eth_dev)
{
+ struct rte_eth_rxmode *rxmode = &eth_dev->data->dev_conf.rxmode;
struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
if (qede_update_mtu(eth_dev, qdev->new_mtu))
goto err;
qdev->mtu = qdev->new_mtu;
- /* If MTU has changed then update TPA too */
- if (qdev->enable_lro)
- if (qede_enable_tpa(eth_dev, true))
- goto err;
+ }
+
+ /* Configure TPA parameters */
+ if (rxmode->enable_lro) {
+ if (qede_enable_tpa(eth_dev, true))
+ return -EINVAL;
+ /* Enable scatter mode for LRO */
+ if (!rxmode->enable_scatter)
+ eth_dev->data->scattered_rx = 1;
}
/* Start queues */
* Also, we would like to retain similar behavior in PF case, so we
* don't do PF/VF specific check here.
*/
- if (eth_dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_RSS)
+ if (rxmode->mq_mode == ETH_MQ_RX_RSS)
if (qede_config_rss(eth_dev))
goto err;
/* Bring-up the link */
qede_dev_set_link_state(eth_dev, true);
+ /* Update link status */
+ qede_link_update(eth_dev, 0);
+
/* Start/resume traffic */
qede_fastpath_start(edev);
if (qdev->enable_lro)
qede_enable_tpa(eth_dev, false);
- /* TODO: Do we need disable LRO or RSS */
/* Stop queues */
qede_stop_queues(eth_dev);
DP_INFO(edev, "Device is stopped\n");
}
+#define QEDE_TX_SWITCHING "vf_txswitch"
+
+static const char * const valid_args[] = {
+ QEDE_TX_SWITCHING,
+ NULL,
+};
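+
+/* The devarg is passed per PCI device on the EAL command line, e.g.
+ * -w 05:00.0,vf_txswitch=0 (hypothetical BDF) to disable VF Tx switching.
+ */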
+
+static int qede_args_check(const char *key, const char *val, void *opaque)
+{
+ unsigned long tmp;
+ int ret = 0;
+ struct rte_eth_dev *eth_dev = opaque;
+ struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+#ifdef RTE_LIBRTE_QEDE_DEBUG_INFO
+ struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+#endif
+
+ errno = 0;
+ tmp = strtoul(val, NULL, 0);
+ if (errno) {
+ DP_INFO(edev, "%s: \"%s\" is not a valid integer\n", key, val);
+ return errno;
+ }
+
+ if (strcmp(QEDE_TX_SWITCHING, key) == 0)
+ qdev->enable_tx_switching = !!tmp;
+
+ return ret;
+}
+
+static int qede_args(struct rte_eth_dev *eth_dev)
+{
+ struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(eth_dev->device);
+ struct rte_kvargs *kvlist;
+ struct rte_devargs *devargs;
+ int ret;
+ int i;
+
+ devargs = pci_dev->device.devargs;
+ if (!devargs)
+ return 0; /* return success */
+
+ kvlist = rte_kvargs_parse(devargs->args, valid_args);
+ if (kvlist == NULL)
+ return -EINVAL;
+
+ /* Process parameters. */
+ for (i = 0; (valid_args[i] != NULL); ++i) {
+ if (rte_kvargs_count(kvlist, valid_args[i])) {
+ ret = rte_kvargs_process(kvlist, valid_args[i],
+ qede_args_check, eth_dev);
+ if (ret != 0) {
+ rte_kvargs_free(kvlist);
+ return ret;
+ }
+ }
+ }
+ rte_kvargs_free(kvlist);
+
+ return 0;
+}
+
static int qede_dev_configure(struct rte_eth_dev *eth_dev)
{
struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
struct rte_eth_rxmode *rxmode = &eth_dev->data->dev_conf.rxmode;
+ int ret;
PMD_INIT_FUNC_TRACE(edev);
/* Check requirements for 100G mode */
- if (edev->num_hwfns > 1) {
+ if (ECORE_IS_CMT(edev)) {
if (eth_dev->data->nb_rx_queues < 2 ||
eth_dev->data->nb_tx_queues < 2) {
DP_ERR(edev, "100G mode needs min. 2 RX/TX queues\n");
}
}
+ /* We need a minimum of one RX queue. There is no min check in
+ * rte_eth_dev_configure(), so we are checking it here.
+ */
+ if (eth_dev->data->nb_rx_queues == 0) {
+ DP_ERR(edev, "Minimum one RX queue is required\n");
+ return -EINVAL;
+ }
+
+ /* Enable Tx switching by default */
+ qdev->enable_tx_switching = 1;
+
+ /* Parse devargs and apply driver-specific parameters */
+ if (qede_args(eth_dev))
+ return -ENOTSUP;
+
/* Sanity checks and throw warnings */
if (rxmode->enable_scatter)
eth_dev->data->scattered_rx = 1;
return -ENOMEM;
}
+ /* If jumbo enabled adjust MTU */
+ if (eth_dev->data->dev_conf.rxmode.jumbo_frame)
+ eth_dev->data->mtu =
+ eth_dev->data->dev_conf.rxmode.max_rx_pkt_len -
+ ETHER_HDR_LEN - ETHER_CRC_LEN;
+
/* VF's MTU has to be set using vport-start where as
* PF's MTU can be updated via vport-update.
*/
if (IS_VF(edev)) {
- if (qede_start_vport(qdev, rxmode->max_rx_pkt_len))
+ if (qede_start_vport(qdev, eth_dev->data->mtu))
return -1;
} else {
- if (qede_update_mtu(eth_dev, rxmode->max_rx_pkt_len))
+ if (qede_update_mtu(eth_dev, eth_dev->data->mtu))
return -1;
}
- qdev->mtu = rxmode->max_rx_pkt_len;
+ qdev->mtu = eth_dev->data->mtu;
qdev->new_mtu = qdev->mtu;
- /* Configure TPA parameters */
- if (rxmode->enable_lro) {
- if (qede_enable_tpa(eth_dev, true))
- return -EINVAL;
- /* Enable scatter mode for LRO */
- if (!rxmode->enable_scatter)
- eth_dev->data->scattered_rx = 1;
- }
- qdev->enable_lro = rxmode->enable_lro;
-
/* Enable VLAN offloads by default */
- qede_vlan_offload_set(eth_dev, ETH_VLAN_STRIP_MASK |
+ ret = qede_vlan_offload_set(eth_dev, ETH_VLAN_STRIP_MASK |
ETH_VLAN_FILTER_MASK |
ETH_VLAN_EXTEND_MASK);
+ if (ret)
+ return ret;
DP_INFO(edev, "Device configured with RSS=%d TSS=%d\n",
QEDE_RSS_COUNT(qdev), QEDE_TSS_COUNT(qdev));
}
/* return 0 means link status changed, -1 means not changed */
-static int
+int
qede_link_update(struct rte_eth_dev *eth_dev, __rte_unused int wait_to_complete)
{
struct qede_dev *qdev = eth_dev->data->dev_private;
rte_intr_disable(&pci_dev->intr_handle);
rte_intr_callback_unregister(&pci_dev->intr_handle,
qede_interrupt_handler, (void *)eth_dev);
- if (edev->num_hwfns > 1)
+ if (ECORE_IS_CMT(edev))
rte_eal_alarm_cancel(qede_poll_sp_sb_cb, (void *)eth_dev);
}
-static void
+static int
qede_get_stats(struct rte_eth_dev *eth_dev, struct rte_eth_stats *eth_stats)
{
struct qede_dev *qdev = eth_dev->data->dev_private;
if (j == txq_stat_cntrs)
break;
}
+
+ return 0;
}
static unsigned
struct ecore_dev *edev = &qdev->edev;
ecore_reset_vport_stats(edev);
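+ /* Clear both basic and extended (xstats) per-queue counters */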
+ qede_reset_queue_stats(qdev, true);
}
int qede_dev_set_link_state(struct rte_eth_dev *eth_dev, bool link_up)
struct ecore_dev *edev = &qdev->edev;
ecore_reset_vport_stats(edev);
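+ /* Clear only the basic per-queue counters */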
+ qede_reset_queue_stats(qdev, false);
}
static void qede_allmulticast_enable(struct rte_eth_dev *eth_dev)
qede_dev_supported_ptypes_get(struct rte_eth_dev *eth_dev)
{
static const uint32_t ptypes[] = {
+ RTE_PTYPE_L2_ETHER,
+ RTE_PTYPE_L2_ETHER_VLAN,
RTE_PTYPE_L3_IPV4,
RTE_PTYPE_L3_IPV6,
+ RTE_PTYPE_L4_TCP,
+ RTE_PTYPE_L4_UDP,
+ RTE_PTYPE_TUNNEL_VXLAN,
+ RTE_PTYPE_L4_FRAG,
+ /* Inner */
+ RTE_PTYPE_INNER_L2_ETHER,
+ RTE_PTYPE_INNER_L2_ETHER_VLAN,
+ RTE_PTYPE_INNER_L3_IPV4,
+ RTE_PTYPE_INNER_L3_IPV6,
+ RTE_PTYPE_INNER_L4_TCP,
+ RTE_PTYPE_INNER_L4_UDP,
+ RTE_PTYPE_INNER_L4_FRAG,
RTE_PTYPE_UNKNOWN
};
memset(&vport_update_params, 0, sizeof(vport_update_params));
params = rte_zmalloc("qede_rss", sizeof(*params) * edev->num_hwfns,
RTE_CACHE_LINE_SIZE);
+ if (params == NULL) {
+ DP_ERR(edev, "failed to allocate memory\n");
+ return -ENOMEM;
+ }
for (i = 0; i < reta_size; i++) {
idx = i / RTE_RETA_GROUP_SIZE;
params->update_rss_config = 1;
/* Fix up RETA for CMT mode device */
- if (edev->num_hwfns > 1)
+ if (ECORE_IS_CMT(edev))
qdev->rss_enable = qede_update_rss_parm_cmt(edev,
params);
vport_update_params.vport_id = 0;
struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
struct rte_eth_dev_info dev_info = {0};
struct qede_fastpath *fp;
+ uint32_t max_rx_pkt_len;
uint32_t frame_size;
uint16_t rx_buf_size;
uint16_t bufsz;
+ bool restart = false;
int i;
PMD_INIT_FUNC_TRACE(edev);
+ if (IS_VF(edev))
+ return -ENOTSUP;
qede_dev_info_get(dev, &dev_info);
- frame_size = mtu + QEDE_ETH_OVERHEAD;
+ max_rx_pkt_len = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
+ frame_size = max_rx_pkt_len + QEDE_ETH_OVERHEAD;
if ((mtu < ETHER_MIN_MTU) || (frame_size > dev_info.max_rx_pktlen)) {
- DP_ERR(edev, "MTU %u out of range\n", mtu);
+ DP_ERR(edev, "MTU %u out of range, %u is maximum allowable\n",
+ mtu, dev_info.max_rx_pktlen - ETHER_HDR_LEN -
+ ETHER_CRC_LEN - QEDE_ETH_OVERHEAD);
return -EINVAL;
}
if (!dev->data->scattered_rx &&
*/
dev->rx_pkt_burst = qede_rxtx_pkts_dummy;
dev->tx_pkt_burst = qede_rxtx_pkts_dummy;
- qede_dev_stop(dev);
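+ /* Stop traffic only if the port was running, and remember to
+ * restart it once the new MTU has been applied.
+ */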
+ if (dev->data->dev_started) {
+ dev->data->dev_started = 0;
+ qede_dev_stop(dev);
+ restart = true;
+ }
rte_delay_ms(1000);
- qdev->mtu = mtu;
+ qdev->new_mtu = mtu;
/* Fix up RX buf size for all queues of the port */
for_each_rss(i) {
fp = &qdev->fp_array[i];
- bufsz = (uint16_t)rte_pktmbuf_data_room_size(
- fp->rxq->mb_pool) - RTE_PKTMBUF_HEADROOM;
- if (dev->data->scattered_rx)
- rx_buf_size = bufsz + QEDE_ETH_OVERHEAD;
- else
- rx_buf_size = mtu + QEDE_ETH_OVERHEAD;
- rx_buf_size = QEDE_CEIL_TO_CACHE_LINE_SIZE(rx_buf_size);
- fp->rxq->rx_buf_size = rx_buf_size;
- DP_INFO(edev, "buf_size adjusted to %u\n", rx_buf_size);
+ if (fp->rxq != NULL) {
+ bufsz = (uint16_t)rte_pktmbuf_data_room_size(
+ fp->rxq->mb_pool) - RTE_PKTMBUF_HEADROOM;
+ if (dev->data->scattered_rx)
+ rx_buf_size = bufsz + ETHER_HDR_LEN +
+ ETHER_CRC_LEN + QEDE_ETH_OVERHEAD;
+ else
+ rx_buf_size = frame_size;
+ rx_buf_size = QEDE_CEIL_TO_CACHE_LINE_SIZE(rx_buf_size);
+ fp->rxq->rx_buf_size = rx_buf_size;
+ DP_INFO(edev, "buf_size adjusted to %u\n", rx_buf_size);
+ }
}
- qede_dev_start(dev);
- if (frame_size > ETHER_MAX_LEN)
+ if (max_rx_pkt_len > ETHER_MAX_LEN)
dev->data->dev_conf.rxmode.jumbo_frame = 1;
else
dev->data->dev_conf.rxmode.jumbo_frame = 0;
+ if (!dev->data->dev_started && restart) {
+ qede_dev_start(dev);
+ dev->data->dev_started = 1;
+ }
/* update max frame size */
- dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
+ dev->data->dev_conf.rxmode.max_rx_pkt_len = max_rx_pkt_len;
/* Reassign back */
dev->rx_pkt_burst = qede_recv_pkts;
dev->tx_pkt_burst = qede_xmit_pkts;
struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
struct ecore_tunnel_info tunn; /* @DPDK */
struct ecore_hwfn *p_hwfn;
+ struct ecore_ptt *p_ptt;
+ uint16_t udp_port;
int rc, i;
PMD_INIT_FUNC_TRACE(edev);
memset(&tunn, 0, sizeof(tunn));
if (tunnel_udp->prot_type == RTE_TUNNEL_TYPE_VXLAN) {
+ /* Enable VxLAN tunnel if needed before UDP port update using
+ * default MAC/VLAN classification.
+ */
+ if (add) {
+ if (qdev->vxlan.udp_port == tunnel_udp->udp_port) {
+ DP_INFO(edev,
+ "UDP port %u was already configured\n",
+ tunnel_udp->udp_port);
+ return ECORE_SUCCESS;
+ }
+ /* Enable VXLAN if it was not enabled while adding
+ * VXLAN filter.
+ */
+ if (!qdev->vxlan.enable) {
+ rc = qede_vxlan_enable(eth_dev,
+ ECORE_TUNN_CLSS_MAC_VLAN, true, true);
+ if (rc != ECORE_SUCCESS) {
+ DP_ERR(edev, "Failed to enable VXLAN "
+ "prior to updating UDP port\n");
+ return rc;
+ }
+ }
+ udp_port = tunnel_udp->udp_port;
+ } else {
+ if (qdev->vxlan.udp_port != tunnel_udp->udp_port) {
+ DP_ERR(edev, "UDP port %u doesn't exist\n",
+ tunnel_udp->udp_port);
+ return ECORE_INVAL;
+ }
+ udp_port = 0;
+ }
+
tunn.vxlan_port.b_update_port = true;
- tunn.vxlan_port.port = (add) ? tunnel_udp->udp_port :
- QEDE_VXLAN_DEF_PORT;
+ tunn.vxlan_port.port = udp_port;
for_each_hwfn(edev, i) {
p_hwfn = &edev->hwfns[i];
- rc = ecore_sp_pf_update_tunn_cfg(p_hwfn, &tunn,
+ if (IS_PF(edev)) {
+ p_ptt = ecore_ptt_acquire(p_hwfn);
+ if (!p_ptt)
+ return -EAGAIN;
+ } else {
+ p_ptt = NULL;
+ }
+ rc = ecore_sp_pf_update_tunn_cfg(p_hwfn, p_ptt, &tunn,
ECORE_SPQ_MODE_CB, NULL);
if (rc != ECORE_SUCCESS) {
DP_ERR(edev, "Unable to config UDP port %u\n",
tunn.vxlan_port.port);
+ if (IS_PF(edev))
+ ecore_ptt_release(p_hwfn, p_ptt);
return rc;
}
+ /* Release the PTT window on success as well to avoid leaking
+ * per-hwfn PTT entries.
+ */
+ if (IS_PF(edev))
+ ecore_ptt_release(p_hwfn, p_ptt);
}
+
+ qdev->vxlan.udp_port = udp_port;
+ /* If the request is to delete the UDP port and the number of
+ * VXLAN filters has reached 0, then VxLAN offload can be
+ * disabled.
+ */
+ if (!add && qdev->vxlan.enable && qdev->vxlan.num_filters == 0)
+ return qede_vxlan_enable(eth_dev,
+ ECORE_TUNN_CLSS_MAC_VLAN, false, true);
}
return 0;
{
struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
- struct ecore_tunnel_info tunn;
- struct ecore_hwfn *p_hwfn;
enum ecore_filter_ucast_type type;
- enum ecore_tunn_clss clss;
- struct ecore_filter_ucast ucast;
+ enum ecore_tunn_clss clss = MAX_ECORE_TUNN_CLSS;
+ struct ecore_filter_ucast ucast = {0};
char str[80];
- uint16_t filter_type;
- int rc, i;
+ uint16_t filter_type = 0;
+ int rc;
PMD_INIT_FUNC_TRACE(edev);
- filter_type = conf->filter_type | qdev->vxlan_filter_type;
- /* First determine if the given filter classification is supported */
- qede_get_ecore_tunn_params(filter_type, &type, &clss, str);
- if (clss == MAX_ECORE_TUNN_CLSS) {
- DP_ERR(edev, "Wrong filter type\n");
- return -EINVAL;
- }
- /* Init tunnel ucast params */
- rc = qede_set_ucast_tunn_cmn_param(&ucast, conf, type);
- if (rc != ECORE_SUCCESS) {
- DP_ERR(edev, "Unsupported VxLAN filter type 0x%x\n",
- conf->filter_type);
- return rc;
- }
- DP_INFO(edev, "Rule: \"%s\", op %d, type 0x%x\n",
- str, filter_op, ucast.type);
switch (filter_op) {
case RTE_ETH_FILTER_ADD:
+ if (IS_VF(edev))
+ return qede_vxlan_enable(eth_dev,
+ ECORE_TUNN_CLSS_MAC_VLAN, true, true);
+
+ filter_type = conf->filter_type;
+ /* Determine if the given filter classification is supported */
+ qede_get_ecore_tunn_params(filter_type, &type, &clss, str);
+ if (clss == MAX_ECORE_TUNN_CLSS) {
+ DP_ERR(edev, "Unsupported filter type\n");
+ return -EINVAL;
+ }
+ /* Init tunnel ucast params */
+ rc = qede_set_ucast_tunn_cmn_param(&ucast, conf, type);
+ if (rc != ECORE_SUCCESS) {
+ DP_ERR(edev, "Unsupported VxLAN filter type 0x%x\n",
+ conf->filter_type);
+ return rc;
+ }
+ DP_INFO(edev, "Rule: \"%s\", op %d, type 0x%x\n",
+ str, filter_op, ucast.type);
+
ucast.opcode = ECORE_FILTER_ADD;
/* Skip MAC/VLAN if filter is based on VNI */
if (rc != ECORE_SUCCESS)
return rc;
- qdev->vxlan_filter_type = filter_type;
+ qdev->vxlan.num_filters++;
+ qdev->vxlan.filter_type = filter_type;
+ if (!qdev->vxlan.enable)
+ return qede_vxlan_enable(eth_dev, clss, true, true);
- DP_INFO(edev, "Enabling VXLAN tunneling\n");
- qede_set_cmn_tunn_param(&tunn, clss, true, true);
- for_each_hwfn(edev, i) {
- p_hwfn = &edev->hwfns[i];
- rc = ecore_sp_pf_update_tunn_cfg(p_hwfn,
- &tunn, ECORE_SPQ_MODE_CB, NULL);
- if (rc != ECORE_SUCCESS) {
- DP_ERR(edev, "Failed to update tunn_clss %u\n",
- tunn.vxlan.tun_cls);
- }
- }
- qdev->num_tunn_filters++; /* Filter added successfully */
break;
case RTE_ETH_FILTER_DELETE:
+ if (IS_VF(edev))
+ return qede_vxlan_enable(eth_dev,
+ ECORE_TUNN_CLSS_MAC_VLAN, false, true);
+
+ filter_type = conf->filter_type;
+ /* Determine if the given filter classification is supported */
+ qede_get_ecore_tunn_params(filter_type, &type, &clss, str);
+ if (clss == MAX_ECORE_TUNN_CLSS) {
+ DP_ERR(edev, "Unsupported filter type\n");
+ return -EINVAL;
+ }
+ /* Init tunnel ucast params */
+ rc = qede_set_ucast_tunn_cmn_param(&ucast, conf, type);
+ if (rc != ECORE_SUCCESS) {
+ DP_ERR(edev, "Unsupported VxLAN filter type 0x%x\n",
+ conf->filter_type);
+ return rc;
+ }
+ DP_INFO(edev, "Rule: \"%s\", op %d, type 0x%x\n",
+ str, filter_op, ucast.type);
+
ucast.opcode = ECORE_FILTER_REMOVE;
if (!(filter_type & ETH_TUNNEL_FILTER_TENID)) {
if (rc != ECORE_SUCCESS)
return rc;
- qdev->vxlan_filter_type = filter_type;
- qdev->num_tunn_filters--;
+ qdev->vxlan.num_filters--;
/* Disable VXLAN if VXLAN filters become 0 */
- if (qdev->num_tunn_filters == 0) {
- DP_INFO(edev, "Disabling VXLAN tunneling\n");
-
- /* Use 0 as tunnel mode */
- qede_set_cmn_tunn_param(&tunn, clss, false, true);
- for_each_hwfn(edev, i) {
- p_hwfn = &edev->hwfns[i];
- rc = ecore_sp_pf_update_tunn_cfg(p_hwfn, &tunn,
- ECORE_SPQ_MODE_CB, NULL);
- if (rc != ECORE_SUCCESS) {
- DP_ERR(edev,
- "Failed to update tunn_clss %u\n",
- tunn.vxlan.tun_cls);
- break;
- }
- }
- }
+ if (qdev->vxlan.num_filters == 0)
+ return qede_vxlan_enable(eth_dev, clss, false, true);
break;
default:
DP_ERR(edev, "Unsupported operation %d\n", filter_op);
return -EINVAL;
}
- DP_INFO(edev, "Current VXLAN filters %d\n", qdev->num_tunn_filters);
return 0;
}
.reta_update = qede_rss_reta_update,
.reta_query = qede_rss_reta_query,
.mtu_set = qede_set_mtu,
+ .udp_tunnel_port_add = qede_udp_dst_port_add,
+ .udp_tunnel_port_del = qede_udp_dst_port_del,
};
static void qede_update_pf_params(struct ecore_dev *edev)
/* Extract key data structures */
adapter = eth_dev->data->dev_private;
+ adapter->ethdev = eth_dev;
edev = &adapter->edev;
pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
pci_addr = pci_dev->addr;
* This is required since uio device uses only one MSI-x
* interrupt vector but we need one for each engine.
*/
- if (edev->num_hwfns > 1 && IS_PF(edev)) {
+ if (ECORE_IS_CMT(edev) && IS_PF(edev)) {
rc = rte_eal_alarm_set(timer_period * US_PER_S,
qede_poll_sp_sb_cb,
(void *)eth_dev);