1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright (c) 2017 Cavium Inc.
10 #include <rte_errno.h>
11 #include <rte_flow_driver.h>
13 #include "qede_ethdev.h"
15 /* UDP (VXLAN/GENEVE) tunnel classification mapping */
16 const struct _qede_udp_tunn_types {
17 uint16_t rte_filter_type;
18 enum ecore_filter_ucast_type qede_type;
19 enum ecore_tunn_clss qede_tunn_clss;
21 } qede_tunn_types[] = {
23 ETH_TUNNEL_FILTER_OMAC,
25 ECORE_TUNN_CLSS_MAC_VLAN,
29 ETH_TUNNEL_FILTER_TENID,
31 ECORE_TUNN_CLSS_MAC_VNI,
35 ETH_TUNNEL_FILTER_IMAC,
36 ECORE_FILTER_INNER_MAC,
37 ECORE_TUNN_CLSS_INNER_MAC_VLAN,
41 ETH_TUNNEL_FILTER_IVLAN,
42 ECORE_FILTER_INNER_VLAN,
43 ECORE_TUNN_CLSS_INNER_MAC_VLAN,
47 ETH_TUNNEL_FILTER_OMAC | ETH_TUNNEL_FILTER_TENID,
48 ECORE_FILTER_MAC_VNI_PAIR,
49 ECORE_TUNN_CLSS_MAC_VNI,
53 ETH_TUNNEL_FILTER_OMAC | ETH_TUNNEL_FILTER_IMAC,
56 "outer-mac and inner-mac"
59 ETH_TUNNEL_FILTER_OMAC | ETH_TUNNEL_FILTER_IVLAN,
62 "outer-mac and inner-vlan"
65 ETH_TUNNEL_FILTER_TENID | ETH_TUNNEL_FILTER_IMAC,
66 ECORE_FILTER_INNER_MAC_VNI_PAIR,
67 ECORE_TUNN_CLSS_INNER_MAC_VNI,
71 ETH_TUNNEL_FILTER_TENID | ETH_TUNNEL_FILTER_IVLAN,
77 ETH_TUNNEL_FILTER_IMAC | ETH_TUNNEL_FILTER_IVLAN,
78 ECORE_FILTER_INNER_PAIR,
79 ECORE_TUNN_CLSS_INNER_MAC_VLAN,
80 "inner-mac and inner-vlan",
83 ETH_TUNNEL_FILTER_OIP,
89 ETH_TUNNEL_FILTER_IIP,
95 RTE_TUNNEL_FILTER_IMAC_IVLAN,
101 RTE_TUNNEL_FILTER_IMAC_IVLAN_TENID,
107 RTE_TUNNEL_FILTER_IMAC_TENID,
113 RTE_TUNNEL_FILTER_OMAC_TENID_IMAC,
120 #define IP_VERSION (0x40)
121 #define IP_HDRLEN (0x5)
122 #define QEDE_FDIR_IP_DEFAULT_VERSION_IHL (IP_VERSION | IP_HDRLEN)
123 #define QEDE_FDIR_TCP_DEFAULT_DATAOFF (0x50)
124 #define QEDE_FDIR_IPV4_DEF_TTL (64)
125 #define QEDE_FDIR_IPV6_DEFAULT_VTC_FLOW (0x60000000)
126 /* Sum of length of header types of L2, L3, L4.
127 * L2 : ether_hdr + vlan_hdr + vxlan_hdr
131 #define QEDE_MAX_FDIR_PKT_LEN (86)
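/* For reference, a plausible breakdown of the 86-byte maximum (assuming the
 * largest combination of the header types listed above):
 *   ether_hdr (14) + vlan_hdr (4) + vxlan_hdr (8) +
 *   ipv6_hdr (40) + tcp_hdr (20) = 86 bytes
 */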
133 static inline bool qede_valid_flow(uint16_t flow_type)
135 return ((flow_type == RTE_ETH_FLOW_NONFRAG_IPV4_TCP) ||
136 (flow_type == RTE_ETH_FLOW_NONFRAG_IPV4_UDP) ||
137 (flow_type == RTE_ETH_FLOW_NONFRAG_IPV6_TCP) ||
138 (flow_type == RTE_ETH_FLOW_NONFRAG_IPV6_UDP));
142 qede_arfs_construct_pkt(struct rte_eth_dev *eth_dev,
143 struct qede_arfs_entry *arfs,
145 struct ecore_arfs_config_params *params);
147 /* Note: Flowdir support is only partial.
148 * For example, drop_queue, FDIR masks and flex_conf are not supported.
149 * Parameters like pballoc/status fields are irrelevant here.
151 int qede_check_fdir_support(struct rte_eth_dev *eth_dev)
153 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
154 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
155 struct rte_fdir_conf *fdir = ð_dev->data->dev_conf.fdir_conf;
157 /* check FDIR modes */
158 switch (fdir->mode) {
159 case RTE_FDIR_MODE_NONE:
160 qdev->arfs_info.arfs.mode = ECORE_FILTER_CONFIG_MODE_DISABLE;
161 DP_INFO(edev, "flowdir is disabled\n");
163 case RTE_FDIR_MODE_PERFECT:
164 if (ECORE_IS_CMT(edev)) {
165 DP_ERR(edev, "flowdir is not supported in 100G mode\n");
166 qdev->arfs_info.arfs.mode =
167 ECORE_FILTER_CONFIG_MODE_DISABLE;
170 qdev->arfs_info.arfs.mode =
171 ECORE_FILTER_CONFIG_MODE_5_TUPLE;
172 DP_INFO(edev, "flowdir is enabled (5 Tuple mode)\n");
174 case RTE_FDIR_MODE_PERFECT_TUNNEL:
175 case RTE_FDIR_MODE_SIGNATURE:
176 case RTE_FDIR_MODE_PERFECT_MAC_VLAN:
177 DP_ERR(edev, "Unsupported flowdir mode %d\n", fdir->mode);
184 void qede_fdir_dealloc_resc(struct rte_eth_dev *eth_dev)
186 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
187 struct qede_arfs_entry *tmp = NULL;
189 SLIST_FOREACH(tmp, &qdev->arfs_info.arfs_list_head, list) {
192 rte_memzone_free(tmp->mz);
193 SLIST_REMOVE(&qdev->arfs_info.arfs_list_head, tmp,
194 qede_arfs_entry, list);
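/* Translate an rte_eth_fdir_filter into the PMD-internal aRFS tuple
 * (L3 addresses, L4 ports, IP protocol) and the target Rx queue.
 */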
201 qede_fdir_to_arfs_filter(struct rte_eth_dev *eth_dev,
202 struct rte_eth_fdir_filter *fdir,
203 struct qede_arfs_entry *arfs)
205 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
206 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
207 struct rte_eth_fdir_input *input;
209 static const uint8_t next_proto[] = {
210 [RTE_ETH_FLOW_NONFRAG_IPV4_TCP] = IPPROTO_TCP,
211 [RTE_ETH_FLOW_NONFRAG_IPV4_UDP] = IPPROTO_UDP,
212 [RTE_ETH_FLOW_NONFRAG_IPV6_TCP] = IPPROTO_TCP,
213 [RTE_ETH_FLOW_NONFRAG_IPV6_UDP] = IPPROTO_UDP,
216 input = &fdir->input;
218 DP_INFO(edev, "flow_type %d\n", input->flow_type);
220 switch (input->flow_type) {
221 case RTE_ETH_FLOW_NONFRAG_IPV4_TCP:
222 case RTE_ETH_FLOW_NONFRAG_IPV4_UDP:
223 /* fill the common ip header */
224 arfs->tuple.eth_proto = ETHER_TYPE_IPv4;
225 arfs->tuple.dst_ipv4 = input->flow.ip4_flow.dst_ip;
226 arfs->tuple.src_ipv4 = input->flow.ip4_flow.src_ip;
227 arfs->tuple.ip_proto = next_proto[input->flow_type];
230 if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV4_UDP) {
231 arfs->tuple.dst_port = input->flow.udp4_flow.dst_port;
232 arfs->tuple.src_port = input->flow.udp4_flow.src_port;
234 arfs->tuple.dst_port = input->flow.tcp4_flow.dst_port;
235 arfs->tuple.src_port = input->flow.tcp4_flow.src_port;
238 case RTE_ETH_FLOW_NONFRAG_IPV6_TCP:
239 case RTE_ETH_FLOW_NONFRAG_IPV6_UDP:
240 arfs->tuple.eth_proto = ETHER_TYPE_IPv6;
241 arfs->tuple.ip_proto = next_proto[input->flow_type];
242 rte_memcpy(arfs->tuple.dst_ipv6,
243 &input->flow.ipv6_flow.dst_ip,
245 rte_memcpy(arfs->tuple.src_ipv6,
246 &input->flow.ipv6_flow.src_ip,
250 if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV6_UDP) {
251 arfs->tuple.dst_port = input->flow.udp6_flow.dst_port;
252 arfs->tuple.src_port = input->flow.udp6_flow.src_port;
254 arfs->tuple.dst_port = input->flow.tcp6_flow.dst_port;
255 arfs->tuple.src_port = input->flow.tcp6_flow.src_port;
259 DP_ERR(edev, "Unsupported flow_type %u\n",
264 arfs->rx_queue = fdir->action.rx_queue;
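/* Add or delete an aRFS filter: build the match packet in a temporary
 * memzone, check the aRFS list for a duplicate (add) or a matching entry
 * (delete), program the n-tuple filter through ecore, and keep
 * filter_count and the aRFS searcher mode in sync.
 */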
269 qede_config_arfs_filter(struct rte_eth_dev *eth_dev,
270 struct qede_arfs_entry *arfs,
273 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
274 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
275 char mz_name[RTE_MEMZONE_NAMESIZE] = {0};
276 struct qede_arfs_entry *tmp = NULL;
277 const struct rte_memzone *mz;
278 struct ecore_hwfn *p_hwfn;
279 enum _ecore_status_t rc;
284 if (qdev->arfs_info.filter_count == QEDE_RFS_MAX_FLTR - 1) {
285 DP_ERR(edev, "Reached max flowdir filter limit\n");
290 /* soft_id could have been used as memzone string, but soft_id is
291 * not currently used so it has no significance.
293 snprintf(mz_name, sizeof(mz_name) - 1, "%lx",
294 (unsigned long)rte_get_timer_cycles());
295 mz = rte_memzone_reserve_aligned(mz_name, QEDE_MAX_FDIR_PKT_LEN,
296 SOCKET_ID_ANY, 0, RTE_CACHE_LINE_SIZE);
298 DP_ERR(edev, "Failed to allocate memzone for fdir, err = %s\n",
299 rte_strerror(rte_errno));
304 memset(pkt, 0, QEDE_MAX_FDIR_PKT_LEN);
305 pkt_len = qede_arfs_construct_pkt(eth_dev, arfs, pkt,
306 &qdev->arfs_info.arfs);
312 DP_INFO(edev, "pkt_len = %u memzone = %s\n", pkt_len, mz_name);
314 SLIST_FOREACH(tmp, &qdev->arfs_info.arfs_list_head, list) {
315 if (memcmp(tmp->mz->addr, pkt, pkt_len) == 0) {
316 DP_INFO(edev, "flowdir filter exist\n");
322 SLIST_FOREACH(tmp, &qdev->arfs_info.arfs_list_head, list) {
323 if (memcmp(tmp->mz->addr, pkt, pkt_len) == 0)
327 DP_ERR(edev, "flowdir filter does not exist\n");
332 p_hwfn = ECORE_LEADING_HWFN(edev);
334 if (qdev->arfs_info.arfs.mode ==
335 ECORE_FILTER_CONFIG_MODE_DISABLE) {
337 eth_dev->data->dev_conf.fdir_conf.mode =
338 RTE_FDIR_MODE_PERFECT;
339 qdev->arfs_info.arfs.mode =
340 ECORE_FILTER_CONFIG_MODE_5_TUPLE;
341 DP_INFO(edev, "Force enable flowdir in perfect mode\n");
343 /* Enable ARFS searcher with updated flow_types */
344 ecore_arfs_mode_configure(p_hwfn, p_hwfn->p_arfs_ptt,
345 &qdev->arfs_info.arfs);
347 /* configure filter with ECORE_SPQ_MODE_EBLOCK */
348 rc = ecore_configure_rfs_ntuple_filter(p_hwfn, NULL,
349 (dma_addr_t)mz->iova,
353 if (rc == ECORE_SUCCESS) {
355 arfs->pkt_len = pkt_len;
357 SLIST_INSERT_HEAD(&qdev->arfs_info.arfs_list_head,
359 qdev->arfs_info.filter_count++;
360 DP_INFO(edev, "flowdir filter added, count = %d\n",
361 qdev->arfs_info.filter_count);
363 rte_memzone_free(tmp->mz);
364 SLIST_REMOVE(&qdev->arfs_info.arfs_list_head, tmp,
365 qede_arfs_entry, list);
366 rte_free(tmp); /* free the deleted list node */
367 rte_memzone_free(mz); /* free the temporary memzone used for the lookup */
368 qdev->arfs_info.filter_count--;
369 DP_INFO(edev, "Fdir filter deleted, count = %d\n",
370 qdev->arfs_info.filter_count);
373 DP_ERR(edev, "flowdir filter failed, rc=%d filter_count=%d\n",
374 rc, qdev->arfs_info.filter_count);
377 /* Disable ARFS searcher if there are no more filters */
378 if (qdev->arfs_info.filter_count == 0) {
379 memset(&qdev->arfs_info.arfs, 0,
380 sizeof(struct ecore_arfs_config_params));
381 DP_INFO(edev, "Disabling flowdir\n");
382 qdev->arfs_info.arfs.mode = ECORE_FILTER_CONFIG_MODE_DISABLE;
383 ecore_arfs_mode_configure(p_hwfn, p_hwfn->p_arfs_ptt,
384 &qdev->arfs_info.arfs);
389 rte_memzone_free(mz);
394 qede_config_cmn_fdir_filter(struct rte_eth_dev *eth_dev,
395 struct rte_eth_fdir_filter *fdir_filter,
398 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
399 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
400 struct qede_arfs_entry *arfs = NULL;
403 arfs = rte_malloc(NULL, sizeof(struct qede_arfs_entry),
404 RTE_CACHE_LINE_SIZE);
406 DP_ERR(edev, "Did not allocate memory for arfs\n");
410 rc = qede_fdir_to_arfs_filter(eth_dev, fdir_filter, arfs);
414 rc = qede_config_arfs_filter(eth_dev, arfs, add);
422 qede_fdir_filter_add(struct rte_eth_dev *eth_dev,
423 struct rte_eth_fdir_filter *fdir,
426 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
427 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
429 if (!qede_valid_flow(fdir->input.flow_type)) {
430 DP_ERR(edev, "invalid flow_type input\n");
434 if (fdir->action.rx_queue >= QEDE_RSS_COUNT(qdev)) {
435 DP_ERR(edev, "invalid queue number %u\n",
436 fdir->action.rx_queue);
440 if (fdir->input.flow_ext.is_vf) {
441 DP_ERR(edev, "flowdir is not supported over VF\n");
445 return qede_config_cmn_fdir_filter(eth_dev, fdir, add);
448 /* Fill the L3/L4 headers and return the actual length of the flowdir packet */
450 qede_arfs_construct_pkt(struct rte_eth_dev *eth_dev,
451 struct qede_arfs_entry *arfs,
453 struct ecore_arfs_config_params *params)
456 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
457 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
458 uint16_t *ether_type;
461 struct ipv6_hdr *ip6;
466 raw_pkt = (uint8_t *)buff;
468 len = 2 * sizeof(struct ether_addr);
469 raw_pkt += 2 * sizeof(struct ether_addr);
470 ether_type = (uint16_t *)raw_pkt;
471 raw_pkt += sizeof(uint16_t);
472 len += sizeof(uint16_t);
474 *ether_type = rte_cpu_to_be_16(arfs->tuple.eth_proto);
475 switch (arfs->tuple.eth_proto) {
476 case ETHER_TYPE_IPv4:
477 ip = (struct ipv4_hdr *)raw_pkt;
478 ip->version_ihl = QEDE_FDIR_IP_DEFAULT_VERSION_IHL;
479 ip->total_length = sizeof(struct ipv4_hdr);
480 ip->next_proto_id = arfs->tuple.ip_proto;
481 ip->time_to_live = QEDE_FDIR_IPV4_DEF_TTL;
482 ip->dst_addr = arfs->tuple.dst_ipv4;
483 ip->src_addr = arfs->tuple.src_ipv4;
484 len += sizeof(struct ipv4_hdr);
487 raw_pkt = (uint8_t *)buff;
489 if (arfs->tuple.ip_proto == IPPROTO_UDP) {
490 udp = (struct udp_hdr *)(raw_pkt + len);
491 udp->dst_port = arfs->tuple.dst_port;
492 udp->src_port = arfs->tuple.src_port;
493 udp->dgram_len = sizeof(struct udp_hdr);
494 len += sizeof(struct udp_hdr);
495 /* adjust ip total_length */
496 ip->total_length += sizeof(struct udp_hdr);
499 tcp = (struct tcp_hdr *)(raw_pkt + len);
500 tcp->src_port = arfs->tuple.src_port;
501 tcp->dst_port = arfs->tuple.dst_port;
502 tcp->data_off = QEDE_FDIR_TCP_DEFAULT_DATAOFF;
503 len += sizeof(struct tcp_hdr);
504 /* adjust ip total_length */
505 ip->total_length += sizeof(struct tcp_hdr);
509 case ETHER_TYPE_IPv6:
510 ip6 = (struct ipv6_hdr *)raw_pkt;
511 ip6->proto = arfs->tuple.ip_proto;
513 rte_cpu_to_be_32(QEDE_FDIR_IPV6_DEFAULT_VTC_FLOW);
515 rte_memcpy(&ip6->src_addr, arfs->tuple.src_ipv6,
517 rte_memcpy(&ip6->dst_addr, arfs->tuple.dst_ipv6,
519 len += sizeof(struct ipv6_hdr);
522 raw_pkt = (uint8_t *)buff;
524 if (arfs->tuple.ip_proto == IPPROTO_UDP) {
525 udp = (struct udp_hdr *)(raw_pkt + len);
526 udp->src_port = arfs->tuple.src_port;
527 udp->dst_port = arfs->tuple.dst_port;
528 len += sizeof(struct udp_hdr);
531 tcp = (struct tcp_hdr *)(raw_pkt + len);
532 tcp->src_port = arfs->tuple.src_port;
533 tcp->dst_port = arfs->tuple.dst_port;
534 tcp->data_off = QEDE_FDIR_TCP_DEFAULT_DATAOFF;
535 len += sizeof(struct tcp_hdr);
540 DP_ERR(edev, "Unsupported eth_proto %u\n",
541 arfs->tuple.eth_proto);
549 qede_fdir_filter_conf(struct rte_eth_dev *eth_dev,
550 enum rte_filter_op filter_op,
553 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
554 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
555 struct rte_eth_fdir_filter *fdir;
558 fdir = (struct rte_eth_fdir_filter *)arg;
560 case RTE_ETH_FILTER_NOP:
561 /* Typically used to query flowdir support */
562 if (ECORE_IS_CMT(edev)) {
563 DP_ERR(edev, "flowdir is not supported in 100G mode\n");
566 return 0; /* means supported */
567 case RTE_ETH_FILTER_ADD:
568 ret = qede_fdir_filter_add(eth_dev, fdir, 1);
570 case RTE_ETH_FILTER_DELETE:
571 ret = qede_fdir_filter_add(eth_dev, fdir, 0);
573 case RTE_ETH_FILTER_FLUSH:
574 case RTE_ETH_FILTER_UPDATE:
575 case RTE_ETH_FILTER_INFO:
579 DP_ERR(edev, "unknown operation %u", filter_op);
586 int qede_ntuple_filter_conf(struct rte_eth_dev *eth_dev,
587 enum rte_filter_op filter_op,
590 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
591 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
592 struct rte_eth_ntuple_filter *ntuple;
593 struct rte_eth_fdir_filter fdir_entry;
594 struct rte_eth_tcpv4_flow *tcpv4_flow;
595 struct rte_eth_udpv4_flow *udpv4_flow;
599 case RTE_ETH_FILTER_NOP:
600 /* Typically used to query fdir support */
601 if (ECORE_IS_CMT(edev)) {
602 DP_ERR(edev, "flowdir is not supported in 100G mode\n");
605 return 0; /* means supported */
606 case RTE_ETH_FILTER_ADD:
609 case RTE_ETH_FILTER_DELETE:
611 case RTE_ETH_FILTER_INFO:
612 case RTE_ETH_FILTER_GET:
613 case RTE_ETH_FILTER_UPDATE:
614 case RTE_ETH_FILTER_FLUSH:
615 case RTE_ETH_FILTER_SET:
616 case RTE_ETH_FILTER_STATS:
617 case RTE_ETH_FILTER_OP_MAX:
618 DP_ERR(edev, "Unsupported filter_op %d\n", filter_op);
621 ntuple = (struct rte_eth_ntuple_filter *)arg;
622 /* Internally convert ntuple to fdir entry */
623 memset(&fdir_entry, 0, sizeof(fdir_entry));
624 if (ntuple->proto == IPPROTO_TCP) {
625 fdir_entry.input.flow_type = RTE_ETH_FLOW_NONFRAG_IPV4_TCP;
626 tcpv4_flow = &fdir_entry.input.flow.tcp4_flow;
627 tcpv4_flow->ip.src_ip = ntuple->src_ip;
628 tcpv4_flow->ip.dst_ip = ntuple->dst_ip;
629 tcpv4_flow->ip.proto = IPPROTO_TCP;
630 tcpv4_flow->src_port = ntuple->src_port;
631 tcpv4_flow->dst_port = ntuple->dst_port;
633 fdir_entry.input.flow_type = RTE_ETH_FLOW_NONFRAG_IPV4_UDP;
634 udpv4_flow = &fdir_entry.input.flow.udp4_flow;
635 udpv4_flow->ip.src_ip = ntuple->src_ip;
636 udpv4_flow->ip.dst_ip = ntuple->dst_ip;
637 udpv4_flow->ip.proto = IPPROTO_UDP;
638 udpv4_flow->src_port = ntuple->src_port;
639 udpv4_flow->dst_port = ntuple->dst_port;
642 fdir_entry.action.rx_queue = ntuple->queue;
644 return qede_config_cmn_fdir_filter(eth_dev, &fdir_entry, add);
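/* Push the tunnel configuration to every hwfn: a PTT is acquired per hwfn,
 * the PF tunnel-update ramrod is sent, and the PTT is released afterwards.
 */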
648 qede_tunnel_update(struct qede_dev *qdev,
649 struct ecore_tunnel_info *tunn_info)
651 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
652 enum _ecore_status_t rc = ECORE_INVAL;
653 struct ecore_hwfn *p_hwfn;
654 struct ecore_ptt *p_ptt;
657 for_each_hwfn(edev, i) {
658 p_hwfn = &edev->hwfns[i];
660 p_ptt = ecore_ptt_acquire(p_hwfn);
662 DP_ERR(p_hwfn, "Can't acquire PTT\n");
669 rc = ecore_sp_pf_update_tunn_cfg(p_hwfn, p_ptt,
670 tunn_info, ECORE_SPQ_MODE_CB, NULL);
672 ecore_ptt_release(p_hwfn, p_ptt);
674 if (rc != ECORE_SUCCESS)
682 qede_vxlan_enable(struct rte_eth_dev *eth_dev, uint8_t clss,
685 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
686 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
687 enum _ecore_status_t rc = ECORE_INVAL;
688 struct ecore_tunnel_info tunn;
690 if (qdev->vxlan.enable == enable)
691 return ECORE_SUCCESS;
693 memset(&tunn, 0, sizeof(struct ecore_tunnel_info));
694 tunn.vxlan.b_update_mode = true;
695 tunn.vxlan.b_mode_enabled = enable;
696 tunn.b_update_rx_cls = true;
697 tunn.b_update_tx_cls = true;
698 tunn.vxlan.tun_cls = clss;
700 tunn.vxlan_port.b_update_port = true;
701 tunn.vxlan_port.port = enable ? QEDE_VXLAN_DEF_PORT : 0;
703 rc = qede_tunnel_update(qdev, &tunn);
704 if (rc == ECORE_SUCCESS) {
705 qdev->vxlan.enable = enable;
706 qdev->vxlan.udp_port = (enable) ? QEDE_VXLAN_DEF_PORT : 0;
707 DP_INFO(edev, "vxlan is %s, UDP port = %d\n",
708 enable ? "enabled" : "disabled", qdev->vxlan.udp_port);
710 DP_ERR(edev, "Failed to update tunn_clss %u\n",
718 qede_geneve_enable(struct rte_eth_dev *eth_dev, uint8_t clss,
721 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
722 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
723 enum _ecore_status_t rc = ECORE_INVAL;
724 struct ecore_tunnel_info tunn;
726 memset(&tunn, 0, sizeof(struct ecore_tunnel_info));
727 tunn.l2_geneve.b_update_mode = true;
728 tunn.l2_geneve.b_mode_enabled = enable;
729 tunn.ip_geneve.b_update_mode = true;
730 tunn.ip_geneve.b_mode_enabled = enable;
731 tunn.l2_geneve.tun_cls = clss;
732 tunn.ip_geneve.tun_cls = clss;
733 tunn.b_update_rx_cls = true;
734 tunn.b_update_tx_cls = true;
736 tunn.geneve_port.b_update_port = true;
737 tunn.geneve_port.port = enable ? QEDE_GENEVE_DEF_PORT : 0;
739 rc = qede_tunnel_update(qdev, &tunn);
740 if (rc == ECORE_SUCCESS) {
741 qdev->geneve.enable = enable;
742 qdev->geneve.udp_port = (enable) ? QEDE_GENEVE_DEF_PORT : 0;
743 DP_INFO(edev, "GENEVE is %s, UDP port = %d\n",
744 enable ? "enabled" : "disabled", qdev->geneve.udp_port);
746 DP_ERR(edev, "Failed to update tunn_clss %u\n",
754 qede_ipgre_enable(struct rte_eth_dev *eth_dev, uint8_t clss,
757 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
758 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
759 enum _ecore_status_t rc = ECORE_INVAL;
760 struct ecore_tunnel_info tunn;
762 memset(&tunn, 0, sizeof(struct ecore_tunnel_info));
763 tunn.ip_gre.b_update_mode = true;
764 tunn.ip_gre.b_mode_enabled = enable;
765 tunn.ip_gre.tun_cls = clss;
767 tunn.b_update_rx_cls = true;
768 tunn.b_update_tx_cls = true;
770 rc = qede_tunnel_update(qdev, &tunn);
771 if (rc == ECORE_SUCCESS) {
772 qdev->ipgre.enable = enable;
773 DP_INFO(edev, "IPGRE is %s\n",
774 enable ? "enabled" : "disabled");
776 DP_ERR(edev, "Failed to update tunn_clss %u\n",
784 qede_udp_dst_port_del(struct rte_eth_dev *eth_dev,
785 struct rte_eth_udp_tunnel *tunnel_udp)
787 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
788 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
789 struct ecore_tunnel_info tunn; /* @DPDK */
793 PMD_INIT_FUNC_TRACE(edev);
795 memset(&tunn, 0, sizeof(tunn));
797 switch (tunnel_udp->prot_type) {
798 case RTE_TUNNEL_TYPE_VXLAN:
799 if (qdev->vxlan.udp_port != tunnel_udp->udp_port) {
800 DP_ERR(edev, "UDP port %u doesn't exist\n",
801 tunnel_udp->udp_port);
806 tunn.vxlan_port.b_update_port = true;
807 tunn.vxlan_port.port = udp_port;
809 rc = qede_tunnel_update(qdev, &tunn);
810 if (rc != ECORE_SUCCESS) {
811 DP_ERR(edev, "Unable to config UDP port %u\n",
812 tunn.vxlan_port.port);
816 qdev->vxlan.udp_port = udp_port;
817 /* If the request is to delete the UDP port and the number of
818 * VXLAN filters has reached 0, then VXLAN offload can be
821 if (qdev->vxlan.enable && qdev->vxlan.num_filters == 0)
822 return qede_vxlan_enable(eth_dev,
823 ECORE_TUNN_CLSS_MAC_VLAN, false);
826 case RTE_TUNNEL_TYPE_GENEVE:
827 if (qdev->geneve.udp_port != tunnel_udp->udp_port) {
828 DP_ERR(edev, "UDP port %u doesn't exist\n",
829 tunnel_udp->udp_port);
835 tunn.geneve_port.b_update_port = true;
836 tunn.geneve_port.port = udp_port;
838 rc = qede_tunnel_update(qdev, &tunn);
839 if (rc != ECORE_SUCCESS) {
840 DP_ERR(edev, "Unable to config UDP port %u\n",
841 tunn.geneve_port.port);
845 qdev->geneve.udp_port = udp_port;
846 /* If the request is to delete the UDP port and the number of
847 * GENEVE filters has reached 0, then GENEVE offload can be
850 if (qdev->geneve.enable && qdev->geneve.num_filters == 0)
851 return qede_geneve_enable(eth_dev,
852 ECORE_TUNN_CLSS_MAC_VLAN, false);
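/* Add a UDP tunnel destination port: enable the tunnel with the default
 * MAC/VLAN classification if it is not enabled yet, then program the new
 * UDP destination port for VXLAN or GENEVE.
 */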
864 qede_udp_dst_port_add(struct rte_eth_dev *eth_dev,
865 struct rte_eth_udp_tunnel *tunnel_udp)
867 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
868 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
869 struct ecore_tunnel_info tunn; /* @DPDK */
873 PMD_INIT_FUNC_TRACE(edev);
875 memset(&tunn, 0, sizeof(tunn));
877 switch (tunnel_udp->prot_type) {
878 case RTE_TUNNEL_TYPE_VXLAN:
879 if (qdev->vxlan.udp_port == tunnel_udp->udp_port) {
881 "UDP port %u for VXLAN was already configured\n",
882 tunnel_udp->udp_port);
883 return ECORE_SUCCESS;
886 /* Enable VXLAN tunnel with default MAC/VLAN classification if
887 * it was not enabled while adding VXLAN filter before UDP port
890 if (!qdev->vxlan.enable) {
891 rc = qede_vxlan_enable(eth_dev,
892 ECORE_TUNN_CLSS_MAC_VLAN, true);
893 if (rc != ECORE_SUCCESS) {
894 DP_ERR(edev, "Failed to enable VXLAN "
895 "prior to updating UDP port\n");
899 udp_port = tunnel_udp->udp_port;
901 tunn.vxlan_port.b_update_port = true;
902 tunn.vxlan_port.port = udp_port;
904 rc = qede_tunnel_update(qdev, &tunn);
905 if (rc != ECORE_SUCCESS) {
906 DP_ERR(edev, "Unable to config UDP port %u for VXLAN\n",
911 DP_INFO(edev, "Updated UDP port %u for VXLAN\n", udp_port);
913 qdev->vxlan.udp_port = udp_port;
915 case RTE_TUNNEL_TYPE_GENEVE:
916 if (qdev->geneve.udp_port == tunnel_udp->udp_port) {
918 "UDP port %u for GENEVE was already configured\n",
919 tunnel_udp->udp_port);
920 return ECORE_SUCCESS;
923 /* Enable GENEVE tunnel with default MAC/VLAN classification if
924 * it was not enabled while adding GENEVE filter before UDP port
927 if (!qdev->geneve.enable) {
928 rc = qede_geneve_enable(eth_dev,
929 ECORE_TUNN_CLSS_MAC_VLAN, true);
930 if (rc != ECORE_SUCCESS) {
931 DP_ERR(edev, "Failed to enable GENEVE "
932 "prior to updating UDP port\n");
936 udp_port = tunnel_udp->udp_port;
938 tunn.geneve_port.b_update_port = true;
939 tunn.geneve_port.port = udp_port;
941 rc = qede_tunnel_update(qdev, &tunn);
942 if (rc != ECORE_SUCCESS) {
943 DP_ERR(edev, "Unable to config UDP port %u for GENEVE\n",
948 DP_INFO(edev, "Updated UDP port %u for GENEVE\n", udp_port);
950 qdev->geneve.udp_port = udp_port;
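/* Look up the rte tunnel filter type in qede_tunn_types and return the
 * matching ecore ucast type and tunnel classification; *clss is left at
 * MAX_ECORE_TUNN_CLSS when the filter type is not supported.
 */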
959 static void qede_get_ecore_tunn_params(uint32_t filter, uint32_t *type,
960 uint32_t *clss, char *str)
963 *clss = MAX_ECORE_TUNN_CLSS;
965 for (j = 0; j < RTE_DIM(qede_tunn_types); j++) {
966 if (filter == qede_tunn_types[j].rte_filter_type) {
967 *type = qede_tunn_types[j].qede_type;
968 *clss = qede_tunn_types[j].qede_tunn_clss;
969 strcpy(str, qede_tunn_types[j].string);
976 qede_set_ucast_tunn_cmn_param(struct ecore_filter_ucast *ucast,
977 const struct rte_eth_tunnel_filter_conf *conf,
980 /* Init common ucast params first */
981 qede_set_ucast_cmn_params(ucast);
983 /* Copy out the required fields based on classification type */
987 case ECORE_FILTER_VNI:
988 ucast->vni = conf->tenant_id;
990 case ECORE_FILTER_INNER_VLAN:
991 ucast->vlan = conf->inner_vlan;
993 case ECORE_FILTER_MAC:
994 memcpy(ucast->mac, conf->outer_mac.addr_bytes,
997 case ECORE_FILTER_INNER_MAC:
998 memcpy(ucast->mac, conf->inner_mac.addr_bytes,
1001 case ECORE_FILTER_MAC_VNI_PAIR:
1002 memcpy(ucast->mac, conf->outer_mac.addr_bytes,
1004 ucast->vni = conf->tenant_id;
1006 case ECORE_FILTER_INNER_MAC_VNI_PAIR:
1007 memcpy(ucast->mac, conf->inner_mac.addr_bytes,
1009 ucast->vni = conf->tenant_id;
1011 case ECORE_FILTER_INNER_PAIR:
1012 memcpy(ucast->mac, conf->inner_mac.addr_bytes,
1014 ucast->vlan = conf->inner_vlan;
1020 return ECORE_SUCCESS;
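/* Resolve the tunnel classification for the given filter, build the ecore
 * unicast filter from the rte tunnel filter config, and program it either
 * through the MAC/VLAN path or, for VNI-based filters, directly via
 * ecore_filter_ucast_cmd().
 */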
1024 _qede_tunn_filter_config(struct rte_eth_dev *eth_dev,
1025 const struct rte_eth_tunnel_filter_conf *conf,
1026 __attribute__((unused)) enum rte_filter_op filter_op,
1027 enum ecore_tunn_clss *clss,
1030 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
1031 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
1032 struct ecore_filter_ucast ucast = {0};
1033 enum ecore_filter_ucast_type type;
1034 uint16_t filter_type = 0;
1038 filter_type = conf->filter_type;
1039 /* Determine if the given filter classification is supported */
1040 qede_get_ecore_tunn_params(filter_type, &type, clss, str);
1041 if (*clss == MAX_ECORE_TUNN_CLSS) {
1042 DP_ERR(edev, "Unsupported filter type\n");
1045 /* Init tunnel ucast params */
1046 rc = qede_set_ucast_tunn_cmn_param(&ucast, conf, type);
1047 if (rc != ECORE_SUCCESS) {
1048 DP_ERR(edev, "Unsupported Tunnel filter type 0x%x\n",
1052 DP_INFO(edev, "Rule: \"%s\", op %d, type 0x%x\n",
1053 str, filter_op, ucast.type);
1055 ucast.opcode = add ? ECORE_FILTER_ADD : ECORE_FILTER_REMOVE;
1057 /* Skip MAC/VLAN if filter is based on VNI */
1058 if (!(filter_type & ETH_TUNNEL_FILTER_TENID)) {
1059 rc = qede_mac_int_ops(eth_dev, &ucast, add);
1060 if (rc == 0 && add) {
1061 /* Enable accept anyvlan */
1062 qede_config_accept_any_vlan(qdev, true);
1065 rc = qede_ucast_filter(eth_dev, &ucast, add);
1067 rc = ecore_filter_ucast_cmd(edev, &ucast,
1068 ECORE_SPQ_MODE_CB, NULL);
1075 qede_tunn_enable(struct rte_eth_dev *eth_dev, uint8_t clss,
1076 enum rte_eth_tunnel_type tunn_type, bool enable)
1080 switch (tunn_type) {
1081 case RTE_TUNNEL_TYPE_VXLAN:
1082 rc = qede_vxlan_enable(eth_dev, clss, enable);
1084 case RTE_TUNNEL_TYPE_GENEVE:
1085 rc = qede_geneve_enable(eth_dev, clss, enable);
1087 case RTE_TUNNEL_TYPE_IP_IN_GRE:
1088 rc = qede_ipgre_enable(eth_dev, clss, enable);
1099 qede_tunn_filter_config(struct rte_eth_dev *eth_dev,
1100 enum rte_filter_op filter_op,
1101 const struct rte_eth_tunnel_filter_conf *conf)
1103 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
1104 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
1105 enum ecore_tunn_clss clss = MAX_ECORE_TUNN_CLSS;
1109 PMD_INIT_FUNC_TRACE(edev);
1111 switch (filter_op) {
1112 case RTE_ETH_FILTER_ADD:
1115 case RTE_ETH_FILTER_DELETE:
1119 DP_ERR(edev, "Unsupported operation %d\n", filter_op);
1124 return qede_tunn_enable(eth_dev,
1125 ECORE_TUNN_CLSS_MAC_VLAN,
1126 conf->tunnel_type, add);
1128 rc = _qede_tunn_filter_config(eth_dev, conf, filter_op, &clss, add);
1129 if (rc != ECORE_SUCCESS)
1133 if (conf->tunnel_type == RTE_TUNNEL_TYPE_VXLAN) {
1134 qdev->vxlan.num_filters++;
1135 qdev->vxlan.filter_type = conf->filter_type;
1136 } else { /* GENEVE */
1137 qdev->geneve.num_filters++;
1138 qdev->geneve.filter_type = conf->filter_type;
1141 if (!qdev->vxlan.enable || !qdev->geneve.enable ||
1142 !qdev->ipgre.enable)
1143 return qede_tunn_enable(eth_dev, clss,
1147 if (conf->tunnel_type == RTE_TUNNEL_TYPE_VXLAN)
1148 qdev->vxlan.num_filters--;
1150 qdev->geneve.num_filters--;
1152 /* Disable the tunnel offload if the VXLAN/GENEVE filter count drops to 0 */
1153 if (qdev->vxlan.num_filters == 0 ||
1154 qdev->geneve.num_filters == 0)
1155 return qede_tunn_enable(eth_dev, clss,
1164 qede_flow_validate_attr(__attribute__((unused))struct rte_eth_dev *dev,
1165 const struct rte_flow_attr *attr,
1166 struct rte_flow_error *error)
1169 rte_flow_error_set(error, EINVAL,
1170 RTE_FLOW_ERROR_TYPE_ATTR, NULL,
1175 if (attr->group != 0) {
1176 rte_flow_error_set(error, ENOTSUP,
1177 RTE_FLOW_ERROR_TYPE_ATTR_GROUP, attr,
1178 "Groups are not supported");
1182 if (attr->priority != 0) {
1183 rte_flow_error_set(error, ENOTSUP,
1184 RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY, attr,
1185 "Priorities are not supported");
1189 if (attr->egress != 0) {
1190 rte_flow_error_set(error, ENOTSUP,
1191 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, attr,
1192 "Egress is not supported");
1196 if (attr->transfer != 0) {
1197 rte_flow_error_set(error, ENOTSUP,
1198 RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER, attr,
1199 "Transfer is not supported");
1203 if (attr->ingress == 0) {
1204 rte_flow_error_set(error, ENOTSUP,
1205 RTE_FLOW_ERROR_TYPE_ATTR_INGRESS, attr,
1206 "Only ingress is supported");
1214 qede_flow_parse_pattern(__attribute__((unused))struct rte_eth_dev *dev,
1215 const struct rte_flow_item pattern[],
1216 struct rte_flow_error *error,
1217 struct rte_flow *flow)
1219 bool l3 = false, l4 = false;
1221 if (pattern == NULL) {
1222 rte_flow_error_set(error, EINVAL,
1223 RTE_FLOW_ERROR_TYPE_ITEM_NUM, NULL,
1228 for (; pattern->type != RTE_FLOW_ITEM_TYPE_END; pattern++) {
1229 if (!pattern->spec) {
1230 rte_flow_error_set(error, EINVAL,
1231 RTE_FLOW_ERROR_TYPE_ITEM,
1233 "Item spec not defined");
1237 if (pattern->last) {
1238 rte_flow_error_set(error, EINVAL,
1239 RTE_FLOW_ERROR_TYPE_ITEM,
1241 "Item last not supported");
1245 if (pattern->mask) {
1246 rte_flow_error_set(error, EINVAL,
1247 RTE_FLOW_ERROR_TYPE_ITEM,
1249 "Item mask not supported");
1253 /* Below validation is only for 4 tuple flow
1254 * (GFT_PROFILE_TYPE_4_TUPLE)
1255 * - src and dst L3 address (IPv4 or IPv6)
1256 * - src and dst L4 port (TCP or UDP)
1259 switch (pattern->type) {
1260 case RTE_FLOW_ITEM_TYPE_IPV4:
1264 const struct rte_flow_item_ipv4 *spec;
1266 spec = pattern->spec;
1267 flow->entry.tuple.src_ipv4 = spec->hdr.src_addr;
1268 flow->entry.tuple.dst_ipv4 = spec->hdr.dst_addr;
1269 flow->entry.tuple.eth_proto = ETHER_TYPE_IPv4;
1273 case RTE_FLOW_ITEM_TYPE_IPV6:
1277 const struct rte_flow_item_ipv6 *spec;
1279 spec = pattern->spec;
1280 rte_memcpy(flow->entry.tuple.src_ipv6,
1283 rte_memcpy(flow->entry.tuple.dst_ipv6,
1286 flow->entry.tuple.eth_proto = ETHER_TYPE_IPv6;
1290 case RTE_FLOW_ITEM_TYPE_UDP:
1294 const struct rte_flow_item_udp *spec;
1296 spec = pattern->spec;
1297 flow->entry.tuple.src_port =
1299 flow->entry.tuple.dst_port =
1301 flow->entry.tuple.ip_proto = IPPROTO_UDP;
1305 case RTE_FLOW_ITEM_TYPE_TCP:
1309 const struct rte_flow_item_tcp *spec;
1311 spec = pattern->spec;
1312 flow->entry.tuple.src_port =
1314 flow->entry.tuple.dst_port =
1316 flow->entry.tuple.ip_proto = IPPROTO_TCP;
1321 rte_flow_error_set(error, EINVAL,
1322 RTE_FLOW_ERROR_TYPE_ITEM,
1324 "Only 4 tuple (IPV4, IPV6, UDP and TCP) item types supported");
1330 rte_flow_error_set(error, EINVAL,
1331 RTE_FLOW_ERROR_TYPE_ITEM,
1333 "Item types need to have both L3 and L4 protocols");
1341 qede_flow_parse_actions(struct rte_eth_dev *dev,
1342 const struct rte_flow_action actions[],
1343 struct rte_flow_error *error,
1344 struct rte_flow *flow)
1346 struct qede_dev *qdev = QEDE_INIT_QDEV(dev);
1347 const struct rte_flow_action_queue *queue;
1349 if (actions == NULL) {
1350 rte_flow_error_set(error, EINVAL,
1351 RTE_FLOW_ERROR_TYPE_ACTION_NUM, NULL,
1356 for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
1357 switch (actions->type) {
1358 case RTE_FLOW_ACTION_TYPE_QUEUE:
1359 queue = actions->conf;
1361 if (queue->index >= QEDE_RSS_COUNT(qdev)) {
1362 rte_flow_error_set(error, EINVAL,
1363 RTE_FLOW_ERROR_TYPE_ACTION,
1365 "Bad QUEUE action");
1370 flow->entry.rx_queue = queue->index;
1375 rte_flow_error_set(error, ENOTSUP,
1376 RTE_FLOW_ERROR_TYPE_ACTION,
1378 "Action is not supported - only ACTION_TYPE_QUEUE supported");
1387 qede_flow_parse(struct rte_eth_dev *dev,
1388 const struct rte_flow_attr *attr,
1389 const struct rte_flow_item patterns[],
1390 const struct rte_flow_action actions[],
1391 struct rte_flow_error *error,
1392 struct rte_flow *flow)
1397 rc = qede_flow_validate_attr(dev, attr, error);
1401 /* Parse and validate the item pattern and actions. The given item
1402 * list and actions will be translated to the qede PMD-specific arfs
1403 * structure.
1405 rc = qede_flow_parse_pattern(dev, patterns, error, flow);
1409 rc = qede_flow_parse_actions(dev, actions, error, flow);
1415 qede_flow_validate(struct rte_eth_dev *dev,
1416 const struct rte_flow_attr *attr,
1417 const struct rte_flow_item patterns[],
1418 const struct rte_flow_action actions[],
1419 struct rte_flow_error *error)
1421 return qede_flow_parse(dev, attr, patterns, actions, error, NULL);
1424 static struct rte_flow *
1425 qede_flow_create(struct rte_eth_dev *dev,
1426 const struct rte_flow_attr *attr,
1427 const struct rte_flow_item pattern[],
1428 const struct rte_flow_action actions[],
1429 struct rte_flow_error *error)
1431 struct rte_flow *flow = NULL;
1434 flow = rte_zmalloc("qede_rte_flow", sizeof(*flow), 0);
1436 rte_flow_error_set(error, ENOMEM,
1437 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1438 "Failed to allocate memory");
1442 rc = qede_flow_parse(dev, attr, pattern, actions, error, flow);
1448 rc = qede_config_arfs_filter(dev, &flow->entry, true);
1450 rte_flow_error_set(error, rc,
1451 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1452 "Failed to configure flow filter");
1461 qede_flow_destroy(struct rte_eth_dev *eth_dev,
1462 struct rte_flow *flow,
1463 struct rte_flow_error *error)
1467 rc = qede_config_arfs_filter(eth_dev, &flow->entry, false);
1469 rte_flow_error_set(error, rc,
1470 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1471 "Failed to delete flow filter");
1478 const struct rte_flow_ops qede_flow_ops = {
1479 .validate = qede_flow_validate,
1480 .create = qede_flow_create,
1481 .destroy = qede_flow_destroy,
1484 int qede_dev_filter_ctrl(struct rte_eth_dev *eth_dev,
1485 enum rte_filter_type filter_type,
1486 enum rte_filter_op filter_op,
1489 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
1490 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
1491 struct rte_eth_tunnel_filter_conf *filter_conf =
1492 (struct rte_eth_tunnel_filter_conf *)arg;
1494 switch (filter_type) {
1495 case RTE_ETH_FILTER_TUNNEL:
1496 switch (filter_conf->tunnel_type) {
1497 case RTE_TUNNEL_TYPE_VXLAN:
1498 case RTE_TUNNEL_TYPE_GENEVE:
1499 case RTE_TUNNEL_TYPE_IP_IN_GRE:
1501 "Packet steering to the specified Rx queue"
1502 " is not supported with UDP tunneling");
1503 return(qede_tunn_filter_config(eth_dev, filter_op,
1505 case RTE_TUNNEL_TYPE_TEREDO:
1506 case RTE_TUNNEL_TYPE_NVGRE:
1507 case RTE_L2_TUNNEL_TYPE_E_TAG:
1508 DP_ERR(edev, "Unsupported tunnel type %d\n",
1509 filter_conf->tunnel_type);
1511 case RTE_TUNNEL_TYPE_NONE:
1516 case RTE_ETH_FILTER_FDIR:
1517 return qede_fdir_filter_conf(eth_dev, filter_op, arg);
1518 case RTE_ETH_FILTER_NTUPLE:
1519 return qede_ntuple_filter_conf(eth_dev, filter_op, arg);
1520 case RTE_ETH_FILTER_GENERIC:
1521 if (ECORE_IS_CMT(edev)) {
1522 DP_ERR(edev, "flowdir is not supported in 100G mode\n");
1526 if (filter_op != RTE_ETH_FILTER_GET)
1529 *(const void **)arg = &qede_flow_ops;
1531 case RTE_ETH_FILTER_MACVLAN:
1532 case RTE_ETH_FILTER_ETHERTYPE:
1533 case RTE_ETH_FILTER_FLEXIBLE:
1534 case RTE_ETH_FILTER_SYN:
1535 case RTE_ETH_FILTER_HASH:
1536 case RTE_ETH_FILTER_L2_TUNNEL:
1537 case RTE_ETH_FILTER_MAX:
1539 DP_ERR(edev, "Unsupported filter type %d\n",