dpdk: DPDK 20.05 iavf flow director backporting to DPDK 20.02 91/26191/6
author Chenmin Sun <chenmin.sun@intel.com>
Fri, 27 Mar 2020 16:34:19 +0000 (00:34 +0800)
committer Damjan Marion <dmarion@me.com>
Wed, 22 Apr 2020 08:44:56 +0000 (08:44 +0000)
Patches 0001 ~ 0014 cover the virtual channel and PMD changes.
Patch 0015 adds the iavf fdir framework.
Patches 0016 ~ 0017 add the iavf fdir driver.
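
Once backported, the iavf flow director is exercised through DPDK's generic
rte_flow API. As a rough sketch only (the port id, addresses and match/action
choices below are illustrative assumptions, not taken from these patches), a
rule steering one IPv4/UDP destination to a specific Rx queue could look like:

    #include <rte_ethdev.h>
    #include <rte_flow.h>
    #include <rte_ip.h>

    /* Hypothetical example: steer UDP packets destined to 192.168.1.1 to
     * Rx queue 1 on the given port; error handling trimmed for brevity. */
    static struct rte_flow *
    fdir_example(uint16_t port_id)
    {
            struct rte_flow_attr attr = { .ingress = 1 };
            struct rte_flow_item_ipv4 ip_spec = {
                    .hdr.dst_addr = rte_cpu_to_be_32(RTE_IPV4(192, 168, 1, 1)),
            };
            struct rte_flow_item_ipv4 ip_mask = {
                    .hdr.dst_addr = rte_cpu_to_be_32(0xffffffff),
            };
            struct rte_flow_item pattern[] = {
                    { .type = RTE_FLOW_ITEM_TYPE_ETH },
                    { .type = RTE_FLOW_ITEM_TYPE_IPV4,
                      .spec = &ip_spec, .mask = &ip_mask },
                    { .type = RTE_FLOW_ITEM_TYPE_UDP },
                    { .type = RTE_FLOW_ITEM_TYPE_END },
            };
            struct rte_flow_action_queue queue = { .index = 1 };
            struct rte_flow_action actions[] = {
                    { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
                    { .type = RTE_FLOW_ACTION_TYPE_END },
            };
            struct rte_flow_error error;

            return rte_flow_create(port_id, &attr, pattern, actions, &error);
    }

In VPP this path would typically be driven through the dpdk plugin's flow
offload code rather than by calling rte_flow directly; the sketch only shows
the underlying DPDK API that the backported patches enable on iavf ports.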

Type: feature

Signed-off-by: Chenmin Sun <chenmin.sun@intel.com>
Change-Id: I38e69ca0065a71cc6ba0b44ef7c7db51193a0899

17 files changed:
build/external/patches/dpdk_20.02/0001-net-iavf-unify-Rx-ptype-table.patch [new file with mode: 0644]
build/external/patches/dpdk_20.02/0002-common-iavf-add-virtual-channel-opcodes-39-40-43.patch [new file with mode: 0644]
build/external/patches/dpdk_20.02/0003-common-iavf-support-VSI-mapping-table.patch [new file with mode: 0644]
build/external/patches/dpdk_20.02/0004-common-iavf-add-PTYPE-definition.patch [new file with mode: 0644]
build/external/patches/dpdk_20.02/0005-common-iavf-add-virtual-channel-support-for-Flex-RXD.patch [new file with mode: 0644]
build/external/patches/dpdk_20.02/0006-common-iavf-add-virtual-channel-protocol-header.patch [new file with mode: 0644]
build/external/patches/dpdk_20.02/0007-net-iavf-flexible-Rx-descriptor-definitions.patch [new file with mode: 0644]
build/external/patches/dpdk_20.02/0008-net-iavf-return-error-if-opcode-is-mismatched.patch [new file with mode: 0644]
build/external/patches/dpdk_20.02/0009-net-iavf-flexible-Rx-descriptor-support-in-normal-pa.patch [new file with mode: 0644]
build/external/patches/dpdk_20.02/0010-net-iavf-flexible-Rx-descriptor-support-in-AVX-path.patch [new file with mode: 0644]
build/external/patches/dpdk_20.02/0011-net-iavf-add-flow-director-enabled-switch-value.patch [new file with mode: 0644]
build/external/patches/dpdk_20.02/0012-net-iavf-support-flow-mark-in-normal-data-path.patch [new file with mode: 0644]
build/external/patches/dpdk_20.02/0013-net-iavf-support-flow-mark-in-AVX-path.patch [new file with mode: 0644]
build/external/patches/dpdk_20.02/0014-net-iavf-add-RSS-hash-parsing-in-AVX-path.patch [new file with mode: 0644]
build/external/patches/dpdk_20.02/0015-net-iavf-support-generic-flow.patch [new file with mode: 0644]
build/external/patches/dpdk_20.02/0016-common-iavf-add-flow-director-support-in-virtual-cha.patch [new file with mode: 0644]
build/external/patches/dpdk_20.02/0017-net-iavf-add-support-for-FDIR-basic-rule.patch [new file with mode: 0644]

diff --git a/build/external/patches/dpdk_20.02/0001-net-iavf-unify-Rx-ptype-table.patch b/build/external/patches/dpdk_20.02/0001-net-iavf-unify-Rx-ptype-table.patch
new file mode 100644
index 0000000..de7333a
--- /dev/null
@@ -0,0 +1,816 @@
+From daa3f3ab896ca261fd2eca99609437dacd95dd7a Mon Sep 17 00:00:00 2001
+From: Shougang Wang <shougangx.wang@intel.com>
+Date: Fri, 6 Mar 2020 02:24:19 +0000
+Subject: [DPDK 01/17] net/iavf: unify Rx ptype table
+
+This patch unified the Rx ptype table.
+
+Signed-off-by: Shougang Wang <shougangx.wang@intel.com>
+Acked-by: Leyi Rong <leyi.rong@intel.com>
+Acked-by: Jingjing Wu <jingjing.wu@intel.com>
+---
+ drivers/net/iavf/iavf.h               |   3 +-
+ drivers/net/iavf/iavf_ethdev.c        |   3 +
+ drivers/net/iavf/iavf_rxtx.c          | 604 +++++++++++++++++++++++---
+ drivers/net/iavf/iavf_rxtx.h          |   3 +
+ drivers/net/iavf/iavf_rxtx_vec_avx2.c |  21 +-
+ drivers/net/iavf/iavf_rxtx_vec_sse.c  |  25 +-
+ 6 files changed, 561 insertions(+), 98 deletions(-)
+
+diff --git a/drivers/net/iavf/iavf.h b/drivers/net/iavf/iavf.h
+index fe25d807c..526040c6e 100644
+--- a/drivers/net/iavf/iavf.h
++++ b/drivers/net/iavf/iavf.h
+@@ -119,7 +119,7 @@ struct iavf_info {
+       uint16_t rxq_map[IAVF_MAX_MSIX_VECTORS];
+ };
+-#define IAVF_MAX_PKT_TYPE 256
++#define IAVF_MAX_PKT_TYPE 1024
+ /* Structure to store private data for each VF instance. */
+ struct iavf_adapter {
+@@ -131,6 +131,7 @@ struct iavf_adapter {
+       /* For vector PMD */
+       bool rx_vec_allowed;
+       bool tx_vec_allowed;
++      const uint32_t *ptype_tbl;
+       bool stopped;
+ };
+diff --git a/drivers/net/iavf/iavf_ethdev.c b/drivers/net/iavf/iavf_ethdev.c
+index 34913f9c4..ee9f82249 100644
+--- a/drivers/net/iavf/iavf_ethdev.c
++++ b/drivers/net/iavf/iavf_ethdev.c
+@@ -1334,6 +1334,9 @@ iavf_dev_init(struct rte_eth_dev *eth_dev)
+               return -1;
+       }
++      /* set default ptype table */
++      adapter->ptype_tbl = iavf_get_default_ptype_table();
++
+       /* copy mac addr */
+       eth_dev->data->mac_addrs = rte_zmalloc(
+               "iavf_mac", RTE_ETHER_ADDR_LEN * IAVF_NUM_MACADDR_MAX, 0);
+diff --git a/drivers/net/iavf/iavf_rxtx.c b/drivers/net/iavf/iavf_rxtx.c
+index 85d9a8e3b..9eccb7c41 100644
+--- a/drivers/net/iavf/iavf_rxtx.c
++++ b/drivers/net/iavf/iavf_rxtx.c
+@@ -303,6 +303,9 @@ iavf_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
+       struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct iavf_adapter *ad =
+               IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
++      struct iavf_info *vf =
++              IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
++      struct iavf_vsi *vsi = &vf->vsi;
+       struct iavf_rx_queue *rxq;
+       const struct rte_memzone *mz;
+       uint32_t ring_size;
+@@ -351,6 +354,7 @@ iavf_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
+       rxq->crc_len = 0; /* crc stripping by default */
+       rxq->rx_deferred_start = rx_conf->rx_deferred_start;
+       rxq->rx_hdr_len = 0;
++      rxq->vsi = vsi;
+       len = rte_pktmbuf_data_room_size(rxq->mp) - RTE_PKTMBUF_HEADROOM;
+       rxq->rx_buf_len = RTE_ALIGN(len, (1 << IAVF_RXQ_CTX_DBUFF_SHIFT));
+@@ -769,31 +773,14 @@ iavf_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
+       uint16_t rx_id, nb_hold;
+       uint64_t dma_addr;
+       uint64_t pkt_flags;
+-      static const uint32_t ptype_tbl[UINT8_MAX + 1] __rte_cache_aligned = {
+-              /* [0] reserved */
+-              [1] = RTE_PTYPE_L2_ETHER,
+-              /* [2] - [21] reserved */
+-              [22] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+-                      RTE_PTYPE_L4_FRAG,
+-              [23] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+-                      RTE_PTYPE_L4_NONFRAG,
+-              [24] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+-                      RTE_PTYPE_L4_UDP,
+-              /* [25] reserved */
+-              [26] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+-                      RTE_PTYPE_L4_TCP,
+-              [27] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+-                      RTE_PTYPE_L4_SCTP,
+-              [28] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+-                      RTE_PTYPE_L4_ICMP,
+-              /* All others reserved */
+-      };
++      const uint32_t *ptype_tbl;
+       nb_rx = 0;
+       nb_hold = 0;
+       rxq = rx_queue;
+       rx_id = rxq->rx_tail;
+       rx_ring = rxq->rx_ring;
++      ptype_tbl = rxq->vsi->adapter->ptype_tbl;
+       while (nb_rx < nb_pkts) {
+               rxdp = &rx_ring[rx_id];
+@@ -909,25 +896,7 @@ iavf_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
+       volatile union iavf_rx_desc *rx_ring = rxq->rx_ring;
+       volatile union iavf_rx_desc *rxdp;
+-      static const uint32_t ptype_tbl[UINT8_MAX + 1] __rte_cache_aligned = {
+-              /* [0] reserved */
+-              [1] = RTE_PTYPE_L2_ETHER,
+-              /* [2] - [21] reserved */
+-              [22] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+-                      RTE_PTYPE_L4_FRAG,
+-              [23] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+-                      RTE_PTYPE_L4_NONFRAG,
+-              [24] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+-                      RTE_PTYPE_L4_UDP,
+-              /* [25] reserved */
+-              [26] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+-                      RTE_PTYPE_L4_TCP,
+-              [27] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+-                      RTE_PTYPE_L4_SCTP,
+-              [28] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+-                      RTE_PTYPE_L4_ICMP,
+-              /* All others reserved */
+-      };
++      const uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
+       while (nb_rx < nb_pkts) {
+               rxdp = &rx_ring[rx_id];
+@@ -1094,25 +1063,7 @@ iavf_rx_scan_hw_ring(struct iavf_rx_queue *rxq)
+       int32_t s[IAVF_LOOK_AHEAD], nb_dd;
+       int32_t i, j, nb_rx = 0;
+       uint64_t pkt_flags;
+-      static const uint32_t ptype_tbl[UINT8_MAX + 1] __rte_cache_aligned = {
+-              /* [0] reserved */
+-              [1] = RTE_PTYPE_L2_ETHER,
+-              /* [2] - [21] reserved */
+-              [22] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+-                      RTE_PTYPE_L4_FRAG,
+-              [23] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+-                      RTE_PTYPE_L4_NONFRAG,
+-              [24] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+-                      RTE_PTYPE_L4_UDP,
+-              /* [25] reserved */
+-              [26] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+-                      RTE_PTYPE_L4_TCP,
+-              [27] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+-                      RTE_PTYPE_L4_SCTP,
+-              [28] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+-                      RTE_PTYPE_L4_ICMP,
+-              /* All others reserved */
+-      };
++      const uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
+       rxdp = &rxq->rx_ring[rxq->rx_tail];
+       rxep = &rxq->sw_ring[rxq->rx_tail];
+@@ -1921,3 +1872,542 @@ iavf_dev_tx_desc_status(void *tx_queue, uint16_t offset)
+       return RTE_ETH_TX_DESC_FULL;
+ }
++
++const uint32_t *
++iavf_get_default_ptype_table(void)
++{
++      static const uint32_t ptype_tbl[IAVF_MAX_PKT_TYPE]
++              __rte_cache_aligned = {
++              /* L2 types */
++              /* [0] reserved */
++              [1] = RTE_PTYPE_L2_ETHER,
++              [2] = RTE_PTYPE_L2_ETHER_TIMESYNC,
++              /* [3] - [5] reserved */
++              [6] = RTE_PTYPE_L2_ETHER_LLDP,
++              /* [7] - [10] reserved */
++              [11] = RTE_PTYPE_L2_ETHER_ARP,
++              /* [12] - [21] reserved */
++
++              /* Non tunneled IPv4 */
++              [22] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
++                     RTE_PTYPE_L4_FRAG,
++              [23] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
++                     RTE_PTYPE_L4_NONFRAG,
++              [24] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
++                     RTE_PTYPE_L4_UDP,
++              /* [25] reserved */
++              [26] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
++                     RTE_PTYPE_L4_TCP,
++              [27] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
++                     RTE_PTYPE_L4_SCTP,
++              [28] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
++                     RTE_PTYPE_L4_ICMP,
++
++              /* IPv4 --> IPv4 */
++              [29] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
++                     RTE_PTYPE_TUNNEL_IP |
++                     RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
++                     RTE_PTYPE_INNER_L4_FRAG,
++              [30] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
++                     RTE_PTYPE_TUNNEL_IP |
++                     RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
++                     RTE_PTYPE_INNER_L4_NONFRAG,
++              [31] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
++                     RTE_PTYPE_TUNNEL_IP |
++                     RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
++                     RTE_PTYPE_INNER_L4_UDP,
++              /* [32] reserved */
++              [33] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
++                     RTE_PTYPE_TUNNEL_IP |
++                     RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
++                     RTE_PTYPE_INNER_L4_TCP,
++              [34] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
++                     RTE_PTYPE_TUNNEL_IP |
++                     RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
++                     RTE_PTYPE_INNER_L4_SCTP,
++              [35] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
++                     RTE_PTYPE_TUNNEL_IP |
++                     RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
++                     RTE_PTYPE_INNER_L4_ICMP,
++
++              /* IPv4 --> IPv6 */
++              [36] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
++                     RTE_PTYPE_TUNNEL_IP |
++                     RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
++                     RTE_PTYPE_INNER_L4_FRAG,
++              [37] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
++                     RTE_PTYPE_TUNNEL_IP |
++                     RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
++                     RTE_PTYPE_INNER_L4_NONFRAG,
++              [38] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
++                     RTE_PTYPE_TUNNEL_IP |
++                     RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
++                     RTE_PTYPE_INNER_L4_UDP,
++              /* [39] reserved */
++              [40] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
++                     RTE_PTYPE_TUNNEL_IP |
++                     RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
++                     RTE_PTYPE_INNER_L4_TCP,
++              [41] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
++                     RTE_PTYPE_TUNNEL_IP |
++                     RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
++                     RTE_PTYPE_INNER_L4_SCTP,
++              [42] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
++                     RTE_PTYPE_TUNNEL_IP |
++                     RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
++                     RTE_PTYPE_INNER_L4_ICMP,
++
++              /* IPv4 --> GRE/Teredo/VXLAN */
++              [43] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
++                     RTE_PTYPE_TUNNEL_GRENAT,
++
++              /* IPv4 --> GRE/Teredo/VXLAN --> IPv4 */
++              [44] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
++                     RTE_PTYPE_TUNNEL_GRENAT |
++                     RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
++                     RTE_PTYPE_INNER_L4_FRAG,
++              [45] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
++                     RTE_PTYPE_TUNNEL_GRENAT |
++                     RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
++                     RTE_PTYPE_INNER_L4_NONFRAG,
++              [46] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
++                     RTE_PTYPE_TUNNEL_GRENAT |
++                     RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
++                     RTE_PTYPE_INNER_L4_UDP,
++              /* [47] reserved */
++              [48] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
++                     RTE_PTYPE_TUNNEL_GRENAT |
++                     RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
++                     RTE_PTYPE_INNER_L4_TCP,
++              [49] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
++                     RTE_PTYPE_TUNNEL_GRENAT |
++                     RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
++                     RTE_PTYPE_INNER_L4_SCTP,
++              [50] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
++                     RTE_PTYPE_TUNNEL_GRENAT |
++                     RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
++                     RTE_PTYPE_INNER_L4_ICMP,
++
++              /* IPv4 --> GRE/Teredo/VXLAN --> IPv6 */
++              [51] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
++                     RTE_PTYPE_TUNNEL_GRENAT |
++                     RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
++                     RTE_PTYPE_INNER_L4_FRAG,
++              [52] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
++                     RTE_PTYPE_TUNNEL_GRENAT |
++                     RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
++                     RTE_PTYPE_INNER_L4_NONFRAG,
++              [53] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
++                     RTE_PTYPE_TUNNEL_GRENAT |
++                     RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
++                     RTE_PTYPE_INNER_L4_UDP,
++              /* [54] reserved */
++              [55] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
++                     RTE_PTYPE_TUNNEL_GRENAT |
++                     RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
++                     RTE_PTYPE_INNER_L4_TCP,
++              [56] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
++                     RTE_PTYPE_TUNNEL_GRENAT |
++                     RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
++                     RTE_PTYPE_INNER_L4_SCTP,
++              [57] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
++                     RTE_PTYPE_TUNNEL_GRENAT |
++                     RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
++                     RTE_PTYPE_INNER_L4_ICMP,
++
++              /* IPv4 --> GRE/Teredo/VXLAN --> MAC */
++              [58] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
++                     RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER,
++
++              /* IPv4 --> GRE/Teredo/VXLAN --> MAC --> IPv4 */
++              [59] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
++                     RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
++                     RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
++                     RTE_PTYPE_INNER_L4_FRAG,
++              [60] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
++                     RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
++                     RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
++                     RTE_PTYPE_INNER_L4_NONFRAG,
++              [61] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
++                     RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
++                     RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
++                     RTE_PTYPE_INNER_L4_UDP,
++              /* [62] reserved */
++              [63] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
++                     RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
++                     RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
++                     RTE_PTYPE_INNER_L4_TCP,
++              [64] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
++                     RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
++                     RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
++                     RTE_PTYPE_INNER_L4_SCTP,
++              [65] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
++                     RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
++                     RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
++                     RTE_PTYPE_INNER_L4_ICMP,
++
++              /* IPv4 --> GRE/Teredo/VXLAN --> MAC --> IPv6 */
++              [66] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
++                     RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
++                     RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
++                     RTE_PTYPE_INNER_L4_FRAG,
++              [67] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
++                     RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
++                     RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
++                     RTE_PTYPE_INNER_L4_NONFRAG,
++              [68] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
++                     RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
++                     RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
++                     RTE_PTYPE_INNER_L4_UDP,
++              /* [69] reserved */
++              [70] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
++                     RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
++                     RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
++                     RTE_PTYPE_INNER_L4_TCP,
++              [71] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
++                     RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
++                     RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
++                     RTE_PTYPE_INNER_L4_SCTP,
++              [72] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
++                     RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
++                     RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
++                     RTE_PTYPE_INNER_L4_ICMP,
++              /* [73] - [87] reserved */
++
++              /* Non tunneled IPv6 */
++              [88] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
++                     RTE_PTYPE_L4_FRAG,
++              [89] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
++                     RTE_PTYPE_L4_NONFRAG,
++              [90] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
++                     RTE_PTYPE_L4_UDP,
++              /* [91] reserved */
++              [92] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
++                     RTE_PTYPE_L4_TCP,
++              [93] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
++                     RTE_PTYPE_L4_SCTP,
++              [94] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
++                     RTE_PTYPE_L4_ICMP,
++
++              /* IPv6 --> IPv4 */
++              [95] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
++                     RTE_PTYPE_TUNNEL_IP |
++                     RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
++                     RTE_PTYPE_INNER_L4_FRAG,
++              [96] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
++                     RTE_PTYPE_TUNNEL_IP |
++                     RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
++                     RTE_PTYPE_INNER_L4_NONFRAG,
++              [97] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
++                     RTE_PTYPE_TUNNEL_IP |
++                     RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
++                     RTE_PTYPE_INNER_L4_UDP,
++              /* [98] reserved */
++              [99] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
++                     RTE_PTYPE_TUNNEL_IP |
++                     RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
++                     RTE_PTYPE_INNER_L4_TCP,
++              [100] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
++                      RTE_PTYPE_TUNNEL_IP |
++                      RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
++                      RTE_PTYPE_INNER_L4_SCTP,
++              [101] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
++                      RTE_PTYPE_TUNNEL_IP |
++                      RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
++                      RTE_PTYPE_INNER_L4_ICMP,
++
++              /* IPv6 --> IPv6 */
++              [102] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
++                      RTE_PTYPE_TUNNEL_IP |
++                      RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
++                      RTE_PTYPE_INNER_L4_FRAG,
++              [103] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
++                      RTE_PTYPE_TUNNEL_IP |
++                      RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
++                      RTE_PTYPE_INNER_L4_NONFRAG,
++              [104] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
++                      RTE_PTYPE_TUNNEL_IP |
++                      RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
++                      RTE_PTYPE_INNER_L4_UDP,
++              /* [105] reserved */
++              [106] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
++                      RTE_PTYPE_TUNNEL_IP |
++                      RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
++                      RTE_PTYPE_INNER_L4_TCP,
++              [107] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
++                      RTE_PTYPE_TUNNEL_IP |
++                      RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
++                      RTE_PTYPE_INNER_L4_SCTP,
++              [108] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
++                      RTE_PTYPE_TUNNEL_IP |
++                      RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
++                      RTE_PTYPE_INNER_L4_ICMP,
++
++              /* IPv6 --> GRE/Teredo/VXLAN */
++              [109] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
++                      RTE_PTYPE_TUNNEL_GRENAT,
++
++              /* IPv6 --> GRE/Teredo/VXLAN --> IPv4 */
++              [110] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
++                      RTE_PTYPE_TUNNEL_GRENAT |
++                      RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
++                      RTE_PTYPE_INNER_L4_FRAG,
++              [111] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
++                      RTE_PTYPE_TUNNEL_GRENAT |
++                      RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
++                      RTE_PTYPE_INNER_L4_NONFRAG,
++              [112] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
++                      RTE_PTYPE_TUNNEL_GRENAT |
++                      RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
++                      RTE_PTYPE_INNER_L4_UDP,
++              /* [113] reserved */
++              [114] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
++                      RTE_PTYPE_TUNNEL_GRENAT |
++                      RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
++                      RTE_PTYPE_INNER_L4_TCP,
++              [115] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
++                      RTE_PTYPE_TUNNEL_GRENAT |
++                      RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
++                      RTE_PTYPE_INNER_L4_SCTP,
++              [116] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
++                      RTE_PTYPE_TUNNEL_GRENAT |
++                      RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
++                      RTE_PTYPE_INNER_L4_ICMP,
++
++              /* IPv6 --> GRE/Teredo/VXLAN --> IPv6 */
++              [117] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
++                      RTE_PTYPE_TUNNEL_GRENAT |
++                      RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
++                      RTE_PTYPE_INNER_L4_FRAG,
++              [118] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
++                      RTE_PTYPE_TUNNEL_GRENAT |
++                      RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
++                      RTE_PTYPE_INNER_L4_NONFRAG,
++              [119] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
++                      RTE_PTYPE_TUNNEL_GRENAT |
++                      RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
++                      RTE_PTYPE_INNER_L4_UDP,
++              /* [120] reserved */
++              [121] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
++                      RTE_PTYPE_TUNNEL_GRENAT |
++                      RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
++                      RTE_PTYPE_INNER_L4_TCP,
++              [122] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
++                      RTE_PTYPE_TUNNEL_GRENAT |
++                      RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
++                      RTE_PTYPE_INNER_L4_SCTP,
++              [123] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
++                      RTE_PTYPE_TUNNEL_GRENAT |
++                      RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
++                      RTE_PTYPE_INNER_L4_ICMP,
++
++              /* IPv6 --> GRE/Teredo/VXLAN --> MAC */
++              [124] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
++                      RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER,
++
++              /* IPv6 --> GRE/Teredo/VXLAN --> MAC --> IPv4 */
++              [125] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
++                      RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
++                      RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
++                      RTE_PTYPE_INNER_L4_FRAG,
++              [126] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
++                      RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
++                      RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
++                      RTE_PTYPE_INNER_L4_NONFRAG,
++              [127] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
++                      RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
++                      RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
++                      RTE_PTYPE_INNER_L4_UDP,
++              /* [128] reserved */
++              [129] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
++                      RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
++                      RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
++                      RTE_PTYPE_INNER_L4_TCP,
++              [130] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
++                      RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
++                      RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
++                      RTE_PTYPE_INNER_L4_SCTP,
++              [131] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
++                      RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
++                      RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
++                      RTE_PTYPE_INNER_L4_ICMP,
++
++              /* IPv6 --> GRE/Teredo/VXLAN --> MAC --> IPv6 */
++              [132] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
++                      RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
++                      RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
++                      RTE_PTYPE_INNER_L4_FRAG,
++              [133] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
++                      RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
++                      RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
++                      RTE_PTYPE_INNER_L4_NONFRAG,
++              [134] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
++                      RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
++                      RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
++                      RTE_PTYPE_INNER_L4_UDP,
++              /* [135] reserved */
++              [136] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
++                      RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
++                      RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
++                      RTE_PTYPE_INNER_L4_TCP,
++              [137] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
++                      RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
++                      RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
++                      RTE_PTYPE_INNER_L4_SCTP,
++              [138] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
++                      RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
++                      RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
++                      RTE_PTYPE_INNER_L4_ICMP,
++              /* [139] - [299] reserved */
++
++              /* PPPoE */
++              [300] = RTE_PTYPE_L2_ETHER_PPPOE,
++              [301] = RTE_PTYPE_L2_ETHER_PPPOE,
++
++              /* PPPoE --> IPv4 */
++              [302] = RTE_PTYPE_L2_ETHER_PPPOE |
++                      RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
++                      RTE_PTYPE_L4_FRAG,
++              [303] = RTE_PTYPE_L2_ETHER_PPPOE |
++                      RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
++                      RTE_PTYPE_L4_NONFRAG,
++              [304] = RTE_PTYPE_L2_ETHER_PPPOE |
++                      RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
++                      RTE_PTYPE_L4_UDP,
++              [305] = RTE_PTYPE_L2_ETHER_PPPOE |
++                      RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
++                      RTE_PTYPE_L4_TCP,
++              [306] = RTE_PTYPE_L2_ETHER_PPPOE |
++                      RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
++                      RTE_PTYPE_L4_SCTP,
++              [307] = RTE_PTYPE_L2_ETHER_PPPOE |
++                      RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
++                      RTE_PTYPE_L4_ICMP,
++
++              /* PPPoE --> IPv6 */
++              [308] = RTE_PTYPE_L2_ETHER_PPPOE |
++                      RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
++                      RTE_PTYPE_L4_FRAG,
++              [309] = RTE_PTYPE_L2_ETHER_PPPOE |
++                      RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
++                      RTE_PTYPE_L4_NONFRAG,
++              [310] = RTE_PTYPE_L2_ETHER_PPPOE |
++                      RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
++                      RTE_PTYPE_L4_UDP,
++              [311] = RTE_PTYPE_L2_ETHER_PPPOE |
++                      RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
++                      RTE_PTYPE_L4_TCP,
++              [312] = RTE_PTYPE_L2_ETHER_PPPOE |
++                      RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
++                      RTE_PTYPE_L4_SCTP,
++              [313] = RTE_PTYPE_L2_ETHER_PPPOE |
++                      RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
++                      RTE_PTYPE_L4_ICMP,
++              /* [314] - [324] reserved */
++
++              /* IPv4/IPv6 --> GTPC/GTPU */
++              [325] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
++                      RTE_PTYPE_TUNNEL_GTPC,
++              [326] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
++                      RTE_PTYPE_TUNNEL_GTPC,
++              [327] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
++                      RTE_PTYPE_TUNNEL_GTPC,
++              [328] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
++                      RTE_PTYPE_TUNNEL_GTPC,
++              [329] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
++                      RTE_PTYPE_TUNNEL_GTPU,
++              [330] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
++                      RTE_PTYPE_TUNNEL_GTPU,
++
++              /* IPv4 --> GTPU --> IPv4 */
++              [331] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
++                      RTE_PTYPE_TUNNEL_GTPU |
++                      RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
++                      RTE_PTYPE_INNER_L4_FRAG,
++              [332] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
++                      RTE_PTYPE_TUNNEL_GTPU |
++                      RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
++                      RTE_PTYPE_INNER_L4_NONFRAG,
++              [333] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
++                      RTE_PTYPE_TUNNEL_GTPU |
++                      RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
++                      RTE_PTYPE_INNER_L4_UDP,
++              [334] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
++                      RTE_PTYPE_TUNNEL_GTPU |
++                      RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
++                      RTE_PTYPE_INNER_L4_TCP,
++              [335] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
++                      RTE_PTYPE_TUNNEL_GTPU |
++                      RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
++                      RTE_PTYPE_INNER_L4_ICMP,
++
++              /* IPv6 --> GTPU --> IPv4 */
++              [336] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
++                      RTE_PTYPE_TUNNEL_GTPU |
++                      RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
++                      RTE_PTYPE_INNER_L4_FRAG,
++              [337] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
++                      RTE_PTYPE_TUNNEL_GTPU |
++                      RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
++                      RTE_PTYPE_INNER_L4_NONFRAG,
++              [338] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
++                      RTE_PTYPE_TUNNEL_GTPU |
++                      RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
++                      RTE_PTYPE_INNER_L4_UDP,
++              [339] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
++                      RTE_PTYPE_TUNNEL_GTPU |
++                      RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
++                      RTE_PTYPE_INNER_L4_TCP,
++              [340] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
++                      RTE_PTYPE_TUNNEL_GTPU |
++                      RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
++                      RTE_PTYPE_INNER_L4_ICMP,
++
++              /* IPv4 --> GTPU --> IPv6 */
++              [341] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
++                      RTE_PTYPE_TUNNEL_GTPU |
++                      RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
++                      RTE_PTYPE_INNER_L4_FRAG,
++              [342] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
++                      RTE_PTYPE_TUNNEL_GTPU |
++                      RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
++                      RTE_PTYPE_INNER_L4_NONFRAG,
++              [343] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
++                      RTE_PTYPE_TUNNEL_GTPU |
++                      RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
++                      RTE_PTYPE_INNER_L4_UDP,
++              [344] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
++                      RTE_PTYPE_TUNNEL_GTPU |
++                      RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
++                      RTE_PTYPE_INNER_L4_TCP,
++              [345] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
++                      RTE_PTYPE_TUNNEL_GTPU |
++                      RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
++                      RTE_PTYPE_INNER_L4_ICMP,
++
++              /* IPv6 --> GTPU --> IPv6 */
++              [346] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
++                      RTE_PTYPE_TUNNEL_GTPU |
++                      RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
++                      RTE_PTYPE_INNER_L4_FRAG,
++              [347] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
++                      RTE_PTYPE_TUNNEL_GTPU |
++                      RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
++                      RTE_PTYPE_INNER_L4_NONFRAG,
++              [348] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
++                      RTE_PTYPE_TUNNEL_GTPU |
++                      RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
++                      RTE_PTYPE_INNER_L4_UDP,
++              [349] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
++                      RTE_PTYPE_TUNNEL_GTPU |
++                      RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
++                      RTE_PTYPE_INNER_L4_TCP,
++              [350] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
++                      RTE_PTYPE_TUNNEL_GTPU |
++                      RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
++                      RTE_PTYPE_INNER_L4_ICMP,
++              /* All others reserved */
++      };
++
++      return ptype_tbl;
++}
+diff --git a/drivers/net/iavf/iavf_rxtx.h b/drivers/net/iavf/iavf_rxtx.h
+index 60d02c521..09b5bd99e 100644
+--- a/drivers/net/iavf/iavf_rxtx.h
++++ b/drivers/net/iavf/iavf_rxtx.h
+@@ -105,6 +105,7 @@ struct iavf_rx_queue {
+       uint16_t rx_buf_len;    /* The packet buffer size */
+       uint16_t rx_hdr_len;    /* The header buffer size */
+       uint16_t max_pkt_len;   /* Maximum packet length */
++      struct iavf_vsi *vsi; /**< the VSI this queue belongs to */
+       bool q_set;             /* if rx queue has been configured */
+       bool rx_deferred_start; /* don't start this queue in dev start */
+@@ -216,6 +217,8 @@ int iavf_tx_vec_dev_check(struct rte_eth_dev *dev);
+ int iavf_rxq_vec_setup(struct iavf_rx_queue *rxq);
+ int iavf_txq_vec_setup(struct iavf_tx_queue *txq);
++const uint32_t *iavf_get_default_ptype_table(void);
++
+ static inline
+ void iavf_dump_rx_descriptor(struct iavf_rx_queue *rxq,
+                           const volatile void *desc,
+diff --git a/drivers/net/iavf/iavf_rxtx_vec_avx2.c b/drivers/net/iavf/iavf_rxtx_vec_avx2.c
+index 7c5d23fd0..2587083d8 100644
+--- a/drivers/net/iavf/iavf_rxtx_vec_avx2.c
++++ b/drivers/net/iavf/iavf_rxtx_vec_avx2.c
+@@ -142,25 +142,8 @@ _iavf_recv_raw_pkts_vec_avx2(struct iavf_rx_queue *rxq,
+ #define IAVF_DESCS_PER_LOOP_AVX 8
+       /* const uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl; */
+-      static const uint32_t type_table[UINT8_MAX + 1] __rte_cache_aligned = {
+-              /* [0] reserved */
+-              [1] = RTE_PTYPE_L2_ETHER,
+-              /* [2] - [21] reserved */
+-              [22] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+-                      RTE_PTYPE_L4_FRAG,
+-              [23] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+-                      RTE_PTYPE_L4_NONFRAG,
+-              [24] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+-                      RTE_PTYPE_L4_UDP,
+-              /* [25] reserved */
+-              [26] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+-                      RTE_PTYPE_L4_TCP,
+-              [27] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+-                      RTE_PTYPE_L4_SCTP,
+-              [28] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+-                      RTE_PTYPE_L4_ICMP,
+-              /* All others reserved */
+-      };
++      const uint32_t *type_table = rxq->vsi->adapter->ptype_tbl;
++
+       const __m256i mbuf_init = _mm256_set_epi64x(0, 0,
+                       0, rxq->mbuf_initializer);
+       /* struct iavf_rx_entry *sw_ring = &rxq->sw_ring[rxq->rx_tail]; */
+diff --git a/drivers/net/iavf/iavf_rxtx_vec_sse.c b/drivers/net/iavf/iavf_rxtx_vec_sse.c
+index b978cc6e0..0365c49e1 100644
+--- a/drivers/net/iavf/iavf_rxtx_vec_sse.c
++++ b/drivers/net/iavf/iavf_rxtx_vec_sse.c
+@@ -192,29 +192,11 @@ desc_to_olflags_v(struct iavf_rx_queue *rxq, __m128i descs[4],
+ #define PKTLEN_SHIFT     10
+ static inline void
+-desc_to_ptype_v(__m128i descs[4], struct rte_mbuf **rx_pkts)
++desc_to_ptype_v(__m128i descs[4], struct rte_mbuf **rx_pkts,
++              const uint32_t *type_table)
+ {
+       __m128i ptype0 = _mm_unpackhi_epi64(descs[0], descs[1]);
+       __m128i ptype1 = _mm_unpackhi_epi64(descs[2], descs[3]);
+-      static const uint32_t type_table[UINT8_MAX + 1] __rte_cache_aligned = {
+-              /* [0] reserved */
+-              [1] = RTE_PTYPE_L2_ETHER,
+-              /* [2] - [21] reserved */
+-              [22] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+-                      RTE_PTYPE_L4_FRAG,
+-              [23] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+-                      RTE_PTYPE_L4_NONFRAG,
+-              [24] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+-                      RTE_PTYPE_L4_UDP,
+-              /* [25] reserved */
+-              [26] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+-                      RTE_PTYPE_L4_TCP,
+-              [27] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+-                      RTE_PTYPE_L4_SCTP,
+-              [28] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+-                      RTE_PTYPE_L4_ICMP,
+-              /* All others reserved */
+-      };
+       ptype0 = _mm_srli_epi64(ptype0, 30);
+       ptype1 = _mm_srli_epi64(ptype1, 30);
+@@ -240,6 +222,7 @@ _recv_raw_pkts_vec(struct iavf_rx_queue *rxq, struct rte_mbuf **rx_pkts,
+       int pos;
+       uint64_t var;
+       __m128i shuf_msk;
++      const uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
+       __m128i crc_adjust = _mm_set_epi16(
+                               0, 0, 0,    /* ignore non-length fields */
+@@ -456,7 +439,7 @@ _recv_raw_pkts_vec(struct iavf_rx_queue *rxq, struct rte_mbuf **rx_pkts,
+                       pkt_mb2);
+               _mm_storeu_si128((void *)&rx_pkts[pos]->rx_descriptor_fields1,
+                                pkt_mb1);
+-              desc_to_ptype_v(descs, &rx_pkts[pos]);
++              desc_to_ptype_v(descs, &rx_pkts[pos], ptype_tbl);
+               /* C.4 calc avaialbe number of desc */
+               var = __builtin_popcountll(_mm_cvtsi128_si64(staterr));
+               nb_pkts_recd += var;
+-- 
+2.17.1
+
diff --git a/build/external/patches/dpdk_20.02/0002-common-iavf-add-virtual-channel-opcodes-39-40-43.patch b/build/external/patches/dpdk_20.02/0002-common-iavf-add-virtual-channel-opcodes-39-40-43.patch
new file mode 100644
index 0000000..ee4f74e
--- /dev/null
@@ -0,0 +1,75 @@
+From e2a382090a344152a79d079bb0af32bc7f03fb16 Mon Sep 17 00:00:00 2001
+From: Chenmin Sun <chenmin.sun@intel.com>
+Date: Fri, 27 Mar 2020 08:26:17 +0800
+Subject: [DPDK 02/17] common/iavf: add virtual channel opcodes 39,40,43
+
+VIRTCHNL_OP_DCF_CMD_DESC = 39,
+VIRTCHNL_OP_DCF_CMD_BUFF = 40,
+VIRTCHNL_OP_DCF_GET_PKG_INFO = 43,
+
+Signed-off-by: Chenmin Sun <chenmin.sun@intel.com>
+---
+ drivers/common/iavf/virtchnl.h | 33 +++++++++++++++++++++++++++++++++
+ 1 file changed, 33 insertions(+)
+
+diff --git a/drivers/common/iavf/virtchnl.h b/drivers/common/iavf/virtchnl.h
+index 2fbbb9e28..83a7a7174 100644
+--- a/drivers/common/iavf/virtchnl.h
++++ b/drivers/common/iavf/virtchnl.h
+@@ -129,6 +129,9 @@ enum virtchnl_ops {
+       VIRTCHNL_OP_ADD_CLOUD_FILTER = 32,
+       VIRTCHNL_OP_DEL_CLOUD_FILTER = 33,
+       /* opcodes 34, 35, 36, 37 and 38 are reserved */
++      VIRTCHNL_OP_DCF_CMD_DESC = 39,
++      VIRTCHNL_OP_DCF_CMD_BUFF = 40,
++      VIRTCHNL_OP_DCF_GET_PKG_INFO = 43,
+ };
+ /* These macros are used to generate compilation errors if a structure/union
+@@ -266,6 +269,28 @@ struct virtchnl_vf_resource {
+ VIRTCHNL_CHECK_STRUCT_LEN(36, virtchnl_vf_resource);
++#define PKG_NAME_SIZE 32
++#define DSN_SIZE      8
++
++struct pkg_version {
++      u8 major;
++      u8 minor;
++      u8 update;
++      u8 draft;
++};
++
++VIRTCHNL_CHECK_STRUCT_LEN(4, pkg_version);
++
++struct virtchnl_pkg_info {
++      struct pkg_version pkg_ver;
++      u32 track_id;
++      char pkg_name[PKG_NAME_SIZE];
++      u8 dsn[DSN_SIZE];
++};
++
++
++VIRTCHNL_CHECK_STRUCT_LEN(48, virtchnl_pkg_info);
++
+ /* VIRTCHNL_OP_CONFIG_TX_QUEUE
+  * VF sends this message to set up parameters for one TX queue.
+  * External data buffer contains one instance of virtchnl_txq_info.
+@@ -879,6 +904,14 @@ virtchnl_vc_validate_vf_msg(struct virtchnl_version_info *ver, u32 v_opcode,
+       case VIRTCHNL_OP_DEL_CLOUD_FILTER:
+               valid_len = sizeof(struct virtchnl_filter);
+               break;
++      case VIRTCHNL_OP_DCF_CMD_DESC:
++      case VIRTCHNL_OP_DCF_CMD_BUFF:
++              /* These two opcodes are specific to handle the AdminQ command,
++               * so the validation needs to be done in PF's context.
++               */
++               return 0;
++      case VIRTCHNL_OP_DCF_GET_PKG_INFO:
++              break;
+       /* These are always errors coming from the VF. */
+       case VIRTCHNL_OP_EVENT:
+       case VIRTCHNL_OP_UNKNOWN:
+-- 
+2.17.1
+
diff --git a/build/external/patches/dpdk_20.02/0003-common-iavf-support-VSI-mapping-table.patch b/build/external/patches/dpdk_20.02/0003-common-iavf-support-VSI-mapping-table.patch
new file mode 100644
index 0000000..10a30f5
--- /dev/null
@@ -0,0 +1,90 @@
+From a2c92bf26e724eacd52971c4a83861ada82a6cb4 Mon Sep 17 00:00:00 2001
+From: Chenmin Sun <chenmin.sun@intel.com>
+Date: Fri, 17 Apr 2020 00:37:41 +0800
+Subject: [DPDK 03/17] common/iavf: support VSI mapping table
+
+Add an opcode for getting VSI mapping table.
+Add an virtchnl event code for VF reset done.
+
+Signed-off-by: Beilei Xing <beilei.xing@intel.com>
+Signed-off-by: Paul M Stillwell Jr <paul.m.stillwell.jr@intel.com>
+Signed-off-by: Qi Zhang <qi.z.zhang@intel.com>
+Signed-off-by: Chenmin Sun <chenmin.sun@intel.com>
+
+Acked-by: Beilei Xing <beilei.xing@intel.com>
+---
+ drivers/common/iavf/virtchnl.h | 30 ++++++++++++++++++++++++++++++
+ 1 file changed, 30 insertions(+)
+
+diff --git a/drivers/common/iavf/virtchnl.h b/drivers/common/iavf/virtchnl.h
+index 83a7a7174..6f9cf18cb 100644
+--- a/drivers/common/iavf/virtchnl.h
++++ b/drivers/common/iavf/virtchnl.h
+@@ -131,6 +131,7 @@ enum virtchnl_ops {
+       /* opcodes 34, 35, 36, 37 and 38 are reserved */
+       VIRTCHNL_OP_DCF_CMD_DESC = 39,
+       VIRTCHNL_OP_DCF_CMD_BUFF = 40,
++      VIRTCHNL_OP_DCF_GET_VSI_MAP = 42,
+       VIRTCHNL_OP_DCF_GET_PKG_INFO = 43,
+ };
+@@ -645,6 +646,25 @@ struct virtchnl_filter {
+ VIRTCHNL_CHECK_STRUCT_LEN(272, virtchnl_filter);
++/* VIRTCHNL_OP_DCF_GET_VSI_MAP
++ * VF sends this message to get VSI mapping table.
++ * PF responds with an indirect message containing VF's
++ * HW VSI IDs.
++ * The index of vf_vsi array is the logical VF ID, the
++ * value of vf_vsi array is the VF's HW VSI ID with its
++ * valid configuration.
++ */
++struct virtchnl_dcf_vsi_map {
++      u16 pf_vsi;     /* PF's HW VSI ID */
++      u16 num_vfs;    /* The actual number of VFs allocated */
++#define VIRTCHNL_DCF_VF_VSI_ID_S      0
++#define VIRTCHNL_DCF_VF_VSI_ID_M      (0xFFF << VIRTCHNL_DCF_VF_VSI_ID_S)
++#define VIRTCHNL_DCF_VF_VSI_VALID     (1 << 15)
++      u16 vf_vsi[1];
++};
++
++VIRTCHNL_CHECK_STRUCT_LEN(6, virtchnl_dcf_vsi_map);
++
+ /* VIRTCHNL_OP_EVENT
+  * PF sends this message to inform the VF driver of events that may affect it.
+  * No direct response is expected from the VF, though it may generate other
+@@ -655,6 +675,7 @@ enum virtchnl_event_codes {
+       VIRTCHNL_EVENT_LINK_CHANGE,
+       VIRTCHNL_EVENT_RESET_IMPENDING,
+       VIRTCHNL_EVENT_PF_DRIVER_CLOSE,
++      VIRTCHNL_EVENT_DCF_VSI_MAP_UPDATE,
+ };
+ #define PF_EVENT_SEVERITY_INFO                0
+@@ -682,6 +703,10 @@ struct virtchnl_pf_event {
+                       u32 link_speed;
+                       u8 link_status;
+               } link_event_adv;
++              struct {
++                      u16 vf_id;
++                      u16 vsi_id;
++              } vf_vsi_map;
+       } event_data;
+       int severity;
+@@ -912,6 +937,11 @@ virtchnl_vc_validate_vf_msg(struct virtchnl_version_info *ver, u32 v_opcode,
+                return 0;
+       case VIRTCHNL_OP_DCF_GET_PKG_INFO:
+               break;
++      case VIRTCHNL_OP_DCF_GET_VSI_MAP:
++              /* The two opcodes are required by DCF without message buffer,
++               * so the valid length keeps the default value 0.
++               */
++              break;
+       /* These are always errors coming from the VF. */
+       case VIRTCHNL_OP_EVENT:
+       case VIRTCHNL_OP_UNKNOWN:
+-- 
+2.17.1
+
diff --git a/build/external/patches/dpdk_20.02/0004-common-iavf-add-PTYPE-definition.patch b/build/external/patches/dpdk_20.02/0004-common-iavf-add-PTYPE-definition.patch
new file mode 100644
index 0000000..2215bd3
--- /dev/null
@@ -0,0 +1,33 @@
+From 585d75cec67cc3f4ee2eb32dc33fb7e2174b3125 Mon Sep 17 00:00:00 2001
+From: Qi Zhang <qi.z.zhang@intel.com>
+Date: Thu, 9 Apr 2020 12:50:56 +0800
+Subject: [DPDK 04/17] common/iavf: add PTYPE definition
+
+Add IAVF_RX_PTYPE_PARSER_ABORTED definition, so iavf driver will know
+opcode for parser aborted packets.
+Without this definition driver would have to rely on magic numbers.
+
+Signed-off-by: Przemyslaw Patynowski <przemyslawx.patynowski@intel.com>
+Signed-off-by: Paul M Stillwell Jr <paul.m.stillwell.jr@intel.com>
+Signed-off-by: Qi Zhang <qi.z.zhang@intel.com>
+---
+ drivers/common/iavf/iavf_type.h | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/common/iavf/iavf_type.h b/drivers/common/iavf/iavf_type.h
+index 6f85f8c04..97a25b2d1 100644
+--- a/drivers/common/iavf/iavf_type.h
++++ b/drivers/common/iavf/iavf_type.h
+@@ -552,7 +552,8 @@ enum iavf_rx_l2_ptype {
+       IAVF_RX_PTYPE_GRENAT4_MAC_PAY3                  = 58,
+       IAVF_RX_PTYPE_GRENAT4_MACVLAN_IPV6_ICMP_PAY4    = 87,
+       IAVF_RX_PTYPE_GRENAT6_MAC_PAY3                  = 124,
+-      IAVF_RX_PTYPE_GRENAT6_MACVLAN_IPV6_ICMP_PAY4    = 153
++      IAVF_RX_PTYPE_GRENAT6_MACVLAN_IPV6_ICMP_PAY4    = 153,
++      IAVF_RX_PTYPE_PARSER_ABORTED                    = 255
+ };
+ struct iavf_rx_ptype_decoded {
+-- 
+2.17.1
+
diff --git a/build/external/patches/dpdk_20.02/0005-common-iavf-add-virtual-channel-support-for-Flex-RXD.patch b/build/external/patches/dpdk_20.02/0005-common-iavf-add-virtual-channel-support-for-Flex-RXD.patch
new file mode 100644
index 0000000..18abe00
--- /dev/null
@@ -0,0 +1,74 @@
+From 296799a9a9006e4c99e428c52818d1e34b26aec2 Mon Sep 17 00:00:00 2001
+From: Chenmin Sun <chenmin.sun@intel.com>
+Date: Fri, 17 Apr 2020 01:49:08 +0800
+Subject: [DPDK 05/17] common/iavf: add virtual channel support for Flex RXD
+
+Add new VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC flag, opcode
+VIRTCHNL_OP_GET_SUPPORTED_RXDIDS and add member rxdid
+in struct virtchnl_rxq_info to support AVF Flex RXD
+extension.
+
+Signed-off-by: Leyi Rong <leyi.rong@intel.com>
+Signed-off-by: Paul M Stillwell Jr <paul.m.stillwell.jr@intel.com>
+Signed-off-by: Qi Zhang <qi.z.zhang@intel.com>
+Signed-off-by: Chenmin Sun <chenmin.sun@intel.com>
+---
+ drivers/common/iavf/virtchnl.h | 14 +++++++++++++-
+ 1 file changed, 13 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/common/iavf/virtchnl.h b/drivers/common/iavf/virtchnl.h
+index 6f9cf18cb..e8d936843 100644
+--- a/drivers/common/iavf/virtchnl.h
++++ b/drivers/common/iavf/virtchnl.h
+@@ -133,6 +133,7 @@ enum virtchnl_ops {
+       VIRTCHNL_OP_DCF_CMD_BUFF = 40,
+       VIRTCHNL_OP_DCF_GET_VSI_MAP = 42,
+       VIRTCHNL_OP_DCF_GET_PKG_INFO = 43,
++      VIRTCHNL_OP_GET_SUPPORTED_RXDIDS = 44,
+ };
+ /* These macros are used to generate compilation errors if a structure/union
+@@ -247,6 +248,7 @@ VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_vsi_resource);
+ #define VIRTCHNL_VF_OFFLOAD_ADQ                       0X00800000
+ #define VIRTCHNL_VF_OFFLOAD_ADQ_V2            0X01000000
+ #define VIRTCHNL_VF_OFFLOAD_USO                       0X02000000
++#define VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC      0X04000000
+       /* 0X80000000 is reserved */
+ /* Define below the capability flags that are not offloads */
+@@ -332,7 +334,9 @@ struct virtchnl_rxq_info {
+       u32 databuffer_size;
+       u32 max_pkt_size;
+       u8 crc_disable;
+-      u8 pad1[3];
++      /* only used when VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC is supported */
++      u8 rxdid;
++      u8 pad1[2];
+       u64 dma_ring_addr;
+       enum virtchnl_rx_hsplit rx_split_pos; /* deprecated with AVF 1.0 */
+       u32 pad2;
+@@ -665,6 +669,12 @@ struct virtchnl_dcf_vsi_map {
+ VIRTCHNL_CHECK_STRUCT_LEN(6, virtchnl_dcf_vsi_map);
++struct virtchnl_supported_rxdids {
++      u64 supported_rxdids;
++};
++
++VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_supported_rxdids);
++
+ /* VIRTCHNL_OP_EVENT
+  * PF sends this message to inform the VF driver of events that may affect it.
+  * No direct response is expected from the VF, though it may generate other
+@@ -937,6 +947,8 @@ virtchnl_vc_validate_vf_msg(struct virtchnl_version_info *ver, u32 v_opcode,
+                return 0;
+       case VIRTCHNL_OP_DCF_GET_PKG_INFO:
+               break;
++      case VIRTCHNL_OP_GET_SUPPORTED_RXDIDS:
++              break;
+       case VIRTCHNL_OP_DCF_GET_VSI_MAP:
+               /* The two opcodes are required by DCF without message buffer,
+                * so the valid length keeps the default value 0.
+-- 
+2.17.1
+
diff --git a/build/external/patches/dpdk_20.02/0006-common-iavf-add-virtual-channel-protocol-header.patch b/build/external/patches/dpdk_20.02/0006-common-iavf-add-virtual-channel-protocol-header.patch
new file mode 100644
index 0000000..1cd03b2
--- /dev/null
@@ -0,0 +1,264 @@
+From e9e33a31aa58293c0442ddbfb96f3b8badfad250 Mon Sep 17 00:00:00 2001
+From: Qi Zhang <qi.z.zhang@intel.com>
+Date: Thu, 9 Apr 2020 13:10:12 +0800
+Subject: [DPDK 06/17] common/iavf: add virtual channel protocol header
+
+To support the AVF's advanced FDIR and RSS features, we need to figure
+out what kind of data structure should be passed from VF to PF to
+describe an FDIR rule or an RSS config rule. The common part of the
+requirement is a data structure that represents the input set selection
+of a rule's hash key.
+
+An input set selection is a group of fields selected from one or more
+network protocol layers that identifies a specific flow.
+For example, select the dst IP address from an IPv4 header combined with
+the dst port from the TCP header as the input set for an IPv4/TCP flow.
+
+The patch adds a new data structure, virtchnl_proto_hdrs, to abstract
+a group of network protocol headers, composed of layers of network
+protocol header (virtchnl_proto_hdr).
+
+A protocol header contains a 32-bit mask (field_selector) to describe
+which fields are selected as input sets, as well as a header type
+(enum virtchnl_proto_hdr_type). Each bit is mapped to a field in
+enum virtchnl_proto_hdr_field guided by its header type.
+
++------------+-----------+------------------------------+
+|            | Proto Hdr | Header Type A                |
+|            |           +------------------------------+
+|            |           | BIT 31 | ... | BIT 1 | BIT 0 |
+|            |-----------+------------------------------+
+|Proto Hdrs  | Proto Hdr | Header Type B                |
+|            |           +------------------------------+
+|            |           | BIT 31 | ... | BIT 1 | BIT 0 |
+|            |-----------+------------------------------+
+|            | Proto Hdr | Header Type C                |
+|            |           +------------------------------+
+|            |           | BIT 31 | ... | BIT 1 | BIT 0 |
+|            |-----------+------------------------------+
+|            |    ....                                  |
++-------------------------------------------------------+
+
+All fields in enum virtchnl_proto_hdr_field are grouped by header type,
+and the value of the first field of each header type is always 32-aligned.
+
+enum proto_hdr_type {
+       header_type_A = 0;
+       header_type_B = 1;
+       ....
+}
+
+enum proto_hdr_field {
+       /* header type A */
+       header_A_field_0 = 0,
+       header_A_field_1 = 1,
+       header_A_field_2 = 2,
+       header_A_field_3 = 3,
+
+       /* header type B */
+       header_B_field_0 = 32, // = header_type_B << 5
+       header_B_field_1 = 33,
+       header_B_field_2 = 34,
+       header_B_field_3 = 35,
+       ....
+};
+
+So we have:
+proto_hdr_type = proto_hdr_field / 32
+bit offset = proto_hdr_field % 32
+
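+As a concrete walk-through of this mapping (using the enum values defined
+further down in this patch), VIRTCHNL_PROTO_HDR_UDP has the value 7, so its
+fields start at 7 << 5 = 224:
+
+       VIRTCHNL_PROTO_HDR_UDP_SRC_PORT = 224  /* 224 / 32 = 7 (UDP), bit 0 */
+       VIRTCHNL_PROTO_HDR_UDP_DST_PORT = 225  /* 225 / 32 = 7 (UDP), bit 1 */
+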
+To simplify operations on protocol headers, a couple of helper macros are
+added. For example, to select the src IP and the dst port as the input set
+for an IPv4/UDP flow, we have:
+
+struct virtchnl_proto_hdr hdr[2];
+
+VIRTCHNL_SET_PROTO_HDR_TYPE(&hdr[0], IPV4)
+VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(&hdr[0], IPV4, SRC)
+
+VIRTCHNL_SET_PROTO_HDR_TYPE(&hdr[1], UDP)
+VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(&hdr[1], UDP, DST_PORT)
+
+A protocol header also contains a byte array. This field should only
+be used by an FDIR rule and should be ignored by RSS. For an FDIR rule,
+the byte array is used to store the protocol header of a training
+packet. The byte array must be in network byte order.
+
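+As a minimal sketch, the IPv4/UDP input set above could be filled in with
+the structures and macros added by this patch as follows (the enclosing
+function and the memset are illustrative only, not part of this change):
+
+static void example_build_ipv4_udp_input_set(struct virtchnl_proto_hdrs *hdrs)
+{
+       struct virtchnl_proto_hdr *ip4 = &hdrs->proto_hdr[0];
+       struct virtchnl_proto_hdr *udp = &hdrs->proto_hdr[1];
+
+       memset(hdrs, 0, sizeof(*hdrs));
+       hdrs->tunnel_level = 0;         /* match from the outer layer */
+       hdrs->count = 2;                /* IPv4 header followed by UDP header */
+
+       VIRTCHNL_SET_PROTO_HDR_TYPE(ip4, IPV4);
+       VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(ip4, IPV4, SRC);
+
+       VIRTCHNL_SET_PROTO_HDR_TYPE(udp, UDP);
+       VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(udp, UDP, DST_PORT);
+}
+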
+Signed-off-by: Jeff Guo <jia.guo@intel.com>
+Signed-off-by: Paul M Stillwell Jr <paul.m.stillwell.jr@intel.com>
+Signed-off-by: Qi Zhang <qi.z.zhang@intel.com>
+---
+ drivers/common/iavf/virtchnl.h | 156 +++++++++++++++++++++++++++++++++
+ 1 file changed, 156 insertions(+)
+
+diff --git a/drivers/common/iavf/virtchnl.h b/drivers/common/iavf/virtchnl.h
+index e8d936843..667762643 100644
+--- a/drivers/common/iavf/virtchnl.h
++++ b/drivers/common/iavf/virtchnl.h
+@@ -769,6 +769,162 @@ enum virtchnl_vfr_states {
+       VIRTCHNL_VFR_VFACTIVE,
+ };
++#define VIRTCHNL_MAX_NUM_PROTO_HDRS   32
++#define PROTO_HDR_SHIFT                       5
++#define PROTO_HDR_FIELD_START(proto_hdr_type) \
++                                      (proto_hdr_type << PROTO_HDR_SHIFT)
++#define PROTO_HDR_FIELD_MASK ((1UL << PROTO_HDR_SHIFT) - 1)
++
++/* VF use these macros to configure each protocol header.
++ * Specify which protocol headers and protocol header fields base on
++ * virtchnl_proto_hdr_type and virtchnl_proto_hdr_field.
++ * @param hdr: a struct of virtchnl_proto_hdr
++ * @param hdr_type: ETH/IPV4/TCP, etc
++ * @param field: SRC/DST/TEID/SPI, etc
++ */
++#define VIRTCHNL_ADD_PROTO_HDR_FIELD(hdr, field) \
++      ((hdr)->field_selector |= BIT((field) & PROTO_HDR_FIELD_MASK))
++#define VIRTCHNL_DEL_PROTO_HDR_FIELD(hdr, field) \
++      ((hdr)->field_selector &= ~BIT((field) & PROTO_HDR_FIELD_MASK))
++#define VIRTCHNL_TEST_PROTO_HDR_FIELD(hdr, val) \
++      ((hdr)->field_selector & BIT((val) & PROTO_HDR_FIELD_MASK))
++#define VIRTCHNL_GET_PROTO_HDR_FIELD(hdr)     ((hdr)->field_selector)
++
++#define VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, hdr_type, field) \
++      (VIRTCHNL_ADD_PROTO_HDR_FIELD(hdr, \
++              VIRTCHNL_PROTO_HDR_ ## hdr_type ## _ ## field))
++#define VIRTCHNL_DEL_PROTO_HDR_FIELD_BIT(hdr, hdr_type, field) \
++      (VIRTCHNL_DEL_PROTO_HDR_FIELD(hdr, \
++              VIRTCHNL_PROTO_HDR_ ## hdr_type ## _ ## field))
++
++#define VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, hdr_type) \
++      ((hdr)->type = VIRTCHNL_PROTO_HDR_ ## hdr_type)
++#define VIRTCHNL_GET_PROTO_HDR_TYPE(hdr) \
++      (((hdr)->type) >> PROTO_HDR_SHIFT)
++#define VIRTCHNL_TEST_PROTO_HDR_TYPE(hdr, val) \
++      ((hdr)->type == ((val) >> PROTO_HDR_SHIFT))
++#define VIRTCHNL_TEST_PROTO_HDR(hdr, val) \
++      (VIRTCHNL_TEST_PROTO_HDR_TYPE(hdr, val) && \
++       VIRTCHNL_TEST_PROTO_HDR_FIELD(hdr, val))
++
++/* Protocol header type within a packet segment. A segment consists of one or
++ * more protocol headers that make up a logical group of protocol headers. Each
++ * logical group of protocol headers encapsulates or is encapsulated using/by
++ * tunneling or encapsulation protocols for network virtualization.
++ */
++enum virtchnl_proto_hdr_type {
++      VIRTCHNL_PROTO_HDR_NONE,
++      VIRTCHNL_PROTO_HDR_ETH,
++      VIRTCHNL_PROTO_HDR_S_VLAN,
++      VIRTCHNL_PROTO_HDR_C_VLAN,
++      VIRTCHNL_PROTO_HDR_IPV4,
++      VIRTCHNL_PROTO_HDR_IPV6,
++      VIRTCHNL_PROTO_HDR_TCP,
++      VIRTCHNL_PROTO_HDR_UDP,
++      VIRTCHNL_PROTO_HDR_SCTP,
++      VIRTCHNL_PROTO_HDR_GTPU_IP,
++      VIRTCHNL_PROTO_HDR_GTPU_EH,
++      VIRTCHNL_PROTO_HDR_GTPU_EH_PDU_DWN,
++      VIRTCHNL_PROTO_HDR_GTPU_EH_PDU_UP,
++      VIRTCHNL_PROTO_HDR_PPPOE,
++      VIRTCHNL_PROTO_HDR_L2TPV3,
++      VIRTCHNL_PROTO_HDR_ESP,
++      VIRTCHNL_PROTO_HDR_AH,
++      VIRTCHNL_PROTO_HDR_PFCP,
++};
++
++/* Protocol header field within a protocol header. */
++enum virtchnl_proto_hdr_field {
++      /* ETHER */
++      VIRTCHNL_PROTO_HDR_ETH_SRC =
++              PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_ETH),
++      VIRTCHNL_PROTO_HDR_ETH_DST,
++      VIRTCHNL_PROTO_HDR_ETH_ETHERTYPE,
++      /* S-VLAN */
++      VIRTCHNL_PROTO_HDR_S_VLAN_ID =
++              PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_S_VLAN),
++      /* C-VLAN */
++      VIRTCHNL_PROTO_HDR_C_VLAN_ID =
++              PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_C_VLAN),
++      /* IPV4 */
++      VIRTCHNL_PROTO_HDR_IPV4_SRC =
++              PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_IPV4),
++      VIRTCHNL_PROTO_HDR_IPV4_DST,
++      VIRTCHNL_PROTO_HDR_IPV4_DSCP,
++      VIRTCHNL_PROTO_HDR_IPV4_TTL,
++      VIRTCHNL_PROTO_HDR_IPV4_PROT,
++      /* IPV6 */
++      VIRTCHNL_PROTO_HDR_IPV6_SRC =
++              PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_IPV6),
++      VIRTCHNL_PROTO_HDR_IPV6_DST,
++      VIRTCHNL_PROTO_HDR_IPV6_TC,
++      VIRTCHNL_PROTO_HDR_IPV6_HOP_LIMIT,
++      VIRTCHNL_PROTO_HDR_IPV6_PROT,
++      /* TCP */
++      VIRTCHNL_PROTO_HDR_TCP_SRC_PORT =
++              PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_TCP),
++      VIRTCHNL_PROTO_HDR_TCP_DST_PORT,
++      /* UDP */
++      VIRTCHNL_PROTO_HDR_UDP_SRC_PORT =
++              PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_UDP),
++      VIRTCHNL_PROTO_HDR_UDP_DST_PORT,
++      /* SCTP */
++      VIRTCHNL_PROTO_HDR_SCTP_SRC_PORT =
++              PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_SCTP),
++      VIRTCHNL_PROTO_HDR_SCTP_DST_PORT,
++      /* GTPU_IP */
++      VIRTCHNL_PROTO_HDR_GTPU_IP_TEID =
++              PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_GTPU_IP),
++      /* GTPU_EH */
++      VIRTCHNL_PROTO_HDR_GTPU_EH_PDU =
++              PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_GTPU_EH),
++      VIRTCHNL_PROTO_HDR_GTPU_EH_QFI,
++      /* PPPOE */
++      VIRTCHNL_PROTO_HDR_PPPOE_SESS_ID =
++              PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_PPPOE),
++      /* L2TPV3 */
++      VIRTCHNL_PROTO_HDR_L2TPV3_SESS_ID =
++              PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_L2TPV3),
++      /* ESP */
++      VIRTCHNL_PROTO_HDR_ESP_SPI =
++              PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_ESP),
++      /* AH */
++      VIRTCHNL_PROTO_HDR_AH_SPI =
++              PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_AH),
++      /* PFCP */
++      VIRTCHNL_PROTO_HDR_PFCP_S_FIELD =
++              PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_PFCP),
++      VIRTCHNL_PROTO_HDR_PFCP_SEID,
++};
++
++struct virtchnl_proto_hdr {
++      enum virtchnl_proto_hdr_type type;
++      u32 field_selector; /* a bit mask to select field for header type */
++      u8 buffer[64];
++      /**
++       * binary buffer in network order for specific header type.
++       * For example, if type = VIRTCHNL_PROTO_HDR_IPV4, a IPv4
++       * header is expected to be copied into the buffer.
++       */
++};
++
++VIRTCHNL_CHECK_STRUCT_LEN(72, virtchnl_proto_hdr);
++
++struct virtchnl_proto_hdrs {
++      u8 tunnel_level;
++      /**
++       * specify where protocol header start from.
++       * 0 - from the outer layer
++       * 1 - from the first inner layer
++       * 2 - from the second inner layer
++       * ....
++       **/
++      int count; /* the proto layers must < VIRTCHNL_MAX_NUM_PROTO_HDRS */
++      struct virtchnl_proto_hdr proto_hdr[VIRTCHNL_MAX_NUM_PROTO_HDRS];
++};
++
++VIRTCHNL_CHECK_STRUCT_LEN(2312, virtchnl_proto_hdrs);
++
+ /**
+  * virtchnl_vc_validate_vf_msg
+  * @ver: Virtchnl version info
+-- 
+2.17.1
+
diff --git a/build/external/patches/dpdk_20.02/0007-net-iavf-flexible-Rx-descriptor-definitions.patch b/build/external/patches/dpdk_20.02/0007-net-iavf-flexible-Rx-descriptor-definitions.patch
new file mode 100644 (file)
index 0000000..07ad868
--- /dev/null
@@ -0,0 +1,226 @@
+From a7cbf4fabd46b0d02b651f5defac754e56e11e0e Mon Sep 17 00:00:00 2001
+From: Leyi Rong <leyi.rong@intel.com>
+Date: Wed, 8 Apr 2020 14:22:00 +0800
+Subject: [DPDK 07/17] net/iavf: flexible Rx descriptor definitions
+
+Add definitions for flexible Rx descriptor structures and macros.
+
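+As an illustration of how these definitions are consumed, a minimal sketch
+matching the normal Rx path added later in this series (rxq and ptype_tbl
+are assumed driver context, not part of this patch):
+
+       volatile union iavf_32b_rx_flex_desc *rxdp =
+               (volatile union iavf_32b_rx_flex_desc *)&rxq->rx_ring[rxq->rx_tail];
+       uint16_t stat_err0 = rte_le_to_cpu_16(rxdp->wb.status_error0);
+
+       if (stat_err0 & (1 << IAVF_RX_FLEX_DESC_STATUS0_DD_S)) {
+               /* descriptor is done: extract packet length and packet type */
+               uint16_t pkt_len = rte_le_to_cpu_16(rxdp->wb.pkt_len) &
+                                  IAVF_RX_FLX_DESC_PKT_LEN_M;
+               uint32_t ptype = ptype_tbl[IAVF_RX_FLEX_DESC_PTYPE_M &
+                                rte_le_to_cpu_16(rxdp->wb.ptype_flex_flags0)];
+       }
+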
+Signed-off-by: Leyi Rong <leyi.rong@intel.com>
+---
+ drivers/net/iavf/iavf_rxtx.h | 200 +++++++++++++++++++++++++++++++++++
+ 1 file changed, 200 insertions(+)
+
+diff --git a/drivers/net/iavf/iavf_rxtx.h b/drivers/net/iavf/iavf_rxtx.h
+index 09b5bd99e..5e309631e 100644
+--- a/drivers/net/iavf/iavf_rxtx.h
++++ b/drivers/net/iavf/iavf_rxtx.h
+@@ -157,6 +157,206 @@ union iavf_tx_offload {
+       };
+ };
++/* Rx Flex Descriptors
++ * These descriptors are used instead of the legacy version descriptors
++ */
++union iavf_16b_rx_flex_desc {
++      struct {
++              __le64 pkt_addr; /* Packet buffer address */
++              __le64 hdr_addr; /* Header buffer address */
++                               /* bit 0 of hdr_addr is DD bit */
++      } read;
++      struct {
++              /* Qword 0 */
++              u8 rxdid; /* descriptor builder profile ID */
++              u8 mir_id_umb_cast; /* mirror=[5:0], umb=[7:6] */
++              __le16 ptype_flex_flags0; /* ptype=[9:0], ff0=[15:10] */
++              __le16 pkt_len; /* [15:14] are reserved */
++              __le16 hdr_len_sph_flex_flags1; /* header=[10:0] */
++                                              /* sph=[11:11] */
++                                              /* ff1/ext=[15:12] */
++
++              /* Qword 1 */
++              __le16 status_error0;
++              __le16 l2tag1;
++              __le16 flex_meta0;
++              __le16 flex_meta1;
++      } wb; /* writeback */
++};
++
++union iavf_32b_rx_flex_desc {
++      struct {
++              __le64 pkt_addr; /* Packet buffer address */
++              __le64 hdr_addr; /* Header buffer address */
++                               /* bit 0 of hdr_addr is DD bit */
++              __le64 rsvd1;
++              __le64 rsvd2;
++      } read;
++      struct {
++              /* Qword 0 */
++              u8 rxdid; /* descriptor builder profile ID */
++              u8 mir_id_umb_cast; /* mirror=[5:0], umb=[7:6] */
++              __le16 ptype_flex_flags0; /* ptype=[9:0], ff0=[15:10] */
++              __le16 pkt_len; /* [15:14] are reserved */
++              __le16 hdr_len_sph_flex_flags1; /* header=[10:0] */
++                                              /* sph=[11:11] */
++                                              /* ff1/ext=[15:12] */
++
++              /* Qword 1 */
++              __le16 status_error0;
++              __le16 l2tag1;
++              __le16 flex_meta0;
++              __le16 flex_meta1;
++
++              /* Qword 2 */
++              __le16 status_error1;
++              u8 flex_flags2;
++              u8 time_stamp_low;
++              __le16 l2tag2_1st;
++              __le16 l2tag2_2nd;
++
++              /* Qword 3 */
++              __le16 flex_meta2;
++              __le16 flex_meta3;
++              union {
++                      struct {
++                              __le16 flex_meta4;
++                              __le16 flex_meta5;
++                      } flex;
++                      __le32 ts_high;
++              } flex_ts;
++      } wb; /* writeback */
++};
++
++/* Rx Flex Descriptor for Comms Package Profile
++ * RxDID Profile ID 16-21
++ * Flex-field 0: RSS hash lower 16-bits
++ * Flex-field 1: RSS hash upper 16-bits
++ * Flex-field 2: Flow ID lower 16-bits
++ * Flex-field 3: Flow ID upper 16-bits
++ * Flex-field 4: AUX0
++ * Flex-field 5: AUX1
++ */
++struct iavf_32b_rx_flex_desc_comms {
++      /* Qword 0 */
++      u8 rxdid;
++      u8 mir_id_umb_cast;
++      __le16 ptype_flexi_flags0;
++      __le16 pkt_len;
++      __le16 hdr_len_sph_flex_flags1;
++
++      /* Qword 1 */
++      __le16 status_error0;
++      __le16 l2tag1;
++      __le32 rss_hash;
++
++      /* Qword 2 */
++      __le16 status_error1;
++      u8 flexi_flags2;
++      u8 ts_low;
++      __le16 l2tag2_1st;
++      __le16 l2tag2_2nd;
++
++      /* Qword 3 */
++      __le32 flow_id;
++      union {
++              struct {
++                      __le16 aux0;
++                      __le16 aux1;
++              } flex;
++              __le32 ts_high;
++      } flex_ts;
++};
++
++/* Rx Flex Descriptor for Comms Package Profile
++ * RxDID Profile ID 22-23 (swap Hash and FlowID)
++ * Flex-field 0: Flow ID lower 16-bits
++ * Flex-field 1: Flow ID upper 16-bits
++ * Flex-field 2: RSS hash lower 16-bits
++ * Flex-field 3: RSS hash upper 16-bits
++ * Flex-field 4: AUX0
++ * Flex-field 5: AUX1
++ */
++struct iavf_32b_rx_flex_desc_comms_ovs {
++      /* Qword 0 */
++      u8 rxdid;
++      u8 mir_id_umb_cast;
++      __le16 ptype_flexi_flags0;
++      __le16 pkt_len;
++      __le16 hdr_len_sph_flex_flags1;
++
++      /* Qword 1 */
++      __le16 status_error0;
++      __le16 l2tag1;
++      __le32 flow_id;
++
++      /* Qword 2 */
++      __le16 status_error1;
++      u8 flexi_flags2;
++      u8 ts_low;
++      __le16 l2tag2_1st;
++      __le16 l2tag2_2nd;
++
++      /* Qword 3 */
++      __le32 rss_hash;
++      union {
++              struct {
++                      __le16 aux0;
++                      __le16 aux1;
++              } flex;
++              __le32 ts_high;
++      } flex_ts;
++};
++
++/* Receive Flex Descriptor profile IDs: There are a total
++ * of 64 profiles where profile IDs 0/1 are for legacy; and
++ * profiles 2-63 are flex profiles that can be programmed
++ * with a specific metadata (profile 7 reserved for HW)
++ */
++enum iavf_rxdid {
++      IAVF_RXDID_LEGACY_0             = 0,
++      IAVF_RXDID_LEGACY_1             = 1,
++      IAVF_RXDID_FLEX_NIC             = 2,
++      IAVF_RXDID_FLEX_NIC_2           = 6,
++      IAVF_RXDID_HW                   = 7,
++      IAVF_RXDID_COMMS_GENERIC        = 16,
++      IAVF_RXDID_COMMS_AUX_VLAN       = 17,
++      IAVF_RXDID_COMMS_AUX_IPV4       = 18,
++      IAVF_RXDID_COMMS_AUX_IPV6       = 19,
++      IAVF_RXDID_COMMS_AUX_IPV6_FLOW  = 20,
++      IAVF_RXDID_COMMS_AUX_TCP        = 21,
++      IAVF_RXDID_COMMS_OVS_1          = 22,
++      IAVF_RXDID_COMMS_OVS_2          = 23,
++      IAVF_RXDID_LAST                 = 63,
++};
++
++enum iavf_rx_flex_desc_status_error_0_bits {
++      /* Note: These are predefined bit offsets */
++      IAVF_RX_FLEX_DESC_STATUS0_DD_S = 0,
++      IAVF_RX_FLEX_DESC_STATUS0_EOF_S,
++      IAVF_RX_FLEX_DESC_STATUS0_HBO_S,
++      IAVF_RX_FLEX_DESC_STATUS0_L3L4P_S,
++      IAVF_RX_FLEX_DESC_STATUS0_XSUM_IPE_S,
++      IAVF_RX_FLEX_DESC_STATUS0_XSUM_L4E_S,
++      IAVF_RX_FLEX_DESC_STATUS0_XSUM_EIPE_S,
++      IAVF_RX_FLEX_DESC_STATUS0_XSUM_EUDPE_S,
++      IAVF_RX_FLEX_DESC_STATUS0_LPBK_S,
++      IAVF_RX_FLEX_DESC_STATUS0_IPV6EXADD_S,
++      IAVF_RX_FLEX_DESC_STATUS0_RXE_S,
++      IAVF_RX_FLEX_DESC_STATUS0_CRCP_S,
++      IAVF_RX_FLEX_DESC_STATUS0_RSS_VALID_S,
++      IAVF_RX_FLEX_DESC_STATUS0_L2TAG1P_S,
++      IAVF_RX_FLEX_DESC_STATUS0_XTRMD0_VALID_S,
++      IAVF_RX_FLEX_DESC_STATUS0_XTRMD1_VALID_S,
++      IAVF_RX_FLEX_DESC_STATUS0_LAST /* this entry must be last!!! */
++};
++
++/* for iavf_32b_rx_flex_desc.ptype_flex_flags0 member */
++#define IAVF_RX_FLEX_DESC_PTYPE_M     (0x3FF) /* 10-bits */
++
++/* for iavf_32b_rx_flex_desc.pkt_len member */
++#define IAVF_RX_FLX_DESC_PKT_LEN_M    (0x3FFF) /* 14-bits */
++
+ int iavf_dev_rx_queue_setup(struct rte_eth_dev *dev,
+                          uint16_t queue_idx,
+                          uint16_t nb_desc,
+-- 
+2.17.1
+
diff --git a/build/external/patches/dpdk_20.02/0008-net-iavf-return-error-if-opcode-is-mismatched.patch b/build/external/patches/dpdk_20.02/0008-net-iavf-return-error-if-opcode-is-mismatched.patch
new file mode 100644 (file)
index 0000000..0d597f5
--- /dev/null
@@ -0,0 +1,33 @@
+From 91d510242b7aae1aff4468059840feff4075f99c Mon Sep 17 00:00:00 2001
+From: Leyi Rong <leyi.rong@intel.com>
+Date: Wed, 8 Apr 2020 14:22:01 +0800
+Subject: [DPDK 08/17] net/iavf: return error if opcode is mismatched
+
+Add an error return when the opcode of the message read from the
+adminQ does not match the pending command.
+
+Signed-off-by: Leyi Rong <leyi.rong@intel.com>
+---
+ drivers/net/iavf/iavf_vchnl.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/net/iavf/iavf_vchnl.c b/drivers/net/iavf/iavf_vchnl.c
+index fa4da3a6d..b7fb05d32 100644
+--- a/drivers/net/iavf/iavf_vchnl.c
++++ b/drivers/net/iavf/iavf_vchnl.c
+@@ -52,9 +52,11 @@ iavf_read_msg_from_pf(struct iavf_adapter *adapter, uint16_t buf_len,
+       PMD_DRV_LOG(DEBUG, "AQ from pf carries opcode %u, retval %d",
+                   opcode, vf->cmd_retval);
+-      if (opcode != vf->pend_cmd)
++      if (opcode != vf->pend_cmd) {
+               PMD_DRV_LOG(WARNING, "command mismatch, expect %u, get %u",
+                           vf->pend_cmd, opcode);
++              return IAVF_ERR_OPCODE_MISMATCH;
++      }
+       return IAVF_SUCCESS;
+ }
+-- 
+2.17.1
+
diff --git a/build/external/patches/dpdk_20.02/0009-net-iavf-flexible-Rx-descriptor-support-in-normal-pa.patch b/build/external/patches/dpdk_20.02/0009-net-iavf-flexible-Rx-descriptor-support-in-normal-pa.patch
new file mode 100644 (file)
index 0000000..130262c
--- /dev/null
@@ -0,0 +1,729 @@
+From 3d10b7f1332d3f1326c182d3b7fa13669a528592 Mon Sep 17 00:00:00 2001
+From: Leyi Rong <leyi.rong@intel.com>
+Date: Wed, 8 Apr 2020 14:22:02 +0800
+Subject: [DPDK 09/17] net/iavf: flexible Rx descriptor support in normal path
+
+Support the flexible Rx descriptor format in the normal
+path of the iAVF PMD.
+
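+In short, the resulting flow (a condensed sketch of the code added below,
+not an additional change):
+
+       /* at init, when the PF advertises VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC */
+       iavf_get_supported_rxdid(adapter);  /* VIRTCHNL_OP_GET_SUPPORTED_RXDIDS */
+
+       /* per Rx queue, prefer the OVS comms profile when it is supported */
+       if (vf->supported_rxdid & BIT(IAVF_RXDID_COMMS_OVS_1))
+               rxq->rxdid = IAVF_RXDID_COMMS_OVS_1;
+       else
+               rxq->rxdid = IAVF_RXDID_LEGACY_1;
+
+       /* iavf_set_rx_function() then picks the *_flex_rxd burst callbacks */
+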
+Signed-off-by: Leyi Rong <leyi.rong@intel.com>
+---
+ drivers/net/iavf/iavf.h        |   2 +
+ drivers/net/iavf/iavf_ethdev.c |   8 +
+ drivers/net/iavf/iavf_rxtx.c   | 479 ++++++++++++++++++++++++++++++---
+ drivers/net/iavf/iavf_rxtx.h   |   8 +
+ drivers/net/iavf/iavf_vchnl.c  |  42 ++-
+ 5 files changed, 501 insertions(+), 38 deletions(-)
+
+diff --git a/drivers/net/iavf/iavf.h b/drivers/net/iavf/iavf.h
+index 526040c6e..67d625053 100644
+--- a/drivers/net/iavf/iavf.h
++++ b/drivers/net/iavf/iavf.h
+@@ -97,6 +97,7 @@ struct iavf_info {
+       struct virtchnl_version_info virtchnl_version;
+       struct virtchnl_vf_resource *vf_res; /* VF resource */
+       struct virtchnl_vsi_resource *vsi_res; /* LAN VSI */
++      uint64_t supported_rxdid;
+       volatile enum virtchnl_ops pend_cmd; /* pending command not finished */
+       uint32_t cmd_retval; /* return value of the cmd response from PF */
+@@ -225,6 +226,7 @@ int iavf_disable_queues(struct iavf_adapter *adapter);
+ int iavf_configure_rss_lut(struct iavf_adapter *adapter);
+ int iavf_configure_rss_key(struct iavf_adapter *adapter);
+ int iavf_configure_queues(struct iavf_adapter *adapter);
++int iavf_get_supported_rxdid(struct iavf_adapter *adapter);
+ int iavf_config_irq_map(struct iavf_adapter *adapter);
+ void iavf_add_del_all_mac_addr(struct iavf_adapter *adapter, bool add);
+ int iavf_dev_link_update(struct rte_eth_dev *dev,
+diff --git a/drivers/net/iavf/iavf_ethdev.c b/drivers/net/iavf/iavf_ethdev.c
+index ee9f82249..d3a121eac 100644
+--- a/drivers/net/iavf/iavf_ethdev.c
++++ b/drivers/net/iavf/iavf_ethdev.c
+@@ -1236,6 +1236,14 @@ iavf_init_vf(struct rte_eth_dev *dev)
+                       goto err_rss;
+               }
+       }
++
++      if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC) {
++              if (iavf_get_supported_rxdid(adapter) != 0) {
++                      PMD_INIT_LOG(ERR, "failed to do get supported rxdid");
++                      goto err_rss;
++              }
++      }
++
+       return 0;
+ err_rss:
+       rte_free(vf->rss_key);
+diff --git a/drivers/net/iavf/iavf_rxtx.c b/drivers/net/iavf/iavf_rxtx.c
+index 9eccb7c41..67297dcb7 100644
+--- a/drivers/net/iavf/iavf_rxtx.c
++++ b/drivers/net/iavf/iavf_rxtx.c
+@@ -346,6 +346,14 @@ iavf_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
+               return -ENOMEM;
+       }
++      if (vf->vf_res->vf_cap_flags &
++          VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC &&
++          vf->supported_rxdid & BIT(IAVF_RXDID_COMMS_OVS_1)) {
++              rxq->rxdid = IAVF_RXDID_COMMS_OVS_1;
++      } else {
++              rxq->rxdid = IAVF_RXDID_LEGACY_1;
++      }
++
+       rxq->mp = mp;
+       rxq->nb_rx_desc = nb_desc;
+       rxq->rx_free_thresh = rx_free_thresh;
+@@ -720,6 +728,20 @@ iavf_rxd_to_vlan_tci(struct rte_mbuf *mb, volatile union iavf_rx_desc *rxdp)
+       }
+ }
++static inline void
++iavf_flex_rxd_to_vlan_tci(struct rte_mbuf *mb,
++                        volatile union iavf_rx_flex_desc *rxdp)
++{
++      if (rte_le_to_cpu_64(rxdp->wb.status_error0) &
++              (1 << IAVF_RX_FLEX_DESC_STATUS0_L2TAG1P_S)) {
++              mb->ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
++              mb->vlan_tci =
++                      rte_le_to_cpu_16(rxdp->wb.l2tag1);
++      } else {
++              mb->vlan_tci = 0;
++      }
++}
++
+ /* Translate the rx descriptor status and error fields to pkt flags */
+ static inline uint64_t
+ iavf_rxd_to_pkt_flags(uint64_t qword)
+@@ -754,6 +776,87 @@ iavf_rxd_to_pkt_flags(uint64_t qword)
+       return flags;
+ }
++/* Translate the rx flex descriptor status to pkt flags */
++static inline void
++iavf_rxd_to_pkt_fields(struct rte_mbuf *mb,
++                     volatile union iavf_rx_flex_desc *rxdp)
++{
++      volatile struct iavf_32b_rx_flex_desc_comms_ovs *desc =
++                      (volatile struct iavf_32b_rx_flex_desc_comms_ovs *)rxdp;
++      uint16_t stat_err;
++
++#ifndef RTE_LIBRTE_IAVF_16BYTE_RX_DESC
++      stat_err = rte_le_to_cpu_16(desc->status_error0);
++      if (likely(stat_err & (1 << IAVF_RX_FLEX_DESC_STATUS0_RSS_VALID_S))) {
++              mb->ol_flags |= PKT_RX_RSS_HASH;
++              mb->hash.rss = rte_le_to_cpu_32(desc->rss_hash);
++      }
++#endif
++}
++
++#define IAVF_RX_FLEX_ERR0_BITS        \
++      ((1 << IAVF_RX_FLEX_DESC_STATUS0_HBO_S) |       \
++       (1 << IAVF_RX_FLEX_DESC_STATUS0_XSUM_IPE_S) |  \
++       (1 << IAVF_RX_FLEX_DESC_STATUS0_XSUM_L4E_S) |  \
++       (1 << IAVF_RX_FLEX_DESC_STATUS0_XSUM_EIPE_S) | \
++       (1 << IAVF_RX_FLEX_DESC_STATUS0_XSUM_EUDPE_S) |        \
++       (1 << IAVF_RX_FLEX_DESC_STATUS0_RXE_S))
++
++/* Rx L3/L4 checksum */
++static inline uint64_t
++iavf_flex_rxd_error_to_pkt_flags(uint16_t stat_err0)
++{
++      uint64_t flags = 0;
++
++      /* check if HW has decoded the packet and checksum */
++      if (unlikely(!(stat_err0 & (1 << IAVF_RX_FLEX_DESC_STATUS0_L3L4P_S))))
++              return 0;
++
++      if (likely(!(stat_err0 & IAVF_RX_FLEX_ERR0_BITS))) {
++              flags |= (PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD);
++              return flags;
++      }
++
++      if (unlikely(stat_err0 & (1 << IAVF_RX_FLEX_DESC_STATUS0_XSUM_IPE_S)))
++              flags |= PKT_RX_IP_CKSUM_BAD;
++      else
++              flags |= PKT_RX_IP_CKSUM_GOOD;
++
++      if (unlikely(stat_err0 & (1 << IAVF_RX_FLEX_DESC_STATUS0_XSUM_L4E_S)))
++              flags |= PKT_RX_L4_CKSUM_BAD;
++      else
++              flags |= PKT_RX_L4_CKSUM_GOOD;
++
++      if (unlikely(stat_err0 & (1 << IAVF_RX_FLEX_DESC_STATUS0_XSUM_EIPE_S)))
++              flags |= PKT_RX_EIP_CKSUM_BAD;
++
++      return flags;
++}
++
++/* If the number of free RX descriptors is greater than the RX free
++ * threshold of the queue, advance the Receive Descriptor Tail (RDT)
++ * register. Update the RDT with the value of the last processed RX
++ * descriptor minus 1, to guarantee that the RDT register is never
++ * equal to the RDH register, which creates a "full" ring situation
++ * from the hardware point of view.
++ */
++static inline void
++iavf_update_rx_tail(struct iavf_rx_queue *rxq, uint16_t nb_hold, uint16_t rx_id)
++{
++      nb_hold = (uint16_t)(nb_hold + rxq->nb_rx_hold);
++
++      if (nb_hold > rxq->rx_free_thresh) {
++              PMD_RX_LOG(DEBUG,
++                         "port_id=%u queue_id=%u rx_tail=%u nb_hold=%u",
++                         rxq->port_id, rxq->queue_id, rx_id, nb_hold);
++              rx_id = (uint16_t)((rx_id == 0) ?
++                      (rxq->nb_rx_desc - 1) : (rx_id - 1));
++              IAVF_PCI_REG_WRITE(rxq->qrx_tail, rx_id);
++              nb_hold = 0;
++      }
++      rxq->nb_rx_hold = nb_hold;
++}
++
+ /* implement recv_pkts */
+ uint16_t
+ iavf_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
+@@ -854,23 +957,256 @@ iavf_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
+       }
+       rxq->rx_tail = rx_id;
+-      /* If the number of free RX descriptors is greater than the RX free
+-       * threshold of the queue, advance the receive tail register of queue.
+-       * Update that register with the value of the last processed RX
+-       * descriptor minus 1.
+-       */
+-      nb_hold = (uint16_t)(nb_hold + rxq->nb_rx_hold);
+-      if (nb_hold > rxq->rx_free_thresh) {
+-              PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u "
+-                         "nb_hold=%u nb_rx=%u",
+-                         rxq->port_id, rxq->queue_id,
+-                         rx_id, nb_hold, nb_rx);
+-              rx_id = (uint16_t)((rx_id == 0) ?
+-                      (rxq->nb_rx_desc - 1) : (rx_id - 1));
+-              IAVF_PCI_REG_WRITE(rxq->qrx_tail, rx_id);
+-              nb_hold = 0;
++      iavf_update_rx_tail(rxq, nb_hold, rx_id);
++
++      return nb_rx;
++}
++
++/* implement recv_pkts for flexible Rx descriptor */
++uint16_t
++iavf_recv_pkts_flex_rxd(void *rx_queue,
++                      struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
++{
++      volatile union iavf_rx_desc *rx_ring;
++      volatile union iavf_rx_flex_desc *rxdp;
++      struct iavf_rx_queue *rxq;
++      union iavf_rx_flex_desc rxd;
++      struct rte_mbuf *rxe;
++      struct rte_eth_dev *dev;
++      struct rte_mbuf *rxm;
++      struct rte_mbuf *nmb;
++      uint16_t nb_rx;
++      uint16_t rx_stat_err0;
++      uint16_t rx_packet_len;
++      uint16_t rx_id, nb_hold;
++      uint64_t dma_addr;
++      uint64_t pkt_flags;
++      const uint32_t *ptype_tbl;
++
++      nb_rx = 0;
++      nb_hold = 0;
++      rxq = rx_queue;
++      rx_id = rxq->rx_tail;
++      rx_ring = rxq->rx_ring;
++      ptype_tbl = rxq->vsi->adapter->ptype_tbl;
++
++      while (nb_rx < nb_pkts) {
++              rxdp = (volatile union iavf_rx_flex_desc *)&rx_ring[rx_id];
++              rx_stat_err0 = rte_le_to_cpu_16(rxdp->wb.status_error0);
++
++              /* Check the DD bit first */
++              if (!(rx_stat_err0 & (1 << IAVF_RX_FLEX_DESC_STATUS0_DD_S)))
++                      break;
++              IAVF_DUMP_RX_DESC(rxq, rxdp, rx_id);
++
++              nmb = rte_mbuf_raw_alloc(rxq->mp);
++              if (unlikely(!nmb)) {
++                      dev = &rte_eth_devices[rxq->port_id];
++                      dev->data->rx_mbuf_alloc_failed++;
++                      PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
++                                 "queue_id=%u", rxq->port_id, rxq->queue_id);
++                      break;
++              }
++
++              rxd = *rxdp;
++              nb_hold++;
++              rxe = rxq->sw_ring[rx_id];
++              rx_id++;
++              if (unlikely(rx_id == rxq->nb_rx_desc))
++                      rx_id = 0;
++
++              /* Prefetch next mbuf */
++              rte_prefetch0(rxq->sw_ring[rx_id]);
++
++              /* When next RX descriptor is on a cache line boundary,
++               * prefetch the next 4 RX descriptors and next 8 pointers
++               * to mbufs.
++               */
++              if ((rx_id & 0x3) == 0) {
++                      rte_prefetch0(&rx_ring[rx_id]);
++                      rte_prefetch0(rxq->sw_ring[rx_id]);
++              }
++              rxm = rxe;
++              rxe = nmb;
++              dma_addr =
++                      rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
++              rxdp->read.hdr_addr = 0;
++              rxdp->read.pkt_addr = dma_addr;
++
++              rx_packet_len = (rte_le_to_cpu_16(rxd.wb.pkt_len) &
++                              IAVF_RX_FLX_DESC_PKT_LEN_M) - rxq->crc_len;
++
++              rxm->data_off = RTE_PKTMBUF_HEADROOM;
++              rte_prefetch0(RTE_PTR_ADD(rxm->buf_addr, RTE_PKTMBUF_HEADROOM));
++              rxm->nb_segs = 1;
++              rxm->next = NULL;
++              rxm->pkt_len = rx_packet_len;
++              rxm->data_len = rx_packet_len;
++              rxm->port = rxq->port_id;
++              rxm->ol_flags = 0;
++              rxm->packet_type = ptype_tbl[IAVF_RX_FLEX_DESC_PTYPE_M &
++                      rte_le_to_cpu_16(rxd.wb.ptype_flex_flags0)];
++              iavf_flex_rxd_to_vlan_tci(rxm, &rxd);
++              iavf_rxd_to_pkt_fields(rxm, &rxd);
++              pkt_flags = iavf_flex_rxd_error_to_pkt_flags(rx_stat_err0);
++              rxm->ol_flags |= pkt_flags;
++
++              rx_pkts[nb_rx++] = rxm;
+       }
+-      rxq->nb_rx_hold = nb_hold;
++      rxq->rx_tail = rx_id;
++
++      iavf_update_rx_tail(rxq, nb_hold, rx_id);
++
++      return nb_rx;
++}
++
++/* implement recv_scattered_pkts for flexible Rx descriptor */
++uint16_t
++iavf_recv_scattered_pkts_flex_rxd(void *rx_queue, struct rte_mbuf **rx_pkts,
++                                uint16_t nb_pkts)
++{
++      struct iavf_rx_queue *rxq = rx_queue;
++      union iavf_rx_flex_desc rxd;
++      struct rte_mbuf *rxe;
++      struct rte_mbuf *first_seg = rxq->pkt_first_seg;
++      struct rte_mbuf *last_seg = rxq->pkt_last_seg;
++      struct rte_mbuf *nmb, *rxm;
++      uint16_t rx_id = rxq->rx_tail;
++      uint16_t nb_rx = 0, nb_hold = 0, rx_packet_len;
++      struct rte_eth_dev *dev;
++      uint16_t rx_stat_err0;
++      uint64_t dma_addr;
++      uint64_t pkt_flags;
++
++      volatile union iavf_rx_desc *rx_ring = rxq->rx_ring;
++      volatile union iavf_rx_flex_desc *rxdp;
++      const uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
++
++      while (nb_rx < nb_pkts) {
++              rxdp = (volatile union iavf_rx_flex_desc *)&rx_ring[rx_id];
++              rx_stat_err0 = rte_le_to_cpu_16(rxdp->wb.status_error0);
++
++              /* Check the DD bit */
++              if (!(rx_stat_err0 & (1 << IAVF_RX_FLEX_DESC_STATUS0_DD_S)))
++                      break;
++              IAVF_DUMP_RX_DESC(rxq, rxdp, rx_id);
++
++              nmb = rte_mbuf_raw_alloc(rxq->mp);
++              if (unlikely(!nmb)) {
++                      PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
++                                 "queue_id=%u", rxq->port_id, rxq->queue_id);
++                      dev = &rte_eth_devices[rxq->port_id];
++                      dev->data->rx_mbuf_alloc_failed++;
++                      break;
++              }
++
++              rxd = *rxdp;
++              nb_hold++;
++              rxe = rxq->sw_ring[rx_id];
++              rx_id++;
++              if (rx_id == rxq->nb_rx_desc)
++                      rx_id = 0;
++
++              /* Prefetch next mbuf */
++              rte_prefetch0(rxq->sw_ring[rx_id]);
++
++              /* When next RX descriptor is on a cache line boundary,
++               * prefetch the next 4 RX descriptors and next 8 pointers
++               * to mbufs.
++               */
++              if ((rx_id & 0x3) == 0) {
++                      rte_prefetch0(&rx_ring[rx_id]);
++                      rte_prefetch0(rxq->sw_ring[rx_id]);
++              }
++
++              rxm = rxe;
++              rxe = nmb;
++              dma_addr =
++                      rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
++
++              /* Set data buffer address and data length of the mbuf */
++              rxdp->read.hdr_addr = 0;
++              rxdp->read.pkt_addr = dma_addr;
++              rx_packet_len = rte_le_to_cpu_16(rxd.wb.pkt_len) &
++                              IAVF_RX_FLX_DESC_PKT_LEN_M;
++              rxm->data_len = rx_packet_len;
++              rxm->data_off = RTE_PKTMBUF_HEADROOM;
++
++              /* If this is the first buffer of the received packet, set the
++               * pointer to the first mbuf of the packet and initialize its
++               * context. Otherwise, update the total length and the number
++               * of segments of the current scattered packet, and update the
++               * pointer to the last mbuf of the current packet.
++               */
++              if (!first_seg) {
++                      first_seg = rxm;
++                      first_seg->nb_segs = 1;
++                      first_seg->pkt_len = rx_packet_len;
++              } else {
++                      first_seg->pkt_len =
++                              (uint16_t)(first_seg->pkt_len +
++                                              rx_packet_len);
++                      first_seg->nb_segs++;
++                      last_seg->next = rxm;
++              }
++
++              /* If this is not the last buffer of the received packet,
++               * update the pointer to the last mbuf of the current scattered
++               * packet and continue to parse the RX ring.
++               */
++              if (!(rx_stat_err0 & (1 << IAVF_RX_FLEX_DESC_STATUS0_EOF_S))) {
++                      last_seg = rxm;
++                      continue;
++              }
++
++              /* This is the last buffer of the received packet. If the CRC
++               * is not stripped by the hardware:
++               *  - Subtract the CRC length from the total packet length.
++               *  - If the last buffer only contains the whole CRC or a part
++               *  of it, free the mbuf associated to the last buffer. If part
++               *  of the CRC is also contained in the previous mbuf, subtract
++               *  the length of that CRC part from the data length of the
++               *  previous mbuf.
++               */
++              rxm->next = NULL;
++              if (unlikely(rxq->crc_len > 0)) {
++                      first_seg->pkt_len -= RTE_ETHER_CRC_LEN;
++                      if (rx_packet_len <= RTE_ETHER_CRC_LEN) {
++                              rte_pktmbuf_free_seg(rxm);
++                              first_seg->nb_segs--;
++                              last_seg->data_len =
++                                      (uint16_t)(last_seg->data_len -
++                                      (RTE_ETHER_CRC_LEN - rx_packet_len));
++                              last_seg->next = NULL;
++                      } else {
++                              rxm->data_len = (uint16_t)(rx_packet_len -
++                                                      RTE_ETHER_CRC_LEN);
++                      }
++              }
++
++              first_seg->port = rxq->port_id;
++              first_seg->ol_flags = 0;
++              first_seg->packet_type = ptype_tbl[IAVF_RX_FLEX_DESC_PTYPE_M &
++                      rte_le_to_cpu_16(rxd.wb.ptype_flex_flags0)];
++              iavf_flex_rxd_to_vlan_tci(first_seg, &rxd);
++              iavf_rxd_to_pkt_fields(first_seg, &rxd);
++              pkt_flags = iavf_flex_rxd_error_to_pkt_flags(rx_stat_err0);
++
++              first_seg->ol_flags |= pkt_flags;
++
++              /* Prefetch data of first segment, if configured to do so. */
++              rte_prefetch0(RTE_PTR_ADD(first_seg->buf_addr,
++                                        first_seg->data_off));
++              rx_pkts[nb_rx++] = first_seg;
++              first_seg = NULL;
++      }
++
++      /* Record index of the next RX descriptor to probe. */
++      rxq->rx_tail = rx_id;
++      rxq->pkt_first_seg = first_seg;
++      rxq->pkt_last_seg = last_seg;
++
++      iavf_update_rx_tail(rxq, nb_hold, rx_id);
+       return nb_rx;
+ }
+@@ -1027,30 +1363,88 @@ iavf_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
+       rxq->pkt_first_seg = first_seg;
+       rxq->pkt_last_seg = last_seg;
+-      /* If the number of free RX descriptors is greater than the RX free
+-       * threshold of the queue, advance the Receive Descriptor Tail (RDT)
+-       * register. Update the RDT with the value of the last processed RX
+-       * descriptor minus 1, to guarantee that the RDT register is never
+-       * equal to the RDH register, which creates a "full" ring situtation
+-       * from the hardware point of view.
++      iavf_update_rx_tail(rxq, nb_hold, rx_id);
++
++      return nb_rx;
++}
++
++#define IAVF_LOOK_AHEAD 8
++static inline int
++iavf_rx_scan_hw_ring_flex_rxd(struct iavf_rx_queue *rxq)
++{
++      volatile union iavf_rx_flex_desc *rxdp;
++      struct rte_mbuf **rxep;
++      struct rte_mbuf *mb;
++      uint16_t stat_err0;
++      uint16_t pkt_len;
++      int32_t s[IAVF_LOOK_AHEAD], nb_dd;
++      int32_t i, j, nb_rx = 0;
++      uint64_t pkt_flags;
++      const uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
++
++      rxdp = (volatile union iavf_rx_flex_desc *)&rxq->rx_ring[rxq->rx_tail];
++      rxep = &rxq->sw_ring[rxq->rx_tail];
++
++      stat_err0 = rte_le_to_cpu_16(rxdp->wb.status_error0);
++
++      /* Make sure there is at least 1 packet to receive */
++      if (!(stat_err0 & (1 << IAVF_RX_FLEX_DESC_STATUS0_DD_S)))
++              return 0;
++
++      /* Scan LOOK_AHEAD descriptors at a time to determine which
++       * descriptors reference packets that are ready to be received.
+        */
+-      nb_hold = (uint16_t)(nb_hold + rxq->nb_rx_hold);
+-      if (nb_hold > rxq->rx_free_thresh) {
+-              PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u "
+-                         "nb_hold=%u nb_rx=%u",
+-                         rxq->port_id, rxq->queue_id,
+-                         rx_id, nb_hold, nb_rx);
+-              rx_id = (uint16_t)(rx_id == 0 ?
+-                      (rxq->nb_rx_desc - 1) : (rx_id - 1));
+-              IAVF_PCI_REG_WRITE(rxq->qrx_tail, rx_id);
+-              nb_hold = 0;
++      for (i = 0; i < IAVF_RX_MAX_BURST; i += IAVF_LOOK_AHEAD,
++           rxdp += IAVF_LOOK_AHEAD, rxep += IAVF_LOOK_AHEAD) {
++              /* Read desc statuses backwards to avoid race condition */
++              for (j = IAVF_LOOK_AHEAD - 1; j >= 0; j--)
++                      s[j] = rte_le_to_cpu_16(rxdp[j].wb.status_error0);
++
++              rte_smp_rmb();
++
++              /* Compute how many status bits were set */
++              for (j = 0, nb_dd = 0; j < IAVF_LOOK_AHEAD; j++)
++                      nb_dd += s[j] & (1 << IAVF_RX_FLEX_DESC_STATUS0_DD_S);
++
++              nb_rx += nb_dd;
++
++              /* Translate descriptor info to mbuf parameters */
++              for (j = 0; j < nb_dd; j++) {
++                      IAVF_DUMP_RX_DESC(rxq, &rxdp[j],
++                                        rxq->rx_tail +
++                                        i * IAVF_LOOK_AHEAD + j);
++
++                      mb = rxep[j];
++                      pkt_len = (rte_le_to_cpu_16(rxdp[j].wb.pkt_len) &
++                              IAVF_RX_FLX_DESC_PKT_LEN_M) - rxq->crc_len;
++                      mb->data_len = pkt_len;
++                      mb->pkt_len = pkt_len;
++                      mb->ol_flags = 0;
++
++                      mb->packet_type = ptype_tbl[IAVF_RX_FLEX_DESC_PTYPE_M &
++                              rte_le_to_cpu_16(rxdp[j].wb.ptype_flex_flags0)];
++                      iavf_flex_rxd_to_vlan_tci(mb, &rxdp[j]);
++                      iavf_rxd_to_pkt_fields(mb, &rxdp[j]);
++                      stat_err0 = rte_le_to_cpu_16(rxdp[j].wb.status_error0);
++                      pkt_flags = iavf_flex_rxd_error_to_pkt_flags(stat_err0);
++
++                      mb->ol_flags |= pkt_flags;
++              }
++
++              for (j = 0; j < IAVF_LOOK_AHEAD; j++)
++                      rxq->rx_stage[i + j] = rxep[j];
++
++              if (nb_dd != IAVF_LOOK_AHEAD)
++                      break;
+       }
+-      rxq->nb_rx_hold = nb_hold;
++
++      /* Clear software ring entries */
++      for (i = 0; i < nb_rx; i++)
++              rxq->sw_ring[rxq->rx_tail + i] = NULL;
+       return nb_rx;
+ }
+-#define IAVF_LOOK_AHEAD 8
+ static inline int
+ iavf_rx_scan_hw_ring(struct iavf_rx_queue *rxq)
+ {
+@@ -1219,7 +1613,10 @@ rx_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
+       if (rxq->rx_nb_avail)
+               return iavf_rx_fill_from_stage(rxq, rx_pkts, nb_pkts);
+-      nb_rx = (uint16_t)iavf_rx_scan_hw_ring(rxq);
++      if (rxq->rxdid == IAVF_RXDID_COMMS_OVS_1)
++              nb_rx = (uint16_t)iavf_rx_scan_hw_ring_flex_rxd(rxq);
++      else
++              nb_rx = (uint16_t)iavf_rx_scan_hw_ring(rxq);
+       rxq->rx_next_avail = 0;
+       rxq->rx_nb_avail = nb_rx;
+       rxq->rx_tail = (uint16_t)(rxq->rx_tail + nb_rx);
+@@ -1663,6 +2060,7 @@ iavf_set_rx_function(struct rte_eth_dev *dev)
+ {
+       struct iavf_adapter *adapter =
+               IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
++      struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+ #ifdef RTE_ARCH_X86
+       struct iavf_rx_queue *rxq;
+       int i;
+@@ -1702,7 +2100,10 @@ iavf_set_rx_function(struct rte_eth_dev *dev)
+       if (dev->data->scattered_rx) {
+               PMD_DRV_LOG(DEBUG, "Using a Scattered Rx callback (port=%d).",
+                           dev->data->port_id);
+-              dev->rx_pkt_burst = iavf_recv_scattered_pkts;
++              if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC)
++                      dev->rx_pkt_burst = iavf_recv_scattered_pkts_flex_rxd;
++              else
++                      dev->rx_pkt_burst = iavf_recv_scattered_pkts;
+       } else if (adapter->rx_bulk_alloc_allowed) {
+               PMD_DRV_LOG(DEBUG, "Using bulk Rx callback (port=%d).",
+                           dev->data->port_id);
+@@ -1710,7 +2111,10 @@ iavf_set_rx_function(struct rte_eth_dev *dev)
+       } else {
+               PMD_DRV_LOG(DEBUG, "Using Basic Rx callback (port=%d).",
+                           dev->data->port_id);
+-              dev->rx_pkt_burst = iavf_recv_pkts;
++              if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC)
++                      dev->rx_pkt_burst = iavf_recv_pkts_flex_rxd;
++              else
++                      dev->rx_pkt_burst = iavf_recv_pkts;
+       }
+ }
+@@ -1797,6 +2201,7 @@ iavf_dev_rxq_count(struct rte_eth_dev *dev, uint16_t queue_id)
+       rxq = dev->data->rx_queues[queue_id];
+       rxdp = &rxq->rx_ring[rxq->rx_tail];
++
+       while ((desc < rxq->nb_rx_desc) &&
+              ((rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len) &
+                IAVF_RXD_QW1_STATUS_MASK) >> IAVF_RXD_QW1_STATUS_SHIFT) &
+diff --git a/drivers/net/iavf/iavf_rxtx.h b/drivers/net/iavf/iavf_rxtx.h
+index 5e309631e..f33d1df41 100644
+--- a/drivers/net/iavf/iavf_rxtx.h
++++ b/drivers/net/iavf/iavf_rxtx.h
+@@ -62,6 +62,7 @@
+ #define iavf_rx_desc iavf_16byte_rx_desc
+ #else
+ #define iavf_rx_desc iavf_32byte_rx_desc
++#define iavf_rx_flex_desc iavf_32b_rx_flex_desc
+ #endif
+ struct iavf_rxq_ops {
+@@ -87,6 +88,7 @@ struct iavf_rx_queue {
+       struct rte_mbuf *pkt_first_seg; /* first segment of current packet */
+       struct rte_mbuf *pkt_last_seg;  /* last segment of current packet */
+       struct rte_mbuf fake_mbuf;      /* dummy mbuf */
++      uint8_t rxdid;
+       /* used for VPMD */
+       uint16_t rxrearm_nb;       /* number of remaining to be re-armed */
+@@ -379,9 +381,15 @@ void iavf_dev_tx_queue_release(void *txq);
+ void iavf_stop_queues(struct rte_eth_dev *dev);
+ uint16_t iavf_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
+                      uint16_t nb_pkts);
++uint16_t iavf_recv_pkts_flex_rxd(void *rx_queue,
++                               struct rte_mbuf **rx_pkts,
++                               uint16_t nb_pkts);
+ uint16_t iavf_recv_scattered_pkts(void *rx_queue,
+                                struct rte_mbuf **rx_pkts,
+                                uint16_t nb_pkts);
++uint16_t iavf_recv_scattered_pkts_flex_rxd(void *rx_queue,
++                                         struct rte_mbuf **rx_pkts,
++                                         uint16_t nb_pkts);
+ uint16_t iavf_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
+                      uint16_t nb_pkts);
+ uint16_t iavf_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
+diff --git a/drivers/net/iavf/iavf_vchnl.c b/drivers/net/iavf/iavf_vchnl.c
+index b7fb05d32..3f0d23a92 100644
+--- a/drivers/net/iavf/iavf_vchnl.c
++++ b/drivers/net/iavf/iavf_vchnl.c
+@@ -88,6 +88,7 @@ iavf_execute_vf_cmd(struct iavf_adapter *adapter, struct iavf_cmd_info *args)
+               break;
+       case VIRTCHNL_OP_VERSION:
+       case VIRTCHNL_OP_GET_VF_RESOURCES:
++      case VIRTCHNL_OP_GET_SUPPORTED_RXDIDS:
+               /* for init virtchnl ops, need to poll the response */
+               do {
+                       ret = iavf_read_msg_from_pf(adapter, args->out_size,
+@@ -338,7 +339,8 @@ iavf_get_vf_resource(struct iavf_adapter *adapter)
+        * add advanced/optional offload capabilities
+        */
+-      caps = IAVF_BASIC_OFFLOAD_CAPS | VIRTCHNL_VF_CAP_ADV_LINK_SPEED;
++      caps = IAVF_BASIC_OFFLOAD_CAPS | VIRTCHNL_VF_CAP_ADV_LINK_SPEED |
++              VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC;
+       args.in_args = (uint8_t *)&caps;
+       args.in_args_size = sizeof(caps);
+@@ -375,6 +377,32 @@ iavf_get_vf_resource(struct iavf_adapter *adapter)
+       return 0;
+ }
++int
++iavf_get_supported_rxdid(struct iavf_adapter *adapter)
++{
++      struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
++      struct iavf_cmd_info args;
++      int ret;
++
++      args.ops = VIRTCHNL_OP_GET_SUPPORTED_RXDIDS;
++      args.in_args = NULL;
++      args.in_args_size = 0;
++      args.out_buffer = vf->aq_resp;
++      args.out_size = IAVF_AQ_BUF_SZ;
++
++      ret = iavf_execute_vf_cmd(adapter, &args);
++      if (ret) {
++              PMD_DRV_LOG(ERR,
++                          "Failed to execute command of OP_GET_SUPPORTED_RXDIDS");
++              return ret;
++      }
++
++      vf->supported_rxdid =
++              ((struct virtchnl_supported_rxdids *)args.out_buffer)->supported_rxdids;
++
++      return 0;
++}
++
+ int
+ iavf_enable_queues(struct iavf_adapter *adapter)
+ {
+@@ -567,6 +595,18 @@ iavf_configure_queues(struct iavf_adapter *adapter)
+                       vc_qp->rxq.ring_len = rxq[i]->nb_rx_desc;
+                       vc_qp->rxq.dma_ring_addr = rxq[i]->rx_ring_phys_addr;
+                       vc_qp->rxq.databuffer_size = rxq[i]->rx_buf_len;
++
++                      if (vf->vf_res->vf_cap_flags &
++                          VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC &&
++                          vf->supported_rxdid & BIT(IAVF_RXDID_COMMS_OVS_1)) {
++                              vc_qp->rxq.rxdid = IAVF_RXDID_COMMS_OVS_1;
++                              PMD_DRV_LOG(NOTICE, "request RXDID == %d in "
++                                          "Queue[%d]", vc_qp->rxq.rxdid, i);
++                      } else {
++                              vc_qp->rxq.rxdid = IAVF_RXDID_LEGACY_1;
++                              PMD_DRV_LOG(NOTICE, "request RXDID == %d in "
++                                          "Queue[%d]", vc_qp->rxq.rxdid, i);
++                      }
+               }
+       }
+-- 
+2.17.1
+
diff --git a/build/external/patches/dpdk_20.02/0010-net-iavf-flexible-Rx-descriptor-support-in-AVX-path.patch b/build/external/patches/dpdk_20.02/0010-net-iavf-flexible-Rx-descriptor-support-in-AVX-path.patch
new file mode 100644 (file)
index 0000000..009a2c2
--- /dev/null
@@ -0,0 +1,671 @@
+From b1138c10d2cd5938f4c0316e0b132caeb7e869dd Mon Sep 17 00:00:00 2001
+From: Leyi Rong <leyi.rong@intel.com>
+Date: Wed, 8 Apr 2020 14:22:03 +0800
+Subject: [DPDK 10/17] net/iavf: flexible Rx descriptor support in AVX path
+
+Support the flexible Rx descriptor format in the AVX
+path of the iAVF PMD.
+
+Signed-off-by: Leyi Rong <leyi.rong@intel.com>
+---
+ drivers/net/iavf/iavf_rxtx.c          |  24 +-
+ drivers/net/iavf/iavf_rxtx.h          |   6 +
+ drivers/net/iavf/iavf_rxtx_vec_avx2.c | 550 +++++++++++++++++++++++++-
+ 3 files changed, 570 insertions(+), 10 deletions(-)
+
+diff --git a/drivers/net/iavf/iavf_rxtx.c b/drivers/net/iavf/iavf_rxtx.c
+index 67297dcb7..34c41d104 100644
+--- a/drivers/net/iavf/iavf_rxtx.c
++++ b/drivers/net/iavf/iavf_rxtx.c
+@@ -2081,16 +2081,28 @@ iavf_set_rx_function(struct rte_eth_dev *dev)
+                                   "Using %sVector Scattered Rx (port %d).",
+                                   use_avx2 ? "avx2 " : "",
+                                   dev->data->port_id);
+-                      dev->rx_pkt_burst = use_avx2 ?
+-                                          iavf_recv_scattered_pkts_vec_avx2 :
+-                                          iavf_recv_scattered_pkts_vec;
++                      if (vf->vf_res->vf_cap_flags &
++                              VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC)
++                              dev->rx_pkt_burst = use_avx2 ?
++                                      iavf_recv_scattered_pkts_vec_avx2_flex_rxd :
++                                      iavf_recv_scattered_pkts_vec;
++                      else
++                              dev->rx_pkt_burst = use_avx2 ?
++                                      iavf_recv_scattered_pkts_vec_avx2 :
++                                      iavf_recv_scattered_pkts_vec;
+               } else {
+                       PMD_DRV_LOG(DEBUG, "Using %sVector Rx (port %d).",
+                                   use_avx2 ? "avx2 " : "",
+                                   dev->data->port_id);
+-                      dev->rx_pkt_burst = use_avx2 ?
+-                                          iavf_recv_pkts_vec_avx2 :
+-                                          iavf_recv_pkts_vec;
++                      if (vf->vf_res->vf_cap_flags &
++                              VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC)
++                              dev->rx_pkt_burst = use_avx2 ?
++                                      iavf_recv_pkts_vec_avx2_flex_rxd :
++                                      iavf_recv_pkts_vec;
++                      else
++                              dev->rx_pkt_burst = use_avx2 ?
++                                      iavf_recv_pkts_vec_avx2 :
++                                      iavf_recv_pkts_vec;
+               }
+               return;
+diff --git a/drivers/net/iavf/iavf_rxtx.h b/drivers/net/iavf/iavf_rxtx.h
+index f33d1df41..8e1db2588 100644
+--- a/drivers/net/iavf/iavf_rxtx.h
++++ b/drivers/net/iavf/iavf_rxtx.h
+@@ -413,9 +413,15 @@ uint16_t iavf_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
+                                 uint16_t nb_pkts);
+ uint16_t iavf_recv_pkts_vec_avx2(void *rx_queue, struct rte_mbuf **rx_pkts,
+                                uint16_t nb_pkts);
++uint16_t iavf_recv_pkts_vec_avx2_flex_rxd(void *rx_queue,
++                                        struct rte_mbuf **rx_pkts,
++                                        uint16_t nb_pkts);
+ uint16_t iavf_recv_scattered_pkts_vec_avx2(void *rx_queue,
+                                          struct rte_mbuf **rx_pkts,
+                                          uint16_t nb_pkts);
++uint16_t iavf_recv_scattered_pkts_vec_avx2_flex_rxd(void *rx_queue,
++                                                  struct rte_mbuf **rx_pkts,
++                                                  uint16_t nb_pkts);
+ uint16_t iavf_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
+                           uint16_t nb_pkts);
+ uint16_t iavf_xmit_pkts_vec_avx2(void *tx_queue, struct rte_mbuf **tx_pkts,
+diff --git a/drivers/net/iavf/iavf_rxtx_vec_avx2.c b/drivers/net/iavf/iavf_rxtx_vec_avx2.c
+index 2587083d8..b23188fd3 100644
+--- a/drivers/net/iavf/iavf_rxtx_vec_avx2.c
++++ b/drivers/net/iavf/iavf_rxtx_vec_avx2.c
+@@ -11,14 +11,16 @@
+ #endif
+ static inline void
+-iavf_rxq_rearm(struct iavf_rx_queue *rxq)
++iavf_rxq_rearm(struct iavf_rx_queue *rxq, volatile union iavf_rx_desc *rxdp)
+ {
+       int i;
+       uint16_t rx_id;
+-      volatile union iavf_rx_desc *rxdp;
+       struct rte_mbuf **rxp = &rxq->sw_ring[rxq->rxrearm_start];
+-      rxdp = rxq->rx_ring + rxq->rxrearm_start;
++      if (rxq->rxdid == IAVF_RXDID_COMMS_OVS_1) {
++              volatile union iavf_rx_flex_desc *rxdp =
++                      (union iavf_rx_flex_desc *)rxdp;
++      }
+       /* Pull 'n' more MBUFs into the software ring */
+       if (rte_mempool_get_bulk(rxq->mp,
+@@ -160,7 +162,7 @@ _iavf_recv_raw_pkts_vec_avx2(struct iavf_rx_queue *rxq,
+        * of time to act
+        */
+       if (rxq->rxrearm_nb > IAVF_RXQ_REARM_THRESH)
+-              iavf_rxq_rearm(rxq);
++              iavf_rxq_rearm(rxq, rxq->rx_ring + rxq->rxrearm_start);
+       /* Before we start moving massive data around, check to see if
+        * there is actually a packet available
+@@ -614,6 +616,465 @@ _iavf_recv_raw_pkts_vec_avx2(struct iavf_rx_queue *rxq,
+       return received;
+ }
++static inline uint16_t
++_iavf_recv_raw_pkts_vec_avx2_flex_rxd(struct iavf_rx_queue *rxq,
++                                    struct rte_mbuf **rx_pkts,
++                                    uint16_t nb_pkts, uint8_t *split_packet)
++{
++#define IAVF_DESCS_PER_LOOP_AVX 8
++
++      const uint32_t *type_table = rxq->vsi->adapter->ptype_tbl;
++
++      const __m256i mbuf_init = _mm256_set_epi64x(0, 0,
++                      0, rxq->mbuf_initializer);
++      struct rte_mbuf **sw_ring = &rxq->sw_ring[rxq->rx_tail];
++      volatile union iavf_rx_flex_desc *rxdp =
++              (union iavf_rx_flex_desc *)rxq->rx_ring + rxq->rx_tail;
++
++      rte_prefetch0(rxdp);
++
++      /* nb_pkts has to be floor-aligned to IAVF_DESCS_PER_LOOP_AVX */
++      nb_pkts = RTE_ALIGN_FLOOR(nb_pkts, IAVF_DESCS_PER_LOOP_AVX);
++
++      /* See if we need to rearm the RX queue - gives the prefetch a bit
++       * of time to act
++       */
++      if (rxq->rxrearm_nb > IAVF_RXQ_REARM_THRESH)
++              /* iavf_rxq_rearm(rxq); */
++              iavf_rxq_rearm(rxq, rxq->rx_ring + rxq->rxrearm_start);
++
++      /* Before we start moving massive data around, check to see if
++       * there is actually a packet available
++       */
++      if (!(rxdp->wb.status_error0 &
++                      rte_cpu_to_le_32(1 << IAVF_RX_FLEX_DESC_STATUS0_DD_S)))
++              return 0;
++
++      /* constants used in processing loop */
++      const __m256i crc_adjust =
++              _mm256_set_epi16
++                      (/* first descriptor */
++                       0, 0, 0,       /* ignore non-length fields */
++                       -rxq->crc_len, /* sub crc on data_len */
++                       0,             /* ignore high-16bits of pkt_len */
++                       -rxq->crc_len, /* sub crc on pkt_len */
++                       0, 0,          /* ignore pkt_type field */
++                       /* second descriptor */
++                       0, 0, 0,       /* ignore non-length fields */
++                       -rxq->crc_len, /* sub crc on data_len */
++                       0,             /* ignore high-16bits of pkt_len */
++                       -rxq->crc_len, /* sub crc on pkt_len */
++                       0, 0           /* ignore pkt_type field */
++                      );
++
++      /* 8 packets DD mask, LSB in each 32-bit value */
++      const __m256i dd_check = _mm256_set1_epi32(1);
++
++      /* 8 packets EOP mask, second-LSB in each 32-bit value */
++      const __m256i eop_check = _mm256_slli_epi32(dd_check,
++                      IAVF_RX_FLEX_DESC_STATUS0_EOF_S);
++
++      /* mask to shuffle from desc. to mbuf (2 descriptors)*/
++      const __m256i shuf_msk =
++              _mm256_set_epi8
++                      (/* first descriptor */
++                       15, 14,
++                       13, 12,        /* octet 12~15, 32 bits rss */
++                       11, 10,        /* octet 10~11, 16 bits vlan_macip */
++                       5, 4,          /* octet 4~5, 16 bits data_len */
++                       0xFF, 0xFF,    /* skip hi 16 bits pkt_len, zero out */
++                       5, 4,          /* octet 4~5, 16 bits pkt_len */
++                       0xFF, 0xFF,    /* pkt_type set as unknown */
++                       0xFF, 0xFF,    /*pkt_type set as unknown */
++                       /* second descriptor */
++                       15, 14,
++                       13, 12,        /* octet 12~15, 32 bits rss */
++                       11, 10,        /* octet 10~11, 16 bits vlan_macip */
++                       5, 4,          /* octet 4~5, 16 bits data_len */
++                       0xFF, 0xFF,    /* skip hi 16 bits pkt_len, zero out */
++                       5, 4,          /* octet 4~5, 16 bits pkt_len */
++                       0xFF, 0xFF,    /* pkt_type set as unknown */
++                       0xFF, 0xFF     /*pkt_type set as unknown */
++                      );
++      /**
++       * compile-time check the above crc and shuffle layout is correct.
++       * NOTE: the first field (lowest address) is given last in set_epi
++       * calls above.
++       */
++      RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, pkt_len) !=
++                      offsetof(struct rte_mbuf, rx_descriptor_fields1) + 4);
++      RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, data_len) !=
++                      offsetof(struct rte_mbuf, rx_descriptor_fields1) + 8);
++      RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, vlan_tci) !=
++                      offsetof(struct rte_mbuf, rx_descriptor_fields1) + 10);
++      RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, hash) !=
++                      offsetof(struct rte_mbuf, rx_descriptor_fields1) + 12);
++
++      /* Status/Error flag masks */
++      /**
++       * mask everything except Checksum Reports, RSS indication
++       * and VLAN indication.
++       * bit6:4 for IP/L4 checksum errors.
++       * bit12 is for RSS indication.
++       * bit13 is for VLAN indication.
++       */
++      const __m256i flags_mask =
++               _mm256_set1_epi32((7 << 4) | (1 << 12) | (1 << 13));
++      /**
++       * data to be shuffled by the result of the flags mask shifted by 4
++       * bits.  This gives use the l3_l4 flags.
++       */
++      const __m256i l3_l4_flags_shuf = _mm256_set_epi8(0, 0, 0, 0, 0, 0, 0, 0,
++                      /* shift right 1 bit to make sure it not exceed 255 */
++                      (PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD |
++                       PKT_RX_IP_CKSUM_BAD) >> 1,
++                      (PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD |
++                       PKT_RX_IP_CKSUM_GOOD) >> 1,
++                      (PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_GOOD |
++                       PKT_RX_IP_CKSUM_BAD) >> 1,
++                      (PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_GOOD |
++                       PKT_RX_IP_CKSUM_GOOD) >> 1,
++                      (PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD) >> 1,
++                      (PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_GOOD) >> 1,
++                      (PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD) >> 1,
++                      (PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_GOOD) >> 1,
++                      /* second 128-bits */
++                      0, 0, 0, 0, 0, 0, 0, 0,
++                      (PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD |
++                       PKT_RX_IP_CKSUM_BAD) >> 1,
++                      (PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD |
++                       PKT_RX_IP_CKSUM_GOOD) >> 1,
++                      (PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_GOOD |
++                       PKT_RX_IP_CKSUM_BAD) >> 1,
++                      (PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_GOOD |
++                       PKT_RX_IP_CKSUM_GOOD) >> 1,
++                      (PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD) >> 1,
++                      (PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_GOOD) >> 1,
++                      (PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD) >> 1,
++                      (PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_GOOD) >> 1);
++      const __m256i cksum_mask =
++               _mm256_set1_epi32(PKT_RX_IP_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD |
++                                 PKT_RX_L4_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD |
++                                 PKT_RX_EIP_CKSUM_BAD);
++      /**
++       * data to be shuffled by result of flag mask, shifted down 12.
++       * If RSS(bit12)/VLAN(bit13) are set,
++       * shuffle moves appropriate flags in place.
++       */
++      const __m256i rss_vlan_flags_shuf = _mm256_set_epi8(0, 0, 0, 0,
++                      0, 0, 0, 0,
++                      0, 0, 0, 0,
++                      PKT_RX_RSS_HASH | PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED,
++                      PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED,
++                      PKT_RX_RSS_HASH, 0,
++                      /* end up 128-bits */
++                      0, 0, 0, 0,
++                      0, 0, 0, 0,
++                      0, 0, 0, 0,
++                      PKT_RX_RSS_HASH | PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED,
++                      PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED,
++                      PKT_RX_RSS_HASH, 0);
++
++      uint16_t i, received;
++
++      for (i = 0, received = 0; i < nb_pkts;
++           i += IAVF_DESCS_PER_LOOP_AVX,
++           rxdp += IAVF_DESCS_PER_LOOP_AVX) {
++              /* step 1, copy over 8 mbuf pointers to rx_pkts array */
++              _mm256_storeu_si256((void *)&rx_pkts[i],
++                                  _mm256_loadu_si256((void *)&sw_ring[i]));
++#ifdef RTE_ARCH_X86_64
++              _mm256_storeu_si256
++                      ((void *)&rx_pkts[i + 4],
++                       _mm256_loadu_si256((void *)&sw_ring[i + 4]));
++#endif
++
++              __m256i raw_desc0_1, raw_desc2_3, raw_desc4_5, raw_desc6_7;
++
++              const __m128i raw_desc7 =
++                      _mm_load_si128((void *)(rxdp + 7));
++              rte_compiler_barrier();
++              const __m128i raw_desc6 =
++                      _mm_load_si128((void *)(rxdp + 6));
++              rte_compiler_barrier();
++              const __m128i raw_desc5 =
++                      _mm_load_si128((void *)(rxdp + 5));
++              rte_compiler_barrier();
++              const __m128i raw_desc4 =
++                      _mm_load_si128((void *)(rxdp + 4));
++              rte_compiler_barrier();
++              const __m128i raw_desc3 =
++                      _mm_load_si128((void *)(rxdp + 3));
++              rte_compiler_barrier();
++              const __m128i raw_desc2 =
++                      _mm_load_si128((void *)(rxdp + 2));
++              rte_compiler_barrier();
++              const __m128i raw_desc1 =
++                      _mm_load_si128((void *)(rxdp + 1));
++              rte_compiler_barrier();
++              const __m128i raw_desc0 =
++                      _mm_load_si128((void *)(rxdp + 0));
++
++              raw_desc6_7 =
++                      _mm256_inserti128_si256
++                              (_mm256_castsi128_si256(raw_desc6),
++                               raw_desc7, 1);
++              raw_desc4_5 =
++                      _mm256_inserti128_si256
++                              (_mm256_castsi128_si256(raw_desc4),
++                               raw_desc5, 1);
++              raw_desc2_3 =
++                      _mm256_inserti128_si256
++                              (_mm256_castsi128_si256(raw_desc2),
++                               raw_desc3, 1);
++              raw_desc0_1 =
++                      _mm256_inserti128_si256
++                              (_mm256_castsi128_si256(raw_desc0),
++                               raw_desc1, 1);
++
++              if (split_packet) {
++                      int j;
++
++                      for (j = 0; j < IAVF_DESCS_PER_LOOP_AVX; j++)
++                              rte_mbuf_prefetch_part2(rx_pkts[i + j]);
++              }
++
++              /**
++               * convert descriptors 4-7 into mbufs, re-arrange fields.
++               * Then write into the mbuf.
++               */
++              __m256i mb6_7 = _mm256_shuffle_epi8(raw_desc6_7, shuf_msk);
++              __m256i mb4_5 = _mm256_shuffle_epi8(raw_desc4_5, shuf_msk);
++
++              mb6_7 = _mm256_add_epi16(mb6_7, crc_adjust);
++              mb4_5 = _mm256_add_epi16(mb4_5, crc_adjust);
++              /**
++               * to get packet types, ptype is located in bit16-25
++               * of each 128bits
++               */
++              const __m256i ptype_mask =
++                      _mm256_set1_epi16(IAVF_RX_FLEX_DESC_PTYPE_M);
++              const __m256i ptypes6_7 =
++                      _mm256_and_si256(raw_desc6_7, ptype_mask);
++              const __m256i ptypes4_5 =
++                      _mm256_and_si256(raw_desc4_5, ptype_mask);
++              const uint16_t ptype7 = _mm256_extract_epi16(ptypes6_7, 9);
++              const uint16_t ptype6 = _mm256_extract_epi16(ptypes6_7, 1);
++              const uint16_t ptype5 = _mm256_extract_epi16(ptypes4_5, 9);
++              const uint16_t ptype4 = _mm256_extract_epi16(ptypes4_5, 1);
++
++              mb6_7 = _mm256_insert_epi32(mb6_7, type_table[ptype7], 4);
++              mb6_7 = _mm256_insert_epi32(mb6_7, type_table[ptype6], 0);
++              mb4_5 = _mm256_insert_epi32(mb4_5, type_table[ptype5], 4);
++              mb4_5 = _mm256_insert_epi32(mb4_5, type_table[ptype4], 0);
++              /* merge the status bits into one register */
++              const __m256i status4_7 = _mm256_unpackhi_epi32(raw_desc6_7,
++                              raw_desc4_5);
++
++              /**
++               * convert descriptors 0-3 into mbufs, re-arrange fields.
++               * Then write into the mbuf.
++               */
++              __m256i mb2_3 = _mm256_shuffle_epi8(raw_desc2_3, shuf_msk);
++              __m256i mb0_1 = _mm256_shuffle_epi8(raw_desc0_1, shuf_msk);
++
++              mb2_3 = _mm256_add_epi16(mb2_3, crc_adjust);
++              mb0_1 = _mm256_add_epi16(mb0_1, crc_adjust);
++              /**
++               * to get packet types, ptype is located in bit16-25
++               * of each 128bits
++               */
++              const __m256i ptypes2_3 =
++                      _mm256_and_si256(raw_desc2_3, ptype_mask);
++              const __m256i ptypes0_1 =
++                      _mm256_and_si256(raw_desc0_1, ptype_mask);
++              const uint16_t ptype3 = _mm256_extract_epi16(ptypes2_3, 9);
++              const uint16_t ptype2 = _mm256_extract_epi16(ptypes2_3, 1);
++              const uint16_t ptype1 = _mm256_extract_epi16(ptypes0_1, 9);
++              const uint16_t ptype0 = _mm256_extract_epi16(ptypes0_1, 1);
++
++              mb2_3 = _mm256_insert_epi32(mb2_3, type_table[ptype3], 4);
++              mb2_3 = _mm256_insert_epi32(mb2_3, type_table[ptype2], 0);
++              mb0_1 = _mm256_insert_epi32(mb0_1, type_table[ptype1], 4);
++              mb0_1 = _mm256_insert_epi32(mb0_1, type_table[ptype0], 0);
++              /* merge the status bits into one register */
++              const __m256i status0_3 = _mm256_unpackhi_epi32(raw_desc2_3,
++                                                              raw_desc0_1);
++
++              /**
++               * take the two sets of status bits and merge to one
++               * After merge, the packets status flags are in the
++               * order (hi->lo): [1, 3, 5, 7, 0, 2, 4, 6]
++               */
++              __m256i status0_7 = _mm256_unpacklo_epi64(status4_7,
++                                                        status0_3);
++
++              /* now do flag manipulation */
++
++              /* get only flag/error bits we want */
++              const __m256i flag_bits =
++                      _mm256_and_si256(status0_7, flags_mask);
++              /**
++               * l3_l4_error flags, shuffle, then shift to correct adjustment
++               * of flags in flags_shuf, and finally mask out extra bits
++               */
++              __m256i l3_l4_flags = _mm256_shuffle_epi8(l3_l4_flags_shuf,
++                              _mm256_srli_epi32(flag_bits, 4));
++              l3_l4_flags = _mm256_slli_epi32(l3_l4_flags, 1);
++              l3_l4_flags = _mm256_and_si256(l3_l4_flags, cksum_mask);
++              /* set rss and vlan flags */
++              const __m256i rss_vlan_flag_bits =
++                      _mm256_srli_epi32(flag_bits, 12);
++              const __m256i rss_vlan_flags =
++                      _mm256_shuffle_epi8(rss_vlan_flags_shuf,
++                                          rss_vlan_flag_bits);
++
++              /* merge flags */
++              const __m256i mbuf_flags = _mm256_or_si256(l3_l4_flags,
++                              rss_vlan_flags);
++              /**
++               * At this point, we have the 8 sets of flags in the low 16-bits
++               * of each 32-bit value in vlan0.
++               * We want to extract these, and merge them with the mbuf init
++               * data so we can do a single write to the mbuf to set the flags
++               * and all the other initialization fields. Extracting the
++               * appropriate flags means that we have to do a shift and blend
++               * for each mbuf before we do the write. However, we can also
++               * add in the previously computed rx_descriptor fields to
++               * make a single 256-bit write per mbuf
++               */
++              /* check the structure matches expectations */
++              RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, ol_flags) !=
++                               offsetof(struct rte_mbuf, rearm_data) + 8);
++              RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, rearm_data) !=
++                               RTE_ALIGN(offsetof(struct rte_mbuf,
++                                                  rearm_data),
++                                         16));
++              /* build up data and do writes */
++              __m256i rearm0, rearm1, rearm2, rearm3, rearm4, rearm5,
++                      rearm6, rearm7;
++              rearm6 = _mm256_blend_epi32(mbuf_init,
++                                          _mm256_slli_si256(mbuf_flags, 8),
++                                          0x04);
++              rearm4 = _mm256_blend_epi32(mbuf_init,
++                                          _mm256_slli_si256(mbuf_flags, 4),
++                                          0x04);
++              rearm2 = _mm256_blend_epi32(mbuf_init, mbuf_flags, 0x04);
++              rearm0 = _mm256_blend_epi32(mbuf_init,
++                                          _mm256_srli_si256(mbuf_flags, 4),
++                                          0x04);
++              /* permute to add in the rx_descriptor e.g. rss fields */
++              rearm6 = _mm256_permute2f128_si256(rearm6, mb6_7, 0x20);
++              rearm4 = _mm256_permute2f128_si256(rearm4, mb4_5, 0x20);
++              rearm2 = _mm256_permute2f128_si256(rearm2, mb2_3, 0x20);
++              rearm0 = _mm256_permute2f128_si256(rearm0, mb0_1, 0x20);
++              /* write to mbuf */
++              _mm256_storeu_si256((__m256i *)&rx_pkts[i + 6]->rearm_data,
++                                  rearm6);
++              _mm256_storeu_si256((__m256i *)&rx_pkts[i + 4]->rearm_data,
++                                  rearm4);
++              _mm256_storeu_si256((__m256i *)&rx_pkts[i + 2]->rearm_data,
++                                  rearm2);
++              _mm256_storeu_si256((__m256i *)&rx_pkts[i + 0]->rearm_data,
++                                  rearm0);
++
++              /* repeat for the odd mbufs */
++              const __m256i odd_flags =
++                      _mm256_castsi128_si256
++                              (_mm256_extracti128_si256(mbuf_flags, 1));
++              rearm7 = _mm256_blend_epi32(mbuf_init,
++                                          _mm256_slli_si256(odd_flags, 8),
++                                          0x04);
++              rearm5 = _mm256_blend_epi32(mbuf_init,
++                                          _mm256_slli_si256(odd_flags, 4),
++                                          0x04);
++              rearm3 = _mm256_blend_epi32(mbuf_init, odd_flags, 0x04);
++              rearm1 = _mm256_blend_epi32(mbuf_init,
++                                          _mm256_srli_si256(odd_flags, 4),
++                                          0x04);
++              /* since odd mbufs are already in hi 128-bits use blend */
++              rearm7 = _mm256_blend_epi32(rearm7, mb6_7, 0xF0);
++              rearm5 = _mm256_blend_epi32(rearm5, mb4_5, 0xF0);
++              rearm3 = _mm256_blend_epi32(rearm3, mb2_3, 0xF0);
++              rearm1 = _mm256_blend_epi32(rearm1, mb0_1, 0xF0);
++              /* again write to mbufs */
++              _mm256_storeu_si256((__m256i *)&rx_pkts[i + 7]->rearm_data,
++                                  rearm7);
++              _mm256_storeu_si256((__m256i *)&rx_pkts[i + 5]->rearm_data,
++                                  rearm5);
++              _mm256_storeu_si256((__m256i *)&rx_pkts[i + 3]->rearm_data,
++                                  rearm3);
++              _mm256_storeu_si256((__m256i *)&rx_pkts[i + 1]->rearm_data,
++                                  rearm1);
++
++              /* extract and record EOP bit */
++              if (split_packet) {
++                      const __m128i eop_mask =
++                              _mm_set1_epi16(1 <<
++                                             IAVF_RX_FLEX_DESC_STATUS0_EOF_S);
++                      const __m256i eop_bits256 = _mm256_and_si256(status0_7,
++                                                                   eop_check);
++                      /* pack status bits into a single 128-bit register */
++                      const __m128i eop_bits =
++                              _mm_packus_epi32
++                                      (_mm256_castsi256_si128(eop_bits256),
++                                       _mm256_extractf128_si256(eop_bits256,
++                                                                1));
++                      /**
++                       * flip bits, and mask out the EOP bit, which is now
++                       * a split-packet bit i.e. !EOP, rather than EOP one.
++                       */
++                      __m128i split_bits = _mm_andnot_si128(eop_bits,
++                                      eop_mask);
++                      /**
++                       * eop bits are out of order, so we need to shuffle them
++                       * back into order again. In doing so, only use low 8
++                       * bits, which acts like another pack instruction
++                       * The original order is (hi->lo): 1,3,5,7,0,2,4,6
++                       * [Since we use epi8, the 16-bit positions are
++                       * multiplied by 2 in the eop_shuffle value.]
++                       */
++                      __m128i eop_shuffle =
++                              _mm_set_epi8(/* zero hi 64b */
++                                           0xFF, 0xFF, 0xFF, 0xFF,
++                                           0xFF, 0xFF, 0xFF, 0xFF,
++                                           /* move values to lo 64b */
++                                           8, 0, 10, 2,
++                                           12, 4, 14, 6);
++                      split_bits = _mm_shuffle_epi8(split_bits, eop_shuffle);
++                      *(uint64_t *)split_packet =
++                              _mm_cvtsi128_si64(split_bits);
++                      split_packet += IAVF_DESCS_PER_LOOP_AVX;
++              }
++
++              /* perform dd_check */
++              status0_7 = _mm256_and_si256(status0_7, dd_check);
++              status0_7 = _mm256_packs_epi32(status0_7,
++                                             _mm256_setzero_si256());
++
++              uint64_t burst = __builtin_popcountll
++                                      (_mm_cvtsi128_si64
++                                              (_mm256_extracti128_si256
++                                                      (status0_7, 1)));
++              burst += __builtin_popcountll
++                              (_mm_cvtsi128_si64
++                                      (_mm256_castsi256_si128(status0_7)));
++              received += burst;
++              if (burst != IAVF_DESCS_PER_LOOP_AVX)
++                      break;
++      }
++
++      /* update tail pointers */
++      rxq->rx_tail += received;
++      rxq->rx_tail &= (rxq->nb_rx_desc - 1);
++      if ((rxq->rx_tail & 1) == 1 && received > 1) { /* keep avx2 aligned */
++              rxq->rx_tail--;
++              received--;
++      }
++      rxq->rxrearm_nb += received;
++      return received;
++}
++
+ /**
+  * Notice:
+  * - nb_pkts < IAVF_DESCS_PER_LOOP, just return no packet
+@@ -625,6 +1086,18 @@ iavf_recv_pkts_vec_avx2(void *rx_queue, struct rte_mbuf **rx_pkts,
+       return _iavf_recv_raw_pkts_vec_avx2(rx_queue, rx_pkts, nb_pkts, NULL);
+ }
++/**
++ * Notice:
++ * - nb_pkts < IAVF_DESCS_PER_LOOP, just return no packet
++ */
++uint16_t
++iavf_recv_pkts_vec_avx2_flex_rxd(void *rx_queue, struct rte_mbuf **rx_pkts,
++                               uint16_t nb_pkts)
++{
++      return _iavf_recv_raw_pkts_vec_avx2_flex_rxd(rx_queue, rx_pkts,
++                                                   nb_pkts, NULL);
++}
++
+ /**
+  * vPMD receive routine that reassembles single burst of 32 scattered packets
+  * Notice:
+@@ -690,6 +1163,75 @@ iavf_recv_scattered_pkts_vec_avx2(void *rx_queue, struct rte_mbuf **rx_pkts,
+                               rx_pkts + retval, nb_pkts);
+ }
++/**
++ * vPMD receive routine that reassembles single burst of
++ * 32 scattered packets for flex RxD
++ * Notice:
++ * - nb_pkts < IAVF_DESCS_PER_LOOP, just return no packet
++ */
++static uint16_t
++iavf_recv_scattered_burst_vec_avx2_flex_rxd(void *rx_queue,
++                                          struct rte_mbuf **rx_pkts,
++                                          uint16_t nb_pkts)
++{
++      struct iavf_rx_queue *rxq = rx_queue;
++      uint8_t split_flags[IAVF_VPMD_RX_MAX_BURST] = {0};
++
++      /* get some new buffers */
++      uint16_t nb_bufs = _iavf_recv_raw_pkts_vec_avx2_flex_rxd(rxq,
++                                      rx_pkts, nb_pkts, split_flags);
++      if (nb_bufs == 0)
++              return 0;
++
++      /* happy day case, full burst + no packets to be joined */
++      const uint64_t *split_fl64 = (uint64_t *)split_flags;
++
++      if (!rxq->pkt_first_seg &&
++          split_fl64[0] == 0 && split_fl64[1] == 0 &&
++          split_fl64[2] == 0 && split_fl64[3] == 0)
++              return nb_bufs;
++
++      /* reassemble any packets that need reassembly*/
++      unsigned int i = 0;
++
++      if (!rxq->pkt_first_seg) {
++              /* find the first split flag, and only reassemble then*/
++              while (i < nb_bufs && !split_flags[i])
++                      i++;
++              if (i == nb_bufs)
++                      return nb_bufs;
++              rxq->pkt_first_seg = rx_pkts[i];
++      }
++      return i + reassemble_packets(rxq, &rx_pkts[i], nb_bufs - i,
++                                           &split_flags[i]);
++}
++
++/**
++ * vPMD receive routine that reassembles scattered packets for flex RxD.
++ * Main receive routine that can handle arbitrary burst sizes
++ * Notice:
++ * - nb_pkts < IAVF_DESCS_PER_LOOP, just return no packet
++ */
++uint16_t
++iavf_recv_scattered_pkts_vec_avx2_flex_rxd(void *rx_queue,
++                                         struct rte_mbuf **rx_pkts,
++                                         uint16_t nb_pkts)
++{
++      uint16_t retval = 0;
++
++      while (nb_pkts > IAVF_VPMD_RX_MAX_BURST) {
++              uint16_t burst =
++                      iavf_recv_scattered_burst_vec_avx2_flex_rxd
++                      (rx_queue, rx_pkts + retval, IAVF_VPMD_RX_MAX_BURST);
++              retval += burst;
++              nb_pkts -= burst;
++              if (burst < IAVF_VPMD_RX_MAX_BURST)
++                      return retval;
++      }
++      return retval + iavf_recv_scattered_burst_vec_avx2_flex_rxd(rx_queue,
++                              rx_pkts + retval, nb_pkts);
++}
++
+ static inline void
+ iavf_vtx1(volatile struct iavf_tx_desc *txdp,
+         struct rte_mbuf *pkt, uint64_t flags)
+-- 
+2.17.1
+
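For orientation, the AVX2 loop above processes eight flex Rx descriptors per iteration; the scalar sketch below shows what it computes for a single descriptor (DD check, ptype translation through the adapter's ptype table, and CRC-adjusted lengths). It is illustrative only and assumes the flex descriptor field names (ptype_flex_flags0, pkt_len) used elsewhere in this patch series.

/* Scalar sketch of one descriptor's worth of the vectorized flex-RxD path.
 * Not part of the patch series; for explanation only.
 */
static inline int
iavf_flex_desc_to_mbuf_sketch(const volatile union iavf_rx_flex_desc *rxd,
                              struct rte_mbuf *mb,
                              const uint32_t *type_table, uint8_t crc_len)
{
        /* Descriptor not written back yet: the burst stops here. */
        if (!(rte_le_to_cpu_16(rxd->wb.status_error0) &
              (1 << IAVF_RX_FLEX_DESC_STATUS0_DD_S)))
                return 0;

        /* Translate the 10-bit hardware ptype via the adapter ptype table. */
        mb->packet_type =
                type_table[rte_le_to_cpu_16(rxd->wb.ptype_flex_flags0) &
                           IAVF_RX_FLEX_DESC_PTYPE_M];

        /* Length fields with the CRC stripped (crc_adjust in the vector path). */
        mb->data_len = rte_le_to_cpu_16(rxd->wb.pkt_len) - crc_len;
        mb->pkt_len = mb->data_len;

        return 1;
}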
diff --git a/build/external/patches/dpdk_20.02/0011-net-iavf-add-flow-director-enabled-switch-value.patch b/build/external/patches/dpdk_20.02/0011-net-iavf-add-flow-director-enabled-switch-value.patch
new file mode 100644 (file)
index 0000000..f2fe52e
--- /dev/null
@@ -0,0 +1,78 @@
+From e69d36c549609c02b6814cd06232e340fe0b873b Mon Sep 17 00:00:00 2001
+From: Leyi Rong <leyi.rong@intel.com>
+Date: Wed, 8 Apr 2020 14:22:05 +0800
+Subject: [DPDK 11/17] net/iavf: add flow director enabled switch value
+
+This commit adds an fdir_enabled flag to the iavf_rx_queue structure
+to identify whether an FDIR ID is present. The Rx data path benefits
+when FDIR ID parsing can be skipped, especially in the vector path.
+
+Signed-off-by: Leyi Rong <leyi.rong@intel.com>
+---
+ drivers/net/iavf/iavf.h      |  1 +
+ drivers/net/iavf/iavf_rxtx.h | 30 ++++++++++++++++++++++++++++++
+ 2 files changed, 31 insertions(+)
+
+diff --git a/drivers/net/iavf/iavf.h b/drivers/net/iavf/iavf.h
+index 67d625053..0cd0117c2 100644
+--- a/drivers/net/iavf/iavf.h
++++ b/drivers/net/iavf/iavf.h
+@@ -134,6 +134,7 @@ struct iavf_adapter {
+       bool tx_vec_allowed;
+       const uint32_t *ptype_tbl;
+       bool stopped;
++      uint16_t fdir_ref_cnt;
+ };
+ /* IAVF_DEV_PRIVATE_TO */
+diff --git a/drivers/net/iavf/iavf_rxtx.h b/drivers/net/iavf/iavf_rxtx.h
+index 8e1db2588..f37438953 100644
+--- a/drivers/net/iavf/iavf_rxtx.h
++++ b/drivers/net/iavf/iavf_rxtx.h
+@@ -103,6 +103,7 @@ struct iavf_rx_queue {
+       uint16_t port_id;        /* device port ID */
+       uint8_t crc_len;        /* 0 if CRC stripped, 4 otherwise */
++      uint8_t fdir_enabled;   /* 0 if FDIR disabled, 1 when enabled */
+       uint16_t queue_id;      /* Rx queue index */
+       uint16_t rx_buf_len;    /* The packet buffer size */
+       uint16_t rx_hdr_len;    /* The header buffer size */
+@@ -485,6 +486,35 @@ void iavf_dump_tx_descriptor(const struct iavf_tx_queue *txq,
+              tx_desc->cmd_type_offset_bsz);
+ }
++#define FDIR_PROC_ENABLE_PER_QUEUE(ad, on) do { \
++      int i; \
++      for (i = 0; i < (ad)->eth_dev->data->nb_rx_queues; i++) { \
++              struct iavf_rx_queue *rxq = (ad)->eth_dev->data->rx_queues[i]; \
++              if (!rxq) \
++                      continue; \
++              rxq->fdir_enabled = on; \
++      } \
++      PMD_DRV_LOG(DEBUG, "FDIR processing on RX set to %d", on); \
++} while (0)
++
++/* Enable/disable flow director Rx processing in data path. */
++static inline
++void iavf_fdir_rx_proc_enable(struct iavf_adapter *ad, bool on)
++{
++      if (on) {
++              /* enable flow director processing */
++              if (ad->fdir_ref_cnt++ == 0)
++                      FDIR_PROC_ENABLE_PER_QUEUE(ad, on);
++      } else {
++              if (ad->fdir_ref_cnt >= 1) {
++                      ad->fdir_ref_cnt--;
++
++                      if (ad->fdir_ref_cnt == 0)
++                              FDIR_PROC_ENABLE_PER_QUEUE(ad, on);
++              }
++      }
++}
++
+ #ifdef RTE_LIBRTE_IAVF_DEBUG_DUMP_DESC
+ #define IAVF_DUMP_RX_DESC(rxq, desc, rx_id) \
+       iavf_dump_rx_descriptor(rxq, desc, rx_id)
+-- 
+2.17.1
+
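The fdir_enabled switch above is reference counted per adapter rather than toggled per rule. A minimal sketch of the intended calling pattern, assuming hypothetical rule add/delete helpers (the real call sites arrive with the FDIR rule patches later in this series):

/* Illustrative only: fdir_ref_cnt counts rules that need FDIR ID parsing;
 * the per-queue fdir_enabled flag flips only on the 0 <-> 1 transitions.
 */
static void
example_fdir_rule_add(struct iavf_adapter *ad)
{
        /* ... program the rule through virtchnl (hypothetical) ... */
        iavf_fdir_rx_proc_enable(ad, true);   /* first rule: enable all Rx queues */
}

static void
example_fdir_rule_del(struct iavf_adapter *ad)
{
        /* ... remove the rule through virtchnl (hypothetical) ... */
        iavf_fdir_rx_proc_enable(ad, false);  /* last rule: disable all Rx queues */
}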
diff --git a/build/external/patches/dpdk_20.02/0012-net-iavf-support-flow-mark-in-normal-data-path.patch b/build/external/patches/dpdk_20.02/0012-net-iavf-support-flow-mark-in-normal-data-path.patch
new file mode 100644 (file)
index 0000000..646793b
--- /dev/null
@@ -0,0 +1,113 @@
+From 8aa451f16a44c4d278e38991b0c24e89a5a9aff2 Mon Sep 17 00:00:00 2001
+From: Leyi Rong <leyi.rong@intel.com>
+Date: Wed, 8 Apr 2020 14:22:06 +0800
+Subject: [DPDK 12/17] net/iavf: support flow mark in normal data path
+
+Support Flow Director mark ID parsing in the normal data path.
+
+Signed-off-by: Leyi Rong <leyi.rong@intel.com>
+---
+ drivers/net/iavf/iavf.h      |  3 +++
+ drivers/net/iavf/iavf_rxtx.c | 37 ++++++++++++++++++++++++++++++++++++
+ 2 files changed, 40 insertions(+)
+
+diff --git a/drivers/net/iavf/iavf.h b/drivers/net/iavf/iavf.h
+index 0cd0117c2..b63efd4e8 100644
+--- a/drivers/net/iavf/iavf.h
++++ b/drivers/net/iavf/iavf.h
+@@ -67,6 +67,9 @@
+ #define IAVF_48_BIT_WIDTH (CHAR_BIT * 6)
+ #define IAVF_48_BIT_MASK  RTE_LEN2MASK(IAVF_48_BIT_WIDTH, uint64_t)
++#define IAVF_RX_DESC_EXT_STATUS_FLEXBH_MASK  0x03
++#define IAVF_RX_DESC_EXT_STATUS_FLEXBH_FD_ID 0x01
++
+ struct iavf_adapter;
+ struct iavf_rx_queue;
+ struct iavf_tx_queue;
+diff --git a/drivers/net/iavf/iavf_rxtx.c b/drivers/net/iavf/iavf_rxtx.c
+index 34c41d104..ca47c6ab6 100644
+--- a/drivers/net/iavf/iavf_rxtx.c
++++ b/drivers/net/iavf/iavf_rxtx.c
+@@ -756,6 +756,10 @@ iavf_rxd_to_pkt_flags(uint64_t qword)
+                                       IAVF_RX_DESC_FLTSTAT_RSS_HASH) ==
+                       IAVF_RX_DESC_FLTSTAT_RSS_HASH) ? PKT_RX_RSS_HASH : 0;
++      /* Check if FDIR Match */
++      flags |= (qword & (1 << IAVF_RX_DESC_STATUS_FLM_SHIFT) ?
++                              PKT_RX_FDIR : 0);
++
+       if (likely((error_bits & IAVF_RX_ERR_BITS) == 0)) {
+               flags |= (PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD);
+               return flags;
+@@ -776,6 +780,25 @@ iavf_rxd_to_pkt_flags(uint64_t qword)
+       return flags;
+ }
++static inline uint64_t
++iavf_rxd_build_fdir(volatile union iavf_rx_desc *rxdp, struct rte_mbuf *mb)
++{
++      uint64_t flags = 0;
++      uint16_t flexbh;
++
++      flexbh = (rte_le_to_cpu_32(rxdp->wb.qword2.ext_status) >>
++              IAVF_RX_DESC_EXT_STATUS_FLEXBH_SHIFT) &
++              IAVF_RX_DESC_EXT_STATUS_FLEXBH_MASK;
++
++      if (flexbh == IAVF_RX_DESC_EXT_STATUS_FLEXBH_FD_ID) {
++              mb->hash.fdir.hi =
++                      rte_le_to_cpu_32(rxdp->wb.qword3.hi_dword.fd_id);
++              flags |= PKT_RX_FDIR_ID;
++      }
++
++      return flags;
++}
++
+ /* Translate the rx flex descriptor status to pkt flags */
+ static inline void
+ iavf_rxd_to_pkt_fields(struct rte_mbuf *mb,
+@@ -792,6 +815,11 @@ iavf_rxd_to_pkt_fields(struct rte_mbuf *mb,
+               mb->hash.rss = rte_le_to_cpu_32(desc->rss_hash);
+       }
+ #endif
++
++      if (desc->flow_id != 0xFFFFFFFF) {
++              mb->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
++              mb->hash.fdir.hi = rte_le_to_cpu_32(desc->flow_id);
++      }
+ }
+ #define IAVF_RX_FLEX_ERR0_BITS        \
+@@ -951,6 +979,9 @@ iavf_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
+                       rxm->hash.rss =
+                               rte_le_to_cpu_32(rxd.wb.qword0.hi_dword.rss);
++              if (pkt_flags & PKT_RX_FDIR)
++                      pkt_flags |= iavf_rxd_build_fdir(&rxd, rxm);
++
+               rxm->ol_flags |= pkt_flags;
+               rx_pkts[nb_rx++] = rxm;
+@@ -1349,6 +1380,9 @@ iavf_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
+                       first_seg->hash.rss =
+                               rte_le_to_cpu_32(rxd.wb.qword0.hi_dword.rss);
++              if (pkt_flags & PKT_RX_FDIR)
++                      pkt_flags |= iavf_rxd_build_fdir(&rxd, first_seg);
++
+               first_seg->ol_flags |= pkt_flags;
+               /* Prefetch data of first segment, if configured to do so. */
+@@ -1515,6 +1549,9 @@ iavf_rx_scan_hw_ring(struct iavf_rx_queue *rxq)
+                               mb->hash.rss = rte_le_to_cpu_32(
+                                       rxdp[j].wb.qword0.hi_dword.rss);
++                      if (pkt_flags & PKT_RX_FDIR)
++                              pkt_flags |= iavf_rxd_build_fdir(&rxdp[j], mb);
++
+                       mb->ol_flags |= pkt_flags;
+               }
+-- 
+2.17.1
+
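On the receive side, applications consume the mark added by this patch through standard mbuf fields. A hedged sketch using the generic DPDK mbuf API, not code from this series:

#include <stdio.h>
#include <rte_mbuf.h>

/* If the packet hit an FDIR rule, PKT_RX_FDIR is set; if a flow/mark ID was
 * reported, PKT_RX_FDIR_ID is also set and the ID is in hash.fdir.hi.
 */
static inline void
example_read_flow_mark(const struct rte_mbuf *m)
{
        if ((m->ol_flags & PKT_RX_FDIR) && (m->ol_flags & PKT_RX_FDIR_ID))
                printf("FDIR match, flow id %u\n", m->hash.fdir.hi);
}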
diff --git a/build/external/patches/dpdk_20.02/0013-net-iavf-support-flow-mark-in-AVX-path.patch b/build/external/patches/dpdk_20.02/0013-net-iavf-support-flow-mark-in-AVX-path.patch
new file mode 100644 (file)
index 0000000..74baf14
--- /dev/null
@@ -0,0 +1,121 @@
+From f5de510dd842be737259ef31d1300b57890ae90e Mon Sep 17 00:00:00 2001
+From: Leyi Rong <leyi.rong@intel.com>
+Date: Wed, 8 Apr 2020 14:22:07 +0800
+Subject: [DPDK 13/17] net/iavf: support flow mark in AVX path
+
+Support Flow Director mark ID parsing from the Flex
+Rx descriptor in the AVX path.
+
+Signed-off-by: Leyi Rong <leyi.rong@intel.com>
+---
+ drivers/net/iavf/iavf_rxtx_vec_avx2.c | 72 +++++++++++++++++++++++++--
+ 1 file changed, 67 insertions(+), 5 deletions(-)
+
+diff --git a/drivers/net/iavf/iavf_rxtx_vec_avx2.c b/drivers/net/iavf/iavf_rxtx_vec_avx2.c
+index b23188fd3..3bf5833fa 100644
+--- a/drivers/net/iavf/iavf_rxtx_vec_avx2.c
++++ b/drivers/net/iavf/iavf_rxtx_vec_avx2.c
+@@ -616,6 +616,25 @@ _iavf_recv_raw_pkts_vec_avx2(struct iavf_rx_queue *rxq,
+       return received;
+ }
++static inline __m256i
++flex_rxd_to_fdir_flags_vec_avx2(const __m256i fdir_id0_7)
++{
++#define FDID_MIS_MAGIC 0xFFFFFFFF
++      RTE_BUILD_BUG_ON(PKT_RX_FDIR != (1 << 2));
++      RTE_BUILD_BUG_ON(PKT_RX_FDIR_ID != (1 << 13));
++      const __m256i pkt_fdir_bit = _mm256_set1_epi32(PKT_RX_FDIR |
++                      PKT_RX_FDIR_ID);
++      /* desc->flow_id field == 0xFFFFFFFF means fdir mismatch */
++      const __m256i fdir_mis_mask = _mm256_set1_epi32(FDID_MIS_MAGIC);
++      __m256i fdir_mask = _mm256_cmpeq_epi32(fdir_id0_7,
++                      fdir_mis_mask);
++      /* this XOR op results to bit-reverse the fdir_mask */
++      fdir_mask = _mm256_xor_si256(fdir_mask, fdir_mis_mask);
++      const __m256i fdir_flags = _mm256_and_si256(fdir_mask, pkt_fdir_bit);
++
++      return fdir_flags;
++}
++
+ static inline uint16_t
+ _iavf_recv_raw_pkts_vec_avx2_flex_rxd(struct iavf_rx_queue *rxq,
+                                     struct rte_mbuf **rx_pkts,
+@@ -678,8 +697,8 @@ _iavf_recv_raw_pkts_vec_avx2_flex_rxd(struct iavf_rx_queue *rxq,
+       const __m256i shuf_msk =
+               _mm256_set_epi8
+                       (/* first descriptor */
+-                       15, 14,
+-                       13, 12,        /* octet 12~15, 32 bits rss */
++                       0xFF, 0xFF,
++                       0xFF, 0xFF,    /* rss not supported */
+                        11, 10,        /* octet 10~11, 16 bits vlan_macip */
+                        5, 4,          /* octet 4~5, 16 bits data_len */
+                        0xFF, 0xFF,    /* skip hi 16 bits pkt_len, zero out */
+@@ -687,8 +706,8 @@ _iavf_recv_raw_pkts_vec_avx2_flex_rxd(struct iavf_rx_queue *rxq,
+                        0xFF, 0xFF,    /* pkt_type set as unknown */
+                        0xFF, 0xFF,    /*pkt_type set as unknown */
+                        /* second descriptor */
+-                       15, 14,
+-                       13, 12,        /* octet 12~15, 32 bits rss */
++                       0xFF, 0xFF,
++                       0xFF, 0xFF,    /* rss not supported */
+                        11, 10,        /* octet 10~11, 16 bits vlan_macip */
+                        5, 4,          /* octet 4~5, 16 bits data_len */
+                        0xFF, 0xFF,    /* skip hi 16 bits pkt_len, zero out */
+@@ -930,8 +949,51 @@ _iavf_recv_raw_pkts_vec_avx2_flex_rxd(struct iavf_rx_queue *rxq,
+                                           rss_vlan_flag_bits);
+               /* merge flags */
+-              const __m256i mbuf_flags = _mm256_or_si256(l3_l4_flags,
++              __m256i mbuf_flags = _mm256_or_si256(l3_l4_flags,
+                               rss_vlan_flags);
++
++              if (rxq->fdir_enabled) {
++                      const __m256i fdir_id4_7 =
++                              _mm256_unpackhi_epi32(raw_desc6_7, raw_desc4_5);
++
++                      const __m256i fdir_id0_3 =
++                              _mm256_unpackhi_epi32(raw_desc2_3, raw_desc0_1);
++
++                      const __m256i fdir_id0_7 =
++                              _mm256_unpackhi_epi64(fdir_id4_7, fdir_id0_3);
++
++                      const __m256i fdir_flags =
++                              flex_rxd_to_fdir_flags_vec_avx2(fdir_id0_7);
++
++                      /* merge with fdir_flags */
++                      mbuf_flags = _mm256_or_si256(mbuf_flags, fdir_flags);
++
++                      /* write to mbuf: have to use scalar store here */
++                      rx_pkts[i + 0]->hash.fdir.hi =
++                              _mm256_extract_epi32(fdir_id0_7, 3);
++
++                      rx_pkts[i + 1]->hash.fdir.hi =
++                              _mm256_extract_epi32(fdir_id0_7, 7);
++
++                      rx_pkts[i + 2]->hash.fdir.hi =
++                              _mm256_extract_epi32(fdir_id0_7, 2);
++
++                      rx_pkts[i + 3]->hash.fdir.hi =
++                              _mm256_extract_epi32(fdir_id0_7, 6);
++
++                      rx_pkts[i + 4]->hash.fdir.hi =
++                              _mm256_extract_epi32(fdir_id0_7, 1);
++
++                      rx_pkts[i + 5]->hash.fdir.hi =
++                              _mm256_extract_epi32(fdir_id0_7, 5);
++
++                      rx_pkts[i + 6]->hash.fdir.hi =
++                              _mm256_extract_epi32(fdir_id0_7, 0);
++
++                      rx_pkts[i + 7]->hash.fdir.hi =
++                              _mm256_extract_epi32(fdir_id0_7, 4);
++              } /* if() on fdir_enabled */
++
+               /**
+                * At this point, we have the 8 sets of flags in the low 16-bits
+                * of each 32-bit value in vlan0.
+-- 
+2.17.1
+
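The vector helper flex_rxd_to_fdir_flags_vec_avx2() above encodes, for eight descriptors at once, the same per-packet rule sketched below in scalar form (illustrative only):

/* flow_id == 0xFFFFFFFF (FDID_MIS_MAGIC) means no FDIR match; any other
 * value is reported via PKT_RX_FDIR | PKT_RX_FDIR_ID. The vector path
 * stores the raw ID into hash.fdir.hi unconditionally; the flags gate
 * whether it is meaningful.
 */
static inline uint64_t
flex_rxd_to_fdir_flags_scalar_sketch(uint32_t flow_id, struct rte_mbuf *mb)
{
        mb->hash.fdir.hi = flow_id;

        if (flow_id == 0xFFFFFFFF)
                return 0;

        return PKT_RX_FDIR | PKT_RX_FDIR_ID;
}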
diff --git a/build/external/patches/dpdk_20.02/0014-net-iavf-add-RSS-hash-parsing-in-AVX-path.patch b/build/external/patches/dpdk_20.02/0014-net-iavf-add-RSS-hash-parsing-in-AVX-path.patch
new file mode 100644 (file)
index 0000000..34ce786
--- /dev/null
@@ -0,0 +1,133 @@
+From d338aa7cb45638b3a14177a8d83ef01c4ec20d1b Mon Sep 17 00:00:00 2001
+From: Leyi Rong <leyi.rong@intel.com>
+Date: Wed, 8 Apr 2020 14:22:09 +0800
+Subject: [DPDK 14/17] net/iavf: add RSS hash parsing in AVX path
+
+Support RSS hash parsing from the Flex Rx
+descriptor in the AVX data path.
+
+Signed-off-by: Leyi Rong <leyi.rong@intel.com>
+---
+ drivers/net/iavf/iavf_rxtx_vec_avx2.c | 92 ++++++++++++++++++++++++++-
+ 1 file changed, 90 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/net/iavf/iavf_rxtx_vec_avx2.c b/drivers/net/iavf/iavf_rxtx_vec_avx2.c
+index 3bf5833fa..22f1b7887 100644
+--- a/drivers/net/iavf/iavf_rxtx_vec_avx2.c
++++ b/drivers/net/iavf/iavf_rxtx_vec_avx2.c
+@@ -698,7 +698,7 @@ _iavf_recv_raw_pkts_vec_avx2_flex_rxd(struct iavf_rx_queue *rxq,
+               _mm256_set_epi8
+                       (/* first descriptor */
+                        0xFF, 0xFF,
+-                       0xFF, 0xFF,    /* rss not supported */
++                       0xFF, 0xFF,    /* rss hash parsed separately */
+                        11, 10,        /* octet 10~11, 16 bits vlan_macip */
+                        5, 4,          /* octet 4~5, 16 bits data_len */
+                        0xFF, 0xFF,    /* skip hi 16 bits pkt_len, zero out */
+@@ -707,7 +707,7 @@ _iavf_recv_raw_pkts_vec_avx2_flex_rxd(struct iavf_rx_queue *rxq,
+                        0xFF, 0xFF,    /*pkt_type set as unknown */
+                        /* second descriptor */
+                        0xFF, 0xFF,
+-                       0xFF, 0xFF,    /* rss not supported */
++                       0xFF, 0xFF,    /* rss hash parsed separately */
+                        11, 10,        /* octet 10~11, 16 bits vlan_macip */
+                        5, 4,          /* octet 4~5, 16 bits data_len */
+                        0xFF, 0xFF,    /* skip hi 16 bits pkt_len, zero out */
+@@ -994,6 +994,94 @@ _iavf_recv_raw_pkts_vec_avx2_flex_rxd(struct iavf_rx_queue *rxq,
+                               _mm256_extract_epi32(fdir_id0_7, 4);
+               } /* if() on fdir_enabled */
++              /**
++               * needs to load 2nd 16B of each desc for RSS hash parsing,
++               * will cause performance drop to get into this context.
++               */
++              if (rxq->vsi->adapter->eth_dev->data->dev_conf.rxmode.offloads &
++                              DEV_RX_OFFLOAD_RSS_HASH) {
++                      /* load bottom half of every 32B desc */
++                      const __m128i raw_desc_bh7 =
++                              _mm_load_si128
++                                      ((void *)(&rxdp[7].wb.status_error1));
++                      rte_compiler_barrier();
++                      const __m128i raw_desc_bh6 =
++                              _mm_load_si128
++                                      ((void *)(&rxdp[6].wb.status_error1));
++                      rte_compiler_barrier();
++                      const __m128i raw_desc_bh5 =
++                              _mm_load_si128
++                                      ((void *)(&rxdp[5].wb.status_error1));
++                      rte_compiler_barrier();
++                      const __m128i raw_desc_bh4 =
++                              _mm_load_si128
++                                      ((void *)(&rxdp[4].wb.status_error1));
++                      rte_compiler_barrier();
++                      const __m128i raw_desc_bh3 =
++                              _mm_load_si128
++                                      ((void *)(&rxdp[3].wb.status_error1));
++                      rte_compiler_barrier();
++                      const __m128i raw_desc_bh2 =
++                              _mm_load_si128
++                                      ((void *)(&rxdp[2].wb.status_error1));
++                      rte_compiler_barrier();
++                      const __m128i raw_desc_bh1 =
++                              _mm_load_si128
++                                      ((void *)(&rxdp[1].wb.status_error1));
++                      rte_compiler_barrier();
++                      const __m128i raw_desc_bh0 =
++                              _mm_load_si128
++                                      ((void *)(&rxdp[0].wb.status_error1));
++
++                      __m256i raw_desc_bh6_7 =
++                              _mm256_inserti128_si256
++                                      (_mm256_castsi128_si256(raw_desc_bh6),
++                                      raw_desc_bh7, 1);
++                      __m256i raw_desc_bh4_5 =
++                              _mm256_inserti128_si256
++                                      (_mm256_castsi128_si256(raw_desc_bh4),
++                                      raw_desc_bh5, 1);
++                      __m256i raw_desc_bh2_3 =
++                              _mm256_inserti128_si256
++                                      (_mm256_castsi128_si256(raw_desc_bh2),
++                                      raw_desc_bh3, 1);
++                      __m256i raw_desc_bh0_1 =
++                              _mm256_inserti128_si256
++                                      (_mm256_castsi128_si256(raw_desc_bh0),
++                                      raw_desc_bh1, 1);
++
++                      /**
++                       * to shift the 32b RSS hash value to the
++                       * highest 32b of each 128b before mask
++                       */
++                      __m256i rss_hash6_7 =
++                              _mm256_slli_epi64(raw_desc_bh6_7, 32);
++                      __m256i rss_hash4_5 =
++                              _mm256_slli_epi64(raw_desc_bh4_5, 32);
++                      __m256i rss_hash2_3 =
++                              _mm256_slli_epi64(raw_desc_bh2_3, 32);
++                      __m256i rss_hash0_1 =
++                              _mm256_slli_epi64(raw_desc_bh0_1, 32);
++
++                      __m256i rss_hash_msk =
++                              _mm256_set_epi32(0xFFFFFFFF, 0, 0, 0,
++                                               0xFFFFFFFF, 0, 0, 0);
++
++                      rss_hash6_7 = _mm256_and_si256
++                                      (rss_hash6_7, rss_hash_msk);
++                      rss_hash4_5 = _mm256_and_si256
++                                      (rss_hash4_5, rss_hash_msk);
++                      rss_hash2_3 = _mm256_and_si256
++                                      (rss_hash2_3, rss_hash_msk);
++                      rss_hash0_1 = _mm256_and_si256
++                                      (rss_hash0_1, rss_hash_msk);
++
++                      mb6_7 = _mm256_or_si256(mb6_7, rss_hash6_7);
++                      mb4_5 = _mm256_or_si256(mb4_5, rss_hash4_5);
++                      mb2_3 = _mm256_or_si256(mb2_3, rss_hash2_3);
++                      mb0_1 = _mm256_or_si256(mb0_1, rss_hash0_1);
++              } /* if() on RSS hash parsing */
++
+               /**
+                * At this point, we have the 8 sets of flags in the low 16-bits
+                * of each 32-bit value in vlan0.
+-- 
+2.17.1
+
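In scalar terms, the RSS-hash block above does the following per packet, gated on the DEV_RX_OFFLOAD_RSS_HASH offload. The sketch assumes the OVS flex descriptor layout (rss_hash field) used by this series and is illustrative only:

static inline void
flex_rxd_to_rss_hash_scalar_sketch(struct iavf_rx_queue *rxq,
                const volatile struct iavf_32b_rx_flex_desc_comms_ovs *desc,
                struct rte_mbuf *mb)
{
        struct rte_eth_dev_data *data = rxq->vsi->adapter->eth_dev->data;

        /* Loading the second 16B of the descriptor costs performance, so the
         * vector path only does it when the RSS hash offload is requested.
         */
        if (!(data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_RSS_HASH))
                return;

        /* PKT_RX_RSS_HASH itself is derived from status bit 12 elsewhere. */
        mb->hash.rss = rte_le_to_cpu_32(desc->rss_hash);
}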
diff --git a/build/external/patches/dpdk_20.02/0015-net-iavf-support-generic-flow.patch b/build/external/patches/dpdk_20.02/0015-net-iavf-support-generic-flow.patch
new file mode 100644 (file)
index 0000000..61ed8e5
--- /dev/null
@@ -0,0 +1,1531 @@
+From 0692e4be875c64c5d26f2e6df80bbb1a24df36a6 Mon Sep 17 00:00:00 2001
+From: Chenmin Sun <chenmin.sun@intel.com>
+Date: Fri, 17 Apr 2020 05:02:22 +0800
+Subject: [DPDK 15/17] net/iavf: support generic flow
+
+This patch adds iavf_flow_create, iavf_flow_destroy,
+iavf_flow_flush and iavf_flow_validate support;
+these are used to handle all the generic filters.
+
+This patch supports basic L2, L3, L4 and GTPU patterns.
+
+Signed-off-by: Qiming Yang <qiming.yang@intel.com>
+Acked-by: Qi Zhang <qi.z.zhang@intel.com>
+Signed-off-by: Chenmin Sun <chenmin.sun@intel.com>
+---
+ doc/guides/nics/features/iavf.ini    |    1 +
+ drivers/net/iavf/Makefile            |    1 +
+ drivers/net/iavf/iavf.h              |   10 +
+ drivers/net/iavf/iavf_ethdev.c       |   45 ++
+ drivers/net/iavf/iavf_generic_flow.c | 1008 ++++++++++++++++++++++++++
+ drivers/net/iavf/iavf_generic_flow.h |  313 ++++++++
+ drivers/net/iavf/meson.build         |    1 +
+ 7 files changed, 1379 insertions(+)
+ create mode 100644 drivers/net/iavf/iavf_generic_flow.c
+ create mode 100644 drivers/net/iavf/iavf_generic_flow.h
+
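As the hunks below show, the framework registers iavf_flow_ops and is reached through the RTE_ETH_FILTER_GENERIC filter_ctrl hook. A hedged sketch of how an application exercises it with one of the basic patterns this patch declares (ETH/IPV4/UDP) and a queue action; the values are illustrative:

#include <rte_flow.h>

static struct rte_flow *
example_create_ipv4_udp_rule(uint16_t port_id, uint16_t rx_queue)
{
        struct rte_flow_attr attr = { .ingress = 1 };
        struct rte_flow_item pattern[] = {
                { .type = RTE_FLOW_ITEM_TYPE_ETH },
                { .type = RTE_FLOW_ITEM_TYPE_IPV4 },
                { .type = RTE_FLOW_ITEM_TYPE_UDP },
                { .type = RTE_FLOW_ITEM_TYPE_END },
        };
        struct rte_flow_action_queue queue = { .index = rx_queue };
        struct rte_flow_action actions[] = {
                { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
                { .type = RTE_FLOW_ACTION_TYPE_END },
        };
        struct rte_flow_error err;

        /* rte_flow_validate()/rte_flow_create() end up in iavf_flow_validate()
         * and iavf_flow_create() via the generic filter_ctrl hook.
         */
        if (rte_flow_validate(port_id, &attr, pattern, actions, &err) != 0)
                return NULL;

        return rte_flow_create(port_id, &attr, pattern, actions, &err);
}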
+diff --git a/doc/guides/nics/features/iavf.ini b/doc/guides/nics/features/iavf.ini
+index 80143059e..3bf368785 100644
+--- a/doc/guides/nics/features/iavf.ini
++++ b/doc/guides/nics/features/iavf.ini
+@@ -19,6 +19,7 @@ Multicast MAC filter = Y
+ RSS hash             = Y
+ RSS key update       = Y
+ RSS reta update      = Y
++Flow API             = Y
+ VLAN filter          = Y
+ CRC offload          = Y
+ VLAN offload         = Y
+diff --git a/drivers/net/iavf/Makefile b/drivers/net/iavf/Makefile
+index 514073d76..1bf0f26b5 100644
+--- a/drivers/net/iavf/Makefile
++++ b/drivers/net/iavf/Makefile
+@@ -23,6 +23,7 @@ EXPORT_MAP := rte_pmd_iavf_version.map
+ SRCS-$(CONFIG_RTE_LIBRTE_IAVF_PMD) += iavf_ethdev.c
+ SRCS-$(CONFIG_RTE_LIBRTE_IAVF_PMD) += iavf_vchnl.c
+ SRCS-$(CONFIG_RTE_LIBRTE_IAVF_PMD) += iavf_rxtx.c
++SRCS-$(CONFIG_RTE_LIBRTE_IAVF_PMD) += iavf_generic_flow.c
+ ifeq ($(CONFIG_RTE_ARCH_X86), y)
+ SRCS-$(CONFIG_RTE_LIBRTE_IAVF_PMD) += iavf_rxtx_vec_sse.c
+ endif
+diff --git a/drivers/net/iavf/iavf.h b/drivers/net/iavf/iavf.h
+index b63efd4e8..78bdaff20 100644
+--- a/drivers/net/iavf/iavf.h
++++ b/drivers/net/iavf/iavf.h
+@@ -86,6 +86,12 @@ struct iavf_vsi {
+       struct virtchnl_eth_stats eth_stats_offset;
+ };
++struct rte_flow;
++TAILQ_HEAD(iavf_flow_list, rte_flow);
++
++struct iavf_flow_parser_node;
++TAILQ_HEAD(iavf_parser_list, iavf_flow_parser_node);
++
+ /* TODO: is that correct to assume the max number to be 16 ?*/
+ #define IAVF_MAX_MSIX_VECTORS   16
+@@ -121,6 +127,10 @@ struct iavf_info {
+       uint16_t msix_base; /* msix vector base from */
+       /* queue bitmask for each vector */
+       uint16_t rxq_map[IAVF_MAX_MSIX_VECTORS];
++      struct iavf_flow_list flow_list;
++      rte_spinlock_t flow_ops_lock;
++      struct iavf_parser_list rss_parser_list;
++      struct iavf_parser_list dist_parser_list;
+ };
+ #define IAVF_MAX_PKT_TYPE 1024
+diff --git a/drivers/net/iavf/iavf_ethdev.c b/drivers/net/iavf/iavf_ethdev.c
+index d3a121eac..95ab6e246 100644
+--- a/drivers/net/iavf/iavf_ethdev.c
++++ b/drivers/net/iavf/iavf_ethdev.c
+@@ -27,6 +27,7 @@
+ #include "iavf.h"
+ #include "iavf_rxtx.h"
++#include "iavf_generic_flow.h"
+ static int iavf_dev_configure(struct rte_eth_dev *dev);
+ static int iavf_dev_start(struct rte_eth_dev *dev);
+@@ -67,6 +68,11 @@ static int iavf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev,
+                                       uint16_t queue_id);
+ static int iavf_dev_rx_queue_intr_disable(struct rte_eth_dev *dev,
+                                        uint16_t queue_id);
++static int iavf_dev_filter_ctrl(struct rte_eth_dev *dev,
++                   enum rte_filter_type filter_type,
++                   enum rte_filter_op filter_op,
++                   void *arg);
++
+ int iavf_logtype_init;
+ int iavf_logtype_driver;
+@@ -125,6 +131,7 @@ static const struct eth_dev_ops iavf_eth_dev_ops = {
+       .mtu_set                    = iavf_dev_mtu_set,
+       .rx_queue_intr_enable       = iavf_dev_rx_queue_intr_enable,
+       .rx_queue_intr_disable      = iavf_dev_rx_queue_intr_disable,
++      .filter_ctrl                = iavf_dev_filter_ctrl,
+ };
+ static int
+@@ -1298,6 +1305,33 @@ iavf_dev_interrupt_handler(void *param)
+       iavf_enable_irq0(hw);
+ }
++static int
++iavf_dev_filter_ctrl(struct rte_eth_dev *dev,
++                   enum rte_filter_type filter_type,
++                   enum rte_filter_op filter_op,
++                   void *arg)
++{
++      int ret = 0;
++
++      if (!dev)
++              return -EINVAL;
++
++      switch (filter_type) {
++      case RTE_ETH_FILTER_GENERIC:
++              if (filter_op != RTE_ETH_FILTER_GET)
++                      return -EINVAL;
++              *(const void **)arg = &iavf_flow_ops;
++              break;
++      default:
++              PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
++                          filter_type);
++              ret = -EINVAL;
++              break;
++      }
++
++      return ret;
++}
++
+ static int
+ iavf_dev_init(struct rte_eth_dev *eth_dev)
+ {
+@@ -1305,6 +1339,7 @@ iavf_dev_init(struct rte_eth_dev *eth_dev)
+               IAVF_DEV_PRIVATE_TO_ADAPTER(eth_dev->data->dev_private);
+       struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(adapter);
+       struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
++      int ret = 0;
+       PMD_INIT_FUNC_TRACE();
+@@ -1374,6 +1409,12 @@ iavf_dev_init(struct rte_eth_dev *eth_dev)
+       /* configure and enable device interrupt */
+       iavf_enable_irq0(hw);
++      ret = iavf_flow_init(adapter);
++      if (ret) {
++              PMD_INIT_LOG(ERR, "Failed to initialize flow");
++              return ret;
++      }
++
+       return 0;
+ }
+@@ -1383,6 +1424,8 @@ iavf_dev_close(struct rte_eth_dev *dev)
+       struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+       struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
++      struct iavf_adapter *adapter =
++              IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+       iavf_dev_stop(dev);
+       iavf_shutdown_adminq(hw);
+@@ -1393,6 +1436,8 @@ iavf_dev_close(struct rte_eth_dev *dev)
+       rte_intr_callback_unregister(intr_handle,
+                                    iavf_dev_interrupt_handler, dev);
+       iavf_disable_irq0(hw);
++
++      iavf_flow_uninit(adapter);
+ }
+ static int
+diff --git a/drivers/net/iavf/iavf_generic_flow.c b/drivers/net/iavf/iavf_generic_flow.c
+new file mode 100644
+index 000000000..98f1626d6
+--- /dev/null
++++ b/drivers/net/iavf/iavf_generic_flow.c
+@@ -0,0 +1,1008 @@
++/* SPDX-License-Identifier: BSD-3-Clause
++ * Copyright(c) 2019 Intel Corporation
++ */
++
++#include <sys/queue.h>
++#include <stdio.h>
++#include <errno.h>
++#include <stdint.h>
++#include <string.h>
++#include <unistd.h>
++#include <stdarg.h>
++
++#include <rte_ether.h>
++#include <rte_ethdev_driver.h>
++#include <rte_malloc.h>
++#include <rte_tailq.h>
++
++#include "iavf.h"
++#include "iavf_generic_flow.h"
++
++static struct iavf_engine_list engine_list =
++              TAILQ_HEAD_INITIALIZER(engine_list);
++
++static int iavf_flow_validate(struct rte_eth_dev *dev,
++              const struct rte_flow_attr *attr,
++              const struct rte_flow_item pattern[],
++              const struct rte_flow_action actions[],
++              struct rte_flow_error *error);
++static struct rte_flow *iavf_flow_create(struct rte_eth_dev *dev,
++              const struct rte_flow_attr *attr,
++              const struct rte_flow_item pattern[],
++              const struct rte_flow_action actions[],
++              struct rte_flow_error *error);
++static int iavf_flow_destroy(struct rte_eth_dev *dev,
++              struct rte_flow *flow,
++              struct rte_flow_error *error);
++static int iavf_flow_flush(struct rte_eth_dev *dev,
++              struct rte_flow_error *error);
++static int iavf_flow_query(struct rte_eth_dev *dev,
++              struct rte_flow *flow,
++              const struct rte_flow_action *actions,
++              void *data,
++              struct rte_flow_error *error);
++
++const struct rte_flow_ops iavf_flow_ops = {
++      .validate = iavf_flow_validate,
++      .create = iavf_flow_create,
++      .destroy = iavf_flow_destroy,
++      .flush = iavf_flow_flush,
++      .query = iavf_flow_query,
++};
++
++/* empty */
++enum rte_flow_item_type iavf_pattern_empty[] = {
++      RTE_FLOW_ITEM_TYPE_END,
++};
++
++/* L2 */
++enum rte_flow_item_type iavf_pattern_ethertype[] = {
++      RTE_FLOW_ITEM_TYPE_ETH,
++      RTE_FLOW_ITEM_TYPE_END,
++};
++
++enum rte_flow_item_type iavf_pattern_ethertype_vlan[] = {
++      RTE_FLOW_ITEM_TYPE_ETH,
++      RTE_FLOW_ITEM_TYPE_VLAN,
++      RTE_FLOW_ITEM_TYPE_END,
++};
++
++enum rte_flow_item_type iavf_pattern_ethertype_qinq[] = {
++      RTE_FLOW_ITEM_TYPE_ETH,
++      RTE_FLOW_ITEM_TYPE_VLAN,
++      RTE_FLOW_ITEM_TYPE_VLAN,
++      RTE_FLOW_ITEM_TYPE_END,
++};
++
++/* ARP */
++enum rte_flow_item_type iavf_pattern_eth_arp[] = {
++      RTE_FLOW_ITEM_TYPE_ETH,
++      RTE_FLOW_ITEM_TYPE_ARP_ETH_IPV4,
++      RTE_FLOW_ITEM_TYPE_END,
++};
++
++/* non-tunnel IPv4 */
++enum rte_flow_item_type iavf_pattern_eth_ipv4[] = {
++      RTE_FLOW_ITEM_TYPE_ETH,
++      RTE_FLOW_ITEM_TYPE_IPV4,
++      RTE_FLOW_ITEM_TYPE_END,
++};
++
++enum rte_flow_item_type iavf_pattern_eth_vlan_ipv4[] = {
++      RTE_FLOW_ITEM_TYPE_ETH,
++      RTE_FLOW_ITEM_TYPE_VLAN,
++      RTE_FLOW_ITEM_TYPE_IPV4,
++      RTE_FLOW_ITEM_TYPE_END,
++};
++
++enum rte_flow_item_type iavf_pattern_eth_qinq_ipv4[] = {
++      RTE_FLOW_ITEM_TYPE_ETH,
++      RTE_FLOW_ITEM_TYPE_VLAN,
++      RTE_FLOW_ITEM_TYPE_VLAN,
++      RTE_FLOW_ITEM_TYPE_IPV4,
++      RTE_FLOW_ITEM_TYPE_END,
++};
++
++enum rte_flow_item_type iavf_pattern_eth_ipv4_udp[] = {
++      RTE_FLOW_ITEM_TYPE_ETH,
++      RTE_FLOW_ITEM_TYPE_IPV4,
++      RTE_FLOW_ITEM_TYPE_UDP,
++      RTE_FLOW_ITEM_TYPE_END,
++};
++
++enum rte_flow_item_type iavf_pattern_eth_vlan_ipv4_udp[] = {
++      RTE_FLOW_ITEM_TYPE_ETH,
++      RTE_FLOW_ITEM_TYPE_VLAN,
++      RTE_FLOW_ITEM_TYPE_IPV4,
++      RTE_FLOW_ITEM_TYPE_UDP,
++      RTE_FLOW_ITEM_TYPE_END,
++};
++
++enum rte_flow_item_type iavf_pattern_eth_qinq_ipv4_udp[] = {
++      RTE_FLOW_ITEM_TYPE_ETH,
++      RTE_FLOW_ITEM_TYPE_VLAN,
++      RTE_FLOW_ITEM_TYPE_VLAN,
++      RTE_FLOW_ITEM_TYPE_IPV4,
++      RTE_FLOW_ITEM_TYPE_UDP,
++      RTE_FLOW_ITEM_TYPE_END,
++};
++
++enum rte_flow_item_type iavf_pattern_eth_ipv4_tcp[] = {
++      RTE_FLOW_ITEM_TYPE_ETH,
++      RTE_FLOW_ITEM_TYPE_IPV4,
++      RTE_FLOW_ITEM_TYPE_TCP,
++      RTE_FLOW_ITEM_TYPE_END,
++};
++
++enum rte_flow_item_type iavf_pattern_eth_vlan_ipv4_tcp[] = {
++      RTE_FLOW_ITEM_TYPE_ETH,
++      RTE_FLOW_ITEM_TYPE_VLAN,
++      RTE_FLOW_ITEM_TYPE_IPV4,
++      RTE_FLOW_ITEM_TYPE_TCP,
++      RTE_FLOW_ITEM_TYPE_END,
++};
++
++enum rte_flow_item_type iavf_pattern_eth_qinq_ipv4_tcp[] = {
++      RTE_FLOW_ITEM_TYPE_ETH,
++      RTE_FLOW_ITEM_TYPE_VLAN,
++      RTE_FLOW_ITEM_TYPE_VLAN,
++      RTE_FLOW_ITEM_TYPE_IPV4,
++      RTE_FLOW_ITEM_TYPE_TCP,
++      RTE_FLOW_ITEM_TYPE_END,
++};
++
++enum rte_flow_item_type iavf_pattern_eth_ipv4_sctp[] = {
++      RTE_FLOW_ITEM_TYPE_ETH,
++      RTE_FLOW_ITEM_TYPE_IPV4,
++      RTE_FLOW_ITEM_TYPE_SCTP,
++      RTE_FLOW_ITEM_TYPE_END,
++};
++
++enum rte_flow_item_type iavf_pattern_eth_vlan_ipv4_sctp[] = {
++      RTE_FLOW_ITEM_TYPE_ETH,
++      RTE_FLOW_ITEM_TYPE_VLAN,
++      RTE_FLOW_ITEM_TYPE_IPV4,
++      RTE_FLOW_ITEM_TYPE_SCTP,
++      RTE_FLOW_ITEM_TYPE_END,
++};
++
++enum rte_flow_item_type iavf_pattern_eth_qinq_ipv4_sctp[] = {
++      RTE_FLOW_ITEM_TYPE_ETH,
++      RTE_FLOW_ITEM_TYPE_VLAN,
++      RTE_FLOW_ITEM_TYPE_VLAN,
++      RTE_FLOW_ITEM_TYPE_IPV4,
++      RTE_FLOW_ITEM_TYPE_SCTP,
++      RTE_FLOW_ITEM_TYPE_END,
++};
++
++enum rte_flow_item_type iavf_pattern_eth_ipv4_icmp[] = {
++      RTE_FLOW_ITEM_TYPE_ETH,
++      RTE_FLOW_ITEM_TYPE_IPV4,
++      RTE_FLOW_ITEM_TYPE_ICMP,
++      RTE_FLOW_ITEM_TYPE_END,
++};
++
++enum rte_flow_item_type iavf_pattern_eth_vlan_ipv4_icmp[] = {
++      RTE_FLOW_ITEM_TYPE_ETH,
++      RTE_FLOW_ITEM_TYPE_VLAN,
++      RTE_FLOW_ITEM_TYPE_IPV4,
++      RTE_FLOW_ITEM_TYPE_ICMP,
++      RTE_FLOW_ITEM_TYPE_END,
++};
++
++enum rte_flow_item_type iavf_pattern_eth_qinq_ipv4_icmp[] = {
++      RTE_FLOW_ITEM_TYPE_ETH,
++      RTE_FLOW_ITEM_TYPE_VLAN,
++      RTE_FLOW_ITEM_TYPE_VLAN,
++      RTE_FLOW_ITEM_TYPE_IPV4,
++      RTE_FLOW_ITEM_TYPE_ICMP,
++      RTE_FLOW_ITEM_TYPE_END,
++};
++
++/* non-tunnel IPv6 */
++enum rte_flow_item_type iavf_pattern_eth_ipv6[] = {
++      RTE_FLOW_ITEM_TYPE_ETH,
++      RTE_FLOW_ITEM_TYPE_IPV6,
++      RTE_FLOW_ITEM_TYPE_END,
++};
++
++enum rte_flow_item_type iavf_pattern_eth_vlan_ipv6[] = {
++      RTE_FLOW_ITEM_TYPE_ETH,
++      RTE_FLOW_ITEM_TYPE_VLAN,
++      RTE_FLOW_ITEM_TYPE_IPV6,
++      RTE_FLOW_ITEM_TYPE_END,
++};
++
++enum rte_flow_item_type iavf_pattern_eth_qinq_ipv6[] = {
++      RTE_FLOW_ITEM_TYPE_ETH,
++      RTE_FLOW_ITEM_TYPE_VLAN,
++      RTE_FLOW_ITEM_TYPE_VLAN,
++      RTE_FLOW_ITEM_TYPE_IPV6,
++      RTE_FLOW_ITEM_TYPE_END,
++};
++
++enum rte_flow_item_type iavf_pattern_eth_ipv6_udp[] = {
++      RTE_FLOW_ITEM_TYPE_ETH,
++      RTE_FLOW_ITEM_TYPE_IPV6,
++      RTE_FLOW_ITEM_TYPE_UDP,
++      RTE_FLOW_ITEM_TYPE_END,
++};
++
++enum rte_flow_item_type iavf_pattern_eth_vlan_ipv6_udp[] = {
++      RTE_FLOW_ITEM_TYPE_ETH,
++      RTE_FLOW_ITEM_TYPE_VLAN,
++      RTE_FLOW_ITEM_TYPE_IPV6,
++      RTE_FLOW_ITEM_TYPE_UDP,
++      RTE_FLOW_ITEM_TYPE_END,
++};
++
++enum rte_flow_item_type iavf_pattern_eth_qinq_ipv6_udp[] = {
++      RTE_FLOW_ITEM_TYPE_ETH,
++      RTE_FLOW_ITEM_TYPE_VLAN,
++      RTE_FLOW_ITEM_TYPE_VLAN,
++      RTE_FLOW_ITEM_TYPE_IPV6,
++      RTE_FLOW_ITEM_TYPE_UDP,
++      RTE_FLOW_ITEM_TYPE_END,
++};
++
++enum rte_flow_item_type iavf_pattern_eth_ipv6_tcp[] = {
++      RTE_FLOW_ITEM_TYPE_ETH,
++      RTE_FLOW_ITEM_TYPE_IPV6,
++      RTE_FLOW_ITEM_TYPE_TCP,
++      RTE_FLOW_ITEM_TYPE_END,
++};
++
++enum rte_flow_item_type iavf_pattern_eth_vlan_ipv6_tcp[] = {
++      RTE_FLOW_ITEM_TYPE_ETH,
++      RTE_FLOW_ITEM_TYPE_VLAN,
++      RTE_FLOW_ITEM_TYPE_IPV6,
++      RTE_FLOW_ITEM_TYPE_TCP,
++      RTE_FLOW_ITEM_TYPE_END,
++};
++
++enum rte_flow_item_type iavf_pattern_eth_qinq_ipv6_tcp[] = {
++      RTE_FLOW_ITEM_TYPE_ETH,
++      RTE_FLOW_ITEM_TYPE_VLAN,
++      RTE_FLOW_ITEM_TYPE_VLAN,
++      RTE_FLOW_ITEM_TYPE_IPV6,
++      RTE_FLOW_ITEM_TYPE_TCP,
++      RTE_FLOW_ITEM_TYPE_END,
++};
++
++enum rte_flow_item_type iavf_pattern_eth_ipv6_sctp[] = {
++      RTE_FLOW_ITEM_TYPE_ETH,
++      RTE_FLOW_ITEM_TYPE_IPV6,
++      RTE_FLOW_ITEM_TYPE_SCTP,
++      RTE_FLOW_ITEM_TYPE_END,
++};
++
++enum rte_flow_item_type iavf_pattern_eth_vlan_ipv6_sctp[] = {
++      RTE_FLOW_ITEM_TYPE_ETH,
++      RTE_FLOW_ITEM_TYPE_VLAN,
++      RTE_FLOW_ITEM_TYPE_IPV6,
++      RTE_FLOW_ITEM_TYPE_SCTP,
++      RTE_FLOW_ITEM_TYPE_END,
++};
++
++enum rte_flow_item_type iavf_pattern_eth_qinq_ipv6_sctp[] = {
++      RTE_FLOW_ITEM_TYPE_ETH,
++      RTE_FLOW_ITEM_TYPE_VLAN,
++      RTE_FLOW_ITEM_TYPE_VLAN,
++      RTE_FLOW_ITEM_TYPE_IPV6,
++      RTE_FLOW_ITEM_TYPE_SCTP,
++      RTE_FLOW_ITEM_TYPE_END,
++};
++
++enum rte_flow_item_type iavf_pattern_eth_ipv6_icmp6[] = {
++      RTE_FLOW_ITEM_TYPE_ETH,
++      RTE_FLOW_ITEM_TYPE_IPV6,
++      RTE_FLOW_ITEM_TYPE_ICMP6,
++      RTE_FLOW_ITEM_TYPE_END,
++};
++
++enum rte_flow_item_type iavf_pattern_eth_vlan_ipv6_icmp6[] = {
++      RTE_FLOW_ITEM_TYPE_ETH,
++      RTE_FLOW_ITEM_TYPE_VLAN,
++      RTE_FLOW_ITEM_TYPE_IPV6,
++      RTE_FLOW_ITEM_TYPE_ICMP6,
++      RTE_FLOW_ITEM_TYPE_END,
++};
++
++enum rte_flow_item_type iavf_pattern_eth_qinq_ipv6_icmp6[] = {
++      RTE_FLOW_ITEM_TYPE_ETH,
++      RTE_FLOW_ITEM_TYPE_VLAN,
++      RTE_FLOW_ITEM_TYPE_VLAN,
++      RTE_FLOW_ITEM_TYPE_IPV6,
++      RTE_FLOW_ITEM_TYPE_ICMP6,
++      RTE_FLOW_ITEM_TYPE_END,
++};
++
++/* GTPU */
++enum rte_flow_item_type iavf_pattern_eth_ipv4_gtpu[] = {
++      RTE_FLOW_ITEM_TYPE_ETH,
++      RTE_FLOW_ITEM_TYPE_IPV4,
++      RTE_FLOW_ITEM_TYPE_UDP,
++      RTE_FLOW_ITEM_TYPE_GTPU,
++      RTE_FLOW_ITEM_TYPE_END,
++};
++
++enum rte_flow_item_type iavf_pattern_eth_ipv4_gtpu_eh[] = {
++      RTE_FLOW_ITEM_TYPE_ETH,
++      RTE_FLOW_ITEM_TYPE_IPV4,
++      RTE_FLOW_ITEM_TYPE_UDP,
++      RTE_FLOW_ITEM_TYPE_GTPU,
++      RTE_FLOW_ITEM_TYPE_GTP_PSC,
++      RTE_FLOW_ITEM_TYPE_END,
++};
++
++enum rte_flow_item_type iavf_pattern_eth_ipv4_gtpu_ipv4[] = {
++      RTE_FLOW_ITEM_TYPE_ETH,
++      RTE_FLOW_ITEM_TYPE_IPV4,
++      RTE_FLOW_ITEM_TYPE_UDP,
++      RTE_FLOW_ITEM_TYPE_GTPU,
++      RTE_FLOW_ITEM_TYPE_IPV4,
++      RTE_FLOW_ITEM_TYPE_END,
++};
++
++enum rte_flow_item_type iavf_pattern_eth_ipv4_gtpu_eh_ipv4[] = {
++      RTE_FLOW_ITEM_TYPE_ETH,
++      RTE_FLOW_ITEM_TYPE_IPV4,
++      RTE_FLOW_ITEM_TYPE_UDP,
++      RTE_FLOW_ITEM_TYPE_GTPU,
++      RTE_FLOW_ITEM_TYPE_GTP_PSC,
++      RTE_FLOW_ITEM_TYPE_IPV4,
++      RTE_FLOW_ITEM_TYPE_END,
++};
++
++enum rte_flow_item_type iavf_pattern_eth_ipv4_gtpu_eh_ipv4_udp[] = {
++      RTE_FLOW_ITEM_TYPE_ETH,
++      RTE_FLOW_ITEM_TYPE_IPV4,
++      RTE_FLOW_ITEM_TYPE_UDP,
++      RTE_FLOW_ITEM_TYPE_GTPU,
++      RTE_FLOW_ITEM_TYPE_GTP_PSC,
++      RTE_FLOW_ITEM_TYPE_IPV4,
++      RTE_FLOW_ITEM_TYPE_UDP,
++      RTE_FLOW_ITEM_TYPE_END,
++};
++
++enum rte_flow_item_type iavf_pattern_eth_ipv4_gtpu_eh_ipv4_tcp[] = {
++      RTE_FLOW_ITEM_TYPE_ETH,
++      RTE_FLOW_ITEM_TYPE_IPV4,
++      RTE_FLOW_ITEM_TYPE_UDP,
++      RTE_FLOW_ITEM_TYPE_GTPU,
++      RTE_FLOW_ITEM_TYPE_GTP_PSC,
++      RTE_FLOW_ITEM_TYPE_IPV4,
++      RTE_FLOW_ITEM_TYPE_TCP,
++      RTE_FLOW_ITEM_TYPE_END,
++
++};
++
++enum rte_flow_item_type iavf_pattern_eth_ipv4_gtpu_eh_ipv4_icmp[] = {
++      RTE_FLOW_ITEM_TYPE_ETH,
++      RTE_FLOW_ITEM_TYPE_IPV4,
++      RTE_FLOW_ITEM_TYPE_UDP,
++      RTE_FLOW_ITEM_TYPE_GTPU,
++      RTE_FLOW_ITEM_TYPE_GTP_PSC,
++      RTE_FLOW_ITEM_TYPE_IPV4,
++      RTE_FLOW_ITEM_TYPE_ICMP,
++      RTE_FLOW_ITEM_TYPE_END,
++};
++
++/* ESP */
++enum rte_flow_item_type iavf_pattern_eth_ipv4_esp[] = {
++      RTE_FLOW_ITEM_TYPE_ETH,
++      RTE_FLOW_ITEM_TYPE_IPV4,
++      RTE_FLOW_ITEM_TYPE_ESP,
++      RTE_FLOW_ITEM_TYPE_END,
++};
++
++enum rte_flow_item_type iavf_pattern_eth_ipv4_udp_esp[] = {
++      RTE_FLOW_ITEM_TYPE_ETH,
++      RTE_FLOW_ITEM_TYPE_IPV4,
++      RTE_FLOW_ITEM_TYPE_UDP,
++      RTE_FLOW_ITEM_TYPE_ESP,
++      RTE_FLOW_ITEM_TYPE_END,
++};
++
++enum rte_flow_item_type iavf_pattern_eth_ipv6_esp[] = {
++      RTE_FLOW_ITEM_TYPE_ETH,
++      RTE_FLOW_ITEM_TYPE_IPV6,
++      RTE_FLOW_ITEM_TYPE_ESP,
++      RTE_FLOW_ITEM_TYPE_END,
++};
++
++enum rte_flow_item_type iavf_pattern_eth_ipv6_udp_esp[] = {
++      RTE_FLOW_ITEM_TYPE_ETH,
++      RTE_FLOW_ITEM_TYPE_IPV6,
++      RTE_FLOW_ITEM_TYPE_UDP,
++      RTE_FLOW_ITEM_TYPE_ESP,
++      RTE_FLOW_ITEM_TYPE_END,
++};
++
++/* AH */
++enum rte_flow_item_type iavf_pattern_eth_ipv4_ah[] = {
++      RTE_FLOW_ITEM_TYPE_ETH,
++      RTE_FLOW_ITEM_TYPE_IPV4,
++      RTE_FLOW_ITEM_TYPE_AH,
++      RTE_FLOW_ITEM_TYPE_END,
++};
++
++enum rte_flow_item_type iavf_pattern_eth_ipv6_ah[] = {
++      RTE_FLOW_ITEM_TYPE_ETH,
++      RTE_FLOW_ITEM_TYPE_IPV6,
++      RTE_FLOW_ITEM_TYPE_AH,
++      RTE_FLOW_ITEM_TYPE_END,
++};
++
++/* L2TPV3 */
++enum rte_flow_item_type iavf_pattern_eth_ipv4_l2tpv3[] = {
++      RTE_FLOW_ITEM_TYPE_ETH,
++      RTE_FLOW_ITEM_TYPE_IPV4,
++      RTE_FLOW_ITEM_TYPE_L2TPV3OIP,
++      RTE_FLOW_ITEM_TYPE_END,
++};
++
++enum rte_flow_item_type iavf_pattern_eth_ipv6_l2tpv3[] = {
++      RTE_FLOW_ITEM_TYPE_ETH,
++      RTE_FLOW_ITEM_TYPE_IPV6,
++      RTE_FLOW_ITEM_TYPE_L2TPV3OIP,
++      RTE_FLOW_ITEM_TYPE_END,
++};
++
++typedef struct iavf_flow_engine * (*parse_engine_t)(struct iavf_adapter *ad,
++              struct rte_flow *flow,
++              struct iavf_parser_list *parser_list,
++              const struct rte_flow_item pattern[],
++              const struct rte_flow_action actions[],
++              struct rte_flow_error *error);
++
++void
++iavf_register_flow_engine(struct iavf_flow_engine *engine)
++{
++      TAILQ_INSERT_TAIL(&engine_list, engine, node);
++}
++
++int
++iavf_flow_init(struct iavf_adapter *ad)
++{
++      int ret;
++      struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(ad);
++      void *temp;
++      struct iavf_flow_engine *engine;
++
++      TAILQ_INIT(&vf->flow_list);
++      TAILQ_INIT(&vf->rss_parser_list);
++      TAILQ_INIT(&vf->dist_parser_list);
++      rte_spinlock_init(&vf->flow_ops_lock);
++
++      TAILQ_FOREACH_SAFE(engine, &engine_list, node, temp) {
++              if (engine->init == NULL) {
++                      PMD_INIT_LOG(ERR, "Invalid engine type (%d)",
++                                   engine->type);
++                      return -ENOTSUP;
++              }
++
++              ret = engine->init(ad);
++              if (ret && ret != -ENOTSUP) {
++                      PMD_INIT_LOG(ERR, "Failed to initialize engine %d",
++                                   engine->type);
++                      return ret;
++              }
++      }
++      return 0;
++}
++
++void
++iavf_flow_uninit(struct iavf_adapter *ad)
++{
++      struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(ad);
++      struct iavf_flow_engine *engine;
++      struct rte_flow *p_flow;
++      struct iavf_flow_parser_node *p_parser;
++      void *temp;
++
++      TAILQ_FOREACH_SAFE(engine, &engine_list, node, temp) {
++              if (engine->uninit)
++                      engine->uninit(ad);
++      }
++
++      /* Remove all flows */
++      while ((p_flow = TAILQ_FIRST(&vf->flow_list))) {
++              TAILQ_REMOVE(&vf->flow_list, p_flow, node);
++              if (p_flow->engine->free)
++                      p_flow->engine->free(p_flow);
++              rte_free(p_flow);
++      }
++
++      /* Cleanup parser list */
++      while ((p_parser = TAILQ_FIRST(&vf->rss_parser_list))) {
++              TAILQ_REMOVE(&vf->rss_parser_list, p_parser, node);
++              rte_free(p_parser);
++      }
++
++      while ((p_parser = TAILQ_FIRST(&vf->dist_parser_list))) {
++              TAILQ_REMOVE(&vf->dist_parser_list, p_parser, node);
++              rte_free(p_parser);
++      }
++}
++
++int
++iavf_register_parser(struct iavf_flow_parser *parser,
++                   struct iavf_adapter *ad)
++{
++      struct iavf_parser_list *list = NULL;
++      struct iavf_flow_parser_node *parser_node;
++      struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(ad);
++
++      parser_node = rte_zmalloc("iavf_parser", sizeof(*parser_node), 0);
++      if (parser_node == NULL) {
++              PMD_DRV_LOG(ERR, "Failed to allocate memory.");
++              return -ENOMEM;
++      }
++      parser_node->parser = parser;
++
++      if (parser->engine->type == IAVF_FLOW_ENGINE_HASH) {
++              list = &vf->rss_parser_list;
++              TAILQ_INSERT_TAIL(list, parser_node, node);
++      } else if (parser->engine->type == IAVF_FLOW_ENGINE_FDIR) {
++              list = &vf->dist_parser_list;
++              TAILQ_INSERT_HEAD(list, parser_node, node);
++      } else {
++              return -EINVAL;
++      }
++
++      return 0;
++}
++
++void
++iavf_unregister_parser(struct iavf_flow_parser *parser,
++                     struct iavf_adapter *ad)
++{
++      struct iavf_parser_list *list = NULL;
++      struct iavf_flow_parser_node *p_parser;
++      struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(ad);
++      void *temp;
++
++      if (parser->engine->type == IAVF_FLOW_ENGINE_HASH)
++              list = &vf->rss_parser_list;
++      else if (parser->engine->type == IAVF_FLOW_ENGINE_FDIR)
++              list = &vf->dist_parser_list;
++
++      if (list == NULL)
++              return;
++
++      TAILQ_FOREACH_SAFE(p_parser, list, node, temp) {
++              if (p_parser->parser->engine->type == parser->engine->type) {
++                      TAILQ_REMOVE(list, p_parser, node);
++                      rte_free(p_parser);
++              }
++      }
++}
++
++static int
++iavf_flow_valid_attr(const struct rte_flow_attr *attr,
++                   struct rte_flow_error *error)
++{
++      /* Must be input direction */
++      if (!attr->ingress) {
++              rte_flow_error_set(error, EINVAL,
++                              RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
++                              attr, "Only support ingress.");
++              return -rte_errno;
++      }
++
++      /* Not supported */
++      if (attr->egress) {
++              rte_flow_error_set(error, EINVAL,
++                              RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
++                              attr, "Not support egress.");
++              return -rte_errno;
++      }
++
++      /* Not supported */
++      if (attr->priority) {
++              rte_flow_error_set(error, EINVAL,
++                              RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
++                              attr, "Not support priority.");
++              return -rte_errno;
++      }
++
++      /* Not supported */
++      if (attr->group) {
++              rte_flow_error_set(error, EINVAL,
++                              RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
++                              attr, "Not support group.");
++              return -rte_errno;
++      }
++
++      return 0;
++}
++
++/* Find the first VOID or non-VOID item pointer */
++static const struct rte_flow_item *
++iavf_find_first_item(const struct rte_flow_item *item, bool is_void)
++{
++      bool is_find;
++
++      while (item->type != RTE_FLOW_ITEM_TYPE_END) {
++              if (is_void)
++                      is_find = item->type == RTE_FLOW_ITEM_TYPE_VOID;
++              else
++                      is_find = item->type != RTE_FLOW_ITEM_TYPE_VOID;
++              if (is_find)
++                      break;
++              item++;
++      }
++      return item;
++}
++
++/* Skip all VOID items of the pattern */
++static void
++iavf_pattern_skip_void_item(struct rte_flow_item *items,
++                      const struct rte_flow_item *pattern)
++{
++      uint32_t cpy_count = 0;
++      const struct rte_flow_item *pb = pattern, *pe = pattern;
++
++      for (;;) {
++              /* Find a non-void item first */
++              pb = iavf_find_first_item(pb, false);
++              if (pb->type == RTE_FLOW_ITEM_TYPE_END) {
++                      pe = pb;
++                      break;
++              }
++
++              /* Find a void item */
++              pe = iavf_find_first_item(pb + 1, true);
++
++              cpy_count = pe - pb;
++              rte_memcpy(items, pb, sizeof(struct rte_flow_item) * cpy_count);
++
++              items += cpy_count;
++
++              if (pe->type == RTE_FLOW_ITEM_TYPE_END)
++                      break;
++
++              pb = pe + 1;
++      }
++      /* Copy the END item. */
++      rte_memcpy(items, pe, sizeof(struct rte_flow_item));
++}
++
++/* Check if the pattern matches a supported item type array */
++static bool
++iavf_match_pattern(enum rte_flow_item_type *item_array,
++                 const struct rte_flow_item *pattern)
++{
++      const struct rte_flow_item *item = pattern;
++
++      while ((*item_array == item->type) &&
++             (*item_array != RTE_FLOW_ITEM_TYPE_END)) {
++              item_array++;
++              item++;
++      }
++
++      return (*item_array == RTE_FLOW_ITEM_TYPE_END &&
++              item->type == RTE_FLOW_ITEM_TYPE_END);
++}
++
++struct iavf_pattern_match_item *
++iavf_search_pattern_match_item(const struct rte_flow_item pattern[],
++              struct iavf_pattern_match_item *array,
++              uint32_t array_len,
++              struct rte_flow_error *error)
++{
++      uint16_t i = 0;
++      struct iavf_pattern_match_item *pattern_match_item;
++      /* the returned pattern_match_item must be freed by each filter */
++      struct rte_flow_item *items; /* used for pattern without VOID items */
++      uint32_t item_num = 0; /* non-void item number */
++
++      /* Get the non-void item number of pattern */
++      while ((pattern + i)->type != RTE_FLOW_ITEM_TYPE_END) {
++              if ((pattern + i)->type != RTE_FLOW_ITEM_TYPE_VOID)
++                      item_num++;
++              i++;
++      }
++      item_num++;
++
++      items = rte_zmalloc("iavf_pattern",
++                          item_num * sizeof(struct rte_flow_item), 0);
++      if (!items) {
++              rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
++                                 NULL, "No memory for PMD internal items.");
++              return NULL;
++      }
++      pattern_match_item = rte_zmalloc("iavf_pattern_match_item",
++                              sizeof(struct iavf_pattern_match_item), 0);
++      if (!pattern_match_item) {
++              rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
++                                 NULL, "Failed to allocate memory.");
++              return NULL;
++      }
++
++      iavf_pattern_skip_void_item(items, pattern);
++
++      for (i = 0; i < array_len; i++)
++              if (iavf_match_pattern(array[i].pattern_list,
++                                     items)) {
++                      pattern_match_item->input_set_mask =
++                              array[i].input_set_mask;
++                      pattern_match_item->pattern_list =
++                              array[i].pattern_list;
++                      pattern_match_item->meta = array[i].meta;
++                      rte_free(items);
++                      return pattern_match_item;
++              }
++      rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
++                         pattern, "Unsupported pattern");
++
++      rte_free(items);
++      rte_free(pattern_match_item);
++      return NULL;
++}
++
++static struct iavf_flow_engine *
++iavf_parse_engine_create(struct iavf_adapter *ad,
++              struct rte_flow *flow,
++              struct iavf_parser_list *parser_list,
++              const struct rte_flow_item pattern[],
++              const struct rte_flow_action actions[],
++              struct rte_flow_error *error)
++{
++      struct iavf_flow_engine *engine = NULL;
++      struct iavf_flow_parser_node *parser_node;
++      void *temp;
++      void *meta = NULL;
++
++      TAILQ_FOREACH_SAFE(parser_node, parser_list, node, temp) {
++              if (parser_node->parser->parse_pattern_action(ad,
++                              parser_node->parser->array,
++                              parser_node->parser->array_len,
++                              pattern, actions, &meta, error) < 0)
++                      continue;
++
++              engine = parser_node->parser->engine;
++
++              RTE_ASSERT(engine->create != NULL);
++              if (!(engine->create(ad, flow, meta, error)))
++                      return engine;
++      }
++      return NULL;
++}
++
++static struct iavf_flow_engine *
++iavf_parse_engine_validate(struct iavf_adapter *ad,
++              struct rte_flow *flow,
++              struct iavf_parser_list *parser_list,
++              const struct rte_flow_item pattern[],
++              const struct rte_flow_action actions[],
++              struct rte_flow_error *error)
++{
++      struct iavf_flow_engine *engine = NULL;
++      struct iavf_flow_parser_node *parser_node;
++      void *temp;
++      void *meta = NULL;
++
++      TAILQ_FOREACH_SAFE(parser_node, parser_list, node, temp) {
++              if (parser_node->parser->parse_pattern_action(ad,
++                              parser_node->parser->array,
++                              parser_node->parser->array_len,
++                              pattern, actions, &meta,  error) < 0)
++                      continue;
++
++              engine = parser_node->parser->engine;
++              if (engine->validation == NULL) {
++                      rte_flow_error_set(error, EINVAL,
++                              RTE_FLOW_ERROR_TYPE_HANDLE,
++                              NULL, "Validation not supported");
++                      continue;
++              }
++
++              if (engine->validation(ad, flow, meta, error)) {
++                      rte_flow_error_set(error, EINVAL,
++                              RTE_FLOW_ERROR_TYPE_HANDLE,
++                              NULL, "Validation failed");
++                      break;
++              }
++      }
++      return engine;
++}
++
++
++static int
++iavf_flow_process_filter(struct rte_eth_dev *dev,
++              struct rte_flow *flow,
++              const struct rte_flow_attr *attr,
++              const struct rte_flow_item pattern[],
++              const struct rte_flow_action actions[],
++              struct iavf_flow_engine **engine,
++              parse_engine_t iavf_parse_engine,
++              struct rte_flow_error *error)
++{
++      int ret = IAVF_ERR_CONFIG;
++      struct iavf_adapter *ad =
++              IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
++      struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(ad);
++
++      if (!pattern) {
++              rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
++                                 NULL, "NULL pattern.");
++              return -rte_errno;
++      }
++
++      if (!actions) {
++              rte_flow_error_set(error, EINVAL,
++                                 RTE_FLOW_ERROR_TYPE_ACTION_NUM,
++                                 NULL, "NULL action.");
++              return -rte_errno;
++      }
++
++      if (!attr) {
++              rte_flow_error_set(error, EINVAL,
++                                 RTE_FLOW_ERROR_TYPE_ATTR,
++                                 NULL, "NULL attribute.");
++              return -rte_errno;
++      }
++
++      ret = iavf_flow_valid_attr(attr, error);
++      if (ret)
++              return ret;
++
++      *engine = iavf_parse_engine(ad, flow, &vf->rss_parser_list, pattern,
++                                  actions, error);
++      if (*engine != NULL)
++              return 0;
++
++      *engine = iavf_parse_engine(ad, flow, &vf->dist_parser_list, pattern,
++                                  actions, error);
++
++      if (*engine == NULL)
++              return -EINVAL;
++
++      return 0;
++}
++
++static int
++iavf_flow_validate(struct rte_eth_dev *dev,
++              const struct rte_flow_attr *attr,
++              const struct rte_flow_item pattern[],
++              const struct rte_flow_action actions[],
++              struct rte_flow_error *error)
++{
++      struct iavf_flow_engine *engine;
++
++      return iavf_flow_process_filter(dev, NULL, attr, pattern, actions,
++                      &engine, iavf_parse_engine_validate, error);
++}
++
++static struct rte_flow *
++iavf_flow_create(struct rte_eth_dev *dev,
++               const struct rte_flow_attr *attr,
++               const struct rte_flow_item pattern[],
++               const struct rte_flow_action actions[],
++               struct rte_flow_error *error)
++{
++      struct iavf_adapter *ad =
++              IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
++      struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(ad);
++      struct iavf_flow_engine *engine = NULL;
++      struct rte_flow *flow = NULL;
++      int ret;
++
++      flow = rte_zmalloc("iavf_flow", sizeof(struct rte_flow), 0);
++      if (!flow) {
++              rte_flow_error_set(error, ENOMEM,
++                                 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
++                                 "Failed to allocate memory");
++              return flow;
++      }
++
++      ret = iavf_flow_process_filter(dev, flow, attr, pattern, actions,
++                      &engine, iavf_parse_engine_create, error);
++      if (ret < 0) {
++              PMD_DRV_LOG(ERR, "Failed to create flow");
++              rte_free(flow);
++              flow = NULL;
++              goto free_flow;
++      }
++
++      flow->engine = engine;
++      TAILQ_INSERT_TAIL(&vf->flow_list, flow, node);
++      PMD_DRV_LOG(INFO, "Succeeded to create (%d) flow", engine->type);
++
++free_flow:
++      rte_spinlock_unlock(&vf->flow_ops_lock);
++      return flow;
++}
++
++static int
++iavf_flow_destroy(struct rte_eth_dev *dev,
++                struct rte_flow *flow,
++                struct rte_flow_error *error)
++{
++      struct iavf_adapter *ad =
++              IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
++      struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(ad);
++      int ret = 0;
++
++      if (!flow || !flow->engine || !flow->engine->destroy) {
++              rte_flow_error_set(error, EINVAL,
++                                 RTE_FLOW_ERROR_TYPE_HANDLE,
++                                 NULL, "Invalid flow");
++              return -rte_errno;
++      }
++
++      rte_spinlock_lock(&vf->flow_ops_lock);
++
++      ret = flow->engine->destroy(ad, flow, error);
++
++      if (!ret) {
++              TAILQ_REMOVE(&vf->flow_list, flow, node);
++              rte_free(flow);
++      } else {
++              PMD_DRV_LOG(ERR, "Failed to destroy flow");
++      }
++
++      rte_spinlock_unlock(&vf->flow_ops_lock);
++
++      return ret;
++}
++
++static int
++iavf_flow_flush(struct rte_eth_dev *dev,
++              struct rte_flow_error *error)
++{
++      struct iavf_adapter *ad =
++              IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
++      struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(ad);
++      struct rte_flow *p_flow;
++      void *temp;
++      int ret = 0;
++
++      TAILQ_FOREACH_SAFE(p_flow, &vf->flow_list, node, temp) {
++              ret = iavf_flow_destroy(dev, p_flow, error);
++              if (ret) {
++                      PMD_DRV_LOG(ERR, "Failed to flush flows");
++                      return -EINVAL;
++              }
++      }
++
++      return ret;
++}
++
++static int
++iavf_flow_query(struct rte_eth_dev *dev,
++              struct rte_flow *flow,
++              const struct rte_flow_action *actions,
++              void *data,
++              struct rte_flow_error *error)
++{
++      int ret = -EINVAL;
++      struct iavf_adapter *ad =
++              IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
++      struct rte_flow_query_count *count = data;
++
++      if (!flow || !flow->engine || !flow->engine->query_count) {
++              rte_flow_error_set(error, EINVAL,
++                                 RTE_FLOW_ERROR_TYPE_HANDLE,
++                                 NULL, "Invalid flow");
++              return -rte_errno;
++      }
++
++      for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
++              switch (actions->type) {
++              case RTE_FLOW_ACTION_TYPE_VOID:
++                      break;
++              case RTE_FLOW_ACTION_TYPE_COUNT:
++                      ret = flow->engine->query_count(ad, flow, count, error);
++                      break;
++              default:
++                      return rte_flow_error_set(error, ENOTSUP,
++                                      RTE_FLOW_ERROR_TYPE_ACTION,
++                                      actions,
++                                      "action not supported");
++              }
++      }
++      return ret;
++}
+diff --git a/drivers/net/iavf/iavf_generic_flow.h b/drivers/net/iavf/iavf_generic_flow.h
+new file mode 100644
+index 000000000..f4906b43a
+--- /dev/null
++++ b/drivers/net/iavf/iavf_generic_flow.h
+@@ -0,0 +1,313 @@
++/* SPDX-License-Identifier: BSD-3-Clause
++ * Copyright(c) 2019 Intel Corporation
++ */
++
++#ifndef _IAVF_GENERIC_FLOW_H_
++#define _IAVF_GENERIC_FLOW_H_
++
++#include <rte_flow_driver.h>
++
++/* protocol */
++
++#define IAVF_PROT_MAC_INNER         (1ULL << 1)
++#define IAVF_PROT_MAC_OUTER         (1ULL << 2)
++#define IAVF_PROT_VLAN_INNER        (1ULL << 3)
++#define IAVF_PROT_VLAN_OUTER        (1ULL << 4)
++#define IAVF_PROT_IPV4_INNER        (1ULL << 5)
++#define IAVF_PROT_IPV4_OUTER        (1ULL << 6)
++#define IAVF_PROT_IPV6_INNER        (1ULL << 7)
++#define IAVF_PROT_IPV6_OUTER        (1ULL << 8)
++#define IAVF_PROT_TCP_INNER         (1ULL << 9)
++#define IAVF_PROT_TCP_OUTER         (1ULL << 10)
++#define IAVF_PROT_UDP_INNER         (1ULL << 11)
++#define IAVF_PROT_UDP_OUTER         (1ULL << 12)
++#define IAVF_PROT_SCTP_INNER        (1ULL << 13)
++#define IAVF_PROT_SCTP_OUTER        (1ULL << 14)
++#define IAVF_PROT_ICMP4_INNER       (1ULL << 15)
++#define IAVF_PROT_ICMP4_OUTER       (1ULL << 16)
++#define IAVF_PROT_ICMP6_INNER       (1ULL << 17)
++#define IAVF_PROT_ICMP6_OUTER       (1ULL << 18)
++#define IAVF_PROT_VXLAN             (1ULL << 19)
++#define IAVF_PROT_NVGRE             (1ULL << 20)
++#define IAVF_PROT_GTPU              (1ULL << 21)
++#define IAVF_PROT_ESP             (1ULL << 22)
++#define IAVF_PROT_AH              (1ULL << 23)
++#define IAVF_PROT_L2TPV3OIP       (1ULL << 24)
++#define IAVF_PROT_PFCP                    (1ULL << 25)
++
++
++/* field */
++
++#define IAVF_SMAC                   (1ULL << 63)
++#define IAVF_DMAC                   (1ULL << 62)
++#define IAVF_ETHERTYPE              (1ULL << 61)
++#define IAVF_IP_SRC                 (1ULL << 60)
++#define IAVF_IP_DST                 (1ULL << 59)
++#define IAVF_IP_PROTO               (1ULL << 58)
++#define IAVF_IP_TTL                 (1ULL << 57)
++#define IAVF_IP_TOS                 (1ULL << 56)
++#define IAVF_SPORT                  (1ULL << 55)
++#define IAVF_DPORT                  (1ULL << 54)
++#define IAVF_ICMP_TYPE              (1ULL << 53)
++#define IAVF_ICMP_CODE              (1ULL << 52)
++#define IAVF_VXLAN_VNI              (1ULL << 51)
++#define IAVF_NVGRE_TNI              (1ULL << 50)
++#define IAVF_GTPU_TEID              (1ULL << 49)
++#define IAVF_GTPU_QFI               (1ULL << 48)
++#define IAVF_ESP_SPI              (1ULL << 47)
++#define IAVF_AH_SPI               (1ULL << 46)
++#define IAVF_L2TPV3OIP_SESSION_ID   (1ULL << 45)
++#define IAVF_PFCP_S_FIELD         (1ULL << 44)
++#define IAVF_PFCP_SEID                    (1ULL << 43)
++
++/* input set */
++
++#define IAVF_INSET_NONE             0ULL
++
++/* non-tunnel */
++
++#define IAVF_INSET_SMAC         (IAVF_PROT_MAC_OUTER | IAVF_SMAC)
++#define IAVF_INSET_DMAC         (IAVF_PROT_MAC_OUTER | IAVF_DMAC)
++#define IAVF_INSET_VLAN_INNER   (IAVF_PROT_VLAN_INNER)
++#define IAVF_INSET_VLAN_OUTER   (IAVF_PROT_VLAN_OUTER)
++#define IAVF_INSET_ETHERTYPE    (IAVF_ETHERTYPE)
++
++#define IAVF_INSET_IPV4_SRC \
++      (IAVF_PROT_IPV4_OUTER | IAVF_IP_SRC)
++#define IAVF_INSET_IPV4_DST \
++      (IAVF_PROT_IPV4_OUTER | IAVF_IP_DST)
++#define IAVF_INSET_IPV4_TOS \
++      (IAVF_PROT_IPV4_OUTER | IAVF_IP_TOS)
++#define IAVF_INSET_IPV4_PROTO \
++      (IAVF_PROT_IPV4_OUTER | IAVF_IP_PROTO)
++#define IAVF_INSET_IPV4_TTL \
++      (IAVF_PROT_IPV4_OUTER | IAVF_IP_TTL)
++#define IAVF_INSET_IPV6_SRC \
++      (IAVF_PROT_IPV6_OUTER | IAVF_IP_SRC)
++#define IAVF_INSET_IPV6_DST \
++      (IAVF_PROT_IPV6_OUTER | IAVF_IP_DST)
++#define IAVF_INSET_IPV6_NEXT_HDR \
++      (IAVF_PROT_IPV6_OUTER | IAVF_IP_PROTO)
++#define IAVF_INSET_IPV6_HOP_LIMIT \
++      (IAVF_PROT_IPV6_OUTER | IAVF_IP_TTL)
++#define IAVF_INSET_IPV6_TC \
++      (IAVF_PROT_IPV6_OUTER | IAVF_IP_TOS)
++
++#define IAVF_INSET_TCP_SRC_PORT \
++      (IAVF_PROT_TCP_OUTER | IAVF_SPORT)
++#define IAVF_INSET_TCP_DST_PORT \
++      (IAVF_PROT_TCP_OUTER | IAVF_DPORT)
++#define IAVF_INSET_UDP_SRC_PORT \
++      (IAVF_PROT_UDP_OUTER | IAVF_SPORT)
++#define IAVF_INSET_UDP_DST_PORT \
++      (IAVF_PROT_UDP_OUTER | IAVF_DPORT)
++#define IAVF_INSET_SCTP_SRC_PORT \
++      (IAVF_PROT_SCTP_OUTER | IAVF_SPORT)
++#define IAVF_INSET_SCTP_DST_PORT \
++      (IAVF_PROT_SCTP_OUTER | IAVF_DPORT)
++#define IAVF_INSET_ICMP4_SRC_PORT \
++      (IAVF_PROT_ICMP4_OUTER | IAVF_SPORT)
++#define IAVF_INSET_ICMP4_DST_PORT \
++      (IAVF_PROT_ICMP4_OUTER | IAVF_DPORT)
++#define IAVF_INSET_ICMP6_SRC_PORT \
++      (IAVF_PROT_ICMP6_OUTER | IAVF_SPORT)
++#define IAVF_INSET_ICMP6_DST_PORT \
++      (IAVF_PROT_ICMP6_OUTER | IAVF_DPORT)
++#define IAVF_INSET_ICMP4_TYPE \
++      (IAVF_PROT_ICMP4_OUTER | IAVF_ICMP_TYPE)
++#define IAVF_INSET_ICMP4_CODE \
++      (IAVF_PROT_ICMP4_OUTER | IAVF_ICMP_CODE)
++#define IAVF_INSET_ICMP6_TYPE \
++      (IAVF_PROT_ICMP6_OUTER | IAVF_ICMP_TYPE)
++#define IAVF_INSET_ICMP6_CODE \
++      (IAVF_PROT_ICMP6_OUTER | IAVF_ICMP_CODE)
++#define IAVF_INSET_GTPU_TEID \
++      (IAVF_PROT_GTPU | IAVF_GTPU_TEID)
++#define IAVF_INSET_GTPU_QFI \
++      (IAVF_PROT_GTPU | IAVF_GTPU_QFI)
++#define IAVF_INSET_ESP_SPI \
++      (IAVF_PROT_ESP | IAVF_ESP_SPI)
++#define IAVF_INSET_AH_SPI \
++      (IAVF_PROT_AH | IAVF_AH_SPI)
++#define IAVF_INSET_L2TPV3OIP_SESSION_ID \
++      (IAVF_PROT_L2TPV3OIP | IAVF_L2TPV3OIP_SESSION_ID)
++#define IAVF_INSET_PFCP_S_FIELD \
++      (IAVF_PROT_PFCP | IAVF_PFCP_S_FIELD)
++#define IAVF_INSET_PFCP_SEID \
++      (IAVF_PROT_PFCP | IAVF_PFCP_S_FIELD | IAVF_PFCP_SEID)
++
++
++/* empty pattern */
++extern enum rte_flow_item_type iavf_pattern_empty[];
++
++/* L2 */
++extern enum rte_flow_item_type iavf_pattern_ethertype[];
++extern enum rte_flow_item_type iavf_pattern_ethertype_vlan[];
++extern enum rte_flow_item_type iavf_pattern_ethertype_qinq[];
++
++/* ARP */
++extern enum rte_flow_item_type iavf_pattern_eth_arp[];
++
++/* non-tunnel IPv4 */
++extern enum rte_flow_item_type iavf_pattern_eth_ipv4[];
++extern enum rte_flow_item_type iavf_pattern_eth_vlan_ipv4[];
++extern enum rte_flow_item_type iavf_pattern_eth_qinq_ipv4[];
++extern enum rte_flow_item_type iavf_pattern_eth_ipv4_udp[];
++extern enum rte_flow_item_type iavf_pattern_eth_vlan_ipv4_udp[];
++extern enum rte_flow_item_type iavf_pattern_eth_qinq_ipv4_udp[];
++extern enum rte_flow_item_type iavf_pattern_eth_ipv4_tcp[];
++extern enum rte_flow_item_type iavf_pattern_eth_vlan_ipv4_tcp[];
++extern enum rte_flow_item_type iavf_pattern_eth_qinq_ipv4_tcp[];
++extern enum rte_flow_item_type iavf_pattern_eth_ipv4_sctp[];
++extern enum rte_flow_item_type iavf_pattern_eth_vlan_ipv4_sctp[];
++extern enum rte_flow_item_type iavf_pattern_eth_qinq_ipv4_sctp[];
++extern enum rte_flow_item_type iavf_pattern_eth_ipv4_icmp[];
++extern enum rte_flow_item_type iavf_pattern_eth_vlan_ipv4_icmp[];
++extern enum rte_flow_item_type iavf_pattern_eth_qinq_ipv4_icmp[];
++
++/* non-tunnel IPv6 */
++extern enum rte_flow_item_type iavf_pattern_eth_ipv6[];
++extern enum rte_flow_item_type iavf_pattern_eth_vlan_ipv6[];
++extern enum rte_flow_item_type iavf_pattern_eth_qinq_ipv6[];
++extern enum rte_flow_item_type iavf_pattern_eth_ipv6_udp[];
++extern enum rte_flow_item_type iavf_pattern_eth_vlan_ipv6_udp[];
++extern enum rte_flow_item_type iavf_pattern_eth_qinq_ipv6_udp[];
++extern enum rte_flow_item_type iavf_pattern_eth_ipv6_tcp[];
++extern enum rte_flow_item_type iavf_pattern_eth_vlan_ipv6_tcp[];
++extern enum rte_flow_item_type iavf_pattern_eth_qinq_ipv6_tcp[];
++extern enum rte_flow_item_type iavf_pattern_eth_ipv6_sctp[];
++extern enum rte_flow_item_type iavf_pattern_eth_vlan_ipv6_sctp[];
++extern enum rte_flow_item_type iavf_pattern_eth_qinq_ipv6_sctp[];
++extern enum rte_flow_item_type iavf_pattern_eth_ipv6_icmp6[];
++extern enum rte_flow_item_type iavf_pattern_eth_vlan_ipv6_icmp6[];
++extern enum rte_flow_item_type iavf_pattern_eth_qinq_ipv6_icmp6[];
++
++/* GTPU */
++extern enum rte_flow_item_type iavf_pattern_eth_ipv4_gtpu[];
++extern enum rte_flow_item_type iavf_pattern_eth_ipv4_gtpu_ipv4[];
++extern enum rte_flow_item_type iavf_pattern_eth_ipv4_gtpu_eh[];
++extern enum rte_flow_item_type iavf_pattern_eth_ipv4_gtpu_eh_ipv4[];
++extern enum rte_flow_item_type iavf_pattern_eth_ipv4_gtpu_eh_ipv4_udp[];
++extern enum rte_flow_item_type iavf_pattern_eth_ipv4_gtpu_eh_ipv4_tcp[];
++extern enum rte_flow_item_type iavf_pattern_eth_ipv4_gtpu_eh_ipv4_icmp[];
++
++/* ESP */
++extern enum rte_flow_item_type iavf_pattern_eth_ipv4_esp[];
++extern enum rte_flow_item_type iavf_pattern_eth_ipv4_udp_esp[];
++extern enum rte_flow_item_type iavf_pattern_eth_ipv6_esp[];
++extern enum rte_flow_item_type iavf_pattern_eth_ipv6_udp_esp[];
++
++/* AH */
++extern enum rte_flow_item_type iavf_pattern_eth_ipv4_ah[];
++extern enum rte_flow_item_type iavf_pattern_eth_ipv6_ah[];
++
++/* L2TPV3 */
++extern enum rte_flow_item_type iavf_pattern_eth_ipv4_l2tpv3[];
++extern enum rte_flow_item_type iavf_pattern_eth_ipv6_l2tpv3[];
++
++extern const struct rte_flow_ops iavf_flow_ops;
++
++/* pattern structure */
++struct iavf_pattern_match_item {
++      enum rte_flow_item_type *pattern_list;
++      /* pattern_list must end with RTE_FLOW_ITEM_TYPE_END */
++      uint64_t input_set_mask;
++      void *meta;
++};
++
++typedef int (*engine_init_t)(struct iavf_adapter *ad);
++typedef void (*engine_uninit_t)(struct iavf_adapter *ad);
++typedef int (*engine_validation_t)(struct iavf_adapter *ad,
++              struct rte_flow *flow,
++              void *meta,
++              struct rte_flow_error *error);
++typedef int (*engine_create_t)(struct iavf_adapter *ad,
++              struct rte_flow *flow,
++              void *meta,
++              struct rte_flow_error *error);
++typedef int (*engine_destroy_t)(struct iavf_adapter *ad,
++              struct rte_flow *flow,
++              struct rte_flow_error *error);
++typedef int (*engine_query_t)(struct iavf_adapter *ad,
++              struct rte_flow *flow,
++              struct rte_flow_query_count *count,
++              struct rte_flow_error *error);
++typedef void (*engine_free_t) (struct rte_flow *flow);
++typedef int (*parse_pattern_action_t)(struct iavf_adapter *ad,
++              struct iavf_pattern_match_item *array,
++              uint32_t array_len,
++              const struct rte_flow_item pattern[],
++              const struct rte_flow_action actions[],
++              void **meta,
++              struct rte_flow_error *error);
++
++/* engine types. */
++enum iavf_flow_engine_type {
++      IAVF_FLOW_ENGINE_NONE = 0,
++      IAVF_FLOW_ENGINE_FDIR,
++      IAVF_FLOW_ENGINE_HASH,
++      IAVF_FLOW_ENGINE_MAX,
++};
++
++/**
++ * Classification stages.
++ * For non-pipeline mode, there are two classification stages: Distributor/RSS.
++ * For pipeline mode, there are three classification stages:
++ * Permission/Distributor/RSS.
++ */
++enum iavf_flow_classification_stage {
++      IAVF_FLOW_STAGE_NONE = 0,
++      IAVF_FLOW_STAGE_RSS,
++      IAVF_FLOW_STAGE_DISTRIBUTOR,
++      IAVF_FLOW_STAGE_MAX,
++};
++
++/* Struct to store engine created. */
++struct iavf_flow_engine {
++      TAILQ_ENTRY(iavf_flow_engine) node;
++      engine_init_t init;
++      engine_uninit_t uninit;
++      engine_validation_t validation;
++      engine_create_t create;
++      engine_destroy_t destroy;
++      engine_query_t query_count;
++      engine_free_t free;
++      enum iavf_flow_engine_type type;
++};
++
++TAILQ_HEAD(iavf_engine_list, iavf_flow_engine);
++
++/* Struct to store flow created. */
++struct rte_flow {
++      TAILQ_ENTRY(rte_flow) node;
++      struct iavf_flow_engine *engine;
++      void *rule;
++};
++
++struct iavf_flow_parser {
++      struct iavf_flow_engine *engine;
++      struct iavf_pattern_match_item *array;
++      uint32_t array_len;
++      parse_pattern_action_t parse_pattern_action;
++      enum iavf_flow_classification_stage stage;
++};
++
++/* Struct to store parser created. */
++struct iavf_flow_parser_node {
++      TAILQ_ENTRY(iavf_flow_parser_node) node;
++      struct iavf_flow_parser *parser;
++};
++
++void iavf_register_flow_engine(struct iavf_flow_engine *engine);
++int iavf_flow_init(struct iavf_adapter *ad);
++void iavf_flow_uninit(struct iavf_adapter *ad);
++int iavf_register_parser(struct iavf_flow_parser *parser,
++                       struct iavf_adapter *ad);
++void iavf_unregister_parser(struct iavf_flow_parser *parser,
++                          struct iavf_adapter *ad);
++struct iavf_pattern_match_item *
++iavf_search_pattern_match_item(const struct rte_flow_item pattern[],
++              struct iavf_pattern_match_item *array,
++              uint32_t array_len,
++              struct rte_flow_error *error);
++#endif
+diff --git a/drivers/net/iavf/meson.build b/drivers/net/iavf/meson.build
+index dbd0b01db..32eabca4b 100644
+--- a/drivers/net/iavf/meson.build
++++ b/drivers/net/iavf/meson.build
+@@ -12,6 +12,7 @@ sources = files(
+       'iavf_ethdev.c',
+       'iavf_rxtx.c',
+       'iavf_vchnl.c',
++      'iavf_generic_flow.c',
+ )
+ if arch_subdir == 'x86'
+-- 
+2.17.1
+
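Note on the framework added by patch 0015 above: iavf_generic_flow.c only dispatches rte_flow calls; concrete engines plug in by filling a struct iavf_flow_engine and registering it before iavf_flow_init() walks engine_list. A minimal sketch of such an engine follows, assuming registration through DPDK's RTE_INIT constructor; the iavf_dummy_* names are hypothetical and only illustrate the shape a real engine (such as the FDIR engine in patch 0017) takes.

#include <rte_common.h>

#include "iavf.h"
#include "iavf_generic_flow.h"

static int
iavf_dummy_engine_init(struct iavf_adapter *ad __rte_unused)
{
	/* A real engine builds its parser here and calls
	 * iavf_register_parser() so it appears in the RSS or FDIR
	 * (distributor) parser list used by iavf_flow_process_filter().
	 */
	return 0;
}

static void
iavf_dummy_engine_uninit(struct iavf_adapter *ad __rte_unused)
{
	/* A real engine calls iavf_unregister_parser() here. */
}

static struct iavf_flow_engine iavf_dummy_engine = {
	.init = iavf_dummy_engine_init,
	.uninit = iavf_dummy_engine_uninit,
	.type = IAVF_FLOW_ENGINE_FDIR,
};

/* Runs at load time, before iavf_flow_init() iterates engine_list. */
RTE_INIT(iavf_dummy_engine_register)
{
	iavf_register_flow_engine(&iavf_dummy_engine);
}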
diff --git a/build/external/patches/dpdk_20.02/0016-common-iavf-add-flow-director-support-in-virtual-cha.patch b/build/external/patches/dpdk_20.02/0016-common-iavf-add-flow-director-support-in-virtual-cha.patch
new file mode 100644 (file)
index 0000000..adf3613
--- /dev/null
@@ -0,0 +1,238 @@
+From 5e4e6320a3c306b277d71a1811cf616fc2a6de93 Mon Sep 17 00:00:00 2001
+From: Chenmin Sun <chenmin.sun@intel.com>
+Date: Fri, 17 Apr 2020 05:53:35 +0800
+Subject: [DPDK 16/17] common/iavf: add flow director support in virtual
+ channel
+
+Add new ops and structures that allow a VF to add/delete/validate/
+query flow director rules.
+
+ADD and VALIDATE FDIR share one op: VIRTCHNL_OP_ADD_FDIR_FILTER.
+The VF sends this request to the PF by filling out the related fields
+in virtchnl_fdir_add. If the rule is created successfully, the PF
+returns the flow id and the program status to the VF. If the rule is
+only validated, the PF returns just the program status.
+
+DELETE FDIR uses op: VIRTCHNL_OP_DEL_FDIR_FILTER.
+The VF sends this request to the PF by filling out the related fields
+in virtchnl_fdir_del. If the rule is deleted successfully, the PF
+returns the program status to the VF.
+
+QUERY FDIR uses op: VIRTCHNL_OP_QUERY_FDIR_FILTER.
+The VF sends this request to the PF by filling out the related fields
+in virtchnl_fdir_query. If the PF handles the request successfully, it
+returns the program status and the query info to the VF.
+
+Signed-off-by: Simei Su <simei.su@intel.com>
+Signed-off-by: Paul M Stillwell Jr <paul.m.stillwell.jr@intel.com>
+Signed-off-by: Qi Zhang <qi.z.zhang@intel.com>
+Signed-off-by: Chenmin Sun <chenmin.sun@intel.com>
+---
+ drivers/common/iavf/virtchnl.h | 162 +++++++++++++++++++++++++++++++++
+ 1 file changed, 162 insertions(+)
+
+diff --git a/drivers/common/iavf/virtchnl.h b/drivers/common/iavf/virtchnl.h
+index 667762643..4dbf9c1c2 100644
+--- a/drivers/common/iavf/virtchnl.h
++++ b/drivers/common/iavf/virtchnl.h
+@@ -134,6 +134,9 @@ enum virtchnl_ops {
+       VIRTCHNL_OP_DCF_GET_VSI_MAP = 42,
+       VIRTCHNL_OP_DCF_GET_PKG_INFO = 43,
+       VIRTCHNL_OP_GET_SUPPORTED_RXDIDS = 44,
++      VIRTCHNL_OP_ADD_FDIR_FILTER = 47,
++      VIRTCHNL_OP_DEL_FDIR_FILTER = 48,
++      VIRTCHNL_OP_QUERY_FDIR_FILTER = 49,
+ };
+ /* These macros are used to generate compilation errors if a structure/union
+@@ -249,6 +252,7 @@ VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_vsi_resource);
+ #define VIRTCHNL_VF_OFFLOAD_ADQ_V2            0X01000000
+ #define VIRTCHNL_VF_OFFLOAD_USO                       0X02000000
+ #define VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC      0X04000000
++#define VIRTCHNL_VF_OFFLOAD_FDIR_PF           0X10000000
+       /* 0X80000000 is reserved */
+ /* Define below the capability flags that are not offloads */
+@@ -629,6 +633,11 @@ enum virtchnl_action {
+       /* action types */
+       VIRTCHNL_ACTION_DROP = 0,
+       VIRTCHNL_ACTION_TC_REDIRECT,
++      VIRTCHNL_ACTION_PASSTHRU,
++      VIRTCHNL_ACTION_QUEUE,
++      VIRTCHNL_ACTION_Q_REGION,
++      VIRTCHNL_ACTION_MARK,
++      VIRTCHNL_ACTION_COUNT,
+ };
+ enum virtchnl_flow_type {
+@@ -925,6 +934,150 @@ struct virtchnl_proto_hdrs {
+ VIRTCHNL_CHECK_STRUCT_LEN(2312, virtchnl_proto_hdrs);
++/* action configuration for FDIR */
++struct virtchnl_filter_action {
++      enum virtchnl_action type;
++      union {
++              /* used for queue and qgroup action */
++              struct {
++                      u16 index;
++                      u8 region;
++              } queue;
++              /* used for count action */
++              struct {
++                      /* share counter ID with other flow rules */
++                      u8 shared;
++                      u32 id; /* counter ID */
++              } count;
++              /* used for mark action */
++              u32 mark_id;
++              u8 reserve[32];
++      } act_conf;
++};
++
++VIRTCHNL_CHECK_STRUCT_LEN(36, virtchnl_filter_action);
++
++#define VIRTCHNL_MAX_NUM_ACTIONS  8
++
++struct virtchnl_filter_action_set {
++      /* action number must be less than VIRTCHNL_MAX_NUM_ACTIONS */
++      int count;
++      struct virtchnl_filter_action actions[VIRTCHNL_MAX_NUM_ACTIONS];
++};
++
++VIRTCHNL_CHECK_STRUCT_LEN(292, virtchnl_filter_action_set);
++
++/* pattern and action for FDIR rule */
++struct virtchnl_fdir_rule {
++      struct virtchnl_proto_hdrs proto_hdrs;
++      struct virtchnl_filter_action_set action_set;
++};
++
++VIRTCHNL_CHECK_STRUCT_LEN(2604, virtchnl_fdir_rule);
++
++/* query information to retrieve fdir rule counters.
++ * PF will fill out this structure to reset counter.
++ */
++struct virtchnl_fdir_query_info {
++      u32 match_packets_valid:1;
++      u32 match_bytes_valid:1;
++      u32 reserved:30;  /* Reserved, must be zero. */
++      u32 pad;
++      u64 matched_packets; /* Number of packets for this rule. */
++      u64 matched_bytes;   /* Number of bytes through this rule. */
++};
++
++VIRTCHNL_CHECK_STRUCT_LEN(24, virtchnl_fdir_query_info);
++
++/* Status returned to VF after the VF requests FDIR commands.
++ * VIRTCHNL_FDIR_SUCCESS
++ * The VF's FDIR-related request was successfully handled by the PF.
++ * The request can be OP_ADD/DEL/QUERY_FDIR_FILTER.
++ *
++ * VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE
++ * The OP_ADD_FDIR_FILTER request failed due to lack of hardware resources.
++ *
++ * VIRTCHNL_FDIR_FAILURE_RULE_EXIST
++ * The OP_ADD_FDIR_FILTER request failed because the rule already exists.
++ *
++ * VIRTCHNL_FDIR_FAILURE_RULE_CONFLICT
++ * The OP_ADD_FDIR_FILTER request failed due to a conflict with an existing rule.
++ *
++ * VIRTCHNL_FDIR_FAILURE_RULE_NONEXIST
++ * The OP_DEL_FDIR_FILTER request failed because the rule does not exist.
++ *
++ * VIRTCHNL_FDIR_FAILURE_RULE_INVALID
++ * The OP_ADD_FDIR_FILTER request failed because parameter validation failed
++ * or the hardware does not support the rule.
++ *
++ * VIRTCHNL_FDIR_FAILURE_RULE_TIMEOUT
++ * The OP_ADD/DEL_FDIR_FILTER request failed because rule programming
++ * timed out.
++ *
++ * VIRTCHNL_FDIR_FAILURE_QUERY_INVALID
++ * The OP_QUERY_FDIR_FILTER request failed because parameter validation failed,
++ * for example, the VF queried the counter of a rule that has no counter action.
++ */
++enum virtchnl_fdir_prgm_status {
++      VIRTCHNL_FDIR_SUCCESS = 0,
++      VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE,
++      VIRTCHNL_FDIR_FAILURE_RULE_EXIST,
++      VIRTCHNL_FDIR_FAILURE_RULE_CONFLICT,
++      VIRTCHNL_FDIR_FAILURE_RULE_NONEXIST,
++      VIRTCHNL_FDIR_FAILURE_RULE_INVALID,
++      VIRTCHNL_FDIR_FAILURE_RULE_TIMEOUT,
++      VIRTCHNL_FDIR_FAILURE_QUERY_INVALID,
++};
++
++/* VIRTCHNL_OP_ADD_FDIR_FILTER
++ * VF sends this request to PF by filling out vsi_id,
++ * validate_only and rule_cfg. PF will return flow_id
++ * if the request is successfully done and return add_status to VF.
++ */
++struct virtchnl_fdir_add {
++      u16 vsi_id;  /* INPUT */
++      /*
++       * 1 for validating a fdir rule, 0 for creating a fdir rule.
++       * Validate and create share one op: VIRTCHNL_OP_ADD_FDIR_FILTER.
++       */
++      u16 validate_only; /* INPUT */
++      u32 flow_id;       /* OUTPUT */
++      struct virtchnl_fdir_rule rule_cfg; /* INPUT */
++      enum virtchnl_fdir_prgm_status status; /* OUTPUT */
++};
++
++VIRTCHNL_CHECK_STRUCT_LEN(2616, virtchnl_fdir_add);
++
++/* VIRTCHNL_OP_DEL_FDIR_FILTER
++ * VF sends this request to PF by filling out vsi_id
++ * and flow_id. PF will return del_status to VF.
++ */
++struct virtchnl_fdir_del {
++      u16 vsi_id;  /* INPUT */
++      u16 pad;
++      u32 flow_id; /* INPUT */
++      enum virtchnl_fdir_prgm_status status; /* OUTPUT */
++};
++
++VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_fdir_del);
++
++/* VIRTCHNL_OP_QUERY_FDIR_FILTER
++ * VF sends this request to PF by filling out vsi_id,
++ * flow_id and reset_counter. PF will return query_info
++ * and query_status to VF.
++ */
++struct virtchnl_fdir_query {
++      u16 vsi_id;   /* INPUT */
++      u16 pad1[3];
++      u32 flow_id;  /* INPUT */
++      u32 reset_counter:1; /* INPUT */
++      struct virtchnl_fdir_query_info query_info; /* OUTPUT */
++      enum virtchnl_fdir_prgm_status status;  /* OUTPUT */
++      u32 pad2;
++};
++
++VIRTCHNL_CHECK_STRUCT_LEN(48, virtchnl_fdir_query);
++
+ /**
+  * virtchnl_vc_validate_vf_msg
+  * @ver: Virtchnl version info
+@@ -1110,6 +1263,15 @@ virtchnl_vc_validate_vf_msg(struct virtchnl_version_info *ver, u32 v_opcode,
+                * so the valid length keeps the default value 0.
+                */
+               break;
++      case VIRTCHNL_OP_ADD_FDIR_FILTER:
++              valid_len = sizeof(struct virtchnl_fdir_add);
++              break;
++      case VIRTCHNL_OP_DEL_FDIR_FILTER:
++              valid_len = sizeof(struct virtchnl_fdir_del);
++              break;
++      case VIRTCHNL_OP_QUERY_FDIR_FILTER:
++              valid_len = sizeof(struct virtchnl_fdir_query);
++              break;
+       /* These are always errors coming from the VF. */
+       case VIRTCHNL_OP_EVENT:
+       case VIRTCHNL_OP_UNKNOWN:
+-- 
+2.17.1
+
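The structures above only define the message layout; the intended sequence is to send VIRTCHNL_OP_ADD_FDIR_FILTER twice, first with validate_only set and then, if the PF reports VIRTCHNL_FDIR_SUCCESS, again to actually program the rule. A minimal VF-side sketch follows, assuming the virtchnl.h definitions above are in scope; send_to_pf() and example_fdir_validate_and_add() are hypothetical placeholders, while patch 0017 below implements the real path as iavf_fdir_check()/iavf_fdir_add() on top of the PMD's virtchnl command queue.

#include <string.h>

/* Sketch only: validate a rule, then program it and return its flow id.
 * send_to_pf() stands in for the driver's real virtchnl send/receive path.
 */
static int
example_fdir_validate_and_add(u16 vsi_id, const struct virtchnl_fdir_rule *rule,
			      u32 *flow_id)
{
	struct virtchnl_fdir_add msg;

	memset(&msg, 0, sizeof(msg));
	msg.vsi_id = vsi_id;
	msg.rule_cfg = *rule;

	/* Pass 1: validation only, nothing is programmed in hardware. */
	msg.validate_only = 1;
	if (send_to_pf(VIRTCHNL_OP_ADD_FDIR_FILTER, &msg, sizeof(msg)) ||
	    msg.status != VIRTCHNL_FDIR_SUCCESS)
		return -1;

	/* Pass 2: program the rule; the PF fills in msg.flow_id. */
	msg.validate_only = 0;
	if (send_to_pf(VIRTCHNL_OP_ADD_FDIR_FILTER, &msg, sizeof(msg)) ||
	    msg.status != VIRTCHNL_FDIR_SUCCESS)
		return -1;

	/* flow_id is what VIRTCHNL_OP_DEL_FDIR_FILTER needs later. */
	*flow_id = msg.flow_id;
	return 0;
}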
diff --git a/build/external/patches/dpdk_20.02/0017-net-iavf-add-support-for-FDIR-basic-rule.patch b/build/external/patches/dpdk_20.02/0017-net-iavf-add-support-for-FDIR-basic-rule.patch
new file mode 100644 (file)
index 0000000..072922d
--- /dev/null
@@ -0,0 +1,1211 @@
+From 813dc1da330eb21cf5ed399dfcff8ee7bde6aafd Mon Sep 17 00:00:00 2001
+From: Chenmin Sun <chenmin.sun@intel.com>
+Date: Fri, 17 Apr 2020 05:46:45 +0800
+Subject: [DPDK 17/17] net/iavf: add support for FDIR basic rule
+
+This patch adds the FDIR create/destroy/validate functions to the iavf PMD.
+Common patterns and the queue/qgroup/passthru/drop actions are supported.
+
+Signed-off-by: Simei Su <simei.su@intel.com>
+Signed-off-by: Chenmin Sun <chenmin.sun@intel.com>
+---
+ drivers/net/iavf/Makefile     |   1 +
+ drivers/net/iavf/iavf.h       |  18 +
+ drivers/net/iavf/iavf_fdir.c  | 949 ++++++++++++++++++++++++++++++++++
+ drivers/net/iavf/iavf_vchnl.c | 154 +++++-
+ drivers/net/iavf/meson.build  |   1 +
+ 5 files changed, 1122 insertions(+), 1 deletion(-)
+ create mode 100644 drivers/net/iavf/iavf_fdir.c
+
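For context before the driver internals: once this patch is applied, an application reaches the new FDIR engine through the standard rte_flow API. A hedged sketch is shown below; example_create_udp_rule(), the queue index and the IPv4 address are illustrative values only, while the ETH/IPV4/UDP pattern and the QUEUE action correspond to iavf_pattern_eth_ipv4_udp and the action parsing added in this patch.

#include <rte_flow.h>

/* Example only: steer UDP packets from IPv4 source 192.168.0.1 to Rx queue 3. */
static struct rte_flow *
example_create_udp_rule(uint16_t port_id)
{
	struct rte_flow_attr attr = { .ingress = 1 };
	struct rte_flow_item_ipv4 ip_spec = {
		.hdr.src_addr = RTE_BE32(0xc0a80001), /* 192.168.0.1 */
	};
	struct rte_flow_item_ipv4 ip_mask = {
		.hdr.src_addr = RTE_BE32(0xffffffff),
	};
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4,
		  .spec = &ip_spec, .mask = &ip_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_UDP },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action_queue queue = { .index = 3 };
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};
	struct rte_flow_error err;

	/* Both calls land in iavf_flow_ops (patch 0015) and, from there,
	 * in the FDIR engine added by this patch.
	 */
	if (rte_flow_validate(port_id, &attr, pattern, actions, &err) != 0)
		return NULL;
	return rte_flow_create(port_id, &attr, pattern, actions, &err);
}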
+diff --git a/drivers/net/iavf/Makefile b/drivers/net/iavf/Makefile
+index 1bf0f26b5..193bc55a7 100644
+--- a/drivers/net/iavf/Makefile
++++ b/drivers/net/iavf/Makefile
+@@ -24,6 +24,7 @@ SRCS-$(CONFIG_RTE_LIBRTE_IAVF_PMD) += iavf_ethdev.c
+ SRCS-$(CONFIG_RTE_LIBRTE_IAVF_PMD) += iavf_vchnl.c
+ SRCS-$(CONFIG_RTE_LIBRTE_IAVF_PMD) += iavf_rxtx.c
+ SRCS-$(CONFIG_RTE_LIBRTE_IAVF_PMD) += iavf_generic_flow.c
++SRCS-$(CONFIG_RTE_LIBRTE_IAVF_PMD) += iavf_fdir.c
+ ifeq ($(CONFIG_RTE_ARCH_X86), y)
+ SRCS-$(CONFIG_RTE_LIBRTE_IAVF_PMD) += iavf_rxtx_vec_sse.c
+ endif
+diff --git a/drivers/net/iavf/iavf.h b/drivers/net/iavf/iavf.h
+index 78bdaff20..5fb7881c9 100644
+--- a/drivers/net/iavf/iavf.h
++++ b/drivers/net/iavf/iavf.h
+@@ -92,6 +92,18 @@ TAILQ_HEAD(iavf_flow_list, rte_flow);
+ struct iavf_flow_parser_node;
+ TAILQ_HEAD(iavf_parser_list, iavf_flow_parser_node);
++struct iavf_fdir_conf {
++      struct virtchnl_fdir_add add_fltr;
++      struct virtchnl_fdir_del del_fltr;
++      uint64_t input_set;
++      uint32_t flow_id;
++      uint32_t mark_flag;
++};
++
++struct iavf_fdir_info {
++      struct iavf_fdir_conf conf;
++};
++
+ /* TODO: is that correct to assume the max number to be 16 ?*/
+ #define IAVF_MAX_MSIX_VECTORS   16
+@@ -131,6 +143,8 @@ struct iavf_info {
+       rte_spinlock_t flow_ops_lock;
+       struct iavf_parser_list rss_parser_list;
+       struct iavf_parser_list dist_parser_list;
++
++      struct iavf_fdir_info fdir; /* flow director info */
+ };
+ #define IAVF_MAX_PKT_TYPE 1024
+@@ -252,4 +266,8 @@ int iavf_config_promisc(struct iavf_adapter *adapter, bool enable_unicast,
+ int iavf_add_del_eth_addr(struct iavf_adapter *adapter,
+                        struct rte_ether_addr *addr, bool add);
+ int iavf_add_del_vlan(struct iavf_adapter *adapter, uint16_t vlanid, bool add);
++int iavf_fdir_add(struct iavf_adapter *adapter, struct iavf_fdir_conf *filter);
++int iavf_fdir_del(struct iavf_adapter *adapter, struct iavf_fdir_conf *filter);
++int iavf_fdir_check(struct iavf_adapter *adapter,
++              struct iavf_fdir_conf *filter);
+ #endif /* _IAVF_ETHDEV_H_ */
+diff --git a/drivers/net/iavf/iavf_fdir.c b/drivers/net/iavf/iavf_fdir.c
+new file mode 100644
+index 000000000..fc1a4f817
+--- /dev/null
++++ b/drivers/net/iavf/iavf_fdir.c
+@@ -0,0 +1,949 @@
++/* SPDX-License-Identifier: BSD-3-Clause
++ * Copyright(c) 2019 Intel Corporation
++ */
++
++#include <sys/queue.h>
++#include <stdio.h>
++#include <errno.h>
++#include <stdint.h>
++#include <string.h>
++#include <unistd.h>
++#include <stdarg.h>
++
++#include <rte_ether.h>
++#include <rte_ethdev_driver.h>
++#include <rte_malloc.h>
++#include <rte_tailq.h>
++
++#include "iavf.h"
++#include "iavf_generic_flow.h"
++#include "virtchnl.h"
++#include "iavf_rxtx.h"
++
++#define IAVF_FDIR_MAX_QREGION_SIZE 128
++
++#define IAVF_FDIR_IPV6_TC_OFFSET 20
++#define IAVF_IPV6_TC_MASK  (0xFF << IAVF_FDIR_IPV6_TC_OFFSET)
++
++#define IAVF_FDIR_INSET_ETH (\
++      IAVF_INSET_ETHERTYPE)
++
++#define IAVF_FDIR_INSET_ETH_IPV4 (\
++      IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
++      IAVF_INSET_IPV4_PROTO | IAVF_INSET_IPV4_TOS | \
++      IAVF_INSET_IPV4_TTL)
++
++#define IAVF_FDIR_INSET_ETH_IPV4_UDP (\
++      IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
++      IAVF_INSET_IPV4_TOS | IAVF_INSET_IPV4_TTL | \
++      IAVF_INSET_UDP_SRC_PORT | IAVF_INSET_UDP_DST_PORT)
++
++#define IAVF_FDIR_INSET_ETH_IPV4_TCP (\
++      IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
++      IAVF_INSET_IPV4_TOS | IAVF_INSET_IPV4_TTL | \
++      IAVF_INSET_TCP_SRC_PORT | IAVF_INSET_TCP_DST_PORT)
++
++#define IAVF_FDIR_INSET_ETH_IPV4_SCTP (\
++      IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
++      IAVF_INSET_IPV4_TOS | IAVF_INSET_IPV4_TTL | \
++      IAVF_INSET_SCTP_SRC_PORT | IAVF_INSET_SCTP_DST_PORT)
++
++#define IAVF_FDIR_INSET_ETH_IPV6 (\
++      IAVF_INSET_IPV6_SRC | IAVF_INSET_IPV6_DST | \
++      IAVF_INSET_IPV6_NEXT_HDR | IAVF_INSET_IPV6_TC | \
++      IAVF_INSET_IPV6_HOP_LIMIT)
++
++#define IAVF_FDIR_INSET_ETH_IPV6_UDP (\
++      IAVF_INSET_IPV6_SRC | IAVF_INSET_IPV6_DST | \
++      IAVF_INSET_IPV6_TC | IAVF_INSET_IPV6_HOP_LIMIT | \
++      IAVF_INSET_UDP_SRC_PORT | IAVF_INSET_UDP_DST_PORT)
++
++#define IAVF_FDIR_INSET_ETH_IPV6_TCP (\
++      IAVF_INSET_IPV6_SRC | IAVF_INSET_IPV6_DST | \
++      IAVF_INSET_IPV6_TC | IAVF_INSET_IPV6_HOP_LIMIT | \
++      IAVF_INSET_TCP_SRC_PORT | IAVF_INSET_TCP_DST_PORT)
++
++#define IAVF_FDIR_INSET_ETH_IPV6_SCTP (\
++      IAVF_INSET_IPV6_SRC | IAVF_INSET_IPV6_DST | \
++      IAVF_INSET_IPV6_TC | IAVF_INSET_IPV6_HOP_LIMIT | \
++      IAVF_INSET_SCTP_SRC_PORT | IAVF_INSET_SCTP_DST_PORT)
++
++#define IAVF_FDIR_INSET_GTPU (\
++      IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
++      IAVF_INSET_GTPU_TEID)
++
++#define IAVF_FDIR_INSET_GTPU_EH (\
++      IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
++      IAVF_INSET_GTPU_TEID | IAVF_INSET_GTPU_QFI)
++
++#define IAVF_FDIR_INSET_L2TPV3OIP (\
++      IAVF_L2TPV3OIP_SESSION_ID)
++
++#define IAVF_FDIR_INSET_ESP (\
++      IAVF_INSET_ESP_SPI)
++
++#define IAVF_FDIR_INSET_AH (\
++      IAVF_INSET_AH_SPI)
++
++#define IAVF_FDIR_INSET_IPV4_NATT_ESP (\
++      IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
++      IAVF_INSET_ESP_SPI)
++
++#define IAVF_FDIR_INSET_IPV6_NATT_ESP (\
++      IAVF_INSET_IPV6_SRC | IAVF_INSET_IPV6_DST | \
++      IAVF_INSET_ESP_SPI)
++
++#define IAVF_FDIR_INSET_PFCP (\
++      IAVF_INSET_PFCP_S_FIELD)
++
++static struct iavf_pattern_match_item iavf_fdir_pattern[] = {
++      {iavf_pattern_ethertype,                IAVF_FDIR_INSET_ETH,                    IAVF_INSET_NONE},
++      {iavf_pattern_eth_ipv4,                 IAVF_FDIR_INSET_ETH_IPV4,               IAVF_INSET_NONE},
++      {iavf_pattern_eth_ipv4_udp,             IAVF_FDIR_INSET_ETH_IPV4_UDP,           IAVF_INSET_NONE},
++      {iavf_pattern_eth_ipv4_tcp,             IAVF_FDIR_INSET_ETH_IPV4_TCP,           IAVF_INSET_NONE},
++      {iavf_pattern_eth_ipv4_sctp,            IAVF_FDIR_INSET_ETH_IPV4_SCTP,          IAVF_INSET_NONE},
++      {iavf_pattern_eth_ipv6,                 IAVF_FDIR_INSET_ETH_IPV6,               IAVF_INSET_NONE},
++      {iavf_pattern_eth_ipv6_udp,             IAVF_FDIR_INSET_ETH_IPV6_UDP,           IAVF_INSET_NONE},
++      {iavf_pattern_eth_ipv6_tcp,             IAVF_FDIR_INSET_ETH_IPV6_TCP,           IAVF_INSET_NONE},
++      {iavf_pattern_eth_ipv6_sctp,            IAVF_FDIR_INSET_ETH_IPV6_SCTP,          IAVF_INSET_NONE},
++      {iavf_pattern_eth_ipv4_gtpu,            IAVF_FDIR_INSET_GTPU,                   IAVF_INSET_NONE},
++      {iavf_pattern_eth_ipv4_gtpu_eh,         IAVF_FDIR_INSET_GTPU_EH,                IAVF_INSET_NONE},
++      {iavf_pattern_eth_ipv4_l2tpv3,          IAVF_FDIR_INSET_L2TPV3OIP,              IAVF_INSET_NONE},
++      {iavf_pattern_eth_ipv6_l2tpv3,          IAVF_FDIR_INSET_L2TPV3OIP,              IAVF_INSET_NONE},
++      {iavf_pattern_eth_ipv4_esp,             IAVF_FDIR_INSET_ESP,                    IAVF_INSET_NONE},
++      {iavf_pattern_eth_ipv6_esp,             IAVF_FDIR_INSET_ESP,                    IAVF_INSET_NONE},
++      {iavf_pattern_eth_ipv4_ah,              IAVF_FDIR_INSET_AH,                     IAVF_INSET_NONE},
++      {iavf_pattern_eth_ipv6_ah,              IAVF_FDIR_INSET_AH,                     IAVF_INSET_NONE},
++      {iavf_pattern_eth_ipv4_udp_esp,         IAVF_FDIR_INSET_IPV4_NATT_ESP,          IAVF_INSET_NONE},
++      {iavf_pattern_eth_ipv6_udp_esp,         IAVF_FDIR_INSET_IPV6_NATT_ESP,          IAVF_INSET_NONE},
++};
++
++static struct iavf_flow_parser iavf_fdir_parser;
++
++static int
++iavf_fdir_init(struct iavf_adapter *ad)
++{
++      struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(ad);
++      struct iavf_flow_parser *parser;
++
++      if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_FDIR_PF)
++              parser = &iavf_fdir_parser;
++      else
++              return -ENOTSUP;
++
++      return iavf_register_parser(parser, ad);
++}
++
++static void
++iavf_fdir_uninit(struct iavf_adapter *ad)
++{
++      struct iavf_flow_parser *parser;
++
++      parser = &iavf_fdir_parser;
++
++      iavf_unregister_parser(parser, ad);
++}
++
++static int
++iavf_fdir_create(struct iavf_adapter *ad,
++              struct rte_flow *flow,
++              void *meta,
++              struct rte_flow_error *error)
++{
++      struct iavf_fdir_conf *filter = meta;
++      struct iavf_fdir_conf *rule;
++      int ret;
++
++      rule = rte_zmalloc("fdir_entry", sizeof(*rule), 0);
++      if (!rule) {
++              rte_flow_error_set(error, ENOMEM,
++                              RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
++                              "Failed to allocate memory");
++              return -rte_errno;
++      }
++
++      ret = iavf_fdir_add(ad, filter);
++      if (ret) {
++              rte_flow_error_set(error, -ret,
++                              RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
++                              "Add filter rule failed.");
++              goto free_entry;
++      }
++
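++      /* A MARK action needs the FDIR ID from the flexible Rx descriptor,
++       * so Rx-side FDIR processing is enabled only while such rules exist.
++       */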
++      if (filter->mark_flag == 1)
++              iavf_fdir_rx_proc_enable(ad, 1);
++
++      rte_memcpy(rule, filter, sizeof(*rule));
++      flow->rule = rule;
++
++      return 0;
++
++free_entry:
++      rte_free(rule);
++      return -rte_errno;
++}
++
++static int
++iavf_fdir_destroy(struct iavf_adapter *ad,
++              struct rte_flow *flow,
++              struct rte_flow_error *error)
++{
++      struct iavf_fdir_conf *filter;
++      int ret;
++
++      filter = (struct iavf_fdir_conf *)flow->rule;
++
++      ret = iavf_fdir_del(ad, filter);
++      if (ret) {
++              rte_flow_error_set(error, -ret,
++                              RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
++                              "Del filter rule failed.");
++              return -rte_errno;
++      }
++
++      if (filter->mark_flag == 1)
++              iavf_fdir_rx_proc_enable(ad, 0);
++
++      flow->rule = NULL;
++      rte_free(filter);
++
++      return 0;
++}
++
++static int
++iavf_fdir_validation(struct iavf_adapter *ad,
++              __rte_unused struct rte_flow *flow,
++              void *meta,
++              struct rte_flow_error *error)
++{
++      struct iavf_fdir_conf *filter = meta;
++      int ret;
++
++      ret = iavf_fdir_check(ad, filter);
++      if (ret) {
++              rte_flow_error_set(error, -ret,
++                              RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
++                              "Validate filter rule failed.");
++              return -rte_errno;
++      }
++
++      return 0;
++}
++
++static struct iavf_flow_engine iavf_fdir_engine = {
++      .init = iavf_fdir_init,
++      .uninit = iavf_fdir_uninit,
++      .create = iavf_fdir_create,
++      .destroy = iavf_fdir_destroy,
++      .validation = iavf_fdir_validation,
++      .type = IAVF_FLOW_ENGINE_FDIR,
++};
++
++static int
++iavf_fdir_parse_action_qregion(struct iavf_adapter *ad,
++                      struct rte_flow_error *error,
++                      const struct rte_flow_action *act,
++                      struct virtchnl_filter_action *filter_action)
++{
++      const struct rte_flow_action_rss *rss = act->conf;
++      uint32_t i;
++
++      if (act->type != RTE_FLOW_ACTION_TYPE_RSS) {
++              rte_flow_error_set(error, EINVAL,
++                              RTE_FLOW_ERROR_TYPE_ACTION, act,
++                              "Invalid action.");
++              return -rte_errno;
++      }
++
++      if (rss->queue_num <= 1) {
++              rte_flow_error_set(error, EINVAL,
++                              RTE_FLOW_ERROR_TYPE_ACTION, act,
++                              "Queue region size can't be 0 or 1.");
++              return -rte_errno;
++      }
++
++      /* check if queue index for queue region is continuous */
++      for (i = 0; i < rss->queue_num - 1; i++) {
++              if (rss->queue[i + 1] != rss->queue[i] + 1) {
++                      rte_flow_error_set(error, EINVAL,
++                                      RTE_FLOW_ERROR_TYPE_ACTION, act,
++                                      "Discontinuous queue region");
++                      return -rte_errno;
++              }
++      }
++
++      if (rss->queue[rss->queue_num - 1] >= ad->eth_dev->data->nb_rx_queues) {
++              rte_flow_error_set(error, EINVAL,
++                              RTE_FLOW_ERROR_TYPE_ACTION, act,
++                              "Invalid queue region indexes.");
++              return -rte_errno;
++      }
++
++      if (!(rte_is_power_of_2(rss->queue_num) &&
++              (rss->queue_num <= IAVF_FDIR_MAX_QREGION_SIZE))) {
++              rte_flow_error_set(error, EINVAL,
++                              RTE_FLOW_ERROR_TYPE_ACTION, act,
++                              "The region size must be one of the following values: "
++                              "1, 2, 4, 8, 16, 32, 64 or 128, as long as the total "
++                              "number of queues does not exceed the VSI allocation.");
++              return -rte_errno;
++      }
++
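++      /* The PF receives the region as a base queue index plus a
++       * power-of-two size encoded as log2(queue_num).
++       */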
++      filter_action->act_conf.queue.index = rss->queue[0];
++      filter_action->act_conf.queue.region = rte_fls_u32(rss->queue_num) - 1;
++
++      return 0;
++}
++
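++/* Translate the rte_flow action list into a virtchnl action set.  A rule
++ * accepts at most one destination action (passthru/drop/queue/rss) and at
++ * most one mark action; a mark-only rule is completed with a passthru.
++ */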
++static int
++iavf_fdir_parse_action(struct iavf_adapter *ad,
++                      const struct rte_flow_action actions[],
++                      struct rte_flow_error *error,
++                      struct iavf_fdir_conf *filter)
++{
++      const struct rte_flow_action_queue *act_q;
++      const struct rte_flow_action_mark *mark_spec = NULL;
++      uint32_t dest_num = 0;
++      uint32_t mark_num = 0;
++      int ret;
++
++      int number = 0;
++      struct virtchnl_filter_action *filter_action;
++
++      for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
++              switch (actions->type) {
++              case RTE_FLOW_ACTION_TYPE_VOID:
++                      break;
++
++              case RTE_FLOW_ACTION_TYPE_PASSTHRU:
++                      dest_num++;
++
++                      filter_action = &filter->add_fltr.rule_cfg.action_set.actions[number];
++
++                      filter_action->type = VIRTCHNL_ACTION_PASSTHRU;
++
++                      filter->add_fltr.rule_cfg.action_set.count = ++number;
++                      break;
++
++              case RTE_FLOW_ACTION_TYPE_DROP:
++                      dest_num++;
++
++                      filter_action = &filter->add_fltr.rule_cfg.action_set.actions[number];
++
++                      filter_action->type = VIRTCHNL_ACTION_DROP;
++
++                      filter->add_fltr.rule_cfg.action_set.count = ++number;
++                      break;
++
++              case RTE_FLOW_ACTION_TYPE_QUEUE:
++                      dest_num++;
++
++                      act_q = actions->conf;
++                      filter_action = &filter->add_fltr.rule_cfg.action_set.actions[number];
++
++                      filter_action->type = VIRTCHNL_ACTION_QUEUE;
++                      filter_action->act_conf.queue.index = act_q->index;
++
++                      if (filter_action->act_conf.queue.index >=
++                              ad->eth_dev->data->nb_rx_queues) {
++                              rte_flow_error_set(error, EINVAL,
++                                      RTE_FLOW_ERROR_TYPE_ACTION,
++                                      actions, "Invalid queue for FDIR.");
++                              return -rte_errno;
++                      }
++
++                      filter->add_fltr.rule_cfg.action_set.count = ++number;
++                      break;
++
++              case RTE_FLOW_ACTION_TYPE_RSS:
++                      dest_num++;
++
++                      filter_action = &filter->add_fltr.rule_cfg.action_set.actions[number];
++
++                      filter_action->type = VIRTCHNL_ACTION_Q_REGION;
++
++                      ret = iavf_fdir_parse_action_qregion(ad,
++                                              error, actions, filter_action);
++                      if (ret)
++                              return ret;
++
++                      filter->add_fltr.rule_cfg.action_set.count = ++number;
++                      break;
++
++              case RTE_FLOW_ACTION_TYPE_MARK:
++                      mark_num++;
++
++                      filter->mark_flag = 1;
++                      mark_spec = actions->conf;
++                      filter_action = &filter->add_fltr.rule_cfg.action_set.actions[number];
++
++                      filter_action->type = VIRTCHNL_ACTION_MARK;
++                      filter_action->act_conf.mark_id = mark_spec->id;
++
++                      filter->add_fltr.rule_cfg.action_set.count = ++number;
++                      break;
++
++              default:
++                      rte_flow_error_set(error, EINVAL,
++                                      RTE_FLOW_ERROR_TYPE_ACTION, actions,
++                                      "Invalid action.");
++                      return -rte_errno;
++              }
++      }
++
++      if (number > VIRTCHNL_MAX_NUM_ACTIONS) {
++              rte_flow_error_set(error, EINVAL,
++                      RTE_FLOW_ERROR_TYPE_ACTION, actions,
++                      "The number of actions exceeds the maximum allowed");
++              return -rte_errno;
++      }
++
++      if (dest_num >= 2) {
++              rte_flow_error_set(error, EINVAL,
++                      RTE_FLOW_ERROR_TYPE_ACTION, actions,
++                      "Unsupported action combination");
++              return -rte_errno;
++      }
++
++      if (mark_num >= 2) {
++              rte_flow_error_set(error, EINVAL,
++                      RTE_FLOW_ERROR_TYPE_ACTION, actions,
++                      "Too many mark actions");
++              return -rte_errno;
++      }
++
++      if (dest_num + mark_num == 0) {
++              rte_flow_error_set(error, EINVAL,
++                      RTE_FLOW_ERROR_TYPE_ACTION, actions,
++                      "Empty action");
++              return -rte_errno;
++      }
++
++      /* Mark only is equal to mark + passthru. */
++      if (dest_num == 0) {
++              filter_action = &filter->add_fltr.rule_cfg.action_set.actions[number];
++              filter_action->type = VIRTCHNL_ACTION_PASSTHRU;
++              filter->add_fltr.rule_cfg.action_set.count = ++number;
++      }
++
++      return 0;
++}
++
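++/* Walk the rte_flow pattern, build the virtchnl protocol header list and
++ * record every fully masked field in filter->input_set.
++ */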
++static int
++iavf_fdir_parse_pattern(__rte_unused struct iavf_adapter *ad,
++                      const struct rte_flow_item pattern[],
++                      struct rte_flow_error *error,
++                      struct iavf_fdir_conf *filter)
++{
++      const struct rte_flow_item *item = pattern;
++      enum rte_flow_item_type item_type;
++      enum rte_flow_item_type l3 = RTE_FLOW_ITEM_TYPE_END;
++      const struct rte_flow_item_eth *eth_spec, *eth_mask;
++      const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
++      const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
++      const struct rte_flow_item_udp *udp_spec, *udp_mask;
++      const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
++      const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
++      const struct rte_flow_item_gtp *gtp_spec, *gtp_mask;
++      const struct rte_flow_item_gtp_psc *gtp_psc_spec, *gtp_psc_mask;
++      const struct rte_flow_item_l2tpv3oip *l2tpv3oip_spec, *l2tpv3oip_mask;
++      const struct rte_flow_item_esp *esp_spec, *esp_mask;
++      const struct rte_flow_item_ah *ah_spec, *ah_mask;
++      uint64_t input_set = IAVF_INSET_NONE;
++
++      enum rte_flow_item_type next_type;
++      uint16_t ether_type;
++
++      int layer = 0;
++      struct virtchnl_proto_hdr *hdr;
++
++      uint8_t  ipv6_addr_mask[16] = {
++              0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
++              0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF
++      };
++
++      for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
++              if (item->last) {
++                      rte_flow_error_set(error, EINVAL,
++                                      RTE_FLOW_ERROR_TYPE_ITEM, item,
++                                      "Range is not supported");
++                      return -rte_errno;
++              }
++
++              item_type = item->type;
++
++              switch (item_type) {
++              case RTE_FLOW_ITEM_TYPE_ETH:
++                      eth_spec = item->spec;
++                      eth_mask = item->mask;
++                      next_type = (item + 1)->type;
++
++                      hdr = &filter->add_fltr.rule_cfg.proto_hdrs.proto_hdr[layer];
++
++                      VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, ETH);
++
++                      if (next_type == RTE_FLOW_ITEM_TYPE_END &&
++                              (!eth_spec || !eth_mask)) {
++                              rte_flow_error_set(error, EINVAL,
++                                              RTE_FLOW_ERROR_TYPE_ITEM,
++                                              item, "NULL eth spec/mask.");
++                              return -rte_errno;
++                      }
++
++                      if (eth_spec && eth_mask) {
++                              if (!rte_is_zero_ether_addr(&eth_mask->src) ||
++                                  !rte_is_zero_ether_addr(&eth_mask->dst)) {
++                                      rte_flow_error_set(error, EINVAL,
++                                              RTE_FLOW_ERROR_TYPE_ITEM, item,
++                                              "Invalid MAC address mask.");
++                                      return -rte_errno;
++                              }
++                      }
++
++                      if (eth_spec && eth_mask && eth_mask->type) {
++                              if (eth_mask->type != RTE_BE16(0xffff)) {
++                                      rte_flow_error_set(error, EINVAL,
++                                              RTE_FLOW_ERROR_TYPE_ITEM,
++                                              item, "Invalid type mask.");
++                                      return -rte_errno;
++                              }
++
++                              ether_type = rte_be_to_cpu_16(eth_spec->type);
++                              if (ether_type == RTE_ETHER_TYPE_IPV4 ||
++                                      ether_type == RTE_ETHER_TYPE_IPV6) {
++                                      rte_flow_error_set(error, EINVAL,
++                                              RTE_FLOW_ERROR_TYPE_ITEM,
++                                              item,
++                                              "Unsupported ether_type.");
++                                      return -rte_errno;
++                              }
++
++                              input_set |= IAVF_INSET_ETHERTYPE;
++                              VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, ETH, ETHERTYPE);
++
++                              rte_memcpy(hdr->buffer,
++                                      eth_spec, sizeof(*eth_spec));
++                      }
++
++                      filter->add_fltr.rule_cfg.proto_hdrs.count = ++layer;
++                      break;
++
++              case RTE_FLOW_ITEM_TYPE_IPV4:
++                      l3 = RTE_FLOW_ITEM_TYPE_IPV4;
++                      ipv4_spec = item->spec;
++                      ipv4_mask = item->mask;
++
++                      hdr = &filter->add_fltr.rule_cfg.proto_hdrs.proto_hdr[layer];
++
++                      VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, IPV4);
++
++                      if (ipv4_spec && ipv4_mask) {
++                              if (ipv4_mask->hdr.version_ihl ||
++                                      ipv4_mask->hdr.total_length ||
++                                      ipv4_mask->hdr.packet_id ||
++                                      ipv4_mask->hdr.fragment_offset ||
++                                      ipv4_mask->hdr.hdr_checksum) {
++                                      rte_flow_error_set(error, EINVAL,
++                                              RTE_FLOW_ERROR_TYPE_ITEM,
++                                              item, "Invalid IPv4 mask.");
++                                      return -rte_errno;
++                              }
++
++                              if (ipv4_mask->hdr.type_of_service ==
++                                                              UINT8_MAX) {
++                                      input_set |= IAVF_INSET_IPV4_TOS;
++                                      VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4, DSCP);
++                              }
++                              if (ipv4_mask->hdr.next_proto_id == UINT8_MAX) {
++                                      input_set |= IAVF_INSET_IPV4_PROTO;
++                                      VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4, PROT);
++                              }
++                              if (ipv4_mask->hdr.time_to_live == UINT8_MAX) {
++                                      input_set |= IAVF_INSET_IPV4_TTL;
++                                      VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4, TTL);
++                              }
++                              if (ipv4_mask->hdr.src_addr == UINT32_MAX) {
++                                      input_set |= IAVF_INSET_IPV4_SRC;
++                                      VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4, SRC);
++                              }
++                              if (ipv4_mask->hdr.dst_addr == UINT32_MAX) {
++                                      input_set |= IAVF_INSET_IPV4_DST;
++                                      VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4, DST);
++                              }
++
++                              rte_memcpy(hdr->buffer,
++                                      &ipv4_spec->hdr,
++                                      sizeof(ipv4_spec->hdr));
++                      }
++
++                      filter->add_fltr.rule_cfg.proto_hdrs.count = ++layer;
++                      break;
++
++              case RTE_FLOW_ITEM_TYPE_IPV6:
++                      l3 = RTE_FLOW_ITEM_TYPE_IPV6;
++                      ipv6_spec = item->spec;
++                      ipv6_mask = item->mask;
++
++                      hdr = &filter->add_fltr.rule_cfg.proto_hdrs.proto_hdr[layer];
++
++                      VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, IPV6);
++
++                      if (ipv6_spec && ipv6_mask) {
++                              if (ipv6_mask->hdr.payload_len) {
++                                      rte_flow_error_set(error, EINVAL,
++                                              RTE_FLOW_ERROR_TYPE_ITEM,
++                                              item, "Invalid IPv6 mask");
++                                      return -rte_errno;
++                              }
++
++                              if ((ipv6_mask->hdr.vtc_flow &
++                                      rte_cpu_to_be_32(IAVF_IPV6_TC_MASK))
++                                      == rte_cpu_to_be_32(
++                                                      IAVF_IPV6_TC_MASK)) {
++                                      input_set |= IAVF_INSET_IPV6_TC;
++                                      VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6, TC);
++                              }
++                              if (ipv6_mask->hdr.proto == UINT8_MAX) {
++                                      input_set |= IAVF_INSET_IPV6_NEXT_HDR;
++                                      VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6, PROT);
++                              }
++                              if (ipv6_mask->hdr.hop_limits == UINT8_MAX) {
++                                      input_set |= IAVF_INSET_IPV6_HOP_LIMIT;
++                                      VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6, HOP_LIMIT);
++                              }
++                              if (!memcmp(ipv6_mask->hdr.src_addr,
++                                      ipv6_addr_mask,
++                                      RTE_DIM(ipv6_mask->hdr.src_addr))) {
++                                      input_set |= IAVF_INSET_IPV6_SRC;
++                                      VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6, SRC);
++                              }
++                              if (!memcmp(ipv6_mask->hdr.dst_addr,
++                                      ipv6_addr_mask,
++                                      RTE_DIM(ipv6_mask->hdr.dst_addr))) {
++                                      input_set |= IAVF_INSET_IPV6_DST;
++                                      VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6, DST);
++                              }
++
++                              rte_memcpy(hdr->buffer,
++                                      &ipv6_spec->hdr,
++                                      sizeof(ipv6_spec->hdr));
++                      }
++
++                      filter->add_fltr.rule_cfg.proto_hdrs.count = ++layer;
++                      break;
++
++              case RTE_FLOW_ITEM_TYPE_UDP:
++                      udp_spec = item->spec;
++                      udp_mask = item->mask;
++
++                      hdr = &filter->add_fltr.rule_cfg.proto_hdrs.proto_hdr[layer];
++
++                      VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, UDP);
++
++                      if (udp_spec && udp_mask) {
++                              if (udp_mask->hdr.dgram_len ||
++                                      udp_mask->hdr.dgram_cksum) {
++                                      rte_flow_error_set(error, EINVAL,
++                                              RTE_FLOW_ERROR_TYPE_ITEM, item,
++                                              "Invalid UDP mask");
++                                      return -rte_errno;
++                              }
++
++                              if (udp_mask->hdr.src_port == UINT16_MAX) {
++                                      input_set |= IAVF_INSET_UDP_SRC_PORT;
++                                      VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, UDP, SRC_PORT);
++                              }
++                              if (udp_mask->hdr.dst_port == UINT16_MAX) {
++                                      input_set |= IAVF_INSET_UDP_DST_PORT;
++                                      VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, UDP, DST_PORT);
++                              }
++
++                              if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
++                                      rte_memcpy(hdr->buffer,
++                                              &udp_spec->hdr,
++                                              sizeof(udp_spec->hdr));
++                              else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
++                                      rte_memcpy(hdr->buffer,
++                                              &udp_spec->hdr,
++                                              sizeof(udp_spec->hdr));
++                      }
++
++                      filter->add_fltr.rule_cfg.proto_hdrs.count = ++layer;
++                      break;
++
++              case RTE_FLOW_ITEM_TYPE_TCP:
++                      tcp_spec = item->spec;
++                      tcp_mask = item->mask;
++
++                      hdr = &filter->add_fltr.rule_cfg.proto_hdrs.proto_hdr[layer];
++
++                      VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, TCP);
++
++                      if (tcp_spec && tcp_mask) {
++                              if (tcp_mask->hdr.sent_seq ||
++                                      tcp_mask->hdr.recv_ack ||
++                                      tcp_mask->hdr.data_off ||
++                                      tcp_mask->hdr.tcp_flags ||
++                                      tcp_mask->hdr.rx_win ||
++                                      tcp_mask->hdr.cksum ||
++                                      tcp_mask->hdr.tcp_urp) {
++                                      rte_flow_error_set(error, EINVAL,
++                                              RTE_FLOW_ERROR_TYPE_ITEM, item,
++                                              "Invalid TCP mask");
++                                      return -rte_errno;
++                              }
++
++                              if (tcp_mask->hdr.src_port == UINT16_MAX) {
++                                      input_set |= IAVF_INSET_TCP_SRC_PORT;
++                                      VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, TCP, SRC_PORT);
++                              }
++                              if (tcp_mask->hdr.dst_port == UINT16_MAX) {
++                                      input_set |= IAVF_INSET_TCP_DST_PORT;
++                                      VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, TCP, DST_PORT);
++                              }
++
++                              if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
++                                      rte_memcpy(hdr->buffer,
++                                              &tcp_spec->hdr,
++                                              sizeof(tcp_spec->hdr));
++                              else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
++                                      rte_memcpy(hdr->buffer,
++                                              &tcp_spec->hdr,
++                                              sizeof(tcp_spec->hdr));
++                      }
++
++                      filter->add_fltr.rule_cfg.proto_hdrs.count = ++layer;
++                      break;
++
++              case RTE_FLOW_ITEM_TYPE_SCTP:
++                      sctp_spec = item->spec;
++                      sctp_mask = item->mask;
++
++                      hdr = &filter->add_fltr.rule_cfg.proto_hdrs.proto_hdr[layer];
++
++                      VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, SCTP);
++
++                      if (sctp_spec && sctp_mask) {
++                              if (sctp_mask->hdr.cksum) {
++                                      rte_flow_error_set(error, EINVAL,
++                                              RTE_FLOW_ERROR_TYPE_ITEM, item,
++                                              "Invalid SCTP mask");
++                                      return -rte_errno;
++                              }
++
++                              if (sctp_mask->hdr.src_port == UINT16_MAX) {
++                                      input_set |= IAVF_INSET_SCTP_SRC_PORT;
++                                      VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, SCTP, SRC_PORT);
++                              }
++                              if (sctp_mask->hdr.dst_port == UINT16_MAX) {
++                                      input_set |= IAVF_INSET_SCTP_DST_PORT;
++                                      VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, SCTP, DST_PORT);
++                              }
++
++                              if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
++                                      rte_memcpy(hdr->buffer,
++                                              &sctp_spec->hdr,
++                                              sizeof(sctp_spec->hdr));
++                              else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
++                                      rte_memcpy(hdr->buffer,
++                                              &sctp_spec->hdr,
++                                              sizeof(sctp_spec->hdr));
++                      }
++
++                      filter->add_fltr.rule_cfg.proto_hdrs.count = ++layer;
++                      break;
++
++              case RTE_FLOW_ITEM_TYPE_GTPU:
++                      gtp_spec = item->spec;
++                      gtp_mask = item->mask;
++
++                      hdr = &filter->add_fltr.rule_cfg.proto_hdrs.proto_hdr[layer];
++
++                      VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, GTPU_IP);
++
++                      if (gtp_spec && gtp_mask) {
++                              if (gtp_mask->v_pt_rsv_flags ||
++                                      gtp_mask->msg_type ||
++                                      gtp_mask->msg_len) {
++                                      rte_flow_error_set(error, EINVAL,
++                                              RTE_FLOW_ERROR_TYPE_ITEM,
++                                              item, "Invalid GTP mask");
++                                      return -rte_errno;
++                              }
++
++                              if (gtp_mask->teid == UINT32_MAX) {
++                                      input_set |= IAVF_INSET_GTPU_TEID;
++                                      VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, GTPU_IP, TEID);
++                              }
++
++                              rte_memcpy(hdr->buffer,
++                                      gtp_spec, sizeof(*gtp_spec));
++                      }
++
++                      filter->add_fltr.rule_cfg.proto_hdrs.count = ++layer;
++                      break;
++
++              case RTE_FLOW_ITEM_TYPE_GTP_PSC:
++                      gtp_psc_spec = item->spec;
++                      gtp_psc_mask = item->mask;
++
++                      hdr = &filter->add_fltr.rule_cfg.proto_hdrs.proto_hdr[layer];
++
++                      VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, GTPU_EH);
++
++                      if (gtp_psc_spec && gtp_psc_mask) {
++                              if (gtp_psc_mask->qfi == UINT8_MAX) {
++                                      input_set |= IAVF_INSET_GTPU_QFI;
++                                      VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, GTPU_EH, QFI);
++                              }
++
++                              rte_memcpy(hdr->buffer, gtp_psc_spec,
++                                      sizeof(*gtp_psc_spec));
++                      }
++
++                      filter->add_fltr.rule_cfg.proto_hdrs.count = ++layer;
++                      break;
++
++              case RTE_FLOW_ITEM_TYPE_L2TPV3OIP:
++                      l2tpv3oip_spec = item->spec;
++                      l2tpv3oip_mask = item->mask;
++
++                      hdr = &filter->add_fltr.rule_cfg.proto_hdrs.proto_hdr[layer];
++
++                      VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, L2TPV3);
++
++                      if (l2tpv3oip_spec && l2tpv3oip_mask) {
++                              if (l2tpv3oip_mask->session_id == UINT32_MAX) {
++                                      input_set |= IAVF_L2TPV3OIP_SESSION_ID;
++                                      VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, L2TPV3, SESS_ID);
++                              }
++
++                              rte_memcpy(hdr->buffer, l2tpv3oip_spec,
++                                      sizeof(*l2tpv3oip_spec));
++                      }
++
++                      filter->add_fltr.rule_cfg.proto_hdrs.count = ++layer;
++                      break;
++
++              case RTE_FLOW_ITEM_TYPE_ESP:
++                      esp_spec = item->spec;
++                      esp_mask = item->mask;
++
++                      hdr = &filter->add_fltr.rule_cfg.proto_hdrs.proto_hdr[layer];
++
++                      VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, ESP);
++
++                      if (esp_spec && esp_mask) {
++                              if (esp_mask->hdr.spi == UINT32_MAX) {
++                                      input_set |= IAVF_INSET_ESP_SPI;
++                                      VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, ESP, SPI);
++                              }
++
++                              rte_memcpy(hdr->buffer, &esp_spec->hdr,
++                                      sizeof(esp_spec->hdr));
++                      }
++
++                      filter->add_fltr.rule_cfg.proto_hdrs.count = ++layer;
++                      break;
++
++              case RTE_FLOW_ITEM_TYPE_AH:
++                      ah_spec = item->spec;
++                      ah_mask = item->mask;
++
++                      hdr = &filter->add_fltr.rule_cfg.proto_hdrs.proto_hdr[layer];
++
++                      VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, AH);
++
++                      if (ah_spec && ah_mask) {
++                              if (ah_mask->spi == UINT32_MAX) {
++                                      input_set |= IAVF_INSET_AH_SPI;
++                                      VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, AH, SPI);
++                              }
++
++                              rte_memcpy(hdr->buffer, ah_spec,
++                                      sizeof(*ah_spec));
++                      }
++
++                      filter->add_fltr.rule_cfg.proto_hdrs.count = ++layer;
++                      break;
++
++              case RTE_FLOW_ITEM_TYPE_VOID:
++                      break;
++
++              default:
++                      rte_flow_error_set(error, EINVAL,
++                                      RTE_FLOW_ERROR_TYPE_ITEM, item,
++                                      "Invalid pattern item.");
++                      return -rte_errno;
++              }
++      }
++
++      if (layer > VIRTCHNL_MAX_NUM_PROTO_HDRS) {
++              rte_flow_error_set(error, EINVAL,
++                      RTE_FLOW_ERROR_TYPE_ITEM, item,
++                      "Protocol header layers exceed the maximum value");
++              return -rte_errno;
++      }
++
++      filter->input_set = input_set;
++
++      return 0;
++}
++
++static int
++iavf_fdir_parse(struct iavf_adapter *ad,
++              struct iavf_pattern_match_item *array,
++              uint32_t array_len,
++              const struct rte_flow_item pattern[],
++              const struct rte_flow_action actions[],
++              void **meta,
++              struct rte_flow_error *error)
++{
++      struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(ad);
++      struct iavf_fdir_conf *filter = &vf->fdir.conf;
++      struct iavf_pattern_match_item *item = NULL;
++      uint64_t input_set;
++      int ret;
++
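++      /* vf->fdir.conf is used as scratch space while parsing; on success
++       * iavf_fdir_create() copies it into a per-flow allocation.
++       */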
++      memset(filter, 0, sizeof(*filter));
++
++      item = iavf_search_pattern_match_item(pattern, array, array_len, error);
++      if (!item)
++              return -rte_errno;
++
++      ret = iavf_fdir_parse_pattern(ad, pattern, error, filter);
++      if (ret)
++              goto error;
++
++      input_set = filter->input_set;
++      if (!input_set || input_set & ~item->input_set_mask) {
++              rte_flow_error_set(error, EINVAL,
++                              RTE_FLOW_ERROR_TYPE_ITEM_SPEC, pattern,
++                              "Invalid input set");
++              ret = -rte_errno;
++              goto error;
++      }
++
++      ret = iavf_fdir_parse_action(ad, actions, error, filter);
++      if (ret)
++              goto error;
++
++      if (meta)
++              *meta = filter;
++
++error:
++      rte_free(item);
++      return ret;
++}
++
++static struct iavf_flow_parser iavf_fdir_parser = {
++      .engine = &iavf_fdir_engine,
++      .array = iavf_fdir_pattern,
++      .array_len = RTE_DIM(iavf_fdir_pattern),
++      .parse_pattern_action = iavf_fdir_parse,
++      .stage = IAVF_FLOW_STAGE_DISTRIBUTOR,
++};
++
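++/* Register the FDIR engine with the generic flow framework at startup. */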
++RTE_INIT(iavf_fdir_engine_register)
++{
++      iavf_register_flow_engine(&iavf_fdir_engine);
++}
+diff --git a/drivers/net/iavf/iavf_vchnl.c b/drivers/net/iavf/iavf_vchnl.c
+index 3f0d23a92..25e490bc4 100644
+--- a/drivers/net/iavf/iavf_vchnl.c
++++ b/drivers/net/iavf/iavf_vchnl.c
+@@ -340,7 +340,8 @@ iavf_get_vf_resource(struct iavf_adapter *adapter)
+        */
+       caps = IAVF_BASIC_OFFLOAD_CAPS | VIRTCHNL_VF_CAP_ADV_LINK_SPEED |
+-              VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC;
++              VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC |
++              VIRTCHNL_VF_OFFLOAD_FDIR_PF;
+       args.in_args = (uint8_t *)&caps;
+       args.in_args_size = sizeof(caps);
+@@ -842,3 +843,154 @@ iavf_add_del_vlan(struct iavf_adapter *adapter, uint16_t vlanid, bool add)
+       return err;
+ }
++
++int
++iavf_fdir_add(struct iavf_adapter *adapter,
++      struct iavf_fdir_conf *filter)
++{
++      struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
++      struct virtchnl_fdir_add *fdir_ret;
++
++      struct iavf_cmd_info args;
++      int err;
++
++      filter->add_fltr.vsi_id = vf->vsi_res->vsi_id;
++      filter->add_fltr.validate_only = 0;
++
++      args.ops = VIRTCHNL_OP_ADD_FDIR_FILTER;
++      args.in_args = (uint8_t *)(&filter->add_fltr);
++      args.in_args_size = sizeof(filter->add_fltr);
++      args.out_buffer = vf->aq_resp;
++      args.out_size = IAVF_AQ_BUF_SZ;
++
++      err = iavf_execute_vf_cmd(adapter, &args);
++      if (err) {
++              PMD_DRV_LOG(ERR, "fail to execute command OP_ADD_FDIR_FILTER");
++              return err;
++      }
++
++      fdir_ret = (struct virtchnl_fdir_add *)args.out_buffer;
++      filter->flow_id = fdir_ret->flow_id;
++
++      if (fdir_ret->status == VIRTCHNL_FDIR_SUCCESS) {
++              PMD_DRV_LOG(INFO,
++                      "add rule request was successfully processed by PF");
++      } else if (fdir_ret->status == VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE) {
++              PMD_DRV_LOG(ERR,
++                      "add rule request failed: no HW resource available");
++              return -1;
++      } else if (fdir_ret->status == VIRTCHNL_FDIR_FAILURE_RULE_EXIST) {
++              PMD_DRV_LOG(ERR,
++                      "add rule request failed: the rule already exists");
++              return -1;
++      } else if (fdir_ret->status == VIRTCHNL_FDIR_FAILURE_RULE_CONFLICT) {
++              PMD_DRV_LOG(ERR,
++                      "add rule request failed: the rule conflicts with an existing rule");
++              return -1;
++      } else if (fdir_ret->status == VIRTCHNL_FDIR_FAILURE_RULE_INVALID) {
++              PMD_DRV_LOG(ERR,
++                      "add rule request failed: the rule is not supported by the HW");
++              return -1;
++      } else if (fdir_ret->status == VIRTCHNL_FDIR_FAILURE_RULE_TIMEOUT) {
++              PMD_DRV_LOG(ERR,
++                      "add rule request failed: rule programming timed out");
++              return -1;
++      } else {
++              PMD_DRV_LOG(ERR,
++                      "add rule request failed for other reasons");
++              return -1;
++      }
++
++      return 0;
++}
++
++int
++iavf_fdir_del(struct iavf_adapter *adapter,
++      struct iavf_fdir_conf *filter)
++{
++      struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
++      struct virtchnl_fdir_del *fdir_ret;
++
++      struct iavf_cmd_info args;
++      int err;
++
++      filter->del_fltr.vsi_id = vf->vsi_res->vsi_id;
++      filter->del_fltr.flow_id = filter->flow_id;
++
++      args.ops = VIRTCHNL_OP_DEL_FDIR_FILTER;
++      args.in_args = (uint8_t *)(&filter->del_fltr);
++      args.in_args_size = sizeof(filter->del_fltr);
++      args.out_buffer = vf->aq_resp;
++      args.out_size = IAVF_AQ_BUF_SZ;
++
++      err = iavf_execute_vf_cmd(adapter, &args);
++      if (err) {
++              PMD_DRV_LOG(ERR, "fail to execute command OP_DEL_FDIR_FILTER");
++              return err;
++      }
++
++      fdir_ret = (struct virtchnl_fdir_del *)args.out_buffer;
++
++      if (fdir_ret->status == VIRTCHNL_FDIR_SUCCESS) {
++              PMD_DRV_LOG(INFO,
++                      "delete rule request was successfully processed by PF");
++      } else if (fdir_ret->status == VIRTCHNL_FDIR_FAILURE_RULE_NONEXIST) {
++              PMD_DRV_LOG(ERR,
++                      "delete rule request failed: the rule does not exist");
++              return -1;
++      } else if (fdir_ret->status == VIRTCHNL_FDIR_FAILURE_RULE_TIMEOUT) {
++              PMD_DRV_LOG(ERR,
++                      "delete rule request failed: rule programming timed out");
++              return -1;
++      } else {
++              PMD_DRV_LOG(ERR,
++                      "delete rule request failed for other reasons");
++              return -1;
++      }
++
++      return 0;
++}
++
++int
++iavf_fdir_check(struct iavf_adapter *adapter,
++              struct iavf_fdir_conf *filter)
++{
++      struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
++      struct virtchnl_fdir_add *fdir_ret;
++
++      struct iavf_cmd_info args;
++      int err;
++
++      filter->add_fltr.vsi_id = vf->vsi_res->vsi_id;
++      filter->add_fltr.validate_only = 1;
++
++      args.ops = VIRTCHNL_OP_ADD_FDIR_FILTER;
++      args.in_args = (uint8_t *)(&filter->add_fltr);
++      args.in_args_size = sizeof(filter->add_fltr);
++      args.out_buffer = vf->aq_resp;
++      args.out_size = IAVF_AQ_BUF_SZ;
++
++      err = iavf_execute_vf_cmd(adapter, &args);
++      if (err) {
++              PMD_DRV_LOG(ERR, "fail to check flow director rule");
++              return err;
++      }
++
++      fdir_ret = (struct virtchnl_fdir_add *)args.out_buffer;
++
++      if (fdir_ret->status == VIRTCHNL_FDIR_SUCCESS) {
++              PMD_DRV_LOG(INFO,
++                      "check rule request was successfully processed by PF");
++      } else if (fdir_ret->status == VIRTCHNL_FDIR_FAILURE_RULE_INVALID) {
++              PMD_DRV_LOG(ERR,
++                      "check rule request failed: parameter validation failed"
++                      " or the rule is not supported by the HW");
++              return -1;
++      } else {
++              PMD_DRV_LOG(ERR,
++                      "check rule request failed for other reasons");
++              return -1;
++      }
++
++      return 0;
++}
+diff --git a/drivers/net/iavf/meson.build b/drivers/net/iavf/meson.build
+index 32eabca4b..ce71054fb 100644
+--- a/drivers/net/iavf/meson.build
++++ b/drivers/net/iavf/meson.build
+@@ -13,6 +13,7 @@ sources = files(
+       'iavf_rxtx.c',
+       'iavf_vchnl.c',
+       'iavf_generic_flow.c',
++      'iavf_fdir.c',
+ )
+ if arch_subdir == 'x86'
+-- 
+2.17.1
+
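For reference, the sketch below (not part of the patches) shows how an
application could exercise the new flow director path once the PF negotiates
VIRTCHNL_VF_OFFLOAD_FDIR_PF. Only the generic rte_flow API is used:
rte_flow_validate() is routed to iavf_fdir_check(), rte_flow_create() to
iavf_fdir_add() and rte_flow_destroy() to iavf_fdir_del(). The port id,
IPv4 address, UDP port and Rx queue index are placeholder values.

    #include <rte_flow.h>
    #include <rte_byteorder.h>

    static struct rte_flow *
    fdir_udp_to_queue(uint16_t port_id, uint16_t rx_queue,
                      struct rte_flow_error *err)
    {
            /* Match IPv4 dst 192.168.0.1 + UDP dst port 4789 on ingress. */
            struct rte_flow_attr attr = { .ingress = 1 };
            struct rte_flow_item_ipv4 ip_spec = {
                    .hdr.dst_addr = RTE_BE32(0xc0a80001),
            };
            struct rte_flow_item_ipv4 ip_mask = {
                    .hdr.dst_addr = RTE_BE32(0xffffffff),
            };
            struct rte_flow_item_udp udp_spec = {
                    .hdr.dst_port = RTE_BE16(4789),
            };
            struct rte_flow_item_udp udp_mask = {
                    .hdr.dst_port = RTE_BE16(0xffff),
            };
            struct rte_flow_item pattern[] = {
                    { .type = RTE_FLOW_ITEM_TYPE_ETH },
                    { .type = RTE_FLOW_ITEM_TYPE_IPV4,
                      .spec = &ip_spec, .mask = &ip_mask },
                    { .type = RTE_FLOW_ITEM_TYPE_UDP,
                      .spec = &udp_spec, .mask = &udp_mask },
                    { .type = RTE_FLOW_ITEM_TYPE_END },
            };
            /* Steer matching packets to a single Rx queue. */
            struct rte_flow_action_queue queue = { .index = rx_queue };
            struct rte_flow_action actions[] = {
                    { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
                    { .type = RTE_FLOW_ACTION_TYPE_END },
            };

            if (rte_flow_validate(port_id, &attr, pattern, actions, err))
                    return NULL;
            return rte_flow_create(port_id, &attr, pattern, actions, err);
    }

Roughly the same rule can be tried from testpmd with something like:

    flow create 0 ingress pattern eth / ipv4 dst is 192.168.0.1 / udp dst is 4789 / end actions queue index 1 / end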