New upstream version 17.11-rc3
[deb_dpdk.git] / drivers / net / i40e / rte_pmd_i40e.c
index f12b7f4..aeb92af 100644 (file)
 #include <rte_tailq.h>
 
 #include "base/i40e_prototype.h"
+#include "base/i40e_dcb.h"
 #include "i40e_ethdev.h"
 #include "i40e_pf.h"
 #include "i40e_rxtx.h"
 #include "rte_pmd_i40e.h"
 
 int
-rte_pmd_i40e_ping_vfs(uint8_t port, uint16_t vf)
+rte_pmd_i40e_ping_vfs(uint16_t port, uint16_t vf)
 {
        struct rte_eth_dev *dev;
        struct i40e_pf *pf;
@@ -66,7 +67,7 @@ rte_pmd_i40e_ping_vfs(uint8_t port, uint16_t vf)
 }
 
 int
-rte_pmd_i40e_set_vf_mac_anti_spoof(uint8_t port, uint16_t vf_id, uint8_t on)
+rte_pmd_i40e_set_vf_mac_anti_spoof(uint16_t port, uint16_t vf_id, uint8_t on)
 {
        struct rte_eth_dev *dev;
        struct i40e_pf *pf;
@@ -117,7 +118,7 @@ rte_pmd_i40e_set_vf_mac_anti_spoof(uint8_t port, uint16_t vf_id, uint8_t on)
                vsi->info.sec_flags &= ~I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK;
 
        memset(&ctxt, 0, sizeof(ctxt));
-       (void)rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
+       rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
        ctxt.seid = vsi->seid;
 
        hw = I40E_VSI_TO_HW(vsi);
@@ -170,7 +171,7 @@ i40e_add_rm_all_vlan_filter(struct i40e_vsi *vsi, uint8_t add)
 }
 
 int
-rte_pmd_i40e_set_vf_vlan_anti_spoof(uint8_t port, uint16_t vf_id, uint8_t on)
+rte_pmd_i40e_set_vf_vlan_anti_spoof(uint16_t port, uint16_t vf_id, uint8_t on)
 {
        struct rte_eth_dev *dev;
        struct i40e_pf *pf;
@@ -219,7 +220,7 @@ rte_pmd_i40e_set_vf_vlan_anti_spoof(uint8_t port, uint16_t vf_id, uint8_t on)
                vsi->info.sec_flags &= ~I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK;
 
        memset(&ctxt, 0, sizeof(ctxt));
-       (void)rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
+       rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
        ctxt.seid = vsi->seid;
 
        hw = I40E_VSI_TO_HW(vsi);
@@ -264,7 +265,7 @@ i40e_vsi_rm_mac_filter(struct i40e_vsi *vsi)
 
                for (i = 0; i < vlan_num; i++) {
                        mv_f[i].filter_type = filter_type;
-                       (void)rte_memcpy(&mv_f[i].macaddr,
+                       rte_memcpy(&mv_f[i].macaddr,
                                         &f->mac_info.mac_addr,
                                         ETH_ADDR_LEN);
                }
@@ -325,7 +326,7 @@ i40e_vsi_restore_mac_filter(struct i40e_vsi *vsi)
 
                for (i = 0; i < vlan_num; i++) {
                        mv_f[i].filter_type = f->mac_info.filter_type;
-                       (void)rte_memcpy(&mv_f[i].macaddr,
+                       rte_memcpy(&mv_f[i].macaddr,
                                         &f->mac_info.mac_addr,
                                         ETH_ADDR_LEN);
                }
@@ -407,7 +408,7 @@ i40e_vsi_set_tx_loopback(struct i40e_vsi *vsi, uint8_t on)
                vsi->info.switch_id &= ~I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB;
 
        memset(&ctxt, 0, sizeof(ctxt));
-       (void)rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
+       rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
        ctxt.seid = vsi->seid;
 
        ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
@@ -430,7 +431,7 @@ i40e_vsi_set_tx_loopback(struct i40e_vsi *vsi, uint8_t on)
 }
 
 int
-rte_pmd_i40e_set_tx_loopback(uint8_t port, uint8_t on)
+rte_pmd_i40e_set_tx_loopback(uint16_t port, uint8_t on)
 {
        struct rte_eth_dev *dev;
        struct i40e_pf *pf;
@@ -473,7 +474,7 @@ rte_pmd_i40e_set_tx_loopback(uint8_t port, uint8_t on)
 }
 
 int
-rte_pmd_i40e_set_vf_unicast_promisc(uint8_t port, uint16_t vf_id, uint8_t on)
+rte_pmd_i40e_set_vf_unicast_promisc(uint16_t port, uint16_t vf_id, uint8_t on)
 {
        struct rte_eth_dev *dev;
        struct i40e_pf *pf;
@@ -514,7 +515,7 @@ rte_pmd_i40e_set_vf_unicast_promisc(uint8_t port, uint16_t vf_id, uint8_t on)
 }
 
 int
-rte_pmd_i40e_set_vf_multicast_promisc(uint8_t port, uint16_t vf_id, uint8_t on)
+rte_pmd_i40e_set_vf_multicast_promisc(uint16_t port, uint16_t vf_id, uint8_t on)
 {
        struct rte_eth_dev *dev;
        struct i40e_pf *pf;
@@ -555,7 +556,7 @@ rte_pmd_i40e_set_vf_multicast_promisc(uint8_t port, uint16_t vf_id, uint8_t on)
 }
 
 int
-rte_pmd_i40e_set_vf_mac_addr(uint8_t port, uint16_t vf_id,
+rte_pmd_i40e_set_vf_mac_addr(uint16_t port, uint16_t vf_id,
                             struct ether_addr *mac_addr)
 {
        struct i40e_mac_filter *f;
@@ -591,14 +592,16 @@ rte_pmd_i40e_set_vf_mac_addr(uint8_t port, uint16_t vf_id,
 
        /* Remove all existing mac */
        TAILQ_FOREACH_SAFE(f, &vsi->mac_list, next, temp)
-               i40e_vsi_delete_mac(vsi, &f->mac_info.mac_addr);
+               if (i40e_vsi_delete_mac(vsi, &f->mac_info.mac_addr)
+                               != I40E_SUCCESS)
+                       PMD_DRV_LOG(WARNING, "Delete MAC failed");
 
        return 0;
 }
 
 /* Set vlan strip on/off for specific VF from host */
 int
-rte_pmd_i40e_set_vf_vlan_stripq(uint8_t port, uint16_t vf_id, uint8_t on)
+rte_pmd_i40e_set_vf_vlan_stripq(uint16_t port, uint16_t vf_id, uint8_t on)
 {
        struct rte_eth_dev *dev;
        struct i40e_pf *pf;
@@ -633,7 +636,7 @@ rte_pmd_i40e_set_vf_vlan_stripq(uint8_t port, uint16_t vf_id, uint8_t on)
        return ret;
 }
 
-int rte_pmd_i40e_set_vf_vlan_insert(uint8_t port, uint16_t vf_id,
+int rte_pmd_i40e_set_vf_vlan_insert(uint16_t port, uint16_t vf_id,
                                    uint16_t vlan_id)
 {
        struct rte_eth_dev *dev;
@@ -685,7 +688,7 @@ int rte_pmd_i40e_set_vf_vlan_insert(uint8_t port, uint16_t vf_id,
                vsi->info.port_vlan_flags &= ~I40E_AQ_VSI_PVLAN_INSERT_PVID;
 
        memset(&ctxt, 0, sizeof(ctxt));
-       (void)rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
+       rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
        ctxt.seid = vsi->seid;
 
        hw = I40E_VSI_TO_HW(vsi);
@@ -698,7 +701,7 @@ int rte_pmd_i40e_set_vf_vlan_insert(uint8_t port, uint16_t vf_id,
        return ret;
 }
 
-int rte_pmd_i40e_set_vf_broadcast(uint8_t port, uint16_t vf_id,
+int rte_pmd_i40e_set_vf_broadcast(uint16_t port, uint16_t vf_id,
                                  uint8_t on)
 {
        struct rte_eth_dev *dev;
@@ -747,7 +750,7 @@ int rte_pmd_i40e_set_vf_broadcast(uint8_t port, uint16_t vf_id,
        }
 
        if (on) {
-               (void)rte_memcpy(&filter.mac_addr, &broadcast, ETHER_ADDR_LEN);
+               rte_memcpy(&filter.mac_addr, &broadcast, ETHER_ADDR_LEN);
                filter.filter_type = RTE_MACVLAN_PERFECT_MATCH;
                ret = i40e_vsi_add_mac(vsi, &filter);
        } else {
@@ -764,7 +767,7 @@ int rte_pmd_i40e_set_vf_broadcast(uint8_t port, uint16_t vf_id,
        return ret;
 }
 
-int rte_pmd_i40e_set_vf_vlan_tag(uint8_t port, uint16_t vf_id, uint8_t on)
+int rte_pmd_i40e_set_vf_vlan_tag(uint16_t port, uint16_t vf_id, uint8_t on)
 {
        struct rte_eth_dev *dev;
        struct i40e_pf *pf;
@@ -819,7 +822,7 @@ int rte_pmd_i40e_set_vf_vlan_tag(uint8_t port, uint16_t vf_id, uint8_t on)
        }
 
        memset(&ctxt, 0, sizeof(ctxt));
-       (void)rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
+       rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
        ctxt.seid = vsi->seid;
 
        hw = I40E_VSI_TO_HW(vsi);
@@ -858,7 +861,7 @@ i40e_vlan_filter_count(struct i40e_vsi *vsi)
        return count;
 }
 
-int rte_pmd_i40e_set_vf_vlan_filter(uint8_t port, uint16_t vlan_id,
+int rte_pmd_i40e_set_vf_vlan_filter(uint16_t port, uint16_t vlan_id,
                                    uint64_t vf_mask, uint8_t on)
 {
        struct rte_eth_dev *dev;
@@ -941,7 +944,7 @@ int rte_pmd_i40e_set_vf_vlan_filter(uint8_t port, uint16_t vlan_id,
 }
 
 int
-rte_pmd_i40e_get_vf_stats(uint8_t port,
+rte_pmd_i40e_get_vf_stats(uint16_t port,
                          uint16_t vf_id,
                          struct rte_eth_stats *stats)
 {
@@ -986,7 +989,7 @@ rte_pmd_i40e_get_vf_stats(uint8_t port,
 }
 
 int
-rte_pmd_i40e_reset_vf_stats(uint8_t port,
+rte_pmd_i40e_reset_vf_stats(uint16_t port,
                            uint16_t vf_id)
 {
        struct rte_eth_dev *dev;
@@ -1020,7 +1023,7 @@ rte_pmd_i40e_reset_vf_stats(uint8_t port,
 }
 
 int
-rte_pmd_i40e_set_vf_max_bw(uint8_t port, uint16_t vf_id, uint32_t bw)
+rte_pmd_i40e_set_vf_max_bw(uint16_t port, uint16_t vf_id, uint32_t bw)
 {
        struct rte_eth_dev *dev;
        struct i40e_pf *pf;
@@ -1109,7 +1112,7 @@ rte_pmd_i40e_set_vf_max_bw(uint8_t port, uint16_t vf_id, uint32_t bw)
 }
 
 int
-rte_pmd_i40e_set_vf_tc_bw_alloc(uint8_t port, uint16_t vf_id,
+rte_pmd_i40e_set_vf_tc_bw_alloc(uint16_t port, uint16_t vf_id,
                                uint8_t tc_num, uint8_t *bw_weight)
 {
        struct rte_eth_dev *dev;
@@ -1223,7 +1226,7 @@ rte_pmd_i40e_set_vf_tc_bw_alloc(uint8_t port, uint16_t vf_id,
 }
 
 int
-rte_pmd_i40e_set_vf_tc_max_bw(uint8_t port, uint16_t vf_id,
+rte_pmd_i40e_set_vf_tc_max_bw(uint16_t port, uint16_t vf_id,
                              uint8_t tc_no, uint32_t bw)
 {
        struct rte_eth_dev *dev;
@@ -1341,7 +1344,7 @@ rte_pmd_i40e_set_vf_tc_max_bw(uint8_t port, uint16_t vf_id,
 }
 
 int
-rte_pmd_i40e_set_tc_strict_prio(uint8_t port, uint8_t tc_map)
+rte_pmd_i40e_set_tc_strict_prio(uint16_t port, uint8_t tc_map)
 {
        struct rte_eth_dev *dev;
        struct i40e_pf *pf;
@@ -1513,7 +1516,7 @@ i40e_add_rm_profile_info(struct i40e_hw *hw, uint8_t *profile_info_sec)
 
 /* Check if the profile info exists */
 static int
-i40e_check_profile_info(uint8_t port, uint8_t *profile_info_sec)
+i40e_check_profile_info(uint16_t port, uint8_t *profile_info_sec)
 {
        struct rte_eth_dev *dev = &rte_eth_devices[port];
        struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
@@ -1557,7 +1560,7 @@ i40e_check_profile_info(uint8_t port, uint8_t *profile_info_sec)
 }
 
 int
-rte_pmd_i40e_process_ddp_package(uint8_t port, uint8_t *buff,
+rte_pmd_i40e_process_ddp_package(uint16_t port, uint8_t *buff,
                                 uint32_t size,
                                 enum rte_pmd_i40e_package_op op)
 {
@@ -1606,6 +1609,8 @@ rte_pmd_i40e_process_ddp_package(uint8_t port, uint8_t *buff,
                return -EINVAL;
        }
 
+       i40e_update_customized_info(dev, buff, size);
+
        /* Find metadata segment */
        metadata_seg_hdr = i40e_find_segment_in_package(SEGMENT_TYPE_METADATA,
                                                        pkg_hdr);
@@ -1704,6 +1709,27 @@ rte_pmd_i40e_process_ddp_package(uint8_t port, uint8_t *buff,
        return status;
 }
 
+/* Get number of TLV records in the section.
+ *
+ * Returns 0 when 'sec' is NULL; otherwise walks the variable-length
+ * records that follow the section header and counts them.
+ */
+static unsigned int
+i40e_get_tlv_section_size(struct i40e_profile_section_header *sec)
+{
+       unsigned int i, nb_rec, nb_tlv = 0;
+       struct i40e_profile_tlv_section_record *tlv;
+
+       if (!sec)
+               return nb_tlv;
+
+       /* get number of records in the section */
+       nb_rec = sec->section.size /
+                               sizeof(struct i40e_profile_tlv_section_record);
+       for (i = 0; i < nb_rec; ) {
+               /* each record advances the cursor by its own 'len',
+                * expressed in units of the record structure
+                */
+               tlv = (struct i40e_profile_tlv_section_record *)&sec[1 + i];
+               i += tlv->len;
+               nb_tlv++;
+       }
+       return nb_tlv;
+}
+
 int rte_pmd_i40e_get_ddp_info(uint8_t *pkg_buff, uint32_t pkg_size,
        uint8_t *info_buff, uint32_t info_size,
        enum rte_pmd_i40e_package_info type)
@@ -1858,12 +1884,162 @@ int rte_pmd_i40e_get_ddp_info(uint8_t *pkg_buff, uint32_t pkg_size,
                return I40E_SUCCESS;
        }
 
+       /* get number of protocols */
+       if (type == RTE_PMD_I40E_PKG_INFO_PROTOCOL_NUM) {
+               struct i40e_profile_section_header *proto;
+
+               if (info_size < sizeof(uint32_t)) {
+                       PMD_DRV_LOG(ERR, "Invalid information buffer size");
+                       return -EINVAL;
+               }
+               proto = i40e_find_section_in_profile(SECTION_TYPE_PROTO,
+                               (struct i40e_profile_segment *)i40e_seg_hdr);
+               *(uint32_t *)info_buff = i40e_get_tlv_section_size(proto);
+               return I40E_SUCCESS;
+       }
+
+       /* get list of protocols */
+       if (type == RTE_PMD_I40E_PKG_INFO_PROTOCOL_LIST) {
+               uint32_t i, j, nb_tlv, nb_rec, nb_proto_info;
+               struct rte_pmd_i40e_proto_info *pinfo;
+               struct i40e_profile_section_header *proto;
+               struct i40e_profile_tlv_section_record *tlv;
+
+               pinfo = (struct rte_pmd_i40e_proto_info *)info_buff;
+               nb_proto_info = info_size /
+                                       sizeof(struct rte_pmd_i40e_proto_info);
+               for (i = 0; i < nb_proto_info; i++) {
+                       pinfo[i].proto_id = RTE_PMD_I40E_PROTO_UNUSED;
+                       memset(pinfo[i].name, 0, RTE_PMD_I40E_DDP_NAME_SIZE);
+               }
+               proto = i40e_find_section_in_profile(SECTION_TYPE_PROTO,
+                               (struct i40e_profile_segment *)i40e_seg_hdr);
+               nb_tlv = i40e_get_tlv_section_size(proto);
+               if (nb_tlv == 0)
+                       return I40E_SUCCESS;
+               if (nb_proto_info < nb_tlv) {
+                       PMD_DRV_LOG(ERR, "Invalid information buffer size");
+                       return -EINVAL;
+               }
+               /* get number of records in the section */
+               nb_rec = proto->section.size /
+                               sizeof(struct i40e_profile_tlv_section_record);
+               tlv = (struct i40e_profile_tlv_section_record *)&proto[1];
+               for (i = j = 0; i < nb_rec; j++) {
+                       pinfo[j].proto_id = tlv->data[0];
+                       snprintf(pinfo[j].name, I40E_DDP_NAME_SIZE, "%s",
+                                (const char *)&tlv->data[1]);
+                       i += tlv->len;
+                       tlv = &tlv[tlv->len];
+               }
+               return I40E_SUCCESS;
+       }
+
+       /* get number of packet classification types */
+       if (type == RTE_PMD_I40E_PKG_INFO_PCTYPE_NUM) {
+               struct i40e_profile_section_header *pctype;
+
+               if (info_size < sizeof(uint32_t)) {
+                       PMD_DRV_LOG(ERR, "Invalid information buffer size");
+                       return -EINVAL;
+               }
+               pctype = i40e_find_section_in_profile(SECTION_TYPE_PCTYPE,
+                               (struct i40e_profile_segment *)i40e_seg_hdr);
+               *(uint32_t *)info_buff = i40e_get_tlv_section_size(pctype);
+               return I40E_SUCCESS;
+       }
+
+       /* get list of packet classification types */
+       if (type == RTE_PMD_I40E_PKG_INFO_PCTYPE_LIST) {
+               uint32_t i, j, nb_tlv, nb_rec, nb_proto_info;
+               struct rte_pmd_i40e_ptype_info *pinfo;
+               struct i40e_profile_section_header *pctype;
+               struct i40e_profile_tlv_section_record *tlv;
+
+               pinfo = (struct rte_pmd_i40e_ptype_info *)info_buff;
+               nb_proto_info = info_size /
+                                       sizeof(struct rte_pmd_i40e_ptype_info);
+               for (i = 0; i < nb_proto_info; i++)
+                       memset(&pinfo[i], RTE_PMD_I40E_PROTO_UNUSED,
+                              sizeof(struct rte_pmd_i40e_ptype_info));
+               pctype = i40e_find_section_in_profile(SECTION_TYPE_PCTYPE,
+                               (struct i40e_profile_segment *)i40e_seg_hdr);
+               nb_tlv = i40e_get_tlv_section_size(pctype);
+               if (nb_tlv == 0)
+                       return I40E_SUCCESS;
+               if (nb_proto_info < nb_tlv) {
+                       PMD_DRV_LOG(ERR, "Invalid information buffer size");
+                       return -EINVAL;
+               }
+
+               /* get number of records in the section */
+               nb_rec = pctype->section.size /
+                               sizeof(struct i40e_profile_tlv_section_record);
+               tlv = (struct i40e_profile_tlv_section_record *)&pctype[1];
+               for (i = j = 0; i < nb_rec; j++) {
+                       memcpy(&pinfo[j], tlv->data,
+                              sizeof(struct rte_pmd_i40e_ptype_info));
+                       i += tlv->len;
+                       tlv = &tlv[tlv->len];
+               }
+               return I40E_SUCCESS;
+       }
+
+       /* get number of packet types */
+       if (type == RTE_PMD_I40E_PKG_INFO_PTYPE_NUM) {
+               struct i40e_profile_section_header *ptype;
+
+               if (info_size < sizeof(uint32_t)) {
+                       PMD_DRV_LOG(ERR, "Invalid information buffer size");
+                       return -EINVAL;
+               }
+               ptype = i40e_find_section_in_profile(SECTION_TYPE_PTYPE,
+                               (struct i40e_profile_segment *)i40e_seg_hdr);
+               *(uint32_t *)info_buff = i40e_get_tlv_section_size(ptype);
+               return I40E_SUCCESS;
+       }
+
+       /* get list of packet types */
+       if (type == RTE_PMD_I40E_PKG_INFO_PTYPE_LIST) {
+               uint32_t i, j, nb_tlv, nb_rec, nb_proto_info;
+               struct rte_pmd_i40e_ptype_info *pinfo;
+               struct i40e_profile_section_header *ptype;
+               struct i40e_profile_tlv_section_record *tlv;
+
+               pinfo = (struct rte_pmd_i40e_ptype_info *)info_buff;
+               nb_proto_info = info_size /
+                                       sizeof(struct rte_pmd_i40e_ptype_info);
+               for (i = 0; i < nb_proto_info; i++)
+                       memset(&pinfo[i], RTE_PMD_I40E_PROTO_UNUSED,
+                              sizeof(struct rte_pmd_i40e_ptype_info));
+               ptype = i40e_find_section_in_profile(SECTION_TYPE_PTYPE,
+                               (struct i40e_profile_segment *)i40e_seg_hdr);
+               nb_tlv = i40e_get_tlv_section_size(ptype);
+               if (nb_tlv == 0)
+                       return I40E_SUCCESS;
+               if (nb_proto_info < nb_tlv) {
+                       PMD_DRV_LOG(ERR, "Invalid information buffer size");
+                       return -EINVAL;
+               }
+               /* get number of records in the section */
+               nb_rec = ptype->section.size /
+                               sizeof(struct i40e_profile_tlv_section_record);
+               for (i = j = 0; i < nb_rec; j++) {
+                       tlv = (struct i40e_profile_tlv_section_record *)
+                                                               &ptype[1 + i];
+                       memcpy(&pinfo[j], tlv->data,
+                              sizeof(struct rte_pmd_i40e_ptype_info));
+                       i += tlv->len;
+               }
+               return I40E_SUCCESS;
+       }
+
        PMD_DRV_LOG(ERR, "Info type %u is invalid.", type);
        return -EINVAL;
 }
 
 int
-rte_pmd_i40e_get_ddp_list(uint8_t port, uint8_t *buff, uint32_t size)
+rte_pmd_i40e_get_ddp_list(uint16_t port, uint8_t *buff, uint32_t size)
 {
        struct rte_eth_dev *dev;
        struct i40e_hw *hw;
@@ -1933,7 +2109,9 @@ static int check_invalid_pkt_type(uint32_t pkt_type)
            tnl != RTE_PTYPE_TUNNEL_VXLAN &&
            tnl != RTE_PTYPE_TUNNEL_NVGRE &&
            tnl != RTE_PTYPE_TUNNEL_GENEVE &&
-           tnl != RTE_PTYPE_TUNNEL_GRENAT)
+           tnl != RTE_PTYPE_TUNNEL_GRENAT &&
+           tnl != RTE_PTYPE_TUNNEL_GTPC &&
+           tnl != RTE_PTYPE_TUNNEL_GTPU)
                return -1;
 
        if (il2 &&
@@ -1991,7 +2169,7 @@ static int check_invalid_ptype_mapping(
 
 int
 rte_pmd_i40e_ptype_mapping_update(
-                       uint8_t port,
+                       uint16_t port,
                        struct rte_pmd_i40e_ptype_mapping *mapping_items,
                        uint16_t count,
                        uint8_t exclusive)
@@ -2027,7 +2205,7 @@ rte_pmd_i40e_ptype_mapping_update(
        return 0;
 }
 
-int rte_pmd_i40e_ptype_mapping_reset(uint8_t port)
+int rte_pmd_i40e_ptype_mapping_reset(uint16_t port)
 {
        struct rte_eth_dev *dev;
 
@@ -2044,7 +2222,7 @@ int rte_pmd_i40e_ptype_mapping_reset(uint8_t port)
 }
 
 int rte_pmd_i40e_ptype_mapping_get(
-                       uint8_t port,
+                       uint16_t port,
                        struct rte_pmd_i40e_ptype_mapping *mapping_items,
                        uint16_t size,
                        uint16_t *count,
@@ -2078,7 +2256,7 @@ int rte_pmd_i40e_ptype_mapping_get(
        return 0;
 }
 
-int rte_pmd_i40e_ptype_mapping_replace(uint8_t port,
+int rte_pmd_i40e_ptype_mapping_replace(uint16_t port,
                                       uint32_t target,
                                       uint8_t mask,
                                       uint32_t pkt_type)
@@ -2115,3 +2293,695 @@ int rte_pmd_i40e_ptype_mapping_replace(uint8_t port,
 
        return 0;
 }
+
+/* Add a perfect-match MAC filter to the given VF's VSI from the host.
+ *
+ * Returns 0 on success, -ENODEV for an invalid port, -ENOTSUP for a
+ * non-i40e device, -EINVAL for a bad MAC address, VF id or VSI, and
+ * -1 when the driver fails to install the filter.
+ */
+int
+rte_pmd_i40e_add_vf_mac_addr(uint16_t port, uint16_t vf_id,
+                            struct ether_addr *mac_addr)
+{
+       struct rte_eth_dev *dev;
+       struct i40e_pf_vf *vf;
+       struct i40e_vsi *vsi;
+       struct i40e_pf *pf;
+       struct i40e_mac_filter_info mac_filter;
+       int ret;
+
+       /* validate the address before touching any device state */
+       if (i40e_validate_mac_addr((u8 *)mac_addr) != I40E_SUCCESS)
+               return -EINVAL;
+
+       RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+       dev = &rte_eth_devices[port];
+
+       if (!is_i40e_supported(dev))
+               return -ENOTSUP;
+
+       pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+
+       /* the VF must exist under this PF */
+       if (vf_id >= pf->vf_num || !pf->vfs)
+               return -EINVAL;
+
+       vf = &pf->vfs[vf_id];
+       vsi = vf->vsi;
+       if (!vsi) {
+               PMD_DRV_LOG(ERR, "Invalid VSI.");
+               return -EINVAL;
+       }
+
+       mac_filter.filter_type = RTE_MACVLAN_PERFECT_MATCH;
+       ether_addr_copy(mac_addr, &mac_filter.mac_addr);
+       ret = i40e_vsi_add_mac(vsi, &mac_filter);
+       if (ret != I40E_SUCCESS) {
+               PMD_DRV_LOG(ERR, "Failed to add MAC filter.");
+               return -1;
+       }
+
+       return 0;
+}
+
+/* Restore the default flow type to PCTYPE mapping table on 'port'.
+ * Returns 0 on success, -ENODEV/-ENOTSUP on port errors.
+ */
+int rte_pmd_i40e_flow_type_mapping_reset(uint16_t port)
+{
+       struct rte_eth_dev *dev;
+
+       RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+       dev = &rte_eth_devices[port];
+
+       if (!is_i40e_supported(dev))
+               return -ENOTSUP;
+
+       i40e_set_default_pctype_table(dev);
+
+       return 0;
+}
+
+/* Copy the current flow type to PCTYPE mapping table of 'port' into
+ * 'mapping_items'; the caller must supply room for I40E_FLOW_TYPE_MAX
+ * entries. Returns 0 on success, -ENODEV/-ENOTSUP on port errors.
+ */
+int rte_pmd_i40e_flow_type_mapping_get(
+                       uint16_t port,
+                       struct rte_pmd_i40e_flow_type_mapping *mapping_items)
+{
+       struct rte_eth_dev *dev;
+       struct i40e_adapter *ad;
+       uint16_t i;
+
+       RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+       dev = &rte_eth_devices[port];
+
+       if (!is_i40e_supported(dev))
+               return -ENOTSUP;
+
+       ad = I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+
+       for (i = 0; i < I40E_FLOW_TYPE_MAX; i++) {
+               mapping_items[i].flow_type = i;
+               mapping_items[i].pctype = ad->pctypes_tbl[i];
+       }
+
+       return 0;
+}
+
+/* Update the flow type to PCTYPE mapping table.
+ *
+ * Each of the 'count' items maps one flow type to a PCTYPE bit-mask.
+ * When 'exclusive' is set the whole table is cleared first; otherwise
+ * only the listed flow types are overwritten. Items with an unknown
+ * flow type, the RTE_ETH_FLOW_UNKNOWN type, or the invalid-PCTYPE bit
+ * set are rejected. The aggregate flow_types_mask and pctypes_mask are
+ * kept consistent with the table. Returns 0 on success, -EINVAL on bad
+ * input, -ENODEV/-ENOTSUP on port errors.
+ */
+int
+rte_pmd_i40e_flow_type_mapping_update(
+                       uint16_t port,
+                       struct rte_pmd_i40e_flow_type_mapping *mapping_items,
+                       uint16_t count,
+                       uint8_t exclusive)
+{
+       struct rte_eth_dev *dev;
+       struct i40e_adapter *ad;
+       int i;
+
+       RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+       dev = &rte_eth_devices[port];
+
+       if (!is_i40e_supported(dev))
+               return -ENOTSUP;
+
+       if (count > I40E_FLOW_TYPE_MAX)
+               return -EINVAL;
+
+       /* validate every item before mutating any state */
+       for (i = 0; i < count; i++)
+               if (mapping_items[i].flow_type >= I40E_FLOW_TYPE_MAX ||
+                   mapping_items[i].flow_type == RTE_ETH_FLOW_UNKNOWN ||
+                   (mapping_items[i].pctype &
+                   (1ULL << I40E_FILTER_PCTYPE_INVALID)))
+                       return -EINVAL;
+
+       ad = I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+
+       if (exclusive) {
+               for (i = 0; i < I40E_FLOW_TYPE_MAX; i++)
+                       ad->pctypes_tbl[i] = 0ULL;
+               ad->flow_types_mask = 0ULL;
+       }
+
+       for (i = 0; i < count; i++) {
+               ad->pctypes_tbl[mapping_items[i].flow_type] =
+                                               mapping_items[i].pctype;
+               /* an empty PCTYPE mask disables the flow type */
+               if (mapping_items[i].pctype)
+                       ad->flow_types_mask |=
+                                       (1ULL << mapping_items[i].flow_type);
+               else
+                       ad->flow_types_mask &=
+                                       ~(1ULL << mapping_items[i].flow_type);
+       }
+
+       /* recompute the aggregate PCTYPE mask from the whole table */
+       for (i = 0, ad->pctypes_mask = 0ULL; i < I40E_FLOW_TYPE_MAX; i++)
+               ad->pctypes_mask |= ad->pctypes_tbl[i];
+
+       return 0;
+}
+
+/* Find which VF on 'port' owns the MAC address 'vf_mac'.
+ * Returns the VF id on success; -ENODEV/-ENOTSUP on port errors and
+ * -EINVAL when no VF uses that address.
+ */
+int
+rte_pmd_i40e_query_vfid_by_mac(uint16_t port, const struct ether_addr *vf_mac)
+{
+       struct rte_eth_dev *dev;
+       struct ether_addr *mac;
+       struct i40e_pf *pf;
+       int vf_id;
+       struct i40e_pf_vf *vf;
+       uint16_t vf_num;
+
+       RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+       dev = &rte_eth_devices[port];
+
+       if (!is_i40e_supported(dev))
+               return -ENOTSUP;
+
+       pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+       vf_num = pf->vf_num;
+
+       /* NOTE(review): assumes pf->vfs is non-NULL whenever vf_num > 0,
+        * unlike the other VF APIs here which also test !pf->vfs — confirm.
+        */
+       for (vf_id = 0; vf_id < vf_num; vf_id++) {
+               vf = &pf->vfs[vf_id];
+               mac = &vf->mac_addr;
+
+               if (is_same_ether_addr(mac, vf_mac))
+                       return vf_id;
+       }
+
+       return -EINVAL;
+}
+
+/* Reprogram the main VSI's TC/queue mapping from the queue regions
+ * stored in pf->queue_region and push it to firmware via an
+ * update-VSI admin command; on success the updated mapping is
+ * mirrored back into the local VSI info. Returns 0 on success,
+ * -EINVAL when no region has been configured, or the update command's
+ * error code.
+ */
+static int
+i40e_vsi_update_queue_region_mapping(struct i40e_hw *hw,
+                             struct i40e_pf *pf)
+{
+       uint16_t i;
+       struct i40e_vsi *vsi = pf->main_vsi;
+       uint16_t queue_offset, bsf, tc_index;
+       struct i40e_vsi_context ctxt;
+       struct i40e_aqc_vsi_properties_data *vsi_info;
+       struct i40e_queue_regions *region_info =
+                               &pf->queue_region;
+       int32_t ret = -EINVAL;
+
+       if (!region_info->queue_region_number) {
+               PMD_INIT_LOG(ERR, "there is no that region id been set before");
+               return ret;
+       }
+
+       memset(&ctxt, 0, sizeof(struct i40e_vsi_context));
+
+       /* Update Queue Pairs Mapping for currently enabled UPs */
+       ctxt.seid = vsi->seid;
+       ctxt.pf_num = hw->pf_id;
+       ctxt.vf_num = 0;
+       ctxt.uplink_seid = vsi->uplink_seid;
+       ctxt.info = vsi->info;
+       vsi_info = &ctxt.info;
+
+       /* start from a clean TC/queue mapping */
+       memset(vsi_info->tc_mapping, 0, sizeof(uint16_t) * 8);
+       memset(vsi_info->queue_mapping, 0, sizeof(uint16_t) * 16);
+
+       /* Configure queue region and queue mapping parameters,
+        * for enabled queue region, allocate queues to this region.
+        */
+
+       for (i = 0; i < region_info->queue_region_number; i++) {
+               tc_index = region_info->region[i].region_id;
+               /* queue_num is validated to be a power of two when the
+                * region is created, so bsf is its log2
+                */
+               bsf = rte_bsf32(region_info->region[i].queue_num);
+               queue_offset = region_info->region[i].queue_start_index;
+               vsi_info->tc_mapping[tc_index] = rte_cpu_to_le_16(
+                       (queue_offset << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
+                               (bsf << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT));
+       }
+
+       /* Associate queue number with VSI, Keep vsi->nb_qps unchanged */
+       vsi_info->mapping_flags |=
+                       rte_cpu_to_le_16(I40E_AQ_VSI_QUE_MAP_CONTIG);
+       vsi_info->queue_mapping[0] = rte_cpu_to_le_16(vsi->base_queue);
+       vsi_info->valid_sections |=
+               rte_cpu_to_le_16(I40E_AQ_VSI_PROP_QUEUE_MAP_VALID);
+
+       /* Update the VSI after updating the VSI queue-mapping information */
+       ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
+       if (ret) {
+               PMD_DRV_LOG(ERR, "Failed to configure queue region mapping = %d ",
+                               hw->aq.asq_last_status);
+               return ret;
+       }
+       /* update the local VSI info with updated queue map */
+       rte_memcpy(&vsi->info.tc_mapping, &ctxt.info.tc_mapping,
+                                       sizeof(vsi->info.tc_mapping));
+       rte_memcpy(&vsi->info.queue_mapping,
+                       &ctxt.info.queue_mapping,
+                       sizeof(vsi->info.queue_mapping));
+       vsi->info.mapping_flags = ctxt.info.mapping_flags;
+       vsi->info.valid_sections = 0;
+
+       return 0;
+}
+
+
+/* Record a new queue region in the PF's queue-region table.
+ *
+ * The region size must be a power of two no larger than 64, the region
+ * id at most I40E_REGION_MAX_INDEX, and the queue span must fit inside
+ * the main VSI's used queues. A region id may only be configured once.
+ * Returns 0 on success, -EINVAL otherwise.
+ */
+static int
+i40e_queue_region_set_region(struct i40e_pf *pf,
+                               struct rte_pmd_i40e_queue_region_conf *conf_ptr)
+{
+       uint16_t i;
+       struct i40e_vsi *main_vsi = pf->main_vsi;
+       struct i40e_queue_regions *info = &pf->queue_region;
+       int32_t ret = -EINVAL;
+
+       if (!((rte_is_power_of_2(conf_ptr->queue_num)) &&
+                               conf_ptr->queue_num <= 64)) {
+               PMD_DRV_LOG(ERR, "The region sizes should be any of the following values: 1, 2, 4, 8, 16, 32, 64 as long as the "
+                       "total number of queues do not exceed the VSI allocation");
+               return ret;
+       }
+
+       if (conf_ptr->region_id > I40E_REGION_MAX_INDEX) {
+               PMD_DRV_LOG(ERR, "the queue region max index is 7");
+               return ret;
+       }
+
+       if ((conf_ptr->queue_start_index + conf_ptr->queue_num)
+                                       > main_vsi->nb_used_qps) {
+               PMD_DRV_LOG(ERR, "the queue index exceeds the VSI range");
+               return ret;
+       }
+
+       /* reject a region id that has already been configured */
+       for (i = 0; i < info->queue_region_number; i++)
+               if (conf_ptr->region_id == info->region[i].region_id)
+                       break;
+
+       if (i == info->queue_region_number &&
+                               i <= I40E_REGION_MAX_INDEX) {
+               /* append the new region to the table */
+               info->region[i].region_id = conf_ptr->region_id;
+               info->region[i].queue_num = conf_ptr->queue_num;
+               info->region[i].queue_start_index =
+                       conf_ptr->queue_start_index;
+               info->queue_region_number++;
+       } else {
+               PMD_DRV_LOG(ERR, "queue region number exceeds maxnum 8 or the queue region id has been set before");
+               return ret;
+       }
+
+       return 0;
+}
+
+/* Bind a hardware flow type (PCTYPE) to an already configured queue
+ * region. A flow type that is already bound to some region is logged
+ * and treated as success. Returns 0 on success, -EINVAL for a bad
+ * region id or flow type, or an unknown region.
+ */
+static int
+i40e_queue_region_set_flowtype(struct i40e_pf *pf,
+                       struct rte_pmd_i40e_queue_region_conf *rss_region_conf)
+{
+       int32_t ret = -EINVAL;
+       struct i40e_queue_regions *info = &pf->queue_region;
+       uint16_t i, j;
+       uint16_t region_index, flowtype_index;
+
+       /* For the pctype or hardware flowtype of packet,
+        * the specific index for each type has been defined
+        * in file i40e_type.h as enum i40e_filter_pctype.
+        */
+
+       if (rss_region_conf->region_id > I40E_PFQF_HREGION_MAX_INDEX) {
+               PMD_DRV_LOG(ERR, "the queue region max index is 7");
+               return ret;
+       }
+
+       if (rss_region_conf->hw_flowtype >= I40E_FILTER_PCTYPE_MAX) {
+               PMD_DRV_LOG(ERR, "the hw_flowtype or PCTYPE max index is 63");
+               return ret;
+       }
+
+
+       /* the target region must have been created beforehand */
+       for (i = 0; i < info->queue_region_number; i++)
+               if (rss_region_conf->region_id == info->region[i].region_id)
+                       break;
+
+       if (i == info->queue_region_number) {
+               PMD_DRV_LOG(ERR, "that region id has not been set before");
+               ret = -EINVAL;
+               return ret;
+       }
+       region_index = i;
+
+       /* scan all regions: each flow type can be bound only once */
+       for (i = 0; i < info->queue_region_number; i++) {
+               for (j = 0; j < info->region[i].flowtype_num; j++) {
+                       if (rss_region_conf->hw_flowtype ==
+                               info->region[i].hw_flowtype[j]) {
+                               /* already bound: nothing more to do */
+                               PMD_DRV_LOG(ERR, "that hw_flowtype has been set before");
+                               return 0;
+                       }
+               }
+       }
+
+       flowtype_index = info->region[region_index].flowtype_num;
+       info->region[region_index].hw_flowtype[flowtype_index] =
+                                       rss_region_conf->hw_flowtype;
+       info->region[region_index].flowtype_num++;
+
+       return 0;
+}
+
+/**
+ * Commit the recorded flowtype-to-region bindings to hardware.
+ *
+ * Each PFQF_HREGION register packs the region mapping for 8 consecutive
+ * flow types: (hw_flowtype >> 3) selects the register index and
+ * (hw_flowtype & 0x7) selects which of the 8 REGION_n/OVERRIDE_ENA_n
+ * field pairs inside that register to program. Registers are
+ * read-modified-written so earlier bindings in the same register are
+ * preserved.
+ */
+static void
+i40e_queue_region_pf_flowtype_conf(struct i40e_hw *hw,
+                               struct i40e_pf *pf)
+{
+       uint8_t hw_flowtype;
+       uint32_t pfqf_hregion;
+       uint16_t i, j, index;
+       struct i40e_queue_regions *info = &pf->queue_region;
+
+       /* For the pctype or hardware flowtype of packet,
+        * the specific index for each type has been defined
+        * in file i40e_type.h as enum i40e_filter_pctype.
+        */
+
+       for (i = 0; i < info->queue_region_number; i++) {
+               for (j = 0; j < info->region[i].flowtype_num; j++) {
+                       hw_flowtype = info->region[i].hw_flowtype[j];
+                       /* 8 flow types share one PFQF_HREGION register. */
+                       index = hw_flowtype >> 3;
+                       pfqf_hregion =
+                               i40e_read_rx_ctl(hw, I40E_PFQF_HREGION(index));
+
+                       /* Select the per-flowtype field pair by the low
+                        * 3 bits of the flow type.
+                        */
+                       if ((hw_flowtype & 0x7) == 0) {
+                               pfqf_hregion |= info->region[i].region_id <<
+                                       I40E_PFQF_HREGION_REGION_0_SHIFT;
+                               pfqf_hregion |= 1 <<
+                                       I40E_PFQF_HREGION_OVERRIDE_ENA_0_SHIFT;
+                       } else if ((hw_flowtype & 0x7) == 1) {
+                               pfqf_hregion |= info->region[i].region_id  <<
+                                       I40E_PFQF_HREGION_REGION_1_SHIFT;
+                               pfqf_hregion |= 1 <<
+                                       I40E_PFQF_HREGION_OVERRIDE_ENA_1_SHIFT;
+                       } else if ((hw_flowtype & 0x7) == 2) {
+                               pfqf_hregion |= info->region[i].region_id  <<
+                                       I40E_PFQF_HREGION_REGION_2_SHIFT;
+                               pfqf_hregion |= 1 <<
+                                       I40E_PFQF_HREGION_OVERRIDE_ENA_2_SHIFT;
+                       } else if ((hw_flowtype & 0x7) == 3) {
+                               pfqf_hregion |= info->region[i].region_id  <<
+                                       I40E_PFQF_HREGION_REGION_3_SHIFT;
+                               pfqf_hregion |= 1 <<
+                                       I40E_PFQF_HREGION_OVERRIDE_ENA_3_SHIFT;
+                       } else if ((hw_flowtype & 0x7) == 4) {
+                               pfqf_hregion |= info->region[i].region_id  <<
+                                       I40E_PFQF_HREGION_REGION_4_SHIFT;
+                               pfqf_hregion |= 1 <<
+                                       I40E_PFQF_HREGION_OVERRIDE_ENA_4_SHIFT;
+                       } else if ((hw_flowtype & 0x7) == 5) {
+                               pfqf_hregion |= info->region[i].region_id  <<
+                                       I40E_PFQF_HREGION_REGION_5_SHIFT;
+                               pfqf_hregion |= 1 <<
+                                       I40E_PFQF_HREGION_OVERRIDE_ENA_5_SHIFT;
+                       } else if ((hw_flowtype & 0x7) == 6) {
+                               pfqf_hregion |= info->region[i].region_id  <<
+                                       I40E_PFQF_HREGION_REGION_6_SHIFT;
+                               pfqf_hregion |= 1 <<
+                                       I40E_PFQF_HREGION_OVERRIDE_ENA_6_SHIFT;
+                       } else {
+                               pfqf_hregion |= info->region[i].region_id  <<
+                                       I40E_PFQF_HREGION_REGION_7_SHIFT;
+                               pfqf_hregion |= 1 <<
+                                       I40E_PFQF_HREGION_OVERRIDE_ENA_7_SHIFT;
+                       }
+
+                       i40e_write_rx_ctl(hw, I40E_PFQF_HREGION(index),
+                                               pfqf_hregion);
+               }
+       }
+}
+
+/**
+ * Bind a DCB user priority (0..I40E_MAX_USER_PRIORITY-1) to an existing
+ * queue region.
+ *
+ * The binding is only recorded in the driver-private table
+ * (pf->queue_region); it takes effect on hardware when
+ * i40e_queue_region_dcb_configure() runs on the FLUSH_ON op.
+ *
+ * Returns 0 on success (also when the priority was already bound to a
+ * region), -EINVAL on an out-of-range priority / region id or when the
+ * region id was never created.
+ */
+static int
+i40e_queue_region_set_user_priority(struct i40e_pf *pf,
+               struct rte_pmd_i40e_queue_region_conf *rss_region_conf)
+{
+       struct i40e_queue_regions *info = &pf->queue_region;
+       int32_t ret = -EINVAL;
+       uint16_t i, j, region_index;
+
+       if (rss_region_conf->user_priority >= I40E_MAX_USER_PRIORITY) {
+               /* Fixed misleading copy-paste message: this check is on
+                * the user priority, not the queue region index.
+                */
+               PMD_DRV_LOG(ERR, "the user priority max index is 7");
+               return ret;
+       }
+
+       if (rss_region_conf->region_id > I40E_REGION_MAX_INDEX) {
+               PMD_DRV_LOG(ERR, "the region_id max index is 7");
+               return ret;
+       }
+
+       /* The target region must exist in the driver table already. */
+       for (i = 0; i < info->queue_region_number; i++)
+               if (rss_region_conf->region_id == info->region[i].region_id)
+                       break;
+
+       if (i == info->queue_region_number) {
+               PMD_DRV_LOG(ERR, "that region id has not been set before");
+               return ret;
+       }
+
+       region_index = i;
+
+       /* A user priority may belong to at most one region; a duplicate
+        * request is treated as a no-op success.
+        */
+       for (i = 0; i < info->queue_region_number; i++) {
+               for (j = 0; j < info->region[i].user_priority_num; j++) {
+                       if (info->region[i].user_priority[j] ==
+                               rss_region_conf->user_priority) {
+                               PMD_DRV_LOG(ERR, "that user priority has been set before");
+                               return 0;
+                       }
+               }
+       }
+
+       /* Append the priority to the region's per-region list. */
+       j = info->region[region_index].user_priority_num;
+       info->region[region_index].user_priority[j] =
+                                       rss_region_conf->user_priority;
+       info->region[region_index].user_priority_num++;
+
+       return 0;
+}
+
+/**
+ * Program DCB/ETS so each queue region maps to one traffic class.
+ *
+ * Builds a fresh local DCBX config: bandwidth is split evenly across
+ * the configured regions (remainder spread one percent at a time), each
+ * TC uses ETS, and the recorded user-priority bindings fill the
+ * priority table. The config is copied into hw->local_dcbx_config and
+ * committed with i40e_set_dcb_config().
+ *
+ * Returns 0 on success, -EINVAL when no region was configured, or the
+ * i40e_set_dcb_config() error code on HW failure.
+ */
+static int
+i40e_queue_region_dcb_configure(struct i40e_hw *hw,
+                               struct i40e_pf *pf)
+{
+       struct i40e_dcbx_config dcb_cfg_local;
+       struct i40e_dcbx_config *dcb_cfg;
+       struct i40e_queue_regions *info = &pf->queue_region;
+       struct i40e_dcbx_config *old_cfg = &hw->local_dcbx_config;
+       int32_t ret = -EINVAL;
+       uint16_t i, j, prio_index, region_index;
+       uint8_t tc_map, tc_bw, bw_lf;
+
+       if (!info->queue_region_number) {
+               PMD_DRV_LOG(ERR, "No queue region been set before");
+               return ret;
+       }
+
+       dcb_cfg = &dcb_cfg_local;
+       memset(dcb_cfg, 0, sizeof(struct i40e_dcbx_config));
+
+       /* assume each tc has the same bw */
+       tc_bw = I40E_MAX_PERCENT / info->queue_region_number;
+       for (i = 0; i < info->queue_region_number; i++)
+               dcb_cfg->etscfg.tcbwtable[i] = tc_bw;
+       /* to ensure the sum of tcbw is equal to 100 */
+       bw_lf = I40E_MAX_PERCENT %  info->queue_region_number;
+       for (i = 0; i < bw_lf; i++)
+               dcb_cfg->etscfg.tcbwtable[i]++;
+
+       /* assume each tc has the same Transmission Selection Algorithm */
+       for (i = 0; i < info->queue_region_number; i++)
+               dcb_cfg->etscfg.tsatable[i] = I40E_IEEE_TSA_ETS;
+
+       /* Map every recorded user priority to its region's TC. */
+       for (i = 0; i < info->queue_region_number; i++) {
+               for (j = 0; j < info->region[i].user_priority_num; j++) {
+                       prio_index = info->region[i].user_priority[j];
+                       region_index = info->region[i].region_id;
+                       dcb_cfg->etscfg.prioritytable[prio_index] =
+                                               region_index;
+               }
+       }
+
+       /* FW needs one App to configure HW */
+       dcb_cfg->numapps = I40E_DEFAULT_DCB_APP_NUM;
+       dcb_cfg->app[0].selector = I40E_APP_SEL_ETHTYPE;
+       dcb_cfg->app[0].priority = I40E_DEFAULT_DCB_APP_PRIO;
+       dcb_cfg->app[0].protocolid = I40E_APP_PROTOID_FCOE;
+
+       /* One bit per active region/TC. */
+       tc_map = RTE_LEN2MASK(info->queue_region_number, uint8_t);
+
+       dcb_cfg->pfc.willing = 0;
+       dcb_cfg->pfc.pfccap = I40E_MAX_TRAFFIC_CLASS;
+       dcb_cfg->pfc.pfcenable = tc_map;
+
+       /* Copy the new config to the current config */
+       *old_cfg = *dcb_cfg;
+       /* Recommended config mirrors the operational one. */
+       old_cfg->etsrec = old_cfg->etscfg;
+       ret = i40e_set_dcb_config(hw);
+
+       if (ret) {
+               PMD_DRV_LOG(ERR, "Set queue region DCB Config failed, err %s aq_err %s",
+                        i40e_stat_str(hw, ret),
+                        i40e_aq_str(hw, hw->aq.asq_last_status));
+               return ret;
+       }
+
+       return 0;
+}
+
+/**
+ * Commit (on != 0) or tear down (on == 0) the whole queue region setup.
+ *
+ * Commit path: program the flowtype-to-region registers, update the VSI
+ * queue mapping, then configure DCB; any failure aborts with its error.
+ *
+ * Teardown path: collapse to a single region covering all of the main
+ * VSI's queues, restore default DCB, and reset the driver-private
+ * region table. Teardown is best-effort: mapping/DCB failures are
+ * logged but do not stop the reset, and 0 is always returned.
+ */
+int
+i40e_flush_queue_region_all_conf(struct rte_eth_dev *dev,
+       struct i40e_hw *hw, struct i40e_pf *pf, uint16_t on)
+{
+       int32_t ret = -EINVAL;
+       struct i40e_queue_regions *info = &pf->queue_region;
+       struct i40e_vsi *main_vsi = pf->main_vsi;
+
+       if (on) {
+               i40e_queue_region_pf_flowtype_conf(hw, pf);
+
+               ret = i40e_vsi_update_queue_region_mapping(hw, pf);
+               if (ret != I40E_SUCCESS) {
+                       PMD_DRV_LOG(INFO, "Failed to flush queue region mapping.");
+                       return ret;
+               }
+
+               ret = i40e_queue_region_dcb_configure(hw, pf);
+               if (ret != I40E_SUCCESS) {
+                       PMD_DRV_LOG(INFO, "Failed to flush dcb.");
+                       return ret;
+               }
+
+               return 0;
+       }
+
+       /* Teardown: one region spanning all queues of the main VSI. */
+       info->queue_region_number = 1;
+       info->region[0].queue_num = main_vsi->nb_used_qps;
+       info->region[0].queue_start_index = 0;
+
+       ret = i40e_vsi_update_queue_region_mapping(hw, pf);
+       if (ret != I40E_SUCCESS)
+               PMD_DRV_LOG(INFO, "Failed to flush queue region mapping.");
+
+       ret = i40e_dcb_init_configure(dev, TRUE);
+       if (ret != I40E_SUCCESS) {
+               PMD_DRV_LOG(INFO, "Failed to flush dcb.");
+               /* DCB restore failed: stop advertising DCB support. */
+               pf->flags &= ~I40E_FLAG_DCB;
+       }
+
+       /* Clear the driver-private region table back to defaults. */
+       i40e_init_queue_region_conf(dev);
+
+       return 0;
+}
+
+/**
+ * Check that RSS is active on the PF.
+ *
+ * Reads both halves of the 64-bit PFQF_HENA enabled-flowtype mask; a
+ * fully zero mask means RSS is disabled.
+ *
+ * Returns 0 when at least one flow type is enabled, -ENOTSUP otherwise.
+ */
+static int
+i40e_queue_region_pf_check_rss(struct i40e_pf *pf)
+{
+       struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+       uint64_t enabled_mask;
+
+       enabled_mask = (uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0));
+       enabled_mask |=
+               ((uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1))) << 32;
+
+       return enabled_mask ? 0 : -ENOTSUP;
+}
+
+/**
+ * Copy the driver-private queue region table out to the caller.
+ *
+ * Always returns 0.
+ */
+static int
+i40e_queue_region_get_all_info(struct i40e_pf *pf,
+               struct i40e_queue_regions *regions_ptr)
+{
+       rte_memcpy(regions_ptr, &pf->queue_region, sizeof(*regions_ptr));
+
+       return 0;
+}
+
+/**
+ * Public entry point for all queue region operations.
+ *
+ * Dispatches @op_type to the matching helper; SET-type ops only record
+ * configuration in the driver, FLUSH_ON commits it to hardware and
+ * FLUSH_OFF restores the driver defaults.
+ *
+ * Returns 0 on success, -ENODEV for an invalid port, -ENOTSUP for a
+ * non-i40e port or RSS disabled, -EINVAL for bad op/arguments.
+ */
+int rte_pmd_i40e_rss_queue_region_conf(uint16_t port_id,
+               enum rte_pmd_i40e_queue_region_op op_type, void *arg)
+{
+       struct rte_eth_dev *dev;
+       struct i40e_pf *pf;
+       struct i40e_hw *hw;
+       int32_t ret;
+
+       /* Validate the port before touching any per-port data:
+        * dereferencing dev->data for an invalid port id is undefined
+        * behaviour.
+        */
+       RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
+
+       dev = &rte_eth_devices[port_id];
+
+       if (!is_i40e_supported(dev))
+               return -ENOTSUP;
+
+       pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+       hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+       /* Queue regions are meaningless without RSS enabled. */
+       if (i40e_queue_region_pf_check_rss(pf))
+               return -ENOTSUP;
+
+       /* All ops except the two FLUSH variants consume @arg. */
+       if (arg == NULL &&
+           op_type != RTE_PMD_I40E_RSS_QUEUE_REGION_ALL_FLUSH_ON &&
+           op_type != RTE_PMD_I40E_RSS_QUEUE_REGION_ALL_FLUSH_OFF)
+               return -EINVAL;
+
+       /* This queue region feature only support pf by now. It should
+        * be called after dev_start, and will be clear after dev_stop.
+        * "RTE_PMD_I40E_RSS_QUEUE_REGION_ALL_FLUSH_ON"
+        * is just an enable function which server for other configuration,
+        * it is for all configuration about queue region from up layer,
+        * at first will only keep in DPDK software stored in driver,
+        * only after "FLUSH_ON", it commit all configuration to HW.
+        * Because PMD had to set hardware configuration at a time, so
+        * it will record all up layer command at first.
+        * "RTE_PMD_I40E_RSS_QUEUE_REGION_ALL_FLUSH_OFF" is
+        * just clean all configuration about queue region just now,
+        * and restore all to DPDK i40e driver default
+        * config when start up.
+        */
+
+       switch (op_type) {
+       case RTE_PMD_I40E_RSS_QUEUE_REGION_SET:
+               ret = i40e_queue_region_set_region(pf,
+                               (struct rte_pmd_i40e_queue_region_conf *)arg);
+               break;
+       case RTE_PMD_I40E_RSS_QUEUE_REGION_FLOWTYPE_SET:
+               ret = i40e_queue_region_set_flowtype(pf,
+                               (struct rte_pmd_i40e_queue_region_conf *)arg);
+               break;
+       case RTE_PMD_I40E_RSS_QUEUE_REGION_USER_PRIORITY_SET:
+               ret = i40e_queue_region_set_user_priority(pf,
+                               (struct rte_pmd_i40e_queue_region_conf *)arg);
+               break;
+       case RTE_PMD_I40E_RSS_QUEUE_REGION_ALL_FLUSH_ON:
+               ret = i40e_flush_queue_region_all_conf(dev, hw, pf, 1);
+               break;
+       case RTE_PMD_I40E_RSS_QUEUE_REGION_ALL_FLUSH_OFF:
+               ret = i40e_flush_queue_region_all_conf(dev, hw, pf, 0);
+               break;
+       case RTE_PMD_I40E_RSS_QUEUE_REGION_INFO_GET:
+               ret = i40e_queue_region_get_all_info(pf,
+                               (struct i40e_queue_regions *)arg);
+               break;
+       default:
+               PMD_DRV_LOG(WARNING, "op type (%d) not supported",
+                           op_type);
+               ret = -EINVAL;
+       }
+
+       I40E_WRITE_FLUSH(hw);
+
+       return ret;
+}
+
+/**
+ * Add (@add != 0) or delete a raw-packet-template flow director filter.
+ *
+ * Translates the public template config into the internal FDIR filter
+ * representation (raw_flow + pkt_template flag) and hands it to
+ * i40e_flow_add_del_fdir_filter().
+ *
+ * Returns 0 on success, -ENODEV for an invalid port, -ENOTSUP for a
+ * non-i40e port, -EINVAL for a NULL @conf, or the FDIR helper's error.
+ */
+int rte_pmd_i40e_flow_add_del_packet_template(
+                       uint16_t port,
+                       const struct rte_pmd_i40e_pkt_template_conf *conf,
+                       uint8_t add)
+{
+       struct rte_eth_dev *dev;
+       struct i40e_fdir_filter_conf filter_conf;
+
+       /* Validate inputs before dereferencing anything. */
+       RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+       if (conf == NULL)
+               return -EINVAL;
+
+       dev = &rte_eth_devices[port];
+
+       if (!is_i40e_supported(dev))
+               return -ENOTSUP;
+
+       memset(&filter_conf, 0, sizeof(filter_conf));
+       filter_conf.soft_id = conf->soft_id;
+       filter_conf.input.flow.raw_flow.pctype = conf->input.pctype;
+       filter_conf.input.flow.raw_flow.packet = conf->input.packet;
+       filter_conf.input.flow.raw_flow.length = conf->input.length;
+       /* Mark the filter as a raw packet template, not a parsed flow. */
+       filter_conf.input.flow_ext.pkt_template = true;
+
+       filter_conf.action.rx_queue = conf->action.rx_queue;
+       filter_conf.action.behavior =
+               (enum i40e_fdir_behavior)conf->action.behavior;
+       filter_conf.action.report_status =
+               (enum i40e_fdir_status)conf->action.report_status;
+       filter_conf.action.flex_off = conf->action.flex_off;
+
+       return i40e_flow_add_del_fdir_filter(dev, &filter_conf, add);
+}